hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0e5efbf488bea2086cd3d3d62901e7b0824c5f12 | 136 | py | Python | exercicios/exe012/exe0012.py | tiagolsouza/exercicios-Curso-em-video-PYTHON | e4e6975fac7e4883aeab58b970c6ca72895564e4 | [
"MIT"
] | null | null | null | exercicios/exe012/exe0012.py | tiagolsouza/exercicios-Curso-em-video-PYTHON | e4e6975fac7e4883aeab58b970c6ca72895564e4 | [
"MIT"
] | null | null | null | exercicios/exe012/exe0012.py | tiagolsouza/exercicios-Curso-em-video-PYTHON | e4e6975fac7e4883aeab58b970c6ca72895564e4 | [
"MIT"
] | null | null | null | n = float(input('Digite o preço atual do produto: R$'))
a = n * 0.95
print('O preço do produto com disconto sera: R$ {:.2f}'.format(a))
| 34 | 66 | 0.647059 |
70f01f93e70ac89be37cfa50afe428b32919dcc9 | 10,644 | py | Python | SCanalyzer/SCanalyzer.py | tylerharter/simulator | 75a77b458b10a9fd3f524ece6438f5e22ab19f2b | [
"Apache-2.0"
] | null | null | null | SCanalyzer/SCanalyzer.py | tylerharter/simulator | 75a77b458b10a9fd3f524ece6438f5e22ab19f2b | [
"Apache-2.0"
] | null | null | null | SCanalyzer/SCanalyzer.py | tylerharter/simulator | 75a77b458b10a9fd3f524ece6438f5e22ab19f2b | [
"Apache-2.0"
] | null | null | null | from .busSim.manager import managerFactory
from .result.searchResult import SearchResult
from .util import gen_start_time, transform
from .gtfs_edit import copy_with_edits
from .service.yelp import get_results
from .census import Census
import numpy as np
import pandas as pd
import geopandas as gpd
from shapely.geometry import Polygon
from shapely.wkt import loads
from pyproj import Transformer
from zipfile import ZipFile
from io import TextIOWrapper
import os
from pathlib import Path
from math import ceil, floor
from collections import defaultdict
import time
class SCanalyzer:
    """Service-coverage analyzer driven by a GTFS feed.

    Orchestrates batch bus-simulation searches, loads census and Yelp
    data, and post-processes search results into service/demographic
    metrics.  All outputs are written under ``./out``.
    """

    def __init__(self, gtfs_path):
        """Initialize the analyzer.

        :param gtfs_path: path to the GTFS zip file -- str
        """
        # Keep the original path separately so gtfs_edit() can always
        # re-edit from the pristine feed (from_orig=True).
        self.gtfs_path = gtfs_path
        self.orig_gtfs_path = gtfs_path
        self.base_out_path = self._get_out_path()
        self.out_path = self.base_out_path
        # Side effects: reprojects stops to EPSG:3174 inside the GTFS zip
        # and computes self.borders = (max_x, min_x, max_y, min_y).
        self._preprocess_gtfs()

    def gtfs_edit(self, edit_fn, route, from_orig=True):
        """Write an edited copy of the GTFS feed and make it the active feed.

        :param edit_fn: edit callable; its ``__name__`` becomes part of the
            new file name
        :param route: route identifier forwarded to ``edit_fn``
        :param from_orig: edit the original feed (True) or the currently
            active, possibly already-edited, feed (False)
        """
        orig_gtfs_name = os.path.basename(self.orig_gtfs_path)
        modified_gtfs_name = f"{edit_fn.__name__}-{route}-{orig_gtfs_name}"
        modified_gtfs_path = os.path.join(
            self.base_out_path, modified_gtfs_name)
        from_path = self.orig_gtfs_path if from_orig else self.gtfs_path
        copy_with_edits(from_path, modified_gtfs_path, edit_fn, route)
        self.gtfs_path = modified_gtfs_path

    def set_batch_label(self, label):
        """Route subsequent outputs into ``out/<label>`` (created if absent)."""
        self.out_path = os.path.join(self.base_out_path, label)
        Path(self.out_path).mkdir(parents=True, exist_ok=True)

    def reset_batch_label(self):
        """Reset the output directory back to the base output path."""
        self.out_path = self.base_out_path

    def search(self, config, perf_df=None):
        """Run a batch search with a dynamically selected manager.

        :param config: batch search configuration; must be runnable
        :param perf_df: optional dataframe for performance records
        :returns: result dataframe
        :raises Exception: if the config is not runnable
        """
        # prerun check
        if not config.is_runnable():
            raise Exception("The current config is not runnable")

        # dynamically init a manager for the configured run environment
        manager = managerFactory.create(
            config.get_run_env(), gtfs_path=self.gtfs_path,
            out_path=self.out_path, borders=self.borders)

        result_df = manager.run_batch(config, perf_df)
        return result_df

    def load_census(self, cache=True):
        """Load census demographics for the transit system's area.

        Looks for a stops.csv file in data/mmt_gtfs, queries the TigerWeb
        Census API to pull out census tracts based on the center and radius
        of the system (an optional addition of 1km by default is added to
        the radius).  From the tracts and a default set of demographics the
        ACS 5-year 2019 dataset is queried for each tract, a few statistics
        are computed, and the result is returned as a geodataframe and
        saved to the output folder.

        :param cache: if True, load and return a previously saved result
        :returns: geodataframe of census demographics
        """
        # Pull from cache and return early when available:
        cache_path = os.path.join(self.base_out_path, "census.csv")
        if cache and os.path.exists(cache_path):
            census_df = pd.read_csv(cache_path)
            return self._csvdf_to_gdf(census_df)

        # Create the geodataframe from the census APIs:
        c = Census(gtfs_filename="../data/mmt_gtfs/stops.csv")
        gdf_tracts = c.getCensusTracts()
        demographic_data = c.getDemographicsData(
            gdf_tracts, demographics=['Race', 'Vehicles'])

        # Save output for subsequent cached loads:
        demographic_data.to_csv(cache_path, index=False)
        return self._csvdf_to_gdf(demographic_data)

    # NOTE(review): mutable default argument for `services`; harmless here
    # because the list is never mutated, but worth confirming before reuse.
    def load_yelp(self, api_key, services=["banks", "clinics", "dentists", "hospitals", "supermarket"], cache=True):
        """Query Yelp for service locations within the system borders.

        :param api_key: Yelp API key -- str
        :param services: service categories to query -- list
        :param cache: if True, load and return a previously saved result
        :returns: dataframe of service locations
        """
        cache_path = os.path.join(self.base_out_path, "services.csv")
        if cache and os.path.exists(cache_path):
            return pd.read_csv(cache_path)
        dfs = [get_results(api_key, service, self.borders)
               for service in services]
        df = pd.concat(dfs)
        df.to_csv(cache_path, index=False)
        return df

    def add_service_metrics(self, result_gdf, services_gdf, perf_df=None):
        """Count reachable services per search result row.

        For every result bitmap, counts how many services of each category
        fall inside reachable grid cells and appends one column per
        service category to ``result_gdf``.

        :param result_gdf: search results with ``map_identifier`` column
        :param services_gdf: service locations with lat/lon and ``service``
        :param perf_df: optional dataframe for timing records
        :returns: ``result_gdf`` with one count column per service
        """
        # load grid size from a map_identifier (pick the first one on
        # result_gdf)
        max_x, min_x, max_y, min_y, grid_size, x_num, y_num = \
            self._load_grid_size(result_gdf)
        record_perf = (perf_df is not None)

        def get_grid(df):
            # Rasterize one service category's points onto the search grid.
            grid = np.zeros(x_num*y_num).reshape(y_num, -1)
            for index, row in df.iterrows():
                # convert to EPSG:3174 first to match the grid coordinates
                x, y = transform(row["latitude"], row["longitude"])
                x_idx = floor((x - min_x) / grid_size)
                y_idx = floor((y - min_y) / grid_size)
                if x_idx >= 0 and x_idx < x_num and y_idx >= 0 and y_idx < y_num:
                    grid[y_idx][x_idx] += 1
            # Wrapped in a list so pandas keeps the ndarray intact.
            return [grid]

        services_grid_series = services_gdf.groupby("service").apply(get_grid)
        services_counts = defaultdict(list)
        service_perfs = []

        # loop through all map_id in result_gdf; for records with the same
        # filename, open the file once and pull out each bitmap in turn
        curr_filename = None
        grid_iter = None
        for _, row in result_gdf.iterrows():
            s = time.time()
            filename, _ = self._parse_map_identifier(row["map_identifier"])
            # check if a new file needs to be opened
            if filename != curr_filename:
                curr_filename = filename
                grid_iter = SearchResult.grid_iter(filename)
            grid, _ = next(grid_iter, None)
            # combine bitmaps: start a zero count per service for this row
            for service, servicemap in services_grid_series.items():
                services_counts[service].append(0)
            for y, grid_row in enumerate(grid):
                for x, bit in enumerate(grid_row):
                    if bit == 0:
                        continue
                    # reachable cell: add each service's count at this cell
                    for service, servicemap in services_grid_series.items():
                        services_counts[service][-1] += servicemap[0][y][x]
            service_perfs.append(time.time() - s)

        for service, col in services_counts.items():
            result_gdf[service] = col
        if record_perf:
            perf_df["add_service_time"] = service_perfs
        return result_gdf

    def add_demographic_metrics(self, result_gdf, census_gdf, perf_df=None):
        """Join census statistics onto each result's starting location.

        :param result_gdf: search results with geometry and map_identifier
        :param census_gdf: census tracts with geometry and statistics
        :param perf_df: optional dataframe for timing records
        :returns: ``result_gdf`` with census columns appended
        """
        stats = census_gdf.columns
        max_x, min_x, max_y, min_y, grid_size, x_num, y_num = \
            self._load_grid_size(result_gdf)
        record_perf = (perf_df is not None)

        # iterate through all the starting locations, resolving the
        # containing census tract only once per unique starting location
        start_to_demographic_dict = {}
        for result_i, row in result_gdf.iterrows():
            s = time.time()
            _, i = self._parse_map_identifier(row["map_identifier"])
            if i not in start_to_demographic_dict:
                # NaN marks "no tract contains this start point"
                start_to_demographic_dict[i] = np.nan
                for census_i, census_row in census_gdf.iterrows():
                    if census_row["geometry"].contains(row["geometry"]):
                        start_to_demographic_dict[i] = census_i
                        break
            if not np.isnan(start_to_demographic_dict[i]):
                for stat in stats:
                    result_gdf.at[result_i, stat] = \
                        census_gdf.at[start_to_demographic_dict[i], stat]
            if record_perf:
                perf_df.at[result_i, "add_census_time"] = time.time() - s
        return result_gdf

    def load_result_map(self, map_identifier):
        """Load one result bitmap as polygons of reachable grid cells.

        :param map_identifier: ``<filename>!<index>`` identifier -- str
        :returns: geodataframe (EPSG:3174) with one square polygon per
            reachable cell
        """
        filename, idx = self._parse_map_identifier(map_identifier)
        grid, grid_size = SearchResult.load_grid(filename, idx)
        max_x, min_x, max_y, min_y = self.borders

        # generate gdf: one square polygon for each set bit in the grid
        df = pd.DataFrame(
            columns=["geometry"])
        i = 0
        for y, row in enumerate(grid):
            for x, bit in enumerate(row):
                x0 = grid_size * x + min_x
                x1 = x0 + grid_size
                y0 = grid_size * y + min_y
                y1 = y0 + grid_size
                if bit == 1:
                    df.loc[i, "geometry"] = Polygon(
                        [(x0, y0), (x0, y1), (x1, y1), (x1, y0)])
                    i += 1
        gdf = gpd.GeoDataFrame(df, crs="EPSG:3174")
        return gdf

    def _get_out_path(self):
        # Resolve (and create if needed) the ./out directory under CWD.
        dir_path = Path().absolute()
        out_path = os.path.join(dir_path, "out")
        Path(out_path).mkdir(parents=True, exist_ok=True)
        return out_path

    def _preprocess_gtfs(self):
        # Ensure projected stop coordinates exist, then cache the borders.
        self._reproject_stops()
        self.borders = self._get_borders()

    def _reproject_stops(self):
        # Adds a stops-3174.txt (EPSG:3174 coordinates) entry to the GTFS
        # zip if it is not already present; no-op otherwise.
        with ZipFile(self.gtfs_path) as zf:
            if "stops-3174.txt" in zf.namelist():
                return
            with zf.open("stops.txt") as f:
                stops_df = pd.read_csv(TextIOWrapper(f), sep=",")
                transformer = Transformer.from_crs(4326, 3174)
                stop_x, stop_y = transformer.transform(
                    stops_df["stop_lat"], stops_df["stop_lon"])
                stops_df["stop_x"] = stop_x
                stops_df["stop_y"] = stop_y
                # TODO change this to a fake file wrapper (avoids writing
                # a temporary file to the working directory)
                stops_df.to_csv("stops-3174.txt")
        with ZipFile(self.gtfs_path, "a") as zf:
            zf.write('stops-3174.txt')
        os.remove('stops-3174.txt')

    def _get_borders(self):
        # TODO: optimize
        # 1. combine with previous _reproject_stops to only open the file
        #    once
        # 2. these can be computed within one loop
        with ZipFile(self.gtfs_path) as zf:
            with zf.open("stops-3174.txt") as f:
                stops_df = pd.read_csv(TextIOWrapper(f), sep=",")
                max_x = stops_df["stop_x"].max()
                min_x = stops_df["stop_x"].min()
                max_y = stops_df["stop_y"].max()
                min_y = stops_df["stop_y"].min()
                return (max_x, min_x, max_y, min_y)

    def _parse_map_identifier(self, map_identifier):
        # Split "<filename>!<index>" into an absolute path and int index.
        tokens = map_identifier.split("!")
        if len(tokens) != 2 or not tokens[1].isnumeric():
            raise Exception("invalid map_identifier")
        return os.path.join(self.out_path, tokens[0]), int(tokens[1])

    def _load_grid_size(self, result_gdf):
        # Derive the grid geometry from the first result's stored bitmap.
        map_identifier = result_gdf.at[0, "map_identifier"]
        filename, idx = self._parse_map_identifier(map_identifier)
        _, grid_size = SearchResult.load_grid(filename, idx)
        max_x, min_x, max_y, min_y = self.borders
        x_num = ceil(abs(max_x - min_x) / grid_size)
        y_num = ceil(abs(max_y - min_y) / grid_size)
        return max_x, min_x, max_y, min_y, grid_size, x_num, y_num

    def _csvdf_to_gdf(self, df):
        # Re-hydrate WKT geometry strings from a cached CSV into shapes.
        df['geometry'] = df['geometry'].apply(loads)
        gdf = gpd.GeoDataFrame(
            df, geometry="geometry", crs="EPSG:4326")
        return gdf
| 39.422222 | 119 | 0.611988 |
80763c23c48650f0e1759925e5321a618629afc3 | 946 | py | Python | kubernetes/test/test_v1_volume_device.py | iamneha/python | 5b208a1a49a8d6f8bbab28bcc226b9ef793bcbd0 | [
"Apache-2.0"
] | 1 | 2019-02-17T15:28:39.000Z | 2019-02-17T15:28:39.000Z | kubernetes/test/test_v1_volume_device.py | iamneha/python | 5b208a1a49a8d6f8bbab28bcc226b9ef793bcbd0 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_volume_device.py | iamneha/python | 5b208a1a49a8d6f8bbab28bcc226b9ef793bcbd0 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_volume_device import V1VolumeDevice
class TestV1VolumeDevice(unittest.TestCase):
    """Unit test stubs for the V1VolumeDevice model."""

    def setUp(self):
        # No fixtures required for these stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testV1VolumeDevice(self):
        """Smoke-test placeholder for V1VolumeDevice construction."""
        # FIXME: construct object with mandatory attributes with example
        # values, e.g.:
        # model = kubernetes.client.models.v1_volume_device.V1VolumeDevice()
        pass
# Allow running this test module directly: python test_v1_volume_device.py
if __name__ == '__main__':
    unittest.main()
| 21.022222 | 105 | 0.704017 |
f22644cb5ec37609b4cf110a1003d7a71d2b59b2 | 467 | py | Python | cobrakbase/core/kbasefbafba.py | cshenry/cobrakbase | 9ab3db059171e7532082646302db22338ab61a55 | [
"MIT"
] | 3 | 2018-11-28T12:48:54.000Z | 2022-02-28T22:20:32.000Z | cobrakbase/core/kbasefbafba.py | cshenry/cobrakbase | 9ab3db059171e7532082646302db22338ab61a55 | [
"MIT"
] | 2 | 2020-06-26T20:13:16.000Z | 2020-10-27T05:10:34.000Z | cobrakbase/core/kbasefbafba.py | cshenry/cobrakbase | 9ab3db059171e7532082646302db22338ab61a55 | [
"MIT"
] | 1 | 2020-09-02T17:40:34.000Z | 2020-09-02T17:40:34.000Z | from cobrakbase.core.kbaseobject import KBaseObjectBase
from cobrakbase.core.utils import get_id_from_ref
class KBaseFBA(KBaseObjectBase):
    """Wrapper around a KBase FBA solution object."""

    def get_reaction_variable_by_id(self, rxn_id):
        """Return the FBA reaction variable whose reference ends in rxn_id.

        The reference's last '/'-separated segment is compared against
        rxn_id; None is returned when no variable matches.
        """
        return next(
            (variable for variable in self.data['FBAReactionVariables']
             if variable['modelreaction_ref'].split('/')[-1] == rxn_id),
            None)

    @property
    def objective_value(self):
        """The objective value of this FBA solution."""
        return self.data['objectiveValue']
| 27.470588 | 56 | 0.665953 |
da824f8e3c4116c21554ddfbef3b2a04a3fbe691 | 38,251 | py | Python | PyU4V/tools/openstack/migrate_utils.py | SiSTm1/PyU4V | ce9784fc5f8192024cfa42509b8d45583b83f01d | [
"MIT"
] | 19 | 2020-01-06T12:02:25.000Z | 2021-12-14T06:50:04.000Z | PyU4V/tools/openstack/migrate_utils.py | SiSTm1/PyU4V | ce9784fc5f8192024cfa42509b8d45583b83f01d | [
"MIT"
] | 53 | 2019-12-17T17:26:44.000Z | 2022-02-03T12:28:34.000Z | PyU4V/tools/openstack/migrate_utils.py | SiSTm1/PyU4V | ce9784fc5f8192024cfa42509b8d45583b83f01d | [
"MIT"
] | 13 | 2019-01-24T17:10:05.000Z | 2019-12-09T06:33:21.000Z | # Copyright (c) 2020 Dell Inc. or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OpenStack migrate_utils.py.
Migrate utilities for OpenStack
"""
from __future__ import print_function
from builtins import input
import logging
import re
import sys
import uuid
from PyU4V.utils import exception
import prettytable
LOG = logging.getLogger(__name__)

# Accepted user responses for the interactive yes/no/exit prompts.
YES_CONSTANTS = ['y', 'yes']
NO_CONSTANTS = ['n', 'no']
EXIT_CONSTANTS = ['x', 'exit']

# Tool version string.
VERSION = '2.0.0'

# Log-level labels understood by MigrateUtils.print_to_log.
DEBUG = 'DEBUG'
INFO = 'INFO'
WARNING = 'WARNING'
ERROR = 'ERROR'
class MigrateUtils(object):
"""OpenStack migrate utils."""
    def __init__(self, conn):
        """Initialize class.

        :param conn: the request connection -- obj
        """
        # PyU4V connection; provides the .provisioning API used throughout.
        self.conn = conn
@staticmethod
def check_input(txt_str, option):
"""Check the input against the expected option.
:param txt_str: text string -- str
:param option: 'Y', 'N' or 'X' -- str
:returns: boolean
"""
if option == 'Y':
return txt_str.lower() in YES_CONSTANTS
elif option == 'N':
return txt_str.lower() in NO_CONSTANTS
elif option == 'X':
LOG.debug('I am exiting')
return txt_str.lower() in EXIT_CONSTANTS
else:
return False
@staticmethod
def print_to_log(print_str, level=DEBUG):
"""Print to the logs
:param print_str: string to print -- str
:param level: the debug level -- str
"""
if level == ERROR:
LOG.error(print_str)
elif level == WARNING:
LOG.warning(print_str)
elif level == INFO:
LOG.info(print_str)
else:
LOG.debug(print_str)
@staticmethod
def print_pretty_table(datadict):
"""Print the data in the dict.
:param datadict: the data dictionary -- dict
"""
t = prettytable.PrettyTable(['Key', 'Value'])
for k, v in datadict.items():
if v is not None:
t.add_row([k, v])
print(t)
LOG.debug(t)
print('\n')
def smart_print(self, print_str, level, *args):
"""Print with variable arguments.
:param print_str: the print string -- str
:param level: the debug level -- str
:param args: one or more arguments
"""
print_str = print_str % args
self.print_to_log(print_str, level)
print(print_str)
    def get_elements_from_masking_view(self, masking_view_name):
        """Get components from masking view.

        :param masking_view_name: masking view name -- str
        :returns: dict with 'portgroup', 'storagegroup' and
            'initiatorgroup' names -- dict
        :raises ResourceNotFoundException: if any component cannot be found
        """
        masking_view_components = dict()
        try:
            masking_view_components['portgroup'] = (
                self.conn.provisioning.get_element_from_masking_view(
                    masking_view_name, portgroup=True))
            masking_view_components['storagegroup'] = (
                self.conn.provisioning.get_element_from_masking_view(
                    masking_view_name, storagegroup=True))
            masking_view_components['initiatorgroup'] = (
                self.conn.provisioning.get_element_from_masking_view(
                    masking_view_name, host=True))
        except exception.ResourceNotFoundException as error:
            # Re-raise with a message naming the masking view, preserving
            # the original exception as the cause.
            exception_message = (
                'Cannot find one of the components of %s' % masking_view_name)
            self.smart_print(exception_message, ERROR)
            raise exception.ResourceNotFoundException(
                data=exception_message) from error
        self.print_pretty_table(masking_view_components)
        return masking_view_components
@staticmethod
def verify_protocol(protocol):
"""Verify the protocol.
:param protocol: 'I' or 'F' -- str
:returns: boolean
"""
return bool(
len(protocol) == 1 and ('I' in protocol or 'F' in protocol))
@staticmethod
def get_object_components(regex_str, input_str):
"""Get components from input string.
:param regex_str: the regex -- str
:param input_str: the input string -- str
:returns: dict
"""
full_str = re.compile(regex_str)
match = full_str.match(input_str)
return match.groupdict() if match else None
def get_object_components_and_correct_host(self, regex_str, input_str):
"""Get components from input string.
:param regex_str: the regex -- str
:param input_str: the input string -- str
:returns: object components -- dict
"""
object_dict = self.get_object_components(regex_str, input_str)
if object_dict and 'host' in object_dict:
if object_dict['host'].endswith('-'):
object_dict['host'] = object_dict['host'][:-1]
return object_dict
    def get_masking_view_component_dict(
            self, masking_view_name, revert=False):
        """Parse a masking view name into its components.

        :param masking_view_name: the masking view name -- str
        :param revert: parse the new (revert) format instead of the old
            SLO-based format -- boolean
        :returns: object components, or None if no match -- dict
        """
        if revert:
            # New format: OS-<host><protocol>-<portgroup>-MV
            regex_str = (r'^(?P<prefix>OS)-(?P<host>.+?)(?P<protocol>I|F)-'
                         r'(?P<portgroup>(?!CD|RE|CD-RE).+)-(?P<postfix>MV)$')
        else:
            # Old formats:
            #   OS-<host>-<SRP>-<SLO>-<workload>-<protocol>[-CD][-RE]-MV
            #   OS-<host>-No_SLO-<protocol>[-CD][-RE]-MV
            # NOTE(review): the alternations 's*' below look like they were
            # meant to be '\s*' (or simply an empty option); 's*' still
            # matches the empty string so parsing works -- confirm before
            # changing.
            regex_str = (r'^(?P<prefix>OS)-(?P<host>.+?)((?P<srp>SRP.+?)-'
                         r'(?P<slo>.+?)-(?P<workload>.+?)|(?P<no_slo>No_SLO))-'
                         r'(?P<protocol>I|F)(?P<CD>-CD|s*)(?P<RE>-RE|s*)-'
                         r'(?P<postfix>MV)$')
        return self.get_object_components_and_correct_host(
            regex_str, masking_view_name)
@staticmethod
def truncate_string(str_to_truncate, max_num):
"""Truncate a string by taking first and last characters.
:param str_to_truncate: the string to be truncated -- str
:param max_num: the maximum number of characters -- int
:returns: truncated string or original string -- str
"""
if len(str_to_truncate) > max_num:
new_num = len(str_to_truncate) - max_num // 2
first_chars = str_to_truncate[:max_num // 2]
last_chars = str_to_truncate[new_num:]
str_to_truncate = first_chars + last_chars
return str_to_truncate
    def print_component_dict(self, masking_view_name, revert=False):
        """Print the masking view's parsed components to the screen.

        :param masking_view_name: the masking view name -- str
        :param revert: is it a revert back -- boolean
        :returns: component details, or None if parsing failed -- dict
        """
        if revert:
            self.smart_print('\n', DEBUG)
            self.smart_print(
                'Checking if masking view is in the following format: ',
                DEBUG)
            self.smart_print(
                '\tOS-[shortHostName]-[protocol]-[portgroup_name]-MV',
                DEBUG)
            component_dict = self.get_masking_view_component_dict(
                masking_view_name, revert)
            print('\n')
        else:
            self.smart_print('\n', DEBUG)
            self.smart_print(
                'Checking if masking view is in the following 2 formats: ',
                DEBUG)
            self.smart_print(
                '\tOS-[shortHostName]-[SRP]-[SLO]-[workload]-[protocol]-MV',
                DEBUG)
            self.smart_print('\t\tOR', DEBUG)
            self.smart_print('\tOS-[shortHostName]-No_SLO-[protocol]-MV',
                             DEBUG)
            self.smart_print('\n', DEBUG)
            component_dict = self.get_masking_view_component_dict(
                masking_view_name)
        self.smart_print('COMPONENTS OF %s', DEBUG, masking_view_name)
        if component_dict:
            self.print_pretty_table(component_dict)
        return component_dict
def check_masking_view_for_migration(
self, masking_view_name, revert=False):
"""Check if the masking view can be migrated.
:param masking_view_name: the masking view name -- str
:param revert: is it a revert case -- boolean
:returns: flag -- boolean
"""
if revert:
component_dict = self.print_component_dict(
masking_view_name, revert)
if component_dict and self.verify_protocol(
component_dict['protocol']):
print_str = '%s HAS BEEN VERIFIED TO BEING IN THE NEW FORMAT.'
self.smart_print(print_str, DEBUG, masking_view_name)
return True
else:
print_str = (
'%s IS NOT IN THE NEW FORMAT, MIGRATION WILL NOT '
'PROCEED.')
self.smart_print(print_str, WARNING, masking_view_name)
return False
else:
component_dict = self.print_component_dict(masking_view_name)
if component_dict and self.verify_protocol(
component_dict['protocol']):
print_str = '%s HAS BEEN VERIFIED TO BEING IN THE OLD FORMAT.'
self.smart_print(print_str, DEBUG, masking_view_name)
return True
else:
print_str = ('%s IS NOT IN THE OLD FORMAT, MIGRATION WILL NOT '
'PROCEED.')
self.smart_print(print_str, WARNING, masking_view_name)
return False
    def get_storage_group_component_dict(self, storage_group_name):
        """Parse the storage group string.

        :param storage_group_name: the storage group name -- str
        :returns: object components, or None if no match -- dict
        """
        # Expected shape:
        # OS-<host>-(No_SLO | <SRP>-<slo+workload>)-<portgroup>[-CD|-RE]
        regex_str = (r'^(?P<prefix>OS)-(?P<host>.+?)'
                     r'((?P<no_slo>No_SLO)|((?P<srp>SRP.+?)-'
                     r'(?P<sloworkload>.+?)))-(?P<portgroup>.+?)'
                     r'(?P<after_pg>$|-CD|-RE)')
        return self.get_object_components_and_correct_host(
            regex_str, storage_group_name)
    def get_element_dict_revert(
            self, component_dict, storage_group_name, cd_str, re_str,
            port_group_name, host_name):
        """Compile the new element names for a revert-back migration.

        :param component_dict: masking view components -- dict
        :param storage_group_name: storage group name -- str
        :param cd_str: compression-disabled suffix ('-CD' or '') -- str
        :param re_str: replication-enabled suffix ('-RE' or '') -- str
        :param port_group_name: port group name -- str
        :param host_name: host name -- str
        :returns: element details (empty when nothing could be built) -- dict
        """
        element_dict = dict()
        sg_component_dict = self.get_storage_group_component_dict(
            storage_group_name)
        if sg_component_dict:
            if sg_component_dict['sloworkload']:
                # SLO-based group: rebuild the old-style name from the
                # live storage group's SLO and workload.
                storagegroup = self.get_storage_group(storage_group_name)
                if storagegroup:
                    prefix = (component_dict['prefix']
                              + '-' + component_dict['host']
                              + '-' + sg_component_dict['srp']
                              + '-' + storagegroup['slo']
                              + '-' + self.get_workload(storagegroup)
                              + '-' + component_dict['protocol']
                              + cd_str + re_str)
                    element_dict['new_mv_name'] = prefix + '-MV'
                    element_dict['new_sg_name'] = prefix + '-SG'
                    element_dict['srp'] = sg_component_dict['srp']
                    element_dict['service_level'] = storagegroup['slo']
                    element_dict['workload'] = self.get_workload(storagegroup)
                else:
                    # Storage group could not be fetched; return empty
                    # dict so the caller treats this as a failure.
                    return element_dict
            else:
                # No-SLO group: names carry 'No_SLO' instead of SRP/SLO.
                prefix = (component_dict['prefix'] + '-'
                          + component_dict['host'] + '-'
                          + 'No_SLO' + '-'
                          + component_dict['protocol']
                          + cd_str + re_str)
                element_dict['new_mv_name'] = prefix + '-MV'
                element_dict['new_sg_name'] = prefix + '-SG'
            element_dict['port_group'] = port_group_name
            element_dict['initiator_group'] = host_name
        return element_dict
@staticmethod
def get_workload(storage_group):
"""Get the workload from the storagegroup object.
:param storage_group: storagegroup -- object
:returns: workload -- str
"""
try:
workload = storage_group['workload']
except KeyError:
workload = 'NONE'
return workload
    def get_element_dict(self, component_dict, cd_str, re_str,
                         port_group_name, host_name):
        """Compile the new element names for a forward migration.

        :param component_dict: masking view components -- dict
        :param cd_str: compression-disabled suffix ('-CD' or '') -- str
        :param re_str: replication-enabled suffix ('-RE' or '') -- str
        :param port_group_name: port group name -- str
        :param host_name: host name -- str
        :returns: element details -- dict
        """
        element_dict = dict()
        # New-style (portgroup based) masking view and parent SG names.
        element_dict['new_mv_name'] = (component_dict['prefix'] + '-'
                                       + component_dict['host'] + '-'
                                       + component_dict['protocol'] + '-'
                                       + port_group_name + '-MV')
        element_dict['new_sg_parent_name'] = (component_dict['prefix'] + '-'
                                              + component_dict['host'] + '-'
                                              + component_dict['protocol']
                                              + '-' + port_group_name + '-SG')
        if component_dict['srp']:
            # Keep the SLO+workload tag short so the child SG name stays
            # within name-length limits.
            slo_wl_combo = self.truncate_string(
                component_dict['slo'] + component_dict['workload'], 10)
            element_dict['new_sg_name'] = (component_dict['prefix'] + '-'
                                           + component_dict['host'] + '-'
                                           + component_dict['srp'] + '-'
                                           + slo_wl_combo
                                           + '-' + port_group_name + cd_str
                                           + re_str)
            element_dict['srp'] = component_dict['srp']
            element_dict['service_level'] = component_dict['slo']
            element_dict['workload'] = component_dict['workload']
        else:
            element_dict['new_sg_name'] = (component_dict['prefix'] + '-'
                                           + component_dict['host'] + '-'
                                           + 'No_SLO' + '-'
                                           + port_group_name + cd_str
                                           + re_str)
        element_dict['port_group'] = port_group_name
        element_dict['initiator_group'] = host_name
        return element_dict
    def compile_new_element_names(
            self, masking_view_name, port_group_name, host_name,
            storage_group_name, revert=False):
        """Compile new element names from mv, sg, host etc.

        :param masking_view_name: masking view name -- str
        :param port_group_name: port group name -- str
        :param host_name: host name -- str
        :param storage_group_name: storage group name -- str
        :param revert: is it a revert case -- boolean
        :returns: element details (empty when the mv cannot be parsed) -- dict
        """
        element_dict = dict()
        cd_str = ''
        re_str = ''
        regex_all = '\\S+'
        # Carry any compression-disabled / replication-enabled markers
        # from the existing storage group name into the new names.
        if re.search('^OS-' + regex_all + '-CD', storage_group_name):
            cd_str = '-CD'
            element_dict['CD'] = 'CD'
        if re.search('^OS-' + regex_all + '-RE', storage_group_name):
            re_str = '-RE'
            element_dict['RE'] = 'RE'
        component_dict = self.get_masking_view_component_dict(
            masking_view_name, revert)
        if component_dict:
            if revert:
                element_dict = self.get_element_dict_revert(
                    component_dict, storage_group_name, cd_str, re_str,
                    port_group_name, host_name)
            else:
                element_dict = self.get_element_dict(
                    component_dict, cd_str, re_str,
                    port_group_name, host_name)
        else:
            print_str = 'UNABLE TO PARSE %s, MIGRATION WILL NOT ' \
                        'PROCEED.'
            self.smart_print(print_str, WARNING, masking_view_name)
        return element_dict
    def validate_existing_masking_view(
            self, masking_view_details, old_port_group, old_host,
            element_dict, revert=False):
        """Validate an existing (target) masking view's components.

        :param masking_view_details: masking view details -- dict
        :param old_port_group: port group name -- str
        :param old_host: host name -- str
        :param element_dict: the element dictionary -- dict
        :param revert: is it a revert back -- boolean
        :returns: flag -- boolean
        """
        self.smart_print(
            'NEW MASKING VIEW %s', DEBUG,
            masking_view_details['maskingViewId'])
        masking_view_components = self.get_elements_from_masking_view(
            masking_view_details['maskingViewId'])
        # The port group and host must carry over from the old view.
        if old_port_group != masking_view_components['portgroup']:
            self.smart_print(
                'Portgroups are not equal, please assess', DEBUG)
            return False
        if old_host != masking_view_components['initiatorgroup']:
            print_str = 'Hosts are not equal, please assess'
            self.smart_print(print_str, WARNING)
            return False
        if revert:
            # Revert views use a flat (non-cascaded) storage group.
            if element_dict['new_sg_name'] != (
                    masking_view_components['storagegroup']):
                print_str = 'Storage group is not equal, please assess'
                self.smart_print(print_str, WARNING)
                return False
        else:
            # Forward views use a cascaded parent/child storage group.
            if element_dict['new_sg_parent_name'] != (
                    masking_view_components['storagegroup']):
                print_str = (
                    'Parent storage group is not equal, please assess')
                self.smart_print(print_str, WARNING)
                return False
            # Check if child storage group exists
            child_storage_group = self.get_storage_group(
                element_dict['new_sg_name'])
            if child_storage_group:
                # Check if the child SG is part of the parent
                self._existing_child_storage_group_check(
                    element_dict['new_sg_name'],
                    element_dict['new_sg_parent_name'])
            else:
                self.create_child_storage_group_and_add_to_parent(
                    element_dict)
        return True
def get_storage_group(self, storage_group_name):
"""Get the storage group object from the name.
:param storage_group_name: storage group name -- str
:returns: storage group -- dict
"""
storage_group = None
try:
storage_group = self.conn.provisioning.get_storage_group(
storage_group_name)
except exception.ResourceNotFoundException:
print_str = 'Storage group %s not found'
self.smart_print(print_str, WARNING, storage_group_name)
return storage_group
    def create_child_storage_group_and_add_to_parent(self, element_dict):
        """Create a child storage group and add it to the parent group.

        :param element_dict: element details -- dict
        """
        print_str = '%s child storage group does not exist so creating it.'
        self.smart_print(print_str, DEBUG, element_dict['new_sg_name'])
        # Create a new child storage group with one volume in it
        disable_compression = False
        if 'CD' in element_dict:
            disable_compression = True
        if 'srp' in element_dict:
            # SLO-based: created non-empty (one 1GB volume included).
            message = self.conn.provisioning.create_non_empty_storage_group(
                element_dict['srp'],
                element_dict['new_sg_name'],
                element_dict['service_level'],
                element_dict['workload'], '1', '1', 'GB',
                disable_compression)
        else:
            # No-SLO: created empty, then a volume is added explicitly.
            message = self.conn.provisioning.create_empty_storage_group(
                None, element_dict['new_sg_name'], None, None)
            # Add a volume to it
            self.conn.provisioning.create_volume_from_storage_group_return_id(
                'first_vol', element_dict['new_sg_name'], '1')
        print_str = 'CREATED CHILD STORAGE GROUP %s.'
        self.smart_print(print_str, DEBUG, element_dict['new_sg_name'])
        self.print_pretty_table(message)
        # Add the child to the parent storage group
        self._add_child_to_parent(element_dict['new_sg_name'],
                                  element_dict['new_sg_parent_name'])
def _existing_child_storage_group_check(
self, storage_group_child, storage_group_parent):
"""Check that child is part of parent, if not, add it
:param storage_group_child: child storage group name -- str
:param storage_group_parent: parent storage group name -- str
"""
prov = self.conn.provisioning
if not prov.is_child_storage_group_in_parent_storage_group(
storage_group_child, storage_group_parent):
print_str = (
'The child sg is not part of the parent sg: %s')
self.smart_print(
print_str, DEBUG, storage_group_child)
self._add_child_to_parent(
storage_group_child, storage_group_parent)
def _add_child_to_parent(self, child_storage_group, parent_storage_group):
"""Add child storage group to parent storage group
:param child_storage_group: child storage group name -- str
:param parent_storage_group: parent storage group name -- str
"""
message = (
self.conn.provisioning.add_child_storage_group_to_parent_group(
child_storage_group, parent_storage_group))
print_str = 'ADDED CHILD STORAGE GROUP %s TO PARENT STORAGE GROUP %s.'
self.smart_print(print_str, DEBUG, child_storage_group,
parent_storage_group)
self.print_pretty_table(message)
    def get_or_create_cascaded_storage_group(self, element_dict):
        """Get or create a cascaded (parent + child) storage group.

        :param element_dict: element dictionary -- dict
        :returns: parent storage group details -- dict
        """
        storage_group_parent = self.get_storage_group(
            element_dict['new_sg_parent_name'])
        if not storage_group_parent:
            print_str = (
                '%s parent storage group does not exist so '
                'creating it.')
            self.smart_print(
                print_str, DEBUG, element_dict['new_sg_parent_name'])
            # Create a new empty parent storage group, then re-fetch it so
            # the caller gets the full details dict.
            message = self.conn.provisioning.create_empty_storage_group(
                element_dict['srp'], element_dict['new_sg_parent_name'],
                None, None)
            self.print_pretty_table(message)
            storage_group_parent = self.get_storage_group(
                element_dict['new_sg_parent_name'])
        storage_group_child = self.get_storage_group(
            element_dict['new_sg_name'])
        if not storage_group_child:
            self.create_child_storage_group_and_add_to_parent(element_dict)
        else:
            # Child exists: make sure it is actually inside the parent.
            self._existing_child_storage_group_check(
                element_dict['new_sg_name'],
                element_dict['new_sg_parent_name'])
        return storage_group_parent
    def get_or_create_elements(self, element_dict, revert=False):
        """Get or create component elements.
        Ensure the storage group exists (creating it when needed) and then
        build a masking view from the existing components.
        :param element_dict: element details -- dict
        :param revert: is it a revert back -- boolean
        :raises exception.ResourceNotFoundException: if the storage group
            can neither be found nor created
        """
        if revert:
            storage_group = self.get_storage_group(
                element_dict['new_sg_name'])
            if not storage_group:
                # Create a new storage group with one volume in it
                prov = self.conn.provisioning
                message = prov.create_non_empty_storage_group(
                    element_dict['srp'],
                    element_dict['new_sg_name'],
                    element_dict['service_level'],
                    element_dict['workload'], '1', '1', 'GB')
                self.print_pretty_table(message)
                storage_group = self.get_storage_group(
                    element_dict['new_sg_name'])
        else:
            # Non-revert case uses a cascaded (parent/child) storage group.
            storage_group = self.get_or_create_cascaded_storage_group(
                element_dict)
        if storage_group:
            port_group = element_dict['port_group']
            initiator_group = element_dict['initiator_group']
            self.conn.provisioning.create_masking_view_existing_components(
                port_group, element_dict['new_mv_name'],
                storage_group['storageGroupId'],
                host_name=initiator_group)
        else:
            exception_message = (
                'Cannot create or find the storagegroup.')
            self.smart_print(exception_message, ERROR)
            raise exception.ResourceNotFoundException(
                data=exception_message)
def get_masking_view(self, masking_view_name):
"""Get the masking view object from the name.
:param masking_view_name: masking view name -- str
:returns: masking view -- dict
"""
masking_view = None
try:
masking_view = self.conn.provisioning.get_masking_view(
masking_view_name)
except exception.ResourceNotFoundException:
print_str = 'Masking view %s not found.'
self.smart_print(print_str, WARNING, masking_view_name)
return masking_view
    def get_or_create_masking_view(
            self, element_dict, port_group, host, revert=False):
        """Get or create masking view from component elements.
        If a masking view with the target name already exists it is
        validated and reused; otherwise the component elements are created
        and a new masking view is built from them.
        :param element_dict: element details -- dict
        :param port_group: port group name -- str
        :param host: host name -- str
        :param revert: is it a revert case -- boolean
        :returns: masking view -- dict
        """
        new_masking_view_details = self.get_masking_view(
            element_dict['new_mv_name'])
        if new_masking_view_details:
            if self.validate_existing_masking_view(
                    new_masking_view_details, port_group, host,
                    element_dict, revert):
                print_str = (
                    'The existing masking view %s will be used.')
                self.smart_print(
                    print_str, DEBUG, element_dict['new_mv_name'])
                self.smart_print('\n', DEBUG)
            else:
                # Reuse is not possible; the existing view is left as is and
                # the user is only warned.
                print_str = (
                    'Something is wrong with the existing masking view %s.')
                self.smart_print(
                    print_str, WARNING, element_dict['new_mv_name'])
        else:
            print_str = 'Creating masking view %s.'
            self.smart_print(print_str, DEBUG, element_dict['new_mv_name'])
            self.get_or_create_elements(element_dict, revert)
            new_masking_view_details = self.get_masking_view(
                element_dict['new_mv_name'])
        return new_masking_view_details
    def move_volumes_from_source_to_target(
            self, device_ids, source_storage_group, target_storage_group,
            create_volume_flag):
        """Move volumes from the source to the target storage group.
        :param device_ids: list of device ids -- list
        :param source_storage_group: the source sg -- str
        :param target_storage_group: the target sg -- str
        :param create_volume_flag: create volume flag -- boolean
        :returns: message -- str
        """
        print_str = '\nMoving %d volume(s) from %s to %s.'
        self.smart_print(
            print_str, DEBUG, len(device_ids), source_storage_group,
            target_storage_group)
        self.smart_print(
            '\nPlease be patient, this may take several minutes...',
            DEBUG)
        # Create a small volume so the source storage group is not left
        # empty once all volumes move out (presumably an SG that is part of
        # a masking view may not be empty -- TODO confirm).
        if create_volume_flag:
            last_volume = 'last_vol' + str(uuid.uuid1())[-10:]
            self.conn.provisioning.create_volume_from_storage_group_return_id(
                last_volume, source_storage_group, '1')
        # Move the volume from the old storage group to the
        # new storage group
        message = self.conn.provisioning.move_volumes_between_storage_groups(
            device_ids, source_storage_group, target_storage_group,
            force=False)
        return message
@staticmethod
def validate_list(full_list, sub_list):
"""Validate the sub list is within the full list.
:param full_list: full list -- list
:param sub_list: sub list -- list
:returns: flag -- boolean
"""
return bool(all(elem in full_list for elem in sub_list))
def choose_subset_volumes(self, storage_group_name, volume_list):
"""Validate the sub list is within the full list.
:param storage_group_name: storage group name -- str
:param volume_list: sub list -- list
:returns: volume_list -- list
"""
create_volume_flag = False
self.smart_print('Here is the full list of volumes in SG %s: %s',
DEBUG, storage_group_name, volume_list)
txt = ('Which do you want to migrate (comma separated list): ',
volume_list)
txt_out = self.input(txt)
if txt_out:
sub_volume_list = txt_out.split(',')
sub_volume_list = [x.strip(' \'') for x in sub_volume_list]
if self.validate_list(volume_list, sub_volume_list):
if len(volume_list) == len(sub_volume_list):
create_volume_flag = True
volume_list = sub_volume_list
else:
print_str = ('Unable to validate your list, '
'no volumes will be migrated.')
self.smart_print(print_str, WARNING)
volume_list = list()
else:
self.smart_print('You cannot input an empty list', DEBUG)
txt = 'Do you want to choose again Y/N or X:'
txt_out = self.input(txt)
if self.check_input(txt_out, 'Y'):
volume_list, create_vol = self.choose_subset_volumes(
storage_group_name, volume_list)
else:
sys.exit()
return volume_list, create_volume_flag
    def get_volume_list(self, storage_group_name):
        """Get the list of volumes from the storage group.
        Ask the user whether to migrate all volumes, a chosen subset, or
        none at all.
        :param storage_group_name: the storage group name -- str
        :returns: volume list -- list, create volume -- boolean
        """
        volume_list = self.conn.provisioning.get_volumes_from_storage_group(
            storage_group_name)
        print_str = 'There are %d volume in storage group %s.'
        self.smart_print(print_str, DEBUG, len(volume_list),
                         storage_group_name)
        txt = ('Do you want to migrate all %s volumes: Y/N or X(ignore): '
               % len(volume_list))
        txt_out = self.input(txt)
        if self.check_input(txt_out, 'Y'):
            # Move list of devices from old to new masking view
            print_str = ('Moving all volumes between source '
                         'and target storage groups.')
            self.smart_print(print_str, DEBUG)
            # All volumes move out, so a placeholder volume is required.
            create_volume_flag = True
        elif self.check_input(txt_out, 'N'):
            volume_list, create_volume_flag = self.choose_subset_volumes(
                storage_group_name, volume_list)
        else:
            # 'X' (or anything else): skip this storage group entirely.
            return list(), False
        return volume_list, create_volume_flag
def choose_storage_group(
self, masking_view_name, child_storage_group_list, port_group,
initiator_group, revert=False):
"""Choose a child storage group from the list.
:param masking_view_name: the masking view name -- str
:param child_storage_group_list: the storage group list -- list
:param port_group: The port group name -- str
:param initiator_group: the initiator group name -- str
:param revert: revert back -- boolean
:returns: element details -- dict, child storage group name -- str
"""
element_dict = dict()
child_storage_group_name = None
for child_storage_group_name in child_storage_group_list:
txt = ('Which storage group do you want to migrate:\n\t '
'%s. Y/N: ' % child_storage_group_name)
txt_out = self.input(txt)
if 'Y' in txt_out:
# Compile the new names of the SGs and MV
element_dict = self.compile_new_element_names(
masking_view_name, port_group, initiator_group,
child_storage_group_name, revert)
return element_dict, child_storage_group_name
return element_dict, child_storage_group_name
def validate_masking_view(self, masking_view_name, revert=False):
"""Validate the masking view string.
:param masking_view_name: masking view name -- str
:param revert: revert back -- boolean
:returns: validate flag -- boolean
"""
if re.search('^OS-', masking_view_name):
try:
if revert:
return bool(self.get_masking_view_component_dict(
masking_view_name, True))
else:
return bool(self.get_masking_view_component_dict(
masking_view_name))
except Exception:
return False
else:
return False
    def input(self, txt):
        """Handle input.
        Thin wrapper around the builtin ``input`` (which this method name
        intentionally shadows inside the class) that survives a closed
        input stream.
        :param txt: text in -- str
        :returns: txt_out -- str (empty string when the stream is closed)
        """
        txt_out = ''
        try:
            txt_out = input(txt)
        except EOFError:
            self.smart_print('Problem with input stream', ERROR)
        return txt_out
def get_storage_group_qos_details(self, storage_group_details):
"""Get the storage group QoS details.
:param: storage_group_details -- dict
:returns: QoS details -- dict
"""
qos_dict = dict()
try:
storage_group_qos_details = storage_group_details['hostIOLimit']
qos_dict['sg_maxiops'] = (
storage_group_qos_details['host_io_limit_io_sec'])
qos_dict['sg_maxmbps'] = (
storage_group_qos_details['host_io_limit_mb_sec'])
qos_dict['sg_distribution_type'] = (
storage_group_qos_details['dynamicDistribution'])
except KeyError:
self.smart_print(
'hostIOLimit(QoS details) not set storage group %s.',
DEBUG, storage_group_details['storageGroupId'])
return qos_dict
    def set_qos(self, source_storage_group_name, target_storage_group_name):
        """Check and set QoS.
        Copy the host IO (QoS) limits from the source storage group onto
        the target storage group when they differ.
        :param source_storage_group_name: source SG name -- str
        :param target_storage_group_name: target SG name -- str
        """
        property_dict = dict()
        source_qos_dict = dict()
        target_qos_dict = dict()
        source_storage_group_details = self.get_storage_group(
            source_storage_group_name)
        target_storage_group_details = self.get_storage_group(
            target_storage_group_name)
        if source_storage_group_details:
            source_qos_dict = self.get_storage_group_qos_details(
                source_storage_group_details)
        if target_storage_group_details:
            target_qos_dict = self.get_storage_group_qos_details(
                target_storage_group_details)
        if source_qos_dict and target_qos_dict:
            # Both sides have limits set: copy only the differing values.
            if source_qos_dict['sg_maxiops'] != target_qos_dict['sg_maxiops']:
                property_dict['host_io_limit_io_sec'] = (
                    source_qos_dict['sg_maxiops'])
            if source_qos_dict['sg_maxmbps'] != target_qos_dict['sg_maxmbps']:
                property_dict['host_io_limit_mb_sec'] = (
                    source_qos_dict['sg_maxmbps'])
            if source_qos_dict['sg_distribution_type'] != (
                    target_qos_dict['sg_distribution_type']):
                property_dict['dynamicDistribution'] = (
                    source_qos_dict['sg_distribution_type'])
        elif source_qos_dict:
            # Only the source has limits: copy all of them.
            property_dict['host_io_limit_io_sec'] = (
                source_qos_dict['sg_maxiops'])
            property_dict['host_io_limit_mb_sec'] = (
                source_qos_dict['sg_maxmbps'])
            property_dict['dynamicDistribution'] = (
                source_qos_dict['sg_distribution_type'])
        elif target_qos_dict:
            # NOTE(review): only a message is printed here; property_dict
            # stays empty so the target's limits are never actually reset
            # -- confirm whether that is intended.
            self.smart_print('Resetting target HostIO limits', DEBUG)
        if property_dict:
            payload = {'editStorageGroupActionParam': {
                'setHostIOLimitsParam': property_dict}}
            message = self.conn.provisioning.modify_storage_group(
                target_storage_group_name, payload)
            print_str = '%s Host IO details have changed:'
            self.smart_print(print_str, DEBUG, target_storage_group_name)
            self.print_pretty_table(message['hostIOLimit'])
| 41.53203 | 79 | 0.584691 |
b1772ccdf901bf788cf83538e1c29b5fc967e602 | 41,008 | py | Python | resolwe/process/fields.py | AnzeLovse/resolwe | 3b3d9466363fcd189c581bd581e594cb9e621606 | [
"Apache-2.0"
] | null | null | null | resolwe/process/fields.py | AnzeLovse/resolwe | 3b3d9466363fcd189c581bd581e594cb9e621606 | [
"Apache-2.0"
] | null | null | null | resolwe/process/fields.py | AnzeLovse/resolwe | 3b3d9466363fcd189c581bd581e594cb9e621606 | [
"Apache-2.0"
] | null | null | null | """Process input or output fields."""
import collections
import glob
import gzip
import os
import re
import shlex
import shutil
import subprocess
import tarfile
import zlib
from itertools import zip_longest
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Set, Tuple, Type, Union
import requests
from .communicator import communicator
from .descriptor import ProcessDescriptor
# Upload files in batches of 1000.
UPLOAD_FILE_BATCH_SIZE = 1000
def _get_dir_size(path):
"""Get directory size.
:param path: a Path object pointing to the directory.
:type path: pathlib.Path
"""
return sum(
file_.stat().st_size for file_ in Path(path).rglob("*") if file_.is_file()
)
def collect_entry(
    entry: Union[Path, str], references: List[Union[Path, str]]
) -> Tuple[int, int]:
    """Get the size of the entry and its references and upload them.

    The entry and its references are uploaded to the chosen storage connector.

    NOTE: This process may take considerable amount of time.

    :args entry: file or directory that is being collected.
    :args references: references belonging to the entry.
    :returns: tuple (entry size, references size), in bytes.
    :raises RuntimeError: when an entry is neither a file nor a directory.
    """

    def grouper(iterable: Iterable, n: int, fillvalue=None):
        """Collect data into fixed-length chunks or blocks.

        See https://docs.python.org/3/library/itertools.html#itertools-recipes.
        """
        args = [iter(iterable)] * n
        return zip_longest(*args, fillvalue=fillvalue)

    def get_entries_size(
        entries: Iterable[Path], processed_files: Set[Path], processed_dirs: Set[Path]
    ) -> int:
        """Get the total size of the entries.

        Traverse all the files and add their sizes. Skip already processed
        files: it is a common case that the file itself is also referenced
        under references for instance.

        :raises RuntimeError: when one of the entries is neither file nor
            directory.
        """
        total_size = 0
        for entry in entries:
            # Bug fix: compare the current entry against the processed set
            # (the original compared the outer-scope ``entry_path``, which
            # skipped every reference as soon as the top-level entry was a
            # processed file).
            if entry in processed_files:
                continue
            elif entry.is_dir():
                processed_dirs.add(entry)
                total_size += get_entries_size(
                    entry.glob("*"), processed_files, processed_dirs
                )
            elif entry.is_file():
                total_size += entry.stat().st_size
                processed_files.add(entry)
            else:
                raise RuntimeError(
                    f"While collecting entries: {entry} must be either file or directory."
                )
        return total_size

    assert communicator is not None, "Communicator should not be None."
    processed_files: Set[Path] = set()
    processed_dirs: Set[Path] = set()
    entry_path = Path(entry)
    entry_size = get_entries_size([entry_path], processed_files, processed_dirs)
    references_size = get_entries_size(
        (Path(reference) for reference in references), processed_files, processed_dirs
    )
    # Upload files in chunks. Avoid creation of a giant list when the number
    # of referenced files is huge: its size could be over half a million in
    # special cases.
    for group in grouper(processed_files, UPLOAD_FILE_BATCH_SIZE):
        communicator.upload_files(
            [os.fspath(entry) for entry in group if entry is not None]
        )
    for group in grouper(processed_dirs, UPLOAD_FILE_BATCH_SIZE):
        communicator.upload_dirs(
            [os.fspath(entry) for entry in group if entry is not None]
        )
    return (entry_size, references_size)
class ValidationError(Exception):
    """Field value validation error.
    Raised by ``Field.validate``/``Field.clean`` when a value does not
    satisfy the field's constraints.
    """
# ------Import file attributes ----------.
class ImportedFormat:
    """Import destination file format.
    Values are passed as ``imported_format`` to
    ``FileDescriptor.import_file``.
    """
    EXTRACTED = "extracted"  # keep only the extracted file
    COMPRESSED = "compressed"  # keep only the gzip-compressed file
    BOTH = "both"  # keep both the extracted and the compressed file
# ----------------------------------
class Field:
    """Process input or output field.
    Base class for all field types; subclasses set ``field_type`` and may
    override ``to_python``/``to_output``/``validate``. Instances act as
    Python descriptors on the owning class.
    """
    # Schema type string (e.g. "basic:string"); overridden by subclasses.
    field_type = None
    def __init__(
        self,
        label=None,
        required=True,
        description=None,
        default=None,
        choices=None,
        allow_custom_choice=None,
        hidden=False,
        *args,
        **kwargs,
    ):
        """Construct a field descriptor.
        :param label: human readable label
        :param required: whether a value must be provided
        :param description: longer help text
        :param default: default value used by ``clean`` when value is None
        :param choices: iterable of (value, label) pairs
        :param allow_custom_choice: allow values outside ``choices``
        :param hidden: hide the field in the schema
        """
        # name/process are filled in later by ``contribute_to_class``.
        self.name = None
        self.process: Optional[ProcessDescriptor] = None
        self.label = label
        self.required = required
        self.description = description
        self.default = default
        self.choices = choices
        self.allow_custom_choice = allow_custom_choice
        self.hidden = hidden
    @property
    def _descriptor_field_name(self):
        """Get descriptor field name."""
        return f"{self.name}"
    def __get__(self, obj, objtype=None):
        """Make field a descriptor."""
        # Class-level access returns the field itself; instance access
        # delegates to the owner's field-data storage.
        return self if obj is None else obj._get_field_data(self)
    def __set__(self, obj, value):
        """Make field a descriptor."""
        if obj is None:
            return
        obj._set_field_data(self, value)
    def get_field_type(self):
        """Return this field's type."""
        return self.field_type
    def contribute_to_class(self, process, fields, name):
        """Register this field with a specific process.
        :param process: Process descriptor instance
        :param fields: Fields registry to use
        :param name: Field name
        """
        self.name = name
        self.process = process
        fields[name] = self
    def to_python(self, value):
        """Convert value if needed."""
        return value
    def to_schema(self):
        """Return field schema for this field.
        :raises ValueError: if the field has not been registered with a
            process via ``contribute_to_class``
        """
        if not self.name or not self.process:
            raise ValueError("field is not registered with process")
        schema = {
            "name": self.name,
            "type": self.get_field_type(),
        }
        if self.required is not None:
            schema["required"] = self.required
        if self.label is not None:
            schema["label"] = self.label
        if self.description is not None:
            schema["description"] = self.description
        if self.default is not None:
            schema["default"] = self.default
        if self.hidden is not None:
            schema["hidden"] = self.hidden
        if self.allow_custom_choice is not None:
            schema["allow_custom_choice"] = self.allow_custom_choice
        if self.choices is not None:
            for choice, label in self.choices:
                schema.setdefault("choices", []).append(
                    {
                        "label": label,
                        "value": choice,
                    }
                )
        return schema
    def to_list_schema(self, *args, **kwargs):
        """Return part of list field schema that is particular to this field."""
        return {}
    def to_output(self, value):
        """Convert value to process output format.
        :returns: dict {name, value}.
        """
        return value
    def validate(self, value):
        """Validate field value.
        :raises ValidationError: when the value is missing but required, or
            not among ``choices`` (unless custom choices are allowed)
        """
        if self.required and value is None:
            raise ValidationError("field is required")
        if value is not None and self.choices is not None:
            choices = [choice for choice, _ in self.choices]
            if value not in choices and not self.allow_custom_choice:
                raise ValidationError(
                    "field must be one of: {}".format(", ".join(choices))
                )
    def clean(self, value):
        """Run validators and return the clean value.
        :raises ValidationError: re-raised with the field name prepended
        """
        if value is None:
            value = self.default
        try:
            value = self.to_python(value)
            self.validate(value)
        except ValidationError as error:
            raise ValidationError(
                "invalid value for {}: {}".format(self.name, error.args[0])
            )
        return value
    def __repr__(self):
        """Return string representation."""
        return '<{klass} name={name} type={type} label="{label}">'.format(
            klass=self.__class__.__name__,
            name=self.name,
            type=self.get_field_type(),
            label=self.label,
        )
class StringField(Field):
    """Field holding a string value."""

    field_type = "basic:string"

    def validate(self, value):
        """Reject non-string values, then run base validation."""
        is_valid = value is None or isinstance(value, str)
        if not is_valid:
            raise ValidationError("field must be a string")
        super().validate(value)
class TextField(StringField):
    """Text field (same validation as StringField, different schema type)."""
    field_type = "basic:text"
class BooleanField(Field):
    """Field holding a boolean value."""

    field_type = "basic:boolean"

    def validate(self, value):
        """Reject non-boolean values, then run base validation."""
        if not (value is None or isinstance(value, bool)):
            raise ValidationError("field must be a boolean")
        super().validate(value)
class IntegerField(Field):
    """Field holding an integer value."""

    field_type = "basic:integer"

    def to_python(self, value):
        """Coerce the value to int; None passes through unchanged."""
        if value is None:
            return None
        try:
            return int(value)
        except (TypeError, ValueError):
            raise ValidationError("field must be an integer")
class FloatField(Field):
    """Field holding a float value."""

    # TODO: Fix the underlying field into basic:float once that is renamed.
    field_type = "basic:decimal"

    def to_python(self, value):
        """Coerce the value to float; None passes through unchanged."""
        if value is None:
            return None
        try:
            return float(value)
        except (TypeError, ValueError):
            raise ValidationError("field must be a float")
class DateField(Field):
    """Date field (no extra conversion/validation beyond the base Field)."""
    field_type = "basic:date"
class DateTimeField(Field):
    """Date time field (no extra conversion/validation beyond the base Field)."""
    field_type = "basic:datetime"
class UrlField(Field):
    """URL field."""

    # Url types.
    DOWNLOAD = "download"
    VIEW = "view"
    LINK = "link"

    URL_TYPES = (DOWNLOAD, VIEW, LINK)

    def __init__(self, url_type, *args, **kwargs):
        """Construct an URL field descriptor.

        :param url_type: Type of URL, one of ``URL_TYPES``
        :raises ValueError: if ``url_type`` is not a supported type
        """
        if url_type not in self.URL_TYPES:
            raise ValueError(
                "url_type must be one of: {}".format(", ".join(self.URL_TYPES))
            )

        self.url_type = url_type
        super().__init__(*args, **kwargs)

    def to_python(self, value):
        """Convert value if needed.

        Accepts a plain URL string, a dict with an ``url`` element or None.

        :raises ValidationError: on any other input
        """
        if value is None:
            return None
        if isinstance(value, str):
            return value
        if isinstance(value, dict):
            try:
                value = value["url"]
            except KeyError:
                raise ValidationError("dictionary must contain an 'url' element")

            if not isinstance(value, str):
                raise ValidationError("field's url element must be a string")
            return value
        # Bug fix: the original ``not isinstance(value, None)`` raised
        # TypeError (None is not a type) for any unexpected input, including
        # None itself; invalid input must raise ValidationError instead.
        raise ValidationError("field must be a string or a dict")

    def get_field_type(self):
        """Return this field's type."""
        return "basic:url:{}".format(self.url_type)
class DownloadUrlField(UrlField):
    """Subclass of UrlField with the ``download`` url type preset."""
    field_type = "basic:url:download"
    def __init__(self, *args, **kwargs):
        """Initialize with the download url type."""
        super().__init__(UrlField.DOWNLOAD, *args, **kwargs)
class ViewUrlField(UrlField):
    """Subclass of UrlField with the ``view`` url type preset."""
    field_type = "basic:url:view"
    def __init__(self, *args, **kwargs):
        """Initialize with the view url type."""
        super().__init__(UrlField.VIEW, *args, **kwargs)
class LinkUrlField(UrlField):
    """Subclass of UrlField with the ``link`` url type preset."""
    field_type = "basic:url:link"
    def __init__(self, *args, **kwargs):
        """Initialize with the link url type."""
        super().__init__(UrlField.LINK, *args, **kwargs)
class SecretField(Field):
    """Secret field (no extra conversion/validation beyond the base Field)."""
    field_type = "basic:secret"
class FileDescriptor:
    """Descriptor for accessing files."""

    # Chunk size (bytes) used when copying/streaming file contents.
    CHUNK_SIZE = 10_000_000  # 10 Mbytes

    def __init__(
        self,
        path,
        size=None,
        total_size=None,
        is_remote=False,
        file_temp=None,
        refs=None,
        file_field: Optional["FileField"] = None,
    ):
        """Construct a file descriptor.

        :param path: destination file path -- str
        :param size: file size in bytes -- int
        :param total_size: size of the file plus its references -- int
        :param is_remote: whether the file is remote -- bool
        :param file_temp: source location (URL or upload-dir name) -- str
        :param refs: paths referenced by this file -- list
        :param file_field: the FileField this descriptor belongs to
        """
        self.path = path
        self.size = size
        self.total_size = total_size
        self.is_remote = is_remote
        self.file_temp = file_temp
        self.file_field = file_field
        if refs is None:
            refs = []
        self.refs = refs

    def import_file(self, imported_format=None, progress_from=0.0, progress_to=None):
        """Import field source file to working directory.

        :param imported_format: Import file format (extracted, compressed or both)
        :param progress_from: Initial progress value
        :param progress_to: Final progress value
        :return: Destination file path (if extracted and compressed, extracted path given)
        """
        if imported_format is None:
            imported_format = ImportedFormat.BOTH

        src = self.file_temp
        file_name = self.path

        if progress_to is not None:
            if not isinstance(progress_from, float) or not isinstance(
                progress_to, float
            ):
                raise ValueError("Progress_from and progress_to must be float")

            if progress_from < 0 or progress_from > 1:
                raise ValueError("Progress_from must be between 0 and 1")

            if progress_to < 0 or progress_to > 1:
                raise ValueError("Progress_to must be between 0 and 1")

            if progress_from >= progress_to:
                raise ValueError("Progress_to must be higher than progress_from")

        print("Importing and compressing {}...".format(file_name))

        def importGz():
            """Import gzipped file.

            The file_name must have .gz extension.
            """
            if imported_format != ImportedFormat.COMPRESSED:  # Extracted file required
                with open(file_name[:-3], "wb") as f_out, gzip.open(src, "rb") as f_in:
                    try:
                        shutil.copyfileobj(f_in, f_out, FileDescriptor.CHUNK_SIZE)
                    except zlib.error:
                        raise ValueError(
                            "Invalid gzip file format: {}".format(file_name)
                        )
            else:  # Extracted file not-required
                # Verify the compressed file.
                with gzip.open(src, "rb") as f:
                    try:
                        while f.read(FileDescriptor.CHUNK_SIZE) != b"":
                            pass
                    except zlib.error:
                        raise ValueError(
                            "Invalid gzip file format: {}".format(file_name)
                        )

            if imported_format != ImportedFormat.EXTRACTED:  # Compressed file required
                try:
                    shutil.copyfile(src, file_name)
                except shutil.SameFileError:
                    pass  # Skip copy of downloaded files

            if imported_format == ImportedFormat.COMPRESSED:
                return file_name
            else:
                return file_name[:-3]

        def import7z():
            """Import compressed file in various formats.

            Supported extensions: .bz2, .zip, .rar, .7z, .tar.gz, and .tar.bz2.
            """
            extracted_name, _ = os.path.splitext(file_name)
            destination_name = extracted_name
            temp_dir = "temp_{}".format(extracted_name)

            # TODO: is this a problem? The 7z binary must be present.
            cmd = "7z x -y -o{} {}".format(shlex.quote(temp_dir), shlex.quote(src))
            try:
                subprocess.check_call(cmd, shell=True)
            except subprocess.CalledProcessError as err:
                if err.returncode == 2:
                    raise ValueError("Failed to extract file: {}".format(file_name))
                else:
                    raise

            paths = os.listdir(temp_dir)
            if len(paths) == 1 and os.path.isfile(os.path.join(temp_dir, paths[0])):
                # Single file in archive.
                temp_file = os.path.join(temp_dir, paths[0])

                if (
                    imported_format != ImportedFormat.EXTRACTED
                ):  # Compressed file required
                    with open(temp_file, "rb") as f_in, gzip.open(
                        extracted_name + ".gz", "wb"
                    ) as f_out:
                        shutil.copyfileobj(f_in, f_out, FileDescriptor.CHUNK_SIZE)

                if (
                    imported_format != ImportedFormat.COMPRESSED
                ):  # Extracted file required
                    shutil.move(temp_file, "./{}".format(extracted_name))

                    if extracted_name.endswith(".tar"):
                        with tarfile.open(extracted_name) as tar:
                            # NOTE(review): extractall() without a filter is
                            # path-traversal prone on untrusted archives;
                            # consider filter="data" (Python 3.12+).
                            tar.extractall()

                        os.remove(extracted_name)
                        destination_name, _ = os.path.splitext(extracted_name)
                else:
                    destination_name = extracted_name + ".gz"
            else:
                # Directory or several files in archive.
                if (
                    imported_format != ImportedFormat.EXTRACTED
                ):  # Compressed file required
                    with tarfile.open(extracted_name + ".tar.gz", "w:gz") as tar:
                        for fname in glob.glob(os.path.join(temp_dir, "*")):
                            tar.add(fname, os.path.basename(fname))

                if (
                    imported_format != ImportedFormat.COMPRESSED
                ):  # Extracted file required
                    for path in os.listdir(temp_dir):
                        shutil.move(os.path.join(temp_dir, path), "./{}".format(path))
                else:
                    destination_name = extracted_name + ".tar.gz"

            shutil.rmtree(temp_dir)
            return destination_name

        def importUncompressed():
            """Import uncompressed file."""
            if imported_format != ImportedFormat.EXTRACTED:  # Compressed file required
                with open(src, "rb") as f_in, gzip.open(
                    file_name + ".gz", "wb"
                ) as f_out:
                    shutil.copyfileobj(f_in, f_out, FileDescriptor.CHUNK_SIZE)

            if imported_format != ImportedFormat.COMPRESSED:  # Extracted file required
                try:
                    shutil.copyfile(src, file_name)
                except shutil.SameFileError:
                    pass  # Skip copy of downloaded files

            return (
                file_name + ".gz"
                if imported_format == ImportedFormat.COMPRESSED
                else file_name
            )

        # Large file download from Google Drive requires cookie and token.
        try:
            response = None
            if re.match(
                r"^https://drive.google.com/[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]$",
                src,
            ):
                session = requests.Session()
                response = session.get(src, stream=True)

                token = None
                for key, value in response.cookies.items():
                    if key.startswith("download_warning"):
                        token = value
                        break

                if token is not None:
                    params = {"confirm": token}
                    response = session.get(src, params=params, stream=True)
            elif re.match(
                r"^(https?|ftp)://[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]$",
                src,
            ):
                response = requests.get(src, stream=True)
        except requests.exceptions.ConnectionError:
            raise requests.exceptions.ConnectionError(
                "Could not connect to {}".format(src)
            )

        if response:
            with open(file_name, "wb") as f:
                total = response.headers.get("content-length")
                total = float(total) if total else None
                downloaded = 0
                current_progress = 0
                for content in response.iter_content(
                    chunk_size=FileDescriptor.CHUNK_SIZE
                ):
                    f.write(content)

                    if total is not None and progress_to is not None:
                        downloaded += len(content)
                        progress_span = progress_to - progress_from
                        next_progress = (
                            progress_from + progress_span * downloaded / total
                        )
                        next_progress = round(next_progress, 2)

                        if next_progress > current_progress:
                            if (
                                self.file_field is not None
                                and self.file_field.process is not None
                            ):
                                print(f"Reporting progress: {next_progress}")
                                communicator.progress(next_progress)
                            current_progress = next_progress

            # Check if a temporary file exists.
            if not os.path.isfile(file_name):
                raise ValueError("Downloaded file not found {}".format(file_name))
            src = file_name
        else:
            # If src is a file it needs to have the upload directory prepended.
            if "UPLOAD_DIR" not in os.environ:
                # Bug fix: the second literal was a plain string, so {src}
                # was never interpolated; it is now an f-string.
                raise RuntimeError(
                    "No upload directory on filesystem is defined, "
                    f"can not import file {src} from filesystem."
                )
            src_path = Path(os.environ["UPLOAD_DIR"]) / src
            if not src_path.is_file():
                raise ValueError(f"Source file not found {src}")
            src = os.fspath(src_path)

        # Decide which import should be used.
        if re.search(r"\.(bz2|zip|rar|7z|tgz|tar\.gz|tar\.bz2)$", file_name):
            destination_file_name = import7z()
        elif file_name.endswith(".gz"):
            destination_file_name = importGz()
        else:
            destination_file_name = importUncompressed()

        if (
            progress_to is not None
            and self.file_field is not None
            and self.file_field.process is not None
        ):
            # Bug fix: corrected "finall" typo in the progress message.
            print(f"Reporting final progress: {progress_to}")
            communicator.progress(progress_to)

        return destination_file_name

    def __repr__(self):
        """Return string representation."""
        return "<FileDescriptor path={}>".format(self.path)
class FileField(Field):
    """File field."""

    field_type = "basic:file"

    def to_python(self, value):
        """Convert value if needed.

        Accepts a FileDescriptor, a path string, a dict with a ``file``
        element or None.

        :raises ValidationError: on any other input
        """
        if value is None:
            return None
        if isinstance(value, FileDescriptor):
            return value
        elif isinstance(value, str):
            return FileDescriptor(value, file_field=self)
        elif isinstance(value, dict):
            try:
                # TODO: here we have to hydrate, get the whole path.
                # TODO: make in nicer than hardcoded.
                path = value["file"]
            except KeyError:
                raise ValidationError("dictionary must contain a 'file' element")

            if not isinstance(path, str):
                raise ValidationError("field's file element must be a string")

            size = value.get("size", None)
            if size is not None and not isinstance(size, int):
                raise ValidationError("field's size element must be an integer")

            total_size = value.get("total_size", None)
            if total_size is not None and not isinstance(total_size, int):
                raise ValidationError("field's total_size element must be an integer")

            is_remote = value.get("is_remote", None)
            if is_remote is not None and not isinstance(is_remote, bool):
                raise ValidationError("field's is_remote element must be a boolean")

            file_temp = value.get("file_temp", None)
            if file_temp is not None and not isinstance(file_temp, str):
                raise ValidationError("field's file_temp element must be a string")

            refs = value.get("refs", None)
            if refs is not None and not isinstance(refs, list):
                # TODO: Validate that all refs are strings.
                raise ValidationError("field's refs element must be a list of strings")

            return FileDescriptor(
                path,
                size=size,
                total_size=total_size,
                is_remote=is_remote,
                file_temp=file_temp,
                refs=refs,
                file_field=self,
            )
        # Bug fix: the original ``not isinstance(value, None)`` raised
        # TypeError (None is not a type) for any unexpected input; raise
        # ValidationError instead.
        raise ValidationError("field must be a FileDescriptor, string or a dict")

    def to_output(self, value):
        """Convert value to process output format.

        Also copy the referenced file to the data volume.

        :raises Exception: when any reference is missing on disk.
        """
        data = {"file": value.path}
        if value.refs:
            missing_refs = [
                ref
                for ref in value.refs
                if not (Path(ref).is_file() or Path(ref).is_dir())
            ]
            if missing_refs:
                raise Exception(
                    "Output '{}' set to missing references: '{}'.".format(
                        self.name, ", ".join(missing_refs)
                    )
                )
            data["refs"] = value.refs

        entry_size, refs_size = collect_entry(data["file"], data.get("refs", []))
        data["size"] = entry_size
        data["total_size"] = entry_size + refs_size
        return data
class FileHtmlField(FileField):
    """HTML file field (same behavior as FileField, different schema type)."""
    field_type = "basic:file:html"
class DirDescriptor:
    """Descriptor for accessing directories."""

    def __init__(self, path, size=None, total_size=None, refs=None):
        """Construct a directory descriptor.

        :param path: directory path -- str
        :param size: directory size in bytes -- int
        :param total_size: size of the directory plus its references -- int
        :param refs: paths referenced by this directory -- list
        """
        self.path = path
        self.size = size
        self.total_size = total_size
        self.refs = [] if refs is None else refs

    def __repr__(self):
        """Return string representation."""
        return "<DirDescriptor path={}>".format(self.path)
class DirField(Field):
    """Directory field."""

    field_type = "basic:dir"

    def to_python(self, value):
        """Convert value if needed.

        Accepts a DirDescriptor, a path string, a dict with a ``dir``
        element or None.

        :raises ValidationError: on any other input
        """
        if value is None:
            return None
        if isinstance(value, DirDescriptor):
            return value
        elif isinstance(value, str):
            return DirDescriptor(value)
        elif isinstance(value, dict):
            try:
                path = value["dir"]
            except KeyError:
                raise ValidationError("dictionary must contain a 'dir' element")

            if not isinstance(path, str):
                raise ValidationError("field's dir element must be a string")

            size = value.get("size", None)
            if size is not None and not isinstance(size, int):
                raise ValidationError("field's size element must be an integer")

            total_size = value.get("total_size", None)
            if total_size is not None and not isinstance(total_size, int):
                raise ValidationError("field's total_size element must be an integer")

            refs = value.get("refs", None)
            if refs is not None and not isinstance(refs, list):
                # TODO: Validate that all refs are strings.
                raise ValidationError("field's refs element must be a list of strings")

            return DirDescriptor(path, size=size, total_size=total_size, refs=refs)
        # Bug fix: the original ``not isinstance(value, None)`` raised
        # TypeError (None is not a type) for any unexpected input; raise
        # ValidationError instead.
        raise ValidationError("field must be a DirDescriptor, string or a dict")

    def to_output(self, value):
        """Convert value to process output format.

        :raises Exception: when any reference is missing on disk.
        """
        data = {"dir": value.path}
        if value.refs:
            missing_refs = [
                ref
                for ref in value.refs
                if not (Path(ref).is_file() or Path(ref).is_dir())
            ]
            if missing_refs:
                raise Exception(
                    "Output '{}' set to missing references: '{}'.".format(
                        self.name, ", ".join(missing_refs)
                    )
                )
            data["refs"] = value.refs

        entry_size, refs_size = collect_entry(data["dir"], data.get("refs", []))
        data["size"] = entry_size
        data["total_size"] = entry_size + refs_size
        return data
class JsonField(Field):
    """JSON field."""

    field_type = "basic:json"

    def __init__(self, *args, **kwargs):
        """JSON field init."""
        # Model instance this descriptor was last accessed through; set lazily
        # by __get__ and consumed by to_python.
        self._model_instance = None
        super().__init__(*args, **kwargs)

    def __get__(self, obj, objtype=None):
        """Override parent method."""
        # NOTE(review): descriptor objects are shared per-class, so this keeps
        # only the *most recently* accessed owner instance -- confirm access
        # and to_python are never interleaved across different instances.
        self._model_instance = obj
        return super().__get__(obj, objtype)

    def to_python(self, value):
        """Convert value if needed."""
        from .models import JSONDescriptor

        # Model fields that are backed by a schema-described JSON document.
        descriptor_fields = {"flow.Data": {"input", "output", "descriptor"}}
        if isinstance(value, JSONDescriptor):
            return value
        elif self._model_instance is not None and self.name in descriptor_fields.get(
            self._model_instance.full_model_name, set()
        ):
            # Pick the schema matching this particular field name.
            if self.name in ["input", "output"]:
                schema_name = f"{self.name}_schema"
                schema = getattr(self._model_instance.process, schema_name)
            if self.name == "descriptor":
                schema = self._model_instance.descriptor_schema
            if schema is None:
                # No schema available: fall back to plain JSON handling.
                return super().to_python(value)
            # NOTE(review): this assert can never fire -- the `schema is None`
            # case has already returned above.
            assert (
                schema is not None
            ), f"Schema for field {self.name} on model {self._model_instance} is None"
            return JSONDescriptor(
                self._model_instance,
                self.name,
                cache=value,
                field_schema=schema,
            )
        else:
            return super().to_python(value)

    def to_output(self, value):
        """Convert to output format."""
        # JSON values are stored as-is; no conversion needed.
        return value
class ListField(Field):
    """Generic list field."""

    def __init__(self, inner, *args, **kwargs):
        """Construct a list field around *inner*, the per-element field."""
        if not isinstance(inner, Field):
            raise TypeError("inner field must be an instance of Field")
        self.inner = inner
        # Keep the constructor arguments so the inner field's list schema can
        # reuse them later (see to_schema).
        self.args = args
        self.kwargs = kwargs
        super().__init__(*args, **kwargs)

    def contribute_to_class(self, process, fields, name):
        """Register this field with a specific process.

        :param process: Process descriptor instance
        :param fields: Fields registry to use
        :param name: Field name
        """
        super().contribute_to_class(process, fields, name)
        # The inner field shares this field's identity within the process.
        self.inner.name = name
        self.inner.process = process

    def to_python(self, value):
        """Convert value if needed."""
        if isinstance(value, int):
            # ManyToMany with a single relation arrives as a bare id.
            value = [value]
        if value is None:
            # ManyToMany without relations.
            value = []
        return [self.inner.to_python(item) for item in value]

    def to_schema(self):
        """Return field schema for this field."""
        schema = super().to_schema()
        schema.update(self.inner.to_list_schema(*self.args, **self.kwargs))
        return schema

    def to_output(self, value):
        """Convert value to process output format."""
        return [self.inner.to_output(item) for item in value]

    def get_field_type(self):
        """Return this field's type."""
        return "list:{}".format(self.inner.get_field_type())

    def validate(self, value):
        """Validate field value."""
        if value is not None:
            if not isinstance(value, list):
                raise ValidationError("field must be a list")
            for position, element in enumerate(value):
                try:
                    self.inner.validate(element)
                except ValidationError as error:
                    raise ValidationError(
                        "invalid element {}: {}".format(position, error.args[0])
                    )
        super().validate(value)
class RelationPartitionDescriptor:
    """Descriptor for accessing relation partitions."""

    def __init__(self, entity_id, position=None, label=None):
        """Store the partition's entity id plus its optional position/label."""
        self.entity_id = entity_id
        self.position = position
        self.label = label
class RelationDescriptor:
    """Descriptor for accessing relations between data / entities."""

    def __init__(self, id, type, ordered, category, partitions, unit=None):
        """Construct a relation descriptor."""
        self.id = id
        self.type = type
        self.ordered = ordered
        self.category = category
        self.unit = unit
        self.partitions = partitions

    def __eq__(self, other):
        """Relations compare equal exactly when their ids match."""
        return isinstance(other, RelationDescriptor) and self.id == other.id

    def __hash__(self):
        """Hash by relation id, consistent with __eq__."""
        return hash(self.id)

    @classmethod
    def from_dict(cls, data):
        """Create relation descriptor from a dictionary."""
        id = data["relation_id"]
        type = data["relation_type_name"]
        ordered = data["relation_type_ordered"]
        category = data["category"]
        unit = data.get("unit", None)
        # Build one partition descriptor per entry; position/label are optional.
        partitions = [
            RelationPartitionDescriptor(
                entity_id=partition_data["entity_id"],
                position=partition_data.get("position"),
                label=partition_data.get("label"),
            )
            for partition_data in data["partitions"]
        ]
        return cls(
            id=id,
            type=type,
            ordered=ordered,
            category=category,
            partitions=partitions,
            unit=unit,
        )
def fields_from_schema(schema: List[dict]) -> Dict[str, Field]:
    """Get fields from schema (input or output).

    Builds a ``{field_name: Field}`` mapping by instantiating the field class
    registered in ``ALL_FIELDS_MAP`` for each descriptor's ``type`` string.
    ``list:`` and ``data:`` prefixes are handled specially, and group fields
    recurse into their sub-schema.
    """
    fields: Dict[str, Field] = dict()
    field: Optional[Field] = None
    for field_descriptor in schema:
        field_name = field_descriptor["name"]
        # Types may carry a trailing ':' (e.g. "data:seq:"); normalize it away.
        field_type = field_descriptor["type"].rstrip(":")
        if field_type.startswith("list:"):
            if field_type.startswith("list:data"):
                field_class = DataField
            else:
                field_class = ALL_FIELDS_MAP[field_type[len("list:") :]]
            # NOTE(review): the entire descriptor dict is forwarded as keyword
            # arguments here -- confirm field constructors tolerate the extra
            # keys ("name", "type", ...).
            extra_kwargs: dict = field_descriptor
            if issubclass(field_class, DataField):
                extra_kwargs["data_type"] = field_type[len("list:data:") :]
            field = ListField(field_class(**extra_kwargs))
        else:
            if field_type.startswith("data:"):
                field_class = DataField
            else:
                field_class = ALL_FIELDS_MAP[field_type]
            extra_kwargs = {}
            if issubclass(field_class, DataField):
                extra_kwargs["data_type"] = field_type[len("data:") :]
            if issubclass(field_class, GroupField):
                # Recursively build the nested group's fields and wrap them in
                # an ad-hoc namespace object, as expected by GroupField.
                group_schema = field_descriptor["group"]
                field_group = fields_from_schema(group_schema)

                class FieldGroup:
                    def __init__(self, values):
                        self.__dict__.update(values)

                fg = FieldGroup(field_group)
                extra_kwargs["field_group"] = fg
            field = field_class(**extra_kwargs)
        fields[field_name] = field
        field.name = field_name
    return fields
class DataField(Field):
    """Data object field."""

    field_type = "data"

    def __init__(
        self, data_type, relation_type=None, relation_npartitions=None, *args, **kwargs
    ):
        """Construct a data field.

        :param data_type: type string of the referenced data object
        :param relation_type: optional relation type for the schema
        :param relation_npartitions: optional number of relation partitions
        """
        # TODO: Validate data type format.
        self.data_type = data_type
        self.relation_type = relation_type
        self.relation_npartitions = relation_npartitions
        super().__init__(*args, **kwargs)

    def get_field_type(self):
        """Return this field's type."""
        return f"data:{self.data_type}"

    @staticmethod
    def _generate_relation(relation_type, relation_npartitions):
        """Generate relation part of data field schema."""
        if relation_type is None:
            if relation_npartitions is not None:
                raise AttributeError(
                    "relation_type should be set when relation_npartition is not None."
                )
            # Neither relation attribute set: nothing to add to the schema.
            return {}
        relation = {
            "type": relation_type,
            "npartitions": relation_npartitions or "none",
        }
        return {"relation": relation}

    def to_schema(self):
        """Return field schema for this field."""
        schema = super().to_schema()
        schema.update(
            self._generate_relation(self.relation_type, self.relation_npartitions)
        )
        return schema

    def to_list_schema(
        self, relation_type=None, relation_npartitions=None, *args, **kwargs
    ):
        """Add relation informations to list data field."""
        return self._generate_relation(relation_type, relation_npartitions)

    def to_python(self, value):
        """Convert value if needed."""
        from .models import Data

        if value is None:
            return None
        if isinstance(value, Data):
            return value
        if isinstance(value, int):
            return Data(value)
        raise ValidationError("field must be a DataDescriptor or int")
class GroupDescriptor:
    """Group descriptor: attribute-style access to a dict of field values."""

    def __init__(self, value):
        """Construct a group descriptor around *value* (a dict)."""
        self._value = value

    def __getattr__(self, name):
        """Expose wrapped dictionary entries as attributes."""
        try:
            return self._value[name]
        except KeyError:
            # Missing keys surface as the conventional AttributeError.
            raise AttributeError(name)
class GroupField(Field):
    """Group field."""

    field_type = "basic:group"

    def __init__(
        self,
        field_group,
        label=None,
        description=None,
        disabled=False,
        collapsed=False,
        hidden=False,
    ):
        """Construct a group field.

        :param field_group: object whose (non-underscore) attributes are the
            group's sub-fields
        :param label: human-readable label
        :param description: human-readable description
        :param disabled: whether the group is disabled
        :param collapsed: whether the group is initially collapsed
        :param hidden: whether the group is hidden
        """
        super().__init__(
            label=label, required=None, description=description, hidden=hidden
        )
        self.disabled = disabled
        self.collapsed = collapsed
        self.field_group = field_group
        # Registered sub-fields; populated by contribute_to_class.
        self.fields = collections.OrderedDict()

    def contribute_to_class(self, process, fields, name):
        """Register this field with a specific process.

        :param process: Process descriptor instance
        :param fields: Fields registry to use
        :param name: Field name
        """
        # Use order-preserving definition namespace (__dict__) to respect the
        # order of GroupField's fields definition.
        for field_name in self.field_group.__dict__:
            if field_name.startswith("_"):
                continue
            field = getattr(self.field_group, field_name)
            field.contribute_to_class(process, self.fields, field_name)
        super().contribute_to_class(process, fields, name)

    def to_python(self, value):
        """Convert value if needed."""
        if isinstance(value, GroupDescriptor):
            value = value._value
        # Convert every registered sub-field; missing keys convert from None.
        result = {}
        for name, field in self.fields.items():
            result[name] = field.to_python(value.get(name, None))
        return GroupDescriptor(result)

    def to_schema(self):
        """Return field schema for this field."""
        schema = super().to_schema()
        if self.disabled is not None:
            schema["disabled"] = self.disabled
        if self.collapsed is not None:
            schema["collapsed"] = self.collapsed
        group = []
        for field in self.fields.values():
            group.append(field.to_schema())
        schema["group"] = group
        return schema
# List of available fields.
ALL_FIELDS = [
    StringField,
    TextField,
    BooleanField,
    IntegerField,
    FloatField,
    DateField,
    DateTimeField,
    DownloadUrlField,
    ViewUrlField,
    LinkUrlField,
    UrlField,
    SecretField,
    FileField,
    FileHtmlField,
    DirField,
    JsonField,
    ListField,
    DataField,
    GroupField,
]

# Lookup table mapping a field's `field_type` string to its class; used when
# instantiating fields from a serialized schema (see `fields_from_schema`).
ALL_FIELDS_MAP: Dict[str, Type[Field]] = {
    field.field_type: field for field in ALL_FIELDS
}


def get_available_fields():
    """Return a list of available field classes."""
    return ALL_FIELDS
| 32.391785 | 100 | 0.564695 |
5fa2369d1e2d165a4e0ec7696d8bd2417f276b66 | 2,411 | py | Python | tests/airflow/providers/teradata/hooks/new_test_ttu.py | RUalexeyusman/apache-airflow-providers-teradata | 08fc77595209f1d9220f3c9301f4fecc45a7c6e0 | [
"MIT"
] | 2 | 2021-11-26T21:38:30.000Z | 2022-03-30T12:38:53.000Z | tests/airflow/providers/teradata/hooks/new_test_ttu.py | RUalexeyusman/apache-airflow-providers-teradata | 08fc77595209f1d9220f3c9301f4fecc45a7c6e0 | [
"MIT"
] | null | null | null | tests/airflow/providers/teradata/hooks/new_test_ttu.py | RUalexeyusman/apache-airflow-providers-teradata | 08fc77595209f1d9220f3c9301f4fecc45a7c6e0 | [
"MIT"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import os
import unittest
from unittest.mock import call, patch
from parameterized import parameterized
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.providers.teradata.hooks.ttu import TtuHook
from airflow.utils import db
class TestTtuHook(unittest.TestCase):
    """Unit tests for TtuHook's BTEQ script preparation."""

    _simple_bteq = """SELECT CURRENT_DATE;
.IF ERRORCODE <> 0 THEN .QUIT 0300;.QUIT 0;"""

    def setUp(self):
        """Register a test Teradata connection in the Airflow DB."""
        db.merge_conn(
            Connection(
                conn_id='ttu_default',
                conn_type='ttu',
                host='localhost',
                login='login',  # BUG FIX: the missing comma here was a SyntaxError
                password='password',
            )
        )

    def test_build_bteq_file(self):
        """_prepare_bteq_script should render the full BTEQ preamble + script."""
        # Given
        hook = TtuHook(ttu_conn_id='ttu_default')
        conn = hook.get_conn()
        # When
        bteq = hook._prepare_bteq_script(self._simple_bteq,
                                         conn['host'],
                                         conn['login'],
                                         conn['password'],
                                         conn['bteq_output_width'],
                                         conn['bteq_session_encoding'],
                                         conn['bteq_quit_zero']
                                         )
        # Then
        expected_bteq = """
.LOGON localhost/login,
.SET WIDTH 65531;
.SET SESSION CHARSET 'ASCII';
SELECT CURRENT_DATE;
.IF ERRORCODE <> 0 THEN .QUIT 0300;
.QUIT 0;
"""
        # BUG FIX: the original compared expected_bteq with itself, which
        # always passes and verifies nothing; compare against the actual
        # generated script instead.
        self.assertEqual(expected_bteq, bteq)
| 33.486111 | 70 | 0.592285 |
7d2c75aac5f2e6427ca823970487d4b06cc95e5b | 596 | py | Python | day6/main.py | thiagowfx/adventofcode | 27b50075b86761f995a60d5b8be24cf253c13968 | [
"BSD-2-Clause"
] | 5 | 2021-12-02T16:54:04.000Z | 2022-02-28T04:26:38.000Z | day6/main.py | thiagowfx/adventofcode | 27b50075b86761f995a60d5b8be24cf253c13968 | [
"BSD-2-Clause"
] | null | null | null | day6/main.py | thiagowfx/adventofcode | 27b50075b86761f995a60d5b8be24cf253c13968 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
from collections import Counter
import sys
# Read the comma-separated integer timers from the file given as the first
# CLI argument.  The handle is named `infile` so the built-in `input` is not
# shadowed (the original used `input` as the variable name).
with open(sys.argv[1]) as infile:
    numbers = list(map(int, infile.read().split(',')))
def simulation(days, initial=None):
    """Run the timer-population simulation for `days` steps; print and return the count.

    Each timer counts down once per step; a timer at 0 resets to 6 and also
    spawns a new timer at 8.  Population is tracked per timer value with a
    Counter, so run time does not depend on population size.

    :param days: number of simulation steps
    :param initial: iterable of starting timer values; defaults to the
        module-level `numbers` read from the input file (backward compatible
        with the original single-argument form)
    :return: total population after `days` steps (also printed)
    """
    fish = Counter(numbers if initial is None else initial)
    for _ in range(days):
        next_fish = Counter()
        for timer, count in fish.items():
            if timer == 0:
                next_fish[8] += count
                next_fish[6] += count
            else:
                next_fish[timer - 1] += count
        fish = next_fish
    total = sum(fish.values())
    print(total)
    return total
# Part 1: count after 80 steps.
simulation(80)
# Part 2: count after 256 steps.
simulation(256)
| 21.285714 | 53 | 0.565436 |
a6116359e5e1449b17317d330ca14acafd652057 | 2,762 | py | Python | scripts/compare_masks.py | plaplant/SSINS | 6840fe277862d6bb5b76bbad2d4dcbd2604eaf36 | [
"BSD-2-Clause"
] | 4 | 2018-10-04T00:58:15.000Z | 2019-07-12T04:12:17.000Z | scripts/compare_masks.py | plaplant/SSINS | 6840fe277862d6bb5b76bbad2d4dcbd2604eaf36 | [
"BSD-2-Clause"
] | 67 | 2018-09-18T20:14:12.000Z | 2022-03-14T20:09:03.000Z | scripts/compare_masks.py | plaplant/SSINS | 6840fe277862d6bb5b76bbad2d4dcbd2604eaf36 | [
"BSD-2-Clause"
] | 5 | 2019-07-31T22:47:53.000Z | 2021-06-24T14:03:06.000Z | import argparse
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
from pyuvdata import UVFlag
from astropy.time import Time
"""
A script for comparing masks from the same obsid with different settings.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--obsid", help="The obsid", required=True)
parser.add_argument("-a", "--file_a", help="The first mask to use for comparison",
required=True)
parser.add_argument("-b", "--file_b", help="The second mask to use for comparison",
required=True)
parser.add_argument("-x", "--label_a",
help="The label for file_a. Should identify settings in some way. Will go in output file name.",
required=True)
parser.add_argument("-y", "--label_b",
help="The label for file_b. Should identify settings in some way. Will go in output file name.",
required=True)
parser.add_argument("-p", "--outdir", help="The output directory", required=True)
args = parser.parse_args()
uvfa = UVFlag(args.file_a)
uvfb = UVFlag(args.file_b)
and_arr = np.logical_and(uvfa.flag_array, uvfb.flag_array)
a_not_b = np.logical_and(uvfa.flag_array, np.logical_not(uvfb.flag_array))
b_not_a = np.logical_and(uvfb.flag_array, np.logical_not(uvfa.flag_array))
neither = np.logical_and(np.logical_not(uvfa.flag_array), np.logical_not(uvfb.flag_array))
fig, ax = plt.subplots(figsize=(16, 12))
# Do not have to use neither with this initialization
flag_table = np.ones_like(uvfa.flag_array).astype(float)
flag_table[and_arr] = 7
flag_table[a_not_b] = 5
flag_table[b_not_a] = 3
# Prepare a colormap.
cmap = plt.cm.colors.ListedColormap(
["slategray", "darkturquoise", "plum", "lemonchiffon"]
)
bounds = [0, 2, 4, 6, 8]
norm = colors.BoundaryNorm(bounds, cmap.N)
cax = ax.imshow(flag_table[:, :, 0], aspect='auto',
extent=[uvfa.freq_array[0] / 1e6, uvfa.freq_array[-1] / 1e6,
uvfa.time_array[-1], uvfa.time_array[0]],
cmap=cmap, vmin=0, vmax=8, interpolation="none")
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel(f'Time (UTC)')
ax.set_yticklabels([Time(ytick, format='jd').iso[:-4] for ytick in ax.get_yticks()])
ax.set_title(f"{args.obsid} {args.label_a} vs. {args.label_b}")
cbar_ticklabels = ["Flagged in Neither", f"Flagged only in {args.label_b}",
f"Flagged only in {args.label_a}", "Flagged in Both"]
# Configure the colorbar so that labels are at the center of each section.
cbar = fig.colorbar(cax)
cbar_ticks = np.arange(1, 9, 2)
cbar.set_ticks(cbar_ticks)
cbar.set_ticklabels(cbar_ticklabels)
fig.savefig(f"{args.outdir}/{args.obsid}_{args.label_a}_{args.label_b}_SSINS_flag_comparison.pdf")
| 40.028986 | 116 | 0.691528 |
2318d1d4d5c12c158e5e3e7bc5439598c585e4c4 | 173 | py | Python | configs/visdrone/resnet50_b32x8_imagenet.py | w-sugar/mmclassification | 2ebc873948144df18feca5c5b11df16a55316723 | [
"Apache-2.0"
] | null | null | null | configs/visdrone/resnet50_b32x8_imagenet.py | w-sugar/mmclassification | 2ebc873948144df18feca5c5b11df16a55316723 | [
"Apache-2.0"
] | null | null | null | configs/visdrone/resnet50_b32x8_imagenet.py | w-sugar/mmclassification | 2ebc873948144df18feca5c5b11df16a55316723 | [
"Apache-2.0"
] | null | null | null | _base_ = [
'../_base_/models/resnet50_visdrone.py', '../_base_/datasets/visdrone_bs32.py',
'../_base_/schedules/visdrone_bs256.py', '../_base_/default_runtime.py'
]
| 34.6 | 83 | 0.693642 |
f5278f5ecadbea799c57b6264870f6221229dc36 | 1,783 | py | Python | DroneV2.py | Discordmodsbers/Drone.py | d55933c11894960e7db65bb30d6d545feb2287db | [
"Apache-2.0"
] | null | null | null | DroneV2.py | Discordmodsbers/Drone.py | d55933c11894960e7db65bb30d6d545feb2287db | [
"Apache-2.0"
] | null | null | null | DroneV2.py | Discordmodsbers/Drone.py | d55933c11894960e7db65bb30d6d545feb2287db | [
"Apache-2.0"
] | null | null | null | import random
import socket
import struct
import sys
import os
print("Place Device Under Drone-")
import time
time.sleep(3)
print("-Injecting Maliware")
print("Closing injector file")
print("DONE!")
time.sleep(10)
os.system ('clear')
print("paid key")
k = input("")
if k == '1980':
os.system ('clear')
print("thank you")
else:
os.system('clear')
sys.exit()
time.sleep(10)
print("listing ip drones in area")
for x in range (1, 21):
print(y)
print("drones in area")
time.sleep(1)
os.system ('clear')
print("57.18.59.122 1")
import time
time.sleep(1)
print("104.233.202.42")
import time
time.sleep(1)
print("108.120.226.108")
import time
time.sleep(1)
print("118.242.98.157")
import time
time.sleep(2)
print("120.244.135.190")
v = input("your drone ip")
if v == '1':
import time
time.sleep(1)
os.system ('clear')
print("Fly:")
print("Crash")
print("Infect other drones")
print("RemotCTRL")
print("list random things")
print("Permantly Inject")
print("Next Page 'P'")
x = input("Enter number : ")
if x == '1':
os.system ('clear')
input("How High you want it")
print("loading up")
elif x == '2':
os.system ('clear')
print("GET REKT")
elif x == '3':
os.system ('clear')
print("Takes a while-")
print("-Very useful to get near drone")
import time
time.sleep(5)
print("Done")
print("the infected drone only mirrors your drone")
elif x == '4':
os.system ('clear')
print("1: Follow")
elif x == '5':
os.system ('clear')
print("Injecting Backdoor")
time.sleep(3)
print("LzRat.v7")
time.sleep(3)
print("Installing LolCat")
time.sleep(3)
print("restart script please")
elif x == 'p' or 'P':
os.system ('clear')
print("WELCOME TO THE MOD MENU PAGE")
print("Listing Mod Menus")
time.sleep(3)
print("Apex Legends")
print("Roblox")
h = input("")
| 19.380435 | 52 | 0.661806 |
86e388f83c92f985dd8990f5da608c5e67ea944c | 48,965 | py | Python | src/dashd_intf.py | muteio/ghostnode-tool | c42868ed6c009c47482d23ebac0d101adbd8c103 | [
"MIT"
] | 1 | 2019-11-02T01:39:52.000Z | 2019-11-02T01:39:52.000Z | src/dashd_intf.py | muteio/ghostnode-tool | c42868ed6c009c47482d23ebac0d101adbd8c103 | [
"MIT"
] | null | null | null | src/dashd_intf.py | muteio/ghostnode-tool | c42868ed6c009c47482d23ebac0d101adbd8c103 | [
"MIT"
] | 1 | 2021-03-05T13:34:53.000Z | 2021-03-05T13:34:53.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Bertrand256
# Created on: 2017-03
import bitcoin
import os
import re
import socket
import sqlite3
import ssl
import threading
import time
import datetime
import logging
import simplejson
from PyQt5.QtCore import QThread
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from paramiko import AuthenticationException, PasswordRequiredException, SSHException
from paramiko.ssh_exception import NoValidConnectionsError
import app_cache
import app_utils
from app_config import AppConfig
from random import randint
from wnd_utils import WndUtils
import socketserver
import select
from PyQt5.QtWidgets import QMessageBox
from psw_cache import SshPassCache, UserCancelledConnection
from common import AttrsProtected
try:
import http.client as httplib
except ImportError:
import httplib
# how many seconds cached masternodes data are valid; cached masternode data is used only for non-critical
# features
MASTERNODES_CACHE_VALID_SECONDS = 60 * 60 # 60 minutes
class ForwardServer (socketserver.ThreadingTCPServer):
    """Local TCP server that serves each accepted connection in its own thread."""
    # Worker threads die with the process instead of blocking shutdown.
    daemon_threads = True
    # Allow re-binding the local forwarding port immediately after a restart.
    allow_reuse_address = True
class Handler(socketserver.BaseRequestHandler):
    """Forwards one accepted local TCP connection through an SSH channel.

    The class attributes ``chain_host``, ``chain_port``, ``ssh_transport`` and
    ``broken_conn_callback`` are injected by subclassing (see
    ``SSHTunnelThread.run``).
    """
    def handle(self):
        """Open a direct-tcpip channel and pump bytes both ways until one side closes."""
        try:
            logging.debug('Handler, starting ssh_transport.open_channel')
            chan = self.ssh_transport.open_channel(kind='direct-tcpip',
                                                   dest_addr=(self.chain_host, self.chain_port),
                                                   src_addr=self.request.getpeername())
            logging.debug('Handler, started ssh_transport.open_channel')
        except Exception as e:
            # Channel could not be opened (e.g. broken SSH session): notify and bail out.
            logging.error('open_channel error: ' + str(e))
            if self.broken_conn_callback is not None:
                self.broken_conn_callback()
            return
        if chan is None:
            return
        try:
            while True:
                # Wait up to 10 s for either socket to become readable.
                r, w, x = select.select([self.request, chan], [], [], 10)
                if self.request in r:
                    data = self.request.recv(1024)
                    if len(data) == 0:
                        # Local client closed the connection.
                        break
                    chan.send(data)
                    logging.debug(f'SSH tunnel - sent {len(data)} bytes')
                if chan in r:
                    data = chan.recv(1024)
                    if len(data) == 0:
                        # Remote side closed the channel.
                        break
                    self.request.send(data)
                    logging.debug(f'SSH tunnel - received {len(data)} bytes')
            logging.debug('Finishing Handler.handle')
        except socket.error as e:
            logging.error('Handler socker.error occurred: ' + str(e))
        except Exception as e:
            logging.error('Handler exception occurred: ' + str(e))
        finally:
            # Always close both endpoints, whatever ended the pump loop.
            chan.close()
            self.request.close()
class SSHTunnelThread(QThread):
    """Background thread running a ForwardServer that tunnels a local port over SSH."""
    def __init__(self, local_port, remote_ip, remote_port, transport, ready_event,
                 on_connection_broken_callback=None, on_finish_thread_callback=None):
        """
        :param local_port: local TCP port to listen on (127.0.0.1)
        :param remote_ip: tunnel destination host, as seen from the SSH server
        :param remote_port: tunnel destination port
        :param transport: paramiko transport of an established SSH session
        :param ready_event: event set by run() just before the server starts
        :param on_connection_broken_callback: invoked when opening a channel fails
        :param on_finish_thread_callback: invoked after the server loop terminates
        """
        QThread.__init__(self)
        self.local_port = local_port
        self.remote_ip = remote_ip
        self.remote_port = remote_port
        self.transport = transport
        self.ready_event = ready_event
        self.forward_server = None
        self.on_connection_broken_callback = on_connection_broken_callback
        self.on_finish_thread_callback = on_finish_thread_callback
        self.setObjectName('SSHTunnelThread')
    def __del__(self):
        pass
    def stop(self):
        """Shut down the forwarding server; ends the serve_forever loop in run()."""
        if self.forward_server:
            self.forward_server.shutdown()
    def handler_broken_connection_callback(self):
        """Invoked by Handler when an SSH channel cannot be opened: stop and notify."""
        try:
            self.stop()
            if self.on_connection_broken_callback is not None:
                self.on_connection_broken_callback()
        except:
            logging.exception('Exception while shutting down forward server.')
    def run(self):
        # Bind this tunnel's endpoints into a Handler subclass via class attributes.
        class SubHander(Handler):
            chain_host = self.remote_ip
            chain_port = self.remote_port
            ssh_transport = self.transport
            broken_conn_callback = self.handler_broken_connection_callback
        try:
            self.ready_event.set()
            logging.debug('Started SSHTunnelThread, local port forwarding 127.0.0.1:%s -> %s:%s' %
                          (str(self.local_port), self.remote_ip, str(self.remote_port)))
            self.forward_server = ForwardServer(('127.0.0.1', self.local_port), SubHander)
            self.forward_server.serve_forever()
            logging.debug('Stopped local port forwarding 127.0.0.1:%s -> %s:%s' %
                          (str(self.local_port), self.remote_ip, str(self.remote_port)))
            if self.on_finish_thread_callback:
                self.on_finish_thread_callback()
        except Exception as e:
            logging.exception('SSH tunnel exception occurred')
class UnknownError(Exception):
    """Raised when a remote command fails without producing any stderr output."""
class DashdConnectionError(Exception):
    """Wraps a lower-level connection exception so the retry logic can detect it.

    The original exception is kept in `org_exception` and re-raised when no
    alternative connection configuration is available.
    """
    def __init__(self, org_exception):
        # BUG FIX: the original called `Exception.__init__(org_exception)`,
        # which applied the initializer to the *wrapped* exception (clearing
        # its args) instead of initializing this instance.
        super().__init__(org_exception)
        self.org_exception = org_exception
class DashdSSH(object):
    """SSH session to a remote node: remote commands, port-forwarding tunnels
    and discovery of the remote daemon's configuration."""
    def __init__(self, host, port, username, on_connection_broken_callback=None):
        """
        :param host: SSH server host
        :param port: SSH server port
        :param username: SSH login name
        :param on_connection_broken_callback: invoked when a tunnel channel breaks
        """
        self.host = host
        self.port = port
        self.username = username
        self.ssh = None
        self.channel = None
        self.fw_channel = None
        self.connected = False
        self.connection_broken = False
        self.ssh_thread = None
        self.on_connection_broken_callback = on_connection_broken_callback
    def __del__(self):
        # Best-effort cleanup of the SSH session and tunnel thread.
        self.disconnect()
    def remote_command(self, cmd):
        """Execute *cmd* remotely and return its stdout split into lines.

        On a non-zero exit code raises Exception with the remote stderr text,
        or UnknownError when no stderr output becomes available.
        """
        channel = None
        try:
            channel = self.ssh.get_transport().open_session()
            channel.exec_command(cmd)
            ret_code = channel.recv_exit_status()
            if ret_code == 0:
                # Poll up to ~2 s for stdout to become available.
                for idx in range(1, 20):
                    if channel.recv_ready():
                        break
                    time.sleep(0.1)
                if not channel.recv_ready():
                    raise Exception('Data not ready')
                data = channel.recv(500)
                return data.decode().split('\n')
            else:
                # Non-zero exit: poll for stderr and raise its text as the error.
                for idx in range(1, 20):
                    if channel.recv_stderr_ready():
                        break
                    time.sleep(0.1)
                if channel.recv_stderr_ready():
                    data = channel.recv_stderr(500)
                    error = data.decode()
                    raise Exception(error)
                else:
                    raise UnknownError('Unknown error executing remote command: ' + cmd)
        finally:
            if channel:
                channel.close()
    def connect(self):
        """Establish the SSH session, asking the user for passwords/passphrases as needed."""
        import paramiko
        if self.ssh is None:
            self.ssh = paramiko.SSHClient()
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        password = None
        pass_message = None
        while True:
            try:
                self.ssh.connect(self.host, port=int(self.port), username=self.username, password=password)
                self.connected = True
                if password:
                    # Remember the working password for future sessions.
                    SshPassCache.save_password(self.username, self.host, password)
                break
            except PasswordRequiredException as e:
                # private key with password protection is used; ask user for password
                pass_message = "Enter passphrase for <b>private key</b> or password for %s" % \
                               (self.username + '@' + self.host)
                while True:
                    password = SshPassCache.get_password(self.username, self.host, message=pass_message)
                    if password:
                        break
            except AuthenticationException as e:
                # This exception will be raised in the following cases:
                # 1. a private key with password protectection is used but the user enters incorrect password
                # 2. a private key exists but user's public key is not added to the server's allowed keys
                # 3. normal login to server is performed but the user enters bad password
                # So, in the first case, the second query for password will ask for normal password to server, not
                # for a private key.
                if password is not None:
                    WndUtils.errorMsg(message='Incorrect password, try again...')
                while True:
                    password = SshPassCache.get_password(self.username, self.host, message=pass_message)
                    if password:
                        break
            except SSHException as e:
                if e.args and e.args[0] == 'No authentication methods available':
                    # Server offered no key auth; fall back to asking for a password.
                    while True:
                        password = SshPassCache.get_password(self.username, self.host)
                        if password:
                            break
                else:
                    raise
            except Exception as e:
                raise
    def on_tunnel_thread_finish(self):
        """Called by SSHTunnelThread when its server loop ends; drop the reference."""
        self.ssh_thread = None
    def open_tunnel(self, local_port, remote_ip, remote_port):
        """Start forwarding 127.0.0.1:local_port to remote_ip:remote_port over SSH."""
        if self.connected:
            if self.ssh_thread is not None:
                raise Exception('SSH tunnel already open.')
            ready_event = threading.Event()
            self.ssh_thread = SSHTunnelThread(local_port, remote_ip, remote_port, self.ssh.get_transport(), ready_event,
                                              on_connection_broken_callback=self.on_connection_broken_callback,
                                              on_finish_thread_callback=self.on_tunnel_thread_finish)
            self.ssh_thread.start()
            ready_event.wait(10)
            # wait a moment for the tunnel to come-up
            time.sleep(0.1)
            logging.debug('Started local port forwarding 127.0.0.1:%s -> %s:%s' %
                          (str(local_port), remote_ip, str(remote_port)))
        else:
            raise Exception('SSH not connected')
    def find_dashd_config(self):
        """
        Try to read the configuration of the remote daemon; in particular the
        parameters concerning its RPC interface.
        :return: tuple (dashd_running, dashd_config_found, config file contents as dict)
        or an error string if an error occurred
        """
        dashd_running = False
        dashd_config_found = False
        if not self.ssh:
            raise Exception('SSH session not ready')
        try:
            # find dashd process id if running
            try:
                pids = self.remote_command('ps -C "nixd" -o pid')
            except UnknownError:
                raise Exception('is nixd running on the remote machine?')
            pid = None
            if isinstance(pids, list):
                pids = [pid.strip() for pid in pids]
                # First line may be the 'PID' header; the pid follows it.
                if len(pids) >= 2 and pids[0] == 'PID' and re.match('\d+', pids[1]):
                    pid = pids[1]
                elif len(pids) >= 1 and re.match('\d+', pids[0]):
                    # NOTE(review): this branch matches pids[0] but assigns
                    # pids[1] -- looks like it should be pids[0]; confirm.
                    pid = pids[1]
            config = {}
            if pid:
                dashd_running = True
                # using dashd pid find its executable path and then .nix directory and finally nix.conf file
                executables = self.remote_command('ls -l /proc/' + str(pid) + '/exe')
                if executables and len(executables) >= 1:
                    elems = executables[0].split('->')
                    if len(elems) == 2:
                        executable = elems[1].strip()
                        dashd_dir = os.path.dirname(executable)
                        dash_conf_file = dashd_dir + '/.nix/nix.conf'
                        conf_lines = []
                        try:
                            conf_lines = self.remote_command('cat ' + dash_conf_file)
                        except Exception as e:
                            # probably error no such file or directory
                            # try to read dashd's cwd + cmdline
                            cwd_lines = self.remote_command('ls -l /proc/' + str(pid) + '/cwd')
                            if cwd_lines:
                                elems = cwd_lines[0].split('->')
                                if len(elems) >= 2:
                                    cwd = elems[1]
                                    dash_conf_file = cwd + '/.nix/nix.conf'
                                    try:
                                        conf_lines = self.remote_command('cat ' + dash_conf_file)
                                    except Exception as e:
                                        # second method did not suceed, so assume, that conf file is located
                                        # i /home/<username>/.nix directory
                                        dash_conf_file = '/' + self.username + '/.nix/nix.conf'
                                        if self.username != 'root':
                                            dash_conf_file = '/home' + dash_conf_file
                                        conf_lines = self.remote_command('cat ' + dash_conf_file)
                        # Parse simple "key=value" lines into the config dict.
                        for line in conf_lines:
                            elems = [e.strip() for e in line.split('=')]
                            if len(elems) == 2:
                                config[elems[0]] = elems[1]
                        dashd_config_found = True
            return dashd_running, dashd_config_found, config
        except Exception as e:
            return str(e)
    def disconnect(self):
        """Stop the tunnel thread (if any) and close the SSH session."""
        if self.ssh:
            if self.ssh_thread:
                self.ssh_thread.stop()
            self.ssh.close()
            del self.ssh
            self.ssh = None
            self.connected = False
class DashdIndexException(JSONRPCException):
    """
    Raised when the remote daemon lacks the address/spent/timestamp/tx index
    options required by the attempted RPC call; appends setup instructions to
    the original error message.
    """
    def __init__(self, parent_exception):
        """Wrap *parent_exception* and extend its message with indexing instructions."""
        JSONRPCException.__init__(self, parent_exception.error)
        self.message = self.message + \
                       '\n\nMake sure the nixd daemon you are connecting to has the following options enabled in ' \
                       'its nix.conf:\n\n' + \
                       'addressindex=1\n' + \
                       'spentindex=1\n' + \
                       'timestampindex=1\n' + \
                       'txindex=1\n\n' + \
                       'Changing these parameters requires to execute nixd with "-reindex" option (linux: ./nixd -reindex)'
def control_rpc_call(func):
    """
    Decorator function for catching HTTPConnection timeout and then resetting the connection.

    Wraps a DashdInterface method: serializes RPC calls with `http_lock`,
    retries failed calls up to 4 times, and switches to the next configured
    connection on node/network failures.  A successful connection switch does
    not consume a retry.

    :param func: DashdInterface's method decorated
    """
    def catch_timeout_wrapper(*args, **kwargs):
        ret = None
        last_exception = None
        self = args[0]
        self.mark_call_begin()
        try:
            logging.debug('Trying to acquire http_lock')
            self.http_lock.acquire()
            logging.debug('Acquired http_lock')
            # BUG FIX: the original `for try_nr in range(1, 5)` loop decremented
            # the loop variable so that connection-switch retries would "not
            # count", but reassigning a range() loop variable has no effect.
            # An explicit while-counter makes the decrement effective.
            max_tries = 4
            try_nr = 0
            while try_nr < max_tries:
                try_nr += 1
                try:
                    try:
                        logging.debug('Beginning call of "' + str(func) + '"')
                        begin_time = time.time()
                        ret = func(*args, **kwargs)
                        logging.debug('Finished call of "' + str(func) + '". Call time: ' +
                                      str(time.time() - begin_time) + 's.')
                        last_exception = None
                        self.mark_cur_conn_cfg_is_ok()
                        break
                    except (ConnectionResetError, ConnectionAbortedError, httplib.CannotSendRequest,
                            BrokenPipeError) as e:
                        # Transient transport errors: reset the connection and retry.
                        logging.error('Error while calling of "' + str(func) + ' (1)". Details: ' + str(e))
                        last_exception = e
                        self.reset_connection()
                    except JSONRPCException as e:
                        logging.error('Error while calling of "' + str(func) + ' (2)". Details: ' + str(e))
                        if e.code == -5 and e.message == 'No information available for address':
                            raise DashdIndexException(e)
                        elif e.error.get('message', '').find('403 Forbidden') >= 0 or \
                                e.error.get('message', '').find('502 Bad Gateway') >= 0:
                            # Gateway/auth level problem: try another connection config.
                            self.http_conn.close()
                            raise DashdConnectionError(e)
                        elif e.code in (-32603,):
                            # for these error codes don't retry the request with another rpc connetion
                            # -32603: failure to verify vote
                            raise
                        else:
                            last_exception = e
                            self.http_conn.close()
                    except (socket.gaierror, ConnectionRefusedError, TimeoutError, socket.timeout,
                            NoValidConnectionsError) as e:
                        # exceptions raised most likely by not functioning dashd node; try to switch to another node
                        # if there is any in the config
                        logging.error('Error while calling of "' + str(func) + ' (3)". Details: ' + str(e))
                        raise DashdConnectionError(e)
                except DashdConnectionError as e:
                    # try another net config if possible
                    logging.error('Error while calling of "' + str(func) + '" (4). Details: ' + str(e))
                    if not self.switch_to_next_config():
                        self.last_error_message = str(e.org_exception)
                        raise e.org_exception  # couldn't use another conn config, raise last exception
                    else:
                        try_nr -= 1  # another config retries do not count (effective with the while loop)
                        last_exception = e.org_exception
                except Exception:
                    raise
        finally:
            self.http_lock.release()
            logging.debug('Released http_lock')
        if last_exception:
            raise last_exception
        return ret
    return catch_timeout_wrapper
class Masternode(AttrsProtected):
    """In-memory cache entry for a single masternode.

    Attributes mirror the MASTERNODES db table columns / the fields parsed from
    the 'ghostnodelist' RPC output. While 'monitor_changes' is True, any change
    to a monitored attribute flags the object as 'modified' so the db cache can
    be refreshed.
    """
    def __init__(self):
        AttrsProtected.__init__(self)
        # Define every attribute up-front (required by AttrsProtected) with a
        # None/False default; order of definition does not affect behavior.
        for attr_name in ('ident', 'status', 'protocol', 'payee', 'lastseen',
                          'activeseconds', 'lastpaidtime', 'lastpaidblock',
                          'ip', 'db_id', 'marker', 'queue_position'):
            setattr(self, attr_name, None)
        self.modified = False
        self.monitor_changes = False
        self.set_attr_protection()

    def __setattr__(self, name, value):
        # Bookkeeping attributes never mark the object dirty.
        bookkeeping = ('modified', 'marker', 'monitor_changes',
                       '_AttrsProtected__allow_attr_definition')
        is_tracked_update = hasattr(self, name) and name not in bookkeeping
        if is_tracked_update and self.monitor_changes and getattr(self, name) != value:
            self.modified = True
        super().__setattr__(name, value)
def json_cache_wrapper(func, intf, cache_file_ident):
    """
    Wrapper for saving/restoring rpc-call results inside cache files.

    :param func: callable whose JSON-serializable result should be cached
    :param intf: DashdInterface instance; only intf.config.cache_dir is used here
    :param cache_file_ident: unique string identifying the cache file
    :return: wrapped callable with the same signature as 'func'
    """
    def json_call_wrapper(*args, **kwargs):
        cache_file = intf.config.cache_dir + '/insight_dash_' + cache_file_ident + '.json'

        try:  # looking into cache first
            # 'with' closes the file handle (the previous open() leaked it);
            # 'except Exception' (not a bare except) lets KeyboardInterrupt through.
            with open(cache_file) as f_in:
                j = simplejson.load(f_in)
            logging.debug('Loaded data from existing cache file: ' + cache_file)
            return j
        except Exception:
            # Missing or corrupt cache file: fall through to a live call.
            pass

        # if not found in cache, call the original function
        j = func(*args, **kwargs)

        try:
            # NOTE: simplejson (not json) — presumably chosen for its native
            # Decimal support in RPC results; verify before swapping it out.
            with open(cache_file, 'w') as f_out:
                simplejson.dump(j, f_out)
        except Exception:
            # Failure to write the cache is non-fatal; the fresh result is still returned.
            logging.exception('Cannot save data to a cache file')
        return j
    return json_call_wrapper
class DashdInterface(WndUtils):
    def __init__(self, window,
                 on_connection_initiated_callback=None,
                 on_connection_failed_callback=None,
                 on_connection_successful_callback=None,
                 on_connection_disconnected_callback=None):
        """Create an unconfigured interface; call initialize() before use.

        :param window: owner window object (stored, used elsewhere)
        :param on_connection_*_callback: optional notifications fired from the
            connection life-cycle methods (open_internal/disconnect).
        """
        WndUtils.__init__(self, app_config=None)
        self.config = None
        self.db_intf = None
        self.connections = []  # ordered list of connection configs to try
        self.cur_conn_index = 0
        self.cur_conn_def = None  # currently selected connection config

        # below is the connection with which particular RPC call has started; if connection is switched because of
        # problems with some nodes, switching stops if we close round and return to the starting connection
        self.starting_conn = None

        self.masternodes = []  # cached list of all masternodes (Masternode object)
        self.masternodes_by_ident = {}  # ident -> Masternode, for O(1) lookups
        self.payment_queue = []  # ENABLED masternodes sorted by estimated payment order

        self.ssh = None  # DashdSSH tunnel object when the config uses SSH
        self.window = window
        self.active = False  # True once a connection has been established
        self.rpc_url = None
        self.proxy = None  # AuthServiceProxy instance
        self.http_conn = None  # HTTPConnection object passed to the AuthServiceProxy (for convinient connection reset)
        self.on_connection_initiated_callback = on_connection_initiated_callback
        self.on_connection_failed_callback = on_connection_failed_callback
        self.on_connection_successful_callback = on_connection_successful_callback
        self.on_connection_disconnected_callback = on_connection_disconnected_callback
        self.last_error_message = None
        # serializes RPC calls issued through this interface
        self.http_lock = threading.Lock()
def initialize(self, config: AppConfig, connection=None, for_testing_connections_only=False):
self.config = config
self.app_config = config
self.db_intf = self.config.db_intf
# conn configurations are used from the first item in the list; if one fails, then next is taken
if connection:
# this parameter is used for testing specific connection
self.connections = [connection]
else:
# get connection list orderd by priority of use
self.connections = self.config.get_ordered_conn_list()
self.cur_conn_index = 0
if self.connections:
self.cur_conn_def = self.connections[self.cur_conn_index]
else:
self.cur_conn_def = None
if not for_testing_connections_only:
self.load_data_from_db_cache()
    def load_data_from_db_cache(self):
        """Populate self.masternodes / self.masternodes_by_ident from the MASTERNODES db table.

        Also repairs duplicated rows (same ident) left behind if the app was
        interrupted while loading, and rebuilds the payment queue estimates.
        """
        self.masternodes.clear()
        self.masternodes_by_ident.clear()
        # Two cursors: 'cur' iterates the SELECT while 'cur2' issues the
        # duplicate-row DELETEs inside the same loop.
        cur = self.db_intf.get_cursor()
        cur2 = self.db_intf.get_cursor()
        db_modified = False
        try:
            tm_start = time.time()
            db_correction_duration = 0.0
            logging.debug("Reading masternodes' data from DB")
            cur.execute("SELECT id, ident, status, protocol, payee, last_seen, active_seconds,"
                        " last_paid_time, last_paid_block, IP from MASTERNODES where dmt_active=1")
            for row in cur.fetchall():
                db_id = row[0]
                ident = row[1]

                # correct duplicated masternodes issue
                mn_first = self.masternodes_by_ident.get(ident)
                if mn_first is not None:
                    # first occurrence of this ident wins; skip later rows
                    continue

                # delete duplicated (caused by breaking the app while loading)
                tm_start_1 = time.time()
                cur2.execute('DELETE from MASTERNODES where ident=? and id<>?', (ident, db_id))
                if cur2.rowcount > 0:
                    db_modified = True
                db_correction_duration += (time.time() - tm_start_1)

                mn = Masternode()
                mn.db_id = db_id
                mn.ident = ident
                mn.status = row[2]
                mn.protocol = row[3]
                mn.payee = row[4]
                mn.lastseen = row[5]
                mn.activeseconds = row[6]
                mn.lastpaidtime = row[7]
                mn.lastpaidblock = row[8]
                mn.ip = row[9]
                self.masternodes.append(mn)
                self.masternodes_by_ident[mn.ident] = mn

            tm_diff = time.time() - tm_start
            logging.info('DB read time of %d MASTERNODES: %s s, db fix time: %s' %
                         (len(self.masternodes), str(tm_diff), str(db_correction_duration)))
            self.update_mn_queue_values()
        except Exception as e:
            logging.exception('SQLite initialization error')
        finally:
            if db_modified:
                self.db_intf.commit()
            # one release per cursor acquired above
            self.db_intf.release_cursor()
            self.db_intf.release_cursor()
def reload_configuration(self):
"""Called after modification of connections' configuration or changes having impact on the file name
associated to database cache."""
# get connection list orderd by priority of use
self.disconnect()
self.connections = self.config.get_ordered_conn_list()
self.cur_conn_index = 0
if len(self.connections):
self.cur_conn_def = self.connections[self.cur_conn_index]
self.load_data_from_db_cache()
else:
self.cur_conn_def = None
def disconnect(self):
if self.active:
logging.debug('Disconnecting')
if self.ssh:
self.ssh.disconnect()
del self.ssh
self.ssh = None
self.active = False
if self.on_connection_disconnected_callback:
self.on_connection_disconnected_callback()
    def mark_call_begin(self):
        # Remember which connection config an RPC round started with;
        # switch_to_next_config() stops cycling once it returns to this one.
        self.starting_conn = self.cur_conn_def
    def switch_to_next_config(self):
        """
        If there is another dashd config not used recently, switch to it. Called only when there was a problem
        with current connection config.
        :return: True if successfully switched or False if there was no another config
        """
        if self.cur_conn_def:
            self.config.conn_cfg_failure(self.cur_conn_def)  # mark connection as defective
        # advance to the next config, wrapping around to the first one
        if self.cur_conn_index < len(self.connections)-1:
            idx = self.cur_conn_index + 1
        else:
            idx = 0

        conn = self.connections[idx]
        # Stop once we come back to the config the call round started with
        # (self.starting_conn) - this bounds the recursion below.
        if conn != self.starting_conn and conn != self.cur_conn_def:
            logging.debug("Trying to switch to another connection: %s" % conn.get_description())
            self.disconnect()
            self.cur_conn_index = idx
            self.cur_conn_def = conn
            if not self.open():
                # recurse to try the following config
                return self.switch_to_next_config()
            else:
                return True
        else:
            logging.warning('Failed to connect: no another connection configurations.')
            return False
    def mark_cur_conn_cfg_is_ok(self):
        """Report the currently used connection config as working to the app config."""
        if self.cur_conn_def:
            self.config.conn_cfg_success(self.cur_conn_def)
    def open(self):
        """
        Opens connection to dash RPC. If it fails, then the next enabled conn config will be used, if any exists.
        :return: True if successfully connected, False if user cancelled the operation. If all of the attempts
            fail, then appropriate exception will be raised.
        """
        try:
            if not self.cur_conn_def:
                raise Exception('There is no connections to NIX network enabled in the configuration.')

            while True:
                try:
                    if self.open_internal():
                        break
                    else:
                        if not self.switch_to_next_config():
                            return False
                except UserCancelledConnection:
                    return False
                except (socket.gaierror, ConnectionRefusedError, TimeoutError, socket.timeout,
                        NoValidConnectionsError) as e:
                    # exceptions raised by not likely functioning dashd node; try to switch to another node
                    # if there is any in the config
                    if not self.switch_to_next_config():
                        raise e  # couldn't use another conn config, raise exception
                    else:
                        # switch_to_next_config() only returns True after a
                        # successful open(), so we are connected here
                        break
        except Exception as e:
            # remember the failure reason for the UI before re-raising
            self.last_error_message = str(e)
            raise

        return True
def reset_connection(self):
"""
Called when communication errors are detected while sending RPC commands. Here we are closing the SSH-tunnel
(if used) and HTTP connection object to prepare for another try.
:return:
"""
if self.active:
if self.http_conn:
self.http_conn.close()
if self.ssh:
self.ssh.disconnect()
self.active = False
    def open_internal(self):
        """
        Try to establish connection to dash RPC daemon for current connection config.
        :return: True, if connection successfully establishes, False if user Cancels the operation (not always
            cancelling will be possible - only when user is prompted for a password).
        """
        if not self.active:
            logging.debug("Trying to open connection: %s" % self.cur_conn_def.get_description())
            try:
                # make the owner know, we are connecting
                if self.on_connection_initiated_callback:
                    self.on_connection_initiated_callback()
            except:
                pass

            if self.cur_conn_def.use_ssh_tunnel:
                # RPC over SSH
                if self.ssh is None:
                    self.ssh = DashdSSH(self.cur_conn_def.ssh_conn_cfg.host, self.cur_conn_def.ssh_conn_cfg.port,
                                        self.cur_conn_def.ssh_conn_cfg.username)
                try:
                    logging.debug('starting ssh.connect')
                    self.ssh.connect()
                    logging.debug('finished ssh.connect')
                except Exception as e:
                    logging.error('error in ssh.connect')
                    try:
                        # make the owner know, connection attempt failed
                        if self.on_connection_failed_callback:
                            self.on_connection_failed_callback()
                    except:
                        logging.exception('on_connection_try_fail_callback call exception')
                    raise

                # configure SSH tunnel
                # get random local unprivileged port number to establish SSH tunnel
                # (up to 9 attempts in case a randomly chosen port is taken)
                success = False
                local_port = None
                for try_nr in range(1, 10):
                    try:
                        logging.debug(f'beginning ssh.open_tunnel, try: {try_nr}')
                        local_port = randint(2000, 50000)
                        self.ssh.open_tunnel(local_port,
                                             self.cur_conn_def.host,
                                             int(self.cur_conn_def.port))
                        success = True
                        break
                    except Exception as e:
                        logging.exception('error in ssh.open_tunnel loop: ' + str(e))
                logging.debug('finished ssh.open_tunnel loop')
                if not success:
                    logging.error('finished ssh.open_tunnel loop with error')
                    return False
                else:
                    rpc_user = self.cur_conn_def.username
                    rpc_password = self.cur_conn_def.password
                    rpc_host = '127.0.0.1'  # SSH tunnel on loopback
                    rpc_port = local_port
            else:
                # direct RPC
                rpc_host = self.cur_conn_def.host
                rpc_port = self.cur_conn_def.port
                rpc_user = self.cur_conn_def.username
                rpc_password = self.cur_conn_def.password

            if self.cur_conn_def.use_ssl:
                self.rpc_url = 'https://'
                # NOTE(review): certificate verification is intentionally disabled here
                # (_create_unverified_context) - confirm this is acceptable for the deployment.
                self.http_conn = httplib.HTTPSConnection(rpc_host, rpc_port, timeout=5, context=ssl._create_unverified_context())
            else:
                self.rpc_url = 'http://'
                self.http_conn = httplib.HTTPConnection(rpc_host, rpc_port, timeout=5)

            self.rpc_url += rpc_user + ':' + rpc_password + '@' + rpc_host + ':' + str(rpc_port)
            logging.debug('AuthServiceProxy configured to: %s' % self.rpc_url)
            self.proxy = AuthServiceProxy(self.rpc_url, timeout=1000, connection=self.http_conn)

            try:
                # check the connection
                self.http_conn.connect()
                logging.debug('Successfully connected AuthServiceProxy')

                try:
                    # make the owner know, we successfully finished connection
                    if self.on_connection_successful_callback:
                        self.on_connection_successful_callback()
                except:
                    logging.exception('on_connection_finished_callback call exception')
            except:
                logging.exception('Connection failed')

                try:
                    # make the owner know, connection attempt failed
                    if self.on_connection_failed_callback:
                        self.on_connection_failed_callback()

                    if self.ssh:
                        # if there is a ssh connection established earlier, disconnect it because apparently it isn't
                        # functioning
                        self.ssh.disconnect()
                except:
                    logging.exception('on_connection_try_fail_callback call exception')
                raise
            finally:
                logging.debug('http_conn.close()')
                self.http_conn.close()
                # timeout has been initially set to 5 seconds to perform 'quick' connection test
                self.http_conn.timeout = 20

            self.active = True
        return self.active
def get_active_conn_description(self):
if self.cur_conn_def:
return self.cur_conn_def.get_description()
else:
return '???'
@control_rpc_call
def getblockcount(self):
if self.open():
return self.proxy.getblockcount()
else:
raise Exception('Not connected')
@control_rpc_call
def getinfo(self, verify_node: bool = True):
if self.open():
info = self.proxy.getnetworkinfo()
if verify_node:
node_under_testnet = info.get('testnet')
if self.config.is_testnet() and not node_under_testnet:
raise Exception('This RPC node works under NIX MAINNET, but your current configuration is '
'for TESTNET.')
elif self.config.is_mainnet() and node_under_testnet:
raise Exception('This RPC node works under NIX TESTNET, but your current configuration is '
'for MAINNET.')
return info
else:
raise Exception('Not connected')
@control_rpc_call
def issynchronized(self):
if self.open():
# if connecting to HTTP(S) proxy do not check if dash daemon is synchronized
if self.cur_conn_def.is_http_proxy():
return True
else:
syn = self.proxy.ghostsync('status')
return syn.get('IsSynced')
else:
raise Exception('Not connected')
@control_rpc_call
def ghostsync(self):
if self.open():
# if connecting to HTTP(S) proxy do not call this function - it will not be exposed
if self.cur_conn_def.is_http_proxy():
return {}
else:
return self.proxy.ghostsync('status')
else:
raise Exception('Not connected')
    @control_rpc_call
    def ghostnodebroadcast(self, what, hexto):
        """Call the 'ghostnodebroadcast' RPC (e.g. relay a signed broadcast message).

        :param what: broadcast sub-command passed straight to the RPC
        :param hexto: opaque (hex) payload passed straight to the RPC
        """
        if self.open():
            # if what == 'relay':
            # NOTE(review): the branch below is intentionally disabled (dead code)
            # and kept only for the nixd bug described in the FIXME.
            if False:
                # FIXME: relay does not report correct status without 3rd parameter due to bug in nixd
                return self.proxy.ghostnodebroadcast(what, hexto, "not-safe")
            else:
                return self.proxy.ghostnodebroadcast(what, hexto)
        else:
            raise Exception('Not connected')
def update_mn_queue_values(self):
"""
Updates masternode payment queue order values.
"""
start_tm = time.time()
self.payment_queue = []
d = datetime.datetime.utcnow()
now = int(time.mktime((d.year, d.month, d.day, d.hour, d.minute, d.second, 0, 0, 0)))
for mn in self.masternodes:
if mn.status == 'ENABLED':
# estimate payment queue position: after loading all masternodes
# queue_position will be used to sort mn list and count the real queue position
if mn.lastpaidtime == 0:
mn.queue_position = mn.activeseconds
else:
lastpaid_ago = now - mn.lastpaidtime
mn.queue_position = min(lastpaid_ago, mn.activeseconds)
self.payment_queue.append(mn)
else:
mn.queue_position = None
duration1 = time.time() - start_tm
self.payment_queue.sort(key=lambda x: x.queue_position, reverse=True)
duration2 = time.time() - start_tm
for mn in self.masternodes:
if mn.status == 'ENABLED':
mn.queue_position = self.payment_queue.index(mn)
else:
mn.queue_position = None
duration3 = time.time() - start_tm
logging.info('Masternode queue build time1: %s, time2: %s, time3: %s' %
(str(duration1), str(duration2), str(duration3)))
    @control_rpc_call
    def get_ghostnodelist(self, *args, data_max_age=MASTERNODES_CACHE_VALID_SECONDS):
        """
        Returns masternode list, read from the Dash network or from the internal cache.
        :param args: arguments passed to the 'ghostnodelist' RPC call
        :param data_max_age: maximum age (in seconds) of the cached masternode data to be used; if the
            cache is older than 'data_max_age', then an RPC call is performed to load newer masternode data;
            value of 0 forces reading of the new data from the network
        :return: list of Masternode objects, matching the 'args' arguments
        """
        def parse_mns(mns_raw):
            """
            Parses dictionary of strings returned from the RPC to Masternode object list.
            :param mns_raw: Dict of masternodes in format of RPC ghostnodelist command
            :return: list of Masternode object
            """
            tm_begin = time.time()
            ret_list = []
            for mn_id in mns_raw.keys():
                mn_raw = mns_raw.get(mn_id)
                mn_raw = mn_raw.strip()
                elems = mn_raw.split()
                # NOTE(review): the unpacking below assumes exactly 8 fields; a
                # line with more than 8 would raise ValueError despite passing
                # this >= 8 check - confirm the RPC output format.
                if len(elems) >= 8:
                    mn = Masternode()
                    # (status, protocol, payee, lastseen, activeseconds, lastpaidtime, pastpaidblock, ip)
                    mn.status, mn.protocol, mn.payee, mn.lastseen, mn.activeseconds, mn.lastpaidtime, \
                    mn.lastpaidblock, mn.ip = elems
                    mn.lastseen = int(mn.lastseen)
                    mn.activeseconds = int(mn.activeseconds)
                    mn.lastpaidtime = int(mn.lastpaidtime)
                    mn.lastpaidblock = int(mn.lastpaidblock)
                    mn.ident = mn_id
                    ret_list.append(mn)
            duration = time.time() - tm_begin
            logging.info('Parse ghostnodelist time: ' + str(duration))
            return ret_list

        def update_masternode_data(existing_mn, new_data, cursor):
            # update cached masternode's properties; monitor_changes makes the
            # attribute assignments below set 'modified' when a value changes
            existing_mn.modified = False
            existing_mn.monitor_changes = True
            existing_mn.ident = new_data.ident
            existing_mn.status = new_data.status
            existing_mn.protocol = new_data.protocol
            existing_mn.payee = new_data.payee
            existing_mn.lastseen = new_data.lastseen
            existing_mn.activeseconds = new_data.activeseconds
            existing_mn.lastpaidtime = new_data.lastpaidtime
            existing_mn.lastpaidblock = new_data.lastpaidblock
            existing_mn.ip = new_data.ip

            # ... and finally update MN db record (only when something changed)
            if cursor and existing_mn.modified:
                cursor.execute("UPDATE MASTERNODES set ident=?, status=?, protocol=?, payee=?,"
                               " last_seen=?, active_seconds=?, last_paid_time=?, "
                               " last_paid_block=?, ip=?"
                               "WHERE id=?",
                               (new_data.ident, new_data.status, new_data.protocol, new_data.payee,
                                new_data.lastseen, new_data.activeseconds, new_data.lastpaidtime,
                                new_data.lastpaidblock, new_data.ip, existing_mn.db_id))

        if self.open():
            if len(args) == 1 and args[0] == 'full':
                last_read_time = app_cache.get_value(f'MasternodesLastReadTime_{self.app_config.dash_network}', 0, int)
                logging.info("MasternodesLastReadTime: %d" % last_read_time)

                if self.masternodes and data_max_age > 0 and \
                   int(time.time()) - last_read_time < data_max_age:
                    logging.info('Using cached ghostnodelist (data age: %s)' % str(int(time.time()) - last_read_time))
                    return self.masternodes
                else:
                    logging.info('Loading masternode list from NIX daemon...')
                    mns = self.proxy.ghostnodelist(*args)
                    mns = parse_mns(mns)
                    logging.info('Finished loading masternode list')

                    # mark already cached masternodes to identify those to delete
                    for mn in self.masternodes:
                        mn.marker = False

                    # save masternodes to the db cache
                    db_modified = False
                    cur = None
                    try:
                        if self.db_intf.db_active:
                            cur = self.db_intf.get_cursor()

                        for mn in mns:
                            # check if newly-read masternode already exists in the cache
                            existing_mn = self.masternodes_by_ident.get(mn.ident)
                            if not existing_mn:
                                mn.marker = True
                                self.masternodes.append(mn)
                                self.masternodes_by_ident[mn.ident] = mn
                                if self.db_intf.db_active:
                                    cur.execute("INSERT INTO MASTERNODES(ident, status, protocol, payee, last_seen,"
                                                " active_seconds, last_paid_time, last_paid_block, ip, dmt_active,"
                                                " dmt_create_time) "
                                                "VALUES (?,?,?,?,?,?,?,?,?,?,?)",
                                                (mn.ident, mn.status, mn.protocol, mn.payee, mn.lastseen,
                                                 mn.activeseconds, mn.lastpaidtime, mn.lastpaidblock, mn.ip, 1,
                                                 datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
                                    mn.db_id = cur.lastrowid
                                    db_modified = True
                            else:
                                existing_mn.marker = True
                                update_masternode_data(existing_mn, mn, cur)
                                db_modified = True

                        # remove from the cache masternodes that no longer exist
                        # (iterate in reverse so deletions don't shift pending indexes)
                        for mn_index in reversed(range(len(self.masternodes))):
                            mn = self.masternodes[mn_index]
                            if not mn.marker:
                                if self.db_intf.db_active:
                                    cur.execute("UPDATE MASTERNODES set dmt_active=0, dmt_deactivation_time=?"
                                                "WHERE ID=?",
                                                (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                                                 mn.db_id))
                                    db_modified = True
                                self.masternodes_by_ident.pop(mn.ident,0)
                                del self.masternodes[mn_index]

                        app_cache.set_value(f'MasternodesLastReadTime_{self.app_config.dash_network}', int(time.time()))
                        self.update_mn_queue_values()
                    finally:
                        if db_modified:
                            self.db_intf.commit()
                        if cur is not None:
                            self.db_intf.release_cursor()

                    return self.masternodes
            else:
                # any other argument combination bypasses the cache entirely
                mns = self.proxy.ghostnodelist(*args)
                mns = parse_mns(mns)
                return mns
        else:
            raise Exception('Not connected')
@control_rpc_call
def getaddressbalance(self, addresses):
if self.open():
return self.proxy.getaddressbalance({'addresses': addresses})
else:
raise Exception('Not connected')
@control_rpc_call
def getaddressutxos(self, addresses):
if self.open():
return self.proxy.getaddressutxos({'addresses': addresses})
else:
raise Exception('Not connected')
@control_rpc_call
def getrawtransaction(self, txid, verbose):
if self.open():
return json_cache_wrapper(self.proxy.getrawtransaction, self, 'tx-' + str(verbose) + '-' + txid)(txid, verbose)
else:
raise Exception('Not connected')
@control_rpc_call
def getblockhash(self, blockid):
if self.open():
return json_cache_wrapper(self.proxy.getblockhash, self, 'blockhash-' + str(blockid))(blockid)
else:
raise Exception('Not connected')
@control_rpc_call
def getblockheader(self, blockhash):
if self.open():
return json_cache_wrapper(self.proxy.getblockheader, self, 'blockheader-' + str(blockhash))(blockhash)
else:
raise Exception('Not connected')
@control_rpc_call
def validateaddress(self, address):
if self.open():
return self.proxy.validateaddress(address)
else:
raise Exception('Not connected')
@control_rpc_call
def decoderawtransaction(self, rawtx):
if self.open():
return self.proxy.decoderawtransaction(rawtx)
else:
raise Exception('Not connected')
@control_rpc_call
def sendrawtransaction(self, tx, use_instant_send):
if self.open():
return self.proxy.sendrawtransaction(tx, False)
else:
raise Exception('Not connected')
@control_rpc_call
def getcurrentvotes(self, hash):
if self.open():
return self.proxy.getcurrentvotes(hash)
else:
raise Exception('Not connected')
@control_rpc_call
def gobject(self, *args):
if self.open():
return self.proxy.gobject(*args)
else:
raise Exception('Not connected')
@control_rpc_call
def masternode(self, *args):
if self.open():
return self.proxy.masternode(*args)
else:
raise Exception('Not connected')
@control_rpc_call
def getgovernanceinfo(self):
if self.open():
return self.proxy.getgovernanceinfo()
else:
raise Exception('Not connected')
@control_rpc_call
def getsuperblockbudget(self, block_index):
if self.open():
return self.proxy.getsuperblockbudget(block_index)
else:
raise Exception('Not connected')
@control_rpc_call
def voteraw(self, masternode_tx_hash, masternode_tx_index, governance_hash, vote_signal, vote, sig_time, vote_sig):
if self.open():
return self.proxy.voteraw(masternode_tx_hash, masternode_tx_index, governance_hash, vote_signal, vote,
sig_time, vote_sig)
else:
raise Exception('Not connected')
| 42.284111 | 131 | 0.549637 |
85086400e571cf100b31cb67df8b0fa08a63b920 | 503 | py | Python | test_stream/udp_opencv_server.py | alphaciel/Balancing-Robot-Raspberry-Pi-DIY | 8a61acf688ea0915017c40eaff3841a9b219f9b7 | [
"MIT"
] | null | null | null | test_stream/udp_opencv_server.py | alphaciel/Balancing-Robot-Raspberry-Pi-DIY | 8a61acf688ea0915017c40eaff3841a9b219f9b7 | [
"MIT"
] | null | null | null | test_stream/udp_opencv_server.py | alphaciel/Balancing-Robot-Raspberry-Pi-DIY | 8a61acf688ea0915017c40eaff3841a9b219f9b7 | [
"MIT"
] | null | null | null | #Server prog
# Server prog: receives a raw video stream over UDP and displays it with OpenCV.
import socket
import numpy
import time
import cv2

UDP_IP = "127.0.0.1"
UDP_PORT = 999

# Each datagram carries one chunk of the raw frame; a full 480x640 BGR frame
# (480 * 640 * 3 = 921600 bytes) arrives as 20 chunks of 46080 bytes.
CHUNK_SIZE = 46080
CHUNKS_PER_FRAME = 20
FRAME_BYTES = CHUNK_SIZE * CHUNKS_PER_FRAME

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((UDP_IP, UDP_PORT))

try:
    # bytes buffer, not str: recvfrom() returns bytes, so the original
    # str accumulator ('s = ""') raises TypeError on Python 3
    buf = b""
    while True:
        data, addr = sock.recvfrom(CHUNK_SIZE)
        buf += data
        if len(buf) == FRAME_BYTES:
            # frombuffer replaces the deprecated numpy.fromstring and avoids a copy
            frame = numpy.frombuffer(buf, dtype=numpy.uint8).reshape(480, 640, 3)
            cv2.imshow("frame", frame)
            buf = b""
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    # release OS resources even when the loop exits via an exception
    sock.close()
    cv2.destroyAllWindows()
836bfec3cfb4efb4ef4c247055649bdd1c4e1afa | 9,384 | py | Python | ispyb/sp/core.py | rjgildea/ispyb-api | 24e76c79ef06c7cc8edc35066aa1021dee20d67a | [
"Apache-2.0"
] | null | null | null | ispyb/sp/core.py | rjgildea/ispyb-api | 24e76c79ef06c7cc8edc35066aa1021dee20d67a | [
"Apache-2.0"
] | null | null | null | ispyb/sp/core.py | rjgildea/ispyb-api | 24e76c79ef06c7cc8edc35066aa1021dee20d67a | [
"Apache-2.0"
] | null | null | null | # core.py
#
# Copyright (C) 2016 Diamond Light Source, Karl Levik
#
# 2016-11-30
#
# Methods to store and retrieve data in the core tables
#
import copy
import ispyb.interface.core
from ispyb.strictordereddict import StrictOrderedDict
class Core(ispyb.interface.core.IF):
    """Core provides methods to store and retrieve data in the core tables."""

    def __init__(self):
        # Stateless: the database connection is supplied by the IF base class
        # via get_connection().
        pass
_proposal_params = StrictOrderedDict(
[
("id", None),
("person_id", None),
("title", None),
("proposal_code", None),
("proposal_number", None),
("proposal_type", None),
("external_pk_uuid", None),
]
)
_session_for_proposal_code_number_params = StrictOrderedDict(
[
("id", None),
("proposal_code", None),
("proposal_number", None),
("visit_number", None),
("beamline_setup_id", None),
("start_date", None),
("end_date", None),
("beamline_name", None),
("title", None),
("beamline_operator", None),
("nb_shifts", None),
("scheduled", None),
("used_flag", None),
("comments", None),
("external_pk_id", None),
("external_pk_uuid", None),
]
)
_person_params = StrictOrderedDict(
[
("id", None),
("laboratory_id", None),
("family_name", None),
("given_name", None),
("title", None),
("email_address", None),
("phone_number", None),
("login", None),
("external_pk_id", None),
("external_pk_uuid", None),
]
)
_proposal_has_person_params = StrictOrderedDict(
[("id", None), ("proposal_id", None), ("person_id", None), ("role", None)]
)
_session_has_person_params = StrictOrderedDict(
[("session_id", None), ("person_id", None), ("role", None), ("remote", None)]
)
_sample_params = StrictOrderedDict(
[
("id", None),
("authLogin", None),
("crystalid", None),
("containerid", None),
("name", None),
("code", None),
("location", None),
("holder_length", None),
("loop_length", None),
("loop_type", None),
("wire_width", None),
("comments", None),
("status", None),
("is_in_sc", None),
]
)
    # Accessors below hand out deep copies so callers can fill in values
    # without mutating the shared class-level templates.
    @classmethod
    def get_proposal_params(cls):
        """Return a fresh copy of the proposal parameter template."""
        return copy.deepcopy(cls._proposal_params)

    @classmethod
    def get_session_for_proposal_code_number_params(cls):
        """Return a fresh copy of the session-for-proposal parameter template."""
        return copy.deepcopy(cls._session_for_proposal_code_number_params)

    @classmethod
    def get_person_params(cls):
        """Return a fresh copy of the person parameter template."""
        return copy.deepcopy(cls._person_params)

    @classmethod
    def get_proposal_has_person_params(cls):
        """Return a fresh copy of the proposal-person association parameter template."""
        return copy.deepcopy(cls._proposal_has_person_params)

    @classmethod
    def get_session_has_person_params(cls):
        """Return a fresh copy of the session-person association parameter template."""
        return copy.deepcopy(cls._session_has_person_params)

    @classmethod
    def get_sample_params(cls):
        """Return a fresh copy of the sample parameter template."""
        return copy.deepcopy(cls._sample_params)
    def upsert_proposal(self, values):
        """Insert or update a proposal.

        :param values: ordered values, presumably matching get_proposal_params() - verify against the stored procedure.
        """
        return self.get_connection().call_sp_write(
            procname="upsert_proposal", args=values
        )

    def upsert_session_for_proposal_code_number(self, values):
        """Insert or update a session for a certain proposal with given proposal code and number."""
        return self.get_connection().call_sp_write(
            procname="upsert_session_for_proposal_code_number", args=values
        )

    def upsert_person(self, values):
        """Insert or update a person."""
        return self.get_connection().call_sp_write(
            procname="upsert_person", args=values
        )

    def upsert_session_has_person(self, values):
        """Insert or update a session-person association."""
        return self.get_connection().call_sp_write(
            procname="upsert_session_has_person", args=values
        )

    def upsert_proposal_has_person(self, values):
        """Insert or update a proposal-person association."""
        return self.get_connection().call_sp_write(
            procname="upsert_proposal_has_person", args=values
        )

    def upsert_sample(self, values):
        """Insert or update sample."""
        return self.get_connection().call_sp_write(
            procname="upsert_sample", args=values
        )

    def retrieve_samples_not_loaded_for_container_reg_barcode(self, barcode):
        """Retrieve the not-yet loaded samples in the most recent container that corresponds with the given container registry barcode."""
        return self.get_connection().call_sp_retrieve(
            procname="retrieve_samples_not_loaded_for_container_reg_barcode",
            args=(barcode,),
        )
    def retrieve_visit_id(self, visit):
        """Get the database ID for a visit on the form mx1234-5."""
        # scalar stored function, hence call_sf_retrieve rather than call_sp_retrieve
        return self.get_connection().call_sf_retrieve(
            funcname="retrieve_visit_id", args=(visit,)
        )

    def retrieve_datacollection_id(self, img_filename, img_fileloc):
        """Get the database ID for the data collection corresponding to the given diffraction image file."""
        return self.get_connection().call_sf_retrieve(
            funcname="retrieve_datacollection_id", args=(img_filename, img_fileloc)
        )

    def retrieve_current_sessions(self, beamline, tolerance_mins=0):
        """Get a result-set with the currently active sessions on the given beamline."""
        return self.get_connection().call_sp_retrieve(
            procname="retrieve_current_sessions", args=(beamline, tolerance_mins)
        )

    def retrieve_sessions_for_beamline_and_run(self, beamline, run):
        """Get a result-set with the sessions associated with the given beamline/instrument and run."""
        return self.get_connection().call_sp_retrieve(
            procname="retrieve_sessions_for_beamline_and_run", args=(beamline, run)
        )

    def retrieve_sessions_for_person_login(self, login):
        """Get a result-set with the sessions associated with the given unique person login."""
        return self.get_connection().call_sp_retrieve(
            procname="retrieve_sessions_for_person_login", args=(login,)
        )

    def retrieve_current_sessions_for_person(self, beamline, fed_id, tolerance_mins=0):
        """Get a result-set with the currently active sessions on the given beamline for the given user."""
        return self.get_connection().call_sp_retrieve(
            procname="retrieve_current_sessions_for_person",
            args=(beamline, fed_id, tolerance_mins),
        )

    def retrieve_most_recent_session(self, beamline, proposal_code):
        """Get a result-set with the most recent session on the given beamline for the given proposal code."""
        return self.get_connection().call_sp_retrieve(
            procname="retrieve_most_recent_session", args=(beamline, proposal_code)
        )

    def retrieve_expired_sessions_for_instrument_and_period(
        self, instrument, start_date, end_date
    ):
        """Return a multi-row result-set with the sessions that ended within the window defined by start_date and end_date on the instrument given by p_instrument (can contain database wildcards)."""
        return self.get_connection().call_sp_retrieve(
            procname="retrieve_expired_sessions_for_instrument_and_period",
            args=(instrument, start_date, end_date),
        )

    def retrieve_persons_for_proposal(self, proposal_code, proposal_number):
        """Get a result-set with the persons associated with a given proposal specified by proposal code, proposal_number."""
        return self.get_connection().call_sp_retrieve(
            procname="retrieve_persons_for_proposal",
            args=(proposal_code, proposal_number),
        )

    def retrieve_persons_for_session(
        self, proposal_code, proposal_number, visit_number
    ):
        """Get a result-set with the persons associated with a given session specified by proposal code, proposal_number, visit_number."""
        return self.get_connection().call_sp_retrieve(
            procname="retrieve_persons_for_session",
            args=(proposal_code, proposal_number, visit_number),
        )

    def retrieve_current_cm_sessions(self, beamline):
        """Get a result-set with the currently active commissioning (cm) sessions on the given beamline."""
        return self.get_connection().call_sp_retrieve(
            procname="retrieve_current_cm_sessions", args=(beamline,)
        )

    def retrieve_active_plates(self, beamline):
        """Get a result-set with the submitted plates not yet in local storage on a given beamline."""
        # note: backed by a differently-named stored procedure
        return self.get_connection().call_sp_retrieve(
            procname="retrieve_containers_submitted_non_ls", args=(beamline,)
        )

    def retrieve_proposal_title(self, proposal_code, proposal_number, auth_login=None):
        """Get the title of a given proposal."""
        return self.get_connection().call_sp_retrieve(
            procname="retrieve_proposal_title",
            args=(proposal_code, proposal_number, auth_login),
        )
| 37.536 | 194 | 0.641624 |
2d1a8d3504ff08205aaf11ac135173dac86e01fd | 29,760 | py | Python | pnacl/driver/pnacl-driver.py | yantrabuddhi/nativeclient | 3462f030ad4a8fc19e446a95ac312e70db8a0444 | [
"BSD-3-Clause"
] | null | null | null | pnacl/driver/pnacl-driver.py | yantrabuddhi/nativeclient | 3462f030ad4a8fc19e446a95ac312e70db8a0444 | [
"BSD-3-Clause"
] | null | null | null | pnacl/driver/pnacl-driver.py | yantrabuddhi/nativeclient | 3462f030ad4a8fc19e446a95ac312e70db8a0444 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import subprocess
from driver_tools import AddHostBinarySearchPath, DefaultOutputName, \
DefaultPCHOutputName, DriverChain, GetArch, ParseArgs, ParseTriple, \
Run, RunDriver, RunWithEnv, TempNameGen, UnrecognizedOption
from driver_env import env
from driver_log import DriverOpen, Log
import filetype
import pathtools
# Driver environment variables for this tool, merged into the shared driver
# env by main(). Values use the driver's lazy ${...} substitution syntax
# (expanded by driver_env), so e.g. '${LD_ARGS_%EMITMODE%}' is resolved at
# lookup time, not here.
EXTRA_ENV = {
  'ALLOW_TRANSLATE': '0', # Allow bitcode translation before linking.
                          # It doesn't normally make sense to do this.
  'ALLOW_NATIVE' : '0', # Allow native objects (.S,.s,.o) to be in the
                        # linker line for .pexe generation.
                        # It doesn't normally make sense to do this.
  # CXX_EH_MODE specifies how to deal with C++ exception handling:
  #  * 'none': Strips out use of C++ exception handling.
  #  * 'sjlj': Enables the setjmp()+longjmp()-based implementation of
  #    C++ exception handling.
  'CXX_EH_MODE': 'none',
  'FORCE_INTERMEDIATE_LL': '0',
  # Produce an intermediate .ll file
  # Useful for debugging.
  # NOTE: potentially different code paths and bugs
  #       might be triggered by this
  'LANGUAGE'    : '', # C or CXX (set by SetTool)
  'INCLUDE_CXX_HEADERS': '0', # This is set by RunCC.
  # Command-line options
  'GCC_MODE'    : '', # '' (default), '-E', '-c', or '-S'
  'STDINC'      : '1', # Include standard headers (-nostdinc sets to 0)
  'STDINCCXX'   : '1', # Include standard cxx headers (-nostdinc++ sets to 0)
  'USE_STDLIB'  : '1', # Include standard libraries (-nostdlib sets to 0)
  'STDLIB'      : 'libc++', # C++ Standard Library.
  'DEFAULTLIBS' : '1', # Link with default libraries
  'DIAGNOSTIC'  : '0', # Diagnostic flag detected
  'PIC'         : '0', # Generate PIC
  # TODO(robertm): Switch the default to 1
  'NO_ASM'      : '0', # Disallow use of inline assembler
  'NEED_DASH_E' : '0', # Used for stdin inputs, which must have an explicit
                       # type set (using -x) unless -E is specified.
  'VERBOSE'     : '0', # Verbose (-v)
  'SHOW_VERSION': '0', # Version (--version)
  'PTHREAD'     : '0', # use pthreads?
  'INPUTS'      : '', # Input files
  'OUTPUT'      : '', # Output file
  'UNMATCHED'   : '', # Unrecognized parameters
  # Per-architecture preprocessor bias macros, selected via ${BIAS_%BIAS%}.
  'BIAS_NONE'   : '',
  'BIAS_ARM'    : '-D__arm__ -D__ARM_ARCH_7A__ -D__ARMEL__',
  'BIAS_MIPS32' : '-D__mips__',
  'BIAS_X8632'  : '-D__i386__ -D__i386 -D__i686 -D__i686__ -D__pentium4__',
  'BIAS_X8664'  : '-D__amd64__ -D__amd64 -D__x86_64__ -D__x86_64 -D__core2__',
  'BIAS_ARM_NONSFI': '${BIAS_ARM} -D__native_client_nonsfi__',
  'BIAS_X8632_NONSFI': '${BIAS_X8632} -D__native_client_nonsfi__',
  'FRONTEND_TRIPLE' : 'le32-unknown-nacl',
  'OPT_LEVEL'   : '', # Default for most tools is 0, but we need to know
                      # if it's explicitly set or not when the driver
                      # is only used for linking + translating.
  'CC_FLAGS'    : '-O${#OPT_LEVEL ? ${OPT_LEVEL} : 0} ' +
                  '-fno-vectorize -fno-slp-vectorize ' +
                  '-fno-common ${PTHREAD ? -pthread} ' +
                  '-nostdinc ${BIAS_%BIAS%} ' +
                  # BUG: http://code.google.com/p/nativeclient/issues/detail?id=2345
                  # it would be better to detect asm use inside clang
                  # as some uses of asm are borderline legit, e.g.
                  # <prototype> asm("<function-name>");
                  '${NO_ASM ? -Dasm=ASM_FORBIDDEN -D__asm__=ASM_FORBIDDEN} ' +
                  '-target ${FRONTEND_TRIPLE}',
  'ISYSTEM' : '${ISYSTEM_USER} ${STDINC ? ${ISYSTEM_BUILTIN}}',
  'ISYSTEM_USER' : '', # System include directories specified by
                       # using the -isystem flag.
  'ISYSTEM_BUILTIN':
    '${BASE_USR}/usr/include ' +
    '${ISYSTEM_CLANG} ' +
    '${ISYSTEM_CXX} ' +
    '${BASE_USR}/include ' +
    '${BASE_SDK}/include ',
  'ISYSTEM_CLANG' : '${BASE_LLVM}/lib/clang/${CLANG_VER}/include',
  'ISYSTEM_CXX' :
    '${INCLUDE_CXX_HEADERS && STDINCCXX ? ${ISYSTEM_CXX_include_paths}}',
  'ISYSTEM_CXX_include_paths' : '${BASE_USR}/include/c++/v1',
  # Only propagate opt level to linker if explicitly set, so that the
  # linker will know if an opt level was explicitly set or not.
  'LD_FLAGS' : '${#OPT_LEVEL ? -O${OPT_LEVEL}} -static ' +
               '${PIC ? -fPIC} ${@AddPrefix:-L:SEARCH_DIRS} ' +
               '--pnacl-exceptions=${CXX_EH_MODE}',
  'SEARCH_DIRS' : '', # Directories specified using -L
  # Library Strings
  'EMITMODE' : '${!USE_STDLIB ? nostdlib : static}',
  # This is setup so that LD_ARGS_xxx is evaluated lazily.
  'LD_ARGS' : '${LD_ARGS_%EMITMODE%}',
  # ${ld_inputs} signifies where to place the objects and libraries
  # provided on the command-line.
  'LD_ARGS_nostdlib': '-nostdlib ${ld_inputs}',
  'LD_ARGS_static':
    '-l:crt1.x -l:crti.bc -l:crtbegin.bc '
    '${CXX_EH_MODE==sjlj ? -l:sjlj_eh_redirect.bc : '
    '${CXX_EH_MODE==none ? -l:unwind_stubs.bc}} ' +
    '${ld_inputs} ' +
    '--start-group ${STDLIBS} --end-group',
  'LLVM_PASSES_TO_DISABLE': '',
  # Flags for translating to native .o files.
  'TRANSLATE_FLAGS' : '-O${#OPT_LEVEL ? ${OPT_LEVEL} : 0}',
  'STDLIBS'   : '${DEFAULTLIBS ? '
                '${LIBSTDCPP} ${LIBPTHREAD} ${LIBNACL} ${LIBC} '
                '${LIBGCC_BC} ${LIBPNACLMM}}',
  'LIBSTDCPP' : '${IS_CXX ? -lc++ -lm -lpthread }',
  # The few functions in the bitcode version of compiler-rt unfortunately
  # depend on libm. TODO(jvoung): try rewriting the compiler-rt functions
  # to be standalone.
  'LIBGCC_BC' : '-lgcc -lm',
  'LIBC'      : '-lc',
  'LIBNACL'   : '-lnacl',
  'LIBPNACLMM': '-lpnaclmm',
  # Enabled/disabled by -pthreads
  'LIBPTHREAD': '${PTHREAD ? -lpthread}',
  # IS_CXX is set by pnacl-clang and pnacl-clang++ programmatically
  'CC' : '${IS_CXX ? ${CLANGXX} : ${CLANG}}',
  'RUN_CC': '${CC} ${emit_llvm_flag} ${mode} ${CC_FLAGS} ' +
            '${@AddPrefix:-isystem :ISYSTEM} ' +
            '-x${typespec} ${infile} -o ${output}',
}
def AddLLVMPassDisableFlag(*args):
  """Record LLVM passes to disable, and also forward the flags to the
  linker so the disable request survives into the link step."""
  env.append('LLVM_PASSES_TO_DISABLE', *args)
  env.append('LD_FLAGS', *args)
def AddLDFlag(*args):
  """Forward the given flags verbatim to the bitcode linker (pnacl-ld)."""
  env.append('LD_FLAGS', *args)
def AddTranslatorFlag(*args):
  """Forward flags to the translator, both directly and via the linker."""
  # The linker gets them as -Wt,<flag> in case it drives the build all the
  # way to a .nexe and invokes the translator itself.
  wrapped = ['-Wt,' + flag for flag in args]
  env.append('LD_FLAGS', *wrapped)
  # The translator gets them directly in case we stop at a native .o.
  env.append('TRANSLATE_FLAGS', *args)
def AddCCFlag(*args):
  """Forward the given flags verbatim to the clang frontend."""
  env.append('CC_FLAGS', *args)
def AddDiagnosticFlag(*args):
  """Forward a diagnostic flag to clang and enter diagnostic mode.

  In diagnostic mode main() reruns the unmodified command line so that
  tools like "configure" can parse the output (see main()).
  """
  env.append('CC_FLAGS', *args)
  env.set('DIAGNOSTIC', '1')
def SetTarget(*args):
  """Set the frontend target triple and forward it to the linker.

  The triple is still run through ParseTriple() so that a malformed
  triple is diagnosed immediately; its return value is deliberately
  unused here (previously it was bound to a dead local).
  """
  ParseTriple(args[0])  # validate only; result intentionally discarded
  env.set('FRONTEND_TRIPLE', args[0])
  AddLDFlag('--target=' + args[0])
def SetStdLib(*args):
  """Validate the -stdlib= choice; only libc++ is accepted."""
  requested = args[0]
  if requested != 'libc++':
    Log.Fatal('Only libc++ is supported as standard library')
def IsPortable():
  """True when targeting portable (le32) bitcode rather than a biased arch."""
  triple = env.getone('FRONTEND_TRIPLE')
  return triple.startswith('le32-')
# Counter used to give each stdin pseudo-input a unique synthetic name.
stdin_count = 0
def AddInputFileStdin():
  """Register '-' (stdin) as an input file.

  Stdin is recorded under a synthetic '__stdinN__' name so it can flow
  through the normal INPUTS bookkeeping. Its file type must come from a
  prior -x flag; otherwise it defaults to C source and -E becomes
  mandatory (enforced later via NEED_DASH_E).
  """
  global stdin_count
  # When stdin is an input, -x or -E must be given.
  forced_type = filetype.GetForcedFileType()
  if not forced_type:
    # Only allowed if -E is specified.
    forced_type = 'c'
    env.set('NEED_DASH_E', '1')
  stdin_name = '__stdin%d__' % stdin_count
  env.append('INPUTS', stdin_name)
  filetype.ForceFileType(stdin_name, forced_type)
  stdin_count += 1
def IsStdinInput(f):
  """Report whether *f* is a synthetic '__stdinN__' placeholder created
  by AddInputFileStdin()."""
  has_prefix = f.startswith('__stdin')
  has_suffix = f.endswith('__')
  return has_prefix and has_suffix
def HandleDashX(arg):
  """Implement gcc's '-x <language>' option."""
  if arg == 'none':
    # '-x none' reverts to detecting each file's type from its extension.
    filetype.SetForcedFileType(None)
  else:
    filetype.SetForcedFileType(filetype.GCCTypeToFileType(arg))
def AddVersionFlag(*args):
  """Handle --version: remember to print it, then enter diagnostic mode."""
  env.set('SHOW_VERSION', '1')
  AddDiagnosticFlag(*args)
def AddBPrefix(prefix):
  """Register *prefix* (-B) as a host-binary, library and include location."""
  AddHostBinarySearchPath(prefix)
  path = pathtools.normalize(prefix)
  # Canonicalize an existing directory to end with a slash.
  if pathtools.isdir(path) and not path.endswith('/'):
    path = path + '/'
  # An existing directory also acts as a -L style library search dir.
  if pathtools.isdir(path):
    env.append('SEARCH_DIRS', path)
  # And its include/ subdirectory (if present) as a system header dir.
  include_dir = path + 'include'
  if pathtools.isdir(include_dir):
    env.append('ISYSTEM_USER', include_dir)
# Argument tables consumed by ParseArgs(): each entry is (pattern, action).
# A tuple pattern matches consecutive argv items; $0/$1 in string actions
# refer to the regex capture groups. Order matters: the first match wins.
# CustomPatterns holds pnacl-specific options.
CustomPatterns = [
  ( '--driver=(.+)', "env.set('CC', pathtools.normalize($0))\n"),
  ( '--pnacl-allow-native', "env.set('ALLOW_NATIVE', '1')"),
  ( '--pnacl-allow-translate', "env.set('ALLOW_TRANSLATE', '1')"),
  ( '--pnacl-frontend-triple=(.+)', SetTarget),
  ( ('-target','(.+)'), SetTarget),
  ( ('--target=(.+)'), SetTarget),
  ( '--pnacl-exceptions=(none|sjlj)', "env.set('CXX_EH_MODE', $0)"),
  ( '(--pnacl-allow-nexe-build-id)', AddLDFlag),
  ( '(--pnacl-disable-abi-check)', AddLDFlag),
  ( '(--pnacl-disable-pass=.+)', AddLLVMPassDisableFlag),
]
# GCCPatterns emulates the gcc command-line surface.
GCCPatterns = [
  ( '-o(.+)', "env.set('OUTPUT', pathtools.normalize($0))"),
  ( ('-o', '(.+)'), "env.set('OUTPUT', pathtools.normalize($0))"),
  ( '-E', "env.set('GCC_MODE', '-E')"),
  ( '-S', "env.set('GCC_MODE', '-S')"),
  ( '-c', "env.set('GCC_MODE', '-c')"),
  ( '-allow-asm', "env.set('NO_ASM', '0')"),
  ( '-nostdinc', "env.set('STDINC', '0')"),
  ( '-nostdinc\+\+', "env.set('STDINCCXX', '0')"),
  ( '-nostdlib', "env.set('USE_STDLIB', '0')"),
  ( '-nodefaultlibs', "env.set('DEFAULTLIBS', '0')"),
  ( '-?-stdlib=(.*)', SetStdLib),
  ( ('-?-stdlib', '(.*)'), SetStdLib),
  # Flags to pass to native linker
  ( '(-Wn,.*)', AddLDFlag),
  ( '-rdynamic', "env.append('LD_FLAGS', '-export-dynamic')"),
  # Flags to pass to pnacl-translate
  ( '-Wt,(.*)', AddTranslatorFlag),
  ( ('-Xtranslator','(.*)'), AddTranslatorFlag),
  # We don't care about -fPIC, but pnacl-ld and pnacl-translate do.
  ( '-fPIC', "env.set('PIC', '1')"),
  # We must include -l, -Xlinker, and -Wl options into the INPUTS
  # in the order they appeared. This is the exactly behavior of gcc.
  # For example: gcc foo.c -Wl,--start-group -lx -ly -Wl,--end-group
  #
  ( '(-l.+)', "env.append('INPUTS', $0)"),
  ( ('(-l)','(.+)'), "env.append('INPUTS', $0+$1)"),
  ( ('-Xlinker','(.*)'), "env.append('INPUTS', '-Xlinker=' + $0)"),
  ( '(-Wl,.*)', "env.append('INPUTS', $0)"),
  ( '(-Bstatic)', "env.append('INPUTS', $0)"),
  ( '(-Bdynamic)', "env.append('INPUTS', $0)"),
  # -O4 and above are clamped to -O3; bare -O means -O1, as with gcc.
  ( '-O([sz])', "env.set('OPT_LEVEL', $0)\n"),
  ( '-O([0-3])', "env.set('OPT_LEVEL', $0)\n"),
  ( '-O([0-9]+)', "env.set('OPT_LEVEL', '3')\n"),
  ( '-O', "env.set('OPT_LEVEL', '1')\n"),
  ( ('-isystem', '(.*)'),
    "env.append('ISYSTEM_USER', pathtools.normalize($0))"),
  ( '-isystem(.+)',
    "env.append('ISYSTEM_USER', pathtools.normalize($0))"),
  ( ('-I', '(.+)'), "env.append('CC_FLAGS', '-I'+pathtools.normalize($0))"),
  ( '-I(.+)', "env.append('CC_FLAGS', '-I'+pathtools.normalize($0))"),
  # -I is passed through, so we allow -isysroot and pass it through as well.
  # However -L is intercepted and interpreted, so it would take more work
  # to handle -sysroot w/ libraries.
  ( ('-isysroot', '(.+)'),
    "env.append('CC_FLAGS', '-isysroot ' + pathtools.normalize($0))"),
  ( '-isysroot(.+)',
    "env.append('CC_FLAGS', '-isysroot ' + pathtools.normalize($0))"),
  # NOTE: the -iquote =DIR syntax (substitute = with sysroot) doesn't work.
  # Clang just says: ignoring nonexistent directory "=DIR"
  ( ('-iquote', '(.+)'),
    "env.append('CC_FLAGS', '-iquote', pathtools.normalize($0))"),
  ( ('-iquote(.+)'),
    "env.append('CC_FLAGS', '-iquote', pathtools.normalize($0))"),
  ( ('-idirafter', '(.+)'),
    "env.append('CC_FLAGS', '-idirafter'+pathtools.normalize($0))"),
  ( '-idirafter(.+)',
    "env.append('CC_FLAGS', '-idirafter'+pathtools.normalize($0))"),
  ( ('(-include)','(.+)'), AddCCFlag),
  ( ('(-include.+)'), AddCCFlag),
  ( '(--relocatable-pch)', AddCCFlag),
  ( '(-g)', AddCCFlag),
  ( '(-W.*)', AddCCFlag),
  ( '(-w)', AddCCFlag),
  ( '(-std=.*)', AddCCFlag),
  ( '(-ansi)', AddCCFlag),
  ( ('(-D)','(.*)'), AddCCFlag),
  ( '(-D.+)', AddCCFlag),
  ( ('(-U)','(.*)'), AddCCFlag),
  ( '(-U.+)', AddCCFlag),
  ( '(-f.*)', AddCCFlag),
  ( '(-pedantic)', AddCCFlag),
  ( '(-pedantic-errors)', AddCCFlag),
  ( '(-g.*)', AddCCFlag),
  ( '(-v|--v)', "env.append('CC_FLAGS', $0)\n"
                "env.set('VERBOSE', '1')"),
  ( '(-pthreads?)', "env.set('PTHREAD', '1')"),
  # No-op: accepted for compatibility in case build scripts pass it.
  ( '-static', ""),
  ( ('-B','(.*)'), AddBPrefix),
  ( ('-B(.+)'), AddBPrefix),
  ( ('-L','(.+)'), "env.append('SEARCH_DIRS', pathtools.normalize($0))"),
  ( '-L(.+)', "env.append('SEARCH_DIRS', pathtools.normalize($0))"),
  ( '(-Wp,.*)', AddCCFlag),
  ( '(-Xpreprocessor .*)', AddCCFlag),
  ( ('(-Xclang)', '(.*)'), AddCCFlag),
  # Accept and ignore default flags
  ( '-m32', ""),
  ( '-emit-llvm', ""),
  ( '(-MG)', AddCCFlag),
  ( '(-MMD)', AddCCFlag),
  ( '(-MM?)', "env.append('CC_FLAGS', $0)\n"
              "env.set('GCC_MODE', '-E')"),
  ( '(-MP)', AddCCFlag),
  ( ('(-MQ)','(.*)'), AddCCFlag),
  ( '(-MD)', AddCCFlag),
  ( ('(-MT)','(.*)'), AddCCFlag),
  ( ('(-MF)','(.*)'), "env.append('CC_FLAGS', $0, pathtools.normalize($1))"),
  ( ('-x', '(.+)'), HandleDashX),
  ( '-x(.+)', HandleDashX),
  ( ('(-mllvm)', '(.+)'), AddCCFlag),
  # Ignore these gcc flags
  ( '(-msse)', ""),
  ( '(-march=armv7-a)', ""),
  ( '(-pipe)', ""),
  ( '(-s)', AddLDFlag),
  ( '(--strip-all)', AddLDFlag),
  ( '(--strip-debug)', AddLDFlag),
  # Ignore these assembler flags
  ( '(-Qy)', ""),
  ( ('(--traditional-format)', '.*'), ""),
  ( '(-gstabs)', ""),
  ( '(--gstabs)', ""),
  ( '(-gdwarf2)', ""),
  ( '(--gdwarf2)', ""),
  ( '(--fatal-warnings)', ""),
  ( '(-meabi=.*)', ""),
  ( '(-mfpu=.*)', ""),
  ( '(-mfloat-abi=.+)', AddCCFlag),
  # GCC diagnostic mode triggers
  ( '(-print-.*)', AddDiagnosticFlag),
  ( '(--print.*)', AddDiagnosticFlag),
  ( '(-dumpspecs)', AddDiagnosticFlag),
  ( '(--version)', AddVersionFlag),
  # These are preprocessor flags which should be passed to the frontend, but
  # should not prevent the usual -i flags (which DIAGNOSTIC mode does)
  ( '(-d[DIMNU])', AddCCFlag),
  ( '(-d.*)', AddDiagnosticFlag),
  # Catch all other command-line arguments
  ( '(-.+)', "env.append('UNMATCHED', $0)"),
  # Standard input
  ( '-', AddInputFileStdin),
  # Input Files
  # Call ForceFileType for all input files at the time they are
  # parsed on the command-line. This ensures that the gcc "-x"
  # setting is correctly applied.
  ( '(.*)', "env.append('INPUTS', pathtools.normalize($0))\n"
            "filetype.ForceFileType(pathtools.normalize($0))"),
]
def CheckSetup():
  """Fatal error unless invoked through one of the pnacl-clang wrappers,
  which set IS_CXX in the environment."""
  if env.has('IS_CXX'):
    return
  Log.Fatal('"pnacl-driver" cannot be used directly. '
            'Use pnacl-clang or pnacl-clang++.')
def DriverOutputTypes(driver_flag, compiling_to_native):
  """Map the gcc mode flag ('', '-E', '-c', '-S') to an output file type.

  Bitcode builds produce pp/po/ll/pexe; native builds (-arch given)
  produce pp/o/s/nexe. Raises KeyError for any other flag value.
  """
  bitcode_types = {'-E': 'pp', '-c': 'po', '-S': 'll', '': 'pexe'}
  native_types = {'-E': 'pp', '-c': 'o', '-S': 's', '': 'nexe'}
  table = native_types if compiling_to_native else bitcode_types
  return table[driver_flag]
def ReadDriverRevision():
  """Return the native_client git revision recorded in the driver REV file.

  Dies with a fatal error if the file cannot be parsed, so a stale or
  malformed install is noticed immediately. (The original left the file
  handle open and carried a redundant second `if not m` check after the
  early return; both are fixed here.)
  """
  rev_file = env.getone('DRIVER_REV_FILE')
  fp = DriverOpen(rev_file, 'rb')
  try:
    nacl_ver = fp.readlines()[0]
  finally:
    fp.close()
  m = re.search(r'\[GIT\].*/native_client(?:\.git)?:\s*([0-9a-f]{40})',
                nacl_ver)
  if m is None:
    # fail-fast: if the REV file exists but the regex search failed,
    # we need to fix the regex to get nacl-version.
    Log.Fatal('Failed to parse REV file to get nacl-version.')
  return m.group(1)
def main(argv):
  """Driver entry point: parse gcc-style args, compile inputs, then link.

  Phases: (1) diagnostic passthrough mode, (2) per-file compilation to
  bitcode (.po) or native (.o) objects, (3) bitcode link via pnacl-ld.
  Returns 0 on success; all errors are fatal via Log.Fatal.
  """
  env.update(EXTRA_ENV)
  CheckSetup()
  ParseArgs(argv, CustomPatterns + GCCPatterns)
  # "configure", especially when run as part of a toolchain bootstrap
  # process, will invoke gcc with various diagnostic options and
  # parse the output. In these cases we do not alter the incoming
  # commandline. It is also important to not emit spurious messages.
  if env.getbool('DIAGNOSTIC'):
    if env.getbool('SHOW_VERSION'):
      # Append the nacl revision to the first line of clang's version output.
      code, stdout, stderr = Run(env.get('CC') + env.get('CC_FLAGS'),
                                 redirect_stdout=subprocess.PIPE)
      out = stdout.split('\n')
      nacl_version = ReadDriverRevision()
      out[0] += ' nacl-version=%s' % nacl_version
      stdout = '\n'.join(out)
      print stdout,
    else:
      Run(env.get('CC') + env.get('CC_FLAGS'))
    return 0
  unmatched = env.get('UNMATCHED')
  if len(unmatched) > 0:
    UnrecognizedOption(*unmatched)
  # If -arch was given, we are compiling directly to native code
  compiling_to_native = GetArch() is not None
  if env.getbool('ALLOW_NATIVE'):
    if not compiling_to_native:
      Log.Fatal("--pnacl-allow-native without -arch is not meaningful.")
    # For native/mixed links, also bring in the native libgcc and
    # libcrt_platform to avoid link failure if pre-translated native
    # code needs functions from it.
    env.append('LD_FLAGS', env.eval('-L${LIBS_NATIVE_ARCH}'))
    env.append('STDLIBS', '-lgcc')
    env.append('STDLIBS', '-lcrt_platform')
  flags_and_inputs = env.get('INPUTS')
  output = env.getone('OUTPUT')
  if len(flags_and_inputs) == 0:
    if env.getbool('VERBOSE'):
      # -v can be invoked without any inputs. Runs the original
      # command without modifying the commandline for this case.
      Run(env.get('CC') + env.get('CC_FLAGS'))
      return 0
    else:
      Log.Fatal('No input files')
  gcc_mode = env.getone('GCC_MODE')
  output_type = DriverOutputTypes(gcc_mode, compiling_to_native)
  # INPUTS consists of actual input files and a subset of flags like -Wl,<foo>.
  # Create a version with just the files.
  inputs = [f for f in flags_and_inputs if not IsFlag(f)]
  header_inputs = [f for f in inputs
                   if filetype.IsHeaderType(filetype.FileType(f))]
  # Handle PCH case specially (but only for a limited sense...)
  if header_inputs and gcc_mode != '-E':
    # We only handle doing pre-compiled headers for all inputs or not at
    # all at the moment. This is because DriverOutputTypes only assumes
    # one type of output, depending on the "gcc_mode" flag. When mixing
    # header inputs w/ non-header inputs, some of the outputs will be
    # pch while others will be output_type. We would also need to modify
    # the input->output chaining for the needs_linking case.
    if len(header_inputs) != len(inputs):
      Log.Fatal('mixed compiling of headers and source not supported')
    CompileHeaders(header_inputs, output)
    return 0
  needs_linking = (gcc_mode == '')
  if env.getbool('NEED_DASH_E') and gcc_mode != '-E':
    Log.Fatal("-E or -x required when input is from stdin")
  # There are multiple input files and no linking is being done.
  # There will be multiple outputs. Handle this case separately.
  if not needs_linking:
    if output != '' and len(inputs) > 1:
      Log.Fatal('Cannot have -o with -c, -S, or -E and multiple inputs: %s',
                repr(inputs))
    for f in inputs:
      intype = filetype.FileType(f)
      # Reject inputs that cannot reach the requested output type.
      if not (filetype.IsSourceType(intype) or filetype.IsHeaderType(intype)):
        if ((output_type == 'pp' and intype != 'S') or
            (output_type == 'll') or
            (output_type == 'po' and intype != 'll') or
            (output_type == 's' and intype not in ('ll','po','S')) or
            (output_type == 'o' and intype not in ('ll','po','S','s'))):
          Log.Fatal("%s: Unexpected type of file for '%s'",
                    pathtools.touser(f), gcc_mode)
      if output == '':
        f_output = DefaultOutputName(f, output_type)
      else:
        f_output = output
      namegen = TempNameGen([f], f_output)
      CompileOne(f, output_type, namegen, f_output)
    return 0
  # Linking case
  assert(needs_linking)
  assert(output_type in ('pso','so','pexe','nexe'))
  if output == '':
    output = pathtools.normalize('a.out')
  namegen = TempNameGen(flags_and_inputs, output)
  # Compile all source files (c/c++/ll) to .po
  for i in xrange(0, len(flags_and_inputs)):
    if IsFlag(flags_and_inputs[i]):
      continue
    intype = filetype.FileType(flags_and_inputs[i])
    if filetype.IsSourceType(intype) or intype == 'll':
      flags_and_inputs[i] = CompileOne(flags_and_inputs[i], 'po', namegen)
  # Compile all .s/.S to .o
  if env.getbool('ALLOW_NATIVE'):
    for i in xrange(0, len(flags_and_inputs)):
      if IsFlag(flags_and_inputs[i]):
        continue
      intype = filetype.FileType(flags_and_inputs[i])
      if intype in ('s','S'):
        flags_and_inputs[i] = CompileOne(flags_and_inputs[i], 'o', namegen)
  # We should only be left with .po and .o and libraries
  for f in flags_and_inputs:
    if IsFlag(f):
      continue
    intype = filetype.FileType(f)
    if intype in ('o','s','S') or filetype.IsNativeArchive(f):
      if not env.getbool('ALLOW_NATIVE'):
        Log.Fatal('%s: Native object files not allowed in link. '
                  'Use --pnacl-allow-native to override.', pathtools.touser(f))
    assert(intype in ('po','o','so','ldscript') or filetype.IsArchive(f))
  # Fix the user-specified linker arguments
  # (-Xlinker=/-Wl, wrappers are unwrapped so pnacl-ld sees the raw args).
  ld_inputs = []
  for f in flags_and_inputs:
    if f.startswith('-Xlinker='):
      ld_inputs.append(f[len('-Xlinker='):])
    elif f.startswith('-Wl,'):
      ld_inputs += f[len('-Wl,'):].split(',')
    else:
      ld_inputs.append(f)
  if env.getbool('ALLOW_NATIVE'):
    ld_inputs.append('--pnacl-allow-native')
  # Invoke the linker
  env.set('ld_inputs', *ld_inputs)
  ld_args = env.get('LD_ARGS')
  ld_flags = env.get('LD_FLAGS')
  RunDriver('pnacl-ld', ld_flags + ld_args + ['-o', output])
  return 0
def IsFlag(f):
  """True for command-line flag tokens (anything beginning with '-')."""
  return f[:1] == '-'
def CompileHeaders(header_inputs, output):
  """Produce a precompiled header for each input header file.

  An explicit -o output is only permitted with a single header input.
  """
  if output != '' and len(header_inputs) > 1:
    Log.Fatal('Cannot have -o <out> and compile multiple header files: %s',
              repr(header_inputs))
  for header in header_inputs:
    if output:
      pch_output = output
    else:
      pch_output = DefaultPCHOutputName(header)
    # PCH generation runs clang with neither a mode flag nor -emit-llvm.
    RunCC(header, pch_output, mode='', emit_llvm_flag='')
def CompileOne(infile, output_type, namegen, output = None):
  """Compile one input file to *output_type* and return the output path.

  When *output* is None a temporary name is generated via *namegen*.
  """
  if output is None:
    output = namegen.TempNameForInput(infile, output_type)
  compile_chain = DriverChain(infile, output, namegen)
  SetupChain(compile_chain, filetype.FileType(infile), output_type)
  compile_chain.run()
  return output
def RunCC(infile, output, mode, emit_llvm_flag='-emit-llvm'):
  """Invoke clang on *infile* producing *output*.

  *mode* is one of '', '-E', '-S', '-c'; *emit_llvm_flag* is cleared by
  callers that want non-bitcode output (e.g. precompiled headers).
  """
  intype = filetype.FileType(infile)
  typespec = filetype.FileTypeToGCCType(intype)
  # C++ headers are needed for pnacl-clang++ or for C++ inputs.
  is_cxx_input = intype in ('c++', 'c++-header')
  include_cxx_headers = (env.get('LANGUAGE') == 'CXX') or is_cxx_input
  env.setbool('INCLUDE_CXX_HEADERS', include_cxx_headers)
  if IsStdinInput(infile):
    infile = '-'
  RunWithEnv("${RUN_CC}", infile=infile, output=output,
             emit_llvm_flag=emit_llvm_flag, mode=mode,
             typespec=typespec)
def RunLLVMAS(infile, output):
  """Assemble LLVM .ll assembly *infile* into bitcode *output*."""
  if IsStdinInput(infile):
    infile = '-'
  # This is a bitcode only step - so get rid of "-arch xxx" which
  # might be inherited from the current invocation
  RunDriver('pnacl-as', [infile, '-o', output],
            suppress_inherited_arch_args=True)
def RunNativeAS(infile, output):
  """Assemble native .s assembly *infile* into an object *output*.

  Unlike RunLLVMAS, inherited -arch args are kept (native step).
  """
  if IsStdinInput(infile):
    infile = '-'
  RunDriver('pnacl-as', [infile, '-o', output])
def RunTranslate(infile, output, mode):
  """Translate bitcode *infile* to native output via pnacl-translate.

  Guarded by --pnacl-allow-translate, since translating before bitcode
  linking is normally a mistake.
  """
  if not env.getbool('ALLOW_TRANSLATE'):
    Log.Fatal('%s: Trying to convert bitcode to an object file before '
              'bitcode linking. This is supposed to wait until '
              'translation. Use --pnacl-allow-translate to override.',
              pathtools.touser(infile))
  args = env.get('TRANSLATE_FLAGS')
  args = args + [mode, '--allow-llvm-bitcode-input', infile, '-o', output]
  if env.getbool('PIC'):
    args = args + ['-fPIC']
  RunDriver('pnacl-translate', args)
def RunOpt(infile, outfile, pass_list):
  """Run pnacl-opt with *pass_list*, minus any passes disabled via
  --pnacl-disable-pass."""
  disabled = env.get('LLVM_PASSES_TO_DISABLE')
  enabled = [pass_option for pass_option in pass_list
             if pass_option not in disabled]
  RunDriver('pnacl-opt', enabled + [infile, '-o', outfile])
def SetupChain(chain, input_type, output_type):
  """Populate *chain* with the steps that take input_type to output_type.

  Each stage advances cur_type; the function returns as soon as the
  requested output_type is reached, and dies if no route exists. The
  ordering of the stage checks is significant (fallthrough pipeline).
  """
  assert(output_type in ('pp','ll','po','s','o'))
  cur_type = input_type
  # source file -> pp
  if filetype.IsSourceType(cur_type) and output_type == 'pp':
    chain.add(RunCC, 'cpp', mode='-E')
    cur_type = 'pp'
  if cur_type == output_type:
    return
  # header file -> pre-process
  if filetype.IsHeaderType(cur_type) and output_type == 'pp':
    chain.add(RunCC, 'cpp', mode='-E')
    cur_type = 'pp'
  if cur_type == output_type:
    return
  # source file -> ll
  if (filetype.IsSourceType(cur_type) and
      (env.getbool('FORCE_INTERMEDIATE_LL') or output_type == 'll')):
    chain.add(RunCC, 'll', mode='-S')
    cur_type = 'll'
  if cur_type == output_type:
    return
  # ll -> po
  if cur_type == 'll':
    chain.add(RunLLVMAS, 'po')
    cur_type = 'po'
  if cur_type == output_type:
    return
  # source file -> po (we also force native output to go through this phase
  if filetype.IsSourceType(cur_type) and output_type in ('po', 'o', 's'):
    chain.add(RunCC, 'po', mode='-c')
    cur_type = 'po'
  if cur_type == output_type:
    return
  # po -> o
  if (cur_type == 'po' and output_type == 'o'):
    # If we aren't using biased bitcode, then at least -expand-byval
    # must be run to work with the PPAPI shim calling convention.
    if IsPortable():
      chain.add(RunOpt, 'expand.po', pass_list=['-expand-byval'])
    chain.add(RunTranslate, 'o', mode='-c')
    cur_type = 'o'
  if cur_type == output_type:
    return
  # po -> s
  if cur_type == 'po':
    # If we aren't using biased bitcode, then at least -expand-byval
    # must be run to work with the PPAPI shim calling convention.
    if IsPortable():
      chain.add(RunOpt, 'expand.po', pass_list=['-expand-byval'])
    chain.add(RunTranslate, 's', mode='-S')
    cur_type = 's'
  if cur_type == output_type:
    return
  # S -> s
  if cur_type == 'S':
    chain.add(RunCC, 's', mode='-E')
    cur_type = 's'
    if output_type == 'pp':
      return
  if cur_type == output_type:
    return
  # s -> o
  if cur_type == 's' and output_type == 'o':
    chain.add(RunNativeAS, 'o')
    cur_type = 'o'
  if cur_type == output_type:
    return
  Log.Fatal("Unable to compile .%s to .%s", input_type, output_type)
def get_help(argv):
  """Return the help text for this driver.

  With --help-full, defer to the underlying clang binary's own -help
  output; otherwise return a summary of the gcc-compatible options.

  Fixes vs. the previous text: 'Optimation' -> 'Optimization', and
  '-Xpreprocessor,<arg>' -> '-Xpreprocessor <arg>' to match the actual
  space-separated pattern accepted by the argument parser (and gcc).
  """
  tool = env.getone('SCRIPT_NAME')
  if '--help-full' in argv:
    # To get ${CC}, etc.
    env.update(EXTRA_ENV)
    code, stdout, stderr = Run('"${CC}" -help',
                               redirect_stdout=subprocess.PIPE,
                               redirect_stderr=subprocess.STDOUT,
                               errexit=False)
    return stdout
  else:
    return """
This is a "GCC-compatible" driver using clang under the hood.
Usage: %s [options] <inputs> ...
BASIC OPTIONS:
  -o <file>             Output to <file>.
  -E                    Only run the preprocessor.
  -S                    Generate bitcode assembly.
  -c                    Generate bitcode object.
  -I <dir>              Add header search path.
  -L <dir>              Add library search path.
  -D<key>[=<val>]       Add definition for the preprocessor.
  -W<id>                Toggle warning <id>.
  -f<feature>           Enable <feature>.
  -Wl,<arg>             Pass <arg> to the linker.
  -Xlinker <arg>        Pass <arg> to the linker.
  -Wt,<arg>             Pass <arg> to the translator.
  -Xtranslator <arg>    Pass <arg> to the translator.
  -Wp,<arg>             Pass <arg> to the preprocessor.
  -Xpreprocessor <arg>  Pass <arg> to the preprocessor.
  -x <language>         Treat subsequent input files as having type <language>.
  -static               Produce a static executable (the default).
  -Bstatic              Link subsequent libraries statically.
  -Bdynamic             Link subsequent libraries dynamically.
  -fPIC                 Ignored (only used by translator backend)
                        (accepted for compatibility).
  -pipe                 Ignored (for compatibility).
  -O<n>                 Optimization level <n>: 0, 1, 2, 3, 4 or s.
  -g                    Generate complete debug information.
  -gline-tables-only    Generate debug line-information only
                        (allowing for stack traces).
  -flimit-debug-info    Generate limited debug information.
  -save-temps           Keep intermediate compilation results.
  -v                    Verbose output / show commands.
  -h | --help           Show this help.
  --help-full           Show underlying clang driver's help message
                        (warning: not all options supported).
""" % (tool)
ea5ce7cc5b6e740ea6de5f526c9f8c03dd0ed5b8 | 885 | py | Python | test/test_pipeline/components/classification/test_gaussian_nb.py | Louquinze/auto-sklearn | b2ac331c500ebef7becf372802493a7b235f7cec | [
"BSD-3-Clause"
] | 6,390 | 2015-07-11T07:59:51.000Z | 2022-03-31T16:45:15.000Z | test/test_pipeline/components/classification/test_gaussian_nb.py | Louquinze/auto-sklearn | b2ac331c500ebef7becf372802493a7b235f7cec | [
"BSD-3-Clause"
] | 1,276 | 2015-07-29T02:11:29.000Z | 2022-03-31T17:31:34.000Z | test/test_pipeline/components/classification/test_gaussian_nb.py | Louquinze/auto-sklearn | b2ac331c500ebef7becf372802493a7b235f7cec | [
"BSD-3-Clause"
] | 1,313 | 2015-07-20T14:11:39.000Z | 2022-03-25T18:22:48.000Z | import sklearn.naive_bayes
from autosklearn.pipeline.components.classification.gaussian_nb import \
GaussianNB
from .test_base import BaseClassificationComponentTest
class GaussianNBComponentTest(BaseClassificationComponentTest):
__test__ = True
res = dict()
res["default_iris"] = 0.95999999999999996
res["iris_n_calls"] = None
res["default_iris_iterative"] = 0.95999999999999996
res["default_iris_proba"] = 0.11199001987342033
res["default_iris_sparse"] = -1
res["default_digits"] = 0.80692167577413476
res["digits_n_calls"] = None
res["default_digits_iterative"] = 0.80692167577413476
res["default_digits_binary"] = 0.98664238008500305
res["default_digits_multilabel"] = 0.54135471896765841
res["default_digits_multilabel_proba"] = 0.99028976450984096
sk_mod = sklearn.naive_bayes.GaussianNB
module = GaussianNB
| 31.607143 | 72 | 0.761582 |
ceed04a687d3af77e3837eaac61586446d74cc12 | 4,457 | py | Python | legacy/dx/simulator/simulator_diagnoser/io/db_client.py | GaloisInc/adapt | 2ccff778d3e77505899266572f8f7caacb5b630f | [
"BSD-3-Clause"
] | 2 | 2020-04-09T13:04:25.000Z | 2021-09-24T14:17:26.000Z | legacy/dx/simulator/simulator_diagnoser/io/db_client.py | GaloisInc/adapt | 2ccff778d3e77505899266572f8f7caacb5b630f | [
"BSD-3-Clause"
] | null | null | null | legacy/dx/simulator/simulator_diagnoser/io/db_client.py | GaloisInc/adapt | 2ccff778d3e77505899266572f8f7caacb5b630f | [
"BSD-3-Clause"
] | 3 | 2019-09-20T20:49:54.000Z | 2021-09-02T17:33:47.000Z | import asyncio
import aiogremlin
import logging
import uuid
class DBClient(object):
def __init__(self, url='http://localhost:8182/'):
self.loop = asyncio.get_event_loop()
self.client = aiogremlin.GremlinClient(url=url, loop=self.loop)
self.log = logging.getLogger('dx-logger')
def __del__(self):
self.loop.run_until_complete(self.client.close())
def __query(self, gremlin, bindings={}):
self.log.debug("Query: " + gremlin + " bindings: " + str(bindings))
r = self.client.execute(gremlin, bindings=bindings)
msg = self.loop.run_until_complete(r)[0]
return msg.data
@staticmethod
def generate_uuid():
return str(uuid.uuid4())
def get_nodes(self, **attributes):
gremlin = "g.V()"
for x in list(attributes.keys()):
if x == 'label':
gremlin += '.hasLabel(l)'
attributes['l'] = attributes[x]
else:
gremlin += ".has('{}',{})".format(x,x)
return self.__query(gremlin, attributes)
def insert_node(self, node_label, **attributes):
gremlin = "graph.addVertex(label, l" + \
''.join(",'{}',{}".format(x,x) for x in attributes.keys()) + \
')'
attributes['l'] = node_label
return self.__query(gremlin, attributes)[0]
def drop_nodes(self, force=False, **attributes):
if not force and len(attributes) == 0:
return None
gremlin = "g.V()"
for x in list(attributes.keys()):
if x == 'label':
gremlin += '.hasLabel(l)'
attributes['l'] = attributes[x]
else:
gremlin += ".has('{}',{})".format(x,x)
gremlin += ".drop().iterate()"
return self.__query(gremlin, attributes)
def get_edges(self, **attributes):
gremlin = "g.E()"
for x in list(attributes.keys()):
if x == 'label':
gremlin += '.hasLabel(l)'
attributes['l'] = attributes[x]
else:
gremlin += ".has('{}',{})".format(x,x)
return self.__query(gremlin, attributes)
def insert_edge(self, outnode, innode, edge_label, **attributes):
gremlin = "graph.vertices({})[0].addEdge(l,".format(outnode) + \
"graph.vertices({})[0]".format(innode) + \
''.join(",'{}',{}".format(x,x) for x in attributes.keys()) + \
')'
attributes.update({'l': edge_label})
return self.__query(gremlin, attributes)
def drop_edges(self, force=False, **attributes):
if not force and len(attributes) == 0:
return None
gremlin = "g.E()"
for x in list(attributes.keys()):
if x == 'label':
gremlin += '.hasLabel(l)'
attributes['l'] = attributes[x]
else:
gremlin += ".has('{}',{})".format(x,x)
gremlin += ".drop().iterate()"
return self.__query(gremlin, attributes)
def get_successors(self, node):
gremlin = "g.V({}).out()".format(node)
return self.__query(gremlin)
def get_transitive_successors(self, node, **attributes):
gremlin = "g.V({}).repeat(out()".format(node)
for x in list(attributes.keys()):
if x == 'label':
gremlin += '.hasLabel(l)'
attributes['l'] = attributes[x]
else:
gremlin += ".has('{}',{})".format(x,x)
gremlin += ".dedup()).emit().simplePath().path()"
return self.__query(gremlin, attributes)
def get_predecessors(self, node):
gremlin = "g.V({}).in()".format(node)
return self.__query(gremlin)
def get_transitive_predecessors(self, node, **attributes):
gremlin = "g.V({}).repeat(in()".format(node)
for x in list(attributes.keys()):
if x == 'label':
gremlin += '.hasLabel(l)'
attributes['l'] = attributes[x]
else:
gremlin += ".has('{}',{})".format(x,x)
gremlin += ".dedup()).emit().simplePath().path()"
return self.__query(gremlin, attributes)
def set_node_properties(self, node, **attributes):
gremlin = "g.V({})".format(node)
for x in attributes.keys():
gremlin += ".property('{}',{})".format(x,x)
return self.__query(gremlin, attributes)
| 36.834711 | 80 | 0.528607 |
358f539cab695fe354af56785a148c8784d6e869 | 1,922 | py | Python | PyTests/RustPython/attr.py | LiarPrincess/Violet | 0a4268649b0eec3ab631d19015d7043394c6571e | [
"MIT"
] | null | null | null | PyTests/RustPython/attr.py | LiarPrincess/Violet | 0a4268649b0eec3ab631d19015d7043394c6571e | [
"MIT"
] | 6 | 2021-10-14T15:55:16.000Z | 2022-03-31T14:04:02.000Z | PyTests/RustPython/attr.py | LiarPrincess/Violet | 0a4268649b0eec3ab631d19015d7043394c6571e | [
"MIT"
] | null | null | null | from testutils import assert_raises
class A:
pass
class B:
x = 50
a = A()
a.b = 10
assert hasattr(a, 'b')
assert a.b == 10
assert B.x == 50
# test delete class attribute with del keyword
del B.x
with assert_raises(AttributeError):
_ = B.x
# test override attribute
setattr(a, 'b', 12)
assert a.b == 12
assert getattr(a, 'b') == 12
# test non-existent attribute
with assert_raises(AttributeError):
_ = a.c
with assert_raises(AttributeError):
getattr(a, 'c')
assert getattr(a, 'c', 21) == 21
# test set attribute
setattr(a, 'c', 20)
assert hasattr(a, 'c')
assert a.c == 20
# test delete attribute
delattr(a, 'c')
assert not hasattr(a, 'c')
with assert_raises(AttributeError):
_ = a.c
# test setting attribute on builtin
with assert_raises(AttributeError):
object().a = 1
with assert_raises(AttributeError):
del object().a
with assert_raises(AttributeError):
setattr(object(), 'a', 2)
with assert_raises(AttributeError):
delattr(object(), 'a')
attrs = {}
class CustomLookup:
def __getattr__(self, item):
# VIOLET: We do not have string formating
# return "value_{}".format(item)
return "value_" + str(item)
def __setattr__(self, key, value):
attrs[key] = value
custom = CustomLookup()
assert custom.attr == "value_attr"
custom.a = 2
custom.b = 5
assert attrs['a'] == 2
assert attrs['b'] == 5
class GetRaise:
def __init__(self, ex):
self.ex = ex
def __getattr__(self, item):
raise self.ex
assert not hasattr(GetRaise(AttributeError()), 'a')
with assert_raises(AttributeError):
getattr(GetRaise(AttributeError()), 'a')
assert getattr(GetRaise(AttributeError()), 'a', 11) == 11
with assert_raises(KeyError):
hasattr(GetRaise(KeyError()), 'a')
with assert_raises(KeyError):
getattr(GetRaise(KeyError()), 'a')
with assert_raises(KeyError):
getattr(GetRaise(KeyError()), 'a', 11)
| 18.660194 | 57 | 0.665453 |
e13ae30073666da184c4423dd856359982d80803 | 1,423 | py | Python | lexicon/tests/test_parser.py | 1500cloud/lexicon | 8fa65a4e8c844d5d7c33f55ac6e66242f7d415d9 | [
"MIT"
] | 1 | 2019-08-17T23:09:24.000Z | 2019-08-17T23:09:24.000Z | lexicon/tests/test_parser.py | 1500cloud/lexicon | 8fa65a4e8c844d5d7c33f55ac6e66242f7d415d9 | [
"MIT"
] | null | null | null | lexicon/tests/test_parser.py | 1500cloud/lexicon | 8fa65a4e8c844d5d7c33f55ac6e66242f7d415d9 | [
"MIT"
] | 1 | 2021-01-06T16:04:36.000Z | 2021-01-06T16:04:36.000Z | """Unit tests for the Lexicon CLI parser"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
import pytest
from lexicon.parser import (
generate_base_provider_parser,
generate_cli_main_parser,
)
def test_base_provider_parser():
    """The base parser extracts action, domain and type, with defaults."""
    parser = generate_base_provider_parser()
    namespace = parser.parse_args(['list', 'capsulecd.com', 'TXT'])
    assert namespace.action == 'list'
    assert namespace.domain == 'capsulecd.com'
    assert namespace.type == 'TXT'
    assert namespace.ttl is None
    assert namespace.output == 'TABLE'
def test_base_provider_parser_without_domain():
    """Parsing exits when the mandatory domain argument is missing."""
    parser = generate_base_provider_parser()
    with pytest.raises(SystemExit):
        parser.parse_args(['list'])
def test_base_provider_parser_without_options():
    """Parsing exits when no arguments at all are supplied."""
    parser = generate_base_provider_parser()
    with pytest.raises(SystemExit):
        parser.parse_args([])
def test_cli_main_parser():
    """The main CLI parser also extracts the provider name."""
    parser = generate_cli_main_parser()
    namespace = parser.parse_args(
        ['cloudflare', 'list', 'capsulecd.com', 'TXT'])
    assert namespace.provider_name == 'cloudflare'
    assert namespace.action == 'list'
    assert namespace.domain == 'capsulecd.com'
    assert namespace.type == 'TXT'
    assert namespace.output == 'TABLE'
def test_cli_main_parser_without_args():
    """The main CLI parser exits when given no arguments."""
    parser = generate_cli_main_parser()
    with pytest.raises(SystemExit):
        parser.parse_args([])
| 29.040816 | 68 | 0.722417 |
89638ce6ced20de1374c46368f2ddd6fe0a057b6 | 15,673 | py | Python | page/privacy.py | moorer2k/pinylib-rtc | a4dfe70ce7fbba4f41b140bb15349747d060a34a | [
"MIT"
] | null | null | null | page/privacy.py | moorer2k/pinylib-rtc | a4dfe70ce7fbba4f41b140bb15349747d060a34a | [
"MIT"
] | null | null | null | page/privacy.py | moorer2k/pinylib-rtc | a4dfe70ce7fbba4f41b140bb15349747d060a34a | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import util.web
class Privacy:
    """
    This class represents tinychat's privacy page for a room,
    it contains methods to change a rooms privacy settings.

    NOTE(review): all network access goes through util.web; the response
    objects are dicts exposing at least 'content' and 'json' keys (as
    read below) -- confirm against util.web.
    """
    def __init__(self, proxy):
        """ Create a instance of the Privacy class.
        :param proxy: A proxy in the format IP:PORT
        :type proxy: str | None
        """
        self._proxy = proxy
        self._privacy_url = 'https://tinychat.com/settings/privacy'
        # CSRF token scraped from the privacy page; required by every POST
        self._csrf_token = ''
        self._room_password = None
        self._roompass_enabled = 0
        self._broadcast_password = None
        self._broadcast_pass_enabled = 0
        # account names of the room's moderators (filled by parsing)
        self.room_moderators = list()
        # mirror of the privacy form's checkbox/field state
        self._form_data = dict()
    @staticmethod
    def _is_tc_account(account_name):
        """ Helper method to check if a user account is a valid account name.
        :param account_name: The account name to check.
        :type account_name: str
        :return: True if it is a valid account, False if invalid account.
        :rtype: bool
        """
        url = 'https://tinychat.com/api/tcinfo?username=%s' % account_name
        response = util.web.http_get(url=url, json=True)
        # NOTE(review): implicitly returns None (falsy) when the request
        # yields no JSON at all, not the documented False
        if response['json'] is not None:
            if 'error' not in response['json']:
                return True
            return False
    def clear_bans(self):
        """ Clear all room bans.
        :return: True if bans were cleared, else False.
        :rtype: bool
        """
        url = 'https://tinychat.com/settings/privacy/clearbans'
        header = {
            'X-Requested-With': 'XMLHttpRequest',
            'Referer': self._privacy_url
        }
        form_data = {'_token': self._csrf_token}
        response = util.web.http_post(post_url=url, post_data=form_data, header=header,
                                      json=True, proxy=self._proxy)
        # NOTE(review): implicitly returns None when the server reports an
        # error, not the documented False
        if response['json']['error'] is False:
            if response['json']['response'] == 'Bans cleared':
                return True
            return False
    def parse_privacy_settings(self, response=None):
        """ Parse privacy settings.

        Scrapes the privacy page HTML and refreshes the cached form
        state (_form_data), the CSRF token, the password flags and the
        moderator list. Called after every settings POST.

        :param response: A http response.
        :type response: dict
        """
        if response is None:
            response = util.web.http_get(url=self._privacy_url, referer=self._privacy_url, proxy=self._proxy)
        if response is not None and response['content'] is not None:
            soup = BeautifulSoup(response['content'], 'html.parser')
            # csrf-token
            token = soup.find(attrs={'name': 'csrf-token'})
            self._csrf_token = token['content']
            # guest settings (twitter/facebook requirements only matter
            # when guests are allowed at all)
            guest_settings = soup.find('input', {'name': 'allow_guest', 'checked': True})
            if guest_settings is not None:
                self._form_data['allow_guest'] = 1
                twitter = soup.find('input', {'name': 'require_twitter', 'checked': True})
                if twitter is not None:
                    self._form_data['require_twitter'] = 1
                else:
                    self._form_data['require_twitter'] = 0
                facebook = soup.find('input', {'name': 'require_facebook', 'checked': True})
                if facebook is not None:
                    self._form_data['require_facebook'] = 1
                else:
                    self._form_data['require_facebook'] = 0
            else:
                self._form_data['allow_guest'] = 0
                self._form_data['require_twitter'] = 0
                self._form_data['require_facebook'] = 0
            # directory setting
            dir_settings = soup.find('input', {'name': 'public_directory', 'checked': True})
            if dir_settings is not None:
                self._form_data['public_directory'] = 1
            else:
                self._form_data['public_directory'] = 0
            # push2talk setting
            push2talk = soup.find('input', {'name': 'push2talk', 'checked': True})
            if push2talk is not None:
                self._form_data['push2talk'] = 1
            else:
                self._form_data['push2talk'] = 0
            # greenroom setting
            greenroom = soup.find('input', {'name': 'greenroom', 'checked': True})
            if greenroom is not None:
                self._form_data['greenroom'] = 1
            else:
                self._form_data['greenroom'] = 0
            # room password
            roompass = soup.find(attrs={'name': 'roomPassword'})
            if roompass['value']:
                self._roompass_enabled = 1
            else:
                self._roompass_enabled = 0
            # TODO:make sure this works as expected
            # NOTE(review): when greenroom is on, the broadcast password
            # field is skipped and _broadcast_pass_enabled keeps its
            # previous value -- confirm this is intended
            if not self._form_data['greenroom']:
                # broadcast password
                broadcast_pass = soup.find(attrs={'name': 'broadcastPassword'})
                if broadcast_pass['value']:
                    self._broadcast_pass_enabled = 1
                else:
                    self._broadcast_pass_enabled = 0
            # moderators: extracted from an inline javascript array literal
            # There has to be a more elegant way of doing this..
            pattern = 'var moderators = \''
            if pattern in response['content']:
                mod_str = str(response['content']).split(pattern)[1].split('\';')[0].replace('"', '\'')
                mod_str_replaced = mod_str.replace('[', '').replace(']', '').replace('\'', '')
                mods = mod_str_replaced.split(',')
                if len(mods) > 0:
                    for mod in mods:
                        if mod != '' and mod not in self.room_moderators:
                            self.room_moderators.append(mod)
    def set_room_password(self, password=None):
        """ Set a room password or clear the password.
        :param password: The room password or None to clear.
        :type password: str | None
        """
        if password is None:
            self._room_password = ''
        else:
            self._room_password = password
        if self._broadcast_password is None:
            self._broadcast_password = ''
        # both passwords are always posted together on this form
        form_data = {
            'roomPassword': self._room_password,
            'broadcastPassword': self._broadcast_password,
            'privacy_password': 1,
            '_token': self._csrf_token
        }
        res = util.web.http_post(post_url=self._privacy_url, post_data=form_data,
                                 referer=self._privacy_url, follow_redirect=True)
        # re-read the page so the cached state reflects the change
        self.parse_privacy_settings(response=res)
    def set_broadcast_password(self, password=None):
        """ Set a broadcast password or clear the password.
        :param password: The broadcast password or None to clear.
        :type password: str | None
        """
        if password is None:
            self._broadcast_password = ''
        else:
            self._broadcast_password = password
        if self._room_password is None:
            self._room_password = ''
        form_data = {
            'roomPassword': self._room_password,
            'broadcastPassword': self._broadcast_password,
            'privacy_password': 1,
            '_token': self._csrf_token
        }
        res = util.web.http_post(post_url=self._privacy_url, post_data=form_data,
                                 referer=self._privacy_url, follow_redirect=True)
        self.parse_privacy_settings(response=res)
    def make_moderator(self, account):
        """ Make a user account a moderator.
        :param account: The account to make a moderator.
        :type account: str
        :return True if the account was added as a moderator, False if already a moderator
        or None on invalid account name.
        :rtype: bool | None
        """
        url = 'https://tinychat.com/settings/privacy/addfeatureduser'
        if self._is_tc_account(account):
            if account not in self.room_moderators:
                form_data = {
                    '_token': self._csrf_token,
                    'name': account,
                    'type': 'moderator'
                }
                response = util.web.http_post(post_url=url, post_data=form_data, json=True, proxy=self._proxy)
                if response['json']['error'] is False and response['json']['response'] == 'Data added':
                    # confirm the change by re-parsing the page
                    self.parse_privacy_settings()
                    if account in self.room_moderators:
                        return True
                    return False
                return False
        return None
    def remove_moderator(self, account):
        """ Remove a room moderator.
        :param account: The moderator account to remove.
        :return: True if removed else False
        :rtype: bool
        """
        url = 'https://tinychat.com/settings/privacy/removefeatureduser'
        if account in self.room_moderators:
            form_data = {
                '_token': self._csrf_token,
                'name': account,
                'type': 'moderator'
            }
            response = util.web.http_post(post_url=url, post_data=form_data, json=True, proxy=self._proxy)
            if response['json']['error'] is False and response['json']['response'] == 'Data removed':
                self.room_moderators.remove(account)
                return True
            return False
        return False
    def set_guest_mode(self):
        """ Enable/disable guest mode.
        NOTE: I don't know if it is a bug on tinychat's end, but whether this is set or not,
        does not seem to matter, you can still join as guest.
        :return: True if guests are allowed, else False.
        :rtype: bool
        """
        if not self._form_data['allow_guest']:
            self._form_data['allow_guest'] = 1
            self._update()
            return True
        elif self._form_data['allow_guest']:
            # disabling guests also clears the twitter/facebook requirements
            self._form_data['allow_guest'] = 0
            self._form_data['require_twitter'] = 0
            self._form_data['require_facebook'] = 0
            self._update()
            return False
    def set_guest_mode_twitter(self):
        """ Enable/disable guest mode twitter.
        :return: True if guest mode is set to twitter, else False.
        :rtype: bool
        """
        if self._form_data['allow_guest']:
            if not self._form_data['require_twitter']:
                self._form_data['require_twitter'] = 1
                self._update()
                return True
            elif self._form_data['require_twitter']:
                self._form_data['require_twitter'] = 0
                self._update()
                return False
        else:
            # guests were off: enable them together with the requirement
            self._form_data['allow_guest'] = 1
            self._form_data['require_twitter'] = 1
            self._update()
            return True
    def set_guest_mode_facebook(self):
        """ Enable/disable guest mode facebook.
        :return: True if guest mode is set to facebook, else False.
        :rtype: bool
        """
        if self._form_data['allow_guest']:
            if not self._form_data['require_facebook']:
                self._form_data['require_facebook'] = 1
                self._update()
                return True
            elif self._form_data['require_facebook']:
                self._form_data['require_facebook'] = 0
                self._update()
                return False
        else:
            self._form_data['allow_guest'] = 1
            self._form_data['require_facebook'] = 1
            self._update()
            return True
    def show_on_directory(self):
        """ Enables/disables show up on directory setting.
        :return: True if enabled else False.
        :rtype: bool
        """
        if not self._form_data['public_directory']:
            self._form_data['public_directory'] = 1
            self._update()
            return True
        elif self._form_data['public_directory']:
            self._form_data['public_directory'] = 0
            self._update()
            return False
    def set_push2talk(self):
        """ Enables/disables push2talk setting.
        :return: True if enabled else False.
        :rtype: bool
        """
        if not self._form_data['push2talk']:
            self._form_data['push2talk'] = 1
            self._update()
            return True
        elif self._form_data['push2talk']:
            self._form_data['push2talk'] = 0
            self._update()
            return False
    def set_greenroom(self):
        """ Enables/disables greenroom setting.
        :return: True if enabled else False.
        :rtype: bool
        """
        if not self._form_data['greenroom']:
            self._form_data['greenroom'] = 1
            self._update()
            return True
        elif self._form_data['greenroom']:
            self._form_data['greenroom'] = 0
            self._update()
            return False
    def current_settings(self):
        """ Returns a dictionary of the current room settings.
        :return A dictionary with the following keys: 'broadcast_pass', 'room_pass', 'allow_guest',
        'show_on_directory', 'push2talk', 'greenroom'
        :rtype: dict
        """
        # always re-scrape so the report reflects the server-side state
        self.parse_privacy_settings()
        settings = dict()
        if self._broadcast_password or self._broadcast_pass_enabled:
            settings['broadcast_pass'] = 'Enabled'
        else:
            settings['broadcast_pass'] = 'Disabled'
        if self._room_password or self._roompass_enabled:
            settings['room_pass'] = 'Enabled'
        else:
            settings['room_pass'] = 'Disabled'
        settings['allow_guest'] = 'No login required' #
        if self._form_data['allow_guest']:
            if self._form_data['require_twitter'] and self._form_data['require_facebook']:
                settings['allow_guest'] = 'Twitter/Facebook'
            elif self._form_data['require_twitter']:
                settings['allow_guest'] = 'Twitter'
            elif self._form_data['require_facebook']:
                settings['allow_guest'] = 'Facebook'
        # a password protected room is never listed on the directory
        if self._room_password:
            settings['show_on_directory'] = 'Hidden'
        else:
            if self._form_data['public_directory']:
                settings['show_on_directory'] = 'Public'
            else:
                settings['show_on_directory'] = 'Hidden'
        if self._form_data['push2talk']:
            settings['push2talk'] = 'Enabled'
        else:
            settings['push2talk'] = 'Disabled'
        if self._form_data['greenroom']:
            settings['greenroom'] = 'Enabled'
        else:
            settings['greenroom'] = 'Disabled'
        return settings
    def _update(self):
        """ Update the privacy page with the current settings.
        This is called when ever a change is made.
        """
        self._form_data['privacy_changes'] = 1
        self._form_data['_token'] = self._csrf_token
        # unchecked checkboxes must be omitted from the POST entirely, so
        # falsy entries are deleted; parse_privacy_settings() repopulates
        # every key from the response afterwards
        if not self._form_data['allow_guest']:
            del self._form_data['allow_guest']
        if not self._form_data['require_twitter']:
            del self._form_data['require_twitter']
        if not self._form_data['require_facebook']:
            del self._form_data['require_facebook']
        if not self._form_data['public_directory']:
            del self._form_data['public_directory']
        if not self._form_data['push2talk']:
            del self._form_data['push2talk']
        if not self._form_data['greenroom']:
            del self._form_data['greenroom']
        pr = util.web.http_post(post_url=self._privacy_url, post_data=self._form_data, referer=self._privacy_url,
                                proxy=self._proxy, follow_redirect=True)
        self.parse_privacy_settings(response=pr)
| 38.603448 | 113 | 0.56256 |
fc120c41dd5fc086d52c8c65662b4d4195162951 | 27,020 | py | Python | statsmodels/graphics/mosaicplot.py | haribharadwaj/statsmodels | 8675b890607fe6f116b1186dcba4c387c5e3778a | [
"BSD-3-Clause"
] | 4 | 2019-02-18T20:35:02.000Z | 2019-04-09T03:02:58.000Z | statsmodels/graphics/mosaicplot.py | haribharadwaj/statsmodels | 8675b890607fe6f116b1186dcba4c387c5e3778a | [
"BSD-3-Clause"
] | null | null | null | statsmodels/graphics/mosaicplot.py | haribharadwaj/statsmodels | 8675b890607fe6f116b1186dcba4c387c5e3778a | [
"BSD-3-Clause"
] | 1 | 2022-02-25T02:59:14.000Z | 2022-02-25T02:59:14.000Z | """Create a mosaic plot from a contingency table.
It allows to visualize multivariate categorical data in a rigorous
and informative way.
see the docstring of the mosaic function for more informations.
"""
# Author: Enrico Giampieri - 21 Jan 2013
from __future__ import division
from statsmodels.compat.python import (iteritems, iterkeys, lrange, string_types, lzip,
itervalues, zip, range)
import numpy as np
from collections import OrderedDict
from itertools import product
from numpy import iterable, r_, cumsum, array
from statsmodels.graphics import utils
from pandas import DataFrame
__all__ = ["mosaic"]
def _normalize_split(proportion):
"""
return a list of proportions of the available space given the division
if only a number is given, it will assume a split in two pieces
"""
if not iterable(proportion):
if proportion == 0:
proportion = array([0.0, 1.0])
elif proportion >= 1:
proportion = array([1.0, 0.0])
elif proportion < 0:
raise ValueError("proportions should be positive,"
"given value: {}".format(proportion))
else:
proportion = array([proportion, 1.0 - proportion])
proportion = np.asarray(proportion, dtype=float)
if np.any(proportion < 0):
raise ValueError("proportions should be positive,"
"given value: {}".format(proportion))
if np.allclose(proportion, 0):
raise ValueError("at least one proportion should be "
"greater than zero".format(proportion))
# ok, data are meaningful, so go on
if len(proportion) < 2:
return array([0.0, 1.0])
left = r_[0, cumsum(proportion)]
left /= left[-1] * 1.0
return left
def _split_rect(x, y, width, height, proportion, horizontal=True, gap=0.05):
"""
Split the given rectangle in n segments whose proportion is specified
along the given axis if a gap is inserted, they will be separated by a
certain amount of space, retaining the relative proportion between them
a gap of 1 correspond to a plot that is half void and the remaining half
space is proportionally divided among the pieces.
"""
x, y, w, h = float(x), float(y), float(width), float(height)
if (w < 0) or (h < 0):
raise ValueError("dimension of the square less than"
"zero w={} h=()".format(w, h))
proportions = _normalize_split(proportion)
# extract the starting point and the dimension of each subdivision
# in respect to the unit square
starting = proportions[:-1]
amplitude = proportions[1:] - starting
# how much each extrema is going to be displaced due to gaps
starting += gap * np.arange(len(proportions) - 1)
# how much the squares plus the gaps are extended
extension = starting[-1] + amplitude[-1] - starting[0]
# normalize everything for fit again in the original dimension
starting /= extension
amplitude /= extension
# bring everything to the original square
starting = (x if horizontal else y) + starting * (w if horizontal else h)
amplitude = amplitude * (w if horizontal else h)
# create each 4-tuple for each new block
results = [(s, y, a, h) if horizontal else (x, s, w, a)
for s, a in zip(starting, amplitude)]
return results
def _reduce_dict(count_dict, partial_key):
"""
Make partial sum on a counter dict.
Given a match for the beginning of the category, it will sum each value.
"""
L = len(partial_key)
count = sum(v for k, v in iteritems(count_dict) if k[:L] == partial_key)
return count
def _key_splitting(rect_dict, keys, values, key_subset, horizontal, gap):
    """Split every rectangle whose key starts with *key_subset*.

    Each matching rectangle is divided with :func:`_split_rect` according
    to *values*, and each piece is stored under the parent key extended
    with the corresponding element of *keys*. Rectangles outside the
    subset are carried over unchanged.
    """
    prefix_len = len(key_subset)
    result = OrderedDict()
    for name, rect in rect_dict.items():
        if name[:prefix_len] != key_subset:
            # not under the requested subtree: keep the tile as it is
            result[name] = rect
            continue
        x, y, w, h = rect
        # split base on the values given
        pieces = _split_rect(x, y, w, h, values, horizontal, gap)
        for key, piece in zip(keys, pieces):
            result[name + (key,)] = piece
    return result
def _tuplify(obj):
    """Coerce *obj* into a tuple of strings.

    Iterables (except strings themselves) are converted element-wise;
    any scalar -- including a plain string -- becomes a one-element
    tuple, keeping the string intact.
    """
    if np.iterable(obj) and not isinstance(obj, string_types):
        return tuple(str(element) for element in obj)
    return (str(obj),)
def _categories_level(keys):
    """Return the ordered unique values found at each key position.

    Uses an OrderedDict as a simple ordered set, so each level keeps the
    first-seen order of its values:
    [[key_1_level_1, key_2_level_1], [key_1_level_2, key_2_level_2]]
    """
    levels = []
    for column in zip(*keys):
        seen = OrderedDict((value, None) for value in _tuplify(column))
        levels.append(list(seen))
    return levels
def _hierarchical_split(count_dict, horizontal=True, gap=0.05):
    """
    Split a square in a hierarchical way given a contingency table.

    Hierarchically split the unit square in alternate directions
    in proportion to the subdivision contained in the contingency table
    count_dict. This is the function that actually perform the tiling
    for the creation of the mosaic plot. If the gap array has been specified
    it will insert a corresponding amount of space (proportional to the
    unit lenght), while retaining the proportionality of the tiles.

    Parameters
    ----------
    count_dict : dict
        Dictionary containing the contingency table.
        Each category should contain a non-negative number
        with a tuple as index. It expects that all the combination
        of keys to be representes; if that is not true, will
        automatically consider the missing values as 0
    horizontal : bool
        The starting direction of the split (by default along
        the horizontal axis)
    gap : float or array of floats
        The list of gaps to be applied on each subdivision.
        If the lenght of the given array is less of the number
        of subcategories (or if it's a single number) it will extend
        it with exponentially decreasing gaps

    Returns
    ----------
    base_rect : dict
        A dictionary associating each key with the (x, y, width, height)
        4-tuple of the corresponding tile in the unit square.
    """
    # this is the unit square that we are going to divide
    base_rect = OrderedDict([(tuple(), (0, 0, 1, 1))])
    # get the list of each possible value for each level
    categories_levels = _categories_level(list(iterkeys(count_dict)))
    L = len(categories_levels)
    # recreate the gaps vector starting from an int
    if not np.iterable(gap):
        gap = [gap / 1.5 ** idx for idx in range(L)]
    # extend if it's too short
    if len(gap) < L:
        last = gap[-1]
        # fixed: this used to read list(*gap), which unpacks the gap list
        # as positional arguments to list() and raises a TypeError for
        # any user-supplied gap sequence shorter than L
        gap = list(gap) + [last / 1.5 ** idx for idx in range(L)]
    # trim if it's too long
    gap = gap[:L]
    # put the count dictionay in order for the keys
    # this will allow some code simplification
    count_ordered = OrderedDict([(k, count_dict[k])
                                 for k in list(product(*categories_levels))])
    for cat_idx, cat_enum in enumerate(categories_levels):
        # get the partial key up to the actual level
        base_keys = list(product(*categories_levels[:cat_idx]))
        for key in base_keys:
            # for each partial and each value calculate how many
            # observation we have in the counting dictionary
            part_count = [_reduce_dict(count_ordered, key + (partial,))
                          for partial in cat_enum]
            # reduce the gap for subsequents levels
            new_gap = gap[cat_idx]
            # split the given subkeys in the rectangle dictionary
            base_rect = _key_splitting(base_rect, cat_enum, part_count, key,
                                       horizontal, new_gap)
        # alternate the split direction at each level
        horizontal = not horizontal
    return base_rect
def _single_hsv_to_rgb(hsv):
    """Transform a single color from the hsv space to the rgb one."""
    from matplotlib.colors import hsv_to_rgb
    # hsv_to_rgb expects an image-shaped array, so wrap the triplet as a
    # 1x1 "image" and flatten the result back to a 3-vector
    as_image = array(hsv).reshape(1, 1, 3)
    return hsv_to_rgb(as_image).reshape(3)
def _create_default_properties(data):
    """Create the default properties of the mosaic given the data
    first it will varies the color hue (first category) then the color
    saturation (second category) and then the color value
    (third category). If a fourth category is found, it will put
    decoration on the rectangle. Doesn't manage more than four
    level of categories
    """
    categories_levels = _categories_level(list(iterkeys(data)))
    Nlevels = len(categories_levels)
    # first level, the hue
    L = len(categories_levels[0])
    # hue = np.linspace(1.0, 0.0, L+1)[:-1]
    hue = np.linspace(0.0, 1.0, L + 2)[:-2]
    # second level, the saturation
    L = len(categories_levels[1]) if Nlevels > 1 else 1
    saturation = np.linspace(0.5, 1.0, L + 1)[:-1]
    # third level, the value
    L = len(categories_levels[2]) if Nlevels > 2 else 1
    value = np.linspace(0.5, 1.0, L + 1)[:-1]
    # fourth level, the hatch
    L = len(categories_levels[3]) if Nlevels > 3 else 1
    hatch = ['', '/', '-', '|', '+'][:L + 1]
    # convert in list and merge with the levels
    # (each entry becomes a (numeric value, category name) pair; missing
    # levels collapse to the single placeholder name '')
    hue = lzip(list(hue), categories_levels[0])
    saturation = lzip(list(saturation),
                      categories_levels[1] if Nlevels > 1 else [''])
    value = lzip(list(value),
                 categories_levels[2] if Nlevels > 2 else [''])
    hatch = lzip(list(hatch),
                 categories_levels[3] if Nlevels > 3 else [''])
    # create the properties dictionary
    properties = {}
    for h, s, v, t in product(hue, saturation, value, hatch):
        hv, hn = h
        sv, sn = s
        vv, vn = v
        tv, tn = t
        # the key only contains the names of the levels actually present
        level = (hn,) + ((sn,) if sn else tuple())
        level = level + ((vn,) if vn else tuple())
        level = level + ((tn,) if tn else tuple())
        hsv = array([hv, sv, vv])
        prop = {'color': _single_hsv_to_rgb(hsv), 'hatch': tv, 'lw': 0}
        properties[level] = prop
    return properties
def _normalize_data(data, index):
    """normalize the data to a dict with tuples of strings as keys
    right now it works with:

    0 - dictionary (or equivalent mappable)
    1 - pandas.Series with simple or hierarchical indexes
    2 - numpy.ndarrays
    3 - everything that can be converted to a numpy array
    4 - pandas.DataFrame (via the _normalize_dataframe function)

    *index*, when given, is the preferred ordering of the key levels.
    """
    # if data is a dataframe we need to take a completely new road
    # before coming back here. Use the hasattr to avoid importing
    # pandas explicitly
    if hasattr(data, 'pivot') and hasattr(data, 'groupby'):
        data = _normalize_dataframe(data, index)
        index = None
    # can it be used as a dictionary?
    try:
        items = list(iteritems(data))
    except AttributeError:
        # ok, I cannot use the data as a dictionary
        # Try to convert it to a numpy array, or die trying
        data = np.asarray(data)
        temp = OrderedDict()
        for idx in np.ndindex(data.shape):
            name = tuple(i for i in idx)
            temp[name] = data[idx]
        data = temp
        items = list(iteritems(data))
    # make all the keys a tuple, even if simple numbers
    data = OrderedDict([_tuplify(k), v] for k, v in items)
    categories_levels = _categories_level(list(iterkeys(data)))
    # fill the void in the counting dictionary
    # (every combination of levels gets an entry, missing ones count 0)
    indexes = product(*categories_levels)
    contingency = OrderedDict([(k, data.get(k, 0)) for k in indexes])
    data = contingency
    # reorder the keys order according to the one specified by the user
    # or if the index is None convert it into a simple list
    # right now it doesn't do any check, but can be modified in the future
    index = lrange(len(categories_levels)) if index is None else index
    contingency = OrderedDict()
    for key, value in iteritems(data):
        new_key = tuple(key[i] for i in index)
        contingency[new_key] = value
    data = contingency
    return data
def _normalize_dataframe(dataframe, index):
"""Take a pandas DataFrame and count the element present in the
given columns, return a hierarchical index on those columns
"""
#groupby the given keys, extract the same columns and count the element
# then collapse them with a mean
data = dataframe[index].dropna()
grouped = data.groupby(index, sort=False)
counted = grouped[index].count()
averaged = counted.mean(axis=1)
return averaged
def _statistical_coloring(data):
    """evaluate colors from the indipendence properties of the matrix
    It will encounter problem if one category has all zeros

    Each tile is colored by how many standard deviations its count lies
    from the value expected under independence: reddish for excess,
    blueish for deficit, hatched beyond two sigmas.
    """
    data = _normalize_data(data, None)
    categories_levels = _categories_level(list(iterkeys(data)))
    Nlevels = len(categories_levels)
    total = 1.0 * sum(v for v in itervalues(data))
    # count the proportion of observation
    # for each level that has the given name
    # at each level
    levels_count = []
    for level_idx in range(Nlevels):
        proportion = {}
        for level in categories_levels[level_idx]:
            proportion[level] = 0.0
            for key, value in iteritems(data):
                if level == key[level_idx]:
                    proportion[level] += value
            proportion[level] /= total
        levels_count.append(proportion)
    # for each key I obtain the expected value
    # and it's standard deviation from a binomial distribution
    # under the hipothesys of independence
    expected = {}
    for key, value in iteritems(data):
        # joint probability = product of the marginal proportions
        base = 1.0
        for i, k in enumerate(key):
            base *= levels_count[i][k]
        expected[key] = base * total, np.sqrt(total * base * (1.0 - base))
    # now we have the standard deviation of distance from the
    # expected value for each tile. We create the colors from this
    sigmas = dict((k, (data[k] - m) / s) for k, (m, s) in iteritems(expected))
    props = {}
    for key, dev in iteritems(sigmas):
        red = 0.0 if dev < 0 else (dev / (1 + dev))
        blue = 0.0 if dev > 0 else (dev / (-1 + dev))
        green = (1.0 - red - blue) / 2.0
        hatch = 'x' if dev > 2 else 'o' if dev < -2 else ''
        props[key] = {'color': [red, green, blue], 'hatch': hatch}
    return props
def _get_position(x, w, h, W):
if W == 0:
return x
return (x + w / 2.0) * w * h / W
def _create_labels(rects, horizontal, ax, rotation):
    """find the position of the label for each value of each category

    right now it supports only up to the four categories

    ax: the axis on which the label should be applied
    rotation: the rotation list for each side

    NOTE(review): the returned ``labels`` dict is never filled -- the
    labels are applied directly to the axes as tick labels; callers
    should not rely on the return value.
    """
    categories = _categories_level(list(iterkeys(rects)))
    if len(categories) > 4:
        msg = ("maximum of 4 level supported for axes labeling..and 4"
               "is alreay a lot of level, are you sure you need them all?")
        raise NotImplementedError(msg)
    labels = {}
    #keep it fixed as will be used a lot of times
    items = list(iteritems(rects))
    vertical = not horizontal
    #get the axis ticks and labels locator to put the correct values!
    # (twin axes provide the two extra sides for the 3rd and 4th level)
    ax2 = ax.twinx()
    ax3 = ax.twiny()
    #this is the order of execution for horizontal disposition
    ticks_pos = [ax.set_xticks, ax.set_yticks, ax3.set_xticks, ax2.set_yticks]
    ticks_lab = [ax.set_xticklabels, ax.set_yticklabels,
                 ax3.set_xticklabels, ax2.set_yticklabels]
    #for the vertical one, rotate it by one
    if vertical:
        ticks_pos = ticks_pos[1:] + ticks_pos[:1]
        ticks_lab = ticks_lab[1:] + ticks_lab[:1]
    #clean them
    for pos, lab in zip(ticks_pos, ticks_lab):
        pos([])
        lab([])
    #for each level, for each value in the level, take the mean of all
    #the sublevel that correspond to that partial key
    for level_idx, level in enumerate(categories):
        #this dictionary keep the labels only for this level
        level_ticks = dict()
        for value in level:
            #to which level it should refer to get the preceding
            #values of labels? it's rather a tricky question...
            #this is dependent on the side. It's a very crude management
            #but I couldn't think a more general way...
            if horizontal:
                if level_idx == 3:
                    index_select = [-1, -1, -1]
                else:
                    index_select = [+0, -1, -1]
            else:
                if level_idx == 3:
                    index_select = [+0, -1, +0]
                else:
                    index_select = [-1, -1, -1]
            #now I create the base key name and append the current value
            #It will search on all the rects to find the corresponding one
            #and use them to evaluate the mean position
            basekey = tuple(categories[i][index_select[i]]
                            for i in range(level_idx))
            basekey = basekey + (value,)
            subset = dict((k, v) for k, v in items
                          if basekey == k[:level_idx + 1])
            #now I extract the center of all the tiles and make a weighted
            #mean of all these center on the area of the tile
            #this should give me the (more or less) correct position
            #of the center of the category
            vals = list(itervalues(subset))
            W = sum(w * h for (x, y, w, h) in vals)
            x_lab = sum(_get_position(x, w, h, W) for (x, y, w, h) in vals)
            y_lab = sum(_get_position(y, h, w, W) for (x, y, w, h) in vals)
            #now base on the ordering, select which position to keep
            #needs to be written in a more general form of 4 level are enough?
            #should give also the horizontal and vertical alignment
            side = (level_idx + vertical) % 4
            level_ticks[value] = y_lab if side % 2 else x_lab
        #now we add the labels of this level to the correct axis
        ticks_pos[level_idx](list(itervalues(level_ticks)))
        ticks_lab[level_idx](list(iterkeys(level_ticks)),
                             rotation=rotation[level_idx])
    return labels
def mosaic(data, index=None, ax=None, horizontal=True, gap=0.005,
           properties=lambda key: None, labelizer=None,
           title='', statistic=False, axes_label=True,
           label_rotation=0.0):
    """Create a mosaic plot from a contingency table.

    It allows to visualize multivariate categorical data in a rigorous
    and informative way.

    Parameters
    ----------
    data : dict, pandas.Series, np.ndarray, pandas.DataFrame
        The contingency table that contains the data.
        Each category should contain a non-negative number
        with a tuple as index.  It expects that all the combinations
        of keys are represented; if that is not true, it will
        automatically consider the missing values as 0.  The order
        of the keys will be the same as the one of insertion.
        If a dict of a Series (or any other dict like object)
        is used, it will take the keys as labels.  If a
        np.ndarray is provided, it will generate simple
        numerical labels.
    index : list, optional
        Gives the preferred order for the category ordering. If not specified
        will default to the given order.  It doesn't support named indexes
        for hierarchical Series.  If a DataFrame is provided, it expects
        a list with the name of the columns.
    ax : matplotlib.Axes, optional
        The graph where display the mosaic. If not given, will
        create a new figure
    horizontal : bool, optional (default True)
        The starting direction of the split (by default along
        the horizontal axis)
    gap : float or array of floats
        The list of gaps to be applied on each subdivision.
        If the length of the given array is less than the number
        of subcategories (or if it's a single number) it will extend
        it with exponentially decreasing gaps
    labelizer : function (key) -> string, optional
        A function that generates the text to display at the center of
        each tile based on the key of that tile
    properties : function (key) -> dict, optional
        A function that for each tile in the mosaic takes the key
        of the tile and returns the dictionary of properties
        of the generated Rectangle, like color, hatch or similar.
        A default properties set will be provided for the keys whose
        color has not been defined, and will use color variation to help
        visually separate the various categories. It should return None
        to indicate that it should use the default property for the tile.
        A dictionary of the properties for each key can be passed,
        and it will be internally converted to the correct function
    statistic : bool, optional (default False)
        If true will use a crude statistical model to give colors to the plot.
        If the tile has a content that is more than 2 standard deviations
        from the expected value under the independence hypothesis, it will
        go from green to red (for positive deviations, blue otherwise) and
        will acquire a hatching when it crosses the 3 sigma.
    title : string, optional
        The title of the axis
    axes_label : boolean, optional
        Show the name of each value of each category
        on the axis (default) or hide them.
    label_rotation : float or list of float
        The rotation of the axis label (if present). If a list is given
        each axis can have a different rotation

    Returns
    ----------
    fig : matplotlib.Figure
        The generated figure
    rects : dict
        A dictionary that has the same keys of the original
        dataset, that holds a reference to the coordinates of the
        tile and the Rectangle that represent it

    See Also
    ----------
    A Brief History of the Mosaic Display
    Michael Friendly, York University, Psychology Department
    Journal of Computational and Graphical Statistics, 2001

    Mosaic Displays for Loglinear Models.
    Michael Friendly, York University, Psychology Department
    Proceedings of the Statistical Graphics Section, 1992, 61-68.

    Mosaic displays for multi-way contingency tables.
    Michael Friendly, York University, Psychology Department
    Journal of the american statistical association
    March 1994, Vol. 89, No. 425, Theory and Methods

    Examples
    ----------
    The most simple use case is to take a dictionary and plot the result

    >>> data = {'a': 10, 'b': 15, 'c': 16}
    >>> mosaic(data, title='basic dictionary')
    >>> pylab.show()

    A more useful example is given by a dictionary with multiple indices.
    In this case we use a wider gap to a better visual separation of the
    resulting plot

    >>> data = {('a', 'b'): 1, ('a', 'c'): 2, ('d', 'b'): 3, ('d', 'c'): 4}
    >>> mosaic(data, gap=0.05, title='complete dictionary')
    >>> pylab.show()

    The same data can be given as a simple or hierarchical indexed Series

    >>> rand = np.random.random
    >>> from itertools import product
    >>>
    >>> tuples = list(product(['bar', 'baz', 'foo', 'qux'], ['one', 'two']))
    >>> index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
    >>> data = pd.Series(rand(8), index=index)
    >>> mosaic(data, title='hierarchical index series')
    >>> pylab.show()

    The third accepted data structure is the np array, for which a
    very simple index will be created.

    >>> rand = np.random.random
    >>> data = 1+rand((2,2))
    >>> mosaic(data, title='random non-labeled array')
    >>> pylab.show()

    If you need to modify the labeling and the coloring you can give
    a function to create the labels and one with the graphical properties
    starting from the key tuple

    >>> data = {'a': 10, 'b': 15, 'c': 16}
    >>> props = lambda key: {'color': 'r' if 'a' in key else 'gray'}
    >>> labelizer = lambda k: {('a',): 'first', ('b',): 'second', \
        ('c',): 'third'}[k]
    >>> mosaic(data, title='colored dictionary', \
        properties=props, labelizer=labelizer)
    >>> pylab.show()

    Using a DataFrame as source, specifying the name of the columns of interest

    >>> gender = ['male', 'male', 'male', 'female', 'female', 'female']
    >>> pet = ['cat', 'dog', 'dog', 'cat', 'dog', 'cat']
    >>> data = pandas.DataFrame({'gender': gender, 'pet': pet})
    >>> mosaic(data, ['pet', 'gender'])
    >>> pylab.show()
    """
    if isinstance(data, DataFrame) and index is None:
        raise ValueError("You must pass an index if data is a DataFrame."
                         " See examples.")
    from matplotlib.patches import Rectangle
    #from pylab import Rectangle
    fig, ax = utils.create_mpl_ax(ax)
    # normalize the data to a dict with tuple of strings as keys
    data = _normalize_data(data, index)
    # split the graph into different areas
    rects = _hierarchical_split(data, horizontal=horizontal, gap=gap)
    # if there is no specified way to create the labels
    # create a default one
    if labelizer is None:
        labelizer = lambda k: "\n".join(k)
    # statistical coloring highlights deviations from independence,
    # otherwise each category gets a distinct default color set
    if statistic:
        default_props = _statistical_coloring(data)
    else:
        default_props = _create_default_properties(data)
    # a plain dict of per-key properties is wrapped into a lookup function
    # so the rest of the code can treat `properties` uniformly as callable
    if isinstance(properties, dict):
        color_dict = properties
        properties = lambda key: color_dict.get(key, None)
    for k, v in iteritems(rects):
        # create each rectangle and put a label on it
        x, y, w, h = v
        conf = properties(k)
        props = conf if conf else default_props[k]
        text = labelizer(k)
        Rect = Rectangle((x, y), w, h, label=text, **props)
        ax.add_patch(Rect)
        ax.text(x + w / 2, y + h / 2, text, ha='center',
                va='center', size='smaller')
    # creating the labels on the axis
    # or clearing them
    if axes_label:
        if np.iterable(label_rotation):
            rotation = label_rotation
        else:
            rotation = [label_rotation] * 4
        labels = _create_labels(rects, horizontal, ax, rotation)
    else:
        ax.set_xticks([])
        ax.set_xticklabels([])
        ax.set_yticks([])
        ax.set_yticklabels([])
    ax.set_title(title)
    return fig, rects
| 41.001517 | 87 | 0.632531 |
553e3f9b7e6a4e4a9a08269d77f5e346a7330b5e | 1,951 | py | Python | code/old/mollweide_creator.py | andrewbowen19/ClusterEclipsingBinaries | e554cb6bb613e0d3703314e50fcf5289f50bf572 | [
"MIT"
] | null | null | null | code/old/mollweide_creator.py | andrewbowen19/ClusterEclipsingBinaries | e554cb6bb613e0d3703314e50fcf5289f50bf572 | [
"MIT"
] | null | null | null | code/old/mollweide_creator.py | andrewbowen19/ClusterEclipsingBinaries | e554cb6bb613e0d3703314e50fcf5289f50bf572 | [
"MIT"
] | null | null | null | # OLD mollweide creator -- deprecated do not cuse
# Script to generate Nobs mollweides for colossus and baseline strategies
import pandas as pd
import numpy as np
import os
from astropy.coordinates import Angle, SkyCoord
import astropy.units as u
from OpSim import OpSim
from TRILEGAL import TRILEGAL
from matplotlib import pyplot as plt
from vespa_update import trilegal
import os
p = os.environ['PATH']
pv = os.path.join(os.getcwd(),'vespa_update')
p2 = pv+':'+p
os.environ['PATH'] = p2
import sys
sys.path.insert(0, '/Users/ageller/WORK/LSST/onGitHub/EBLSST/code')
def createMollweide(filename, strategy, create_csv=True):
    """Plot a Mollweide projection of OpSim fields colored by Nobs.

    Parameters
    ----------
    filename : str
        Path to the OpSim sqlite database file to read.
    strategy : str
        Name of the observing strategy; used as the plot title and in the
        name of the optional output csv.
    create_csv : bool, optional
        When True (default), also write ``<strategy>-OpSim-FieldData.csv``
        containing field IDs, coordinates and Nobs.
    """
    # Getting OpSim field data and Nobs for each field
    OpS = OpSim()
    # Bug fix: use the caller-supplied database path; previously a
    # hard-coded local path was used and `filename` was ignored, so the
    # two call sites below plotted the wrong (or no) database.
    OpS.dbFile = filename
    OpS.getAllOpSimFields()

    # OpSim field centres as sky coordinates
    coords = SkyCoord(OpS.RA, OpS.Dec, unit=(u.degree, u.degree), frame='icrs')
    # OpSim field radius
    fieldRad = Angle(1.75, unit=u.degree)

    # Code from Aaron's script
    raGal = coords.icrs.ra.wrap_at(180. * u.degree).degree
    decGal = coords.icrs.dec.wrap_at(180. * u.degree).degree
    lGal = coords.galactic.l.wrap_at(180. * u.degree).degree
    bGal = coords.galactic.b.wrap_at(180. * u.degree).degree

    f, ax = plt.subplots(figsize=(8, 5), subplot_kw={'projection': "mollweide"})
    ax.grid(True)
    ax.set_xlabel(r"$RA$", fontsize=16)
    ax.set_ylabel(r"$Dec$", fontsize=16)
    # only plot fields that were actually observed
    xx = np.where(OpS.Nobs > 0)
    # NOTE(review): `s=fieldRad` passes an astropy Angle as the marker size,
    # but matplotlib expects an area in points^2 here — confirm intent.
    ax.scatter(raGal[xx] * np.pi / 180., decGal[xx] * np.pi / 180., s=fieldRad, c=OpS.Nobs[xx],
               cmap='tab10', alpha=1, vmin=0, vmax=1000)
    ax.set_title(strategy, fontsize=16)
    plt.show()

    if create_csv:
        dat = pd.DataFrame({'OpSim ID': OpS.ID, 'OpSim RA': OpS.RA, 'OpSim Dec': OpS.Dec, 'Nobs': OpS.Nobs})
        dat.to_csv(strategy + '-OpSim-FieldData.csv')
createMollweide('../baseline2018a.db.gz.download', 'baseline')
createMollweide('../colossus_2665.db.gz.download', 'colossus')
| 30.484375 | 103 | 0.718606 |
e96b14b8ca8839b12c8d319791a9515519707aca | 4,074 | py | Python | bot/cogs/Config/__init__.py | abindent/Utility-Bot | a11b790e7930a035fdca2b153950e624e3abafe4 | [
"MIT"
] | 2 | 2022-03-20T13:12:35.000Z | 2022-03-27T08:52:37.000Z | bot/cogs/Config/__init__.py | abindent/Nextcord-Utility-Bot | a11b790e7930a035fdca2b153950e624e3abafe4 | [
"MIT"
] | 2 | 2022-03-07T01:10:21.000Z | 2022-03-08T07:33:06.000Z | bot/cogs/Config/__init__.py | abindent/Utility-Bot | a11b790e7930a035fdca2b153950e624e3abafe4 | [
"MIT"
] | 1 | 2022-03-08T07:41:46.000Z | 2022-03-08T07:41:46.000Z | import random, asyncio, nextcord
from nextcord.ext import commands
from util.databases.config import Blacklist_DB
class Configuration(commands.Cog, description="Configure the bot."):
    """Guild configuration cog.

    Provides prefix management for guild admins plus owner-only commands
    for blacklisting users and shutting the bot down.
    """

    COG_EMOJI = "<:config:956526378008846437>"

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_ready(self):
        # Created lazily here: the DB wrapper needs the fully-initialised bot.
        self.blacklist_db = Blacklist_DB(self.bot)

    @commands.command(
        name="prefix",
        aliases=["changeprefix", "setprefix"],
        description="Change your guilds prefix!",
        usage="[prefix]",
    )
    @commands.cooldown(1, 2, commands.BucketType.member)
    @commands.has_guild_permissions(manage_guild=True)
    async def prefix(self, ctx, *, prefix="t!"):
        """Set (or reset to the default ``t!``) this guild's command prefix."""
        await self.bot.config.upsert({"_id": ctx.guild.id, "prefix": prefix})
        await ctx.send(
            f"The guild prefix has been set to `{prefix}`. Use `{prefix}prefix [prefix]` to change it again!"
        )

    @commands.command(
        name="deleteprefix", aliases=["dp"], description="Delete your guilds prefix!"
    )
    @commands.guild_only()
    @commands.has_guild_permissions(administrator=True)
    async def deleteprefix(self, ctx):
        """Remove the custom prefix so the guild falls back to the default."""
        await self.bot.config.unset({"_id": ctx.guild.id, "prefix": 1})
        await ctx.send("This guilds prefix has been set back to the default")

    # Historical note kept from the original file:
    # "As a viewer, watch up to episode 10 and then attempt to convert the
    # following to using a database rather then continuing to use json"

    @commands.command(
        name="blacklist", description="Blacklist a user from the bot", usage="<user>"
    )
    @commands.is_owner()
    async def blacklist(self, ctx: commands.Context, user: nextcord.Member):
        """Blacklist *user* from using the bot (owner only)."""
        if ctx.message.author.id == user.id:
            msg = await ctx.send("🚫 Sorry! You cannot blacklist yourself!")
            await asyncio.sleep(5)
            await msg.delete()
            # Bug fix: previously execution fell through here, so the
            # author was blacklisted anyway despite the warning message.
            return
        if await self.blacklist_db.check_user_blacklisted_status(user.id):
            embed = nextcord.Embed(title="🚫 Sorry! The user is already blacklisted", color=0x00FFFF)
            msg = await ctx.send(embed=embed)
            await asyncio.sleep(5)
            await msg.delete()
        else:
            await self.blacklist_db.create_user_table(ctx.message.guild.id, user)
            embed = nextcord.Embed(title=f"✅ Successfully blacklisted {user.name}.", color=0x00FFFF)
            msg = await ctx.send(embed=embed)
            await asyncio.sleep(5)
            await msg.delete()

    @commands.command(
        name="unblacklist",
        description="Unblacklist a user from the bot",
        usage="<user>",
    )
    @commands.is_owner()
    async def unblacklist(self, ctx, user: nextcord.Member):
        """
        Unblacklist someone from the bot
        """
        if ctx.message.author.id == user.id:
            msg = await ctx.send("🚫 Sorry! You cannot unblacklist yourself!")
            await asyncio.sleep(5)
            await msg.delete()
            # Bug fix: previously execution fell through and the command
            # continued despite the self-target warning.
            return
        if not await self.blacklist_db.check_user_blacklisted_status(user.id):
            embed = nextcord.Embed(title="🚫 Sorry! The user is not blacklisted", color=0x00FFFF)
            msg = await ctx.send(embed=embed)
            await asyncio.sleep(5)
            await msg.delete()
        else:
            await self.blacklist_db.delete_user_table(user.id)
            embed = nextcord.Embed(title=f"✅ Successfully unblacklisted {user.name}.", color=0x00FFFF)
            msg = await ctx.send(embed=embed)
            await asyncio.sleep(5)
            await msg.delete()

    @commands.command(
        name="logout",
        aliases=["close", "stopbot"],
        description="Log the bot out of nextcord!",
    )
    @commands.is_owner()
    async def logout(self, ctx):
        """
        If the user running the command owns the bot then this will disconnect the bot from nextcord.
        """
        await ctx.send(f"Hey {ctx.author.mention}, I am now logging out :wave:")
        await self.bot.close()
| 35.736842 | 109 | 0.614384 |
ee4d5ad35c2e2a8c71c1f9cff65573a7fa34bc55 | 6,882 | py | Python | git_stalk/stalk.py | NefixEstrada/git-stalk-cli | 8ce85cbfd450b54f71ca3055c80460b6bda1e6c7 | [
"MIT"
] | null | null | null | git_stalk/stalk.py | NefixEstrada/git-stalk-cli | 8ce85cbfd450b54f71ca3055c80460b6bda1e6c7 | [
"MIT"
] | null | null | null | git_stalk/stalk.py | NefixEstrada/git-stalk-cli | 8ce85cbfd450b54f71ca3055c80460b6bda1e6c7 | [
"MIT"
] | null | null | null | import json
import argparse
import datetime
import datetime  # NOTE: duplicate kept from the original file
import os
import re
import subprocess
import sys

import requests
from dateutil import tz
from prettytable import PrettyTable
def jft(user):
    """Return the HTTP status code of the GitHub API profile for *user*."""
    profile_url = "https://api.github.com/users/" + str(user)
    return requests.get(profile_url).status_code
def get_event(string):
    """Turn a CamelCase GitHub event type into a short human-readable label.

    "PushEvent" -> "Push"; a few event types get friendlier names first
    (Watch -> Starred, Create -> Commit, the long PR-review-comment name
    -> "PR Review"). The trailing " Event " suffix is stripped.
    """
    spaced = " ".join(re.findall('[A-Z][^A-Z]*', string)) + " "
    renames = {
        "Pull Request Review Comment Event ": "PR Review Event ",
        "Watch Event ": "Starred Event ",
        "Create Event ": "Commit Event ",
    }
    spaced = renames.get(spaced, spaced)
    # drop the trailing " Event " (7 characters)
    return spaced[:-7]
def get_details(event):
    """Return a one-line summary string for a GitHub *event* dict.

    Unknown event types — and pushes containing no distinct commit —
    yield None. Only WatchEvent needs an extra API call (to fetch the
    starred repository's language).
    """
    kind = event["type"]
    if kind == "IssuesEvent":
        return event["payload"]["issue"]["title"]
    if kind == "IssueCommentEvent":
        return event["payload"]["comment"]["body"]
    if kind == "WatchEvent":
        repo = requests.get(event["repo"]["url"]).json()
        return repo["language"]
    if kind == "PullRequestEvent":
        return event["payload"]["pull_request"]["title"]
    if kind == "PushEvent":
        for commit in event["payload"]["commits"]:
            if commit["distinct"]:
                return commit["message"]
        return None
    if kind == "MemberEvent":
        return "Added " + event["payload"]["member"]["login"] + " as collaborator"
    if kind == "ReleaseEvent":
        return "Released binaries for version " + event["payload"]["release"]["tag_name"]
    if kind == "ForkEvent":
        return "Forked " + event["repo"]["name"]
    return None
def check_for_fork(link, user):
    """Return True unless *link* points at a repo owned by *user* that is a fork.

    The owner is taken from the second-to-last path segment of *link*; only
    repos owned by *user* trigger an API call to inspect the "fork" flag.
    """
    parts = link.split('/')
    if parts[-2] != user:
        return True
    repo = requests.get(link).json()
    return not repo["fork"]
def get_local_time(string):
    """Extract the HH:MM:SS portion of a UTC timestamp converted to local time."""
    stamp = convert_to_local(string)
    clock_part = stamp.split(' ')[1]
    # strip any "+HH:MM" timezone offset appended to the time
    return clock_part.split('+')[0]
def get_basic_info(user):
    """Print the public profile summary of a GitHub user."""
    profile = requests.get("https://api.github.com/users/" + str(user)).json()
    fields = (
        ("Name", "name"),
        ("Company", "company"),
        ("Bio", "bio"),
        ("Followers", "followers"),
        ("Following", "following"),
        ("Public Repos", "public_repos"),
    )
    for label, key in fields:
        print(label + ":", profile[key])
    print()
def convert_to_local(string):
    """Convert a GitHub UTC timestamp ("%Y-%m-%dT%H:%M:%SZ") to a local-time string."""
    parsed = datetime.datetime.strptime(string, '%Y-%m-%dT%H:%M:%SZ')
    aware_utc = parsed.replace(tzinfo=tz.tzutc())
    return str(aware_utc.astimezone(tz.tzlocal()))
def get_contributions(user, latest, org=None):
    """Print a table of today's public contributions for *user*.

    When *org* is given, only events whose repository owner (the part of
    ``owner/repo`` before the first ``/``) equals *org* are shown in the
    table; the summary count below still includes all events in *latest*.
    """
    print("Contributions Today: ")
    if latest:
        table = PrettyTable(["Type", "Repository", "Time", "Details"])
        for event in latest:
            repo_name = event["repo"]["name"]
            if org:
                # accumulate the owner prefix of "owner/repo" up to the slash
                curr_org = ""
                for c in repo_name:
                    if c == r'/':
                        break
                    curr_org += c
                if curr_org == org:
                    table.add_row([get_event(event["type"]), event["repo"]["name"], get_local_time(
                        event["created_at"]), get_details(event)])
            else:
                table.add_row([get_event(event["type"]), event["repo"]["name"], get_local_time(
                    event["created_at"]), get_details(event)])
        print(table)
    # note: counts every event in `latest`, even rows hidden by the org filter
    print(user + " have made " + str(len(latest)) +
          " public contribution(s) today.\n")
def get_other_activity(user, other):
    """Print a table of today's non-contribution activity for *user*."""
    print("Other Activity today: ")
    if other:
        activity_table = PrettyTable(["Type", "Repository", "Time", "Details"])
        for ev in other:
            row = [get_event(ev["type"]), ev["repo"]["name"],
                   get_local_time(ev["created_at"]), get_details(ev)]
            activity_table.add_row(row)
        print(activity_table)
    summary = "%s have done %s other public activit(y/ies) today.\n" % (user, len(other))
    print(summary)
def get_stars(user, stars):
    """Print a table of the repositories *user* starred today."""
    print("Starred today: ")
    if stars:
        star_table = PrettyTable(["Repository", "Language", "Time"])
        for ev in stars:
            star_table.add_row([ev["repo"]["name"], get_details(ev),
                                get_local_time(ev["created_at"])])
        print(star_table)
    print("%s have starred %s repo(s) today." % (user, len(stars)))
def fill_data(user, today, events, latest, stars, other):
    """Sort today's events into the stars / other / latest buckets (in place).

    Issue comments are ignored; stars go to *stars*, forks and collaborator
    additions to *other*, and everything else to *latest* provided the repo
    is not a fork owned by *user*. The three lists are also returned.
    """
    for event in events:
        happened_today = convert_to_local(event["created_at"]).startswith(today)
        if not (happened_today and event["type"] != "IssueCommentEvent"):
            continue
        kind = event["type"]
        if kind == "WatchEvent":
            stars.append(event)
        elif kind in ("ForkEvent", "MemberEvent"):
            other.append(event)
        elif check_for_fork(event["repo"]["url"], user):
            latest.append(event)
    return latest, stars, other
def update():
    """Upgrade git-stalk to the latest release from PyPI.

    Runs pip through the current interpreter (``python -m pip``) so the
    upgrade targets the environment this script is actually running in,
    rather than whichever ``pip`` happens to be first on PATH (the previous
    ``os.system("pip ...")`` call could upgrade a different Python).
    """
    subprocess.call([sys.executable, "-m", "pip", "install", "--upgrade", "git-stalk"])
def show_contri(args=None):
    """Fetch today's public GitHub events for ``args["name"]`` and print them.

    Prints the profile summary (unless ``args["np"]`` is set), today's
    contributions (optionally filtered by ``args["org"]``), other activity
    and starred repos. On a non-200 response a short error hint is printed.
    """
    user = args["name"]
    now = datetime.datetime.now()
    today = str(now.strftime("%Y-%m-%d"))
    link = "https://api.github.com/users/" + str(user) + "/events"
    response = requests.get(link)
    latest = []
    stars = []
    other = []
    if response.status_code == 200:
        # Bug fix: parse the body only after the status check — error
        # responses are not guaranteed to be valid JSON and previously
        # `response.json()` could raise before the check ran.
        events = response.json()
        latest, stars, other = fill_data(
            user, today, events, latest, stars, other)
    else:
        print("Something went wrong, check your internet or username. \nUse stalk --help for Help")
        return
    if not args["np"]:
        get_basic_info(user)
    if args["org"]:
        get_contributions(user, latest, args["org"])
    else:
        get_contributions(user, latest)
    get_other_activity(user, other)
    get_stars(user, stars)
def run():
    """Command-line entry point: parse arguments and dispatch."""
    ap = argparse.ArgumentParser()
    ap.add_argument("name", nargs='?', help="name of the user")
    ap.add_argument("--org", help="Organization Name")
    ap.add_argument(
        "-U", "--update", action='store_true', help="Update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)")
    ap.add_argument("-np", action='store_true',
                    help="Stalks a user without showing their profile")
    args = vars(ap.parse_args())
    # Bug fix: the old `len(args) > 1` check was always true (the vars()
    # dict always contains every option key), so the usage hint below was
    # unreachable and `show_contri` ran with name=None. Dispatch on the
    # actual argument values instead.
    if args["update"]:
        update()
    elif args["name"]:
        show_contri(args)
    else:
        print("Enter a valid username to stalk. \nFor eg: stalk aashutoshrathi \nOr you can type stalk --help for help")
if __name__ == '__main__':
run()
| 32.616114 | 166 | 0.588928 |
9ecdba9c2814f25b33623f1e2d6b2f78e78c5f71 | 541 | py | Python | coord2vec/common/geographic/tests/flask_example_app.py | jonzarecki/coord2vec | 4f267fdd87af7b3d3558ca834b88e9ab7c309c18 | [
"Apache-2.0"
] | null | null | null | coord2vec/common/geographic/tests/flask_example_app.py | jonzarecki/coord2vec | 4f267fdd87af7b3d3558ca834b88e9ab7c309c18 | [
"Apache-2.0"
] | null | null | null | coord2vec/common/geographic/tests/flask_example_app.py | jonzarecki/coord2vec | 4f267fdd87af7b3d3558ca834b88e9ab7c309c18 | [
"Apache-2.0"
] | 1 | 2021-01-25T09:21:17.000Z | 2021-01-25T09:21:17.000Z | """ flask_example.py
Required packages:
- flask
- folium
Usage:
Start the flask server by running:
$ python flask_example.py
And then head to http://127.0.0.1:5000/ in your browser to see the map displayed
"""
from flask import Flask
import folium
app = Flask(__name__)
@app.route('/')
def index():
    """Serve a folium map centred on the hard-coded start coordinates."""
    centre = (46.9540700, 142.7360300)
    return folium.Map(location=centre, zoom_start=14)._repr_html_()
if __name__ == '__main__':
app.run(debug=True) | 16.90625 | 84 | 0.669131 |
77fc43181b550fdc77e418558ffb30b4f06e141c | 8,766 | py | Python | src/wallet/trading/trade_store.py | 13767849/chia-blockchain | ad7d7e0cced7f2f6deddc9e006dbaeee6dab8f66 | [
"Apache-2.0"
] | 1 | 2021-05-28T01:38:23.000Z | 2021-05-28T01:38:23.000Z | src/wallet/trading/trade_store.py | 13767849/chia-blockchain | ad7d7e0cced7f2f6deddc9e006dbaeee6dab8f66 | [
"Apache-2.0"
] | null | null | null | src/wallet/trading/trade_store.py | 13767849/chia-blockchain | ad7d7e0cced7f2f6deddc9e006dbaeee6dab8f66 | [
"Apache-2.0"
] | null | null | null | from typing import List, Optional
import aiosqlite
from src.types.blockchain_format.sized_bytes import bytes32
from src.types.mempool_inclusion_status import MempoolInclusionStatus
from src.util.errors import Err
from src.util.ints import uint8, uint32
from src.wallet.trade_record import TradeRecord
from src.wallet.trading.trade_status import TradeStatus
class TradeStore:
    """
    TradeStore stores trading history.

    The full serialized TradeRecord lives in the ``trade_record`` blob
    column; status, confirmed_at_index, created_at_time and sent are
    duplicated as columns so they can be queried and indexed.
    """

    db_connection: aiosqlite.Connection
    cache_size: uint32

    @classmethod
    async def create(cls, connection: aiosqlite.Connection, cache_size: uint32 = uint32(600000)):
        """Build a store bound to *connection*, creating the table and indexes if needed."""
        self = cls()

        self.cache_size = cache_size

        self.db_connection = connection
        await self.db_connection.execute(
            (
                "CREATE TABLE IF NOT EXISTS trade_records("
                " trade_record blob,"
                " trade_id text PRIMARY KEY,"
                " status int,"
                " confirmed_at_index int,"
                " created_at_time bigint,"
                " sent int)"
            )
        )

        await self.db_connection.execute(
            "CREATE INDEX IF NOT EXISTS trade_confirmed_index on trade_records(confirmed_at_index)"
        )
        await self.db_connection.execute("CREATE INDEX IF NOT EXISTS trade_status on trade_records(status)")
        await self.db_connection.execute("CREATE INDEX IF NOT EXISTS trade_id on trade_records(trade_id)")

        await self.db_connection.commit()
        return self

    async def _clear_database(self):
        """Delete every stored trade (test helper)."""
        cursor = await self.db_connection.execute("DELETE FROM trade_records")
        await cursor.close()
        await self.db_connection.commit()

    async def add_trade_record(self, record: TradeRecord) -> None:
        """
        Store TradeRecord into DB (insert, or replace by trade_id).
        """
        cursor = await self.db_connection.execute(
            "INSERT OR REPLACE INTO trade_records VALUES(?, ?, ?, ?, ?, ?)",
            (
                bytes(record),
                record.trade_id.hex(),
                record.status,
                record.confirmed_at_index,
                record.created_at_time,
                record.sent,
            ),
        )
        await cursor.close()
        await self.db_connection.commit()

    async def set_status(self, trade_id: bytes32, status: TradeStatus, index: uint32 = uint32(0)):
        """
        Updates the status of the trade. A non-zero *index* also records the
        block height at which the trade was confirmed.
        """
        current: Optional[TradeRecord] = await self.get_trade_record(trade_id)
        if current is None:
            return
        confirmed_at_index = current.confirmed_at_index
        if index != 0:
            confirmed_at_index = index
        tx: TradeRecord = TradeRecord(
            confirmed_at_index=confirmed_at_index,
            accepted_at_time=current.accepted_at_time,
            created_at_time=current.created_at_time,
            my_offer=current.my_offer,
            sent=current.sent,
            spend_bundle=current.spend_bundle,
            tx_spend_bundle=current.tx_spend_bundle,
            additions=current.additions,
            removals=current.removals,
            trade_id=current.trade_id,
            status=uint32(status.value),
            sent_to=current.sent_to,
        )
        await self.add_trade_record(tx)

    async def increment_sent(
        self,
        id: bytes32,
        name: str,
        send_status: MempoolInclusionStatus,
        err: Optional[Err],
    ) -> bool:
        """
        Updates trade sent count (Full Node has received spend_bundle and sent ack).
        Returns False if the trade is unknown or was already sent to this peer.
        """
        current: Optional[TradeRecord] = await self.get_trade_record(id)
        if current is None:
            return False

        sent_to = current.sent_to.copy()

        err_str = err.name if err is not None else None
        append_data = (name, uint8(send_status.value), err_str)

        # Don't increment count if it's already sent to this peer
        if append_data in sent_to:
            return False

        sent_to.append(append_data)

        tx: TradeRecord = TradeRecord(
            confirmed_at_index=current.confirmed_at_index,
            accepted_at_time=current.accepted_at_time,
            created_at_time=current.created_at_time,
            my_offer=current.my_offer,
            sent=uint32(current.sent + 1),
            spend_bundle=current.spend_bundle,
            tx_spend_bundle=current.tx_spend_bundle,
            additions=current.additions,
            removals=current.removals,
            trade_id=current.trade_id,
            status=current.status,
            sent_to=sent_to,
        )

        await self.add_trade_record(tx)
        return True

    async def set_not_sent(self, id: bytes32):
        """
        Updates trade sent count to 0 and resets the status to PENDING_CONFIRM.
        """
        current: Optional[TradeRecord] = await self.get_trade_record(id)
        if current is None:
            return

        tx: TradeRecord = TradeRecord(
            confirmed_at_index=current.confirmed_at_index,
            accepted_at_time=current.accepted_at_time,
            created_at_time=current.created_at_time,
            my_offer=current.my_offer,
            sent=uint32(0),
            spend_bundle=current.spend_bundle,
            tx_spend_bundle=current.tx_spend_bundle,
            additions=current.additions,
            removals=current.removals,
            trade_id=current.trade_id,
            status=uint32(TradeStatus.PENDING_CONFIRM.value),
            sent_to=[],
        )

        await self.add_trade_record(tx)

    async def get_trade_record(self, trade_id: bytes32) -> Optional[TradeRecord]:
        """
        Checks DB for TradeRecord with id: id and returns it.
        """
        cursor = await self.db_connection.execute("SELECT * from trade_records WHERE trade_id=?", (trade_id.hex(),))
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None:
            record = TradeRecord.from_bytes(row[0])
            return record
        return None

    async def get_trade_record_with_status(self, status: TradeStatus) -> List[TradeRecord]:
        """
        Returns all trades currently in the given status.
        """
        cursor = await self.db_connection.execute("SELECT * from trade_records WHERE status=?", (status.value,))
        rows = await cursor.fetchall()
        await cursor.close()
        records = []

        for row in rows:
            record = TradeRecord.from_bytes(row[0])
            records.append(record)

        return records

    async def get_not_sent(self) -> List[TradeRecord]:
        """
        Returns the list of trades that have not been received by full node yet.
        """
        # Bug fix: the schema has no "confirmed" column, so the old query
        # ("WHERE sent<? and confirmed=?") raised an OperationalError.
        # An unconfirmed trade is one whose confirmed_at_index is still 0
        # (set_status only writes a non-zero index on confirmation).
        cursor = await self.db_connection.execute(
            "SELECT * from trade_records WHERE sent<? and confirmed_at_index=?",
            (
                4,
                0,
            ),
        )
        rows = await cursor.fetchall()
        await cursor.close()
        records = []

        for row in rows:
            record = TradeRecord.from_bytes(row[0])
            records.append(record)

        return records

    async def get_all_unconfirmed(self) -> List[TradeRecord]:
        """
        Returns the list of all trades that have not yet been confirmed.
        """
        # Bug fix: same missing-"confirmed"-column issue as get_not_sent;
        # query by confirmed_at_index=0 instead.
        cursor = await self.db_connection.execute(
            "SELECT * from trade_records WHERE confirmed_at_index=?", (0,)
        )
        rows = await cursor.fetchall()
        await cursor.close()
        records = []

        for row in rows:
            record = TradeRecord.from_bytes(row[0])
            records.append(record)

        return records

    async def get_all_trades(self) -> List[TradeRecord]:
        """
        Returns all stored trades.
        """
        cursor = await self.db_connection.execute("SELECT * from trade_records")
        rows = await cursor.fetchall()
        await cursor.close()
        records = []

        for row in rows:
            record = TradeRecord.from_bytes(row[0])
            records.append(record)

        return records

    async def get_trades_above(self, height: uint32) -> List[TradeRecord]:
        """Returns all trades confirmed strictly above *height*."""
        cursor = await self.db_connection.execute("SELECT * from trade_records WHERE confirmed_at_index>?", (height,))
        rows = await cursor.fetchall()
        await cursor.close()
        records = []

        for row in rows:
            record = TradeRecord.from_bytes(row[0])
            records.append(record)

        return records

    async def rollback_to_block(self, block_index):
        """Delete every trade confirmed above *block_index* (chain reorg)."""
        # Delete from storage
        cursor = await self.db_connection.execute(
            "DELETE FROM trade_records WHERE confirmed_at_index>?", (block_index,)
        )
        await cursor.close()
        await self.db_connection.commit()
| 32.708955 | 118 | 0.606548 |
8163184bea4450d8faedd6f3d068c99c6560b188 | 2,814 | py | Python | tests/test_temperature_system.py | SmartSleepIoT/SmartSleepCoding | 21c19489f0c477cbfbabd3a1d232f526f84a9e49 | [
"BSD-3-Clause"
] | null | null | null | tests/test_temperature_system.py | SmartSleepIoT/SmartSleepCoding | 21c19489f0c477cbfbabd3a1d232f526f84a9e49 | [
"BSD-3-Clause"
] | 41 | 2021-10-20T17:54:59.000Z | 2022-02-02T20:43:53.000Z | tests/test_temperature_system.py | SmartSleepIoT/SmartSleepCoding | 21c19489f0c477cbfbabd3a1d232f526f84a9e49 | [
"BSD-3-Clause"
] | null | null | null | import time
import pytest
from flask import g
from flask import session
import paho.mqtt.client as paho
from SmartSleep.db import get_db
from flask import json
import runpy
msg_nr = 0
messages = [""]
broker = 'broker.emqx.io'
port = 1883
def update_contor():
global msg_nr
msg_nr += 1
def on_message(client, userdata, message):
received = json.loads(message.payload)
if "status" in received:
assert received['status'] == messages[msg_nr]
update_contor()
elif "db" in received:
assert received["db"] == messages[msg_nr]
update_contor()
def test_cooling_system(client, auth):
global msg_nr
msg_nr = 0
global messages
messages = ['16',
"Setting the temperature system level to 1.0", "New temperature system level set to 1.0",
'16',
"Setting the temperature system level to 2.0", "New temperature system level set to 2.0",
'16',
"Setting the temperature system level to 3.0", "New temperature system level set to 3.0",
'16',
"Setting the temperature system level to 4.0", "New temperature system level set to 4.0",
'19',
"Setting the temperature system level to 3.0", "New temperature system level set to 3.0",
'16',
"Setting the temperature system level to 4.0", "New temperature system level set to 4.0",
"18"
]
time.sleep(2)
client_mqtt = paho.Client("client-test-snoring")
client_mqtt.on_message = on_message
client_mqtt.connect(broker)
client_mqtt.loop_start()
client_mqtt.subscribe("SmartSleep/SoundSensor")
auth.login()
response = client.post(f"/config/start_to_sleep?sleep_now={True}")
assert response.status_code == 200
response = client.post("/config/temp?temperature=18")
assert response.status_code == 200
time.sleep(1.5)
response = client.post("/config/current_temp?sensor=16")
assert response.status_code == 200
time.sleep(1.5)
response = client.post("/config/current_temp?sensor=16")
assert response.status_code == 200
time.sleep(1.5)
response = client.post("/config/current_temp?sensor=16")
assert response.status_code == 200
time.sleep(1.5)
response = client.post("/config/current_temp?sensor=16")
assert response.status_code == 200
time.sleep(1.5)
response = client.post("/config/current_temp?sensor=19")
assert response.status_code == 200
time.sleep(1.5)
response = client.post("/config/current_temp?sensor=16")
assert response.status_code == 200
time.sleep(1.5)
response = client.post("/config/current_temp?sensor=18")
assert response.status_code == 200
time.sleep(1.5)
| 29.621053 | 105 | 0.646411 |
3bef32f6cc3c85d8deff531c8001d0c964461c54 | 12,138 | py | Python | sdk/python/pulumi_azure_native/network/v20181201/vpn_site.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20181201/vpn_site.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20181201/vpn_site.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['VpnSite']
class VpnSite(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address_space: Optional[pulumi.Input[pulumi.InputType['AddressSpaceArgs']]] = None,
bgp_properties: Optional[pulumi.Input[pulumi.InputType['BgpSettingsArgs']]] = None,
device_properties: Optional[pulumi.Input[pulumi.InputType['DevicePropertiesArgs']]] = None,
id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
is_security_site: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
site_key: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_wan: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
vpn_site_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
VpnSite Resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['AddressSpaceArgs']] address_space: The AddressSpace that contains an array of IP address ranges.
:param pulumi.Input[pulumi.InputType['BgpSettingsArgs']] bgp_properties: The set of bgp properties.
:param pulumi.Input[pulumi.InputType['DevicePropertiesArgs']] device_properties: The device properties
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] ip_address: The ip-address for the vpn-site.
:param pulumi.Input[bool] is_security_site: IsSecuritySite flag
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The resource group name of the VpnSite.
:param pulumi.Input[str] site_key: The key for vpn-site that can be used for connections.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_wan: The VirtualWAN to which the vpnSite belongs
:param pulumi.Input[str] vpn_site_name: The name of the VpnSite being created or updated.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['address_space'] = address_space
__props__['bgp_properties'] = bgp_properties
__props__['device_properties'] = device_properties
__props__['id'] = id
__props__['ip_address'] = ip_address
__props__['is_security_site'] = is_security_site
__props__['location'] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['site_key'] = site_key
__props__['tags'] = tags
__props__['virtual_wan'] = virtual_wan
__props__['vpn_site_name'] = vpn_site_name
__props__['etag'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20181201:VpnSite"), pulumi.Alias(type_="azure-native:network:VpnSite"), pulumi.Alias(type_="azure-nextgen:network:VpnSite"), pulumi.Alias(type_="azure-native:network/latest:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/latest:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180401:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180401:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180601:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180601:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180701:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180701:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180801:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180801:VpnSite"), pulumi.Alias(type_="azure-native:network/v20181001:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20181001:VpnSite"), pulumi.Alias(type_="azure-native:network/v20181101:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20181101:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190201:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190201:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190401:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190401:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190601:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190601:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190701:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190701:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190801:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190801:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190901:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190901:VpnSite"), pulumi.Alias(type_="azure-native:network/v20191101:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20191101:VpnSite"), 
pulumi.Alias(type_="azure-native:network/v20191201:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20191201:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200301:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200301:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200401:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200401:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200501:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200501:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200601:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200601:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200701:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200701:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200801:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200801:VpnSite")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(VpnSite, __self__).__init__(
'azure-native:network/v20181201:VpnSite',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VpnSite':
"""
Get an existing VpnSite resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["address_space"] = None
__props__["bgp_properties"] = None
__props__["device_properties"] = None
__props__["etag"] = None
__props__["ip_address"] = None
__props__["is_security_site"] = None
__props__["location"] = None
__props__["name"] = None
__props__["provisioning_state"] = None
__props__["site_key"] = None
__props__["tags"] = None
__props__["type"] = None
__props__["virtual_wan"] = None
return VpnSite(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="addressSpace")
def address_space(self) -> pulumi.Output[Optional['outputs.AddressSpaceResponse']]:
"""
The AddressSpace that contains an array of IP address ranges.
"""
return pulumi.get(self, "address_space")
@property
@pulumi.getter(name="bgpProperties")
def bgp_properties(self) -> pulumi.Output[Optional['outputs.BgpSettingsResponse']]:
"""
The set of bgp properties.
"""
return pulumi.get(self, "bgp_properties")
@property
@pulumi.getter(name="deviceProperties")
def device_properties(self) -> pulumi.Output[Optional['outputs.DevicePropertiesResponse']]:
"""
The device properties
"""
return pulumi.get(self, "device_properties")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
Gets a unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> pulumi.Output[Optional[str]]:
"""
The ip-address for the vpn-site.
"""
return pulumi.get(self, "ip_address")
@property
@pulumi.getter(name="isSecuritySite")
def is_security_site(self) -> pulumi.Output[Optional[bool]]:
"""
IsSecuritySite flag
"""
return pulumi.get(self, "is_security_site")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="siteKey")
def site_key(self) -> pulumi.Output[Optional[str]]:
"""
The key for vpn-site that can be used for connections.
"""
return pulumi.get(self, "site_key")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualWan")
def virtual_wan(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
"""
The VirtualWAN to which the vpnSite belongs
"""
return pulumi.get(self, "virtual_wan")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 51.21519 | 2,840 | 0.670456 |
61b59bf4c29481c32b18986d982dc1617da676bd | 5,744 | py | Python | Client-Code-2018/CurrentFlipperCode/Sensors/Hazmat2.py | Max-Godfrey-2018/RoboCup-2018-Driving-Code | a39dc01f7c1213c8079216d49d376b317efbf5f3 | [
"MIT"
] | 1 | 2018-11-27T05:11:04.000Z | 2018-11-27T05:11:04.000Z | Client-Code-2018/CurrentFlipperCode/Sensors/Hazmat2.py | Max-Godfrey-2018/RoboCup-2018-Driving-Code | a39dc01f7c1213c8079216d49d376b317efbf5f3 | [
"MIT"
] | 1 | 2017-06-14T11:08:38.000Z | 2017-06-14T11:08:38.000Z | Old Laptop Desktop/Code_Files/Driving_Code/Flipperes/Sensors/Hazmat2.py | CCGSRobotics/Archive-Code | a963926984bb1abb9a457ea2cf6212d8362f4642 | [
"Apache-2.0"
] | null | null | null | from collections import deque
import numpy as np
import argparse
import imutils
import cv2
import time as t
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
help="path to the (optional) video file")
ap.add_argument("-r", "--real", type=str, default="real",
help="real image or mask")
ap.add_argument("-b", "--buffer", type=int, default=0,
help="max buffer size")
args = vars(ap.parse_args())
real = args.get("real")
raspi = False
pts = deque(maxlen=args["buffer"])
# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
if raspi:
camera = cv2.VideoCapture("/dev/stdin")
else:
camera = cv2.VideoCapture(0)
# otherwise, grab a reference to the video file
else:
camera = cv2.VideoCapture(args["video"])
# Hazmat classes and functions
class Sign(object):
def __init__(self, image, name, short, minHue, minSat, minBr, maxHue, maxSat, maxBr):
self.name = name
self.short = short
self.image = image
self.minHue = minHue
self.minSat = minSat
self.minBr = minBr
self.maxHue = maxHue
self.maxSat = maxSat
self.maxBr = maxBr
def rotate(sign):
closest_distance = 100
for i in range(9):
dist = match(sign, rotate=45*i)
if (dist < closest_distance):
closest_distance = dist
return closest_distance
def match(sign, rotate=-1, debug=False):
global img3
sign_image = cv2.imread(sign.image)
sign_image = cv2.cvtColor(sign_image, cv2.COLOR_BGR2GRAY)
mask = cv2.inRange(hsv, np.array([sign.minHue, sign.minSat, sign.minBr]), np.array([sign.maxHue, sign.maxSat, sign.maxBr]))
res = cv2.bitwise_and(check_image,check_image, mask= mask)
# Uncomment the below line to disable HSV
#res = check_image
if not (rotate == -1):
num_rows, num_cols = sign_image.shape[:2]
rotation_matrix = cv2.getRotationMatrix2D((num_cols/2, num_rows/2), rotate, 1)
sign_image = cv2.warpAffine(sign_image, rotation_matrix, (num_cols, num_rows))
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(res,None)
kp2, des2 = orb.detectAndCompute(sign_image,None)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1,des2)
matches = sorted(matches, key = lambda x:x.distance)
img1_kp1 = matches[0].queryIdx
(x1, y1) = kp1[img1_kp1].pt
if(matches[0].distance < 36):
cv2.imshow("matches", cv2.imread(sign.image))
font = cv2.FONT_HERSHEY_SIMPLEX
img3 = cv2.putText(img3,sign.short,(int(x1),int(y1)), font, 1, (200,255,155), 2, cv2.LINE_AA)
cv2.imshow("match", img3)
if (debug):
img_matches = cv2.drawMatches(res,kp1,img2,kp2,matches[:1],None, flags=2)
img_matches = cv2.circle(img_matches,(int(x1), int(y1)), 15, (255,0,0), 2)
print(sign.name)
print((int(x1), int(y1)))
print (matches[0].distance)
cv2.imshow("matches", img_matches)
cv2.imshow("res", res)
cv2.imshow("template", cv2.imread(sign.image))
return matches[0].distance
def init_sign_list():
global sign_list
sign_list = []
sign_list.append(Sign("templates/template1.jpg", "Oxidizer", "Ox", 0, 100, 200, 255, 255, 255))#1
sign_list.append(Sign("templates/template2.jpg", "Organic Peroxide", "OP", 0, 100, 200, 255, 255, 255))#2
sign_list.append(Sign("templates/template3.png", "Flammable Gas", "FG", 0, 100, 200, 255, 255, 255))#3
sign_list.append(Sign("templates/template4.png", "Inhalation Hazard", "IN", 0, 0, 0, 255, 10, 255)) #4
sign_list.append(Sign("templates/template5.jpg", "Dangerous When Wet", "DWW", 80, 0, 0, 155, 255, 255))#5
sign_list.append(Sign("templates/template6.jpg", "Flammable Solid", "FS", 0, 0, 0, 255, 255, 255))#6
sign_list.append(Sign("templates/template7.jpg", "Spontaneously Combustible", "SP", 0, 0, 200, 255, 255, 255))#7
sign_list.append(Sign("templates/template8.png", "Explosives", "Ex", 0, 100, 200, 255, 255, 255))#8
sign_list.append(Sign("templates/template9.png", "Radioactive II", "Rad", 0, 100, 200, 255, 255, 255))#9
sign_list.append(Sign("templates/template10.png", "Corrosive", "Cor", 0, 0, 0, 255, 255, 255))#10
sign_list.append(Sign("templates/template11.jpg", "Non-flammable Gas", "NFG", 0, 0, 0, 255, 255, 255))#11
sign_list.append(Sign("templates/template12.png", "Infectious Substance", "IS", 0, 0, 0, 255, 10, 255))#12
# Motion detection
def motion_detection(my_frame):
im = cv2.cvtColor(my_frame, cv2.COLOR_BGR2GRAY)
# Setup SimpleBlobDetector parameters.
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.minThreshold = 10;
params.maxThreshold = 200;
# Filter by Area.
params.filterByArea = True
params.minArea = 50
# Filter by Circularity
params.filterByCircularity = True
params.minCircularity = 0.1
# Filter by Convexity
params.filterByConvexity = True
params.minConvexity = 0.87
# Filter by Inertia
params.filterByInertia = True
params.minInertiaRatio = 0.01
# Read image
#im = cv2.imread("blob_detection.jpg", cv2.IMREAD_GRAYSCALE)
# Set up the detector with default parameters.
detector = cv2.SimpleBlobDetector_create()#//////////
#detector = cv2.SimpleBlobDetector_create(params)
# Detect blobs.
keypoints = detector.detect(im)
# Draw detected blobs as red circles.
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
im_with_keypoints = cv2.drawKeypoints(im, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# Show keypoints
#cv2.imshow('Frame',frame)
cv2.imshow("Motion Detection", im_with_keypoints)
# Main Loop
while(1):
_, frame = camera.read()
#dest = cv2.resize(frame, (320,240))
motion_detection(frame)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
elif k == ord('s'):
break
| 30.553191 | 124 | 0.705258 |
e31998ffe4ed7aaa8b1dc1abf59ac03b6a5a935d | 292 | py | Python | common/gl.py | quentin-xia/Maticv | 76d599b68ef5bdab10e8dbc0c120657610933ad8 | [
"MIT"
] | null | null | null | common/gl.py | quentin-xia/Maticv | 76d599b68ef5bdab10e8dbc0c120657610933ad8 | [
"MIT"
] | null | null | null | common/gl.py | quentin-xia/Maticv | 76d599b68ef5bdab10e8dbc0c120657610933ad8 | [
"MIT"
] | null | null | null | #!/usr/bin/evn python
#-*- coding:utf-8 -*-
import tempfile
color = [
[(0,0,255),0.7,0.5],
[(214,112,218),0.8,0.4],
[(238,104,123),0.8,0.3]
]
ADB_CMD = ""
AAPT_CMD = ""
MOBILE_WIDTH = 0
MOBILE_HEIGHT = 0
WINDOW = 0
TEMP_IMAGE_PATH = "%s/screenshot.png" % tempfile.gettempdir()
| 16.222222 | 61 | 0.599315 |
84020dbd8fe9eeab2cf5561b927ce0c224046b48 | 4,655 | py | Python | tests/churn_script_logging_and_tests.py | digitalemerge/clean_churn | b97bdeff4ab6169dcffd8d3bfc4529c5e9d58b46 | [
"MIT"
] | null | null | null | tests/churn_script_logging_and_tests.py | digitalemerge/clean_churn | b97bdeff4ab6169dcffd8d3bfc4529c5e9d58b46 | [
"MIT"
] | null | null | null | tests/churn_script_logging_and_tests.py | digitalemerge/clean_churn | b97bdeff4ab6169dcffd8d3bfc4529c5e9d58b46 | [
"MIT"
] | null | null | null | """
Script to perform tests by use of the pytest suite
"""
import os
from os import path
import logging
import pandas as pd
import joblib
import churn_library as cl
import constants
def test_import(test_dataframe):
"""
tests data import - Uses the test_dataframe fixture to read in the csv
"""
try:
assert test_dataframe.shape[0] > 0
assert test_dataframe.shape[1] > 0
except AssertionError as err:
logging.error(
"Testing import_data: The file doesn't appear to have rows or columns")
raise err
def test_create_churn_col():
"""
tests create_churn_col
"""
dataframe = pd.DataFrame(columns=['Attrition_Flag'])
dataframe.loc[0] = ["Existing Customer"]
dataframe.loc[1] = ["Attrited Customer"]
dataframe = cl.create_churn_col(dataframe)
try:
assert dataframe.Churn.loc[0] == 0
assert dataframe.Churn.loc[1] == 1
logging.info(
"Testing create_churn_col: The churn column was created successfully: SUCCESS"
)
except AssertionError as err:
logging.error(
"Testing create_churn_col: The transformation from Attrition_Flag to Churn bool "
"was not successful")
raise err
def test_eda(test_dataframe_w_churn, tmpdir):
"""
tests perform eda function. Use a fixture to read a test dataset and check if plot files
are produced.
"""
figures = [
'Churn',
'Customer_Age',
'Marital_Status',
'Total_Trans_Ct',
'Correlations']
try:
cl.perform_eda(test_dataframe_w_churn, tmpdir)
for figure in figures:
assert path.isfile(
path.join(
tmpdir,
'images',
'eda',
figure +
'.pdf'))
logging.info(
"Testing test_eda: All 5 eda figures created: SUCCESS"
)
except FileNotFoundError as err:
logging.error(
"Testing test_eda: Not all 5 eda figures were created"
)
raise err
def test_encoder_helper(test_dataframe_encoded):
"""
tests encoder helper
"""
try:
for column in constants.CATEGORY_LST:
assert column + '_' + constants.CHURN in test_dataframe_encoded.columns
logging.info(
'Testing test_encoder_helper: All expected categorical columns encoded: SUCCESS'
)
except AssertionError as err:
logging.info(
"Testing test_encoder_helper: At least one of the categorical columns was not created")
raise err
def test_perform_feature_engineering(features):
"""
tests perform_feature_engineering
"""
try:
x_train = features[0]
x_test = features[1]
y_train = features[2]
y_test = features[3]
assert len(x_train) == len(y_train) != 0
assert len(x_test) == len(y_test) != 0
logging.info(
"Testing test_perform_feature_engineering: Features created: SUCCESS")
except AssertionError as err:
logging.error(
"Testing test_perform_feature_engineering: Features length mismatch or empty")
raise err
def test_train_models(features, tmpdir):
"""
tests train_models
"""
if not path.exists(path.join(tmpdir, 'images', 'results')):
os.makedirs(path.join(tmpdir, 'images', 'results'))
cl.train_models(
features[0],
features[1],
features[2],
features[3],
tmpdir
)
try:
joblib.load(path.join(tmpdir, 'models', 'rfc_model' + '.pkl'))
joblib.load(path.join(tmpdir, 'models', 'logistic_model' + '.pkl'))
logging.info(
"Testing train_models: Models created and stored: SUCCESS")
except FileNotFoundError as err:
logging.error("Testing train_models: Not all models were stored")
raise err
for image_name in [
"Random_Forest",
"Logistic_Regression",
"Feature_Importance",
'Prediction_Explainer']:
try:
assert path.isfile(
path.join(
tmpdir,
'images',
'results',
image_name +
'.jpg')
)
logging.info(
"Testing train_models: Creating results images: SUCCESS"
)
except FileNotFoundError as err:
logging.error(
"Testing train_models: Generated results images missing")
raise err
if __name__ == "__main__":
pass
| 28.734568 | 99 | 0.588829 |
4575dd98344ae988fee86e5bf0774cf08e8fa242 | 16,168 | py | Python | tests/api/test_auth.py | TheJJ/synapse | 1032393dfb0c865fc540539dfe649e7b1a32037a | [
"Apache-2.0"
] | 2 | 2021-05-14T19:05:03.000Z | 2021-05-26T23:00:43.000Z | tests/api/test_auth.py | TheJJ/synapse | 1032393dfb0c865fc540539dfe649e7b1a32037a | [
"Apache-2.0"
] | null | null | null | tests/api/test_auth.py | TheJJ/synapse | 1032393dfb0c865fc540539dfe649e7b1a32037a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2015 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pymacaroons
from mock import Mock
from twisted.internet import defer
import synapse.handlers.auth
from synapse.api.auth import Auth
from synapse.api.errors import AuthError
from synapse.types import UserID
from tests import unittest
from tests.utils import setup_test_homeserver, mock_getRawHeaders
class TestHandlers(object):
    """Minimal stand-in for the homeserver's handler registry.

    Exposes only the AuthHandler, which is all the Auth tests below reach
    through ``hs.handlers``.
    """

    def __init__(self, hs):
        # Build a real AuthHandler against the (partly mocked) homeserver.
        self.auth_handler = synapse.handlers.auth.AuthHandler(hs)
class AuthTestCase(unittest.TestCase):
    @defer.inlineCallbacks
    def setUp(self):
        """Create a test homeserver whose datastore is a Mock.

        Each test stubs the specific storage lookups it needs on
        ``self.store``; the Auth object under test is wired to that store.
        """
        self.state_handler = Mock()
        self.store = Mock()

        self.hs = yield setup_test_homeserver(handlers=None)
        self.hs.get_datastore = Mock(return_value=self.store)
        self.hs.handlers = TestHandlers(self.hs)
        self.auth = Auth(self.hs)
        self.test_user = "@foo:bar"
        self.test_token = "_test_token_"

        # this is overridden for the appservice tests
        self.store.get_app_service_by_token = Mock(return_value=None)
@defer.inlineCallbacks
def test_get_user_by_req_user_valid_token(self):
user_info = {
"name": self.test_user,
"token_id": "ditto",
"device_id": "device",
}
self.store.get_user_by_access_token = Mock(return_value=user_info)
request = Mock(args={})
request.args["access_token"] = [self.test_token]
request.requestHeaders.getRawHeaders = mock_getRawHeaders()
requester = yield self.auth.get_user_by_req(request)
self.assertEquals(requester.user.to_string(), self.test_user)
def test_get_user_by_req_user_bad_token(self):
self.store.get_user_by_access_token = Mock(return_value=None)
request = Mock(args={})
request.args["access_token"] = [self.test_token]
request.requestHeaders.getRawHeaders = mock_getRawHeaders()
d = self.auth.get_user_by_req(request)
self.failureResultOf(d, AuthError)
def test_get_user_by_req_user_missing_token(self):
user_info = {
"name": self.test_user,
"token_id": "ditto",
}
self.store.get_user_by_access_token = Mock(return_value=user_info)
request = Mock(args={})
request.requestHeaders.getRawHeaders = mock_getRawHeaders()
d = self.auth.get_user_by_req(request)
self.failureResultOf(d, AuthError)
@defer.inlineCallbacks
def test_get_user_by_req_appservice_valid_token(self):
app_service = Mock(token="foobar", url="a_url", sender=self.test_user)
self.store.get_app_service_by_token = Mock(return_value=app_service)
self.store.get_user_by_access_token = Mock(return_value=None)
request = Mock(args={})
request.args["access_token"] = [self.test_token]
request.requestHeaders.getRawHeaders = mock_getRawHeaders()
requester = yield self.auth.get_user_by_req(request)
self.assertEquals(requester.user.to_string(), self.test_user)
def test_get_user_by_req_appservice_bad_token(self):
self.store.get_app_service_by_token = Mock(return_value=None)
self.store.get_user_by_access_token = Mock(return_value=None)
request = Mock(args={})
request.args["access_token"] = [self.test_token]
request.requestHeaders.getRawHeaders = mock_getRawHeaders()
d = self.auth.get_user_by_req(request)
self.failureResultOf(d, AuthError)
def test_get_user_by_req_appservice_missing_token(self):
app_service = Mock(token="foobar", url="a_url", sender=self.test_user)
self.store.get_app_service_by_token = Mock(return_value=app_service)
self.store.get_user_by_access_token = Mock(return_value=None)
request = Mock(args={})
request.requestHeaders.getRawHeaders = mock_getRawHeaders()
d = self.auth.get_user_by_req(request)
self.failureResultOf(d, AuthError)
@defer.inlineCallbacks
def test_get_user_by_req_appservice_valid_token_valid_user_id(self):
masquerading_user_id = "@doppelganger:matrix.org"
app_service = Mock(token="foobar", url="a_url", sender=self.test_user)
app_service.is_interested_in_user = Mock(return_value=True)
self.store.get_app_service_by_token = Mock(return_value=app_service)
self.store.get_user_by_access_token = Mock(return_value=None)
request = Mock(args={})
request.args["access_token"] = [self.test_token]
request.args["user_id"] = [masquerading_user_id]
request.requestHeaders.getRawHeaders = mock_getRawHeaders()
requester = yield self.auth.get_user_by_req(request)
self.assertEquals(requester.user.to_string(), masquerading_user_id)
def test_get_user_by_req_appservice_valid_token_bad_user_id(self):
masquerading_user_id = "@doppelganger:matrix.org"
app_service = Mock(token="foobar", url="a_url", sender=self.test_user)
app_service.is_interested_in_user = Mock(return_value=False)
self.store.get_app_service_by_token = Mock(return_value=app_service)
self.store.get_user_by_access_token = Mock(return_value=None)
request = Mock(args={})
request.args["access_token"] = [self.test_token]
request.args["user_id"] = [masquerading_user_id]
request.requestHeaders.getRawHeaders = mock_getRawHeaders()
d = self.auth.get_user_by_req(request)
self.failureResultOf(d, AuthError)
@defer.inlineCallbacks
def test_get_user_from_macaroon(self):
# TODO(danielwh): Remove this mock when we remove the
# get_user_by_access_token fallback.
self.store.get_user_by_access_token = Mock(
return_value={
"name": "@baldrick:matrix.org",
"device_id": "device",
}
)
user_id = "@baldrick:matrix.org"
macaroon = pymacaroons.Macaroon(
location=self.hs.config.server_name,
identifier="key",
key=self.hs.config.macaroon_secret_key)
macaroon.add_first_party_caveat("gen = 1")
macaroon.add_first_party_caveat("type = access")
macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
user_info = yield self.auth.get_user_by_access_token(macaroon.serialize())
user = user_info["user"]
self.assertEqual(UserID.from_string(user_id), user)
# TODO: device_id should come from the macaroon, but currently comes
# from the db.
self.assertEqual(user_info["device_id"], "device")
@defer.inlineCallbacks
def test_get_guest_user_from_macaroon(self):
self.store.get_user_by_id = Mock(return_value={
"is_guest": True,
})
user_id = "@baldrick:matrix.org"
macaroon = pymacaroons.Macaroon(
location=self.hs.config.server_name,
identifier="key",
key=self.hs.config.macaroon_secret_key)
macaroon.add_first_party_caveat("gen = 1")
macaroon.add_first_party_caveat("type = access")
macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
macaroon.add_first_party_caveat("guest = true")
serialized = macaroon.serialize()
user_info = yield self.auth.get_user_by_access_token(serialized)
user = user_info["user"]
is_guest = user_info["is_guest"]
self.assertEqual(UserID.from_string(user_id), user)
self.assertTrue(is_guest)
self.store.get_user_by_id.assert_called_with(user_id)
@defer.inlineCallbacks
def test_get_user_from_macaroon_user_db_mismatch(self):
self.store.get_user_by_access_token = Mock(
return_value={"name": "@percy:matrix.org"}
)
user = "@baldrick:matrix.org"
macaroon = pymacaroons.Macaroon(
location=self.hs.config.server_name,
identifier="key",
key=self.hs.config.macaroon_secret_key)
macaroon.add_first_party_caveat("gen = 1")
macaroon.add_first_party_caveat("type = access")
macaroon.add_first_party_caveat("user_id = %s" % (user,))
with self.assertRaises(AuthError) as cm:
yield self.auth.get_user_by_access_token(macaroon.serialize())
self.assertEqual(401, cm.exception.code)
self.assertIn("User mismatch", cm.exception.msg)
@defer.inlineCallbacks
def test_get_user_from_macaroon_missing_caveat(self):
# TODO(danielwh): Remove this mock when we remove the
# get_user_by_access_token fallback.
self.store.get_user_by_access_token = Mock(
return_value={"name": "@baldrick:matrix.org"}
)
macaroon = pymacaroons.Macaroon(
location=self.hs.config.server_name,
identifier="key",
key=self.hs.config.macaroon_secret_key)
macaroon.add_first_party_caveat("gen = 1")
macaroon.add_first_party_caveat("type = access")
with self.assertRaises(AuthError) as cm:
yield self.auth.get_user_by_access_token(macaroon.serialize())
self.assertEqual(401, cm.exception.code)
self.assertIn("No user caveat", cm.exception.msg)
@defer.inlineCallbacks
def test_get_user_from_macaroon_wrong_key(self):
# TODO(danielwh): Remove this mock when we remove the
# get_user_by_access_token fallback.
self.store.get_user_by_access_token = Mock(
return_value={"name": "@baldrick:matrix.org"}
)
user = "@baldrick:matrix.org"
macaroon = pymacaroons.Macaroon(
location=self.hs.config.server_name,
identifier="key",
key=self.hs.config.macaroon_secret_key + "wrong")
macaroon.add_first_party_caveat("gen = 1")
macaroon.add_first_party_caveat("type = access")
macaroon.add_first_party_caveat("user_id = %s" % (user,))
with self.assertRaises(AuthError) as cm:
yield self.auth.get_user_by_access_token(macaroon.serialize())
self.assertEqual(401, cm.exception.code)
self.assertIn("Invalid macaroon", cm.exception.msg)
@defer.inlineCallbacks
def test_get_user_from_macaroon_unknown_caveat(self):
# TODO(danielwh): Remove this mock when we remove the
# get_user_by_access_token fallback.
self.store.get_user_by_access_token = Mock(
return_value={"name": "@baldrick:matrix.org"}
)
user = "@baldrick:matrix.org"
macaroon = pymacaroons.Macaroon(
location=self.hs.config.server_name,
identifier="key",
key=self.hs.config.macaroon_secret_key)
macaroon.add_first_party_caveat("gen = 1")
macaroon.add_first_party_caveat("type = access")
macaroon.add_first_party_caveat("user_id = %s" % (user,))
macaroon.add_first_party_caveat("cunning > fox")
with self.assertRaises(AuthError) as cm:
yield self.auth.get_user_by_access_token(macaroon.serialize())
self.assertEqual(401, cm.exception.code)
self.assertIn("Invalid macaroon", cm.exception.msg)
@defer.inlineCallbacks
def test_get_user_from_macaroon_expired(self):
# TODO(danielwh): Remove this mock when we remove the
# get_user_by_access_token fallback.
self.store.get_user_by_access_token = Mock(
return_value={"name": "@baldrick:matrix.org"}
)
self.store.get_user_by_access_token = Mock(
return_value={"name": "@baldrick:matrix.org"}
)
user = "@baldrick:matrix.org"
macaroon = pymacaroons.Macaroon(
location=self.hs.config.server_name,
identifier="key",
key=self.hs.config.macaroon_secret_key)
macaroon.add_first_party_caveat("gen = 1")
macaroon.add_first_party_caveat("type = access")
macaroon.add_first_party_caveat("user_id = %s" % (user,))
macaroon.add_first_party_caveat("time < -2000") # ms
self.hs.clock.now = 5000 # seconds
self.hs.config.expire_access_token = True
# yield self.auth.get_user_by_access_token(macaroon.serialize())
# TODO(daniel): Turn on the check that we validate expiration, when we
# validate expiration (and remove the above line, which will start
# throwing).
with self.assertRaises(AuthError) as cm:
yield self.auth.get_user_by_access_token(macaroon.serialize())
self.assertEqual(401, cm.exception.code)
self.assertIn("Invalid macaroon", cm.exception.msg)
@defer.inlineCallbacks
def test_get_user_from_macaroon_with_valid_duration(self):
# TODO(danielwh): Remove this mock when we remove the
# get_user_by_access_token fallback.
self.store.get_user_by_access_token = Mock(
return_value={"name": "@baldrick:matrix.org"}
)
self.store.get_user_by_access_token = Mock(
return_value={"name": "@baldrick:matrix.org"}
)
user_id = "@baldrick:matrix.org"
macaroon = pymacaroons.Macaroon(
location=self.hs.config.server_name,
identifier="key",
key=self.hs.config.macaroon_secret_key)
macaroon.add_first_party_caveat("gen = 1")
macaroon.add_first_party_caveat("type = access")
macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
macaroon.add_first_party_caveat("time < 900000000") # ms
self.hs.clock.now = 5000 # seconds
self.hs.config.expire_access_token = True
user_info = yield self.auth.get_user_by_access_token(macaroon.serialize())
user = user_info["user"]
self.assertEqual(UserID.from_string(user_id), user)
    @defer.inlineCallbacks
    def test_cannot_use_regular_token_as_guest(self):
        """A regular (non-guest) access token must stop working once a
        "guest = true" caveat is appended to it, because the stored user is
        not actually a guest."""
        USER_ID = "@percy:matrix.org"
        # Stub out persistence so issuing a token does not touch the database.
        self.store.add_access_token_to_user = Mock()
        token = yield self.hs.handlers.auth_handler.issue_access_token(
            USER_ID, "DEVICE"
        )
        self.store.add_access_token_to_user.assert_called_with(
            USER_ID, token, "DEVICE"
        )
        def get_user(tok):
            # Only recognise the token issued above; anything else is unknown.
            if token != tok:
                return None
            return {
                "name": USER_ID,
                "is_guest": False,
                "token_id": 1234,
                "device_id": "DEVICE",
            }
        self.store.get_user_by_access_token = get_user
        self.store.get_user_by_id = Mock(return_value={
            "is_guest": False,
        })
        # check the token works as a regular token first
        request = Mock(args={})
        request.args["access_token"] = [token]
        request.requestHeaders.getRawHeaders = mock_getRawHeaders()
        requester = yield self.auth.get_user_by_req(request, allow_guest=True)
        self.assertEqual(UserID.from_string(USER_ID), requester.user)
        self.assertFalse(requester.is_guest)
        # add an is_guest caveat (simulating a tampered/guest-claiming token)
        mac = pymacaroons.Macaroon.deserialize(token)
        mac.add_first_party_caveat("guest = true")
        guest_tok = mac.serialize()
        # the token should *not* work now: the stored user is not a guest
        request = Mock(args={})
        request.args["access_token"] = [guest_tok]
        request.requestHeaders.getRawHeaders = mock_getRawHeaders()
        with self.assertRaises(AuthError) as cm:
            yield self.auth.get_user_by_req(request, allow_guest=True)
        self.assertEqual(401, cm.exception.code)
        self.assertEqual("Guest access token used for regular user", cm.exception.msg)
        self.store.get_user_by_id.assert_called_with(USER_ID)
| 40.828283 | 86 | 0.670584 |
20f053203283451b7664cb0acdb5c6694b0960f8 | 17,556 | py | Python | test_suite/generic/test_generic.py | narmaku/cloud-image-val | 5ec65df72799915f92d0a42108d22d50fd6322c1 | [
"Apache-2.0"
] | null | null | null | test_suite/generic/test_generic.py | narmaku/cloud-image-val | 5ec65df72799915f92d0a42108d22d50fd6322c1 | [
"Apache-2.0"
] | null | null | null | test_suite/generic/test_generic.py | narmaku/cloud-image-val | 5ec65df72799915f92d0a42108d22d50fd6322c1 | [
"Apache-2.0"
] | null | null | null | import pytest
from lib import test_lib
class TestsGeneric:
    """Generic sanity checks applicable to cloud images of all providers."""

    @pytest.mark.run_on(['all'])
    def test_bash_history_is_empty(self, host):
        """The image must not ship pre-populated bash history files."""
        users = [host.user().name, 'root']

        for u in users:
            file_path = f'/home/{u}/.bash_history'
            bash_history_file = host.file(file_path)
            if bash_history_file.exists:
                file_content_length = len(bash_history_file.content_string)
                assert file_content_length == 0, f'{file_path} must be empty or nonexistent'

    @pytest.mark.run_on(['all'])
    def test_console_is_redirected_to_ttys0(self, host):
        """
        Console output should be redirected to serial for HVM instances.
        """
        assert host.file('/proc/cmdline').contains('console=ttyS0'), \
            'Serial console should be redirected to ttyS0'

    # TODO: does this apply to fedora and centos
    @pytest.mark.run_on(['rhel'])
    def test_crashkernel_is_enabled_rhel(self, host):
        """
        Check that crashkernel is enabled in image.
        """
        product_release_version = float(host.system_info.release)

        if product_release_version < 9.0:
            expected_content = ['crashkernel=auto']
        elif host.system_info.arch == 'x86_64':
            expected_content = ['crashkernel=1G-4G:192M', '4G-64G:256M', '64G-:512M']
        else:
            expected_content = ['crashkernel=2G-:448M']

        with host.sudo(host.user().name):
            for item in expected_content:
                assert host.file('/proc/cmdline').contains(item), \
                    'crashkernel must be enabled'

    @pytest.mark.run_on(['all'])
    def test_cpu_flags_are_correct(self, host):
        """
        Check various CPU flags.
        BugZilla 1061348
        """
        arch = 'x86_64'

        # avx/xsave are x86 flags, so this check only makes sense on x86_64.
        # BUGFIX: the previous condition was inverted (== instead of !=),
        # which skipped the test on x86_64 and ran the x86-only assertions
        # on every other architecture (e.g. ARM).
        if host.system_info.arch != arch:
            pytest.skip(f'Only applicable to {arch}')

        expected_flags = [
            'avx',
            'xsave',
        ]

        with host.sudo():
            for flag in expected_flags:
                assert host.file('/proc/cpuinfo').contains(flag), \
                    f'Expected CPU flag "{flag}" not set'

    @pytest.mark.run_on(['all'])
    def test_rhgb_quiet_not_present_in_cmdline(self, host):
        """
        Check that there is no "rhgb" or "quiet" in /proc/cmdline.
        BugZilla 1122300
        """
        excluded_settings = [
            'rhgb',
            'quiet',
        ]

        with host.sudo():
            for setting in excluded_settings:
                assert not host.file('/proc/cmdline').contains(setting), \
                    f'{setting} must not be present in cmdline'

    @pytest.mark.run_on(['all'])
    def test_numa_settings(self, host):
        """
        Check if NUMA is enabled on supported image.
        """
        with host.sudo():
            assert host.run_test('dmesg | grep -i numa'), \
                'There is no NUMA information available'

            lscpu_numa_nodes = host.check_output("lscpu | grep -i 'NUMA node(s)' | awk -F' ' '{print $NF}'")
            dmesg_numa_nodes = host.check_output("dmesg | grep -i 'No NUMA'|wc -l")

            if int(lscpu_numa_nodes) > 1:
                # BUGFIX: check_output returns a string; without the int()
                # cast the comparison below always raised
                # "TypeError: '>' not supported between 'str' and 'int'".
                assert int(dmesg_numa_nodes) > 1, \
                    f'NUMA seems to be disabled, when it should be enabled (NUMA nodes: {lscpu_numa_nodes})'

    @pytest.mark.run_on(['all'])
    def test_no_avc_denials(self, host):
        """
        Check there is no avc denials (selinux).
        """
        with host.sudo():
            assert 'no matches' in host.check_output('x=$(ausearch -m avc 2>&1 &); echo $x'), \
                'There should not be any avc denials (selinux)'

    @pytest.mark.run_on(['rhel'])
    def test_cert_product_version_is_correct(self, host):
        """
        BugZilla 1938930
        Issue RHELPLAN-60817
        """
        product_version = float(host.system_info.release)

        if product_version < 8.0:
            rpm_to_check = 'redhat-release-server'
        else:
            rpm_to_check = 'redhat-release'

        with host.sudo():
            host.run_test(f'rpm -q {rpm_to_check}')

            cert_version = host.check_output('rct cat-cert /etc/pki/product-default/*.pem | grep Version')

            assert f'Version: {product_version}' in cert_version, \
                'Inconsistent version in pki certificate'

    @pytest.mark.run_on(['all'])
    def test_inittab_and_systemd(self, host):
        """
        Check default runlevel or systemd target.
        """
        kernel_release = host.check_output('uname -r')

        with host.sudo():
            if host.package('systemd').is_installed:
                assert '/lib/systemd/system/multi-user.target' in \
                       host.check_output('readlink -f /etc/systemd/system/default.target'), \
                    'Unexpected systemd default target'
            else:
                assert 'id:3:initdefault' in host.check_output("grep '^id:' /etc/inittab"), \
                    'Unexpected default inittab "id"'

                if 'el5' in kernel_release:
                    assert 'si::sysinit:/etc/rc.d/rc.sysinit' in host.check_output("grep '^si:' /etc/inittab"), \
                        'Unexpected default inittab "id"'

    # TODO: does this apply to centos
    @pytest.mark.run_on(['rhel', 'fedora'])
    def test_release_version(self, host):
        """
        Check that the product release version matches the version of the
        distribution "release" package (redhat-release / fedora-release).
        """
        if test_lib.is_rhel_atomic_host(host):
            pytest.skip('Not run in atomic images')

        product_version = float(host.system_info.release)

        release_file = 'redhat-release'
        if host.system_info.distribution == 'fedora':
            release_file = 'fedora-release'

        with host.sudo():
            command_to_run = "rpm -q --qf '%{VERSION}' --whatprovides " + release_file
            package_release_version = float(host.check_output(command_to_run))

        assert product_version == package_release_version, \
            f'product version ({product_version}) does not match package release version'

    @pytest.mark.pub
    @pytest.mark.run_on(['all'])
    def test_release_version_in_image_name(self, host, instance_data):
        """
        Check if release version is on the image name
        """
        if test_lib.is_rhel_atomic_host(host):
            pytest.skip('Not run in atomic images')

        cloud_image_name = instance_data['name']
        product_version = float(host.system_info.release)

        # Image names encode the version with dashes, e.g. "8-5" for 8.5.
        assert str(product_version).replace('.', '-') in cloud_image_name, 'product version is not in image name'

    @pytest.mark.run_on(['rhel'])
    def test_root_is_locked(self, host):
        """
        Check if root account is locked
        """
        with host.sudo():
            if test_lib.is_rhel_atomic_host(host):
                result = host.run('passwd -S root | grep -q Alternate').rc
            else:
                result = host.run('passwd -S root | grep -q LK').rc
        assert result == 0, 'Root account should be locked'

    @pytest.mark.run_on(['all'])
    def test_bash_in_shell_config(self, host):
        """
        Check for bash/nologin shells in /etc/shells
        """
        assert host.file('/etc/shells').contains('/bin/bash'), \
            '/bin/bash is not declared in /etc/shells'

    @pytest.mark.run_on(['all'])
    def test_timezone_is_utc(self, host):
        """
        Check that the default timezone is set to UTC.
        BugZilla 1187669
        """
        assert 'UTC' in host.check_output('date'), 'Unexpected timezone. Expected to be UTC'

    @pytest.mark.pub
    @pytest.mark.run_on(['all'])
    def test_pkg_signature_and_gpg_keys(self, host):
        """
        Check that "no pkg signature" is disabled
        Check that specified gpg keys are installed
        """
        if host.system_info.distribution == 'fedora':
            num_of_gpg_keys = 1
        else:
            num_of_gpg_keys = 2

        with host.sudo():
            gpg_pubkey_cmd = "rpm -qa --qf '%{NAME}-%{VERSION}-%{RELEASE} %{SIGPGP:pgpsig}\n' | grep -v gpg-pubkey"

            gpg_pubkey_content = host.check_output(gpg_pubkey_cmd)
            assert 'none' not in gpg_pubkey_content, 'No pkg signature must be disabled'

            num_of_key_ids = host.check_output(gpg_pubkey_cmd + " | awk -F' ' '{print $NF}'|sort|uniq|wc -l")
            assert int(num_of_key_ids) == 1, 'Number of key IDs should be 1'

            assert int(host.check_output('rpm -q gpg-pubkey|wc -l')) == num_of_gpg_keys, \
                f'There should be {num_of_gpg_keys} gpg key(s) installed'

    @pytest.mark.run_on(['rhel'])
    def test_grub_config(self, host):
        """grubenv must resolve to itself, except on pre-8 EFI systems where
        it must be a link into the EFI system partition."""
        grub2_file = '/boot/grub2/grubenv'
        linked_to = grub2_file

        with host.sudo():
            if host.file('/sys/firmware/efi').exists:
                if float(host.system_info.release) < 8.0:
                    linked_to = '/boot/efi/EFI/redhat/grubenv'

            assert host.file(grub2_file).linked_to == linked_to

    @pytest.mark.run_on(['rhel'])
    def test_tty0_config(self, host):
        """
        BugZilla 1103344
        Check that "/etc/init/ttyS0.conf" and its backup file do not exist.
        """
        with host.sudo():
            assert not host.file('/etc/init/ttyS0.conf').exists, 'ttyS0.conf file should not exist'
            assert not host.file('/etc/init/ttyS0.bak').exists, 'ttyS0.conf backup file should not exist'

    @pytest.mark.run_on(['rhel'])
    def test_selinux_mode(self, host):
        """
        BugZilla 1960628
        SELinux should be in enforcing/targeted mode
        (Permissive is expected on SAP images.)
        """
        if test_lib.is_rhel_sap(host):
            expected_mode = 'Permissive'
        else:
            expected_mode = 'Enforcing'

        expected_file_config = [
            f'SELINUX={expected_mode.lower()}',
            'SELINUXTYPE=targeted'
        ]

        selinux_config_file = '/etc/sysconfig/selinux'

        with host.sudo():
            assert host.check_output('getenforce') == expected_mode, \
                f'SELinux should be in {expected_mode} mode'

            for conf in expected_file_config:
                assert host.file(selinux_config_file).contains(conf), \
                    f'Expected "{conf}" to be in {selinux_config_file}'

    @pytest.mark.run_on(['all'])
    def test_rpm_v_unsatisfied_dependencies(self, host):
        """
        Check unsatisfied dependencies of pkgs.
        """
        with host.sudo():
            assert 'Unsatisfied' not in host.run('rpm -Va').stdout, \
                'There are unsatisfied dependencies'
class TestsServices:
    """Checks on system services and their configuration files."""

    @pytest.mark.run_on(['all'])
    def test_sshd(self, host):
        """sshd must be running and password authentication disabled."""
        with host.sudo():
            sshd = host.service('sshd')
            assert sshd.is_running, 'ssh.service is not active'

            pass_auth_config_name = 'PasswordAuthentication'
            assert host.file('/etc/ssh/sshd_config').contains(f'^{pass_auth_config_name} no'), \
                f'{pass_auth_config_name} should be disabled (set to "no")'

    # TODO: verify logic, think if we should divide
    @pytest.mark.run_on(['rhel'])
    def test_auditd(self, host):
        """
        - Service should be running
        - Config files should have the correct MD5 checksums
        """
        if test_lib.is_rhel_atomic_host(host):
            pytest.skip('Not applicable to Atomic hosts')

        auditd_service = 'auditd'
        assert host.service(auditd_service).is_running, f'{auditd_service} expected to be running'

        rhel_version = float(host.system_info.release)
        checksums = self.__get_auditd_checksums_by_rhel_major_version(int(rhel_version))

        with host.sudo():
            for path, md5 in checksums.items():
                # BUGFIX: `md5sum <path>` prints "<hash>  <path>", so compare
                # only the first field; the previous comparison against the
                # whole output line could never succeed.
                actual_md5 = host.check_output(f'md5sum {path}').split()[0]
                assert md5 == actual_md5, f'Unexpected checksum for {path}'

    def __get_auditd_checksums_by_rhel_major_version(self, major_version):
        """Return the expected auditd config checksums for a RHEL major.

        BUGFIX: keys are now integers to match the int(rhel_version) value
        the caller passes in; the previous string keys ('8', '7') made
        every lookup raise KeyError.
        """
        checksums_by_version = {
            8: {
                '/etc/audit/auditd.conf': '7bfa16d314ddb8b96a61a7f617b8cca0',
                '/etc/audit/audit.rules': '795528bd4c7b4131455c15d5d49991bb'
            },
            7: {
                '/etc/audit/auditd.conf': '29f4c6cd67a4ba11395a134cf7538dbd',
                '/etc/audit/audit.rules': 'f1c2a2ef86e5db325cd2738e4aa7df2c'
            }
        }
        return checksums_by_version[major_version]

    @pytest.mark.run_on(['rhel', 'centos'])
    def test_sysconfig_kernel(self, host):
        """
        UPDATEDEFAULT=yes and DEFAULTKERNEL=kernel should be set in /etc/sysconfig/kernel
        """
        if test_lib.is_rhel_atomic_host(host):
            pytest.skip('Not run in atomic images')

        kernel_config = '/etc/sysconfig/kernel'
        with host.sudo():
            assert host.file(kernel_config).contains('UPDATEDEFAULT=yes'), \
                f'UPDATEDEFAULT should be set to `yes` in {kernel_config}'
            assert host.file(kernel_config).contains('DEFAULTKERNEL=kernel'), \
                f'DEFAULTKERNEL should be set to `kernel` in {kernel_config}'
class TestsCloudInit:
    """Checks on the cloud-init configuration shipped in the image."""

    # Main cloud-init configuration file inspected by every test below.
    cloud_cfg = '/etc/cloud/cloud.cfg'

    @pytest.mark.run_on(['all'])
    def test_growpart_is_present_in_config(self, host):
        """
        Make sure there is growpart in cloud_init_modules group in "/etc/cloud/cloud.cfg".
        BugZilla 966888
        """
        has_growpart = host.file(self.cloud_cfg).contains('- growpart')
        assert has_growpart, 'growpart must be present in cloud_init_modules'

    @pytest.mark.run_on(['rhel'])
    def test_wheel_group_not_set_to_default_user(self, host):
        """
        Make sure there is no wheel in default_user's group in "/etc/cloud/cloud.cfg".
        BugZilla 1549638
        Customer Case 01965459
        """
        mentions_wheel = host.file(self.cloud_cfg).contains('wheel')
        assert not mentions_wheel, 'wheel should not be configured as default_user group'
@pytest.mark.pub
class TestsYum:
    """Yum/DNF sanity checks for published images."""

    # TODO: confirm if this test needs to be deprecated
    @pytest.mark.run_on(['rhel', 'fedora'])
    def test_yum_repoinfo(self, host):
        """`yum repoinfo` succeeds and (outside Fedora) repos are not empty."""
        if test_lib.is_rhel_atomic_host(host):
            pytest.skip('Not applicable to RHEL Atomic host')

        yum_command = 'yum repoinfo'

        with host.sudo():
            assert host.run_test(yum_command), 'Error while getting repo info'

            if host.system_info.distribution != 'fedora':
                repo_info = host.check_output(yum_command)
                assert 'Repo-pkgs : 0' not in repo_info, \
                    'Unexpected number of repo pkgs (0)'

    @pytest.mark.run_on(['rhel'])
    def test_yum_package_install(self, host):
        """On RHUI images, a package can be searched, installed and removed."""
        with host.sudo():
            if not host.package('rhui').is_installed:
                pytest.skip('Not applicable to non-RHUI images')

            yum_steps = (
                'yum clean all',
                'yum repolist',
                'yum check-update',
                'yum search zsh',
                'yum -y install zsh',
                r"rpm -q --queryformat '%{NAME}' zsh",
                'rpm -e zsh',
            )
            # all() over a generator short-circuits on the first failing step,
            # exactly like the original chain of `and` expressions.
            assert all(host.run_test(step) for step in yum_steps), \
                'yum packages installation failed'
class TestsNetworking:
    """Basic networking configuration checks."""

    # TODO: redo test with test infra module
    @pytest.mark.run_on(['all'])
    def test_dns_resolving_works(self, host):
        """
        Check if DNS resolving works.
        """
        assert host.run_test('ping -c 5 google-public-dns-a.google.com'), \
            'Public DNS resolution did not work'

    @pytest.mark.run_on(['all'])
    def test_ipv_localhost(self, host):
        """
        Check that localhost ipv6 and ipv4 are set in /etc/hosts.
        """
        expected_hosts = ['127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4',
                          '::1 localhost localhost.localdomain localhost6 localhost6.localdomain6']
        with host.sudo():
            for expected_host in expected_hosts:
                assert host.file('/etc/hosts').contains(expected_host), \
                    '/etc/hosts does not contain ipv4 or ipv6 localhost'

    @pytest.mark.run_on(['all'])
    def test_eth0_network_adapter_setup(self, host):
        """
        Make sure that the eth0 default adapter is correctly setup:
            1. NETWORKING=yes in /etc/sysconfig/network
            2. DEVICE=eth0 in /etc/sysconfig/network-scripts/ifcfg-eth0
        """
        device_name = 'eth0'

        with host.sudo():
            assert host.file('/etc/sysconfig/network').contains('^NETWORKING=yes'), \
                'Invalid networking setup'

            device_config_path = f'/etc/sysconfig/network-scripts/ifcfg-{device_name}'

            # BUGFIX: match DEVICE=eth0 or DEVICE="eth0". The previous pattern
            # used a bracket expression ([eth0|"eth0"]), which grep treats as
            # a character class matching any single one of those characters,
            # not as an alternation — so it also matched unrelated values.
            # "\?" makes the preceding quote optional in grep's BRE syntax.
            assert host.file(device_config_path).contains(f'^DEVICE="\\?{device_name}"\\?'), \
                f'Unexpected device name. Expected: "{device_name}"'
class TestsSecurity:
    """Security-related image checks."""

    @pytest.mark.run_on(['rhel'])
    def test_firewalld_is_disabled(self, host):
        """
        firewalld is not required in cloud because there are security groups or other security mechanisms.
        """
        firewalld_installed = host.package('firewalld').is_installed
        assert not firewalld_installed, (
            'firewalld should not be installed in RHEL cloud images'
        )
| 37.836207 | 116 | 0.59877 |
6b4ff617c175f65779567fe4ac46dfa2ab53c4d5 | 2,761 | py | Python | data_preprocess.py | Chenrj233/bert_pratice | 8dab98d1670ecddb3febc0d087b5f99cc07fb7f9 | [
"Apache-2.0"
] | null | null | null | data_preprocess.py | Chenrj233/bert_pratice | 8dab98d1670ecddb3febc0d087b5f99cc07fb7f9 | [
"Apache-2.0"
] | null | null | null | data_preprocess.py | Chenrj233/bert_pratice | 8dab98d1670ecddb3febc0d087b5f99cc07fb7f9 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import logging
import re
import nltk
import gensim
import pickle
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from collections import defaultdict
from sklearn.model_selection import train_test_split
# Read data from files
# quoting=3 (csv.QUOTE_NONE) keeps embedded quote characters intact.
train = pd.read_csv("data/labeledTrainData.tsv", header=0,
                    delimiter="\t", quoting=3)
test = pd.read_csv("data/testData.tsv", header=0,
                   delimiter="\t", quoting=3)
# get the column names
print([column for column in train])
print(train.head())
print(train.shape)
print(train.iloc[0])
print(train['review'][0])
print(BeautifulSoup(train['review'][0], "lxml").get_text())
print(train['review'][0])
print(BeautifulSoup(train['review'][0], "lxml").get_text())
print(len(train))
print("len str")
# Exploratory cleaning of single reviews: strip HTML tags, then the
# surrounding double quotes, then backslash escape characters.
tmp = BeautifulSoup(train['review'][0], "lxml").get_text()
print(tmp)
print(tmp.strip('"'))
tmp = BeautifulSoup(train['review'][1], "lxml").get_text()
tmp = tmp.strip('"')
print(tmp)
tmp = tmp.replace('\\', '')
print(tmp)
# The review id column is not needed as a feature.
train = train.drop(['id'], axis=1)
print(train.head())
print(train.shape)
print(train.iloc[1])
# swap 2 columns
cols = list(train)
print(cols[0])
print(cols[1])
print(cols)
# Move 'review' to the front so the column order becomes [review, label].
cols.insert(0, cols.pop(cols.index('review')))
print(cols)
train = train.loc[:, cols]
print(train.head())
if __name__ == '__main__':
    def _clean_review(raw):
        """Strip HTML tags, surrounding quotes and backslash escapes."""
        text = BeautifulSoup(raw, "lxml").get_text()
        return text.strip('"').replace('\\', '')

    # Clean every training review in one vectorized pass. The previous loop
    # mutated cells through chained indexing (train['review'][i] = ...),
    # which pandas may apply to a temporary copy and which triggers
    # SettingWithCopyWarning.
    train['review'] = train['review'].apply(_clean_review)
    train.columns = ['sentence', 'label']
    print(train.head())

    # Stratified 75/25 train/dev split with a fixed seed for reproducibility.
    X_train, X_dev, Y_train, Y_dev = train_test_split(
        train['sentence'], train['label'], test_size=0.25,
        stratify=train['label'], random_state=1,
        # shuffle=False
    )
    train_print = pd.DataFrame({'sentence': X_train,
                                'label': Y_train})
    dev_print = pd.DataFrame({'sentence': X_dev,
                              'label': Y_dev})
    # Restore the original row order inside each split.
    train_print = train_print.sort_index()
    dev_print = dev_print.sort_index()
    print(train_print.head())
    print(dev_print.head())

    # Re-number the test ids 0..n-1 and clean the reviews the same way,
    # again avoiding per-cell chained assignment.
    test['id'] = range(len(test))
    test['review'] = test['review'].apply(_clean_review)
    test.columns = ['index', 'sentence']
    print(test.head())

    train_print.to_csv('data/train.tsv', index=False, sep='\t')
    dev_print.to_csv('data/dev.tsv', index=False, sep='\t')
    test.to_csv('data/test.tsv', index=False, sep='\t')
2498296886cec23f9957b725ff1874db9d26e1c3 | 7,515 | py | Python | Computer-vision/@PeterChenYijie yolo-tensorflow/test.py | PeterChenYijie/DeepLearningZeroToALL | 8f629e326a84a4272e66f34ba5f918576a595c70 | [
"MIT"
] | 12 | 2018-03-07T00:44:56.000Z | 2019-01-25T11:07:43.000Z | Computer-vision/@PeterChenYijie yolo-tensorflow/test.py | PeterChenYijie/DeepLearning | 8f629e326a84a4272e66f34ba5f918576a595c70 | [
"MIT"
] | 3 | 2018-03-02T03:38:41.000Z | 2018-03-20T00:45:06.000Z | Computer-vision/@PeterChenYijie yolo-tensorflow/test.py | PeterChenYijie/DeepLearning | 8f629e326a84a4272e66f34ba5f918576a595c70 | [
"MIT"
] | 7 | 2018-03-02T07:14:53.000Z | 2019-01-04T08:06:47.000Z | import tensorflow as tf
import numpy as np
import os
import cv2
import argparse
import yolo.config as cfg
from yolo.yolo_net import YOLONet
from utils.timer import Timer
class Detector(object):
    """Runs a trained YOLO (v1) checkpoint on images or camera frames.

    Detections are returned as
    ``[class_name, x_center, y_center, width, height, probability]`` lists
    in pixel coordinates of the original image.
    """

    def __init__(self, net, weight_file):
        # `net` must expose an `images` placeholder and a `logits` tensor
        # (see yolo.yolo_net.YOLONet).
        self.net = net
        self.weights_file = weight_file
        # Geometry and thresholds come from yolo.config.
        self.classes = cfg.CLASSES
        self.num_class = len(self.classes)
        self.image_size = cfg.IMAGE_SIZE
        self.cell_size = cfg.CELL_SIZE
        self.boxes_per_cell = cfg.BOXES_PER_CELL
        self.threshold = cfg.THRESHOLD
        self.iou_threshold = cfg.IOU_THRESHOLD
        # The flat network output is laid out as
        # [class probabilities | per-box confidences | box coordinates];
        # boundary1/boundary2 mark the split points between those sections.
        self.boundary1 = self.cell_size * self.cell_size * self.num_class
        self.boundary2 = self.boundary1 + self.cell_size * self.cell_size * self.boxes_per_cell
        # Fresh session with the trained weights restored into it.
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
        print ('Restoring weights from: ' + self.weights_file)
        self.saver = tf.train.Saver()
        self.saver.restore(self.sess, self.weights_file)

    def draw_result(self, img, result):
        """Draw detections on `img` in place: a green box plus a grey label
        bar showing "<class> : <score>"."""
        for i in range(len(result)):
            x = int(result[i][1])
            y = int(result[i][2])
            w = int(result[i][3] / 2)  # half-width: boxes are centre-based
            h = int(result[i][4] / 2)  # half-height
            cv2.rectangle(img, (x - w, y - h), (x + w, y + h), (0, 255, 0), 2)
            cv2.rectangle(img, (x - w, y - h - 20),
                          (x + w, y - h), (125, 125, 125), -1)
            cv2.putText(img, result[i][0] + ' : %.2f' % result[i][5], (x - w + 5, y - h - 7), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)

    def detect(self, img):
        """Run detection on one BGR image; boxes are rescaled back to the
        resolution of the original image."""
        img_h, img_w, _ = img.shape
        # Resize to the network input size and normalise pixels to [-1, 1].
        inputs = cv2.resize(img, (self.image_size, self.image_size))
        inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB).astype(np.float32)
        inputs = (inputs / 255.0) * 2.0 - 1.0
        inputs = np.reshape(inputs, (1, self.image_size, self.image_size, 3))

        result = self.detect_from_cvmat(inputs)[0]

        # Scale the (image_size-based) coordinates back to the source image.
        for i in range(len(result)):
            result[i][1] *= (1.0 * img_w / self.image_size)
            result[i][2] *= (1.0 * img_h / self.image_size)
            result[i][3] *= (1.0 * img_w / self.image_size)
            result[i][4] *= (1.0 * img_h / self.image_size)

        return result

    def detect_from_cvmat(self, inputs):
        """Run the network on a preprocessed batch; returns one detection
        list per batch element."""
        net_output = self.sess.run(self.net.logits,
                                   feed_dict={self.net.images: inputs})
        results = []
        for i in range(net_output.shape[0]):
            results.append(self.interpret_output(net_output[i]))

        return results

    def interpret_output(self, output):
        """Decode one flat output vector into thresholded, NMS-filtered
        detections (coordinates in network-input pixels)."""
        probs = np.zeros((self.cell_size, self.cell_size,
                          self.boxes_per_cell, self.num_class))
        # Split the flat vector into its three sections (see __init__).
        class_probs = np.reshape(output[0:self.boundary1], (self.cell_size, self.cell_size, self.num_class))
        scales = np.reshape(output[self.boundary1:self.boundary2], (self.cell_size, self.cell_size, self.boxes_per_cell))
        boxes = np.reshape(output[self.boundary2:], (self.cell_size, self.cell_size, self.boxes_per_cell, 4))
        # Per-cell column/row indices used to turn cell-relative x/y into
        # grid-relative coordinates.
        offset = np.transpose(np.reshape(np.array([np.arange(self.cell_size)] * self.cell_size * self.boxes_per_cell),
                                         [self.boxes_per_cell, self.cell_size, self.cell_size]), (1, 2, 0))

        boxes[:, :, :, 0] += offset
        boxes[:, :, :, 1] += np.transpose(offset, (1, 0, 2))
        boxes[:, :, :, :2] = 1.0 * boxes[:, :, :, 0:2] / self.cell_size
        # Width/height are predicted as square roots; square to recover them.
        boxes[:, :, :, 2:] = np.square(boxes[:, :, :, 2:])

        boxes *= self.image_size

        # Class-conditional score = P(class) * box confidence.
        for i in range(self.boxes_per_cell):
            for j in range(self.num_class):
                probs[:, :, i, j] = np.multiply(
                    class_probs[:, :, j], scales[:, :, i])

        # Keep only boxes whose score clears the detection threshold.
        filter_mat_probs = np.array(probs >= self.threshold, dtype='bool')
        filter_mat_boxes = np.nonzero(filter_mat_probs)
        boxes_filtered = boxes[filter_mat_boxes[0],
                               filter_mat_boxes[1], filter_mat_boxes[2]]
        probs_filtered = probs[filter_mat_probs]
        classes_num_filtered = np.argmax(filter_mat_probs, axis=3)[filter_mat_boxes[
            0], filter_mat_boxes[1], filter_mat_boxes[2]]

        # Sort by score (descending) for non-maximum suppression.
        argsort = np.array(np.argsort(probs_filtered))[::-1]
        boxes_filtered = boxes_filtered[argsort]
        probs_filtered = probs_filtered[argsort]
        classes_num_filtered = classes_num_filtered[argsort]

        # Greedy NMS: zero out any box overlapping a higher-scoring one
        # beyond the IOU threshold.
        for i in range(len(boxes_filtered)):
            if probs_filtered[i] == 0:
                continue
            for j in range(i + 1, len(boxes_filtered)):
                if self.iou(boxes_filtered[i], boxes_filtered[j]) > self.iou_threshold:
                    probs_filtered[j] = 0.0

        filter_iou = np.array(probs_filtered > 0.0, dtype='bool')
        boxes_filtered = boxes_filtered[filter_iou]
        probs_filtered = probs_filtered[filter_iou]
        classes_num_filtered = classes_num_filtered[filter_iou]

        result = []
        for i in range(len(boxes_filtered)):
            result.append([self.classes[classes_num_filtered[i]], boxes_filtered[i][0], boxes_filtered[
                i][1], boxes_filtered[i][2], boxes_filtered[i][3], probs_filtered[i]])

        return result

    def iou(self, box1, box2):
        """Intersection-over-union of two centre-based [x, y, w, h] boxes."""
        # Overlap extents along each axis; negative means no overlap.
        tb = min(box1[0] + 0.5 * box1[2], box2[0] + 0.5 * box2[2]) - \
            max(box1[0] - 0.5 * box1[2], box2[0] - 0.5 * box2[2])
        lr = min(box1[1] + 0.5 * box1[3], box2[1] + 0.5 * box2[3]) - \
            max(box1[1] - 0.5 * box1[3], box2[1] - 0.5 * box2[3])
        if tb < 0 or lr < 0:
            intersection = 0
        else:
            intersection = tb * lr
        return intersection / (box1[2] * box1[3] + box2[2] * box2[3] - intersection)

    def camera_detector(self, cap, wait=10):
        """Read frames from `cap`, run detection and display them until a
        frame can no longer be read."""
        detect_timer = Timer()
        ret, _ = cap.read()  # prime the capture; the first frame is discarded

        while ret:
            ret, frame = cap.read()
            detect_timer.tic()
            result = self.detect(frame)
            detect_timer.toc()
            print('Average detecting time: {:.3f}s'.format(detect_timer.average_time))

            self.draw_result(frame, result)
            cv2.imshow('Camera', frame)
            cv2.waitKey(wait)

            # NOTE(review): the frame read here is immediately overwritten by
            # the read at the top of the next iteration, so every other frame
            # is fetched but never processed/displayed — looks unintended;
            # confirm before relying on frame ordering.
            ret, frame = cap.read()

    def image_detector(self, imname, wait=0):
        """Run detection on a single image file and show the result window
        until a key is pressed (wait=0 blocks indefinitely)."""
        detect_timer = Timer()
        image = cv2.imread(imname)

        detect_timer.tic()
        result = self.detect(image)
        detect_timer.toc()
        print('Average detecting time: {:.3f}s'.format(detect_timer.average_time))

        self.draw_result(image, result)
        cv2.imshow('Image', image)
        cv2.waitKey(wait)
def main():
    """Parse CLI options, restore the YOLO weights, run single-image detection."""
    parser = argparse.ArgumentParser()
    # All options are plain string flags with a default.
    for flag, default in (
        ('--weights', 'YOLO_small.ckpt'),
        ('--weight_dir', 'weights'),
        ('--data_dir', 'data'),
        ('--gpu', ''),
        ('--file_name', 'person.jpg'),
    ):
        parser.add_argument(flag, default=default, type=str)
    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    yolo = YOLONet(False)
    weight_file = os.path.join(args.data_dir, args.weight_dir, args.weights)
    detector = Detector(yolo, weight_file)

    # detect from camera
    # cap = cv2.VideoCapture(-1)
    # detector.camera_detector(cap)

    # detect from image file
    imname = 'test/' + args.file_name
    detector.image_detector(imname)
# Script entry point: restore the checkpoint and detect objects in one image.
if __name__ == '__main__':
    main()
| 39.140625 | 151 | 0.589754 |
bc10b3a754b1e07f5391ee346cb0afda2918b4d2 | 1,580 | py | Python | data_files/PROGRAMS/MEDIUM/0018_4Sum.py | sudhirrd007/LeetCode-scraper | de87ff17fff2c73e67392321df1107cce7cbf883 | [
"MIT"
] | null | null | null | data_files/PROGRAMS/MEDIUM/0018_4Sum.py | sudhirrd007/LeetCode-scraper | de87ff17fff2c73e67392321df1107cce7cbf883 | [
"MIT"
] | null | null | null | data_files/PROGRAMS/MEDIUM/0018_4Sum.py | sudhirrd007/LeetCode-scraper | de87ff17fff2c73e67392321df1107cce7cbf883 | [
"MIT"
] | null | null | null | # ID : 18
# Title : 4Sum
# Difficulty : MEDIUM
# Acceptance_rate : 35.2%
# Runtime : 72 ms
# Memory : 12.7 MB
# Tags : Array , Hash Table , Two Pointers
# Language : python3
# Problem_link : https://leetcode.com/problems/4sum
# Premium : 0
# Notes : -
###
def fourSum(self, nums: List[int], target: int) -> List[List[int]]:
nums.sort()
L = len(nums)
ans = []
if(L > 2):
last = nums[-1]
else:
return []
for i in range(L-3):
if(i>0 and nums[i]==nums[i-1] or nums[i] + 3*last < target):
continue
if(4*nums[i] > target):
break
for j in range(i+1, L-2):
if(j>i+1 and nums[j] == nums[j-1] or nums[i]+nums[j]+2*last < target):
continue
if(nums[i]+3*nums[j] > target):
break
temp = nums[i] + nums[j]
start,end = j+1, L-1
while(start < end):
t = temp + nums[start] + nums[end]
if(t < target):
start += 1
elif(t > target):
end -= 1
else:
if([nums[i], nums[j], nums[end], t] not in ans):
ans.append([nums[i], nums[j], nums[start], nums[end]])
while(start < end and nums[start]==nums[start+1]):
start += 1
start += 1
end -= 1
return ans
| 31.6 | 86 | 0.401899 |
56614e3ac5c943faa110d8320e2d7880650db82d | 2,274 | py | Python | tests/fixtures/test_references_json/content_42_expected.py | elifesciences/elife-tools | ee345bf0e6703ef0f7e718355e85730abbdfd117 | [
"MIT"
] | 9 | 2015-04-16T08:13:31.000Z | 2020-05-18T14:03:06.000Z | tests/fixtures/test_references_json/content_42_expected.py | elifesciences/elife-tools | ee345bf0e6703ef0f7e718355e85730abbdfd117 | [
"MIT"
] | 310 | 2015-02-11T00:30:09.000Z | 2021-07-14T23:58:50.000Z | tests/fixtures/test_references_json/content_42_expected.py | elifesciences/elife-tools | ee345bf0e6703ef0f7e718355e85730abbdfd117 | [
"MIT"
] | 9 | 2015-02-04T01:21:28.000Z | 2021-06-15T12:50:47.000Z | from collections import OrderedDict
expected = [
OrderedDict(
[
("type", u"data"),
("id", u"bib105"),
("date", u"2015"),
(
"authors",
[
OrderedDict(
[
("type", "person"),
(
"name",
OrderedDict(
[
("preferred", u"Steinbaugh MJ"),
("index", u"Steinbaugh, MJ"),
]
),
),
]
),
OrderedDict(
[
("type", "person"),
(
"name",
OrderedDict(
[
("preferred", u"Dreyfuss JM"),
("index", u"Dreyfuss, JM"),
]
),
),
]
),
OrderedDict(
[
("type", "person"),
(
"name",
OrderedDict(
[
("preferred", u"Blackwell TK"),
("index", u"Blackwell, TK"),
]
),
),
]
),
],
),
(
"title",
u"RNA-seq analysis of germline stem cell removal and loss of SKN-1 in c. elegans",
),
("source", u"NCBI Gene Expression Omnibus"),
("dataId", u"GSE63075"),
("uri", u"http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE63075"),
]
)
]
| 34.454545 | 98 | 0.207124 |
1275c2a33b28bd60b07402c1e758790b1ee5bc96 | 12,799 | py | Python | neo4j/api.py | neo4j/neo4j-python-driver | 1c009476710ab451e55e75d28d372664253fda4e | [
"Apache-2.0"
] | 739 | 2015-05-29T21:53:27.000Z | 2022-03-24T00:13:53.000Z | aws_cloudformation/DoctorAI_Lambda_Main/neo4j/api.py | dgg32/transfer_kg | b72d2ecbb3be0543dcd1edbb6def0bf25cd20c77 | [
"MIT"
] | 264 | 2015-08-26T13:45:33.000Z | 2022-03-17T17:26:50.000Z | neo4j/api.py | neo4j/neo4j-python-driver | 1c009476710ab451e55e75d28d372664253fda4e | [
"Apache-2.0"
] | 178 | 2015-05-21T08:10:54.000Z | 2022-03-31T11:49:08.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from urllib.parse import (
urlparse,
parse_qs,
)
from.exceptions import (
DriverError,
ConfigurationError,
)
from .meta import deprecated
""" Base classes and helpers.
"""
# Session access modes.
READ_ACCESS = "READ"
WRITE_ACCESS = "WRITE"

# Internal driver kinds.
DRIVER_BOLT = "DRIVER_BOLT"
# NOTE(review): the lowercase "j" in this constant's NAME looks like a typo,
# but renaming it would break importers, so it is kept as-is.
DRIVER_NEO4j = "DRIVER_NEO4J"

# Connection security levels.
SECURITY_TYPE_NOT_SECURE = "SECURITY_TYPE_NOT_SECURE"
SECURITY_TYPE_SELF_SIGNED_CERTIFICATE = "SECURITY_TYPE_SELF_SIGNED_CERTIFICATE"
SECURITY_TYPE_SECURE = "SECURITY_TYPE_SECURE"

# Supported URI schemes.
URI_SCHEME_BOLT = "bolt"
URI_SCHEME_BOLT_SELF_SIGNED_CERTIFICATE = "bolt+ssc"
URI_SCHEME_BOLT_SECURE = "bolt+s"
URI_SCHEME_NEO4J = "neo4j"
URI_SCHEME_NEO4J_SELF_SIGNED_CERTIFICATE = "neo4j+ssc"
URI_SCHEME_NEO4J_SECURE = "neo4j+s"
URI_SCHEME_BOLT_ROUTING = "bolt+routing"

# Trust settings for server certificates.
TRUST_SYSTEM_CA_SIGNED_CERTIFICATES = "TRUST_SYSTEM_CA_SIGNED_CERTIFICATES" # Default
TRUST_ALL_CERTIFICATES = "TRUST_ALL_CERTIFICATES"

# Well-known database names.
SYSTEM_DATABASE = "system"
DEFAULT_DATABASE = None # Must be a non string hashable value
# TODO: This class is not tested
class Auth:
    """Container for auth details.

    :param scheme: specifies the type of authentication, examples: "basic",
        "kerberos"
    :type scheme: str
    :param principal: specifies who is being authenticated
    :type principal: str or None
    :param credentials: authenticates the principal
    :type credentials: str or None
    :param realm: specifies the authentication provider
    :type realm: str or None
    :param parameters: extra key word parameters passed along to the
        authentication provider
    :type parameters: Dict[str, Any]
    """

    def __init__(self, scheme, principal, credentials, realm=None, **parameters):
        self.scheme = scheme
        # Neo4j servers pre 4.4 require the principal field to always be
        # present, so an empty string is kept; only None omits the field.
        if principal is not None:
            self.principal = principal
        # Remaining attributes are only set when a truthy value was given,
        # so absent fields stay genuinely absent rather than None/empty.
        for attr_name, attr_value in (
            ("credentials", credentials),
            ("realm", realm),
            ("parameters", parameters),
        ):
            if attr_value:
                setattr(self, attr_name, attr_value)


# Historic name of the class, kept for backwards compatibility.
AuthToken = Auth
def basic_auth(user, password, realm=None):
    """Generate a basic auth token for a given user and password.
    This will set the scheme to "basic" for the auth token.
    :param user: user name, this will set the principal
    :type user: str
    :param password: current password, this will set the credentials
    :type password: str
    :param realm: specifies the authentication provider
    :type realm: str or None
    :return: auth token for use with :meth:`GraphDatabase.driver`
    :rtype: :class:`neo4j.Auth`
    """
    return Auth("basic", user, password, realm)
def kerberos_auth(base64_encoded_ticket):
    """Create a kerberos-scheme auth token from a base64 encoded ticket.

    The ticket is transmitted as the credentials; the principal is the
    empty string (pre-4.4 servers require the field to be present).

    :param base64_encoded_ticket: a base64 encoded service ticket, this will
        set the credentials
    :type base64_encoded_ticket: str
    :return: auth token for use with :meth:`GraphDatabase.driver`
    :rtype: :class:`neo4j.Auth`
    """
    return Auth("kerberos", "", base64_encoded_ticket)
def bearer_auth(base64_encoded_token):
    """Create a bearer-scheme auth token for Single-Sign-On providers.

    The token is transmitted as the credentials; no principal is sent.

    :param base64_encoded_token: a base64 encoded authentication token
        generated by a Single-Sign-On provider.
    :type base64_encoded_token: str
    :return: auth token for use with :meth:`GraphDatabase.driver`
    :rtype: :class:`neo4j.Auth`
    """
    return Auth("bearer", None, base64_encoded_token)
def custom_auth(principal, credentials, realm, scheme, **parameters):
    """Create an auth token for a custom authentication scheme.

    :param principal: specifies who is being authenticated
    :type principal: str or None
    :param credentials: authenticates the principal
    :type credentials: str or None
    :param realm: specifies the authentication provider
    :type realm: str or None
    :param scheme: specifies the type of authentication
    :type scheme: str or None
    :param parameters: extra key word parameters passed along to the
        authentication provider
    :type parameters: Dict[str, Any]
    :return: auth token for use with :meth:`GraphDatabase.driver`
    :rtype: :class:`neo4j.Auth`
    """
    return Auth(scheme, principal, credentials, realm, **parameters)
class Bookmark:
    """A Bookmark object contains an immutable list of bookmark string values.

    Falsy entries (e.g. empty strings or ``None``) are silently skipped;
    any value that is not pure ASCII raises :class:`ValueError`.

    :param values: ASCII string values
    """
    def __init__(self, *values):
        bookmarks = []
        for value in values:
            if not value:
                continue  # skip empty/None entries, matching prior behaviour
            try:
                value.encode("ascii")
            except UnicodeEncodeError as e:
                # Fixed: chain the original encoding error (`from e`) so the
                # offending character/position survives in the traceback; the
                # old code caught it as `e` but discarded it.
                raise ValueError("The value {} is not ASCII".format(value)) from e
            bookmarks.append(value)
        self._values = frozenset(bookmarks)
    def __repr__(self):
        """
        :return: repr string with sorted values
        """
        return "<Bookmark values={{{}}}>".format(", ".join(["'{}'".format(ix) for ix in sorted(self._values)]))
    def __bool__(self):
        # A bookmark is truthy iff it holds at least one value.
        return bool(self._values)
    @property
    def values(self):
        """
        :return: immutable list of bookmark string values
        :rtype: frozenset
        """
        return self._values
class ServerInfo:
    """ Represents a package of information relating to a Neo4j server.
    """
    def __init__(self, address, protocol_version):
        self._address = address
        self._protocol_version = protocol_version
        self._metadata = {}  # filled in by `update` from connection metadata
    @property
    def address(self):
        """ Network address of the remote server.
        """
        return self._address
    @property
    def protocol_version(self):
        """ Bolt protocol version with which the remote server
        communicates. This is returned as a :class:`.Version`
        object, which itself extends a simple 2-tuple of
        (major, minor) integers.
        """
        return self._protocol_version
    @property
    def agent(self):
        """ Server agent string by which the remote server identifies
        itself.
        """
        return self._metadata.get("server")
    @property
    def connection_id(self):
        """ Unique identifier for the remote server connection.
        """
        return self._metadata.get("connection_id")
    # TODO in 5.0: remove this method
    @deprecated("The version_info method is deprecated, please use "
                "ServerInfo.agent, ServerInfo.protocol_version, or "
                "call the dbms.components procedure instead")
    def version_info(self):
        """Return the server version if available.
        :return: Server Version or None
        :rtype: tuple
        :raises DriverError: if the agent string does not begin with "Neo4j/"
        .. deprecated:: 4.3
            `version_info` will be removed in version 5.0. Use
            :meth:`~ServerInfo.agent`, :meth:`~ServerInfo.protocol_version`,
            or call the `dbms.components` procedure instead.
        """
        if not self.agent:
            return None
        # Confirm that the server agent string begins with "Neo4j/" and fail
        # gracefully if not. This is intended to help prevent drivers working
        # for non-genuine Neo4j instances.
        prefix, _, value = self.agent.partition("/")
        # Fixed: the original used `assert prefix in ["Neo4j"]`, which is
        # stripped when Python runs with -O, silently disabling this check.
        if prefix != "Neo4j":
            raise DriverError("Server name does not start with Neo4j/")
        # From Bolt 4.0 onwards the protocol version doubles as the server
        # version; a TypeError means the version is not comparable.
        try:
            if self.protocol_version >= (4, 0):
                return self.protocol_version
        except TypeError:
            pass
        # Otherwise parse the agent suffix, e.g. "4.2.1-beta" -> (4, 2, 1, 'beta');
        # components that are not integers are kept as strings.
        value = value.replace("-", ".").split(".")
        for i, v in enumerate(value):
            try:
                value[i] = int(v)
            except ValueError:
                pass
        return tuple(value)
    def update(self, metadata):
        """ Update server information with extra metadata. This is
        typically drawn from the metadata received after successful
        connection initialisation.
        """
        self._metadata.update(metadata)
class Version(tuple):
    """A protocol version tuple, convertible to and from the four-byte
    big-endian-style layout used on the wire (major in the last byte,
    minor in the one before it).
    """
    def __new__(cls, *components):
        return super().__new__(cls, components)
    def __repr__(self):
        return "{}{}".format(type(self).__name__, super().__repr__())
    def __str__(self):
        return ".".join(str(component) for component in self)
    def to_bytes(self):
        # At most two components (major, minor) fit into the encoding.
        out = bytearray(4)
        for position, component in enumerate(self):
            if not 0 <= position < 2:
                raise ValueError("Too many version components")
            if isinstance(component, list):
                # A list component appears to encode a range: its first entry
                # goes in the value byte and first-minus-last in the next one.
                out[-position - 1] = int(component[0] % 0x100)
                out[-position - 2] = int((component[0] - component[-1]) % 0x100)
            else:
                out[-position - 1] = int(component % 0x100)
        return bytes(out)
    @classmethod
    def from_bytes(cls, b):
        data = bytearray(b)
        if len(data) != 4:
            raise ValueError("Byte representation must be exactly four bytes")
        if data[0] != 0 or data[1] != 0:
            raise ValueError("First two bytes must contain zero")
        # Major lives in the last byte, minor in the second-to-last.
        return Version(data[-1], data[-2])
def parse_neo4j_uri(uri):
    """Split a connection URI into ``(driver_type, security_type, parsed)``.

    :param uri: connection URI string, e.g. "neo4j+s://host:7687"
    :return: tuple of driver type constant, security type constant, and the
        :func:`urllib.parse.urlparse` result
    :raises ConfigurationError: if the URI embeds credentials, uses the
        retired ``bolt+routing`` scheme, or uses an unsupported scheme
    """
    parsed = urlparse(uri)
    if parsed.username:
        raise ConfigurationError("Username is not supported in the URI")
    if parsed.password:
        raise ConfigurationError("Password is not supported in the URI")
    if parsed.scheme == URI_SCHEME_BOLT_ROUTING:
        raise ConfigurationError("Uri scheme {!r} have been renamed. Use {!r}".format(parsed.scheme, URI_SCHEME_NEO4J))
    # Dispatch table: scheme -> (driver flavour, transport security level).
    scheme_table = {
        URI_SCHEME_BOLT: (DRIVER_BOLT, SECURITY_TYPE_NOT_SECURE),
        URI_SCHEME_BOLT_SELF_SIGNED_CERTIFICATE: (DRIVER_BOLT, SECURITY_TYPE_SELF_SIGNED_CERTIFICATE),
        URI_SCHEME_BOLT_SECURE: (DRIVER_BOLT, SECURITY_TYPE_SECURE),
        URI_SCHEME_NEO4J: (DRIVER_NEO4j, SECURITY_TYPE_NOT_SECURE),
        URI_SCHEME_NEO4J_SELF_SIGNED_CERTIFICATE: (DRIVER_NEO4j, SECURITY_TYPE_SELF_SIGNED_CERTIFICATE),
        URI_SCHEME_NEO4J_SECURE: (DRIVER_NEO4j, SECURITY_TYPE_SECURE),
    }
    if parsed.scheme not in scheme_table:
        # list(scheme_table) preserves insertion order, matching the order
        # the schemes were previously listed in this message.
        raise ConfigurationError("URI scheme {!r} is not supported. Supported URI schemes are {}. Examples: bolt://host[:port] or neo4j://host[:port][?routing_context]".format(
            parsed.scheme,
            list(scheme_table)
        ))
    driver_type, security_type = scheme_table[parsed.scheme]
    return driver_type, security_type, parsed
def check_access_mode(access_mode):
    """Validate an access mode, defaulting ``None`` to write access.

    :param access_mode: ``READ_ACCESS``, ``WRITE_ACCESS`` or ``None``
    :return: the validated access mode
    :raises ConfigurationError: for any other value
    """
    if access_mode is None:
        return WRITE_ACCESS
    if access_mode in (READ_ACCESS, WRITE_ACCESS):
        return access_mode
    raise ConfigurationError("Unsupported access mode {}".format(access_mode))
def parse_routing_context(query):
    """ Parse the query portion of a URI to generate a routing context dictionary.

    Each key must occur exactly once and carry a non-empty value.

    :param query: the raw query string (may be empty or None)
    :return: dict mapping parameter names to their single values
    :raises ConfigurationError: on duplicated keys or empty values
    """
    if not query:
        return {}
    context = {}
    for key, value_list in parse_qs(query, True).items():
        if len(value_list) != 1:
            raise ConfigurationError("Duplicated query parameters with key '%s', value '%s' found in query string '%s'" % (key, value_list, query))
        value = value_list[0]
        if not value:
            raise ConfigurationError("Invalid parameters:'%s=%s' in query string '%s'." % (key, value, query))
        context[key] = value
    return context
| 32.320707 | 176 | 0.645519 |
e7fd172132ee7727a5060aa43e5d3216cb533e52 | 246 | py | Python | longest_palindrome_substring.py | Chansazm/Project_25_LeetCode | 703c5b54b2070f234b1b0ddfd2c452083a698e9d | [
"MIT"
] | null | null | null | longest_palindrome_substring.py | Chansazm/Project_25_LeetCode | 703c5b54b2070f234b1b0ddfd2c452083a698e9d | [
"MIT"
] | null | null | null | longest_palindrome_substring.py | Chansazm/Project_25_LeetCode | 703c5b54b2070f234b1b0ddfd2c452083a698e9d | [
"MIT"
] | null | null | null | def longestpalindrome(s):
if len(s) == 1: return None
reverse = s[::-1]
result = ""
for i in s:
print(i,s[i])
# Driver code: exercise the function on a sample input.
print(longestpalindrome("babad"))
def foo(s):
    """Unfinished stub.

    NOTE(review): ``[n*0]`` evaluates to ``[0]``; presumably an n-by-n DP
    table such as ``[[0] * n for _ in range(n)]`` was intended — confirm
    before completing this function.
    """
    n = len(s)
    table = [n*0]
| 15.375 | 33 | 0.520325 |
386f84f52dd0394484ff1e625188e6b13b8f51e3 | 7,663 | py | Python | torch_geometric/data/batch.py | shashank-rv/pytorch_geometric | 3df768c4e523f5158fc10b4264ab08bc730aa61d | [
"MIT"
] | 1 | 2019-10-31T07:43:07.000Z | 2019-10-31T07:43:07.000Z | torch_geometric/data/batch.py | sdxshuai/pytorch_geometric | c066f4e515a96853c59ff82b9732eba2a25d247f | [
"MIT"
] | null | null | null | torch_geometric/data/batch.py | sdxshuai/pytorch_geometric | c066f4e515a96853c59ff82b9732eba2a25d247f | [
"MIT"
] | 1 | 2021-11-11T13:23:16.000Z | 2021-11-11T13:23:16.000Z | import torch
from torch import Tensor
from torch_sparse import SparseTensor, cat
import torch_geometric
from torch_geometric.data import Data
class Batch(Data):
    r"""A plain old python object modeling a batch of graphs as one big
    (disconnected) graph. With :class:`torch_geometric.data.Data` being the
    base class, all its methods can also be used here.
    In addition, single graphs can be reconstructed via the assignment vector
    :obj:`batch`, which maps each node to its respective graph identifier.
    """
    def __init__(self, batch=None, **kwargs):
        super(Batch, self).__init__(**kwargs)
        self.batch = batch
        self.__data_class__ = Data
        # Collation bookkeeping, populated by `from_data_list` and required
        # by `to_data_list` to undo the concatenation:
        self.__slices__ = None  # per key: cumulative sizes along the cat dim
        self.__cumsum__ = None  # per key: cumulative value offsets (from __inc__)
        self.__cat_dims__ = None  # per key: dimension used for concatenation
        self.__num_nodes_list__ = None  # per graph: original num_nodes (or None)
    @staticmethod
    def from_data_list(data_list, follow_batch=[]):
        r"""Constructs a batch object from a python list holding
        :class:`torch_geometric.data.Data` objects.
        The assignment vector :obj:`batch` is created on the fly.
        Additionally, creates assignment batch vectors for each key in
        :obj:`follow_batch`."""
        # Union of attribute keys across all graphs in the list.
        keys = [set(data.keys) for data in data_list]
        keys = list(set.union(*keys))
        assert 'batch' not in keys
        batch = Batch()
        batch.__data_class__ = data_list[0].__class__
        # Start with an empty per-key list; pieces are appended per graph
        # and concatenated at the end.
        for key in keys + ['batch']:
            batch[key] = []
        slices = {key: [0] for key in keys}
        cumsum = {key: [0] for key in keys}
        cat_dims = {}
        num_nodes_list = []
        for i, data in enumerate(data_list):
            for key in keys:
                item = data[key]
                # Increase values by `cumsum` value.
                cum = cumsum[key][-1]
                if isinstance(item, Tensor) and item.dtype != torch.bool:
                    item = item + cum if cum != 0 else item
                elif isinstance(item, SparseTensor):
                    value = item.storage.value()
                    if value is not None and value.dtype != torch.bool:
                        value = value + cum if cum != 0 else value
                    item = item.set_value(value, layout='coo')
                elif isinstance(item, (int, float)):
                    item = item + cum
                # Treat 0-dimensional tensors as 1-dimensional.
                if isinstance(item, Tensor) and item.dim() == 0:
                    item = item.unsqueeze(0)
                batch[key].append(item)
                # Gather the size of the `cat` dimension.
                size = 1
                cat_dim = data.__cat_dim__(key, data[key])
                cat_dims[key] = cat_dim
                if isinstance(item, Tensor):
                    size = item.size(cat_dim)
                elif isinstance(item, SparseTensor):
                    # For sparse tensors `cat_dim` may index several
                    # dimensions, yielding a tensor of sizes.
                    size = torch.tensor(item.sizes())[torch.tensor(cat_dim)]
                slices[key].append(size + slices[key][-1])
                inc = data.__inc__(key, item)
                if isinstance(inc, (tuple, list)):
                    inc = torch.tensor(inc)
                cumsum[key].append(inc + cumsum[key][-1])
                if key in follow_batch:
                    # Record, per element of this attribute, which graph it
                    # came from (one vector per cat dimension when `size` is
                    # a tensor, as for SparseTensor attributes).
                    if isinstance(size, Tensor):
                        for j, size in enumerate(size.tolist()):
                            tmp = f'{key}_{j}_batch'
                            batch[tmp] = [] if i == 0 else batch[tmp]
                            batch[tmp].append(
                                torch.full((size, ), i, dtype=torch.long))
                    else:
                        tmp = f'{key}_batch'
                        batch[tmp] = [] if i == 0 else batch[tmp]
                        batch[tmp].append(
                            torch.full((size, ), i, dtype=torch.long))
            if hasattr(data, '__num_nodes__'):
                num_nodes_list.append(data.__num_nodes__)
            else:
                num_nodes_list.append(None)
            # Node-to-graph assignment vector for this graph.
            num_nodes = data.num_nodes
            if num_nodes is not None:
                item = torch.full((num_nodes, ), i, dtype=torch.long)
                batch.batch.append(item)
        # Fix initial slice values:
        # (`x - x` produces a zero of the same type as the other slice
        # entries, i.e. a Python int or a 0-dim tensor.)
        for key in keys:
            slices[key][0] = slices[key][1] - slices[key][1]
        batch.batch = None if len(batch.batch) == 0 else batch.batch
        batch.__slices__ = slices
        batch.__cumsum__ = cumsum
        batch.__cat_dims__ = cat_dims
        batch.__num_nodes_list__ = num_nodes_list
        # Concatenate the per-graph pieces into one object per attribute.
        ref_data = data_list[0]
        for key in batch.keys:
            items = batch[key]
            item = items[0]
            if isinstance(item, Tensor):
                batch[key] = torch.cat(items, ref_data.__cat_dim__(key, item))
            elif isinstance(item, SparseTensor):
                batch[key] = cat(items, ref_data.__cat_dim__(key, item))
            elif isinstance(item, (int, float)):
                batch[key] = torch.tensor(items)
        if torch_geometric.is_debug_enabled():
            batch.debug()
        return batch.contiguous()
    def to_data_list(self):
        r"""Reconstructs the list of :class:`torch_geometric.data.Data` objects
        from the batch object.
        The batch object must have been created via :meth:`from_data_list` in
        order to be able to reconstruct the initial objects."""
        if self.__slices__ is None:
            raise RuntimeError(
                ('Cannot reconstruct data list from batch because the batch '
                 'object was not created using `Batch.from_data_list()`.'))
        data_list = []
        # One reconstructed Data object per graph in the batch.
        for i in range(len(list(self.__slices__.values())[0]) - 1):
            data = self.__data_class__()
            for key in self.__slices__.keys():
                item = self[key]
                # Narrow the item based on the values in `__slices__`.
                if isinstance(item, Tensor):
                    dim = self.__cat_dims__[key]
                    start = self.__slices__[key][i]
                    end = self.__slices__[key][i + 1]
                    item = item.narrow(dim, start, end - start)
                elif isinstance(item, SparseTensor):
                    # Sparse attributes were sized along several dimensions.
                    for j, dim in enumerate(self.__cat_dims__[key]):
                        start = self.__slices__[key][i][j].item()
                        end = self.__slices__[key][i + 1][j].item()
                        item = item.narrow(dim, start, end - start)
                else:
                    # Scalar attributes were collected into a 1-d tensor.
                    item = item[self.__slices__[key][i]:self.
                                __slices__[key][i + 1]]
                    item = item[0] if len(item) == 1 else item
                # Decrease its value by `cumsum` value:
                cum = self.__cumsum__[key][i]
                if isinstance(item, Tensor):
                    item = item - cum if cum != 0 else item
                elif isinstance(item, SparseTensor):
                    value = item.storage.value()
                    if value is not None and value.dtype != torch.bool:
                        value = value - cum if cum != 0 else value
                    item = item.set_value(value, layout='coo')
                elif isinstance(item, (int, float)):
                    item = item - cum
                data[key] = item
            if self.__num_nodes_list__[i] is not None:
                data.num_nodes = self.__num_nodes_list__[i]
            data_list.append(data)
        return data_list
    @property
    def num_graphs(self):
        """Returns the number of graphs in the batch."""
        # `batch` maps nodes to graph ids 0..G-1, so the last id + 1 = G.
        return self.batch[-1].item() + 1
| 40.331579 | 79 | 0.527861 |
66dd532f48d3ae447268ee737c0f2b6dc162250e | 19,519 | py | Python | utils/fairseq_mod/tests/speech_recognition/asr_test_base.py | saidineshpola/Knowledge-Distillation-Toolkit | b05ebc28ae1385c9caa1c4c1c93db2d67356e85f | [
"MIT"
] | 69 | 2021-03-27T10:28:27.000Z | 2022-03-29T07:32:02.000Z | utils/fairseq_mod/tests/speech_recognition/asr_test_base.py | saidineshpola/Knowledge-Distillation-Toolkit | b05ebc28ae1385c9caa1c4c1c93db2d67356e85f | [
"MIT"
] | 5 | 2021-05-24T08:56:59.000Z | 2021-11-19T09:21:31.000Z | utils/fairseq_mod/tests/speech_recognition/asr_test_base.py | saidineshpola/Knowledge-Distillation-Toolkit | b05ebc28ae1385c9caa1c4c1c93db2d67356e85f | [
"MIT"
] | 20 | 2021-03-27T10:30:32.000Z | 2022-03-17T17:13:41.000Z | #!/usr/bin/env python3
import argparse
import os
import unittest
from inspect import currentframe, getframeinfo
import numpy as np
import torch
from fairseq_mod.data import data_utils as fairseq_data_utils
from fairseq_mod.data.dictionary import Dictionary
from fairseq_mod.models import (
BaseFairseqModel,
FairseqDecoder,
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqEncoderModel,
FairseqModel,
)
from fairseq_mod.tasks.fairseq_task import LegacyFairseqTask
from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask
DEFAULT_TEST_VOCAB_SIZE = 100  # default symbol count used by get_dummy_dictionary()
# ///////////////////////////////////////////////////////////////////////////
# utility function to setup dummy dict/task/input
# ///////////////////////////////////////////////////////////////////////////
def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
    """Build a fairseq ``Dictionary`` pre-populated with dummy symbols.

    :param vocab_size: number of placeholder symbols ("0", "1", ...) to add
    :return: a ``Dictionary`` satisfying the requested vocabulary size
    """
    dummy_dict = Dictionary()
    # Add dummy symbols to satisfy the vocab size. (The previous loop used
    # `enumerate(range(...))`, which was redundant and shadowed the builtin
    # `id`.)
    for symbol_id in range(vocab_size):
        dummy_dict.add_symbol("{}".format(symbol_id), 1000)
    return dummy_dict
class DummyTask(LegacyFairseqTask):
    """Minimal task exposing a dummy dictionary as its target vocabulary.

    When ``args.ctc`` is truthy, a ``<ctc_blank>`` symbol is appended so the
    vocabulary can be used with CTC-style criteria.
    """
    def __init__(self, args):
        super().__init__(args)
        self.dictionary = get_dummy_dictionary()
        if getattr(self.args, "ctc", False):
            self.dictionary.add_symbol("<ctc_blank>")
        self.tgt_dict = self.dictionary
    @property
    def target_dictionary(self):
        return self.dictionary
def get_dummy_task_and_parser():
    """Create a dummy (task, parser) pair for model/criterion tests.

    Building a fairseq model requires a task and an argument parser; this
    helper wires up :class:`DummyTask` as a stand-in. To test against a
    different task, write an analogous factory for it.
    """
    parser = argparse.ArgumentParser(
        description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS
    )
    DummyTask.add_args(parser)
    dummy_args = parser.parse_args([])
    return DummyTask.setup_task(dummy_args), parser
def get_dummy_input(T=100, D=80, B=5, K=100):
    """Create a random forward-input dict for a seq2seq model.

    :param T: max source sequence length
    :param D: feature vector dimension
    :param B: batch size
    :param K: target vocabulary (dimension) size
    :return: dict with "src_tokens" of shape (B, T, D), "src_lengths" of
        shape (B,) sorted descending, and padded "prev_output_tokens"
    """
    forward_input = {}
    # T max sequence length
    # D feature vector dimension
    # B batch size
    # K target dimension size
    feature = torch.randn(B, T, D)
    # this (B, T, D) layout is just a convention, you can override it by
    # write your own _prepare_forward_input function
    src_lengths = torch.from_numpy(
        np.random.randint(low=1, high=T, size=B, dtype=np.int64)
    )
    src_lengths[0] = T  # make sure the maximum length matches
    # Draw a random-length random token sequence per batch element.
    prev_output_tokens = []
    for b in range(B):
        token_length = np.random.randint(low=1, high=src_lengths[b].item() + 1)
        tokens = np.random.randint(low=0, high=K, size=token_length, dtype=np.int64)
        prev_output_tokens.append(torch.from_numpy(tokens))
    # Pad the target-side sequences into one rectangular tensor (pad=1, eos=2).
    prev_output_tokens = fairseq_data_utils.collate_tokens(
        prev_output_tokens,
        pad_idx=1,
        eos_idx=2,
        left_pad=False,
        move_eos_to_beginning=False,
    )
    # Sort the batch by decreasing source length and reorder features to match.
    src_lengths, sorted_order = src_lengths.sort(descending=True)
    forward_input["src_tokens"] = feature.index_select(0, sorted_order)
    forward_input["src_lengths"] = src_lengths
    forward_input["prev_output_tokens"] = prev_output_tokens
    return forward_input
def get_dummy_encoder_output(encoder_out_shape=(100, 80, 5)):
    """
    This only provides an example to generate dummy encoder output

    :param encoder_out_shape: (T, B, D) shape of the fake encoder output
    :return: dict with "encoder_out" (T, B, D) float32 tensor and a (T, B)
        boolean "encoder_padding_mask"
    """
    (T, B, D) = encoder_out_shape
    encoder_out = {}
    encoder_out["encoder_out"] = torch.from_numpy(
        np.random.randn(*encoder_out_shape).astype(np.float32)
    )
    seq_lengths = torch.from_numpy(np.random.randint(low=1, high=T, size=B))
    # some dummy mask: position t in row b is True (padded) iff t >= length_b;
    # built as (B, T) and then transposed in place to (T, B) below.
    encoder_out["encoder_padding_mask"] = torch.arange(T).view(1, T).expand(
        B, -1
    ) >= seq_lengths.view(B, 1).expand(-1, T)
    encoder_out["encoder_padding_mask"].t_()
    # encoder_padding_mask is (T, B) tensor, with (t, b)-th element indicating
    # whether encoder_out[t, b] is valid (=0) or not (=1)
    return encoder_out
def _current_postion_info():
cf = currentframe()
frameinfo = " (at {}:{})".format(
os.path.basename(getframeinfo(cf).filename), cf.f_back.f_lineno
)
return frameinfo
def check_encoder_output(encoder_output, batch_size=None):
    """Validate the output of a ``FairseqEncoderModel.forward`` call.

    The output must be a dict with:
    - encoder_out: a float32 torch.Tensor
    - encoder_padding_mask: ``None`` or a binary (uint8/bool) (T, B) tensor

    :param encoder_output: value returned by the encoder forward pass
    :param batch_size: if given, the mask's size(1) must equal it
    :return: ``(True, None)`` when valid, else ``(False, message)``
    """
    if not isinstance(encoder_output, dict):
        return (
            False,
            "FairseqEncoderModel.forward(...) must be a dict"
            + _current_postion_info(),
        )
    if "encoder_out" not in encoder_output:
        return (
            False,
            "FairseqEncoderModel.forward(...) must contain encoder_out"
            + _current_postion_info(),
        )
    if "encoder_padding_mask" not in encoder_output:
        return (
            False,
            "FairseqEncoderModel.forward(...) must contain encoder_padding_mask"
            + _current_postion_info(),
        )
    out = encoder_output["encoder_out"]
    if not isinstance(out, torch.Tensor):
        return False, "encoder_out must be a torch.Tensor" + _current_postion_info()
    if out.dtype != torch.float32:
        return False, "encoder_out must have float32 dtype" + _current_postion_info()
    padding_mask = encoder_output["encoder_padding_mask"]
    if padding_mask is None:
        # A missing mask is allowed (no padding).
        return True, None
    if not isinstance(padding_mask, torch.Tensor):
        return (
            False,
            "encoder_padding_mask must be a torch.Tensor" + _current_postion_info(),
        )
    # bool masks are accepted where torch exposes a bool dtype.
    is_bool_mask = hasattr(torch, "bool") and padding_mask.dtype == torch.bool
    if padding_mask.dtype != torch.uint8 and not is_bool_mask:
        return (
            False,
            "encoder_padding_mask must have dtype of uint8"
            + _current_postion_info(),
        )
    if padding_mask.dim() != 2:
        return (
            False,
            "we expect encoder_padding_mask to be a 2-d tensor, in shape (T, B)"
            + _current_postion_info(),
        )
    if batch_size is not None and padding_mask.size(1) != batch_size:
        return (
            False,
            "we expect encoder_padding_mask to be a 2-d tensor, with size(1)"
            + " being the batch size"
            + _current_postion_info(),
        )
    return True, None
def check_decoder_output(decoder_output):
    """Validate decoder output: a 2-tuple whose first element is a Tensor.

    The second tuple element is unconstrained (reserved for future use).

    :param decoder_output: value returned by a decoder forward pass
    :return: ``(True, None)`` when valid, else ``(False, message)``
    """
    if not isinstance(decoder_output, tuple):
        return False, "FariseqDecoder output must be a tuple" + _current_postion_info()
    if len(decoder_output) != 2:
        return (
            False,
            "FairseqDecoder output must be 2-elem tuple" + _current_postion_info(),
        )
    if not isinstance(decoder_output[0], torch.Tensor):
        return (
            False,
            "FariseqDecoder output[0] must be a torch.Tensor"
            + _current_postion_info(),
        )
    return True, None
# ///////////////////////////////////////////////////////////////////////////
# Base Test class
# ///////////////////////////////////////////////////////////////////////////
class TestBaseFairseqModelBase(unittest.TestCase):
    """
    This class is used to facilitate writing unittest for any class derived from
    `BaseFairseqModel`.
    """
    @classmethod
    def setUpClass(cls):
        # Skip when the test runner collects this base class directly; only
        # concrete subclasses are meant to execute the shared tests.
        if cls is TestBaseFairseqModelBase:
            raise unittest.SkipTest("Skipping test case in base")
        super().setUpClass()
    def setUpModel(self, model):
        self.assertTrue(isinstance(model, BaseFairseqModel))
        self.model = model
    def setupInput(self):
        # NOTE(review): lowercase "setupInput" (vs "setUpInput" in sibling
        # classes) looks like a naming slip; kept for compatibility.
        pass
    def setUp(self):
        # Subclasses populate these via setUpModel / setUpInput.
        self.model = None
        self.forward_input = None
        pass
class TestFairseqEncoderDecoderModelBase(TestBaseFairseqModelBase):
    """
    base code to test FairseqEncoderDecoderModel (formerly known as
    `FairseqModel`) must be derived from this base class
    """
    @classmethod
    def setUpClass(cls):
        # Only concrete subclasses should run; skip the base class itself.
        if cls is TestFairseqEncoderDecoderModelBase:
            raise unittest.SkipTest("Skipping test case in base")
        super().setUpClass()
    def setUpModel(self, model_cls, extra_args_setters=None):
        # Build `model_cls` through its own CLI arguments and a dummy task.
        # `extra_args_setters` is an optional list of callables that mutate
        # the parsed args namespace before the model is constructed.
        self.assertTrue(
            issubclass(model_cls, (FairseqEncoderDecoderModel, FairseqModel)),
            msg="This class only tests for FairseqModel subclasses",
        )
        task, parser = get_dummy_task_and_parser()
        model_cls.add_args(parser)
        args = parser.parse_args([])
        if extra_args_setters is not None:
            for args_setter in extra_args_setters:
                args_setter(args)
        model = model_cls.build_model(args, task)
        self.model = model
    def setUpInput(self, input=None):
        self.forward_input = get_dummy_input() if input is None else input
    def setUp(self):
        super().setUp()
    def test_forward(self):
        if self.model and self.forward_input:
            forward_output = self.model.forward(**self.forward_input)
            # for FairseqEncoderDecoderModel, forward returns a tuple of two
            # elements, the first one is a Torch.Tensor
            succ, msg = check_decoder_output(forward_output)
            if not succ:
                self.assertTrue(succ, msg=msg)
            self.forward_output = forward_output
    def test_get_normalized_probs(self):
        if self.model and self.forward_input:
            forward_output = self.model.forward(**self.forward_input)
            logprob = self.model.get_normalized_probs(forward_output, log_probs=True)
            prob = self.model.get_normalized_probs(forward_output, log_probs=False)
            # in order for different models/criterion to play with each other
            # we need to know whether the logprob or prob output is batch_first
            # or not. We assume an additional attribute will be attached to logprob
            # or prob. If you find your code failed here, simply override
            # FairseqModel.get_normalized_probs, see example at
            # https://fburl.com/batch_first_example
            self.assertTrue(hasattr(logprob, "batch_first"))
            self.assertTrue(hasattr(prob, "batch_first"))
            self.assertTrue(torch.is_tensor(logprob))
            self.assertTrue(torch.is_tensor(prob))
class TestFairseqEncoderModelBase(TestBaseFairseqModelBase):
    """
    base class to test FairseqEncoderModel
    """
    @classmethod
    def setUpClass(cls):
        # Only concrete subclasses should run; skip the base class itself.
        if cls is TestFairseqEncoderModelBase:
            raise unittest.SkipTest("Skipping test case in base")
        super().setUpClass()
    def setUpModel(self, model_cls, extra_args_setters=None):
        # Build `model_cls` through its own CLI arguments and a dummy task;
        # `extra_args_setters` may mutate the parsed args before building.
        self.assertTrue(
            issubclass(model_cls, FairseqEncoderModel),
            msg="This class is only used for testing FairseqEncoderModel",
        )
        task, parser = get_dummy_task_and_parser()
        model_cls.add_args(parser)
        args = parser.parse_args([])
        if extra_args_setters is not None:
            for args_setter in extra_args_setters:
                args_setter(args)
        model = model_cls.build_model(args, task)
        self.model = model
    def setUpInput(self, input=None):
        self.forward_input = get_dummy_input() if input is None else input
        # get_dummy_input() is originally for s2s, here we delete extra dict
        # items, so it can be used for EncoderModel / Encoder as well
        self.forward_input.pop("prev_output_tokens", None)
    def setUp(self):
        super().setUp()
    def test_forward(self):
        if self.forward_input and self.model:
            bsz = self.forward_input["src_tokens"].size(0)
            forward_output = self.model.forward(**self.forward_input)
            # we expect forward_output to be a dict with the following
            # key/value pairs:
            # - encoder_out: a Torch.Tensor
            # - encoder_padding_mask: a binary Torch.Tensor
            succ, msg = check_encoder_output(forward_output, batch_size=bsz)
            if not succ:
                self.assertTrue(succ, msg=msg)
            self.forward_output = forward_output
    def test_get_normalized_probs(self):
        if self.model and self.forward_input:
            forward_output = self.model.forward(**self.forward_input)
            logprob = self.model.get_normalized_probs(forward_output, log_probs=True)
            prob = self.model.get_normalized_probs(forward_output, log_probs=False)
            # in order for different models/criterion to play with each other
            # we need to know whether the logprob or prob output is batch_first
            # or not. We assume an additional attribute will be attached to logprob
            # or prob. If you find your code failed here, simply override
            # FairseqModel.get_normalized_probs, see example at
            # https://fburl.com/batch_first_example
            self.assertTrue(hasattr(logprob, "batch_first"))
            self.assertTrue(hasattr(prob, "batch_first"))
            self.assertTrue(torch.is_tensor(logprob))
            self.assertTrue(torch.is_tensor(prob))
class TestFairseqEncoderBase(unittest.TestCase):
    """
    base class to test FairseqEncoder
    """
    @classmethod
    def setUpClass(cls):
        # Only concrete subclasses should run; skip the base class itself.
        if cls is TestFairseqEncoderBase:
            raise unittest.SkipTest("Skipping test case in base")
        super().setUpClass()
    def setUpEncoder(self, encoder):
        self.assertTrue(
            isinstance(encoder, FairseqEncoder),
            msg="This class is only used for test FairseqEncoder",
        )
        self.encoder = encoder
    def setUpInput(self, input=None):
        self.forward_input = get_dummy_input() if input is None else input
        # get_dummy_input() is originally for s2s, here we delete extra dict
        # items, so it can be used for EncoderModel / Encoder as well
        self.forward_input.pop("prev_output_tokens", None)
    def setUp(self):
        # Subclasses populate these via setUpEncoder / setUpInput.
        self.encoder = None
        self.forward_input = None
    def test_forward(self):
        if self.encoder and self.forward_input:
            bsz = self.forward_input["src_tokens"].size(0)
            forward_output = self.encoder.forward(**self.forward_input)
            succ, msg = check_encoder_output(forward_output, batch_size=bsz)
            if not succ:
                self.assertTrue(succ, msg=msg)
            self.forward_output = forward_output
class TestFairseqDecoderBase(unittest.TestCase):
    """
    base class to test FairseqDecoder
    """
    @classmethod
    def setUpClass(cls):
        # Only concrete subclasses should run; skip the base class itself.
        if cls is TestFairseqDecoderBase:
            raise unittest.SkipTest("Skipping test case in base")
        super().setUpClass()
    def setUpDecoder(self, decoder):
        self.assertTrue(
            isinstance(decoder, FairseqDecoder),
            msg="This class is only used for test FairseqDecoder",
        )
        self.decoder = decoder
    def setUpInput(self, input=None):
        # Decoder input is a (dummy) encoder output dict.
        self.forward_input = get_dummy_encoder_output() if input is None else input
    def setUpPrevOutputTokens(self, tokens=None):
        if tokens is None:
            self.encoder_input = get_dummy_input()
            self.prev_output_tokens = self.encoder_input["prev_output_tokens"]
        else:
            self.prev_output_tokens = tokens
    def setUp(self):
        # Subclasses populate these via the setUp* helpers above.
        self.decoder = None
        self.forward_input = None
        self.prev_output_tokens = None
    def test_forward(self):
        if (
            self.decoder is not None
            and self.forward_input is not None
            and self.prev_output_tokens is not None
        ):
            forward_output = self.decoder.forward(
                prev_output_tokens=self.prev_output_tokens,
                encoder_out=self.forward_input,
            )
            succ, msg = check_decoder_output(forward_output)
            if not succ:
                self.assertTrue(succ, msg=msg)
            # NOTE(review): this overwrites the *input* attribute with the
            # output — sibling classes assign `self.forward_output` here;
            # looks like a slip, kept for compatibility — confirm upstream.
            self.forward_input = forward_output
class DummyEncoderModel(FairseqEncoderModel):
    """Encoder-only wrapper around :class:`DummyEncoder`, used by criterion tests."""
    def __init__(self, encoder):
        super().__init__(encoder)
    @classmethod
    def build_model(cls, args, task):
        # Ignores args/task; always wraps a fresh DummyEncoder.
        return cls(DummyEncoder())
    def get_logits(self, net_output):
        # Inverse of sigmoid to use with BinaryCrossEntropyWithLogitsCriterion as
        # F.binary_cross_entropy_with_logits combines sigmoid and CE
        return torch.log(
            torch.div(net_output["encoder_out"], 1 - net_output["encoder_out"])
        )
    def get_normalized_probs(self, net_output, log_probs, sample=None):
        # Criterions need to know the batch layout, so mark it explicitly.
        lprobs = super().get_normalized_probs(net_output, log_probs, sample=sample)
        lprobs.batch_first = True
        return lprobs
class DummyEncoder(FairseqEncoder):
    """Identity encoder: passes `src_tokens` straight through as `encoder_out`."""
    def __init__(self):
        super().__init__(None)
    def forward(self, src_tokens, src_lengths):
        # Derive the padding mask from the true sequence lengths.
        mask, max_len = lengths_to_encoder_padding_mask(src_lengths)
        return {"encoder_out": src_tokens, "encoder_padding_mask": mask}
class CrossEntropyCriterionTestBase(unittest.TestCase):
    """Base class for testing cross-entropy-style criterions.

    Subclasses are expected to set ``criterion_cls`` (used by :meth:`setUp`).
    """
    @classmethod
    def setUpClass(cls):
        # Only concrete subclasses should run; skip the base class itself.
        if cls is CrossEntropyCriterionTestBase:
            raise unittest.SkipTest("Skipping base class test case")
        super().setUpClass()
    def setUpArgs(self):
        args = argparse.Namespace()
        args.sentence_avg = False
        args.threshold = 0.1  # to use with BinaryCrossEntropyWithLogitsCriterion
        return args
    def setUp(self):
        args = self.setUpArgs()
        self.model = DummyEncoderModel(encoder=DummyEncoder())
        # `criterion_cls` must be provided by the concrete subclass.
        self.criterion = self.criterion_cls.build_criterion(args=args, task=DummyTask(args))
    def get_src_tokens(self, correct_prediction, aggregate):
        """
        correct_prediction: True if the net_output (src_tokens) should
        predict the correct target
        aggregate: True if the criterion expects net_output (src_tokens)
        aggregated across time axis
        """
        # Class 0 is the "correct" class (see get_target below); a wrong
        # prediction puts mass on class 1 instead.
        predicted_idx = 0 if correct_prediction else 1
        if aggregate:
            src_tokens = torch.zeros((2, 2), dtype=torch.float)
            for b in range(2):
                src_tokens[b][predicted_idx] = 1.0
        else:
            src_tokens = torch.zeros((2, 10, 2), dtype=torch.float)
            for b in range(2):
                for t in range(10):
                    src_tokens[b][t][predicted_idx] = 1.0
        return src_tokens
    def get_target(self, soft_target):
        if soft_target:
            # Soft targets: a probability distribution over the 2 classes,
            # all mass on class 0.
            target = torch.zeros((2, 2), dtype=torch.float)
            for b in range(2):
                target[b][0] = 1.0
        else:
            # Hard targets: class index 0 at every timestep.
            target = torch.zeros((2, 10), dtype=torch.long)
        return target
    def get_test_sample(self, correct, soft_target, aggregate):
        # Assemble a minimal fairseq-style sample dict for the criterion.
        src_tokens = self.get_src_tokens(correct, aggregate)
        target = self.get_target(soft_target)
        L = src_tokens.size(1)
        return {
            "net_input": {"src_tokens": src_tokens, "src_lengths": torch.tensor([L])},
            "target": target,
            "ntokens": src_tokens.size(0) * src_tokens.size(1),
        }
| 34.980287 | 92 | 0.637533 |
f43cfcccac2fa24aaad7d37b8e1627fbe274769b | 5,464 | py | Python | idaes/core/phases.py | xiangao1/idaes-pse | 96315b109b3d0d753d681c47db06fff1adb5e035 | [
"RSA-MD"
] | null | null | null | idaes/core/phases.py | xiangao1/idaes-pse | 96315b109b3d0d753d681c47db06fff1adb5e035 | [
"RSA-MD"
] | 3 | 2021-07-20T20:12:59.000Z | 2022-03-09T21:06:40.000Z | idaes/core/phases.py | xiangao1/idaes-pse | 96315b109b3d0d753d681c47db06fff1adb5e035 | [
"RSA-MD"
] | 1 | 2021-08-13T15:20:31.000Z | 2021-08-13T15:20:31.000Z | #################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
IDAES Phase objects
Created on Tue Feb 18 10:54:52 2020
@author: alee
"""
from enum import Enum
from pyomo.environ import Set
from pyomo.common.config import ConfigBlock, ConfigValue
from .process_base import (declare_process_block_class,
ProcessBlockData)
# Enumerate recognised Phase types
class PhaseType(Enum):
    """Enumeration of the phase types recognised by IDAES property packages."""
    undefined = 0
    liquidPhase = 1
    vaporPhase = 2
    solidPhase = 3
    aqueousPhase = 4
# TODO: Document EoS options and parameter_Data
@declare_process_block_class("Phase")
class PhaseData(ProcessBlockData):
    """Base class for Phase blocks.

    Holds the per-phase configuration (components, equation of state) and,
    on build, registers itself in the parent property package's phase_list.
    """
    CONFIG = ConfigBlock()
    CONFIG.declare("component_list", ConfigValue(
        default=None,
        domain=list,
        description="List of components in phase",
        doc="List of components which are present in phase. This is used "
            "to construct the phase-component Set for the property package."))
    CONFIG.declare("equation_of_state", ConfigValue(
        default=None,
        description="Equation of state for phase",
        doc="""A valid Python class with the necessary methods for
            constructing the desired equation of state (or similar
            model)."""))
    CONFIG.declare("equation_of_state_options", ConfigValue(
        default=None,
        description="Options for equation of state",
        doc="""A dict or ConfigBlock of options to be used when setting
            up equation of state for phase."""))
    CONFIG.declare("parameter_data", ConfigValue(
        default={},
        domain=dict,
        description="Dict containing initialization data for parameters"))
    CONFIG.declare("_phase_list_exists", ConfigValue(
        default=False,
        doc="Internal config argument indicating whether phase_list "
        "needs to be populated."))
    def build(self):
        """Construct the Phase block and register it with the parent."""
        super(PhaseData, self).build()
        # If the phase_list does not exist, add a reference to the new Phase
        # The IF is mostly for backwards compatability, to allow for old-style
        # property packages where the phase_list already exists but we need to
        # add new Phase objects
        if not self.config._phase_list_exists:
            self.__add_to_phase_list()
    # For the base Phase class, determine phase type based on component name
    # Derived classes will overload these and return the correct type
    # This will handle backwards compatability for old-style property packages
    def is_liquid_phase(self):
        """Infer liquid-ness from the block name ("Liq" substring)."""
        if "Liq" in self.name:
            return True
        else:
            return False
    def is_solid_phase(self):
        """Infer solid-ness from the block name ("Sol" substring)."""
        if "Sol" in self.name:
            return True
        else:
            return False
    def is_vapor_phase(self):
        """Infer vapor-ness from the block name ("Vap" substring)."""
        if "Vap" in self.name:
            return True
        else:
            return False
    def is_aqueous_phase(self):
        # Returns bool indicating if this phase involve electrolytes
        return False
    def __add_to_phase_list(self):
        """
        Method to add reference to new Phase in phase_list
        """
        parent = self.parent_block()
        try:
            phase_list = getattr(parent, "phase_list")
            phase_list.add(self.local_name)
        except AttributeError:
            # Parent does not have a phase_list yet, so create one
            parent.phase_list = Set(initialize=[self.local_name],
                                    ordered=True)
@declare_process_block_class("LiquidPhase", block_class=Phase)
class LiquidPhaseData(PhaseData):
    """Phase block explicitly typed as a liquid (overrides name-based checks)."""
    def is_liquid_phase(self):
        return True
    def is_solid_phase(self):
        return False
    def is_vapor_phase(self):
        return False
@declare_process_block_class("SolidPhase", block_class=Phase)
class SolidPhaseData(PhaseData):
    """Phase block explicitly typed as a solid (overrides name-based checks)."""
    def is_liquid_phase(self):
        return False
    def is_solid_phase(self):
        return True
    def is_vapor_phase(self):
        return False
@declare_process_block_class("VaporPhase", block_class=Phase)
class VaporPhaseData(PhaseData):
    """Phase block explicitly typed as a vapor (overrides name-based checks)."""
    def is_liquid_phase(self):
        return False
    def is_solid_phase(self):
        return False
    def is_vapor_phase(self):
        return True
@declare_process_block_class("AqueousPhase", block_class=LiquidPhase)
class AqueousPhaseData(LiquidPhaseData):
    """Liquid phase involving electrolytes."""
    # Special phase type for liquid phases involving electrolytes
    # This is used to determine if we need to do the more complex component
    # list determinations
    def is_aqueous_phase(self):
        return True
# List of all Phase types to use for validation
__all_phases__ = [Phase, LiquidPhase, SolidPhase, VaporPhase, AqueousPhase]
| 33.317073 | 81 | 0.654832 |
f8cb3e325960c6f09f22f110723b17c370c3bb01 | 46,364 | py | Python | scripts/vk_validation_stats.py | cforfang/Vulkan-ValidationLayers | 5f00f84a15f6263f50798db23f764601ae9a5a93 | [
"Apache-2.0"
] | null | null | null | scripts/vk_validation_stats.py | cforfang/Vulkan-ValidationLayers | 5f00f84a15f6263f50798db23f764601ae9a5a93 | [
"Apache-2.0"
] | null | null | null | scripts/vk_validation_stats.py | cforfang/Vulkan-ValidationLayers | 5f00f84a15f6263f50798db23f764601ae9a5a93 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2021 The Khronos Group Inc.
# Copyright (c) 2015-2021 Valve Corporation
# Copyright (c) 2015-2021 LunarG, Inc.
# Copyright (c) 2015-2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Tobin Ehlis <tobine@google.com>
# Author: Dave Houlton <daveh@lunarg.com>
# Author: Shannon McPherson <shannon@lunarg.com>
import argparse
import common_codegen
import csv
import glob
import html
import json
import operator
import os
import platform
import re
import sys
import time
import unicodedata
import subprocess
from collections import defaultdict
from collections import OrderedDict
verbose_mode = False
txt_db = False
csv_db = False
html_db = False
txt_filename = "validation_error_database.txt"
csv_filename = "validation_error_database.csv"
html_filename = "validation_error_database.html"
header_filename = "vk_validation_error_messages.h"
vuid_prefixes = ['VUID-', 'UNASSIGNED-', 'kVUID_']
spirvtools_path = None # default is to not search for repo
# Hard-coded flags that could be command line args, if we decide that's useful
ignore_unassigned = True # These are not found in layer code unless they appear explicitly (most don't), so produce false positives
layer_source_files = [common_codegen.repo_relative(path) for path in [
'layers/buffer_validation.cpp',
'layers/core_validation.cpp',
'layers/descriptor_sets.cpp',
'layers/drawdispatch.cpp',
'layers/parameter_validation_utils.cpp',
'layers/object_tracker_utils.cpp',
'layers/shader_module.cpp',
'layers/shader_validation.cpp',
'layers/stateless_validation.h',
'layers/synchronization_validation.cpp',
'layers/sync_vuid_maps.cpp',
'layers/generated/parameter_validation.cpp',
'layers/generated/object_tracker.cpp',
'layers/generated/spirv_validation_helper.cpp',
'layers/generated/command_validation.cpp',
]]
test_source_files = glob.glob(os.path.join(common_codegen.repo_relative('tests'), '*.cpp'))
unassigned_vuid_files = [common_codegen.repo_relative(path) for path in [
'layers/stateless_validation.h',
'layers/core_validation_error_enums.h',
'layers/object_lifetime_validation.h'
]]
# These files should not change unless event there is a major refactoring in SPIR-V Tools
# Paths are relative from root of SPIR-V Tools repo
spirvtools_source_files = ["source/val/validation_state.cpp"]
spirvtools_test_files = ["test/val/*.cpp"]
def printHelp():
    """Print command-line usage help for this script to stdout."""
    print ("Usage:")
    print (" python vk_validation_stats.py <json_file>")
    print (" [ -c ]")
    print (" [ -todo ]")
    print (" [ -vuid <vuid_name> ]")
    print (" [ -unassigned ]")
    print (" [ -spirvtools [ <path_to_spirv_tools_repo>] ]")
    print (" [ -text [ <text_out_filename>] ]")
    print (" [ -csv [ <csv_out_filename>] ]")
    print (" [ -html [ <html_out_filename>] ]")
    print (" [ -export_header ]")
    print (" [ -summary ]")
    print (" [ -verbose ]")
    print (" [ -help ]")
    print ("\n The vk_validation_stats script parses validation layer source files to")
    print (" determine the set of valid usage checks and tests currently implemented,")
    print (" and generates coverage values by comparing against the full set of valid")
    print (" usage identifiers in the Vulkan-Headers registry file 'validusage.json'")
    print ("\nArguments: ")
    print (" <json-file> (required) registry file 'validusage.json'")
    print (" -c report consistency warnings")
    print (" -todo report unimplemented VUIDs")
    print (" -vuid <vuid_name> report status of individual VUID <vuid_name>")
    print (" -unassigned report unassigned VUIDs")
    print (" -spirvtools [path] when pointed to root directory of SPIRV-Tools repo, will search")
    print (" the repo for VUs that are implemented there")
    print (" -text [filename] output the error database text to <text_database_filename>,")
    print (" defaults to 'validation_error_database.txt'")
    print (" -csv [filename] output the error database in csv to <csv_database_filename>,")
    print (" defaults to 'validation_error_database.csv'")
    print (" -html [filename] output the error database in html to <html_database_filename>,")
    print (" defaults to 'validation_error_database.html'")
    print (" -export_header export a new VUID error text header file to <%s>" % header_filename)
    print (" -summary output summary of VUID coverage")
    print (" -verbose show your work (to stdout)")
class ValidationJSON:
    """Parser for the Vulkan registry VUID database ('validusage.json').

    Populates explicit/implicit VUID sets and a VUID -> metadata-dict map.
    """
    def __init__(self, filename):
        self.filename = filename
        self.explicit_vuids = set()
        self.implicit_vuids = set()
        self.all_vuids = set()
        self.vuid_db = defaultdict(list) # Maps VUID string to list of json-data dicts
        self.apiversion = ""
        self.duplicate_vuids = set()
        # A set of specific regular expression substitutions needed to clean up VUID text
        self.regex_dict = {}
        self.regex_dict[re.compile('<.*?>|&(amp;)+lt;|&(amp;)+gt;')] = ""
        self.regex_dict[re.compile(r'\\\(codeSize \\over 4\\\)')] = "(codeSize/4)"
        self.regex_dict[re.compile(r'\\\(\\lceil\{\\mathit\{rasterizationSamples} \\over 32}\\rceil\\\)')] = "(rasterizationSamples/32)"
        self.regex_dict[re.compile(r'\\\(\\left\\lceil{\\frac{maxFramebufferWidth}{minFragmentDensityTexelSize_{width}}}\\right\\rceil\\\)')] = "the ceiling of maxFramebufferWidth/minFragmentDensityTexelSize.width"
        self.regex_dict[re.compile(r'\\\(\\left\\lceil{\\frac{maxFramebufferHeight}{minFragmentDensityTexelSize_{height}}}\\right\\rceil\\\)')] = "the ceiling of maxFramebufferHeight/minFragmentDensityTexelSize.height"
        self.regex_dict[re.compile(r'\\\(\\left\\lceil{\\frac{width}{maxFragmentDensityTexelSize_{width}}}\\right\\rceil\\\)')] = "the ceiling of width/maxFragmentDensityTexelSize.width"
        self.regex_dict[re.compile(r'\\\(\\left\\lceil{\\frac{height}{maxFragmentDensityTexelSize_{height}}}\\right\\rceil\\\)')] = "the ceiling of height/maxFragmentDensityTexelSize.height"
        self.regex_dict[re.compile(r'\\\(\\textrm\{codeSize} \\over 4\\\)')] = "(codeSize/4)"
        # Regular expression for characters outside ascii range
        self.unicode_regex = re.compile('[^\x00-\x7f]')
        # Mapping from unicode char to ascii approximation
        self.unicode_dict = {
            '\u002b' : '+', # PLUS SIGN
            '\u00b4' : "'", # ACUTE ACCENT
            '\u200b' : '', # ZERO WIDTH SPACE
            '\u2018' : "'", # LEFT SINGLE QUOTATION MARK
            '\u2019' : "'", # RIGHT SINGLE QUOTATION MARK
            '\u201c' : '"', # LEFT DOUBLE QUOTATION MARK
            '\u201d' : '"', # RIGHT DOUBLE QUOTATION MARK
            '\u2026' : '...',# HORIZONTAL ELLIPSIS
            '\u2032' : "'", # PRIME
            '\u2192' : '->', # RIGHTWARDS ARROW
        }

    def sanitize(self, text, location):
        """Return *text* cleaned of markup, HTML entities and non-ASCII chars.

        *location* is only used to label warnings about unknown characters.
        """
        # Strip leading/trailing whitespace
        text = text.strip()
        # Apply regex text substitutions
        for regex, replacement in self.regex_dict.items():
            text = re.sub(regex, replacement, text)
        # Un-escape html entity codes, ie &#XXXX;
        text = html.unescape(text)
        # Apply unicode substitutions
        for unicode in self.unicode_regex.findall(text):
            try:
                # Replace known chars
                text = text.replace(unicode, self.unicode_dict[unicode])
            except KeyError:
                # Strip and warn on unrecognized chars
                text = text.replace(unicode, '')
                name = unicodedata.name(unicode, 'UNKNOWN')
                print('Warning: Unknown unicode character \\u{:04x} ({}) at {}'.format(ord(unicode), name, location))
        return text

    def read(self):
        """Load and parse validusage.json, populating the VUID sets and db.

        Exits the process with -1 on load or structural parse failure.
        """
        self.json_dict = {}
        if os.path.isfile(self.filename):
            # BUG FIX: use a context manager so the handle is closed even if
            # json.load raises (was open()/close() with no try/finally).
            with open(self.filename, 'r', encoding='utf-8') as json_file:
                self.json_dict = json.load(json_file, object_pairs_hook=OrderedDict)
        if len(self.json_dict) == 0:
            print("Error: Error loading validusage.json file <%s>" % self.filename)
            sys.exit(-1)
        try:
            version = self.json_dict['version info']
            validation = self.json_dict['validation']
            self.apiversion = version['api version']
        except KeyError:
            # Narrowed from a bare 'except:': only missing keys are expected
            # here; a bare except would also swallow KeyboardInterrupt etc.
            print("Error: Failure parsing validusage.json object")
            sys.exit(-1)
        # Parse vuid from json into local databases
        for apiname in validation.keys():
            apidict = validation[apiname]
            for ext in apidict.keys():
                vlist = apidict[ext]
                for ventry in vlist:
                    vuid_string = ventry['vuid']
                    if (vuid_string[-5:-1].isdecimal()):
                        self.explicit_vuids.add(vuid_string) # explicit end in 5 numeric chars
                        vtype = 'explicit'
                    else:
                        self.implicit_vuids.add(vuid_string) # otherwise, implicit
                        vtype = 'implicit'
                    vuid_text = self.sanitize(ventry['text'], vuid_string)
                    self.vuid_db[vuid_string].append({'api':apiname, 'ext':ext, 'type':vtype, 'text':vuid_text})
        self.all_vuids = self.explicit_vuids | self.implicit_vuids
        self.duplicate_vuids = set({v for v in self.vuid_db if len(self.vuid_db[v]) > 1})
        if len(self.duplicate_vuids) > 0:
            print("Warning: duplicate VUIDs found in validusage.json")
def buildKvuidDict():
    """Map kVUID_* constant names to their UNASSIGNED-* string values.

    Scans the unassigned-VUID header files for assignments of the form
    'kVUID_Foo = "UNASSIGNED-...";', skipping //- and /*-style comment lines.
    """
    kvuid_dict = {}
    for uf in unassigned_vuid_files:
        with open(uf) as f:
            for line in f:
                # Skip whole-line comments
                if line.strip().startswith(('//', '/*')):
                    continue
                # BUG FIX (cleanup): removed a dead 'line_num' counter that was
                # incremented but never read, and a redundant assert on find().
                kvuid_pos = line.find('kVUID_')
                if kvuid_pos < 0:
                    continue
                eq_pos = line.find('=', kvuid_pos)
                if eq_pos >= 0:
                    kvuid = line[kvuid_pos:eq_pos].strip(' \t\n;"')
                    unassigned_str = line[eq_pos+1:].strip(' \t\n;"')
                    kvuid_dict[kvuid] = unassigned_str
    return kvuid_dict
class ValidationSource:
    """Scans layer source files and records every VUID referenced in them."""
    def __init__(self, source_file_list):
        self.source_files = source_file_list
        self.vuid_count_dict = {} # dict of vuid values to the count of how much they're used, and location of where they're used
        self.duplicated_checks = 0
        self.explicit_vuids = set()
        self.implicit_vuids = set()
        self.unassigned_vuids = set()
        self.all_vuids = set()
    def parse(self, spirv_val):
        """Populate the VUID sets/counts from the source files.

        If *spirv_val* is enabled, its repo files are scanned too and VUIDs
        found there are also recorded on the SpirvValidation object.
        """
        kvuid_dict = buildKvuidDict()
        if spirv_val and spirv_val.enabled:
            self.source_files.extend(spirv_val.source_files)
        # build self.vuid_count_dict
        # 'prepend' carries a VUID string literal that clang-format broke
        # across two source lines; it is joined with the following line.
        prepend = None
        for sf in self.source_files:
            # NOTE(review): spirv_val.enabled is accessed here without the
            # 'spirv_val and' guard used above — assumes spirv_val is never
            # None; confirm against callers.
            spirv_file = True if spirv_val.enabled and sf.startswith(spirv_val.repo_path) else False
            line_num = 0
            with open(sf, encoding='utf-8') as f:
                for line in f:
                    line_num = line_num + 1
                    if True in [line.strip().startswith(comment) for comment in ['//', '/*']]:
                        # Comment lines are only considered when they mention a
                        # VUID and are not TODO markers.
                        if 'VUID-' not in line or 'TODO:' in line:
                            continue
                    # Find vuid strings
                    if prepend is not None:
                        line = prepend[:-2] + line.lstrip().lstrip('"') # join lines skipping CR, whitespace and trailing/leading quote char
                        prepend = None
                    if any(prefix in line for prefix in vuid_prefixes):
                        # Replace the '(' of lines containing validation helper functions with ' ' to make them easier to parse
                        line = line.replace("(", " ")
                        line_list = line.split()
                        # A VUID string that has been broken by clang will start with a vuid prefix and end with -, and will be last in the list
                        broken_vuid = line_list[-1].strip('"')
                        if any(broken_vuid.startswith(prefix) for prefix in vuid_prefixes) and broken_vuid.endswith('-'):
                            prepend = line
                            continue
                        vuid_list = []
                        # NOTE: 'str' here shadows the builtin; it is a token
                        # from the split source line.
                        for str in line_list:
                            if any(prefix in str for prefix in vuid_prefixes):
                                vuid_list.append(str.strip(',);{}"*'))
                        for vuid in vuid_list:
                            # Resolve kVUID_ constants to their UNASSIGNED- strings
                            if vuid.startswith('kVUID_'): vuid = kvuid_dict[vuid]
                            if vuid not in self.vuid_count_dict:
                                self.vuid_count_dict[vuid] = {}
                                self.vuid_count_dict[vuid]['count'] = 1
                                self.vuid_count_dict[vuid]['file_line'] = []
                                self.vuid_count_dict[vuid]['spirv'] = False # default
                            else:
                                if self.vuid_count_dict[vuid]['count'] == 1: # only count first time duplicated
                                    self.duplicated_checks = self.duplicated_checks + 1
                                self.vuid_count_dict[vuid]['count'] = self.vuid_count_dict[vuid]['count'] + 1
                            self.vuid_count_dict[vuid]['file_line'].append('%s,%d' % (sf, line_num))
                            if spirv_file:
                                self.vuid_count_dict[vuid]['spirv'] = True
        # Sort vuids by type
        for vuid in self.vuid_count_dict.keys():
            if (vuid.startswith('VUID-')):
                if (vuid[-5:-1].isdecimal()):
                    self.explicit_vuids.add(vuid) # explicit end in 5 numeric chars
                    if self.vuid_count_dict[vuid]['spirv']:
                        spirv_val.source_explicit_vuids.add(vuid)
                else:
                    self.implicit_vuids.add(vuid)
                    if self.vuid_count_dict[vuid]['spirv']:
                        spirv_val.source_implicit_vuids.add(vuid)
            elif (vuid.startswith('UNASSIGNED-')):
                self.unassigned_vuids.add(vuid)
            else:
                print("Unable to categorize VUID: %s" % vuid)
                print("Confused while parsing VUIDs in layer source code - cannot proceed. (FIXME)")
                exit(-1)
        self.all_vuids = self.explicit_vuids | self.implicit_vuids | self.unassigned_vuids
        # NOTE(review): 'spirv_file' here is the value leaked from the last
        # loop iteration above — i.e. whether the *last* scanned file was a
        # SPIR-V repo file. Looks accidental; confirm intent.
        if spirv_file:
            spirv_val.source_all_vuids = spirv_val.source_explicit_vuids | spirv_val.source_implicit_vuids
# Class to parse the validation layer test source and store testnames
class ValidationTests:
    """Scans layer test files and maps each VUID to the tests that use it."""
    # NOTE(review): mutable default argument; safe only because it is never
    # mutated (only iterated) — confirm before adding mutation.
    def __init__(self, test_file_list, test_group_name=['VkLayerTest', 'VkPositiveLayerTest', 'VkWsiEnabledLayerTest', 'VkBestPracticesLayerTest']):
        self.test_files = test_file_list
        self.test_trigger_txt_list = []
        for tg in test_group_name:
            self.test_trigger_txt_list.append('TEST_F(%s' % tg)
        self.explicit_vuids = set()
        self.implicit_vuids = set()
        self.unassigned_vuids = set()
        self.all_vuids = set()
        #self.test_to_vuids = {} # Map test name to VUIDs tested
        self.vuid_to_tests = defaultdict(set) # Map VUIDs to set of test names where implemented
    # Parse test files into internal data struct
    def parse(self, spirv_val):
        """Populate vuid_to_tests and the VUID sets from the test files.

        If *spirv_val* is enabled, its repo test files are scanned too.
        """
        kvuid_dict = buildKvuidDict()
        if spirv_val and spirv_val.enabled:
            self.test_files.extend(spirv_val.test_files)
        # For each test file, parse test names into set
        grab_next_line = False # handle testname on separate line than wildcard
        testname = ''
        # 'prepend' carries a VUID string literal broken across two lines.
        prepend = None
        for test_file in self.test_files:
            spirv_file = True if spirv_val.enabled and test_file.startswith(spirv_val.repo_path) else False
            with open(test_file) as tf:
                for line in tf:
                    if True in [line.strip().startswith(comment) for comment in ['//', '/*']]:
                        continue
                    # if line ends in a broken VUID string, fix that before proceeding
                    if prepend is not None:
                        line = prepend[:-2] + line.lstrip().lstrip('"') # join lines skipping CR, whitespace and trailing/leading quote char
                        prepend = None
                    if any(prefix in line for prefix in vuid_prefixes):
                        line_list = line.split()
                        # A VUID string that has been broken by clang will start with a vuid prefix and end with -, and will be last in the list
                        broken_vuid = line_list[-1].strip('"')
                        if any(broken_vuid.startswith(prefix) for prefix in vuid_prefixes) and broken_vuid.endswith('-'):
                            prepend = line
                            continue
                    if any(ttt in line for ttt in self.test_trigger_txt_list):
                        # TEST_F(<group>, <name>) — pull the test name out
                        testname = line.split(',')[-1]
                        testname = testname.strip().strip(' {)')
                        if ('' == testname):
                            grab_next_line = True
                            continue
                        #self.test_to_vuids[testname] = []
                    if grab_next_line: # test name on its own line
                        grab_next_line = False
                        testname = testname.strip().strip(' {)')
                        #self.test_to_vuids[testname] = []
                    if any(prefix in line for prefix in vuid_prefixes):
                        line_list = re.split('[\s{}[\]()"]+',line)
                        for sub_str in line_list:
                            if any(prefix in sub_str for prefix in vuid_prefixes):
                                vuid_str = sub_str.strip(',);:"*')
                                # Resolve kVUID_ constants to UNASSIGNED- strings
                                if vuid_str.startswith('kVUID_'): vuid_str = kvuid_dict[vuid_str]
                                self.vuid_to_tests[vuid_str].add(testname)
                                #self.test_to_vuids[testname].append(vuid_str)
                                if (vuid_str.startswith('VUID-')):
                                    if (vuid_str[-5:-1].isdecimal()):
                                        self.explicit_vuids.add(vuid_str) # explicit end in 5 numeric chars
                                        if spirv_file:
                                            spirv_val.test_explicit_vuids.add(vuid_str)
                                    else:
                                        self.implicit_vuids.add(vuid_str)
                                        if spirv_file:
                                            spirv_val.test_implicit_vuids.add(vuid_str)
                                elif (vuid_str.startswith('UNASSIGNED-')):
                                    self.unassigned_vuids.add(vuid_str)
                                else:
                                    print("Unable to categorize VUID: %s" % vuid_str)
                                    print("Confused while parsing VUIDs in test code - cannot proceed. (FIXME)")
                                    exit(-1)
        self.all_vuids = self.explicit_vuids | self.implicit_vuids | self.unassigned_vuids
# Class to do consistency checking
#
class Consistency:
    """Cross-checks between the JSON VUID set, layer-source checks and tests."""
    def __init__(self, all_json, all_checks, all_tests):
        self.valid = all_json
        self.checks = all_checks
        self.tests = all_tests

    def _report_undefined(self, undef_set, message):
        """Shared filter/report helper for the three consistency checks.

        Drops 'VUID-Undefined' (and UNASSIGNED-* when ignore_unassigned is
        set), prints the remaining VUIDs under *message* (a %d format), and
        returns True when the set is empty, i.e. the check passed.
        """
        undef_set.discard('VUID-Undefined') # don't report Undefined
        if ignore_unassigned:
            unassigned = set({uv for uv in undef_set if uv.startswith('UNASSIGNED-')})
            undef_set = undef_set - unassigned
        if (len(undef_set) > 0):
            print(message % len(undef_set))
            undef = list(undef_set)
            undef.sort()
            for vuid in undef:
                print(" %s" % vuid)
            return False
        return True

    # Report undefined VUIDs in source code
    def undef_vuids_in_layer_code(self):
        # BUG FIX (cleanup): the three methods were near-duplicates and two of
        # them assigned a dead local 'ok = False'; logic is now shared.
        return self._report_undefined(
            self.checks - self.valid,
            "\nFollowing VUIDs found in layer code are not defined in validusage.json (%d):")

    # Report undefined VUIDs in tests
    def undef_vuids_in_tests(self):
        return self._report_undefined(
            self.tests - self.valid,
            "\nFollowing VUIDs found in layer tests are not defined in validusage.json (%d):")

    # Report vuids in tests that are not in source
    def vuids_tested_not_checked(self):
        return self._report_undefined(
            self.tests - self.checks,
            "\nFollowing VUIDs found in tests but are not checked in layer code (%d):")
# TODO: Explicit checked VUIDs which have no test
# def explicit_vuids_checked_not_tested(self):
# Class to output database in various flavors
#
class OutputDatabase:
    """Emits the VUID coverage database as text, csv, html, or a C header."""
    def __init__(self, val_json, val_source, val_tests, spirv_val):
        self.vj = val_json
        self.vs = val_source
        self.vt = val_tests
        self.sv = spirv_val
        self.header_version = "/* THIS FILE IS GENERATED - DO NOT EDIT (scripts/vk_validation_stats.py) */"
        self.header_version += "\n/* Vulkan specification version: %s */" % val_json.apiversion
        self.header_preamble = """
/*
 * Vulkan
 *
 * Copyright (c) 2016-2021 Google Inc.
 * Copyright (c) 2016-2021 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Dave Houlton <daveh@lunarg.com>
 */

#pragma once

// Disable auto-formatting for generated file
// clang-format off

// Mapping from VUID string to the corresponding spec text
typedef struct _vuid_spec_text_pair {
    const char * vuid;
    const char * spec_text;
    const char * url_id;
} vuid_spec_text_pair;

static const vuid_spec_text_pair vuid_spec_text[] = {
"""
        self.header_postamble = """};
"""
    def dump_txt(self, only_unimplemented = False):
        """Write the pipe-delimited text database (optionally only unimplemented VUIDs)."""
        print("\n Dumping database to text file: %s" % txt_filename)
        with open (txt_filename, 'w') as txt:
            txt.write("## VUID Database\n")
            txt.write("## Format: VUID_NAME | CHECKED | SPIRV-TOOL | TEST | TYPE | API/STRUCT | EXTENSION | VUID_TEXT\n##\n")
            vuid_list = list(self.vj.all_vuids)
            vuid_list.sort()
            for vuid in vuid_list:
                db_list = self.vj.vuid_db[vuid]
                db_list.sort(key=operator.itemgetter('ext')) # sort list to ease diffs of output file
                for db_entry in db_list:
                    checked = 'N'
                    spirv = 'N'
                    if vuid in self.vs.all_vuids:
                        if only_unimplemented:
                            continue
                        else:
                            checked = 'Y'
                            if vuid in self.sv.source_all_vuids:
                                spirv = 'Y'
                    test = 'None'
                    if vuid in self.vt.vuid_to_tests:
                        test_list = list(self.vt.vuid_to_tests[vuid])
                        test_list.sort()   # sort tests, for diff-ability
                        sep = ', '
                        test = sep.join(test_list)
                    # BUG FIX: emit columns in the order declared by the header
                    # above (CHECKED | SPIRV-TOOL | TEST); previously the test
                    # and spirv columns were swapped (csv/html were correct).
                    txt.write("%s | %s | %s | %s | %s | %s | %s | %s\n" % (vuid, checked, spirv, test, db_entry['type'], db_entry['api'], db_entry['ext'], db_entry['text']))
    def dump_csv(self, only_unimplemented = False):
        """Write the csv database (optionally only unimplemented VUIDs)."""
        print("\n Dumping database to csv file: %s" % csv_filename)
        with open (csv_filename, 'w', newline='') as csvfile:
            cw = csv.writer(csvfile)
            cw.writerow(['VUID_NAME','CHECKED','SPIRV-TOOL', 'TEST','TYPE','API/STRUCT','EXTENSION','VUID_TEXT'])
            vuid_list = list(self.vj.all_vuids)
            vuid_list.sort()
            for vuid in vuid_list:
                for db_entry in self.vj.vuid_db[vuid]:
                    row = [vuid]
                    if vuid in self.vs.all_vuids:
                        if only_unimplemented:
                            continue
                        else:
                            row.append('Y')    # checked
                            if vuid in self.sv.source_all_vuids:
                                row.append('Y') # spirv-tool
                            else:
                                row.append('N') # spirv-tool
                    else:
                        row.append('N') # checked
                        row.append('N') # spirv-tool
                    test = 'None'
                    if vuid in self.vt.vuid_to_tests:
                        sep = ', '
                        test = sep.join(self.vt.vuid_to_tests[vuid])
                    row.append(test)
                    row.append(db_entry['type'])
                    row.append(db_entry['api'])
                    row.append(db_entry['ext'])
                    row.append(db_entry['text'])
                    cw.writerow(row)
    def dump_html(self, only_unimplemented = False):
        """Write the html database (optionally only unimplemented VUIDs)."""
        print("\n Dumping database to html file: %s" % html_filename)
        preamble = '<!DOCTYPE html>\n<html>\n<head>\n<style>\ntable, th, td {\n border: 1px solid black;\n border-collapse: collapse; \n}\n</style>\n<body>\n<h2>Valid Usage Database</h2>\n<font size="2" face="Arial">\n<table style="width:100%">\n'
        headers = '<tr><th>VUID NAME</th><th>CHECKED</th><th>SPIRV-TOOL</th><th>TEST</th><th>TYPE</th><th>API/STRUCT</th><th>EXTENSION</th><th>VUID TEXT</th></tr>\n'
        with open (html_filename, 'w') as hfile:
            hfile.write(preamble)
            hfile.write(headers)
            vuid_list = list(self.vj.all_vuids)
            vuid_list.sort()
            for vuid in vuid_list:
                for db_entry in self.vj.vuid_db[vuid]:
                    checked = '<span style="color:red;">N</span>'
                    spirv = ''
                    if vuid in self.vs.all_vuids:
                        if only_unimplemented:
                            continue
                        else:
                            checked = '<span style="color:limegreen;">Y</span>'
                            if vuid in self.sv.source_all_vuids:
                                spirv = 'Y'
                    hfile.write('<tr><th>%s</th>' % vuid)
                    hfile.write('<th>%s</th>' % checked)
                    hfile.write('<th>%s</th>' % spirv)
                    test = 'None'
                    if vuid in self.vt.vuid_to_tests:
                        sep = ', '
                        test = sep.join(self.vt.vuid_to_tests[vuid])
                    hfile.write('<th>%s</th>' % test)
                    hfile.write('<th>%s</th>' % db_entry['type'])
                    hfile.write('<th>%s</th>' % db_entry['api'])
                    hfile.write('<th>%s</th>' % db_entry['ext'])
                    hfile.write('<th>%s</th></tr>\n' % db_entry['text'])
            hfile.write('</table>\n</body>\n</html>\n')
    # make list of spec versions containing given VUID
    @staticmethod
    def make_vuid_spec_version_list(pattern, max_minor_version):
        """Resolve a validusage 'ext' pattern to the spec editions containing it.

        Returns a list of {'version', 'ext', 'khr'} dicts, newest edition first.
        """
        assert pattern

        all_editions_list = []
        for e in reversed(range(max_minor_version+1)):
            all_editions_list.append({"version": e, "ext": True, "khr" : False})
            all_editions_list.append({"version": e, "ext": False, "khr" : True})
            all_editions_list.append({"version": e, "ext": False, "khr" : False})

        if pattern == 'core':
            return all_editions_list

        # pattern is series of parentheses separated by plus
        # each parentheses can be prepended by negation (!)
        # each parentheses contains list of extensions or vk versions separated by either comma or plus
        edition_list_out = []
        for edition in all_editions_list:
            resolved_pattern = True

            raw_terms = re.split(r'\)\+', pattern)
            for raw_term in raw_terms:
                negated = raw_term.startswith('!')
                term = raw_term.lstrip('!(').rstrip(')')
                conjunction = '+' in term
                disjunction = ',' in term
                assert not (conjunction and disjunction)
                if conjunction: features = term.split('+')
                elif disjunction: features = term.split(',')
                else: features = [term]
                assert features

                def isDefined(feature, edition):
                    def getVersion(f): return int(f.replace('VK_VERSION_1_', '', 1))
                    def isVersion(f): return f.startswith('VK_VERSION_') and feature != 'VK_VERSION_1_0' and getVersion(feature) < 1024
                    def isExtension(f): return f.startswith('VK_') and not isVersion(f)
                    def isKhr(f): return f.startswith('VK_KHR_')

                    assert isExtension(feature) or isVersion(feature)

                    if isVersion(feature) and getVersion(feature) <= edition['version']: return True
                    elif isExtension(feature) and edition['ext']: return True
                    elif isKhr(feature) and edition['khr']: return True
                    else: return False

                if not negated and (conjunction or (not conjunction and not disjunction)): # all defined
                    resolved_term = True
                    for feature in features:
                        if not isDefined(feature, edition): resolved_term = False
                elif negated and conjunction: # at least one not defined
                    resolved_term = False
                    for feature in features:
                        if not isDefined(feature, edition): resolved_term = True
                elif not negated and disjunction: # at least one defined
                    resolved_term = False
                    for feature in features:
                        if isDefined(feature, edition): resolved_term = True
                elif negated and (disjunction or (not conjunction and not disjunction)): # none defined
                    resolved_term = True
                    for feature in features:
                        if isDefined(feature, edition): resolved_term = False

                resolved_pattern = resolved_pattern and resolved_term
            if resolved_pattern: edition_list_out.append(edition)
        return edition_list_out
    def export_header(self):
        """Generate the vuid_spec_text C header mapping VUIDs to spec text/URL ids."""
        if verbose_mode:
            print("\n Exporting header file to: %s" % header_filename)
        with open (header_filename, 'w', newline='\n') as hfile:
            hfile.write(self.header_version)
            hfile.write(self.header_preamble)
            vuid_list = list(self.vj.all_vuids)
            vuid_list.sort()
            minor_version = int(self.vj.apiversion.split('.')[1])

            for vuid in vuid_list:
                db_entry = self.vj.vuid_db[vuid][0]

                spec_list = self.make_vuid_spec_version_list(db_entry['ext'], minor_version)

                if not spec_list: spec_url_id = 'default'
                elif spec_list[0]['ext']: spec_url_id = '1.%s-extensions' % spec_list[0]['version']
                elif spec_list[0]['khr']: spec_url_id = '1.%s-khr-extensions' % spec_list[0]['version']
                else: spec_url_id = '1.%s' % spec_list[0]['version']

                # Escape quotes and backslashes when generating C strings for source code
                db_text = db_entry['text'].replace('\\', '\\\\').replace('"', '\\"')
                hfile.write('    {"%s", "%s", "%s"},\n' % (vuid, db_text, spec_url_id))
                # For multiply-defined VUIDs, include versions with extension appended
                if len(self.vj.vuid_db[vuid]) > 1:
                    print('Warning: Found a duplicate VUID: %s' % vuid)
            hfile.write(self.header_postamble)
class SpirvValidation:
def __init__(self, repo_path):
self.enabled = (repo_path != None)
self.repo_path = repo_path
self.version = 'unknown'
self.source_files = []
self.test_files = []
self.source_explicit_vuids = set()
self.source_implicit_vuids = set()
self.source_all_vuids = set()
self.test_explicit_vuids = set()
self.test_implicit_vuids = set()
def load(self, verbose):
if self.enabled == False:
return
# Get hash from git if available
try:
git_dir = os.path.join(self.repo_path, '.git')
process = subprocess.Popen(['git', '--git-dir='+git_dir ,'rev-parse', 'HEAD'], shell=False, stdout=subprocess.PIPE)
self.version = process.communicate()[0].strip().decode('utf-8')[:7]
if process.poll() != 0:
throw
elif verbose:
print('Found SPIR-V Tools version %s' % self.version)
except:
# leave as default
if verbose:
print('Could not find .git file for version of SPIR-V tools, marking as %s' % self.version)
# Find and parse files with VUIDs in source
for path in spirvtools_source_files:
self.source_files.extend(glob.glob(os.path.join(self.repo_path, path)))
for path in spirvtools_test_files:
self.test_files.extend(glob.glob(os.path.join(self.repo_path, path)))
def main(argv):
global verbose_mode
global txt_filename
global csv_filename
global html_filename
global spirvtools_path
run_consistency = False
report_unimplemented = False
report_unassigned = False
get_vuid_status = ''
txt_out = False
csv_out = False
html_out = False
header_out = False
show_summary = False
if (1 > len(argv)):
printHelp()
sys.exit()
# Parse script args
json_filename = argv[0]
i = 1
while (i < len(argv)):
arg = argv[i]
i = i + 1
if (arg == '-c'):
run_consistency = True
elif (arg == '-vuid'):
get_vuid_status = argv[i]
i = i + 1
elif (arg == '-todo'):
report_unimplemented = True
elif (arg == '-unassigned'):
report_unassigned = True
elif (arg == '-spirvtools'):
spirvtools_path = argv[i]
i = i + 1
elif (arg == '-text'):
txt_out = True
# Set filename if supplied, else use default
if i < len(argv) and not argv[i].startswith('-'):
txt_filename = argv[i]
i = i + 1
elif (arg == '-csv'):
csv_out = True
# Set filename if supplied, else use default
if i < len(argv) and not argv[i].startswith('-'):
csv_filename = argv[i]
i = i + 1
elif (arg == '-html'):
html_out = True
# Set filename if supplied, else use default
if i < len(argv) and not argv[i].startswith('-'):
html_filename = argv[i]
i = i + 1
elif (arg == '-export_header'):
header_out = True
elif (arg in ['-verbose']):
verbose_mode = True
elif (arg in ['-summary']):
show_summary = True
elif (arg in ['-help', '-h']):
printHelp()
sys.exit()
else:
print("Unrecognized argument: %s\n" % arg)
printHelp()
sys.exit()
result = 0 # Non-zero result indicates an error case
# Load in SPIRV-Tools if passed in
spirv_val = SpirvValidation(spirvtools_path)
spirv_val.load(verbose_mode)
# Parse validusage json
val_json = ValidationJSON(json_filename)
val_json.read()
exp_json = len(val_json.explicit_vuids)
imp_json = len(val_json.implicit_vuids)
all_json = len(val_json.all_vuids)
if verbose_mode:
print("Found %d unique error vuids in validusage.json file." % all_json)
print(" %d explicit" % exp_json)
print(" %d implicit" % imp_json)
if len(val_json.duplicate_vuids) > 0:
print("%d VUIDs appear in validusage.json more than once." % len(val_json.duplicate_vuids))
for vuid in val_json.duplicate_vuids:
print(" %s" % vuid)
for ext in val_json.vuid_db[vuid]:
print(" with extension: %s" % ext['ext'])
# Parse layer source files
val_source = ValidationSource(layer_source_files)
val_source.parse(spirv_val)
exp_checks = len(val_source.explicit_vuids)
imp_checks = len(val_source.implicit_vuids)
all_checks = len(val_source.vuid_count_dict.keys())
spirv_exp_checks = len(spirv_val.source_explicit_vuids) if spirv_val.enabled else 0
spirv_imp_checks = len(spirv_val.source_implicit_vuids) if spirv_val.enabled else 0
spirv_all_checks = (spirv_exp_checks + spirv_imp_checks) if spirv_val.enabled else 0
if verbose_mode:
print("Found %d unique vuid checks in layer source code." % all_checks)
print(" %d explicit" % exp_checks)
if spirv_val.enabled:
print(" SPIR-V Tool make up %d" % spirv_exp_checks)
print(" %d implicit" % imp_checks)
if spirv_val.enabled:
print(" SPIR-V Tool make up %d" % spirv_imp_checks)
print(" %d unassigned" % len(val_source.unassigned_vuids))
print(" %d checks are implemented more that once" % val_source.duplicated_checks)
# Parse test files
val_tests = ValidationTests(test_source_files)
val_tests.parse(spirv_val)
exp_tests = len(val_tests.explicit_vuids)
imp_tests = len(val_tests.implicit_vuids)
all_tests = len(val_tests.all_vuids)
spirv_exp_tests = len(spirv_val.test_explicit_vuids) if spirv_val.enabled else 0
spirv_imp_tests = len(spirv_val.test_implicit_vuids) if spirv_val.enabled else 0
spirv_all_tests = (spirv_exp_tests + spirv_imp_tests) if spirv_val.enabled else 0
if verbose_mode:
print("Found %d unique error vuids in test source code." % all_tests)
print(" %d explicit" % exp_tests)
if spirv_val.enabled:
print(" From SPIRV-Tools: %d" % spirv_exp_tests)
print(" %d implicit" % imp_tests)
if spirv_val.enabled:
print(" From SPIRV-Tools: %d" % spirv_imp_tests)
print(" %d unassigned" % len(val_tests.unassigned_vuids))
# Process stats
if show_summary:
if spirv_val.enabled:
print("\nValidation Statistics (using validusage.json version %s and SPIRV-Tools version %s)" % (val_json.apiversion, spirv_val.version))
else:
print("\nValidation Statistics (using validusage.json version %s)" % val_json.apiversion)
print(" VUIDs defined in JSON file: %04d explicit, %04d implicit, %04d total." % (exp_json, imp_json, all_json))
print(" VUIDs checked in layer code: %04d explicit, %04d implicit, %04d total." % (exp_checks, imp_checks, all_checks))
if spirv_val.enabled:
print(" From SPIRV-Tools: %04d explicit, %04d implicit, %04d total." % (spirv_exp_checks, spirv_imp_checks, spirv_all_checks))
print(" VUIDs tested in layer tests: %04d explicit, %04d implicit, %04d total." % (exp_tests, imp_tests, all_tests))
if spirv_val.enabled:
print(" From SPIRV-Tools: %04d explicit, %04d implicit, %04d total." % (spirv_exp_tests, spirv_imp_tests, spirv_all_tests))
print("\nVUID check coverage")
print(" Explicit VUIDs checked: %.1f%% (%d checked vs %d defined)" % ((100.0 * exp_checks / exp_json), exp_checks, exp_json))
print(" Implicit VUIDs checked: %.1f%% (%d checked vs %d defined)" % ((100.0 * imp_checks / imp_json), imp_checks, imp_json))
print(" Overall VUIDs checked: %.1f%% (%d checked vs %d defined)" % ((100.0 * all_checks / all_json), all_checks, all_json))
print("\nVUID test coverage")
print(" Explicit VUIDs tested: %.1f%% (%d tested vs %d checks)" % ((100.0 * exp_tests / exp_checks), exp_tests, exp_checks))
print(" Implicit VUIDs tested: %.1f%% (%d tested vs %d checks)" % ((100.0 * imp_tests / imp_checks), imp_tests, imp_checks))
print(" Overall VUIDs tested: %.1f%% (%d tested vs %d checks)" % ((100.0 * all_tests / all_checks), all_tests, all_checks))
# Report status of a single VUID
if len(get_vuid_status) > 1:
print("\n\nChecking status of <%s>" % get_vuid_status);
if get_vuid_status not in val_json.all_vuids and not get_vuid_status.startswith('UNASSIGNED-'):
print(' Not a valid VUID string.')
else:
if get_vuid_status in val_source.explicit_vuids:
print(' Implemented!')
line_list = val_source.vuid_count_dict[get_vuid_status]['file_line']
for line in line_list:
print(' => %s' % line)
elif get_vuid_status in val_source.implicit_vuids:
print(' Implemented! (Implicit)')
line_list = val_source.vuid_count_dict[get_vuid_status]['file_line']
for line in line_list:
print(' => %s' % line)
else:
print(' Not implemented.')
if get_vuid_status in val_tests.all_vuids:
print(' Has a test!')
test_list = val_tests.vuid_to_tests[get_vuid_status]
for test in test_list:
print(' => %s' % test)
else:
print(' Not tested.')
# Report unimplemented explicit VUIDs
if report_unimplemented:
unim_explicit = val_json.explicit_vuids - val_source.explicit_vuids
print("\n\n%d explicit VUID checks remain unimplemented:" % len(unim_explicit))
ulist = list(unim_explicit)
ulist.sort()
for vuid in ulist:
print(" => %s" % vuid)
# Report unassigned VUIDs
if report_unassigned:
# TODO: I do not really want VUIDs created for warnings though here
print("\n\n%d checks without a spec VUID:" % len(val_source.unassigned_vuids))
ulist = list(val_source.unassigned_vuids)
ulist.sort()
for vuid in ulist:
print(" => %s" % vuid)
line_list = val_source.vuid_count_dict[vuid]['file_line']
for line in line_list:
print(' => %s' % line)
print("\n%d tests without a spec VUID:" % len(val_source.unassigned_vuids))
ulist = list(val_tests.unassigned_vuids)
ulist.sort()
for vuid in ulist:
print(" => %s" % vuid)
test_list = val_tests.vuid_to_tests[vuid]
for test in test_list:
print(' => %s' % test)
# Consistency tests
if run_consistency:
print("\n\nRunning consistency tests...")
con = Consistency(val_json.all_vuids, val_source.all_vuids, val_tests.all_vuids)
ok = con.undef_vuids_in_layer_code()
ok &= con.undef_vuids_in_tests()
ok &= con.vuids_tested_not_checked()
if ok:
print(" OK! No inconsistencies found.")
# Output database in requested format(s)
db_out = OutputDatabase(val_json, val_source, val_tests, spirv_val)
if txt_out:
db_out.dump_txt(report_unimplemented)
if csv_out:
db_out.dump_csv(report_unimplemented)
if html_out:
db_out.dump_html(report_unimplemented)
if header_out:
db_out.export_header()
return result
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| 47.310204 | 247 | 0.570356 |
6ad899978a522a349b2c7324559f511e268d671e | 2,040 | py | Python | src/pykeen/stoppers/__init__.py | sunny1401/pykeen | ad449ecc753eb603670de67cfa5f49020c61db12 | [
"MIT"
] | null | null | null | src/pykeen/stoppers/__init__.py | sunny1401/pykeen | ad449ecc753eb603670de67cfa5f49020c61db12 | [
"MIT"
] | null | null | null | src/pykeen/stoppers/__init__.py | sunny1401/pykeen | ad449ecc753eb603670de67cfa5f49020c61db12 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Early stoppers.
The following code will create a scenario in which training will stop
(quite) early when training :class:`pykeen.models.TransE` on the
:class:`pykeen.datasets.Nations` dataset.
>>> from pykeen.pipeline import pipeline
>>> pipeline_result = pipeline(
... dataset='nations',
... model='transe',
... model_kwargs=dict(embedding_dim=20, scoring_fct_norm=1),
... optimizer='SGD',
... optimizer_kwargs=dict(lr=0.01),
... loss='marginranking',
... loss_kwargs=dict(margin=1),
... training_loop='slcwa',
... training_kwargs=dict(num_epochs=100, batch_size=128),
... negative_sampler='basic',
... negative_sampler_kwargs=dict(num_negs_per_pos=1),
... evaluator_kwargs=dict(filtered=True),
... evaluation_kwargs=dict(batch_size=128),
... stopper='early',
... stopper_kwargs=dict(frequency=5, patience=2, relative_delta=0.002),
... )
"""
from typing import Collection, Mapping, Type, Union
from .early_stopping import EarlyStopper, StopperCallback # noqa: F401
from .stopper import NopStopper, Stopper
from ..utils import get_cls, get_subclasses, normalize_string
__all__ = [
'Stopper',
'NopStopper',
'EarlyStopper',
'get_stopper_cls',
]
_STOPPER_SUFFIX = 'Stopper'
_STOPPERS: Collection[Type[Stopper]] = set(get_subclasses(Stopper)) # type: ignore
#: A mapping of stoppers' names to their implementations
stoppers: Mapping[str, Type[Stopper]] = {
normalize_string(cls.__name__, suffix=_STOPPER_SUFFIX): cls
for cls in _STOPPERS
}
def get_stopper_cls(query: Union[None, str, Type[Stopper]]) -> Type[Stopper]:
"""Look up a stopper class by name (case/punctuation insensitive) in :data:`pykeen.stoppers.stoppers`.
:param query: The name of the stopper (case insensitive, punctuation insensitive).
:return: The stopper class
"""
return get_cls(
query,
base=Stopper, # type: ignore
lookup_dict=stoppers,
default=NopStopper,
suffix=_STOPPER_SUFFIX,
)
| 31.384615 | 106 | 0.693137 |
82f873f563cb9c5260eaa6acd488d2d3eb7efef0 | 1,287 | py | Python | setup.py | bressanmarcos/deirokay | defa86174a913ae1ae944bd7e976fee12824eaee | [
"MIT"
] | 3 | 2022-03-03T12:56:19.000Z | 2022-03-03T18:38:22.000Z | setup.py | bressanmarcos/deirokay | defa86174a913ae1ae944bd7e976fee12824eaee | [
"MIT"
] | 12 | 2022-03-02T22:45:19.000Z | 2022-03-18T20:28:08.000Z | setup.py | bigdatabr/Deirokay | defa86174a913ae1ae944bd7e976fee12824eaee | [
"MIT"
] | null | null | null | from os.path import dirname, join
from setuptools import find_packages, setup
with open(join(dirname(__file__), 'deirokay', '__version__.py')) as v:
__version__ = None
exec(v.read().strip())
with open('README.md') as f:
long_description = f.read()
with open('requirements.txt') as f:
requirements = [line.strip() for line in f.readlines()]
with open('requirements-dev.txt') as f:
requirements_dev = [line.strip() for line in f.readlines()]
with open('requirements-s3.txt') as f:
requirements_s3 = [line.strip() for line in f.readlines()]
setup(
name="deirokay",
packages=find_packages(include=['deirokay*']),
version=__version__,
author="Marcos Bressan",
author_email="marcos.bressan@bigdata.com.br",
description="A tool for data profiling and data validation",
long_description=long_description,
long_description_content_type='text/markdown',
url="http://gitlab.bigdata/bressanmarcos/deirokay",
classifiers=[
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
],
python_requires='>=3.7',
include_package_data=True,
zip_safe=True,
install_requires=requirements,
extras_require={
'dev': requirements_dev,
's3': requirements_s3
}
)
| 29.25 | 70 | 0.684538 |
68ec0612508f918347000751e2733e595d09d689 | 150 | py | Python | Hackerrank-Solutions/Hackerrank-Python-Solutions/Introduction/Arithmetic Operators.py | HetDaftary/Competitive-Coding-Solutions | a683fa11895410c6eef07b1a68054f3e90aa596b | [
"MIT"
] | null | null | null | Hackerrank-Solutions/Hackerrank-Python-Solutions/Introduction/Arithmetic Operators.py | HetDaftary/Competitive-Coding-Solutions | a683fa11895410c6eef07b1a68054f3e90aa596b | [
"MIT"
] | null | null | null | Hackerrank-Solutions/Hackerrank-Python-Solutions/Introduction/Arithmetic Operators.py | HetDaftary/Competitive-Coding-Solutions | a683fa11895410c6eef07b1a68054f3e90aa596b | [
"MIT"
] | null | null | null | if __name__ == '__main__':
a = int(input())
b = int(input())
print (a + b)
print (a - b if (a > b) else b - a)
print (a * b)
| 18.75 | 39 | 0.446667 |
cb3c847dd2646b86a17bf99ba3b5a570bedaabe0 | 901 | py | Python | splunk_otel/distro.py | slernersplunk/splunk-otel-python | 6eddff4e62e2e7baa79acc99c79a261f7a503585 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | splunk_otel/distro.py | slernersplunk/splunk-otel-python | 6eddff4e62e2e7baa79acc99c79a261f7a503585 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | splunk_otel/distro.py | slernersplunk/splunk-otel-python | 6eddff4e62e2e7baa79acc99c79a261f7a503585 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict
from opentelemetry.instrumentation.distro import BaseDistro # type: ignore
from splunk_otel.options import Options
from splunk_otel.tracing import _configure_tracing
class SplunkDistro(BaseDistro):
def _configure(self, **kwargs: Dict[str, Any]) -> None:
_configure_tracing(Options())
| 34.653846 | 75 | 0.766926 |
3b409e4c0472a4bedd9614158c3baced729edc1c | 3,608 | py | Python | groupdocs_conversion_cloud/models/msg_load_options.py | groupdocs-conversion-cloud/groupdocs-conversion-cloud-python | 841d06ad3205e10e8f2726517779ac2d7c33a02a | [
"MIT"
] | 5 | 2019-11-21T04:58:45.000Z | 2021-02-05T05:22:37.000Z | groupdocs_conversion_cloud/models/msg_load_options.py | groupdocs-conversion-cloud/groupdocs-conversion-cloud-python | 841d06ad3205e10e8f2726517779ac2d7c33a02a | [
"MIT"
] | null | null | null | groupdocs_conversion_cloud/models/msg_load_options.py | groupdocs-conversion-cloud/groupdocs-conversion-cloud-python | 841d06ad3205e10e8f2726517779ac2d7c33a02a | [
"MIT"
] | null | null | null | # coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose Pty Ltd" file="MsgLoadOptions.py">
# Copyright (c) 2003-2021 Aspose Pty Ltd
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
from groupdocs_conversion_cloud.models import EmailLoadOptions
class MsgLoadOptions(EmailLoadOptions):
"""
Msg load options
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self, **kwargs): # noqa: E501
"""Initializes new instance of MsgLoadOptions""" # noqa: E501
base = super(MsgLoadOptions, self)
base.__init__(**kwargs)
self.swagger_types.update(base.swagger_types)
self.attribute_map.update(base.attribute_map)
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MsgLoadOptions):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 34.692308 | 85 | 0.596452 |
7d1c17bbcb337a1d7517cdb457bc69e7624bffc2 | 205 | py | Python | beneficiaries/beneficiaries/doctype/the_base/test_the_base.py | baidalala/beneficiaries | b7299e0a7da91e90c607e70d76994ec0aebae402 | [
"MIT"
] | null | null | null | beneficiaries/beneficiaries/doctype/the_base/test_the_base.py | baidalala/beneficiaries | b7299e0a7da91e90c607e70d76994ec0aebae402 | [
"MIT"
] | null | null | null | beneficiaries/beneficiaries/doctype/the_base/test_the_base.py | baidalala/beneficiaries | b7299e0a7da91e90c607e70d76994ec0aebae402 | [
"MIT"
] | 1 | 2021-08-31T18:47:58.000Z | 2021-08-31T18:47:58.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Baida and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestTheBase(unittest.TestCase):
pass
| 18.636364 | 44 | 0.756098 |
a1142bff0976308426be3fcae97ca767febf8f6e | 87 | py | Python | src/lib/parameters/px4params/__init__.py | WeRobotics/Firmware | 96443b3cf3e3adaec8b2ebb6c83a19a92d40a13f | [
"BSD-3-Clause"
] | 15 | 2017-07-21T07:26:56.000Z | 2022-01-11T17:51:02.000Z | src/lib/parameters/px4params/__init__.py | WeRobotics/Firmware | 96443b3cf3e3adaec8b2ebb6c83a19a92d40a13f | [
"BSD-3-Clause"
] | 4 | 2016-04-06T09:03:29.000Z | 2017-01-22T20:16:08.000Z | src/lib/parameters/px4params/__init__.py | WeRobotics/Firmware | 96443b3cf3e3adaec8b2ebb6c83a19a92d40a13f | [
"BSD-3-Clause"
] | 17 | 2016-04-15T11:55:51.000Z | 2021-12-21T12:41:29.000Z | __all__ = ["srcscanner", "srcparser", "xmlout", "dokuwikiout", "dokuwikirpc", "scope"]
| 43.5 | 86 | 0.678161 |
c8678208102daa9c0a2e3db6e2f24ca2e48a917f | 8,583 | py | Python | tests/pool/conftest.py | Losik/toloka-kit | 1e707b17bcaa8a570f0479445906d9afbdf737ae | [
"Apache-2.0"
] | null | null | null | tests/pool/conftest.py | Losik/toloka-kit | 1e707b17bcaa8a570f0479445906d9afbdf737ae | [
"Apache-2.0"
] | 1 | 2021-07-02T13:56:34.000Z | 2021-07-02T14:32:10.000Z | tests/pool/conftest.py | Losik/toloka-kit | 1e707b17bcaa8a570f0479445906d9afbdf737ae | [
"Apache-2.0"
] | 1 | 2021-06-09T09:32:33.000Z | 2021-06-09T09:32:33.000Z | import pytest
@pytest.fixture
def pool_map():
return {
'type': 'REGULAR',
'project_id': '10',
'private_name': 'pool_v12_231',
'public_description': '42',
'may_contain_adult_content': True,
'will_expire': '2016-03-23T12:59:00',
'auto_close_after_complete_delay_seconds': 600,
'reward_per_assignment': 0.03,
'dynamic_pricing_config': {
'type': 'SKILL',
'skill_id': '123123',
'intervals': [
{'from': 50, 'to': 79, 'reward_per_assignment': 0.05},
{'from': 80, 'reward_per_assignment': 0.1},
]
},
'dynamic_overlap_config': {
'type': 'BASIC',
'max_overlap': 5,
'min_confidence': 0.95,
'answer_weight_skill_id': '42',
'fields': [{'name': 'out1'}],
},
'metadata': {'testKey': ['testValue']},
'assignment_max_duration_seconds': 600,
'auto_accept_solutions': True,
'priority': 10,
'defaults': {
'default_overlap_for_new_task_suites': 3,
'default_overlap_for_new_tasks': 2,
},
'mixer_config': {
'real_tasks_count': 10,
'golden_tasks_count': 2,
'training_tasks_count': 1,
'min_training_tasks_count': 0,
'min_golden_tasks_count': 1,
'force_last_assignment': False,
'force_last_assignment_delay_seconds': 10,
'mix_tasks_in_creation_order': False,
'shuffle_tasks_in_task_suite': True,
'golden_task_distribution_function': {
'scope': 'POOL',
'distribution': 'UNIFORM',
'window_days': 5,
'intervals': [
{'to': 50, 'frequency': 5},
{'from': 100, 'frequency': 50},
],
}
},
'assignments_issuing_config': {
'issue_task_suites_in_creation_order': True,
},
'filter': {
'and': [
{
'category': 'profile',
'key': 'adult_allowed',
'operator': 'EQ',
'value': True,
},
{
'or': [
{
'category': 'skill',
'key': '20',
'operator': 'GTE',
'value': 60
},
{
'category': 'skill',
'key': '22',
'operator': 'GT',
'value': 95,
}
]
},
]
},
'quality_control': {
'captcha_frequency': 'LOW',
'checkpoints_config': {
'real_settings': {
'target_overlap': 5,
'task_distribution_function': {
'scope': 'PROJECT',
'distribution': 'UNIFORM',
'window_days': 7,
'intervals': [
{'to': 100, 'frequency': 5},
{'from': 101, 'frequency': 50},
],
},
},
},
'configs': [
{
'collector_config': {
'type': 'CAPTCHA',
'parameters': {'history_size': 5},
},
'rules': [
{
'conditions': [
{
'key': 'stored_results_count',
'operator': 'EQ',
'value': 5
},
{
'key': 'success_rate',
'operator': 'LTE',
'value': 60.0,
}
],
'action': {
'type': 'RESTRICTION',
'parameters': {
'scope': 'POOL',
'duration_days': 10,
'private_comment': 'ban in pool',
}
}
}
]
}
]
}
}
@pytest.fixture
def pool_map_with_readonly(pool_map):
return {
**pool_map,
'id': '21',
'owner': {'id': 'requester-1', 'myself': True, 'company_id': '1'},
'type': 'REGULAR',
'created': '2015-12-16T12:55:01',
'last_started': '2015-12-17T08:00:01',
'last_stopped': '2015-12-18T08:00:01',
'last_close_reason': 'MANUAL',
'status': 'CLOSED',
}
@pytest.fixture
def training_pool_map():
return {
'id': '22',
'owner': {
'id': 'requester-1',
'myself': True,
'company_id': '1'
},
'type': 'TRAINING',
'project_id': '10',
'private_name': 'training_v12_231',
'public_description': '42',
'public_instructions': 'training instructions',
'may_contain_adult_content': True,
'reward_per_assignment': 0.00,
'assignment_max_duration_seconds': 600,
'auto_accept_solutions': True,
'priority': 0,
'defaults': {
'default_overlap_for_new_task_suites': 30_000,
},
'mixer_config': {
'real_tasks_count': 0,
'golden_tasks_count': 0,
'training_tasks_count': 7,
'min_training_tasks_count': 1,
'force_last_assignment': False,
'force_last_assignment_delay_seconds': 10,
'mix_tasks_in_creation_order': False,
'shuffle_tasks_in_task_suite': True,
},
'assignments_issuing_config': {
'issue_task_suites_in_creation_order': True,
},
'quality_control': {
'configs': [
{
'collector_config': {
'type': 'TRAINING',
'uuid': 'cdf0f2ee-04e4-11e8-8a8d-6c96cfdb64bb'
},
'rules': [
{
'conditions': [
{
'key': 'submitted_assignments_count',
'operator': 'EQ',
'value': 5
}
],
'action': {
'type': 'SET_SKILL_FROM_OUTPUT_FIELD',
'parameters': {
'skill_id': '676',
'from_field': 'correct_answers_rate',
}
}
},
{
'conditions': [
{
'key': 'next_assignment_available',
'operator': 'EQ',
'value': False
},
{
'key': 'total_answers_count',
'operator': 'GT',
'value': 0,
}
],
'action': {
'type': 'SET_SKILL_FROM_OUTPUT_FIELD',
'parameters': {
'skill_id': '676',
'from_field': 'correct_answers_rate',
}
}
}
]
}
]
},
'training_config': {
'training_skill_ttl_days': 5,
},
'status': 'OPEN',
'created': '2017-12-03T12:03:00',
'last_started': '2017-12-04T12:12:03',
}
| 35.032653 | 74 | 0.346149 |
a64d34b1b21ced54db0001edc1922a6c5bca08e9 | 2,400 | py | Python | class/super..py | Max-PJB/python-learning2 | e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd | [
"MIT"
] | null | null | null | class/super..py | Max-PJB/python-learning2 | e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd | [
"MIT"
] | null | null | null | class/super..py | Max-PJB/python-learning2 | e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@ Author : Max_Pengjb
@ date : 2018/9/25 17:31
@ IDE : PyCharm
@ GitHub : https://github.com/JackyPJB
@ Contact : pengjianbiao@hotmail.com
-------------------------------------------------
Description :
-------------------------------------------------
"""
import time
__author__ = 'Max_Pengjb'
start = time.time()
# 下面写上代码块
class A:
def go(self):
print("go A go!")
def stop(self):
print("stop A stop!")
def pause(self):
raise Exception("Not Implemented")
class B(A):
def go(self):
super(B, self).go()
print("go B go!")
class C(A):
def go(self):
super(C, self).go()
print("go C go!")
def stop(self):
super(C, self).stop()
print("stop C stop!")
class D(B, C):
def go(self):
super(D, self).go()
print("go D go!")
def stop(self):
super(D, self).stop()
print("stop D stop!")
def pause(self):
print("wait D wait!")
class E(B, C):
pass
a = A()
b = B()
c = C()
d = D()
e = E()
# 说明下列代码的输出结果
a.go()
print('--------')
b.go()
print('--------')
c.go()
print('--------')
d.go()
print('--------')
e.go()
print('--------')
a.stop()
print('--------')
b.stop()
print('--------')
c.stop()
print('--------')
d.stop()
print('--------')
e.stop()
print(D.mro())
a.pause()
b.pause()
c.pause()
d.pause()
e.pause()
class A1:
def __init__(self):
self.n = 2
def add(self, m):
print('self is {0} @A.add'.format(self))
self.n += m
class B1(A1):
def __init__(self):
super().__init__()
self.n = 3
def add(self, m):
print('self is {0} @B.add'.format(self))
super().add(m)
print('newb')
self.n += 3
class C1(A1):
def __init__(self):
self.n = 4
def add(self, m):
print('self is {0} @C.add'.format(self))
super().add(m)
print('newc')
self.n += 4
class D1(B1, C1):
def __init__(self):
self.n = 5
def add(self, m):
print('self is {0} @D.add'.format(self))
super().add(m)
self.n += 5
d = D1()
d.add(2)
print(d.n)
# 上面中间写上代码块
end = time.time()
print('Running time: %s Seconds' % (end - start))
| 16 | 51 | 0.444583 |
b8f468194b3400fd65c3154700957fead3f05f60 | 484 | py | Python | tools/smoothstep_lut.py | evanbowman/Red | 85735269a46757305a81ad39f47034bc6cd97846 | [
"BSD-2-Clause"
] | 5 | 2021-08-30T16:18:55.000Z | 2021-10-30T20:23:32.000Z | tools/smoothstep_lut.py | evanbowman/gbc-project | 85735269a46757305a81ad39f47034bc6cd97846 | [
"BSD-2-Clause"
] | null | null | null | tools/smoothstep_lut.py | evanbowman/gbc-project | 85735269a46757305a81ad39f47034bc6cd97846 | [
"BSD-2-Clause"
] | null | null | null | def smoothstep(edge0, edge1, x):
x2 = clamp((x - edge0) / (edge1 - edge0), 0.0, 1.0)
return x2 * x2 * (3 - 2 * x2);
def clamp(x, lower, upper):
if x < lower:
return lower
elif x > upper:
return upper
return x
count = 0
print("DB ", end="")
for i in range(0, 256):
print('${0:0{1}X},'.format(int(255 * smoothstep(0, 255, i)), 2), end=" ")
count += 1
if count > 7:
count = 0
print("")
print("DB ", end="")
| 17.925926 | 77 | 0.489669 |
c2c23f546226db6500860e30f9860ebfa470d276 | 30,910 | py | Python | boto/sns/connection.py | adastreamer/boto | ce472cbbcffd06298fdd0c980d5bfcdcee875498 | [
"MIT"
] | null | null | null | boto/sns/connection.py | adastreamer/boto | ce472cbbcffd06298fdd0c980d5bfcdcee875498 | [
"MIT"
] | null | null | null | boto/sns/connection.py | adastreamer/boto | ce472cbbcffd06298fdd0c980d5bfcdcee875498 | [
"MIT"
] | null | null | null | # Copyright (c) 2010-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import uuid
import hashlib
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.compat import json
import boto
class SNSConnection(AWSQueryConnection):
"""
Amazon Simple Notification Service
Amazon Simple Notification Service (Amazon SNS) is a web service
that enables you to build distributed web-enabled applications.
Applications can use Amazon SNS to easily push real-time
notification messages to interested subscribers over multiple
delivery protocols. For more information about this product see
`http://aws.amazon.com/sns`_. For detailed information about
Amazon SNS features and their associated API calls, see the
`Amazon SNS Developer Guide`_.
We also provide SDKs that enable you to access Amazon SNS from
your preferred programming language. The SDKs contain
functionality that automatically takes care of tasks such as:
cryptographically signing your service requests, retrying
requests, and handling error responses. For a list of available
SDKs, go to `Tools for Amazon Web Services`_.
"""
DefaultRegionName = 'us-east-1'
DefaultRegionEndpoint = 'sns.us-east-1.amazonaws.com'
APIVersion = '2010-03-31'
    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, debug=0,
                 https_connection_factory=None, region=None, path='/',
                 security_token=None, validate_certs=True):
        """Create a connection to the Amazon SNS query API.

        :type region: :class:`boto.regioninfo.RegionInfo`
        :param region: Endpoint to connect to.  When omitted (or falsy), a
            default RegionInfo for us-east-1 is constructed from the
            class-level ``DefaultRegionName``/``DefaultRegionEndpoint``.

        All other arguments are passed straight through, positionally, to
        ``AWSQueryConnection.__init__`` — do not reorder them.
        """
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint,
                                connection_cls=SNSConnection)
        self.region = region
        AWSQueryConnection.__init__(self, aws_access_key_id,
                                    aws_secret_access_key,
                                    is_secure, port, proxy, proxy_port,
                                    proxy_user, proxy_pass,
                                    self.region.endpoint, debug,
                                    https_connection_factory, path,
                                    security_token=security_token,
                                    validate_certs=validate_certs)
def _build_dict_as_list_params(self, params, dictionary, name):
"""
Serialize a parameter 'name' which value is a 'dictionary' into a list of parameters.
See: http://docs.aws.amazon.com/sns/latest/api/API_SetPlatformApplicationAttributes.html
For example::
dictionary = {'PlatformPrincipal': 'foo', 'PlatformCredential': 'bar'}
name = 'Attributes'
would result in params dict being populated with:
Attributes.entry.1.key = PlatformPrincipal
Attributes.entry.1.value = foo
Attributes.entry.2.key = PlatformCredential
Attributes.entry.2.value = bar
:param params: the resulting parameters will be added to this dict
:param dictionary: dict - value of the serialized parameter
:param name: name of the serialized parameter
"""
items = sorted(dictionary.items(), key=lambda x:x[0])
for kv, index in zip(items, range(1, len(items)+1)):
key, value = kv
prefix = '%s.entry.%s' % (name, index)
params['%s.key' % prefix] = key
params['%s.value' % prefix] = value
    def _required_auth_capability(self):
        # Requests to SNS are signed with AWS Signature Version 4.
        return ['hmac-v4']
def get_all_topics(self, next_token=None):
"""
:type next_token: string
:param next_token: Token returned by the previous call to
this method.
"""
params = {}
if next_token:
params['NextToken'] = next_token
return self._make_request('ListTopics', params)
def get_topic_attributes(self, topic):
"""
Get attributes of a Topic
:type topic: string
:param topic: The ARN of the topic.
"""
params = {'TopicArn': topic}
return self._make_request('GetTopicAttributes', params)
def set_topic_attributes(self, topic, attr_name, attr_value):
"""
Get attributes of a Topic
:type topic: string
:param topic: The ARN of the topic.
:type attr_name: string
:param attr_name: The name of the attribute you want to set.
Only a subset of the topic's attributes are mutable.
Valid values: Policy | DisplayName
:type attr_value: string
:param attr_value: The new value for the attribute.
"""
params = {'TopicArn': topic,
'AttributeName': attr_name,
'AttributeValue': attr_value}
return self._make_request('SetTopicAttributes', params)
def add_permission(self, topic, label, account_ids, actions):
"""
Adds a statement to a topic's access control policy, granting
access for the specified AWS accounts to the specified actions.
:type topic: string
:param topic: The ARN of the topic.
:type label: string
:param label: A unique identifier for the new policy statement.
:type account_ids: list of strings
:param account_ids: The AWS account ids of the users who will be
give access to the specified actions.
:type actions: list of strings
:param actions: The actions you want to allow for each of the
specified principal(s).
"""
params = {'TopicArn': topic,
'Label': label}
self.build_list_params(params, account_ids, 'AWSAccountId.member')
self.build_list_params(params, actions, 'ActionName.member')
return self._make_request('AddPermission', params)
def remove_permission(self, topic, label):
"""
Removes a statement from a topic's access control policy.
:type topic: string
:param topic: The ARN of the topic.
:type label: string
:param label: A unique identifier for the policy statement
to be removed.
"""
params = {'TopicArn': topic,
'Label': label}
return self._make_request('RemovePermission', params)
def create_topic(self, topic):
"""
Create a new Topic.
:type topic: string
:param topic: The name of the new topic.
"""
params = {'Name': topic}
return self._make_request('CreateTopic', params)
def delete_topic(self, topic):
"""
Delete an existing topic
:type topic: string
:param topic: The ARN of the topic
"""
params = {'TopicArn': topic}
return self._make_request('DeleteTopic', params, '/', 'GET')
def publish(self, topic=None, message=None, subject=None, target_arn=None,
message_structure=None):
"""
Get properties of a Topic
:type topic: string
:param topic: The ARN of the new topic.
:type message: string
:param message: The message you want to send to the topic.
Messages must be UTF-8 encoded strings and
be at most 4KB in size.
:type message_structure: string
:param message_structure: Optional parameter. If left as ``None``,
plain text will be sent. If set to ``json``,
your message should be a JSON string that
matches the structure described at
http://docs.aws.amazon.com/sns/latest/dg/PublishTopic.html#sns-message-formatting-by-protocol
:type subject: string
:param subject: Optional parameter to be used as the "Subject"
line of the email notifications.
:type target_arn: string
:param target_arn: Optional parameter for either TopicArn or
EndpointArn, but not both.
"""
if message is None:
# To be backwards compatible when message did not have
# a default value and topic and message were required
# args.
raise TypeError("'message' is a required parameter")
params = {'Message': message}
if subject is not None:
params['Subject'] = subject
if topic is not None:
params['TopicArn'] = topic
if target_arn is not None:
params['TargetArn'] = target_arn
if message_structure is not None:
params['MessageStructure'] = message_structure
return self._make_request('Publish', params)
def subscribe(self, topic, protocol, endpoint):
"""
Subscribe to a Topic.
:type topic: string
:param topic: The ARN of the new topic.
:type protocol: string
:param protocol: The protocol used to communicate with
the subscriber. Current choices are:
email|email-json|http|https|sqs
:type endpoint: string
:param endpoint: The location of the endpoint for
the subscriber.
* For email, this would be a valid email address
* For email-json, this would be a valid email address
* For http, this would be a URL beginning with http
* For https, this would be a URL beginning with https
* For sqs, this would be the ARN of an SQS Queue
"""
params = {'TopicArn': topic,
'Protocol': protocol,
'Endpoint': endpoint}
return self._make_request('Subscribe', params)
def subscribe_sqs_queue(self, topic, queue):
"""
Subscribe an SQS queue to a topic.
This is convenience method that handles most of the complexity involved
in using an SQS queue as an endpoint for an SNS topic. To achieve this
the following operations are performed:
* The correct ARN is constructed for the SQS queue and that ARN is
then subscribed to the topic.
* A JSON policy document is contructed that grants permission to
the SNS topic to send messages to the SQS queue.
* This JSON policy is then associated with the SQS queue using
the queue's set_attribute method. If the queue already has
a policy associated with it, this process will add a Statement to
that policy. If no policy exists, a new policy will be created.
:type topic: string
:param topic: The ARN of the new topic.
:type queue: A boto Queue object
:param queue: The queue you wish to subscribe to the SNS Topic.
"""
t = queue.id.split('/')
q_arn = queue.arn
sid = hashlib.md5(topic + q_arn).hexdigest()
sid_exists = False
resp = self.subscribe(topic, 'sqs', q_arn)
attr = queue.get_attributes('Policy')
if 'Policy' in attr:
policy = json.loads(attr['Policy'])
else:
policy = {}
if 'Version' not in policy:
policy['Version'] = '2008-10-17'
if 'Statement' not in policy:
policy['Statement'] = []
# See if a Statement with the Sid exists already.
for s in policy['Statement']:
if s['Sid'] == sid:
sid_exists = True
if not sid_exists:
statement = {'Action': 'SQS:SendMessage',
'Effect': 'Allow',
'Principal': {'AWS': '*'},
'Resource': q_arn,
'Sid': sid,
'Condition': {'StringLike': {'aws:SourceArn': topic}}}
policy['Statement'].append(statement)
queue.set_attribute('Policy', json.dumps(policy))
return resp
def confirm_subscription(self, topic, token,
authenticate_on_unsubscribe=False):
"""
Get properties of a Topic
:type topic: string
:param topic: The ARN of the new topic.
:type token: string
:param token: Short-lived token sent to and endpoint during
the Subscribe operation.
:type authenticate_on_unsubscribe: bool
:param authenticate_on_unsubscribe: Optional parameter indicating
that you wish to disable
unauthenticated unsubscription
of the subscription.
"""
params = {'TopicArn': topic, 'Token': token}
if authenticate_on_unsubscribe:
params['AuthenticateOnUnsubscribe'] = 'true'
return self._make_request('ConfirmSubscription', params)
def unsubscribe(self, subscription):
"""
Allows endpoint owner to delete subscription.
Confirmation message will be delivered.
:type subscription: string
:param subscription: The ARN of the subscription to be deleted.
"""
params = {'SubscriptionArn': subscription}
return self._make_request('Unsubscribe', params)
def get_all_subscriptions(self, next_token=None):
"""
Get list of all subscriptions.
:type next_token: string
:param next_token: Token returned by the previous call to
this method.
"""
params = {}
if next_token:
params['NextToken'] = next_token
return self._make_request('ListSubscriptions', params)
def get_all_subscriptions_by_topic(self, topic, next_token=None):
"""
Get list of all subscriptions to a specific topic.
:type topic: string
:param topic: The ARN of the topic for which you wish to
find subscriptions.
:type next_token: string
:param next_token: Token returned by the previous call to
this method.
"""
params = {'TopicArn': topic}
if next_token:
params['NextToken'] = next_token
return self._make_request('ListSubscriptionsByTopic', params)
def create_platform_application(self, name=None, platform=None,
attributes=None):
"""
The `CreatePlatformApplication` action creates a platform
application object for one of the supported push notification
services, such as APNS and GCM, to which devices and mobile
apps may register. You must specify PlatformPrincipal and
PlatformCredential attributes when using the
`CreatePlatformApplication` action. The PlatformPrincipal is
received from the notification service. For APNS/APNS_SANDBOX,
PlatformPrincipal is "SSL certificate". For GCM,
PlatformPrincipal is not applicable. For ADM,
PlatformPrincipal is "client id". The PlatformCredential is
also received from the notification service. For
APNS/APNS_SANDBOX, PlatformCredential is "private key". For
GCM, PlatformCredential is "API key". For ADM,
PlatformCredential is "client secret". The
PlatformApplicationArn that is returned when using
`CreatePlatformApplication` is then used as an attribute for
the `CreatePlatformEndpoint` action. For more information, see
`Using Amazon SNS Mobile Push Notifications`_.
:type name: string
:param name: Application names must be made up of only uppercase and
lowercase ASCII letters, numbers, underscores, hyphens, and
periods, and must be between 1 and 256 characters long.
:type platform: string
:param platform: The following platforms are supported: ADM (Amazon
Device Messaging), APNS (Apple Push Notification Service),
APNS_SANDBOX, and GCM (Google Cloud Messaging).
:type attributes: map
:param attributes: For a list of attributes, see
`SetPlatformApplicationAttributes`_
"""
params = {}
if name is not None:
params['Name'] = name
if platform is not None:
params['Platform'] = platform
if attributes is not None:
self._build_dict_as_list_params(params, attributes, 'Attributes')
return self._make_request(action='CreatePlatformApplication',
params=params)
def set_platform_application_attributes(self,
platform_application_arn=None,
attributes=None):
"""
The `SetPlatformApplicationAttributes` action sets the
attributes of the platform application object for the
supported push notification services, such as APNS and GCM.
For more information, see `Using Amazon SNS Mobile Push
Notifications`_.
:type platform_application_arn: string
:param platform_application_arn: PlatformApplicationArn for
SetPlatformApplicationAttributes action.
:type attributes: map
:param attributes:
A map of the platform application attributes. Attributes in this map
include the following:
+ `PlatformCredential` -- The credential received from the notification
service. For APNS/APNS_SANDBOX, PlatformCredential is "private
key". For GCM, PlatformCredential is "API key". For ADM,
PlatformCredential is "client secret".
+ `PlatformPrincipal` -- The principal received from the notification
service. For APNS/APNS_SANDBOX, PlatformPrincipal is "SSL
certificate". For GCM, PlatformPrincipal is not applicable. For
ADM, PlatformPrincipal is "client id".
+ `EventEndpointCreated` -- Topic ARN to which EndpointCreated event
notifications should be sent.
+ `EventEndpointDeleted` -- Topic ARN to which EndpointDeleted event
notifications should be sent.
+ `EventEndpointUpdated` -- Topic ARN to which EndpointUpdate event
notifications should be sent.
+ `EventDeliveryFailure` -- Topic ARN to which DeliveryFailure event
notifications should be sent upon Direct Publish delivery failure
(permanent) to one of the application's endpoints.
"""
params = {}
if platform_application_arn is not None:
params['PlatformApplicationArn'] = platform_application_arn
if attributes is not None:
self._build_dict_as_list_params(params, attributes, 'Attributes')
return self._make_request(action='SetPlatformApplicationAttributes',
params=params)
def get_platform_application_attributes(self,
platform_application_arn=None):
"""
The `GetPlatformApplicationAttributes` action retrieves the
attributes of the platform application object for the
supported push notification services, such as APNS and GCM.
For more information, see `Using Amazon SNS Mobile Push
Notifications`_.
:type platform_application_arn: string
:param platform_application_arn: PlatformApplicationArn for
GetPlatformApplicationAttributesInput.
"""
params = {}
if platform_application_arn is not None:
params['PlatformApplicationArn'] = platform_application_arn
return self._make_request(action='GetPlatformApplicationAttributes',
params=params)
def list_platform_applications(self, next_token=None):
"""
The `ListPlatformApplications` action lists the platform
application objects for the supported push notification
services, such as APNS and GCM. The results for
`ListPlatformApplications` are paginated and return a limited
list of applications, up to 100. If additional records are
available after the first page results, then a NextToken
string will be returned. To receive the next page, you call
`ListPlatformApplications` using the NextToken string received
from the previous call. When there are no more records to
return, NextToken will be null. For more information, see
`Using Amazon SNS Mobile Push Notifications`_.
:type next_token: string
:param next_token: NextToken string is used when calling
ListPlatformApplications action to retrieve additional records that
are available after the first page results.
"""
params = {}
if next_token is not None:
params['NextToken'] = next_token
return self._make_request(action='ListPlatformApplications',
params=params)
def list_endpoints_by_platform_application(self,
platform_application_arn=None,
next_token=None):
"""
The `ListEndpointsByPlatformApplication` action lists the
endpoints and endpoint attributes for devices in a supported
push notification service, such as GCM and APNS. The results
for `ListEndpointsByPlatformApplication` are paginated and
return a limited list of endpoints, up to 100. If additional
records are available after the first page results, then a
NextToken string will be returned. To receive the next page,
you call `ListEndpointsByPlatformApplication` again using the
NextToken string received from the previous call. When there
are no more records to return, NextToken will be null. For
more information, see `Using Amazon SNS Mobile Push
Notifications`_.
:type platform_application_arn: string
:param platform_application_arn: PlatformApplicationArn for
ListEndpointsByPlatformApplicationInput action.
:type next_token: string
:param next_token: NextToken string is used when calling
ListEndpointsByPlatformApplication action to retrieve additional
records that are available after the first page results.
"""
params = {}
if platform_application_arn is not None:
params['PlatformApplicationArn'] = platform_application_arn
if next_token is not None:
params['NextToken'] = next_token
return self._make_request(action='ListEndpointsByPlatformApplication',
params=params)
def delete_platform_application(self, platform_application_arn=None):
"""
The `DeletePlatformApplication` action deletes a platform
application object for one of the supported push notification
services, such as APNS and GCM. For more information, see
`Using Amazon SNS Mobile Push Notifications`_.
:type platform_application_arn: string
:param platform_application_arn: PlatformApplicationArn of platform
application object to delete.
"""
params = {}
if platform_application_arn is not None:
params['PlatformApplicationArn'] = platform_application_arn
return self._make_request(action='DeletePlatformApplication',
params=params)
def create_platform_endpoint(self, platform_application_arn=None,
token=None, custom_user_data=None,
attributes=None):
"""
The `CreatePlatformEndpoint` creates an endpoint for a device
and mobile app on one of the supported push notification
services, such as GCM and APNS. `CreatePlatformEndpoint`
requires the PlatformApplicationArn that is returned from
`CreatePlatformApplication`. The EndpointArn that is returned
when using `CreatePlatformEndpoint` can then be used by the
`Publish` action to send a message to a mobile app or by the
`Subscribe` action for subscription to a topic. For more
information, see `Using Amazon SNS Mobile Push
Notifications`_.
:type platform_application_arn: string
:param platform_application_arn: PlatformApplicationArn returned from
CreatePlatformApplication is used to create a an endpoint.
:type token: string
:param token: Unique identifier created by the notification service for
an app on a device. The specific name for Token will vary,
depending on which notification service is being used. For example,
when using APNS as the notification service, you need the device
token. Alternatively, when using GCM or ADM, the device token
equivalent is called the registration ID.
:type custom_user_data: string
:param custom_user_data: Arbitrary user data to associate with the
endpoint. SNS does not use this data. The data must be in UTF-8
format and less than 2KB.
:type attributes: map
:param attributes: For a list of attributes, see
`SetEndpointAttributes`_.
"""
params = {}
if platform_application_arn is not None:
params['PlatformApplicationArn'] = platform_application_arn
if token is not None:
params['Token'] = token
if custom_user_data is not None:
params['CustomUserData'] = custom_user_data
if attributes is not None:
self._build_dict_as_list_params(params, attributes, 'Attributes')
return self._make_request(action='CreatePlatformEndpoint',
params=params)
def delete_endpoint(self, endpoint_arn=None):
"""
The `DeleteEndpoint` action, which is idempotent, deletes the
endpoint from SNS. For more information, see `Using Amazon SNS
Mobile Push Notifications`_.
:type endpoint_arn: string
:param endpoint_arn: EndpointArn of endpoint to delete.
"""
params = {}
if endpoint_arn is not None:
params['EndpointArn'] = endpoint_arn
return self._make_request(action='DeleteEndpoint', params=params)
def set_endpoint_attributes(self, endpoint_arn=None, attributes=None):
"""
The `SetEndpointAttributes` action sets the attributes for an
endpoint for a device on one of the supported push
notification services, such as GCM and APNS. For more
information, see `Using Amazon SNS Mobile Push
Notifications`_.
:type endpoint_arn: string
:param endpoint_arn: EndpointArn used for SetEndpointAttributes action.
:type attributes: map
:param attributes:
A map of the endpoint attributes. Attributes in this map include the
following:
+ `CustomUserData` -- arbitrary user data to associate with the
endpoint. SNS does not use this data. The data must be in UTF-8
format and less than 2KB.
+ `Enabled` -- flag that enables/disables delivery to the endpoint.
Message Processor will set this to false when a notification
service indicates to SNS that the endpoint is invalid. Users can
set it back to true, typically after updating Token.
+ `Token` -- device token, also referred to as a registration id, for
an app and mobile device. This is returned from the notification
service when an app and mobile device are registered with the
notification service.
"""
params = {}
if endpoint_arn is not None:
params['EndpointArn'] = endpoint_arn
if attributes is not None:
self._build_dict_as_list_params(params, attributes, 'Attributes')
return self._make_request(action='SetEndpointAttributes',
params=params)
def get_endpoint_attributes(self, endpoint_arn=None):
"""
The `GetEndpointAttributes` retrieves the endpoint attributes
for a device on one of the supported push notification
services, such as GCM and APNS. For more information, see
`Using Amazon SNS Mobile Push Notifications`_.
:type endpoint_arn: string
:param endpoint_arn: EndpointArn for GetEndpointAttributes input.
"""
params = {}
if endpoint_arn is not None:
params['EndpointArn'] = endpoint_arn
return self._make_request(action='GetEndpointAttributes',
params=params)
def _make_request(self, action, params, path='/', verb='GET'):
params['ContentType'] = 'JSON'
response = self.make_request(action=action, verb=verb,
path=path, params=params)
body = response.read()
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
| 42.517194 | 127 | 0.624393 |
19db9d3ddcffc742f3c317dcc225446465ee3624 | 9,418 | py | Python | src/chapter_03/code/ch3_fig4.py | gchure/phd | cf5941e467ee57c6c93c78dda151335cb320f831 | [
"MIT"
] | 4 | 2020-01-14T01:12:53.000Z | 2021-11-29T10:33:20.000Z | src/chapter_03/code/ch3_fig4.py | gchure/phd | cf5941e467ee57c6c93c78dda151335cb320f831 | [
"MIT"
] | 1 | 2021-10-13T03:30:26.000Z | 2021-11-11T18:21:43.000Z | src/chapter_03/code/ch3_fig4.py | gchure/phd | cf5941e467ee57c6c93c78dda151335cb320f831 | [
"MIT"
] | null | null | null | # -*- coding; utf-8 -*-
#%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import phd.thermo
import phd.viz
import seaborn as sns
constants = phd.thermo.load_constants()
colors, palette = phd.viz.phd_style()
# Load and restrict the various data sets
data = pd.read_csv('../../data/ch3_mutants/Chure2019_summarized_data.csv', comment='#')
data = data[data['class']=='IND'].copy()
kaki_only_stats = pd.read_csv('../../data/ch3_mutants/Chure2019_KaKi_only_summary.csv')
kaki_only_stats = kaki_only_stats[kaki_only_stats['operator']=='O2'].copy()
kaki_epAI_stats = pd.read_csv('../../data/ch3_mutants/Chure2019_KaKi_epAI_summary.csv')
kaki_epAI_stats = kaki_epAI_stats[kaki_epAI_stats['operator']=='O2']
kaki_epAI_samps = pd.read_csv('../../data/ch3_mutants/Chure2019_KaKi_epAI_samples.csv')
kaki_epAI_samps = kaki_epAI_samps[kaki_epAI_samps['operator']=='O2'].copy()
bohr = pd.read_csv('../../data/ch3_mutants/Chure2019_empirical_F_statistics.csv')
bohr = bohr[bohr['class']=='IND'].copy()
# Define constants for plotting
c_range = np.logspace(-3, 4, 200)
# Force the first concentration to exactly zero so the leftmost point of
# the symlog axis corresponds to no inducer.
c_range[0] = 0
bohr_range = np.linspace(-8, 8, 200)
# Master collapse curve: logistic function of the Bohr parameter.
F = (1 + np.exp(-bohr_range))**-1
#%%
# ##############################################################################
# FIGURE INSTANTIATION
# ##############################################################################
fig, ax = plt.subplots(3, 4, figsize=(6, 6))
phd.viz.despine(ax.ravel())
for a in ax.ravel():
    a.xaxis.set_tick_params(labelsize=6)
    a.yaxis.set_tick_params(labelsize=6)
# Add appropriate scaling
for i in range(4):
    ax[0, i].set_xscale('symlog', linthreshx=0.01)
    ax[-1, i].set_xscale('symlog', linthreshx=0.01)
    ax[-1, i].set_ylim([-6, 5])
    ax[0, i].set_ylim([-0.2, 1.2])
    ax[1, i].set_ylim([-0.2, 1.2])
    ax[0, i].set_xticks([0, 1E-2, 1E0, 1E2, 1E4])
    ax[0, i].set_yticks([0, 0.5, 1])
    ax[1, i].set_yticks([0, 0.5, 1])
    ax[-1, i].set_xticks([0, 1E-2, 1E0, 1E2, 1E4])
    ax[1, i].set_xlim([-8, 8])
    ax[-1, i].set_xlim([-0.001, 1E4])
# Define the axes
# NOTE(review): 'axes' uses the old mutant numbering (F164T/Q294*) found in
# the data files, while 'titles' uses the renumbered names (F161T/Q291*)
# shown on the figure; both map to the same column indices.
axes = {'F164T':3, 'Q294V':2, 'Q294K':1, 'Q294R':0}
titles = {'F161T':3, 'Q291V':2, 'Q291K':1, 'Q291R':0}
op_colors = {'O1':colors['blue'], 'O2':colors['orange'], 'O3':colors['purple']}
# Add labels
for m, a in titles.items():
    phd.viz.titlebox(ax[0, a], m, color=colors['black'], bgcolor='white', pad=0.02, boxsize="12%")
ax[0, 0].set_ylabel('fold-change', fontsize=8)
ax[1, 0].set_ylabel('fold-change', fontsize=8)
ax[2, 0].set_ylabel('$\Delta F$ [$k_BT$]', fontsize=8)
for i in range(4):
    ax[0, i].set_xlabel('IPTG [µM]', fontsize=8)
    ax[-1, i].set_xlabel('IPTG [µM]', fontsize=8)
    ax[1, i].set_xlabel('free energy [$k_BT$]', fontsize=8)
for i in range(3):
    for j in range(3):
        ax[i, j+1].set_yticklabels([])
# Add panel labels.
fig.text(0.05, 0.9, '(A)', fontsize=8)
fig.text(0.05, 0.6, '(B)', fontsize=8)
fig.text(0.05, 0.3, '(C)', fontsize=8)
# ##############################################################################
# FOLD CHANGE CURVES
# ##############################################################################
for i, o in enumerate(('O1', 'O2', 'O3')):
    ep_r = constants[o]
    for m, a in axes.items():
        # Plot the kaki only fits.
        _kaki = kaki_only_stats[kaki_only_stats['mutant']==m]
        ka = _kaki[_kaki['parameter']=='Ka']['median'].values[0]
        ki = _kaki[_kaki['parameter']=='Ki']['median'].values[0]
        arch = phd.thermo.SimpleRepression(R=260, ep_r=ep_r, ka=ka, ki=ki,
            ep_ai=4.5, effector_conc=c_range).fold_change()
        ax[0, a].plot(c_range, arch, ':', color=op_colors[o], lw=0.5)
        # Plot the credible regions for the kaki and epAI fits.
        _kaki = kaki_epAI_samps[kaki_epAI_samps['mutant']==m]
        cred_region = np.zeros((2, len(c_range)))
        for j, c in enumerate(c_range):
            arch = phd.thermo.SimpleRepression(R=260, ep_r=ep_r, ka=_kaki['Ka'],
                ki=_kaki['Ki'], ep_ai=_kaki['ep_AI'], effector_conc=c).fold_change()
            cred_region[:, j] = phd.stats.compute_hpd(arch, 0.95)
        ax[0, a].fill_between(c_range, cred_region[0, :], cred_region[1, :],
            color=op_colors[o], alpha=0.5)
# ##############################################################################
# COLLAPSE CURVE
# ##############################################################################
for i in range(4):
    ax[1, i].plot(bohr_range, F, 'k-', lw=1)
# ##############################################################################
# FREE ENERGY PREDICTIONS
# ##############################################################################
# Reference (wild-type) active probability used to compute delta F below.
ref_pact = phd.thermo.MWC(effector_conc=c_range, ka=constants['Ka'],
    ki=constants['Ki'], ep_ai=constants['ep_AI']).pact()
for g, d in data.groupby(['mutant']):
    _kaki_samps = kaki_epAI_samps[kaki_epAI_samps['mutant']==g]
    cred_region = np.zeros((2, len(c_range)))
    for i, c in enumerate(c_range):
        pact = phd.thermo.MWC(effector_conc=c, ka=_kaki_samps['Ka'],
            ki=_kaki_samps['Ki'], ep_ai=_kaki_samps['ep_AI']).pact()
        delF = -np.log(pact / ref_pact[i])
        cred_region[:, i] = phd.stats.compute_hpd(delF, 0.95)
    # Plot!
    ax[-1, axes[g]].fill_between(c_range, cred_region[0, :], cred_region[1, :],
        color=op_colors['O2'], alpha=0.5)
# ##############################################################################
# COLLAPSE DATA
# ##############################################################################
for g, d in data.groupby(['mutant', 'operator', 'IPTGuM']):
    # Isolate the correct mutant
    _kaki = kaki_epAI_samps[kaki_epAI_samps['mutant']==g[0]]
    _kaki_stats = kaki_epAI_stats[kaki_epAI_stats['mutant']==g[0]]
    # Compute the point estimate for the bohr parameter
    ka = _kaki_stats[_kaki_stats['parameter']=='Ka']['median'].values[0]
    ki = _kaki_stats[_kaki_stats['parameter']=='Ki']['median'].values[0]
    ep_ai = _kaki_stats[_kaki_stats['parameter']=='ep_AI']['median'].values[0]
    _bohr = phd.thermo.SimpleRepression(R=260, ep_r=constants[g[1]], ka=ka, ki=ki,
        ep_ai=ep_ai,
        effector_conc=g[-1]).bohr_parameter()
    # Determine coloring.  O2 (the fit strain) is drawn open-faced to
    # distinguish fits from predictions.
    if g[1] == 'O2':
        face = 'w'
        ec = colors['orange']
    else:
        face = op_colors[g[1]]
        ec = 'white'
    # Plot!
    _ax = ax[1, axes[g[0]]]
    _ax.errorbar(_bohr, d['mean'], d['sem'], fmt='o', markerfacecolor=face,
        color=op_colors[g[1]], ms=4, markeredgewidth=0.5, lw=0.75,
        capsize=1, markeredgecolor=ec)
# ##############################################################################
# FOLD-CHANGE DATA
# ##############################################################################
for g, d in data.groupby(['mutant', 'operator']):
    _ax = ax[0, axes[g[0]]]
    if g[1] == 'O2':
        face = 'w'
        ec = colors['orange']
    else:
        face = op_colors[g[1]]
        ec = 'white'
    _ax.errorbar(d['IPTGuM'], d['mean'], d['sem'], fmt='o',
        markerfacecolor=face, linestyle='none', color=op_colors[g[1]], capsize=1,
        label=g[1], markeredgewidth=0.5, ms=4, lw=0.75, markeredgecolor=ec)
# ##############################################################################
# DELTA F DATA
# ##############################################################################
for g, d in bohr.groupby(['mutant', 'operator', 'IPTGuM']):
    _ax = ax[2, axes[g[0]]]
    _param = d[d['parameter']=='delta_bohr']
    mu = d[d['parameter']=='fc_mu']['median'].values[0]
    sig = d[d['parameter']=='fc_sigma']['median'].values[0]
    # If mu is closer to boundaries than value of sigma, do not show the glyph
    # (alpha = 0 )
    # NOTE(review): 'color' assigned in this branch is never used below;
    # alpha/lw/fmt are what actually hide the glyph.
    if (mu < sig) | (1 - mu < sig):
        color = 'slategray'
        alpha = 0
        lw = 0
        fmt = 'x'
    else:
        color = op_colors[g[1]]
        alpha = 1
        lw = 0.5
        fmt = 'o'
    if g[1] == 'O2':
        face = 'w'
        ec = colors['orange']
        zorder=1000
    else:
        face = op_colors[g[1]]
        ec = 'white'
        zorder=100
    # Error-bar "caps" are drawn manually with hlines; widen them in
    # proportion to the x position since the axis is logarithmic.
    if g[-1] == 0:
        cap_min = -0.1
        cap_max = 0.001
    else:
        cap_min = g[-1] * 0.8
        cap_max = g[-1] * 1.2
    _ax.plot(_param['IPTGuM'], _param['median'], linestyle='none', marker=fmt, markeredgecolor=ec,
        markerfacecolor=face , alpha=alpha, ms=4, zorder=zorder,
        markeredgewidth=0.5)
    _ax.vlines(_param['IPTGuM'], _param['hpd_min'], _param['hpd_max'], color=op_colors[g[1]],
        lw=lw, zorder=zorder)
    _ax.hlines(_param['hpd_min'], cap_min, cap_max, lw=lw, zorder=zorder,
        color=op_colors[g[1]])
    _ax.hlines(_param['hpd_max'], cap_min, cap_max, lw=lw, zorder=zorder,
        color=op_colors[g[1]])
# ##############################################################################
# LEGEND INFORMATION
# ##############################################################################
# NOTE(review): 'leg' is unused after assignment; kept for parity.
leg = ax[0, 0].legend(fontsize=5, ncol=3, columnspacing=0.001, handletextpad=0.01)
plt.subplots_adjust(wspace=0.15, hspace=0.5)
plt.savefig('../figs/ch3_fig4.pdf', bbox_inches='tight')
plt.savefig('../figs/ch3_fig4.png', bbox_inches='tight')
# %%
| 39.738397 | 99 | 0.513272 |
0d5ee1de5730eac65522a7536fd750e9fd5b01c2 | 3,232 | py | Python | update-modified-keywords.py | shen390s/git-keywords | ed5cebf68509e66005c6c44214f2e97ce1c0c2eb | [
"MIT"
] | 2 | 2018-06-28T07:15:08.000Z | 2018-10-19T03:06:44.000Z | update-modified-keywords.py | doggy8088/git-keywords | e3f75b99359060e1b5e4cf0a021e25e774c08600 | [
"MIT"
] | 1 | 2021-08-20T16:09:09.000Z | 2021-08-20T16:09:09.000Z | update-modified-keywords.py | doggy8088/git-keywords | e3f75b99359060e1b5e4cf0a021e25e774c08600 | [
"MIT"
] | 6 | 2017-10-06T15:19:29.000Z | 2021-08-19T15:54:18.000Z | #!/usr/bin/env python3
# file: update-modified-keywords.py
# vim:fileencoding=utf-8:fdm=marker:ft=python
#
# Copyright © 2013-2015 R.F. Smith <rsmith@xs4all.nl>.
# SPDX-License-Identifier: MIT
# Created: 2013-07-17T18:58:42+02:00
# Last modified: 2019-07-27T21:01:32+0200
"""Remove and check out those files that that contain keywords and have
changed since in the last commit in the current working directory."""
from base64 import b64decode
import mmap
import logging
import os
import subprocess as sp
import sys
def main(args):
    """Entry point: re-expand keyword files touched by the last commit.

    Checks that git is available and that we are at the top of a
    repository, finds files modified in the last commit that contain
    keywords, deletes them and forces git to check them out again.

    Arguments:
        args: command line arguments; args[0] is the program name used
            as a prefix for status messages.
    """
    logging.basicConfig(level='INFO', format='%(levelname)s: %(message)s')
    # Bail out early unless the "git" executable can be started at all.
    try:
        sp.run(['git'], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
        logging.info('found “git”')
    except FileNotFoundError:
        logging.error('the program “git” cannot be found')
        sys.exit(1)
    # We must be sitting in the top level of a repository.
    if not os.access('.git', os.F_OK):
        print('No .git directory found!')
        sys.exit(1)
    print('{}: Updating modified files.'.format(args[0]))
    changed = modifiedfiles()
    if not changed:
        print('{}: No modified files.'.format(args[0]))
        sys.exit(0)
    changed.sort()
    # Narrow the list down to files that actually contain keywords.
    keyword_files = keywordfiles(changed)
    if not keyword_files:
        print('{}: No keyword files modified.'.format(args[0]))
        sys.exit(0)
    # Delete the stale copies and let git restore them, which re-expands
    # the keywords via the configured filters.
    for name in keyword_files:
        os.remove(name)
    sp.call(['git', 'checkout', '-f'] + keyword_files)
def modifiedfiles():
    """Find files that have been modified in the last commit.

    Returns:
        A list of filenames (regular files only).
    """
    fnl = []
    try:
        args = ['git', 'diff-tree', 'HEAD~1', 'HEAD', '--name-only', '-r',
                '--diff-filter=ACMRT']
        # N.B. ``check_output`` neither accepts ``stdout=`` nor ``check=``
        # (it supplies both itself) and it returns the output directly, not
        # a CompletedProcess — so the original call raised before running.
        # ``sp.run`` with ``check=True`` gives the intended behavior.
        cp = sp.run(args, stdout=sp.PIPE, stderr=sp.DEVNULL, text=True,
                    check=True)
        fnl = cp.stdout.splitlines()
        # Deal with unmodified repositories
        if len(fnl) == 1 and fnl[0] == 'clean':
            return []
    except sp.CalledProcessError as e:
        # ``cp`` is never bound when the command itself fails; inspect the
        # exception's returncode instead of the undefined local.
        if e.returncode == 128:  # new repository: HEAD~1 does not exist yet
            args = ['git', 'ls-files']
            cp = sp.run(args, stdout=sp.PIPE, stderr=sp.DEVNULL, text=True)
            fnl = cp.stdout.splitlines()
    # Only return regular files.
    fnl = [i for i in fnl if os.path.isfile(i)]
    return fnl
def keywordfiles(fns):
    """Filter those files that have keywords in them.

    Arguments:
        fns: A list of filenames.

    Returns:
        A list of filenames for files that contain keywords, in the
        same order they were given.
    """
    # The keyword markers are stored base64-encoded so this script can be
    # checked into a git repo without the markers being expanded/mangled.
    markers = (b64decode('JERhdGU='), b64decode('JFJldmlzaW9u'))
    found = []
    for name in fns:
        with open(name, 'rb') as handle:
            try:
                view = mmap.mmap(handle.fileno(), 0, access=mmap.ACCESS_READ)
            except ValueError:
                # Empty files cannot be mapped; they hold no keywords.
                continue
            if any(view.find(marker) != -1 for marker in markers):
                found.append(name)
            view.close()
    return found
# Run only when executed as a script (e.g. from a git hook), not on import.
if __name__ == '__main__':
    main(sys.argv)
| 29.117117 | 75 | 0.593441 |
43ddab997f618bf97075a978e2a0bca8e57cd85b | 825 | py | Python | setup.py | AlexandruValeanu/yoda | e6b4325737b877488af4d1bf0b86eb1d98b88aed | [
"MIT"
] | 747 | 2017-06-28T04:58:53.000Z | 2022-02-14T21:40:52.000Z | setup.py | dude-cli/dude | e6b4325737b877488af4d1bf0b86eb1d98b88aed | [
"MIT"
] | 235 | 2017-06-30T12:58:02.000Z | 2019-05-02T02:56:18.000Z | setup.py | dude-cli/dude | e6b4325737b877488af4d1bf0b86eb1d98b88aed | [
"MIT"
] | 237 | 2017-06-12T21:03:03.000Z | 2021-09-16T14:48:59.000Z | from setuptools import setup, find_packages
# Runtime dependencies for the "yoda" CLI, installed by pip alongside it.
REQUIREMENTS = [
    "Click",
    "Pillow",
    "pychalk",
    "apiai",
    "emoji",
    "pyyaml",
    "lepl",
    "pycrypto",
    "pyspeedtest",
    "forex-python",
    "dulwich",
    "PyGithub",
    "future",
    "speedtest-cli",
    "imageio",
    "requests",
    "pydub",
    "pandas",
    "fuzzywuzzy",
    "python-levenshtein",
]

# Package metadata and build configuration; the console_scripts entry point
# exposes the "yoda" command, backed by yoda:cli.
setup(
    name="yoda",
    version="1.0.0",
    py_modules=["yoda"],
    packages=find_packages(),
    include_package_data=True,
    install_requires=REQUIREMENTS,
    package_data={"": ["*.txt", "*.lst"]},
    entry_points="""
        [console_scripts]
        yoda=yoda:cli
    """,
    test_suite="nose.collector",
    tests_require=["nose"],
)
| 21.153846 | 44 | 0.467879 |
5a5f06c8711267a10666a65e6cb4fbf154a7e3b4 | 7,128 | py | Python | paasta_tools/cli/cmds/push_to_registry.py | sobolevn/paasta | 8b87e0b13816c09b3d063b6d3271e6c7627fd264 | [
"Apache-2.0"
] | 1,711 | 2015-11-10T18:04:56.000Z | 2022-03-23T08:53:16.000Z | paasta_tools/cli/cmds/push_to_registry.py | sobolevn/paasta | 8b87e0b13816c09b3d063b6d3271e6c7627fd264 | [
"Apache-2.0"
] | 1,689 | 2015-11-10T17:59:04.000Z | 2022-03-31T20:46:46.000Z | paasta_tools/cli/cmds/push_to_registry.py | sobolevn/paasta | 8b87e0b13816c09b3d063b6d3271e6c7627fd264 | [
"Apache-2.0"
] | 267 | 2015-11-10T19:17:16.000Z | 2022-02-08T20:59:52.000Z | #!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains methods used by the paasta client to upload a docker
image to a registry.
"""
import base64
import binascii
import json
import os
import requests
from requests.exceptions import RequestException
from requests.exceptions import SSLError
from paasta_tools.cli.utils import get_jenkins_build_output_url
from paasta_tools.cli.utils import validate_full_git_sha
from paasta_tools.cli.utils import validate_service_name
from paasta_tools.generate_deployments_for_service import build_docker_image_name
from paasta_tools.utils import _log
from paasta_tools.utils import _log_audit
from paasta_tools.utils import _run
from paasta_tools.utils import build_docker_tag
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import get_service_docker_registry
def add_subparser(subparsers):
    """Register the "push-to-registry" subcommand on the paasta CLI.

    All option strings, help text, and the order of the add_argument
    calls are user-visible CLI behavior and must be kept stable.

    :param subparsers: the argparse subparsers object of the main parser
    """
    list_parser = subparsers.add_parser(
        "push-to-registry",
        help="Uploads a docker image to a registry",
        description=(
            "'paasta push-to-registry' is a tool to upload a local docker image "
            "to the configured PaaSTA docker registry with a predictable and "
            "well-constructed image name. The image name must be predictable because "
            "the other PaaSTA components are expecting a particular format for the docker "
            "image name."
        ),
        epilog=(
            "Note: Uploading to a docker registry often requires access to the local "
            "docker socket as well as credentials to the remote registry"
        ),
    )
    list_parser.add_argument(
        "-s",
        "--service",
        help='Name of service for which you wish to upload a docker image. Leading "services-", '
        "as included in a Jenkins job name, will be stripped.",
        required=True,
    )
    # The commit is validated eagerly so a malformed sha fails at parse time.
    list_parser.add_argument(
        "-c",
        "--commit",
        help="Git sha after which to name the remote image",
        required=True,
        type=validate_full_git_sha,
    )
    list_parser.add_argument(
        "-d",
        "--soa-dir",
        dest="soa_dir",
        metavar="SOA_DIR",
        default=DEFAULT_SOA_DIR,
        help="define a different soa config directory",
    )
    list_parser.add_argument(
        "-f",
        "--force",
        help=(
            "Do not check if the image is already in the PaaSTA docker registry. "
            "Push it anyway."
        ),
        action="store_true",
    )
    # Dispatch: running "paasta push-to-registry" invokes paasta_push_to_registry.
    list_parser.set_defaults(command=paasta_push_to_registry)
def build_command(upstream_job_name, upstream_git_commit):
    """Return the shell command that pushes the tagged image.

    :param upstream_job_name: service name used to build the docker tag
    :param upstream_git_commit: git sha baked into the docker tag
    :returns: a "docker push <tag>" command string
    """
    # build_docker_tag re-applies the full image name (including the
    # 'services-' prefix stripped during validation) that the registry
    # expects, so the pushed name matches what other PaaSTA components use.
    return "docker push {}".format(
        build_docker_tag(upstream_job_name, upstream_git_commit)
    )
def paasta_push_to_registry(args):
    """Upload a docker image to a registry.

    :param args: parsed argparse namespace with ``service``, ``commit``,
        ``soa_dir`` and ``force`` attributes (see add_subparser)
    :returns: 0 on success or skip, 1 on registry connection failure,
        otherwise the return code of the ``docker push`` command
    """
    service = args.service
    # Jenkins job names look like "services-<name>"; strip the prefix so
    # the bare service name can be validated against the soa configs.
    if service and service.startswith("services-"):
        service = service.split("services-", 1)[1]
    validate_service_name(service, args.soa_dir)
    # Unless --force was given, skip the push when the tag already exists
    # in the registry, so an existing image is never overwritten.
    if not args.force:
        try:
            if is_docker_image_already_in_registry(service, args.soa_dir, args.commit):
                print(
                    "The docker image is already in the PaaSTA docker registry. "
                    "I'm NOT overriding the existing image. "
                    "Add --force to override the image in the registry if you are sure what you are doing."
                )
                return 0
        except RequestException as e:
            registry_uri = get_service_docker_registry(service, args.soa_dir)
            print(
                "Can not connect to the PaaSTA docker registry '%s' to verify if this image exists.\n"
                "%s" % (registry_uri, str(e))
            )
            return 1
    cmd = build_command(service, args.commit)
    loglines = []
    # Run "docker push" with a generous one-hour timeout; output is also
    # streamed to the PaaSTA "build" log component.
    returncode, output = _run(
        cmd,
        timeout=3600,
        log=True,
        component="build",
        service=service,
        loglevel="debug",
    )
    if returncode != 0:
        loglines.append("ERROR: Failed to promote image for %s." % args.commit)
        # Point the reader at the Jenkins build output when running in CI.
        output = get_jenkins_build_output_url()
        if output:
            loglines.append("See output: %s" % output)
    else:
        loglines.append("Successfully pushed image for %s to registry" % args.commit)
        # Record the successful push in the audit trail.
        _log_audit(
            action="push-to-registry",
            action_details={"commit": args.commit},
            service=service,
        )
    for logline in loglines:
        _log(service=service, line=logline, component="build", level="event")
    return returncode
def read_docker_registry_creds(registry_uri):
    """Read the (username, password) pair for a registry from ~/.dockercfg.

    :param registry_uri: the registry hostname used as the key in .dockercfg
    :returns: a (username, password) tuple, or (None, None) when the file is
        missing, unreadable, malformed, lacks an entry for this registry,
        or carries an undecodable auth payload.
    """
    dockercfg_path = os.path.expanduser("~/.dockercfg")
    try:
        with open(dockercfg_path) as f:
            dockercfg = json.load(f)
        auth = base64.b64decode(dockercfg[registry_uri]["auth"]).decode("utf-8")
        first_colon = auth.find(":")
        if first_colon != -1:
            # NOTE(review): the slice drops the final two characters of the
            # decoded auth string. Preserved as-is, but confirm the stored
            # auth payload really carries two trailing bytes.
            return (auth[:first_colon], auth[first_colon + 1 : -2])
    except OSError:  # Can't open/read ~/.dockercfg (IOError is an alias)
        pass
    except json.JSONDecodeError:
        # Public name for decode failures; the previous spelling
        # ``json.scanner.JSONDecodeError`` does not exist, so malformed
        # JSON raised AttributeError instead of being swallowed here.
        pass
    except KeyError:  # No entry for this registry, or no "auth" field
        pass
    except (binascii.Error, UnicodeDecodeError):  # undecodable auth payload
        pass
    return (None, None)
def is_docker_image_already_in_registry(service, soa_dir, sha):
    """Verifies that docker image exists in the paasta registry.

    :param service: name of the service
    :param soa_dir: soa config directory used to look up the registry
    :param sha: git sha
    :returns: True, False or raises requests.exceptions.RequestException
    """
    registry_uri = get_service_docker_registry(service, soa_dir)
    repository, tag = build_docker_image_name(service, sha).split(":")
    # creds is (username, password) or (None, None) when no ~/.dockercfg
    # entry exists for this registry.
    creds = read_docker_registry_creds(registry_uri)
    # Registry v2 API: a HEAD on the manifest tells us whether the tag exists.
    uri = f"{registry_uri}/v2/{repository}/manifests/paasta-{sha}"
    with requests.Session() as s:
        try:
            url = "https://" + uri
            r = (
                s.head(url, timeout=30)
                if creds[0] is None
                else s.head(url, auth=creds, timeout=30)
            )
        except SSLError:
            # If no auth creds, fallback to trying http
            # (never send credentials over plain http).
            if creds[0] is not None:
                raise
            url = "http://" + uri
            r = s.head(url, timeout=30)
        if r.status_code == 200:
            return True
        elif r.status_code == 404:
            return False  # No Such Repository Error
        # Any other status (401, 5xx, ...) is surfaced to the caller.
        r.raise_for_status()
4b839f2606f7582e9926ca2f486db6bf63f09af3 | 1,046 | py | Python | oh/migrations/versions/680b44ae9515_support_ticket_sort_key.py | akshitdewan/cs61a-apps | 155f2afe98b238fb4b1c4ca1c79610ec55e826e6 | [
"MIT"
] | 5 | 2020-09-10T01:45:09.000Z | 2022-01-10T23:24:03.000Z | oh/migrations/versions/680b44ae9515_support_ticket_sort_key.py | akshitdewan/cs61a-apps | 155f2afe98b238fb4b1c4ca1c79610ec55e826e6 | [
"MIT"
] | 424 | 2020-08-24T06:22:59.000Z | 2021-10-10T02:36:11.000Z | oh/migrations/versions/680b44ae9515_support_ticket_sort_key.py | akshitdewan/cs61a-apps | 155f2afe98b238fb4b1c4ca1c79610ec55e826e6 | [
"MIT"
] | 7 | 2020-08-28T22:05:10.000Z | 2022-03-04T12:47:05.000Z | """support ticket sort_key
Revision ID: 680b44ae9515
Revises: c2365b4ad424
Create Date: 2021-02-11 11:14:35.398091
"""
# revision identifiers, used by Alembic.
revision = "680b44ae9515"
down_revision = "c2365b4ad424"
from alembic import op
import sqlalchemy as sa
import oh_queue.models
from oh_queue.models import *
from datetime import datetime
def upgrade():
    """Add a NOT NULL, indexed ``sort_key`` column to the ticket table."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Add the column as nullable first so existing rows don't violate the
    # constraint, backfill it from ``created``, then tighten to NOT NULL.
    op.add_column(
        "ticket",
        sa.Column(
            "sort_key",
            sa.DateTime(),
            nullable=True,
        ),
    )
    op.execute("UPDATE ticket SET sort_key=created")
    op.alter_column("ticket", "sort_key", nullable=False)
    # Index it since it will be used for ordering lookups.
    op.create_index(op.f("ix_ticket_sort_key"), "ticket", ["sort_key"], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Revert the upgrade: drop the ``sort_key`` index and column."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop the index before the column it covers.
    op.drop_index(op.f("ix_ticket_sort_key"), table_name="ticket")
    op.drop_column("ticket", "sort_key")
    # ### end Alembic commands ###
| 24.904762 | 85 | 0.661568 |
e390242ce069fd97053244bcdf7832c9cf8a9eb9 | 4,647 | py | Python | test/functional/sapling_fillblock.py | TransFastCore/pivx533 | b2168d6c2b447c9bf9c7175ffdfc8342b2861179 | [
"MIT"
] | 15 | 2019-08-28T13:34:30.000Z | 2021-12-15T22:01:08.000Z | test/functional/sapling_fillblock.py | TransFastCore/pivx533 | b2168d6c2b447c9bf9c7175ffdfc8342b2861179 | [
"MIT"
] | 9 | 2019-07-17T22:42:46.000Z | 2022-03-02T12:41:27.000Z | test/functional/sapling_fillblock.py | TransFastCore/pivx533 | b2168d6c2b447c9bf9c7175ffdfc8342b2861179 | [
"MIT"
] | 13 | 2019-06-30T22:44:30.000Z | 2022-02-19T16:07:54.000Z | #!/usr/bin/env python3
# Copyright (c) 2020 The TrumpCoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
import time
from test_framework.test_framework import TrumpCoinTestFramework
from test_framework.util import (
assert_greater_than,
assert_greater_than_or_equal,
assert_equal,
Decimal,
satoshi_round,
)
def timed(f):
    """Call *f* with no arguments and measure how long the call takes.

    :param f: a zero-argument callable
    :returns: a (result, elapsed_seconds) tuple
    """
    # perf_counter is monotonic and high-resolution; time.time() can jump
    # (NTP/wall-clock adjustments) and then report wrong or negative
    # durations for an interval measurement like this one.
    start = time.perf_counter()
    ret = f()
    elapsed = time.perf_counter() - start
    return ret, elapsed
MAX_SHIELDED_BLOCKSIZE = 750000
class SaplingFillBlockTest(TrumpCoinTestFramework):
    """Functional test: a miner never packs more than MAX_SHIELDED_BLOCKSIZE
    bytes of shielded transactions into one block."""

    def set_test_params(self):
        # Two nodes on a fresh chain; node 0 (the miner) gets a raised
        # block size cap so the shielded limit is the binding constraint.
        self.num_nodes = 2
        self.setup_clean_chain = True
        self.extra_args = [["-blockmaxsize=1999000"], []]

    def utxo_splitter(self, node_from, n_inputs, node_to):
        """Split the n_inputs largest utxos of node_from into 100 outputs
        each, paid to fresh node_to addresses. Returns the txids."""
        txids = []
        # collect utxos
        utxos = node_from.listunspent()
        assert_greater_than_or_equal(len(utxos), n_inputs)
        # sort by size, largest first. The previous code called sorted()
        # and discarded its return value, so nothing was actually sorted;
        # sort in place instead.
        utxos.sort(key=lambda utxo: utxo["amount"], reverse=True)
        # pick the first N
        utxos = utxos[:n_inputs]
        # split each one in 100 (use fixed 0.05 TRUMP fee)
        for u in utxos:
            prevout = [{"txid": u["txid"], "vout": u["vout"]}]
            output_amt = satoshi_round((u["amount"] - Decimal("0.05")) / 100)
            recipients = {node_to.getnewaddress(): output_amt for _ in range(100)}
            rawTx_unsigned = node_from.createrawtransaction(prevout, recipients)
            rawTx_signed = node_from.signrawtransaction(rawTx_unsigned)["hex"]
            txids.append(node_from.sendrawtransaction(rawTx_signed))
        return txids

    def check_mempool(self, miner, txids):
        """Assert the miner's mempool holds exactly the given txids and is
        at least 1 MB, so the shielded cap will actually be exercised."""
        self.log.info("Checking mempool...")
        self.sync_mempools()
        mempool_info = miner.getmempoolinfo()
        assert_equal(mempool_info['size'], len(txids))
        mempool_bytes = mempool_info['bytes']
        self.log.info("Miner's mempool size: %d bytes" % mempool_bytes)
        assert_greater_than_or_equal(mempool_bytes, 1000000)

    def mine_and_checkblock(self, miner, alice):
        """Mine one block, sync, and assert its size respects the shielded
        transaction cap."""
        self.log.info("Mining the block...")
        bhash, elapsed = timed(lambda: miner.generate(1)[0])
        self.log.info("Block mined in %d seconds" % elapsed)
        _, elapsed = timed(lambda: self.sync_all())
        bsize = alice.getblock(bhash, True)["size"]
        self.log.info("Peers synced in %d seconds. Block size: %d" % (elapsed, bsize))
        # Only shielded txes in mempool. Block size must be below
        # MAX_SHIELDED_BLOCKSIZE + 513 (header + coinbase + coinstake)
        assert_greater_than(MAX_SHIELDED_BLOCKSIZE + 513, bsize)

    def send_shielded(self, node, n_txes, from_address, shield_to):
        """Create n_txes shielding transactions and return their txids."""
        txids = []
        for i in range(n_txes):
            txids.append(node.shieldsendmany(from_address, shield_to))
            if (i + 1) % 200 == 0:
                self.log.info("...%d Transactions created..." % (i + 1))
        self.sync_mempools()
        return txids

    def run_test(self):
        miner = self.nodes[0]
        alice = self.nodes[1]
        # First mine 300 blocks
        self.log.info("Generating 300 blocks...")
        miner.generate(300)
        self.sync_blocks()
        assert_equal(self.nodes[0].getblockchaininfo()['upgrades']['v5 shield']['status'], 'active')
        # -- First check that the miner never produces blocks with more than 750kB of shielded txes
        # Split 10 utxos (of 250 TRUMP each) in 1000 new utxos of ~2.5 TRUMP each (to alice)
        UTXOS_TO_SPLIT = 10
        UTXOS_TO_SHIELD = UTXOS_TO_SPLIT * 100
        self.log.info("Creating %d utxos..." % UTXOS_TO_SHIELD)
        txids = self.utxo_splitter(miner, UTXOS_TO_SPLIT, alice)
        assert_equal(len(txids), UTXOS_TO_SPLIT)
        miner.generate(2)
        self.sync_blocks()
        new_utxos = alice.listunspent()
        assert_equal(len(new_utxos), UTXOS_TO_SHIELD)
        # Now alice shields the new utxos individually (fixed 0.2 TRUMP fee --> ~2.3 TRUMP notes)
        self.log.info("Shielding utxos...")
        alice_z_addr = alice.getnewshieldaddress()
        shield_to = [{"address": alice_z_addr, "amount": new_utxos[0]["amount"] - Decimal("0.2")}]
        txids = self.send_shielded(alice, UTXOS_TO_SHIELD, "from_transparent", shield_to)
        # Check mempool
        self.check_mempool(miner, txids)
        # Mine the block
        self.mine_and_checkblock(miner, alice)
        self.log.info("Done. %d txes still in mempool." % miner.getmempoolinfo()['size'])

if __name__ == '__main__':
    SaplingFillBlockTest().main()
da9b1fffa646bcc404f3f3c4ee10d85467ae026c | 2,374 | py | Python | tests/V1/test_sale_view.py | kmwangemi/Store-Manager-App | fc7c39dae0da9f62b7fd22353f1374b94646d2c5 | [
"MIT"
] | null | null | null | tests/V1/test_sale_view.py | kmwangemi/Store-Manager-App | fc7c39dae0da9f62b7fd22353f1374b94646d2c5 | [
"MIT"
] | 14 | 2018-10-08T15:45:19.000Z | 2021-06-02T00:41:43.000Z | tests/V1/test_sale_view.py | kmwangemi/Store-Manager-App | fc7c39dae0da9f62b7fd22353f1374b94646d2c5 | [
"MIT"
] | null | null | null | import unittest
import json
from run import app
from app.api.V1.views.sale_views import sale_info
class SalestestCase(unittest.TestCase):
    """Tests for the /api/v1/sales endpoints of the Store Manager API."""

    def setUp(self):
        """will be called before every test"""
        self.client = app.test_client
        # Placeholder payloads: field values are just the field names.
        self.sale = {
            "product" : "product",
            "description" : "description",
            "quantity" : "quantity",
            "stock_quantity" : "stock_quantity",
            "price" : "price",
            "total" : "total"
        }
        self.empty_sale = {
            "product" : "",
            "description" : "",
            "quantity" : "",
            "stock_quantity" : "",
            "price" : "",
            "total" : ""
        }
    '''Tests for sale creation'''
    def test_sale_created_successfully(self):
        """Tests that a sale is created successfully"""
        res = self.client().post('/api/v1/sales', data=json.dumps(self.sale), headers = {"content-type": "application/json"})
        self.assertEqual(res.status_code, 201)
        self.assertIn("Sale created", str(res.data))
    def test_sale_cannot_create_with_no_details(self):
        """Tests that a sale cannot be created with no details"""
        # NOTE(review): the test name says creation should fail, yet this
        # asserts 201 (created). Confirm whether the API is expected to
        # reject empty payloads (e.g. 400) and update one or the other.
        res = self.client().post('/api/v1/sales', data=json.dumps(self.empty_sale), headers = {"content-type": "application/json"})
        self.assertEqual(res.status_code, 201)
    '''Tests for getting successfully created sales'''
    def test_gets_successfully_created_sales(self):
        """Tests that api gets all created sales"""
        res = self.client().get('/api/v1/sales', data=json.dumps(self.sale), headers = {"content-type": "application/json"})
        self.assertEqual(res.status_code, 200)
        self.assertIn("Sales", str(res.data))
    '''Tests for getting single sale'''
    def test_gets_single_successfully_created_sale(self):
        """Tests that api gets single successfully created sale"""
        # NOTE(review): '<saleId>' is sent literally, not a real id; the
        # endpoint presumably treats it as an opaque path segment — verify.
        res = self.client().get('/api/v1/sales/<saleId>', data=json.dumps(self.sale), headers = {"content-type": "application/json"})
        self.assertEqual(res.status_code, 200)
        self.assertIn("Sale", str(res.data))
54022cb32c0c638e3eb2e72aeb220665dfa020f5 | 22,533 | py | Python | Cirq/CirqNonOracles/qft_tests.py | jclapis/qsfe | 941488f8f8a81a4b7d7fe28414ce14fa478a692a | [
"Apache-2.0"
] | 11 | 2019-06-02T01:47:24.000Z | 2021-08-10T14:54:35.000Z | Cirq/CirqNonOracles/qft_tests.py | jclapis/qsfe | 941488f8f8a81a4b7d7fe28414ce14fa478a692a | [
"Apache-2.0"
] | null | null | null | Cirq/CirqNonOracles/qft_tests.py | jclapis/qsfe | 941488f8f8a81a4b7d7fe28414ce14fa478a692a | [
"Apache-2.0"
] | 2 | 2020-04-19T17:39:23.000Z | 2021-03-08T12:35:55.000Z | # ========================================================================
# Copyright (C) 2019 The MITRE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import vs_test_path_fixup
import math
import unittest
import cirq
import qft
import shor_math
class QftTests(unittest.TestCase):
"""
This class contains the tests for QFT, including some customized
implementations of a few simple sine and cosine sampling vectors. These
could be handy when testing other frameworks that don't come with an
analog of the "PrepareArbitraryState" function or the Ry(...) function,
so I can just directly implement circuits that put the register into the
proper states for these tests.
"""
# ====================
# == Test Case Code ==
# ====================
    def run_iqft_with_waveform_samples(self, number_of_qubits, sample_rate, 
                                           correct_frequency, prep_function, prep_args):
        """
        Tests my QFT implementation by comparing it to the classical DFT, ensuring it produces the
        same output as DFT when given the same input (after being normalized for quantum operations).

        Parameters:
            number_of_qubits (int): The size of the processing register to use, in qubits.
                This will be used to represent 2^N samples of the input signal.
            sample_rate (float): The sampling rate used by the prep opration. This is used to
                determine the actual frequency of the measured value once QFT is finished, which can
                vary based on the number of samples and the sample rate.
            correct_frequency (double): The correct answer that QFT should provide after running on
                the prepared input state.
            prep_function (function): The function that prepares the qubit register in the desired
                state for this test.
            prep_args (anything): Arguments to pass to the preparation function.

        Raises:
            ValueError: if the measured frequency differs from correct_frequency.
        """
        qubits = cirq.NamedQubit.range(number_of_qubits, prefix="qubit")
        circuit = cirq.Circuit()
        # Set up the register so it's in the correct state for the test
        if prep_args is None:
            prep_function(circuit, qubits)
        else:
            prep_function(circuit, qubits, prep_args)
        # Run the inverse QFT, which corresponds to the normal DFT
        qft.iqft(circuit, qubits)
        # Measure the result from QFT
        circuit.append(cirq.measure(*qubits, key="result"))
        # Run the circuit
        simulator = cirq.Simulator()
        result = simulator.run(circuit, repetitions=1)
        result_states = result.histogram(key="result")
        # With repetitions=1 the histogram holds a single entry: grab that
        # one measured state and stop.
        for(state, count) in result_states.items():
            result = state
            break
        # QFT suffers from the same Nyquist-frequency mirroring as DFT, but we can't just
        # look at all of the output details and ignore the mirrored results. If we end up
        # measuring a mirrored result, this will flip it back to the proper result in the
        # 0 < X < N/2 space.
        number_of_states = 2 ** number_of_qubits
        if result > number_of_states / 2:
            result = number_of_states - result
        # Correct for the sample rate.
        total_time = number_of_states / sample_rate
        result = result / total_time
        # Verify we got the right result.
        # NOTE(review): exact float comparison; fine while sample counts and
        # rates are powers of two — confirm before using other sample rates.
        if result != correct_frequency:
            raise ValueError(f"Expected frequency {correct_frequency} but measured {result}.")
    def prepare_1hz_sine_8_samples(self, circuit, qubits) -> None:
        """
        Prepares a register so that the real component of each state's amplitude
        corresponds to a sine wave with a 1 Hz frequency, f(x) = sin(2πx). The 8
        possible states of the register will take on the role of the time steps
        from 0 to 7/8 of a second.
        For a classical DFT, the input would be the following array:
        [0, 0.707, 1, 0.707, 0, -0.707, -1, -0.707]
        where the first element is for t = 0, the second is for t = 1/8, and so on.
        For the quantum variation, these values are encoded into the amplitudes of
        each register state (and normalized so the whole vector has a magnitude of
        1). Thus, the total thing will become:
        0.354*|001⟩ + 0.5*|010⟩ + 0.354*|011⟩ - 0.354*|101⟩ - 0.5*|110⟩ - 0.354*|111⟩.

        Parameters:
            circuit (Circuit): The circuit being constructed
            qubits (list[Qid]): The register that will hold the sine wave samples
                in its superposition amplitudes
        """
        # Okay. So this algorithm is going to look weird at first considering it has nothing to do with
        # sine waves, and that's fine. I'm going to walk you through how I designed this, step by step.
        # Hopefully you can learn something from it and use it to design your own circuits for weird
        # qubit states!
        #
        # The original classical array for 8 measurements over 1 second of the sine wave is this:
        # [0, 1/√2, 1, 1/√2, 0, -1/√2, -1, -1/√2]
        #
        # We want to encode that into the real component of the amplitudes of a 3 qubit register, so we
        # get this:
        # 0*|000⟩ + 1/√2|001⟩ + 1*|010⟩ + 1/√2*|011⟩ + 0*|100⟩ - 1/√2*|101⟩ - 1*|110⟩ - 1/√2*|111⟩
        #
        # Immediately, there's a problem: these amplitudes are too big. Quantum state vectors need the
        # sum of squares to add to 1, and these add to:
        # 2*(1/√2)^2 + 2*1^2 + 2*(-1/√2)^2
        # = 2*1/2 + 2*1 + 2*1/2
        # = 4
        # So to fix this, we need to divide each state's probability by 4 (and thus, each state's
        # amplitude by √4 = 2.
        # This is the target state once it's been normalized:
        # 0*|000⟩ + 1/2√2*|001⟩ + 1/2*|010⟩ + 1/2√2*|011⟩ + 0*|100⟩ - 1/2√2*|101⟩ - 1/2*|110⟩ - 1/2√2*|111⟩
        # = 1/2( 1/√2*|001⟩ + |010⟩ + 1/√2*|011⟩ - 1/√2*|101⟩ - |110⟩ - 1/√2*|111⟩ )
        #
        # Now that we have the target state, we can start designing a circuit for it.
        # The first thing I notice about the general structure of the state is that it's really in two
        # halves: when q0 = 0, it's 1/√2*|001⟩ + |010⟩ + 1/√2*|011⟩.
        #         when q0 = 1, it's -1/√2*|101⟩ - |110⟩ - 1/√2*|111⟩.
        # The second half is just the negative version of the first half (with q0 flipped), so I know
        # 2 things:
        # 1. q0 isn't entangled with anything
        # 2. q0 is in an equal superposition of |0⟩ and |-1⟩.
        # Thus, we can reduce q0 to the |-⟩ state which is 1/√2*(|0⟩ - |1⟩). We know that whatever else
        # happens, we're going to put q0 into |-⟩ at the start of the circuit and ignore it after that.
        # Here's the reduction:
        # 1/2( 1/√2*|001⟩ + |010⟩ + 1/√2*|011⟩ - 1/√2*|101⟩ - |110⟩ - 1/√2*|111⟩ )
        # = 1/2( √2*1/√2( 1/√2*|001⟩ + |010⟩ + 1/√2*|011⟩ - 1/√2*|101⟩ - |110⟩ - 1/√2*|111⟩ ) )
        # = 1/2( √2( 1/√2*|-01⟩ + |-10⟩ + 1/√2*|-11⟩ ) )
        # = 1/2( |-01⟩ + √2*|-10⟩ + |-11⟩ )
        #
        # Okay, now we're getting somewhere. Next, I notice that when q2 is 1, q1 has an equal
        # probability of being |0⟩ or |1⟩. It might be more obvious if I rearrange the terms like this:
        # = 1/2( |-01⟩ + |-11⟩ + √2*|-10⟩ )
        #
        # In other words, when q2 is 1, q1 = |+⟩ which is 1/√2*(|0⟩ + |1⟩). Let's use that to reduce the
        # qubit state even further:
        # 1/2( |-01⟩ + |-11⟩ + √2*|-10⟩ )
        # = 1/2( √2*1/√2( |-01⟩ + |-11⟩ ) + √2*|-10⟩ )
        # = 1/2( √2( |-+1⟩ ) + √2*|-10⟩ )
        # = √2/2( |-+1⟩ + |-10⟩ )
        # = 1/√2( |-+1⟩ + |-10⟩ )
        #
        # This is as far as the reduction can go. You might think that you can reduce q2 into |+⟩ here,
        # but note that the state of qubit 1 changes depending on what qubit 2 is. That means we can't
        # reduce it; it also means that q1 and q2 are going to be entangled.
        # Anyway, now that the state is reduced, we can figure out how to create the states with a
        # circuit. We know that q0 is going to be |-⟩ no matter what, so that's easy: X and H will put
        # it into that state, and then we can ignore it. The other two qubits are then described by
        # this state:
        # 1/√2( |+1⟩ + |10⟩ )
        #
        # q2 has an equal probability of being |0⟩ or |1⟩ (probability 1/√2), so it can just be prepared
        # with a simple H.
        # Now for q1, the weird one: assuming it starts at |0⟩, then if q2 == 1, q1 should be Hadamard'd.
        # If q2 == 0, then q1 should be flipped instead. That's actually pretty easy to do: we can just
        # do a Controlled H with q2 as the control and q1 as the target, and a zero-controlled X (AKA a
        # 0-CNOT) with q2 as the control and q1 as the target. We just have to make sure q2 is H'd first.
        #
        # And thus, at the end of the day, this is how you construct a 3-qubit state where the real part
        # of the amplitudes maps to a 1 Hz sine wave, sampled at 8 samples per second:
        # (The gate order below matters: q2 must be put in superposition before
        # the gates controlled on it.)
        circuit.append([
            # Set up q0
            cirq.X(qubits[0]),
            cirq.H(qubits[0]),
            # Set up q2
            cirq.H(qubits[2]),
            # Set up q1: if q2 is 1, H it. Otherwise, X it.
            cirq.H(qubits[1]).controlled_by(qubits[2]),
            # 0-controlled CNOT
            cirq.X(qubits[2]),
            cirq.CNOT(qubits[2], qubits[1]),
            cirq.X(qubits[2])
        ])
        # I hope that writeup helped explain how these 7 lines create the sine wave, and help you do
        # circuit design like this in the future!
    def prepare_1hz_cosine_8_samples(self, circuit, qubits) -> None:
        """
        Prepares a register so that the real component of each state's amplitude
        corresponds to a cosine wave with a 1 Hz frequency, f(x) = cos(2πx). The 8
        possible states of the register will take on the role of the time steps
        from 0 to 7/8 of a second.
        For a classical DFT, the input would be the following array:
        [1, 0.707, 0, -0.707, -1, -0.707, 0, 0.707]
        where the first element is for t = 0, the second is for t = 1/8, and so on.
        For the quantum variation, these values are encoded into the amplitudes of
        each register state (and normalized so the whole vector has a magnitude of
        1). Thus, the total thing will become:
        0.5*|000⟩ + 0.354*|001⟩ - 0.354*|011⟩ - 0.5*|100⟩ - 0.354*|101⟩ + 0.354*|111⟩.

        Parameters:
            circuit (Circuit): The circuit being constructed
            qubits (list[Qid]): The register that will hold the cosine wave samples
                in its superposition amplitudes
        """
        # This operation is going to look quite different from the sine example, but that's okay. The
        # structure itself isn't really that important - the key is the methodology that lets you figure
        # out a circuit to generate the state you're looking for. The process for this one is the same,
        # so this is just going to be another example of it. I'll walk you through it.
        #
        # First things first: here's the conventional array of samples from cos(2πx) that you'd normally
        # feed into DFT:
        # [1, 1/√2, 0, -1/√2, -1, -1/√2, 0, 1/√2]
        #
        # Using the same normalization process as before, so this can be represented in qubits, gives us:
        # 1/2*|000⟩ + 1/2√2*|001⟩ + 0*|010⟩ - 1/2√2*|011⟩ - 1/2*|100⟩ - 1/2√2*|101⟩ + 0*|110⟩ + 1/2√2*|111⟩
        # = 1/2( |000⟩ + 1/√2*|001⟩ - 1/√2*|011⟩ - |100⟩ - 1/√2*|101⟩ + 1/√2*|111⟩ )
        #
        # There aren't any immediately obvious patterns with respect to the + and - parts (or at least,
        # I couldn't find any), so I just reorganized it a little bit to group the common parts together:
        # 1/2( |000⟩ - |100⟩ + 1/√2( |001⟩ - |011⟩ - |101⟩ + |111⟩ ) )
        #
        # Now with this, I do notice a commonality. In the first 2 terms, q2 is always 0. In the last 4
        # terms (the 1/√2 group), q2 is always 1. Both groups have a 50% chance of occurring, so really
        # we could break it down into this algorithm:
        # H(q2)
        # if(q2 == 0), put q0 and q1 in the first group
        # else, put q0 and q1 into the second group
        # Obviously we can't perform a classical "if-else" on qubits in superpositions... but we CAN
        # leverage the Controlled functor which effectively does the same thing!
        # So, with that in mind, let's figure out how to create the groups.
        #
        # The first group is just [|00⟩ - |10⟩]0⟩, which reduces to √2*|-00⟩. That's easy to write a
        # circuit for: we just do X and H on q0 to put it into the |-⟩ state. So for the first group,
        # we can do this:
        # if(q2 == 0)... (the gates below will be zero-controlled on q2)
        #   X(q0);
        #   H(q0);
        #
        # The second group is a little harder: [|00⟩ - |01⟩ - |10⟩ + |11⟩]1⟩. Written as a state vector
        # of q0 and q1, it looks like this: [1, -1, -1, 1]. We need some way to get the qubits into
        # this state. I did a little reverse engineering on it: we know that H on both qubits will give
        # the state [1, 1, 1, 1]. There are 2 negative phases, and we know that Z on q0 would give the
        # state [-1, 1, -1, 1]; Z on q1 would give [1, -1, 1, -1]. We could do either of those but we'd
        # need a way to swap the 1st and 2nd qubits, or the 3rd and 4th terms, respectively.
        # Wait! We have one! CNOT on q0 and q1 will swap the 3rd and 4th terms! So to get this state,
        # we could do the following:
        # if(q2 == 1)... (the gates below will be controlled on q2)
        #   H(q0);
        #   H(q1);
        #   Z(q1);
        #   CNOT(q0, q1);
        #
        # Finally, note that the last operation of the first group is H(q0) and the first operation of
        # the second group is H(q0), so we can just remove that step from both groups and perform it
        # unconditionally in-between their execution.
        #
        # You can verify that the amplitudes of each state work out with this setup. I wrote it up below,
        # and lo and behold, it produced the cosine measurement array. I'm sure there's a prettier way to
        # do this that looks more like the sine function, but this is meant to be another good example of
        # how to tackle circuit design to get to a target state.
        # (The zero-control on q2 is built from the X / CNOT / X sandwich; the
        # trailing CCX plays the role of the controlled H·Z·CNOT combination
        # derived above.)
        circuit.append([
            # Set up q2
            cirq.H(qubits[2]),
            # If q2 == 0
            cirq.X(qubits[2]),
            cirq.CNOT(qubits[2], qubits[0]),
            cirq.X(qubits[2]),
            cirq.H(qubits[0]),
            # Else if(q2 == 1)
            cirq.CNOT(qubits[2], qubits[1]),
            cirq.H(qubits[1]).controlled_by(qubits[2]),
            cirq.CCX(qubits[2], qubits[0], qubits[1])
        ])
def prepare_2hz_sine_8_samples(self, circuit, qubits):
    """
    Loads eight samples of a 2 Hz sine wave, f(x) = sin(4πx), into the
    amplitudes of a 3-qubit register, where the 8 basis states stand in
    for the time steps 0, 1/8, ..., 7/8 of a second.
    The classical DFT input would be [0, 1, 0, -1, 0, 1, 0, -1] (first
    element is t = 0, second is t = 1/8, and so on); after normalizing to
    unit magnitude, the prepared state is
        0.5*|001⟩ - 0.5*|011⟩ + 0.5*|101⟩ - 0.5*|111⟩.
    Parameters:
        circuit (Circuit): The circuit being constructed
        qubits (list[Qid]): The register that will hold the sine wave samples
            in its superposition amplitudes
    """
    # The target factors into an unentangled product state. q2 is 1 in every
    # term, and pulling it out leaves 1/2( |00⟩ - |01⟩ + |10⟩ - |11⟩ )|1⟩:
    #   = 1/2( √2( |+0⟩ - |+1⟩ ))|1⟩
    #   = 1/2( 2|+-⟩ )|1⟩
    #   = |+-1⟩
    # so each qubit can be prepared independently, with no entanglement.
    ops = [
        cirq.H(qubits[0]),  # q0 -> |+⟩
        cirq.X(qubits[1]),
        cirq.H(qubits[1]),  # q1 -> |-⟩
        cirq.X(qubits[2]),  # q2 -> |1⟩
    ]
    circuit.append(ops)
def prepare_2hz_cosine_8_samples(self, circuit, qubits):
    """
    Loads eight samples of a 2 Hz cosine wave, f(x) = cos(4πx), into the
    amplitudes of a 3-qubit register, where the 8 basis states stand in
    for the time steps 0, 1/8, ..., 7/8 of a second.
    The classical DFT input would be [1, 0, -1, 0, 1, 0, -1, 0] (first
    element is t = 0, second is t = 1/8, and so on); after normalizing to
    unit magnitude, the prepared state is
        0.5*|000⟩ - 0.5*|010⟩ + 0.5*|100⟩ - 0.5*|110⟩.
    Parameters:
        circuit (Circuit): The circuit being constructed
        qubits (list[Qid]): The register that will hold the cosine wave samples
            in its superposition amplitudes
    """
    # Same product state as the 2 Hz sine (|+⟩ on q0, |-⟩ on q1), except
    # q2 stays |0⟩ — so the X gate on q2 is simply dropped.
    ops = [
        cirq.H(qubits[0]),  # q0 -> |+⟩
        cirq.X(qubits[1]),
        cirq.H(qubits[1]),  # q1 -> |-⟩
    ]
    circuit.append(ops)
# ================
# == Unit Tests ==
# ================
def test_1hz_sine_8_samples(self):
    """Checks QFT against a 1 Hz sine wave: 8 samples taken at 8 samples/sec."""
    state_prep = self.prepare_1hz_sine_8_samples
    self.run_iqft_with_waveform_samples(3, 8, 1, state_prep, None)
def test_1hz_cosine_8_samples(self):
    """Checks QFT against a 1 Hz cosine wave: 8 samples taken at 8 samples/sec."""
    state_prep = self.prepare_1hz_cosine_8_samples
    self.run_iqft_with_waveform_samples(3, 8, 1, state_prep, None)
def test_2hz_sine_8_samples(self):
    """Checks QFT against a 2 Hz sine wave: 8 samples taken at 8 samples/sec."""
    state_prep = self.prepare_2hz_sine_8_samples
    self.run_iqft_with_waveform_samples(3, 8, 2, state_prep, None)
def test_2hz_cosine_8_samples(self):
    """Checks QFT against a 2 Hz cosine wave: 8 samples taken at 8 samples/sec."""
    state_prep = self.prepare_2hz_cosine_8_samples
    self.run_iqft_with_waveform_samples(3, 8, 2, state_prep, None)
def test_period_6(self):
    """
    Tests QFT by running a single iteration of the period-finding subroutine
    from Shor's algorithm. This test uses 21 as the number to factor, 11 as
    the original guess, and ensures that QFT reports that the modular
    exponential equation has a period of 6.
    """
    # Hardcoded quantum half of Shor's algorithm (see shor.py for the fully
    # documented implementation). Register sizing: the output register holds
    # values mod 21, which needs 5 qubits (2^4 = 16 < 21 <= 32 = 2^5). The
    # input register must cover 21^2 = 441 discrete states, which needs
    # 9 qubits (2^8 = 256 < 441 <= 512 = 2^9), giving 512 states total.
    #
    # For a guess of 11, the sequence 11^i mod 21 repeats with period 6:
    # 1, 11, 16, 8, 4, 2, 1, 11, 16, ...
    # so QFT should return some X with X/512 very close to k/6 for
    # k = 0..5; the amplitude peaks sit at 0, 85, 171, 256, 341 and 427.
    num_input_qubits = 9
    num_output_qubits = 5
    modulus = 21
    base_guess = 11
    input_reg = cirq.NamedQubit.range(num_input_qubits, prefix="input")
    output_reg = cirq.NamedQubit.range(num_output_qubits, prefix="output")
    circuit = cirq.Circuit()
    # Input register -> uniform superposition |+...+>, output -> |0...01>.
    circuit.append([
        cirq.H.on_each(*input_reg),
        cirq.X(output_reg[-1])
    ])
    # Entangle the registers via repeated controlled modular multiplication;
    # afterwards, measuring X on the input register forces the output
    # register to 11^X mod 21.
    for bit_index, control_qubit in enumerate(input_reg):
        weight = 2 ** (num_input_qubits - 1 - bit_index)
        multiplier = pow(base_guess, weight, modulus)
        shor_math.controlled_modular_multiply(circuit, control_qubit, multiplier,
                                              modulus, output_reg)
    # Inverse QFT (the analog of the classical DFT) extracts the period,
    # then measure the input register.
    qft.iqft(circuit, input_reg)
    circuit.append(cirq.measure(*input_reg, key="result"))
    # Run the circuit once; the histogram then holds at most one state.
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=1)
    result_states = result.histogram(key="result")
    measurement = next(iter(result_states), 0)
    # The measurement scaled by 6/512 should land within 0.01 of an integer.
    scaled_measurement = measurement / 512 * 6
    nearest_multiple = round(scaled_measurement)
    delta = abs(scaled_measurement - nearest_multiple)
    print(f"Measured {measurement}/512 => {scaled_measurement}, delta = {delta}")
    if delta >= 0.01:
        self.fail(f"QFT failed, delta of {delta} is too high.")
    print("Passed!")
# Allow the tests in this module to be run directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 44.182353 | 103 | 0.610571 |
ece2386269fbd02795d16f10f1ea1d1e4e8476a7 | 471 | py | Python | bisection_method.py | FlorianMarcon/105torus | 361c5c982f849642207aa2ed7fb10529c176b734 | [
"MIT"
] | null | null | null | bisection_method.py | FlorianMarcon/105torus | 361c5c982f849642207aa2ed7fb10529c176b734 | [
"MIT"
] | null | null | null | bisection_method.py | FlorianMarcon/105torus | 361c5c982f849642207aa2ed7fb10529c176b734 | [
"MIT"
] | null | null | null | from torus import *
from math import trunc
def bisection_method(torus, size):
    """Search [0, 1] for a root of ``torus.calcul`` by repeated bisection.

    Iterates until ``torus.calcul(result)``, nudged by half an ulp at the
    requested precision (5 * 10**(-size - 2)), rounds to zero at ``size``
    decimal places. Each iteration renders progress via ``display_torus``
    (imported star-wise from the ``torus`` module).

    Parameters:
        torus: object exposing ``calcul(x)`` — presumably a pure function
            whose root lies inside [0, 1]; TODO confirm against torus.py.
        size (int): number of decimal places of precision required.
    """
    var1 = 0      # one bracket endpoint, updated on a sign change vs var2
    var2 = 1      # opposite bracket endpoint
    result = 0.5  # current midpoint candidate
    while round(torus.calcul(result) + (5 * pow(10, -size - 2)), size) != 0:
        # print ("x =", trunc_float(result, size + 1))
        display_torus(result, size)  # side effect: draw progress each step
        # NOTE(review): the first two branches both assign var1, so together
        # they just test "f(result) and f(var2) have opposite signs";
        # confirm this collapse matches the intended bracketing logic.
        if torus.calcul(result) > 0 and torus.calcul(var2) < 0:
            var1 = result
        elif torus.calcul(result) < 0 and torus.calcul(var2) > 0:
            var1 = result
        else:
            var2 = result
        result = (var2 + var1) / 2  # bisect the current bracket
| 26.166667 | 73 | 0.643312 |
a583e82fd051cddb3026951dcf24bd092c680782 | 749 | py | Python | fulllenguajes/migrations/0001_initial.py | JoinBugs/lenguajes | c062691a430f393b4b2f6d68f16879f86aa76c17 | [
"Apache-2.0"
] | null | null | null | fulllenguajes/migrations/0001_initial.py | JoinBugs/lenguajes | c062691a430f393b4b2f6d68f16879f86aa76c17 | [
"Apache-2.0"
] | null | null | null | fulllenguajes/migrations/0001_initial.py | JoinBugs/lenguajes | c062691a430f393b4b2f6d68f16879f86aa76c17 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-16 21:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``Lenguaje`` table."""

    # First migration of this app, so there is nothing to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        # Table of programming languages with basic version metadata.
        migrations.CreateModel(
            name='Lenguaje',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=10)),
                ('version', models.CharField(max_length=10)),
                ('version_api', models.CharField(max_length=10)),
                ('tipo_lenguaje', models.CharField(max_length=20)),
            ],
        ),
    ]
| 27.740741 | 114 | 0.58745 |
a0f9d672aec7f5a6d0f34777d3bc3de53923ea18 | 1,902 | py | Python | venv/Lib/site-packages/pyrogram/raw/base/phone_call_protocol.py | iamgeorgiy/heroku-userbot | 5a92417d16f8ead949d88cb38da213fc2da5d3a4 | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/pyrogram/raw/base/phone_call_protocol.py | iamgeorgiy/heroku-userbot | 5a92417d16f8ead949d88cb38da213fc2da5d3a4 | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/pyrogram/raw/base/phone_call_protocol.py | iamgeorgiy/heroku-userbot | 5a92417d16f8ead949d88cb38da213fc2da5d3a4 | [
"Apache-2.0"
] | null | null | null | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2020 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
from typing import Union
from pyrogram import raw
from pyrogram.raw.core import TLObject
# Type alias listing every concrete constructor of this TL base type.
PhoneCallProtocol = Union[raw.types.PhoneCallProtocol]


# noinspection PyRedeclaration
class PhoneCallProtocol:  # type: ignore
    """This base type has 1 constructor available.
    Constructors:
        .. hlist::
            :columns: 2
            - :obj:`PhoneCallProtocol <pyrogram.raw.types.PhoneCallProtocol>`
    """

    # Fully-qualified name used by Pyrogram's TL machinery for this base type.
    QUALNAME = "pyrogram.raw.base.PhoneCallProtocol"

    def __init__(self):
        # Base types are abstract markers for type checking only; creating an
        # instance is always a programming error, hence the unconditional raise.
        raise TypeError("Base types can only be used for type checking purposes: "
                        "you tried to use a base type instance as argument, "
                        "but you need to instantiate one of its constructors instead. "
                        "More info: https://docs.pyrogram.org/telegram/base/phone-call-protocol")
| 38.04 | 97 | 0.646162 |
9814242dab2611f84ad6d187bdd76aeeee007c4b | 3,173 | py | Python | dotaCrawler/dotaCrawler/settings.py | aadityachapagain/DotaCalc | b6903bc07d7e2270d05a92dfca430364178644ab | [
"MIT"
] | 1 | 2019-01-24T08:40:15.000Z | 2019-01-24T08:40:15.000Z | dotaCrawler/dotaCrawler/settings.py | aadityachapagain/DotaCalc | b6903bc07d7e2270d05a92dfca430364178644ab | [
"MIT"
] | null | null | null | dotaCrawler/dotaCrawler/settings.py | aadityachapagain/DotaCalc | b6903bc07d7e2270d05a92dfca430364178644ab | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Scrapy settings for dotaCrawler project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'dotaCrawler'
SPIDER_MODULES = ['dotaCrawler.spiders']
NEWSPIDER_MODULE = 'dotaCrawler.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'dotaCrawler (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'dotaCrawler.middlewares.DotacrawlerSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'dotaCrawler.middlewares.DotacrawlerDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'dotaCrawler.pipelines.DotacrawlerPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
FEED_FORMAT = 'json'
FEED_URI = 'dota_hero.jp' | 34.48913 | 102 | 0.775922 |
10b11cb735350ee2ec999ce0bcea2777e80a8928 | 2,722 | py | Python | src/modules/generate_DoS.py | Yasushi-Shinohara/Scripts4Elk | dd7a6604813684cefbac08b1dc3c96bf7b5b7bf3 | [
"MIT"
] | null | null | null | src/modules/generate_DoS.py | Yasushi-Shinohara/Scripts4Elk | dd7a6604813684cefbac08b1dc3c96bf7b5b7bf3 | [
"MIT"
] | null | null | null | src/modules/generate_DoS.py | Yasushi-Shinohara/Scripts4Elk | dd7a6604813684cefbac08b1dc3c96bf7b5b7bf3 | [
"MIT"
] | null | null | null | # coding: UTF-8
# This is created 2021/12/24 by Y. Shinohara
# This is lastly modified 2021/xx/yy by Y. Shinohara
import sys
from modules.constants import pi, Hartree
import numpy as np
class GenerateDoS:
    """Gaussian-broadened (occupied) density of states from eigenvalue data.

    NOTE(review): ``generate`` is decorated ``@classmethod`` but names its
    first parameter ``self``; it therefore receives the *class* object and
    stores all results as class-level attributes shared by every instance —
    confirm that this sharing is intended.
    """

    def __init__(self):
        self.DoS = None #Density of States
        self.omega = None #Energy grid the DoS is evaluated on
        self.NoS = None #Number of States
        self.occDoS = None #Occupied Density of States
        self.occNoS = None #Occupied Number of States

    @classmethod
    def generate(self, ED, NDoS = 2000, ewidth = 0.01, plot_option = True):
        """Compute the DoS, its running integral (NoS) and occupied variants.

        Parameters:
            ED: eigenvalue container exposing ``eigval[band, k]``,
                ``occ[band, k]``, ``Nk``, ``Nb`` and ``spin_degeneracy``
                (energies presumably in Hartree atomic units — TODO confirm).
            NDoS (int): number of points in the energy grid.
            ewidth (float): Gaussian broadening width, same units as eigval.
            plot_option (bool): if True, show a matplotlib summary plot.

        Returns:
            tuple: (omega, DoS, NoS, occDoS, occNoS) numpy arrays.
        """
        # Pad the energy window by 20% of the eigenvalue span on each side.
        emin = np.amin(ED.eigval) - 0.2*(np.amax(ED.eigval) - np.amin(ED.eigval))
        emax = np.amax(ED.eigval) + 0.2*(np.amax(ED.eigval) - np.amin(ED.eigval))
        self.omega = np.linspace(emin, emax, NDoS)
        print(emin, emax, self.omega)
        self.DoS = np.zeros(NDoS, dtype='float64')
        self.NoS = np.zeros(NDoS, dtype='float64')
        self.occDoS = np.zeros(NDoS, dtype='float64')
        self.occNoS = np.zeros(NDoS, dtype='float64')
        # Accumulate one Gaussian per (k-point, band) eigenvalue; the
        # occupied DoS weights each Gaussian by the band occupation.
        for ik in range(ED.Nk):
            for ib in range(ED.Nb):
                self.DoS[:] = self.DoS[:] + np.exp(-(self.omega[:]-ED.eigval[ib,ik])**2/ewidth**2)
                self.occDoS[:] = self.occDoS[:] + np.exp(-(self.omega[:]-ED.eigval[ib,ik])**2/ewidth**2)*ED.occ[ib,ik]
        # Normalize the Gaussians (1/(sqrt(pi)*ewidth)) and average over k.
        self.DoS = self.DoS/np.sqrt(pi)/ewidth/ED.Nk*ED.spin_degeneracy
        self.occDoS = self.occDoS/np.sqrt(pi)/ewidth/ED.Nk
        # Integrate with the trapezoidal rule to obtain the number of states.
        domega = self.omega[1] - self.omega[0]
        for ie in range(NDoS-1):
            self.NoS[ie+1] = self.NoS[ie] + 0.5*(self.DoS[ie+1] + self.DoS[ie])
            self.occNoS[ie+1] = self.occNoS[ie] + 0.5*(self.occDoS[ie+1] + self.occDoS[ie])
        self.NoS = domega*self.NoS
        self.occNoS = domega*self.occNoS
        print('# Number of energy grid: NDoS =', NDoS)
        print('# Energy width for DoS: ewidth =', ewidth, '[a.u.] =', ewidth*Hartree, '[eV]')
        print('# Energy minimum and maximum for DoS: emin, emax =', emin, emax, '[a.u.] =', emin*Hartree, emax*Hartree, '[eV]')
        print('# Number of integrad DoS:', self.NoS[NDoS-1])
        print(self.NoS[NDoS-1], ED.Nb)
        print(self.occNoS[NDoS-1])
        if (plot_option):
            # Local import keeps matplotlib optional for non-plotting callers.
            import matplotlib.pyplot as plt
            plt.figure()
            plt.title('Density of States')
            plt.fill_between(self.omega, 0.0*self.NoS, self.NoS/np.amax(self.NoS)*np.amax(self.DoS)*0.8, facecolor = 'k', alpha=0.25, label='NoS(normalized)')
            plt.plot(self.omega, self.DoS, label='DoS')
            plt.plot(self.omega, self.occDoS, label='occDoS')
            plt.grid()
            plt.legend()
            plt.show()
        return self.omega, self.DoS, self.NoS, self.occDoS, self.occNoS
| 49.490909 | 158 | 0.590375 |
eb1a5b47bab8e9866d600307aa357686ca981bd4 | 1,149 | py | Python | twurl.py | ipopovych/apitter | cd95dffee38c37f279183591d9088ab3305ae7df | [
"MIT"
] | null | null | null | twurl.py | ipopovych/apitter | cd95dffee38c37f279183591d9088ab3305ae7df | [
"MIT"
] | null | null | null | twurl.py | ipopovych/apitter | cd95dffee38c37f279183591d9088ab3305ae7df | [
"MIT"
] | null | null | null | import urllib.request, urllib.parse, urllib.error
import oauth
import hidden
# https://apps.twitter.com/
# Create App and get the four strings, put them in hidden.py
def augment(url, parameters):
    """Return *url* augmented with OAuth 1.0a signature parameters.

    Credentials are read from hidden.oauth(); the request is signed for an
    HTTP GET using HMAC-SHA1.
    """
    creds = hidden.oauth()
    app = oauth.OAuthConsumer(creds['consumer_key'], creds['consumer_secret'])
    access = oauth.OAuthToken(creds['token_key'], creds['token_secret'])
    request = oauth.OAuthRequest.from_consumer_and_token(
        app, token=access, http_method='GET', http_url=url,
        parameters=parameters)
    request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), app, access)
    return request.to_url()
def test_me():
    """Smoke-test augment() by fetching two tweets and dumping the response."""
    print('* Calling Twitter...')
    endpoint = 'https://api.twitter.com/1.1/statuses/user_timeline.json'
    signed_url = augment(endpoint, {'screen_name': 'drchuck', 'count': '2'})
    print(signed_url)
    response = urllib.request.urlopen(signed_url)
    body = response.read()
    print(body)
    print(dict(response.getheaders()))
| 35.90625 | 77 | 0.630983 |
ec92d718eccb4601e9b356b60150f309e28893b2 | 6,005 | py | Python | config/settings/production.py | Kenan7/labrin-task | ffc3f156cf0675733e1fb2b4bc4477da8a1a2d5a | [
"MIT"
] | null | null | null | config/settings/production.py | Kenan7/labrin-task | ffc3f156cf0675733e1fb2b4bc4477da8a1a2d5a | [
"MIT"
] | null | null | null | config/settings/production.py | Kenan7/labrin-task | ffc3f156cf0675733e1fb2b4bc4477da8a1a2d5a | [
"MIT"
] | null | null | null | from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["example.com"])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL") # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": env("REDIS_URL"),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
# Mimicing memcache behavior.
# https://github.com/jazzband/django-redis#memcached-exceptions-behavior
"IGNORE_EXCEPTIONS": True,
},
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
# STATIC
# ------------------------
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# MEDIA
# ------------------------------------------------------------------------------
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[-1]["OPTIONS"]["loaders"] = [ # type: ignore[index] # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
"DJANGO_DEFAULT_FROM_EMAIL", default="My Awesome Project <noreply@example.com>"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env(
"DJANGO_EMAIL_SUBJECT_PREFIX", default="[My Awesome Project]"
)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env("DJANGO_ADMIN_URL")
# Anymail
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ["anymail"] # noqa F405
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
# https://anymail.readthedocs.io/en/stable/esps
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
ANYMAIL = {}
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
},
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
},
},
"root": {"level": "INFO", "handlers": ["console"]},
"loggers": {
"django.request": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": True,
},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["console", "mail_admins"],
"propagate": True,
},
},
}
# Your stuff...
# ------------------------------------------------------------------------------
| 39.506579 | 87 | 0.581515 |
1ace0b39fcef4c4d274fdba3ed0e5602afc4864c | 12,797 | py | Python | old/InterfaceTest.py | Faralaks/the-game | cd08f1f0222eee71916763a11f99ea631dbad578 | [
"MIT"
] | null | null | null | old/InterfaceTest.py | Faralaks/the-game | cd08f1f0222eee71916763a11f99ea631dbad578 | [
"MIT"
] | null | null | null | old/InterfaceTest.py | Faralaks/the-game | cd08f1f0222eee71916763a11f99ea631dbad578 | [
"MIT"
] | null | null | null | #UTF-8
# загрузка библиотек
import pygame, webbrowser
from pygame import *
from stoper import stoper # функция отвечающая за колизию объектов
# Initialization: window, assets and global state for the menu/game loops.
pygame.init()  # initialize the library
window = pygame.display.set_mode((1280, 720), pygame.FULLSCREEN)  # create the window
pygame.display.set_caption('Test')  # change the window title
hero = pygame.image.load('data/pictures/hero.png')  # load the main hero image
map_name = 'data/maps/map'  # path prefix for the first level's map tiles
moving = [False, False, False, False,]  # movement permissions: left, right, up, down
game_flag = True  # main game loop flag
menu_flag = False  # main menu flag
menu_back = pygame.image.load('data/pictures/menu_back.jpg')  # main menu background
picture_menu_ramka = (pygame.image.load('data/pictures/ramka1.png'), pygame.image.load('data/pictures/ramka2.png'), pygame.image.load('data/pictures/ramka3.png'))  # main menu selection frames
picture_music_button = (pygame.image.load('data/pictures/picture_sound_button_off.png'), pygame.image.load('data/pictures/picture_sound_button_on.png'))  # music toggle images
picture_sound_button = (pygame.image.load('data/pictures/picture_sound_button_off.png'), pygame.image.load('data/pictures/picture_sound_button_on.png'))  # sound toggle images
pictures_camera_mode_button = (pygame.image.load('data/pictures/pictures_camera_mode1_button.png'), pygame.image.load('data/pictures/pictures_camera_mode2_button.png'), pygame.image.load('data/pictures/pictures_camera_mode3_button.png'), pygame.image.load('data/pictures/pictures_camera_mode4_button.png'))  # camera mode options
menu_button = 0  # currently selected button in the main menu
hero_back = pygame.Surface((50, 50))  # buffer holding the background behind the hero
reload = True  # force a full background redraw on the next frame
ground = pygame.Surface((1380, 820))  # off-screen surface for the current map area
fps_holder = pygame.time.Clock()
game_menu = (pygame.image.load('data/pictures/game_menu_s.png'),
             pygame.image.load('data/pictures/game_menu_u.png'),
             pygame.image.load('data/pictures/game_menu_i.png'),
             pygame.image.load('data/pictures/game_menu_k.png'),
             pygame.image.load('data/pictures/game_menu_z.png'))
# Temporary hardcoded starting state.
x_hero = 300
y_hero = 100
map_number = [0, 0]  # current map area (column, row)
ground.blit(pygame.image.load('data/maps/map0_0.jpg'), (50, 50))
sound_button = False
music_button = False
camera_mode = 0

# Main menu loop.
while menu_flag:
    window.blit(menu_back, (0, 0))  # background
    # Event handling.
    for event in pygame.event.get():
        if event.type == QUIT:
            menu_button = 2
        if event.type == KEYDOWN:
            if event.key == K_ESCAPE:
                menu_button = 2
        # Mouse events.
        if event.type == pygame.MOUSEBUTTONDOWN:
            if event.button == 1:
                # Switch between the main menu frames (hit-testing by pixel coordinates).
                #print(pygame.mouse.get_pos())
                if pygame.mouse.get_pos()[0] >= 354 and pygame.mouse.get_pos()[0] <= 593 and pygame.mouse.get_pos()[1] >= 265 and pygame.mouse.get_pos()[1] <= 350:
                    menu_button = 0
                elif pygame.mouse.get_pos()[0] >= 356 and pygame.mouse.get_pos()[0] <= 591 and pygame.mouse.get_pos()[1] >= 384 and pygame.mouse.get_pos()[1] <= 461:
                    menu_button = 1
                elif pygame.mouse.get_pos()[0] >= 356 and pygame.mouse.get_pos()[0] <= 596 and pygame.mouse.get_pos()[1] >= 496 and pygame.mouse.get_pos()[1] <= 569:
                    menu_button = 2
                # Camera mode controls (only on the settings pane, menu_button == 1).
                elif menu_button == 1 and pygame.mouse.get_pos()[0] >= 877 and pygame.mouse.get_pos()[0] <= 896 and pygame.mouse.get_pos()[1] >= 318 and pygame.mouse.get_pos()[1] <= 337:
                    camera_mode = 0
                elif menu_button == 1 and pygame.mouse.get_pos()[0] >= 897 and pygame.mouse.get_pos()[0] <= 916 and pygame.mouse.get_pos()[1] >= 318 and pygame.mouse.get_pos()[1] <= 337:
                    camera_mode = 1
                elif menu_button == 1 and pygame.mouse.get_pos()[0] >= 917 and pygame.mouse.get_pos()[0] <= 936 and pygame.mouse.get_pos()[1] >= 318 and pygame.mouse.get_pos()[1] <= 337:
                    camera_mode = 2
                elif menu_button == 1 and pygame.mouse.get_pos()[0] >= 937 and pygame.mouse.get_pos()[0] <= 957 and pygame.mouse.get_pos()[1] >= 318 and pygame.mouse.get_pos()[1] <= 337:
                    camera_mode = 3
                # Sound and music toggles.
                elif menu_button == 1 and pygame.mouse.get_pos()[0] >= 910 and pygame.mouse.get_pos()[0] <= 930 and pygame.mouse.get_pos()[1] >= 350 and pygame.mouse.get_pos()[1] <= 370:
                    music_button = not music_button
                elif menu_button == 1 and pygame.mouse.get_pos()[0] >= 910 and pygame.mouse.get_pos()[0] <= 930 and pygame.mouse.get_pos()[1] >= 386 and pygame.mouse.get_pos()[1] <= 406:
                    sound_button = not sound_button
                # Social network links.
                elif menu_button == 1 and pygame.mouse.get_pos()[0] >= 723 and pygame.mouse.get_pos()[0] <= 763 and pygame.mouse.get_pos()[1] >= 556 and pygame.mouse.get_pos()[1] <= 596:
                    webbrowser.open('https://vk.com/Faralaks')
                elif menu_button == 1 and pygame.mouse.get_pos()[0] >= 773 and pygame.mouse.get_pos()[0] <= 828 and pygame.mouse.get_pos()[1] >= 556 and pygame.mouse.get_pos()[1] <= 596:
                    webbrowser.open('https://www.youtube.com/Faralaks')
                elif menu_button == 1 and pygame.mouse.get_pos()[0] >= 840 and pygame.mouse.get_pos()[0] <= 880 and pygame.mouse.get_pos()[1] >= 556 and pygame.mouse.get_pos()[1] <= 596:
                    webbrowser.open('http://steamcommunity.com/id/Faralaks')
                elif menu_button == 1 and pygame.mouse.get_pos()[0] >= 889 and pygame.mouse.get_pos()[0] <= 924 and pygame.mouse.get_pos()[1] >= 556 and pygame.mouse.get_pos()[1] <= 596:
                    webbrowser.open('https://www.microsoft.com/ru-ru/store/p/ЯндексМузыка/9nblggh0cb6d')
                # Exit confirmation.
                elif menu_button == 2 and pygame.mouse.get_pos()[0] >= 750 and pygame.mouse.get_pos()[0] <= 917 and pygame.mouse.get_pos()[1] >= 417 and pygame.mouse.get_pos()[1] <= 473:
                    menu_flag = False
                    game_flag = False
                # Start the game: click on the right-hand pane while "play" is selected.
                if menu_button == 0 and pygame.mouse.get_pos()[0] >= 684 and pygame.mouse.get_pos()[0] <= 964 and pygame.mouse.get_pos()[1] >= 260 and pygame.mouse.get_pos()[1] <= 587:
                    menu_button = 0
                    menu_flag = False
    # Draw the main menu frame.
    window.blit(picture_menu_ramka[menu_button], (666, 240))
    if menu_button == 1:
        window.blit(pictures_camera_mode_button[camera_mode], (877, 318))
        window.blit(picture_music_button[int(music_button)], (910, 350))
        window.blit(picture_sound_button[int(sound_button)], (910, 386))
    display.update()

# Main loop for camera mode 1.
while game_flag:
    # Event handling.
    for event in pygame.event.get():
        if event.type == QUIT:
            game_flag = False
        if event.type == KEYDOWN:
            if event.key == K_ESCAPE:
                game_flag = False
                menu_button = 0
                menu_reload = True
            if event.key == K_SPACE:
                menu_flag = True
                menu_reload = True
            if event.key == K_q: menu_flag = True; menu_reload = True; menu_button = 4
            if event.key == K_m: menu_flag = True; menu_reload = True; menu_button = 3
            if event.key == K_e: menu_flag = True; menu_reload = True; menu_button = 2
            if event.key == K_r: menu_flag = True; menu_reload = True; menu_button = 1
            if event.key == K_LEFT or event.key == K_a: moving[0] = True
            if event.key == K_RIGHT or event.key == K_d: moving[1] = True
            if event.key == K_UP or event.key == K_w: moving[2] = True
            if event.key == K_DOWN or event.key == K_s: moving[3] = True
        if event.type == KEYUP:
            if event.key == K_LEFT or event.key == K_a: moving[0] = False
            if event.key == K_RIGHT or event.key == K_d: moving[1] = False
            if event.key == K_UP or event.key == K_w: moving[2] = False
            if event.key == K_DOWN or event.key == K_s: moving[3] = False
    # If the hero moved to another map area, the background changes.
    if reload == True:
        window.blit(ground, (-50, -50))
        reload = False
    x_old = x_hero; y_old = y_hero  # save previous coordinates
    # Movement while keys are held; stoper() checks for collisions.
    if moving[0] == True and stoper(map_number, x_hero, y_hero, 3): x_hero -= 3
    if moving[1] == True and stoper(map_number, x_hero, y_hero, 2): x_hero += 3
    if moving[3] == True and stoper(map_number, x_hero, y_hero, 0): y_hero += 3
    if moving[2] == True and stoper(map_number, x_hero, y_hero, 1): y_hero -= 3
    # Transition to another map area when the hero crosses an edge.
    if x_hero < -25:
        map_number[0] -= 1
        x_hero = 1255
        ground.blit(pygame.image.load(map_name + str(map_number[0]) + '_' + str(map_number[1]) + '.jpg'), (50, 50))
        reload = True
    elif x_hero > 1255:
        map_number[0] += 1
        x_hero = -25
        ground.blit(pygame.image.load(map_name + str(map_number[0]) + '_' + str(map_number[1]) + '.jpg'), (50, 50))
        reload = True
    elif y_hero < -25:
        map_number[1] -= 1
        y_hero = 690
        ground.blit(pygame.image.load(map_name + str(map_number[0]) + '_' + str(map_number[1]) + '.jpg'), (50, 50))
        reload = True
    elif y_hero > 690:
        map_number[1] += 1
        y_hero = -25
        ground.blit(pygame.image.load(map_name + str(map_number[0]) + '_' + str(map_number[1]) + '.jpg'), (50, 50))
        reload = True
    # In-game menu loop.
    while menu_flag:
        if menu_reload == True:
            reload = True
            window.blit(ground, (-50, -50))
            window.blit(game_menu[menu_button], (0, 0))
            reload = True
        for event in pygame.event.get():
            if event.type == QUIT:
                game_flag = False
                menu_flag = False
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    menu_flag = False
                if event.key == K_SPACE:
                    menu_flag = False
            if event.type == pygame.MOUSEBUTTONDOWN:
                if event.button == 1:
                    #print(pygame.mouse.get_pos())
                    # Hit-test the in-game menu tabs by pixel coordinates.
                    if pygame.mouse.get_pos()[0] >= 174 and pygame.mouse.get_pos()[0] <= 329 and pygame.mouse.get_pos()[1] >= 102 and pygame.mouse.get_pos()[1] <= 131:
                        menu_button = 0
                    elif pygame.mouse.get_pos()[0] >= 336 and pygame.mouse.get_pos()[0] <= 481 and pygame.mouse.get_pos()[1] >= 102 and pygame.mouse.get_pos()[1] <= 131:
                        menu_button = 1
                    elif pygame.mouse.get_pos()[0] >= 489 and pygame.mouse.get_pos()[0] <= 632 and pygame.mouse.get_pos()[1] >= 102 and pygame.mouse.get_pos()[1] <= 131:
                        menu_button = 2
                    elif pygame.mouse.get_pos()[0] >= 643 and pygame.mouse.get_pos()[0] <= 782 and pygame.mouse.get_pos()[1] >= 102 and pygame.mouse.get_pos()[1] <= 131:
                        menu_button = 3
                    elif pygame.mouse.get_pos()[0] >= 793 and pygame.mouse.get_pos()[0] <= 936 and pygame.mouse.get_pos()[1] >= 102 and pygame.mouse.get_pos()[1] <= 131:
                        menu_button = 4
                    elif pygame.mouse.get_pos()[0] >= 947 and pygame.mouse.get_pos()[0] <= 1096 and pygame.mouse.get_pos()[1] >= 102 and pygame.mouse.get_pos()[1] <= 131:
                        game_flag = False
                        menu_flag = False
        fps_holder.tick(60)  # frame rate control (60 frames per second)
        display.update()
    # Frame composition.
    # Restore the background pixels behind the hero's previous position.
    # Create the pixel arrays.
    pix = pygame.PixelArray(ground)
    pix_new = pygame.PixelArray(hero_back)
    # Replace the pixels.
    pix_new[:,:] = pix[x_old + 50:x_old + 100, y_old + 50 : y_old + 100]
    # Apply the changes.
    del pix, pix_new
    # Drawing.
    window.blit(hero_back, (x_old, y_old))
    window.blit(hero, (x_hero, y_hero))
    display.update()
    fps_holder.tick(60)  # frame rate control (60 frames per second)
pygame.quit()
| 45.379433 | 332 | 0.587481 |
501cb577e52d38eb6c01febb1ecc23b54c636e40 | 1,501 | py | Python | tools/json_to_struct/struct_generator.py | kjthegod/chromium | cf940f7f418436b77e15b1ea23e6fa100ca1c91a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 231 | 2015-01-08T09:04:44.000Z | 2021-12-30T03:03:10.000Z | tools/json_to_struct/struct_generator.py | j4ckfrost/android_external_chromium_org | a1a3dad8b08d1fcf6b6b36c267158ed63217c780 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2018-02-10T21:00:08.000Z | 2018-03-20T05:09:50.000Z | tools/json_to_struct/struct_generator.py | j4ckfrost/android_external_chromium_org | a1a3dad8b08d1fcf6b6b36c267158ed63217c780 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 268 | 2015-01-21T05:53:28.000Z | 2022-03-25T22:09:01.000Z | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
def _GenerateArrayField(field_info):
  """Generate a string defining an array field in a C structure.

  Args:
    field_info: dict with 'field' (the member name) and 'contents' (a
        field-description dict for the element type, as accepted by
        GenerateField).

  Returns:
    The declaration of a pointer member plus a companion
    '<field>_size' member, without a trailing semicolon.

  Raises:
    RuntimeError: if the element type is itself an array.
  """
  # Work on a copy so the caller's schema dict is not mutated
  # (the original version clobbered field_info['contents']['field']).
  contents = dict(field_info['contents'])
  contents['field'] = '* ' + field_info['field']
  if contents['type'] == 'array':
    raise RuntimeError('Nested arrays are not supported.')
  return (GenerateField(contents) + ';\n' +
          '  const size_t %s_size') % field_info['field']
def GenerateField(field_info):
  """Generate a string defining a field of the type specified by
  field_info['type'] in a C structure.

  Args:
    field_info: dict with 'field' (member name) and 'type' (one of
        'int', 'string', 'string16', 'enum', 'array'); 'enum' also
        requires 'ctype', 'array' also requires 'contents'.

  Returns:
    The C declaration for the member, without a trailing semicolon.

  Raises:
    RuntimeError: if 'type' is not one of the supported kinds.
  """
  field = field_info['field']
  # Renamed from `type` to avoid shadowing the builtin.
  field_type = field_info['type']
  if field_type == 'int':
    return 'const int %s' % field
  elif field_type == 'string':
    return 'const char* const %s' % field
  elif field_type == 'string16':
    return 'const wchar_t* const %s' % field
  elif field_type == 'enum':
    return 'const %s %s' % (field_info['ctype'], field)
  elif field_type == 'array':
    return _GenerateArrayField(field_info)
  else:
    raise RuntimeError('Unknown field type "%s"' % field_type)
def GenerateStruct(type_name, schema):
  """Generate a string defining a structure containing the fields specified in
  the schema list.

  Args:
    type_name: name of the generated C struct.
    schema: list of field-description dicts accepted by GenerateField().

  Returns:
    The full struct definition, terminated by a newline.
  """
  # Build the body line by line; stray C-style semicolons removed.
  lines = ['struct %s {' % type_name]
  for field_info in schema:
    lines.append('  ' + GenerateField(field_info) + ';')
  lines.append('};')
  return '\n'.join(lines) + '\n'
| 34.113636 | 78 | 0.662891 |
528340496580768a90831780d7fcf4d19adb5354 | 29,579 | py | Python | ant_tracker/labeler/classes.py | ChortJulio/AntTracker | a89fb7fedbb7a8f7d12b024387f6330180fa010f | [
"MIT"
] | null | null | null | ant_tracker/labeler/classes.py | ChortJulio/AntTracker | a89fb7fedbb7a8f7d12b024387f6330180fa010f | [
"MIT"
] | null | null | null | ant_tracker/labeler/classes.py | ChortJulio/AntTracker | a89fb7fedbb7a8f7d12b024387f6330180fa010f | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from enum import Enum
import cv2 as cv
import itertools
import numpy as np
import ujson
from packaging.version import Version
from pathlib import Path
from typing import ClassVar, List, NoReturn, Tuple, Dict, Union, Optional, NewType, TypedDict
from ..tracker.ant_labeler_info import groupSequence
from ..tracker.blob import Blob
from ..tracker.common import Position, to_json, to_tuple
from ..tracker.info import TracksInfo
from ..tracker.kellycolors import KellyColors
from ..tracker.parameters import SegmenterParameters, TrackerParameters
from ..tracker.track import Loaded, Track, TrackId
# Version stamp written into serialized labeling files by this labeler.
CollectionVersion = Version("2.1")
# RGB triple, 0-255 per channel.
Color = Tuple[int, int, int]
# Distinct ndarray roles used throughout this module:
# BinaryMask: 2-D 0/1 (or bool) mask of segmented pixels.
BinaryMask = NewType("BinaryMask", np.ndarray)
# ColoredMask: 2-D int mask where each pixel holds an ant id.
ColoredMask = NewType("ColoredMask", np.ndarray)
# ColoredMaskWithUnlabel: like ColoredMask, but -1 marks unlabeled pixels.
ColoredMaskWithUnlabel = NewType("ColoredMaskWithUnlabel", np.ndarray)
Vector = np.ndarray
FrameAndVelocity = Tuple[int, Vector]
class AreaInFrame:
    """Pixels an ant occupies in one video frame, stored as sparse
    (rows, cols) coordinate lists plus the originating frame shape."""

    def __init__(self, frame: int, mask: BinaryMask):
        self.frame = frame
        rows, cols = np.nonzero(mask)
        # Plain lists keep the object JSON-serializable via encode().
        self.area = (rows.tolist(), cols.tolist())
        self.shape = mask.shape

    def encode(self) -> Dict:
        """Serialize to a plain dict (inverse of decode)."""
        return {"frame": self.frame, "area": self.area}

    def getMask(self) -> BinaryMask:
        """Rebuild the dense uint8 mask: 1 on the stored area, 0 elsewhere."""
        dense = np.zeros(self.shape, dtype='uint8')
        dense[self.area] = 1
        return BinaryMask(dense)

    @staticmethod
    def decode(area: Dict, shape) -> "AreaInFrame":
        """Rebuild an AreaInFrame from encode() output; `shape` is the frame shape."""
        instance = AreaInFrame(-1, BinaryMask(np.ndarray((0, 0))))
        instance.frame = area["frame"]
        instance.area = area["area"]
        instance.shape = shape
        return instance
class AreasByFrame:
    """Collection of AreaInFrame entries for a single ant, at most one per frame."""

    def __init__(self):
        self.areas_per_frame: List[AreaInFrame] = []

    def getArea(self, frame) -> Union[Tuple[int, AreaInFrame], Tuple[None, None]]:
        """Return (list index, AreaInFrame) for `frame`, or (None, None) if absent.

        Raises ValueError if the invariant of one area per frame is broken.
        """
        matches = [(i, a) for i, a in enumerate(self.areas_per_frame)
                   if a.frame == frame]
        if not matches:
            return None, None
        if len(matches) > 1:
            raise ValueError("More than one area in frame %d" % frame)
        return matches[0]

    def updateArea(self, frame: int, mask: BinaryMask):
        """Insert or replace the area for `frame`; an all-zero mask deletes it."""
        index, _ = self.getArea(frame)
        if not np.any(mask):
            # Empty mask means "no area in this frame anymore".
            if index is not None:
                self.areas_per_frame.pop(index)
            return
        replacement = AreaInFrame(frame, mask)
        if index is None:
            self.areas_per_frame.append(replacement)
        else:
            self.areas_per_frame[index] = replacement

    def encode(self):
        """Serialize every per-frame area to a list of plain dicts."""
        return [entry.encode() for entry in self.areas_per_frame]

    @staticmethod
    def decode(areas_as_list, shape) -> "AreasByFrame":
        """Rebuild from encode() output; `shape` is the video frame shape."""
        decoded = AreasByFrame()
        decoded.areas_per_frame = [AreaInFrame.decode(d, shape)
                                   for d in areas_as_list]
        return decoded
def epsilon(shape):
    """Polygon-approximation tolerance used with cv.approxPolyDP,
    stepped up with the frame's pixel count so bigger frames get
    coarser (cheaper) contours."""
    pixel_count = shape[0] * shape[1]
    for threshold, tolerance in ((350000, 0.01), (650000, 0.03)):
        if pixel_count < threshold:
            return tolerance
    return 0.08
def get_contour(mask: BinaryMask):
    """Approximate polygon of the largest contour found in `mask`.

    Points come back in (row, col) order to match numpy indexing.
    Returns [] when the mask contains no contours at all.
    """
    found, _ = cv.findContours(mask.astype('uint8'), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_TC89_L1)
    if not found:
        return []
    # Keep only the contour enclosing the biggest area (first one on ties).
    largest = max(found, key=cv.contourArea)
    approx = np.array(cv.approxPolyDP(largest, epsilon(mask.shape), True))
    # OpenCV emits (x, y) pairs; flip each point to (row, col).
    return np.flip(approx.reshape((approx.shape[0], 2)), axis=1)
def get_mask(contour, shape):
    """Rasterize a polygon of (row, col) points into a boolean mask of `shape`.

    Inverse-ish of get_contour(). Returns an all-False boolean mask when
    `contour` is empty.
    """
    mask = np.zeros(shape)
    # len() instead of np.any(): a contour consisting of the single point
    # (0, 0) is a real polygon, but np.any() on it is False. Also keep
    # the dtype consistent (bool) with the filled branch below.
    if len(contour) == 0:
        return mask.astype(bool)
    # fillPoly expects (x, y) point order, so flip the (row, col) pairs back.
    pts = np.flip(contour, axis=1).reshape((-1, 1, 2))
    return cv.fillPoly(mask, [pts], 255).astype(bool)
class Ant:
    """A labeled ant: an id, a display color, a load flag, and the set of
    per-frame areas (AreasByFrame) it occupies throughout the video.

    Ant ids are 1-based; the corresponding tracker Track id is `id - 1`
    (see as_track / from_track).
    """
    def __init__(self, _id: int):
        self.id = _id
        self.color = KellyColors.get(_id)
        # self.icon = ColorIcon(*self.color)
        # Whether this ant carries a load (e.g. a leaf fragment).
        self.loaded = False
        self.areasByFrame = AreasByFrame()
    def __repr__(self):
        ret = "Ant - Id: " + str(self.id) + "; Color: " + str(self.color)
        if self.loaded:
            ret += "; IsLoaded"
        return ret
    def updateArea(self, frame, mask):
        """Set/replace/delete this ant's area in `frame` (empty mask deletes)."""
        self.areasByFrame.updateArea(frame, mask)
    def getArea(self, frame):
        """Return the AreaInFrame for `frame`, or None if the ant isn't there."""
        return self.areasByFrame.getArea(frame)[1]
    def isInFrame(self, frame):
        """True when this ant has a labeled area in `frame`."""
        return self.areasByFrame.getArea(frame) != (None, None)
    def getMasksToUnlabel(self):
        """All (frame, mask) pairs of this ant — used to return its pixels
        to the unlabeled pool when the ant is deleted."""
        areaInFrame: AreaInFrame
        frames_and_masks = [(areaInFrame.frame, areaInFrame.getMask())
                            for areaInFrame in self.areasByFrame.areas_per_frame]
        # print(str(frames_and_masks))
        return frames_and_masks
    def getMask(self, frame) -> Optional[BinaryMask]:
        """Dense binary mask of this ant in `frame`, or None if absent."""
        areaInFrame: AreaInFrame
        _, areaInFrame = self.areasByFrame.getArea(frame)
        if not areaInFrame:
            return None
        else:
            return areaInFrame.getMask()
    def getInvolvedFrames(self) -> List[int]:
        """Frame numbers in which this ant has a labeled area."""
        areaInFrame: AreaInFrame
        return [areaInFrame.frame for areaInFrame in self.areasByFrame.areas_per_frame]
    def getLastFrame(self):
        """Last frame this ant appears in (0 when it appears nowhere)."""
        return max(self.getInvolvedFrames(), default=0)
    def getGroupsOfFrames(self) -> List[Tuple[int, int]]:
        """Consecutive runs of frames as (first, last) pairs."""
        return groupSequence(self.getInvolvedFrames())
    def as_track(self):
        """Convert to a tracker Track (id shifted to 0-based); each area
        becomes a Blob built from its approximated contour."""
        i = self.id
        blobs = dict()
        areas = self.areasByFrame.areas_per_frame
        for area_in_frame in areas:
            frame, mask = area_in_frame.frame, area_in_frame.getMask()
            blob = Blob(imshape=mask.shape, contour=get_contour(mask))
            blobs[frame] = blob
        # noinspection PyTypeChecker
        blobs = dict(sorted(blobs.items()))
        return Track(TrackId(i - 1), blobs, force_load_to=self.loaded)
    @staticmethod
    def from_track(track: Track, shape: Tuple[int, int]):
        """Inverse of as_track(): rebuild an Ant (id = track.id + 1) by
        rasterizing every blob contour back into a dense area."""
        self = Ant(track.id + 1)
        self.loaded = Loaded.to_bool(track.loaded)
        self.areasByFrame = AreasByFrame()
        for frame, blob in track.blobs.items():
            mask = get_mask(blob.contour, shape)
            self.areasByFrame.updateArea(frame, mask)
        return self
    def encode(self):
        """Serialize to a plain dict (inverse of decode)."""
        return dict({
            "id": self.id,
            "loaded": self.loaded,
            "areasByFrame": self.areasByFrame.encode()
        })
    @staticmethod
    def decode(ant_as_dict, shape) -> "Ant":
        """Rebuild an Ant from encode() output; `shape` is the frame shape."""
        ant = Ant(-1)
        ant.id = ant_as_dict["id"]
        ant.loaded = ant_as_dict["loaded"]
        ant.areasByFrame = AreasByFrame.decode(ant_as_dict["areasByFrame"], shape)
        return ant
class UnlabeledFrame:
    """Regions of one frame that were segmented but not yet assigned to any
    ant, stored as a list of polygon contours in (row, col) point order.

    Three construction modes (see __init__):
      * frame + mask: contours are extracted from a binary mask.
      * frame + contours: contours are taken as given.
      * _l/_i/_v/_f setters: internal, used only while decoding the legacy
        packed-bit serialization (OldSerial).
    """
    def __init__(self, frame: Optional[int] = None, mask=None, _l=None, _i=None, _v=None, _f=None, contours=None):
        if frame is None:
            # Legacy path: populate the packed-bit fields directly; contours
            # are filled in later by decode(). Note self.contours is NOT set here.
            if _l is not None:
                self.frame = _f
                self.length = _l
                self.indices = _i
                self.values = _v
                return
            else:
                raise TypeError("Frame & Mask || Frame & contours || setters")
        elif mask is not None:
            # Extract and simplify external contours from the binary mask.
            contours, _ = cv.findContours(mask.astype('uint8'), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_TC89_L1)
            contours = [cv.approxPolyDP(c, epsilon(mask.shape), True) for c in contours]
            contours = [c.reshape(c.shape[0], 2) for c in contours]
            self.contours = contours
        elif contours is not None:
            self.contours = [np.array(c) for c in contours]
        else:
            raise TypeError("Frame & Mask || Frame & contours || setters")
        self.frame = frame
    #
    # packed_mask = np.packbits(mask,axis=None)
    # self.length = len(packed_mask)
    # self.indices = np.nonzero(packed_mask)
    # self.indices = (self.indices[0].tolist())
    # self.values = packed_mask[self.indices].tolist()
    def __repr__(self):
        return f"Frame: {self.frame}, {len(self.contours)} unlabeled contours"
    def getMask(self, shape):
        """Rasterize every stored contour into one boolean mask of `shape`."""
        mask = cv.fillPoly(np.zeros(shape), self.contours, 255)
        return BinaryMask(mask.astype(bool))
    class Serial(TypedDict):
        # Current (v2+) serialization: polygons as point lists.
        frame: int
        contours: List[List[Position]]
    class OldSerial(TypedDict):
        # Legacy (v1) serialization: sparse packed-bit mask.
        frame: int
        length: int
        indices: List[int]
        values: List[int]
    def encode(self) -> 'UnlabeledFrame.Serial':
        """Serialize to the current contour-based format."""
        d = {
            "frame": self.frame,
            "contours": [[
                to_tuple(point) for point in contour
            ] for contour in self.contours],
        }
        return d
    @staticmethod
    def decode(unlabeled_as_dict: Union['UnlabeledFrame.OldSerial', 'UnlabeledFrame.Serial'], shape=None,
               size=None) -> "UnlabeledFrame":
        """Rebuild from either serialization; the legacy format needs `shape`
        (frame shape) and `size` (pixel count) to unpack its bit mask."""
        if 'values' in unlabeled_as_dict:
            # OldSerial
            def old_getMask(uf, _shape, _size) -> BinaryMask:
                """Get a binary mask with ones on segmented pixels"""
                packed_mask = np.zeros(uf.length, dtype='uint8')
                packed_mask[uf.indices] = uf.values
                mask = np.unpackbits(packed_mask, axis=None)[:_size].reshape(_shape)
                return BinaryMask(mask)
            u = UnlabeledFrame(
                _l=unlabeled_as_dict["length"],
                _i=unlabeled_as_dict["indices"],
                _v=unlabeled_as_dict["values"],
                _f=unlabeled_as_dict["frame"],
            )
            # Convert the legacy dense mask into the contour representation.
            contours, _ = cv.findContours(old_getMask(u, shape, size).astype('uint8'), cv.RETR_EXTERNAL,
                                          cv.CHAIN_APPROX_TC89_L1)
            contours = [cv.approxPolyDP(c, epsilon(shape), True) for c in contours]
            contours = [c.reshape(c.shape[0], 2) for c in contours]
            u.contours = contours
        else:
            u = UnlabeledFrame(frame=unlabeled_as_dict['frame'], contours=unlabeled_as_dict['contours'])
        return u
def get_track(tracks: List[Track], ant_id):
    """Return the Track matching 1-based `ant_id` (track ids are 0-based).

    Raises IndexError when no such track exists.
    """
    matching = [candidate for candidate in tracks if candidate.id == ant_id - 1]
    return matching[0]
class AntCollection:
    """Top-level labeling state for one video: the list of labeled Ants,
    the per-frame pools of still-unlabeled regions, and a mirrored
    tracker-side LabelingInfo (`self.info`) that is kept in sync with
    every edit.
    """
    def __init__(self, anymask: Optional[np.ndarray] = None, video_length=None, info=None):
        self.ants: List[Ant] = []
        # Next ant id to hand out (ids are 1-based).
        self.id_iter = itertools.count(start=1)
        self.videoSize = anymask.size if anymask is not None else 0
        self.videoShape = anymask.astype('uint8').shape if anymask is not None else (0, 0)
        if video_length is not None:
            self.videoLength = video_length
        # Bound helper that rasterizes an UnlabeledFrame at this video's shape.
        self.getUnlabeledMask = self.__getUnlabeledMaskClosure(self.videoShape)
        self.info: LabelingInfo = info
        self.version = CollectionVersion
    @staticmethod
    def __getUnlabeledMaskClosure(shape):
        # Returns a closure so callers don't have to pass the shape around.
        def getMask(unl: UnlabeledFrame) -> BinaryMask:
            return unl.getMask(shape)
        return getMask
    def newAnt(self) -> Ant:
        """Create a fresh Ant (and its empty mirror Track) and register both."""
        _id = next(self.id_iter)
        ant = Ant(_id)
        self.ants.append(ant)
        track = Track(TrackId(_id - 1), {})
        self.info.tracks.append(track)
        return ant
    def getAnt(self, ant_id) -> Optional[Ant]:
        """Return the Ant with `ant_id`, or None; raise on duplicate ids."""
        which = [ant for ant in self.ants if ant.id == ant_id]
        if len(which) == 1:
            return which[0]
        elif len(which) == 0:
            return None
        else:
            raise ValueError("More than one ant with id %d" % ant_id)
    def deleteAnt(self, ant_id):
        """Remove an ant, returning all its pixels to the unlabeled pools."""
        # Unlabel all of this ant's areas first
        dead_ant = self.getAnt(ant_id)
        print("deleteAnt: dead_ant:", str(dead_ant))
        if dead_ant is None:
            raise ValueError("Trying to delete a nonexistent ant with id %d" % ant_id)
        else:
            for frame, mask in dead_ant.getMasksToUnlabel():
                print("deleteAnt: frame:", str(frame))
                self.updateUnlabeledFrame(frame, mask)
            self.ants.remove(dead_ant)
        dead_track = get_track(self.info.tracks, ant_id)
        self.info.tracks.remove(dead_track)
    def update_load(self, ant_id, loaded: bool):
        """Set the 'carrying a load' flag on both the Ant and its mirror Track."""
        self.getAnt(ant_id).loaded = loaded
        # Name-mangled access: Track keeps its loaded state private.
        get_track(self.info.tracks, ant_id)._Track__loaded = Loaded.parse(loaded)
    def getUnlabeledFrameGroups(self):
        """Return (runs of consecutive frames with unlabeled regions, count)."""
        unl = []
        for frame in self.info.unlabeledFrames:
            if len(frame.contours) > 0:
                unl.append(frame.frame)
        return groupSequence(unl), len(unl)
    def serialize(self) -> NoReturn:
        """Deprecated: serialization now lives in LabelingInfo."""
        raise DeprecationWarning("Do not serialize as collection! Create a LabelingInfo instance instead")
        # return to_json({
        #     "ants": [ant.encode() for ant in self.ants],
        #     "unlabeledFrames": [uF.encode() for uF in self.unlabeledFrames],
        #     "videoSize": self.videoSize,
        #     "videoShape": self.videoShape,
        #     "version": str(CollectionVersion)
        # })
    @staticmethod
    def deserialize(video_path, jsonstring=None, filename=None) -> "AntCollection":
        """Load a collection from a JSON string or file, handling both the
        current (>=2) LabelingInfo format and the legacy v1 layout."""
        if filename is not None:
            with open(filename, 'r') as file:
                antDict = ujson.load(file)
        elif jsonstring is not None:
            antDict = ujson.loads(jsonstring)
        else:
            raise TypeError("Provide either JSON string or filename.")
        if 'labeler_version' in antDict and Version(antDict['labeler_version']) >= Version("2"):
            info = LabelingInfo.deserialize(jsonstring=jsonstring, filename=filename)
            antCollection = AntCollection.from_info(info)
        else:
            # Legacy (v1) layout: rebuild everything field by field.
            antCollection = AntCollection(np.zeros(antDict["videoShape"], dtype="uint8"))
            for ant in antDict["ants"]:
                antCollection.ants.append(Ant.decode(ant, antCollection.videoShape))
            antCollection.id_iter = itertools.count(start=antCollection.getLastId() + 1)
            antCollection.getUnlabeledMask = \
                antCollection.__getUnlabeledMaskClosure(antCollection.videoShape)
            if "version" in antDict:
                antCollection.version = Version(antDict["version"])
            else:
                antCollection.version = Version("1")
            antCollection.info = LabelingInfo(
                video_path=video_path,
                ants=antCollection.ants,
                unlabeled_frames=[
                    UnlabeledFrame.decode(uF, antCollection.videoShape, antCollection.videoSize)
                    for uF in antDict["unlabeledFrames"]
                ],
            )
        return antCollection
    def updateAreas(self, frame: int, colored_mask: ColoredMaskWithUnlabel):
        """Apply a user-edited colored mask for `frame`: every pixel value is
        an ant id (-1 = unlabeled). Updates each ant's area, its mirror
        Track blob, and shrinks the frame's unlabeled pool accordingly."""
        for ant in self.ants:
            contour = get_contour(colored_mask == ant.id)
            mask = get_mask(contour, self.videoShape)
            ant.updateArea(frame, mask)
            has_blob_in_frame = np.any(mask)
            track = get_track(self.info.tracks, ant.id)
            if has_blob_in_frame:
                track.blobs[frame] = Blob(imshape=mask.shape, contour=contour)
            elif track.at(frame) is not None:
                track.blobs.pop(frame)
        # Mark areas as labeled
        index, unlabeled = self.getUnlabeled(frame)
        if index is not None:
            unlabeled_mask = self.getUnlabeledMask(unlabeled)
            # Keep only the pixels that are still unlabeled in the new mask
            unlabeled_mask = np.logical_and(colored_mask == -1, unlabeled_mask)
            if np.any(unlabeled_mask):
                self.overwriteUnlabeledFrame(frame, unlabeled_mask)
            else:
                self.deleteUnlabeledFrame(frame)
    def addUnlabeledFrame(self, frame: int, mask: BinaryMask):
        """Register `mask` as the unlabeled regions of `frame` (no-op if empty)."""
        if np.any(mask):
            uf = UnlabeledFrame(frame, mask)
            self.info.unlabeledFrames.append(uf)
    def deleteUnlabeledFrame(self, frame: int):
        """Drop the unlabeled pool of `frame`, if there is one."""
        index, unlabeled = self.getUnlabeled(frame)
        if index is not None:
            self.info.unlabeledFrames.remove(unlabeled)
    def overwriteUnlabeledFrame(self, frame: int, mask: BinaryMask):
        """Replace the unlabeled pool of `frame` with `mask`."""
        index, unlabeled = self.getUnlabeled(frame)
        if index is not None:
            self.deleteUnlabeledFrame(frame)
        self.addUnlabeledFrame(frame, mask)
    def updateUnlabeledFrame(self, frame: int, new_mask: BinaryMask):
        """Union `new_mask` into the unlabeled pool of `frame` (creating it
        if needed) — e.g. when an ant is deleted."""
        index, unlabeled_packed = self.getUnlabeled(frame)
        if index is None:
            self.addUnlabeledFrame(frame, new_mask)
        else:
            unlabeled_mask = self.getUnlabeledMask(unlabeled_packed)
            unlabeled_mask = np.logical_or(unlabeled_mask, new_mask).astype('uint8')
            self.overwriteUnlabeledFrame(frame, unlabeled_mask)
    def getUnlabeled(self, frame) -> Union[Tuple[int, UnlabeledFrame], Tuple[None, None]]:
        """Returns the `frame`th packed frame of unlabeled regions and its index in the list"""
        which = [(index, unlabeledFrame) for index, unlabeledFrame
                 in enumerate(self.info.unlabeledFrames)
                 if unlabeledFrame.frame == frame]
        if len(which) == 1:
            return which[0][0], which[0][1]
        elif len(which) == 0:
            return None, None
        else:
            raise ValueError("More than one packed mask in frame %d" % frame)
    def getMask(self, frame) -> ColoredMaskWithUnlabel:
        """Compose the full colored mask of `frame`: each ant's pixels carry
        its id, unlabeled pixels carry -1, background stays 0."""
        mask = np.zeros(self.videoShape).astype('int16')
        area: AreaInFrame
        ant: Ant
        for (ant_id, area) in ((ant.id, ant.getArea(frame)) for ant in self.ants if ant.isInFrame(frame)):
            antmask = area.getMask().astype(bool)
            mask[antmask] = (antmask.astype('int16') * ant_id)[antmask]
        _, unlabeledFrame = self.getUnlabeled(frame)
        if unlabeledFrame is not None:
            ulmask = unlabeledFrame.getMask(self.videoShape)
            mask[ulmask] = (unlabeledFrame.getMask(self.videoShape).astype('int16') * (-1))[ulmask]
        return ColoredMaskWithUnlabel(mask)
    def cleanUnlabeledAndAntOverlaps(self, frame: int):
        """Remove from the unlabeled pool any pixel already claimed by an ant."""
        index, unlabeledFrame = self.getUnlabeled(frame)
        if index is not None:
            unlmask = unlabeledFrame.getMask(self.videoShape).astype('bool')
            for ant in self.ants:
                if ant.isInFrame(frame):
                    antmask = ant.getMask(frame).astype('bool')
                    unlmask: BinaryMask = np.logical_and(unlmask, ~antmask)
            self.overwriteUnlabeledFrame(frame, unlmask)
    def cleanErrorsInFrame(self, frame, for_specific_ant: Ant = None):
        """Wipe any ant area in `frame` that overlaps already-painted pixels
        (unlabeled regions or previously processed ants).

        NOTE(review): when `for_specific_ant` is given and is itself in the
        frame, its own mask is first summed into `mask` and then compared
        against itself, so it always overlaps and gets wiped — confirm this
        is the intended behavior before restructuring.
        """
        _, unlabeledFrame = self.getUnlabeled(frame)
        if unlabeledFrame is None:
            mask = np.zeros(self.videoShape).astype('int16')
        else:
            mask = unlabeledFrame.getMask(self.videoShape).astype('int16') * (-1)
        # print("cleaning frame ", frame)
        if for_specific_ant is not None:
            for ant in self.ants:
                if ant.isInFrame(frame):
                    mask = mask + ant.getMask(frame).astype('int16') * ant.id
            alreadyPainted: BinaryMask = mask != 0
            aboutToPaint = for_specific_ant.getMask(frame)
            overlap: BinaryMask = np.logical_and(alreadyPainted, aboutToPaint)
            if np.any(overlap):
                for_specific_ant.updateArea(frame, np.zeros(self.videoShape))
        else:
            for ant in self.ants:
                if ant.isInFrame(frame):
                    # print("- cleaning ant ", ant.id)
                    alreadyPainted = mask != 0
                    aboutToPaint = ant.getMask(frame)
                    overlap: BinaryMask = np.logical_and(alreadyPainted, aboutToPaint)
                    if np.any(overlap):
                        ant.updateArea(frame, np.zeros(self.videoShape))
                    else:
                        mask = mask + ant.getMask(frame).astype('int16') * ant.id
    def cleanErrors(self, number_of_frames, for_specific_ant: Ant = None, from_this_frame=0):
        """Run cleanErrorsInFrame over a frame range."""
        for frame in range(from_this_frame, number_of_frames):
            self.cleanErrorsInFrame(frame, for_specific_ant)
    def labelFollowingFrames(self, current_frame, ant_id, tracking_radius=160, conflict_radius=60):
        """Propagate ant `ant_id`'s label forward from `current_frame` through
        consecutive unlabeled frames by nearest-centroid tracking, stopping
        on frame gaps, ambiguity (two candidates within `conflict_radius`),
        or jumps larger than `tracking_radius`.

        NOTE(review): np.int was removed in NumPy 1.24 — verify the runtime
        NumPy version supports the two np.int(...) calls below.
        """
        def centroids_no_background(mask):
            # Drop row 0: connectedComponentsWithStats reports the background first.
            _, _, _, cents = cv.connectedComponentsWithStats(mask.astype('uint8'))
            return cents[1:]
        def closest_two_nodes(node, nodes):
            # Return the two nodes nearest to `node` and their squared distances.
            nodes = np.asarray(nodes)
            dist_2 = np.sum((nodes - node) ** 2, axis=1)
            index = dist_2.argsort()
            return nodes[index[:2]], dist_2[index[:2]]
        # Sort every frame that comes after the current one
        # (just in case — in theory they are already ordered)
        unlabeledFutureFrames = sorted(
            [uframe for uframe in self.info.unlabeledFrames
             if uframe.frame > current_frame],
            key=lambda uframe: uframe.frame)
        # We are on the last frame / nothing unlabeled lies ahead:
        if not unlabeledFutureFrames:
            return
        ant = self.getAnt(ant_id)
        # track = get_track(self.info.tracks, ant_id)
        # TODO: maybe make it so you can retag tagged regions (not that essential)
        last_frame = unlabeledFutureFrames[0].frame - 1
        last_mask = ant.getMask(current_frame)
        if last_mask is None:
            raise ValueError("El frame del que se quiere rellenar no tiene una hormiga ya etiquetada")
        # last_mask = np.zeros_like(self.getUnlabeledMask(unlabeledFutureFrames[0]),dtype='uint8')
        for uFrame in unlabeledFutureFrames:
            unlabel_mask = self.getUnlabeledMask(uFrame)
            frame = uFrame.frame
            print("Frame: ", frame)
            if frame != last_frame + 1:
                print("Hubo un salto, no hay chances de ver overlap")
                break
            colored_mask = self.getMask(frame)
            if np.any(colored_mask == ant_id):
                print("En este frame ya hay una hormiga etiquetada con ese id")
                break
            last_centroid = centroids_no_background(last_mask)
            if len(last_centroid) != 1:
                # FIXME: this can actually happen, if the user tries to
                # fill from a frame where they already painted with this id.
                # The best option is probably an error QDialog telling them
                # they are attempting something invalid.
                raise ValueError("En la máscara anterior debería haber un solo centroide")
            last_centroid = last_centroid[0]
            centroids = centroids_no_background(unlabel_mask)
            if len(centroids) == 0:
                print("Nos quedamos sin hormigas")
                break
            elif len(centroids) == 1:
                print("Hay una sola hormiga, es probablemente la que buscamos...")
                dist = np.sum((centroids[0] - last_centroid) ** 2, axis=0)
                if dist > tracking_radius:
                    print("Está muy lejos, probablemente sea una que recién aparece en otro lado")
                    print("(o bien la hormiga es muy rápida...)")
                    break
                else:
                    x, y = np.int(centroids[0][0]), np.int(centroids[0][1]) # noqa
                    if colored_mask[y, x] == -1:
                        print("Floodfill(centroids[0])")
                        upcasted_mask = colored_mask.astype('int32')
                        cv.floodFill(image=upcasted_mask,
                                     mask=None,
                                     seedPoint=(x, y),
                                     newVal=ant_id,
                                     loDiff=0,
                                     upDiff=0)
                        colored_mask = upcasted_mask.astype('int16').copy()
                        self.updateAreas(frame, colored_mask)
                    else:
                        print("El centroide del área anterior no cae en un área etiquetable")
                        break
            else:
                print("Más de una hormiga, busquemos las más cercanas")
                closest, dist = closest_two_nodes(last_centroid, centroids)
                if dist[1] < conflict_radius:
                    print("Dos hormigas muy cerca de la anterior, cortemos")
                    break
                elif dist[0] > tracking_radius:
                    print("Está muy lejos, probablemente la hormiga que seguíamos se fue de cámara")
                    break
                else:
                    x, y = np.int(closest[0][0]), np.int(closest[0][1]) # noqa
                    if colored_mask[y, x] == -1:
                        print("Floodfill(centroids[0])")
                        upcasted_mask = colored_mask.astype('int32')
                        cv.floodFill(image=upcasted_mask,
                                     mask=None,
                                     seedPoint=(x, y),
                                     newVal=ant_id,
                                     loDiff=0,
                                     upDiff=0)
                        colored_mask = upcasted_mask.astype('int16').copy()
                        self.updateAreas(frame, colored_mask)
                    else:
                        print("El centroide del área anterior no cae en un área etiquetable")
                        break
            # Ones on the freshly flood-filled part
            last_mask = (colored_mask == ant_id).astype('uint8')
            last_frame = frame
            # self.cleanErrorsInFrame(frame,ant)
        return
    def getLastLabeledFrame(self):
        """Last frame any ant appears in (0 if there are no ants)."""
        return max((ant.getLastFrame() for ant in self.ants), default=0)
    def getLastFrame(self):
        """Last frame with any labeling activity, labeled or unlabeled."""
        lastAntFrame = self.getLastLabeledFrame()
        lastUnlabeledFrame = max((unlabeledFrame.frame for unlabeledFrame in self.info.unlabeledFrames), default=0)
        return max(lastAntFrame, lastUnlabeledFrame)
    def getLastId(self):
        """Highest ant id currently in use (0 when there are no ants)."""
        if len(self.ants) == 0:
            return 0
        else:
            return max([ant.id for ant in self.ants])
    def ants_as_tracks(self):
        """Convert every ant to its tracker Track representation."""
        return [ant.as_track() for ant in self.ants]
    @staticmethod
    def from_info(info: 'LabelingInfo'):
        """Rebuild a collection from a LabelingInfo (current serialization)."""
        self = AntCollection(np.zeros(info.video_shape, dtype="uint8"), video_length=info.video_length, info=info)
        self.ants = [Ant.from_track(track, info.video_shape) for track in info.tracks]
        self.id_iter = itertools.count(start=self.getLastId() + 1)
        self.getUnlabeledMask = self.__getUnlabeledMaskClosure(self.videoShape)
        self.version = info.labeler_version
        return self
class SerializableEnum(str, Enum):
    """Enum whose members are strings equal to their own names, so they
    serialize naturally to JSON via auto()."""

    # The enum machinery calls this hook with the member *name* as the
    # first positional argument; the previous signature named it `self`,
    # which was misleading (there is no instance yet at this point).
    def _generate_next_value_(name, start, count, last_values):
        return name
def first(iterable, condition=lambda x: True):
    """
    Returns the first element that satisfies `condition`. \n
    Returns `None` if not found.
    """
    for element in iterable:
        if condition(element):
            return element
    return None
# noinspection DuplicatedCode
@dataclass
class LabelingInfo(TracksInfo):
    """Tracker-compatible serialization of a labeling session: a TracksInfo
    (one Track per ant) extended with the per-frame unlabeled regions and
    the labeler version. Saved to '.tag' files."""
    # Both fields are set by __init__/decode rather than the dataclass
    # machinery, hence init=False.
    unlabeledFrames: List[UnlabeledFrame] = field(init=False)
    labeler_version: Version = field(init=False)
    file_extension: ClassVar = '.tag'
    def __init__(self, video_path, ants: List[Ant], unlabeled_frames: List[UnlabeledFrame]):
        # Segmenter/tracker fields are mocked: this data comes from manual
        # labeling, not from an actual segmenter/tracker run.
        super(LabelingInfo, self).__init__(
            video_path=video_path,
            tracks=sorted([ant.as_track() for ant in ants], key=lambda t: t.id),
            segmenter_version=Version("0"),
            segmenter_parameters=SegmenterParameters.mock(),
            tracker_version=Version("0"),
            tracker_parameters=TrackerParameters.mock(),
        )
        # Frames whose unlabeled pool is empty are not worth keeping.
        self.unlabeledFrames: List[UnlabeledFrame] = [uf for uf in unlabeled_frames if uf.contours]
        self.labeler_version = CollectionVersion
    class Serial(TracksInfo.Serial):
        # JSON layout: TracksInfo.Serial plus these two keys.
        unlabeled_frames: List[UnlabeledFrame.Serial]
        labeler_version: str
    def encode(self) -> 'LabelingInfo.Serial':
        """Serialize to a plain dict (inverse of decode)."""
        return { # noqa
            **super(LabelingInfo, self).encode(),
            'unlabeled_frames': [uf.encode() for uf in self.unlabeledFrames],
            'labeler_version': str(self.labeler_version),
        }
    @classmethod
    def decode(cls, info: 'LabelingInfo.Serial'):
        """Rebuild from encode() output, migrating pre-2.1 contour order."""
        labeler_version = Version(info.get('labeler_version', "1.0"))
        if labeler_version < Version("2.1"):
            # Versions before 2.1 stored contour points in (x, y) order.
            info['tracks'] = _flip_contours_before_2_1(info['tracks'])
        self = super(LabelingInfo, cls).decode(info)
        self.labeler_version = labeler_version
        size = self.video_shape[0] * self.video_shape[1]
        ufs = [UnlabeledFrame.decode(uf, self.video_shape, size) for uf in info['unlabeled_frames']]
        self.unlabeledFrames = [uf for uf in ufs if uf.contours]
        return self
    def serialize(self, pretty=False) -> str:
        """Serialize to JSON text; `pretty` switches to indented output."""
        if pretty: return to_json(self.encode())
        return ujson.dumps(self.encode())
    def save(self, file: Union[Path, str], pretty=False):
        """Write the serialized session to a '.tag' file."""
        if not isinstance(file, Path): # noqa
            file = Path(file)
        if not self._is_extension_valid(file):
            raise ValueError(f'Wrong extension ({file.suffix}). Only {self.file_extension} files are valid.')
        with file.open('w') as f:
            f.write(self.serialize(pretty=pretty))
def _flip_contours_before_2_1(tracks: List[Track.Serial]):
    """Swap every serialized contour point from (x, y) to (y, x), in place.

    Migration helper for files written by labeler versions before 2.1,
    which stored contour points in the opposite axis order. Returns the
    same (mutated) list for convenience.
    """
    for serial_track in tracks:
        for serial_blob in serial_track['blobs'].values():
            serial_blob['contour'] = [Position(pt[1], pt[0])
                                      for pt in serial_blob['contour']]
    return tracks
| 41.427171 | 115 | 0.600629 |
4e861329630c4a7948bccf329c5d4294e468ab3f | 991 | py | Python | backend-project/small_eod/events/migrations/0001_initial.py | WlodzimierzKorza/small_eod | 027022bd71122a949a2787d0fb86518df80e48cd | [
"MIT"
] | 64 | 2019-12-30T11:24:03.000Z | 2021-06-24T01:04:56.000Z | backend-project/small_eod/events/migrations/0001_initial.py | WlodzimierzKorza/small_eod | 027022bd71122a949a2787d0fb86518df80e48cd | [
"MIT"
] | 465 | 2018-06-13T21:43:43.000Z | 2022-01-04T23:33:56.000Z | backend-project/small_eod/events/migrations/0001_initial.py | WlodzimierzKorza/small_eod | 027022bd71122a949a2787d0fb86518df80e48cd | [
"MIT"
] | 72 | 2018-12-02T19:47:03.000Z | 2022-01-04T22:54:49.000Z | # Generated by Django 3.0.1 on 2020-01-09 13:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the events app: creates the Event table.

    NOTE(review): the 'data' field is a DateTimeField — possibly a typo
    for 'date'; confirm against the model before renaming anything.
    """

    # First migration of this app.
    initial = True

    dependencies = [
        # Event.case references cases.Case, so that table must exist first.
        ('cases', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('modified_on', models.DateTimeField(auto_now=True)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('name', models.CharField(max_length=256)),
                ('data', models.DateTimeField()),
                ('comment', models.CharField(max_length=256)),
                ('case', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='cases.Case')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 30.96875 | 114 | 0.565086 |
6bbb285f02d3581199534d3b9cdca118bcb0dfdf | 2,007 | py | Python | api/calendars/migrations/0001_initial.py | anjaekk/CRM-internship- | 94eab9401a7336ebbb11046a77c59b1d07e2bf68 | [
"MIT"
] | 1 | 2021-09-10T09:11:08.000Z | 2021-09-10T09:11:08.000Z | api/calendars/migrations/0001_initial.py | anjaekk/CRM-site-project | 94eab9401a7336ebbb11046a77c59b1d07e2bf68 | [
"MIT"
] | null | null | null | api/calendars/migrations/0001_initial.py | anjaekk/CRM-site-project | 94eab9401a7336ebbb11046a77c59b1d07e2bf68 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.7 on 2021-09-13 06:43
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the calendars app: creates the schedules table
    and the users_schedules many-to-many through table."""

    # First migration of this app.
    initial = True

    dependencies = [
        # Schedule references companies.Company/Contact; UserSchedule
        # references the configured user model.
        ('companies', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Schedule',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('title', models.CharField(max_length=300)),
                ('content', models.TextField()),
                ('schedule_date', models.DateTimeField()),
                ('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='companies.company')),
                ('contact', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='companies.contact')),
            ],
            options={
                'db_table': 'schedules',
            },
        ),
        migrations.CreateModel(
            name='UserSchedule',
            # Explicit through model for the Schedule <-> User relation.
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('schedule', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='calendars.schedule')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'users_schedules',
            },
        ),
        migrations.AddField(
            model_name='schedule',
            name='user',
            field=models.ManyToManyField(through='calendars.UserSchedule', to=settings.AUTH_USER_MODEL),
        ),
    ]
| 39.352941 | 130 | 0.594918 |
26d738f0628eede79ffbeea1d4a71e1bf46da8a7 | 1,935 | py | Python | setup.py | yutonogami/cta-lstchain | d234471e3da25f7408571628b2cf5bfe2e8331b3 | [
"BSD-3-Clause"
] | null | null | null | setup.py | yutonogami/cta-lstchain | d234471e3da25f7408571628b2cf5bfe2e8331b3 | [
"BSD-3-Clause"
] | 1 | 2020-09-29T22:33:53.000Z | 2020-09-29T22:33:53.000Z | setup.py | yutonogami/cta-lstchain | d234471e3da25f7408571628b2cf5bfe2e8331b3 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# import sys
from setuptools import setup, find_packages
import os
import sys
# Add lstchain folder to path (contains version.py)
# this is needed as lstchain/__init__.py imports dependencies
# that might not be installed before setup runs, so we cannot import
# lstchain.version
# Make lstchain/version.py importable as a standalone module before the
# package itself is importable (its dependencies may not be installed yet).
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'lstchain'))
from version import get_version, update_release_version # noqa
# Regenerate the release version file, then read the version string for setup().
update_release_version()
version = get_version()
def find_scripts(script_dir, prefix):
    """Build console-script entry-point strings for the scripts in *script_dir*.

    Every file whose name starts with *prefix* yields one string of the
    form ``"<name> = <dotted.module.path>.<name>:main"``, where ``<name>``
    is the filename without its extension and the dotted path is
    *script_dir* with ``/`` replaced by ``.``.
    """
    module_prefix = script_dir.replace('/', '.')
    return [
        f"{name} = {module_prefix}.{name}:main"
        for name in (
            os.path.splitext(entry)[0]
            for entry in os.listdir(script_dir)
            if entry.startswith(prefix)
        )
    ]
# Collect console-script entry points from the three script locations;
# all of them are exposed on the command line via setuptools.
lstchain_list = find_scripts('lstchain/scripts', 'lstchain_')
onsite_list = find_scripts('lstchain/scripts/onsite', 'onsite_')
tools_list = find_scripts('lstchain/tools', 'lstchain_')
entry_points = {}
entry_points['console_scripts'] = lstchain_list + onsite_list + tools_list
# Static metadata lives in setup.cfg/defaults; only the dynamic pieces
# (version, discovered packages, entry points) are passed here.
setup(
    version=version,
    packages=find_packages(),
    install_requires=[
        "astropy~=4.0,>=4.0.2",
        'ctapipe~=0.8.0',
        'ctaplot~=0.5.3',
        "eventio>=1.1.1,<2.0.0a0",  # at least 1.1.1, but not 2
        'gammapy>=0.18',
        'h5py',
        'joblib',
        'matplotlib',
        'numba',
        'numpy',
        'pandas',
        'pyirf~=0.4.0',
        'scipy',
        'seaborn',
        'scikit-learn',
        'tables',
        'traitlets',
        'iminuit~=1.5',
    ],
    package_data={
        'lstchain': ['data/lstchain_standard_config.json',
                     'resources/LST_pixid_to_cluster.txt'],
    },
    tests_require=[
        'pytest',
        'pytest-ordering',
    ],
    entry_points=entry_points
)
c1f27cd0c301361098f80d7f83e69c13ac0061c0 | 15,701 | py | Python | bitbake/lib/bb/data.py | prakhya/luv_sai | 91d7d83a3e71b63164f683274dcf2392d64892e7 | [
"MIT"
] | null | null | null | bitbake/lib/bb/data.py | prakhya/luv_sai | 91d7d83a3e71b63164f683274dcf2392d64892e7 | [
"MIT"
] | null | null | null | bitbake/lib/bb/data.py | prakhya/luv_sai | 91d7d83a3e71b63164f683274dcf2392d64892e7 | [
"MIT"
] | null | null | null | # ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'Data' implementations
Functions for interacting with the data structure used by the
BitBake build tools.
The expandKeys and update_data are the most expensive
operations. At night the cookie monster came by and
suggested 'give me cookies on setting the variables and
things will work out'. Taking this suggestion into account
applying the skills from the not yet passed 'Entwurf und
Analyse von Algorithmen' lecture and the cookie
monster seems to be right. We will track setVar more carefully
to have faster update_data and expandKeys operations.
This is a trade-off between speed and memory again but
the speed is more critical here.
"""
# Copyright (C) 2003, 2004 Chris Larson
# Copyright (C) 2005 Holger Hans Peter Freyther
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig
import sys, os, re
if sys.argv[0][-5:] == "pydoc":
path = os.path.dirname(os.path.dirname(sys.argv[1]))
else:
path = os.path.dirname(os.path.dirname(sys.argv[0]))
sys.path.insert(0, path)
from itertools import groupby
from bb import data_smart
from bb import codeparser
import bb
logger = data_smart.logger
_dict_type = data_smart.DataSmart
def init():
    """Return a new, empty BitBake datastore (an instance of the configured dict type)."""
    return _dict_type()
def init_db(parent = None):
    """Return a new BitBake datastore.

    When *parent* is given, the new store is created as a copy of it
    (via ``parent.createCopy()``); otherwise a fresh, empty store is
    returned.
    """
    return parent.createCopy() if parent is not None else _dict_type()
def createCopy(source):
    """Return a copy-on-write child datastore linked to *source*.

    Lookups that miss in the child fall through to *source*; any
    modification is recorded in the child only, leaving *source*
    untouched (copy-on-write semantics).
    """
    return source.createCopy()
def initVar(var, d):
    """Initialise variable *var* in datastore *d* without assigning a value (non-destructive)."""
    d.initVar(var)
def setVar(var, value, d):
    """Assign *value* to variable *var* in datastore *d*."""
    d.setVar(var, value)
def getVar(var, d, exp = False):
    """Return the value of *var* from *d*, expanding references first when *exp* is True."""
    return d.getVar(var, exp)
def renameVar(key, newkey, d):
    """Rename variable *key* to *newkey* within datastore *d*."""
    d.renameVar(key, newkey)
def delVar(var, d):
    """Remove variable *var* from datastore *d*."""
    d.delVar(var)
def appendVar(var, value, d):
    """Append *value* to the current value of variable *var* in *d*."""
    d.appendVar(var, value)
def setVarFlag(var, flag, flagvalue, d):
    """Set flag *flag* of variable *var* to *flagvalue* in datastore *d*."""
    d.setVarFlag(var, flag, flagvalue)
def getVarFlag(var, flag, d):
    """Return flag *flag* of variable *var* from *d* (expansion is always disabled here)."""
    return d.getVarFlag(var, flag, False)
def delVarFlag(var, flag, d):
    """Remove flag *flag* from variable *var* in datastore *d*."""
    d.delVarFlag(var, flag)
def setVarFlags(var, flags, d):
    """Set several flags on variable *var* from the mapping *flags*.

    Note:
        Existing flags are not cleared first; the entries of *flags*
        are merged on top, so this behaves like an ``addVarFlags``.
    """
    d.setVarFlags(var, flags)
def getVarFlags(var, d):
    """Return the flags of variable *var* from datastore *d*."""
    return d.getVarFlags(var)
def delVarFlags(var, d):
    """Remove all flags of variable *var* from datastore *d*."""
    d.delVarFlags(var)
def keys(d):
    """Return the variable names present in datastore *d*."""
    return d.keys()
# Pre-compiled patterns matching ${VAR} references and inline
# ${@python-expression} fragments inside variable values.
__expand_var_regexp__ = re.compile(r"\${[^{}]+}")
__expand_python_regexp__ = re.compile(r"\${@.+?}")
def expand(s, d, varname = None):
    """Expand ``${...}`` references in string *s* against datastore *d*.

    *varname*, if given, names the variable being expanded and is
    forwarded to ``d.expand()``.
    """
    return d.expand(s, varname)
def expandKeys(alterdata, readdata = None):
    """Rename every variable in *alterdata* whose *name* contains a
    ``${...}`` reference to its expanded name.

    Expansion is evaluated against *readdata*, which defaults to
    *alterdata* itself.
    """
    if readdata == None:
        readdata = alterdata
    # First pass: collect old-name -> expanded-name pairs without
    # mutating the datastore yet.
    todolist = {}
    for key in alterdata:
        if not '${' in key:
            continue
        ekey = expand(key, readdata)
        if key == ekey:
            continue
        todolist[key] = ekey
    # These two for loops are split for performance to maximise the
    # usefulness of the expand cache
    for key in sorted(todolist):
        ekey = todolist[key]
        newval = alterdata.getVar(ekey, False)
        if newval is not None:
            val = alterdata.getVar(key, False)
            if val is not None:
                # Both the unexpanded and the expanded name hold a
                # value; the rename below replaces the latter, so warn.
                bb.warn("Variable key %s (%s) replaces original key %s (%s)." % (key, val, ekey, newval))
        alterdata.renameVar(key, ekey)
def inheritFromOS(d, savedenv, permitted):
    """Import variables from the saved original environment *savedenv*
    into datastore *d*, restricted to the names listed in *permitted*.

    Names on bitbake's preserved-export list are additionally marked
    with the ``export`` flag so they reach spawned shells.
    """
    exportlist = bb.utils.preserved_envvars_exported()
    for s in savedenv.keys():
        if s in permitted:
            try:
                d.setVar(s, savedenv.getVar(s, True), op = 'from env')
                if s in exportlist:
                    d.setVarFlag(s, "export", True, op = 'auto env export')
            except TypeError:
                pass  # best-effort: skip values that cannot be set
def emit_var(var, o=sys.__stdout__, d = init(), all=False):
    """Write variable *var* to stream *o* in a form a shell can source.

    Returns a true value when a shell function was emitted, False
    otherwise.  With ``all`` set, non-exported variables and variable
    history are emitted as well (human-inspection mode).

    NOTE(review): the parameter name ``all`` shadows the builtin and
    the ``d = init()`` default is evaluated once at definition time —
    both are kept for interface compatibility.
    """
    func = d.getVarFlag(var, "func", False)
    # Python functions cannot be represented in shell syntax.
    if d.getVarFlag(var, 'python', False) and func:
        return False
    export = d.getVarFlag(var, "export", False)
    unexport = d.getVarFlag(var, "unexport", False)
    if not all and not export and not unexport and not func:
        return False
    try:
        if all:
            oval = d.getVar(var, False)
        val = d.getVar(var, True)
    except (KeyboardInterrupt, bb.build.FuncFailed):
        raise
    except Exception as exc:
        # Report expansion failures inline as a shell comment.
        o.write('# expansion of %s threw %s: %s\n' % (var, exc.__class__.__name__, str(exc)))
        return False
    if all:
        d.varhistory.emit(var, oval, val, o, d)
    # Names containing characters that are illegal in shell identifiers
    # are only emitted in 'all' mode.
    if (var.find("-") != -1 or var.find(".") != -1 or var.find('{') != -1 or var.find('}') != -1 or var.find('+') != -1) and not all:
        return False
    varExpanded = d.expand(var)
    if unexport:
        o.write('unset %s\n' % varExpanded)
        return False
    if val is None:
        return False
    val = str(val)
    # Re-emit exported bash functions (BASH_FUNC_name() encoding):
    # drop the "BASH_FUNC_" prefix and the trailing "()".
    if varExpanded.startswith("BASH_FUNC_"):
        varExpanded = varExpanded[10:-2]
        val = val[3:] # Strip off "() "
        o.write("%s() %s\n" % (varExpanded, val))
        o.write("export -f %s\n" % (varExpanded))
        return True
    if func:
        # NOTE: should probably check for unbalanced {} within the var
        val = val.rstrip('\n')
        o.write("%s() {\n%s\n}\n" % (varExpanded, val))
        return 1
    if export:
        o.write('export ')
    # if we're going to output this within doublequotes,
    # to a shell, we need to escape the quotes in the var
    alter = re.sub('"', '\\"', val)
    alter = re.sub('\n', ' \\\n', alter)
    alter = re.sub('\\$', '\\\\$', alter)
    o.write('%s="%s"\n' % (varExpanded, alter))
    return False
def emit_env(o=sys.__stdout__, d = init(), all=False):
    """Emit every non-internal variable in *d* to *o* in shell-sourceable
    form: plain variables first, then shell functions."""
    isfunc = lambda key: bool(d.getVarFlag(key, "func", False))
    # Sorting on the boolean "is a function" puts plain variables
    # (False) before functions (True), which groupby then preserves.
    keys = sorted((key for key in d.keys() if not key.startswith("__")), key=isfunc)
    grouped = groupby(keys, isfunc)
    for isfunc, keys in grouped:
        for key in keys:
            emit_var(key, o, d, all and not isfunc) and o.write('\n')
def exported_keys(d):
    """Lazily yield the names of variables in *d* that are flagged for
    export: the ``export`` flag is set, ``unexport`` is not, and the
    name is not an internal ``__``-prefixed one."""
    for name in d.keys():
        if name.startswith('__'):
            continue
        if not d.getVarFlag(name, 'export', False):
            continue
        if d.getVarFlag(name, 'unexport', False):
            continue
        yield name
def exported_vars(d):
    """Yield ``(name, value)`` pairs for every exported variable in *d*.

    Values are expanded and stringified.  Variables whose expansion
    raises are skipped (best-effort, matching the previous intent)
    instead of crashing with a NameError on the first key or — as the
    old code did — yielding the previous key's stale value under the
    wrong name.
    """
    for key in exported_keys(d):
        # Reset on every iteration: if getVar() raises, 'value' must not
        # retain the previous key's value, and it must be bound even on
        # the very first iteration.
        value = None
        try:
            value = d.getVar(key, True)
        except Exception:
            pass  # best-effort: unexpandable variables are omitted
        if value is not None:
            yield key, str(value)
def emit_func(func, o=sys.__stdout__, d = init()):
    """Emit shell function *func* and everything it needs to *o*.

    All plain (non-function) variables are written first, then *func*
    itself, then — transitively — every non-python shell function it
    calls or lists in its ``vardeps`` flag.
    """
    keys = (key for key in d.keys() if not key.startswith("__") and not d.getVarFlag(key, "func", False))
    for key in keys:
        emit_var(key, o, d, False) and o.write('\n')
    o.write('\n')
    emit_var(func, o, d, False) and o.write('\n')
    newdeps = bb.codeparser.ShellParser(func, logger).parse_shell(d.getVar(func, True))
    newdeps |= set((d.getVarFlag(func, "vardeps", True) or "").split())
    seen = set()
    # Breadth-first walk of the call graph until no new shell functions
    # are discovered.
    while newdeps:
        deps = newdeps
        seen |= deps
        newdeps = set()
        for dep in deps:
            if d.getVarFlag(dep, "func", False) and not d.getVarFlag(dep, "python", False):
                emit_var(dep, o, d, False) and o.write('\n')
                newdeps |= bb.codeparser.ShellParser(dep, logger).parse_shell(d.getVar(dep, True))
                newdeps |= set((d.getVarFlag(dep, "vardeps", True) or "").split())
        newdeps -= seen
# Template used to wrap a bare python function body into a named
# "def <name>(d):" definition when emitting python code.
_functionfmt = """
def {function}(d):
{body}"""
def emit_func_python(func, o=sys.__stdout__, d = init()):
    """Emit python function *func*, a call to it, and — transitively —
    every python function it references, as executable python source."""
    def write_func(func, o, call = False):
        # Emit one function; bare bodies are wrapped into a def via
        # _functionfmt, and optionally followed by a "func(d)" call.
        body = d.getVar(func, False)
        if not body.startswith("def"):
            body = _functionfmt.format(function=func, body=body)
        o.write(body.strip() + "\n\n")
        if call:
            o.write(func + "(d)" + "\n\n")
    write_func(func, o, True)
    pp = bb.codeparser.PythonParser(func, logger)
    pp.parse_python(d.getVar(func, False))
    newdeps = pp.execs
    newdeps |= set((d.getVarFlag(func, "vardeps", True) or "").split())
    seen = set()
    # Breadth-first walk: emit each newly discovered python function
    # exactly once.
    while newdeps:
        deps = newdeps
        seen |= deps
        newdeps = set()
        for dep in deps:
            if d.getVarFlag(dep, "func", False) and d.getVarFlag(dep, "python", False):
                write_func(dep, o)
                pp = bb.codeparser.PythonParser(dep, logger)
                pp.parse_python(d.getVar(dep, False))
                newdeps |= pp.execs
                newdeps |= set((d.getVarFlag(dep, "vardeps", True) or "").split())
        newdeps -= seen
def update_data(d):
    """Finalize datastore *d*: apply overrides and other end-of-parse
    processing (delegates to ``d.finalize``)."""
    d.finalize(parent = True)
def build_dependencies(key, keys, shelldeps, varflagsexcl, d):
    """Compute the signature dependencies of variable/task *key*.

    Returns ``(deps, value)``: the set of variable names *key* depends
    on and the dependency-relevant string value used for checksumming.
    *keys* is the set of known variable names, *shelldeps* the names
    every shell function implicitly depends on, and *varflagsexcl* an
    exclusion list of varflag names (may be falsy).
    """
    deps = set()
    try:
        # "NAME[flag]" style keys: depend on whatever the flag's value
        # references or executes.
        if key[-1] == ']':
            vf = key[:-1].split('[')
            value = d.getVarFlag(vf[0], vf[1], False)
            parser = d.expandWithRefs(value, key)
            deps |= parser.references
            deps = deps | (keys & parser.execs)
            return deps, value
        varflags = d.getVarFlags(key, ["vardeps", "vardepvalue", "vardepsexclude", "exports", "postfuncs", "prefuncs", "lineno", "filename"]) or {}
        vardeps = varflags.get("vardeps")
        value = d.getVar(key, False)
        def handle_contains(value, contains, d):
            # Fold the outcome of each recorded contains-check into the
            # value string so the signature flips when a check does.
            newvalue = ""
            for k in sorted(contains):
                l = (d.getVar(k, True) or "").split()
                for word in sorted(contains[k]):
                    if word in l:
                        newvalue += "\n%s{%s} = Set" % (k, word)
                    else:
                        newvalue += "\n%s{%s} = Unset" % (k, word)
            if not newvalue:
                return value
            if not value:
                return newvalue
            return value + newvalue
        if "vardepvalue" in varflags:
            # Explicit override of the value used for signatures.
            value = varflags.get("vardepvalue")
        elif varflags.get("func"):
            if varflags.get("python"):
                parser = bb.codeparser.PythonParser(key, logger)
                if value and "\t" in value:
                    logger.warning("Variable %s contains tabs, please remove these (%s)" % (key, d.getVar("FILE", True)))
                parser.parse_python(value, filename=varflags.get("filename"), lineno=varflags.get("lineno"))
                deps = deps | parser.references
                deps = deps | (keys & parser.execs)
                value = handle_contains(value, parser.contains, d)
            else:
                # Shell function: expand first, then parse the result.
                parsedvar = d.expandWithRefs(value, key)
                parser = bb.codeparser.ShellParser(key, logger)
                parser.parse_shell(parsedvar.value)
                deps = deps | shelldeps
                deps = deps | parsedvar.references
                deps = deps | (keys & parser.execs) | (keys & parsedvar.execs)
                value = handle_contains(value, parsedvar.contains, d)
            if vardeps is None:
                parser.log.flush()
            if "prefuncs" in varflags:
                deps = deps | set(varflags["prefuncs"].split())
            if "postfuncs" in varflags:
                deps = deps | set(varflags["postfuncs"].split())
            if "exports" in varflags:
                deps = deps | set(varflags["exports"].split())
        else:
            # Plain variable: depend on whatever its expansion
            # references or executes.
            parser = d.expandWithRefs(value, key)
            deps |= parser.references
            deps = deps | (keys & parser.execs)
            value = handle_contains(value, parser.contains, d)
        if "vardepvalueexclude" in varflags:
            exclude = varflags.get("vardepvalueexclude")
            for excl in exclude.split('|'):
                if excl:
                    value = value.replace(excl, '')
        # Add varflags, assuming an exclusion list is set
        if varflagsexcl:
            varfdeps = []
            for f in varflags:
                if f not in varflagsexcl:
                    varfdeps.append('%s[%s]' % (key, f))
            if varfdeps:
                deps |= set(varfdeps)
        # Manual additions/removals via the vardeps / vardepsexclude flags.
        deps |= set((vardeps or "").split())
        deps -= set(varflags.get("vardepsexclude", "").split())
    except Exception as e:
        bb.warn("Exception during build_dependencies for %s" % key)
        raise
    return deps, value
    #bb.note("Variable %s references %s and calls %s" % (key, str(deps), str(execs)))
    #d.setVarFlag(key, "vardeps", deps)
def generate_dependencies(d):
    """Compute dependency information for every task in datastore *d*.

    Returns ``(tasklist, deps, values)``: the task list from __BBTASKS,
    a mapping of each task/variable to its dependency set (closed
    transitively), and the matching signature values produced by
    build_dependencies().
    """
    keys = set(key for key in d if not key.startswith("__"))
    shelldeps = set(key for key in d.getVar("__exportlist", False) if d.getVarFlag(key, "export", False) and not d.getVarFlag(key, "unexport", False))
    varflagsexcl = d.getVar('BB_SIGNATURE_EXCLUDE_FLAGS', True)
    deps = {}
    values = {}
    tasklist = d.getVar('__BBTASKS', False) or []
    for task in tasklist:
        deps[task], values[task] = build_dependencies(task, keys, shelldeps, varflagsexcl, d)
        newdeps = deps[task]
        seen = set()
        # Transitive closure: keep resolving newly discovered deps
        # until nothing new appears.
        while newdeps:
            nextdeps = newdeps
            seen |= nextdeps
            newdeps = set()
            for dep in nextdeps:
                if dep not in deps:
                    deps[dep], values[dep] = build_dependencies(dep, keys, shelldeps, varflagsexcl, d)
                newdeps |= deps[dep]
            newdeps -= seen
        #print "For %s: %s" % (task, str(deps[task]))
    return tasklist, deps, values
def inherits_class(klass, d):
    """Return True when the datastore's ``__inherit_cache`` records that
    class *klass* (i.e. some ``classes/<klass>.bbclass`` file) was
    inherited, False otherwise."""
    suffix = os.path.join('classes', '%s.bbclass' % klass)
    cached = d.getVar('__inherit_cache', False) or []
    return any(entry.endswith(suffix) for entry in cached)
| 34.813747 | 150 | 0.594484 |
c675d125d213ade3aa35b8ddb513354e9326334c | 715 | py | Python | ddtrace/contrib/pyramid/__init__.py | pelotoncycle/dd-trace-py | b5254016dc42185eebfadce8dc634003408439d7 | [
"BSD-3-Clause"
] | null | null | null | ddtrace/contrib/pyramid/__init__.py | pelotoncycle/dd-trace-py | b5254016dc42185eebfadce8dc634003408439d7 | [
"BSD-3-Clause"
] | null | null | null | ddtrace/contrib/pyramid/__init__.py | pelotoncycle/dd-trace-py | b5254016dc42185eebfadce8dc634003408439d7 | [
"BSD-3-Clause"
] | 2 | 2017-05-27T05:58:36.000Z | 2019-02-07T13:38:53.000Z | """To trace requests from a Pyramid application, trace your application
config::
from pyramid.config import Configurator
from ddtrace.contrib.pyramid import trace_pyramid
settings = {
'datadog_trace_service' : 'my-web-app-name',
}
config = Configurator(settings=settings)
trace_pyramid(config)
# use your config as normal.
config.add_route('index', '/')
"""
from ..util import require_modules
# Only import the tracing helpers when the 'pyramid' package itself is
# importable; require_modules reports which requirements are missing.
required_modules = ['pyramid']
with require_modules(required_modules) as missing_modules:
    if not missing_modules:
        from .trace import trace_pyramid, trace_tween_factory
# Public API of this integration package.
__all__ = [
    'trace_pyramid',
    'trace_tween_factory',
]
| 23.833333 | 71 | 0.682517 |
6698991d3c2ce04eaf99b3f5d821a7f3fa907417 | 5,112 | py | Python | configs/wholebody/2d_kpt_sview_rgb_img/topdown_heatmap/coco-wholebody/hrnet_w48_coco_wholebody_384x288_dark.py | nightfuryyy/mmpose | 910d9e31dd9d46e3329be1b7567e6309d70ab64c | [
"Apache-2.0"
] | 1,775 | 2020-07-10T01:20:01.000Z | 2022-03-31T16:31:50.000Z | configs/wholebody/2d_kpt_sview_rgb_img/topdown_heatmap/coco-wholebody/hrnet_w48_coco_wholebody_384x288_dark.py | KHB1698/mmpose | 93c3a742c540dfb4ca515ad545cef705a07d90b4 | [
"Apache-2.0"
] | 1,021 | 2020-07-11T11:40:24.000Z | 2022-03-31T14:32:26.000Z | configs/wholebody/2d_kpt_sview_rgb_img/topdown_heatmap/coco-wholebody/hrnet_w48_coco_wholebody_384x288_dark.py | KHB1698/mmpose | 93c3a742c540dfb4ca515ad545cef705a07d90b4 | [
"Apache-2.0"
] | 477 | 2020-07-11T11:27:51.000Z | 2022-03-31T09:42:25.000Z | _base_ = ['../../../../_base_/datasets/coco_wholebody.py']
# Runtime / training-schedule settings for top-down HRNet-W48 on
# COCO-WholeBody at 384x288 with DARK decoding (cf. the '_dark' config
# name and the 'unbiased' options below).
log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='mAP', save_best='AP')
optimizer = dict(
    type='Adam',
    lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[170, 200])
total_epochs = 210
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# COCO-WholeBody defines 133 keypoints; all channels are used for both
# training and inference.
channel_cfg = dict(
    num_output_channels=133,
    dataset_joints=133,
    dataset_channel=[
        list(range(133)),
    ],
    inference_channel=list(range(133)))
# model settings
model = dict(
    type='TopDown',
    pretrained='https://download.openmmlab.com/mmpose/'
    'pretrain_models/hrnet_w48-8ef0771d.pth',
    # HRNet-W48 backbone: four stages, adding one lower-resolution
    # branch per stage, with channel widths 48/96/192/384.
    backbone=dict(
        type='HRNet',
        in_channels=3,
        extra=dict(
            stage1=dict(
                num_modules=1,
                num_branches=1,
                block='BOTTLENECK',
                num_blocks=(4, ),
                num_channels=(64, )),
            stage2=dict(
                num_modules=1,
                num_branches=2,
                block='BASIC',
                num_blocks=(4, 4),
                num_channels=(48, 96)),
            stage3=dict(
                num_modules=4,
                num_branches=3,
                block='BASIC',
                num_blocks=(4, 4, 4),
                num_channels=(48, 96, 192)),
            stage4=dict(
                num_modules=3,
                num_branches=4,
                block='BASIC',
                num_blocks=(4, 4, 4, 4),
                num_channels=(48, 96, 192, 384))),
    ),
    # 1x1 conv head mapping the 48-channel feature map to one heatmap
    # per keypoint; no deconvolution layers.
    keypoint_head=dict(
        type='TopdownHeatmapSimpleHead',
        in_channels=48,
        out_channels=channel_cfg['num_output_channels'],
        num_deconv_layers=0,
        extra=dict(final_conv_kernel=1, ),
        loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
    train_cfg=dict(),
    # 'unbiased' post-processing with kernel modulation — DARK-style
    # heatmap decoding.
    test_cfg=dict(
        flip_test=True,
        post_process='unbiased',
        shift_heatmap=True,
        modulate_kernel=17))
data_cfg = dict(
    image_size=[288, 384],
    heatmap_size=[72, 96],  # 1/4 of the input resolution
    num_output_channels=channel_cfg['num_output_channels'],
    num_joints=channel_cfg['dataset_joints'],
    dataset_channel=channel_cfg['dataset_channel'],
    inference_channel=channel_cfg['inference_channel'],
    soft_nms=False,
    nms_thr=1.0,
    oks_thr=0.9,
    vis_thr=0.2,
    # Evaluate on the detector boxes listed below rather than on
    # ground-truth person boxes.
    use_gt_bbox=False,
    det_bbox_thr=0.0,
    bbox_file='data/coco/person_detection_results/'
    'COCO_val2017_detections_AP_H_56_person.json',
)
# Training pipeline: flip / half-body / scale-rotation augmentation,
# normalisation, and unbiased (DARK) heatmap target encoding.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='TopDownRandomFlip', flip_prob=0.5),
    dict(
        type='TopDownHalfBodyTransform',
        num_joints_half_body=8,
        prob_half_body=0.3),
    dict(
        type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
    dict(type='TopDownAffine'),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],  # ImageNet statistics
        std=[0.229, 0.224, 0.225]),
    dict(type='TopDownGenerateTarget', sigma=3, unbiased_encoding=True),
    dict(
        type='Collect',
        keys=['img', 'target', 'target_weight'],
        meta_keys=[
            'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
            'rotation', 'bbox_score', 'flip_pairs'
        ]),
]
# Deterministic pipeline for validation; testing reuses it unchanged.
val_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='TopDownAffine'),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(
        type='Collect',
        keys=['img'],
        meta_keys=[
            'image_file', 'center', 'scale', 'rotation', 'bbox_score',
            'flip_pairs'
        ]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
    samples_per_gpu=32,
    workers_per_gpu=2,
    val_dataloader=dict(samples_per_gpu=32),
    test_dataloader=dict(samples_per_gpu=32),
    train=dict(
        type='TopDownCocoWholeBodyDataset',
        ann_file=f'{data_root}/annotations/coco_wholebody_train_v1.0.json',
        img_prefix=f'{data_root}/train2017/',
        data_cfg=data_cfg,
        pipeline=train_pipeline,
        dataset_info={{_base_.dataset_info}}),
    # val and test both use the val2017 split and annotations.
    val=dict(
        type='TopDownCocoWholeBodyDataset',
        ann_file=f'{data_root}/annotations/coco_wholebody_val_v1.0.json',
        img_prefix=f'{data_root}/val2017/',
        data_cfg=data_cfg,
        pipeline=val_pipeline,
        dataset_info={{_base_.dataset_info}}),
    test=dict(
        type='TopDownCocoWholeBodyDataset',
        ann_file=f'{data_root}/annotations/coco_wholebody_val_v1.0.json',
        img_prefix=f'{data_root}/val2017/',
        data_cfg=data_cfg,
        pipeline=test_pipeline,
        dataset_info={{_base_.dataset_info}}),
)
| 29.211429 | 79 | 0.600743 |
740dcf5593cdb29791251f65e4a974a00d3c6610 | 15,353 | py | Python | src/oci/tenant_manager_control_plane/models/domain_governance.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/tenant_manager_control_plane/models/domain_governance.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/tenant_manager_control_plane/models/domain_governance.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class DomainGovernance(object):
"""
The model for a domain governance entity.
"""
#: A constant which can be used with the lifecycle_state property of a DomainGovernance.
#: This constant has a value of "ACTIVE"
LIFECYCLE_STATE_ACTIVE = "ACTIVE"
#: A constant which can be used with the lifecycle_state property of a DomainGovernance.
#: This constant has a value of "INACTIVE"
LIFECYCLE_STATE_INACTIVE = "INACTIVE"
def __init__(self, **kwargs):
"""
Initializes a new DomainGovernance object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this DomainGovernance.
:type id: str
:param owner_id:
The value to assign to the owner_id property of this DomainGovernance.
:type owner_id: str
:param domain_id:
The value to assign to the domain_id property of this DomainGovernance.
:type domain_id: str
:param lifecycle_state:
The value to assign to the lifecycle_state property of this DomainGovernance.
Allowed values for this property are: "ACTIVE", "INACTIVE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_state: str
:param is_governance_enabled:
The value to assign to the is_governance_enabled property of this DomainGovernance.
:type is_governance_enabled: bool
:param subscription_email:
The value to assign to the subscription_email property of this DomainGovernance.
:type subscription_email: str
:param ons_topic_id:
The value to assign to the ons_topic_id property of this DomainGovernance.
:type ons_topic_id: str
:param ons_subscription_id:
The value to assign to the ons_subscription_id property of this DomainGovernance.
:type ons_subscription_id: str
:param time_created:
The value to assign to the time_created property of this DomainGovernance.
:type time_created: datetime
:param time_updated:
The value to assign to the time_updated property of this DomainGovernance.
:type time_updated: datetime
:param freeform_tags:
The value to assign to the freeform_tags property of this DomainGovernance.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this DomainGovernance.
:type defined_tags: dict(str, dict(str, object))
:param system_tags:
The value to assign to the system_tags property of this DomainGovernance.
:type system_tags: dict(str, dict(str, object))
"""
self.swagger_types = {
'id': 'str',
'owner_id': 'str',
'domain_id': 'str',
'lifecycle_state': 'str',
'is_governance_enabled': 'bool',
'subscription_email': 'str',
'ons_topic_id': 'str',
'ons_subscription_id': 'str',
'time_created': 'datetime',
'time_updated': 'datetime',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))',
'system_tags': 'dict(str, dict(str, object))'
}
self.attribute_map = {
'id': 'id',
'owner_id': 'ownerId',
'domain_id': 'domainId',
'lifecycle_state': 'lifecycleState',
'is_governance_enabled': 'isGovernanceEnabled',
'subscription_email': 'subscriptionEmail',
'ons_topic_id': 'onsTopicId',
'ons_subscription_id': 'onsSubscriptionId',
'time_created': 'timeCreated',
'time_updated': 'timeUpdated',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags',
'system_tags': 'systemTags'
}
self._id = None
self._owner_id = None
self._domain_id = None
self._lifecycle_state = None
self._is_governance_enabled = None
self._subscription_email = None
self._ons_topic_id = None
self._ons_subscription_id = None
self._time_created = None
self._time_updated = None
self._freeform_tags = None
self._defined_tags = None
self._system_tags = None
@property
def id(self):
"""
**[Required]** Gets the id of this DomainGovernance.
The OCID of the domain governance entity.
:return: The id of this DomainGovernance.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this DomainGovernance.
The OCID of the domain governance entity.
:param id: The id of this DomainGovernance.
:type: str
"""
self._id = id
@property
def owner_id(self):
"""
**[Required]** Gets the owner_id of this DomainGovernance.
The OCID of the tenancy that owns this domain governance entity.
:return: The owner_id of this DomainGovernance.
:rtype: str
"""
return self._owner_id
@owner_id.setter
def owner_id(self, owner_id):
"""
Sets the owner_id of this DomainGovernance.
The OCID of the tenancy that owns this domain governance entity.
:param owner_id: The owner_id of this DomainGovernance.
:type: str
"""
self._owner_id = owner_id
@property
def domain_id(self):
"""
**[Required]** Gets the domain_id of this DomainGovernance.
The OCID of the domain associated with this domain governance entity.
:return: The domain_id of this DomainGovernance.
:rtype: str
"""
return self._domain_id
@domain_id.setter
def domain_id(self, domain_id):
"""
Sets the domain_id of this DomainGovernance.
The OCID of the domain associated with this domain governance entity.
:param domain_id: The domain_id of this DomainGovernance.
:type: str
"""
self._domain_id = domain_id
@property
def lifecycle_state(self):
"""
**[Required]** Gets the lifecycle_state of this DomainGovernance.
Lifecycle state of the domain governance entity.
Allowed values for this property are: "ACTIVE", "INACTIVE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The lifecycle_state of this DomainGovernance.
:rtype: str
"""
return self._lifecycle_state
    @lifecycle_state.setter
    def lifecycle_state(self, lifecycle_state):
        """
        Sets the lifecycle_state of this DomainGovernance.
        Lifecycle state of the domain governance entity.

        Any value outside the allowed set "ACTIVE"/"INACTIVE" is stored
        as 'UNKNOWN_ENUM_VALUE' (judging by the helper's name, None and
        the SDK's none-sentinel pass through unchanged).

        :param lifecycle_state: The lifecycle_state of this DomainGovernance.
        :type: str
        """
        allowed_values = ["ACTIVE", "INACTIVE"]
        if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
            lifecycle_state = 'UNKNOWN_ENUM_VALUE'
        self._lifecycle_state = lifecycle_state
@property
def is_governance_enabled(self):
"""
Gets the is_governance_enabled of this DomainGovernance.
Indicates whether governance is enabled for this domain.
:return: The is_governance_enabled of this DomainGovernance.
:rtype: bool
"""
return self._is_governance_enabled
@is_governance_enabled.setter
def is_governance_enabled(self, is_governance_enabled):
"""
Sets the is_governance_enabled of this DomainGovernance.
Indicates whether governance is enabled for this domain.
:param is_governance_enabled: The is_governance_enabled of this DomainGovernance.
:type: bool
"""
self._is_governance_enabled = is_governance_enabled
@property
def subscription_email(self):
"""
Gets the subscription_email of this DomainGovernance.
The email to notify the user, and that the ONS subscription will be created with.
:return: The subscription_email of this DomainGovernance.
:rtype: str
"""
return self._subscription_email
@subscription_email.setter
def subscription_email(self, subscription_email):
"""
Sets the subscription_email of this DomainGovernance.
The email to notify the user, and that the ONS subscription will be created with.
:param subscription_email: The subscription_email of this DomainGovernance.
:type: str
"""
self._subscription_email = subscription_email
@property
def ons_topic_id(self):
"""
**[Required]** Gets the ons_topic_id of this DomainGovernance.
The ONS topic associated with this domain governance entity.
:return: The ons_topic_id of this DomainGovernance.
:rtype: str
"""
return self._ons_topic_id
@ons_topic_id.setter
def ons_topic_id(self, ons_topic_id):
"""
Sets the ons_topic_id of this DomainGovernance.
The ONS topic associated with this domain governance entity.
:param ons_topic_id: The ons_topic_id of this DomainGovernance.
:type: str
"""
self._ons_topic_id = ons_topic_id
@property
def ons_subscription_id(self):
"""
**[Required]** Gets the ons_subscription_id of this DomainGovernance.
The ONS subscription associated with this domain governance entity.
:return: The ons_subscription_id of this DomainGovernance.
:rtype: str
"""
return self._ons_subscription_id
@ons_subscription_id.setter
def ons_subscription_id(self, ons_subscription_id):
"""
Sets the ons_subscription_id of this DomainGovernance.
The ONS subscription associated with this domain governance entity.
:param ons_subscription_id: The ons_subscription_id of this DomainGovernance.
:type: str
"""
self._ons_subscription_id = ons_subscription_id
@property
def time_created(self):
"""
Gets the time_created of this DomainGovernance.
Date-time when this domain governance was created. An RFC 3339-formatted date and time string.
:return: The time_created of this DomainGovernance.
:rtype: datetime
"""
return self._time_created
@time_created.setter
def time_created(self, time_created):
"""
Sets the time_created of this DomainGovernance.
Date-time when this domain governance was created. An RFC 3339-formatted date and time string.
:param time_created: The time_created of this DomainGovernance.
:type: datetime
"""
self._time_created = time_created
@property
def time_updated(self):
"""
Gets the time_updated of this DomainGovernance.
Date-time when this domain governance was last updated. An RFC 3339-formatted date and time string.
:return: The time_updated of this DomainGovernance.
:rtype: datetime
"""
return self._time_updated
@time_updated.setter
def time_updated(self, time_updated):
"""
Sets the time_updated of this DomainGovernance.
Date-time when this domain governance was last updated. An RFC 3339-formatted date and time string.
:param time_updated: The time_updated of this DomainGovernance.
:type: datetime
"""
self._time_updated = time_updated
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this DomainGovernance.
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
:return: The freeform_tags of this DomainGovernance.
:rtype: dict(str, str)
"""
return self._freeform_tags
    @freeform_tags.setter
    def freeform_tags(self, freeform_tags):
        """
        Sets the freeform_tags of this DomainGovernance.

        Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
        Example: `{\"bar-key\": \"value\"}`

        :param freeform_tags: The freeform_tags of this DomainGovernance.
        :type: dict(str, str)
        """
        self._freeform_tags = freeform_tags
    @property
    def defined_tags(self):
        """
        Gets the defined_tags of this DomainGovernance.

        Defined tags for this resource. Each key is predefined and scoped to a namespace.
        Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`

        :return: The defined_tags of this DomainGovernance.
        :rtype: dict(str, dict(str, object))
        """
        return self._defined_tags
    @defined_tags.setter
    def defined_tags(self, defined_tags):
        """
        Sets the defined_tags of this DomainGovernance.

        Defined tags for this resource. Each key is predefined and scoped to a namespace.
        Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`

        :param defined_tags: The defined_tags of this DomainGovernance.
        :type: dict(str, dict(str, object))
        """
        self._defined_tags = defined_tags
    @property
    def system_tags(self):
        """
        Gets the system_tags of this DomainGovernance.

        Usage of system tag keys. These predefined keys are scoped to namespaces.
        Example: `{\"orcl-cloud\": {\"free-tier-retained\": \"true\"}}`

        :return: The system_tags of this DomainGovernance.
        :rtype: dict(str, dict(str, object))
        """
        return self._system_tags
    @system_tags.setter
    def system_tags(self, system_tags):
        """
        Sets the system_tags of this DomainGovernance.

        Usage of system tag keys. These predefined keys are scoped to namespaces.
        Example: `{\"orcl-cloud\": {\"free-tier-retained\": \"true\"}}`

        :param system_tags: The system_tags of this DomainGovernance.
        :type: dict(str, dict(str, object))
        """
        self._system_tags = system_tags
    def __repr__(self):
        """Return the flat-dict string rendering produced by ``formatted_flat_dict``."""
        return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Return the negation of :meth:`__eq__`."""
        return not self == other
| 33.017204 | 245 | 0.645086 |
2a26668fdad847dc9b7884e5962e8b530b7dbeeb | 3,765 | py | Python | a2c/envs.py | WeiChengTseng/DL_final_project | bbe61592a3d85c00731e254edcd1108075c49b6f | [
"Apache-2.0"
] | 7 | 2019-05-09T13:43:19.000Z | 2022-01-11T06:00:05.000Z | a2c/envs.py | pohanchi/DL_final_project | 8ade422f61a2e8bd4256523ebda56e19b189fe91 | [
"Apache-2.0"
] | null | null | null | a2c/envs.py | pohanchi/DL_final_project | 8ade422f61a2e8bd4256523ebda56e19b189fe91 | [
"Apache-2.0"
] | 4 | 2019-05-10T16:57:37.000Z | 2019-06-05T14:43:27.000Z | import numpy as np
import gym
from gym import spaces
from baselines.common.atari_wrappers import make_atari, wrap_deepmind
from baselines.common.vec_env import VecEnv
from multiprocessing import Process, Pipe
# cf https://github.com/openai/baselines
def make_env(env_name, rank, seed):
    """Build one seeded Atari env wrapped with the DeepMind preprocessing stack.

    ``rank`` offsets the seed so parallel workers get distinct RNG streams.
    """
    atari_env = make_atari(env_name)
    atari_env.seed(seed + rank)
    return wrap_deepmind(atari_env)
def worker(remote, parent_remote, env_fn_wrapper):
    """Subprocess entry point: build one env and serve Pipe commands until 'close'.

    :param remote: this worker's end of the Pipe.
    :param parent_remote: the parent's end; closed here so only the parent holds it.
    :param env_fn_wrapper: CloudpickleWrapper whose ``x`` is a zero-argument env factory.
    """
    parent_remote.close()
    env = env_fn_wrapper.x()
    while True:
        cmd, data = remote.recv()
        if cmd == 'step':
            ob, reward, done, info = env.step(data)
            if done:
                # Auto-reset finished episodes so the parent always receives
                # a valid observation for the next step.
                ob = env.reset()
            remote.send((ob, reward, done, info))
        elif cmd == 'reset':
            ob = env.reset()
            remote.send(ob)
        elif cmd == 'reset_task':
            ob = env.reset_task()
            remote.send(ob)
        elif cmd == 'close':
            remote.close()
            break
        elif cmd == 'get_spaces':
            remote.send((env.action_space, env.observation_space))
        elif cmd == 'render':
            # Fire-and-forget: no reply is sent for render requests.
            env.render()
        else:
            raise NotImplementedError
class CloudpickleWrapper(object):
    """
    Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
    """
    def __init__(self, x):
        # x is typically a closure (an env factory), which the default pickle
        # used by multiprocessing cannot serialize.
        self.x = x
    def __getstate__(self):
        import cloudpickle
        return cloudpickle.dumps(self.x)
    def __setstate__(self, ob):
        # cloudpickle output is ordinary pickle bytes, so the standard
        # pickle module can restore it on the receiving side.
        import pickle
        self.x = pickle.loads(ob)
class RenderSubprocVecEnv(VecEnv):
    def __init__(self, env_fns, render_interval):
        """ Minor addition to SubprocVecEnv, automatically renders environments

        envs: list of gym environments to run in subprocesses
        render_interval: render all envs every Nth call to step()
        """
        self.closed = False
        nenvs = len(env_fns)
        # One Pipe per env: ``remotes`` stay in the parent process,
        # ``work_remotes`` are handed to the worker subprocesses.
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = [
            Process(target=worker,
                    args=(work_remote, remote, CloudpickleWrapper(env_fn)))
            for (work_remote, remote,
                 env_fn) in zip(self.work_remotes, self.remotes, env_fns)
        ]
        for p in self.ps:
            p.daemon = True  # if the main process crashes, we should not cause things to hang
            p.start()
        # Parent no longer needs the worker ends once the children hold them.
        for remote in self.work_remotes:
            remote.close()
        # Action/observation spaces are queried from the first worker only.
        self.remotes[0].send(('get_spaces', None))
        self.action_space, self.observation_space = self.remotes[0].recv()
        self.render_interval = render_interval
        self.render_timer = 0
    def step(self, actions):
        # Fan out one action per env, then block collecting all results.
        for remote, action in zip(self.remotes, actions):
            remote.send(('step', action))
        results = [remote.recv() for remote in self.remotes]
        obs, rews, dones, infos = zip(*results)
        # Ask every worker to render once per ``render_interval`` steps.
        self.render_timer += 1
        if self.render_timer == self.render_interval:
            for remote in self.remotes:
                remote.send(('render', None))
            self.render_timer = 0
        return np.stack(obs), np.stack(rews), np.stack(dones), infos
    def reset(self):
        # Reset all envs and return the stacked initial observations.
        for remote in self.remotes:
            remote.send(('reset', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def reset_task(self):
        # Forward reset_task to every worker (envs must implement it).
        for remote in self.remotes:
            remote.send(('reset_task', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def close(self):
        # Idempotent shutdown: tell each worker to exit its loop, then join.
        if self.closed:
            return
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
        self.closed = True
    @property
    def num_envs(self):
        # Number of managed subprocess environments.
        return len(self.remotes)
| 29.880952 | 94 | 0.591766 |
4260b2616c3faecf56121480bfb1b0e87fa232d5 | 953 | py | Python | userAuth/admin.py | dahyorr/ticket-system-api | 767fc949b5cdb7ec2a55da981993846ad0d26386 | [
"MIT"
] | null | null | null | userAuth/admin.py | dahyorr/ticket-system-api | 767fc949b5cdb7ec2a55da981993846ad0d26386 | [
"MIT"
] | null | null | null | userAuth/admin.py | dahyorr/ticket-system-api | 767fc949b5cdb7ec2a55da981993846ad0d26386 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .forms import CustomUserCreationForm, CustomUserChangeForm
from .models import User
class CustomUserAdmin(UserAdmin):
    """Admin configuration for the custom email-based User model."""
    # Separate forms for the "add user" and "change user" admin pages.
    add_form = CustomUserCreationForm
    form = CustomUserChangeForm
    model = User
    # Columns shown in, and filters offered on, the changelist page.
    list_display = ('email', 'name', 'is_authorized', 'is_staff', 'is_active',)
    list_filter = ('email', 'name', 'is_authorized', 'is_staff', 'is_active',)
    # Field layout for the change page.
    fieldsets = (
        (None, {'fields': ('email', 'name', 'password')}),
        ('Permissions', {'fields': ('is_authorized', 'is_staff', 'is_active')}),
    )
    # Field layout for the add page (includes the password confirmation pair).
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('email', 'name', 'password1', 'password2', 'is_authorized', 'is_staff', 'is_active')}
        ),
    )
    search_fields = ('email', 'name')
    ordering = ('email',)
# Expose the custom User model in the Django admin with the config above.
admin.site.register(User, CustomUserAdmin)
| 32.862069 | 109 | 0.612802 |
e44926bafd2b22ea070a74440800e3904e462f89 | 3,329 | py | Python | locations/spiders/farmboy.py | davidchiles/alltheplaces | 6f35f6cd652e7462107ead0a77f322caff198653 | [
"MIT"
] | 297 | 2017-12-07T01:29:14.000Z | 2022-03-29T06:58:01.000Z | locations/spiders/farmboy.py | davidchiles/alltheplaces | 6f35f6cd652e7462107ead0a77f322caff198653 | [
"MIT"
] | 2,770 | 2017-11-28T04:20:21.000Z | 2022-03-31T11:29:16.000Z | locations/spiders/farmboy.py | davidchiles/alltheplaces | 6f35f6cd652e7462107ead0a77f322caff198653 | [
"MIT"
] | 111 | 2017-11-27T21:40:02.000Z | 2022-01-22T01:21:52.000Z | # -*- coding: utf-8 -*-
import scrapy
import re
from locations.items import GeojsonPointItem
class FarmBoySpider(scrapy.Spider):
    """Scrape Farm Boy store locations from the chain's stores page."""
    name = "farmboy"
    item_attributes = { 'brand': "Farmboy" }
    allowed_domains = ["www.farmboy.ca"]
    start_urls = (
        'https://www.farmboy.ca/about-us/stores/',
    )
    def parse_hours(self, trs):
        """Convert day/time table rows into an opening-hours string like
        'Mo 08:00-21:00; Tu 08:00-21:00'. Rows that don't match are skipped."""
        opening_hours = []
        for tr in trs:
            day = tr.xpath('td/text()').extract_first()
            time = tr.xpath('td[2]/text()').extract_first()
            if day is not None and time is not None:
                # Abbreviate full day names to the two-letter day codes.
                day = day.replace('Monday', 'Mo').replace('Tuesday', 'Tu').replace('Wednesday', 'We').replace('Thursday', 'Th').replace('Friday', 'Fr').replace('Saturday', 'Sa').replace('Sunday', 'Su')
                time = time.replace('\xa0', ' ')  # non-breaking spaces -> plain spaces
                # The lone ' . ' in the pattern matches whatever single
                # character separates the two times (e.g. a dash character).
                match = re.search(r'(\d{1,2}) (a|p)[.]m[.] . (\d{1,2}) (a|p)[.]m[.]', time)
                if match:
                    (f_hr, f_ampm, t_hr, t_ampm) = match.groups()
                    # 12-hour -> 24-hour clock; 12 a.m. becomes hour 0.
                    f_hr = int(f_hr)
                    if f_ampm == 'p':
                        f_hr += 12
                    elif f_ampm == 'a' and f_hr == 12:
                        f_hr = 0
                    t_hr = int(t_hr)
                    if t_ampm == 'p':
                        t_hr += 12
                    elif t_ampm == 'a' and t_hr == 12:
                        t_hr = 0
                    # Minutes are always rendered as ':00' (source gives hours only).
                    hours = '{:02d}:{}-{:02d}:{}'.format(
                        f_hr,
                        '00',
                        t_hr,
                        '00',
                    )
                    opening_hours.append('{} {}'.format(day, hours))
        return "; ".join(opening_hours)
    def parse(self, response):
        """Yield one GeojsonPointItem per store article on the page."""
        stores = response.xpath('//div[@id="portfolio"]/article')
        for store in stores:
            # NOTE(review): articles with the 'toronto' class are skipped —
            # presumably locations without full data; confirm against the page.
            if store.xpath('@class').extract_first() != 'all portfolio-item toronto':
                properties = {
                    "ref": store.xpath('div/h3/text()').extract_first(),
                    "name": store.xpath('div/h3/text()').extract_first(),
                    "addr_full": store.xpath('div/div/p/text()').extract_first(),
                    "postcode": self.postCode(store.xpath('div/div/p/text()[last()]').extract_first()),
                    "state": self.state(store.xpath('div/div/p/text()[last() - 1]').extract_first()),
                    "phone": self.phone(store.xpath('div/div/div[@id="cinfo"]/p/text()').extract_first()),
                    "opening_hours": self.parse_hours(store.xpath('div/div/div[@id="sinfo"]/table[1]/tbody/tr'))
                }
                yield GeojsonPointItem(**properties)
    def city(self, data):
        # NOTE(review): not referenced anywhere in this class — possibly dead code.
        str_list = data.split(',')
        return str_list[0].strip()
    def state(self, data):
        """Return the text after the first comma, else after the first space,
        else the input unchanged; '' for None."""
        if data is None: return ''
        m = re.search(r'(,)\s(.*)', data)
        m_space = re.search(r'\s(.*)', data)
        if m:
            return m.group(2)
        elif m_space:
            return m_space.group(1)
        else: return data
    def postCode(self, data):
        # Strip surrounding whitespace and embedded non-breaking spaces.
        if data is None: return ''
        return data.strip().replace('\xa0', '')
    def phone(self, data):
        # Drop a leading 'Label:' prefix when present; '' for None.
        if data is None: return ''
        if ':' not in data: return data.replace('\xa0', ' ')
        return data.split(':')[1]
| 37.829545 | 201 | 0.473415 |
03659ce77a14d555656f1117989ce822caa49645 | 6,115 | py | Python | map.py | haihala/generators | e5d41eedba0837589bdb16c9e37c4b5b1743cf9f | [
"MIT"
] | null | null | null | map.py | haihala/generators | e5d41eedba0837589bdb16c9e37c4b5b1743cf9f | [
"MIT"
] | null | null | null | map.py | haihala/generators | e5d41eedba0837589bdb16c9e37c4b5b1743cf9f | [
"MIT"
] | null | null | null | import pygame, argparse, os, math
from random import random
# RGB palette used when mapping terrain elevation to pixel colors.
DEEP_SEA = (0, 0, 100)
SHALLOW_SEA = (80, 80, 200)
MOUNTAIN = (151,124,83)
GRASS = (40, 150, 40)
BEACH = (223,193,99)
SUMMIT = (240, 240, 240)
def smooth(source, window, power):
    """Box-blur ``source`` (a 2D list of numbers) in place.

    Each interior cell becomes a blend of its (2*window+1)**2 neighbourhood
    average (weight ``power``) and its current value (weight ``1 - power``).
    Cells within ``window`` of the border are left untouched.

    Note: cells are updated while scanning, so later averages see already
    smoothed values — same incremental behaviour as before.
    """
    span = 1 + 2 * window
    for i in range(window, len(source) - window):
        for j in range(window, len(source[0]) - window):
            # Fixed: slices are end-exclusive, so ``+ window + 1`` is needed
            # to cover the full (2*window+1)-wide box the divisor assumes;
            # the old ``+ window`` summed a 2w-wide box and biased the average.
            total = sum(
                sum(cell for cell in row[j - window:j + window + 1])
                for row in source[i - window:i + window + 1]
            )
            avg = total / span ** 2
            source[i][j] = power * avg + (1 - power) * source[i][j]
def color_lerp(a, b, ratio):
    """Linearly blend two RGB triples: ratio=1 yields ``a``, ratio=0 yields ``b``.

    ``ratio`` is clamped to the [0, 1] range before blending; components are
    truncated to ints.
    """
    t = max(0, min(1, ratio))
    return tuple(int(a[i] * t + b[i] * (1 - t)) for i in range(3))
def generate(
    fname,
    size,
    view_result,
    height_offset,
    height_delta,
    height_points,
    height_smooth_size,
    height_smooth_power,
    height_smooth_rounds,
    beach_height,
    mountain_height,
    summit_height
):
    """Generate a random height-map world map; optionally preview and/or save it.

    :param fname: output file name under ./images; falsy skips saving
    :param size: (width, height) of the map in pixels
    :param view_result: open a pygame preview window when True
    :param height_offset: base elevation every cell starts from
    :param height_delta: magnitude range of the random elevation spikes
    :param height_points: fraction of pixels that receive a random spike
    :param height_smooth_size: smoothing window radius; also used as border padding
    :param height_smooth_power: blend weight of the smoothed average (0..1)
    :param height_smooth_rounds: number of smoothing passes
    :param beach_height: elevations below this render as beach
    :param mountain_height: elevations below this render as grass/forest
    :param summit_height: elevations at or above this render as snowy summit
    """
    pygame.init()
    # The height map is padded by the smoothing window on every side so the
    # smoothing passes never read past the intended map area; the padding is
    # cropped off below.
    height_map = [[height_offset for i in range(size[1]+2*height_smooth_size)] for j in range(size[0]+2*height_smooth_size)]
    surf = pygame.Surface(size)
    # Random points for altitude
    points = []
    for _ in range(int(height_points*size[0]*size[1])):
        i = int(random()*size[0])
        j = int(random()*size[1])
        points.append((i, j))
        height_map[i][j] += height_delta*(0.5-random())
    print("Randomness generated")
    # Smooth out data
    for i in range(height_smooth_rounds):
        print("Beginning height smoothing round {}".format(i+1))
        smooth(height_map, height_smooth_size, height_smooth_power)
    print("Smoothing done")
    # Remove original points by replacing each with a neighbour average.
    # NOTE(review): the slices are end-exclusive, so this averages the 2x2
    # block [i-1..i] x [j-1..j] minus the point itself, yet divides by 8 —
    # confirm whether the full 3x3 neighbourhood was intended.
    for point in points:
        i, j = point
        height_map[i][j] = (sum(sum(column for column in row[j-1:j+1]) for row in height_map[i-1:i+1])-height_map[i][j])/8
    # Crop to size. Do it this way to ignore edge when smoothing.
    height_map = [row[height_smooth_size:-height_smooth_size] for row in height_map[height_smooth_size:-height_smooth_size]]
    print("Cropping done")
    # Map to colors
    for i in range(size[0]):
        for j in range(size[1]):
            height = height_map[i][j]
            # Normalized magnitude used to pick the depth of water shading.
            height_scaler = abs(5000*height/height_delta)
            if height < 0:
                # Water
                surf.set_at((i, j), color_lerp(DEEP_SEA, SHALLOW_SEA, height_scaler))
            else:
                # Land: pick a band by elevation thresholds.
                color = (0, 0, 0)
                if height < beach_height:
                    # Beach
                    color = BEACH
                elif height < mountain_height:
                    # Grassland/forest
                    color = color_lerp(MOUNTAIN, GRASS, height/mountain_height)
                elif height < summit_height:
                    # Mountain
                    color = MOUNTAIN
                else:
                    # Snowy summit
                    color = SUMMIT
                surf.set_at((i, j), color)
    # Display if flag given
    if view_result:
        display = pygame.display.set_mode(size)
        pygame.display.set_caption("Preview. Escape to exit.")
        display.blit(surf, (0, 0))
        pygame.display.flip()
        closed = False
        while not closed:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    closed = True
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        closed = True
                if event.type == pygame.MOUSEBUTTONUP:
                    # Clicking the preview prints the elevation under the cursor.
                    pos = pygame.mouse.get_pos()
                    print("Clicked position data:")
                    print("Elevation: {}".format(height_map[pos[0]][pos[1]]))
    # Write to file if flag given
    if fname:
        if not os.path.isdir("images"):
            os.mkdir("images")
        if '.' not in fname:
            fname += ".png"
        pygame.image.save(surf, os.path.join("images", fname))
if __name__ == "__main__":
    # CLI entry point: parse tuning knobs and hand them straight to generate().
    parser = argparse.ArgumentParser(description="Generate a world map image")
    parser.add_argument("-file", type=str, default=False, nargs="?",
        help="Name of file to save the map to")
    parser.add_argument("width", type=int,
        help="Width of image")
    parser.add_argument("height", type=int,
        help="Height of image")
    parser.add_argument("-view", const=True, default=False, nargs='?',
        help="View map after generating")
    parser.add_argument("-height_offset", type=float, default=50,
        help="How high is sea level")
    parser.add_argument("-height_delta", type=float, default=800000,
        help="Height delta. How tall should the summits of mountains be.")
    # Fixed: this is a fraction of pixels (default 0.001), so it must be a
    # float; ``type=int`` truncated any user-supplied value to an integer
    # (usually 0), which disabled terrain generation entirely.
    parser.add_argument("-height_points", type=float, default=0.001,
        help="How many random points should random values be applied to per pixel")
    # Fixed help text: this is the smoothing window radius, not a weight
    # (the old text was copy-pasted from -height_smooth_power).
    parser.add_argument("-height_smooth_size", type=int, default=20,
        help="Radius of the smoothing window in pixels")
    parser.add_argument("-height_smooth_power", type=float, default=0.7,
        help="How much should smoothing influence(1 is total offset, 0 is no smoothing)")
    parser.add_argument("-height_smooth_rounds", type=int, default=6,
        help="How many times should the smoothing be ran")
    parser.add_argument("-beach_height", type=float, default=5,
        help="Maximum height for 'beach'")
    parser.add_argument("-mountain_height", type=float, default=100,
        help="Minimum height for 'mountain'")
    parser.add_argument("-summit_height", type=float, default=180,
        help="Minimum height for 'summit'")
    args = parser.parse_args()
    generate(
        args.file,
        (args.width, args.height),
        args.view,
        args.height_offset,
        args.height_delta,
        args.height_points,
        args.height_smooth_size,
        args.height_smooth_power,
        args.height_smooth_rounds,
        args.beach_height,
        args.mountain_height,
        args.summit_height
    )
| 36.616766 | 126 | 0.590842 |
c1613094a3b53c171fdffafa7de957fe0614048b | 895 | py | Python | py/libga_gat/fitnessplot.py | jimmycao/libga_mpi | 39f223891b1474b1b190a0033576f30723051f17 | [
"MIT"
] | 1 | 2020-06-14T08:28:01.000Z | 2020-06-14T08:28:01.000Z | py/libga_gat/fitnessplot.py | jimmycao/libga_mpi | 39f223891b1474b1b190a0033576f30723051f17 | [
"MIT"
] | null | null | null | py/libga_gat/fitnessplot.py | jimmycao/libga_mpi | 39f223891b1474b1b190a0033576f30723051f17 | [
"MIT"
] | null | null | null | from canvasbase import CanvasBase2D
import numpy as np
from operator import itemgetter
class Fitnessplot(CanvasBase2D):
    """Line plot of the best objective value seen at each generation."""
    def __init__(self):
        CanvasBase2D.__init__(self)
        self.filter_pareto_front = False
    def customize(self):
        # Axis labels for the fitness-over-generations chart.
        # NOTE(review): self.axes is presumably created by CanvasBase2D — confirm.
        self.axes.set_title("Fittest solution")
        self.axes.set_xlabel("Generation")
        self.axes.set_ylabel("Fitness / Objective value")
    def data_update(self, genome_info):
        """Append the current generation's best (minimum) objective value
        and redraw. ``genome_info['ospace']`` holds the objective values."""
        ospace = genome_info["ospace"]
        # Index of the smallest objective value (minimization assumed here).
        fittest = min(enumerate(ospace), key=itemgetter(1))[0]
        # x is simply the running generation count (current series length).
        self.data.set_xdata(np.append(self.data.get_xdata(), len(self.data.get_xdata())))
        self.data.set_ydata(np.append(self.data.get_ydata(), ospace[fittest]))
        self.axes.relim()
        self.axes.autoscale_view(True, True, True)
        self.fig.canvas.draw()
    def data_picker(self, data, mouseevent):
        # This plot has no pickable points; always report "no hit".
        return False, dict()
| 33.148148 | 89 | 0.675978 |
8998f0cbd0f6b419f40a7cac470b38512197f63f | 3,466 | py | Python | huaweicloud-sdk-ces/huaweicloudsdkces/v1/model/update_alarm_template_request.py | githubmilesma/huaweicloud-sdk-python-v3 | 9d9449ed68a609ca65f0aa50b5b2a1c28445bf03 | [
"Apache-2.0"
] | 1 | 2021-04-16T07:59:28.000Z | 2021-04-16T07:59:28.000Z | huaweicloud-sdk-ces/huaweicloudsdkces/v1/model/update_alarm_template_request.py | Lencof/huaweicloud-sdk-python-v3 | d13dc4e2830a83e295be6e4de021999b3376e34e | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-ces/huaweicloudsdkces/v1/model/update_alarm_template_request.py | Lencof/huaweicloud-sdk-python-v3 | d13dc4e2830a83e295be6e4de021999b3376e34e | [
"Apache-2.0"
] | 1 | 2022-01-17T02:24:18.000Z | 2022-01-17T02:24:18.000Z | # coding: utf-8
import pprint
import re
import six
class UpdateAlarmTemplateRequest:
    """Request model for the CES UpdateAlarmTemplate call (huaweicloud SDK).

    Attributes:
        openapi_types (dict): maps attribute name to attribute type.
        attribute_map (dict): maps attribute name to the JSON key used on the wire.
    """
    sensitive_list = []
    openapi_types = {
        'template_id': 'str',
        'body': 'UpdateAlarmTemplateRequestBody'
    }
    attribute_map = {
        'template_id': 'template_id',
        'body': 'body'
    }
    def __init__(self, template_id=None, body=None):
        """UpdateAlarmTemplateRequest - a model defined in huaweicloud sdk"""
        self._template_id = None
        self._body = None
        self.discriminator = None
        # Assign through the properties so any future validation applies.
        self.template_id = template_id
        if body is not None:
            self.body = body
    @property
    def template_id(self):
        """The ID of the alarm template to update.

        :rtype: str
        """
        return self._template_id
    @template_id.setter
    def template_id(self, template_id):
        """Set the ID of the alarm template to update.

        :type: str
        """
        self._template_id = template_id
    @property
    def body(self):
        """The request payload.

        :rtype: UpdateAlarmTemplateRequestBody
        """
        return self._body
    @body.setter
    def body(self, body):
        """Set the request payload.

        :type: UpdateAlarmTemplateRequestBody
        """
        self._body = body
    def to_dict(self):
        """Serialize the model's properties into a plain dict.

        Nested models are recursively converted via their own ``to_dict``;
        attributes listed in ``sensitive_list`` are masked as '****'.
        Uses built-in dict iteration instead of the py2 shim ``six.iteritems``.
        """
        def _convert(item):
            return item.to_dict() if hasattr(item, "to_dict") else item

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_convert(element) for element in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: _convert(v) for k, v in value.items()}
            elif attr in self.sensitive_list:
                result[attr] = "****"
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Return the pretty-printed string form of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Two requests are equal when they have the same type and state."""
        if not isinstance(other, UpdateAlarmTemplateRequest):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Return the negation of :meth:`__eq__`."""
        return not self == other
| 26.06015 | 79 | 0.564051 |
8efe6b1d0ee25b34657e9eb18d86d7677eb11512 | 955 | py | Python | lojaPy/products/views.py | Guiamba1/site | 4f9b3f50e80e7f55da573bce7b120df96b622396 | [
"MIT"
] | null | null | null | lojaPy/products/views.py | Guiamba1/site | 4f9b3f50e80e7f55da573bce7b120df96b622396 | [
"MIT"
] | null | null | null | lojaPy/products/views.py | Guiamba1/site | 4f9b3f50e80e7f55da573bce7b120df96b622396 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.shortcuts import get_object_or_404
from django.views.generic import DetailView, ListView
# from cart.forms import CartAddProductForm
from .models import Category, Product
class ProductDetailView(DetailView):
    """Detail page for a single available product."""
    queryset = Product.available.all()
    #extra_context = {"form": CartAddProductForm()}
class ProductListView(ListView):
    """Paginated list of available products, optionally filtered by category."""
    category = None
    paginate_by = 6
    def get_queryset(self):
        # When a category slug is in the URL, narrow the queryset to it
        # (404 if the slug does not match a Category).
        queryset = Product.available.all()
        category_slug = self.kwargs.get("slug")
        if category_slug:
            self.category = get_object_or_404(Category, slug=category_slug)
            queryset = queryset.filter(category=self.category)
        return queryset
    def get_context_data(self, **kwargs):
        # Expose the active category (or None) and the full category list
        # for the navigation sidebar.
        context = super().get_context_data(**kwargs)
        context["category"] = self.category
        context["categories"] = Category.objects.all()
        return context
| 28.088235 | 75 | 0.703665 |
8642db0c431fde8fc3a8bd35249c5ae3857f9727 | 3,449 | py | Python | readthedocs/restapi/urls.py | agarwalrounak/readthedocs.org | 4911600c230809bd6fb3585d1903121db2928ad6 | [
"MIT"
] | null | null | null | readthedocs/restapi/urls.py | agarwalrounak/readthedocs.org | 4911600c230809bd6fb3585d1903121db2928ad6 | [
"MIT"
] | 12 | 2019-12-05T04:47:01.000Z | 2022-01-09T00:56:58.000Z | readthedocs/restapi/urls.py | agarwalrounak/readthedocs.org | 4911600c230809bd6fb3585d1903121db2928ad6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Define routes between URL paths and views/endpoints."""
from django.conf import settings
from django.conf.urls import include, url
from rest_framework import routers
from readthedocs.constants import pattern_opts
from readthedocs.restapi.views import (
core_views,
footer_views,
integrations,
task_views,
)
from .views.model_views import (
BuildCommandViewSet,
BuildViewSet,
DomainViewSet,
NotificationViewSet,
ProjectViewSet,
RemoteOrganizationViewSet,
RemoteRepositoryViewSet,
SocialAccountViewSet,
VersionViewSet,
)
from readthedocs.sphinx_domains.api import SphinxDomainAPIView
# DRF router exposing the model viewsets under the API root.
router = routers.DefaultRouter()
router.register(r'build', BuildViewSet, basename='build')
router.register(r'command', BuildCommandViewSet, basename='buildcommandresult')
router.register(r'version', VersionViewSet, basename='version')
router.register(r'project', ProjectViewSet, basename='project')
router.register(r'notification', NotificationViewSet, basename='emailhook')
router.register(r'domain', DomainViewSet, basename='domain')
# Consistency fix: use the ``basename`` keyword like every other
# registration in this module (``base_name`` was deprecated in DRF 3.9
# and removed in 3.11).
router.register(r'sphinx_domains', SphinxDomainAPIView, basename='sphinxdomain')
router.register(
    r'remote/org',
    RemoteOrganizationViewSet,
    basename='remoteorganization',
)
router.register(
    r'remote/repo',
    RemoteRepositoryViewSet,
    basename='remoterepository',
)
router.register(
    r'remote/account',
    SocialAccountViewSet,
    basename='remoteaccount',
)
# Router-generated endpoints live at the API root.
urlpatterns = [
    url(r'^', include(router.urls)),
]
# Plain function-based endpoints.
function_urls = [
    url(r'docurl/', core_views.docurl, name='docurl'),
    url(r'footer_html/', footer_views.footer_html, name='footer_html'),
]
# Celery task status / trigger endpoints.
task_urls = [
    url(
        r'jobs/status/(?P<task_id>[^/]+)/',
        task_views.job_status,
        name='api_job_status',
    ),
    url(
        r'jobs/sync-remote-repositories/',
        task_views.sync_remote_repositories,
        name='api_sync_remote_repositories',
    ),
]
# Incoming VCS webhook endpoints; slug patterns are filled in from
# ``pattern_opts`` via str.format on the regex template.
integration_urls = [
    url(
        r'webhook/github/(?P<project_slug>{project_slug})/$'.format(
            **pattern_opts
        ),
        integrations.GitHubWebhookView.as_view(),
        name='api_webhook_github',
    ),
    url(
        r'webhook/gitlab/(?P<project_slug>{project_slug})/$'.format(
            **pattern_opts
        ),
        integrations.GitLabWebhookView.as_view(),
        name='api_webhook_gitlab',
    ),
    url(
        r'webhook/bitbucket/(?P<project_slug>{project_slug})/$'.format(
            **pattern_opts
        ),
        integrations.BitbucketWebhookView.as_view(),
        name='api_webhook_bitbucket',
    ),
    url(
        r'webhook/generic/(?P<project_slug>{project_slug})/$'.format(
            **pattern_opts
        ),
        integrations.APIWebhookView.as_view(),
        name='api_webhook_generic',
    ),
    url(
        # Adjacent string literals are concatenated at compile time, so
        # .format() here applies to the whole two-part pattern.
        (
            r'webhook/(?P<project_slug>{project_slug})/'
            r'(?P<integration_pk>{integer_pk})/$'.format(**pattern_opts)
        ),
        integrations.WebhookView.as_view(),
        name='api_webhook',
    ),
]
urlpatterns += function_urls
urlpatterns += task_urls
urlpatterns += integration_urls
# Sustainability endpoints are only mounted when the commercial extension
# is installed.
if 'readthedocsext.donate' in settings.INSTALLED_APPS:
    # pylint: disable=import-error
    from readthedocsext.donate.restapi.urls import urlpatterns \
        as sustainability_urls
    urlpatterns += [
        url(r'^sustainability/', include(sustainability_urls)),
    ]
| 26.945313 | 81 | 0.676428 |
704598dbfb4f1e7b6c4e1f6dfff113bf643e5667 | 422 | py | Python | config/api_router.py | todd-sudo/mailing | 722827e373a4190698669f5787c2b8619773371d | [
"MIT"
] | null | null | null | config/api_router.py | todd-sudo/mailing | 722827e373a4190698669f5787c2b8619773371d | [
"MIT"
] | null | null | null | config/api_router.py | todd-sudo/mailing | 722827e373a4190698669f5787c2b8619773371d | [
"MIT"
] | null | null | null | from django.conf import settings
from django.urls import include, path
from rest_framework.routers import DefaultRouter, SimpleRouter
from mailing.users.api.views import UserViewSet
# DefaultRouter adds the browsable API root page in development;
# SimpleRouter keeps the URL space lean in production.
router = DefaultRouter() if settings.DEBUG else SimpleRouter()
router.register("users", UserViewSet)

urlpatterns = [
    path("mailing/", include("mailing.mail.urls")),
]

app_name = "api"
urlpatterns += router.urls
| 19.181818 | 62 | 0.753555 |
478aa04735763b6c3b8851a346d290b08b888a45 | 416 | py | Python | setup.py | guitarparty/django-spreedly | 532e594d5163bf177141ce1e6a4b349c714e4e6d | [
"MIT"
] | null | null | null | setup.py | guitarparty/django-spreedly | 532e594d5163bf177141ce1e6a4b349c714e4e6d | [
"MIT"
] | null | null | null | setup.py | guitarparty/django-spreedly | 532e594d5163bf177141ce1e6a4b349c714e4e6d | [
"MIT"
] | null | null | null | from distutils.core import setup
from spreedly import __version__ as version
# Package metadata; ``version`` is imported from spreedly.__version__ so the
# release number lives in exactly one place.
setup(name='django-spreedly',
    version=version,
    description="Spreedly hook-ins for Django, forked from shelfworthy/django-spreedly",
    author="Jon Smelquist",
    author_email="jon.smelquist@gmail.com",
    url="http://github.com/jsmelquist/django-spreedly",
    packages = ['spreedly','spreedly.pyspreedly']
)
| 34.666667 | 90 | 0.713942 |
274b72fc6c80767cb3ffd1f960da98812d186bb3 | 13,149 | py | Python | tests/test_memoryfile.py | nmandery/rasterio | ba5e90c487bd1930f52e57dba999e889b4df9ade | [
"BSD-3-Clause"
] | 1,479 | 2015-01-10T12:35:07.000Z | 2021-10-18T16:17:15.000Z | tests/test_memoryfile.py | nmandery/rasterio | ba5e90c487bd1930f52e57dba999e889b4df9ade | [
"BSD-3-Clause"
] | 1,819 | 2015-01-06T21:56:25.000Z | 2021-10-20T02:28:27.000Z | tests/test_memoryfile.py | nmandery/rasterio | ba5e90c487bd1930f52e57dba999e889b4df9ade | [
"BSD-3-Clause"
] | 509 | 2015-01-06T20:59:12.000Z | 2021-10-18T14:14:57.000Z | """MemoryFile tests. MemoryFile requires GDAL 2.0+.
Tests in this file will ONLY run for GDAL >= 2.x"""
from io import BytesIO
import logging
import os.path
from affine import Affine
import numpy
import pytest
import rasterio
from rasterio.io import MemoryFile, ZipMemoryFile
from rasterio.enums import MaskFlags
from rasterio.env import GDALVersion
from rasterio.shutil import copyfiles
# Skip ENTIRE module if not GDAL >= 2.x.
# pytestmark is a keyword that instructs pytest to skip this module.
pytestmark = pytest.mark.skipif(
    GDALVersion.runtime().major < 2,
    reason="MemoryFile requires GDAL 2.x")
@pytest.fixture(scope='session')
def rgb_file_bytes(path_rgb_byte_tif):
    """Get the bytes of our RGB.bytes.tif file"""
    # Use a context manager so the file handle is closed instead of leaked
    # (the old ``open(...).read()`` relied on GC and raised ResourceWarning).
    with open(path_rgb_byte_tif, 'rb') as f:
        return f.read()
@pytest.fixture(scope='session')
def rgb_lzw_file_bytes():
    """Get the bytes of our LZW-compressed rgb_lzw.tif file"""
    # Context manager closes the handle instead of leaking it.
    with open('tests/data/rgb_lzw.tif', 'rb') as f:
        return f.read()
@pytest.fixture(scope='function')
def rgb_file_object(path_rgb_byte_tif):
    """Get RGB.bytes.tif file opened in 'rb' mode"""
    # Yield-fixture so the handle is closed after the test finishes
    # (returning the bare handle left it open forever).
    with open(path_rgb_byte_tif, 'rb') as f:
        yield f
@pytest.fixture(scope='session')
def rgb_data_and_profile(path_rgb_byte_tif):
    """Return (raster array, creation profile) read from RGB.byte.tif."""
    with rasterio.open(path_rgb_byte_tif) as src:
        data = src.read()
        profile = src.profile
    return data, profile
def test_initial_empty():
    """A MemoryFile created without data is empty and positioned at 0."""
    with MemoryFile() as memfile:
        assert len(memfile) == 0
        assert len(memfile.getbuffer()) == 0
        assert memfile.tell() == 0
def test_initial_not_bytes():
    """Creating a MemoryFile from not bytes fails."""
    # A text string is not an acceptable initializer; bytes are required.
    with pytest.raises(TypeError):
        MemoryFile(u'lolwut')
def test_initial_bytes(rgb_file_bytes):
    """MemoryFile contents can initialized from bytes and opened."""
    with MemoryFile(rgb_file_bytes) as memfile:
        # The GeoTIFF is parsed straight from the in-memory buffer.
        with memfile.open() as src:
            assert src.driver == 'GTiff'
            assert src.count == 3
            assert src.dtypes == ('uint8', 'uint8', 'uint8')
            assert src.read().shape == (3, 718, 791)
def test_initial_lzw_bytes(rgb_lzw_file_bytes):
    """A MemoryFile can be initialized from LZW-compressed GeoTIFF bytes and opened."""
    with MemoryFile(rgb_lzw_file_bytes) as memfile:
        with memfile.open() as src:
            assert src.driver == 'GTiff'
            assert src.count == 3
            assert src.dtypes == ('uint8', 'uint8', 'uint8')
            assert src.read().shape == (3, 718, 791)
def test_initial_file_object(rgb_file_object):
    """A MemoryFile can be initialized from an open binary file object."""
    with MemoryFile(rgb_file_object) as memfile:
        with memfile.open() as src:
            assert src.driver == 'GTiff'
            assert src.count == 3
            assert src.dtypes == ('uint8', 'uint8', 'uint8')
            assert src.read().shape == (3, 718, 791)
def test_closed():
    """A closed MemoryFile can not be opened"""
    # Exiting the context manager closes (and frees) the in-memory file.
    with MemoryFile() as memfile:
        pass
    with pytest.raises(IOError):
        memfile.open()
def test_non_initial_bytes(rgb_file_bytes):
    """MemoryFile contents can be read from bytes and opened."""
    with MemoryFile() as memfile:
        # write() reports the number of bytes accepted.
        assert memfile.write(rgb_file_bytes) == len(rgb_file_bytes)
        with memfile.open() as src:
            assert src.driver == 'GTiff'
            assert src.count == 3
            assert src.dtypes == ('uint8', 'uint8', 'uint8')
            assert src.read().shape == (3, 718, 791)
def test_non_initial_bytes_in_two(rgb_file_bytes):
    """MemoryFile contents can be read from bytes in two steps and opened."""
    with MemoryFile() as memfile:
        # Two sequential writes must concatenate into a valid GeoTIFF.
        assert memfile.write(rgb_file_bytes[:10]) == 10
        assert memfile.write(rgb_file_bytes[10:]) == len(rgb_file_bytes) - 10
        with memfile.open() as src:
            assert src.driver == 'GTiff'
            assert src.count == 3
            assert src.dtypes == ('uint8', 'uint8', 'uint8')
            assert src.read().shape == (3, 718, 791)
def test_non_initial_bytes_in_two_reverse(rgb_file_bytes):
    """MemoryFile contents can be read from bytes in two steps, tail first, and opened.

    Demonstrates fix of #1926."""
    with MemoryFile() as memfile:
        # Write the tail at an offset first, then seek back and fill the head.
        memfile.seek(600000)
        assert memfile.write(rgb_file_bytes[600000:]) == len(rgb_file_bytes) - 600000
        memfile.seek(0)
        assert memfile.write(rgb_file_bytes[:600000]) == 600000
        with memfile.open() as src:
            assert src.driver == "GTiff"
            assert src.count == 3
            assert src.dtypes == ("uint8", "uint8", "uint8")
            assert src.read().shape == (3, 718, 791)
def test_no_initial_bytes(rgb_data_and_profile):
    """An empty MemoryFile can be opened and written into."""
    data, profile = rgb_data_and_profile
    with MemoryFile() as memfile:
        with memfile.open(**profile) as dst:
            dst.write(data)
        view = memfile.getbuffer()
        # Exact size of the in-memory GeoTIFF varies with GDAL
        # version and configuration.
        assert view.size > 1000000
        # NB: bytes(view) doesn't return what you'd expect with python 2.7.
        data = bytes(bytearray(view))
    # Round-trip: the captured bytes must open with an identical profile.
    with MemoryFile(data) as memfile:
        with memfile.open() as src:
            assert sorted(src.profile.items()) == sorted(profile.items())
def test_read(tmpdir, rgb_file_bytes):
    """Reading from a MemoryFile works"""
    with MemoryFile(rgb_file_bytes) as memfile:
        tmptiff = tmpdir.join('test.tif')
        # Stream out in 8 KiB chunks, appending each to a real file on disk.
        while 1:
            chunk = memfile.read(8192)
            if not chunk:
                break
            tmptiff.write(chunk, 'ab')
    with rasterio.open(str(tmptiff)) as src:
        assert src.count == 3
def test_file_object_read(rgb_file_object):
    """rasterio.open accepts an open binary file object directly."""
    with rasterio.open(rgb_file_object) as src:
        assert src.driver == 'GTiff'
        assert src.count == 3
        assert src.dtypes == ('uint8', 'uint8', 'uint8')
        assert src.read().shape == (3, 718, 791)
def test_file_object_read_variant(rgb_file_bytes):
    """An example of reading from a MemoryFile object."""
    memfile = MemoryFile(rgb_file_bytes)
    with rasterio.open(memfile) as src:
        assert src.driver == 'GTiff'
        assert src.count == 3
        assert src.dtypes == ('uint8',) * 3
        assert src.read().shape == (3, 718, 791)
def test_file_object_read_variant2(rgb_file_bytes):
    """An example of reading from a BytesIO object."""
    buffer = BytesIO(rgb_file_bytes)
    with rasterio.open(buffer) as src:
        assert src.driver == 'GTiff'
        assert src.count == 3
        assert src.dtypes == ('uint8',) * 3
        assert src.read().shape == (3, 718, 791)
def test_test_file_object_write(tmpdir, rgb_data_and_profile):
    """An example of writing to a file object."""
    # NOTE(review): the doubled "test_test_" prefix looks like a typo, but
    # the name is kept so the collected test id stays stable.
    raster, profile = rgb_data_and_profile
    path = tmpdir.join('test.tif')
    with path.open('wb') as fout, rasterio.open(fout, 'w', **profile) as dst:
        dst.write(raster)
    with rasterio.open(str(path)) as src:
        assert src.driver == 'GTiff'
        assert src.count == 3
        assert src.dtypes == ('uint8',) * 3
        assert src.read().shape == (3, 718, 791)
def test_nonpersistemt_memfile_fail_example(rgb_data_and_profile):
    """Reopening a BytesIO object after the writing dataset closed fails.

    NOTE(review): nothing here asserts the failure -- presumably the final
    open() raises; wrapping it in pytest.raises would make the check explicit.
    """
    data, profile = rgb_data_and_profile
    with BytesIO() as fout:
        with rasterio.open(fout, 'w', **profile) as dst:
            dst.write(data)
        # This fails because the MemoryFile created in open() is
        # gone.
        rasterio.open(fout)
def test_zip_closed():
    """A closed ZipMemoryFile can not be opened."""
    zipmemfile = ZipMemoryFile()
    with zipmemfile:
        pass
    # The context manager closed the file above, so any open() must fail.
    with pytest.raises(IOError):
        zipmemfile.open('foo')
def test_zip_file_object_read(path_zip_file):
    """An example of reading from a zip file object."""
    with open(path_zip_file, 'rb') as fsrc, ZipMemoryFile(fsrc) as zipmemfile:
        with zipmemfile.open('white-gemini-iv.vrt') as src:
            assert src.driver == 'VRT'
            assert (src.count, src.dtypes) == (3, ('uint8',) * 3)
            assert src.read().shape == (3, 768, 1024)
def test_vrt_memfile():
    """Successfully read an in-memory VRT."""
    # Rewrite the relative source path inside the VRT to an absolute one so
    # the in-memory copy still finds the JPEG on disk.
    relative = '<SourceFilename relativeToVRT="1">389225main_sw_1965_1024.jpg</SourceFilename>'
    absolute = '<SourceFilename relativeToVRT="0">{}/389225main_sw_1965_1024.jpg</SourceFilename>'.format(os.path.abspath("tests/data"))
    with open('tests/data/white-gemini-iv.vrt') as vrtfile:
        source = vrtfile.read().replace(relative, absolute)
    with MemoryFile(source.encode('utf-8'), ext='vrt') as memfile:
        with memfile.open() as src:
            assert src.driver == 'VRT'
            assert src.count == 3
            assert src.dtypes == ('uint8', 'uint8', 'uint8')
            assert src.read().shape == (3, 768, 1024)
def test_write_plus_mode():
    """A dataset opened on a MemoryFile in w+ mode can be read back."""
    band_values = (255, 204, 153)
    kwargs = dict(
        driver='GTiff', dtype='uint8', count=3, height=32, width=32,
        crs='epsg:3226',
        transform=Affine.identity() * Affine.scale(0.5, -0.5),
    )
    with MemoryFile() as memfile:
        with memfile.open(**kwargs) as dst:
            for band, value in enumerate(band_values, start=1):
                dst.write(numpy.full((32, 32), value, dtype='uint8'), band)
            data = dst.read()
            for index, value in enumerate(band_values):
                assert (data[index] == value).all()
def test_write_plus_model_jpeg():
    """A JPEG dataset opened on a MemoryFile in w+ mode can be read back."""
    band_values = (255, 204, 153)
    kwargs = dict(
        driver='JPEG', dtype='uint8', count=3, height=32, width=32,
        crs='epsg:3226',
        transform=Affine.identity() * Affine.scale(0.5, -0.5),
    )
    with rasterio.Env(), MemoryFile() as memfile:
        with memfile.open(**kwargs) as dst:
            for band, value in enumerate(band_values, start=1):
                dst.write(numpy.full((32, 32), value, dtype='uint8'), band)
            data = dst.read()
            for index, value in enumerate(band_values):
                assert (data[index] == value).all()
def test_memfile_copyfiles(path_rgb_msk_byte_tif):
    """Multiple files can be copied to a MemoryFile using copyfiles."""
    with rasterio.open(path_rgb_msk_byte_tif) as src:
        src_basename = os.path.basename(src.name)
        with MemoryFile(dirname="foo", filename=src_basename) as memfile:
            copyfiles(src.name, memfile.name)
            with memfile.open() as rgb2:
                # Both the TIFF and its .msk sidecar must have been copied.
                expected = [
                    '/vsimem/foo/{}'.format(src_basename),
                    '/vsimem/foo/{}.msk'.format(src_basename),
                ]
                assert sorted(rgb2.files) == sorted(expected)
def test_multi_memfile(path_rgb_msk_byte_tif):
    """Sibling MemoryFiles placed in one directory see each other."""
    with open(path_rgb_msk_byte_tif, 'rb') as tif_fp:
        tif_bytes = tif_fp.read()
    with open(path_rgb_msk_byte_tif + '.msk', 'rb') as msk_fp:
        msk_bytes = msk_fp.read()
    with MemoryFile(tif_bytes, dirname="bar", filename='foo.tif') as tifmemfile, \
            MemoryFile(msk_bytes, dirname="bar", filename='foo.tif.msk') as mskmemfile:
        with tifmemfile.open() as src:
            # The dataset discovers its .msk sidecar in the shared directory.
            basenames = sorted(os.path.basename(fn) for fn in src.files)
            assert basenames == sorted(['foo.tif', 'foo.tif.msk'])
            assert src.mask_flag_enums == ([MaskFlags.per_dataset],) * 3
def test_memory_file_gdal_error_message(capsys):
    """No weird error messages should be seen, see #1659."""
    data = numpy.array(
        [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
    ).astype('uint8')
    west_bound = 0
    north_bound = 2
    cellsize = 0.5
    transform = rasterio.transform.from_origin(
        west_bound, north_bound, cellsize, cellsize)
    # Context managers replace the original unclosed MemoryFile and the
    # explicit dataset.close(), so resources are released even on failure.
    with MemoryFile() as memfile:
        with memfile.open(driver='AAIGrid', width=data.shape[1],
                          height=data.shape[0], transform=transform, count=1,
                          dtype=data.dtype, nodata=-9999,
                          crs='epsg:3226') as dataset:
            dataset.write(data, 1)
    # GDAL "ERROR 4" messages must not leak to either stream.
    captured = capsys.readouterr()
    assert "ERROR 4" not in captured.err
    assert "ERROR 4" not in captured.out
def test_write_plus_mode_requires_width():
    """Width is required."""
    transform = Affine.identity() * Affine.scale(0.5, -0.5)
    with MemoryFile() as memfile:
        # Omitting width must raise TypeError.
        with pytest.raises(TypeError):
            memfile.open(driver='GTiff', dtype='uint8', count=3, height=32,
                         crs='epsg:3226', transform=transform)
def test_write_plus_mode_blockxsize_requires_width():
    """Width is required even when blockxsize is given."""
    transform = Affine.identity() * Affine.scale(0.5, -0.5)
    with MemoryFile() as memfile:
        # Supplying blockxsize does not relax the width requirement.
        with pytest.raises(TypeError):
            memfile.open(driver='GTiff', dtype='uint8', count=3, height=32,
                         crs='epsg:3226', transform=transform, blockxsize=128)
def test_write_rpcs_to_memfile():
    """Ensure we can write rpcs to a new MemoryFile."""
    with rasterio.open('tests/data/RGB.byte.rpc.vrt') as src:
        profile = src.profile.copy()
        with MemoryFile() as memfile:
            with memfile.open(**profile) as dst:
                # A freshly created dataset starts without RPCs; after
                # assignment they must be reported as present.
                assert dst.rpcs is None
                dst.rpcs = src.rpcs
                assert dst.rpcs
| 38.00289 | 236 | 0.63906 |
13c68b896949d0eefa0c9ab203b12a14d367842e | 401 | py | Python | Lib/idlelib/idlever.py | tyagi-prashant/cpython | a83d8a8b297deefc0893893446460265fc6dd149 | [
"PSF-2.0"
] | 1 | 2015-10-04T20:13:04.000Z | 2015-10-04T20:13:04.000Z | Lib/idlelib/idlever.py | tyagi-prashant/cpython | a83d8a8b297deefc0893893446460265fc6dd149 | [
"PSF-2.0"
] | null | null | null | Lib/idlelib/idlever.py | tyagi-prashant/cpython | a83d8a8b297deefc0893893446460265fc6dd149 | [
"PSF-2.0"
] | null | null | null | """
The separate Idle version was eliminated years ago;
idlelib.idlever is no longer used by Idle
and will be removed in 3.6 or later. Use
from sys import version
IDLE_VERSION = version[:version.index(' ')]
"""
# Kept for now only for possible existing extension use
import warnings as w
# Emit the module docstring itself as the deprecation notice on import.
w.warn(__doc__, DeprecationWarning)
from sys import version
# Everything before the first space of sys.version, e.g. "3.5.1".
IDLE_VERSION = version[:version.index(' ')]
| 30.846154 | 55 | 0.750623 |
0e78ab749c9cc051e2e649f8a8e9bd00faddb1a7 | 10,417 | py | Python | azure-mgmt-batchai/tests/test_mgmt_batchai_clusters.py | jafreck/azure-sdk-for-python | 8fc953a0a9e214e2963d5e4d06d5b92b6f56e614 | [
"MIT"
] | null | null | null | azure-mgmt-batchai/tests/test_mgmt_batchai_clusters.py | jafreck/azure-sdk-for-python | 8fc953a0a9e214e2963d5e4d06d5b92b6f56e614 | [
"MIT"
] | null | null | null | azure-mgmt-batchai/tests/test_mgmt_batchai_clusters.py | jafreck/azure-sdk-for-python | 8fc953a0a9e214e2963d5e4d06d5b92b6f56e614 | [
"MIT"
] | null | null | null | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=line-too-long
import azure.mgmt.batchai.models as models
from azure.mgmt.batchai import BatchAIManagementClient
from devtools_testutils import AzureMgmtTestCase
from devtools_testutils import ResourceGroupPreparer
from devtools_testutils import StorageAccountPreparer
from helpers import (
create_batchai_client, create_cluster, assert_existing_clusters_are, wait_for_nodes,
assert_remote_login_info_reported_for_nodes, assert_existing_clusters_are, get_node_ids, assert_file_in_file_share,
create_custom_job,wait_for_job_completion, assert_job_files_are, LOCATION, FAKE_STORAGE, NODE_STARTUP_TIMEOUT_SEC,
AUTO_SCALE_TIMEOUT_SEC, STANDARD_OUTPUT_DIRECTORY_ID, MINUTE
)
class ClusterTestCase(AzureMgmtTestCase):
def setUp(self):
super(ClusterTestCase, self).setUp()
self.client = create_batchai_client(self) # type: BatchAIManagementClient
self.cluster_name = self.get_resource_name('cluster')
@ResourceGroupPreparer(location=LOCATION)
@StorageAccountPreparer(name_prefix='psdk', location=LOCATION, playback_fake_resource=FAKE_STORAGE)
def test_creation_and_deletion(self, resource_group, location, storage_account, storage_account_key):
"""Tests basic use-case scenario.
1. Create cluster
2. Execute a task on the host
3. Execute a task in a docker container
4. Delete cluster
"""
cluster = create_cluster(
self.client, location, resource_group.name, self.cluster_name, 'STANDARD_D1', 1,
storage_account.name, storage_account_key)
self.assertEqual(cluster.name, self.cluster_name)
self.assertIsNone(cluster.errors)
self.assertEqual(cluster.vm_size, 'STANDARD_D1')
# Verify that the cluster is reported in the list of clusters
assert_existing_clusters_are(self, self.client, resource_group.name, [self.cluster_name])
# Verify that one node is allocated and become available
self.assertEqual(
wait_for_nodes(self.is_live, self.client, resource_group.name, self.cluster_name, 1,
NODE_STARTUP_TIMEOUT_SEC), 1)
assert_remote_login_info_reported_for_nodes(self, self.client, resource_group.name,
self.cluster_name, 1)
# Verify that the cluster able to run tasks.
self.assertCanRunJobOnHost(resource_group, location, cluster.id)
self.assertCanRunJobInContainer(resource_group, location, cluster.id)
# Test cluster deletion
self.client.clusters.delete(resource_group.name, self.cluster_name).result()
assert_existing_clusters_are(self, self.client, resource_group.name, [])
@ResourceGroupPreparer(location=LOCATION)
@StorageAccountPreparer(name_prefix='psdk', location=LOCATION, playback_fake_resource=FAKE_STORAGE)
def test_setup_task_execution(self, resource_group, location, storage_account, storage_account_key):
"""Tests setup task execution.
"""
cluster = create_cluster(
self.client, location, resource_group.name, self.cluster_name, 'STANDARD_D1', 1,
storage_account.name, storage_account_key,
setup_task_cmd='echo $GREETING $SECRET_GREETING',
setup_task_env={'GREETING': 'setup task'},
setup_task_secrets={'SECRET_GREETING': 'has a secret'}) # type: models.Cluster
# Verify that the cluster is reported in the list of clusters
assert_existing_clusters_are(self, self.client, resource_group.name, [self.cluster_name])
# Verify that one node is allocated and become available
self.assertEqual(
wait_for_nodes(self.is_live, self.client, resource_group.name, self.cluster_name, 1,
NODE_STARTUP_TIMEOUT_SEC), 1)
# Check that server doesn't return values for secrets
self.assertEqual(len(cluster.node_setup.setup_task.secrets), 1)
self.assertEqual(cluster.node_setup.setup_task.secrets[0].name, 'SECRET_GREETING')
self.assertIsNone(cluster.node_setup.setup_task.secrets[0].value)
# Verify that the setup task is completed by checking generated output. BatchAI reports a path which was auto-
# generated for storing setup output logs.
setup_task_output_path = cluster.node_setup.setup_task.std_out_err_path_suffix
nodes = get_node_ids(self.client, resource_group.name, self.cluster_name)
self.assertEqual(len(nodes), 1)
node_id = nodes[0]
assert_file_in_file_share(self, storage_account.name, storage_account_key,
setup_task_output_path,
'stdout-{0}.txt'.format(node_id),
u'setup task has a secret\n')
assert_file_in_file_share(self, storage_account.name, storage_account_key,
setup_task_output_path, 'stderr-{0}.txt'.format(node_id), u'')
self.client.clusters.delete(resource_group.name, self.cluster_name).result()
@ResourceGroupPreparer(location=LOCATION)
@StorageAccountPreparer(name_prefix='psdk', location=LOCATION, playback_fake_resource=FAKE_STORAGE)
def test_cluster_resizing(self, resource_group, location, storage_account, storage_account_key):
"""Tests manual cluster resizing"""
cluster = create_cluster(
self.client, location, resource_group.name, self.cluster_name, 'STANDARD_D1', 1,
storage_account.name, storage_account_key)
# Verify that one node is allocated and become available
self.assertEqual(
wait_for_nodes(self.is_live, self.client, resource_group.name, self.cluster_name, 1,
NODE_STARTUP_TIMEOUT_SEC), 1)
assert_remote_login_info_reported_for_nodes(self, self.client, resource_group.name,
self.cluster_name, 1)
self.assertCanResizeCluster(resource_group, 0)
self.assertCanResizeCluster(resource_group, 1)
# Verify that cluster able to run tasks after resizing.
self.assertCanRunJobOnHost(resource_group, location, cluster.id)
self.client.clusters.delete(resource_group.name, self.cluster_name).result()
@ResourceGroupPreparer(location=LOCATION)
@StorageAccountPreparer(name_prefix='psdk', location=LOCATION, playback_fake_resource=FAKE_STORAGE)
def test_auto_scaling(self, resource_group, location, storage_account, storage_account_key):
"""Tests auto-scaling"""
# Create the cluster with no nodes.
cluster = create_cluster(
self.client, location, resource_group.name, self.cluster_name, 'STANDARD_D1', 0,
storage_account.name, storage_account_key)
# Switch the cluster into auto-scale mode
self.client.clusters.update(resource_group.name, self.cluster_name,
scale_settings=models.ScaleSettings(
auto_scale=models.AutoScaleSettings(
minimum_node_count=0,
maximum_node_count=1)))
# Submit a task. BatchAI must increase the number of nodes to execute the task.
self.assertCanRunJobOnHost(resource_group, location, cluster.id, timeout_sec=AUTO_SCALE_TIMEOUT_SEC)
# Verify that cluster downsized to zero since there are no more jobs for it
self.assertEqual(
wait_for_nodes(self.is_live, self.client, resource_group.name, self.cluster_name, 0,
NODE_STARTUP_TIMEOUT_SEC), 0)
self.client.clusters.delete(resource_group.name, self.cluster_name).result()
def assertCanRunJobInContainer(self, resource_group, location, cluster_id, timeout_sec=MINUTE):
self.assertCanRunJob(resource_group, location, cluster_id, 'container_job',
models.ContainerSettings(image_source_registry=models.ImageSourceRegistry(image="ubuntu")),
timeout_sec)
def assertCanRunJobOnHost(self, resource_group, location, cluster_id, timeout_sec=MINUTE):
self.assertCanRunJob(resource_group, location, cluster_id, 'host_job', None, timeout_sec)
def assertCanRunJob(self, resource_group, location, cluster_id, job_name, container_settings, timeout_sec):
create_custom_job(self.client, resource_group.name, location, cluster_id, job_name, 1,
'echo hello | tee $AZ_BATCHAI_OUTPUT_OUTPUTS/hi.txt', container=container_settings)
# Verify if the job finishes reasonably fast.
self.assertEqual(
wait_for_job_completion(self.is_live, self.client, resource_group.name, job_name, timeout_sec),
models.ExecutionState.succeeded)
# Verify if output files and standard output files are available and contain expected greeting.
assert_job_files_are(self, self.client, resource_group.name, job_name, 'OUTPUTS',
{u'hi.txt': u'hello\n'})
assert_job_files_are(self, self.client, resource_group.name, job_name,
STANDARD_OUTPUT_DIRECTORY_ID,
{u'stdout.txt': u'hello\n', u'stderr.txt': ''})
def assertCanResizeCluster(self, resource_group, target):
self.client.clusters.update(resource_group.name, self.cluster_name, scale_settings=models.ScaleSettings(
manual=models.ManualScaleSettings(target_node_count=target)))
self.assertEqual(
wait_for_nodes(self.is_live, self.client, resource_group.name, self.cluster_name, target,
NODE_STARTUP_TIMEOUT_SEC),
target)
assert_remote_login_info_reported_for_nodes(self, self.client, resource_group.name,
self.cluster_name, target)
| 56.923497 | 120 | 0.673322 |
c392ba79c51499c48aa630ea5f5f3f730709e830 | 12,108 | py | Python | test/functional/mining_basic.py | Nugetzrul3/eqpay | 03e0388e98a2f3624e4e94ae9bc6119e1bd48be6 | [
"MIT"
] | 1 | 2021-09-13T13:48:47.000Z | 2021-09-13T13:48:47.000Z | test/functional/mining_basic.py | Nugetzrul3/eqpay | 03e0388e98a2f3624e4e94ae9bc6119e1bd48be6 | [
"MIT"
] | null | null | null | test/functional/mining_basic.py | Nugetzrul3/eqpay | 03e0388e98a2f3624e4e94ae9bc6119e1bd48be6 | [
"MIT"
] | 1 | 2021-09-04T12:46:23.000Z | 2021-09-04T12:46:23.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mining RPCs
- getmininginfo
- getblocktemplate proposal mode
- submitblock"""
import copy
from decimal import Decimal
from test_framework.blocktools import (
create_coinbase,
TIME_GENESIS_BLOCK,
)
from test_framework.messages import (
CBlock,
CBlockHeader
)
from test_framework.mininode import (
P2PDataStore,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
)
from test_framework.script import CScriptNum
# Serialized size of an (empty-nonce) block header, used for offset math.
BLOCK_HEADER_SIZE = len(CBlockHeader().serialize())


def assert_template(node, block, expect, rehash=True):
    """Submit *block* to *node* in proposal mode and check the response."""
    if rehash:
        block.hashMerkleRoot = block.calc_merkle_root()
    request = {
        'data': block.serialize().hex(),
        'mode': 'proposal',
        'rules': ['segwit'],
    }
    assert_equal(node.getblocktemplate(template_request=request), expect)
class MiningTest(BitcoinTestFramework):
    """Functional tests for mining RPCs: getmininginfo, getblocktemplate
    (proposal mode), submitblock and submitheader.

    Only documentation and a dead local (``TX_COUNT_OFFSET``, assigned but
    never read) were changed; the test logic and its ordering are preserved.
    """

    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True
        self.supports_cli = False

    def mine_chain(self):
        """Mine 600 blocks at 600 s intervals, then restart and reconnect."""
        self.log.info('Create some old blocks')
        for t in range(TIME_GENESIS_BLOCK, TIME_GENESIS_BLOCK + 600 * 600, 600):
            self.nodes[0].setmocktime(t)
            self.nodes[0].generate(1)
        mining_info = self.nodes[0].getmininginfo()
        assert_equal(mining_info['blocks'], 600)
        assert_equal(mining_info['currentblocktx'], 0)
        assert_equal(mining_info['currentblockweight'], 4000)
        self.restart_node(0)
        connect_nodes(self.nodes[0], 1)

    def run_test(self):
        self.mine_chain()
        node = self.nodes[0]

        def assert_submitblock(block, result_str_1, result_str_2=None):
            # Submit twice: first result is the rejection reason, second the
            # duplicate-rejection reason.
            block.solve()
            result_str_2 = result_str_2 or 'duplicate-invalid'
            assert_equal(result_str_1, node.submitblock(hexdata=block.serialize().hex()))
            assert_equal(result_str_2, node.submitblock(hexdata=block.serialize().hex()))

        self.log.info('getmininginfo')
        mining_info = node.getmininginfo()
        assert_equal(mining_info['blocks'], 600)
        assert_equal(mining_info['chain'], self.chain)
        assert 'currentblocktx' not in mining_info
        assert 'currentblockweight' not in mining_info
        assert_equal(mining_info['difficulty']['proof-of-work'], Decimal('4.656542373906925E-10'))
        assert_equal(mining_info['networkhashps'], Decimal('0.003333333333333334'))
        assert_equal(mining_info['pooledtx'], 0)

        # Mine a block to leave initial block download
        node.generatetoaddress(1, node.get_deterministic_priv_key().address)
        tmpl = node.getblocktemplate({'rules': ['segwit']})
        self.log.info("getblocktemplate: Test capability advertised")
        assert 'proposal' in tmpl['capabilities']
        assert 'coinbasetxn' not in tmpl

        coinbase_tx = create_coinbase(height=int(tmpl["height"]))
        # sequence numbers must not be max for nLockTime to have effect
        coinbase_tx.vin[0].nSequence = 2 ** 32 - 2
        coinbase_tx.rehash()

        # round-trip the encoded bip34 block height commitment
        assert_equal(CScriptNum.decode(coinbase_tx.vin[0].scriptSig), 602)
        # round-trip negative and multi-byte CScriptNums to catch python regression
        assert_equal(CScriptNum.decode(CScriptNum.encode(CScriptNum(1500))), 1500)
        assert_equal(CScriptNum.decode(CScriptNum.encode(CScriptNum(-1500))), -1500)
        assert_equal(CScriptNum.decode(CScriptNum.encode(CScriptNum(-1))), -1)

        block = CBlock()
        block.nVersion = tmpl["version"]
        block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
        block.nTime = tmpl["curtime"]
        block.nBits = int(tmpl["bits"], 16)
        block.nNonce = 0
        block.vtx = [coinbase_tx]

        self.log.info("getblocktemplate: segwit rule must be set")
        assert_raises_rpc_error(-8, "getblocktemplate must be called with the segwit rule set", node.getblocktemplate)

        self.log.info("getblocktemplate: Test valid block")
        assert_template(node, block, None)

        self.log.info("submitblock: Test block decode failure")
        assert_raises_rpc_error(-22, "Block decode failed", node.submitblock, block.serialize()[:-15].hex())

        self.log.info("getblocktemplate: Test bad input hash for coinbase transaction")
        bad_block = copy.deepcopy(block)
        bad_block.vtx[0].vin[0].prevout.hash += 1
        bad_block.vtx[0].rehash()
        assert_template(node, bad_block, 'bad-cb-missing')

        self.log.info("submitblock: Test invalid coinbase transaction")
        assert_raises_rpc_error(-22, "Block does not start with a coinbase", node.submitblock, bad_block.serialize().hex())

        self.log.info("getblocktemplate: Test truncated final transaction")
        assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': block.serialize()[:-1].hex(), 'mode': 'proposal', 'rules': ['segwit']})

        self.log.info("getblocktemplate: Test duplicate transaction")
        bad_block = copy.deepcopy(block)
        bad_block.vtx.append(bad_block.vtx[0])
        assert_template(node, bad_block, 'bad-txns-duplicate')
        assert_submitblock(bad_block, 'bad-txns-duplicate', 'bad-txns-duplicate')

        self.log.info("getblocktemplate: Test invalid transaction")
        bad_block = copy.deepcopy(block)
        bad_tx = copy.deepcopy(bad_block.vtx[0])
        bad_tx.vin[0].prevout.hash = 255
        bad_tx.rehash()
        bad_block.vtx.append(bad_tx)
        assert_template(node, bad_block, 'bad-txns-inputs-missingorspent')
        assert_submitblock(bad_block, 'bad-txns-inputs-missingorspent')

        self.log.info("getblocktemplate: Test nonfinal transaction")
        bad_block = copy.deepcopy(block)
        bad_block.vtx[0].nLockTime = 2 ** 32 - 1
        bad_block.vtx[0].rehash()
        assert_template(node, bad_block, 'bad-txns-nonfinal')
        assert_submitblock(bad_block, 'bad-txns-nonfinal')

        self.log.info("getblocktemplate: Test bad tx count")
        # The tx count is immediately after the block header
        bad_block_sn = bytearray(block.serialize())
        assert_equal(bad_block_sn[BLOCK_HEADER_SIZE], 1)
        bad_block_sn[BLOCK_HEADER_SIZE] += 1
        assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': bad_block_sn.hex(), 'mode': 'proposal', 'rules': ['segwit']})

        self.log.info("getblocktemplate: Test bad bits")
        bad_block = copy.deepcopy(block)
        bad_block.nBits = 469762303  # impossible in the real world
        assert_template(node, bad_block, 'bad-diffbits')

        self.log.info("getblocktemplate: Test bad merkle root")
        bad_block = copy.deepcopy(block)
        bad_block.hashMerkleRoot += 1
        assert_template(node, bad_block, 'bad-txnmrklroot', False)
        assert_submitblock(bad_block, 'bad-txnmrklroot', 'bad-txnmrklroot')

        # Upstream timestamp tests are disabled for this chain; see the PoS
        # note further below.
        #self.log.info("getblocktemplate: Test bad timestamps")
        #bad_block = copy.deepcopy(block)
        #bad_block.nTime = 2 ** 31 - 1
        #assert_template(node, bad_block, 'time-too-new')
        #assert_submitblock(bad_block, 'time-too-new', 'time-too-new')
        #bad_block.nTime = 0
        #assert_template(node, bad_block, 'time-too-old')
        #assert_submitblock(bad_block, 'time-too-old', 'time-too-old')

        self.log.info("getblocktemplate: Test not best block")
        bad_block = copy.deepcopy(block)
        bad_block.hashPrevBlock = 123
        assert_template(node, bad_block, 'inconclusive-not-best-prevblk')
        assert_submitblock(bad_block, 'prev-blk-not-found', 'prev-blk-not-found')

        self.log.info('submitheader tests')
        assert_raises_rpc_error(-22, 'Block header decode failed', lambda: node.submitheader(hexdata='xx' * BLOCK_HEADER_SIZE))
        assert_raises_rpc_error(-22, 'Block header decode failed', lambda: node.submitheader(hexdata='ff' * (BLOCK_HEADER_SIZE-2)))
        assert_raises_rpc_error(-25, 'Must submit previous header', lambda: node.submitheader(hexdata=super(CBlock, bad_block).serialize().hex()))

        block.nTime += 1
        block.solve()

        def chain_tip(b_hash, *, status='headers-only', branchlen=1):
            # Expected getchaintips entry for a tip at height 602.
            return {'hash': b_hash, 'height': 602, 'branchlen': branchlen, 'status': status}

        assert chain_tip(block.hash) not in node.getchaintips()
        node.submitheader(hexdata=block.serialize().hex())
        assert chain_tip(block.hash) in node.getchaintips()
        node.submitheader(hexdata=CBlockHeader(block).serialize().hex())  # Noop
        assert chain_tip(block.hash) in node.getchaintips()

        bad_block_root = copy.deepcopy(block)
        bad_block_root.hashMerkleRoot += 2
        bad_block_root.solve()
        assert chain_tip(bad_block_root.hash) not in node.getchaintips()
        node.submitheader(hexdata=CBlockHeader(bad_block_root).serialize().hex())
        assert chain_tip(bad_block_root.hash) in node.getchaintips()
        # Should still reject invalid blocks, even if we have the header:
        assert_equal(node.submitblock(hexdata=bad_block_root.serialize().hex()), 'bad-txnmrklroot')
        assert_equal(node.submitblock(hexdata=bad_block_root.serialize().hex()), 'bad-txnmrklroot')
        assert chain_tip(bad_block_root.hash) in node.getchaintips()
        # We know the header for this invalid block, so should just return early without error:
        node.submitheader(hexdata=CBlockHeader(bad_block_root).serialize().hex())
        assert chain_tip(bad_block_root.hash) in node.getchaintips()

        bad_block_lock = copy.deepcopy(block)
        bad_block_lock.vtx[0].nLockTime = 2**32 - 1
        bad_block_lock.vtx[0].rehash()
        bad_block_lock.hashMerkleRoot = bad_block_lock.calc_merkle_root()
        bad_block_lock.solve()
        assert_equal(node.submitblock(hexdata=bad_block_lock.serialize().hex()), 'bad-txns-nonfinal')
        assert_equal(node.submitblock(hexdata=bad_block_lock.serialize().hex()), 'duplicate-invalid')
        # Build a "good" block on top of the submitted bad block
        bad_block2 = copy.deepcopy(block)
        bad_block2.hashPrevBlock = bad_block_lock.sha256
        bad_block2.solve()
        assert_raises_rpc_error(-25, 'bad-prevblk', lambda: node.submitheader(hexdata=CBlockHeader(bad_block2).serialize().hex()))

        # Should reject invalid header right away, only applies to PoS blocks in eqpay.
        #bad_block_time = copy.deepcopy(block)
        #bad_block_time.nTime = 1
        #bad_block_time.solve()
        #assert_raises_rpc_error(-25, 'time-too-old', lambda: node.submitheader(hexdata=CBlockHeader(bad_block_time).serialize().hex()))

        # Should ask for the block from a p2p node, if they announce the header as well:
        node.add_p2p_connection(P2PDataStore())
        node.p2p.wait_for_getheaders(timeout=5)  # Drop the first getheaders
        node.p2p.send_blocks_and_test(blocks=[block], node=node)
        # Must be active now:
        assert chain_tip(block.hash, status='active', branchlen=0) in node.getchaintips()

        # Building a few blocks should give the same results
        node.generatetoaddress(10, node.get_deterministic_priv_key().address)
        #assert_raises_rpc_error(-25, 'time-too-old', lambda: node.submitheader(hexdata=CBlockHeader(bad_block_time).serialize().hex()))
        assert_raises_rpc_error(-25, 'bad-prevblk', lambda: node.submitheader(hexdata=CBlockHeader(bad_block2).serialize().hex()))
        node.submitheader(hexdata=CBlockHeader(block).serialize().hex())
        node.submitheader(hexdata=CBlockHeader(bad_block_root).serialize().hex())
        assert_equal(node.submitblock(hexdata=block.serialize().hex()), 'duplicate')  # valid
if __name__ == '__main__':
    # Allow running this functional test directly as a script.
    MiningTest().main()
| 48.047619 | 163 | 0.690287 |
61201d701a13c99c4208b23fda08108af9c35378 | 1,066 | py | Python | blender/arm/logicnode/deprecated/LN_set_mouse_lock.py | Lykdraft/armory | da1cf33930ce9a8b1865d35c128fe4842bef2933 | [
"Zlib"
] | null | null | null | blender/arm/logicnode/deprecated/LN_set_mouse_lock.py | Lykdraft/armory | da1cf33930ce9a8b1865d35c128fe4842bef2933 | [
"Zlib"
] | null | null | null | blender/arm/logicnode/deprecated/LN_set_mouse_lock.py | Lykdraft/armory | da1cf33930ce9a8b1865d35c128fe4842bef2933 | [
"Zlib"
] | null | null | null | from arm.logicnode.arm_nodes import *
class SetMouseLockNode(ArmLogicTreeNode):
"""Deprecated. It is recommended to use the 'Set Cursor State' node instead."""
bl_idname = 'LNSetMouseLockNode'
bl_label = 'Set Mouse Lock (Deprecated)'
bl_description = "Please use the \"Set Cursor State\" node instead"
bl_icon = 'ERROR'
arm_version = 2
def init(self, context):
super(SetMouseLockNode, self).init(context)
self.add_input('ArmNodeSocketAction', 'In')
self.add_input('NodeSocketBool', 'Lock')
self.add_output('ArmNodeSocketAction', 'Out')
def get_replacement_node(self, node_tree: bpy.types.NodeTree):
if self.arm_version not in (0, 1):
raise LookupError()
return NodeReplacement(
'LNSetMouseLockNode', self.arm_version, 'LNSetCursorStateNode', 1,
in_socket_mapping = {0:0, 1:1}, out_socket_mapping={0:0},
property_defaults={'property0': "Lock"}
)
add_node(SetMouseLockNode, category='input', section='mouse', is_obsolete=True)
| 38.071429 | 83 | 0.670732 |
ed5d6517ce001f9e52649461851c96d2ce0583a1 | 9,533 | py | Python | intus/utils.py | mtbossa/intus | 2eda3685a019afba069ea05a1d4c974aa767c912 | [
"MIT"
] | null | null | null | intus/utils.py | mtbossa/intus | 2eda3685a019afba069ea05a1d4c974aa767c912 | [
"MIT"
] | null | null | null | intus/utils.py | mtbossa/intus | 2eda3685a019afba069ea05a1d4c974aa767c912 | [
"MIT"
] | null | null | null | """ Generic functions"""
import datetime
import errno
import os
import pathlib
import platform
from os import listdir
from os.path import isfile, join
def get_open_command(file_name: str) -> str:
"""
Return the command for opening either Chrome or Chromium based
on the OS and file name.
:param file_name: str Name of the file that chrome will open
:return: str The complete command for opening chrome with the selected file
"""
INDEX_FILE_PATH = _get_index_file_path(file_name)
if platform.system() == 'Windows':
return 'chrome "' + INDEX_FILE_PATH + '" /incognito --start-fullscreen --disable-session-crashed-bubble ' \
'--disable-infobars'
return 'chromium-browser ' + INDEX_FILE_PATH + ' --incognito --start-fullscreen --disable-crash-reporter'
def _get_index_file_path(file_name: str) -> str:
"""
Return the index.html file path using pathlib library.
:param file_name: str Name of the file
:return: str Correct path to index file
"""
return str(pathlib.Path(file_name).absolute())
def get_close_command() -> str:
"""
Return the command for opening either
Chrome or Chromium based on the OS and file name.
:return: str Command for killing chrome
"""
if platform.system() == 'Windows':
return 'TASKKILL /F /IM chrome.exe'
return 'killall chromium-browse'
def transform_date_to_epoch(date_string: str) -> float:
"""
Return the given date string in epoch (int).
:param date_string: str The date in string format
:return: float EPOCH time of the given date
"""
date_time = datetime.datetime.strptime(date_string, '%Y-%m-%d %H:%M:%S')
# Transform to int, so won't have decimals, and multiply by 1000 to get milliseconds
# Needed for javascript Date object creation
seconds_since_epoch = date_time.timestamp()
return seconds_since_epoch
def transform_date_to_epoch_seconds(date_string: str) -> float:
"""Return the given date string in epoch (int)"""
date_time = datetime.datetime.strptime(date_string, '%Y-%m-%d %H:%M:%S')
# Transform to int, so won't have decimals, and multiply by 1000 to get milliseconds
# Needed for javascript Date object creation
seconds_since_epoch = int(date_time.timestamp() * 1000)
return seconds_since_epoch
def should_show1(post_data: dict) -> bool:
"""
Checks if the post should be shown.
:param post_data: dict Current post data
:return: True if should show, false otherwise
"""
# These two values will always be present and needed in both cases
start_time = datetime.time.fromisoformat(post_data['start_time'])
end_time = datetime.time.fromisoformat(post_data['end_time'])
# If not recurrence, means there's start and end date
if 'recurrence' in post_data:
# Must check the recurrence to know if should be shown today and in now time
if today_recurrence(post_data['recurrence']) and check_dates_and_times(start_time, end_time):
return True
else:
start_date = datetime.date.fromisoformat(post_data['start_date'])
end_date = datetime.date.fromisoformat(post_data['end_date'])
if check_dates_and_times(start_time, end_time, start_date, end_date):
return True
return False
def should_show(start_time: str, end_time: str, start_date: str = None, end_date: str = None,
                recurrence: dict = None) -> bool:
    """
    Decide whether the post should currently be displayed.

    :param start_time: str Time ISO format at which the post starts showing each day
    :param end_time: str Time ISO format at which the post stops showing each day
    :param start_date: str Date ISO format; required when no recurrence is given
    :param end_date: str Date ISO format; required when no recurrence is given
    :param recurrence: dict or None Recurrence data for recurring posts
    :return: True if should show, false otherwise
    """
    # The daily window is relevant in both branches.
    start_time = datetime.time.fromisoformat(start_time)
    end_time = datetime.time.fromisoformat(end_time)
    if recurrence is not None:
        # Recurring post: today must match the recurrence AND the current
        # time must fall inside the daily window.
        return (today_recurrence(recurrence)
                and check_dates_and_times(start_time, end_time))
    # No recurrence: rely on the explicit date range instead.
    start_date = datetime.date.fromisoformat(start_date)
    end_date = datetime.date.fromisoformat(end_date)
    return check_dates_and_times(start_time, end_time, start_date, end_date)
def today_recurrence(recurrence: dict, now_date=None) -> bool:
    """
    Check whether ``now_date`` matches every field of the recurrence.

    Each key of ``recurrence`` names a ``datetime.date`` attribute or
    method (e.g. 'weekday', 'day', 'month'); the mapped value is the
    result it must equal.

    :param recurrence: dict Recurrence information
    :param now_date: date Date to test; defaults to *today*, resolved at
        call time
    :return: bool True if every recurrence field matches, false otherwise
    """
    # BUG FIX: the old default ``now_date=datetime.date.today()`` was
    # evaluated once at import time, so a long-running process kept
    # comparing against a stale date.  Resolve "today" per call instead.
    if now_date is None:
        now_date = datetime.date.today()
    for key, value in recurrence.items():
        attr = getattr(now_date, key)
        # Methods such as weekday() need to be called; plain attributes
        # such as month/day are used directly.
        if callable(attr):
            attr = attr()
        if value != attr:
            return False
    return True
def check_dates_and_times(start_time, end_time, start_date=None, end_date=None,
                          now=None) -> bool:
    """
    Check the post's dates and times, deciding whether it should be
    shown at the given moment (``now``).

    :param start_time: time object Post start day time
    :param end_time: time object Post end day time
    :param start_date: date object Start date, defaults to today
    :param end_date: date object End date, defaults to today
    :param now: datetime object Current moment, defaults to now
    :return: bool True if should show, false otherwise
    """
    # BUG FIX: the old defaults (datetime.date.today() / datetime.datetime.today())
    # were evaluated once at import time, freezing "today"/"now" for the whole
    # lifetime of the process.  Resolve them at call time instead.
    if start_date is None:
        start_date = datetime.date.today()
    if end_date is None:
        end_date = datetime.date.today()
    if now is None:
        now = datetime.datetime.today()
    # Check 1: the start date must be today or earlier, and the end date
    # today or later.
    if start_date <= now.date() <= end_date:
        # Check 2: the date range allows showing; now verify the times.
        # If the current hour is strictly between the start and end hours
        # we can show without looking at the minutes.
        if start_time.hour < now.hour < end_time.hour:
            return True
        # Already past the start hour, so only the end hour/minute matter.
        if start_time.hour < now.hour:
            # Only reached when now.hour >= end_time.hour, so this tests
            # "same hour as the end and not yet past its minute".
            if end_time.hour >= now.hour and end_time.minute > now.minute:
                return True
        # Start hour equals the current hour: both the start and end
        # minutes must be checked.
        if start_time.hour == now.hour:
            # Show only if the start minute is the current minute or earlier.
            if start_time.minute <= now.minute:
                # If the end hour is later than now, minutes are irrelevant.
                if end_time.hour > now.hour:
                    return True
                # End hour equals now: show while the end minute is ahead.
                elif end_time.minute > now.minute:
                    return True
    return False
def same_list(l1, l2):
    """
    Check whether one list equals the other. Order matters.

    :param l1: list 1
    :param l2: list 2
    :return: bool True if the lists are the same, false otherwise.
    """
    # The redundant if/else around the comparison was removed: the
    # equality expression already yields the bool we want.
    return l1 == l2
def list_difference(list_1: list, list_2: list) -> list:
    """
    Collect the items of ``list_1`` that do not appear in ``list_2``.

    Duplicates in ``list_1`` are preserved; order follows ``list_1``.

    :param list_1: List to find the different values
    :param list_2: List to compare
    :return: List with the values that are in list_1 but not in list_2
    """
    missing = []
    for candidate in list_1:
        if candidate not in list_2:
            missing.append(candidate)
    return missing
def delete_file(filename: str) -> None:
    """
    Delete the given file, tolerating the case where it does not exist.

    :param filename: Filename with path
    :return: None
    :raises OSError: re-raised for any failure other than a missing file
    """
    try:
        os.remove(filename)
    except OSError as e:
        if e.errno != errno.ENOENT:  # errno.ENOENT = no such file or directory
            raise  # re-raise exception if a different error occurred
        # BUG FIX: the original printed the error twice; log it once.
        print(e)
def get_folder_filenames(folder_path: str) -> list:
    """
    List every regular file directly inside the given folder
    (sub-directories are excluded).

    :param folder_path: Folder path
    :return: list All files from the folder
    """
    entries = listdir(folder_path)
    return [entry for entry in entries if isfile(join(folder_path, entry))]
def create_filename(file_name: str, extension: str) -> str:
    """
    Build a filename: spaces become dashes, then '.' and the extension
    are appended.

    :param file_name: File name
    :param extension: File extension
    :return: str Transformed filename
    """
    dashed = '-'.join(file_name.split(' '))
    return f'{dashed}.{extension}'
| 36.949612 | 115 | 0.662541 |
4e00ec4c6b8dd0cf9023cb4e4415a401640b1283 | 15,127 | py | Python | app/engine/ovr.py | minggli/rainforest-tagging | 5a68298ebd7b0020c49b697f4738646e508d3c5b | [
"MIT"
] | 13 | 2017-04-28T20:05:13.000Z | 2020-12-15T01:19:32.000Z | app/engine/ovr.py | minggli/rainforest-tagging | 5a68298ebd7b0020c49b697f4738646e508d3c5b | [
"MIT"
] | null | null | null | app/engine/ovr.py | minggli/rainforest-tagging | 5a68298ebd7b0020c49b697f4738646e508d3c5b | [
"MIT"
] | 4 | 2017-06-29T03:54:38.000Z | 2019-07-18T08:25:39.000Z | # -*- coding: utf-8 -*-
"""
ovr
One-versus-Rest approach for multi-label classification, predicting each label
independently using Convolutional Neural Network similar architecture to VGG-32
VERY DEEP CONVOLUTIONAL NETWORKS FOR LARGE-SCALE IMAGE RECOGNITION
Simonyan K. & Zisserman A. (2015)
Densely Connected Convolutional Networks
Huang et al 2016
"""
import tensorflow as tf
import numpy as np
from __main__ import EVAL, TRAIN, ENSEMBLE
from app.models.cnn import BasicCNN, DenseNet
from app.settings import (IMAGE_PATH, IMAGE_SHAPE, BATCH_SIZE, MODEL_PATH,
MAX_STEPS, ALPHA, BETA, TAGS, TAGS_WEIGHTINGS, EXT,
TAGS_THRESHOLDS, VALID_SIZE, KEEP_RATE, OUTPUT_PATH,
N_THREADS, AUGMENT)
from app.pipeline import data_pipe, generate_data_skeleton
from app.controllers import (train, save_session, predict, restore_session,
submit)
def vgg_16_train(class_balance, l2_norm):
    """
    Build a VGG-16-style training graph for 17-tag multi-label classification.

    Uses the module-level ``cnn`` builder and the ``image_feed``/``label_feed``
    tensors, and publishes the graph endpoints through globals consumed by the
    training loop: ``prediction`` (per-tag sigmoid probabilities), ``loss``,
    ``train_step`` (RMSProp), ``accuracy`` (exact match across all tags)
    and ``saver``.

    :param class_balance: when truthy, weight the per-tag cross entropy by
        TAGS_WEIGHTINGS to counter label imbalance
    :param l2_norm: when truthy, add BETA-scaled L2 regularization over the
        last 32 weight/bias variables
    """
    global prediction, loss, train_step, accuracy, saver, is_train
    # Stack 1: two 3x3 convs (32 filters) + max-pool.
    conv_1 = \
        cnn.add_conv_layer(image_feed, [[3, 3, IMAGE_SHAPE[-1], 32], [32]])
    conv_2 = cnn.add_conv_layer(conv_1, [[3, 3, 32, 32], [32]])
    max_pool_1 = cnn.add_pooling_layer(conv_2)
    # Stack 2: two 3x3 convs (64 filters) + max-pool.
    conv_3 = cnn.add_conv_layer(max_pool_1, [[3, 3, 32, 64], [64]])
    conv_4 = cnn.add_conv_layer(conv_3, [[3, 3, 64, 64], [64]])
    max_pool_2 = cnn.add_pooling_layer(conv_4)
    # Stack 3: three 3x3 convs (128 filters) + max-pool.
    conv_5 = cnn.add_conv_layer(max_pool_2, [[3, 3, 64, 128], [128]])
    conv_6 = cnn.add_conv_layer(conv_5, [[3, 3, 128, 128], [128]])
    conv_7 = cnn.add_conv_layer(conv_6, [[3, 3, 128, 128], [128]])
    max_pool_3 = cnn.add_pooling_layer(conv_7)
    # Stack 4: three 3x3 convs (256 filters) + max-pool.
    conv_8 = cnn.add_conv_layer(max_pool_3, [[3, 3, 128, 256], [256]])
    conv_9 = cnn.add_conv_layer(conv_8, [[3, 3, 256, 256], [256]])
    conv_10 = cnn.add_conv_layer(conv_9, [[3, 3, 256, 256], [256]])
    max_pool_4 = cnn.add_pooling_layer(conv_10)
    # Stack 5: three 3x3 convs (256 filters) + max-pool.
    conv_11 = cnn.add_conv_layer(max_pool_4, [[3, 3, 256, 256], [256]])
    conv_12 = cnn.add_conv_layer(conv_11, [[3, 3, 256, 256], [256]])
    conv_13 = cnn.add_conv_layer(conv_12, [[3, 3, 256, 256], [256]])
    max_pool_5 = cnn.add_pooling_layer(conv_13)
    # Dense head with dropout.  The flattened feature map is hard-coded as
    # 4 * 4 * 256 -- presumably tied to IMAGE_SHAPE; TODO confirm.
    dense_1 = cnn.add_dense_layer(max_pool_5, [[4 * 4 * 256, 2048], [2048]])
    drop_out_1 = cnn.add_drop_out_layer(dense_1)
    dense_2 = cnn.add_dense_layer(drop_out_1, [[2048, 512], [512]])
    drop_out_2 = cnn.add_drop_out_layer(dense_2)
    logits = cnn.add_read_out_layer(drop_out_2)
    # One-vs-rest: independent sigmoid cross entropy per tag.
    cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,
                                                            labels=label_feed)
    if class_balance:
        class_weights = tf.constant([[TAGS_WEIGHTINGS]],
                                    shape=[1, cnn._n_class])
        cross_entropy *= class_weights
    if l2_norm:
        # Regularize only the most recently created 32 weight/bias
        # variables (the dense head, by construction order).
        weights2norm = [var for var in tf.trainable_variables()
                        if var.name.startswith(('weight', 'bias'))][-32:]
        regularizers = tf.add_n([tf.nn.l2_loss(var) for var in weights2norm])
        cross_entropy += BETA * regularizers
    for n in tf.global_variables():
        print(n)
    loss = tf.reduce_mean(cross_entropy)
    # Run any ops collected in UPDATE_OPS (typically batch-norm statistics)
    # before each optimizer step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_step = tf.train.RMSPropOptimizer(learning_rate=ALPHA,
                                               decay=0.3,
                                               momentum=.5,
                                               epsilon=1e-10,
                                               use_locking=False,
                                               centered=False).minimize(loss)
    prediction = tf.nn.sigmoid(logits)
    # A sample counts as correct only when EVERY tag's thresholded
    # prediction matches the label (reduce_min over the tag axis).
    correct_pred = tf.equal(tf.cast(prediction > TAGS_THRESHOLDS, tf.int8),
                            tf.cast(label_feed, tf.int8))
    all_correct_pred = tf.reduce_min(tf.cast(correct_pred, tf.float32), 1)
    accuracy = tf.reduce_mean(all_correct_pred)
    saver = tf.train.Saver(max_to_keep=5, var_list=tf.global_variables())
def vgg_16_eval():
    """
    Rebuild the VGG-16-style graph for inference and publish globals
    ``prediction`` (per-tag sigmoid probabilities) and ``saver``.

    The layer layout must match ``vgg_16_train`` exactly so the checkpoint
    restored via ``saver`` maps onto the same variables.
    """
    global prediction, saver
    conv_1 = \
        cnn.add_conv_layer(image_feed, [[3, 3, IMAGE_SHAPE[-1], 32], [32]])
    conv_2 = cnn.add_conv_layer(conv_1, [[3, 3, 32, 32], [32]])
    max_pool_1 = cnn.add_pooling_layer(conv_2)
    conv_3 = cnn.add_conv_layer(max_pool_1, [[3, 3, 32, 64], [64]])
    conv_4 = cnn.add_conv_layer(conv_3, [[3, 3, 64, 64], [64]])
    max_pool_2 = cnn.add_pooling_layer(conv_4)
    conv_5 = cnn.add_conv_layer(max_pool_2, [[3, 3, 64, 128], [128]])
    conv_6 = cnn.add_conv_layer(conv_5, [[3, 3, 128, 128], [128]])
    conv_7 = cnn.add_conv_layer(conv_6, [[3, 3, 128, 128], [128]])
    max_pool_3 = cnn.add_pooling_layer(conv_7)
    conv_8 = cnn.add_conv_layer(max_pool_3, [[3, 3, 128, 256], [256]])
    conv_9 = cnn.add_conv_layer(conv_8, [[3, 3, 256, 256], [256]])
    conv_10 = cnn.add_conv_layer(conv_9, [[3, 3, 256, 256], [256]])
    max_pool_4 = cnn.add_pooling_layer(conv_10)
    conv_11 = cnn.add_conv_layer(max_pool_4, [[3, 3, 256, 256], [256]])
    conv_12 = cnn.add_conv_layer(conv_11, [[3, 3, 256, 256], [256]])
    conv_13 = cnn.add_conv_layer(conv_12, [[3, 3, 256, 256], [256]])
    max_pool_5 = cnn.add_pooling_layer(conv_13)
    dense_1 = cnn.add_dense_layer(max_pool_5, [[4 * 4 * 256, 2048], [2048]])
    drop_out_1 = cnn.add_drop_out_layer(dense_1)
    dense_2 = cnn.add_dense_layer(drop_out_1, [[2048, 512], [512]])
    drop_out_2 = cnn.add_drop_out_layer(dense_2)
    logits = cnn.add_read_out_layer(drop_out_2)
    prediction = tf.nn.sigmoid(logits)
    # without saver object restore doesn't actually work.
    saver = tf.train.Saver(max_to_keep=5, var_list=tf.global_variables())
# def densenet(class_balance=False, l2_norm=False):
# """DenseNet-BC 121"""
# global prediction, loss, train_step, accuracy, saver, is_train
#
# init_conv = dn.add_conv_layer(
# image_feed,
# [[7, 7, IMAGE_SHAPE[-1], 2 * dn._k], [2 * dn._k]],
# bn=False)
# init_pool = dn.add_pooling_layer(init_conv, kernel_size=[1, 3, 3, 1])
# dense_block_1 = dn.add_dense_block(init_pool, L=6)
# transition_layer_1 = dn.add_transition_layer(dense_block_1)
# dense_block_2 = dn.add_dense_block(transition_layer_1, L=12)
# transition_layer_2 = dn.add_transition_layer(dense_block_2)
# dense_block_3 = dn.add_dense_block(transition_layer_2, L=24)
# transition_layer_3 = dn.add_transition_layer(dense_block_3)
# dense_block_4 = dn.add_dense_block(transition_layer_3, L=16)
# global_pool = dn.add_global_average_pool(dense_block_4)
# dim = int(global_pool.get_shape()[-1])
# dense_layer_1 = dn.add_dense_layer(global_pool, [[dim, 1000], [1000]],
# bn=False)
# drop_out_1 = dn.add_drop_out_layer(dense_layer_1)
# logits = dn.add_read_out_layer(drop_out_1)
#
# cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,
# labels=label_feed)
# # Explicit logistic log-loss function
# # cross_entropy = - (y_ * tf.log(1 / (1 + tf.exp(-logits)) + 1e-9) +
# # (1 - y_) * tf.log(1 - 1 / (1 + tf.exp(-logits)) + 1e-9))
#
# if class_balance:
# class_weights = tf.constant([[TAGS_WEIGHTINGS]],
# shape=[1, dn._n_class])
# cross_entropy *= class_weights
#
# if l2_norm:
# weights2norm = [var for var in tf.trainable_variables()
# if var.name.startswith(('weight', 'bias'))][-6:]
# regularizers = tf.add_n([tf.nn.l2_loss(var) for var in weights2norm])
# cross_entropy += BETA * regularizers
#
# for n in tf.global_variables():
# print(n)
#
# loss = tf.reduce_mean(cross_entropy)
#
# update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# with tf.control_dependencies(update_ops):
# train_step = tf.train.RMSPropOptimizer(learning_rate=ALPHA,
# decay=0.7,
# momentum=.5,
# epsilon=1e-10,
# use_locking=False,
# centered=False).minimize(loss)
# prediction = tf.nn.sigmoid(logits)
# correct_pred = tf.equal(tf.cast(prediction > TAGS_THRESHOLDS, tf.int8),
# tf.cast(label_feed, tf.int8))
# all_correct_pred = tf.reduce_min(tf.cast(correct_pred, tf.float32), 1)
# accuracy = tf.reduce_mean(all_correct_pred)
#
# saver = tf.train.Saver(max_to_keep=5, var_list=tf.global_variables())
# def densenet_eval():
# """DenseNet-BC 121"""
# global prediction, saver
#
# init_conv = dn.add_conv_layer(
# image_feed,
# [[7, 7, IMAGE_SHAPE[-1], 2 * dn._k], [2 * dn._k]],
# bn=False)
# init_pool = dn.add_pooling_layer(init_conv, kernel_size=[1, 3, 3, 1])
# dense_block_1 = dn.add_dense_block(init_pool, L=6)
# transition_layer_1 = dn.add_transition_layer(dense_block_1)
# dense_block_2 = dn.add_dense_block(transition_layer_1, L=12)
# transition_layer_2 = dn.add_transition_layer(dense_block_2)
# dense_block_3 = dn.add_dense_block(transition_layer_2, L=24)
# transition_layer_3 = dn.add_transition_layer(dense_block_3)
# dense_block_4 = dn.add_dense_block(transition_layer_3, L=16)
# global_pool = dn.add_global_average_pool(dense_block_4)
# dim = int(global_pool.get_shape()[-1])
# dense_layer_1 = dn.add_dense_layer(global_pool, [[dim, 1000], [1000]],
# bn=False)
# drop_out_1 = dn.add_drop_out_layer(dense_layer_1)
# logits = dn.add_read_out_layer(drop_out_1)
#
# prediction = tf.nn.sigmoid(logits)
# saver = tf.train.Saver(max_to_keep=5, var_list=tf.global_variables())
# Driver script: optionally train, then optionally evaluate, once per
# ensemble member.  Test-time probabilities are collected per member and
# averaged at the end.
ensemble_probs = list()
for iteration in range(ENSEMBLE):
    if TRAIN:
        # Fresh graph per ensemble member; the input pipeline is pinned
        # to the CPU while the model itself is placed on the GPU below.
        tf.reset_default_graph()
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) \
                as sess, tf.device('/cpu:0'):
            train_file_array, train_label_array, \
                valid_file_array, valid_label_array = \
                generate_data_skeleton(root_dir=IMAGE_PATH + 'train',
                                       valid_size=VALID_SIZE, ext=EXT)
            train_image_batch, train_label_batch = data_pipe(
                train_file_array,
                train_label_array,
                num_epochs=None,
                shape=IMAGE_SHAPE,
                batch_size=BATCH_SIZE,
                # no aug given bn
                augmentation=AUGMENT,
                shuffle=True,
                threads=N_THREADS)
            valid_image_batch, valid_label_batch = data_pipe(
                valid_file_array,
                valid_label_array,
                num_epochs=None,
                shape=IMAGE_SHAPE,
                batch_size=BATCH_SIZE,
                augmentation=AUGMENT,
                shuffle=True,
                threads=N_THREADS)

            cnn = BasicCNN(IMAGE_SHAPE, 17, keep_prob=KEEP_RATE)
            is_train = cnn.is_train
            # dn = DenseNet(IMAGE_SHAPE,
            #               num_classes=17,
            #               keep_prob=KEEP_RATE,
            #               growth=32,
            #               bottleneck=4,
            #               compression=.5)
            # is_train = dn.is_train

            # !!! inefficient feeding of data despite 90%+ GPU utilisation
            # Switch between train and validation batches in the same graph
            # via the boolean `is_train` tensor.
            image_feed = tf.cond(is_train,
                                 lambda: train_image_batch,
                                 lambda: valid_image_batch)
            label_feed = tf.cond(is_train,
                                 lambda: train_label_batch,
                                 lambda: valid_label_batch)

            with tf.device('/gpu:0'):
                # densenet(class_balance=False, l2_norm=False)
                vgg_16_train(class_balance=False, l2_norm=False)

            init_op = tf.group(tf.local_variables_initializer(),
                               tf.global_variables_initializer())
            sess.run(init_op)
            # sess.graph.finalize()
            train(MAX_STEPS, sess, is_train, prediction, label_feed,
                  train_step, accuracy, loss, TAGS_THRESHOLDS)
            save_session(sess, path=MODEL_PATH, sav=saver)

    if EVAL:
        # Separate inference graph restored from the checkpoint saved above.
        tf.reset_default_graph()
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) \
                as sess, tf.device('/cpu:0'):
            test_file_array, dummy_label_array = \
                generate_data_skeleton(root_dir=IMAGE_PATH + 'test',
                                       valid_size=None, ext=EXT)
            # Single pass over the test set, order preserved (no shuffle)
            # so predictions line up with the file list.
            test_image_batch, _ = data_pipe(
                test_file_array,
                dummy_label_array,
                num_epochs=1,
                shape=IMAGE_SHAPE,
                batch_size=BATCH_SIZE,
                augmentation=AUGMENT,
                shuffle=False)
            image_feed = test_image_batch

            cnn = BasicCNN(IMAGE_SHAPE, 17, keep_prob=KEEP_RATE)
            # dn = DenseNet(IMAGE_SHAPE,
            #               num_classes=17,
            #               keep_prob=KEEP_RATE,
            #               growth=32,
            #               bottleneck=4,
            #               compression=.5)
            with tf.device('/gpu:0'):
                vgg_16_eval()
                # densenet_eval()

            init_op = tf.group(tf.local_variables_initializer(),
                               tf.global_variables_initializer())
            sess.run(init_op)
            restore_session(sess, MODEL_PATH)
            probs = predict(sess, prediction)
            ensemble_probs.append(probs)

if EVAL:
    # Average the per-member probabilities and write the submission file.
    final_probs = np.mean(ensemble_probs, axis=0)
    submit(final_probs, OUTPUT_PATH, TAGS, TAGS_THRESHOLDS)
| 46.832817 | 81 | 0.547233 |
163af4c8a8629f58364d088549a071793dd31b49 | 5,765 | py | Python | im2mesh/eval.py | hummat/occupancy_networks | c7b89d58f3839fb56df53c37288d22c33529aeac | [
"MIT"
] | null | null | null | im2mesh/eval.py | hummat/occupancy_networks | c7b89d58f3839fb56df53c37288d22c33529aeac | [
"MIT"
] | null | null | null | im2mesh/eval.py | hummat/occupancy_networks | c7b89d58f3839fb56df53c37288d22c33529aeac | [
"MIT"
] | null | null | null | # from im2mesh import icp
import logging
import numpy as np
import trimesh
from im2mesh.common import compute_iou
# from scipy.spatial import cKDTree
from im2mesh.utils.libkdtree import KDTree
from im2mesh.utils.libmesh import check_mesh_contains
# Maximum values for bounding box [-0.5, 0.5]^3
EMPTY_PCL_DICT = {
'completeness': np.sqrt(3),
'accuracy': np.sqrt(3),
'completeness2': 3,
'accuracy2': 3,
'chamfer': 6,
}
EMPTY_PCL_DICT_NORMALS = {
'normals completeness': -1.,
'normals accuracy': -1.,
'normals': -1.,
}
logger = logging.getLogger(__name__)
class MeshEvaluator(object):
    ''' Mesh evaluation class.

    It handles the mesh evaluation process (point-cloud distances,
    normal consistency, Chamfer distances and volumetric IoU).

    Args:
        n_points (int): number of points to be used for evaluation
    '''

    def __init__(self, n_points=100000):
        self.n_points = n_points

    def eval_mesh(self, mesh, pointcloud_tgt, normals_tgt,
                  points_iou, occ_tgt):
        ''' Evaluates a mesh.

        Args:
            mesh (trimesh): mesh which should be evaluated
            pointcloud_tgt (numpy array): target point cloud
            normals_tgt (numpy array): target normals
            points_iou (numpy_array): points tensor for IoU evaluation
            occ_tgt (numpy_array): GT occupancy values for IoU points
        '''
        if len(mesh.vertices) != 0 and len(mesh.faces) != 0:
            # Sample surface points and take the normal of the face each
            # sample came from.
            pointcloud, idx = mesh.sample(self.n_points, return_index=True)
            pointcloud = pointcloud.astype(np.float32)
            normals = mesh.face_normals[idx]
        else:
            # Degenerate mesh: empty arrays make the point-cloud metrics
            # fall back to their worst-case defaults.
            pointcloud = np.empty((0, 3))
            normals = np.empty((0, 3))

        out_dict = self.eval_pointcloud(
            pointcloud, pointcloud_tgt, normals, normals_tgt)

        if len(mesh.vertices) != 0 and len(mesh.faces) != 0:
            occ = check_mesh_contains(mesh, points_iou)
            out_dict['iou'] = compute_iou(occ, occ_tgt)
        else:
            out_dict['iou'] = 0.

        return out_dict

    def eval_pointcloud(self, pointcloud, pointcloud_tgt,
                        normals=None, normals_tgt=None):
        ''' Evaluates a point cloud.

        Args:
            pointcloud (numpy array): predicted point cloud
            pointcloud_tgt (numpy array): target point cloud
            normals (numpy array): predicted normals
            normals_tgt (numpy array): target normals
        '''
        # Return maximum losses if pointcloud is empty
        if pointcloud.shape[0] == 0:
            # BUG FIX: logger.warn is a deprecated alias of logger.warning.
            logger.warning('Empty pointcloud / mesh detected!')
            out_dict = EMPTY_PCL_DICT.copy()
            if normals is not None and normals_tgt is not None:
                out_dict.update(EMPTY_PCL_DICT_NORMALS)
            return out_dict

        pointcloud = np.asarray(pointcloud)
        pointcloud_tgt = np.asarray(pointcloud_tgt)

        # Completeness: how far are the points of the target point cloud
        # from the predicted point cloud
        completeness, completeness_normals = distance_p2p(
            pointcloud_tgt, normals_tgt, pointcloud, normals
        )
        completeness2 = completeness ** 2

        completeness = completeness.mean()
        completeness2 = completeness2.mean()
        completeness_normals = completeness_normals.mean()

        # Accuracy: how far are the points of the predicted pointcloud
        # from the target pointcloud
        accuracy, accuracy_normals = distance_p2p(
            pointcloud, normals, pointcloud_tgt, normals_tgt
        )
        accuracy2 = accuracy ** 2

        accuracy = accuracy.mean()
        accuracy2 = accuracy2.mean()
        accuracy_normals = accuracy_normals.mean()

        # Chamfer distances: symmetric means of the two one-sided terms.
        chamferL2 = 0.5 * (completeness2 + accuracy2)
        normals_correctness = (
            0.5 * completeness_normals + 0.5 * accuracy_normals
        )
        chamferL1 = 0.5 * (completeness + accuracy)

        out_dict = {
            'completeness': completeness,
            'accuracy': accuracy,
            'normals completeness': completeness_normals,
            'normals accuracy': accuracy_normals,
            'normals': normals_correctness,
            'completeness2': completeness2,
            'accuracy2': accuracy2,
            'chamfer-L2': chamferL2,
            'chamfer-L1': chamferL1,
        }

        return out_dict
def distance_p2p(points_src, normals_src, points_tgt, normals_tgt):
    ''' Computes minimal distances of each point in points_src to points_tgt.

    Also returns, per source point, the absolute dot product between its
    normal and the normal of its nearest target point (NaN when either
    normal set is missing).

    Args:
        points_src (numpy array): source points
        normals_src (numpy array): source normals
        points_tgt (numpy array): target points
        normals_tgt (numpy array): target normals
    '''
    dist, idx = KDTree(points_tgt).query(points_src)

    if normals_src is None or normals_tgt is None:
        # No normals available: report NaN for every source point.
        nan_products = np.array([np.nan] * points_src.shape[0],
                                dtype=np.float32)
        return dist, nan_products

    unit_src = normals_src / np.linalg.norm(normals_src, axis=-1,
                                            keepdims=True)
    unit_tgt = normals_tgt / np.linalg.norm(normals_tgt, axis=-1,
                                            keepdims=True)
    # Handle normals that point into wrong direction gracefully
    # (mostly due to method not caring about this in generation)
    normals_dot_product = np.abs((unit_tgt[idx] * unit_src).sum(axis=-1))
    return dist, normals_dot_product
def distance_p2m(points, mesh):
    ''' Compute minimal distances of each point in points to mesh.

    Args:
        points (numpy array): points array
        mesh (trimesh): mesh
    '''
    closest, distances, triangle_ids = trimesh.proximity.closest_point(mesh,
                                                                       points)
    return distances
| 32.942857 | 77 | 0.629488 |
ca4ea001a847d3c41cc11550a2581044368a9ca2 | 28,366 | py | Python | pyCHX/chx_correlationp2.py | NSLS-II/pyCHX | e82e343903e477c4359b03c4d079eb1e5202c25f | [
"BSD-3-Clause"
] | 2 | 2021-07-21T02:07:11.000Z | 2022-02-18T02:57:49.000Z | pyCHX/chx_correlationp2.py | NSLS-II/pyCHX | e82e343903e477c4359b03c4d079eb1e5202c25f | [
"BSD-3-Clause"
] | 5 | 2021-06-16T20:31:45.000Z | 2022-02-04T21:24:45.000Z | pyCHX/chx_correlationp2.py | NSLS-II/pyCHX | e82e343903e477c4359b03c4d079eb1e5202c25f | [
"BSD-3-Clause"
] | 2 | 2022-02-04T21:51:28.000Z | 2022-03-22T04:11:19.000Z | """
Aug 10, Developed by Y.G.@CHX
yuzhang@bnl.gov
This module is for parallel computation of time correlation
Feb 20, 2018
The chx_correlationp2 is for dedug g2
"""
from __future__ import absolute_import, division, print_function
from skbeam.core.utils import multi_tau_lags
from skbeam.core.roi import extract_label_indices
from pyCHX.chx_libs import tqdm
from pyCHX.chx_correlationc import ( get_pixelist_interp_iq, _validate_and_transform_inputs,
_one_time_process as _one_time_processp, _one_time_process_error as _one_time_process_errorp,
_two_time_process as _two_time_processp )
from pyCHX.chx_compress import ( run_dill_encoded,apply_async, map_async,pass_FD, go_through_FD )
from multiprocessing import Pool
import dill
from collections import namedtuple
import numpy as np
import skbeam.core.roi as roi
import sys
import logging
logger = logging.getLogger(__name__)
class _init_state_two_timep():
    """Mutable state container for the parallel two-time correlator.

    Validates the inputs once via ``_validate_and_transform_inputs`` and
    pre-allocates every buffer that the streaming two-time multi-tau
    algorithm mutates frame by frame.
    """

    def __init__(self, num_levels, num_bufs, labels, num_frames):
        (label_array, pixel_list, num_rois, num_pixels, lag_steps, buf,
         img_per_level, track_level, cur, norm,
         lev_len) = _validate_and_transform_inputs(num_bufs, num_levels, labels)

        self.buf = buf
        self.img_per_level = img_per_level
        self.label_array = label_array
        self.track_level = track_level
        self.cur = cur
        self.pixel_list = pixel_list
        self.num_pixels = num_pixels
        self.lag_steps = lag_steps
        # two time correlation results: one (num_frames x num_frames)
        # matrix per ROI
        self.g2 = np.zeros((num_rois, num_frames, num_frames),
                           dtype=np.float64)
        # how many frames have been folded into each multi-tau level
        self.count_level = np.zeros(num_levels, dtype=np.int64)
        # current image time
        self.current_img_time = 0
        # a time-frame list generated for each level
        self.time_ind = {level: [] for level in range(num_levels)}
        self.norm = norm
        self.lev_len = lev_len

    def __getstate__(self):
        """Called before pickling; plain attribute dict is picklable."""
        return self.__dict__.copy()

    def __setstate__(self, state):
        """Called while unpickling; restore the attribute dict."""
        self.__dict__.update(state)
def lazy_two_timep(FD, num_levels, num_bufs, labels,
                   internal_state=None, bad_frame_list=None, imgsum=None, norm=None):
    """ Generator implementation of two-time correlation
    If you do not want multi-tau correlation, set num_levels to 1 and
    num_bufs to the number of images you wish to correlate
    Multi-tau correlation uses a scheme to achieve long-time correlations
    inexpensively by downsampling the data, iteratively combining successive
    frames.
    The longest lag time computed is num_levels * num_bufs.
    ** see comments on multi_tau_auto_corr
    Parameters
    ----------
    FD: the handler of compressed data
    num_levels : int, optional
        how many generations of downsampling to perform, i.e.,
        the depth of the binomial tree of averaged frames
        default is one
    num_bufs : int, must be even
        maximum lag step to compute in each generation of
        downsampling
    labels : array
        labeled array of the same shape as the image stack;
        each ROI is represented by a distinct label (i.e., integer)
    internal_state : _init_state_two_timep or None
        pre-allocated state; a fresh one is created when None
    bad_frame_list : list or None
        frame indices to exclude; their pixel values are set to NaN
    imgsum : array or None
        per-frame intensity sums used to normalize each frame
    norm : array or None
        per-pixel normalization (e.g. flat field) over the masked pixels
    Returns
    -------
    tuple
        ``(g2, lag_steps)`` where ``g2`` has shape
        (num_rois, num_frames, num_frames)
    Notes
    -----
    The two-time correlation function is defined as
    .. math::
        C(q,t_1,t_2) = \\frac{<I(q,t_1)I(q,t_2)>}{<I(q, t_1)><I(q,t_2)>}
    Here, the ensemble averages are performed over many pixels of detector,
    all having the same ``q`` value. The average time or age is equal to
    ``(t1+t2)/2``, measured by the distance along the ``t1 = t2`` diagonal.
    The time difference ``t = |t1 - t2|``, with is distance from the
    ``t1 = t2`` diagonal in the perpendicular direction.
    In the equilibrium system, the two-time correlation functions depend only
    on the time difference ``t``, and hence the two-time correlation contour
    lines are parallel.
    References
    ----------
    .. [1]
        A. Fluerasu, A. Moussaid, A. Mandsen and A. Schofield, "Slow dynamics
        and aging in collodial gels studied by x-ray photon correlation
        spectroscopy," Phys. Rev. E., vol 76, p 010401(1-4), 2007.
    """
    num_frames = FD.end - FD.beg
    if internal_state is None:
        internal_state = _init_state_two_timep(num_levels, num_bufs, labels, num_frames)
    # create a shorthand reference to the results and state named tuple
    s = internal_state

    qind, pixelist = roi.extract_label_indices(labels)
    # iterate over the images to compute multi-tau correlation
    fra_pix = np.zeros_like(pixelist, dtype=np.float64)
    # timg maps flat detector index -> 1-based position inside pixelist
    # (0 marks pixels outside every ROI).
    timg = np.zeros(FD.md['ncols'] * FD.md['nrows'], dtype=np.int32)
    timg[pixelist] = np.arange(1, len(pixelist) + 1)

    if bad_frame_list is None:
        bad_frame_list = []
    for i in range(FD.beg, FD.end):
        if i in bad_frame_list:
            # Bad frames are propagated as NaN so they poison (and can be
            # identified in) the affected correlations.
            fra_pix[:] = np.nan
        else:
            # Sparse read: p = flat pixel indices, v = their values.
            (p, v) = FD.rdrawframe(i)
            w = np.where(timg[p])[0]
            pxlist = timg[p[w]] - 1

            # Four normalization cases: none, per-pixel, per-frame, both.
            if imgsum is None:
                if norm is None:
                    fra_pix[pxlist] = v[w]
                else:
                    fra_pix[pxlist] = v[w] / norm[pxlist]  # -1.0
            else:
                if norm is None:
                    fra_pix[pxlist] = v[w] / imgsum[i]
                else:
                    fra_pix[pxlist] = v[w] / imgsum[i] / norm[pxlist]

        level = 0
        # increment buffer
        s.cur[0] = (1 + s.cur[0]) % num_bufs
        s.count_level[0] = 1 + s.count_level[0]
        # get the current image time
        # s = s._replace(current_img_time=(s.current_img_time + 1))
        s.current_img_time += 1

        # Put the ROI pixels into the ring buffer.
        s.buf[0, s.cur[0] - 1] = fra_pix
        fra_pix[:] = 0

        # _two_time_processp mutates s.g2 / s.img_per_level in place.
        _two_time_processp(s.buf, s.g2, s.label_array, num_bufs,
                           s.num_pixels, s.img_per_level, s.lag_steps,
                           s.current_img_time,
                           level=0, buf_no=s.cur[0] - 1)

        # time frame for each level
        s.time_ind[0].append(s.current_img_time)

        # check whether the number of levels is one, otherwise
        # continue processing the next level
        processing = num_levels > 1

        # Compute the correlations for all higher levels: each level
        # averages pairs of frames from the level below.
        level = 1
        while processing:
            if not s.track_level[level]:
                # First of a pair: just mark it and wait for the partner.
                s.track_level[level] = 1
                processing = False
            else:
                prev = 1 + (s.cur[level - 1] - 2) % num_bufs
                s.cur[level] = 1 + s.cur[level] % num_bufs
                s.count_level[level] = 1 + s.count_level[level]

                # Downsample: average the two most recent lower-level frames.
                s.buf[level, s.cur[level] - 1] = (s.buf[level - 1, prev - 1] +
                                                  s.buf[level - 1, s.cur[level - 1] - 1]) / 2

                t1_idx = (s.count_level[level] - 1) * 2

                # The averaged frame's timestamp is the midpoint of its parents.
                current_img_time = ((s.time_ind[level - 1])[t1_idx] +
                                    (s.time_ind[level - 1])[t1_idx + 1]) / 2.

                # time frame for each level
                s.time_ind[level].append(current_img_time)

                # make the track_level zero once that level is processed
                s.track_level[level] = 0

                # call the _two_time_process function for each multi-tau level
                # for multi-tau levels greater than one
                # Again, this is modifying things in place. See comment
                # on previous call above.
                _two_time_processp(s.buf, s.g2, s.label_array, num_bufs,
                                   s.num_pixels, s.img_per_level, s.lag_steps,
                                   current_img_time,
                                   level=level, buf_no=s.cur[level] - 1)
                level += 1

                # Checking whether there is next level for processing
                processing = level < num_levels
        # print (s.g2[1,:,1] )
        # yield s
    # The algorithm fills only the lower triangle of each ROI's two-time
    # matrix; symmetrize it (counting the diagonal once).
    for q in range(np.max(s.label_array)):
        x0 = (s.g2)[q, :, :]
        (s.g2)[q, :, :] = (np.tril(x0) + np.tril(x0).T -
                           np.diag(np.diag(x0)))
    return s.g2, s.lag_steps
def cal_c12p(FD, ring_mask, bad_frame_list=None,
             good_start=0, num_buf=8, num_lev=None, imgsum=None, norm=None):
    '''Calculate the two-time correlation (c12) with a multi-tau algorithm
    for a compressed file, running each ROI in its own worker process.

    Parameters
    ----------
    FD : handler of the compressed data file
    ring_mask : labeled ROI array (0 = background, 1..N = ROIs)
    bad_frame_list : list or None, frames to exclude
    good_start : int, first frame index to process
    num_buf : int, buffers per multi-tau level (default 8)
    num_lev : int or None, number of levels; derived from the frame
        count when None
    imgsum : array or None, per-frame intensity sums for normalization
    norm : array or None, per-pixel normalization over the masked pixels

    Returns
    -------
    c12 : array of shape (noframes, noframes, num_rois)
    lag_steps : the lag steps smaller than the number of frames
    '''
    FD.beg = max(FD.beg, good_start)
    noframes = FD.end - FD.beg  # +1 # number of frames, not "no frames"
    # Warm the file handle / cache over the requested frame range.
    for i in range(FD.beg, FD.end):
        pass_FD(FD, i)
    if num_lev is None:
        # Default depth so that num_lev levels of num_buf lags cover noframes.
        num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1
    print('In this g2 calculation, the buf and lev number are: %s--%s--' % (num_buf, num_lev))
    if bad_frame_list is not None:
        if len(bad_frame_list) != 0:
            print('Bad frame involved and will be precessed!')
            # Only bad frames inside the processed range reduce the count.
            noframes -= len(np.where(np.in1d(bad_frame_list,
                                             range(good_start, FD.end)))[0])
    print('%s frames will be processed...' % (noframes))
    # One binary mask per ROI label; each is correlated independently.
    ring_masks = [np.array(ring_mask == i, dtype=np.int64)
                  for i in np.unique(ring_mask)[1:]]
    qind, pixelist = roi.extract_label_indices(ring_mask)
    if norm is not None:
        # Slice the per-pixel normalization down to each ROI's pixels.
        norms = [norm[np.in1d(pixelist,
                              extract_label_indices(np.array(ring_mask == i, dtype=np.int64))[1])]
                 for i in np.unique(ring_mask)[1:]]

    inputs = range(len(ring_masks))

    pool = Pool(processes=len(inputs))
    internal_state = None
    print('Starting assign the tasks...')
    results = {}
    if norm is not None:
        for i in tqdm(inputs):
            # for i in inputs:
            results[i] = apply_async(pool, lazy_two_timep, (FD, num_lev, num_buf, ring_masks[i],
                                                            internal_state, bad_frame_list, imgsum,
                                                            norms[i],))
    else:
        # print ('for norm is None')
        for i in tqdm(inputs):
            # for i in inputs:
            results[i] = apply_async(pool, lazy_two_timep, (FD, num_lev, num_buf, ring_masks[i],
                                                            internal_state, bad_frame_list, imgsum, None,))
    pool.close()
    print('Starting running the tasks...')
    res = [results[k].get() for k in tqdm(list(sorted(results.keys())))]
    c12 = np.zeros([noframes, noframes, len(ring_masks)])
    for i in inputs:
        # print( res[i][0][:,0].shape, g2.shape )
        # Each worker saw a single-ROI mask, so its g2 has shape
        # (1, noframes, noframes); take index 0.
        c12[:, :, i] = res[i][0][0]  # [:len_lag, :len_lag]
        if i == 0:
            lag_steps = res[0][1]
    print('G2 calculation DONE!')
    del results
    del res
    return c12, lag_steps[lag_steps < noframes]
class _internal_statep():
    """Mutable state container for the parallel one-time correlator."""

    def __init__(self, num_levels, num_bufs, labels, cal_error=False):
        '''Set up the buffers for generator-based multi-tau one-time
        correlation (YG. DEV Nov, 2016).

        When ``cal_error`` is True, extra per-pixel accumulators are
        allocated so a signal-to-noise estimate of g2 can be computed
        (added Jan 1, 2018).
        '''
        (label_array, pixel_list, num_rois, num_pixels, lag_steps, buf,
         img_per_level, track_level, cur, norm,
         lev_len) = _validate_and_transform_inputs(num_bufs, num_levels, labels)

        num_lags = int((num_levels + 1) * num_bufs / 2)

        self.buf = buf
        self.G = np.zeros((num_lags, num_rois), dtype=np.float64)
        # matrices for normalizing G into g2
        self.past_intensity = np.zeros_like(self.G)
        self.future_intensity = np.zeros_like(self.G)
        self.img_per_level = img_per_level
        self.label_array = label_array
        self.track_level = track_level
        self.cur = cur
        self.pixel_list = pixel_list
        self.num_pixels = num_pixels
        self.lag_steps = lag_steps
        self.norm = norm
        self.lev_len = lev_len

        if cal_error:
            # Per-pixel accumulators used for the error estimate.
            self.G_all = np.zeros((num_lags, len(pixel_list)),
                                  dtype=np.float64)
            # matrices for normalizing G_all into per-pixel g2
            self.past_intensity_all = np.zeros_like(self.G_all)
            self.future_intensity_all = np.zeros_like(self.G_all)

    def __getstate__(self):
        """Called before pickling; plain attribute dict is picklable."""
        return self.__dict__.copy()

    def __setstate__(self, state):
        """Called while unpickling; restore the attribute dict."""
        self.__dict__.update(state)
def lazy_one_timep(FD, num_levels, num_bufs, labels,
                        internal_state=None, bad_frame_list=None, imgsum=None,
                            norm = None, cal_error=False ):
    """Multi-tau one-time correlation for one ROI set, reading frames from a
    compressed-file reader ``FD`` (worker function for ``cal_g2p``).

    Parameters
    ----------
    FD : compressed-file reader; must provide ``beg``/``end`` frame indices,
        ``md['ncols']``/``md['nrows']`` and ``rdrawframe(i)`` returning the
        (pixel-index, value) arrays of frame ``i``.
    num_levels : number of multi-tau levels.
    num_bufs : ring buffers per level.
    labels : 2-D ROI label array with the detector geometry.
    internal_state : optional pre-built ``_internal_statep``; a fresh one is
        created when None (the usual case for a pool worker).
    bad_frame_list : frame indices to mask out (their pixels become NaN).
    imgsum : optional per-frame normalization values, indexed by frame number.
    norm : optional per-pixel normalization array (ROI-pixel layout).
    cal_error : when True, also accumulate per-pixel G/past/future arrays.

    Returns
    -------
    ``(g2, lag_steps)`` when ``cal_error`` is False, otherwise
    ``(None, lag_steps, G_all, past_intensity_all, future_intensity_all)``.
    """
    if internal_state is None:
        internal_state = _internal_statep(num_levels, num_bufs, labels,cal_error)
    # create a shorthand reference to the results and state named tuple
    s = internal_state
    qind, pixelist = roi.extract_label_indices( labels )
    # iterate over the images to compute multi-tau correlation
    fra_pix = np.zeros_like( pixelist, dtype=np.float64)
    # timg maps flattened detector index -> 1-based position within pixelist
    # (0 marks pixels outside every ROI)
    timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 )
    timg[pixelist] = np.arange( 1, len(pixelist) + 1 )
    if bad_frame_list is None:
        bad_frame_list=[]
    #for i in tqdm(range( FD.beg , FD.end )):
    for i in range( FD.beg , FD.end ):
        if i in bad_frame_list:
            # NaN-fill bad frames; presumably the downstream processing
            # functions propagate/handle NaN -- confirm in _one_time_processp
            fra_pix[:]= np.nan
        else:
            (p,v) = FD.rdrawframe(i)
            w = np.where( timg[p] )[0]
            pxlist = timg[ p[w] ] -1
            # four normalization combinations: none / per-pixel / per-frame / both
            if imgsum is None:
                if norm is None:
                    #print ('here')
                    fra_pix[ pxlist] = v[w]
                else:
                    fra_pix[ pxlist] = v[w]/ norm[pxlist] #-1.0
            else:
                if norm is None:
                    fra_pix[ pxlist] = v[w] / imgsum[i]
                else:
                    fra_pix[ pxlist] = v[w]/ imgsum[i]/ norm[pxlist]
        level = 0
        # increment buffer
        s.cur[0] = (1 + s.cur[0]) % num_bufs
        # Put the ROI pixels into the ring buffer.
        s.buf[0, s.cur[0] - 1] = fra_pix
        fra_pix[:]=0
        #print( i, len(p), len(w), len( pixelist))
        #print ('i= %s init fra_pix'%i )
        buf_no = s.cur[0] - 1
        # Compute the correlations between the first level
        # (undownsampled) frames. This modifies G,
        # past_intensity, future_intensity,
        # and img_per_level in place!
        #print (s.G)
        if cal_error:
            _one_time_process_errorp(s.buf, s.G, s.past_intensity, s.future_intensity,
                                s.label_array, num_bufs, s.num_pixels,
                                 s.img_per_level, level, buf_no, s.norm, s.lev_len,
                                    s.G_all, s.past_intensity_all, s.future_intensity_all)
        else:
            _one_time_processp(s.buf, s.G, s.past_intensity, s.future_intensity,
                          s.label_array, num_bufs, s.num_pixels,
                          s.img_per_level, level, buf_no, s.norm, s.lev_len)
        #print (s.G)
        # check whether the number of levels is one, otherwise
        # continue processing the next level
        processing = num_levels > 1
        level = 1
        while processing:
            # each higher level is fed every second frame; track_level
            # toggles to accumulate pairs before averaging down
            if not s.track_level[level]:
                s.track_level[level] = True
                processing = False
            else:
                # average the two newest frames of the level below into
                # one frame of this level
                prev = (1 + (s.cur[level - 1] - 2) % num_bufs)
                s.cur[level] = (
                    1 + s.cur[level] % num_bufs)
                s.buf[level, s.cur[level] - 1] = ((
                        s.buf[level - 1, prev - 1] +
                        s.buf[level - 1, s.cur[level - 1] - 1]) / 2)
                # make the track_level zero once that level is processed
                s.track_level[level] = False
                # call processing_func for each multi-tau level greater
                # than one. This is modifying things in place. See comment
                # on previous call above.
                buf_no = s.cur[level] - 1
                if cal_error:
                    _one_time_process_errorp(s.buf, s.G, s.past_intensity, s.future_intensity,
                                        s.label_array, num_bufs, s.num_pixels,
                                         s.img_per_level, level, buf_no, s.norm, s.lev_len,
                                            s.G_all, s.past_intensity_all, s.future_intensity_all)
                else:
                    _one_time_processp(s.buf, s.G, s.past_intensity, s.future_intensity,
                                  s.label_array, num_bufs, s.num_pixels,
                                  s.img_per_level, level, buf_no, s.norm, s.lev_len)
                level += 1
                # Checking whether there is next level for processing
                processing = level < num_levels
    # If any past intensities are zero, then g2 cannot be normalized at
    # those levels. This if/else code block is basically preventing
    # divide-by-zero errors.
    if not cal_error:
        # g_max: first lag channel with a zero normalizer; truncate there
        if len(np.where(s.past_intensity == 0)[0]) != 0:
            g_max1 = np.where(s.past_intensity == 0)[0][0]
        else:
            g_max1 = s.past_intensity.shape[0]
        if len(np.where(s.future_intensity == 0)[0]) != 0:
            g_max2 = np.where(s.future_intensity == 0)[0][0]
        else:
            g_max2 = s.future_intensity.shape[0]
        g_max = min( g_max1, g_max2)
        g2 = (s.G[:g_max] / (s.past_intensity[:g_max] *
                             s.future_intensity[:g_max]))
    #sys.stdout.write('#')
    #del FD
    #sys.stdout.flush()
    #print (g2)
    #return results(g2, s.lag_steps[:g_max], s)
    if cal_error:
        #return g2, s.lag_steps[:g_max], s.G[:g_max],s.past_intensity[:g_max], s.future_intensity[:g_max] #, s
        return ( None, s.lag_steps,
                s.G_all,s.past_intensity_all, s.future_intensity_all ) #, s )
    else:
        return g2, s.lag_steps[:g_max] #, s
def cal_g2p( FD, ring_mask, bad_frame_list=None,
            good_start=0, num_buf = 8, num_lev = None, imgsum=None, norm=None,
            cal_error=False ):
    """Parallel multi-tau g2 calculation for a compressed file.

    Spawns one worker process per ROI label and runs ``lazy_one_timep`` in
    each; results are merged per q. With ``cal_error`` the per-pixel
    accumulators are combined into a per-q error bar via first-order error
    propagation of g2 = G / (P * F).

    Parameters
    ----------
    FD : compressed-file reader (frame range ``beg``..``end``).
    ring_mask : 2-D integer ROI label array; label 0 is background.
    bad_frame_list : frame indices to exclude.
    good_start : first usable frame (``FD.beg`` is clipped up to this).
    num_buf : ring buffers per multi-tau level.
    num_lev : number of levels; derived from the frame count when None.
    imgsum : optional per-frame normalization.
    norm : optional per-pixel normalization (flattened detector layout).
    cal_error : when True also return error bars and the per-pixel
        G / past / future accumulators.

    Returns
    -------
    ``(g2, lag_steps)`` or, with ``cal_error`` True,
    ``(g2, lag_steps, g2_err, g2_G, g2_P, g2_F)``.
    """
    FD.beg = max(FD.beg, good_start)
    # NOTE(review): the two-time pendant of this function computes
    # FD.end - FD.beg WITHOUT the +1, and the progress print below reports
    # noframes-1 -- confirm which frame count is intended.
    noframes = FD.end - FD.beg +1 # number of frames, not "no frames"
    for i in range(FD.beg, FD.end):
        pass_FD(FD,i)
    if num_lev is None:
        # enough dyadic levels to span the frame range with num_buf buffers/level
        num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1
    print ('In this g2 calculation, the buf and lev number are: %s--%s--'%(num_buf,num_lev))
    if bad_frame_list is not None:
        if len(bad_frame_list)!=0:
            print ('%s Bad frames involved and will be discarded!'%len(bad_frame_list) )
            noframes -= len(np.where(np.in1d( bad_frame_list,
                                             range(good_start, FD.end)))[0])
    print ('%s frames will be processed...'%(noframes-1))
    # one boolean mask per ROI label (label 0 / background is skipped)
    ring_masks = [ np.array(ring_mask==i, dtype = np.int64)
             for i in np.unique( ring_mask )[1:] ]
    qind, pixelist = roi.extract_label_indices( ring_mask )
    noqs = len(np.unique(qind))
    # pixels per q ring; used below both for error averaging and for the
    # per-q column offsets into the flattened pixel axis
    nopr = np.bincount(qind, minlength=(noqs+1))[1:]
    if norm is not None:
        # slice the per-pixel normalization down to each ROI's own pixels
        norms = [ norm[ np.in1d( pixelist,
            extract_label_indices( np.array(ring_mask==i, dtype = np.int64))[1])]
                for i in np.unique( ring_mask )[1:] ]
    inputs = range( len(ring_masks) )
    pool = Pool(processes= len(inputs) )
    internal_state = None
    print( 'Starting assign the tasks...')
    results = {}
    if norm is not None:
        for i in tqdm( inputs ):
            results[i] = apply_async( pool, lazy_one_timep, ( FD, num_lev, num_buf, ring_masks[i],
                            internal_state, bad_frame_list, imgsum,
                                    norms[i], cal_error ) )
    else:
        #print ('for norm is None')
        for i in tqdm ( inputs ):
            results[i] = apply_async( pool, lazy_one_timep, ( FD, num_lev, num_buf, ring_masks[i],
                            internal_state, bad_frame_list,imgsum, None, cal_error
                                     ) )
    pool.close()
    print( 'Starting running the tasks...')
    res = [ results[k].get() for k in tqdm( list(sorted(results.keys())) ) ]
    len_lag = 10**10
    for i in inputs: #to get the smallest length of lag_step,
        ##*****************************
        ##Here could result in problem for significantly cut useful data if some Q have very short tau list
        ##****************************
        if len_lag > len( res[i][1] ):
            lag_steps = res[i][1]
            len_lag = len( lag_steps )
    #lag_steps = res[0][1]
    if not cal_error:
        g2 = np.zeros( [len( lag_steps),len(ring_masks)] )
    else:
        # full-size containers; the error path keeps all lag channels and
        # truncates at the end using Gmax
        g2 = np.zeros( [ int( (num_lev + 1) * num_buf / 2), len(ring_masks)] )
        g2_err = np.zeros_like(g2)
        g2_G = np.zeros(( int( (num_lev + 1) * num_buf / 2), len(pixelist)) )
        g2_P = np.zeros_like( g2_G )
        g2_F = np.zeros_like( g2_G )
    Gmax = 0
    lag_steps_err = res[0][1]
    # nopr_: cumulative per-q pixel offsets (starting at 0) into g2_G/P/F columns
    nopr_ = np.lib.pad( np.cumsum(nopr), [1], mode = 'constant',constant_values=(0))[:-1]
    for i in inputs:
        #print( res[i][0][:,0].shape, g2.shape )
        if not cal_error:
            g2[:,i] = res[i][0][:,0][:len_lag]
        else:
            # per-pixel accumulators returned by the worker for ring i
            s_Gall_qi = res[i][2]#[:len_lag]
            s_Pall_qi = res[i][3]#[:len_lag]
            s_Fall_qi = res[i][4]#[:len_lag]
            #print( s_Gall_qi.shape,s_Pall_qi.shape,s_Fall_qi.shape )
            avgGi = (np.average( s_Gall_qi, axis=1))
            devGi = (np.std( s_Gall_qi, axis=1))
            avgPi = (np.average( s_Pall_qi, axis=1))
            devPi = (np.std( s_Pall_qi, axis=1))
            avgFi = (np.average( s_Fall_qi, axis=1))
            devFi = (np.std( s_Fall_qi, axis=1))
            # truncate at the first lag channel with a zero normalizer
            if len(np.where(avgPi == 0)[0]) != 0:
                g_max1 = np.where(avgPi == 0)[0][0]
            else:
                g_max1 = avgPi.shape[0]
            if len(np.where(avgFi == 0)[0]) != 0:
                g_max2 = np.where(avgFi == 0)[0][0]
            else:
                g_max2 = avgFi.shape[0]
            g_max = min( g_max1, g_max2)
            g2[:g_max,i] = avgGi[:g_max]/( avgPi[:g_max] * avgFi[:g_max] )
            # first-order error propagation for g2 = G / (P * F)
            g2_err[:g_max,i] = np.sqrt(
                ( 1/ ( avgFi[:g_max] * avgPi[:g_max] ))**2 * devGi[:g_max] ** 2 +
                ( avgGi[:g_max]/ ( avgFi[:g_max]**2 * avgPi[:g_max] ))**2 * devFi[:g_max] ** 2 +
                ( avgGi[:g_max]/ ( avgFi[:g_max] * avgPi[:g_max]**2 ))**2 * devPi[:g_max] ** 2
                )
            Gmax = max(g_max, Gmax)
            lag_stepsi = res[i][1]
            if len(lag_steps_err ) < len( lag_stepsi ):
                lag_steps_err = lag_stepsi
            g2_G[:, nopr_[i]: nopr_[i+1] ] = s_Gall_qi
            g2_P[:, nopr_[i]: nopr_[i+1]] = s_Pall_qi
            g2_F[:, nopr_[i]: nopr_[i+1]] = s_Fall_qi
    del results
    del res
    if cal_error:
        print( 'G2 with error bar calculation DONE!')
        # g2_err is divided by sqrt(pixels per ring): standard error of the mean
        return g2[:Gmax,:],lag_steps_err[:Gmax], g2_err[:Gmax,:]/np.sqrt(nopr), g2_G, g2_P, g2_F
    else:
        print( 'G2 calculation DONE!')
        return g2, lag_steps
def auto_two_Arrayp( data_pixel, rois, index=None):
    '''
    Dec 16, 2015, Y.G.@CHX
    A numpy operation method to get the two-time correlation function using
    parallel computation (one worker process per q ring).
    TODO: will try to use dask
    Parameters:
        data_pixel: 2-D array, shape as (len(images), len(qind)),
                    use function Get_Pixel_Array( ).get_data( ) to get
        rois: 2-D array, the interested roi, has the same shape as image,
              can be rings for saxs, boxes for gisaxs
    Options:
        index: a 1-based q-ring label or a list of labels to compute;
               all rings are computed when None
    Return:
        g12b: a 3-D array, shape as ( imgs_length, imgs_length, q)
    One example:
        g12 = auto_two_Array( imgsr, ring_mask, data_pixel = data_pixel )
    '''
    qind, pixelist = roi.extract_label_indices( rois )
    noqs = len( np.unique(qind) )
    # pixels per q ring (normalization factor in the two-time formula)
    nopr = np.bincount(qind, minlength=(noqs+1))[1:]
    noframes = data_pixel.shape[0]
    g12b = np.zeros( [noframes, noframes, noqs] )
    if index is None:
        index = np.arange( 1, noqs + 1 )
    else:
        # accept either a scalar label or any sequence of labels
        try:
            len(index)
            index = np.array( index )
        except TypeError:
            index = np.array( [index] )
    # map the requested 1-based labels onto the ring list
    qlist = np.arange( 1, noqs + 1 )[ index -1 ]
    inputs = range( len(qlist) )
    # pre-slice each ring's pixel columns so a worker receives only its data
    data_pixel_qis = [0]* len(qlist)
    for i in inputs:
        pixelist_qi = np.where( qind == qlist[i] )[0]
        data_pixel_qis[i] = data_pixel[:,pixelist_qi]
    #pool = Pool(processes= len(inputs) )
    #results = [ apply_async( pool, _get_two_time_for_one_q, ( qlist[i],
    # data_pixel_qis[i], nopr, noframes ) ) for i in tqdm( inputs ) ]
    #res = [r.get() for r in results]
    pool = Pool(processes= len(inputs) )
    results = {}
    for i in inputs:
        results[i] = pool.apply_async( _get_two_time_for_one_q, [
            qlist[i], data_pixel_qis[i], nopr, noframes
        ] )
    pool.close()
    pool.join()
    # collect in key order so res[i] matches inputs[i]
    res = np.array( [ results[k].get() for k in list(sorted(results.keys())) ] )
    #print('here')
    for i in inputs:
        qi=qlist[i]
        g12b[:,:,qi -1 ] = res[i]
    print( 'G12 calculation DONE!')
    return g12b #g12b
def _get_two_time_for_one_q( qi, data_pixel_qi, nopr, noframes ):
#print( data_pixel_qi.shape)
sum1 = (np.average( data_pixel_qi, axis=1)).reshape( 1, noframes )
sum2 = sum1.T
two_time_qi = np.dot( data_pixel_qi, data_pixel_qi.T) /sum1 / sum2 / nopr[qi -1]
return two_time_qi
| 38.964286 | 113 | 0.538391 |
3b0efcc2aaba55df8a5346e1170132a786c5e507 | 1,533 | py | Python | FTSensor/pybind11-master/tests/test_docstring_options.py | yanglh14/InteractiveGrasping | b5bc1866a1847e7b0c11616fd6cbe949c64a355b | [
"MIT"
] | 3 | 2021-04-14T08:24:40.000Z | 2021-11-04T04:10:19.000Z | FTSensor/pybind11-master/tests/test_docstring_options.py | yanglh14/InteractiveGrasping | b5bc1866a1847e7b0c11616fd6cbe949c64a355b | [
"MIT"
] | null | null | null | FTSensor/pybind11-master/tests/test_docstring_options.py | yanglh14/InteractiveGrasping | b5bc1866a1847e7b0c11616fd6cbe949c64a355b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from pybind11_tests import docstring_options as m
def test_docstring_options():
    """Validate pybind11 docstring_options behaviour on the bound test module."""
    # Signatures disabled: no auto-generated docstring at all...
    assert not m.test_function1.__doc__
    # ...but a user-supplied docstring is still kept.
    assert m.test_function2.__doc__ == "A custom docstring"

    # Overload docstring merging rules.
    assert m.test_overloaded1.__doc__ == "Overload docstring"  # first overload only
    assert m.test_overloaded2.__doc__ == "overload docstring 1\noverload docstring 2"
    assert m.test_overloaded3.__doc__ == "Overload docstr"  # second overload only

    # Signatures re-enabled: docstrings start with the generated signature.
    for name in ("test_function3", "test_function4"):
        generated = getattr(m, name).__doc__
        assert generated.startswith(name + "(a: int, b: int) -> None")
    assert m.test_function4.__doc__.endswith("A custom docstring\n")

    # Both signatures and user docstrings disabled.
    assert not m.test_function5.__doc__
    # Nested enable_user_defined_docstrings() restores the custom text.
    assert m.test_function6.__doc__ == "A custom docstring"

    # RAII destructor restored the default options.
    doc7 = m.test_function7.__doc__
    assert doc7.startswith("test_function7(a: int, b: int) -> None")
    assert doc7.endswith("A custom docstring\n")

    # User-defined docstrings are suppressed for non-function objects too.
    assert not m.DocstringTestFoo.__doc__
    assert not m.DocstringTestFoo.value_prop.__doc__
eb63d587a289267ad958cdae3912bd3e28edced7 | 3,983 | py | Python | src/plots/acc_mgr.py | hyunghunny/tf-hpo | f6ec54480dd0cad9e24fddc050e88ee7b7843cfa | [
"MIT"
] | 8 | 2016-07-22T06:58:16.000Z | 2019-10-23T06:10:46.000Z | src/plots/acc_mgr.py | hyunghunny/tf-hpo | f6ec54480dd0cad9e24fddc050e88ee7b7843cfa | [
"MIT"
] | 6 | 2016-07-15T14:05:18.000Z | 2020-10-27T09:27:20.000Z | src/plots/acc_mgr.py | hyunghunny/tf-hpo | f6ec54480dd0cad9e24fddc050e88ee7b7843cfa | [
"MIT"
] | 1 | 2019-08-06T12:42:34.000Z | 2019-08-06T12:42:34.000Z | # import required libraries to munge data
import pandas as pd
import numpy as np
from mpl_toolkits.mplot3d.axes3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
# define log function
from math import log
def log_scale(list, base=2):
    """Replace every element of *list* with its logarithm to *base*, in place.

    Returns the same list object for convenience. (The parameter name
    shadows the ``list`` builtin; kept for backward compatibility with
    keyword callers.)
    """
    for idx, value in enumerate(list):
        list[idx] = log(value, base)
    return list
# Load the measurement data at import time.
# NOTE(review): both paths are relative to the current working directory, so
# this module is only importable when run from the expected location.
accuracy_table = pd.read_csv("test_accuracy.csv", header=0)
all_logs = pd.read_csv("../../log/all_logs_20160810.csv", header=0)
def plot_by_fc(iteration, test_data_num, to_log_scale=False):
    """Plot test-accuracy trends versus conv2 size, one subplot per conv1 size.

    Each of the 7 subplots (one per conv1 size) shows four curves, one per
    fully-connected layer size (128/256/512/1024), for the accuracies observed
    at ``iteration`` iterations with ``test_data_num`` test samples.

    Parameters
    ----------
    iteration : int
        One of the observed iteration counts (see ``get_df_by_iter``).
    test_data_num : int
        One of the observed test-data sizes (see ``get_df_by_iter``).
    to_log_scale : bool
        When True, neuron counts are plotted on a log2 axis.

    Returns
    -------
    matplotlib.figure.Figure
        The assembled figure (also shown via ``plt.show()``).
    """
    tables = get_df_by_iter(iteration, test_data_num)
    # reset plot state and set the figure size
    plt.clf()
    fig = plt.figure(num=None, figsize=(16, 12), dpi=80, facecolor='w', edgecolor='k')
    cov1_neurons = [2, 4, 8, 16, 32, 64, 128]
    if to_log_scale is True:
        # BUG FIX: the original re-initialized cov1_neurons right after this
        # call, silently discarding the log scaling (and making the later
        # check for log2(8)==3 unreachable); keep the scaled values.
        cov1_neurons = log_scale(cov1_neurons)
    # one subplot per conv1 size on a 2x4 grid
    ax = []
    for i in range(len(cov1_neurons)):
        ax.append(fig.add_subplot(2, 4, i + 1))
    # one curve style / legend entry per fully-connected layer size
    markers = ["r--", "g-", "b-", "m-"]
    legends = ["fc:128", "fc:256", "fc:512", "fc:1024"]
    cov2_neurons = [2, 4, 8, 16, 32, 64, 128, 256, 512]
    if to_log_scale is True:
        cov2_neurons = log_scale(cov2_neurons)
    # cumulative observation offsets per conv1 value; the conv1==8 group has
    # only 8 observations (cov2==128 is missing), hence the 18 -> 26 step
    obs_counts = [0, 9, 18, 26, 35, 44, 53, 62]  # XXX: dragon lives here
    fully_size = 4
    for i in range(fully_size):
        for j in range(len(cov1_neurons)):
            start_index = obs_counts[j]
            end_index = obs_counts[j + 1]
            # conv1 == 8 (or its log2 value 3) is missing the cov2==128
            # observation, so plot against the shorter x-axis.
            # (Was `is 8 or ... is 3` -- integer identity comparison is a
            # CPython accident; use membership instead.)
            if int(cov1_neurons[j]) in (8, 3):
                leaky_cov2_neurons = [2, 4, 8, 16, 32, 64, 256, 512]
                if to_log_scale is True:
                    leaky_cov2_neurons = log_scale(leaky_cov2_neurons)
                ax[j].plot(leaky_cov2_neurons, tables[i][start_index:end_index],
                           markers[i], label=legends[i])
            else:
                ax[j].plot(cov2_neurons, tables[i][start_index:end_index],
                           markers[i], label=legends[i])
            ax[j].set_title("cov1 size:" + str(cov1_neurons[j]))
            ax[j].set_xlabel("cov2 size")
            ax[j].set_ylim([0.0, 1.0])
    plt.ylabel("test accuracy")
    title = "Accuracy trends at " + str(iteration) + " iterations with " + str(test_data_num) + " test data"
    if to_log_scale is True:
        title += " (log scale)"
    plt.suptitle(title)
    plt.legend(loc="best")
    plt.show()
    return fig
def get_acc():
    """Return the raw test-accuracy DataFrame loaded at module import time."""
    return accuracy_table
def get_df_by_iter(iteration, test_data_num):
    """Return per-FC-size accuracy series for one observed condition.

    Filters the module-level ``accuracy_table`` down to the rows measured at
    ``iteration`` iterations, then extracts the accuracy column for
    ``test_data_num`` test samples, once for each observed fully-connected
    layer size (128/256/512/1024).

    Returns an empty list when the requested condition was never observed.
    """
    # conditions actually present in the measurement data
    observed_iterations = [6400, 12800, 199680]
    observed_test_data = [256, 512, 2048, 8092]
    # observed fully connected layer sizes
    L3_neurons = [128, 256, 512, 1024]
    if test_data_num not in observed_test_data or iteration not in observed_iterations:
        return []  # condition was never observed
    subset = accuracy_table[accuracy_table["Iteration"] == iteration]
    column = "Testing Accuracy(" + str(test_data_num) + ")"
    return [subset[subset["L3"] == size][column] for size in L3_neurons]
| 32.382114 | 125 | 0.611599 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.