content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
import os
from dotenv import load_dotenv,find_dotenv
# not used in this stub but often useful for finding various files
# Absolute path of the project root (the parent of this file's directory).
project_dir = os.path.join(os.path.dirname(__file__), os.pardir)
# Locate the nearest .env file up the tree and load it into os.environ.
load_dotenv(find_dotenv())
|
nilq/baby-python
|
python
|
from ..views import add, configure, delete
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
# Copyright (c) 2014 The New York Times Company
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
    # Prefer setuptools; fall back to distutils on minimal installs.
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Long description = README plus changelog (changelog marker stripped).
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read().replace('.. :changelog:', '')

requirements = [
    # TODO: put package requirements here
]
test_requirements = [
    # TODO: put package test requirements here
]

setup(
    name='statsd-rabbitmq',
    version='0.0.2',
    # BUG FIX: the two adjacent string literals concatenated without a
    # separator, producing "...tocollect statistics..."; add the space.
    description="A statsd plugin, written in python, to "
                "collect statistics from RabbitMQ.",
    long_description=readme + '\n\n' + history,
    author="Mike Buzzetti",
    author_email='mike.buzzetti@gmail.com',
    url='https://github.com/NYTimes/statsd-rabbitmq',
    packages=[
        'statsd_rabbitmq',
    ],
    package_dir={'statsd_rabbitmq': 'statsd_rabbitmq'},
    include_package_data=True,
    install_requires=requirements,
    license="Apache",
    zip_safe=False,
    keywords='statsd-rabbitmq',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
    ],
    test_suite='tests',
    tests_require=test_requirements,
    data_files=[('share/statsd-rabbitmq/', ['config/types.db.custom'])],
)
|
nilq/baby-python
|
python
|
# Author: Xinshuo Weng
# Email: xinshuow@andrew.cmu.edu
# this file includes general help functions for MUGSY data
import init_paths
init_paths.main()
from check import isstring, isscalar
from file_io import get_sheet_service, update_patchs2sheet, get_data_from_sheet, update_row2sheet
# # number of points for all parts
# num_pts = dict()
# num_pts['face_old'] = 20
# num_pts['face'] = 26
# num_pts['left_upper_eyelid'] = 30
# num_pts['right_upper_eyelid'] = 30
# num_pts['upper_eyelid'] = 30
# num_pts['left_lower_eyelid'] = 17
# num_pts['right_lower_eyelid'] = 17
# num_pts['lower_eyelid'] = 17
# num_pts['nose'] = 10
# num_pts['outer_lip'] = 16
# num_pts['inner_lip'] = 16
# num_pts['upper_teeth'] = 18
# num_pts['lower_teeth'] = 18
# num_pts['left_ears'] = 19
# num_pts['right_ears'] = 19
# num_pts['ears'] = 19
# num_pts['mouth'] = 68
# # num_pts['iris'] = 3
# # num_pts['pupil'] = 3
# num_pts['overall'] = 236
# # index offset of keypoints for all parts
# index_offset = dict()
# index_offset['face_old'] = 0
# index_offset['face'] = 0
# index_offset['left_upper_eyelid'] = 26
# index_offset['right_upper_eyelid'] = 56
# index_offset['left_lower_eyelid'] = 86
# index_offset['right_lower_eyelid'] = 103
# index_offset['nose'] = 120
# index_offset['outer_lip'] = 130
# index_offset['inner_lip'] = 146
# index_offset['upper_teeth'] = 162
# index_offset['lower_teeth'] = 162 # lower teeth already has offset in the raw_annotations
# index_offset['left_ears'] = 198
# index_offset['right_ears'] = 217
# index_offset['mouth'] = 130
# # index_offset['iris'] = 236
# # index_offset['pupil'] = 239
# index_offset['overall'] = 0
# anno_version = 1
# number of points for all parts
num_pts = dict()
num_pts['face_old'] = 20
num_pts['face'] = 26
num_pts['left_upper_eyelid'] = 24
num_pts['right_upper_eyelid'] = 24
num_pts['upper_eyelid'] = 24
num_pts['left_lower_eyelid'] = 17
num_pts['right_lower_eyelid'] = 17
num_pts['lower_eyelid'] = 17
num_pts['nose'] = 10
num_pts['outer_lip'] = 16
num_pts['inner_lip'] = 16
num_pts['upper_teeth'] = 18
num_pts['lower_teeth'] = 18
num_pts['left_ears'] = 19
num_pts['right_ears'] = 19
num_pts['ears'] = 19
num_pts['mouth'] = 68
# num_pts['iris'] = 3
# num_pts['pupil'] = 3
num_pts['overall'] = 224
# index offset of keypoints for all parts
index_offset = dict()
index_offset['face_old'] = 0
index_offset['face'] = 0
index_offset['left_upper_eyelid'] = 26
index_offset['right_upper_eyelid'] = 50
index_offset['left_lower_eyelid'] = 74
index_offset['right_lower_eyelid'] = 91
index_offset['nose'] = 108
index_offset['mouth'] = 118
index_offset['outer_lip'] = 118
index_offset['inner_lip'] = 134
index_offset['upper_teeth'] = 150
index_offset['lower_teeth'] = 150 # lower teeth already has offset in the raw_annotations
index_offset['left_ears'] = 186
index_offset['right_ears'] = 205
# index_offset['iris'] = 224
# index_offset['pupil'] = 227
index_offset['overall'] = 0
anno_version = 2
rotate_degree = {'330001': 90, '330005': -90, '330006': -90, '330007': -90, '330010': 90, '330011': -90, '330012': -90, '330013': -90, '330014': -90, '330015': -90,
'330016': -90, '330017': -90, '330018': -90, '330019': 90, '330020': 90, '330021': 90, '330022': 90, '330023': -90, '330024': -90, '330025': -90,
'330026': -90, '330027': -90, '330028': -90, '330029': -90, '330030': -90, '330031': -90, '330032': -90, '330033': -90, '330034': -90, '330035': 90,
'330036': -90, '330037': -90, '330038': 90, '330039': -90, '330040': -90, '330041': -90, '330042': -90, '330043': -90, '330044': -90, '330045': -90}
rotate_degree_v2 = {'330000': -90, '330005': 90, '330006': -90, '330007': 90, '330010': -90, '330011': -90, '330012': -90, '330014': 90, '330015': -90, '330016': 90,
'330017': 90, '330018': -90, '330019': -90, '330020': 90, '330022': 90, '330023': 90, '330024': -90, '330025': 90, '330026': -90, '330027': -90,
'330028': 90, '330029': 90, '330030': 90, '330031': 90, '330032': 90, '330033': 90, '330036': 90, '330037': 90, '330038': -90, '330040': -90,
'330041': -90, '330042': -90, '330043': -90, '330045': -90, '400004': -90, '400007': -90, '400008': -90, '400010': 90, '400012': 90, '400017': -90,
'400021': 90, '400024': 90, '400025': 90, '400028': 90, '400036': -90, '400039': -90, '400040': 90, '400041': -90, '410001': -90, '410004': 90,
'410016': 90, '410018': -90, '410019': 90, '410029': 90, '410033': 90, '410043': -90, '410044': -90, '410045': 90, '410048': -90, '410049': 90,
'410050': 90, '410051': -90, '410053': 90, '410057': -90, '410061': -90, '410066': 90, '410067': 90, '410068': -90, '410069': 90, '410070': -90,
'410073': -90,}
def get_rotate_dict():
    """Return the full camera-id -> rotation-degree mapping (setup v1)."""
    return rotate_degree

def get_rotate_degree(camera_id, debug=True):
    """Return the v1 rotation degree registered for *camera_id*."""
    if debug:
        assert isstring(camera_id), 'the input camera id is not a string for getting rotation degree'
        assert camera_id in get_camera_list(), 'the camera id requested: %s does not exist' % camera_id
    return rotate_degree[camera_id]

def get_rotate_dict_v2():
    """Return the full camera-id -> rotation-degree mapping (setup v2)."""
    return rotate_degree_v2

def get_rotate_degree_v2(camera_id, debug=True):
    """Return the v2 rotation degree registered for *camera_id*."""
    if debug:
        assert isstring(camera_id), 'the input camera id is not a string for getting rotation degree'
        # NOTE(review): this validates against the v1 camera list even
        # though it looks up the v2 table -- confirm that is intended.
        assert camera_id in get_camera_list(), 'the camera id requested: %s does not exist' % camera_id
    return rotate_degree_v2[camera_id]
def get_compact_subset_list():
    """All facial-part names in compact form (left/right merged)."""
    return ['face', 'ears', 'lower_eyelid', 'upper_eyelid', 'nose',
            'outer_lip', 'inner_lip', 'upper_teeth', 'lower_teeth', 'mouth']

def get_detailed_subset_list():
    """All facial-part names with explicit left/right variants."""
    return ['face', 'left_ears', 'right_ears', 'left_lower_eyelid',
            'right_lower_eyelid', 'left_upper_eyelid', 'right_upper_eyelid',
            'nose', 'outer_lip', 'inner_lip', 'upper_teeth', 'lower_teeth',
            'mouth']

def get_camera_list():
    """All camera ids of the first (v1) capture setup."""
    return ['330001', '330005', '330006', '330007', '330010', '330011',
            '330012', '330014', '330015', '330016', '330017', '330018',
            '330019', '330020', '330021', '330022', '330023', '330024',
            '330025', '330026', '330027', '330028', '330029', '330030',
            '330031', '330032', '330033', '330034', '330035', '330036',
            '330037', '330038', '330039', '330040', '330041', '330042',
            '330043', '330044', '330045']

def get_camera_list_v2():
    """All camera ids of the v2 setup (keys of the v2 rotation table)."""
    return rotate_degree_v2.keys()
def subset_detailed_convert2compact(subset, debug=True):
    '''
    convert a subset in detailed version to the corresponding compact version
    '''
    if debug:
        assert subset in get_detailed_subset_list() or subset == 'face_old', 'the input subset is not in the detailed subset list'
    # Merge the left_/right_ pairs; every other name is already compact.
    if subset in ('left_lower_eyelid', 'right_lower_eyelid'):
        return 'lower_eyelid'
    if subset in ('left_upper_eyelid', 'right_upper_eyelid'):
        return 'upper_eyelid'
    if subset in ('left_ears', 'right_ears'):
        return 'ears'
    return subset
def get_left_camera_list():
    """Camera ids that view the subject's left side."""
    return ['330035', '330039', '330036', '330024', '330023', '330045',
            '330021', '330040', '330037', '330041', '330043', '330033',
            '330028', '330030', '330025', '330042', '330038', '330020',
            '330012']
def get_right_camera_list():
    """Camera ids that view the subject's right side.

    BUG FIX: '330014' was listed twice in the original; the duplicate is
    removed (membership is unchanged, the list just no longer repeats it).
    """
    return ['330016', '330011', '330032', '330017', '330005', '330019',
            '330014', '330034', '330006', '330015', '330018', '330026',
            '330044', '330027', '330022', '330031', '330010', '330001',
            '330029', '330007']
def get_filename(recording_id, recording_type, camera_id, frame_number, labeler_id, debug=True):
    '''
    return the full filename given all info
    '''
    if debug:
        assert isstring(recording_id), 'recording id is not a string'
        assert isstring(recording_type), 'recording type is not a string'
        assert isscalar(frame_number), 'frame number is not a scalar'
        assert isstring(labeler_id), 'labeler id is not a string'
        assert camera_id in get_camera_list(), 'camera id %s is not in the camera list' % camera_id
    # Fields are joined with '--'; the frame number is zero-padded to 5 digits.
    fields = [recording_id, recording_type, camera_id, '%05d' % frame_number, labeler_id]
    return '--'.join(fields)
def get_image_id(filename, debug=True):
    '''
    return the real image id, this function assumes the name is separated by '--'
    '''
    if debug:
        assert isstring(filename), 'input filename is not a string'
    # Everything except the trailing labeler-id field.
    return '--'.join(filename.split('--')[:-1])

def get_labeler_id(filename, debug=True):
    '''
    return the labeler id, this function assumes the name is separated by '--'
    '''
    if debug:
        assert isstring(filename), 'input filename is not a string'
    # The labeler id is always the last '--'-separated field.
    return filename.split('--')[-1]
def get_frame_number(filename, debug=True):
    '''
    extract the frame number from MUGSY filename
    '''
    if debug:
        assert isstring(filename), 'input filename is not a string'
    # Frame number is the 4th '--'-separated field, zero-padded decimal.
    return int(filename.split('--')[3])

def get_person_id(filename, debug=True):
    '''
    extract the person ID from MUGSY filename
    '''
    if debug:
        assert isstring(filename), 'input filename is not a string'
    # Person id is the 2nd '_'-separated field of the recording id.
    return filename.split('_')[1]
def get_recording_id(filename, debug=True):
    '''
    extract the recording id, including date and person id and dot flag
    '''
    if debug:
        assert isstring(filename), 'input filename is not a string'
    # Recording id is the 1st '--'-separated field.
    return filename.split('--')[0]

def get_recording_type(filename, debug=True):
    '''
    extract the recording type, sentence or neutral tongue or expression
    '''
    if debug:
        assert isstring(filename), 'input filename is not a string'
    # Recording type is the 2nd '--'-separated field.
    return filename.split('--')[1]

def get_camera_id(filename, debug=True):
    '''
    extract the camera ID from MUGSY filename
    '''
    if debug:
        assert isstring(filename), 'input filename is not a string'
    # Camera id is the 3rd '--'-separated field.
    return filename.split('--')[2]
def get_image_name(image_id, labeler_id, debug=True):
    '''
    merge the image id and labeler id and returns the imagename
    '''
    return '--'.join((image_id, labeler_id))
def get_crop_bbox_from_subset_and_camera(subset, camera_id, debug=True):
    '''
    get pre-defined cropping bbox from subset and camera id
    return:
        bbox in TLBR format, or None when no bbox is pre-defined for the
        subset/camera combination (implicit fall-through, as before)
    '''
    if debug:
        assert subset in get_detailed_subset_list() or subset in get_compact_subset_list() or subset == 'face_old', 'subset is not correct!'
        assert camera_id in get_camera_list(), 'camera id is not correct!'
    # Only the lower-eyelid crop on camera 330030 is currently defined.
    if 'lower_eyelid' in subset and camera_id == '330030':
        return [700, 1169, 1659, 1888]
    return None
def check_left_right(subset, filename, debug=True):
    """Return 'left' or 'right' for a paired facial part, decided by the
    camera that captured *filename*.

    Parameters:
        subset: one of 'ears', 'lower_eyelid', 'upper_eyelid'
        filename: MUGSY filename ('--'-separated; camera id is field 3)
        debug: enable input validation
    """
    if debug:
        assert subset in ['ears', 'lower_eyelid', 'upper_eyelid'], 'subset is not correct!'
    # BUG FIX: camera_id was only assigned inside the 'lower_eyelid'
    # branch, so the 'upper_eyelid' branch raised NameError; extract once.
    camera_id = get_camera_id(filename, debug=debug)
    if subset == 'lower_eyelid':
        if camera_id == '330014':
            return 'right'
        elif camera_id == '330030':
            return 'left'
        else:
            assert False, 'camera wrong!!'
    elif subset == 'upper_eyelid':
        if camera_id in ['330001', '330010']:
            return 'right'
        # BUG FIX: the original compared camera_id == ['330020', '330038'],
        # which is always False; a membership test was intended.
        elif camera_id in ['330020', '330038']:
            return 'left'
        else:
            assert False, 'camera wrong!!'
    else:
        assert False, 'not supported'
def get_line_index_list(subset, debug=True):
    """Return the stroke connectivity for drawing a facial part: a list of
    polylines, each polyline a list of keypoint indices (local to the part).
    """
    if debug:
        assert subset in get_detailed_subset_list() or subset in get_compact_subset_list() or subset == 'face_old' or subset == 'overall', 'subset is not correct!'
    # Substring match so left_/right_ variants share the same connectivity.
    if 'lower_eyelid' in subset:
        return [[0, 8, 4, 7, 2, 6, 3, 5, 1], [16, 15, 14, 13, 12, 11, 10, 9]]
    elif 'upper_eyelid' in subset:
        return [[0, 8, 4, 7, 2, 6, 3, 5, 1], [16, 15, 14, 13, 12, 11, 10, 9], [23, 22, 21, 20, 19, 18, 17]]
    elif subset == 'outer_lip':
        return [[0, 8, 4, 9, 2, 10, 5, 11, 1], [0, 12, 6, 13, 3, 14, 7, 15, 1]]
    elif subset == 'inner_lip':
        return [[1, 11, 5, 10, 2, 9, 4, 8, 0], [0, 12, 6, 13, 3, 14, 7, 15, 1]]
    elif subset == 'face':
        return [[0, 1, 4, 3, 5, 2], [8, 9, 16, 12, 15, 10, 13, 11, 14, 8], [17, 18, 24, 21, 25, 19, 23, 20, 22, 17], [6, 7]]
    elif subset == 'lower_teeth':
        # each 4/5-point loop outlines one tooth quad
        return [[17, 8, 7, 16, 17], [7, 6, 15, 16], [6, 5, 14, 15], [5, 0, 9, 14], [0, 1, 10, 9], [1, 2, 11, 10], [2, 3, 12, 11], [3, 4, 13, 12]]
    elif subset == 'upper_teeth':
        return [[8, 17, 16, 7, 8], [16, 15, 6, 7], [15, 14, 5, 6], [14, 9, 0, 5], [9, 10, 1, 0], [10, 11, 2, 1], [11, 12, 3, 2], [12, 13, 4, 3]]
    elif subset == 'mouth':
        # outer lip, inner lip, then upper and lower teeth in one layout
        return [[0, 8, 4, 9, 2, 10, 5, 11, 1], [0, 12, 6, 13, 3, 14, 7, 15, 1], [17, 27, 21, 26, 18, 25, 20, 24, 16], [16, 28, 22, 29, 19, 30, 23, 31, 17],
                [40, 49, 48, 39, 40], [48, 47, 38, 39], [47, 46, 37, 38], [46, 41, 32, 37], [41, 42, 33, 32], [42, 43, 34, 33], [43, 44, 35, 34], [44, 45, 36, 35],
                [67, 58, 57, 66, 67], [57, 56, 65, 66], [56, 55, 64, 65], [55, 50, 59, 64], [50, 51, 60, 59], [51, 52, 61, 60], [52, 53, 62, 61], [53, 54, 63, 62]]
    else:
        assert False, '%s is not supported' % subset
def get_camera_from_subset(subset, debug=True):
    '''
    get camera id for a specific subset as many parts are captured only from several fixed camera position
    return:
        a list of camera id suitable for this specific part
    '''
    if debug:
        assert subset in get_detailed_subset_list() or subset in get_compact_subset_list() or subset == 'face_old', 'subset is not correct!'
    # Substring match first: covers left_/right_/plain lower_eyelid.
    if 'lower_eyelid' in subset:
        return ['330030', '330014']
    # Everything else is an exact-name lookup.
    cameras_by_subset = {
        'left_upper_eyelid': ['330020', '330038', '330030', '330014'],
        'right_upper_eyelid': ['330010', '330001', '330030', '330014'],
        'face_old': ['330030', '330014', '330010', '330001', '330020', '330038', '330012', '330031'],
        'outer_lip': ['330030'],
        'inner_lip': ['330030'],
        'lower_teeth': ['330030'],
        'upper_teeth': ['330030'],
        'mouth': ['330030'],
        'nose': ['330012', '330031'],
        'face': ['330030', '330014', '330012', '330031'],
    }
    if subset in cameras_by_subset:
        return cameras_by_subset[subset]
    assert False, '%s is not supported' % subset
def get_num_pts(subset, debug=True):
    '''
    get number of points for a specific facial part
    '''
    if debug:
        assert subset in get_detailed_subset_list() or subset in get_compact_subset_list() or subset == 'face_old' or subset == 'overall', 'subset is not correct!'
    return num_pts[subset]

def get_index_offset(subset, debug=True):
    '''
    get the keypoint index offset for a specific facial part
    '''
    if debug:
        assert subset in get_detailed_subset_list() or subset == 'face_old', 'subset is not correct!'
    return index_offset[subset]

def get_anno_version():
    """Return the annotation version of the active point layout."""
    return anno_version

def get_num_pts_all():
    """Return the total number of keypoints over all parts."""
    return num_pts['overall']
def get_detailed_subset(filename, subset, debug=True):
    '''
    for ears, lower_eyelid, upper_eyelid, this function returns the left right part based on camera position
    '''
    # Parts without a left/right variant pass through unchanged.
    if subset in ['face', 'nose', 'upper_teeth', 'lower_teeth', 'outer_lip', 'inner_lip']:
        return subset
    camera_id = get_camera_id(filename, debug=debug)
    if camera_id in get_left_camera_list():
        return 'left_' + subset
    if camera_id in get_right_camera_list():
        return 'right_' + subset
    assert False, 'camera ID %s error!' % camera_id
def get_part_index_from_chopping(subset, debug=True):
    '''
    get part index for each individual part from face old dataset
    return
        a list of index
    '''
    if debug:
        assert subset in get_detailed_subset_list() or subset in get_compact_subset_list() or subset == 'face_old', 'subset is not correct!'
    choppable = ['right_lower_eyelid', 'right_upper_eyelid', 'outer_lip', 'inner_lip',
                 'upper_teeth', 'lower_teeth', 'left_lower_eyelid', 'left_upper_eyelid', 'mouth']
    if subset in choppable:
        if subset in ('right_lower_eyelid', 'right_upper_eyelid'):
            return [4, 5, 6, 7]
        if subset in ('left_lower_eyelid', 'left_upper_eyelid'):
            return [8, 9, 10, 11]
        # the remaining choppable parts are all mouth-region subsets
        return [12, 13, 14, 17]
    # NOTE(review): subsets outside the choppable list fall through and
    # return None implicitly, mirroring the original control flow.
    return None
def get_search_range_in_sheet():
    """Number of rows scanned when searching a name in the google sheet."""
    return 1000

def get_experiments_sheet_id():
    """Google-sheet id of the trained-model (experiments) sheet."""
    return '1ViiXL89ek9rLACudOnLbAu6_c4UIyIBtosIhkIncWiE'

def get_training_params_colums_in_experiments_sheet():
    """Column letters holding training params in the experiments sheet."""
    return ['F', 'G', 'H', 'I', 'J', 'K', 'L', 'M']
def get_row_index_from_experiments_sheet(dataset, subset, size_set, model_name, debug=True): # all experiments are unique
    '''
    the returned index is already 1-indexed
    '''
    if debug:
        assert subset in get_compact_subset_list() or subset == 'face_old', 'subset is not correct!'
        assert size_set in ['resized_4', 'cropped', 'cropped_all'], 'size set is not correct'
    search_range = range(get_search_range_in_sheet())

    def fetch_column(letter):
        # Fetch one whole column (A1 notation) over the search window.
        cells = ['%s%d' % (letter, idx + 1) for idx in search_range]
        return get_data_from_sheet(service=get_sheet_service(), sheet_id=get_experiments_sheet_id(), search_range=cells, debug=debug)

    names_dataset = fetch_column('A')
    names_subset = fetch_column('B')
    names_sizeset = fetch_column('C')
    names_model = fetch_column('D')
    target = (dataset, subset, size_set, model_name)
    for idx in search_range:
        candidate = (names_dataset[idx], names_subset[idx], names_sizeset[idx], names_model[idx])
        if candidate == target:
            return idx + 1
    assert False, 'No entry model (%s, %s, %s, %s) found!' % (dataset, subset, size_set, model_name)
def fetch_fitsize_from_google_sheets(dataset, subset, size_set, model_name, debug=True):
    '''
    get input size during training
    '''
    row_index = get_row_index_from_experiments_sheet(dataset, subset, size_set, model_name, debug=debug)
    inputsize = get_data_from_sheet(service=get_sheet_service(), sheet_id=get_experiments_sheet_id(), search_range='F' + str(row_index), debug=debug)[0]
    # Cell looks like "<width> x <height>"; tokens 0 and 2 are the numbers.
    tokens = inputsize.split(' ')
    return [tokens[0], tokens[2]]

def fetch_downsample_from_google_sheets(dataset, subset, size_set, model_name, debug=True):
    '''
    get downsample factor during training
    '''
    row_index = get_row_index_from_experiments_sheet(dataset, subset, size_set, model_name, debug=debug)
    downsample = get_data_from_sheet(service=get_sheet_service(), sheet_id=get_experiments_sheet_id(), search_range='H' + str(row_index), debug=debug)[0]
    # Keep only the leading factor of an "NxM"-style cell.
    return downsample.split('x')[0]

def fetch_model_date_from_google_sheets(dataset, subset, size_set, model_name, debug=True):
    '''
    get the date the model was trained
    '''
    row_index = get_row_index_from_experiments_sheet(dataset, subset, size_set, model_name, debug=debug)
    return get_data_from_sheet(service=get_sheet_service(), sheet_id=get_experiments_sheet_id(), search_range='P' + str(row_index), debug=debug)[0]

def fetch_resize_factor_from_google_sheets(dataset, subset, size_set, model_name, debug=True):
    '''
    get the resize factor used during training
    '''
    row_index = get_row_index_from_experiments_sheet(dataset, subset, size_set, model_name, debug=debug)
    # NOTE(review): lowercase 'g' -- A1 notation is case-insensitive, but
    # confirm column G is really the resize factor.
    return get_data_from_sheet(service=get_sheet_service(), sheet_id=get_experiments_sheet_id(), search_range='g' + str(row_index), debug=debug)[0]

def fetch_output_stage_from_google_sheets(dataset, subset, size_set, model_name, debug=True):
    '''
    get the output stage recorded for the model
    '''
    row_index = get_row_index_from_experiments_sheet(dataset, subset, size_set, model_name, debug=debug)
    out_stage = get_data_from_sheet(service=get_sheet_service(), sheet_id=get_experiments_sheet_id(), search_range='I' + str(row_index), debug=debug)[0]
    # Cell looks like "...S<k>"; keep the trailing stage number.
    return out_stage.split('S')[-1]
def get_evaluation_sheet_id():
    """Google-sheet id of the evaluated-model sheet."""
    return '1cmORxhEOD-E4cYuaKXrJgMeiJ2h5uu4mIoESawaTvFg'

def get_training_params_colums_in_evaluation_sheet():
    """Column letters holding training params in the evaluation sheet."""
    return ['E', 'F', 'G', 'H', 'I', 'J', 'K', 'L']

def get_testing_params_colums_in_evaluation_sheet():
    """Column letters holding testing params in the evaluation sheet."""
    return ['M', 'N', 'O', 'P', 'Q', 'R']
def get_row_index_list_from_evaluation_sheet(dataset, subset, size_set, evaluation_name, debug=True): # all evaluated models might not be unique
    '''
    the returned index is already 1-indexed
    '''
    if debug:
        assert subset in get_compact_subset_list() or subset == 'face_old', 'subset is not correct!'
        assert size_set in ['resized_4', 'cropped', 'cropped_all'], 'size set is not correct'
    search_range = range(get_search_range_in_sheet())

    def fetch_column(letter):
        # Fetch one whole column (A1 notation) over the search window.
        cells = ['%s%d' % (letter, idx + 1) for idx in search_range]
        return get_data_from_sheet(service=get_sheet_service(), sheet_id=get_evaluation_sheet_id(), search_range=cells, debug=debug)

    names_dataset = fetch_column('A')
    names_subset = fetch_column('B')
    names_sizeset = fetch_column('C')
    names_model = fetch_column('D')
    target = (dataset, subset, size_set, evaluation_name)
    matches = [idx + 1 for idx in search_range
               if (names_dataset[idx], names_subset[idx], names_sizeset[idx], names_model[idx]) == target]
    if not matches:
        assert False, '%s, %s, %s, %s is not on the search range within the google sheet' % (dataset, subset, size_set, evaluation_name)
    return matches
def update_info_evaluation_sheet(dataset, subset, size_set, model_name, evaluation_name, info_list, debug=True):
    """Copy the training params from the experiments sheet and write the
    testing params into every matching row of the evaluation sheet."""
    exp_index = get_row_index_from_experiments_sheet(dataset, subset, size_set, model_name, debug=debug)
    eval_rows = get_row_index_list_from_evaluation_sheet(dataset, subset, size_set, evaluation_name, debug=debug)
    update_training_info_evaluation_sheet(exp_index, eval_rows, debug=debug)
    update_testing_info_evaluation_sheet(eval_rows, info_list, debug=debug)

def update_training_info_evaluation_sheet(exp_index, evaluation_index_list, debug=True):
    """Mirror the training-parameter cells of experiments-sheet row
    *exp_index* into each evaluation-sheet row in *evaluation_index_list*."""
    source_cells = [col + str(exp_index) for col in get_training_params_colums_in_experiments_sheet()]
    training_info = get_data_from_sheet(service=get_sheet_service(), sheet_id=get_experiments_sheet_id(), search_range=source_cells, debug=debug)
    for row in evaluation_index_list:
        target_cells = [col + str(row) for col in get_training_params_colums_in_evaluation_sheet()]
        update_row2sheet(service=get_sheet_service(), sheet_id=get_evaluation_sheet_id(), row_starting_position=target_cells[0], data=training_info, debug=debug)

def update_testing_info_evaluation_sheet(evaluation_index_list, info_list, debug=True):
    '''
    update testing configuration to the record
    '''
    if debug:
        assert len(info_list) == len(get_testing_params_colums_in_evaluation_sheet()), 'the information list is not correct %d vs %d' % (len(info_list), len(get_testing_params_colums_in_evaluation_sheet()))
    info_list = list(info_list)
    for row in evaluation_index_list:
        start = get_testing_params_colums_in_evaluation_sheet()[0] + str(row)
        update_row2sheet(service=get_sheet_service(), sheet_id=get_evaluation_sheet_id(), row_starting_position=start, data=info_list, debug=debug)
|
nilq/baby-python
|
python
|
from dataclasses import dataclass
import pytest
import argdcls
from argdcls.config import _parse
@dataclass
class Config:
    """Minimal config used by these tests: one required field, one flag."""
    lr: float  # learning rate; required
    adam: bool = False  # whether the Adam optimizer is enabled
def test_load_params():
    """Each prefix kind ('@', '', '+', '++') loads into Config correctly."""
    # "@" prefix
    cfg = argdcls.load(Config, ["@lr=1.0"])
    assert cfg.lr == 1.0
    # no prefix
    cfg = argdcls.load(Config, ["lr=1.0", "adam=True"])
    assert cfg.lr == 1.0
    assert cfg.adam
    # "+" prefix adds a field that is not declared on Config
    cfg = argdcls.load(Config, ["lr=1.0", "+addon=3"])
    assert cfg.lr == 1.0
    assert not cfg.adam
    assert cfg.addon == 3  # type: ignore
    # "++" prefix
    cfg = argdcls.load(Config, ["++lr=1.0", "++adam=True", "++addon=3"])
    assert cfg.lr == 1.0
    assert cfg.adam
    assert cfg.addon == 3  # type: ignore

def test_error_cases():
    """A typo in a known key raises with a helpful suggestion."""
    with pytest.raises(Exception) as e:
        _ = argdcls.load(Config, ["lr=1.0", "adm=True"])
    expected = "Parameter \"adm\" not in ['lr', 'adam']. You may use \"+adm=True\" instead."
    assert str(e.value) == expected

def test_parse():
    """_parse splits any prefixed 'key=value' into (prefix, key, value)."""
    for prefix in ["@", "", "+", "++"]:
        param_t, key, val = _parse(prefix + "lr=1.0")
        assert param_t == prefix
        assert key == "lr"
        assert val == 1.0
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
""" A simple demo using aubio and pyaudio to play beats in real time
Note you will need to have pyaudio installed: `pip install pyaudio`.
Examples:
./demo_tapthebeat.py ~/Music/track1.ogg
When compiled with ffmpeg/libav, you should be able to open remote streams. For
instance using youtube-dl (`pip install youtube-dl`):
./demo_tapthebeat.py `youtube-dl -xg https://youtu.be/zZbM9n9j3_g`
"""
import sys
import time
import pyaudio
import aubio
import numpy as np
win_s = 1024 # fft size
# analysis hop: samples consumed per callback invocation
hop_s = 512
# parse command line arguments
if len(sys.argv) < 2:
    print("Usage: %s <filename> [samplerate]" % sys.argv[0])
    sys.exit(1)
filename = sys.argv[1]
# 0 lets aubio pick the file's native sample rate
samplerate = 0
if len( sys.argv ) > 2: samplerate = int(sys.argv[2])
# create aubio source
a_source = aubio.source(filename, samplerate, hop_s)
samplerate = a_source.samplerate
# create aubio tempo detection
a_tempo = aubio.tempo("default", win_s, hop_s, samplerate)
# create a simple click sound
click = 0.7 * np.sin(2. * np.pi * np.arange(hop_s) / hop_s * samplerate / 3000.)
# pre-built buffer of one hop of silence
zerobuf=np.zeros(hop_s).tobytes()
def pyaudio_callback(_in_data, _frame_count, _time_info, _status):
    """pyaudio stream callback: read one hop from the source, overlay a
    click on detected beats, and hand the samples back to the stream."""
    samples, read = a_source()
    is_beat = a_tempo(samples)
    if is_beat:
        samples += click
        print('tick') # avoid print in audio callback
    audiobuf = samples.tobytes()
    # BUG FIX: the original returned `zerobuf` on both paths, so the
    # decoded audio (and the click) was never heard; return the real buffer.
    if read < hop_s:
        return (audiobuf, pyaudio.paComplete)
    return (audiobuf, pyaudio.paContinue)
# create pyaudio stream with frames_per_buffer=hop_s and format=paFloat32
p = pyaudio.PyAudio()
pyaudio_format = pyaudio.paFloat32
frames_per_buffer = hop_s
# mono output
n_channels = 1
stream = p.open(format=pyaudio_format, channels=n_channels, rate=samplerate,
                output=True, frames_per_buffer=frames_per_buffer,
                stream_callback=pyaudio_callback)
# start pyaudio stream
stream.start_stream()
# wait for stream to finish
while stream.is_active():
    time.sleep(0.1)
# stop pyaudio stream
stream.stop_stream()
stream.close()
# close pyaudio
# NOTE(review): p.terminate() appears to be missing here -- confirm.
|
nilq/baby-python
|
python
|
'''
Created 03/20/2014
@authors: Yifan Ning
@summary: parse Molecular Formula and drugbank_id from drugbank.xml
then parse MF(Molecular Formula), FDA Preferred Term and UNNI from UNNIs records
match the results from drugbank and results of parse UNNIs records
output terms: FDA Preferred Term, UNII, Drugbank URI
output file: PT-UNIIs-Drugbank-byMF-03202014.txt
'''
import xml.etree.ElementTree as ET
import os, sys
# input: DrugBank XML dump
DRUGBANK_XML = "drugbank.xml"
# input: FDA UNII records, tab separated
UNIIS_RECORDS = "UNIIs 25Jan2014 Records.txt"
# XML namespace prefix used by every DrugBank element
NS = "{http://drugbank.ca}"
# URI prefixes for the two output drug links
DRUGBANK_BIO2RDF = "http://bio2rdf.org/drugbank:"
DRUGBANK_CA = "http://www.drugbank.ca/drugs/"
# Molecular-Formula string -> drugbank-id, filled by parseDbIdAndMF
dict_ickey_dbid = {}
'''
<property>
<kind>Molecular Formula</kind>
<value>C4H6N4O3S2</value>
<source>ChemAxon</source>
</property>
'''
def parseDbIdAndMF(root):
    """Populate dict_ickey_dbid with Molecular-Formula -> drugbank-id.

    Walks every <drug> element under *root* and, for each drug that has a
    "Molecular Formula" property, records drug's id keyed by the formula.
    """
    for drug in root.iter(tag=NS + "drug"):
        dbid = drug.find(NS + "drugbank-id")
        # IDIOM FIX: compare Elements to None with `is`, not `==`;
        # Element equality/truthiness is deprecated in ElementTree.
        if dbid is None:
            continue
        drugbankid = dbid.text
        for subProp in drug.iter(NS + "property"):
            msKind = subProp.find(NS + "kind")
            if msKind is None or msKind.text != "Molecular Formula":
                continue
            msValue = subProp.find(NS + "value")
            if msValue is None:
                continue
            dict_ickey_dbid[msValue.text] = drugbankid
tree = ET.parse(DRUGBANK_XML)
root = tree.getroot()
parseDbIdAndMF(root)
#read mapping file that contains UNII PT MF
for line in open(UNIIS_RECORDS,'r').readlines():
row = line.split('\t')
mf = row[2]
if len(mf) == 0:
continue
if dict_ickey_dbid.has_key(mf):
drugbankid = dict_ickey_dbid[mf]
output = row[1] +'\t'+ row[0] +'\t'+ DRUGBANK_CA + drugbankid +'\t'+ DRUGBANK_BIO2RDF + drugbankid
print output.encode('utf-8').strip()
|
nilq/baby-python
|
python
|
import pandas as pd
from ..util import generate_name
from tickcounter.questionnaire import Encoder
class MultiEncoder(object):
    """Apply a collection of questionnaire Encoder rules to pandas data.

    Each DataFrame column (or a single Series) is matched against the
    registered encoders' target value sets and transformed by the first
    rule that fits.
    """

    def __init__(self, encoding_rule):
        """Build the name -> Encoder mapping.

        Parameters
        ----------
        encoding_rule : Encoder or list of Encoder
            A single rule or several rules; each is stored under its name.
        """
        if isinstance(encoding_rule, Encoder):
            self.rules = {
                encoding_rule.name: encoding_rule
            }
        elif isinstance(encoding_rule, list):
            if isinstance(encoding_rule[0], Encoder):
                self.rules = dict()
                for i in encoding_rule:
                    self.rules[i.name] = i
            else:
                pass
                # Need to convert the dictionary to encoders, and give default name
        else:
            raise ValueError(f"Expected list of encoder or dictionary objects, got {type(encoding_rule)} instead")

    def transform(self, data, *, rule_map=None, columns=None, ignore_list=None, return_rule=False, mode="any"):
        """Encode *data* with the first rule whose targets match each column.

        mode="strict" requires a column's value set to equal the rule's
        target set exactly; mode="any" only requires it to be a subset.
        Returns the transformed copy, or (copy, rule-name Series) when
        return_rule is True.
        """
        result = data.copy()
        encode_rule = None
        if isinstance(data, pd.DataFrame):
            encode_rule = pd.Series(dtype=str, index=data.columns)
            if rule_map is None:
                for i in result.columns:
                    if ignore_list is not None and i in ignore_list:
                        continue
                    else:
                        unique_values = result[i].value_counts().index
                        for rule in self.rules.values():
                            if mode == "strict":
                                # Empty symmetric difference -> identical value sets.
                                if len(set(unique_values) ^ set(rule.target)) == 0:
                                    result[i] = rule.transform(result[i])
                                    encode_rule[i] = rule.name
                                    break
                            elif mode == "any":
                                # Empty set difference -> subset of the rule's targets.
                                if len(set(unique_values) - set(rule.target)) == 0:
                                    result[i] = rule.transform(result[i])
                                    encode_rule[i] = rule.name
                                    break
                            else:
                                raise ValueError("rule argument can only be strict or any")
            else:
                # Check for correct format for rule_map
                # Transform according to the rules
                pass
        elif isinstance(data, pd.Series):
            encode_rule = pd.Series(dtype=str, index=[data.name])
            unique_values = result.value_counts().index
            for rule in self.rules.values():
                if mode == "strict":
                    if len(set(unique_values) ^ set(rule.target)) == 0:
                        result = rule.transform(result)
                        encode_rule[data.name] = rule.name
                        break
                elif mode == "any":
                    if len(set(unique_values) - set(rule.target)) == 0:
                        result = rule.transform(result)
                        encode_rule[data.name] = rule.name
                        break
                else:
                    raise ValueError("rule argument can only be strict or any")
        else:
            raise TypeError(f"Expected pandas Series or DataFrame, got {type(data)} instead")
        if return_rule:
            return (result, encode_rule)
        else:
            return result

    def count_neutral(self, data, **kwargs):
        """Count neutral answers per row, summed over all encoded columns.

        Accepts the same keyword arguments as transform().  Returns None
        when no applied rule defines a neutral value; returns (total, rule)
        when the caller passed return_rule=True.
        """
        # Might need to refactor this
        # BUG FIX: always force return_rule=True for the internal transform()
        # call.  Previously an explicit return_rule=False from the caller was
        # left in kwargs, transform() returned a single value, and the
        # two-value unpacking below raised.
        return_flag = bool(kwargs.get('return_rule', False))
        kwargs['return_rule'] = True
        df_encoded, rule = self.transform(data, **kwargs)
        total = None
        if isinstance(data, pd.DataFrame) or isinstance(data, pd.Series):
            # BUG FIX: Series.iteritems() was removed in pandas 2.0;
            # items() is the equivalent on every supported version.
            for col, encoder in rule.dropna().items():
                # Need to rewrite this. We transform the thing twice to get the count of neutral!
                ss_tally = self.rules[encoder].count_neutral(data[col] if isinstance(data, pd.DataFrame) else data)
                # If encoder does not have neutral, it will return None
                if ss_tally is not None:
                    if total is None:
                        total = pd.DataFrame([ss_tally]).T
                    else:
                        total = pd.concat([total, ss_tally], axis=1)
                else:
                    continue
        # None will result if there is no neutral specified
        if total is not None:
            total = total.sum(axis=1)
            total.rename("Neutral count", inplace=True)
        if return_flag:
            return (total, rule)
        else:
            return total
|
nilq/baby-python
|
python
|
from rest_framework import serializers
from accounts.models import User
class AccountSerializer(serializers.ModelSerializer):
    """Registration serializer enforcing unique username and email.

    BUG FIX: the original wrapped both uniqueness lookups in one
    try/except User.DoesNotExist.  When the username was free, the first
    ``get()`` raised DoesNotExist and the email check was silently
    skipped, allowing duplicate emails.  ``exists()`` queries avoid the
    exception-driven control flow entirely.
    """

    def validate(self, attrs):
        username = attrs.get('username', None)
        email = attrs.get('email', None)
        if not (email and username):
            raise serializers.ValidationError('Fill all fields')
        if User.objects.filter(username=username).exists():
            raise serializers.ValidationError('This username is already used.')
        if User.objects.filter(email=email).exists():
            raise serializers.ValidationError('This email is already registered')
        return attrs

    class Meta:
        model = User
        fields = ('password', 'username', 'email')
|
nilq/baby-python
|
python
|
import os
import pdb
import random
import sys
import time
from pprint import pformat
import numpy
import torch as tc
import torch.nn as nn
from tqdm import tqdm
from utils.logger import Logger
from utils.random_seeder import set_random_seed
from config import get_config
from training_procedure import Trainer
def main(C , logger , run_id = 0):
    """Train for up to C.num_epoch epochs with early stopping on the dev set.

    Returns (model, best-epoch dev result, best-epoch test result,
    last-epoch train result).
    """
    trainer = Trainer(C = C , logger = logger)
    trainer.flags["change split"] = (run_id % C.change_split == 0)
    (graph , labels) , (train_nodes , dev_nodes , test_nodes) , model , (optimizer , loss_func) = trainer.init(idx = run_id)

    maj_metric = "micro"            # metric used for model selection
    best_metric = 0
    best_metric_epoch = -1          # epoch of the best dev score
    report_dev_res = 0
    report_tes_res = 0
    patience_cnt = 0

    progress = tqdm(range(C.num_epoch) , ncols = 130)
    for epoch_id in progress:
        model , loss = trainer.train(graph, labels, train_nodes, model, loss_func, optimizer)
        dev_res, tes_res, tra_res = trainer.evaluate(
            graph, labels, [dev_nodes, test_nodes, train_nodes], model, loss_func
        )

        now_metric = dev_res[maj_metric]
        improved = C.no_dev or best_metric <= now_metric
        if improved:
            best_metric = now_metric
            best_metric_epoch = epoch_id
            report_dev_res = dev_res
            report_tes_res = tes_res
            patience_cnt = 0
        else:
            patience_cnt += 1
        if C.patience > 0 and patience_cnt >= C.patience:
            break   # early stop: no dev improvement for C.patience epochs

        postfix_str = "<%d> [Dev] %.2f [Test] %.2f (%.2f) [Train] %.2f" % ( epoch_id ,
            dev_res[maj_metric], tes_res[maj_metric], report_tes_res[maj_metric], tra_res[maj_metric]
        )
        progress.set_postfix_str(postfix_str)

    logger.log("best epoch is %d" % best_metric_epoch)
    logger.log("Best Epoch Valid Acc is %.2f" % (report_dev_res[maj_metric]))
    logger.log("Best Epoch Test Acc is %.2f" % (report_tes_res[maj_metric]))
    # note returned tra_res is always that of last epoch
    return model , report_dev_res , report_tes_res , tra_res
if __name__ == "__main__":
    C = get_config()
    # init logger
    logger = Logger(mode = [print])
    logger.add_line = lambda : logger.log("-" * 50)
    logger.log(" ".join(sys.argv))
    logger.add_line()
    logger.log()
    # Optional global seed, applied before the per-run seeds are drawn so the
    # whole multi-run experiment is reproducible.
    if C.seed > 0:
        set_random_seed(C.seed)
        logger.log ("Seed set. %d" % (C.seed))
    # start run
    seeds = [random.randint(0,233333333) for _ in range(C.multirun)]
    dev_ress = []
    tes_ress = []
    tra_ress = []
    for run_id in range(C.multirun):
        logger.add_line()
        logger.log ("\t\t%d th Run" % run_id)
        logger.add_line()
        # Re-seed per run so each repetition is independently reproducible.
        set_random_seed(seeds[run_id])
        logger.log ("Seed set to %d." % seeds[run_id])
        model , dev_res , tes_res , tra_res = main(C , logger , run_id)
        logger.log("%d th Run ended. Best Epoch Valid Result is %s" % (run_id , str(dev_res)))
        logger.log("%d th Run ended. Best Epoch Test Result is %s" % (run_id , str(tes_res)))
        logger.log("%d th Run ended. Final Train Result is %s" % (run_id , str(tra_res)))
        dev_ress.append(dev_res)
        tes_ress.append(tes_res)
        tra_ress.append(tra_res)
    logger.add_line()
    # Report mean / population-std of every metric over the runs.
    for metric in ["micro" , "macro"]:
        for res , name in zip(
                [dev_ress , tes_ress , tra_ress] ,
                ["Dev" , "Test" , "Train"]
            ):
            now_res = [x[metric] for x in res]
            logger.log ("%s of %s : %s" % (metric , name , str([round(x,2) for x in now_res])))
            avg = sum(now_res) / C.multirun
            std = (sum([(x - avg) ** 2 for x in now_res]) / C.multirun) ** 0.5
            logger.log("%s of %s : avg / std = %.2f / %.2f" % (metric , name , avg , std))
        logger.log("")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python2
# coding:utf-8
import base64
import copy
import errno
import getopt
import json
import logging
import os
import sys
import threading
import time
import traceback
import boto3
import oss2
import yaml
from botocore.client import Config
from pykit import jobq
# Serializes console reporting between the reporter thread and the main thread.
report_state_lock = threading.RLock()
# Global counters/lists describing sync progress.  Mutated only by the single
# update_sync_stat() jobq worker and dumped to STATE_FILE at exit.
ali_sync_state = {
    'total_n': 0,
    'total_bytes': 0,
    'no_content_md5': 0,
    'no_content_md5_list': [],
    'exist': 0,
    'check_need_s3_error': 0,
    'check_need_s3_error_list': [],
    'size_override': 0,
    'md5_equal': 0,
    'default_override': 0,
    'default_not_override': 0,
    'piped': 0,
    'piped_bytes': 0,
    'pipe_succeed': 0,
    'pipe_succeed_bytes': 0,
    'pipe_failed': 0,
    'pipe_failed_bytes': 0,
    'pipe_failed_exception_error': 0,
    'pipe_failed_exception_error_list': [],
    'pipe_failed_ali_file_size_error': 0,
    'pipe_failed_ali_file_size_error_list': [],
    'pipe_failed_ali_md5_error': 0,
    'pipe_failed_ali_md5_error_list': [],
    'compared': 0,
    'compare_succeed': 0,
    'compare_failed': 0,
    'compare_failed_not_found_error': 0,
    'compare_failed_not_found_error_list': [],
    'compare_failed_exception_error': 0,
    'compare_failed_exception_error_list': [],
    'compare_failed_size_error': 0,
    'compare_failed_size_error_list': [],
    'compare_failed_content_type_error': 0,
    'compare_failed_content_type_error_list': [],
    'compare_failed_content_md5_error': 0,
    'compare_failed_content_md5_error_list': [],
}
# Header prefix Ali OSS uses for user-defined object metadata.
ali_meta_prefix = 'x-oss-meta-'
def add_logger():
    """Configure and return the root logger, writing to a per-bucket file
    under cnf['LOG_DIR']."""
    path = os.path.join(cnf['LOG_DIR'],
                        'ali-sync-for-' + cnf['ALI_BUCKET_NAME'] + '.log')
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    handler = logging.FileHandler(path)
    handler.setFormatter(
        logging.Formatter('[%(asctime)s, %(levelname)s] %(message)s'))
    root.addHandler(handler)
    return root
def _mkdir(path):
    """mkdir -p: create *path* with mode 0755, tolerating an existing dir."""
    try:
        # BUG FIX: "0755" is Python-2-only octal syntax (a SyntaxError on
        # Python 3); "0o755" parses identically on Python 2.6+ and 3.
        os.makedirs(path, 0o755)
    except OSError as e:
        # BUG FIX: indexing the exception (e[0]) only works on Python 2;
        # e.errno is the portable spelling.
        if e.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise
def _thread(func, args):
    """Spawn and return a started daemon thread running func(*args)."""
    worker = threading.Thread(target=func, args=args)
    worker.daemon = True   # do not block interpreter exit
    worker.start()
    return worker
def get_conf(conf_path):
    """Load and return the YAML configuration file at *conf_path* as a dict."""
    with open(conf_path) as conf_file:
        return yaml.safe_load(conf_file.read())
def get_boto_client(endpoint):
    # Build the S3 client for the destination (Baishan) cluster; credentials
    # come from the global config.  use_ssl=False: plain HTTP to the endpoint.
    client = boto3.client(
        's3',
        use_ssl=False,
        aws_access_key_id=cnf['BAISHAN_ACCESS_KEY'],
        aws_secret_access_key=cnf['BAISHAN_SECRET_KEY'],
        config=Config(signature_version='s3v4'),
        region_name='us-east-1',
        endpoint_url=endpoint,
    )
    return client
def load_progress():
    """Return the persisted iteration progress, or a fresh zeroed record."""
    if not os.path.isfile(cnf['PROGRESS_FILE']):
        # First run (or progress was cleared): start from the beginning.
        return {
            'marker': '',
            'total_n': 0,
            'total_size': 0,
        }
    with open(cnf['PROGRESS_FILE'], 'r') as progress_file:
        return json.loads(progress_file.read())
def store_progress():
    """Persist current_progress as JSON so a rerun can resume at the marker."""
    with open(cnf['PROGRESS_FILE'], 'w') as progress_file:
        json.dump(current_progress, progress_file)
def clear_progress():
    # Delete the persisted progress file; the next sync starts from scratch.
    os.remove(cnf['PROGRESS_FILE'])
def iter_files():
    # Generator over OSS objects, resuming from the persisted marker (or
    # START_MARKER, whichever sorts later) and stopping after END_MARKER.
    marker = current_progress['marker']
    start_marker = cnf.get('START_MARKER', '')
    if start_marker > marker:
        marker = start_marker
    end_marker = cnf.get('END_MARKER', None)
    for file_object in oss2.ObjectIterator(oss2_bucket, prefix=cnf['PREFIX'], marker=marker):
        if end_marker and file_object.key > end_marker:
            break
        yield file_object
        # Progress is advanced only after the consumer finished this object,
        # and checkpointed to disk every 10000 files.
        current_progress['total_n'] += 1
        current_progress['total_size'] += file_object.size
        current_progress['marker'] = file_object.key
        if current_progress['total_n'] % 10000 == 0:
            store_progress()
    store_progress()
def get_ali_user_meta(headers):
    """Extract user-defined metadata ('x-oss-meta-*' headers) as a dict.

    Keys are lower-cased and stripped of the prefix; values are verbatim.
    """
    meta = {}
    # dict.iteritems() is Python-2-only; items() behaves the same on 2 and 3.
    for k, v in headers.items():
        if k.lower().startswith(ali_meta_prefix):
            meta_name = k.lower()[len(ali_meta_prefix):]
            meta[meta_name] = v
    return meta
def validate_and_extract_ali_file_info(resp_object, result):
    """Cross-check the GET response against the listing entry.

    Returns a dict with size/content_type/meta (plus content_md5 when the
    source provides one), or None when size or md5 disagree; failure
    details are recorded into *result* for the stats aggregator.
    """
    file_object = result['file_object']
    if resp_object.content_length != file_object.size:
        result['pipe_failed_ali_file_size_error'] = {
            'key': file_object.key,
            'content_length': resp_object.content_length,
            'size': file_object.size,
        }
        # BUG FIX: the log line read result['ali_file_size_error'], a key
        # that is never set, so this error path raised KeyError.
        logger.warn('ali file size error' +
                    repr(result['pipe_failed_ali_file_size_error']))
        return
    ali_file_info = {
        'size': file_object.size,
        'content_type': resp_object.content_type,
    }
    if 'Content-MD5' in resp_object.headers:
        md5 = base64.b64decode(resp_object.headers['Content-MD5'])
        md5 = md5.encode('hex')  # Python-2 hex codec, as used file-wide
        if md5 != file_object.etag.lower():
            result['pipe_failed_ali_md5_error'] = {
                'key': file_object.key,
                'content_md5': md5,
                'etag': file_object.etag.lower(),
            }
            # BUG FIX: same KeyError problem with result['ali_md5_error'].
            logger.warn('ali md5 error' +
                        repr(result['pipe_failed_ali_md5_error']))
            return
        ali_file_info['content_md5'] = md5
    else:
        # Source object has no Content-MD5 (e.g. multipart/appendable).
        result['no_content_md5'] = {
            'key': file_object.key,
            'object_type': file_object.type,
        }
    ali_file_info['meta'] = get_ali_user_meta(resp_object.headers)
    return ali_file_info
def get_s3_file_info(s3_key):
    # HEAD the destination object and normalize the fields we compare on.
    resp = s3_client.head_object(
        Bucket=cnf['BAISHAN_BUCKET_NAME'],
        Key=s3_key,
    )
    s3_file_info = {
        'size': resp['ContentLength'],
        'content_type': resp['ContentType'],
        'meta': resp['Metadata'],
        # ETag arrives quoted; for non-multipart uploads it is the MD5 hex.
        'content_md5': resp['ETag'].lower().strip('"'),
    }
    return s3_file_info
def compare_file_info(ali_file_info, s3_file_info, result, th_status):
    """Field-by-field comparison of source and destination file info.

    Returns True when size, content type, user metadata and (when present)
    content md5 all agree; otherwise records the first mismatch into
    *result* / *th_status* and returns False.
    """
    if ali_file_info['size'] != s3_file_info['size']:
        th_status['compare_failed_size_error_n'] = th_status.get(
            'compare_failed_size_error_n', 0) + 1
        result['compare_failed_size_error'] = {
            'key': result['file_object'].key,
            'ali_file_size': ali_file_info['size'],
            's3_file_size': s3_file_info['size'],
        }
        return False
    if ali_file_info['content_type'] != s3_file_info['content_type']:
        th_status['compare_failed_content_type_error_n'] = th_status.get(
            'compare_failed_content_type_error_n', 0) + 1
        result['compare_failed_content_type_error'] = {
            'key': result['file_object'].key,
            'ali_content_type': ali_file_info['content_type'],
            's3_content_type': s3_file_info['content_type'],
        }
        return False
    # BUG FIX: dict.iteritems() is Python-2-only; items() works on 2 and 3.
    for k, v in ali_file_info['meta'].items():
        if k not in s3_file_info['meta'] or v != s3_file_info['meta'][k]:
            th_status['compare_failed_meta_error_n'] = th_status.get(
                'compare_failed_meta_error_n', 0) + 1
            result['compate_failed_meta_error'] = {
                'key': result['file_object'].key,
                'ali_meta': repr(ali_file_info['meta']),
                's3_meta': repr(s3_file_info['meta']),
            }
            return False
    # Only compare md5 when the source actually supplied one.
    if 'content_md5' in ali_file_info:
        if ali_file_info['content_md5'] != s3_file_info['content_md5']:
            th_status['compare_failed_content_md5_error_n'] = th_status.get(
                'compare_failed_content_md5_error_n', 0) + 1
            result['compare_failed_content_md5_error'] = {
                'key': result['file_object'].key,
                'ali_content_md5': ali_file_info['content_md5'],
                's3_content_md5': s3_file_info['content_md5'],
            }
            return False
    return True
def compare_file(result, th_status):
    """Verify the uploaded S3 copy matches the Ali source.

    Returns True on success; on failure records the reason into *result*
    (consumed by update_sync_stat) and *th_status*.
    """
    result['compared'] = True
    th_status['compared_n'] = th_status.get('compared_n', 0) + 1
    try:
        s3_file_info = get_s3_file_info(result['s3_key'])
    except Exception as e:
        result['compare_failed'] = True
        th_status['compare_failed_n'] = th_status.get(
            'compare_failed_n', 0) + 1
        if hasattr(e, 'message') and 'Not Found' in e.message:
            th_status['compare_failed_not_found_n'] = th_status.get(
                'compare_failed_not_found_n', 0) + 1
            # BUG FIX: this dict used to go under
            # 'compare_failed_not_found_err' (a key nothing reads) while the
            # key update_sync_stat appends was left as a bare True; store the
            # details under the consumed key.
            result['compare_failed_not_found_error'] = {
                'key': result['file_object'].key,
                'error': repr(e),
            }
            logger.error('file not exist is s3 when compare file %s: %s' %
                         (result['s3_key'], traceback.format_exc()))
        else:
            th_status['compare_failed_exception_n'] = th_status.get(
                'compare_failed_exception_n', 0) + 1
            result['compare_failed_exception_error'] = {
                'key': result['file_object'].key,
                'error': repr(e),
            }
            logger.error('got exception when get s3 file info %s: %s' %
                         (result['s3_key'], traceback.format_exc()))
        return False
    ali_file_info = result['ali_file_info']
    if not compare_file_info(ali_file_info, s3_file_info, result, th_status):
        # BUG FIX: was 'compared_failed' (typo), so aggregated stats never
        # saw content-mismatch failures.
        result['compare_failed'] = True
        th_status['compare_failed_n'] = th_status.get(
            'compare_failed_n', 0) + 1
        return False
    result['compare_succeed'] = True
    th_status['compare_succeed_n'] = th_status.get('compare_succeed_n', 0) + 1
    return True
def pipe_file(result, th_status):
    # Stream one object from Ali OSS into the destination S3 bucket.
    # Returns True on success; failure details are recorded into *result*.
    result['piped'] = True
    th_status['piped_n'] = th_status.get('piped_n', 0) + 1
    def update_pipe_progress(done_bytes, total_bytes):
        # Called by the OSS SDK while downloading; surfaced in the reporter.
        th_status['pipe_progress'] = (done_bytes, total_bytes)
    file_object = result['file_object']
    try:
        resp_object = oss2_bucket.get_object(
            file_object.key, progress_callback=update_pipe_progress)
        # Validate size/md5 against the listing before uploading.
        ali_file_info = validate_and_extract_ali_file_info(resp_object, result)
        if ali_file_info == None:
            result['pipe_failed'] = True
            th_status['pipe_failed_n'] = th_status.get('pipe_failed_n', 0) + 1
            return False
        extra_args = {
            'ACL': cnf['FILE_ACL'],
            'ContentType': ali_file_info['content_type'],
            'Metadata': ali_file_info['meta'],
        }
        # Stream the OSS response body straight into S3 (no local buffering).
        s3_client.upload_fileobj(resp_object, cnf['BAISHAN_BUCKET_NAME'],
                                 result['s3_key'], ExtraArgs=extra_args)
        result['pipe_succeed'] = True
        th_status['pipe_succeed_n'] = th_status.get('pipe_succeed_n', 0) + 1
        result['ali_file_info'] = ali_file_info
        return True
    except Exception as e:
        result['pipe_failed'] = True
        th_status['pipe_failed_n'] = th_status.get('pipe_failed_n', 0) + 1
        result['pipe_failed_exception_error'] = {
            'key': file_object.key,
            'error': repr(e),
        }
        logger.error('got exception when pipe file %s: %s' %
                     (file_object.key, traceback.format_exc()))
        return False
def convert_key(key):
    """Map an Ali OSS object key to its destination S3 key (identity for now)."""
    s3_key = key
    return s3_key
def check_need(result, th_status):
    # Decide whether this file must be copied; returns True to copy.
    if not cnf['CHECK_EXIST']:
        # Existence check disabled: always copy.
        return True
    file_object = result['file_object']
    try:
        s3_file_info = get_s3_file_info(result['s3_key'])
    except Exception as e:
        if hasattr(e, 'message') and 'Not Found' in e.message:
            # Destination object missing: definitely copy.
            return True
        else:
            th_status['check_need_s3_error_n'] = th_status.get(
                'check_need_s3_error_n', 0) + 1
            result['check_need_s3_error'] = {
                'key': result['s3_key'],
                'error': repr(e),
            }
            logger.error('faied to get s3 file info in check need %s: %s' %
                         (result['s3_key'], traceback.format_exc()))
            # Can't tell whether it exists: skip rather than risk a bad copy.
            return False
    result['exist'] = True
    th_status['exist_n'] = th_status.get('exist_n', 0) + 1
    if s3_file_info['size'] != file_object.size:
        # Sizes differ: destination is stale or truncated -> re-copy.
        result['size_override'] = True
        th_status['size_override_n'] = th_status.get('size_override_n', 0) + 1
        logger.info(('need to override file: %s, because size not equal, ' +
                     'ali_size: %d, s3_size: %d') %
                    (result['s3_key'], file_object.size, s3_file_info['size']))
        return True
    if s3_file_info['content_md5'].lower() == file_object.etag.lower():
        # Same content already present -> skip.
        result['md5_equal'] = True
        th_status['md5_equal_n'] = th_status.get('md5_equal_n', 0) + 1
        return False
    # Same size, different md5: defer to the configured OVERRIDE policy.
    if cnf['OVERRIDE']:
        result['default_override'] = True
        th_status['default_override_n'] = th_status.get(
            'default_override_n', 0) + 1
        return True
    else:
        result['default_not_override'] = True
        th_status['default_not_override_n'] = th_status.get(
            'default_not_override_n', 0) + 1
        return False
def sync_one_file(file_object):
    """jobq worker: sync one OSS object and return its result record."""
    th_name = threading.current_thread().getName()
    th_status = thread_status.setdefault(th_name, {})
    th_status['total_n'] = th_status.get('total_n', 0) + 1
    result = {
        'file_object': file_object,
        's3_key': convert_key(file_object.key)
    }
    # Run the pipeline stages in order; stop at the first one that fails
    # or decides the file can be skipped.
    for stage in (check_need, pipe_file, compare_file):
        if not stage(result, th_status):
            break
    return result
def update_sync_stat(result):
    """jobq aggregator (single-threaded): fold one file's result record
    into the global ali_sync_state counters and error lists."""
    file_object = result['file_object']
    ali_sync_state['total_n'] += 1
    ali_sync_state['total_bytes'] += file_object.size
    if 'no_content_md5' in result:
        ali_sync_state['no_content_md5'] += 1
        ali_sync_state['no_content_md5_list'].append(result['no_content_md5'])
    if 'check_need_s3_error' in result:
        ali_sync_state['check_need_s3_error'] += 1
        ali_sync_state['check_need_s3_error_list'].append(
            result['check_need_s3_error'])
        return
    if 'exist' in result:
        ali_sync_state['exist'] += 1
    # Exactly one of these skip/override outcomes is set by check_need().
    if 'size_override' in result:
        ali_sync_state['size_override'] += 1
    elif 'md5_equal' in result:
        ali_sync_state['md5_equal'] += 1
    elif 'default_override' in result:
        ali_sync_state['default_override'] += 1
    elif 'default_not_override' in result:
        ali_sync_state['default_not_override'] += 1
    if not 'piped' in result:
        return
    ali_sync_state['piped'] += 1
    ali_sync_state['piped_bytes'] += file_object.size
    if 'pipe_failed' in result:
        ali_sync_state['pipe_failed'] += 1
        ali_sync_state['pipe_failed_bytes'] += file_object.size
        if 'pipe_failed_exception_error' in result:
            ali_sync_state['pipe_failed_exception_error'] += 1
            ali_sync_state['pipe_failed_exception_error_list'].append(
                result['pipe_failed_exception_error'])
        elif 'pipe_failed_ali_file_size_error' in result:
            ali_sync_state['pipe_failed_ali_file_size_error'] += 1
            ali_sync_state['pipe_failed_ali_file_size_error_list'].append(
                result['pipe_failed_ali_file_size_error'])
        elif 'pipe_failed_ali_md5_error' in result:
            ali_sync_state['pipe_failed_ali_md5_error'] += 1
            ali_sync_state['pipe_failed_ali_md5_error_list'].append(
                result['pipe_failed_ali_md5_error'])
        return
    ali_sync_state['pipe_succeed'] += 1
    ali_sync_state['pipe_succeed_bytes'] += file_object.size
    if not 'compared' in result:
        return
    if 'compare_failed' in result:
        ali_sync_state['compare_failed'] += 1
        if 'compare_failed_not_found_error' in result:
            ali_sync_state['compare_failed_not_found_error'] += 1
            ali_sync_state['compare_failed_not_found_error_list'].append(
                result['compare_failed_not_found_error'])
        elif 'compare_failed_exception_error' in result:
            ali_sync_state['compare_failed_exception_error'] += 1
            ali_sync_state['compare_failed_exception_error_list'].append(
                result['compare_failed_exception_error'])
        elif 'compare_failed_size_error' in result:
            ali_sync_state['compare_failed_size_error'] += 1
            ali_sync_state['compare_failed_size_error_list'].append(
                result['compare_failed_size_error'])
        elif 'compare_failed_content_md5_error' in result:
            ali_sync_state['compare_failed_content_md5_error'] += 1
            # BUG FIX: md5 errors were appended to the *exception* error
            # list; use the matching content_md5 list.
            ali_sync_state['compare_failed_content_md5_error_list'].append(
                result['compare_failed_content_md5_error'])
        return
    ali_sync_state['compare_succeed'] += 1
def report_thread_status(th_status):
    """Print one worker thread's counters as three summary lines."""
    total_n = th_status.get('total_n', 0)
    s3_error_n = th_status.get('check_need_s3_error_n', 0)
    exist_n = th_status.get('exist_n', 0)
    size_override_n = th_status.get('size_override_n', 0)
    md5_equal_n = th_status.get('md5_equal_n', 0)
    d_override_n = th_status.get('default_override_n', 0)
    d_not_override_n = th_status.get('default_not_override_n', 0)
    piped_n = th_status.get('piped_n', 0)
    pipe_succeed_n = th_status.get('pipe_succeed_n', 0)
    pipe_failed_n = th_status.get('pipe_failed_n', 0)
    pipe_progress = th_status.get('pipe_progress', (0, 0))
    compared_n = th_status.get('compared_n', 0)
    compare_succeed_n = th_status.get('compare_succeed_n', 0)
    compare_failed_n = th_status.get('compare_failed_n', 0)
    not_found_n = th_status.get('compare_failed_not_found_n', 0)
    exception_n = th_status.get('compare_failed_exception_n', 0)
    size_error_n = th_status.get('compare_failed_size_error_n', 0)
    # BUG FIX: the two keys below were misspelled ('..._typ_error_n' and
    # 'compate_...'), so these counters always printed 0; use the keys the
    # compare routines actually increment.
    content_type_error_n = th_status.get(
        'compare_failed_content_type_error_n', 0)
    meta_error_n = th_status.get('compare_failed_meta_error_n', 0)
    content_md5_error_n = th_status.get(
        'compare_failed_content_md5_error_n', 0)
    print (('total: %d, get s3 file info failed: %s, exist: %d, size ' +
            'override: %d, md5_equal: %d, default override: %d, default' +
            'not override: %d ') %
           (total_n, s3_error_n, exist_n, size_override_n, md5_equal_n,
            d_override_n, d_not_override_n))
    print ('piped: %d, pipe succeed: %d, pipe failed: %d, pipe grogress: %s' %
           (piped_n, pipe_succeed_n, pipe_failed_n, repr(pipe_progress)))
    print (('compared: %d, compare succeed: %d, compare failed: %d, not ' +
            'found: %d, exception: %d, size error: %d, type error: %d, ' +
            'meta error: %d, md5 error: %d') %
           (compared_n, compare_succeed_n, compare_failed_n, not_found_n,
            exception_n, size_error_n, content_type_error_n,
            meta_error_n, content_md5_error_n))
def _report_state():
    # Dump the whole sync state to stdout (Python-2 print statements).
    # Called under report_state_lock via report_state().
    # os.system('clear')
    print (('ali bucket name: %s, prefix: %s, start marker: %s, ' +
            'end marker: %s, baishan bucket name: %s') %
           (cnf['ALI_BUCKET_NAME'], cnf['PREFIX'], cnf['START_MARKER'],
            cnf['END_MARKER'], cnf['BAISHAN_BUCKET_NAME']))
    print ''
    # Previous vs. current listing progress (markers allow resuming).
    print (('previous iter progress: total number: %d, ' +
            'total size: %d, marker: %s') %
           (previous_progress['total_n'],
            previous_progress['total_size'],
            previous_progress['marker']))
    print (('current iter progress: total number: %d, ' +
            'total size: %d, marker: %s') %
           (current_progress['total_n'],
            current_progress['total_size'],
            current_progress['marker']))
    print ''
    print ('total number: %d, total bytes: %d, no content md5: %d' %
           (ali_sync_state['total_n'], ali_sync_state['total_bytes'],
            ali_sync_state['no_content_md5']))
    print ''
    print 'check exist: %s' % cnf['CHECK_EXIST']
    print 'get s3 file info failed: %d' % ali_sync_state['check_need_s3_error']
    print (('exist: %d, size_override: %d, md5_equal: %d, ' +
            'default_override: %d, default_not_override: %d') %
           (ali_sync_state['exist'],
            ali_sync_state['size_override'],
            ali_sync_state['md5_equal'],
            ali_sync_state['default_override'],
            ali_sync_state['default_not_override']))
    print ''
    # Transfer (pipe) statistics.
    print 'piped: %d, piped_bytes: %d' % (ali_sync_state['piped'],
                                          ali_sync_state['piped_bytes'])
    print ('pipe succeed: %d, pipe succeed bytes: %d' %
           (ali_sync_state['pipe_succeed'],
            ali_sync_state['pipe_succeed_bytes']))
    print ('pipe failed: %d, pipe failed bytes: %d' %
           (ali_sync_state['pipe_failed'],
            ali_sync_state['pipe_failed_bytes']))
    print (('pipe failed reason: exception: %d, ali file size error: %d, ' +
            'ali md5 error: %d') %
           (ali_sync_state['pipe_failed_exception_error'],
            ali_sync_state['pipe_failed_ali_file_size_error'],
            ali_sync_state['pipe_failed_ali_md5_error']))
    print ''
    # Post-upload verification statistics.
    print ('compared: %d, compare_succeed: %d, compare_failed: %d' %
           (ali_sync_state['compared'],
            ali_sync_state['compare_succeed'],
            ali_sync_state['compare_failed']))
    print (('compare failed reason: not found: %d, exception: %d, ' +
            'size error: %d, content type error: %d, content md5 error: %d') %
           (ali_sync_state['compare_failed_not_found_error'],
            ali_sync_state['compare_failed_exception_error'],
            ali_sync_state['compare_failed_size_error'],
            ali_sync_state['compare_failed_content_type_error'],
            ali_sync_state['compare_failed_content_md5_error']))
    print ''
    print 'threads status:'
    for th_name, th_status in thread_status.iteritems():
        print th_name
        report_thread_status(th_status)
        print ''
def report_state():
    """Thread-safe wrapper around _report_state()."""
    report_state_lock.acquire()
    try:
        _report_state()
    finally:
        report_state_lock.release()
def report(sess):
    """Reporter loop: print state every REPORT_INTERVAL until sess['stop']."""
    while True:
        if sess['stop']:
            break
        report_state()
        time.sleep(cnf['REPORT_INTERVAL'])
def dump_state():
    """Write the accumulated sync counters to STATE_FILE as JSON."""
    with open(cnf['STATE_FILE'], 'w') as state_file:
        json.dump(ali_sync_state, state_file)
def sync():
    # Main pipeline: 3 worker threads copy files, 1 aggregator folds stats,
    # with a background reporter printing progress until the queue drains.
    try:
        report_sess = {'stop': False}
        report_th = _thread(report, (report_sess,))
        jobq.run(iter_files(), [(sync_one_file, 3),
                                (update_sync_stat, 1),
                                ])
        report_sess['stop'] = True
        report_th.join()
        report_state()
        dump_state()
    except KeyboardInterrupt:
        # On Ctrl-C still show and persist what was accomplished so far.
        report_state()
        dump_state()
        sys.exit(0)
if __name__ == "__main__":
    # Usage: script [--conf path] (sync | clear_progress)
    opts, args = getopt.getopt(sys.argv[1:], '', ['conf=', ])
    opts = dict(opts)
    if opts.get('--conf') is None:
        conf_path = '../conf/ali_sync.yaml'
    else:
        conf_path = opts['--conf']
    cnf = get_conf(conf_path)
    # Source (Ali OSS) and destination (S3-compatible) clients.
    oss2_auth = oss2.Auth(cnf['ALI_ACCESS_KEY'], cnf['ALI_SECRET_KEY'])
    oss2_bucket = oss2.Bucket(
        oss2_auth, cnf['ALI_ENDPOINT'], cnf['ALI_BUCKET_NAME'])
    s3_client = get_boto_client(cnf['BAISHAN_ENDPOINT'])
    _mkdir(cnf['LOG_DIR'])
    logger = add_logger()
    # Per-worker-thread progress counters, keyed by thread name.
    thread_status = {}
    cmd = args[0]
    if cmd == 'sync':
        current_progress = load_progress()
        previous_progress = copy.deepcopy(current_progress)
        sync()
    elif cmd == 'clear_progress':
        clear_progress()
|
nilq/baby-python
|
python
|
from __future__ import print_function
import os
import argparse
import numpy as np
from module.retinaface_function_in_numpy import PriorBox_in_numpy
from utils.retinaface_tool_in_numpy import py_cpu_nms_in_numpy, decode_in_numpy, decode_landm_in_numpy
import cv2
from module.retinaface_model_in_numpy import RetinaFace
from hyperparams import Hyperparams
def get_retinaface_net(cfg_path="E:/py_file/temFace/temFace/data/retinate.npy"):
    '''
    be used to get class net

    :param cfg_path: path of the pickled config .npy file.  Defaults to the
        previously hard-coded location, so existing callers are unaffected.
    :return: a RetinaFace instance built from that config
    '''
    print('Loading network...')
    # The .npy file holds a pickled dict wrapped in a 0-d object array;
    # .item() unwraps it back into a plain dict.
    cfg = np.load(cfg_path, allow_pickle=True)
    cfg = cfg.item()
    # print(cfg)
    net = RetinaFace(cfg)
    return net
def face_detection(net,img_raw):
    # Run RetinaFace on a BGR image and return detections as lists of ints:
    # [x1, y1, x2, y2, score, then 5 landmark (x, y) pairs].
    cfg = Hyperparams().cfg_mnet
    img = np.float32(img_raw)
    # testing scale
    target_size = 1600
    max_size = 2150
    im_shape = img.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    resize = float(target_size) / float(im_size_min)
    # prevent bigger axis from being more than max_size
    if np.round(resize * im_size_max) > max_size:
        resize = float(max_size) / float(im_size_max)
    if True:
        # NOTE(review): resize is unconditionally forced to 1 here, which
        # disables the scaling logic above — presumably intentional; confirm.
        resize = 1
    if resize != 1:
        img = cv2.resize(img, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
    im_height, im_width, _ = img.shape
    scale = np.array([img.shape[1], img.shape[0], img.shape[1], img.shape[0]]).astype(np.float64)
    img -= (104, 117, 123)  # subtract the per-channel (BGR) means
    img = img.transpose(2, 0, 1)  # HWC -> CHW
    img = np.expand_dims(img,0)
    loc, conf, landms, test_size = net(img)
    # face prior (anchor) boxes for this input size
    priorbox = PriorBox_in_numpy(cfg, image_size=(im_height, im_width), test_size=test_size)
    priors = priorbox.forward()
    #prior_data = priors.data
    prior_data = priors
    boxes = decode_in_numpy(loc.squeeze(0), prior_data, cfg['variance'])
    boxes = boxes * scale / resize
    scores = conf.squeeze(0)[:, 1]
    landms = decode_landm_in_numpy(landms.squeeze(0), prior_data, cfg['variance'])
    scale1 = np.array([img.shape[3], img.shape[2], img.shape[3], img.shape[2],
                       img.shape[3], img.shape[2], img.shape[3], img.shape[2],
                       img.shape[3], img.shape[2]]).astype(np.float64)
    landms = landms * scale1 / resize
    # ignore low scores
    inds = np.where(scores > 0.02)[0]
    boxes = boxes[inds]
    landms = landms[inds]
    scores = scores[inds]
    # keep top-K before NMS
    order = scores.argsort()[::-1]  # argsort is ascending; reversed = descending
    #if len(order) > 10:
    #    order = order[:10]
    # order = scores.argsort()[::-1][:args.top_k]
    boxes = boxes[order]
    landms = landms[order]
    scores = scores[order]
    # do NMS
    dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
    #print("pre:{}".format(dets.shape))
    keep = py_cpu_nms_in_numpy(dets, 0.4)
    # keep = nms(dets, args.nms_threshold,force_cpu=args.cpu)
    dets = dets[keep, :]
    #print("back:{}".format(dets.shape))
    landms = landms[keep]
    # keep top-K faster NMS
    # dets = dets[:args.keep_top_k, :]
    # landms = landms[:args.keep_top_k, :]
    dets = np.concatenate((dets, landms), axis=1)  # append landmarks to detections
    faces = []
    for b in dets:
        if b[4] < 0.5:  # final confidence cut
            continue
        # text = "{:.4f}".format(b[4])
        b = list(map(int, b))
        #cv2.rectangle(img_raw, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)
        faces.append(b)
    #print("facesnum:{}".format(len(faces)))
    return faces
if __name__ == '__main__':
    # Smoke test: just load the network configuration/weights.
    get_retinaface_net()
|
nilq/baby-python
|
python
|
import logging
def init_logging(log_file, log_level):
    """Configure root logging to the console and, optionally, to *log_file*."""
    root_logger = logging.getLogger()
    root_logger.setLevel(log_level)
    formatter = logging.Formatter('%(asctime)s %(message)s')
    if log_file:
        file_handler = logging.FileHandler(log_file, encoding='utf8')
        file_handler.setFormatter(formatter)
        root_logger.addHandler(file_handler)
        logging.info('Logging to %s', log_file)
    # Console handler is always attached, after the optional file handler.
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)
    root_logger.addHandler(console_handler)
|
nilq/baby-python
|
python
|
from django.core.management.base import BaseCommand
from apps.boxes.models import BoxUpload
class Command(BaseCommand):
    """Management command that purges expired and completed box uploads."""
    help = 'Removes expired and completed boxes uploads'

    def handle(self, *args, **options):
        # delete() returns (total_deleted, per-model-counts); report the total.
        deleted = BoxUpload.objects.not_active().delete()
        message = ('Successfully removed {} expired '
                   'and completed uploads.'.format(deleted[0]))
        self.stdout.write(self.style.SUCCESS(message))
|
nilq/baby-python
|
python
|
from audhelper.audhelper import __version__, __author__
import setuptools
# The PyPI long description mirrors the GitHub README verbatim.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="audhelper",
    # Version and author are sourced from the package itself (single source).
    version=__version__,
    author=__author__,
    author_email="sherkfung@gmail.com",
    description="Audio helper functions including visualization and processing functions",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Zepyhrus/audhelper",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.4',
)
|
nilq/baby-python
|
python
|
import cv2
import numpy as np
# Template-matching demo: find every occurrence of temp2 inside basin.jpg
# and draw a yellow rectangle around each hit.
img = cv2.imread(r"EDIZ\OPENCV\basin.jpg")
gri = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)  # match on the grayscale image
temp1 = cv2.imread(r"EDIZ\OPENCV\temp1.jpg",0)
temp2 = cv2.imread(r"EDIZ\OPENCV\temp2.jpg",0)
w,h = temp2.shape[::-1]  # template size as (width, height)
res = cv2.matchTemplate(gri,temp2,cv2.TM_CCOEFF_NORMED)
thresh = 0.5
loc = np.where(res>=thresh)  # all positions scoring above the threshold
# print(zip(*loc[::-1]))
for pt in zip(*loc[::-1]):
    # one rectangle per detection: top-left pt to (x+w, y+h)
    cv2.rectangle(img,pt,(pt[0]+w,pt[1]+h),(0,255,255),2)
cv2.imshow("ilkresim",img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
nilq/baby-python
|
python
|
###
# Copyright 2017 Hewlett Packard Enterprise, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
# -*- coding: utf-8 -*-
""" Directory Command for rdmc """
import sys
import re
import getpass
from argparse import ArgumentParser, SUPPRESS, REMAINDER, Action, RawDescriptionHelpFormatter
from redfish.ris.rmc_helper import IloResponseError, IdTokenError
from rdmc_helper import ReturnCodes, InvalidCommandLineError, IncompatibleiLOVersionError,\
InvalidCommandLineErrorOPTS, NoContentsFoundForOperationError, \
ResourceExists, Encryption
# Sub-command names recognised by run(); any other input falls back to the
# 'default' parser built in definearguments().
__subparsers__ = ['ldap', 'kerberos', 'test']
# Maps a user-facing iLO privilege number to (privilege property name,
# which role list it lives in: 'AssignedPrivileges' or 'OemPrivileges').
# Consumed by update_mapping_privs().
PRIVKEY = {1: ('Login', 'AssignedPrivileges'),\
    2: ('RemoteConsolePriv', 'OemPrivileges'),\
    3: ('ConfigureUsers', 'AssignedPrivileges'),\
    4: ('ConfigureManager', 'AssignedPrivileges'),\
    5: ('VirtualMediaPriv', 'OemPrivileges'),\
    6: ('VirtualPowerAndResetPriv', 'OemPrivileges'),\
    7: ('HostNICConfigPriv', 'OemPrivileges'),\
    8: ('HostBIOSConfigPriv', 'OemPrivileges'),\
    9: ('HostStorageConfigPriv', 'OemPrivileges'),\
    10: ('SystemRecoveryConfigPriv', 'OemPrivileges'),\
    11: ('ConfigureSelf', 'AssignedPrivileges'),\
    12: ('ConfigureComponents', 'AssignedPrivileges')}
class _DirectoryParse(Action):
    """argparse Action that interprets paired enable/disable flags and the
    comma-separated role-map / search-string options into structured values
    on the namespace. Dispatch is on the option string itself.
    """
    def __init__(self, option_strings, dest, nargs, **kwargs):
        super(_DirectoryParse, self).__init__(option_strings, dest, nargs, **kwargs)
    def __call__(self, parser, namespace, values, option_strings):
        """ Helper for parsing options """
        # '--disable'/'--enable' map to booleans; the localauth pair is
        # checked afterwards so it is not swallowed by these suffix tests.
        if option_strings.endswith('disable'):
            setattr(namespace, self.dest, False)
        elif option_strings.endswith('enable'):
            setattr(namespace, self.dest, True)
        # NOTE(review): the mapping below looks inverted —
        # '--enablelocalauth' stores False and '--disablelocalauth' stores
        # True. run() later emits 'Enabled' when the stored value is truthy,
        # so confirm the intended LocalAccountAuth semantics before changing.
        elif option_strings.endswith('enablelocalauth'):
            setattr(namespace, self.dest, False)
        elif option_strings.endswith('disablelocalauth'):
            setattr(namespace, self.dest, True)
        elif option_strings == '--removerolemap':
            # Collect role names to delete: {'remove': [role, ...]}
            setattr(namespace, self.dest, {'remove': []})
            for role in next(iter(values)).split(','):
                role = role.replace('"', '')
                if role:
                    namespace.roles['remove'].append(role)
        elif option_strings == '--addrolemap':
            # Collect '<local role>:<remote group>[:SID]' pairs to add;
            # a local role of ';'-separated numbers is validated against
            # the 12 known privilege ids.
            setattr(namespace, self.dest, {'add': []})
            for role in next(iter(values)).split(','):
                role = role.replace('"', '')
                if role and re.match('.*:.*', role):
                    privs = role.split(':')[0].split(';')
                    if len(privs) > 1:
                        for priv in privs:
                            try:
                                if priv and int(priv) > 12:
                                    try:
                                        parser.error("Invalid privilege number added %s." % priv)
                                    except SystemExit:
                                        raise InvalidCommandLineErrorOPTS("")
                            except ValueError:
                                try:
                                    parser.error("Privileges must be added as numbers.")
                                except SystemExit:
                                    raise InvalidCommandLineErrorOPTS("")
                    namespace.roles['add'].append(role)
                else:
                    try:
                        parser.error("Supply roles to add in form <local role>:<remote group>")
                    except SystemExit:
                        raise InvalidCommandLineErrorOPTS("")
        elif option_strings == '--addsearch':
            # Collect LDAP search base strings: {'add': [...]}.
            setattr(namespace, self.dest, {'add': []})
            for search in next(iter(values)).split(','):
                if search:
                    namespace.search['add'].append(search)
        elif option_strings == '--removesearch':
            setattr(namespace, self.dest, {'remove': []})
            for search in next(iter(values)).split(','):
                if search:
                    namespace.search['remove'].append(search)
class DirectoryCommand():
    """ Update directory settings on the server """
    def __init__(self):
        # Command metadata consumed by the rdmc command loader.
        self.ident = {
            'name':'directory',\
            'usage': None,\
            'description':'\tAdd credentials, service address, two search strings, and enable'\
                '\n\tLDAP directory service, remote role groups (mapping), local custom role\n\t'\
                'IDs with privileges.\n\n\tTo view help on specific sub-commands'\
                ' run: directory <sub-command> -h\n\n\tExample: directory ldap -h\n',
            'summary':'Update directory settings, add/delete directory roles, and test directory '\
                'settings on the currently logged in server.',\
            'aliases': ['ad', 'activedirectory'],\
            'auxcommands': ["IloAccountsCommand"]
        }
        #self.definearguments(self.parser)
        # cmdbase/rdmc/auxcommands are placeholders here; presumably injected
        # by the framework before run() is invoked — confirm with the loader.
        self.cmdbase = None
        self.rdmc = None
        self.auxcommands = dict()
        #self.typepath = rdmcObj.app.typepath
        #self.iloaccounts = rdmcObj.commands_dict["IloAccountsCommand"](rdmcObj)
    def run(self, line):
        """Main directory Function
        :param line: string of arguments passed in
        :type line: str.
        """
        # Use the matching sub-parser when a known sub-command appears in the
        # line; otherwise fall back to the 'default' parser.
        try:
            ident_subparser = False
            for cmnd in __subparsers__:
                if cmnd in line:
                    (options, args) = self.rdmc.rdmc_parse_arglist(self, line)
                    ident_subparser = True
                    break
            if not ident_subparser:
                (options, args) = self.rdmc.rdmc_parse_arglist(self, line, default=True)
        except (InvalidCommandLineErrorOPTS, SystemExit):
            if ("-h" in line) or ("--help" in line):
                return ReturnCodes.SUCCESS
            else:
                raise InvalidCommandLineErrorOPTS("")
        self.directoryvalidation(options)
        # Directory settings require iLO 5 firmware 1.40 or newer.
        if self.rdmc.app.getiloversion() < 5.140:
            raise IncompatibleiLOVersionError("Directory settings are only available on "\
                "iLO 5 1.40 or greater.")
        results = None
        # Pull the current AccountService settings for the selected service
        # (LDAP or ActiveDirectory/Kerberos).
        if options.command.lower() == 'ldap' or ((True if options.ldap_kerberos == 'ldap' \
            else False) if hasattr(options, 'ldap_kerberos') else False):
            try:
                results = self.rdmc.app.select(selector='AccountService.', \
                    path_refresh=True)[0].dict
                path = results[self.rdmc.app.typepath.defs.hrefstring]
                oem = results['Oem'][self.rdmc.app.typepath.defs.oemhp]
                local_auth = results['LocalAccountAuth']
                results = results['LDAP']
                name = 'LDAP'
            except (KeyError, IndexError):
                raise NoContentsFoundForOperationError("Unable to gather LDAP settings.")
        elif options.command.lower() == 'kerberos' or ((True if options.ldap_kerberos == \
            'kerberos' else False) if hasattr(options, 'ldap_kerberos') else False):
            try:
                results = self.rdmc.app.select(selector='AccountService.', \
                    path_refresh=True)[0].dict
                path = results[self.rdmc.app.typepath.defs.hrefstring]
                oem = results['Oem'][self.rdmc.app.typepath.defs.oemhp]
                local_auth = results['LocalAccountAuth']
                results = results['ActiveDirectory']
                name = 'ActiveDirectory'
            except (KeyError, IndexError):
                raise NoContentsFoundForOperationError("Unable to gather Kerberos settings.")
        if results:
            keytab = None
            payload = {}
            if hasattr(options, 'keytab'):
                keytab = options.keytab
            # NOTE(review): both branches make the identical call, so the
            # except clause is a no-op retry — likely masks the original
            # intent of a fallback path.
            try:
                directory_settings = self.directory_helper(results, options)
            except IndexError:
                directory_settings = self.directory_helper(results, options)
            if directory_settings:
                payload[name] = directory_settings
            if hasattr(options, 'authmode'):
                if options.authmode:
                    payload.update({'Oem':{'Hpe':{'DirectorySettings': \
                        {'LdapAuthenticationMode': options.authmode}}}})
            # No changes requested: just display the current settings.
            if not payload and not keytab:
                if getattr(options, 'json', False):
                    self.rdmc.ui.print_out_json({name: results, 'LocalAccountAuth': local_auth, \
                        "Oem": {"Hpe": oem}})
                else:
                    self.print_settings(results, oem, local_auth, name)
            if payload:
                priv_patches = {}
                try:
                    if hasattr(options, "localauth"):
                        if options.localauth:
                            payload['LocalAccountAuth'] = 'Enabled' \
                                if options.localauth else 'Disabled'
                        elif local_auth:
                            payload['LocalAccountAuth'] = 'Enabled' if local_auth else 'Disabled'
                except (NameError, AttributeError):
                    payload['LocalAccountAuth'] = 'Disabled'
                # Best-effort pre-pass: role maps whose "local role" is a
                # ';'-separated privilege-number list are created as ReadOnly
                # first, then patched to the requested privileges afterwards.
                # Privilege 10 (SystemRecoveryConfigPriv per PRIVKEY) needs
                # the current session to hold the same privilege.
                # Any failure here is deliberately swallowed.
                try:
                    maps = {}
                    if payload.get('LDAP'):
                        maps = payload['LDAP'].get('RemoteRoleMapping', {})
                    elif payload.get('ActiveDirectory'):
                        maps = payload['ActiveDirectory'].get('RemoteRoleMapping', {})
                    #Check if we need to modify roles after creating
                    for mapping in maps:
                        privs = mapping['LocalRole'].split(';')
                        if len(privs) > 1:
                            privs = [int(priv) for priv in privs if priv]
                            if 10 in privs:
                                # NOTE(review): keyed 'iloaccounts' while
                                # __init__ registers "IloAccountsCommand" —
                                # confirm how auxcommands is keyed.
                                user_privs = self.auxcommands['iloaccounts'].getsesprivs()
                                if 'SystemRecoveryConfigPriv' not in user_privs.keys():
                                    raise IdTokenError("The currently logged in account "\
                                        "must have the System Recovery Config privilege to "\
                                        "add the System Recovery Config privilege to a local "\
                                        "role group.")
                            priv_patches[mapping['RemoteGroup']] = privs
                            mapping['LocalRole'] = "ReadOnly"
                except Exception:
                    pass
                self.rdmc.ui.printer("Changing settings...\n")
                try:
                    self.rdmc.app.patch_handler(path, payload)
                except IloResponseError as excp:
                    if not results['ServiceEnabled']:
                        self.rdmc.ui.error("You must enable this directory service before or "\
                            "during assignment of username and password. Try adding the flag "\
                            "--enable.\n", excp)
                    else:
                        raise IloResponseError
                if priv_patches:
                    self.update_mapping_privs(priv_patches)
            if keytab:
                # Keytab import goes through the OEM action target URI.
                path = oem['Actions'][next(iter(oem['Actions']))]['target']
                self.rdmc.ui.printer("Adding keytab...\n")
                self.rdmc.app.post_handler(path, {"ImportUri": keytab})
        elif options.command.lower() == 'test':
            self.test_directory(options, json=getattr(options, "json", False))
        self.cmdbase.logout_routine(self, options)
        #Return code
        return ReturnCodes.SUCCESS
    def update_mapping_privs(self, roles_to_update):
        """ Helper function to update created role mappings to match user privileges.
        :param roles_to_update: Dictionary of privileges to update.
        :type roles_to_update: dict
        """
        self.rdmc.ui.printer("Updating privileges of created role maps...\n")
        try:
            results = self.rdmc.app.select(selector='AccountService.', path_refresh=True)[0].dict
            roles = self.rdmc.app.getcollectionmembers(\
                self.rdmc.app.getidbytype('RoleCollection.')[0])
        except (KeyError, IndexError):
            raise NoContentsFoundForOperationError("Unable to gather Role settings. Roles may not "\
                "be updated to match privileges requested.")
        # Attach each role's RemoteGroup (from the LDAP role maps) so the
        # roles can be matched against the requested updates below.
        for rolemap in results['LDAP']['RemoteRoleMapping']:
            for role in roles:
                if role['RoleId'] == rolemap['LocalRole']:
                    role['RemoteGroup'] = rolemap['RemoteGroup']
                    break
        # Translate the privilege numbers into the two Redfish privilege
        # lists via PRIVKEY, then patch each matching role.
        for role in roles:
            privs = {'AssignedPrivileges' : [], 'OemPrivileges': []}
            for update_role in roles_to_update.keys():
                if role.get('RemoteGroup', None) == update_role:
                    for priv in roles_to_update[update_role]:
                        privs[PRIVKEY[priv][1]].append(PRIVKEY[priv][0])
                    try:
                        self.rdmc.app.patch_handler(role['@odata.id'], privs)
                        self.rdmc.ui.printer("Updated privileges for %s\n" % update_role)
                    except IloResponseError as excp:
                        self.rdmc.ui.error("Unable to update privileges for %s\n" % update_role, \
                            excp)
                    break
    def directory_helper(self, settings, options):
        """ Helper function to set the payload based on options and arguments
        :param settings: dictionary to change
        :type settings: dict.
        :param options: list of options
        :type options: list.
        """
        payload = {}
        serviceaddress = None
        # A literal '""' or "''" on the command line is the sentinel for
        # "clear this value".
        if hasattr(options, 'serviceaddress'):
            if isinstance(options.serviceaddress, str):
                serviceaddress = options.serviceaddress
                if serviceaddress == '""' or serviceaddress == "''":
                    serviceaddress = ''
        # Port and realm are folded into the service address string
        # ('host:port' / 'host@realm'), starting from the current address
        # when none was supplied.
        if hasattr(options, 'port'):
            if isinstance(options.port, str):
                if serviceaddress is None:
                    serviceaddress = settings['ServiceAddresses'][0]
                serviceaddress = serviceaddress + ':' + options.port
        if hasattr(options, 'realm'):
            if isinstance(options.realm, str):
                if serviceaddress is None:
                    serviceaddress = settings['ServiceAddresses'][0]
                if options.realm == '""' or options.realm == "''":
                    options.realm = ''
                serviceaddress = serviceaddress + '@' + options.realm
        if not serviceaddress is None:
            payload['ServiceAddresses'] = [serviceaddress]
        if hasattr(options, 'enable'):
            if not options.enable is None:
                payload['ServiceEnabled'] = options.enable
        if hasattr(options, 'ldap_username') and hasattr(options, 'ldap_password'):
            if options.ldap_username and options.ldap_password:
                payload.update({"Authentication":{"Username": options.ldap_username,\
                    "Password": options.ldap_password}})
        if hasattr(options, 'roles'):
            if options.roles:
                payload['RemoteRoleMapping'] = self.role_helper(options.roles, \
                    settings['RemoteRoleMapping'])
        if hasattr(options, 'search'):
            if options.search:
                payload.update({"LDAPService": {"SearchSettings": \
                    self.search_helper(options.search, settings['LDAPService']['SearchSettings'])}})
        return payload
    def test_directory(self, options, json=False):
        """ Function to perform directory testing
        :param options: namespace of custom parser attributes which contain the original command
            arguments for 'start/stop/viewresults'
        :type options: namespace
        :param json: Bool to print in json format or not.
        :type json: bool.
        """
        results = self.rdmc.app.select(selector='HpeDirectoryTest.', path_refresh=True)[0].dict
        # Each sub-action resolves its Redfish action target from the
        # HpeDirectoryTest resource before posting.
        if options.start_stop_view.lower() == 'start':
            path = None
            for item in results['Actions']:
                if 'StartTest' in item:
                    path = results['Actions'][item]['target']
                    break
            if not path:
                raise NoContentsFoundForOperationError("Unable to start directory test.")
            self.rdmc.ui.printer("Starting the directory test. Monitor results with "\
                "command: \"directory viewresults\".\n")
            self.rdmc.app.post_handler(path, {})
        elif options.start_stop_view.lower() == 'stop':
            path = None
            for item in results['Actions']:
                if 'StopTest' in item:
                    path = results['Actions'][item]['target']
                    break
            if not path:
                raise NoContentsFoundForOperationError("Unable to stop directory test.")
            self.rdmc.ui.printer("Stopping the directory test.\n")
            self.rdmc.app.post_handler(path, {})
        elif options.start_stop_view.lower() == 'viewresults':
            if getattr(options, "json", False):
                self.rdmc.ui.print_out_json(results['TestResults'])
            else:
                for test in results['TestResults']:
                    self.rdmc.ui.printer('Test: %s\n' % test['TestName'])
                    self.rdmc.ui.printer("------------------------\n")
                    self.rdmc.ui.printer('Status: %s\n' % test['Status'])
                    self.rdmc.ui.printer('Notes: %s\n\n' % test['Notes'])
    def print_settings(self, settings, oem_settings, local_auth_setting, name):
        """ Pretty print settings of LDAP or Kerberos
        :param settings: settings to print
        :type settings: dict.
        :param oem_settings: oem_settings to print
        :type oem_settings: dict.
        :param local_auth_settings: local authorization setting
        :type local_auth_settings: str.
        :param name: type of setting (activedirectory or ldap)
        :type name: str.
        """
        self.rdmc.ui.printer("%s settings:\n" % ('Kerberos' if name == 'ActiveDirectory' else name))
        self.rdmc.ui.printer("--------------------------------\n")
        self.rdmc.ui.printer("Enabled: %s\n" % str(settings['ServiceEnabled']))
        serviceaddress = settings['ServiceAddresses'][0]
        self.rdmc.ui.printer("Service Address: %s\n" % (serviceaddress if serviceaddress else \
            "Not Set"))
        self.rdmc.ui.printer("Local Account Authorization: %s\n" % local_auth_setting)
        # Port/realm (Kerberos) vs. port/auth-mode + searches + role maps
        # (LDAP) live under different OEM sub-dicts.
        if name.lower() == 'activedirectory':
            address_settings = oem_settings['KerberosSettings']
            self.rdmc.ui.printer("Port: %s\n" % address_settings['KDCServerPort'])
            self.rdmc.ui.printer("Realm: %s\n" % (address_settings['KerberosRealm'] if \
                address_settings['KerberosRealm'] else "Not Set"))
        else:
            address_settings = oem_settings['DirectorySettings']
            self.rdmc.ui.printer("Port: %s\n" % address_settings['LdapServerPort'])
            self.rdmc.ui.printer("Authentication Mode: %s\n" % \
                address_settings['LdapAuthenticationMode'])
            self.rdmc.ui.printer("Search Settings:\n")
            try:
                count = 1
                for search in settings['LDAPService']['SearchSettings']["BaseDistinguishedNames"]:
                    self.rdmc.ui.printer("\tSearch %s: %s\n" % (count, search))
                    count += 1
            except KeyError:
                self.rdmc.ui.printer("\tNo Search Settings\n")
        self.rdmc.ui.printer("Remote Role Mapping(s):\n")
        for role in settings['RemoteRoleMapping']:
            self.rdmc.ui.printer("\tLocal Role: %s\n" % role['LocalRole'])
            self.rdmc.ui.printer("\tRemote Group: %s\n" % role['RemoteGroup'])
    def role_helper(self, new_roles, curr_roles):
        """ Helper to prepare adding and removing roles for patching
        :param new_roles: dictionary of new roles to add or remove
        :type new_roles: dict.
        :param curr_roles: list of current roles on the system
        :type curr_roles: list.
        """
        # NOTE: final_roles aliases curr_roles — the caller's list is
        # mutated in place, not copied.
        final_roles = curr_roles
        if 'add' in new_roles:
            for role in new_roles['add']:
                role = role.split(':', 1)
                if not self.duplicate_group(role[1], curr_roles):
                    final_roles.append({"LocalRole":role[0], "RemoteGroup":role[1]})
                else:
                    raise ResourceExists('Group DN "%s" already exists.' % role[1].split(':')[0])
        if 'remove' in new_roles:
            removed = False
            for role in new_roles['remove']:
                removed = False
                for item in reversed(final_roles):
                    if item['LocalRole'] == role:
                        del final_roles[final_roles.index(item)]
                        removed = True
                        break
                if not removed:
                    raise InvalidCommandLineError("Unable to find local role %s to delete" % role)
        return final_roles
    def duplicate_group(self, group_dn, curr_roles):
        """ Checks if new role is a duplicate
        :param group_dn: group domain name from user
        :type group_dn: str.
        :param curr_roles: list of current roles
        :type curr_roles: list.
        """
        # Compare only the DN portion, ignoring an optional ':SID' suffix.
        group_dn = group_dn.split(':')[0]
        for item in curr_roles:
            comp_dn = item["RemoteGroup"].split(':')[0]
            if comp_dn == group_dn:
                return True
        return False
    def search_helper(self, new_searches, curr_searches):
        """ Helper to prepare search strings for patching
        :param new_serches: dictionary of new searches to add
        :type new_searches: dict.
        :param curr_searches: list of current searches
        :type curr_searches: dict.
        """
        # NOTE: final_searches aliases curr_searches (mutated in place).
        final_searches = curr_searches
        if 'add' in new_searches:
            if 'BaseDistinguishedNames' in final_searches:
                for search in new_searches['add']:
                    final_searches['BaseDistinguishedNames'].append(search)
            else:
                final_searches['BaseDistinguishedNames'] = new_searches['add']
        elif 'remove' in new_searches:
            to_remove = []
            if 'BaseDistinguishedNames' not in curr_searches:
                raise NoContentsFoundForOperationError("No search strings to remove")
            for search in new_searches['remove']:
                if search in curr_searches['BaseDistinguishedNames']:
                    to_remove.append(search)
                else:
                    raise InvalidCommandLineError("Unable to find search %s to delete" % search)
            for item in to_remove:
                final_searches['BaseDistinguishedNames'].remove(item)
                # An empty list is patched as [""] to clear all searches.
                if not final_searches['BaseDistinguishedNames']:
                    sys.stdout.write('Attempting to delete all searches.\n')
                    final_searches['BaseDistinguishedNames'].append("")
        return final_searches
    def directoryvalidation(self, options):
        """ directory validation function
        :param options: command line options
        :type options: list.
        """
        self.cmdbase.login_select_validation(self, options)
    def options_argument_group(self, parser):
        """ Additional argument
        :param parser: The parser to add the removeprivs option group to
        :type parser: ArgumentParser/OptionParser
        """
        parser.add_argument(
            '-j',
            '--json',
            dest='json',
            action="store_true",
            help="Optionally include this flag if you wish to change the"\
                " displayed output to JSON format. Preserving the JSON data"\
                " structure makes the information easier to parse.",
            default=False
        )
    def definearguments(self, customparser):
        """ Wrapper function for new command main function
        :param customparser: command line input
        :type customparser: parser.
        """
        if not customparser:
            return
        #self.cmdbase.add_login_arguments_group(customparser)
        subcommand_parser = customparser.add_subparsers(dest='command')
        # 'default' parser: used when no recognised sub-command is given;
        # its positional chooses between ldap/kerberos views.
        default_parser = subcommand_parser.add_parser('default')
        default_parser.add_argument(
            'ldap_kerberos',
            help="Specify LDAP or Kerberos configuration settings",
            metavar='LDAP_KERBEROS',
            nargs='?',
            type= str,
            default=None,
        )
        self.cmdbase.add_login_arguments_group(default_parser)
        privilege_help='\n\nPRIVILEGES:\n\t1: Login\n\t2: Remote Console\n\t'\
            '3: User Config\n\t4: iLO (Manager) Config\n\t5: Virtual Media\n\t'\
            '6: Virtual Power and Reset\n\t7: Host NIC Config\n\t8: Host Bios Config\n\t9: '\
            'Host Storage Config\n\t10: System Recovery Config\n\t11: Self Password Change\n\t'\
            '12: Configure Components\n\n\tLOCAL ROLES:\n\tReadOnly\n\tOperator\n\tAdministrator'\
            '\n\n\tNOTE: The Self Password Change privilege is automatically added to roles with '\
            'the Login privilege.'
        ldap_help='\tShow, add or modify properties pertaining to iLO LDAP Configuration.'
        ldap_parser = subcommand_parser.add_parser(
            __subparsers__[0],
            help=ldap_help,
            description=ldap_help+'\n\n\tSimply show LDAP configuration:\n\t\tdirectory ldap\n\n'\
                'To modify the LDAP username, password, service address, search strings or '\
                'enable/disable LDAP.\n\t\tdirectory ldap <username> <password> '\
                '--serviceaddress x.x.y.z --addsearch string1, string2 --enable.\n\n\tTo add role '\
                'mapping.\n\t\tdirectory ldap <username> <password> --addrolemap \"LocalRole1:\"'\
                '\"RemoteGroup3,LocalRole2:RemoteGroup4:SID.\n\n\tTo remove role mapping.\n\t\t'\
                'directory ldap <username> <password> --removerolemap LocalRole1, LocalRole2.'\
                +privilege_help,
            formatter_class=RawDescriptionHelpFormatter
        )
        ldap_parser.add_argument(
            'ldap_username',
            help='The LDAP username used in verifying AD (optional outside of \'--enable\' and'\
                '\'--disable\')',
            metavar='USERNAME',
            nargs='?',
            type= str,
            default=None,
        )
        ldap_parser.add_argument(
            'ldap_password',
            help='The LDAP password used in verifying AD (optional outside of \'--enable\' and' \
                '\'--disable\')',
            metavar='PASSWORD',
            nargs='?',
            type= str,
            default=None,
        )
        ldap_parser.add_argument(
            '--enable',
            '--disable',
            dest='enable',
            type=str,
            nargs='*',
            action=_DirectoryParse,
            help="Optionally add this flag to enable LDAP services.",
            default=None,
        )
        ldap_parser.add_argument(
            '--addsearch',
            '--removesearch',
            dest='search',
            nargs='*',
            action=_DirectoryParse,
            help="Optionally add this flag to add or remove search strings for "\
                "generic LDAP services.",
            type=str,
            default={},
        )
        ldap_parser.add_argument(
            '--serviceaddress',
            dest='serviceaddress',
            help='Optionally include this flag to set the service address of the LDAP Services.',
            default=None,
        )
        ldap_parser.add_argument(
            '--port',
            dest='port',
            help="Optionally include this flag to set the port of the LDAP services.",
            default=None,
        )
        ldap_parser.add_argument(
            '--addrolemap',
            '--removerolemap',
            dest='roles',
            nargs='*',
            action=_DirectoryParse,
            help='Optionally add this flag to add or remove Role Mapping(s) for the LDAP and '\
                'Kerberos services. Remove EX: --removerolemap LocalRole1,LocalRole2 '\
                'Add EX: --addrolemap "LocalRole1:RemoteGroup3,LocalRole2:RemoteGroup4\n\n"'
                'SID EX: --addrolemap "LocalRole1:RemoteGroup2:SID,LocalRole2:RemoteGroup5:SID'\
                '\n\nNOTE 1: Create a custom local role group (and subsequently assign to a role map)'\
                'by adding the numbers associated with privilege(s) desired separated by a semicolon'\
                '(;)\n\nNOTE 2: SID is optional',
            type=str,
            default={},
        )
        ldap_parser.add_argument(
            '--enablelocalauth',
            '--disablelocalauth',
            dest='localauth',
            nargs='*',
            type=str,
            action=_DirectoryParse,
            help="Optionally include this flag if you wish to enable or disable authentication "\
                "for local accounts.",
            default=None
        )
        ldap_parser.add_argument(
            '--authentication',
            dest='authmode',
            choices=['DefaultSchema', 'ExtendedSchema'],
            help="Optionally include this flag if you would like to choose a LDAP authentication "
                "mode Valid choices are: DefaultSchema (Directory Default Schema or Schema-free) or "\
                "ExtendedSchema (HPE Extended Schema).",
            default=None
        )
        self.cmdbase.add_login_arguments_group(ldap_parser)
        self.options_argument_group(ldap_parser)
        kerberos_help='Show, add or modify properties pertaining to AD Kerberos Configuration.'
        # NOTE(review): the kerberos and test parser descriptions below reuse
        # ldap_help as their first line — looks like a copy-paste leftover.
        # The 'relm' typo at the end of the kerberos description is in a
        # runtime string and is left untouched here.
        kerberos_parser = subcommand_parser.add_parser(
            __subparsers__[1],
            help=kerberos_help,
            description=ldap_help+'\n\nExamples:\n\nShow Kerberos specific AD/LDAP configuration '\
                'settings.\n\tdirectory kerberos\n\nShow current AD Kerberos configuration.'\
                '\n\tdirectory kerberos\n\nAlter kerberos service address, AD relm and Port.\n\t'\
                'directory kerberos --serviceaddress x.x.y.z --port 8888 --realm adrealm1',
            formatter_class=RawDescriptionHelpFormatter
        )
        kerberos_parser.add_argument(
            '--serviceaddress',
            dest='serviceaddress',
            help="Optionally include this flag to set the Kerberos serviceaddress.",
            default=None,
        )
        kerberos_parser.add_argument(
            '--port',
            dest='port',
            help="Optionally include this flag to set the Kerberos port.",
            default=None,
        )
        kerberos_parser.add_argument(
            '--realm',
            dest='realm',
            help="Optionally include this flag to set the Kerberos realm.",
            default=None
        )
        kerberos_parser.add_argument(
            '--keytab',
            dest='keytab',
            help="Optionally include this flag to import a Kerberos Keytab by it's URI location.",
            default=""
        )
        kerberos_parser.add_argument(
            '--enable',
            '--disable',
            dest='enable',
            type=str,
            nargs='*',
            action=_DirectoryParse,
            help="Optionally add this flag to enable or disable Kerberos services.",
            default=None,
        )
        self.cmdbase.add_login_arguments_group(kerberos_parser)
        self.options_argument_group(kerberos_parser)
        directory_test_help='Start, stop or view results of an AD/LDAP test which include: ICMP, '\
            'Domain Resolution, Connectivity, Authentication, Bindings, LOM Object and User '\
            'Context tests.'
        directory_test_parser = subcommand_parser.add_parser(
            __subparsers__[2],
            help=directory_test_help,
            description=ldap_help+'\n\nExamples:\n\nStart a directory test:\n\tdirectory test '\
                'start\n\nStop a directory test:\n\tdirectory test stop\n\nView results of the last '\
                'directory test:\n\tdirectory test viewresults',
            formatter_class=RawDescriptionHelpFormatter
        )
        directory_test_parser.add_argument(
            'start_stop_view',
            help="Start, stop, or view results on an AD/LDAP test.",
            metavar='START, STOP, VIEWSTATUS',
            default='viewresults'
        )
        self.cmdbase.add_login_arguments_group(directory_test_parser)
|
nilq/baby-python
|
python
|
from __future__ import annotations
from discord.ext import commands
__all__ = ("HierarchyFail",)
class HierarchyFail(commands.CheckFailure):
    """Specialised ``commands.CheckFailure``; adds no state of its own."""

    pass
|
nilq/baby-python
|
python
|
import numpy as np
import pandas as pd
import os
import scipy.io as spio
import math
from random import sample
class ProcessData:
    """Load a directory of CSV sensor datasets and reshape them into
    (inputs, labels, timestamps) arrays.

    Assumes each CSV's first column is the reference measurement used for
    labelling and the remaining columns are sensor channels — TODO confirm
    against the actual data files.
    """
    def __init__(self, data_set_directory, columns_to_use):
        # Directory containing the CSV files.
        self.directory = data_set_directory
        # Column names to keep from each CSV (besides the first column).
        self.selected_columns = columns_to_use
        # One [x, y, time] entry per processed CSV (filled by read_data()).
        self.datasets = []
        # Sensor names shared by every CSV (filled by find_common_sensors()).
        self.all_common_sensors = []
        #self.read_data()
    def find_common_sensors(self):
        """Intersect the (non-first) column names across all CSVs in the
        directory, then return them grouped per file.
        """
        files = os.listdir(self.directory)
        for file in files:
            if file.endswith(".csv"):
                df = pd.read_csv(self.directory+'/'+file)
                sensors = list(df.columns[1:])
                if len(self.all_common_sensors) == 0:
                    self.all_common_sensors = sensors
                else:
                    self.all_common_sensors = list(set(self.all_common_sensors) & set(sensors))
        # NOTE(review): this tail replaces each common sensor name with a
        # list repeating that name once per CSV file — presumably one entry
        # per dataset, but the intent is unclear; verify before reuse.
        sensors = []
        for sensor in self.all_common_sensors:
            sensors.append([])
            for file in files:
                if file.endswith(".csv"):
                    sensors[-1].append(sensor)
        self.all_common_sensors = sensors
        return self.all_common_sensors
    def read_data(self, considered_files):
        """Read the CSVs whose positional index (among CSV files) is in
        ``considered_files``, keep only the selected columns, derive a
        binary 'fire_label' column, and append the processed arrays to
        ``self.datasets``.
        """
        files = os.listdir(self.directory)
        count = 0
        for file in files:
            if file.endswith(".csv") and count in considered_files:
                df = pd.read_csv(self.directory+'/'+file)
                unselected_columns = list(df.columns)
                # Keep the first column plus the requested ones; drop the rest.
                for x in [unselected_columns[0]] + self.selected_columns:
                    #print(file, x)
                    unselected_columns.remove(x)
                df = df.drop(columns=unselected_columns)
                # Label is 0 where the first kept column is below 10,
                # else 1 — threshold semantics assumed, TODO confirm units.
                df['fire_label'] = np.ones(df.iloc[:, 0].shape)
                df.iloc[np.where(df.iloc[:, 0] < 10), -1] = 0
                data = self.process_data(df)
                self.datasets.append(data)
            # count tracks the CSV index even for files that were skipped.
            if file.endswith(".csv"):
                count += 1
    def process_data(self, df):
        """Split a labelled DataFrame into per-sensor input arrays.

        For each sensor column, pairs the current sample with the previous
        one, yielding an array of shape (rows-1, 2, 1).
        Returns [x, y, time] where y is the label column and time the
        first column, both aligned with the shifted inputs.
        """
        # Exclude the first (reference/time) and last (label) columns.
        num_sensors = len(df.columns)-2
        x = []
        for i in range(1, num_sensors + 1):
            x.append(np.array([np.array(df.iloc[1:, i:i + 1]), np.array(df.iloc[:-1, i:i + 1])])[:, :, 0].T[:, :, np.newaxis])
        y = np.array(df.iloc[1:, -1:])
        time = np.array(df.iloc[1:, 0:1])
        return [x, y, time]
    def shape_to_input_output(self, sensors):
        """Concatenate the selected sensors across all datasets, normalise
        each sensor to its max, and return shuffled (x, y, time).

        NOTE(review): the final re-indexing loop uses ``x[j]`` with the
        sensor index, which only works when ``sensors`` is 0..len-1 in
        order (i.e. sensors == range(n)); verify callers.
        """
        x = []
        for j in sensors:
            count = 0
            for i in range(len(self.datasets)):
                if count == 0:
                    x.append(self.datasets[i][0][j])
                    y = self.datasets[i][1]
                    time = self.datasets[i][2]
                else:
                    x[-1] = np.concatenate((x[-1], self.datasets[i][0][j]), axis=0)
                    y = np.concatenate((y, self.datasets[i][1]), axis=0)
                    time = np.concatenate((time, self.datasets[i][2]), axis=0)
                count += 1
            # Per-sensor max normalisation.
            x[-1] = x[-1]/np.max(x[-1])
        # Shuffle all samples with a shared permutation so x/y/time stay aligned.
        rand_samples = sample(range(y.shape[0]), y.shape[0])
        #spio.savemat('rand_indices.mat', {'indices': rand_samples})
        '''
        # To load the previously saved random indices
        rand_samples = spio.loadmat('rand_indices.mat')
        rand_samples = list(rand_samples['indices'][0])
        '''
        y = y[rand_samples, :]
        time = time[rand_samples, :]
        for j in sensors:
            x[j] = x[j][rand_samples, :, :]
        return x, y, time
|
nilq/baby-python
|
python
|
# coding=utf-8
""" Demo app, to show OpenCV video and PySide2 widgets together."""
import sys
from PySide2.QtWidgets import QApplication
from sksurgerycore.configuration.configuration_manager import \
ConfigurationManager
from sksurgerybard.widgets.bard_overlay_app import BARDOverlayApp
def run_demo(config_file, calib_dir):
    """Launch the BARD overlay main screen.

    Loads an optional configuration file, starts the Qt application,
    and exits the process with Qt's return code.
    """
    app = QApplication([])

    configuration = None
    if config_file is not None:
        # Hand the viewer a copy so it cannot mutate the manager's state.
        configuration = ConfigurationManager(config_file).get_copy()

    viewer = BARDOverlayApp(configuration, calib_dir)
    viewer.start()

    sys.exit(app.exec_())
|
nilq/baby-python
|
python
|
# coding: utf-8
import sys
# Number of bits per component at each bit-rate index.
k_bit_rate_num_bits = [ 0, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 32 ]
k_highest_bit_rate = len(k_bit_rate_num_bits) - 1
k_lowest_bit_rate = 1
k_num_bit_rates = len(k_bit_rate_num_bits)
k_invalid_bit_rate = 255
# This code assumes that rotations, translations, and scales are packed on 3 components (e.g. quat drop w)
if __name__ == "__main__":
    if sys.version_info < (3, 4):
        print('Python 3.4 or higher needed to run this script')
        sys.exit(1)

    def _track_bits(rate):
        """Bits consumed by one 3-component track at the given bit-rate index."""
        return k_bit_rate_num_bits[rate] * 3

    rates = range(k_num_bit_rates)
    # Enumerate every bit-rate combination, keyed by the total transform
    # size so sorting orders them smallest-first (ties break on the rates).
    permutation_tries_no_scale = sorted(
        (_track_bits(rot) + _track_bits(trans), rot, trans)
        for rot in rates for trans in rates)
    permutation_tries = sorted(
        (_track_bits(rot) + _track_bits(trans) + _track_bits(scl), rot, trans, scl)
        for rot in rates for trans in rates for scl in rates)

    # Emit both tables as C++ constant arrays.
    print('constexpr uint8_t k_local_bit_rate_permutations_no_scale[{}][2] ='.format(len(permutation_tries_no_scale)))
    print('{')
    for transform_size, rotation_bit_rate, translation_bit_rate in permutation_tries_no_scale:
        print('\t{{ {}, {} }},\t\t// {} bits per transform'.format(rotation_bit_rate, translation_bit_rate, transform_size))
    print('};')
    print()
    print('constexpr uint8_t k_local_bit_rate_permutations[{}][3] ='.format(len(permutation_tries)))
    print('{')
    for transform_size, rotation_bit_rate, translation_bit_rate, scale_bit_rate in permutation_tries:
        print('\t{{ {}, {}, {} }},\t\t// {} bits per transform'.format(rotation_bit_rate, translation_bit_rate, scale_bit_rate, transform_size))
    print('};')
|
nilq/baby-python
|
python
|
#!/usr/bin/python
"""
Date utilities to do fast datetime parsing.
Copyright (C) 2013 Byron Platt
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# TODO: At the moment this has been targeted toward the datetime formats used by
# NNTP as it was developed for use in a NNTP reader. There is, however, no
# reason why this module could not be extended to include other formats.
import calendar
import datetime
import dateutil.parser
import dateutil.tz
class _tzgmt(dateutil.tz.tzutc):
    """GMT timezone.

    Identical offset behaviour to UTC; only the reported zone name differs.
    """
    def tzname(self, dt):
        return "GMT"
# Shared timezone singletons used by the parsing helpers below.
TZ_LOCAL = dateutil.tz.tzlocal()
"""Local timezone (at the time the module was loaded)"""
TZ_UTC = dateutil.tz.tzutc()
"""UTC timezone."""
TZ_GMT = _tzgmt()
"""GMT timezone."""
# Month-name lookup used instead of locale-dependent strptime parsing.
_months = dict(
    jan=1, feb=2, mar=3, apr=4, may=5, jun=6,
    jul=7, aug=8, sep=9, oct=10,nov=11,dec=12
)
"""Conversion dictionary for english abbreviated month to integer."""
def _offset(value):
"""Parse timezone to offset in seconds.
Args:
value: A timezone in the '+0000' format. An integer would also work.
Returns:
The timezone offset from GMT in seconds as an integer.
"""
o = int(value)
if o == 0:
return 0
a = abs(o)
s = a*36+(a%100)*24
return (o//a)*s
def timestamp_d_b_Y_H_M_S(value):
    """Convert a '%d %b %Y %H:%M:%S GMT' timestamp to seconds since epoch.

    Handles strings such as '18 Jun 2013 12:00:00 GMT'.

    Args:
        value: A timestamp string in the format '%d %b %Y %H:%M:%S GMT'.

    Returns:
        The time in seconds since epoch as an integer.

    Raises:
        ValueError: If timestamp is invalid.
        KeyError: If the abbrieviated month is invalid.

    Note: The timezone field is ignored and simply assumed to be UTC/GMT.
    """
    day, month, year, clock, _zone = value.split()
    hour, minute, second = clock.split(":")
    fields = (int(year), _months[month.lower()], int(day),
              int(hour), int(minute), int(second), 0, 0, 0)
    return int(calendar.timegm(fields))
def datetimeobj_d_b_Y_H_M_S(value):
    """Convert timestamp string to a timezone-aware datetime object.

    Handles timestamp strings like '18 Jun 2013 12:00:00 GMT', i.e. the
    format '%d %b %Y %H:%M:%S GMT'.

    Args:
        value: A timestamp string in the format '%d %b %Y %H:%M:%S GMT'.

    Returns:
        A datetime object with tzinfo set to TZ_GMT.

    Raises:
        ValueError: If timestamp is invalid.
        KeyError: If the abbreviated month is invalid.

    Note: The trailing timezone field is ignored; it is simply assumed to
    be UTC/GMT.
    """
    day, month, year, clock, _zone = value.split()
    hour, minute, second = clock.split(":")
    return datetime.datetime(
        int(year), _months[month.lower()], int(day),
        int(hour), int(minute), int(second), tzinfo=TZ_GMT,
    )
def timestamp_a__d_b_Y_H_M_S_z(value):
    """Convert timestamp string to time in seconds since epoch.

    Handles timestamp strings like 'Tue, 18 Jun 2013 22:00:00 +1000', i.e.
    the format '%a, %d %b %Y %H:%M:%S %z'.

    Args:
        value: A timestamp string in the format '%a, %d %b %Y %H:%M:%S %z'.

    Returns:
        The time in seconds since epoch as an integer.

    Raises:
        ValueError: If timestamp is invalid.
        KeyError: If the abbreviated month is invalid.
    """
    _weekday, day, month, year, clock, zone = value.split()
    hour, minute, second = clock.split(":")
    utc_seconds = int(calendar.timegm((
        int(year), _months[month.lower()], int(day),
        int(hour), int(minute), int(second), 0, 0, 0,
    )))
    # timegm treats the fields as UTC, so subtract the declared offset to
    # recover the true UTC instant.
    return utc_seconds - _offset(zone)
def datetimeobj_a__d_b_Y_H_M_S_z(value):
    """Convert timestamp string to a timezone-aware datetime object.

    Timestamps strings like 'Tue, 18 Jun 2013 22:00:00 +1000' are able to be
    converted by this function.

    Args:
        value: A timestamp string in the format '%a, %d %b %Y %H:%M:%S %z'.

    Returns:
        A datetime object carrying a fixed-offset tzinfo built from the
        '%z' field.

    Raises:
        ValueError: If timestamp is invalid.
        KeyError: If the abbreviated month is invalid.
    """
    # The weekday field (a) is parsed but never validated against the date.
    a, d, b, Y, t, z = value.split()
    H, M, S = t.split(":")
    return datetime.datetime(
        int(Y), _months[b.lower()], int(d), int(H), int(M), int(S),
        tzinfo=dateutil.tz.tzoffset(None, _offset(z))
    )
def timestamp_YmdHMS(value):
    """Convert timestamp string to time in seconds since epoch.

    Handles timestamp strings like '20130618120000', i.e. the format
    '%Y%m%d%H%M%S'.

    Args:
        value: A timestamp string in the format '%Y%m%d%H%M%S'.

    Returns:
        The time in seconds since epoch as an integer.

    Raises:
        ValueError: If timestamp is invalid.

    Note: The timezone is assumed to be UTC/GMT.
    """
    number = int(value)
    # Peel two digits at a time off the right-hand side of the integer:
    # seconds, minutes, hours, day, month; what remains is the year.
    rest, second = divmod(number, 100)
    rest, minute = divmod(rest, 100)
    rest, hour = divmod(rest, 100)
    rest, day = divmod(rest, 100)
    year, month = divmod(rest, 100)
    return int(calendar.timegm(
        (year % 10000, month, day, hour, minute, second, 0, 0, 0)
    ))
def datetimeobj_YmdHMS(value):
    """Convert timestamp string to a timezone-aware datetime object.

    Handles timestamp strings like '20130618120000', i.e. the format
    '%Y%m%d%H%M%S'.

    Args:
        value: A timestamp string in the format '%Y%m%d%H%M%S'.

    Returns:
        A datetime object with tzinfo set to TZ_GMT.

    Raises:
        ValueError: If timestamp is invalid.

    Note: The timezone is assumed to be UTC/GMT.
    """
    number = int(value)
    # Peel two digits at a time off the right-hand side of the integer:
    # seconds, minutes, hours, day, month; what remains is the year.
    rest, second = divmod(number, 100)
    rest, minute = divmod(rest, 100)
    rest, hour = divmod(rest, 100)
    rest, day = divmod(rest, 100)
    year, month = divmod(rest, 100)
    return datetime.datetime(
        year % 10000, month, day, hour, minute, second, tzinfo=TZ_GMT
    )
def timestamp_epoch(value):
    """Convert a seconds-since-epoch string to an integer.

    Timestamps strings like '1383470155' are able to be converted by this
    function.  (The previous summary line incorrectly claimed a datetime
    object was returned; the result is an integer.)

    Args:
        value: A timestamp string as seconds since epoch.

    Returns:
        The time in seconds since epoch as an integer.

    Raises:
        ValueError: If the string is not a valid integer.
    """
    return int(value)
def datetimeobj_epoch(value):
    """Convert timestamp string to a datetime object.

    Timestamps strings like '1383470155' are able to be converted by this
    function.

    Args:
        value: A timestamp string as seconds since epoch.

    Returns:
        A datetime object with tzinfo set to TZ_GMT.

    Raises:
        ValueError: If timestamp is invalid.

    Note: datetime.datetime.utcfromtimestamp() is deprecated as of Python
    3.12; datetime.datetime.fromtimestamp(int(value), tz=TZ_GMT) is the
    modern equivalent.
    """
    return datetime.datetime.utcfromtimestamp(int(value)).replace(tzinfo=TZ_GMT)
def timestamp_fmt(value, fmt):
    """Convert timestamp string to time in seconds since epoch.

    Wraps datetime.datetime.strptime().  This is slow; use the other
    timestamp_*() functions if possible.

    Args:
        value: A timestamp string.
        fmt: A timestamp format string.

    Returns:
        The time in seconds since epoch as an integer.
    """
    parsed = datetime.datetime.strptime(value, fmt)
    # timegm interprets the struct_time as UTC, so naive input is treated
    # as UTC/GMT.
    return int(calendar.timegm(parsed.utctimetuple()))
def datetimeobj_fmt(value, fmt):
    """Convert timestamp string to a datetime object.

    Wrapper for datetime.datetime.strptime(). This is slow; use the other
    datetimeobj_*() functions if possible.

    Args:
        value: A timestamp string.
        fmt: A timestamp format string.

    Returns:
        A datetime object.  Unlike the other datetimeobj_*() helpers, the
        result is naive unless fmt itself captures a timezone.
    """
    return datetime.datetime.strptime(value, fmt)
def timestamp_any(value):
    """Convert timestamp string to time in seconds since epoch.

    Most timestamp strings are supported; in fact this wraps the
    dateutil.parser.parse() method. This is SLOW; use the other
    timestamp_*() functions if possible.

    Args:
        value: A timestamp string.

    Returns:
        The time in seconds since epoch as an integer.
    """
    return int(calendar.timegm(dateutil.parser.parse(value).utctimetuple()))
def datetimeobj_any(value):
    """Convert timestamp string to a datetime object.

    Most timestamp strings are supported; in fact this is a wrapper for the
    dateutil.parser.parse() method. This is SLOW; use the other
    datetimeobj_*() functions if possible.

    Args:
        value: A timestamp string.

    Returns:
        A datetime object.
    """
    return dateutil.parser.parse(value)
# Dispatch table mapping a format-string hint to its fast seconds-since-epoch
# parser; timestamp() falls back to timestamp_fmt() for unlisted formats.
_timestamp_formats = {
    "%d %b %Y %H:%M:%S" : timestamp_d_b_Y_H_M_S,
    "%a, %d %b %Y %H:%M:%S %z": timestamp_a__d_b_Y_H_M_S_z,
    "%Y%m%d%H%M%S" : timestamp_YmdHMS,
    "epoch" : timestamp_epoch,
}
def timestamp(value, fmt=None):
    """Parse a datetime string to a unix timestamp.

    Uses fast custom parsing for common datetime formats or the slow dateutil
    parser for other formats. This is a trade off between ease of use and
    speed and is very useful for fast parsing of timestamp strings whose
    format may be standard but varied or unknown prior to parsing.

    Common formats include:
       1 Feb 2010 12:00:00 GMT
       Mon, 1 Feb 2010 22:00:00 +1000
       20100201120000
       1383470155 (seconds since epoch)

    See the other timestamp_*() functions for more details.

    Args:
        value: A string representing a datetime.
        fmt: An optional timestamp format string like for time.strptime();
            known formats dispatch straight to their fast parser, others use
            the generic strptime-based parser.

    Returns:
        The time in seconds since epoch as an integer for the value
        specified.
    """
    if fmt:
        return _timestamp_formats.get(fmt,
            lambda v: timestamp_fmt(v, fmt)
        )(value)
    # No hint given: guess the format from the string length (and a cheap
    # positional check), trying the fast parsers before falling back.
    l = len(value)
    if 19 <= l <= 24 and value[3] == " ":
        # '%d %b %Y %H:%M:%Sxxxx'
        try:
            return timestamp_d_b_Y_H_M_S(value)
        except (KeyError, ValueError, OverflowError):
            pass
    if 30 <= l <= 31:
        # '%a, %d %b %Y %H:%M:%S %z'
        try:
            return timestamp_a__d_b_Y_H_M_S_z(value)
        except (KeyError, ValueError, OverflowError):
            pass
    if l == 14:
        # '%Y%m%d%H%M%S'
        try:
            return timestamp_YmdHMS(value)
        except (ValueError, OverflowError):
            pass
    # epoch timestamp
    try:
        return timestamp_epoch(value)
    except ValueError:
        pass
    # slow version
    return timestamp_any(value)
# Dispatch table mapping a format-string hint to its fast datetime-object
# parser; datetimeobj() falls back to datetimeobj_fmt() for unlisted formats.
_datetimeobj_formats = {
    "%d %b %Y %H:%M:%S" : datetimeobj_d_b_Y_H_M_S,
    "%a, %d %b %Y %H:%M:%S %z": datetimeobj_a__d_b_Y_H_M_S_z,
    "%Y%m%d%H%M%S" : datetimeobj_YmdHMS,
    "epoch" : datetimeobj_epoch,
}
def datetimeobj(value, fmt=None):
    """Parse a datetime string to a datetime object.

    Uses fast custom parsing for common datetime formats or the slow dateutil
    parser for other formats. This is a trade off between ease of use and
    speed and is very useful for fast parsing of timestamp strings whose
    format may be standard but varied or unknown prior to parsing.

    Common formats include:
       1 Feb 2010 12:00:00 GMT
       Mon, 1 Feb 2010 22:00:00 +1000
       20100201120000
       1383470155 (seconds since epoch)

    See the other datetimeobj_*() functions for more details.

    Args:
        value: A string representing a datetime.
        fmt: An optional timestamp format string like for time.strptime();
            known formats dispatch straight to their fast parser, others use
            the generic strptime-based parser.

    Returns:
        A datetime object.
    """
    if fmt:
        return _datetimeobj_formats.get(fmt,
            lambda v: datetimeobj_fmt(v, fmt)
        )(value)
    # No hint given: guess the format from the string length (and a cheap
    # positional check), trying the fast parsers before falling back.
    l = len(value)
    if 19 <= l <= 24 and value[3] == " ":
        # '%d %b %Y %H:%M:%Sxxxx'
        try:
            return datetimeobj_d_b_Y_H_M_S(value)
        except (KeyError, ValueError):
            pass
    if 30 <= l <= 31:
        # '%a, %d %b %Y %H:%M:%S %z'
        try:
            return datetimeobj_a__d_b_Y_H_M_S_z(value)
        except (KeyError, ValueError):
            pass
    if l == 14:
        # '%Y%m%d%H%M%S'
        try:
            return datetimeobj_YmdHMS(value)
        except ValueError:
            pass
    # epoch timestamp
    try:
        return datetimeobj_epoch(value)
    except ValueError:
        pass
    # slow version
    return datetimeobj_any(value)
# testing
if __name__ == "__main__":
    # Ad-hoc benchmark harness comparing the fast parsers against strptime
    # and dateutil for both an implemented and an unimplemented format.
    import sys
    import timeit
    log = sys.stdout.write
    times = (
        datetime.datetime.now(TZ_UTC),
        datetime.datetime.now(TZ_GMT),
        datetime.datetime.now(TZ_LOCAL),
        datetime.datetime.now(),
    )
    # check timezones
    for t in times:
        log("%s\n" % t.strftime("%Y-%m-%d %H:%M:%S %Z"))
    # TODO validate values (properly)
    # check speed
    values = (
        {
            "name": "Implemented Format",
            "time": "20130624201912",
            "fmt" : "%Y%m%d%H%M%S"
        },
        {
            "name": "Unimplemented Format",
            "time": "2013-06-24 20:19:12",
            "fmt" : "%Y-%m-%d %H:%M:%S"
        }
    )
    # Each test is a timeit snippet template; %(time)s and %(fmt)s are
    # substituted from the `values` dicts above.
    tests = (
        {
            "name" : "GMT timestamp (strptime version)",
            "test" : "int(calendar.timegm(datetime.datetime.strptime('%(time)s', '%(fmt)s').utctimetuple()))",
            "setup": "import calendar, datetime",
        },
        {
            "name" : "GMT timestamp (dateutil version)",
            "test" : "int(calendar.timegm(dateutil.parser.parse('%(time)s').utctimetuple()))",
            "setup": "import calendar, dateutil.parser",
        },
        {
            "name" : "GMT timestamp (fast version)",
            "test" : "timestamp('%(time)s')",
            "setup": "from __main__ import timestamp",
        },
        {
            "name" : "GMT timestamp (fast version with format hint)",
            "test" : "timestamp('%(time)s', '%(fmt)s')",
            "setup": "from __main__ import timestamp",
        },
        {
            "name" : "GMT datetime object (strptime version)",
            "test" : "datetime.datetime.strptime('%(time)s', '%(fmt)s').replace(tzinfo=TZ_GMT)",
            "setup": "import datetime; from __main__ import TZ_GMT",
        },
        {
            "name" : "GMT datetime object (dateutil version)",
            "test" : "dateutil.parser.parse('%(time)s').replace(tzinfo=TZ_GMT)",
            "setup": "import dateutil.parser; from __main__ import TZ_GMT",
        },
        {
            "name" : "GMT datetime object (fast version)",
            "test" : "datetimeobj('%(time)s')",
            "setup": "from __main__ import datetimeobj",
        },
        {
            "name" : "GMT datetime object (fast version with format hint)",
            "test" : "datetimeobj('%(time)s', '%(fmt)s')",
            "setup": "from __main__ import datetimeobj",
        }
    )
    iters = 100000
    for v in values:
        log("%(name)s (%(fmt)s)\n" % v)
        for t in tests:
            log("  %(name)-52s" % t)
            elapsed = timeit.timeit(t["test"] % v, t["setup"], number=iters)
            log("%0.3f sec (%d loops @ %0.3f usec)\n" % (
                elapsed, iters, (elapsed/iters)*1000000
            ))
|
nilq/baby-python
|
python
|
'''OpenGL extension SGIS.texture_filter4
Overview (from the spec)
This extension allows 1D and 2D textures to be filtered using an
application-defined, four sample per dimension filter. (In addition to
the NEAREST and LINEAR filters defined in the original GL Specification.)
Such filtering results in higher image quality. It is defined only
for non-mipmapped filters. The filter that is specified must be
symmetric and separable (in the 2D case).
The official definition of this extension is available here:
http://oss.sgi.com/projects/ogl-sample/registry/SGIS/texture_filter4.txt
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_SGIS_texture_filter4'
# Token accepted as a texture min/mag filter to select 4-sample filtering.
GL_FILTER4_SGIS = constant.Constant( 'GL_FILTER4_SGIS', 0x8146 )
# Query token for the size of the filter4 weight table.
GL_TEXTURE_FILTER4_SIZE_SGIS = constant.Constant( 'GL_TEXTURE_FILTER4_SIZE_SGIS', 0x8147 )
# Retrieves the currently installed filter-function weights.
glGetTexFilterFuncSGIS = platform.createExtensionFunction(
'glGetTexFilterFuncSGIS', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, arrays.GLfloatArray,),
doc = 'glGetTexFilterFuncSGIS( GLenum(target), GLenum(filter), GLfloatArray(weights) ) -> None',
argNames = ('target', 'filter', 'weights',),
)
# Installs n application-defined filter weights for the given target/filter.
glTexFilterFuncSGIS = platform.createExtensionFunction(
'glTexFilterFuncSGIS', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, constants.GLsizei, arrays.GLfloatArray,),
doc = 'glTexFilterFuncSGIS( GLenum(target), GLenum(filter), GLsizei(n), GLfloatArray(weights) ) -> None',
argNames = ('target', 'filter', 'n', 'weights',),
)
def glInitTextureFilter4SGIS():
    '''Return boolean indicating whether this extension is available in the current GL context'''
    return extensions.hasGLExtension( EXTENSION_NAME )
|
nilq/baby-python
|
python
|
# Demo script: importing using_name runs its module-level code as a side
# effect, then print_func greets the given name.
from support import print_func
import using_name
print_func("Jack")
|
nilq/baby-python
|
python
|
"""Various utilities used throughout the code.
Here go various utilities that don't belong directly in any class,
photometry utils module nor or SED model module.
"""
import os
import pickle
import random
import time
from contextlib import closing
import numpy as np
from scipy.special import erf
from scipy.stats import gaussian_kde, norm
from termcolor import colored
def estimate_pdf(distribution):
    """Estimates the PDF of a distribution using a gaussian KDE.

    Parameters
    ----------
    distribution: array_like
        The distribution.

    Returns
    -------
    xx: array_like
        The x values of the PDF: a 300-point grid spanning the sample range.
    pdf: array_like
        The estimated PDF evaluated on that grid.
    """
    density = gaussian_kde(distribution)
    grid = np.linspace(distribution.min(), distribution.max(), 300)
    return grid, density(grid)
def estimate_cdf(distribution, hdr=False):
    """Estimate the CDF of a distribution from a 499-bin histogram.

    The returned array has 500 entries and always starts at 0.  With
    ``hdr=True`` the bin densities are accumulated from the highest-density
    bin downwards, as required for highest-density-region intervals.
    """
    density, edges = np.histogram(distribution, density=True, bins=499)
    widths = np.diff(edges)
    cdf = np.zeros(500)  # entry 0 stays 0 so the CDF starts at zero
    if hdr:
        order = np.argsort(density)[::-1]
        cdf[1:] = np.cumsum(density[order]) * widths
    else:
        cdf[1:] = np.cumsum(density) * widths
    return cdf
def norm_fit(x, mu, sigma, A):
    """Scaled Gaussian: A times the normal PDF with mean mu and width sigma."""
    return A * norm(loc=mu, scale=sigma).pdf(x)
def credibility_interval(post, alpha=1.):
    """Calculate bayesian credibility interval.

    Parameters:
    -----------
    post : array_like
        The posterior sample over which to calculate the bayesian credibility
        interval.
    alpha : float, optional
        Confidence level expressed in sigma units (1 sigma ~ 68%).

    Returns:
    --------
    med : float
        Median of the posterior.
    low : float
        Lower part of the credibility interval.
    up : float
        Upper part of the credibility interval.
    """
    # Convert the sigma level to the two-tailed coverage fraction.
    coverage = erf(alpha / np.sqrt(2))
    tail = 100 * (1 - coverage) / 2
    low, med, up = np.percentile(post, [tail, 50, 100 - tail])
    return med, low, up
def credibility_interval_hdr(xx, pdf, cdf, sigma=1.):
    """Calculate the highest density region for an empirical distribution.

    Reference: Hyndman, Rob J. 1996

    Parameters
    ----------
    xx: array_like
        The x values of the PDF (and the y values of the CDF).
    pdf: array_like
        The PDF of the distribution.
    cdf: array_like
        The CDF of the distribution (expected to be the hdr-sorted CDF from
        estimate_cdf(..., hdr=True) so that index order matches the
        density-sorted pdf below -- TODO confirm with callers).
    sigma: float
        The confidence level in sigma notation. (e.g. 1 sigma = 68%)

    Returns
    -------
    best: float
        The value corresponding to the peak of the posterior distribution.
    low: float
        The minimum value of the HDR.
    high: float
        The maximum value of the HDR.

    Note: The HDR is capable of calculating more robust credible regions
    for multimodal distributions. It is identical to the usual probability
    regions for distributions symmetric about the mean. Using this should
    lead to more realistic errorbars and 3-sigma intervals for multimodal
    outputs.
    """
    # Get best fit value
    best = xx[np.argmax(pdf)]
    z = erf(sigma / np.sqrt(2))
    # Sort the pdf in reverse order
    idx = np.argsort(pdf)[::-1]
    # Find where the CDF reaches 100*z%
    idx_hdr = np.where(cdf >= z)[0][0]
    # Isolate the HDR
    hdr = pdf[idx][:idx_hdr]
    # Get the minimum density
    hdr_min = hdr.min()
    # Get CI: every x whose density exceeds the HDR threshold is inside;
    # the interval is the extent of those points.
    low = xx[pdf > hdr_min].min()
    high = xx[pdf > hdr_min].max()
    return best, low, high
def display_star_fin(star, c):
    """Display stellar information retrieved from catalogs.

    Args:
        star: Object exposing Gaia/TIC/KIC identifiers plus temp, rad, plx,
            lum, dist (each with *_e uncertainty) and Av attributes.
        c: termcolor color name used for all printed lines.
    """
    temp, temp_e = star.temp, star.temp_e
    rad, rad_e = star.rad, star.rad_e
    plx, plx_e = star.plx, star.plx_e
    lum, lum_e = star.lum, star.lum_e
    dist, dist_e = star.dist, star.dist_e
    print(colored(f'\t\t\tGaia DR2 ID : {star.g_id}', c))
    # TIC/KIC identifiers are optional; only print them when present.
    if star.tic:
        print(colored(f'\t\t\tTIC : {star.tic}', c))
    if star.kic:
        print(colored(f'\t\t\tKIC : {star.kic}', c))
    print(colored('\t\t\tGaia Effective temperature : ', c), end='')
    print(colored(f'{temp:.3f} +/- {temp_e:.3f}', c))
    # Radius and luminosity may be missing from the catalog.
    if rad is not None:
        print(colored('\t\t\tGaia Stellar radius : ', c), end='')
        print(colored(f'{rad:.3f} +/- {rad_e:.3f}', c))
    if lum is not None:
        print(colored('\t\t\tGaia Stellar Luminosity : ', c), end='')
        print(colored(f'{lum:.3f} +/- {lum_e:.3f}', c))
    print(colored('\t\t\tGaia Parallax : ', c), end='')
    print(colored(f'{plx:.3f} +/- {plx_e:.3f}', c))
    print(colored('\t\t\tBailer-Jones distance : ', c), end='')
    print(colored(f'{dist:.3f} +/- {dist_e:.3f}', c))
    print(colored('\t\t\tMaximum Av : ', c), end='')
    print(colored(f'{star.Av:.3f}', c))
    print('')
    pass
def display_star_init(star, c):
    """Print the ARIADNE banner and the name of the star being fitted.

    Args:
        star: Object exposing a ``starname`` attribute.
        c: termcolor color name used for all printed lines.
    """
    print(colored('\n\t\t#####################################', c))
    print(colored('\t\t## ARIADNE ##', c))
    print(colored('\t\t#####################################', c))
    print(colored(' spectrAl eneRgy dIstribution', c), end=' ')
    print(colored('bAyesian moDel averagiNg fittEr', c))
    print(colored('\n\t\t\tAuthor : Jose Vines', c))
    print(colored('\t\t\tContact : jose . vines at ug . uchile . cl', c))
    print(colored('\t\t\tStar : ', c), end='')
    print(colored(star.starname, c))
    pass
def display_routine(engine, live_points, dlogz, ndim, bound=None, sample=None,
                    nthreads=None, dynamic=None):
    """Display program information.

    What is displayed is:
    Program name
    Program author
    Star selected
    Algorithm used (i.e. Multinest or Dynesty)
    Setup used (i.e. Live points, dlogz tolerance)

    The bound/sample/nthreads/dynamic settings are only shown for the
    Dynesty and Bayesian Model Averaging engines.
    """
    # A random color per run, purely cosmetic.
    colors = [
        'red', 'green', 'blue', 'yellow',
        'grey', 'magenta', 'cyan', 'white'
    ]
    c = random.choice(colors)
    # Normalize engine names for display.
    if engine == 'multinest':
        engine = 'MultiNest'
    if engine == 'dynesty':
        engine = 'Dynesty'
    print(colored('\n\t\t*** EXECUTING MAIN FITTING ROUTINE ***', c))
    print(colored('\t\t\tSelected engine : ', c), end='')
    print(colored(engine, c))
    print(colored('\t\t\tLive points : ', c), end='')
    print(colored(str(live_points), c))
    print(colored('\t\t\tlog Evidence tolerance : ', c), end='')
    print(colored(str(dlogz), c))
    print(colored('\t\t\tFree parameters : ', c), end='')
    print(colored(str(ndim), c))
    if engine == 'Dynesty' or engine == 'Bayesian Model Averaging':
        print(colored('\t\t\tBounding : ', c), end='')
        print(colored(bound, c))
        print(colored('\t\t\tSampling : ', c), end='')
        print(colored(sample, c))
        print(colored('\t\t\tN threads : ', c), end='')
        print(colored(nthreads, c))
        if dynamic:
            print(colored('\t\t\tRunning the Dynamic Nested Sampler', c))
    print('')
    pass
def end(coordinator, elapsed_time, out_folder, engine, use_norm):
    """Display end of run information.

    What is displayed is:
    best fit parameters
    elapsed time
    Spectral type

    Args:
        coordinator: Per-parameter flags; a truthy entry marks that
            parameter as fixed (no uncertainties printed).
        elapsed_time: Human-readable elapsed-time string.
        out_folder: Folder containing the pickled fitter output.
        engine: Engine name; 'Bayesian Model Averaging' selects BMA.pkl and
            extra isochrone output.
        use_norm: If True the model used a normalization parameter instead
            of distance.
    """
    colors = [
        'red', 'green', 'blue', 'yellow',
        'grey', 'magenta', 'cyan', 'white'
    ]
    c = random.choice(colors)
    # Parameter print order; 'norm' replaces 'dist' in normalized fits.
    if use_norm:
        order = np.array(['teff', 'logg', 'z', 'norm', 'rad', 'Av'])
    else:
        order = np.array(
            ['teff', 'logg', 'z', 'dist', 'rad', 'Av']
        )
    if engine == 'Bayesian Model Averaging':
        res_dir = f'{out_folder}/BMA.pkl'
    else:
        res_dir = f'{out_folder}/{engine}_out.pkl'
    # Load the pickled results produced by the fitting routine.
    with closing(open(res_dir, 'rb')) as jar:
        out = pickle.load(jar)
    star = out['star']
    mask = star.filter_mask
    n = int(star.used_filters.sum())
    # One excess-noise parameter per used photometric filter.
    for filt in star.filter_names[mask]:
        p_ = get_noise_name(filt) + '_noise'
        order = np.append(order, p_)
    theta = np.zeros(order.shape[0] - 1 + n)
    for i, param in enumerate(order):
        if param != 'loglike':
            theta[i] = out['best_fit_averaged'][param]
        # NOTE(review): 'inflation' never appears in `order` as built above,
        # so this branch looks unreachable here -- confirm before relying on it.
        if param == 'inflation':
            for m, fi in enumerate(star.filter_names[mask]):
                _p = get_noise_name(fi) + '_noise'
                theta[i + m] = out['best_fit_averaged'][_p]
    if engine != 'Bayesian Model Averaging':
        z, z_err = out['global_lnZ'], out['global_lnZerr']
    print('')
    print(colored('\t\t\tFitting finished.', c))
    print(colored('\t\t\tBest fit parameters are:', c))
    # Build the whole report in fmt_str, then print it once.
    fmt_str = ''
    for i, p in enumerate(order):
        p2 = p
        if 'noise' in p:
            continue
        fmt_str += '\t\t\t'
        fmt = 'f'
        # Display-name and format overrides for specific parameters.
        if p == 'norm':
            p2 = '(R/D)^2'
            fmt = 'e'
        if p == 'z':
            p2 = '[Fe/H]'
        fmt_str += f'{p2} : {theta[i]:.4{fmt}} '
        if not coordinator[i]:
            unlo, unhi = out['uncertainties_averaged'][p]
            lo, up = out['confidence_interval_averaged'][p]
            fmt_str += f'+ {unhi:.4{fmt}} - {unlo:.4{fmt}} '
            fmt_str += f'[{lo:.4{fmt}}, {up:.4{fmt}}]\n'
        else:
            fmt_str += 'fixed\n'
    # Derived quantities are only available for distance-based fits.
    if not use_norm:
        ad = out['best_fit_averaged']['AD']
        unlo, unhi = out['uncertainties_averaged']['AD']
        lo, up = out['confidence_interval_averaged']['AD']
        fmt_str += f'\t\t\tAngular Diameter : {ad:.4f} '
        fmt_str += f'+ {unhi:.4f} - {unlo:.4f} [{lo:.4f}, {up:.4f}]\n'
        mass = out['best_fit_averaged']['grav_mass']
        unlo, unhi = out['uncertainties_averaged']['grav_mass']
        lo, up = out['confidence_interval_averaged']['grav_mass']
        fmt_str += f'\t\t\tGrav mass : {mass:.4f} '
        fmt_str += f'+ {unhi:.4f} - {unlo:.4f} [{lo:.4f}, {up:.4f}]\n'
        lum = out['best_fit_averaged']['lum']
        unlo, unhi = out['uncertainties_averaged']['lum']
        lo, up = out['confidence_interval_averaged']['lum']
        fmt_str += f'\t\t\tLuminosity : {lum:.4f} '
        fmt_str += f'+ {unhi:.4f} - {unlo:.4f} [{lo:.4f}, {up:.4f}]\n'
        # Isochrone-derived quantities only exist for BMA runs.
        if engine == 'Bayesian Model Averaging':
            miso = out['best_fit_averaged']['iso_mass']
            unlo, unhi = out['uncertainties_averaged']['iso_mass']
            lo, up = out['confidence_interval_averaged']['iso_mass']
            fmt_str += f'\t\t\tIso mass : {miso:.4f} '
            fmt_str += f'+ {unhi:.4f} - {unlo:.4f} [{lo:.4f}, {up:.4f}]\n'
            age = out['best_fit_averaged']['age']
            unlo, unhi = out['uncertainties_averaged']['age']
            lo, up = out['confidence_interval_averaged']['age']
            fmt_str += f'\t\t\tAge (Gyr) : {age:.4f} '
            fmt_str += f'+ {unhi:.4f} - {unlo:.4f} [{lo:.4f}, {up:.4f}]\n'
            eep = out['best_fit_averaged']['eep']
            unlo, unhi = out['uncertainties_averaged']['eep']
            lo, up = out['confidence_interval_averaged']['eep']
            fmt_str += f'\t\t\tEEP : {eep:.4f} '
            fmt_str += f'+ {unhi:.4f} - {unlo:.4f} [{lo:.4f}, {up:.4f}]\n'
    # Per-filter excess-noise parameters are reported last.
    for i, p in enumerate(order):
        if 'noise' not in p:
            continue
        unlo, unhi = out['uncertainties_averaged'][p]
        lo, up = out['confidence_interval_averaged'][p]
        p_ = 'Excess '
        # SDSS/PS1 filter names contain an extra underscore-separated piece.
        if 'SDSS' not in p and 'PS1' not in p:
            p1, p2 = p.split('_')
        else:
            p1, p2, p3 = p.split('_')
            p1 += '_' + p2
            p2 = p3
        fmt_str += f'\t\t\t{p_ + p1} {p2} : {theta[i]:.4f} '
        fmt_str += f'+ {unhi:.4f} - {unlo:.4f} [{lo:.4f}, {up:.4f}]\n'
    print(colored(fmt_str, c), end='')
    spt = out['spectral_type']
    print(colored('\t\t\tMamajek Spectral Type : ', c), end='')
    print(colored(spt, c))
    if engine != 'Bayesian Model Averaging':
        print(colored('\t\t\tlog Bayesian evidence : ', c), end='')
        print(colored(f'{z:.3f} +/-', c), end=' ')
        print(colored(f'{z_err:.3f}', c))
    else:
        # For BMA print the per-model weights instead of a single evidence.
        probs = out['weights']
        for k in probs.keys():
            text = f'\t\t\t{k} probability : '
            print(colored(text, c), end='')
            print(colored(f'{probs[k]:.4f}', c))
    print(colored('\t\t\tElapsed time : ', c), end='')
    print(colored(elapsed_time, c))
    pass
def create_dir(path):
    """Create a directory, reporting the outcome instead of raising.

    Args:
        path: Directory path to create (parents must already exist).
    """
    try:
        os.mkdir(path)
    except OSError:
        # Most likely the directory already exists; report and carry on.
        message = f"Creation of the directory {path:s} failed. "
        message += "It might already exist"
        print(colored(message, 'red'))
    else:
        print(colored(f"Created the directory {path:s}", 'blue'))
def execution_time(start):
    """Format the wall-clock time elapsed since *start* as a readable string.

    Args:
        start: Reference time in seconds since epoch (as from time.time()).

    Returns:
        A string such as '2 minutes and 5.00 seconds' containing the largest
        non-zero unit (weeks, days, hours or minutes) and every smaller
        unit, always ending with seconds to two decimals.
    """
    total = time.time() - start
    weeks, rest = divmod(total, 604800)
    days, rest = divmod(rest, 86400)
    hours, rest = divmod(rest, 3600)
    minutes, seconds = divmod(rest, 60)
    units = [(weeks, 'weeks'), (days, 'days'),
             (hours, 'hours'), (minutes, 'minutes')]
    # Drop leading zero-valued units so only the largest non-zero unit
    # downwards appears in the output.
    while units and units[0][0] == 0:
        units.pop(0)
    parts = [f'{amount:.0f} {label}' for amount, label in units]
    parts.append(f'{seconds:.2f} seconds')
    if len(parts) == 1:
        return parts[0]
    return ', '.join(parts[:-1]) + ' and ' + parts[-1]
def get_noise_name(filt):
    """Retrieve the white-noise parameter name for a photometric filter.

    Args:
        filt: Full filter name, e.g. 'TYCHO_B_MvB' or '2MASS_H'.

    Returns:
        The short label used for that filter's noise parameter.
    """
    special = {
        'TYCHO_B_MvB': 'BT',
        'TYCHO_V_MvB': 'VT',
        'SPITZER_IRAC_36': 'IRAC 36',
        'SPITZER_IRAC_45': 'IRAC 45',
        'NGTS_I': 'NGTS',
        'WISE_RSR_W1': 'W1',
        'WISE_RSR_W2': 'W2',
    }
    if filt in special:
        return special[filt]
    # SDSS and PS1 filters keep their full name to stay unambiguous.
    if 'SDSS' in filt or 'PS1' in filt:
        return filt
    # Everything else is identified by its last underscore-separated token.
    return filt.split('_')[-1]
def out_filler(samp, logdat, param, name, out, fmt='f', fixed=False,
               method='averaged'):
    """Fill up the output file.

    Computes the best fit, 1-sigma uncertainties and 3-sigma confidence
    interval for one parameter's posterior sample, stores them in ``out``
    and appends a tab-separated line to the ``logdat`` string.

    Args:
        samp: Posterior samples for the parameter.
        logdat: Log string accumulated so far; the new line is appended.
        param: Key under which results are stored in ``out``.
        name: Label written at the start of the log line.
        out: Result dict with 'best_fit_*', 'uncertainties_*' and
            'confidence_interval_*' sub-dicts.
        fmt: Format character used for the numbers ('f' or 'e').
        fixed: If not False, the parameter was fixed to this value.
        method: Either 'averaged' or 'samples'; selects the output sub-dict.

    Returns:
        The updated logdat string.

    Raises:
        Exception: If ``method`` is not 'averaged' or 'samples'.
    """
    if method not in ['averaged', 'samples']:
        raise Exception('Method is wrong!')
    if fixed is False:
        try:
            # Preferred path: highest-density-region interval from a KDE.
            xx, pdf = estimate_pdf(samp)
            cdf = estimate_cdf(samp, hdr=True)
            best, lo, up = credibility_interval_hdr(xx, pdf, cdf, sigma=1)
            _, lo3, up3 = credibility_interval_hdr(xx, pdf, cdf, sigma=3)
        except ValueError:
            # Fallback: plain percentile-based interval.
            wrn = f'HDR failed for parameter {param}, reverting to regular CI'
            wrn += ' calculation. Be sure to check the histograms afterwards'
            wrn += ' for diagnosis.'
            print(colored(wrn, 'red'))
            best, lo, up = credibility_interval(samp)
            _, lo3, up3 = credibility_interval(samp, alpha=3)
        out[f'best_fit_{method}'][param] = best
        logdat += f'{name}\t{best:.4{fmt}}\t'
        out[f'uncertainties_{method}'][param] = (best - lo, up - best)
        logdat += f'{up - best:.4{fmt}}\t{best - lo:.4{fmt}}\t'
        out[f'confidence_interval_{method}'][param] = (lo3, up3)
        # NOTE(review): the stored confidence interval is 3-sigma (lo3/up3)
        # but the logged one below is 1-sigma (lo/up) -- confirm this
        # asymmetry is intentional.
        logdat += f'{lo:.4{fmt}}\t{up:.4{fmt}}\n'
    else:
        # Fixed parameter: record the value with NaN uncertainties.
        out[f'best_fit_{method}'][param] = fixed
        out[f'uncertainties_{method}'][param] = np.nan
        out[f'confidence_interval_{method}'][param] = np.nan
        logdat += f'{name}\t{fixed:.4{fmt}}\t'
        logdat += '(FIXED)\n'
    return logdat
def get_max_from_kde(samp):
    """Get maximum of the given distribution.

    Deprecated: this function unconditionally raises DeprecationWarning on
    entry; everything after the raise is unreachable dead code kept for
    reference only.
    """
    raise DeprecationWarning()
    kde = gaussian_kde(samp)
    xmin = samp.min()
    xmax = samp.max()
    xx = np.linspace(xmin, xmax, 1000)
    kde = kde(xx)
    best = xx[kde.argmax()]
    return best
|
nilq/baby-python
|
python
|
import queue
import cards
import random
#create card decks
class constructBoard:
    """Game board holding three difficulty-tiered decks of cards.

    On construction, 100 random cards are generated and dealt into the
    low-level (0 points), mid-level (1-2 points) and high-level (3+ points)
    decks.
    """

    def __init__(self, lowLevelCards=None, midLevelCards=None, highLevelCards=None):
        """Build the three decks and deal 100 random cards into them.

        Args:
            lowLevelCards: Optional queue for 0-point cards.
            midLevelCards: Optional queue for 1-2 point cards.
            highLevelCards: Optional queue for 3+ point cards.

        BUG FIX: the defaults used to be ``queue.Queue()`` instances created
        once at function-definition time, so every board shared (and kept
        growing) the same three decks.  Defaulting to None and building a
        fresh queue per instance fixes that while staying backward
        compatible for callers that pass their own queues.
        """
        if lowLevelCards is None:
            lowLevelCards = queue.Queue()
        if midLevelCards is None:
            midLevelCards = queue.Queue()
        if highLevelCards is None:
            highLevelCards = queue.Queue()
        self.lowlevelcards = lowLevelCards
        self.midlevelcards = midLevelCards
        self.highlevelcards = highLevelCards
        # Deal 100 random cards, binned by point value.
        for _ in range(100):
            card = cards.cards()
            points = card.getpoints()
            if points == 0:
                lowLevelCards.put(card)
            elif points in (1, 2):
                midLevelCards.put(card)
            else:
                highLevelCards.put(card)

    def _print_four(self, deck):
        """Pop four cards from *deck* and print their 9-row art side by side."""
        images = [deck.get().getimage() for _ in range(4)]
        for row in range(9):
            print(' '.join(image[row] for image in images) + ' ')

    def printboard(self):
        """Print the top four cards of each deck, highest level first."""
        self._print_four(self.highlevelcards)
        self._print_four(self.midlevelcards)
        self._print_four(self.lowlevelcards)
def main():
    """Build a fresh board and print the initial card layout."""
    board = constructBoard()
    board.printboard()
main()
|
nilq/baby-python
|
python
|
from pytest import raises
class IndexedPropertyMapper(object):
    """Proxy returned by IndexedPropertyDescriptor.__get__.

    Translates item access (obj.prop[i]) into calls of the underlying
    property's fget/fset/fdel with the index passed as an extra argument.
    """
    def __init__(self, desc, instance):
        self.desc = desc          # the descriptor (property) being proxied
        self.instance = instance  # the instance the property was accessed on
    def __getitem__(self, item):
        return self.desc.fget(self.instance, item)
    def __setitem__(self, item, value):
        # hmm. is this order of arguments right?
        self.desc.fset(self.instance, value, item)
    def __delitem__(self, item):
        self.desc.fdel(self.instance, item)
class MultiIndexedPropertyMapper(object):
    """Proxy for indexed properties taking several indices.

    Item access with a tuple key (obj.prop[i, j]) unpacks the tuple into
    separate arguments for the underlying fget/fset/fdel.
    """
    def __init__(self, desc, instance):
        self.desc = desc          # the descriptor (property) being proxied
        self.instance = instance  # the instance the property was accessed on
    def __getitem__(self, item):
        return self.desc.fget(self.instance, *item)
    def __setitem__(self, item, value):
        # hmm. is this order of arguments right?
        self.desc.fset(self.instance, *item, value)
    def __delitem__(self, item):
        self.desc.fdel(self.instance, *item)
# Open questions: could we allow __delete__ to invalidate all nodes?
# What does __set__ do — assign the whole mapping? That sounds OK.
# Can we assign slices and translate them to multiple set calls? The slice
# length is unknown, so this is left unresolved.
class IndexedPropertyDescriptor(property):
    """Property variant whose attribute access yields an indexable proxy."""
    def __get__(self, instance, owner):
        return IndexedPropertyMapper(self, instance)
class MultiIndexedPropertyDescriptor(property):
    """Property variant whose attribute access yields a multi-index proxy."""
    def __get__(self, instance, owner):
        return MultiIndexedPropertyMapper(self, instance)
def index(fget, *args, **kwargs):
    """Decorator turning a getter method into an indexed property.

    A getter taking one index parameter (besides self) becomes an
    IndexedPropertyDescriptor (obj.prop[i]); one taking several becomes a
    MultiIndexedPropertyDescriptor (obj.prop[i, j]).

    Raises:
        ValueError: If the getter takes no index parameter at all.
    """
    arg_count = fget.__code__.co_argcount
    if arg_count < 2:
        raise ValueError('index property must take at least one parameter')
    if arg_count == 2:
        return IndexedPropertyDescriptor(fget, *args, **kwargs)
    return MultiIndexedPropertyDescriptor(fget, *args, **kwargs)
def test_indexed_property():
    """Indexed properties are read-only attributes accessed via [] syntax."""
    class Thingy(object):
        @index
        def AddOne(self, i):
            return i + 1
        @index
        def AddTwo(self, i, j):
            return i+j
    t = Thingy()
    # Plain attribute assignment/deletion is rejected: property defines no
    # setter or deleter.
    with raises(AttributeError):
        t.AddOne = 123
    with raises(AttributeError):
        del t.AddOne
    assert t.AddOne[1] == 2
    assert t.AddOne[3] == 4
    # Multi-parameter getters take a tuple key.
    assert t.AddTwo[2,3] == 5
class FibonacciThingy(object):
    """Fibonacci via an indexed property, recursing through the proxy."""
    @index
    def Fib(self, item):
        # Fibonacci numbered 1, 1, 2, 3, ... starting at index 0.
        if item < 0:
            raise KeyError('must be bigger than 0')
        if item == 0 or item == 1:
            return 1
        # Recursive calls go back through the indexed-property machinery.
        return self.Fib[item - 1] + self.Fib[item - 2]
    def method_fib(self, item):
        if item < 0:
            raise KeyError('must be bigger than 0')
        if item == 0 or item == 1:
            return 1
        # NOTE(review): this delegates to the module-level barefaced_fib
        # rather than recursing on self.method_fib -- presumably a baseline
        # for the benchmarks below; confirm it is intentional.
        return barefaced_fib(item - 1) + barefaced_fib(item - 2)
def test_fibonacci():
    """Negative indices raise; small Fibonacci values are correct."""
    t = FibonacciThingy()
    with raises(KeyError):
        t.Fib[-100]
    assert t.Fib[0] == 1
    assert t.Fib[1] == 1
    assert t.Fib[6] == 13
def test_benchmark_fibonacci(benchmark):
    """Benchmark Fibonacci computed through the indexed-property proxy."""
    t = FibonacciThingy()
    benchmark(lambda : t.Fib[20])
    # NOTE(review): this assert makes the test always fail -- presumably a
    # deliberate trick to surface the benchmark output; confirm before
    # removing.
    assert False
def barefaced_fib(item):
    """Return the item-th Fibonacci number (sequence 1, 1, 2, 3, 5, ...).

    Raises:
        KeyError: If item is negative.
    """
    if item < 0:
        raise KeyError('must be bigger than 0')
    previous, current = 1, 1
    # Iteratively advance the pair; item 0 and 1 need no steps at all.
    for _ in range(item - 1):
        previous, current = current, previous + current
    return current
def test_benchmark_barefaced_fib(benchmark):
    """Benchmark the plain recursive Fibonacci as a baseline."""
    benchmark(lambda : barefaced_fib(20))
|
nilq/baby-python
|
python
|
import asyncio
import shutil
from collections import namedtuple
from concurrent.futures.thread import ThreadPoolExecutor
from datetime import datetime, timedelta, timezone
from shlex import split as lex
from subprocess import DEVNULL, Popen
import bottle as bt
import httpx
import peewee as pw
import toml
from waitress import serve
# Configuration (templates, settings.toml, data.db) lives under ~/.config/twitch-py.
confdir = shutil.os.path.expanduser("~") + "/.config/twitch-py"
bt.TEMPLATE_PATH.insert(0, f"{confdir}/views")
# Downloaded profile images and box art are cached under ~/.cache/twitch-py.
cachedir = shutil.os.path.expanduser("~") + "/.cache/twitch-py"
db = pw.SqliteDatabase(f"{confdir}/data.db")  # single SQLite file backing all models
os_ = shutil.sys.platform.lower()  # key used to pick the platform section of settings.toml
Image = namedtuple("Image", "id url")  # one image to download: model id + source URL
Result = namedtuple("Result", "query model")  # raw Helix search hit paired with its cached model
class App:
    """Process-wide state: player subprocess handle, site URL and event log."""
    process: Popen = None  # Holds process id of current stream/vod
    url = "http://localhost:8080/"  # Index page of local site
    messages = []  # Log of events since application start
    # HTTP status codes mapped to the short titles shown by error_page.tpl.
    errors = {
        400: "Bad Request",
        404: "Not Found",
        500: "Server Error",
        502: "Bad Gateway",
    }
    @staticmethod
    def display(message: str = "") -> None:
        """
        Reprints terminal screen with most recent event messages
        Re-centers logo and change list length based on terminal size
        """
        shutil.os.system("clear")
        t = shutil.get_terminal_size()
        # Center each logo line independently for the current terminal width.
        logo = "\n".join(
            line.center(t.columns)
            for line in """
 _ _ _ _
| |___ _(_) |_ ___| |__ _ __ _ _
| __\ \ /\ / / | __/ __| '_ \ _____| '_ \| | | |
| |_ \ V V /| | || (__| | | |_____| |_) | |_| |
\__| \_/\_/ |_|\__\___|_| |_| | .__/ \__, |
|_| |___/ v1.5
""".splitlines()
        )
        # Horizontal rule roughly 2/3 of the terminal width.
        divide = ("─" * round(t.columns / 1.5)).center(t.columns) + "\n"
        print(logo, App.url.center(t.columns), sep="\n", end=divide)
        (m := App.messages).append(message)
        # Show only as many trailing messages as fit beneath the header.
        print(*[f" > {msg}" for msg in m[-min(len(m), (t.lines - 12)) :]], sep="\n")
@bt.hook("before_request")
def _connect_db() -> None:
    """
    The following is run at the start of each page request (user action on webpage)
    """
    db.connect()
    # Login/cache checks are skipped for pages that must work without a user.
    if not any(
        path in bt.request.path
        for path in ["authenticate", "config", "settings", "error"]
    ):
        Db.check_user()  # Redirect to login if no user in data.db
        Db.check_cache()  # If no cache but user login, run initial cache from follows
@bt.hook("after_request")
def _close_db() -> None:
    """Release the SQLite connection once the response has been built."""
    if db.is_closed():
        return
    db.close()
class BaseModel(pw.Model):
    """
    Base class for database models, where data.db is the shared database
    """
    class Meta:
        # All subclasses persist to the module-level SQLite database.
        database = db
class User(BaseModel):
    """
    Model/table for the user login. Necessary to store access token for
    Twitch Helix API requests
    """
    id = pw.IntegerField()  # Twitch numeric user id
    login = pw.TextField()  # lowercase login name
    display_name = pw.TextField()  # capitalized display name
    profile_image_url = pw.TextField()  # avatar URL
    access_token = pw.TextField()  # OAuth token used in Helix request headers
class Streamer(BaseModel):
    """
    Model/table for all Twitch streamers. Holds data for displaying content
    on webpages, and boolean for whether streamer is followed by the user.
    """
    id = pw.IntegerField(primary_key=True)  # Twitch channel id
    login = pw.TextField()
    display_name = pw.TextField()
    broadcaster_type = pw.TextField(default="user")  # If not partner/affiliate
    description = pw.TextField(default="Twitch streamer")  # Default if no description
    profile_image_url = pw.TextField()  # local cache path after Db.cache runs
    followed = pw.BooleanField(default=False)  # whether the logged-in user follows this channel
class Game(BaseModel):
    """
    Holds data for presenting game names and box art. The box art stored
    is a specified size that exists for all games (some sizes are incompatible)
    """
    id = pw.IntegerField(primary_key=True)  # Twitch game/category id
    name = pw.TextField()
    box_art_url = pw.TextField()  # local cache path after Db.cache runs
class Helix:
    """
    Application information to interface with the Helix API
    """
    client_id = "o232r2a1vuu2yfki7j3208tvnx8uzq"
    redirect_uri = "http://localhost:8080/authenticate"
    app_scopes = "user:edit+user:edit:follows+user:read:follows"
    endpoint = "https://api.twitch.tv/helix"
    # OAuth implicit-grant URL the user is sent to for login.
    oauth = (
        "https://id.twitch.tv/oauth2/authorize?client_id="
        f"{client_id}&redirect_uri={redirect_uri}"
        f"&response_type=token&scope={app_scopes}"
    )

    @staticmethod
    def headers() -> dict:
        """
        Prepares headers with app id and stored user-access-token from authentication
        """
        return {
            "Client-ID": Helix.client_id,
            "Authorization": f"Bearer {User.get().access_token}",
        }

    @staticmethod
    def get(params: str) -> list[dict]:
        """
        Blueprint for http requests specifically for Helix API
        Includes necessary client-id and user access token
        Input `params` is used to specify API endpoint as so:
        https://api.twitch.tv/helix/<params>
        The response is of json format
        ```
        {
            "data": [{},{}],
            "pagination":...
        }
        ```
        and the `data` key is selected, which is of type `list[dict]`
        Aborts with a 502 on transport errors or an error payload
        that lacks the "data" key.
        """
        try:
            with httpx.Client(headers=Helix.headers(), timeout=None) as session:
                resp: list[dict] = session.get(f"{Helix.endpoint}/{params}").json()[
                    "data"
                ]
            return resp
        # KeyError covers Helix error payloads with no "data" field,
        # which httpx.HTTPError alone would not catch.
        except (httpx.HTTPError, KeyError) as e:
            App.display(f"Error in handling request with params {params}. Error: {e}")
            bt.abort(code=502, text=f"Error in handling request with params {params}")

    @staticmethod
    def get_iter(params: str) -> list[dict]:
        """
        Blueprint for http requests specifically for Helix API
        Includes necessary client-id and user access token
        Input `params` is used to specify API endpoint as so:
        https://api.twitch.tv/helix/<params>
        The response is of json format
        ```
        {
            "data": [{},{}],
            "pagination":
                {"cursor" : [0-9a-zA-Z]+}
        }
        ```
        The response's `data` field (of type `list[dict]`) is appended to `results`
        The `pagination` cursor, if it exists, is used as a request parameter for a
        subsequent request at the same endpoint to show the next series of results
        Iterates requests with new index of results until no more data is found
        """
        results, data = [], []
        with httpx.Client(headers=Helix.headers(), timeout=None) as session:
            while True:
                resp = session.get(f"{Helix.endpoint}/{params}").json()
                try:
                    data: list[dict] = resp["data"]
                # BUG FIX: dict access raises KeyError, not httpx.HTTPError —
                # the original handler was dead code and an error payload
                # (no "data" key) crashed the request instead of aborting.
                except KeyError as e:
                    App.display(f"Error with {resp}. Caused the error {e}")
                    bt.abort(
                        code=502, text=f"Error with request {Helix.endpoint}/{params}"
                    )
                if data == []:
                    break
                results += data
                if resp["pagination"] == {}:
                    return results
                pagination = resp["pagination"]["cursor"]
                # Replace a previous cursor, or append the first one.
                if "after" in params:
                    params = params[: (params.rfind("=") + 1)] + pagination
                else:
                    params = params + f"&after={pagination}"
        return results
class Fetch:
    """Read-only Helix queries: user profile, follow list and live streams."""
    @staticmethod
    def user(access_token: str) -> User:
        """
        Once user logs in via the twitch portal, the access token taken from
        the /authentication uri is used to fetch user data and populate the 'user'
        table in `data.db`.
        https://api.twitch.tv/helix/users
        headers contain unique user access token (required)
        """
        headers = {
            "Client-ID": Helix.client_id,
            "Authorization": f"Bearer {access_token}",
        }
        try:
            user: dict = httpx.get(
                f"{Helix.endpoint}/users", headers=headers, timeout=None
            ).json()["data"][0]
        except Exception as e:
            App.display(f"Error occurred: {e}")
            bt.abort(code=500, text="Error in fetching user data")
            shutil.sys.exit()
        user["access_token"] = access_token  # persist the token with the profile
        user["id"] = int(user["id"])  # Helix returns ids as strings
        return User.create(**user)
    @staticmethod
    def follows(id: int) -> set[int]:
        """
        Fetches id numbers of user's followed channels.
        https://api.twitch.tv/helix/users/follows?from_id=<user_id>
        """
        resp = Helix.get_iter(f"users/follows?from_id={id}&first=100")
        return {int(follow["to_id"]) for follow in resp}
    @staticmethod
    async def live(ids: set[int]) -> list[dict]:
        """
        Input: set of user ids.
        Splits ids in chunks of 100 (limit of API endpoint) and fetches stream data.
        If channel is not live, data is empty, thus only live stream info is returned.
        https://api.twitch.tv/helix/streams?user_id=<id1>&...&user_id=<id100>
        """
        tmp = list(ids)
        id_lists = [tmp[x : x + 100] for x in range(0, len(tmp), 100)]
        async with httpx.AsyncClient(headers=Helix.headers(), timeout=None) as session:
            # One concurrent request per chunk of up to 100 ids.
            stream_list: list[httpx.Response] = await asyncio.gather(
                *(
                    session.get(
                        f"{Helix.endpoint}/streams?{'&'.join([f'user_id={i}' for i in i_list])}"
                    )
                    for i_list in id_lists
                )
            )
        streams = []
        for resp in stream_list:
            data: list[dict] = resp.json()["data"]
            if data:
                streams += data
        return streams
    @staticmethod
    def stream_info(streams: list[dict]) -> list[dict]:
        """
        From stream data, cache games and users from their ids.
        Caching fetches additional data which is then appended to stream data dict
        """
        async def cache():
            # Cache games and streamers concurrently before enriching streams.
            tasks = []
            for args in [("game_id", "games"), ("user_id", "users")]:
                ids = {int(i) for stream in streams if (i := stream[args[0]])}
                tasks.append(Db.cache(ids, mode=args[1]))
            await asyncio.gather(*tasks)
        asyncio.run(cache())
        for stream in streams:
            channel: Streamer = Streamer.get(int(stream["user_id"]))
            try:
                game = Game.get(int(stream["game_id"]))
                stream["box_art_url"] = game.box_art_url
            except ValueError:
                # game_id can be an empty string; int("") raises ValueError,
                # so fall back to the generic 404 box art.
                stream[
                    "box_art_url"
                ] = "https://static-cdn.jtvnw.net/ttv-static/404_boxart.jpg"
            stream["profile_image_url"] = channel.profile_image_url
            stream["uptime"] = time_elapsed(stream["started_at"])
            # Strip the size placeholder to get the full-size thumbnail URL.
            stream["thumbnail_url"] = stream["thumbnail_url"].replace(
                "-{width}x{height}", ""
            )
        streams.sort(key=lambda stream: stream["viewer_count"], reverse=True)
        return streams
class Db:
    """Local cache management: login check, image/metadata caching, follows."""
    # Streamer fields replaced with model defaults when Helix returns them empty.
    key_defaults = ["broadcaster_type", "description", "offline_image_url"]
    @staticmethod
    def check_user() -> bt.redirect:
        """
        Check if User is logged in (table exists in data.db).
        Redirect to authentication page if no user
        """
        if db.table_exists("user") is False or User.get_or_none() is None:
            App.display("No user found. Please log in.")
            return bt.redirect(Helix.oauth)
    @staticmethod
    def check_cache():
        """Initial creation of database tables and caching if tables do not exist"""
        if (Streamer.table_exists() and Game.table_exists()) is False:
            db.create_tables([Streamer, Game])
            App.display("Building cache")
            follows = Fetch.follows(User.get().id)
            asyncio.run(Db.cache(follows, "users"))
            # Everything cached at this point came from the follow list.
            Streamer.update(followed=True).execute()
    @staticmethod
    async def cache(ids: set[int], mode: str) -> None:
        """
        Caching mode: 'users' or 'games'.
        If game/streamer id does not exist in database, send to caching.
        https://api.twitch.tv/helix/<'games' or 'users'>?id=<id1>&id=<id2>...
        """
        model = Streamer if mode == "users" else Game
        tag = "box_art_url" if mode == "games" else "profile_image_url"
        # Only fetch ids not already present in the database.
        tmp = [i for i in ids if model.get_or_none(i) is None]
        if not tmp:
            return None
        id_lists = [tmp[x : x + 100] for x in range(0, len(tmp), 100)]
        async with httpx.AsyncClient(headers=Helix.headers(), timeout=None) as session:
            resps: list[httpx.Response] = await asyncio.gather(
                *(
                    session.get(
                        f"{Helix.endpoint}/{mode}?{'&'.join([f'id={i}' for i in i_list])}"
                    )
                    for i_list in id_lists
                )
            )
        data = []
        for resp in resps:
            datum: list[dict] = resp.json()["data"]
            if datum:
                data += datum
        for datum in data:
            if mode == "games":
                # Request a fixed box-art size that exists for every game.
                datum["box_art_url"] = datum["box_art_url"].replace(
                    "-{width}x{height}", "-285x380"
                )
            else:
                for key in Db.key_defaults:
                    if not datum[key]:  # Remove to replace with key's default
                        datum.pop(key)
        # `tag` key different for game datum and user datum
        images = [Image(datum["id"], datum[tag]) for datum in data]
        def download_image(image: Image) -> None:
            """Get image data from url, write to file with `mode` directory
            and datum `id` as the filename"""
            data = httpx.get(image.url).content
            with open(f"{cachedir}/{mode}/{image.id}.jpg", "wb") as f:
                f.write(data)
        # Download images in parallel; each is independent file I/O.
        with ThreadPoolExecutor() as tp:
            tp.map(download_image, images)
        for datum in data:
            datum[tag] = f"/cache/{mode}/{datum['id']}.jpg"  # Point to file path
            datum["id"] = int(datum["id"])
            model.create(**datum)  # Discards unused keys
    @staticmethod
    def update_follows() -> set[int]:
        """
        Fetch user's current follows and cache
        Toggle channel follow if follow in database and current do not match
        """
        follows = Fetch.follows(User.get().id)
        asyncio.run(Db.cache(follows, "users"))
        streamers: list[Streamer] = [streamer for streamer in Streamer.select()]
        to_toggle = set()
        for streamer in streamers:
            sid = streamer.id
            # Collect rows whose local `followed` flag disagrees with Twitch.
            if (sid in follows and streamer.followed is not True) or (
                sid not in follows and streamer.followed is True
            ):
                to_toggle.add(streamer)
        if to_toggle:
            asyncio.run(Db.toggle_follow(to_toggle))
        return follows
    @staticmethod
    async def toggle_follow(streamers: set[Streamer]) -> None:
        """Send http POST or DELETE based on value of follow after toggling"""
        url = f"{Helix.endpoint}/users/follows"
        async def send(session: httpx.AsyncClient, data: dict, streamer: Streamer):
            # Flip the local flag first, then mirror the change to Twitch.
            Streamer.update(followed=not streamer.followed).where(
                Streamer.id == streamer.id
            ).execute()
            if streamer.followed is True:
                App.display(f"Unfollowing {streamer.display_name}")
                await session.delete(url, params=data)
            else:
                App.display(f"Following {streamer.display_name}")
                await session.post(url, params=data)
        async with httpx.AsyncClient(headers=Helix.headers(), timeout=None) as session:
            tasks = []
            for streamer in streamers:
                data = {"to_id": str(streamer.id), "from_id": str(User.get().id)}
                tasks.append(send(session, data, streamer))
            await asyncio.gather(*tasks)
@bt.route("/")
def index():
    """Index of web application. Displays live streams of user's follows"""
    follows = Db.update_follows()  # sync local follow flags with Twitch first
    streams = Fetch.stream_info(asyncio.run(Fetch.live(follows)))
    return bt.template("index.tpl", User=User.get(), streams=streams)
@bt.route("/authenticate")
def authenticate():
    """
    User is prompted with login portal. After login, uri redirect includes
    access token. Javascript in `authenticate.tpl` grabs this token which is
    used to fetch user information which is then cached along with token.
    """
    if access_token := bt.request.query.get("access_token"):
        User.create_table()
        user = Fetch.user(access_token)
        App.display(f"Logged in as {user.display_name}")
        return bt.redirect("/")
    # No token yet: serve the page whose JS extracts it from the URL fragment.
    return bt.template("authenticate.tpl")
@bt.route("/<channel>")
def channel(channel, mode=None, data=None):
    """Profile page of channel.

    Query-string flags select an action: follow toggle, watch live, list
    vods, list clips over a date range, or play a specific video URL.
    """
    try:
        channel: Streamer = Streamer.get(
            (Streamer.display_name == channel) | (Streamer.login == channel)
        )
    except pw.DoesNotExist:
        bt.abort(code=404, text="User does not exist")
    date = {"start": "", "end": ""}
    if bt.request.query.get("follow"):
        asyncio.run(Db.toggle_follow({channel}))
        bt.redirect(f"/{channel.login}")
    elif bt.request.query.get("watch"):
        watch_video(channel.login)
        # Return to the previous page once the player has been launched.
        return """<script>setTimeout(function () { window.history.back() });</script>"""
    elif bt.request.query.get("vod"):
        mode = "vod"
        vods = Helix.get_iter(f"videos?user_id={channel.id}&type=archive")
        data = process_data(vods, mode)
    elif bt.request.query.get("clips"):
        mode = "clip"
        # NOTE(review): assumes `start`/`end` query params are always present
        # when `clips` is set (template supplies them) — .get() would return
        # None otherwise and the concatenation would raise. Confirm.
        start = bt.request.query.get("start") + "T00:00:00Z"
        end = bt.request.query.get("end") + "T00:00:00Z"
        clips = Helix.get(
            f"clips?broadcaster_id={channel.id}&first=100&started_at={start}&ended_at={end}"
        )
        data = process_data(clips, mode="clip")
        data = sorted(data, key=lambda info: info["view_count"], reverse=True)
        date = {"start": start[:-10], "end": end[:-10]}
    elif url := bt.request.query.get("video"):
        watch_video(mode="vod", url=url)
        return """<script>setTimeout(function () { window.history.back() });</script>"""
    elif bt.request.query.get("close"):
        bt.redirect(f"/{channel.login}")
    return bt.template("channel.tpl", channel=channel, mode=mode, data=data, date=date)
@bt.route("/search")
def search():
    """
    List results that match search query string and cache results based on id
    For categories, display data from database based on id
    For channels, display data from database as well as request data from endpoint
    """
    query = bt.request.query.q
    t = bt.request.query.t  # "categories" or "channels"
    mode, model, count = (
        ("games", Game, 10) if t == "categories" else ("users", Streamer, 5)
    )
    search_results = Helix.get(f"search/{t}?query={query}&first={count}")
    ids = {int(result["id"]) for result in search_results}
    asyncio.run(Db.cache(ids, mode=mode))
    if t == "categories":
        results = model.select().where(model.id.in_(ids))
    else:
        # Pair each raw search hit with its freshly-cached Streamer row.
        results = [
            Result(result, model.get_by_id(int(result["id"])))
            for result in search_results
        ]
    return bt.template("search.tpl", query=query, mode=mode, results=results)
@bt.route("/following")
def following():
    """Read data.db for users with `followed == True`"""
    Db.update_follows()  # refresh flags before displaying
    # `== True` is a peewee expression, not a Python comparison.
    follows = (
        Streamer.select()
        .where(Streamer.followed == True)
        .order_by(Streamer.display_name)
    )
    return bt.template("following.tpl", follows=follows)
@bt.route("/categories/<game_id>")
def browse(game_id="all"):
    """
    `/all` View list of games by viewer count
    `/<game_id>` View top streams under game category
    """
    if game_id == "all":
        return bt.redirect("/top/games")
    else:
        try:
            game: Game = Game.get(int(game_id))
            streams = Helix.get(f"streams?first=50&game_id={game_id}")
            data = Fetch.stream_info(streams)
            return bt.template("top.tpl", data=data, t="channels_filter", game=game)
        except httpx.HTTPError:
            bt.abort(code=404, text=f"Cannot find streams for game id {game_id}")
@bt.route("/top/<t>")
def top(t):
    """
    `/games` View list of top games by total viewer count
    `/streams` View list of top streams across platform
    """
    if t == "channels":
        top_streams = Helix.get("streams?first=50")
        data = Fetch.stream_info(top_streams)
    elif t == "games":
        games = [int(g["id"]) for g in Helix.get("games/top?first=100")]
        asyncio.run(Db.cache(set(games), mode="games"))
        data = list(Game.select().where(Game.id.in_(games)))
        # Preserve the API's popularity ordering, which the DB query loses.
        data.sort(key=lambda x: games.index(x.id))
    else:
        bt.abort(code=400, text="Not a valid type for /top")
    return bt.template("top.tpl", data=data, t=t)
@bt.route("/settings")
def settings():
    """
    Settings page to view current settings, open settings file,
    clear cache, and log out.
    """
    command = lex(f"xdg-open {confdir}/static/settings.toml")
    if bt.request.query.get("open"):
        Popen(command)
        return bt.redirect("/settings")
    elif bt.request.query.get("cache"):
        App.display("Clearing cache...")
        db.drop_tables([Streamer, Game])
        shutil.os.system(f"rm -f {cachedir}/games/* {cachedir}/users/*")
        return bt.redirect("/settings")
    elif bt.request.query.get("logout"):
        App.display("Logging out...")
        # Dropping User forces re-authentication on the next request.
        db.drop_tables([User, Streamer, Game])
        return bt.redirect("/settings")
    try:
        config = toml.load(f"{confdir}/static/settings.toml")[f"{os_}"]
    except toml.TomlDecodeError as e:
        # Open the broken file for the user to fix, then abort the request.
        Popen(command)
        bt.abort(code=404, text="Could not parse settings.toml")
    return bt.template("settings.tpl", config=config)
@bt.route("/static/<filename:path>")
def send_static(filename):
    """Serve files located in configuration directory"""
    return bt.static_file(filename, root=f"{confdir}/static/")
@bt.route("/cache/<filename:path>")
def cache(filename):
    """Serve images cached in ~/.cache/twitch-py"""
    return bt.static_file(filename, root=f"{cachedir}/")
# One handler per anticipated HTTP error code; all render the shared error
# page with the short title from App.errors.
@bt.error(400)
def error400(error):
    return bt.template("error_page.tpl", code=App.errors[400], error=error)
@bt.error(404)
def error404(error):
    return bt.template("error_page.tpl", code=App.errors[404], error=error)
@bt.error(500)
def error500(error):
    return bt.template("error_page.tpl", code=App.errors[500], error=error)
@bt.error(502)
def error502(error):
    return bt.template("error_page.tpl", code=App.errors[502], error=error)
def time_elapsed(start: str, d="") -> str:
    """Return compact elapsed time since *start* as "[Nd]HhMMm".

    *start* is an ISO-like UTC timestamp ("%Y-%m-%dT%H:%M:%SZ"); the day
    prefix appears only when at least one full day has passed.
    """
    started = datetime.strptime(start, "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=timezone.utc)
    seconds = round((datetime.now(tz=timezone.utc) - started).total_seconds())
    delta = str(timedelta(seconds=seconds))  # e.g. "3 days, 1:02:00" or "1:02:00"
    if "d" in delta:
        # Keep the day count from "N day(s), ..." as an "Nd" prefix.
        d = delta[: delta.find("d") - 1] + "d"
    h, m, s = delta.split(" ")[-1].split(":")
    return f"{d}{h}h{m}m"
def watch_video(channel: str = "", mode: str = "live", url: str = "") -> None:
    """
    Save process if of running video as App attribute for later termination.
    Passes through player and arg settings from `settings.toml`.
    """
    c = toml.load(f"{confdir}/static/settings.toml")[f"{os_}"]
    # Single-player mode: stop the previous stream before starting a new one.
    if c["multi"] is False and App.process is not None:
        App.process.terminate()
    if mode == "live":
        App.display(f"Launching stream twitch.tv/{channel}")
        command = f'streamlink -l none -p {c["app"]} -a "{c["args"]}" \
            --twitch-disable-ads --twitch-low-latency twitch.tv/{channel} best'
    else:
        # vod/clip playback goes straight to the configured player.
        App.display(f"Launching video: {url}")
        command = f'{c["app"]} {c["args"]} --really-quiet {url}'
    p = Popen(lex(command), stdout=DEVNULL)
    # Only track the handle when a single concurrent player is allowed.
    if c["multi"] is False:
        App.process = p
def process_data(data: list[dict], mode: str) -> list[dict]:
    """
    Format data of vod/clip for presenting. For clips, cache game data
    and fetch relevant vod with timestamp of clip.
    """
    if mode == "vod":
        for vod in data:
            vod["thumbnail_url"] = vod["thumbnail_url"].replace(
                "%{width}x%{height}", "480x270"
            )
            if not vod["thumbnail_url"]:
                # Thumbnail not generated yet (vod still processing).
                vod[
                    "thumbnail_url"
                ] = "https://vod-secure.twitch.tv/_404/404_processing_320x180.png"
            vod["created_at"] = time_elapsed(vod["created_at"])
    if mode == "clip":
        for clip in data:
            # Fallbacks in case the game lookup below fails.
            clip.setdefault(
                "box_art_url", "https://static-cdn.jtvnw.net/ttv-static/404_boxart.jpg"
            )
            clip.setdefault("game_name", "Streaming")
            clip["time_since"] = time_elapsed(clip["created_at"])
            clip["thumbnail_url"] = clip["thumbnail_url"].rsplit("-", 1)[0] + ".jpg"
        asyncio.run(
            Db.cache(
                {int(gid) for clip in data if (gid := clip["game_id"])}, mode="games"
            )
        )
        for clip in data:
            try:
                game: Game = Game.get(int(clip["game_id"]))
                clip["box_art_url"] = game.box_art_url
                clip["game_name"] = game.name
            except ValueError:
                # Empty game_id -> int("") raises; keep the defaults set above.
                pass
        asyncio.run(vod_from_clip(data))
    return data
async def vod_from_clip(clips: list[dict]) -> list[dict]:
    """
    Fetch vod clip was taken from if it exists. Calculate timestamp of clip in
    vod using formatted date strings.
    """
    to_fetch = [vod_id for clip in clips if (vod_id := clip["video_id"])]
    async with httpx.AsyncClient(headers=Helix.headers(), timeout=None) as session:
        vod_data = await asyncio.gather(
            *(
                session.get(f"{Helix.endpoint}/videos?id={vod_id}")
                for vod_id in to_fetch
            )
        )
    # Responses arrive in request order, so vods align with clips that
    # have a video_id; pop(0) consumes them in lockstep below.
    vods = [resp.json()["data"][0] for resp in vod_data]
    for clip in clips:
        if clip["video_id"]:
            clip["vod"] = vods.pop(0)  # Consume vod if vod exists for clip
            vod_id, timestamp = clip["video_id"], clip["created_at"]
            vod_start = datetime.strptime(
                clip["vod"]["created_at"], "%Y-%m-%dT%H:%M:%SZ"
            ).replace(tzinfo=timezone.utc)
            timestamp = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%SZ").replace(
                tzinfo=timezone.utc
            )
            # NOTE(review): the 61-second rewind appears to compensate for
            # clip creation lag so the link lands just before the moment —
            # confirm the intended offset.
            elapsed = round((timestamp - vod_start).total_seconds() - 61)
            if "h" not in clip["vod"]["duration"]:
                clip["vod"]["duration"] = f"0h{clip['vod']['duration']}"
            minutes, seconds = divmod(elapsed, 60)
            hours, minutes = divmod(minutes, 60)
            clip[
                "vod_link"
            ] = f"http://www.twitch.tv/videos/{vod_id}/?t={hours}h{minutes}m{seconds}s"
        else:
            clip["vod_link"] = None
    return clips
def install(arg: str) -> None:
    """Run the latest installation script without having to clone repo if app installed.

    NOTE(review): downloads and executes a remote shell script — trusted
    source, but worth flagging for anyone auditing this code path.
    """
    commands = [
        "curl -sL -o twitch-install.sh https://raw.githubusercontent.com/RaeedAhmed/twitch-py/master/install.sh",
        "chmod +x twitch-install.sh",
        f"./twitch-install.sh -{arg}",
        "rm twitch-install.sh",
    ]
    # Run sequentially; each step depends on the previous one completing.
    for command in commands:
        Popen(lex(command)).wait()
if __name__ == "__main__":
    # CLI entry point: no arguments launches the local web server;
    # otherwise exactly one flag is parsed below.
    docs = """Usage: twitch-py [COMMAND]
    -h, --help Display help for commands
    -c, --clear-cache Clear cached data while preserving login
    -s, --settings Open settings file to edit
    --update Install twitch-py from latest git repo
    --uninstall Remove all associated files from system
    """
    arg = shutil.sys.argv[1:]
    if not arg:
        App.display("Launching server...")
        try:
            # waitress serves the bottle app; threads handle concurrent requests.
            serve(app=bt.app(), host="localhost", threads=16, port=8080)
        except KeyboardInterrupt:
            pass
        except httpx.HTTPError as e:
            App.display(f"Error: {e}. Retrying...")
            bt.redirect(bt.request.path)
        finally:
            App.display("Exiting...")
    elif len(arg) > 1:
        print("Too many arguments. Use -h for help")
    elif arg[0] in ["-h", "--help", "help"]:
        print(docs)
    elif arg[0] in ["-c", "--clear-cache"]:
        try:
            App.display("Clearing cache...")
            db.drop_tables([Streamer, Game])
            shutil.os.system(f"rm -f {cachedir}/games/* {cachedir}/users/*")
        except pw.OperationalError:
            App.display("Database or cache does not exist")
    elif arg[0] in ["--update", "update"]:
        install("d")
    elif arg[0] in ["--uninstall", "uninstall"]:
        install("u")
    elif arg[0] in ["-s", "--settings"]:
        cmd = lex(f"xdg-open {confdir}/static/settings.toml")
        Popen(cmd)
    else:
        print("Command not recognized. Use -h for help")
        print(docs)
    shutil.sys.exit()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/ForgottenPassword2.ui'
#
# Created by: PyQt5 UI code generator 5.15.6
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_ForgottenPassword2Screen(object):
    """pyuic5-generated layout for the forgotten-password verification dialog.

    Generated code — regenerate from the .ui file rather than editing by hand.
    """
    def setupUi(self, ForgottenPassword2Screen):
        """Build all widgets and apply fixed geometry/stylesheets."""
        ForgottenPassword2Screen.setObjectName("ForgottenPassword2Screen")
        ForgottenPassword2Screen.resize(1340, 720)
        ForgottenPassword2Screen.setSizeGripEnabled(False)
        # Root container filling the whole dialog.
        self.widget = QtWidgets.QWidget(ForgottenPassword2Screen)
        self.widget.setGeometry(QtCore.QRect(0, 0, 1340, 720))
        self.widget.setStyleSheet("background-color: rgb(69, 69, 69);")
        self.widget.setObjectName("widget")
        self.Title = QtWidgets.QLabel(self.widget)
        self.Title.setGeometry(QtCore.QRect(612, 20, 116, 51))
        self.Title.setStyleSheet("font: 36pt \"Sans Serif\"; color:rgb(239, 239, 239)")
        self.Title.setObjectName("Title")
        # Tab bar of navigation buttons; the active tab uses the light style.
        self.TabBar = QtWidgets.QWidget(self.widget)
        self.TabBar.setGeometry(QtCore.QRect(0, 80, 1340, 80))
        self.TabBar.setStyleSheet("background-color: rgb(239, 239, 239);")
        self.TabBar.setObjectName("TabBar")
        self.LoginTab = QtWidgets.QPushButton(self.TabBar)
        self.LoginTab.setGeometry(QtCore.QRect(10, 5, 200, 70))
        self.LoginTab.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.LoginTab.setStyleSheet("border: 2px solid;\n"
            "border-radius: 20px;\n"
            "border-color:rgb(69, 69, 69);\n"
            "background-color: rgb(69, 69, 69);\n"
            "font: 36pt \"Sans Serif\"; color:rgb(239, 239, 239);\n"
            "")
        self.LoginTab.setObjectName("LoginTab")
        self.SignUpTab = QtWidgets.QPushButton(self.TabBar)
        self.SignUpTab.setGeometry(QtCore.QRect(220, 5, 200, 70))
        self.SignUpTab.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.SignUpTab.setStyleSheet("border: 2px solid;\n"
            "border-radius: 20px;\n"
            "border-color:rgb(69, 69, 69);\n"
            "background-color: rgb(69, 69, 69);\n"
            "font: 36pt \"Sans Serif\"; color:rgb(239, 239, 239);\n"
            "")
        self.SignUpTab.setObjectName("SignUpTab")
        # Active tab: light background, dark text.
        self.ForgottenPasswordTab = QtWidgets.QPushButton(self.TabBar)
        self.ForgottenPasswordTab.setGeometry(QtCore.QRect(430, 5, 200, 70))
        self.ForgottenPasswordTab.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.ForgottenPasswordTab.setStyleSheet("border: 2px solid;\n"
            "border-radius: 20px;\n"
            "border-color:rgb(69, 69, 69);\n"
            "background-color: rgb(239, 239, 239);\n"
            "font: 12pt \"Sans Serif\"; color:rgb(69, 69, 69);\n"
            "")
        self.ForgottenPasswordTab.setObjectName("ForgottenPasswordTab")
        self.ResetPasswordTab = QtWidgets.QPushButton(self.TabBar)
        self.ResetPasswordTab.setGeometry(QtCore.QRect(640, 5, 200, 70))
        self.ResetPasswordTab.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.ResetPasswordTab.setStyleSheet("border: 2px solid;\n"
            "border-radius: 20px;\n"
            "border-color:rgb(69, 69, 69);\n"
            "background-color: rgb(69, 69, 69);\n"
            "font: 20pt \"Sans Serif\"; color:rgb(239, 239, 239);\n"
            "")
        self.ResetPasswordTab.setObjectName("ResetPasswordTab")
        # Main content area: verification-code prompt, input and submit.
        self.MainWidget = QtWidgets.QWidget(self.widget)
        self.MainWidget.setGeometry(QtCore.QRect(10, 170, 1320, 540))
        self.MainWidget.setStyleSheet("background-color: rgb(239, 239, 239);\n"
            "border-radius: 20px;")
        self.MainWidget.setObjectName("MainWidget")
        self.VerificationCodeText = QtWidgets.QLabel(self.MainWidget)
        self.VerificationCodeText.setGeometry(QtCore.QRect(300, 200, 301, 61))
        self.VerificationCodeText.setStyleSheet("font: 25pt \"Sans Serif\"; color:rgb(69, 69, 69);\n"
            "background-color: rgb(239, 239, 239); padding: 5px;")
        self.VerificationCodeText.setObjectName("VerificationCodeText")
        self.VerificationCodeInput = QtWidgets.QLineEdit(self.MainWidget)
        self.VerificationCodeInput.setGeometry(QtCore.QRect(720, 200, 261, 60))
        self.VerificationCodeInput.setStyleSheet("background-color: rgb(239, 239, 239);\n"
            "color: rgb(69, 69, 69);\n"
            "font: 18pt \"Sans Serif\";\n"
            "border: 2px solid;\n"
            "border-radius: 20px;\n"
            "border-color:rgb(69, 69, 69);")
        self.VerificationCodeInput.setText("")
        self.VerificationCodeInput.setCursorPosition(0)
        self.VerificationCodeInput.setObjectName("VerificationCodeInput")
        self.SubmitButton = QtWidgets.QPushButton(self.MainWidget)
        self.SubmitButton.setGeometry(QtCore.QRect(570, 440, 200, 70))
        self.SubmitButton.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.SubmitButton.setStyleSheet("border: 2px solid;\n"
            "border-radius: 20px;\n"
            "border-color:rgb(69, 69, 69);\n"
            "background-color: rgb(69, 69, 69);\n"
            "font: 36pt \"Sans Serif\"; color:rgb(239, 239, 239);\n"
            "")
        self.SubmitButton.setObjectName("SubmitButton")
        self.VerificationText = QtWidgets.QLabel(self.MainWidget)
        self.VerificationText.setGeometry(QtCore.QRect(305, 60, 730, 61))
        self.VerificationText.setStyleSheet("font: 25pt \"Sans Serif\"; color:rgb(69, 69, 69);\n"
            "background-color: rgb(239, 239, 239); padding: 5px;")
        self.VerificationText.setObjectName("VerificationText")
        self.retranslateUi(ForgottenPassword2Screen)
        QtCore.QMetaObject.connectSlotsByName(ForgottenPassword2Screen)
    def retranslateUi(self, ForgottenPassword2Screen):
        """Apply translatable UI strings via Qt's translation mechanism."""
        _translate = QtCore.QCoreApplication.translate
        ForgottenPassword2Screen.setWindowTitle(_translate("ForgottenPassword2Screen", "Visualising the Riemann Hypothesis - Forgotten Password"))
        self.Title.setText(_translate("ForgottenPassword2Screen", "Login"))
        self.LoginTab.setText(_translate("ForgottenPassword2Screen", "Login"))
        self.SignUpTab.setText(_translate("ForgottenPassword2Screen", "Sign Up"))
        self.ForgottenPasswordTab.setText(_translate("ForgottenPassword2Screen", "Forgotten Password"))
        self.ResetPasswordTab.setText(_translate("ForgottenPassword2Screen", "Reset Password"))
        self.VerificationCodeText.setText(_translate("ForgottenPassword2Screen", "<html><head/><body><p align=\"right\">Verification Code:</p></body></html>"))
        self.VerificationCodeInput.setPlaceholderText(_translate("ForgottenPassword2Screen", "Enter Verification Code"))
        self.SubmitButton.setText(_translate("ForgottenPassword2Screen", "Submit"))
        self.VerificationText.setText(_translate("ForgottenPassword2Screen", "<html><head/><body><p align=\"center\">A Verification Code has been sent to your email</p></body></html>"))
if __name__ == "__main__":
    # Standalone preview: show this dialog without the rest of the application.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    ForgottenPassword2Screen = QtWidgets.QDialog()
    ui = Ui_ForgottenPassword2Screen()
    ui.setupUi(ForgottenPassword2Screen)
    ForgottenPassword2Screen.show()
    sys.exit(app.exec_())
|
nilq/baby-python
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test for NCHW[x]c convolution"""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.util import get_const_tuple
import pytest
from common import get_all_backend
def _transform_data(data, bn):
# NCHW -> NCHW[x]c
batch_size, channel, height, width = data.shape
data = np.reshape(data, (batch_size, channel//bn, bn, height, width))
data = np.transpose(data, (0, 1, 3, 4, 2))
return data
def _transform_kernel(kernel, ic_bn, oc_bn):
# OIHW -> OIHW[x]i[x]o
out_channel, in_channel, kh, kw = kernel.shape
kernel = np.reshape(kernel, (out_channel//oc_bn, oc_bn, in_channel//ic_bn, ic_bn//4, kh, kw, 4))
kernel = np.transpose(kernel, (0, 2, 4, 5, 3, 1, 6))
return kernel
def verify_group_conv2d_NCHWc_int8(batch, in_channel, groups, in_size, num_filter, kernel, stride,
                                   padding, dilation=1, add_bias=False, add_relu=False, dtype="int32"):
    """Functionally check the x86 NCHW[x]c int8 group conv2d against the
    reference NCHW implementation.

    Builds the blocked placeholders, computes a reference result with
    ``tvm.topi.testing.conv2d_nchw_python``, then compiles and runs the topi
    operator on each target and compares outputs.

    Note: add_bias / add_relu are accepted but not used by the body.
    """
    assert dilation == 1, "conv2d_NCHWc does not support dilation for now."
    print("Workload: (%d, %d, %d, %d, %d, %d, %d, %d)" %
          (batch, in_channel, groups, in_size, num_filter, kernel, stride, padding))
    in_height = in_width = in_size
    # for testing functionality,
    # we choose arbitrary block size that can divide the channel,
    # regardless of the performance.
    oc_block = 1
    for bn in range(16, 0, -1):
        if num_filter % bn == 0:
            oc_block = bn
            break
    ic_block = 8
    # Silence autotvm fallback warnings for the duration of the check.
    autotvm.GLOBAL_SCOPE.silent = True
    # Blocked input: (N, C//ic_block, H, W, ic_block), uint8 activations.
    A = te.placeholder((batch, in_channel//ic_block, in_height, in_width, ic_block), name='A', dtype='uint8')
    # Blocked 7D kernel: (O//oc_block, I//ic_block//groups, kh, kw, ic_block//4, oc_block, 4), int8 weights.
    W = te.placeholder((num_filter//oc_block, in_channel//ic_block//groups, kernel, kernel, ic_block//4, oc_block, 4), name='W', dtype='int8')
    @memoize("topi.tests.test_topi_conv2d_NCHWc_int8.verify_conv2d_NCHWc_int8")
    def get_ref_data():
        # Random NCHW/OIHW reference data, repacked into the blocked layouts.
        a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype("uint8")
        w_np = np.random.uniform(size=(num_filter, in_channel//groups, kernel, kernel)).astype("int8")
        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, w_np, stride, padding, groups)
        return _transform_data(a_np, ic_block), _transform_kernel(w_np, ic_block, oc_block), \
               _transform_data(c_np, oc_block)
    a_np, w_np, c_np = get_ref_data()
    def check_device(device):
        # Compile + run the operator on one target and compare to the reference.
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            C = topi.x86.conv2d_NCHWc(A, W, (stride, stride), (padding, padding),
                                      (dilation, dilation),
                                      'NCHW%dc'%ic_block,
                                      "NCHW%dc"%oc_block,
                                      dtype)
            s = topi.x86.schedule_conv2d_NCHWc([C])
        a = tvm.nd.array(a_np, ctx)
        w = tvm.nd.array(w_np, ctx)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), ctx)
        func = tvm.build(s, [A, W, C], device,
                         name="relu_%d_%d_%d_%d_%d_%d_%d_%d" %
                         (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation))
        # print(tvm.lower(s, [A, W, C], simple_mode=True))
        func(a, w, c)
        tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-3)
    # for device in ["llvm"]:
    for device in ["llvm -mcpu=skylake-avx512"]:
        with autotvm.tophub.context(device):  # load tophub pre-tuned parameters
            check_device(device)
    autotvm.GLOBAL_SCOPE.silent = False
@pytest.mark.skip
def test_conv2d_NCHWc():
    """Group conv2d NCHWc int8 smoke test on a ResNet50-shaped workload.

    Marked skip: the 7D-kernel int8 path only compiles for Skylake-AVX512
    targets (see the note under ``__main__``).
    """
    # ResNet50 workloads
    verify_group_conv2d_NCHWc_int8(1, 256, 32, 224, 64, 7, 2, 3)
if __name__ == "__main__":
    # The test requires Skylake and newer Intel machines to generate the correct
    # instruction. This test directly calls the topi operator, requiring correct
    # kernel shape. For older generation of Intel machines, the kernel needs to
    # be 6D. This test tests 7D kernel, that can only work on Skylake+ machines.
    # So, disabling the test.
    # test_conv2d_NCHWc()
    pass  # intentional no-op while the call above stays disabled
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import re
from pybuilder.core import init
from pybuilder.core import task
from pybuilder.core import depends
from pybuilder.errors import BuildFailedException
from pybuilder.pluginhelper.external_command import ExternalCommandBuilder
from pybuilder.utils import assert_can_execute
@init
def init_radon(project):
    """Register default (unset) radon build-breaker properties and declare the
    plugin's dependency on the radon package."""
    for threshold_property in ('radon_break_build_average_complexity_threshold',
                               'radon_break_build_complexity_threshold'):
        project.set_property_if_unset(threshold_property, None)
    project.plugin_depends_on('radon')
@task('radon', description='execute radon cyclomatic complexity')
@depends('prepare')
def radon(project, logger, reactor):
    """Run radon over the production sources and enforce complexity thresholds."""
    set_verbose_property(project)
    command = get_command(project, reactor)
    # assert_can_execute(command.parts, prerequisite='radon', caller='complexity')
    result = command.run_on_production_source_files(logger, include_dirs_only=True)
    if not verify_result(result, logger, command):
        return
    complexity = get_complexity(project, result, logger)
    # Only enforce thresholds when both the average and the highest entry parsed.
    if verify_complexity(complexity):
        process_complexity(project, complexity)
def get_command(project, reactor):
    """Build the ``radon cc -a -s`` external command (cyclomatic complexity,
    show average, show scores)."""
    builder = ExternalCommandBuilder('radon', project, reactor)
    for argument in ('cc', '-a', '-s'):
        builder.use_argument(argument)
    return builder
def set_verbose_property(project):
    """Mirror the global ``verbose`` flag into ``radon_verbose_output``."""
    project.set_property('radon_verbose_output', project.get_property('verbose'))
def verify_result(result, logger, command):
    """Validate the radon command's output.

    Returns True when the command produced report lines and wrote nothing to
    its error stream; otherwise logs the problem and returns False.
    """
    has_output = bool(result.report_lines)
    if not has_output:
        logger.warn(f"Command {command.as_string} produced no output")
        return False
    if result.error_report_lines:
        logger.error(f"Command {command.as_string} produced errors, see {result.error_report_file}")
        return False
    return True
def get_complexity(project, result, logger):
    """Parse radon's report into ``{'average': ..., 'highest': {'name', 'score'}}``.

    Every per-block line (all but the last) is scanned for a score, keeping
    the highest one; the trailing summary line is parsed for the average.
    Unparseable values stay at their defaults (None / 0). The summary line is
    echoed to the log. ``project`` is unused but kept for signature parity.
    """
    block_pattern = re.compile(r'[A-Z] \d+:\d+ (?P<name>.*) - [A-Z] \((?P<score>\d+)\)')
    highest_name = None
    highest_score = 0
    for raw_line in result.report_lines[:-1]:
        match = block_pattern.match(raw_line.strip())
        if not match:
            continue
        score = float(match.group('score'))
        if score > highest_score:
            highest_score = score
            highest_name = match.group('name')
    summary_line = result.report_lines[-1].strip()
    logger.info(summary_line)
    average = None
    average_match = re.match(r'Average complexity: [A-Z] \((?P<average>.*)\)', summary_line)
    if average_match:
        average = float(average_match.group('average'))
    return {
        'average': average,
        'highest': {
            'name': highest_name,
            'score': highest_score,
        },
    }
def verify_complexity(complexity_data):
    """Return True when both the average and the highest entry were parsed."""
    return (complexity_data['average'] is not None
            and complexity_data['highest']['name'] is not None)
def process_complexity(project, complexity_data):
    """Fail the build when either configured complexity threshold is exceeded.

    Raises BuildFailedException if the average complexity, or the single
    highest block complexity, is above its respective (truthy) threshold.
    """
    average_threshold = project.get_property('radon_break_build_average_complexity_threshold')
    if average_threshold:
        average = complexity_data['average']
        if float(average) > average_threshold:
            raise BuildFailedException(f'average complexity {average} is greater than {average_threshold}')
    highest_threshold = project.get_property('radon_break_build_complexity_threshold')
    if highest_threshold:
        highest_score = complexity_data['highest']['score']
        if float(highest_score) > highest_threshold:
            name = complexity_data['highest']['name']
            raise BuildFailedException(f'{name} complexity {highest_score} is greater than {highest_threshold}')
|
nilq/baby-python
|
python
|
"""Class hierarchy for base gates."""
import math
from dataclasses import dataclass
from functools import singledispatch, reduce
from numbers import Number
from typing import Tuple, Union, Callable, Dict, Optional, Iterable, Any, List
import sympy
from typing_extensions import Protocol
import numpy as np
from ...utils import SCHEMA_VERSION
from . import _builtin_gates
Parameter = Union[sympy.Symbol, Number]
def serialize_expr(expr):
    # Canonical serialized form of a gate parameter is its str() rendering
    # (numbers as literals; sympy expressions as their printed form).
    return str(expr)
def _make_symbols_map(symbol_names):
    """Map each name to a fresh ``sympy.Symbol`` of the same name."""
    return dict((name, sympy.Symbol(name)) for name in symbol_names)
def deserialize_expr(expr_str, symbol_names):
    """Parse a serialized expression string, resolving *symbol_names* to
    sympy symbols instead of letting sympify invent its own locals."""
    return sympy.sympify(expr_str, locals=_make_symbols_map(symbol_names))
class Gate(Protocol):
    """Quantum gate representable by a matrix, translatable to other frameworks
    and backends."""

    @property
    def name(self) -> str:
        """Globally unique name of the gate.

        The name is what textual representations and framework-conversion
        dispatch key on, so defining new gates with the same name as built-in
        ones is discouraged.
        """
        raise NotImplementedError()

    @property
    def params(self) -> Tuple[Parameter, ...]:
        """Values of the parameters bound to this gate.

        The length of `params` equals the number of parameters in the gate's
        initializer; nonparametric gates always return ().

        Examples:
            - `H` has no params
            - `RX(np.pi)` has the single param `np.pi`
            - `RX(sympy.Symbol("theta"))` has the single symbolic param `theta`
            - `RX(sympy.sympify("theta * alpha"))` has the single expression
              param `theta*alpha`

        Needed for translation to other frameworks and for serialization.
        """
        raise NotImplementedError()

    @property
    def free_symbols(self):
        """Unbound symbols in the gate matrix, sorted by name.

        Examples:
            - `H` and `RX(np.pi)` have no free symbols
            - `RX(sympy.Symbol("theta"))` has the free symbol `theta`
            - `RX(sympy.sympify("theta * alpha"))` has `alpha` and `theta`
            - binding `theta` on the gate above leaves only `alpha` free
        """
        symbols = set()
        for param in self.params:
            if isinstance(param, sympy.Expr):
                symbols.update(param.free_symbols)
        return sorted(symbols, key=str)

    @property
    def num_qubits(self) -> int:
        """Number of qubits this gate acts on.

        Kept separate from `matrix` because the matrix is computed lazily and
        we don't want to build it just to learn the qubit count.
        """
        raise NotImplementedError()

    @property
    def matrix(self) -> sympy.Matrix:
        """Unitary matrix describing the gate's action on a state vector.

        Needed so the operation class can implement .propagate().
        """
        raise NotImplementedError()

    def controlled(self, num_control_qubits: int) -> "Gate":
        raise NotImplementedError()

    @property
    def dagger(self) -> "Gate":
        raise NotImplementedError()

    def bind(self, symbols_map: Dict[sympy.Symbol, Parameter]) -> "Gate":
        raise NotImplementedError()

    def __call__(self, *qubit_indices: int) -> "GateOperation":
        """Apply this gate on qubits in a circuit."""
        return GateOperation(self, qubit_indices)

    def to_dict(self):
        """Serialize to a JSON-compatible dict; params/free_symbols only
        appear when non-empty."""
        dict_ = {"name": self.name}
        if self.params:
            dict_["params"] = [serialize_expr(param) for param in self.params]
        free_symbols = self.free_symbols
        if free_symbols:
            dict_["free_symbols"] = sorted(map(str, free_symbols))
        return dict_
def _gate_from_dict(dict_, custom_gate_defs):
    """Prototype implementation of circuit deserialization.

    Resolution order: built-in gates, the Control/Dagger wrappers (recursing
    into the wrapped gate), then user-supplied custom gate definitions.
    Raises ValueError when the gate name matches no built-in and no entry in
    *custom_gate_defs*.
    """
    gate_ref = _builtin_gates.builtin_gate_by_name(dict_["name"])
    if gate_ref is not None:
        # ATM we don't have a better way to check if the serialized gate was parametric
        # or not
        if isinstance(gate_ref, MatrixFactoryGate):
            return gate_ref
        else:
            return gate_ref(
                *[
                    deserialize_expr(param, dict_.get("free_symbols", []))
                    for param in dict_["params"]
                ]
            )
    if dict_["name"] == CONTROLLED_GATE_NAME:
        wrapped_gate = _gate_from_dict(dict_["wrapped_gate"], custom_gate_defs)
        return ControlledGate(wrapped_gate, dict_["num_control_qubits"])
    if dict_["name"] == DAGGER_GATE_NAME:
        wrapped_gate = _gate_from_dict(dict_["wrapped_gate"], custom_gate_defs)
        return Dagger(wrapped_gate)
    gate_def = next(
        (
            gate_def
            for gate_def in custom_gate_defs
            if gate_def.gate_name == dict_["name"]
        ),
        None,
    )
    if gate_def is None:
        raise ValueError(
            f"Custom gate definition for {dict_['name']} missing from serialized dict"
        )
    # Bug fix: this used to be a lazy `map(...)` iterator. The first
    # deserialize_expr call exhausted it, so every later param of a
    # multi-parameter custom gate was parsed with an empty symbols map.
    # Materialize the names once so they can be reused per param.
    symbol_names = [serialize_expr(symbol) for symbol in gate_def.params_ordering]
    return gate_def(
        *[deserialize_expr(param, symbol_names) for param in dict_["params"]]
    )
# TODO:
# - controlled gate
# - dagger
@dataclass(frozen=True)
class GateOperation:
    """Application of a gate to a concrete tuple of qubit indices."""

    gate: Gate
    qubit_indices: Tuple[int, ...]

    def to_dict(self):
        """Serialize to a JSON-compatible dict tagged with type "gate_operation"."""
        return {
            "type": "gate_operation",
            "gate": self.gate.to_dict(),
            "qubit_indices": list(self.qubit_indices),
        }

    @classmethod
    def from_dict(cls, dict_, custom_gate_defs):
        """Deserialize, resolving custom gates against *custom_gate_defs*."""
        gate = _gate_from_dict(dict_["gate"], custom_gate_defs)
        return cls(gate=gate, qubit_indices=tuple(dict_["qubit_indices"]))

    def __str__(self):
        indices = ",".join(str(index) for index in self.qubit_indices)
        return f"{self.gate}({indices})"
# Registry of operation deserializers, keyed by the serialized "type" tag.
GATE_OPERATION_DESERIALIZERS = {"gate_operation": GateOperation.from_dict}


def _gate_operation_from_dict(dict_, custom_gate_defs):
    """Dispatch deserialization of one operation on its "type" tag."""
    # Add deserializers here when we need to support custom, non-gate operations
    deserializer = GATE_OPERATION_DESERIALIZERS[dict_["type"]]
    return deserializer(dict_, custom_gate_defs)
@singledispatch
def _sub_symbols(parameter, symbols_map: Dict[sympy.Symbol, Parameter]) -> Parameter:
    """Substitute symbols in a gate parameter; dispatches on the parameter type."""
    raise NotImplementedError()


@_sub_symbols.register
def _sub_symbols_in_number(
    parameter: Number, symbols_map: Dict[sympy.Symbol, Parameter]
) -> Number:
    # Plain numbers contain no symbols — nothing to substitute.
    return parameter


@_sub_symbols.register
def _sub_symbols_in_expression(
    parameter: sympy.Expr, symbols_map: Dict[sympy.Symbol, Parameter]
) -> sympy.Expr:
    # Delegate to sympy's own substitution machinery for compound expressions.
    return parameter.subs(symbols_map)


@_sub_symbols.register
def _sub_symbols_in_symbol(
    parameter: sympy.Symbol, symbols_map: Dict[sympy.Symbol, Parameter]
) -> Parameter:
    # A bare symbol is replaced when mapped, otherwise returned unchanged.
    # (Symbol is more specific than Expr, so this overload wins the dispatch.)
    return symbols_map.get(parameter, parameter)
def _all_attrs_equal(obj, other_obj, attrs):
return all(getattr(obj, attr) == getattr(other_obj, attr) for attr in attrs)
@dataclass(frozen=True)
class MatrixFactoryGate:
    """`Gate` protocol implementation with a deferred matrix construction.

    Most built-in gates are instances of this class.
    It requires the gate definition to be present during deserialization, so it's not
    easily applicable for gates defined in Orquestra steps.

    Keeping a `matrix_factory` instead of a plain gate matrix allows us to defer matrix
    construction to _after_ parameter binding. This saves unnecessary work in scenarios
    where we construct a quantum circuit and immediately bind parameter values. When done
    multiple times, e.g. for every gate in each optimization step, this can lead to major
    performance issues.

    Args:
        name: Name of this gate. Implementers of new gates should make sure that the names
            are unique.
        matrix_factory: a callable mapping arbitrary number of parameters into gate
            matrix. Implementers of new gates should make sure the returned matrices are
            square and of dimension being 2 ** `num_qubits`.
        params: gate parameters - either concrete values or opaque symbols.
            Will be passed to `matrix_factory` when `matrix` property is requested.
        num_qubits: number of qubits this gate acts on.
        is_hermitian: when True, `dagger` returns the gate itself instead of a
            `Dagger` wrapper.
    """

    name: str
    matrix_factory: Callable[..., sympy.Matrix]
    params: Tuple[Parameter, ...]
    num_qubits: int
    is_hermitian: bool = False

    @property
    def matrix(self) -> sympy.Matrix:
        """Unitary matrix defining action of this gate.

        This is a computed property using `self.matrix_factory` called
        with parameters bound to this gate.
        """
        return self.matrix_factory(*self.params)

    def bind(self, symbols_map) -> "MatrixFactoryGate":
        """Return a copy of this gate with symbols substituted in its params.

        Bug fix: `is_hermitian` is now propagated to the bound copy. It was
        previously dropped (reset to the default False), so binding a
        hermitian gate made its `.dagger` return a needless `Dagger` wrapper.
        """
        new_symbols = tuple(_sub_symbols(param, symbols_map) for param in self.params)
        return MatrixFactoryGate(
            name=self.name,
            matrix_factory=self.matrix_factory,
            params=new_symbols,
            num_qubits=self.num_qubits,
            is_hermitian=self.is_hermitian,
        )

    def controlled(self, num_controlled_qubits: int) -> Gate:
        return ControlledGate(self, num_controlled_qubits)

    @property
    def dagger(self) -> Gate:
        # Hermitian gates are their own adjoint; avoid the wrapper.
        return self if self.is_hermitian else Dagger(self)

    def __str__(self):
        return (
            f"{self.name}({', '.join(map(str,self.params))})"
            if self.params
            else self.name
        )

    def __eq__(self, other):
        if type(self) != type(other):
            return False
        # Compare all fields except params directly; params are compared
        # numerically below so that e.g. 0.5 and sympy's 1/2 match.
        if not _all_attrs_equal(
            self, other, set(self.__dataclass_fields__) - {"params"}
        ):
            return False
        if len(self.params) != len(other.params):
            return False
        return all(
            _are_matrix_elements_equal(p1, p2)
            for p1, p2 in zip(self.params, other.params)
        )

    # Normally, we'd use the default implementations by inheriting from the Gate
    # protocol. We can't do that because of __init__ arg default value issues; this is
    # the workaround.
    free_symbols = Gate.free_symbols
    __call__ = Gate.__call__
    to_dict = Gate.to_dict
CONTROLLED_GATE_NAME = "Control"


@dataclass(frozen=True)
class ControlledGate(Gate):
    """Wrapper applying `wrapped_gate` conditioned on the control qubits."""

    wrapped_gate: Gate
    num_control_qubits: int

    @property
    def name(self):
        return CONTROLLED_GATE_NAME

    @property
    def num_qubits(self):
        # Control qubits come on top of the wrapped gate's own qubits.
        return self.num_control_qubits + self.wrapped_gate.num_qubits

    @property
    def matrix(self):
        # Block-diagonal: identity on every basis state outside the final
        # block, the wrapped gate's matrix on that block.
        identity_size = 2 ** self.num_qubits - 2 ** self.wrapped_gate.num_qubits
        return sympy.Matrix.diag(sympy.eye(identity_size), self.wrapped_gate.matrix)

    @property
    def params(self):
        return self.wrapped_gate.params

    def controlled(self, num_control_qubits: int) -> "ControlledGate":
        # Controlling a controlled gate just accumulates control qubits.
        return ControlledGate(
            wrapped_gate=self.wrapped_gate,
            num_control_qubits=num_control_qubits + self.num_control_qubits,
        )

    @property
    def dagger(self) -> "ControlledGate":
        # The adjoint of a controlled gate controls the adjoint.
        return ControlledGate(
            wrapped_gate=self.wrapped_gate.dagger,
            num_control_qubits=self.num_control_qubits,
        )

    def bind(self, symbols_map) -> "Gate":
        bound = self.wrapped_gate.bind(symbols_map)
        return bound.controlled(self.num_control_qubits)

    def to_dict(self):
        return {
            "name": self.name,
            "wrapped_gate": self.wrapped_gate.to_dict(),
            "num_control_qubits": self.num_control_qubits,
        }
DAGGER_GATE_NAME = "Dagger"


@dataclass(frozen=True)
class Dagger(Gate):
    """Wrapper gate representing the hermitian conjugate of `wrapped_gate`."""

    wrapped_gate: Gate

    @property
    def matrix(self) -> sympy.Matrix:
        # Conjugate transpose of the wrapped gate's matrix.
        return self.wrapped_gate.matrix.adjoint()

    @property
    def params(self) -> Tuple[Parameter, ...]:
        return self.wrapped_gate.params

    @property
    def num_qubits(self) -> int:
        return self.wrapped_gate.num_qubits

    @property
    def name(self):
        return DAGGER_GATE_NAME

    def controlled(self, num_control_qubits: int) -> Gate:
        # (C-U)† == C-(U†): push the control inside, then take the dagger.
        return self.wrapped_gate.controlled(num_control_qubits).dagger

    def bind(self, symbols_map) -> "Gate":
        return self.wrapped_gate.bind(symbols_map).dagger

    @property
    def dagger(self) -> "Gate":
        # Daggering twice is the identity, so unwrap instead of double-wrapping.
        return self.wrapped_gate

    def to_dict(self):
        return {
            "name": self.name,
            "wrapped_gate": self.wrapped_gate.to_dict(),
        }
def _n_qubits(matrix):
n_qubits = math.floor(math.log2(matrix.shape[0]))
if 2 ** n_qubits != matrix.shape[0] or 2 ** n_qubits != matrix.shape[1]:
raise ValueError("Gate's matrix has to be square with dimension 2^N")
return n_qubits
def _matrix_to_json(matrix: sympy.Matrix):
    """Serialize a matrix as a row-major list of rows of serialized entries."""
    n_rows = matrix.shape[0]
    return [
        [serialize_expr(entry) for entry in matrix.row(row_index)]
        for row_index in range(n_rows)
    ]
def _matrix_from_json(
    json_rows: List[List[str]], symbols_names: Iterable[str]
) -> sympy.Matrix:
    """Inverse of `_matrix_to_json`: parse every entry against *symbols_names*."""
    rows = [
        [deserialize_expr(entry, symbols_names) for entry in json_row]
        for json_row in json_rows
    ]
    return sympy.Matrix(rows)
@dataclass(frozen=True)
class FixedMatrixFactory:
    """Can be passed as `matrix_factory` when a gate matrix isn't lazily evaluated."""

    matrix: sympy.Matrix
    params_ordering: Tuple[Parameter, ...]

    def __call__(self, *gate_params):
        # Substitute parameter values positionally, following params_ordering.
        substitutions = dict(zip(self.params_ordering, gate_params))
        return self.matrix.subs(substitutions)

    def __eq__(self, other):
        if type(self) != type(other):
            return False
        return (
            self.params_ordering == other.params_ordering
            and _are_matrices_equal(self.matrix, other.matrix)
        )
@dataclass(frozen=True)
class CustomGateDefinition:
    """Definition of a non-built-in gate.

    Built-in gates are defined in the `zquantum.core` library, so their
    definitions can be assumed to be importable during deserialization.
    A user-defined gate may be created in one repo (e.g. an Orquestra step),
    serialized, and deserialized in another project; the receiving side then
    needs the gate details — notably its matrix — which this class records.
    Circuit objects serialize these definitions alongside their operations.
    """

    gate_name: str
    matrix: sympy.Matrix
    params_ordering: Tuple[sympy.Symbol, ...]

    def __post_init__(self):
        # Cache the qubit count (also validates the matrix is 2^N x 2^N).
        # object.__setattr__ is required because the dataclass is frozen.
        object.__setattr__(self, "_n_qubits", _n_qubits(self.matrix))

    def __call__(self, *params):
        """Instantiate the gate with concrete or symbolic *params*."""
        factory = FixedMatrixFactory(self.matrix, self.params_ordering)
        return MatrixFactoryGate(self.gate_name, factory, params, self._n_qubits)

    def to_dict(self):
        return {
            "gate_name": self.gate_name,
            "matrix": _matrix_to_json(self.matrix),
            "params_ordering": [serialize_expr(symbol) for symbol in self.params_ordering],
        }

    @classmethod
    def from_dict(cls, dict_):
        names = dict_.get("params_ordering", [])
        return cls(
            gate_name=dict_["gate_name"],
            matrix=_matrix_from_json(dict_["matrix"], names),
            params_ordering=tuple(sympy.Symbol(name) for name in names),
        )

    def __eq__(self, other):
        if type(self) != type(other):
            return False
        return (
            self.gate_name == other.gate_name
            and self.params_ordering == other.params_ordering
            and _are_matrices_equal(self.matrix, other.matrix)
        )
def _are_matrix_elements_equal(element, another_element):
    """Determine if two elements from gates' matrices are equal.

    Intended for use in __eq__ methods comparing matrices elementwise.

    Args:
        element: first value to compare (float, complex or a sympy expression).
        another_element: second value to compare.
    """
    difference = sympy.N(sympy.expand(element) - sympy.expand(another_element))
    try:
        numeric_difference = float(sympy.re(difference)) + 1j * float(sympy.im(difference))
    except TypeError:
        # The difference still contains free symbols, so it cannot be
        # converted to a number — treat the elements as unequal.
        return False
    return np.allclose(numeric_difference, 0)
def _are_matrices_equal(matrix, another_matrix):
    """Elementwise numeric equality of two matrices."""
    for entry, other_entry in zip(matrix, another_matrix):
        if not _are_matrix_elements_equal(entry, other_entry):
            return False
    return True
def _circuit_size_by_operations(operations):
return (
0
if not operations
else max(
qubit_index
for operation in operations
for qubit_index in operation.qubit_indices
)
+ 1
)
def _bind_operation(op: GateOperation, symbols_map) -> GateOperation:
    """Bind symbols in the operation's gate, keeping the same qubit indices."""
    bound_gate = op.gate.bind(symbols_map)
    return bound_gate(*op.qubit_indices)
CIRCUIT_SCHEMA = SCHEMA_VERSION + "-circuit"


class Circuit:
    """ZQuantum representation of a quantum circuit.

    A circuit is a sequence of GateOperations over `n_qubits` qubits, plus the
    CustomGateDefinitions needed to deserialize any non-built-in gates it uses.
    """

    def __init__(
        self,
        operations: Optional[Iterable[GateOperation]] = None,
        n_qubits: Optional[int] = None,
        custom_gate_definitions: Optional[Iterable[CustomGateDefinition]] = None,
    ):
        self._operations = list(operations) if operations is not None else []
        # When not given explicitly, infer the qubit count from the highest
        # qubit index any operation touches.
        self._n_qubits = (
            n_qubits
            if n_qubits is not None
            else _circuit_size_by_operations(self._operations)
        )
        self._custom_gate_definitions = (
            list(custom_gate_definitions) if custom_gate_definitions else []
        )

    @property
    def operations(self):
        """Sequence of quantum gates to apply to qubits in this circuit."""
        return self._operations

    @property
    def custom_gate_definitions(self):
        """Definitions of the non-built-in gates used by this circuit."""
        return self._custom_gate_definitions

    @property
    def n_qubits(self):
        """Number of qubits in this circuit.

        Not every qubit has to be used by a gate.
        """
        return self._n_qubits

    @property
    def free_symbols(self):
        """Set of all the sympy symbols used as params of gates in the circuit."""
        return reduce(
            set.union,
            (operation.gate.free_symbols for operation in self._operations),
            set(),
        )

    def __eq__(self, other: "Circuit"):
        # NOTE(review): custom_gate_definitions are not part of the equality
        # check — confirm whether that is intentional.
        if not isinstance(other, type(self)):
            return False
        if self.n_qubits != other.n_qubits:
            return False
        if list(self.operations) != list(other.operations):
            return False
        return True

    def __add__(self, other: Union["Circuit"]):
        return _append_to_circuit(other, self)

    def bind(self, symbols_map: Dict[sympy.Symbol, Any]):
        """Create a copy of the current circuit with the parameters of each gate bound to
        the values provided in the input symbols map.

        Args:
            symbols_map: A map of the symbols/gate parameters to new values

        Bug fix: the bound copy now carries over `custom_gate_definitions`;
        they were previously dropped, which broke serialization of bound
        circuits containing custom gates.
        """
        return type(self)(
            operations=[_bind_operation(op, symbols_map) for op in self.operations],
            n_qubits=self.n_qubits,
            custom_gate_definitions=self.custom_gate_definitions,
        )

    def to_dict(self):
        """Creates a dictionary representing a circuit.

        The dictionary is serializable to JSON.

        Returns:
            A mapping with keys:
                - "schema"
                - "n_qubits"
                - "operations" (omitted when empty)
                - "custom_gate_definitions" (omitted when empty)
        """
        return {
            "schema": CIRCUIT_SCHEMA,
            "n_qubits": self.n_qubits,
            **(
                {
                    "operations": [
                        operation.to_dict() for operation in self.operations
                    ],
                }
                if self.operations
                else {}
            ),
            **(
                {
                    "custom_gate_definitions": [
                        gate_def.to_dict() for gate_def in self.custom_gate_definitions
                    ]
                }
                if self.custom_gate_definitions
                else {}
            ),
        }

    @classmethod
    def from_dict(cls, dict_):
        """Deserialize a circuit produced by `to_dict`."""
        defs = [
            CustomGateDefinition.from_dict(def_dict)
            for def_dict in dict_.get("custom_gate_definitions", [])
        ]
        return cls(
            operations=[
                _gate_operation_from_dict(op_dict, defs)
                for op_dict in dict_.get("operations", [])
            ],
            n_qubits=dict_["n_qubits"],
            custom_gate_definitions=defs,
        )

    def __repr__(self):
        return f"{type(self).__name__}(operations=[{', '.join(map(str, self.operations))}], n_qubits={self.n_qubits}, custom_gate_definitions={self.custom_gate_definitions})"
@singledispatch
def _append_to_circuit(other, circuit: Circuit):
    # Dispatch target for Circuit.__add__; overloads for GateOperation and
    # Circuit are registered below. Any other operand type is unsupported.
    raise NotImplementedError()
@_append_to_circuit.register
def _append_operation(other: GateOperation, circuit: Circuit):
    """Append one gate operation, growing the circuit if it touches new qubits."""
    # NOTE(review): the new circuit does not carry over
    # circuit.custom_gate_definitions — confirm whether that is intentional.
    required_qubits = max(other.qubit_indices) + 1
    return type(circuit)(
        operations=[*circuit.operations, other],
        n_qubits=max(circuit.n_qubits, required_qubits),
    )
@_append_to_circuit.register
def _append_circuit(other: Circuit, circuit: Circuit):
    """Concatenate two circuits; the result spans the larger qubit count."""
    combined_operations = [*circuit.operations, *other.operations]
    return type(circuit)(
        operations=combined_operations,
        n_qubits=max(circuit.n_qubits, other.n_qubits),
    )
|
nilq/baby-python
|
python
|
import pygame, sys
from pygame.locals import *
import random
pygame.init() #initializing the pygame library
#creating sample questions: each dict holds the prompt, the correct answer,
#and the three options shown to the player (the right answer appears among them)
data = [
    {
        'question': 'Who is the president of America?',
        'right-answer': 'Barack Obama',
        'option1' : 'George Washington',
        'option2' : 'Paul Kagame',
        'option3' : 'Barack Obama'
    },
    {
        'question': 'Who created Facebook?',
        'right-answer': 'Mark Zuckeberg',
        'option1': 'Bill Gates',
        'option2': 'Mark Zuckeberg',
        'option3': 'Steve Jobs'
    },
    {
        'question': 'who is the richest person on earth?',
        'right-answer': 'Bill Gates',
        'option1': 'Bill Gates',
        'option2': 'Jack Ma',
        'option3': 'Peter Davinch'
    },
    {
        'question': 'What is the capital of United Kingdom?',
        'right-answer': 'London',
        'option1': 'Manchester',
        'option2': 'Arsenal',
        'option3': 'London'
    }
]
# preparing the surface
WINDOWWIDTH = 600
WINDOWHEIGHT = 500
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT)) #setting the main window size
pygame.display.set_caption('Smarter')
#storing colors to be used (RGB tuples)
BACKGROUNDCOLOR = (16, 78, 139)
WHITE = (255, 255, 255)
ORANGE = (255, 147, 51)
NAVYBLUE = (0, 0, 128)
YELLOW = (255, 221, 51)
#necessary variable constants
STARTED = False #controls the start of the game (acts as a draw-once flag in the game loop)
STATE = 0 #stores the index of the current question in `data`
QUESTIONRECTWIDTH = None #holds the width of the Rectangle around the question (set by drawingQuestion)
QUESTIONRECTHEIGHT = None # holds the height of the Rectangle around the question (set by drawingQuestion)
WINDOWSPACING = 0 # determines the space to leave between the rectangle around the question and the main window
QUESTIONRECTANGLE = None #stores the drawn rectangle object around the question
OPTIONSLIST = [] #stores informations about the options (extents, Rect and text; filled by drawingOptions)
def drawingQuestion(textl):
    """Render the question text inside a bordered box near the top of the window.

    Side effects: updates the QUESTIONRECT*/WINDOWSPACING globals so that
    drawingOptions() can size and place the option boxes to match.
    """
    global QUESTIONRECTWIDTH, QUESTIONRECTHEIGHT, QUESTIONRECTANGLE, WINDOWSPACING
    font = pygame.font.SysFont('verdana', 20)
    rendered = font.render(textl, True, WHITE)
    bounds = rendered.get_rect()
    # The box is padded 10px on every side of the rendered text.
    QUESTIONRECTWIDTH = bounds.width + 20
    QUESTIONRECTHEIGHT = bounds.height + 20
    # Center the box horizontally in the window, 50px from the top.
    WINDOWSPACING = (WINDOWWIDTH - QUESTIONRECTWIDTH) / 2
    QUESTIONRECTANGLE = pygame.Rect(WINDOWSPACING, 50, QUESTIONRECTWIDTH, QUESTIONRECTHEIGHT)
    pygame.draw.rect(DISPLAYSURF, WHITE, QUESTIONRECTANGLE, 2)
    left, top = QUESTIONRECTANGLE.topleft
    DISPLAYSURF.blit(rendered, ((left + 10), (top + 10)))
def drawingOptions(options):
    """Draw the (shuffled) answer options below the question.

    Each option is rendered centered inside a rectangle sized like the
    question box. Side effects: appends one entry per option to the global
    OPTIONSLIST holding the rectangle's x/y extents (for later hit-testing),
    the Rect object and the option text. Relies on QUESTIONRECTWIDTH and
    QUESTIONRECTHEIGHT having been set by drawingQuestion().
    """
    global QUESTIONRECTWIDTH, QUESTIONRECTHEIGHT, OPTIONSLIST
    current_height = 0 #this will helps to leave a space between the rectangles around the options.
    counter = 0  # tracks which slot key (option1/option2/option3) the next entry uses
    random.shuffle(options) #randomly rearranging the question's options
    for option in options:
        fontObj = pygame.font.SysFont('verdana', 15)
        optionSurfaceObj = fontObj.render(option, True, YELLOW)
        optionRectObj = optionSurfaceObj.get_rect()
        textwidth = optionRectObj.width
        textheight = optionRectObj.height
        spacing_width = (QUESTIONRECTWIDTH - textwidth) / 2 #calculating the width to leave between the rectangle around the option and the text.
        spacing_height = (QUESTIONRECTHEIGHT - textheight) / 2 #calculating the height to leave between the rectangle around the option and the text.
        if current_height == 0:
            # First option: fixed position 200px from the top of the window.
            option_rectangle = pygame.Rect(5, 200, QUESTIONRECTWIDTH, QUESTIONRECTHEIGHT)
            if counter == 0:
                OPTIONSLIST.append({'option1':
                    {
                        'x': (option_rectangle.topleft[0], option_rectangle.topright[0]),
                        'y': (option_rectangle.topleft[1], option_rectangle.bottomleft[1]),
                        'rectangle' : option_rectangle,
                        'text' : option
                    }
                })
                counter += 1
            pygame.draw.rect(DISPLAYSURF, WHITE, option_rectangle, 1)
            DISPLAYSURF.blit(optionSurfaceObj, ((option_rectangle.topleft[0] + spacing_width), (option_rectangle.topleft[1] + spacing_height)))
            # Remember where this rectangle ends so the next one stacks below it.
            current_height = option_rectangle.bottomleft[1]
        else:
            # Subsequent options: leave a 10px gap below the previous rectangle.
            current_height += 10
            option_rectangle = pygame.Rect(5, current_height, QUESTIONRECTWIDTH, QUESTIONRECTHEIGHT)
            if counter == 1:
                OPTIONSLIST.append({'option2':
                    {
                        'x': (option_rectangle.topleft[0], option_rectangle.topright[0]),
                        'y': (option_rectangle.topleft[1], option_rectangle.bottomleft[1]),
                        'rectangle' : option_rectangle,
                        'text' : option
                    }
                })
                counter += 1
            else:
                OPTIONSLIST.append({'option3':
                    {
                        'x': (option_rectangle.topleft[0], option_rectangle.topright[0]),
                        'y': (option_rectangle.topleft[1], option_rectangle.bottomleft[1]),
                        'rectangle' : option_rectangle,
                        'text' : option
                    }
                })
                counter = 0
            # print option_rectangle
            pygame.draw.rect(DISPLAYSURF, WHITE, option_rectangle, 1)
            DISPLAYSURF.blit(optionSurfaceObj, ((option_rectangle.topleft[0] + spacing_width), (option_rectangle.topleft[1] + spacing_height)))
            current_height = option_rectangle.bottomleft[1]
#the game loop
while True:
    # Draw the current question exactly once; STARTED acts as a "drawn" flag
    # so the screen is not redrawn on every frame.
    if STARTED == False:
        DISPLAYSURF.fill(BACKGROUNDCOLOR)
        drawingQuestion(data[STATE]['question']) #drawing the question on the screen
        drawingOptions([data[STATE]['option1'], data[STATE]['option2'], data[STATE]['option3']]) #drawing the options on the screen
        STARTED = True
    # Process window events; only QUIT is handled so far.
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
    pygame.display.update()
|
nilq/baby-python
|
python
|
# example of setting up, running, and graphing landau damping in 3D
import os
import py_platypus as plat
from py_platypus.utils.params import Parameters as Parameters
from py_platypus.vis.plotter import Plotter as Plotter
if __name__ == "__main__":
    # override default parameters
    params = {
        "dimensions": 3,
        "nppc": 4,
        "landau": { # defaults for Landau damping
            "amplitude": 0.8,
            "mode": 3 # number of density peaks
        },
        "print_every": 1, # print current step at every step
        "save_every": 1, # save data at every time step
        "runtime": 1
    }
    # set up and run Landau damping simulation in 3D
    plat.run_sim.landau("landau_3d", 3,
        param_dict=params
    )
    # load parameters from the json file written by the simulation run above
    param_json = os.path.join(plat.PLATYPUS_HOME, "py_platypus/out/landau_3d/params.json")
    params = Parameters(3, load_file=param_json)
    # create instance of plotting class and plot the energy
    # for the three dimension case, only the energy can be plotted
    plotter = Plotter("landau_3d", params)
    plotter.plot_energy()
|
nilq/baby-python
|
python
|
#from app import Util
import aiml
import sys
def formatOutPut(text):
    """Turn literal escape sequences ("\\t", "\\n") into real whitespace."""
    for escaped, actual in (('\\t', '\t'), ('\\n', '\n')):
        text = text.replace(escaped, actual)
    return text
def stripCommand(text):
    """Strip a leading '/' command marker, if present.

    Bug fix: the original indexed text[0] unconditionally, which raised
    IndexError on empty input (e.g. when the user just presses Enter).
    """
    if text.startswith('/'):
        return text[1:]
    return text
def main():
    """Run an interactive REPL against the AIML knowledgebase.

    Reads lines from stdin, strips a leading '/' command marker, and prints
    the bot's response until the user types 'quit'. Returns 0 on normal exit.
    """
    #create and configure the bot knowledgebase
    ctfbot = aiml.Kernel()
    ctfbot.learn("resources/knowledgebase.aiml")
    while True:
        question = stripCommand(input("> "))
        if question == 'quit':
            return 0
        response = ctfbot.respond(question)
        # print( Util.formatOutPut(response))
        print(formatOutPut(response))
if __name__ == '__main__':
    # main() returns 0 on clean exit; `or 0` guards against a None return.
    sys.exit(int(main() or 0))
|
nilq/baby-python
|
python
|
from trackstats.models import Domain, Metric
# Domains
# Register the 'investment' statistics domain once at import time and expose
# it as an attribute on the default manager so other modules can reference
# Domain.objects.INVESTMENT directly.
# NOTE(review): register() is presumably get-or-create (idempotent across
# imports) -- confirm against the trackstats documentation.
Domain.objects.INVESTMENT = Domain.objects.register(
    ref='investment',
    name='investment'
)
# Metrics, these are associated with a domain. Each is likewise attached to
# the manager so callers can use e.g. Metric.objects.INVESTMENT_COUNT.
Metric.objects.INVESTMENT_COUNT = Metric.objects.register(
    domain=Domain.objects.INVESTMENT,
    ref='investment_count',
    name='Number of investments in the system')
Metric.objects.INVESTMENT_WON_COUNT = Metric.objects.register(
    domain=Domain.objects.INVESTMENT,
    ref='investment_won_count',
    name='Number of investments in the system at stage won')
Metric.objects.INVESTMENT_VERIFY_WIN_COUNT = Metric.objects.register(
    domain=Domain.objects.INVESTMENT,
    ref='investment_verify_win_count',
    name='Number of investments in the system at stage verify win')
Metric.objects.INVESTMENT_ACTIVE_COUNT = Metric.objects.register(
    domain=Domain.objects.INVESTMENT,
    ref='investment_active_count',
    name='Number of investments in the system at stage active')
Metric.objects.INVESTMENT_PIPELINE_COUNT = Metric.objects.register(
    domain=Domain.objects.INVESTMENT,
    ref='investment_pipeline_count',
    name='Number of investments in the system not at stage verify win or win')
|
nilq/baby-python
|
python
|
import logging
import json
import transaction
from pyramid.view import view_config
from pyramid.request import Response
__author__ = 'max'
log = logging.getLogger(__name__)
MODULE_DIR = "newsbomb_recommends.views"
@view_config(route_name='generic', renderer='json')
def api(request):
    """Generic JSON API dispatcher (Python 2 code).

    Routes /{module}/{method} to the function ``method`` inside the module
    ``newsbomb_recommends.views.{module}`` and returns a JSON envelope of
    the form {"status": ..., "data": ...}.

    NOTE(review): module/method come from the URL and are resolved via
    __import__/getattr -- any importable name under MODULE_DIR is callable
    by a remote client. Confirm this surface is intentionally open.
    """
    try:
        module = request.matchdict["module"]
        method = request.matchdict["method"]
        # NOTE(review): if the HTTP method is neither GET nor POST, `params`
        # is never bound and the resulting NameError is swallowed by the
        # generic except below; consider an explicit 405 instead.
        if request.method == "GET":
            params = json.loads(request.params.get("params", "{}"))
        elif request.method == "POST":
            params = request.params.mixed()
        log.warning("params:%s" % params)
        # Allow cross domain call for AJAX
        request.response = Response()
        request.response.headerlist = []
        request.response.headerlist.extend(
            (
                ('Access-Control-Allow-Origin', '*'),
                ('Content-Type', 'application/json; charset=UTF-8')
            )
        )
        module_path = "%s.%s" % (MODULE_DIR, module)
        # __import__ returns the top-level package, so walk down the dotted
        # path to reach the actual submodule.
        mod = __import__(module_path)
        components = module_path.split('.')
        for comp in components[1:]:
            mod = getattr(mod, comp)
        func = getattr(mod, method, None)
        if func:
            result = None
            # Run the handler inside a transaction so any DB work commits
            # or rolls back as a unit.
            with transaction.manager:
                result = func(**params)
            # verify_email returns a ready-made response, not raw data.
            if method == "verify_email":
                return result
            return {"status": "success", "data": result}
        else:
            return {"status": "error", "data": ["No such method: %s." % method]}
    # except ValidationError, ex:
    #     return {"status": "error", "data": [ex.message]}
    except Exception, ex:
        log.exception("%s, %s, %s" % (module, method, ex))
        return {"status": "exception", "data": [ex.message]}
#EOF
|
nilq/baby-python
|
python
|
"""
Define the abstract Game class for providing a structure/ interface for agent environments.
Notes:
- Base implementation done.
- Documentation 15/11/2020
"""
from abc import ABC, abstractmethod
import typing
import numpy as np
from utils.game_utils import GameState
class Game(ABC):
    """
    This class specifies the base Game class. To define your own game, subclass this class and implement the
    functions below. This works when the game is either single-player or two-player/ adversarial. Note that
    the implementations of this class have to be stateless, all state information can be stored in GameState objects.

    Optionally, one can also subclass Gym.Env for single-player games, and make use of the existing logic in
    Games/gym/GymGame.py or Games/atari/AtariGame.py.

    See Games/gym/GymGame.py for an example implementation of a single-player game.
    See Games/hex/HexGame.py for an example implementation of a two-player game.
    """

    def __init__(self, n_players: int = 1) -> None:
        """
        Initialize the base variables for the Game class.
        :param n_players: int The number of players/ adversaries within the implementation of Game (either 1 or 2)
        :raise NotImplementedError: Error raised for n_players larger than 2.
        """
        self.n_players = n_players
        # Number of symmetry transformations this game exposes; subclasses
        # with board symmetries may override this.
        self.n_symmetries = 1
        # NOTE(review): only n_players > 2 is rejected; values < 1 are not
        # validated here -- confirm callers never pass them.
        if self.n_players > 2:
            raise NotImplementedError(f"Environments for more than 2 agents are not yet supported, {n_players} > 2")

    @abstractmethod
    def getInitialState(self) -> GameState:
        """
        Initialize the environment and get the root-state wrapped in a GameState data structure.
        :return: GameState Data structure containing the specifics of the current environment state.
        """

    @abstractmethod
    def getDimensions(self) -> typing.Tuple[int, ...]:
        """
        Get the raw observation dimensions visible for a learning algorithm.
        :return: tuple of integers representing the dimensions of observation data.
        """

    @abstractmethod
    def getActionSize(self) -> int:
        """
        Get the number of atomic actions in the environment.
        :return: int The number of atomic actions in the environment.
        """

    @abstractmethod
    def getNextState(self, state: GameState, action: int, **kwargs) -> typing.Tuple[GameState, float]:
        """
        Perform an action in the environment and observe the transition and reward.
        :param state: GameState Data structure containing the specifics of the current environment state.
        :param action: int Integer action to perform on the environment.
        :return: tuple containing the next environment state in a GameState object, along with a float reward.
        """

    @abstractmethod
    def getLegalMoves(self, state: GameState) -> np.ndarray:
        """
        Determine the legal moves at the provided environment state.
        :param state: GameState Data structure containing the specifics of the current environment state.
        :return: np.ndarray Array of length |action_space| with 0s for illegal and 1s for legal moves.
        """

    @abstractmethod
    def getGameEnded(self, state: GameState, **kwargs) -> typing.Union[float, int]:
        """
        Determine whether the given state is a terminal state.
        :param state: GameState Data structure containing the specifics of the current environment state.
        :return: float or int Always returns 0 until the game ends, then a terminal reward is returned.
        """

    @abstractmethod
    def buildObservation(self, state: GameState) -> np.ndarray:
        """
        Compute some representation of the GameState, to be used as the input of a neural network.
        :param state: GameState Data structure containing the specifics of the current environment state.
        :return: np.ndarray Some game-specific representation of the current environment state.
        """

    @abstractmethod
    def getSymmetries(self, state: GameState, pi: np.ndarray) -> typing.List:
        """
        @DEPRECATED: future will replace state with GameHistory to get symmetries over observation trajectories.
        Compute every possible symmetry of the provided environment state with correctly oriented pi-vectors.
        :param state: GameState Data structure containing the specifics of the current environment state.
        :param pi: np.ndarray Raw move probability vector of size |action-space|.
        :return: A list of the form [(state, pi)] where each tuple is a symmetrical form of the state and the
                 corresponding pi vector. This can be used for diversifying training examples.
        """

    @abstractmethod
    def getHash(self, state: GameState) -> typing.Union[str, bytes, int]:
        """
        Compute a hashable representation of the provided environment state, h: StateSpace -> Universe
        :param state: GameState Data structure containing the specifics of the current environment state.
        :return: Some hashable datatype representing the provided GameState.
        """

    def close(self, state: GameState) -> None:
        """
        Clean up necessary variables within the environment/ class. If any.
        :param state: GameState Data structure containing the specifics of the current environment state.
        """
        # Intentionally a no-op in the base class; subclasses override when
        # they hold resources that need releasing.

    def render(self, state: GameState):
        """
        Base method for generating a visual rendering of the game implementation.
        :param state: GameState Data structure containing the specifics of the current environment state.
        :raises NotImplementedError: Error raised if the child class did not implement a rendering method.
        """
        raise NotImplementedError(f"Render method not implemented for Game: {self}")
|
nilq/baby-python
|
python
|
# Demonstrates printf-style (%) string formatting and concatenation.
x = "There are %d types of people." % 10
binary = "binary"
do_not = "don't"  # fix: was misspelled "dont't", which appeared in the printed joke
y = "Those who know %s and those who %s." % (binary, do_not)
print(x)
print(y)
# %r formats with repr() (quotes included); %s formats with str().
print("I said: %r" % x)
print("I also said: '%s'." % y)
hilarious = False
joke_evaluation = "Isn't that joke so funny?! %r"
print(joke_evaluation % hilarious)
w = "This is the left side of..."
e = "a string with a right side"
print(w + e)
|
nilq/baby-python
|
python
|
# -*- encoding: utf-8 -*-
from bs4 import Tag
from datetime import datetime
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
from scrapers.helpers import jsonify_seminars
#########################################################
# This part will seriously depend on the structure of the LAS page
def get_date_time_start_end(data, _time):
    """Build (start, end) datetimes from a date string and a time string.

    The page omits the year, so the current year is spliced in; the talk
    is assumed to last exactly one hour.
    """
    current_year = " {} ".format(datetime.now().year)
    begin = parse(data + current_year + _time)
    finish = begin + relativedelta(hours=+1)
    return begin, finish
def get_seminar_info(seminar):
    """Extract (title, abstract) from a seminar <dd> element.

    Returns (None, None) when the entry is 'NO SEMINAR' or malformed.
    """
    try:
        speaker = seminar('b')[0].getText().strip()
        if speaker != 'NO SEMINAR':
            title = seminar('i')[1].getText().strip()
            paragraphs = [tag.getText().strip() for tag in seminar('i')[2:]]
            abstract = "<br/> ".join(paragraphs)
            return (speaker + ' - ' + title, abstract +
                    "<br/><br/><i>There will be tea in room 606 from 4:15-4:45pm</i>.")
    except (IndexError, TypeError, AttributeError):
        # Malformed entry: fall through and report "no seminar info".
        pass
    return None, None
def clean_couple(couple):
    """Turn a (<dt>, <dd>) tag pair into an event dict, or None if unusable."""
    date_tag, body_tag = couple
    start, end = get_date_time_start_end(date_tag.string, "4:45 pm")
    location = "UCL, first floor of 25 Gordon Street, room D103"
    # TODO: Get both using regexp instead of creating this horrible warning
    if body_tag.getText().count('Title') > 1:
        start = start + relativedelta(hours=-1, minutes=-30)
        title = "WARN: two seminars"
        description = "There are probably two seminars. <a href='http://www.homepages.ucl.ac.uk/~ucahsze/seminars.html' target='_blank'>Click here for additional informations</a>."
    else:
        title, description = get_seminar_info(body_tag)
    # A missing title means the entry could not be parsed at all.
    if title is None:
        return None
    return {
        'start': start,
        'end': end,
        'title': title,
        'description': description,
        'location': location
    }
def admissible_couples(couple):
    """Return True when *couple* is a (<dt> Tag, <dd> Tag) pair.

    Fix: use isinstance instead of ``type(x) == Tag`` so that Tag
    subclasses are also accepted; type equality is a Python anti-pattern.
    """
    first, second = couple
    return (isinstance(first, Tag) and first.name == "dt" and
            isinstance(second, Tag) and second.name == "dd")
def get_event_list(soup):
    """Collect seminar event dicts from the page's <dl> listing.

    Fix: iterate up to ``len(data) - 1`` so the final (dt, dd) pair is
    included; the original ``len(data) - 2`` silently dropped the last
    seminar on the page.
    """
    data = soup.dl.contents
    couples = filter(admissible_couples, ((data[i], data[i + 1])
                                          for i in range(len(data) - 1)))
    # We can accept empty abstracts but not empty titles
    events = filter(lambda ev: ev is not None, map(clean_couple, couples))
    return events
def get_nts(last_update=None):
    """Scrape and jsonify the UCL Number Theory Seminar listing."""
    source_url = "http://www.homepages.ucl.ac.uk/~ucahsze/seminars.html"
    return jsonify_seminars(source_url, get_event_list,
                            last_update=last_update)
|
nilq/baby-python
|
python
|
#============================== IBM Code Challenge =============================
# mat_gui.py
#
# Creates GUI for user to interact with
#
# Description:
# This generates a GUI that allows the user to enter two matrices, select
# a mathematical operation, and displays the operation result to the user.
# The class implements the __init__ function to construct the GUI and breaks
# each component out into a new function to help compartmentalize the
# construction.
#
# Todo:
# Add save/restore callback and link to menu items
#===============================================================================
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import numpy as np
import re
import pdb
from mat_widgets import *
from mat_operation import *
class MatOpGUI(QMainWindow):
    #===========================================================================
    # Define class constants
    #===========================================================================
    # Define the list of mathematical operations the user can perform on the
    # two matrices. These strings populate the operation combo box.
    OPERATIONS = ['Multiply',
                  'Sum of Column of Product',
                  'Product of Column of Product',
                  'Cumulative Sum Along Column of Product',
                  'Cumulative Product Along Column of Product',
                  'Sum of Row of Product',
                  'Product of Row of Product',
                  'Cumulative Sum Along Row of Product',
                  'Cumulative Product Along Row of Product',
                  'Min of Product',
                  'Max of Product',
                  'Mean of Product',
                  'Median of Product',
                  'Total Sum of Product',
                  'Total Product of Product']
    # Define the operations that, when selected, will cause the operations
    # selection row/column entry field to appear to the user.
    OPS_TO_MAKE_ENTRY_VISIBLE = ['Sum of Column of Product',
                                 'Product of Column of Product',
                                 'Sum of Row of Product',
                                 'Product of Row of Product']
    # Define the operations that will act on a row of the resultant matrix.
    # This will be used to determine the placeholder text of a line edit field
    # for entering a row/column, to help the user.
    OPS_ON_ROW = ['Sum of Row of Product',
                  'Product of Row of Product']
#===========================================================================
# Initialization function
#===========================================================================
def __init__(self):
    """
    Initialization function for the MatOpGUI class. This will construct the
    GUI, primarily through the __createGUI method, and display it to the user.
    """
    # Call the super class init function to make sure this generates properly
    super().__init__()
    # -- Define Instance Variables -----------------------------------------
    # Define variables for GUI properties
    self.__fontFamily = 'Calibri'
    self.__fontColor = QColor(250,250,250)
    self.__guiColor = QColor(162, 62, 72) # Official GUI color
    # Define counter for the number of operations performed
    self.__opCounter = 0
    # -- Set Window Properties ---------------------------------------------
    self.setAcceptDrops(True)
    self.setWindowTitle('Matrix Operations')
    self.resize(800,400)
    # NOTE(review): icon path is relative to the working directory -- verify
    # the app is always launched from the project root.
    self.setWindowIcon(QIcon('imgs/icon.png'))
    # -- Create and Show the GUI -------------------------------------------
    # Create and show the GUI
    self.__createGUI()
    self.show()
def frame(func):
    """
    Wrapper function for creating frames in a widget. This allows a function
    to simply add content to a frame without handling the process of creating
    grids and frames. This will automatically use a QGridLayout.

    The wrapper injects kwargs['frame'] (the new QFrame) and kwargs['grid']
    (its QGridLayout) before calling the wrapped function, then installs the
    grid on the frame, colors it, and adds it to the parent grid at *pos*.
    Returns the created QFrame.

    NOTE(review): unknown keyword arguments (e.g. misspellings of gridMargin)
    pass silently through **kwargs to the wrapped function.
    """
    def wrapper(self, pos, *args, grid = None, gridMargin = 0, gridSpacing = 0,
                bgcolor = QColor(255,255,255,255), frameShape = None,
                frameShadow = None, lineWidth = 0, **kwargs):
        # Create the QFrame and QGridLayout
        kwargs['frame'] = QFrame(frameShape = frameShape, frameShadow = frameShadow, lineWidth = lineWidth)
        kwargs['grid'] = QGridLayout(margin = gridMargin, spacing = gridSpacing)
        # Call the wrapped function
        func(self, *args, **kwargs)
        # Set the background color of the frame
        kwargs['frame'].setAutoFillBackground(True)
        p = kwargs['frame'].palette()
        p.setColor(kwargs['frame'].backgroundRole(), bgcolor)
        kwargs['frame'].setPalette(p)
        # Set the grid created in this function as the frame's layout and
        # add the frame to the parent's grid at the position provided
        kwargs['frame'].setLayout(kwargs['grid'])
        if grid is not None:
            grid.addWidget(kwargs['frame'], *pos)
        else:
            # Fall back to the main window's top-level grid
            self.grid.addWidget(kwargs['frame'], *pos)
        # Return the frame
        return kwargs['frame']
    return wrapper
#===========================================================================
# Level 0: Top level GUI creation and menu bar
#===========================================================================
def __createGUI(self):
    """
    Highest level function used to create the GUI components: the central
    widget, its grid layout, the menu bar, the header banner, and the main
    content frame.
    """
    # -- Define Top-level Components ---------------------------------------
    self.widget = QWidget(self) # The central widget in the main window
    self.grid = QGridLayout()   # The layout manager of the central widget
    self.__createMenuBar()      # The menu bar
    # -- Main Gui Components -----------------------------------------------
    self.__headerBar = self.__createHeaderBar((0,0), gridMargin = 5, gridSpacing = 15, bgcolor = self.__guiColor)
    self.__contentFrame = self.__createContentFrame((1,0), gridMargin = 5, gridSpacing = 5)
    # -- Setup the Grid ----------------------------------------------------
    self.grid.setContentsMargins(0,0,0,0)
    self.grid.setSpacing(0)
    # Let the content row (row 1) absorb extra vertical space.
    self.grid.setRowStretch(1,1)
    # -- Set the Main Widget Properties ------------------------------------
    self.widget.setLayout(self.grid)
    self.setCentralWidget(self.widget)
def __createMenuBar(self):
    """
    Creates the menu bar of the GUI and adds various menus and items to
    perform tasks.

    NOTE(review): the save/load handlers (__save, __askForFileAndLoad) are
    referenced here but not visible in this part of the file; per the file
    header TODO, the save/restore callbacks may not exist yet -- confirm.
    """
    # Use the PyQt menu construct. This is particularly important for Macs
    # because it will keep the menubar with the GUI window rather than
    # placing it at the top of the screen, as is usual for Macs. We don't
    # want this to happen because Macs take control of the menus if you have
    # it up there and can cause unexpected results.
    self.menuBar().setNativeMenuBar(False)
    # -- File Menu ---------------------------------------------------------
    fileMenu = self.menuBar().addMenu('File')
    fileMenu.setTearOffEnabled(True)
    # Save Menu Item
    saveMenuItem = QAction('Save', fileMenu, shortcut = 'Ctrl+S')
    saveMenuItem.triggered.connect(self.__save)
    fileMenu.addAction(saveMenuItem)
    # Load Menu Item
    loadMenuItem = QAction('Load', fileMenu, shortcut = 'Ctrl+L')
    loadMenuItem.triggered.connect(self.__askForFileAndLoad)
    fileMenu.addAction(loadMenuItem)
    # -- Options Menu ------------------------------------------------------
    optionsMenu = self.menuBar().addMenu('Options')
    optionsMenu.setTearOffEnabled(True)
    # Clear Menu Item
    # NOTE(review): Ctrl+A conflicts with the conventional select-all
    # shortcut -- consider a different binding.
    clearMenuItem = QAction('Clear All', optionsMenu, shortcut = 'Ctrl+A')
    clearMenuItem.triggered.connect(self.__clearAll)
    optionsMenu.addAction(clearMenuItem)
    optionsMenu.addSeparator()
    # Quit Menu Item
    quitMenuItem = QAction('Quit', optionsMenu, shortcut = 'Ctrl+Q')
    quitMenuItem.triggered.connect(self.close)
    optionsMenu.addAction(quitMenuItem)
#===========================================================================
# Level 1: Header and Main Content Frame
#===========================================================================
@frame
def __createHeaderBar(self, *args, **kwargs):
    """
    Create the large header bar at the top of the GUI. This just adds a nice,
    convenient banner at the top for branding.

    kwargs['grid'] is the QGridLayout injected by the @frame decorator.
    """
    # Create the Matrix Operations Label, configure it, and add it to the grid
    matOpLabel = QLabel('Matrix Operations')
    configureQLabel(matOpLabel, font = self.__fontFamily, font_size = 20,
                    font_color = self.__fontColor, alignment = Qt.AlignCenter)
    kwargs['grid'].addWidget(matOpLabel, 0, 1)
@frame
def __createContentFrame(self, *args, **kwargs):
    """
    Create the main content of the GUI. This is a second frame below the header.
    This calls several sub-functions that create specific elements of the main
    content frame: the run-name row, the two matrix input frames, the operation
    selector, and the (initially hidden) output text box.

    kwargs['grid'] is the QGridLayout injected by the @frame decorator.
    """
    # Create the frame at the top for entering the name of the run.
    # FIX: the keyword was previously misspelled "gridmargin", which fell
    # silently into **kwargs and left the frame with the default margin of 0
    # instead of the intended 5.
    runNameFrame = self.__createRunNameFrame(
        (0,0,1,2), grid = kwargs['grid'], gridMargin = 5, gridSpacing = 5,
    )
    # Set the tool tip for this frame to help the user out
    runNameFrame.setToolTip('Optionally choose a name for your run.')
    # -- Create Matrix Input Frames ----------------------------------------
    # Create the two frames which allow the user to input the two matrices
    self.__matrixAFrame = self.__createMatrixAInputFrame(
        (1,0), grid = kwargs['grid'], gridMargin = 5, gridSpacing = 5,
        frameShape = QFrame.StyledPanel, frameShadow = QFrame.Sunken, lineWidth = 0,
    )
    self.__matrixBFrame = self.__createMatrixBInputFrame(
        (1,1), grid = kwargs['grid'], gridMargin = 5, gridSpacing = 5,
        frameShape = QFrame.StyledPanel, frameShadow = QFrame.Sunken, lineWidth = 0,
    )
    # Set the tool tips for this frame to help the user out.
    self.__matrixAFrame.setToolTip((
        'Enter values for Matrix A here. You can change the matrix size to\n'
        'a max of 10x10 and also randomly generate values for the matrix.'
    ))
    self.__matrixBFrame.setToolTip((
        'Enter values for Matrix B here. You can change the matrix size to\n'
        'a max of 10x10 and also randomly generate values for the matrix.'
    ))
    # -- Create Operation Selection Frame ----------------------------------
    # Create the frame below the two matrices for selecting the matrix
    # operation to perform
    opSelectFrame = self.__createOperationSelectionFrame(
        (2,0,1,2), grid = kwargs['grid'], gridMargin = 0, gridSpacing = 5
    )
    # Set the tool tip for this frame to help the user out
    opSelectFrame.setToolTip((
        'Select an operation to perform from the dropdown list. Some\n'
        'operations act on a single row/column of a matrix. Hit Go!\n'
        'to perform the operation.'
    ))
    # -- Create Output Text Box --------------------------------------------
    # Create the output text box.
    self.__outputTextBox = QTextEdit()
    # Make it so users can't modify the text (selection only)
    self.__outputTextBox.setTextInteractionFlags(Qt.TextSelectableByMouse | Qt.TextSelectableByKeyboard)
    # Do not allow text wrapping (to prevent the output from becoming too
    # confusing).
    self.__outputTextBox.setLineWrapMode(QTextEdit.NoWrap)
    # Update the font to a monospaced font so matrices align.
    font = self.__outputTextBox.currentFont()
    font.setPointSize(8)
    font.setFamily('courier')
    self.__outputTextBox.setFont(font)
    # Make it at least 400 pixels tall
    self.__outputTextBox.setMinimumHeight(400)
    # Make the text box initially invisible until data needs to be displayed
    # so as not to confuse the user.
    self.__outputTextBox.setVisible(False)
    # Add to the grid
    kwargs['grid'].addWidget(self.__outputTextBox, 3, 0, 1, 2)
    # -- Set Grid Properties -----------------------------------------------
    # Matrix row absorbs vertical space; both matrix columns share width.
    kwargs['grid'].setRowStretch(1, 1)
    kwargs['grid'].setColumnStretch(0, 1)
    kwargs['grid'].setColumnStretch(1, 1)
#===========================================================================
# Level 2: Name, Matrix, Options, and Output Frames
#===========================================================================
@frame
def __createRunNameFrame(self, *args, **kwargs):
    """
    Create the frame which allows users to enter the name of the run.

    kwargs['grid'] is the QGridLayout injected by the @frame decorator.
    """
    # Create the QLabel giving direction to the user
    kwargs['grid'].addWidget(QLabel('Name Your Run'), 0, 0)
    # Create the line edit for the user to enter the name
    self.__nameLineEdit = QLineEdit()
    self.__nameLineEdit.setPlaceholderText('Enter run name...')
    kwargs['grid'].addWidget(self.__nameLineEdit, 0, 1)
    # Set grid properties: the line edit takes all remaining width
    kwargs['grid'].setColumnStretch(1, 1)
@frame
def __createMatrixAInputFrame(self, *args, **kwargs):
    """
    Create the input frame for defining Matrix A. This has a label at the
    top demarking this as Matrix A. It has a sub-frame for changing the size
    of the frame, a table for defining the matrix, and a sub-frame for choosing
    to randomly generate the matrix.
    """
    # Create the label at the top of this frame, labeling this as Matrix A
    sectionLabel = QLabel('Matrix A')
    configureQLabel(sectionLabel, font = self.__fontFamily, font_size = 16,
                    alignment = Qt.AlignCenter)
    kwargs['grid'].addWidget(sectionLabel, 0, 0)
    # Create section for specifying the matrix size
    self.__createMatrixASizeFrame(
        (1,0), grid = kwargs['grid'], gridMargin = 0, gridSpacing = 0
    )
    # Create section for inputting the matrix. Default to a 3x3 matrix.
    self.__matrixAInputTable = QTableWidget(3, 3)
    font = self.__matrixAInputTable.horizontalHeader().font()
    font.setWeight(QFont.Bold)
    self.__matrixAInputTable.setAlternatingRowColors(True)
    self.__matrixAInputTable.horizontalHeader().setFont(font)
    self.__matrixAInputTable.verticalHeader().setFont(font)
    # Pre-populate every cell with an empty item so edits work immediately.
    for row in range(3):
        for col in range(3):
            self.__matrixAInputTable.setItem(row, col, QTableWidgetItem(''))
    kwargs['grid'].addWidget(self.__matrixAInputTable, 2, 0)
    # Create section for random matrix generation
    self.__createMatrixARandFrame(
        (3,0), grid = kwargs['grid'], gridMargin = 0, gridSpacing = 0
    )
    # Set the grid properties: the table row absorbs extra height
    kwargs['grid'].setRowStretch(2,1)
@frame
def __createMatrixBInputFrame(self, *args, **kwargs):
    """
    Create the input frame for defining Matrix B. This has a label at the
    top demarking this as Matrix B. It has a sub-frame for changing the size
    of the frame, a table for defining the matrix, and a sub-frame for choosing
    to randomly generate the matrix.
    """
    # Create the label at the top of this frame, labeling this as Matrix B
    sectionLabel = QLabel('Matrix B')
    configureQLabel(sectionLabel, font = self.__fontFamily, font_size = 16,
                    alignment = Qt.AlignCenter)
    kwargs['grid'].addWidget(sectionLabel, 0, 0)
    # Create section for specifying the matrix size
    self.__createMatrixBSizeFrame(
        (1,0), grid = kwargs['grid'], gridMargin = 0, gridSpacing = 0
    )
    # Create section for inputting the matrix. Default to a 3x3 matrix.
    self.__matrixBInputTable = QTableWidget(3, 3)
    font = self.__matrixBInputTable.horizontalHeader().font()
    font.setWeight(QFont.Bold)
    self.__matrixBInputTable.setAlternatingRowColors(True)
    self.__matrixBInputTable.horizontalHeader().setFont(font)
    self.__matrixBInputTable.verticalHeader().setFont(font)
    # Pre-populate every cell with an empty item so edits work immediately.
    for row in range(3):
        for col in range(3):
            self.__matrixBInputTable.setItem(row, col, QTableWidgetItem(''))
    kwargs['grid'].addWidget(self.__matrixBInputTable, 2, 0)
    # Create section for random matrix generation
    self.__createMatrixBRandFrame(
        (3,0), grid = kwargs['grid'], gridMargin = 0, gridSpacing = 0
    )
    # Set the grid properties: the table row absorbs extra height
    kwargs['grid'].setRowStretch(2,1)
@frame
def __createOperationSelectionFrame(self, *args, **kwargs):
    """
    Create the frame which allows the user to select the math operation to
    perform.

    NOTE(review): widgets are placed on grid row 2 even though rows 0-1 of
    this frame's grid are unused -- presumably harmless, but confirm.
    """
    kwargs['grid'].addWidget(QLabel('Select the Operation:'), 2, 0)
    # Create the dropdown list of operations
    self.__opSelectComboBox = QComboBox()
    self.__opSelectComboBox.addItems(MatOpGUI.OPERATIONS)
    self.__opSelectComboBox.currentIndexChanged.connect(self.__opSelectChanged)
    kwargs['grid'].addWidget(self.__opSelectComboBox, 2, 1)
    # Create the row/column entry field, for operations which return a
    # result from just a single column/row. This will be where the user
    # enters the row/column the return the result from. This will initially
    # be invisible as the default matrix operation is to multiply the two
    # together, which does not require this widget to exist. When an operation
    # is selected that will require this widget, it will be shown to the user.
    self.__opEntryField = QLineEdit()
    self.__opEntryField.setVisible(False)
    kwargs['grid'].addWidget(self.__opEntryField, 2, 2)
    # Create the Go! button
    self.__goButton = QPushButton('Go!')
    self.__goButton.clicked.connect(self.__goButtonClicked)
    kwargs['grid'].addWidget(self.__goButton, 2, 3)
    # Set the grid properties: the combo box column takes the extra width
    kwargs['grid'].setColumnStretch(1,1)
#===========================================================================
# Level 3: Matrix Size and Random Generation Collapsable Frames
#===========================================================================
# == Matrix A ==============================================================
@frame
def __createMatrixASizeFrame(self, *args, **kwargs):
    """
    Create a frame with a collapsable section for allowing the user to change
    the size of the matrix. This is just a text box for entering both the row
    and column and a button to change the size.
    """
    # Create a collapsable section to add the various widgets to. This will
    # make the GUI output a bit cleaner and only show this to the user if
    # they need to see it.
    self.__matrixASizeCollapsable = CollapsableSection('Matrix Size', True)
    # Create the row size entry
    self.__matrixARowSize = QLineEdit('3')
    self.__matrixARowSize.setMaximumWidth(30)
    self.__matrixARowSize.setPlaceholderText('Row')
    self.__matrixASizeCollapsable.addWidget(self.__matrixARowSize, 0, 0)
    # Create the 'X' label
    self.__matrixASizeCollapsable.addWidget(QLabel('X'), 0, 1)
    # Create the col size entry
    self.__matrixAColSize = QLineEdit('3')
    self.__matrixAColSize.setMaximumWidth(30)
    self.__matrixAColSize.setPlaceholderText('Col')
    self.__matrixASizeCollapsable.addWidget(self.__matrixAColSize, 0, 2)
    # Create the Set Size button
    self.__matrixASizeButton = QPushButton('Set Size')
    self.__matrixASizeButton.clicked.connect(self.__matrixASetSizeClicked)
    self.__matrixASizeCollapsable.addWidget(self.__matrixASizeButton, 0, 3)
    # Set the grid properties: empty trailing column soaks up extra width
    self.__matrixASizeCollapsable.setColumnStretch(4,1)
    kwargs['grid'].addWidget(self.__matrixASizeCollapsable, 1, 0)
@frame
def __createMatrixARandFrame(self, *args, **kwargs):
    """
    Create a frame with a collapsable section for allowing the user to randomly
    populate the matrix. The collapsable section has a section for defining
    the range to use and for selecting to generate either decimals or integers.
    Finally there's a button to actually generate the matrix content.
    """
    # Create a collapsable section to add the various widgets to. This will
    # make the GUI output a bit cleaner and only show this to the user if
    # they need to see it.
    self.__matrixARandCollapsable = CollapsableSection('Random Generation', True)
    # -- Create range section ----------------------------------------------
    self.__matrixARandCollapsable.addWidget(QLabel('Range:'), 0, 0)
    # Create the minimum line edit
    self.__matrixAMinRandRange = QLineEdit('0.0')
    self.__matrixAMinRandRange.setMaximumWidth(50)
    self.__matrixAMinRandRange.setPlaceholderText('min')
    self.__matrixARandCollapsable.addWidget(self.__matrixAMinRandRange, 0, 1)
    # Create the '-' label
    self.__matrixARandCollapsable.addWidget(QLabel('-', alignment = Qt.AlignCenter), 0, 2)
    # Create the maximum line edit
    self.__matrixAMaxRandRange = QLineEdit('1.0')
    self.__matrixAMaxRandRange.setMaximumWidth(50)
    self.__matrixAMaxRandRange.setPlaceholderText('max')
    self.__matrixARandCollapsable.addWidget(self.__matrixAMaxRandRange, 0, 3, 1, 2)
    # -- Create number type section ----------------------------------------
    self.__matrixARandCollapsable.addWidget(QLabel('Type:'), 1, 0)
    # Create the button group for the number type radio buttons
    self.__matrixARandButtonGroup = QButtonGroup()
    # Create the 'decimal' radio button (id 0); checked by default
    decimalButton = QRadioButton('Decimal')
    decimalButton.setChecked(True)
    self.__matrixARandButtonGroup.addButton(decimalButton, 0)
    self.__matrixARandCollapsable.addWidget(decimalButton, 1, 1, 1, 3)
    # Create the 'integer' radio button (id 1)
    integerButton = QRadioButton('Integer')
    self.__matrixARandButtonGroup.addButton(integerButton, 1)
    self.__matrixARandCollapsable.addWidget(integerButton, 1, 4, 1, 1)
    # -- Create generation button ------------------------------------------
    self.__matrixARandGenButton = QPushButton('Generate')
    self.__matrixARandGenButton.clicked.connect(self.__matrixARandGenClicked)
    self.__matrixARandCollapsable.addWidget(self.__matrixARandGenButton, 2, 0, 1, 5)
    # Set the grid properties: empty trailing column soaks up extra width
    self.__matrixARandCollapsable.setColumnStretch(5, 1)
    kwargs['grid'].addWidget(self.__matrixARandCollapsable, 3, 0)
# == Matrix B ==============================================================
@frame
def __createMatrixBSizeFrame(self, *args, **kwargs):
    """
    Create a frame with a collapsable section for allowing the user to change
    the size of the matrix. This is just a text box for entering both the row
    and column and a button to change the size.

    FIX: the @frame decorator was missing here, unlike its Matrix A twin
    (__createMatrixASizeFrame). Without it no QFrame/QGridLayout is created
    and the widgets are laid out directly on the caller's grid, diverging
    from the Matrix A layout.
    """
    # Create a collapsable section to add the various widgets to. This will
    # make the GUI output a bit cleaner and only show this to the user if
    # they need to see it.
    self.__matrixBSizeCollapsable = CollapsableSection('Matrix Size', True)
    # Create the row size entry
    self.__matrixBRowSize = QLineEdit('3')
    self.__matrixBRowSize.setMaximumWidth(30)
    self.__matrixBRowSize.setPlaceholderText('Row')
    self.__matrixBSizeCollapsable.addWidget(self.__matrixBRowSize, 0, 0)
    # Create the 'X' label
    self.__matrixBSizeCollapsable.addWidget(QLabel('X'), 0, 1)
    # Create the col size entry
    self.__matrixBColSize = QLineEdit('3')
    self.__matrixBColSize.setMaximumWidth(30)
    self.__matrixBColSize.setPlaceholderText('Col')
    self.__matrixBSizeCollapsable.addWidget(self.__matrixBColSize, 0, 2)
    # Create the Set Size button
    self.__matrixBSizeButton = QPushButton('Set Size')
    self.__matrixBSizeButton.clicked.connect(self.__matrixBSetSizeClicked)
    self.__matrixBSizeCollapsable.addWidget(self.__matrixBSizeButton, 0, 3)
    # Set the grid properties: empty trailing column soaks up extra width
    self.__matrixBSizeCollapsable.setColumnStretch(4,1)
    kwargs['grid'].addWidget(self.__matrixBSizeCollapsable, 1, 0)
@frame
def __createMatrixBRandFrame(self, *args, **kwargs):
"""
Create a frame with a collapsable section for allowing the user to randomly
populate the matrix. The collapsable section has a section for defining
the range to use and for selecting to generate either decimals or integers.
Finally there's a button to actually generate the matrix content.
"""
# Create a collapsable section to add the various widgets to. This will
# make the GUI output a bit cleaner and only show this to the user if
# they need to see it.
self.__matrixBRandCollapsable = CollapsableSection('Random Generation', True)
# -- Create range section ----------------------------------------------
self.__matrixBRandCollapsable.addWidget(QLabel('Range:'), 0, 0)
# Create the minimum line edit
self.__matrixBMinRandRange = QLineEdit('0.0')
self.__matrixBMinRandRange.setMaximumWidth(50)
self.__matrixBMinRandRange.setPlaceholderText('min')
self.__matrixBRandCollapsable.addWidget(self.__matrixBMinRandRange, 0, 1)
# Create the '-' label
self.__matrixBRandCollapsable.addWidget(QLabel('-', alignment = Qt.AlignCenter), 0, 2)
# Create the maximum line edit
self.__matrixBMaxRandRange = QLineEdit('1.0')
self.__matrixBMaxRandRange.setMaximumWidth(50)
self.__matrixBMaxRandRange.setPlaceholderText('max')
self.__matrixBRandCollapsable.addWidget(self.__matrixBMaxRandRange, 0, 3, 1, 2)
# -- Create number type section ----------------------------------------
self.__matrixBRandCollapsable.addWidget(QLabel('Type:'), 1, 0)
# Create the button group for the number type radio buttons
self.__matrixBRandButtonGroup = QButtonGroup()
# Create the 'decimal' radio button
decimalButton = QRadioButton('Decimal')
decimalButton.setChecked(True)
self.__matrixBRandButtonGroup.addButton(decimalButton, 0)
self.__matrixBRandCollapsable.addWidget(decimalButton, 1, 1, 1, 3)
# Create the 'integer' radio button
integerButton = QRadioButton('Integer')
self.__matrixBRandButtonGroup.addButton(integerButton, 1)
self.__matrixBRandCollapsable.addWidget(integerButton, 1, 4, 1, 1)
# -- Create generation button ------------------------------------------
self.__matrixBRandGenButton = QPushButton('Generate')
self.__matrixBRandGenButton.clicked.connect(self.__matrixBRandGenClicked)
self.__matrixBRandCollapsable.addWidget(self.__matrixBRandGenButton, 2, 0, 1, 5)
# Set the grid properties
self.__matrixBRandCollapsable.setColumnStretch(5, 1)
kwargs['grid'].addWidget(self.__matrixBRandCollapsable, 3, 0)
#===========================================================================
# Widget Callbacks and Events
#===========================================================================
def dragEnterEvent(self, event):
"""Callback for a drag enter event"""
# If something was dragged into this window, set it as a move event
event.setDropAction(Qt.MoveAction)
# If the event has a URL to a file, check if only one file is being dropped
# in and that file has a .matop extension. If it meets those conditions,
# accept it, otherwise, ignore it.
if event.mimeData().hasUrls():
if len(event.mimeData().urls()) > 1:
event.ignore()
elif not event.mimeData().urls()[0].toLocalFile().endswith('.matop'):
event.ignore()
else:
event.accept()
# Ignore everything else
else:
event.ignore()
def dropEvent(self, event):
"""Callback for file drop event to load a file"""
for url in event.mimeData().urls():
filename = url.toLocalFile()
self.__load(filename)
def __save(self):
"""Callback for saving the output data"""
# Ask for the file to save to
outfile, _ = QFileDialog.getSaveFileName(self, 'Select a file to save to', QDir.currentPath(), 'MatOp (*.matop)')
# If a file was provided, grab all the text from the output text area and
# write it to that file.
if outfile:
with open(outfile, 'w') as file:
file.write(self.__outputTextBox.toPlainText())
def __askForFileAndLoad(self):
"""Callback for loading from a file, after asking the user for the file"""
# Ask for the file to load from
filename, _ = QFileDialog.getOpenFileName(self, 'Select a file to load', QDir.currentPath(), 'MatOp (*.matop)')
if filename:
self.__load(filename)
def __load(self, filename):
"""Callback for loading from a file, given one is provided"""
# Load the file's content
with open(filename, 'r') as file:
content = file.readlines()
content = ''.join(content)
# Set the textbox output to the loaded content
self.__outputTextBox.setText(content)
# Now use regex to scan through the content and figure out the operation
# counter, so it can be set.
matches = re.findall('Operation (?P<counter>\d+)', content)
self.__opCounter = max(map(int, matches)) if matches else 0
# And finally, set the textbox output to visible
self.__outputTextBox.setVisible(True)
def __clearAll(self):
"""
Callback for clearing all the input/output of the GUI. This is connected
to the "Clear All" menu item.
"""
# Clear the table for Matrix A. This is done by removing all rows/columns,
# setting them to the correct amount, then redefining the widget items in
# the table.
rowNum = self.__matrixAInputTable.rowCount()
colNum = self.__matrixAInputTable.columnCount()
self.__matrixAInputTable.setRowCount(0)
self.__matrixAInputTable.setRowCount(rowNum)
self.__matrixAInputTable.setColumnCount(0)
self.__matrixAInputTable.setColumnCount(colNum)
for row in range(rowNum):
for col in range(colNum):
self.__matrixAInputTable.setItem(row, col, QTableWidgetItem(''))
# Clear the table for Matrix B in the same way as Matrix A.
rowNum = self.__matrixBInputTable.rowCount()
colNum = self.__matrixBInputTable.columnCount()
self.__matrixBInputTable.setRowCount(0)
self.__matrixBInputTable.setRowCount(rowNum)
self.__matrixBInputTable.setColumnCount(0)
self.__matrixBInputTable.setColumnCount(colNum)
for row in range(rowNum):
for col in range(colNum):
self.__matrixBInputTable.setItem(row, col, QTableWidgetItem(''))
# Clear out the output text box and set the operation counter to zero again.
self.__outputTextBox.setText('')
self.__opCounter = 0
def __opSelectChanged(self):
"""
Callback for when the user has selected a new math operation to perform
from the dropdown list. This exists because for some operations, the user
needs to add a row or column to perform the operation on. The text box
for entering this should only be displayed when it is necessary.
"""
# Check if the new selection is in the operations that makes the entry
# field appear. If it is, set it as visible, then set the placeholder
# text to the appropriate text directing them to input a row or a column
# as appropriate. Otherwise, just make the entry field invisible.
if self.__opSelectComboBox.currentText() in MatOpGUI.OPS_TO_MAKE_ENTRY_VISIBLE:
self.__opEntryField.setVisible(True)
if self.__opSelectComboBox.currentText() in MatOpGUI.OPS_ON_ROW:
self.__opEntryField.setPlaceholderText('Enter a row...')
else:
self.__opEntryField.setPlaceholderText('Enter a column...')
else:
self.__opEntryField.setVisible(False)
# Finally, clear the entry field so they can see the placeholder text and
# to reset the field.
self.__opEntryField.clear()
    def __goButtonClicked(self):
        """
        Callback to execute when the Go! button is clicked to perform the mathematical
        operation. A variety of error checking is performed that may result in early
        termination of this method. In every case where the function returns early,
        it will output a messagebox to the user with a message detailing the nature
        of the problem.

        Side effects: appends the operation header, both matrices, and the result
        to the output text box and increments the operation counter.
        """
        # -- Perform Error Checking --------------------------------------------
        # If the entry field is visible for specifying the row/column for operations
        # that act only on a single row/column, make sure the user input a value
        # for it. If no value is found, then let the user know they need to input
        # one.
        if self.__opEntryField.isVisible():
            opEntryFieldText = self.__opEntryField.text()
            opRowOrCol = 'Row' if self.__opSelectComboBox.currentText() in MatOpGUI.OPS_ON_ROW else 'Column'
            # Verify the size is not an empty string
            if not opEntryFieldText:
                QMessageBox.critical(self, f'Invalid Operation {opRowOrCol}', f'{opRowOrCol} for the matrix operation is not provided.')
                return None
            # Verify the input is a valid number. Parsing via float first means
            # inputs like '3.0' are accepted as the integer 3.
            try:
                opEntryFieldFloat = float(opEntryFieldText)
                opEntryFieldInt = int(opEntryFieldFloat)
            except:
                QMessageBox.critical(self, f'Invalid Operation {opRowOrCol}', f'{opRowOrCol} of {opEntryFieldText} for the matrix operation is not a valid number.')
                return None
            # Make sure row input is an integer (no fractional part)
            if opEntryFieldFloat != opEntryFieldInt:
                QMessageBox.critical(self, f'Invalid Operation {opRowOrCol}', f'{opRowOrCol} of {opEntryFieldText} for the matrix operation is not a integer.')
                return None
        # -- Get Matrices from Table -------------------------------------------
        # This will get the two matrices from the table operate on. If either one
        # is None, that means a valid matrix was not defined in the table and an
        # error was already shown to the user. In that case, just return.
        matrixA = self.__getMatrix(self.__matrixAInputTable, 'A')
        if matrixA is None: return
        matrixB = self.__getMatrix(self.__matrixBInputTable, 'B')
        if matrixB is None: return
        # -- Create Matrix Operation Object ------------------------------------
        # This process is not optimal as it makes a new MatrixOperation object
        # every time. A better process would be to keep a record of all previously
        # generated MatrixOperation objects and pull from that history.
        try:
            matop = MatrixOperation(self.__nameLineEdit.text(), matrixA, matrixB)
        except MatrixOperationError as e:
            QMessageBox.critical(self, 'Invalid Matrices', str(e))
            return
        # -- Perform Additional Error Checking ---------------------------------
        # Now that the matrices are found, one more error check can be performed,
        # which is to verify that the row/column provided for the operation is
        # within range, based on the matrix sizes. Of course, only check this if
        # it is necessary.
        if self.__opEntryField.isVisible():
            if self.__opSelectComboBox.currentText() in MatOpGUI.OPS_ON_ROW:
                upperOpLimit = matop.productRows
            else:
                upperOpLimit = matop.productCols
            # The user enters one-based indices, hence the [1, upperOpLimit] bounds
            if opEntryFieldInt < 1 or upperOpLimit < opEntryFieldInt:
                QMessageBox.critical(self, f'Invalid Operation {opRowOrCol}', f'{opRowOrCol} {opEntryFieldText} for the matrix is out of bounds [1,{upperOpLimit}].')
                return None
        # -- Get Matrix Operation Result ---------------------------------------
        # Call the right function based on the user's requested operation.
        # NOTE: this chain relies on the exact ordering of MatOpGUI.OPERATIONS;
        # the indices below must stay in sync with that list. The user's
        # one-based row/column entry is converted to zero-based here (- 1).
        if self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[0]:
            result = matop.product
        elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[1]:
            result = matop.getProductColSum(opEntryFieldInt - 1)
        elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[2]:
            result = matop.getProductColProd(opEntryFieldInt - 1)
        elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[3]:
            result = matop.getProductColCumSum()
        elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[4]:
            result = matop.getProductColCumProd()
        elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[5]:
            result = matop.getProductRowSum(opEntryFieldInt - 1)
        elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[6]:
            result = matop.getProductRowProd(opEntryFieldInt - 1)
        elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[7]:
            result = matop.getProductRowCumSum()
        elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[8]:
            result = matop.getProductRowCumProd()
        elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[9]:
            result = matop.getProductTotalMin()
        elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[10]:
            result = matop.getProductTotalMax()
        elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[11]:
            result = matop.getProductTotalMean()
        elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[12]:
            result = matop.getProductTotalMedian()
        elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[13]:
            result = matop.getProductTotalSum()
        elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[14]:
            result = matop.getProductTotalProd()
        else:
            # If this point is reached, somehow the text of the combo box doesn't
            # match any text added to it. This point should never be reached, but
            # if it is, present an error to the user. This should not be the user's
            # fault and there would be nothing they could do to fix it, but better
            # to provide some sort of feedback to the user about the issue.
            QMessageBox.critical(self, 'Invalid Operation Selection', 'Invalid Operation Selection: '+self.__opSelectComboBox.currentText())
            return
        # -- Print Output ------------------------------------------------------
        # Make the output text area visible if it is not
        self.__outputTextBox.setVisible(True)
        # Increment the operation counter
        self.__opCounter += 1
        # Construct and print the header for the operation. The leading blank
        # lines separate this operation from any previous output.
        header = '\n\n' if self.__opCounter > 1 else ''
        header += '=' * 80 + '\n'
        header += f'= Operation {self.__opCounter}'
        if self.__nameLineEdit.text():
            header += ': ' + self.__nameLineEdit.text() + ' '
        header += '\n'
        header += '=' * 80 + '\n'
        self.__outputTextBox.append(header)
        # Output the matrices being multiplied
        self.__outputTextBox.append('Matrix A:\n')
        self.__outputTextBox.append(str(matrixA) + '\n')
        self.__outputTextBox.append('Matrix B:\n')
        self.__outputTextBox.append(str(matrixB) + '\n')
        # Output the operation result
        self.__outputTextBox.append(self.__opSelectComboBox.currentText() + ' Result:\n')
        self.__outputTextBox.append(str(result))
# == Matrix A ==============================================================
def __matrixASetSizeClicked(self):
"""
Callback for when the set size button is clicked to change the size input
for matrix A. This will update the QTableWidget's rows and columns to be
the appropriate size based on the user's inputs. Some error checking is
performed to ensure the user's inputs are valid. If a problem is found,
this will return early with a messagebox indicating the nature of the issue.
"""
# TODO: save/restore the values already entered so they don't get erased
# when the size changes.
# -- Perform Error Checking --------------------------------------------
# Validate the provided row. If it's invalid, return
rowNum = self.__validateSize(self.__matrixARowSize, 'A', 'Row')
if rowNum is None: return
# Set the text to the returned value, which should guarantee the input
# always looks like an integer.
self.__matrixARowSize.setText(str(rowNum))
# Validate the provided column. If it's invalid, return
colNum = self.__validateSize(self.__matrixAColSize, 'A', 'Col')
if colNum is None: return
# Set the text to the returned value, which should guarantee the input
# always looks like an integer.
self.__matrixAColSize.setText(str(colNum))
# -- Update matrix size ------------------------------------------------
self.__matrixAInputTable.setRowCount(0)
self.__matrixAInputTable.setRowCount(rowNum)
self.__matrixAInputTable.setColumnCount(0)
self.__matrixAInputTable.setColumnCount(colNum)
for row in range(rowNum):
for col in range(colNum):
self.__matrixAInputTable.setItem(row, col, QTableWidgetItem(''))
def __matrixARandGenClicked(self):
"""
Callback for when the generate button is clicked to generate a random
matrix for matrix A. After some basic error checking, this just generates
a random matrix, based on the inputs provided by the user (such as whether
to generate decimals or integers, and what range to use.
If an error is found, such as an invalid range value input by the user,
a messagebox will be displayed with information about the issue and
the function will return.
"""
# -- Perform Error Checking --------------------------------------------
# Validate the minimum range value
minRangeLimit = self.__validateRange(
self.__matrixAMinRandRange, 'A', 'Min', self.__matrixARandButtonGroup.checkedId() == 1
)
if minRangeLimit is None: return
# Set the text to the returned value.
self.__matrixAMinRandRange.setText(str(minRangeLimit))
# Validate the maximum range value
maxRangeLimit = self.__validateRange(
self.__matrixAMaxRandRange, 'A', 'Max', self.__matrixARandButtonGroup.checkedId() == 1
)
if maxRangeLimit is None: return
# Set the text to the returned value.
self.__matrixAMaxRandRange.setText(str(maxRangeLimit))
# -- Populate the matrix with random values ----------------------------
# Get the matrix size
rowNum = self.__matrixAInputTable.rowCount()
colNum = self.__matrixAInputTable.columnCount()
# Generate the matrix
if self.__matrixARandButtonGroup.checkedId() == 0: # Decimal
matrix = (np.random.rand(rowNum, colNum) * (maxRangeLimit - minRangeLimit)) + minRangeLimit
else: # Integer
matrix = np.random.randint(minRangeLimit, maxRangeLimit, size = (rowNum, colNum))
# Finally, populate the table with the generated matrix
self.__setMatrix(self.__matrixAInputTable, matrix)
# == Matrix B ==============================================================
    def __matrixBSetSizeClicked(self):
        """
        Callback for when the set size button is clicked to change the size input
        for matrix B. This will update the QTableWidget's rows and columns to be
        the appropriate size based on the user's inputs. Some error checking is
        performed to ensure the user's inputs are valid. If a problem is found,
        this will return early with a messagebox indicating the nature of the issue.
        """
        # TODO: save/restore the values already entered so they don't get erased
        # when the size changes.
        # -- Perform Error Checking --------------------------------------------
        # Validate the provided row. If it's invalid, return
        rowNum = self.__validateSize(self.__matrixBRowSize, 'B', 'Row')
        if rowNum is None: return
        # Set the text to the returned value, which should guarantee the input
        # always looks like an integer.
        self.__matrixBRowSize.setText(str(rowNum))
        # Validate the provided column. If it's invalid, return
        colNum = self.__validateSize(self.__matrixBColSize, 'B', 'Col')
        if colNum is None: return
        # Set the text to the returned value, which should guarantee the input
        # always looks like an integer.
        self.__matrixBColSize.setText(str(colNum))
        # -- Update matrix size ------------------------------------------------
        # Resetting the counts to zero first discards the old cells; fresh empty
        # QTableWidgetItems are then installed in every cell of the new grid.
        self.__matrixBInputTable.setRowCount(0)
        self.__matrixBInputTable.setRowCount(rowNum)
        self.__matrixBInputTable.setColumnCount(0)
        self.__matrixBInputTable.setColumnCount(colNum)
        for row in range(rowNum):
            for col in range(colNum):
                self.__matrixBInputTable.setItem(row, col, QTableWidgetItem(''))
    def __matrixBRandGenClicked(self):
        """
        Callback for when the generate button is clicked to generate a random
        matrix for matrix B. After some basic error checking, this just generates
        a random matrix, based on the inputs provided by the user (such as whether
        to generate decimals or integers, and what range to use).
        If an error is found, such as an invalid range value input by the user,
        a messagebox will be displayed with information about the issue and
        the function will return.
        """
        # -- Perform Error Checking --------------------------------------------
        # Validate the minimum range value. The final argument tells the
        # validator whether an integer is required (checked id 1 is the
        # 'Integer' radio button).
        minRangeLimit = self.__validateRange(
            self.__matrixBMinRandRange, 'B', 'Min', self.__matrixBRandButtonGroup.checkedId() == 1
        )
        if minRangeLimit is None: return
        # Set the text to the returned value.
        self.__matrixBMinRandRange.setText(str(minRangeLimit))
        # Validate the maximum range value
        maxRangeLimit = self.__validateRange(
            self.__matrixBMaxRandRange, 'B', 'Max', self.__matrixBRandButtonGroup.checkedId() == 1
        )
        if maxRangeLimit is None: return
        # Set the text to the returned value.
        self.__matrixBMaxRandRange.setText(str(maxRangeLimit))
        # -- Populate the matrix with random values ----------------------------
        # Get the matrix size
        rowNum = self.__matrixBInputTable.rowCount()
        colNum = self.__matrixBInputTable.columnCount()
        # Generate the matrix
        if self.__matrixBRandButtonGroup.checkedId() == 0: # Decimal
            # Scale uniform [0, 1) samples into the requested range
            matrix = (np.random.rand(rowNum, colNum) * (maxRangeLimit - minRangeLimit)) + minRangeLimit
        else: # Integer
            matrix = np.random.randint(minRangeLimit, maxRangeLimit, size = (rowNum, colNum))
        # Finally, populate the table with the generated matrix
        self.__setMatrix(self.__matrixBInputTable, matrix)
#===========================================================================
# Utilities
#===========================================================================
def __setMatrix(self, table, matrix):
"""
Set the QTableWidget cells with the content from a numpy matrix. Note
that the table and matrix should have the same dimensions.
Input:
table: A QTableWidget object to set the cell values of.
matrix: A numpy array which has values to store in the table.
"""
# No error checking is performed here to confirm that the table and matrix
# have the correct size. Since this is an internal function, it is assumed
# the calling functions are already making sure this isn't an issue.
# In addition, if an issue were found, there'd be no easy way to handle it
# as it wouldn't be the user's fault.
for row in range(np.shape(matrix)[0]):
for col in range(np.shape(matrix)[1]):
# Get the item at the current row/column of the table and set the
# text to the value in the matrix.
item = table.item(row, col)
item.setText(str(matrix[row,col]))
def __getMatrix(self, table, matrixName):
"""
Extract a numpy array from a QTableWidget. The output array will have the
same size as the table. If the table does not have a valid value in it, a
messagebox will be shown to the user with information about the problem and
the method will return early with None.
Input:
table: The QTableWidget object to pull data from for constructing the
numpy array.
matrixName: A string, either 'A' or 'B'. Used to populate the error
message displayed to the user in the event of an issue.
Output:
Returns a numpy array of the same dimensions as the table and with values
from the table. The values are set as floats by default. If the table
has invalid entries (either because it's empty or not a float), None
will be returned.
"""
# Extract the row and column number of the table
rowNum = table.rowCount()
colNum = table.columnCount()
# Create a matrix to return, initially all zeros. Make it all floating type.
result = np.zeros((rowNum, colNum), dtype = np.float)
for row in range(rowNum):
for col in range(colNum):
value = table.item(row, col).text()
# Verify the value is not an empty string
if not value:
row += 1
col += 1
QMessageBox.critical(self, 'Invalid Matrix Entry', f'Value for cell ({row}, {col}) of matrix {matrixName} is not provided.')
return None
# Verify the input is a valid number
try:
num = float(value)
except:
row += 1
col += 1
QMessageBox.critical(self, 'Invalid Matrix Entry', f'Value of {value} for cell ({row}, {col}) of matrix {matrixName} is not a valid number.')
return None
# If no issues, store the number in the matrix
result[row,col] = num
return result
def __validateSize(self, lineEdit, matrix, direction):
"""
Utility function for verifying the size provided by the user in a text box
Input:
lineEdit: The QLineEdit object that has data in it about the size to
extract.
matrix: A string, either 'A' or 'B'. Used to populate the error
message displayed to the user in the event of an issue.
direction: A string, either 'Row' or 'Column'. Used to populate the
error message displayed to the user in the event of an issue.
Output:
Returns the size pulled from the QLineEdit widget as an integer. If
an error is found (e.g., nothing was provided or the input was not
an int), then None is returned and a messagebox is presented to the
user with information about the nature of the issue.
"""
# Pull out the size from the line edit field
sizeNum = lineEdit.text()
# Verify the size is not an empty string
if not sizeNum:
QMessageBox.critical(self, f'Invalid {direction} Size', f'{direction} size for matrix {matrix} is not provided.')
return None
# Verify the input is a valid number
try:
sizeNumFloat = float(sizeNum)
sizeNumInt = int(sizeNumFloat)
except:
QMessageBox.critical(self, f'Invalid {direction} Size', f'{direction} size of {sizeNum} for matrix {matrix} is not a valid number.')
return None
# Make sure row input is an integer
if sizeNumFloat != sizeNumInt:
QMessageBox.critical(self, f'Invalid {direction} Size', f'{direction} size of {sizeNum} for matrix {matrix} is not a integer.')
return None
# Make sure row input is in valid range
if sizeNumInt < 1 or 10 < sizeNumInt:
QMessageBox.critical(self, f'Invalid {direction} Size', f'{direction} size of {sizeNum} for matrix {matrix} is outside valid range of [1,10].')
return None
return sizeNumInt
def __validateRange(self, lineEdit, matrix, end, isInt):
"""
Utility function for verifying the range provided by the user in a text box
Input:
lineEdit: The QLineEdit object that has data in it about the range to
extract.
matrix: A string, either 'A' or 'B'. Used to populate the error
message displayed to the user in the event of an issue.
end: A string, either 'Row' or 'Column'. Used to populate the
error message displayed to the user in the event of an issue.
isInt: A boolean indicating if the output is supposed to be an integer
or a decimal.
Output:
Returns the range pulled from the QLineEdit widget as an integer, or
float. If an error is found (e.g., nothing was provided or the input
was not an int as requested), then None is returned and a messagebox
is presented to the user with information about the nature of the issue.
"""
# Pull out the range from the line edit field
rangeLimit = lineEdit.text()
# Verify the limit is not an empty string
if not rangeLimit:
QMessageBox.critical(self, f'Invalid {end} Range', f'{end} range limit for matrix {matrix} is not provided.')
return None
try:
rangeLimitFloat = float(rangeLimit)
rangeLimitInt = int(rangeLimitFloat)
except:
QMessageBox.critical(self, 'Invalid {end} Range', f'{end} range limit of {rangeLimit} for matrix {matrix} is not a valid number.')
return None
# Make sure the range value is an integer, if it's supposed to be
if isInt and rangeLimitInt != rangeLimitFloat:
QMessageBox.critical(self, 'Invalid {end} Range', f'{end} range limit of {rangeLimit} for matrix {matrix} is not an integer, but integer was selected.')
return None
return rangeLimitInt if isInt else rangeLimitFloat
|
nilq/baby-python
|
python
|
"""
Uses docspec to parse docstrings to markdown.
Intended for use with static site generators where further linting / linking / styling is done downstream.
Loosely based on Numpy-style docstrings.
Automatically infers types from signature typehints. Explicitly documented types are NOT supported in docstrings.
"""
import logging
import docspec
import docspec_python
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
line_break_chars = ['-', '_', '!', '|', '>', ':']
def _is_property(mem):
    """Return True when the member carries a @property decoration."""
    # decorations may be None when docspec found no decorators at all
    decorations = mem.decorations or []
    return any(dec.name == 'property' for dec in decorations)
def _is_setter(mem):
    """Return True when any decoration on the member mentions 'setter'."""
    # decorations may be None when docspec found no decorators at all
    decorations = mem.decorations or []
    return any('setter' in dec.name for dec in decorations)
def extract_text_block(splits_enum, splits, indented_block=False, is_hint_block=False):
    """
    Parses a block of text and decides whether or not to wrap.
    Return if iters finish or on end of indentation (optional) or on start of new heading

    Input:
      splits_enum: an enumerate iterator over the docstring's lines; the index is
        used as a lookahead position into splits.
      splits: the full list of lines backing splits_enum.
      indented_block: when True, four leading spaces are stripped from each
        consumed line and the block ends at the first non-indented,
        non-blank line.
      is_hint_block: when True, the block ends when a line starting with ':::'
        is encountered (the hint terminator).
    Output:
      A (lookahead_idx, next_line, text) tuple, where text is the collected
      block joined with newlines and next_line is the first line that was NOT
      consumed into the block.
    NOTE(review): next(splits_enum) will raise StopIteration if the iterator is
    exhausted before a terminating condition fires -- presumably callers
    guarantee a suitable final line; confirm.
    """
    block = []
    while True:
        # feed
        lookahead_idx, next_line = next(splits_enum)
        # return if indented block and next is not indented (but don't get tripped up with empty lines)
        if indented_block and not next_line.startswith(' ') and not next_line.strip() == '':
            return lookahead_idx, next_line, '\n'.join(block)
        # return if the next next-line would be a new heading
        elif lookahead_idx < len(splits) and splits[lookahead_idx].startswith('---'):
            return lookahead_idx, next_line, '\n'.join(block)
        # return if inside a hint block and the end of the hint block has been encountered
        elif is_hint_block and next_line.strip().startswith(':::'):
            return lookahead_idx, next_line, '\n'.join(block)
        # be careful with stripping content for lines with intentional breaks, e.g. indented bullets...
        # if parsing indented blocks, strip the first four spaces
        if indented_block:
            next_line = next_line[4:]
        # code blocks: consume everything verbatim until the closing fence
        if next_line.strip().startswith('```'):
            code_block = next_line.strip() + '\n'
            while True:
                lookahead_idx, next_line = next(splits_enum)
                if indented_block:
                    next_line = next_line[4:]
                code_block += next_line + '\n'
                if next_line.startswith('```'):
                    break
            block.append(code_block)
        # tip blocks: recurse to collect the hint's own content
        elif next_line.strip().startswith(':::'):
            hint_in = '\n' + next_line.strip() + '\n\n'
            # unpacks hint block
            lookahead_idx, next_line, hint_block = extract_text_block(splits_enum,
                                                                      splits,
                                                                      indented_block=indented_block,
                                                                      is_hint_block=True)
            # next line will be closing characters, i.e. ':::', insert manually to add newline
            block.append(hint_in + hint_block + '\n:::')
        # if no block content exists yet
        elif not len(block):
            block.append(next_line)
        # keep blank lines
        elif next_line.strip() == '':
            block.append('')
        # don't wrap if the previous line is blank
        elif block[-1] == '':
            block.append(next_line)
        # don't wrap if the line starts with a bullet point, picture, or table character
        elif next_line.strip()[0] in line_break_chars:
            block.append(next_line)
        # or if the previous line ends with a bullet point, picture, or table character
        elif block[-1].strip()[-1] in line_break_chars:
            block.append(next_line)
        # otherwise wrap
        else:
            # should be safe to strip text when wrapping
            block[-1] += ' ' + next_line.strip()
        # return if iters exhausted
        if lookahead_idx == len(splits):
            return lookahead_idx, next_line, '\n'.join(block)
def process_member(member, lines, config, class_name=None):
    """Render a docspec Function or Class to markdown, appending to *lines*.

    Formats the member's name, call signature, and parsed docstring using
    the templates in *config*. *class_name* is provided when *member* is a
    method / property of a class; private members (other than ``__init__``)
    and property setters are skipped.

    Raises:
        ValueError: if the docstring's headings are malformed, or if a
            docstring embeds type annotations (type-hints in the signature
            are required instead).
    """
    # this method only processes functions and classes
    if not isinstance(member, (docspec.Function, docspec.Class)):
        return
    # don't process private members
    if (member.name.startswith('_') and not member.name == '__init__') or _is_setter(member):
        return
    # keep track of the arguments and their types for automatically building function parameters later-on
    arg_types_map = {}
    # escape underscores in class / method / function names for markdown
    # (raw strings avoid the invalid '\_' escape-sequence warning)
    member_name = member.name.replace('_', r'\_')
    if class_name is not None:
        class_name_esc = class_name.replace('_', r'\_')
    # if a class definition use the class template
    if isinstance(member, docspec.Class):
        # when the class is passed-in directly its name is captured in the member_name
        lines.append(config['class_name_template'].format(class_name=class_name_esc))
    # if the class __init__, then display the class name and .__init__
    elif class_name and member.name == '__init__':
        lines.append(config['function_name_template'].format(function_name=f'{class_name_esc}'))
    # if a class property
    elif class_name is not None and _is_property(member):
        lines.append(config['class_property_template'].format(prop_name=f'{class_name_esc}.{member_name}'))
    # if a class method
    elif class_name is not None:
        lines.append(config['function_name_template'].format(function_name=f'{class_name_esc}.{member_name}'))
    # otherwise a function
    else:
        lines.append(config['function_name_template'].format(function_name=member_name))
    # process the member's signature if a method or a function - classes won't have args
    if hasattr(member, 'args') and not _is_property(member):
        # prepare the signature string - use member.name instead of escaped versions
        if class_name is not None and member.name == '__init__':
            signature = f'{class_name}('
        elif class_name is not None:
            signature = f'{class_name}.{member.name}('
        else:
            signature = f'{member.name}('
        # the spacer is used for lining up wrapped lines
        spacer = len(signature)
        # unpack the arguments and add
        for idx, arg in enumerate(member.args):
            # ignore self parameter
            if arg.name == 'self':
                continue
            # param name
            param_name = arg.name
            # add to the arg_types_map map using the function / method name and param name
            arg_types_map[param_name] = arg.datatype
            # if the argument type is KeywordRemainder then add the symbols
            if arg.type.name == 'KeywordRemainder':
                param_name = '**' + param_name
            # first argument is wedged against bracket
            # except for classes where self parameters are ignored and second argument is wedged
            if idx == 0 or class_name is not None and idx == 1:
                signature += param_name
            # other arguments start on a new line
            else:
                signature += f'{" " * spacer}{param_name}'
            # add default values where present
            if arg.default_value is not None:
                signature += f'={arg.default_value}'
            # if not the last argument, add a comma
            if idx != len(member.args) - 1:
                signature += ',\n'
        # close the signature
        signature += ')'
        # add the return type if present
        if member.return_type is not None:
            signature += f'\n{" " * spacer}-> {member.return_type}'
        # set into the template
        signature = config['signature_template'].format(signature=signature)
        lines.append(signature)
    # process the docstring
    if member.docstring is not None:
        # split the docstring at new lines
        splits = member.docstring.split('\n')
        # iter the docstring with a lookahead index
        splits_enum = enumerate(splits, start=1)
        try:
            # skip and go straight to headings if no introductory text
            if len(splits) > 1 and splits[1].startswith('---'):
                lookahead_idx, next_line = next(splits_enum)
            # otherwise, look for introductory text
            else:
                lookahead_idx, next_line, text_block = extract_text_block(splits_enum, splits)
                if len(text_block):
                    lines.append(text_block)
            # look for headings
            while lookahead_idx < len(splits):
                # break if not a heading
                if not splits[lookahead_idx].startswith('---'):
                    raise ValueError('Parser out of lockstep with headings.')
                heading = next_line.strip()
                lines.append(config['heading_template'].format(heading=heading))
                # skip the underscore line
                next(splits_enum)
                # if not param-type headings - just extract the text blocks
                if heading not in ['Parameters', 'Returns', 'Yields', 'Raises']:
                    lookahead_idx, next_line, text_block = extract_text_block(splits_enum, splits)
                    if len(text_block):
                        lines.append(text_block)
                # otherwise iterate the parameters and their indented arguments
                else:
                    # initial prime to move from heading to parameter name
                    lookahead_idx, next_line = next(splits_enum)
                    # Iterate nested parameters
                    while True:
                        # this parser doesn't process typehints, use typehints in function declarations instead
                        if ' ' in next_line.strip() or ':' in next_line.strip():
                            raise ValueError('Parser does not support types in docstrings. Use type-hints instead.')
                        # extract the parameter name
                        param_name = next_line.strip()
                        # process the indented parameter description
                        lookahead_idx, next_line, param_description = extract_text_block(splits_enum,
                                                                                         splits,
                                                                                         indented_block=True)
                        # only include type information for Parameters
                        # (a KeyError below means the docstring documents a
                        # parameter that is missing from the signature)
                        if heading == 'Parameters':
                            param_type = arg_types_map[param_name]
                            param = config['param_template'].format(name=param_name,
                                                                    type=param_type,
                                                                    description=param_description)
                        else:
                            param = config['return_template'].format(name=param_name,
                                                                     description=param_description)
                        lines.append(param)
                        # break if a new heading found
                        if lookahead_idx == len(splits) or splits[lookahead_idx].startswith('---'):
                            break
        # catch exhausted enum
        except StopIteration:
            pass
def parse(module_name: str,
          module: docspec_python.Module,
          config: dict):
    """Convert a parsed *module* into a list of markdown strings.

    Emits optional frontmatter, the escaped module name, the module
    docstring (flattened to a single paragraph), an optional table of
    contents, then every public function / class / method via
    process_member. Module-level variables (docspec.Data) are skipped.

    Returns:
        The accumulated list of markdown strings.
    """
    lines = []
    # frontmatter
    if config['frontmatter_template'] is not None:
        lines.append(config['frontmatter_template'])
    # module name - escape underscores for markdown; a raw string avoids the
    # invalid '\_' escape-sequence warning the previous literal triggered.
    # NOTE(review): the replace runs on the already-formatted template, so
    # underscores in the template itself get escaped too - confirm intended.
    lines.append(config['module_name_template'].format(module_name=module_name).replace('_', r'\_'))
    # module docstring
    if module.docstring is not None:
        lines.append(module.docstring.strip().replace('\n', ' '))
    if config['toc_template'] is not None:
        lines.append(config['toc_template'])
    # iterate the module's members
    for member in module.members:
        # ignores module-level variables
        if isinstance(member, docspec.Data):
            continue
        # process functions
        elif isinstance(member, docspec.Function):
            process_member(member, lines, config)
        # process classes and nested methods
        elif isinstance(member, docspec.Class):
            class_name = member.name
            process_member(member, lines, config, class_name)
            for nested_member in member.members:
                process_member(nested_member, lines, config, class_name)
    return lines
|
nilq/baby-python
|
python
|
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from django.urls import reverse_lazy
from status import models, forms
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required
def add_comment(request,pk):
    """Attach a new comment to the post identified by *pk*.

    GET renders an empty comment form; POST validates the submission,
    saves the comment against the post, and redirects to the post's
    detail page. An invalid POST re-renders the bound form with errors.
    """
    post = get_object_or_404(models.Post, pk=pk)
    if request.method != 'POST':
        # fresh, unbound form for the initial page load
        blank_form = forms.CommentForm()
        return render(request, 'status/comment_form.html', {'form':blank_form})
    bound_form = forms.CommentForm(request.POST)
    if bound_form.is_valid():
        new_comment = bound_form.save(commit=False)
        new_comment.post = post
        new_comment.save()
        return redirect('post_detail', pk=post.pk)
    # invalid submission: show the form again with validation errors
    return render(request, 'status/comment_form.html', {'form':bound_form})
@login_required
def comment_approval(request,pk):
    """Mark the comment *pk* as approved, then return to its post."""
    pending = get_object_or_404(models.Comment, pk=pk)
    pending.approve()
    return redirect('post_detail', pk=pending.post.pk)
@login_required
def comment_remove(request,pk):
    """Delete the comment *pk* and redirect to the post it belonged to."""
    doomed = get_object_or_404(models.Comment, pk=pk)
    # remember the parent post before the comment row disappears
    parent_pk = doomed.post.pk
    doomed.delete()
    return redirect('post_detail', pk=parent_pk)
|
nilq/baby-python
|
python
|
def countingSort(arr):
    """Sort a list of small non-negative integers with counting sort.

    Runs in O(n + k) time with k == 100; every element must satisfy
    0 <= arr[i] < 100 (the problem's stated constraint).

    Args:
        arr: list of ints in the range [0, 100).

    Returns:
        A new sorted list; *arr* is left unmodified.
    """
    counter = [0]*100  # as per the constraint that arr[i] < 100
    for num in arr:
        counter[num] += 1
    # expand the counts back out in ascending order; use `result` rather
    # than shadowing the builtin `sorted` as the original did
    result = []
    for num, cnt in enumerate(counter):
        result += [num]*cnt
    return result
|
nilq/baby-python
|
python
|
#
# relu paddle model generator
#
import numpy as np
from save_model import saveModel
import sys
def relu(name: str, x):
    """Build, run, and save a single-op ReLU paddle model (static graph).

    Args:
        name: model name forwarded to saveModel.
        x: input ndarray; must be float32 to match the declared placeholder.

    Returns:
        The ReLU output ndarray (first fetched result).
    """
    import paddle as pdpd
    # static-graph mode: the ops below are recorded into the default program
    pdpd.enable_static()
    node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32')
    out = pdpd.nn.functional.relu(node_x)
    cpu = pdpd.static.cpu_places(1)
    exe = pdpd.static.Executor(cpu[0])
    # startup program will call initializer to initialize the parameters.
    exe.run(pdpd.static.default_startup_program())
    outs = exe.run(
        feed={'x': x},
        fetch_list=[out])
    # NOTE(review): target_dir comes from the command line (sys.argv[1]);
    # the script must be invoked with an output-directory argument
    saveModel(name, exe, feedkeys=['x'], fetchlist=[out],
              inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])
    return outs[0]
def main():
    # single test vector covering negative, zero, and positive inputs
    data = np.array([-2, 0, 1]).astype('float32')
    relu("relu", data)
if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
import os
import os.path
from dataclasses import dataclass
from os import path
import sys
from typing import List
import requests
from bs4 import BeautifulSoup
import re
import win32console # needs pywin32
import time
_stdin = win32console.GetStdHandle(win32console.STD_INPUT_HANDLE)
def input_def(prompt, default=''):
    """input() with a pre-typed, editable default value (Windows only).

    Injects *default* into the console input buffer as synthetic key-down
    events via pywin32, so the user sees the text already typed and can
    edit it before pressing Enter.
    """
    keys = []
    for c in str(default):
        # one KEY_EVENT record per character of the default text
        evt = win32console.PyINPUT_RECORDType(win32console.KEY_EVENT)
        evt.Char = c
        evt.RepeatCount = 1
        evt.KeyDown = True
        keys.append(evt)
    _stdin.WriteConsoleInput(keys)
    return input(prompt)
@dataclass
class Scene:
    # one scene of a movie as listed on the scraped page
    title: str
    performers: List[str]
    number: int  # 1-based position within the movie
@dataclass
class Movie:
    # scraped metadata for a whole movie
    title: str
    year: str  # may be "" when the page lacks a year element
    date: str  # currently always "" (see get_movie_data's todo)
    scenes: List[Scene]
def get_scene_performers(div):
    """Collect performer names from the anchors next to a 'Starring:' node.

    Returns an empty list when *div* is None or has no parent to search
    (the AttributeError path).
    """
    try:
        anchors = div.parent.find_all('a')
    except AttributeError:
        return []
    return [anchor.text for anchor in anchors]
def get_movie_data(url):
    """Scrape a movie page at *url* and return a Movie with its scene list.

    Performs a blocking HTTP GET (raises on network errors). The year
    falls back to "" when the expected element is missing; scene numbers
    are assigned 1-based in page order.
    """
    r = requests.get(url)
    soup = BeautifulSoup(r.text, features="html.parser")
    title = soup.find('h1').text.strip().split("\n")[0].strip()
    try:
        year = soup.find('div', {'class': 'item-info'}).find('small').text
    except AttributeError:
        year = ""
    # NOTE(review): studio is scraped but never used (only in the commented
    # print below); this line raises if the Studio link is absent - confirm
    studio = soup.find('div', {'class': 'item-info'}).find('a', {'label': 'Studio'}).text
    date = ""  # todo
    scene_rows = list(map(lambda row: row.parent, soup.find_all('div', {'class': 'col-sm-6 m-b-1'})))
    scenes = []
    for i, row in enumerate(scene_rows):
        scene_title = row.find('a', {'label': 'Scene Title'}).text.strip()
        scene_performers = get_scene_performers(row.find(text=re.compile(r'Starring:')))
        scenes.append(Scene(scene_title, scene_performers, i + 1))
    #print(title)
    # print(year)
    #print(studio)
    #print(scenes)
    return Movie(title, year, date, scenes)
def determine_files(folder):
    """Return the names of regular files directly inside *folder*
    (subdirectories are excluded)."""
    return [entry for entry in os.listdir(folder)
            if path.isfile(path.join(folder, entry))]
def handle_file(folder, index, file, data):
    """Interactively rename one file using the scraped movie *data*.

    Asks the user which scene the file corresponds to; a negative result
    (the user entered 0) skips the file.
    """
    index = ask_index(index, file)
    if index >= 0:
        new_filename = get_new_filename(index, file, data)
        rename(folder, file, new_filename)
def ask_index(index, filename):
    """Prompt the user for a 1-based scene number for *filename*.

    Returns the 0-based scene index; entering 0 yields -1, which callers
    treat as "skip this file". Raises ValueError on non-numeric input.
    The *index* parameter is unused but kept for interface compatibility
    (it was once offered as the default answer, see the commented line).
    """
    #input = input_def('ENTER SCENE NUMBER FOR "' + filename + '"\nIndex: ', index + 1)
    # use `answer` rather than shadowing the builtin `input` as before
    answer = input_def('ENTER SCENE NUMBER FOR "' + filename + ' (0 to skip)"\nIndex: ', "")
    # brief pause so the console input buffer settles before the next prompt
    time.sleep(0.5)
    return int(answer) - 1
def get_new_filename(index, file, data):
    """Build the display filename for scene *index* of movie *data*,
    keeping the original file's extension and stripping characters that
    are illegal in Windows filenames."""
    scene = data.scenes[index]
    extension = path.splitext(file)[-1]
    raw = '{title}{year} - Scene {index:02d}{performer}{scene_title}{ext}'.format(
        title=data.title,
        year=build_year_string(data.year),
        index=scene.number,
        performer=build_performer_string(scene.performers),
        scene_title=' - ' + scene.title,
        ext=extension
    )
    # drop forbidden characters, then soften quotes and colons
    cleaned = re.sub(r'[<>/\\|?*]', '', raw)
    cleaned = re.sub(r'"', '\'', cleaned)
    cleaned = re.sub(r':', ' -', cleaned)
    return cleaned
def build_year_string(year):
    """Return ' <year>' for filename display, or '' when year is unknown."""
    return "" if year == "" else f" {year}"
def build_performer_string(performers):
    """Format a performer list as a ' - ' filename suffix.

    Three or more -> 'A, B & C'; two -> 'A & B'; one -> 'A'; none -> ''.
    """
    count = len(performers)
    if count == 0:
        return ''
    if count == 1:
        return ' - ' + performers[0]
    if count == 2:
        return ' - ' + ' & '.join(performers)
    # oxford-less list: commas between all but the last, '&' before it
    return ' - ' + ', '.join(performers[:-1]) + ' & ' + str(performers[-1])
def rename(folder, file, new_filename):
    """Rename *file* (inside *folder*) to *new_filename* and log the change."""
    source = path.join(folder, file)
    target = path.join(folder, new_filename)
    os.rename(source, target)
    print(f'Rename {file} to {new_filename}')
if __name__ == '__main__':
    # usage: python script.py <folder>
    # each file in the folder is then matched interactively to a movie URL
    folder = sys.argv[1]
    #movie_url = sys.argv[2]
    files = determine_files(folder)
    files.sort()
    for index, file in enumerate(files):
        print()
        movie_url = input(f'URL for {file}: ')
        data = get_movie_data(movie_url)
        handle_file(folder, index, file, data)
|
nilq/baby-python
|
python
|
import numpy as np
def get_box_from_point(x, y, kernel, pad, stride):
    """Map an output coordinate (x, y) back to its receptive-field box.

    Uses the 1-based convolution arithmetic x_min = (x-1)*stride + 1 - pad,
    with the box spanning one kernel extent. Returns (x_min, y_min,
    x_max, y_max), inclusive; coordinates may fall outside the input when
    padding was applied.
    """
    kernel_x, kernel_y = kernel[0], kernel[1]
    pad_x, pad_y = pad[0], pad[1]
    stride_x, stride_y = stride[0], stride[1]
    x_min = (x - 1) * stride_x + 1 - pad_x
    y_min = (y - 1) * stride_y + 1 - pad_y
    # the box extends exactly one kernel width/height from its origin
    return x_min, y_min, x_min + kernel_x - 1, y_min + kernel_y - 1
def get_convd_size(W, H, kernel, pad, stride):
    """Spatial size after a convolution: floor((dim + 2*pad - k)/stride) + 1.

    Returns (W_res, H_res) for an input of width W and height H.
    """
    W_res = int((W + 2 * pad[0] - kernel[0]) / stride[0]) + 1
    H_res = int((H + 2 * pad[1] - kernel[1]) / stride[1]) + 1
    return W_res, H_res
def get_original_size(W, H, kernel, pad, stride):
    """Inverse of get_convd_size: the input size whose convolution output
    has spatial size (W, H). Returns (W_res, H_res)."""
    W_res = (W - 1) * stride[0] + kernel[0] - 2 * pad[0]
    H_res = (H - 1) * stride[1] + kernel[1] - 2 * pad[1]
    return W_res, H_res
def single_map_value(value_rec, kernel, pad, stride):
    """Spread each output-cell value uniformly over its conv receptive field.

    Every cell of *value_rec* (a 2-D array indexed [W, H]) contributes
    value/area to each pre-convolution cell inside its kernel box; box
    portions falling outside the original extent are silently discarded.

    Returns a new float array at the pre-convolution resolution given by
    get_original_size.
    """
    W = value_rec.shape[0]
    H = value_rec.shape[1]
    W_ori, H_ori = get_original_size(W, H, kernel, pad, stride)
    res_rec = np.full([W_ori, H_ori],0.)
    for i in range(W):
        for j in range(H):
            tmp_v = value_rec[i, j]
            # NOTE(review): i, j are 0-based here while get_box_from_point's
            # formula reads as 1-based ((x-1)*stride + 1 - pad) - confirm the
            # intended index origin
            x_min, y_min, x_max, y_max = get_box_from_point(i, j, kernel, pad, stride)
            # distribute the value evenly over the (inclusive) box area
            give_v = (tmp_v+0.)/((x_max + 1 - x_min)*(y_max + 1 - y_min))
            for p in range(x_min, x_max+1):
                for q in range(y_min, y_max+1):
                    # skip the portions of the box that fall out of bounds
                    if p >= 0 and p < W_ori and q >=0 and q < H_ori:
                        res_rec[p, q] += give_v
    return res_rec
def multiple_map_value(value_rec, params_list):
    """Apply single_map_value once per (kernel, pad, stride) triple in
    *params_list*, threading the result through each stage in order."""
    result = value_rec
    for kernel, pad, stride in params_list:
        result = single_map_value(result, kernel, pad, stride)
    return result
# tst_area = np.full([40,40],0.)
# tst_area[20,20] = 1.
# res_area = single_map_value(value_rec=tst_area, kernel=[5, 5], pad=[2,2], stride=[1,1])
# for i in range(res_area.shape[0]):
# for j in range(res_area.shape[1]):
# print i,j,res_area[i, j]
|
nilq/baby-python
|
python
|
from django.urls import path
from . import views
# URL routes for the orders app; Django matches top to bottom, first wins.
urlpatterns = [
    # order details: bare path, or a specific order selected by UUID
    path("", views.OrderDetailsView.as_view()),
    path("<uuid:order_id>", views.OrderDetailsView.as_view()),
    # orders filtered by a status value captured from the path
    path("status/<str:order_status_value>", views.OrderStatusView.as_view()),
    path("incoming", views.IncomingOrders.as_view()),
    path("statuses", views.OrderStatusesList.as_view()),
    path("current", views.OrderCurrent.as_view()),
    path("list", views.OrderList.as_view()),
    path("complaint", views.ComplaintView.as_view()),
]
|
nilq/baby-python
|
python
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import argparse
from typing import Callable, Union, List, Tuple
import numpy as np
import cv2
import scipy
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddlehub.module.module import moduleinfo
import paddlehub.vision.segmentation_transforms as T
from paddlehub.module.module import moduleinfo, runnable, serving
from modnet_mobilenetv2_matting.mobilenetv2 import MobileNetV2
import modnet_mobilenetv2_matting.processor as P
@moduleinfo(
    name="modnet_mobilenetv2_matting",
    type="CV",
    author="paddlepaddle",
    summary="modnet_mobilenetv2_matting is a matting model",
    version="1.0.0"
)
class MODNetMobilenetV2(nn.Layer):
    """
    The MODNet implementation based on PaddlePaddle.
    The original article refers to
    Zhanghan Ke, et, al. "Is a Green Screen Really Necessary for Real-Time Portrait Matting?"
    (https://arxiv.org/pdf/2011.11961.pdf).
    Args:
        hr_channels(int, optional): The channels of high resolutions branch. Default: 32.
        pretrained(str, optional): The path of a pretrained model checkpoint. Default: None,
            in which case the checkpoint bundled with the hub module is loaded.
    """
    def __init__(self, hr_channels:int = 32, pretrained=None):
        super(MODNetMobilenetV2, self).__init__()
        self.backbone = MobileNetV2()
        self.pretrained = pretrained
        self.head = MODNetHead(
            hr_channels=hr_channels, backbone_channels=self.backbone.feat_channels)
        self.blurer = GaussianBlurLayer(1, 3)
        self.transforms = P.Compose([P.LoadImages(), P.ResizeByShort(), P.ResizeToIntMult(), P.Normalize()])
        if pretrained is not None:
            # a user-supplied checkpoint takes precedence
            model_dict = paddle.load(pretrained)
            self.set_dict(model_dict)
            print("load custom parameters success")
        else:
            # fall back to the checkpoint shipped with the hub module;
            # self.directory is injected by paddlehub's @moduleinfo machinery
            checkpoint = os.path.join(self.directory, 'modnet-mobilenetv2.pdparams')
            model_dict = paddle.load(checkpoint)
            self.set_dict(model_dict)
            print("load pretrained parameters success")
    def preprocess(self, img: Union[str, np.ndarray] , transforms: Callable, trimap: Union[str, np.ndarray] = None):
        """Run self.transforms on an image (and optional trimap) and convert
        the results to batched paddle tensors ready for forward()."""
        data = {}
        data['img'] = img
        if trimap is not None:
            data['trimap'] = trimap
            data['gt_fields'] = ['trimap']
        # trans_info records resize/pad steps so predict() can undo them
        data['trans_info'] = []
        data = self.transforms(data)
        data['img'] = paddle.to_tensor(data['img'])
        data['img'] = data['img'].unsqueeze(0)
        if trimap is not None:
            data['trimap'] = paddle.to_tensor(data['trimap'])
            data['trimap'] = data['trimap'].unsqueeze((0, 1))
        return data
    def forward(self, inputs: dict):
        """Run backbone + matting head. Returns the predicted matte in eval
        mode, or a dict of branch logits in training mode (see MODNetHead)."""
        x = inputs['img']
        feat_list = self.backbone(x)
        y = self.head(inputs=inputs, feat_list=feat_list)
        return y
    def predict(self, image_list: list, trimap_list: list = None, visualization: bool =False, save_path: str = "modnet_mobilenetv2_matting_output"):
        """Predict uint8 alpha mattes for a list of images.

        Args:
            image_list: image paths or arrays accepted by the transforms.
            trimap_list: optional trimaps aligned with image_list.
            visualization: when True, also write each matte to save_path.
            save_path: output directory for visualization PNGs.

        Returns:
            A list of uint8 alpha matte arrays, one per input image.
        """
        self.eval()
        result = []
        with paddle.no_grad():
            for i, im_path in enumerate(image_list):
                trimap = trimap_list[i] if trimap_list is not None else None
                data = self.preprocess(img=im_path, transforms=self.transforms, trimap=trimap)
                alpha_pred = self.forward(data)
                # undo the resize/pad so the matte matches the input image
                alpha_pred = P.reverse_transform(alpha_pred, data['trans_info'])
                alpha_pred = (alpha_pred.numpy()).squeeze()
                alpha_pred = (alpha_pred * 255).astype('uint8')
                alpha_pred = P.save_alpha_pred(alpha_pred, trimap)
                result.append(alpha_pred)
                if visualization:
                    if not os.path.exists(save_path):
                        os.makedirs(save_path)
                    # timestamp-based name avoids collisions between runs
                    img_name = str(time.time()) + '.png'
                    image_save_path = os.path.join(save_path, img_name)
                    cv2.imwrite(image_save_path, alpha_pred)
        return result
    @serving
    def serving_method(self, images: list, trimaps:list = None, **kwargs):
        """
        Run as a service: inputs arrive base64-encoded, trimaps are
        converted to grayscale, and the mattes are returned base64-encoded.
        """
        images_decode = [P.base64_to_cv2(image) for image in images]
        if trimaps is not None:
            trimap_decoder = [cv2.cvtColor(P.base64_to_cv2(trimap), cv2.COLOR_BGR2GRAY) for trimap in trimaps]
        else:
            trimap_decoder = None
        outputs = self.predict(image_list=images_decode, trimap_list= trimap_decoder, **kwargs)
        serving_data = [P.cv2_to_base64(outputs[i]) for i in range(len(outputs))]
        results = {'data': serving_data}
        return results
    @runnable
    def run_cmd(self, argvs: list):
        """
        Run as a command: parse CLI flags and delegate to predict().
        """
        self.parser = argparse.ArgumentParser(
            description="Run the {} module.".format(self.name),
            prog='hub run {}'.format(self.name),
            usage='%(prog)s',
            add_help=True)
        self.arg_input_group = self.parser.add_argument_group(title="Input options", description="Input data. Required")
        self.arg_config_group = self.parser.add_argument_group(
            title="Config options", description="Run configuration for controlling module behavior, not required.")
        self.add_module_config_arg()
        self.add_module_input_arg()
        args = self.parser.parse_args(argvs)
        if args.trimap_path is not None:
            trimap_list = [args.trimap_path]
        else:
            trimap_list = None
        results = self.predict(image_list=[args.input_path], trimap_list=trimap_list, save_path=args.output_dir, visualization=args.visualization)
        return results
    def add_module_config_arg(self):
        """
        Add the command config options.
        """
        self.arg_config_group.add_argument(
            '--output_dir', type=str, default="modnet_mobilenetv2_matting_output", help="The directory to save output images.")
        self.arg_config_group.add_argument(
            '--visualization', type=bool, default=True, help="whether to save output as images.")
    def add_module_input_arg(self):
        """
        Add the command input options.
        """
        self.arg_input_group.add_argument('--input_path', type=str, help="path to image.")
        self.arg_input_group.add_argument('--trimap_path', type=str, default=None, help="path to image.")
class MODNetHead(nn.Layer):
    """
    Segmentation head.

    Combines MODNet's three decoder branches: low-resolution (semantic),
    high-resolution (detail), and fusion (final matte).
    """
    def __init__(self, hr_channels: int, backbone_channels: int):
        super().__init__()
        self.lr_branch = LRBranch(backbone_channels)
        self.hr_branch = HRBranch(hr_channels, backbone_channels)
        self.f_branch = FusionBranch(hr_channels, backbone_channels)
    def forward(self, inputs: paddle.Tensor, feat_list: list):
        # the low-res branch also returns shallow encoder features that
        # the high-res branch reuses
        pred_semantic, lr8x, [enc2x, enc4x] = self.lr_branch(feat_list)
        pred_detail, hr2x = self.hr_branch(inputs['img'], enc2x, enc4x, lr8x)
        pred_matte = self.f_branch(inputs['img'], lr8x, hr2x)
        if self.training:
            # training exposes all three intermediate predictions for loss
            logit_dict = {
                'semantic': pred_semantic,
                'detail': pred_detail,
                'matte': pred_matte
            }
            return logit_dict
        else:
            # inference only needs the fused matte
            return pred_matte
class FusionBranch(nn.Layer):
    """Fusion branch: merges low-resolution semantics with high-resolution
    detail features and the raw image to predict the final alpha matte."""
    def __init__(self, hr_channels: int, enc_channels: int):
        super().__init__()
        self.conv_lr4x = Conv2dIBNormRelu(
            enc_channels[2], hr_channels, 5, stride=1, padding=2)
        self.conv_f2x = Conv2dIBNormRelu(
            2 * hr_channels, hr_channels, 3, stride=1, padding=1)
        # final stage has no IBNorm/ReLU so the sigmoid sees raw logits
        self.conv_f = nn.Sequential(
            Conv2dIBNormRelu(
                hr_channels + 3, int(hr_channels / 2), 3, stride=1, padding=1),
            Conv2dIBNormRelu(
                int(hr_channels / 2),
                1,
                1,
                stride=1,
                padding=0,
                with_ibn=False,
                with_relu=False))
    def forward(self, img: paddle.Tensor, lr8x: paddle.Tensor, hr2x: paddle.Tensor):
        # upsample 1/8-scale features to 1/4, then to 1/2 for fusion
        lr4x = F.interpolate(
            lr8x, scale_factor=2, mode='bilinear', align_corners=False)
        lr4x = self.conv_lr4x(lr4x)
        lr2x = F.interpolate(
            lr4x, scale_factor=2, mode='bilinear', align_corners=False)
        f2x = self.conv_f2x(paddle.concat((lr2x, hr2x), axis=1))
        # back to full resolution; concatenate the input image as guidance
        f = F.interpolate(
            f2x, scale_factor=2, mode='bilinear', align_corners=False)
        f = self.conv_f(paddle.concat((f, img), axis=1))
        pred_matte = F.sigmoid(f)
        return pred_matte
class HRBranch(nn.Layer):
    """
    High Resolution Branch of MODNet

    Predicts a boundary-detail matte from 1/2- and 1/4-scale encoder
    features, guided by downsampled copies of the input image and the
    low-resolution branch output.
    """
    def __init__(self, hr_channels: int, enc_channels:int):
        super().__init__()
        self.tohr_enc2x = Conv2dIBNormRelu(
            enc_channels[0], hr_channels, 1, stride=1, padding=0)
        self.conv_enc2x = Conv2dIBNormRelu(
            hr_channels + 3, hr_channels, 3, stride=2, padding=1)
        self.tohr_enc4x = Conv2dIBNormRelu(
            enc_channels[1], hr_channels, 1, stride=1, padding=0)
        self.conv_enc4x = Conv2dIBNormRelu(
            2 * hr_channels, 2 * hr_channels, 3, stride=1, padding=1)
        self.conv_hr4x = nn.Sequential(
            Conv2dIBNormRelu(
                2 * hr_channels + enc_channels[2] + 3,
                2 * hr_channels,
                3,
                stride=1,
                padding=1),
            Conv2dIBNormRelu(
                2 * hr_channels, 2 * hr_channels, 3, stride=1, padding=1),
            Conv2dIBNormRelu(
                2 * hr_channels, hr_channels, 3, stride=1, padding=1))
        self.conv_hr2x = nn.Sequential(
            Conv2dIBNormRelu(
                2 * hr_channels, 2 * hr_channels, 3, stride=1, padding=1),
            Conv2dIBNormRelu(
                2 * hr_channels, hr_channels, 3, stride=1, padding=1),
            Conv2dIBNormRelu(hr_channels, hr_channels, 3, stride=1, padding=1),
            Conv2dIBNormRelu(hr_channels, hr_channels, 3, stride=1, padding=1))
        # detail head is only exercised during training (see forward)
        self.conv_hr = nn.Sequential(
            Conv2dIBNormRelu(
                hr_channels + 3, hr_channels, 3, stride=1, padding=1),
            Conv2dIBNormRelu(
                hr_channels,
                1,
                1,
                stride=1,
                padding=0,
                with_ibn=False,
                with_relu=False))
    def forward(self, img: paddle.Tensor, enc2x: paddle.Tensor, enc4x: paddle.Tensor, lr8x: paddle.Tensor):
        # build an image pyramid used as guidance at each scale
        img2x = F.interpolate(
            img, scale_factor=1 / 2, mode='bilinear', align_corners=False)
        img4x = F.interpolate(
            img, scale_factor=1 / 4, mode='bilinear', align_corners=False)
        enc2x = self.tohr_enc2x(enc2x)
        hr4x = self.conv_enc2x(paddle.concat((img2x, enc2x), axis=1))
        enc4x = self.tohr_enc4x(enc4x)
        hr4x = self.conv_enc4x(paddle.concat((hr4x, enc4x), axis=1))
        # bring the low-resolution features up to 1/4 scale and fuse
        lr4x = F.interpolate(
            lr8x, scale_factor=2, mode='bilinear', align_corners=False)
        hr4x = self.conv_hr4x(paddle.concat((hr4x, lr4x, img4x), axis=1))
        hr2x = F.interpolate(
            hr4x, scale_factor=2, mode='bilinear', align_corners=False)
        hr2x = self.conv_hr2x(paddle.concat((hr2x, enc2x), axis=1))
        pred_detail = None
        if self.training:
            # full-resolution detail prediction, supervised only in training
            hr = F.interpolate(
                hr2x, scale_factor=2, mode='bilinear', align_corners=False)
            hr = self.conv_hr(paddle.concat((hr, img), axis=1))
            pred_detail = F.sigmoid(hr)
        return pred_detail, hr2x
class LRBranch(nn.Layer):
    """
    Low Resolution Branch of MODNet

    Produces coarse 1/8-scale semantic features from the deepest backbone
    feature map, and passes the shallow encoder features through for the
    high-resolution branch to reuse.
    """
    def __init__(self, backbone_channels: int):
        super().__init__()
        self.se_block = SEBlock(backbone_channels[4], reduction=4)
        self.conv_lr16x = Conv2dIBNormRelu(
            backbone_channels[4], backbone_channels[3], 5, stride=1, padding=2)
        self.conv_lr8x = Conv2dIBNormRelu(
            backbone_channels[3], backbone_channels[2], 5, stride=1, padding=2)
        # semantic head is only exercised during training (see forward)
        self.conv_lr = Conv2dIBNormRelu(
            backbone_channels[2],
            1,
            3,
            stride=2,
            padding=1,
            with_ibn=False,
            with_relu=False)
    def forward(self, feat_list: list):
        enc2x, enc4x, enc32x = feat_list[0], feat_list[1], feat_list[4]
        # channel attention on the deepest features, then upsample 32x -> 8x
        enc32x = self.se_block(enc32x)
        lr16x = F.interpolate(
            enc32x, scale_factor=2, mode='bilinear', align_corners=False)
        lr16x = self.conv_lr16x(lr16x)
        lr8x = F.interpolate(
            lr16x, scale_factor=2, mode='bilinear', align_corners=False)
        lr8x = self.conv_lr8x(lr8x)
        pred_semantic = None
        if self.training:
            lr = self.conv_lr(lr8x)
            pred_semantic = F.sigmoid(lr)
        return pred_semantic, lr8x, [enc2x, enc4x]
class IBNorm(nn.Layer):
    """
    Combine Instance Norm and Batch Norm into One Layer

    The first half of the channels (rounded down) go through BatchNorm2D,
    the remainder through InstanceNorm2D; the two results are re-joined
    along the channel axis.
    """
    def __init__(self, in_channels: int):
        super().__init__()
        half = in_channels // 2
        self.bnorm_channels = half
        self.inorm_channels = in_channels - half
        self.bnorm = nn.BatchNorm2D(self.bnorm_channels)
        self.inorm = nn.InstanceNorm2D(self.inorm_channels)
    def forward(self, x):
        split = self.bnorm_channels
        batch_part = self.bnorm(x[:, :split, :, :])
        inst_part = self.inorm(x[:, split:, :, :])
        return paddle.concat((batch_part, inst_part), 1)
class Conv2dIBNormRelu(nn.Layer):
    """
    Convolution + IBNorm + Relu

    A Conv2D optionally followed by IBNorm and/or ReLU; either stage can
    be disabled (with_ibn / with_relu) for output or logit layers.
    """
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: int,
                 stride: int = 1,
                 padding: int = 0,
                 dilation:int = 1,
                 groups: int = 1,
                 bias_attr: paddle.ParamAttr = None,
                 with_ibn: bool = True,
                 with_relu: bool = True):
        super().__init__()
        layers = [
            nn.Conv2D(
                in_channels,
                out_channels,
                kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
                groups=groups,
                bias_attr=bias_attr)
        ]
        # norm and activation are appended only when requested
        if with_ibn:
            layers.append(IBNorm(out_channels))
        if with_relu:
            layers.append(nn.ReLU())
        self.layers = nn.Sequential(*layers)
    def forward(self, x: paddle.Tensor):
        return self.layers(x)
class SEBlock(nn.Layer):
    """
    SE Block Proposed in https://arxiv.org/pdf/1709.01507.pdf

    Squeeze-and-Excitation: global-average-pool the input to 1x1,
    bottleneck it through two 1x1 convolutions (ReLU, then Sigmoid),
    and rescale the input channel-wise with the resulting weights.
    """
    def __init__(self, num_channels: int, reduction:int = 1):
        super().__init__()
        squeezed = int(num_channels // reduction)
        self.pool = nn.AdaptiveAvgPool2D(1)
        self.conv = nn.Sequential(
            nn.Conv2D(num_channels, squeezed, 1, bias_attr=False),
            nn.ReLU(),
            nn.Conv2D(squeezed, num_channels, 1, bias_attr=False),
            nn.Sigmoid())
    def forward(self, x: paddle.Tensor):
        # per-channel attention weights, broadcast over H and W
        attention = self.conv(self.pool(x))
        return attention * x
class GaussianBlurLayer(nn.Layer):
    """ Add Gaussian Blur to a 4D tensors
    This layer takes a 4D tensor of {N, C, H, W} as input.
    The Gaussian blur will be performed in given channel number (C) splitly.
    """
    def __init__(self, channels: int, kernel_size: int):
        """
        Args:
            channels (int): Channel for input tensor
            kernel_size (int): Size of the kernel used in blurring; must be odd
        """
        super(GaussianBlurLayer, self).__init__()
        self.channels = channels
        self.kernel_size = kernel_size
        # an even kernel has no center pixel, so reflect padding could not
        # keep the output the same size as the input
        assert self.kernel_size % 2 != 0
        # depthwise conv (groups=channels) blurs each channel independently
        self.op = nn.Sequential(
            nn.Pad2D(int(self.kernel_size / 2), mode='reflect'),
            nn.Conv2D(
                channels,
                channels,
                self.kernel_size,
                stride=1,
                padding=0,
                bias_attr=False,
                groups=channels))
        self._init_kernel()
        # the Gaussian weights are fixed, never learned
        self.op[1].weight.stop_gradient = True
    def forward(self, x: paddle.Tensor):
        """
        Args:
            x (paddle.Tensor): input 4D tensor
        Returns:
            paddle.Tensor: Blurred version of the input
        """
        if not len(list(x.shape)) == 4:
            print('\'GaussianBlurLayer\' requires a 4D tensor as input\n')
            exit()
        elif not x.shape[1] == self.channels:
            print('In \'GaussianBlurLayer\', the required channel ({0}) is'
                  'not the same as input ({1})\n'.format(
                      self.channels, x.shape[1]))
            exit()
        return self.op(x)
    def _init_kernel(self):
        # Build the Gaussian weights by filtering a unit impulse.
        # NOTE(review): this sigma-from-kernel-size formula matches the one
        # OpenCV documents for getGaussianKernel - confirm that is the intent.
        sigma = 0.3 * ((self.kernel_size - 1) * 0.5 - 1) + 0.8
        n = np.zeros((self.kernel_size, self.kernel_size))
        i = int(self.kernel_size / 2)
        n[i, i] = 1
        kernel = scipy.ndimage.gaussian_filter(n, sigma)
        kernel = kernel.astype('float32')
        # reshape to conv-weight layout [out_c=1, in_c=1, k, k]
        kernel = kernel[np.newaxis, np.newaxis, :, :]
        paddle.assign(kernel, self.op[1].weight)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import hungrybotlib as l
from random import choice
import config as cfg
# refresh the cached menus for the configured number of days
l.cacheMenus(cfg.menu_file, cfg.menu_url, cfg.days)
# weekly run counter persisted in a plain-text file named "counter"
with open("counter", "r") as f:
    count = int(f.read())
# rotating announcement messages (French; user-facing, left untranslated)
text = ["Une nouvelle semaine de bouffe !", "Et voila, je viens de mettre à jour les menus.", "Le RAK vient de me dire ce qu'on mange la semaine prochaine..."]
if count == 1:
    # very first run: introduce the bot instead of a routine update
    l.sayFood("Bonjour ! Je suis fier de me présenter, je suis HungryBot, je vais vous guider tout au long de vos études en vous indiquant ce que vous pourrez manger de bon au RAK de TB !", cfg.channels, cfg.token)
else:
    l.sayFood(choice(text)+"\nC'est la "+str(count)+"ème semaine que je suis à votre service.", cfg.channels, cfg.token)
# persist the incremented counter for next week's run
with open("counter", "w") as f:
    f.write(str(count+1))
|
nilq/baby-python
|
python
|
"""Output an NML file cataloging a set of audio files.
NML is an XML-based file format used by Traktor. This code generates
NML version 11, which is used by Traktor Pro.
"""
import time
import xml.sax.saxutils
from chirp.common import timestamp
from chirp.common import unicode_util
from chirp.library import artists
from chirp.library import order
# Placeholder display strings used when a track's ID3 metadata is missing.
_UNKNOWN_ARTIST = "* Artist Not Known *"
_UNKNOWN_ALBUM = "* Album Not Known *"
_UNKNOWN_SONG = "* Title Not Known *"
# The following are templates used to produce NML files.
# Boilerplate that goes at the beginning of every NML file. The one
# format parameter is an integer giving the total number of entries to
# be found in the file.
_NML_PREFIX = u"""<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<NML VERSION="14"><HEAD COMPANY="www.native-instruments.com" PROGRAM="Traktor - Native Instruments"></HEAD>
<MUSICFOLDERS></MUSICFOLDERS>
<COLLECTION ENTRIES="%10d">"""
# A template for producing individual song entries.
_NML_ENTRY = u"""<ENTRY MODIFIED_DATE=%(modified_date)s MODIFIED_TIME=%(modified_time)s TITLE=%(song)s ARTIST=%(artist)s><LOCATION DIR=%(dir)s FILE=%(file)s VOLUME=%(volume)s VOLUME_ID=""></LOCATION>
<ALBUM OF_TRACKS=%(total_num)s TITLE=%(album)s TRACK=%(order_num)s></ALBUM>
<INFO BITRATE=%(bitrate)s GENRE=%(genre)s PLAYTIME=%(duration_s)s IMPORT_DATE=%(import_date)s FILESIZE=%(size_in_kb)s></INFO>
</ENTRY>
"""
# Boilerplate that goes at the end of every NML file.
_NML_SUFFIX = u"""</COLLECTION>
<PLAYLISTS><NODE TYPE="FOLDER" NAME="$ROOT"><SUBNODES COUNT="1">
<NODE TYPE="PLAYLIST" NAME="_RECORDINGS"><PLAYLIST ENTRIES="0" TYPE="LIST"></PLAYLIST>
</NODE>
</SUBNODES>
</NODE>
</PLAYLISTS>
</NML>
"""
def _traktor_path_quote(path):
return path.replace("/", "/:")
class NMLWriter(object):
    """Generates an NML file for a collection of AudioFile objects.

    Entries are buffered in memory and written out sorted by
    (album_id, track number) in close(), because Traktor displays tracks
    in file order rather than by track number.
    """

    def __init__(self, file_volume, root_dir, out_fh):
        """Constructor.

        Args:
          file_volume: The SMB-style file volume containing the files.
            This volume will need to be visible to the PC running Traktor
            that the NML file will ultimately be used from.
          root_dir: The root directory of the library, as seen by the
            machine that is running Traktor.
          out_fh: The file handle to write to.  Must be seekable, since
            close() rewinds to patch the entry count into the header.
        """
        self.num_entries = 0
        self._file_volume = file_volume
        self._file_volume_quoted = _traktor_path_quote(file_volume)
        self._root_dir = root_dir
        self._out_fh = out_fh
        # Make sure we are at the beginning of the file.
        self._out_fh.seek(0)
        # Write out a prefix for 0 entries.  The count field in _NML_PREFIX
        # is fixed-width ("%10d"), so rewriting it with the real count in
        # close() cannot shift the rest of the file.
        self._out_fh.write(_NML_PREFIX % 0)
        self._all_entries = []

    def write(self, au_file):
        """Adds an audio file to the collection.

        Args:
          au_file: An AudioFile object to add to the collection.
        """
        entry_data = {}
        # "TRCK" is the ID3 track-number frame, possibly "N/M" form.
        entry_data["order_num"], entry_data["total_num"] = order.decode(
            str(au_file.mutagen_id3.get("TRCK")))
        if entry_data["total_num"] is None:
            # Placeholder when the tag does not carry a track total.
            entry_data["total_num"] = 100
        entry_data["artist"] = unicode_util.simplify(
            au_file.mutagen_id3.get("TPE1", _UNKNOWN_ARTIST))
        entry_data["album"] = unicode_util.simplify(
            au_file.mutagen_id3.get("TALB", _UNKNOWN_ALBUM))
        entry_data["song"] = unicode_util.simplify(
            au_file.mutagen_id3.get("TIT2", _UNKNOWN_SONG))
        # TODO(trow): Set this somehow.
        entry_data["genre"] = "Unknown"
        entry_data["dir"] = _traktor_path_quote(
            au_file.canonical_directory(prefix=self._root_dir))
        entry_data["file"] = au_file.canonical_filename()
        entry_data["volume"] = self._file_volume_quoted
        entry_data["bitrate"] = int(
            au_file.mp3_header.bit_rate_kbps * 1000)
        entry_data["size_in_kb"] = int(au_file.frame_size / 1024)
        entry_data["duration_s"] = int(au_file.duration_ms / 1000)
        entry_data["import_date"] = time.strftime(
            "%Y/%m/%d", time.gmtime(au_file.import_timestamp))
        entry_data["modified_date"] = entry_data["import_date"]
        # Fixed placeholder modification time (seconds past midnight).
        entry_data["modified_time"] = "35364"
        order_num = int(entry_data["order_num"])
        # Clean up any XML-unsafe characters and wrap each value in
        # quotes (quoteattr always adds surrounding quotes).
        for k, v in entry_data.items():
            new_v = xml.sax.saxutils.quoteattr(unicode(v))
            if new_v != v:
                entry_data[k] = new_v
        # TODO(trow): For now, we build a list of all entries so that
        # we can fix the ordering --- that is because Traktor
        # idiotically chooses to order tracks based on the order they
        # appear in the NML file, not based on the track numbering.
        entry_key = (au_file.album_id, order_num)
        self._all_entries.append((entry_key, entry_data))
        # TODO(trow): This is how we should do it!
        #self._out_fh.write(_NML_ENTRY % entry_data)
        self.num_entries += 1

    def close(self):
        """Flush buffered entries (sorted), the suffix, and the real header."""
        # TODO(trow): We shouldn't need to build up a big in-memory
        # data structure here!
        self._all_entries.sort()
        for _, entry_data in self._all_entries:
            self._out_fh.write(_NML_ENTRY % entry_data)
        # Write out the suffix.
        self._out_fh.write(_NML_SUFFIX)
        # Write out the prefix with the correct number of entries.
        self._out_fh.seek(0)
        self._out_fh.write(_NML_PREFIX % self.num_entries)
        # Note: does not close the underlying file object!
|
nilq/baby-python
|
python
|
# Advent of Code 2018, day 12: plant cellular automaton.
plants = []
ZERO = 5  # list index of pot number 0
BASE_GEN_COUNT = 0
FINAL_GEN_COUNT = 50000000000
will_grow = set()
wont_grow = set()
# Fixes: use a context manager so the input file is closed, and avoid
# shadowing the builtin `input`.
with open('input/input12.txt') as infile:
    lines = infile.readlines()
for line in lines:
    line = line.strip()
    if line.startswith('initial'):
        # "initial state: ##.#..." -> list of pot characters.
        pots = list(line.split(' ')[2])
        BASE_GEN_COUNT = len(pots)
        # Pad with empty pots on both sides so growth has room to spread.
        plants = ['.' for p in range(len(pots) + BASE_GEN_COUNT + ZERO)]
        for pot in range(len(pots)):
            plants[ZERO + pot] = pots[pot]
    elif line.endswith('#'):
        will_grow.add(line.split(' => ')[0])
    elif line.endswith('.'):
        wont_grow.add(line.split(' => ')[0])

def get_plant_total():
    """Sum of pot numbers (relative to pot 0) that contain a plant."""
    total = 0
    for i in range(len(plants)):
        if plants[i] == '#':
            total += (i - ZERO)
    return total

# I observed through experimentation that the change delta stayed the
# same after the 100th generation, so it is only necessary to calculate
# up to there. I'm guessing it is 100 because that is the length of the
# initial string. Surely there is an official name for this statistical
# pattern, but I don't know what it is...
plant_count = get_plant_total()
last_delta = 0
for g in range(BASE_GEN_COUNT):
    if g == 20:
        print('Solution 12.1:', plant_count)
    new_gen = ['.' for i in range(len(plants))]
    # Each pot's next state depends on the 5-pot window centered on it.
    for p in range(len(plants) - 5):
        segment = ''.join(plants[p:p+5])
        if segment in will_grow:
            new_gen[p+2] = '#'
        elif segment in wont_grow:
            new_gen[p+2] = '.'
    plants = new_gen
    new_plant_count = get_plant_total()
    new_delta = new_plant_count - plant_count
    if last_delta != new_delta:
        # print(g, 'to', g+1, 'delta from', last_delta, 'to', new_delta)
        last_delta = new_delta
    plant_count = new_plant_count
# Extrapolate linearly once the per-generation delta has stabilized.
print('Solution 12.2:', plant_count + ((FINAL_GEN_COUNT - BASE_GEN_COUNT)*last_delta))
|
nilq/baby-python
|
python
|
# python-social-auth / Django settings for GitHub OAuth login.
# NOTE(review): the OAuth key/secret are hard-coded; they should be read
# from the environment instead of being committed to the repository.
SOCIAL_AUTH_GITHUB_KEY = '0ec5adf60f9d0db84213'
SOCIAL_AUTH_GITHUB_SECRET = 'c4b6cd88aac6b4515c5b396be2727b21ee54725e'
SOCIAL_AUTH_LOGIN_URL = '/login'
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
SOCIAL_AUTH_LOGIN_ERROR_URL = '/login-error/'
# Share the session cookie across subdomains of localhost.be.
SESSION_COOKIE_DOMAIN = '.localhost.be'
USE_X_FORWARDED_HOST = True
SOCIAL_AUTH_SANITIZE_REDIRECTS = False
# Docker Remote API version used by this deployment.
DOCKER_API = '1.21'
|
nilq/baby-python
|
python
|
from functools import lru_cache
import os
from time import time
import json
from io import StringIO
import markdown
import jwt
import requests
import github
import datetime
import logging
from time import sleep
from ruamel.yaml import YAML
yaml = YAML()
ZOOM_API = "https://api.zoom.us/v2/"
SPEAKERS_CORNER_USER_ID = "D0n5UNEHQiajWtgdWLlNSA"
VSF_USER_ID = "iJFotmmLRgOHJrTe9MKHRA"
TALKS_FILE = "talks.yml"
MAILGUN_BASE_URL = "https://api.eu.mailgun.net/v3/"
MAILGUN_DOMAIN = "mail.virtualscienceforum.org/"
class CollectExceptions:
    """Context manager that swallows exceptions and records them for later.

    Use one instance around several independent steps, then call
    ``reraise()`` once to surface everything that failed.
    """

    def __init__(self):
        self.exceptions = []

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is None:
            return
        self.exceptions.append([exc_type, exc_value])
        # Returning True suppresses the exception.
        return True

    def reraise(self):
        """Raise a RuntimeError summarizing all collected exceptions, if any."""
        collected = self.exceptions
        if not collected:
            return
        if len(collected) == 1:
            raise RuntimeError() from collected[0][1]
        values = [value for _, value in collected]
        raise RuntimeError(values)
def wait_until(minute):
    """Sleep until a specified minute of the hour starts."""
    now = datetime.datetime.now(tz=datetime.timezone.utc)
    target = now.replace(minute=minute, second=0, microsecond=0)
    if target < now:
        # That minute already passed this hour; wait for the next hour's.
        target += datetime.timedelta(hours=1)
    logging.info(f"Sleeping until {target}")
    sleep((target - now).total_seconds())
def make_zoom_headers(duration: float=100) -> callable:
    """Build a closure returning zoom auth headers with a cached JWT expiry.

    The expiry timestamp is refreshed whenever it has lapsed, so long-running
    processes keep producing valid tokens.
    """
    expiration = time() + duration

    def zoom_headers() -> dict:
        nonlocal expiration
        api_key = os.getenv("ZOOM_API_KEY")
        api_secret = os.getenv("ZOOM_API_SECRET")
        if time() > expiration:
            expiration = time() + duration
        # Payload of the token containing API Key & expiration time.
        token = jwt.encode(
            {"iss": api_key, "exp": expiration},
            api_secret,
            algorithm='HS256'
        )
        return {'authorization': f'Bearer {token}', 'content-type': 'application/json'}

    return zoom_headers

zoom_headers = make_zoom_headers()
def vsf_repo():
    """Return the virtualscienceforum repository, authenticated as the VSF bot."""
    return github.Github(os.getenv("VSF_BOT_TOKEN")).get_repo(
        "virtualscienceforum/virtualscienceforum"
    )
def talks_data(ref="master", repo=None):
if repo is None:
repo = vsf_repo()
# Read the talks file
talks_data = repo.get_contents(TALKS_FILE, ref=ref)
talks = yaml.load(StringIO(talks_data.decoded_content.decode()))
for talk in talks:
# Workaround against issues
# https://sourceforge.net/p/ruamel-yaml/tickets/365/
# https://sourceforge.net/p/ruamel-yaml/tickets/366
# Note that we rely on the current behavior that returns UTC time
talk["time"] = datetime.datetime.fromtimestamp(
talk["time"]
.replace(tzinfo=datetime.timezone.utc)
.timestamp(),
tz=datetime.timezone.utc
)
return talks, talks_data.sha
def zoom_request(method: callable, *args, **kwargs):
    """A minimal wrapper around requests for querying zoom API with error handling.

    Raises RuntimeError on any non-2xx response; returns the decoded JSON
    body, or None for empty responses.
    """
    response = method(*args, **kwargs, headers=zoom_headers())
    body = response.content
    if response.status_code > 299:
        raise RuntimeError(body.decode())
    return response.json() if body else None
def speakers_corner_user_id() -> str:
    """Look up the zoom user id of the Speakers' Corner account."""
    for user in zoom_request(requests.get, ZOOM_API + "users")["users"]:
        if user["first_name"] == "Speakers'" and user["last_name"] == "Corner":
            return user["id"]
    # Mirrors next() on an exhausted generator in the original implementation.
    raise StopIteration
def all_meetings(user_id) -> list:
    """Return all meetings by a user.

    Handles pagination, and adds ``live: True`` to a meeting that is running (if any).
    """
    collected = []
    page_token = ""
    while True:
        page = zoom_request(
            requests.get,
            f"{ZOOM_API}users/{user_id}/meetings",
            params={"type": "scheduled", "page_size": 300, "next_page_token": page_token}
        )
        collected.extend(page["meetings"])
        page_token = page["next_page_token"]
        if not page_token:
            break
    live = zoom_request(
        requests.get,
        f"{ZOOM_API}users/{user_id}/meetings",
        params={"type": "live", "page_size": 300}
    )["meetings"]
    if live:
        # At most one meeting can be live; tag it in the scheduled list.
        running_id = live[0]["id"]
        for meeting in collected:
            if meeting["id"] == running_id:
                meeting["live"] = True
    return collected
def api_query(method, endpoint, **params):
    """A simple wrapper around mailgun API query.

    Returns the JSON body when possible (raw text otherwise) and raises
    RuntimeError on any non-2xx response.
    """
    response = method(
        MAILGUN_BASE_URL + endpoint,
        auth=("api", os.getenv("MAILGUN_API_KEY")),
        **params
    )
    try:
        payload = response.json()
    except ValueError:
        payload = response.text
    if response.status_code > 299:  # Not OK
        raise RuntimeError(payload)
    return payload
def markdown_to_email(text: str) -> str:
    """Render markdown as HTML wrapped in a table, for email-client layout."""
    rendered = markdown.markdown(text)
    return (
        '<table cellspacing="0" cellpadding="0" border="0"><tr>'
        '<td style="word-break:normal;border-collapse:collapse!important;max-width:600px">'
        f'{rendered}</td></tr></table>'
    )
def markdown_to_plain(text: str) -> str:
    """Strip markdown link brackets and emphasis markers for a plain-text body."""
    # Replacement order matters: ']' becomes a space before ' \n' collapsing.
    for old, new in (('[', ''), (']', ' '), (' \n', '\n'), ('*', '')):
        text = text.replace(old, new)
    return text
def meeting_registrants(zoom_meeting_id: int) -> dict:
    """Return the registrants of a zoom meeting, with custom questions flattened.

    Returns an empty list when registration was not enabled for the meeting
    (zoom answers with HTTP 400 in that case).
    """
    collected = []
    page_token = ""
    while True:
        response = requests.get(
            f"https://api.zoom.us/v2/meetings/{zoom_meeting_id}/registrants",
            headers=zoom_headers(),
            params={"next_page_token": page_token}
        )
        # Registration was not enabled for this meeting
        if response.status_code == 400:
            return []
        payload = response.json()
        collected.extend(payload["registrants"])
        page_token = payload["next_page_token"]
        if not page_token:
            break
    # Merge each registrant's custom questions into the top-level dict.
    return [
        {**entry, **{q["title"]: q["value"] for q in entry.pop("custom_questions")}}
        for entry in collected
    ]
def send_to_participants(
    template: str,
    subject: str,
    talk: dict,
    from_email: str,
):
    """
    Send an email to meeting participants via mailgun.

    Args:
        template (jinja2.Template): Email body, variables are keys of
            ``talk`` (see talks yaml).
        subject (str): Email subject, a format string expecting keys of
            ``talk`` (see talks yaml) as variables.
        talk (dict): Dictionary corresponding to an entry in the talks
            yaml file; must contain ``zoom_meeting_id``.
        from_email (str): Sender address.

    Returns:
        The decoded mailgun API response.
    """
    message = template.render(**talk)
    registrants = meeting_registrants(talk['zoom_meeting_id'])
    # Defensively filter out invalid registrants
    # See https://github.com/virtualscienceforum/automation/issues/27
    registrants = [r for r in registrants if "email" in r and "join_url" in r]
    data = {
        "from": from_email,
        # Deduplicate recipients via a set comprehension.
        "to": list({
            f"{r.get('first_name', '')} {r.get('last_name', '')} <{r['email']}>"
            for r in registrants
        }),
        "subject": subject.format(**talk),
        "text": markdown_to_plain(message),
        "html": markdown_to_email(message),
        # Per-recipient join links, substituted by mailgun at send time.
        "recipient-variables": json.dumps(
            {r["email"]: {"join_url": r["join_url"]}
             for r in registrants}
        ),
    }
    return api_query(
        requests.post,
        MAILGUN_DOMAIN + "messages",
        data=data
    )
|
nilq/baby-python
|
python
|
# Clase 24. Curso Píldoras Informáticas.
# Control de Flujo. POO1.
# Clase de Teoría.
# Lenguajes orientados a objetos: C++, Java, VisualNet...
# Atributos/Propiedades: elementos que definen las clases y objetos.
# Ventajas POO:
# Se pueden establecer Módulos.
# Código muy reciclable (esto con Fortran y otros lenguajes no OO no es posible). Herencia.
# Existe tratamiento de excepciones.
# Tiene la ventaja del encapsulamiento.
|
nilq/baby-python
|
python
|
#coding=utf-8
import tensorflow as tf
import wmodule
import basic_tftools as btf
import wml_tfutils as wmlt
from object_detection2.datadef import EncodedData
import tfop
import functools
from object_detection2.datadef import *
import numpy as np
import wnn
import wsummary
from .build import HEAD_OUTPUTS
import object_detection2.wlayers as odl
from object_detection2.modeling.matcher import Matcher
slim = tf.contrib.slim
@HEAD_OUTPUTS.register()
class BoxFreeOutputs(wmodule.WChildModule):
    """
    A class that stores information about outputs of a Fast R-CNN head.

    Box-free variant: only classification logits contribute to the loss;
    no box regression is performed.
    """
    def __init__(
        self, cfg, parent, box2box_transform, pred_class_logits, pred_proposal_deltas,
        proposals: EncodedData,
        pred_iou_logits=None,
        **kwargs
    ):
        """
        Args:
            box2box_transform (Box2BoxTransform/Box2BoxTransformRotated):
                box2box transform instance for proposal-to-detection transformations.
            pred_class_logits (Tensor): A tensor of shape (R, K + 1) storing the predicted class
                logits for all R predicted object instances.
                Each row corresponds to a predicted object instance.
            pred_proposal_deltas (Tensor): A tensor of shape (R, K * B) or (R, B) for
                class-specific or class-agnostic regression.  Unused by this
                box-free head; kept for signature compatibility.
            proposals: When training it's EncodedData, when inference, it's ProposalsData
            pred_iou_logits: unused here; kept for signature compatibility.
        """
        super().__init__(cfg, parent, **kwargs)
        self.pred_class_logits = pred_class_logits
        # Bug fix: keep a reference to the proposals.
        # softmax_cross_entropy_loss() reads self.proposals[ED_SCORES] when
        # POS_LABELS_THRESHOLD is enabled, but self.proposals was never
        # assigned, which raised AttributeError at loss-construction time.
        self.proposals = proposals
        if self.is_training:
            gt_logits_i = proposals.gt_object_logits
            # gt_logits_i's shape is [batch_size, box_nr]; flatten to [R].
            self.gt_classes = tf.reshape(gt_logits_i, [-1])

    def _log_accuracy(self):
        """
        Log the classification accuracy metric to the TF summaries.
        """
        accuracy = wnn.accuracy_ratio(logits=self.pred_class_logits, labels=self.gt_classes)
        tf.summary.scalar("fast_rcnn/accuracy", accuracy)

    def softmax_cross_entropy_loss(self):
        """
        Compute the softmax cross entropy loss for box classification.

        Returns:
            scalar Tensor
        """
        self._log_accuracy()
        wsummary.variable_summaries_v2(self.gt_classes, "gt_classes")
        wsummary.variable_summaries_v2(self.pred_class_logits, "pred_class_logits")
        if self.cfg.MODEL.ROI_HEADS.POS_LABELS_THRESHOLD > 1e-3:
            # Relabel proposals whose score is below the threshold as
            # background (class 0) before computing the loss.
            with tf.name_scope("modify_gtclasses"):
                threshold = self.cfg.MODEL.ROI_HEADS.POS_LABELS_THRESHOLD
                scores = tf.reshape(self.proposals[ED_SCORES], [-1])
                gt_classes = self.gt_classes
                gt_classes = tf.where(tf.greater(scores, threshold), gt_classes,
                                      tf.zeros_like(gt_classes))
            classes_loss = tf.losses.sparse_softmax_cross_entropy(
                logits=self.pred_class_logits, labels=gt_classes,
                loss_collection=None,
                reduction=tf.losses.Reduction.MEAN)
        else:
            classes_loss = tf.losses.sparse_softmax_cross_entropy(
                logits=self.pred_class_logits, labels=self.gt_classes,
                loss_collection=None,
                reduction=tf.losses.Reduction.MEAN)
        wsummary.histogram_or_scalar(classes_loss, "fast_rcnn/classes_loss")
        return classes_loss * self.cfg.MODEL.ROI_HEADS.BOX_CLS_LOSS_SCALE

    def losses(self):
        """
        Compute the default losses for box head in Fast(er) R-CNN,
        with softmax cross entropy loss (no box regression in this head).

        Returns:
            A dict of losses (scalar tensors) containing key "fastrcnn_loss_cls".
        """
        loss = {
            "fastrcnn_loss_cls": self.softmax_cross_entropy_loss(),
        }
        return loss

    def inference(self, score_thresh,
                  proposal_boxes=None, scores=None):
        """
        Args:
            score_thresh (float): minimum foreground probability to keep a box.
            proposal_boxes: boxes associated with the logits/scores.
            scores: [batch_size, box_nr, num_classes+1]; when given, used
                instead of softmax(pred_class_logits).

        Returns:
            dict with RD_PROBABILITY, RD_BOXES, RD_LABELS, RD_LENGTH,
            RD_INDICES entries.  NOTE(review): the trailing expand_dims
            appears to assume batch size 1 — confirm with callers.
        """
        with tf.name_scope("fast_rcnn_outputs_inference"):
            if scores is None:
                probability = tf.nn.softmax(self.pred_class_logits)
            else:
                probability = scores
            probability = probability[..., 1:]  # drop the background column
            probability, labels = tf.nn.top_k(probability, k=1)
            probability = tf.squeeze(probability, axis=-1)
            labels = tf.squeeze(labels, axis=-1) + 1  # shift back past background
            size = btf.combined_static_and_dynamic_shape(probability)[0]
            res_indices = tf.range(size)
            mask = tf.greater(probability, score_thresh)
            length = tf.reduce_sum(tf.cast(mask, tf.int32), axis=-1, keepdims=False)
            probability = tf.boolean_mask(probability, mask)
            boxes = tf.boolean_mask(proposal_boxes, mask)
            labels = tf.boolean_mask(labels, mask)
            res_indices = tf.boolean_mask(res_indices, mask)
            # Sort the surviving detections by descending probability.
            probability, indices = tf.nn.top_k(probability, k=tf.shape(probability)[0])
            labels = tf.expand_dims(tf.gather(labels, indices), axis=0)
            boxes = tf.expand_dims(tf.gather(boxes, indices), axis=0)
            res_indices = tf.expand_dims(tf.gather(res_indices, indices), axis=0)
            probability = tf.expand_dims(probability, axis=0)
            return {RD_PROBABILITY: probability, RD_BOXES: boxes, RD_LABELS: labels,
                    RD_LENGTH: length, RD_INDICES: res_indices}
|
nilq/baby-python
|
python
|
from sqlalchemy import Integer, Text, DateTime, func, Boolean, text
from models.database_models import Base, Column
class Comment(Base):
    """A user comment attached to a post; rows are soft-deleted via `deleted`."""
    __tablename__ = "comment"
    id = Column(Integer, primary_key=True, )
    # ID of the commenting user (no FK constraint declared here).
    user_id = Column(Integer, nullable=False, comment="评论用户的 ID")
    # ID of the post being commented on (no FK constraint declared here).
    post_id = Column(Integer, nullable=False, comment="Post 文章的 ID")
    content = Column(Text, nullable=False, comment="用户的评论")
    # Timestamps maintained by the database (server_default / onupdate).
    create_time = Column(DateTime, server_default=func.now(), comment="创建时间")
    update_time = Column(DateTime, server_default=func.now(), onupdate=func.now(), comment="更新时间")
    # Soft-delete flag; rows are never physically removed.
    deleted = Column(Boolean, default=False, server_default=text('0'), nullable=False, comment="该项目是否被删除")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Copyright (C) 2009 Chia-I Wu <olv@0xlab.org>
# All Rights Reserved.
#
# This is based on extension_helper.py by Ian Romanick.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import argparse
import license
import gl_XML
def get_function_spec(func):
    """Build the string-pool spec for *func*: the parameter signature
    followed by every entry point name and a terminating empty string."""
    signature = ""
    # One signature character per (non-padding) parameter.
    for param in func.parameterIterator():
        if param.is_padding:
            continue
        # FIXME: This is a *really* ugly hack. :(
        base_type = param.type_expr.get_base_type_node()
        if param.is_pointer():
            signature += 'p'
        elif base_type.integer:
            signature += 'i'
        elif base_type.size == 4:
            signature += 'f'
        else:
            signature += 'd'
    spec = [signature]
    for entry in func.entry_points:
        spec.append("gl" + entry)
    # spec is terminated by an empty string
    spec.append('')
    return spec
class PrintGlRemap(gl_XML.gl_print_base):
    """Printer emitting the C string pool and remap table consumed by Mesa's remap.c.

    Note: this file uses Python 2 print statements throughout.
    """
    def __init__(self):
        gl_XML.gl_print_base.__init__(self)
        self.name = "remap_helper.py (from Mesa)"
        self.license = license.bsd_license_template % ("Copyright (C) 2009 Chia-I Wu <olv@0xlab.org>", "Chia-I Wu")
        return

    def printRealHeader(self):
        # Headers needed by the generated table.
        print '#include "main/dispatch.h"'
        print '#include "main/remap.h"'
        print ''
        return

    def printBody(self, api):
        # Byte offset of each function's spec within _mesa_function_pool.
        pool_indices = {}
        print '/* this is internal to remap.c */'
        print '#ifndef need_MESA_remap_table'
        print '#error Only remap.c should include this file!'
        print '#endif /* need_MESA_remap_table */'
        print ''
        print ''
        print 'static const char _mesa_function_pool[] ='
        # output string pool
        index = 0;
        for f in api.functionIterateAll():
            pool_indices[f] = index
            spec = get_function_spec(f)
            # a function has either assigned offset, fixed offset,
            # or no offset
            if f.assign_offset:
                comments = "will be remapped"
            elif f.offset > 0:
                comments = "offset %d" % f.offset
            else:
                comments = "dynamic"
            print ' /* _mesa_function_pool[%d]: %s (%s) */' \
                % (index, f.name, comments)
            for line in spec:
                print ' "%s\\0"' % line
                # +1 accounts for the embedded NUL terminator after each string.
                index += len(line) + 1
        print ' ;'
        print ''
        print '/* these functions need to be remapped */'
        print 'static const struct gl_function_pool_remap MESA_remap_table_functions[] = {'
        # output all functions that need to be remapped
        # iterate by offsets so that they are sorted by remap indices
        for f in api.functionIterateByOffset():
            if not f.assign_offset:
                continue
            print ' { %5d, %s_remap_index },' \
                % (pool_indices[f], f.name)
        print ' { -1, -1 }'
        print '};'
        print ''
        return
def _parser():
"""Parse input options and return a namsepace."""
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--filename',
default="gl_API.xml",
metavar="input_file_name",
dest='file_name',
help="An xml description file.")
return parser.parse_args()
def main():
"""Main function."""
args = _parser()
api = gl_XML.parse_GL_API(args.file_name)
printer = PrintGlRemap()
printer.Print(api)
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# see LICENSE.rst
# ----------------------------------------------------------------------------
#
# TITLE : Code for the Examples
# AUTHOR : Nathaniel Starkman
# PROJECT : TrackStream
#
# ----------------------------------------------------------------------------
"""Examples Code."""
__author__ = "Nathaniel Starkman"
__copyright__ = "Copyright 2012+"
__license__ = "BSD3"
__maintainer__ = "Nathaniel Starkman"
__all__ = [
"get_transform_matrix",
]
##############################################################################
# IMPORTS
# LOCAL
from .coordinates import get_transform_matrix
##############################################################################
# END
|
nilq/baby-python
|
python
|
def keep_evens(nums):
    """Return a list with only the even numbers of *nums*, preserving order."""
    return [num for num in nums if num % 2 == 0]
print(keep_evens([3, 4, 6, 7, 0, 1]))
# Saída [4, 6, 0]
'''
1. Write code to assign to the variable filter_testing all the elements in lst_check that have a w in them using filter.
'''
lst_check = ['plums', 'watermelon', 'kiwi', 'strawberries', 'blueberries', 'peaches', 'apples', 'mangos', 'papaya']
filter_testing = filter(lambda word: 'w' in word, lst_check)
print(list(filter_testing))
# Saída ['watermelon', 'kiwi', 'strawberries']
'''
2. Using filter, filter lst so that it only contains words containing the letter “o”. Assign to variable lst2. Do not hardcode this.
'''
lst = ["witch", "halloween", "pumpkin", "cat", "candy", "wagon", "moon"]
lst2 = filter(lambda word: 'o' in word, lst)
# Fix: print the materialized list (like the other exercises), not the
# filter object's repr.
print(list(lst2))
'''
3. Below, we have provided a list of strings called countries. Use filter to produce a list called b_countries that only contains the strings from countries that begin with B.
'''
countries = ['Canada', 'Mexico', 'Brazil', 'Chile', 'Denmark', 'Botswana', 'Spain', 'Britain', 'Portugal', 'Russia', 'Thailand', 'Bangladesh', 'Nigeria', 'Argentina', 'Belarus', 'Laos', 'Australia', 'Panama', 'Egypt', 'Morocco', 'Switzerland', 'Belgium']
b_countries = filter(lambda c: c[0] == 'B', countries)
print(list(b_countries))
# ['Brazil', 'Botswana', 'Britain', 'Bangladesh', 'Belarus', 'Belgium']
|
nilq/baby-python
|
python
|
from django.dispatch import Signal

# Account-lifecycle signals.  Receivers get the affected ``user`` (plus the
# old/new email where relevant) as keyword arguments.
# NOTE(review): ``providing_args`` is purely documentational and was removed
# in Django 4.0 — confirm the targeted Django version before upgrading.
signup_complete = Signal(providing_args=["user",])
activation_complete = Signal(providing_args=["user",])
confirmation_complete = Signal(providing_args=["user","old_email"])
password_complete = Signal(providing_args=["user",])
order_complete = Signal(providing_args=["user",])
email_change = Signal(providing_args=["user","prev_email","new_email"])
profile_change = Signal(providing_args=["user",])
account_signin = Signal(providing_args=["user",])
account_signout = Signal(providing_args=["user",])
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
from setuptools import setup

# Packaging metadata for the single-module `lilist` distribution.
setup(
    name='lilist',
    version='0.1.0',
    description='A linear interpolation list class',
    url='http://github.com/MatthewScholefield/lilist',
    author='Matthew Scholefield',
    author_email='matthew331199@gmail.com',
    license='MIT',
    # Shipped as a single module, not a package.
    py_modules=[
        'lilist'
    ]
)
|
nilq/baby-python
|
python
|
from unittest import TestCase
import json
import attr
from marshmallow_helpers import RegisteredEnum, attr_with_schema
def enum_to_schema(enum_cls):
    """Build a schema for a throwaway attrs class with a single ``enum`` field.

    Used by the tests below to exercise how *enum_cls* is (de)serialized.
    """
    # NOTE(review): register_as_scheme registers the generated schema — the
    # inner class name is deliberately kept stable across calls.
    @attr_with_schema(register_as_scheme=True, strict=True)
    @attr.s(auto_attribs=True)
    class MyEnum:
        enum: enum_cls
    return MyEnum.schema
def enum_to_field(enum_cls):
    """Return the marshmallow field generated for *enum_cls*'s ``enum`` attribute."""
    return enum_to_schema(enum_cls)._declared_fields['enum']
# Enum fixtures exercising the by-value / by-key (de)serialization switches.

class MyIntEnum(int, RegisteredEnum):
    """Int-valued enum, (de)serialized by value (the default)."""
    a = 1
    b = 2
    c = 3

class MyStrEnum(RegisteredEnum):
    """Str-valued enum, (de)serialized by value."""
    a = "A"
    b = "B"
    c = "C"

class MyTupleEnum(tuple, RegisteredEnum):
    """Tuple-valued enum, (de)serialized by value."""
    a = (1, "a")
    b = (2, "b")
    c = (3, "c")

class MyByKeyIntEnum(int, RegisteredEnum):
    """Loaded AND dumped by member name instead of value."""
    __by_value__ = False
    a = 1
    b = 2
    c = 3

class MyLoadByKeyIntEnum(int, RegisteredEnum):
    """Loaded by member name, dumped by value."""
    __load_by_value__ = False
    a = 1
    b = 2
    c = 3

class MyDumpByKeyIntEnum(int, RegisteredEnum):
    """Loaded by value, dumped by member name."""
    __dump_by_value__ = False
    a = 1
    b = 2
    c = 3
class EnumTest(TestCase):
    """Checks the ``enum`` metadata advertised by the generated fields."""

    def test_enum_metadata(self):
        # By-value enums list their values; by-key enums list member names.
        self.assertListEqual(
            enum_to_field(MyIntEnum).metadata.get('enum', []),
            [1, 2, 3])
        self.assertListEqual(
            enum_to_field(MyStrEnum).metadata.get('enum', []),
            ["A", "B", "C"])
        self.assertListEqual(
            enum_to_field(MyTupleEnum).metadata.get('enum', []),
            [(1, "a"), (2, "b"), (3, "c")])
        self.assertListEqual(
            enum_to_field(MyByKeyIntEnum).metadata.get('enum', []),
            ["a", "b", "c"])
        # The advertised metadata follows the *load* side of the config.
        self.assertListEqual(
            enum_to_field(MyLoadByKeyIntEnum).metadata.get('enum', []),
            ["a", "b", "c"])
        self.assertListEqual(
            enum_to_field(MyDumpByKeyIntEnum).metadata.get('enum', []),
            [1, 2, 3])
class SchemaTest(TestCase):
    """Round-trip (load/dump) behavior of schemas over each enum flavor."""

    def test_loads(self):
        # Loading accepts a value or a member name, per the enum's config.
        self.assertEqual(
            enum_to_schema(MyIntEnum)().loads('{"enum": 1}').enum,
            MyIntEnum.a)
        self.assertEqual(
            enum_to_schema(MyStrEnum)().loads('{"enum": "A"}').enum,
            MyStrEnum.a)
        self.assertEqual(
            enum_to_schema(MyTupleEnum)().loads('{"enum": [1, "a"]}').enum,
            MyTupleEnum.a)
        self.assertEqual(
            enum_to_schema(MyByKeyIntEnum)().loads('{"enum": "a"}').enum,
            MyByKeyIntEnum.a)
        self.assertEqual(
            enum_to_schema(MyLoadByKeyIntEnum)().loads('{"enum": "a"}').enum,
            MyLoadByKeyIntEnum.a)
        self.assertEqual(
            enum_to_schema(MyDumpByKeyIntEnum)().loads('{"enum": 1}').enum,
            MyDumpByKeyIntEnum.a)

    def test_dumps(self):
        # Dumping emits a value or a member name, per the enum's config.
        self.assertEqual(
            enum_to_schema(MyIntEnum)().dumps({"enum": MyIntEnum.a}),
            json.dumps({"enum": 1}))
        self.assertEqual(
            enum_to_schema(MyStrEnum)().dumps({"enum": MyStrEnum.a}),
            json.dumps({"enum": "A"}))
        self.assertEqual(
            enum_to_schema(MyTupleEnum)().dumps({"enum": MyTupleEnum.a}),
            json.dumps({"enum": (1, "a")}))
        self.assertEqual(
            enum_to_schema(MyByKeyIntEnum)().dumps({"enum": MyByKeyIntEnum.a}),
            json.dumps({"enum": "a"}))
        self.assertEqual(
            enum_to_schema(MyLoadByKeyIntEnum)().dumps(
                {"enum": MyLoadByKeyIntEnum.a}),
            json.dumps({"enum": 1}))
        self.assertEqual(
            enum_to_schema(MyDumpByKeyIntEnum)().dumps(
                {"enum": MyDumpByKeyIntEnum.a}),
            json.dumps({"enum": "a"}))
|
nilq/baby-python
|
python
|
from flask import Flask
from flask_cors import CORS
import json, sys, os, base64
app = Flask(__name__)
CORS(app)
import logging
logging.getLogger("werkzeug").setLevel(logging.ERROR)
@app.route("/set_contest/<contestname>/<userhash>")
def set_contest(contestname, userhash):
print(base64.b64decode(contestname.replace("-", "/")))
if userhash == serverhash:
with open("static/contest.txt", "w") as f:
f.write("".join(map(chr, base64.b64decode(contestname.replace("-", "/")))))
return ""
@app.route("/current-contest")
def getCurrentContest():
with open("static/contest.txt", "r") as f:
return f.read().strip()
def contestIsOngoing():
    """True when a contest name is configured in static/contest.txt."""
    return getCurrentContest() != ""
@app.route("/problems")
def serveProblems():
return json.dumps(getProblems())
def getProblems():
    """Return the sorted problem file names of the current contest.

    Returns an empty list when no contest is configured or its files
    directory is missing.
    """
    try:
        return sorted(os.listdir("static/contests/%s/files" % getCurrentContest()))
    except OSError:
        # Narrowed from a bare `except:`: only missing-file/-directory
        # errors (from contest.txt or the listing) are a best-effort case.
        return []
@app.route("/problem/<int:id>")
def problem(id):
with open("static/contests/%s/files/" % getCurrentContest() + getProblems()[id], "r") as f:
return f.read()
def fullname(filename):
    """Return the human-readable title found on a problem file's first line.

    Strips the first 5 and last 4 characters of that line — presumably a
    surrounding HTML tag pair such as <h2>…</h2>; TODO confirm against
    the actual problem files.
    """
    with open("static/contests/%s/files/" % getCurrentContest() + filename, "r",encoding='utf8') as f:
        return f.read().split("\n")[0][5:-4]
@app.route("/fullnames")
def getFullNames():
return json.dumps(list(map(fullname, getProblems())))
@app.route("/data/<name>")
def getData(name):
contest = getCurrentContest()
data = getattr(getattr(__import__("static.contests.%s.Data.%s" % (contest, name)).contests, contest).Data, name)
return json.dumps({
"inputs": data.inputs,
"outputs": data.outputs,
"timelimit": data.timelimit,
"points": data.points
})
if __name__ == "__main__":
if len(sys.argv) >= 2:
try:
port = int(sys.argv[1])
except:
port = 5005
else:
port = 5005
if len(sys.argv) >= 3 and not sys.argv[2].startswith("--"):
serverhash = sys.argv[2]
else:
serverhash = "7d509328bd69ef7406baf28bd9897c0bf724d8d716b014d0f95f2e8dd9c43a06"
app.run(host = "0.0.0.0", port = port, debug = "--debug" in sys.argv)
|
nilq/baby-python
|
python
|
from functools import reduce  # fix: reduce was used below but never imported
from operator import mul

# Project Euler 8: the 1000-digit number, stored as a string.
numstr = '73167176531330624919225119674426574742355349194934\
96983520312774506326239578318016984801869478851843\
85861560789112949495459501737958331952853208805511\
12540698747158523863050715693290963295227443043557\
66896648950445244523161731856403098711121722383113\
62229893423380308135336276614282806444486645238749\
30358907296290491560440772390713810515859307960866\
70172427121883998797908792274921901699720888093776\
65727333001053367881220235421809751254540594752243\
52584907711670556013604839586446706324415722155397\
53697817977846174064955149290862569321978468622482\
83972241375657056057490261407972968652414535100474\
82166370484403199890008895243450658541227588666881\
16427171479924442928230863465674813919123162824586\
17866458359124566529476545682848912883142607690042\
24219022671055626321111109370544217506941658960408\
07198403850962455444362981230987879927244284909188\
84580156166097919133875499200524063689912560717606\
05886116467109405077541002256983155200055935729725\
71636269561882670428252483600823257530420752963450'

def adjacencyProduct(n):
    """Return the greatest product of n adjacent digits in numstr."""
    maxprod = -1
    # Fix: +1 so the window ending at the last digit is also considered
    # (the original range(len(numstr)-n) skipped the final window).
    for i in range(len(numstr) - n + 1):
        digits = map(int, numstr[i:i+n])
        thisprod = reduce(mul, digits, 1)
        if thisprod > maxprod:
            maxprod = thisprod
    return maxprod

if __name__=="__main__":
    n = 13
    mp = adjacencyProduct(n)
    print(mp) #23514624000
|
nilq/baby-python
|
python
|
import asyncio
import unittest.mock
import pytest
# Sentinel objects used to track exactly which values flow through the fixture.
START = object()
END = object()
RETVAL = object()

@pytest.fixture
def mock():
    # A mock that always returns the RETVAL sentinel, so the awaited
    # fixture result can be identified by identity below.
    return unittest.mock.Mock(return_value=RETVAL)

@pytest.fixture
async def async_fixture(mock):
    # Async fixture: sleeps briefly and resolves to mock(START), i.e. RETVAL.
    return await asyncio.sleep(0.1, result=mock(START))

@pytest.mark.asyncio
async def test_async_fixture(async_fixture, mock):
    # The fixture must have called the mock exactly once with START and the
    # injected value must be the mock's return value itself.
    assert mock.call_count == 1
    assert mock.call_args_list[-1] == unittest.mock.call(START)
    assert async_fixture is RETVAL
|
nilq/baby-python
|
python
|
# Convert an amount in BRL to USD.
# NOTE(review): the exchange rate (R$3.27 per dollar) is hard-coded.
r = float(input('Quanto dinheiro você tem na carteira?: R$'))
print('Com R${:.2f} você pode comprar US${:.2f}'.format(r, (r/3.27)))
|
nilq/baby-python
|
python
|
"""
XMLAction module.
"""
from pineboolib.core.utils import logging
import os.path
from pineboolib.core.utils.struct import ActionStruct
from .utils.path import _path, coalesce_path
from typing import Optional, Any, Union, TYPE_CHECKING
if TYPE_CHECKING:
from pineboolib.fllegacy.flaction import FLAction # noqa: F401
from pineboolib.fllegacy.flformdb import FLFormDB
from pineboolib.fllegacy.flformrecorddb import FLFormRecordDB
from .moduleactions import ModuleActions # noqa: F401
from .database.pnsqlcursor import PNSqlCursor # noqa: F401
class XMLAction(ActionStruct):
"""
Information related to actions specified in XML modules.
"""
logger = logging.getLogger("main.XMLAction")
mod: Optional["ModuleActions"]
alias: str
def __init__(self, *args, project, name=None, **kwargs) -> None:
    """
    Constructor.

    Args:
        project: Owning project; mandatory (ValueError when falsy).
        name: Action name; falls back to the XML "name" attribute.
    """
    super(XMLAction, self).__init__(*args, **kwargs)
    self.mod = None
    self.project = project
    if not self.project:
        raise ValueError("XMLActions must belong to a project")
    # Attributes read from the action's XML definition (_v returns the
    # optional value; _rv is the required variant — see ActionStruct).
    self.form = self._v("form")
    self.name = name or self._rv("name")  # Mandatory
    self.description = self._v("description")
    self.scriptform = self._v("scriptform")
    self.table = self._v("table")
    self.mainform = self._v("mainform")
    self.mainscript = self._v("mainscript")
    self.formrecord = self._v("formrecord")
    self.scriptformrecord = self._v("scriptformrecord")
    # Widgets are created lazily by load() / loadRecord().
    self.mainform_widget: Optional[FLFormDB] = None
    self.formrecord_widget: Optional[FLFormRecordDB] = None
    self._loaded = False
def loadRecord(self, cursor: Optional["PNSqlCursor"]) -> "FLFormRecordDB":
"""
Load FLFormRecordDB by default.
@param cursor. Asigna un cursor al FLFormRecord
@return widget con form inicializado
"""
self._loaded = getattr(self.formrecord_widget, "_loaded", False)
if not self._loaded:
if self.formrecord_widget and getattr(self.formrecord_widget, "widget", None):
self.formrecord_widget.widget.doCleanUp()
# self.formrecord_widget.widget = None
self.logger.debug("Loading record action %s . . . ", self.name)
if self.project.DGI.useDesktop():
# FIXME: looks like code duplication. Bet both sides of the IF do the same.
self.formrecord_widget = self.project.conn.managerModules().createFormRecord(
self, None, cursor, None
)
else:
# self.script = getattr(self, "script", None)
# if isinstance(self.script, str) or self.script is None:
script = self.load_script(self.scriptformrecord, None)
self.formrecord_widget = script.form
if self.formrecord_widget is None:
raise Exception("After loading script, no form was loaded")
self.formrecord_widget.widget = self.formrecord_widget
self.formrecord_widget.iface = self.formrecord_widget.widget.iface
self.formrecord_widget._loaded = True
# self.formrecord_widget.setWindowModality(Qt.ApplicationModal)
self.logger.debug(
"End of record action load %s (iface:%s ; widget:%s)",
self.name,
getattr(self.formrecord_widget, "iface", None),
getattr(self.formrecord_widget, "widget", None),
)
if self.formrecord_widget is None:
raise Exception("Unexpected: No formrecord loaded")
if cursor:
self.formrecord_widget.setCursor(cursor)
return self.formrecord_widget
def load(self) -> "FLFormDB":
"""
Load master form.
"""
self._loaded = getattr(self.mainform_widget, "_loaded", False)
if not self._loaded:
if self.mainform_widget is not None and getattr(self.mainform_widget, "widget", None):
self.mainform_widget.widget.doCleanUp()
if self.project.DGI.useDesktop() and hasattr(self.project.main_window, "w_"):
self.logger.info("Loading action %s (createForm). . . ", self.name)
self.mainform_widget = self.project.conn.managerModules().createForm(
action=self, parent=self.project.main_window.w_
)
else:
self.logger.info(
"Loading action %s (load_script %s). . . ", self.name, self.scriptform
)
script = self.load_script(self.scriptform, None)
self.mainform_widget = script.form # FormDBWidget FIXME: Add interface for types
if self.mainform_widget is None:
raise Exception("After loading script, no form was loaded")
self.mainform_widget.widget = self.mainform_widget
self.mainform_widget.iface = self.mainform_widget.widget.iface
self.mainform_widget._loaded = True
self.logger.debug(
"End of action load %s (iface:%s ; widget:%s)",
self.name,
getattr(self.mainform_widget, "iface", None),
getattr(self.mainform_widget, "widget", None),
)
if self.mainform_widget is None:
raise Exception("Unexpected: No form loaded")
return self.mainform_widget
def execMainScript(self, name) -> None:
"""
Execute function for main action.
"""
a = self.project.conn.manager().action(name)
if not a:
self.logger.warning("No existe la acción %s", name)
return
self.project.call("%s.main" % a.name(), [], None, False)
def formRecordWidget(self) -> "FLFormRecordDB":
"""
Return formrecord widget.
This is needed because sometimes there isn't a FLFormRecordDB initialized yet.
@return wigdet del formRecord.
"""
if not getattr(self.formrecord_widget, "_loaded", None):
self.loadRecord(None)
if self.formrecord_widget is None:
raise Exception("Unexpected: No form loaded")
return self.formrecord_widget
# FIXME: cursor is FLSqlCursor but should be something core, not "FL". Also, an interface
def openDefaultFormRecord(self, cursor: "PNSqlCursor", wait: bool = True) -> None:
"""
Open FLFormRecord specified on defaults.
@param cursor. Cursor a usar por el FLFormRecordDB
"""
self.logger.info("Opening default formRecord for Action %s", self.name)
w = self.loadRecord(cursor)
# w.init()
if w:
if self.project.DGI.localDesktop():
if wait:
w.show_and_wait()
else:
w.show()
def openDefaultForm(self) -> None:
"""
Open Main FLForm specified on defaults.
"""
self.logger.info("Opening default form for Action %s", self.name)
w = self.load()
if w:
if self.project.DGI.localDesktop():
w.show()
def execDefaultScript(self) -> None:
"""
Execute script specified on default.
"""
self.logger.info("Executing default script for Action %s", self.name)
script = self.load_script(self.scriptform, None)
self.mainform_widget = script.form
if self.mainform_widget is None:
raise Exception("Unexpected: No form loaded")
if hasattr(self.mainform_widget, "iface"):
if self.mainform_widget.iface is not None:
self.mainform_widget.iface.main()
else:
self.mainform_widget.main()
def load_script(
self, scriptname: Optional[str], parent: Optional["FLFormDB"] = None
) -> Any: # returns loaded script
"""
Transform QS script into Python and starts it up.
@param scriptname. Nombre del script a convertir
@param parent. Objecto al que carga el script, si no se especifica es a self.script
"""
# FIXME: Parent logic is broken. We're loading scripts to two completely different objects.
from importlib import machinery
if scriptname:
scriptname = scriptname.replace(".qs", "")
self.logger.debug(
"Loading script %s of %s for action %s", scriptname, parent, self.name
)
else:
self.logger.info("No script to load on %s for action %s", parent, self.name)
parent_object = parent
action_: Union[XMLAction, "FLAction"] # XMLAction / FLAction
if parent is None:
action_ = self
else:
possible_flaction_ = getattr(parent, "_action", None)
if not isinstance(possible_flaction_, XMLAction):
from .utils.convert_flaction import convertFLAction # type: ignore
action_ = convertFLAction(possible_flaction_)
elif possible_flaction_ is not None:
action_ = possible_flaction_
python_script_path = None
# primero default, luego sobreescribimos
from pineboolib.qsa import emptyscript # type: ignore
script_loaded: Any = emptyscript
if scriptname is None:
script_loaded.form = script_loaded.FormInternalObj(
action=action_, project=self.project, parent=parent_object
)
if parent:
parent.widget = script_loaded.form
parent.iface = parent.widget.iface
return script_loaded
script_path_py = self.project.DGI.alternative_script_path("%s.py" % scriptname)
if script_path_py is None:
script_path_qs = _path("%s.qs" % scriptname, False)
script_path_py = coalesce_path("%s.py" % scriptname, "%s.qs.py" % scriptname, None)
mng_modules = self.project.conn.managerModules()
if mng_modules.staticBdInfo_ and mng_modules.staticBdInfo_.enabled_:
from pineboolib.fllegacy.flmodulesstaticloader import FLStaticLoader # FIXME
ret_py = FLStaticLoader.content(
"%s.qs.py" % scriptname, mng_modules.staticBdInfo_, True
) # Con True solo devuelve el path
if ret_py:
script_path_py = ret_py
else:
ret_qs = FLStaticLoader.content(
"%s.qs" % scriptname, mng_modules.staticBdInfo_, True
) # Con True solo devuelve el path
if ret_qs:
script_path_qs = ret_qs
if script_path_py is not None:
script_path = script_path_py
self.logger.info("Loading script PY %s . . . ", scriptname)
if not os.path.isfile(script_path):
raise IOError
try:
self.logger.debug(
"Cargando %s : %s ",
scriptname,
script_path.replace(self.project.tmpdir, "tempdata"),
)
loader = machinery.SourceFileLoader(scriptname, script_path)
script_loaded = loader.load_module() # type: ignore
except Exception:
self.logger.exception("ERROR al cargar script PY para la accion %s:", action_.name)
elif script_path_qs:
script_path = script_path_qs
self.project.parseScript(script_path)
self.logger.info("Loading script QS %s . . . ", scriptname)
python_script_path = (script_path + ".xml.py").replace(".qs.xml.py", ".qs.py")
try:
self.logger.debug(
"Cargando %s : %s ",
scriptname,
python_script_path.replace(self.project.tmpdir, "tempdata"),
)
loader = machinery.SourceFileLoader(scriptname, python_script_path)
script_loaded = loader.load_module() # type: ignore
except Exception:
self.logger.exception("ERROR al cargar script QS para la accion %s:", action_.name)
script_loaded.form = script_loaded.FormInternalObj(action_, self.project, parent_object)
if parent_object and parent:
parent_object.widget = script_loaded.form
if getattr(parent_object.widget, "iface", None):
parent_object.iface = parent.widget.iface
return script_loaded
def unknownSlot(self) -> None:
"""Log error for actions with unknown slots or scripts."""
self.logger.error("Executing unknown script for Action %s", self.name)
|
nilq/baby-python
|
python
|
from datetime import datetime
from pathlib import Path
import unittest
import pandas as pd
import numpy as np
import vak
import article.syntax
HERE = Path(__file__).parent
DATA_DIR = HERE.joinpath('test_data')
class TestSyntax(unittest.TestCase):
    """Unit tests for the article.syntax helpers."""

    def test_date_from_cbin_filename(self):
        cbin = 'bf_song_repo/gy6or6/032212/gy6or6_baseline_220312_0836.3.cbin'
        parsed = article.syntax.date_from_cbin_filename(cbin)
        expected = datetime(2012, 3, 22, 8, 36)
        self.assertTrue(
            isinstance(parsed, datetime)
        )
        self.assertTrue(
            parsed.date() == expected.date()
        )
        self.assertTrue(
            parsed.time() == expected.time()
        )

    def test_make_df_trans_probs(self):
        paths = [str(p) for p in DATA_DIR.joinpath('vds').glob('*.vds.json')]
        datasets = [vak.Dataset.load(p) for p in paths]
        df = article.syntax.make_df_trans_probs(datasets)
        self.assertTrue(
            type(df) == pd.DataFrame
        )
        for field in article.syntax.FIELDS_SYNTAX:
            self.assertTrue(field in df.columns)

    def test_get_trans_prob(self):
        paths = sorted(
            str(p) for p in DATA_DIR.joinpath('vds').glob('*.vds.json')
        )
        datasets = [vak.Dataset.load(p) for p in paths]
        datasets = datasets[:1]  # just keep one
        df = article.syntax.make_df_trans_probs(datasets)
        date = datetime(2012, 3, 22, 0, 0).date()
        prob = article.syntax.get_trans_prob(df, date, 'S', 'i')
        self.assertTrue(
            type(prob) == float
        )
        self.assertTrue(
            prob > 0.99
        )

    def test_find_branch_point(self):
        trans_mat = np.asarray(
            [
                [0., 1.0, 0., 0.],
                [0., 0., 0.1, 0.9],
                [0., 0., 0., 1.0],
            ])
        bp_inds, bp_lbl = article.syntax.find_branch_points(trans_mat, list('abcd'))
        self.assertTrue(
            len(bp_inds) == 1
        )
        self.assertTrue(
            bp_lbl == ['b']
        )
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
"""
The ``fine_tune.py`` file is used to continue training (or `fine-tune`) a model on a `different
dataset` than the one it was originally trained on. It requires a saved model archive file, a path
to the data you will continue training with, and a directory in which to write the results.
. code-block:: bash
$ python fine_tune.py --help
usage: fine_tune.py [-h] -s SERIALIZATION_DIR -c CONFIG_FILE_PATH -p
PRETRAINED_DIR -m PRETRAINED_MODEL_NAME
optional arguments:
-h, --help show this help message and exit
-s SERIALIZATION_DIR, --serialization_dir SERIALIZATION_DIR
Directory in which to save the model and its logs.
-c CONFIG_FILE_PATH, --config_file_path CONFIG_FILE_PATH
Path to parameter file describing the new multi-tasked
model to be fine-tuned.
-p PRETRAINED_DIR, --pretrained_dir PRETRAINED_DIR
Directory in which was saved the pre-trained model.
-m PRETRAINED_MODEL_NAME, --pretrained_model_name PRETRAINED_MODEL_NAME
Name of the weight file for the pretrained model to
fine-tune in the ``pretrained_dir``.
"""
import argparse
import itertools
import os
import json
import re
from copy import deepcopy
import torch
from typing import List, Dict, Any, Tuple
import logging
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO)
from hmtl.tasks import Task
from hmtl.training.multi_task_trainer import MultiTaskTrainer
from hmtl.common import create_and_set_iterators
from evaluate import evaluate
from train import train_model
from allennlp.models.model import Model
from allennlp.data import Vocabulary
from allennlp.data.iterators import DataIterator
from allennlp.commands.train import create_serialization_dir
from allennlp.common.params import Params
from allennlp.common.checks import ConfigurationError
from allennlp.nn import RegularizerApplicator
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--serialization_dir", required=True, help="Directory in which to save the model and its logs.", type=str)
    parser.add_argument("-c", "--config_file_path", required=True, help="Path to parameter file describing the new multi-tasked model to be fine-tuned.", type=str)
    parser.add_argument("-p", "--pretrained_dir", required=True, help="Directory in which was saved the pre-trained model.", type=str)
    parser.add_argument("-m", "--pretrained_model_name", required=True, help="Name of the weight file for the pretrained model to fine-tune in the ``pretrained_dir``.", type=str)
    args = parser.parse_args()

    # Load the new experiment configuration and snapshot it next to the run.
    params = Params.from_file(params_file=args.config_file_path)
    serialization_dir = args.serialization_dir
    create_serialization_dir(params, serialization_dir, False)
    serialization_params = deepcopy(params).as_dict(quiet=True)
    with open(os.path.join(serialization_dir, "config.json"), "w") as param_file:
        json.dump(serialization_params, param_file, indent=4)

    # Instantiate one Task per "task_*" section of the config and load its data.
    task_list = []
    task_keys = [key for key in params.keys() if re.search("^task_", key)]
    for key in task_keys:
        logger.info("Creating %s", key)
        task_params = params.pop(key)
        task_description = task_params.pop("task_description")
        task_data_params = task_params.pop("data_params")
        task = Task.from_params(params=task_description)
        task_list.append(task)
        _, _ = task.load_data_from_params(params=task_data_params)

    # Reuse the pretrained model's vocabulary so the embeddings stay aligned,
    # and save a copy with the fine-tuned run.
    vocab = Vocabulary.from_files(os.path.join(args.pretrained_dir, "vocabulary"))
    logger.info("Vocabulary loaded from %s", os.path.join(args.pretrained_dir, "vocabulary"))
    vocab.save_to_files(os.path.join(serialization_dir, "vocabulary"))
    logger.info("Save vocabulary to file %s", os.path.join(serialization_dir, "vocabulary"))

    task_list = create_and_set_iterators(params=params, task_list=task_list, vocab=vocab)

    # Build the new model, then overwrite its weights with the pretrained state.
    regularizer = RegularizerApplicator.from_params(params.pop("regularizer", []))
    model_params = params.pop("model")
    model = Model.from_params(vocab=vocab, params=model_params, regularizer=regularizer)

    logger.info("Loading the pretrained model from %s", os.path.join(args.pretrained_dir, args.pretrained_model_name))
    try:
        pretrained_model_state_path = os.path.join(args.pretrained_dir, args.pretrained_model_name)
        pretrained_model_state = torch.load(pretrained_model_state_path)
        model.load_state_dict(state_dict=pretrained_model_state)
    except Exception as err:
        # Chain the original error instead of swallowing it with a bare
        # ``except:`` so the actual incompatibility shows up in the traceback
        # (a bare except would also have eaten KeyboardInterrupt).
        raise ConfigurationError("It appears that the configuration of the pretrained model and " "the model to fine-tune are not compatible. " "Please check the compatibility of the encoders and taggers in the " "config files.") from err

    multi_task_trainer_params = params.pop("multi_task_trainer")
    trainer = MultiTaskTrainer.from_params(model=model, task_list=task_list, serialization_dir=serialization_dir, params=multi_task_trainer_params)

    metrics = train_model(multi_task_trainer=trainer, recover=False)
    if metrics is not None:
        # Use the module logger (not the root logger) for consistency.
        logger.info("Fine-tuning is finished ! Let's have a drink. It's on the house !")
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
from brownie import HuskyTokenDeployer, accounts, HuskyToken, HuskyTokenMinter, Wei
def main():
    """Deploy HuskyToken and its minter, then hand minting over to the minter contract."""
    provost = accounts.load('husky')
    admin = provost
    is_live = False
    # On a fresh local chain the loaded account is unfunded; top it up.
    if provost.balance() == 0:
        accounts[0].transfer(provost, Wei('1 ether'))
    token = HuskyToken.deploy("Husky", "HUSKY", 0, {'from': provost}, publish_source=is_live)
    minter = HuskyTokenMinter.deploy(token, admin, {'from': provost}, publish_source=is_live)
    # Grant the minter contract the minting role, then drop the deployer's own.
    token.addMinter(minter, {'from': provost})
    token.renounceMinter({'from': provost})
|
nilq/baby-python
|
python
|
from pprint import pprint
import gym
env = gym.make('ChessVsRandomBot-v0')
def available_moves():
    """Print and render the moves available to both players in the current env state.

    Dumps the raw move lists, converts them to chess coordinates, and renders
    each piece's legal moves. When Player 1 has no moves left, resigns.
    """
    state = env.state
    moves_p1 = env.get_possible_moves(state, 1)
    moves_p2 = env.get_possible_moves(state, -1)
    pprint(moves_p1)
    pprint(moves_p2)
    # no actions left -> resign
    if not moves_p1:
        print('resigning is the only move...')
        env.resign()  # return value unused; call kept for its effect on env
    # chess coordinates Player 1
    for move in moves_p1:
        print(env.convert_coords(move))
    # chess coordinates Player 2
    for move in moves_p2:
        print(env.convert_coords(move))
    # Player 1 moves, rendered once per distinct piece
    for piece in {move['piece_id'] for move in moves_p1}:
        env.render_moves(state, piece, moves_p1, mode='human')
    # Player 2 moves, rendered once per distinct piece
    for piece in {move['piece_id'] for move in moves_p2}:
        env.render_moves(state, piece, moves_p2, mode='human')

if __name__ == "__main__":
    available_moves()
|
nilq/baby-python
|
python
|
def is_valid(phrase):
    """Return True when no two words in *phrase* are anagrams of each other.

    Each word is reduced to its sorted-letter signature; the phrase is valid
    when all signatures are distinct.
    """
    signatures = ["".join(sorted(word)) for word in phrase.split()]
    return len(signatures) == len(set(signatures))
# Count the valid passphrases, one per line of the puzzle input.
# Use a context manager so the file handle is closed deterministically
# (the original left it to the garbage collector).
with open("day4.txt") as puzzle_input:
    passphrases = puzzle_input.readlines()
valid_phrases = list(filter(is_valid, passphrases))
print('Valid Passphrases:', len(valid_phrases))  # = 231
|
nilq/baby-python
|
python
|
import os
import datetime
import argparse
import numpy
import networks
import torch
# Command-line configuration for DAIN training / inference.
modelnames = networks.__all__
# import datasets
# NOTE(review): parentheses without a trailing comma do NOT create a tuple,
# so datasetNames is a plain string here — confirm whether
# ('Vimeo_90K_interp',) was intended (choices= then iterates characters).
datasetNames = ('Vimeo_90K_interp') #datasets.__all__
parser = argparse.ArgumentParser(description='DAIN')
# --- model / dataset selection ---
parser.add_argument('--debug',action = 'store_true', help='Enable debug mode')
parser.add_argument('--netName', type=str, default='DAIN',
                    choices = modelnames,help = 'model architecture: ' +
                    ' | '.join(modelnames) +
                    ' (default: DAIN)')
parser.add_argument('--datasetName', default='Vimeo_90K_interp',
                    choices= datasetNames,nargs='+',
                    help='dataset type : ' +
                    ' | '.join(datasetNames) +
                    ' (default: Vimeo_90K_interp)')
parser.add_argument('--datasetPath',default='',help = 'the path of selected datasets')
parser.add_argument('--dataset_split', type = int, default=97, help = 'Split a dataset into trainining and validation by percentage (default: 97)')
# --- training hyper-parameters ---
parser.add_argument('--seed', type=int, default=1, help='random seed (default: 1)')
parser.add_argument('--numEpoch', '-e', type = int, default=100, help= 'Number of epochs to train(default:150)')
parser.add_argument('--batch_size', '-b',type = int ,default=1, help = 'batch size (default:1)' )
parser.add_argument('--workers', '-w', type =int,default=8, help = 'parallel workers for loading training samples (default : 1.6*10 = 16)')
parser.add_argument('--channels', '-c', type=int,default=3,choices = [1,3], help ='channels of images (default:3)')
parser.add_argument('--filter_size', '-f', type=int, default=4, help = 'the size of filters used (default: 4)',
                    choices=[2,4,6, 5,51]
                    )
# --- learning rates (one per sub-network) ---
parser.add_argument('--lr', type =float, default= 0.002, help= 'the basic learning rate for three subnetworks (default: 0.002)')
parser.add_argument('--rectify_lr', type=float, default=0.001, help = 'the learning rate for rectify/refine subnetworks (default: 0.001)')
parser.add_argument('--save_which', '-s', type=int, default=1, choices=[0,1], help='choose which result to save: 0 ==> interpolated, 1==> rectified')
parser.add_argument('--time_step', type=float, default=0.5, help='choose the time steps')
parser.add_argument('--flow_lr_coe', type = float, default=0.01, help = 'relative learning rate w.r.t basic learning rate (default: 0.01)')
parser.add_argument('--occ_lr_coe', type = float, default=1.0, help = 'relative learning rate w.r.t basic learning rate (default: 1.0)')
parser.add_argument('--filter_lr_coe', type = float, default=1.0, help = 'relative learning rate w.r.t basic learning rate (default: 1.0)')
parser.add_argument('--ctx_lr_coe', type = float, default=1.0, help = 'relative learning rate w.r.t basic learning rate (default: 1.0)')
parser.add_argument('--depth_lr_coe', type = float, default=0.01, help = 'relative learning rate w.r.t basic learning rate (default: 0.01)')
# --- loss / optimizer settings ---
parser.add_argument('--alpha', type=float,nargs='+', default=[0.0, 1.0], help= 'the ration of loss for interpolated and rectified result (default: [0.0, 1.0])')
parser.add_argument('--epsilon', type = float, default=1e-6, help = 'the epsilon for charbonier loss,etc (default: 1e-6)')
parser.add_argument('--weight_decay', type = float, default=0, help = 'the weight decay for whole network ' )
parser.add_argument('--patience', type=int, default=5, help = 'the patience of reduce on plateou')
parser.add_argument('--factor', type = float, default=0.2, help = 'the factor of reduce on plateou')
parser.add_argument('--pretrained', dest='SAVED_MODEL', default=None, help ='path to the pretrained model weights')
parser.add_argument('--no-date', action='store_true', help='don\'t append date timestamp to folder' )
parser.add_argument('--use_cuda', default= True, type = bool, help='use cuda or not')
parser.add_argument('--use_cudnn',default=1,type=int, help = 'use cudnn or not')
parser.add_argument('--dtype', default=torch.cuda.FloatTensor, choices = [torch.cuda.FloatTensor,torch.FloatTensor],help = 'tensor data type ')
# parser.add_argument('--resume', default='', type=str, help='path to latest checkpoint (default: none)')
parser.add_argument('--uid', type=str, default= None, help='unique id for the training')
parser.add_argument('--force', action='store_true', help='force to override the given uid')
# --- video / output options (used by the interpolation front-end) ---
parser.add_argument('--video', type = str, default= None, help='')
parser.add_argument('--outStr', type = str, default= None, help='')
parser.add_argument('--outFolder', type = str, default= None, help='')
parser.add_argument('--fps', type = float, default= None, help='')
parser.add_argument('--palette', type = int, default= 0, help='')
parser.add_argument('--resc', type = int, default= 0, help='')
parser.add_argument('--maxResc', type = int, default= 0, help='')
parser.add_argument('--loop', type = int, default= 0, help='')
parser.add_argument('--framerateConf', type = int, default= 0, help='')
parser.add_argument('--use60RealFps', type = float, default= 60, help='')
parser.add_argument('--use60', type = int, default= 0, help='')
parser.add_argument('--use60C1', type = int, default= 0, help='')
parser.add_argument('--use60C2', type = int, default= 0, help='')
parser.add_argument('--interpolationMethod', type = int, default= 0, help='')
parser.add_argument('--exportPng', type = int, default= 0, help='')
parser.add_argument('--useAnimationMethod', type = int, default= 1, help='')
parser.add_argument('--splitFrames', type = int, default= 0, help='')
parser.add_argument('--splitSize', type = int, default= 0, help='')
parser.add_argument('--splitPad', type = int, default= 0, help='')
parser.add_argument('--alphaMethod', type = int, default= 0, help='')
parser.add_argument('--inputMethod', type = int, default= 0, help='')
parser.add_argument('--cleanOriginal', type = int, default= 1, help='')
parser.add_argument('--cleanInterpol', type = int, default= 1, help='')
parser.add_argument('--doOriginal', type = int, default= 1, help='')
parser.add_argument('--doIntepolation', type = int, default= 1, help='')
parser.add_argument('--doVideo', type = int, default= 1, help='')
parser.add_argument('--checkSceneChanges', type = int, default= 1, help='')
parser.add_argument('--sceneChangeSensibility', type = int, default= 10, help='')
parser.add_argument('--uploadBar', type = None, default= None, help='')
parser.add_argument('--useWatermark', type = int, default= 0, help='')
args = parser.parse_args()
# NOTE(review): shutil is imported here but not used in this chunk — verify
# whether it is needed at all before removing.
import shutil
# NOTE(review): save_path is empty, so the --log/--arg defaults resolve to
# '/log.txt' and '/args.txt' (filesystem root). arguments are added AFTER the
# first parse_args() above and the args are then re-parsed — presumably
# intentional so --save_path-dependent defaults exist; confirm before
# refactoring the double parse.
save_path = ""
parser.add_argument('--save_path',default=save_path,help = 'the output dir of weights')
parser.add_argument('--log', default = save_path+'/log.txt', help = 'the log file in training')
parser.add_argument('--arg', default = save_path+'/args.txt', help = 'the args used')
args = parser.parse_args()
|
nilq/baby-python
|
python
|
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import os
import tensorflow as tf
from collections import OrderedDict
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.keras import activations
from tensorflow.python.keras import layers as keras_layers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import input_layer as input_layer_module
from tensorflow.python.keras.utils import tf_utils as keras_tf_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tf_nndct import layers as nndct_layers
from tf_nndct import ops as nndct_ops
from tf_nndct.graph import OpTypes
from tf_nndct.graph import dtypes
from tf_nndct.graph import ops
from tf_nndct.graph import utils
from tf_nndct.ops.signal import fft_ops
from tf_nndct.quantization import utils as quant_utils
from tf_nndct.utils import generic_utils
from tf_nndct.utils import registry
from tf_nndct.utils import tf_utils
_INPUT_ARG_PREFIX = '%input%_'
# call(self, input_0, input_1, ...)
_CALL_ARG_TEMPLATE = 'input_%d'
class CodeFormatter(object):
  """Accumulates generated source text, applying indentation at line starts.

  Text is appended via add_statement(); whenever a newline has just been
  written, the next non-empty append is prefixed with the current
  indentation (indent_level * indent_length spaces).
  """

  def __init__(self, init_indent_level=0, indent_length=2):
    self._indent_level = init_indent_level
    self._init_indent_level = init_indent_level
    self._indent_length = indent_length
    self._start_of_line = False
    self._statements = []

  def indent(self):
    """Increase the indentation level by one."""
    self._indent_level += 1

  def outdent(self):
    """Decrease the indentation level by one.

    Raises:
      RuntimeError: if outdenting would drop below the initial level.
    """
    # '<=' (rather than the original '<') so the level can never drop below
    # the level this formatter started at; for init_indent_level == 0 this is
    # equivalent to the old '== 0' check.
    if self._indent_level == 0 or self._indent_level <= self._init_indent_level:
      raise RuntimeError("outdent() without matching indent()")
    self._indent_level -= 1

  def current_indentation(self):
    """Return the current indentation width in spaces."""
    return self._indent_length * self._indent_level

  def newline(self):
    """Emit a bare newline."""
    self.add_statement('\n')

  def add_statement(self, text):
    """Append *text*, indenting each fresh line while indentation is active."""
    text = str(text)
    if self._indent_level > 0:
      # Split on newlines so every fresh line gets the current indent
      # (applied lazily in _append via _start_of_line).
      pos = 0
      for i, c in enumerate(text):
        if c == '\n':
          self._append(text[pos:i + 1])
          pos = i + 1
          self._start_of_line = True
      self._append(text[pos:])
    else:
      self._append(text)
      # Guard the empty string: text[-1] on '' raised IndexError before.
      if text and text[-1] == '\n':
        self._start_of_line = True

  def code(self):
    """Return everything appended so far as a single string."""
    return ''.join(self._statements)

  def _append(self, text):
    # Empty fragments are dropped so they never consume the pending indent.
    if not text:
      return
    if self._start_of_line:
      self._start_of_line = False
      self._append_indent()
    self._statements.append(text)

  def _append_indent(self):
    if self._indent_level == 0:
      return
    size = self.current_indentation()
    self._statements.append(size * ' ')
class GraphCodeGenerator(object):
"""Generate python code from graph and write it to a given path.
The code is keras subclass style and can be used to recreate a keras model
represented by the graph.
"""
def __init__(self, graph, class_spec):
self._class_spec = class_spec
# alias for imported module to be written.
self._module_alias = {}
self._graph = graph
# TODO(yuwang): Remove these two members ?
self._input_signature = graph.input_signature
self._structured_output_tensors = graph.structured_output_tensors
self._translator = GraphTranslator(graph, self._class_spec.quantized)
self._code_formatter = CodeFormatter()
def write(self, filepath):
self._translator.translate()
self._code_formatter.add_statement(
'# This file is generated programmatically, do not modify it manually.\n'
)
self._code_formatter.newline()
self.generate_imports()
self.generate_class_def()
generic_utils.mkdir_if_not_exist(os.path.dirname(filepath))
with open(filepath, 'w') as f:
f.write(self._code_formatter.code())
f.flush()
os.fsync(f.fileno())
layer_to_node = {}
for node in self._graph.nodes:
entity = self._translator.get_entity(node.name)
if entity.init_needed:
layer_to_node[entity.name] = node
return layer_to_node
def generate_imports(self):
objects = [self._class_spec.base]
for entity in self._translator.entities:
if not entity.obj:
continue
objects.append(entity.obj)
imports = set(['import tensorflow as tf'])
for obj in objects:
imports.add(self._generate_obj_import(obj))
# TODO(yuwang): Do not use code formatter directly, just return statements.
imports = sorted(imports)
for impt in imports:
self._code_formatter.add_statement(impt)
self._code_formatter.newline()
self._code_formatter.newline()
def _generate_obj_import(self, obj):
module, pkg, name = _get_module(obj)
if module in self._module_alias:
alias = self._module_alias[module]
else:
alias = name
while alias in self._module_alias.values():
alias = '_' + alias
self._module_alias[module] = alias
if pkg == name:
return 'import {}'.format(pkg)
elif alias == name:
return 'from {} import {}'.format(pkg, name)
else:
return 'from {} import {} as {}'.format(pkg, name, alias)
def generate_class_def(self):
module, _, _ = _get_module(self._class_spec.base)
self._code_formatter.add_statement('class {cls_name}({base_cls}):\n'.format(
cls_name=self._class_spec.name,
base_cls='.'.join(
[self._module_alias[module], self._class_spec.base.__name__])))
self._code_formatter.indent()
self.generate_init()
self.generate_call_fn(self._class_spec.call_fn_name)
self._code_formatter.outdent()
def generate_init(self):
self._code_formatter.add_statement('def __init__(self):')
self._code_formatter.newline()
self._code_formatter.indent()
self._code_formatter.add_statement(
'super({}, self).__init__(name={})'.format(
self._class_spec.name,
utils.stringfy_to_write(self._class_spec.name)))
self._code_formatter.newline()
for entity in self._translator.entities:
if not entity.init_needed:
continue
init_str = self._init_string(entity)
self._code_formatter.add_statement('self.{} = {}'.format(
entity.name, init_str, entity))
self._code_formatter.newline()
self._code_formatter.newline()
self._code_formatter.outdent()
def _init_string(self, entity):
def stringfy_arg_value(value):
if type(value) == tf_dtypes.DType:
value = tf_utils.dtype_to_tf_string(value)
elif isinstance(value, np.ndarray):
value = value.tolist()
elif isinstance(value, str):
# Use a quote to wrap a string value.
value = ''.join(['\'', value, '\''])
elif isinstance(value, Entity):
value = 'self.' + value.name
else:
pass
return value
args = []
input_index = 0
for arg, value in entity.args.items():
# Use input entity name to fill in placeholder.
if arg.startswith(_INPUT_ARG_PREFIX):
args.append(entity.inputs[input_index].name)
input_index += 1
continue
if not isinstance(value, (list, tuple)):
args.append('{}={}'.format(arg, stringfy_arg_value(value)))
else:
value_strs = [stringfy_arg_value(val) for val in value]
args.append(f'{arg}={str(value_strs)}') #.format(arg, str(value_strs)))
module, _, _ = _get_module(entity.obj)
return '{module}.{obj_name}({args})'.format(
module=self._module_alias[module],
obj_name=entity.obj.__name__,
args=', '.join(args))
def generate_call_fn(self, fn_name):
call_args = ['self']
for i in range(len(self._input_signature)):
call_args.append(_CALL_ARG_TEMPLATE % i)
self._code_formatter.add_statement('def {}({}):'.format(
fn_name, ', '.join(call_args)))
self._code_formatter.newline()
self._code_formatter.indent()
placeholders = []
for node in self._graph.nodes:
if node.op.type == OpTypes.INPUT:
assert len(node.out_tensors) == 1
placeholders.append(node)
# [args_0, args_1_1, args_1] -> [args_0, args_1, args_1_1]
placeholders = sorted(placeholders, key=lambda x: x.name)
flattened_input_signature = nest.flatten(self._input_signature)
assert len(placeholders) == len(flattened_input_signature)
# TODO(yuwang): Maybe use more solid way to do this.
# Function arguments are Placeholder nodes in tf.Graph.
# We sort the placeholders by their names and map the flattened input
# signature in the sorted order. This method works now but it relies on
# TensorFlow's implementation for generating placeholder names from input
# argument. Specifically, the first input is named args_0, the second
# input is named args_1 and so on. If an input is a type of sequence,
# then the input is flattened to a list and each element in the list
# is named by args_[input_index]_[element_index_in_the_list].
# For example, we have a call function and the signature is like:
# def call(self, input, state)
# The first argument 'input' is a tensor and TF will generate a
# placeholder for it named args_0. The second argument 'state' is a list
# of tensor, then TF will generate a placeholder for each element in the
# list and the names of the placeholders are arg_1 and arg_1_1. In the
# end, we got got three placeholders [args_0, args_1, args_1_1]. In a
# nndct graph, the order of the placeholders may be [args_0, args_1_1,
# args_1], that is why we sort the nodes by their names before mapping.
# Example:
# placeholders: ['args_0', 'args_1', 'args_1_1']
# args: (TensorSpec(shape=(1, 10), dtype=tf.float32, name='args_0'),
# [TensorSpec(shape=(1, 5), dtype=tf.float32, name='args_1/0'),
# TensorSpec(shape=(1, 5), dtype=tf.float32, name='args_1/1')]
# )
# args_to_placeholder = {
# 'args_0': 'args_0',
# 'args_1/0': 'args_1',
# 'args_1/1': 'args_1_1',
# }
#
# Example:
# placeholders: ['args_0', 'args_0_1', 'args_0_2']
# args: [{'y': TensorSpec(shape=(1, 5), dtype=tf.float32, name='args_0/0/y'),
# 'x': TensorSpec(shape=(1, 5), dtype=tf.float32, name='args_0/0/x')},
# TensorSpec(shape=(1, 5), dtype=tf.float32, name='args_0/1')]
# args_to_placeholder = {
# 'args_0/0/y': 'arg_0',
# 'args_0/0/x': 'arg_0_1',
# 'args_0/1': 'arg_0_2',
# }
args_to_placeholder = {}
for i, arg in enumerate(flattened_input_signature):
args_to_placeholder[arg.name] = placeholders[i].name
def index_string(getter, key):
key = utils.stringfy_to_write(key)
if getter == '[]':
return '[{}]'.format(key)
return '{}{}'.format(getter, key)
def arg_retriving_path(arg, path=()):
"""
Get retriving path of an argument.
See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/util/nest.py::_yield_sorted_items
Args:
arg: The input signature of an argument.
path: Current path.
Yield:
Path(gettter, key) used to retrive the given argument.
"""
if not nest.is_sequence(arg):
yield path
elif isinstance(arg, nest._collections_abc.Mapping):
for key in nest._sorted(arg):
for res in arg_retriving_path(arg[key], path + (('[]', key),)):
yield res
elif nest._is_attrs(arg):
for item in nest._get_attrs_items(arg):
for res in arg_retriving_path(item[1], path + (('.', item[0]),)):
yield res
elif nest._is_namedtuple(arg):
for field in arg._fields:
for res in arg_retriving_path(getattr(arg, field),
path + (('.', field),)):
yield res
# Doesn't support composite_tensor comprared with _yield_sorted_items.
elif nest._is_type_spec(arg):
# Note: to allow CompositeTensors and their TypeSpecs to have matching
# structures, we need to use the same key string here.
for res in arg_retriving_path(arg._component_specs,
path + (('.', arg.value_type.__name__),)):
yield res
else:
for item in enumerate(arg):
for res in arg_retriving_path(item[1], path + (('[]', item[0]),)):
yield res
for i, arg in enumerate(self._input_signature):
for path in list(arg_retriving_path(arg)):
# [('[]' 0), ('[]', 'x')]
arg_name_path = ['args_%d' % i]
retriving_path = []
for getter, key in path:
arg_name_path.append(key)
retriving_path.append(index_string(getter, key))
arg_name = '/'.join(str(s) for s in arg_name_path)
retriving = ''.join(r for r in retriving_path)
if arg_name not in args_to_placeholder:
continue
placeholder_name = args_to_placeholder[arg_name]
entity = self._translator.get_entity(placeholder_name)
call_arg = _CALL_ARG_TEMPLATE % i
#self._code_formatter.add_statement('{} = {}{}'.format(
# entity.outputs[0].name, call_arg, retriving))
#self._code_formatter.newline()
entity.inputs.append(
Entity('{}{}'.format(call_arg, retriving), EntityTypes.Tensor))
# TODO(yuwang): Use entities directly, no longer use node.
for node in self._graph.nodes:
self._code_formatter.add_statement(self._call_string(node))
self._code_formatter.newline()
output_tensors = [
t.name for t in nest.flatten(self._structured_output_tensors)
]
returns = [self._translator.get_entity(t) for t in output_tensors]
if len(returns) == 1:
returns = returns[0]
else:
returns = nest.pack_sequence_as(self._structured_output_tensors, returns)
def return_str(returns):
return_strs = []
if isinstance(returns, Entity):
s = returns.name
elif isinstance(returns, list):
for ret in returns:
return_strs.append(return_str(ret))
s = ''.join(['[', ', '.join(return_strs), ']'])
elif isinstance(returns, tuple):
for ret in returns:
return_strs.append(return_str(ret))
s = ''.join(['(', ', '.join(return_strs), ')'])
elif isinstance(returns, dict):
for key in returns:
return_strs.append('{}: {}'.format(utils.stringfy_to_write(key),
''.join(return_str(returns[key]))))
s = ''.join(['{', ', '.join(return_strs), '}'])
else:
raise NotImplementedError(
'Can not rewrite return object of type '.format(type(returns)))
return s
self._code_formatter.add_statement('return %s' % return_str(returns))
self._code_formatter.newline()
self._code_formatter.outdent()
def _call_string(self, node):
entity = self._translator.get_entity(node.name)
input_names = [str(input) for input in entity.inputs]
output_names = [output.name for output in entity.outputs]
input_str = ', '.join(input_names)
output_str = ', '.join(output_names)
# Layer type
if entity.init_needed:
forward_template = '{outputs} = self.{entity}({inputs}) # {node}'
args_dict = {
'outputs': output_str,
'entity': entity.name,
'inputs': input_str,
'node': node.name,
}
forward_str = forward_template.format(**args_dict)
else:
# Function type
init_str = self._init_string(entity)
forward_str = '{} = {} # {}'.format(output_str, init_str, node.name)
return forward_str
class EntityTypes(object):
  """Enumeration of the kinds of entities used by the code generator."""

  # Keras-style layer: must be constructed in the generated __init__.
  Layer = 'layer'
  # Plain function: called inline inside the generated call method.
  Function = 'function'
  # Tensor value flowing between entities.
  Tensor = 'tensor'
class Entity(object):
  """A node in the generated call graph: a layer, a function, or a tensor."""

  def __init__(self, name, type, obj=None, args=None):
    """Create an entity.

    Args:
      name: Unique name of the entity in the generated code.
      type: One of `EntityTypes`.
      obj: For Layer/Function entities, the backing class or function;
        None for Tensor entities.
      args: Ordered mapping of argument name -> value used to build the
        constructor/call expression, or None.
    """
    self.name = name
    self.type = type
    self.obj = obj
    self.args = args
    # Input/output entities wired up by the translator.
    self.inputs = []
    self.outputs = []

  def __str__(self):
    return self.name

  # An entity's repr is simply its name as well.
  __repr__ = __str__

  @property
  def init_needed(self):
    """True when the entity must be constructed in the generated __init__."""
    return self.type == EntityTypes.Layer
class GraphTranslator(object):
  """Translates a computation graph to a call graph composed of entities.

  A `Node` will be translated to an `Entity` object with nested entities.
  Specifically, translator will convert all `Operation` and `Tensor` objects
  in `Node` to entities.
  """

  # Op types implemented as Keras/nndct layer classes: each needs an
  # attribute constructed in the generated __init__.
  _op_to_layer = {
      OpTypes.BIDIRECTIONAL_RNN:
          keras_layers.Bidirectional,
      OpTypes.CONV1D:
          keras_layers.Conv1D,
      OpTypes.DENSE:
          keras_layers.Dense,
      OpTypes.EMBEDDING:
          keras_layers.Embedding,
      OpTypes.GRU:
          keras_layers.GRU,
      OpTypes.INPUT:
          nndct_layers.Identity,
      OpTypes.LSTM:
          nndct_layers.LSTM,
      #OpTypes.LSTM: keras_layers.LSTM,
      OpTypes.LSTM_CELL:
          nndct_layers.LSTMCell,
      OpTypes.MAX_POOL1D:
          keras_layers.MaxPooling1D,
      OpTypes.RNN:
          keras_layers.RNN,
      OpTypes.SIMPLE_RNN:
          keras_layers.SimpleRNN,
      OpTypes.STACKED_RNN_CELLS:
          keras_layers.StackedRNNCells,
  }

  # Op types implemented as plain functions: called inline in the generated
  # call method, no __init__ attribute required.
  _op_to_func = {
      OpTypes.ADD: tf.math.add,
      OpTypes.CAST: tf.cast,
      OpTypes.BIAS_ADD: tf.nn.bias_add,
      OpTypes.IDENTITY: tf.identity,
      OpTypes.LINEAR: activations.linear,
      OpTypes.RELU: activations.relu,
      OpTypes.SIGMOID: activations.sigmoid,
      OpTypes.TANH: activations.tanh,
      OpTypes.MULTIPLY: tf.math.multiply,
      OpTypes.STRIDED_SLICE: tf.strided_slice,
      OpTypes.GATHER: nndct_ops.gather,
      'rfft': fft_ops.rfft,
      'complex_abs': tf.abs,
      'angle': tf.math.angle,
      'cast': tf.cast,
      'exp': tf.math.exp,
      'irfft': fft_ops.irfft,
      'pad': tf.pad,
      'transpose': tf.transpose,
      'sum': tf.math.reduce_sum,
      'reshape': tf.reshape,
      OpTypes.CONST: tf.constant,
  }

  def __init__(self, graph, quantized=False):
    """
    Args:
      graph: The nndct graph to translate.
      quantized: If True, prefer quantized module implementations (via
        `quant_utils.get_quant_module`) when mapping ops to TF objects.
    """
    self._graph = graph
    self._quantized = quantized
    # All entities translated from ops.
    self._entities = []
    # Node name -> index in self._entities
    self._name_to_entity = {}
    # Op type -> count
    self._op_count = {}

  def _append_entity(self, entity, name=None):
    """Append `entity` and optionally index it by a node/tensor name.

    Args:
      entity: An `Entity` object.
      name: Node or Tensor's name used as the lookup key; when None the
        entity is stored but not addressable via `get_entity`.
    """
    self._entities.append(entity)
    if name:
      self._name_to_entity[name] = len(self._entities) - 1

  def _get_tf_object(self, op):
    """Map an operation to its backing TF object and entity type.

    Returns:
      A tuple (obj, entity_type) where obj is a layer class or function.
    Raises:
      NotImplementedError: If the op type has no known mapping.
    """
    if self._quantized:
      obj = quant_utils.get_quant_module(op.type, None)
      if obj:
        return obj, EntityTypes.Layer
    if op.type in self._op_to_layer:
      obj = self._op_to_layer[op.type]
      entity_type = EntityTypes.Layer
    elif op.type in self._op_to_func:
      obj = self._op_to_func[op.type]
      entity_type = EntityTypes.Function
    elif op.type == OpTypes.RNN_LAYER:
      obj = op.attr['layer_class']
      entity_type = EntityTypes.Layer
    elif op.type == OpTypes.GENERIC:
      obj = op.attr['orig_layer_class']
      entity_type = EntityTypes.Layer
    else:
      raise NotImplementedError("Unable to rewrite operation '{}'".format(
          op.type))
    return obj, entity_type

  def translate(self):
    """Translate every graph node into entities and wire up their I/O."""
    for node in self._graph.nodes:
      entity = self._translate_node(node)
      # Append config entity first, then append node's entity so that we can
      # keep entities in the topological order.
      for key in node.op.configs:
        value = node.op.get_config(key)
        if isinstance(value, Entity):
          self._append_entity(value)
      self._append_entity(entity, node.name)
      for index, tensor in enumerate(node.out_tensors):
        # Use node entity's name as prefix
        tensor_entity = Entity('%s_%d' % (entity.name, index),
                               EntityTypes.Tensor)
        self._append_entity(tensor_entity, tensor.name)
        entity.outputs.append(tensor_entity)
    # We only want to get the sequence type so we just take the first
    # node data instead of iterating through all the nodes.
    #
    # Example: pack multiply's input entities to [[entity0, entity1]]
    # {'class_name': 'Multiply', 'config': {'name': 'multiply', 'trainable': True, 'dtype': 'float32'},
    # 'name': 'multiply', 'inbound_nodes': [[['lambda', 0, 0, {}], ['activation', 0, 0,{}]]]}
    #
    # See tensorflow/python/keras/engine/functional.py::process_node
    for node in self._graph.nodes:
      input_entities = [
          self.get_entity(tensor.name) for tensor in node.in_tensors
      ]
      entity = self.get_entity(node.name)
      if node.op.type != OpTypes.INPUT and entity.type == EntityTypes.Layer:
        if 'inbound_nodes' in node.op.configs:
          inbound_nodes_data = node.op.get_config('inbound_nodes')
          inbound_nodes_data = keras_tf_utils.convert_inner_node_data(
              inbound_nodes_data, wrap=True)
          node_data = [inbound_nodes_data[0]]
          # Restore the original keras nesting of the inputs.
          input_entities = nest.pack_sequence_as(node_data, input_entities)
          input_entities = base_layer_utils.unnest_if_single_tensor(
              input_entities)
          if isinstance(input_entities, Entity):
            input_entities = [input_entities]
      entity.inputs = input_entities

  def _translate_node(self, node):
    """Convert one graph node into an `Entity` with its call arguments."""
    obj, ent_type = self._get_tf_object(node.op)
    if ent_type == EntityTypes.Layer:
      self._op_config_to_entity(node.op)
      argspec = tf_inspect.getfullargspec(obj.__init__)
      arg_to_value = self._arguments_for_keras_layer_init(node.op, argspec)
    else:
      argspec = tf_inspect.getfullargspec(obj)
      arg_to_value = self._arguments_for_tf_operation_calling(node, argspec)
    return Entity(self._unique_entity_name(node.op), ent_type, obj,
                  arg_to_value)

  def _op_config_to_entity(self, op):
    """Given an `Operation`, traverse its configs and find all `Operation`
    items, then:
    1. Convert these ops to `Entity` objects
    2. Set these entities back to configs to replace original ops.

    Arguments:
      op: An `Operation` object.

    Returns:
      An entity converted from the given operation.

    Example:
      Given: RNN -> config('cell') -> StackedRNNCells -> config('cells')
        -> [LSTMCell0, LSTMCell1]
      Returns: Entity(RNN) -> config('cell') -> Entity(StackedRNNCells)
        -> config('cells') -> [Entity(LSTMCell0), Entity(LSTMCell1)]
    """
    obj, ent_type = self._get_tf_object(op)
    assert ent_type == EntityTypes.Layer
    argspec = tf_inspect.getfullargspec(obj.__init__)
    for name in op.configs:
      config_value = op.get_config(name)
      config_values = generic_utils.to_list(config_value)
      # Convert Operation to Entity.
      converted_values = []
      for value in config_values:
        if isinstance(value, ops.Operation):
          entity = self._op_config_to_entity(value)
          converted_values.append(entity)
        else:
          converted_values.append(value)
      # Keep as the original type.
      if isinstance(config_value, tuple):
        config_value = tuple(converted_values)
      elif isinstance(config_value, list):
        config_value = converted_values
      elif len(converted_values) <= 1:
        config_value = converted_values[0]
      else:
        raise RuntimeError('Unexpected sequence type: {}'.format(
            type(config_value)))
      op.set_config(name, config_value)
    arg_to_value = self._arguments_for_keras_layer_init(op, argspec)
    return Entity(self._unique_entity_name(op), ent_type, obj, arg_to_value)

  def _arguments_for_keras_layer_init(self, op, argspec):
    """Collect constructor arguments for a keras layer from op configs.

    Required args must be present in the op's configs; optional args are
    only emitted when they differ from the signature's default.
    """
    # Keep the order of args same as the original signature.
    default_count = 0 if not argspec.defaults else len(argspec.defaults)
    required_count = len(argspec.args) - default_count
    arg_to_value = OrderedDict()
    for arg in argspec.args[:required_count]:
      if arg == 'self':
        continue
      elif arg in op.configs:
        arg_to_value[arg] = op.get_config(arg)
      else:
        raise RuntimeError(
            'Missing value for argument "{}" of operation {}'.format(
                arg, op.type))
    if default_count:
      for arg, value in zip(argspec.args[-default_count:], argspec.defaults):
        # Skip args that have default values.
        if arg not in op.configs or op.get_config(arg) == value:
          continue
        arg_to_value[arg] = op.get_config(arg)
    return arg_to_value

  def _arguments_for_tf_operation_calling(self, node, argspec):
    """Collect call arguments for a TF function from op configs and inputs.

    Any signature argument not found in the op's configs is assumed to be
    an input tensor and marked with `_INPUT_ARG_PREFIX` so the code
    generator can substitute the input entity's name later.
    """
    # Keep the order of args same as the original signature.
    default_count = 0 if not argspec.defaults else len(argspec.defaults)
    required_count = len(argspec.args) - default_count
    arg_to_value = OrderedDict()
    num_inputs = len(node.in_tensors)
    input_count = 0
    for index, arg in enumerate(argspec.args):
      if arg in node.op.configs:
        arg_to_value[arg] = node.op.get_config(arg)
      elif input_count < num_inputs:
        # If an argument not in configs, we treat it as a tensor.
        arg_to_value[_INPUT_ARG_PREFIX + arg] = None
        input_count += 1
      elif index < required_count:
        raise ValueError('Missing required argument for {}'.format(
            node.op.type))
    if input_count < num_inputs:
      raise ValueError(
          'Unused tensor founded, there may be a mismatch with function signature of {}'
          .format(node.op.type))
    return arg_to_value

  def _unique_entity_name(self, op):
    """Return a unique name like 'dense0', 'dense1' per op type."""
    count = self._op_count.get(op.type, 0)
    self._op_count[op.type] = count + 1
    return '%s%d' % (op.type, count)

  def get_entity(self, name):
    """Look up the entity registered for a node or tensor name."""
    return self._entities[self._name_to_entity[name]]

  @property
  def entities(self):
    # All translated entities in topological order.
    return self._entities
def _get_module(object):
  """Return (module, package, module_name) for the given object.

  For a dotted module path "a.b.c" the package is "a.b" and the module
  name is "c"; for a top-level module both are the full name.
  """
  module = tf_inspect.getmodule(object)
  full_name = module.__name__
  pkg, _, module_name = full_name.rpartition('.')
  if not pkg:
    # No dot in the name: package and module name coincide.
    pkg = full_name
  return (module, pkg, module_name)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import argparse
import pandas as pd
from zvt import init_log, zvt_env
from zvt.api.quote import get_stock_factor_schema
from zvt.contract import IntervalLevel
from zvt.contract.api import df_to_db
from zvt.contract.recorder import FixedCycleDataRecorder
from zvt.recorders.joinquant.common import to_jq_trading_level, to_jq_entity_id
from zvt.domain import Stock,StockFactorCommon
from zvt.utils.pd_utils import pd_is_not_null
from zvt.utils.time_utils import to_time_str, now_pd_timestamp, TIME_FORMAT_DAY, TIME_FORMAT_ISO8601
try:
from jqdatasdk import auth, logout, get_factor_values
except:
pass
class JqChinaStockFactorRecorder(FixedCycleDataRecorder):
    """Records China stock factor values from JoinQuant into the local DB."""
    entity_provider = 'joinquant'
    entity_schema = Stock
    data_schema = StockFactorCommon
    # Data comes from JoinQuant (jq).
    provider = 'joinquant'

    def __init__(self,
                 exchanges=['sh', 'sz'],
                 schema=None,
                 entity_ids=None,
                 codes=None,
                 batch_size=10,
                 force_update=True,
                 sleeping_time=0,
                 default_size=2000,
                 real_time=False,
                 fix_duplicate_way='ignore',
                 start_timestamp=None,
                 end_timestamp=None,
                 level=IntervalLevel.LEVEL_1WEEK,
                 kdata_use_begin_time=False,
                 close_hour=15,
                 close_minute=0,
                 one_day_trading_minutes=4 * 60,
                 ) -> None:
        """Resolve the concrete factor schema from `schema` and log in to jq.

        NOTE(review): `exchanges=['sh', 'sz']` is a mutable default argument;
        it appears to be treated as read-only here — confirm.
        """
        level = IntervalLevel(level)
        # Concrete per-factor schema (a StockFactorCommon subclass).
        self.data_schema = get_stock_factor_schema(schema)
        self.jq_trading_level = to_jq_trading_level(level)
        super().__init__('stock', exchanges, entity_ids, codes, batch_size, force_update, sleeping_time,
                         default_size, real_time, fix_duplicate_way, start_timestamp, end_timestamp, close_hour,
                         close_minute, level, kdata_use_begin_time, one_day_trading_minutes)
        # Credentials are read from the zvt environment config.
        auth(zvt_env['jq_username'], zvt_env['jq_password'])

    def on_finish(self):
        # Release the JoinQuant session when the recorder is done.
        super().on_finish()
        logout()

    def record(self, entity, start, end, size, timestamps):
        """Fetch factor values for one entity and persist them.

        Caps a single request at 1000 business days; larger windows adjust
        self.start_timestamp/self.end_timestamp for subsequent cycles.

        NOTE(review): in the `not self.end_timestamp` branch the local
        `start_timestamp` is never assigned, so the fallback DataFrame
        construction below would raise NameError if `df_list` were empty
        there — confirm whether that path is reachable.
        """
        now_date = to_time_str(now_pd_timestamp())
        jq_entity_di = to_jq_entity_id(entity)
        if size > 1000:
            # jq caps the request size; narrow the window to 1000 bdays.
            start_end_size = self.evaluate_start_end_size_timestamps(entity)
            size = 1000
            bdate= pd.bdate_range(start=start_end_size[0], periods=size)
            self.start_timestamp = bdate[0]
            self.end_timestamp = bdate[-1] if bdate[-1] <= now_pd_timestamp() else now_pd_timestamp()
        if not self.end_timestamp:
            # No explicit end: ask jq for the latest `size` values.
            factor_data = get_factor_values(securities=[jq_entity_di],
                                            factors=self.data_schema.important_cols(),
                                            end_date=now_date,
                                            count=size)
        else:
            end_timestamp = to_time_str(self.end_timestamp)
            if self.start_timestamp:
                start_timestamp = to_time_str(self.start_timestamp)
            else:
                # Derive the start from `size` business days before the end.
                bdate_list = pd.bdate_range(end=end_timestamp, periods=size)
                start_timestamp = to_time_str(bdate_list[0])
            factor_data = get_factor_values(securities=[to_jq_entity_id(entity)],
                                            factors=self.data_schema.important_cols(),
                                            start_date=start_timestamp,
                                            end_date=end_timestamp)
        # jq returns {factor_name: DataFrame[date x security]}; rename each
        # security column to the factor name and join on the date index.
        df_list = [values.rename(columns={jq_entity_di: key}) for key, values in factor_data.items()]
        if len(df_list) != 0:
            df = pd.concat(df_list,join='inner',sort=True,axis=1).sort_index(ascending=True)
        else:
            # No data returned: build an empty frame over the business days.
            df = pd.DataFrame(columns=self.data_schema.important_cols(),index=pd.bdate_range(start=start_timestamp,end=end_timestamp))
        if pd_is_not_null(df):
            # Re-align to the full business-day range when needed.
            df_fill = pd.DataFrame(index=pd.bdate_range(start=start_timestamp, end=end_timestamp)) if self.end_timestamp else pd.DataFrame(index=df.index)
            if df_fill.shape[0] != df.shape[0]:
                df = pd.concat([df_fill,df],axis=1)
            df['name'] = entity.name
            df['entity_id'] = entity.id
            df['timestamp'] = pd.to_datetime(df.index)
            df['provider'] = 'joinquant'
            df['code'] = entity.code

            def generate_factor_id(se):
                # Row id is entity_id + timestamp; precision depends on level.
                if self.level >= IntervalLevel.LEVEL_1DAY:
                    return "{}_{}".format(se['entity_id'], to_time_str(se['timestamp'], fmt=TIME_FORMAT_DAY))
                else:
                    return "{}_{}".format(se['entity_id'], to_time_str(se['timestamp'], fmt=TIME_FORMAT_ISO8601))

            df['id'] = df[['entity_id', 'timestamp']].apply(generate_factor_id, axis=1)
            df_to_db(df=df, data_schema=self.data_schema, provider=self.provider, force_update=self.force_update)
        return None
__all__ = ['JqChinaStockFactorRecorder']

if __name__ == '__main__':
    # CLI entry point: record factor data for the given codes at the given
    # trading level, e.g. `--level 1d --codes 000001 000002`.
    parser = argparse.ArgumentParser()
    parser.add_argument('--level', help='trading level', default='1d', choices=[item.value for item in IntervalLevel])
    parser.add_argument('--codes', help='codes', default=['000001'], nargs='+')
    args = parser.parse_args()
    level = IntervalLevel(args.level)
    codes = args.codes
    init_log('jq_china_stock_{}_kdata.log'.format(args.level))
    JqChinaStockFactorRecorder(level=level, sleeping_time=0, codes=codes, real_time=False, ).run()
    #
    # print(get_kdata(entity_id='stock_sz_000001', limit=10, order=StockFactor.timestamp.desc(),
    #                 adjust_type=AdjustType.hfq))
|
nilq/baby-python
|
python
|
import sys, re
def alphaprint(code):
    """Interpret an "alphaprint" program.

    A valid program is any number of full lowercase-alphabet cycles followed
    by at most one alphabet prefix (case-insensitive), as enforced by the
    regex below.  Each UPPERCASE letter at index i emits chr(i % 256) and a
    trace line of the form "=> <code-point>".  Invalid programs print an
    error naming the script and, when present, the first non-letter char.
    """
    r = "^(abcdefghijklmnopqrstuvwxyz)*(a(b(c(d(e(f(g(h(i(j(k(l(m(n(o(p(q(r(s(t(u(v(w(x(y(z?)?)?)?)?)?)?)?)?)?)?)?)?)?)?)?)?)?)?)?)?)?)?)?)?)?)?$"
    if re.match(r, code, re.I):
        # Fix: removed an unused `last` variable left over here.
        for i, ch in enumerate(code):
            if ch.isupper():
                print(end=chr(i % 256))
                print("\n=>", i % 256)
    else:
        my_filename = sys.argv[0].replace("\\", "/").split("/")[-1]
        err_msg = "Invalid source code"
        if not code.isalpha():
            # Report the first non-letter character in the program.
            err_msg = "Invalid character '"
            err_msg += re.sub("[A-Za-z]", "", code)[0]
            err_msg += "' in source code"
        print(f"{my_filename}: Error: {err_msg}")
if __name__ == "__main__":
    # Script name for usage/error messages, path separators normalized.
    my_filename = sys.argv[0].replace("\\", "/").split("/")[-1]
    if len(sys.argv) <= 1:
        print(f"Usage: {my_filename} [OPTION] FILE")
        print(f"Type '{my_filename} --help' for more information")
    elif "--help" in sys.argv:
        print(f"""Usage: {my_filename} [OPTION] FILE
FILE should not start with a dash.
Options:
  --help Print this message
  -i, --input Get the code from STDIN instead of reading from a file""")
    else:
        code = ""
        if "-i" in sys.argv or "--input" in sys.argv:
            # Read the program text interactively from stdin.
            code = input("Enter code: ")
        elif any(map(lambda a: a[0] != "-", sys.argv[1:])):
            # The first non-flag argument is the source file path.
            code = open(tuple(filter(lambda a: a[0] != "-", sys.argv[1:]))[0]).read()
        else:
            print(f"{my_filename}: Error: no FILE provided")
            sys.exit(1)
        alphaprint(code)
|
nilq/baby-python
|
python
|
# Create your models here.
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.conf import settings
from .common import *
class User(AbstractUser):
#Boolean fields to select the type of account.
is_job_hunter = models.BooleanField(default=False)
is_company = models.BooleanField(default=False)
class Company(models.Model):
company = models.OneToOneField(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
companyname = models.CharField(max_length=100, default='')
address = models.CharField(max_length=100, default='')
logo = models.TextField(default='../assets/scau.jpeg')
scale = models.CharField(max_length=50, default='0-15', choices=COMPANY_SCALE_CHOICE)
financing = models.CharField(max_length=50, default='none', choices=COMPANY_FINANCING_CHOICE)
def __str__(self):
return self.company.email
class Job(models.Model):
job_name = models.CharField(max_length=100)
company_id = models.ForeignKey(Company, on_delete=models.CASCADE)
description = models.TextField()
welfare = models.CharField(max_length=50)
salary = models.CharField(max_length=50, default='0-2000', choices=JOB_SALARY_CHOICE)
experience = models.CharField(max_length=50, default='', choices=JOB_EXPERIENCE_CHOICE)
education = models.CharField(max_length=100)
deliver_date = models.DateField(auto_now_add=True, blank=True)
class JobHunter(models.Model):
job_hunter = models.OneToOneField(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE, blank=True, null=True)
jobhuntername = models.CharField(max_length=100, default='')
age = models.IntegerField()
sex = models.CharField(max_length=100)
# 多对多关系
collect_jobs = models.ManyToManyField(Job, through='CollectJob', related_name='collect_set')
deliver_jobs = models.ManyToManyField(Job, through='Deliver', related_name='deliver_set')
def __str__(self):
return self.job_hunter.email
class Resume(models.Model):
job_hunter_id = models.ForeignKey(JobHunter, on_delete=models.CASCADE)
job_hunter_name = models.CharField(max_length=100)
age = models.CharField(max_length=100)
sex = models.CharField(max_length=100)
education = models.CharField(max_length=100)
telephone = models.CharField(max_length=100)
intention = models.CharField(max_length=100)
description = models.TextField()
class CollectJob(models.Model):
"""
多对多关系的表,可添加其他字段,例如收藏日期
"""
job_hunter_id = models.ForeignKey(JobHunter, on_delete=models.CASCADE)
job_id = models.ForeignKey(Job, on_delete=models.CASCADE)
class Deliver(models.Model):
job_hunter_id = models.ForeignKey(JobHunter, on_delete=models.CASCADE)
job_id = models.ForeignKey(Job, on_delete=models.CASCADE, related_name='deliver_job_set')
resume_id = models.ForeignKey(Resume, on_delete=models.CASCADE)
deliver_date = models.DateTimeField(auto_now_add=True, blank=True)
|
nilq/baby-python
|
python
|
#!/bin/python
"""
Title: The DISEMVOWLER
Author: Maxwell Haley
Description: Takes in a line of text from stdin, strips it of all vowels,
             then prints the mangled text and its vowel remains. Done for
             /r/DailyProgrammer challenge #149.

You can invoke this script in two ways. First, call the script with no arguments
and enter in a string. Everything until a newline will be processed.
Second, pipe text from another program to this script.

`echo 'This is a sentace' | ./disemvowler.py`

`cat text1.txt | ./disemvowler.py`
"""
import sys

# Vowels to strip; input is lowercased first, so uppercase is covered too.
targets = ('a', 'e', 'i', 'o', 'u')


def disemvowel(text):
    """Return (mangled, remains) for *text*.

    The text is lowercased; `mangled` is the text with all vowels and
    spaces removed, `remains` is the removed vowels in original order.

    Fix: the original deleted items from the list it was iterating over,
    which skipped the second of two consecutive vowels. This version scans
    without mutating, so every vowel is captured.
    """
    mangled = []
    remains = []
    for ch in text.lower():
        if ch in targets:
            remains.append(ch)
        elif ch != ' ':
            mangled.append(ch)
    return ''.join(mangled), ''.join(remains)


if __name__ == "__main__":
    mangled, remains = disemvowel(sys.stdin.readline())
    print(mangled)
    print(remains)
|
nilq/baby-python
|
python
|
import unittest
from pulsar.apps.test.wsgi import HttpTestClient
from pulsar.utils.httpurl import HttpParser, CHttpParser
class TestPyParser(unittest.TestCase):
    """Benchmark the pure-Python HTTP parser on a large chunked message."""
    # Marks this TestCase as a benchmark for pulsar's test runner.
    __benchmark__ = True
    # Number of benchmark repetitions.
    __number__ = 1000

    @classmethod
    async def setUpClass(cls):
        # Build a ~1MB HTTP POST message and split it into two halves so the
        # parser is exercised across chunk boundaries.
        http = HttpTestClient()
        response = await http.post('http://bla.com/upload', data=b'g' * 2**20)
        message = response.message
        ip = len(message) // 2
        cls.messages = [message[:ip], message[ip:]]

    def startUp(self):
        # Fresh parser per run.
        # NOTE(review): `startUp` (not unittest's setUp) appears to be
        # pulsar's per-benchmark hook — confirm against pulsar docs.
        self.server = self.parser()

    def parser(self, kind=0):
        """Return a new parser instance; overridden by the C-parser subclass."""
        return HttpParser(kind=kind)

    def test_server_parser(self):
        # Feed both halves; the parser must consume every byte and report a
        # complete message at the end.
        for msg in self.messages:
            assert self.server.execute(msg, len(msg)) == len(msg)
        assert self.server.is_message_complete()
class TestCParser(TestPyParser):
    """Same benchmark as TestPyParser but using the C HTTP parser."""

    def parser(self, kind=0):
        # Only the parser factory differs from the base class.
        return CHttpParser(kind=kind)
|
nilq/baby-python
|
python
|
from . import ash
from . import grm
from . import rdi
from . import srf
from . import sxn
from . import tnl
from . import ubx
# jaykaron
from . import adb
from . import tnt
|
nilq/baby-python
|
python
|
from quiche.egraph import EGraph
from quiche.analysis import MinimumCostExtractor
from .prop_test_parser import PropParser, PropTree, PropTreeCost
from .test_egraph import verify_egraph_shape # , print_egraph
def make_rules():
    """Return the rewrite rules used by the propositional-logic tests."""
    # Each rule maps a prefix-notation pattern to its replacement.
    return [
        PropTree.make_rule("(-> ?x ?y)", "(| (~ ?x) ?y)"),  # x -> y ===> ~x | y
        PropTree.make_rule("(| (~ ?x) ?y)", "(-> ?x ?y)"),  # ~x | y ===> x -> y
        PropTree.make_rule("?x", "~ (~ ?x)"),               # x ===> ~(~x)
        PropTree.make_rule("(| ?x ?y)", "(| ?y ?x)"),       # | is commutative
    ]
def x_implies_y():
    """Parse the proposition ``x -> y``."""
    parser = PropParser()
    parser.build()
    return parser.parse("(-> x y)")
def not_x_or_y():
    """Parse the proposition ``~x | y``."""
    parser = PropParser()
    parser.build()
    return parser.parse("(| (~ x) y)")
def not_y_implies_not_x():
    """Parse the contrapositive proposition ``~y -> ~x``."""
    parser = PropParser()
    parser.build()
    return parser.parse("(-> (~ y) (~ x))")
def test_implies():
    """Test building egraph of x -> y"""
    actual = EGraph(x_implies_y())
    # e0 = x, e1 = y, e2 = (x -> y)
    expected = {"e0": {"x": [()]}, "e1": {"y": [()]}, "e2": {"->": [("e0", "e1")]}}
    assert verify_egraph_shape(actual, expected)
def test_nor():
    """Test building egraph of ~x | y"""
    actual = EGraph(not_x_or_y())
    # e0 = x, e1 = ~x, e2 = y, e3 = (~x | y)
    expected = {
        "e0": {"x": [()]},
        "e1": {"~": [("e0",)]},
        "e2": {"y": [()]},
        "e3": {"|": [("e1", "e2")]},
    }
    assert verify_egraph_shape(actual, expected)
def test_add_implies_and_nor():
    """Test building egraph of both props: x -> y and ~x | y"""
    actual = EGraph(x_implies_y())
    actual.add(not_x_or_y())
    actual.rebuild()
    # Shared leaves x/y are reused; the two roots stay in separate e-classes
    # because no merge has happened yet.
    expected = {
        "e0": {"x": [()]},
        "e1": {"y": [()]},
        "e2": {"->": [("e0", "e1")]},
        "e3": {"~": [("e0",)]},
        "e4": {"|": [("e3", "e1")]},
    }
    assert verify_egraph_shape(actual, expected)
def test_and_or_not_implies():
    """Test building egraph of (x -> y) & (~x -> z)."""
    test_str = "(& (-> x y) (-> (~ x) z))"
    parser = PropParser()
    parser.build()
    tree = parser.parse(test_str)
    actual = EGraph(tree)
    # One e-class per distinct subterm, leaves first.
    expected = {
        "e0": {"x": [()]},
        "e1": {"y": [()]},
        "e2": {"->": [("e0", "e1")]},
        "e3": {"~": [("e0",)]},
        "e4": {"z": [()]},
        "e5": {"->": [("e3", "e4")]},
        "e6": {"&": [("e2", "e5")]},
    }
    assert verify_egraph_shape(actual, expected)
def test_merge_props():
    """Merging x -> y with ~x | y collapses both roots into one e-class."""
    actual = EGraph(x_implies_y())
    impl_root = actual.root
    nor_root = actual.add(not_x_or_y())
    actual.merge(impl_root, nor_root)
    actual.rebuild()
    # e4 now holds both equivalent forms of the proposition.
    expected = {
        "e0": {"x": [()]},
        "e1": {"y": [()]},
        "e4": {"->": [("e0", "e1")], "|": [("e3", "e1")]},
        "e3": {"~": [("e0",)]},
    }
    assert verify_egraph_shape(actual, expected)
def test_prop_ematch():
    """Test rewriting (a & b) -> c to ~(a & b) | c"""
    test_str = "(-> (& a b) c)"
    parser = PropParser()
    parser.build()
    tree = parser.parse(test_str)
    actual = EGraph(tree)
    # Verify tree shape
    expected = {
        "e0": {"a": [()]},
        "e1": {"b": [()]},
        "e2": {"&": [("e0", "e1")]},
        "e3": {"c": [()]},
        "e4": {"->": [("e2", "e3")]},
    }
    assert verify_egraph_shape(actual, expected)

    # Rule to rewrite x -> y to ~x | y
    rule = PropTree.make_rule("(-> ?x ?y)", "(| (~ ?x) ?y)")
    matches = actual.ematch(rule.lhs, actual.eclasses())

    # expect exactly one match
    assert len(matches) == 1
    match = matches[0]
    # expect match to be (e4, {x: e2, y: e3}): the implication e-class plus
    # the substitution environment for the pattern variables.
    assert len(match) == 2
    assert str(match[0]) == "e4"
    assert len(match[1]) == 2
    assert str(match[1]["?x"]) == "e2"
    assert str(match[1]["?y"]) == "e3"
def test_expr_subst():
    """Substituting the RHS of x->y <=> ~x|y maps into the merged e-class."""
    actual = EGraph(x_implies_y())
    impl_root = actual.root
    nor_root = actual.add(not_x_or_y())
    actual.merge(impl_root, nor_root)
    actual.rebuild()
    # x -> y <===> ~x | y
    rule = PropTree.make_rule("(-> ?x ?y)", "(| (~ ?x) ?y)")
    matches = actual.ematch(rule.lhs, actual.eclasses())
    # Only the substitution environment is needed; the matched class is not.
    _, env = matches[0]
    rhs = actual.subst(rule.rhs, env)
    assert str(rhs) == "e4"
    expected = {
        "e0": {"x": [()]},
        "e1": {"y": [()]},
        "e3": {"~": [("e0",)]},
        "e4": {"|": [("e3", "e1")], "->": [("e0", "e1")]},
    }
    # Fix: the shape check's result was previously discarded; assert it like
    # every other test in this module does.
    assert verify_egraph_shape(actual, expected)
def test_apply_rules_for_contrapositive():
    """Repeated rule application saturates x -> y with its equivalents."""
    actual = EGraph(x_implies_y())
    rules = make_rules()
    # Expected egraph version after each round of rule application; the last
    # round is a fixed point (version stops changing).
    versions = [3, 14, 16, 24]
    for version in versions:
        assert actual.version == version
        actual.apply_rules(rules)
    assert actual.version == versions[-1]
    # Saturated graph: double negations plus all |/-> forms share e13.
    expected = {
        "e3": {"~": [("e5",)]},
        "e5": {"x": [()], "~": [("e3",)]},
        "e6": {"~": [("e7",)]},
        "e7": {"y": [()], "~": [("e6",)]},
        "e14": {"~": [("e13",)]},
        "e13": {
            "~": [("e14",)],
            "->": [("e5", "e7"), ("e6", "e3")],
            "|": [("e3", "e7"), ("e7", "e3")],
        },
    }
    assert verify_egraph_shape(actual, expected)
def test_extract_contrapositive():
    """Extraction simplifies (~y -> ~x) down to the cheaper equivalent (x -> y)."""
    # Verify rule application
    actual = EGraph(not_y_implies_not_x())
    root = actual.root
    rules = make_rules()
    # Expected e-graph version before each pass, and the cheapest term
    # extractable from the root at that point.
    versions = [5, 18, 20, 28]
    best_terms = ["(-> (~ y) (~ x))", "(| y (~ x))", "(| y (~ x))", "(-> x y)"]
    for version, term in zip(versions, best_terms):
        assert actual.version == version
        # Verify extracted term is correct
        cost_model = PropTreeCost()
        cost_analysis = MinimumCostExtractor()
        extracted = cost_analysis.extract(cost_model, actual, root)
        assert str(extracted) == term
        actual.apply_rules(rules)
    # Saturated: the version is stable and the simplest form is (-> x y).
    assert actual.version == versions[-1]
    assert str(cost_analysis.extract(cost_model, actual, root)) == best_terms[-1]
    expected = {
        "e5": {"y": [()], "~": [("e7",)]},
        "e7": {"~": [("e5",)]},
        "e8": {"x": [()], "~": [("e9",)]},
        "e9": {"~": [("e8",)]},
        "e16": {"~": [("e15",)]},
        "e15": {
            "~": [("e16",)],
            "|": [("e5", "e9"), ("e9", "e5")],
            "->": [("e7", "e9"), ("e8", "e5")],
        },
    }
    assert verify_egraph_shape(actual, expected)
|
nilq/baby-python
|
python
|
import math
import time
import random
# Print the Main Menu
def menu():
    """Print the main menu and return the chosen option (1-9), or None on invalid input."""
    print("---- Public Key Cryptography, RSA ----\n")
    print("1) Extended Euclidean Algorithm.")
    print("2) Fast Modular Exponentiation.")
    print("3) Miller-Rabin Test (True=Composite).")
    print("4) Prime Number Generator.")
    print("5) RSA keys Generator (Public and Private Key).")
    print("6) RSA Encryption.")
    print("7) RSA Decryption.")
    print("8) Test RSA Decryption With and Without CRT.")
    print("9) Quit.\n")
    try:
        choice = int(input("Select a function to run: "))
        if 1 <= choice <= 9:
            return choice
        else:
            print("\nYou must enter a number from 1 to 9\n")
    except ValueError:
        print("\nYou must enter a number from 1 to 9\n")
    # Only reached on invalid input: pause, then fall through (returns None,
    # which matches no branch in main() so the menu is shown again).
    input("Press Enter to continue.\n")
# Get input from the user
def get_input(message):
    """Prompt with `message` and return the answer as int (raises ValueError otherwise)."""
    choice = int(input(message))
    return choice
# Compute the extended euclidean algorithm, returns the GCD and the Bezout Coefficients
def ext_euclid(a, b):
    """Extended Euclidean algorithm on |a| and |b|.

    Returns (g, x, y) where g = gcd(|a|, |b|) and |a|*x + |b|*y == g
    (Bezout's identity). The signs of a and b are discarded.
    """
    a, b = abs(a), abs(b)
    # (x0, y0) are the Bezout coefficients of the current remainder `a`,
    # (x1, y1) those of the next remainder `b`.
    x0, y0 = 1, 0
    x1, y1 = 0, 1
    while b:
        quotient = a // b
        a, b = b, a - quotient * b
        x0, x1 = x1, x0 - quotient * x1
        y0, y1 = y1, y0 - quotient * y1
    return a, x0, y0
# Compute the Fast modular exponentiation algorithm
def fast_mod_exp(a, exp, n):
    """Compute (a ** exp) % n by binary (square-and-multiply) exponentiation.

    Runs in O(log exp) multiplications; exp is assumed non-negative.
    """
    result = 1
    base = a
    while exp > 0:
        # Multiply the accumulator in for every set bit of the exponent.
        if exp % 2 == 1:
            result = (result * base) % n
        base = (base * base) % n
        exp //= 2
    return result
# Compute the Miller-Rabin for a number given the number of rounds, return True if the number is composite
def miller_rabin_test(n, rounds=40):
    """Miller-Rabin compositeness test.

    Returns True when n is certainly composite (or < 2) and False when n
    is prime or probably prime (error probability at most 4**-rounds).
    """
    # Small cases: 2 and 3 are prime; anything below 2 or even is composite.
    if n == 2 or n == 3:
        return False
    if n < 2 or n % 2 == 0:
        return True
    # Write n - 1 as 2**r * s with s odd.
    r, s = 0, n - 1
    while s % 2 == 0:
        s //= 2
        r += 1
    for _ in range(rounds):
        base = random.randrange(2, n - 1)
        x = fast_mod_exp(base, s, n)
        if x == 1 or x == n - 1:
            continue  # this base is not a witness
        for _ in range(r - 1):
            x = fast_mod_exp(x, 2, n)
            if x == n - 1:
                break
        else:
            return True  # `base` proves n composite
    return False
# Generate a random k-bit prime number
def generate_prime(k, rounds=40):
    """Return a random k-bit prime, or None when k < 2."""
    if k < 2:
        return None
    while True:
        # Draw odd candidates with exactly k bits: [2**(k-1) + 1, 2**k) step 2.
        candidate = random.randrange(pow(2, k - 1) + 1, pow(2, k), 2)
        if not miller_rabin_test(candidate, rounds):
            return candidate
# Generates a k-bit RSA public and private key pair
def rsa_keys(k, crt=False, rounds=40):
    """Generate a k-bit RSA key pair.

    Returns (public_key, private_key) where public_key == (e, n) and
    private_key is (d, n), or (p, q, dp, dq, qinv) when crt is True.
    Returns None when k < 5 (modulus too small to split into two primes).
    """
    if k < 5:
        return None
    # Split the modulus size between the two primes.
    k1 = int(math.ceil(k / 2))
    k2 = int(math.floor(k / 2))
    p = q = 0
    while p == q:
        p = generate_prime(k1, rounds)
        q = generate_prime(k2, rounds)
    # CRT recombination in rsa_decryption() assumes p > q.
    if crt and q > p:
        p, q = q, p
    n = p * q
    phi = (p - 1) * (q - 1)
    while True:
        # Retry until e is invertible modulo phi, i.e. gcd(e, phi) == 1.
        e = random.randrange(2, phi // 2)
        g, d, _ = ext_euclid(e, phi)
        if g == 1:
            public_key = (e, n)
            # Normalize the Bezout coefficient of e into [0, phi).
            d = d % phi
            if d < 0:
                d += phi
            if crt:
                dp, dq, qinv = crt_pre_computation(p, q, d)
                private_key = (p, q, dp, dq, qinv)
            else:
                private_key = (d, n)
            return public_key, private_key
# Pre-computation for RSA with CRT
def crt_pre_computation(p, q, d):
    """Return (dp, dq, qinv) for CRT decryption: d mod (p-1), d mod (q-1), q^-1 mod p."""
    # ext_euclid(q, p) yields x with q*x + p*y == 1, so x is q^-1 modulo p;
    # the final `% p` normalizes it into [0, p).
    _, qinv, _ = ext_euclid(q, p)
    return d % (p - 1), d % (q - 1), qinv % p
# RSA encryption
def rsa_encryption(m, public_key):
    """RSA encryption: return m**e mod n for public_key == (e, n)."""
    e, n = public_key
    return fast_mod_exp(m, e, n)
# RSA decryption
def rsa_decryption(c, private_key, crt=False):
    """Decrypt ciphertext c.

    Without CRT, private_key is (d, n) and the result is c**d mod n.
    With CRT, private_key is (p, q, dp, dq, qinv) with p > q, and Garner's
    recombination m = m2 + q * ((qinv * (m1 - m2)) mod p) is used.
    """
    if crt:
        p, q, dp, dq, qinv = private_key
        m1 = fast_mod_exp(c, dp, p)
        m2 = fast_mod_exp(c, dq, q)
        garner = (qinv * (m1 - m2)) % p
        return m2 + garner * q
    # Plain RSA decryption is the same modular exponentiation as encryption.
    return rsa_encryption(c, private_key)
# Tests Extended Euclidean Algorithm.
def test_ext_euclid():
    """Interactively run the extended Euclidean algorithm on two integers."""
    first = get_input("\nInsert the first integer: ")
    second = get_input("Insert the second integer: ")
    gcd, x, y = ext_euclid(first, second)
    print("\nGreatest Common Divisor (GCD):", gcd)
    print("Bezout Coefficients (x, y):", "(", x, ",", y, ")\n")
# Tests Fast Modular Exponentiation
def test_fast_mod_exp():
    """Interactively compute a**exp mod n using fast_mod_exp()."""
    a = get_input("\nInsert the base: ")
    exp = get_input("Insert the exponent: ")
    n = get_input("Insert the modulo: ")
    print("\nModular Exponentiation:", fast_mod_exp(a, exp, n), "\n")
# Tests Miller-Rabin Test (True=Composite)
def test_miller_rabin():
    """Interactively run the Miller-Rabin test (True means composite)."""
    n = get_input("\nInsert an integer: ")
    rounds = get_input("Insert the number of rounds to execute (default=40): ")
    print("\nTest Miller-Rabin:", miller_rabin_test(n, rounds), "\n")
# Tests Prime Number Generator
def test_generate_prime():
    """Interactively generate and print a random k-bit prime."""
    k = get_input("\nInsert number of bits (k>1): ")
    rounds = get_input("Insert the number of rounds for Miller-Rabin Test (default=40): ")
    print("\nGenerated Prime Number:", generate_prime(k, rounds), "\n")
# Tests RSA keys Generator (Public and Private Key)
def test_rsa_keys():
    """Interactively generate and print an RSA key pair (optionally in CRT form)."""
    k = get_input("\nInsert the number of bits for the module n: ")
    crt = input("Do you want to use CRT optimization? (Y or N): ")
    # Anything other than Y/y means "no CRT".
    if crt.upper() == 'Y':
        crt = True
    else:
        crt = False
    rounds = get_input("Insert the number of rounds for Miller-Rabin Test(default=40): ")
    public_key, private_key = rsa_keys(k, crt, rounds)
    print("\nPublic Key (e, n):", public_key)
    if crt:
        print("Private Key (p, q, dp, dq, qinv):", private_key, "\n")
    else:
        print("Private Key (d, n):", private_key, "\n")
# Tests RSA Encryption
def test_rsa_encryption():
    """Interactively encrypt a message with a public key (e, n)."""
    m = get_input("\nInsert the message m: ")
    e = get_input("Insert the exponent e of the public key: ")
    n = get_input("Insert the modulo n of the public key: ")
    print("\nGenerated ciphertext c:", rsa_encryption(m, (e, n)), "\n")
# Tests RSA Decryption
def test_rsa_decryption():
    """Interactively decrypt a ciphertext with a private key (d, n)."""
    c = get_input("\nInsert the ciphertext c: ")
    d = get_input("Insert the exponent d of the private key: ")
    n = get_input("Insert the modulo n of the private key: ")
    print("\nOriginal message m:", rsa_decryption(c, (d, n)), "\n")
# Test RSA Decryption With and Without CRT (Total Execution Time on 100 random ciphertext)
def test_rsa_crt():
    """Benchmark RSA decryption with and without CRT.

    Asks for a private key (p, q, d), generates `iteration` random
    ciphertexts of `k` bits each, and compares the accumulated decryption
    time of the plain and the CRT implementations.
    """
    p = get_input("\nInsert the first prime number p: ")
    q = get_input("Insert the second prime number q: ")
    d = get_input("Insert the exponent d of the private key: ")
    # CRT recombination assumes p > q, so swap if needed.
    if q > p:
        p, q = q, p
    n = p * q
    private_key = (d, n)
    dp, dq, qinv = crt_pre_computation(p, q, d)
    crt_private_key = (p, q, dp, dq, qinv)
    print("\nRSA without CRT - Private Key (d, n):", private_key)
    print("RSA with CRT - Private Key (p, q, dp, dq, qinv):", crt_private_key, "\n")
    k = get_input("Insert the size of ciphertext to be randomly generated (number of bits): ")
    iteration = get_input("Insert the number of ciphertext to be tested: ")
    input("\nPress Enter to begin the test.\n")
    # Begin the test
    decryption_exec_time = 0
    crt_decryption_exec_time = 0
    print("Test is Started.")
    for i in range(iteration):
        ciphertext = random.getrandbits(k)
        # RSA Decryption without CRT
        start = time.perf_counter()
        rsa_decryption(ciphertext, private_key)
        end = time.perf_counter()
        decryption_exec_time += end - start
        # RSA Decryption with CRT
        start = time.perf_counter()
        rsa_decryption(ciphertext, crt_private_key, True)
        end = time.perf_counter()
        crt_decryption_exec_time += end - start
        print("\rCurrently Tested Ciphertexts:", i+1, end="", flush=True)
    print("\nTest is completed.\n")
    print("- RSA Decryption without CRT -")
    # BUG FIX: the summary previously hard-coded "100" even though the number
    # of ciphertexts is chosen by the user; report the actual count.
    print(f"Total Execution Time on {iteration} Random Ciphertext:", decryption_exec_time, "seconds")
    print("\n- RSA Decryption with CRT -")
    print(f"Total Execution Time on {iteration} Random Ciphertext:", crt_decryption_exec_time, "seconds\n")
def main():
    """Menu loop: dispatch the chosen demo until the user quits (option 9)."""
    while True:
        # Ask the user what function wants to run
        choice = menu()
        # Execute the function requested by the user
        try:
            if choice == 1:
                test_ext_euclid()
            elif choice == 2:
                test_fast_mod_exp()
            elif choice == 3:
                test_miller_rabin()
            elif choice == 4:
                test_generate_prime()
            elif choice == 5:
                test_rsa_keys()
            elif choice == 6:
                test_rsa_encryption()
            elif choice == 7:
                test_rsa_decryption()
            elif choice == 8:
                test_rsa_crt()
            elif choice == 9:
                exit(0)
        except ValueError:
            # Raised by get_input() when the user types a non-integer.
            print("\nYou must enter an integer\n")
            input("Press Enter to continue.\n")


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
from source_hunter import hunter
from source_hunter.hunter import hunt
# Public API of the package: re-export the `hunter` module and its `hunt` entry point.
__all__ = [
    'hunter', 'hunt'
]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import absolute_import
from collections import namedtuple
from difflib import get_close_matches
from warnings import warn
from pytest import mark
from tests.utils import PY34, PY35, PY33, PY26, PY27, PYPY, PY2, PY3
try:
from collections import OrderedDict
except ImportError:
from future.moves.collections import OrderedDict
try:
from collections import defaultdict
except ImportError:
from future.moves.collections import defaultdict
from future.utils import with_metaclass, iteritems, iterkeys
def get_error_message(message, needle=None, haystack=None, ignore=None):  # noqa
    """Build a detailed, human-readable error message.

    When a `needle` (missing attribute name) and/or `haystack` (available
    attribute names) are given, the message is extended with the missing
    attribute and a "did you perhaps mistype" hint based on the closest
    match in the haystack (minus anything listed in `ignore`).
    """
    if needle is None and haystack is None:
        return message
    if not isinstance(ignore, (type(None), list, tuple, set)):
        raise TypeError('ignore needs to be a list, you are doing it wrong')
    missing_attr_message = '' if needle is None else ' !! Missing attribute: %r' % needle
    if haystack is None:
        haystack = []
    choices = haystack if ignore is None else [item for item in haystack if item not in ignore]
    close_match = get_close_matches(needle, choices, 1)
    misspelled_message = '\nDid you perhaps mistype %r ?\n' % close_match[0] if close_match else ''
    return '\n'.join(['', message, missing_attr_message, misspelled_message])
def head_and_tail(x, *xs):
    """Split an argument list into its first element and the remaining tuple.

    Raises ValueError when only a single value is supplied, because every
    test attribute needs at least one metadata tag after its name.
    """
    if xs:
        return x, xs
    raise ValueError('{0} is missing metadata tags'.format(x))
def separate_version_tags(meta):
    """Partition metadata tags into (version_tags, other_tags).

    A tag is a version tag when it contains the substring 'PY'
    (e.g. 'PY27', 'not_PY3'); relative order is preserved in both tuples.
    """
    versions = tuple(tag for tag in meta if 'PY' in tag)
    others = tuple(tag for tag in meta if 'PY' not in tag)
    return versions, others
def separate_action_tags(meta):
    """Partition metadata tags into (action_names, other_tags).

    A tag like 'test_foo_bar' contributes the actions ('foo', 'bar');
    every other tag is passed through unchanged.
    """
    # noinspection PyPep8Naming
    ACTION_INDICATOR = 'test_'
    actions = []
    others = []
    for tag in meta:
        if tag.startswith(ACTION_INDICATOR):
            actions.extend(tag[len(ACTION_INDICATOR):].split('_'))
        else:
            others.append(tag)
    return tuple(actions), tuple(others)
# Maps a version tag (as used in test attribute names) to whether it matches
# the interpreter currently running the tests.
map_version = {  # :off
    'PY26': PY26,
    'PY27': PY27,
    'PY33': PY33,
    'PY34': PY34,
    'PY35': PY35,
    'PYPY': PYPY,
    'PY2': PY2,
    'PY3': PY3,
    'not_PY26': not PY26,
    'not_PY27': not PY27,
    'not_PY33': not PY33,
    'not_PY34': not PY34,
    'not_PY35': not PY35,
    'not_PYPY': not PYPY,
    'not_PY2': not PY2,
    'not_PY3': not PY3,
}  # :on
def is_correct_python_version(version_tags):
    """True when no version tag is specified, or at least one tag matches this interpreter."""
    # Guard: no specific version tag means the spec applies everywhere.
    if not version_tags:
        return True
    return any(map_version[tag] for tag in version_tags)
def prepare_py2(cls_dict):
    """Reorder a PY2 class dict so '__data' attributes come first.

    Doesn't keep test order, but prioritizes '__data' lines for easy error
    handling: PY2 has no ordered class bodies, so the data entries are moved
    to the front to guarantee they are seen before their expectations.
    """
    _cls_dict = OrderedDict()
    for key, value in iteritems(cls_dict):
        if str(key).endswith('__data'):
            _cls_dict[key] = value
    # Append everything else; keys already inserted keep their position.
    _cls_dict.update(cls_dict)
    return _cls_dict
# noinspection PyMethodParameters
class MultiTestMeta(type):
    """Metaclass that turns `it_<name>__<tags>` class attributes into test tables.

    Attributes named ``it_<test>__data`` hold input data; attributes named
    ``it_<test>__test_<action>[__<meta>]`` hold the expected result for one or
    more actions.  The metaclass pairs them into ``(data, expected)`` named
    tuples grouped per action, and records the ordering in ``__ordered__``.
    """

    # noinspection PyMethodOverriding
    @classmethod
    def __prepare__(cls, name, bases):
        # Preserve class-body declaration order (PY3); PY2 is handled below.
        return OrderedDict()

    def __new__(cls, cls_name, bases, cls_dict):  # noqa
        if PY26 or PY27:
            # PY2 dicts are unordered: move '__data' entries to the front so
            # expectations that reference them are always processed after.
            cls_dict = prepare_py2(cls_dict)
        d = defaultdict(dict)
        d.update(dict(cls_dict))
        # Every collected test becomes a (data, expected) pair.
        _data = namedtuple(cls_name, ['data', 'expected'])
        order = defaultdict(list)
        test_data_q = {}  # test name -> its '__data' payload
        for key in cls_dict:
            # Guard, skip internal object attributes
            if not key.startswith('it_'):
                continue
            # clean: the raw attribute is replaced by the grouped tables below
            del d[key]
            test_name, meta = head_and_tail(*key.split('__'))
            # Guard, store test data, and move on.
            if 'data' in meta:
                test_data_q[test_name] = cls_dict[key]
                continue
            # Guard, expectations defined before data (simplicity)
            test_data = test_data_q.get(test_name, None)
            if test_data is None:
                detailed_error_message = get_error_message(  # :off
                    '{key!r} is improperly defined.'.format(**vars()),
                    needle='{test_name}__data'.format(**vars()),
                    haystack=list(iterkeys(cls_dict))
                )  # :on
                warn(  # :off
                    detailed_error_message,
                    category=TestActionWithNoDataWarning
                )  # :on
                continue
            # Guard, skip specs for other versions of python
            version_meta_tags, meta = separate_version_tags(meta)
            if not is_correct_python_version(version_meta_tags):
                continue
            # Guard, missing meta data
            action_meta_tags, meta = separate_action_tags(meta)
            if not action_meta_tags:
                detailed_error_message = get_error_message(  # :off
                    '{key!r} is improperly defined. Missing action_tags'.format(**vars()),
                    needle='{test_name}__test_*'.format(**vars()),
                    haystack=list(iterkeys(cls_dict))
                )  # :on
                warn(  # :off
                    detailed_error_message,
                    category=TestDataWithNoActionTagWarning
                )  # :on
                continue
            # Warn (but still load) when the same test/action pair appears twice.
            for action in action_meta_tags:
                is_duplicate = any([  # :off
                    test_name in order[action],
                    'xfail_%s' % test_name in order[action],
                    'skip_%s' % test_name in order[action],
                ])  # :on
                msg = '\n !! Duplicate entry: {test_name}__test_{action} already loaded.'.format(**vars())
                if is_duplicate:
                    warn(msg, category=TestWithDuplicateActionWarning)
            # xfail/skip markers are encoded as a test-name prefix.
            xfail = 'xfail' in meta
            skip = 'skip' in meta
            if xfail:
                test_name = 'xfail_%s' % test_name
            elif skip:
                test_name = 'skip_%s' % test_name
            # add test to class
            for action in action_meta_tags:
                d[action][test_name] = _data(test_data, cls_dict[key])
                order[action].append(test_name)
        # check attributes are properly paired.
        for test_name in iterkeys(test_data_q):
            for action_tag, action in iteritems(order):
                if test_name in action:
                    continue
                if 'xfail_%s' % test_name in action:
                    continue
                if 'skip_%s' % test_name in action:
                    continue
                detailed_error_message = get_error_message(  # :off
                    '{test_name!r} is improperly defined. Missing expected_results'.format(**vars()),
                    needle='{test_name}__test_{action_tag}'.format(**vars()),
                    haystack=list(iterkeys(cls_dict))
                )  # :on
                warn(  # :off
                    detailed_error_message,
                    category=TestDataWithNoActionTagWarning
                )  # :on
        if PY26:
            # PY2.6 has no __prepare__: fall back to a deterministic sorted order.
            for key in iterkeys(order):
                order[key].sort()
        if order:
            d['__ordered__'] = order
        return type.__new__(cls, cls_name, bases, d)
class TestActionWithNoDataWarning(UserWarning):
    """Raised when an expectation attribute has no matching '__data' attribute."""
    pass
class TestDataWithNoActionTagWarning(UserWarning):
    """Raised when test data exists but no 'test_*' action tag references it."""
    pass
class TestWithDuplicateActionWarning(UserWarning):
    """Raised when the same test/action pair is declared more than once."""
    pass
class MultiTestCaseBase(with_metaclass(MultiTestMeta)):
    """Base class for table-driven test cases assembled by MultiTestMeta."""

    @classmethod
    def keys(cls, category):
        """Return the test names for `category`, wrapping xfail/skip entries in pytest marks."""
        keys = []
        for key in cls.__ordered__[category]:
            xfail = key.startswith('xfail')
            skip = key.startswith('skip')
            if xfail:
                keys.append(mark.xfail(condition=True, reason='Marked as xfail.')(key))
            elif skip:
                keys.append(mark.skipif(condition=True, reason='Marked as skip.')(key))
            else:
                keys.append(key)
        return keys

    @classmethod
    def get(cls, category, item):
        """Return the (data, expected) pair stored for `item` under `category`."""
        return cls.__dict__[category][item]
|
nilq/baby-python
|
python
|
##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from odoo import _
from odoo.http import request, route
from odoo.addons.portal.controllers.portal import CustomerPortal
class L10nArCustomerPortal(CustomerPortal):
    """Extend the portal 'My Account' page with Argentinian fiscal fields."""

    # Extra optional fields the portal user may edit on top of the standard ones.
    OPTIONAL_BILLING_FIELDS = CustomerPortal.OPTIONAL_BILLING_FIELDS + [
        "commercial_partner_id", "l10n_latam_identification_type_id", "vat",
        "l10n_ar_afip_responsibility_type_id",
    ]

    def details_form_validate(self, data):
        """When either the document type or the document number is given, both must be set."""
        error, error_message = super().details_form_validate(data)
        vat = data.get('vat')
        identification_type = data.get('l10n_latam_identification_type_id')
        # Document number and document type must be provided together.
        if identification_type and not vat:
            error['vat'] = 'error'
            error_message.append(_('Please add the document number.'))
        if vat and not identification_type:
            error['l10n_latam_identification_type_id'] = 'error'
            error_message.append(_('Please add the type of document.'))
        # Delegate writes of commercial fields to the partner model.
        write_error, write_message = request.env['res.partner'].try_write_commercial(data)
        if write_error:
            error.update(write_error)
            error_message.extend(write_message)
        return error, error_message

    @route()
    def account(self, redirect=None, **post):
        if post:
            error, _error_message = self.details_form_validate(post)
            if not error:
                # Strip the commercial fields before the standard handler runs —
                # presumably because try_write_commercial() above already
                # persisted them (TODO confirm against the base implementation).
                post.pop('commercial_partner_id', False)
                post.pop('vat', False)
                post.pop('l10n_latam_identification_type_id', False)
                post.pop('l10n_ar_afip_responsibility_type_id', False)
        response = super().account(redirect=redirect, **post)
        # Extra rendering values for the Argentinian-specific selectors.
        identification_types = request.env['l10n_latam.identification.type'].sudo().search([])
        afip_responsibilities = request.env['l10n_ar.afip.responsibility.type'].sudo().search([])
        uid = request.session.uid
        partner = request.env['res.users'].browse(uid).partner_id if uid else request.env['res.partner']
        partner = partner.with_context(show_address=1).sudo()
        response.qcontext.update({
            'identification_types': identification_types,
            'afip_responsibilities': afip_responsibilities,
            'partner': partner})
        return response
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
# Author: Nuha Nishat
# Date: 1/30/20
import rospy
import sys, os
import math
import geometry_msgs.msg
from geometry_msgs.msg import PoseStamped
from sensor_msgs.msg import JointState
import tf, math
import tf.transformations
import pdb
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
from math import pi
from std_msgs.msg import String
from moveit_commander.conversions import pose_to_list
from moveit_msgs.msg import RobotState, PlanningScene, PlanningSceneComponents, AllowedCollisionEntry, AllowedCollisionMatrix
from moveit_msgs.srv import GetPlanningScene, ApplyPlanningScene
import time
# move_group_python_interface_tutorial was used as reference
class MoveRobot():
    """Thin wrapper around MoveIt commanders for a Kinova arm and gripper."""

    def __init__(self):
        # Initialize moveit commander and ros node for moveit
        moveit_commander.roscpp_initialize(sys.argv)
        # Initializing node
        #rospy.init_node("move_kinova", anonymous=True)
        # Define robot using RobotCommander. Provided robot info such as
        # kinematic model and current joint state
        self.robot = moveit_commander.RobotCommander()
        # Setting the world
        self.scene = moveit_commander.PlanningSceneInterface()
        # Define the planning group for the arm you are using
        # You can easily look it up on rviz under the MotionPlanning tab
        self.move_group = moveit_commander.MoveGroupCommander("arm")
        # NOTE(review): this attribute shadows the `move_gripper` method below,
        # so `robot.move_gripper(...)` resolves to this commander object and the
        # method is unreachable on instances — confirm and rename one of them.
        self.move_gripper = moveit_commander.MoveGroupCommander("gripper")
        # Set the precision of the robot
        rospy.set_param('/move_group/trajectory_execution/allowed_start_tolerance', 0.0)
        rospy.wait_for_service("/apply_planning_scene", 10.0)
        rospy.wait_for_service("/get_planning_scene", 10.0)
        self.apply_scene = rospy.ServiceProxy('/apply_planning_scene', ApplyPlanningScene)
        self.get_scene = rospy.ServiceProxy('/get_planning_scene', GetPlanningScene)
        rospy.sleep(2)
        # To see the trajectory
        self.disp = moveit_msgs.msg.DisplayTrajectory()
        self.disp.trajectory_start = self.robot.get_current_state()
        self.rate = rospy.Rate(10)
        self.move_group.allow_replanning(1)
        #self.main()

    def set_planner_type(self, planner_name):
        # Map a short planner label to the corresponding MoveIt/OMPL planner id.
        if planner_name == "RRT":
            self.move_group.set_planner_id("RRTConnectkConfigDefault")
        if planner_name == "RRT*":
            self.move_group.set_planner_id("RRTstarkConfigDefault")
        if planner_name == "PRM*":
            self.move_group.set_planner_id("PRMstarkConfigDefault")

    def go_to_joint_state(self, joint_state):
        # Plan and execute a motion to the given joint configuration.
        joint_goal = JointState()
        joint_goal.position = joint_state
        self.move_group.set_joint_value_target(joint_goal.position)
        self.plan = self.move_group.plan()
        # NOTE(review): go() and execute(self.plan) both trigger a motion here —
        # confirm whether executing twice is intentional.
        self.move_group.go(wait=True)
        self.move_group.execute(self.plan, wait=True)
        self.move_group.stop()
        self.move_group.clear_pose_targets()
        rospy.sleep(2)

    def go_to_goal(self, ee_pose):
        # Plan and execute a motion to an end-effector pose.
        # ee_pose is [x, y, z, roll, pitch, yaw] with angles in degrees
        # (6 values) or [x, y, z, qx, qy, qz, qw] (7 values).
        pose_goal = geometry_msgs.msg.Pose()
        pose_goal.position.x = ee_pose[0]
        pose_goal.position.y = ee_pose[1]
        pose_goal.position.z = ee_pose[2]
        if len(ee_pose) == 6:
            quat = tf.transformations.quaternion_from_euler(math.radians(ee_pose[3]), math.radians(ee_pose[4]), math.radians(ee_pose[5]))
            pose_goal.orientation.x = quat[0]
            pose_goal.orientation.y = quat[1]
            pose_goal.orientation.z = quat[2]
            pose_goal.orientation.w = quat[3]
        else:
            pose_goal.orientation.x = ee_pose[3]
            pose_goal.orientation.y = ee_pose[4]
            pose_goal.orientation.z = ee_pose[5]
            pose_goal.orientation.w = ee_pose[6]
        self.move_group.set_pose_target(pose_goal)
        self.move_group.set_planning_time(20)
        rospy.sleep(2)
        self.move_group.go(wait=True)
        self.move_group.stop()
        self.move_group.clear_pose_targets()
        rospy.sleep(2)

    def move_gripper(self, cmd):
        # Drive the gripper to a named target ("Close"/"Open") or to explicit
        # joint values. NOTE(review): unreachable via instances — shadowed by
        # the attribute of the same name assigned in __init__ (see note there).
        if cmd == "Close":
            self.move_gripper.set_named_target("Close")
        elif cmd == "Open":
            self.move_gripper.set_named_target("Open")
        else:
            self.move_gripper.set_joint_value_target(cmd)
        self.move_gripper.go(wait=True)
        rospy.sleep(2)

    def display_trajectory(self):
        # Publish the last computed plan so RViz can display it.
        self.disp_pub = rospy.Publisher("/move_group/display_planned_path", moveit_msgs.msg.DisplayTrajectory, queue_size=20)
        self.disp.trajectory.append(self.plan)
        print(self.disp.trajectory)
        self.disp_pub.publish(self.disp)
#def main(self):
# Set up path here
# Pick planner
# self.set_planner_type("RRT")
# Draw a straight line in 90 deg
# rospy.loginfo('Going to first point')
# self.go_to_goal([-0.1, -0.63, 0.2, 0, 180, 0])
# rospy.loginfo('Moving down')
# self.go_to_goal([-0.1, -0.63, 0.097, 0, 180, 0])
# rospy.loginfo("Going to second point")
# self.go_to_goal([0.1, -0.63, 0.097, 0, 180, 0])
# rospy.loginfo('Moving Up')
# self.go_to_goal([0.1, -0.63, 0.2, 0, 180, 0])
# rospy.spin()
if __name__ == '__main__':
    # Demo: trace a straight 20 cm line above the table, approaching from 0.2 m,
    # dipping to 0.097 m, sliding along x, then lifting back up.
    robot = MoveRobot()
    # Pick planner
    robot.set_planner_type("RRT")
    # Draw a straight line in 90 deg
    rospy.loginfo('Going to first point')
    robot.go_to_goal([-0.1, -0.63, 0.2, 0, 180, 0])
    rospy.loginfo('Moving down')
    robot.go_to_goal([-0.1, -0.63, 0.097, 0, 180, 0])
    rospy.loginfo("Going to second point")
    robot.go_to_goal([0.1, -0.63, 0.097, 0, 180, 0])
    rospy.loginfo('Moving Up')
    robot.go_to_goal([0.1, -0.63, 0.2, 0, 180, 0])
    rospy.spin()
|
nilq/baby-python
|
python
|
"""Activation Functions Module.
"""
import numpy as np
class Activation:
    """Base class for the activation function.

    Warning: This class should not be used directly.
    Use derived classes instead.
    """

    def f(self, x):
        """Compute the activation function on x.

        Warning: Override this method in order to
        implement the activation function.

        Parameters
        ----------
        x : array-like, shape = [n_samples, out_layer_dim]
            The output of a layer, usually corresponding to
            x = np.dot(A, W), where A is the input matrix to a layer
            and W is the weight matrix.
        """
        pass

    def derivative(self, x):
        """Compute the derivative of the activation function on x.

        Warning: Override this method in order to
        implement the derivative of the activation function.

        Parameters
        ----------
        x : array-like
            The derivative is evaluated elementwise on x.
        """
        pass
class Sigmoid(Activation):
    """Logistic sigmoid activation: f(x) = 1 / (1 + exp(-a * x)).

    Attributes
    ----------
    a : float
        Slope parameter used to dilate or shrink the sigmoid;
        larger values make it steeper.
    """

    def __init__(self, a=1):
        self.a = a

    def f(self, x):
        """Return 1 / (1 + exp(-a * x)) elementwise on x."""
        return 1 / (1 + np.exp(-self.a * x))

    def derivative(self, x):
        """Return a * f(x) * (1 - f(x)), the derivative of the sigmoid at x."""
        fx = self.f(x)
        return self.a * fx * (1 - fx)
class Identity(Activation):
    """Identity activation: f(x) = x."""

    def f(self, x):
        """Return x unchanged."""
        return x

    def derivative(self, x):
        """Return an array of ones with the same shape as x (f'(x) = 1)."""
        return np.ones(x.shape)
class Tanh(Activation):
    """Hyperbolic tangent activation: f(x) = tanh(a * x / 2).

    Attributes
    ----------
    a : float
        Dilation parameter; the default a=2 gives the standard tanh(x).
    """

    def __init__(self, a=2):
        """Store the dilation parameter.

        Parameters
        ----------
        a : float
            Value used to dilate and shrink the tanh: tanh(a*x/2).
        """
        self.a = a

    def f(self, x):
        """Return the value of the activation function on x::
            f(x) = tanh(a*x/2).

        Parameters
        ----------
        x : array-like, shape = [n_samples, out_layer_dim]
            The output of a layer.

        Returns
        -------
        The value of the activation function on x.
        """
        return np.tanh(self.a * x / 2)

    def derivative(self, x):
        """Return the derivative of the activation function on x::
            f'(x) = (a/2) * (1 - tanh(a*x/2)**2)

        Parameters
        ----------
        x : array-like
            The derivative is evaluated elementwise on x.

        Returns
        -------
        The derivative of the activation function on x.
        """
        # BUG FIX: the chain-rule factor a/2 was missing, making the gradient
        # wrong for any a != 2. For the default a=2 the result is unchanged.
        return (self.a / 2) * (1 - np.tanh(self.a * x / 2) ** 2)
class Relu(Activation):
    """Rectified linear unit activation: f(x) = max(0, x)."""

    def f(self, x):
        """Return max(0, x) elementwise."""
        return np.maximum(x, 0)

    def derivative(self, x):
        """Return the elementwise derivative: 1 where x > 0, otherwise 0."""
        return np.greater(x, 0).astype(int)
# NOTE: the previous implementation was marked "da sistemare la softmax"
# ("softmax to be fixed") and was numerically and dimensionally broken.
class Softmax(Activation):
    """Softmax activation function.

    `f` normalizes along the last axis; `derivative` returns the Jacobian
    for a 1-D input.
    """

    def f(self, x):
        """Return softmax(x) computed along the last axis of x.

        Subtracting the per-row maximum before exponentiating avoids
        overflow, and keepdims=True makes the division broadcast correctly
        (the original divided an (n, m) array by an (n,)-shaped sum).
        """
        shifted = np.exp(x - np.max(x, axis=-1, keepdims=True))
        return shifted / np.sum(shifted, axis=-1, keepdims=True)

    def derivative(self, x):
        """Return the softmax Jacobian J = diag(s) - s @ s.T for a 1-D input x."""
        s = self.f(x).reshape(-1, 1)
        return np.diagflat(s) - np.dot(s, s.T)
|
nilq/baby-python
|
python
|
#
# This file is part of Orchid and related technologies.
#
# Copyright (c) 2017-2020 Reveal Energy Services. All Rights Reserved.
#
# LEGAL NOTICE:
# Orchid contains trade secrets and otherwise confidential information
# owned by Reveal Energy Services. Access to and use of this information is
# strictly limited and controlled by the Company. This file may not be copied,
# distributed, or otherwise disclosed outside of the Company's facilities
# except under appropriate precautions to maintain the confidentiality hereof,
# and may not be used in any way not expressly authorized by the Company.
#
import datetime
import itertools
import random
import string

import toolz
def rand_digit() -> int:
    """Return a uniformly random digit in [0, 10)."""
    return random.randrange(10)


def rand_2() -> int:
    """Return a uniformly random integer in [0, 10 ** 2)."""
    return random.randrange(10 ** 2)


def rand_3() -> int:
    """Return a uniformly random integer in [0, 10 ** 3)."""
    return random.randrange(10 ** 3)


def rand_4() -> int:
    """Return a uniformly random integer in [0, 10 ** 4)."""
    return random.randrange(10 ** 4)


def rand_5() -> int:
    """Return a uniformly random integer in [0, 10 ** 5)."""
    return random.randrange(10 ** 5)


def rand_6() -> int:
    """Return a uniformly random integer in [0, 10 ** 6)."""
    return random.randrange(10 ** 6)


def rand_7() -> int:
    """Return a uniformly random integer in [0, 10 ** 7)."""
    return random.randrange(10 ** 7)


def rand_8() -> int:
    """Return a uniformly random integer in [0, 10 ** 8)."""
    return random.randrange(10 ** 8)


def rand_9() -> int:
    """Return a uniformly random integer in [0, 10 ** 9)."""
    return random.randrange(10 ** 9)
def rand_alpha() -> str:
    """Return one uniformly chosen ASCII letter (a-z or A-Z)."""
    return random.choice(string.ascii_letters)
def rand_alphas(count: int = -1):
    """
    Generate a possibly infinite iterator of randomly generated (ASCII) letters.

    Args:
        count: The number of characters in the sequence; any non-positive
            value (default -1) yields an infinite stream.

    Returns:
        An iterator of single-character strings of the specified length.
    """
    def infinite_alphas():
        while True:
            yield random.choice(string.ascii_letters)

    if count > 0:
        # itertools.islice is the stdlib equivalent of toolz.take; using it
        # removes the third-party dependency from this function.
        return itertools.islice(infinite_alphas(), count)
    else:
        return infinite_alphas()
def rand_time_stamp(begin_year: int, end_year: int) -> datetime.datetime:
    """
    Generate a random time stamp for years `begin_year` to (and including) `end_year`.

    Args:
        begin_year: The minimum year of the generated time stamps.
        end_year: The maximum year of the generated time stamps.

    Returns:
        The generated time stamp.
    """
    while True:
        year = random.randrange(begin_year, end_year + 1)
        month = random.randrange(1, 12 + 1)
        day = random.randrange(1, 31 + 1)
        hour = random.randrange(24)
        minute = random.randrange(60)
        second = random.randrange(60)
        try:
            return datetime.datetime(year, month, day, hour, minute, second)
        except ValueError:
            # Invalid combination (e.g. Feb 30): draw a fresh candidate.
            continue
def draw_normal(mu: float = 0, sigma: float = 1.0) -> float:
    """
    Draw one sample from the normal distribution N(mu, sigma).

    Args:
        mu: The mean of the normal distribution (default = 0.0).
        sigma: The standard deviation of the normal distribution (default = 1.0).

    Returns:
        The value "drawn" from the specified distribution.
    """
    return random.gauss(mu, sigma)
def sample_normal(mu: float = 0, sigma: float = 1.0, count: int = 3):
    """
    Lazily generate `count` random values from N(mu, sigma).

    Args:
        mu: The mean of the normal distribution (default = 0.0).
        sigma: The standard deviation of the normal distribution (default = 1.0).
        count: The number of samples to return (default = 3).

    Returns:
        An iterator over `count` values "drawn" from the specified distribution.
        (The previous `-> float` annotation was wrong: the result was always
        a lazy iterable, never a float.)
    """
    # A generator expression is the stdlib equivalent of the previous
    # `toolz.map(lambda _: draw_normal(...), range(count))` and drops the
    # third-party dependency.
    return (draw_normal(mu, sigma) for _ in range(count))
|
nilq/baby-python
|
python
|
from django import forms
class AgendaForm(forms.Form):
    """Agenda entry form: a calendar date plus a free-text `lc` field."""

    date = forms.DateField()
    lc = forms.CharField(max_length=50)
|
nilq/baby-python
|
python
|
# Zoo feeding tracker: read "command:name:grams:area" lines until 'Last Info',
# then report the remaining food per animal and the areas with hungry animals.
info = input()
animal_food = {}  # animal name -> grams of food still needed
areas = {}        # area name -> count of hungry animals in that area
while True:
    if info == 'Last Info':
        break
    tokens = info.split(':')
    command = tokens[0]
    name = tokens[1]
    number = int(tokens[2])
    area = tokens[3]
    if command == 'Add':
        # Accumulate the food requirement for the animal...
        if name in animal_food:
            animal_food[name] += number
        else:
            animal_food.update({name: number})
        # ...and bump the hungry-animal counter for its area.
        if area in areas:
            areas[area] += 1
        if area not in areas:
            areas.update({area: 1})
    elif command == 'Feed':
        if name in animal_food:
            animal_food[name] -= number
            # The animal is fully fed once its requirement drops to zero:
            # remove it and decrement (possibly dropping) its area counter.
            if animal_food[name] <= 0:
                animal_food.pop(name)
                print(f'{name} was successfully fed')
                areas[area] -= 1
                if areas[area] <= 0:
                    del areas[area]
    info = input()
print('Animals:')
# Sort by remaining food (descending), then by name (ascending).
a = sorted(animal_food.items(), key=lambda x: (-x[1], x[0]))
for (key, value) in a:
    print(f'{key} -> {value}g')
print('Areas with hungry animals:')
for key in sorted(areas, reverse=True):
    print("{} : {}".format(key, areas[key]))
# print("\n".join("{} : {}".format(k, v) for k, v in areas.items()))
|
nilq/baby-python
|
python
|
from typing import Any, Callable, Dict, Type
from marshmallow import Schema, fields, post_load
from typing_extensions import Protocol
from .dataclasses import Picture, Product, ProductPage, Size, StockItem
class Dataclass(Protocol):
    """Structural type for any class constructible from keyword arguments,
    i.e. the dataclasses the schemas below deserialize into."""

    def __init__(self, **kwargs):
        ...
def get_dataclass_maker(cls: Type[Dataclass]) -> Callable[..., Dataclass]:
    """Build a marshmallow ``post_load`` hook that instantiates *cls* from the
    deserialized field dict."""
    def _construct(self, payload: Dict[str, Any], **kwargs: Any) -> Dataclass:
        # `self` is the Schema instance; only the validated payload matters.
        return cls(**payload)

    return _construct
class PictureSchema(Schema):
    """Schema for a product picture: numeric id plus full-size and thumbnail URLs."""

    id = fields.Int(required=True)
    pic = fields.Url(required=True)
    thumbnail = fields.Url(required=True)

    # post_load hook: build a Picture dataclass from the validated dict.
    make_picture = post_load(get_dataclass_maker(Picture))
class SizeSchema(Schema):
    """Schema for a shoe/garment size entry."""

    id = fields.Int(required=True)
    size = fields.Int(required=True)

    # post_load hook: build a Size dataclass from the validated dict.
    make_size = post_load(get_dataclass_maker(Size))
class StockItemSchema(Schema):
    """Schema for stock availability of one size of a product."""

    id = fields.Int(required=True)
    size = fields.Nested(SizeSchema, required=True)
    stock = fields.Int(required=True)

    # post_load hook: build a StockItem dataclass from the validated dict.
    make_stock_item = post_load(get_dataclass_maker(StockItem))
class ProductSchema(Schema):
    """Schema for a single product, including its pictures and stock items."""

    url = fields.Url(required=True)
    id = fields.Int(required=True)
    code = fields.Str(required=True)
    name = fields.Str(required=True)
    brand = fields.Str(required=True)
    category = fields.Str(required=True)
    season = fields.Str(required=True)
    # price arrives as a string (currency amount), not a number
    price = fields.Str(required=True)
    price_currency = fields.Str(required=True)
    is_new = fields.Bool(required=True)
    color = fields.Str(required=True)
    inner_material = fields.Str(required=True)
    outer_material = fields.Str(required=True)
    # sole is the only nullable field in the payload
    sole = fields.Str(required=True, allow_none=True)
    main_picture = fields.Nested(PictureSchema, required=True)
    secondary_pictures = fields.Nested(PictureSchema, many=True, required=True)
    stock_items = fields.Nested(StockItemSchema, many=True, required=True)

    # post_load hook: build a Product dataclass from the validated dict.
    make_product = post_load(get_dataclass_maker(Product))
class ProductPageSchema(Schema):
    """Schema for one page of paginated product results.

    ``data_key`` maps the API's camelCase pagination keys onto snake_case
    attributes of the ProductPage dataclass."""

    count = fields.Int(required=True)
    page = fields.Int(required=True)
    num_pages = fields.Int(required=True, data_key="numPages")
    has_previous_page = fields.Bool(required=True, data_key="hasPrevious")
    has_next_page = fields.Bool(required=True, data_key="hasNext")
    results = fields.List(fields.Nested(ProductSchema), required=True)

    # post_load hook: build a ProductPage dataclass from the validated dict.
    make_product_page = post_load(get_dataclass_maker(ProductPage))
|
nilq/baby-python
|
python
|
from typing import List
class Solution:
    def checkPossibility(self, nums: List[int]) -> bool:
        """Return True if `nums` can become non-decreasing by modifying at
        most one element. The list may be modified in place."""
        changed = False
        for i in range(len(nums) - 1):
            if nums[i] <= nums[i + 1]:
                continue                    # pair already in order
            if changed:
                return False                # second violation: impossible
            changed = True
            if i > 0 and nums[i - 1] > nums[i + 1]:
                # Lowering nums[i] would break the previous pair,
                # so raise nums[i+1] instead.
                nums[i + 1] = nums[i]
            else:
                # Safe to lower nums[i] to keep the sequence flat.
                nums[i] = nums[i + 1] if i == 0 else nums[i - 1]
        return True
if __name__ == '__main__':
    # Quick manual check of checkPossibility on a sample input.
    sample = [3, 4, 2, 3]
    # sample = [4, 2, 3]
    print(Solution().checkPossibility(sample))
|
nilq/baby-python
|
python
|
from tpdataset import RawDataSet, DataDownloader
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
from pyctlib import vector, path, fuzzy_obj
import math
import torch
from ..download_googledrive import download_file_from_google_drive
from functools import partial
import torch
import os
import PIL
from typing import Any, Callable, List, Optional, Union, Tuple
from torchvision.datasets.utils import check_integrity, verify_str_arg
class CelebA:
    """
    Wrapper around torchvision's CelebA dataset.

    Exposes the train / valid / test splits both as lazily built, cached
    `vector`s (train_set / valid_set / test_set properties) and via
    DataLoader factory methods, and can download/extract the dataset
    files from Google Drive.
    """

    base_folder = "celeba"
    file_list = [
        # File ID                               MD5 Hash                            Filename
        ("0B7EVK8r0v71pZjFTYXZWM3FlRnM", "00d2c5bc6d35e252742224ab0c1e8fcb", "img_align_celeba.zip"),
        ("0B7EVK8r0v71pblRyaVFSWGxPY0U", "75e246fa4810816ffd6ee81facbd244c", "list_attr_celeba.txt"),
        ("1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS", "32bd1bd63d3c78cd57e08160ec5ed1e2", "identity_CelebA.txt"),
        ("0B7EVK8r0v71pbThiMVRxWXZ4dU0", "00566efa6fedff7a56946cd1c10f1c16", "list_bbox_celeba.txt"),
        ("0B7EVK8r0v71pd0FJY3Blby1HUTQ", "cc24ecafdb5b50baae59b03474781f8c", "list_landmarks_align_celeba.txt"),
        ("0B7EVK8r0v71pY0NSMzRuSXJEVkk", "d32c9cbf5e040fd4025c592c306e6668", "list_eval_partition.txt"),
    ]

    def __init__(self, root="", transform="default", download=False):
        """
        Args:
            root: dataset root directory.
            transform: "default" (ToTensor), "vae" (the crop/resize pipeline
                used by PyTorch-VAE), or any transform object.
            download: when True, fetch and extract the dataset files first.
        """
        self.root = root
        if isinstance(transform, str) and transform == "default":
            self.trans = transforms.ToTensor()
        elif isinstance(transform, str) and transform == "vae":
            """
            transforms from https://github.com/AntixK/PyTorch-VAE/blob/8700d245a9735640dda458db4cf40708caf2e77f/experiment.py#L14
            """
            SetRange = transforms.Lambda(lambda X: 2 * X - 1.)  # map [0, 1] -> [-1, 1]
            self.trans = transforms.Compose([transforms.RandomHorizontalFlip(),
                                             transforms.CenterCrop(148),
                                             transforms.Resize(64),
                                             transforms.ToTensor(),
                                             SetRange])
        else:
            self.trans = transform
        if download:
            self.download()
        self.__train_set = datasets.CelebA(root=str(root), split="train", transform=self.trans, download=False)
        self.__test_set = datasets.CelebA(root=str(root), split="test", transform=self.trans, download=False)
        self.__valid_set = datasets.CelebA(root=str(root), split="valid", transform=self.trans, download=False)

    def _check_integrity(self) -> bool:
        """Return True when every required (non-archive) file is present with
        the expected MD5 and the extracted image directory exists."""
        for (_, md5, filename) in self.file_list:
            fpath = os.path.join(self.root, self.base_folder, filename)
            _, ext = os.path.splitext(filename)
            # Allow original archive to be deleted (zip and 7z)
            # Only need the extracted images
            if ext not in [".zip", ".7z"] and not check_integrity(fpath, md5):
                return False
        # Should check a hash of the images
        return os.path.isdir(os.path.join(self.root, self.base_folder, "img_align_celeba"))

    def download(self):
        """Download any missing dataset files from Google Drive, then extract
        the image archive."""
        import zipfile
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        for (file_id, md5, filename) in self.file_list:
            fpath = os.path.join(self.root, self.base_folder, filename)
            _, ext = os.path.splitext(filename)
            if check_integrity(fpath, md5):
                # typo fix: message previously read "veriried"
                print("file {} already downloaded and verified".format(filename))
                continue
            download_file_from_google_drive(file_id, os.path.join(self.root, self.base_folder, filename), md5)
            print("file {} has been successfully downloaded!".format(filename))
        with zipfile.ZipFile(os.path.join(self.root, self.base_folder, "img_align_celeba.zip"), "r") as f:
            f.extractall(os.path.join(self.root, self.base_folder))

    @property
    def train_set(self):
        """Train split as a `vector`, built on first access and then cached."""
        if hasattr(self, "_CelebA__train_set_vector"):
            return self.__train_set_vector
        self.__train_set_vector = vector(self.__train_set, str_function=lambda x: "\n".join(["Dataset CelebA", " Number of datapoints: {}".format(x.length), " Split: Train"]))
        return self.__train_set_vector

    @property
    def valid_set(self):
        """Validation split as a `vector`, built on first access and then cached."""
        if hasattr(self, "_CelebA__valid_set_vector"):
            return self.__valid_set_vector
        self.__valid_set_vector = vector(self.__valid_set, str_function=lambda x: "\n".join(["Dataset CelebA", " Number of datapoints: {}".format(x.length), " Split: Validation"]))
        return self.__valid_set_vector

    @property
    def test_set(self):
        """Test split as a `vector`, built on first access and then cached."""
        if hasattr(self, "_CelebA__test_set_vector"):
            return self.__test_set_vector
        self.__test_set_vector = vector(self.__test_set, str_function=lambda x: "\n".join(["Dataset CelebA", " Number of datapoints: {}".format(x.length), " Split: Test"]))
        return self.__test_set_vector

    def train_dataloader(self, batch_size=1, shuffle=True, num_workers=0, pin_memory=True, drop_last=True):
        return DataLoader(self.__train_set, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory, drop_last=drop_last)

    def valid_dataloader(self, batch_size=1, shuffle=False, num_workers=0, pin_memory=True, drop_last=True):
        return DataLoader(self.__valid_set, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory, drop_last=drop_last)

    def train_valid_dataloader(self, batch_size=1, shuffle=True, num_workers=0, pin_memory=True, drop_last=True):
        """Return (train, valid) DataLoaders sharing the same batch settings.

        BUG FIX: `drop_last` was previously referenced in the body without
        being a parameter, so every call raised NameError; it is now an
        explicit keyword parameter (default True, matching the other loaders).
        """
        return (self.train_dataloader(batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory, drop_last=drop_last),
                self.valid_dataloader(batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=pin_memory, drop_last=drop_last))

    def test_dataloader(self, batch_size=1, shuffle=False, num_workers=0, pin_memory=True):
        return DataLoader(self.__test_set, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory)

    def __repr__(self):
        ret = ["Dataset CelebA"]
        ret.append(" # train: {}".format(len(self.__train_set)))
        ret.append(" # test: {}".format(len(self.__test_set)))
        ret.append(" # valid: {}".format(len(self.__valid_set)))
        return "\n".join(ret)

    def __str__(self):
        return self.__repr__()
|
nilq/baby-python
|
python
|
"""Model for blog."""
from django.db import models
from django.utils import timezone
from django.conf import settings
class Post(models.Model):
    """Post model for blog."""

    # Deleting the author's user account deletes their posts.
    author = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
    )
    title = models.CharField(max_length=200)
    text = models.TextField()
    created_date = models.DateTimeField(default=timezone.now)
    # Unset (None) until publish() is called.
    published_date = models.DateTimeField(blank=True, null=True)

    def publish(self):
        """Stamp the post with the current time and persist it."""
        self.published_date = timezone.now()
        self.save()

    def __str__(self):
        """Render a text (string) with the Post title."""
        return str(self.title)

    def approved_comments(self):
        """Return the queryset of this post's approved comments.

        `comments` is the related_name declared on Comment.post."""
        return self.comments.filter(approved_comment=True)
class Comment(models.Model):
    """Comment model for blog."""

    # Deleting the parent Post cascades to its comments; `comments` is the
    # reverse accessor used by Post.approved_comments().
    post = models.ForeignKey(
        'blog.Post', on_delete=models.CASCADE,
        related_name='comments',
    )
    author = models.CharField(max_length=200)
    text = models.TextField()
    created_date = models.DateTimeField(default=timezone.now)
    # Comments start unapproved; approve() flips this flag.
    approved_comment = models.BooleanField(default=False)

    def approve(self):
        """Mark this comment as approved and persist the change."""
        self.approved_comment = True
        self.save()

    def __str__(self):
        """Return the comment text."""
        return str(self.text)
|
nilq/baby-python
|
python
|
"""Números
Faça um programa que produza o resultado abaixo (sendo N fornecido pelo
utilizador). O exercício deve ser resolvido utilizando recursão.
1
1 2
1 2 3
...
1 2 3 ... N
"""
def show(n: int) -> None:
    """Recursively print the number triangle: row k contains 1..k, for k in
    1..n. Rows 1..n-1 are printed (by the recursive call) before row n."""
    if n > 0:
        show(n - 1)
        for value in range(1, n + 1):
            print(f'{value:>3} ', end='')
        print()
if __name__ == '__main__':
    # Read N from the user and print the triangle.
    value = int(input('Introduza o valor de N: '))
    show(value)
|
nilq/baby-python
|
python
|
from django.contrib import admin
from website.models import (Category, SubCategory, WebsiteRecommendation,
WebsiteComment, BookRecommendation, BookComment,
VideoRecommendation, VideoComment)
class CategoryAdmin(admin.ModelAdmin):
    """Admin options for Category: auto-fill the slug from the name field."""
    prepopulated_fields = {'slug': ('name',)}
class SubCategoryAdmin(admin.ModelAdmin):
    """Admin options for SubCategory: auto-fill the slug from the name field."""
    prepopulated_fields = {'slug': ('name',)}
# Register the models with the admin site. Category and SubCategory use the
# slug-prepopulating admin classes above; the rest use the default ModelAdmin.
admin.site.register(Category, CategoryAdmin)
admin.site.register(SubCategory, SubCategoryAdmin)
admin.site.register(WebsiteRecommendation)
admin.site.register(WebsiteComment)
admin.site.register(BookRecommendation)
admin.site.register(BookComment)
admin.site.register(VideoRecommendation)
admin.site.register(VideoComment)
|
nilq/baby-python
|
python
|
"""
Created on 9 Dec 2020
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
The A4CalibratedDatum is designed to provide a model training data set that encapsulates the calibration of the
electrochemical sensor - the fields we_v_cal, ae_v_zero_cal and cal_x_v have no meaning beyond this.
cal_x_v is only relevant to sensors with NO2 cross-sensitivity.
"""
from collections import OrderedDict
from scs_core.data.datum import Datum
from scs_core.gas.a4.a4_calib import A4Calib
from scs_core.gas.a4.a4_datum import A4Datum
# --------------------------------------------------------------------------------------------------------------------
class A4Calibrator(object):
    """
    Applies an A4Calib's calibration constants (converted from millivolts to
    volts) to raw A4 datums: zero offset, gain, and - for sensors with NO2
    cross-sensitivity - the cross-sensitivity voltage.
    """

    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, calib: A4Calib):
        """
        Constructor
        """
        self.__calib = calib                                    # A4Calib

        # convert millivolt calibration fields to volts once, up front...
        self.__we_elc_v = calib.we_elc_mv / 1000.0
        self.__ae_elc_v = calib.ae_elc_mv / 1000.0

        self.__we_sens_v = calib.we_sens_mv / 1000.0
        self.__we_no2_x_sens_v = None if calib.we_no2_x_sens_mv is None else calib.we_no2_x_sens_mv / 1000.0

    # ----------------------------------------------------------------------------------------------------------------

    def calibrate(self, datum, no2_cnc=None):
        """
        Return an A4CalibratedDatum for the given A4 datum. When `no2_cnc` is
        supplied and the sensor has NO2 cross-sensitivity, the cross-sensitivity
        voltage is included; otherwise cal_x_v is None.
        """
        # zero offset...
        we_v_zero_cal = datum.we_v - self.__we_elc_v
        ae_v_zero_cal = datum.ae_v - self.__ae_elc_v

        # gain...
        we_v_cal = we_v_zero_cal / self.__we_sens_v

        # cross sensitivity... (robustness fix: sensors without NO2
        # cross-sensitivity have we_no2_x_sens_v None - previously a supplied
        # no2_cnc raised TypeError for such sensors; now cal_x_v is None)
        cal_x_v = None if no2_cnc is None or self.__we_no2_x_sens_v is None \
            else no2_cnc * self.__we_no2_x_sens_v

        return A4CalibratedDatum(datum.we_v, datum.ae_v, datum.we_c, datum.cnc, we_v_cal, ae_v_zero_cal, cal_x_v)

    # ----------------------------------------------------------------------------------------------------------------

    def __str__(self, *args, **kwargs):
        return "A4Calibrator:{calib:%s}" % self.__calib
# --------------------------------------------------------------------------------------------------------------------
class A4CalibratedDatum(A4Datum):
    """
    An A4Datum extended with calibrated values: the zero-offset/gain-corrected
    WE voltage, the zero-offset AE voltage and (optionally) the NO2
    cross-sensitivity voltage. Produced by A4Calibrator.calibrate().
    """

    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, we_v, ae_v, we_c, cnc, we_v_cal, ae_v_cal, cal_x_v):
        """
        Constructor
        """
        super().__init__(we_v, ae_v, we_c, cnc)

        self.__we_v_cal = Datum.float(we_v_cal, 6)          # calibrated WE voltage
        self.__ae_v_cal = Datum.float(ae_v_cal, 6)          # calibrated AE voltage
        self.__cal_x_v = Datum.float(cal_x_v, 9)            # calibrated cross-sensitivity voltage

    # ----------------------------------------------------------------------------------------------------------------

    def as_json(self):
        """Return an OrderedDict JSON representation; calXV is omitted when None."""
        jdict = OrderedDict()

        jdict['weV'] = self.we_v
        jdict['aeV'] = self.ae_v

        jdict['weC'] = self.we_c                            # may be None
        jdict['cnc'] = self.cnc                             # may be None

        jdict['weVCal'] = self.we_v_cal
        jdict['aeVCal'] = self.ae_v_cal

        if self.cal_x_v is not None:
            jdict['calXV'] = self.cal_x_v

        return jdict

    # ----------------------------------------------------------------------------------------------------------------

    @property
    def we_v_cal(self):
        return self.__we_v_cal

    @property
    def ae_v_cal(self):
        return self.__ae_v_cal

    @property
    def cal_x_v(self):
        return self.__cal_x_v

    # ----------------------------------------------------------------------------------------------------------------

    def __str__(self, *args, **kwargs):
        return "A4CalibratedDatum(v1):{we_v:%s, ae_v:%s, we_c:%s, cnc:%s, we_v_cal:%s, ae_v_cal:%s, cal_x_v:%s}" % \
            (self.we_v, self.ae_v, self.we_c, self.cnc, self.we_v_cal, self.ae_v_cal, self.cal_x_v)
|
nilq/baby-python
|
python
|
from application.domain.application import Application
from application.domain.iBaseWatcher import IBaseWatcher
from application.domain.iCerebrum import ICerebrum
from application.infrastructure.championshipBaseWatcher import ChampionshipBaseWatcher
from application.infrastructure.remoteBaseWatcher import RemoteBaseWatcher
from application.infrastructure.remoteCerebrum import RemoteCerebrum
from application.infrastructure.robotWorker import RobotWorker
from communication.infrastructure.socketBaseConnector import SocketBaseConnector
from communication.service.communicationService import CommunicationService
from communication.service.displayService import DisplayService
from communication.service.positionService import PositionService
from context.gpioMapper import GpioMapper
from cortex.domain.cortex import Cortex
from cortex.domain.dexterityCortex import DexterityCortex
from cortex.domain.directionCortex import DirectionCortex
from cortex.domain.pathableCatalog import PathableCatalog
from cortex.infrastructure.communicationCortex import CommunicationCortex
from cortex.infrastructure.napCortex import NapCortex
from cortex.infrastructure.pathableCommunicator import PathableCommunicator
from cortex.infrastructure.pathableFactory import PathableFactory
from cortex.infrastructure.visualCortex import VisualCortex
from dexterity.infrastructure.adcCharge import AdcCharge
from dexterity.service.dexterityService import DexterityService
from mobility.infrastructure.directDriver import DirectDriver
from mobility.infrastructure.positionDriver import PositionDriver
from mobility.service.mobilityService import MobilityService
from polling.service.pollingService import PollingService
from remote.messageTranslator import MessageTranslator
from remote.remoteService import RemoteService
from sight.infrastructure.pololuEyes import PololuEyes
from sight.service.sightService import SightService
from vision.infrastructure.openCvCameraCalibrationFactory import OpenCvCameraCalibrationFactory
from vision.infrastructure.openCvCameraFactory import OpenCvCameraFactory
from vision.infrastructure.openCvDestinationFinder import OpenCvDestinationFinder
from vision.infrastructure.openCvItemFinder import OpenCvItemFinder
from vision.infrastructure.openCvQrCodeReader import OpenCvQrCodeReader
from vision.service.objectiveParser import ObjectiveParser
from vision.service.visionService import VisionService
class CommandReceiver:
    """Composition root: constructs and wires every service and cortex, then
    assembles them into the Application."""

    def __init__(self, port: int) -> None:
        """
        Args:
            port: TCP port for the socket connection to the base station.
        """
        self._chargeable = AdcCharge()
        self._dexterity_service = DexterityService(GpioMapper.get_electromagnet_gpio())
        self._sight_service = SightService(PololuEyes())
        self._vision_service = VisionService(OpenCvCameraFactory(), OpenCvCameraCalibrationFactory(),
                                             OpenCvQrCodeReader(), OpenCvItemFinder(), OpenCvDestinationFinder(),
                                             ObjectiveParser())
        self._display_service = DisplayService(GpioMapper.get_light_led())
        self._base_connector = SocketBaseConnector(port)
        self._communication_service = CommunicationService(self._base_connector)
        self._position_service = PositionService(self._communication_service)
        self._drivable = GpioMapper.get_drivable_gpio()
        self._movement_driver = PositionDriver(self._position_service, DirectDriver())
        self._mobility_service = MobilityService(self._drivable, self._movement_driver)
        self._remote_service = RemoteService(self._mobility_service)
        self._message_translator = MessageTranslator()
        self._remote_cerebrum = RemoteCerebrum()
        self._pathable_communicator = PathableCommunicator(self._communication_service)
        self._pathable_catalog = PathableCatalog(self._pathable_communicator)
        # BUG FIX: the DirectionCortex was previously constructed twice in a
        # row with identical arguments; the redundant construction is removed.
        self._direction_cortex = DirectionCortex(self._pathable_catalog, self._position_service,
                                                 self._mobility_service, self._pathable_communicator)
        self._nap_cortex = NapCortex(self._mobility_service, self._direction_cortex)
        self._visual_cortex = VisualCortex(self._vision_service, self._sight_service)
        self._item_chooser = PollingService()
        self._dexterity_cortex = DexterityCortex(self._mobility_service, self._dexterity_service,
                                                 self._direction_cortex, self._visual_cortex, self._item_chooser)

    def application(self) -> Application:
        """Assemble and return the fully wired Application."""
        remote_cerebrum = self._make_remote_cerebrum()
        robot_worker = self._make_robot_worker()
        base_watcher = self.make_base_watcher()
        return Application(remote_cerebrum, robot_worker, base_watcher)

    def make_base_watcher(self) -> IBaseWatcher:
        """Build the base watcher: a remote watcher wrapping the championship watcher."""
        cortex = self._make_cortex()
        championship_watcher = ChampionshipBaseWatcher(self._communication_service, self._pathable_catalog,
                                                       PathableFactory(), self._position_service,
                                                       self._nap_cortex)
        return RemoteBaseWatcher(self._communication_service, self._remote_service, self._display_service,
                                 self._message_translator, self._dexterity_service, self._remote_cerebrum,
                                 cortex, self._direction_cortex, championship_watcher, self._sight_service)

    def _make_remote_cerebrum(self) -> ICerebrum:
        # The cerebrum is shared with the base watcher, so reuse the instance.
        return self._remote_cerebrum

    def _make_robot_worker(self) -> RobotWorker:
        return RobotWorker(self._vision_service, self._communication_service, self._display_service, self._chargeable)

    def _make_cortex(self) -> Cortex:
        communication_cortex = CommunicationCortex(self._display_service, self._communication_service)
        return Cortex(communication_cortex, self._visual_cortex, self._direction_cortex, self._nap_cortex,
                      self._dexterity_cortex)
|
nilq/baby-python
|
python
|
'''
Author : MiKueen
Level : Medium
Company : Google
Problem Statement : Count Unival Subtrees
A unival tree (which stands for "universal value") is a tree where all nodes under it have the same value.
Given the root to a binary tree, count the number of unival subtrees.
For example, the following tree has 5 unival subtrees:
0
/ \
1 0
/ \
1 0
/ \
1 1
'''
class Node:
    """Binary tree node holding a value and optional left/right children."""

    def __init__(self, value, left=None, right=None):
        self.value = value
        self.left = left
        self.right = right
def check_unival(root):
    """Return True when every node in the tree rooted at *root* carries the
    same value as *root* (an empty tree is unival)."""
    if root is None:
        return True
    # Any child whose value differs from the root's disqualifies the tree.
    for child in (root.left, root.right):
        if child is not None and child.value != root.value:
            return False
    # Both subtrees must themselves be unival.
    return check_unival(root.left) and check_unival(root.right)
def helper(root):
    """Return (unival-subtree count, is-this-subtree-unival) for *root*."""
    if root is None:
        return 0, True

    left_count, left_ok = helper(root.left)
    right_count, right_ok = helper(root.right)

    # This subtree is unival only when both children are unival subtrees
    # and neither child's value differs from this node's value.
    is_unival = left_ok and right_ok
    if root.left is not None and root.left.value != root.value:
        is_unival = False
    if root.right is not None and root.right.value != root.value:
        is_unival = False

    total = left_count + right_count
    return (total + 1, True) if is_unival else (total, False)
def count_univals(root):
    """Return the number of unival subtrees in the tree rooted at *root*."""
    # FIX: the result variable previously shadowed the module-level
    # check_unival() function; renamed to avoid the shadowing.
    count, _is_unival = helper(root)
    return count
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.