text
string
size
int64
token_count
int64
#generate_target.py #Generates a list of target genes from a flybase output import os import sys def main(): in_path = sys.argv[1] out_path = sys.argv[2] infile = open(in_path, "r") outfile = open(out_path, "x") for l0 in infile: l0s = l0.split(",") for item in l0s: if "Dsec" in item: outfile.write(item.split("\\")[1]+"\n") if __name__ == "__main__": main()
429
160
import config
from flask import current_app
from flask_login import current_user

from pgadmin.model import db, User, Server
from pgadmin.utils.crypto import encrypt, decrypt

# Known plain text encrypted with the master password; decrypting it back
# successfully proves a candidate password is correct.
MASTERPASS_CHECK_TEXT = 'ideas are bulletproof'


def set_crypt_key(_key, _new_login=True):
    """
    Set the crypt key
    :param _key: The key
    :param _new_login: Is fresh login or password change
    """
    current_app.keyManager.set(_key, _new_login)


def get_crypt_key():
    """
    Returns the crypt key
    :return: (is_key_available, key) tuple
    """
    enc_key = current_app.keyManager.get()

    # if desktop mode and master pass disabled then use the password hash
    if not config.MASTER_PASSWORD_REQUIRED \
            and not config.SERVER_MODE:
        return True, current_user.password
    # if desktop mode and master pass enabled but not yet entered
    elif config.MASTER_PASSWORD_REQUIRED \
            and not config.SERVER_MODE and enc_key is None:
        return False, None
    else:
        return True, enc_key


def validate_master_password(password):
    """
    Validate the password/key against the stored encrypted text
    :param password: password/key
    :return: True if valid, False otherwise
    """
    # master pass is incorrect if decryption fails
    try:
        decrypted_text = decrypt(current_user.masterpass_check, password)

        if isinstance(decrypted_text, bytes):
            decrypted_text = decrypted_text.decode()

        return MASTERPASS_CHECK_TEXT == decrypted_text
    except Exception:
        # BUG FIX: the original had a bare `False` expression here (missing
        # `return`), so a decryption failure silently returned None.
        return False


def set_masterpass_check_text(password, clear=False):
    """
    Set the encrypted text which will be used later to validate entered key
    :param password: password/key
    :param clear: remove the encrypted text
    """
    try:
        masterpass_check = None
        if not clear:
            masterpass_check = encrypt(MASTERPASS_CHECK_TEXT, password)

        # set the encrypted sample text with the new master pass
        db.session.query(User) \
            .filter(User.id == current_user.id) \
            .update({User.masterpass_check: masterpass_check})
        db.session.commit()
    except Exception:
        db.session.rollback()
        raise


def cleanup_master_password():
    """Remove the master password and saved passwords from DB which are
    encrypted using master password. Also remove the encrypted text.
    """
    # also remove the master password check string as it will help if master
    # password entered/enabled again
    set_masterpass_check_text('', clear=True)

    # local import to avoid a circular dependency at module load time
    from pgadmin.browser.server_groups.servers.utils \
        import remove_saved_passwords
    remove_saved_passwords(current_user.id)

    current_app.keyManager.hard_reset()

    from pgadmin.utils.driver import get_driver
    driver = get_driver(config.PG_DEFAULT_DRIVER)

    # refresh every connection manager so stale credentials are dropped
    for server in Server.query.filter_by(user_id=current_user.id).all():
        manager = driver.connection_manager(server.id)
        manager.update(server)


def process_masterpass_disabled():
    """
    On master password disable, remove the connection data from session as it
    may have saved password which will cause trouble
    :return: True if cleanup ran, False otherwise
    """
    if not config.SERVER_MODE and not config.MASTER_PASSWORD_REQUIRED \
            and current_user.masterpass_check is not None:
        cleanup_master_password()
        return True

    return False
3,512
983
from django.urls import path

from .views import DashboardTemplateView, DashboardView

# URL namespace, e.g. reverse('dashboard:index').
app_name = 'dashboard'

urlpatterns = [
    # NOTE(review): both routes point at DashboardView while
    # DashboardTemplateView is imported but unused — confirm whether
    # 'test/' was meant to use it.
    path('test/', DashboardView.as_view(), name='test'),
    path('', DashboardView.as_view(), name='index'),
]
238
80
import random

from model.group import Group


def test_group_removal(app, db, check_ui):
    """Delete a randomly chosen group and verify DB (and optionally UI) state.

    :param app: application fixture exposing the ``group`` helper
    :param db: database fixture
    :param check_ui: when True (``--check_ui`` run option), also compare
        against the group list rendered in the UI
    """
    # BUG FIX: ensure at least one group exists BEFORE sampling — the
    # original called random.choice() on a possibly-empty list (IndexError)
    # and only created a group afterwards, with a stale snapshot.
    if len(db.get_group_list()) == 0:
        app.group.create(Group(name="test_group_random_name",
                               header="random_header",
                               footer="random_footer"))
    old_group_list = db.get_group_list()
    group = random.choice(old_group_list)

    app.group.delete_group_by_id(group.id)
    assert app.group.count() == len(old_group_list) - 1

    new_group_list = db.get_group_list()
    old_group_list.remove(group)
    assert old_group_list == new_group_list

    if check_ui:
        def clean(group):
            # UI strips surrounding spaces from group names.
            return Group(id=group.id, name=group.name.strip())

        db_list = map(clean, new_group_list)
        assert sorted(db_list, key=Group.id_or_max) == \
            sorted(app.group.get_group_list(), key=Group.id_or_max)
904
306
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from parlai.scripts.train_model import setup_args, TrainLoop if __name__ == '__main__': parser = setup_args() parser.set_defaults( task='wizard_of_wikipedia_ko:generator:train', model='projects.wizard_of_wikipedia_ko.generator.t5:T5EndToEndAgent', model_file='/tmp/end2end_generator/model', t5_model_arch='pretrained_model/t5.1.1.base.gin_ke.ke_v100_span_corruption_600K', text_truncate=256, ln='ko', log_every_n_secs=10, validation_patience=12, validation_metric='ppl', validation_metric_mode='min', validation_every_n_epochs=0.5, truncate=256, max_knowledge=32, knowledge_alpha=0.95, knowledge_truncate=64, learningrate=5e-4, warmup_updates=5000, clip=0.1, lr_scheduler='invsqrt', embedding_type='fasttext', beam_size=1, skip_generation=False, batchsize=64, ) TrainLoop(parser.parse_args()).train() # parlai train_model -m projects.wizard_of_wikipedia_ko.generator.t5:T5EndToEndAgent -mf model/ke-t5_test -t wizard_of_wikipedia_ko:generator:random_split --ln en -bs 4 -eps 1 -lr 1e-5 --num-epochs 1 --optimizer adam --t5-model-arch pretrained_model/t5.1.1.base.gin_ke.ke_v100_span_corruption_600K --text_truncate 512 # parlai train_model -t wizard_of_wikipedia_ko:generator:random_split --ln ke_mix -m projects.wizard_of_wikipedia_ko.generator.t5:T5EndToEndAgent -mf model/ke-t5_test --t5-model-arch ../pretrained_model/t5.1.1.base.gin_ke.ke_v100_span_corruption_600K --log-every-n-secs 10 --validation-patience 12 --validation-metric ppl --validation-metric-mode min --validation-every-n-epochs 0.5 -bs 4 --max_knowledge 32 --num-epochs 1
1,946
743
import os
import collections
import numpy as np
from ipywidgets import widgets
from IPython.core.display import display, HTML
import logging

from NeuNorm.normalization import Normalization

from __code import file_handler
from __code.ipywe import myfileselector
from __code.normalization.get import Get
from __code.normalization.metadata_handler import MetadataHandler, MetadataName, METADATA_KEYS
from __code.normalization import utilities

JSON_DEBUGGING = False

MAX_DF_COUNTS_ALLOWED = 900
METADATA_ERROR_ALLOWED = 1
LIST_METADATA_NOT_INSTRUMENT_RELATED = ['filename', 'time_stamp', 'time_stamp_user_format']


class NormalizationWithSimplifySelection:
    """Notebook driver that matches sample images with OB/DF images by
    metadata (acquisition time, detector settings) and runs normalization."""

    working_dir = ''

    def __init__(self, working_dir=''):
        self.working_dir = working_dir
        self.list_of_images = []
        self.input_data_folder = []

        # per-file metadata dicts, keyed by file index, e.g.
        # {0: {65027: 55.0, 65028: 59.2, 65029: 1.0,
        #      'filename': 'full_filename',
        #      'time_stamp': 1454544.34545,
        #      'time_stamp_user_format': '2019-11-19 02:48:47'}, ...}
        self.sample_metadata_dict = {}
        self.ob_metadata_dict = {}
        self.df_metadata_dict = {}

        # master dict keyed by acquisition time, then config name, e.g.
        # {50: {'config0': {'list_sample': [...], 'list_ob': [...],
        #                   'list_df': [...], 'metadata_infos': {},
        #                   'first_images': {'sample': {}, 'ob': {}, 'df': {}},
        #                   'last_images': {'sample': {}, 'ob': {}, 'df': {}},
        #                   'time_range_s_selected': {'before': np.NaN, 'after': np.NaN},
        #                   'time_range_s': {'before': np.NaN, 'after': np.NaN}},
        #       'config1': {...}},
        #  30: {...}}
        self.final_full_master_dict = {}

        # same as final_full_master_dict but with OBs outside the selected
        # time range excluded
        self.final_with_time_range_master_dict = {}

        o_get = Get(parent=self)
        log_file_name = o_get.log_file_name()
        logging.basicConfig(filename=log_file_name,
                            filemode='w',
                            format='[%(levelname)s] - %(asctime)s - %(message)s',
                            level=logging.INFO)  # logging.INFO, logging.DEBUG
        logging.info("*** Starting new session ***")

    def select_sample_folder(self):
        """Show the folder picker for the images to normalize."""
        folder_sample_widget = myfileselector.MyFileSelectorPanel(
            instruction='select folder of images to normalize',
            start_dir=self.working_dir,
            next=self.retrieve_sample_metadata_from_sample_folder,
            type='directory',
            multiple=False)
        folder_sample_widget.show()

    def retrieve_sample_metadata_from_sample_folder(self, sample_folder):
        """Callback: collect the dominant-extension images of the selected
        folder and kick off the metadata retrieval pipeline."""
        logging.info(f"select sample folder: {sample_folder}")
        [list_of_images, _] = file_handler.retrieve_list_of_most_dominant_extension_from_folder(
            folder=sample_folder)
        can_we_continue = self.images_files_found_in_list(list_of_images)
        if can_we_continue:
            logging.info(f"-> number of images found: {len(list_of_images)}")
            self.retrieve_sample_metadata(list_of_images)
        else:
            logging.info(f"-> No images found!")
            display(HTML('<span style="font-size: 20px; color:Red">No images found in the folder selected!</span>'))

    def images_files_found_in_list(self, list_of_images):
        """Return True if any tiff/tif/fits file is present in the list."""
        for _file in list_of_images:
            if (".tiff" in _file) or (".tif" in _file) or (".fits" in _file):
                return True
        return False

    def retrieve_sample_metadata(self, list_of_images):
        """Retrieve sample metadata, then auto-match OB/DF and build the UI."""
        __name__ = "retrieve_sample_metadata"
        logging.info(f"Retrieving sample metadata ({__name__})")
        self.list_of_images = list_of_images
        self.sample_metadata_dict = MetadataHandler.retrieve_metadata(list_of_files=list_of_images,
                                                                      display_infos=False,
                                                                      label='sample')
        self.auto_retrieve_ob_metadata()
        self.auto_retrieve_df_metadata()
        self.match_files()
        self.calculate_first_and_last_ob()
        self.calculate_time_range()
        self.display_time_range_selection_widgets()

    def select_ob_folder(self):
        # BUG FIX: pass the callback itself, not its result — the original
        # called self.retrieve_ob_metadata() with no argument (TypeError).
        self.select_folder(message='open beam',
                           next_function=self.retrieve_ob_metadata)

    def retrieve_ob_metadata(self, selected_folder):
        """Callback: load OB metadata from a manually selected folder."""
        list_of_ob_files = Get.list_of_tiff_files(folder=selected_folder)
        self.ob_metadata_dict = MetadataHandler.retrieve_metadata(list_of_files=list_of_ob_files)

    def auto_retrieve_ob_metadata(self):
        """Load OB metadata from the conventional <working_dir>/raw/ob tree."""
        logging.info(f"> auto_retrieve_ob_metadata")
        folder = os.path.join(self.working_dir, 'raw', 'ob')
        logging.info(f"-> folder: {folder}")
        list_of_ob_files = file_handler.get_list_of_all_files_in_subfolders(folder=folder,
                                                                            extensions=['tiff', 'tif'])
        logging.info(f"-> nbr of ob files found: {len(list_of_ob_files)}")
        self.ob_metadata_dict = MetadataHandler.retrieve_metadata(list_of_files=list_of_ob_files,
                                                                  label='ob')

    def select_folder(self, message="", next_function=None):
        """Generic folder picker; `next_function` receives the chosen path."""
        folder_widget = myfileselector.MyFileSelectorPanel(
            instruction='select {} folder'.format(message),
            start_dir=self.working_dir,
            next=next_function,
            type='directory',
            multiple=False)
        folder_widget.show()

    def select_df_folder(self):
        # BUG FIX: same as select_ob_folder — pass the bound method, do not
        # call it.
        self.select_folder(message='dark field',
                           next_function=self.retrieve_df_metadata)

    def retrieve_df_metadata(self, selected_folder):
        """Callback: load DF metadata from a manually selected folder."""
        list_of_df_files = Get.list_of_tiff_files(folder=selected_folder)
        self.df_metadata_dict = MetadataHandler.retrieve_metadata(list_of_files=list_of_df_files)

    def auto_retrieve_df_metadata(self):
        """Load DF metadata from the conventional <working_dir>/raw/df tree."""
        folder = os.path.join(self.working_dir, 'raw', 'df')
        list_of_df_files = file_handler.get_list_of_all_files_in_subfolders(folder=folder,
                                                                            extensions=['tiff', 'tif'])
        logging.info(f"-> nbr of df files found: {len(list_of_df_files)}")
        self.df_metadata_dict = MetadataHandler.retrieve_metadata(list_of_files=list_of_df_files,
                                                                  label='df')

    def match_files(self):
        """This is where the files will be associated with their respective
        OB, DF by using the metadata."""
        if not JSON_DEBUGGING:
            self.create_master_sample_dict()
        self.match_ob()
        self.match_df()
        if JSON_DEBUGGING:
            # for debugging only, exporting the json
            import json
            with open('/Users/j35/Desktop/which_ob_and_df_to_use.json', 'w') as outfile:
                json.dump(self.final_full_master_dict, outfile)

    def match_ob(self):
        """Associate each OB with the right sample based on acquisition time,
        detector type and aperture."""
        list_ob_dict = self.ob_metadata_dict
        final_full_master_dict = self.final_full_master_dict
        list_of_sample_acquisition = final_full_master_dict.keys()

        for _index_ob in list_ob_dict.keys():
            _all_ob_instrument_metadata = Get.get_instrument_metadata_only(list_ob_dict[_index_ob])
            _ob_instrument_metadata = utilities.isolate_instrument_metadata(_all_ob_instrument_metadata)
            _acquisition_time = _all_ob_instrument_metadata[MetadataName.EXPOSURE_TIME.value]['value']
            if _acquisition_time in list_of_sample_acquisition:
                for _config_id in final_full_master_dict[_acquisition_time].keys():
                    _sample_metadata_infos = final_full_master_dict[_acquisition_time][_config_id]['metadata_infos']
                    if utilities.all_metadata_match(_sample_metadata_infos, _ob_instrument_metadata):
                        final_full_master_dict[_acquisition_time][_config_id]['list_ob'].append(
                            list_ob_dict[_index_ob])

        self.final_full_master_dict = final_full_master_dict

    def match_df(self):
        """Associate each DF of the IPTS with the right samples based on
        detector type used and acquisition time."""
        list_df_dict = self.df_metadata_dict
        final_full_master_dict = self.final_full_master_dict
        list_of_sample_acquisition = final_full_master_dict.keys()

        for _index_df in list_df_dict.keys():
            _all_df_instrument_metadata = Get.get_instrument_metadata_only(list_df_dict[_index_df])
            _df_instrument_metadata = utilities.isolate_instrument_metadata(_all_df_instrument_metadata)
            _acquisition_time = _all_df_instrument_metadata[MetadataName.EXPOSURE_TIME.value]['value']
            if _acquisition_time in list_of_sample_acquisition:
                for _config_id in final_full_master_dict[_acquisition_time].keys():
                    _sample_metadata_infos = final_full_master_dict[_acquisition_time][_config_id]['metadata_infos']
                    if utilities.all_metadata_match(_sample_metadata_infos,
                                                    _df_instrument_metadata,
                                                    list_key_to_check=[METADATA_KEYS['df'][1].value]):
                        final_full_master_dict[_acquisition_time][_config_id]['list_df'].append(
                            list_df_dict[_index_df])

        self.final_full_master_dict = final_full_master_dict

    def _new_config_entry(self, file_entry, instrument_metadata,
                          first_sample_image, last_sample_image):
        """Build a fresh config dict for one acquisition-time/config bucket
        (factored out of the three duplicated literals in the original)."""
        return {'list_sample': [file_entry],
                'first_images': {'sample': first_sample_image, 'ob': {}, 'df': {}},
                'last_images': {'sample': last_sample_image, 'ob': {}, 'df': {}},
                'list_ob': [],
                'list_df': [],
                'time_range_s_selected': {'before': np.NaN, 'after': np.NaN},
                'time_range_s': {'before': np.NaN, 'after': np.NaN},
                'metadata_infos': Get.get_instrument_metadata_only(instrument_metadata)}

    def create_master_sample_dict(self):
        """Group the sample files by acquisition time and matching metadata
        into self.final_full_master_dict."""
        final_full_master_dict = collections.OrderedDict()
        sample_metadata_dict = self.sample_metadata_dict

        # keep record of which image was taken first and which one last
        first_sample_image = sample_metadata_dict[0]
        last_sample_image = sample_metadata_dict[0]

        for _file_index in sample_metadata_dict.keys():
            _dict_file_index = sample_metadata_dict[_file_index]
            _acquisition_time = _dict_file_index[MetadataName.EXPOSURE_TIME.value]['value']
            _instrument_metadata = utilities.isolate_instrument_metadata(_dict_file_index)
            _sample_time_stamp = _dict_file_index['time_stamp']

            # track first/last image by time stamp
            if _sample_time_stamp < first_sample_image['time_stamp']:
                first_sample_image = _dict_file_index
            elif _sample_time_stamp > last_sample_image['time_stamp']:
                last_sample_image = _dict_file_index

            if _acquisition_time not in final_full_master_dict:
                # first time seeing that acquisition time
                final_full_master_dict[_acquisition_time] = {
                    'config0': self._new_config_entry(_dict_file_index,
                                                      _instrument_metadata,
                                                      first_sample_image,
                                                      last_sample_image)}
            else:
                # append to every config whose metadata matches, otherwise
                # open a new config bucket
                _dict_for_this_acquisition_time = final_full_master_dict[_acquisition_time]
                _found_a_match = False
                for _config in _dict_for_this_acquisition_time.values():
                    if utilities.all_metadata_match(metadata_1=_config['metadata_infos'],
                                                    metadata_2=_instrument_metadata):
                        _config['list_sample'].append(_dict_file_index)
                        _config['first_images'] = {'sample': first_sample_image, 'ob': {}, 'df': {}}
                        _config['last_images'] = {'sample': last_sample_image, 'ob': {}, 'df': {}}
                        _found_a_match = True
                if not _found_a_match:
                    nbr_config = len(_dict_for_this_acquisition_time.keys())
                    _dict_for_this_acquisition_time['config{}'.format(nbr_config)] = \
                        self._new_config_entry(_dict_file_index,
                                               _instrument_metadata,
                                               first_sample_image,
                                               last_sample_image)

        self.final_full_master_dict = final_full_master_dict

    def calculate_first_and_last_ob(self):
        """Loop through all the acquisition time keys and config keys to
        figure out the first and last OB of each config."""
        _final_full_master_dict = self.final_full_master_dict
        for _acquisition in _final_full_master_dict.keys():
            current_acquisition_dict = _final_full_master_dict[_acquisition]
            _first_ob_time = np.NaN
            _first_ob = {}
            _last_ob_time = np.NaN
            _last_ob = {}
            for _config in current_acquisition_dict.keys():
                current_acquisition_config_dict = current_acquisition_dict[_config]
                for _ob in current_acquisition_config_dict['list_ob']:
                    _current_ob_time = _ob['time_stamp']
                    if np.isnan(_first_ob_time):
                        # first OB seen initializes both ends
                        _first_ob_time = _current_ob_time
                        _last_ob_time = _current_ob_time
                        _first_ob = _last_ob = _ob
                    elif _current_ob_time < _first_ob_time:
                        _first_ob_time = _current_ob_time
                        _first_ob = _ob
                    elif _current_ob_time > _last_ob_time:
                        _last_ob_time = _current_ob_time
                        _last_ob = _ob
                current_acquisition_config_dict['first_images']['ob'] = _first_ob
                current_acquisition_config_dict['last_images']['ob'] = _last_ob

    def calculate_time_range(self):
        """Calculate the max time range of OB taken before or after the
        experiment; used for the slider selection time range."""
        _final_full_master_dict = self.final_full_master_dict
        for _acquisition in _final_full_master_dict.keys():
            current_acquisition_dict = _final_full_master_dict[_acquisition]
            for _config in current_acquisition_dict.keys():
                current_acquisition_config_dict = current_acquisition_dict[_config]
                first_sample_image = current_acquisition_config_dict['first_images']['sample']
                first_ob_image = current_acquisition_config_dict['first_images']['ob']
                delta_time_before = first_sample_image.get('time_stamp', 0) - first_ob_image.get('time_stamp', 0)
                _time_range_s_before = delta_time_before if delta_time_before > 0 else 0

                last_sample_image = current_acquisition_config_dict['last_images']['sample']
                last_ob_image = current_acquisition_config_dict['last_images']['ob']
                delta_time_after = last_ob_image.get('time_stamp', 0) - last_sample_image.get('time_stamp', 0)
                _time_range_s_after = delta_time_after if delta_time_after > 0 else 0

                _final_full_master_dict[_acquisition][_config]['time_range_s']['before'] = _time_range_s_before
                _final_full_master_dict[_acquisition][_config]['time_range_s']['after'] = _time_range_s_after

    def display_time_range_selection_widgets(self):
        """Build the nested acquisition/config tab widget tree."""
        _final_full_master_dict = self.final_full_master_dict
        _config_tab_dict = {}  # config tab widgets per acquisition
        _acquisition_tabs = widgets.Tab()
        o_get = Get(parent=self)
        for _acquisition_index, _acquisition in enumerate(_final_full_master_dict.keys()):
            _dict_of_this_acquisition = _final_full_master_dict[_acquisition]
            _config_tab = widgets.Tab()
            _current_acquisition_tab_widgets_id = {'config_tab_id': _config_tab}
            for _index, _config in enumerate(_dict_of_this_acquisition.keys()):
                _dict_config = _dict_of_this_acquisition[_config]
                _dict = o_get.full_layout_for_this_config(_dict_config)
                _layout = _dict['verti_layout']
                _config_widgets_id_dict = _dict['config_widgets_id_dict']
                _config_tab.children += (_layout,)
                _config_tab.set_title(_index, _config)
                _current_acquisition_tab_widgets_id[_index] = _config_widgets_id_dict
            _config_tab_dict[_acquisition_index] = _current_acquisition_tab_widgets_id
            _acquisition_tabs.children += (_config_tab,)
            _acquisition_tabs.set_title(_acquisition_index, "Acquisition: {}s".format(_acquisition))
        display(_acquisition_tabs)
        self.acquisition_tab = _acquisition_tabs
        self.config_tab_dict = _config_tab_dict

    def calculate_max_time_before_and_after_exp_for_this_config(self, dict_config):
        """Return [max_time_before, max_time_after] in seconds for the
        before/after-experiment sliders (0 when the OB is inside the run)."""
        first_sample_image_time_stamp = dict_config['first_images']['sample']['time_stamp']
        first_ob_image_time_stamp = dict_config['first_images']['ob'].get('time_stamp', 0)
        max_time_before = max(0, first_sample_image_time_stamp - first_ob_image_time_stamp)

        last_sample_image_time_stamp = dict_config['last_images']['sample']['time_stamp']
        last_ob_image_time_stamp = dict_config['last_images']['ob'].get('time_stamp', 0)
        max_time_after = max(0, last_ob_image_time_stamp - last_sample_image_time_stamp)

        return [max_time_before, max_time_after]

    def populate_metadata_table(self, current_config):
        """Return [label, HTML table] widgets listing the matching metadata."""
        metadata_config = current_config['metadata_infos']
        table_label = widgets.Label("List of Metadata used to match data set",
                                    layout=widgets.Layout(width='30%'))
        table_value = "<table style='width:50%;background-color:#eee'>"
        for _key, _value in metadata_config.items():
            table_value += "<tr><th>{}</th><th>{}</th></tr>".format(_value['name'], _value['value'])
        table_value += "</table>"
        table = widgets.HTML(value=table_value)
        return [table_label, table]

    def update_use_this_config_widget(self, state):
        pass
        # new_state = state['new']
        # [active_acquisition, active_config] = self.get_active_tabs()
        # self.config_tab_dict[active_acquisition][active_config]['normalize_this_config'] = new_state

    def update_config_widgets(self, state):
        """Show/hide the time-range widgets when the custom-range checkbox
        toggles."""
        if state['new'] is False:
            # use all files
            message = None
            visibility = 'hidden'
        else:
            # user defines ranges
            message = True
            visibility = 'visible'

        o_get = Get(parent=self)
        [time_before_selected_ui, time_after_selected_ui] = o_get.time_before_and_after_ui_of_this_config()
        experiment_label_ui = o_get.experiment_label_ui_of_this_config()
        experiment_label_ui.layout.visibility = visibility

        if visibility == 'hidden':
            time_before_selected_ui.layout.visibility = 'hidden'
            time_after_selected_ui.layout.visibility = 'hidden'
        else:
            self.show_or_not_before_and_after_sliders()

        self.update_time_range_event(message)

    def show_or_not_before_and_after_sliders(self):
        """Show only the sliders that have a non-zero possible range."""
        o_get = Get(parent=self)
        current_config = o_get.current_config_dict()
        [max_time_elapse_before_experiment, max_time_elapse_after_experiment] = \
            self.calculate_max_time_before_and_after_exp_for_this_config(current_config)
        slider_before_visibility = 'visible' if max_time_elapse_before_experiment > 0 else 'hidden'
        slider_after_visibility = 'visible' if max_time_elapse_after_experiment > 0 else 'hidden'
        [time_before_selected_ui, time_after_selected_ui] = o_get.time_before_and_after_ui_of_this_config()
        time_before_selected_ui.layout.visibility = slider_before_visibility
        time_after_selected_ui.layout.visibility = slider_after_visibility

    def is_custom_time_range_checked_for_this_config(self):
        """True when the active config's custom-time-range checkbox is set."""
        o_get = Get(parent=self)
        current_config = o_get.current_config_of_widgets_id()
        return current_config['use_custom_time_range_checkbox'].value

    def update_time_range_event(self, value):
        # reached when user interacts with the sliders in the config tab
        self.update_time_range_message(value)
        self.update_list_of_files_in_widgets_using_new_time_range()

    def update_list_of_files_in_widgets_using_new_time_range(self):
        """Re-filter the OB list widget according to the selected range."""
        o_get = Get(parent=self)

        # retrieve acquisition and config values
        acquisition_key = o_get.active_tab_acquisition_key()  # ex: '55.0'
        config_key = o_get.active_tab_config_key()  # ex: 'config0'

        # retrieve list of ob and df for this config for this acquisition
        final_full_master_dict = self.final_full_master_dict
        dict_for_this_config = final_full_master_dict[float(acquisition_key)][config_key]
        list_ob = dict_for_this_config['list_ob']

        # no need to do anything more if user wants to use all the files
        if not self.is_custom_time_range_checked_for_this_config():
            list_ob_to_keep = [_file['filename'] for _file in list_ob]
        else:
            # first and last sample file for this config and acquisition
            first_sample_image_time_stamp = dict_for_this_config['first_images']['sample']['time_stamp']
            last_sample_images_time_stamp = dict_for_this_config['last_images']['sample']['time_stamp']

            [time_before_selected, time_after_selected] = o_get.time_before_and_after_of_this_config()

            # keep only the OBs inside the selected time window
            list_ob_to_keep = []
            for _ob_file in list_ob:
                _ob_time_stamp = _ob_file['time_stamp']
                if (_ob_time_stamp < first_sample_image_time_stamp) and \
                        ((first_sample_image_time_stamp - _ob_time_stamp) <= np.abs(time_before_selected)):
                    list_ob_to_keep.append(_ob_file['filename'])
                elif (_ob_time_stamp > last_sample_images_time_stamp) and \
                        ((_ob_time_stamp - last_sample_images_time_stamp) <= np.abs(time_after_selected)):
                    list_ob_to_keep.append(_ob_file['filename'])

        self.update_list_of_ob_for_current_config_tab(list_ob=list_ob_to_keep)

    def update_list_of_ob_for_current_config_tab(self, list_ob=None):
        """Replace the OB multi-select options (everything selected)."""
        # avoid the original's shared mutable default argument
        if list_ob is None:
            list_ob = []
        o_get = Get(parent=self)
        [active_acquisition, active_config] = o_get.active_tabs()
        self.config_tab_dict[active_acquisition][active_config]['list_of_ob'].options = list_ob
        # select everything by default
        self.config_tab_dict[active_acquisition][active_config]['list_of_ob'].value = list_ob

    def update_time_range_message(self, value):
        """Refresh the HTML message describing the active time selection."""
        o_get = Get(parent=self)
        if value is None:
            _message = "Use <b><font color='red'>All </b> " \
                       "<font color='black'>OBs and DFs " \
                       "matching the samples images</font>"
        else:
            [time_before_selected, time_after_selected] = o_get.time_before_and_after_of_this_config()
            time_before_selected = np.abs(time_before_selected)

            def _format_time(_time_s):
                # human-friendly s / mn+s / hr+mn+s rendering
                if _time_s < 60:
                    return "{:.2f}s".format(_time_s)
                elif _time_s < 3600:
                    _time_mn = int(_time_s / 60.)
                    _time_s = int(_time_s % 60)
                    return "{:d}mn {:d}s".format(_time_mn, _time_s)
                else:
                    _time_hr = int(_time_s / 3600.)
                    _time_s_left = _time_s - _time_hr * 3600
                    _time_mn = int(_time_s_left / 60.)
                    _time_s = int(_time_s_left % 60)
                    return "{:d}hr {:d}mn {:d}s".format(_time_hr, _time_mn, _time_s)

            str_time_before = _format_time(time_before_selected)
            str_time_after = _format_time(time_after_selected)
            logging.info(f"str_time_before: {time_before_selected} -> {str_time_before}")
            _message = "Use OB taken up to <b><font color='red'>" + str_time_before + "</b> " \
                       "<font color='black'>before and up to </font>" \
                       "<b><font color='red'>" + str_time_after + "</b> " \
                       "<font color='black'>after experiment!</font>"

        time_before_and_after_message_ui = o_get.time_before_and_after_message_ui_of_this_config()
        time_before_and_after_message_ui.value = _message

    def checking_normalization_workflow(self):
        self.create_final_json()
        self.normalization_recap()

    def create_final_json(self):
        """Collect the user's widget selections into self.final_json_dict."""
        _final_full_master_dict = self.final_full_master_dict
        _config_tab_dict = self.config_tab_dict
        _final_json_dict = {}
        for _acquisition_index, _acquisition in enumerate(_final_full_master_dict.keys()):
            _final_json_for_this_acquisition = {}
            _dict_of_this_acquisition = _final_full_master_dict[_acquisition]
            for _config_index, _config in enumerate(_dict_of_this_acquisition.keys()):
                this_config_tab_dict = _config_tab_dict[_acquisition_index][_config_index]
                normalize_flag = this_config_tab_dict['use_this_config']
                list_sample = this_config_tab_dict['list_of_sample_runs'].options
                list_ob = this_config_tab_dict['list_of_ob'].value
                list_df = this_config_tab_dict['list_of_df'].value
                _final_json_for_this_acquisition[_config] = {'list_sample': list_sample,
                                                             'list_df': list_df,
                                                             'list_ob': list_ob,
                                                             'normalize_this_config': normalize_flag}
            _final_json_dict[_acquisition] = _final_json_for_this_acquisition
        self.final_json_dict = _final_json_dict

    def normalization_recap(self):
        """Show all the configs that will be run and whether they meet the
        minimum requirement (at least 1 OB)."""
        final_json = self.final_json_dict
        self.number_of_normalization = 0
        table = "<table style='width:50%;border:1px solid black'>"
        table += "<tr style='background-color:#eee'><th>Acquisition (s)</th><th>Config. name</th>" \
                 "<th>Nbr sample</th><th>Nbr OB</th><th>Nbr DF</th><th>Status</th></tr>"
        for _name_acquisition in final_json.keys():
            _current_acquisition_dict = final_json[_name_acquisition]
            for _name_config in _current_acquisition_dict.keys():
                _current_config_dict = _current_acquisition_dict[_name_config]
                normalize_this_config = _current_config_dict['normalize_this_config']
                nbr_ob = len(_current_config_dict['list_ob'])
                nbr_df = len(_current_config_dict['list_df'])
                nbr_sample = len(_current_config_dict['list_sample'])
                self.number_of_normalization += 1 if nbr_ob > 0 else 0
                table += utilities.populate_normalization_recap_row(
                    acquisition=_name_acquisition,
                    config=_name_config,
                    nbr_sample=nbr_sample,
                    nbr_ob=nbr_ob,
                    nbr_df=nbr_df,
                    normalize_this_config=normalize_this_config)
        table += "</table>"
        table_ui = widgets.HTML(table)
        display(table_ui)

    def select_output_folder(self):
        self.output_folder_ui = myfileselector.FileSelectorPanelWithJumpFolders(
            instruction='select where to create the ' +
                        'normalized folders',
            start_dir=self.working_dir,
            ipts_folder=self.working_dir,
            next=self.normalization,
            type='directory',
            newdir_toolbar_button=True)

    def normalization(self, output_folder):
        """Run NeuNorm normalization for every selected config and export the
        results under `output_folder`."""
        display(HTML('<span style="font-size: 20px; color:blue">Make sure you do not close the notebook until'
                     'the busy signal (dark circle top right) is gone!</span>'))
        self.output_folder_ui.shortcut_buttons.close()  # hack to hide the buttons

        final_json = self.final_json_dict
        number_of_normalization = self.number_of_normalization

        horizontal_layout = widgets.HBox([widgets.Label("Normalization progress",
                                                        layout=widgets.Layout(width='20%')),
                                          widgets.IntProgress(max=number_of_normalization + 1,
                                                              value=0,
                                                              layout=widgets.Layout(width='50%'))])
        normalization_progress = horizontal_layout.children[1]
        display(horizontal_layout)

        list_full_output_normalization_folder_name = []
        for _name_acquisition in final_json.keys():
            _current_acquisition_dict = final_json[_name_acquisition]
            for _name_config in _current_acquisition_dict.keys():
                _current_config = _current_acquisition_dict[_name_config]
                list_ob = _current_config['list_ob']
                if len(list_ob) == 0:
                    # a config without OB cannot be normalized
                    normalization_progress.value += 1
                    continue
                if not _current_config['normalize_this_config'].value:
                    normalization_progress.value += 1
                    continue
                list_sample = _current_config['list_sample']
                full_output_normalization_folder_name = \
                    utilities.make_full_output_normalization_folder_name(
                        output_folder=output_folder,
                        first_sample_file_name=list_sample[0],
                        name_acquisition=_name_acquisition,
                        name_config=_name_config)
                list_full_output_normalization_folder_name.append(full_output_normalization_folder_name)
                list_df = _current_config['list_df']

                o_load = Normalization()
                o_load.load(file=list(list_sample), notebook=True)
                o_load.load(file=list(list_ob), data_type='ob')
                if len(list_df) > 0:
                    o_load.load(file=list(list_df), data_type='df')
                o_load.normalization()
                o_load.export(folder=full_output_normalization_folder_name, file_type='tif')
                del o_load
                normalization_progress.value += 1
        horizontal_layout.close()

        display(HTML('<span style="font-size: 20px; color:blue">Following folders have been created:</span>'))
        for _folder in list_full_output_normalization_folder_name:
            _folder = _folder if _folder else "None"
            display(HTML('<span style="font-size: 15px; color:blue"> -> ' + _folder + '</span>'))
37,914
10,671
import contextlib
import io
import os
import re
import subprocess

from . import backups_manager_lib
from . import backups_main
from . import lib

from .test_util import AssertEquals
from .test_util import AssertLinesEqual
from .test_util import CreateDir
from .test_util import CreateFile
from .test_util import DoBackupsMain

# Line printed by the create-checkpoint/create-backup commands; the capture
# group is the filesystem path of the checkpoint that was created.  Shared by
# CreateLatestManifestCheckpoint, DoCreateCheckpoint and DoCreateBackup.
CREATED_CHECKPOINT_RE = re.compile(r'^Created checkpoint at (.+)$')

# NOTE(review): many helpers below use `expected_output=[]` (a mutable default
# argument).  The lists are never mutated here, and an explicit `None` already
# means "skip the output check" in DoBackupsMain, so the defaults are left
# unchanged rather than converted to a None sentinel.


def CreateConfig(parent_dir, backups_filename_prefix='backups',
                 filter_merge_path=None):
  """Create, write and return a BackupsConfig rooted under parent_dir.

  Also creates the src and checkpoints directories on disk.
  """
  config_path = os.path.join(parent_dir, '%s.config' % backups_filename_prefix)
  config = backups_manager_lib.BackupsConfig(config_path)
  config.image_path = os.path.join(
    parent_dir, '%s.sparsebundle' % backups_filename_prefix)
  config.mount_path = os.path.join(
    parent_dir, '%s_mount' % backups_filename_prefix)
  config.src_path = CreateDir(parent_dir, '%s_src' % backups_filename_prefix)
  config.checkpoints_dir = CreateDir(
    parent_dir, '%s_checkpoints' % backups_filename_prefix)
  config.filter_merge_path = filter_merge_path
  config.Write()
  return config


def CreateBackupsBundle(config, create_example_content=True):
  """Create the backups sparsebundle image with one initial backup inside.

  The backup is named 2020-01-01-120000 and optionally gets three example
  files (f1, fX, fT) in its Root directory.
  """
  lib.GetDiskImageHelper().CreateImage(
    config.image_path, size='10G', filesystem='APFS',
    image_type='SPARSEBUNDLE', volume_name='Backups')
  with lib.ImageAttacher(config.image_path, config.mount_path,
                         readonly=False, browseable=False) as attacher:
    backups_dir = CreateDir(
      attacher.GetMountPoint(), backups_manager_lib.BACKUPS_SUBDIR)
    backup1_dir = CreateDir(backups_dir, '2020-01-01-120000')
    CreateDir(backup1_dir, '.metadata')
    disk_dir = CreateDir(backup1_dir, 'Root')
    if create_example_content:
      CreateFile(disk_dir, 'f1')
      CreateFile(disk_dir, 'fX')
      CreateFile(disk_dir, 'fT')


def CreateLatestManifestCheckpoint(config):
  """Create a manifest-only checkpoint for the last completed backup.

  The resulting manifest is also installed as the backup's own manifest.
  Returns the path of the created checkpoint.
  """
  backups_manager = backups_manager_lib.BackupsManager.Open(
    config, readonly=False, browseable=False)
  try:
    last_backup = backups_manager.GetLastDone()
    src_root = last_backup.GetContentRootPath()
    output_lines = DoBackupsMain(
      ['create-checkpoint', '--src-root', src_root, '--checksum-all',
       '--manifest-only', '--no-encrypt',
       '--checkpoint-name', last_backup.GetName(),
       '--checkpoints-dir', config.checkpoints_dir],
      expected_output=None)
    # The last output line carries the checkpoint path; the preceding lines
    # must exactly match the transfer of the example content.
    m = CREATED_CHECKPOINT_RE.match(output_lines[-1])
    assert m
    checkpoint_path = m.group(1)
    AssertLinesEqual(output_lines[:-1],
                     ['>d+++++++ .',
                      '>f+++++++ f1',
                      '>f+++++++ fT',
                      '>f+++++++ fX',
                      'Transferring 4 paths (0b)'])
    manifest = lib.ReadManifestFromImageOrPath(checkpoint_path)
    manifest.SetPath(last_backup.GetManifestPath())
    manifest.Write()
    return checkpoint_path
  finally:
    backups_manager.Close()


def VerifyBackupManifest(backup, path=None):
  """Verify a backup's content against its manifest; raise on mismatch."""
  if path is None:
    manifest = lib.Manifest.Load(backup.GetManifestPath())
  else:
    manifest = lib.ReadManifestFromImageOrPath(path)

  output = io.StringIO()
  verifier = lib.ManifestVerifier(
    manifest, backup.GetContentRootPath(), output,
    checksum_path_matcher=lib.PathMatcherAll())
  success = verifier.Verify()
  output_lines = [line for line in output.getvalue().strip().split('\n')
                  if line]
  output.close()
  AssertLinesEqual(output_lines, [])
  if not success:
    raise Exception('Verification failed')


@contextlib.contextmanager
def SetLogThrottlerLogAlways(log_throttler):
  """Context manager that forces a log throttler to always log."""
  old_value = log_throttler.GetLogAlways()
  log_throttler.SetLogAlways(True)
  try:
    yield
  finally:
    log_throttler.SetLogAlways(old_value)


def DoCreateCheckpoint(src_root, checkpoints_dir, checkpoint_name,
                       expected_output=[], last_checkpoint_path=None,
                       filter_merge_path=None):
  """Run create-checkpoint and return the created checkpoint's path.

  All output other than the 'Created checkpoint at ...' line is asserted
  against expected_output.
  """
  args = ['create-checkpoint',
          '--no-encrypt',
          '--checksum-all',
          '--src-root', src_root,
          '--checkpoints-dir', checkpoints_dir,
          '--checkpoint-name', checkpoint_name]
  if last_checkpoint_path is not None:
    args.extend(['--last-checkpoint', last_checkpoint_path])
  if filter_merge_path is not None:
    args.extend(['--filter-merge-path', filter_merge_path])
  output = io.StringIO()
  AssertEquals(backups_main.Main(args, output), True)
  output_lines = []
  checkpoint_path = None
  for line in output.getvalue().strip().split('\n'):
    m = CREATED_CHECKPOINT_RE.match(line)
    if m:
      checkpoint_path = m.group(1)
      continue
    output_lines.append(line)
  output.close()
  AssertLinesEqual(output_lines, expected_output)
  return checkpoint_path


def DoCreateBackup(config, backup_name=None, dry_run=False,
                   expected_output=[]):
  """Run create-backup and return the created checkpoint's path."""
  cmd_args = ['create-backup',
              '--no-encrypt',
              '--backups-config', config.path]
  if backup_name is not None:
    cmd_args.extend(['--backup-name', backup_name])
  lines = DoBackupsMain(cmd_args, dry_run=dry_run, expected_output=None)
  checkpoint_path = None
  output_lines = []
  for line in lines:
    m = CREATED_CHECKPOINT_RE.match(line)
    if m:
      checkpoint_path = m.group(1)
      continue
    output_lines.append(line)
  AssertLinesEqual(output_lines, expected_output)
  return checkpoint_path


def DoApplyToBackups(config, dry_run=False, deduplicate_min_file_size=1024,
                     checksum_all=True, checksum_hardlinks=True,
                     expected_success=True, expected_output=[]):
  """Run apply-to-backups and assert its success flag and output."""
  cmd_args = ['apply-to-backups',
              '--backups-config', config.path,
              '--deduplicate-min-file-size', str(deduplicate_min_file_size)]
  if not checksum_all:
    cmd_args.append('--no-checksum-all')
  if not checksum_hardlinks:
    cmd_args.append('--no-checksum-hardlinks')
  DoBackupsMain(cmd_args, dry_run=dry_run,
                expected_success=expected_success,
                expected_output=expected_output)


def DoListBackups(config, dry_run=False, expected_backups=[]):
  """Run list-backups and assert the listed backup names."""
  cmd_args = ['list-backups', '--backups-config', config.path]
  DoBackupsMain(cmd_args, dry_run=dry_run, expected_output=expected_backups)


def DoVerifyBackups(config, dry_run=False, min_backup=None, max_backup=None,
                    full=True, continue_on_error=False, checksum_all=True,
                    expected_success=True, expected_output=[]):
  """Run verify-backups and assert its success flag and output."""
  cmd_args = ['verify-backups', '--backups-config', config.path]
  if min_backup is not None:
    cmd_args.extend(['--min-backup', min_backup])
  if max_backup is not None:
    cmd_args.extend(['--max-backup', max_backup])
  if not full:
    cmd_args.append('--no-full')
  if continue_on_error:
    cmd_args.append('--continue-on-error')
  if not checksum_all:
    cmd_args.append('--no-checksum-all')
  DoBackupsMain(cmd_args, dry_run=dry_run,
                expected_success=expected_success,
                expected_output=expected_output)


def DoAddMissingManifestsToBackups(config, expected_output=[]):
  """Run add-missing-manifests-to-backups and assert its output."""
  cmd_args = ['add-missing-manifests-to-backups',
              '--backups-config', config.path]
  DoBackupsMain(cmd_args, expected_output=expected_output)


def DoDeduplicateBackups(config, min_backup=None, max_backup=None,
                         match_older_mtimes=False, dry_run=False,
                         verbose=False, expected_output=[]):
  """Run deduplicate-backups (min file size 1024) and assert its output."""
  cmd_args = ['deduplicate-backups',
              '--min-file-size', '1024',
              '--backups-config', config.path]
  if min_backup is not None:
    cmd_args.extend(['--min-backup', min_backup])
  if max_backup is not None:
    cmd_args.extend(['--max-backup', max_backup])
  if match_older_mtimes:
    cmd_args.append('--match-older-mtimes')
  DoBackupsMain(cmd_args, dry_run=dry_run, verbose=verbose,
                expected_output=expected_output)


def DoCloneBackup(config, backup_name, dry_run=False, expected_success=True,
                  expected_output=[]):
  """Run clone-backup for one backup and assert success and output."""
  cmd_args = ['clone-backup',
              '--backups-config', config.path,
              '--backup-name', backup_name]
  DoBackupsMain(cmd_args, dry_run=dry_run,
                expected_success=expected_success,
                expected_output=expected_output)


def DoDeleteBackups(config, backup_names, dry_run=False,
                    expected_success=True, expected_output=[]):
  """Run delete-backups for the given names and assert success and output."""
  cmd_args = ['delete-backups', '--backups-config', config.path]
  for backup_name in backup_names:
    cmd_args.extend(['--backup-name', backup_name])
  DoBackupsMain(cmd_args, dry_run=dry_run,
                expected_success=expected_success,
                expected_output=expected_output)


def DoDeleteBackupsInteractive(config, backup_names=[], min_backup=None,
                               max_backup=None, ignore_matching_renames=False,
                               include_latest_backup=False, dry_run=False,
                               verbose=False, expected_success=True,
                               expected_output=[]):
  """Run delete-backups-interactive and assert success and output."""
  cmd_args = ['delete-backups-interactive', '--backups-config', config.path]
  for backup_name in backup_names:
    cmd_args.extend(['--backup-name', backup_name])
  if min_backup is not None:
    cmd_args.extend(['--min-backup', min_backup])
  if max_backup is not None:
    cmd_args.extend(['--max-backup', max_backup])
  if ignore_matching_renames:
    cmd_args.append('--ignore-matching-renames')
  if include_latest_backup:
    cmd_args.append('--include-latest-backup')
  DoBackupsMain(cmd_args, dry_run=dry_run, verbose=verbose,
                expected_success=expected_success,
                expected_output=expected_output)


def DoDumpUniqueFilesInBackups(config, backup_names=[], min_backup=None,
                               max_backup=None, ignore_matching_renames=False,
                               match_previous_only=False,
                               match_next_only=False, dry_run=False,
                               verbose=False, expected_success=True,
                               expected_output=[]):
  """Run dump-unique-files-in-backups and assert success and output."""
  cmd_args = ['dump-unique-files-in-backups', '--backups-config', config.path]
  for backup_name in backup_names:
    cmd_args.extend(['--backup-name', backup_name])
  if min_backup is not None:
    cmd_args.extend(['--min-backup', min_backup])
  if max_backup is not None:
    cmd_args.extend(['--max-backup', max_backup])
  if ignore_matching_renames:
    cmd_args.append('--ignore-matching-renames')
  if match_previous_only:
    cmd_args.append('--match-previous-only')
  if match_next_only:
    cmd_args.append('--match-next-only')
  DoBackupsMain(cmd_args, dry_run=dry_run, verbose=verbose,
                expected_success=expected_success,
                expected_output=expected_output)


def DoExtractFromBackups(config, dry_run=False, min_backup=None,
                         max_backup=None, output_image_path=None, paths=[],
                         expected_success=True, expected_output=[]):
  """Run extract-from-backups and assert success and output."""
  cmd_args = ['extract-from-backups',
              '--backups-config', config.path,
              '--no-encrypt',
              '--deduplicate-min-file-size', '1024']
  if output_image_path is not None:
    cmd_args.extend(['--output-image-path', output_image_path])
  for path in paths:
    cmd_args.extend(['--path', path])
  if min_backup is not None:
    cmd_args.extend(['--min-backup', min_backup])
  if max_backup is not None:
    cmd_args.extend(['--max-backup', max_backup])
  DoBackupsMain(cmd_args, dry_run=dry_run,
                expected_success=expected_success,
                expected_output=expected_output)


def DoMergeIntoBackups(config, dry_run=False, min_backup=None,
                       max_backup=None, from_image_path=None,
                       expected_success=True, expected_output=[]):
  """Run merge-into-backups and assert success and output."""
  cmd_args = ['merge-into-backups',
              '--backups-config', config.path,
              '--deduplicate-min-file-size', '1024']
  if from_image_path is not None:
    cmd_args.extend(['--from-image-path', from_image_path])
  if min_backup is not None:
    cmd_args.extend(['--min-backup', min_backup])
  if max_backup is not None:
    cmd_args.extend(['--max-backup', max_backup])
  DoBackupsMain(cmd_args, dry_run=dry_run,
                expected_success=expected_success,
                expected_output=expected_output)


def DoDeleteInBackups(config, dry_run=False, min_backup=None, max_backup=None,
                      paths=[], expected_success=True, expected_output=[]):
  """Run delete-in-backups for the given paths and assert success/output."""
  cmd_args = ['delete-in-backups', '--backups-config', config.path]
  if min_backup is not None:
    cmd_args.extend(['--min-backup', min_backup])
  if max_backup is not None:
    cmd_args.extend(['--max-backup', max_backup])
  for path in paths:
    cmd_args.extend(['--path', path])
  DoBackupsMain(cmd_args, dry_run=dry_run,
                expected_success=expected_success,
                expected_output=expected_output)
12,719
4,144
from django import template

from backend.models import Back

register = template.Library()


@register.inclusion_tag('backend/tags/scrollMenuB.html')
def get_back():
    """Inclusion tag: render the scroll menu with every Back object."""
    return {"scrollMenuB": Back.objects.all()}
234
68
"""Views for ecommerce""" from decimal import Decimal import logging from django.conf import settings from django.contrib.auth import get_user_model from django.http.response import Http404 from django.shortcuts import get_object_or_404 from django.urls import reverse from ipware import get_client_ip from rest_framework import status as statuses from rest_framework.authentication import SessionAuthentication from rest_framework.generics import CreateAPIView, GenericAPIView, RetrieveAPIView from rest_framework.permissions import IsAuthenticated from rest_framework.renderers import TemplateHTMLRenderer from rest_framework.response import Response from rest_framework.validators import ValidationError from rest_framework.views import APIView from applications.constants import AppStates from applications.models import BootcampApplication from backends.edxorg import EdxOrgOAuth2 from ecommerce.api import ( complete_successful_order, create_unfulfilled_order, generate_cybersource_sa_payload, get_new_order_by_reference_number, handle_rejected_order, serialize_user_bootcamp_run, serialize_user_bootcamp_runs, ) from ecommerce.constants import CYBERSOURCE_DECISION_ACCEPT, CYBERSOURCE_DECISION_CANCEL from ecommerce.exceptions import EcommerceException from ecommerce.models import Line, Order, Receipt from ecommerce.permissions import IsSignedByCyberSource from ecommerce.serializers import ( CheckoutDataSerializer, PaymentSerializer, OrderSerializer, ) from hubspot.task_helpers import sync_hubspot_application_from_order from klasses.models import BootcampRun from klasses.permissions import CanReadIfSelf from main.permissions import UserIsOwnerOrAdminPermission from main.serializers import serialize_maybe_user log = logging.getLogger(__name__) User = get_user_model() class PaymentView(CreateAPIView): """ View for payment API. This creates an Order in our system and provides a dictionary to send to Cybersource. 
""" authentication_classes = (SessionAuthentication,) permission_classes = (IsAuthenticated,) serializer_class = PaymentSerializer def post(self, request, *args, **kwargs): """ Create an unfulfilled order and return a response for it. """ serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) payment_amount = Decimal(serializer.data["payment_amount"]) application_id = serializer.data["application_id"] application = get_object_or_404( BootcampApplication, id=application_id, user=self.request.user ) if application.state != AppStates.AWAITING_PAYMENT.value: log.error( "User attempted to pay for application %d with invalid state %s", application.id, application.state, ) raise ValidationError("Invalid application state") order = create_unfulfilled_order( application=application, payment_amount=payment_amount ) # Sync order data with hubspot sync_hubspot_application_from_order(order) redirect_url = self.request.build_absolute_uri(reverse("applications")) user_ip, _ = get_client_ip(request) return Response( { "payload": generate_cybersource_sa_payload( order, redirect_url, ip_address=user_ip ), "url": settings.CYBERSOURCE_SECURE_ACCEPTANCE_URL, } ) class OrderFulfillmentView(APIView): """ View for order fulfillment API. This API is special in that only CyberSource should talk to it. Instead of authenticating with OAuth or via session this looks at the signature of the message to verify authenticity. """ authentication_classes = () permission_classes = (IsSignedByCyberSource,) def post(self, request, *args, **kwargs): # pylint: disable=unused-argument """ Confirmation from CyberSource which fulfills an existing Order. 
""" # First, save this information in a receipt receipt = Receipt.objects.create(data=request.data) # Link the order with the receipt if we can parse it reference_number = request.data["req_reference_number"] order = get_new_order_by_reference_number(reference_number) receipt.order = order receipt.save() decision = request.data["decision"] if order.status == Order.FAILED and decision == CYBERSOURCE_DECISION_CANCEL: # This is a duplicate message, ignore since it's already handled return Response(status=statuses.HTTP_200_OK) elif order.status != Order.CREATED: raise EcommerceException( "Order {} is expected to have status 'created'".format(order.id) ) if decision != CYBERSOURCE_DECISION_ACCEPT: handle_rejected_order(order=order, decision=decision) else: # import pdb; pdb.set_trace() complete_successful_order(order) # Sync order data with hubspot sync_hubspot_application_from_order(order) # The response does not matter to CyberSource return Response(status=statuses.HTTP_200_OK) class UserBootcampRunDetail(GenericAPIView): """ Class based view for user bootcamp run view. 
""" authentication_classes = (SessionAuthentication,) permission_classes = (IsAuthenticated, CanReadIfSelf) lookup_field = "run_key" lookup_url_kwarg = "run_key" queryset = BootcampRun.objects.all() def get( self, request, username, *args, **kwargs ): # pylint: disable=unused-argument """ Returns a serialized bootcamp run and payment for a user """ user = get_object_or_404( User, social_auth__uid=username, social_auth__provider=EdxOrgOAuth2.name ) bootcamp_run = self.get_object() return Response( serialize_user_bootcamp_run(user=user, bootcamp_run=bootcamp_run) ) class UserBootcampRunStatement(RetrieveAPIView): """ View class for a user's bootcamp run payment statement """ authentication_classes = (SessionAuthentication,) permission_classes = (IsAuthenticated,) lookup_field = "run_key" lookup_url_kwarg = "run_key" queryset = BootcampRun.objects.all() renderer_classes = (TemplateHTMLRenderer,) def get(self, request, *args, **kwargs): """ Fetches a user's bootcamp run payment information and renders their statement (or raises a 404 if they have no payments for the specified bootcamp run) """ bootcamp_run = self.get_object() if Line.for_user_bootcamp_run(request.user, bootcamp_run).count() == 0: raise Http404 return Response( { "user": serialize_maybe_user(request.user), "bootcamp_run": serialize_user_bootcamp_run( user=request.user, bootcamp_run=bootcamp_run ), }, template_name="bootcamp/statement.html", ) class UserBootcampRunList(APIView): """ Class based view for user bootcamp run list view. """ authentication_classes = (SessionAuthentication,) permission_classes = (IsAuthenticated, CanReadIfSelf) def get( self, request, username, *args, **kwargs ): # pylint: disable=unused-argument """ Returns serialized bootcamp runs and payments for all runs that a user can pay for. 
""" user = get_object_or_404( User, social_auth__uid=username, social_auth__provider=EdxOrgOAuth2.name ) return Response(serialize_user_bootcamp_runs(user=user)) class CheckoutDataView(RetrieveAPIView): """ List application ecommerce data for a user, for payable applications """ authentication_classes = (SessionAuthentication,) permission_classes = (IsAuthenticated,) serializer_class = CheckoutDataSerializer def get_queryset(self): """Filter on valid applications for the user""" return ( BootcampApplication.objects.filter( user=self.request.user, state=AppStates.AWAITING_PAYMENT.value ) .select_related("bootcamp_run") .prefetch_related( "bootcamp_run__personal_prices", "bootcamp_run__installment_set", "orders", "orders__line_set", ) .order_by("id") ) def get_object(self): """Get the application given the query parameter""" application_id = self.request.query_params.get("application") return get_object_or_404(self.get_queryset(), id=application_id) class OrderView(RetrieveAPIView): """API view for Orders""" permission_classes = (IsAuthenticated, UserIsOwnerOrAdminPermission) serializer_class = OrderSerializer queryset = Order.objects.all() owner_field = "user"
9,116
2,537
import json
import logging
import os

import torch

JSON_CONTENT_TYPE = 'application/json'

logger = logging.getLogger(__name__)


def model_fn(model_dir):
    """Load the trained RNN model and its corpus from model_dir.

    Returns a dict with keys 'model' (the RNNModel in eval mode on the best
    available device) and 'corpus' (the data.Corpus built from model_dir).
    """
    # Project-local modules are imported lazily so this handler module can be
    # imported (e.g. for testing input_fn/output_fn) without them on sys.path.
    from rnn import RNNModel
    import data

    logger.info('Loading the model.')
    with open(os.path.join(model_dir, 'model_info.pth'), 'rb') as f:
        model_info = torch.load(f)
    logger.info('model_info: %s', model_info)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info('Current device: %s', device)
    model = RNNModel(rnn_type=model_info['rnn_type'],
                     ntoken=model_info['ntoken'],
                     ninp=model_info['ninp'],
                     nhid=model_info['nhid'],
                     nlayers=model_info['nlayers'],
                     dropout=model_info['dropout'],
                     tie_weights=model_info['tie_weights'])
    with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:
        model.load_state_dict(torch.load(f))

    # After loading, the RNN parameters are not a continuous chunk of memory;
    # flattening them makes them contiguous and speeds up the forward pass.
    model.rnn.flatten_parameters()

    model.to(device).eval()

    logger.info('Loading the data.')
    corpus = data.Corpus(model_dir)

    logger.info('Done loading model and corpus. Corpus dictionary size: %s',
                len(corpus.dictionary))
    return {'model': model, 'corpus': corpus}


def input_fn(serialized_input_data, content_type=JSON_CONTENT_TYPE):
    """Deserialize a JSON request body; reject other content types.

    Raises if 'temperature' is below 1e-3 (values that small make the
    softmax scaling in predict_fn numerically unstable).
    """
    logger.info('Deserializing the input data.')
    if content_type == JSON_CONTENT_TYPE:
        input_data = json.loads(serialized_input_data)
        if input_data['temperature'] < 1e-3:
            raise Exception('\'temperature\' has to be greater or equal 1e-3')
        return input_data
    raise Exception('Requested unsupported ContentType in content_type: ' + content_type)


def output_fn(prediction_output, accept=JSON_CONTENT_TYPE):
    """Serialize the generated text as JSON; reject other accept types."""
    logger.info('Serializing the generated output.')
    if accept == JSON_CONTENT_TYPE:
        return json.dumps(prediction_output), accept
    raise Exception('Requested unsupported ContentType in Accept: ' + accept)


def predict_fn(input_data, model):
    """Generate input_data['words'] words of text from the loaded model.

    input_data keys: 'seed' (RNG seed), 'temperature' (softmax scaling),
    'words' (number of tokens to generate).  Returns the generated string.
    """
    logger.info('Generating text based on input parameters.')
    corpus = model['corpus']
    model = model['model']

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info('Current device: %s', device)
    torch.manual_seed(input_data['seed'])

    ntokens = len(corpus.dictionary)
    # Start generation from one random token (avoid shadowing builtin input).
    input_token = torch.randint(ntokens, (1, 1), dtype=torch.long).to(device)
    hidden = model.init_hidden(1)

    logger.info('Generating {} words.'.format(input_data['words']))
    result = []
    with torch.no_grad():  # no tracking history
        for i in range(input_data['words']):
            output, hidden = model(input_token, hidden)
            word_weights = output.squeeze().div(input_data['temperature']).exp().cpu()
            word_idx = torch.multinomial(word_weights, 1)[0]
            input_token.fill_(word_idx)
            word = corpus.dictionary.idx2word[word_idx]
            word = word if isinstance(word, str) else word.decode()
            if word == '<eos>':
                word = '\n'
            elif i % 12 == 11:
                word = word + '\n'
            else:
                word = word + ' '
            result.append(word)
    return ''.join(result)
3,334
1,054
import pandas as pd
import numpy as np
import json
import requests


def get_keys(path):
    """Return the parsed JSON credentials stored at *path*."""
    with open(path) as f:
        return json.load(f)


# Load my Google API key from a file kept outside version control.
keys = get_keys("/Users/jjherranzsarrion/.secret/google_blog2_api.json")
api_key = keys['api_key']

url = 'https://maps.googleapis.com/maps/api/directions/json?'
origin = 'Sheepfold+Dog+Park+Fells+Path+Stoneham+MA'
destination = 'Terminal+C+Boston+Logan+International+Airport+Boston+MA+02128'
# Departure: Monday 19th August at 07:30 AM, expressed in seconds since
# midnight 1st Jan 1970 (Unix epoch).
departure_time = '1566819000'

request_url = (
    f"{url}origin={origin}&destination={destination}"
    f"&departure_time={departure_time}&key={api_key}"
)
response = requests.get(request_url)

# Persist the raw Directions API response for later analysis.
with open('response.json', 'w') as f:
    json.dump(response.json(), f)
889
326
from django.apps import AppConfig

from openslides.utils.collection import Collection

from . import (
    __description__,
    __license__,
    __url__,
    __verbose_name__,
    __version__,
)


class ProtocolAppConfig(AppConfig):
    """Django app configuration for the openslides_protocol plugin."""

    name = 'openslides_protocol'
    verbose_name = __verbose_name__
    description = __description__
    version = __version__
    license = __license__
    url = __url__
    angular_site_module = True
    js_files = [
        'static/js/openslides_protocol/base.js',
        'static/js/openslides_protocol/site.js',
        'static/js/openslides_protocol/templatehooks.js',
        'static/js/openslides_protocol/templates.js'
    ]

    def ready(self):
        # Deferred imports: these touch the app registry, so they must not
        # run until Django has finished loading all apps.
        from openslides.core.config import config
        from openslides.core.signals import post_permission_creation
        from openslides.utils.rest_api import router

        from .config_variables import get_config_variables
        from .signals import add_permissions_to_builtin_groups
        from .views import ObjectProtocolViewSet, ProtocolViewSet

        # Register this plugin's config variables.
        config.update_config_variables(get_config_variables())

        # Grant the plugin's permissions to the builtin groups once the
        # permissions have been created.
        post_permission_creation.connect(
            add_permissions_to_builtin_groups,
            dispatch_uid='protocol_add_permissions_to_builtin_groups'
        )

        # Register a REST viewset for each of the plugin's models.
        for model_name, viewset in (
                ('ObjectProtocol', ObjectProtocolViewSet),
                ('Protocol', ProtocolViewSet)):
            router.register(
                self.get_model(model_name).get_collection_string(), viewset)

    def get_startup_elements(self):
        """Yield one Collection per plugin model for the client startup data."""
        for model_name in ('ObjectProtocol', 'Protocol'):
            yield Collection(
                self.get_model(model_name).get_collection_string())
1,807
510
# Generated by Django 3.0.3 on 2020-04-28 13:14 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('roster', '0039_unlock_units'), ] operations = [ migrations.AlterField( model_name='student', name='num_units_done', field=models.SmallIntegerField(default=0, help_text="The number of completed units. This is set manually for Evan's book-keeping."), ), migrations.AlterField( model_name='student', name='vision', field=models.SmallIntegerField(default=3, help_text='Deprecated and no longer in use. To be deleted.'), ), ]
701
221
#!/usr/bin/env python
"""Generate a self-contained HTML report of apartment trades and asks."""
import json
from typing import List, Optional, Tuple
import datetime
import re
import io
import base64
import os
import sys
import argparse
from plotly.missing_ipywidgets import FigureWidget
from tqdm import tqdm
import minify_html

ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(ROOT)

import plotly
import plotly.io
import plotly.graph_objects as go
from plotly.subplots import make_subplots

import korea_apartment_price
from korea_apartment_price.db import ApartmentId, EntryNotFound
from korea_apartment_price.utils import editdist


def date_serial2date(x: int) -> datetime.datetime:
  """Convert a YYYYMMDD integer serial into a datetime."""
  year = x // 10000
  month = (x // 100) % 100
  date = (x) % 100
  return datetime.datetime(year, month, date)


def render_graph(apts: List[ApartmentId], date_from=20190101) -> Tuple[str, FigureWidget]:
  """Build the trade/orderbook figure for one apartment complex.

  Returns ((address, display name), plotly figure).  The size plotted is the
  traded size closest to the first apartment's requested size.
  """
  sizes = set(korea_apartment_price.db.query_trades(
    apt_ids=apts, filters=[korea_apartment_price.db.pick_size],
    date_from=date_from, include_canceled=True))
  if len(sizes) == 0:
    # No trades at all: fall back to the sizes requested in the input list.
    sizes = set([apt['size'] for apt in apts])
  favorite_size = apts[0]['size']
  # Pick the traded size closest to the requested one.
  chosen_size = list(sorted([(abs(s - favorite_size), s) for s in sizes]))[0][1]

  fig = go.Figure()
  aptname = re.sub(r'[0-9]+[ ]*단지[ ]*$', '', apts[0]["name"])
  title = (f'{apts[0]["address"]}', f'{aptname} (전용 {chosen_size}평)')
  fig.update_layout(height=500, margin=dict(l=10, r=10, b=10, t=10))
  fig.update_yaxes(showline=True, linecolor='black', linewidth=1, mirror=True)
  fig.update_xaxes(tickformat='%Y-%m-%d', hoverformat='%Y-%m-%d',
                   showline=True, linecolor='black', linewidth=1, mirror=True)

  # Completed and canceled trades around the chosen size.
  trades = korea_apartment_price.db.query_trades(
    apt_ids=apts, size_from=chosen_size - 0.9, size_to=chosen_size + 0.9,
    date_from=date_from, include_canceled=True)
  trades_x = [date_serial2date(t['date_serial']) for t in trades if not t['is_canceled']]
  trades_y = [t['price'] / 10000 for t in trades if not t['is_canceled']]
  labels = [f'{t["floor"]}층' for t in trades if not t['is_canceled']]
  canceled_trades_x = [date_serial2date(t['date_serial']) for t in trades if t['is_canceled']]
  canceled_trades_y = [t['price'] / 10000 for t in trades if t['is_canceled']]
  canceled_labels = [f'{t["floor"]}층(취소)' for t in trades if t['is_canceled']]

  el = go.Scattergl(x=trades_x, y=trades_y, showlegend=False,
                    marker={'color': 'blue', 'size': 10}, mode='markers',
                    hovertext=labels, name='실거래')
  el_canceled = go.Scattergl(x=canceled_trades_x, y=canceled_trades_y,
                             showlegend=False,
                             marker={'color': 'orange', 'size': 10, 'symbol': 'x'},
                             mode='markers', hovertext=canceled_labels, name='취소')
  fig.add_trace(el)
  fig.add_trace(el_canceled)

  # Use the first apartment id that has KB orderbook entries.  Initialize to
  # an empty list so the code below still works (and renders no ask lines)
  # when every id raises EntryNotFound -- previously this was a NameError.
  kb_orderbook = []
  for apt in apts:
    try:
      kb_orderbook = sorted(
        korea_apartment_price.db.query_kb_orderbook(
          apt, size_from=chosen_size - 1, size_to=chosen_size + 1,
          fetched_from=date_from),
        key=lambda x: x['fetched_at'])
      break
    except EntryNotFound:
      print(apt)

  fetched_date_cnt = {}
  fetched_price_date_cnt = {}
  fetched_price_date_lbls = {}

  for od in kb_orderbook:
    date_end = od['fetched_at']
    if od['detail']['최소매매가'] is not None:
      price = int(od['detail']['최소매매가']) / 10000
    else:
      price = od['price'] / 10000
    fetched_date_cnt[date_end] = fetched_date_cnt.get(date_end, 0) + 1
    fetched_price_date_cnt[(date_end, price)] = \
      fetched_price_date_cnt.get((date_end, price), 0) + 1
    if not (date_end, price) in fetched_price_date_lbls:
      fetched_price_date_lbls[(date_end, price)] = set()
    # Build a unit label: dong/ho if known, else floor, else a placeholder.
    curlbl = ''
    if od['apt_dong'] is not None and len(od['apt_dong']) > 0:
      curlbl += f'{od["apt_dong"]}동'
    if od['apt_ho'] is not None and len(od['apt_ho']) > 0:
      curlbl += f'{od["apt_ho"]}호'
    elif od['floor'] is not None and len(od['floor']) > 0:
      curlbl += f'{od["floor"]}'
    if curlbl == '':
      curlbl = '정보없음'
    curlbl = curlbl.replace('제', '').replace('T', '')
    fetched_price_date_lbls[(date_end, price)].add(curlbl)

  fetched_dates = sorted(fetched_date_cnt.keys())
  max_cnt = max([1] + list(fetched_price_date_cnt.values()))

  for (date_end, price), cnt in sorted(fetched_price_date_cnt.items()):
    # Draw each ask as a segment from the previous fetch date to this one.
    date_start = None
    for trial_date_start in fetched_dates:
      if trial_date_start < date_end:
        date_start = trial_date_start
    if date_start is None:
      date_start = date_end - datetime.timedelta(2)
    opacity = min(1.0, 0.1 + 0.9 * cnt / max_cnt)
    fig.add_trace(go.Scattergl(
      x=[date_start, date_end], y=[price, price],
      line={'width': 2, 'color': 'red'}, marker=None, opacity=opacity,
      showlegend=False, name='', hoverinfo='skip', mode='lines'))
    details = sorted(list(fetched_price_date_lbls[(date_end, price)]))
    details = '<br>' + '<br>'.join(sorted(details))
    marker = go.Scattergl(
      x=[date_end], y=[price], text=[f'{cnt}개 {details}'], line=None,
      marker={'color': 'red', 'size': 3}, opacity=opacity,
      showlegend=False, name='', mode='markers')
    fig.add_trace(marker)
  return title, fig


parser = argparse.ArgumentParser()
parser.add_argument('aptlst', help='a csv file that contains gu and the apartment name')
parser.add_argument('output', help='output html report path')

args = parser.parse_args()

apts = []
print('[+] reading apartment list')
with open(args.aptlst, 'r') as f:
  for line in tqdm(f.readlines()):
    line = line.strip()
    line = line.split(',', 2)
    if len(line) not in [2, 3]:
      print(f'Warning: ignoring line "{line}"')
      continue
    if len(line) == 2:
      addr, name = [s.strip() for s in line]
      size = 18  # default size (pyeong) when the csv omits it
    else:
      addr, name, size = [s.strip() for s in line]
      size = int(size)

    # Resolve the csv entry to the known apartment with the closest name.
    selected = korea_apartment_price.shortcuts.search(addr, name)
    best_editdist = None
    best_apt = None
    for apt in selected:
      apt['size'] = size
      cur_editdist = editdist(name, apt['name'])
      if best_apt is None or best_editdist > cur_editdist:
        best_apt = apt
        best_editdist = cur_editdist
    if best_apt is not None:
      apts.append(best_apt)
    else:
      print(f'[!] couldn\'t find apt entries for query=({addr}, {name})')

# De-duplicate by (address, name, size), keeping a stable sorted order.
uniq_apts = {}
for apt in apts:
  uniq_apts[(apt['address'], apt['name'], apt['size'])] = apt
apts = [uniq_apts[k] for k in sorted(uniq_apts.keys())]

# Group multi-complex apartments ("N단지") under one display name.
uniq_apts = {}
for apt in apts:
  aptname = re.sub(r'[0-9]+[ ]*단지[ ]*$', '', apt["name"])
  key = apt['address'], aptname, apt['size']
  if not key in uniq_apts:
    uniq_apts[key] = []
  uniq_apts[key].append(apt)

apt_keys = sorted(uniq_apts.keys())

print('[+] generating report')
for apt_addr, apt_name, apt_size in apt_keys:
  print(f'{apt_addr} {apt_name} [전용 {apt_size}평]')

data = []
data_by_addr = {}
addrlst = []

for aptidx, apt_key in enumerate(tqdm(apt_keys)):
  apts = uniq_apts[apt_key]
  (addr, aptname), fig = render_graph(apts)
  cur_chart = json.loads(plotly.io.to_json(fig))
  # Force WebGL rendering for every trace in the serialized figure.
  if 'data' in cur_chart:
    for e in cur_chart['data']:
      e['type'] = 'scattergl'
  data.append({
    'addr': addr,
    'aptname': aptname,
    'fig': cur_chart,
  })
  if not addr in data_by_addr:
    data_by_addr[addr] = []
  data_by_addr[addr].append(aptidx)

addrlst = sorted(list(data_by_addr.keys()))
datestr = datetime.datetime.now().strftime('%Y-%m-%d')

html = f"""<!DOCTYPE html>
<html lang="kr">
  <head>
    <meta charset="utf-8" />
    <meta http-equiv="x-ua-compatible" content="ie=edge" />
    <meta name="viewport" content="width=device-width, initial-scale=1" />
    <title>{datestr} 아파트 보고서</title>
    <script src="https://code.jquery.com/jquery-3.6.0.js"></script>
    <script src="https://code.jquery.com/ui/1.13.0/jquery-ui.js"></script>
    <script type="text/javascript" src="https://cdn.plot.ly/plotly-latest.min.js"></script>
    <script type="text/javascript" id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
    <link href="https://cdn.jsdelivr.net/npm/select2@4.1.0-rc.0/dist/css/select2.min.css" rel="stylesheet" />
    <script src="https://cdn.jsdelivr.net/npm/select2@4.1.0-rc.0/dist/js/select2.min.js"></script>
    <script src="https://cdn.tailwindcss.com"></script>
    <link rel="stylesheet" href="//code.jquery.com/ui/1.13.0/themes/base/jquery-ui.css">
  </head>
"""
html += f"""<script>let chartData={json.dumps(data, ensure_ascii=False, separators=(',', ':'))};</script>"""
html += """<script>
function updateChart(idx) {
  let chartdiv = document.getElementById('chart');
  console.log(idx);
  Plotly.react(chart, chartData[idx]['fig']['data'], chartData[idx]['fig']['layout'], {displayModeBar: false});
}

$(document).ready(()=>{
  $('#aptselect').select2();
  $('#aptselect').on('select2:select', function (e) {
    let data = e.params.data;
    updateChart(parseInt(data.id));
  });
  let chartdiv = document.getElementById('chart');
  Plotly.newPlot(chart, chartData[0]['fig']['data'], chartData[0]['fig']['layout'], {displayModeBar: false});
});
</script>
"""

options = ""
for cur_addr in addrlst:
  options += f'<optgroup label="{cur_addr}">'
  for cur_data_idx in data_by_addr[cur_addr]:
    cur_data = data[cur_data_idx]
    options += f'<option value="{cur_data_idx}" {"selected" if cur_data_idx == 0 else ""}>{cur_data["aptname"]}</option>'
  options += '</optgroup>'

html += f"""
<body>
<div class="h-screen m-0 p-0 flex flex-col">
  <div class="grow-0">
    <h3 class="text-center font-bold text-lg">{datestr} 아파트 보고서</h3>
    <div class="m-3">
      <select class="w-full p-3" id="aptselect" name="aptselect">
      {options}
      </select>
    </div>
  </div>
  <div class="grow p-1"><div id="chart"></div></div>
</body>
</html>"""

with open(args.output, 'w') as f:
  f.write(html)

print('[+] done')
9,700
3,853
from lib import rpclib from slickrpc import Proxy from lib import transaction, bitcoin, util from lib.util import bfh, bh2u from lib.transaction import Transaction import requests import pytest import subprocess import json import sys import os from dotenv import load_dotenv load_dotenv(verbose=True) IMPORT_API_HOST = str(os.getenv("IMPORT_API_HOST")) IMPORT_API_PORT = str(os.getenv("IMPORT_API_PORT")) IMPORT_API_BASE_URL = IMPORT_API_HOST rpc_user = os.getenv("IJUICE_KOMODO_NODE_USERNAME") rpc_password = os.getenv("IJUICE_KOMODO_NODE_PASSWORD") port = os.getenv("IJUICE_KOMODO_NODE_RPC_PORT") address = "" amount = 0 greedy = True if len(sys.argv) >= 3: address = sys.argv[1] amount = float(sys.argv[2]) greedy = bool(sys.argv[3]) #this_node_pubkey = os.getenv("THIS_NODE_PUBKEY") #this_node_wif = os.getenv("THIS_NODE_WIF") def get_utxos_api(address): komodo_node_ip = os.getenv("IJUICE_KOMODO_NODE_IPV4_ADDR") rpc_connect = rpc_connection = Proxy("http://" + rpc_user + ":" + rpc_password + "@" + komodo_node_ip + ":" + port) url = "https://blockchain-explorer.thenewfork.staging.do.unchain.io/insight-api-komodo/addrs/"+ address +"/utxo" try: res = requests.get(url) except Exception as e: print(e) return res.text array_of_utxos = [] array_of_utxos_final = [] amount_final = -10000000000 def get_utxos(utxos, amount, greedy): global array_of_utxos global array_of_utxos_final global amount_final if len(array_of_utxos) >= len(array_of_utxos_final) and len(array_of_utxos_final) > 0: return False if amount <= 0 and amount > amount_final: return True flag = False cheap_copy = array_of_utxos for utxo in utxos: for uxto_in_array in array_of_utxos: if uxto_in_array['txid'] == utxo['txid']: flag = True if flag == False: array_of_utxos = array_of_utxos + [utxo] if get_utxos(utxos, amount - utxo['amount'], greedy) == True: array_of_utxos_final = array_of_utxos amount_final = amount if greedy == True: return True flag = False array_of_utxos = cheap_copy return False string = 
get_utxos_api(address) to_python = "" try: to_python = json.loads(string) except Exception as e: print(e) exit() final = [] for utxo in to_python: if utxo['confirmations'] > 10: final = final + [utxo] get_utxos(final, amount, greedy) print(array_of_utxos_final) #TESTING def is_json(myjson): try: json_object = json.loads(myjson) except ValueError as e: return False return True def test_api(): test = get_utxos_api("RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW") assert is_json(test) == True def test_get_utxos(): testcase = [{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"6d2dbbf64d839bedece788632d6233337494d1d51247823058832a16c1cf1d92","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.01833945,"satoshis":1833945,"confirmations":0,"ts":1602181139},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"ba474f6ddff5883a13bd456570769cd8de54b448cd5baa872fd99d253dc3df79","vout":0,"scriptPubKey":"2102f2cdd772ab57eae35996c0d39ad34fe06304c4d3981ffe71a596634fa26f8744ac","amount":0.04444815,"satoshis":4444815,"height":104219,"confirmations":1},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"95f7f0a9ccd4256be902d773f884c6b13bff465feaa87b56a61a8773a3cd990b","vout":0,"scriptPubKey":"2102f2cdd772ab57eae35996c0d39ad34fe06304c4d3981ffe71a596634fa26f8744ac","amount":0.01014884,"satoshis":1014884,"height":104104,"confirmations":116},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"1e562a43ce53a17c1b0cd2f3a7561d943a849d870e0efd4c9f37c8ce750c015b","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":103904,"confirmations":316},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"cad9777cfd1ea164236800506b24ff633702914a87000be019d82523911fdce2","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":103902,"confirmations":318},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"41451a102cfd2780377c33a67d1e
d96b3f70fbb616664a7f431115f83f1beb22","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":103901,"confirmations":319},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"7e7390d8176edb9fef91cbd1843c656da7543169baf361971d6bc7eefa498066","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99497,"confirmations":4723},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"19c73bac8031b52b2c3f9f93c3e40f03dc4747a093703907c0e0a8ef09192fac","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99496,"confirmations":4724},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"db60669396d0bb0aa7b81b9325edfe708c879ff0253c9919af1b892efdefac10","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99492,"confirmations":4728},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"2a39930043b87bc3976c6fc39445708103c6c00f88cb8acd18ad24bbaa83a72e","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99486,"confirmations":4734},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"62b29ebbed4e423a72247c116dafe39643c0f6318c4cc435973f1650407a4c06","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99481,"confirmations":4739},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"32a9965986c5922bf9b0de8fbfbac6a9eea70ba8f9a094b084123e97c918631e","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99477,"confirmations":4743},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"8e8ae844ac5a192602031ef0ae1b69aa60900ad73feb3604a0cd2042978c3f80","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","a
mount":0.05,"satoshis":5000000,"height":99476,"confirmations":4744},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"9ccc0668d3bd89be852ad45cf51c415c212cf861ca0e7b6622b6d71d139ebfd0","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99476,"confirmations":4744},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"ee2495ab86e04fb7c9a0d051df12621516d86845e72b05bc901e222366b4c8fb","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99476,"confirmations":4744},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"4ce3087fc3e3b3f8d586b2d77b4584d819130d141461a3a23c83d22d35128ecf","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99475,"confirmations":4745},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"6edbe4a746e1f84851eda54fc05e7f967367318866a65d73060847ac60497bc9","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99474,"confirmations":4746},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"c16f7f55dee528b925489e9ec4979a4a6215c9cf11b7a1db02ff822189956f0a","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99473,"confirmations":4747},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"11bb33a95f3f1c713e801754031ff4b0fa7fe17242b2c74d223dee08c2568ae4","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99473,"confirmations":4747},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"2dc4e28f322a641169afbce755db55d8cc4547771a29a4e75f0af850016f67aa","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99342,"confirmations":4878},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","
txid":"98604c684398bc399a45168d30f7ff4515da1145d53f7584d4388b3d69053b7f","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99341,"confirmations":4879},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"ba506982f94df57e2e80418e8a7393568b2892f1c01184d1ea46419c21413ee4","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99339,"confirmations":4881},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"7956be14d1e0681bec8cc8528d7fede271254cbc6ca7d34ae413748ce972182b","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99339,"confirmations":4881},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"49444806b8f9d32efd9536578dfd106e56fa5594bda37f772b7c4b5e582f971f","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99324,"confirmations":4896},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"0fbb4254adce7fc38a3391cd695061d05e43bdf2c27bdad0a4ba0ee076a966e1","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99320,"confirmations":4900},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"6f0f621ae5b071a1a3ab653ee296c426dfaf099586095606a6dcc11c89893c3a","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99316,"confirmations":4904},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"9b25d4de15729fd11cc8d9b40da4eaa3093186a7c7caf4b991bb7101fb9dd56f","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99312,"confirmations":4908},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"c75171dd9737181cde71adf9196f8fddb3710abcd038242a6f99984aba9d1d77","vout":0,"scriptPubKey":"76a9147fd21d91b20b7
13c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99307,"confirmations":4913},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"94e047f42834c829fda5f0dc2cdda88d37c67968c180f8e0bb8a61ef812f2934","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99304,"confirmations":4916},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"c56cbc57e260b418519cf43c209b90079a47c0fd50aca8671e35597cc5f6c9d7","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99301,"confirmations":4919},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"1b368d690f8f3db7239248d5140b710ea75f6a0b788c61bb434759087df9e884","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99299,"confirmations":4921},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"923ffde12287052acbeda7cd825fcb390db099dcd4a6ef42a503ccbed32aca5d","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99297,"confirmations":4923},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"8a0f23e3f8230458e299f96996fbce97859b07d6b85bfd83d2610aa8ca159c7a","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":94589,"confirmations":9631},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"5c73c06f2999b00453f5eacbcb60845ba2554a0a540860a051d55ee18a490935","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":94442,"confirmations":9778},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"fed38c710ceaf82d0ef54316df7447171d4b1ec6d499a4b231846b8c9dd33a31","vout":0,"scriptPubKey":"2102f2cdd772ab57eae35996c0d39ad34fe06304c4d3981ffe71a596634fa26f8744ac","amount":0.28400257,"satoshis":28400257,"height":94145,"confirma
tions":10075},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"bdde13adb442e0a0b2c5b7220957a2e4d3b9fbbbc47ad3523d35cd996495b608","vout":0,"scriptPubKey":"2102f2cdd772ab57eae35996c0d39ad34fe06304c4d3981ffe71a596634fa26f8744ac","amount":0.0743478,"satoshis":7434780,"height":94144,"confirmations":10076},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"c064d930a22eaa5a73d0b04201abb304d6d2dffab0f11a3f7652a16724c3d484","vout":2,"scriptPubKey":"2102f2cdd772ab57eae35996c0d39ad34fe06304c4d3981ffe71a596634fa26f8744ac","amount":0.3054992,"satoshis":30549920,"height":94137,"confirmations":10083},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"7dcb33a923f7c25fc8738eb5fc7a230455b55b7285281fc6b41dfa42db900e88","vout":0,"scriptPubKey":"2102f2cdd772ab57eae35996c0d39ad34fe06304c4d3981ffe71a596634fa26f8744ac","amount":0.95629085,"satoshis":95629085,"height":94137,"confirmations":10083},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"a25b4fb86c86c22fa127838496ae35e75c92ae30d1d80e85ed7fd6135371ddb5","vout":0,"scriptPubKey":"2102f2cdd772ab57eae35996c0d39ad34fe06304c4d3981ffe71a596634fa26f8744ac","amount":0.8299,"satoshis":82990000,"height":94137,"confirmations":10083},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"f76a8d2ebdab28f39ac76365c36aaaaa4c7cce36ac12f38f32b27548f9ddc6e4","vout":0,"scriptPubKey":"2102f2cdd772ab57eae35996c0d39ad34fe06304c4d3981ffe71a596634fa26f8744ac","amount":0.69274518,"satoshis":69274518,"height":94137,"confirmations":10083},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"fa58a094f7de0c816f1a40ab3322afded4ccdf89cbe3b6b2702ac1011062a0d2","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":3.979,"satoshis":397900000,"height":91157,"confirmations":13063},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"b27d126a997f960cdc9e4b82aac74c2c26437005e7025c1bdd188d2ea9b561d1","vout":1,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":1.11,"satoshis":111000000,"
height":90011,"confirmations":14209},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"8005ab6aaa009c48a1c43d01b21b09f8a2e6c853a3a197d46f0c0fa1344e14e1","vout":1,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":1.11,"satoshis":111000000,"height":90011,"confirmations":14209},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"15a307cd75a630718b63a28a7465e01309dc1d5c0542791fc384b35e86f30b2c","vout":1,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":1.11,"satoshis":111000000,"height":90010,"confirmations":14210},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"5a090d5dd686bed104ae13472262e7cd9d96608f74631351f1252e0d40be70d4","vout":1,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":1.11,"satoshis":111000000,"height":90010,"confirmations":14210},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"3cdf23999fa1354eded15493bda356d5829cc60a1c0d708a07f2cd8406f47328","vout":1,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":1.11,"satoshis":111000000,"height":90010,"confirmations":14210},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"be25a04b0dc9196cf9b65dff78ec8c57e58114aae398699046680e25d03fa015","vout":0,"scriptPubKey":"2102f2cdd772ab57eae35996c0d39ad34fe06304c4d3981ffe71a596634fa26f8744ac","amount":1.10944966,"satoshis":110944966,"height":89762,"confirmations":14458},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"6e975f08b1ee2a3aa02c2b96ebef588b405576acf24f4c81aff1a929085f168b","vout":0,"scriptPubKey":"2102f2cdd772ab57eae35996c0d39ad34fe06304c4d3981ffe71a596634fa26f8744ac","amount":0.97999673,"satoshis":97999673,"height":89762,"confirmations":14458}] get_utxos(testcase, 0.01, True) assert array_of_utxos_final == [{'address': 'RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW', 'txid': '6d2dbbf64d839bedece788632d6233337494d1d51247823058832a16c1cf1d92', 'vout': 0, 'scriptPubKey': '76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac', 
'amount': 0.01833945, 'satoshis': 1833945, 'confirmations': 0, 'ts': 1602181139}]
16,405
10,522
from pynito.cognitodecryptor import CognitoDecryptor
52
20
import sys from PySide2.QtGui import QPixmap, QImage from PySide2.QtWidgets import QApplication, QLabel, QPushButton, QVBoxLayout, QWidget, QFileDialog, QTextEdit, QSizePolicy, QMessageBox, QHBoxLayout from PySide2.QtCore import Slot, Qt, QStringListModel, QSize, QTimer from dbr import DynamsoftBarcodeReader dbr = DynamsoftBarcodeReader() import os import cv2 class UI_Window(QWidget): def __init__(self): QWidget.__init__(self) # The default barcode image. dir_path = os.path.dirname(os.path.realpath(__file__)) filename = os.path.join(dir_path, 'image.tif') # Create a timer. self.timer = QTimer() self.timer.timeout.connect(self.nextFrameSlot) # Create a layout. layout = QVBoxLayout() # Add a button self.btn = QPushButton("Load an image") self.btn.clicked.connect(self.pickFile) layout.addWidget(self.btn) # Add a button button_layout = QHBoxLayout() btnCamera = QPushButton("Open camera") btnCamera.clicked.connect(self.openCamera) button_layout.addWidget(btnCamera) btnCamera = QPushButton("Stop camera") btnCamera.clicked.connect(self.stopCamera) button_layout.addWidget(btnCamera) layout.addLayout(button_layout) # Add a label self.label = QLabel() self.label.setFixedSize(640, 640) pixmap = self.resizeImage(filename) self.label.setPixmap(pixmap) layout.addWidget(self.label) # Add a text area self.results = QTextEdit() self.readBarcode(filename) layout.addWidget(self.results) # Set the layout self.setLayout(layout) self.setWindowTitle("Dynamsoft Barcode Reader") self.setFixedSize(800, 800) # https://stackoverflow.com/questions/1414781/prompt-on-exit-in-pyqt-application def closeEvent(self, event): msg = "Close the app?" 
reply = QMessageBox.question(self, 'Message', msg, QMessageBox.Yes, QMessageBox.No) if reply == QMessageBox.Yes: event.accept() self.stopCamera() else: event.ignore() def readBarcode(self, filename): dbr.initLicense("Your License") results = dbr.decodeFile(filename, 0x3FF | 0x2000000 | 0x4000000 | 0x8000000 | 0x10000000) out = '' index = 0 for result in results: out += "Index: " + str(index) + "\n" out += "Barcode format: " + result[0] + '\n' out += "Barcode value: " + result[1] + '\n' out += '-----------------------------------\n' index += 1 self.results.setText(out) def resizeImage(self, filename): pixmap = QPixmap(filename) lwidth = self.label.maximumWidth() pwidth = pixmap.width() lheight = self.label.maximumHeight() pheight = pixmap.height() wratio = pwidth * 1.0 / lwidth hratio = pheight * 1.0 / lheight if pwidth > lwidth or pheight > lheight: if wratio > hratio: lheight = pheight / wratio else: lwidth = pwidth / hratio scaled_pixmap = pixmap.scaled(lwidth, lheight) return scaled_pixmap else: return pixmap def pickFile(self): self.stopCamera() # Load an image file. 
filename = QFileDialog.getOpenFileName(self, 'Open file', 'E:\\Program Files (x86)\\Dynamsoft\\Barcode Reader 7.2\\Images', "Barcode images (*)") # Show barcode images pixmap = self.resizeImage(filename[0]) self.label.setPixmap(pixmap) # Read barcodes self.readBarcode(filename[0]) def openCamera(self): self.vc = cv2.VideoCapture(0) # vc.set(5, 30) #set FPS self.vc.set(3, 640) #set width self.vc.set(4, 480) #set height if not self.vc.isOpened(): msgBox = QMessageBox() msgBox.setText("Failed to open camera.") msgBox.exec_() return self.timer.start(1000./24) def stopCamera(self): self.timer.stop() # https://stackoverflow.com/questions/41103148/capture-webcam-video-using-pyqt def nextFrameSlot(self): rval, frame = self.vc.read() frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) image = QImage(frame, frame.shape[1], frame.shape[0], QImage.Format_RGB888) pixmap = QPixmap.fromImage(image) self.label.setPixmap(pixmap) results = dbr.decodeBuffer(frame, 0x3FF | 0x2000000 | 0x4000000 | 0x8000000 | 0x10000000) out = '' index = 0 for result in results: out += "Index: " + str(index) + "\n" out += "Barcode format: " + result[0] + '\n' out += "Barcode value: " + result[1] + '\n' out += '-----------------------------------\n' index += 1 self.results.setText(out) def main(): app = QApplication(sys.argv) ex = UI_Window() ex.show() sys.exit(app.exec_()) if __name__ == '__main__': main()
5,241
1,731
import sys import logging import numpy as np logger = logging.getLogger(__name__) class TryAgainError(Exception): """ signal to skip this image(s) and try a new one """ def __init__(self, message): # Call the base class constructor with the parameters it needs Exception.__init__(self, message) def setup_logging(level): if level=='info': l=logging.INFO elif level=='debug': l=logging.DEBUG elif level=='warning': l=logging.WARNING elif level=='error': l=logging.ERROR else: l=logging.CRITICAL logging.basicConfig(stream=sys.stdout, level=l) def log_pars(pars, fmt='%8.3g',front=None): """ print the parameters with a uniform width """ s = [] if front is not None: s.append(front) if pars is not None: fmt = ' '.join( [fmt+' ']*len(pars) ) s.append( fmt % tuple(pars) ) s = ' '.join(s) logger.debug(s) class Namer(object): """ create strings with a specified front prefix """ def __init__(self, front=None, back=None): if front=='': front=None if back=='' or back=='noshear': back=None self.front=front self.back=back if self.front is None and self.back is None: self.nomod=True else: self.nomod=False def __call__(self, name): n = name if not self.nomod: if self.front is not None: n = '%s_%s' % (self.front, n) if self.back is not None: n = '%s_%s' % (n, self.back) return n def convert_run_to_seed(run): """ convert the input config file name to an integer for use as a seed """ import hashlib h = hashlib.sha256(run.encode('utf-8')).hexdigest() seed = int(h, base=16) % 2**30 logger.info("got seed %d from run %s" % (seed,run)) return seed def get_trials_nsplit(c): """ split into chunks """ from math import ceil ntrials = c['ntrials'] tmsec = c['desired_hours']*3600.0 sec_per = c['sec_per'] ntrials_per = int(round( tmsec/sec_per ) ) nsplit = int(ceil( ntrials/float(ntrials_per) )) time_hours = ntrials_per*sec_per/3600.0 logger.info("ntrials requested: %s" % (ntrials)) logger.info('seconds per image: %s sec per with rand: %s' % (c['sec_per'],sec_per)) logger.info('nsplit: %d ntrials per: 
%d time (hours): %s' % (nsplit,ntrials_per,time_hours)) return ntrials_per, nsplit, time_hours def get_trials_per_job_mpi(njobs, ntrials): """ split for mpi """ return int(round(float(ntrials)/njobs)) # # matching by row,col # def match_truth(data, truth, radius_arcsec=0.2, pixel_scale=0.263): """ get indices in the data that match truth catalog by x,y position """ radius_pixels = radius_arcsec/pixel_scale print("matching") allow=1 mdata, mtruth = close_match( data['x'], data['y'], truth['x'], truth['y'], radius_pixels, allow, ) nmatch=mdata.size ntot=data.size frac=float(nmatch)/ntot print(' matched %d/%d %.3f within ' '%.3f arcsec' % (nmatch, ntot, frac,radius_arcsec)) return mdata def close_match(t1,s1,t2,s2,ep,allow,verbose=False): """ Find the nearest neighbors between two arrays of x/y parameters ---------- x1, y1: scalar or array coordinates of a set of points. Must be same length. x2, y2: scalar or array coordinates of a second set of points. Must be same length. ep: scalar maximum match distance between pairs (pixels) allow: scalar maximum number of matches in second array to each element in first array. verbose: boolean make loud Original by Dave Johnston, University of Michigan, 1997 Translated from IDL by Eli Rykoff, SLAC modified slightly by erin sheldon """ t1=np.atleast_1d(t1) s1=np.atleast_1d(s1) t2=np.atleast_1d(t2) s2=np.atleast_1d(s2) n1=t1.size n2=t2.size matcharr=np.zeros([n1,allow],dtype='i8') matcharr.fill(-1) ind=np.arange(n2,dtype='i8') sor=t2.argsort() t2s=t2[sor] s2s=s2[sor] ind=ind[sor] runi=0 endt=t2s[n2-1] for i in range(n1): t=t1[i] tm=t-ep tp=t+ep in1=_binary_search(t2s,tm) # I can improve this? 
if in1 == -1: if (tm < endt) : in1=0 if in1 != -1: in1=in1+1 in2=in1-1 jj=in2+1 while (jj < n2): if (t2s[in2+1] < tp): in2+=1 jj+=1 else : jj=n2 if (n2 == 1) : in2=0 # hmmm if (in1 <= in2): if (n2 != 1) : check = s2s[in1:in2+1] tcheck = t2s[in1:in2+1] else : check = s2s[0] tcheck=t2s[0] s=s1[i] t=t1[i] offby=abs(check-s) toffby=abs(tcheck-t) good=np.where(np.logical_and(offby < ep,toffby < ep))[0]+in1 ngood=good.size if (ngood != 0) : if (ngood > allow) : offby=offby[good-in1] toffby=toffby[good-in1] dist=np.sqrt(offby**2+toffby**2) good=good[dist.argsort()] ngood=allow good=good[0:ngood] matcharr[i,0:ngood]=good runi=runi+ngood if verbose: print("total put in bytarr:",runi) #matches=np.where(matcharr != -1)[0] matches=np.where(matcharr != -1) #if (matches.size == 0): if (matches[0].size == 0): if verbose: print("no matches found") m1=np.array([]) m2=np.array([]) return m1,m2 m1 = matches[0] % n1 m2 = matcharr[matches] m2 = ind[m2].flatten() if verbose: print(m1.size,' matches') return m1,m2 def _binary_search(arr,x,edgedefault=False,round=False): n=arr.size if (x < arr[0]) or (x > arr[n-1]): if (edgedefault): if (x < arr[0]): index = 0 elif (x > arr[n-1]): index = n-1 else: index = -1 return index down=-1 up=n while (up-down) > 1: mid=down+(up-down)//2 if x >= arr[mid]: down=mid else: up=mid index=down if (round) and (index != n-1): if (abs(x-arr[index]) >= abs(x-arr[index+1])): index=index+1 return index
6,820
2,472
import pandas as pd from PIL import Image import requests from io import BytesIO import os import math df = pd.read_csv('C:\\Users\\v-ngdian\\Documents\\utilities\\thumbnail creator\\MetArtworksAugmented.csv') size = 512, 512 ids = [] def make_thumbnail(objectID, url, foldername): try: response = requests.get(url) image = Image.open(BytesIO(response.content)) ids.append(objectID) image.thumbnail(size, Image.ANTIALIAS) filepath = os.path.dirname(os.path.abspath(__file__)) filepath = os.path.join(filepath, foldername, str(objectID) + '.jpg') image.save(filepath, "JPEG") except Exception as e: print("Invalid URL: {}".format(url)) return def run(category, foldername): df_filtered = df[df['Object Name'] == category] print("There are {} objects in ".format(df_filtered.shape[0]) + category) counter = -1 for index, row in df_filtered.iterrows(): counter += 1 objectID = row['Object ID'] url = row['PrimaryImageUrl'] if counter%50==0: print("Working on object: " + str(counter) + " with id: " + str(objectID)) if isinstance(url, float) and math.isnan(url): next elif not isinstance(objectID, int): print("Object id: {} not an integer".format(objectID)) next else: make_thumbnail(objectID, url, foldername) run("vase", "vases") print(ids)
1,470
468
import ssl import certifi from promisedio import loop, ns, promise, timer async def example1(): context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) context.verify_mode = ssl.CERT_REQUIRED context.check_hostname = True context.load_default_certs() context.load_verify_locations( cafile=certifi.where(), capath=None, cadata=None ) for x in range(100): try: stream = await ns.open_connection(("209.131.162.45", 443), ssl=context, server_hostname="www.verisign.com", timeout=0.2) except timer.TimeoutError: pass print(stream.getsockname()) print(stream.getpeername()) await stream.write(b"GET / HTTP 1.1\n\n") print(await stream.read()) await stream.shutdown() async def example2(): stream = await ns.open_connection(("192.168.1.99", 8080), timeout=2) print(stream.getsockname()) print(stream.getpeername()) await stream.shutdown() promise.exec_async(example1()) loop.run_forever()
1,050
361
# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.service.validations.edp import job as j from sahara.tests.unit.service.validation import utils as u from sahara.utils import edp class TestJobValidation(u.ValidationTestCase): def setUp(self): super(TestJobValidation, self).setUp() self._create_object_fun = j.check_mains_libs self.scheme = j.JOB_SCHEMA def test_empty_libs(self): for job_type in [edp.JOB_TYPE_MAPREDUCE, edp.JOB_TYPE_JAVA]: self._assert_create_object_validation( data={ "name": "jar.jar", "type": job_type }, bad_req_i=(1, "INVALID_DATA", "%s flow requires libs" % job_type)) self._assert_create_object_validation( data={ "name": "jar.jar", "type": edp.JOB_TYPE_MAPREDUCE_STREAMING, }) def test_mains_unused(self): for job_type in [edp.JOB_TYPE_MAPREDUCE, edp.JOB_TYPE_JAVA]: self._assert_create_object_validation( data={ "name": "jar.jar", "type": job_type, "mains": ["lib1"], "libs": ["lib2"] }, bad_req_i=(1, "INVALID_DATA", "%s flow does not use mains" % job_type)) def test_empty_pig_mains(self): data = { "name": "pig.pig", "type": edp.JOB_TYPE_PIG, "libs": ['lib-uuid'] } self._assert_create_object_validation( data=data, bad_req_i=(1, "INVALID_DATA", "Pig flow requires main script")) data.update({"type": edp.JOB_TYPE_HIVE}) self._assert_create_object_validation( data=data, bad_req_i=(1, "INVALID_DATA", "Hive flow requires main script")) def test_overlap_libs(self): for job_type in [edp.JOB_TYPE_HIVE, edp.JOB_TYPE_PIG]: 
self._assert_create_object_validation( data={ "name": "jar.jar", "type": job_type, "libs": ["lib1", "lib2"], "mains": ["lib1"] }, bad_req_i=(1, "INVALID_DATA", "'mains' and 'libs' overlap")) def test_jar_rejected(self): self._assert_create_object_validation( data={ "name": "jar.jar", "type": "Jar", }, bad_req_i=(1, "VALIDATION_ERROR", "'Jar' is not one of " + str(edp.JOB_TYPES_ALL)))
3,232
996
import FWCore.ParameterSet.Config as cms from RecoLocalCalo.HGCalRecProducers.hgcalLayerClusters_cfi import hgcalLayerClusters as hgcalLayerClusters_ from RecoLocalCalo.HGCalRecProducers.HGCalRecHit_cfi import dEdX, HGCalRecHit from RecoLocalCalo.HGCalRecProducers.HGCalUncalibRecHit_cfi import HGCalUncalibRecHit from SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi import fC_per_ele, hgceeDigitizer, hgchebackDigitizer hgcalLayerClusters = hgcalLayerClusters_.clone() hgcalLayerClusters.timeOffset = hgceeDigitizer.tofDelay hgcalLayerClusters.plugin.dEdXweights = cms.vdouble(dEdX.weights) hgcalLayerClusters.plugin.fcPerMip = cms.vdouble(HGCalUncalibRecHit.HGCEEConfig.fCPerMIP) hgcalLayerClusters.plugin.thicknessCorrection = cms.vdouble(HGCalRecHit.thicknessCorrection) hgcalLayerClusters.plugin.fcPerEle = cms.double(fC_per_ele) hgcalLayerClusters.plugin.noises = cms.PSet(refToPSet_ = cms.string('HGCAL_noises')) hgcalLayerClusters.plugin.noiseMip = hgchebackDigitizer.digiCfg.noise_MIP
1,004
435
# Copyright (c) 2020 Intel Corporation. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM,OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. """EII Message Bus Azure Edge Runtime Bridge """ import asyncio import traceback as tb from eab.bridge_state import BridgeState def main(): """Main method. """ bs = None try: bs = BridgeState.get_instance() loop = asyncio.get_event_loop() loop.run_forever() except Exception as e: print(f'[ERROR] {e}\n{tb.format_exc()}') raise finally: if bs is not None: # Fully stop the bridge bs.stop() # Clean up asyncio loop.stop() loop.close() if __name__ == "__main__": main()
1,705
548
import logging
import time
from spaceone.inventory.libs.manager import GoogleCloudManager
from spaceone.inventory.libs.schema.base import ReferenceModel
from spaceone.inventory.connector.bigquery.sql_workspace import SQLWorkspaceConnector
from spaceone.inventory.model.bigquery.sql_workspace.cloud_service import BigQueryWorkSpace, SQLWorkSpaceResource, \
    SQLWorkSpaceResponse, ProjectModel
from spaceone.inventory.model.bigquery.sql_workspace.cloud_service_type import CLOUD_SERVICE_TYPES
from datetime import datetime

_LOGGER = logging.getLogger(__name__)


class SQLWorkspaceManager(GoogleCloudManager):
    """Collects BigQuery datasets/tables and maps them into SpaceONE resources."""

    connector_name = 'SQLWorkspaceConnector'
    cloud_service_types = CLOUD_SERVICE_TYPES

    def collect_cloud_service(self, params):
        """Collect every BigQuery dataset reachable with the given credentials.

        Args:
            params (dict):
                - options
                - schema
                - secret_data (must contain 'project_id')
                - filter
                - zones

        Returns:
            tuple: (list of SQLWorkSpaceResponse, list of ErrorResourceResponse)
        """
        _LOGGER.debug(f'** Big Query SQL Workspace START **')
        start_time = time.time()
        collected_cloud_services = []
        error_responses = []
        # Kept in scope so the except-branch below can name the failing dataset.
        data_set_id = ""
        secret_data = params['secret_data']
        project_id = secret_data['project_id']

        ##################################
        # 0. Gather All Related Resources
        # List all information through connector
        ##################################
        big_query_conn: SQLWorkspaceConnector = self.locator.get_connector(self.connector_name, **params)
        data_sets = big_query_conn.list_dataset()
        projects = big_query_conn.list_projects()
        # NOTE(review): these two are initialised once outside the loop, so a
        # console-hidden dataset reuses the previous dataset's tables/schemas
        # in its update() below — confirm this is intended.
        update_bq_dt_tables = []
        table_schemas = []
        for data_set in data_sets:
            try:
                ##################################
                # 1. Set Basic Information
                ##################################
                data_refer = data_set.get('datasetReference', {})
                data_set_id = data_refer.get('datasetId')
                dataset_project_id = data_refer.get('projectId')
                bq_dataset = big_query_conn.get_dataset(data_set_id)
                creation_time = bq_dataset.get('creationTime', '')
                last_modified_time = bq_dataset.get('lastModifiedTime')
                region = self._get_region(bq_dataset.get('location', ''))
                exp_partition_ms = bq_dataset.get('defaultPartitionExpirationMs')
                exp_table_ms = bq_dataset.get('defaultTableExpirationMs')

                # skip if dataset id is invisible (console hides ids starting with '_')
                if self._get_visible_on_console(data_set_id):
                    bq_dt_tables = big_query_conn.list_tables(data_set_id)
                    update_bq_dt_tables, table_schemas = self._get_table_list_with_schema(big_query_conn, bq_dt_tables)

                labels = self.convert_labels_format(bq_dataset.get('labels', {}))

                ##################################
                # 2. Make Base Data
                ##################################
                bq_dataset.update({
                    'name': data_set_id,
                    'project': project_id,
                    'tables': update_bq_dt_tables,
                    'table_schemas': table_schemas,
                    'region': region,
                    'visible_on_console': self._get_visible_on_console(data_set_id),
                    'matching_projects': self._get_matching_project(dataset_project_id, projects),
                    'creationTime': self._convert_unix_timestamp(creation_time),
                    'lastModifiedTime': self._convert_unix_timestamp(last_modified_time),
                    'default_partition_expiration_ms_display': self._convert_milliseconds_to_minutes(exp_partition_ms),
                    'default_table_expiration_ms_display': self._convert_milliseconds_to_minutes(exp_table_ms),
                    'labels': labels
                })
                big_query_data = BigQueryWorkSpace(bq_dataset, strict=False)

                ##################################
                # 3. Make Return Resource
                ##################################
                big_query_work_space_resource = SQLWorkSpaceResource({
                    'name': data_set_id,
                    'account': project_id,
                    'region_code': region,
                    'tags': labels,
                    'data': big_query_data,
                    'reference': ReferenceModel(big_query_data.reference())
                })

                ##################################
                # 4. Make Collected Region Code
                ##################################
                self.set_region_code(region)

                ##################################
                # 5. Make Resource Response Object
                # List of SQLWorkSpaceResponse Object
                ##################################
                collected_cloud_services.append(SQLWorkSpaceResponse({'resource': big_query_work_space_resource}))
            except Exception as e:
                # One bad dataset must not abort the whole collection run.
                _LOGGER.error(f'[collect_cloud_service] => {e}', exc_info=True)
                error_response = self.generate_resource_error_response(e, 'BigQuery', 'SQLWorkspace', data_set_id)
                error_responses.append(error_response)

        _LOGGER.debug(f'** Big Query Finished {time.time() - start_time} Seconds **')
        return collected_cloud_services, error_responses

    def _get_region(self, location):
        """Map a BigQuery location to a region code; fall back to 'global'."""
        matched_info = self.match_region_info(location)
        return matched_info.get('region_code') if matched_info else 'global'

    def _get_table_list_with_schema(self, big_conn: SQLWorkspaceConnector, bq_dt_tables):
        """Fetch full table details for each listed table.

        Returns:
            tuple: (tables with Unix timestamps converted and 'schema'
            flattened to its field list, flat list of those fields each
            tagged with its 'table_id').
        """
        update_bq_dt_tables = []
        table_schemas = []
        for bq_dt_table in bq_dt_tables:
            table_ref = bq_dt_table.get('tableReference')
            table_single = big_conn.get_tables(table_ref.get('datasetId'), table_ref.get('tableId'))
            if table_single is not None:
                creation_time = table_single.get('creationTime')
                expiration_time = table_single.get('expirationTime')
                last_modified_time = table_single.get('lastModifiedTime')
                # Epoch-millisecond strings -> datetime for display.
                table_single.update({
                    'creationTime': self._convert_unix_timestamp(creation_time),
                    'expirationTime': self._convert_unix_timestamp(expiration_time),
                    'lastModifiedTime': self._convert_unix_timestamp(last_modified_time)
                })
                _table_schemas = table_single.get('schema', {})
                # NOTE(review): tables with an empty schema are dropped from
                # the returned list entirely — confirm intended.
                if _table_schemas != {}:
                    fields = _table_schemas.get('fields', [])
                    table_single.update({'schema': fields})
                    update_bq_dt_tables.append(table_single)
                    for single_schema in fields:
                        single_schema.update({'table_id': table_ref.get('tableId')})
                        table_schemas.append(single_schema)
        return update_bq_dt_tables, table_schemas

    @staticmethod
    def _get_matching_project(project_id, projects):
        """Return ProjectModel wrappers for the entries in `projects` whose id matches."""
        _projects = []
        for project in projects:
            if project_id == project.get('id'):
                _projects.append(ProjectModel(project, strict=False))
        return _projects

    @staticmethod
    def _get_visible_on_console(dataset_id):
        # The BigQuery console hides datasets whose id starts with '_'.
        return False if dataset_id.startswith('_') else True

    @staticmethod
    def _convert_milliseconds_to_minutes(milliseconds):
        """Convert a millisecond count (str/int) to minutes; None if falsy."""
        if milliseconds:
            minutes = (int(milliseconds)/1000)/60
            return minutes
        else:
            return None

    @staticmethod
    def _convert_unix_timestamp(unix_timestamp):
        """Convert an epoch-millisecond value to datetime; None (implicit) on failure."""
        try:
            return datetime.fromtimestamp(int(unix_timestamp) / 1000)
        except Exception as e:
            _LOGGER.error(f'[_convert_unix_timestamp] {e}')
            return
8,033
2,141
# Three counting demos; each prints a greeting per step, then a stop marker.

# Count upward: 0 .. 5.
for step in range(6):
    print('Olá!', step)
print('Parei. \n')

# Count downward: 6 .. 1.
for step in range(6, 0, -1):
    print('Olá1', step)
print('Parei. \n')

# Even numbers only: 0, 2, 4.
for step in range(0, 6, 2):
    print('Olá!', step)
print('Parei. \n')
192
99
# -*- coding: utf-8 -*- """ Created on Fri Feb 26 09:11:06 2016 @author: eikes """ import ConfigParser from components import Component from result import VariableResult _config = ConfigParser.ConfigParser() _config.read('scenario.cfg') _section = 'MySection' _results = 'results' def _create_comp(index): global _config, _section connections = map(str.strip, _config.get( _section, 'comp.{0}.connections'.format(index), ).split(',')) return Component( _config.get(_section, 'comp.{0}.name'.format(index)), _config.get(_section, 'comp.{0}.type'.format(index)), _config.get(_section, 'comp.{0}.reference_values'.format(index)), connections, _config.get(_section, 'comp.{0}.replace_values'.format(index)), _config.getfloat(_section, 'comp.{0}.factor'.format(index)), ) def _create_results(): global _config, _results quantity = _config.getint(_results, 'quantity') results = [] for i in range(1, quantity+1): label = _config.get(_results, 'result.{0}.name'.format(i)) comp = _config.get(_results, 'result.{0}.comp'.format(i)) calc_type = _config.getint(_results, 'result.{0}.type'.format(i)) results.append( VariableResult(pk=i, label=label, comp_name=comp, calc_type=calc_type) ) return results LP_FILE_PATH = _config.get(_section, 'lp') TRC_FILE_PATH = _config.get(_section, 'trc') QUANTITY = _config.getint(_section, 'quantity') COMPONENTS = [ _create_comp(i) for i in range(1, QUANTITY+1) ] RESULTS = _create_results() SIMULATIONS = _config.getint(_section, 'simulations') WORKER = _config.getint(_section, 'worker') S_VALUE = float(1.5855e+07)
1,732
602
# Reads a coder count and a space-separated list of integer scores, sorts the
# scores ascending, then walks growing prefixes of the list zeroing out one
# entry per step before printing the final list and its sum.
# NOTE(review): the intended scoring rule is not documented anywhere in this
# file; the comments below describe only what the code visibly does.
ncoders = int(input("enter no. of coders : "))
l = map(int, input().split(" "))
sl = []
l = sorted(list(l))
top = 1
for rotator in range(1, ncoders):
    # Prefix of the first `rotator` (possibly already-zeroed) entries.
    sl = l[:rotator]
    if (top != ncoders):
        if (max(sl) < l[top]):
            # Prefix max is strictly below the current pivot: zero it out.
            l[l.index(max(sl))] = 0
            top = top + 1
        elif (max(sl) == l[top]):
            # Tie with the pivot: zero the max of the prefix minus its last
            # element instead — presumably to keep the tying value; confirm.
            l[l.index(max(sl[:len(sl) - 1]))] = 0
            top = top + 1
        else:
            # Prefix max exceeds the pivot: stop processing entirely.
            break
print(l)
print(sum(l))
466
189
######################################################################################################################
#  Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.                                                #
#                                                                                                                    #
#  Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance    #
#  with the License. A copy of the License is located at                                                             #
#                                                                                                                    #
#      http://www.apache.org/licenses/LICENSE-2.0                                                                    #
#                                                                                                                    #
#  or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
#  OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions    #
#  and limitations under the License.                                                                                #
######################################################################################################################

import config
from flask_restful import Resource, reqparse
import logging
from decorators import admin_api, restricted_api, private_api
import botocore
import datetime
from models import db, AmiList
import boto3
import errors
from sqlalchemy import exc
from sqlalchemy.exc import SQLAlchemyError

logger = logging.getLogger("api")
session = boto3.session.Session()
aws_region = session.region_name
ec2_client = boto3.client('ec2', aws_region, config=config.boto_extra_config())


def get_ami_info():
    """Return {ami_label: ami_id} for every active AMI registered in SOCA."""
    ami_info = {}
    for session_info in AmiList.query.filter_by(is_active=True).all():
        ami_info[session_info.ami_label] = session_info.ami_id
    return ami_info


class ManageImage(Resource):
    @admin_api
    def post(self):
        """
        Register a new EC2 AMI as DCV image on SOCA
        ---
        tags:
          - DCV
        parameters:
          - in: body
            name: body
            schema:
              required:
                - os
                - ami_id
                - ami_label
                - root_size
              properties:
                ami_id:
                  type: string
                  description: EC2 ID of the AMI
                os:
                  type: string
                  description: Windows or Linux
                ami_label:
                  type: string
                  description: Friendly name for your image
                root_size:
                  type: string
                  description: Minimum size of your EC2 AMI
        responses:
          200:
            description: Pair of user/token is valid
          401:
            description: Invalid user/token pair
        """
        parser = reqparse.RequestParser()
        parser.add_argument('ami_id', type=str, location='form')
        parser.add_argument('os', type=str, location='form')
        parser.add_argument('ami_label', type=str, location='form')
        parser.add_argument('root_size', type=str, location='form')
        args = parser.parse_args()
        ami_id = args["ami_id"]
        ami_label = str(args["ami_label"])
        os = args["os"]

        # Validate required parameters before touching AWS or the DB.
        if args["os"] is None or args["ami_label"] is None or args["ami_id"] is None or args["root_size"] is None:
            return errors.all_errors('CLIENT_MISSING_PARAMETER', "os (str), ami_id (str), ami_label (str) and root_size (str) are required.")
        if args["os"].lower() not in ["centos7", "rhel7", "amazonlinux2", "windows"]:
            return errors.all_errors('CLIENT_MISSING_PARAMETER', "os must be centos7, rhel7, amazonlinux2, or windows")

        try:
            root_size = int(args["root_size"])
        except ValueError:
            # BUG FIX: the original interpolated `root_size` here, but that
            # local is unbound when int() fails, raising NameError instead of
            # returning the intended error message.
            return errors.all_errors('IMAGE_REGISTER_ERROR', f"{args['root_size']} must be a valid integer")

        soca_labels = get_ami_info()

        # Register AMI to SOCA only if the label is not already taken.
        if ami_label not in soca_labels.keys():
            try:
                # Only AMIs in the 'available' state can be registered.
                ec2_response = ec2_client.describe_images(ImageIds=[ami_id], Filters=[{'Name': 'state', 'Values': ['available']}])
                if (len(ec2_response["Images"]) != 0):
                    new_ami = AmiList(ami_id=ami_id,
                                      ami_type=os.lower(),
                                      ami_label=ami_label,
                                      is_active=True,
                                      ami_root_disk_size=root_size,
                                      created_on=datetime.datetime.utcnow())
                    try:
                        db.session.add(new_ami)
                        db.session.commit()
                        return {"success": True, "message": f"{ami_id} registered successfully in SOCA as {ami_label}"}, 200
                    except SQLAlchemyError as e:
                        db.session.rollback()
                        logger.error(f"Failed Creating AMI {ami_label} {ami_id} {e}")
                        return errors.all_errors('IMAGE_REGISTER_ERROR', f"{ami_id} registration not successful")
                else:
                    logger.error(f"{ami_id} is not available in AWS account")
                    return errors.all_errors('IMAGE_REGISTER_ERROR', f"{ami_id} is not available in AWS account. If you just created it, make sure the state of the image is 'available' on the AWS console")
            except botocore.exceptions.ClientError as error:
                logger.error(f"Failed Creating AMI {ami_label} {ami_id} {error}")
                return errors.all_errors('IMAGE_REGISTER_ERROR', f"{ami_id} Couldn't locate {ami_id} in AWS account. Make sure you do have permission to view it")
        else:
            logger.error(f"Label already in use {ami_label}")
            return errors.all_errors('IMAGE_REGISTER_ERROR', f"Label {ami_label} already in use. Please enter a unique label")

    @admin_api
    def delete(self):
        """
        Delete an EC2 AMI registered as DCV image on SOCA
        ---
        tags:
          - DCV
        parameters:
          - in: body
            name: body
            schema:
              required:
                - ami_label
              properties:
                ami_label:
                  type: string
                  description: Friendly name for your image
        responses:
          200:
            description: Pair of user/token is valid
          401:
            description: Invalid user/token pair
        """
        parser = reqparse.RequestParser()
        parser.add_argument('ami_label', type=str, location='form')
        args = parser.parse_args()
        if args["ami_label"] is None:
            return errors.all_errors('CLIENT_MISSING_PARAMETER', "ami_label (str) is required.")

        check_session = AmiList.query.filter_by(ami_label=args["ami_label"], is_active=True).first()
        if check_session:
            # Soft delete: deactivate the row instead of removing it.
            check_session.is_active = False
            check_session.deactivated_on = datetime.datetime.utcnow()
            try:
                db.session.commit()
                logger.info(f"AMI Label {args['ami_label']} deleted from SOCA")
                return {"success": True, "message": f"{args['ami_label']} deleted from SOCA successfully"}, 200
            except exc.SQLAlchemyError as e:
                db.session.rollback()
                logger.error(f"AMI Label {args['ami_label']} delete failed {e}")
                return errors.all_errors('IMAGE_DELETE_ERROR', f"{args['ami_label']} could not have been deleted because of {e}")
        else:
            return errors.all_errors('IMAGE_DELETE_ERROR', f"{args['ami_label']} could not be found")
8,081
2,078
import argparse
from typing import List


class Solution:
    def max_profit(self, prices: List[int]) -> int:
        """Max profit with unlimited buy/sell transactions (one share at a time).

        Greedy: summing every positive day-over-day increase is equivalent to
        buying each valley and selling each peak. Returns 0 for empty or
        non-increasing price lists.
        """
        best_profit = 0
        for idx in range(0, len(prices) - 1):
            # If the price is not greater, then "sell at the peak", else buy/hold
            if prices[idx + 1] > prices[idx]:
                best_profit += prices[idx + 1] - prices[idx]
        return best_profit


def main():
    """CLI entry point: treat the positional integers as a daily price series."""
    parser = argparse.ArgumentParser(description='Process some integers.')
    # Fixed copy-pasted help text that referred to the "happy number process".
    parser.add_argument('integers', metavar='N', type=int, nargs='+',
                        help='Daily prices for the max-profit computation')
    args = parser.parse_args()
    number = args.integers
    # BUG FIX: the original called Solution().max_sub_array(number) — a method
    # that does not exist on this class — so the script always crashed.
    max_sum = Solution().max_profit(number)
    print(max_sum)


if __name__ == "__main__":
    main()
807
250
import calcWinRate as cwr


def pp( a, b, table, k):
    """Simulate k deals of hand `a` vs hand `b` on `table` and print win rates."""
    result = cwr.calc_win_rate( a, b, table, k)
    print( "{} vs {} with {}".format( cwr.card_lst(a), cwr.card_lst(b), cwr.card_lst(table)))
    print( "{:2.2%} vs {:2.2%}\n".format(result[0], result[1]))


# Cards are small integers decoded by calcWinRate.card_lst; the inline
# comments (e.g. #AQ) give the intended hands — presumably a rank/suit
# integer encoding; confirm against calcWinRate.
k= 10000 # simulate k times

# --- example 0 ---
# --- 1-draw straight vs 4-card flush
player_a = [51,43] #AQ
player_b = [52,48] #AKs
table_cards = [47,40,28] #K,J,8
pp( player_a, player_b, table_cards, k)

# --- straight vs 4-card flush
player_a = [51,43] #AQ
player_b = [52,48] #AKs
table_cards = [47,40,28,33] #K,J,8,T
pp( player_a, player_b, table_cards, k)

# --- straight vs three of kind
player_a = [51,43] #AQ
player_b = [47,46] #KK
table_cards = [48,40,26,33] #K,J,8,T
pp( player_a, player_b, table_cards, k)

# --- straight vs two pairs
player_a = [51,43] #AQ
player_b = [47,39] #KJs
table_cards = [48,40,26,33] #K,J,8,T
pp( player_a, player_b, table_cards, k)
947
481
import discord
from discord.ext import commands
from Utils.UserClass import UserClass as User

# Human-readable labels indexed by numeric permission level (0-3).
permission_message = ["Guest [Permission Level : 0]", "User [Permission Level : 1]", "Developer [Permission Level : 2]", "Owner [Permission Level : 3]"]


async def check_permission(ctx, level):
    """Gate a command behind a minimum permission level.

    Returns False when ctx.author's permission meets `level` (not blocked);
    otherwise sends an error embed and returns True (blocked).  NOTE the
    inverted sense: callers typically do `if await check_permission(ctx, n): return`.
    """
    now_user = User(ctx.author)
    if now_user.permission >= level:
        # Sufficient permission: command may proceed.
        return False
    else:
        embed = discord.Embed(title=f"User Permission Error", color=0xff0000)
        # NOTE(review): "Sented" is a typo in this user-facing footer; left
        # untouched here because changing it alters runtime output.
        embed.set_footer(text = "Sented by Koi_Bot#4999ㆍUser Permission Error")
        # Guests (level 0) hitting a member-only (level 1) command get a hint
        # (in Korean) that accepting the terms via /accept_term grants User level.
        if now_user.permission == 0 and level == 1:
            embed.add_field(name = "Suggestion", value = "/accept_term으로 약관 동의를 하시면, 'User [Permission Level : 1]' 권한을 얻어, 이 명령어를 실행 하실 수 있습니다.", inline = False)
        embed.add_field(name = "Your Permission", value = f"{str(permission_message[int(now_user.permission)])}", inline = True)
        embed.add_field(name = "Command Executable Permission", value = f"{str(permission_message[int(level)])}", inline = True)
        await ctx.respond(embed=embed)
        return True
1,085
387
import math test = [] def testPrime(num): sq = int(math.sqrt(num)) for i, factor in enumerate(test): if (i > sq): break if (num % factor == 0): return False test.append(num) return True sumPrimes = 2 for i in range(3, 2000000, 2): if not testPrime(i): continue sumPrimes+=i if (i % 10000 == 1): print("progress : ", i, sumPrimes) print (sumPrimes)
436
164
import sys import re import json import os.path import copy from mako.template import Template from clang import cindex configfile = "clangelscript.json" f = open(configfile) data = f.read() data = re.sub(r"//[^n]*n", "\n", data) config = json.loads(data) f.close() if "ObjectTypes" in config: arr = config["ObjectTypes"] config["ObjectTypes"] = {} for name in arr: config["ObjectTypes"][re.compile(name)] = arr[name] def get(name, default=None, conf=config): if name in conf: return conf[name] else: return default fir = get("FileIncludeRegex", None) fer = get("FileExcludeRegex", None) mir = get("MethodIncludeRegex", None) mer = get("MethodExcludeRegex", None) oir = get("ObjectIncludeRegex", None) oer = get("ObjectExcludeRegex", None) mfir = get("FieldIncludeRegex", None) mfer = get("FieldExcludeRegex", None) generic_regex = get("GenericWrapperRegex", None) maahr = get("MethodArgumentAutoHandleRegex", None) mrahr = get("MethodReturnAutoHandleRegex", None) fir = re.compile(fir) if fir else fir fer = re.compile(fer) if fer else fer mir = re.compile(mir) if mir else mir mer = re.compile(mer) if mer else mer oir = re.compile(oir) if oir else oir oer = re.compile(oer) if oer else oer mfir = re.compile(mfir) if mfir else mfir mfer = re.compile(mfer) if mfer else mfer maahr = re.compile(maahr) if maahr else maahr mrahr = re.compile(mrahr) if mrahr else mrahr generic_regex = re.compile(generic_regex) if generic_regex else generic_regex verbose = get("Verbose", False) doassert = get("Assert", True) keep_unknowns = get("KeepUnknowns", False) output_filename = get("OutputFile", None) funcname = get("FunctionName", "registerScripting") generic_wrappers = [] index = cindex.Index.create() clang_args = get("ClangArguments", []) #clang_args.insert(0, "-I%s/clang/include" % os.path.dirname(os.path.abspath(__file__))) new_args = [] for arg in clang_args: new_args.append(arg.replace("${ConfigFilePath}", os.path.dirname(os.path.abspath(configfile)))) clang_args = new_args tu = 
index.parse(None, clang_args, [], 13) warn_count = 0 def logWarning(msg): global warn_count warn_count += 1 if verbose: sys.stderr.write(msg + "\n") def get_type(type, cursor=None): pointer = type.kind == cindex.TypeKind.POINTER typename = "" ref = type.kind == cindex.TypeKind.LVALUEREFERENCE if type.kind == cindex.TypeKind.TYPEDEF or type.kind == cindex.TypeKind.RECORD or type.kind == cindex.TypeKind.ENUM: typename = type.get_declaration() elif pointer or ref: t2 = type.get_pointee() typename = t2.get_declaration() if typename is None or typename.kind.is_invalid(): typename = get_type(t2) elif type.kind == cindex.TypeKind.ULONG: typename = "unsigned long" elif type.kind == cindex.TypeKind.UINT: typename = "unsigned int" elif type.kind == cindex.TypeKind.USHORT: typename = "unsigned short" elif type.kind == cindex.TypeKind.CONSTANTARRAY: if cursor is None: raise Exception("Constant array, but cursor not provided so can't solve the type") typename = get_type(type.get_array_element_type()) else: typename = type.kind.name.lower() if typename is None: raise Exception("Typename was None %s" % type.kind) elif isinstance(typename, cindex.Cursor): if typename.spelling == None: raise Exception("Typename was None %s" % type.kind) fullname = [typename.spelling] cursor = typename.lexical_parent while not cursor is None and (cursor.kind == cindex.CursorKind.NAMESPACE or cursor.kind == cindex.CursorKind.CLASS_DECL): fullname.insert(0, cursor.displayname) cursor = cursor.lexical_parent typename = "::".join(fullname) elif typename == "unexposed": raise Exception("Typename is unexposed") return "%s%s" % (typename, "*" if pointer else "&" if ref else "") def is_int(literal): try: i = int(literal) return True except: try: i = int(literal, 16) return True except: pass return False objecttype_scoreboard = {} def add_use(typename): val = (0, 0) p = 0 if "*" in typename: p = 1 typename = typename[:-1] if typename in objecttype_scoreboard: val = objecttype_scoreboard[typename] 
objecttype_scoreboard[typename] = (val[0]+p, val[1]+1-p) typedef = {} def get_real_type(name): ptr = "*" in name ref = "&" in name if ptr or ref: name = name[:-1] while name in typedef: name = typedef[name] if ptr: return name + "*" if ref: return name + "&" return name def is_const(cursor): #tokens = cindex.tokenize(tu, cursor.extent) tokens = list(cindex.TokenGroup.get_tokens(tu, cursor.extent)) for token in tokens: if token.spelling == "const": return True return False as_builtins = { "unsigned long": "uint64", "unsigned int": "uint", "unsigned short": "uint16", "unsigned char": "uint8", "long": "int64", "void": "void", "double": "double", "float": "float", "char": "int8", "short": "int16", "int": "int", "long": "int64", "bool": "bool" } def get_as_type(name): ptr = "*" in name ref = "&" in name name = name.replace("*", "").replace("&", "") if name in as_builtins: if ptr: raise Exception("Built-in value type %s used as a reference type" % (as_builtins[name])) name = as_builtins[name] return "%s%s%s" % (name, "@" if ptr else "", "&" if ref else "") class Type: def __init__(self, kind): typename = get_type(kind) self.cname = typename typename = get_real_type(typename) self.resolved = typename add_use(typename) self.const = kind.is_const_qualified() get_as_type(self.resolved) def __repr__(self): return self.cname def get_as_type(self): as_type = None if "ObjectTypes" in config: for regex in config["ObjectTypes"]: if regex.search(self.cname) != None: conf = config["ObjectTypes"][regex] if "AngelScriptType" in conf: as_type = regex.sub(conf["AngelScriptType"], self.cname) break if as_type == None: as_type = get_as_type(self.resolved) return "%s%s" % ("const " if self.const else "", as_type) def is_known(self): name = self.resolved.replace("*", "").replace("&", "") if name in objecttypes: return True if name in as_builtins: return True if "ObjectTypes" in config: for regex in config["ObjectTypes"]: if regex.search(self.cname) != None: return True return False def 
get_c_type(self): return "%s%s" % ("const " if self.const else "", self.cname) def is_reference_type(name): if "ObjectTypes" in config: for regex in config["ObjectTypes"]: if regex.search(name) and "Reference" in config["ObjectTypes"][regex]: return config["ObjectTypes"][regex]["Reference"] if name in objecttypes: ot = objecttypes[name] for p in ot.parents: v = is_reference_type(p) if not v is None: return v if name in objecttype_scoreboard: score = objecttype_scoreboard[name] return score[0] > score[1] return None operatornamedict = { "-operator": "opNeg", "~operator": "opCom", "++operator": "opPreInc", "--operator": "opPreDec", "operator==": "opEquals", #"operator!=": "opEquals", "operator<": "opCmp", # "operator<=": "opCmp", # "operator>": "opCmp", # "operator>=": "opCmp", "operator++": "opPostInc", "operator--": "opPostDec", "operator+": "opAdd", "operator-": "opSub", "operator*": "opMul", "operator/": "opDiv", "operator%": "opMod", "operator&": "opAnd", "operator|": "opOr", "operator^": "opXor", "operator<<": "opShl", "operator>>": "opShr", "operator>>>": "opUShr", "operator[]": "opIndex", "operator=": "opAssign", "operator+=": "opAddAssign", "operator-=": "opSubAssign", "operator*=": "opMulAssign", "operator/=": "opDivAssign", "operator%=": "opModAssign", "operator&=": "opAndAssign", "operator|=": "opOrAssign", "operator^=": "opXorAssign", "operator<<=": "opShlAssign", "operator>>=": "opShrAssign", "operator>>>=": "opUShrAssign", } class Function(object): def __init__(self, cursor, clazz=None, behaviour=None): self.args = [] if cursor is None: return children = list(cursor.get_children()) for child in children: if child.kind == cindex.CursorKind.PARM_DECL: t = Type(child.type) t.const = is_const(child) self.args.append(t) self.name = cursor.spelling self.return_type = Type(cursor.result_type) self.clazz = clazz self.const = False self.behaviour = behaviour if self.clazz and not behaviour: start = cursor.extent.start end = cursor.extent.end i = 0 while i < 
len(children): if children[i].kind == cindex.CursorKind.PARM_DECL: start = children[i].extent.end if children[i].kind == cindex.CursorKind.COMPOUND_STMT: if i > 0: start = children[i-1].extent.end end = children[i].extent.start break i += 1 if i == len(children): break start = children[i-1].extent.end r = cindex.SourceRange.from_locations(start, end) f = open(cursor.location.file.name) f.seek(start.offset) length = end.offset-start.offset data = f.read(length) f.close() self.const = re.search(r"\s*const\s*(=\s*0)?$", data) != None if len(children) > 0 and children[0].kind != cindex.CursorKind.PARM_DECL: f = open(cursor.location.file.name) f.seek(cursor.extent.start.offset) length = children[0].extent.start.offset-cursor.extent.start.offset data = f.read(length) f.close() data = re.sub(r"%s.*" % self.name, "", data) self.return_type.const = re.search(r"\s*const\s*$", data) != None self.asname() if mir or mer: pn = self.pretty_name() if mer and mer.search(pn): raise Exception("Function matches exclusion pattern. %s" % pn) if mir and not mir.search(pn): raise Exception("Function does not match inclusion pattern. 
%s" % pn) def uses(self, typename): if self.return_type.resolved == typename: return True for t in self.args: if t.resolved == typename: return True return False def pretty_name(self): cargs = ", ".join([t.get_c_type() for t in self.args]) if self.clazz: return "%s %s::%s(%s)" % (self.return_type, self.clazz, self.name, cargs) else: return "%s %s(%s)" % (self.return_type, self.name, cargs) def asname(self): name = self.name if "operator" in name: if name not in operatornamedict: raise Exception("Operator not supported in AngelScript %s" % self.pretty_name()) name = operatornamedict[name] asargs = [] auto_handle_args = False auto_handle_return = False if maahr and maahr.search(self.pretty_name()) != None: auto_handle_args = True if mrahr and mrahr.search(self.pretty_name()) != None: auto_handle_return = True for a in self.args: asname = a.get_as_type() ref = "&" in asname if ref: asname2 = get_as_type(a.resolved)[:-1] extra = "" if not is_reference_type(asname2): # Value types can only be in or out references. 
Defaulting to in asname += "in" if "@" in asname and auto_handle_args: asname2 = asname[:-1] add = True if asname2 in objecttypes: ot = objecttypes[asname2] if "asOBJ_NOCOUNT" in ot.get_flags(): add = False if add: asname += "+" asargs.append(asname) asargs = ", ".join(asargs) if self.behaviour == "asBEHAVE_CONSTRUCT" or self.behaviour == "asBEHAVE_FACTORY": name = "void f(%s)" % (asargs) if is_reference_type(self.clazz): add = auto_handle_return if self.clazz in objecttypes: ot = objecttypes[self.clazz] if "asOBJ_NOCOUNT" in ot.get_flags(): add = False name = "%s@%s %s(%s)" % (self.clazz, "+" if add else "", self.clazz, asargs) self.behaviour = "asBEHAVE_FACTORY" elif self.behaviour == "asBEHAVE_DESTRUCT": name = "void f()" else: asname = self.return_type.get_as_type() if "@" in asname and auto_handle_return: asname2 = asname[:-1] add = True if asname2 in objecttypes: ot = objecttypes[asname2] if "asOBJ_NOCOUNT" in ot.get_flags(): add = False if add: asname += "+" name = "%s %s(%s)" % (asname, name, asargs) if self.clazz and self.const: name += " const" return name def get_generic(self): lut = { "double": "Double", "float": "Float", "uint": "DWord", "int": "DWord", "uint16": "Word", "int16": "Word", "uint8": "Byte", "int8": "Byte", "bool": "Byte" } name = self.name if "operator" in name: name = operatornamedict[name] name = name.replace("~", "tilde") + "_generic" for arg in self.args: name += "_" + arg.get_c_type().replace("&", "amp").replace("*", "star").replace(" ", "space").replace(":", "colon") if self.clazz: name = self.clazz + "_" + name func = "void %s(asIScriptGeneric *gen)\n{\n" % name asret = self.return_type.get_as_type() call = "%s(" % self.name if self.clazz: if is_reference_type(self.clazz) and self.behaviour == "asBEHAVE_CONSTRUCT": self.behaviour = "asBEHAVE_FACTORY" if self.behaviour == "asBEHAVE_FACTORY": call = "gen->SetReturnAddress(new %s(" % (self.name) elif self.behaviour == "asBEHAVE_CONSTRUCT": call = "new(gen->GetObject()) %s(" % 
self.name else: call = "static_cast<%s*>(gen->GetObject())->%s" % (self.clazz, call) for i in range(len(self.args)): if i > 0: call += ", " arg = self.args[i] t = arg.get_as_type() if t in lut: call += "gen->GetArg%s(%d)" % (lut[t], i) else: ct = arg.get_c_type() pt = "*" in ct star = "*" if not pt else "" if "&" in ct: call += "%sstatic_cast<%s%s>(gen->GetArgAddress(%d))" % (star, arg.get_c_type().replace("&", ""), star, i) else: call += "%sstatic_cast<%s%s>(gen->GetArgObject(%d))" % (star, arg.get_c_type(), star, i) call += ")" if self.behaviour == "asBEHAVE_FACTORY": call += ")" asret2 = asret.replace("const ", "").strip() if asret2 in lut: func += "\tgen->SetReturn%s(%s);\n" % (lut[asret2], call) elif asret == "void": func += "\t" + call + ";\n" else: ct = self.return_type.get_c_type() pt = "*" in ct star = "*" if not pt else "" if pt: func += "\tgen->SetReturnAddress(%s);\n" % (call) elif "&" in ct: func += "\tgen->SetReturnAddress((void*)&%s);\n" % (call) else: func += "\t" + self.return_type.get_c_type().replace("&", "").replace("const ", "") + " ret = %s;\n" % call func += "\tgen->SetReturnObject(&ret);\n" #func += "\t" + self.return_type.get_c_type() + " ret = %s;\n" % call #func += "\tnew(gen->GetAddressOfReturnLocation()) %s(ret);\n" % self.return_type.get_c_type().replace("&", "") func += "}\n" if func not in generic_wrappers: generic_wrappers.append(func) return "asFUNCTION(%s), asCALL_GENERIC" % (name) def get_register_string(self): global generic_wrappers cargs = ", ".join([at.get_c_type() for at in self.args]) if self.clazz == None: callconv = "asCALL_CDECL" call = "asFUNCTIONPR(%s, (%s), %s), %s" % (self.name, cargs, self.return_type.get_c_type(), callconv) if generic_regex and generic_regex.search(self.pretty_name()): call = self.get_generic() return _assert("engine->RegisterGlobalFunction(\"%s\", %s)" % (self.asname(), call)) else: const = " const" if self.const else "" call = "asMETHODPR(%s, %s, (%s)%s, %s), asCALL_THISCALL" % (self.clazz, 
self.name, cargs, const, self.return_type.get_c_type()) if (generic_regex and generic_regex.search(self.pretty_name())) or \ self.behaviour == "asBEHAVE_CONSTRUCT" or \ self.behaviour == "asBEHAVE_DESTRUCT" or \ self.behaviour == "asBEHAVE_FACTORY": call = self.get_generic() if self.behaviour == None: return _assert("engine->RegisterObjectMethod(\"%s\", \"%s\", %s)" % (self.clazz, self.asname(), call)) else: name = self.asname() return _assert("engine->RegisterObjectBehaviour(\"%s\", %s, \"%s\", %s)" % (self.clazz, self.behaviour, name, call)) def is_pure_virtual(cursor): # TODO: Use iterator here children = list(cursor.get_children()) start = cursor.extent.start end = cursor.extent.end while len(children) != 0: child = children[-1] children = list(child.get_children()) start = child.extent.end f = open(cursor.location.file.name) f.seek(start.offset) length = end.offset-start.offset data = f.read(length) f.close() return re.search(r"=\s*0\s*$", data) != None objectindex = 0 class ObjectType: def add_field(self, children, array): for child in children: if child.kind == cindex.CursorKind.CXX_BASE_SPECIFIER: self.add_fields(child.get_reference().get_children(), array) if child.kind == cindex.CursorKind.FIELD_DECL: array.append(child) def __init__(self, cursor, children, name): global objectindex self.cursor = cursor self.name = name self.flags = {"asOBJ_APP_CLASS": True} fields = [] self.parents = [] self.index = objectindex objectindex += 1 self.has_pure_virtuals = False access = cindex.AccessSpecifier.PRIVATE if cursor.kind == cindex.CursorKind.CLASS_DECL else cindex.AccessSpecifier.PUBLIC idx = access.from_param; for child in children: if child.kind == cindex.CursorKind.CXX_BASE_SPECIFIER: c = child.get_resolved_cursor() parentname = c.spelling if parentname in objecttypes: ot = objecttypes[parentname] self.parents.extend(ot.parents) self.parents.append(parentname) toadd = [] for om in objectmethods: if om.clazz == parentname: f = copy.deepcopy(om) f.clazz = 
self.name toadd.append(f) objectmethods.extend(toadd) toadd = [] for of in objectfields: if of.clazz == parentname: f = copy.deepcopy(of) f.clazz = self.name toadd.append(f) objectfields.extend(toadd) continue if child.kind == cindex.CursorKind.CXX_ACCESS_SPEC_DECL: access = child.access_specifier continue if not access == cindex.AccessSpecifier.PUBLIC: continue if child.kind == cindex.CursorKind.CXX_METHOD: if child.spelling == "operator=": self.flags["asOBJ_APP_CLASS_ASSIGNMENT"] = True if child.is_static_method(): # TODO logWarning("Skipping member method %s::%s as it's static" % (self.name, child.spelling)) continue try: objectmethods.append(Function(child, self.name)) except Exception as e: logWarning("Skipping member method %s::%s - %s" % (self.name, child.spelling, e)) if is_pure_virtual(child): self.has_pure_virtuals = True elif child.kind == cindex.CursorKind.CONSTRUCTOR: self.flags["asOBJ_APP_CLASS_CONSTRUCTOR"] = True try: f = Function(child, self.name, "asBEHAVE_CONSTRUCT") behaviours.append(f) except Exception as e: logWarning("Skipping constructor %s::%s - %s" % (self.name, child.spelling, e)) elif child.kind == cindex.CursorKind.DESTRUCTOR: self.flags["asOBJ_APP_CLASS_DESTRUCTOR"] = True try: f = Function(child, self.name, "asBEHAVE_DESTRUCT") behaviours.append(f) except Exception as e: logWarning("Skipping destructor %s::%s - %s" % (self.name, child.spelling, e)) elif child.kind == cindex.CursorKind.FIELD_DECL: try: type = Type(child.type) objectfields.append(ObjectField(self.name, child.spelling, type)) except Exception as e: logWarning("Skipping member field %s::%s - %s" % (self.name, child.spelling, e)) elif child.kind == cindex.CursorKind.TYPEDEF_DECL: name, kind = get_typedef(child) if name: typedef[name] = kind logWarning("Typedefs within classes are not supported by AngelScript") else: logWarning("Unhandled cursor: %s, %s" % (child.displayname, child.kind)) if "asOBJ_APP_CLASS_DESTRUCTOR" not in self.flags: self.flags["asOBJ_POD"] = True 
self.add_field(children, fields) if len(fields): try: child = fields.pop(0) t = get_real_type(get_type(child.type, child)) allEqual = True for field in fields: t2 = get_real_type(get_type(field.type, field)) if t2 != t: break if allEqual: if t == "float": self.flags["asOBJ_APP_CLASS_ALLFLOATS"] = True elif t == "int" or t == "unsigned int": self.flags["asOBJ_APP_CLASS_ALLINTS"] = True else: logWarning("%s does not have all fields of equal type. Trying ALLINTS anyway" % (self.name, t)) self.flags["asOBJ_APP_CLASS_ALLINTS"] = True except: pass def get_flags(self): flags = [] if is_reference_type(self.name) else list(self.flags) if "ObjectTypes" in config: for regex in config["ObjectTypes"]: if regex.search(self.name): conf = config["ObjectTypes"][regex] if "Flags" in conf: flags = conf["Flags"] if "ExtraFlags" in conf: flags.extend(conf["ExtraFlags"]) if not is_reference_type(self.name): if "asOBJ_NOCOUNT" in flags: flags.remove("asOBJ_NOCOUNT") return flags def get_register_string(self): flags = self.get_flags() f = "%s%s%s" % ("asOBJ_REF" if is_reference_type(self.name) else "asOBJ_VALUE", "|" if len(flags) else "", "|".join(flags)) if not is_reference_type(self.name): return _assert("engine->RegisterObjectType(\"%s\", sizeof(%s), %s)" % (self.name, self.name, f)) ret = _assert("engine->RegisterObjectType(\"%s\", 0, %s)" % (self.name, f)) for parent in self.parents: extra = "_nocount" if "asOBJ_NOCOUNT" in flags else "" ret += "\n\t" + _assert("engine->RegisterObjectBehaviour(\"%s\", asBEHAVE_REF_CAST, \"%s@ f()\", asFUNCTION((refCast%s<%s,%s>)), asCALL_CDECL_OBJLAST)" % (parent, self.name, extra, parent, self.name)) ret += "\n\t" + _assert("engine->RegisterObjectBehaviour(\"%s\", asBEHAVE_IMPLICIT_REF_CAST, \"%s@ f()\", asFUNCTION((refCast%s<%s,%s>)), asCALL_CDECL_OBJLAST)" % (self.name, parent, extra, self.name, parent)) if not "asOBJ_NOCOUNT" in flags: f = Function(None) f.name = "AddRef" f.clazz = self.name f.const = False t = 
cindex.Type(cindex.TypeKind.VOID.from_param()) f.behaviour = "asBEHAVE_ADDREF" f.return_type = Type(t) behaviours.append(f) f = copy.deepcopy(f) f.name = "DelRef" f.behaviour = "asBEHAVE_RELEASE" behaviours.append(f) return ret class ObjectField: def __init__(self, clazz, name, type): self.clazz = clazz self.name = name self.type = type pn = self.pretty_name() if mfer and mfer.search(pn): raise Exception("Matches exclude pattern") if mfir and not mfir.search(pn): raise Exception("Doesn't match include pattern") def uses(self, typename): return self.type.resolved == typename def pretty_name(self): return "%s %s::%s" % (self.type, self.clazz, self.name) def get_register_string(self): return _assert("engine->RegisterObjectProperty(\"%s\", \"%s %s\", asOFFSET(%s,%s))" % (self.clazz, self.type, self.name, self.clazz, self.name)) typedefs = [] enums = [] objecttypes = {} functions = [] objectmethods = [] objectfields = [] includes = [] behaviours = [] def _assert(line): if doassert: return "RegisterVerifyAPI(%s);" % line else: return "%s;" % line def get_typedef(cursor): #tokens = cindex.tokenize(tu, cursor.extent) tokens = list(cindex.TokenGroup.get_tokens(tu, cursor.extent)) good = True if len(tokens) >= 4: for x in tokens[1:-2]: if x.kind != cindex.TokenKind.IDENTIFIER and x.kind != cindex.TokenKind.KEYWORD: good = False break else: good = False if good: kind = " ".join([t.spelling for t in tokens[1:len(tokens)-2]]) name = tokens[len(tokens)-2].spelling else: data = "" for token in tokens: data += token.spelling + " " return None, data return name, kind def add_include(filename): if not filename in includes and filename.endswith(".h"): includes.append(filename) def walk(cursor): global typedefs global enums global objecttypes global functions global objectmethods for child in cursor.get_children(): if not child.location.file: continue filename = child.location.file.name if child.kind == cindex.CursorKind.TYPEDEF_DECL: name, kind = get_typedef(child) if name: 
typedef[name] = kind if fer and fer.search(filename): continue if fir and not fir.search(filename): continue if child.kind == cindex.CursorKind.MACRO_DEFINITION: tokens = list(cindex.TokenGroup.get_tokens(tu, child.extent)) if tokens[0].kind == cindex.TokenKind.IDENTIFIER and tokens[1].kind == cindex.TokenKind.LITERAL and is_int(tokens[1].spelling): define = _assert("engine->RegisterEnumValue(\"HASH_DEFINES\", \"%s\", %s)" % (tokens[0].spelling, tokens[1].spelling)) if define not in enums: enums.append(define) elif child.kind == cindex.CursorKind.FUNCTION_DECL: try: f = Function(child) if "operator" in f.name: raise Exception("Non member operator functions not supported currently") else: functions.append(f) add_include(filename) except Exception as e: logWarning("Skipping function %s - %s" % (child.spelling, e)) elif child.kind == cindex.CursorKind.TYPEDEF_DECL: name, kind = get_typedef(child) if name: typedef[name] = kind if get_real_type(kind) not in as_builtins: logWarning("Typedef %s = %s can't be registered as it doesn't resolve to an AngelScript builtin type" % (name, kind)) else: typedefs.append(_assert("engine->RegisterTypedef(\"%s\", \"%s\")" % (name, get_real_type(kind)))) else: logWarning("Typedef too complex, skipping: %s" % name) elif child.kind == cindex.CursorKind.CLASS_DECL or child.kind == cindex.CursorKind.STRUCT_DECL: children = list(child.get_children()) if len(children) == 0: continue if oer and oer.search(child.spelling): continue if oir and not oir.search(child.spelling): continue classname = child.spelling if len(classname) == 0: classname = child.displayname if len(classname) == 0: logWarning("Skipping class or struct defined at %s" % cursor.extent) continue if classname in objecttypes: # TODO: different namespaces logWarning("Skipping type %s, as it is already defined" % classname) o = ObjectType(child, children, classname) objecttypes[classname] = o add_include(filename) elif child.kind == cindex.CursorKind.MACRO_INSTANTIATION or \ 
child.kind == cindex.CursorKind.CONVERSION_FUNCTION or \ child.kind == cindex.CursorKind.INCLUSION_DIRECTIVE or \ child.kind == cindex.CursorKind.UNEXPOSED_DECL: continue # TODO: Make sure this is what we want elif child.kind == cindex.CursorKind.CONSTRUCTOR or \ child.kind == cindex.CursorKind.CXX_METHOD: continue else: logWarning("Unhandled cursor: %s, %s" % (child.displayname, child.kind)) # Removes usage of object types that are used both as a reference and a value type def mismatch_filter(source, toremove): toadd =source ret = [] while len(toadd): curr = toadd.pop(0) if curr.uses(toremove): logWarning("\t%s" % curr.pretty_name()) else: ret.append(curr) return ret def remove_ref_val_mismatches(): global functions global objectmethods global behaviours for key in objecttype_scoreboard: isref = is_reference_type(key) ref, val = objecttype_scoreboard[key] if (isref and val == 0) or (not isref and ref == 0): continue logWarning("\"%s\" is used both as a reference type (%d) and a value type (%d). 
The following will be removed:" % (key, ref, val)) toremove = "%s%s" % (key, "*" if not isref else "") functions = mismatch_filter(functions, toremove) objectmethods = mismatch_filter(objectmethods, toremove) behaviours = mismatch_filter(behaviours, toremove) def unknown_filter(source): toadd = source ret = [] while len(toadd): keep = True curr = toadd.pop(0) broken = None for t in curr.args: if not t.is_known(): broken = t.resolved keep = False if not curr.return_type.is_known(): broken = curr.return_type.resolved keep = False if not keep: logWarning("Removing %s as it's using an unknown type %s [disable with -ku]" % (curr.pretty_name(), broken)) else: ret.append(curr) return ret def remove_unknowns(): global functions global objectmethods global behaviours functions = unknown_filter(functions) objectmethods = unknown_filter(objectmethods) behaviours = unknown_filter(behaviours) def dup_filter(source): toadd = source ret = [] names = [] while len(toadd): keep = True curr = toadd.pop(0) pn = curr.pretty_name() if pn in names: logWarning("Removing duplicate function %s" % pn) else: ret.append(curr) names.append(pn) return ret def remove_duplicates(): global functions global objectmethods global behaviours functions = dup_filter(functions) objectmethods = dup_filter(objectmethods) behaviours = dup_filter(behaviours) def remove_reference_destructors(): global behaviours toadd = behaviours behaviours = [] while len(toadd): curr = toadd.pop(0) if is_reference_type(curr.clazz) and curr.behaviour == "asBEHAVE_DESTRUCT": logWarning("Removing destructor for reference type %s" % curr.clazz) else: behaviours.append(curr) def remove_pure_virtual_constructors(): global behaviours toadd = behaviours behaviours = [] while len(toadd): curr = toadd.pop(0) virt = False if curr.clazz in objecttypes: virt = objecttypes[curr.clazz].has_pure_virtuals if virt and (curr.behaviour == "asBEHAVE_CONSTRUCT" or curr.behaviour == "asBEHAVE_FACTORY"): logWarning("Removing constructor for type %s 
which has pure virtual members" % curr.clazz) else: behaviours.append(curr) walk(tu.cursor) # File processed, do some post processing remove_ref_val_mismatches() if not keep_unknowns: remove_unknowns() remove_duplicates() remove_reference_destructors() remove_pure_virtual_constructors() if output_filename != None: output_filename = output_filename.replace("${this_file_path}", os.path.dirname(os.path.abspath(configfile))) ot = [objecttypes[o] for o in objecttypes] ot.sort(cmp=lambda a, b: cmp(a.index, b.index)) for diag in tu.diagnostics: logWarning("clang had the following to say: %s" % (diag.spelling)) objectTypeStrings = [] for o in ot: objectTypeStrings.append(o.get_register_string()) typeDefStrings = [] for o in typedefs: typeDefStrings.append(o.get_register_string()) functionStrings = [] for o in functions: functionStrings.append(o.get_register_string()) behaviourStrings = [] for o in behaviours: behaviourStrings.append(o.get_register_string()) objectMethodStrings = [] for o in objectmethods: objectMethodStrings.append(o.get_register_string()) objectFieldStrings = [] for o in objectfields: objectFieldStrings.append(o.get_register_string()) tpl = Template(filename='ScriptBind.mako') rendered = tpl.render( genericWrappers=generic_wrappers, funcName=funcname, includes=includes, objectTypes=objectTypeStrings, typeDefs=typeDefStrings, hashDefines=_assert("engine->RegisterEnum(\"HASH_DEFINES\")"), enums="", functions=functionStrings, behaviours=behaviourStrings, objectMethods=objectMethodStrings, objectFields=objectFieldStrings) with open(output_filename, "w") as f: f.write(rendered) sys.stderr.write("Finished with %d warnings\n" % warn_count)
37,403
11,560
"""
Demo that exercises storing data through the ORM.

Prepare the database first:
    mysql -u root -p < schema.sql
"""
import asyncio

import orm
from models import User, Blog, Comment


async def test(loop):
    """Open the connection pool and fetch every User row."""
    await orm.create_pool(loop, user='www-data', password='www-data', db='awesome')
    # u = User(id='2', name='Test1', email='test1@example.com',
    #          passwd='1234567890', image='about:blank')
    # await u.save()
    users = await User.findAll()
    print('sql save success! %s' % users)


if __name__ == '__main__':
    event_loop = asyncio.get_event_loop()
    event_loop.run_until_complete(test(event_loop))
    print('Test finished')
    event_loop.close()
573
219
import avro.schema
import json
import fastavro

# Avro schema for a "Meme" record.
#
# BUGFIX: for a ["null", X] union the default must be JSON null (Python None),
# not the STRING "null" — a string default either makes every field default to
# the literal text "null" or is rejected outright by strict schema parsers,
# because the default has to conform to the first branch of the union.
SCHEMA = {
    "namespace": "avg_obj",
    "type": "record",
    "name": "Meme",
    "fields": [
        {"name": "user", "type": {
            "type": "record",
            "name": "PostUser",
            "fields": [
                {"name": "user_id", "type": "string"},
                {"name": "first_name", "type": ["null", "string"], "default": None},
                {"name": "last_name", "type": ["null", "string"], "default": None},
                {"name": "user_type",
                 "type": ["null", {"type": "enum",
                                   "name": "UserType",
                                   "symbols": ["FREE", "REGULAR", "PREMIUM"]}],
                 "default": None},
            ]}},
        {"name": "title", "type": ["null", "string"], "default": None},
        {"name": "content", "type": ["null", "bytes"], "default": None},
        {"name": "top_string", "type": ["null", "string"], "default": None},
        # NOTE(review): "botom_string" looks like a typo for "bottom_string",
        # but renaming the field would break existing readers — confirm first.
        {"name": "botom_string", "type": ["null", "string"], "default": None},
        # NOTE(review): per the Avro spec a union default must match the FIRST
        # branch ("null" here), so 0 is technically invalid; kept as-is to
        # preserve the intended default of 0 — consider ["long", "null"] instead.
        {"name": "likes", "type": ["null", "long"], "default": 0},
        {"name": "hates", "type": ["null", "long"], "default": 0},
    ]
}

avro_schema = fastavro.parse_schema(SCHEMA)
1,358
403
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Ask the user for their name, then greet them (prompts are in Slovene)."""

# Read the name from standard input ("What is your name?").
ime = input("Kako ti je ime? ")

# Print the greeting ("Hello, <name>!").
print(f"Pozdravljen_a, {ime}!")
210
97
'''
# Linear Regression: understanding loss functions in linear regression
#----------------------------------
#
# This script shows how to use TensorFlow (1.x) to solve linear regression,
#   y = Ax + b
# comparing an L2 loss against an L1 loss on the same problem.
#
# We use the iris data, specifically:
#   y = Sepal Length
#   x = Petal Width
'''
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets
from tensorflow.python.framework import ops

#%%
# ---------- L2 Loss ----------
ops.reset_default_graph()
sess = tf.Session()

# Load the data
# iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)]
iris = datasets.load_iris()
x_vals = np.array([x[3] for x in iris.data])
y_vals = np.array([y[0] for y in iris.data])

# Declare batch size
batch_size = 25

# Initialize placeholders
x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)
y_ = tf.placeholder(shape=[None, 1], dtype=tf.float32)

# Create variables for linear regression
A = tf.Variable(tf.random_normal(shape=[1, 1]))
b = tf.Variable(tf.random_normal(shape=[1, 1]))

# Declare model operations
y = tf.add(tf.matmul(x_data, A), b)

# Declare L2 loss: mean of (y_ - y)^2
loss = tf.reduce_mean(tf.square(y_ - y))

# Declare optimizer
op = tf.train.GradientDescentOptimizer(0.4)
train_step = op.minimize(loss)

# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)

# Training loop
loss_vec_l2 = []
for i in range(100):
    # Randomly pick batch_size indices out of range(len(x_vals))
    rand_index = np.random.choice(len(x_vals), size=batch_size)
    rand_x = np.transpose([x_vals[rand_index]])
    rand_y = np.transpose([y_vals[rand_index]])
    sess.run(train_step, feed_dict={x_data: rand_x, y_: rand_y})
    temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_: rand_y})
    loss_vec_l2.append(temp_loss)
    if (i+1) % 25 == 0:
        print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)) + ' b = ' + str(sess.run(b)))
        print('Loss = ' + str(temp_loss))

#%%
# ---------- L1 Loss ----------
ops.reset_default_graph()

# Create graph
sess = tf.Session()

# Load the data (same extraction as the L2 section)
iris = datasets.load_iris()
x_vals = np.array([x[3] for x in iris.data])
y_vals = np.array([y[0] for y in iris.data])

# Declare batch size and number of iterations
batch_size = 25
learning_rate = 0.4  # Will not converge with learning rate at 0.4
iterations = 100

# Initialize placeholders
x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)

# Create variables for linear regression
A = tf.Variable(tf.random_normal(shape=[1, 1]))
b = tf.Variable(tf.random_normal(shape=[1, 1]))

# Declare model operations
model_output = tf.add(tf.matmul(x_data, A), b)

# Declare L1 loss: mean absolute error
loss_l1 = tf.reduce_mean(tf.abs(y_target - model_output))

# Declare optimizer (built before variable init so any slot variables exist)
my_opt_l1 = tf.train.GradientDescentOptimizer(learning_rate)
train_step_l1 = my_opt_l1.minimize(loss_l1)

# Initialize variables
# CONSISTENCY FIX: use global_variables_initializer() as in the L2 section;
# initialize_all_variables() is the deprecated pre-0.12 spelling.
init = tf.global_variables_initializer()
sess.run(init)

# Training loop
loss_vec_l1 = []
for i in range(iterations):
    rand_index = np.random.choice(len(x_vals), size=batch_size)
    rand_x = np.transpose([x_vals[rand_index]])
    rand_y = np.transpose([y_vals[rand_index]])
    sess.run(train_step_l1, feed_dict={x_data: rand_x, y_target: rand_y})
    temp_loss_l1 = sess.run(loss_l1, feed_dict={x_data: rand_x, y_target: rand_y})
    loss_vec_l1.append(temp_loss_l1)
    if (i+1) % 25 == 0:
        print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)) + ' b = ' + str(sess.run(b)))

#%%
# Plot both loss curves over training steps
plt.plot(loss_vec_l1, 'k-', label='L1 Loss')
plt.plot(loss_vec_l2, 'r--', label='L2 Loss')
plt.title('L1 and L2 Loss per Generation')
plt.xlabel('Generation')
plt.ylabel('Loss')  # BUGFIX: axis shows both losses; was mislabelled 'L1 Loss'
plt.legend(loc='upper right')
plt.show()
3,699
1,490
#pysh: shell in python import sys cmdlist = ['start','exit','cd','md','ls','pd','cf','cl'] convert = [] waiting = 0 print 'pysh 1.0.5 19.03.11 #6. type start to enter, exit to leave.' paths = ['pysh/'] direct = 'pysh/' added = [] entered = raw_input(': ') if entered == 'start': while entered != ['exit']: entered = raw_input('{} '.format(direct)) entered = entered.split() for x in entered: if x in cmdlist: if waiting == 0: if x == 'ls': for i in paths: if i.startswith(direct) and len(i) > len(direct): temp = len(direct) splitted = i[temp:].split('/') if len(splitted) > 1 and (splitted[0] + '/') not in added: print splitted[0] + '/' added.append(splitted[0] + '/') elif len(splitted) < 2 and splitted[0] not in added: print splitted[0] added.append(splitted[0]) else: pass else: pass elif x == 'pd': print direct elif x == 'cd': waiting = 1 elif x == 'md': waiting = 2 elif x == 'cf': waiting = 3 elif x == 'start': print 'already in pysh' elif x == 'cl': sys.stdout.write('\x1b[2J\x1b[H') else: break else: print 'pysh: consecutive cmd {}'.format(x) else: if waiting == 1: if x == '..': direct = direct[:-1].rsplit('/',1)[0] + '/' else: if direct + x + '/' in paths: direct = direct + x + '/' elif x.endswith('/'): if direct + x in paths: direct = direct + x else: print 'pysh: directory \'{}\' not found'.format(x) else: print 'pysh: can\'t cd to file \'{}\''.format(x) waiting = 0 elif waiting == 2: if x.endswith('/'): paths.append(direct + x) else: paths.append(direct + x + '/') waiting = 0 elif waiting == 3: if x.endswith('/'): paths.append(direct + x - '/') else: paths.append(direct + x) waiting = 0 else: print 'pysh: {} not found.'.format(x) break else: print 'startup: {} not found'.format(entered)
3,258
800
from idautils import * from idaapi import * from idc import * import urllib2 def ida_set_function_colour(function_address, colour): idc.set_color(function_address, CIC_FUNC, colour) def ida_get_function_colour(function_address): function = idaapi.get_func(function_address) if not function: return 0 return function.color def ida_func_exists(function_address): for segment in Segments(): # get all functions for function_ea in Functions(segment, SegEnd(segment)): if function_address == function_ea: return True return False def downloadFile(url): response = urllib2.urlopen(url) html = response.read() return html def toAddressList(html): lines = html.split('\n') ret = [] for line in lines: addr = line.split(" ")[0].strip() if len(addr) > 0: ret.append(addr) return ret class FunctionData(): def __init__(self): self.bIsImpl = False self.bIsStub = False self.bIsCovered = False def ColourName(self): if self.bIsImpl: return "Decompiled" elif self.bIsStub and self.bIsCovered: return "Covered stub" elif self.bIsStub: return "Stub" else: return "Covered" def Colour(self): if self.bIsImpl: return 0xEEFFF0 #0xB4DED2 elif self.bIsStub and self.bIsCovered: # Covered and stubbed return 0xC57AAF elif self.bIsStub: # None covered stub return 0xD2B4DE else: # Coverage only case return 0xA569BD def LineToInt(line): line = line.strip(); return long(line) def EnsureKey(address, dict): if not dict.has_key(address): dict[address] = FunctionData() def AddDecompiled(address, dict): EnsureKey(address, dict) dict[address].bIsImpl = True def AddStubbed(address, dict): EnsureKey(address, dict) dict[address].bIsStub = True def AddCovered(address, dict): print "Add covered " + asHex(address) EnsureKey(address, dict) dict[address].bIsCovered = True def asHex(value): return (hex(value).rstrip("L") or "0").upper() def sync_function_colour(address, functionData): if (ida_func_exists(address)): # Everything else is open season colourToSet = functionData.Colour() if ida_get_function_colour(address) == 
colourToSet: print asHex(address) + " already set to " + functionData.ColourName() + "(" + asHex(colourToSet) + ")" else: print "Set " + asHex(address) + " to " + functionData.ColourName() + "(" + asHex(colourToSet) + ")" ida_set_function_colour(address, colourToSet) else: print asHex(address) + " function not found in IDB!" def main(): functionDataDict = {} with open('C:\GOG Games\Abes Oddysee\decompiled_functions.txt', 'r') as f: for line in f: AddDecompiled(LineToInt(line), functionDataDict) with open('C:\GOG Games\Abes Oddysee\stubbed_functions.txt', 'r') as f: for line in f: AddStubbed(LineToInt(line), functionDataDict) #funcsWithCoverage = toAddressList(downloadFile("https://gist.githubusercontent.com/paulsapps/ea894a929f02c7bb7c931af12ad08151/raw/38cf5fcd0f8ba6b27a2a08043f81be7f8b34b4e4/gistfile1.txt")) #for func in funcsWithCoverage: # print "func is " + func # AddCovered(int(func, 16), functionDataDict) for address in functionDataDict.iterkeys(): data = functionDataDict[address] sync_function_colour(address, data) if __name__ == '__main__': main()
3,813
1,355
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.constant.ParamConstants import *


class AlipayCommerceEducateTuitioncodeMonitorCreateModel(object):
    """Request model for alipay.commerce.educate.tuitioncode.monitor.create.

    A plain data holder with four optional attributes and the standard
    to_alipay_dict / from_alipay_dict (de)serialisation helpers.
    """

    # All serialisable attribute names, driving both helpers below.
    _FIELD_NAMES = ('bank_type', 'login_account', 'out_apply_id', 'parent_no')

    def __init__(self):
        self._bank_type = None
        self._login_account = None
        self._out_apply_id = None
        self._parent_no = None

    @property
    def bank_type(self):
        return self._bank_type

    @bank_type.setter
    def bank_type(self, value):
        self._bank_type = value

    @property
    def login_account(self):
        return self._login_account

    @login_account.setter
    def login_account(self, value):
        self._login_account = value

    @property
    def out_apply_id(self):
        return self._out_apply_id

    @out_apply_id.setter
    def out_apply_id(self, value):
        self._out_apply_id = value

    @property
    def parent_no(self):
        return self._parent_no

    @parent_no.setter
    def parent_no(self, value):
        self._parent_no = value

    def to_alipay_dict(self):
        """Serialise every truthy attribute, delegating to nested models."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict, or return None for a falsy input."""
        if not d:
            return None
        o = AlipayCommerceEducateTuitioncodeMonitorCreateModel()
        for name in AlipayCommerceEducateTuitioncodeMonitorCreateModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
2,499
811
import unittest

from code import instance as i
from code import datamapping as dm
from code import greedyfirst as gf
from code import algorithm as a
from code import baseobjects as bo
from code import tabu


class TestTabuSpecific(unittest.TestCase):
    """Unit tests for individual TabuSearch helpers on a small instance."""

    def setUp(self):
        # Alternative datasets used during development:
        #   ./tests/cvrp2.test, ./tests/ulysses-n16-k3.vrp,
        #   ./tests/cvrp3.test, ./tests/P-n19-k2.vrp, ./tests/E-n101-k14.vrp
        raw_data = dm.Importer()
        raw_data.import_data("./tests/E-n23-k3.vrp")
        data = dm.DataMapper(raw_data)
        self.instance = i.ProblemInstance(data)
        self.solution = a.Solution(self.instance)
        gf.GreedyFirst(self.solution.solution).run(sort=True)
        self.solution.value = self.solution.eval()
        self.tabu_search = tabu.TabuSearch(self.solution, 100)

    def test_deep_copy(self):
        # best_instance must be a deep copy: mutating the working copy's
        # first route stop must not leak into the best-so-far instance.
        working = self.tabu_search.instance.solution.fleet[0].route[0]
        best = self.tabu_search.best_instance.solution.fleet[0].route[0]
        self.assertEqual(working.id, best.id)
        working.id = 666
        self.assertNotEqual(working.id, best.id)

    def test_get_sorted_edges(self):
        # Edges come back ordered by ascending length.
        fleet0 = self.tabu_search.instance.solution.fleet[0]
        edges = self.tabu_search.get_sorted_edges(fleet0)
        shortest = self.tabu_search.instance.distance_between(edges[0][0], edges[0][1])
        longest = self.tabu_search.instance.distance_between(edges[-1][0], edges[-1][1])
        self.assertTrue(shortest < longest)

    def test_best_neighbours(self):
        # Neighbours come back ordered by descending score.
        neighbours = self.tabu_search.best_neighbours(2)
        self.assertTrue(neighbours[0][1] > neighbours[-1][1])


class TestTabuGeneral(unittest.TestCase):
    """End-to-end setup on a larger instance (smoke-level coverage)."""

    def setUp(self):
        # Alternative datasets used during development:
        #   ./tests/cvrp2.test, ./tests/ulysses-n16-k3.vrp,
        #   ./tests/E-n23-k3.vrp, ./tests/cvrp3.test, ./tests/P-n19-k2.vrp
        raw_data = dm.Importer()
        raw_data.import_data("./tests/E-n101-k14.vrp")
        data = dm.DataMapper(raw_data)
        self.instance = i.ProblemInstance(data)
        self.solution = a.Solution(self.instance)
        gf.GreedyFirst(self.solution.solution).run(sort=False)
        self.solution.value = self.solution.eval()
        self.tabu_search = tabu.TabuSearch(self.solution, 100)

    # def test_general(self):
    #     print("value before: " + str(self.tabu_search.best_instance.eval()))
    #     self.tabu_search.run()
    #     print("value after: " + str(self.tabu_search.best_instance.eval()))


if __name__ == "__main__":
    unittest.main()
2,888
1,086
""" examples on scikit-image : call : from skimage.feature import blob_dog, blob_log, blob_doh structure : skimage feature __init__.py (from .blob import blob_dog, blob_log, blob_doh) blob.py (contains blob_dog, blob_log, blob_doh) conclusion : module imported because it was defined in module dir """ from .timemeter import timemeter
381
133
import numpy as np

from com.sparksamples.util import get_records
from com.sparksamples.util import get_mapping
from com.sparksamples.util import extract_features
from com.sparksamples.util import extract_label
from com.sparksamples.util import extract_features_dt
#from pyspark.mllib.tree import DecisionTree
from pyspark.mllib.tree import GradientBoostedTrees
from pyspark.mllib.regression import LabeledPoint
from com.sparksamples.util import squared_log_error

__author__ = 'Rajdeep Dua'


def evaluate_gbt(train, test, numItr, lrRate, mxDepth, mxBins):
    """Train a GradientBoostedTrees regressor on *train* and return its RMSLE on *test*.

    numItr/lrRate/mxDepth/mxBins map onto trainRegressor's numIterations,
    learningRate, maxDepth and maxBins parameters.
    """
    gbt_model = GradientBoostedTrees.trainRegressor(
        train, categoricalFeaturesInfo={}, numIterations=numItr,
        maxDepth=mxDepth, maxBins=mxBins, learningRate=lrRate)
    predictions = gbt_model.predict(test.map(lambda x: x.features))
    # Pair each true label with its prediction.
    tp = test.map(lambda lp: lp.label).zip(predictions)
    # COMPAT FIX: `lambda (t, p): ...` tuple-parameter unpacking is
    # Python-2-only (removed by PEP 3113); subscripting works in both.
    rms_le = np.sqrt(tp.map(lambda t_p: squared_log_error(t_p[0], t_p[1])).mean())
    return rms_le


def get_train_test_data():
    """Build an 80/20 train/test split of LabeledPoints from the raw records."""
    records = get_records()
    records.cache()
    # extract all the categorical mappings
    mappings = [get_mapping(records, i) for i in range(2, 10)]
    cat_len = sum(map(len, mappings))
    num_len = len(records.first()[11:15])
    data = records.map(
        lambda r: LabeledPoint(extract_label(r), extract_features(r, cat_len, mappings)))
    #data_dt = records.map(lambda r: LabeledPoint(extract_label(r), extract_features_dt(r)))
    # Key by index so the sampled test rows can be subtracted from the full set.
    data_with_idx = data.zipWithIndex().map(lambda kv: (kv[1], kv[0]))
    test = data_with_idx.sample(False, 0.2, 42)
    train = data_with_idx.subtractByKey(test)
    train_data = train.map(lambda idx_p: idx_p[1])
    test_data = test.map(lambda idx_p: idx_p[1])
    return train_data, test_data
1,941
689
import random


class Grammar:
    """
    A simpler version of Tracery's ideas.

    `rules` maps a rule name to a list of expansions; "[name]" in a template
    is replaced by a recursively parsed, randomly chosen expansion of that
    rule.  "[!name]" additionally records the chosen expansion in `saved`.
    """

    def __init__(self, rules=None):
        self.rules = rules or {}
        # Expansions flagged with "!" are appended here, to be pop()'d off by the caller.
        self.saved = []

    def parse(self, string):
        """Expand every [rule] reference in *string*; plain strings pass through unchanged.

        Raises Exception on nested, unmatched, or unterminated brackets, and
        on references to unknown rules.
        """
        if "[" not in string and "]" not in string:
            return string
        fragments = []
        buffer = ''
        brackets = False
        for char in string:
            if char == '[':
                fragments += [buffer]
                buffer = ''
                if brackets:
                    raise Exception("Grammar.parse: can't nest brackets")
                brackets = True
            elif char == ']':
                if not brackets:
                    raise Exception("Grammar.parse: unmatched bracket")
                brackets = False
                # Mechanism for saving what result we got: put a ! somewhere
                # in the [ ]-surrounded text.
                name = buffer.replace("!", "")
                if name in self.rules:
                    fragments += [self.parse(random.choice(self.rules[name]))]
                    if "!" in buffer:
                        self.saved += [fragments[-1]]
                    buffer = ''
                else:
                    raise Exception("Grammar.parse: no such rule '" + buffer + "'.")
            else:
                buffer += char
        # BUGFIX: a trailing unterminated '[' used to be silently accepted;
        # report it like any other bracket error.
        if brackets:
            raise Exception("Grammar.parse: unmatched bracket")
        if buffer != '':
            fragments += [buffer]
        return "".join(fragments)

    def rule(self, rule, new=None):
        """Get the expansion list for *rule*, or replace it when *new* is given.

        NOTE(review): a falsy replacement (e.g. []) is treated as a lookup --
        kept as-is to preserve existing behavior.
        """
        if new:
            self.rules[rule] = new
        else:
            if rule in self.rules:
                return self.rules[rule]
            return None


wallMaker = Grammar({
    'wallMat': ['stone', 'rock', 'wood', 'paper', 'earth', 'crystal',
                'leafy vagueness', 'sand', 'skin', 'bark', 'foliage', 'needles',
                'delicate tiles', 'agate', 'quartz', 'glass', 'iron', 'copper'],
    'wallCond': ['dark', 'heavy', 'slick', 'moss-clung', 'twisted', 'fluted',
                 'greenish', 'dark', 'hot', 'lumpy', 'unsteady', 'slippery',
                 'geometrically flanged', 'sigil-eaten', 'consuming', 'blue',
                 'reddish', 'translucent', 'ultramarine', 'sky-blue',
                 'delicate pink', 'fuligin'],
    'walls': ['walls of [wallMat] close in; the way is [width].',
              '[wallCond] walls of [wallMat] close in.',
              'the walls are [wallCond] [wallMat]... the tunnels, [width].',
              'all around, [wallCond] [wallMat].',
              'all around, [wallMat].',
              'there\'s [wallMat] everywhere here.',
              'there\'s [wallMat] everywhere here. it\'s [wallCond].',
              '[wallCond] [wallMat] all around.',
              'the walls are made of [wallMat] here.',
              'this place is built entirely of [wallMat].',
              'it\'s very [wallCond] here.',
              '[width], [wallCond].',
              '[wallMat].',
              '[wallCond].'],
    'width': ['suffocatingly close', 'echoing', 'massive', 'wide',
              'barely large enough to pass crawling', 'thin and straight',
              'tall and narrow', 'tiny', 'spacious', 'vast'],
    'door': ['door', 'hatch', 'gate', 'opening', 'incision', 'grating',
             'well', 'oubliette', 'tunnel', 'arch'],
    'doorMat': ['rock', 'oaken', 'papery', 'crystal', 'glass', 'iron', 'silver'],
    'hidden': ['half-hidden', 'in plain view', 'almost impossible to spot',
               'staring you in the face', 'which can only be found by touch'],
})


if __name__ == '__main__':
    # yaml (PyYAML) is only needed when generating the maze file, so import it
    # here: the Grammar library part stays importable without it.
    import yaml

    linkNames = ["[N]orth;north;n", "[S]outh;south;s", "[E]ast;east;e",
                 "[W]est;west;w", "[U]p;up;u"]
    project = {"projectName": "maze", "rooms": {}}
    roomCount = 25
    for i in range(0, roomCount):
        desc = wallMaker.parse("[walls]\n\na [doorMat] [!door], [hidden].")
        door = wallMaker.saved.pop()
        ID = "room-" + str(i)
        project["rooms"][ID] = {"NAME": "Maze"}
        project["rooms"][ID]["LINKS"] = {}
        project["rooms"][ID]["_/de"] = desc
        project["rooms"][ID]["POSTSCRIPT"] = {"BUILD": ["@set here=D", "@tel here=#63"]}
        # Each room shall have 2-3 links to other random rooms. Don't try to be consistent.
        ln = linkNames.copy()
        random.shuffle(ln)
        # FIX: the inner loop used to reuse "i", shadowing the room counter.
        for _ in range(0, random.choice([2, 3, 3, 3, 3, 4, 4, 4])):
            target = "room-" + str(random.choice(range(0, roomCount)))
            project["rooms"][ID]["LINKS"][target] = {
                "NAME": ln.pop(),
                "succ": "You force your way through the " + door + ".",
                "osucc": "forces their way through the " + door + ".",
                "odrop": "emerges through an obscure way from some other part of the maze.",
            }
    with open("maze.gen.yaml", "w") as fh:
        fh.write(yaml.dump(project))
    print("write: maze.gen.yaml (probably.)")
5,010
1,602
from rest_framework import viewsets

from kratos.apps.trigger import models, serializers


class TriggerViewSet(viewsets.GenericViewSet):
    """Trigger information."""

    serializer_class = serializers.TriggerRecordSerializer
    queryset = models.TriggerRecord.objects.all()

    def list(self, request):
        """Paginated list of trigger invocation records."""
        queryset = self.get_queryset()
        page = self.paginator.paginate_queryset(queryset, self.request, view=self)
        data = self.get_serializer(page, many=True).data
        return self.paginator.get_paginated_response(data)
543
175
from FileLoader import FileLoader

# Paths exercised by the smoke test: a missing file, an empty file,
# and a real dataset.
tests = [
    "non_existing_file.csv",
    "empty_file.csv",
    "../data/athlete_events.csv",
]

if __name__ == "__main__":
    for path in tests:
        print(f"==> TESTING {path}")
        loader = FileLoader()
        print("\n=> Loading file")
        frame = loader.load(path)
        print("\n=> Display first 3 rows")
        loader.display(frame, 3)
        print("\n=> Display lasts 3 rows")
        loader.display(frame, -3)
        input("====>\n\n")
478
174
# REF: https://labs.ig.com/rest-trading-api-reference class Watchlists: """ DO NOT CHANGE Adding is ok ... and encouraged ;) """ base = { 'path': 'watchlists', 'GET': { 'version': '1', 'tokens': True, } # Not supported yet: 'POST' } id = { 'path': 'watchlists/', 'GET': { 'version': '1', 'tokens': True, } # Not supported yet: 'PUT', 'DELETE' }
492
162
#!/usr/bin/env python
# coding: utf-8
# AL - the above code is new for the griffin paper version
# modified print commands for python3

# Analyze all possible things from BAM-file
import sys
import argparse
from subprocess import call
import numpy
import scipy
import scipy.stats
import os.path
import os
import glob

# Parse command line arguments ###################################################################
parser = argparse.ArgumentParser(description='Analyze epigenetic traces in cfDNA')
parser.add_argument('-b','--bam', dest='bam_file', help='BAM file',required=True)
parser.add_argument('-o','--output', dest='name', help='Output name for files and directory',required=True)
parser.add_argument('-cov','--mean-coverage', dest='mean_coverage', help='Mean coverage along the genome [default:1]',default=1,type=float)
parser.add_argument('-ylimit','--plot-y-limit', dest='ylimit', help='Plotting until this limit on y-axis [default:1.5]',default=1.5,type=float)
parser.add_argument('-norm-file','--normalize-file', dest='norm_log2', help='Normalize by local copynumber from this file')
parser.add_argument('-calccov','--calculate-mean-coverage', dest='calc_cov', help='Specify whether genome read depths should be calculated',action="store_true")
parser.add_argument('-hg38','--hg38', dest='hg38', help='Use hg38 coordinates [default: hg19]',action="store_true")
parser.add_argument('-a','--analysis', dest='analysis', help='Specify type of analysis (all|enhancer|histone|tf|ctcf|...)',required=True)
parser.add_argument('-tf','--trans-factor', dest='tf', help='Specify transcription factor for VirChip data')
args = parser.parse_args()

# NOTE(review): every analysis below passes "-norm-file",args.norm_log2
# unconditionally; the subprocess calls will fail if -norm-file was omitted
# (norm_log2 is None). Confirm whether -norm-file should be required.

####################################################################################################
# setup structure
print ("Setup structure") # AL mod
if not os.path.isdir(args.name):
    os.mkdir(args.name)

####################################################################################################
# get genomewide coverage from bedtools genomecoverage
if args.calc_cov:
    # AL added if/else: skip the expensive bedtools run if already done
    if os.path.isfile(args.name.rstrip("/")+"/"+args.name+".coverage"):
        print('cov already complete')
    else:
        print ("Calc avg. coverage") # AL mod
        OUTPUT=open(args.name.rstrip("/")+"/"+args.name+".coverage","w")
        if args.hg38:
            call(["./Software/bedtools","genomecov","-ibam",args.bam_file,"-g","./Ref/hg38.chrom_sizes.txt"],stdout=OUTPUT)
        else:
            call(["./Software/bedtools","genomecov","-ibam",args.bam_file,"-g","./Ref/hg19.chrom_sizes.txt"],stdout=OUTPUT)
        OUTPUT.close()
        OUTPUT=open(args.name.rstrip("/")+"/"+args.name+".short_coverage","w")
        call(["./Scripts/get_avg_coverage.py",args.name.rstrip("/")+"/"+args.name+".coverage"],stdout=OUTPUT)
        OUTPUT.close()
    # end AL edits
    # Read the genome-wide mean coverage back from the summary file.
    INPUT = open(args.name.rstrip("/")+"/"+args.name+".short_coverage","r")
    avg_coverage = 1
    for line in INPUT.readlines():
        chrom,cov = line.rstrip().split("\t")
        if chrom == "genome":
            avg_coverage = cov
    INPUT.close()
else:
    print ("Skipping genomewide-coverage calculation using mean coverage: "+str(args.mean_coverage)) # AL mod
    avg_coverage = args.mean_coverage

####################################################################################################
# print statistics:
print ("Write Logs") # AL mod
OUT=open(args.name.rstrip("/")+"/log.txt","w")
OUT.write("BAM:\t"+args.bam_file+"\n")
# BUGFIX: args.norm_log2 may be None when -norm-file is omitted; str() avoids
# a TypeError on concatenation.
OUT.write("Norm File:\t"+str(args.norm_log2)+"\n")
OUT.write("cov:\t"+str(avg_coverage)+"\n")
OUT.write("analysis:\t"+args.analysis+"\n")
OUT.close()

####################################################################################################
# get chromosome coverage from output of bedtools genomecoverage
def getChromCoverage(chromosome,args):
    """Return the coverage recorded for *chromosome* in the short_coverage file; exits on error."""
    print (args.name.rstrip("/")+"/"+args.name+".short_coverage") # AL mod
    if not os.path.isfile(args.name.rstrip("/")+"/"+args.name+".short_coverage"):
        print ("Coverage file not found") # AL mod
        sys.exit(1)
    INPUT = open(args.name.rstrip("/")+"/"+args.name+".short_coverage","r")
    avg_coverage = 1
    found = False
    for line in INPUT.readlines():
        chrom,cov = line.rstrip().split("\t")
        if chrom == chromosome:
            avg_coverage = cov
            found = True
    INPUT.close()
    if found:
        return avg_coverage
    else:
        print ("Chromosome not found") # AL mod
        sys.exit(1)

####################################################################################################
# CTCF analysis
def ctcf(args,avg_coverage):
    """Coverage profiles around CTCF binding-site sets (GTRD, ultraconserved, TSS-proximal/distal)."""
    print ("Analyze CTCF sites") # AL mod
    if not os.path.isdir(args.name.rstrip("/")+"/CTCF"):
        os.mkdir(args.name.rstrip("/")+"/CTCF")
    # (historical commented-out FIMO/ChIP variants and plot calls removed)
    OUTPUT = open(args.name.rstrip("/")+"/CTCF"+"/CTCF_GTRD_In_Insulated_Neighbourhoods.tss","w")
    call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/CTCF/CTCF.GTRD.Insulated.bed","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000","-m","100000"],stdout=OUTPUT)
    OUTPUT.close()
    OUTPUT = open(args.name.rstrip("/")+"/CTCF"+"/CTCF_GTRD_Outside_Insulated_Neighbourhoods.tss","w")
    call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/CTCF/CTCF.GTRD.NonInsulated.bed","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000","-m","100000"],stdout=OUTPUT)
    OUTPUT.close()
    OUTPUT = open(args.name.rstrip("/")+"/CTCF"+"/CTCF_GTRD_50perc_In_Insulated_Neighbourhoods.tss","w")
    call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/CTCF/CTCF.GTRD.50perc.Insulated.bed","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000","-m","100000"],stdout=OUTPUT)
    OUTPUT.close()
    OUTPUT = open(args.name.rstrip("/")+"/CTCF"+"/CTCF_GTRD_50perc_Outside_Insulated_Neighbourhoods.tss","w")
    call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/CTCF/CTCF.GTRD.50perc.NonInsulated.bed","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000","-m","100000"],stdout=OUTPUT)
    OUTPUT.close()
    OUTPUT = open(args.name.rstrip("/")+"/CTCF"+"/CTCF_ultraconserved.tss","w")
    call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/CTCF/Ultraconserved_CTCF.bed","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000","-m","100000"],stdout=OUTPUT)
    OUTPUT.close()
    OUTPUT = open(args.name.rstrip("/")+"/CTCF"+"/CTCF_GTRD_proximalTSS.tss","w")
    call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/CTCF/CTCF.hg19.sorted.bed.proximal.bed","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000","-m","100000"],stdout=OUTPUT)
    OUTPUT.close()
    OUTPUT = open(args.name.rstrip("/")+"/CTCF"+"/CTCF_GTRD_distalTSS.tss","w")
    call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/CTCF/CTCF.hg19.sorted.bed.distal.bed","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000","-m","100000"],stdout=OUTPUT)
    OUTPUT.close()
    OUTPUT = open(args.name.rstrip("/")+"/CTCF"+"/CTCF_GTRD_50perc_proximalTSS.tss","w")
    call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/CTCF/CTCF.50perc_hg19.sorted.bed.proximal.bed","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000","-m","100000"],stdout=OUTPUT)
    OUTPUT.close()
    OUTPUT = open(args.name.rstrip("/")+"/CTCF"+"/CTCF_GTRD_50perc_distalTSS.tss","w")
    call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/CTCF/CTCF.50perc_hg19.sorted.bed.distal.bed","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000","-m","100000"],stdout=OUTPUT)
    OUTPUT.close()

#########################################################################
def tf_gtrd_1000sites(args,avg_coverage):
    """Coverage profiles for every GTRD 1000-sites TF bed file, plus a plot per TF."""
    print("Analyze Transcription factors GTRD")
    if not os.path.isdir(args.name.rstrip("/")+"/TranscriptionFactors"):
        os.mkdir(args.name.rstrip("/")+"/TranscriptionFactors")
    if not os.path.isdir(args.name.rstrip("/")+"/TranscriptionFactors/GTRD_ChIP_Only_1000sites"):
        os.mkdir(args.name.rstrip("/")+"/TranscriptionFactors/GTRD_ChIP_Only_1000sites")
    target_list = glob.glob("./Ref/GTRD_1000sites/*.bed")
    for tf in target_list:
        tf_name = os.path.basename(tf[:-4])
        # Resumable: skip TFs whose output already exists.
        if os.path.isfile(args.name.rstrip("/")+"/TranscriptionFactors/GTRD_ChIP_Only_1000sites"+"/"+tf_name+".tss"):
            print("Skip "+tf_name)
            continue
        OUTPUT = open(args.name.rstrip("/")+"/TranscriptionFactors/GTRD_ChIP_Only_1000sites"+"/"+tf_name+".tss","w")
        call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-m","100000","-limit","30","-bed",tf,"-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
        OUTPUT.close()
        call(["Rscript","./Scripts/plot_MotifCoverage.R",args.name.rstrip("/")+"/TranscriptionFactors/GTRD_ChIP_Only_1000sites"+"/"+tf_name+".tss",args.name.rstrip("/")+"/TranscriptionFactors/GTRD_ChIP_Only_1000sites"+"/"+tf_name+".png",tf_name,"0",str(args.ylimit)])

#########################################################################
def tf_gtrd(args,avg_coverage):
    """Coverage profiles for every GTRD ChIP-only TF bed file (hg19 or hg38), plus a plot per TF."""
    print ("Analyze Transcription factors GTRD") # AL mod
    if not os.path.isdir(args.name.rstrip("/")+"/TranscriptionFactors"):
        os.mkdir(args.name.rstrip("/")+"/TranscriptionFactors")
    if not os.path.isdir(args.name.rstrip("/")+"/TranscriptionFactors/GTRD_ChIP_Only"):
        os.mkdir(args.name.rstrip("/")+"/TranscriptionFactors/GTRD_ChIP_Only")
    if args.hg38:
        target_list = glob.glob("./Ref/GTRD/hg38/*hg38.bed")
    else:
        target_list = glob.glob("./Ref/GTRD/*hg19.bed")
    for tf in target_list:
        tf_name = os.path.basename(tf[:-4])
        # Resumable: skip TFs whose output already exists.
        if os.path.isfile(args.name.rstrip("/")+"/TranscriptionFactors/GTRD_ChIP_Only"+"/"+tf_name+".tss"):
            print ("Skip "+tf_name) # AL mod
            continue
        OUTPUT = open(args.name.rstrip("/")+"/TranscriptionFactors/GTRD_ChIP_Only"+"/"+tf_name+".tss","w")
        call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-m","100000","-limit","30","-bed",tf,"-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
        OUTPUT.close()
        call(["Rscript","./Scripts/plot_MotifCoverage.R",args.name.rstrip("/")+"/TranscriptionFactors/GTRD_ChIP_Only"+"/"+tf_name+".tss",args.name.rstrip("/")+"/TranscriptionFactors/GTRD_ChIP_Only"+"/"+tf_name+".png",tf_name,"0",str(args.ylimit)])

#########################################################################
# TSS
def tss(args,avg_coverage):
    """Compare coverage around housekeeping vs. unexpressed (FANTOM lower 0.1) TSS."""
    print ("Analyze HK vs. Unexpr. TSS") # AL mod
    if not os.path.isdir(args.name.rstrip("/")+"/TSS"):
        os.mkdir(args.name.rstrip("/")+"/TSS")
    OUTPUT = open(args.name.rstrip("/")+"/TSS"+"/HK_APPRIS_isoforms.tss","w")
    call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/TSS/Housekeeping_APPRIS_isos_hg19_positions.bed","-m","100000","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
    OUTPUT.close()
    OUTPUT = open(args.name.rstrip("/")+"/TSS"+"/HK.tss","w")
    call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/TSS/Housekeeping.bed","-m","100000","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
    OUTPUT.close()
    OUTPUT = open(args.name.rstrip("/")+"/TSS"+"/FANTOM_lower01.tss","w")
    call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/TSS/Fantomlower01.bed","-m","100000","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
    OUTPUT.close()
    call(["Rscript","./Scripts/plot_MotifCoverage_2sample.R",args.name.rstrip("/")+"/TSS/HK.tss",args.name.rstrip("/")+"/TSS/FANTOM_lower01.tss",
          args.name.rstrip("/")+"/TSS/HK_vs_Unexpr.png","Housekeeping TSS","Unexpressed TSS","0",str(args.ylimit)])

####################################################################################################
# AndrogenReceptor
def androgen(args,avg_coverage):
    """Coverage around tumor (TARBS) vs. normal (NARBS) androgen-receptor binding sites."""
    print ("Analyze Androgen Receptor Binding sites") # AL mod
    if not os.path.isdir(args.name.rstrip("/")+"/AR"):
        os.mkdir(args.name.rstrip("/")+"/AR")
    OUTPUT = open(args.name.rstrip("/")+"/AR"+"/AR_TARBS_All.tss","w")
    call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/TranscriptionFactors/AndrogenReceptor/TARBS.bed","-m","100000","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
    OUTPUT.close()
    OUTPUT = open(args.name.rstrip("/")+"/AR"+"/AR_NARBS_All.tss","w")
    call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/TranscriptionFactors/AndrogenReceptor/NARBS.bed","-m","100000","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
    OUTPUT.close()
    OUTPUT = open(args.name.rstrip("/")+"/AR"+"/AR_TARBS_GTRD_All.tss","w")
    call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/TranscriptionFactors/AndrogenReceptor/AR_GTRD_TARBS_intersect.bed","-m","100000","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
    OUTPUT.close()
    OUTPUT = open(args.name.rstrip("/")+"/AR"+"/AR_NARBS_GTRD_All.tss","w")
    call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/TranscriptionFactors/AndrogenReceptor/AR_GTRD_NARBS_intersect.bed","-m","100000","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
    OUTPUT.close()
    OUTPUT = open(args.name.rstrip("/")+"/AR"+"/AR_TARBS_GTRD_50perc.tss","w")
    call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/TranscriptionFactors/AndrogenReceptor/AR_GTRD_50perc_TARBS_intersect.bed","-m","100000","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
    OUTPUT.close()
    OUTPUT = open(args.name.rstrip("/")+"/AR"+"/AR_NARBS_GTRD_50perc.tss","w")
    call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/TranscriptionFactors/AndrogenReceptor/AR_GTRD_50perc_NARBS_intersect.bed","-m","100000","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
    OUTPUT.close()
    # NOTE(review): the plots below are written into the TSS/ directory, which
    # this function does not create -- confirm the intended output location.
    call(["Rscript","./Scripts/plot_MotifCoverage_2sample.R",args.name.rstrip("/")+"/AR/AR_TARBS_All.tss",args.name.rstrip("/")+"/AR/AR_NARBS_All.tss",
          args.name.rstrip("/")+"/TSS/TARBS_vs_ARBS.png","T-AR binding sites","N-AR binding sites","0",str(args.ylimit)])
    # BUGFIX: the second input used to be AR_TARBS_GTRD_All.tss twice; the
    # N-AR curve must come from AR_NARBS_GTRD_All.tss.
    call(["Rscript","./Scripts/plot_MotifCoverage_2sample.R",args.name.rstrip("/")+"/AR/AR_TARBS_GTRD_All.tss",args.name.rstrip("/")+"/AR/AR_NARBS_GTRD_All.tss",
          args.name.rstrip("/")+"/TSS/TARBS_vs_ARBS_GTRDintersect.png","T-AR binding sites (GTRD intersect)","N-AR binding sites (GTRD intersect)","0",str(args.ylimit)])
    call(["Rscript","./Scripts/plot_MotifCoverage_2sample.R",args.name.rstrip("/")+"/AR/AR_TARBS_GTRD_50perc.tss",args.name.rstrip("/")+"/AR/AR_NARBS_GTRD_50perc.tss",
          args.name.rstrip("/")+"/TSS/TARBS_vs_ARBS_GTRDintersect_50perc.png","T-AR binding sites (GTRD intersect,50perc)","N-AR binding sites (GTRD intersect,50perc)","0",str(args.ylimit)])

####################################################################################################
# Check for binding sites proximal and distal to Transcription start sites
def tf_tss(args,avg_coverage):
    """Compare coverage at TF binding sites distal (>2kbp) vs. proximal (<2kbp) to TSS."""
    print ("Analyze distal and proximal TF binding sites") # AL mod
    if not os.path.isdir(args.name.rstrip("/")+"/TSS_TF"):
        os.mkdir(args.name.rstrip("/")+"/TSS_TF")
    target_list = glob.glob("./Ref/TranscriptionFactors/TSS_intersects/*distal.bed")
    for tf in target_list:
        tf_name = os.path.basename(tf[:-10])  # strip "distal.bed"
        proximal_tf = tf[:-10]+"proximal.bed"
        if os.path.isfile(args.name.rstrip("/")+"/TSS_TF/"+tf_name+"distal.tss"):
            print ("Skip "+tf_name) # AL mod
            continue
        OUTPUT = open(args.name.rstrip("/")+"/TSS_TF/"+tf_name+"distal.tss","w")
        call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed",tf,"-m","100000","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
        OUTPUT.close()
        # NOTE(review): if only the proximal output exists, this also skips the
        # final plot -- confirm that is intended.
        if os.path.isfile(args.name.rstrip("/")+"/TSS_TF/"+tf_name+"proximal.tss"):
            print ("Skip "+tf_name) # AL mod
            continue
        OUTPUT = open(args.name.rstrip("/")+"/TSS_TF/"+tf_name+"proximal.tss","w")
        call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed",proximal_tf,"-m","100000","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
        OUTPUT.close()
        call(["Rscript","./Scripts/plot_MotifCoverage_2sample.R",args.name.rstrip("/")+"/TSS_TF/"+tf_name+"distal.tss",args.name.rstrip("/")+"/TSS_TF/"+tf_name+"proximal.tss",
              args.name.rstrip("/")+"/TSS_TF/"+tf_name+".png",tf_name+" distal to TSS (>2kbp)",tf_name+" proximal to TSS (<2kbp)","0",str(args.ylimit)])

####################################################################################################
if args.analysis == "all":
    ctcf(args,avg_coverage)
    # BUGFIX: the original called undefined tf_gtrd_chip_only(); tf_gtrd() is
    # the GTRD_ChIP_Only analysis.
    tf_gtrd(args,avg_coverage)
    tss(args,avg_coverage)
elif args.analysis == "tss":
    tss(args,avg_coverage)
elif args.analysis == "androgen":
    androgen(args,avg_coverage)
elif args.analysis == "ctcf":
    ctcf(args,avg_coverage)
elif args.analysis == "tf_gtrd":
    tf_gtrd(args,avg_coverage)
elif args.analysis == "tf_gtrd_1000sites":
    tf_gtrd_1000sites(args,avg_coverage)
elif args.analysis == "tf_tss":
    # BUGFIX: tf_tss was listed in the usage text below but had no dispatch entry.
    tf_tss(args,avg_coverage)
else:
    print ("Unknown analysis type") # AL mod
    print (" Use any of:") # AL mod
    print (" -) all") # AL mod
    print (" -) ctcf") # AL mod
    print (" -) androgen") # AL mod
    print (" -) tf_gtrd") # AL mod
    print (" -) tf_gtrd_1000sites") # AL mod
    print (" -) tf_tss") # AL mod
21,236
8,612
import sys


def registrasi():
    """Prompt for new credentials and return them as (username, password)."""
    username = input("Masukan username: ")
    password = input("Masukan password: ")
    return username, password


def login(username, password):
    """Prompt for credentials and compare them against the registered ones."""
    input_user = input("Masukan username: ")
    input_pass = input("Masukan password: ")
    # NOTE(review): before any registration both stored values are "" -- an
    # empty login succeeds. Confirm whether that is acceptable.
    if username == input_user and password == input_pass:
        print ("Anda berhasil login")
    else:
        print("Salah username dan password")


def print_menu():
    """Show the action menu."""
    print("Pilih menu untuk melakukan aksi")
    print("1. Registrasi")
    print("2. Login")
    print("3. Keluar")


def main():
    """Menu loop: register, log in, or exit."""
    username = ""
    password = ""
    while True:
        print_menu()
        # BUGFIX: a non-numeric choice used to crash with ValueError; treat it
        # like any other invalid menu entry.
        try:
            menu = int(input("Pilihan menu: "))
        except ValueError:
            print("Masukan pilihan menu yang benar")
            continue
        if menu == 1:
            username, password = registrasi()
        elif menu == 2:
            login(username, password)
        elif menu == 3:
            sys.exit()
        else:
            print("Masukan pilihan menu yang benar")


if __name__ == "__main__":
    main()
1,094
335
# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function, unicode_literals) from builtins import * # this repo overrides ferenda.sources.legal.se.Regeringen to work # against old downloaded import re import codecs # from urllib.parse import urljoin from rdflib import URIRef from rdflib.namespace import SKOS from ferenda.sources.legal.se import Regeringen, RPUBL from ferenda.sources.legal.se.direktiv import DirRegeringen from ferenda.sources.legal.se.sou import SOURegeringen from ferenda.sources.legal.se.ds import Ds from ferenda.sources.legal.se.propositioner import PropRegeringen from ferenda.compat import urljoin from . import SameAs class RegeringenLegacy(Regeringen): source_encoding = "iso-8859-1" def download(self, basefile=None): return False def downloaded_to_intermediate(self, basefile, attachment=None): return codecs.open(self.store.downloaded_path(basefile), encoding=self.source_encoding) # override just some of the methods to parse the HTML index page def extract_metadata(self, rawhead, basefile): content = rawhead title = content.find("h1").string identifier_node = content.find("p", "lead") if identifier_node: identifier = identifier_node.text else: identifier = "" # infer_metadata calls infer_identifier # if this is falsy, which will be good # enough. No need to warn. definitions = content.find("dl", "definitions") ansvarig = None if definitions: for dt in definitions.find_all("dt"): key = dt.get_text(strip=True) value = dt.find_next_sibling("dd").get_text(strip=True) if key in ("Utgiven:", "Publication date:"): utgiven = self.parse_swedish_date(value) elif key in ("Avsändare:",): ansvarig = value sammanfattning = None if content.find("h2", text="Sammanfattning"): sums = content.find("h2", text="Sammanfattning").find_next_siblings("p") # "\n\n" doesn't seem to survive being stuffed in a rdfa # content attribute. Replace with simple space. 
sammanfattning = " ".join([x.get_text(strip=True) for x in sums]) # find related documents re_basefile = re.compile(r'\d{4}(|/\d{2,4}):\d+') # legStep1=Kommittedirektiv, 2=Utredning, 3=lagrådsremiss, # 4=proposition. Assume that relationships between documents # are reciprocal (ie if the page for a Kommittedirektiv # references a Proposition, the page for that Proposition # references the Kommittedirektiv. elements = {self.KOMMITTEDIREKTIV: [], self.DS: ["legStep1"], self.PROPOSITION: ["legStep1", "legStep2"], self.SOU: ["legStep1"]}[self.document_type] utgarFran = [] for elementid in elements: box = content.find(id=elementid) if not box: continue for listitem in box.find_all("li"): if not listitem.find("span", "info"): continue infospans = [x.text.strip( ) for x in listitem.find_all("span", "info")] rel_basefile = None rel_identifier = None for infospan in infospans: if re_basefile.search(infospan): # scrub rel_identifier ("Dir. 2008:50" -> "2008:50" etc) rel_basefile = re_basefile.search(infospan).group() rel_identifier = infospan if not rel_basefile: # this often means that a non-standard document # type is used as preparatory work for this # document (eg department memos not published in # Ds, like "S2013/8074/PBB" -- seems to be common # in Socialdepartementet and Finansdepartementet) self.log.warning( "%s: Couldn't find rel_basefile (elementid #%s) among %r" % (basefile, elementid, infospans)) continue attribs = {"rpubl:arsutgava": basefile.split(":")[0], "rpubl:lopnummer": basefile.split(":")[1]} if elementid == "legStep1": attribs["rdf:type"] = RPUBL.Kommittedirektiv elif elementid == "legStep2": attribs["rdf:type"] = RPUBL.Utredningsbetankande if rel_identifier.startswith("SOU"): altlabel = "SOU" elif rel_identifier.startswith(("Ds", "DS")): altlabel = "Ds" else: self.log.warning( "%s: Cannot find out what type of document the linked %s is (#%s)" % (basefile, rel_identifier, elementid)) continue attribs["rpubl:utrSerie"] = self.lookup_resource(altlabel, 
SKOS.altLabel) elif elementid == "legStep3": attribs["rdf:type"] = RPUBL.Proposition uri = self.minter.space.coin_uri(self.attributes_to_resource(attribs)) utgarFran.append(uri) # find related pages related = content.find("h2", text="Relaterat") seealso = [] if related: for link in related.findParent("div").find_all("a"): r = urljoin("http://www.regeringen.se/", link["href"]) seealso.append(URIRef(r)) a = self.metadata_from_basefile(basefile) a.update({'dcterms:title': title, 'dcterms:identifier': identifier, 'dcterms:issued': utgiven, 'rpubl:utgarFran': utgarFran }) if ansvarig: a["rpubl:departement"] = ansvarig if seealso: a["rdfs:seeAlso"] = seealso if sammanfattning: a['dcterms:abstract'] = sammanfattning return a def find_doc_links(self, soup, basefile): files = [] docsection = soup.find('div', 'doc') if docsection: for li in docsection.find_all("li", "pdf"): link = li.find('a') m = re.match(r'/download/(\w+\.pdf).*', link['href'], re.IGNORECASE) if not m: continue pdfbasefile = m.group(1) files.append((pdfbasefile, link.string)) selected = self.select_files(files) self.log.debug("selected %s out of %d pdf files" % (", ".join([x[0] for x in selected]), len(files))) return selected def source_url(self, basefile): # as the old site is gone, there is no possible URL we can # return here. return None class DirRegeringenLegacy(RegeringenLegacy, SameAs, DirRegeringen): alias = "dirregeringen.legacy" class SOURegeringenLegacy(RegeringenLegacy, SameAs, SOURegeringen): alias = "souregeringen.legacy" def sanitize_identifier(self, identifier): from ferenda.sources.legal.se.sou import sou_sanitize_identifier return sou_sanitize_identifier(identifier) class DsRegeringenLegacy(RegeringenLegacy, SameAs, Ds): alias = "dsregeringen.legacy" class PropRegeringenLegacy(RegeringenLegacy, SameAs, PropRegeringen): alias = "propregeringen.legacy"
7,631
2,241
import reverse_geocode

# Resolve the nearest known city for a few sample coordinates
# (Tokyo, Sydney and Zurich areas), one lookup per call as before.
for coordinate in [
    (35.6963860567411, 139.686436661882),
    (-33.8236171057086, 151.021885871887),
    (47.3111740195794, 8.52681624913163),
]:
    reverse_geocode.search([coordinate])
211
140
from mesa.datacollection import DataCollector ### datacollection functions def density(model): """Density: number of cars per unit length of road.""" return len(model.schedule.agents) / model.space.length def flow(model): """Flow: number of cars passing a reference point per unit of time.""" # get the flow in the current timestep flow_in_timestep = model.data.flow # reset flow counter model.data.flow = 0 return flow_in_timestep / model.space.n_lanes class Data(DataCollector): def __init__(self, flow_reference_point): super().__init__(model_reporters={ #"Density": density, "Flow": flow }) # setup data collectotion variables self.flow_reference_point = flow_reference_point self.flow = 0
803
240
import pymysql
import logging

from lambda_assistant.handlers.event_handler import EventHandler
from lambda_assistant.errors import *

logger = logging.getLogger()
logger.setLevel(logging.INFO)

class Select():
    """Mixin providing a SELECT helper.

    Returns rows as ``{row_index: {column_name: value}}``.  On failure it does
    NOT raise: it records the error on the handler and returns the error dict.
    """

    def Select(self, handler: EventHandler, conn: pymysql.connections.Connection, sql: str):
        try:
            result = {}
            # Execute SQL command
            with conn.cursor() as cur:
                cur.execute(sql)
                row_headers = [x[0] for x in cur.description] #this will extract row headers
                for index, row in enumerate(cur):
                    result[index] = dict(zip(row_headers, row))

            # Commit changes
            # NOTE(review): committing after a read-only SELECT is unusual but
            # harmless; presumably it closes the implicit transaction -- confirm.
            conn.commit()
            return result

        except Exception as e:
            handler.performError(GetDataFailedError())
            logger.error(handler.lambdaError.toPrint())
            logger.error(e)
            return handler.lambdaError.toDict()

class Delete():
    """Mixin providing a DELETE helper.

    Runs the delete statement, then a second statement (presumably resetting
    the table's identity counter -- confirm with callers).  Returns an empty
    dict on success, or the handler's error dict on failure.
    """

    def Delete(self, handler: EventHandler, conn:pymysql.connections.Connection, sql: str, sql_recheckidentity: str):
        try:
            result = {}
            # Execute SQL command
            with conn.cursor() as cur:
                cur.execute(sql)
                cur.execute(sql_recheckidentity)

            # Commit changes
            conn.commit()
            return result

        except Exception as e:
            handler.performError(DeleteDataFailedError())
            logger.error(handler.lambdaError.toPrint())
            logger.error(e)
            return handler.lambdaError.toDict()

class Insert():
    """Mixin providing an INSERT helper.

    When ``get_id`` is True the auto-increment id of the inserted row is
    returned under the ``'id_inserted'`` key.  Errors are returned as the
    handler's error dict, not raised.
    """

    def Insert(self, handler: EventHandler, conn: pymysql.connections.Connection, sql: str, get_id=False):
        try:
            result = {}
            # Execute SQL command
            with conn.cursor() as cur:
                cur.execute(sql)
                if get_id:
                    # lastrowid is the AUTO_INCREMENT id of the inserted row.
                    id = int(cur.lastrowid)
                    result['id_inserted'] = id

            # Commit changes
            conn.commit()
            return result

        except Exception as e:
            handler.performError(PutDataFailedError())
            logger.error(handler.lambdaError.toPrint())
            logger.error(e)
            return handler.lambdaError.toDict()

class MySqlHandler(Select, Delete, Insert):
    """Facade combining the Select/Delete/Insert mixins for one database."""

    def __init__(self, db_name, rds_host, db_username, db_password):
        self.rds_host = rds_host
        self.db_name = db_name
        self.db_username = db_username
        self.db_password = db_password

    def Connect(self):
        """Open and return a new pymysql connection (caller owns/closes it)."""
        conn = pymysql.connect(host=self.rds_host, user=self.db_username,
                               passwd=self.db_password, db=self.db_name,
                               connect_timeout=5)
        return conn
2,850
800
from django.contrib.auth import get_user_model
from django.db import models

# Create your models here.
User = get_user_model()


class Post(models.Model):
    """A user-authored post: text plus optional group and image."""

    text = models.TextField(verbose_name='Текст')
    pub_date = models.DateTimeField(auto_now_add=True,
                                    verbose_name="date published")
    author = models.ForeignKey(User, on_delete=models.CASCADE,
                               related_name='posts')
    # Optional membership; note CASCADE means deleting a Group deletes its posts.
    group = models.ForeignKey('Group', on_delete=models.CASCADE, blank=True,
                              null=True, related_name='posts',
                              verbose_name='Группа')
    image = models.ImageField(upload_to='posts/', blank=True, null=True)

    def __str__(self):
        # NOTE(review): returns the full text; listings may want truncation.
        return self.text


class Group(models.Model):
    """A named collection of posts, addressed by its unique slug."""

    title = models.CharField(max_length=200)
    slug = models.SlugField(max_length=100, unique=True)
    description = models.TextField(blank=True, null=True)

    def __str__(self):
        return self.title


class Comment(models.Model):
    """A user comment attached to a post."""

    post = models.ForeignKey(Post, on_delete=models.CASCADE,
                             related_name='comments')
    author = models.ForeignKey(User, on_delete=models.CASCADE,
                               related_name='comments')
    text = models.TextField()
    created = models.DateTimeField(auto_now_add=True)
1,280
385
'''OpenGL extension AMD.blend_minmax_factor

This module customises the behaviour of the
OpenGL.raw.GL.AMD.blend_minmax_factor to provide a more
Python-friendly API

Overview (from the spec)

	The EXT_blend_minmax extension extended the GL's blending
	functionality to allow the blending equation to be specified by the
	application. That extension introduced the MIN_EXT and MAX_EXT blend
	equations, which caused the result of the blend equation to become
	the minimum or maximum of the source color and destination color,
	respectively.

	The MIN_EXT and MAX_EXT blend equations, however, do not include the
	source or destination blend factors in the arguments to the min and
	max functions. This extension provides two new blend equations that
	produce the minimum or maximum of the products of the source color
	and source factor, and the destination color and destination factor.

The official definition of this extension is available here:
http://www.opengl.org/registry/specs/AMD/blend_minmax_factor.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.AMD.blend_minmax_factor import *
from OpenGL.raw.GL.AMD.blend_minmax_factor import _EXTENSION_NAME

def glInitBlendMinmaxFactorAMD():
    '''Return boolean indicating whether this extension is available'''
    # The local import duplicates the module-level one; it is presumably part
    # of the autogenerated template and is kept for template consistency.
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )

### END AUTOGENERATED SECTION
1,508
413
"""This example demonstrates the basics on building complete forms using campos. It creates several fields, marking some of them as required and adding some custom validation. Finally fields are added to a CreationForm which have several buttons and a custom callback connected to one of them. After added, some related fields are grouped. """ __author__ = 'Juan Manuel Bermúdez Cabrera' def fake_create_person(): if form.valid: msg = 'ID: {}<br/>'.format(form.id) msg += 'Name: {}<br/>'.format(form.name) msg += 'Last name: {}<br/>'.format(form.last_name) msg += 'Phone: {}<br/>'.format(form.phone) msg += 'Address: {}<br/>'.format(form.address) msg += 'Country: {}<br/>'.format(form.country[0]) msg = 'New person created correctly with values:<br/>{}'.format(msg) msg = '<html>{}</html>'.format(msg) QMessageBox.information(None, 'Created', msg) form.close() def create_form(): id = campos.StringField(name='id', text='Personal ID', max_length=11, required=True) name = campos.StringField(name='name', text='Name', required=True) last = campos.StringField(name='last_name', text='Last name', required=True) val = campos.RegExp(r'\+?\d+', message='Invalid phone number') phone = campos.StringField(name='phone', text='Phone number', validators=[val]) address = campos.StringField(name='address', text='Home address') country = campos.SelectField(name='country', text='Country', blank=True, blank_text='Other', choices=['Cuba', 'EE.UU'], default='Cuba') fields = (id, name, last, phone, address, country) global form form = campos.CreationForm(on_save=fake_create_person, fields=fields) form.setWindowTitle('Create Person') # group some fields form.group('Very personal info', ('phone', 'address'), layout='grid') form.group('Identification', ['id', 'name', 'last_name']) return form if __name__ == '__main__': import os import sys # set gui api to use os.environ['QT_API'] = 'pyside' from qtpy.QtWidgets import QMessageBox, QApplication import campos # set global settings for validation type and 
label positions campos.Validation.set_current('instant') campos.Labelling.set_current('top') app = QApplication(sys.argv) dialog = create_form() sys.exit(dialog.exec_())
2,506
755
from flask import Flask, request
from structs import *
import json
import numpy as np
import sys
import random, time

app = Flask(__name__)
# Global offsets between absolute tile coordinates and local map indices.
dx = 0
dy = 0


def create_action(action_type, target):
    """Serialize an action of *action_type* aimed at *target* to JSON."""
    actionContent = ActionContent(action_type, target.__dict__)
    #print(actionContent)
    return json.dumps(actionContent.__dict__)


def create_move_action(target):
    return create_action("MoveAction", Point(target.X - dx, target.Y - dy))


def create_attack_action(target):
    return create_action("AttackAction", Point(target.X - dx, target.Y - dy))


def create_collect_action(target):
    return create_action("CollectAction", Point(target.X - dx, target.Y - dy))


def create_steal_action(target):
    return create_action("StealAction", Point(target.X - dx, target.Y - dy))


def create_heal_action():
    return create_action("HealAction", "")


def create_purchase_action(item):
    return create_action("PurchaseAction", item)


def deserialize_map(serialized_map):
    """Utility: parse the server's custom serialized map into a 40x40 Tile grid."""
    serialized_map = serialized_map[1:]
    rows = serialized_map.split('[')
    column = rows[0].split('{')
    deserialized_map = [[Tile() for x in range(40)] for y in range(40)]
    for i in range(len(rows) - 1):
        column = rows[i + 1].split('{')
        for j in range(len(column) - 1):
            infos = column[j + 1].split(',')
            end_index = infos[2].find('}')
            content = int(infos[0])
            x = int(infos[1])
            y = int(infos[2][:end_index])
            deserialized_map[i][j] = Tile(content, x, y)
    return deserialized_map


#customs
def visual(lines, x, y):
    """Print an ASCII rendering of the map for debugging."""
    for i in lines:
        line = ''
        for j in i[:20]:
            #Empty, Wall, House, Lava, Resource, Shop, Player
            #0      1     2      3     4         5     6
            line += str(j.Content).replace('None', 'N').replace('0', ' ').replace('1', '#').replace('2', '^').replace('3', 'L').replace('4', '$').replace('5', 'S').replace('6', 'o')
        print(line)


def distance(p1, p2):
    """Euclidean distance between two [x, y] points.

    FIX: the original called math.sqrt but `math` is never imported in this
    module, so every call raised NameError (unless `structs` happened to
    re-export it).  ** 0.5 is equivalent and dependency-free.
    """
    return ((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2) ** 0.5


'''
def searchg(x,y,grid,target, at):
    if grid[x][y] == target:
        at.append([x,y]) #found
        return True
    elif grid[x][y] == 1 or grid[x][y] == 3:
        return False #wall or lava
    elif grid[x][y] == 9:
        return False #been here
    at.append([x,y])
    grid[x][y] == 9
    if ((x<len(grid)-1 and search(x+1,y,grid,target, at))
        or (y > 0 and search(x, y-1,grid,target, at))
        or (x > 0 and search(x-1,y,grid,target, at))
        or (y < len(grid)-1 and search(x, y+1,grid,target, at))):
        return True
    return False
'''


def search_next(me, target, m, dx=0, dy=0):
    """Greedy step: pick the neighbour tile closest to *target* and act on it.

    dx/dy translate absolute coordinates into indices of the local map *m*.
    They now default to 0 so legacy 3-argument callers (see decide()) work.
    """
    x = me.Position.X
    y = me.Position.Y
    # NOTE(review): attribute is spelled CarriedRessources here but the JSON
    # field in bot() is "CarriedResources" -- confirm the structs.Player
    # attribute name.
    if me.CarriedRessources == me.CarryingCapacity:
        print('resource')
        # Full inventory: head home to deposit.
        target = me.HouseLocation
    #if distance([target.X,target.Y],[x,y])==0:
    #    return create_collect_action(Point(x+dx, x+dy))
    neighbors = [[x + 1, y], [x - 1, y], [x, y + 1], [x, y - 1]]
    tNeighbors = []
    for neighbor in neighbors:
        tNeighbors.append([distance(neighbor, [target.X, target.Y]), neighbor])
    sortedNeighbors = sorted(tNeighbors, key=lambda entry: entry[0])
    print(sortedNeighbors)
    for n in sortedNeighbors:
        #Empty, Wall, House, Lava, Resource, Shop, Player
        #0      1     2      3     4         5     6
        tile = m[n[1][0] - dx][n[1][1] - dy]
        content = tile.Content
        point = Point(n[1][0], n[1][1])
        if content == 0 or content == 2:
            print('----move----', point)
            return create_move_action(point)
        elif content == 1 or content == 6:
            print('attack', point)
            return create_attack_action(point)
        elif content == 4:
            return create_collect_action(point)
        else:  # content==3 (lava) or unknown: skip this neighbour
            print('skip')
            continue


def route(start, end, at, best=None):
    """Reconstruct a path from *at* back to *start* (currently unused).

    FIX: `best=[]` was a mutable default argument shared across calls; it is
    now a None sentinel.
    """
    best = [] if best is None else best
    best.append(end)
    for i in range(len(at) - 1, -1, -1):
        if compare(at[i], best[-1]):
            best.append(at[i])
    best = best[::-1]
    return best


def compare(a, b):
    """True when points a and b are 4-neighbours (Manhattan distance 1)."""
    if (a[0] == b[0]) and (abs(a[1] - b[1]) == 1):
        return True
    elif (a[1] == b[1]) and (abs(a[0] - b[0]) == 1):
        return True
    else:
        return False


def arr2action(c, d):
    # NOTE(review): x and y are undefined in this scope (no such globals exist);
    # this helper is unused and would raise NameError if called -- confirm
    # intent before wiring it in.
    if c[0] == d[0]:
        if c[1] < d[1]:
            return create_move_action(Point(x + 1, y))
        else:
            return create_move_action(Point(x - 1, y))
    elif c[0] < d[0]:
        return create_move_action(Point(x, y - 1))
    else:
        return create_move_action(Point(x, y + 1))


def findTargets(mapmatrix, me):
    """Scan the map and bucket tiles into [resources, enemy houses, shops]."""
    resources = []
    enemyhouses = []
    shops = []
    for row in mapmatrix:
        for tile in row:
            if tile.Content == 4:
                resources.append(tile)
            # FIX: original compared tile.Content (an int) to me.HouseLocation
            # (a Point), which is always unequal -- our own house was listed as
            # an enemy house.  Compare positions instead.
            elif tile.Content == 2 and (tile.X, tile.Y) != (me.HouseLocation.X, me.HouseLocation.Y):
                enemyhouses.append(tile)
            elif tile.Content == 5:
                shops.append(tile)
            else:
                continue
    return [resources, enemyhouses, shops]


def decide(me, closestEnemies, targets, grid):
    """Pick an action given sorted (distance, entity) enemy and target lists."""
    # FIX: original read `closestEnemy` (undefined name, typo for the
    # parameter), so the try always failed with NameError and the bare except
    # forced the no-enemy branch.  Narrowed to IndexError for the empty case.
    try:
        distEn = closestEnemies[0][0]
        enemy = closestEnemies[0][1]
    except IndexError:
        distEn = 0
        enemy = []
    distTarget = targets[0][0]
    target = targets[0][1]
    best = []
    at = []
    if distEn == 1:
        #print('------1-------')
        return create_attack_action(Point(enemy.X, enemy.Y))
    elif distTarget == 1 and target.Content == 2:
        #print('------2-------')
        return create_collect_action(Point(target.X, target.Y))
    elif distTarget == 0 and target.Content == 4:
        #print('------3-------')
        return create_collect_action(Point(target.X, target.Y))
    else:
        #print('------4-------')
        #t = random.choice([1,0])
        #u = (t+1)%2
        #return create_move_action(Point(me.Position.X+t,me.Position.Y+u))
        # Relies on search_next's new dx/dy defaults (0, 0).
        return search_next(me, target, grid)


def bot():
    """Main entry point for the bot: parse the request, decide on one action."""
    map_json = request.form["map"]

    # Player info
    encoded_map = map_json.encode()
    map_json = json.loads(encoded_map)
    p = map_json["Player"]
    pos = p["Position"]
    x = pos["X"]
    y = pos["Y"]
    house = p["HouseLocation"]
    player = Player(p["Health"], p["MaxHealth"], Point(x, y),
                    Point(house["X"], house["Y"]), p["Score"],
                    p["CarriedResources"], p["CarryingCapacity"])

    # Map
    serialized_map = map_json["CustomSerializedMap"]
    deserialized_map = deserialize_map(serialized_map)
    transposed = np.transpose(deserialized_map)
    targets = findTargets(deserialized_map, player)
    visual(transposed[:20][::-1], x, y)

    otherPlayers = []
    '''
    #print(map_json)
    for player_dict in map_json["OtherPlayers"]:
        #print(player_dict)
        for player_name in player_dict.keys():
            player_info = player_dict[player_name]
            #print('---------')
            #print(player_info)
            #print('---------')
            p_pos = player_info["Position"]
            player_info = PlayerInfo(player_info["Health"],
                                     player_info["MaxHealth"],
                                     Point(p_pos["X"], p_pos["Y"]))
            otherPlayers.append({player_name: player_info})
    '''

    # return decision
    #targets =
    # Sort candidate targets (resources only for now) by distance to us.
    tTargets = []
    for target in targets[0]:  #+targets[1]:
        tTargets.append([distance([x, y], [target.X, target.Y]), target])
    sortedTargets = sorted(tTargets, key=lambda entry: entry[0])

    tEnemies = []
    for enemy in otherPlayers:
        tEnemies.append([distance([x, y], [enemy.X, enemy.Y]), enemy])
    sortedEnemies = sorted(tEnemies, key=lambda entry: entry[0])

    # Work out the offset between absolute tile coords and local map indices.
    dx, dy = 0, 0
    for i, line in enumerate(deserialized_map):
        for j, tile in enumerate(line):
            if tile.X == x and tile.Y == y:
                dx = x - i
                dy = y - j

    #return decide(player, sortedEnemies, sortedTargets, deserialized_map)
    print(player.__dict__, player.Position.__dict__)
    return search_next(player, sortedTargets[0][1], deserialized_map, dx, dy)


@app.route("/", methods=["POST"])
def reponse():
    """
    Point d'entree appelle par le GameServer
    """
    sys.stdout.flush()
    return bot()


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=3000)
8,452
2,928
from InstaFriend import InstaFriend

# Wrap the "bonesaw" account in an InstaFriend and trigger its greeting.
bonesaw = InstaFriend('bonesaw')
bonesaw.say_something()
92
32
import datetime as dt
import os

import attr


@attr.s(repr=False)
class Interval:
    # Open time interval [start, end) extracted from an S3 key name.
    start = attr.ib()
    end = attr.ib()
    key = attr.ib()

    def __repr__(self):
        return "%s(start=%r, end=%r, key=%r)" % (
            type(self).__name__,
            self.start.isoformat(),
            self.end.isoformat(),
            self.key,
        )

    __str__ = __repr__


def strip_timestamp(timestamp):
    # The timezone offset may or may not be present, remove it if it's there
    return timestamp.strip("Z").replace("+00-00", "")


def _parse_timestamp(value):
    """Parse one timestamp that may or may not carry fractional seconds.

    Raises ValueError if neither format matches.
    """
    for fmt in ("%Y-%m-%dT%H-%M-%S.%f", "%Y-%m-%dT%H-%M-%S"):
        try:
            return dt.datetime.strptime(value, fmt)
        except ValueError:
            continue
    raise ValueError("Unrecognised timestamp: %r" % (value,))


def get_intervals(keys):
    """
    Generate the intervals completed for a particular resource type.

    :param keys: A generator of S3 key names.
    """
    for k in keys:
        name = os.path.basename(k)
        start, end = name.split("__")
        start = strip_timestamp(start)
        end = strip_timestamp(end)
        # Parse each endpoint independently.  The original parsed both with
        # the fractional-seconds format and, on any failure, retried BOTH
        # without it -- so a key whose start had fractional seconds but whose
        # end did not (or vice versa) was rejected outright.
        yield Interval(
            start=_parse_timestamp(start),
            end=_parse_timestamp(end),
            key=k,
        )


def combine_overlapping_intervals(sorted_intervals):
    """
    Given a generator of sorted open intervals, generate the covering set.
    It produces a series of 2-tuples: (interval, running), where ``running``
    is the set of sub-intervals used to build the overall interval.

    :param sorted_intervals: A generator of ``Interval`` instances.
    """
    lower = None
    running = []

    for higher in sorted_intervals:
        if not lower:
            lower = higher
            running.append(higher)
        else:
            # We treat these as open intervals.  This first case is for the
            # two intervals being wholly overlapping, for example:
            #
            #       ( -- lower -- )
            #               ( -- higher -- )
            #
            if higher.start < lower.end:
                upper_bound = max(lower.end, higher.end)
                lower = Interval(start=lower.start, end=upper_bound, key=None)
                running.append(higher)

            # Otherwise the two intervals are disjoint.  Note that this
            # includes the case where lower.end == higher.start, because
            # we can't be sure that point has been included.
            #
            #       ( -- lower -- )
            #                       ( -- higher -- )
            #
            # or
            #
            #       ( -- lower -- )
            #                         ( -- higher -- )
            #
            else:
                yield (lower, running)
                lower = higher
                running = [higher]

    # And spit out the final interval
    if lower is not None:
        yield (lower, running)
3,004
848
"""Tests for the mqtt_json component."""
41
14
from .console import embed, shell_entry from .misc import decode_webwx_emoji, enhance_connection, ensure_list, get_raw_dict, get_receiver, \ get_text_without_at_bot, get_username, match_attributes, match_name, match_text, new_local_msg_id, repr_message, \ smart_map, start_new_thread from .puid_map import PuidMap from .tools import detect_freq_limit, dont_raise_response_error, ensure_one, mutual_friends
414
138
def compile_files(fp_list):
    """Compile each file pointer in *fp_list*, one at a time."""
    for file_pointer in fp_list:
        __compile_file(file_pointer)


def __compile_file(file_pointer):
    """Compile a single file.  Placeholder: not implemented yet."""
    pass
123
47
# Plugin made by Dark cobra # For Dark cobra # Made by Shivam Patel(Team Cobra) # Kang with credits.. import random from userbot import CMD_HELP from userbot.events import register from userbot.utils import admin_cmd from telethon import events, types, functions, utils import asyncio def choser(cmd, pack, blacklist={}): docs = None @borg.on(events.NewMessage(pattern=rf'\.{cmd}', outgoing=True)) async def handler(event): if event.fwd_from: return animation_interval = 2 animation_ttl = range(0,8) nonlocal docs for i in animation_ttl: await asyncio.sleep(animation_interval) if docs is None: docs = [ utils.get_input_document(x) for x in (await borg(functions.messages.GetStickerSetRequest(types.InputStickerSetShortName(pack)))).documents ] await event.respond(file=random.choice(docs)) choser('hpdiwali', 'a929138153_by_Shivam_Patel_1_anim')
1,082
334
#!/usr/bin/env python import sys maximum = 0.0 selected = 0.0 results = [] for line in sys.stdin.readlines()[5:]: line = line.strip() if len(line) == 0: continue (inst, actual, predicted, error) = line.split() results.append([inst, actual, predicted, error]) predicted = float(predicted) if predicted > maximum: maximum = predicted selected = float(actual) by_predicted = sorted(results, key=lambda entry: float(entry[2])) by_predicted.reverse() by_actual = sorted(results, key=lambda entry: float(entry[1])) by_actual.reverse() best_of_actuals = float(by_actual[0][1]) sys.stdout.write('Best of Actuals: %f\n' % best_of_actuals) sys.stdout.write('Maximum Prediction: %s\n' % str([x[2] for x in by_predicted[0:5]])) sys.stdout.write('Selected Actual: %s\n' % str([x[1] for x in by_predicted[0:5]])) sys.stdout.write('Percentages: %s\n' % str([float(x[1])/best_of_actuals for x in by_predicted[0:5]]))
1,024
364
'''Logging module for symtuner library

Logging module for symtuner library. All loggings in symtuner library use this
module.
'''

import logging as _logging

_LOGGER = None


def get_logger():
    '''Get a logger.

    Get a singleton `Logger`. On the first call, builds the "symtuner" logger
    (attaching a stderr handler and INFO level only when the root logger has
    no handlers of its own, to avoid duplicate output). Subsequent calls
    return the same object.

    Returns:
        A `Logger` object.
    '''
    global _LOGGER
    if _LOGGER is None:
        logger = _logging.getLogger('symtuner')
        root_is_unconfigured = not _logging.getLogger().handlers
        if root_is_unconfigured:
            stderr_handler = _logging.StreamHandler()
            stderr_handler.setFormatter(
                _logging.Formatter(
                    fmt='%(asctime)s symtuner [%(levelname)s] %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S'))
            logger.addHandler(stderr_handler)
            logger.setLevel('INFO')
        _LOGGER = logger
    return _LOGGER
941
285
import os
import sys
import cv2
import argparse
import numpy as np

import torch
from torch import nn
from torch.nn import MSELoss
from torch.optim import Adam
from torch.optim.lr_scheduler import MultiStepLR
from torch.autograd import Variable
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter

import settings
from dataset import TestDataset
from model import RESCAN
from cal_ssim import SSIM

logger = settings.logger
# Fixed seeds for reproducible evaluation.
torch.cuda.manual_seed_all(66)
torch.manual_seed(66)
torch.cuda.set_device(settings.device_id)


def ensure_dir(dir_path):
    """Create *dir_path* (and parents) if it does not already exist."""
    if not os.path.isdir(dir_path):
        os.makedirs(dir_path)


class Session:
    """Evaluation session: holds the RESCAN model, losses, and test loaders."""

    def __init__(self):
        self.log_dir = settings.log_dir
        self.model_dir = settings.model_dir
        ensure_dir(settings.log_dir)
        ensure_dir(settings.model_dir)
        logger.info('set log dir as %s' % settings.log_dir)
        logger.info('set model dir as %s' % settings.model_dir)

        self.net = RESCAN().cuda()
        self.crit = MSELoss().cuda()
        self.ssim = SSIM().cuda()

        self.dataloaders = {}

    def get_dataloader(self, dataset_name):
        """Return a (cached) single-sample, in-order DataLoader for the dataset."""
        dataset = TestDataset(dataset_name)
        if not dataset_name in self.dataloaders:
            self.dataloaders[dataset_name] = \
                DataLoader(dataset, batch_size=1,
                           shuffle=False, num_workers=1, drop_last=False)
        return self.dataloaders[dataset_name]

    def load_checkpoints(self, name):
        """Load model weights from checkpoint *name*; no-op if file is missing."""
        ckp_path = os.path.join(self.model_dir, name)
        try:
            obj = torch.load(ckp_path)
            logger.info('Load checkpoint %s' % ckp_path)
        except FileNotFoundError:
            # Missing checkpoint is logged and then load_state_dict on the
            # undefined obj would fail -- the early return avoids that.
            logger.info('No checkpoint %s!!' % ckp_path)
            return
        self.net.load_state_dict(obj['net'])

    def inf_batch(self, name, batch):
        """Run one inference batch; return per-stage MSE and SSIM scalars.

        O is the rainy observation, B the clean background, R = O - B the rain
        layer the network is trained to predict.
        """
        O, B = batch['O'].cuda(), batch['B'].cuda()
        O, B = Variable(O, requires_grad=False), Variable(B, requires_grad=False)
        R = O - B

        with torch.no_grad():
            # The network returns one rain estimate per recurrent stage.
            O_Rs = self.net(O)
            loss_list = [self.crit(O_R, R) for O_R in O_Rs]
            # SSIM is computed on the derained images (observation minus rain).
            ssim_list = [self.ssim(O - O_R, O - R) for O_R in O_Rs]

        losses = {
            'loss%d' % i: loss.item()
            for i, loss in enumerate(loss_list)
        }
        ssimes = {
            'ssim%d' % i: ssim.item()
            for i, ssim in enumerate(ssim_list)
        }
        losses.update(ssimes)

        return losses


def run_test(ckp_name):
    """Evaluate checkpoint *ckp_name* over the 'test' set, logging averages."""
    sess = Session()
    sess.net.eval()
    sess.load_checkpoints(ckp_name)
    dt = sess.get_dataloader('test')

    all_num = 0
    all_losses = {}
    for i, batch in enumerate(dt):
        losses = sess.inf_batch('test', batch)

        batch_size = batch['O'].size(0)
        all_num += batch_size
        # Accumulate a weighted sum; divided by all_num below for the mean.
        for key, val in losses.items():
            if i == 0:
                all_losses[key] = 0.
            all_losses[key] += val * batch_size
            logger.info('batch %d mse %s: %f' % (i, key, val))

    for key, val in all_losses.items():
        logger.info('total mse %s: %f' % (key, val / all_num))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model', default='latest')

    args = parser.parse_args(sys.argv[1:])
    run_test(args.model)
3,326
1,146
# Online mode selection for FastEMRIWaveforms Packages

# Copyright (C) 2020 Michael L. Katz, Alvin J.K. Chua, Niels Warburton, Scott A. Hughes
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

import numpy as np

from few.utils.citations import *

# check for cupy
try:
    import cupy as xp

except (ImportError, ModuleNotFoundError) as e:
    # Fall back to numpy so the module also works without a GPU stack.
    import numpy as xp


class ModeSelector:
    """Filter teukolsky amplitudes based on power contribution.

    This module takes teukolsky modes, combines them with their
    associated ylms, and determines the power contribution from each mode.
    It then filters the modes bases on the fractional accuracy on the total
    power (eps) parameter.

    The mode filtering is a major contributing factor to the speed of these
    waveforms as it removes large numbers of useles modes from the final
    summation calculation.

    Be careful as this is built based on the construction that input mode
    arrays will in order of :math:`m=0`, :math:`m>0`, and then :math:`m<0`.

    args:
        m0mask (1D bool xp.ndarray): This mask highlights which modes have
            :math:`m=0`. Value is False if :math:`m=0`, True if not.
            This only includes :math:`m\geq0`.
        use_gpu (bool, optional): If True, allocate arrays for usage on a GPU.
            Default is False.

    """

    def __init__(self, m0mask, use_gpu=False):

        # Select the array backend once; all later math goes through self.xp.
        if use_gpu:
            self.xp = xp

        else:
            self.xp = np

        # store information releated to m values
        # the order is m = 0, m > 0, m < 0
        self.m0mask = m0mask
        self.num_m_zero_up = len(m0mask)
        self.num_m_1_up = len(self.xp.arange(len(m0mask))[m0mask])
        self.num_m0 = len(self.xp.arange(len(m0mask))[~m0mask])

    def attributes_ModeSelector(self):
        """
        attributes:
            xp: cupy or numpy depending on GPU usage.
            num_m_zero_up (int): Number of modes with :math:`m\geq0`.
            num_m_1_up (int): Number of modes with :math:`m\geq1`.
            num_m0 (int): Number of modes with :math:`m=0`.
        """
        pass

    @property
    def citation(self):
        """Return citations related to this class."""
        return few_citation + few_software_citation

    def __call__(self, teuk_modes, ylms, modeinds, eps=1e-5):
        """Call to sort and filer teukolsky modes.

        This is the call function that takes the teukolsky modes, ylms,
        mode indices and fractional accuracy of the total power and returns
        filtered teukolsky modes and ylms.

        args:
            teuk_modes (2D complex128 xp.ndarray): Complex teukolsky amplitudes
                from the amplitude modules.
                Shape: (number of trajectory points, number of modes).
            ylms (1D complex128 xp.ndarray): Array of ylm values for each mode,
                including m<0. Shape is (num of m==0,) + (num of m>0,)
                + (num of m<0). Number of m<0 and m>0 is the same, but they are
                ordered as (m==0 first then) m>0 then m<0.
            modeinds (list of int xp.ndarrays): List containing the mode index arrays.
                If in an equatorial model, need :math:`(l,m,n)` arrays. If generic,
                :math:`(l,m,k,n)` arrays. e.g. [l_arr, m_arr, n_arr].
            eps (double, optional): Fractional accuracy of the total power used
                to determine the contributing modes. Lowering this value will
                calculate more modes slower the waveform down, but generally
                improving accuracy. Increasing this value removes modes from
                consideration and can have a considerable affect on the speed of
                the waveform, albeit at the cost of some accuracy (usually an
                acceptable loss). Default that gives good mismatch qualities is
                1e-5.

        """

        # get the power contribution of each mode including m < 0
        # (the m<0 amplitudes are the conjugates of the m>0 ones, appended here)
        power = (
            self.xp.abs(
                self.xp.concatenate(
                    [teuk_modes, self.xp.conj(teuk_modes[:, self.m0mask])], axis=1
                )
                * ylms
            )
            ** 2
        )

        # sort the power for a cumulative summation
        inds_sort = self.xp.argsort(power, axis=1)[:, ::-1]
        power = self.xp.sort(power, axis=1)[:, ::-1]
        cumsum = self.xp.cumsum(power, axis=1)

        # initialize and indices array for keeping modes
        inds_keep = self.xp.full(cumsum.shape, True)

        # keep modes that add to within the fractional power (1 - eps)
        inds_keep[:, 1:] = cumsum[:, :-1] < cumsum[:, -1][:, self.xp.newaxis] * (
            1 - eps
        )

        # finds indices of each mode to be kept
        temp = inds_sort[inds_keep]

        # adjust the index arrays to make -m indices equal to +m indices
        # if +m or -m contributes, we keep both because of structure of CUDA kernel
        temp = temp * (temp < self.num_m_zero_up) + (temp - self.num_m_1_up) * (
            temp >= self.num_m_zero_up
        )

        # if +m or -m contributes, we keep both because of structure of CUDA kernel
        keep_modes = self.xp.unique(temp)

        # set ylms

        # adust temp arrays specific to ylm setup
        temp2 = keep_modes * (keep_modes < self.num_m0) + (
            keep_modes + self.num_m_1_up
        ) * (keep_modes >= self.num_m0)

        # ylm duplicates the m = 0 unlike teuk_modes
        ylmkeep = self.xp.concatenate([keep_modes, temp2])

        # setup up teuk mode and ylm returns
        out1 = (teuk_modes[:, keep_modes], ylms[ylmkeep])

        # setup up mode values that have been kept
        out2 = tuple([arr[keep_modes] for arr in modeinds])

        return out1 + out2
6,371
1,941
from .city import City from .company import Company from .country import Country
81
19
""" opentrons_shared_data.deck.dev_types: types for deck defs This should only be imported if typing.TYPE_CHECKING is True """ from typing import Any, Dict, List, NewType, Union from typing_extensions import Literal, TypedDict from ..module.dev_types import ModuleType DeckSchemaVersion3 = Literal[3] DeckSchemaVersion2 = Literal[2] DeckSchemaVersion1 = Literal[1] DeckSchema = NewType("DeckSchema", Dict[str, Any]) RobotModel = Union[Literal["OT-2 Standard"], Literal["OT-3 Standard"]] class Metadata(TypedDict, total=False): displayName: str tags: List[str] class Robot(TypedDict): model: RobotModel class BoundingBox(TypedDict): xDimension: float yDimension: float zDimension: float class SlotDefV3(TypedDict, total=False): id: str position: List[float] boundingBox: BoundingBox displayName: str compatibleModuleTypes: List[ModuleType] matingSurfaceUnitVector: List[Union[Literal[1], Literal[-1]]] class CalibrationPoint(TypedDict): id: str position: List[float] displayName: str class Feature(TypedDict): footprint: str class FixedLabwareBySlot(TypedDict): id: str displayName: str labware: str slot: str class FixedLabwareByPosition(TypedDict): id: str displayName: str labware: str position: List[float] class FixedVolumeBySlot(TypedDict): id: str displayName: str boundingBox: BoundingBox slot: str class FixedVolumeByPosition(TypedDict): id: str displayName: str boundingBox: BoundingBox position: List[float] Fixture = Union[ FixedLabwareBySlot, FixedLabwareByPosition, FixedVolumeBySlot, FixedVolumeByPosition ] class LocationsV3(TypedDict): orderedSlots: List[SlotDefV3] calibrationPoints: List[CalibrationPoint] fixtures: List[Fixture] class DeckDefinitionV3(TypedDict): otId: str schemaVersion: Literal[3] cornerOffsetFromOrigin: List[float] dimensions: List[float] metadata: Metadata robot: Robot locations: LocationsV3 layers: Dict[str, List[Feature]] DeckDefinition = DeckDefinitionV3
2,113
719
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import models

# Per-depth VGG configurations: conv output channels in order, with 'M'
# marking a 2x2 max-pool.  Keyed by VGG depth (11/13/16/19).
# NOTE(review): unused by the VGG16 class below, which hard-codes its layers.
defaultcfg = {
    11: [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512],
    13: [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512],
    16: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512],
    19: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512],
}


def conv_layer(chann_in, chann_out, k_size, p_size):
    """One Conv2d -> BatchNorm2d -> ReLU unit."""
    layer = nn.Sequential(
        nn.Conv2d(chann_in, chann_out, kernel_size=k_size, padding=p_size),
        nn.BatchNorm2d(chann_out),
        nn.ReLU()
    )
    return layer


def vgg_conv_block(in_list, out_list, k_list, p_list, pooling_k, pooling_s):
    """A VGG block: a stack of conv_layer units followed by one max-pool."""
    layers = [conv_layer(in_list[i], out_list[i], k_list[i], p_list[i]) for i in range(len(in_list))]
    layers += [nn.MaxPool2d(kernel_size=pooling_k, stride=pooling_s)]
    return nn.Sequential(*layers)


def vgg_fc_layer(size_in, size_out):
    """Linear -> BatchNorm1d -> ReLU fully-connected unit."""
    layer = nn.Sequential(
        nn.Linear(size_in, size_out),
        nn.BatchNorm1d(size_out),
        nn.ReLU()
    )
    return layer


class VGG16(nn.Module):
    """VGG-16 with batch norm and a 10-way classifier.

    The FC stack starts at width 512, i.e. it expects the last conv block to
    produce a 1x1 spatial map — consistent with 32x32 inputs such as
    CIFAR-10 (TODO confirm against the training pipeline).

    Keyword args:
        robustness (bool): if True, ``forward`` returns only the logits;
            otherwise it returns ``(logits, output_list)`` where
            ``output_list`` holds selected intermediate activations.
    """

    def __init__(self, **kwargs):
        super(VGG16, self).__init__()
        # Flag only changes what forward() returns, not the architecture.
        self.rob = kwargs['robustness'] if 'robustness' in kwargs else False
        # Conv blocks (BatchNorm + ReLU activation added in each block)
        self.layer1 = vgg_conv_block([3, 64], [64, 64], [3, 3], [1, 1], 2, 2)
        self.layer2 = vgg_conv_block([64, 128], [128, 128], [3, 3], [1, 1], 2, 2)
        self.layer3 = vgg_conv_block([128, 256, 256], [256, 256, 256], [3, 3, 3], [1, 1, 1], 2, 2)
        self.layer4 = vgg_conv_block([256, 512, 512], [512, 512, 512], [3, 3, 3], [1, 1, 1], 2, 2)
        self.layer5 = vgg_conv_block([512, 512, 512], [512, 512, 512], [3, 3, 3], [1, 1, 1], 2, 2)

        # FC layers
        self.layer6 = vgg_fc_layer(512, 4096)
        self.layer7 = vgg_fc_layer(4096, 4096)

        # Final layer
        self.layer8 = nn.Linear(4096, 10)

    def forward(self, x):
        # Collect activations of layers 1-4 and the two FC layers; layer5's
        # output is flattened and not appended.
        output_list = []
        out = self.layer1(x)
        output_list.append(out)
        out = self.layer2(out)
        output_list.append(out)
        out = self.layer3(out)
        output_list.append(out)
        out = self.layer4(out)
        output_list.append(out)
        vgg16_features = self.layer5(out)
        out = vgg16_features.view(out.size(0), -1)
        #print(out.shape)
        out = self.layer6(out)
        output_list.append(out)
        out = self.layer7(out)
        output_list.append(out)
        out = self.layer8(out)

        if self.rob:
            # robustness mode: logits only
            return out
        else:
            # default: logits plus intermediate activations
            return out, output_list
2,803
1,309
""" Preprocessing script for Stanford Sentiment Treebank data. """ import os import glob # # Trees and tree loading # class ConstTree(object): def __init__(self): self.left = None self.right = None def size(self): self.size = 1 if self.left is not None: self.size += self.left.size() if self.right is not None: self.size += self.right.size() return self.size def set_spans(self): if self.word is not None: self.span = self.word return self.span self.span = self.left.set_spans() if self.right is not None: self.span += ' ' + self.right.set_spans() return self.span def get_labels(self, spans, labels, dictionary): if self.span in dictionary: spans[self.idx] = self.span labels[self.idx] = dictionary[self.span] if self.left is not None: self.left.get_labels(spans, labels, dictionary) if self.right is not None: self.right.get_labels(spans, labels, dictionary) class DepTree(object): def __init__(self): self.children = [] self.lo, self.hi = None, None def size(self): self.size = 1 for c in self.children: self.size += c.size() return self.size def set_spans(self, words): self.lo, self.hi = self.idx, self.idx + 1 if len(self.children) == 0: self.span = words[self.idx] return for c in self.children: c.set_spans(words) self.lo = min(self.lo, c.lo) self.hi = max(self.hi, c.hi) self.span = ' '.join(words[self.lo : self.hi]) def get_labels(self, spans, labels, dictionary): if self.span in dictionary: spans[self.idx] = self.span labels[self.idx] = dictionary[self.span] for c in self.children: c.get_labels(spans, labels, dictionary) def load_trees(dirpath): const_trees, dep_trees, toks = [], [], [] with open(os.path.join(dirpath, 'parents.txt')) as parentsfile, \ open(os.path.join(dirpath, 'dparents.txt')) as dparentsfile, \ open(os.path.join(dirpath, 'sents.txt')) as toksfile: parents, dparents = [], [] for line in parentsfile: parents.append(map(int, line.split())) for line in dparentsfile: dparents.append(map(int, line.split())) for line in toksfile: toks.append(line.strip().split()) 
for i in xrange(len(toks)): const_trees.append(load_constituency_tree(parents[i], toks[i])) dep_trees.append(load_dependency_tree(dparents[i])) return const_trees, dep_trees, toks def load_constituency_tree(parents, words): trees = [] root = None size = len(parents) for i in xrange(size): trees.append(None) word_idx = 0 for i in xrange(size): if not trees[i]: idx = i prev = None prev_idx = None word = words[word_idx] word_idx += 1 while True: tree = ConstTree() parent = parents[idx] - 1 tree.word, tree.parent, tree.idx = word, parent, idx word = None if prev is not None: if tree.left is None: tree.left = prev else: tree.right = prev trees[idx] = tree if parent >= 0 and trees[parent] is not None: if trees[parent].left is None: trees[parent].left = tree else: trees[parent].right = tree break elif parent == -1: root = tree break else: prev = tree prev_idx = idx idx = parent return root def load_dependency_tree(parents): trees = [] root = None size = len(parents) for i in xrange(size): trees.append(None) for i in xrange(size): if not trees[i]: idx = i prev = None prev_idx = None while True: tree = DepTree() parent = parents[idx] - 1 # node is not in tree if parent == -2: break tree.parent, tree.idx = parent, idx if prev is not None: tree.children.append(prev) trees[idx] = tree if parent >= 0 and trees[parent] is not None: trees[parent].children.append(tree) break elif parent == -1: root = tree break else: prev = tree prev_idx = idx idx = parent return root # # Various utilities # def make_dirs(dirs): for d in dirs: if not os.path.exists(d): os.makedirs(d) def load_sents(dirpath): sents = [] with open(os.path.join(dirpath, 'SOStr.txt')) as sentsfile: for line in sentsfile: sent = ' '.join(line.split('|')) sents.append(sent.strip()) return sents def load_splits(dirpath): splits = [] with open(os.path.join(dirpath, 'datasetSplit.txt')) as splitfile: splitfile.readline() for line in splitfile: idx, split = line.split(',') splits.append(int(split)) return splits def 
load_parents(dirpath): parents = [] with open(os.path.join(dirpath, 'STree.txt')) as parentsfile: for line in parentsfile: p = ' '.join(line.split('|')) parents.append(p.strip()) return parents def load_dictionary(dirpath): labels = [] with open(os.path.join(dirpath, 'sentiment_labels.txt')) as labelsfile: labelsfile.readline() for line in labelsfile: idx, rating = line.split('|') idx = int(idx) rating = float(rating) if rating <= 0.2: label = -2 elif rating <= 0.4: label = -1 elif rating > 0.8: label = +2 elif rating > 0.6: label = +1 else: label = 0 labels.append(label) d = {} with open(os.path.join(dirpath, 'dictionary.txt')) as dictionary: for line in dictionary: s, idx = line.split('|') d[s] = labels[int(idx)] return d def build_vocab(filepaths, dst_path, lowercase=True): vocab = set() for filepath in filepaths: with open(filepath) as f: for line in f: if lowercase: line = line.lower() vocab |= set(line.split()) with open(dst_path, 'w') as f: for w in sorted(vocab): f.write(w + '\n') def split(sst_dir, train_dir, dev_dir, test_dir): sents = load_sents(sst_dir) splits = load_splits(sst_dir) parents = load_parents(sst_dir) with open(os.path.join(train_dir, 'sents.txt'), 'w') as train, \ open(os.path.join(dev_dir, 'sents.txt'), 'w') as dev, \ open(os.path.join(test_dir, 'sents.txt'), 'w') as test, \ open(os.path.join(train_dir, 'parents.txt'), 'w') as trainparents, \ open(os.path.join(dev_dir, 'parents.txt'), 'w') as devparents, \ open(os.path.join(test_dir, 'parents.txt'), 'w') as testparents: for sent, split, p in zip(sents, splits, parents): if split == 1: train.write(sent) train.write('\n') trainparents.write(p) trainparents.write('\n') elif split == 2: test.write(sent) test.write('\n') testparents.write(p) testparents.write('\n') else: dev.write(sent) dev.write('\n') devparents.write(p) devparents.write('\n') def get_labels(tree, dictionary): size = tree.size() spans, labels = [], [] for i in xrange(size): labels.append(None) spans.append(None) 
tree.get_labels(spans, labels, dictionary) return spans, labels def write_labels(dirpath, dictionary): print('Writing labels for trees in ' + dirpath) with open(os.path.join(dirpath, 'labels.txt'), 'w') as labels, \ open(os.path.join(dirpath, 'dlabels.txt'), 'w') as dlabels: # load constituency and dependency trees const_trees, dep_trees, toks = load_trees(dirpath) # write span labels for i in xrange(len(const_trees)): const_trees[i].set_spans() dep_trees[i].set_spans(toks[i]) # const tree labels s, l = [], [] for j in xrange(const_trees[i].size()): s.append(None) l.append(None) const_trees[i].get_labels(s, l, dictionary) labels.write(' '.join(map(str, l)) + '\n') # dep tree labels dep_trees[i].span = const_trees[i].span s, l = [], [] for j in xrange(len(toks[i])): s.append(None) l.append('#') dep_trees[i].get_labels(s, l, dictionary) dlabels.write(' '.join(map(str, l)) + '\n') def dependency_parse(filepath, cp='', tokenize=True): print('\nDependency parsing ' + filepath) dirpath = os.path.dirname(filepath) filepre = os.path.splitext(os.path.basename(filepath))[0] tokpath = os.path.join(dirpath, filepre + '.toks') parentpath = os.path.join(dirpath, 'dparents.txt') relpath = os.path.join(dirpath, 'rels.txt') tokenize_flag = '-tokenize - ' if tokenize else '' cmd = ('java -cp %s DependencyParse -tokpath %s -parentpath %s -relpath %s %s < %s' % (cp, tokpath, parentpath, relpath, tokenize_flag, filepath)) os.system(cmd) if __name__ == '__main__': print('=' * 80) print('Preprocessing Stanford Sentiment Treebank') print('=' * 80) base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) data_dir = os.path.join(base_dir, 'data') lib_dir = os.path.join(base_dir, 'lib') sst_dir = os.path.join(data_dir, 'sst') train_dir = os.path.join(sst_dir, 'train') dev_dir = os.path.join(sst_dir, 'dev') test_dir = os.path.join(sst_dir, 'test') make_dirs([train_dir, dev_dir, test_dir]) # produce train/dev/test splits split(sst_dir, train_dir, dev_dir, test_dir) sent_paths = 
glob.glob(os.path.join(sst_dir, '*/sents.txt')) # produce dependency parses classpath = ':'.join([ lib_dir, os.path.join(lib_dir, 'stanford-parser/stanford-parser.jar'), os.path.join(lib_dir, 'stanford-parser/stanford-parser-3.5.1-models.jar')]) for filepath in sent_paths: dependency_parse(filepath, cp=classpath, tokenize=False) # get vocabulary build_vocab(sent_paths, os.path.join(sst_dir, 'vocab.txt')) build_vocab(sent_paths, os.path.join(sst_dir, 'vocab-cased.txt'), lowercase=False) # write sentiment labels for nodes in trees dictionary = load_dictionary(sst_dir) write_labels(train_dir, dictionary) write_labels(dev_dir, dictionary) write_labels(test_dir, dictionary)
11,555
3,627
import warnings
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
import torch.utils.model_zoo as model_zoo
from typing import Optional, Tuple, List, Callable, Any

# Project layer primitives (Conv2d, MaxPool2d, Sequential, Linear, Dropout,
# AdaptiveAvgPool2d, BatchNorm2d, ...) that carry relprop() support.
from modules.layers import *

__all__ = ['GoogLeNet', 'googlenet', "GoogLeNetOutputs", "_GoogLeNetOutputs"]

model_urls = {
    # GoogLeNet ported from TensorFlow
    'googlenet': 'https://download.pytorch.org/models/googlenet-1378be20.pth',
}

GoogLeNetOutputs = namedtuple('GoogLeNetOutputs', ['logits', 'aux_logits2', 'aux_logits1'])
GoogLeNetOutputs.__annotations__ = {'logits': Tensor, 'aux_logits2': Optional[Tensor],
                                    'aux_logits1': Optional[Tensor]}

# Script annotations failed with _GoogleNetOutputs = namedtuple ...
# _GoogLeNetOutputs set here for backwards compat
_GoogLeNetOutputs = GoogLeNetOutputs


def googlenet(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> "GoogLeNet":
    r"""GoogLeNet (Inception v1) model architecture from
    `"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        aux_logits (bool): If True, adds two auxiliary branches that can improve training.
            Default: *False* when pretrained is True otherwise *True*
        transform_input (bool): If True, preprocesses the input according to the method
            with which it was trained on ImageNet. Default: *False*

    NOTE(review): ``progress`` is accepted but not forwarded to
    ``model_zoo.load_url``, and ``original_aux_logits`` is saved but never
    used (upstream torchvision strips the aux heads here) — confirm whether
    that pruning was dropped intentionally.
    """
    if pretrained:
        if 'transform_input' not in kwargs:
            kwargs['transform_input'] = True
        if 'aux_logits' not in kwargs:
            kwargs['aux_logits'] = False
        if kwargs['aux_logits']:
            warnings.warn('auxiliary heads in the pretrained googlenet model are NOT pretrained, '
                          'so make sure to train them')
        original_aux_logits = kwargs['aux_logits']
        # Pretrained weights include the aux heads, so build with them on.
        kwargs['aux_logits'] = True
        kwargs['init_weights'] = False
        model = GoogLeNet(**kwargs)
        model.load_state_dict(model_zoo.load_url(model_urls['googlenet']))
        return model

    return GoogLeNet(**kwargs)


class GoogLeNet(nn.Module):
    """GoogLeNet built from modules.layers primitives.

    Forward/backward hooks on ``inception3b`` stash its activation and
    gradient in ``self.activations`` / ``self.gradients`` — apparently for
    the Grad-CAM style analysis sketched in the commented-out block in
    ``forward`` (confirm with the experiment code that consumes them).
    """

    __constants__ = ['aux_logits', 'transform_input']

    def __init__(
            self,
            num_classes: int = 1000,
            aux_logits: bool = True,
            transform_input: bool = False,
            init_weights: Optional[bool] = None,
            blocks: Optional[List[Callable[..., nn.Module]]] = None
    ) -> None:
        super(GoogLeNet, self).__init__()
        # blocks lets callers substitute the three building-block classes.
        if blocks is None:
            blocks = [BasicConv2d, Inception, InceptionAux]
        if init_weights is None:
            warnings.warn('The default weight initialization of GoogleNet will be changed in future releases of '
                          'torchvision. If you wish to keep the old behavior (which leads to long initialization times'
                          ' due to scipy/scipy#11299), please set init_weights=True.', FutureWarning)
            # NOTE(review): init_weights is set but no weight-init routine is
            # called anywhere in this class — confirm this is intended.
            init_weights = True
        assert len(blocks) == 3
        conv_block = blocks[0]
        inception_block = blocks[1]
        inception_aux_block = blocks[2]

        self.aux_logits = aux_logits
        self.transform_input = transform_input

        # Stem.
        self.conv1 = conv_block(3, 64, kernel_size=7, stride=2, padding=3)
        self.maxpool1 = MaxPool2d(3, stride=2, ceil_mode=True)
        self.conv2 = conv_block(64, 64, kernel_size=1)
        self.conv3 = conv_block(64, 192, kernel_size=3, padding=1)
        self.maxpool2 = MaxPool2d(3, stride=2, ceil_mode=True)

        # Inception stages; args are
        # (in, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj).
        self.inception3a = inception_block(192, 64, 96, 128, 16, 32, 32)
        self.inception3b = inception_block(256, 128, 128, 192, 32, 96, 64)
        self.maxpool3 = MaxPool2d(3, stride=2, ceil_mode=True)

        self.inception4a = inception_block(480, 192, 96, 208, 16, 48, 64)
        self.inception4b = inception_block(512, 160, 112, 224, 24, 64, 64)
        self.inception4c = inception_block(512, 128, 128, 256, 24, 64, 64)
        self.inception4d = inception_block(512, 112, 144, 288, 32, 64, 64)
        self.inception4e = inception_block(528, 256, 160, 320, 32, 128, 128)
        self.maxpool4 = MaxPool2d(2, stride=2, ceil_mode=True)

        self.inception5a = inception_block(832, 256, 160, 320, 32, 128, 128)
        self.inception5b = inception_block(832, 384, 192, 384, 48, 128, 128)

        # Auxiliary classifier heads (not used by forward() below).
        if aux_logits:
            self.aux1 = inception_aux_block(512, num_classes)
            self.aux2 = inception_aux_block(528, num_classes)
        else:
            self.aux1 = None  # type: ignore[assignment]
            self.aux2 = None  # type: ignore[assignment]

        # Classifier.
        self.avgpool = AdaptiveAvgPool2d((1, 1))
        self.dropout = Dropout(0.2)
        self.fc = Linear(1024, num_classes)

        # Hook storage for CAM-style analysis on inception3b.
        self.gradients = dict()
        self.activations = dict()

        def forward_hook(module, input, output):
            self.activations['value'] = output
            return None

        def backward_hook(module, input, output):
            self.gradients['value'] = output[0]

        self.inception3b.register_forward_hook(forward_hook)
        # NOTE(review): register_backward_hook is deprecated in newer torch
        # (register_full_backward_hook) — kept as-is.
        self.inception3b.register_backward_hook(backward_hook)

    def forward(self, x):
        """Plain classification forward pass; returns logits only."""
        # N x 3 x 224 x 224
        x = self.conv1(x)
        # N x 64 x 112 x 112
        x = self.maxpool1(x)
        # N x 64 x 56 x 56
        x = self.conv2(x)
        # N x 64 x 56 x 56
        x = self.conv3(x)
        # N x 192 x 56 x 56
        x = self.maxpool2(x)
        # N x 192 x 28 x 28
        x = self.inception3a(x)
        # N x 256 x 28 x 28
        x = self.inception3b(x)
        # N x 480 x 28 x 28
        x = self.maxpool3(x)
        # N x 480 x 14 x 14
        x = self.inception4a(x)
        # N x 512 x 14 x 14
        x = self.inception4b(x)
        # N x 512 x 14 x 14
        x = self.inception4c(x)
        # N x 512 x 14 x 14
        x = self.inception4d(x)
        # N x 528 x 14 x 14
        x = self.inception4e(x)
        # N x 832 x 14 x 14
        x = self.maxpool4(x)
        # N x 832 x 7 x 7
        x = self.inception5a(x)
        # N x 832 x 7 x 7
        x = self.inception5b(x)
        # N x 1024 x 7 x 7
        x = self.avgpool(x)
        # N x 1024 x 1 x 1
        x = torch.flatten(x, 1)
        # N x 1024
        x = self.dropout(x)
        x = self.fc(x)
        # N x 1000 (num_classes)

        # Commented-out experiment: LRP relevance propagation plus
        # Grad-CAM / Grad-CAM++ computation from the inception3b hooks.
        # (Note it references an undefined name ``t``.)
        # R = self.CLRP(x)
        #
        # logit = x[:, x.max(1)[-1]].sum()
        # logit.backward()
        # R = self.fc.relprop(R)
        # R = self.dropout.relprop(R)
        # R = R.reshape_as(self.avgpool.Y)
        # R = self.avgpool.relprop(R)
        # R = self.inception5b.relprop(R)
        # R = self.inception5a.relprop(R)
        # R = self.maxpool4.relprop(R)
        # R = self.inception4e.relprop(R)
        # R = self.inception4d.relprop(R)
        # R = self.inception4c.relprop(R)
        # R = self.inception4b.relprop(R)
        # R = self.inception4a.relprop(R)
        # R = self.maxpool3.relprop(R)
        # R = self.inception3b.relprop(R)
        # R = self.inception3a.relprop(R)
        #
        # r_weight = torch.mean(R, dim=(2, 3), keepdim=True)
        # r_cam = t * r_weight
        # r_cam = torch.sum(r_cam, dim=(0, 1))
        #
        # a = self.activations['value']
        # g = self.gradients['value']
        # g_ = torch.mean(g, dim=(2, 3), keepdim=True)
        # grad_cam = a * g_
        # grad_cam = torch.sum(grad_cam, dim=(0, 1))
        #
        # g_2 = g ** 2
        # g_3 = g ** 3
        # alpha_numer = g_2
        # alpha_denom = 2 * g_2 + torch.sum(a * g_3, dim=(0, 1), keepdim=True)  # + 1e-2
        #
        # alpha = alpha_numer / alpha_denom
        #
        # w = torch.sum(alpha * torch.clamp(g, min=0), dim=(0, 1), keepdim=True)
        #
        # grad_cam_pp = torch.clamp(w * a, min=0)
        # grad_cam_pp = torch.sum(grad_cam_pp, dim=-1)

        return x

    def CLRP(self, x):
        # Contrastive LRP seed: 1 at the argmax logit, -1/1000 elsewhere.
        maxindex = torch.argmax(x)
        R = torch.ones(x.shape).cuda()
        R /= -1000
        R[:, maxindex] = 1
        return R

    def relprop(self, R):
        # Propagate relevance back from the classifier through the top
        # inception stages (lower stages left commented out).
        R = self.fc.relprop(R)
        R = self.dropout.relprop(R)
        R = R.reshape_as(self.avgpool.Y)
        R = self.avgpool.relprop(R)
        R = self.inception5b.relprop(R)
        R = self.inception5a.relprop(R)
        R = self.maxpool4.relprop(R)
        R = self.inception4e.relprop(R)
        R = self.inception4d.relprop(R)
        R = self.inception4c.relprop(R)
        R = self.inception4b.relprop(R)
        R = self.inception4a.relprop(R)
        # R = self.maxpool3.relprop(R)
        # R = self.inception3b.relprop(R)
        # R = self.inception3a.relprop(R)
        # R = self.maxpool2.relprop(R)
        # R = self.conv3.relprop(R)
        # R = self.conv2.relprop(R)
        # R = self.maxpool1.relprop(R)
        # R = self.conv1.relprop(R)
        return R


class InceptionAux(nn.Module):
    """Auxiliary classifier head attached mid-network during training."""

    def __init__(
            self,
            in_channels: int,
            num_classes: int,
            conv_block: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super(InceptionAux, self).__init__()
        if conv_block is None:
            conv_block = BasicConv2d
        self.conv = conv_block(in_channels, 128, kernel_size=1)
        self.fc1 = nn.Linear(2048, 1024)
        self.fc2 = nn.Linear(1024, num_classes)

    def forward(self, x: Tensor) -> Tensor:
        # aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14
        x = F.adaptive_avg_pool2d(x, (4, 4))
        # aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4
        x = self.conv(x)
        # N x 128 x 4 x 4
        x = torch.flatten(x, 1)
        # N x 2048
        x = F.relu(self.fc1(x), inplace=True)
        # N x 1024
        x = F.dropout(x, 0.7, training=self.training)
        # N x 1024
        x = self.fc2(x)
        # N x 1000 (num_classes)
        return x


class Inception(nn.Module):
    """Inception module: four parallel branches concatenated on channels.

    Branch output widths are remembered (channel1..channel4) so relprop can
    split the concatenated relevance back per branch.
    """

    def __init__(
            self,
            in_channels: int,
            ch1x1: int,
            ch3x3red: int,
            ch3x3: int,
            ch5x5red: int,
            ch5x5: int,
            pool_proj: int,
            conv_block: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super(Inception, self).__init__()
        if conv_block is None:
            conv_block = BasicConv2d
        self.branch1 = conv_block(in_channels, ch1x1, kernel_size=1)
        self.channel1 = ch1x1

        self.branch2 = Sequential(
            conv_block(in_channels, ch3x3red, kernel_size=1),
            conv_block(ch3x3red, ch3x3, kernel_size=3, padding=1)
        )
        self.channel2 = ch3x3

        self.branch3 = Sequential(
            conv_block(in_channels, ch5x5red, kernel_size=1),
            # Here, kernel_size=3 instead of kernel_size=5 is a known bug.
            # Please see https://github.com/pytorch/vision/issues/906 for details.
            conv_block(ch5x5red, ch5x5, kernel_size=3, padding=1)
        )
        self.channel3 = ch5x5

        self.branch4 = Sequential(
            MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=True),
            conv_block(in_channels, pool_proj, kernel_size=1)
        )
        self.channel4 = pool_proj

    def _forward(self, x: Tensor) -> List[Tensor]:
        branch1 = self.branch1(x)
        branch2 = self.branch2(x)
        branch3 = self.branch3(x)
        branch4 = self.branch4(x)

        outputs = [branch1, branch2, branch3, branch4]
        return outputs

    def forward(self, x: Tensor) -> Tensor:
        outputs = self._forward(x)
        return torch.cat(outputs, 1)

    def relprop(self, R):
        # Undo the channel concatenation: slice R per branch, propagate each
        # slice through its branch, then sum the branch relevances.
        R1 = R[:, :self.channel1]
        R2 = R[:, self.channel1:self.channel1 + self.channel2]
        R3 = R[:, self.channel1 + self.channel2:self.channel1 + self.channel2 + self.channel3]
        R4 = R[:, self.channel1 + self.channel2 + self.channel3:]
        R1 = self.branch1.relprop(R1)
        R2 = self.branch2.relprop(R2)
        R3 = self.branch3.relprop(R3)
        R4 = self.branch4.relprop(R4)
        R = R1 + R2 + R3 + R4
        return R


class BasicConv2d(nn.Module):
    """Conv2d (no bias) -> BatchNorm2d -> ReLU, with relprop support."""

    def __init__(
            self,
            in_channels: int,
            out_channels: int,
            **kwargs: Any
    ) -> None:
        super(BasicConv2d, self).__init__()
        self.conv = Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = BatchNorm2d(out_channels, eps=0.001)

    def forward(self, x: Tensor) -> Tensor:
        x = self.conv(x)
        x = self.bn(x)
        return F.relu(x, inplace=True)

    def relprop(self, R):
        R = self.bn.relprop(R)
        R = self.conv.relprop(R)
        return R
12,703
4,956
q = int(raw_input().strip()) for a0 in xrange(q): s=raw_input().strip() # if s.startswith('0'): # print "No" # print s.find('1') # print s.rfind(s,a0,a0-1) # posof1 = s.find('1') digits = [str(x) for x in str(s)] print digits for digit in len(digits): if digits[digit]-digits[digit-1] == 1: print "yes"
375
148
import requests

from ..exceptions import ClientError


class PGDBClient(object):
    """
    Simple client for interacting with ISCAN servers.
    """

    def __init__(self, host, token=None, corpus_name=None):
        self.host = host
        self.token = token
        # Normalize: the endpoints below join path segments with '/'.
        if self.host.endswith('/'):
            self.host = self.host[:-1]
        self.corpus_name = corpus_name
        self.query_behavior = 'speaker'

    def _auth_headers(self):
        """Authorization header dict used by every authenticated request."""
        return {'Authorization': 'Token {}'.format(self.token)}

    def _database_id(self, database_name,
                     error_message='Could not find database, does not exist.'):
        """Resolve a database name to its server-side id.

        Parameters
        ----------
        database_name : str
            Name of the database to look up
        error_message : str
            Message for the ClientError raised when no database matches

        Returns
        -------
        The ``id`` field of the matching database entry
        """
        for d in self.list_databases():
            if d['name'] == database_name:
                return d['id']
        raise ClientError(error_message)

    def login(self, user_name, password):
        """
        Get an authentication token from the ISCAN server using the
        specified credentials

        Parameters
        ----------
        user_name : str
            User name
        password : str
            Password

        Returns
        -------
        str
            Authentication token to use in future requests
        """
        end_point = '/'.join([self.host, 'api', 'rest-auth', 'login', ''])
        resp = requests.post(end_point, {'username': user_name, 'password': password})
        token = resp.json()['key']
        self.token = token
        return token

    def create_database(self, database_name):
        """
        Create a new database with the specified name

        Parameters
        ----------
        database_name : str
            Name of the database to be created

        Returns
        -------
        dict
            Database information

        Raises
        ------
        ClientError
            If a database with that name already exists, or creation fails
        """
        for d in self.list_databases():
            if d['name'] == database_name:
                raise ClientError('Could not create database, already exists.')
        end_point = '/'.join([self.host, 'api', 'databases', ''])
        data = {'name': database_name}
        resp = requests.post(end_point, data=data, headers=self._auth_headers())
        if resp.status_code not in [200, 201, 202]:
            raise ClientError('Could not create database: {}'.format(resp.text))
        return resp.json()

    def delete_database(self, database_name):
        """
        Delete a database and all associated content

        Parameters
        ----------
        database_name : str
            Name of database to be deleted
        """
        database_id = self._database_id(
            database_name, 'Could not delete database, does not exist.')
        end_point = '/'.join([self.host, 'api', 'databases', str(database_id), ''])
        resp = requests.delete(end_point, headers=self._auth_headers())
        if resp.status_code != 204:
            raise ClientError('Could not delete database.')

    def database_status(self, database_name=None):
        """
        Get the current status of a specified database, or all databases
        on the server.

        Parameters
        ----------
        database_name : str
            Name of database to get status of, if not specified, will get
            status of all databases

        Returns
        -------
        dict
            Database status JSON
        """
        if database_name is not None:
            database_id = self._database_id(database_name)
            end_point = '/'.join([self.host, 'api', 'databases', str(database_id), ''])
        else:
            end_point = '/'.join([self.host, 'api', 'databases', ''])
        resp = requests.get(end_point, headers=self._auth_headers())
        return resp.json()

    def get_directory(self, database_name):
        """
        Get the directory of a local database

        Parameters
        ----------
        database_name : str
            Name of database

        Returns
        -------
        str
            Database data directory
        """
        database_id = self._database_id(database_name)
        end_point = '/'.join([self.host, 'api', 'databases', str(database_id),
                              'data_directory', ''])
        resp = requests.get(end_point, headers=self._auth_headers())
        return resp.json()

    def get_ports(self, database_name):
        """
        Get the ports of a locally running database

        Parameters
        ----------
        database_name : str
            Name of database

        Returns
        -------
        dict
            Ports of the database
        """
        database_id = self._database_id(database_name)
        end_point = '/'.join([self.host, 'api', 'databases', str(database_id),
                              'ports', ''])
        resp = requests.get(end_point, headers=self._auth_headers())
        return resp.json()

    def list_databases(self):
        """
        Get a list of all databases

        Returns
        -------
        list
            Database information
        """
        end_point = '/'.join([self.host, 'api', 'databases', ''])
        resp = requests.get(end_point, headers=self._auth_headers())
        if resp.status_code != 200:
            raise ClientError('Encountered error getting list of databases: {}'.format(resp.json()))
        return resp.json()

    def list_corpora(self, database_name=None):
        """
        Get a list of all corpora

        Parameters
        ----------
        database_name : str
            Name of the database to restrict corpora list to, optional

        Returns
        -------
        list
            Corpora information
        """
        if database_name is not None:
            database_id = self._database_id(database_name)
            end_point = '/'.join([self.host, 'api', 'databases', str(database_id),
                                  'corpora', ''])
        else:
            end_point = '/'.join([self.host, 'api', 'corpora', ''])
        resp = requests.get(end_point, headers=self._auth_headers())
        return resp.json()

    def start_database(self, database_name):
        """
        Start a database

        Parameters
        ----------
        database_name : str
            Database to start
        """
        database_id = self._database_id(database_name)
        end_point = '/'.join([self.host, 'api', 'databases', str(database_id),
                              'start', ''])
        resp = requests.post(end_point, data={}, headers=self._auth_headers())
        if resp.status_code not in [200, 201, 202]:
            raise ClientError('Could not start database: {}'.format(resp.text))

    def stop_database(self, database_name):
        """
        Stop a database

        Parameters
        ----------
        database_name : str
            Database to stop
        """
        database_id = self._database_id(database_name)
        end_point = '/'.join([self.host, 'api', 'databases', str(database_id),
                              'stop', ''])
        resp = requests.post(end_point, data={}, headers=self._auth_headers())
        if resp.status_code not in [200, 201, 202]:
            raise ClientError('Could not stop database: {}'.format(resp.text))
8,457
2,227
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import matplotlib.animation as animation
from matplotlib import rcParams
import matplotlib.patches as patches

rcParams['font.family'] = 'Times New Roman'
rcParams['font.size'] = 20
rcParams['axes.edgecolor'] = (0.0, 0.0, 0.0)
rcParams['axes.linewidth'] = 2
hfont = {'fontname': 'Times New Roman'}

folderpath = "./testdata/"


def format_e(n):
    """Format *n* in scientific notation with the trailing zeros stripped,
    e.g. 1e-05 -> '1.0E-05', 2500.0 -> '2.5E+03'.

    Fix: the previous version unconditionally appended '.0' to the stripped
    mantissa, producing malformed labels such as '2.5.0E+03' whenever the
    mantissa was not an integer; '.0' is now added only when needed.
    """
    a = '%E' % n
    mantissa, exponent = a.split('E')
    mantissa = mantissa.rstrip('0').rstrip('.')
    if '.' not in mantissa:
        mantissa += '.0'
    return mantissa + 'E' + exponent


def loadData(path, logscale=True, min=1e-16):
    """Load a comma-separated matrix, floor values at *min*, and (by default)
    return log10 of the data.

    Fix: *logscale* was previously accepted but ignored — the data was always
    log-transformed.  It now actually controls the transform; the default
    behaviour is unchanged.  (The *min* parameter name shadows the builtin
    but is kept for backward compatibility with keyword callers.)
    """
    data = np.array(np.loadtxt(path, delimiter=',', unpack=True))
    data[data < min] = min
    if logscale:
        data = np.log10(data)
    return data


def loadlabel(path):
    """Load the space-separated x-axis label values (perturbation sizes)."""
    data = np.array(np.loadtxt(path, delimiter=' ', unpack=True))
    return data


def init(ax, xlabel_list, y_min, y_max, num_x, x_labels_num=16, y_labels_num=18):
    """Configure axis limits, tick locators and formatted tick labels.

    Parameters
    ----------
    ax : matplotlib Axes
    xlabel_list : sequence of perturbation sizes used for x tick labels
    y_min, y_max : data range (log10 units)
    num_x : number of x samples
    x_labels_num, y_labels_num : how many ticks to label on each axis
    """
    ax.set_xlim(0, num_x)
    print(y_min, y_max)
    ax.set_ylim(y_min, y_max)
    ax.set_xlabel("Perturbation size", **hfont)
    ax.set_ylabel("Relative error ", **hfont)

    # y ticks: map evenly spaced log10 positions back to 10**k labels.
    y_labels_tuples = ()
    ax.yaxis.set_major_locator(plt.MaxNLocator(y_labels_num))
    x_labels_tuples = ()
    ax.xaxis.set_major_locator(plt.MaxNLocator(x_labels_num + 1))
    for i in range(0, y_labels_num):
        y_value = i / (y_labels_num - 1) * (y_max - y_min) + y_min
        y_value = format_e(10 ** int(y_value))
        y_labels_tuples = y_labels_tuples + (y_value,)
    ax.set_yticklabels(y_labels_tuples, size=10)

    # x ticks: sample the label list at evenly spaced indices.
    for i in range(0, x_labels_num):
        index = int(i / (x_labels_num - 1) * (num_x - 1))
        x_labels_tuples = x_labels_tuples + (format_e(xlabel_list[index]),)
    ax.set_xticklabels(x_labels_tuples, size=15)
    plt.xticks(rotation=45)
    return


def plotData(data, labels, names):
    """Plot each row of *data* (log10 relative errors) against sample index,
    with x tick labels taken from *labels* and legend names from *names*."""
    num_lines = data.shape[0]
    num_x = data.shape[1]
    y_max = np.max(data)
    y_min = np.min(data)
    fig, ax = plt.subplots()
    init(ax, labels, y_min, y_max, num_x, x_labels_num=16, y_labels_num=18)
    fig.set_figheight(8)
    fig.set_figwidth(8)
    plt.grid(True)
    ydata = np.arange(num_x)
    for i in range(num_lines):
        print(ydata.tolist())
        print(data[i, :].tolist())
        plt.plot(ydata.tolist(), data[i, :].tolist(), '-',
                 animated=False, antialiased=True, markersize=5,
                 color='#FF5C5C', label=names[i], linewidth=6)
    plt.subplots_adjust(bottom=0.22)
    plt.show()
    return


if __name__ == "__main__":
    data = loadData(folderpath + "ttmath_error.csv")
    labels = loadlabel(folderpath + "h_list.csv")
    plotData(data, labels, ["ttmath", "FD", "CD", "CFD"])
2,745
1,122
# Copyright (C) 2015-2021 Virgil Security, Inc. # # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # (1) Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # (2) Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # (3) Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING # IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # Lead Maintainer: Virgil Security Inc. 
# <support@virgilsecurity.com>

from virgil_crypto_lib._libs import *
from ctypes import *
from ._vscf_impl import vscf_impl_t
from ._vscf_error import vscf_error_t
from ._vscf_raw_public_key import vscf_raw_public_key_t
from ._vscf_raw_private_key import vscf_raw_private_key_t
from virgil_crypto_lib.common._c_bridge import vsc_data_t
from virgil_crypto_lib.common._c_bridge import vsc_buffer_t


class vscf_rsa_t(Structure):
    # Opaque C context; the native library owns its layout.
    pass


class VscfRsa(object):
    """RSA implementation.

    Thin ctypes binding over the native ``vscf_rsa_*`` functions of the
    Virgil foundation library.  Every wrapper method declares the C
    signature and forwards its arguments unchanged; nothing is validated
    on the Python side.
    """

    # Defines whether a public key can be imported or not.
    CAN_IMPORT_PUBLIC_KEY = True
    # Define whether a public key can be exported or not.
    CAN_EXPORT_PUBLIC_KEY = True
    # Define whether a private key can be imported or not.
    CAN_IMPORT_PRIVATE_KEY = True
    # Define whether a private key can be exported or not.
    CAN_EXPORT_PRIVATE_KEY = True

    def __init__(self):
        """Create underlying C context."""
        self._ll = LowLevelLibs()
        self._lib = self._ll.foundation

    def _bind(self, name, argtypes, restype):
        """Fetch *name* from the foundation library and declare its ctypes
        signature.

        Assigning ``argtypes``/``restype`` is idempotent, so re-declaring on
        every call is harmless and matches the behaviour of the previous
        fully expanded (generated) code.
        """
        func = getattr(self._lib, name)
        func.argtypes = argtypes
        func.restype = restype
        return func

    def vscf_rsa_new(self):
        return self._bind('vscf_rsa_new', [], POINTER(vscf_rsa_t))()

    def vscf_rsa_delete(self, ctx):
        return self._bind('vscf_rsa_delete', [POINTER(vscf_rsa_t)], None)(ctx)

    def vscf_rsa_use_random(self, ctx, random):
        return self._bind('vscf_rsa_use_random',
                          [POINTER(vscf_rsa_t), POINTER(vscf_impl_t)],
                          None)(ctx, random)

    def vscf_rsa_generate_ephemeral_key(self, ctx, key, error):
        """Generate ephemeral private key of the same type.
        Note, this operation might be slow."""
        return self._bind('vscf_rsa_generate_ephemeral_key',
                          [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), POINTER(vscf_error_t)],
                          POINTER(vscf_impl_t))(ctx, key, error)

    def vscf_rsa_import_public_key(self, ctx, raw_key, error):
        """Import public key from the raw binary format.

        Return public key that is adopted and optimized to be used
        with this particular algorithm.

        Binary format must be defined in the key specification.
        For instance, RSA public key must be imported from the format defined in
        RFC 3447 Appendix A.1.1."""
        return self._bind('vscf_rsa_import_public_key',
                          [POINTER(vscf_rsa_t), POINTER(vscf_raw_public_key_t), POINTER(vscf_error_t)],
                          POINTER(vscf_impl_t))(ctx, raw_key, error)

    def vscf_rsa_export_public_key(self, ctx, public_key, error):
        """Export public key to the raw binary format.

        Binary format must be defined in the key specification.
        For instance, RSA public key must be exported in format defined in
        RFC 3447 Appendix A.1.1."""
        return self._bind('vscf_rsa_export_public_key',
                          [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), POINTER(vscf_error_t)],
                          POINTER(vscf_raw_public_key_t))(ctx, public_key, error)

    def vscf_rsa_import_private_key(self, ctx, raw_key, error):
        """Import private key from the raw binary format.

        Return private key that is adopted and optimized to be used
        with this particular algorithm.

        Binary format must be defined in the key specification.
        For instance, RSA private key must be imported from the format defined in
        RFC 3447 Appendix A.1.2."""
        return self._bind('vscf_rsa_import_private_key',
                          [POINTER(vscf_rsa_t), POINTER(vscf_raw_private_key_t), POINTER(vscf_error_t)],
                          POINTER(vscf_impl_t))(ctx, raw_key, error)

    def vscf_rsa_export_private_key(self, ctx, private_key, error):
        """Export private key in the raw binary format.

        Binary format must be defined in the key specification.
        For instance, RSA private key must be exported in format defined in
        RFC 3447 Appendix A.1.2."""
        return self._bind('vscf_rsa_export_private_key',
                          [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), POINTER(vscf_error_t)],
                          POINTER(vscf_raw_private_key_t))(ctx, private_key, error)

    def vscf_rsa_can_encrypt(self, ctx, public_key, data_len):
        """Check if algorithm can encrypt data with a given key."""
        return self._bind('vscf_rsa_can_encrypt',
                          [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), c_size_t],
                          c_bool)(ctx, public_key, data_len)

    def vscf_rsa_encrypted_len(self, ctx, public_key, data_len):
        """Calculate required buffer length to hold the encrypted data."""
        return self._bind('vscf_rsa_encrypted_len',
                          [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), c_size_t],
                          c_size_t)(ctx, public_key, data_len)

    def vscf_rsa_encrypt(self, ctx, public_key, data, out):
        """Encrypt data with a given public key."""
        return self._bind('vscf_rsa_encrypt',
                          [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), vsc_data_t, POINTER(vsc_buffer_t)],
                          c_int)(ctx, public_key, data, out)

    def vscf_rsa_can_decrypt(self, ctx, private_key, data_len):
        """Check if algorithm can decrypt data with a given key.
        However, success result of decryption is not guaranteed."""
        return self._bind('vscf_rsa_can_decrypt',
                          [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), c_size_t],
                          c_bool)(ctx, private_key, data_len)

    def vscf_rsa_decrypted_len(self, ctx, private_key, data_len):
        """Calculate required buffer length to hold the decrypted data."""
        return self._bind('vscf_rsa_decrypted_len',
                          [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), c_size_t],
                          c_size_t)(ctx, private_key, data_len)

    def vscf_rsa_decrypt(self, ctx, private_key, data, out):
        """Decrypt given data."""
        return self._bind('vscf_rsa_decrypt',
                          [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), vsc_data_t, POINTER(vsc_buffer_t)],
                          c_int)(ctx, private_key, data, out)

    def vscf_rsa_can_sign(self, ctx, private_key):
        """Check if algorithm can sign data digest with a given key."""
        return self._bind('vscf_rsa_can_sign',
                          [POINTER(vscf_rsa_t), POINTER(vscf_impl_t)],
                          c_bool)(ctx, private_key)

    def vscf_rsa_signature_len(self, ctx, private_key):
        """Return length in bytes required to hold signature.
        Return zero if a given private key can not produce signatures."""
        return self._bind('vscf_rsa_signature_len',
                          [POINTER(vscf_rsa_t), POINTER(vscf_impl_t)],
                          c_size_t)(ctx, private_key)

    def vscf_rsa_sign_hash(self, ctx, private_key, hash_id, digest, signature):
        """Sign data digest with a given private key."""
        return self._bind('vscf_rsa_sign_hash',
                          [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), c_int, vsc_data_t, POINTER(vsc_buffer_t)],
                          c_int)(ctx, private_key, hash_id, digest, signature)

    def vscf_rsa_can_verify(self, ctx, public_key):
        """Check if algorithm can verify data digest with a given key."""
        return self._bind('vscf_rsa_can_verify',
                          [POINTER(vscf_rsa_t), POINTER(vscf_impl_t)],
                          c_bool)(ctx, public_key)

    def vscf_rsa_verify_hash(self, ctx, public_key, hash_id, digest, signature):
        """Verify data digest with a given public key and signature."""
        return self._bind('vscf_rsa_verify_hash',
                          [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), c_int, vsc_data_t, vsc_data_t],
                          c_bool)(ctx, public_key, hash_id, digest, signature)

    def vscf_rsa_setup_defaults(self, ctx):
        """Setup predefined values to the uninitialized class dependencies."""
        return self._bind('vscf_rsa_setup_defaults',
                          [POINTER(vscf_rsa_t)],
                          c_int)(ctx)

    def vscf_rsa_generate_key(self, ctx, bitlen, error):
        """Generate new private key.
        Note, this operation might be slow."""
        return self._bind('vscf_rsa_generate_key',
                          [POINTER(vscf_rsa_t), c_size_t, POINTER(vscf_error_t)],
                          POINTER(vscf_impl_t))(ctx, bitlen, error)

    def vscf_rsa_shallow_copy(self, ctx):
        return self._bind('vscf_rsa_shallow_copy',
                          [POINTER(vscf_rsa_t)],
                          POINTER(vscf_rsa_t))(ctx)

    def vscf_rsa_impl(self, ctx):
        return self._bind('vscf_rsa_impl',
                          [POINTER(vscf_rsa_t)],
                          POINTER(vscf_impl_t))(ctx)
12,327
4,778
# coding: utf-8 """ Thingsboard REST API For instructions how to authorize requests please visit <a href='http://thingsboard.io/docs/reference/rest-api/'>REST API documentation page</a>. OpenAPI spec version: 2.0 Contact: info@thingsboard.io Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..api_client import ApiClient class UserControllerApi(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def delete_user_using_delete(self, user_id, **kwargs): """ deleteUser This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_user_using_delete(user_id, async=True) >>> result = thread.get() :param async bool :param str user_id: userId (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.delete_user_using_delete_with_http_info(user_id, **kwargs) else: (data) = self.delete_user_using_delete_with_http_info(user_id, **kwargs) return data def delete_user_using_delete_with_http_info(self, user_id, **kwargs): """ deleteUser This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_user_using_delete_with_http_info(user_id, async=True) >>> result = thread.get() :param async bool :param str user_id: userId (required) :return: None If the method is called asynchronously, returns the request thread. 
""" all_params = ['user_id'] all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_user_using_delete" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'user_id' is set if ('user_id' not in params) or (params['user_id'] is None): raise ValueError("Missing the required parameter `user_id` when calling `delete_user_using_delete`") collection_formats = {} path_params = {} if 'user_id' in params: path_params['userId'] = params['user_id'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['*/*']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = ['X-Authorization'] return self.api_client.call_api('/api/user/{userId}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_activation_link_using_get(self, user_id, **kwargs): """ getActivationLink This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_activation_link_using_get(user_id, async=True) >>> result = thread.get() :param async bool :param str user_id: userId (required) :return: str If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_activation_link_using_get_with_http_info(user_id, **kwargs) else: (data) = self.get_activation_link_using_get_with_http_info(user_id, **kwargs) return data def get_activation_link_using_get_with_http_info(self, user_id, **kwargs): """ getActivationLink This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_activation_link_using_get_with_http_info(user_id, async=True) >>> result = thread.get() :param async bool :param str user_id: userId (required) :return: str If the method is called asynchronously, returns the request thread. """ all_params = ['user_id'] all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_activation_link_using_get" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'user_id' is set if ('user_id' not in params) or (params['user_id'] is None): raise ValueError("Missing the required parameter `user_id` when calling `get_activation_link_using_get`") collection_formats = {} path_params = {} if 'user_id' in params: path_params['userId'] = params['user_id'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['text/plain']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = ['X-Authorization'] return self.api_client.call_api('/api/user/{userId}/activationLink', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, 
files=local_var_files, response_type='str', auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_customer_users_using_get(self, customer_id, limit, **kwargs): """ getCustomerUsers This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_customer_users_using_get(customer_id, limit, async=True) >>> result = thread.get() :param async bool :param str customer_id: customerId (required) :param str limit: limit (required) :param str text_search: textSearch :param str id_offset: idOffset :param str text_offset: textOffset :return: TextPageDataUser If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_customer_users_using_get_with_http_info(customer_id, limit, **kwargs) else: (data) = self.get_customer_users_using_get_with_http_info(customer_id, limit, **kwargs) return data def get_customer_users_using_get_with_http_info(self, customer_id, limit, **kwargs): """ getCustomerUsers This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_customer_users_using_get_with_http_info(customer_id, limit, async=True) >>> result = thread.get() :param async bool :param str customer_id: customerId (required) :param str limit: limit (required) :param str text_search: textSearch :param str id_offset: idOffset :param str text_offset: textOffset :return: TextPageDataUser If the method is called asynchronously, returns the request thread. 
""" all_params = ['customer_id', 'limit', 'text_search', 'id_offset', 'text_offset'] all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_customer_users_using_get" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'customer_id' is set if ('customer_id' not in params) or (params['customer_id'] is None): raise ValueError("Missing the required parameter `customer_id` when calling `get_customer_users_using_get`") # verify the required parameter 'limit' is set if ('limit' not in params) or (params['limit'] is None): raise ValueError("Missing the required parameter `limit` when calling `get_customer_users_using_get`") collection_formats = {} path_params = {} if 'customer_id' in params: path_params['customerId'] = params['customer_id'] query_params = [] if 'text_search' in params: query_params.append(('textSearch', params['text_search'])) if 'id_offset' in params: query_params.append(('idOffset', params['id_offset'])) if 'text_offset' in params: query_params.append(('textOffset', params['text_offset'])) if 'limit' in params: query_params.append(('limit', params['limit'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['*/*']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = ['X-Authorization'] return self.api_client.call_api('/api/customer/{customerId}/users', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TextPageDataUser', auth_settings=auth_settings, 
async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_tenant_admins_using_get(self, tenant_id, limit, **kwargs): """ getTenantAdmins This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_tenant_admins_using_get(tenant_id, limit, async=True) >>> result = thread.get() :param async bool :param str tenant_id: tenantId (required) :param str limit: limit (required) :param str text_search: textSearch :param str id_offset: idOffset :param str text_offset: textOffset :return: TextPageDataUser If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_tenant_admins_using_get_with_http_info(tenant_id, limit, **kwargs) else: (data) = self.get_tenant_admins_using_get_with_http_info(tenant_id, limit, **kwargs) return data def get_tenant_admins_using_get_with_http_info(self, tenant_id, limit, **kwargs): """ getTenantAdmins This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_tenant_admins_using_get_with_http_info(tenant_id, limit, async=True) >>> result = thread.get() :param async bool :param str tenant_id: tenantId (required) :param str limit: limit (required) :param str text_search: textSearch :param str id_offset: idOffset :param str text_offset: textOffset :return: TextPageDataUser If the method is called asynchronously, returns the request thread. 
""" all_params = ['tenant_id', 'limit', 'text_search', 'id_offset', 'text_offset'] all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_tenant_admins_using_get" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'tenant_id' is set if ('tenant_id' not in params) or (params['tenant_id'] is None): raise ValueError("Missing the required parameter `tenant_id` when calling `get_tenant_admins_using_get`") # verify the required parameter 'limit' is set if ('limit' not in params) or (params['limit'] is None): raise ValueError("Missing the required parameter `limit` when calling `get_tenant_admins_using_get`") collection_formats = {} path_params = {} if 'tenant_id' in params: path_params['tenantId'] = params['tenant_id'] query_params = [] if 'text_search' in params: query_params.append(('textSearch', params['text_search'])) if 'id_offset' in params: query_params.append(('idOffset', params['id_offset'])) if 'text_offset' in params: query_params.append(('textOffset', params['text_offset'])) if 'limit' in params: query_params.append(('limit', params['limit'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['*/*']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = ['X-Authorization'] return self.api_client.call_api('/api/tenant/{tenantId}/users', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TextPageDataUser', auth_settings=auth_settings, async=params.get('async'), 
_return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_user_by_id_using_get(self, user_id, **kwargs): """ getUserById This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_user_by_id_using_get(user_id, async=True) >>> result = thread.get() :param async bool :param str user_id: userId (required) :return: User If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_user_by_id_using_get_with_http_info(user_id, **kwargs) else: (data) = self.get_user_by_id_using_get_with_http_info(user_id, **kwargs) return data def get_user_by_id_using_get_with_http_info(self, user_id, **kwargs): """ getUserById This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_user_by_id_using_get_with_http_info(user_id, async=True) >>> result = thread.get() :param async bool :param str user_id: userId (required) :return: User If the method is called asynchronously, returns the request thread. 
""" all_params = ['user_id'] all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_user_by_id_using_get" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'user_id' is set if ('user_id' not in params) or (params['user_id'] is None): raise ValueError("Missing the required parameter `user_id` when calling `get_user_by_id_using_get`") collection_formats = {} path_params = {} if 'user_id' in params: path_params['userId'] = params['user_id'] query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['*/*']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = ['X-Authorization'] return self.api_client.call_api('/api/user/{userId}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='User', auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def save_user_using_post(self, user, **kwargs): """ saveUser This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.save_user_using_post(user, async=True) >>> result = thread.get() :param async bool :param User user: user (required) :param bool send_activation_mail: sendActivationMail :return: User If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.save_user_using_post_with_http_info(user, **kwargs) else: (data) = self.save_user_using_post_with_http_info(user, **kwargs) return data def save_user_using_post_with_http_info(self, user, **kwargs): """ saveUser This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.save_user_using_post_with_http_info(user, async=True) >>> result = thread.get() :param async bool :param User user: user (required) :param bool send_activation_mail: sendActivationMail :return: User If the method is called asynchronously, returns the request thread. 
""" all_params = ['user', 'send_activation_mail'] all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method save_user_using_post" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'user' is set if ('user' not in params) or (params['user'] is None): raise ValueError("Missing the required parameter `user` when calling `save_user_using_post`") collection_formats = {} path_params = {} query_params = [] if 'send_activation_mail' in params: query_params.append(('sendActivationMail', params['send_activation_mail'])) header_params = {} form_params = [] local_var_files = {} body_params = None if 'user' in params: body_params = params['user'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['*/*']) # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = ['X-Authorization'] return self.api_client.call_api('/api/user', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='User', auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def send_activation_email_using_post(self, email, **kwargs): """ sendActivationEmail This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.send_activation_email_using_post(email, async=True) >>> result = thread.get() :param async bool :param str email: email (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.send_activation_email_using_post_with_http_info(email, **kwargs) else: (data) = self.send_activation_email_using_post_with_http_info(email, **kwargs) return data def send_activation_email_using_post_with_http_info(self, email, **kwargs): """ sendActivationEmail This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.send_activation_email_using_post_with_http_info(email, async=True) >>> result = thread.get() :param async bool :param str email: email (required) :return: None If the method is called asynchronously, returns the request thread. """ all_params = ['email'] all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method send_activation_email_using_post" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'email' is set if ('email' not in params) or (params['email'] is None): raise ValueError("Missing the required parameter `email` when calling `send_activation_email_using_post`") collection_formats = {} path_params = {} query_params = [] if 'email' in params: query_params.append(('email', params['email'])) header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\ select_header_accept(['*/*']) # HTTP header `Content-Type` header_params['Content-Type'] = 
self.api_client.\ select_header_content_type(['application/json']) # Authentication setting auth_settings = ['X-Authorization'] return self.api_client.call_api('/api/user/sendActivationMail', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
30,394
7,788
import os
import shutil
import subprocess
from pathlib import Path
from PIL import Image
import errno
import time
from re import search

# When True, .png inputs are converted to RGB .jpg before compression
# (set interactively in __main__).
CONVERT_PNG_TO_JPG = False
# Running totals (megabytes / file count) updated by compress().
TOTAL_ORIGINAL = 0
TOTAL_COMPRESSED = 0
TOTAL_GAIN = 0
TOTAL_FILES = 0
# JPEG/Pillow save quality.
QUALITY = 85


def compress(location):
    """Recursively compress every image under *location*.

    The directory tree is mirrored into a sibling tree where 'input' in
    the path is replaced by 'output'. Image files are re-saved with
    Pillow (optionally converting .png to .jpg); any other file is
    copied verbatim. Module-level TOTAL_* accounting globals are updated.

    Args:
        location (str): root directory to process.
    """
    global TOTAL_GAIN, TOTAL_ORIGINAL, TOTAL_COMPRESSED, TOTAL_FILES

    image_exts = ('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif', '.webp')

    # BUG FIX: os.walk already descends into subdirectories. The original
    # additionally called compress() on every subdirectory by hand, which
    # processed directories multiple times and built broken input paths
    # (it joined `location` with deeply nested file names). Using the walk
    # root directly visits every file exactly once with a correct path.
    for root, _dirs, files in os.walk(location):
        out_path = root.replace(r'input', r'output')
        for image in files:
            input_path = root + os.sep + image
            if not os.path.isfile(input_path):
                continue
            # Mirror the directory structure in the output tree.
            # makedirs(exist_ok=True) is race-safe by itself, so the
            # original sleep-and-retry dance is unnecessary.
            os.makedirs(out_path, exist_ok=True)
            # BUG FIX: 'webp' was missing its dot, so any file merely
            # *ending* in "webp" matched.
            if image.lower().endswith(image_exts):
                try:
                    opt = Image.open(input_path)
                except OSError:
                    # unreadable/corrupt image: report it and move on
                    print(f'skipping file cannot open: {input_path}')
                    continue
                original_size = os.stat(input_path).st_size / 1024 / 1024
                TOTAL_ORIGINAL += original_size
                print(input_path)
                print("Original size: " + f'{original_size:,.2f}' + ' Megabytes')

                out_file = out_path + os.sep + image
                # Convert .png to .jpg if requested.
                if CONVERT_PNG_TO_JPG and image.lower().endswith('.png'):
                    rgb_im = opt.convert('RGB')
                    out_file = out_file.replace(".png", ".jpg")
                    rgb_im.save(out_file)
                    opt = Image.open(out_file)

                opt.save(out_file, optimize=True, quality=QUALITY)
                opt = Image.open(out_file)
                compressed_size = os.stat(out_file).st_size / 1024 / 1024
                TOTAL_COMPRESSED += compressed_size
                gain = original_size - compressed_size
                TOTAL_GAIN += gain
                TOTAL_FILES += 1
                print("Compressed size: " + f'{compressed_size:,.2f}' + " megabytes")
                print("Gain : " + f'{gain:,.2f}' + " megabytes")
                opt.close()
            else:
                output_file = input_path.replace('input', 'output')
                print('File not image, copying instead: ' + input_path)
                # BUG FIX: shutil.copy is safe for paths containing spaces
                # or shell metacharacters, unlike the original
                # subprocess.call('cp ' + ..., shell=True).
                shutil.copy(input_path, output_file)


if __name__ == '__main__':
    start_path = os.path.dirname(os.path.abspath(__file__)) + os.sep + r"input"
    # ask if .png images should automatically be converted to .jpg
    CONVERT_PNG_TO_JPG = input('Would you like to convert .png images to .jpg? (y/n): ') == 'y'
    TOTAL_GAIN = 0
    compress(start_path)
    # guard against division by zero when no images were found
    reduction = (TOTAL_GAIN / TOTAL_ORIGINAL) * 100 if TOTAL_ORIGINAL else 0.0
    print("---------------------------------------------------------------------------------------------")
    print('-------------------------------------------SUMMARY-------------------------------------------')
    print('Files: ' + f'{TOTAL_FILES}')
    print("Original: " + f'{TOTAL_ORIGINAL:,.2f}' + " megabytes || " +
          "New Size: " + f'{TOTAL_COMPRESSED:,.2f}' + " megabytes" +
          " || Gain: " + f'{TOTAL_GAIN:,.2f}' + " megabytes ~" +
          f'{reduction:,.2f}' + "% reduction")
5,268
1,486
import os
import typing
from typing import Any, Callable, List, Tuple, Union

import numpy as np

from ..data_processor.readers import preprocess_image, read_image, restore_image
from ..data_processor.visualizer import show_important_parts, visualize_image, save_image
from ..common.paddle_utils import init_checkpoint, to_lodtensor
from ._lime_base import LimeBase
from .abc_interpreter import Interpreter


class LIMECVInterpreter(Interpreter):
    """LIME Interpreter for CV tasks.

    See the original LIME paper for details: https://arxiv.org/abs/1602.04938
    """

    def __init__(self,
                 paddle_model: Callable,
                 trained_model_path: str,
                 model_input_shape=[3, 224, 224],
                 use_cuda=True) -> None:
        """Initialize the LIMECVInterpreter.

        Args:
            paddle_model (callable): user function that takes model inputs
                and returns predictions.
            trained_model_path (str): pretrained model directory.
            model_input_shape (list, optional): model input shape.
                Default: [3, 224, 224]
            use_cuda (bool, optional): whether to use cuda. Default: True
        """
        Interpreter.__init__(self)
        self.paddle_model = paddle_model
        self.trained_model_path = trained_model_path
        self.model_input_shape = model_input_shape
        self.use_cuda = use_cuda
        self.paddle_prepared = False
        # default LIME settings
        self.lime_base = LimeBase()
        self.lime_intermediate_results = {}

    def interpret(self,
                  data,
                  interpret_class=None,
                  num_samples=1000,
                  batch_size=50,
                  visual=True,
                  save_path=None):
        """Compute LIME weights for one image.

        Args:
            data (str or ndarray): input file path, or an image array.
            interpret_class (int, optional): class index to interpret;
                None picks the most likely label. Default: None
            num_samples (int, optional): LIME sampling count; more samples
                usually give a more accurate interpretation. Default: 1000
            batch_size (int, optional): samples forwarded per batch.
                Default: 50
            visual (bool, optional): whether to visualize the result.
                Default: True
            save_path (str, optional): where to save the processed image;
                None disables saving. Default: None

        Returns:
            dict: LIME weights {interpret_label_i: weights on features}
        """
        # accept either a path or an already-loaded array
        if isinstance(data, str):
            data_instance = read_image(
                data, crop_size=self.model_input_shape[1])
        else:
            if len(data.shape) == 3:
                data = np.expand_dims(data, axis=0)
            if np.issubdtype(data.dtype, np.integer):
                data_instance = data
            else:
                data_instance = restore_image(data.copy())

        self.input_type = type(data_instance)
        self.data_type = np.array(data_instance).dtype

        if not self.paddle_prepared:
            self._paddle_prepare()

        # only one example here
        probability = self.predict_fn(data_instance)[0]

        # default to interpreting the top-1 prediction
        if interpret_class is None:
            pred_label = np.argsort(probability)
            interpret_class = pred_label[-1:]
        interpret_class = np.array(interpret_class)

        lime_weights, r2_scores = self.lime_base.interpret_instance(
            data_instance[0],
            self.predict_fn,
            interpret_class,
            num_samples=num_samples,
            batch_size=batch_size)

        interpretation = show_important_parts(
            data_instance[0],
            lime_weights,
            interpret_class[0],
            self.lime_base.segments,
            visual=visual,
            save_path=save_path)

        self.lime_intermediate_results['probability'] = probability
        self.lime_intermediate_results['input'] = data_instance[0]
        self.lime_intermediate_results[
            'segmentation'] = self.lime_base.segments
        self.lime_intermediate_results['r2_scores'] = r2_scores

        return lime_weights

    def _paddle_prepare(self, predict_fn=None):
        # Build the inference program once and cache a closure over it.
        if predict_fn is None:
            import paddle.fluid as fluid
            startup_prog = fluid.Program()
            main_program = fluid.Program()
            with fluid.program_guard(main_program, startup_prog):
                with fluid.unique_name.guard():
                    data_op = fluid.data(
                        name='data',
                        shape=[None] + self.model_input_shape,
                        dtype='float32')
                    probs = self.paddle_model(data_op)
                    if isinstance(probs, tuple):
                        probs = probs[0]
                    main_program = main_program.clone(for_test=True)

            if self.use_cuda:
                gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
                place = fluid.CUDAPlace(gpu_id)
            else:
                place = fluid.CPUPlace()
            self.place = place
            exe = fluid.Executor(place)
            fluid.io.load_persistables(exe, self.trained_model_path,
                                       main_program)

            def predict_fn(data_instance):
                # transpose to [N, 3, H, W], scaled to [0.0, 1.0]
                data = preprocess_image(data_instance)
                [result] = exe.run(main_program,
                                   fetch_list=[probs],
                                   feed={'data': data})
                return result

        self.predict_fn = predict_fn
        self.paddle_prepared = True


class LIMENLPInterpreter(Interpreter):
    """LIME Interpreter for NLP tasks.

    See the original LIME paper for details: https://arxiv.org/abs/1602.04938
    """

    def __init__(self,
                 paddle_model: Callable,
                 trained_model_path: str,
                 use_cuda=True) -> None:
        """Initialize the LIMENLPInterpreter.

        Args:
            paddle_model (callable): user function that takes model inputs
                and returns predictions.
            trained_model_path (str): pretrained model directory.
            use_cuda (bool, optional): whether to use cuda. Default: True
        """
        Interpreter.__init__(self)
        self.paddle_model = paddle_model
        self.trained_model_path = trained_model_path
        self.use_cuda = use_cuda
        self.paddle_prepared = False
        # default LIME settings
        self.lime_base = LimeBase()
        self.lime_intermediate_results = {}

    def interpret(self,
                  data,
                  preprocess_fn,
                  unk_id,
                  pad_id=None,
                  interpret_class=None,
                  num_samples=1000,
                  batch_size=50,
                  lod_levels=None,
                  return_pred=False,
                  visual=True):
        """Compute LIME weights for one piece of text.

        Args:
            data (str): raw string to analyse.
            preprocess_fn (Callable): maps the raw string to the tuple of
                inputs fed to the NLP model.
            unk_id (int): word id used to replace occluded words.
            pad_id (int or None): word id used for padding sequences;
                None means no padding. Default: None
            interpret_class (list or numpy.ndarray, optional): class index
                to interpret; None picks the most likely label. Default: None
            num_samples (int, optional): LIME sampling count. Default: 1000
            batch_size (int, optional): samples forwarded per batch.
                Default: 50
            lod_levels (list/tuple/ndarray or None, optional): lod levels
                for the model inputs, one entry per output of
                preprocess_fn; None means all zeros. Default: None
            return_pred (bool, optional): also return the predicted class
                and its probability. Default: False
            visual (bool, optional): whether to visualize. Default: True

        Returns:
            dict: LIME weights {interpret_label_i: weights on features}
            (or a (class, probability, weights) tuple when return_pred).
        """
        model_inputs = preprocess_fn(data)
        # normalize to a tuple of arrays
        if not isinstance(model_inputs, tuple):
            self.model_inputs = (np.array(model_inputs), )
        else:
            self.model_inputs = tuple(np.array(inp) for inp in model_inputs)

        if lod_levels is None:
            lod_levels = [0] * len(self.model_inputs)
        self.lod_levels = lod_levels

        if not self.paddle_prepared:
            self._paddle_prepare()

        # only one example here
        probability = self.predict_fn(*self.model_inputs)[0]

        # default to interpreting the top-1 prediction
        if interpret_class is None:
            pred_label = np.argsort(probability)
            interpret_class = pred_label[-1:]

        lime_weights, r2_scores = self.lime_base.interpret_instance_text(
            self.model_inputs,
            classifier_fn=self.predict_fn,
            interpret_labels=interpret_class,
            unk_id=unk_id,
            pad_id=pad_id,
            num_samples=num_samples,
            batch_size=batch_size)

        # map feature positions back to word ids
        data_array = self.model_inputs[0]
        data_array = data_array.reshape((np.prod(data_array.shape), ))
        for c in lime_weights:
            weights_c = lime_weights[c]
            weights_new = [(data_array[tup[0]], tup[1]) for tup in weights_c]
            lime_weights[c] = weights_new

        if return_pred:
            return (interpret_class, probability[interpret_class],
                    lime_weights)
        return lime_weights

    def _paddle_prepare(self, predict_fn=None):
        # Build the inference program once and cache a closure over it.
        if predict_fn is None:
            import paddle.fluid as fluid
            startup_prog = fluid.Program()
            main_program = fluid.Program()
            with fluid.program_guard(main_program, startup_prog):
                with fluid.unique_name.guard():
                    data_ops = ()
                    for i, inp in enumerate(self.model_inputs):
                        if self.lod_levels[i] > 0:
                            op_ = fluid.data(
                                name='op_%d' % i,
                                shape=[None],
                                dtype=inp.dtype,
                                lod_level=self.lod_levels[i])
                        else:
                            op_ = fluid.data(
                                name='op_%d' % i,
                                shape=(None, ) + inp.shape[1:],
                                dtype=inp.dtype)
                        data_ops += (op_, )
                    probs = self.paddle_model(*data_ops)
                    if isinstance(probs, tuple):
                        probs = probs[0]
                    main_program = main_program.clone(for_test=True)

            if self.use_cuda:
                gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
                place = fluid.CUDAPlace(gpu_id)
            else:
                place = fluid.CPUPlace()
            self.place = place
            exe = fluid.Executor(self.place)
            init_checkpoint(exe, self.trained_model_path, main_program)

            def predict_fn(*params):
                params = self._format_model_inputs(params)
                [result] = exe.run(
                    main_program,
                    fetch_list=[probs],
                    feed={'op_%d' % i: d
                          for i, d in enumerate(params)})
                return result

        self.predict_fn = predict_fn
        self.paddle_prepared = True

    def _format_model_inputs(self, model_inputs):
        # wrap inputs with lod levels into LoDTensors; pass the rest through
        out = ()
        for i, inp in enumerate(model_inputs):
            if self.lod_levels[i] == 0:
                out += (inp, )
            else:
                out += (to_lodtensor(inp, self.place), )
        return out
16,968
4,904
from opendc.models.portfolio import Portfolio
from opendc.models.scenario import Scenario
from opendc.models.topology import Topology
from opendc.util.rest import Response


def POST(request):
    """Add a new Scenario for this Portfolio."""
    # validate the request shape before touching the database
    request.check_required_parameters(
        path={'portfolioId': 'string'},
        body={
            'scenario': {
                'name': 'string',
                'trace': {
                    'traceId': 'string',
                    'loadSamplingFraction': 'float',
                },
                'topology': {
                    'topologyId': 'string',
                },
                'operational': {
                    'failuresEnabled': 'bool',
                    'performanceInterferenceEnabled': 'bool',
                    'schedulerName': 'string',
                },
            }
        })

    # the caller must own (have edit access to) both the portfolio
    # and the referenced topology
    portfolio = Portfolio.from_id(request.params_path['portfolioId'])
    portfolio.check_exists()
    portfolio.check_user_access(request.google_id, True)

    scenario = Scenario(request.params_body['scenario'])

    topology = Topology.from_id(scenario.obj['topology']['topologyId'])
    topology.check_exists()
    topology.check_user_access(request.google_id, True)

    # persist the scenario, queued for simulation
    scenario.set_property('portfolioId', portfolio.get_id())
    scenario.set_property('simulation', {'state': 'QUEUED'})
    scenario.set_property('topology.topologyId', topology.get_id())
    scenario.insert()

    # link the new scenario back into its portfolio
    portfolio.obj['scenarioIds'].append(scenario.get_id())
    portfolio.update()

    return Response(200, 'Successfully added Scenario.', scenario.obj)
2,106
488
# Copyright 2020 Rastko Sknepnek, University of Dundee, r.skepnek@dundee.ac.uk
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.


# Class handling the simulation box
class Box:

    def __init__(self, Lx, Ly=None):
        """
        Construct simulation box.

        Parameters
        ----------
        Lx : float
            Size of the simulation box in x direction (must be > 0)
        Ly : float, optional
            Size of the simulation box in y direction (if None, same as Lx,
            i.e., square box)

        Raises
        ------
        ValueError
            If Lx (or an explicitly given Ly) is not strictly positive.

        Note
        ----
        Simulation box is centred at (0,0), i.e., x is in (-Lx/2,Lx/2]
        and y is in (-Ly/2,Ly/2]
        """
        # BUG FIX: the original used `Lx < 0.0`, accepting a zero-size box
        # despite the error message, and silently replaced a negative Ly
        # with Lx instead of rejecting it.
        if Lx <= 0.0:
            raise ValueError('Simulation box has to have length larger than 0.')
        self.Lx = Lx
        if Ly is None:
            self.Ly = Lx
        elif Ly <= 0.0:
            raise ValueError('Simulation box has to have length larger than 0.')
        else:
            self.Ly = Ly
        # box extents, centred at the origin
        self.xmin = -0.5 * self.Lx
        self.xmax = 0.5 * self.Lx
        self.ymin = -0.5 * self.Ly
        self.ymax = 0.5 * self.Ly
        # box area
        self.A = self.Lx * self.Ly
1,906
673
# -*- coding: utf-8 -*-
# vim:se fenc=utf8 noet:
from __future__ import (unicode_literals, division, absolute_import, print_function)

try:
    import vim
except ImportError:
    vim = {}

from powerline.bindings.vim import (vim_get_func, buffer_name)
from powerline.theme import requires_segment_info


@requires_segment_info
def webdevicons(pl, segment_info):
    '''Return the vim-devicons file-type icon for the current buffer.

    Yields nothing when the WebDevIconsGetFileTypeSymbol vim function
    is unavailable.
    '''
    icon_func = vim_get_func('WebDevIconsGetFileTypeSymbol')
    name = buffer_name(segment_info)
    if not icon_func:
        return []
    return [{
        'contents': icon_func(name),
        'highlight_groups': ['webdevicons', 'file_name'],
    }]


@requires_segment_info
def webdevicons_file_format(pl, segment_info):
    '''Return the vim-devicons file-format icon.

    Yields nothing when the WebDevIconsGetFileFormatSymbol vim function
    is unavailable.
    '''
    fmt_func = vim_get_func('WebDevIconsGetFileFormatSymbol')
    if not fmt_func:
        return []
    return [{
        'contents': fmt_func(),
        'highlight_groups': ['webdevicons_file_format', 'file_format'],
    }]
884
313
from fastapi.security import OAuth2PasswordRequestForm
from fastapi import APIRouter, Depends, HTTPException, status
from datetime import timedelta
from starlette.responses import JSONResponse

from db.crud.users import blacklist_token
from db.session import get_db
from core import security
from core.auth import authenticate_user, get_current_active_user, sign_up_new_user

auth_router = r = APIRouter()


def _permissions_for(user):
    """Map a user record to its permission level ('admin' or 'user')."""
    return "admin" if user.is_superuser else "user"


def _token_for(user, permissions):
    """Create a JWT access token for *user* with the given permissions."""
    access_token_expires = timedelta(
        minutes=security.ACCESS_TOKEN_EXPIRE_MINUTES
    )
    return security.create_access_token(
        data={"sub": user.alias, "permissions": permissions},
        expires_delta=access_token_expires,
    )


@r.post("/token")
async def login(
    db=Depends(get_db), form_data: OAuth2PasswordRequestForm = Depends()
):
    """Authenticate a user and return a bearer access token.

    Raises 401 for bad credentials; any unexpected error is reported as a
    400 JSON response rather than a 500.
    """
    try:
        user = authenticate_user(db, form_data.username, form_data.password)
        if not user:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED,
                detail="Incorrect username or password",
                headers={"WWW-Authenticate": "Bearer"},
            )
        permissions = _permissions_for(user)
        access_token = _token_for(user, permissions)
        return {"access_token": access_token, "token_type": "bearer", "permissions": permissions}
    except HTTPException as e:
        raise e
    except Exception as e:
        return JSONResponse(status_code=400, content=f"ERR::login::{str(e)}")


@r.post("/signup")
async def signup(
    db=Depends(get_db), form_data: OAuth2PasswordRequestForm = Depends()
):
    """Create a new account and return a bearer access token.

    Raises 409 when the account already exists.
    NOTE(review): unlike /token, the response deliberately omits the
    "permissions" key, matching the original behavior — confirm whether
    clients expect it here too.
    """
    try:
        user = sign_up_new_user(db, form_data.username, form_data.password)
        if not user:
            raise HTTPException(
                status_code=status.HTTP_409_CONFLICT,
                detail="Account already exists",
                headers={"WWW-Authenticate": "Bearer"},
            )
        permissions = _permissions_for(user)
        access_token = _token_for(user, permissions)
        return {"access_token": access_token, "token_type": "bearer"}
    except HTTPException as e:
        raise e
    except Exception as e:
        return JSONResponse(status_code=400, content=f"ERR::signup::{str(e)}")


@r.post("/logout")
async def logout(db=Depends(get_db),
                 token: str = Depends(security.oauth2_scheme),
                 current_user=Depends(get_current_active_user)):
    """Blacklist the presented token so it can no longer be used."""
    try:
        return blacklist_token(db, token)
    except Exception as e:
        # BUG FIX: the original built this JSONResponse but never returned
        # it, so errors silently produced an empty 200 response.
        return JSONResponse(status_code=400, content=f"ERR::logout::{str(e)}")
2,844
859
import os
import enum

from aqt.qt import QDialog, QGraphicsScene, QGraphicsRectItem, QGraphicsEllipseItem, QApplication
from aqt.qt import Qt, QPen, QGraphicsItem, QPixmap, QRectF, QPainter
from aqt.qt import QPointF, QBrush, QColor, QPainterPath, QIcon, QSize, QPalette
from aqt.utils import showInfo

from ..sr_occluder_ui import Ui_SROccluder
from .sr_rect import SRRect
# BUG FIX: the original imported from ".sr_occlusion_view.py" and
# ".sr_occlusion_scene.py" -- the ".py" suffix is not part of a module
# name and makes the import fail with ModuleNotFoundError.
from .sr_occlusion_view import SROcclusionView
from .sr_occlusion_scene import SROcclusionScene


class ToolMode(enum.Enum):
    """Editing tools available in the occluder dialog."""
    Select = 1
    Move = 2
    Zoom = 3
    Rect = 4
    Ellipse = 5
    Polygon = 6
    Line = 7
    Arrow = 8
    Darrow = 9
    Text = 10


class SROccluder(QDialog):
    """Dialog for drawing occlusion shapes over an image."""

    # lowercase tool names, in UI order; each maps to a `<name>Button`
    # widget and a `<name>Tool` slot on this class
    _TOOL_NAMES = ('select', 'move', 'zoom', 'rect', 'ellipse',
                   'polygon', 'line', 'arrow', 'darrow', 'text')

    def __init__(self, parent):
        super().__init__(parent=parent)
        self.ui = Ui_SROccluder()
        self.ui.setupUi(self)
        self.toolMode = ToolMode.Select
        self.setupButtons()

    def setupButtons(self):
        """Assign icons to the tool buttons and wire up their slots."""
        main_path = f'{os.path.dirname(os.path.realpath(__file__))}/../icons'
        for name in self._TOOL_NAMES:
            button = getattr(self.ui, f'{name}Button')
            button.setIcon(QIcon(f"{main_path}/{name}.png"))
            button.clicked.connect(getattr(self, f'{name}Tool'))

    def selectTool(self):
        QApplication.setOverrideCursor(Qt.ArrowCursor)
        self.changeMode(ToolMode.Select)

    def moveTool(self):
        # the move tool is the only one that changes the cursor shape
        QApplication.setOverrideCursor(Qt.SizeAllCursor)
        self.changeMode(ToolMode.Move)

    def zoomTool(self):
        QApplication.setOverrideCursor(Qt.ArrowCursor)
        self.changeMode(ToolMode.Zoom)

    def rectTool(self):
        QApplication.setOverrideCursor(Qt.ArrowCursor)
        self.changeMode(ToolMode.Rect)

    def ellipseTool(self):
        QApplication.setOverrideCursor(Qt.ArrowCursor)
        self.changeMode(ToolMode.Ellipse)

    def polygonTool(self):
        QApplication.setOverrideCursor(Qt.ArrowCursor)
        self.changeMode(ToolMode.Polygon)

    def lineTool(self):
        QApplication.setOverrideCursor(Qt.ArrowCursor)
        self.changeMode(ToolMode.Line)

    def arrowTool(self):
        QApplication.setOverrideCursor(Qt.ArrowCursor)
        self.changeMode(ToolMode.Arrow)

    def darrowTool(self):
        QApplication.setOverrideCursor(Qt.ArrowCursor)
        self.changeMode(ToolMode.Darrow)

    def textTool(self):
        QApplication.setOverrideCursor(Qt.ArrowCursor)
        self.changeMode(ToolMode.Text)

    def changeMode(self, mode):
        """Check the button for *mode* and uncheck the previous one."""
        self.resetButton(mode, True)
        self.resetButton(self.toolMode, False)
        self.toolMode = mode

    def resetButton(self, mode, state):
        """Set the checked state of the button associated with *mode*."""
        # dispatch table replaces the original ten-branch if/elif chain
        buttons = {
            ToolMode.Select: self.ui.selectButton,
            ToolMode.Move: self.ui.moveButton,
            ToolMode.Zoom: self.ui.zoomButton,
            ToolMode.Rect: self.ui.rectButton,
            ToolMode.Ellipse: self.ui.ellipseButton,
            ToolMode.Polygon: self.ui.polygonButton,
            ToolMode.Line: self.ui.lineButton,
            ToolMode.Arrow: self.ui.arrowButton,
            ToolMode.Darrow: self.ui.darrowButton,
            ToolMode.Text: self.ui.textButton,
        }
        button = buttons.get(mode)
        if button is not None:
            button.setChecked(state)
            button.repaint()

    def setupUi(self):
        theScene = SROcclusionScene(self, 'skull.jpg')
        self.ui.graphicsView.setScene(theScene)
        outlinePen = QPen()

        rect = theScene.addRect(10, 10, 50, 50, outlinePen, Qt.green)
        rect.setFlag(QGraphicsItem.ItemIsMovable)
        rect.setFlag(QGraphicsItem.ItemIsSelectable)
        rect.setFlag(QGraphicsItem.ItemIsFocusable)

        rect2 = SRRect(0, 0, 50, 30)
        rect2.setFlag(QGraphicsItem.ItemIsMovable)
        rect2.setFlag(QGraphicsItem.ItemIsSelectable)
        theScene.addItem(rect2)
5,530
1,846
""" Configuration class for handling configs with a given default. If you need custom functionality or need to apply post_processing to parsed config, simply extend this class. Example: ``` class FenceConfig(Config): def __init__(self, *args, **kwargs): super(FenceConfig, self).__init__(*args, **kwargs) def post_process(self): # allow authlib traffic on http for development if enabled. By default # it requires https. # # NOTE: use when fence will be deployed in such a way that fence will # only receive traffic from internal clients, and can safely use HTTP if ( self._configs.get("AUTHLIB_INSECURE_TRANSPORT") and "AUTHLIB_INSECURE_TRANSPORT" not in os.environ ): os.environ["AUTHLIB_INSECURE_TRANSPORT"] = "true" # if we're mocking storage, ignore the storage backends provided # since they'll cause errors if misconfigured if self._configs.get("MOCK_STORAGE", False): self._configs["STORAGE_CREDENTIALS"] = {} cirrus.config.config.update(**self._configs.get("CIRRUS_CFG", {})) ``` Recommended use: - Create a `config-default.yaml` and `config.py` in the top-level folder your app - Inside `config-default.yaml` add keys and reasonable default values - Inside `config.py`, create a class that inherits from this Config class - See above example - Add a final line to your `config.py` that instantiates your custom class: - Ensure that you provide the default config path - If placed in same directory as `config.py` you can use something like: ``` default_cfg_path = os.path.join( os.path.dirname(os.path.abspath(__file__)), "config-default.yaml" ) config = FenceConfig(default_cfg_path) ``` - Import your instaniated object whenever you need to get configuration - Example: `from fence.config import config` - Load in application configuration during init of your app - Example: `config.load('path/to/fence-config.yaml')` - Now you can safely access anything that was in your `config-default.yaml` from this object as if it were a dictionary - Example: `storage_creds = 
config["STORAGE_CREDENTIALS"]` - Example: `if config["SOME_BOOLEAN"]: ...` - Example: `nested_value = config["TOP_LEVEL"]["nested"] - And of course you can import that into any file you want and will have access to keys/values - Example: `from fence.config import config` """ from __future__ import division, absolute_import, print_function, unicode_literals import os import glob from yaml import safe_load as yaml_load from yaml.scanner import ScannerError from jinja2 import Template, TemplateSyntaxError import six from cdislogging import get_logger from gen3config.errors import NotFoundError, ParsingError logger = get_logger(__name__, log_level="info") class Config(dict): """ Configuration singleton that's instantiated on module load. Allows updating from a config file by using .update() """ def __init__(self, default_cfg_path): self._configs = {} self.default_cfg_path = default_cfg_path logger.debug("Checking if provided cfg path is an actual file...") if not os.path.isfile(default_cfg_path): raise FileNotFoundError( "Default configuration file provided {} does not exist.".format( default_cfg_path ) ) logger.debug("Attempting to parse provided cfg as yaml file...") try: yaml_load(open(self.default_cfg_path)) except Exception as exc: logger.exception(exc) raise ParsingError( "Could not parse provided file {} as YAML. 
See logs for details.".format(
        default_cfg_path
    )
)
# NOTE(review): the lines above are the tail of a statement that begins before
# this chunk (appears to be an error raised when the default config is unreadable).

    def get(self, key, default=None):
        # Dict-style lookup with an optional fallback (never raises KeyError).
        return self._configs.get(key, default)

    def set(self, key, value):
        # Explicit setter; equivalent to `cfg[key] = value`.
        self._configs.__setitem__(key, value)

    def setdefault(self, key, default=None):
        # Insert `default` only if `key` is not already present.
        self._configs.setdefault(key, default)

    def __setitem__(self, key, value):
        self._configs.__setitem__(key, value)

    def __contains__(self, key):
        return key in self._configs

    def __iter__(self):
        # NOTE: unlike a plain dict, iterating a Config yields (key, value)
        # pairs, not just keys.
        for key, value in six.iteritems(self._configs):
            yield key, value

    def __getitem__(self, key):
        return self._configs[key]

    def __delitem__(self, key):
        del self._configs[key]

    def __len__(self):
        return len(self._configs)

    def __str__(self):
        return str(self._configs)

    def update(self, *args, **kwargs):
        """
        Update configuration properties.

        Supports passing a single dictionary (or iterable of pairs) and/or
        keyword args, mirroring dict.update().
        """
        if len(args) > 1:
            raise TypeError(
                "update expected at most 1 arguments, got {}".format(len(args))
            )
        if args:
            self._configs.update(dict(args[0]))
        self._configs.update(kwargs)

    def load(self, config_path=None, search_folders=None, file_name=None):
        """
        Load configuration from `config_path`, or discover one inside
        `search_folders` (see get_config_path). Returns self for chaining.
        """
        if not config_path and not search_folders:
            raise AttributeError(
                "Cannot find configuration with given information. "
                "You must either provide `search_folders` arg so load knows where to "
                "look OR provide `config_path` as full path to config."
            )
        config_path = config_path or get_config_path(search_folders, file_name)
        if config_path:
            self.load_configuration_file(config_path)
        self.post_process()
        return self

    def load_configuration_file(self, provided_cfg_path):
        """
        Merge the provided YAML config on top of the default config.

        Only keys that exist in the default config are honored; unknown keys
        in the provided file are ignored (with a warning), and keys missing
        from the provided file keep their default values (with a warning).
        """
        logger.info("Opening default configuration...")
        # treat default cfg as template and replace nested vars, returning an updated dict
        config = nested_render(
            yaml_load(open(self.default_cfg_path)), {}, {}
        )
        logger.info("Applying configuration: {}".format(provided_cfg_path))
        # treat provided cfg as template and replace nested vars, returning an updated dict
        provided_configurations = nested_render(
            yaml_load(open(provided_cfg_path)), {}, {}
        )
        # only update known configuration values. In the situation
        # where the provided config does not have a certain value,
        # the default will be used.
        common_keys = {
            key: value
            for (key, value) in six.iteritems(config)
            if key in provided_configurations
        }
        keys_not_provided = {
            key: value
            for (key, value) in six.iteritems(config)
            if key not in provided_configurations
        }
        keys_to_update = {
            key: value
            for (key, value) in six.iteritems(provided_configurations)
            if key in common_keys
        }
        unknown_keys = {
            key: value
            for (key, value) in six.iteritems(provided_configurations)
            if key not in common_keys
        }
        config.update(keys_to_update)
        if keys_not_provided:
            logger.warning(
                "Did not provide key(s) {} in {}. Will be set to default value(s) from {}.".format(
                    keys_not_provided.keys(), provided_cfg_path, self.default_cfg_path
                )
            )
        if unknown_keys:
            logger.warning(
                "Unknown key(s) {} found in {}. Will be ignored.".format(
                    unknown_keys.keys(), provided_cfg_path
                )
            )
        self._configs.update(config)

    def post_process(self):
        """
        Hook for subclasses: post-process the configuration (set env vars,
        derive complex values, etc.). Called after loading and
        template-replacing. Default implementation does nothing.
        """
        pass

    def force_default_if_none(self, key, default_cfg=None, default_cfg_path=None):
        """
        Set `key` to its default value if it either
            1) doesn't exist (mostly for backwards-compatibility with
               previous configuration methods), or
            2) is None.
        Pass either a parsed `default_cfg` dict or a `default_cfg_path`.
        """
        default_cfg = default_cfg or yaml_load(open(default_cfg_path))
        if key not in self._configs or self._configs[key] is None:
            self._configs[key] = default_cfg.get(key)


def nested_render(cfg, fully_rendered_cfgs, replacements):
    """
    Template-render `cfg` by recursively replacing {{var}}'s with values from
    the current "namespace".

    Nested configs behave like nested namespaces: a block may reference its
    own vars and any parent/global vars, but not vars from sibling or deeper
    blocks. References that cannot be resolved render as an empty string
    rather than failing.

    Example::

        BASE_URL: 'http://localhost/user'
        OPENID_CONNECT:
          fence:
            api_base_url: 'http://other_fence/user'
            client_kwargs:
              redirect_uri: '{{BASE_URL}}/login/fence/login'   # global var: OK
            authorize_url: '{{api_base_url}}/oauth2/authorize' # same block: OK
        THIS_WONT_WORK: '{{api_base_url}}/test'  # inner var at top level: renders '/test'

    Args:
        cfg: the config value (dict or scalar) to render
        fully_rendered_cfgs (dict): accumulator for the rendered result
        replacements (dict): vars currently in scope for substitution

    Returns:
        dict: configurations with template vars replaced
    """
    if isinstance(cfg, dict):
        for key, value in six.iteritems(cfg):
            replacements.update(cfg)
            fully_rendered_cfgs[key] = {}
            fully_rendered_cfgs[key] = nested_render(
                value,
                fully_rendered_cfgs=fully_rendered_cfgs[key],
                replacements=replacements,
            )
        # new namespace, remove current vars (no longer available as replacements)
        for old_cfg, value in six.iteritems(cfg):
            replacements.pop(old_cfg, None)
        return fully_rendered_cfgs
    else:
        # it's not a dict, so lets try to render it. But only if it's
        # truthy (which means there's actually something to replace)
        if cfg:
            try:
                t = Template(str(cfg))
                rendered_value = t.render(**replacements)
            except TemplateSyntaxError:
                # not valid Jinja syntax: leave the raw value untouched
                rendered_value = cfg
            try:
                cfg = yaml_load(rendered_value)
            except ScannerError:
                # it's not loading into yaml, so let's assume it's a string with special
                # chars such as: {}[],&*#?|:-<>=!%@\)
                #
                # in YAML, we have to "quote" a string with special chars.
                #
                # since yaml_load isn't loading from a file, we need to wrap the Python
                # str in actual quotes.
                cfg = yaml_load('"{}"'.format(rendered_value))
        return cfg


def get_config_path(search_folders, file_name="*config.yaml"):
    """
    Return the path of a single configuration file matching `file_name`
    (default pattern "*config.yaml") inside one of the `search_folders`.

    Raises IOError if multiple matches are found and NotFoundError if none.
    """
    possible_configs = []
    file_name = file_name or "*config.yaml"
    for folder in search_folders:
        config_path = os.path.join(folder, file_name)
        possible_files = glob.glob(config_path)
        possible_configs.extend(possible_files)

    if len(possible_configs) == 1:
        return possible_configs[0]
    elif len(possible_configs) > 1:
        raise IOError(
            "Multiple config.yaml files found: {}. Please specify which "
            "configuration to use by providing `config_path` instead of "
            "`search_folders` to Config.load(). Alternatively, ensure that only a "
            "single valid *config.yaml exists in the search folders: {}.".format(
                str(possible_configs), search_folders
            )
        )
    else:
        raise NotFoundError(
            "Could not find config file {}. Searched in the following locations: "
            "{}".format(file_name, str(search_folders))
        )
13,371
3,622
# coding:utf-8
# Type: Public
import numpy as np
import common.Math as cMath
import math


class CarlrUtils(object):
    # Static helpers for CARLA-based RL environments (waypoint geometry,
    # vehicle-frame kinematics). Original comments were in Chinese; they are
    # translated and condensed below.
    Author = "BaoChuan Wang"
    AllowImport = False

    @staticmethod
    def get_direction_vector_series_and_car_to_next_waypoint_ratio(
            carla_engine,
            start_waypoint_xy_array,
            target_waypoint_xy_array,
            draw_in_UE=False
    ):
        '''
        Compute state terms for a WaypointsTarget-style environment, given a
        fixed start and target waypoint (world-frame xy arrays).

        Returns a 5-tuple:
          1. unit direction vector of the start->target waypoint segment
          2. unit direction vector from the car to the target waypoint
          3. the car's own forward unit vector
          4. ratio of (car->target distance) / (start->target distance)
          5. target position expressed in the car's local coordinate frame

        NOTE (translated from the original author's commentary): the two
        waypoints are fixed when the episode starts instead of being
        re-queried from the car's nearest waypoint each step — re-querying
        made rewards discontinuous w.r.t. actions and could trigger lane
        changes. The waypoint midpoint must lie ahead of the car (ensure a
        large enough waypoint spacing), otherwise the agent may learn to
        reverse.
        '''
        # Current vehicle position in world coordinates.
        vehicle_location = carla_engine.vehicle.get_location()
        car_point = np.array([vehicle_location.x, vehicle_location.y])

        if draw_in_UE:
            # Midpoint of the two waypoints, drawn with a line from the car
            # for debugging inside the simulator.
            center_point = (start_waypoint_xy_array + target_waypoint_xy_array) / 2
            center_point_transform = carla_engine.make_transform(
                x=center_point[0], y=center_point[1], z=vehicle_location.z
            )
            carla_engine.draw_point_xyz(center_point[0], center_point[1],
                                        carla_engine.vehicle.get_location().z + 0.25,
                                        color=(0, 255, 255), thickness=0.1)
            carla_engine.draw_line_location(
                vehicle_location,
                center_point_transform.location,
                life_time=1,
                color=(0, 0, 255)
            )

        # Unit direction of the waypoint segment (start -> target).
        way_unit_direction = target_waypoint_xy_array - start_waypoint_xy_array
        way_unit_direction /= np.linalg.norm(way_unit_direction, 2)

        # Unit direction from the car to the target waypoint.
        car_to_way_unit_direction = (target_waypoint_xy_array - car_point)
        car_to_way_unit_direction /= np.linalg.norm(car_to_way_unit_direction, 2)

        # Car's forward unit vector (xy components of the CARLA forward vector).
        car_unit_direction = carla_engine.vehicle.get_transform().get_forward_vector()
        car_unit_direction = np.array([car_unit_direction.x, car_unit_direction.y])

        # Remaining-distance ratio: car->target over start->target.
        total_distance = np.linalg.norm(target_waypoint_xy_array - start_waypoint_xy_array, 2)
        now_distance = np.linalg.norm(target_waypoint_xy_array - car_point, 2)
        car_to_target_distance_ratio = now_distance / total_distance

        # Vehicle yaw in radians.
        car_yaw = math.radians(carla_engine.vehicle_yaw)

        # Target waypoint expressed in the car's local (relative) frame.
        target_xy_array_relate_to_car = cMath.convert_point_into_relative_coordinate(
            target_waypoint_xy_array, car_point, original_yaw_radius=car_yaw)

        return way_unit_direction, car_to_way_unit_direction, car_unit_direction, car_to_target_distance_ratio, target_xy_array_relate_to_car

    @staticmethod
    def get_car_target_waypoints(engine, vehicle, n_waypoint=2, waypoint_spacing=15, draw_waypoints=True):
        """
        Collect `n_waypoint` road waypoints starting at the vehicle's current
        position, each `waypoint_spacing` meters further along the road.

        Returns (waypoint objects, xy list, xy array, target xy array) where
        the target is the last collected waypoint.
        """
        if n_waypoint < 2:
            raise ValueError("At least 2 waypoints will return!")
        # List<Waypoints>
        car_waypoints_list = []
        # Array2D
        car_waypoints_xy_array = None
        # List<List>
        car_waypoints_xy_list = []
        # Waypoint closest to the vehicle: the path's starting point.
        next_center_waypoints = engine.map.get_waypoint(vehicle.get_location())
        # NOTE(review): start_waypoint_xy_array is computed but never used or
        # returned — confirm whether it was meant to be part of the result.
        start_waypoint_xy_array = np.array([next_center_waypoints.transform.location.x,
                                            next_center_waypoints.transform.location.y])
        car_waypoints_list.append(next_center_waypoints)
        car_waypoints_xy_list.append([next_center_waypoints.transform.location.x,
                                      next_center_waypoints.transform.location.y])
        if n_waypoint == 2:
            # NOTE(review): this branch is identical to one iteration of the
            # loop below; the special case looks redundant.
            next_center_waypoints = next_center_waypoints.next(waypoint_spacing)[0]
            car_waypoints_list.append(next_center_waypoints)
            car_waypoints_xy_list.append([next_center_waypoints.transform.location.x,
                                          next_center_waypoints.transform.location.y])
        else:
            for i in range(n_waypoint - 1):
                next_center_waypoints = next_center_waypoints.next(waypoint_spacing)[0]
                car_waypoints_list.append(next_center_waypoints)
                car_waypoints_xy_list.append([next_center_waypoints.transform.location.x,
                                              next_center_waypoints.transform.location.y])
        car_waypoints_xy_array = np.array(car_waypoints_xy_list)
        # The last collected waypoint is the target.
        target_waypoint_xy_array = np.array([next_center_waypoints.transform.location.x,
                                             next_center_waypoints.transform.location.y])
        # Draw the chosen path in the simulator for debugging.
        if draw_waypoints:
            engine.draw_waypoint_list(car_waypoints_list, life_time=99999)
        return car_waypoints_list, car_waypoints_xy_list, car_waypoints_xy_array, target_waypoint_xy_array

    @staticmethod
    def get_velocity_accel_relative_to_car_and_their_scalar(engine):
        """
        Return the vehicle's velocity and acceleration expressed in the car's
        local frame, plus their scalar magnitudes:
        (velocity, vel_x, vel_y, accel, accel_x, accel_y).
        """
        velocity_vector = engine.get_velocity()
        velocity_to_car_x, velocity_to_car_y = cMath.convert_point_into_relative_coordinate(
            target_xy=[velocity_vector.x, velocity_vector.y],
            original_xy=[0, 0],
            original_yaw_radius=math.radians(engine.vehicle_yaw))
        velocity = engine.get_velocity_scalar()
        accel_vector = engine.get_accel()
        accel_to_car_x, accel_to_car_y = cMath.convert_point_into_relative_coordinate(
            target_xy=[accel_vector.x, accel_vector.y],
            original_xy=[0, 0],
            original_yaw_radius=math.radians(engine.vehicle_yaw))
        # NOTE(review): likely a copy-paste bug — this returns the VELOCITY
        # scalar as `accel`; expected something like engine.get_accel_scalar().
        # Confirm against the engine API before changing.
        accel = engine.get_velocity_scalar()
        return velocity, velocity_to_car_x, velocity_to_car_y, accel, accel_to_car_x, accel_to_car_y
8,150
3,113
import torch.nn as nn
import torch.nn.functional as F


class ResBlock(nn.Module):
    """Residual block: x + (ReflectPad-Conv-IN-ReLU-ReflectPad-Conv-IN)(x)."""

    def __init__(self, inFeatures):
        super(ResBlock, self).__init__()
        self.conv = nn.Sequential(
            nn.ReflectionPad2d(1),
            nn.Conv2d(inFeatures, inFeatures, 3),
            nn.InstanceNorm2d(inFeatures),
            nn.ReLU(inplace=True),
            nn.ReflectionPad2d(1),
            nn.Conv2d(inFeatures, inFeatures, 3),
            nn.InstanceNorm2d(inFeatures),
        )

    def forward(self, X):
        # Identity skip connection around the two-conv branch.
        return X + self.conv(X)


class Generator(nn.Module):
    """CycleGAN-style generator: 7x7 stem, 2 downsampling convs,
    `nResBlocks` residual blocks, 2 upsampling deconvs, 7x7 tanh head.
    Spatial output size equals the input size."""

    def __init__(self, inputnc, outputnc, nResBlocks=9):
        super(Generator, self).__init__()
        blocks = [
            nn.ReflectionPad2d(3),
            nn.Conv2d(inputnc, 64, 7),
            nn.InstanceNorm2d(64),
            nn.ReLU(inplace=True),
        ]

        # Downsampling: two stride-2 convs, doubling channels each time (64->128->256).
        channels = 64
        for _ in range(2):
            blocks += [
                nn.Conv2d(channels, channels * 2, 3, stride=2, padding=1),
                nn.InstanceNorm2d(channels * 2),
                nn.ReLU(inplace=True),
            ]
            channels *= 2

        # Residual bottleneck at the smallest spatial resolution.
        blocks.extend(ResBlock(channels) for _ in range(nResBlocks))

        # Upsampling: two stride-2 transposed convs, halving channels (256->128->64).
        for _ in range(2):
            blocks += [
                nn.ConvTranspose2d(channels, channels // 2, 3, stride=2,
                                   padding=1, output_padding=1),
                nn.InstanceNorm2d(channels // 2),
                nn.ReLU(inplace=True),
            ]
            channels //= 2

        # Output head maps back to `outputnc` channels in [-1, 1].
        blocks += [nn.ReflectionPad2d(3), nn.Conv2d(64, outputnc, 7), nn.Tanh()]
        self.model = nn.Sequential(*blocks)

    def forward(self, X):
        return self.model(X)


class Discriminator(nn.Module):
    """PatchGAN discriminator; the patch map is averaged to one score per image."""

    def __init__(self, inputnc):
        super(Discriminator, self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(inputnc, 64, 4, stride=2, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, 128, 4, stride=2, padding=1),
            nn.InstanceNorm2d(128),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(128, 256, 4, stride=2, padding=1),
            nn.InstanceNorm2d(256),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(256, 512, 4, padding=1),
            nn.InstanceNorm2d(512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(512, 1, 4, padding=1),
        )

    def forward(self, X):
        patch_scores = self.model(X)
        # Global average over the remaining spatial dims -> (batch, 1).
        return F.avg_pool2d(patch_scores, patch_scores.size()[2:]).view(
            patch_scores.size()[0], -1)
2,413
1,261
from stable_baselines3.bear.policies import BearPolicy from stable_baselines3.bear.bear import BEAR
99
33
#!/usr/bin/python """ This is written by Zhiyang Ong to modify text (non-binary) files. Synopsis: Script to modify text (non-binary) files. Revision History: 1) November 11, 2014. Initial working version. The MIT License (MIT) Copyright (c) <2014> <Zhiyang Ong> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Email address: echo "cukj -wb- 23wU4X5M589 TROJANS cqkH wiuz2y 0f Mw Stanford" | awk '{ sub("23wU4X5M589","F.d_c_b. ") sub("Stanford","d0mA1n"); print $5, $2, $8; for (i=1; i<=1; i++) print "6\b"; print $9, $7, $6 }' | sed y/kqcbuHwM62z/gnotrzadqmC/ | tr 'q' ' ' | tr -d [:cntrl:] | tr -d 'ir' | tr y "\n" """ # Import packages and functions from the Python Standard Library. #from os import listdir, system from os import system #from os.path import isfile, join, splitext #from os.subprocess import call #import subprocess # ============================================================ """ Create an output file object. Assume that the specified filename does not belong to an important file. 
Assume that the specified file can be overwritten.
"""
# NOTE(review): the string above is the tail of a block comment that begins
# before this chunk. File opened without a context manager; it is closed
# explicitly at the bottom of the script.
f_object = open("input-file.txt", "w");

# Lists to generate data for the input test file.

# List of universities that are good in EDA.
universities = ["Berkeley", "Stanford", "MIT", "UT Austin", "Carnegie Mellon",
    "Georgia Tech", "Columbia", "Northwestern", "Purdue", "UCSD", "UCLA"]
# List of other universities in EDA.
other_unis = ["UIUC", "Brown", "Boston University", "UC Irvine",
    "UC Riverside", "UCSB", "USC", "University of Minnesota at Twin Cities",
    "Utah", "University of Wisconsin-Madison"]
# List of VLSI topics.
vlsi_topics = ["RTL design", "TLM design", "processor design", "SRAM design",
    "DRAM design", "low-power VLSI design", "decoder design", "DFM",
    "VLSI verification", "VLSI design flow", "NoC", "asynchronous VLSI design",
    "VLSI architecture", "digitally-assisted analog IC design",
    "VLSI signal processing", "microarchitecture"]
# List of EDA topics. ("fault saimulation" typo is preserved: this text is
# written verbatim into the generated test file.)
eda_topics = ["model checking", "equivalence checking", "high-level synthesis",
    "hardware/software partitioning", "hardware-accelerated emulation",
    "logic synthesis", "RTL synthesis", "static timing analysis",
    "statistical STA", "power optimization", "DVFS", "logic simulation",
    "fault saimulation", "ATPG", "DFT", "BIST", "memory compiler",
    "gate sizing", "threshold voltage assignment", "buffer insertion",
    "crosstalk analysis", "signal integrity analysis", "noise analysis",
    "thermal analysis", "floorplanning", "partitioning", "detailed placement",
    "detailed routing", "global placement", "global routing",
    "clock network synthesis", "power and ground routing", "layout compaction",
    "layout extraction", "parasitic extraction", "interconnect modeling",
    "design rule check", "layout versus schematic check", "electric rule check",
    "computational lithography", "optical proximity correction",
    "resolution enhancement technologies", "mask data preparation",
    "circuit simulation"]

# Lists of numbers to be fixed.
# NOTE(review): under Python 3 these are range objects, not lists; indexing
# still works, so the loop below is unaffected.
list_of_hundreds = range(1500, 5000, 100)
list_of_10s = range(1234560, 1234767, 10)

# References:
# http://eecs_ece-and-cs.quora.com/Choosing-a-Graduate-Program-in-VLSI-Design-Related-Areas-Things-to-Consider
# http://www.quora.com/What-are-the-best-VLSI-CAD-research-groups-in-US-universities

# Write text to the input test file.
#f_object.write("Ciao Mondo")

# Pointer to currently enumerated index of EDA topics.
ptr = 0

# ============================================================
# Generate test data for the test input file.
# Enumerate all universities that are good in EDA.
# NOTE(review): `ptr` advances (up to) twice per university, so the code
# relies on eda_topics / list_of_hundreds / list_of_10s being long enough;
# with the current list sizes the maximum index stays in range.
for gd_uni in universities:
	#temp_str = "%S %S %S", gd_uni, eda_topics[ptr], eda_topics[ptr+1]
	temp_str = gd_uni + "; " + str(list_of_hundreds[ptr]) + "; " + eda_topics[ptr]
	ptr = ptr + 1
	temp_str = temp_str + "; " + str(list_of_10s[ptr]) + "; " + eda_topics[ptr] + ".\n"
	# Guard keeps `ptr` from advancing past the first len(universities)
	# entries; after that, each line reuses the previous topic index.
	if ptr < len(universities):
		ptr = ptr + 1
	f_object.write(temp_str)

# One extra fixed line appended after the generated data.
temp_str = "Stanford" + "; " + "326748027" + "; " + "statistical STA"
temp_str = temp_str + "; " + "7289" + "; " + "hardware-accelerated emulation" + ".\n"
f_object.write(temp_str)

# ============================================================
# Close the file object
f_object.close()
5,194
1,822
import pydicom
from tqdm import tqdm
import pandas as pd
import os
import time
import glob
import numpy as np
from pydicom import _dicom_dict as dc
from constants import *
import string

# Derived boolean columns computed by word-matching the SeriesDescription:
# column name -> (search word list, exclusion word list). The word lists
# come from `constants`.
_WORD_SEARCH_TAGS = {
    "Gad": (GAD, GAD_EXCLUSION),
    "T1": (T1, FLAIR + T2),
    "T2": (T2, []),
    "FLAIR": (FLAIR, T1),
    "SWI": (SWI, []),
    "FIESTA": (FIESTA, []),
    "TOF": (TOF, []),
    "DWI": (DWI, DWI_EXCLUSION),
    "Angio": (ANGIO, []),
    "MPR": (MPR, []),
    "Others": (OTHERS, []),
}


def dcmtag2df(folder: str, list_of_tags: list):
    """
    Create a pandas DataFrame with the <list_of_tags> DICOM tags
    from the DICOM files found recursively under <folder>.

    Parameters:
        folder (str): folder to be recursively walked looking for DICOM files.
        list_of_tags (list of str): DICOM keyword names (no whitespace),
            hex tag numbers (e.g. "0008103E"), or derived column names
            ("IOP_Plane", "Primary", "T1", "T2", ...).

    Returns:
        df (DataFrame): one row per readable DICOM file, with a leading
            "Filename" column; None if the folder is invalid or no DICOM
            files were read.
    """
    list_of_tags = list_of_tags.copy()  # don't mutate the caller's list
    table = []

    # checks if folder exists
    if not os.path.isdir(folder):
        print(f'{folder} is not a valid folder.')
        return None

    # joins ** to the folder name for using at the glob function
    print("Searching files recursively...")
    search_folder = os.path.join(folder, '**')
    try:
        filelist = glob.glob(search_folder, recursive=True)
        print(f"{len(filelist)} files/folders found ")
    except Exception as e:
        print(e)
        return None

    print("Reading files...")
    for _f in tqdm(filelist):
        try:
            dataset = pydicom.dcmread(_f, stop_before_pixels=True)
            items = [_f]
            for _tag in list_of_tags:
                items.append(_tag_value(dataset, _tag))
            table.append(items)
        except (FileNotFoundError, PermissionError):
            pass
        except Exception:
            # Best-effort scan: skip unreadable / non-DICOM files.
            pass

    if len(table) == 0:
        print(f'0 DICOM files found at folder: {folder}')
        return None

    list_of_tags.insert(0, "Filename")
    # Transpose rows into columns, one per requested tag.
    columns = list(map(list, zip(*table)))
    df = pd.DataFrame({tag: column for tag, column in zip(list_of_tags, columns)})
    time.sleep(2)  # let the tqdm bar finish printing before the message
    print("Finished.")
    return df


def _tag_value(dataset, _tag):
    """Resolve one requested column for one dataset.

    Returns a str for plain tags ("NaN" when absent), a bool for the derived
    word-search columns, or raises for malformed tag strings (the caller
    skips the whole file in that case).
    """
    if _tag in dataset:
        element = dataset.data_element(_tag)
        # BUG FIX: the original referenced `tag_number` here before it was
        # ever assigned (NameError); a missing element is now just "NaN".
        return str(element.value) if element is not None else "NaN"

    series_description = dataset.get('SeriesDescription')
    if _tag == 'IOP_Plane':
        return IOP_Plane(dataset.get('ImageOrientationPatient'))
    if _tag == "Primary":
        try:
            image_type = ' '.join(dataset.get('ImageType'))
        except Exception:
            image_type = ''
        return search_words_in_serie(image_type, PRIMARY)
    if _tag in _WORD_SEARCH_TAGS:
        search_words, exclusion_words = _WORD_SEARCH_TAGS[_tag]
        return search_words_in_serie(series_description, search_words, exclusion_words)

    # Fall back: treat the tag as a hex tag number (e.g. "0008103E").
    tag_number = tag_number_to_base_16(_tag)
    if tag_number in dataset and dataset[tag_number] is not None:
        return str(dataset[tag_number].value)
    return "NaN"


def IOP_Plane(IOP: list) -> str:
    """
    Map an ImageOrientationPatient value to its plane: "SAG", "COR" or
    "AXI" ("UNK" on any failure, e.g. missing or oblique orientation).

    The cross product of the (rounded) row and column direction cosines
    points along the slice normal; its dominant axis identifies the plane.
    """
    try:
        IOP_round = [round(x) for x in IOP]
        plane = np.cross(IOP_round[0:3], IOP_round[3:6])
        plane = [abs(x) for x in plane]
        if plane[0] == 1:
            return "SAG"
        elif plane[1] == 1:
            return "COR"
        elif plane[2] == 1:
            return "AXI"
        return "UNK"
    except Exception:
        return "UNK"


def dicomtagnumber_to_tagname(dicom_tag_number: str) -> str:
    """Convert a DICOM tag number (e.g. "0008103E") to its keyword name.

    Falls back to the input value when the tag is unknown or conversion fails.
    """
    # BUG FIX: initialize the result so the except path cannot hit an
    # unbound local (the original could raise NameError at `return`).
    dicom_tag_name = str(dicom_tag_number)
    try:
        dicom_tag_base_16 = tag_number_to_base_16(dicom_tag_number)
        dicom_tag_name = dc.DicomDictionary.get(
            dicom_tag_base_16, (0, 0, 0, 0, dicom_tag_number)
        )[4]
        if dicom_tag_name == "0008103E":
            dicom_tag_name = "SeriesDescription"
    except Exception as e:
        print(f'Erro ao converter dicomtag {dicom_tag_number}\n{e}')
    return dicom_tag_name


def dicomtagname_to_tagnumber(dicom_tag_name: str) -> str:
    """Convert a DICOM keyword (or expanded name) to its 8-digit hex tag
    number, e.g. "SeriesDescription" -> "0008103e".

    Returns the input unchanged when the name cannot be resolved.
    """
    tag_number_8_digits = dicom_tag_name
    try:
        # BUG FIX: `tag_number` was unbound when the contracted-name search
        # failed, making `if not tag_number` raise NameError.
        tag_number = None
        # searches for Contracted Name
        for key, value in dc.DicomDictionary.items():
            if dicom_tag_name == value[4]:
                tag_number = key
                break
        # searches for Expanded Name if not found Contracted Form
        if not tag_number:
            for key, value in dc.DicomDictionary.items():
                if dicom_tag_name == value[2]:
                    tag_number = key
                    break
        # BUG FIX: the original used hex(1048592) — a hard-coded constant
        # (tag 0010,0010 PatientName) — instead of the tag just found.
        hex_number = hex(tag_number)[2:]
        # BUG FIX: zero-pad (fill '0'), not the ambiguous ">08" spec.
        tag_number_8_digits = f"{hex_number:0>8}"
    except Exception as e:
        print(f'Erro ao converter dicomtag {dicom_tag_name}\n{e}')
    return tag_number_8_digits


def tag_number_to_base_16(dicom_tag_number: str) -> int:
    """Strip non-hex characters from a tag string (e.g. "(0008,103E)") and
    parse the remainder as a base-16 integer.

    Raises ValueError when no hex digits remain.
    NOTE(review): passing an int re-parses its *decimal* digits as hex —
    probably unintended, but preserved for backward compatibility.
    """
    hex_chars = string.hexdigits
    if type(dicom_tag_number) == int:
        dicom_tag_number = str(dicom_tag_number)
    only_hexdigits_tag = ''.join(c for c in dicom_tag_number if c in hex_chars)
    return int(only_hexdigits_tag, 16)


def search_words_in_serie(series_description: str, search_words: list,
                          exclusion_words: list = []) -> bool:
    """
    Return True when series_description contains any of search_words and
    none of exclusion_words (case-insensitive substring match).

    Returns the string "NaN" when matching fails (e.g. None description),
    which downstream code stores as the cell value.
    (The mutable default is safe: exclusion_words is never mutated.)
    """
    try:
        upper_description = series_description.upper()
        search_flag = any(word.upper() in upper_description for word in search_words)
    except Exception:
        print(f"Erro ao procurar a lista de palavras de inclusao {search_words} na descricao {series_description}")
        return "NaN"
    try:
        exclusion_flag = any(word.upper() in upper_description for word in exclusion_words)
    except Exception:
        print(f"Erro ao procurar a lista de palavras de exclusao {search_words} na descricao {series_description}")
        return "NaN"
    return search_flag and not exclusion_flag
8,771
2,604
# -*- coding: utf-8 -*- """ eve-swagger ~~~~~~~~~~~ swagger.io extension for Eve-powered REST APIs. :copyright: (c) 2015 by Nicola Iarocci. :license: BSD, see LICENSE for more details. """ try: from collections import OrderedDict except ImportError: from ordereddict import OrderedDict # noqa: F401 from .swagger import swagger, add_documentation # noqa INFO = 'SWAGGER_INFO' HOST = 'SWAGGER_HOST'
430
159
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed May 30 15:34:05 2018

@author: chrisunderwood

To compare the outputted Electron spectrums, as part of a parameter scan.
(Python 2 script: uses `print` statements and `xrange`.)
"""
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns


#==============================================================================
# A function that replicates os.walk with a max depth level
#==============================================================================
def walklevel(some_dir, level=1):
    some_dir = some_dir.rstrip(os.path.sep)
    assert os.path.isdir(some_dir)
    num_sep = some_dir.count(os.path.sep)
    for root, dirs, files in os.walk(some_dir):
        yield root, dirs, files
        num_sep_this = root.count(os.path.sep)
        if num_sep + level <= num_sep_this:
            # Prune traversal below the requested depth.
            del dirs[:]

#==============================================================================
# Creates a list of the folders of interest
#==============================================================================
def listFolders(mainDir):
    # Immediate subfolders only ([1:] drops mainDir itself).
    listSubFolders = [x[0] for x in walklevel(mainDir)][1:]
    folderNames = []
    #Modify so useable path
    for i in range(len(listSubFolders)):
        listSubFolders[i] += '/'
        #Add the folder that I am looking into here too!
        listSubFolders[i] += 'Dist_evo/'
        folderNames.append(listSubFolders[i].split('/')[-3])
    listSubFolders = np.array(listSubFolders)
    folderNames = np.array(folderNames)
    return listSubFolders, folderNames


def nearposn(array, value):
    #Find array position of value
    posn = (abs(array-value)).argmin()
    return posn


def subplotPerSpectra(data, Crop):
    # NOTE(review): reads the module-level globals `folderNames` and
    # `plot_MeV`; must run after the script body below has set them.
    sns.set_palette(sns.color_palette("Set1", len(folderNames)))
    sns.set_context("talk")
    sns.set_style('darkgrid')
    fig, axes = plt.subplots(nrows = len(data), sharex = True, figsize = (7,8))
    for d, names, ax in zip(data, folderNames, axes):
        yLims = [1e50, 0]
        px = d[:,0]
        # Convert momentum to energy: E = p^2 / (2 m_e), then J -> MeV.
        Energy_J = (px ** 2) / (2 * 9.11e-31)
        Energy_eV = Energy_J / 1.6e-19
        Energy_MeV = Energy_eV * 1e-6
        xlow = nearposn(Energy_MeV, Crop[0])
        xhigh = nearposn(Energy_MeV, Crop[1])
#        print xlow, xhigh
#        xlow = 50; xhigh = 400
        intensity = d[:,1]
        cropI = intensity[xlow:xhigh]
        # Track the extreme y values inside the cropped window.
        if cropI.min() < yLims[0]:
            yLims[0] = cropI.min()
        if cropI.max() > yLims[1]:
            yLims[1] = cropI.max()
#        print fp
        if plot_MeV:
            xAxis = Energy_MeV
        else:
            xAxis = Energy_J
        ax.plot(xAxis, intensity)
        # NOTE(review): names[1:] assumes the folder name starts with a
        # single prefix character before the translation value — confirm.
        ax.set_title('Blade Translation ' + names[1:] + 'mm')
        ax.set_ylim(yLims)
#        ax.set_ylabel('Intensity (# of electrons)')
        ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0),useOffset=False)
    if plot_MeV:
        plt.xlabel('Electron Energy (MeV)')
    else:
        plt.xlabel('Electron Energy (J)')
#    plt.ylabel('Intensity (# of electrons)')
    # Shared y label for the whole figure.
    fig.text(0.02, 0.5, 'Intensity (# of electrons)', ha='center', va='center',
             rotation='vertical')
#==============================================================================
#     Apply the plotting limits
#==============================================================================
    #plt.xlim([-1e-14, 1e-13])
    #plt.yscale('log')
#
#    if logPlot:
#        plt.ylim([yLims[1]/1e5, yLims[1]])
#        plt.yscale('log')
#    else:
#        plt.ylim(yLims)
#    plt.xlim([xAxis[xlow],xAxis[xhigh]])
    plt.legend()


def createPlotOfAll_e_spectra(folderPaths, folderNames, Crop_X, Crop_Y = False):
    # Overlay every folder's spectrum on one figure; returns the loaded data.
    # NOTE(review): also reads the globals `plot_MeV` and `logPlot`.
    sns.set_palette(sns.color_palette("Set1", len(folderNames)))
    sns.set_context("talk")
    sns.set_style('darkgrid')
    yLims = [1e50, 0]
    data = []
    plt.figure(figsize = (10,7))
    for fp, names in zip(folderPaths, folderNames):
        fp += 'Electron_Spectrum.txt'
        try:
            #Assuming that the first row is currently px
            d = np.loadtxt(fp)
            data.append(d)
            px = d[:,0]
            # Convert momentum to energy: E = p^2 / (2 m_e), then J -> MeV.
            Energy_J = (px ** 2) / (2 * 9.11e-31)
            Energy_eV = Energy_J / 1.6e-19
            Energy_MeV = Energy_eV * 1e-6
            xlow = nearposn(Energy_MeV, Crop_X[0])
            xhigh = nearposn(Energy_MeV, Crop_X[1])
#            print xlow, xhigh
#            xlow = 50; xhigh = 400
            intensity = d[:,1]
            if not Crop_Y:
                # Auto-scale y limits from the cropped window.
                cropI = intensity[xlow:xhigh]
                if cropI.min() < yLims[0]:
                    yLims[0] = cropI.min()
                if cropI.max() > yLims[1]:
                    yLims[1] = cropI.max()
            else:
                yLims = Crop_Y
#            print fp
            if plot_MeV:
                xAxis = Energy_MeV
            else:
                xAxis = Energy_J
            plt.plot(xAxis, intensity, label = names)
            plt.xlim([xAxis[xlow],xAxis[xhigh]])
        except:
            # Missing/corrupt spectrum file: report and continue.
            print 'Error Reading File'
            print '    ' + fp
    if plot_MeV:
        plt.xlabel('Electron Energy (MeV)')
    else:
        plt.xlabel('Electron Energy (J)')
    plt.ylabel('Intensity (# of electrons)')
#==============================================================================
#     Apply the plotting limits
#==============================================================================
    #plt.xlim([-1e-14, 1e-13])
    #plt.yscale('log')
#
    if logPlot:
        plt.ylim([yLims[1]/1e5, yLims[1]])
        plt.yscale('log')
    else:
        plt.ylim(yLims)
    plt.legend()
    print 'Crop corresponds to: ', [xAxis[xlow],xAxis[xhigh]], ' MeV'
    print 'Range of inputed data is: ', Energy_MeV[0], Energy_MeV[-1]
    return data


# Script body: pick the run directory, filter/sort its subfolders, then plot.
hdrive = '/Volumes/CIDU_passport/2018_Epoch_vega_1/'
gdrive = '/Volumes/GoogleDrive/My Drive/'
gdrive += '2018_Epoch_vega_1/'

# Previously used runs (kept for reference); fileSplice selects the numeric
# part of each folder name used for sorting.
#hdrive += '0601_Gaus_for_wavebreak/'
#fileSplice = [8,None]
#hdrive += '0607_Intensity_Scan/'
#fileSplice = [1,-11]
#hdrive += '0612_profileScan/'
#fileSplice = [2,None]
#hdrive = gdrive + '0711_highRes_selfInjection/'
#fileSplice = [-4,None]
#hdrive = gdrive + '0721_HR_Jump/'
#fileSplice = [-4,None]
hdrive = hdrive + '1010_SlurmJob/'
fileSplice = [10,12]
#hdrive = gdrive + '1018_vega1_Jump/'
#fileSplice = [2,None]

folderPaths, folderNames = listFolders(hdrive)
logPlot = False
plot_MeV = True

#==============================================================================
# Search for the set of folders to look at!
#==============================================================================
starts = ''
#starts = ''
fins = 'FS'
#Index_to_save = [i for i in xrange(len(folderNames)) if folderNames[i].endswith(fins)]
Index_to_save = [i for i in xrange(len(folderNames)) if folderNames[i].startswith(starts)]
#Index_to_save = [i for i in xrange(len(folderNames)) if folderNames[i].startswith(starts) and folderNames[i].endswith('23')]

#Modify the both arrays to just be the ones of interest
folderPaths = folderPaths[Index_to_save]
folderNames = folderNames[Index_to_save]
print folderNames

#==============================================================================
# Crop the axis to the interesting data
#==============================================================================
Energy_Crop = [1, 5]  # In MeV
IntensityCrop = [0, 0.5e8]

#==============================================================================
# Slice name for number to sort by
#==============================================================================
Num = []
for f in folderNames:
    Num.append(float(f[fileSplice[0]:fileSplice[1]]))
print Num
# Sort all three lists together by the extracted number.
sort = sorted(zip(Num, folderNames, folderPaths))
folderNames = [x[1] for x in sort]
folderPaths = [x[2] for x in sort]
print 'Sorted'
print folderNames

#folderNames = folderNames[:-1]
data = createPlotOfAll_e_spectra(folderPaths, folderNames, Energy_Crop, IntensityCrop)
plt.savefig(hdrive + 'Electron_spectrum.png')
plt.show()

#data = data[:4]
subplotPerSpectra(data, Energy_Crop)
plt.tight_layout()
plt.savefig(hdrive + 'Electron_spectrums_in_subplot.png', dpi = 300)
9,005
3,200
print("Lukas Ho, pronouns: he/him")
35
16
# coding: utf-8

from __future__ import absolute_import
from datetime import date, datetime  # noqa: F401

from typing import List, Dict  # noqa: F401

from swagger_server.models.base_model_ import Model
from swagger_server.models.preferences import Preferences  # noqa: F401,E501
from swagger_server import util


class PeoplePatch(Model):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    def __init__(self, email: str=None, name: str=None, preferences: Preferences=None):  # noqa: E501
        """PeoplePatch - a model defined in Swagger

        :param email: The email of this PeoplePatch.  # noqa: E501
        :type email: str
        :param name: The name of this PeoplePatch.  # noqa: E501
        :type name: str
        :param preferences: The preferences of this PeoplePatch.  # noqa: E501
        :type preferences: Preferences
        """
        # Serialization metadata used by the base Model: attribute -> type,
        # and attribute -> JSON field name.
        self.swagger_types = {
            'email': str,
            'name': str,
            'preferences': Preferences
        }
        self.attribute_map = {
            'email': 'email',
            'name': 'name',
            'preferences': 'preferences'
        }
        self._email, self._name, self._preferences = email, name, preferences

    @classmethod
    def from_dict(cls, dikt) -> 'PeoplePatch':
        """Build a PeoplePatch from a plain dict.

        :param dikt: A dict.
        :type: dict
        :return: The people_patch of this PeoplePatch.  # noqa: E501
        :rtype: PeoplePatch
        """
        return util.deserialize_model(dikt, cls)

    @property
    def email(self) -> str:
        """The email of this PeoplePatch.

        :rtype: str
        """
        return self._email

    @email.setter
    def email(self, email: str):
        """Set the email of this PeoplePatch.

        :type email: str
        """
        self._email = email

    @property
    def name(self) -> str:
        """The name of this PeoplePatch.

        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name: str):
        """Set the name of this PeoplePatch.

        :type name: str
        """
        self._name = name

    @property
    def preferences(self) -> Preferences:
        """The preferences of this PeoplePatch.

        :rtype: Preferences
        """
        return self._preferences

    @preferences.setter
    def preferences(self, preferences: Preferences):
        """Set the preferences of this PeoplePatch.

        :type preferences: Preferences
        """
        self._preferences = preferences
2,965
890
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import time
import logging
from sklearn.metrics import roc_auc_score
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE

from src.models.optim.Loss_Functions import DMSADLoss
from src.utils.utils import print_progessbar

class DMSAD_trainer:
    """
    Trainer for the DMSAD.

    Trains a network whose embeddings are pulled towards multiple hypersphere
    centers (initialized with K-Means), prunes under-populated spheres after
    each epoch, and finally sets each sphere's radius as the (1 - gamma)
    quantile of normal-sample distances.
    """
    def __init__(self, c, R, eta=1.0, gamma=0.05, n_sphere_init=100, n_epoch=150,
                 lr=1e-4, lr_milestone=(), batch_size=64, weight_decay=1e-6,
                 device='cuda', n_job_dataloader=0, print_batch_progress=False):
        """
        Constructor of the DMSAD trainer.
        ----------
        INPUT
            |---- c (array like N_sphere x Embed dim) the centers of the hyperspheres.
            |           If None, the centers are initialized using Kmeans.
            |---- R (1D array) the radii associated with the centers.
            |---- eta (float) the weight of semi-supervised labels in the loss.
            |---- gamma (float) the fraction of allowed outlier when setting the
            |           radius of each sphere in the end.
            |---- n_sphere_init (int) the number of initial hypersphere.
            |---- n_epoch (int) the number of epoch.
            |---- lr (float) the learning rate.
            |---- lr_milestone (tuple) the lr update steps.
            |---- batch_size (int) the batch_size to use.
            |---- weight_decay (float) the weight_decay for the Adam optimizer.
            |---- device (str) the device to work on ('cpu' or 'cuda').
            |---- n_job_dataloader (int) number of workers for the dataloader.
            |---- print_batch_progress (bool) whether to dispay the batch
            |           progress bar.
        OUTPUT
            |---- None
        """
        # learning parameters
        self.n_epoch = n_epoch
        self.lr = lr
        self.lr_milestone = lr_milestone
        self.batch_size = batch_size
        self.weight_decay = weight_decay
        self.device = device
        self.n_job_dataloader = n_job_dataloader
        self.print_batch_progress = print_batch_progress

        # DMSAD parameters
        # c and R stay None here when not provided; c is then built lazily by
        # initialize_centers() at the start of train().
        self.c = torch.tensor(c, device=self.device) if c is not None else None
        self.R = torch.tensor(R, device=self.device) if R is not None else None
        self.eta = eta
        self.gamma = gamma
        self.n_sphere_init = n_sphere_init

        # Optimization parameters
        self.eps = 1e-6  # numerical-stability constant forwarded to the loss

        # Results (populated by train() / evaluate())
        self.train_time = None
        self.train_loss = None
        self.eval_auc = None
        self.eval_time = None
        self.eval_scores = None

    def train(self, dataset, net, valid_dataset=None):
        """
        Train the DMSAD network on the provided dataset.
        ----------
        INPUT
            |---- dataset (torch.utils.data.Dataset) the dataset on which the
            |           network is trained. It must return an image, label, mask
            |           semi-supervized labels and the index.
            |---- net (nn.Module) The DeepSAD to train.
            |---- valid_dataset (torch.utils.data.Dataset) the dataset on which
            |           to validate the network at each epoch. Not validated if
            |           not provided.
        OUTPUT
            |---- net (nn.Module) The trained DeepSAD.
        """
        logger = logging.getLogger()

        # make the train dataloader
        train_loader = torch.utils.data.DataLoader(dataset, batch_size=self.batch_size, \
                                        shuffle=True, num_workers=self.n_job_dataloader)

        # put net to device
        net = net.to(self.device)

        # initialize hypersphere center with K-Means if none was supplied
        if self.c is None:
            logger.info(' Initializing the hypersphere centers.')
            self.initialize_centers(train_loader, net)
            logger.info(f' {self.c.shape[0]} centers successfully initialized.')

        # define loss criterion
        loss_fn = DMSADLoss(self.eta, eps=self.eps)

        # define optimizer
        optimizer = optim.Adam(net.parameters(), lr=self.lr, weight_decay=self.weight_decay)

        # define scheduler
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=self.lr_milestone, gamma=0.1)

        # Start training
        logger.info('Start Training the DMSAD.')
        start_time = time.time()
        epoch_loss_list = []
        n_batch = len(train_loader)

        for epoch in range(self.n_epoch):
            net.train()
            epoch_loss = 0.0
            epoch_start_time = time.time()
            # per-sphere count of normal samples assigned this epoch; used to
            # prune under-populated spheres after the epoch
            n_k = torch.zeros(self.c.shape[0], device=self.device)

            for b, data in enumerate(train_loader):
                # get input and semi-supervized labels
                input, _, _, semi_label, _ = data
                # put them to device
                input = input.to(self.device).float().requires_grad_(True)
                semi_label = semi_label.to(self.device)

                # zero the network's gradients
                optimizer.zero_grad()
                # optimize by backpropagation
                _, embed = net(input)
                loss = loss_fn(embed, self.c, semi_label)
                loss.backward()
                optimizer.step()
                epoch_loss += loss.item()

                # get the closest sphere and count the number of normal samples per sphere
                idx = torch.argmin(torch.norm(self.c.unsqueeze(0) - embed.unsqueeze(1), p=2, dim=2), dim=1)
                for i in idx[semi_label != -1]:
                    n_k[i] += 1

                if self.print_batch_progress:
                    print_progessbar(b, len(train_loader), Name='\t\tTrain Batch', Size=40, erase=True)

            # remove centers with less than gamma fraction of largest hypersphere number of sample
            # NOTE(review): gamma doubles as both this pruning threshold and
            # the radius quantile below — confirm that is intended.
            self.c = self.c[n_k >= self.gamma * torch.max(n_k)]

            # validate if required
            valid_auc = ''
            if valid_dataset:
                auc = self.evaluate(net, valid_dataset, return_auc=True, print_to_logger=False, save_tSNE=False)
                valid_auc = f' Valid AUC {auc:.3%} |'

            # log the epoch statistics
            logger.info(f'----| Epoch: {epoch + 1:03}/{self.n_epoch:03} '
                        f'| Train Time: {time.time() - epoch_start_time:.3f} [s] '
                        f'| Train Loss: {epoch_loss / n_batch:.6f} '
                        f'| N sphere {self.c.shape[0]:03} |' + valid_auc)
            epoch_loss_list.append([epoch+1, epoch_loss/n_batch])

            # update scheduler
            scheduler.step()
            if epoch + 1 in self.lr_milestone:
                # NOTE(review): scheduler.get_lr() is deprecated in newer
                # torch in favor of get_last_lr() — confirm torch version.
                logger.info(f'---- LR Scheduler : new learning rate {scheduler.get_lr()[0]:g}')

        # Set the radius of each sphere as 1-gamma quantile of normal samples distances
        logger.info(f'---- Setting the hyperspheres radii as the {1-self.gamma:.1%} quantiles of normal sample distances.')
        self.set_radius(train_loader, net)
        logger.info(f'---- {self.R.shape[0]} radii successufully defined.')

        # End training
        self.train_loss = epoch_loss_list
        self.train_time = time.time() - start_time
        logger.info(f'---- Finished Training DMSAD in {self.train_time:.3f} [s]')

        return net

    def evaluate(self, net, dataset, return_auc=False, print_to_logger=True, save_tSNE=True):
        """
        Evaluate the DSAD network on the provided dataset.
        ----------
        INPUT
            |---- net (nn.Module) The DMSAD network to validate.
            |---- dataset (torch.utils.data.Dataset) the dataset on which the
            |           network is evaluated.
            |---- return_auc (bool) whether to return the computed auc or not.
            |---- print_to_logger (bool) whether to print in the logger.
            |---- save_tSNE (bool) whether to save a 2D t-SNE representation of
            |           the embeded data points
        OUTPUT
            |---- (auc) (float) the validation auc if required.
        """
        # logger is only defined (and only used) when print_to_logger is True
        if print_to_logger:
            logger = logging.getLogger()

        # make dataloader
        # NOTE(review): shuffle=True is unnecessary for evaluation (results
        # are tracked by idx), though harmless — confirm intent.
        loader = torch.utils.data.DataLoader(dataset, batch_size=self.batch_size,
                            shuffle=True, num_workers=self.n_job_dataloader)
        # put net on device
        net = net.to(self.device)

        # Evaluating
        if print_to_logger:
            logger.info('Start Evaluating the DMSAD.')
        start_time = time.time()
        idx_label_score = []

        net.eval()
        with torch.no_grad():
            for b, data in enumerate(loader):
                # get data on device
                input, label, _, semi_label, idx = data
                input = input.to(self.device).float()
                label = label.to(self.device)
                semi_label = semi_label.to(self.device)
                idx = idx.to(self.device)

                # Embed input and compute anomaly score
                _, embed = net(input)
                # find closest sphere: the anomaly score is the distance to
                # the nearest center
                score, sphere_idx = torch.min(torch.norm(self.c.unsqueeze(0) - embed.unsqueeze(1), p=2, dim=2), dim=1)

                # append idx, scores, label and embeding
                idx_label_score += list(zip(idx.cpu().data.numpy().tolist(),
                                        label.cpu().data.numpy().tolist(),
                                        score.cpu().data.numpy().tolist(),
                                        sphere_idx.cpu().data.numpy().tolist(),
                                        embed.cpu().data.numpy().tolist()))

                if self.print_batch_progress:
                    print_progessbar(b, len(loader), Name='\t\t Evaluation Batch', Size=40, erase=True)

        # compute AUCs
        index, label, score, sphere_index, embed = zip(*idx_label_score)
        label, score = np.array(label), np.array(score)
        auc = roc_auc_score(label, score)

        if save_tSNE:
            # replace the stored embeddings by their 2D t-SNE projection
            embed = np.array(embed)
            embed = TSNE(n_components=2).fit_transform(embed)
            idx_label_score = list(zip(index, label.tolist(), score.tolist(), sphere_index, embed.tolist()))

        self.eval_time = time.time() - start_time
        self.eval_scores = idx_label_score
        self.eval_auc = auc

        if print_to_logger:
            logger.info(f'Evaluation Time : {self.eval_time}')
            logger.info(f'Evaluation AUC : {self.eval_auc:.3%}')
            logger.info('Finished Evaluating the DMSAD.')

        if return_auc:
            return auc

    def initialize_centers(self, loader, net, eps=0.1):
        """
        Initialize the multiple centers using the K-Means algorithm on the
        embedding of all the normal samples.
        ----------
        INPUT
            |---- loader (torch.utils.data.Dataloader) the loader of the data.
            |---- net (nn.Module) the DMSAD network. The output must be a vector
            |           embedding of the input. The network should be an
            |           autoencoder for which the forward pass returns both the
            |           reconstruction and the embedding of the input.
            |---- eps (float) minimal value for center coordinates, to avoid
            |           center too close to zero.
        OUTPUT
            |---- None
        """
        # Get sample embedding
        # (note: 'repr' shadows the builtin of the same name within this method)
        repr = []
        net.eval()
        with torch.no_grad():
            for b, data in enumerate(loader):
                # get data
                input, _, _, semi_label, _ = data
                input = input.to(self.device).float()
                semi_label = semi_label.to(self.device)
                # keep only normal samples
                input = input[semi_label != -1]
                # get embdeding of batch
                _, embed = net(input)
                repr.append(embed)

                if self.print_batch_progress:
                    print_progessbar(b, len(loader), Name='\t\tBatch', Size=40, erase=True)

            repr = torch.cat(repr, dim=0).cpu().numpy()

        # Apply Kmeans algorithm on embedding
        kmeans = KMeans(n_clusters=self.n_sphere_init).fit(repr)
        self.c = torch.tensor(kmeans.cluster_centers_).to(self.device)

        # check if c_i are epsilon too close to zero to avoid them to be trivialy matched to zero
        # (push near-zero coordinates away from 0, preserving their sign)
        self.c[(torch.abs(self.c) < eps) & (self.c < 0)] = -eps
        self.c[(torch.abs(self.c) < eps) & (self.c > 0)] = eps

    def set_radius(self, loader, net):
        """
        compute radius as 1-gamma quatile of normal sample distance to center.
        Then anomaly score is ||net(x) - c_j||^2 - R_j^2 <--- negative if in, positive if out.
        ----------
        INPUT
            |---- loader (torch.utils.data.Dataloader) the loader of the data.
            |---- net (nn.Module) the DMSAD network. The output must be a vector
            |           embedding of the input. The network should be an
            |           autoencoder for which the forward pass returns both the
            |           reconstruction and the embedding of the input.
        OUTPUT
            |---- None
        """
        dist_list = [[] for _ in range(self.c.shape[0])] # initialize N_sphere lists
        net.eval()
        with torch.no_grad():
            for b, data in enumerate(loader):
                # get data
                input, _, _, semi_label, _ = data
                input = input.to(self.device).float()
                semi_label = semi_label.to(self.device)
                # keep only normal samples
                input = input[semi_label != -1]
                # get embdeding of batch
                _, embed = net(input)

                # get the closest sphere and count the number of normal samples per sphere
                dist, idx = torch.min(torch.norm(self.c.unsqueeze(0) - embed.unsqueeze(1), p=2, dim=2), dim=1)
                for i, d in zip(idx, dist):
                    dist_list[i].append(d)

                if self.print_batch_progress:
                    print_progessbar(b, len(loader), Name='\t\tBatch', Size=40, erase=True)

        # compute the radius as 1-gamma quantile of the normal distances of each spheres
        # NOTE(review): if a sphere receives no samples, torch.stack([]) raises;
        # and for very small dist lists int((1-gamma)*len) can be 0 while
        # kthvalue requires k >= 1 — confirm these cases cannot occur upstream.
        self.R = torch.zeros(self.c.shape[0], device=self.device)
        for i, dist in enumerate(dist_list):
            dist = torch.stack(dist, dim=0)
            self.R[i] = torch.kthvalue(dist, k=int((1 - self.gamma) * dist.shape[0]))[0]
14,649
4,296
from __future__ import print_function, unicode_literals from escher import __schema_version__ import escher.server from escher import Builder, get_cache_dir, clear_cache from escher.plots import (_load_resource, local_index, server_index, model_json_for_name, map_json_for_name) from escher.urls import get_url import os import sys from os.path import join import json from pytest import raises, mark try: from urllib.error import URLError except ImportError: from urllib2 import URLError if sys.version < '3': unicode_type = unicode else: unicode_type = str # cache def test_get_cache_dir(): d = get_cache_dir() assert os.path.isdir(d) d = get_cache_dir(name='maps') assert os.path.isdir(d) def test_clear_cache(tmpdir, request): (tmpdir.mkdir('maps').mkdir('Escherichia coli') .join('iJO1366.Central metabolism.json').write('temp')) (tmpdir.mkdir('models').mkdir('Escherichia coli') .join('iJO1366.json').write('temp')) clear_cache(str(tmpdir)) assert os.listdir(str(tmpdir)) == [] def fin(): tmpdir.remove() request.addfinalizer(fin) def test_local_index(tmpdir, request): maps = tmpdir.mkdir('maps') maps.mkdir('Escherichia coli').join('iJO1366.Central metabolism.json').write('temp') # ignore these maps.join('ignore_md.json').write('ignore') tmpdir.mkdir('models').mkdir('Escherichia coli').join('iJO1366.json').write('temp') assert local_index(str(tmpdir)) == { 'maps': [ { 'organism': 'Escherichia coli', 'map_name': 'iJO1366.Central metabolism' } ], 'models': [ { 'organism': 'Escherichia coli', 'model_name': 'iJO1366' } ] } def fin(): tmpdir.remove() request.addfinalizer(fin) # server @mark.web def test_server_index(): index = server_index() map_0 = index['maps'][0] assert 'organism' in map_0 assert 'map_name' in map_0 model_0 = index['models'][0] assert 'organism' in model_0 assert 'model_name' in model_0 # model and maps def test_model_json_for_name(tmpdir): models = tmpdir.mkdir('models') models.mkdir('Escherichia coli').join('iJO1366.json').write('"temp"') json = 
model_json_for_name('iJO1366', cache_dir=str(tmpdir)) assert json == '"temp"' @mark.web def test_model_json_for_name_web(tmpdir): data = model_json_for_name('iJO1366', cache_dir=str(tmpdir)) assert 'reactions' in data assert 'metabolites' in data def test_map_json_for_name(tmpdir): maps = tmpdir.mkdir('maps') maps.mkdir('Escherichia coli').join('iJO1366.Central metabolism.json').write('"temp"') json = map_json_for_name('iJO1366.Central metabolism', cache_dir=str(tmpdir)) assert json == '"temp"' @mark.web def test_map_json_for_name_web(tmpdir): data = map_json_for_name('iJO1366.Central metabolism', cache_dir=str(tmpdir)) root = get_url('escher_root', protocol='https').rstrip('/') assert json.loads(data)[0]['schema'] == '/'.join([root, 'escher', 'jsonschema', __schema_version__ + '#']) # helper functions def test__load_resource(tmpdir): assert _load_resource('{"r": "val"}', 'name') == '{"r": "val"}' directory = os.path.abspath(os.path.dirname(__file__)) assert _load_resource(join(directory, 'example.json'), 'name').strip() == '{"r": "val"}' with raises(ValueError) as err: p = join(str(tmpdir), 'dummy') with open(p, 'w') as f: f.write('dummy') _load_resource(p, 'name') assert 'not a valid json file' in err.value @mark.web def test__load_resource_web(tmpdir): url = '/'.join([get_url('map_download', protocol='https'), 'Escherichia%20coli/iJO1366.Central%20metabolism.json']) _ = json.loads(_load_resource(url, 'name')) def test_Builder(tmpdir): b = Builder(map_json='{"r": "val"}', model_json='{"r": "val"}') # Cannot load dev/local version without an explicit css string property. # TODO include a test where these do not raise. 
with raises(Exception): b.display_in_notebook(js_source='dev') with raises(Exception): b.display_in_notebook(js_source='local') # ok with embedded_css arg b = Builder(map_json='{"r": "val"}', model_json='{"r": "val"}', embedded_css='') b.display_in_notebook(js_source='dev') b.save_html(join(str(tmpdir), 'Builder.html'), js_source='dev') # test options with raises(Exception): b._get_html(js_source='devv') with raises(Exception): b._get_html(menu='') with raises(Exception): b._get_html(scroll_behavior='asdf') b._get_html(js_source='local') b._get_html(menu='all') b._get_html(scroll_behavior='zoom') @mark.web def test_Builder_download(): # download b = Builder(map_name='iJO1366.Central metabolism', model_name='iJO1366') assert b.loaded_map_json is not None assert b.loaded_model_json is not None b._get_html(js_source='web') b.display_in_notebook(height=200) # data b = Builder(map_name='iJO1366.Central metabolism', model_name='iJO1366', reaction_data=[{'GAPD': 123}, {'GAPD': 123}]) b = Builder(map_name='iJO1366.Central metabolism', model_name='iJO1366', metabolite_data=[{'nadh_c': 123}, {'nadh_c': 123}]) b = Builder(map_name='iJO1366.Central metabolism', model_name='iJO1366', gene_data=[{'gapA': 123}, {'adhE': 123}]) assert type(b.the_id) is unicode_type assert len(b.the_id) == 10 def test_Builder_options(): b = Builder(embedded_css='') b.set_metabolite_no_data_color('white') assert b.metabolite_no_data_color=='white' html = b._get_html(js_source='local') assert 'metabolite_no_data_color: "white"' in html def test__draw_js(): b = Builder(map_json='"useless_map"', model_json='"useless_model"', embedded_css='') def look_for_string(st, substring): """Look for the string in the substring. 
This solves a bug in py.test for these cases""" try: found = st.find(substring) assert found > -1 except AssertionError: raise AssertionError('Could not find\n\n%s\n\nin\n\n%s' % (substring, st)) # no static parse, dev ijs = b._initialize_javascript('id', 'local') js = b._draw_js('id', True, 'all', True, True, True, 'pan', True, None) look_for_string(ijs, 'var map_data_id = "useless_map";') look_for_string(ijs, 'var model_data_id = "useless_model";') look_for_string(js, 'Builder(map_data_id, model_data_id, embedded_css_id, d3.select("#id"), options);') # static parse, not dev ijs = b._initialize_javascript('id', 'local') static_index = '{"my": ["useless", "index"]}' js = b._draw_js('id', True, 'all', True, False, True, 'pan', True, static_index) look_for_string(ijs, 'var map_data_id = "useless_map";') look_for_string(ijs, 'var model_data_id = "useless_model";') look_for_string(js, 'escher.static.load_map_model_from_url("%s/maps/", "%s/models/",' % (__schema_version__, __schema_version__)) look_for_string(js, static_index) look_for_string(js, 'options, function(map_data_id, model_data_id, options) {') look_for_string(js, 'escher.Builder(map_data_id, model_data_id, embedded_css_id, d3.select("#id"), options);')
7,600
2,740
from setuptools import find_packages, setup setup( name="casualty", version="0.1.9", packages=find_packages(exclude=["tests"]), install_requires=[ "structlog==18.2.0", "wrapt==1.10.11", "pre-commit-hooks==1.4.0", "mock==2.0.0", "pytest==3.8.2", "pytest-mock==1.10.0", "pytest-cov" ], url="", python_requires=">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*", license="MIT", author="Sohit Kumar", author_email="sumitk002@gmail.com", test_suite="tests", description="A python library to generate co-relation id and bind it to headers in outgoing request", )
656
259
import time
from ..utils.log import log, INFO, ERROR, PASS
from ..utils.isaac import submit_login_form, assert_logged_in
from ..utils.i_selenium import assert_tab, image_div
from ..utils.i_selenium import wait_for_xpath_element, wait_for_invisible_xpath
from ..tests import TestWithDependency
from selenium.common.exceptions import TimeoutException

__all__ = ["user_progress_access"]


#####
# Test : Access Users Progress Page
#####
@TestWithDependency("USER_PROGRESS_ACCESS", ["LOGIN", "LOGOUT"])
def user_progress_access(driver, Users, ISAAC_WEB, WAIT_DUR, **kwargs):
    """Test access to user progress page is suitably restricted.

        - 'driver' should be a Selenium WebDriver.
        - 'Users' must be a TestUsers object.
        - 'ISAAC_WEB' is the string URL of the Isaac website to be tested.
        - 'WAIT_DUR' is the time in seconds to wait for JavaScript to run/load.
    """
    assert_tab(driver, ISAAC_WEB)
    driver.get(ISAAC_WEB + "/logout")
    log(INFO, "Logging out any logged in user.")
    time.sleep(WAIT_DUR)
    progress_access_fail = False

    # Stage 1: a logged-out visitor must be redirected to the login page.
    try:
        log(INFO, "Test if logged out user can access '/progress/1'.")
        driver.get(ISAAC_WEB + "/progress/1")
        time.sleep(WAIT_DUR)
        assert (("/login?target=%2Fprogress%2F1" in driver.current_url)
                or ("/login?target=~2Fprogress~2F1" in driver.current_url))
        log(INFO, "Logged out users can't access progress pages.")
        time.sleep(WAIT_DUR)
        driver.get(ISAAC_WEB + "/logout")
        log(INFO, "Logging out to start from same initial page each time.")
        time.sleep(WAIT_DUR)
    except AssertionError:
        progress_access_fail = True
        image_div(driver, "ERROR_unexpected_admin_access")
        log(ERROR, "Logged out user may have accessed '/progress/1'; see 'ERROR_unexpected_admin_access.png'!")

    # Stage 2: non-admin roles should see a blank progress page (no name in
    # the title, no data).
    access_cases = [("Student", Users.Student), ("Teacher", Users.Teacher),
                    ("Content Editor", Users.Editor), ("Event Manager", Users.Event)]
    for i_type, user in access_cases:
        log(INFO, "Test if '%s' users can access another users progress page." % i_type)

        try:
            driver.get(ISAAC_WEB + "/progress/1")
            time.sleep(WAIT_DUR)
            submit_login_form(driver, user=user, wait_dur=WAIT_DUR)
            time.sleep(WAIT_DUR)
            assert_logged_in(driver, user, wait_dur=WAIT_DUR)
            log(INFO, "Try loading progress page; no errors will be shown but have to wait to see if data loads.")
            wait_for_invisible_xpath(driver, "//div[@loading-overlay]", 60)
        except AssertionError:
            log(ERROR, "Couldn't log user in to test '/progress/1' access!")
            return False
        except TimeoutException:
            log(ERROR, "'%s' users given endless loading screen; can't tell if can access page. Can't continue!" % i_type)
            return False

        try:
            title = str(wait_for_xpath_element(driver, "(//h1)[1]").text)
            title = title.strip()
            # BUGFIX: the assertion message was never %-formatted with the
            # actual title, so failures printed a literal '%s'.
            assert title == "Progress for user:", \
                "Title is '%s', expected 'Progress for user:' without a name!" % title
            log(INFO, "'%s' users given blank info as expected; can't access page." % i_type)
        except TimeoutException:
            log(ERROR, "No title found on page after loading finished! Can't continue!")
            return False
        except AssertionError:
            log(ERROR, "User of type '%s' accessed another users progress page!" % i_type)
            progress_access_fail = True

        driver.get(ISAAC_WEB + "/logout")
        log(INFO, "Logged out '%s' user." % i_type)
        time.sleep(WAIT_DUR)

    # Stage 3: admins must see the full progress page with a named title and
    # the plotted data.
    access_cases = [("Admin", Users.Admin)]
    for i_type, user in access_cases:
        log(INFO, "Test if '%s' users can access another users progress page." % i_type)

        try:
            driver.get(ISAAC_WEB + "/progress/1")
            time.sleep(WAIT_DUR)
            submit_login_form(driver, user=user, wait_dur=WAIT_DUR)
            time.sleep(WAIT_DUR)
            assert_logged_in(driver, user, wait_dur=WAIT_DUR)
            title = str(wait_for_xpath_element(driver, "(//h1)[1]").text)
            title = title.strip()
            # BUGFIX: message is now formatted with the actual title.
            assert len(title) > len("Progress for user:"), \
                "Title is '%s', expected 'Progress for user: [name]'!" % title
            wait_for_xpath_element(driver, "//div[@d3-plot]//ul[@class='d3-plot-key']")
            time.sleep(WAIT_DUR)
            log(INFO, "'%s' users can access '/progress/1' as expected." % i_type)
        except TimeoutException:
            progress_access_fail = True
            image_div(driver, "ERROR_no_admin_access")
            log(ERROR, "'%s' user can't access '/progress/1'; see 'ERROR_no_admin_access.png'!" % i_type)
        # Py2-only 'except E, e' syntax and 'e.message' replaced with forms
        # valid on both Python 2.6+ and Python 3.
        except AssertionError as e:
            progress_access_fail = True
            image_div(driver, "ERROR_no_admin_access")
            log(ERROR, "Error accessing other user progress: %s See 'ERROR_no_admin_access.png'!" % e)

        driver.get(ISAAC_WEB + "/logout")
        log(INFO, "Logged out '%s' user." % i_type)
        time.sleep(3)

    if not progress_access_fail:
        log(PASS, "Access to another users progress page restricted appropriately.")
        return True
    else:
        log(ERROR, "Access not appropriately restricted! Fail!")
        return False
5,321
1,646
import re import sqlite3 conn = sqlite3.connect('loc.db') c = conn.cursor() c.execute('CREATE TABLE IF NOT EXISTS location(loc text PRIMARY KEY, code text)') conn.commit() f = open('loc_code.txt') for d in f.readlines(): data = re.sub(r'\s{2}', '|', d.strip()).split('|') print data[1].strip(), data[0] c.execute('INSERT INTO location VALUES ("%s", "%s")'%(data[1].strip(), data[0])) conn.commit() f.close()
423
155
# This file was automatically generated by SWIG (http://www.swig.org). # Version 3.0.12 # # Do not make changes to this file unless you know what you are doing--modify # the SWIG interface file instead. from sys import version_info as _swig_python_version_info if _swig_python_version_info >= (2, 7, 0): def swig_import_helper(): import importlib pkg = __name__.rpartition('.')[0] mname = '.'.join((pkg, '_valkka_nv')).lstrip('.') try: return importlib.import_module(mname) except ImportError: return importlib.import_module('_valkka_nv') _valkka_nv = swig_import_helper() del swig_import_helper elif _swig_python_version_info >= (2, 6, 0): def swig_import_helper(): from os.path import dirname import imp fp = None try: fp, pathname, description = imp.find_module('_valkka_nv', [dirname(__file__)]) except ImportError: import _valkka_nv return _valkka_nv try: _mod = imp.load_module('_valkka_nv', fp, pathname, description) finally: if fp is not None: fp.close() return _mod _valkka_nv = swig_import_helper() del swig_import_helper else: import _valkka_nv del _swig_python_version_info try: _swig_property = property except NameError: pass # Python < 2.2 doesn't have 'property'. 
try: import builtins as __builtin__ except ImportError: import __builtin__ def _swig_setattr_nondynamic(self, class_type, name, value, static=1): if (name == "thisown"): return self.this.own(value) if (name == "this"): if type(value).__name__ == 'SwigPyObject': self.__dict__[name] = value return method = class_type.__swig_setmethods__.get(name, None) if method: return method(self, value) if (not static): if _newclass: object.__setattr__(self, name, value) else: self.__dict__[name] = value else: raise AttributeError("You cannot add attributes to %s" % self) def _swig_setattr(self, class_type, name, value): return _swig_setattr_nondynamic(self, class_type, name, value, 0) def _swig_getattr(self, class_type, name): if (name == "thisown"): return self.this.own() method = class_type.__swig_getmethods__.get(name, None) if method: return method(self) raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name)) def _swig_repr(self): try: strthis = "proxy of " + self.this.__repr__() except __builtin__.Exception: strthis = "" return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,) try: _object = object _newclass = 1 except __builtin__.Exception: class _object: pass _newclass = 0 from valkka import core class FrameFilter(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, FrameFilter, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, FrameFilter, name) def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined") __repr__ = _swig_repr __swig_destroy__ = _valkka_nv.delete_FrameFilter __del__ = lambda self: None FrameFilter_swigregister = _valkka_nv.FrameFilter_swigregister FrameFilter_swigregister(FrameFilter) class DummyFrameFilter(FrameFilter): __swig_setmethods__ = {} for _s in [FrameFilter]: __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {})) __setattr__ = lambda self, name, value: 
_swig_setattr(self, DummyFrameFilter, name, value) __swig_getmethods__ = {} for _s in [FrameFilter]: __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {})) __getattr__ = lambda self, name: _swig_getattr(self, DummyFrameFilter, name) __repr__ = _swig_repr def __init__(self, name, verbose=True, next=None): this = _valkka_nv.new_DummyFrameFilter(name, verbose, next) try: self.this.append(this) except __builtin__.Exception: self.this = this __swig_destroy__ = _valkka_nv.delete_DummyFrameFilter __del__ = lambda self: None DummyFrameFilter_swigregister = _valkka_nv.DummyFrameFilter_swigregister DummyFrameFilter_swigregister(DummyFrameFilter) class InfoFrameFilter(FrameFilter): __swig_setmethods__ = {} for _s in [FrameFilter]: __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {})) __setattr__ = lambda self, name, value: _swig_setattr(self, InfoFrameFilter, name, value) __swig_getmethods__ = {} for _s in [FrameFilter]: __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {})) __getattr__ = lambda self, name: _swig_getattr(self, InfoFrameFilter, name) __repr__ = _swig_repr def __init__(self, name, next=None): this = _valkka_nv.new_InfoFrameFilter(name, next) try: self.this.append(this) except __builtin__.Exception: self.this = this __swig_destroy__ = _valkka_nv.delete_InfoFrameFilter __del__ = lambda self: None InfoFrameFilter_swigregister = _valkka_nv.InfoFrameFilter_swigregister InfoFrameFilter_swigregister(InfoFrameFilter) class BriefInfoFrameFilter(FrameFilter): __swig_setmethods__ = {} for _s in [FrameFilter]: __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {})) __setattr__ = lambda self, name, value: _swig_setattr(self, BriefInfoFrameFilter, name, value) __swig_getmethods__ = {} for _s in [FrameFilter]: __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {})) __getattr__ = lambda self, name: _swig_getattr(self, BriefInfoFrameFilter, name) __repr__ = _swig_repr def __init__(self, name, next=None): this = 
_valkka_nv.new_BriefInfoFrameFilter(name, next) try: self.this.append(this) except __builtin__.Exception: self.this = this __swig_destroy__ = _valkka_nv.delete_BriefInfoFrameFilter __del__ = lambda self: None BriefInfoFrameFilter_swigregister = _valkka_nv.BriefInfoFrameFilter_swigregister BriefInfoFrameFilter_swigregister(BriefInfoFrameFilter) class ThreadSafeFrameFilter(FrameFilter): __swig_setmethods__ = {} for _s in [FrameFilter]: __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {})) __setattr__ = lambda self, name, value: _swig_setattr(self, ThreadSafeFrameFilter, name, value) __swig_getmethods__ = {} for _s in [FrameFilter]: __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {})) __getattr__ = lambda self, name: _swig_getattr(self, ThreadSafeFrameFilter, name) __repr__ = _swig_repr def __init__(self, name, next=None): this = _valkka_nv.new_ThreadSafeFrameFilter(name, next) try: self.this.append(this) except __builtin__.Exception: self.this = this __swig_destroy__ = _valkka_nv.delete_ThreadSafeFrameFilter __del__ = lambda self: None ThreadSafeFrameFilter_swigregister = _valkka_nv.ThreadSafeFrameFilter_swigregister ThreadSafeFrameFilter_swigregister(ThreadSafeFrameFilter) class ForkFrameFilter(FrameFilter): __swig_setmethods__ = {} for _s in [FrameFilter]: __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {})) __setattr__ = lambda self, name, value: _swig_setattr(self, ForkFrameFilter, name, value) __swig_getmethods__ = {} for _s in [FrameFilter]: __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {})) __getattr__ = lambda self, name: _swig_getattr(self, ForkFrameFilter, name) __repr__ = _swig_repr def __init__(self, name, next=None, next2=None): this = _valkka_nv.new_ForkFrameFilter(name, next, next2) try: self.this.append(this) except __builtin__.Exception: self.this = this __swig_destroy__ = _valkka_nv.delete_ForkFrameFilter __del__ = lambda self: None ForkFrameFilter_swigregister = 
_valkka_nv.ForkFrameFilter_swigregister
ForkFrameFilter_swigregister(ForkFrameFilter)

# NOTE(review): everything in this section is SWIG-generated binding code for
# the _valkka_nv C extension module (formatting below restores the canonical
# SWIG 3.x layout).  Every proxy class follows the same boilerplate:
#   * copy the parent's __swig_setmethods__/__swig_getmethods__ tables,
#   * route attribute access through _swig_setattr/_swig_getattr,
#   * construct the underlying C++ object in __init__ via new_<Class>(...),
#   * register the proxy with the extension via <Class>_swigregister.
# Actual frame-filter behavior is implemented in the C++ library, not here;
# per-class comments below are inferred from the C++ names — confirm against
# the libValkka C++ headers.  Do not hand-edit: regenerate with SWIG instead.

class ForkFrameFilter3(FrameFilter):
    # Proxy for C++ ForkFrameFilter3; presumably forwards each frame to up to
    # three downstream filters (next, next2, next3) — TODO confirm.
    __swig_setmethods__ = {}
    for _s in [FrameFilter]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, ForkFrameFilter3, name, value)
    __swig_getmethods__ = {}
    for _s in [FrameFilter]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, ForkFrameFilter3, name)
    __repr__ = _swig_repr

    def __init__(self, name, next=None, next2=None, next3=None):
        this = _valkka_nv.new_ForkFrameFilter3(name, next, next2, next3)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _valkka_nv.delete_ForkFrameFilter3
    __del__ = lambda self: None
ForkFrameFilter3_swigregister = _valkka_nv.ForkFrameFilter3_swigregister
ForkFrameFilter3_swigregister(ForkFrameFilter3)

class ForkFrameFilterN(FrameFilter):
    # Proxy for C++ ForkFrameFilterN: a fork with a dynamic set of outputs,
    # managed at runtime through connect(tag, filter) / disconnect(tag).
    __swig_setmethods__ = {}
    for _s in [FrameFilter]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, ForkFrameFilterN, name, value)
    __swig_getmethods__ = {}
    for _s in [FrameFilter]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, ForkFrameFilterN, name)
    __repr__ = _swig_repr

    def __init__(self, name):
        this = _valkka_nv.new_ForkFrameFilterN(name)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _valkka_nv.delete_ForkFrameFilterN
    __del__ = lambda self: None

    def connect(self, tag, filter):
        return _valkka_nv.ForkFrameFilterN_connect(self, tag, filter)

    def disconnect(self, tag):
        return _valkka_nv.ForkFrameFilterN_disconnect(self, tag)
ForkFrameFilterN_swigregister = _valkka_nv.ForkFrameFilterN_swigregister
ForkFrameFilterN_swigregister(ForkFrameFilterN)

class SlotFrameFilter(FrameFilter):
    # Proxy for C++ SlotFrameFilter; constructed with a slot number n_slot —
    # presumably stamps or filters frames by slot; confirm in C++ sources.
    __swig_setmethods__ = {}
    for _s in [FrameFilter]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SlotFrameFilter, name, value)
    __swig_getmethods__ = {}
    for _s in [FrameFilter]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SlotFrameFilter, name)
    __repr__ = _swig_repr

    def __init__(self, name, n_slot, next=None):
        this = _valkka_nv.new_SlotFrameFilter(name, n_slot, next)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _valkka_nv.delete_SlotFrameFilter
    __del__ = lambda self: None
SlotFrameFilter_swigregister = _valkka_nv.SlotFrameFilter_swigregister
SlotFrameFilter_swigregister(SlotFrameFilter)

class PassSlotFrameFilter(FrameFilter):
    # Proxy for C++ PassSlotFrameFilter; takes a slot number n_slot like
    # SlotFrameFilter — presumably passes only frames for that slot.
    __swig_setmethods__ = {}
    for _s in [FrameFilter]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, PassSlotFrameFilter, name, value)
    __swig_getmethods__ = {}
    for _s in [FrameFilter]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, PassSlotFrameFilter, name)
    __repr__ = _swig_repr

    def __init__(self, name, n_slot, next=None):
        this = _valkka_nv.new_PassSlotFrameFilter(name, n_slot, next)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _valkka_nv.delete_PassSlotFrameFilter
    __del__ = lambda self: None
PassSlotFrameFilter_swigregister = _valkka_nv.PassSlotFrameFilter_swigregister
PassSlotFrameFilter_swigregister(PassSlotFrameFilter)

class DumpFrameFilter(FrameFilter):
    # Proxy for C++ DumpFrameFilter (debugging aid by name — TODO confirm).
    __swig_setmethods__ = {}
    for _s in [FrameFilter]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, DumpFrameFilter, name, value)
    __swig_getmethods__ = {}
    for _s in [FrameFilter]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, DumpFrameFilter, name)
    __repr__ = _swig_repr

    def __init__(self, name, next=None):
        this = _valkka_nv.new_DumpFrameFilter(name, next)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _valkka_nv.delete_DumpFrameFilter
    __del__ = lambda self: None
DumpFrameFilter_swigregister = _valkka_nv.DumpFrameFilter_swigregister
DumpFrameFilter_swigregister(DumpFrameFilter)

class CountFrameFilter(FrameFilter):
    # Proxy for C++ CountFrameFilter.
    __swig_setmethods__ = {}
    for _s in [FrameFilter]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, CountFrameFilter, name, value)
    __swig_getmethods__ = {}
    for _s in [FrameFilter]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, CountFrameFilter, name)
    __repr__ = _swig_repr

    def __init__(self, name, next=None):
        this = _valkka_nv.new_CountFrameFilter(name, next)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _valkka_nv.delete_CountFrameFilter
    __del__ = lambda self: None
CountFrameFilter_swigregister = _valkka_nv.CountFrameFilter_swigregister
CountFrameFilter_swigregister(CountFrameFilter)

class TimestampFrameFilter(FrameFilter):
    # Proxy for C++ TimestampFrameFilter.  Constructor is overloaded on the
    # C++ side, hence the opaque *args signature.
    __swig_setmethods__ = {}
    for _s in [FrameFilter]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, TimestampFrameFilter, name, value)
    __swig_getmethods__ = {}
    for _s in [FrameFilter]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, TimestampFrameFilter, name)
    __repr__ = _swig_repr

    def __init__(self, *args):
        this = _valkka_nv.new_TimestampFrameFilter(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _valkka_nv.delete_TimestampFrameFilter
    __del__ = lambda self: None
TimestampFrameFilter_swigregister = _valkka_nv.TimestampFrameFilter_swigregister
TimestampFrameFilter_swigregister(TimestampFrameFilter)

class TimestampFrameFilter2(FrameFilter):
    # Proxy for C++ TimestampFrameFilter2 (overloaded constructor; see above).
    __swig_setmethods__ = {}
    for _s in [FrameFilter]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, TimestampFrameFilter2, name, value)
    __swig_getmethods__ = {}
    for _s in [FrameFilter]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, TimestampFrameFilter2, name)
    __repr__ = _swig_repr

    def __init__(self, *args):
        this = _valkka_nv.new_TimestampFrameFilter2(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _valkka_nv.delete_TimestampFrameFilter2
    __del__ = lambda self: None
TimestampFrameFilter2_swigregister = _valkka_nv.TimestampFrameFilter2_swigregister
TimestampFrameFilter2_swigregister(TimestampFrameFilter2)

class DummyTimestampFrameFilter(FrameFilter):
    # Proxy for C++ DummyTimestampFrameFilter.
    __swig_setmethods__ = {}
    for _s in [FrameFilter]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, DummyTimestampFrameFilter, name, value)
    __swig_getmethods__ = {}
    for _s in [FrameFilter]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, DummyTimestampFrameFilter, name)
    __repr__ = _swig_repr

    def __init__(self, name, next=None):
        this = _valkka_nv.new_DummyTimestampFrameFilter(name, next)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _valkka_nv.delete_DummyTimestampFrameFilter
    __del__ = lambda self: None
DummyTimestampFrameFilter_swigregister = _valkka_nv.DummyTimestampFrameFilter_swigregister
DummyTimestampFrameFilter_swigregister(DummyTimestampFrameFilter)

class RepeatH264ParsFrameFilter(FrameFilter):
    # Proxy for C++ RepeatH264ParsFrameFilter; by name, re-emits H.264
    # SPS/PPS parameter sets into the stream — TODO confirm.
    __swig_setmethods__ = {}
    for _s in [FrameFilter]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, RepeatH264ParsFrameFilter, name, value)
    __swig_getmethods__ = {}
    for _s in [FrameFilter]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, RepeatH264ParsFrameFilter, name)
    __repr__ = _swig_repr

    def __init__(self, name, next=None):
        this = _valkka_nv.new_RepeatH264ParsFrameFilter(name, next)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _valkka_nv.delete_RepeatH264ParsFrameFilter
    __del__ = lambda self: None
RepeatH264ParsFrameFilter_swigregister = _valkka_nv.RepeatH264ParsFrameFilter_swigregister
RepeatH264ParsFrameFilter_swigregister(RepeatH264ParsFrameFilter)

class GateFrameFilter(FrameFilter):
    # Proxy for C++ GateFrameFilter: toggled open/closed with set()/unSet();
    # passConfigFrames()/noConfigFrames() control config-frame passthrough.
    __swig_setmethods__ = {}
    for _s in [FrameFilter]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, GateFrameFilter, name, value)
    __swig_getmethods__ = {}
    for _s in [FrameFilter]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, GateFrameFilter, name)
    __repr__ = _swig_repr

    def __init__(self, name, next=None):
        this = _valkka_nv.new_GateFrameFilter(name, next)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this

    def set(self):
        return _valkka_nv.GateFrameFilter_set(self)

    def unSet(self):
        return _valkka_nv.GateFrameFilter_unSet(self)

    def passConfigFrames(self):
        return _valkka_nv.GateFrameFilter_passConfigFrames(self)

    def noConfigFrames(self):
        return _valkka_nv.GateFrameFilter_noConfigFrames(self)
    __swig_destroy__ = _valkka_nv.delete_GateFrameFilter
    __del__ = lambda self: None
GateFrameFilter_swigregister = _valkka_nv.GateFrameFilter_swigregister
GateFrameFilter_swigregister(GateFrameFilter)

class SwitchFrameFilter(FrameFilter):
    # Proxy for C++ SwitchFrameFilter: routes to one of two outputs,
    # selected with set1()/set2().
    __swig_setmethods__ = {}
    for _s in [FrameFilter]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SwitchFrameFilter, name, value)
    __swig_getmethods__ = {}
    for _s in [FrameFilter]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SwitchFrameFilter, name)
    __repr__ = _swig_repr

    def __init__(self, name, next1=None, next2=None):
        this = _valkka_nv.new_SwitchFrameFilter(name, next1, next2)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this

    def set1(self):
        return _valkka_nv.SwitchFrameFilter_set1(self)

    def set2(self):
        return _valkka_nv.SwitchFrameFilter_set2(self)
    __swig_destroy__ = _valkka_nv.delete_SwitchFrameFilter
    __del__ = lambda self: None
SwitchFrameFilter_swigregister = _valkka_nv.SwitchFrameFilter_swigregister
SwitchFrameFilter_swigregister(SwitchFrameFilter)

class CachingGateFrameFilter(FrameFilter):
    # Proxy for C++ CachingGateFrameFilter: gate variant toggled with
    # set()/unSet() (caching semantics implemented in C++ — TODO confirm).
    __swig_setmethods__ = {}
    for _s in [FrameFilter]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, CachingGateFrameFilter, name, value)
    __swig_getmethods__ = {}
    for _s in [FrameFilter]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, CachingGateFrameFilter, name)
    __repr__ = _swig_repr

    def __init__(self, name, next=None):
        this = _valkka_nv.new_CachingGateFrameFilter(name, next)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this

    def set(self):
        return _valkka_nv.CachingGateFrameFilter_set(self)

    def unSet(self):
        return _valkka_nv.CachingGateFrameFilter_unSet(self)
    __swig_destroy__ = _valkka_nv.delete_CachingGateFrameFilter
    __del__ = lambda self: None
CachingGateFrameFilter_swigregister = _valkka_nv.CachingGateFrameFilter_swigregister
CachingGateFrameFilter_swigregister(CachingGateFrameFilter)

class SetSlotFrameFilter(FrameFilter):
    # Proxy for C++ SetSlotFrameFilter; target slot changed at runtime
    # with setSlot(n).
    __swig_setmethods__ = {}
    for _s in [FrameFilter]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SetSlotFrameFilter, name, value)
    __swig_getmethods__ = {}
    for _s in [FrameFilter]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SetSlotFrameFilter, name)
    __repr__ = _swig_repr

    def __init__(self, name, next=None):
        this = _valkka_nv.new_SetSlotFrameFilter(name, next)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this

    def setSlot(self, n=0):
        return _valkka_nv.SetSlotFrameFilter_setSlot(self, n)
    __swig_destroy__ = _valkka_nv.delete_SetSlotFrameFilter
    __del__ = lambda self: None
SetSlotFrameFilter_swigregister = _valkka_nv.SetSlotFrameFilter_swigregister
SetSlotFrameFilter_swigregister(SetSlotFrameFilter)

class TimeIntervalFrameFilter(FrameFilter):
    # Proxy for C++ TimeIntervalFrameFilter; mstimedelta presumably is an
    # interval in milliseconds — TODO confirm units in C++ sources.
    __swig_setmethods__ = {}
    for _s in [FrameFilter]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, TimeIntervalFrameFilter, name, value)
    __swig_getmethods__ = {}
    for _s in [FrameFilter]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, TimeIntervalFrameFilter, name)
    __repr__ = _swig_repr

    def __init__(self, name, mstimedelta, next=None):
        this = _valkka_nv.new_TimeIntervalFrameFilter(name, mstimedelta, next)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _valkka_nv.delete_TimeIntervalFrameFilter
    __del__ = lambda self: None
TimeIntervalFrameFilter_swigregister = _valkka_nv.TimeIntervalFrameFilter_swigregister
TimeIntervalFrameFilter_swigregister(TimeIntervalFrameFilter)

class FifoFrameFilter(FrameFilter):
    # Proxy for C++ FifoFrameFilter: terminal filter that feeds frames into
    # the given framefifo.
    __swig_setmethods__ = {}
    for _s in [FrameFilter]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, FifoFrameFilter, name, value)
    __swig_getmethods__ = {}
    for _s in [FrameFilter]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, FifoFrameFilter, name)
    __repr__ = _swig_repr

    def __init__(self, name, framefifo):
        this = _valkka_nv.new_FifoFrameFilter(name, framefifo)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _valkka_nv.delete_FifoFrameFilter
    __del__ = lambda self: None
FifoFrameFilter_swigregister = _valkka_nv.FifoFrameFilter_swigregister
FifoFrameFilter_swigregister(FifoFrameFilter)

class BlockingFifoFrameFilter(FrameFilter):
    # Proxy for C++ BlockingFifoFrameFilter (blocking variant of
    # FifoFrameFilter by name — behavior lives in C++).
    __swig_setmethods__ = {}
    for _s in [FrameFilter]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, BlockingFifoFrameFilter, name, value)
    __swig_getmethods__ = {}
    for _s in [FrameFilter]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, BlockingFifoFrameFilter, name)
    __repr__ = _swig_repr

    def __init__(self, name, framefifo):
        this = _valkka_nv.new_BlockingFifoFrameFilter(name, framefifo)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _valkka_nv.delete_BlockingFifoFrameFilter
    __del__ = lambda self: None
BlockingFifoFrameFilter_swigregister = _valkka_nv.BlockingFifoFrameFilter_swigregister
BlockingFifoFrameFilter_swigregister(BlockingFifoFrameFilter)

class SwScaleFrameFilter(FrameFilter):
    # Proxy for C++ SwScaleFrameFilter; constructed with target_width and
    # target_height (software scaling — presumably via FFmpeg swscale).
    __swig_setmethods__ = {}
    for _s in [FrameFilter]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SwScaleFrameFilter, name, value)
    __swig_getmethods__ = {}
    for _s in [FrameFilter]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SwScaleFrameFilter, name)
    __repr__ = _swig_repr

    def __init__(self, name, target_width, target_height, next=None):
        this = _valkka_nv.new_SwScaleFrameFilter(name, target_width, target_height, next)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _valkka_nv.delete_SwScaleFrameFilter
    __del__ = lambda self: None
SwScaleFrameFilter_swigregister = _valkka_nv.SwScaleFrameFilter_swigregister
SwScaleFrameFilter_swigregister(SwScaleFrameFilter)

class Thread(_object):
    # Abstract proxy for the C++ Thread base class.  No constructor is
    # exported (instances are created only via concrete subclasses); the
    # lifecycle methods delegate straight to the C extension.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Thread, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Thread, name)

    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    __swig_destroy__ = _valkka_nv.delete_Thread
    __del__ = lambda self: None

    def setAffinity(self, i):
        return _valkka_nv.Thread_setAffinity(self, i)

    def startCall(self):
        return _valkka_nv.Thread_startCall(self)

    def stopCall(self):
        return _valkka_nv.Thread_stopCall(self)

    def requestStopCall(self):
        return _valkka_nv.Thread_requestStopCall(self)

    def waitStopCall(self):
        return _valkka_nv.Thread_waitStopCall(self)

    def waitReady(self):
        return _valkka_nv.Thread_waitReady(self)
Thread_swigregister = _valkka_nv.Thread_swigregister
Thread_swigregister(Thread)

class FrameFifoContext(_object):
    # Proxy for the C++ FrameFifoContext struct: sizing/behavior parameters
    # for a frame fifo.  Each attribute below is a C struct member exposed
    # through SWIG getter/setter pairs (n_basic, n_avpkt, n_avframe,
    # n_yuvpbo, n_setup, n_signal, n_marker, flush_when_full).
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, FrameFifoContext, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, FrameFifoContext, name)
    __repr__ = _swig_repr

    def __init__(self, *args):
        this = _valkka_nv.new_FrameFifoContext(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_setmethods__["n_basic"] = _valkka_nv.FrameFifoContext_n_basic_set
    __swig_getmethods__["n_basic"] = _valkka_nv.FrameFifoContext_n_basic_get
    if _newclass:
        n_basic = _swig_property(_valkka_nv.FrameFifoContext_n_basic_get, _valkka_nv.FrameFifoContext_n_basic_set)
    __swig_setmethods__["n_avpkt"] = _valkka_nv.FrameFifoContext_n_avpkt_set
    __swig_getmethods__["n_avpkt"] = _valkka_nv.FrameFifoContext_n_avpkt_get
    if _newclass:
        n_avpkt = _swig_property(_valkka_nv.FrameFifoContext_n_avpkt_get, _valkka_nv.FrameFifoContext_n_avpkt_set)
    __swig_setmethods__["n_avframe"] = _valkka_nv.FrameFifoContext_n_avframe_set
    __swig_getmethods__["n_avframe"] = _valkka_nv.FrameFifoContext_n_avframe_get
    if _newclass:
        n_avframe = _swig_property(_valkka_nv.FrameFifoContext_n_avframe_get, _valkka_nv.FrameFifoContext_n_avframe_set)
    __swig_setmethods__["n_yuvpbo"] = _valkka_nv.FrameFifoContext_n_yuvpbo_set
    __swig_getmethods__["n_yuvpbo"] = _valkka_nv.FrameFifoContext_n_yuvpbo_get
    if _newclass:
        n_yuvpbo = _swig_property(_valkka_nv.FrameFifoContext_n_yuvpbo_get, _valkka_nv.FrameFifoContext_n_yuvpbo_set)
    __swig_setmethods__["n_setup"] = _valkka_nv.FrameFifoContext_n_setup_set
    __swig_getmethods__["n_setup"] = _valkka_nv.FrameFifoContext_n_setup_get
    if _newclass:
        n_setup = _swig_property(_valkka_nv.FrameFifoContext_n_setup_get, _valkka_nv.FrameFifoContext_n_setup_set)
    __swig_setmethods__["n_signal"] = _valkka_nv.FrameFifoContext_n_signal_set
    __swig_getmethods__["n_signal"] = _valkka_nv.FrameFifoContext_n_signal_get
    if _newclass:
        n_signal = _swig_property(_valkka_nv.FrameFifoContext_n_signal_get, _valkka_nv.FrameFifoContext_n_signal_set)
    __swig_setmethods__["n_marker"] = _valkka_nv.FrameFifoContext_n_marker_set
    __swig_getmethods__["n_marker"] = _valkka_nv.FrameFifoContext_n_marker_get
    if _newclass:
        n_marker = _swig_property(_valkka_nv.FrameFifoContext_n_marker_get, _valkka_nv.FrameFifoContext_n_marker_set)
    __swig_setmethods__["flush_when_full"] = _valkka_nv.FrameFifoContext_flush_when_full_set
    __swig_getmethods__["flush_when_full"] = _valkka_nv.FrameFifoContext_flush_when_full_get
    if _newclass:
        flush_when_full = _swig_property(_valkka_nv.FrameFifoContext_flush_when_full_get, _valkka_nv.FrameFifoContext_flush_when_full_set)
    __swig_destroy__ = _valkka_nv.delete_FrameFifoContext
    __del__ = lambda self: None
FrameFifoContext_swigregister = _valkka_nv.FrameFifoContext_swigregister
FrameFifoContext_swigregister(FrameFifoContext)

class DecoderThread(Thread):
    # Proxy for the C++ DecoderThread: decoding thread with configuration
    # calls (time correction/tolerance, thread count) and accessors for its
    # input frame filters.
    __swig_setmethods__ = {}
    for _s in [Thread]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, DecoderThread, name, value)
    __swig_getmethods__ = {}
    for _s in [Thread]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, DecoderThread, name)
    __repr__ = _swig_repr

    def __init__(self, *args):
        this = _valkka_nv.new_DecoderThread(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _valkka_nv.delete_DecoderThread
    __del__ = lambda self: None

    def setTimeCorrection(self, val):
        return _valkka_nv.DecoderThread_setTimeCorrection(self, val)

    def getFrameFilter(self):
        return _valkka_nv.DecoderThread_getFrameFilter(self)

    def getBlockingFrameFilter(self):
        return _valkka_nv.DecoderThread_getBlockingFrameFilter(self)

    def setTimeTolerance(self, mstol):
        return _valkka_nv.DecoderThread_setTimeTolerance(self, mstol)

    def setNumberOfThreads(self, n_threads):
        return _valkka_nv.DecoderThread_setNumberOfThreads(self, n_threads)

    def decodingOnCall(self):
        return _valkka_nv.DecoderThread_decodingOnCall(self)

    def decodingOffCall(self):
        return _valkka_nv.DecoderThread_decodingOffCall(self)

    # Overrides Thread.requestStopCall with the DecoderThread-specific C call.
    def requestStopCall(self):
        return _valkka_nv.DecoderThread_requestStopCall(self)
DecoderThread_swigregister = _valkka_nv.DecoderThread_swigregister
DecoderThread_swigregister(DecoderThread)


# Module-level wrappers that SWIG immediately rebinds to the raw C entry
# points (the def only exists for introspection/doc purposes).
def NVcuInit():
    return _valkka_nv.NVcuInit()
NVcuInit = _valkka_nv.NVcuInit

def NVgetDevices():
    return _valkka_nv.NVgetDevices()
NVgetDevices = _valkka_nv.NVgetDevices

class NVThread(DecoderThread):
    # Proxy for the C++ NVThread: NVIDIA (NVDEC/CUDA) hardware-decoder
    # specialization of DecoderThread — presumably; behavior is in C++.
    __swig_setmethods__ = {}
    for _s in [DecoderThread]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, NVThread, name, value)
    __swig_getmethods__ = {}
    for _s in [DecoderThread]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, NVThread, name)
    __repr__ = _swig_repr

    def __init__(self, *args):
        this = _valkka_nv.new_NVThread(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _valkka_nv.delete_NVThread
    __del__ = lambda self: None
NVThread_swigregister = _valkka_nv.NVThread_swigregister
NVThread_swigregister(NVThread)

# This file is compatible with both classic and new-style classes.
33,479
11,818