hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
706f77988f356002f78f82f354138db4b26d737c
2,989
py
Python
Section_16 _Orientacao_a_objeto/execicios/ex02.py
thiagofreitascarneiro/Python_OOP
037621e334ec7159fe0da937db8418eba6321bdd
[ "MIT" ]
null
null
null
Section_16 _Orientacao_a_objeto/execicios/ex02.py
thiagofreitascarneiro/Python_OOP
037621e334ec7159fe0da937db8418eba6321bdd
[ "MIT" ]
null
null
null
Section_16 _Orientacao_a_objeto/execicios/ex02.py
thiagofreitascarneiro/Python_OOP
037621e334ec7159fe0da937db8418eba6321bdd
[ "MIT" ]
null
null
null
''' Crie uma classe Agenda que pode armazenar 10 pessoas e seja capaz de realizar as seguintes operações: * void armazenaPessoa(String nome, int idade, float altura); * void removePessoa(String nome); * int buscaPessoa(String nome); // informa em que posição da agenda está a pessoa * void imprimeAgenda(); // imprime os dados de todas as pessoas da agenda * int buscaPessoa(String nome); // imprime os dados da pessoa que está na posição 'i' da agenda. ''' class Pessoa: def __init__(self, nome, idade, altura): self.__nome = nome self.__idade = idade self.__altura = altura class Agenda: agenda = [] def armazena_pessoa(self, pessoa): self.agenda.append(pessoa) def imprime_agenda(self): print('*** IMPRIMINDO OS DADOS DA AGENDA ***') for i in self.agenda: print(f'Nome: {i._Pessoa__nome}| Idade: {i._Pessoa__idade} |altura: {i._Pessoa__altura}|') print() def imprime_pessoa(self, index): for p, i in enumerate(self.agenda): if p == index: print('*** IMPRIMINDO OS DADOS DA PESSOA ***') print(f'Dados da pessoa na posição {index}: Nome: {i._Pessoa__nome}| Idade: {i._Pessoa__idade} |altura: {i._Pessoa__altura}|') print() def busca_pessoa(self, nome): for p, i in enumerate(self.agenda): if nome == i._Pessoa__nome: print('*** INFORMANDO A POSIÇÃO DA AGENDA. ***') print(f'A posição da Agenda que a/o {nome} se encontra é na {p} posição.') elif nome != i._Pessoa__nome and p == len(self.agenda) - 1: print(f'O {nome} não existe na Agenda.') print() def remover_pessoa(self, nome): print('*** REMOVENDO A PESSOA DA AGENDA. 
***') for i, n in enumerate(self.agenda): if nome == n._Pessoa__nome: print(f'Removendo o contato {n._Pessoa__nome}') del self.agenda[i] for i in self.agenda: print(f'Nome: {i._Pessoa__nome}| Idade: {i._Pessoa__idade} |altura: {i._Pessoa__altura}|') print() # instancia do objeto para Pessoa user1 = Pessoa('Bob Jack', 32, 1.85) user2 = Pessoa('Billy Joe', 39, 1.89) user3 = Pessoa('Ayn Rand', 69, 1.67) user4 = Pessoa('Thomas Sowell', 85, 1.99) user5 = Pessoa('Hermione Granger', 29, 1.65) # instância do objeto para Agenda agenda = Agenda() # Armazenando os dados Agenda.armazena_pessoa(agenda, user1) Agenda.armazena_pessoa(agenda, user2) Agenda.armazena_pessoa(agenda, user3) Agenda.armazena_pessoa(agenda, user4) Agenda.armazena_pessoa(agenda, user5) # testando os comandos Agenda.imprime_agenda(agenda) Agenda.remover_pessoa(agenda, 'Bob Jack') #Removendo o contato Agenda.imprime_pessoa(agenda, 2) #Imprimindo Agenda pela posição. Agenda.busca_pessoa(agenda, 'Hermione Granger') #nome que existe Agenda.busca_pessoa(agenda, 'Babu Rangel') #nome não existe na lista
27.675926
142
0.641352
404
2,989
4.586634
0.272277
0.041554
0.029682
0.040475
0.190502
0.164598
0.147868
0.147868
0.117647
0.117647
0
0.017188
0.240883
2,989
107
143
27.934579
0.799471
0.222817
0
0.188679
0
0.056604
0.281727
0
0
0
0
0
0
1
0.113208
false
0
0
0
0.169811
0.264151
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
706f947075d48d6e639e35f385287b783e8a5945
3,148
py
Python
get_feature.py
1895-art/stock-price-predict
951a632cd397e969229d793e0c23f0575d154240
[ "MIT" ]
95
2018-07-15T10:04:27.000Z
2022-03-24T11:49:18.000Z
get_feature.py
1895-art/stock-price-predict
951a632cd397e969229d793e0c23f0575d154240
[ "MIT" ]
3
2019-01-18T08:09:57.000Z
2020-01-07T13:19:32.000Z
get_feature.py
kaka-lin/stock-price-predict
951a632cd397e969229d793e0c23f0575d154240
[ "MIT" ]
27
2018-08-07T05:17:05.000Z
2021-06-20T01:53:38.000Z
import getopt, sys, os import csv import pandas as pd import locale from locale import atof locale.setlocale(locale.LC_NUMERIC, '') def main(): try: opts, args = getopt.getopt(sys.argv[1:], "ho:v:f:", ["help", "output=", "filepath"]) except getopt.GetoptError as err: usage() sys.exit(2) output = None verbose = False filepath = os.getcwd() for o, a in opts: if o == "-v": verbose = True elif o in ("-h", "--help"): usage() sys.exit() elif o in ("-o", "--output"): output = a elif o in ("-f", "--filepath"): filepath = a else: assert False, "unhandled option" return filepath def usage(): print ("=======================\n"\ "please input filepath\n"\ "ex: python get_feature.py -f ./data/20180427 \n"\ "=======================") def get_feature_data(filepath, encode=None, **argv): """ input: filepath encode argv: Code,Date,CB,Open,High,Low,Close,Volumn True or False """ params = [] for param in argv: params = [i for i, t in argv.items() if t == True] # abs filepath filepath = os.path.abspath(filepath) get_date = os.path.basename(filepath) tetfp_file = os.path.join(filepath, "tetfp.csv") save_process_path = os.path.join(os.path.abspath("./data/" + get_date + "_process")) with open(tetfp_file, encoding=encode) as file: rows = csv.reader(file, delimiter=",") data = [] for row in rows: new_index = [] for index in row: if index: index = index.strip() new_index.append(index) data.append(new_index) df = pd.DataFrame(data=data[1:], columns=change_columns(*data[0])) df = df.dropna() df["Volumn"] = pd.to_numeric(df["Volumn"].replace('\.','', regex=True) .replace(',','', regex=True) .astype(int)) types = set(df.loc[:,"Code"]) if not os.path.exists(save_process_path): os.mkdir(save_process_path) for t in types: str_t = str(int(t)) t_types = df.loc[df['Code'] == t][params] t_types.to_csv(os.path.join(save_process_path, get_date + "_" + str_t + ".csv"), index=False) def change_columns(*header): """ replace header to English """ column_dict = { "代碼":"Code", "日期":"Date", "中文簡稱":"CB", 
"開盤價(元)":"Open", "最高價(元)":"High", "最低價(元)":"Low", "收盤價(元)":"Close", "成交張數(張)": "Volumn" } return [column_dict[h] for h in header] if __name__ == "__main__": """ choose data output column """ choose = { "Code":True, "Date":True, "CB": False, "Open": True, "High": True, "Low": True, "Close": True, "Volumn": True } filepath = main() get_feature_data(filepath, "big5", **choose)
26.233333
101
0.495553
371
3,148
4.091644
0.350404
0.027668
0.039526
0.028986
0
0
0
0
0
0
0
0.006235
0.337675
3,148
119
102
26.453782
0.721823
0.043202
0
0.022727
0
0
0.124654
0.016621
0
0
0
0
0.011364
1
0.045455
false
0
0.056818
0
0.125
0.011364
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7070b93051b38b982ed7f6d291e2d1ee4d9fa685
811
py
Python
graphs.py
meyresearch/ANI-Peptides
84684b484119699cb5458f4c2aed5fa8a482c315
[ "Apache-2.0" ]
1
2022-03-31T16:43:55.000Z
2022-03-31T16:43:55.000Z
graphs.py
meyresearch/ANI-Peptides
84684b484119699cb5458f4c2aed5fa8a482c315
[ "Apache-2.0" ]
null
null
null
graphs.py
meyresearch/ANI-Peptides
84684b484119699cb5458f4c2aed5fa8a482c315
[ "Apache-2.0" ]
null
null
null
import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import matplotlib.ticker as tkr import os import openmm.unit as unit output_dir = "/home/mbowley/ANI-Peptides/outputs/equilibration_aaa_capped_amber_121250_310322" STATE_DATA_FN = "equilibration_state_data.csv" # Make some graphs report = pd.read_csv(os.path.join(output_dir, STATE_DATA_FN)) report = report.melt() with sns.plotting_context('paper'): g = sns.FacetGrid(data=report, row='variable', sharey=False ) g.map(plt.plot, 'value') # format the labels with f-strings for ax in g.axes.flat: ax.xaxis.set_major_formatter(tkr.FuncFormatter(lambda x, p: f'{(x * 10*unit.femtoseconds).value_in_unit(unit.picoseconds):.1f}ns')) plt.savefig(os.path.join(output_dir, 'graphs.png'), bbox_inches='tight')
36.863636
139
0.750925
128
811
4.59375
0.625
0.045918
0.037415
0.054422
0.064626
0
0
0
0
0
0
0.021216
0.128237
811
22
140
36.863636
0.810467
0.060419
0
0
0
0
0.271053
0.219737
0
0
0
0
0
1
0
false
0
0.375
0
0.375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
7070e1e2b4357ac3610eef2cb5402b0e25e73572
13,883
py
Python
resources/lib/api/api_requests.py
sajo84/plugin.video.netflix
757cd2866f2c89c777d12a2772484fe675743543
[ "MIT" ]
null
null
null
resources/lib/api/api_requests.py
sajo84/plugin.video.netflix
757cd2866f2c89c777d12a2772484fe675743543
[ "MIT" ]
null
null
null
resources/lib/api/api_requests.py
sajo84/plugin.video.netflix
757cd2866f2c89c777d12a2772484fe675743543
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix) Copyright (C) 2018 Caphm (original implementation module) Methods to execute requests to Netflix API SPDX-License-Identifier: MIT See LICENSES/MIT.md for more information. """ from __future__ import absolute_import, division, unicode_literals from functools import wraps import resources.lib.common as common import resources.lib.kodi.ui as ui from resources.lib.common import cache_utils from resources.lib.database.db_utils import TABLE_SESSION from resources.lib.globals import g from .exceptions import APIError, MissingCredentialsError, MetadataNotAvailable, CacheMiss from .paths import EPISODES_PARTIAL_PATHS, ART_PARTIAL_PATHS, build_paths def catch_api_errors(func): """Decorator that catches API errors and displays a notification""" # pylint: disable=missing-docstring @wraps(func) def api_error_wrapper(*args, **kwargs): try: return func(*args, **kwargs) except APIError as exc: ui.show_notification(common.get_local_string(30118).format(exc)) return api_error_wrapper def logout(): """Logout of the current account""" common.make_call('logout', g.BASE_URL) def login(ask_credentials=True): """Perform a login""" try: if ask_credentials: ui.ask_credentials() if not common.make_call('login'): # Login not validated # ui.show_notification(common.get_local_string(30009)) return False return True except MissingCredentialsError: # Aborted from user or leave an empty field ui.show_notification(common.get_local_string(30112)) raise def update_lolomo_context(context_name): """Update the lolomo list by context""" lolomo_root = g.LOCAL_DB.get_value('lolomo_root_id', '', TABLE_SESSION) context_index = g.LOCAL_DB.get_value('lolomo_{}_index'.format(context_name.lower()), '', TABLE_SESSION) context_id = g.LOCAL_DB.get_value('lolomo_{}_id'.format(context_name.lower()), '', TABLE_SESSION) if not context_index: return path = [['lolomos', lolomo_root, 'refreshListByContext']] # The fourth parameter is 
like a request-id, but it doesn't seem to match to # serverDefs/date/requestId of reactContext (g.LOCAL_DB.get_value('request_id', table=TABLE_SESSION)) # nor to request_id of the video event request # has a kind of relationship with renoMessageId suspect with the logblob but i'm not sure because my debug crashed, # and i am no longer able to trace the source. # I noticed also that this request can also be made with the fourth parameter empty, # but it still doesn't update the continueWatching list of lolomo, that is strange because of no error params = [common.enclose_quotes(context_id), context_index, common.enclose_quotes(context_name), ''] # path_suffixs = [ # [['trackIds', 'context', 'length', 'genreId', 'videoId', 'displayName', 'isTallRow', 'isShowAsARow', # 'impressionToken', 'showAsARow', 'id', 'requestId']], # [{'from': 0, 'to': 100}, 'reference', 'summary'], # [{'from': 0, 'to': 100}, 'reference', 'title'], # [{'from': 0, 'to': 100}, 'reference', 'titleMaturity'], # [{'from': 0, 'to': 100}, 'reference', 'userRating'], # [{'from': 0, 'to': 100}, 'reference', 'userRatingRequestId'], # [{'from': 0, 'to': 100}, 'reference', 'boxarts', '_342x192', 'jpg'], # [{'from': 0, 'to': 100}, 'reference', 'promoVideo'] # ] callargs = { 'callpaths': path, 'params': params, # 'path_suffixs': path_suffixs } try: response = common.make_http_call('callpath_request', callargs) common.debug('refreshListByContext response: {}', response) except Exception: # pylint: disable=broad-except # I do not know the reason yet, but sometimes continues to return error 401, # making it impossible to update the bookmark position if not common.is_debug_verbose(): return ui.show_notification(title=common.get_local_string(30105), msg='An error prevented the update the lolomo context on netflix', time=10000) def update_videoid_bookmark(video_id): """Update the videoid bookmark position""" # You can check if this function works through the official android app # by checking if the status bar 
watched of the video will be updated callargs = { 'callpaths': [['refreshVideoCurrentPositions']], 'params': ['[' + video_id + ']', '[]'], } try: response = common.make_http_call('callpath_request', callargs) common.debug('refreshVideoCurrentPositions response: {}', response) except Exception: # pylint: disable=broad-except # I do not know the reason yet, but sometimes continues to return error 401, # making it impossible to update the bookmark position ui.show_notification(title=common.get_local_string(30105), msg='An error prevented the update the status watched on netflix', time=10000) @common.time_execution(immediate=False) def get_video_raw_data(videoids, custom_partial_path=None): # Do not apply cache to this method """Retrieve raw data for specified video id's""" video_ids = [int(videoid.value) for videoid in videoids] common.debug('Requesting video raw data for {}', video_ids) if not custom_partial_path: paths = build_paths(['videos', video_ids], EPISODES_PARTIAL_PATHS) if videoids[0].mediatype == common.VideoId.EPISODE: paths.extend(build_paths(['videos', int(videoids[0].tvshowid)], ART_PARTIAL_PATHS + [['title']])) else: paths = build_paths(['videos', video_ids], custom_partial_path) return common.make_call('path_request', paths) @catch_api_errors @common.time_execution(immediate=False) def rate(videoid, rating): """Rate a video on Netflix""" common.debug('Rating {} as {}', videoid.value, rating) # In opposition to Kodi, Netflix uses a rating from 0 to in 0.5 steps rating = min(10, max(0, rating)) / 2 common.make_call( 'post', {'endpoint': 'set_video_rating', 'data': { 'titleId': int(videoid.value), 'rating': rating}}) ui.show_notification(common.get_local_string(30127).format(rating * 2)) @catch_api_errors @common.time_execution(immediate=False) def rate_thumb(videoid, rating, track_id_jaw): """Rate a video on Netflix""" common.debug('Thumb rating {} as {}', videoid.value, rating) event_uuid = common.get_random_uuid() response = common.make_call( 'post', 
{'endpoint': 'set_thumb_rating', 'data': { 'eventUuid': event_uuid, 'titleId': int(videoid.value), 'trackId': track_id_jaw, 'rating': rating, }}) if response.get('status', '') == 'success': ui.show_notification(common.get_local_string(30045).split('|')[rating]) else: common.error('Rating thumb error, response detail: {}', response) ui.show_error_info('Rating error', 'Error type: {}' + response.get('status', '--'), True, True) @catch_api_errors @common.time_execution(immediate=False) def update_my_list(videoid, operation, params): """Call API to update my list with either add or remove action""" common.debug('My List: {} {}', operation, videoid) common.make_call( 'post', {'endpoint': 'update_my_list', 'data': { 'operation': operation, 'videoId': videoid.value}}) ui.show_notification(common.get_local_string(30119)) _update_mylist_cache(videoid, operation, params) def _update_mylist_cache(videoid, operation, params): """Update the my list cache to speeding up page load""" # Avoids making a new request to the server to request the entire list updated perpetual_range_start = params.get('perpetual_range_start') mylist_identifier = 'mylist' if perpetual_range_start and perpetual_range_start != 'None': mylist_identifier += '_' + perpetual_range_start if operation == 'remove': try: video_list_sorted_data = g.CACHE.get(cache_utils.CACHE_MYLIST, mylist_identifier) del video_list_sorted_data.videos[videoid.value] g.CACHE.add(cache_utils.CACHE_MYLIST, mylist_identifier, video_list_sorted_data) except CacheMiss: pass try: my_list_videoids = g.CACHE.get(cache_utils.CACHE_MYLIST, 'my_list_items') my_list_videoids.remove(videoid) g.CACHE.add(cache_utils.CACHE_MYLIST, 'my_list_items', my_list_videoids) except CacheMiss: pass else: try: common.make_call('add_videoids_to_video_list_cache', {'cache_bucket': cache_utils.CACHE_MYLIST, 'cache_identifier': mylist_identifier, 'video_ids': [videoid.value]}) except CacheMiss: pass try: my_list_videoids = g.CACHE.get(cache_utils.CACHE_MYLIST, 
'my_list_items') my_list_videoids.append(videoid) g.CACHE.add(cache_utils.CACHE_MYLIST, 'my_list_items', my_list_videoids) except CacheMiss: pass @common.time_execution(immediate=False) def get_metadata(videoid, refresh=False): """Retrieve additional metadata for the given VideoId""" # Delete the cache if we need to refresh the all metadata if refresh: g.CACHE.delete(cache_utils.CACHE_METADATA, videoid.value) metadata_data = {}, None if videoid.mediatype not in [common.VideoId.EPISODE, common.VideoId.SEASON]: metadata_data = _metadata(videoid), None elif videoid.mediatype == common.VideoId.SEASON: metadata_data = _metadata(videoid.derive_parent(None)), None else: try: metadata_data = _episode_metadata(videoid) except KeyError as exc: # Episode metadata may not exist if its a new episode and cached # data is outdated. In this case, delete the cache entry and # try again safely (if it doesn't exist this time, there is no # metadata for the episode, so we assign an empty dict). common.debug('{}, refreshing cache', exc) g.CACHE.delete(cache_utils.CACHE_METADATA, videoid.tvshowid) try: metadata_data = _episode_metadata(videoid) except KeyError as exc: common.error(exc) return metadata_data @common.time_execution(immediate=False) def _episode_metadata(videoid): show_metadata = _metadata(videoid) episode_metadata, season_metadata = common.find_episode_metadata(videoid, show_metadata) return episode_metadata, season_metadata, show_metadata @common.time_execution(immediate=False) @cache_utils.cache_output(cache_utils.CACHE_METADATA, identify_from_kwarg_name='video_id') def _metadata(video_id): """Retrieve additional metadata for a video.This is a separate method from metadata(videoid) to work around caching issues when new episodes are added to a show by Netflix.""" import time common.debug('Requesting metadata for {}', video_id) # Always use params 'movieid' to all videoid identifier ipc_call = common.make_http_call if g.IS_SERVICE else common.make_call metadata_data = 
ipc_call( 'get', { 'endpoint': 'metadata', 'params': {'movieid': video_id.value, '_': int(time.time())} }) if not metadata_data: # This return empty # - if the metadata is no longer available # - if it has been exported a tv show/movie from a specific language profile that is not # available using profiles with other languages raise MetadataNotAvailable return metadata_data['video'] @common.time_execution(immediate=False) def get_parental_control_data(password): """Get the parental control data""" return common.make_call('parental_control_data', {'password': password}) @common.time_execution(immediate=False) def set_parental_control_data(data): """Set the parental control data""" try: common.make_call( 'post', {'endpoint': 'content_restrictions', 'data': {'action': 'update', 'authURL': data['token'], 'experience': data['experience'], 'guid': data['guid'], 'maturity': data['maturity']}} ) return True except Exception as exc: # pylint: disable=broad-except common.error('Api call profile_hub raised an error: {}', exc) return False @common.time_execution(immediate=False) def verify_pin(pin): """Send adult PIN to Netflix and verify it.""" try: return common.make_call( 'post', {'endpoint': 'pin_service', 'data': {'pin': pin}} ).get('success', False) except Exception: # pylint: disable=broad-except return False @common.time_execution(immediate=False) def verify_profile_lock(guid, pin): """Send profile PIN to Netflix and verify it.""" try: return common.make_call( 'post', {'endpoint': 'profile_lock', 'data': {'pin': pin, 'action': 'verify', 'guid': guid}} ).get('success', False) except Exception: # pylint: disable=broad-except return False
40.832353
119
0.644961
1,669
13,883
5.180947
0.23547
0.017347
0.019429
0.035619
0.345091
0.304152
0.254192
0.189777
0.180062
0.163294
0
0.010429
0.247137
13,883
339
120
40.952802
0.816877
0.258806
0
0.364807
0
0
0.12205
0.012837
0
0
0
0
0
1
0.077253
false
0.025751
0.042918
0
0.193133
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
707187b48b45b72042bc1f7d90faf6f81262ae8b
1,138
py
Python
cybox/test/common/extracted_features_test.py
siemens/python-cybox
b692a98c8a62bd696e2a0dda802ada7359853482
[ "BSD-3-Clause" ]
null
null
null
cybox/test/common/extracted_features_test.py
siemens/python-cybox
b692a98c8a62bd696e2a0dda802ada7359853482
[ "BSD-3-Clause" ]
null
null
null
cybox/test/common/extracted_features_test.py
siemens/python-cybox
b692a98c8a62bd696e2a0dda802ada7359853482
[ "BSD-3-Clause" ]
1
2019-04-16T18:37:32.000Z
2019-04-16T18:37:32.000Z
# Copyright (c) 2014, The MITRE Corporation. All rights reserved. # See LICENSE.txt for complete terms. import unittest from cybox.common import ExtractedFeatures from cybox.test import EntityTestCase # Need to do this so the binding class is registered. import cybox.bindings.cybox_common from cybox.bindings.address_object import AddressObjectType setattr(cybox.bindings.cybox_common, "AddressObjectType", AddressObjectType) class TestExtractedFeatures(EntityTestCase, unittest.TestCase): klass = ExtractedFeatures _full_dict = { 'strings': [ {'encoding': u"ASCII", 'string_value': u"A String", 'length': 8}, {'encoding': u"UTF-8", 'string_value': u"Another String"}, ], 'imports': [u"CreateFileA", u"LoadLibrary"], 'functions': [u"DoSomething", u"DoSomethingElse"], #TODO: Use CodeObject instead of AddressObject 'code_snippets': [ {'address_value': u"8.8.8.8", 'xsi:type': "AddressObjectType"}, {'address_value': u"1.2.3.4", 'xsi:type': "AddressObjectType"}, ], } if __name__ == "__main__": unittest.main()
33.470588
77
0.672232
128
1,138
5.835938
0.59375
0.032129
0.048193
0.064257
0
0
0
0
0
0
0
0.015351
0.198594
1,138
33
78
34.484848
0.803728
0.172232
0
0.090909
0
0
0.295624
0
0
0
0
0.030303
0
1
0
false
0
0.272727
0
0.409091
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
7071e73ce6bf98c0a8ebbcf53ffea1005ca30c0c
5,640
py
Python
openslides/motions/config_variables.py
rolandgeider/OpenSlides
331141c17cb23da26e377d4285efdb4a50753a59
[ "MIT" ]
null
null
null
openslides/motions/config_variables.py
rolandgeider/OpenSlides
331141c17cb23da26e377d4285efdb4a50753a59
[ "MIT" ]
null
null
null
openslides/motions/config_variables.py
rolandgeider/OpenSlides
331141c17cb23da26e377d4285efdb4a50753a59
[ "MIT" ]
null
null
null
from django.core.validators import MinValueValidator from openslides.core.config import ConfigVariable from openslides.poll.models import PERCENT_BASE_CHOICES from .models import Workflow def get_workflow_choices(): """ Returns a list of all workflows to be used as choices for the config variable 'motions_workflow'. Each list item contains the pk and the display name. """ return [{'value': str(workflow.pk), 'display_name': workflow.name} for workflow in Workflow.objects.all()] def get_config_variables(): """ Generator which yields all config variables of this app. They are grouped in 'General', 'Amendments', 'Supporters', 'Voting and ballot papers' and 'PDF'. The generator has to be evaluated during app loading (see apps.py). """ # General yield ConfigVariable( name='motions_workflow', default_value='1', input_type='choice', label='Workflow of new motions', choices=get_workflow_choices, weight=310, group='Motions', subgroup='General') yield ConfigVariable( name='motions_identifier', default_value='per_category', input_type='choice', label='Identifier', choices=( {'value': 'per_category', 'display_name': 'Numbered per category'}, {'value': 'serially_numbered', 'display_name': 'Serially numbered'}, {'value': 'manually', 'display_name': 'Set it manually'}), weight=315, group='Motions', subgroup='General') yield ConfigVariable( name='motions_preamble', default_value='The assembly may decide,', label='Motion preamble', weight=320, group='Motions', subgroup='General', translatable=True) yield ConfigVariable( name='motions_stop_submitting', default_value=False, input_type='boolean', label='Stop submitting new motions by non-staff users', weight=325, group='Motions', subgroup='General') yield ConfigVariable( name='motions_allow_disable_versioning', default_value=False, input_type='boolean', label='Allow to disable versioning', weight=330, group='Motions', subgroup='General') # Amendments # Amendments currently not implemented. (TODO: Implement it like in OpenSlides 1.7.) 
yield ConfigVariable( name='motions_amendments_enabled', default_value=False, input_type='boolean', label='Activate amendments', hidden=True, weight=335, group='Motions', subgroup='Amendments') yield ConfigVariable( name='motions_amendments_prefix', default_value='A', label='Prefix for the identifier for amendments', hidden=True, weight=340, group='Motions', subgroup='Amendments') # Supporters yield ConfigVariable( name='motions_min_supporters', default_value=0, input_type='integer', label='Number of (minimum) required supporters for a motion', help_text='Choose 0 to disable the supporting system.', weight=345, group='Motions', subgroup='Supporters', validators=(MinValueValidator(0),)) yield ConfigVariable( name='motions_remove_supporters', default_value=False, input_type='boolean', label='Remove all supporters of a motion if a submitter edits his motion in early state', weight=350, group='Motions', subgroup='Supporters') # Voting and ballot papers yield ConfigVariable( name='motions_poll_100_percent_base', default_value='WITHOUT_INVALID', input_type='choice', label='The 100 % base of a voting result consists of', choices=PERCENT_BASE_CHOICES, weight=355, group='Motions', subgroup='Voting and ballot papers') yield ConfigVariable( name='motions_pdf_ballot_papers_selection', default_value='CUSTOM_NUMBER', input_type='choice', label='Number of ballot papers (selection)', choices=( {'value': 'NUMBER_OF_DELEGATES', 'display_name': 'Number of all delegates'}, {'value': 'NUMBER_OF_ALL_PARTICIPANTS', 'display_name': 'Number of all participants'}, {'value': 'CUSTOM_NUMBER', 'display_name': 'Use the following custom number'}), weight=360, group='Motions', subgroup='Voting and ballot papers') yield ConfigVariable( name='motions_pdf_ballot_papers_number', default_value=8, input_type='integer', label='Custom number of ballot papers', weight=365, group='Motions', subgroup='Voting and ballot papers', validators=(MinValueValidator(1),)) # PDF yield ConfigVariable( 
name='motions_pdf_title', default_value='Motions', label='Title for PDF document (all motions)', weight=370, group='Motions', subgroup='PDF', translatable=True) yield ConfigVariable( name='motions_pdf_preamble', default_value='', label='Preamble text for PDF document (all motions)', weight=375, group='Motions', subgroup='PDF') yield ConfigVariable( name='motions_pdf_paragraph_numbering', default_value=False, input_type='boolean', label='Show paragraph numbering (only in PDF)', weight=380, group='Motions', subgroup='PDF')
30.989011
98
0.626064
594
5,640
5.791246
0.277778
0.082849
0.100291
0.130814
0.306395
0.246802
0.181686
0.114535
0.05
0.05
0
0.014324
0.269681
5,640
181
99
31.160221
0.82083
0.091489
0
0.439716
0
0
0.335765
0.060331
0
0
0
0.005525
0
1
0.014184
true
0
0.028369
0
0.049645
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
7071f4925611f43a47a55fea91ffd98ac1480e78
22,743
py
Python
spect.py
philips-software/random_forest
5cbc95aa57ac33260720afd3fc779e7d71b5658c
[ "MIT" ]
2
2020-01-09T23:26:30.000Z
2021-01-27T18:34:15.000Z
spect.py
Charterhouse/random_forest
b842f08fee1054dbff78b6fb3afd4006a7f14a6d
[ "MIT" ]
null
null
null
spect.py
Charterhouse/random_forest
b842f08fee1054dbff78b6fb3afd4006a7f14a6d
[ "MIT" ]
2
2020-03-03T18:30:14.000Z
2021-09-06T13:55:06.000Z
from mpyc.runtime import mpc from src.dataset import ObliviousDataset, Sample from src.output import output from src.secint import secint as s from src.forest import train_forest def sample(ins, out): return Sample([s(i) for i in ins], s(out)) spect_samples = ObliviousDataset.create( sample([1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0], 1), sample([1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1], 1), sample([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0], 0), sample([1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0], 1), sample([1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1), sample([1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 1), sample([1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1], 1), sample([1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0], 1), sample([1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0], 0), sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1), sample([1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1], 1), sample([1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1], 1), sample([1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1], 1), sample([1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1], 0), sample([1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1), sample([1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1], 0), sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0], 0), sample([1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1), sample([1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 
0, 0, 0, 1, 1, 0], 0), sample([1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0), sample([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0], 0), sample([1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0], 0), sample([1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1], 0), sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], 0), sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1), sample([1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1], 1), sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0], 1), sample([1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1], 1), sample([1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1], 0), sample([1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0], 1), sample([1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1], 1), sample([1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0), sample([1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0], 0), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1], 0), sample([0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0], 0), 
sample([0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], 0), sample([0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0], 1), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1), sample([0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0], 0), sample([0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1], 0), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 1, 0, 0, 0, 
1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0), sample([0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], 0), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0], 0), sample([0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0), sample([0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1], 1), sample([0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0], 0), sample([1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0], 1), sample([1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1], 0), sample([1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0], 1), sample([1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1], 1), sample([1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0], 1), sample([1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0], 0), sample([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0], 0), sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0], 0), sample([1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0), sample([1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0], 1), sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0], 0), sample([1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0], 0), sample([1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1], 0), sample([1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1], 1), sample([1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 
0, 0, 0, 0, 1, 0, 0, 0, 0], 1), sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], 0), sample([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1], 0), sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0], 0), sample([1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1], 1), sample([1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1], 0), sample([1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0], 0), sample([1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], 0), sample([1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1], 0), sample([1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0], 0), sample([1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0], 0), sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], 0), sample([1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0], 0), sample([1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1], 0), sample([1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1], 1), sample([1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1), sample([1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], 0), sample([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1), sample([1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0], 0), sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0], 0), sample([1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1], 1), sample([1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1], 1), sample([1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1), sample([1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0], 0), sample([1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 
1], 1), sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1), sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], 0), sample([1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 1), sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], 1), sample([1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1], 1), sample([1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1], 0), sample([1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 1), sample([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 1), sample([1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1], 1), sample([1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], 1), sample([1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0], 1), sample([1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1], 1), sample([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1), sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1), sample([1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0], 0), sample([1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0], 1), sample([1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0], 1), sample([1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0], 0), sample([1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1), sample([1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1], 1), sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], 0), sample([1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1], 0), sample([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1], 1), sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0], 0), sample([1, 1, 1, 
0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0], 0), sample([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1], 1), sample([1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1], 1), sample([1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1), sample([1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1], 1), sample([1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1), sample([1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0], 0), sample([1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1], 1), sample([1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1], 1), sample([1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1], 0), sample([1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1], 1), sample([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1], 1), sample([1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1], 1), sample([1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1], 1), sample([1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1), sample([1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1], 1), sample([1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0], 0), sample([1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1], 1), sample([1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0], 0), sample([1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0], 1), sample([1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1], 1), sample([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], 0), sample([1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0], 0), sample([1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1], 1), sample([1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 
1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0], 0), sample([1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0], 1), sample([1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0], 0), sample([1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0], 1), sample([1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1], 1), sample([1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1], 1), sample([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1], 1), sample([1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0], 0), sample([1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1], 1), sample([1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1], 0), sample([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0], 0), sample([1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], 0), sample([1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 1), sample([1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0), sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0), sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1), sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0], 0), sample([1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], 0), sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], 1), sample([1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0), sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 
1, 0, 0], 0), sample([1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1], 1), sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0), sample([1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1], 1), sample([1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1), sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1], 0), sample([1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0], 0), sample([1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0], 0), sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], 1), sample([1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1], 0), sample([1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 1), sample([1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1], 1), sample([1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1], 0), sample([1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1], 0), sample([1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1], 1), sample([1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1], 0), sample([1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1], 1), sample([1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0], 1), sample([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1], 1), sample([1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0], 1), sample([1, 
1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0], 1), sample([1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], 1), sample([1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1], 1), sample([1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1], 1), sample([1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1], 1), sample([1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0], 0), sample([1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], 1), sample([1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1], 0), sample([1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0], 0), sample([1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1], 1), sample([1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1], 1), sample([1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1], 0), sample([1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0), sample([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 1), sample([1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1], 1), sample([1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0], 0), sample([1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1], 1), sample([1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1], 0), sample([1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1], 1), sample([1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0], 0), sample([1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1], 0), sample([1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1], 1), sample([1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], 1), sample([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([1, 1, 1, 1, 0, 1, 1, 0, 0, 
0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0], 0), sample([1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0], 0), sample([1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1], 0), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1], 0), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0], 1), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], 0), sample([0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], 0), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0), sample([0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0], 0), sample([0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0], 0), sample([0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0], 0), sample([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 0) ) async def main(): async with mpc: forest = await output(await train_forest(spect_samples, amount=2, depth=4)) for index, tree in enumerate(forest): print(f"Tree #{index}") tree.pretty_print() if __name__ == '__main__': mpc.run(main())
77.357143
83
0.358
6,492
22,743
1.252157
0.006778
0.741297
0.818182
0.836019
0.952516
0.952516
0.952516
0.952516
0.952516
0.951655
0
0.398637
0.322429
22,743
293
84
77.62116
0.128877
0
0
0.183099
0
0
0.000923
0
0
0
0
0
0
1
0.003521
false
0
0.017606
0.003521
0.024648
0.007042
0
0
1
null
1
1
1
1
1
1
1
1
1
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
12
70727332781524d67d9e08873ec4e97cf38aa95f
6,578
py
Python
projects/wgan/gaussian_gan.py
niujinshuchong/stochastic_processes
ea2538d2f09c39bec1834df5addd37e0699a88bf
[ "MIT" ]
null
null
null
projects/wgan/gaussian_gan.py
niujinshuchong/stochastic_processes
ea2538d2f09c39bec1834df5addd37e0699a88bf
[ "MIT" ]
null
null
null
projects/wgan/gaussian_gan.py
niujinshuchong/stochastic_processes
ea2538d2f09c39bec1834df5addd37e0699a88bf
[ "MIT" ]
null
null
null
import argparse import os import numpy as np import math import sys import matplotlib.pyplot as plt import torchvision.transforms as transforms from torchvision.utils import save_image import random from math import * from torch.utils.data import DataLoader from torchvision import datasets from torch.autograd import Variable import torch.nn as nn import torch.nn.functional as F import torch import cv2 os.makedirs('images', exist_ok=True) parser = argparse.ArgumentParser() parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training') parser.add_argument('--batch_size', type=int, default=128, help='size of the batches') parser.add_argument('--lr', type=float, default=0.00005, help='learning rate') parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation') parser.add_argument('--latent_dim', type=int, default=100, help='dimensionality of the latent space') parser.add_argument('--img_size', type=int, default=28, help='size of each image dimension') parser.add_argument('--channels', type=int, default=1, help='number of image channels') parser.add_argument('--n_critic', type=int, default=5, help='number of training steps for discriminator per iter') parser.add_argument('--clip_value', type=float, default=0.01, help='lower and upper clip value for disc. 
weights') parser.add_argument('--sample_interval', type=int, default=400, help='interval betwen image samples') opt = parser.parse_args() print(opt) img_shape = (2,) cuda = True if torch.cuda.is_available() else False class Generator(nn.Module): def __init__(self): super(Generator, self).__init__() def block(in_feat, out_feat, normalize=True): layers = [ nn.Linear(in_feat, out_feat)] if normalize: layers.append(nn.BatchNorm1d(out_feat, 0.8)) layers.append(nn.LeakyReLU(0.2, inplace=True)) return layers self.model = nn.Sequential( *block(opt.latent_dim, 128, normalize=False), *block(128, 256), *block(256, 512), *block(512, 1024), ) self.one_hot = nn.Linear(1024, 100) self.one_hot_offset = nn.Linear(100, int(np.prod(img_shape))) self.offset = nn.Linear(1024, int(np.prod(img_shape))) self.softmax = nn.Softmax() def forward(self, z): img = self.model(z) one_hot = self.one_hot(img) one_hot_offset = self.one_hot_offset(self.softmax(one_hot)) img = one_hot_offset + self.offset(img) img = img.view(img.shape[0], *img_shape) return img class Discriminator(nn.Module): def __init__(self): super(Discriminator, self).__init__() self.model = nn.Sequential( nn.Linear(int(np.prod(img_shape)), 512), nn.LeakyReLU(0.2, inplace=True), nn.Linear(512, 256), nn.LeakyReLU(0.2, inplace=True), nn.Linear(256, 1) ) def forward(self, img): img_flat = img.view(img.shape[0], -1) validity = self.model(img_flat) return validity def gaussian_mixture(batchsize, ndim, num_labels): if ndim % 2 != 0: raise Exception("ndim must be a multiple of 2.") def sample(x, y, label, num_labels): shift = 1.4 r = 2.0 * np.pi / float(num_labels) * float(label) new_x = x * cos(r) - y * sin(r) new_y = x * sin(r) + y * cos(r) new_x += shift * cos(r) new_y += shift * sin(r) return np.array([new_x, new_y]).reshape((2,)) x_var = 0.05 y_var = 0.05 x = np.random.normal(0, x_var, (batchsize, ndim // 2)) y = np.random.normal(0, y_var, (batchsize, ndim // 2)) z = np.empty((batchsize, ndim), dtype=np.float32) for batch in 
range(batchsize): for zi in range(ndim // 2): z[batch, zi*2:zi*2+2] = sample(x[batch, zi], y[batch, zi], random.randint(0, num_labels - 1), num_labels) return z # Initialize generator and discriminator generator = Generator() discriminator = Discriminator() if cuda: generator.cuda() discriminator.cuda() # Optimizers optimizer_G = torch.optim.RMSprop(generator.parameters(), lr=opt.lr) optimizer_D = torch.optim.RMSprop(discriminator.parameters(), lr=opt.lr) Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor # ---------- # Training # ---------- batches_done = 0 for epoch in range(opt.n_epochs): for i in range(1000): imgs = gaussian_mixture(opt.batch_size, 2, 4) imgs = Tensor(imgs) #imgs = Tensor(np.random.uniform(low=1.3, high=5.7, size=(opt.batch_size, 2))) # Configure input real_imgs = Variable(imgs.type(Tensor)) # --------------------- # Train Discriminator # --------------------- optimizer_D.zero_grad() # Sample noise as generator input z = Variable(Tensor(np.random.normal(0, 1, (imgs.shape[0], opt.latent_dim)))) # Generate a batch of images fake_imgs = generator(z).detach() # Adversarial loss loss_D = -torch.mean(discriminator(real_imgs)) + torch.mean(discriminator(fake_imgs)) loss_D.backward() optimizer_D.step() # Clip weights of discriminator for p in discriminator.parameters(): p.data.clamp_(-opt.clip_value, opt.clip_value) # Train the generator every n_critic iterations if i % opt.n_critic == 0: # ----------------- # Train Generator # ----------------- optimizer_G.zero_grad() # Generate a batch of images gen_imgs = generator(z) # Adversarial loss loss_G = -torch.mean(discriminator(gen_imgs)) loss_G.backward() optimizer_G.step() print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]" % (epoch, opt.n_epochs, batches_done % 1000, 1000, loss_D.item(), loss_G.item())) if batches_done % opt.sample_interval == 0: Y = gen_imgs.detach().cpu().numpy() plt.scatter(Y[:, 0], Y[:, 1]) plt.savefig('tmp.png') plt.close() image = cv2.imread('tmp.png') 
cv2.imshow("image", image) cv2.waitKey(1) batches_done += 1
33.390863
129
0.594862
871
6,578
4.359357
0.257176
0.023703
0.044772
0.014222
0.08454
0.060047
0.030024
0.016855
0
0
0
0.030511
0.267559
6,578
196
130
33.561224
0.757576
0.074187
0
0.045113
0
0.007519
0.087838
0
0
0
0
0
0
1
0.052632
false
0
0.12782
0
0.233083
0.015038
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
707383f583ad7bbda06c20e6e87636595bd3dd55
1,302
py
Python
_669.py
elfgzp/leetCode
964c6574d310a9a6c486bf638487fd2f72b83b3f
[ "MIT" ]
3
2019-04-12T06:22:56.000Z
2019-05-04T04:25:01.000Z
_669.py
elfgzp/Leetcode
964c6574d310a9a6c486bf638487fd2f72b83b3f
[ "MIT" ]
null
null
null
_669.py
elfgzp/Leetcode
964c6574d310a9a6c486bf638487fd2f72b83b3f
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- __author__ = 'gzp' # Definition for a binary tree node. # class TreeNode(object): # def __init__(self, x): # self.val = x # self.left = None # self.right = None from utils import Tree class Solution(object): def trimBST(self, root, L, R): """ :type root: TreeNode :type L: int :type R: int :rtype: TreeNode """ return self._trimBST(root, L, R) def _trimBST(self, node, L, R): """ :type root: TreeNode :type L: int :type R: int :rtype: TreeNode """ if not node: return if node.val is not None and node.val < L: node = self._trimBST(node.right, L, R) elif node.val is not None and node.val > R: node = self._trimBST(node.left, L, R) if not node: return node.right = self._trimBST(node.right, L, R) node.left = self.trimBST(node.left, L, R) return node if __name__ == '__main__': s = Solution() root = Tree([1, 0, 2]) print(root.get_nodes()) print(s.trimBST(root, 1, 2).get_nodes()) root = Tree([3, 0, 4, None, 2, None, None, 1]) print(root.get_nodes()) print(s.trimBST(root, 1, 3).get_nodes())
22.067797
52
0.523041
178
1,302
3.685393
0.280899
0.021341
0.091463
0.030488
0.460366
0.460366
0.329268
0.329268
0.25
0.143293
0
0.015152
0.341014
1,302
58
53
22.448276
0.749417
0.236559
0
0.24
0
0
0.012318
0
0
0
0
0
0
1
0.08
false
0
0.04
0
0.32
0.16
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7074d22367368afc1491da30cd43502d5330386e
3,895
py
Python
spiders/a85.py
senlyu163/crawler
ecf95f7b356c726922b5e5d90000fda3e16ae90d
[ "Apache-2.0" ]
null
null
null
spiders/a85.py
senlyu163/crawler
ecf95f7b356c726922b5e5d90000fda3e16ae90d
[ "Apache-2.0" ]
null
null
null
spiders/a85.py
senlyu163/crawler
ecf95f7b356c726922b5e5d90000fda3e16ae90d
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- import scrapy from scrapy.linkextractors import LinkExtractor from scrapy.spiders import CrawlSpider, Rule from ..utils import extract_CN_from_content from ..items import ScrapySpiderItem import re from scrapy_splash import SplashRequest class A85Spider(CrawlSpider): name = '85' allowed_domains = ['houqi.gov.cn'] start_urls = ['http://houqi.gov.cn/kzhq/zwgk/zwgk.shtml'] rules = ( Rule(LinkExtractor(allow=r'/kzhq/[a-zA-Z]+\d+/list\.shtml'), follow=True), Rule(LinkExtractor(allow=r'/kzhq/[a-zA-Z]+/list\.shtml'), follow=True), Rule(LinkExtractor(allow=r'/kzhq/[a-z]+/list\.shtml'), follow=True), Rule(LinkExtractor(allow=r'/kzhq/gsgg/list\.shtml'), follow=True), Rule(LinkExtractor(restrict_xpaths='//ul[@class="ggnav"]//li'), callback='parse_item', follow=True), Rule(LinkExtractor(allow=r'list_\d+.shtml'), follow=True), # Rule(LinkExtractor(allow=r'Items/'), callback='parse_item', follow=True), # Rule(LinkExtractor(allow=r'Items/'), callback='parse_item', follow=True), # Rule(LinkExtractor(allow=r'Items/'), callback='parse_item', follow=True), ) def _build_request(self, rule, link): r = SplashRequest(url=link.url, callback=self._response_downloaded, args={"wait": 0.5}) r.meta.update(rule=rule, link_text=link.text) return r def _requests_to_follow(self, response): # if not isinstance(response, HtmlResponse): # return seen = set() for n, rule in enumerate(self._rules): links = [lnk for lnk in rule.link_extractor.extract_links(response) if lnk not in seen] if links and rule.process_links: links = rule.process_links(links) for link in links: seen.add(link) r = self._build_request(n, link) yield rule.process_request(r) def parse_item(self, response): # print(response.url) # item = {} #item['domain_id'] = response.xpath('//input[@id="sid"]/@value').get() #item['name'] = response.xpath('//div[@id="name"]').get() #item['description'] = response.xpath('//div[@id="description"]').get() # return item if ('qzqd' in response.url) or ('gsgg' in response.url) or 
('xwfbh' in response.url) or ('hqxw' in response.url) or ('tzgg' in response.url): try: item = ScrapySpiderItem() item['url'] = response.url # date = response.xpath('/html/body/div[3]/div/div[2]/div[1]/text()').extract_first() date = re.search(r"(\d{4}-\d{2}-\d{2})", date).groups()[0] item['date'] = date title = response.xpath('//div[@class="content"]/h1/text()').extract_first() item['title'] = title contents = response.xpath('//div[@class="zhengw"]').extract() item['contents'] = extract_CN_from_content(contents) return item except: print("there have no date in case 1.") else: try: item = ScrapySpiderItem() item['url'] = response.url # date = response.xpath('/html/body/div[3]/div/div[2]/div[1]/p[7]/em/text()').extract_first() date = re.search(r"(\d{4}-\d{2}-\d{2})", date).groups()[0] item['date'] = date title = response.xpath('/html/body/div[3]/div/div[2]/div[1]/p[3]/em/text()').extract_first() item['title'] = title contents = response.xpath('//div[@class="zhengw"]').extract() item['contents'] = extract_CN_from_content(contents) return item except: print("there have no date in case 2.")
44.261364
149
0.567394
475
3,895
4.568421
0.267368
0.070507
0.081106
0.084793
0.501382
0.501382
0.484793
0.482488
0.467281
0.44424
0
0.010175
0.268293
3,895
87
150
44.770115
0.751228
0.137099
0
0.31746
0
0.047619
0.174836
0.103407
0
0
0
0
0
1
0.047619
false
0
0.111111
0
0.285714
0.031746
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7074f909c5703484cedced7916b80fe2527f9870
118
py
Python
src/GUM_Dispenser/__init__.py
jrdnfike/GUM_Dispenser
b2655339445af4b1fb328676e2ec9f239dcd6bfc
[ "MIT" ]
null
null
null
src/GUM_Dispenser/__init__.py
jrdnfike/GUM_Dispenser
b2655339445af4b1fb328676e2ec9f239dcd6bfc
[ "MIT" ]
null
null
null
src/GUM_Dispenser/__init__.py
jrdnfike/GUM_Dispenser
b2655339445af4b1fb328676e2ec9f239dcd6bfc
[ "MIT" ]
null
null
null
__all__ = ['GUM_Dispenser_Main', 'GUM_setup_parser', 'GUM_Describe_Source', 'GUM_Generate_NOMNOML', 'GUM_Exceptions']
59
117
0.79661
15
118
5.4
0.733333
0
0
0
0
0
0
0
0
0
0
0
0.059322
118
1
118
118
0.72973
0
0
0
1
0
0.737288
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
0
0
0
0
1
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
70755d5d03c099f6085f0bb0e914a4c7034022e9
2,589
py
Python
skeleton/functional.py
dogoncouch/dogoncouch-misc
46e020cc541cc6cf19edc0114a73f24e96ce15d0
[ "MIT" ]
3
2020-02-05T07:25:01.000Z
2021-12-24T20:08:03.000Z
skeleton/functional.py
dogoncouch/dogoncouch-misc
46e020cc541cc6cf19edc0114a73f24e96ce15d0
[ "MIT" ]
null
null
null
skeleton/functional.py
dogoncouch/dogoncouch-misc
46e020cc541cc6cf19edc0114a73f24e96ce15d0
[ "MIT" ]
2
2018-02-24T18:59:29.000Z
2020-06-14T15:15:19.000Z
#!/usr/bin/env python # MIT License # # Copyright (c) 2017 Dan Persons (dpersonsdev@gmail.com) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
from argparse import ArgumentParser from argparse import FileType from configparser import ConfigParser from os.path import isfile __version__ = '0.1' def get_args(): """Set argument options""" arg_parser = ArgumentParser() arg_parser.add_argument('--version', action = 'version', version = '%(prog)s ' + str(__version__)) arg_parser.add_argument('-c', action = 'store', dest = 'config', default = '/etc/nothing.conf', help = ('set the config file')) arg_parser.add_argument('--full', action = 'store_true', help = ('Do nothing to the fullest')) arg_parser.add_argument('files', type = FileType('r'), metavar='FILE', nargs = '?', help = ('set a file with which to do nothing')) args = arg_parser.parse_args() return args def get_config(configfile): """Read the config file""" config = ConfigParser() if isfile(configfile): myconf = args.config config.read(myconf) else: return None def main_event(): """Do the actual nothing""" pass def run_script(): """Run the program that does nothing""" try: args = get_args() config = get_config(args.config) main_event() except KeyboardInterrupt: print('\nExiting on KeyboardInterrupt') def main(): run_script() if __name__ == "__main__": main()
28.450549
80
0.679413
341
2,589
5.052786
0.486804
0.051074
0.027858
0.046431
0
0
0
0
0
0
0
0.002996
0.226342
2,589
90
81
28.766667
0.857214
0.467362
0
0
0
0
0.151085
0
0
0
0
0
0
1
0.125
false
0.025
0.1
0
0.25
0.025
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7076835d176f91ff169e5946974bfd4dbfe39989
14,077
py
Python
src/silx/gui/plot/tools/RadarView.py
rnwatanabe/silx
b0395f4a06c048b7778dc04ada828edd195ef02d
[ "CC0-1.0", "MIT" ]
94
2016-03-04T17:25:53.000Z
2022-03-18T18:05:23.000Z
src/silx/gui/plot/tools/RadarView.py
rnwatanabe/silx
b0395f4a06c048b7778dc04ada828edd195ef02d
[ "CC0-1.0", "MIT" ]
2,841
2016-01-21T09:06:49.000Z
2022-03-18T14:53:56.000Z
src/silx/gui/plot/tools/RadarView.py
rnwatanabe/silx
b0395f4a06c048b7778dc04ada828edd195ef02d
[ "CC0-1.0", "MIT" ]
71
2015-09-30T08:35:35.000Z
2022-03-16T07:16:28.000Z
# coding: utf-8 # /*########################################################################## # # Copyright (c) 2015-2018 European Synchrotron Radiation Facility # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # ###########################################################################*/ """QWidget displaying an overview of a 2D plot. This shows the available range of the data, and the current location of the plot view. """ __authors__ = ["T. Vincent"] __license__ = "MIT" __date__ = "22/02/2021" import logging import weakref from ... 
import qt from ...utils import LockReentrant _logger = logging.getLogger(__name__) class _DraggableRectItem(qt.QGraphicsRectItem): """RectItem which signals its change through visibleRectDragged.""" def __init__(self, *args, **kwargs): super(_DraggableRectItem, self).__init__( *args, **kwargs) self._previousCursor = None self.setFlag(qt.QGraphicsItem.ItemIsMovable) self.setFlag(qt.QGraphicsItem.ItemSendsGeometryChanges) self.setAcceptHoverEvents(True) self._ignoreChange = False self._constraint = 0, 0, 0, 0 def setConstraintRect(self, left, top, width, height): """Set the constraint rectangle for dragging. The coordinates are in the _DraggableRectItem coordinate system. This constraint only applies to modification through interaction (i.e., this constraint is not applied to change through API). If the _DraggableRectItem is smaller than the constraint rectangle, the _DraggableRectItem remains within the constraint rectangle. If the _DraggableRectItem is wider than the constraint rectangle, the constraint rectangle remains within the _DraggableRectItem. 
""" self._constraint = left, left + width, top, top + height def setPos(self, *args, **kwargs): """Overridden to ignore changes from API in itemChange.""" self._ignoreChange = True super(_DraggableRectItem, self).setPos(*args, **kwargs) self._ignoreChange = False def moveBy(self, *args, **kwargs): """Overridden to ignore changes from API in itemChange.""" self._ignoreChange = True super(_DraggableRectItem, self).moveBy(*args, **kwargs) self._ignoreChange = False def itemChange(self, change, value): """Callback called before applying changes to the item.""" if (change == qt.QGraphicsItem.ItemPositionChange and not self._ignoreChange): # Makes sure that the visible area is in the data # or that data is in the visible area if area is too wide x, y = value.x(), value.y() xMin, xMax, yMin, yMax = self._constraint if self.rect().width() <= (xMax - xMin): if x < xMin: value.setX(xMin) elif x > xMax - self.rect().width(): value.setX(xMax - self.rect().width()) else: if x > xMin: value.setX(xMin) elif x < xMax - self.rect().width(): value.setX(xMax - self.rect().width()) if self.rect().height() <= (yMax - yMin): if y < yMin: value.setY(yMin) elif y > yMax - self.rect().height(): value.setY(yMax - self.rect().height()) else: if y > yMin: value.setY(yMin) elif y < yMax - self.rect().height(): value.setY(yMax - self.rect().height()) if self.pos() != value: # Notify change through signal views = self.scene().views() assert len(views) == 1 views[0].visibleRectDragged.emit( value.x() + self.rect().left(), value.y() + self.rect().top(), self.rect().width(), self.rect().height()) return value return super(_DraggableRectItem, self).itemChange( change, value) def hoverEnterEvent(self, event): """Called when the mouse enters the rectangle area""" self._previousCursor = self.cursor() self.setCursor(qt.Qt.OpenHandCursor) def hoverLeaveEvent(self, event): """Called when the mouse leaves the rectangle area""" if self._previousCursor is not None: self.setCursor(self._previousCursor) 
self._previousCursor = None class RadarView(qt.QGraphicsView): """Widget presenting a synthetic view of a 2D area and the current visible area. Coordinates are as in QGraphicsView: x goes from left to right and y goes from top to bottom. This widget preserves the aspect ratio of the areas. The 2D area and the visible area can be set with :meth:`setDataRect` and :meth:`setVisibleRect`. When the visible area has been dragged by the user, its new position is signaled by the *visibleRectDragged* signal. It is possible to invert the direction of the axes by using the :meth:`scale` method of QGraphicsView. """ visibleRectDragged = qt.Signal(float, float, float, float) """Signals that the visible rectangle has been dragged. It provides: left, top, width, height in data coordinates. """ _DATA_PEN = qt.QPen(qt.QColor('white')) _DATA_BRUSH = qt.QBrush(qt.QColor('light gray')) _ACTIVEDATA_PEN = qt.QPen(qt.QColor('black')) _ACTIVEDATA_BRUSH = qt.QBrush(qt.QColor('transparent')) _ACTIVEDATA_PEN.setWidth(2) _ACTIVEDATA_PEN.setCosmetic(True) _VISIBLE_PEN = qt.QPen(qt.QColor('blue')) _VISIBLE_PEN.setWidth(2) _VISIBLE_PEN.setCosmetic(True) _VISIBLE_BRUSH = qt.QBrush(qt.QColor(0, 0, 0, 0)) _TOOLTIP = 'Radar View:\nRed contour: Visible area\nGray area: The image' _PIXMAP_SIZE = 256 def __init__(self, parent=None): self.__plotRef = None self._scene = qt.QGraphicsScene() self._dataRect = self._scene.addRect(0, 0, 1, 1, self._DATA_PEN, self._DATA_BRUSH) self._imageRect = self._scene.addRect(0, 0, 1, 1, self._ACTIVEDATA_PEN, self._ACTIVEDATA_BRUSH) self._imageRect.setVisible(False) self._scatterRect = self._scene.addRect(0, 0, 1, 1, self._ACTIVEDATA_PEN, self._ACTIVEDATA_BRUSH) self._scatterRect.setVisible(False) self._curveRect = self._scene.addRect(0, 0, 1, 1, self._ACTIVEDATA_PEN, self._ACTIVEDATA_BRUSH) self._curveRect.setVisible(False) self._visibleRect = _DraggableRectItem(0, 0, 1, 1) self._visibleRect.setPen(self._VISIBLE_PEN) self._visibleRect.setBrush(self._VISIBLE_BRUSH) 
self._scene.addItem(self._visibleRect) super(RadarView, self).__init__(self._scene, parent) self.setHorizontalScrollBarPolicy(qt.Qt.ScrollBarAlwaysOff) self.setVerticalScrollBarPolicy(qt.Qt.ScrollBarAlwaysOff) self.setFocusPolicy(qt.Qt.NoFocus) self.setStyleSheet('border: 0px') self.setToolTip(self._TOOLTIP) self.__reentrant = LockReentrant() self.visibleRectDragged.connect(self._viewRectDragged) self.__timer = qt.QTimer(self) self.__timer.timeout.connect(self._updateDataContent) def sizeHint(self): # """Overridden to avoid sizeHint to depend on content size.""" return self.minimumSizeHint() def wheelEvent(self, event): # """Overridden to disable vertical scrolling with wheel.""" event.ignore() def resizeEvent(self, event): # """Overridden to fit current content to new size.""" self.fitInView(self._scene.itemsBoundingRect(), qt.Qt.KeepAspectRatio) super(RadarView, self).resizeEvent(event) def setDataRect(self, left, top, width, height): """Set the bounds of the data rectangular area. This sets the coordinate system. """ self._dataRect.setRect(left, top, width, height) self._visibleRect.setConstraintRect(left, top, width, height) self.fitInView(self._scene.itemsBoundingRect(), qt.Qt.KeepAspectRatio) def setVisibleRect(self, left, top, width, height): """Set the visible rectangular area. The coordinates are relative to the data rect. """ self.__visibleRect = left, top, width, height self._visibleRect.setRect(0, 0, width, height) self._visibleRect.setPos(left, top) self.fitInView(self._scene.itemsBoundingRect(), qt.Qt.KeepAspectRatio) def __setVisibleRectFromPlot(self, plot): """Update radar view visible area. Takes care of y coordinate conversion. 
""" xMin, xMax = plot.getXAxis().getLimits() yMin, yMax = plot.getYAxis().getLimits() self.setVisibleRect(xMin, yMin, xMax - xMin, yMax - yMin) def getPlotWidget(self): """Returns the connected plot :rtype: Union[None,PlotWidget] """ if self.__plotRef is None: return None plot = self.__plotRef() if plot is None: self.__plotRef = None return plot def setPlotWidget(self, plot): """Set the PlotWidget this radar view connects to. As result `setDataRect` and `setVisibleRect` will be called automatically. :param Union[None,PlotWidget] plot: """ previousPlot = self.getPlotWidget() if previousPlot is not None: # Disconnect previous plot plot.getXAxis().sigLimitsChanged.disconnect(self._xLimitChanged) plot.getYAxis().sigLimitsChanged.disconnect(self._yLimitChanged) plot.getYAxis().sigInvertedChanged.disconnect(self._updateYAxisInverted) # Reset plot and timer # FIXME: It would be good to clean up the display here self.__plotRef = None self.__timer.stop() if plot is not None: # Connect new plot self.__plotRef = weakref.ref(plot) plot.getXAxis().sigLimitsChanged.connect(self._xLimitChanged) plot.getYAxis().sigLimitsChanged.connect(self._yLimitChanged) plot.getYAxis().sigInvertedChanged.connect(self._updateYAxisInverted) self.__setVisibleRectFromPlot(plot) self._updateYAxisInverted() self.__timer.start(500) def _xLimitChanged(self, vmin, vmax): plot = self.getPlotWidget() self.__setVisibleRectFromPlot(plot) def _yLimitChanged(self, vmin, vmax): plot = self.getPlotWidget() self.__setVisibleRectFromPlot(plot) def _updateYAxisInverted(self, inverted=None): """Sync radar view axis orientation.""" plot = self.getPlotWidget() if inverted is None: # Do not perform this when called from plot signal inverted = plot.getYAxis().isInverted() # Use scale to invert radarView # RadarView default Y direction is from top to bottom # As opposed to Plot. So invert RadarView when Plot is NOT inverted. self.resetTransform() if not inverted: self.scale(1., -1.) 
self.update() def _viewRectDragged(self, left, top, width, height): """Slot for radar view visible rectangle changes.""" plot = self.getPlotWidget() if plot is None: return if self.__reentrant.locked(): return with self.__reentrant: plot.setLimits(left, left + width, top, top + height) def _updateDataContent(self): """Update the content to the current data content""" plot = self.getPlotWidget() if plot is None: return ranges = plot.getDataRange() xmin, xmax = ranges.x if ranges.x is not None else (0, 0) ymin, ymax = ranges.y if ranges.y is not None else (0, 0) self.setDataRect(xmin, ymin, xmax - xmin, ymax - ymin) self.__updateItem(self._imageRect, plot.getActiveImage()) self.__updateItem(self._scatterRect, plot.getActiveScatter()) self.__updateItem(self._curveRect, plot.getActiveCurve()) def __updateItem(self, rect, item): """Sync rect with item bounds :param QGraphicsRectItem rect: :param Item item: """ if item is None: rect.setVisible(False) return ranges = item._getBounds() if ranges is None: rect.setVisible(False) return xmin, xmax, ymin, ymax = ranges width = xmax - xmin height = ymax - ymin rect.setRect(xmin, ymin, width, height) rect.setVisible(True)
38.88674
84
0.617674
1,564
14,077
5.438619
0.257673
0.014108
0.011286
0.016929
0.22925
0.181754
0.143193
0.126734
0.107924
0.093581
0
0.006613
0.280244
14,077
361
85
38.99446
0.832906
0.275556
0
0.228155
0
0
0.013498
0
0
0
0
0.00277
0.004854
1
0.106796
false
0
0.019417
0.004854
0.228155
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
707748746f67df072b8d127ebd45987c9f4adb01
1,486
py
Python
purly/py/purly/model/client.py
rmorshea/purly
0d07d6d7636fd81d9c1c14e2df6a32fc28b325f7
[ "MIT" ]
2
2018-08-18T05:39:24.000Z
2018-08-21T19:02:16.000Z
purly/py/purly/model/client.py
rmorshea/purly
0d07d6d7636fd81d9c1c14e2df6a32fc28b325f7
[ "MIT" ]
2
2018-07-27T07:14:19.000Z
2018-07-27T07:17:06.000Z
purly/py/purly/model/client.py
rmorshea/purly
0d07d6d7636fd81d9c1c14e2df6a32fc28b325f7
[ "MIT" ]
null
null
null
import json import time import websocket class Client: def __init__(self, url): self._url = url self._updates = [] self._socket = create_socket(url, connection_timeout=2) def sync(self): recv = [] while True: data = self._socket.recv() if data: recv.extend(json.loads(data)) outgoing = self._updates[:1000] self._socket.send(json.dumps(outgoing)) self._updates[:1000] = [] if not self._updates: break for incoming in recv: self._recv(incoming) def serve(self, function=None): while True: if function is not None: try: function() except StopIteration: break self.sync() def _send(self, content, header): self._updates.append({ 'header': header, 'content': content, }) def _recv(self, msg): datatype = msg['header']['type'] method = '_on_%s' % datatype if hasattr(self, method): getattr(self, method)(msg['content']) def create_socket(uri, *args, **kwargs): start = time.time() while True: try: return websocket.create_connection(uri, *args, **kwargs) except ConnectionRefusedError: if time.time() - start > kwargs.get('connection_timeout', 0): raise
26.070175
73
0.520188
150
1,486
5
0.4
0.073333
0.050667
0.061333
0
0
0
0
0
0
0
0.010718
0.37214
1,486
56
74
26.535714
0.79314
0
0
0.148936
0
0
0.036339
0
0
0
0
0
0
1
0.12766
false
0
0.06383
0
0.234043
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
707a6afb505aab8fb903afd510d0ef10d069c552
1,378
py
Python
deeplearning-image-gpu/resource/scoring-recipe-helper.py
gbetegon88/dataiku-contrib
4683a9e08a3706a163810bf104bf6464025e235e
[ "Apache-2.0" ]
93
2015-10-28T13:15:45.000Z
2022-03-07T01:16:24.000Z
deeplearning-image-gpu/resource/scoring-recipe-helper.py
gbetegon88/dataiku-contrib
4683a9e08a3706a163810bf104bf6464025e235e
[ "Apache-2.0" ]
130
2015-11-13T17:41:41.000Z
2022-03-30T16:37:44.000Z
deeplearning-image-gpu/resource/scoring-recipe-helper.py
gbetegon88/dataiku-contrib
4683a9e08a3706a163810bf104bf6464025e235e
[ "Apache-2.0" ]
88
2015-10-29T09:36:23.000Z
2021-12-13T20:14:08.000Z
import dataiku
import glob
import pandas as pd
import os


def do(payload, config, plugin_config, inputs):
    """Plugin helper endpoint.

    For the "get-valid-csv-filenames" method, list the CSV files at the
    root of the recipe's model folder that contain the columns required
    for scoring ("id" and "className").

    Args:
        payload (dict): request payload; only the "method" key is used.
        config, plugin_config: unused here, supplied by the plugin runtime.
        inputs (list): recipe inputs; the one with role "modelFolder" is used.

    Returns:
        dict: {"csv_valid_filenames": [{"path": ..., "name": ...}, ...]},
        or {} when no method was requested.
    """
    if "method" not in payload:
        return {}
    client = dataiku.api_client()  # kept: may carry auth side effects
    if payload["method"] == "get-valid-csv-filenames":
        required_columns = ["id", "className"]
        sep = ","
        # Retrieving model folder
        model_folder_full_name = [inp for inp in inputs if inp["role"] == "modelFolder"][0]["fullName"]
        model_folder = dataiku.Folder(model_folder_full_name).get_path()
        csv_files_root_mf = glob.glob(model_folder + "/*.csv")
        # Filtering out files without required columns
        csv_valid_filenames = []
        for f in csv_files_root_mf:
            schema = retrieve_schema_from_pandas_compatible_csv_file(f, sep)
            if all(col in schema for col in required_columns):
                csv_valid_filenames.append({
                    "path": f,
                    "name": os.path.basename(f)
                })
        return {"csv_valid_filenames": csv_valid_filenames}


def retrieve_schema_from_pandas_compatible_csv_file(file_path, sep):
    """Return the column names of a CSV file, or [] if it cannot be parsed.

    Only the header row is read (nrows=0), so this stays cheap for large files.
    """
    try:
        df = pd.read_csv(file_path, sep=sep, nrows=0)
        return df.columns
    except Exception as e:
        # FIX: was a Python 2 print statement using e.message, which raises
        # AttributeError under Python 3; use print() and str(e) instead.
        print("Unexpected exception : {}".format(e))
        return []
32.046512
103
0.621916
174
1,378
4.672414
0.390805
0.067651
0.083641
0.051661
0.162362
0.100861
0.100861
0
0
0
0
0.003015
0.277939
1,378
43
104
32.046512
0.81407
0.049347
0
0
0
0
0.097859
0.017584
0
0
0
0
0
0
null
null
0
0.129032
null
null
0.032258
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
707aa505d5a6e2672f86a0292a8b2705393bee85
1,232
py
Python
practice_problems/prog4_vi.py
vishwasks32/python3-learning
39f39238428727ef0c97c74c8de2570bd84da403
[ "Apache-2.0" ]
3
2018-02-08T21:09:27.000Z
2021-06-15T04:48:46.000Z
practice_problems/prog4_vi.py
vishwasks32/python3-learning
39f39238428727ef0c97c74c8de2570bd84da403
[ "Apache-2.0" ]
null
null
null
practice_problems/prog4_vi.py
vishwasks32/python3-learning
39f39238428727ef0c97c74c8de2570bd84da403
[ "Apache-2.0" ]
1
2018-02-08T21:09:31.000Z
2018-02-08T21:09:31.000Z
#!/usr/bin/env python3
#
# Author: Vishwas K Singh
# Email: vishwasks32@gmail.com
#
# Script to convert Celcius Temperature to Farenheit


def temp_conv(temp_type, temp_val):
    """Convert a temperature value.

    'f' converts a Celsius value to Fahrenheit, 'c' converts a
    Fahrenheit value to Celsius; any other type yields None.
    """
    if temp_type == 'f':
        return ((9/5)*temp_val) + 32
    if temp_type == 'c':
        return (5*(temp_val - 32))/9


if __name__ == '__main__':
    print("Welcome to Temperature Converter")
    print("Select 1. Farenheit to Celcius\n\t2. Celcius to Farenheit")
    choice = input()
    if choice == '1':
        value = float(input("Enter the farenheit value to be converted: "))
        converted = temp_conv('c', value)
        print("%.2f degree farenheit converts to %.2f degree celcius."%(value, converted))
    elif choice == '2':
        value = float(input("Enter the Celcius value to be converted: "))
        converted = temp_conv('f', value)
        print("%.2f degree celcius converts to %.2f degree farenheit."%(value, converted))
    else:
        print("Invalid Input!! Exit..")
35.2
98
0.647727
170
1,232
4.470588
0.358824
0.082895
0.047368
0.063158
0.248684
0.190789
0.094737
0.094737
0.094737
0
0
0.020063
0.231331
1,232
34
99
36.235294
0.782471
0.160714
0
0
0
0
0.312932
0
0
0
0
0
0
1
0.043478
false
0
0
0
0.130435
0.217391
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
707b95b9e394fd7ccab1823b73b68b69754eb13a
696
py
Python
2020/09/part1.py
timofurrer/aoc-2020
446b688a57601d9891f520e43b7f822c373a6ff4
[ "MIT" ]
null
null
null
2020/09/part1.py
timofurrer/aoc-2020
446b688a57601d9891f520e43b7f822c373a6ff4
[ "MIT" ]
null
null
null
2020/09/part1.py
timofurrer/aoc-2020
446b688a57601d9891f520e43b7f822c373a6ff4
[ "MIT" ]
null
null
null
import os
import sys

# Read the puzzle input that sits next to this script.
puzzle_input_path = os.path.join(os.path.dirname(__file__), "input_1.txt")
with open(puzzle_input_path) as puzzle_input_file:
    puzzle_input_raw = puzzle_input_file.read()

preamble = 25
numbers = [int(x) for x in puzzle_input_raw.splitlines()]

# First number (after the preamble) that is NOT the sum of two values
# among the `preamble` numbers immediately before it.
number = next(
    value
    for idx, value in enumerate(numbers[preamble:], start=preamble)
    if not any(
        value - other in numbers[idx - preamble:idx]
        for other in numbers[idx - preamble:idx]
    )
)

print(number)
29
81
0.70546
111
696
4.207207
0.36036
0.141328
0.06424
0.111349
0.248394
0.167024
0
0
0
0
0
0.005282
0.183908
696
24
82
29
0.816901
0.327586
0
0
0
0
0.023758
0
0
0
0
0
0
1
0
false
0
0.142857
0
0.142857
0.071429
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
707c819d3655d7f0bad5a54fee02df54610cbc35
4,416
py
Python
dualing/core/model.py
gugarosa/dualing
a625476946bded8e6c9211d83fe79dc16b3d8f16
[ "Apache-2.0" ]
2
2020-08-03T08:02:48.000Z
2020-11-21T04:11:45.000Z
dualing/core/model.py
gugarosa/dualing
a625476946bded8e6c9211d83fe79dc16b3d8f16
[ "Apache-2.0" ]
1
2020-09-29T12:58:33.000Z
2020-09-29T12:58:33.000Z
dualing/core/model.py
gugarosa/dualing
a625476946bded8e6c9211d83fe79dc16b3d8f16
[ "Apache-2.0" ]
null
null
null
"""Base architecture and Siamese Network. """ import tensorflow as tf import dualing.utils.exception as e class Base(tf.keras.Model): """A Base class is responsible for easily-implementing the base twin architecture of a Siamese Network. """ def __init__(self, name=''): """Initialization method. Args: name (str): Naming identifier. """ super(Base, self).__init__(name=name) def call(self, x): """Method that holds vital information whenever this class is called. Note that you need to implement this method directly on its child. Essentially, each neural network has its own forward pass implementation. Args: x (tf.Tensor): Tensor containing the input sample. Raises: NotImplementedError. """ raise NotImplementedError class Siamese(tf.keras.Model): """An Siamese class is responsible for implementing the base of Siamese Neural Networks. """ def __init__(self, base, name=''): """Initialization method. Args: base (Base): Twin architecture. name (str): Naming identifier. """ super(Siamese, self).__init__(name=name) # Defines the Siamese's base twin architecture self.B = base @property def B(self): """Base: Twin architecture. """ return self._B @B.setter def B(self, B): if not isinstance(B, Base): raise e.TypeError('`B` should be a child from Base class') self._B = B def compile(self, optimizer): """Method that builds the network by attaching optimizer, loss and metrics. Note that you need to implement this method directly on its child. Essentially, each type of Siamese has its own set of loss and metrics. Args: optimizer (tf.keras.optimizers): Optimization algorithm. Raises: NotImplementedError. """ raise NotImplementedError def step(self, x, y): """Method that performs a single batch optimization step. Note that you need to implement this method directly on its child. Essentially, each type of Siamese has an unique step. Args: x (tf.Tensor): Tensor containing samples. y (tf.Tensor): Tensor containing labels. Raises: NotImplementedError. 
""" raise NotImplementedError def fit(self, batches, epochs=100): """Method that trains the model over training batches. Note that you need to implement this method directly on its child. Essentially, each type of Siamese may use a distinct type of dataset. Args: batches (Dataset): Batches of tuples holding training samples and labels. epochs (int): Maximum number of epochs. Raises: NotImplementedError. """ raise NotImplementedError def evaluate(self, batches): """Method that evaluates the model over validation or testing batches. Note that you need to implement this method directly on its child. Essentially, each type of Siamese may use a distinct type of dataset. Args: batches (Dataset): Batches of tuples holding validation / testing samples and labels. Raises: NotImplementedError. """ raise NotImplementedError def predict(self, x): """Method that performs a forward pass over samples and returns the network's output. Note that you need to implement this method directly on its child. Essentially, each type of Siamese may predict in a different way. Args: x (tf.Tensor): Tensor containing samples. Raises: NotImplementedError. """ raise NotImplementedError def extract_embeddings(self, x): """Method that extracts embeddings by performing a forward pass over the base architecture (embedder). Args: x (np.array, tf.Tensor): Array or tensor containing the inputs to be embedded. input_shape (tuple): Shape of the input layer. Returns: A tensor containing the embedded inputs. """ x = tf.convert_to_tensor(x) x = self.B(x) return x
25.234286
97
0.615942
515
4,416
5.238835
0.279612
0.025945
0.024463
0.033358
0.395478
0.316901
0.263158
0.236471
0.236471
0.236471
0
0.000991
0.314312
4,416
174
98
25.37931
0.890026
0.610507
0
0.181818
0
0
0.031869
0
0
0
0
0
0
1
0.333333
false
0
0.060606
0
0.515152
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
707ebe5c40335557036acfbcae3d06ae69d50f9a
2,642
py
Python
app/dapp_examples/py/media_analysis/image_quality/App.py
TheAdamBC/AdamBC
e854a64c19442e24a50e4d65ce2f2e8f6ea46f4c
[ "BSD-3-Clause" ]
1
2021-12-14T07:28:46.000Z
2021-12-14T07:28:46.000Z
app/dapp_examples/py/media_analysis/image_quality/App.py
TheAdamBC/AdamBC
e854a64c19442e24a50e4d65ce2f2e8f6ea46f4c
[ "BSD-3-Clause" ]
null
null
null
app/dapp_examples/py/media_analysis/image_quality/App.py
TheAdamBC/AdamBC
e854a64c19442e24a50e4d65ce2f2e8f6ea46f4c
[ "BSD-3-Clause" ]
null
null
null
#**
# * The Decentralized App (DApp):
# * This is where the App developer writes the decentralized app.
# * Make sure the code is written within the specified space region.
# *
# * IMPORTANT:
# * 1. Developer DApp CODE MUST BE WRITTEN WITHIN SPECIFIED SPACE REGION.
# * 2. DApp MUST return values through the 'results' variable.
# * 3. DApp MUST RETURN A JSON Object.
# * 4. DApp data crunching should not exceed 100MB of Data per peer task.
# * 5. If you change the name of 'results', make sure to change it at DApp's 'return results' code.
# *
# *
import sys, json

results = {}  # Storage for successful results.
json_str = input()  # Capture data input
params = json.loads(json_str)  # Load parameters values (params) to process

#*********************************************************************************/
# /* START WRITING YOUR DAPP CODE BEGINNING HERE: */
#*********************************************************************************/

# EXAMPLE:
# Estimating the quality of images in a file directory (We'll consider image
# quality as calculating the area of an image divided by its size).

# Import necessary DApp resources, scripts, assets and modules needed for the task.
import numpy as np
import cv2
import os
import base64

# Variable to store image quality
imageQuality = {'imageQuality': 0}

fileName = params['uParams'][0]['parameter2']  # Capture name of file
fileData = base64.b64decode(params['uParams'][0]['parameter1'])  # Capture file

# Parse image file to Numpy array
img_buffer = np.frombuffer(fileData, dtype=np.uint8)
im = cv2.imdecode(img_buffer, flags=1)

# Save file to local directory
try:
    cv2.imwrite(os.path.join('app/assets/media/', f'{fileName}'), im)
    cv2.waitKey(0)
except Exception:
    # FIX: was a bare `except:`, which also swallows SystemExit/KeyboardInterrupt.
    print('Problem saving file!')

try:
    img = cv2.imread(f'app/assets/media/{fileName}', cv2.IMREAD_UNCHANGED)  # Load file to OpenCV
except Exception:
    # FIX: bare `except:` narrowed.  NOTE(review): cv2.imread signals failure by
    # returning None rather than raising, so this handler likely never fires and
    # `img.shape` below would still crash on a bad file — confirm and handle.
    print('Error processing file!')

# get dimensions of image
dimensions = img.shape

# height, width, number of channels in image
height = img.shape[0]
width = img.shape[1]
size = os.path.getsize(f'app/assets/media/{fileName}')

# We'll consider image quality as calculating the area of an image divided by its size
imageQuality['imageQuality'] = (size / (height * width))

# Return results of processing
results = imageQuality

#*********************************************************************************/
# /* STOP WRITING YOUR DAPP CODE UP UNTIL HERE.*/
#*********************************************************************************/

# Results must return valid JSON Object
print(results)
sys.stdout.flush()
33.443038
142
0.623391
335
2,642
4.901493
0.450746
0.014616
0.025579
0.023143
0.112058
0.084044
0.084044
0.084044
0.084044
0.084044
0
0.013538
0.161241
2,642
78
143
33.871795
0.727437
0.641559
0
0.133333
0
0
0.198901
0.059341
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0.1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
70811820b7b88435ba6e11176b752f15dc282feb
1,142
py
Python
tests/conftest.py
jmolmo/managed-tenants-cli
fb3dd79f6629884577aa7333fdfe8d78802a79d4
[ "Apache-2.0" ]
null
null
null
tests/conftest.py
jmolmo/managed-tenants-cli
fb3dd79f6629884577aa7333fdfe8d78802a79d4
[ "Apache-2.0" ]
null
null
null
tests/conftest.py
jmolmo/managed-tenants-cli
fb3dd79f6629884577aa7333fdfe8d78802a79d4
[ "Apache-2.0" ]
1
2021-09-02T10:11:52.000Z
2021-09-02T10:11:52.000Z
# Configure different hypothesis profiles
import os

from hypothesis import HealthCheck, Phase, settings

FAST_PROFILE = "fast"
CI_PROFILE = "ci"

# Settings shared by both profiles.
_COMMON_SETTINGS = dict(
    # Set to true for test reproducibility
    # https://hypothesis.readthedocs.io/en/latest/settings.html#hypothesis.settings.derandomize
    derandomize=False,
    # https://hypothesis.readthedocs.io/en/latest/settings.html#controlling-what-runs
    phases=[Phase.generate, Phase.explain],
    # (sblaisdo) fails `HealthCheck.too_slow` with initial schema/addon loading
    suppress_health_check=[HealthCheck.too_slow],
    # (sblaisdo) default deadline of 200ms is exceeded in some cases
    deadline=None,
)

# 'fast' profile for local development
settings.register_profile(FAST_PROFILE, max_examples=3, **_COMMON_SETTINGS)

# 'ci' profile for pr_check.sh
settings.register_profile(CI_PROFILE, max_examples=5, **_COMMON_SETTINGS)

# Load profile
p = CI_PROFILE if os.getenv("CI") == "true" else FAST_PROFILE
print(f"Loading hypothesis profile: {p}")
settings.load_profile(p)
29.282051
95
0.748687
146
1,142
5.726027
0.486301
0.052632
0.064593
0.066986
0.277512
0.203349
0.114833
0.114833
0
0
0
0.005139
0.147986
1,142
38
96
30.052632
0.85406
0.402802
0
0.434783
0
0
0.064179
0
0
0
0
0
0
1
0
false
0
0.086957
0
0.086957
0.043478
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
70813d4b3c14a6bf03e7e3ca20a7dd8dc96c4b00
1,859
py
Python
app/user/views.py
spark8103/deploy
7a99c5fcb11a93429814d2a519dca5ea3f99ea3a
[ "MIT" ]
2
2017-11-10T18:06:36.000Z
2018-01-25T13:53:10.000Z
app/user/views.py
spark8103/deploy
7a99c5fcb11a93429814d2a519dca5ea3f99ea3a
[ "MIT" ]
null
null
null
app/user/views.py
spark8103/deploy
7a99c5fcb11a93429814d2a519dca5ea3f99ea3a
[ "MIT" ]
null
null
null
# coding: utf-8
from flask import render_template, redirect, request, url_for, flash, current_app
from flask_login import login_user, logout_user, login_required, UserMixin
from . import user
from .forms import LoginForm
from .. import login_manager
from config import Config
import time

# Static user store from configuration; lookups below show the shape
# {username: {"password": ...}}.
users = Config.USER_LIST


class User(UserMixin):
    """Minimal user object carrying only what Flask-Login needs."""
    pass


@login_manager.user_loader
def user_loader(username):
    """Reload a user from the session; returns None for unknown names."""
    if username not in users:
        return
    user = User()
    user.id = username
    user.username = username
    return user


# NOTE(review): this second @login_manager.user_loader registration overrides
# user_loader() above — Flask-Login keeps only the last registered callback.
# The two bodies are identical so behavior is unchanged, but one is redundant.
@login_manager.user_loader
def get_user(username):
    """Duplicate of user_loader(); kept to preserve the module interface."""
    if username not in users:
        return
    user = User()
    user.id = username
    user.username = username
    return user


@login_manager.request_loader
def request_loader(request):
    """Authenticate a request directly from its form credentials."""
    username = request.form.get('username')
    if username not in users:
        return
    user = User()
    user.id = username
    if request.form['password'] == users[username]['password']:
        return user
    else:
        return None


@user.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form and sign the user in on valid credentials."""
    form = LoginForm()
    if form.validate_on_submit():
        username = form.username.data
        # FIX: guard with `username in users` — the original indexed
        # users[username] directly, so an unknown username raised KeyError
        # (HTTP 500) instead of reaching the flash() message below.
        if username in users and users[username]['password'] == form.password.data:
            user = User()
            user.id = username
            user.username = username
            login_user(user, form.remember_me.data)
            # FIX: was a Python 2 print statement; print() works on 2 and 3.
            print(username + " is login date - " + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
            return redirect(request.args.get('next') or url_for('main.index'))
        flash('Invalid username or password.')
    return render_template('user/login.html', form=form)


@user.route('/logout')
@login_required
def logout():
    """Log the current user out and return to the index page."""
    logout_user()
    flash('You have been logged out.')
    return redirect(url_for('main.index'))
24.786667
103
0.661646
241
1,859
4.991701
0.307054
0.05985
0.0399
0.04655
0.275977
0.244389
0.244389
0.244389
0.209476
0.209476
0
0.000696
0.227542
1,859
75
104
24.786667
0.837047
0.006993
0
0.385965
0
0
0.097019
0
0
0
0
0
0
0
null
null
0.070175
0.122807
null
null
0.017544
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
1
0
0
0
0
0
2
70824a33f97bb12dc07fe121ae5ed43e23d5b01c
2,994
py
Python
src/python/exsim3.py
akafael/unb-controle-digital
4c4915eb1c4d070886284c0f79ce3ee26ece8695
[ "MIT" ]
null
null
null
src/python/exsim3.py
akafael/unb-controle-digital
4c4915eb1c4d070886284c0f79ce3ee26ece8695
[ "MIT" ]
null
null
null
src/python/exsim3.py
akafael/unb-controle-digital
4c4915eb1c4d070886284c0f79ce3ee26ece8695
[ "MIT" ]
null
null
null
""" Laboratory Experiment 3 - Script - Rootlocus project @author Rafael Lima """ from sympy import * def simplifyFraction(G,s): """ Expand numerator and denominator from given fraction """ num,den = fraction(G.expand().simplify()) num = Poly(num,s) den = Poly(den,s) return (num/den) def partfrac(G,s): """ Split Fraction into several factors using residues theorem """ # Find Poles poles = solve(sympy.fraction(G.expand().simplify())[1],s) # Find Resudues Gp = 0 for p in poles: Gp = Gp + (G*(s-p)).subs(s,p)/(s-p) return Gp def roundExpr(expr, num_digits=4): """ Round Every Number in an expression """ return expr.xreplace({n : round(n, num_digits) for n in expr.atoms(Number)}) printing.printer.Printer().set_global_settings(precision=3) # Symbols s = symbols("s",complex=True) z = symbols("z",complex=True) K,a1,a0,T,b = symbols("K alpha_1 alpha_0 T beta",real=True) # Constants na0 = 1 nT = 0.5 # Open Loop Transfer Function sGo = 1/(s+na0) # Z Transform (From table) sGz = (1/na0)*(1-exp(-T))/(z-exp(-T)) # Controler TF sGc = K*z/(z-1) sGma = simplify(expand(sGc*sGz)) sGmf = simplify(expand(sGma/(1+sGma))) # Characterist Equation _,poly = fraction(sGmf) # Find Critical Value for K sK = solve(poly,K)[0] Kmax = sK.subs([(T,nT),(z,-1)]) poles2 = solve(poly.subs([(T,nT),(K,2)]),z) # Part 2 # Constants na1 = 2 Ts = 0.2 # Open Loop Transfer Function sGo2 = 1/(s+2) # TODO find Z Transform (From table) # BUG: not matching ZOH discretization from matlab sGz2 = (z-1)*(1/(4*(z-exp(-2*T))) - 1/(4*(z-1)) + T*(1/((z-1)*(z-1)))) # Controler TF sGc2 = K*(z-exp(-na1*T))/(z-b) sGma2 = simplifyFraction(sGc2*sGz2,z) sGmf2 = simplify(expand(sGma2/(1+sGma2))) # Expression from matlab mGz2 = ((2533546664982251*z)/144115188075855872 + 554410548014771/36028797018963968)/(z**2 - (3761226368457787*z)/2251799813685248 + 6037706219090157/9007199254740992) # Controler TF from Matlab mGc2 = K*(z-exp(-na1*T))/(z-b) mGma2 = simplifyFraction(mGc2*mGz2,z) mGmf2 = simplify(expand(mGma2/(1+mGma2))) # 
Characterist Equation _,poly2 = fraction(sGmf2) # Request Conditions desiredDamping = 0.5 desiredSettlingTime = 2 desiredOvershoot = exp(-desiredDamping*pi/sqrt(1-desiredDamping**2)) desiredPoles = [0,0] desiredPoles[0] = -(4/desiredSettlingTime)*(1 + I*sqrt(1-desiredDamping**2)/desiredDamping) desiredPoles[1] = -(4/desiredSettlingTime)*(1 - I*sqrt(1-desiredDamping**2)/desiredDamping) desiredPolesZ = [exp(desiredPoles[0]*Ts),exp(desiredPoles[1]*Ts)] # Solve Linear System to find K and b sysKb = [K,b] sysKb[0] = poly2.subs([(z,desiredPolesZ[0]),(T,Ts)]).evalf().collect(K).collect(b) sysKb[1] = poly2.subs([(z,desiredPolesZ[1]),(T,Ts)]).evalf().collect(K).collect(b) resp = list(linsolve(sysKb,(K,b)))[0] nK = resp[0] nb = resp[1] # Find TF nGmf2 = sGmf2.subs([(K,nK),(b,nb),(T,Ts)]) # Find Critical Value for K sK2 = solve(poly2,K)[0] #Kmax = sK.subs([(T,nT),(z,-1)])
23.761905
167
0.660655
461
2,994
4.273319
0.331887
0.007107
0.01066
0.030457
0.130964
0.109645
0.109645
0.074112
0.074112
0
0
0.090229
0.152305
2,994
125
168
23.952
0.685973
0.238143
0
0
0
0
0.011797
0
0
0
0
0.008
0
1
0.053571
false
0
0.017857
0
0.125
0.017857
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
708280686076f210ca6f326f21e8b919249cc576
5,439
py
Python
chase/test.py
dave2328/chase
9bca01b959ba65f526db993fd03a13eaba294941
[ "MIT" ]
3
2016-02-03T04:04:00.000Z
2020-10-31T02:36:49.000Z
chase/test.py
tecoach/chase
2b5a11cf70f295500f07207e26f26f4391902d08
[ "MIT" ]
1
2016-09-27T18:44:22.000Z
2016-09-27T18:44:22.000Z
chase/test.py
tecoach/chase
2b5a11cf70f295500f07207e26f26f4391902d08
[ "MIT" ]
5
2016-02-03T04:04:13.000Z
2020-03-30T04:46:11.000Z
import os
import unittest

from chase import Profile, Order, Reversal

merchant_id = os.environ.get('TEST_ORBITAL_MERCHANT_ID')
username = os.environ.get('TEST_ORBITAL_USERNAME')
password = os.environ.get('TEST_ORBITAL_PASSWORD')

# Canonical customer fixture shared by the factories and assertions below.
_DEFAULT_PROFILE_FIELDS = {
    'name': "Test User",
    'address1': "101 Main St.",
    'address2': "Apt. 4",
    'city': "New York",
    'state': "NY",
    'zipCode': "10012",
    'email': "test@example.com",
    'phone': "9089089080",
    'cc_num': "4788250000028291",
    'cc_expiry': "1122",
}


def _credentials():
    """Gateway credentials taken from the environment."""
    return dict(merchant_id=merchant_id, username=username, password=password)


def new_profile():
    """Build a Profile pre-populated with the canonical test customer."""
    profile = Profile(**_credentials())
    for field, value in _DEFAULT_PROFILE_FIELDS.items():
        setattr(profile, field, value)
    return profile


def new_order():
    """Build an Order with the test gateway credentials."""
    return Order(**_credentials())


def new_reversal():
    """Build a Reversal with the test gateway credentials."""
    return Reversal(**_credentials())


class TestProfileFunctions(unittest.TestCase):

    def assert_default_fields(self, result):
        """Assert that `result` echoes the canonical customer fields."""
        expected = {
            'ProfileProcStatus': '0',
            'CustomerName': 'Test User',
            'CustomerAddress1': '101 Main St.',
            'CustomerAddress2': 'Apt. 4',
            'CustomerCity': 'New York',
            'CustomerState': 'NY',
            'CustomerZIP': '10012',
            'CustomerEmail': 'test@example.com',
            'CustomerPhone': '9089089080',
            'CCAccountNum': '4788250000028291',
            'CCExpireDate': '1122',
        }
        for key, value in expected.items():
            self.assertEqual(result[key], value)

    def test_lifecycle(self):
        # test profile creation
        profile = new_profile()
        result = profile.create()
        self.assert_default_fields(result)
        ident = result['CustomerRefNum']

        # test profile reading
        profile = new_profile()
        profile.ident = ident
        self.assert_default_fields(profile.read())

        # test profile updating
        profile = new_profile()
        profile.ident = ident
        profile.name = 'Example Customer'
        profile.city = 'Philadelphia'
        profile.state = 'PA'
        profile.zipCode = '19130'
        result = profile.update()
        for key, value in {
            'ProfileProcStatus': '0',
            'CustomerRefNum': ident,
            'CustomerName': 'Example Customer',
            'CustomerCity': 'Philadelphia',
            'CustomerState': 'PA',
            'CustomerZIP': '19130',
        }.items():
            self.assertEqual(result[key], value)

        result = profile.read()
        for key, value in {
            'ProfileProcStatus': '0',
            'CustomerName': 'Example Customer',
            'CustomerAddress1': '101 Main St.',
            'CustomerAddress2': 'Apt. 4',
            'CustomerCity': 'Philadelphia',
            'CustomerState': 'PA',
            'CustomerZIP': '19130',
            'CustomerEmail': 'test@example.com',
            'CustomerPhone': '9089089080',
            'CCAccountNum': '4788250000028291',
            'CCExpireDate': '1122',
        }.items():
            self.assertEqual(result[key], value)

        # test profile deletion
        profile = new_profile()
        profile.ident = ident
        result = profile.destroy()
        self.assertEqual(result['ProfileProcStatus'], '0')
        self.assertEqual(result['CustomerRefNum'], ident)


class TestOrderFunctions(unittest.TestCase):

    def _void_charge(self, tx_ref_num, tx_ref_idx):
        """Void a test charge so the account is left clean."""
        refund = new_reversal()
        refund.tx_ref_num = tx_ref_num
        refund.tx_ref_idx = tx_ref_idx
        refund.order_id = '100001'
        result = refund.void()
        self.assertEqual(result['ProcStatus'], '0')

    def test_profile_order(self):
        self.profile = new_profile()
        result = self.profile.create()
        customer_num = result['CustomerRefNum']

        order = new_order()
        order.customer_num = customer_num
        order.order_id = '100001'
        order.amount = '10.00'
        result = order.charge()
        self.assertEqual(result['ProfileProcStatus'], '0')
        txRefNum = result['TxRefNum']
        txRefIdx = result['TxRefIdx']
        self.assertTrue(txRefNum)
        self.assertTrue(txRefIdx)

        self._void_charge(txRefNum, txRefIdx)

    def test_cc_order(self):
        order = new_order()
        order.order_id = '100001'
        order.amount = '10.00'
        # Same customer data as the profile fixture, minus the name field.
        for field, value in _DEFAULT_PROFILE_FIELDS.items():
            if field != 'name':
                setattr(order, field, value)
        result = order.charge()
        txRefNum = result['TxRefNum']
        txRefIdx = result['TxRefIdx']
        self.assertTrue(txRefNum)
        self.assertTrue(txRefIdx)

        self._void_charge(txRefNum, txRefIdx)


if __name__ == '__main__':
    unittest.main()
34.865385
69
0.636882
543
5,439
6.26151
0.176796
0.145588
0.203824
0.055882
0.621176
0.572353
0.562353
0.562353
0.469412
0.408529
0
0.0514
0.238095
5,439
155
70
35.090323
0.769064
0.015812
0
0.559701
0
0
0.194465
0.012341
0
0
0
0
0.298507
1
0.052239
false
0.029851
0.022388
0.014925
0.11194
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
708413de75cff9f09c32fc7eec77271bf88e6168
2,698
py
Python
model/decoder.py
kefirski/hybrid_rvae
39133e656eeb05c998422e5ad9bfadc913c81b44
[ "MIT" ]
23
2017-10-24T01:30:07.000Z
2021-11-15T04:14:02.000Z
model/decoder.py
analvikingur/hybrid_rvae
39133e656eeb05c998422e5ad9bfadc913c81b44
[ "MIT" ]
1
2017-08-20T00:34:23.000Z
2017-08-21T08:03:30.000Z
model/decoder.py
analvikingur/hybrid_rvae
39133e656eeb05c998422e5ad9bfadc913c81b44
[ "MIT" ]
13
2017-08-22T15:35:00.000Z
2021-11-19T01:24:33.000Z
import torch as t
import torch.nn as nn
import torch.nn.functional as F


class Decoder(nn.Module):
    """Hybrid decoder: a transposed-convolution stack expands the latent
    vector into per-step auxiliary logits, and a GRU refines them
    conditioned on the (teacher-forced) decoder input."""

    def __init__(self, vocab_size, latent_variable_size, rnn_size, rnn_num_layers, embed_size):
        super(Decoder, self).__init__()

        self.vocab_size = vocab_size
        self.latent_variable_size = latent_variable_size
        self.rnn_size = rnn_size
        self.embed_size = embed_size
        self.rnn_num_layers = rnn_num_layers

        # (in_channels, out_channels, output_padding) for each deconv stage;
        # every stage uses kernel 4, stride 2, padding 0.
        stages = [
            (self.latent_variable_size, 512, 0),
            (512, 512, 1),
            (512, 256, 0),
            (256, 256, 1),
            (256, 128, 0),
        ]
        layers = []
        for in_ch, out_ch, out_pad in stages:
            layers += [
                nn.ConvTranspose1d(in_ch, out_ch, 4, 2, 0, output_padding=out_pad),
                nn.BatchNorm1d(out_ch),
                nn.ELU(),
            ]
        # Final projection to vocabulary logits (no norm/activation).
        layers.append(nn.ConvTranspose1d(128, self.vocab_size, 4, 2, 0))
        self.cnn = nn.Sequential(*layers)

        self.rnn = nn.GRU(input_size=self.vocab_size + self.embed_size,
                          hidden_size=self.rnn_size,
                          num_layers=self.rnn_num_layers,
                          batch_first=True)

        self.hidden_to_vocab = nn.Linear(self.rnn_size, self.vocab_size)

    def forward(self, latent_variable, decoder_input):
        """
        :param latent_variable: An float tensor with shape of [batch_size, latent_variable_size]
        :param decoder_input: An float tensor with shape of [batch_size, max_seq_len, embed_size]
        :return: two tensors with shape of [batch_size, max_seq_len, vocab_size]
                 — logits for the whole model and for the auxiliary
                 (convolution-only) target respectively
        """
        aux_logits = self.conv_decoder(latent_variable)
        logits, _ = self.rnn_decoder(aux_logits, decoder_input, initial_state=None)
        return logits, aux_logits

    def conv_decoder(self, latent_variable):
        # [batch, latent] -> [batch, latent, 1] so the deconv stack can grow
        # the length dimension, then move channels last for the RNN.
        expanded = latent_variable.unsqueeze(2)
        features = self.cnn(expanded)
        return t.transpose(features, 1, 2).contiguous()

    def rnn_decoder(self, cnn_out, decoder_input, initial_state=None):
        rnn_input = t.cat([cnn_out, decoder_input], 2)
        hidden, final_state = self.rnn(rnn_input, initial_state)
        batch_size, seq_len, _ = hidden.size()
        # Project every timestep's hidden state onto the vocabulary.
        flat = hidden.contiguous().view(-1, self.rnn_size)
        logits = self.hidden_to_vocab(flat).view(batch_size, seq_len, self.vocab_size)
        return logits, final_state
34.589744
99
0.626019
353
2,698
4.518414
0.229462
0.105329
0.048903
0.068966
0.221944
0.160502
0.160502
0.160502
0
0
0
0.041453
0.27576
2,698
77
100
35.038961
0.774821
0.127131
0
0.183673
0
0
0
0
0
0
0
0
0
1
0.081633
false
0
0.061224
0
0.22449
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7084359911956e6eb8fcc82cb7baa296e5280584
186
py
Python
main/Models/Circle/Simulation/Circle.py
JulianoGianlupi/nh-cc3d-4x-base-tool
c0f4aceebd4c5bf3ec39e831ef851e419b161259
[ "CC0-1.0" ]
null
null
null
main/Models/Circle/Simulation/Circle.py
JulianoGianlupi/nh-cc3d-4x-base-tool
c0f4aceebd4c5bf3ec39e831ef851e419b161259
[ "CC0-1.0" ]
null
null
null
main/Models/Circle/Simulation/Circle.py
JulianoGianlupi/nh-cc3d-4x-base-tool
c0f4aceebd4c5bf3ec39e831ef851e419b161259
[ "CC0-1.0" ]
1
2021-02-26T21:50:29.000Z
2021-02-26T21:50:29.000Z
from cc3d import CompuCellSetup from CircleSteppables import CircleSteppable CompuCellSetup.register_steppable(steppable=CircleSteppable(frequency=1)) CompuCellSetup.run()
16.909091
73
0.817204
17
186
8.882353
0.647059
0
0
0
0
0
0
0
0
0
0
0.012346
0.129032
186
10
74
18.6
0.919753
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
3
7084652e3d8514cf5a87012b67dbfa4aee0e8d9d
15,329
py
Python
web/olga/analytics/models.py
raccoongang/acceptor
fdc1504912b502c8d789d5478eba8cc1a491934b
[ "Apache-2.0" ]
5
2017-10-20T05:52:59.000Z
2020-02-25T10:46:33.000Z
web/olga/analytics/models.py
raccoongang/OLGA
fdc1504912b502c8d789d5478eba8cc1a491934b
[ "Apache-2.0" ]
233
2017-08-14T10:56:16.000Z
2021-04-07T01:09:17.000Z
web/olga/analytics/models.py
raccoongang/acceptor
fdc1504912b502c8d789d5478eba8cc1a491934b
[ "Apache-2.0" ]
2
2018-03-16T22:22:57.000Z
2018-06-15T20:02:56.000Z
""" Models for analytics application. Models used to store and operate all data received from the edx platform. """ from __future__ import division from datetime import date, timedelta import operator import pycountry from django.contrib.postgres.fields import JSONField from django.db import models from django.db.models import Sum, Count, DateField from django.db.models.expressions import F, Func, Value from django.db.models.functions import Trunc def get_last_calendar_day(): """ Get accurate start and end dates, that create segment between them equal to a full last calendar day. Returns: start_of_day (date): Previous day's start. Example for 2017-05-15 is 2017-05-15. end_of_day (date): Previous day's end, it's a next day (tomorrow) toward day's start, that doesn't count in segment. Example for 2017-05-15 is 2017-05-16. """ end_of_day = date.today() start_of_day = end_of_day - timedelta(days=1) return start_of_day, end_of_day class EdxInstallation(models.Model): """ Model that stores overall data received from the edx-platform. """ access_token = models.UUIDField(null=True) platform_name = models.CharField(max_length=255, null=True, blank=True) platform_url = models.URLField(null=True, blank=True) uid = models.CharField(null=True, max_length=32) latitude = models.FloatField( null=True, blank=True, help_text='Latitude coordinate of edX platform follows `float` type. Example: 50.10' ) longitude = models.FloatField( null=True, blank=True, help_text='Longitude coordinate of edX platform follows `float` type. Example: 40.05' ) class InstallationStatistics(models.Model): """ Model that stores statistics data received from the edx-platform. 
""" active_students_amount_day = models.IntegerField(default=0) active_students_amount_week = models.IntegerField(default=0) active_students_amount_month = models.IntegerField(default=0) registered_students = models.IntegerField(default=0) enthusiastic_students = models.IntegerField(default=0) generated_certificates = models.IntegerField(default=0) courses_amount = models.IntegerField(default=0) data_created_datetime = models.DateTimeField() edx_installation = models.ForeignKey(EdxInstallation, on_delete=models.CASCADE) statistics_level = models.CharField( choices=( ('enthusiast', 'enthusiast'), ('paranoid', 'paranoid'), ), max_length=255, default='paranoid' ) students_per_country = JSONField( default=dict, blank=True, null=True, help_text='This field has students country-count accordance. It follows `json` type. ' 'Example: {"RU": 2632, "CA": 18543, "UA": 2011, "null": 1}' ) unspecified_country_name = 'Country is not specified' @staticmethod def get_statistics_top_country(tabular_countries_list): """ Get first country from tabular format country list. List is sorted, first country is a top active students rank country. :param tabular_countries_list: list of the two elements tuples :return: top country name as a string """ if not tabular_countries_list: return '' return tabular_countries_list[0][0] @classmethod def get_stats_for_the_date(cls, statistics_date, edx_installation_object=None): """ Provide statistic model instance for the given Edx installation. :param edx_installation_object: specific installation object. :return: statistic model instance if it is created at the specified day otherwise None """ stat_item = cls.objects.filter( edx_installation=edx_installation_object, data_created_datetime__gte=statistics_date, data_created_datetime__lt=(statistics_date + timedelta(days=1)) ).last() return stat_item @classmethod def timeline(cls): """ Provide timeline in days for plotting on x axis. 
""" timeline_datetimes = cls.objects.order_by( 'data_created_datetime' ).values_list('data_created_datetime', flat=True).distinct() timeline_dates = [x.date().strftime('%Y-%m-%d') for x in timeline_datetimes] # Support case, when data are sent more often, for example when testing every 15 seconds. # Then filter unique and sort back, because timeline should be ordered. timeline_dates = sorted(set(timeline_dates)) return timeline_dates @classmethod def data_per_period(cls): """ Provide total students, courses and instances, from all services per period, day by default. We summarize values per day, because in same day we can receive data from multiple different instances. We suppose, that every instance send data only once per day. """ subquery = cls.objects.annotate( date_in_days=Trunc('data_created_datetime', 'day', output_field=DateField()) ).values('date_in_days').order_by('date_in_days') students_per_day = subquery.annotate( students=Sum('active_students_amount_day') ).values_list('students', flat=True) courses_per_day = subquery.annotate(courses=Sum('courses_amount')).values_list('courses', flat=True) instances_per_day = subquery.annotate( instances=Count('edx_installation__access_token') ).values_list('instances', flat=True) return list(students_per_day), list(courses_per_day), list(instances_per_day) @classmethod def overall_counts(cls): """ Provide total count of all instances, courses and students from all instances per previous calendar day. Returns overall counts as dict. 
{ "instances_count": <int:instances_count>, "courses_count": <int:courses_count>, "students_count": <int:students_count>, "generated_certificates_count": <int:generated_certificates_count>, } """ start_of_day, end_of_day = get_last_calendar_day() all_unique_instances = cls.objects.filter( data_created_datetime__gte=start_of_day, data_created_datetime__lt=end_of_day ) instances_count = all_unique_instances.count() courses_count = all_unique_instances.aggregate( Sum('courses_amount') )['courses_amount__sum'] students_count = all_unique_instances.aggregate( Sum('active_students_amount_day') )['active_students_amount_day__sum'] generated_certificates_count = all_unique_instances.aggregate( Sum('generated_certificates') )['generated_certificates__sum'] registered_students_count = all_unique_instances.aggregate( Sum('registered_students') )['registered_students__sum'] return { "instances_count": instances_count or 0, "courses_count": courses_count or 0, "students_count": students_count or 0, "generated_certificates_count": generated_certificates_count or 0, "registered_students_count": registered_students_count or 0, } @classmethod def get_charts_data(cls): """ Provide data about certificates and users for chart. :return: dict { "19-01-28": [0, 1, 0], "19-01-22": [0, 7, 0], "19-01-31": [0, 0, 0], } """ statistics = cls.objects.all() charts = dict() for item in statistics: charts[item.data_created_datetime.strftime('%y-%m-%d')] = [ item.registered_students, item.generated_certificates, item.enthusiastic_students ] return charts @classmethod def get_students_per_country_stats(cls): """ Total of students amount per country to display on world map from all instances per month. Returns: world_students_per_country (dict): Country-count accordance as pair of key-value. """ # Get list of instances's students per country data as unicode strings. 
queryset = cls.objects.annotate( month_verbose=Func( F('data_created_datetime'), Value('TMMonth YYYY'), function='to_char' ), month_ordering=Func( F('data_created_datetime'), Value('YYYY-MM'), function='to_char' ), ) result_rows = queryset.values_list( 'month_ordering', 'month_verbose', 'students_per_country' ) return cls.aggregate_countries_by_months(result_rows) @classmethod def aggregate_countries_by_months(cls, values_list): """ Aggregate all the months and countries data by the month. :param values_list: list queryset result with three elements for every row :return: dictionary of months with the student countries statistics """ months = {} for month_ordering, month_verbose, countries in values_list: cls.add_month_countries_data( month_ordering, month_verbose, countries, months ) return months @classmethod def add_month_countries_data( cls, month_ordering, month_verbose, countries, months ): """ Add a month data to the months dictionary. :param month_ordering: sortable date key represented as a string :param month_verbose: human friendly date represented as a string :param countries: dictionary of countries where the key is the country code and the value is the amount of the students :param months: dictionary that needs to be updated by the data, passed to the method """ if month_ordering not in months: months[month_ordering] = { 'countries': countries, 'label': month_verbose, } return cls.add_up_new_month_data(months[month_ordering]['countries'], countries) @classmethod def add_up_new_month_data(cls, existing_data, new_data): """ Add a new month data to the resulting data dictionary. 
Adds the counts from the new countries data dictionary to the existing ones or adds new countries if the don't exist in the existing_data """ for existent_key in existing_data.keys(): existing_data[existent_key] += new_data.pop(existent_key, 0) existing_data.update(new_data) @classmethod def create_students_per_country(cls, worlds_students_per_country): """ Create convenient and necessary data formats to render it from view. Graphs require list-format data. """ datamap_format_countries_list = [] tabular_format_countries_map = {} if not worlds_students_per_country: tabular_format_countries_map[cls.unspecified_country_name] = [0, 0] return datamap_format_countries_list, list(tabular_format_countries_map.items()) all_active_students = sum(worlds_students_per_country.values()) for country, count in worlds_students_per_country.items(): student_amount_percentage = cls.get_student_amount_percentage(count, all_active_students) try: country_info = pycountry.countries.get(alpha_2=country) country_alpha_3 = country_info.alpha_3 datamap_format_countries_list += [[country_alpha_3, count]] country_name = country_info.name except KeyError: # Create students without country amount. country_name = cls.unspecified_country_name if country_name in tabular_format_countries_map: tabular_format_countries_map[country_name] = list(map( operator.add, tabular_format_countries_map[country_name], [count, student_amount_percentage] )) else: tabular_format_countries_map[country_name] = [count, student_amount_percentage] # Pop out the unspecified country unspecified_country_values = tabular_format_countries_map.pop(cls.unspecified_country_name, None) # Sort in descending order. 
tabular_format_countries_list = sorted( tabular_format_countries_map.items(), key=lambda x: x[1][0], reverse=True ) if unspecified_country_values: tabular_format_countries_list.append( (cls.unspecified_country_name, unspecified_country_values) ) return datamap_format_countries_list, tabular_format_countries_list @classmethod def get_students_per_country(cls): """ Gather convenient and necessary data formats to render it from view. """ months = cls.get_students_per_country_stats() for month in months.values(): datamap_list, tabular_list = cls.create_students_per_country(month['countries']) month['datamap_countries_list'] = datamap_list month['tabular_countries_list'] = tabular_list month['top_country'] = cls.get_statistics_top_country(tabular_list) month['countries_amount'] = ( len(month['countries']) - (cls.unspecified_country_name in month['countries']) ) return months @staticmethod def get_student_amount_percentage(country_count_in_statistics, all_active_students): """ Calculate student amount percentage based on total countries amount and particular county amount comparison. """ if all_active_students == 0: return 0 students_amount_percentage = int(country_count_in_statistics / all_active_students * 100) return students_amount_percentage @classmethod def get_students_countries_amount(cls, months): """ Provide countries amount from students per country statistics as table. Calculate countries amount in world students per country statistics (from tabular countries list). Tabular format countries list can be empty - countries amount is zero. Tabular format countries list can be not empty - it contains particular country-count accordance and `Country is not specified` field, that has students without country amount. Actually `Country is not specified` field is not a country, so it does not fill up in countries amount. 
""" countries_amount = 0 for month in months.values(): countries = dict(month['tabular_countries_list']) countries.pop(cls.unspecified_country_name, None) countries_amount += len(countries) return countries_amount def update(self, stats): """ Update model from given dictionary and save it. :param stats: dictionary with new data. """ for (key, value) in stats.items(): setattr(self, key, value) self.save()
37.296837
116
0.659339
1,801
15,329
5.360911
0.189895
0.027965
0.027965
0.023304
0.227861
0.160228
0.084102
0.047229
0.023822
0.023822
0
0.010819
0.264401
15,329
410
117
37.387805
0.845424
0.26414
0
0.093333
0
0.004444
0.10287
0.038837
0
0
0
0
0
1
0.071111
false
0
0.04
0
0.275556
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
708475a8fdb41ed7fcd4a6f028a2dcd0edaa89ad
20,560
py
Python
pypowervm/tests/tasks/test_cna.py
stephenfin/pypowervm
68f2b586b4f17489f379534ab52fc56a524b6da5
[ "Apache-2.0" ]
24
2015-12-02T19:49:45.000Z
2021-11-17T11:43:51.000Z
pypowervm/tests/tasks/test_cna.py
stephenfin/pypowervm
68f2b586b4f17489f379534ab52fc56a524b6da5
[ "Apache-2.0" ]
18
2017-03-01T05:54:25.000Z
2022-03-14T17:32:47.000Z
pypowervm/tests/tasks/test_cna.py
stephenfin/pypowervm
68f2b586b4f17489f379534ab52fc56a524b6da5
[ "Apache-2.0" ]
17
2016-02-10T22:53:04.000Z
2021-11-10T09:47:10.000Z
# Copyright 2015, 2017 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from pypowervm import adapter as adp from pypowervm import exceptions as exc from pypowervm.tasks import cna from pypowervm.tests import test_fixtures as fx from pypowervm.tests.test_utils import test_wrapper_abc as twrap from pypowervm.wrappers import entry_wrapper as ewrap from pypowervm.wrappers import logical_partition as pvm_lpar from pypowervm.wrappers import network as pvm_net VSWITCH_FILE = 'fake_vswitch_feed.txt' VNET_FILE = 'fake_virtual_network_feed.txt' class TestCNA(twrap.TestWrapper): """Unit Tests for creating Client Network Adapters.""" mock_adapter_fx_args = {'traits': fx.RemoteHMCTraits} file = VSWITCH_FILE wrapper_class_to_test = pvm_net.VSwitch @mock.patch('pypowervm.tasks.cna._find_or_create_vnet') def test_crt_cna(self, mock_vnet_find): """Tests the creation of Client Network Adapters.""" # Create a side effect that can validate the input into the create # call. 
def validate_of_create(*kargs, **kwargs): self.assertIsNotNone(kargs[0]) self.assertEqual('LogicalPartition', kargs[1]) self.assertEqual('fake_lpar', kwargs.get('root_id')) self.assertEqual('ClientNetworkAdapter', kwargs.get('child_type')) return pvm_net.CNA.bld(self.adpt, 1, 'href').entry self.adpt.create.side_effect = validate_of_create self.adpt.read.return_value = self.resp n_cna = cna.crt_cna(self.adpt, None, 'fake_lpar', 5) self.assertIsNotNone(n_cna) self.assertIsInstance(n_cna, pvm_net.CNA) self.assertEqual(1, mock_vnet_find.call_count) @mock.patch('pypowervm.tasks.cna._find_or_create_vnet') def test_crt_cna_no_vnet_crt(self, mock_vnet_find): """Tests the creation of Client Network Adapters. The virtual network creation shouldn't be done in this flow. """ # PVMish Traits self.adptfx.set_traits(fx.LocalPVMTraits) self.adpt.read.return_value = self.resp # Create a side effect that can validate the input into the create # call. def validate_of_create(*kargs, **kwargs): self.assertIsNotNone(kargs[0]) self.assertEqual('LogicalPartition', kargs[1]) self.assertEqual('fake_lpar', kwargs.get('root_id')) self.assertEqual('ClientNetworkAdapter', kwargs.get('child_type')) return pvm_net.CNA.bld(self.adpt, 1, 'href').entry self.adpt.create.side_effect = validate_of_create n_cna = cna.crt_cna(self.adpt, None, 'fake_lpar', 5, slot_num=1) self.assertIsNotNone(n_cna) self.assertIsInstance(n_cna, pvm_net.CNA) self.assertEqual(0, mock_vnet_find.call_count) def test_find_or_create_vswitch(self): """Validates that a vswitch can be created.""" self.adpt.read.return_value = self.resp # Test that it finds the right vSwitch vswitch_w = cna._find_or_create_vswitch(self.adpt, 'ETHERNET0', True) self.assertIsNotNone(vswitch_w) # Create a side effect that can validate the input into the create call def validate_of_create(*kargs, **kwargs): self.assertIsNotNone(kargs[0]) # Is the vSwitch create self.assertEqual('ManagedSystem', kargs[1]) self.assertEqual('VirtualSwitch', 
kwargs.get('child_type')) # Return a previously created vSwitch... return self.dwrap.entry self.adpt.create.side_effect = validate_of_create # Test the create vswitch_w = cna._find_or_create_vswitch(self.adpt, 'Temp', True) self.assertIsNotNone(vswitch_w) self.assertTrue(self.adpt.create.called) # Make sure that if the create flag is set to false, an error is thrown # when the vswitch can't be found. self.assertRaises(exc.Error, cna._find_or_create_vswitch, self.adpt, 'Temp', False) class TestVNET(twrap.TestWrapper): mock_adapter_fx_args = {'traits': fx.RemoteHMCTraits} file = VNET_FILE wrapper_class_to_test = pvm_net.VNet def test_find_or_create_vnet(self): """Tests that the virtual network can be found/created.""" self.adpt.read.return_value = self.resp fake_vs = mock.Mock() fake_vs.switch_id = 0 fake_vs.name = 'ETHERNET0' fake_vs.related_href = ('https://9.1.2.3:12443/rest/api/uom/' 'ManagedSystem/' '67dca605-3923-34da-bd8f-26a378fc817f/' 'VirtualSwitch/' 'ec8aaa54-9837-3c23-a541-a4e4be3ae489') # This should find a vnet. vnet_resp = cna._find_or_create_vnet(self.adpt, '2227', fake_vs) self.assertIsNotNone(vnet_resp) # Now flip to a CNA that requires a create... 
resp = adp.Response('reqmethod', 'reqpath', 'status', 'reason', {}) resp.entry = ewrap.EntryWrapper._bld( self.adpt, tag='VirtualNetwork').entry self.adpt.create.return_value = resp vnet_resp = cna._find_or_create_vnet(self.adpt, '2228', fake_vs) self.assertIsNotNone(vnet_resp) self.assertEqual(1, self.adpt.create.call_count) def test_find_free_vlan(self): """Tests that a free VLAN can be found.""" self.adpt.read.return_value = self.resp # Mock data specific to the VNET File fake_vs = mock.Mock() fake_vs.name = 'ETHERNET0' fake_vs.related_href = ('https://9.1.2.3:12443/rest/api/uom/' 'ManagedSystem/' '67dca605-3923-34da-bd8f-26a378fc817f/' 'VirtualSwitch/' 'ec8aaa54-9837-3c23-a541-a4e4be3ae489') self.assertEqual(1, cna._find_free_vlan(self.adpt, fake_vs)) @mock.patch('pypowervm.wrappers.network.VNet.wrap') def test_find_free_vlan_mocked(self, mock_vnet_wrap): """Uses lots of mock data for a find vlan.""" self.adpt.read.return_value = mock.Mock() # Helper function to build the vnets. def build_mock_vnets(max_vlan, vswitch_uri): vnets = [] for x in range(1, max_vlan + 1): vnets.append(mock.Mock(vlan=x, associated_switch_uri=vswitch_uri)) return vnets mock_vswitch = mock.Mock(related_href='test_vs') # Test when all the vnet's are on a single switch. mock_vnet_wrap.return_value = build_mock_vnets(3000, 'test_vs') self.assertEqual(3001, cna._find_free_vlan(self.adpt, mock_vswitch)) # Test with multiple switches. The second vswitch with a higher vlan # should not impact the vswitch we're searching for. 
mock_vnet_wrap.return_value = (build_mock_vnets(2000, 'test_vs') + build_mock_vnets(4000, 'test_vs2')) self.assertEqual(2001, cna._find_free_vlan(self.adpt, mock_vswitch)) # Test when all the VLANs are consumed mock_vnet_wrap.return_value = build_mock_vnets(4094, 'test_vs') self.assertRaises(exc.Error, cna._find_free_vlan, self.adpt, mock_vswitch) @mock.patch('pypowervm.tasks.cna._find_free_vlan') def test_assign_free_vlan(self, mock_find_vlan): mock_find_vlan.return_value = 2016 mocked = mock.MagicMock() mock_cna = mock.MagicMock(pvid=31, enabled=False) mock_cna.update.return_value = mock_cna updated_cna = cna.assign_free_vlan(mocked, mocked, mocked, mock_cna) self.assertEqual(2016, updated_cna.pvid) self.assertEqual(mock_cna.enabled, updated_cna.enabled) updated_cna = cna.assign_free_vlan(mocked, mocked, mocked, mock_cna, ensure_enabled=True) self.assertEqual(True, updated_cna.enabled) @mock.patch('pypowervm.wrappers.network.CNA.bld') @mock.patch('pypowervm.tasks.cna._find_free_vlan') @mock.patch('pypowervm.tasks.cna._find_or_create_vswitch') @mock.patch('pypowervm.tasks.partition.get_partitions') def test_crt_p2p_cna( self, mock_get_partitions, mock_find_or_create_vswitch, mock_find_free_vlan, mock_cna_bld): """Tests the crt_p2p_cna.""" # Mock out the data mock_vswitch = mock.Mock(related_href='vswitch_href') mock_find_or_create_vswitch.return_value = mock_vswitch mock_find_free_vlan.return_value = 2050 # Mock the get of the VIOSes mock_vio1 = mock.Mock(uuid='src_io_host_uuid') mock_vio2 = mock.Mock(uuid='vios_uuid2') mock_get_partitions.return_value = [mock_vio1, mock_vio2] mock_cna = mock.MagicMock() mock_trunk1, mock_trunk2 = mock.MagicMock(pvid=2050), mock.MagicMock() mock_trunk1.create.return_value = mock_trunk1 mock_cna_bld.side_effect = [mock_trunk1, mock_trunk2, mock_cna] # Invoke the create mock_ext_ids = {'test': 'value', 'test2': 'value2'} client_adpt, trunk_adpts = cna.crt_p2p_cna( self.adpt, None, 'lpar_uuid', ['src_io_host_uuid', 'vios_uuid2'], 
mock_vswitch, crt_vswitch=True, slot_num=1, mac_addr='aabbccddeeff', ovs_bridge='br-ex', ovs_ext_ids=mock_ext_ids, configured_mtu=1450) # Make sure the client and trunk were 'built' mock_cna_bld.assert_any_call(self.adpt, 2050, 'vswitch_href', slot_num=1, mac_addr='aabbccddeeff') mock_cna_bld.assert_any_call( self.adpt, 2050, 'vswitch_href', trunk_pri=1, dev_name=None, ovs_bridge='br-ex', ovs_ext_ids=mock_ext_ids, configured_mtu=1450) mock_cna_bld.assert_any_call( self.adpt, 2050, 'vswitch_href', trunk_pri=2, dev_name=None, ovs_bridge='br-ex', ovs_ext_ids=mock_ext_ids, configured_mtu=1450) # Make sure they were then created self.assertIsNotNone(client_adpt) self.assertEqual(2, len(trunk_adpts)) mock_cna.create.assert_called_once_with( parent_type=pvm_lpar.LPAR, parent_uuid='lpar_uuid') mock_trunk1.create.assert_called_once_with(parent=mock_vio1) mock_trunk2.create.assert_called_once_with(parent=mock_vio2) @mock.patch('pypowervm.wrappers.network.CNA.bld') @mock.patch('pypowervm.tasks.cna._find_free_vlan') @mock.patch('pypowervm.tasks.cna._find_or_create_vswitch') @mock.patch('pypowervm.tasks.partition.get_partitions') def test_crt_p2p_cna_single( self, mock_get_partitions, mock_find_or_create_vswitch, mock_find_free_vlan, mock_cna_bld): """Tests the crt_p2p_cna with the mgmt lpar and a dev_name.""" # Mock out the data mock_vswitch = mock.Mock(related_href='vswitch_href') mock_find_or_create_vswitch.return_value = mock_vswitch mock_find_free_vlan.return_value = 2050 # Mock the get of the VIOSes mock_vio1 = mock.Mock(uuid='mgmt_lpar_uuid') mock_vio2 = mock.Mock(uuid='vios_uuid2') mock_get_partitions.return_value = [mock_vio1, mock_vio2] mock_cna = mock.MagicMock() mock_trunk1 = mock.MagicMock(pvid=2050) mock_trunk1.create.return_value = mock_trunk1 mock_cna_bld.side_effect = [mock_trunk1, mock_cna] # Invoke the create client_adpt, trunk_adpts = cna.crt_p2p_cna( self.adpt, None, 'lpar_uuid', ['mgmt_lpar_uuid'], mock_vswitch, crt_vswitch=True, mac_addr='aabbccddeeff', 
dev_name='tap-12345') # Make sure the client and trunk were 'built' mock_cna_bld.assert_any_call(self.adpt, 2050, 'vswitch_href', mac_addr='aabbccddeeff', slot_num=None) mock_cna_bld.assert_any_call( self.adpt, 2050, 'vswitch_href', trunk_pri=1, dev_name='tap-12345', ovs_bridge=None, ovs_ext_ids=None, configured_mtu=None) # Make sure they were then created self.assertIsNotNone(client_adpt) self.assertEqual(1, len(trunk_adpts)) mock_cna.create.assert_called_once_with( parent_type=pvm_lpar.LPAR, parent_uuid='lpar_uuid') mock_trunk1.create.assert_called_once_with(parent=mock_vio1) @mock.patch('pypowervm.wrappers.network.CNA.bld') @mock.patch('pypowervm.tasks.cna._find_free_vlan') @mock.patch('pypowervm.tasks.cna._find_or_create_vswitch') @mock.patch('pypowervm.tasks.partition.get_partitions') def test_crt_trunk_with_free_vlan( self, mock_get_partitions, mock_find_or_create_vswitch, mock_find_free_vlan, mock_cna_bld): """Tests the crt_trunk_with_free_vlan on mgmt based VIOS.""" # Mock out the data mock_vswitch = mock.Mock(related_href='vswitch_href') mock_find_or_create_vswitch.return_value = mock_vswitch mock_find_free_vlan.return_value = 2050 # Mock the get of the VIOSes. 
mock_vio1 = mock.Mock(uuid='vios_uuid1') mock_get_partitions.return_value = [mock_vio1] mock_trunk1 = mock.MagicMock(pvid=2050) mock_trunk1.create.return_value = mock_trunk1 mock_cna_bld.return_value = mock_trunk1 # Invoke the create mock_ext_id = {'test1': 'value1', 'test2': 'value2'} trunk_adpts = cna.crt_trunk_with_free_vlan( self.adpt, None, ['vios_uuid1'], mock_vswitch, crt_vswitch=True, dev_name='tap-12345', ovs_bridge='br-int', ovs_ext_ids=mock_ext_id, configured_mtu=1450) # Make sure the client and trunk were 'built' mock_cna_bld.assert_any_call( self.adpt, 2050, 'vswitch_href', trunk_pri=1, dev_name='tap-12345', ovs_bridge='br-int', ovs_ext_ids=mock_ext_id, configured_mtu=1450) # Make sure that the trunk was created self.assertEqual(1, len(trunk_adpts)) mock_trunk1.create.assert_called_once_with(parent=mock_vio1) @mock.patch('pypowervm.wrappers.network.CNA.get') def test_find_trunk_on_lpar(self, mock_cna_get): parent_wrap = mock.MagicMock() m1 = mock.Mock(is_trunk=True, pvid=2, vswitch_id=2) m2 = mock.Mock(is_trunk=False, pvid=3, vswitch_id=2) m3 = mock.Mock(is_trunk=True, pvid=3, vswitch_id=1) m4 = mock.Mock(is_trunk=True, pvid=3, vswitch_id=2) mock_cna_get.return_value = [m1, m2, m3] self.assertIsNone(cna._find_trunk_on_lpar(self.adpt, parent_wrap, m4)) self.assertTrue(mock_cna_get.called) mock_cna_get.reset_mock() mock_cna_get.return_value = [m1, m2, m3, m4] self.assertEqual(m4, cna._find_trunk_on_lpar(self.adpt, parent_wrap, m4)) self.assertTrue(mock_cna_get.called) @mock.patch('pypowervm.tasks.cna._find_trunk_on_lpar') @mock.patch('pypowervm.tasks.partition.get_mgmt_partition') @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get') def test_find_trunks(self, mock_vios_get, mock_get_mgmt, mock_find_trunk): # Mocked responses can be simple, since they are just fed into the # _find_trunk_on_lpar mock_vios_get.return_value = [mock.MagicMock(), mock.MagicMock()] mock_get_mgmt.return_value = mock.MagicMock() # The responses back from the find trunk. 
Make it an odd trunk # priority ordering to make sure we sort properly v1 = mock.Mock(trunk_pri=3) c1, c2 = mock.Mock(trunk_pri=1), mock.Mock(trunk_pri=2) mock_find_trunk.side_effect = [v1, c1, c2] # Invoke the method. resp = cna.find_trunks(self.adpt, mock.Mock(pvid=2)) # Make sure four calls to the find trunk self.assertEqual(3, mock_find_trunk.call_count) # Order of the response is important. Should be based off of trunk # priority self.assertEqual([c1, c2, v1], resp) @mock.patch('pypowervm.wrappers.network.CNA.get') def test_find_all_trunks_on_lpar(self, mock_cna_get): parent_wrap = mock.MagicMock() m1 = mock.Mock(is_trunk=True, vswitch_id=2) m2 = mock.Mock(is_trunk=False, vswitch_id=2) m3 = mock.Mock(is_trunk=True, vswitch_id=1) m4 = mock.Mock(is_trunk=True, vswitch_id=2) mock_cna_get.return_value = [m1, m2, m3, m4] returnVal = [m1, m3, m4] self.assertEqual(returnVal, cna._find_all_trunks_on_lpar(self.adpt, parent_wrap)) mock_cna_get.reset_mock() mock_cna_get.return_value = [m1, m2, m3, m4] self.assertEqual([m3], cna._find_all_trunks_on_lpar(self.adpt, parent_wrap=parent_wrap, vswitch_id=1)) @mock.patch('pypowervm.wrappers.network.CNA.get') @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get') @mock.patch('pypowervm.wrappers.logical_partition.LPAR.get') def test_find_cna_wraps(self, mock_lpar_get, mock_vios_get, mock_cna_get): # Mocked responses are simple since they are only used for # pvm_net.CNA.get mock_lpar_get.return_value = [mock.MagicMock()] mock_vios_get.return_value = [mock.MagicMock()] # Mocked cna_wraps m1 = mock.Mock(uuid=2, pvid=2, vswitch_id=2) m2 = mock.Mock(uuid=3, pvid=1, vswitch_id=1) m3 = mock.Mock(uuid=1, pvid=1, vswitch_id=1) mock_cna_get.side_effect = [[m1, m2], [m3]] mock_trunk = mock.Mock(adapter=self.adpt, uuid=1, pvid=1, vswitch_id=1) self.assertEqual([m1, m2, m3], cna._find_cna_wraps(mock_trunk)) mock_cna_get.side_effect = [[m1, m2], [m3]] self.assertEqual([m2, m3], cna._find_cna_wraps(mock_trunk, 1)) 
@mock.patch('pypowervm.tasks.cna._find_cna_wraps') def test_find_cnas_on_trunk(self, mock_find_wraps): # Mocked cna_wraps m1 = mock.Mock(uuid=2, pvid=2, vswitch_id=2) m2 = mock.Mock(uuid=3, pvid=1, vswitch_id=1) m3 = mock.Mock(uuid=1, pvid=1, vswitch_id=1) mock_find_wraps.return_value = [m1, m2, m3] mock_trunk = mock.Mock(adapter=self.adpt, uuid=1, pvid=1, vswitch_id=1) self.assertEqual([m2], cna.find_cnas_on_trunk(mock_trunk)) mock_find_wraps.return_value = [m1, m3] self.assertEqual([], cna.find_cnas_on_trunk(mock_trunk)) mock_trunk = mock.Mock(adapter=self.adpt, uuid=3, pvid=3, vswitch_id=3) self.assertEqual([], cna.find_cnas_on_trunk(mock_trunk)) @mock.patch('pypowervm.tasks.cna._find_cna_wraps') @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get') @mock.patch('pypowervm.tasks.partition.get_mgmt_partition') @mock.patch('pypowervm.tasks.cna._find_all_trunks_on_lpar') @mock.patch('pypowervm.wrappers.network.VSwitch.search') def test_find_orphaned_trunks(self, mock_vswitch, mock_trunks, mock_get_mgmt, mock_vios_get, mock_wraps): mock_vswitch.return_value = mock.MagicMock(switch_id=1) mock_get_mgmt.return_value = mock.MagicMock() mock_vios_get.return_value = [mock.MagicMock()] # Mocked cna_wraps m1 = mock.Mock(is_trunk=True, uuid=2, pvid=2, vswitch_id=1) m2 = mock.Mock(is_trunk=False, uuid=3, pvid=3, vswitch_id=1) m3 = mock.Mock(is_trunk=True, uuid=1, pvid=1, vswitch_id=1) m4 = mock.Mock(is_trunk=False, uuid=4, pvid=1, vswitch_id=1) mock_wraps.return_value = [m1, m2, m3, m4] mock_trunks.side_effect = [[m1, m3], []] self.assertEqual([m1], cna.find_orphaned_trunks(self.adpt, mock.MagicMock))
45.286344
79
0.655302
2,827
20,560
4.483905
0.125575
0.027769
0.0426
0.03266
0.698959
0.651231
0.6065
0.578731
0.524613
0.485405
0
0.028905
0.242802
20,560
453
80
45.386313
0.785329
0.135165
0
0.490132
0
0
0.125715
0.077446
0
0
0
0
0.203947
1
0.065789
false
0
0.029605
0
0.134868
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7084a93fe5b577c679276f294fe5764e40d39b1b
390
py
Python
src/dispatch/plugins/dispatch_test/participant_group.py
roor0/dispatch
12c4f567096411abe62abaf61c7c124496764346
[ "Apache-2.0" ]
3,417
2020-02-23T22:54:47.000Z
2022-03-31T13:01:01.000Z
src/dispatch/plugins/dispatch_test/participant_group.py
roor0/dispatch
12c4f567096411abe62abaf61c7c124496764346
[ "Apache-2.0" ]
607
2020-02-24T14:27:02.000Z
2022-03-30T19:15:39.000Z
src/dispatch/plugins/dispatch_test/participant_group.py
roor0/dispatch
12c4f567096411abe62abaf61c7c124496764346
[ "Apache-2.0" ]
359
2020-02-24T19:04:43.000Z
2022-03-29T06:48:12.000Z
from dispatch.plugins.bases import ParticipantGroupPlugin class TestParticipantGroupPlugin(ParticipantGroupPlugin): title = "Dispatch Test Plugin - Participant Group" slug = "test-participant-group" def create(self, participants, **kwargs): return def add(self, participant, **kwargs): return def remove(self, participant, **kwargs): return
24.375
57
0.702564
37
390
7.405405
0.594595
0.131387
0.109489
0.19708
0
0
0
0
0
0
0
0
0.207692
390
15
58
26
0.886731
0
0
0.3
0
0
0.158974
0.05641
0
0
0
0
0
1
0.3
false
0
0.1
0.3
1
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
3
7084e27c49595c6dd313ddb9fd27d9cdb9c9e2f7
17,707
py
Python
tools/stats_gen_lib.py
mtak-/lockfree-stm
00cd5f9a056e999f0cd140106c1d66b321d6fd47
[ "MIT" ]
9
2016-11-14T23:35:30.000Z
2019-01-18T23:21:08.000Z
tools/stats_gen_lib.py
mtak-/lockfree-stm
00cd5f9a056e999f0cd140106c1d66b321d6fd47
[ "MIT" ]
3
2017-01-09T01:22:57.000Z
2017-03-20T04:50:05.000Z
tools/stats_gen_lib.py
mtak-/lockfree-stm
00cd5f9a056e999f0cd140106c1d66b321d6fd47
[ "MIT" ]
null
null
null
#!/usr/bin/python from string import Formatter _STATS_TEMPLATE = '''#ifndef {INCLUDE_GUARD} #define {INCLUDE_GUARD} // clang-format off #ifdef {MACRO_PREFIX}ON {INCLUDES} #include <iomanip> #include <sstream> #include <string> #include <vector> // comment out any stats you don't want, and things will be just dandy {MACROS_ON} #define {MACRO_PREFIX}PUBLISH_RECORD() \\ do {{ \\ {NS_ACCESS}{CLASS_NAME}::get().publish({NS_ACCESS}tls_record()); \\ {NS_ACCESS}tls_record() = {{}}; \\ }} while(0) \\ /**/ #define {MACRO_PREFIX}CLEAR() {NS_ACCESS}{CLASS_NAME}::get().clear() #ifndef {MACRO_PREFIX}DUMP #include <iostream> #define {MACRO_PREFIX}DUMP() (std::cout << {NS_ACCESS}{CLASS_NAME}::get().results()) #endif /* {MACRO_PREFIX}DUMP */ #else #define {MACRO_PREFIX}PUBLISH_RECORD() /**/ #define {MACRO_PREFIX}CLEAR() /**/ #ifndef {MACRO_PREFIX}DUMP #define {MACRO_PREFIX}DUMP() /**/ #endif /* {MACRO_PREFIX}DUMP */ #endif /* {MACRO_PREFIX}ON */ {MACROS_OFF} // clang-format on #ifdef {MACRO_PREFIX}ON {NAMESPACE_BEGIN} struct {CLASS_NAME}_tls_record {{ {THREAD_RECORD_MEMBERS} {CLASS_NAME}_tls_record() noexcept = default; {THREAD_RECORD_MEMBER_FUNCTIONS} std::string results() const {{ std::ostringstream ostr; ostr {THREAD_RECORD_STREAM_OUTPUT}; return ostr.str(); }} }}; inline {CLASS_NAME}_tls_record& tls_record() noexcept {{ static LSTM_THREAD_LOCAL {CLASS_NAME}_tls_record record{{}}; return record; }} struct {CLASS_NAME} {{ private: using records_t = std::vector<{CLASS_NAME}_tls_record>; using records_iter = typename records_t::iterator; using records_value_type = typename records_t::value_type; records_t records_; {CLASS_NAME}() = default; {CLASS_NAME}(const {CLASS_NAME}&) = delete; {CLASS_NAME}& operator=(const {CLASS_NAME}&) = delete; std::uint64_t total_count(std::function<std::size_t(const {CLASS_NAME}_tls_record*)> accessor) const noexcept {{ std::size_t result = 0; for (auto& tid_record : records_) result += accessor(&tid_record); return result; }} std::uint64_t 
max(std::function<std::size_t(const {CLASS_NAME}_tls_record*)> accessor) const noexcept {{ std::size_t result = 0; for (auto& tid_record : records_) result = std::max(result, accessor(&tid_record)); return result; }} public: static {CLASS_NAME}& get() noexcept {{ static {CLASS_NAME} singleton; return singleton; }} inline void publish({CLASS_NAME}_tls_record record) noexcept {{ records_.emplace_back(std::move(record)); }} {TRANSACTION_LOG_MEMBER_FUNCTIONS} std::size_t thread_count() const noexcept {{ return records_.size(); }} const records_t& records() const noexcept {{ return records_; }} void clear() noexcept {{ records_.clear(); }} std::string results(bool per_thread = true) const {{ std::ostringstream ostr; ostr {TRANSACTION_LOG_STREAM_OUTPUT}; if (per_thread) {{ std::size_t i = 0; for (auto& record : records_) {{ ostr << "--== Thread: " << std::setw(4) << i++ << " ==--" << '\\n'; ostr << record.results() << '\\n'; }} }} return ostr.str(); }} }}; {NAMESPACE_END} #endif /* {MACRO_PREFIX}ON */ #endif /* {INCLUDE_GUARD} */''' types = { 'counter' : 'std::uint64_t', 'max' : 'std::uint64_t', 'sum' : 'std::uint64_t', } def indent(s, amount = 1): return '\n'.join([' ' * amount * 4 + x for x in s.splitlines()]) def get_pretty_name(stat): return ' '.join([w.capitalize() for w in stat.split()]) def get_mem_name(stat): return stat.lower().replace(' ', '_') def get_mem_fun_for_stat(compound_stat): return get_mem_name(compound_stat) + '()' def get_mem_or_func_call(stat, stats, compound_stats): if stat in stats: return get_mem_name(stat) assert(stat in compound_stats) return get_mem_fun_for_stat(stat) def get_macro_name(stat, macro_prefix): return macro_prefix + stat.upper().replace(' ', '_') def get_macro_params(stat, stats_kinds): param = { 'counter' : '', 'max' : 'amt', 'sum' : 'amt', } return param[stats_kinds[stat]] def get_macro_define(stat, stats_kinds, macro_prefix): return '#define %s(%s)' % ( get_macro_name(stat, macro_prefix), get_macro_params(stat, stats_kinds) ) 
def add_trailing_whitespace(strings): max_length = max([len(x) for x in strings]) return ['{0: <{1}}'.format(x, max_length + 1) for x in strings] def get_macro_defines(stats, stats_kinds, macro_prefix): return add_trailing_whitespace([get_macro_define(stat, stats_kinds, macro_prefix) for stat in stats]) def get_macro_expansion_on(stat, stats_kinds, ns_access): param = { 'counter' : '++{NS_ACCESS}tls_record().{MEM_NAME}', 'max' : '{NS_ACCESS}tls_record().{MEM_NAME} = std::max({NS_ACCESS}tls_record().{MEM_NAME}, static_cast<std::uint64_t>({PARAMS}))', 'sum' : '{NS_ACCESS}tls_record().{MEM_NAME} += {PARAMS}', } return param[stats_kinds[stat]].format( NS_ACCESS = ns_access, MEM_NAME = get_mem_name(stat), PARAMS = get_macro_params(stat, stats_kinds) ) def get_macros_on(stats, stats_kinds, ns_access, macro_prefix): param = { 'counter' : '++{MEM_NAME}', 'max' : '{MEM_NAME} = std::max({MEM_NAME}, {PARAMS})', 'sum' : '{MEM_NAME} += {PARAMS}', } defines = get_macro_defines(stats, stats_kinds, macro_prefix) return '\n'.join([define + get_macro_expansion_on(stat, stats_kinds, ns_access) for stat, define in zip(stats, defines)]) def get_macros_off(stats, stats_kinds, macro_prefix): _FORMAT_STRING = '''#ifndef {MACRO_NAME} {MACRO_DEFINE} /**/ #endif''' result = [] for stat in stats: result.append(_FORMAT_STRING.format( MACRO_NAME = get_macro_name(stat, macro_prefix), MACRO_DEFINE = get_macro_define(stat, stats_kinds, macro_prefix), )) return '\n'.join(result) def get_thread_record_mems(stats, stats_kinds): initial_value = { 'counter' : '0', 'max' : '0', 'sum' : '0', } _FORMAT_STRING = '%s %s{%s};' return '\n'.join([_FORMAT_STRING % (types[stats_kinds[stat]], get_mem_name(stat), initial_value[stats_kinds[stat]]) for stat in stats]) def map_get_mem_or_func_call(stat_list, stats, compound_stats): return [get_mem_or_func_call(x, stats, compound_stats) for x in stat_list] def get_assert(op, operands): assert_kind = { '/' : ' <= ', '-' : ' >= ', '+' : None, } mems = 
map_get_mem_or_func_call(operands, stats, compound_stats) if assert_kind[op] != None: return 'LSTM_ASSERT(%s);\n ' % assert_kind[op].join(mems) return '' def get_contents(stats, compound_stats, stat_data): op = stat_data['op'] operands = stat_data['operands'] casted = map_get_mem_or_func_call(operands, stats, compound_stats) if op == '/': casted[-1] = 'float(%s)' % casted[-1] return (' ' + op + ' ').join(casted) def get_thread_record_mem_fun(compound_stat, stats, compound_stats, compound_stats_kinds): _FORMAT_STRING = '''auto {NAME} const noexcept {{ return {CONTENTS}; }}''' return _FORMAT_STRING.format( NAME = get_mem_fun_for_stat(compound_stat), CONTENTS = get_contents(stats, compound_stats, compound_stats_kinds[compound_stat]), ) def get_thread_record_mem_funs(stats, compound_stats, compound_stats_kinds): return '\n'.join([get_thread_record_mem_fun(compound_stat, stats, compound_stats, compound_stats_kinds) for compound_stat in compound_stats]) def get_thread_record_stream_output(all_stats, stats, compound_stats): names = add_trailing_whitespace([get_pretty_name(s) + ':' for s in all_stats]) values = add_trailing_whitespace([get_mem_or_func_call(s, stats, compound_stats) for s in all_stats]) return '\n'.join(['<< " ' + name + '" << ' + value + ' << \'\\n\'' for name, value in zip(names, values)]) def get_singleton_class_mem_fun_contents(class_name, stat, stats, stats_kinds, compound_stats_kinds): if stat in stats: if stats_kinds[stat] == 'counter' or stats_kinds[stat] == 'sum': return 'total_count(&%s_tls_record::%s)' % (class_name, get_mem_name(stat)) elif stats_kinds[stat] == 'max': return 'this->max(&%s_tls_record::%s)' % (class_name, get_mem_name(stat)) else: assert(false) stat_data = compound_stats_kinds[stat] op = stat_data['op'] operands = map(get_mem_fun_for_stat, stat_data['operands']) if op == '/': operands[-1] = 'float(%s)' % operands[-1] return (' ' + op + ' ').join(operands) def get_singleton_class_mem_fun(class_name, stat, stats, stats_kinds, 
compound_stats_kinds): _FORMAT_STRING = '''auto {NAME} const noexcept {{ return {CONTENTS}; }}''' return _FORMAT_STRING.format( NAME = get_mem_fun_for_stat(stat), CONTENTS = get_singleton_class_mem_fun_contents(class_name, stat, stats, stats_kinds, compound_stats_kinds), ) def get_singleton_class_mem_funs(class_name, stats, compound_stats, stats_kinds, compound_stats_kinds): return '\n'.join([get_singleton_class_mem_fun(class_name, stat, stats, stats_kinds, compound_stats_kinds) for stat in stats] + [get_singleton_class_mem_fun(class_name, compound_stat, stats, stats_kinds, compound_stats_kinds) for compound_stat in compound_stats]) def get_singleton_class_stream_output(all_stats): names = add_trailing_whitespace([get_pretty_name(s) + ':' for s in all_stats]) values = add_trailing_whitespace([get_mem_fun_for_stat(s) for s in all_stats]) return '\n'.join(['<< "' + name + '" << ' + value + ' << \'\\n\'' for name, value in zip(names, values)]) def gen_stats( the_stats, include_guard, class_name = 'perf_stats', macro_prefix = '', includes = '', namespace_begin = '', namespace_end = '', namespace_access = '', stat_output_ordering = [], stats_member_ordering = [], compound_stats_member_func_ordering = [] ): stats = stats_member_ordering stats += [k for k,v in the_stats.items() if type(v) == type('') and not k in stats] compound_stats = compound_stats_member_func_ordering compound_stats += [k for k,v in the_stats.items() if type(v) != type('') and not k in compound_stats] stats_kinds = {k:v for k,v in the_stats.items() if type(v) == type('')} compound_stats_kinds = {k:v for k,v in the_stats.items() if type(v) != type('')} all_stats = stat_output_ordering all_stats += [k for k in the_stats.keys() if not k in all_stats] assert(sorted(all_stats) == sorted(compound_stats + stats)) return _STATS_TEMPLATE.format( INCLUDE_GUARD = include_guard, MACRO_PREFIX = macro_prefix, INCLUDES = indent(includes), CLASS_NAME = class_name, NAMESPACE_BEGIN = namespace_begin, NAMESPACE_END = 
namespace_end, NS_ACCESS = namespace_access, MACROS_ON = indent(get_macros_on(stats, stats_kinds, namespace_access, macro_prefix), 1), MACROS_OFF = get_macros_off(stats, stats_kinds, macro_prefix), THREAD_RECORD_MEMBERS = indent(get_thread_record_mems(stats, stats_kinds), 2), THREAD_RECORD_MEMBER_FUNCTIONS = indent(get_thread_record_mem_funs(stats, compound_stats, compound_stats_kinds), 2), THREAD_RECORD_STREAM_OUTPUT = indent(get_thread_record_stream_output(all_stats, stats, compound_stats), 4), TRANSACTION_LOG_MEMBER_FUNCTIONS = indent(get_singleton_class_mem_funs(class_name, stats, compound_stats, stats_kinds, compound_stats_kinds), 2), TRANSACTION_LOG_STREAM_OUTPUT = indent(get_singleton_class_stream_output(all_stats), 4), ) def get_stats_func(stat, compound_stats, compound_stats_kinds): if stat in compound_stats: if compound_stats_kinds[stat]['op'] == '/': return 'statsd_gauged' return 'statsd_gauge' def get_singleton_statsd_output(all_stats, compound_stats, compound_stats_kinds): _FORMAT_NAME = 'const_cast<char*>(LSTM_TESTNAME ".process.{NAME}")' stats_funcs = [get_stats_func(s, compound_stats, compound_stats_kinds) for s in all_stats] names = [_FORMAT_NAME.format(NAME = get_mem_name(s)) for s in all_stats] values = ['stats.' + get_mem_fun_for_stat(s) for s in all_stats] return '\n'.join([stats_func + '(link, ' + name + ', ' + value + ');' for stats_func, name, value in zip(stats_funcs, names, values)]) def get_thread_record_statsd_output(all_stats, stats, compound_stats, compound_stats_kinds): _FORMAT_NAME = 'const_cast<char*>((LSTM_TESTNAME ".thread" + std::to_string(i) + ".{NAME}").c_str())' stats_funcs = [get_stats_func(s, compound_stats, compound_stats_kinds) for s in all_stats] names = [_FORMAT_NAME.format(NAME = get_mem_name(s)) for s in all_stats] values = ['record.' 
+ get_mem_or_func_call(s, stats, compound_stats) for s in all_stats] header = ''' int i = 0; for (auto& record : stats.records()) {{''' body = indent('\n'.join([stats_func + '(link, ' + name + ', ' + value + ');' for stats_func, name, value in zip(stats_funcs, names, values)])) footer = ''' ++i; }}''' return '\n'.join([header, body, footer]) def gen_statsd_output( the_stats, include_guard, class_name = 'perf_stats', macro_prefix = '', namespace_begin = '', namespace_end = '', namespace_access = '', stat_output_ordering = [], stats_member_ordering = [], compound_stats_member_func_ordering = [] ): stats = stats_member_ordering stats += [k for k,v in the_stats.items() if type(v) == type('') and not k in stats] compound_stats = compound_stats_member_func_ordering compound_stats += [k for k,v in the_stats.items() if type(v) != type('') and not k in compound_stats] stats_kinds = {k:v for k,v in the_stats.items() if type(v) == type('')} compound_stats_kinds = {k:v for k,v in the_stats.items() if type(v) != type('')} all_stats = stat_output_ordering all_stats += [k for k in the_stats.keys() if not k in all_stats] assert(sorted(all_stats) == sorted(compound_stats + stats)) result = '\n'.join([ get_singleton_statsd_output(all_stats, compound_stats, compound_stats_kinds), get_thread_record_statsd_output(all_stats, stats, compound_stats, compound_stats_kinds)]) return indent(result, 2)
39.612975
142
0.550969
1,988
17,707
4.556841
0.101107
0.090407
0.073518
0.040181
0.62667
0.549178
0.49575
0.456011
0.440888
0.411524
0
0.003135
0.333371
17,707
447
143
39.612975
0.764317
0.000904
0
0.365333
0
0.010667
0.296761
0.063761
0
0
0
0
0.021333
1
0.077333
false
0
0.002667
0.026667
0.186667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7084f8adc7a1f6ea7b5b0235bc00bc3a8d6d415b
3,138
py
Python
tests/template.py
NehaKeshan/nanomine-graph
d3ebd2415aad95d481bfbde12c279d6176af93ed
[ "MIT" ]
null
null
null
tests/template.py
NehaKeshan/nanomine-graph
d3ebd2415aad95d481bfbde12c279d6176af93ed
[ "MIT" ]
null
null
null
tests/template.py
NehaKeshan/nanomine-graph
d3ebd2415aad95d481bfbde12c279d6176af93ed
[ "MIT" ]
null
null
null
from . import ingest_tester from whyis_test_case import WhyisTestCase class IngestTestSetup(WhyisTestCase): @classmethod def setUpClass(cls): print("Setting Up Class") cls.maxDiff = None cls.expected_data = ingest_tester.autoparse(cls.file_under_test) def setUp(self): ingest_tester.setUp(self, self.file_under_test) def run_agent(self, agent, nanopublication=None): app = self.app agent.dry_run = True agent.app = app results = [] if nanopublication is not None: results.extend(agent.process_graph(nanopublication)) elif agent.query_predicate == app.NS.whyis.globalChangeQuery: results.extend(agent.process_graph(app.db)) else: print("Running as update agent") for resource in agent.getInstances(app.db): print(resource.identifier) for np_uri, in app.db.query('''select ?np where { graph ?assertion { ?e ?p ?o.} ?np a np:Nanopublication; np:hasAssertion ?assertion. }''', initBindings={'e': resource.identifier}, initNs=app.NS.prefixes): print(np_uri) np = app.nanopub_manager.get(np_uri) results.extend(agent.process_graph(np)) return results class IngestTestTests(IngestTestSetup): def test_nanocomposites(self): ingest_tester.test_nanocomposites(self) def test_authors(self): ingest_tester.test_authors(self, self.expected_data["authors"]) def test_language(self): ingest_tester.test_language(self, self.expected_data["language"]) def test_keywords(self): ingest_tester.test_keywords(self, self.expected_data["keywords"]) def test_devices(self): ingest_tester.test_devices(self, self.expected_data["equipment"]) def test_volume(self): ingest_tester.test_volume(self, self.expected_data["journ_vol"]) def test_matrix_chemical_names(self): ingest_tester.test_matrix_chemical_names(self) def test_matrix_trade_names(self): ingest_tester.test_matrix_trade_names(self) def test_filler_chemical_names(self): ingest_tester.test_filler_chemical_names(self) def test_filler_trade_names(self): ingest_tester.test_filler_trade_names(self) # TODO Fix or remove def test_temperatures(self): 
ingest_tester.test_temperatures(self) def test_abbreviations(self): ingest_tester.test_abbreviations(self) def test_manufacturers(self): ingest_tester.test_manufacturers(self) def test_complete_material(self): ingest_tester.test_complete_material(self) # TODO Fix or remove def test_filler_processing(self): ingest_tester.test_filler_processing(self) def test_viscoelastic_measurement_mode(self): ingest_tester.test_viscoelastic_measurement_mode(self) # TODO add the following tests once completed # test_stress # test_melt_viscosity # test_rheometer_mode # test_specific_surface_area # test_dielectric_real_permittivity
33.031579
73
0.688655
376
3,138
5.457447
0.305851
0.111111
0.132554
0.155945
0.241228
0.098441
0.025341
0
0
0
0
0
0.226259
3,138
94
74
33.382979
0.84514
0.061823
0
0
0
0
0.068484
0
0
0
0
0.010638
0.030769
1
0.292308
false
0
0.030769
0
0.369231
0.061538
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
0
0
0
0
0
0
2
708607f4e8ad0abd68f63300e62f77ebd76b2725
1,200
py
Python
src/frame/mysql_conf_parser.py
f304646673/scheduler_frame
0a9ba45a6523cbf9bd50e9fa8e08c8bfd2a9204a
[ "Apache-2.0" ]
9
2017-05-14T05:12:32.000Z
2022-01-13T08:11:07.000Z
src/frame/mysql_conf_parser.py
f304646673/scheduler_frame
0a9ba45a6523cbf9bd50e9fa8e08c8bfd2a9204a
[ "Apache-2.0" ]
null
null
null
src/frame/mysql_conf_parser.py
f304646673/scheduler_frame
0a9ba45a6523cbf9bd50e9fa8e08c8bfd2a9204a
[ "Apache-2.0" ]
7
2017-08-28T08:31:43.000Z
2020-03-03T07:18:37.000Z
import copy import ConfigParser import conf_keys from loggingex import LOG_WARNING class mysql_conf_parser: def parse(self, job_conf_path): cp = ConfigParser.SafeConfigParser() cp.read(job_conf_path) sections = cp.sections() conns_info = {} for section in sections: conn_info = {} for key in conf_keys.mysql_conn_keys: if False == cp.has_option(section, key): LOG_WARNING() continue conn_info[key] = cp.get(section, key) if cp.has_option(section, "range_max"): range_max = int(cp.get(section, "range_max")) db_name_base = conn_info["db"] for index in range(0, range_max): conn_info["db"] = db_name_base + "_" + str(index) section_index_name = section + "_" + str(index) conns_info[section_index_name] = copy.deepcopy(conn_info) else: conns_info[section] = conn_info return conns_info if __name__ == "__main__": a = mysql_conf_parser() print a.parse("../../conf/mysql_manager.conf") pass
33.333333
77
0.564167
141
1,200
4.453901
0.361702
0.076433
0.047771
0.057325
0
0
0
0
0
0
0
0.001263
0.34
1,200
35
78
34.285714
0.791667
0
0
0
0
0
0.050833
0.024167
0
0
0
0
0
0
null
null
0.032258
0.129032
null
null
0.032258
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
7086ab4940faf1fbffd4020ba41b2d8ad3df6cb7
596
py
Python
n3-torch/ffi/python/n3/std/nn/conv/conv2d.py
kerryeon/n3-rs
e3c7325a89ec3eaffae356d2e4dcc4c63ba8caa1
[ "MIT" ]
1
2022-01-18T13:18:58.000Z
2022-01-18T13:18:58.000Z
n3-torch/ffi/python/n3/std/nn/conv/conv2d.py
kerryeon/n3-rs
e3c7325a89ec3eaffae356d2e4dcc4c63ba8caa1
[ "MIT" ]
null
null
null
n3-torch/ffi/python/n3/std/nn/conv/conv2d.py
kerryeon/n3-rs
e3c7325a89ec3eaffae356d2e4dcc4c63ba8caa1
[ "MIT" ]
null
null
null
import torch.nn as nn from n3 import ExternNode class Conv2D(ExternNode): kernel_size: int padding: int stride: int input_channels: int output_channels: int bias: bool def __init__(self, **kwargs): super().__init__(**kwargs) self._inner = nn.Conv2d( in_channels=self.input_channels, out_channels=self.output_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, bias=self.bias, ) def forward(self, x): return self._inner(x)
19.866667
46
0.600671
70
596
4.842857
0.428571
0.088496
0
0
0
0
0
0
0
0
0
0.007299
0.310403
596
29
47
20.551724
0.817518
0
0
0
0
0
0
0
0
0
0
0
0
1
0.095238
false
0
0.095238
0.047619
0.571429
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
7086b67b426ff9f8307d4800efd294b1e2f817c3
6,482
py
Python
auto-traveler.py
biomadeira/auto-traveler
38f2c086923925d9819c07bdef297ec24f2ec58f
[ "Apache-2.0" ]
null
null
null
auto-traveler.py
biomadeira/auto-traveler
38f2c086923925d9819c07bdef297ec24f2ec58f
[ "Apache-2.0" ]
null
null
null
auto-traveler.py
biomadeira/auto-traveler
38f2c086923925d9819c07bdef297ec24f2ec58f
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 """ Copyright [2009-present] EMBL-European Bioinformatics Institute Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import glob import os import click from utils import crw, rfam, ribovision, config from utils.generate_model_info import generate_model_info def get_ribotyper_output(fasta_input, output_folder, cm_library): """ Run ribotyper on the fasta sequences to select the best matching covariance model. """ ribotyper_long_out = os.path.join(output_folder, os.path.basename(output_folder) + '.ribotyper.long.out') if not os.path.exists(ribotyper_long_out): cmd = 'ribotyper.pl --skipval -i {cm_library}/modelinfo.txt -f {fasta_input} {output_folder}'.format( cm_library=cm_library, fasta_input=fasta_input, output_folder=output_folder ) print(cmd) os.system(cmd) f_out = os.path.join(output_folder, 'hits.txt') cmd = "cat %s | grep -v '^#' | grep -v MultipleHits | grep PASS | awk -v OFS='\t' '{print $2, $8, $3}' > %s" % (ribotyper_long_out, f_out) os.system(cmd) return f_out def symlink_cms(source): for cm_file in glob.glob(os.path.join(source, '*.cm')): if 'all.cm' not in cm_file: target = os.path.join(os.path.abspath(config.CM_LIBRARY), os.path.basename(cm_file)) if not os.path.exists(target): cmd = 'ln -s {} {}'.format(os.path.abspath(cm_file), target) os.system(cmd) @click.group() def cli(): pass @cli.command() def setup(): if not os.path.exists(config.CM_LIBRARY): os.makedirs(config.CM_LIBRARY) rfam.setup() crw.setup() symlink_cms(config.RIBOVISION_CM_LIBRARY) 
symlink_cms(config.CRW_CM_LIBRARY) generate_model_info(cm_library=config.CM_LIBRARY) print('Done') @cli.command() @click.argument('fasta-input', type=click.Path()) @click.argument('output-folder', type=click.Path()) def draw(fasta_input, output_folder): """ Single entry point for visualising 2D for an RNA sequence. Selects a template and runs Traveler using CRW, LSU, or Rfam libraries. """ os.system('mkdir -p %s' % output_folder) with open(get_ribotyper_output(fasta_input, output_folder, config.CM_LIBRARY), 'r') as f: for line in f.readlines(): rnacentral_id, model_id, _ = line.split('\t') print(line) if model_id.count('.') >= 2: crw.visualise_crw(fasta_input, output_folder, rnacentral_id, model_id) elif model_id.count('_') == 2: ribovision.visualise_lsu(fasta_input, output_folder, rnacentral_id, model_id) else: rfam.visualise_rfam(fasta_input, output_folder, rnacentral_id, model_id) @cli.group('crw') def crw_group(): pass @crw_group.command('draw') @click.option('--test', default=False, is_flag=True, help='Process only the first 10 sequences') @click.argument('fasta-input', type=click.Path()) @click.argument('output-folder', type=click.Path()) def rrna_draw(fasta_input, output_folder, test): os.system('mkdir -p %s' % output_folder) with open(get_ribotyper_output(fasta_input, output_folder, config.CRW_CM_LIBRARY), 'r') as f: for line in f.readlines(): rnacentral_id, model_id, _ = line.split('\t') crw.visualise_crw(fasta_input, output_folder, rnacentral_id, model_id) @cli.group('ribovision') def ribovision_group(): """ Commands dealing with laying out sequences based upon RiboVision models. 
""" pass @ribovision_group.command('draw') @click.argument('fasta-input', type=click.Path()) @click.argument('output-folder', type=click.Path()) def ribovision_draw (fasta_input, output_folder): os.system('mkdir -p %s' % output_folder) with open(get_ribotyper_output(fasta_input, output_folder, config.RIBOVISION_CM_LIBRARY), 'r') as f: for line in f.readlines(): rnacentral_id, model_id, _ = line.split('\t') ribovision.visualise_lsu(fasta_input, output_folder, rnacentral_id, model_id) @cli.group('rfam') def rfam_group(): """ Commands dealing with laying out sequences based upon Rfam models. """ pass @rfam_group.command('blacklisted') def rfam_blacklist(): """ Show all blacklisted families. These include rRNA families as well as families that do not have any secondary structure. """ for model in sorted(rfam.blacklisted()): print(model) @rfam_group.command('draw') @click.option('--test', default=False, is_flag=True, help='Process only the first 10 sequences') @click.argument('rfam_accession', type=click.STRING) @click.argument('fasta-input', type=click.Path()) @click.argument('output-folder', type=click.Path()) def rfam_draw(rfam_accession, fasta_input, output_folder, test=None): """ Visualise sequences using the Rfam/R-scape consensus structure as template. RFAM_ACCESSION - Rfam family to process (RF00001, RF00002 etc) """ print(rfam_accession) if rfam_accession == 'all': rfam_accs = rfam.get_all_rfam_acc() else: rfam_accs = [rfam_accession] for rfam_acc in rfam_accs: if rfam.has_structure(rfam_acc): rfam.rscape2traveler(rfam_acc) rfam.generate_2d(rfam_acc, output_folder, fasta_input, test) else: print('{} does not have a conserved secondary structure'.format(rfam_acc)) @rfam_group.command('validate') @click.argument('rfam_accession', type=click.STRING) @click.argument('output', type=click.File('w')) def rfam_validate(rfam_accession, output): """ Check if the given Rfam accession is one that should be drawn. 
If so it will be output to the given file, otherwise it will not. """ if rfam_accession not in rfam.blacklisted(): output.write(rfam_accession + '\n') if __name__ == '__main__': cli()
34.478723
142
0.67726
892
6,482
4.741031
0.261211
0.076614
0.056751
0.078033
0.391818
0.354221
0.342398
0.332939
0.332939
0.28943
0
0.006012
0.204566
6,482
187
143
34.663102
0.814197
0.208578
0
0.307018
0
0.017544
0.120672
0.005203
0
0
0
0
0
1
0.114035
false
0.04386
0.04386
0
0.166667
0.061404
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7087885e36c0266307dc1789a9c00444a7c21a73
49,994
py
Python
tests/test_make_table.py
kellyjonbrazil/jtbl
9bfc755bc964fbed59a4884bc4be605a5065f3d8
[ "MIT" ]
108
2020-03-10T13:22:03.000Z
2022-03-30T03:09:38.000Z
tests/test_make_table.py
kellyjonbrazil/jtbl
9bfc755bc964fbed59a4884bc4be605a5065f3d8
[ "MIT" ]
9
2020-03-08T00:44:38.000Z
2022-02-15T19:36:04.000Z
tests/test_make_table.py
kellyjonbrazil/jtbl
9bfc755bc964fbed59a4884bc4be605a5065f3d8
[ "MIT" ]
5
2020-03-10T11:34:18.000Z
2021-08-02T10:57:43.000Z
import unittest import textwrap import jtbl.cli class MyTests(unittest.TestCase): def setUp(self): self.SUCCESS, self.ERROR = True, False def test_no_piped_data(self): stdin = None expected = textwrap.dedent('''\ jtbl: Missing piped data ''') self.assertEqual(jtbl.cli.make_table(input_data=stdin), (self.ERROR, expected)) def test_null_data(self): stdin = '' expected = '' self.assertEqual(jtbl.cli.make_table(input_data=stdin), (self.ERROR, expected)) def test_simple_key_value(self): stdin = '[{"key": "value"}]' expected = textwrap.dedent('''\ key ----- value''') self.assertEqual(jtbl.cli.make_table(input_data=stdin), (self.SUCCESS, expected)) def test_multi_key_value(self): stdin = '[{"key1": "value1", "key2": "value1"}, {"key1": "value2", "key2": "value2"}]' expected = textwrap.dedent('''\ key1 key2 ------ ------ value1 value1 value2 value2''') self.assertEqual(jtbl.cli.make_table(input_data=stdin), (self.SUCCESS, expected)) def test_null_string(self): stdin = 'null' expected = textwrap.dedent('''\ jtbl: Cannot represent this part of the JSON Object as a table. (Could be an Element, an Array, or Null data instead of an Object): [null] ''') self.assertEqual(jtbl.cli.make_table(input_data=stdin), (self.ERROR, expected)) def test_hello_string(self): stdin = 'hello' expected = textwrap.dedent('''\ jtbl: Exception - Expecting value: line 1 column 1 (char 0) Cannot parse line 1 (Not JSON or JSON Lines data): hello ''') self.assertEqual(jtbl.cli.make_table(input_data=stdin), (self.ERROR, expected)) def test_array_input(self): stdin = '["value1", "value2", "value3"]' expected = textwrap.dedent('''\ jtbl: Cannot represent this part of the JSON Object as a table. 
(Could be an Element, an Array, or Null data instead of an Object): ["value1", "value2", "value3"] ''') self.assertEqual(jtbl.cli.make_table(input_data=stdin), (self.ERROR, expected)) def test_deep_nest(self): stdin = '{"this":{"is":{"a":{"deeply":{"nested":{"structure":"value1","item2":"value2"}}}}}}' expected = textwrap.dedent('''\ this --------------------------------------------------------------------------------- {'is': {'a': {'deeply': {'nested': {'structure': 'value1', 'item2': 'value2'}}}}}''') self.assertEqual(jtbl.cli.make_table(input_data=stdin, columns=100), (self.SUCCESS, expected)) def test_jc_dig(self): stdin = '[{"id": 55658, "opcode": "QUERY", "status": "NOERROR", "flags": ["qr", "rd", "ra"], "query_num": 1, "answer_num": 5, "authority_num": 0, "additional_num": 1, "question": {"name": "www.cnn.com.", "class": "IN", "type": "A"}, "answer": [{"name": "www.cnn.com.", "class": "IN", "type": "CNAME", "ttl": 147, "data": "turner-tls.map.fastly.net."}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.1.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.65.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.129.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.193.67"}], "query_time": 44, "server": "2600", "when": "Wed Mar 18 12:20:59 PDT 2020", "rcvd": 143}]' expected = textwrap.dedent('''\ +------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+ | id | opco | stat | flag | quer | answ | auth | addi | ques | answ | quer | serv | when | rcvd | | | de | us | s | y_nu | er_n | orit | tion | tion | er | y_ti | er | | | | | | | | m | um | y_nu | al_n | | | me | | | | | | | | | | | m | um | | | | | | | 
+======+========+========+========+========+========+========+========+========+========+========+========+========+========+ | 5565 | QUER | NOER | ['qr | 1 | 5 | 0 | 1 | {'na | [{'n | 44 | 2600 | Wed | 143 | | 8 | Y | ROR | ', ' | | | | | me': | ame' | | | Mar | | | | | | rd', | | | | | 'ww | : 'w | | | 18 1 | | | | | | 'ra | | | | | w.cn | ww.c | | | 2:20 | | | | | | '] | | | | | n.co | nn.c | | | :59 | | | | | | | | | | | m.', | om.' | | | PDT | | | | | | | | | | | 'cl | , 'c | | | 2020 | | | | | | | | | | | ass' | lass | | | | | | | | | | | | | | : 'I | ': ' | | | | | | | | | | | | | | N', | IN', | | | | | | | | | | | | | | 'typ | 'ty | | | | | | | | | | | | | | e': | pe': | | | | | | | | | | | | | | 'A'} | 'CN | | | | | | | | | | | | | | | AME' | | | | | | | | | | | | | | | , 't | | | | | | | | | | | | | | | tl': | | | | | | | | | | | | | | | 147 | | | | | | | | | | | | | | | , 'd | | | | | | | | | | | | | | | ata' | | | | | | | | | | | | | | | : 't | | | | | | | | | | | | | | | urne | | | | | | | | | | | | | | | r-tl | | | | | | | | | | | | | | | s.ma | | | | | | | | | | | | | | | p.fa | | | | | | | | | | | | | | | stly | | | | | | | | | | | | | | | .net | | | | | | | | | | | | | | | .'}, | | | | | | | | | | | | | | | {'n | | | | | | | | | | | | | | | ame' | | | | | | | | | | | | | | | : 't | | | | | | | | | | | | | | | urne | | | | | | | | | | | | | | | r-tl | | | | | | | | | | | | | | | s.ma | | | | | | | | | | | | | | | p.fa | | | | | | | | | | | | | | | stly | | | | | | | | | | | | | | | .net | | | | | | | | | | | | | | | .', | | | | | | | | | | | | | | | 'cla | | | | | | | | | | | | | | | ss': | | | | | | | | | | | | | | | 'IN | | | | | | | | | | | | | | | ', ' | | | | | | | | | | | | | | | type | | | | | | | | | | | | | | | ': ' | | | | | | | | | | | | | | | A', | | | | | | | | | | | | | | | 'ttl | | | | | | | | | | | | | | | ': 5 | | | | | | | | | | | | | | | , 'd | | | | | | | | | | | | | | | ata' | | | | | | | | | | | | | | | : '1 | | | | | | | | | 
| | | | | | 51.1 | | | | | | | | | | | | | | | 01.1 | | | | | | | | | | | | | | | .67' | | | | | | | | | | | | | | | }, { | | | | | | | | | | | | | | | 'nam | | | | | | | | | | | | | | | e': | | | | | | | | | | | | | | | 'tur | | | | | | | | | | | | | | | ner- | | | | | | | | | | | | | | | tls. | | | | | | | | | | | | | | | map. | | | | | | | | | | | | | | | fast | | | | | | | | | | | | | | | ly.n | | | | | | | | | | | | | | | et.' | | | | | | | | | | | | | | | , 'c | | | | | | | | | | | | | | | lass | | | | | | | | | | | | | | | ': ' | | | | | | | | | | | | | | | IN', | | | | | | | | | | | | | | | 'ty | | | | | | | | | | | | | | | pe': | | | | | | | | | | | | | | | 'A' | | | | | | | | | | | | | | | , 't | | | | | | | | | | | | | | | tl': | | | | | | | | | | | | | | | 5, | | | | | | | | | | | | | | | 'dat | | | | | | | | | | | | | | | a': | | | | | | | | | | | | | | | '151 | | | | | | | | | | | | | | | .101 | | | | | | | | | | | | | | | .65. | | | | | | | | | | | | | | | 67'} | | | | | | | | | | | | | | | , {' | | | | | | | | | | | | | | | name | | | | | | | | | | | | | | | ': ' | | | | | | | | | | | | | | | turn | | | | | | | | | | | | | | | er-t | | | | | | | | | | | | | | | ls.m | | | | | | | | | | | | | | | ap.f | | | | | | | | | | | | | | | astl | | | | | | | | | | | | | | | y.ne | | | | | | | | | | | | | | | t.', | | | | | | | | | | | | | | | 'cl | | | | | | | | | | | | | | | ass' | | | | | | | | | | | | | | | : 'I | | | | | | | | | | | | | | | N', | | | | | | | | | | | | | | | 'typ | | | | | | | | | | | | | | | e': | | | | | | | | | | | | | | | 'A', | | | | | | | | | | | | | | | 'tt | | | | | | | | | | | | | | | l': | | | | | | | | | | | | | | | 5, ' | | | | | | | | | | | | | | | data | | | | | | | | | | | | | | | ': ' | | | | | | | | | | | | | | | 151. | | | | | | | | | | | | | | | 101. | | | | | | | | | | | | | | | 129. 
| | | | | | | | | | | | | | | 67'} | | | | | | | | | | | | | | | , {' | | | | | | | | | | | | | | | name | | | | | | | | | | | | | | | ': ' | | | | | | | | | | | | | | | turn | | | | | | | | | | | | | | | er-t | | | | | | | | | | | | | | | ls.m | | | | | | | | | | | | | | | ap.f | | | | | | | | | | | | | | | astl | | | | | | | | | | | | | | | y.ne | | | | | | | | | | | | | | | t.', | | | | | | | | | | | | | | | 'cl | | | | | | | | | | | | | | | ass' | | | | | | | | | | | | | | | : 'I | | | | | | | | | | | | | | | N', | | | | | | | | | | | | | | | 'typ | | | | | | | | | | | | | | | e': | | | | | | | | | | | | | | | 'A', | | | | | | | | | | | | | | | 'tt | | | | | | | | | | | | | | | l': | | | | | | | | | | | | | | | 5, ' | | | | | | | | | | | | | | | data | | | | | | | | | | | | | | | ': ' | | | | | | | | | | | | | | | 151. | | | | | | | | | | | | | | | 101. | | | | | | | | | | | | | | | 193. | | | | | | | | | | | | | | | 67'} | | | | | | | | | | | | | | | ] | | | | | +------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+--------+''') self.assertEqual(jtbl.cli.make_table(input_data=stdin, columns=80), (self.SUCCESS, expected)) def test_jc_dig_150cols(self): stdin = '[{"id": 55658, "opcode": "QUERY", "status": "NOERROR", "flags": ["qr", "rd", "ra"], "query_num": 1, "answer_num": 5, "authority_num": 0, "additional_num": 1, "question": {"name": "www.cnn.com.", "class": "IN", "type": "A"}, "answer": [{"name": "www.cnn.com.", "class": "IN", "type": "CNAME", "ttl": 147, "data": "turner-tls.map.fastly.net."}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.1.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.65.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.129.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": 
"151.101.193.67"}], "query_time": 44, "server": "2600", "when": "Wed Mar 18 12:20:59 PDT 2020", "rcvd": 143}]' expected = textwrap.dedent('''\ +----------+----------+-------+----------+---------+----------+----------+----------+----------+----------+----------+----------+--------+--------+ | opcode | server | id | status | flags | query_ | answer | author | additi | questi | answer | query_ | when | rcvd | | | | | | | num | _num | ity_nu | onal_n | on | | time | | | | | | | | | | | m | um | | | | | | +==========+==========+=======+==========+=========+==========+==========+==========+==========+==========+==========+==========+========+========+ | QUERY | 2600 | 55658 | NOERRO | ['qr', | 1 | 5 | 0 | 1 | {'name | [{'nam | 44 | Wed Ma | 143 | | | | | R | 'rd', | | | | | ': 'ww | e': 'w | | r 18 1 | | | | | | | 'ra'] | | | | | w.cnn. | ww.cnn | | 2:20:5 | | | | | | | | | | | | com.', | .com.' | | 9 PDT | | | | | | | | | | | | 'clas | , 'cla | | 2020 | | | | | | | | | | | | s': 'I | ss': ' | | | | | | | | | | | | | | N', 't | IN', ' | | | | | | | | | | | | | | ype': | type': | | | | | | | | | | | | | | 'A'} | 'CNAM | | | | | | | | | | | | | | | E', 't | | | | | | | | | | | | | | | tl': 1 | | | | | | | | | | | | | | | 47, 'd | | | | | | | | | | | | | | | ata': | | | | | | | | | | | | | | | 'turne | | | | | | | | | | | | | | | r-tls. 
| | | | | | | | | | | | | | | map.fa | | | | | | | | | | | | | | | stly.n | | | | | | | | | | | | | | | et.'}, | | | | | | | | | | | | | | | {'nam | | | | | | | | | | | | | | | e': 't | | | | | | | | | | | | | | | urner- | | | | | | | | | | | | | | | tls.ma | | | | | | | | | | | | | | | p.fast | | | | | | | | | | | | | | | ly.net | | | | | | | | | | | | | | | .', 'c | | | | | | | | | | | | | | | lass': | | | | | | | | | | | | | | | 'IN', | | | | | | | | | | | | | | | 'type | | | | | | | | | | | | | | | ': 'A' | | | | | | | | | | | | | | | , 'ttl | | | | | | | | | | | | | | | ': 5, | | | | | | | | | | | | | | | 'data' | | | | | | | | | | | | | | | : '151 | | | | | | | | | | | | | | | .101.1 | | | | | | | | | | | | | | | .67'}, | | | | | | | | | | | | | | | {'nam | | | | | | | | | | | | | | | e': 't | | | | | | | | | | | | | | | urner- | | | | | | | | | | | | | | | tls.ma | | | | | | | | | | | | | | | p.fast | | | | | | | | | | | | | | | ly.net | | | | | | | | | | | | | | | .', 'c | | | | | | | | | | | | | | | lass': | | | | | | | | | | | | | | | 'IN', | | | | | | | | | | | | | | | 'type | | | | | | | | | | | | | | | ': 'A' | | | | | | | | | | | | | | | , 'ttl | | | | | | | | | | | | | | | ': 5, | | | | | | | | | | | | | | | 'data' | | | | | | | | | | | | | | | : '151 | | | | | | | | | | | | | | | .101.6 | | | | | | | | | | | | | | | 5.67'} | | | | | | | | | | | | | | | , {'na | | | | | | | | | | | | | | | me': ' | | | | | | | | | | | | | | | turner | | | | | | | | | | | | | | | -tls.m | | | | | | | | | | | | | | | ap.fas | | | | | | | | | | | | | | | tly.ne | | | | | | | | | | | | | | | t.', ' | | | | | | | | | | | | | | | class' | | | | | | | | | | | | | | | : 'IN' | | | | | | | | | | | | | | | , 'typ | | | | | | | | | | | | | | | e': 'A | | | | | | | | | | | | | | | ', 'tt | | | | | | | | | | | | | | | l': 5, | | | | | | | | | | | | | | | 'data | | | | | | | | | | | | | | | ': '15 | | | | | | | | | | | | | | | 1.101. 
| | | | | | | | | | | | | | | 129.67 | | | | | | | | | | | | | | | '}, {' | | | | | | | | | | | | | | | name': | | | | | | | | | | | | | | | 'turn | | | | | | | | | | | | | | | er-tls | | | | | | | | | | | | | | | .map.f | | | | | | | | | | | | | | | astly. | | | | | | | | | | | | | | | net.', | | | | | | | | | | | | | | | 'clas | | | | | | | | | | | | | | | s': 'I | | | | | | | | | | | | | | | N', 't | | | | | | | | | | | | | | | ype': | | | | | | | | | | | | | | | 'A', ' | | | | | | | | | | | | | | | ttl': | | | | | | | | | | | | | | | 5, 'da | | | | | | | | | | | | | | | ta': ' | | | | | | | | | | | | | | | 151.10 | | | | | | | | | | | | | | | 1.193. | | | | | | | | | | | | | | | 67'}] | | | | +----------+----------+-------+----------+---------+----------+----------+----------+----------+----------+----------+----------+--------+--------+''') self.assertEqual(jtbl.cli.make_table(input_data=stdin, columns=150), (self.SUCCESS, expected)) def test_jc_dig_150cols_t(self): stdin = '[{"id": 55658, "opcode": "QUERY", "status": "NOERROR", "flags": ["qr", "rd", "ra"], "query_num": 1, "answer_num": 5, "authority_num": 0, "additional_num": 1, "question": {"name": "www.cnn.com.", "class": "IN", "type": "A"}, "answer": [{"name": "www.cnn.com.", "class": "IN", "type": "CNAME", "ttl": 147, "data": "turner-tls.map.fastly.net."}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.1.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.65.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.129.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.193.67"}], "query_time": 44, "server": "2600", "when": "Wed Mar 18 12:20:59 PDT 2020", "rcvd": 143}]' expected = textwrap.dedent('''\ opcode status server id flags query_nu answer_n authorit addition question answer query_ti when rcvd -------- -------- 
-------- ----- -------- ---------- ---------- ---------- ---------- ---------- -------- ---------- ------- ------ QUERY NOERROR 2600 55658 ['qr', ' 1 5 0 1 {'name': [{'name' 44 Wed Mar 143''') self.assertEqual(jtbl.cli.make_table(input_data=stdin, truncate=True, columns=150), (self.SUCCESS, expected)) def test_jc_dig_nowrap(self): stdin = '[{"id": 55658, "opcode": "QUERY", "status": "NOERROR", "flags": ["qr", "rd", "ra"], "query_num": 1, "answer_num": 5, "authority_num": 0, "additional_num": 1, "question": {"name": "www.cnn.com.", "class": "IN", "type": "A"}, "answer": [{"name": "www.cnn.com.", "class": "IN", "type": "CNAME", "ttl": 147, "data": "turner-tls.map.fastly.net."}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.1.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.65.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.129.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.193.67"}], "query_time": 44, "server": "2600", "when": "Wed Mar 18 12:20:59 PDT 2020", "rcvd": 143}]' expected = textwrap.dedent('''\ id opcode status flags query_num answer_num authority_num additional_num question answer query_time server when rcvd ----- -------- -------- ------------------ ----------- ------------ --------------- ---------------- ---------------------------------------------------- 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ------------ -------- ---------------------------- ------ 55658 QUERY NOERROR ['qr', 'rd', 'ra'] 1 5 0 1 {'name': 'www.cnn.com.', 'class': 'IN', 'type': 'A'} [{'name': 'www.cnn.com.', 'class': 'IN', 'type': 'CNAME', 'ttl': 147, 'data': 'turner-tls.map.fastly.net.'}, {'name': 'turner-tls.map.fastly.net.', 'class': 'IN', 'type': 'A', 'ttl': 5, 'data': '151.101.1.67'}, {'name': 'turner-tls.map.fastly.net.', 'class': 'IN', 'type': 'A', 'ttl': 5, 'data': '151.101.65.67'}, {'name': 'turner-tls.map.fastly.net.', 'class': 'IN', 'type': 'A', 'ttl': 5, 'data': '151.101.129.67'}, {'name': 'turner-tls.map.fastly.net.', 'class': 'IN', 'type': 'A', 'ttl': 5, 'data': '151.101.193.67'}] 44 2600 Wed Mar 18 12:20:59 PDT 2020 143''') self.assertEqual(jtbl.cli.make_table(input_data=stdin, nowrap=True, columns=150), (self.SUCCESS, expected)) def test_jc_dig_nowrap_t_cols_80(self): """test that nowrap overrides both truncate and columns""" stdin = '[{"id": 55658, "opcode": "QUERY", "status": "NOERROR", "flags": ["qr", "rd", "ra"], "query_num": 1, "answer_num": 5, "authority_num": 0, "additional_num": 1, "question": {"name": "www.cnn.com.", "class": "IN", "type": "A"}, "answer": [{"name": "www.cnn.com.", "class": "IN", "type": "CNAME", "ttl": 147, "data": "turner-tls.map.fastly.net."}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.1.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": 
"151.101.65.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.129.67"}, {"name": "turner-tls.map.fastly.net.", "class": "IN", "type": "A", "ttl": 5, "data": "151.101.193.67"}], "query_time": 44, "server": "2600", "when": "Wed Mar 18 12:20:59 PDT 2020", "rcvd": 143}]' expected = textwrap.dedent('''\ id opcode status flags query_num answer_num authority_num additional_num question answer query_time server when rcvd ----- -------- -------- ------------------ ----------- ------------ --------------- ---------------- ---------------------------------------------------- ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ------------ -------- ---------------------------- ------ 55658 QUERY NOERROR ['qr', 'rd', 'ra'] 1 5 0 1 {'name': 'www.cnn.com.', 'class': 'IN', 'type': 'A'} [{'name': 'www.cnn.com.', 'class': 'IN', 'type': 'CNAME', 'ttl': 147, 'data': 'turner-tls.map.fastly.net.'}, {'name': 'turner-tls.map.fastly.net.', 'class': 'IN', 'type': 'A', 'ttl': 5, 'data': '151.101.1.67'}, {'name': 'turner-tls.map.fastly.net.', 'class': 'IN', 'type': 'A', 'ttl': 5, 'data': '151.101.65.67'}, {'name': 'turner-tls.map.fastly.net.', 'class': 'IN', 'type': 'A', 'ttl': 5, 'data': '151.101.129.67'}, {'name': 'turner-tls.map.fastly.net.', 'class': 'IN', 'type': 'A', 'ttl': 5, 'data': '151.101.193.67'}] 44 2600 Wed Mar 18 12:20:59 PDT 2020 143''') self.assertEqual(jtbl.cli.make_table(input_data=stdin, nowrap=True, columns=80, truncate=True), (self.SUCCESS, expected)) def 
test_jc_dig_answer(self): stdin = '[{"name":"www.cnn.com.","class":"IN","type":"CNAME","ttl":147,"data":"turner-tls.map.fastly.net."},{"name":"turner-tls.map.fastly.net.","class":"IN","type":"A","ttl":5,"data":"151.101.1.67"},{"name":"turner-tls.map.fastly.net.","class":"IN","type":"A","ttl":5,"data":"151.101.65.67"},{"name":"turner-tls.map.fastly.net.","class":"IN","type":"A","ttl":5,"data":"151.101.129.67"},{"name":"turner-tls.map.fastly.net.","class":"IN","type":"A","ttl":5,"data":"151.101.193.67"}]' expected = textwrap.dedent('''\ name class type ttl data -------------------------- ------- ------ ----- -------------------------- www.cnn.com. IN CNAME 147 turner-tls.map.fastly.net. turner-tls.map.fastly.net. IN A 5 151.101.1.67 turner-tls.map.fastly.net. IN A 5 151.101.65.67 turner-tls.map.fastly.net. IN A 5 151.101.129.67 turner-tls.map.fastly.net. IN A 5 151.101.193.67''') self.assertEqual(jtbl.cli.make_table(input_data=stdin, columns=80), (self.SUCCESS, expected)) def test_json_lines(self): """test JSON Lines data""" stdin = textwrap.dedent('''\ {"name":"lo0","type":null,"ipv4_addr":"127.0.0.1","ipv4_mask":"255.0.0.0"} {"name":"gif0","type":null,"ipv4_addr":null,"ipv4_mask":null} {"name":"stf0","type":null,"ipv4_addr":null,"ipv4_mask":null} {"name":"XHC0","type":null,"ipv4_addr":null,"ipv4_mask":null} {"name":"XHC20","type":null,"ipv4_addr":null,"ipv4_mask":null} {"name":"VHC128","type":null,"ipv4_addr":null,"ipv4_mask":null} {"name":"XHC1","type":null,"ipv4_addr":null,"ipv4_mask":null} {"name":"en5","type":null,"ipv4_addr":null,"ipv4_mask":null} {"name":"ap1","type":null,"ipv4_addr":null,"ipv4_mask":null} {"name":"en0","type":null,"ipv4_addr":"192.168.1.221","ipv4_mask":"255.255.255.0"} {"name":"p2p0","type":null,"ipv4_addr":null,"ipv4_mask":null} {"name":"awdl0","type":null,"ipv4_addr":null,"ipv4_mask":null} {"name":"en1","type":null,"ipv4_addr":null,"ipv4_mask":null} {"name":"en2","type":null,"ipv4_addr":null,"ipv4_mask":null} 
{"name":"en3","type":null,"ipv4_addr":null,"ipv4_mask":null} {"name":"en4","type":null,"ipv4_addr":null,"ipv4_mask":null} {"name":"bridge0","type":null,"ipv4_addr":null,"ipv4_mask":null} {"name":"utun0","type":null,"ipv4_addr":null,"ipv4_mask":null} {"name":"utun1","type":null,"ipv4_addr":null,"ipv4_mask":null} {"name":"utun2","type":null,"ipv4_addr":null,"ipv4_mask":null} {"name":"utun3","type":null,"ipv4_addr":null,"ipv4_mask":null} {"name":"utun4","type":null,"ipv4_addr":null,"ipv4_mask":null} {"name":"vmnet1","type":null,"ipv4_addr":"192.168.101.1","ipv4_mask":"255.255.255.0"} {"name":"vmnet8","type":null,"ipv4_addr":"192.168.71.1","ipv4_mask":"255.255.255.0"}''') expected = textwrap.dedent('''\ name type ipv4_addr ipv4_mask ------- ------ ------------- ------------- lo0 127.0.0.1 255.0.0.0 gif0 stf0 XHC0 XHC20 VHC128 XHC1 en5 ap1 en0 192.168.1.221 255.255.255.0 p2p0 awdl0 en1 en2 en3 en4 bridge0 utun0 utun1 utun2 utun3 utun4 vmnet1 192.168.101.1 255.255.255.0 vmnet8 192.168.71.1 255.255.255.0''') self.assertEqual(jtbl.cli.make_table(input_data=stdin), (self.SUCCESS, expected)) if __name__ == '__main__': unittest.main()
116.808411
864
0.173461
2,330
49,994
3.645494
0.116738
0.035319
0.060866
0.095361
0.774194
0.766188
0.753944
0.737226
0.734989
0.636332
0
0.06084
0.631776
49,994
427
865
117.081967
0.400565
0.00146
0
0.383033
0
0.082262
0.936586
0.108375
0
0
0
0
0.03856
1
0.041131
false
0
0.007712
0
0.051414
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
1
0
0
0
1
1
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
9
7087b1595e41533f5047a1198773514a488662fd
11,862
py
Python
Autonomous Car Locator.py
thedrgman/Autonomous-Car-Locator
d21c72d1d110e9f959cfd4d59f454f9c4193f06d
[ "BSD-3-Clause" ]
null
null
null
Autonomous Car Locator.py
thedrgman/Autonomous-Car-Locator
d21c72d1d110e9f959cfd4d59f454f9c4193f06d
[ "BSD-3-Clause" ]
null
null
null
Autonomous Car Locator.py
thedrgman/Autonomous-Car-Locator
d21c72d1d110e9f959cfd4d59f454f9c4193f06d
[ "BSD-3-Clause" ]
null
null
null
""" Autonomous Car Locator This is a program that helps autonomous cars to find out the location and direction of other autonomous cars. It is all based on what is provided by the car and from what the car detects. By David Gameiro """ import random def locator(): cars = [ { "speed" : 50, #the speed of the current car "compass" : "N", #the direction of the current car "id" : 1423456, #the ID number of the car "gps" : 36.00000 #the current GPS location of the current car } ] def info_input(): #this is to gather the speed and compass direction of your car and the other car i = 0 while True:#len(cars) > i: if i == 0: print("YOUR CAR") speed = speed_type() cars[0][0] = speed print("The current speed of your car is " + str(cars[0][0]) + "mph\n") compass = compass_type() cars[0][1] = compass print("The current direction of your car is " + cars[0][1] + "\n") else: new_car = { "speed" : 50, #the speed of the current car "compass" : "N", #the direction of the current car "id" : 1423456, #the ID number of the car "gps" : 36.00000 #the current GPS location of the current car } print("Car " + str(i)) speed = speed_type() new_car[0] = speed print("The current speed of Car " + str(i) + " is " + str(new_car[0]) + "mph\n") print("Both cars can only be traveling the same or opposite directions.") print("Please only choose the same direction or the opposite on.") compass = compass_type() new_car[1] = compass print("The current direction of Car " + str(i) + " is " + new_car[1] + "\n") new_car[2] = random.randrange(1420,62416,2) cars.append(new_car) stop_count = input("If there are no more cars to add, type [E] for End or [C] for continue.\n") stop_count = stop_count.upper() if stop_count == "E" or stop_count == "END": break i += 1 print(len(cars)) def speed_type(): #if the input for the speed is not a number then it keeps asking for the speed val_type = "str" while val_type != "int": speed = input("What is the speed of the car? 
") val_type = check_user_input(speed) speed = int(speed) return speed def compass_type(): #if the input for the compass direction is not a number then it keeps asking while True: #This loop through until an input is given that is one of the options val_type = "int" while val_type != "str": compass = input("What is the direction that the car is traveling? [N], [S], [E}, [W] ") val_type = check_user_input(compass) compass = compass.upper() if compass == "N" or compass == "S" or compass == "E" or compass == "W": break #this verfies that the input is only as specifed, and then ends the loop, if not it continues else: continue return compass def check_user_input(input): try: # Convert it into integer val = int(input) val_type="int" except ValueError: try: # Convert it into float val = float(input) val_type = "float" except ValueError: val_type = "str" return val_type info_input() j = 1 while len(cars) > j: print("The ID number of the car is " + str(cars[j][2])) def speed_compare(): relative_speed = "faster" #the relative speed your car is going compared to other cars if cars[0][0] > cars[j][0]: print("Your car is going faster than Car " + str(j)) relative_speed = "faster" elif cars[0][0] < cars[j][0]: print("Your car is going slower than Car " + str(j)) relative_speed = "slower" else: print("Your car is going the same speed as Car " + str(j)) relative_speed = "same" return relative_speed def compass(): #used to compare the traveling direction of the two cars if cars[0][1] == cars[j][1]: print("You and Car " + str(j) + " are both going the same direction") direction = "same" elif cars[0][1] == "N" and cars[j][1] == "S": print("You and Car " + str(j) + " are both going the opposite direction") direction = "opposite" elif cars[0][1] == "E" and cars[j][1] == "W": print("You and Car " + str(j) + " are both going the opposite direction") direction = "opposite" elif cars[0][1] == "S" and cars[j][1] == "N": print("You and Car " + str(j) + " are both going the opposite 
direction") direction = "opposite" elif cars[0][1] == "W" and cars[j][1] == "E": print("You and Car " + str(j) + " are both going the opposite direction") direction = "opposite" return direction def sensors(): #Which sensors are being triggered on your car, or where is the car is in relation to you sensor = ["front", "right", "rear", "left"] #4 available sensors on the car, on all 4 sides position = random.choice(sensor) #where is the other car located relative to yours if position == "front": print("The car is in front of your car") elif position == "right": print("The car is to the right of your car") elif position == "left": print("The car is to the left of your car") else: print("The car is behind your car") return position direction = compass() relative_speed = speed_compare() position = sensors() def visual_before(): #displays what the current layout of the road is print("\nCURRENT ROAD LAYOUT") if direction == "same" and position == "front": print("| | || | |") print("| | || | " + str(j) + " |") print("| | || Y | |") print("| | || | |") elif direction == "same" and position == "rear": print("| | || | |") print("| | || | Y |") print("| | || " + str(j) + " | |") print("| | || | |") elif direction == "same" and position == "right": print("| | || | |") print("| | || | |") print("| | || Y | " + str(j) + " |") print("| | || | |") elif direction == "same" and position == "left": print("| | || | |") print("| | || | |") print("| | || " + str(j) + " | Y |") print("| | || | |") elif direction == "opposite": print("| | || | |") print("| | " + str(j) + " || | |") print("| | || Y | |") print("| | || | |") def prediction(): #if the same conditions continue then this will be the predicted road layout print("\nPREDICTED FUTURE LAYOUT") if direction == "same" and (relative_speed == "same" or relative_speed == "slower") and position == "front": print("The other car will remain in front of you.") print("| | || | |") print("| | || | " + str(j) + " |") print("| | || Y | |") 
print("| | || | |") elif direction == "same" and (relative_speed == "same" or relative_speed == "faster") and position == "rear": print("The other car will remain behind you.") print("| | || | |") print("| | || | Y |") print("| | || " + str(j) + " | |") print("| | || | |") elif direction == "same" and relative_speed == "same" and position == "right": print("The other car will remain to the right of you.") print("| | || | |") print("| | || | |") print("| | || Y | " + str(j) + " |") print("| | || | |") elif direction == "same" and relative_speed == "same" and position == "left": print("The other car will remain to the left of you.") print("| | || | |") print("| | || | |") print("| | || " + str(j) + " | Y |") print("| | || | |") elif direction == "same" and relative_speed == "faster" and position == "front": print("You will pass the other car and be in front of them.") print("| | || | |") print("| | || Y | |") print("| | || | " + str(j) + " |") print("| | || | |") elif direction == "same" and relative_speed == "faster" and position == "left": print("You will pass the other car and be in front of them.") print("| | || | |") print("| | || | Y |") print("| | || " + str(j) + " | |") print("| | || | |") elif direction == "same" and relative_speed == "faster" and position == "right": print("You will pass the other car and be in front of them.") print("| | || | |") print("| | || Y | |") print("| | || | " + str(j) + " |") print("| | || | |") elif direction == "same" and relative_speed == "slower" and position == "right": print("The other car will be in front of you.") print("| | || | |") print("| | || | " + str(j) + " |") print("| | || Y | |") print("| | || | |") elif direction == "same" and relative_speed == "slower" and position == "left": print("The other car will be in front of you.") print("| | || | |") print("| | || " + str(j) + " | |") print("| | || | Y |") print("| | || | |") elif direction == "same" and relative_speed == "slower" and position == "rear": print("The 
other car will pass you.") print("| | || | |") print("| | || " + str(j) + " | |") print("| | || | Y |") print("| | || | |") elif direction == "opposite": print("The other car will be behind you.") print("| | || | |") print("| | || Y | |") print("| | " + str(j) + " || | |") print("| | || | |") visual_before() prediction() print("\n\n\n") j += 1 locator()
45.623077
121
0.42986
1,236
11,862
4.078479
0.136731
0.019044
0.044436
0.033327
0.52807
0.48542
0.441579
0.412021
0.369768
0.336441
0
0.012459
0.43163
11,862
259
122
45.799228
0.735242
0.114062
0
0.488789
0
0.008969
0.283737
0
0
0
0
0
0
1
0.044843
false
0.080717
0.004484
0
0.076233
0.44843
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
1
0
2
7087c1f44e1b42fefc6088af0233d3d5c9a7f47d
460
py
Python
jenkins-dashboard.py
jfm/jenkins-dashboard
4dc4cf69f7f6be1f9cfd15b24509a96454c4de09
[ "MIT" ]
null
null
null
jenkins-dashboard.py
jfm/jenkins-dashboard
4dc4cf69f7f6be1f9cfd15b24509a96454c4de09
[ "MIT" ]
3
2021-03-18T20:10:45.000Z
2021-09-07T23:37:52.000Z
jenkins-dashboard.py
jfm/jenkins-dashboard
4dc4cf69f7f6be1f9cfd15b24509a96454c4de09
[ "MIT" ]
null
null
null
import time

from jenkinsdashboard.ci.jenkins import Jenkins
from jenkinsdashboard.ui.dashboard import Dashboard


def main():
    """Poll the Jenkins server and re-render the dashboard every 30 seconds."""
    # NOTE(review): credentials are hard-coded here — consider moving them to
    # environment variables or a config file before sharing this script.
    ci_server = Jenkins(
        'http://jenkins.onboarding.liquid.int.tdk.dk', 'admin', '0nboarding')
    board = Dashboard(ci_server)
    while True:
        rows = board.generate()
        board.render(rows)
        time.sleep(30)


if __name__ == '__main__':
    main()
28.75
77
0.678261
54
460
5.592593
0.592593
0.13245
0.119205
0
0
0
0
0
0
0
0
0.045576
0.18913
460
15
78
30.666667
0.764075
0.136957
0
0
0
0
0.167089
0
0
0
0
0
0
1
0
false
0
0.272727
0
0.272727
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
708a394ef1e0df69e0c499106dd15967d32aa202
2,001
py
Python
tutorials/basics/g_app.py
nunoedgarhubsoftphotoflow/py-fmas
241d942fe0cd6a49001b1bf110dd32bccc86bb16
[ "MIT" ]
4
2021-04-28T07:02:54.000Z
2022-01-25T13:15:49.000Z
tutorials/basics/g_app.py
Photonics-Precision-Technologies/py-fmas
241d942fe0cd6a49001b1bf110dd32bccc86bb16
[ "MIT" ]
3
2021-06-10T07:11:35.000Z
2021-11-22T15:23:01.000Z
tutorials/basics/g_app.py
Photonics-Precision-Technologies/py-fmas
241d942fe0cd6a49001b1bf110dd32bccc86bb16
[ "MIT" ]
5
2021-05-20T08:53:44.000Z
2022-01-25T13:18:34.000Z
r"""
Using `fmas` as a black-box application
=======================================

This examples shows how to use `py-fmas` as a black-box application, that
only requires a minimal amount of scripting.

.. codeauthor:: Oliver Melchert <melchert@iqo.uni-hannover.de>
"""

###############################################################################
# We start by simply importing the required `fmas` into the current namespace.
import fmas

###############################################################################
# If an adequate input file is located within the current working directory,
# `fmas` can be used as shown below.  Its `run` function reads the
# propagation settings stored in the input file `input_file.h5` and runs the
# simulaton.
res = fmas.run('input_file.h5', model_type='FMAS_S_R', solver_type='IFM_RK4IP')

###############################################################################
# An example that shows how an adequate input file can be generated via python
# is shown under the link below:
#
# :ref:`sphx_glr_auto_tutorials_basics_ng_generate_infile.py`
#
# After the propagation algorithm (specified in `input_file.h5`) terminates,
# a simple dictionary data structure with the following keys is available.
print(res.keys())

###############################################################################
# A simple plot that shows the result of the simulation run can be produced
# using function `plot_evolution` implemented in module `tools`.
from fmas.tools import plot_evolution

plot_evolution(
    res['z'], res['t'], res['u'], t_lim=(-500, 2200), w_lim=(1., 4.))

###############################################################################
# The results can be stored for later postprocessing using the function
# `save_h5` implemented in module `data_io`.  It will generate a file
# `out_file.h5` with HDF5 format in the current working directory.
from fmas.data_io import save_h5

save_h5('out_file.h5', **res)
39.235294
79
0.594703
255
2,001
4.560784
0.529412
0.046432
0.028375
0.020636
0.044712
0.044712
0
0
0
0
0
0.010771
0.118441
2,001
50
80
40.02
0.648526
0.626687
0
0
0
0
0.134557
0
0
0
0
0
0
1
0
false
0
0.375
0
0.375
0.125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
708b028c1ab718e2fb6ff3f78c3ef2f30aed6475
3,852
py
Python
tests.py
CTPUG/mdx_attr_cols
8aef79857685f9913c703befe717872d4e2d1bea
[ "0BSD" ]
null
null
null
tests.py
CTPUG/mdx_attr_cols
8aef79857685f9913c703befe717872d4e2d1bea
[ "0BSD" ]
null
null
null
tests.py
CTPUG/mdx_attr_cols
8aef79857685f9913c703befe717872d4e2d1bea
[ "0BSD" ]
null
null
null
from unittest import TestCase

import xmltodict
from markdown import Markdown
from markdown.util import etree

from mdx_attr_cols import AttrColTreeProcessor, AttrColExtension, makeExtension


class XmlTestCaseMixin(object):
    """Helpers for building and comparing XML trees in tests."""

    def mk_doc(self, s):
        # Wrap the fragment in a <div> so multi-element fragments parse.
        return etree.fromstring(
            "<div>" + s.strip() + "</div>")

    def assert_xml_equal(self, a, b):
        # Compare serialized trees structurally via dicts (robust to
        # attribute ordering differences).
        self.assertEqual(
            xmltodict.parse(etree.tostring(a)),
            xmltodict.parse(etree.tostring(b)))


class TestAttrColTreeProcessor(XmlTestCaseMixin, TestCase):
    """Tests for the tree processor's configuration and row generation."""

    def mk_processor(self, **conf):
        md = Markdown()
        return AttrColTreeProcessor(md, conf)

    def test_config_none(self):
        # BUG FIX: previously `md = Markdown` passed the *class* instead of
        # an instance to the processor.
        md = Markdown()
        p = AttrColTreeProcessor(md, None)
        self.assertEqual(p.columns, 12)
        self.assertEqual(p.attr, 'cols')
        self.assertEqual(p.tags, set(['section']))

    def test_config_defaults(self):
        p = self.mk_processor()
        self.assertEqual(p.columns, 12)
        self.assertEqual(p.attr, 'cols')
        self.assertEqual(p.tags, set(['section']))

    def test_config_overrides(self):
        p = self.mk_processor(
            columns=16,
            attr='columns',
            tags=['section', 'div'],
        )
        self.assertEqual(p.columns, 16)
        self.assertEqual(p.attr, 'columns')
        self.assertEqual(p.tags, set(['section', 'div']))

    def test_simple_rows(self):
        root = self.mk_doc("""
            <section cols='4'>Foo</section>
            <section cols='6'>Bar</section>
            <section cols='2'>Beep</section>
        """)
        p = self.mk_processor()
        new_root = p.run(root)
        self.assert_xml_equal(new_root, self.mk_doc("""
            <div class="row"><div class="col-md-4"><section>Foo</section>
            </div><div class="col-md-6"><section>Bar</section>
            </div><div class="col-md-2"><section>Beep</section>
            </div></div>
        """))


class TestAttrColExtension(TestCase):
    """Tests for extension registration and its dependency checks."""

    def mk_markdown(self, extensions=None):
        if extensions is None:
            extensions = ['attr_list', 'mdx_outline']
        md = Markdown(extensions=extensions)
        return md

    def assert_registered(self, md):
        processor = md.treeprocessors['attr_cols']
        self.assertTrue(isinstance(processor, AttrColTreeProcessor))

    def assert_not_registered(self, md):
        self.assertFalse('attr_cols' in md.treeprocessors)

    def test_create(self):
        # BUG FIX: was named `text_create`, so the unittest runner never
        # collected or executed this test.
        ext = AttrColExtension({'a': 'b'})
        self.assertEqual(ext.conf, {'a': 'b'})

    def test_extend_markdown(self):
        md = self.mk_markdown()
        ext = AttrColExtension({})
        ext.extendMarkdown(md)
        self.assert_registered(md)

    def test_missing_attr_list(self):
        md = self.mk_markdown(['mdx_outline'])
        ext = AttrColExtension({})
        self.assertRaisesRegexp(
            RuntimeError,
            "The attr_cols markdown extension depends the following"
            " extensions which must preceded it in the extension list:"
            " attr_list, mdx_outline",
            ext.extendMarkdown, md)
        self.assert_not_registered(md)

    def test_missing_outline(self):
        md = self.mk_markdown([])
        ext = AttrColExtension({})
        self.assertRaisesRegexp(
            RuntimeError,
            "The attr_cols markdown extension depends the following"
            " extensions which must preceded it in the extension list:"
            " attr_list, mdx_outline",
            ext.extendMarkdown, md)
        self.assert_not_registered(md)


class TestExtensionRegistration(TestCase):
    """Tests for the module-level makeExtension factory."""

    def test_make_extension(self):
        configs = {'a': 'b'}
        ext = makeExtension(**configs)
        self.assertTrue(isinstance(ext, AttrColExtension))
        self.assertEqual(ext.conf, configs)
32.369748
79
0.616044
427
3,852
5.42623
0.222482
0.077687
0.062149
0.02978
0.373759
0.318084
0.285283
0.259819
0.259819
0.259819
0
0.004924
0.261942
3,852
118
80
32.644068
0.81006
0
0
0.260417
0
0.010417
0.199117
0.048027
0
0
0
0
0.25
1
0.15625
false
0
0.052083
0.010417
0.28125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
708b7c3f2101b8cf722c8c1af351e895b4c6a938
621
py
Python
backend/tiles/migrations/0002_auto_20200526_0100.py
landdafku11/mobile-backend
3f3328afd81f85f90170a57689af72f8f705b8a3
[ "MIT" ]
null
null
null
backend/tiles/migrations/0002_auto_20200526_0100.py
landdafku11/mobile-backend
3f3328afd81f85f90170a57689af72f8f705b8a3
[ "MIT" ]
null
null
null
backend/tiles/migrations/0002_auto_20200526_0100.py
landdafku11/mobile-backend
3f3328afd81f85f90170a57689af72f8f705b8a3
[ "MIT" ]
null
null
null
# Generated by Django 3.0.5 on 2020-05-25 17:00

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Replace TileComment's raw parent_id column with a real FK to CubeComment."""

    dependencies = [
        ('cubes', '0001_initial'),
        ('tiles', '0001_initial'),
    ]

    operations = [
        # Drop the old plain integer column ...
        migrations.RemoveField(
            model_name='tilecomment',
            name='parent_id',
        ),
        # ... and add a nullable foreign key in its place.
        migrations.AddField(
            model_name='tilecomment',
            name='parent',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='cubes.CubeComment'),
        ),
    ]
24.84
116
0.602254
65
621
5.661538
0.630769
0.065217
0.076087
0.119565
0.163043
0
0
0
0
0
0
0.050885
0.272142
621
24
117
25.875
0.763274
0.072464
0
0.222222
1
0
0.15331
0
0
0
0
0
0
1
0
false
0
0.111111
0
0.277778
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
708b92b1cb033572a615db2bbf33c6400d6fe304
720
py
Python
Q47_Permutations-II.py
xiaosean/leetcode_python
844ece02d699bfc620519bd94828ed0e18597f3e
[ "MIT" ]
null
null
null
Q47_Permutations-II.py
xiaosean/leetcode_python
844ece02d699bfc620519bd94828ed0e18597f3e
[ "MIT" ]
null
null
null
Q47_Permutations-II.py
xiaosean/leetcode_python
844ece02d699bfc620519bd94828ed0e18597f3e
[ "MIT" ]
null
null
null
class Solution:
    def permuteUnique(self, nums: List[int]) -> List[List[int]]:
        """Return all unique permutations of nums (which may contain duplicates).

        Sorts first, then depth-first search; duplicate values at the same
        recursion level are skipped so each distinct permutation appears once.
        """
        count = len(nums)
        if count == 0:
            return None
        if count == 1:
            return [nums]
        nums.sort()
        results = []

        def _search(remaining, acc, prefix=None):
            # Pick one element per level; skip a value equal to the one just
            # chosen at this level to avoid duplicate permutations.
            prefix = [] if prefix is None else prefix
            if not remaining:
                acc += [prefix]
                return
            prev = remaining[0] - 1  # sentinel strictly below any element
            for idx in range(len(remaining)):
                if remaining[idx] == prev:
                    continue
                _search(remaining[:idx] + remaining[idx + 1:], acc,
                        prefix + [remaining[idx]])
                prev = remaining[idx]

        _search(nums, results, prefix=None)
        return results
26.666667
66
0.370833
81
720
3.296296
0.320988
0.093633
0.059925
0.074906
0.224719
0.089888
0
0
0
0
0
0.016901
0.506944
720
27
67
26.666667
0.735211
0
0
0.166667
0
0
0
0
0
0
0
0
0
1
0.083333
false
0
0
0
0.291667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
708c9cd7e0c47f6fd647b59adb4727ab13f448e2
1,764
py
Python
dialogbot/search/local/tfidfmodel.py
ishine/dialogbot
6c3d2f95555a05a3b935dda818e481ddc20eed08
[ "Apache-2.0" ]
68
2019-06-30T07:39:59.000Z
2022-03-30T12:15:40.000Z
dialogbot/search/local/tfidfmodel.py
ishine/dialogbot
6c3d2f95555a05a3b935dda818e481ddc20eed08
[ "Apache-2.0" ]
2
2021-06-30T10:22:17.000Z
2021-07-27T12:41:01.000Z
dialogbot/search/local/tfidfmodel.py
ishine/dialogbot
6c3d2f95555a05a3b935dda818e481ddc20eed08
[ "Apache-2.0" ]
16
2019-08-22T16:05:53.000Z
2022-03-11T07:51:27.000Z
# -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com)
@description: TF-IDF retrieval model over a local question/answer corpus.
"""

import time

from gensim import corpora, models, similarities

from dialogbot.reader.data_helper import load_corpus_file
from dialogbot.utils.log import logger


class TfidfModel:
    """TF-IDF similarity index over context/response pairs loaded from a corpus file."""

    def __init__(self, corpus_file, word2id):
        started = time.time()
        # Load up to 50k (context, response) pairs, then fit the model.
        self.contexts, self.responses = load_corpus_file(corpus_file, word2id, size=50000)
        self._train_model()
        self.corpus_mm = self.tfidf_model[self.corpus]
        self.index = similarities.MatrixSimilarity(self.corpus_mm)
        logger.debug("Time to build tfidf model by %s: %2.f seconds." %
                     (corpus_file, time.time() - started))

    def _train_model(self, min_freq=1):
        """Build the dictionary (dropping rare words) and fit the TF-IDF model."""
        self.dct = corpora.Dictionary(self.contexts)
        # Filter low frequency words from dictionary.
        rare_ids = [id_ for id_, freq in self.dct.dfs.items() if freq <= min_freq]
        self.dct.filter_tokens(rare_ids)
        self.dct.compactify()
        # Build tfidf model.
        self.corpus = [self.dct.doc2bow(s) for s in self.contexts]
        self.tfidf_model = models.TfidfModel(self.corpus)

    def _text2vec(self, text):
        """Convert a tokenized text into its TF-IDF vector."""
        return self.tfidf_model[self.dct.doc2bow(text)]

    def similarity(self, query, size=10):
        """Return the top-`size` (doc_id, score) pairs most similar to `query`."""
        scores = self.index[self._text2vec(query)]
        ranked = sorted(enumerate(scores), key=lambda item: item[1], reverse=True)
        return ranked[:size]

    def get_docs(self, sim_items):
        """Map (doc_id, score) pairs back to their context docs and response answers."""
        docs = [self.contexts[id_] for id_, score in sim_items]
        answers = [self.responses[id_] for id_, score in sim_items]
        return docs, answers
33.923077
108
0.647392
235
1,764
4.67234
0.395745
0.054645
0.040984
0.03643
0.083789
0.040073
0.040073
0
0
0
0
0.01497
0.24263
1,764
51
109
34.588235
0.806886
0.086735
0
0
0
0
0.02875
0
0
0
0
0
0
1
0.151515
false
0
0.121212
0
0.393939
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7090b5633b08f50dcfd4327962f33bf944fbe670
237
py
Python
LeetCode/lc0137-Single Number II.py
rahulbakshee/cp
0299de012e5d09bb13636f4fdcc9481da1fbc54f
[ "MIT" ]
null
null
null
LeetCode/lc0137-Single Number II.py
rahulbakshee/cp
0299de012e5d09bb13636f4fdcc9481da1fbc54f
[ "MIT" ]
null
null
null
LeetCode/lc0137-Single Number II.py
rahulbakshee/cp
0299de012e5d09bb13636f4fdcc9481da1fbc54f
[ "MIT" ]
null
null
null
# https://leetcode.com/problems/single-number-ii/
class Solution:
    def singleNumber(self, nums: List[int]) -> int:
        """Every value appears three times except one; return the lone value.

        Identity: 3 * sum(distinct) - sum(all) == 2 * unique, so the unique
        element is half that difference.
        """
        tripled_distinct = 3 * sum(set(nums))
        return (tripled_distinct - sum(nums)) // 2
26.333333
51
0.586498
32
237
4.21875
0.625
0.155556
0.148148
0
0
0
0
0
0
0
0
0.011561
0.270042
237
8
52
29.625
0.768786
0.198312
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0
0
0.6
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
2
7091366072f1274a003619cb14cf65bebdc5b41f
6,052
py
Python
aslam_offline_calibration/kalibr/python/kalibr_evaluation_calibration.py
zhixy/multical
b5eeb6283f4ad68def4b62c10416a6764651e771
[ "BSD-3-Clause" ]
27
2021-03-26T12:03:48.000Z
2022-03-29T02:16:56.000Z
aslam_offline_calibration/kalibr/python/kalibr_evaluation_calibration.py
zhixy/multical
b5eeb6283f4ad68def4b62c10416a6764651e771
[ "BSD-3-Clause" ]
2
2021-03-26T14:34:51.000Z
2021-11-03T09:14:16.000Z
aslam_offline_calibration/kalibr/python/kalibr_evaluation_calibration.py
zhixy/multical
b5eeb6283f4ad68def4b62c10416a6764651e771
[ "BSD-3-Clause" ]
9
2021-08-23T11:25:29.000Z
2022-03-28T13:22:39.000Z
#!/usr/bin/env python
import argparse
import kalibr_common as kc

from mpl_toolkits.mplot3d import art3d, Axes3D, proj3d
import numpy as np
import pylab as pl
import sm
import glob


def parse_arguments():
    """Parse CLI options selecting ground-truth files and result-file globs."""
    parser = argparse.ArgumentParser(
        description='read calibration results from yaml and compare with ground truth')
    parser.add_argument('--reference-sensor', dest='reference_sensor',
                        help='Specify the sensor as the reference coordinate system: camera0 or imu0',
                        required=True)
    parser.add_argument(
        '--cam-ground-truth', dest='cam_ground_truth',
        help='the name of yaml file which stores the ground truth of camera extrinsics',
        required=False)
    parser.add_argument(
        '--cam-file-name-prefix', dest='cam_file_name_prefix',
        help='the name prefix of yaml file which stores the calibration results of camera extrinsics',
        required=False)
    parser.add_argument(
        '--lidar-ground-truth', dest='lidar_ground_truth',
        help='the name of yaml file which stores the ground truth of lidar extrinsics',
        required=False)
    parser.add_argument(
        '--lidar-file-name-prefix', dest='lidar_file_name_prefix',
        help='the name prefix of yaml file which stores the calibration results of lidar extrinsics',
        required=False)
    parser.add_argument(
        '--imu-ground-truth', dest='imu_ground_truth',
        help='the name of yaml file which stores the ground truth of imu extrinsics',
        required=False)
    parser.add_argument(
        '--imu-file-name-prefix', dest='imu_file_name_prefix',
        help='the name prefix of yaml file which stores the calibration results of imu extrinsics',
        required=False)
    parsed_args = parser.parse_args()
    return parsed_args


def calcErrorGTAndEstimation(ext_gt, ext):
    """Return the 6-DoF error vector between a ground-truth and an estimated transform."""
    err_T = ext_gt.inverse() * ext
    err_vec = sm.fromTEuler(err_T.T())
    return err_vec


def _print_error_stats(label_fmt, err_vec_list_list, index_offset=0):
    """Print per-sensor mean/variance of extrinsic calibration error vectors.

    :param label_fmt: format string with one `{}` slot for the sensor index
    :param err_vec_list_list: one list of error vectors per sensor
    :param index_offset: added to the enumeration index in the printed label
    """
    for idx, err_vec_list in enumerate(err_vec_list_list):
        err_mat = np.array(err_vec_list)
        err_mean = np.mean(err_mat, axis=0)
        err_variance = np.var(err_mat, axis=0)
        print(label_fmt.format(idx + index_offset))
        print("mean of error: ", err_mean)
        print("variance of error: ", err_variance)


def main():
    """Compare per-sensor extrinsics across result files against ground truth."""
    parsed_args = parse_arguments()

    if parsed_args.cam_ground_truth and parsed_args.cam_file_name_prefix:
        # NOTE(review): ground truth is loaded without reference_sensor here,
        # unlike the per-result loads below — confirm this asymmetry is intended.
        cam_chain_ext_gt = kc.CameraChainParameters(parsed_args.cam_ground_truth)
        ext_gt_list = []
        num_cam = cam_chain_ext_gt.numCameras()
        for camNr in range(1, num_cam):
            ext_gt_list.append(cam_chain_ext_gt.getExtrinsicsReferenceToCam(camNr))
        err_vec_list_list = [[] for _ in range(num_cam - 1)]
        for file_name in glob.glob(parsed_args.cam_file_name_prefix):
            cam_chain_ext = kc.CameraChainParameters(file_name, parsed_args.reference_sensor)
            for camNr in range(1, num_cam):
                ext = cam_chain_ext.getExtrinsicsReferenceToCam(camNr)
                err_vec = calcErrorGTAndEstimation(ext_gt_list[camNr - 1], ext)
                err_vec_list_list[camNr - 1].append(err_vec)
        # Cameras are numbered from 1 (camera 0 is the reference).
        _print_error_stats("cam {} extrinsic calibration error", err_vec_list_list,
                           index_offset=1)

    if parsed_args.lidar_ground_truth and parsed_args.lidar_file_name_prefix:
        lidar_list_ext_gt = kc.LiDARListParameters(parsed_args.lidar_ground_truth,
                                                   parsed_args.reference_sensor)
        ext_gt_list = []
        num_lidar = lidar_list_ext_gt.numLiDARs()
        for idx in range(0, num_lidar):
            lidar_parameter = lidar_list_ext_gt.getLiDARParameters(idx)
            ext_gt_list.append(lidar_parameter.getExtrinsicsReferenceToHere())
        err_vec_list_list = [[] for _ in range(num_lidar)]
        for file_name in glob.glob(parsed_args.lidar_file_name_prefix):
            lidar_list_ext = kc.LiDARListParameters(file_name, parsed_args.reference_sensor)
            for idx in range(num_lidar):
                lidar_parameter = lidar_list_ext.getLiDARParameters(idx)
                ext = lidar_parameter.getExtrinsicsReferenceToHere()
                err_vec = calcErrorGTAndEstimation(ext_gt_list[idx], ext)
                err_vec_list_list[idx].append(err_vec)
        _print_error_stats("LiDAR {} extrinsic calibration error", err_vec_list_list)

    if parsed_args.imu_ground_truth and parsed_args.imu_file_name_prefix:
        imu_list_ext_gt = kc.ImuSetParameters(parsed_args.imu_ground_truth,
                                              parsed_args.reference_sensor)
        ext_gt_list = []
        num_imu = imu_list_ext_gt.numImus()
        for idx in range(0, num_imu):
            imu_parameter = imu_list_ext_gt.getImuParameters(idx)
            ext_gt_list.append(imu_parameter.getExtrinsicsReferenceToHere())
        err_vec_list_list = [[] for _ in range(num_imu)]
        for file_name in glob.glob(parsed_args.imu_file_name_prefix):
            imu_list_ext = kc.ImuSetParameters(file_name, parsed_args.reference_sensor)
            for idx in range(num_imu):
                imu_parameter = imu_list_ext.getImuParameters(idx)
                ext = imu_parameter.getExtrinsicsReferenceToHere()
                err_vec = calcErrorGTAndEstimation(ext_gt_list[idx], ext)
                err_vec_list_list[idx].append(err_vec)
        _print_error_stats("IMU {} extrinsic calibration error", err_vec_list_list)


if __name__ == "__main__":
    main()
45.503759
119
0.666887
799
6,052
4.739675
0.138924
0.03644
0.039609
0.033272
0.699498
0.634275
0.616583
0.561922
0.476895
0.415632
0
0.004392
0.247521
6,052
132
120
45.848485
0.827185
0.003305
0
0.368852
0
0
0.179738
0.014923
0
0
0
0
0
1
0.02459
false
0
0.057377
0
0.098361
0.07377
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7092b52d42b2a6cc2e1c28dd93180668936123db
3,251
bzl
Python
antlir/vm/bzl/install_kernel_modules.bzl
zeroxoneb/antlir
811d88965610d16a5c85d831d317f087797ca732
[ "MIT" ]
28
2020-08-11T16:22:46.000Z
2022-03-04T15:41:52.000Z
antlir/vm/bzl/install_kernel_modules.bzl
zeroxoneb/antlir
811d88965610d16a5c85d831d317f087797ca732
[ "MIT" ]
137
2020-08-11T16:07:49.000Z
2022-02-27T10:59:05.000Z
antlir/vm/bzl/install_kernel_modules.bzl
zeroxoneb/antlir
811d88965610d16a5c85d831d317f087797ca732
[ "MIT" ]
10
2020-09-10T00:01:28.000Z
2022-03-08T18:00:28.000Z
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

load("@bazel_skylib//lib:paths.bzl", "paths")
load("//antlir/bzl:image.bzl", "image")
load("//antlir/bzl:oss_shim.bzl", "buck_genrule")
load("//antlir/bzl/image/feature:defs.bzl", "feature")

def install_kernel_modules(kernel, module_list):
    # Stage the requested modules out of the kernel's module layer into one
    # directory tree.  Doing it via a genrule tolerates modules that are
    # absent from some kernel builds and gives the initrd a single dir that
    # can be cloned into the initrd layer.
    buck_genrule(
        name = kernel.uname + "-selected--modules",
        out = ".",
        cmd = """
            mkdir -p $OUT
            pushd $OUT 2>/dev/null

            # copy the needed modules out of the module layer
            binary_path=( $(exe //antlir:find-built-subvol) )
            layer_loc="$(location {module_layer})"
            mod_layer_path=\\$( "${{binary_path[@]}}" "$layer_loc" )

            mods="{module_list}"
            for mod in $mods; do
                mod_src="$mod_layer_path/kernel/$mod"
                if [[ -f "$mod_src" ]]; then
                    mod_dir=\\$(dirname "$mod")
                    mkdir -p "$mod_dir"
                    cp "$mod_src" "$mod_dir"
                fi
            done
        """.format(
            module_layer = kernel.artifacts.modules,
            module_list = " ".join(module_list),
        ),
        antlir_rule = "user-internal",
    )

    # One bare module name per line, the format modules-load.d expects.
    buck_genrule(
        name = kernel.uname + "selected--modules-load.conf",
        cmd = "echo '{}' > $OUT".format("\n".join([
            paths.basename(module).rsplit(".")[0]
            for module in module_list
        ])),
        antlir_rule = "user-internal",
        visibility = [],
    )

    return [
        # Install the kernel modules selected above into the layer.
        image.ensure_subdirs_exist("/usr/lib", paths.join("modules", kernel.uname)),
        feature.install(
            image.source(
                source = ":" + kernel.uname + "-selected--modules",
                path = ".",
            ),
            paths.join("/usr/lib/modules", kernel.uname, "kernel"),
        ),
        # Clone the depmod metadata files alongside the modules themselves.
        [
            [
                image.clone(
                    kernel.artifacts.modules,
                    paths.join("/modules.{}".format(f)),
                    paths.join("/usr/lib/modules", kernel.uname, "modules.{}".format(f)),
                ),
                image.clone(
                    kernel.artifacts.modules,
                    paths.join("/modules.{}.bin".format(f)),
                    paths.join("/usr/lib/modules", kernel.uname, "modules.{}.bin".format(f)),
                ),
            ]
            for f in ("dep", "symbols", "alias", "builtin")
        ],
        # Ensure the kernel modules are loaded by systemd when the initrd is started.
        image.ensure_subdirs_exist("/usr/lib", "modules-load.d"),
        feature.install(
            ":" + kernel.uname + "selected--modules-load.conf",
            "/usr/lib/modules-load.d/initrd-modules.conf",
        ),
    ]
38.702381
123
0.538296
364
3,251
4.711538
0.35989
0.051312
0.037901
0.060641
0.290962
0.273469
0.177843
0.110787
0.054811
0.054811
0
0.000907
0.321747
3,251
83
124
39.168675
0.776871
0.177484
0
0.19697
0
0
0.426531
0.117625
0
0
0
0
0
1
0.015152
false
0
0
0
0.030303
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
709423185bdfdebb688bc7d5a18f61d78ced662c
22
py
Python
bifrost_min_read_check/__init__.py
ssi-dk/bifrost_min_read_check
120951280c4ca7dab07c59d73e677b701e2a5cca
[ "MIT" ]
null
null
null
bifrost_min_read_check/__init__.py
ssi-dk/bifrost_min_read_check
120951280c4ca7dab07c59d73e677b701e2a5cca
[ "MIT" ]
1
2020-08-21T07:14:36.000Z
2020-08-21T07:14:36.000Z
bifrost_min_read_check/__init__.py
ssi-dk/bifrost_min_read_check
120951280c4ca7dab07c59d73e677b701e2a5cca
[ "MIT" ]
null
null
null
# Package version string (scheme appears to be v<major>_<minor>_<patch>).
__version__ = 'v2_2_8'
22
22
0.772727
4
22
2.75
1
0
0
0
0
0
0
0
0
0
0
0.15
0.090909
22
1
22
22
0.4
0
0
0
0
0
0.26087
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
70956e742f00e379dd33cde03763c0b2f6948b87
12,173
py
Python
medis/Detector/get_photon_data.py
RupertDodkins/medis
bdb1f00fb93506da2a1f251bc6780e70e97a16c5
[ "MIT" ]
1
2021-06-25T17:35:56.000Z
2021-06-25T17:35:56.000Z
medis/Detector/get_photon_data.py
RupertDodkins/medis
bdb1f00fb93506da2a1f251bc6780e70e97a16c5
[ "MIT" ]
null
null
null
medis/Detector/get_photon_data.py
RupertDodkins/medis
bdb1f00fb93506da2a1f251bc6780e70e97a16c5
[ "MIT" ]
2
2018-12-08T15:05:13.000Z
2019-08-08T17:28:24.000Z
"""Top level code that takes a atmosphere phase map and propagates a wavefront through the system"""

import os
import numpy as np
import traceback
import multiprocessing
import glob
import random
import pickle as pickle
import time

from proper_mod import prop_run
from medis.Utils.plot_tools import quicklook_im, view_datacube, loop_frames
from medis.Utils.misc import dprint
from medis.params import ap, cp, tp, mp, sp, iop, dp
import medis.Detector.MKIDs as MKIDs
import medis.Detector.H2RG as H2RG
import medis.Detector.pipeline as pipe
import medis.Detector.readout as read
import medis.Telescope.aberrations as aber
import medis.Atmosphere.atmos as atmos

# Sentinel pushed onto the input queue to tell worker processes to stop.
sentinel = None


def gen_timeseries(inqueue, photon_table_queue, outqueue, conf_obj_tup):
    """
    generates observation sequence by calling optics_propagate in time series

    is the time loop wrapper for optics_propagate
    this is where the observation sequence is generated (timeseries of observations by the detector)
    thus, where the detector observes the wavefront created by optics_propagate (for MKIDs, the probability distribution)

    :param inqueue: time index for parallelization (used by multiprocess)
    :param photon_table_queue: photon table (list of photon packets) in the multiprocessing format
    :param outqueue: series of intensity images (spectral image cube) in the multiprocessing format
    :param conf_obj_tup: tuple of the simulation parameter objects (tp, ap, sp, iop, cp, mp)
    :return:
    """
    # TODO change this name
    (tp, ap, sp, iop, cp, mp) = conf_obj_tup

    try:
        if tp.detector == 'MKIDs':
            # Rebinds the module-level `dp` with the pickled device params.
            with open(iop.device_params, 'rb') as handle:
                dp = pickle.load(handle)

        start = time.time()
        for it, t in enumerate(iter(inqueue.get, sentinel)):
            kwargs = {'iter': t, 'params': [ap, tp, iop, sp]}
            _, save_E_fields = prop_run('medis.Telescope.optics_propagate', 1, ap.grid_size,
                                        PASSVALUE=kwargs, VERBOSE=False, PHASE_OFFSET=1)
            print(save_E_fields.shape)
            # Intensity spectral cube from the last saved E-field plane.
            spectralcube = np.sum(np.abs(save_E_fields[-1, :, :]) ** 2, axis=1)

            if tp.detector == 'ideal':
                image = np.sum(spectralcube, axis=0)
                vmin = np.min(spectralcube) * 10
            elif tp.detector == 'MKIDs':
                packets = read.get_packets(spectralcube, t, dp, mp)
                cube = pipe.arange_into_cube(packets, (mp.array_size[0], mp.array_size[1]))
                if mp.remove_close:
                    timecube = read.remove_close_photons(cube)
                if sp.show_wframe:
                    image = pipe.make_intensity_map(cube, (mp.array_size[0], mp.array_size[1]))
                # Interpolating spectral cube from ap.nwsamp discreet wavelengths
                spectralcube = pipe.make_datacube(cube, (mp.array_size[0], mp.array_size[1], ap.w_bins))
                if sp.save_obs:
                    command = read.get_obs_command(packets, t)
                    photon_table_queue.put(command)
                vmin = 0.9

            if sp.show_wframe:
                dprint((sp.show_wframe, sp.show_wframe == 'continuous'))
                quicklook_im(image, logAmp=True, show=sp.show_wframe, vmin=vmin)
            if sp.show_cube:
                view_datacube(spectralcube, logAmp=True, vmin=vmin)

            if sp.use_gui:
                # NOTE(review): np.float was removed in NumPy 1.24+; this may
                # need np.float64 on newer NumPy.
                gui_images = np.zeros_like(save_E_fields, dtype=np.float)
                phase_ind = sp.gui_map_type == 'phase'
                amp_ind = sp.gui_map_type == 'amp'
                gui_images[phase_ind] = np.angle(save_E_fields[phase_ind], deg=False)
                gui_images[amp_ind] = np.absolute(save_E_fields[amp_ind])
                outqueue.put((t, gui_images, spectralcube))
            elif sp.return_E:
                outqueue.put((t, save_E_fields))
            else:
                outqueue.put((t, spectralcube))

            now = time.time()
            elapsed = float(now - start) / 60.
            each_iter = float(elapsed) / (it + 1)
            print('***********************************')
            dprint(f'{elapsed:.2f} minutes elapsed, each time step took {each_iter:.2f} minutes')
            # TODO change to log
    except Exception as e:
        traceback.print_exc()
        # raise e
        pass


def wait_until(somepredicate, timeout, period=0.25, *args, **kwargs):
    """Poll `somepredicate` every `period` seconds until it is truthy or `timeout` elapses."""
    mustend = time.time() + timeout
    while time.time() < mustend:
        if somepredicate(*args, **kwargs):
            return True
        time.sleep(period)
    return False


def run_medis(EfieldsThread=None, plot=False):
    """
    main script to organize calls to various aspects of the simulation

    initialize different sub-processes, such as atmosphere and aberration maps, MKID device parameters
    sets up the multiprocessing features
    returns the observation sequence created by gen_timeseries

    :return: obs_sequence
    """
    # Printing Params
    # TODO change this to a logging function
    dprint("Checking Params Info-print params from here (turn on/off)")

    # make the directories at this point in case the user doesn't want to keep changing params.py
    iop.makedir()

    # Reuse a previously saved observation sequence if one exists.
    check = read.check_exists_obs_sequence(plot)
    if check:
        if iop.obs_seq[-3:] == '.h5':
            obs_sequence = read.open_obs_sequence_hdf5(iop.obs_seq)
        else:
            obs_sequence = read.open_obs_sequence(iop.obs_seq)
        return obs_sequence

    begin = time.time()
    print('Creating New MEDIS Simulation')
    print('********** Taking Obs Data ***********')

    try:
        multiprocessing.set_start_method('spawn')
    except RuntimeError:
        pass

    # initialize atmosphere
    print("Atmosdir = %s " % iop.atmosdir)
    if tp.use_atmos and glob.glob(iop.atmosdir + '/*.fits') == []:
        atmos.generate_maps()

    # initialize telescope
    if (tp.aber_params['QuasiStatic'] is True) and glob.glob(iop.aberdir + 'quasi/*.fits') == []:
        aber.generate_maps(tp.f_lens)
    if tp.aber_params['NCPA']:
        aber.generate_maps(tp.f_lens, 'NCPA', 'lens')

    aber.initialize_CPA_meas()

    if tp.active_null:
        aber.initialize_NCPA_meas()

    if sp.save_locs is None:
        sp.save_locs = []
    if 'detector' not in sp.save_locs:
        sp.save_locs = np.append(sp.save_locs, 'detector')
        sp.gui_map_type = np.append(sp.gui_map_type, 'amp')

    # initialize MKIDs
    if tp.detector == 'MKIDs' and not os.path.isfile(iop.device_params):
        MKIDs.initialize()

    photon_table_queue = multiprocessing.Queue()
    inqueue = multiprocessing.Queue()
    outqueue = multiprocessing.Queue()
    jobs = []

    if sp.save_obs and tp.detector == 'MKIDs':
        proc = multiprocessing.Process(target=read.handle_output,
                                       args=(photon_table_queue, iop.obsfile))
        proc.start()

    if ap.companion is False:
        ap.contrast = []

    if tp.detector == 'MKIDs':
        obs_sequence = np.zeros((ap.numframes, ap.w_bins, mp.array_size[1], mp.array_size[0]))
    else:
        obs_sequence = np.zeros((ap.numframes, ap.w_bins, ap.grid_size, ap.grid_size))

    if sp.return_E:
        e_fields_sequence = np.zeros((ap.numframes, len(sp.save_locs), ap.nwsamp,
                                      1 + len(ap.contrast), ap.grid_size, ap.grid_size),
                                     dtype=np.complex64)
    else:
        e_fields_sequence = None

    # Sending Queues to gen_timeseries
    for i in range(sp.num_processes):
        p = multiprocessing.Process(target=gen_timeseries,
                                    args=(inqueue, photon_table_queue, outqueue,
                                          (tp, ap, sp, iop, cp, mp)))
        jobs.append(p)
        p.start()

    if tp.quick_ao:
        for t in range(ap.startframe, ap.startframe + ap.numframes):
            inqueue.put(t)
            if sp.use_gui:
                it, gui_images, spectralcube = outqueue.get()
                while sp.play_gui is False:
                    time.sleep(0.005)
                EfieldsThread.newSample.emit(gui_images)
                EfieldsThread.sct.newSample.emit((it, spectralcube))
    else:
        dprint('If the code has hung here it probably means it cant read the CPA file at some iter')
        for t in range(ap.startframe, ap.startframe + ap.numframes):
            print(t)
            if not tp.active_null:
                with open(iop.CPA_meas, 'rb') as handle:
                    _, iters = pickle.load(handle)
                print(iters, 'iters')
                # Block until the aberration measurement catches up to frame t.
                while iters[0] + ap.startframe < t:
                    time.sleep(0.1)
                    print('looping', t)
                    try:
                        with open(iop.CPA_meas, 'rb') as handle:
                            _, iters = pickle.load(handle)
                        cur_iter = iters[0]
                    except EOFError:
                        print('Errored')
            else:
                with open(iop.NCPA_meas, 'rb') as handle:
                    _, _, cur_iter = pickle.load(handle)
                while cur_iter < t:
                    time.sleep(0.1)
                    try:
                        with open(iop.NCPA_meas, 'rb') as handle:
                            _, _, cur_iter = pickle.load(handle)
                    except EOFError:
                        print('Errored')
            inqueue.put(t)

    # Send the sentinal to tell Simulation to end
    for i in range(sp.num_processes):
        inqueue.put(sentinel)

    for t in range(ap.numframes):
        if sp.return_E:
            t, save_E_fields = outqueue.get()
            e_fields_sequence[t - ap.startframe] = save_E_fields
        else:
            t, spectralcube = outqueue.get()
            # should be in the right order now because of the identifier
            obs_sequence[t - ap.startframe] = spectralcube

    photon_table_queue.put(None)
    outqueue.put(None)
    if sp.save_obs and tp.detector == 'MKIDs':
        proc.join()

    obs_sequence = np.array(obs_sequence)

    print('MEDIS Data Run Completed')
    finish = time.time()
    if sp.timing is True:
        print(f'Time elapsed: {(finish-begin)/60:.2f} minutes')
    print('**************************************')
    print(f"Shape of obs_sequence = {np.shape(obs_sequence)}")

    if tp.detector == 'H2RG':
        obs_sequence = H2RG.scale_to_luminos(obs_sequence)
    # NOTE(review): `hp` is not imported anywhere in this module — this branch
    # would raise NameError if reached; confirm intended params object.
    if tp.detector == 'H2RG' and hp.use_readnoise:
        obs_sequence = H2RG.add_readnoise(obs_sequence, hp.readnoise)

    if sp.return_E:
        read.save_fields(e_fields_sequence, fields_file=iop.fields)
        return e_fields_sequence
    else:
        dprint("Saving obs_sequence as hdf5 file:")
        read.save_obs_sequence(obs_sequence, obs_seq_file=iop.obs_seq)
        return obs_sequence


if __name__ == '__main__':
    sp.timing = True
    run_medis()
36.446108
147
0.594348
1,531
12,173
4.561724
0.239059
0.034651
0.01575
0.012027
0.205183
0.171535
0.128007
0.111684
0.108247
0.08047
0
0.006078
0.297133
12,173
333
148
36.555556
0.810192
0.204962
0
0.229268
0
0
0.076754
0.015916
0
0
0
0.009009
0
1
0.014634
false
0.014634
0.087805
0
0.121951
0.102439
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
70974016c8294426ef58c0a0a53fcfaf197aee1a
332
py
Python
HW3/YaRa1312/Pleshyvtseva PythonCore HW 3.3.py
kolyasalubov/Lv-677.PythonCore
c9f9107c734a61e398154a90b8a3e249276c2704
[ "MIT" ]
null
null
null
HW3/YaRa1312/Pleshyvtseva PythonCore HW 3.3.py
kolyasalubov/Lv-677.PythonCore
c9f9107c734a61e398154a90b8a3e249276c2704
[ "MIT" ]
null
null
null
HW3/YaRa1312/Pleshyvtseva PythonCore HW 3.3.py
kolyasalubov/Lv-677.PythonCore
c9f9107c734a61e398154a90b8a3e249276c2704
[ "MIT" ]
6
2022-02-22T22:30:49.000Z
2022-03-28T12:51:19.000Z
#3.3 user_ineteger_1 = int(input("Enter an integer: ")) user_integer_2 = int(input("Enter an other integer or the same as the first one: ")) def sameOrNot(user_ineteger_1, user_integer_2): if (user_ineteger_1 ^ user_integer_2): print("Not Same") else: print("Same") sameOrNot(user_ineteger_1, user_integer_2)
36.888889
84
0.713855
54
332
4.092593
0.425926
0.217195
0.235294
0.230769
0.420814
0.420814
0.307692
0
0
0
0
0.036364
0.171687
332
9
85
36.888889
0.767273
0.009036
0
0
0
0
0.25228
0
0
0
0
0
0
1
0.125
false
0
0
0
0.125
0.25
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
709771a132f3b39704dc8a78fa50fd197c2b9aca
144
py
Python
tests/fixtures/regressors/__init__.py
XavierJingfeng/starter
274566e491d5c7157f3c8deff136c56838022349
[ "MIT" ]
null
null
null
tests/fixtures/regressors/__init__.py
XavierJingfeng/starter
274566e491d5c7157f3c8deff136c56838022349
[ "MIT" ]
null
null
null
tests/fixtures/regressors/__init__.py
XavierJingfeng/starter
274566e491d5c7157f3c8deff136c56838022349
[ "MIT" ]
null
null
null
from tests.fixtures.regressors.simple_gaussian_mlp_regressor import ( SimpleGaussianMLPRegressor) __all__ = ['SimpleGaussianMLPRegressor']
28.8
69
0.840278
12
144
9.5
0.916667
0
0
0
0
0
0
0
0
0
0
0
0.090278
144
4
70
36
0.870229
0
0
0
0
0
0.180556
0.180556
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
709b6bd1dc6310e9d17ea3ad5431e576cabcfddb
10,286
py
Python
src/utils/workflow_utils.py
cmikke97/AMSG
ddcfeb6262124e793fcee385405365417e57f91f
[ "Apache-2.0" ]
3
2021-06-30T07:22:46.000Z
2022-03-23T08:21:10.000Z
src/utils/workflow_utils.py
cmikke97/Automatic-Malware-Signature-Generation
ddcfeb6262124e793fcee385405365417e57f91f
[ "Apache-2.0" ]
null
null
null
src/utils/workflow_utils.py
cmikke97/Automatic-Malware-Signature-Generation
ddcfeb6262124e793fcee385405365417e57f91f
[ "Apache-2.0" ]
null
null
null
# Copyright 2021, Crepaldi Michele. # # Developed as a thesis project at the TORSEC research group of the Polytechnic of Turin (Italy) under the supervision # of professor Antonio Lioy and engineer Andrea Atzeni and with the support of engineer Andrea Marcelli. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. import base64 # provides functions for encoding/decoding binary data to/from printable ASCII characters import hashlib # implements a common interface to many different secure hash and message digest algorithms import mlflow # open source platform for managing the end-to-end machine learning lifecycle from logzero import logger # robust and effective logging for Python from mlflow.entities import RunStatus # status of a Run from mlflow.tracking.fluent import _get_experiment_id # get current experiment id function from mlflow.utils import mlflow_tags # mlflow tags class Hash: """ Simple wrapper around hashlib sha256 functions. """ def __init__(self): """ Initialize hash class using hashlib sha256 implementation. """ # initialize sha256 hash object self.m = hashlib.sha256() def update(self, w): # string to update hash value with """ Update current hash value. 
Args: w: String to update hash value with """ # update current hash with w self.m.update(w.encode('utf-8')) def copy(self): """ Return a copy of the Hash object Returns: Copy of the current Hash instance """ # instantiate new hash object copy = Hash() # copy current object sha256 into the new instance copy.m = self.m.copy() # return the new instance return copy def get_b64(self): """ Get base64 encoding of the current hash value digest. Returns: Base64 encoding of the hash digest. """ # return base64 encoded (url safe) hash digest return base64.urlsafe_b64encode(self.m.digest()).decode('utf-8') def _already_ran(entry_point_name, # entry point name of the run parameters, # parameters of the run git_commit, # git version of the code run config_sha, # sha256 of config file ignore_git=False, # whether to ignore git version or not (default: False) experiment_id=None, # experiment id (default: None) resume=False): # whether to resume a failed/killed previous run or not (default: False) """ Best-effort detection of if a run with the given entrypoint name, parameters, and experiment id already ran. The run must have completed successfully and have at least the parameters provided. Args: entry_point_name: Entry point name of the run parameters: Parameters of the run git_commit: Git version of the code run config_sha: Sha256 of config file ignore_git: Whether to ignore git version or not (default: False) experiment_id: Experiment id (default: None) resume: Whether to resume a failed/killed previous run (only for training) or not (default: False) Returns: Previously executed run if found, None otherwise. 
""" # if experiment ID is not provided retrieve current experiment ID experiment_id = experiment_id if experiment_id is not None else _get_experiment_id() # instantiate MLflowClient (creates and manages experiments and runs) client = mlflow.tracking.MlflowClient() # get reversed list of run information (from last to first) all_run_infos = reversed(client.list_run_infos(experiment_id)) run_to_resume_id = None # for all runs info for run_info in all_run_infos: # fetch run from backend store full_run = client.get_run(run_info.run_id) # get run dictionary of tags tags = full_run.data.tags # if there is no entry point, or the entry point for the run is different from 'entry_point_name', continue if tags.get(mlflow_tags.MLFLOW_PROJECT_ENTRY_POINT, None) != entry_point_name: continue # initialize 'match_failed' bool to false match_failed = False # for each parameter in the provided run parameters for param_key, param_value in parameters.items(): # get run param value from the run dictionary of parameters run_value = full_run.data.params.get(param_key) # if the current parameter value is different from the run parameter set 'match_failed' to true and break if str(run_value) != str(param_value): match_failed = True break # if the current run is not the one we are searching for go to the next one if match_failed: continue # get previous run git commit version previous_version = tags.get(mlflow_tags.MLFLOW_GIT_COMMIT, None) # if the previous version is different from the current one, go to the next one if not ignore_git and git_commit != previous_version: logger.warning("Run matched, but has a different source version, so skipping (found={}, expected={})" .format(previous_version, git_commit)) continue # get config file sha256 from the run run_config_sha = full_run.data.params.get('config_sha') # if the config file sha256 for the run is different from the current sha, go to the next one if str(run_config_sha) != str(config_sha): logger.warning("Run matched, but config is 
different.") continue # if the run is not finished if run_info.to_proto().status != RunStatus.FINISHED: if resume: # if resume is enabled, set current run to resume id -> if no newer completed run is found, # this stopped run will be resumed run_to_resume_id = run_info.run_id continue else: # otherwise skip it and try with the next one logger.warning("Run matched, but is not FINISHED, so skipping " "(run_id={}, status={})" .format(run_info.run_id, run_info.status)) continue # otherwise (if the run was found and it is exactly the same), return the found run return client.get_run(run_info.run_id) # if no previously executed (and finished) run was found but a stopped run was found, resume such run if run_to_resume_id is not None: logger.info("Resuming run with entrypoint=%s and parameters=%s" % (entry_point_name, parameters)) # update new run parameters with the stopped run id parameters.update({ 'run_id': run_to_resume_id }) # submit new run that will resume the previously interrupted one submitted_run = mlflow.run(".", entry_point_name, parameters=parameters) # log config file sha256 as parameter in the submitted run client.log_param(submitted_run.run_id, 'config_sha', config_sha) # return submitted (new) run return mlflow.tracking.MlflowClient().get_run(submitted_run.run_id) # if the searched run was not found return 'None' logger.warning("No matching run has been found.") return None def run(entrypoint, # entrypoint of the run parameters, # parameters of the run config_sha): # sha256 of config file """ Launch run. Args: entrypoint: Entrypoint of the run parameters: Parameters of the run config_sha: Sha256 of config file Returns: Launched run. 
""" # get mlflow tracking client client = mlflow.tracking.MlflowClient() logger.info("Launching new run for entrypoint={} and parameters={}".format(entrypoint, parameters)) # submit (start) run submitted_run = mlflow.run(".", entrypoint, parameters=parameters) # log config file sha256 as parameter in the submitted run client.log_param(submitted_run.run_id, 'config_sha', config_sha) # return run return client.get_run(submitted_run.run_id) def get_or_run(entrypoint, # entrypoint of the run parameters, # parameters of the run git_commit, # git version of the run config_sha, # sha256 of config file ignore_git=False, # whether to ignore git version or not (default: False) use_cache=True, # whether to cache previous runs or not (default: True) resume=False): # whether to resume a failed/killed previous run or not (default: False) """ Get previously executed run, if it exists, or launch run. Args: entrypoint: Entrypoint of the run parameters: Parameters of the run git_commit: Git version of the run config_sha: Sha256 of config file ignore_git: Whether to ignore git version or not (default: False) use_cache: Whether to cache previous runs or not (default: True) resume: Whether to resume a failed/killed previous run or not (default: False) Returns: Found or launched run. """ # get already executed run, if it exists existing_run = _already_ran(entrypoint, parameters, git_commit, ignore_git=ignore_git, resume=resume, config_sha=config_sha) # if we want to cache previous runs and we found a previously executed run, return found run if use_cache and existing_run: logger.info("Found existing run for entrypoint={} and parameters={}".format(entrypoint, parameters)) return existing_run # otherwise, start run and return it return run(entrypoint=entrypoint, parameters=parameters, config_sha=config_sha)
44.336207
118
0.66994
1,402
10,286
4.808131
0.204708
0.016318
0.016615
0.020175
0.33897
0.278149
0.257084
0.249963
0.233645
0.226969
0
0.009163
0.267937
10,286
231
119
44.528139
0.886056
0.530332
0
0.228261
0
0
0.094076
0
0
0
0
0
0
1
0.076087
false
0
0.076087
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
709bdbfb5b18720b7d117f1e6e1e246c7727a60a
1,578
py
Python
tests/test_data.py
joseph-nagel/torchutils
e13b5b156734dc1645e1d6c7b81738ca52904c92
[ "MIT" ]
null
null
null
tests/test_data.py
joseph-nagel/torchutils
e13b5b156734dc1645e1d6c7b81738ca52904c92
[ "MIT" ]
null
null
null
tests/test_data.py
joseph-nagel/torchutils
e13b5b156734dc1645e1d6c7b81738ca52904c92
[ "MIT" ]
null
null
null
'''Tests for the data module.''' import pytest import numpy as np import torch from torch.utils.data import TensorDataset from torchutils.data import mean_std_over_dataset, image2tensor, tensor2image @pytest.mark.parametrize('no_samples', [100, 1000]) @pytest.mark.parametrize('feature_shape', [(), (1,), (10,), (10,10)]) def test_mean_std_over_dataset(no_samples, feature_shape): '''Test correctness of evaluating the mean and standard deviation.''' torch.manual_seed(0) X = torch.randn(no_samples, *feature_shape) y = torch.randint(2, size=(no_samples,)) data_set = TensorDataset(X, y) mean, std = mean_std_over_dataset(data_set) ref_mean = X.numpy().mean() ref_std = X.numpy().std() assert np.isclose(mean, ref_mean, rtol=1e-02, atol=1e-03) assert np.isclose(std, ref_std, rtol=1e-02, atol=1e-03) @pytest.mark.parametrize('shape', [(10,10), (10,10,3), (1,10,10,3)]) def test_image2tensor2image(shape): '''Test the transformation and back-transformation of an image.''' np.random.seed(0) image = np.random.randn(*shape) tensor = image2tensor(image) new_image = tensor2image(tensor) assert np.allclose(image.squeeze(), new_image.squeeze()) @pytest.mark.parametrize('shape', [(10,10), (3,10,10), (1,3,10,10)]) def test_tensor2image2tensor(shape): '''Test the transformation and back-transformation of a tensor.''' torch.manual_seed(0) tensor = torch.randn(*shape) image = tensor2image(tensor) new_tensor = image2tensor(image) assert np.allclose(tensor.squeeze(), new_tensor.squeeze())
38.487805
77
0.707858
228
1,578
4.763158
0.307018
0.033149
0.077348
0.049724
0.174954
0.174954
0.090239
0.090239
0
0
0
0.051661
0.141318
1,578
40
78
39.45
0.749816
0.134347
0
0.064516
0
0
0.024554
0
0
0
0
0
0.129032
1
0.096774
false
0
0.16129
0
0.258065
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
709be0e39f954015ae500d50e9d8b5505b26abc0
8,593
py
Python
src/model_execution_worker/tasks.py
OasisLMF/OasisPlatform_SQL
e3359d0bd3093e47bc46848c810b8876980d5cbc
[ "BSD-3-Clause" ]
1
2020-02-27T13:25:22.000Z
2020-02-27T13:25:22.000Z
src/model_execution_worker/tasks.py
OasisLMF/OasisPlatform_SQL
e3359d0bd3093e47bc46848c810b8876980d5cbc
[ "BSD-3-Clause" ]
3
2019-11-14T10:26:46.000Z
2021-03-25T22:33:52.000Z
src/model_execution_worker/tasks.py
OasisLMF/OasisPlatform_SQL
e3359d0bd3093e47bc46848c810b8876980d5cbc
[ "BSD-3-Clause" ]
2
2019-03-21T09:22:12.000Z
2019-05-24T15:13:51.000Z
from __future__ import absolute_import import importlib import logging import uuid from contextlib import contextmanager import fasteners import json import os import shutil import tarfile import glob import sys import time from oasislmf.model_execution.bin import prepare_model_run_directory, prepare_model_run_inputs from oasislmf.model_execution import runner from oasislmf.utils import status from oasislmf.utils.exceptions import OasisException from oasislmf.utils.log import oasis_log from pathlib2 import Path from celery import Celery from celery.task import task from ..utils.path import setcwd from ..conf.settings import settings from ..conf import celery as celery_conf ''' Celery task wrapper for Oasis ktools calculation. ''' ARCHIVE_FILE_SUFFIX = '.tar' CELERY = Celery() CELERY.config_from_object(celery_conf) logging.info("Started worker") logging.info("INPUTS_DATA_DIRECTORY: {}".format(settings.get('worker', 'INPUTS_DATA_DIRECTORY'))) logging.info("OUTPUTS_DATA_DIRECTORY: {}".format(settings.get('worker', 'OUTPUTS_DATA_DIRECTORY'))) logging.info("MODEL_DATA_DIRECTORY: {}".format(settings.get('worker', 'MODEL_DATA_DIRECTORY'))) logging.info("WORKING_DIRECTORY: {}".format(settings.get('worker', 'WORKING_DIRECTORY'))) logging.info("KTOOLS_BATCH_COUNT: {}".format(settings.get('worker', 'KTOOLS_BATCH_COUNT'))) logging.info("KTOOLS_ALLOC_RULE: {}".format(settings.get('worker', 'KTOOLS_ALLOC_RULE'))) logging.info("KTOOLS_MEMORY_LIMIT: {}".format(settings.get('worker', 'KTOOLS_MEMORY_LIMIT'))) logging.info("LOCK_TIMEOUT_IN_SECS: {}".format(settings.get('worker', 'LOCK_TIMEOUT_IN_SECS'))) logging.info("LOCK_RETRY_COUNTDOWN_IN_SECS: {}".format(settings.get('worker', 'LOCK_RETRY_COUNTDOWN_IN_SECS'))) logging.info("POST_ANALYSIS_SLEEP_IN_SECS: {}".format(settings.get('worker', 'POST_ANALYSIS_SLEEP_IN_SECS'))) class MissingInputsException(OasisException): def __init__(self, input_archive): super(MissingInputsException, self).__init__('Inputs location not found: 
{}'.format(input_archive)) class InvalidInputsException(OasisException): def __init__(self, input_archive): super(InvalidInputsException, self).__init__('Inputs location not a tarfile: {}'.format(input_archive)) class MissingModelDataException(OasisException): def __init__(self, model_data_path): super(MissingModelDataException, self).__init__('Model data not found: {}'.format(model_data_path)) @contextmanager def get_lock(): lock = fasteners.InterProcessLock(settings.get('worker', 'LOCK_FILE')) gotten = lock.acquire(blocking=True, timeout=settings.getfloat('worker', 'LOCK_TIMEOUT_IN_SECS')) yield gotten if gotten: lock.release() @task(name='run_analysis', bind=True) def start_analysis_task(self, input_location, analysis_settings_json): ''' Task wrapper for running an analysis. Args: analysis_profile_json (string): The analysis settings. Returns: (string) The location of the outputs. ''' logging.info("LOCK_FILE: {}".format(settings.get('worker', 'LOCK_FILE'))) logging.info("LOCK_RETRY_COUNTDOWN_IN_SECS: {}".format( settings.get('worker', 'LOCK_RETRY_COUNTDOWN_IN_SECS'))) with get_lock() as gotten: if not gotten: logging.info("Failed to get resource lock - retry task") # max_retries=None is supposed to be unlimited but doesn't seem to work # Set instead to a large number raise self.retry( max_retries=9999999, countdown=settings.getint('worker', 'LOCK_RETRY_COUNTDOWN_IN_SECS')) logging.info("Acquired resource lock") try: logging.info("INPUTS_DATA_DIRECTORY: {}".format(settings.get('worker', 'INPUTS_DATA_DIRECTORY'))) logging.info("OUTPUTS_DATA_DIRECTORY: {}".format(settings.get('worker', 'OUTPUTS_DATA_DIRECTORY'))) logging.info("MODEL_DATA_DIRECTORY: {}".format(settings.get('worker', 'MODEL_DATA_DIRECTORY'))) logging.info("WORKING_DIRECTORY: {}".format(settings.get('worker', 'WORKING_DIRECTORY'))) logging.info("KTOOLS_BATCH_COUNT: {}".format(settings.get('worker', 'KTOOLS_BATCH_COUNT'))) logging.info("KTOOLS_MEMORY_LIMIT: {}".format(settings.get('worker', 
'KTOOLS_MEMORY_LIMIT'))) self.update_state(state=status.STATUS_RUNNING) output_location = start_analysis(analysis_settings_json[0], input_location) except Exception: logging.exception("Model execution task failed.") raise time.sleep(settings.getint('worker', 'POST_ANALYSIS_SLEEP_IN_SECS')) return output_location @oasis_log() def start_analysis(analysis_settings, input_location): ''' Run an analysis. Args: analysis_profile_json (string): The analysis settings. Returns: (string) The location of the outputs. ''' # Check that the input archive exists and is valid input_archive = os.path.join( settings.get('worker', 'INPUTS_DATA_DIRECTORY'), input_location + ARCHIVE_FILE_SUFFIX ) if not os.path.exists(input_archive): raise MissingInputsException(input_archive) if not tarfile.is_tarfile(input_archive): raise InvalidInputsException(input_archive) source_tag = analysis_settings['analysis_settings']['source_tag'] analysis_tag = analysis_settings['analysis_settings']['analysis_tag'] logging.info( "Source tag = {}; Analysis tag: {}".format(analysis_tag, source_tag) ) module_supplier_id = analysis_settings['analysis_settings']['module_supplier_id'] model_version_id = analysis_settings['analysis_settings']['model_version_id'] logging.info( "Model supplier - version = {} {}".format(module_supplier_id, model_version_id) ) # Get the supplier module and call it use_default_model_runner = not Path(settings.get('worker', 'SUPPLIER_MODULE_DIRECTORY'), module_supplier_id).exists() model_data_path = os.path.join( settings.get('worker', 'MODEL_DATA_DIRECTORY'), module_supplier_id, model_version_id ) if not os.path.exists(model_data_path): raise MissingModelDataException(model_data_path) logging.info("Setting up analysis working directory") directory_name = "{}_{}_{}".format(source_tag, analysis_tag, uuid.uuid4().hex) working_directory = os.path.join(settings.get('worker', 'WORKING_DIRECTORY'), directory_name) if 'ri_output' in analysis_settings['analysis_settings'].keys(): ri = 
analysis_settings['analysis_settings']['ri_output'] else: ri = False prepare_model_run_directory(working_directory, ri=ri, model_data_src_path=model_data_path, inputs_archive=input_archive) prepare_model_run_inputs(analysis_settings['analysis_settings'], working_directory, ri=ri) with setcwd(working_directory): logging.info("Working directory = {}".format(working_directory)) # Persist the analysis_settings with open("analysis_settings.json", "w") as json_file: json.dump(analysis_settings, json_file) if use_default_model_runner: model_runner_module = runner else: sys.path.append(settings.get('worker', 'SUPPLIER_MODULE_DIRECTORY')) model_runner_module = importlib.import_module('{}.supplier_model_runner'.format(module_supplier_id)) ##! to add check that RI directories take the form of RI_{ID} amd ID is a monotonic index num_reinsurance_iterations = len(glob.glob('RI_[0-9]*')) model_runner_module.run( analysis_settings['analysis_settings'], settings.getint('worker', 'KTOOLS_BATCH_COUNT'), num_reinsurance_iterations=num_reinsurance_iterations, ktools_mem_limit=settings.getboolean('worker', 'KTOOLS_MEMORY_LIMIT'), set_alloc_rule=settings.getint('worker', 'KTOOLS_ALLOC_RULE'), fifo_tmp_dir=False ) output_location = uuid.uuid4().hex output_filepath = os.path.join( settings.get('worker', 'OUTPUTS_DATA_DIRECTORY'), output_location + ARCHIVE_FILE_SUFFIX) output_directory = os.path.join(working_directory, "output") with tarfile.open(output_filepath, "w:gz") as tar: tar.add(output_directory, arcname="output") if settings.getboolean('worker', 'DO_CLEAR_WORKING'): shutil.rmtree(working_directory, ignore_errors=True) logging.info("Output location = {}".format(output_location)) return output_location
39.237443
124
0.718375
1,036
8,593
5.650579
0.186293
0.048855
0.0726
0.070721
0.376153
0.312265
0.234028
0.214042
0.205159
0.205159
0
0.001816
0.167113
8,593
218
125
39.417431
0.816124
0.068661
0
0.137931
0
0
0.242986
0.076425
0
0
0
0
0
1
0.041379
false
0
0.172414
0
0.248276
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
709c91d65265a22a647756f08a9dbfdc545c3f18
1,088
py
Python
complejidad_algoritmica.py
francomanca93/poo-algoritmos-python
47bc0289cf1a91e2bee93f354bd39e1b592fa774
[ "MIT" ]
null
null
null
complejidad_algoritmica.py
francomanca93/poo-algoritmos-python
47bc0289cf1a91e2bee93f354bd39e1b592fa774
[ "MIT" ]
null
null
null
complejidad_algoritmica.py
francomanca93/poo-algoritmos-python
47bc0289cf1a91e2bee93f354bd39e1b592fa774
[ "MIT" ]
null
null
null
import time # Importo el modulo sys y aumento el limite de recursión, ya que viene predefinido con 1000 import sys sys.setrecursionlimit(1000000) # 1 000 000 def factorial_iterativo(n): respuesta = 1 while n > 1: respuesta *= n n -= 1 return respuesta def factorial_recursivo(n): if n == 1: return 1 return n * factorial_iterativo(n - 1) if __name__ == '__main__': n = 10000000 print('Complejidad temporal de un algoritmo ITERATIVO. Factorial') comienzo = time.time() factorial_iterativo(n) final = time.time() tiempo_iterativo = final - comienzo print(tiempo_iterativo) print('--------------------') print('Complejidad temporal de un algoritmo RECURSIVO. Factorial') comienzo = time.time() factorial_recursivo(n) final = time.time() tiempo_recursivo = final - comienzo print(tiempo_recursivo) print('-------------------') diferencia = abs(tiempo_iterativo - tiempo_recursivo) print(f'La diferencia de tiempo entre un algoritmo y otro es {diferencia}')
23.652174
91
0.647059
130
1,088
5.269231
0.369231
0.011679
0.083212
0.075912
0.265693
0.108029
0
0
0
0
0
0.038508
0.236213
1,088
45
92
24.177778
0.7858
0.090993
0
0.129032
0
0
0.229442
0
0
0
0
0
0
1
0.064516
false
0
0.064516
0
0.225806
0.225806
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
709f375eec3da89e1429343cd567396c10876145
765
py
Python
wheel/notice/email_sender.py
kong5664546498/half_a_wheel
d50c2359ac7dda55f54dd08bb588091eb6232b81
[ "MIT" ]
null
null
null
wheel/notice/email_sender.py
kong5664546498/half_a_wheel
d50c2359ac7dda55f54dd08bb588091eb6232b81
[ "MIT" ]
null
null
null
wheel/notice/email_sender.py
kong5664546498/half_a_wheel
d50c2359ac7dda55f54dd08bb588091eb6232b81
[ "MIT" ]
null
null
null
import smtplib from email.header import Header from email.mime.text import MIMEText class EmailSender: def __init__(self) -> None: self.receiver = "kongandmarx@163.com" self.sender = "kongandmarx@163.com" self.smtp_obj = smtplib.SMTP_SSL("smtp.163.com", port=994) # self.smtp_obj.connect("smtp.163.com", 25) self.smtp_obj.login("kongandmarx@163.com", "YVLZXZWJBYAHLCAJ") def send(self, to, subject, text): t = f""" <h1> {text} </h1> """ message = MIMEText(t, "html") message["Subject"] = Header(subject) message["From"] = Header(f"{self.sender}") message["To"] = Header(f"{to}") self.smtp_obj.sendmail(self.sender, self.receiver, message.as_string())
34.772727
79
0.616993
97
765
4.762887
0.402062
0.064935
0.095238
0.090909
0
0
0
0
0
0
0
0.037162
0.226144
765
22
79
34.772727
0.743243
0.053595
0
0
0
0
0.213001
0
0
0
0
0
0
1
0.111111
false
0
0.166667
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
70a04db5de55b9f9804753c3bf5f7a12c6fc7e92
6,590
py
Python
src/config_common.py
karawallace/mygene
35bf066eb50bc929b4bb4e2423d47b4c98797526
[ "Apache-2.0" ]
null
null
null
src/config_common.py
karawallace/mygene
35bf066eb50bc929b4bb4e2423d47b4c98797526
[ "Apache-2.0" ]
null
null
null
src/config_common.py
karawallace/mygene
35bf066eb50bc929b4bb4e2423d47b4c98797526
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- from biothings.www.settings.default import * from www.api.query_builder import ESQueryBuilder from www.api.query import ESQuery from www.api.transform import ESResultTransformer from www.api.handlers import GeneHandler, QueryHandler, MetadataHandler, StatusHandler, TaxonHandler, DemoHandler # ***************************************************************************** # Elasticsearch variables # ***************************************************************************** # elasticsearch server transport url ES_HOST = 'localhost:9200' # elasticsearch index name ES_INDEX = 'mygene_current' # elasticsearch document type ES_DOC_TYPE = 'gene' API_VERSION = 'v3' HOST_ENVAR_NAME = "MG_HOST" # ***************************************************************************** # App URL Patterns # ***************************************************************************** APP_LIST = [ (r"/status", StatusHandler), (r"/metadata/?", MetadataHandler), (r"/metadata/fields/?", MetadataHandler), (r"/demo/?$", DemoHandler), (r"/{}/species/(\d+)/?".format(API_VERSION), TaxonHandler), (r"/{}/taxon/(\d+)/?".format(API_VERSION), TaxonHandler), (r"/{}/gene/(.+)/?".format(API_VERSION), GeneHandler), (r"/{}/gene/?$".format(API_VERSION), GeneHandler), (r"/{}/query/?".format(API_VERSION), QueryHandler), (r"/{}/metadata/?".format(API_VERSION), MetadataHandler), (r"/{}/metadata/fields/?".format(API_VERSION), MetadataHandler), ] ############################################################################### # app-specific query builder, query, and result transformer classes ############################################################################### # ***************************************************************************** # Subclass of biothings.www.api.es.query_builder.ESQueryBuilder to build # queries for this app # ***************************************************************************** ES_QUERY_BUILDER = ESQueryBuilder # 
***************************************************************************** # Subclass of biothings.www.api.es.query.ESQuery to execute queries for this app # ***************************************************************************** ES_QUERY = ESQuery # ***************************************************************************** # Subclass of biothings.www.api.es.transform.ESResultTransformer to transform # ES results for this app # ***************************************************************************** ES_RESULT_TRANSFORMER = ESResultTransformer GA_ACTION_QUERY_GET = 'query_get' GA_ACTION_QUERY_POST = 'query_post' GA_ACTION_ANNOTATION_GET = 'gene_get' GA_ACTION_ANNOTATION_POST = 'gene_post' GA_TRACKER_URL = 'MyGene.info' STATUS_CHECK_ID = '1017' JSONLD_CONTEXT_PATH = 'www/context/context.json' # MYGENE THINGS # This essentially bypasses the es.get fallback as in myvariant... # The first regex matched integers, in which case the query becomes against entrezgeneall annotation queries are now multimatch # against the following fields ANNOTATION_ID_REGEX_LIST = [(re.compile(r'^\d+$'), ['entrezgene', 'retired']), (re.compile(r'.*'), ['ensembl.gene'])] DEFAULT_FIELDS = ['name', 'symbol', 'taxid', 'entrezgene'] TAXONOMY = { "human": {"tax_id": "9606", "assembly": "hg38"}, "mouse": {"tax_id": "10090", "assembly": "mm10"}, "rat": {"tax_id": "10116", "assembly": "rn4"}, "fruitfly": {"tax_id": "7227", "assembly": "dm3"}, "nematode": {"tax_id": "6239", "assembly": "ce10"}, "zebrafish": {"tax_id": "7955", "assembly": "zv9"}, "thale-cress": {"tax_id": "3702"}, "frog": {"tax_id": "8364", "assembly": "xenTro3"}, "pig": {"tax_id": "9823", "assembly": "susScr2"} } DATASOURCE_TRANSLATIONS = { "refseq:": r"refseq.\\\*:", "accession:": r"accession.\\\*:", "reporter:": r"reporter.\\\*:", "interpro:": r"interpro.\\\*:", # GO:xxxxx looks like a ES raw query, so just look for # the term as a string in GO's ID (note: searching every keys # will raise an error because pubmed 
key is a int and we're # searching with a string term. "GO:": r"go.\\\*.id:go\\\:", #"GO:": r"go.\\\*:go.", "homologene:": r"homologene.\\\*:", "reagent:": r"reagent.\\\*:", "uniprot:": r"uniprot.\\\*:", "ensemblgene:": "ensembl.gene:", "ensembltranscript:": "ensembl.transcript:", "ensemblprotein:": "ensembl.protein:", # some specific datasources needs to be case-insentive "hgnc:": r"HGNC:", "hprd:": r"HPRD:", "mim:": r"MIM:", "mgi:": r"MGI:", "ratmap:": r"RATMAP:", "rgd:": r"RGD:", "flybase:": r"FLYBASE:", "wormbase:": r"WormBase:", "tair:": r"TAIR:", "zfin:": r"ZFIN:", "xenbase:": r"Xenbase:", "mirbase:": r"miRBase:", } SPECIES_TYPEDEF = {'species': {'type': list, 'default': ['all'], 'max': 10, 'translations': [(re.compile(pattern, re.I), translation['tax_id']) for (pattern, translation) in TAXONOMY.items()]}} # For datasource translations DATASOURCE_TRANSLATION_TYPEDEF = [(re.compile(pattern, re.I), translation) for (pattern, translation) in DATASOURCE_TRANSLATIONS.items()] TRIMMED_DATASOURCE_TRANSLATION_TYPEDEF = [(re.compile(re.sub(r':.*', '', pattern).replace('\\', ''), re.I), re.sub(r':.*', '', translation).replace('\\','')) for (pattern, translation) in DATASOURCE_TRANSLATIONS.items()] # Kwarg control update for mygene specific kwargs # ES KWARGS (_source, scopes, ANNOTATION_GET_ES_KWARGS['_source'].update({#'default': DEFAULT_FIELDS, 'translations': TRIMMED_DATASOURCE_TRANSLATION_TYPEDEF}) ANNOTATION_POST_ES_KWARGS['_source'].update({#'default': DEFAULT_FIELDS, 'translations': TRIMMED_DATASOURCE_TRANSLATION_TYPEDEF}) QUERY_GET_ES_KWARGS['_source'].update({'default': DEFAULT_FIELDS, 'translations': TRIMMED_DATASOURCE_TRANSLATION_TYPEDEF}) QUERY_POST_ES_KWARGS['_source'].update({'default': DEFAULT_FIELDS, 'translations': TRIMMED_DATASOURCE_TRANSLATION_TYPEDEF}) # Control KWARGS QUERY_GET_CONTROL_KWARGS['q'].update({'translations': DATASOURCE_TRANSLATION_TYPEDEF}) # query builder KWARGS ANNOTATION_GET_ESQB_KWARGS.update(SPECIES_TYPEDEF) 
ANNOTATION_POST_ESQB_KWARGS.update(SPECIES_TYPEDEF) QUERY_GET_ESQB_KWARGS.update(SPECIES_TYPEDEF) QUERY_POST_ESQB_KWARGS.update(SPECIES_TYPEDEF) QUERY_POST_ESQB_KWARGS['scopes'].update({'translations': TRIMMED_DATASOURCE_TRANSLATION_TYPEDEF})
43.642384
136
0.57648
664
6,590
5.527108
0.329819
0.013624
0.061035
0.057221
0.291553
0.261035
0.198365
0.125341
0.125341
0.101362
0
0.010691
0.134143
6,590
150
137
43.933333
0.632492
0.301214
0
0.022222
0
0
0.264352
0.010211
0
0
0
0
0
1
0
false
0
0.055556
0
0.055556
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
70a079756e35eca1c02df3f616492a74895b5e66
3,785
py
Python
makepip/makepip.py
ao/makepip
6552aa2161e69b63d0564c03b2f36a3f1cd90a34
[ "MIT" ]
null
null
null
makepip/makepip.py
ao/makepip
6552aa2161e69b63d0564c03b2f36a3f1cd90a34
[ "MIT" ]
null
null
null
makepip/makepip.py
ao/makepip
6552aa2161e69b63d0564c03b2f36a3f1cd90a34
[ "MIT" ]
null
null
null
#!/usr/bin/env python import os from pathlib import Path home = str(Path.home()) import datetime now = datetime.datetime.now() def main(): print("Please enter the following questions to generate your new Pip!") name = input("Pip name: ") description = input("Description: ") author_name = input("Author name: ") author_website = input("Author website: ") author_email = input("Author email: ") git_repo = input("Git repository: ") input_script = input("Enter the absolute path to a python script (code should be wrapped in a `def main():` function): ") pypi_username = input("Pypi username: ") if name=="" or name==None: return os.system(f"mkdir -p {name}/{name}") f = open(f"{name}/{name}/__init__.py", "w") f.write("") f.close() f = open(f"{name}/{name}/__main__.py", "w") f.write(f"from .{name} import main\n") f.write("main()") f.close() setup_file = f""" import setuptools setuptools.setup( name='{name}', version='0.1', author="{author_name}", author_email="{author_email}", description="{description}", long_description="{description}", url="{git_repo}", packages=["{name}"], entry_points = {{ "console_scripts": ['{name} = {name}.{name}:main'] }}, classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], ) """ f = open(f"{name}/setup.py", "w") f.write(setup_file) f.close() licence_file = f""" Copyright (c) {now.year} {author_name} {author_website} Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ f = open(f"{name}/LICENCE", "w") f.write(licence_file) f.close() readme_file = f""" # {name} {description} """ f = open(f"{name}/README.md", "w") f.write(readme_file) f.close() pypirc_file = f""" [distutils] index-servers=pypi [pypi] repository = https://upload.pypi.org/legacy/ username = {pypi_username} """ f = open(f"{home}/.pypirc", "w") f.write(pypirc_file) f.close() f = open(input_script, "r") script = f.read() f.close() if not script: return None f = open(f"{name}/{name}/{name}.py", "w") f.write(script) f.close() os.chdir(name) os.system("python -m pip install --upgrade pip setuptools wheel") os.system("python -m pip install tqdm") os.system("python -m pip install twine") os.system("python setup.py bdist_wheel") os.system("python -m twine upload dist/*") if git_repo: os.system("git init") os.system(f"git add LICENCE README.md {name}/ setup.py") os.system(f"git commit -m 'Pushing code for {name} version 0.1'") os.system(f"git remote add origin {git_repo}") os.system("git push -u origin master") print("Makepip is complete!") if __name__ == "__main__": main()
27.035714
123
0.674769
552
3,785
4.547101
0.36413
0.03506
0.016733
0.023904
0.078884
0.02988
0
0
0
0
0
0.001616
0.182563
3,785
139
124
27.230216
0.809632
0.005284
0
0.109091
1
0.009091
0.673751
0.049681
0
0
0
0
0
1
0.009091
false
0
0.045455
0
0.072727
0.018182
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
70a18e0916fd434d417dcc6ca67ebfa9f0b09899
30
py
Python
utils.py
andreaskleinl/capacity_transmissionline_simulation
023358d03cce5a4c7768eee597d0d6381742e271
[ "Apache-2.0" ]
null
null
null
utils.py
andreaskleinl/capacity_transmissionline_simulation
023358d03cce5a4c7768eee597d0d6381742e271
[ "Apache-2.0" ]
null
null
null
utils.py
andreaskleinl/capacity_transmissionline_simulation
023358d03cce5a4c7768eee597d0d6381742e271
[ "Apache-2.0" ]
null
null
null
def funny(x,y): return x+y
15
15
0.6
7
30
2.571429
0.714286
0.222222
0
0
0
0
0
0
0
0
0
0
0.233333
30
2
16
15
0.782609
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
7
70a1fd6852d7db5fa6fd60e6b794975a1360fc9b
3,355
py
Python
hangouts-chat.py
matallen/ninja-points
a2af9f358bc302f4638d1516055c687cffa546ab
[ "Apache-2.0" ]
7
2018-11-27T10:13:48.000Z
2022-01-22T07:33:28.000Z
hangouts-chat.py
matallen/ninja-points
a2af9f358bc302f4638d1516055c687cffa546ab
[ "Apache-2.0" ]
29
2018-01-06T05:02:38.000Z
2021-05-20T15:27:15.000Z
hangouts-chat.py
matallen/ninja-points
a2af9f358bc302f4638d1516055c687cffa546ab
[ "Apache-2.0" ]
14
2017-12-09T14:58:53.000Z
2022-01-24T15:56:01.000Z
#!/usr/bin/env python from oauth2client.service_account import ServiceAccountCredentials from os import path import os, requests, sys, argparse SERVICE_ACCOUNT_KEY_FILE_NAME='SERVICE_ACCOUNT_KEY_FILE' HANGOUTS_CHATS_API='https://chat.googleapis.com/v1' GOOGLE_CHAT_SCOPE='https://www.googleapis.com/auth/chat.bot' SPACES_KEY='spaces' MEMBERS_KEY='memberships' def login(session, service_account_key_file): scopes = [GOOGLE_CHAT_SCOPE] credentials = ServiceAccountCredentials.from_json_keyfile_name(service_account_key_file, scopes) access_token = credentials.get_access_token() auth_headers = { 'Authorization': 'Bearer ' + access_token.access_token } session.headers.update(auth_headers) def get_spaces(session): return handle_pagination_items(session, "{0}/spaces".format(HANGOUTS_CHATS_API), SPACES_KEY) def get_members_in_space(session, space): members = handle_pagination_items(session, "{0}/{1}/members".format(HANGOUTS_CHATS_API, space["name"]), MEMBERS_KEY) human_members = [] for member in members: if member["state"] == "JOINED" and member["member"]["type"] == "HUMAN": human_members.append(member) return human_members def get_spaces_with_members(session): spaces_with_members = {} spaces = get_spaces(session) for space in spaces: if space["type"] == "ROOM": val = {} val["space"] = space # Get members members = get_members_in_space(session, space) val["members"] = members spaces_with_members[space["name"]] = val return spaces_with_members def handle_pagination_items(session, url, key, next_page_token=None): params = {} if next_page_token is not None and next_page_token != "": params["pageToken"] = next_page_token response = session.get(url, params=params) response_json = response.json() if "nextPageToken" in response_json and response_json["nextPageToken"] != "": return response_json[key] + handle_pagination_items(session, url, key, response_json["nextPageToken"]) else: return response_json[key] def encode_text(text): if text: return text.encode("utf-8") return text parser 
= argparse.ArgumentParser(description='Gather Google Hangouts Statistics.') parser.add_argument("-m","--show-members", help="Show members in each space") args = parser.parse_args() show_members = args.show_members service_account_key_file = os.environ.get(SERVICE_ACCOUNT_KEY_FILE_NAME) if not service_account_key_file: print "Error: Service Account Key File Location is Required!" sys.exit(1) if not path.exists(service_account_key_file): print "Error: Service Account Key File Does Not Exist!" sys.exit(1) session = requests.Session() error = login(session, service_account_key_file) if error is not None: print error sys.exit(1) spaces_with_members = get_spaces_with_members(session) print "=== Statistics for Google Hangouts Chat\n" for key, value in spaces_with_members.iteritems(): print "- {0} - {1} Members".format(encode_text(value["space"]["displayName"]), len(value["members"])) if show_members is not None: for member in value["members"]: print " - {0}".format(encode_text(member["member"]["displayName"]))
29.690265
120
0.708793
434
3,355
5.223502
0.25576
0.074107
0.082488
0.101897
0.227614
0.130569
0.045876
0.045876
0.045876
0.045876
0
0.004354
0.178539
3,355
113
121
29.690265
0.818215
0.009538
0
0.041096
0
0
0.166767
0.007225
0
0
0
0
0
0
null
null
0
0.041096
null
null
0.082192
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
70a52b1be3075c53fa2ae1d2c049a7526d9ddb9b
923
py
Python
pyRoovit/pyRoovit.extension/pyRoovit.tab/Tools.Panel/Document.pulldown/Find.pushbutton/script.py
jmcouffin/pyRoovit
acafa74fb6310a1042c703d791b0d53804c1882a
[ "BSD-3-Clause" ]
5
2022-03-14T01:58:34.000Z
2022-03-21T19:51:09.000Z
pyRoovit/pyRoovit.extension/pyRoovit.tab/Tools.Panel/Document.pulldown/Find.pushbutton/script.py
jmcouffin/pyRoovit
acafa74fb6310a1042c703d791b0d53804c1882a
[ "BSD-3-Clause" ]
1
2022-03-14T10:18:40.000Z
2022-03-14T15:52:25.000Z
pyRoovit/pyRoovit.extension/pyRoovit.tab/Tools.Panel/Document.pulldown/Find.pushbutton/script.py
jmcouffin/pyRoovit
acafa74fb6310a1042c703d791b0d53804c1882a
[ "BSD-3-Clause" ]
1
2022-03-14T10:15:22.000Z
2022-03-14T10:15:22.000Z
# import libraries import clr import os from os import listdir import System from System.IO import SearchOption from System import Environment # import pyrevit libraries from pyrevit import forms from pyrevit import revit,DB # get document doc = revit.doc # try to open cache path try: curdoc = DB.Document.PathName.GetValue(doc) curdir = curdoc.rsplit('\\',1) os.startfile(curdir[0]) except: try: guid = doc.WorksharingCentralGUID AppDataList = Environment.GetFolderPath(Environment.SpecialFolder.ApplicationData).split("\\") AppDataList.pop(-1) AppData = "\\".join(AppDataList) location = AppData + "\\Local\\Autodesk\\Revit" os.startfile(location) except: forms.alert('Cannot find the file. This may be because the document is not yet saved to a location, or the path is not accessible to this script for opening.', title='Script cancelled')
31.827586
193
0.713976
118
923
5.584746
0.550847
0.030349
0.051593
0
0
0
0
0
0
0
0
0.004038
0.195016
923
29
193
31.827586
0.882907
0.083424
0
0.173913
0
0.043478
0.225653
0.028504
0
0
0
0
0
1
0
false
0
0.347826
0
0.347826
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
70a65f53a022d6fe554e262dd2b61539aad6cfe3
15,323
py
Python
ansible_collection/hpe/nimble/plugins/modules/hpe_nimble_network.py
datamattsson/nimble-ansible-modules
ba306153f98db093a9af47c99bdfce1381660880
[ "Apache-2.0" ]
null
null
null
ansible_collection/hpe/nimble/plugins/modules/hpe_nimble_network.py
datamattsson/nimble-ansible-modules
ba306153f98db093a9af47c99bdfce1381660880
[ "Apache-2.0" ]
null
null
null
ansible_collection/hpe/nimble/plugins/modules/hpe_nimble_network.py
datamattsson/nimble-ansible-modules
ba306153f98db093a9af47c99bdfce1381660880
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python # # Copyright 2020 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this # file except in compliance with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS # OF ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. # author Alok Ranjan (alok.ranjan2@hpe.com) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- author: - HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com> description: Manage the storage network configuration on the HPE Nimble Storage group. module: hpe_nimble_network options: activate: required: False type: bool description: - Activate a network configuration. array: required: False type: list elements: dict description: - List of array network configs. change_name: required: False type: str description: - Change name of the existing network config. iscsi_automatic_connection_method: required: False type: bool description: - Whether automatic connection method is enabled. Enabling this means means redirecting connections from the specified iSCSI discovery IP address to the best data IP address based on connection counts. iscsi_connection_rebalancing: required: False type: bool description: - Whether rebalancing is enabled. Enabling this means rebalancing iSCSI connections by periodically breaking existing connections that are out-of-balance, allowing the host to reconnect to a more appropriate data IP address. 
ignore_validation_mask: required: False type: int description: - Indicates whether to ignore the validation. mgmt_ip: required: False type: str description: - Management IP address for the Group. Four numbers in the range (0,255) separated by periods. name: required: True type: str choices: - active - backup - draft description: - Name of the network configuration. Use the name 'draft' when creating a draft configuration. secondary_mgmt_ip: required: False type: str description: - Secondary management IP address for the Group. Four numbers in the range [0,255] separated by periods. subnet: required: False type: list elements: dict description: - List of subnet configs. route: required: False type: list elements: dict description: - List of static routes. state: required: True choices: - create - present - absent type: str description: - The network config operation. validate: required: False type: bool description: - Validate a network configuration. extends_documentation_fragment: hpe.nimble.hpe_nimble short_description: Manage the HPE Nimble Storage network configuration. 
version_added: "2.9.0" ''' EXAMPLES = r''' # if state is create, then create network config, fails if it exist or cannot create # if state is present, then create network config if not present ,else success - name: Create network config hpe_nimble_network: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" name: "{{ name }}" route: "{{ route }}" subnet: "{{ subnet }}" array: "{{ array }}" iscsi_automatic_connection_method: true iscsi_connection_rebalancing: False mgmt_ip: "{{ mgmt_ip }}" state: "{{ state | default('present') }}" - name: Delete network config hpe_nimble_network: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" name: "{{ name }}" state: "absent" - name: Validate network config hpe_nimble_network: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" name: "{{ name }}" state: "present" ignore_validation_mask: 1 validate: true - name: Activate Network config hpe_nimble_network: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" name: "{{ name }}" state: "present" ignore_validation_mask: 1 activate: true ''' RETURN = r''' ''' from ansible.module_utils.basic import AnsibleModule try: from nimbleclient.v1 import client except ImportError: client = None import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils def create_update_network_config( client_obj, name, state, iscsi_automatic_connection_method, iscsi_connection_rebalancing, mgmt_ip, change_name, **kwargs): if utils.is_null_or_empty(name): return (False, False, "Create network config failed as name is not present.", {}, {}) try: network_resp = client_obj.network_configs.get(id=None, name=name) if utils.is_null_or_empty(network_resp): params = utils.remove_null_args(**kwargs) network_resp = client_obj.network_configs.create(name=name, iscsi_automatic_connection_method=iscsi_automatic_connection_method, iscsi_connection_rebalancing=iscsi_connection_rebalancing, mgmt_ip=mgmt_ip, **params) return 
(True, True, f"Network config '{name}' created successfully.", {}, network_resp.attrs) else: if state == "create": return (False, False, f"Network config '{name}' cannot be created as it is already present in given state.", {}, network_resp.attrs) # update case kwargs['name'] = change_name changed_attrs_dict, params = utils.remove_unchanged_or_null_args(network_resp, **kwargs) # even though some of the attributes have not changed but it still has to be passed in case of update. params = utils.remove_null_args(**kwargs) if changed_attrs_dict.__len__() > 0: network_resp = client_obj.network_configs.update(id=network_resp.attrs.get("id"), name=name, iscsi_automatic_connection_method=iscsi_automatic_connection_method, iscsi_connection_rebalancing=iscsi_connection_rebalancing, mgmt_ip=mgmt_ip, **params) return (True, True, f"Network config '{name}' already present. Modified the following attributes '{changed_attrs_dict}'", changed_attrs_dict, network_resp.attrs) else: return (True, False, f"Network config '{network_resp.attrs.get('name')}' already present in given state.", {}, network_resp.attrs) except Exception as ex: return (False, False, f"Network config creation failed |'{ex}'", {}, {}) def delete_network_config( client_obj, name): if utils.is_null_or_empty(name): return (False, False, "Delete network config failed as name is not present.", {}) try: network_resp = client_obj.network_configs.get(id=None, name=name) if utils.is_null_or_empty(network_resp): return (False, False, f"Network config '{name}' cannot be deleted as it is not present.", {}) client_obj.network_configs.delete(id=network_resp.attrs.get("id")) return (True, True, f"Deleted network config '{name}' successfully.", {}) except Exception as ex: return (False, False, f"Delete network config failed |'{ex}'", {}) def validate_network_config( client_obj, name, ignore_validation_mask): if utils.is_null_or_empty(name): return (False, False, "Validate network config failed as name is not present.", {}) try: 
network_resp = client_obj.network_configs.get(id=None, name=name) if utils.is_null_or_empty(network_resp): return (False, False, f"Network config '{name}' cannot be validated as it is not present.", {}) client_obj.network_configs.validate_netconfig( id=network_resp.attrs.get("id"), ignore_validation_mask=ignore_validation_mask) return (True, False, f"Validated network config '{name}' successfully.", {}) except Exception as ex: return (False, False, f"Validate Network config failed |'{ex}'", {}) def activate_network_config( client_obj, name, ignore_validation_mask): if utils.is_null_or_empty(name): return (False, False, "Activate network config failed as name is not present.", {}) try: network_resp = client_obj.network_configs.get(id=None, name=name) if utils.is_null_or_empty(network_resp): return (False, False, f"Network config '{name}' cannot be activated as it is not present.", {}) client_obj.network_configs.activate_netconfig(id=network_resp.attrs.get("id"), ignore_validation_mask=ignore_validation_mask) return (True, True, f"Activated network config '{name}' successfully.", {}) except Exception as ex: return (False, False, f"Activate Network config failed |'{ex}'", {}) def main(): fields = { "activate": { "required": False, "type": "bool", "no_log": False }, "array": { "required": False, "type": "list", "elements": 'dict', "no_log": False }, "change_name": { "required": False, "type": "str", "no_log": False }, "iscsi_automatic_connection_method": { "required": False, "type": "bool", "no_log": False }, "iscsi_connection_rebalancing": { "required": False, "type": "bool", "no_log": False }, "ignore_validation_mask": { "required": False, "type": "int", "no_log": False }, "mgmt_ip": { "required": False, "type": "str", "no_log": False }, "name": { "required": True, "choices": ['active', 'backup', 'draft' ], "type": "str", "no_log": False }, "secondary_mgmt_ip": { "required": False, "type": "str", "no_log": False }, "subnet": { "required": False, "type": "list", 
"elements": 'dict', "no_log": False }, "route": { "required": False, "type": "list", "elements": 'dict', "no_log": False }, "state": { "required": True, "choices": ['create', 'present', 'absent' ], "type": "str" }, "validate": { "required": False, "type": "bool", "no_log": False } } default_fields = utils.basic_auth_arg_fields() fields.update(default_fields) required_if = [('state', 'create', ['array', 'iscsi_automatic_connection_method', 'iscsi_connection_rebalancing', 'mgmt_ip', 'subnet', 'route'])] module = AnsibleModule(argument_spec=fields, required_if=required_if) if client is None: module.fail_json(msg='Python nimble-sdk could not be found.') hostname = module.params["host"] username = module.params["username"] password = module.params["password"] activate = module.params["activate"] array = module.params["array"] iscsi_automatic_connection_method = module.params["iscsi_automatic_connection_method"] iscsi_connection_rebalancing = module.params["iscsi_connection_rebalancing"] ignore_validation_mask = module.params["ignore_validation_mask"] mgmt_ip = module.params["mgmt_ip"] name = module.params["name"] change_name = module.params["change_name"] secondary_mgmt_ip = module.params["secondary_mgmt_ip"] subnet = module.params["subnet"] route = module.params["route"] state = module.params["state"] validate = module.params["validate"] if (username is None or password is None or hostname is None): module.fail_json( msg="Missing variables: hostname, username and password is mandatory.") # defaults return_status = changed = False msg = "No task to run." 
resp = None try: client_obj = client.NimOSClient( hostname, username, password ) # States if ((validate is None or validate is False) and (activate is None or activate is False) and (state == "create" or state == "present")): # if not client_obj.network_configs.get(id=None, name=name) or state == "create": return_status, changed, msg, changed_attrs_dict, resp = create_update_network_config( client_obj, name, state, iscsi_automatic_connection_method, iscsi_connection_rebalancing, mgmt_ip, change_name, array_list=array, ignore_validation_mask=ignore_validation_mask, secondary_mgmt_ip=secondary_mgmt_ip, subnet_list=subnet, route_list=route) elif state == "absent": return_status, changed, msg, changed_attrs_dict = delete_network_config(client_obj, name) elif state == "present" and validate is True: return_status, changed, msg, changed_attrs_dict = validate_network_config(client_obj, name, ignore_validation_mask) elif state == "present" and activate is True: return_status, changed, msg, changed_attrs_dict = activate_network_config(client_obj, name, ignore_validation_mask) except Exception as ex: # failed for some reason. msg = str(ex) if return_status: if utils.is_null_or_empty(resp): module.exit_json(return_status=return_status, changed=changed, msg=msg) else: module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp) else: module.fail_json(return_status=return_status, changed=changed, msg=msg) if __name__ == '__main__': main()
34.825
149
0.601775
1,682
15,323
5.286564
0.166468
0.049708
0.04206
0.040486
0.554431
0.518106
0.469636
0.408232
0.383266
0.273954
0
0.002424
0.300072
15,323
439
150
34.904328
0.826667
0.056582
0
0.458445
0
0.010724
0.396232
0.039692
0
0
0
0
0
1
0.013405
false
0.021448
0.013405
0
0.075067
0.002681
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
70a965ab4266b455f06fc24f6cfc9727034185be
1,428
py
Python
crawler.py
YellowDong/taohua
b61778d0e49d885141756ca52b1e426f03e89218
[ "MIT" ]
null
null
null
crawler.py
YellowDong/taohua
b61778d0e49d885141756ca52b1e426f03e89218
[ "MIT" ]
null
null
null
crawler.py
YellowDong/taohua
b61778d0e49d885141756ca52b1e426f03e89218
[ "MIT" ]
null
null
null
from requests_html import HTMLSession import re from .blog.models import (Article, Tag, Category) class Spider: def __init__(self): self.sesion = HTMLSession() def get_list(self): url = 'http://python.jobbole.com/all-posts/' resp = self.sesion.get(url) if resp.status_code == 200: links = re.findall('(http://python.jobbole.com/\d+/)', resp.text) return set(links) return def get_detail(self, detail_url): resp = self.sesion.get(detail_url) if resp.status_code == 200: # text = resp.text return resp def parser(self, resp): #text = resp.html.find('.entry > p') text = ''.join(list(map(lambda x: x.text, resp.html.find('div.entry p')))) author = resp.html.find('div.entry div.copyright-area a', first=True).text temp = resp.html.find('p.entry-meta-hide-on-mobile', first=True).text.strip().split('·') createtime = temp[0] category = temp[1] tag = temp[-1] # print(createtime) # print(category) # print(tag) # print('================================================') Article.objects.create(created_time=createtime, ) if __name__ == '__main__': test = Spider() links = test.get_list() if links: for i in links: resp = test.get_detail(i) text = test.parser(resp)
30.382979
96
0.556723
178
1,428
4.348315
0.421348
0.041344
0.062016
0.05168
0.108527
0.056848
0
0
0
0
0
0.008654
0.271709
1,428
46
97
31.043478
0.734615
0.107843
0
0.0625
0
0
0.114534
0.021327
0
0
0
0
0
1
0.125
false
0
0.09375
0
0.34375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
56061be80b560aeb56981297f4a7efbad2518c39
131
py
Python
question_repo/apps/repo/signal/signals.py
Quizas007/question_repo
e377257137077b17ed039b06d3d6990d56d63743
[ "Apache-2.0" ]
null
null
null
question_repo/apps/repo/signal/signals.py
Quizas007/question_repo
e377257137077b17ed039b06d3d6990d56d63743
[ "Apache-2.0" ]
null
null
null
question_repo/apps/repo/signal/signals.py
Quizas007/question_repo
e377257137077b17ed039b06d3d6990d56d63743
[ "Apache-2.0" ]
null
null
null
""" 自定义信号 """ import django.dispatch mysignal = django.dispatch.Signal(providing_args=["arg1","arg2"]) # 内置的信号会自动触发,自定义信号不可以。
10.916667
65
0.709924
14
131
6.571429
0.857143
0.304348
0
0
0
0
0
0
0
0
0
0.017241
0.114504
131
11
66
11.909091
0.775862
0.206107
0
0
0
0
0.086022
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
5607fbf70d32a16b417fbba1a89c53e34ec639fc
4,674
py
Python
ultrasync/create_sync_samples.py
aeshky/ultrasync
0801bcb0312bba2eab07f12f9a68f3a431f5aaeb
[ "Apache-2.0" ]
7
2019-07-29T20:00:25.000Z
2021-06-22T21:21:50.000Z
ultrasync/create_sync_samples.py
aeshky/ultrasync
0801bcb0312bba2eab07f12f9a68f3a431f5aaeb
[ "Apache-2.0" ]
1
2019-10-01T14:25:33.000Z
2019-10-28T15:19:52.000Z
ultrasync/create_sync_samples.py
aeshky/ultrasync
0801bcb0312bba2eab07f12f9a68f3a431f5aaeb
[ "Apache-2.0" ]
null
null
null
""" Date: Jul 2018 Author: Aciel Eshky A script to create positive and negative samples using self-supervision. """ import os import sys import random import pandas as pd from numpy.random import seed as np_seed from ustools.folder_utils import get_utterance_id, get_dir_info from ultrasync.create_sync_samples_utils import create_samples, save_samples_to_disk random.seed(2018) np_seed(2018) def mirror_folder_structure(input_path, output_path): """ Function to create a mirror of the input path folder structure in output path. Adapted from https://stackoverflow.com/a/40829525/5190279 :param input_path: :param output_path: :return: a list of pairs of core dir which contains files, and corresponding generated dir """ folder_pairs = [] for dirpath, dirnames, filenames in os.walk(input_path): dirnames.sort() if any(fname.endswith('.ult') for fname in filenames): new_output_folder = os.path.join(output_path, dirpath[len(input_path):]) if not os.path.isdir(new_output_folder): print("Creating folder: \t" + new_output_folder) os.makedirs(new_output_folder) else: print("Folder already exits: \t" + new_output_folder) if filenames: # return the dirs that contain files folder_pairs.append([dirpath, new_output_folder]) return folder_pairs def get_file_basenames(directory): files = os.listdir(directory) return set([f.split('.')[0] for f in files]) def create_sync_data(folder_pairs): files_created = [] df = pd.DataFrame(folder_pairs, columns=("core", "generated")) for index, row in df.iterrows(): # itertools.islice(df.iterrows(), 80): print("Processing: ", row['core'], row['generated']) core_dir = row['core'] target_dir = row['generated'] basenames = get_file_basenames(core_dir) target_basenames = get_file_basenames(target_dir) for b in basenames: # if os.path.isfile(os.path.join(target_dir, b + '.npz')): # print(os.path.join(target_dir, b + '.npz'), "already exists.") if [b in i for i in target_basenames]: print(b, "files already exist in target directory.") elif "E" in b: 
print("Skipping utterance of type \"non-speech\" (E):", os.path.join(target_dir, b)) else: try: info = get_dir_info(core_dir) root_id = get_utterance_id(info['dataset'], info['speaker'], info['session'], b) print(root_id) samples = create_samples(core_dir, b) chunk_names = save_samples_to_disk(samples, root_id, target_dir) list.extend(files_created, chunk_names) except: print("Unexpected error:", sys.exc_info()[0]) print("not_processed: ", core_dir, b) return files_created def main(): ultrasuite = ["uxtd", "uxssd", "upx"] # the location of the original ultrasuite data input_path = sys.argv[1] # "/group/project/ultrax2020/UltraSuite/" # the destination: where the sync dataset will be stored. # This will consists of of samples, each corresponding to 200 ms of ultrasound and audio. output_path = sys.argv[2] # "/disk/scratch_big/../SyncDataSmall" for dataset in ultrasuite: docs = os.path.join(output_path, "docs", dataset) if not os.path.exists(docs): os.makedirs(docs) input_path_data = os.path.join(input_path, "core-" + dataset, "core/") # this slash is very important! output_path_data = os.path.join(output_path, dataset) print("processing", dataset, "input directory is:", input_path_data, "output directory is:", output_path_data) # source and destination folder pairs. # destination is created by mirror source. folder_pairs = mirror_folder_structure(input_path_data, output_path_data) # save the pairs for logging purposes pd.DataFrame.to_csv(pd.DataFrame(columns={"source", "target"}, data=folder_pairs), os.path.join(docs, "folder_pairs.csv"), index=False) # create and save the data file_names = create_sync_data(folder_pairs) # save the sample file names for logging purposes pd.DataFrame.to_csv(pd.DataFrame(columns={"file_names"}, data=file_names), os.path.join(docs, "file_names.csv"), index=False) if __name__ == "__main__": main()
31.369128
111
0.632649
602
4,674
4.714286
0.299003
0.02537
0.031712
0.016913
0.128964
0.059901
0.052854
0.036646
0.036646
0.036646
0
0.011621
0.263586
4,674
148
112
31.581081
0.812899
0.226145
0
0.027397
0
0
0.101348
0
0
0
0
0
0
1
0.054795
false
0
0.09589
0
0.191781
0.123288
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5608507a8fbe16afc4a2757ca530a21ae1157296
515
py
Python
setup.py
kaynhelga9/64-bit-Ninja
ba3ffa69797a6f52103142515f0ee253e69e0d49
[ "MIT" ]
3
2019-02-11T14:40:29.000Z
2019-04-21T21:59:11.000Z
setup.py
kaynhelga9/64-bit-Ninja
ba3ffa69797a6f52103142515f0ee253e69e0d49
[ "MIT" ]
null
null
null
setup.py
kaynhelga9/64-bit-Ninja
ba3ffa69797a6f52103142515f0ee253e69e0d49
[ "MIT" ]
null
null
null
import cx_Freeze import os os.environ['TCL_LIBRARY'] = r'C:\Users\Khanh Huynh\AppData\Local\Programs\Python\Python36\tcl\tcl8.6' os.environ['TK_LIBRARY'] = r'C:\Users\Khanh Huynh\AppData\Local\Programs\Python\Python36\tcl\tk8.6' executables = [cx_Freeze.Executable('game.py')] cx_Freeze.setup( name = '64-bit Ninja', version = '1.05', author = 'Khanh H', options = {'build_exe': {'packages': ['pygame'], 'include_files': ['icon.png', 'idle1.png']}}, executables = executables )
34.333333
102
0.669903
71
515
4.760563
0.633803
0.071006
0.053254
0.08284
0.360947
0.360947
0.360947
0.360947
0.360947
0.360947
0
0.031891
0.147573
515
15
103
34.333333
0.738041
0
0
0
0
0.166667
0.484064
0.217131
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5609ed8bf29ac968a3c75282d2fbdfa0946a74d7
1,475
py
Python
xiAnresturant/graph.py
wangteng200000318/-DataMiningMeiTuan
eb152a090c3025726bcb793484d4a88f2072b744
[ "MIT" ]
4
2020-11-23T04:50:41.000Z
2021-03-12T06:19:59.000Z
xiAnresturant/graph.py
wangteng200000318/-DataMiningMeiTuan
eb152a090c3025726bcb793484d4a88f2072b744
[ "MIT" ]
null
null
null
xiAnresturant/graph.py
wangteng200000318/-DataMiningMeiTuan
eb152a090c3025726bcb793484d4a88f2072b744
[ "MIT" ]
null
null
null
from wordcloud import WordCloud import jieba import matplotlib.pyplot as plt xiaozhai = ['佛伦萨·古典火炉披萨', '蘑菇爱上饭', '珍味林饺子馆', '巷子火锅', '千家粗粮王', '猫堂小站猫咪主题馆', 'CoCo都可', '福气焖锅烤肉', '5号酒馆', '82°C魔力焖锅', '小肥羊', '长安大牌档之长安集市', '泰熙家', '大自在火锅', '拉菲达牛排自助', '猫咪餐厅', '京御煌三汁焖锅', '赵家腊汁肉', '米多多烘焙屋', '瑞可爺爺的店', '阿姨奶茶专卖', '百富烤霸', '三姊妹香辣土豆片夹馍', '小哥过桥米线', '太食獸泰式茶餐厅', '和記丸子専買', '0057香辣虾', 'M12铁板餐厅', '重庆鸡公煲', '洪氏嗨捞·新派猪肚鸡' ] hangtiancheng = ['辣条印象', '福临北京烤鸭', '味都西饼店', '刘大饼香辣土豆片夹馍', '韩味坊牛排自助', '星期八工坊', '红透天自助涮烤', '和福顺养生焖锅', '臻膳轩自助涮烤城', '李想大虾火锅花园', '欧味轩艺术蛋糕', '王府臻品火锅', '艾米客蛋糕', '红透天自助涮烤', '川渝小渔哥', '面道' ] xiaozhai_words = [] hangtiancheng_words = [] for word in xiaozhai: xiaozhai_words.append(jieba.cut(word)) for word in hangtiancheng: hangtiancheng_words.append(jieba.cut(word)) res_xiaozhai = "" res_hangtiancheng = "" for i in range(len(xiaozhai_words)): res_xiaozhai += ("/" + "/".join(xiaozhai_words[i])) for i in range(len(hangtiancheng_words)): res_hangtiancheng += ("/" + "/".join(hangtiancheng_words[i])) w1 = WordCloud(font_path="simsun.ttf", background_color='white') w1.generate(res_xiaozhai) w1.to_file('小寨附近餐饮店铺词云图.png') w2 = WordCloud(font_path="simsun.ttf", background_color='white') w2.generate(res_hangtiancheng) w2.to_file("航天城附近餐饮店铺词云图.png")
37.820513
69
0.589831
155
1,475
5.503226
0.554839
0.060961
0.021102
0.044549
0.194607
0.107855
0.107855
0.107855
0
0
0
0.013345
0.237966
1,475
38
70
38.815789
0.742883
0
0
0
0
0
0.244955
0
0
0
0
0
0
1
0
false
0
0.083333
0
0.083333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
560adc09db2a2d64ed970d3656db7a5e1e25da54
4,055
py
Python
api/server/routes/catchall.py
corentinthomasset/ibm-call-for-code-2021
2a3cbc7c9a5a21fd0caa9cbc0a57904bb2087872
[ "Apache-2.0" ]
null
null
null
api/server/routes/catchall.py
corentinthomasset/ibm-call-for-code-2021
2a3cbc7c9a5a21fd0caa9cbc0a57904bb2087872
[ "Apache-2.0" ]
null
null
null
api/server/routes/catchall.py
corentinthomasset/ibm-call-for-code-2021
2a3cbc7c9a5a21fd0caa9cbc0a57904bb2087872
[ "Apache-2.0" ]
null
null
null
import logging import json import warnings import time import datetime as dt from ast import literal_eval as make_tuple from flask import jsonify, abort, Response, request from server import app, cln_client from cloudant.error import CloudantException, ResultException from cloudant.query import Query import yfinance as yf import numpy as np from pandas_datareader import data as pdr from yahoo_fin import stock_info as si import pandas as pd def ticker_details(symbol, backwards): reply = dict() dbs = { 'ratings': 'esg-ratings-ibm-cfc', 'indicators': 'esg-indicators-ibm-cfc', 'details': 'ticker-details-ibm-cfc' } for aspect, db in dbs.items(): try: conn = cln_client.create_database(db) except CloudantException as e: logging.critical(f'DB/{aspect} connection failure: {e}') reply[aspect] = {} else: if conn.exists(): logging.info(f'Using existing {aspect} DB: {db}') field = ('cfc_company' if aspect == 'details' else 'stock_symbol') selector={field: symbol.upper()} try: resp = conn.get_query_result(selector, raw_result=True, limit=100) time.sleep(0.075) except ResultException as e: logging.critical(f'Query/{aspect} failed: {e}') reply[aspect] = {} else: if len(resp['docs']) == 0: logging.warning(f'{aspect} not found for {symbol}') reply[aspect] = {} else: result = list() for doc in resp['docs']: result.append(doc) reply[aspect] = result return reply @app.route("/catchall/<string:symbol>") def catchall(symbol): """catchall route""" if symbol is None or symbol == '': abort(Response(json.dumps({'Error': 'Invalid symbol provided'}), 400)) backwards = request.args.get('period', '1mo') pd.set_option('display.max_rows', None) warnings.filterwarnings("ignore") yf.pdr_override() num_of_years = 1 start = dt.date.today() - dt.timedelta(days = int(365.25*num_of_years)) end = dt.date.today() tickers = si.tickers_dow() tickers.append(symbol) dataset = pdr.get_data_yahoo(tickers, start, end)['Adj Close'] stocks_returns = np.log(dataset/dataset.shift(1)) pairs_to_drop = set() cols = 
stocks_returns.columns for i in range(0, stocks_returns.shape[1]): for j in range(0, i+1): if i == j: pairs_to_drop.add((cols[i], cols[j])) au_corr = stocks_returns.corr().abs().unstack() au_corr = au_corr.drop(labels=pairs_to_drop) final = list() for ticker in tickers: top = dict() top['target'] = ticker top['correlation'] = dict() for tpl, corr in json.loads(au_corr.to_json()).items(): pair = make_tuple(tpl) if ticker.lower() == pair[0].lower(): top['correlation'].update({pair[1]: corr}) top['correlation'] = dict(sorted(top['correlation'].items(), key=lambda item: item[1], reverse=True)) final.append(top) for item in final: if item['target'].lower() == symbol.lower(): item.update(ticker_details(symbol, backwards)) item['correlations'] = list() for corp, corr_value in item['correlation'].items(): corr_item = dict() corr_item['symbol'] = corp corr_item['value'] = corr_value corr_item.update(ticker_details(corp, backwards)) item['correlations'].append(corr_item) del item['correlation'] return jsonify(item) break else: abort(Response(json.dumps({'Error': 'Stock/Correlation/ESG details not found for symbol'}), 400))
33.237705
109
0.568927
479
4,055
4.713987
0.367432
0.017715
0.019929
0.024801
0.056687
0
0
0
0
0
0
0.01035
0.309001
4,055
121
110
33.512397
0.795503
0.003453
0
0.09
0
0
0.126457
0.022316
0
0
0
0
0
1
0.02
false
0
0.15
0
0.19
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
560b81c62a615cdb5ce98cccdf4071f843bedca3
315
py
Python
setup.py
dkasak/pymodoro
fb17cb29f9473727d3607c14da59b12a9b63d02a
[ "MIT" ]
2
2020-06-12T11:11:14.000Z
2020-07-16T08:15:53.000Z
setup.py
dkasak/pymodoro
fb17cb29f9473727d3607c14da59b12a9b63d02a
[ "MIT" ]
null
null
null
setup.py
dkasak/pymodoro
fb17cb29f9473727d3607c14da59b12a9b63d02a
[ "MIT" ]
null
null
null
from setuptools import setup setup( name='pymodoro', version='0.4', packages=['pymodoro'], package_data={'pymodoro': ['data/*']}, entry_points={ "console_scripts": [ "pymodoro = pymodoro.pymodoro:main", "pymodoroi3 = pymodoro.pymodoroi3:main" ] }, )
21
51
0.565079
28
315
6.25
0.642857
0.182857
0
0
0
0
0
0
0
0
0
0.017544
0.27619
315
14
52
22.5
0.75
0
0
0
0
0
0.374603
0.146032
0
0
0
0
0
1
0
true
0
0.076923
0
0.076923
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
2
560c68d1de24eff114517db2454d489387dbeb48
267
py
Python
src/ralph/dns/apps.py
DoNnMyTh/ralph
97b91639fa68965ad3fd9d0d2652a6545a2a5b72
[ "Apache-2.0" ]
1,668
2015-01-01T12:51:20.000Z
2022-03-29T09:05:35.000Z
src/ralph/dns/apps.py
hq-git/ralph
e2448caf02d6e5abfd81da2cff92aefe0a534883
[ "Apache-2.0" ]
2,314
2015-01-02T13:26:26.000Z
2022-03-29T04:06:03.000Z
src/ralph/dns/apps.py
hq-git/ralph
e2448caf02d6e5abfd81da2cff92aefe0a534883
[ "Apache-2.0" ]
534
2015-01-05T12:40:28.000Z
2022-03-29T21:10:12.000Z
from django.conf import settings from ralph.apps import RalphAppConfig class DNS(RalphAppConfig): name = 'ralph.dns' def get_load_modules_when_ready(self): if settings.ENABLE_HERMES_INTEGRATION: return ['publishers'] return []
20.538462
46
0.696629
31
267
5.806452
0.774194
0
0
0
0
0
0
0
0
0
0
0
0.228464
267
12
47
22.25
0.873786
0
0
0
0
0
0.071161
0
0
0
0
0
0
1
0.125
false
0
0.25
0
0.875
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
560d0ef05a9d538d23b9e00d5bb4bf708502cb92
1,132
py
Python
RLTutorial/MDPValue.py
fyabc/MSRAPaperProject
2d7974acfe8065523d0c56da695807e94acd0b34
[ "MIT" ]
1
2016-08-17T10:04:30.000Z
2016-08-17T10:04:30.000Z
RLTutorial/MDPValue.py
fyabc/MSRAPaperProject
2d7974acfe8065523d0c56da695807e94acd0b34
[ "MIT" ]
null
null
null
RLTutorial/MDPValue.py
fyabc/MSRAPaperProject
2d7974acfe8065523d0c56da695807e94acd0b34
[ "MIT" ]
null
null
null
#! /usr/bin/python # -*- encoding: utf-8 -*- from __future__ import print_function, unicode_literals from MDP import MDP import version23 __author__ = 'fyabc' # random.seed(0) def getRandomPolicyValue(): values = [0.0 for _ in range(10)] num = 1000000 echoEpoch = 10000 mdp = MDP() for k in range(1, num): for initState in range(1, 6): state = initState isTerminal = False gamma = 1.0 value = 0.0 while not isTerminal: action = mdp.randomAction() isTerminal, state, reward = mdp.transform(state, action) value += gamma * reward gamma *= mdp.gamma values[initState] += value if k % echoEpoch == 0: print('k = %d, Average values of state 1-5 are:\n' % k, [value / k for value in values[1:6]]) for i in range(len(values)): values[i] /= num return values def test(): values = getRandomPolicyValue() print('Average values of state 1-5 are:\n', values[1:6]) if __name__ == '__main__': test()
21.769231
72
0.550353
138
1,132
4.376812
0.427536
0.046358
0.02649
0.066225
0.086093
0.086093
0.086093
0.086093
0
0
0
0.048
0.337456
1,132
51
73
22.196078
0.757333
0.04947
0
0
0
0
0.082945
0
0
0
0
0
0
1
0.0625
false
0
0.09375
0
0.1875
0.09375
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
560d4c8d48ed8fbb6b163e3376983982a59d0792
19,002
py
Python
tests/test_component/test_task.py
andre-merzky/radical.entk
a63ad9158cf2f58d7bfff017f7da9cd5236429b5
[ "MIT" ]
null
null
null
tests/test_component/test_task.py
andre-merzky/radical.entk
a63ad9158cf2f58d7bfff017f7da9cd5236429b5
[ "MIT" ]
null
null
null
tests/test_component/test_task.py
andre-merzky/radical.entk
a63ad9158cf2f58d7bfff017f7da9cd5236429b5
[ "MIT" ]
null
null
null
#!/usr/bin/env python import os import glob import shutil import pytest import hypothesis.strategies as st from hypothesis import given, settings from radical.entk import Task from radical.entk import states import radical.entk.exceptions as ree # Hypothesis settings settings.register_profile("travis", max_examples=100, deadline=None) settings.load_profile("travis") # ------------------------------------------------------------------------------ # def test_task_initialization(): ''' **Purpose**: Test if the task attributes have, thus expect, the correct data types ''' t = Task() assert t._uid is None assert t.name is None assert t.state == states.INITIAL assert t.state_history == [states.INITIAL] assert t.executable is None assert t.arguments == list() assert t.pre_exec == list() assert t.post_exec == list() assert t.cpu_reqs['processes'] == 1 assert t.cpu_reqs['process_type'] is None assert t.cpu_reqs['threads_per_process'] == 1 assert t.cpu_reqs['thread_type'] is None assert t.gpu_reqs['processes'] == 0 assert t.gpu_reqs['process_type'] is None assert t.gpu_reqs['threads_per_process'] == 0 assert t.gpu_reqs['thread_type'] is None assert t.lfs_per_process == 0 assert t.upload_input_data == list() assert t.copy_input_data == list() assert t.link_input_data == list() assert t.move_input_data == list() assert t.copy_output_data == list() assert t.move_input_data == list() assert t.download_output_data == list() assert t.stdout is None assert t.stderr is None assert t.exit_code is None assert t.tag is None assert t.path is None assert t.parent_pipeline['uid'] is None assert t.parent_pipeline['name'] is None assert t.parent_stage['uid'] is None assert t.parent_stage['name'] is None # ------------------------------------------------------------------------------ # @given(s=st.text(), l=st.lists(st.text()), i=st.integers().filter(lambda x: type(x) == int), b=st.booleans()) def test_task_exceptions(s, l, i, b): ''' **Purpose**: Test if all attribute assignments raise 
exceptions for invalid values ''' t = Task() data_type = [s, l, i, b] for data in data_type: # special case due to backward compatibility if not isinstance(data, str) and \ not isinstance(data, list): with pytest.raises(ree.TypeError): t.executable = data if not isinstance(data, str): with pytest.raises(ree.TypeError): t.name = data with pytest.raises(ree.TypeError): t.path = data with pytest.raises(ree.TypeError): t.parent_stage = data with pytest.raises(ree.TypeError): t.parent_pipeline = data with pytest.raises(ree.TypeError): t.stdout = data with pytest.raises(ree.TypeError): t.stderr = data if not isinstance(data, list): with pytest.raises(ree.TypeError): t.pre_exec = data with pytest.raises(ree.TypeError): t.arguments = data with pytest.raises(ree.TypeError): t.post_exec = data with pytest.raises(ree.TypeError): t.upload_input_data = data with pytest.raises(ree.TypeError): t.copy_input_data = data with pytest.raises(ree.TypeError): t.link_input_data = data with pytest.raises(ree.TypeError): t.move_input_data = data with pytest.raises(ree.TypeError): t.copy_output_data = data with pytest.raises(ree.TypeError): t.download_output_data = data with pytest.raises(ree.TypeError): t.move_output_data = data if not isinstance(data, str) and \ not isinstance(data, str): with pytest.raises(ree.ValueError): t.cpu_reqs = {'processes' : 1, 'process_type' : data, 'threads_per_process': 1, 'thread_type' : None} t.cpu_reqs = {'processes' : 1, 'process_type' : None, 'threads_per_process': 1, 'thread_type' : data } t.gpu_reqs = {'processes' : 1, 'process_type' : data, 'threads_per_process': 1, 'thread_type' : None } t.gpu_reqs = {'processes' : 1, 'process_type' : None, 'threads_per_process': 1, 'thread_type' : data} if not isinstance(data, int): with pytest.raises(ree.TypeError): t.cpu_reqs = {'processes' : data, 'process_type' : None, 'threads_per_process' : 1, 'thread_type' : None} with pytest.raises(ree.TypeError): t.cpu_reqs = {'processes' : 1, 'process_type' : None, 
'threads_per_process' : data, 'thread_type' : None} with pytest.raises(ree.TypeError): t.gpu_reqs = {'processes' : data, 'process_type' : None, 'threads_per_process' : 1, 'thread_type' : None} with pytest.raises(ree.TypeError): t.gpu_reqs = {'processes' : 1, 'process_type' : None, 'threads_per_process' : data, 'thread_type' : None} # ------------------------------------------------------------------------------ # def test_dict_to_task(): # make sure the type checks kick in d = {'name' : 1} with pytest.raises(ree.TypeError): Task(from_dict=d) d = {'name' : 'foo', 'pre_exec' : ['bar'], 'executable': 'buz', 'arguments' : ['baz', 'fiz'], 'cpu_reqs' : {'processes' : 1, 'process_type' : None, 'threads_per_process': 1, 'thread_type' : None}, 'gpu_reqs' : {'processes' : 0, 'process_type' : None, 'threads_per_process': 0, 'thread_type' : None}} t = Task(from_dict=d) for k,v in d.items(): assert(t.__getattribute__(k) == v), '%s != %s' \ % (t.__getattribute__(k), v) # ------------------------------------------------------------------------------ # def test_task_to_dict(): ''' **Purpose**: Test if the 'to_dict' function of Task class converts all expected attributes of the Task into a dictionary ''' t = Task() d = t.to_dict() assert d == {'uid' : None, 'name' : None, 'state' : states.INITIAL, 'state_history' : [states.INITIAL], 'pre_exec' : [], 'executable' : None, 'arguments' : [], 'post_exec' : [], 'cpu_reqs' : {'processes' : 1, 'process_type' : None, 'threads_per_process' : 1, 'thread_type' : None}, 'gpu_reqs' : {'processes' : 0, 'process_type' : None, 'threads_per_process' : 0, 'thread_type' : None}, 'lfs_per_process' : 0, 'upload_input_data' : [], 'copy_input_data' : [], 'link_input_data' : [], 'move_input_data' : [], 'copy_output_data' : [], 'move_output_data' : [], 'download_output_data' : [], 'stdout' : None, 'stderr' : None, 'exit_code' : None, 'path' : None, 'tag' : None, 'parent_stage' : {'uid' : None, 'name' : None}, 'parent_pipeline' : {'uid' : None, 'name' : 
None}} t = Task() t.uid = 'test.0017' t.name = 'new' t.pre_exec = ['module load abc'] t.executable = ['sleep'] t.arguments = ['10'] t.cpu_reqs['processes'] = 10 t.cpu_reqs['threads_per_process'] = 2 t.gpu_reqs['processes'] = 5 t.gpu_reqs['threads_per_process'] = 3 t.lfs_per_process = 1024 t.upload_input_data = ['test1'] t.copy_input_data = ['test2'] t.link_input_data = ['test3'] t.move_input_data = ['test4'] t.copy_output_data = ['test5'] t.move_output_data = ['test6'] t.download_output_data = ['test7'] t.stdout = 'out' t.stderr = 'err' t.exit_code = 1 t.path = 'a/b/c' t.tag = 'task.0010' t.parent_stage = {'uid': 's1', 'name': 'stage1'} t.parent_pipeline = {'uid': 'p1', 'name': 'pipeline1'} d = t.to_dict() assert d == {'uid' : 'test.0017', 'name' : 'new', 'state' : states.INITIAL, 'state_history' : [states.INITIAL], 'pre_exec' : ['module load abc'], 'executable' : 'sleep', 'arguments' : ['10'], 'post_exec' : [], 'cpu_reqs' : {'processes' : 10, 'process_type' : None, 'threads_per_process' : 2, 'thread_type' : None}, 'gpu_reqs' : {'processes' : 5, 'process_type' : None, 'threads_per_process' : 3, 'thread_type' : None}, 'lfs_per_process' : 1024, 'upload_input_data' : ['test1'], 'copy_input_data' : ['test2'], 'link_input_data' : ['test3'], 'move_input_data' : ['test4'], 'copy_output_data' : ['test5'], 'move_output_data' : ['test6'], 'download_output_data' : ['test7'], 'stdout' : 'out', 'stderr' : 'err', 'exit_code' : 1, 'path' : 'a/b/c', 'tag' : 'task.0010', 'parent_stage' : {'uid': 's1', 'name' : 'stage1'}, 'parent_pipeline' : {'uid': 'p1', 'name' : 'pipeline1'}} t.executable = 'sleep' d = t.to_dict() assert d == {'uid' : 'test.0017', 'name' : 'new', 'state' : states.INITIAL, 'state_history' : [states.INITIAL], 'pre_exec' : ['module load abc'], 'executable' : 'sleep', 'arguments' : ['10'], 'post_exec' : [], 'cpu_reqs' : {'processes' : 10, 'process_type' : None, 'threads_per_process' : 2, 'thread_type' : None}, 'gpu_reqs' : {'processes' : 5, 'process_type' : None, 
'threads_per_process' : 3, 'thread_type' : None}, 'lfs_per_process' : 1024, 'upload_input_data' : ['test1'], 'copy_input_data' : ['test2'], 'link_input_data' : ['test3'], 'move_input_data' : ['test4'], 'copy_output_data' : ['test5'], 'move_output_data' : ['test6'], 'download_output_data' : ['test7'], 'stdout' : 'out', 'stderr' : 'err', 'exit_code' : 1, 'path' : 'a/b/c', 'tag' : 'task.0010', 'parent_stage' : {'uid': 's1', 'name' : 'stage1'}, 'parent_pipeline' : {'uid': 'p1', 'name' : 'pipeline1'}} # ------------------------------------------------------------------------------ # def test_task_from_dict(): ''' **Purpose**: Test if the 'from_dict' function of Task class converts a dictionary into a Task correctly with all the expected attributes ''' d = {'uid' : 're.Task.0000', 'name' : 't1', 'state' : states.DONE, 'state_history' : [states.INITIAL, states.DONE], 'pre_exec' : [], 'executable' : '', 'arguments' : [], 'post_exec' : [], 'cpu_reqs' : {'processes' : 1, 'process_type' : None, 'threads_per_process' : 1, 'thread_type' : None}, 'gpu_reqs' : {'processes' : 0, 'process_type' : None, 'threads_per_process' : 0, 'thread_type' : None}, 'lfs_per_process' : 1024, 'upload_input_data' : [], 'copy_input_data' : [], 'link_input_data' : [], 'move_input_data' : [], 'copy_output_data' : [], 'move_output_data' : [], 'download_output_data' : [], 'stdout' : 'out', 'stderr' : 'err', 'exit_code' : 555, 'path' : 'here/it/is', 'tag' : 'task.0010', 'parent_stage' : {'uid': 's1', 'name' : 'stage1'}, 'parent_pipeline' : {'uid': 'p1', 'name' : 'pipe1'}} t = Task() t.from_dict(d) assert t._uid == d['uid'] assert t.name == d['name'] assert t.state == d['state'] assert t.state_history == d['state_history'] assert t.pre_exec == d['pre_exec'] assert t.executable == d['executable'] assert t.arguments == d['arguments'] assert t.post_exec == d['post_exec'] assert t.cpu_reqs == d['cpu_reqs'] assert t.gpu_reqs == d['gpu_reqs'] assert t.lfs_per_process == d['lfs_per_process'] assert 
t.upload_input_data == d['upload_input_data'] assert t.copy_input_data == d['copy_input_data'] assert t.link_input_data == d['link_input_data'] assert t.move_input_data == d['move_input_data'] assert t.copy_output_data == d['copy_output_data'] assert t.move_output_data == d['move_output_data'] assert t.download_output_data == d['download_output_data'] assert t.stdout == d['stdout'] assert t.stderr == d['stderr'] assert t.exit_code == d['exit_code'] assert t.path == d['path'] assert t.tag == d['tag'] assert t.parent_stage == d['parent_stage'] assert t.parent_pipeline == d['parent_pipeline'] d['executable'] = 'sleep' t = Task() t.from_dict(d) assert t.executable == d['executable'] # ------------------------------------------------------------------------------ # def test_task_assign_uid(): try: home = os.environ.get('HOME', '/home') folder = glob.glob('%s/.radical/utils/test*' % home) for f in folder: shutil.rmtree(f) except: pass t = Task() assert t.uid == 'task.0000' # ------------------------------------------------------------------------------ # def test_task_validate(): t = Task() t._state = 'test' with pytest.raises(ree.ValueError): t._validate() t = Task() with pytest.raises(ree.MissingError): t._validate() # ------------------------------------------------------------------------------ # if __name__ == '__main__': test_task_initialization() test_task_exceptions() test_dict_to_task() test_task_to_dict() test_task_from_dict() test_task_assign_uid() test_task_validate() # ------------------------------------------------------------------------------
41.041037
86
0.393801
1,640
19,002
4.328659
0.116463
0.060149
0.056346
0.066911
0.628117
0.551486
0.491337
0.451613
0.415411
0.369348
0
0.014778
0.462267
19,002
462
87
41.12987
0.679977
0.068624
0
0.48
0
0
0.171136
0.001307
0
0
0
0
0.182857
1
0.02
false
0.002857
0.025714
0
0.045714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
560d83927ccbfa6f396f7f59b1122955d5914eea
3,689
py
Python
main.py
MNicaretta/picrosssolver
2919049b6a2beebb47d883ddee7ba830bf809a59
[ "MIT" ]
null
null
null
main.py
MNicaretta/picrosssolver
2919049b6a2beebb47d883ddee7ba830bf809a59
[ "MIT" ]
null
null
null
main.py
MNicaretta/picrosssolver
2919049b6a2beebb47d883ddee7ba830bf809a59
[ "MIT" ]
null
null
null
from enum import Enum class CellState(Enum): UNKNOWN = '.' EMPTY = ' ' BOX = '■' def __str__(self): return self.value class Cell(): def __init__(self, state=CellState.UNKNOWN): self.state = state def __str__(self): return self.state class Clue(): def __init__(self, value, filled=0): self.value = value self.filled = filled def isFilled(self): return self.remaining() == 0 def remaining(self): return self.value - self.filled def __str__(self): return str(self.value) class Clues(): def __init__(self, *clues): self.clues = [] for c in clues: self.clues.append(Clue(c)) def fillMin(self, start, end, mark): space = sum(c.value + 1 for c in self.clues) - 1 left = end - start - space index = 0 for c in self.clues: if c.value > left: fill = c.value - left c.filled += fill index += c.value - fill for _ in range(fill): mark(index, CellState.BOX) index += 1 index += 1 else: index += c.value + 1 def adjust(self, array): if not self.isFilled(): for i in range(len(array)): start = 0 end = 0 if self.isFilled(): for cell in array: if cell.state == CellState.UNKNOWN: cell.state = CellState.EMPTY def isFilled(self): return all(c.isFilled() for c in self.clues) class Picross(): def __init__(self, rows=[], columns=[]): self.rows = rows self.columns = columns self.board = [] for _ in range(len(self.rows)): array = [] for _ in range(len(self.columns)): array.append(Cell()) self.board.append(array) def isSolved(self): return all(r.isFilled() for r in self.rows) def set(self, r, c, state): self.board[r][c].state = state def solve(self): self.fillMin() print(self) while not self.isSolved(): self.adjust() print(self) def fillMin(self): for r in range(len(self.rows)): self.rows[r].fillMin(0, len(self.columns), lambda index, state: self.set(r,index,state)) for c in range(len(self.columns)): self.columns[c].fillMin(0, len(self.rows), lambda index, state: self.set(index,c,state)) def adjust(self): for r in range(len(self.rows)): self.rows[r].adjust(self.board[r]) for c in range(len(self.columns)): 
self.columns[c].adjust(list(map(lambda row: row[c], self.board))) def __str__(self): result = '' for r in range(len(self.board)): for cell in self.board[r]: result += str(cell.state) + ' ' for clue in self.rows[r].clues: result += str(clue) + ' ' result += '\n' has_value = True count = 0 while has_value: has_value = False for clues in self.columns: if len(clues.clues) > count: has_value = True result += str(clues.clues[count]) + ' ' else: result += ' ' result += '\n' count += 1 return result picross = Picross([Clues(1,1,1),Clues(1,1),Clues(1,2),Clues(5),Clues(1,1,1)], [Clues(1,2), Clues(3), Clues(1,2),Clues(3),Clues(1,3)]) print(picross) picross.solve()
25.797203
100
0.500407
457
3,689
3.956236
0.150985
0.044248
0.044248
0.054204
0.224004
0.126659
0.097345
0.079646
0.079646
0.079646
0
0.014249
0.372188
3,689
142
101
25.978873
0.765976
0
0
0.185185
0
0
0.003253
0
0
0
0
0
0
1
0.166667
false
0
0.009259
0.064815
0.324074
0.027778
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
560df272ec26702a46aac33cc6e289b61e0a8412
3,055
py
Python
legonet_pytorch/module.py
max-liulin/CV-Backbones
b32239d10126c8f84e6f6283b95b42b3b60b1a06
[ "Apache-2.0" ]
220
2019-11-27T03:02:14.000Z
2020-02-26T14:08:41.000Z
legonet_pytorch/module.py
vickyqi7/CV-Backbones
1262dacffdea62f9983ef0231177aea720e25f12
[ "Apache-2.0" ]
3
2019-12-10T15:00:57.000Z
2020-02-02T12:02:47.000Z
legonet_pytorch/module.py
vickyqi7/CV-Backbones
1262dacffdea62f9983ef0231177aea720e25f12
[ "Apache-2.0" ]
35
2019-11-28T05:21:50.000Z
2020-02-26T13:46:11.000Z
''' Copyright (C) 2016. Huawei Technologies Co., Ltd. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the MIT license. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MIT License for more details. ''' import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init import numpy as np import torchvision class LegoConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, n_split, n_lego): super(LegoConv2d, self).__init__() self.in_channels, self.out_channels, self.kernel_size, self.n_split = in_channels, out_channels, kernel_size, n_split self.basic_channels = in_channels // self.n_split self.n_lego = int(self.out_channels * n_lego) self.lego = nn.Parameter(nn.init.kaiming_normal_(torch.rand(self.n_lego, self.basic_channels, self.kernel_size, self.kernel_size))) self.aux_coefficients = nn.Parameter(init.kaiming_normal_(torch.rand(self.n_split, self.out_channels, self.n_lego, 1, 1))) self.aux_combination = nn.Parameter(init.kaiming_normal_(torch.rand(self.n_split, self.out_channels, self.n_lego, 1, 1))) def forward(self, x): self.proxy_combination = torch.zeros(self.aux_combination.size()).to(self.aux_combination.device) self.proxy_combination.scatter_(2, self.aux_combination.argmax(dim = 2, keepdim = True), 1); self.proxy_combination.requires_grad = True out = 0 for i in range(self.n_split): lego_feature = F.conv2d(x[:, i*self.basic_channels: (i+1)*self.basic_channels], self.lego, padding = self.kernel_size // 2) kernel = self.aux_coefficients[i] * self.proxy_combination[i] out = out + F.conv2d(lego_feature, kernel) return out def copy_grad(self, balance_weight): self.aux_combination.grad = self.proxy_combination.grad # balance loss idxs = self.aux_combination.argmax(dim = 2).view(-1).cpu().numpy() unique, count = 
np.unique(idxs, return_counts = True) unique, count = np.unique(count, return_counts = True) avg_freq = (self.n_split * self.out_channels ) / self.n_lego max_freq = 0 min_freq = 100 for i in range(self.n_lego): i_freq = (idxs == i).sum().item() max_freq = max(max_freq, i_freq) min_freq = min(min_freq, i_freq) if i_freq >= np.floor(avg_freq) and i_freq <= np.ceil(avg_freq): continue if i_freq < np.floor(avg_freq): self.aux_combination.grad[:, :, i] = self.aux_combination.grad[:, :, i] - balance_weight * (np.floor(avg_freq) - i_freq) if i_freq > np.ceil(avg_freq): self.aux_combination.grad[:, :, i] = self.aux_combination.grad[:, :, i] + balance_weight * (i_freq - np.ceil(avg_freq))
50.081967
144
0.67365
451
3,055
4.341463
0.277162
0.030644
0.091931
0.05618
0.336568
0.31001
0.244637
0.19714
0.159346
0.141982
0
0.010004
0.21473
3,055
60
145
50.916667
0.806169
0.134861
0
0
0
0
0
0
0
0
0
0
0
1
0.071429
false
0
0.142857
0
0.261905
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
560eee76e44697ef904786e1db3a7b3facad82f0
488
py
Python
backend/trucks/migrations/0005_auto_20201029_2137.py
NikogSamonic/Log_app
197fb46b84d102942352d1a30d2a64680b18d943
[ "MIT" ]
11
2020-09-21T18:58:09.000Z
2020-10-21T19:08:50.000Z
backend/trucks/migrations/0005_auto_20201029_2137.py
NikogSamonic/Log_app
197fb46b84d102942352d1a30d2a64680b18d943
[ "MIT" ]
8
2020-09-22T02:59:26.000Z
2020-10-31T07:28:59.000Z
backend/trucks/migrations/0005_auto_20201029_2137.py
NikogSamonic/Log_app
197fb46b84d102942352d1a30d2a64680b18d943
[ "MIT" ]
21
2020-09-21T11:46:42.000Z
2020-10-29T22:57:47.000Z
# Generated by Django 3.1.2 on 2020-10-29 20:37 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('trucks', '0004_auto_20201025_2222'), ] operations = [ migrations.RemoveField( model_name='car', name='truck', ), migrations.AddField( model_name='car', name='truck', field=models.ManyToManyField(to='trucks.Truck'), ), ]
21.217391
60
0.561475
49
488
5.489796
0.714286
0.066915
0.089219
0.118959
0.156134
0
0
0
0
0
0
0.093093
0.317623
488
22
61
22.181818
0.714715
0.092213
0
0.375
1
0
0.129252
0.052154
0
0
0
0
0
1
0
false
0
0.0625
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
560fa46d7efbe40812073452461efabc1cf83295
3,719
py
Python
pychron/dvc/util.py
ASUPychron/pychron
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
[ "Apache-2.0" ]
31
2016-03-07T02:38:17.000Z
2022-02-14T18:23:43.000Z
pychron/dvc/util.py
ASUPychron/pychron
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
[ "Apache-2.0" ]
1,626
2015-01-07T04:52:35.000Z
2022-03-25T19:15:59.000Z
pychron/dvc/util.py
UIllinoisHALPychron/pychron
f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc
[ "Apache-2.0" ]
26
2015-05-23T00:10:06.000Z
2022-03-07T16:51:57.000Z
# =============================================================================== # Copyright 2019 ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== # ============= EOF ============================================= from uncertainties import ufloat from pychron.dvc import analysis_path, dvc_dump from pychron.processing.interpreted_age import InterpretedAge class Tag(object): name = None path = None note = "" subgroup = "" uuid = "" record_id = "" @classmethod def from_analysis(cls, an, **kw): tag = cls() tag.name = an.tag tag.note = an.tag_note tag.record_id = an.record_id tag.uuid = an.uuid tag.repository_identifier = an.repository_identifier # tag.path = analysis_path(an.record_id, an.repository_identifier, modifier='tags') tag.path = analysis_path(an, an.repository_identifier, modifier="tags") tag.subgroup = an.subgroup for k, v in kw.items(): setattr(tag, k, v) return tag def dump(self): obj = {"name": self.name, "note": self.note, "subgroup": self.subgroup} if not self.path: self.path = analysis_path( self.uuid, self.repository_identifier, modifier="tags", mode="w" ) dvc_dump(obj, self.path) class DVCInterpretedAge(InterpretedAge): labnumber = None isotopes = None repository_identifier = None analyses = None def load_tag(self, obj): self.tag = obj.get("name", "") self.tag_note = obj.get("note", "") def from_json(self, obj): for attr in ("name", "uuid"): setattr(self, attr, obj.get(attr, "")) 
pf = obj["preferred"] for attr in ("age", "age_err"): setattr(self, attr, pf.get(attr, 0)) sm = obj["sample_metadata"] for attr in ("sample", "material", "project", "irradiation"): setattr(self, attr, sm.get(attr, "")) # for a in ('age', 'age_err', 'age_kind', # # 'kca', 'kca_err','kca_kind', # 'mswd', # 'sample', 'material', 'identifier', 'nanalyses', 'irradiation', # 'name', 'project', 'uuid', 'age_error_kind'): # try: # setattr(self, a, obj.get(a, NULL_STR)) # except BaseException as a: # print('exception DVCInterpretdAge.from_json', a) self.labnumber = self.identifier # self.uage = ufloat(self.age, self.age_err) self._record_id = "{} {}".format(self.identifier, self.name) self.analyses = obj.get("analyses", []) pkinds = pf.get("preferred_kinds") if pkinds: for k in pkinds: attr = k["attr"] if attr == "age": attr = "uage" setattr(self, attr, ufloat(k["value"], k["error"])) def get_value(self, attr): try: return getattr(self, attr) except AttributeError: return ufloat(0, 0) @property def status(self): return "X" if self.is_omitted() else ""
32.33913
91
0.547728
428
3,719
4.670561
0.336449
0.030015
0.030015
0.048024
0.056528
0.037019
0
0
0
0
0
0.004092
0.277225
3,719
114
92
32.622807
0.739583
0.338263
0
0
0
0
0.064556
0
0
0
0
0
0
1
0.092308
false
0
0.046154
0.015385
0.384615
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
560fcacc1a61a2c757a12a88e6d3d993d2d1a1d4
3,214
py
Python
processor/gen_record.py
Princeton-Penn-Vents/princeton-penn-flowmeter
85a5ca8357ca34e0b543fa1489d48ecbc8023294
[ "MIT" ]
3
2020-04-14T10:45:12.000Z
2022-01-06T16:40:30.000Z
processor/gen_record.py
Princeton-Penn-Vents/princeton-penn-flowmeter
85a5ca8357ca34e0b543fa1489d48ecbc8023294
[ "MIT" ]
36
2020-04-05T16:23:33.000Z
2020-10-02T02:58:21.000Z
processor/gen_record.py
Princeton-Penn-Vents/princeton-penn-flowmeter
85a5ca8357ca34e0b543fa1489d48ecbc8023294
[ "MIT" ]
1
2020-04-05T13:18:47.000Z
2020-04-05T13:18:47.000Z
from __future__ import annotations from typing import Optional from dataclasses import dataclass from logging import Logger from processor.device_names import address_to_name @dataclass class GenRecord: logger: Logger _mac: Optional[str] = None _sid: int = 0 _name: Optional[str] = None # Nurse only, but in master class to make simpler _nurse_name: str = "" @property def safemac(self) -> str: """Returns a safe filename (Windows does not allow colons)""" keepcharacters = (".", "_") return "".join( c for c in self.mac if c.isalnum() or c in keepcharacters ).rstrip() @property def mac(self) -> str: """ The mac address. Returns <unknown> if the address is not known. """ return "<unknown>" if self._mac is None else self._mac @mac.setter def mac(self, value: str): if self._mac is None or self._mac != value: self._mac = value self.logger.info(f"MAC addr: {self._mac}") self.mac_changed() @property def sid(self) -> int: """ Sensor ID, as an integer. Printout with "X" format. """ return self._sid @sid.setter def sid(self, value: int): if self._sid != value: self._sid = value self.logger.info(f"Sensor ID: {self._sid:X}") self.sid_changed() @property def box_name(self) -> str: """ The name of the box, or <unknown>. """ if self._name is not None: return self._name if self._mac is None: return "<unknown>" try: return address_to_name(self._mac).title() except ValueError: return self.mac @box_name.setter def box_name(self, value: Optional[str]): if self._name != value: self._name = value self.logger.info(f"Box name: {self._name}") self.mac_changed() @property def stacked_name(self) -> str: """ Return the box name stacked using a newline If unknown, return Box name: <unknown>. """ if self._mac is None or self._mac == "00:00:00:00:00:00": return "Box name\n<unknown>" try: return "\n".join(address_to_name(self._mac).title().split()) except ValueError: return self.mac @property def title(self) -> str: """ The title to show in the dialog box. Will show box_name if unset. 
""" return self._nurse_name @title.setter def title(self, value: str): if self._nurse_name is None or self._nurse_name != value: self._nurse_name = value self.logger.info(f"Changed title to {self._nurse_name!r}") self.title_changed() def title_changed(self) -> None: """ Modify in subclasses to add special callbacks here. """ def mac_changed(self) -> None: """ Modify in subclasses to add special callbacks here. """ def sid_changed(self) -> None: """ Modify in subclasses to add special callbacks here. """
26.561983
73
0.564717
405
3,214
4.335802
0.237037
0.063781
0.037016
0.025057
0.334282
0.202164
0.129841
0.129841
0.102506
0.102506
0
0.00603
0.329185
3,214
120
74
26.783333
0.808442
0.174549
0
0.194444
0
0
0.065988
0
0
0
0
0
0
1
0.180556
false
0
0.069444
0
0.486111
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
56115c228ac0c726ff0df0045669becb3ae31e8a
22,024
py
Python
inputs/transformers.py
aayushk614/DTI
f0338918144c0efbb79556ac8e81cbcefc70e22f
[ "MIT" ]
null
null
null
inputs/transformers.py
aayushk614/DTI
f0338918144c0efbb79556ac8e81cbcefc70e22f
[ "MIT" ]
null
null
null
inputs/transformers.py
aayushk614/DTI
f0338918144c0efbb79556ac8e81cbcefc70e22f
[ "MIT" ]
null
null
null
from __future__ import print_function import math import nibabel as nib import nrrd import numpy as np import operator import os import random import torch import warnings from functools import reduce from inputs import Image, ImageType CHANNEL, DEPTH, HEIGHT, WIDTH = 0, 1, 2, 3 class ToNDTensor(object): """ Creates a torch.Tensor object from a numpy array. The transformer supports 3D and 4D numpy arrays. The numpy arrays are transposed in order to create tensors with dimensions (DxHxW) for 3D or (CxDxHxW) for 4D arrays. The dimensions are D: Depth, H: Height, W: Width, C: Channels. """ # noinspection PyArgumentList def __call__(self, nd_array): """ :param nd_array: A 3D or 4D numpy array to convert to torch.Tensor :return: A torch.Tensor of size (DxHxW) or (CxDxHxW)""" if not isinstance(nd_array, np.ndarray): raise TypeError("Only {} are supporter".format(np.ndarray)) if nd_array.ndim == 3: nd_tensor = torch.Tensor(nd_array.reshape(nd_array.shape + (1,))) elif nd_array.ndim == 4: nd_tensor = torch.Tensor(nd_array) else: raise NotImplementedError("Only 3D or 4D arrays are supported") return nd_tensor def __repr__(self): return self.__class__.__name__ + '()' class ToNiftiFile(object): """ Creates a Nifti1Image from a given numpy ndarray The numpy arrays are transposed to respect the standard Nifti dimensions (WxHxDxC) """ def __init__(self, file_path, affine): self._file_path = file_path self._affine = affine def __call__(self, nd_array): if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]): raise TypeError( "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported") output_dir = os.path.dirname(self._file_path) if not os.path.exists(output_dir): os.makedirs(output_dir) if nd_array.shape[0] not in [6, 9]: nd_array = np.squeeze(nd_array, axis=0) else: nd_array = np.moveaxis(nd_array, 0, 3) nifti1_file = nib.Nifti1Image(nd_array, self._affine) nib.save(nifti1_file, self._file_path) def __repr__(self): return self.__class__.__name__ + '()' class 
ToNrrdFile(object): """ Create a .NRRD file and save it at the given path. The numpy arrays are transposed to respect the standard NRRD dimensions (WxHxDxC) """ def __init__(self, file_path): self._file_path = file_path def __call__(self, nd_array): if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]): raise TypeError( "Only 3D (DxHxW) or 4D (CxDxHxW) {} are supported".format(np.ndarray)) output_dir = os.path.dirname(self._file_path) if not os.path.exists(output_dir): os.makedirs(output_dir) header = self._create_header_from(nd_array) nrrd.write(self._file_path, np.moveaxis(nd_array, 0, 3), header=header) def __repr__(self): return self.__class__.__name__ + '()' @staticmethod def _create_header_from(nd_array): if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]): raise TypeError( "Only 3D (DxHxW) or 4D (CxDxHxW) {} are supported".format(np.ndarray)) return { 'type': nd_array.dtype, 'dimension': nd_array.ndim, 'sizes': nd_array.shape, 'kinds': ['domain', 'domain', 'domain', '3D-matrix'] if nd_array.ndim == 4 else ['domain', 'domain', 'domain'], 'endian': 'little', 'encoding': 'raw' } class ToNumpyArray(object): """ Creates a numpy ndarray from a given Nifti or NRRD image file path. The numpy arrays are transposed to respect the standard dimensions (DxHxW) for 3D or (CxDxHxW) for 4D arrays. 
""" def __call__(self, image_path): if Image.is_nifti(image_path): nifti_image = nib.load(image_path) nd_array = nifti_image.get_fdata().__array__() affine = nifti_image._affine elif Image.is_nrrd(image_path): nd_array, header = nrrd.read(image_path) else: raise NotImplementedError( "Only {} files are supported !".format(ImageType.ALL)) if nd_array.ndim == 3: nd_array = np.moveaxis(np.expand_dims(nd_array, 3), 3, 0) elif nd_array.ndim == 4: nd_array = np.moveaxis(nd_array, 3, 0) return nd_array def __repr__(self): return self.__class__.__name__ + '()' class ToUniqueTensorValues(object): UNIQUE_TENSOR_VALUES_INDEX = [0, 1, 2, 4, 5, 8] """ Creates a numpy ndarray from a given Nifti or NRRD image file path. The numpy arrays are transposed to respect the standard dimensions (DxHxW) for 3D or (CxDxHxW) for 4D arrays. """ def __call__(self, nd_array): if not isinstance(nd_array, np.ndarray) or nd_array.ndim is not 4 or nd_array.shape[0] != 9: raise TypeError( "Only 4D (CxDxHxW) {} are with 9 channels are supported".format(np.ndarray)) return nd_array[self.UNIQUE_TENSOR_VALUES_INDEX, :, :, :] def __repr__(self): return self.__class__.__name__ + '()' class ToLogEuclidean(object): """ Convert a DTI image in the Log-Euclidean space. To convert the DTI image into the Log-Euclidean space, the eigen-decomposition of each tensor is performed and the log of the eigen-values is computed. It can mathematically be expressed as follow: log(D) = Ulog(V)U.T where D is a tensor, U is a matrix of eigen-vector and V a diagonal matrix of eigen-values. Based on: Arsigny, V., Fillard, P., Pennec, X., & Ayache, N. (2006). 
Log-Euclidean metrics for fast and simple calculus on diffusion tensors https://www.ncbi.nlm.nih.gov/pubmed/16788917 """ def __call__(self, nd_array): """ :param nd_array: The DTI image as a nd array of dimension CxDxHxW) :return: he DTI image in the log-Euclidean space """ warnings.filterwarnings('ignore') if not isinstance(nd_array, np.ndarray) or nd_array.ndim is not 4 or nd_array.shape[0] != 9: raise TypeError( "Only 4D (CxDxHxW) {} are with 9 channels are supported".format(np.ndarray)) image_as_vector = nd_array.reshape( (3, 3, reduce(operator.mul, nd_array.shape[1:], 1))) return self.apply(image_as_vector, np.zeros(image_as_vector.shape, dtype='float32')).reshape(nd_array.shape) def __repr__(self): return self.__class__.__name__ + '()' @staticmethod def apply(image_vector, output): index = 0 while index < image_vector.shape[2]: diffusion_tensor = image_vector[:, :, index] # Does not convert the background tensors to log-euclidean if np.any(diffusion_tensor): eig_val, eig_vec = np.linalg.eigh(diffusion_tensor) output[:, :, index] = np.dot(np.dot(np.ascontiguousarray(eig_vec), np.diag(np.log(eig_val))), np.ascontiguousarray(np.linalg.inv(eig_vec))) else: output[:, :, index] = diffusion_tensor index = index + 1 @staticmethod def undo(image_vector, output): index = 0 while index < image_vector.shape[2]: log_euclidean_diffusion_tensor = image_vector[:, :, index] # Due to noise, negative eigenvalues can arise. Those noisy tensors cannot be converted back to Euclidean. if np.any(log_euclidean_diffusion_tensor) and not np.isnan(log_euclidean_diffusion_tensor).any(): eig_val, eig_vec = np.linalg.eigh( log_euclidean_diffusion_tensor) output[:, :, index] = np.dot(np.dot(np.ascontiguousarray(eig_vec), np.diag(np.exp(eig_val))), np.ascontiguousarray(np.linalg.inv(eig_vec))) else: output[:, :, index] = log_euclidean_diffusion_tensor index = index + 1 class InterpolateNSDTensors(object): """ Interpolates Negative Semi-Definite tensors using trilinear interpolation. 
It computed a weighted sum of the NSD tensors' neighbors in the Log-Euclidean domain. """ def __call__(self, log_euclidean_nd_array): if not isinstance(log_euclidean_nd_array, np.ndarray) or log_euclidean_nd_array.ndim is not 4: raise TypeError("Only {} are supported".format(np.ndarray.dtype)) d_index, h_index, w_index = np.where( np.isnan(log_euclidean_nd_array[-1, :, :, :])) for index in list(zip(d_index, h_index, w_index)): neighbors = self._get_tri_linear_neighbors_and_weights( index, log_euclidean_nd_array) log_euclidean_nd_array[:, index[0], index[1], index[2]] = np.dot(np.array(neighbors[0]).T, neighbors[1] / np.sum(neighbors[1])) return log_euclidean_nd_array def _get_tri_linear_neighbors_and_weights(self, nsd_index, log_euclidean_nd_array): """ Gets the 8 neighbors of the NSD tensors from which to interpolate. The weight associated with each neighbor is inversely proportional to the distance between the interpolated tensor and the neighbor. :param nsd_index: The index of the NSD tensor. :param log_euclidean_nd_array: The log euclidean image as numpy ndarray :return: A list of the 8 corner neighbors and their associated weights in separated tuples. """ front, left, down = -1, -1, -1 back, right, up = 1, 1, 1 directions = [(front, left, down), (front, left, up), (back, left, down), (back, left, up), (front, right, down), (front, right, up), (back, right, up), (back, right, down)] neighbors_and_weights = list(map(lambda direction: self._get_closest_neighbor_of( log_euclidean_nd_array, nsd_index, direction), directions)) return list(zip(*neighbors_and_weights)) @staticmethod def _get_closest_neighbor_of(log_euclidean_nd_array, nsd_index, direction): """ Gets the closest non-NSD tensor to the nsd_index and its weight following a given direction. The associated weight is 1/distance, where the distance is the distance from the neighbor and the nsd_index. :param log_euclidean_nd_array: The log-euclidean image as ndarray. 
:param nsd_index: The index of the NSD tensor to interpolate. :param direction: The direction in which the neighbor is searched. :return: The closest neighbor as a 9 values vector and its associated weight. """ distance = 1 neighbor = None try: while neighbor is None: d, h, w = tuple(((np.array(direction) * distance) + nsd_index)) if 0 < d < log_euclidean_nd_array.shape[1] and 0 < h < log_euclidean_nd_array.shape[2] and 0 < w < \ log_euclidean_nd_array.shape[3]: potential_neighbor = log_euclidean_nd_array[:, d, h, w] else: raise IndexError if not np.isnan(potential_neighbor).any(): neighbor = potential_neighbor else: distance = distance + 1 weight = 1 / distance except IndexError: neighbor = np.zeros(log_euclidean_nd_array.shape[0]) weight = 0 return neighbor, weight def __repr__(self): return self.__class__.__name__ + '()' class CropToContent(object): """ Crops the image to its content. The content's bounding box is defined by the first non-zero slice in each direction (D, H, W) """ def __call__(self, nd_array): if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]): raise TypeError( "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported") c, d_min, d_max, h_min, h_max, w_min, w_max = self.extract_content_bounding_box_from( nd_array) return nd_array[:, d_min:d_max, h_min:h_max, w_min:w_max] if nd_array.ndim is 4 else \ nd_array[d_min:d_max, h_min:h_max, w_min:w_max] def __repr__(self): return self.__class__.__name__ + '()' @staticmethod def extract_content_bounding_box_from(nd_array): """ Computes the D, H, W min and max values defining the content bounding box. :param nd_array: The input image as a numpy ndarray :return: The D, H, W min and max values of the bounding box. 
""" depth_slices = np.any(nd_array, axis=(2, 3)) height_slices = np.any(nd_array, axis=(1, 3)) width_slices = np.any(nd_array, axis=(1, 2)) d_min, d_max = np.where(depth_slices)[1][[0, -1]] h_min, h_max = np.where(height_slices)[1][[0, -1]] w_min, w_max = np.where(width_slices)[1][[0, -1]] return nd_array.shape[CHANNEL], d_min, d_max, h_min, h_max, w_min, w_max class PadToShape(object): def __init__(self, target_shape, padding_value=0, isometric=False): self._padding_value = padding_value if isometric: largest_dimension = max(target_shape[DEPTH], target_shape[WIDTH]) self._target_shape = ( target_shape[CHANNEL], largest_dimension, target_shape[HEIGHT], largest_dimension) else: self._target_shape = target_shape def __call__(self, nd_array): if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]): raise TypeError( "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported") elif nd_array.ndim is not len(self._target_shape): raise ValueError( "The input image and target shape's dimension does not match {} vs {}".format(nd_array.ndim, len(self._target_shape))) return self.apply(nd_array, self._target_shape, self._padding_value) def __repr__(self): return self.__class__.__name__ + '()' @staticmethod def apply(nd_array, target_shape, padding_value): deltas = tuple(max(0, target - current) for target, current in zip(target_shape, nd_array.shape)) if nd_array.ndim == 3: nd_array = np.pad(nd_array, ((math.floor(deltas[0] / 2), math.ceil(deltas[0] / 2)), (math.floor(deltas[1] / 2), math.ceil(deltas[1] / 2)), (math.floor(deltas[2] / 2), math.ceil(deltas[2] / 2))), 'constant', constant_values=padding_value) elif nd_array.ndim == 4: nd_array = np.pad(nd_array, ((0, 0), (math.floor(deltas[1] / 2), math.ceil(deltas[1] / 2)), (math.floor(deltas[2] / 2), math.ceil(deltas[2] / 2)), (math.floor(deltas[3] / 2), math.ceil(deltas[3] / 2))), 'constant', constant_values=padding_value) return nd_array @staticmethod def undo(nd_array, original_shape): deltas = tuple(max(0, 
current - target) for target, current in zip(original_shape, nd_array.shape)) if nd_array.ndim == 3: nd_array = nd_array[ math.floor(deltas[0] / 2):-math.ceil(deltas[0] / 2), math.floor(deltas[1] / 2):-math.ceil(deltas[1] / 2), math.floor(deltas[2] / 2):-math.ceil(deltas[2] / 2)] elif nd_array.ndim == 4: nd_array = nd_array[ :, math.floor(deltas[1] / 2):-math.ceil(deltas[1] / 2), math.floor(deltas[2] / 2):-math.ceil(deltas[2] / 2), math.floor(deltas[3] / 2):-math.ceil(deltas[3] / 2)] return nd_array class RandomFlip(object): def __init__(self, exec_probability): self._exec_probability = exec_probability def __call__(self, nd_array): if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]): raise TypeError( "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported") for axis in (0, 1, 2): if random.uniform(0, 1) <= self._exec_probability: nd_array = self.apply(nd_array, [axis]) return nd_array def __repr__(self): return self.__class__.__name__ + '()' @staticmethod def apply(nd_array, axes): if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]): raise TypeError( "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported") for axis in axes: if nd_array.ndim is 3: nd_array = np.flip(nd_array, axis) else: channels = [np.flip(nd_array[c], axis) for c in range(nd_array.shape[0])] nd_array = np.stack(channels, axis=0) return nd_array @staticmethod def undo(nd_array, axes): if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]): raise TypeError( "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported") for axis in axes[::-1]: if nd_array.ndim is 3: nd_array = np.flip(nd_array, axis) else: channels = [np.flip(nd_array[c], axis) for c in range(nd_array.shape[0])] nd_array = np.stack(channels, axis=0) return nd_array class RandomRotate90(object): def __init__(self, exec_probability): self._exec_probability = exec_probability def __call__(self, nd_array): if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]): 
raise TypeError( "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported") if random.uniform(0, 1) <= self._exec_probability: num_rotation = random.randint(0, 4) nd_array = self.apply(nd_array, num_rotation) return nd_array def __repr__(self): return self.__class__.__name__ + '()' @staticmethod def apply(nd_array, num_rotation): if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]): raise TypeError( "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported") if nd_array.ndim == 3: nd_array = np.rot90(nd_array, num_rotation, (1, 2)) else: channels = [np.rot90(nd_array[c], num_rotation, (1, 2)) for c in range(nd_array.shape[0])] nd_array = np.stack(channels, axis=0) return nd_array @staticmethod def undo(nd_array, num_rotation): if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]): raise TypeError( "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported") if nd_array.ndim == 3: nd_array = np.rot90(nd_array, num_rotation, (2, 1)) else: channels = [np.rot90(nd_array[c], num_rotation, (2, 1)) for c in range(nd_array.shape[0])] nd_array = np.stack(channels, axis=0) return nd_array class Normalize(object): def __init__(self, mean, std): self._mean = mean self._std = std def __call__(self, nd_array): if not isinstance(nd_array, np.ndarray): raise TypeError("Only ndarrays are supported") return self.apply(nd_array, self._mean, self._std) def __repr__(self): return self.__class__.__name__ + '()' @staticmethod def apply(nd_array, mean, std): if not isinstance(nd_array, np.ndarray): raise TypeError("Only ndarrays are supported") return (nd_array - mean) / std @staticmethod def undo(nd_array, mean, std): if not isinstance(nd_array, np.ndarray): raise TypeError("Only ndarrays are supported") return (nd_array * std) + mean class Flip(object): def __init__(self, axis): self._axis = axis def __call__(self, nd_array): if not isinstance(nd_array, np.ndarray): raise TypeError("Only ndarrays are supported") return self.apply(nd_array, self._axis) 
def __repr__(self): return self.__class__.__name__ + '()' @staticmethod def apply(nd_array, axis): if not isinstance(nd_array, np.ndarray): raise TypeError("Only ndarrays are supported") return np.flip(nd_array, axis).copy() @staticmethod def undo(nd_array, axis): if not isinstance(nd_array, np.ndarray): raise TypeError("Only ndarrays are supported") return np.flip(nd_array, axis).copy() class TensorFlip(object): def __init__(self, axis): self._axis = axis def __call__(self, tensor): return self.apply(tensor, self._axis) def __repr__(self): return self.__class__.__name__ + '()' @staticmethod def apply(tensor, axis): return tensor.flip(axis) @staticmethod def undo(tensor, axis): return tensor.flip(axis)
37.265651
120
0.593444
2,861
22,024
4.318071
0.116393
0.099725
0.025498
0.027198
0.598996
0.550024
0.509228
0.482678
0.44779
0.421645
0
0.01739
0.305485
22,024
590
121
37.328814
0.790272
0.138712
0
0.502604
0
0
0.064083
0
0
0
0
0
0
1
0.140625
false
0
0.03125
0.044271
0.320313
0.002604
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
56116252a18b3ab533ede58656a33d7beedc09ce
1,198
py
Python
tests/test_image_xpress.py
jni/microscopium
b9cddd8ef5f3003a396ace602228651b3020c4a3
[ "BSD-3-Clause" ]
53
2016-08-30T09:45:12.000Z
2022-02-03T06:22:50.000Z
tests/test_image_xpress.py
jni/microscopium
b9cddd8ef5f3003a396ace602228651b3020c4a3
[ "BSD-3-Clause" ]
151
2015-01-15T06:16:27.000Z
2021-03-22T01:01:26.000Z
tests/test_image_xpress.py
jni/microscopium
b9cddd8ef5f3003a396ace602228651b3020c4a3
[ "BSD-3-Clause" ]
19
2015-01-15T06:13:26.000Z
2021-09-13T13:06:47.000Z
from microscopium.screens import image_xpress

import collections as coll

# Keys of the semantic-filename mapping, in the order the parser emits them.
_KEYS = ('directory', 'prefix', 'plate', 'well', 'field', 'channel', 'suffix')


def test_ix_semantic_filename():
    filename = "./Week1_22123/G10_s2_w11C3B9BCC-E48F-4C2F-9D31-8F46D8B5B972.tif"
    values = ('./Week1_22123', '', 22123, 'G10', 1, 0, 'tif')
    want = coll.OrderedDict(zip(_KEYS, values))
    assert image_xpress.ix_semantic_filename(filename) == want


def test_ix_semantic_filename2():
    filename = ("./BBBC022_v1_images_20585w1/IXMtest_L09_s3_w1538679C9-F03A-"
                "4656-9A57-0D4A440C1C62.tif")
    values = ('./BBBC022_v1_images_20585w1', 'IXMtest', 20585, 'L09', 2, 0, 'tif')
    want = coll.OrderedDict(zip(_KEYS, values))
    assert image_xpress.ix_semantic_filename(filename) == want
39.933333
79
0.477462
102
1,198
5.313725
0.509804
0.073801
0.099631
0.121771
0.416974
0.243542
0.243542
0.243542
0.243542
0.243542
0
0.138736
0.392321
1,198
29
80
41.310345
0.605769
0
0
0.26087
0
0
0.243108
0.146199
0
0
0
0
0.086957
1
0.086957
false
0
0.086957
0
0.173913
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
561183bf158e6122876d8d2dc908ce0fd0c44f33
541
py
Python
tests/test_gui/test_components/test_copy_button.py
ryan-lingle/node-launcher
4f1f7087a28d76f5b8153adac548d09b0558f6d5
[ "MIT" ]
249
2019-02-01T17:17:09.000Z
2022-03-18T21:26:41.000Z
tests/test_gui/test_components/test_copy_button.py
justinmichael3x/node-launcher
5a2a646b01803fe284fa18bee99c13c7cbf04498
[ "MIT" ]
299
2019-02-02T02:01:02.000Z
2022-02-21T05:13:20.000Z
tests/test_gui/test_components/test_copy_button.py
justinmichael3x/node-launcher
5a2a646b01803fe284fa18bee99c13c7cbf04498
[ "MIT" ]
49
2019-02-11T14:46:40.000Z
2022-02-20T04:05:14.000Z
import pytest
from PySide2.QtCore import Qt
from PySide2.QtGui import QClipboard
from PySide2.QtTest import QTest

from node_launcher.gui.components.copy_button import CopyButton


@pytest.fixture
def copy_button() -> CopyButton:
    # Button labelled 'Test Me' that copies the literal 'copy_this'.
    return CopyButton(button_text='Test Me', copy_text='copy_this')


class TestCopyButton(object):
    def test_copy_button(self, copy_button: CopyButton, qtbot: QTest):
        # Clicking the button should place its copy_text on the clipboard.
        qtbot.mouseClick(copy_button.button, Qt.LeftButton)
        assert QClipboard().text() == 'copy_this'
28.473684
74
0.768946
71
541
5.676056
0.450704
0.173697
0.148883
0
0
0
0
0
0
0
0
0.006466
0.142329
541
18
75
30.055556
0.862069
0
0
0
0
0
0.046211
0
0
0
0
0
0.076923
1
0.153846
false
0
0.384615
0
0.692308
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
5611a9096513cba2a9a68bf3992b55379db65e5b
3,581
py
Python
examples/vertex_pipeline/pipelines/batch_prediction_pipeline_runner.py
marcosgm/professional-services
f332b425c2f3b6538ebf65afda7e67de3bed1b3d
[ "Apache-2.0" ]
2,116
2017-05-18T19:33:05.000Z
2022-03-31T13:34:48.000Z
examples/vertex_pipeline/pipelines/batch_prediction_pipeline_runner.py
hyuatpc/professional-services
e5c811a8752e91fdf9f959a0414931010b0ea1ba
[ "Apache-2.0" ]
548
2017-05-20T05:05:35.000Z
2022-03-28T16:38:12.000Z
examples/vertex_pipeline/pipelines/batch_prediction_pipeline_runner.py
hyuatpc/professional-services
e5c811a8752e91fdf9f959a0414931010b0ea1ba
[ "Apache-2.0" ]
1,095
2017-05-19T00:02:36.000Z
2022-03-31T05:21:39.000Z
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runner for batch prediction pipeline."""

import argparse

from absl import logging
from kfp.v2.google import client


def run_training_pipeline():
    """Main function for batch prediction pipeline runner."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--project_id', type=str)
    parser.add_argument('--pipeline_region', type=str)
    parser.add_argument('--pipeline_root', type=str)
    parser.add_argument('--pipeline_job_spec_path', type=str)
    # Staging path for running custom job
    parser.add_argument('--data_pipeline_root', type=str)

    # Parameters required for data ingestion and processing
    parser.add_argument('--input_dataset_uri', type=str)
    parser.add_argument('--gcs_data_output_folder', type=str)
    parser.add_argument('--data_region', type=str)
    parser.add_argument('--gcs_result_folder', type=str)

    # Parameters required for training job
    parser.add_argument('--model_resource_name', type=str, default='')
    parser.add_argument('--endpoint_resource_name', type=str, default='')

    # Parameters required for batch prediction job
    parser.add_argument('--machine_type', type=str, default='n1-standard-4')
    parser.add_argument('--accelerator_count', type=int, default=0)
    parser.add_argument('--accelerator_type', type=str,
                        default='ACCELERATOR_TYPE_UNSPECIFIED')
    parser.add_argument('--starting_replica_count', type=int, default=1)
    parser.add_argument('--max_replica_count', type=int, default=2)

    # Parameters required for pipeline scheduling
    parser.add_argument('--pipeline_schedule', type=str, default='',
                        help='0 2 * * *')
    parser.add_argument('--pipeline_schedule_timezone', type=str,
                        default='US/Pacific')

    parser.add_argument('--enable_pipeline_caching', action='store_true',
                        default=False,
                        help='Specify whether to enable caching.')

    args, _ = parser.parse_known_args()
    logging.info(args)

    api_client = client.AIPlatformClient(args.project_id, args.pipeline_region)

    # Arguments consumed by this runner itself, never forwarded to the
    # pipeline as parameters.
    runner_only = {'pipeline_region', 'pipeline_root',
                   'pipeline_job_spec_path', 'pipeline_schedule',
                   'pipeline_schedule_timezone', 'enable_pipeline_caching'}
    pipeline_params = {k: v for k, v in vars(args).items()
                       if k not in runner_only}

    # An empty schedule string means "run once, now"; otherwise register the
    # job on the given cron schedule.
    if not args.pipeline_schedule:
        api_client.create_run_from_job_spec(
            args.pipeline_job_spec_path,
            pipeline_root=args.pipeline_root,
            parameter_values=pipeline_params,
            enable_caching=args.enable_pipeline_caching
        )
    else:
        api_client.create_schedule_from_job_spec(
            args.pipeline_job_spec_path,
            schedule=args.pipeline_schedule,
            time_zone=args.pipeline_schedule_timezone,
            pipeline_root=args.pipeline_root,
            parameter_values=pipeline_params,
            enable_caching=args.enable_pipeline_caching
        )


if __name__ == '__main__':
    run_training_pipeline()
39.788889
78
0.726054
460
3,581
5.384783
0.347826
0.069035
0.1304
0.038757
0.281389
0.16956
0.104158
0.104158
0.076706
0.076706
0
0.005382
0.169785
3,581
89
79
40.235955
0.827783
0.244624
0
0.140351
0
0
0.224879
0.100486
0
0
0
0
0
1
0.017544
false
0
0.052632
0
0.070175
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5612459fcdf8951d0bf375d61705d77efffeb9d8
1,642
py
Python
examples/slash_commands.py
z03h/discord.py
7e5831ba9cc3f881e11b3536159a3851fba6ab52
[ "MIT" ]
7
2021-09-12T02:31:57.000Z
2022-02-20T21:15:35.000Z
examples/slash_commands.py
jay3332/discord.py
953f067e3b5ee33f5be62ae614ac724afc289879
[ "MIT" ]
13
2021-11-04T00:32:25.000Z
2022-03-02T03:03:54.000Z
examples/slash_commands.py
jay3332/discord.py
953f067e3b5ee33f5be62ae614ac724afc289879
[ "MIT" ]
null
null
null
# Example of defining Discord application ("slash") commands with the
# discord.application_commands extension (fork of discord.py).
import discord
from discord.application_commands import ApplicationCommand, ApplicationCommandTree, option

# Every command class below registers itself onto this tree via ``tree=tree``.
tree = ApplicationCommandTree(guild_id=1234)  # Replace with your guild ID, or ``None`` to register commands globally


class Ping(ApplicationCommand, name='ping', tree=tree):
    """Pong!"""

    async def callback(self, interaction: discord.Interaction):
        # Invoked when a user runs /ping.
        await interaction.response.send_message('Pong!')


class Math(ApplicationCommand, name='math', tree=tree):
    """Basic math operations."""


class Add(ApplicationCommand, name='add', parent=Math):
    """Sum of x + y."""

    # Required integer options; ``option()`` declares a slash-command argument.
    x: int = option(description='Value of "x"', required=True)
    y: int = option(description='Value of "y"', required=True)

    async def callback(self, interaction: discord.Interaction):
        answer = self.x + self.y
        await interaction.response.send_message(f'The value of {self.x} + {self.y} is **{answer}**.', ephemeral=True)


class Subtract(ApplicationCommand, name='subtract', parent=Math):
    """Difference of x - y."""

    x: int = option(description='Value of "x"', required=True)
    y: int = option(description='Value of "y"', required=True)

    async def callback(self, interaction: discord.Interaction):
        answer = self.x - self.y
        await interaction.response.send_message(f'The value of {self.x} - {self.y} is **{answer}**.', ephemeral=True)


class Client(discord.Client):
    async def on_ready(self):
        # Fired once the gateway connection is ready.
        print(f'Logged in as {self.user} (ID: {self.user.id})')
        print('------')


# update_application_commands_at_startup syncs the declared commands with
# Discord when the client connects.
client = Client(update_application_commands_at_startup=True)
client.add_application_command_tree(tree)
client.run('token')
34.93617
117
0.690621
210
1,642
5.338095
0.319048
0.037467
0.071365
0.089206
0.490633
0.459411
0.459411
0.4157
0.4157
0.4157
0
0.002905
0.161389
1,642
46
118
35.695652
0.811184
0.074909
0
0.269231
0
0
0.150868
0
0
0
0
0
0
1
0
false
0
0.076923
0
0.423077
0.076923
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5612e312b8ffbaa468da026af436acb1c1385add
10,887
py
Python
Examples/tk_simple_dialog.py
Aarif1430/Python-Awesome-notes-and-exercises-list
c8ad7f90ebd973025f37d4e79c2f1229a8a2915c
[ "MIT" ]
2
2021-01-13T21:20:57.000Z
2021-08-18T17:53:53.000Z
Examples/tk_simple_dialog.py
Aarif1430/Python-Awesome-notes-and-exercises-list
c8ad7f90ebd973025f37d4e79c2f1229a8a2915c
[ "MIT" ]
null
null
null
Examples/tk_simple_dialog.py
Aarif1430/Python-Awesome-notes-and-exercises-list
c8ad7f90ebd973025f37d4e79c2f1229a8a2915c
[ "MIT" ]
1
2020-11-05T09:56:55.000Z
2020-11-05T09:56:55.000Z
#!/usr/bin/env python
"""Small Tkinter dialogs used by a GIMP-based lecture-slide workflow.

Each dialog class builds its widgets in ``__init__`` and immediately
blocks in ``mainloop()``; state is persisted through a pickle file.
Python 2 code (``Tkinter``, ``dict.has_key``).
"""
import Tkinter
import tkMessageBox
import rwkpickle, rwkos, os, glob
from Tkinter import StringVar, IntVar, DoubleVar

# NOTE(review): rwkos/rwkpickle are personal helper modules; FindFullPath
# presumably searches a known set of directories for the rc pickle — confirm.
pklpath = rwkos.FindFullPath('pygimp_lecturerc.pkl')


class myWindow:
    """Minimal one-entry prompt window ("Enter Quiz #")."""

    def close(self, *args, **kwargs):
        # Bound to <Return>; destroys the window, ending mainloop().
        #print('got close event')
        self.mw.destroy()

    def __init__(self, title="Enter Quiz #"):
        self.mw = Tkinter.Tk()
        self.mw.option_add("*font", ("Arial", 15, "normal"))
        self.mw.geometry("+250+200")
        self.var = Tkinter.StringVar()  # holds the typed value after close
        entry = Tkinter.Entry(self.mw, textvariable=self.var)
        entry.focus_set()
        entry.pack()
        #entry.bind("<KP_Enter>", self.close)
        entry.bind("<Return>", self.close)
        self.mw.title(title)
        self.btn2 = Tkinter.Button(self.mw, text = "Exit", command = self.mw.destroy)
        self.btn2.pack()
        self.mw.mainloop()  # blocks until close/Exit


class width_and_dpi_dialog:#(tkSimpleDialog.Dialog):
    """Two-field dialog asking for an image width (inches) and a DPI.

    After it closes, ``self.result`` is the tuple ``(width_float, dpi)``.
    """

    def close(self, *args, **kwargs):
        # Capture entry values into self.result before tearing down.
        #print('got close event')
        self.width_float = float(self.width_string.get())
        self.result = self.width_float, self.dpi_int.get()
        print('result = %f, %i' % self.result)
        self.mw.destroy()

    def __init__(self, title="Width and DPI Dialog"):
        self.result = None  # remains None if the user exits without "Go"
        self.mw = Tkinter.Tk()
        self.mw.option_add("*font", ("Arial", 15, "normal"))
        self.mw.geometry("+250+200")
        Tkinter.Label(self.mw, text="Width (in.):").grid(row=0)
        Tkinter.Label(self.mw, text="dpi:").grid(row=1)
        # Width is read as a string and converted in close(); a DoubleVar
        # would raise on an empty entry.
        self.width_string = Tkinter.StringVar()#Tkinter.DoubleVar()
        self.dpi_int = Tkinter.IntVar()
        width_entry = Tkinter.Entry(self.mw, textvariable=self.width_string)
        width_entry.grid(row=0, column=1)
        dpi_entry = Tkinter.Entry(self.mw, textvariable=self.dpi_int)
        dpi_entry.grid(row=1, column=1)
        self.dpi_int.set(300)       # sensible print default
        self.width_string.set('')
        #self.width_float.set(3.0)
        #entry.pack()
        #entry.bind("<KP_Enter>", self.close)
        width_entry.bind("<Return>", self.close)
        self.mw.title(title)
        self.exit_btn = Tkinter.Button(self.mw, text = "Exit", command = self.mw.destroy)
        self.exit_btn.grid(row=2, column=0)
        self.go_btn = Tkinter.Button(self.mw, text = "Go", command = self.close)
        self.go_btn.grid(row=2, column=1)
        width_entry.focus_set()
        self.mw.mainloop()

## def body(self, master):
##     Label(master, text="First:").grid(row=0)
##     Label(master, text="Second:").grid(row=1)
##     self.e1 = Entry(master)
##     self.e2 = Entry(master)
##     self.e1.grid(row=0, column=1)
##     self.e2.grid(row=1, column=1)
##     return self.e1 # initial focus

## def apply(self):
##     first = string.atoi(self.e1.get())
##     second = string.atoi(self.e2.get())
##     self.result = first, second
##     print first, second # or something

## def btnClick(self):
##     self.answer = tkMessageBox.askyesno(title = "Your Choice", message = 'Please click either "Yes" or "No".')
##     if self.answer:
##         tkMessageBox.showinfo(title = "Yes", message = "Your choice was: Yes.")
##     else:
##         tkMessageBox.showinfo(title = "No", message = "Your choice was: No.")


class pickle_entry(object):
    """One labeled Entry row bound to a single key of the parent's pickle."""

    def __init__(self, parent, mw, label, key, row, \
                 varclass=None):
        # varclass selects the Tk variable type (StringVar default, IntVar
        # for numeric fields).
        if varclass is None:
            varclass = StringVar
        self.var = varclass()
        self.parent = parent
        self.mw = mw
        self.label = label
        self.key = key
        self.row = row
        curtext = label + ":"
        Tkinter.Label(mw, text=curtext).grid(row=row, column=0, sticky='e')
        self.entry = Tkinter.Entry(mw, textvariable=self.var, \
                                   width=25)
        self.entry.grid(row=row, column=1)

    def get(self):
        # (key, current widget value) pair, ready to store in the pickle.
        return self.key, self.var.get()

    def load_pickle(self):
        # Copy the value for our key from the parent's pickle into the widget.
        value = self.parent.pickle[self.key]
        self.var.set(value)


class lecture_pickle_dialog:#(tkSimpleDialog.Dialog):
    """Editor for all fields of the lecture rc pickle.

    Builds one pickle_entry row per (label, key, varclass) triple, loads
    current values, and writes everything back on "Go".
    """

    def close(self, *args, **kwargs):
        # "Go": push widget values into the pickle, save it, and exit.
        print('got close event')
        #self.width_float = float(self.width_string.get())
        #self.result = self.width_float, self.dpi_int.get()
        #print('result = %f, %i' % self.result)
        self.set_pickle()
        self.save_pickle()
        self.mw.destroy()

    def load_pickle(self):
        # Populate every entry widget from the pickle.
        for entry in self.entries:
            entry.load_pickle()

    def set_pickle(self):
        # Copy every widget value back into the in-memory pickle dict.
        for entry in self.entries:
            key, val = entry.get()
            self.pickle[key] = val

    def save_pickle(self):
        rwkpickle.SavePickle(self.pickle, pklpath)

    def __init__(self, title="Lecture Pickle Dialog"):
        self.pickle = rwkpickle.LoadPickle(pklpath)
        self.result = None
        self.mw = Tkinter.Tk()
        self.mw.option_add("*font", ("Arial", 15, "normal"))
        self.mw.geometry("+400+300")
        # NOTE(review): self.labels/self.keys duplicate self.data and are
        # otherwise unused here.
        self.labels = ['Lecture Path', 'Course Num.', \
                       'Search Pattern', 'Date Stamp', \
                       'Pat', 'Current Slide', \
                       'Outline Slide']
        self.keys = ['lecture_path', 'course_num', \
                     'search_pat', 'date_stamp' , \
                     'pat', 'current_slide', 'outline_slide']
        # (display label, pickle key, Tk variable class) per editable field.
        self.data = [('Lecture Path', 'lecture_path', StringVar), \
                     ('Course Num.', 'course_num', StringVar), \
                     ('Search Pattern', 'search_pat', StringVar), \
                     ('Date Stamp', 'date_stamp', StringVar), \
                     ('Pat', 'pat', StringVar), \
                     ('Current Slide', 'current_slide', IntVar), \
                     ('Outline Slide', 'outline_slide', IntVar), \
                     ]
        self.entries = []
        for i, tup in enumerate(self.data):
            label = tup[0]
            key = tup[1]
            varclass = tup[2]
            pickle = pickle_entry(self, self.mw, \
                                  label=label, \
                                  key=key, \
                                  row=i, \
                                  varclass=varclass)
            self.entries.append(pickle)
        N = len(self.data)  # buttons go on the row below the last entry
        self.mw.title('Pickle Editor')
        self.exit_btn = Tkinter.Button(self.mw, text = "Exit", command = self.mw.destroy)
        self.exit_btn.grid(row=N, column=0)
        self.go_btn = Tkinter.Button(self.mw, text = "Go", command = self.close)
        self.go_btn.grid(row=N, column=1)
        self.load_pickle()
        self.mw.mainloop()


class reset_lecture_dialog:#(tkSimpleDialog.Dialog):
    """Dialog to reset lecture state: zero the current slide and optionally
    reset the outline slide and/or delete already-generated slide files."""

    def close(self, *args, **kwargs):
        # "Go": apply the selected reset actions and persist the pickle.
        print('got close event')
        self.pickle['current_slide'] = 0
        #self.width_float = float(self.width_string.get())
        #self.result = self.width_float, self.dpi_int.get()
        #print('result = %f, %i' % self.result)
        if self.var1.get():
            print('reseting outline slide')
            self.reset_outline()
        if self.var2.get():
            print('deleting existing slides')
            self.delete_existing_slides()
        rwkpickle.SavePickle(self.pickle, pklpath)
        self.mw.destroy()

    def reset_outline(self):
        # Zero the outline counter and drop cached outline paths/patterns.
        self.pickle['outline_slide'] = 0
        clear_list = ['outline_pat','outline_dir']
        for key in clear_list:
            if self.pickle.has_key(key):
                self.pickle.pop(key)

    def _build_pat(self, end='*'):
        # Glob pattern rooted at the lecture directory: search_pat + suffix.
        lp = self.pickle['lecture_path']
        pat = self.pickle['search_pat'] + end
        return os.path.join(lp, pat)

    def build_xcf_pat(self):
        self.xcf_pat = self._build_pat(end='*.xcf')

    def build_delete_pat(self):
        self.delete_pat = self._build_pat(end='*')

    def delete_existing_slides(self):
        # Deletes every file matching the lecture pattern — destructive.
        self.build_delete_pat()
        rwkos.delete_from_glob_pat(self.delete_pat)

    def __init__(self, title="Reset Lecture Dialog"):
        self.result = None
        self.mw = Tkinter.Tk()
        self.mw.option_add("*font", ("Arial", 15, "normal"))
        self.mw.geometry("+300+300")
        self.pickle = rwkpickle.LoadPickle(pklpath)
        #Need to display the number of existing slides and the
        #current outline slide number
        label1 = Tkinter.Label(self.mw, \
                               text='Number of existing slides')
        label1.grid(row=0, column=0, sticky='w')
        self.num_slides = IntVar()
        self.entry1 = Tkinter.Entry(self.mw, \
                                    textvariable=self.num_slides, \
                                    width=5)
        self.entry1.grid(row=0, column=1)
        self.build_xcf_pat()
        self.existing_slides = glob.glob(self.xcf_pat)
        self.num_slides.set(len(self.existing_slides))
        label2 = Tkinter.Label(self.mw, \
                               text='Outline Slide')
        label2.grid(row=1, column=0, sticky='w')
        self.outline_slide = IntVar()
        self.entry2 = Tkinter.Entry(self.mw, \
                                    textvariable=self.outline_slide, \
                                    width=5)
        self.entry2.grid(row=1, column=1)
        self.outline_slide.set(self.pickle['outline_slide'])
        # Checkbox: also reset the outline slide counter.
        self.var1 = IntVar()
        check1 = Tkinter.Checkbutton(self.mw, \
                                     text="Reset outline slide", \
                                     variable=self.var1)
        check1.var = self.var1
        check1.grid(row=2, sticky='w')
        # Checkbox: also delete generated slide files on disk.
        self.var2 = IntVar()
        check2 = Tkinter.Checkbutton(self.mw, \
                                     text="Delete existing slides", \
                                     variable=self.var2)
        check2.var = self.var2
        check2.grid(row=3, sticky='w')
        self.go_btn = Tkinter.Button(self.mw, text = "Go", command = self.close)
        self.go_btn.bind("<Return>", self.close)
        self.go_btn.grid(row=4)
        self.go_btn.focus_set()
        self.mw.title(title)
        self.mw.mainloop()


if __name__ == "__main__":
    #app = myWindow()
    #app = width_and_dpi_dialog()
    #app = lecture_pickle_dialog()
    app = reset_lecture_dialog()
32.792169
117
0.524479
1,229
10,887
4.519121
0.154597
0.049694
0.021606
0.020526
0.4287
0.340835
0.314908
0.269175
0.242528
0.242528
0
0.015105
0.343253
10,887
331
118
32.891239
0.761678
0.135758
0
0.267606
0
0
0.088585
0
0
0
0
0
0
1
0.089202
false
0
0.018779
0.004695
0.140845
0.023474
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5613450d1a3d2afb56deff14d8b5138f01e852ba
3,159
py
Python
gqcnn/grasping/constraint_fn.py
richardliaw/gqcnn
a0930e9d2fef3c930c41dd91cde902d261348fbe
[ "CNRI-Python" ]
1
2019-05-29T00:16:56.000Z
2019-05-29T00:16:56.000Z
gqcnn/grasping/constraint_fn.py
richardliaw/gqcnn
a0930e9d2fef3c930c41dd91cde902d261348fbe
[ "CNRI-Python" ]
null
null
null
gqcnn/grasping/constraint_fn.py
richardliaw/gqcnn
a0930e9d2fef3c930c41dd91cde902d261348fbe
[ "CNRI-Python" ]
4
2019-05-22T17:33:30.000Z
2020-02-18T03:44:01.000Z
""" Constraint functions for grasp sampling Author: Jeff Mahler """ from abc import ABCMeta, abstractmethod import numpy as np class GraspConstraintFn(object): """ Abstract constraint functions for grasp sampling. """ __metaclass__ = ABCMeta def __init__(self, config): # set params self._config = config def __call__(self, grasp): """ Evaluates whether or not a grasp is valid. Parameters ---------- grasp : :obj:`Grasp2D` grasp to evaluate Returns ------- bool True if the grasp satisfies constraints, False otherwise """ return self.satisfies_constraints(grasp) @abstractmethod def satisfies_constraints(self, grasp): """ Evaluates whether or not a grasp is valid. Parameters ---------- grasp : :obj:`Grasp2D` grasp to evaluate Returns ------- bool True if the grasp satisfies constraints, False otherwise """ pass class DiscreteApproachGraspConstraintFn(GraspConstraintFn): """ Constrains the grasp approach direction into a discrete set of angles from the world z direction. """ def __init__(self, config): # init superclass GraspConstraintFn.__init__(self, config) self._max_approach_angle = self._config['max_approach_angle'] self._angular_tolerance = self._config['angular_tolerance'] self._angular_step = self._config['angular_step'] self._T_camera_world = self._config['camera_pose'] def satisfies_constraints(self, grasp): """ Evaluates whether or not a grasp is valid by evaluating the angle between the approach axis and the world z direction. 
Parameters ---------- grasp : :obj:`Grasp2D` grasp to evaluate Returns ------- bool True if the grasp satisfies constraints, False otherwise """ # find grasp angle in world coordinates axis_world = self._T_camera_world.rotation.dot(grasp.approach_axis) angle = np.arccos(-axis_world[2]) # check closest available angle available_angles = np.array([0.0]) if self._angular_step > 0: available_angles = np.arange(start=0.0, stop=self._max_approach_angle, step=self._angular_step) diff = np.abs(available_angles - angle) angle_index = np.argmin(diff) closest_angle = available_angles[angle_index] if diff[angle_index] < self._angular_tolerance: return True return False class GraspConstraintFnFactory(object): @staticmethod def constraint_fn(fn_type, config): if fn_type == 'none': return None elif fn_type == 'discrete_approach_angle': return DiscreteApproachGraspConstraintFn(config) else: raise ValueError('Grasp constraint function type %s not supported!' %(fn_type))
29.523364
91
0.598291
325
3,159
5.590769
0.326154
0.044029
0.023115
0.041277
0.298294
0.259769
0.259769
0.259769
0.259769
0.259769
0
0.004178
0.318139
3,159
106
92
29.801887
0.839369
0.30421
0
0.097561
0
0
0.070594
0.012208
0
0
0
0
0
1
0.146341
false
0.02439
0.04878
0
0.414634
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5614247b9f08fee16422d09e7313f385f4710f7f
1,198
py
Python
backend/apis/family_group/models/memberships.py
sango09/senasoft
d7e5b0fae620764b3af73955f7771dcee5746dfc
[ "MIT" ]
1
2020-10-17T03:39:56.000Z
2020-10-17T03:39:56.000Z
backend/apis/family_group/models/memberships.py
sango09/senasoft
d7e5b0fae620764b3af73955f7771dcee5746dfc
[ "MIT" ]
null
null
null
backend/apis/family_group/models/memberships.py
sango09/senasoft
d7e5b0fae620764b3af73955f7771dcee5746dfc
[ "MIT" ]
null
null
null
"""Modelo de pacientes""" # Django from django.db import models # Utilidades from apis.utils.models import ModelUtil class Memberships(ModelUtil): """Modelo de pacientes Un paciente puede tener un grupo familiar asociado, si el paciente crea el grupo sera el titular de la familia, los miembros del grupo familiar no podran agregar mas familiares. Solo el titular podra actualizar o eliminar miembros de su grupo familiar """ user = models.ForeignKey('users.User', on_delete=models.CASCADE) family_group = models.ForeignKey('family_group.FamilyGroup', on_delete=models.CASCADE) pacient = models.ForeignKey('pacient.Pacient', on_delete=models.CASCADE) is_admin = models.BooleanField('Titular', default=False) affiliated = models.PositiveIntegerField(default=0) remaining_affiliates = models.PositiveIntegerField(default=0) affiliated_by = models.ForeignKey( 'users.User', null=True, on_delete=models.CASCADE, related_name='affiliated_by' ) def __str__(self): """Regresa el username y el grupo familiar al que pertenece""" return f'{self.user.username} hace parte del grupo familiar'
33.277778
90
0.724541
150
1,198
5.686667
0.533333
0.076202
0.065651
0.098476
0
0
0
0
0
0
0
0.002062
0.190317
1,198
35
91
34.228571
0.87732
0.308013
0
0
0
0
0.163085
0.030341
0
0
0
0
0
1
0.058824
false
0
0.117647
0
0.705882
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
56149181b9ac3f4c3f13a262ec51ccae2f8c900c
193
py
Python
django_amenities/apps.py
okfde/django-amenities
a5522e33e27deb93a2ed49f89a6d9222fc146523
[ "MIT" ]
1
2020-09-17T12:27:04.000Z
2020-09-17T12:27:04.000Z
django_amenities/apps.py
okfde/django-amenities
a5522e33e27deb93a2ed49f89a6d9222fc146523
[ "MIT" ]
null
null
null
django_amenities/apps.py
okfde/django-amenities
a5522e33e27deb93a2ed49f89a6d9222fc146523
[ "MIT" ]
null
null
null
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _


class AmenitiesConfig(AppConfig):
    """Django application configuration for the ``django_amenities`` app."""

    # Dotted module path Django uses to identify this application.
    name = 'django_amenities'
    # Human-readable name (e.g. in the admin); lazily translated.
    verbose_name = _("Amenities App")
24.125
54
0.777202
23
193
6.304348
0.695652
0.137931
0
0
0
0
0
0
0
0
0
0
0.150259
193
7
55
27.571429
0.884146
0
0
0
0
0
0.150259
0
0
0
0
0
0
1
0
false
0
0.4
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
5614d2570169d56a1b186afec8d94e21051b1226
8,051
py
Python
aiida/cmdline/commands/shell.py
astamminger/aiida_core
b01ad8236f21804f273c9d2a0365ecee62255cbb
[ "BSD-2-Clause" ]
null
null
null
aiida/cmdline/commands/shell.py
astamminger/aiida_core
b01ad8236f21804f273c9d2a0365ecee62255cbb
[ "BSD-2-Clause" ]
null
null
null
aiida/cmdline/commands/shell.py
astamminger/aiida_core
b01ad8236f21804f273c9d2a0365ecee62255cbb
[ "BSD-2-Clause" ]
null
null
null
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved.                     #
# This file is part of the AiiDA code.                                    #
#                                                                         #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file        #
# For further information please visit http://www.aiida.net               #
###########################################################################
from __future__ import absolute_import
import os

from aiida.cmdline.baseclass import VerdiCommand

# (module path, attribute name, shell alias) triples that are imported into
# the interactive shell's namespace by default.
default_modules_list = [
    ("aiida.orm", "Node", "Node"),
    ("aiida.orm.utils", "load_node", "load_node"),
    ("aiida.orm", "Calculation", "Calculation"),
    ("aiida.orm", "JobCalculation", "JobCalculation"),
    ("aiida.orm.code", "Code", "Code"),
    ("aiida.orm", "Data", "Data"),
    ("aiida.orm", "CalculationFactory", "CalculationFactory"),
    ("aiida.orm", "DataFactory", "DataFactory"),
    ("aiida.orm", "WorkflowFactory", "WorkflowFactory"),
    ("aiida.orm.computer", "Computer", "Computer"),
    ("aiida.orm.group", "Group", "Group"),
    ("aiida.orm.workflow", "Workflow", "Workflow"),
    ("aiida.orm", "load_workflow", "load_workflow"),
    ("aiida.orm.querybuilder", "QueryBuilder", "QueryBuilder"),
    # ("aiida.backends.djsite.db", "models", "models"),
    # ("aiida.backends.sqlalchemy", "models", "models"),
]


class Shell(VerdiCommand):
    """
    Run the interactive shell with the AiiDA environment loaded.

    This command opens an ipython shell with the AiiDA environment loaded.
    """

    # Shell backends tried by run_shell(), in order of preference.
    shells = ['ipython', 'bpython']

    def get_start_namespace(self):
        """Load all default and custom modules"""
        from aiida import load_dbenv, is_dbenv_loaded
        from aiida.backends import settings
        if not is_dbenv_loaded():
            load_dbenv(profile=settings.AIIDADB_PROFILE)
        from aiida.common.setup import get_property
        user_ns = {}
        # load default modules
        for app_mod, model_name, alias in default_modules_list:
            user_ns[alias] = getattr(__import__(app_mod, {}, {}, model_name), model_name)

        # load custom modules: the 'verdishell.modules' property is a
        # colon-separated list of dotted paths ("pkg.mod.attr"); entries
        # without a '.' are silently skipped (e[1] is the partition sep).
        custom_modules_list = [(str(e[0]), str(e[2])) for e in
                               [p.rpartition('.') for p in get_property(
                                   'verdishell.modules', default="").split(
                                   ':')] if e[1] == '.']

        for app_mod, model_name in custom_modules_list:
            try:
                user_ns[model_name] = getattr(
                    __import__(app_mod, {}, {}, model_name), model_name)
            except AttributeError:
                # if the module does not exist, we ignore it
                pass

        return user_ns

    def _ipython_pre_011(self):
        """Start IPython pre-0.11"""
        from IPython.Shell import IPShell

        user_ns = self.get_start_namespace()
        if user_ns:
            shell = IPShell(argv=[], user_ns=user_ns)
        else:
            shell = IPShell(argv=[])
        shell.mainloop()

    def _ipython_pre_100(self):
        """Start IPython pre-1.0.0"""
        from IPython.frontend.terminal.ipapp import TerminalIPythonApp

        app = TerminalIPythonApp.instance()
        app.initialize(argv=[])
        user_ns = self.get_start_namespace()
        if user_ns:
            app.shell.user_ns.update(user_ns)
        app.start()

    def _ipython(self):
        """Start IPython >= 1.0"""
        from IPython import start_ipython

        user_ns = self.get_start_namespace()
        if user_ns:
            start_ipython(argv=[], user_ns=user_ns)
        else:
            start_ipython(argv=[])

    def ipython(self):
        """Start any version of IPython"""
        # Try the modern entry point first, then fall back to legacy APIs.
        for ip in (self._ipython, self._ipython_pre_100, self._ipython_pre_011):
            try:
                ip()
            except ImportError as ie:
                pass
            else:
                return
        # no IPython, raise ImportError
        raise ImportError("No IPython")

    def bpython(self):
        # Start a bpython shell with the AiiDA namespace preloaded.
        import bpython

        user_ns = self.get_start_namespace()
        if user_ns:
            bpython.embed(user_ns)
        else:
            bpython.embed()

    def run_shell(self, shell=None):
        # Launch the requested shell, or the first importable one from
        # self.shells; re-raises ImportError when none is available.
        available_shells = [shell] if shell else self.shells

        for shell in available_shells:
            try:
                return getattr(self, shell)()
            except ImportError:
                pass
        raise ImportError

    def handle_noargs(self, *args):
        """Parse CLI options and launch a shell, falling back to plain
        Python (with readline and $PYTHONSTARTUP support) when requested
        or when no fancy shell is importable."""
        import argparse
        parser = argparse.ArgumentParser(prog='verdi shell')
        parser.add_argument('--plain', dest='plain', action='store_true',
                            help='Tells Django to use plain Python, not '
                                 'IPython or bpython.)')
        parser.add_argument('--no-startup', action='store_true',
                            dest='no_startup',
                            help='When using plain Python, ignore the '
                                 'PYTHONSTARTUP environment variable and '
                                 '~/.pythonrc.py script.')
        parser.add_argument('-i', '--interface', action='store',
                            choices=self.shells, dest='interface',
                            help='Specify an interactive interpreter '
                                 'interface. Available options: "ipython" '
                                 'and "bpython"')

        parsed_args = parser.parse_args(args)

        use_plain = parsed_args.plain
        no_startup = parsed_args.no_startup
        interface = parsed_args.interface

        try:
            if use_plain:
                # Don't bother loading IPython, because the user wants plain Python.
                raise ImportError
            self.run_shell(shell=interface)
        except ImportError:
            import code
            # Set up a dictionary to serve as the environment for the shell, so
            # that tab completion works on objects that are imported at runtime.
            # See ticket 5082.
            imported_objects = {}
            try:
                # Try activating rlcompleter, because it's handy.
                import readline
            except ImportError:
                pass
            else:
                # We don't have to wrap the following import in a 'try', because
                # we already know 'readline' was imported successfully.
                import rlcompleter
                readline.set_completer(
                    rlcompleter.Completer(imported_objects).complete)
                readline.parse_and_bind("tab:complete")

            # We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
            # conventions and get $PYTHONSTARTUP first then .pythonrc.py.
            if not no_startup:
                for pythonrc in (
                        os.environ.get("PYTHONSTARTUP"), '~/.pythonrc.py'):
                    if not pythonrc:
                        continue
                    pythonrc = os.path.expanduser(pythonrc)
                    if not os.path.isfile(pythonrc):
                        continue
                    try:
                        with open(pythonrc) as handle:
                            exec (compile(handle.read(), pythonrc, 'exec'),
                                  imported_objects)
                    except NameError:
                        pass
            code.interact(local=imported_objects)

    def run(self, *args):
        # pass_to_django_manage([execname, 'customshell'] + list(args))
        self.handle_noargs(*args)

    def complete(self, subargs_idx, subargs):
        # disable further completion
        print ""
36.931193
85
0.531859
808
8,051
5.144802
0.311881
0.027424
0.020447
0.014433
0.08612
0.07746
0.051479
0.051479
0.033678
0
0
0.005366
0.351882
8,051
217
86
37.101382
0.791299
0.158614
0
0.212329
0
0
0.13831
0.003502
0
0
0
0
0
0
null
null
0.034247
0.184932
null
null
0.006849
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
56179b533974125c261d08cc31294d0f7cdd2f1f
4,052
py
Python
hw4/dynamics.py
tombroz/berkeley-cs294_homework
5419b772c734093c750362d2e09b46ce59d79da6
[ "MIT" ]
null
null
null
hw4/dynamics.py
tombroz/berkeley-cs294_homework
5419b772c734093c750362d2e09b46ce59d79da6
[ "MIT" ]
null
null
null
hw4/dynamics.py
tombroz/berkeley-cs294_homework
5419b772c734093c750362d2e09b46ce59d79da6
[ "MIT" ]
null
null
null
import os
import tensorflow as tf
import numpy as np

# Small epsilon added to standard deviations to avoid division by zero.
C = 1e-13


# Predefined function to build a feedforward neural network
def build_mlp(input_placeholder,
              output_size,
              scope,
              n_layers=2,
              size=500,
              activation=tf.tanh,
              output_activation=None
              ):
    """Build an MLP of ``n_layers`` hidden layers of width ``size`` on top
    of ``input_placeholder`` inside variable scope ``scope``; the final
    layer has ``output_size`` units and ``output_activation``."""
    out = input_placeholder
    with tf.variable_scope(scope):
        for _ in range(n_layers):
            out = tf.layers.dense(out, size, activation=activation)
        out = tf.layers.dense(out, output_size, activation=output_activation)
    return out


class NNDynamicsModel():
    """Learned dynamics model: predicts the normalized state difference
    (s_{t+1} - s_t) from a normalized (state, action) pair."""

    def __init__(self,
                 env,
                 n_layers,
                 size,
                 activation,
                 output_activation,
                 normalization,
                 batch_size,
                 iterations,
                 learning_rate,
                 sess
                 ):
        """Set up placeholders, the prediction network, the MSE loss and
        the Adam optimizer. ``normalization`` is the 6-tuple of dataset
        means/stds for observations, deltas and actions.

        Note: Be careful about normalization.
        """
        self.mean_obs, self.std_obs, self.mean_deltas, self.std_deltas, \
            self.mean_actions, self.std_actions = normalization
        self.obs_dim = env.observation_space.shape[0]
        self.actions_dim = env.action_space.shape[0]
        # Input is the concatenation of (normalized) state and action.
        self.in_states_acts = tf.placeholder(
            tf.float32, [None, self.obs_dim + self.actions_dim], name='states_actions')
        self.out_states_deltas = tf.placeholder(
            tf.float32, [None, self.obs_dim], name='states_deltas')
        self.epochs = iterations
        self.gstep = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
        self.pred_delt = build_mlp(self.in_states_acts, self.obs_dim, "pred_state_delta",
                                   n_layers, size, activation, output_activation)
        self.batch_size = batch_size
        self.lr = learning_rate
        self.loss = tf.losses.mean_squared_error(self.out_states_deltas, self.pred_delt)
        self.opt = tf.train.AdamOptimizer(self.lr).minimize(self.loss)
        self.sess = sess

    def fit(self, data):
        """
        a function to take in a dataset of (unnormalized)states,
        (unnormalized)actions, (unnormalized)next_states and fit the
        dynamics model going from normalized states, normalized actions
        to normalized state differences (s_t+1 - s_t)
        """
        obs = np.vstack([path['observations'] for path in data])
        actions = np.vstack([path['actions'] for path in data])
        next_obs = np.vstack([path['next_observations'] for path in data])

        norm_obs = (obs - self.mean_obs) / (self.std_obs + C)
        norm_actions = (actions - self.mean_actions) / (self.std_actions + C)
        # BUGFIX: the regression target is the normalized state DIFFERENCE
        # (s_{t+1} - s_t), not the normalized next state.
        norm_delta = ((next_obs - obs) - self.mean_deltas) / (self.std_deltas + C)
        # BUGFIX: states and actions are concatenated along the feature
        # axis (hstack) to match the [None, obs_dim + actions_dim] input,
        # not stacked as extra rows (vstack).
        obs_actions = np.hstack((norm_obs, norm_actions))

        n_batches = obs.shape[0] // self.batch_size + 1
        for ep in range(self.epochs):
            # BUGFIX: np.random.choice(n) returns a single scalar;
            # a permutation of all row indices is needed for shuffling.
            perm_ids = np.random.permutation(obs.shape[0])
            tl = 0.
            for st in range(n_batches):
                start_id = st * self.batch_size
                batch_ids = perm_ids[start_id:start_id + self.batch_size]
                if batch_ids.size == 0:
                    # Last batch can be empty when n divides evenly.
                    continue
                # BUGFIX: fancy-index the batch rows; the original sliced
                # with an array as the slice start (obs_actions[ids:]).
                in_batch = obs_actions[batch_ids]
                out_batch = norm_delta[batch_ids]
                l, _ = self.sess.run(
                    [self.loss, self.opt],
                    feed_dict={self.in_states_acts: in_batch,
                               self.out_states_deltas: out_batch})
                tl += l
            print("Epoch {0}/{1}: Train_loss = {2:.6f}".format(ep, self.epochs, tl / n_batches))

    def predict(self, states, actions):
        """
        a function to take in a batch of (unnormalized) states and
        (unnormalized) actions and return the (unnormalized) next states
        as predicted by using the model
        """
        norm_obs = (states - self.mean_obs) / (self.std_obs + C)
        norm_actions = (actions - self.mean_actions) / (self.std_actions + C)
        obs_actions = np.hstack((norm_obs, norm_actions))
        # BUGFIX: run the tensor directly; sess.run([t]) returns a
        # one-element list, which breaks the arithmetic below.
        pred_states_deltas = self.sess.run(
            self.pred_delt, feed_dict={self.in_states_acts: obs_actions})
        # Un-normalize the predicted delta and add it to the input states.
        unnormalized = states + self.mean_deltas + pred_states_deltas * self.std_deltas
        return unnormalized
44.527473
235
0.633761
535
4,052
4.568224
0.254206
0.02946
0.016367
0.026187
0.261047
0.225041
0.141162
0.116612
0.087152
0.057283
0
0.008378
0.263574
4,052
90
236
45.022222
0.810657
0.118707
0
0.084507
0
0
0.035361
0
0
0
0
0
0
1
0.056338
false
0
0.042254
0
0.140845
0.014085
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
56192f9c7a8a841e4444c9e8024df8a56b978917
4,881
py
Python
linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/Util/Introspector.py
mdavid/nuxleus
653f1310d8bf08eaa5a7e3326c2349e56a6abdc2
[ "BSD-3-Clause" ]
1
2017-03-28T06:41:51.000Z
2017-03-28T06:41:51.000Z
linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/Util/Introspector.py
mdavid/nuxleus
653f1310d8bf08eaa5a7e3326c2349e56a6abdc2
[ "BSD-3-Clause" ]
null
null
null
linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Kamaelia/Util/Introspector.py
mdavid/nuxleus
653f1310d8bf08eaa5a7e3326c2349e56a6abdc2
[ "BSD-3-Clause" ]
1
2016-12-13T21:08:58.000Z
2016-12-13T21:08:58.000Z
#!/usr/bin/env python # Copyright (C) 2006 British Broadcasting Corporation and Kamaelia Contributors(1) # All Rights Reserved. # # You may only modify and redistribute this under the terms of any of the # following licenses(2): Mozilla Public License, V1.1, GNU General # Public License, V2.0, GNU Lesser General Public License, V2.1 # # (1) Kamaelia Contributors are listed in the AUTHORS file and at # http://kamaelia.sourceforge.net/AUTHORS - please extend this file, # not this notice. # (2) Reproduced in the COPYING file, and at: # http://kamaelia.sourceforge.net/COPYING # Under section 3.5 of the MPL, we are using this text since we deem the MPL # notice inappropriate for this file. As per MPL/GPL/LGPL removal of this # notice is prohibited. # # Please contact us via: kamaelia-list-owner@lists.sourceforge.net # to discuss alternative licensing. # ------------------------------------------------------------------------- """\ =================================================== Detecting the topology of a running Kamaelia system =================================================== The Introspector component introspects the current local topology of a Kamaelia system - that is what components there are and how they are wired up. It continually outputs any changes that occur to the topology. Example Usage ------------- Introspect and display whats going on inside the system:: MyComplexSystem().activate() Pipeline( Introspector(), text_to_token_lists() AxonVisualiser(), ) How does it work? ----------------- Once activated, this component introspects the current local topology of a Kamaelia system. Local? This component examines its scheduler to find components and postmen. It then examines them to determine their inboxes and outboxes and the linkages between them. In effect, it determines the current topology of the system. If this component is not active, then it will see no scheduler and will report nothing. What is output is how the topology changes. 
Immediately after activation, the topology is assumed to be empty, so the first set of changes describes adding nodes and linkages to the topology to build up the current state of it. Subsequent output just describes the changes - adding or deleting linkages and nodes as appropriate. Nodes in the topology represent components and postboxes. A linkage between a component node and a postbox node expresses the fact that that postbox belongs to that component. A linkage between two postboxes represents a linkage in the Axon system, from one component to another. This topology change data is output as string containing one or more lines. It is output through the "outbox" outbox. Each line may be one of the following: * "DEL ALL" - the first thing sent immediately after activation - to ensure that the receiver of this data understand that we are starting from nothing * "ADD NODE <id> <name> randompos component" * "ADD NODE <id> <name> randompos inbox" * "ADD NODE <id> <name> randompos outbox" - an instruction to add a node to the topology, representing a component, inbox or outbox. <id> is a unique identifier. <name> is a 'friendly' textual label for the node. * "DEL NODE <id>" - an instruction to delete a node, specified by its unique id * "ADD LINK <id1> <id2>" - an instruction to add a link between the two identified nodes. The link is deemed to be directional, from <id1> to <id2> * "DEL LINK <id1> <id2>" - an instruction to delete any link between the two identified nodes. Again, the directionality is from <id1> to <id2>. the <id> and <name> fields may be encapsulated in double quote marks ("). This will definitely be so if they contain space characters. If there are no topology changes then nothing is output. This component ignores anything arriving at its "inbox" inbox. If a shutdownMicroprocess message is received on the "control" inbox, it is sent on to the "signal" outbox and the component will terminate. 
""" from Axon.Introspector import Introspector as _AxonIntrospector class Introspector(_AxonIntrospector): pass __kamaelia_components__ = ( Introspector, ) if __name__ == '__main__': import Axon i = Introspector() i.activate() from Kamaelia.Util.Console import ConsoleEchoer e = ConsoleEchoer() e.activate() i.link((i,"outbox"), (e, "inbox")) print "You should see the Introspector find that it and a ConsoleEchoer component exist." print "We both have inbox, control, signal and outbox postboxes" print "The Introspector's outbox is linked to the ConsoleEchoer's inbox" print Axon.Scheduler.scheduler.run.runThreads(slowmo=0)
35.889706
93
0.69453
674
4,881
5.002967
0.382789
0.022835
0.017794
0.011566
0.116548
0.090154
0.056346
0.035587
0.035587
0.035587
0
0.006455
0.206515
4,881
135
94
36.155556
0.864188
0.186642
0
0
0
0
0.314735
0
0
0
0
0
0
0
null
null
0.058824
0.176471
null
null
0.235294
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
1
0
0
0
0
0
2
56196bbd9c6e856d4ee7624c88862a4b3d4c3852
1,017
py
Python
app/Product/service.py
psyphore/flask-phone-book
cceec3caabdeb03f260d37f3b55d5aa7a52c30c2
[ "MIT" ]
null
null
null
app/Product/service.py
psyphore/flask-phone-book
cceec3caabdeb03f260d37f3b55d5aa7a52c30c2
[ "MIT" ]
2
2021-03-19T03:39:56.000Z
2021-06-08T20:28:03.000Z
app/Product/service.py
psyphore/flask-phone-book
cceec3caabdeb03f260d37f3b55d5aa7a52c30c2
[ "MIT" ]
null
null
null
import maya from py2neo.ogm import Node from app.graph_context import GraphContext from .cypher_queries import get_product_by_id_query class ProductService(): ''' This Product Service houses all the actions can be performed against the product object ''' def fetch(self, id): '''Fetch a single product with matching id''' try: value = GraphContext().exec_cypher(get_product_by_id_query(id), id=id) print(f'{value}') return value except Exception as ex: print(f'X exception: {ex}') return None def fetch_all(self, limit=100): '''Fetch all Product nodes stored ordered by firstname limited (default=100)''' try: matcher = GraphContext().get_node_matcher response = list(matcher.match('Product').order_by( "_.name").limit(limit)) return response except Exception as ex: print(f'X exception: {ex}') return []
28.25
91
0.610619
123
1,017
4.918699
0.512195
0.029752
0.039669
0.046281
0.204959
0.142149
0.142149
0.142149
0.142149
0.142149
0
0.009804
0.297935
1,017
35
92
29.057143
0.837535
0.19764
0
0.272727
0
0
0.068354
0
0
0
0
0
0
1
0.090909
false
0
0.181818
0
0.5
0.136364
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5619839185e18406d5f02290ccde9ac7b3a514d6
119
py
Python
oimdp/__init__.py
OpenITI/oimdp
9cb0dfb298b90d85f08e248633369c0c3c494c65
[ "MIT" ]
1
2022-02-21T11:55:39.000Z
2022-02-21T11:55:39.000Z
oimdp/__init__.py
OpenITI/oimdp
9cb0dfb298b90d85f08e248633369c0c3c494c65
[ "MIT" ]
7
2022-02-05T09:04:20.000Z
2022-02-06T15:31:28.000Z
oimdp/__init__.py
OpenITI/oimdp
9cb0dfb298b90d85f08e248633369c0c3c494c65
[ "MIT" ]
1
2021-07-09T06:23:26.000Z
2021-07-09T06:23:26.000Z
from .parser import parser def parse(text): return parser(text) __all__ = [ 'parse' ] __version__ = '1.1.0'
9.916667
26
0.638655
16
119
4.25
0.6875
0
0
0
0
0
0
0
0
0
0
0.032609
0.226891
119
11
27
10.818182
0.706522
0
0
0
0
0
0.084034
0
0
0
0
0
0
1
0.142857
false
0
0.142857
0.142857
0.428571
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
0
0
0
2
561cd253e6ea0665afa83c977dd5106cee35aeab
2,041
py
Python
adlib/tests/adversaries/feature_deletion_test.py
xyvivian/adlib
79a93baa8aa542080bbf55734168eb89317df83c
[ "MIT" ]
null
null
null
adlib/tests/adversaries/feature_deletion_test.py
xyvivian/adlib
79a93baa8aa542080bbf55734168eb89317df83c
[ "MIT" ]
null
null
null
adlib/tests/adversaries/feature_deletion_test.py
xyvivian/adlib
79a93baa8aa542080bbf55734168eb89317df83c
[ "MIT" ]
null
null
null
import pytest from adlib.adversaries.feature_deletion import AdversaryFeatureDeletion from sklearn import svm from adlib.learners import SimpleLearner from data_reader.dataset import EmailDataset from data_reader.operations import load_dataset @pytest.fixture def data(): dataset = EmailDataset(path='./data_reader/data/test/100_instance_debug.csv', raw=False) training_, testing_ = dataset.split({'train': 60, 'test': 40}) training_data = load_dataset(training_) testing_data = load_dataset(testing_) return {'training_data': training_data, 'testing_data': testing_data} @pytest.fixture def learner(data): learning_model = svm.SVC(probability=True, kernel='linear') learner = SimpleLearner(learning_model, data['training_data']) learner.train() return learner @pytest.fixture def feature_deletion(learner): return AdversaryFeatureDeletion(learner=learner) def test_change_instance(feature_deletion, data): sample = next((x for x in data['testing_data'] if x.get_label() == 1), None) result = feature_deletion.change_instance(sample) assert sample.label == result.label def test_set_params(feature_deletion): feature_deletion.set_params({'num_deletion': 50, 'all_malicious': True}) dict = feature_deletion.get_available_params() assert dict['num_deletion'] == 50 assert dict['all_malicious'] == True def test_attack(feature_deletion, data): result = feature_deletion.attack(data['testing_data'])[0] sample = data['testing_data'][0] num = sample.get_feature_vector().get_feature_count() for i in range(num): assert result.get_feature_vector().get_feature( i) == sample.get_feature_vector().get_feature(i) def test_attack_different(feature_deletion, data): feature_deletion.set_params({'num_deletion': 100, 'all_malicious': False}) result = feature_deletion.attack(data['testing_data'])[0] sample = data['testing_data'][0] assert result.get_feature_vector().indptr[1] != sample.get_feature_vector().indptr[1]
35.189655
92
0.748653
264
2,041
5.518939
0.272727
0.123542
0.072066
0.043926
0.248456
0.200412
0.089224
0.089224
0.089224
0.089224
0
0.011905
0.135718
2,041
57
93
35.807018
0.814059
0
0
0.162791
0
0
0.11465
0.022538
0
0
0
0
0.116279
1
0.162791
false
0
0.139535
0.023256
0.372093
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5620a3700cb215b8dbfe4bc707bf2609413ae823
1,225
py
Python
cut_plist.py
labbbirder/cut-plist
115394d23fbb58044cb421c9c2c220267e80bad5
[ "MIT" ]
1
2021-05-15T14:44:27.000Z
2021-05-15T14:44:27.000Z
cut_plist.py
labbbirder/cut-plist
115394d23fbb58044cb421c9c2c220267e80bad5
[ "MIT" ]
null
null
null
cut_plist.py
labbbirder/cut-plist
115394d23fbb58044cb421c9c2c220267e80bad5
[ "MIT" ]
1
2021-05-15T15:49:58.000Z
2021-05-15T15:49:58.000Z
import plistlib import os import numpy as np from PIL import Image def read_plist(plist_path): with open(plist_path, "rb") as fp: return plistlib.load(fp) def to_list(x): return x.replace("{", "").replace("}", "").split(",") def cut_plist(output, texture, save_dir): if not os.path.exists(save_dir): os.makedirs(save_dir) for key in output: data = output[key] rect = to_list(data["textureRect"]) rotated = data["textureRotated"] x = int(rect[0]) y = int(rect[1]) width = int(rect[2]) height = int(rect[3]) if rotated: width, height = height, width box = (x, y, x + width, y + height) newSize = np.array([width, height]) offset = np.array(to_list(data["spriteOffset"])).astype("float")*(-1,1) srcSize = np.array(to_list(data["spriteSourceSize"])).astype("float") offset = ((newSize-srcSize)/2+offset).astype("int") sprite = texture.crop(box).crop((*offset,*(offset+srcSize))) if rotated: sprite = sprite.transpose(Image.ROTATE_90) save_path = os.path.splitext(os.path.join(save_dir, key))[0] + ".png" sprite.save(save_path)
28.488372
79
0.590204
164
1,225
4.317073
0.402439
0.033898
0.042373
0.036723
0.048023
0
0
0
0
0
0
0.010893
0.250612
1,225
42
80
29.166667
0.760349
0
0
0.0625
0
0
0.061224
0
0
0
0
0
0
1
0.09375
false
0
0.125
0.03125
0.28125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
56250bc919b313aa73d52cc5859b027f7a03d9eb
18,160
py
Python
aiida/scheduler/plugins/test_pbspro.py
BIGDATA2015-AIIDA-EXTENSION/query_engine
d1cfc7cfe6084854005d2376e3825310c66876b5
[ "MIT", "BSD-3-Clause" ]
null
null
null
aiida/scheduler/plugins/test_pbspro.py
BIGDATA2015-AIIDA-EXTENSION/query_engine
d1cfc7cfe6084854005d2376e3825310c66876b5
[ "MIT", "BSD-3-Clause" ]
null
null
null
aiida/scheduler/plugins/test_pbspro.py
BIGDATA2015-AIIDA-EXTENSION/query_engine
d1cfc7cfe6084854005d2376e3825310c66876b5
[ "MIT", "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- from aiida.scheduler.plugins.pbspro import * import unittest #import logging import uuid text_qstat_f_to_test = """Job Id: 68350.mycluster Job_Name = cell-Qnormal Job_Owner = usernum1@mycluster.cluster job_state = Q queue = Q_express server = mycluster Checkpoint = u ctime = Tue Apr 9 15:01:47 2013 Error_Path = mycluster.cluster:/home/usernum1/scratch/cptest/scaletest/PTOs caletest/testjob.err Hold_Types = n Join_Path = n Keep_Files = n Mail_Points = a mtime = Mon Apr 22 13:13:53 2013 Output_Path = mycluster.cluster:/home/usernum1/scratch/cptest/scaletest/PTO scaletest/testjob.out Priority = 0 qtime = Tue Apr 9 18:26:32 2013 Rerunable = False Resource_List.mpiprocs = 15 Resource_List.ncpus = 240 Resource_List.nodect = 15 Resource_List.place = free Resource_List.select = 15:ncpus=16 Resource_List.walltime = 01:00:00 substate = 10 Variable_List = PBS_O_SYSTEM=Linux,PBS_O_SHELL=/bin/bash, PBS_O_HOME=/home/usernum1,PBS_O_LOGNAME=usernum1, PBS_O_WORKDIR=/home/usernum1/scratch/cptest/scaletest/PTOscaletest, PBS_O_LANG=en_US.UTF-8, PBS_O_PATH=/opt/xcat/bin:/opt/xcat/sbin:/usr/lib64/qt-3.3/bin:/usr/loc al/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/opt/pbs/default/b in:/opt/software/python/3.3.0/bin:/opt/software/bin, PBS_O_MAIL=/var/spool/mail/usernum1,PBS_O_QUEUE=P_share_queue, PBS_O_HOST=mycluster.cluster comment = Not Running: Node is in an ineligible state: offline etime = Tue Apr 9 18:26:32 2013 Submit_arguments = job-PTO64cell-Qnormal.6.15.1.64.4 project = _pbs_project_default Job Id: 68351.mycluster Job_Name = cell-Qnormal Job_Owner = usernum1@mycluster.cluster job_state = Q queue = Q_express server = mycluster Checkpoint = u ctime = Tue Apr 9 15:01:47 2013 Error_Path = mycluster.cluster:/home/usernum1/scratch/cptest/scaletest/PTOs caletest/testjob.err Hold_Types = n Join_Path = n Keep_Files = n Mail_Points = a mtime = Mon Apr 22 13:13:53 2013 Output_Path = mycluster.cluster:/home/usernum1/scratch/cptest/scaletest/PTO 
scaletest/testjob.out Priority = 0 qtime = Tue Apr 9 18:26:32 2013 Rerunable = False Resource_List.mpiprocs = 15 Resource_List.ncpus = 240 Resource_List.nodect = 15 Resource_List.place = free Resource_List.select = 15:ncpus=16 Resource_List.walltime = 01:00:00 substate = 10 Variable_List = PBS_O_SYSTEM=Linux,PBS_O_SHELL=/bin/bash, PBS_O_HOME=/home/usernum1,PBS_O_LOGNAME=usernum1, PBS_O_WORKDIR=/home/usernum1/scratch/cptest/scaletest/PTOscaletest, PBS_O_LANG=en_US.UTF-8, PBS_O_PATH=/opt/xcat/bin:/opt/xcat/sbin:/usr/lib64/qt-3.3/bin:/usr/loc al/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/opt/pbs/default/b in:/opt/software/python/3.3.0/bin:/opt/software/bin, PBS_O_MAIL=/var/spool/mail/usernum1,PBS_O_QUEUE=P_share_queue, PBS_O_HOST=mycluster.cluster comment = Not Running: Node is in an ineligible state: offline etime = Tue Apr 9 18:26:32 2013 Submit_arguments = job-PTO64cell-Qnormal.6.15.1.64.8 project = _pbs_project_default Job Id: 69301.mycluster Job_Name = Cu-dbp Job_Owner = user02@mycluster.cluster resources_used.cpupercent = 6384 resources_used.cput = 4090:56:03 resources_used.mem = 13378420kb resources_used.ncpus = 64 resources_used.vmem = 9866188kb resources_used.walltime = 64:26:16 job_state = R queue = P_lsu server = mycluster Account_Name = lsu Checkpoint = u ctime = Wed Apr 10 17:10:29 2013 depend = afterok:69299.mycluster@mycluster.cluster, beforeok:69302.mycluster@mycluster.cluster Error_Path = mycluster.cluster:/scratch/user02/QMMM-CuPhens/dbp/NOSE/RUN7/C u-dbp.e69301 exec_host = b141/0*16+b142/0*16+b143/0*16+b144/0*16 exec_vnode = (b141:ncpus=16)+(b142:ncpus=16)+(b143:ncpus=16)+(b144:ncpus=16 ) Hold_Types = n Join_Path = oe Keep_Files = n Mail_Points = a mtime = Sat Apr 20 01:37:01 2013 Output_Path = mycluster.cluster:/scratch/user02/QMMM-CuPhens/dbp/NOSE/RUN7/ Cu-dbp.o69301 Priority = 0 qtime = Wed Apr 10 17:10:29 2013 Rerunable = False Resource_List.mpiprocs = 4 Resource_List.ncpus = 64 Resource_List.nodect = 4 Resource_List.place = excl 
Resource_List.select = 4:ncpus=16 Resource_List.walltime = 72:00:00 stime = Sat Apr 20 01:36:59 2013 session_id = 118473 Shell_Path_List = /bin/tcsh jobdir = /home/user02 substate = 42 Variable_List = SSH_ASKPASS=/usr/libexec/openssh/gnome-ssh-askpass, PERL_BADLANG=0,KDE_IS_PRELINKED=1,PBS_O_HOME=/home/user02, module=() { eval `/usr/bin/modulecmd bash $*`,}, LESSOPEN=|/usr/bin/lesspipe.sh %s,PBS_O_LOGNAME=user02, SSH_CLIENT=128.178.54.94 46714 22,CVS_RSH=ssh,PBS_O_LANG=C,USER=user02, HOME=/home/user02,LIBGL_ALWAYS_INDIRECT=yes, PATH=/opt/xcat/bin:/opt/xcat/sbin:/usr/lib64/qt-3.3/bin:/usr/local/bin :/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/opt/pbs/default/bin:/op t/software/python/3.3.0/bin:/opt/software/bin, LD_LIBRARY_PATH=/opt/software/python/3.3.0/lib, SSH_CONNECTION=128.178.54.94 46714 128.178.209.70 22,LANG=C, QTLIB=/usr/lib64/qt-3.3/lib,TERM=xterm,SHELL=/bin/bash, QTINC=/usr/lib64/qt-3.3/include,G_BROKEN_FILENAMES=1,HISTSIZE=1000, PBS_O_WORKDIR=/scratch/user02/QMMM-CuPhens/dbp/NOSE/RUN7, PBS_O_PATH=/opt/xcat/bin:/opt/xcat/sbin:/usr/lib64/qt-3.3/bin:/usr/loc al/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/opt/pbs/default/b in:/opt/software/python/3.3.0/bin:/opt/software/bin, MANPATH=/opt/xcat/share/man:,XCATROOT=/opt/xcat, MODULESHOME=/usr/share/Modules,PBS_O_SYSTEM=Linux,MSM_PRODUCT=MSM, HOST=mycluster,MAIL=/var/spool/mail/user02, PBS_O_MAIL=/var/spool/mail/user02,_=/opt/pbs/default/bin/qsub, MODULEPATH=/etc/modulefiles:/opt/software/modulefiles:/opt/software/cs e-software/modulefiles,KDEDIRS=/usr,PBS_O_SHELL=/bin/bash, SSH_TTY=/dev/pts/55,OLDPWD=/scratch/user02/QMMM-CuPhens/dbp/NOSE/RUN6, LOADEDMODULES=,HISTCONTROL=ignoredups,SHLVL=1, PWD=/scratch/user02/QMMM-CuPhens/dbp/NOSE/RUN7,HOSTNAME=mycluster, MSM_HOME=/usr/local/MegaRAID Storage Manager,LOGNAME=user02, PBS_O_HOST=mycluster.cluster comment = Job run at Sat Apr 20 at 01:36 on (b141:ncpus=16)+(b142:ncpus=16) +(b143:ncpus=16)+(b144:ncpus=16) etime = Sat Apr 20 01:36:59 2013 Submit_arguments = 
job.sh project = _pbs_project_default Job Id: 69302.mycluster Job_Name = Cu-dbp Job_Owner = user02@mycluster.cluster job_state = H queue = P_lsu server = mycluster Account_Name = lsu Checkpoint = u ctime = Wed Apr 10 17:11:21 2013 depend = afterok:69301.mycluster@mycluster.cluster Error_Path = mycluster.cluster:/scratch/user02/QMMM-CuPhens/dbp/NOSE/RUN8/C u-dbp.e69302 Hold_Types = s Join_Path = oe Keep_Files = n Mail_Points = a mtime = Wed Apr 10 17:11:21 2013 Output_Path = mycluster.cluster:/scratch/user02/QMMM-CuPhens/dbp/NOSE/RUN8/ Cu-dbp.o69302 Priority = 0 qtime = Wed Apr 10 17:11:21 2013 Rerunable = False Resource_List.mpiprocs = 4 Resource_List.ncpus = 64 Resource_List.nodect = 4 Resource_List.place = excl Resource_List.select = 4:ncpus=16 Resource_List.walltime = 72:00:00 Shell_Path_List = /bin/tcsh substate = 22 Variable_List = SSH_ASKPASS=/usr/libexec/openssh/gnome-ssh-askpass, PERL_BADLANG=0,KDE_IS_PRELINKED=1,PBS_O_HOME=/home/user02, module=() { eval `/usr/bin/modulecmd bash $*`,}, LESSOPEN=|/usr/bin/lesspipe.sh %s,PBS_O_LOGNAME=user02, SSH_CLIENT=128.178.54.94 46714 22,CVS_RSH=ssh,PBS_O_LANG=C,USER=user02, HOME=/home/user02,LIBGL_ALWAYS_INDIRECT=yes, PATH=/opt/xcat/bin:/opt/xcat/sbin:/usr/lib64/qt-3.3/bin:/usr/local/bin :/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/opt/pbs/default/bin:/op t/software/python/3.3.0/bin:/opt/software/bin, LD_LIBRARY_PATH=/opt/software/python/3.3.0/lib, SSH_CONNECTION=128.178.54.94 46714 128.178.209.70 22,LANG=C, QTLIB=/usr/lib64/qt-3.3/lib,TERM=xterm,SHELL=/bin/bash, QTINC=/usr/lib64/qt-3.3/include,G_BROKEN_FILENAMES=1,HISTSIZE=1000, PBS_O_WORKDIR=/scratch/user02/QMMM-CuPhens/dbp/NOSE/RUN8, PBS_O_PATH=/opt/xcat/bin:/opt/xcat/sbin:/usr/lib64/qt-3.3/bin:/usr/loc al/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/opt/pbs/default/b in:/opt/software/python/3.3.0/bin:/opt/software/bin, MANPATH=/opt/xcat/share/man:,XCATROOT=/opt/xcat, MODULESHOME=/usr/share/Modules,PBS_O_SYSTEM=Linux,MSM_PRODUCT=MSM, 
HOST=mycluster,MAIL=/var/spool/mail/user02, PBS_O_MAIL=/var/spool/mail/user02,_=/opt/pbs/default/bin/qsub, MODULEPATH=/etc/modulefiles:/opt/software/modulefiles:/opt/software/cs e-software/modulefiles,KDEDIRS=/usr,PBS_O_SHELL=/bin/bash, SSH_TTY=/dev/pts/55,OLDPWD=/scratch/user02/QMMM-CuPhens/dbp/NOSE/RUN7, LOADEDMODULES=,HISTCONTROL=ignoredups,SHLVL=1, PWD=/scratch/user02/QMMM-CuPhens/dbp/NOSE/RUN8,HOSTNAME=mycluster, MSM_HOME=/usr/local/MegaRAID Storage Manager,LOGNAME=user02, PBS_O_HOST=mycluster.cluster Submit_arguments = job.sh project = _pbs_project_default Job Id: 74164.mycluster Job_Name = u-100-l-96.job Job_Owner = user3@mycluster.cluster resources_used.cpupercent = 3889 resources_used.cput = 343:11:42 resources_used.mem = 1824176kb resources_used.ncpus = 32 resources_used.vmem = 3796376kb resources_used.walltime = 10:45:13 job_state = R queue = Q_normal server = mycluster Checkpoint = u ctime = Fri Apr 12 15:21:55 2013 depend = afterany:74163.mycluster@mycluster.cluster, beforeany:74165.mycluster@mycluster.cluster Error_Path = mycluster.cluster:/scratch/user3/ubiquitin/100gL/starting-from -left/production/u-100-l-96.job.e74164 exec_host = b270/0*16+b275/0*16 exec_vnode = (b270:ncpus=16)+(b275:ncpus=16) Hold_Types = n Join_Path = oe Keep_Files = n Mail_Points = abe Mail_Users = enrico.user3@epfl.ch mtime = Mon Apr 22 07:17:36 2013 Output_Path = mycluster.cluster:/scratch/user3/ubiquitin/100gL/starting-fro m-left/production/u-100-l-96.job.o74164 Priority = 0 qtime = Fri Apr 12 15:21:55 2013 Rerunable = False Resource_List.mpiprocs = 32 Resource_List.ncpus = 32 Resource_List.nodect = 2 Resource_List.place = excl Resource_List.select = 2:ncpus=16:mpiprocs=16 Resource_List.walltime = 24:00:00 stime = Mon Apr 22 07:17:36 2013 session_id = 14147 jobdir = /home/user3 substate = 42 Variable_List = PBS_O_SYSTEM=Linux,PBS_O_SHELL=/bin/bash, PBS_O_HOME=/home/user3,PBS_O_LOGNAME=user3, PBS_O_WORKDIR=/scratch/user3/ubiquitin/100gL/starting-from-left/produc 
tion,PBS_O_LANG=en_US.utf8, PBS_O_PATH=/opt/pbs/default/sbin/:/home/bovigny/bin:/opt/xcat/bin:/opt /xcat/sbin:/opt/xcat/bin:/opt/xcat/sbin:/usr/lib64/qt-3.3/bin:/usr/loca l/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/opt/pbs/default/bi n:/opt/software/python/3.3.0/bin:/opt/software/bin:/opt/pbs/default/bin :/opt/software/python/3.3.0/bin:/opt/software/bin, PBS_O_MAIL=/var/spool/mail/user3,PBS_O_QUEUE=P_share_queue, PBS_O_HOST=mycluster.cluster comment = Job run at Mon Apr 22 at 07:17 on (b270:ncpus=16)+(b275:ncpus=16) etime = Mon Apr 22 07:17:34 2013 Submit_arguments = -W depend=afterany:74163 u-100-l-96.job project = _pbs_project_default Job Id: 74165.mycluster Job_Name = u-100-l-97.job Job_Owner = user3@mycluster.cluster job_state = H queue = Q_normal server = mycluster Checkpoint = u ctime = Fri Apr 12 15:22:01 2013 depend = afterany:74164.mycluster@mycluster.cluster, beforeany:74166.mycluster@mycluster.cluster Error_Path = mycluster.cluster:/scratch/user3/ubiquitin/100gL/starting-from -left/production/u-100-l-97.job.e74165 Hold_Types = s Join_Path = oe Keep_Files = n Mail_Points = abe Mail_Users = enrico.user3@epfl.ch mtime = Fri Apr 12 15:22:07 2013 Output_Path = mycluster.cluster:/scratch/user3/ubiquitin/100gL/starting-fro m-left/production/u-100-l-97.job.o74165 Priority = 0 qtime = Fri Apr 12 15:22:01 2013 Rerunable = False Resource_List.mpiprocs = 32 Resource_List.ncpus = 32 Resource_List.nodect = 2 Resource_List.place = excl Resource_List.select = 2:ncpus=16:mpiprocs=16 Resource_List.walltime = 24:00:00 substate = 22 Variable_List = PBS_O_SYSTEM=Linux,PBS_O_SHELL=/bin/bash, PBS_O_HOME=/home/user3,PBS_O_LOGNAME=user3, PBS_O_WORKDIR=/scratch/user3/ubiquitin/100gL/starting-from-left/produc tion,PBS_O_LANG=en_US.utf8, PBS_O_PATH=/opt/pbs/default/sbin/:/home/bovigny/bin:/opt/xcat/bin:/opt /xcat/sbin:/opt/xcat/bin:/opt/xcat/sbin:/usr/lib64/qt-3.3/bin:/usr/loca l/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/opt/pbs/default/bi 
n:/opt/software/python/3.3.0/bin:/opt/software/bin:/opt/pbs/default/bin :/opt/software/python/3.3.0/bin:/opt/software/bin, PBS_O_MAIL=/var/spool/mail/user3,PBS_O_QUEUE=P_share_queue, PBS_O_HOST=mycluster.cluster Submit_arguments = -W depend=afterany:74164 u-100-l-97.job project = _pbs_project_default """ __copyright__ = u"Copyright (c), 2015, ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE (Theory and Simulation of Materials (THEOS) and National Centre for Computational Design and Discovery of Novel Materials (NCCR MARVEL)), Switzerland and ROBERT BOSCH LLC, USA. All rights reserved." __license__ = "MIT license, see LICENSE.txt file" __version__ = "0.4.0" __contributors__ = "Andrea Cepellotti, Giovanni Pizzi, Marco Dorigo" class TestParserQstat(unittest.TestCase): """ Tests to verify if teh function _parse_joblist_output behave correctly The tests is done parsing a string defined above, to be used offline """ def test_parse_common_joblist_output(self): """ Test whether _parse_joblist can parse the qstat -f output """ s = PbsproScheduler() retval = 0 stdout = text_qstat_f_to_test stderr = '' job_list = s._parse_joblist_output(retval, stdout, stderr) # The parameters are hard coded in the text to parse job_on_cluster = 6 job_parsed = len(job_list) self.assertEquals(job_parsed, job_on_cluster) job_running = 2 job_running_parsed = len([ j for j in job_list if j.job_state \ and j.job_state == job_states.RUNNING ]) self.assertEquals(job_running,job_running_parsed) job_held = 2 job_held_parsed = len([ j for j in job_list if j.job_state \ and j.job_state == job_states.QUEUED_HELD ]) self.assertEquals(job_held,job_held_parsed) job_queued = 2 job_queued_parsed = len([ j for j in job_list if j.job_state \ and j.job_state == job_states.QUEUED ]) self.assertEquals(job_queued,job_queued_parsed) running_users = ['user02','user3'] parsed_running_users = [ j.job_owner for j in job_list if j.job_state \ and j.job_state == job_states.RUNNING ] self.assertEquals( set(running_users) , 
set(parsed_running_users) ) running_jobs = ['69301.mycluster','74164.mycluster'] parsed_running_jobs = [ j.job_id for j in job_list if j.job_state \ and j.job_state == job_states.RUNNING ] self.assertEquals( set(running_jobs) , set(parsed_running_jobs) ) for j in job_list: if j.allocated_machines: num_machines = 0 num_cpus = 0 for n in j.allocated_machines: num_machines += 1 num_cpus += n.num_cpus self.assertTrue( j.num_machines==num_machines ) self.assertTrue( j.num_cpus==num_cpus ) # TODO : parse the env_vars # TODO: WHEN WE USE THE CORRECT ERROR MANAGEMENT, REIMPLEMENT THIS TEST # def test_parse_with_error_retval(self): # """ # The qstat -f command has received a retval != 0 # """ # s = PbsproScheduler() # retval = 1 # stdout = text_qstat_f_to_test # stderr = '' # # Disable logging to avoid excessive output during test # logging.disable(logging.ERROR) # with self.assertRaises(SchedulerError): # job_list = s._parse_joblist_output(retval, stdout, stderr) # # Reset logging level # logging.disable(logging.NOTSET) # def test_parse_with_error_stderr(self): # """ # The qstat -f command has received a stderr # """ # s = PbsproScheduler() # retval = 0 # stdout = text_qstat_f_to_test # stderr = 'A non empty error message' # # TODO : catch the logging error # job_list = s._parse_joblist_output(retval, stdout, stderr) # # print s._logger._log, dir(s._logger._log),'!!!!' 
class TestSubmitScript(unittest.TestCase): def test_submit_script(self): """ """ from aiida.scheduler.datastructures import JobTemplate s = PbsproScheduler() job_tmpl = JobTemplate() job_tmpl.argv = ["mpirun", "-np", "23", "pw.x", "-npool", "1"] job_tmpl.stdin_name = 'aiida.in' job_tmpl.job_resource = s.create_job_resource(num_machines=1, num_mpiprocs_per_machine=1) job_tmpl.uuid = str(uuid.uuid4()) job_tmpl.max_wallclock_seconds = 24 * 3600 submit_script_text = s.get_submit_script(job_tmpl) self.assertTrue( '#PBS -r n' in submit_script_text ) self.assertTrue( submit_script_text.startswith('#!/bin/bash') ) self.assertTrue( '#PBS -l walltime=24:00:00' in submit_script_text ) self.assertTrue( '#PBS -l select=1' in submit_script_text ) self.assertTrue( "'mpirun' '-np' '23' 'pw.x' '-npool' '1'" + \ " < 'aiida.in'" in submit_script_text )
40.626398
278
0.691244
2,797
18,160
4.314623
0.169825
0.019224
0.015081
0.010938
0.767981
0.741631
0.713126
0.683046
0.677743
0.664899
0
0.075775
0.188271
18,160
446
279
40.717489
0.742894
0.07913
0
0.638522
0
0.118734
0.813281
0.436839
0
0
0
0.002242
0.034301
1
0.005277
false
0.005277
0.010554
0
0.021108
0
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
5625a3407da98d220cca4d5d9645cc01df5da356
368
py
Python
server/server/posts/migrations/0003_auto_20210727_1618.py
connectiveproject/connective
8866082b2147feef0e5254ac4215987b9d881396
[ "MIT" ]
4
2021-07-05T10:49:26.000Z
2021-11-24T11:34:43.000Z
server/server/posts/migrations/0003_auto_20210727_1618.py
connectiveproject/connective
8866082b2147feef0e5254ac4215987b9d881396
[ "MIT" ]
39
2021-06-21T15:02:37.000Z
2022-02-28T15:07:42.000Z
server/server/posts/migrations/0003_auto_20210727_1618.py
Eyal-VR/connective
46857dd79dc58f63c3afb9791ecf8adf853a6c57
[ "MIT" ]
17
2021-06-16T08:59:45.000Z
2021-09-29T11:35:38.000Z
# Generated by Django 3.1.11 on 2021-07-27 13:18 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('posts', '0002_auto_20210727_1319'), ] operations = [ migrations.RenameField( model_name='post', old_name='creation_time', new_name='created', ), ]
19.368421
48
0.589674
40
368
5.25
0.85
0
0
0
0
0
0
0
0
0
0
0.124031
0.298913
368
18
49
20.444444
0.689922
0.125
0
0
1
0
0.1625
0.071875
0
0
0
0
0
1
0
false
0
0.083333
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
562632b9da3d75bd5559f771c9a43df116af2988
3,791
py
Python
examples/lstm2.py
neosensory/tflite_micro_compiler
2c21a364e9763e51706cf6f6b447ed908314e117
[ "Apache-2.0" ]
48
2020-05-10T13:33:02.000Z
2022-03-24T06:47:50.000Z
examples/lstm2.py
neosensory/tflite_micro_compiler
2c21a364e9763e51706cf6f6b447ed908314e117
[ "Apache-2.0" ]
49
2020-05-21T22:03:51.000Z
2022-03-09T08:09:45.000Z
examples/lstm2.py
neosensory/tflite_micro_compiler
2c21a364e9763e51706cf6f6b447ed908314e117
[ "Apache-2.0" ]
16
2020-05-10T12:59:20.000Z
2022-03-09T06:04:22.000Z
#!/usr/bin/python3 import random import math import numpy as np import tensorflow as tf from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Dropout, LSTM train_batches=2000 eval_batches=50 train_sequlen=32 train_inputs=1 lstm_states=6 #activation="relu" activation=None rec_activation="hard_sigmoid" x_train = np.zeros((train_batches*train_sequlen,1,train_inputs)) y_train = np.zeros((train_batches*train_sequlen,1,1)) x_test = np.zeros((eval_batches*train_sequlen,1,train_inputs)) y_test = np.zeros((eval_batches*train_sequlen,1,1)) random.seed(1234) # generate input of random sine waves, feed one at a time to the network def random_sample(): ampl = random.uniform(0.5,1) freq = random.uniform(18,32) phase= random.uniform(-math.pi,math.pi) return (ampl,freq,phase) def waveform(ampl,freq,phase,idx): return ampl*math.sin(idx/freq*2*math.pi+phase) # calculate train data for i in range(train_batches): (ampl,freq,phase) = random_sample() for j in range(train_sequlen): # subsequent measurements for k in range(train_inputs): x_train[i*train_sequlen+j][0][k]=waveform(ampl,freq,phase,j+k) y_train[i*train_sequlen+j][0]=waveform(ampl,freq,phase,j+train_inputs) for i in range(eval_batches): (ampl,freq,phase) = random_sample() for j in range(train_sequlen): # subsequent measurements for k in range(train_inputs): x_test[i*train_sequlen+j][0][k]=waveform(ampl,freq,phase,j+k) y_test[i*train_sequlen+j][0]=waveform(ampl,freq,phase,j+train_inputs) print(x_train[0][0:5], y_train[0][0:5]) print(x_train.shape, y_train.shape) print(x_test.shape, y_test.shape) def create_model(train=True): if train: input0 = tf.keras.Input(batch_shape=(train_sequlen,1,train_inputs)) # stateful is worse x = LSTM(lstm_states, recurrent_activation=rec_activation, activation=activation, return_sequences=False, return_state=False, stateful=False)(input0) #x = Dropout(0.1)(x) makes it a bit worse else: input0 = tf.keras.Input(batch_shape=(1,1,train_inputs),name="data") input1 = 
tf.keras.Input(batch_shape=(1,lstm_states),name="state_h") input2 = tf.keras.Input(batch_shape=(1,lstm_states),name="state_c") x, state,state2 = LSTM(lstm_states, recurrent_activation=rec_activation, activation=activation, return_sequences=False, return_state=True, stateful=True, unroll=True)(input0, initial_state=(input1, input2)) x = Dense(units=1)(x) if train: model = tf.keras.Model(inputs=input0, outputs=x, name="sine") else: model = tf.keras.Model(inputs=(input0,input1,input2), outputs=(x,state,state2), name="sine") model.summary() return model model=create_model() model.compile(loss='mean_squared_error', optimizer='adam') for i in range(8): model.fit(x_train, y_train, epochs=1, batch_size=train_sequlen, verbose=1, shuffle=False, validation_data=(x_test,y_test)) model.reset_states() model.save('mymodel') model.save('mymodel_w.h5', save_format="h5") model2= create_model(False) model2.load_weights('mymodel_w.h5') model2.save('evalmodel.h5', save_format="h5") model2.compile(loss='mean_squared_error', optimizer='adam') state_h2 = np.zeros((1,lstm_states)) state_c2 = np.zeros((1,lstm_states)) for i in range(train_sequlen): testx, testy = x_test[i], y_test[i] testx = testx.reshape(1, 1, 1) res = model2.predict([testx,state_h2,state_c2], batch_size=1) print('In=%.1f, Expected=%.1f, Predicted=%.1f' % (testx[0][0][0], testy, res[0])) state_h2=res[1] state_c2=res[2] # to convert to tflite use # tflite_convert --keras_model_file evalmodel.h5 --output_file evalmodel.tflite --inference_type FLOAT # from tensorflow 1.15 (2.2 doesn't work)
35.429907
214
0.724347
608
3,791
4.34375
0.256579
0.063612
0.039379
0.039758
0.442635
0.391897
0.341537
0.302158
0.247633
0.247633
0
0.030774
0.134265
3,791
106
215
35.764151
0.773918
0.105249
0
0.12987
0
0
0.049394
0
0
0
0
0
0
1
0.038961
false
0
0.077922
0.012987
0.155844
0.051948
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5626db7729bb19f378a1f3a643736beccf6c224a
1,251
py
Python
critiquebrainz/frontend/external/musicbrainz_db/includes.py
AbhinavOhri/critiquebrainz
d1c1c175209ec78bcced1dbfd5bd64a46be2d1f4
[ "Apache-2.0" ]
null
null
null
critiquebrainz/frontend/external/musicbrainz_db/includes.py
AbhinavOhri/critiquebrainz
d1c1c175209ec78bcced1dbfd5bd64a46be2d1f4
[ "Apache-2.0" ]
null
null
null
critiquebrainz/frontend/external/musicbrainz_db/includes.py
AbhinavOhri/critiquebrainz
d1c1c175209ec78bcced1dbfd5bd64a46be2d1f4
[ "Apache-2.0" ]
null
null
null
import critiquebrainz.frontend.external.musicbrainz_db.exceptions as mb_exceptions

# Entity types that can appear as the target of a MusicBrainz relationship.
RELATABLE_TYPES = [
    'area',
    'artist',
    'label',
    'place',
    'event',
    'recording',
    'release',
    'release-group',
    'series',
    'url',
    'work',
    'instrument',
]

# Relationship include flags are derived from the relatable types ("area-rels", ...).
RELATION_INCLUDES = ['{}-rels'.format(entity_type) for entity_type in RELATABLE_TYPES]

TAG_INCLUDES = ["tags", "user-tags"]
RATING_INCLUDES = ["ratings", "user-ratings"]

# Maps each supported entity type to the include flags it accepts.
VALID_INCLUDES = {
    'place': ["aliases", "annotation"] + RELATION_INCLUDES + TAG_INCLUDES,
    'event': ["aliases"] + RELATION_INCLUDES + TAG_INCLUDES,
    'release_group': ["artists", "media", "releases"] + TAG_INCLUDES + RELATION_INCLUDES,
    'release': ["artists", "labels", "recordings", "release-groups", "media",
                "annotation", "aliases"] + TAG_INCLUDES + RELATION_INCLUDES,
    'artist': ["recordings", "releases", "media", "aliases",
               "annotation"] + RELATION_INCLUDES + TAG_INCLUDES,
}


def check_includes(entity, includes):
    """Check if includes specified for an entity are valid includes."""
    allowed = VALID_INCLUDES[entity]
    for requested in includes:
        if requested in allowed:
            continue
        raise mb_exceptions.InvalidIncludeError(
            "Bad includes: {inc} is not a valid include".format(inc=requested)
        )
35.742857
117
0.666667
132
1,251
6.151515
0.439394
0.118227
0.070197
0.099754
0.108374
0.108374
0
0
0
0
0
0
0.183054
1,251
34
118
36.794118
0.794521
0.048761
0
0
0
0
0.282939
0
0
0
0
0
0
1
0.033333
false
0
0.033333
0
0.066667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
562700dc0c3f61321ee677178dd5d439d1213e59
801
py
Python
djstripe/migrations/0007_auto_20200507_1913.py
bomba1990/dj-stripe
507593c619145c30264d0e85b39bdba6f671a6d6
[ "MIT" ]
null
null
null
djstripe/migrations/0007_auto_20200507_1913.py
bomba1990/dj-stripe
507593c619145c30264d0e85b39bdba6f671a6d6
[ "MIT" ]
null
null
null
djstripe/migrations/0007_auto_20200507_1913.py
bomba1990/dj-stripe
507593c619145c30264d0e85b39bdba6f671a6d6
[ "MIT" ]
null
null
null
# Generated by Django 2.2.11 on 2020-05-07 19:13 from django.db import migrations, models import djstripe.fields class Migration(migrations.Migration): dependencies = [ ('djstripe', '0006_2_3'), ] operations = [ migrations.AlterField( model_name='plan', name='amount', field=djstripe.fields.StripeDecimalCurrencyAmountField(decimal_places=2, default=0, help_text='Amount (as decimal) to be charged on the interval specified.', max_digits=11), preserve_default=False, ), migrations.AlterField( model_name='plan', name='nickname', field=models.TextField(default='', help_text='A brief description of the plan, hidden from customers.', max_length=5000), ), ]
30.807692
185
0.635456
90
801
5.544444
0.644444
0.056112
0.1002
0.116232
0.148297
0.148297
0
0
0
0
0
0.050336
0.25593
801
25
186
32.04
0.786913
0.057428
0
0.315789
1
0
0.203187
0
0
0
0
0
0
1
0
false
0
0.105263
0
0.263158
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
5627c77651ef6fd4e5b5393b20243c305bd397e3
5,511
py
Python
Gradient Descent/Gradient_Descent_Housing.py
prasad-madhale/machine-learning
bb611f809c16e1425136052e215ca83bd1148652
[ "MIT" ]
null
null
null
Gradient Descent/Gradient_Descent_Housing.py
prasad-madhale/machine-learning
bb611f809c16e1425136052e215ca83bd1148652
[ "MIT" ]
null
null
null
Gradient Descent/Gradient_Descent_Housing.py
prasad-madhale/machine-learning
bb611f809c16e1425136052e215ca83bd1148652
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 1 01:07:37 2019

@author: prasad

Linear regression on the housing dataset, trained with batch gradient
descent on shift-scale (min/max) normalized features.
"""
import math

import numpy as np
import pandas as pd


def get_data(column_names):
    '''
    Load the train/test splits of the housing dataset.

    Args
        column_names: names of the features in dataset
    Returns
        train_df: training data
        test_df: testing data
    '''
    train_df = pd.read_csv('./data/housing_train.txt', delim_whitespace=True, header=None)
    test_df = pd.read_csv('./data/housing_test.txt', delim_whitespace=True, header=None)
    test_df.columns = column_names
    train_df.columns = column_names
    return train_df, test_df


def normalize(dataset):
    '''
    Shift-scale normalize every feature column in place; the last column
    (the label) is left untouched.

    Args
        dataset: data to be normalized using shift-scale normalization
    Returns
        dataset: normalized dataset
        maxs: max parameters for each feature normalization
        mins: min parameters for each feature normalization
    '''
    # Capture the scaling parameters before any column is modified.
    maxs = dataset.max()
    mins = dataset.min()
    # Vectorized column update instead of a per-cell iterrows() loop:
    # identical values, but O(columns) pandas operations rather than
    # O(rows * columns) Python-level writes.
    for feature in dataset.columns[:-1]:
        dataset[feature] = (dataset[feature] - mins[feature]) / (maxs[feature] - mins[feature])
    return dataset, maxs, mins


def normalize_params(dataset, maxs, mins):
    '''
    Normalize a dataset with externally supplied parameters (e.g. apply the
    training-set scaling to the test set). The label column is untouched.

    Args
        dataset: data to be normalized
        maxs: max parameters for each feature normalization
        mins: min parameters for each feature normalization
    Returns:
        dataset: normalized dataset
    '''
    for feature in dataset.columns[:-1]:
        dataset[feature] = (dataset[feature] - mins[feature]) / (maxs[feature] - mins[feature])
    return dataset


def predict(test_data, weights):
    '''
    Compute linear predictions for every row of a dataset.

    Args
        test_data: data for which predictions are to be calculated
        weights: weights to obtain predictions based on
    Returns
        preds: dict mapping row index -> prediction based on given weights
    '''
    features = test_data.drop(['MEDV'], axis=1).values
    # Prepend a column of ones so weights[0] acts as the bias term.
    features = np.append(np.ones([len(features), 1]), features, 1)
    return {i: np.dot(weights, row) for i, row in enumerate(features)}


def get_mse(test_data, preds):
    '''
    Mean squared error of predictions against the MEDV labels.

    Args
        test_data: data for which model is to be tested using MSE
        preds: predictions on given test_data obtained from model
    Returns
        mse: mean squared error
    '''
    test_labels = test_data['MEDV'].values
    errors = [np.square(label - preds[i]) for i, label in enumerate(test_labels)]
    return pd.Series(errors).mean()


def cost(data, labels, weights):
    '''
    Mean squared error cost of the current weights.

    Args
        data: data for which cost needs to be calculated (bias column included)
        labels: actual labels for data used
        weights: optimized weights for prediction
    Returns
        cost on the given data
    '''
    preds = np.dot(data, weights).flatten()
    return np.sum(np.square(np.subtract(preds, labels))) / len(data)


def train(train_data, learn_rate=0.001, max_iter=3000):
    '''
    Fit linear-regression weights with full-batch gradient descent.

    Args
        train_data : normalized data for training
        learn_rate : learning rate for Gradient Descent
        max_iter : maximum number of iterations to run GD
    Returns
        w : optimized weight vector (bias first)
        costs : cost recorded after each iteration
    '''
    # get data without the labels
    x = train_data.drop(['MEDV'], axis=1).values
    # add 1s to the data for bias calculations
    x = np.append(np.ones([len(x), 1]), x, 1)
    # get labels of the training set
    y = train_data['MEDV'].values
    # initialize weights with random values
    w = np.random.normal(scale=1 / math.sqrt(len(x[0])), size=(len(x[0]), 1))
    w = w.flatten()
    # keep records of costs as we keep performing iterations of GD
    costs = []
    for itr in range(max_iter):
        # predictions based on current weights
        predicts = np.dot(x, w).flatten()
        # difference between current predictions and actual labels
        loss = np.subtract(predicts, y)
        grads = np.dot(x.T, loss)
        # update weights
        w = np.subtract(w, learn_rate * grads)
        # record cost after weight updates
        costs.append(cost(x, y, w))
        if itr % 100 == 0:
            print('{}: Cost: {}'.format(itr, costs[itr]))
    return w, costs


def plot_cost(costs):
    '''Plot the recorded cost curve against iteration count.'''
    # Imported lazily so the numeric helpers above can be used (and tested)
    # without a matplotlib/display stack installed.
    import matplotlib.pyplot as plt
    plt.figure(figsize=(20, 10))
    plt.title('Cost function')
    plt.ylabel('Costs')
    plt.xlabel('Iterations')
    plt.plot(costs)


#### EXECUTION
if __name__ == '__main__':
    # Guarded so importing this module does not trigger file I/O or training.
    # names for the features
    column_names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS',
                    'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
    # extract data from files
    train_data, test_data = get_data(column_names)
    # normalize data
    train_data, maxs, mins = normalize(train_data)
    # normalize test data using same parameters as for the training set
    test_data = normalize_params(test_data, maxs, mins)
    # optimize weights using Gradient Descent
    w, costs = train(train_data)
    # get predictions for optimized weights
    pred_train = predict(train_data, w)
    print('MSE for Housing dataset using Gradient Descent on Train Data: {}'.format(get_mse(train_data, pred_train)))
    # get predictions for optimized weights
    preds = predict(test_data, w)
    print('MSE for Housing dataset using Gradient Descent on Test Data: {}'.format(get_mse(test_data, preds)))
    plot_cost(costs)
27.974619
113
0.633279
739
5,511
4.631935
0.273342
0.046743
0.019866
0.028046
0.297692
0.235466
0.178791
0.178791
0.156588
0.156588
0
0.010304
0.260388
5,511
197
114
27.974619
0.82949
0.348757
0
0.082192
0
0
0.083633
0.014089
0
0
0
0
0
1
0.109589
false
0
0.054795
0
0.260274
0.041096
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
56282d4008935ca3506817cc4fc64ad64b685ddf
2,833
py
Python
teamspirit/preorders/views.py
etienne86/oc_p13_team_spirit
fd3d45618d349ecd0a03e63c4a7e9c1044eeffaa
[ "MIT" ]
null
null
null
teamspirit/preorders/views.py
etienne86/oc_p13_team_spirit
fd3d45618d349ecd0a03e63c4a7e9c1044eeffaa
[ "MIT" ]
null
null
null
teamspirit/preorders/views.py
etienne86/oc_p13_team_spirit
fd3d45618d349ecd0a03e63c4a7e9c1044eeffaa
[ "MIT" ]
null
null
null
from django.contrib.auth.decorators import login_required
from django.urls import reverse_lazy
from django.views.generic import ListView
from django.views.generic.edit import FormView

from teamspirit.catalogs.models import Product
from teamspirit.preorders.forms import AddToCartForm, DropFromCartForm
from teamspirit.preorders.models import ShoppingCart, ShoppingCartLine


class ShoppingCartView(ListView):
    """List the current user's shopping-cart lines with the cart total."""

    model = ShoppingCartLine
    template_name = "preorders/shopping_cart.html"

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        cart, _ = ShoppingCart.objects.get_or_create(user=self.request.user)
        context['shopping_cart_amount'] = cart.get_cart_amount()
        return context

    def get_queryset(self):
        super().get_queryset()  # base behaviour preserved; result replaced below
        cart, _ = ShoppingCart.objects.get_or_create(user=self.request.user)
        return ShoppingCartLine.objects.filter(shopping_cart=cart)


shopping_cart_view = login_required(ShoppingCartView.as_view())


class AddToCartView(FormView):
    """Add one product (looked up from the URL) to the user's cart."""

    template_name = "preorders/add_to_cart.html"
    form_class = AddToCartForm
    success_url = reverse_lazy('catalogs:catalog')

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['product'] = Product.objects.get(id=self.kwargs['product_id'])
        return context

    def get_initial(self):
        initial = super().get_initial()
        cart, _ = ShoppingCart.objects.get_or_create(user=self.request.user)
        initial['shopping_cart'] = cart
        initial['product'] = Product.objects.get(id=self.kwargs['product_id'])
        return initial


add_to_cart_view = login_required(AddToCartView.as_view())


class DropFromCartView(FormView):
    """Remove one shopping-cart line (identified by the URL) from the cart."""

    template_name = "preorders/drop_from_cart.html"
    form_class = DropFromCartForm
    success_url = reverse_lazy('preorders:shopping_cart')

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['shopping_cart_line'] = ShoppingCartLine.objects.get(
            id=self.kwargs['line_id']
        )
        return context

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        line = ShoppingCartLine.objects.get(id=self.kwargs['line_id'])
        kwargs.update({
            'request_user': self.request.user,
            'line_id': self.kwargs['line_id'],
            'shopping_cart_line': line,
        })
        return kwargs


drop_from_cart_view = login_required(DropFromCartView.as_view())
31.831461
78
0.701024
331
2,833
5.722054
0.199396
0.07603
0.044351
0.040127
0.349525
0.326294
0.326294
0.326294
0.326294
0.326294
0
0.001324
0.200141
2,833
88
79
32.193182
0.83451
0
0
0.242424
0
0
0.09354
0.037416
0
0
0
0
0
1
0.090909
false
0
0.106061
0
0.454545
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
562c8104d444901a4e792e4529b19010d3a451b2
40,079
py
Python
tasks/prime.py
transcom/milmove_load_testing
b46526d9332c864de8891ef391394c0e9e8e7b95
[ "MIT" ]
2
2021-07-20T13:41:14.000Z
2021-10-07T18:27:48.000Z
tasks/prime.py
transcom/milmove_load_testing
b46526d9332c864de8891ef391394c0e9e8e7b95
[ "MIT" ]
69
2020-07-08T21:05:58.000Z
2022-03-31T11:35:14.000Z
tasks/prime.py
transcom/milmove_load_testing
b46526d9332c864de8891ef391394c0e9e8e7b95
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ TaskSets and tasks for the Prime & Support APIs """ import logging import json import random from copy import deepcopy from typing import Dict from locust import tag, task, TaskSet from utils.constants import ( INTERNAL_API_KEY, TEST_PDF, ZERO_UUID, PRIME_API_KEY, SUPPORT_API_KEY, MOVE_TASK_ORDER, MTO_SHIPMENT, MTO_AGENT, MTO_SERVICE_ITEM, PAYMENT_REQUEST, ) from .base import check_response, CertTaskMixin, ParserTaskMixin logger = logging.getLogger(__name__) def prime_path(url: str) -> str: return f"/prime/v1{url}" def support_path(url: str) -> str: return f"/support/v1{url}" class PrimeDataStorageMixin: """ TaskSet mixin used to store data from the Prime API during load testing so that it can be passed around and reused. We store a number of objects in a local store that can be requested by tasks. The tasks then hit an endpoint and call add or replace to update our local store with a list of viable objects. This mixin allows storing multiple items of each kind. """ DATA_LIST_MAX: int = 50 # contains the ID values needed when creating moves using createMoveTaskOrder: default_mto_ids: Dict[str, str] = { "contractorID": "", "destinationDutyStationID": "", "originDutyStationID": "", "uploadedOrdersID": "", } local_store: Dict[str, list] = { MOVE_TASK_ORDER: [], MTO_SHIPMENT: [], MTO_SERVICE_ITEM: [], PAYMENT_REQUEST: [], } # data stored will be shared among class instances thanks to mutable dict def get_stored(self, object_key, *args, **kwargs): """ Given an object_key that represents an object type from the MilMove app, returns an object of that type from the list. 
:param object_key: str in [MOVE_TASK_ORDER, MTO_SHIPMENT, MTO_AGENT, MTO_SERVICE_ITEM, PAYMENT_REQUEST] """ data_list = self.local_store[object_key] if len(data_list) > 0: # otherwise we return None return random.choice(data_list) def get_stored_shipment_address(self, mto_shipment=None): """ Grabs one of either pickupAddress or destinationAddress from a shipment and returns the specific field and payload for that address. :param mto_shipment: JSON/dict of a specific MTO Shipment payload (optional) :return: tuple(str name of the address field, dict address payload) """ if not mto_shipment: mto_shipment = self.get_stored(MTO_SHIPMENT) or {} address_fields = ["pickupAddress", "destinationAddress"] valid_addresses = [ (field, mto_shipment[field]) for field in address_fields if mto_shipment.get(field) and mto_shipment[field].get("id", ZERO_UUID) != ZERO_UUID ] if len(valid_addresses) > 0: # otherwise we return None return random.choice(valid_addresses) def add_stored(self, object_key, object_data): """ Adds data to the list for the object key provided. Also checks if the list is already at the max number of elements, and if so, it randomly removes 1 to MAX number of elements so that the cycle can start again (and so we don't hog too much memory). :param object_key: str in [MOVE_TASK_ORDER, MTO_SHIPMENT, MTO_AGENT, MTO_SERVICE_ITEM, PAYMENT_REQUEST] :param object_data: JSON/dict :return: None """ data_list = self.local_store[object_key] if len(data_list) >= self.DATA_LIST_MAX: num_to_delete = random.randint(1, self.DATA_LIST_MAX) del data_list[:num_to_delete] # Some creation endpoint auto-create multiple objects and return an array, # but each object in the array should still be considered individually here: if isinstance(object_data, list): data_list.extend(object_data) else: data_list.append(object_data) def update_stored(self, object_key, old_data, new_data): """ Given an object key, replaces a stored object in the local store with a new updated object. 
:param object_key: str in [MOVE_TASK_ORDER, MTO_SHIPMENT, MTO_AGENT, MTO_SERVICE_ITEM, PAYMENT_REQUEST] :param old_data: JSON/dict :param new_data: JSON/dict :return: None """ data_list = self.local_store[object_key] # Remove all instances of the stored object, in case multiples were added erroneously: while True: try: data_list.remove(old_data) except ValueError: break # this means we finally cleared the list data_list.append(new_data) def set_default_mto_ids(self, moves): """ Given a list of Move Task Orders, gets the four ID values needed to create more MTOs: - contractorID - uploadedOrdersID - destinationDutyStationID - originDutyStationID To get these values, this function hits the getMoveTaskOrder endpoint in the Support API to get all of the details on an MTO. The Prime API doesn't have access to all of this info, which is why we need to use the Support API instead. It will go through and hit this endpoint for all of the moves in the list until it finally gets a complete set of IDs. CAN ONLY be used when subclassed with TaskSet and CertTaskMixin. :param moves: list of JSON/dict objects :return: None """ # Checks that we have a full set of MTO IDs already and halts processing if so: if self.has_all_default_mto_ids(): return headers = {"content-type": "application/json"} for move in moves: # Call the Support API to get full details on the move: resp = self.client.get( support_path(f"/move-task-orders/{move['id']}"), name=support_path("/move-task-orders/{moveTaskOrderID}"), headers=headers, **self.cert_kwargs, ) move_details, success = check_response(resp, "getMoveTaskOrder") if not success: continue # try again with the next move in the list # Get the values we need from the move and set them in self.default_move_ids. # If this move is missing any of these values, we default to using whatever value is already in # self.default_mto_ids, which could be nothing, or could be a value gotten from a previous move. 
# This way we never override good ID values from earlier moves in the list. self.default_mto_ids["contractorID"] = move_details.get( "contractorID", self.default_mto_ids["contractorID"] ) if order_details := move_details.get("order"): self.default_mto_ids["uploadedOrdersID"] = order_details.get( "uploadedOrdersID", self.default_mto_ids["uploadedOrdersID"] ) self.default_mto_ids["destinationDutyStationID"] = order_details.get( "destinationDutyStationID", self.default_mto_ids["destinationDutyStationID"] ) self.default_mto_ids["originDutyStationID"] = order_details.get( "originDutyStationID", self.default_mto_ids["originDutyStationID"] ) # Do we have all the ID values we need? Cool, then stop processing. if self.has_all_default_mto_ids(): logger.info(f"☑️ Set default MTO IDs for createMoveTaskOrder: \n{self.default_mto_ids}") break # If we're in the local environment, and we have gone through the entire list without getting a full set of IDs, # set our hardcoded IDs as the default: if not self.has_all_default_mto_ids() and self.user.is_local: logger.warning("⚠️ Using hardcoded MTO IDs for LOCAL env") self.default_mto_ids.update( { "contractorID": "5db13bb4-6d29-4bdb-bc81-262f4513ecf6", "destinationDutyStationID": "71b2cafd-7396-4265-8225-ff82be863e01", "originDutyStationID": "1347d7f3-2f9a-44df-b3a5-63941dd55b34", "uploadedOrdersID": "c26421b0-e4c3-446b-88f3-493bb25c1756", } ) def has_all_default_mto_ids(self) -> bool: """Boolean indicating that we have all the values we need for creating new MTOs.""" return self.default_mto_ids and all(self.default_mto_ids.values()) @tag("prime") class PrimeTasks(PrimeDataStorageMixin, ParserTaskMixin, CertTaskMixin, TaskSet): """ Set of the tasks that can be called on the Prime API. Make sure to mark tasks with the `@task` decorator and add tags where appropriate to make filtering for custom tests easier. 
""" def __init__(self, parent): self.csrf_token = None self.session_token = None super().__init__(parent) def customer_path(self, url: str) -> str: return f"{self.user.alternative_host}{url}" def on_start(self): self.client.get(self.customer_path("/devlocal-auth/login")) self.csrf_token = self.client.cookies.get("masked_gorilla_csrf") self.client.headers.update({"x-csrf-token": self.csrf_token}) resp = self.client.post( self.customer_path("/devlocal-auth/create"), data={"userType": "milmove", "gorilla.csrf.Token": self.csrf_token}, ) self.session_token = self.client.cookies.get("mil_session_token") if resp.status_code != 200: self.interrupt() logged_in_user = self.client.get(self.customer_path("/internal/users/logged_in")) json_resp = logged_in_user.json() service_member_id = json_resp["service_member"]["id"] email = json_resp["email"] user_id = json_resp["id"] origin_duty_stations = self.client.get(self.customer_path("/internal/duty_stations?search=29")) current_station_id = origin_duty_stations.json()[0]["id"] overrides = { "id": service_member_id, "user_id": user_id, "edipi": "9999999999", "personal_email": email, "email_is_preferred": True, "current_station_id": current_station_id, } payload = self.fake_request("/service_members/{serviceMemberId}", "patch", INTERNAL_API_KEY, overrides, True) self.client.patch( self.customer_path(f"/internal/service_members/{service_member_id}"), name="/internal/service_members/{serviceMemberId}", data=json.dumps(payload), headers={"content-type": "application/json"}, **self.user.cert_kwargs, ) overrides = {"permission": "NONE"} payload = self.fake_request( "/service_members/{serviceMemberId}/backup_contacts", "post", INTERNAL_API_KEY, overrides ) self.client.post( self.customer_path(f"/internal/service_members/{service_member_id}/backup_contacts"), name="/internal/service_members/{serviceMemberId}/backup_contacts", data=json.dumps(payload), headers={"content-type": "application/json"}, **self.user.cert_kwargs, ) 
@tag(MOVE_TASK_ORDER, "listMoves") @task def list_moves(self): timeout = {} if self.user.is_local: timeout["timeout"] = 15 # set a timeout of 15sec if we're running locally - just for this endpoint resp = self.client.get(prime_path("/moves"), **self.cert_kwargs, **timeout) moves, success = check_response(resp, "listMoves") # Use these MTOs to set the ID values we'll need to create more MTOs # (NOTE: we don't care about a failure here because we can set the default IDs instead, # if this is running locally) self.set_default_mto_ids(moves or []) @tag(MTO_SERVICE_ITEM, "createMTOServiceItem") @task def create_mto_service_item(self, overrides=None): # If mtoShipmentID was provided, get that specific one. Else get any stored one. object_id = overrides.get("mtoShipmentID") if overrides else None mto_shipment = self.get_stored(MTO_SHIPMENT, object_id) if not mto_shipment: logger.debug("createMTOServiceItem: ⚠️ No mto_shipment found") return None overrides_local = { # override moveTaskOrderID because we don't want a random one "moveTaskOrderID": mto_shipment["moveTaskOrderID"], # override mtoShipmentID because we don't want a random one "mtoShipmentID": mto_shipment["id"], } # Merge local overrides with passed-in overrides overrides_local.update(overrides or {}) payload = self.fake_request("/mto-service-items", "post", PRIME_API_KEY, overrides_local) headers = {"content-type": "application/json"} resp = self.client.post( prime_path("/mto-service-items"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs ) mto_service_items, success = check_response(resp, f"createMTOServiceItem {payload['reServiceCode']}", payload) if success: self.add_stored(MTO_SERVICE_ITEM, mto_service_items) return mto_service_items @tag(MTO_SHIPMENT, "createMTOShipment") @task def create_mto_shipment(self, overrides=None): def guarantee_unique_agent_type(agents): agent_types = {agent["agentType"] for agent in agents} if len(agents) >= 2 and len(agent_types) < 2: possible_types = 
{"RELEASING_AGENT", "RECEIVING_AGENT"} agents[1]["agentType"] = (possible_types - agent_types).pop() # If moveTaskOrderID was provided, get that specific one. Else get any stored one. object_id = overrides.get("moveTaskOrderID") if overrides else None move_task_order = self.get_stored(MOVE_TASK_ORDER, object_id) if not move_task_order: logger.debug("createMTOShipment: ⚠️ No move_task_order found") return ( None # we can't do anything else without a default value, and no pre-made MTOs satisfy our requirements ) overrides_local = { # Override moveTaskorderID because we don't want a random one "moveTaskOrderID": move_task_order["id"], # Set agents UUIDs to ZERO_UUID because we can't actually set the UUID on creation "agents": {"id": ZERO_UUID, "mtoShipmentID": ZERO_UUID}, # Set pickupAddress to ZERO_UUID because we can't actually set the UUID on creation "pickupAddress": {"id": ZERO_UUID}, # Set destinationAddress to ZERO_UUID because we can't actually set the UUID on creation "destinationAddress": {"id": ZERO_UUID}, # Set mtoServiceItems to empty to let the createMTOServiceItems endpoint do the creation "mtoServiceItems": [], } # Merge local overrides with passed-in overrides if overrides: overrides_local.update(overrides) payload = self.fake_request("/mto-shipments", "post", PRIME_API_KEY, overrides=overrides_local) guarantee_unique_agent_type(payload["agents"]) # modifies the payload directly headers = {"content-type": "application/json"} resp = self.client.post( prime_path("/mto-shipments"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs ) mto_shipment, success = check_response(resp, "createMTOShipment", payload) if success: self.add_stored(MTO_SHIPMENT, mto_shipment) return mto_shipment @tag(MTO_SHIPMENT, "createMTOShipment", "expectedFailure") @task def create_mto_shipment_with_duplicate_agents(self, overrides=None): # If moveTaskOrderID was provided, get that specific one. Else get any stored one. 
object_id = overrides.get("moveTaskOrderID") if overrides else None move_task_order = self.get_stored(MOVE_TASK_ORDER, object_id) if not move_task_order: logger.debug("createMTOShipment — expected failure: ⚠️ No move_task_order found") return ( None # we can't do anything else without a default value, and no pre-made MTOs satisfy our requirements ) agent_type = random.choice(["RELEASING_AGENT", "RECEIVING_AGENT"]) agent_override = {"id": ZERO_UUID, "mtoShipmentID": ZERO_UUID, "agentType": agent_type} overrides_local = { # Override moveTaskorderID because we don't want a random one "moveTaskOrderID": move_task_order["id"], # Set agents UUIDs to ZERO_UUID because we can't actually set the UUID on creation and guarantee two agents "agents": [agent_override, agent_override], # Set pickupAddress to ZERO_UUID because we can't actually set the UUID on creation "pickupAddress": {"id": ZERO_UUID}, # Set destinationAddress to ZERO_UUID because we can't actually set the UUID on creation "destinationAddress": {"id": ZERO_UUID}, # Set mtoServiceItems to empty to let the createMTOServiceItems endpoint do the creation "mtoServiceItems": [], } # Merge local overrides with passed-in overrides if overrides: overrides_local.update(overrides) payload = self.fake_request("/mto-shipments", "post", PRIME_API_KEY, overrides=overrides_local) headers = {"content-type": "application/json"} resp = self.client.post( prime_path("/mto-shipments"), name=prime_path("/mto-shipments — expected failure"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs, ) check_response(resp, "createMTOShipmentFailure", payload, "422") @tag(PAYMENT_REQUEST, "createUpload") @task def create_upload(self, overrides=None): # If id was provided, get that specific one. Else get any stored one. 
object_id = overrides.get("id") if overrides else None payment_request = self.get_stored(PAYMENT_REQUEST, object_id) if not payment_request: return upload_file = {"file": open(TEST_PDF, "rb")} resp = self.client.post( prime_path(f"/payment-requests/{payment_request['id']}/uploads"), name=prime_path("/payment-requests/{paymentRequestID}/uploads"), files=upload_file, **self.user.cert_kwargs, ) check_response(resp, "createUpload") @tag(PAYMENT_REQUEST, "createPaymentRequest") @task def create_payment_request(self, overrides=None): # If mtoServiceItemID was provided, get that specific one. Else get any stored one. object_id = overrides.get("mtoServiceItemID") if overrides else None service_item = self.get_stored(MTO_SERVICE_ITEM, object_id) if not service_item: return payload = { "moveTaskOrderID": service_item["moveTaskOrderID"], "serviceItems": [{"id": service_item["id"]}], "isFinal": False, } shipment = self.get_stored(MTO_SHIPMENT, service_item["mtoShipmentID"]) if not shipment: logger.info("unable to find shipment of payment request service item") headers = {"content-type": "application/json"} # if the actual weight hasn't been provided, creating the payment request will fail if not shipment.get("primeActualWeight"): self.client.post( prime_path("/payment-requests"), name=prime_path("/payment-requests — expected failure"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs, ) return None resp = self.client.post( prime_path("/payment-requests"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs ) payment_request, success = check_response(resp, "createPaymentRequest", payload) if success: self.add_stored(PAYMENT_REQUEST, payment_request) return payment_request @tag(MTO_SHIPMENT, "updateMTOShipment") @task def update_mto_shipment(self, overrides=None): # If id was provided, get that specific one. Else get any stored one. 
object_id = overrides.get("id") if overrides else None mto_shipment = self.get_stored(MTO_SHIPMENT, object_id) if not mto_shipment: return # can't run this task payload = self.fake_request("/mto-shipments/{mtoShipmentID}", "patch", PRIME_API_KEY, overrides) # Agents and addresses should not be updated by this endpoint, and primeEstimatedWeight cannot be updated after # it is initially set (and it is set in create_mto_shipment) fields_to_remove = [ "agents", "pickupAddress", "destinationAddress", "secondaryPickupAddress", "secondaryDeliveryAddress", "primeEstimatedWeight", ] # nts weight is only valid when the shipment type is nts release if payload.get("ntsRecordedWeight"): shipmentType = payload.get("shipmentType") or mto_shipment.get("shipmentType") if shipmentType != "HHG_OUTOF_NTS_DOMESTIC": fields_to_remove.append("ntsRecordedWeight") for f in fields_to_remove: payload.pop(f, None) headers = {"content-type": "application/json", "If-Match": mto_shipment["eTag"]} resp = self.client.patch( prime_path(f"/mto-shipments/{mto_shipment['id']}"), name=prime_path("/mto-shipments/{mtoShipmentID}"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs, ) new_mto_shipment, success = check_response(resp, "updateMTOShipment", payload) if success: self.update_stored(MTO_SHIPMENT, mto_shipment, new_mto_shipment) return new_mto_shipment @tag(MTO_SHIPMENT, "updateMTOShipmentAddress") @task def update_mto_shipment_address(self, overrides=None): # If id was provided, get that specific one. Else get any stored one. 
object_id = overrides.get("id") if overrides else None mto_shipment = self.get_stored(MTO_SHIPMENT, object_id) if not mto_shipment: return address_tuple = self.get_stored_shipment_address(mto_shipment) # returns a (field_name, address_dict) tuple if not address_tuple: return # this shipment didn't have any addresses, we will try again later with a different shipment field, address = address_tuple overrides_local = {"id": address["id"]} overrides_local.update(overrides or {}) payload = self.fake_request( "/mto-shipments/{mtoShipmentID}/addresses/{addressID}", "put", PRIME_API_KEY, overrides=overrides_local ) headers = {"content-type": "application/json", "If-Match": address["eTag"]} # update mto_shipment address resp = self.client.put( prime_path(f"/mto-shipments/{mto_shipment['id']}/addresses/{address['id']}"), name=prime_path("/mto-shipments/{mtoShipmentID}/addresses/{addressID}"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs, ) updated_address, success = check_response(resp, "updateMTOShipmentAddress", payload) if success: # we only got the address, so we're gonna pop it back into the shipment to store updated_shipment = deepcopy(mto_shipment) updated_shipment[field] = updated_address self.update_stored(MTO_SHIPMENT, mto_shipment, updated_shipment) return updated_shipment @tag(MTO_AGENT, "updateMTOAgent") @task def update_mto_agent(self, overrides=None): # If id was provided, get that specific one. Else get any stored one. 
object_id = overrides.get("mtoShipmentID") if overrides else None mto_shipment = self.get_stored(MTO_SHIPMENT, object_id) if not mto_shipment: return # can't run this task if mto_shipment.get("agents") is None: return # can't update agents if there aren't any overrides = {} mto_agents = mto_shipment["agents"] mto_agent = mto_shipment["agents"][0] if len(mto_agents) >= 2: overrides = {"agentType": mto_agent["agentType"]} # ensure agentType does not change payload = self.fake_request("/mto-shipments/{mtoShipmentID}/agents/{agentID}", "put", PRIME_API_KEY, overrides) headers = {"content-type": "application/json", "If-Match": mto_agent["eTag"]} resp = self.client.put( prime_path(f"/mto-shipments/{mto_shipment['id']}/agents/{mto_agent['id']}"), name=prime_path("/mto-shipments/{mtoShipmentID}/agents/{agentID}"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs, ) updated_agent, success = check_response(resp, "updateMTOAgent", payload) if success: # we only got the agent, so we're gonna pop it back into the shipment to store new_shipment = deepcopy(mto_shipment) new_shipment["agents"][0] = updated_agent self.update_stored(MTO_SHIPMENT, mto_shipment, new_shipment) return new_shipment @tag(MTO_SERVICE_ITEM, "updateMTOServiceItem") @task def update_mto_service_item(self, overrides=None): # If id was provided, get that specific one. Else get any stored one. object_id = overrides.get("id") if overrides else None mto_service_item = self.get_stored(MTO_SERVICE_ITEM, object_id) if not mto_service_item: return # can't run this task try: re_service_code = mto_service_item["reServiceCode"] except KeyError: logger.error(f"⛔️ update_mto_service_item recvd mtoServiceItem without reServiceCode \n{mto_service_item}") return if re_service_code not in ["DDDSIT", "DOPSIT"]: logging.info( "update_mto_service_item recvd mtoServiceItem from store. 
Discarding because reServiceCode not in " "[DDDSIT, DOPSIT]" ) return payload = self.fake_request( "/mto-service-items/{mtoServiceItemID}", "patch", overrides={ "id": mto_service_item["id"], "sitDestinationFinalAddress": { "id": mto_service_item["sitDestinationFinalAddress"]["id"] if mto_service_item.get("sitDestinationFinalAddress") and mto_service_item["sitDestinationFinalAddress"].get("id") else ZERO_UUID, }, }, ) headers = {"content-type": "application/json", "If-Match": mto_service_item["eTag"]} resp = self.client.patch( prime_path(f"/mto-service-items/{mto_service_item['id']}"), name=prime_path("/mto-service-items/{mtoServiceItemID}"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs, ) updated_service_item, success = check_response(resp, f"updateMTOServiceItem {re_service_code}", payload) if success: self.update_stored(MTO_SERVICE_ITEM, mto_service_item, updated_service_item) return updated_service_item @tag(MOVE_TASK_ORDER, "updateMTOPostCounselingInformation") @task def update_post_counseling_information(self, overrides=None): # If id was provided, get that specific one. Else get any stored one. 
object_id = overrides.get("id") if overrides else None move_task_order = self.get_stored(MOVE_TASK_ORDER, object_id) if not move_task_order: logger.debug("updateMTOPostCounselingInformation: ⚠️ No move_task_order found") return # we can't do anything else without a default value, and no pre-made MTOs satisfy our requirements payload = self.fake_request("/move-task-orders/{moveTaskOrderID}/post-counseling-info", "patch", PRIME_API_KEY) move_task_order_id = move_task_order["id"] # path parameter headers = {"content-type": "application/json", "If-Match": move_task_order["eTag"]} resp = self.client.patch( prime_path(f"/move-task-orders/{move_task_order_id}/post-counseling-info"), name=prime_path("/move-task-orders/{moveTaskOrderID}/post-counseling-info"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs, ) new_mto, success = check_response(resp, "updateMTOPostCounselingInformation", payload) if success: self.update_stored(MOVE_TASK_ORDER, move_task_order, new_mto) return new_mto @tag("support") class SupportTasks(PrimeDataStorageMixin, ParserTaskMixin, CertTaskMixin, TaskSet): """ Set of the tasks that can be called on the Support API. Make sure to mark tasks with the `@task` decorator and add tags where appropriate to make filtering for custom tests easier. Ex: @tag('updates', 'shipments') @task def update_mto_shipment_status(self): # etc. """ @tag(MTO_SHIPMENT, "updateMTOShipmentStatus") @task(2) def update_mto_shipment_status(self, overrides=None): # If id was provided, get that specific one. Else get any stored one. 
object_id = overrides.get("id") if overrides else None mto_shipment = self.get_stored(MTO_SHIPMENT, object_id) if not mto_shipment: logger.debug("updateMTOShipmentStatus: ⚠️ No mto_shipment found.") return None # can't run this task # To avoid issues with the mto shipment being stale # retrieve the move associated with the shipment # and then use the newly fetched move to the find most up to date version of the shipment move_id = mto_shipment["moveTaskOrderID"] headers = {"content-type": "application/json"} resp = self.client.get( support_path(f"/move-task-orders/{move_id}"), name=support_path("/move-task-orders/{moveTaskOrderID}"), headers=headers, ) move_details, success = check_response(resp, "getMoveTaskOrder") if not move_details: logger.debug("updateMTOShipmentStatus: ⚠️ No mto_shipment found.") return None # can't run this task for fetched_mto_shipment in move_details["mtoShipments"]: if fetched_mto_shipment["id"] == mto_shipment["id"]: # Generate fake payload based on the endpoint's required fields payload = self.fake_request( "/mto-shipments/{mtoShipmentID}/status", "patch", SUPPORT_API_KEY, overrides ) if fetched_mto_shipment["status"] == "CANCELLATION_REQUESTED" and payload["status"] != "CANCELED": return None elif fetched_mto_shipment["status"] == "SUBMITTED" and payload["status"] not in [ "APPROVED", "REJECTED", ]: return None elif fetched_mto_shipment["status"] == "DIVERSION_REQUESTED" and payload["status"] != "APPROVED": return None elif fetched_mto_shipment["status"] == "APPROVED" and payload["status"] != "DIVERSION_REQUESTED": return None elif fetched_mto_shipment["status"] in ["DRAFT", "REJECTED", "CANCELED"]: return None headers = {"content-type": "application/json", "If-Match": fetched_mto_shipment["eTag"]} resp = self.client.patch( support_path(f"/mto-shipments/{fetched_mto_shipment['id']}/status"), name=support_path("/mto-shipments/{mtoShipmentID}/status"), data=json.dumps(payload), headers=headers, ) new_mto_shipment, success = 
check_response(resp, "updateMTOShipmentStatus", payload) if success: self.update_stored(MTO_SHIPMENT, mto_shipment, new_mto_shipment) return mto_shipment @tag(MTO_SHIPMENT, "updateMTOShipmentStatus", "expectedFailure") # run this task less frequently than the others since this is testing an expected failure @task(1) def update_mto_shipment_with_invalid_status(self, overrides=None): # If id was provided, get that specific one. Else get any stored one. object_id = overrides.get("id") if overrides else None mto_shipment = self.get_stored(MTO_SHIPMENT, object_id) if not mto_shipment: logger.debug("updateMTOShipmentStatus: ⚠️ No mto_shipment found.") return None # can't run this task overrides_local = {"status": "DRAFT"} # Merge local overrides with passed-in overrides if overrides: overrides_local.update(overrides) # Generate fake payload based on the endpoint's required fields payload = self.fake_request("/mto-shipments/{mtoShipmentID}/status", "patch", SUPPORT_API_KEY, overrides_local) payload["status"] = "DRAFT" headers = {"content-type": "application/json", "If-Match": mto_shipment["eTag"]} resp = self.client.patch( support_path(f"/mto-shipments/{mto_shipment['id']}/status"), name=support_path("/mto-shipments/{mtoShipmentID}/status — expected failure"), data=json.dumps(payload), headers=headers, ) check_response(resp, "updateMTOShipmentStatusFailure", payload, "422") @tag(MOVE_TASK_ORDER, "createMoveTaskOrder") @task(2) def create_move_task_order(self): # Check that we have all required ID values for this endpoint: if not self.has_all_default_mto_ids(): logger.debug(f"⚠️ Missing createMoveTaskOrder IDs for environment {self.user.env}") return overrides = { "contractorID": self.default_mto_ids["contractorID"], # Moves that are in DRAFT or CANCELED mode cannot be used by the rest of the load testing "status": "SUBMITTED", # If this date is set here, the status will not properly transition to APPROVED "availableToPrimeAt": None, "order": { "status": "APPROVED", "tac": 
"F8J1", # We need these objects to exist "destinationDutyStationID": self.default_mto_ids["destinationDutyStationID"], "originDutyStationID": self.default_mto_ids["originDutyStationID"], "uploadedOrdersID": self.default_mto_ids["uploadedOrdersID"], # To avoid the overrides being inserted into these nested objects... "entitlement": {}, "customer": {}, }, } payload = self.fake_request("/move-task-orders", "post", SUPPORT_API_KEY, overrides) headers = {"content-type": "application/json"} resp = self.client.post( support_path("/move-task-orders"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs ) json_body, success = check_response(resp, "createMoveTaskOrder", payload) if not success: return # no point continuing if it didn't work out move_task_order_id = json_body["id"] e_tag = json_body["eTag"] headers["if-match"] = e_tag resp = self.client.patch( support_path(f"/move-task-orders/{move_task_order_id}/available-to-prime"), name=support_path("/move-task-orders/{moveTaskOrderID}/available-to-prime"), headers=headers, **self.user.cert_kwargs, ) new_mto, success = check_response(resp, "makeMoveTaskOrderAvailable") if success: self.add_stored(MOVE_TASK_ORDER, new_mto) return new_mto # @tag(MTO_SERVICE_ITEM, "updateMTOServiceItemStatus") @task(2) def update_mto_service_item_status(self, overrides=None): # If id was provided, get that specific one. Else get any stored one. 
object_id = overrides.get("id") if overrides else None mto_service_item = self.get_stored(MTO_SERVICE_ITEM, object_id) # if we don't have an mto shipment we can't run this task if not mto_service_item: logger.debug("updateMTOServiceItemStatus: ⚠️ No mto_service_item found") return None payload = self.fake_request("/mto-service-items/{mtoServiceItemID}/status", "patch", SUPPORT_API_KEY, overrides) headers = {"content-type": "application/json", "If-Match": mto_service_item["eTag"]} resp = self.client.patch( support_path(f"/mto-service-items/{mto_service_item['id']}/status"), name=support_path("/mto-service-items/{mtoServiceItemID}/status"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs, ) mto_service_item, success = check_response(resp, "updateMTOServiceItemStatus", payload) if success: self.update_stored(MTO_SERVICE_ITEM, mto_service_item, mto_service_item) return mto_service_item @tag(PAYMENT_REQUEST, "updatePaymentRequestStatus") @task(2) def update_payment_request_status(self, overrides=None): # If id was provided, get that specific one. Else get any stored one. 
object_id = overrides.get("id") if overrides else None payment_request = self.get_stored(PAYMENT_REQUEST, object_id) if not payment_request: return payload = self.fake_request("/payment-requests/{paymentRequestID}/status", "patch", SUPPORT_API_KEY) headers = {"content-type": "application/json", "If-Match": payment_request["eTag"]} resp = self.client.patch( support_path(f"/payment-requests/{payment_request['id']}/status"), name=support_path("/payment-requests/{paymentRequestID}/status"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs, ) new_payment_request, success = check_response(resp, "updatePaymentRequestStatus", payload) if success: self.update_stored(PAYMENT_REQUEST, payment_request, new_payment_request) return new_payment_request @tag(MOVE_TASK_ORDER, "getMoveTaskOrder") @task(2) def get_move_task_order(self, overrides=None): # If id was provided, get that specific one. Else get any stored one. object_id = overrides.get("id") if overrides else None move_task_order = self.get_stored(MOVE_TASK_ORDER, object_id) if not move_task_order: logger.debug("getMoveTaskOrder: ⚠️ No move_task_order found") return headers = {"content-type": "application/json"} resp = self.client.get( support_path(f"/move-task-orders/{move_task_order['id']}"), name=support_path("/move-task-orders/{moveTaskOrderID}"), headers=headers, **self.user.cert_kwargs, ) new_mto, success = check_response(resp, "getMoveTaskOrder") if success: self.update_stored(MOVE_TASK_ORDER, move_task_order, new_mto) return new_mto
44.384275
120
0.637366
4,748
40,079
5.20198
0.117734
0.043645
0.02158
0.022309
0.560266
0.513584
0.461881
0.39698
0.376331
0.359731
0
0.004529
0.267247
40,079
902
121
44.433481
0.835467
0.211058
0
0.372057
0
0
0.21942
0.100408
0
0
0
0
0
1
0.045526
false
0
0.012559
0.00471
0.142857
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
562d12ab0b477e0d9a560f4a82cf032a9804cac0
848
py
Python
BubbleSort.py
R-Magnotti/Python.Interesting_Sorting_Algortithms
e10f8105499ac2c90923def7c324c46acda920aa
[ "MIT" ]
null
null
null
BubbleSort.py
R-Magnotti/Python.Interesting_Sorting_Algortithms
e10f8105499ac2c90923def7c324c46acda920aa
[ "MIT" ]
null
null
null
BubbleSort.py
R-Magnotti/Python.Interesting_Sorting_Algortithms
e10f8105499ac2c90923def7c324c46acda920aa
[ "MIT" ]
null
null
null
####################################################################################################################### # Author: Richie Magnotti # # Goal of code is to demonstrate sorting via bubble sort algorithm ####################################################################################################################### def bubble_sort(arr): n = len(arr) print("before", arr) for i in range(0, n): print("i", i, "item", arr[i]) for j in range(0,n-1): print("j", j, "item", arr[j]) if arr[j]>arr[j+1]: temp = arr[j] arr[j] = arr[j+1] arr[j+1] = temp print("after", arr) def main(): A = [0,5,3,13,1,7,4,3,7,6,5,9,2,7] bubble_sort(A) if __name__ == "__main__": main()
28.266667
119
0.325472
94
848
2.829787
0.446809
0.105263
0.078947
0.090226
0.067669
0
0
0
0
0
0
0.034258
0.277123
848
29
120
29.241379
0.399674
0.103774
0
0
0
0
0.060797
0
0
0
0
0
0
1
0.117647
false
0
0
0
0.117647
0.235294
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
562ee8fc837ffcba58e8885e34e37a46643ca002
3,350
py
Python
lib/cell.py
rafelafrance/boyd-bird-journal
289744703220015cb61d22a8e6f8eff0040b296f
[ "MIT" ]
null
null
null
lib/cell.py
rafelafrance/boyd-bird-journal
289744703220015cb61d22a8e6f8eff0040b296f
[ "MIT" ]
5
2017-11-02T17:12:31.000Z
2021-04-21T19:07:39.000Z
lib/cell.py
rafelafrance/boyd-bird-journal
289744703220015cb61d22a8e6f8eff0040b296f
[ "MIT" ]
null
null
null
"""Data and functions for dealing with cell contents.""" # pylint: disable=no-member, too-many-instance-attributes, too-many-arguments import numpy as np from skimage import util from skimage.transform import probabilistic_hough_line from lib.util import Crop, Offset, intersection class Cell: """Data and functions for dealing with cell contents.""" row_label_threshold = 20 col_label_threshold = 15 crop = Crop(top=4, bottom=4, left=4, right=4) forward_slashes = np.deg2rad(np.linspace(65.0, 25.0, num=161)) label_lines = np.deg2rad(np.linspace(0.0, 65.0, num=181)) label_lines += np.deg2rad(np.linspace(-65.0, 0.0, num=181)) def __init__(self, grid, top=None, bottom=None, left=None, right=None): """ Build a cell from the 4 surrounding grid lines. We will also get the for corners of the cell by finding the intersection of the grid lines. """ self.image = grid.edges self.top_left = intersection(top, left) self.bottom_left = intersection(bottom, left) self.top_right = intersection(top, right) self.bottom_right = intersection(bottom, right) self.width = self.top_right.x - self.top_left.x self.height = self.bottom_left.y - self.top_left.y self.offset = Offset(x=grid.offset.x + self.top_left.x, y=grid.offset.y + self.top_left.y) def interior(self, crop=None): """ Get the interior image of the cell. Sometimes we will want to crop the interior to try and remove the surrounding grid lines. That is, we want the cell contents, not the grid lines. 
""" top = max(0, self.top_left.y, self.top_right.y) bottom = max(0, self.image.shape[0] - min(self.bottom_left.y, self.bottom_right.y)) left = max(0, self.top_left.x, self.bottom_left.x) right = max(0, self.image.shape[1] - min(self.top_right.x, self.bottom_right.x)) inside = util.crop(self.image, ((top, bottom), (left, right))) if crop and inside.shape[1] > (crop.right + crop.left) \ and inside.shape[0] > (crop.bottom + crop.top): inside = util.crop( inside, ((crop.top, crop.bottom), (crop.left, crop.right))) return inside def is_label(self, crop=None): """Determine if the cell is a column label.""" if not crop: crop = self.crop inside = self.interior(crop=crop) lines = self.has_line(self.label_lines, line_length=12) if not min(inside.shape): return False return bool(len(lines)) or np.mean(inside) > self.col_label_threshold def has_line(self, angles=None, line_length=15): """Determine if the cell has a line at any of the given angles.""" return probabilistic_hough_line( self.interior(crop=self.crop), line_length=line_length, line_gap=2, theta=angles) def get_patch(self): """Get the cell patch for output.""" width = self.top_right.x - self.top_left.x height = self.bottom_left.y - self.top_left.y offset_x = self.offset.x offset_y = self.offset.y return (offset_x, offset_y), width, height
37.222222
77
0.617015
484
3,350
4.161157
0.233471
0.048659
0.049156
0.023833
0.217974
0.145482
0.10427
0.10427
0.062562
0
0
0.020492
0.271642
3,350
89
78
37.640449
0.804918
0.189552
0
0
0
0
0
0
0
0
0
0
0
1
0.090909
false
0
0.072727
0
0.363636
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0