hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
eb8f42714ac932cdfc292aeae7792fbd941d3a70
1,316
py
Python
fountain/linguistics.py
oaao/fountain-of-knuth
626be5ef98370e59c273a93a10614a4438e1fb22
[ "MIT" ]
null
null
null
fountain/linguistics.py
oaao/fountain-of-knuth
626be5ef98370e59c273a93a10614a4438e1fb22
[ "MIT" ]
null
null
null
fountain/linguistics.py
oaao/fountain-of-knuth
626be5ef98370e59c273a93a10614a4438e1fb22
[ "MIT" ]
null
null
null
"""Linguistic data for statistical pre-processing. Frequency statistics, as seen in data/, are provided by: a) Mark Mayzner, 1965: ------------------------------------------------------------------------------ METHODOLOGY: Starting at a random place in a given newspapers/magazines/book, record three- to seven-letter words until 200 words are selected. Repeat 100x. SAMPLE SIZE: 20,000 https://archive.is/wip/u9vOA (as seen in tables: https://archive.is/BJEQt) ------------------------------------------------------------------------------ b) Peter Norvig, 2012: ------------------------------------------------------------------------------ METHODOLOGY: Using the Google Books Ngrams dataset (English 20120701), perform the analysis on the entire corpus, with no sample size or length restrictions. Discard any word with fewer than 100,000 mentions. Mayzner had actually reached out to Norvig requesting this update to his work! SAMPLE SIZE: 743,842,922,321 (unique: 97,565) https://archive.is/wip/SHwcy Data available by substituting {a-z} for the final character in the URL: https://storage.googleapis.com/books/ngrams/books/googlebooks-eng-all-1gram-20120701-a.gz ------------------------------------------------------------------------------ """ import string LETTERS = tuple(string.ascii_lowercase)
38.705882
89
0.592705
158
1,316
4.93038
0.727848
0.038511
0.053915
0.043646
0
0
0
0
0
0
0
0.050847
0.103343
1,316
33
90
39.878788
0.609322
0.951368
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
eb9292b5b15148056f496da1f3e38d544c9ca3dc
846
py
Python
Active-Contour-Loss.py
xuuuuuuchen/Active-Contour-Loss
f76737b92a2bea558f5a960bb1ef00bbe09b8457
[ "MIT" ]
189
2019-06-11T02:13:53.000Z
2022-03-30T15:41:47.000Z
Active-Contour-Loss.py
xuuuuuuchen/Active-Contour-Loss
f76737b92a2bea558f5a960bb1ef00bbe09b8457
[ "MIT" ]
15
2019-06-29T19:22:07.000Z
2021-07-19T03:26:51.000Z
Active-Contour-Loss.py
xuuuuuuchen/Active-Contour-Loss
f76737b92a2bea558f5a960bb1ef00bbe09b8457
[ "MIT" ]
28
2019-07-15T12:52:52.000Z
2022-03-07T16:50:02.000Z
from keras import backend as K import numpy as np def Active_Contour_Loss(y_true, y_pred): """ lenth term """ x = y_pred[:,:,1:,:] - y_pred[:,:,:-1,:] # horizontal and vertical directions y = y_pred[:,:,:,1:] - y_pred[:,:,:,:-1] delta_x = x[:,:,1:,:-2]**2 delta_y = y[:,:,:-2,1:]**2 delta_u = K.abs(delta_x + delta_y) lenth = K.mean(K.sqrt(delta_u + 0.00000001)) # equ.(11) in the paper """ region term """ C_1 = np.ones((256, 256)) C_2 = np.zeros((256, 256)) region_in = K.abs(K.mean( y_pred[:,0,:,:] * ((y_true[:,0,:,:] - C_1)**2) ) ) # equ.(12) in the paper region_out = K.abs(K.mean( (1-y_pred[:,0,:,:]) * ((y_true[:,0,:,:] - C_2)**2) )) # equ.(12) in the paper lambdaP = 1 # lambda parameter could be various. mu = 1 # mu parameter could be various. return lenth + lambdaP * (mu * region_in + region_out)
24.171429
105
0.574468
149
846
3.087248
0.342282
0.076087
0.052174
0.030435
0.178261
0.178261
0.056522
0
0
0
0
0.072674
0.186761
846
34
106
24.882353
0.59593
0.211584
0
0
0
0
0
0
0
0
0
0
0
1
0.0625
false
0
0.125
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
eb9398224cfe1d36acebb90ab93ee6b57c3b5626
7,584
py
Python
edbo/objective.py
v13inc/edbo
dee72777a07594f7940bd03f0049a7d9be7c2266
[ "MIT" ]
70
2020-12-11T03:13:09.000Z
2022-03-16T21:17:26.000Z
edbo/objective.py
v13inc/edbo
dee72777a07594f7940bd03f0049a7d9be7c2266
[ "MIT" ]
8
2021-01-15T14:24:00.000Z
2022-01-16T14:43:52.000Z
edbo/objective.py
v13inc/edbo
dee72777a07594f7940bd03f0049a7d9be7c2266
[ "MIT" ]
18
2020-11-24T00:37:49.000Z
2022-03-13T15:52:51.000Z
# -*- coding: utf-8 -*- # Imports import pandas as pd from .pd_utils import load_csv_or_excel from .pd_utils import load_experiment_results from .pd_utils import to_torch from .math_utils import standard # Objective function class class objective: """Objective funciton data container and operations. Note ---- Objective internally standardizes response values to zero mean and unit variance. """ def __init__(self, results_path=None, results=pd.DataFrame(), domain_path=None, domain=pd.DataFrame(), exindex_path=None, exindex=pd.DataFrame(), target=-1, gpu=False, computational_objective=None): """ Parameters ---------- results_path : str, optional Path to experimental results. results : pandas.DataFrame, optional Experimental results with X values matching the domain. domain_path : str, optional Path to experimental domain. Note ---- A domain_path or domain are required. domain : pandas.DataFrame, optional Experimental domain specified as a matrix of possible configurations. exindex_path : str, optional Path to experiment results index if available. exindex : pandas.DataFrame, optional Experiment results index matching domain format. Used as lookup table for simulations. target : str Column label of optimization objective. If set to -1, the last column of the DataFrame will be set as the target. gpu : bool Carry out GPyTorch computations on a GPU if available. computational_objective : function, optional Function to be optimized for computational objectives. 
""" # Initialize self.results_path = results_path self.results = results self.domain_path = domain_path self.domain = domain self.exindex_path = exindex_path self.exindex = exindex self.target = target self.gpu = gpu self.computational_objective = computational_objective # Load domain if domain_path != None: self.domain = load_csv_or_excel(self.domain_path) self.domain.reset_index(drop=True) # Load results if type(self.results) == type(pd.DataFrame()) and len(self.results) > 0: if target == -1: self.target = self.results.columns.values[-1] elif results_path != None: data = load_experiment_results(self.results_path) self.results = data if target == -1: self.target = self.results.columns.values[-1] # Load experiment index if exindex_path != None: self.exindex = load_csv_or_excel(exindex_path) if target == -1: self.target = self.exindex.columns.values[-1] if type(exindex) == type(pd.DataFrame()) and len(exindex) > 0: if target == -1: self.target = exindex.columns.values[-1] # Standardize targets (0 mean and unit variance) self.scaler = standard() self.results = self.scaler.standardize_target(self.results, self.target) # Torch tensors and labeld external data if len(self.results) > 0: self.X = to_torch(self.results.drop(self.target,axis=1), gpu=gpu) self.y = to_torch(self.results[self.target], gpu=gpu).view(-1) index = ['external' + str(i) for i in range(len(self.results))] self.results = pd.DataFrame(self.results.values, columns=self.results.columns, index=index) else: self.X = to_torch([], gpu=gpu) self.y = to_torch([], gpu=gpu) # Get results from the index def get_results(self, domain_points, append=False): """Returns target values corresponding to domain_points. Parameters ---------- domain_points : pandas.DataFrame Points from experiment index to retrieve responses for. If the objective is a computational function, run function and return responses. append : bool If true append points to results and update X and y. 
Returns ---------- pandas.DataFrame Proposed experiments. """ # Computational objective if self.computational_objective != None: new_results = [] for point in domain_points.values: result = self.computational_objective(point) new_results.append(result) batch = domain_points.copy() batch[self.target] = new_results if append == True: # Unstandardize results and append to know outcomes results = self.scaler.unstandardize_target(self.results, self.target) data = pd.concat([results, batch]) # Restandardize self.results = self.scaler.standardize_target(data, self.target) self.X = to_torch(self.results.drop(self.target,axis=1), gpu=self.gpu) self.y = to_torch(self.results[self.target], gpu=self.gpu).view(-1) return batch # Human in the loop objective if type(self.exindex) == type(None): return print("edbo bot: Error no experiment index") # Retrieve domain points from index index = self.exindex.drop(self.target, axis=1) union_index = pd.merge( index.reset_index(), domain_points, how='inner' )['index'] batch = self.exindex.iloc[list(union_index)] # Append to results if append == True: # Unstandardize results and append to know outcomes results = self.scaler.unstandardize_target(self.results, self.target) data = pd.concat([results, batch]) # Restandardize self.results = self.scaler.standardize_target(data, self.target) self.X = to_torch(self.results.drop(self.target,axis=1), gpu=self.gpu) self.y = to_torch(self.results[self.target], gpu=self.gpu).view(-1) return batch # Clear results def clear_results(self): """Clear results and reset X and y. Returns ---------- None """ self.results = pd.DataFrame() self.X = to_torch([], gpu=self.gpu) self.y = to_torch([], gpu=self.gpu) # Return unstandardized results def results_input(self): """Return unstandardized results. Returns ---------- pandas.DataFrame Unstandardized results. """ if len(self.results) == 0: results = self.results else: results = self.scaler.unstandardize_target(self.results, self.target) return results
33.706667
86
0.548655
793
7,584
5.147541
0.190416
0.080843
0.040421
0.036012
0.323126
0.259922
0.217785
0.212396
0.212396
0.198432
0
0.004788
0.366561
7,584
225
87
33.706667
0.844921
0.2827
0
0.252747
0
0
0.010759
0
0
0
0
0
0
1
0.043956
false
0
0.054945
0
0.153846
0.010989
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
eb94674e627c9637e3631827dccad31e43dbeafa
1,983
py
Python
api/project/modules/geoprocessing/utils.py
wilsoncwc/mapbox-osmnx
38031e31ad3b97e941a773c99b1f9b544ef93fae
[ "MIT" ]
null
null
null
api/project/modules/geoprocessing/utils.py
wilsoncwc/mapbox-osmnx
38031e31ad3b97e941a773c99b1f9b544ef93fae
[ "MIT" ]
null
null
null
api/project/modules/geoprocessing/utils.py
wilsoncwc/mapbox-osmnx
38031e31ad3b97e941a773c99b1f9b544ef93fae
[ "MIT" ]
null
null
null
import osmnx as ox import networkx as nx def gdf_to_nx(gdf_network): # generate graph from GeoDataFrame of LineStrings net = nx.Graph() net.graph['crs'] = gdf_network.crs fields = list(gdf_network.columns) for _, row in gdf_network.iterrows(): first = row.geometry.coords[0] last = row.geometry.coords[-1] data = [row[f] for f in fields] attributes = dict(zip(fields, data)) net.add_edge(first, last, **attributes) return net def add_traveltime_colors(G, center_node, mode): travel_speed = 0 useRoadSpeed = False if mode == 'drive': useRoadSpeed = True elif mode == 'bike': travel_speed = 15 elif mode == 'walk': travel_speed = 5 else: return (None, 'Invalid mode. Try one of "bike", "drive", or "walk".') trip_times = [5, 10, 15, 20, 25] # minutes # add an edge attribute for travel time in minutes required to traverse each edge if useRoadSpeed: G = ox.add_edge_speeds(G) else: # set constant travel speed for all edges nx.set_edge_attributes(G, travel_speed, 'speed_kph') G = ox.add_edge_travel_times(G) # computes travel time in seconds travel_times = nx.get_edge_attributes(G, "travel_time") for u, v, k, data in G.edges(data=True, keys=True): data['time'] = travel_times[(u, v, k)] / 60 # convert to min # get one color for each isochrone iso_colors = ox.plot.get_colors(n=len(trip_times), cmap='plasma', start=0, return_hex=True) # color the edges based on subgraph edge_colors = {} for trip_time, color in zip(sorted(trip_times, reverse=True), iso_colors): subgraph = nx.ego_graph(G, center_node, radius=trip_time, distance='time') for edge in subgraph.edges: edge_colors[edge] = color nx.set_edge_attributes(G, edge_colors, 'color') # project graph back to the standard crs G = ox.project_graph(G, 'WGS84') return (G, '')
34.789474
95
0.646999
291
1,983
4.254296
0.391753
0.044426
0.036349
0.016155
0.03231
0
0
0
0
0
0
0.013324
0.243066
1,983
57
96
34.789474
0.811459
0.165406
0
0.047619
1
0
0.068044
0
0
0
0
0
0
1
0.047619
false
0
0.047619
0
0.166667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
eb951abd2dd3908e69458a1b26cb28c1b3944745
664
py
Python
p002/solution.py
jcbrockschmidt/project_euler
49576d24f485eea1a21c8111e006a5c9ba1701d7
[ "MIT" ]
null
null
null
p002/solution.py
jcbrockschmidt/project_euler
49576d24f485eea1a21c8111e006a5c9ba1701d7
[ "MIT" ]
null
null
null
p002/solution.py
jcbrockschmidt/project_euler
49576d24f485eea1a21c8111e006a5c9ba1701d7
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 from time import time def fib_sum(limit): prev2 = 1 prev1 = 2 fib_sum = 0 while prev2 < limit: # There is probably a more clever solution that skips the calculation # of every 1st and 3rd element. # For now, we will just cherry-pick the even values. if prev1 % 2 == 0: fib_sum += prev1 old_prev1 = prev1 prev1 = prev1 + prev2 prev2 = old_prev1 return fib_sum if __name__ == '__main__': start = time() solu = fib_sum(4e6) elapse = time() - start print('Solution: {}'.format(solu)) print('Solution found in {:.8f}s'.format(elapse))
25.538462
77
0.593373
92
664
4.119565
0.630435
0.079156
0.079156
0
0
0
0
0
0
0
0
0.05
0.307229
664
25
78
26.56
0.773913
0.256024
0
0
0
0
0.091837
0
0
0
0
0
0
1
0.055556
false
0
0.055556
0
0.166667
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
eb9525194e5e12addc0e82651317e86bd9db3fd2
15,221
py
Python
source/Gui.py
Faraphel/MKWF-Install
8a86cae630da6702bf65b15340dc2db3e0abc182
[ "Apache-2.0" ]
1
2022-03-01T10:59:11.000Z
2022-03-01T10:59:11.000Z
source/Gui.py
Faraphel/MKWF-Install
8a86cae630da6702bf65b15340dc2db3e0abc182
[ "Apache-2.0" ]
null
null
null
source/Gui.py
Faraphel/MKWF-Install
8a86cae630da6702bf65b15340dc2db3e0abc182
[ "Apache-2.0" ]
3
2021-06-15T17:23:36.000Z
2021-07-07T11:45:46.000Z
from tkinter import filedialog, ttk, messagebox from tkinter import * import traceback import requests import zipfile import json import os from source.Game import Game, RomAlreadyPatched, InvalidGamePath, InvalidFormat, in_thread, VERSION_FILE_URL from source.Option import Option from source.definition import get_version_from_string with open("./translation.json", encoding="utf-8") as f: translation_dict = json.load(f) class Gui: def __init__(self): """ Initialize program Gui """ self.root = Tk() self.option = Option() self.option.load_from_file("./option.json") self.game = Game(gui=self) self.game.ctconfig.load_ctconfig_file("./ct_config.json") self.game.ctconfig.all_version.sort(key=get_version_from_string) latest_version: str = self.game.ctconfig.all_version[-1] self.is_dev_version = False # Is this installer version a dev ? self.stringvar_language = StringVar(value=self.option.language) self.stringvar_game_format = StringVar(value=self.option.format) self.boolvar_disable_download = BooleanVar(value=self.option.disable_download) self.boolvar_del_track_after_conv = BooleanVar(value=self.option.del_track_after_conv) self.boolvar_dont_check_for_update = BooleanVar(value=self.option.dont_check_for_update) self.intvar_process_track = IntVar(value=self.option.process_track) self.boolvar_use_1star_track = BooleanVar(value=True) self.boolvar_use_2star_track = BooleanVar(value=True) self.boolvar_use_3star_track = BooleanVar(value=True) self.stringvar_mark_track_from_version = StringVar(value=latest_version) self.root.title(self.translate("MKWFaraphel Installer")) self.root.resizable(False, False) self.root.iconbitmap(bitmap="./icon.ico") if not(self.boolvar_dont_check_for_update.get()): self.check_update() self.menu_bar = Menu(self.root) self.root.config(menu=self.menu_bar) self.menu_language = Menu(self.menu_bar, tearoff=0) self.menu_bar.add_cascade(label=self.translate("Language"), menu=self.menu_language) self.menu_language.add_radiobutton(label="Français", 
variable=self.stringvar_language, value="fr", command=lambda: self.option.edit("language", "fr", need_restart=True)) self.menu_language.add_radiobutton(label="English", variable=self.stringvar_language, value="en", command=lambda: self.option.edit("language", "en", need_restart=True)) self.menu_format = Menu(self.menu_bar, tearoff=0) self.menu_bar.add_cascade(label=self.translate("Format"), menu=self.menu_format) self.menu_format.add_radiobutton(label=self.translate("FST (Directory)"), variable=self.stringvar_game_format, value="FST", command=lambda: self.option.edit("format", "FST")) self.menu_format.add_radiobutton(label="ISO", variable=self.stringvar_game_format, value="ISO", command=lambda: self.option.edit("format", "ISO")) self.menu_format.add_radiobutton(label="CISO", variable=self.stringvar_game_format, value="CISO", command=lambda: self.option.edit("format", "CISO")) self.menu_format.add_radiobutton(label="WBFS", variable=self.stringvar_game_format, value="WBFS", command=lambda: self.option.edit("format", "WBFS")) self.menu_trackselection = Menu(self.menu_bar, tearoff=0) self.menu_bar.add_cascade(label=self.translate("Track selection"), menu=self.menu_trackselection) self.menu_trackselection.add_checkbutton(label=self.translate("Select"," 1 ","star"), variable=self.boolvar_use_1star_track) self.menu_trackselection.add_checkbutton(label=self.translate("Select"," 2 ","stars"), variable=self.boolvar_use_2star_track) self.menu_trackselection.add_checkbutton(label=self.translate("Select"," 3 ","stars"), variable=self.boolvar_use_3star_track) self.menu_trackselection.add_separator() self.menu_marktrackversion = Menu(self.menu_trackselection, tearoff=0) self.menu_trackselection.add_cascade(label=self.translate("Mark all tracks from version"), menu=self.menu_marktrackversion) self.menu_marktrackversion.add_radiobutton(label=self.translate("None"), variable=self.stringvar_mark_track_from_version, value="None") for version in self.game.ctconfig.all_version: 
self.menu_marktrackversion.add_radiobutton(label=f"v{version}", variable=self.stringvar_mark_track_from_version, value=version) self.menu_advanced = Menu(self.menu_bar, tearoff=0) self.menu_bar.add_cascade(label=self.translate("Advanced"), menu=self.menu_advanced) self.menu_advanced.add_checkbutton(label=self.translate("Disable downloads"), variable=self.boolvar_disable_download, command=lambda: self.option.edit("disable_download", self.boolvar_disable_download)) self.menu_advanced.add_checkbutton(label=self.translate("Delete track after wu8 to szs conversion"), variable=self.boolvar_del_track_after_conv, command=lambda: self.option.edit("del_track_after_conv", self.boolvar_del_track_after_conv)) self.menu_advanced.add_checkbutton(label=self.translate("Don't check for update"), variable=self.boolvar_dont_check_for_update, command=lambda: self.option.edit("dont_check_for_update", self.boolvar_dont_check_for_update)) self.menu_advanced.add_separator() self.menu_trackconvprocess = Menu(self.menu_advanced, tearoff=0) self.menu_advanced.add_cascade(label=self.translate("Number of track conversion process"), menu=self.menu_trackconvprocess) for cpu in range(1, 9): self.menu_trackconvprocess.add_radiobutton(label=self.translate(str(cpu), " ", "process"), variable=self.intvar_process_track, value=cpu, command=lambda: self.option.edit("process_track", self.intvar_process_track)) self.frame_language = Frame(self.root) self.frame_language.grid(row=1, column=1, sticky="E") self.frame_game_path = LabelFrame(self.root, text=self.translate("Original game")) self.frame_game_path.grid(row=2, column=1) entry_game_path = Entry(self.frame_game_path, width=50) entry_game_path.grid(row=1, column=1, sticky="NEWS") def select_path(): path = filedialog.askopenfilename(filetypes=((self.translate("Wii game"), r"*.iso *.wbfs main.dol *.wia *.ciso"),)) if os.path.exists(path): entry_game_path.delete(0, END) entry_game_path.insert(0, path) Button(self.frame_game_path, text="...", relief=RIDGE, 
command=select_path).grid(row=1, column=2, sticky="NEWS") self.frame_game_path_action = Frame(self.frame_game_path) # Extract and do everything button self.frame_game_path_action.grid(row=2, column=1, columnspan=2, sticky="NEWS") self.frame_game_path_action.columnconfigure(1, weight=1) @in_thread def use_path(): nothread_use_path() def nothread_use_path(): self.frame_action.grid_forget() try: self.game.set_path(entry_game_path.get()) self.progress(show=True, indeter=True, statut=self.translate("Extracting the game...")) self.game.extract() self.frame_action.grid(row=3, column=1, sticky="NEWS") except RomAlreadyPatched: messagebox.showerror(self.translate("Error"), self.translate("This game is already modded")) raise RomAlreadyPatched except InvalidGamePath: messagebox.showerror(self.translate("Error"), self.translate("The file path in invalid")) raise InvalidGamePath except InvalidFormat: messagebox.showerror(self.translate("Error"), self.translate("This game's format is invalid")) raise InvalidFormat except: self.log_error() raise Exception finally: self.progress(show=False) self.button_game_extract = Button(self.frame_game_path_action, text=self.translate("Extract file"), relief=RIDGE, command=use_path) self.button_game_extract.grid(row=1, column=1, sticky="NEWS") @in_thread def do_everything(): nothread_use_path() self.game.nothread_patch_file() self.game.nothread_install_mod() self.button_do_everything = Button(self.frame_game_path_action, text=self.translate("Do everything"), relief=RIDGE, command=do_everything) self.button_do_everything.grid(row=1, column=2, sticky="NEWS") self.frame_action = LabelFrame(self.root, text=self.translate("Action")) self.button_prepare_file = Button(self.frame_action, text=self.translate("Prepare files"), relief=RIDGE, command=lambda: self.game.patch_file(), width=45) self.button_prepare_file.grid(row=1, column=1, columnspan=2, sticky="NEWS") self.button_install_mod = Button(self.frame_action, text=self.translate("Install mod"), 
relief=RIDGE, command=lambda: self.game.install_mod(), width=45) # Install mod button will only appear after prepare file step self.progressbar = ttk.Progressbar(self.root) self.progresslabel = Label(self.root) def check_update(self) -> None: """ Check if an update is available """ try: github_version_data = requests.get(VERSION_FILE_URL, allow_redirects=True).json() with open("./version", "rb") as f: local_version_data = json.load(f) local_version = get_version_from_string(f"{local_version_data['version']}.{local_version_data['subversion']}") github_version = get_version_from_string(f"{github_version_data['version']}.{github_version_data['subversion']}") if github_version > local_version: # if github version is newer than local version if messagebox.askyesno( self.translate("Update available !"), self.translate("An update is available, do you want to install it ?", f"\n\nVersion : {local_version} -> {github_version}\n" f"Changelog :\n{github_version_data['changelog']}")): if not (os.path.exists("./Updater/Updater.exe")): dl = requests.get(github_version_data["updater_bin"], allow_redirects=True) with open("./download.zip", "wb") as file: print(self.translate("Downloading the Updater...")) file.write(dl.content) print(self.translate("end of the download, extracting...")) with zipfile.ZipFile("./download.zip") as file: file.extractall("./Updater/") print(self.translate("finished extracting")) os.remove("./download.zip") print(self.translate("starting application...")) os.startfile(os.path.realpath("./Updater/Updater.exe")) elif local_version > github_version: self.is_dev_version = True except requests.ConnectionError: messagebox.showwarning(self.translate("Warning"), self.translate("Can't connect to internet. 
Download will be disabled.")) self.option.disable_download = True except: self.log_error() def log_error(self) -> None: """ When an error occur, will show it in a messagebox and write it in error.log """ error = traceback.format_exc() with open("./error.log", "a") as f: f.write(f"---\n" f"For game version : {self.game.ctconfig.version}\n" f"./file/ directory : {os.listdir('./file/')}" f"GAME/files/ information : {self.game.path, self.game.region}" f"{error}\n") messagebox.showerror(self.translate("Error"), self.translate("An error occured", " :", "\n", error, "\n\n")) def progress(self, show: bool = None, indeter: bool = None, step: int = None, statut: str = None, max: int = None, add: int = None) -> None: """ configure the progress bar shown when doing a task :param show: show or hide the progress bar :param indeter: if indeter, the progress bar will do a infinite loop animation :param step: set the progress of the bar :param statut: text shown under the progress bar :param max: set the maximum step :param add: add to step of the progress bar """ if indeter is True: self.progressbar.config(mode="indeterminate") self.progressbar.start(50) elif indeter is False: self.progressbar.config(mode="determinate") self.progressbar.stop() if show is True: self.state_button(enable=False) self.progressbar.grid(row=100, column=1, sticky="NEWS") self.progresslabel.grid(row=101, column=1, sticky="NEWS") elif show is False: self.state_button(enable=True) self.progressbar.grid_forget() self.progresslabel.grid_forget() if statut: self.progresslabel.config(text=statut) if step: self.progressbar["value"] = step if max: self.progressbar["maximum"] = max self.progressbar["value"] = 0 if add: self.progressbar.step(add) def state_button(self, enable: bool = True) -> None: """ used to enable or disable button when doing task :param enable: are the button enabled ? 
""" button = [ self.button_game_extract, self.button_install_mod, self.button_prepare_file, self.button_do_everything ] for widget in button: if enable: widget.config(state=NORMAL) else: widget.config(state=DISABLED) def translate(self, *texts, lang: str = None) -> str: """ translate text into an another language in translation.json file :param texts: all text to convert :param lang: force a destination language to convert track :return: translated text """ if lang is None: lang = self.stringvar_language.get() elif lang == "F": lang = "fr" elif lang == "G": lang = "ge" elif lang == "I": lang = "it" elif lang == "S": lang = "sp" if lang in translation_dict: _lang_trad = translation_dict[lang] translated_text = "" for text in texts: if text in _lang_trad: translated_text += _lang_trad[text] else: translated_text += text return translated_text return "".join(texts) # if no translation language is found
52.66782
245
0.648183
1,856
15,221
5.132004
0.167026
0.036115
0.028346
0.024147
0.335118
0.263937
0.147192
0.110341
0.074961
0.039685
0
0.005267
0.239143
15,221
288
246
52.850694
0.817201
0.062545
0
0.046512
0
0
0.115549
0.020433
0
0
0
0
0
1
0.046512
false
0
0.046512
0
0.106977
0.018605
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
eb967e24d2a33e6b84541055a8e1fda35b50d6bc
1,236
py
Python
FastSimulation/HighLevelTrigger/python/full2fast.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
852
2015-01-11T21:03:51.000Z
2022-03-25T21:14:00.000Z
FastSimulation/HighLevelTrigger/python/full2fast.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
30,371
2015-01-02T00:14:40.000Z
2022-03-31T23:26:05.000Z
FastSimulation/HighLevelTrigger/python/full2fast.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
3,240
2015-01-02T05:53:18.000Z
2022-03-31T17:24:21.000Z
import FWCore.ParameterSet.Config as cms def modify_hltL3TrajSeedOIHit(_hltL3TrajSeedOIHit): _iterativeTSG = _hltL3TrajSeedOIHit.TkSeedGenerator.iterativeTSG _iterativeTSG.ComponentName = cms.string('FastTSGFromPropagation') _iterativeTSG.HitProducer = cms.InputTag("fastMatchedTrackerRecHitCombinations") _iterativeTSG.MeasurementTrackerEvent = cms.InputTag("MeasurementTrackerEvent") _iterativeTSG.SimTrackCollectionLabel = cms.InputTag("fastSimProducer") _iterativeTSG.beamSpot = cms.InputTag("offlineBeamSpot") _hltL3TrajSeedOIHit.TrackerSeedCleaner = cms.PSet() def modify_hltL3TrajSeedIOHit(_hltL3TrajSeedIOHit): _iterativeTSG = cms.PSet() _iterativeTSG.ComponentName = cms.string('FastTSGFromIOHit') _iterativeTSG.PtCut = cms.double(1.0) _iterativeTSG.SeedCollectionLabels = cms.VInputTag( cms.InputTag("initialStepSeeds"), cms.InputTag("detachedTripletStepSeeds"), cms.InputTag("lowPtTripletStepSeeds"), cms.InputTag("pixelPairStepSeeds")) _iterativeTSG.SimTrackCollectionLabel = cms.InputTag("fastSimProducer") _hltL3TrajSeedIOHit.TkSeedGenerator.iterativeTSG = _iterativeTSG _hltL3TrajSeedIOHit.TrackerSeedCleaner = cms.PSet()
47.538462
84
0.786408
90
1,236
10.577778
0.4
0.103992
0.081933
0.071429
0.128151
0
0
0
0
0
0
0.009234
0.123786
1,236
25
85
49.44
0.869806
0
0
0.095238
0
0
0.178947
0.102024
0
0
0
0
0
1
0.095238
false
0
0.047619
0
0.142857
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
eb974878838a569a2d9cf63b612ccad301b890eb
1,612
py
Python
titledb/update_db.py
EMUGamesDevTeam/TitleDB
4d13cce0f5e9d547316aba951301f001ca3b2c2c
[ "Unlicense" ]
1
2020-07-13T19:20:45.000Z
2020-07-13T19:20:45.000Z
titledb/update_db.py
EMUGamesDevTeam/TitleDB
4d13cce0f5e9d547316aba951301f001ca3b2c2c
[ "Unlicense" ]
null
null
null
titledb/update_db.py
EMUGamesDevTeam/TitleDB
4d13cce0f5e9d547316aba951301f001ca3b2c2c
[ "Unlicense" ]
null
null
null
import os, sys, re, transaction, base64, zlib from sqlalchemy import engine_from_config from pyramid.paster import ( get_appsettings, setup_logging, ) from .models import ( DBSession, CIA, Entry, User, Group, Base, ) from .security import hash_password def usage(argv): cmd = os.path.basename(argv[0]) print('usage: %s <config_uri>\n' '(example: "%s development.ini")' % (cmd, cmd)) sys.exit(1) def main(argv=sys.argv): if len(argv) != 2: usage(argv) config_uri = argv[1] setup_logging(config_uri) settings = get_appsettings(config_uri) engine = engine_from_config(settings, 'sqlalchemy.') DBSession.configure(bind=engine) with transaction.manager: for cia in DBSession.query(CIA).all(): print(cia.icon_s) icons1 = base64.b64decode(cia.icon_s) try: icons2 = zlib.decompress(icons1) except zlib.error: icons2 = icons1 iconl1 = base64.b64decode(cia.icon_l) try: iconl2 = zlib.decompress(iconl1) except zlib.error: iconl2 = iconl1 cia.icon_s = base64.b64encode(icons2) cia.icon_l = base64.b64encode(iconl2) DBSession.query(CIA).filter_by(id=cia.id).update(dict(icon_s=cia.icon_s,icon_l=cia.icon_l)) with transaction.manager: for cia in DBSession.query(CIA).all(): m = re.search('(.*)#(.*)', cia.url.url) if m: cia.url = m.group(1) cia.path = m.group(2)
26
103
0.581266
199
1,612
4.592965
0.38191
0.053611
0.035011
0.054705
0.109409
0.109409
0.109409
0.109409
0.109409
0.109409
0
0.031943
0.300868
1,612
61
104
26.42623
0.779059
0
0
0.16
0
0
0.046555
0
0
0
0
0
0
1
0.04
false
0.02
0.1
0
0.14
0.04
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
eb9805e46c99a38df84681449ccdcec4ab9e7a42
3,297
py
Python
src/modeling/models_cv.py
sebasjp/octopus-ml
c8f650cf9487a82d6b71a5d5bada12c5c42ab954
[ "MIT" ]
1
2021-05-15T22:35:51.000Z
2021-05-15T22:35:51.000Z
src/modeling/models_cv.py
sebasjp/octopus
c8f650cf9487a82d6b71a5d5bada12c5c42ab954
[ "MIT" ]
null
null
null
src/modeling/models_cv.py
sebasjp/octopus
c8f650cf9487a82d6b71a5d5bada12c5c42ab954
[ "MIT" ]
null
null
null
from sklearn.ensemble import RandomForestClassifier import xgboost as xgb from sklearn.model_selection import cross_val_score from sklearn.pipeline import Pipeline import numpy as np # ============================================================= # Modeling tools for cross validation # Reference: https://github.com/fmfn/BayesianOptimization/blob/master/examples/sklearn_example.py # ============================================================= # =================== # Random Forest # =================== def rfc_cv(n_estimators, max_depth, min_samples_split, min_samples_leaf, max_features, metric, X, y, preparessor): """ Random Forest cross validation. This function will instantiate a random forest classifier with parameters n_estimators, min_samples_split, max_depth, min_samples_leaf and max_features. Combined with X and y this will in turn be used to perform cross validation. The result of cross validation is returned. Our goal is to find combinations of n_estimators, min_samples_split, max_depth, min_samples_leaf and max_featues that maximizes the metric """ preprocessor = preparessor estimator = RandomForestClassifier( n_estimators = n_estimators, max_depth = max_depth, min_samples_split = min_samples_split, min_samples_leaf = min_samples_leaf, max_features = max_features, random_state = 42 ) # Append classifier to preparing pipeline. Now we have a full prediction pipeline. clf = Pipeline(steps=[('preprocessor', preprocessor), ('classifier', estimator)]) cval = cross_val_score(clf, X, y, scoring = metric, cv = 5) return cval.mean() # =================== # XGBoost # =================== def xgb_cv(n_estimators, max_depth, colsample_bytree, learning_rate, metric, X, y, preparessor): """ XGBoost cross validation. This function will instantiate a XGBoost classifier this will perform cross validation. The result of cross validation is returned. 
Our goal is to find combinations that maximizes the metric """ preprocessor = preparessor PARAM_SCALE_POS = np.ceil( len(y[y == 0]) / len(y[y == 1]) ) estimator = xgb.XGBClassifier( n_estimators = n_estimators, max_depth = max_depth, colsample_bytree = colsample_bytree, learning_rate = learning_rate, objective = 'binary:logistic', scale_pos_weight = PARAM_SCALE_POS, random_state = 42, verbosity = 0 ) # Append classifier to preparing pipeline. Now we have a full prediction pipeline. clf = Pipeline(steps=[('preprocessor', preprocessor), ('classifier', estimator)]) cval = cross_val_score(clf, X, y, scoring = metric, cv = 5) return cval.mean()
33.30303
102
0.558386
330
3,297
5.381818
0.339394
0.056306
0.04223
0.042793
0.583896
0.544482
0.480856
0.399775
0.356982
0.356982
0
0.004038
0.323931
3,297
98
103
33.642857
0.792732
0.358811
0
0.542373
0
0
0.028978
0
0
0
0
0
0
1
0.033898
false
0
0.084746
0
0.152542
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
eb98399d5d514cad736c35a8e1b08c45e1e0717f
4,089
py
Python
flask_makespc.py
OliWright/MakeSPC
ddcc5b60de3bdb244b25da0d1a459b4b071ab278
[ "MIT" ]
null
null
null
flask_makespc.py
OliWright/MakeSPC
ddcc5b60de3bdb244b25da0d1a459b4b071ab278
[ "MIT" ]
null
null
null
flask_makespc.py
OliWright/MakeSPC
ddcc5b60de3bdb244b25da0d1a459b4b071ab278
[ "MIT" ]
null
null
null
# MIT License # # Copyright (c) 2020 Oli Wright <oli.wright.github@gmail.com> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # flask_makespc.py # # Flask container for makespc.py # Simple script to convert images to the Stop Press Canvas .SPC format which # is used on Amstrad PCW8256 and friends. import os from flask import Flask, flash, request, redirect, send_from_directory from werkzeug.utils import secure_filename from makespc import convert_to_spc APP_ROOT = os.path.dirname(os.path.abspath(__file__)) # refers to application_top UPLOAD_FOLDER = 'uploads' OUTPUT_FOLDER = 'output' PREVIEW_FOLDER = 'previews' APP_UPLOAD_FOLDER = os.path.join(APP_ROOT, UPLOAD_FOLDER) APP_OUTPUT_FOLDER = os.path.join(APP_ROOT, OUTPUT_FOLDER) APP_PREVIEW_FOLDER = os.path.join(APP_ROOT, PREVIEW_FOLDER) ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'bmp'} app = Flask(__name__) def allowed_file(filename): return '.' 
in filename and \ filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS @app.route('/preview/<path:filename>', methods=['GET', 'POST']) def preview(filename): return send_from_directory(PREVIEW_FOLDER, filename=filename) @app.route('/output/<path:filename>', methods=['GET', 'POST']) def output(filename): return send_from_directory(OUTPUT_FOLDER, filename=filename) @app.route('/', methods=['GET', 'POST']) def upload_file(): html = ''' <!doctype html> <title>Convert an image to .SPC</title> <h1>Make SPC Online</h1> <p>This tool converts images to Stop Press Canvas .SPC format, popular on Amstrad PCW8256 computers.</p> <form method=post enctype=multipart/form-data> <input type=file name=file> <input type=submit value=Convert to SPC> </form> ''' if request.method == 'POST': # check if the post request has the file part if 'file' not in request.files: flash('No file part') return redirect(request.url) file = request.files['file'] # if user does not select file, browser also # submit an empty part without filename if file.filename == '': flash('No selected file') return redirect(request.url) if file and allowed_file(file.filename): input_filename = secure_filename(file.filename) full_input_filename = os.path.join(APP_UPLOAD_FOLDER, input_filename) file.save(full_input_filename) basename, extension = os.path.splitext(input_filename) preview_filename = basename + ".png" full_preview_filename = os.path.join(APP_PREVIEW_FOLDER, preview_filename) output_filename = basename + ".spc" full_output_filename = os.path.join(APP_OUTPUT_FOLDER, output_filename) convert_to_spc(full_input_filename, full_preview_filename, full_output_filename) html += ''' <p>Click the image to download your SPC file.</p> <a href="/output/%s"><img src="/preview/%s"/></a> ''' % (output_filename, preview_filename) return html
41.72449
108
0.705062
563
4,089
4.989343
0.369449
0.019224
0.02136
0.027768
0.128159
0.045212
0
0
0
0
0
0.004877
0.197603
4,089
97
109
42.154639
0.851265
0.343849
0
0.035088
0
0.017544
0.243019
0.045283
0
0
0
0
0
1
0.070175
false
0
0.070175
0.052632
0.245614
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
eb999df27e58e23913b51b8bb91c7eb0ee53cf08
1,201
py
Python
pytherface/yamlFileReader.py
aseiger/pytherface-configurator
704703cee8dd31f28fd73552c2b40c4b4d5faa5b
[ "MIT" ]
null
null
null
pytherface/yamlFileReader.py
aseiger/pytherface-configurator
704703cee8dd31f28fd73552c2b40c4b4d5faa5b
[ "MIT" ]
null
null
null
pytherface/yamlFileReader.py
aseiger/pytherface-configurator
704703cee8dd31f28fd73552c2b40c4b4d5faa5b
[ "MIT" ]
null
null
null
#reads in the protocol requirements and stores the information in a class import yaml import logging logger = logging.getLogger(__name__) def loadYamlFile(filename): #open up the filename logger.debug("Opening file {}".format(filename)) try: fObject = open(filename, 'r') except FileNotFoundError: logger.error("Config File {} not Found!".format(filename)) return [] else: data = yaml.load(fObject.read()) fObject.close() return data def parseYamlConfig(data): # we already have all of the information we need stored in the data from # the YAML file. However, it's worthwhile to also generate a list of all # incoming and outgoing variables. This allows checking for duplicates. incomingVariables = [] outgoingVariables = [] # go through each message for msg, metadata in data.items(): for k, v in metadata['variables'].items(): if metadata['type'] == 'incoming': incomingVariables.append({k: v}) elif metadata['type'] == 'outgoing': outgoingVariables.append(k, v) logger.debug(incomingVariables)
31.605263
77
0.631973
137
1,201
5.510949
0.583942
0.007947
0.021192
0
0
0
0
0
0
0
0
0
0.273106
1,201
37
78
32.459459
0.864834
0.273106
0
0
0
0
0.089157
0
0
0
0
0
0
1
0.083333
false
0
0.083333
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
eb99f86622cecc9f033b1bea66381c9d93c350c4
9,225
py
Python
pysnmp-with-texts/CADANT-CMTS-NOTIFICATION-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
8
2019-05-09T17:04:00.000Z
2021-06-09T06:50:51.000Z
pysnmp-with-texts/CADANT-CMTS-NOTIFICATION-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
4
2019-05-31T16:42:59.000Z
2020-01-31T21:57:17.000Z
pysnmp-with-texts/CADANT-CMTS-NOTIFICATION-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
10
2019-04-30T05:51:36.000Z
2022-02-16T03:33:41.000Z
# # PySNMP MIB module CADANT-CMTS-NOTIFICATION-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CADANT-CMTS-NOTIFICATION-MIB # Produced by pysmi-0.3.4 at Wed May 1 11:45:14 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint") trapCounter, trapSeverity = mibBuilder.importSymbols("CADANT-CMTS-EQUIPMENT-MIB", "trapCounter", "trapSeverity") cadNotification, = mibBuilder.importSymbols("CADANT-PRODUCTS-MIB", "cadNotification") ifOperStatus, ifAdminStatus, ifDescr, ifIndex = mibBuilder.importSymbols("IF-MIB", "ifOperStatus", "ifAdminStatus", "ifDescr", "ifIndex") ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup") ModuleIdentity, iso, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, NotificationType, Gauge32, MibIdentifier, Integer32, Unsigned32, IpAddress, Counter64, Counter32, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "iso", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "NotificationType", "Gauge32", "MibIdentifier", "Integer32", "Unsigned32", "IpAddress", "Counter64", "Counter32", "TimeTicks") TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString") cadNotificationMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1)) cadNotificationMib.setRevisions(('2015-09-14 
00:00', '2006-05-03 00:00', '2005-09-28 00:00', '2003-03-26 00:00', '2002-07-24 00:00',)) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): if mibBuilder.loadTexts: cadNotificationMib.setRevisionsDescriptions(('Add cadLinkUp.', 'Add cadIpdrNoPrimaryCollector, cadIpdrStreamingDisabled and cadIpdrReportCycleMissed.', 'Add RIP2 authentication failure.', 'Renamed RADIUS traps so that they can also be used by TACACS+.', 'Inital version. Add AAA/Security related traps.',)) if mibBuilder.loadTexts: cadNotificationMib.setLastUpdated('201509140000Z') if mibBuilder.loadTexts: cadNotificationMib.setOrganization('Cadant Inc') if mibBuilder.loadTexts: cadNotificationMib.setContactInfo('Cadant Technical Support ') if mibBuilder.loadTexts: cadNotificationMib.setDescription('This MIB defines object which are NOTIFICATION-TYPE and used to define the SNMP Traps generated from C4 CMTS.') cadTrapMibObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1)) cadTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 0)) cadTrapsInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 1)) securityInfo = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 1, 1), DisplayString()).setMaxAccess("accessiblefornotify") if mibBuilder.loadTexts: securityInfo.setStatus('current') if mibBuilder.loadTexts: securityInfo.setDescription('The detail security failure information') ipdrInfo = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 1, 2), DisplayString()).setMaxAccess("accessiblefornotify") if mibBuilder.loadTexts: ipdrInfo.setStatus('current') if mibBuilder.loadTexts: ipdrInfo.setDescription('The detail IPDR failure information') aaaServerUnreachableTrap = NotificationType((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 0, 1)).setObjects(("CADANT-CMTS-EQUIPMENT-MIB", "trapCounter"), ("CADANT-CMTS-EQUIPMENT-MIB", "trapSeverity"), ("CADANT-CMTS-NOTIFICATION-MIB", "securityInfo")) if mibBuilder.loadTexts: aaaServerUnreachableTrap.setStatus('current') if mibBuilder.loadTexts: 
aaaServerUnreachableTrap.setDescription('An event to report that the AAA (RADIUS or TACACS+) server is not responding') aaaServerGroupUnreachableTrap = NotificationType((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 0, 2)).setObjects(("CADANT-CMTS-EQUIPMENT-MIB", "trapCounter"), ("CADANT-CMTS-EQUIPMENT-MIB", "trapSeverity"), ("CADANT-CMTS-NOTIFICATION-MIB", "securityInfo")) if mibBuilder.loadTexts: aaaServerGroupUnreachableTrap.setStatus('current') if mibBuilder.loadTexts: aaaServerGroupUnreachableTrap.setDescription('An event to report that all the hosts in the AAA (RADIUS or TACACS+) server group are not responding') aaaServerAuthFailTrap = NotificationType((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 0, 3)).setObjects(("CADANT-CMTS-EQUIPMENT-MIB", "trapCounter"), ("CADANT-CMTS-EQUIPMENT-MIB", "trapSeverity"), ("CADANT-CMTS-NOTIFICATION-MIB", "securityInfo")) if mibBuilder.loadTexts: aaaServerAuthFailTrap.setStatus('current') if mibBuilder.loadTexts: aaaServerAuthFailTrap.setDescription('An event to report that AAA (RADIUS or TACACS+) server rejected the authentication request') secuLocalAuthFailTrap = NotificationType((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 0, 4)).setObjects(("CADANT-CMTS-EQUIPMENT-MIB", "trapCounter"), ("CADANT-CMTS-EQUIPMENT-MIB", "trapSeverity"), ("CADANT-CMTS-NOTIFICATION-MIB", "securityInfo")) if mibBuilder.loadTexts: secuLocalAuthFailTrap.setStatus('current') if mibBuilder.loadTexts: secuLocalAuthFailTrap.setDescription('An event to report that local password authentication failed') secuLineAuthFailTrap = NotificationType((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 0, 5)).setObjects(("CADANT-CMTS-EQUIPMENT-MIB", "trapCounter"), ("CADANT-CMTS-EQUIPMENT-MIB", "trapSeverity"), ("CADANT-CMTS-NOTIFICATION-MIB", "securityInfo")) if mibBuilder.loadTexts: secuLineAuthFailTrap.setStatus('current') if mibBuilder.loadTexts: secuLineAuthFailTrap.setDescription('An event to report that line password authentication failed') rip2AuthFailTrap = NotificationType((1, 3, 6, 
1, 4, 1, 4998, 1, 1, 6, 1, 1, 0, 6)).setObjects(("CADANT-CMTS-EQUIPMENT-MIB", "trapCounter"), ("CADANT-CMTS-EQUIPMENT-MIB", "trapSeverity"), ("CADANT-CMTS-NOTIFICATION-MIB", "securityInfo")) if mibBuilder.loadTexts: rip2AuthFailTrap.setStatus('current') if mibBuilder.loadTexts: rip2AuthFailTrap.setDescription('An event to report rip2 authentication failed.') cadIpdrNoPrimaryCollector = NotificationType((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 0, 7)).setObjects(("CADANT-CMTS-EQUIPMENT-MIB", "trapCounter"), ("CADANT-CMTS-EQUIPMENT-MIB", "trapSeverity"), ("CADANT-CMTS-NOTIFICATION-MIB", "ipdrInfo")) if mibBuilder.loadTexts: cadIpdrNoPrimaryCollector.setStatus('current') if mibBuilder.loadTexts: cadIpdrNoPrimaryCollector.setDescription('An event to report IPDR Streaming is enabled but there is no primary collector connected.') cadIpdrStreamingDisabled = NotificationType((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 0, 8)).setObjects(("CADANT-CMTS-EQUIPMENT-MIB", "trapCounter"), ("CADANT-CMTS-EQUIPMENT-MIB", "trapSeverity"), ("CADANT-CMTS-NOTIFICATION-MIB", "ipdrInfo")) if mibBuilder.loadTexts: cadIpdrStreamingDisabled.setStatus('current') if mibBuilder.loadTexts: cadIpdrStreamingDisabled.setDescription('An event to report IPDR Streaming function has been turned off.') cadIpdrReportCycleMissed = NotificationType((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 0, 9)).setObjects(("CADANT-CMTS-EQUIPMENT-MIB", "trapCounter"), ("CADANT-CMTS-EQUIPMENT-MIB", "trapSeverity"), ("CADANT-CMTS-NOTIFICATION-MIB", "ipdrInfo")) if mibBuilder.loadTexts: cadIpdrReportCycleMissed.setStatus('current') if mibBuilder.loadTexts: cadIpdrReportCycleMissed.setDescription('An event to report CMTS was not able to start the scheduled IPDR report cycle session on time. 
This may be due to system or network load or the lack of primary collector connection.') cadLinkUp = NotificationType((1, 3, 6, 1, 4, 1, 4998, 1, 1, 6, 1, 1, 0, 10)).setObjects(("IF-MIB", "ifIndex"), ("IF-MIB", "ifAdminStatus"), ("IF-MIB", "ifOperStatus"), ("IF-MIB", "ifDescr")) if mibBuilder.loadTexts: cadLinkUp.setStatus('current') if mibBuilder.loadTexts: cadLinkUp.setDescription('This is to expand standard linkUp trap to include ifDescr. A linkUp trap signifies that the SNMP entity, acting in an agent role, has detected that the ifOperStatus object for one of its communication links left the down state and transitioned into some other state (but not into the notPresent state). This other state is indicated by the included value of ifOperStatus.') mibBuilder.exportSymbols("CADANT-CMTS-NOTIFICATION-MIB", cadTrapsInfo=cadTrapsInfo, aaaServerGroupUnreachableTrap=aaaServerGroupUnreachableTrap, aaaServerAuthFailTrap=aaaServerAuthFailTrap, cadIpdrNoPrimaryCollector=cadIpdrNoPrimaryCollector, secuLineAuthFailTrap=secuLineAuthFailTrap, aaaServerUnreachableTrap=aaaServerUnreachableTrap, PYSNMP_MODULE_ID=cadNotificationMib, cadLinkUp=cadLinkUp, securityInfo=securityInfo, cadTrapMibObjects=cadTrapMibObjects, cadNotificationMib=cadNotificationMib, cadTraps=cadTraps, cadIpdrStreamingDisabled=cadIpdrStreamingDisabled, secuLocalAuthFailTrap=secuLocalAuthFailTrap, rip2AuthFailTrap=rip2AuthFailTrap, ipdrInfo=ipdrInfo, cadIpdrReportCycleMissed=cadIpdrReportCycleMissed)
139.772727
717
0.776802
1,086
9,225
6.596685
0.239411
0.009771
0.085008
0.058347
0.446119
0.340592
0.292155
0.28043
0.28043
0.28043
0
0.049917
0.087913
9,225
65
718
141.923077
0.801521
0.03794
0
0
0
0.070175
0.392285
0.098579
0
0
0
0
0
1
0
false
0.035088
0.157895
0
0.157895
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
eb9b09f7ae8cd2422a36c30cbe319e6b1bdb760a
179
py
Python
Exercicios em Python/ex051.py
Raphael-Azevedo/Exercicios_Python
dece138f38edd02b0731aed78e44acccb021b3cb
[ "MIT" ]
null
null
null
Exercicios em Python/ex051.py
Raphael-Azevedo/Exercicios_Python
dece138f38edd02b0731aed78e44acccb021b3cb
[ "MIT" ]
null
null
null
Exercicios em Python/ex051.py
Raphael-Azevedo/Exercicios_Python
dece138f38edd02b0731aed78e44acccb021b3cb
[ "MIT" ]
null
null
null
n1 = int(input('Digite o primeiro termo da PA: ')) n2 = int(input('Digite a razão da PA: ')) for c in range(1, 11): o = n1 + (c - 1)*n2 print(o, end=' - ') print('ACABOU')
29.833333
50
0.564246
33
179
3.060606
0.636364
0.158416
0.277228
0
0
0
0
0
0
0
0
0.057971
0.22905
179
6
51
29.833333
0.673913
0
0
0
0
0
0.344444
0
0
0
0
0
0
1
0
false
0
0
0
0
0.333333
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
eb9c34327c8ded8bca2d41bbc276bd049672bbd7
1,729
py
Python
bad.py
Spencer-Clay/Spencer-Clay-ketangpai_cracker-
dd769a853a6fd5e4cd0c7a1b891c46a50360cd27
[ "MIT" ]
null
null
null
bad.py
Spencer-Clay/Spencer-Clay-ketangpai_cracker-
dd769a853a6fd5e4cd0c7a1b891c46a50360cd27
[ "MIT" ]
null
null
null
bad.py
Spencer-Clay/Spencer-Clay-ketangpai_cracker-
dd769a853a6fd5e4cd0c7a1b891c46a50360cd27
[ "MIT" ]
null
null
null
import requests import sys import os import urllib3 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) requests.adapters.DEFAULT_RETRIES = 10 def exploit_post_code(data, token): main_url = 'https://openapiv51.ketangpai.com/AttenceApi/checkin' headers = { "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) " "Chrome/65.0.3325.181 Safari/537.36", "Token": "%s" % token} re = requests.post(url=main_url, data=data, headers=headers) re.keep_alive = False if r"\u7b7e\u5230\u6210\u529f" in re.text: msg = "签到成功,正确代码为%s" % str(data["code"]) print(msg) return False ''' cmd = "kill -9 " + str(os.getpid()) os.system(cmd) sys.exit(1) ''' def exploit_code(data, payload, token): dict_data = eval(data) dict_data["code"] = payload msg = '正在尝试%s' % str(payload) sys.stdout.write(str(msg) + '\r') try: exploit_post_code(dict_data, token) except Exception as e: exploit_code(data, payload, token) def geo_exploit(data, token): main_url = 'https://openapiv51.ketangpai.com/AttenceApi/checkin' headers = { "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) " "Chrome/65.0.3325.181 Safari/537.36", "Token": "%s" % token} re = requests.post(url=main_url, data=data, headers=headers) re.keep_alive = False if r"\u7b7e\u5230\u6210\u529f" in re.text: msg = "签到成功" print(msg) ''' cmd = "kill -9 " + str(os.getpid()) os.system(cmd) sys.exit(1) '''
30.875
111
0.604974
234
1,729
4.376068
0.376068
0.027344
0.029297
0.03125
0.669922
0.617188
0.617188
0.617188
0.617188
0.617188
0
0.071981
0.252747
1,729
55
112
31.436364
0.720588
0
0
0.473684
0
0.052632
0.300328
0.031475
0
0
0
0
0
1
0.078947
false
0
0.105263
0
0.210526
0.052632
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
eb9c7f642d7fd9e4d6c05d5178d9f6237379f4fa
2,087
py
Python
docker/src/app_server/sse.py
ShenTengTu/leak_monitoring_app
dba3bc6aebdc4fe104508262065e426844a1ce52
[ "MIT" ]
null
null
null
docker/src/app_server/sse.py
ShenTengTu/leak_monitoring_app
dba3bc6aebdc4fe104508262065e426844a1ce52
[ "MIT" ]
null
null
null
docker/src/app_server/sse.py
ShenTengTu/leak_monitoring_app
dba3bc6aebdc4fe104508262065e426844a1ce52
[ "MIT" ]
null
null
null
import logging import io from asyncio import Queue from sse_starlette.sse import ( EventSourceResponse as _EventSourceResponse, AppStatus, ServerSentEvent, ) from .endec import Encode logger = logging.getLogger("app_server") class EventSourceResponse(_EventSourceResponse): """Override original `EventSourceResponse`. If data is `None`, send comment to keep connections. """ @staticmethod def comment_encode(content: str = "", sep: str = None) -> bytes: buffer = io.StringIO() buffer.write(f": {content}") buffer.write(sep if sep is not None else "\r\n") return buffer.getvalue().encode("utf-8") async def stream_response(self, send) -> None: await send( { "type": "http.response.start", "status": self.status_code, "headers": self.raw_headers, } ) self._ping_task = self._loop.create_task(self._ping(send)) # type: ignore async for data in self.body_iterator: if AppStatus.should_exit: logger.debug(f"Caught signal. Stopping stream_response loop.") break if isinstance(data, dict): chunk = ServerSentEvent(**data).encode() elif data is None: chunk = self.comment_encode("NONE", sep=self.sep) else: chunk = ServerSentEvent(str(data), sep=self.sep).encode() logger.debug(f"[EventSourceResponse] chunk: {chunk.decode()}") await send({"type": "http.response.body", "body": chunk, "more_body": True}) await send({"type": "http.response.body", "body": b"", "more_body": False}) class SSEManager: __queue = Queue() @classmethod def push_event(cls, event: str, data: dict): cls.__queue.put_nowait(dict(event=event, data=Encode.json(data))) @classmethod async def next_event(cls): q = cls.__queue if q.empty(): return None item = await q.get() q.task_done() return item
30.691176
88
0.595592
235
2,087
5.165957
0.421277
0.026359
0.032125
0.04201
0.074959
0.054366
0.054366
0
0
0
0
0.000671
0.285577
2,087
67
89
31.149254
0.813548
0.051749
0
0.038462
0
0
0.117108
0.010692
0
0
0
0
0
1
0.038462
false
0
0.096154
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
eb9cf0127ef5ed427d9efbbfab74a2a9d0c4ae7e
2,088
py
Python
python/constants.py
ptracton/wb_platform
7e8a3c0fc9486cb0ee29d0d44707ee7dc21a6824
[ "MIT" ]
null
null
null
python/constants.py
ptracton/wb_platform
7e8a3c0fc9486cb0ee29d0d44707ee7dc21a6824
[ "MIT" ]
null
null
null
python/constants.py
ptracton/wb_platform
7e8a3c0fc9486cb0ee29d0d44707ee7dc21a6824
[ "MIT" ]
null
null
null
#! /usr/bin/env python3 """ Packet Commands """ COMMAND_CPU_WRITE = 1 COMMAND_CPU_READ = 2 COMMAND_DAQ_WRITE = 3 COMMAND_DAQ_READ = 4 """ RAM ADDRESSES """ RAM0_BASE_ADDRESS = 0x90000000 RAM1_BASE_ADDRESS = 0x90002000 RAM2_BASE_ADDRESS = 0x90004000 RAM3_BASE_ADDRESS = 0x90006000 """ GPIO """ GPIO_BASE_ADDRESS = 0x40000000 GPIO_R_IN = GPIO_BASE_ADDRESS + 0 GPIO_R_OUT = GPIO_BASE_ADDRESS + 4 GPIO_R_OE = GPIO_BASE_ADDRESS + 8 GPIO_R_INTE = GPIO_BASE_ADDRESS + 12 GPIO_R_PTRIG = GPIO_BASE_ADDRESS + 16 GPIO_R_AUX = GPIO_BASE_ADDRESS + 20 GPIO_R_CTRL = GPIO_BASE_ADDRESS + 24 GPIO_F_CTRL_INTE = 0 GPIO_B_CTRL_INTE = (1 << GPIO_F_CTRL_INTE) GPIO_F_CTRL_INTS = 1 GPIO_B_CTRL_INTS = (1 << GPIO_F_CTRL_INTS) GPIO_R_INTS = GPIO_BASE_ADDRESS + 28 GPIO_R_ECLK = GPIO_BASE_ADDRESS + 32 GPIO_R_NEC = GPIO_BASE_ADDRESS + 32 """ SYSCON """ SYSCON_BASE_ADDRESS = 0x40001000 SYSCON_R_IDENTIFICATION = SYSCON_BASE_ADDRESS + 0 SYSCON_R_STATUS = SYSCON_BASE_ADDRESS + 4 F_SYSCON_STATUS_LOCKED = 0 B_SYSCON_STATUS_LOCKED = (1 << F_SYSCON_STATUS_LOCKED) SYSCON_R_CONTROL = SYSCON_BASE_ADDRESS + 8 """ DSP SLAVE """ WB_DSP_SLAVE_BASE_ADDRESS = (0x70000000) WB_DSP_SLAVE_INPUT0 = WB_DSP_SLAVE_BASE_ADDRESS + 0 B_DSP_EQUATION_NONE = 0x00 B_DSP_EQUATION_SUM = 0x01 B_DSP_EQUATION_MULTIPLY = 0x02 B_DSP_EQUATION_DTREE = 0x03 B_DSP_SLAVE_DATA_SIGNED = (1 << 10) B_DSP_SLAVE_ENABLE_MAC = (1 << 11) B_DSP_SLAVE_SCALAR_MULTIPLY = (1 << 12) B_DSP_SLAVE_EQUATION_START = (1 << 31) WB_DSP_SLAVE_INPUT1 = WB_DSP_SLAVE_BASE_ADDRESS + 4 WB_DSP_SLAVE_INPUT2 = WB_DSP_SLAVE_BASE_ADDRESS + 8 WB_DSP_SLAVE_INPUT3 = WB_DSP_SLAVE_BASE_ADDRESS + 0x0C WB_DSP_SLAVE_INPUT4 = WB_DSP_SLAVE_BASE_ADDRESS + 0x10 WB_DSP_SLAVE_OUTPUT0 = WB_DSP_SLAVE_BASE_ADDRESS + 0x14 WB_DSP_SLAVE_OUTPUT1 = WB_DSP_SLAVE_BASE_ADDRESS + 0x18 WB_DSP_SLAVE_OUTPUT2 = WB_DSP_SLAVE_BASE_ADDRESS + 0x1C WB_DSP_SLAVE_OUTPUT3 = WB_DSP_SLAVE_BASE_ADDRESS + 0x20 WB_DSP_SLAVE_OUTPUT4 = WB_DSP_SLAVE_BASE_ADDRESS + 0x24 B_CONTROL_DATA_SIZE_WORD = 0x00 
B_CONTROL_DATA_SIZE_HWORD = 0x01 B_CONTROL_DATA_SIZE_BYTE = 0x02 B_CONTROL_DATA_SIZE_UNDEFINED = 0x03
26.769231
55
0.815134
363
2,088
4.090909
0.275482
0.222222
0.141414
0.103704
0.155556
0
0
0
0
0
0
0.090119
0.117816
2,088
77
56
27.116883
0.716069
0.018199
0
0
0
0
0
0
0
0
0.065723
0
0
1
0
false
0
0
0
0
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
eb9dfa5d0a36f0ca4db0ee5aca27a1643f342549
257
py
Python
matdat/__init__.py
Fumipo-Theta/matdat
cb55acd346acef5f192d71c835b8fbe5c957cc55
[ "BSD-2-Clause" ]
null
null
null
matdat/__init__.py
Fumipo-Theta/matdat
cb55acd346acef5f192d71c835b8fbe5c957cc55
[ "BSD-2-Clause" ]
null
null
null
matdat/__init__.py
Fumipo-Theta/matdat
cb55acd346acef5f192d71c835b8fbe5c957cc55
[ "BSD-2-Clause" ]
null
null
null
from .figure import Figure from .subplot import Subplot from .subplot_time import SubplotTime from .csv_reader import CsvReader, matchCsv from .excel_reader import ExcelReader from .get_path import getFileList, PathList from .save_plot import actionSavePNG
32.125
43
0.848249
35
257
6.085714
0.542857
0.103286
0
0
0
0
0
0
0
0
0
0
0.116732
257
7
44
36.714286
0.938326
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
4
eb9fd0269907f86d115d62e0ea0520c852272710
249
py
Python
source/tools/filetool.py
chopin1993/protocolmaster-20210731
e23e235ee00b940a4161c606415574d2a52c701c
[ "Apache-2.0" ]
null
null
null
source/tools/filetool.py
chopin1993/protocolmaster-20210731
e23e235ee00b940a4161c606415574d2a52c701c
[ "Apache-2.0" ]
null
null
null
source/tools/filetool.py
chopin1993/protocolmaster-20210731
e23e235ee00b940a4161c606415574d2a52c701c
[ "Apache-2.0" ]
null
null
null
import os def get_file_list(root, key=None): files = os.listdir(root) if key is not None: files.sort(key=key) return files def get_config_file(name): return os.path.join(os.path.dirname(__file__), ".." , "resource", name)
20.75
75
0.658635
39
249
4
0.564103
0.076923
0
0
0
0
0
0
0
0
0
0
0.204819
249
12
75
20.75
0.787879
0
0
0
0
0
0.04
0
0
0
0
0
0
1
0.25
false
0
0.125
0.125
0.625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
eba03f73105a14dca36c64eedfaaba318422e180
648
py
Python
crud/migrations/migration_motif.py
MedAli-FSTG/AdminDjangoCrud
b7de87f2fbfe0a1b990113792deb3157aed288b8
[ "Apache-2.0" ]
null
null
null
crud/migrations/migration_motif.py
MedAli-FSTG/AdminDjangoCrud
b7de87f2fbfe0a1b990113792deb3157aed288b8
[ "Apache-2.0" ]
null
null
null
crud/migrations/migration_motif.py
MedAli-FSTG/AdminDjangoCrud
b7de87f2fbfe0a1b990113792deb3157aed288b8
[ "Apache-2.0" ]
null
null
null
# Generated by Django 2.2.6 on 2019-10-22 08:11 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='motifs', fields=[ ('id', models.BigAutoField(primary_key=True, serialize=False)), ('nom', models.CharField(max_length=40)), ('description', models.TextField()), ('motif_Img', models.ImageField(upload_to='images/')), ], options={ 'db_table': 'motifs', }, ), ]
24
79
0.520062
59
648
5.627119
0.79661
0
0
0
0
0
0
0
0
0
0
0.040284
0.348765
648
26
80
24.923077
0.746446
0.069444
0
0
1
0
0.086522
0
0
0
0
0
0
1
0
false
0
0.052632
0
0.263158
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
eba3ba5c97bda5e5f7c847bfd13a55f5e2d84a33
280
py
Python
github-json_to_xml/conv.py
alvarenga/github-json_to_xml
6ac210bea8badbed18f9e65127cb19e386e85d24
[ "MIT" ]
null
null
null
github-json_to_xml/conv.py
alvarenga/github-json_to_xml
6ac210bea8badbed18f9e65127cb19e386e85d24
[ "MIT" ]
null
null
null
github-json_to_xml/conv.py
alvarenga/github-json_to_xml
6ac210bea8badbed18f9e65127cb19e386e85d24
[ "MIT" ]
null
null
null
def conv(user): import requests import json import xmltodict url = 'https://api.github.com/users/' + user s = requests.get(url) # Converter json para dict x = {} x['wg'] = json.loads(s.text) y = xmltodict.unparse(x, pretty=True) return y
20
48
0.6
39
280
4.307692
0.692308
0
0
0
0
0
0
0
0
0
0
0
0.267857
280
13
49
21.538462
0.819512
0.085714
0
0
0
0
0.122047
0
0
0
0
0
0
1
0.1
false
0
0.3
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
eba40c7836d02b53113b113f1c42b28c5c259a93
404
py
Python
twitoff/__init__.py
jeffyjkang/DS-Unit-3-Sprint-3-Productization-and-Cloud
d12952dd625c8d282db1946dd50c7f478e90dd7a
[ "MIT" ]
null
null
null
twitoff/__init__.py
jeffyjkang/DS-Unit-3-Sprint-3-Productization-and-Cloud
d12952dd625c8d282db1946dd50c7f478e90dd7a
[ "MIT" ]
null
null
null
twitoff/__init__.py
jeffyjkang/DS-Unit-3-Sprint-3-Productization-and-Cloud
d12952dd625c8d282db1946dd50c7f478e90dd7a
[ "MIT" ]
null
null
null
from .app import create_app # APP = create_app() # python commands: # in app dir #FLASKAPP=twitoff flask run # in root dir # FLASK_APP=twitoff flask shell ''' Notes for setup: in root, FLASK_APP=twitoff flask shell import create_app init create_app() import DB DB.create_all() creates tables ''' ''' Other commands user1 = User.query.filter(User.name == 'nasa') user1 = user1.one() user1.tweets '''
14.428571
46
0.725248
62
404
4.612903
0.5
0.125874
0.104895
0.13986
0.174825
0
0
0
0
0
0
0.01173
0.155941
404
28
47
14.428571
0.826979
0.282178
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
3
eba6b8526a7352b129d62e67953fbea4c4064fb0
1,945
py
Python
tellapart/aurproxy/config/route.py
thinker0/aurproxy
7387bb3ac7decd9d0034f9ca6b4dfea4384ce59d
[ "Apache-2.0" ]
1
2020-02-25T04:09:48.000Z
2020-02-25T04:09:48.000Z
tellapart/aurproxy/config/route.py
aurora-scheduler/aurproxy
73a1e7086cc4dd171456f50724246a9261febaf8
[ "Apache-2.0" ]
null
null
null
tellapart/aurproxy/config/route.py
aurora-scheduler/aurproxy
73a1e7086cc4dd171456f50724246a9261febaf8
[ "Apache-2.0" ]
2
2020-09-12T13:06:45.000Z
2021-01-19T09:42:58.000Z
# Copyright 2015 TellApart, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class ProxyRoute(object): def __init__(self, locations, empty_endpoint_status_code, source_group_manager, use_https=False, route_path='', context={}): self._locations = locations self._empty_endpoint_status_code = empty_endpoint_status_code self._source_group_manager = source_group_manager self._context = context self._locations = locations self._empty_endpoint_status_code = empty_endpoint_status_code self._source_group_manager = source_group_manager self._protocol = 'https://' if use_https else 'http://' self._route_path = route_path @property def blueprints(self): return self._source_group_manager.blueprints @property def locations(self): return self._locations @property def endpoints(self): return self._source_group_manager.endpoints @property def empty_endpoint_status_code(self): return self._empty_endpoint_status_code @property def slug(self): return self._source_group_manager.slug @property def context(self): return self._context @property def protocol(self): return self._protocol @property def route_path(self): return self._route_path def start(self, weight_adjustment_start): self._source_group_manager.start(weight_adjustment_start)
29.029851
74
0.728021
251
1,945
5.354582
0.370518
0.073661
0.120536
0.119792
0.294643
0.254464
0.183036
0.183036
0.183036
0.183036
0
0.005151
0.201542
1,945
66
75
29.469697
0.86027
0.283805
0
0.325581
0
0
0.010877
0
0
0
0
0
0
1
0.232558
false
0
0
0.186047
0.44186
0.046512
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
2
eba7e9b9d2ce20a664ab35ed0b3544b8abc90d3f
4,394
py
Python
rail/creation/creator.py
LSSTDESC/RAIL
77707a708068a6818d5d815fb6b952ecc06d511b
[ "MIT" ]
7
2020-09-21T13:02:23.000Z
2022-03-23T19:26:41.000Z
rail/creation/creator.py
LSSTDESC/RAIL
77707a708068a6818d5d815fb6b952ecc06d511b
[ "MIT" ]
116
2019-11-21T17:20:52.000Z
2022-03-30T11:21:54.000Z
rail/creation/creator.py
LSSTDESC/RAIL
77707a708068a6818d5d815fb6b952ecc06d511b
[ "MIT" ]
6
2020-01-24T17:14:43.000Z
2022-03-30T11:27:20.000Z
import numpy as np
import pandas as pd
from rail.creation.engines import Engine
from typing import Callable


class Creator:
    """Object that supplies mock data for redshift estimation experiments.

    The mock data is drawn from a probability distribution defined by the
    generator, with an optional degrader applied.
    """

    def __init__(self, engine: Engine, degrader: Callable = None, info: dict = None):
        """
        Parameters
        ----------
        engine: rail.Engine object
            Object defining a redshift probability distribution. Must have
            sample, log_prob and get_posterior methods (see engine.py)
        degrader: callable, optional
            A Degrader, function, or other callable that degrades the
            generated sample. Must take a pandas DataFrame and a seed int,
            and return a pandas DataFrame representing the degraded sample.
        info: any, optional
            Additional information desired to be stored with the instance
            as a dictionary.
        """
        self.engine = engine
        self.degrader = degrader
        self.info = info

    def get_posterior(self, data: pd.DataFrame, column: str, grid: np.ndarray):
        """Calculate the posterior of the given column over the values in grid.

        Parameters
        ----------
        data : pd.DataFrame
            Pandas dataframe of the data on which the posteriors are
            conditioned.
        column : str
            Name of the column for which the posterior is calculated.
        grid : np.ndarray
            Grid over which the posterior is calculated.

        Returns
        -------
        np.ndarray
            Array of posteriors, of shape (data.shape[0], grid.size).
        """
        return self.engine.get_posterior(data, column, grid)

    def sample(
        self,
        n_samples: int,
        seed: int = None,
        include_pdf: bool = False,
        pz_grid: np.ndarray = None,
    ):
        """Draws n_samples from the engine

        Parameters
        ----------
        n_samples : int
            Number of samples to draw
        seed : int, optional
            sets the random seed for drawing samples
        include_pdf : boolean, optional
            If True, redshift posteriors are returned for each galaxy.
            The posteriors are saved in the column pz_pdf, and the redshift
            grid saved as df.attrs['pz_grid'].
        pz_grid : np.array, default=np.arange(0, 2.02, 0.02)
            The grid over which to calculate the redshift posteriors.

        Returns
        -------
        outputs : pd.DataFrame
            samples from model, containing photometry, true redshift, and
            redshift posterior PDF's if requested.

        Raises
        ------
        ValueError
            If the degrader removes every sample, so that no survival
            fraction can be estimated.

        Notes
        -----
        Output posterior format is currently hardcoded to grid evaluations
        but could be integrated with qp. We will probably change the output
        format to dovetail with the evaluation module when ready.
        """
        # Use plain truthiness here to stay consistent with the later
        # `if include_pdf:` check (the original `is True` test skipped the
        # default grid for truthy non-bool values).
        if include_pdf and pz_grid is None:
            pz_grid = np.arange(0, 2.02, 0.02)

        rng = np.random.default_rng(seed)

        # get samples
        outputs = self.engine.sample(n_samples, seed=seed)

        if self.degrader is not None:
            # degrade sample
            outputs = self.degrader(outputs, seed=seed)

            # calculate fraction that survives the cut
            selected_frac = len(outputs) / n_samples
            # Guard against a degrader that removes everything: the original
            # code divided by selected_frac and raised ZeroDivisionError.
            if selected_frac == 0:
                raise ValueError(
                    "Degrader removed all samples; cannot estimate how many "
                    "additional samples to draw."
                )

            # draw more samples and degrade until we have enough samples
            while len(outputs) < n_samples:
                # estimate how many extras to draw (10% safety margin)
                n_supplement = int(1.1 / selected_frac * (n_samples - len(outputs)))
                # draw new samples and apply cut
                new_sample = self.engine.sample(n_supplement, seed=rng.integers(1e18))
                new_sample = self.degrader(new_sample, seed=rng.integers(1e18))
                # add these to the larger set
                outputs = pd.concat((outputs, new_sample), ignore_index=True)

            # cut out the extras
            outputs = outputs[:n_samples]

        # calculate posteriors
        if include_pdf:
            posteriors = self.get_posterior(outputs, column="redshift", grid=pz_grid)
            outputs.attrs["pz_grid"] = pz_grid
            outputs["pz_pdf"] = list(posteriors)

        return outputs
37.237288
107
0.60924
535
4,394
4.927103
0.342056
0.024279
0.014795
0.014416
0.046282
0.011381
0.011381
0
0
0
0
0.007692
0.319527
4,394
117
108
37.555556
0.873913
0.503641
0
0
0
0
0.011993
0
0
0
0
0
0
1
0.083333
false
0
0.111111
0
0.277778
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
eba86bf586aecd5b8c7e0131858b6b347ef52969
834
py
Python
contrib/nchain/devops/pipe-unittests.py
Trackerming/bitcoin-sv
fb50a64e3ea0334a86b2c80daf5147c5bc2693c4
[ "MIT" ]
8
2019-08-02T02:49:42.000Z
2022-01-17T15:51:48.000Z
contrib/nchain/devops/pipe-unittests.py
Trackerming/bitcoin-sv
fb50a64e3ea0334a86b2c80daf5147c5bc2693c4
[ "MIT" ]
null
null
null
contrib/nchain/devops/pipe-unittests.py
Trackerming/bitcoin-sv
fb50a64e3ea0334a86b2c80daf5147c5bc2693c4
[ "MIT" ]
4
2019-08-02T02:50:44.000Z
2021-05-28T03:21:38.000Z
#!/usr/bin/python3 # Perform the unit tests on SV import subprocess import os import pathlib import traceback import pipetestutils def main(): r1 = -1 try: pathlib.Path("build/reports").mkdir(parents=True, exist_ok=True) os.chdir("src/test") except Exception as e: print("Problem changing directory") print("type error: " + str(e)) print(traceback.format_exc()) exit(-1) try: args = ["./test_bitcoin", "--log_format=JUNIT" \ , "--log_sink=../../build/reports/unittests.xml"] r1 = subprocess.call(args) except Exception as e: print("Problem running tests") print("type error: " + str(e)) print(traceback.format_exc()) exit(-2) exit(abs(r1)) if __name__ == '__main__': main()
23.828571
72
0.581535
100
834
4.71
0.58
0.050955
0.072187
0.076433
0.318471
0.318471
0.191083
0.191083
0.191083
0.191083
0
0.011628
0.278177
834
34
73
24.529412
0.770764
0.055156
0
0.296296
0
0
0.223919
0.05598
0
0
0
0
0
1
0.037037
false
0
0.185185
0
0.222222
0.222222
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebaa426765a7f0d350ab28d87557159798371f08
2,439
py
Python
tx_salaries/utils/transformers/ut_medical_branch.py
texastribune/tx_salaries
197d8da4e1783216830b8d0a5adb23c0200fd3e8
[ "Apache-2.0" ]
6
2016-05-18T05:53:44.000Z
2019-06-13T18:27:50.000Z
tx_salaries/utils/transformers/ut_medical_branch.py
texastribune/tx_salaries
197d8da4e1783216830b8d0a5adb23c0200fd3e8
[ "Apache-2.0" ]
64
2015-02-13T18:29:04.000Z
2018-06-15T19:48:56.000Z
tx_salaries/utils/transformers/ut_medical_branch.py
texastribune/tx_salaries
197d8da4e1783216830b8d0a5adb23c0200fd3e8
[ "Apache-2.0" ]
2
2015-05-08T19:22:12.000Z
2016-07-11T16:57:49.000Z
from . import base from . import mixins from datetime import date class TransformedRecord( mixins.GenericCompensationMixin, mixins.GenericDepartmentMixin, mixins.GenericIdentifierMixin, mixins.GenericJobTitleMixin, mixins.GenericPersonMixin, mixins.MembershipMixin, mixins.OrganizationMixin, mixins.PostMixin, mixins.RaceMixin, mixins.LinkMixin, base.BaseTransformedRecord): MAP = { 'last_name': 'FAMILY_NAME', 'first_name': 'GIVEN_NAME', 'department': 'DEPTID_DESCR', 'job_title': 'JOBTITLE', 'gender': 'GENDER', 'race': 'ETHNIC_GROUP_DESCR', 'hire_date': 'LAST_HIRE_DT', 'compensation': 'ANNUAL_PAY', 'longevity': 'ANNUALIZED_LONGEVITY', 'employee_type': 'FULL_PART_TIME', } NAME_FIELDS = ('first_name', 'last_name', ) gender_map = {'Female': 'F', 'Male': 'M'} ORGANIZATION_NAME = 'The University of Texas Medical Branch at Galveston' ORGANIZATION_CLASSIFICATION = 'University Hospital' DATE_PROVIDED = date(2019, 7, 30) URL = ('https://s3.amazonaws.com/raw.texastribune.org/ut_medical_branch/' 'salaries/2019/Response.xlsx') @property def compensation_type(self): if self.employee_type == 'Part-time': return 'PT' else: return 'FT' @property def description(self): if self.employee_type == 'Part-time': return "Part-time annual compensation" else: return "Annual compensation" @property def compensation(self): #longevity is in addition to base annual_pay, add if applicable if self.get_mapped_value('longevity') == '0': return self.get_mapped_value('compensation') else: longevity = self.get_mapped_value('longevity') salary = self.get_mapped_value('compensation') return float(salary) + float(longevity) @property def is_valid(self): # Adjust to return False on invalid fields. For example: return self.last_name.strip() != '' @property def person(self): data = { 'family_name': self.last_name, 'given_name': self.first_name, 'name': self.get_raw_name(), 'gender': self.gender_map[self.gender.strip()] } return data transform = base.transform_factory(TransformedRecord)
29.385542
77
0.627306
252
2,439
5.884921
0.444444
0.037087
0.035064
0.04855
0.125421
0.04855
0.04855
0.04855
0
0
0
0.00723
0.262813
2,439
82
78
29.743902
0.817575
0.04838
0
0.163934
0
0
0.236309
0.011643
0
0
0
0
0
1
0.081967
false
0
0.04918
0.016393
0.393443
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebac27508f66c5ff713c821f2b877ebdbc6660c6
2,912
py
Python
accountingsubject/views.py
zwj12/accounting
96678104ac7815da57f8f8c1a38c3138bc5ed0b6
[ "Apache-2.0" ]
null
null
null
accountingsubject/views.py
zwj12/accounting
96678104ac7815da57f8f8c1a38c3138bc5ed0b6
[ "Apache-2.0" ]
null
null
null
accountingsubject/views.py
zwj12/accounting
96678104ac7815da57f8f8c1a38c3138bc5ed0b6
[ "Apache-2.0" ]
null
null
null
"""Views for the accountingsubject app: listing, detail, and edit form."""
from django.db.models import Count, Sum
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404

# Create your views here.
from django.views import generic

from accountingsubject.forms import AccountingSubjectForm
from accountingsubject.models import AccountingSubject
from cash.models import CashOnHand


class IndexView(generic.ListView):
    """List all accounting subjects with per-subject cash annotations."""
    template_name = 'accountingsubject/index.html'
    context_object_name = 'accounting_subject_list'

    def get_queryset(self):
        # Annotate each subject with the count of its cash entries and the
        # sum of their 'lucre' values.
        return AccountingSubject.objects.annotate(Count('cashonhand'), Sum('cashonhand__lucre'))


class DetailView(generic.DetailView):
    """Detail page for one accounting subject."""
    model = AccountingSubject
    context_object_name = 'accounting_subject'
    template_name = 'accountingsubject/detail.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['list_count'] = CashOnHand.objects.filter(opposite_account_id=self.object.id).count()
        # NOTE(review): 'list_sum' also calls .count(), identical to
        # 'list_count' above — given the Sum('cashonhand__lucre') annotation
        # in IndexView this looks like a copy-paste bug; confirm whether an
        # aggregate sum of 'lucre' was intended.
        context['list_sum'] = CashOnHand.objects.filter(opposite_account_id=self.object.id).count()
        return context


# @login_required()
def accounting_subject_edit(request, accounting_subject_id):
    """Edit an accounting subject: show a bound form and save POSTed data.

    Redirects to /cash on a successful save; re-renders the form otherwise.
    """
    # logger.error(request)
    # if request.user.is_authenticated:
    #     # A backend authenticated the credentials
    #     logger.debug(request.user.username + " is logged")
    #     if request.user.has_perm('cash.add_cashonhand'):
    #         logger.debug(request.user.username + " has permission: cash.add_cashonhand")
    #     if request.user.has_perm('cash.add_accountingsubject'):
    #         logger.debug(request.user.username + " has permission: cash.add_accountingsubject")
    #     return HttpResponseRedirect('/admin')
    # else:
    #     # No backend authenticated the credentials
    #     return HttpResponseRedirect('/cash')
    accounting_subject = get_object_or_404(AccountingSubject, pk=accounting_subject_id)
    if request.method == 'POST':
        form = AccountingSubjectForm(request.POST)
        if form.is_valid():
            # Copy the validated fields onto the model and persist it.
            accounting_subject.accounting_subject = form.cleaned_data['accounting_subject']
            accounting_subject.debit_balance = form.cleaned_data['debit_balance']
            accounting_subject.remark = form.cleaned_data['remark']
            accounting_subject.save()
            # return render(request, 'cash/accountingsubjectedit.html',
            #               {'form': form, 'accounting_subject': accounting_subject})
            return HttpResponseRedirect('/cash')
    else:
        # GET: pre-populate the form with the subject's current values.
        obj = {"accounting_subject": accounting_subject.accounting_subject,
               "debit_balance": accounting_subject.debit_balance,
               "remark": accounting_subject.remark,
               }
        form = AccountingSubjectForm(obj)
    # Falls through here on GET and on an invalid POST (form stays bound).
    return render(request, 'cash/accountingsubjectedit.html',
                  {'form': form, 'accounting_subject': accounting_subject})
44.121212
119
0.714629
306
2,912
6.584967
0.29085
0.177171
0.080397
0.101241
0.316625
0.26799
0.222333
0.195534
0.195534
0.145906
0
0.002535
0.187157
2,912
66
119
44.121212
0.848754
0.258242
0
0
0
0
0.130252
0.051821
0
0
0
0
0
1
0.078947
false
0
0.184211
0.026316
0.552632
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
ebac6ca248c814898ee7e3737a6b5b9899d1e5a4
762
py
Python
utils/custombase.py
Zashel/utils
1c8b9e1ad7ceb1924a719bef588fcfe38dfd1f70
[ "Apache-2.0" ]
null
null
null
utils/custombase.py
Zashel/utils
1c8b9e1ad7ceb1924a719bef588fcfe38dfd1f70
[ "Apache-2.0" ]
null
null
null
utils/custombase.py
Zashel/utils
1c8b9e1ad7ceb1924a719bef588fcfe38dfd1f70
[ "Apache-2.0" ]
null
null
null
class AttributedDict(dict): def __dir__(self): directory = dir(super()) directory.extend([str(key.replace(" ", "_")) for key in self]) return directory def __getattr__(self, attr): _dir_dict = dict() [_dir_dict.update({key.replace(" ", "_"): key}) for key in self] if attr in _dir_dict: return self[_dir_dict[attr]] else: raise AttributeError(attr) def __setattr__(self, attr, value): _dir_dict = dict() [_dir_dict.update({key.replace(" ", "_"): key}) for key in self] if attr in _dir_dict: self[_dir_dict[attr]] = value elif attr in self: self[attr] = value else: raise AttributeError(attr)
31.75
72
0.560367
90
762
4.4
0.277778
0.141414
0.060606
0.090909
0.323232
0.323232
0.323232
0.323232
0.323232
0.323232
0
0
0.316273
762
23
73
33.130435
0.760077
0
0
0.47619
0
0
0.007874
0
0
0
0
0
0
1
0.142857
false
0
0
0
0.285714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebad0fa39828f876d9327badae6cbb23b3a1f522
258
py
Python
upylab/upylab_5.3.py
piousteph/mooc_python
b7446c8a8317e2ae5e87a9ce27fd107f20333765
[ "MIT" ]
null
null
null
upylab/upylab_5.3.py
piousteph/mooc_python
b7446c8a8317e2ae5e87a9ce27fd107f20333765
[ "MIT" ]
null
null
null
upylab/upylab_5.3.py
piousteph/mooc_python
b7446c8a8317e2ae5e87a9ce27fd107f20333765
[ "MIT" ]
null
null
null
def duree(debut, fin):
    """Return the (hours, minutes) elapsed from debut to fin.

    Both arguments are (hour, minute) tuples. An end time earlier than
    the start time is taken to fall on the next day (wrap past midnight).
    """
    start_minutes = debut[0] * 60 + debut[1]
    end_minutes = fin[0] * 60 + fin[1]
    delta = end_minutes - start_minutes
    # Floor division keeps minutes in [0, 60) even for negative deltas.
    hours, minutes = divmod(delta, 60)
    if hours < 0:
        hours = 24 - abs(hours)
    return (hours, minutes)


print(duree((14, 39), (18, 45)))
print(duree((6, 0), (5, 15)))
17.2
33
0.410853
48
258
2.208333
0.5
0.056604
0
0
0
0
0
0
0
0
0
0.171779
0.368217
258
15
34
17.2
0.478528
0
0
0
0
0
0
0
0
0
0
0
0
1
0.090909
false
0
0
0
0.181818
0.181818
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
ebaeccfe37530d080542e9861760ac1b8264f12f
31,392
py
Python
utils.py
val-iisc/ss_human_mesh
f9c7fcf577c83316eb610753e3f5678b7b5e24c5
[ "MIT" ]
31
2020-08-31T11:32:33.000Z
2021-12-05T08:47:33.000Z
utils.py
rakeshramesha/SS_Human_Mesh
b27d53a08b60a1ac32d1845557f317c165498fd5
[ "MIT" ]
null
null
null
utils.py
rakeshramesha/SS_Human_Mesh
b27d53a08b60a1ac32d1845557f317c165498fd5
[ "MIT" ]
7
2020-09-25T03:50:59.000Z
2021-12-10T05:24:58.000Z
""" General Utilities file. """ import sys import os ############################ NON-TF UTILS ########################## from skimage.util import img_as_float import numpy as np import cv2 import pickle from PIL import Image from io import BytesIO import math import tqdm import scipy import json import matplotlib gui_env = ['Agg','TKAgg','GTKAgg','Qt4Agg','WXAgg'] for gui in gui_env: try: print ("testing", gui) matplotlib.use(gui,warn=False, force=True) from matplotlib import pyplot as plt break except: continue print ("utils.py Using:",matplotlib.get_backend()) from matplotlib.backends.backend_agg import FigureCanvasAgg as Canvas from mpl_toolkits.mplot3d import Axes3D import config as cfg ######### Basic Utils ######### def adjust_gamma(image, gamma=1.0): """ Gamma correct images. """ ## Build a LUT mapping the pixel values [0, 255] to their adjusted gamma values invGamma = 1.0 / gamma table = np.array([((i / 255.0) ** invGamma) * 255 for i in np.arange(0, 256)]).astype("uint8") ## Apply gamma correction using the LUT return cv2.LUT(image, table) def scipy_sharpen(img_flt, alpha=30): """ Sharpen images. """ from scipy import ndimage blurred_f = ndimage.gaussian_filter(img_flt, 3) filter_blurred_f = ndimage.gaussian_filter(blurred_f, 1) img_flt = blurred_f + alpha * (blurred_f - filter_blurred_f) return img_flt def read_pickle(path): """ Load Pickle file. """ with open(path, 'rb') as f: data = pickle.load(f) return data def save_pickle(data, path): """ Save Pickle file. """ with open(path, 'wb') as f: pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL) ######### Pose quality and Metrics ######### def compute_similarity_transform(S1, S2): """ Computes a similarity transform (sR, t) that takes a set of 3D points S1 (3 x N) closest to a set of 3D points S2, where R is an 3x3 rotation matrix, t 3x1 translation, s scale. i.e. solves the orthogonal Procrutes problem. 
""" transposed = False if S1.shape[0] != 3 and S2.shape[0] != 3: S1 = S1.T S2 = S2.T transposed = True assert(S2.shape[1] == S1.shape[1]) ## Mean mu1 = S1.mean(axis=1, keepdims=True) mu2 = S2.mean(axis=1, keepdims=True) X1 = S1 - mu1 X2 = S2 - mu2 ## Compute variance of X1 used for scale var1 = np.sum(X1**2) ## The outer product of X1 and X2 K = X1.dot(X2.T) ## Solution that Maximizes trace(R'K) is R=U*V', where U, V are ## Singular vectors of K U, s, Vh = np.linalg.svd(K) V = Vh.T ## Construct Z that fixes the orientation of R to get det(R)=1 Z = np.eye(U.shape[0]) Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T))) ## Construct R R = V.dot(Z.dot(U.T)) ## Recover scale scale = np.trace(R.dot(K)) / var1 ## Recover translation t = mu2 - scale*(R.dot(mu1)) ## Error S1_hat = scale*R.dot(S1) + t if transposed: S1_hat = S1_hat.T return S1_hat def compute_error(pred_3d_all, gt_3d_all, full_out=True): """ MPJPE and PA_MPJPE metric computation. """ pred_3d_all_flat = pred_3d_all.copy() pred_3d_all_flat = pred_3d_all_flat - pred_3d_all_flat[:, 0:1,:] gt_3d_all_flat = gt_3d_all.copy() gt_3d_all_flat = gt_3d_all_flat - gt_3d_all_flat[:, 0:1,:] joint_wise_error = [] error = [] pa_joint_wise_error = [] pa_error = [] for i in range(len(pred_3d_all_flat)): each_pred_3d = pred_3d_all_flat[i] each_gt_3d = gt_3d_all_flat[i] tmp_err = np.linalg.norm(each_pred_3d-each_gt_3d, axis=1) joint_wise_error.append(tmp_err) error.append(np.mean(tmp_err)) pred3d_sym = compute_similarity_transform(each_pred_3d.copy(), each_gt_3d.copy()) tmp_pa_err = np.linalg.norm(pred3d_sym-each_gt_3d, axis=1) pa_joint_wise_error.append(tmp_pa_err) pa_error.append(np.mean(tmp_pa_err)) joint_wise_error = np.array(joint_wise_error) if(full_out): mpjpe = np.mean(error)*1000 ### Note: unit is mm pampjpe = np.mean(pa_error)*1000 ### Note: unit is mm return mpjpe, pampjpe else: return error, pa_error ###### Alternative manual regressors ###### def smplx45_to_17j(pose_smpl): """ SMPLX 45 joint J3D to 17 joint J3D. 
""" ## Remove fingers pose_smpl = pose_smpl[:-10] ## Remove extra def feet pose_smpl = pose_smpl[:-6] ## Remove face pose_smpl = pose_smpl[:-5] ## Remove wrist pose_smpl = pose_smpl[:-2] ## Remove extra def spine pose_smpl = np.delete(pose_smpl, 3, 0) ## 3 pose_smpl = np.delete(pose_smpl, 5, 0) ## 6 pose_smpl = np.delete(pose_smpl, 7, 0) ## 9 ## Remove torso pose_smpl = np.delete(pose_smpl, 10, 0) ## 10 pose_smpl = np.delete(pose_smpl, 10, 0) ## 11 ## Hip altitude increase and widen alt_f = 0.8 wide_f = 8.0 pelvis = pose_smpl[0].copy() r_hip = pose_smpl[2].copy() l_hip = pose_smpl[1].copy() ## Alt inc r_p_dir = pelvis - r_hip l_p_dir = pelvis - l_hip mag_rp = np.linalg.norm(r_p_dir) r_p_dir /= mag_rp mag_lp = np.linalg.norm(l_p_dir) l_p_dir /= mag_lp r_hip = r_hip + (r_p_dir*mag_rp*alt_f) l_hip = l_hip + (l_p_dir*mag_lp*alt_f) ## H-Widen hip_ctr = (r_hip + l_hip) / 2.0 r_dir = r_hip - hip_ctr l_dir = l_hip - hip_ctr ## Unit vec mag = np.linalg.norm(r_dir) r_dir /= mag l_dir /= np.linalg.norm(l_dir) r_hip = r_hip + (r_dir*mag*wide_f) l_hip = l_hip + (l_dir*mag*wide_f) ## place back pose_smpl[2] = r_hip pose_smpl[1] = l_hip return pose_smpl def smpl23_to_17j_3d(pose_smpl): """ Simple SMPL 23 joint J3D to 17 joint J3D. """ smpl_to_17j = [ [0,1],[8,11], [12],[17],[19], ### or 15 , 17 [13],[18], [20], ### or 16 , 18 [14],[0],[3], [9,6],[9],[1], [4],[10,7],[10] ] pose_17j = np.zeros((len(smpl_to_17j),3)) for idx in range(len(smpl_to_17j)): sel_idx = smpl_to_17j[idx] if(len(sel_idx) == 2): pose_17j[idx] = (pose_smpl[sel_idx[0]] + pose_smpl[sel_idx[1]]) / 2.0 else: pose_17j[idx] = pose_smpl[sel_idx[0]] return pose_17j """ SMPL J17 reordering vec. """ smpl_reorder_vec = [0, 9, 12, 14, 16, 11, 13, 15, 10, 2, 4, 6, 8, 1, 3, 5, 7 ] def reorder_smpl17_to_j17(pose_3d): """ SMPL reorder SMPL J17 to standard J17. """ pose_3d = pose_3d[smpl_reorder_vec] return pose_3d def smpl24_to_17j_adv(pose_smpl): """ Improved SMPL 23 joint J3D to 17 joint J3D. 
""" ## Hip altitude increase and widen alt_f = 0.8 wide_f = 8.0 pelvis = pose_smpl[0].copy() r_hip = pose_smpl[2].copy() l_hip = pose_smpl[1].copy() ## Alt inc r_p_dir = pelvis - r_hip l_p_dir = pelvis - l_hip mag_rp = np.linalg.norm(r_p_dir) r_p_dir /= mag_rp mag_lp = np.linalg.norm(l_p_dir) l_p_dir /= mag_lp r_hip = r_hip + (r_p_dir*mag_rp*alt_f) l_hip = l_hip + (l_p_dir*mag_lp*alt_f) ## H-Widen hip_ctr = (r_hip + l_hip) / 2.0 r_dir = r_hip - hip_ctr l_dir = l_hip - hip_ctr ## Unit vec mag = np.linalg.norm(r_dir) r_dir /= mag l_dir /= np.linalg.norm(l_dir) r_hip = r_hip + (r_dir*mag*wide_f) l_hip = l_hip + (l_dir*mag*wide_f) ## Place back pose_smpl[2] = r_hip pose_smpl[1] = l_hip ## Neck to head raise with tilt towards nose alt_f = 0.7 head = pose_smpl[15].copy() neck = pose_smpl[12].copy() ## Alt inc n_h_dir = head - neck mag_nh = np.linalg.norm(n_h_dir) n_h_dir /= mag_nh head = head + (n_h_dir*mag_nh*alt_f) ## Place back pose_smpl[15] = head ## Remove wrist pose_smpl = pose_smpl[:-2] ## Remove extra def spine pose_smpl = np.delete(pose_smpl, 3, 0) ## 3 pose_smpl = np.delete(pose_smpl, 5, 0) ## 6 pose_smpl = np.delete(pose_smpl, 7, 0) ## 9 ## Remove torso pose_smpl = np.delete(pose_smpl, 10, 0) ## 10 pose_smpl = np.delete(pose_smpl, 10, 0) ## 11 return pose_smpl def hip_straighten(pose_smpl): """ Straighten Hip in J17. """ #pelvis = pose_smpl[0].copy() r_hip = pose_smpl[2].copy() l_hip = pose_smpl[1].copy() pelvis = (r_hip + l_hip) / 2 pose_smpl[0] = pelvis return pose_smpl """ Limb parents for SMPL joints. """ limb_parents = [ 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 12,12,12, 16,17,18,19,20,21 ] """ 3D skeleton plot colours for SMPL joints. 
""" colors = np.array([[0,0,255], [0,255,0], [255,0,0], [255,0,255], [0,255,255], [255,255,0], [127,127,0], [0,127,0], [100,0,100], [255,0,255], [0,255,0], [0,0,255], [255,255,0], [127,127,0], [100,0,100], [175,100,195], [0,0,255], [0,255,0], [255,0,0], [255,0,255], [0,255,255], [255,255,0], [127,127,0], [0,127,0], [100,0,100], [255,0,255], [0,255,0], [0,0,255], [255,255,0], [127,127,0], [100,0,100], [175,100,195]]) def fig2data(fig): """ Convert a Matplotlib figure to a 4D numpy array with RGBA channels. """ ## Draw the renderer fig.canvas.draw() ## Get the RGBA buffer from the figure w, h = fig.canvas.get_width_height() buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8) buf.shape = (w, h, 4) ## Roll the ALPHA channel to have it in RGBA mode buf = np.roll(buf, 3, axis=2) return buf def draw_limbs_3d_plt(joints_3d, ax, limb_parents=limb_parents): ## Direct 3d plotting for i in range(joints_3d.shape[0]): x_pair = [joints_3d[i, 0], joints_3d[limb_parents[i], 0]] y_pair = [joints_3d[i, 1], joints_3d[limb_parents[i], 1]] z_pair = [joints_3d[i, 2], joints_3d[limb_parents[i], 2]] #ax.text(joints_3d[i, 0], joints_3d[i, 1], joints_3d[i, 2], s=str(i)) ax.plot(x_pair, y_pair, z_pair, color=colors[i]/255.0, linewidth=3, antialiased=True) def plot_skeleton_3d(joints_3d, flag=-1, limb_parents=limb_parents, title=""): ## 3D Skeleton plotting fig = plt.figure(frameon=False, figsize=(7, 7)) ax = fig.add_subplot(1, 1, 1, projection='3d') ax.clear() ## Axis setup if (flag == 0): ax.view_init(azim=0, elev=0) elif (flag == 1): ax.view_init(azim=90, elev=0) ax.set_xlim(-200, 200) ax.set_ylim(-200, 200) ax.set_zlim(-200, 200) scale = 1 ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('z') draw_limbs_3d_plt(joints_3d * scale, ax, limb_parents) ax.set_title(title) plt_img = fig2data(fig) plt.close(fig) return plt_img def skeleton_image(joints_2d, img): """ 2D Joint skeleton Overlay. 
""" img_copy = img.copy() for i in range(joints_2d.shape[0]): x_pair = [joints_2d[i, 0], joints_2d[limb_parents[i], 0]] y_pair = [joints_2d[i, 1], joints_2d[limb_parents[i], 1]] img_copy = cv2.line(img_copy, (int(x_pair[0]),int(y_pair[0])), (int(x_pair[1]),int(y_pair[1])), colors[i],4) return img_copy def create_collage(img_list, axis=1): """ Collage a set of images to form a panel. (numpy) """ np_new_array = np.concatenate([i for i in img_list], axis=axis) return np_new_array def align_by_pelvis(joints): """ Center by pelvis joint. """ hip_id = 0 joints -= joints[hip_id, :] return joints def mesh2d_center_by_nose(mesh2d,w=224 ,h=224): """ Simple mesh centering by nose/pelvis vtx. (numpy) """ #hip_id = 0 nose_id = 0 ctr = mesh2d[nose_id,:] mesh_ret = mesh2d - ctr + np.array([ w/2, h/5 ]) return mesh_ret def align_with_image_j2d(points2d, img_width, img_height): """ Perform center alignment to image coordinate system. (numpy) """ points2d[:,0] += img_width/2 points2d[:,1] += img_height/2 return points2d """ Input preprocess """ def get_transform(center, scale, res, rot=0): """ Generate transformation matrix. """ h = 224 * scale t = np.zeros((3, 3)) t[0, 0] = float(res[1]) / h t[1, 1] = float(res[0]) / h t[0, 2] = res[1] * (-float(center[0]) / h + .5) t[1, 2] = res[0] * (-float(center[1]) / h + .5) t[2, 2] = 1 if not rot == 0: rot = -rot ## To match direction of rotation from cropping rot_mat = np.zeros((3,3)) rot_rad = rot * np.pi / 180 sn,cs = np.sin(rot_rad), np.cos(rot_rad) rot_mat[0,:2] = [cs, -sn] rot_mat[1,:2] = [sn, cs] rot_mat[2,2] = 1 ## Need to rotate around center t_mat = np.eye(3) t_mat[0,2] = -res[1]/2 t_mat[1,2] = -res[0]/2 t_inv = t_mat.copy() t_inv[:2,2] *= -1 t = np.dot(t_inv,np.dot(rot_mat,np.dot(t_mat,t))) return t def transform(pt, center, scale, res, invert=0, rot=0): """ Transform pixel location to different reference. 
""" t = get_transform(center, scale, res, rot=rot) if invert: t = np.linalg.inv(t) new_pt = np.array([pt[0] - 1, pt[1] - 1, 1.]).T new_pt = np.dot(t, new_pt) return new_pt[:2].astype(int) + 1 def crop(img, center, scale, res, rot=0): """ Crop image according to the supplied bounding box. """ ## Upper left point ul = np.array(transform([1, 1], center, scale, res, invert=1)) - 1 ## Bottom right point br = np.array(transform([res[0]+1, res[1]+1], center, scale, res, invert=1)) - 1 ## Padding so that when rotated proper amount of context is included pad = int(np.linalg.norm(br - ul) / 2 - float(br[1] - ul[1]) / 2) if not rot == 0: ul -= pad br += pad new_shape = [br[1] - ul[1], br[0] - ul[0]] if len(img.shape) > 2: new_shape += [img.shape[2]] new_img = np.zeros(new_shape) ## Range to fill new array new_x = max(0, -ul[0]), min(br[0], len(img[0])) - ul[0] new_y = max(0, -ul[1]), min(br[1], len(img)) - ul[1] ## Range to sample from original image old_x = max(0, ul[0]), min(len(img[0]), br[0]) old_y = max(0, ul[1]), min(len(img), br[1]) new_img[new_y[0]:new_y[1], new_x[0]:new_x[1]] = img[old_y[0]:old_y[1], old_x[0]:old_x[1]] if not rot == 0: ## Remove padding new_img = scipy.misc.imrotate(new_img, rot) new_img = new_img[pad:-pad, pad:-pad] new_img = scipy.misc.imresize(new_img, res) return new_img def j2d_crop(img, j2d_file, rescale=1.2, detection_thresh=0.2): """ Get center and scale for Bbox from OpenPose/Centertrack detections.""" with open(j2d_file, 'r') as f: keypoints = json.load(f)['people'][0]['pose_keypoints_2d'] keypoints = np.reshape(np.array(keypoints), (-1,3)) valid = keypoints[:,-1] > detection_thresh valid_keypoints = keypoints[valid][:,:-1] center = valid_keypoints.mean(axis=0) bbox_size = (valid_keypoints.max(axis=0) - valid_keypoints.min(axis=0)).max() ## Adjust bounding box tightness scale = bbox_size / 200.0 scale *= rescale img = crop(img, center, scale, (cfg.IMG_W, cfg.IMG_H)) return img def bbox_crop(img, bbox): """ Crop, center and scale image based 
on BBox """ with open(bbox, 'r') as f: bbox = np.array(json.load(f)['bbox']).astype(np.float32) ul_corner = bbox[:2] center = ul_corner + 0.5 * bbox[2:] width = max(bbox[2], bbox[3]) scale = width / 200.0 img = crop(img, center, scale, (cfg.IMG_W, cfg.IMG_H)) return img ########################### TF UTILS ############################# import pickle as pkl import tensorflow as tf import tensorflow_graphics as tfg from render.render_layer_ortho import RenderLayer import render.vertex_normal_expose as dirt_expose PI = np.pi def tfread_image(image,fmt='png', channels=3): """ Simple read and decode image. """ if (fmt == 'png'): return tf.image.decode_png(image, channels=channels) elif (fmt == 'jpg'): return tf.image.decode_jpeg(image, channels=channels) else: print ("ERROR specified format not found....") def tf_norm(tensor, axis=1): """ Min-Max normalize image. """ min_val = tf.reduce_min(tensor, axis=axis, keepdims=True) normalized_tensor = tf.div( tf.subtract(tensor, min_val), tf.subtract(tf.reduce_max(tensor, axis=axis, keepdims=True), min_val)) return normalized_tensor def tfresize_image(image, size=(cfg.IMG_W, cfg.IMG_H)): """ Resize image. """ return tf.image.resize(image, size) def denormalize_image(image): """ Undo normalization of image. """ image = (image / 2) + 0.5 return image def unprocess_image(image): """ Undo preprocess image. """ # Normalize image to [0, 1] image = (image / 2) + 0.5 image = image * 255.0 #[0,1] to [0,255] range return image def preprocess_image(image, do_znorm=True): """ Preprocess image. """ image = tf.image.decode_jpeg(image, channels=3) image = tf.image.resize(image, (cfg.IMG_W, cfg.IMG_H)) image /= 255.0 # normalize to [0,1] range if(do_znorm): # Normalize image to [-1, 1] image = 2 * (image - 0.5) return image def load_and_preprocess_image(path): """ Simple read and preprocess for just image. 
""" image = tf.io.read_file(path) processed_image = preprocess_image(image) return processed_image def load_and_preprocess_image_and_mask(path, j2d, j3d, beta, mask_path, pose, camera, data_id): """ Simple read and preprocess for image and mask. """ image = tf.io.read_file(path) proc_image = preprocess_image(image) ## For Mask mask = tf.io.read_file(mask_path) proc_mask = preprocess_image(mask, do_znorm=False) return proc_image, j2d, j3d, beta, proc_mask, pose, camera, data_id def tf_create_collage(img_list, axis=2): """ Collage a set of images to form a panel. """ tf_new_array = tf.concat([i for i in img_list], axis=axis) return tf_new_array def log_images(tag, image, step, writer): """ Logs a list of images to tensorboard. """ height, width, channel = image.shape image = Image.fromarray(image) output = BytesIO() image.save(output, format='PNG') image_string = output.getvalue() output.close() ## Create an Image object img_sum = tf.Summary.Image(height=height, width=width, colorspace=channel, encoded_image_string=image_string) ## Create a Summary value im_summary = tf.Summary.Value(tag='%s' % (tag), image=img_sum) ## Create and write Summary summary = tf.Summary(value=[im_summary]) writer.add_summary(summary, step) def get_network_params(scope): """ Get all accessable variables. """ return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope) def get_net_train_params(scope): """ Get Trainable params. 
""" return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope) def copy_weights(iter_no, wt_dir, label='best'): """ Backup the Weights to pretrained_weights/ given iteration number and label i.e 'iter' or 'best' """ files = os.listdir(wt_dir+label+"wt_") match_substr = '%s-%d' % (label, iter_no) files = [f for f in files if match_substr in f] for f in files: cmd = 'cp %s%s pretrained_weights/' % (wt_dir, f) print (cmd) os.system(cmd) def get_most_recent_iteration(wt_dir, label='iter'): """ Gets the most recent iteration number from weights/ dir of given label: ('best' or 'iter') """ files = os.listdir(wt_dir) files = [f for f in files if label in f] numbers = {long(f[f.index('-') + 1:f.index('.')]) for f in files} return max(numbers) def copy_latest(wt_dir, wt_type='best'): """ Backup latest weights. """ latest_iter = get_most_recent_iteration(label=wt_type, wt_dir=wt_dir) copy_weights(latest_iter, label=wt_type, wt_dir=wt_dir) return latest_iter def get_latest_iter(wt_dir, wt_type='best'): """ Get latest weights. """ latest_iter = get_most_recent_iteration(label=wt_type, wt_dir=wt_dir) return latest_iter def tf_align_by_pelvis(joints): """ Simple centering by pelvis location. """ hip_id = 0 pelvis = joints[:, hip_id:hip_id+1, :] return tf.subtract(joints, pelvis) def tf_mesh2d_center_by_nose(mesh2d,w=224 ,h=224): """ Simple mesh centering by nose/pelvis vtx. """ #hip_id = 0 nose_id = 0 ctr = mesh2d[nose_id:nose_id+1,:] mesh_ret = tf.add(tf.subtract(mesh2d, ctr), [[ w/2, h/5 ]]) return mesh_ret def tf_perspective_project(points3d, focal, prin_pt, name="perspective_project"): """ Simple Perspective Projection. 
""" fx = focal[0] fy = focal[1] tx = prin_pt[0] ty = prin_pt[1] intrin = tf.convert_to_tensor(np.array([ [fx, 0., tx], [0., fy, ty], [0., 0., 1.]])) intrin = tf.tile(intrin,[points3d.shape[0]]) p_cam3d = tf.matmul(points3d, intrin, name=name) points2d = (points3d[:,:,0:2] / points3d[:,:,2]) ### project return points2d def tf_orthographic_project(points3d, name="orthographic_project"): """ Simple Orthographic Projection. """ return points3d[:,:,0:2] ## X,Y,Z def tf_dyn_scale_and_align(vertices, joints_3d, scale, add_trans): """ Dynamic scale and trans adjust. """ xy_max = tf.expand_dims(tf.reduce_max(vertices, axis=1), axis=1) xy_min = tf.expand_dims(tf.reduce_min(vertices, axis=1), axis=1) #person_ctr = (xy_max + xy_min)/2.0 person_range = tf.abs(xy_max-xy_min) person_sc = tf.expand_dims(tf.reduce_max(person_range[:,:,0:2], axis=2), axis=2) ### Scale person to detector scale vertices = tf.div(vertices, person_sc) vertices = vertices * scale joints_3d = tf.div(joints_3d, person_sc) joints_3d = joints_3d * scale ### Bbox center xy_max = tf.expand_dims(tf.reduce_max(vertices, axis=1), axis=1) xy_min = tf.expand_dims(tf.reduce_min(vertices, axis=1), axis=1) person_ctr = (xy_max + xy_min)/2.0 add_trans = tf.concat([add_trans, tf.zeros_like(add_trans[:,:,0:1])], axis=2) vertices = vertices - person_ctr + add_trans joints_3d = joints_3d - person_ctr + add_trans return vertices, joints_3d, scale[:,0], ((add_trans-person_ctr)[:,0,:2]) def tf_do_scale_and_align(vertices, joints_3d, scale, trans): """ Perform Scale and trans. (in world space) """ scale = tf.reshape(scale, [-1, 1, 1]) trans = tf.reshape(trans, [-1, 1, 2]) z = tf.zeros_like(trans[:,:,0:1]) shift = tf.concat([trans, z], axis=2) ### Trans in world space vertices = vertices + shift joints_3d = joints_3d + shift ### Scale person vertices = vertices * scale joints_3d = joints_3d * scale return vertices, joints_3d def for_tpix_tf_do_scale_and_align(vertices, joints_3d, scale, trans): """ Perform Scale and trans. 
(in Pixel space) """ xy_max = tf.expand_dims(tf.reduce_max(vertices, axis=1), axis=1) xy_min = tf.expand_dims(tf.reduce_min(vertices, axis=1), axis=1) #person_ctr = (xy_max + xy_min)/2.0 person_range = tf.abs(xy_max-xy_min) person_sc = tf.expand_dims(tf.reduce_max(person_range[:,:,0:2], axis=2), axis=2) ##ignore z ### Unit scale vertices = tf.div(vertices, person_sc) joints_3d = tf.div(joints_3d, person_sc) ### scale = tf.reshape(scale, [-1, 1, 1]) trans = tf.reshape(trans, [-1, 1, 2]) z = tf.zeros_like(trans[:,:,0:1]) shift = tf.concat([trans, z], axis=2) ### Scale person vertices = vertices * scale joints_3d = joints_3d * scale ### Trans in cam space vertices = vertices + shift joints_3d = joints_3d + shift return vertices, joints_3d def tf_align_with_image_j2d(points2d, img_width, img_height): """ Perform center alignment to image coordinate system. (in Pixel space) """ if(img_width == img_height): points2d = points2d + (img_width/2) else: width_tf = tf.zeros((points2d.shape[0], points2d.shape[1], 1),dtype=tf.int32) + (img_width/2) height_tf = tf.zeros((points2d.shape[0], points2d.shape[1], 1),dtype=tf.int32) + (img_height/2) concatd = tf.concat([width_tf, height_tf], axis=2) points2d = points2d + concatd return points2d ############ Render pipeline utils ############ MESH_PROP_FACES_FL = './assets/smpl_sampling.pkl' """ Read face definition. Fixed for a SMPL model. """ with open(os.path.join(os.path.dirname(__file__), MESH_PROP_FACES_FL), 'rb') as f: sampling = pkl.load(f) M = sampling['meshes'] faces = M[0]['f'].astype(np.int32) faces = tf.convert_to_tensor(faces,dtype=tf.int32) def_bgcolor = tf.zeros(3) + [0, 0.5, 0] ## Green BG def colour_pick_img(img_batch, vertices, batch_size): """ Pick clr based on mesh registration. 
[Vtx, Img] -> [Vtx_clr] """ proj_verts = tf_orthographic_project(vertices) verts_pix_space = tf_align_with_image_j2d(proj_verts, cfg.IMG_W, cfg.IMG_H) #### Pick colours and resolve occlusion softly verts_pix_space = tf.cast(verts_pix_space, dtype=tf.int32) verts_pix_space = tf.concat([verts_pix_space[:,:,1:], verts_pix_space[:,:,0:1]], axis=2) if(cfg.TF_version >= 1.14): #### Alternative colour pick for TF 1.14 & above, faster inference. clr_picked = tf.gather_nd(params=occ_aware_mask, indices=verts_pix_space, batch_dims=1) ### NOTE: only for tf 1.14 and above else: ### For TF 1.13 and older for b in range(batch_size): if b == 0: clr_picked = [tf.gather_nd(params=img_batch[b], indices=verts_pix_space[b])] else: curr_clr_pick = [tf.gather_nd(params=img_batch[b], indices=verts_pix_space[b])] clr_picked = tf.concat([clr_picked, curr_clr_pick], axis=0) img_clr_picked = tf.cast(clr_picked, dtype=tf.float32) return img_clr_picked def get_occ_aware_cam_facing_mask(vertices, batch_size, part_based_occlusion_resolve=False, bgcolor=def_bgcolor): """ Occlusion-aware vtx weighting, depth based or part-based. 
[Vtx] -> [Vtx_occ_wtmap] """ if (part_based_occlusion_resolve): vertex_colors = np.zeros((batch_size, 6890, 3)) ### Part segmentation_generation vtx_prts = np.load("vtx_clr_smpl_proj_final_part_segmentations.npy") ### Vertex parts modify for maximal seperation vtx_prts = vtx_prts + 1 vtx_prts[vtx_prts == 2] = 5 vtx_prts[vtx_prts == 22] = 7 vtx_prts[vtx_prts == 8] = 22 vtx_prts[vtx_prts == 12] = 2 vtx_prts[vtx_prts == 23] = 13 vtx_prts[vtx_prts == 19] = 4 vtx_prts[vtx_prts == 21] = 18 #### part labelled vtx_part_labels = np.zeros(vertices.shape) vtx_prts = np.expand_dims(vtx_prts, axis=1) vtx_prts = vtx_prts / 24.0 part_label = np.concatenate([vtx_prts, vtx_prts, vtx_prts], axis=1) vtx_part_labels[:] = part_label ##broadcast to form batch #### Render cam setup fixed_rt = np.array([1.0, 0.0, 0.0]) ### tilt,pan,roll angle = np.linalg.norm(fixed_rt) axis = fixed_rt / angle ang = np.pi new_an_ax = axis * (ang) fixed_rt = new_an_ax fixed_t = [0., 0., 0.] ## fixed_renderer = RenderLayer(cfg.IMG_W, cfg.IMG_H, 3, bgcolor=bgcolor, f=faces, camera_f=[cfg.IMG_W, cfg.IMG_H], camera_c=[cfg.IMG_W/2.0, cfg.IMG_H/2.0], camera_rt=fixed_rt, camera_t=fixed_t) vert_norms = dirt_expose.get_vertex_normals(vertices, faces) #### Verts selection based on norm vert_norms_flat = tf.reshape(vert_norms, [-1, 3]) fake_angle = tf.ones_like(vert_norms_flat[:,0:1], dtype=tf.float32) ## unit mag euler_angles = tfg.geometry.transformation.euler.from_axis_angle(axis=vert_norms_flat, angle=fake_angle) vert_norms_euler = tf.reshape(euler_angles, [-1, 6890, 3]) ### Diff. 
margin formulation quant_sharpness_factor = 50 verts_ndiff = vert_norms_euler[:,:,2:] * -1 ## invert as cam faces verts_ndiff = verts_ndiff * quant_sharpness_factor ## centrifugal from 0.0 to get quantization effect #verts_ndiff = tf.math.sign(verts_ndiff) #verts_ndiff = tf.nn.relu(verts_ndiff) verts_ndiff = tf.nn.sigmoid(verts_ndiff) if(part_based_occlusion_resolve): vtx_part_labels= tf.convert_to_tensor(vtx_part_labels, dtype=tf.float32) ## Normal part based resolving occlusion based render cam_facing_vtx_clrs = tf.multiply(vtx_part_labels, verts_ndiff) else: ## Depth based occlusion aware picking to be debugged depth_vertices = vertices[:,:,2:] ## Normalize the depth between 0 and 1 min_val = tf.reduce_min(depth_vertices, axis=1, keepdims=True) normalized_depth_vertices = tf.div( tf.subtract(depth_vertices, min_val), tf.subtract(tf.reduce_max(depth_vertices, axis=1, keepdims=True), min_val)) cam_facing_vtx_clrs = tf.tile(normalized_depth_vertices, [1,1,3]) cam_facing_vtx_clrs = tf.multiply(cam_facing_vtx_clrs, verts_ndiff) ## Mask render for occlusion resolution occ_aware_mask = fixed_renderer.call(vertices, vc=cam_facing_vtx_clrs) ## occulsion aware z-buffered parts masks clr_picked = colour_pick_img(occ_aware_mask, vertices, batch_size) ## Occlusion resolution based on z-buffered parts if(part_based_occlusion_resolve): occ_sel_diff = (vtx_part_labels[:,:,0:1] - clr_picked[:,:,0:1] ) * 10.0 else: ### Depth based colour pick occ_sel_diff = (normalized_depth_vertices[:,:,0:1] - clr_picked[:,:,0:1] ) * 10.0 ### Diff. margin soft selection occ_sel = tf.nn.sigmoid(occ_sel_diff) * tf.nn.sigmoid(-1 * occ_sel_diff) * 4.0 #### Select front facing final_front_facing_occ_resolved = tf.multiply(occ_sel, verts_ndiff) return final_front_facing_occ_resolved def apply_ref_symmetry(vclr_picked_resolved, front_facing_occ_resolved_mask, batch_size): """ Reflectional symmetry module. 
[Vtx_clr, Vtx_wtmap] -> [Vtx_clr_symm] """ symm_arr = np.load("./assets/basic_vtx_clr_symm_map.npy") symm_arr_transpose = np.transpose(symm_arr) sym_map = tf.expand_dims(symm_arr, axis=0) sym_map = tf.tile(sym_map, [batch_size,1,1]) sym_map_transpose = tf.expand_dims(symm_arr_transpose, axis=0) sym_map_transpose = tf.tile(sym_map_transpose, [batch_size, 1, 1]) ## Group clr value calc num = tf.matmul(sym_map, vclr_picked_resolved) den = tf.matmul(sym_map, front_facing_occ_resolved_mask) den = den + 0.00001 calc_val = tf.truediv(num, den) ### Value assign using symmtery vclr_symm = tf.matmul(sym_map_transpose, calc_val) return vclr_symm
33.538462
195
0.618374
4,930
31,392
3.718256
0.146856
0.024876
0.003819
0.008401
0.330206
0.263543
0.222301
0.206917
0.187715
0.17615
0
0.04785
0.239074
31,392
935
196
33.574332
0.719555
0.161602
0
0.231561
0
0
0.013716
0.004217
0
0
0
0
0.001715
1
0.087479
false
0
0.039451
0
0.210978
0.006861
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebb024cf3162a7ae7533de24c182385e63946496
8,389
py
Python
netQuil/connections.py
att-innovate/qnetdes
459d688e92139ab3219416cdb9e3b20ff082dc1d
[ "MIT" ]
4
2019-11-14T21:30:35.000Z
2021-12-13T08:34:33.000Z
netQuil/connections.py
att-innovate/qnetdes
459d688e92139ab3219416cdb9e3b20ff082dc1d
[ "MIT" ]
null
null
null
netQuil/connections.py
att-innovate/qnetdes
459d688e92139ab3219416cdb9e3b20ff082dc1d
[ "MIT" ]
null
null
null
import queue
import multiprocessing
import itertools
import sys

__all__ = ["QConnect", "CConnect"]

pulse_length_default = 10 * 10 ** -12  # 10 ps photon pulse length
signal_speed = 2.998 * 10 ** 5  # speed of light in km/s
fiber_length_default = 0.0


class QConnect:
    def __init__(self, *args, transit_devices=None):
        '''
        This is the base class for a quantum connection between multiple agents.

        :param agents \*args: list of agents to connect
        :param List<Devices> transit_devices: list of devices qubits travel through
        '''
        # Avoid the shared-mutable-default-argument pitfall; behavior for
        # callers passing a list (or nothing) is unchanged.
        transit_devices = [] if transit_devices is None else transit_devices
        agents = list(args)
        self.agents = {}
        self.source_devices = {}
        self.target_devices = {}
        self.transit_devices = {}

        # Create queue to keep track of multiple requests.
        # Name of queue is name of target agent.
        self.queues = {}

        for agent in agents:
            self.agents.update({agent.name: agent})
            self.source_devices.update({agent.name: agent.source_devices})
            self.target_devices.update({agent.name: agent.target_devices})
            self.transit_devices.update({agent.name: transit_devices})
            self.queues.update({agent.name: queue.Queue()})
            # Register this connection on every *other* agent.
            for agentConnect in agents:
                if agentConnect != agent:
                    agent.qconnections[agentConnect.name] = self

    def put(self, source, target, qubits, source_time):
        '''
        Constructs full list of devices that each qubit must travel through.
        Sends the qubits through source devices. Places qubits and a list of
        transit and target devices on the queue. Queue is keyed on the target
        agent's name.

        :param String source: name of agent where the qubits being sent originated
        :param String target: name of agent receiving qubits
        :param Array qubits: array of numbers corresponding to qubits the source is sending
        :param Float source_time: time of source agent before sending qubits
        :returns: time qubits took to pass through source devices
        '''
        source_devices = self.source_devices[source]
        transit_devices = self.transit_devices[source]
        target_devices = self.target_devices[target]
        non_source_devices = {
            "transit": transit_devices,
            "target": target_devices,
        }
        program = self.agents[source].program
        source_delay = 0

        # Keep track of qubits remaining
        traveling_qubits = qubits
        if not source_devices:
            source_delay += pulse_length_default
        else:
            # Keep track of qubits lost by each device
            total_lost_qubits = []
            for device in source_devices:
                # If qubits are still remaining
                if traveling_qubits:
                    res = device.apply(program, traveling_qubits)
                    if 'lost_qubits' in res:
                        lost_qubits = res['lost_qubits']
                        # Remove lost qubits from traveling qubits
                        traveling_qubits = list(set(traveling_qubits) - set(lost_qubits))
                        # Add lost_qubits lost from current device to total_lost_qubits
                        total_lost_qubits += lost_qubits
                    if 'delay' in res:
                        source_delay += res['delay']
                else:
                    break

            # Invert lost qubits and add to traveling qubits.
            # BUG FIX: the original appended to ``total_lost_qubits`` while
            # iterating it — an infinite loop whenever any qubit was lost —
            # and would also have re-added the un-inverted originals. Build a
            # separate inverted list instead, mirroring get() below (lost
            # qubit q becomes -q, and 0 becomes -inf).
            inverted_lost_qubits = []
            for q in total_lost_qubits:
                if q == 0:
                    inverted_lost_qubits.append(float("-inf"))
                else:
                    inverted_lost_qubits.append(-q)
            traveling_qubits += inverted_lost_qubits

        # Scale source delay time according to number of qubits sent
        scaled_source_delay = source_delay*len(qubits)
        self.queues[target].put((traveling_qubits, non_source_devices, scaled_source_delay, source_time))
        return scaled_source_delay

    def get(self, agent):
        '''
        Pops qubits off of the agent's queue. Sends qubit through transit and
        target devices, simulating a quantum network. Return an array of the
        qubits that have been altered, as well as the time it took the qubit
        to travel through the network.

        Some qubits may be lost during transmission. If lost, their value
        will switch to negative, or, in the case of 0, be set to -inf

        :param Agent agent: agent receiving the qubits
        :returns: list of qubits, time to pass through transit and target devices, and the source agent's time
        '''
        traveling_qubits, devices, source_delay, source_time = self.queues[agent.name].get()
        agent.qubits = list(set(traveling_qubits + agent.qubits))
        program = self.agents[agent.name].program
        transit_devices = devices["transit"]
        target_devices = devices["target"]

        # Number of qubits before any are lost
        num_travel_qubits = len(traveling_qubits)
        travel_delay = 0
        if not transit_devices:
            travel_delay += fiber_length_default/signal_speed
        if not target_devices:
            travel_delay += 0

        # Qubits already marked lost upstream are negative (or -inf for 0).
        total_lost_qubits = [q for q in traveling_qubits if q < 0 or q == float("-inf")]
        remaining_qubits = [q for q in traveling_qubits if q >= 0]
        for device in list(itertools.chain(transit_devices, target_devices)):
            # If qubits are remaining
            if remaining_qubits:
                # NOTE(review): passes ``traveling_qubits`` (which still
                # includes already-lost markers) rather than
                # ``remaining_qubits`` — verify against Device.apply's
                # contract before changing.
                res = device.apply(program, traveling_qubits)
                if 'lost_qubits' in res:
                    lost_qubits = res['lost_qubits']
                    # Remove lost qubits from traveling qubits
                    remaining_qubits = list(set(remaining_qubits) - set(lost_qubits))
                    # Add lost_qubits lost from current device to total_lost_qubits
                    total_lost_qubits += lost_qubits
                if 'delay' in res:
                    travel_delay += res['delay']
            else:
                break

        # Remove traveling_qubits
        agent.qubits = list(set(agent.qubits) - set(traveling_qubits))
        lost_qubits_flipped = []
        for q in total_lost_qubits:
            if q == 0:
                lost_qubits_flipped.append(float("-inf"))
            else:
                lost_qubits_flipped.append(-q)

        # Add inverted lost qubits to remaining qubits
        traveling_qubits = remaining_qubits + lost_qubits_flipped
        agent.qubits += traveling_qubits
        scaled_delay = travel_delay*num_travel_qubits + source_delay
        return traveling_qubits, scaled_delay, source_time


class CConnect:
    def __init__(self, *args, length=0.0):
        '''
        This is the base class for a classical connection between
        multiple agents.

        :param agents \*args: list of agents to connect
        :param Float length: distance between first and second agent
        '''
        agents = list(args)
        self.agents = {}

        # Create queue to keep track of multiple requests.
        # Name of queue is name of target agent.
        self.queues = {}

        for agent in agents:
            self.agents.update({agent.name: agent})
            self.queues.update({agent.name: queue.Queue()})
            for agentConnect in agents:
                if agentConnect != agent:
                    agent.cconnections[agentConnect.name] = self
        self.length = length

    def put(self, target, cbits):
        '''
        Places cbits on queue keyed on the target Agent's name

        :param String target: name of recipient of program
        :param Array cbits: array of numbers corresponding to cbits agent is sending
        :returns: time for cbits to travel
        '''
        csource_delay = pulse_length_default * 8 * sys.getsizeof(cbits)
        self.queues[target].put((cbits, csource_delay))
        return csource_delay

    def get(self, agent):
        '''
        Pops cbits off of the agent's queue and adds travel delay

        :param String agent: name of the agent receiving the cbits
        :returns: cbits from source and time they took to travel
        '''
        cbits, source_delay = self.queues[agent].get()
        travel_delay = self.length/signal_speed
        scaled_delay = travel_delay*len(cbits) + source_delay
        return cbits, scaled_delay
39.947619
119
0.619383
1,023
8,389
4.924731
0.162268
0.061532
0.032751
0.015879
0.356689
0.26717
0.250099
0.241366
0.241366
0.216753
0
0.004829
0.308857
8,389
210
120
39.947619
0.864091
0.293003
0
0.290909
0
0
0.022023
0
0
0
0
0
0
1
0.054545
false
0
0.036364
0
0.145455
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebb199161cfe0fe5c616a4c065ef5c14803d10c1
2,035
py
Python
pqcli/ui/curses/views/game_view/character_sheet_window.py
tree-s/pq-cli
f5d0ed69a99c490a63f854442fba2b443e59a134
[ "MIT" ]
94
2018-11-17T22:40:16.000Z
2022-03-28T05:09:16.000Z
pqcli/ui/curses/views/game_view/character_sheet_window.py
tree-s/pq-cli
f5d0ed69a99c490a63f854442fba2b443e59a134
[ "MIT" ]
17
2019-04-10T18:06:46.000Z
2022-03-03T03:25:08.000Z
pqcli/ui/curses/views/game_view/character_sheet_window.py
tree-s/pq-cli
f5d0ed69a99c490a63f854442fba2b443e59a134
[ "MIT" ]
14
2019-04-10T21:33:14.000Z
2022-02-16T14:42:56.000Z
import typing as T

from pqcli.mechanic import Player, StatType
from pqcli.ui.curses.widgets import Focusable

from .progress_bar_window import DataTableProgressBarWindow


class CharacterSheetWindow(Focusable, DataTableProgressBarWindow):
    """Curses window showing the player's traits table plus an XP progress bar.

    Subscribes to player signals so the sheet re-renders whenever the level,
    stats, or experience bar change.
    """

    def __init__(
        self, player: Player, parent: T.Any, h: int, w: int, y: int, x: int
    ) -> None:
        super().__init__(
            parent,
            h,
            w,
            y,
            x,
            " Character Sheet ",
            align_right=False,
            show_time=True,
        )
        self._on_focus_change += self._render
        self._focused = True

        self._player = player
        # Level-ups and stat changes redraw the traits table; XP changes only
        # redraw the progress bar.
        self._player.connect("level_up", self._sync_traits)
        self._player.stats.connect("change", self._sync_traits)
        self._player.exp_bar.connect("change", self._sync_exp)
        self.sync()

    def stop(self) -> None:
        """Detach every signal handler registered in __init__, then stop."""
        super().stop()
        self._player.disconnect("level_up", self._sync_traits)
        self._player.stats.disconnect("change", self._sync_traits)
        self._player.exp_bar.disconnect("change", self._sync_exp)

    def sync(self) -> None:
        """Force a full refresh of both the traits table and the XP bar."""
        self._sync_traits()
        self._sync_exp()

    def _sync_traits(self) -> None:
        """Rebuild the data table rows from the current player state."""
        # _win is falsy once the curses window is gone; nothing to draw then.
        if not self._win:
            return
        self._data_table.clear()
        self._data_table.add("Name", self._player.name)
        self._data_table.add("Race", self._player.race.name)
        self._data_table.add("Class", self._player.class_.name)
        self._data_table.add("Level", str(self._player.level))
        # Blank spacer row between the identity block and the stat rows.
        self._data_table.add(" " * 15, "")
        for stat in StatType:
            self._data_table.add(stat.value, str(self._player.stats[stat]))
        self._render_data_table()

    def _sync_exp(self) -> None:
        """Update the progress bar position/title from the player's XP bar."""
        self._cur_pos = self._player.exp_bar.position
        self._max_pos = self._player.exp_bar.max_
        self._progress_title = (
            f"Experience ({self._max_pos-self._cur_pos:.0f} XP to go)"
        )
        self._render_progress_bar()
30.373134
75
0.615725
252
2,035
4.615079
0.31746
0.128977
0.078246
0.082545
0.239037
0.123818
0.123818
0.123818
0
0
0
0.002011
0.26683
2,035
66
76
30.833333
0.77748
0
0
0
0
0
0.064373
0.016708
0
0
0
0
0
1
0.096154
false
0
0.076923
0
0.211538
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebb3616f90465bce3896df5538302b23c8d738c6
1,668
py
Python
src/precession/planet.py
kurlytail/precession
a5dd83f4fca4629de1f5759bb467183bda1a6506
[ "MIT" ]
null
null
null
src/precession/planet.py
kurlytail/precession
a5dd83f4fca4629de1f5759bb467183bda1a6506
[ "MIT" ]
null
null
null
src/precession/planet.py
kurlytail/precession
a5dd83f4fca4629de1f5759bb467183bda1a6506
[ "MIT" ]
null
null
null
import yaml
import pathlib
import json
import math


class Planet(object):
    """Container for a planet's orbital parameters.

    Raw attributes (mass, period, eccentricity, semi-major axis) are set at
    load time; :meth:`fixup` derives the remaining orbital quantities.
    """

    def __init__(self, config):
        self.GMS = 0
        # mass
        self.M = 0.
        self.name = "unknown"
        # period
        self.T = 1.
        # eccentricity
        self.e = 0.
        # semi major axis
        self.a = 1.
        # configuration
        self.config = config

    def fixup(self):
        """Derive orbital quantities (R, V, GMS, vMax, L, GM) from the raw
        attributes loaded by :meth:`load`."""
        # presumably normalizes mass by the solar mass (~2e30 kg) — verify units
        self.M = self.M / 2.e+30
        self.RMin = self.a * (1 - self.e)
        self.RMax = self.a * (1 + self.e)
        self.R = self.a
        self.V = (2 * math.pi * self.R) / self.T
        self.GMS = self.R * self.V**2
        self.vMax = math.sqrt(
            (((1 + self.e) * (1 + self.M)) / self.RMin) * self.GMS)
        self.L = self.a * (1 - self.e) * self.vMax
        self.GM = self.GMS * self.M

    @staticmethod
    def load(config, data):
        """Build a Planet from a dict, a YAML file path string, or a pathlib path.

        :param config: opaque configuration object stored on the instance
        :param data: dict of attributes, or a path to a YAML file of them
        :raises TypeError: if data is not (or does not load to) a dict
        """
        # BUG FIX: the original tested ``pathlib.PosixPath``, which silently
        # fails for WindowsPath on Windows. PurePath is the common base of
        # every concrete path class, so this stays backward compatible.
        if isinstance(data, pathlib.PurePath):
            data = str(data)
        if isinstance(data, str):
            with open(data, "r") as data_file:
                data = yaml.safe_load(data_file)
        if not isinstance(data, dict):
            raise TypeError(f"data type {type(data)} cannot be loaded")

        planet = Planet(config)
        for key, value in data.items():
            setattr(planet, key, value)
        planet.fixup()
        return planet

    def get_dict(self):
        """Return the instance attributes minus the opaque config object."""
        data = self.__dict__.copy()
        data.pop("config")
        return data

    def save(self, filename):
        """Serialize the planet's attributes (minus config) to a YAML file."""
        with open(filename, 'w') as file:
            yaml.dump(self.get_dict(), file)

    def __str__(self) -> str:
        return f"planet {self.name} => {', '.join(yaml.safe_dump(self.get_dict()).splitlines())}"
27.8
97
0.522182
223
1,668
3.820628
0.336323
0.029343
0.028169
0.035211
0.052817
0.052817
0
0
0
0
0
0.013686
0.342926
1,668
59
98
28.271186
0.763686
0.032374
0
0
0
0.021277
0.082711
0.03296
0
0
0
0
0
1
0.12766
false
0
0.085106
0.021277
0.297872
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebb36f491c315700e82d95483ecfc1489b55b959
2,479
py
Python
tests/unit_test_suit.py
vlastikczech/zang-python
980f5243071404d6838554500a6955ff7bc2a0c7
[ "MIT" ]
1
2019-02-18T21:51:58.000Z
2019-02-18T21:51:58.000Z
tests/unit_test_suit.py
vlastikczech/zang-python
980f5243071404d6838554500a6955ff7bc2a0c7
[ "MIT" ]
6
2019-06-26T13:56:22.000Z
2022-02-17T16:40:48.000Z
tests/unit_test_suit.py
vlastikczech/zang-python
980f5243071404d6838554500a6955ff7bc2a0c7
[ "MIT" ]
6
2017-10-17T12:44:32.000Z
2020-02-07T20:45:00.000Z
import unittest

from tests.unit.test_account import TestAccount
from tests.unit.test_application import TestApplication
from tests.unit.test_usages import TestUsages
from tests.unit.test_conferences import TestConferences
from tests.unit.test_mms_messages import TestMmsMessages
from tests.unit.test_sms_messages import TestSmsMessages
from tests.unit.test_calls import TestCalls
from tests.unit.test_transcriptions import TestTranscriptions
from tests.unit.test_sip_domain import TestSipDomain
from tests.unit.test_sip_credentials import TestSipCredentials
from tests.unit.test_recordings import TestRecordings
from tests.unit.test_notifications import TestNotifications
from tests.unit.test_application_clients import TestApplicationClients
from tests.unit.test_available_phone_number import TestAvailablePhoneNumber
from tests.unit.test_carrier_services import TestCarrierServices
from tests.unit.test_incoming_phone_numbers import TestIncomingPhoneNumbers
from tests.unit.test_ip_access_control_lists import TestIpAccessControlLists
from tests.unit.fraud_control_test import FraudControlTest


def suite():
    """
    Gather all the tests from this module in a test suite.
    """
    # One entry per TestCase class, registered in the same order as before.
    case_classes = (
        FraudControlTest,
        TestIpAccessControlLists,
        TestIncomingPhoneNumbers,
        TestAvailablePhoneNumber,
        TestApplicationClients,
        TestCarrierServices,
        TestNotifications,
        TestRecordings,
        TestSipCredentials,
        TestSipDomain,
        TestTranscriptions,
        TestCalls,
        TestMmsMessages,
        TestSmsMessages,
        TestConferences,
        TestUsages,
        TestAccount,
        TestApplication,
    )
    test_suite = unittest.TestSuite()
    for case_class in case_classes:
        test_suite.addTest(unittest.makeSuite(case_class))
    return test_suite


if __name__ == '__main__':
    runner = unittest.TextTestRunner()
    test_suite = suite()
    runner.run(test_suite)
47.673077
76
0.835418
283
2,479
7.09894
0.24735
0.103036
0.116476
0.215032
0.343454
0
0
0
0
0
0
0
0.094796
2,479
51
77
48.607843
0.895276
0.021783
0
0
0
0
0.003326
0
0
0
0
0
0
1
0.022727
false
0
0.431818
0
0.477273
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
ebb41e91a8e2783d3b37bf3b6b979d65654f5089
262
py
Python
Python/basic-py/exercise_14.py
nhutnamhcmus/code
22b528084ed234fcabca89cf1ba02a2c347007bc
[ "MIT" ]
1
2020-10-12T18:33:22.000Z
2020-10-12T18:33:22.000Z
Python/basic-py/exercise_14.py
nhutnamhcmus/code
22b528084ed234fcabca89cf1ba02a2c347007bc
[ "MIT" ]
null
null
null
Python/basic-py/exercise_14.py
nhutnamhcmus/code
22b528084ed234fcabca89cf1ba02a2c347007bc
[ "MIT" ]
null
null
null
def _get_square(values):
    """Return squares of the even indices (0, 2, 4, ...) below len(values)
    that are not multiples of 3.

    Note: this squares *indices* into the list, not its elements — the input
    only bounds the index range. Behavior is unchanged from the original.
    """
    # Parameter renamed from ``list`` — shadowing the builtin was confusing.
    return [x*x for x in range(0, len(values), 2) if x % 3 != 0]


print(_get_square([1, 2, 3, 4, 5]))


def get_square(values):
    """Return squares of the elements of *values* that are even and not
    multiples of 3."""
    return [x*x for x in values if x % 3 != 0 and x % 2 == 0]


print(get_square([1, 2, 3, 4, 5]))
23.818182
58
0.557252
58
262
2.413793
0.344828
0.257143
0.171429
0.228571
0.771429
0.771429
0.771429
0.771429
0.771429
0.771429
0
0.112821
0.255725
262
11
59
23.818182
0.605128
0
0
0.25
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0
0
0.5
0.25
0
0
0
null
1
0
1
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
8
ebb439a6545138904530c4467b730cf8637621d9
391
py
Python
utils/reduce_data.py
akshatabhat/transformers
83c6f38b9bf1397f3d2c95c7b0ea8907f709c580
[ "Apache-2.0" ]
1
2021-03-26T14:06:52.000Z
2021-03-26T14:06:52.000Z
utils/reduce_data.py
akshatabhat/transformers
83c6f38b9bf1397f3d2c95c7b0ea8907f709c580
[ "Apache-2.0" ]
null
null
null
utils/reduce_data.py
akshatabhat/transformers
83c6f38b9bf1397f3d2c95c7b0ea8907f709c580
[ "Apache-2.0" ]
null
null
null
import json


def main(file_path, out_len):
    """Truncate the top-level "data" list of a JSON file to *out_len* entries,
    rewriting the file in place.

    Prints the list length before and after truncation.

    :param file_path: path to a JSON file with a top-level "data" list
    :param out_len: number of entries to keep
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        # json.load reads straight from the file object — no intermediate
        # string needed.
        data = json.load(f)
        print(len(data['data']))
        data['data'] = data['data'][:out_len]
    with open(file_path, 'w', encoding='utf-8') as f:
        print(len(data['data']))
        json.dump(data, f)


if __name__ == "__main__":
    main('data/train-v1.1.json', 10)
    main('data/dev-v1.1.json', 2)
23
45
0.56266
62
391
3.33871
0.435484
0.231884
0.231884
0.231884
0.328502
0.21256
0
0
0
0
0
0.02349
0.237852
391
16
46
24.4375
0.671141
0
0
0.166667
0
0
0.163683
0
0
0
0
0
0
1
0.083333
false
0
0.083333
0
0.166667
0.166667
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
ebb54d34edb14bf6d75544e3fae03ac69c069a8f
15,289
py
Python
jade/jobs/job_submitter.py
NREL/jade
84d73f45e206c4a35e6483e6c1ce29ab7ac7e543
[ "BSD-3-Clause" ]
15
2021-05-15T21:58:26.000Z
2022-03-17T08:26:48.000Z
jade/jobs/job_submitter.py
NREL/jade
84d73f45e206c4a35e6483e6c1ce29ab7ac7e543
[ "BSD-3-Clause" ]
22
2021-02-04T20:02:33.000Z
2021-09-14T13:29:30.000Z
jade/jobs/job_submitter.py
NREL/jade
84d73f45e206c4a35e6483e6c1ce29ab7ac7e543
[ "BSD-3-Clause" ]
3
2021-01-11T15:11:31.000Z
2021-06-07T17:36:51.000Z
"""Provides ability to run jobs locally or on HPC.""" from collections import OrderedDict import datetime import fileinput import importlib import logging import os import shutil import jade from jade.common import ( CONFIG_FILE, JOBS_OUTPUT_DIR, OUTPUT_DIR, RESULTS_FILE, HPC_CONFIG_FILE, ) from jade.enums import JobCompletionStatus, Status, ResourceMonitorType from jade.events import ( EVENTS_FILENAME, EVENT_NAME_ERROR_LOG, StructuredLogEvent, EVENT_CATEGORY_ERROR, EVENT_CATEGORY_RESOURCE_UTIL, EVENT_NAME_BYTES_CONSUMED, EVENT_NAME_SUBMIT_STARTED, EVENT_NAME_SUBMIT_COMPLETED, ) from jade.exceptions import InvalidParameter from jade.extensions.registry import Registry, ExtensionClassType from jade.hpc.common import HpcType from jade.hpc.hpc_manager import HpcManager from jade.hpc.hpc_submitter import HpcSubmitter from jade.jobs.cluster import Cluster from jade.jobs.job_configuration_factory import create_config_from_previous_run from jade.jobs.job_manager_base import JobManagerBase from jade.jobs.job_runner import JobRunner from jade.jobs.results_aggregator import ResultsAggregator from jade.models import SubmitterParams from jade.models.submission_group import make_submission_group_lookup from jade.loggers import log_event from jade.result import serialize_results, ResultsSummary from jade.utils.repository_info import RepositoryInfo from jade.utils.subprocess_manager import run_command from jade.utils.utils import dump_data, get_directory_size_bytes import jade.version logger = logging.getLogger(__name__) class JobSubmitter(JobManagerBase): """Submits jobs for execution locally or on an HPC.""" def __init__(self, config_file, output, is_new): """Internal constructor. Callers should use create() or load().""" super().__init__(config_file, output) self._hpc = None self._config_file = config_file self._is_new = is_new @classmethod def create(cls, config_file, params: SubmitterParams, output=OUTPUT_DIR): """Creates a new instance. 
Parameters ---------- config_file : JobConfiguration configuration for simulation params: SubmitterParams output : str Output directory """ main_file = os.path.join(output, CONFIG_FILE) shutil.copyfile(config_file, main_file) mgr = cls(main_file, output, True) mgr.run_checks(params) return mgr @classmethod def load(cls, output): """Loads an instance from an existing directory.""" return cls(os.path.join(output, CONFIG_FILE), output, False) def __repr__(self): return f"""num_jobs={self.get_num_jobs()} results_summary={self.get_results_summmary_report()}""" def cancel_jobs(self, cluster): """Cancel running and pending jobs.""" groups = make_submission_group_lookup(cluster.config.submission_groups) hpc = HpcManager(groups, self._output) for job_id in cluster.job_status.hpc_job_ids: hpc.cancel_job(job_id) cluster.mark_complete(canceled=True) def submit_jobs(self, cluster, force_local=False): """Submit simulations. Auto-detect whether the current system is an HPC and submit to its queue. Otherwise, run locally. Parameters ---------- cluster : Cluster force_local : bool If on HPC, run jobs through subprocess as if local. Returns ------- Status """ if self._is_new: logger.info("Submit %s jobs for execution.", self._config.get_num_jobs()) logger.info("JADE version %s", jade.version.__version__) registry = Registry() loggers = registry.list_loggers() logger.info("Registered modules for logging: %s", ", ".join(loggers)) self._save_repository_info(registry) ResultsAggregator.create(self._output) # If an events summary file exists, it is invalid. 
events_file = os.path.join(self._output, EVENTS_FILENAME) if os.path.exists(events_file): os.remove(events_file) event = StructuredLogEvent( source="submitter", category=EVENT_CATEGORY_RESOURCE_UTIL, name=EVENT_NAME_SUBMIT_COMPLETED, message="job submission started", num_jobs=self.get_num_jobs(), ) log_event(event) else: self._handle_submission_groups() result = Status.IN_PROGRESS group = self._config.get_default_submission_group() groups = make_submission_group_lookup(cluster.config.submission_groups) self._hpc = HpcManager(groups, self._output) if self._hpc.hpc_type == HpcType.LOCAL or force_local: runner = JobRunner(self._config_file, output=self._output) num_processes = group.submitter_params.num_processes verbose = group.submitter_params.verbose result = runner.run_jobs(verbose=verbose, num_processes=num_processes) agg = ResultsAggregator.load(self._output) agg.process_results() is_complete = True else: is_complete = self._submit_to_hpc(cluster) if is_complete: result = self._handle_completion(cluster) return result def _handle_completion(self, cluster): result = Status.GOOD self._results = ResultsAggregator.list_results(self._output) if len(self._results) != self._config.get_num_jobs(): finished_jobs = {x.name for x in self._results} all_jobs = {x.name for x in self._config.iter_jobs()} missing_jobs = sorted(all_jobs.difference(finished_jobs)) logger.error( "Error in result totals. num_results=%s total_num_jobs=%s", len(self._results), self._config.get_num_jobs(), ) logger.error( "These jobs did not finish: %s. 
Check for process crashes or HPC timeouts.", missing_jobs, ) result = Status.ERROR else: missing_jobs = [] self.write_results_summary(RESULTS_FILE, missing_jobs) self._log_error_log_messages(self._output) bytes_consumed = get_directory_size_bytes(self._output, recursive=False) event = StructuredLogEvent( source="submitter", category=EVENT_CATEGORY_RESOURCE_UTIL, name=EVENT_NAME_BYTES_CONSUMED, message="main output directory size", bytes_consumed=bytes_consumed, ) log_event(event) event = StructuredLogEvent( source="submitter", category=EVENT_CATEGORY_RESOURCE_UTIL, name=EVENT_NAME_SUBMIT_COMPLETED, message="job submission completed", num_jobs=self.get_num_jobs(), ) log_event(event) group = self._config.get_default_submission_group() if group.submitter_params.generate_reports: self.generate_reports(self._output, group.submitter_params.resource_monitor_type) cluster.mark_complete() if cluster.config.pipeline_stage_num is not None: # The pipeline directory must be the one above this one. pipeline_dir = os.path.dirname(self._output) next_stage = cluster.config.pipeline_stage_num + 1 cmd = ( f"jade pipeline submit-next-stage {pipeline_dir} " f"--stage-num={next_stage} " f"--return-code={result.value}" ) run_command(cmd) return result def write_results_summary(self, filename, missing_jobs): """Write the results to filename in the output directory.""" data = OrderedDict() data["jade_version"] = jade.version.__version__ now = datetime.datetime.now() data["timestamp"] = now.strftime("%m/%d/%Y %H:%M:%S") data["base_directory"] = os.getcwd() results = self._build_results(missing_jobs) data["results_summary"] = results["summary"] data["missing_jobs"] = missing_jobs data["results"] = results["results"] output_file = os.path.join(self._output, filename) dump_data(data, output_file) logger.info("Wrote results to %s.", output_file) num_successful = results["summary"]["num_successful"] num_canceled = results["summary"]["num_canceled"] num_failed = results["summary"]["num_failed"] 
num_missing = len(missing_jobs) total = num_successful + num_failed + num_missing log_func = logger.info if num_successful == total else logger.warning log_func( "Successful=%s Failed=%s Canceled=%s Missing=%s Total=%s", num_successful, num_failed, num_canceled, num_missing, total, ) return output_file def _build_results(self, missing_jobs): num_successful = 0 num_failed = 0 num_canceled = 0 for result in self._results: if result.is_successful(): num_successful += 1 elif result.is_failed(): num_failed += 1 else: assert result.is_canceled(), str(result) num_canceled += 1 return { "results": serialize_results(self._results), "summary": { "num_successful": num_successful, "num_failed": num_failed, "num_canceled": num_canceled, "num_missing": len(missing_jobs), }, } def _save_repository_info(self, registry): extensions = registry.list_extensions() extension_packages = set(["jade"]) for ext in extensions: exec_module = ext[ExtensionClassType.EXECUTION].__module__ name = exec_module.split(".")[0] extension_packages.add(name) for name in extension_packages: try: package = importlib.import_module(name) repo_info = RepositoryInfo(package) patch = os.path.join(self._output, f"{name}-diff.patch") repo_info.write_diff_patch(patch) logger.info("%s repository information: %s", name, repo_info.summary()) except InvalidParameter: pass @staticmethod def _log_error_log_messages(directory): for event in JobSubmitter.find_error_log_messages(directory): log_event(event) @staticmethod def find_error_log_messages(directory): """Parse output log files for error messages Parameters ---------- directory : str output directory """ substrings = ( "DUE TO TIME LIMIT", # includes slurmstepd, but check this first "srun", "slurmstepd", "Traceback", ) filenames = [os.path.join(directory, x) for x in os.listdir(directory) if x.endswith(".e")] if not filenames: return for line in fileinput.input(filenames): for substring in substrings: if substring in line: event = StructuredLogEvent( 
source="submitter", category=EVENT_CATEGORY_ERROR, name=EVENT_NAME_ERROR_LOG, message="Detected error message in log.", error=substring, filename=fileinput.filename(), line_number=fileinput.lineno(), text=line.strip(), ) yield event # Only find one match in a single line. break @staticmethod def generate_reports(directory, resource_monitor_type): """Create reports summarizing the output results of a set of jobs. Parameters ---------- directory : str output directory resource_monitor_type : ResourceMonitorType """ commands = [ (f"jade show-results -o {directory}", "results.txt"), (f"jade show-events -o {directory} --categories Error", "errors.txt"), ] if resource_monitor_type != ResourceMonitorType.NONE: commands.append((f"jade stats show -o {directory}", "stats.txt")) commands.append((f"jade stats show -o {directory} -j", "stats_summary.json")) if resource_monitor_type == ResourceMonitorType.PERIODIC: commands.append((f"jade stats plot -o {directory}", None)) reports = [] for cmd in commands: output = {} ret = run_command(cmd[0], output=output) if ret != 0: logger.error("Failed to run [%s]: %s: %s", cmd, ret, output["stderr"]) if cmd[1] is not None: filename = os.path.join(directory, cmd[1]) with open(filename, "w") as f_out: if "json" not in cmd[1]: f_out.write(cmd[0] + "\n\n") f_out.write(output["stdout"]) reports.append(filename) logger.info("Generated reports %s.", " ".join(reports)) return 0 def _submit_to_hpc(self, cluster): hpc_submitter = HpcSubmitter( self._config, self._config_file, cluster, self._output, ) if hpc_submitter.run(): logger.info("All submitters have completed.") return True logger.debug("jobs are still pending") return False def run_checks(self, params: SubmitterParams): """Checks the configuration for errors. 
May mutate the config.""" self._config.check_job_dependencies(params) self._config.check_submission_groups(params) self._config.check_spark_config() @staticmethod def run_submit_jobs(config_file, output, params, pipeline_stage_num=None): """Allows submission from an existing Python process.""" os.makedirs(output, exist_ok=True) mgr = JobSubmitter.create(config_file, params, output=output) cluster = Cluster.create( output, mgr.config, pipeline_stage_num=pipeline_stage_num, ) local = params.hpc_config.hpc_type == HpcType.LOCAL ret = 1 try: status = mgr.submit_jobs(cluster, force_local=local) if status == Status.IN_PROGRESS: check_cmd = f"jade show-status -o {output}" if not params.dry_run: print(f"Jobs are in progress. Run '{check_cmd}' for updates.") ret = 0 else: ret = status.value finally: cluster.demote_from_submitter() if local: # These files were not used in this case. cluster.delete_files_internal() return ret
35.473318
99
0.608738
1,685
15,289
5.279525
0.197626
0.017986
0.007869
0.011241
0.176147
0.110612
0.093413
0.073516
0.057329
0.036196
0
0.001595
0.302832
15,289
430
100
35.555814
0.833005
0.086598
0
0.123839
0
0
0.098097
0.00981
0
0
0
0
0.003096
1
0.049536
false
0.003096
0.092879
0.003096
0.182663
0.003096
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebb61e698b97baa6c67673863e148e21ab80d713
1,364
py
Python
testing/MarketMaker_contract_test.py
SK1989sL/RYO
a0c89c694d9ad4aed9a9776937f2f73271b67f28
[ "MIT" ]
null
null
null
testing/MarketMaker_contract_test.py
SK1989sL/RYO
a0c89c694d9ad4aed9a9776937f2f73271b67f28
[ "MIT" ]
null
null
null
testing/MarketMaker_contract_test.py
SK1989sL/RYO
a0c89c694d9ad4aed9a9776937f2f73271b67f28
[ "MIT" ]
null
null
null
import os
import pytest

from starkware.starknet.compiler.compile import (
    compile_starknet_files)
from starkware.starknet.testing.starknet import Starknet
from starkware.starknet.testing.contract import StarknetContract

# The path to the contract source code.
CONTRACT_FILE = os.path.join(
    os.path.dirname(__file__), "../contracts/MarketMaker.cairo")


# The testing library uses python's asyncio. So the following
# decorator and the ``async`` keyword are needed.
@pytest.mark.asyncio
async def test_record_items():
    """End-to-end test: compile, deploy, and trade against MarketMaker.

    Exercises one `trade` call and checks the market's internal
    bookkeeping against the constant-sum invariants asserted below.
    """
    # Compile the contract.
    contract_definition = compile_starknet_files(
        [CONTRACT_FILE], debug_info=True)

    # Create a new Starknet class that simulates the StarkNet
    # system.
    starknet = await Starknet.empty()

    # Deploy the contract.
    contract_address = await starknet.deploy(
        contract_definition=contract_definition)
    contract = StarknetContract(
        starknet=starknet,
        abi=contract_definition.abi,
        contract_address=contract_address,
    )

    # Initial pool balances and the amount the user pays in.
    market_a_pre = 300
    market_b_pre = 500
    user_a_pre = 40  # User gives 40.

    # trade() returns the post-trade balances plus the user's payout.
    res = await contract.trade(market_a_pre, market_b_pre, user_a_pre).invoke()
    (market_a_post, market_b_post, user_b_post, ) = res

    # Whatever the user paid in of asset A is added to the pool;
    # whatever the user received of asset B is removed from the pool.
    assert market_a_post == market_a_pre + user_a_pre
    assert market_b_post == market_b_pre - user_b_post
30.311111
79
0.73827
181
1,364
5.287293
0.38674
0.025078
0.065831
0.058516
0
0
0
0
0
0
0
0.009001
0.185484
1,364
44
80
31
0.852385
0.195748
0
0
0
0
0.027624
0.027624
0
0
0
0
0.074074
1
0
false
0
0.185185
0
0.185185
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebb6654546e2771ea9a8d0a98c2ca7dabc9a83dc
230
py
Python
scrapyu/__init__.py
lin-zone/scrapyu
abcb8eed2ea02121b74017e007c57c0d3762342d
[ "MIT" ]
1
2021-01-05T09:11:42.000Z
2021-01-05T09:11:42.000Z
scrapyu/__init__.py
lin-zone/scrapyu
abcb8eed2ea02121b74017e007c57c0d3762342d
[ "MIT" ]
null
null
null
scrapyu/__init__.py
lin-zone/scrapyu
abcb8eed2ea02121b74017e007c57c0d3762342d
[ "MIT" ]
null
null
null
__version__ = '0.1.12' from ._useragent import UserAgentMiddleware from ._markdown import MarkdownPipeline from ._cookies import FirefoxCookiesMiddleware from ._mongodb import MongoDBPipeline from ._redis import RedisDupeFilter
25.555556
46
0.847826
24
230
7.75
0.666667
0
0
0
0
0
0
0
0
0
0
0.019512
0.108696
230
8
47
28.75
0.887805
0
0
0
0
0
0.026087
0
0
0
0
0
0
1
0
false
0
0.833333
0
0.833333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
ebb6bb07248128010f898b3fb588b1cee8d3c6cc
1,930
py
Python
test/perform_additional_setup.py
aws/amazon-braket-containers
44187fb4cc73e05bda3e361638d94b90f6e4c06a
[ "Apache-2.0" ]
1
2022-03-22T23:49:17.000Z
2022-03-22T23:49:17.000Z
test/perform_additional_setup.py
aws/amazon-braket-containers
44187fb4cc73e05bda3e361638d94b90f6e4c06a
[ "Apache-2.0" ]
null
null
null
test/perform_additional_setup.py
aws/amazon-braket-containers
44187fb4cc73e05bda3e361638d94b90f6e4c06a
[ "Apache-2.0" ]
3
2021-11-29T21:19:31.000Z
2022-01-13T16:31:06.000Z
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import shutil
import subprocess
import sys
import traceback
from urllib.parse import urlparse

import boto3
import tempfile


def download_s3_file(s3_uri: str, local_path: str) -> str:
    """
    Downloads a file to a local path.
    Args:
        s3_uri (str): the S3 URI to get the file from.
        local_path (str) : the local path to download to
    Returns:
        str: the path to the file containing the downloaded path.
    """
    s3_client = boto3.client("s3")
    # allow_fragments=False keeps '#' characters in the key intact.
    parsed_url = urlparse(s3_uri, allow_fragments=False)
    s3_bucket = parsed_url.netloc
    s3_key = parsed_url.path.lstrip("/")
    local_s3_file = os.path.join(local_path, os.path.basename(s3_key))
    s3_client.download_file(s3_bucket, s3_key, local_s3_file)
    return local_s3_file


def perform_additional_setup() -> None:
    """Fetch and run an optional setup script named by the
    AMZN_BRAKET_IMAGE_SETUP_SCRIPT environment variable (an S3 URI).

    Best-effort: any failure is reported but never propagated, so image
    startup continues without the additional libraries.
    """
    lib_s3_uri = os.getenv('AMZN_BRAKET_IMAGE_SETUP_SCRIPT')
    if lib_s3_uri:
        try:
            print("Getting setup script from ", lib_s3_uri)
            with tempfile.TemporaryDirectory() as temp_dir:
                script_to_run = download_s3_file(lib_s3_uri, temp_dir)
                # check=True: previously a non-zero exit from chmod or the
                # setup script itself was silently ignored; now it raises
                # CalledProcessError and is reported below.
                subprocess.run(["chmod", "+x", script_to_run], check=True)
                subprocess.run([script_to_run], check=True)
        except Exception as e:
            print(f"Unable to install additional libraries.\nException: {e}")


if __name__ == "__main__":
    perform_additional_setup()
33.275862
77
0.698964
283
1,930
4.55477
0.44523
0.031032
0.024825
0
0
0
0
0
0
0
0
0.017857
0.21658
1,930
57
78
33.859649
0.834656
0.389637
0
0
0
0
0.114058
0.045977
0
0
0
0
0
1
0.071429
false
0
0.25
0
0.357143
0.071429
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebb7280816985728a4a272af67774f62eef9667c
1,146
py
Python
experiments/optim.py
fbcotter/dtcwt_gainlayer
32ec3e21066edc2a0d5edefaf70f43d031d1b4ac
[ "MIT" ]
6
2018-11-14T22:41:58.000Z
2021-12-08T11:01:32.000Z
experiments/optim.py
fbcotter/dtcwt_gainlayer
32ec3e21066edc2a0d5edefaf70f43d031d1b4ac
[ "MIT" ]
null
null
null
experiments/optim.py
fbcotter/dtcwt_gainlayer
32ec3e21066edc2a0d5edefaf70f43d031d1b4ac
[ "MIT" ]
1
2020-05-22T16:10:00.000Z
2020-05-22T16:10:00.000Z
import torch.optim
from numpy import ndarray


def get_optim(optim, params, init_lr, steps=1, wd=0, gamma=1, momentum=0.9,
              max_epochs=120):
    """Build an optimizer and a matching learning-rate scheduler.

    Parameters
    ----------
    optim : str
        One of 'sgd', 'sgd_nomem' (SGD without momentum), or 'adam'.
    params : iterable
        Parameters (or parameter groups) to optimize.
    init_lr : float
        Initial learning rate.
    steps : int | tuple | list | ndarray
        An int selects StepLR with period max_epochs/steps; a sequence
        selects MultiStepLR at those milestones (a length-1 sequence is
        unwrapped and treated as an int).
    wd : float
        Weight decay.
    gamma : float
        Multiplicative LR decay factor.
    momentum : float
        SGD momentum (ignored by 'sgd_nomem' and 'adam').
    max_epochs : int
        Total epochs, used to size the StepLR period.

    Returns
    -------
    (torch.optim.Optimizer, torch.optim.lr_scheduler._LRScheduler)

    Raises
    ------
    ValueError
        For an unrecognized optimizer name or steps type.
    """
    if optim == 'sgd':
        opt = torch.optim.SGD(params, lr=init_lr, momentum=momentum,
                              weight_decay=wd)
    elif optim == 'sgd_nomem':
        opt = torch.optim.SGD(params, lr=init_lr, momentum=0, weight_decay=wd)
    elif optim == 'adam':
        opt = torch.optim.Adam(params, lr=init_lr, weight_decay=wd,
                               betas=(0.9, 0.999))
    else:
        raise ValueError('Unknown optimizer')

    # A one-element sequence is really just an int step count.
    if isinstance(steps, (tuple, list, ndarray)) and len(steps) == 1:
        steps = steps[0]

    if isinstance(steps, int):
        sched = torch.optim.lr_scheduler.StepLR(
            opt, int(max_epochs / steps), gamma=gamma)
    elif isinstance(steps, (tuple, list, ndarray)):
        sched = torch.optim.lr_scheduler.MultiStepLR(opt, steps, gamma=gamma)
    else:
        raise ValueError('Unknown lr schedule')

    return opt, sched
33.705882
69
0.624782
144
1,146
4.881944
0.361111
0.085349
0.081081
0.059744
0.361309
0.125178
0.125178
0.125178
0.125178
0
0
0.018846
0.259162
1,146
33
70
34.727273
0.809187
0.035777
0
0.148148
0
0
0.047187
0
0
0
0
0
0
1
0.037037
false
0
0.074074
0
0.148148
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebb8162cb09b68c8030371823ad7d00d0561cc03
1,941
py
Python
src/microengineclamav/tasks.py
polyswarm/microengine-clamav
9427932cd35d4f8bfc7fe7877e90f518e7a3bfbb
[ "MIT" ]
2
2018-05-20T00:08:14.000Z
2018-06-13T22:42:14.000Z
src/microengineclamav/tasks.py
polyswarm/microengine-clamav
9427932cd35d4f8bfc7fe7877e90f518e7a3bfbb
[ "MIT" ]
1
2021-06-22T15:03:01.000Z
2021-06-22T20:26:52.000Z
src/microengineclamav/tasks.py
polyswarm/microengine-clamav
9427932cd35d4f8bfc7fe7877e90f518e7a3bfbb
[ "MIT" ]
1
2019-02-21T20:22:32.000Z
2019-02-21T20:22:32.000Z
from celery import Celery, Task
from microengine_utils import errors
from microengine_utils.datadog import configure_metrics
from microengine_utils.constants import SCAN_FAIL, SCAN_SUCCESS, SCAN_TIME, SCAN_VERDICT
from microengineclamav.models import Bounty, ScanResult, Verdict, Assertion, Phase
from microengineclamav import settings
from microengineclamav.scan import scan, compute_bid

# Celery application; broker URL comes from package settings.
celery_app = Celery('tasks', broker=settings.BROKER)


class MetricsTask(Task):
    """Celery Task base class that lazily builds a Datadog metrics client.

    The client is created once per worker process on first access and
    cached in the class-level `_metrics` slot.
    """

    _metrics = None

    @property
    def metrics(self):
        # Lazy initialization: Datadog configuration is deferred until a
        # task actually records a metric.
        if self._metrics is None:
            self._metrics = configure_metrics(
                settings.DATADOG_API_KEY,
                settings.DATADOG_APP_KEY,
                settings.ENGINE_NAME,
                poly_work=settings.POLY_WORK
            )
        return self._metrics


@celery_app.task(base=MetricsTask)
def handle_bounty(bounty):
    """Scan one bounty artifact and post the engine's response.

    Parameters
    ----------
    bounty : dict
        Keyword payload reconstructed into a `Bounty` model below.

    Side effects: emits Datadog counters/timer and posts either a vote
    (arbitration phase) or an assertion back via `bounty.post_response`.
    """
    bounty = Bounty(**bounty)
    # Default ScanResult is used if scan() raises, so a response is still
    # posted after a scanner failure.
    scan_result = ScanResult()
    with handle_bounty.metrics.timer(SCAN_TIME):
        try:
            scan_result = scan(bounty)
            handle_bounty.metrics.increment(SCAN_SUCCESS, tags=[f'type:{bounty.artifact_type}'])
            handle_bounty.metrics.increment(SCAN_VERDICT, tags=[f'type:{bounty.artifact_type}', f'verdict:{scan_result.verdict.value}'])
        except errors.CalledProcessScanError:
            # clamav subprocess failure: count it and fall through with the
            # default (empty) scan_result.
            handle_bounty.metrics.increment(
                SCAN_FAIL, tags=[f'type:{bounty.artifact_type}', 'scan_error:calledprocess']
            )
    # NOTE(review): block placement reconstructed from a flattened source —
    # response posting is taken to be outside the metrics timer; confirm
    # against upstream history.
    if bounty.phase == Phase.ARBITRATION:
        scan_response = scan_result.to_vote()
    else:
        if scan_result.verdict in [Verdict.UNKNOWN, Verdict.SUSPICIOUS]:
            # These results don't bid any NCT.
            bid = 0
        else:
            bid = compute_bid(bounty, scan_result)
        scan_response = scan_result.to_assertion(bid)
    bounty.post_response(scan_response)
35.944444
104
0.663575
218
1,941
5.678899
0.344037
0.056543
0.061389
0.067851
0.181745
0.065428
0
0
0
0
0
0.000692
0.255023
1,941
53
105
36.622642
0.855463
0.016486
0
0.046512
0
0
0.076036
0.073414
0
0
0
0
0.046512
1
0.046512
false
0
0.162791
0
0.27907
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebb9e939ebab9f1ac907089a906eafebd3d40188
18,835
py
Python
app/main.py
rendybjunior/freddie-mercury
1b6d1fe8c06f317e5fc8ab17afdfa0a8b90a7a75
[ "Apache-2.0" ]
null
null
null
app/main.py
rendybjunior/freddie-mercury
1b6d1fe8c06f317e5fc8ab17afdfa0a8b90a7a75
[ "Apache-2.0" ]
2
2019-05-11T16:25:54.000Z
2019-05-13T01:19:16.000Z
app/main.py
rendybjunior/freddie-mercury
1b6d1fe8c06f317e5fc8ab17afdfa0a8b90a7a75
[ "Apache-2.0" ]
null
null
null
"""Freddie: a small Flask UI for authoring Airflow DAGs and BigQuery tasks.

Task/DAG definitions are stored in Cloud Datastore; SQL is validated and
cost-estimated against BigQuery; generated DAG files are pushed to a GitHub
repository as a pull request. Auth is Firebase ID tokens read from cookies.
"""
import datetime
import os, sys, six, base64, copy
from jinja2 import Environment, FileSystemLoader, Template
from google.auth.transport import requests
from google.cloud import datastore
from google.cloud import storage
from google.cloud import bigquery
import google.oauth2.id_token
from flask import Flask, render_template, request
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SubmitField, IntegerField
from wtforms.fields.html5 import DateField
from wtforms.validators import DataRequired, Email
import github3

# Repository layout for generated artifacts.
DAG_FOLDER = 'dags/'
SQL_FOLDER = 'dags/sql/'
DAG_REPO_ORG = 'rendybjunior'
DAG_REPO_NAME = 'freddie-dags'
MASTER_BRANCH_NAME = 'master'
# NOTE(review): placeholder values and an inline GitHub token — these are
# hard-coded credentials/config; should come from the environment.
PROJECT = 'xxx'
BUCKET = 'xxx'
g = github3.login(token='xxx')
# Cost-estimate constants: BigQuery on-demand price per TB, USD→IDR rate.
DOLLAR_TO_IDR = 14000
BQ_DOLLAR_PER_TB = 5

datastore_client = datastore.Client()
app = Flask(__name__)
SECRET_KEY = os.urandom(32)  # regenerated on every restart; invalidates CSRF tokens across restarts
app.config['SECRET_KEY'] = SECRET_KEY


class DagForm(FlaskForm):
    """Form describing one Airflow DAG (schedule, ownership, task wiring)."""
    dag_name = StringField('DAG Name', validators=[DataRequired()], render_kw={"placeholder": "lower_case_underscored"})
    owner = StringField('Owner', validators=[DataRequired()], render_kw={"placeholder": "lower_case_underscored"})
    start_date = DateField('Start Date', validators=[DataRequired()], format='%Y-%m-%d')
    email = StringField('Email', validators=[DataRequired(), Email()], render_kw={"placeholder": "separate@bycomma.com,separate@bycomma2.com,"})
    retries = IntegerField('Num of Retries', validators=[DataRequired()], default=1)
    retry_delay_minutes = IntegerField('Retry Delay (in minutes)', validators=[DataRequired()], default=15)
    schedule_interval = StringField('Schedule (in cron) UTC', validators=[DataRequired()], render_kw={"placeholder": "0 17 * * *"})
    tasks = StringField('Tasks', validators=[DataRequired()], render_kw={"placeholder": "separated_by_comma, lower_case_underscored"})
    # Dependencies are ';'-separated pairs of 'preceding_task,task'.
    dependencies = StringField('Dependencies', validators=[DataRequired()], render_kw={"placeholder": "eg. prev_task_id1,task_id1;prev_task_id1,task_id2)"})
    submit = SubmitField('Save')


class TaskForm(FlaskForm):
    """Form describing one BigQuery ETL task and its test parameters."""
    task_id = StringField('Task ID', validators=[DataRequired()], render_kw={"placeholder": "lower_case_underscored"})
    destination_table = StringField('Destination table', validators=[DataRequired()], render_kw={"placeholder": "my-project.test.freddie_mercury"})
    sql = TextAreaField('SQL', validators=[DataRequired()])
    sql_params = StringField('SQL Param to test SQL. THIS VALUE FOR TESTING ONLY', render_kw={"placeholder": "example: ds=2019-01-01,dsnodash=20190101"})
    save = SubmitField('Save')
    check_query = SubmitField('Check Query')
    run_query = SubmitField('Run Query')


def store_task(task_id, destination_table, sql, sql_params, updated_by, type_):
    """Upsert a Task entity in Datastore; SQL is stored base64-encoded."""
    entity = datastore.Entity(key=datastore_client.key('Task', task_id), exclude_from_indexes=['sql_base64'])
    entity.update({
        'type': type_,
        'destination_table': destination_table,
        'sql_base64' : base64.b64encode(sql.encode()),
        'sql_params' : sql_params,
        'updated_at' : datetime.datetime.now(),
        'updated_by' : updated_by
    })
    datastore_client.put(entity)
    return True, "{} saved".format(task_id) # todo check put return value


def fetch_task(task_id):
    """Fetch one Task entity by id and decode its SQL back to text."""
    key = datastore_client.key('Task', task_id)
    task = datastore_client.get(key=key)
    task_obj = {
        'type': task.get('type'),
        'task_id': task.key.name,
        'sql': base64.b64decode(task.get('sql_base64')).decode(),
        'sql_params': task.get('sql_params'),
        'destination_table': task.get('destination_table')
    }
    return task_obj


def fetch_tasks(limit=10):
    """Return the most recently updated tasks (newest first), as dicts."""
    query = datastore_client.query(kind='Task')
    query.order = ['-updated_at']
    tasks = query.fetch(limit=limit)
    tasks_obj = []
    for task in tasks:
        tasks_obj.append({
            'type': task.get('type'),
            'task_id': task.key.name,
            'sql': base64.b64decode(task.get('sql_base64')).decode(),
            'sql_params': task.get('sql_params'),
            'destination_table': task.get('destination_table')
        })
    return tasks_obj


def store_dag(dag_name, owner, start_date, retries, retry_delay_minutes, email,
              schedule_interval, tasks, dependencies, updated_by):
    """Upsert a Dag entity in Datastore keyed by dag_name."""
    entity = datastore.Entity(key=datastore_client.key('Dag', dag_name))
    entity.update({
        'dag_name': dag_name,
        'owner': owner,
        'start_date' : start_date,
        'retries': retries,
        'retry_delay_minutes': retry_delay_minutes,
        'email': email,
        'schedule_interval': schedule_interval,
        'tasks': tasks,
        'dependencies': dependencies,
        'updated_at' : datetime.datetime.now(),
        'updated_by' : updated_by
    })
    datastore_client.put(entity)
    return True, "{} saved".format(dag_name) # todo check put return value


def fetch_dags(limit=10):
    """Return the most recently updated DAGs (newest first), as dicts."""
    query = datastore_client.query(kind='Dag')
    query.order = ['-updated_at']
    dags = query.fetch(limit=limit)
    dags_obj = []
    for dag in dags:
        dags_obj.append({
            'dag_name': dag.key.name,
            'owner': dag.get('owner'),
            'start_date' : dag.get('start_date'),
            'retries': dag.get('retries'),
            'retry_delay_minutes': dag.get('retry_delay_minutes'),
            'email': dag.get('email'),
            'schedule_interval': dag.get('schedule_interval'),
            'tasks': dag.get('tasks'),
            'dependencies': dag.get('dependencies'),
            'updated_by' : dag.get('updated_by')
        })
    return dags_obj


def fetch_dag(dag_name):
    """Fetch one Dag entity by name as a plain dict."""
    key = datastore_client.key('Dag', dag_name)
    dag = datastore_client.get(key=key)
    dag_obj = {
        'dag_name': dag.key.name,
        'owner': dag.get('owner'),
        'start_date' : dag.get('start_date'),
        'retries': dag.get('retries'),
        'retry_delay_minutes': dag.get('retry_delay_minutes'),
        'email': dag.get('email'),
        'schedule_interval': dag.get('schedule_interval'),
        'tasks': dag.get('tasks'),
        'dependencies': dag.get('dependencies'),
        'updated_by' : dag.get('updated_by')
    }
    return dag_obj


def upload_sql(task_id, sql):
    """Upload a task's SQL to GCS under SQL_FOLDER. (Currently unused; the
    GitHub PR path below is used instead.)"""
    file_path = os.path.join(SQL_FOLDER, task_id + ".sql")
    client = storage.Client(project=PROJECT)
    bucket = client.get_bucket(BUCKET)
    blob = bucket.blob(file_path)
    blob.upload_from_string(sql)
    url = blob.public_url
    if isinstance(url, six.binary_type):
        url = url.decode('utf-8')
    print(url)
    # todo return meaningful status & message


def upload_dag(dag_name, dag_text):
    """Upload a rendered DAG file to GCS under DAG_FOLDER. (Currently unused.)"""
    file_path = os.path.join(DAG_FOLDER, dag_name + ".py")
    client = storage.Client(project=PROJECT)
    bucket = client.get_bucket(BUCKET)
    blob = bucket.blob(file_path)
    blob.upload_from_string(dag_text)
    url = blob.public_url
    if isinstance(url, six.binary_type):
        url = url.decode('utf-8')
    print(url)
    # todo return meaningful status & message


def check_query(sql):
    """Dry-run the SQL in BigQuery; return (ok, message-with-cost-estimate).

    On failure returns (False, exception). NOTE(review): the bare except
    swallows everything, including KeyboardInterrupt.
    """
    job_config = bigquery.QueryJobConfig()
    job_config.dry_run = True
    job_config.use_query_cache = False
    job_config.use_legacy_sql = False
    client = bigquery.Client(project=PROJECT)
    try:
        query_job = client.query(sql, job_config)
        query_size_megabyte = query_job.total_bytes_processed / 1024 / 1024
        query_size_terabyte = query_size_megabyte / 1024 / 1024
        dollar_est = BQ_DOLLAR_PER_TB * query_size_terabyte
        rp_est = dollar_est * DOLLAR_TO_IDR
        message = "Total MB that will be processed: {0:.2f}".format(query_size_megabyte)
        message += ". Cost estimate: ${0:.2f}".format(dollar_est)
        message += " or Rp{0:.2f})".format(rp_est)
        return True, message
    except:
        return False, sys.exc_info()[1]


def run_query(sql, limit=25):
    """Execute the SQL with an appended LIMIT; return (rows, message).

    On failure returns ([], exception). NOTE(review): appending LIMIT to
    arbitrary SQL breaks queries that already end with LIMIT/ORDER-less
    statements that cannot take one.
    """
    sql_with_limit = sql + "\n LIMIT {}".format(limit)
    job_config = bigquery.QueryJobConfig()
    job_config.flatten_results = True
    job_config.use_query_cache = False
    job_config.use_legacy_sql = False
    client = bigquery.Client(project=PROJECT)
    try:
        query_job = client.query(sql_with_limit, job_config=job_config) # API request
        rows = query_job.result()
        return rows, "OK"
    except:
        return [], sys.exc_info()[1]


def create_branch(repository, dag_name):
    """Create a timestamped branch off master and return its name."""
    branch_name = '-'.join([dag_name, datetime.datetime.now().strftime('%Y%m%d%H%M%S')])
    master_branch = repository.branch(MASTER_BRANCH_NAME)
    master_head_sha = master_branch.commit.sha
    repository.create_branch_ref(branch_name, master_head_sha)
    return branch_name


def create_github_pr(dag_name, dag_file_content, sql_file_contents, committer_name, committer_email):
    """Push the DAG file plus its tasks' SQL files to a new branch and open a PR.

    Files are created or updated depending on whether they already exist
    on the branch (probed via file_contents; a failed probe means create).
    """
    repository = g.repository(DAG_REPO_ORG, DAG_REPO_NAME)
    branch_name = create_branch(repository, dag_name)
    dag_file_path = DAG_FOLDER + dag_name + '.py'
    content = None
    try:
        content = repository.file_contents(path=dag_file_path, ref=branch_name)
    except Exception:
        # File not present on the branch yet — fall through to create.
        pass
    if content is None:
        repository.create_file(path=dag_file_path,
            message="Create DAG File {}".format(dag_name),
            content=dag_file_content,
            branch=branch_name,
            committer={ "name": committer_name, "email": committer_email })
    else:
        content.update(
            message="Update DAG File {}".format(dag_name),
            content=dag_file_content,
            branch=branch_name,
            committer={ "name": committer_name, "email": committer_email })
    for task_id, sql in sql_file_contents:
        sql_file_path = SQL_FOLDER + task_id + '.sql'
        content = None
        try:
            content = repository.file_contents(path=sql_file_path, ref=branch_name)
        except Exception:
            pass
        if content is None:
            repository.create_file(path=sql_file_path,
                message="Create SQL for task {}".format(task_id),
                content=sql,
                branch=branch_name,
                committer={ "name": committer_name, "email": committer_email })
        else:
            content.update(
                message="Update SQL File for task {}".format(task_id),
                content=sql,
                branch=branch_name,
                committer={ "name": committer_name, "email": committer_email })
    pull_body="*test* _123_" #TODO
    repository.create_pull(title=branch_name, base=MASTER_BRANCH_NAME, head=branch_name, body=pull_body)


firebase_request_adapter = requests.Request()


@app.route('/')
def root():
    """Dashboard: lists recent DAGs and tasks for an authenticated user."""
    # Verify Firebase auth.
    id_token = request.cookies.get("token")
    error_message = None
    claims = None
    dags = None
    tasks = None
    if id_token:
        try:
            # Verify the token against the Firebase Auth API. This example
            # verifies the token on each page load. For improved performance,
            # some applications may wish to cache results in an encrypted
            # session store (see for instance
            # http://flask.pocoo.org/docs/1.0/quickstart/#sessions).
            claims = google.oauth2.id_token.verify_firebase_token(
                id_token, firebase_request_adapter)
            tasks = fetch_tasks()
            dags = fetch_dags()
        except ValueError as exc:
            # This will be raised if the token is expired or any other
            # verification checks fail.
            error_message = str(exc)
    return render_template(
        'index.html',
        user_data=claims, error_message=error_message, dags=dags, tasks=tasks)


@app.route('/dag_form', methods=["GET", "POST"])
def dag_form():
    """Create/edit a DAG; on submit renders the DAG template and opens a PR.

    NOTE(review): claims is used unconditionally on submit (claims['email']),
    so an unauthenticated POST raises — confirm auth is enforced upstream.
    """
    # Verify Firebase auth.
    id_token = request.cookies.get("token")
    error_message = None
    claims = None
    if id_token:
        claims = google.oauth2.id_token.verify_firebase_token(
            id_token, firebase_request_adapter)
    form = DagForm()
    dag_text = ""
    if form.validate_on_submit():
        # Render the Jinja DAG template that lives next to this module.
        root = os.path.dirname(os.path.abspath(__file__))
        templates_dir = os.path.join(root, 'templates')
        env = Environment( loader = FileSystemLoader(templates_dir) )
        template = env.get_template('dag_template.py')
        store_dag(dag_name=form.dag_name.data,
            owner=form.owner.data,
            start_date=form.start_date.data.strftime("%Y-%m-%d"),
            email=form.email.data,
            retries=form.retries.data,
            retry_delay_minutes=form.retry_delay_minutes.data,
            schedule_interval=form.schedule_interval.data,
            tasks=form.tasks.data,
            dependencies=form.dependencies.data,
            updated_by=claims['email'])
        tasks = []
        sql_file_contents = []
        # Collect each referenced task's SQL; inside the DAG the task points
        # at the relative 'sql/<task_id>.sql' path instead of inline SQL.
        for task_id in form.tasks.data.replace(' ','').split(','):
            task = fetch_task(task_id)
            if task != "":
                # upload_sql(task_id, task.get('sql'))
                sql_file_contents.append((task_id, task.get('sql').encode()))
                task_for_dag = copy.deepcopy(task)
                task_for_dag['sql'] = 'sql/' + task_id + ".sql"
                tasks.append(task_for_dag)
        dependencies = []
        for dependency in form.dependencies.data.replace(' ','').split(';'):
            temp = dependency.split(',')
            dependencies.append({ 'preceding_task_id': temp[0], 'task_id': temp[1] })
        dag_text = template.render(
            dag_name=form.dag_name.data,
            owner=form.owner.data,
            start_date=form.start_date.data.strftime('%Y-%m-%d'),
            email=form.email.data,
            retries=form.retries.data,
            retry_delay_minutes=form.retry_delay_minutes.data,
            schedule_interval=form.schedule_interval.data,
            tasks=tasks,
            dependencies=dependencies,
        )
        # upload_dag(dag_name=form.dag_name.data, dag_text=dag_text)
        create_github_pr(dag_name=form.dag_name.data,
            dag_file_content=dag_text.encode(),
            sql_file_contents=sql_file_contents,
            committer_name=claims['name'],
            committer_email=claims['email'])
    else:
        # GET with ?dag_name=... pre-populates the form for editing.
        if request.args.get('dag_name') is not None:
            dag = fetch_dag(dag_name=request.args.get('dag_name'))
            if dag is not None:
                form.dag_name.data = dag.get('dag_name')
                form.owner.data = dag.get('owner')
                form.start_date.data = datetime.datetime.strptime(dag.get('start_date'),"%Y-%m-%d")
                form.retries.data = dag.get('retries')
                form.retry_delay_minutes.data = dag.get('retry_delay_minutes')
                form.email.data = dag.get('email')
                form.schedule_interval.data = dag.get('schedule_interval')
                form.tasks.data = dag.get('tasks')
                form.dependencies.data = dag.get('dependencies')
    return render_template('dag_form.html', user_data=claims, title='DAG Form', form=form, dag_text=dag_text)


@app.route('/task_form', methods=["GET", "POST"])
def task_form():
    """Create/edit a task; buttons: Save (after dry-run check) or Run Query."""
    # Verify Firebase auth.
    id_token = request.cookies.get("token")
    error_message = None
    claims = None
    times = None
    if id_token:
        claims = google.oauth2.id_token.verify_firebase_token(
            id_token, firebase_request_adapter)
    form = TaskForm()
    is_save_ok, save_msg = None, None
    is_query_ok, check_query_result = None, None
    run_query_result, run_query_result_headers, run_query_result_msg = [], [], None
    if form.validate_on_submit():
        sql = form.sql.data
        if form.sql_params.data:
            # Substitute test-only template params (k=v,k2=v2) into the SQL.
            params = form.sql_params.data.replace(' ','').split(',')
            param_dict = {}
            for param in params:
                param_dict[param.split('=')[0]] = param.split('=')[1]
            sql = Template(sql).render(param_dict)
        # Always dry-run first; Save and Run both gate on the result.
        is_query_ok, check_query_result = check_query(sql)
        if form.save.data:
            if is_query_ok:
                is_save_ok, save_msg = store_task(task_id=form.task_id.data,
                    destination_table=form.destination_table.data,
                    sql=sql,
                    sql_params=form.sql_params.data,
                    type_='BQ_ETL',
                    updated_by=claims['email'])
            else:
                save_msg = "Can not save, something happened, see error msg"
        # elif form.check_query.data: # do nothing
        elif form.run_query.data:
            if is_query_ok:
                run_query_result, run_query_result_msg = run_query(sql)
                run_query_result_headers = [field.name for field in run_query_result.schema]
            else:
                run_query_result_msg = "Can not run, something happened, see error msg"
    else:
        # GET with ?task_id=... pre-populates the form for editing.
        if request.args.get('task_id') is not None:
            task = fetch_task(task_id=request.args.get('task_id'))
            if task is not None:
                form.task_id.data = task.get('task_id')
                form.destination_table.data = task.get('destination_table')
                form.sql.data = task.get('sql')
                form.sql_params.data = task.get('sql_params')
    return render_template('task_form.html', user_data=claims, title='Task Form', form=form,
        is_save_ok=is_save_ok, save_msg=save_msg,
        is_query_ok=is_query_ok, check_query_result=check_query_result,
        run_query_result_headers=run_query_result_headers,
        run_query_result=run_query_result,
        run_query_result_msg=run_query_result_msg)


if __name__ == '__main__':
    # This is used when running locally only. When deploying to Google App
    # Engine, a webserver process such as Gunicorn will serve the app. This
    # can be configured by adding an `entrypoint` to app.yaml.
    # Flask's development server will automatically serve static files in
    # the "static" directory. See:
    # http://flask.pocoo.org/docs/1.0/quickstart/#static-files. Once deployed,
    # App Engine itself will serve those files as configured in app.yaml.
    app.run(host='127.0.0.1', port=8080, debug=True)
40.945652
156
0.623042
2,287
18,835
4.880192
0.155225
0.020697
0.021324
0.018816
0.473255
0.379984
0.362423
0.328644
0.292089
0.276857
0
0.008483
0.261481
18,835
460
157
40.945652
0.793889
0.060473
0
0.379487
0
0
0.12396
0.013528
0
0
0
0.002174
0
1
0.038462
false
0.005128
0.035897
0
0.158974
0.005128
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebbaa88673070e1877aabf5581d29c7b6749d413
982
py
Python
ej2.py
NiopTres/Ejercicio-Herramientas-Computacionales
af97b810e1ade4fb2cdfa433e1e09ddc301b7dd3
[ "Unlicense" ]
null
null
null
ej2.py
NiopTres/Ejercicio-Herramientas-Computacionales
af97b810e1ade4fb2cdfa433e1e09ddc301b7dd3
[ "Unlicense" ]
null
null
null
ej2.py
NiopTres/Ejercicio-Herramientas-Computacionales
af97b810e1ade4fb2cdfa433e1e09ddc301b7dd3
[ "Unlicense" ]
null
null
null
# Weighted final-grade calculator: two exams at 25% each, the workshop at
# 20%, and the project at 30%.
#
# Input:  four integer grades read from stdin.
# Process: multiply each grade by its weight and sum the contributions.
# Output: the weighted final grade printed to stdout.
nota_parcial_1 = int(input("Nota primer Parcial: "))
nota_parcial_2 = int(input("Nota segundo Parcial: "))
nota_taller = int(input("Nota del Taller: "))
nota_proyecto = int(input("Nota del Proyecto: "))

nota_final = (
    nota_parcial_1 * (25 / 100)
    + nota_parcial_2 * (25 / 100)
    + nota_taller * (20 / 100)
    + nota_proyecto * (30 / 100)
)

print(nota_final)
24.55
99
0.729124
126
982
5.666667
0.34127
0.063025
0.067227
0.042017
0
0
0
0
0
0
0
0.050063
0.186354
982
39
100
25.179487
0.843554
0
0
0
0
0
0.195062
0
0
0
0
0
0
1
0
false
0
0
0
0
0.1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebbeecd7804589e9d66d63e0bc0c0723583222d7
1,395
py
Python
build/package.py
weijiekoh/malvarma
cc8b699b697a0735dd53af27ae4a23955b581f93
[ "MIT" ]
15
2018-01-15T14:22:46.000Z
2022-03-20T19:05:27.000Z
build/package.py
stephensong/malvarma
cc8b699b697a0735dd53af27ae4a23955b581f93
[ "MIT" ]
1
2018-01-21T09:56:04.000Z
2018-06-21T06:20:23.000Z
build/package.py
stephensong/malvarma
cc8b699b697a0735dd53af27ae4a23955b581f93
[ "MIT" ]
6
2018-01-21T10:00:48.000Z
2021-07-26T00:03:45.000Z
#!/usr/bin/env python3

"""
This script checksums, signs, and compresses malvarma-<version>.img, and
creates malvarma-<version>.tar.bz2. The author's GPG signature is hardcoded
below.
"""

import os
import shutil
import sys
import subprocess

if __name__ == "__main__":
    if len(sys.argv) == 1:
        print("Usage: python3 package.py malvarma-<version>.img")
        sys.exit(1)

    # The image filename comes straight from argv; all subprocess calls
    # below use argv lists (shell=False) so shell metacharacters in the
    # name can no longer be interpreted as commands (was shell=True with
    # str.format interpolation).
    imgfile = sys.argv[1]
    folder_name = imgfile.split(".img")[0]

    if not os.path.exists(imgfile):
        print("Error: {imgfile} does not exist.".format(imgfile=imgfile))
        sys.exit(1)

    print("Checksumming...")
    # Replaces the shell '>' redirection: sha256sum's stdout is written
    # directly to <imgfile>.sha256.
    with open(imgfile + ".sha256", "w") as sha_file:
        subprocess.check_call(["sha256sum", imgfile], stdout=sha_file)

    print("Signing...")
    # Produces <imgfile>.sig with the author's hardcoded signing key.
    subprocess.check_call(
        ["gpg", "--detach-sign", "--default-key", "0x90DB43617CCC1632",
         "--sign", imgfile],
        stderr=subprocess.STDOUT)

    print("Compressing")
    # Rebuild the staging folder from scratch, move the three artifacts in,
    # and tar the folder.
    shutil.rmtree(folder_name, ignore_errors=True)
    os.makedirs(folder_name)
    shutil.move(imgfile, folder_name)
    shutil.move(imgfile + ".sig", folder_name)
    shutil.move(imgfile + ".sha256", folder_name)
    subprocess.check_call(
        ["tar", "-cvjSf", folder_name + ".tar.bz2", folder_name],
        stderr=subprocess.STDOUT)
31.704545
120
0.665233
171
1,395
5.298246
0.432749
0.110375
0.066225
0.082781
0.247241
0.12362
0.12362
0.12362
0.12362
0
0
0.026738
0.195699
1,395
43
121
32.44186
0.780749
0.127599
0
0.185185
0
0
0.239868
0.035567
0
0
0.014888
0
0
1
0
false
0
0.148148
0
0.148148
0.185185
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebc2424c2e78916d3caf093c64b2223284f39d93
1,955
py
Python
examples/recordRawFrames.py
OnionIoT/tau-lidar-camera
a70b24e18be8e4c5abfe525c6768fbc10a492fd8
[ "MIT" ]
31
2020-12-18T16:35:15.000Z
2022-03-25T18:41:19.000Z
examples/recordRawFrames.py
OnionIoT/tau-lidar-camera
a70b24e18be8e4c5abfe525c6768fbc10a492fd8
[ "MIT" ]
17
2020-11-18T16:10:36.000Z
2022-02-01T22:19:11.000Z
examples/recordRawFrames.py
OnionIoT/tau-lidar-camera
a70b24e18be8e4c5abfe525c6768fbc10a492fd8
[ "MIT" ]
4
2021-01-18T17:25:02.000Z
2021-11-01T13:25:45.000Z
import os
import time
from signal import signal, SIGINT

from TauLidarCommon.frame import FrameType
from TauLidarCamera.camera import Camera

# Directory where raw frames are written; one file per frame.
outputDir = './samples'
# Cleared by the SIGINT handler to stop the capture loop.
runLoop = True


def setup():
    """Open and configure the first available Tau LiDAR camera.

    Returns the Camera instance, or None if no device was found.
    """
    camera = None
    ports = Camera.scan()  ## Scan for available Tau Camera devices

    if len(ports) > 0:
        camera = Camera.open(ports[0])  ## Open the first available Tau Camera
        camera.setModulationChannel(0)  ## autoChannelEnabled: 0, channel: 0
        camera.setIntegrationTime3d(0, 1000)  ## set integration time 0: 1000
        camera.setMinimalAmplitude(0, 10)  ## set minimal amplitude 0: 80

        cameraInfo = camera.info()
        print("\nToF camera opened successfully:")
        print(" model: %s" % cameraInfo.model)
        print(" firmware: %s" % cameraInfo.firmware)
        print(" uid: %s" % cameraInfo.uid)
        print(" resolution: %s" % cameraInfo.resolution)
        print(" port: %s" % cameraInfo.port)

        print("\nPress Ctrl-c in terminal to shutdown ...")

    return camera


def run(camera):
    """Capture raw DISTANCE_AMPLITUDE frames until SIGINT clears runLoop.

    Each frame is written to outputDir as '<unix-timestamp>.frame'.
    """
    global runLoop

    count = 0
    if not os.path.exists(outputDir):
        os.makedirs(outputDir)

    print('Recording...')
    while runLoop:
        frame = camera.readFrameRawData(FrameType.DISTANCE_AMPLITUDE)

        if frame:
            # Filename is the capture time, so frames sort chronologically.
            fName = '%s/%s.frame'%(outputDir, time.time())
            with open(fName, "wb") as binary_file:
                binary_file.write(frame)
            # '\r' keeps the counter on one terminal line.
            print('\rFrame: %d'%count, end='')
            count += 1


def cleanup(camera):
    """Release the camera device."""
    print('\nShutting down ...')
    camera.close()


def handler(signal_received, frame):
    """SIGINT handler: request a graceful stop of the capture loop."""
    global runLoop
    runLoop = False


if __name__ == "__main__":
    camera = setup()
    signal(SIGINT, handler)
    if camera:
        try:
            run(camera)
        except Exception as e:
            # Best-effort: report the error but still close the camera.
            print(e)
        cleanup(camera)
26.066667
89
0.586189
210
1,955
5.4
0.461905
0.048501
0.031746
0
0
0
0
0
0
0
0
0.017647
0.304348
1,955
74
90
26.418919
0.816176
0.083887
0
0.037736
0
0
0.133146
0
0
0
0
0
0
1
0.075472
false
0
0.09434
0
0.188679
0.207547
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebc29b44ef030ad4cf5e8ff010606f3919b7f18d
1,038
py
Python
HyeonJinGithub/2020-10-13/2560 회장뽑기.py
Team-Morgorithm/Morgorithm
133f19e1e15e423589bd7b94b698d2afc76c3ef6
[ "MIT" ]
1
2021-07-29T01:33:44.000Z
2021-07-29T01:33:44.000Z
HyeonJinGithub/2020-10-13/2560 회장뽑기.py
Team-NTO/NTO
133f19e1e15e423589bd7b94b698d2afc76c3ef6
[ "MIT" ]
150
2020-09-28T13:11:29.000Z
2021-08-05T23:28:36.000Z
HyeonJinGithub/2020-10-13/2560 회장뽑기.py
Team-Morgorithm/morgorithm
133f19e1e15e423589bd7b94b698d2afc76c3ef6
[ "MIT" ]
3
2020-09-30T14:05:56.000Z
2021-07-29T01:33:53.000Z
import sys
from collections import deque


def bfs(x, adj=None, n=None):
    """Return the eccentricity of vertex *x*: the number of BFS levels
    needed to reach every vertex reachable from *x*.

    Args:
        x: start vertex (1-based).
        adj: adjacency list (index 0 unused). Defaults to the module-level
            global ``a`` so the original ``bfs(i)`` call sites still work;
            passing it explicitly makes the function reusable and testable.
        n: number of vertices. Defaults to the module-level global ``N``.

    Returns:
        Max BFS depth from *x* (0 when *x* has no neighbours).
    """
    if adj is None:
        adj = a
    if n is None:
        n = N

    q = deque([x])
    dist = [0] * (n + 1)
    seen = [False] * (n + 1)
    seen[x] = True
    depth = -1  # incremented once per BFS level, so the start level is 0

    while q:
        depth += 1
        # len(q) is evaluated once here, freezing the current level size.
        for _ in range(len(q)):
            u = q.popleft()
            for v in adj[u]:
                if dist[v] == 0 and not seen[v]:
                    dist[v] = dist[u] + 1
                    seen[v] = True
                    q.append(v)
    return depth


if __name__ == '__main__':
    N = int(input())
    a = [[] for _ in range(N + 1)]

    # Read undirected edges until the -1 -1 sentinel.
    while True:
        u, v = map(int, sys.stdin.readline().split())
        if u == -1 and v == -1:
            break
        a[u].append(v)
        a[v].append(u)

    # Candidates with the smallest eccentricity win.
    best = 1000000
    res = []
    for i in range(1, N + 1):
        score = bfs(i)
        if score < best:
            best = score
            res = [i]
        elif score == best:
            res.append(i)

    print(best, len(res))
    for s in res:
        print(s, end=' ')
24.714286
54
0.421002
141
1,038
3.028369
0.361702
0.018735
0.046838
0
0
0
0
0
0
0
0
0.032423
0.435453
1,038
42
55
24.714286
0.696246
0
0
0.097561
0
0
0.008662
0
0
0
0
0
0
1
0.02439
false
0
0.04878
0
0.097561
0.04878
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebc743c4294c3b10ce8684625c881a47ded3ea8a
5,235
py
Python
tests/test_bokeh_wamp.py
ricorx7/rti-python
1316323b782ddb8df357e55404f507a9573e172c
[ "BSD-3-Clause" ]
1
2017-06-10T13:27:44.000Z
2017-06-10T13:27:44.000Z
tests/test_bokeh_wamp.py
ricorx7/rti-python
1316323b782ddb8df357e55404f507a9573e172c
[ "BSD-3-Clause" ]
10
2019-12-28T18:06:18.000Z
2022-03-25T18:48:20.000Z
tests/test_bokeh_wamp.py
ricorx7/rti_python
1316323b782ddb8df357e55404f507a9573e172c
[ "BSD-3-Clause" ]
null
null
null
import json

from twisted.logger import Logger
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession
from autobahn.twisted.wamp import ApplicationRunner
from bokeh.client import push_session
from bokeh.plotting import figure, curdoc
from bokeh.models.widgets import Panel, Tabs
from bokeh.models import Range1d
import numpy as np


class test_bokeh_wamp(ApplicationSession):
    """WAMP session that streams ensemble amplitude/correlation data into
    live Bokeh line plots (one line per beam, 4 beams)."""

    def __init__(self, config=None):
        ApplicationSession.__init__(self, config)

    @inlineCallbacks
    def onJoin(self, details):
        """
        Initialize the WAMP settings. This is called before everything is setup to ensure
        the WAMP settings are initialized.
        :return:
        """
        self.log.info("WAMP connected")

        yield self.subscribe(self.on_ens_json_data, u"com.rti.data.ens")
        self.log.info("test Bokehs WAMP init")

    def on_ens_json_data(self, data):
        """
        Called when JSON Ensemble data is received from WAMP.
        :param data: JSON object containing serial data.
        :return:
        """
        json_data = json.loads(data)  # convert to JSON

        num_bins = json_data['EnsembleData']["NumBins"]
        bins = list(range(num_bins))
        amp = json_data['Amplitude']["Amplitude"]
        corr = json_data['Correlation']["Correlation"]

        # One amplitude and one correlation line renderer per beam 0-3;
        # this loop replaces eight copy/pasted per-beam blocks.
        for beam in range(4):
            amp_line = self.config.extra['ampB%d' % beam]
            amp_line.data_source.data["y"] = bins
            amp_line.data_source.data["x"] = [amp[b][beam] for b in bins]

            corr_line = self.config.extra['corrB%d' % beam]
            corr_line.data_source.data["y"] = bins
            corr_line.data_source.data["x"] = [corr[b][beam] for b in bins]


if __name__ == '__main__':
    x = np.array([1])
    y = np.array([1])

    TOOLS = 'pan,box_zoom,wheel_zoom,box_select,crosshair,resize,reset,save,hover'

    ampPlot = figure(plot_width=600, plot_height=800, tools=TOOLS, x_range=Range1d(0, 140))
    ampPlot.legend.location = "top_left"
    ampPlot.legend.click_policy = "hide"
    ampPlot.xaxis[0].axis_label = "dB"
    ampPlot.yaxis[0].axis_label = "Bin"

    corrPlot = figure(plot_width=600, plot_height=800, tools=TOOLS, x_range=Range1d(0, 1))
    corrPlot.legend.location = "top_left"
    corrPlot.legend.click_policy = "hide"
    corrPlot.xaxis[0].axis_label = "% (percent)"
    corrPlot.yaxis[0].axis_label = "Bin"

    # Create the per-beam line renderers; keys must match on_ens_json_data.
    extra = {}
    for beam, color in enumerate(['red', 'green', 'blue', 'orange']):
        legend = "B%d" % beam
        extra['ampB%d' % beam] = ampPlot.line(
            x=x, y=y, line_width=2, alpha=.85, color=color, legend=legend)
        extra['corrB%d' % beam] = corrPlot.line(
            x=x, y=y, line_width=2, alpha=.85, color=color, legend=legend)

    tabAmp = Panel(child=ampPlot, title="Amplitude")
    tabCorr = Panel(child=corrPlot, title="Correlation")
    tabs = Tabs(tabs=[tabAmp, tabCorr])

    # open a session to keep our local document in sync with server
    session = push_session(curdoc())
    session.show(tabs)  # open the document in a browser

    # Start the WAMP connection
    # Connect the main window to the WAMP connection
    runner = ApplicationRunner(url=u"ws://localhost:55058/ws", realm=u"realm1", extra=extra)
    runner.run(test_bokeh_wamp)

    session.loop_until_closed()  # run forever
41.547619
110
0.637631
695
5,235
4.686331
0.253237
0.055266
0.073687
0.036844
0.45809
0.429229
0.338348
0.240098
0.156586
0.156586
0
0.033028
0.207641
5,235
126
111
41.547619
0.75217
0.082713
0
0
0
0
0.126722
0.019284
0
0
0
0
0
1
0.034483
false
0
0.114943
0
0.16092
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebc77323d31536c2b311a55342b7c92b4fa87b56
5,875
py
Python
tests/integration/test_nornsible_integration.py
PhillSimonds/nornsible
5be98f5bfa66410bc269fa239a53016b4a4ac65a
[ "MIT" ]
null
null
null
tests/integration/test_nornsible_integration.py
PhillSimonds/nornsible
5be98f5bfa66410bc269fa239a53016b4a4ac65a
[ "MIT" ]
null
null
null
tests/integration/test_nornsible_integration.py
PhillSimonds/nornsible
5be98f5bfa66410bc269fa239a53016b4a4ac65a
[ "MIT" ]
null
null
null
from pathlib import Path
import sys
from unittest.mock import patch

from nornir import InitNornir

import nornsible
from nornsible import InitNornsible, nornsible_delegate, nornsible_task

NORNSIBLE_DIR = nornsible.__file__
TEST_DIR = f"{Path(NORNSIBLE_DIR).parents[1]}/tests/"


@nornsible_task
def custom_task_example(task):
    return "Hello, world!"


@nornsible_task
def custom_task_example_2(task):
    return "Hello, world!"


@nornsible_delegate
def custom_task_example_3(task):
    return "Hello, world!"


def _make_nr():
    """Build an InitNornsible-wrapped Nornir from the shared test inventory.

    Must be called while ``sys.argv`` is patched: nornsible reads the CLI
    arguments when ``InitNornsible`` runs. Extracted here to replace the
    identical ``InitNornir(...)`` block copy/pasted in every test.
    """
    nr = InitNornir(
        inventory={
            "plugin": "nornir.plugins.inventory.simple.SimpleInventory",
            "options": {
                "host_file": f"{TEST_DIR}_test_nornir_inventory/hosts.yaml",
                "group_file": f"{TEST_DIR}_test_nornir_inventory/groups.yaml",
            },
        },
        logging={"enabled": False},
    )
    return InitNornsible(nr)


def test_nornsible_task_skip_task():
    # -s skips the named task on normal hosts; the delegate host reports too.
    testargs = ["somescript", "-l", "localhost", "-s", "custom_task_example"]
    with patch.object(sys, "argv", testargs):
        nr = _make_nr()
        task_result = nr.run(task=custom_task_example)
        assert set(task_result.keys()) == {"delegate", "localhost"}
        assert task_result["localhost"].result == "Task skipped!"
        assert task_result["delegate"].result == "Task skipped, delegate host!"


def test_nornsible_task_skip_task_disable_delegate():
    # -d disables the delegate host entirely.
    testargs = ["somescript", "-l", "localhost", "-s", "custom_task_example", "-d"]
    with patch.object(sys, "argv", testargs):
        nr = _make_nr()
        task_result = nr.run(task=custom_task_example)
        assert set(task_result.keys()) == {"localhost"}
        assert task_result["localhost"].result == "Task skipped!"


def test_nornsible_task_explicit_task():
    # -t runs only the named task; the other is skipped.
    testargs = ["somescript", "-l", "localhost", "-t", "custom_task_example_2"]
    with patch.object(sys, "argv", testargs):
        nr = _make_nr()
        print(nr.inventory.hosts)
        tasks = [custom_task_example, custom_task_example_2]
        task_results = [nr.run(task=task) for task in tasks]
        assert task_results[0]["localhost"].result == "Task skipped!"
        assert task_results[1]["localhost"].result == "Hello, world!"
        assert task_results[0]["delegate"].result == "Task skipped, delegate host!"
        assert task_results[1]["delegate"].result == "Task skipped, delegate host!"


def test_nornsible_task_no_tags():
    # No tag flags: every task runs on normal hosts.
    testargs = ["somescript", "-l", "localhost"]
    with patch.object(sys, "argv", testargs):
        nr = _make_nr()
        print(nr.inventory.hosts)
        tasks = [custom_task_example, custom_task_example_2]
        task_results = [nr.run(task=task) for task in tasks]
        assert task_results[0]["localhost"].result == "Hello, world!"
        assert task_results[1]["localhost"].result == "Hello, world!"


def test_nornsible_delegate():
    # A @nornsible_delegate task is skipped on non-delegate hosts.
    testargs = ["somescript", "-l", "localhost"]
    with patch.object(sys, "argv", testargs):
        nr = _make_nr()
        print(nr.inventory.hosts)
        task_results = [nr.run(task=task) for task in [custom_task_example_3]]
        assert task_results[0]["localhost"].result == "Task skipped, non-delegate host!"


def test_nornsible_delegate_disable_delegate():
    # With -d the delegate host is disabled, so the delegate task is skipped.
    testargs = ["somescript", "-l", "localhost", "-d"]
    with patch.object(sys, "argv", testargs):
        nr = _make_nr()
        print(nr.inventory.hosts)
        task_results = [nr.run(task=task) for task in [custom_task_example_3]]
        assert task_results[0]["localhost"].result == "Task skipped, delegate host!"
36.042945
88
0.589787
616
5,875
5.375
0.118506
0.053156
0.071882
0.043491
0.89852
0.84476
0.782241
0.774086
0.68952
0.68952
0
0.003778
0.279149
5,875
162
89
36.265432
0.77804
0
0
0.639706
0
0
0.285787
0.147064
0
0
0
0
0.095588
1
0.066176
false
0
0.044118
0.022059
0.132353
0.029412
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
ebc7a37f046171aa884cf21a18cce4f0bbd74515
8,338
py
Python
scripts/betterX_labs_attributes.py
eliasall/BetterX-Cloud
c6796f1207ced4ad3c63fd56df08ecf5ece613e1
[ "Apache-2.0" ]
null
null
null
scripts/betterX_labs_attributes.py
eliasall/BetterX-Cloud
c6796f1207ced4ad3c63fd56df08ecf5ece613e1
[ "Apache-2.0" ]
null
null
null
scripts/betterX_labs_attributes.py
eliasall/BetterX-Cloud
c6796f1207ced4ad3c63fd56df08ecf5ece613e1
[ "Apache-2.0" ]
null
null
null
## Web File


def insertWeb(filetype, json, cursor, conn, uid):
    """Dispatch point: persist a 'web' (HAR-style) upload into the lab tables."""
    if filetype == 'web':
        web_page_node(json, uid, cursor, conn)   # [pages] / [pageNode]
        web_entry_node(json, uid, cursor, conn)  # [pages] / [entriesNode]


def _collect_present_keys(node, keys):
    """Collect the *keys* present in dict *node*.

    Returns ({slot_index: key_name}, [value, ...]) in the same order, the
    shape expected by toCommaStringDict. Missing keys leave a gap in the
    slot numbering (matching the original counter behavior) but add no
    name/value, so names and values always stay aligned.
    """
    vals = {}
    values = []
    for cnt, key in enumerate(keys):
        vals, values = appendJsonKey(node, key, vals, values, cnt)
    return vals, values


def _rename_all(vals, renames):
    """Apply a list of (from, to) column renames to the collected names."""
    for frm, to in renames:
        renameArrayItem(vals, frm, to)


def web_entry_response(json_entries_node, uid, cursor, conn, parentid):
    """Insert one response record (plus selected header and content fields)
    into lab_web_entries_response, linked to parent row *parentid*.

    Attribute lists are tuples (were sets) so column order is deterministic.
    """
    tblName = 'lab_web_entries_response'
    # Mandatory top-level response attributes (KeyError if absent).
    featureAttrs = ('status', 'statusText', 'httpVersion', 'cookieNumber',
                    'redirectURL', 'headersSize', 'bodySize')
    # Optional response header / content fields (silently skipped if absent).
    headerAttrs = ('Date', 'Server', 'X-Powered-By', 'Content-Encoding',
                   'Content-Length', 'Keep-Alive', 'Connection', 'Content-Type')
    contentAttrs = ('size', 'compression', 'mimeType', 'encoding')

    vals = {}
    values = []
    for cnt, tis in enumerate(featureAttrs):
        vals[cnt] = tis
        values.append(json_entries_node['response'][tis])
    # Foreign key back to the parent lab_web_entries row.
    vals[len(featureAttrs)] = 'web_entries_id'
    values.append(parentid)
    attrsInJson, typesInJson = toCommaStringDict(vals)

    vals2, values2 = _collect_present_keys(json_entries_node['response']['headers'], headerAttrs)
    # e.g. 'X-Powered-By' -> 'header_XPoweredBy' (prefix + hyphens dropped).
    _rename_all(vals2, [(h, 'header_' + h.replace('-', '')) for h in headerAttrs])
    attrsInJson2, typesInJson2 = toCommaStringDict(vals2)

    vals3, values3 = _collect_present_keys(json_entries_node['response']['content'], contentAttrs)
    _rename_all(vals3, [(c, 'content_' + c) for c in contentAttrs])
    attrsInJson3, typesInJson3 = toCommaStringDict(vals3)

    attrsInJsonCombined = attrsInJson
    typesInJsonCombined = typesInJson
    if attrsInJson2 != '':
        attrsInJsonCombined = attrsInJsonCombined + ',' + attrsInJson2
        typesInJsonCombined = typesInJsonCombined + ',' + typesInJson2
        values.extend(values2)
    if attrsInJson3 != '':
        attrsInJsonCombined = attrsInJsonCombined + ',' + attrsInJson3
        typesInJsonCombined = typesInJsonCombined + ',' + typesInJson3
        values.extend(values3)

    dbinsert(tblName, attrsInJsonCombined, typesInJsonCombined, cursor, values, conn)


def web_entry_request(json_entries_node, uid, cursor, conn, parentid):
    """Insert one request record (plus selected header fields) into
    lab_web_entries_request, linked to parent row *parentid*."""
    tblName = 'lab_web_entries_request'
    featureAttrs = ('method', 'url', 'httpVersion', 'cookieNumber',
                    'headerSize', 'bodySize')
    headerAttrs = ('Host', 'User-Agent', 'Accept', 'Accept-Encoding',
                   'Connection', 'Content-Length', 'Keep-Alive')

    vals = {}
    values = []
    for cnt, tis in enumerate(featureAttrs):
        vals[cnt] = tis
        values.append(json_entries_node['request'][tis])
    vals[len(featureAttrs)] = 'web_entries_id'
    values.append(parentid)
    attrsInJson, typesInJson = toCommaStringDict(vals)

    vals2, values2 = _collect_present_keys(json_entries_node['request']['headers'], headerAttrs)
    _rename_all(vals2, [(h, 'header_' + h.replace('-', '')) for h in headerAttrs])
    attrsInJson2, typesInJson2 = toCommaStringDict(vals2)

    attrsInJsonCombined = attrsInJson
    typesInJsonCombined = typesInJson
    if attrsInJson2 != '':
        attrsInJsonCombined = attrsInJson + ',' + attrsInJson2
        typesInJsonCombined = typesInJson + ',' + typesInJson2
        values.extend(values2)

    dbinsert(tblName, attrsInJsonCombined, typesInJsonCombined, cursor, values, conn)


def web_entry_node(json, uid, cursor, conn):
    """Insert every entry of every page into lab_web_entries, then insert the
    request/response child rows referencing the new entry id."""
    tblName = 'lab_web_entries'
    featureAttrs = ('pageid', 'entryStartTime', 'time', 'serverIPAddress', 'connection')
    timingAttrs = ('blocked', 'dns', 'connect', 'send', 'wait', 'receive', 'ssl')
    cacheAttrs = ('beforeRequestCacheEntries', 'afterRequestCacheEntries', 'hitCount')

    for jiv in json['pages']:
        for innerjiv in jiv['entriesNode']:
            keytypevals, values = _collect_present_keys(innerjiv, featureAttrs)
            attrsInJson, typesInJson = toCommaStringDict(keytypevals)

            keytypevals2, values2 = _collect_present_keys(innerjiv['timings'], timingAttrs)
            attrsInJson2, typesInJson2 = toCommaStringDict(keytypevals2)

            keytypevals3, values3 = _collect_present_keys(innerjiv['cache'], cacheAttrs)
            attrsInJson3, typesInJson3 = toCommaStringDict(keytypevals3)

            ## combine the three column groups
            attrsInJsonCombined = attrsInJson + ',' + attrsInJson2 + ',' + attrsInJson3
            typesInJsonCombined = typesInJson + ',' + typesInJson2 + ',' + typesInJson3
            values.extend(values2)
            values.extend(values3)

            # insert, then create child rows referencing the new id
            dbinsert(tblName, attrsInJsonCombined, typesInJsonCombined, cursor, values, conn)
            web_entry_id = getMaxId(tblName, cursor, conn)
            web_entry_request(innerjiv, uid, cursor, conn, web_entry_id)
            web_entry_response(innerjiv, uid, cursor, conn, web_entry_id)


def web_page_node(json, uid, cursor, conn):
    """Insert one lab_web_pages row per page, tagged with the uploader *uid*.

    The slot counter now restarts per page (the original let it grow across
    pages; harmless since names are read in insertion order, but confusing).
    """
    tblName = 'lab_web_pages'
    featureAttrs = ('tabid', 'pageStartTime', 'pageid', 'pagetitle',
                    'pageOnContentLoad', 'pageOnLoad', 'origin')

    for jiv in json['pages']:
        keytypevals, values = _collect_present_keys(jiv['pageNode'], featureAttrs)
        keytypevals[len(featureAttrs)] = 'uid'
        values.append(uid)
        renameArrayItem(keytypevals, 'pageid', 'id')
        attrsInJson, typesInJson = toCommaStringDict(keytypevals)
        dbinsert(tblName, attrsInJson, typesInJson, cursor, values, conn)


## Helper Functions


def dbinsert(tblName, fields, fieldTypes, cursor, values, conn):
    """Parameterized INSERT: *fieldTypes* is the matching '%s,...' list.

    Values go through DB-API parameters; table/column names are internal
    constants, not user input.
    """
    sql_command = "insert into " + tblName + " (" + fields + ") values (" + fieldTypes + ")"
    cursor.execute(sql_command, values)
    conn.commit()


def getMaxId(tblName, cursor, conn):
    """Return (as str) the largest id in *tblName* — the row just inserted."""
    sql = "select max(id) from " + tblName
    cursor.execute(sql)
    results = cursor.fetchall()
    return str(results[0][0])


def isJsonKey(json, tisKey):
    """Return True when *tisKey* is a top-level key of dict *json*.

    The original looped over items() with an unreachable ``break`` after
    ``return``; a plain membership test is equivalent.
    """
    return tisKey in json


def appendJsonKey(json, key, vals, values, cntattr):
    """If *key* exists in *json*, record its name at slot *cntattr* and append
    its value; otherwise leave both collections untouched."""
    if isJsonKey(json, key):
        vals[cntattr] = str(key)
        values.append(json[key])
    return vals, values


def toCommaStringDict(keytypevals):
    """Build ('`col1`,`col2`,...', '%s,%s,...') from the collected names.

    Returns ('', '') for an empty input, matching the original trailing-comma
    trimming behavior.
    """
    names = ','.join('`' + keytypevals[key] + '`' for key in keytypevals)
    placeholders = ','.join('%s' for _ in keytypevals)
    return names, placeholders


def renameArrayItem(arr, frm, to):
    """Replace value *frm* with *to* wherever it appears among dict values.

    Kept best-effort: comparing against exotic values may raise, in which
    case that slot is skipped (was ``except: dummy = 0``).
    """
    for key in arr:
        try:
            if arr[key] == frm:
                arr[key] = to
        except Exception:
            pass
    return arr


def appendJsonKeyConcat(json, key, vals, values, cntattr):
    """Like appendJsonKey, but joins a list value into one space-separated
    string (stripping at each step, exactly as the original did)."""
    if isJsonKey(json, key):
        joined = ''
        for item in json[key]:
            joined = (joined + ' ' + item).strip()
        vals[cntattr] = str(key)
        values.append(joined)
    return vals, values
32.570313
133
0.71444
861
8,338
6.835075
0.188153
0.050977
0.017672
0.011555
0.435174
0.397451
0.379269
0.300255
0.268479
0.239932
0
0.023302
0.150756
8,338
256
134
32.570313
0.807796
0.039938
0
0.457286
0
0
0.160882
0.017416
0
0
0
0
0
1
0.060302
false
0
0
0
0.095477
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebc8ad73bf53731e4e6de57101a7b654d219529d
21,541
py
Python
lingvo/core/beam_search_helper_test.py
allenwang28/lingvo
26d3d6672d3f46d8f281c2aa9f57166ef6296738
[ "Apache-2.0" ]
2,611
2018-10-16T20:14:10.000Z
2022-03-31T14:48:41.000Z
lingvo/core/beam_search_helper_test.py
allenwang28/lingvo
26d3d6672d3f46d8f281c2aa9f57166ef6296738
[ "Apache-2.0" ]
249
2018-10-27T06:02:29.000Z
2022-03-30T18:00:39.000Z
lingvo/core/beam_search_helper_test.py
allenwang28/lingvo
26d3d6672d3f46d8f281c2aa9f57166ef6296738
[ "Apache-2.0" ]
436
2018-10-25T05:31:45.000Z
2022-03-31T07:26:03.000Z
# Lint as: python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for beam_search_helper.""" from absl.testing import parameterized import lingvo.compat as tf from lingvo.core import beam_search_helper from lingvo.core import py_utils from lingvo.core import test_utils import numpy as np def GetBeamSearchHelperResults(sess, num_hyps_per_beam, pass_seq_lengths=False, force_eos_in_top_k=False): np.random.seed(9384758) tf.random.set_seed(8274758) vocab_size = 12 src_len = 5 tgt_len = 7 src_batch_size = 2 tgt_batch_size = src_batch_size * num_hyps_per_beam p = beam_search_helper.BeamSearchHelper.Params().Set( name='bsh', target_seq_len=tgt_len, force_eos_in_top_k=force_eos_in_top_k) bs_helper = p.Instantiate() def InitBeamSearchState(unused_theta, unused_encoder_outputs, unused_num_hyps_per_beam): atten_probs = tf.constant( np.random.normal(size=(tgt_batch_size, src_len)), dtype=tf.float32) return (py_utils.NestedMap({ 'log_probs': tf.zeros([tgt_batch_size, vocab_size]), 'atten_probs': atten_probs, }), py_utils.NestedMap({'atten_probs': atten_probs})) def PreBeamSearchStepCallback(unused_theta, unused_encoder_outputs, unused_step_ids, states, unused_num_hyps_per_beam): atten_probs = tf.identity(states.atten_probs) logits = tf.random.normal([tgt_batch_size, vocab_size], seed=8273747) return (py_utils.NestedMap({ 'atten_probs': 
atten_probs, 'log_probs': logits }), states) def PostBeamSearchStepCallback(unused_theta, unused_encoder_outputs, unused_new_step_ids, states): return states src_enc = tf.random.normal([src_len, src_batch_size, 8], seed=982774838) src_enc_padding = tf.constant( [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 1.0], [1.0, 1.0]], dtype=tf.float32) encoder_outputs = py_utils.NestedMap(encoded=src_enc, padding=src_enc_padding) if pass_seq_lengths: encoder_outputs['seq_lengths'] = tf.constant([4, 3], dtype=tf.int32) theta = py_utils.NestedMap() decoder_output = bs_helper.BeamSearchDecode(theta, encoder_outputs, num_hyps_per_beam, InitBeamSearchState, PreBeamSearchStepCallback, PostBeamSearchStepCallback) topk_ids, topk_lens, topk_scores = sess.run([ decoder_output.topk_ids, decoder_output.topk_lens, decoder_output.topk_scores ]) return topk_ids, topk_lens, topk_scores class BeamSearchHelperTest(test_utils.TestCase, parameterized.TestCase): # TODO(yonghui): Add more thorough tests. def testBeamSearchHelper(self): with self.session(use_gpu=False) as sess: topk_ids, topk_lens, topk_scores = GetBeamSearchHelperResults( sess, num_hyps_per_beam=3) print(np.array_repr(topk_ids)) print(np.array_repr(topk_lens)) print(np.array_repr(topk_scores)) expected_topk_ids = [[4, 3, 4, 3, 2, 0, 0], [4, 3, 11, 2, 0, 0, 0], [4, 3, 6, 2, 0, 0, 0], [6, 0, 4, 6, 6, 11, 2], [6, 0, 4, 6, 1, 2, 0], [6, 0, 4, 6, 6, 2, 0]] expected_topk_lens = [5, 4, 4, 7, 6, 6] expected_topk_scores = [[8.27340603, 6.26949024, 5.59490776], [9.74691486, 8.46679497, 7.14809656]] self.assertAllEqual(expected_topk_ids, topk_ids.tolist()) self.assertAllEqual(expected_topk_lens, topk_lens.tolist()) self.assertAllClose(expected_topk_scores, topk_scores) def testBeamSearchHelperHypsOne(self): with self.session(use_gpu=False) as sess: topk_ids, topk_lens, topk_scores = GetBeamSearchHelperResults( sess, num_hyps_per_beam=1) print(np.array_repr(topk_ids)) print(np.array_repr(topk_lens)) print(np.array_repr(topk_scores)) 
expected_topk_ids = [[9, 2, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]] expected_topk_lens = [2, 0] expected_topk_scores = [[3.778749], [0.0]] self.assertAllEqual(expected_topk_ids, topk_ids.tolist()) self.assertAllEqual(expected_topk_lens, topk_lens.tolist()) self.assertAllClose(expected_topk_scores, topk_scores) def testBeamSearchHelperWithSeqLengths(self): with self.session(use_gpu=False) as sess: topk_ids, topk_lens, topk_scores = GetBeamSearchHelperResults( sess, num_hyps_per_beam=3, pass_seq_lengths=True) print(np.array_repr(topk_ids)) print(np.array_repr(topk_lens)) print(np.array_repr(topk_scores)) expected_topk_ids = [[4, 3, 4, 3, 2, 0, 0], [4, 3, 11, 2, 0, 0, 0], [4, 3, 6, 2, 0, 0, 0], [6, 0, 4, 6, 6, 11, 2], [6, 0, 4, 6, 1, 2, 0], [6, 0, 4, 6, 6, 2, 0]] expected_topk_lens = [5, 4, 4, 7, 6, 6] expected_topk_scores = [[8.27340603, 6.26949024, 5.59490776], [9.74691486, 8.46679497, 7.14809656]] self.assertAllEqual(expected_topk_ids, topk_ids.tolist()) self.assertAllEqual(expected_topk_lens, topk_lens.tolist()) self.assertAllClose(expected_topk_scores, topk_scores) def testBeamSearchHelperForceEos(self): with self.session(use_gpu=False) as sess: topk_ids, topk_lens, topk_scores = GetBeamSearchHelperResults( sess, num_hyps_per_beam=3, force_eos_in_top_k=True) print(np.array_repr(topk_ids)) print(np.array_repr(topk_lens)) print(np.array_repr(topk_scores)) expected_topk_ids = [ [4, 3, 11, 6, 9, 3, 2], [4, 3, 11, 6, 9, 7, 2], [4, 3, 4, 1, 4, 1, 2], [6, 0, 4, 6, 6, 11, 2], [6, 0, 4, 6, 3, 3, 2], [6, 0, 4, 6, 1, 2, 0], ] expected_topk_lens = [7, 7, 7, 7, 7, 6] expected_topk_scores = [[10.576365, 9.345996, 9.125197], [9.746915, 8.905771, 8.466795]] self.assertAllEqual(expected_topk_ids, topk_ids.tolist()) self.assertAllEqual(expected_topk_lens, topk_lens.tolist()) self.assertAllClose(expected_topk_scores, topk_scores) @parameterized.named_parameters( ('eos_valid_in_topk', 100.0, True), ('eos_valid_not_in_topk', 100.0, False), ('eos_not_valid_in_topk', 0.5, True), 
('eos_not_valid_not_in_topk', 0.5, False), ) def testBeamSearchForceEosInTopK(self, valid_eos_max_logit_delta, force_eos_in_top_k): with self.session() as sess: vocab_size = 300 tgt_len = 100 num_hyps_per_beam = 3 src_batch_size = 2 tgt_batch_size = src_batch_size * num_hyps_per_beam p = beam_search_helper.BeamSearchHelper.Params().Set( name='bsh', target_seq_len=tgt_len, num_hyps_per_beam=num_hyps_per_beam, beam_size=100000.0, # Beam search until the end. valid_eos_max_logit_delta=valid_eos_max_logit_delta, force_eos_in_top_k=force_eos_in_top_k, ) bs_helper = p.Instantiate() def InitBeamSearchCallBack(unused_theta, unused_encoder_outputs, unused_num_hyps_per_beam): return py_utils.NestedMap( log_probs=tf.zeros([tgt_batch_size, vocab_size]), atten_probs=tf.zeros([tgt_batch_size, 0])), py_utils.NestedMap() def PreBeamSearchStepCallback(unused_theta, unused_encoder_outputs, unused_step_ids, states, unused_num_hyps_per_beam): # Same probs for each id. logits = tf.zeros([tgt_batch_size, vocab_size]) # Except eos is slightly lower prob. 
logits = logits - 1.0 * tf.expand_dims( tf.one_hot(p.target_eos_id, vocab_size), 0) return py_utils.NestedMap( atten_probs=tf.zeros([tgt_batch_size, 0]), log_probs=logits), states def PostBeamSearchStepCallback(unused_theta, unused_encoder_outputs, unused_new_step_ids, states): return states encoder_outputs = py_utils.NestedMap( seq_lengths=tf.zeros([src_batch_size], dtype=tf.int32)) theta = py_utils.NestedMap() beam_search_output = bs_helper.BeamSearchDecode( theta, encoder_outputs, init_beam_search_state=InitBeamSearchCallBack, pre_beam_search_step_callback=PreBeamSearchStepCallback, post_beam_search_step_callback=PostBeamSearchStepCallback) topk_lens = sess.run(beam_search_output.topk_lens) if not force_eos_in_top_k or valid_eos_max_logit_delta < 1.0: self.assertAllEqual(topk_lens, np.zeros_like(topk_lens)) else: self.assertAllGreater(topk_lens, 0) @parameterized.named_parameters( # eos score is too low to terminate # 1 hyp terminated at first frame by eoc, and then two other # terminated at second frame by eoc ('last_chunk_eoc_in_topk', True, True, -10., [1, 2, 2, 1, 2, 2], [[-1., -1., -1.], [-1., -1., -1.]]), # Not last chunk or not forcing in topk, eoc can not terminate. # eos score is low, can not terminate either ('last_chunk_eoc_not_in_topk1', True, False, -10., [0, 0, 0, 0, 0, 0], [[-0., -0., -0.], [-0., -0., -0.]]), ('last_chunk_eoc_not_in_topk2', False, True, -10., [0, 0, 0, 0, 0, 0], [[-0., -0., -0.], [-0., -0., -0.]]), ('last_chunk_eoc_not_in_topk3', False, False, -10., [0, 0, 0, 0, 0, 0], [[-0., -0., -0.], [-0., -0., -0.]]), # eos score is high and can terminate # 1 hyp terminated at first frame by eos, and then two other # terminated at second frame by eos ('last_chunk_eoc_not_in_topk_eos_in_top_k', False, False, 1., [1, 2, 2, 1, 2, 2], [[1., 1., 1.], [1., 1., 1.]]), # both can terminate at each step, use the lower score. 
('last_chunk_eoc_in_topk_eos_in_top_k', True, True, 1., [1, 2, 2, 1, 2, 2], [[-1., -1., -1.], [-1., -1., -1.]]), ) def testBeamSearchForceLastChunkEocInTopK(self, is_last_chunk, force_last_chunk_eoc_in_top_k, eos_score, expected_topk_lens, expected_topk_scores): with self.session() as sess: vocab_size = 30 tgt_len = 10 num_hyps_per_beam = 3 src_batch_size = 2 tgt_batch_size = src_batch_size * num_hyps_per_beam p = beam_search_helper.BeamSearchHelper.Params().Set( name='bsh', target_eoc_id=0, target_seq_len=tgt_len, num_hyps_per_beam=num_hyps_per_beam, beam_size=100000.0, # Beam search until the end. force_last_chunk_eoc_in_top_k=force_last_chunk_eoc_in_top_k, ) bs_helper = p.Instantiate() def InitBeamSearchCallBack(unused_theta, unused_encoder_outputs, unused_num_hyps_per_beam): return py_utils.NestedMap( log_probs=tf.zeros([tgt_batch_size, vocab_size]), atten_probs=tf.zeros([tgt_batch_size, 0]), is_last_chunk=tf.zeros([tgt_batch_size], tf.bool)), py_utils.NestedMap() def PreBeamSearchStepCallback(unused_theta, unused_encoder_outputs, unused_step_ids, states, unused_num_hyps_per_beam): # Same probs for each id. logits = tf.zeros([tgt_batch_size, vocab_size]) # Except eoc has slightly lower score. 
logits = logits - 1.0 * tf.expand_dims( tf.one_hot(p.target_eoc_id, vocab_size), 0) # eos has very low score (can not terminate by eos) logits = logits + eos_score * tf.expand_dims( tf.one_hot(p.target_eos_id, vocab_size), 0) return py_utils.NestedMap( atten_probs=tf.zeros([tgt_batch_size, 0]), log_probs=logits, is_last_chunk=tf.fill([tgt_batch_size], value=is_last_chunk)), states def PostBeamSearchStepCallback(unused_theta, unused_encoder_outputs, unused_new_step_ids, states): return states encoder_outputs = py_utils.NestedMap( seq_lengths=tf.zeros([src_batch_size], dtype=tf.int32)) theta = py_utils.NestedMap() beam_search_output = bs_helper.BeamSearchDecode( theta, encoder_outputs, init_beam_search_state=InitBeamSearchCallBack, pre_beam_search_step_callback=PreBeamSearchStepCallback, post_beam_search_step_callback=PostBeamSearchStepCallback) topk_lens, topk_scores = sess.run( [beam_search_output.topk_lens, beam_search_output.topk_scores]) self.assertAllEqual(topk_lens, expected_topk_lens) self.assertAllClose(topk_scores, expected_topk_scores, atol=1e-6) def testCustomStepIds(self): with self.session(use_gpu=False): np.random.seed(9384758) tf.random.set_seed(8274758) vocab_size = 12 src_len = 5 tgt_len = 7 num_hyps_per_beam = 3 src_batch_size = 2 tgt_batch_size = src_batch_size * num_hyps_per_beam p = beam_search_helper.BeamSearchHelper.Params().Set( name='bsh', target_seq_len=tgt_len) bs_helper = p.Instantiate() def InitBeamSearchState(unused_theta, unused_encoder_outputs, unused_num_hyps_per_beam): atten_probs = tf.constant( np.random.normal(size=(tgt_batch_size, src_len)), dtype=tf.float32) return (py_utils.NestedMap({ 'log_probs': tf.zeros([tgt_batch_size, vocab_size]), 'atten_probs': atten_probs, 'step_ids': tf.zeros([tgt_batch_size, 1], dtype=tf.int32) }), py_utils.NestedMap({'atten_probs': atten_probs})) def PreBeamSearchStepCallback(unused_theta, unused_encoder_outputs, unused_step_ids, states, unused_num_hyps_per_beam): atten_probs = 
tf.identity(states.atten_probs) logits = tf.random.normal([tgt_batch_size, vocab_size], seed=8273747) return (py_utils.NestedMap({ 'atten_probs': atten_probs, 'log_probs': logits }), states) def PostBeamSearchStepCallback(unused_theta, unused_encoder_outputs, unused_new_step_ids, states): return states src_enc = tf.random.normal([src_len, src_batch_size, 8], seed=982774838) src_enc_padding = tf.constant( [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 1.0], [1.0, 1.0]], dtype=tf.float32) encoder_outputs = py_utils.NestedMap( encoded=src_enc, padding=src_enc_padding) theta = py_utils.NestedMap() decoder_output = bs_helper.BeamSearchDecode(theta, encoder_outputs, num_hyps_per_beam, InitBeamSearchState, PreBeamSearchStepCallback, PostBeamSearchStepCallback) topk_ids, topk_lens, topk_scores = self.evaluate([ decoder_output.topk_ids, decoder_output.topk_lens, decoder_output.topk_scores ]) print(np.array_repr(topk_ids)) print(np.array_repr(topk_lens)) print(np.array_repr(topk_scores)) expected_topk_ids = [[4, 3, 4, 3, 2, 0, 0], [4, 3, 11, 2, 0, 0, 0], [4, 3, 6, 2, 0, 0, 0], [6, 0, 4, 6, 6, 11, 2], [6, 0, 4, 6, 1, 2, 0], [6, 0, 4, 6, 6, 2, 0]] expected_topk_lens = [5, 4, 4, 7, 6, 6] expected_topk_scores = [[8.27340603, 6.26949024, 5.59490776], [9.74691486, 8.46679497, 7.14809656]] self.assertAllEqual(expected_topk_ids, topk_ids.tolist()) self.assertAllEqual(expected_topk_lens, topk_lens.tolist()) self.assertAllClose(expected_topk_scores, topk_scores) class MergeBeamSearchOutputsTest(test_utils.TestCase): def testMergeBeamSearchOutputs(self): with self.session(): topk_scores_1 = [[1., 3., 5.], [-2., -1., 0.]] topk_ids_1 = [[[10, 11, 12], [30, 31, 32], [50, 51, 52]], [[20, 21, 22], [10, 11, 12], [0, 0, 0]]] topk_lens_1 = [[3, 3, 2], [3, 3, 0]] topk_hyps_1 = [['one', 'three', 'five'], ['minus two', 'minus one', '']] topk_1 = beam_search_helper.BeamSearchDecodeOutput( tf.constant(topk_hyps_1), tf.reshape(tf.constant(topk_ids_1), [6, -1]), tf.reshape(tf.constant(topk_lens_1), 
[-1]), tf.reshape(tf.constant(topk_scores_1), [-1]), None, None) topk_scores_2 = [[2., 4.], [-3., 0.]] topk_ids_2 = [[[20, 21, 22], [40, 41, 42]], [[30, 31, 33], [0, 0, 0]]] topk_lens_2 = [[3, 2], [3, 0]] topk_hyps_2 = [['two', 'four'], ['minus three', '']] topk_2 = beam_search_helper.BeamSearchDecodeOutput( tf.constant(topk_hyps_2), tf.reshape(tf.constant(topk_ids_2), [4, -1]), tf.reshape(tf.constant(topk_lens_2), [-1]), tf.reshape(tf.constant(topk_scores_2), [-1]), None, None) topk = beam_search_helper.MergeBeamSearchOutputs(3, [topk_1, topk_2]) self.assertIsNone(topk.topk_decoded) self.assertAllEqual([5., 4., 3., -1., -2., -3.], topk.topk_scores.eval()) self.assertAllEqual([2, 2, 3, 3, 3, 3], topk.topk_lens.eval()) self.assertAllEqual([[50, 51, 52], [40, 41, 42], [30, 31, 32], [10, 11, 12], [20, 21, 22], [30, 31, 33]], topk.topk_ids.eval()) self.assertAllEqual([[b'five', b'four', b'three'], [b'minus one', b'minus two', b'minus three']], topk.topk_hyps.eval()) class GreedySearchHelperTest(test_utils.TestCase): def testGreedySearchHelper(self): with self.session(use_gpu=False): np.random.seed(9384758) tf.random.set_seed(8274758) vocab_size = 12 src_len = 5 tgt_len = 7 src_batch_size = 2 tgt_batch_size = src_batch_size p = beam_search_helper.GreedySearchHelper.Params().Set( name='gsh', target_seq_len=tgt_len) gs_helper = p.Instantiate() def InitGreedySearchState(unused_theta, unused_encoder_outputs, unused_num_hyps_per_beam): atten_probs = tf.constant( np.random.normal(size=(tgt_batch_size, src_len)), dtype=tf.float32) return (py_utils.NestedMap({ 'log_probs': tf.zeros([tgt_batch_size, vocab_size]), 'atten_probs': atten_probs, }), py_utils.NestedMap({'atten_probs': atten_probs})) def PreGreedySearchStepCallback(unused_theta, unused_encoder_outputs, unused_step_ids, states, unused_num_hyps_per_beam): atten_probs = tf.identity(states.atten_probs) logits = tf.random.normal([tgt_batch_size, vocab_size], seed=8273747) return (py_utils.NestedMap({ 'atten_probs': 
atten_probs, 'log_probs': logits }), states) def PostGreedySearchStepCallback(unused_theta, unused_encoder_outputs, unused_new_step_ids, states): return states src_enc = tf.random.normal([src_len, src_batch_size, 8], seed=982774838) src_enc_padding = tf.constant( [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 1.0], [1.0, 1.0]], dtype=tf.float32) encoder_outputs = py_utils.NestedMap( encoded=src_enc, padding=src_enc_padding) theta = py_utils.NestedMap() (final_hyp_ids, final_hyp_lens, final_done_hyps) = gs_helper.GreedySearchDecode( theta, encoder_outputs, InitGreedySearchState, PreGreedySearchStepCallback, PostGreedySearchStepCallback) (final_hyp_ids, final_hyp_lens, final_done_hyps) = self.evaluate( [final_hyp_ids, final_hyp_lens, final_done_hyps]) print(np.array_repr(final_hyp_ids)) print(np.array_repr(final_hyp_lens)) print(np.array_repr(final_done_hyps)) expected_hyp_ids = [[2, 2, 6, 7, 1, 9, 4], [3, 9, 3, 9, 6, 5, 10]] expected_hyp_lens = [1, 7] expected_done_hyps = [True, False] self.assertAllEqual(expected_hyp_ids, final_hyp_ids.tolist()) self.assertAllEqual(expected_hyp_lens, final_hyp_lens.tolist()) self.assertAllEqual(expected_done_hyps, final_done_hyps.tolist()) if __name__ == '__main__': tf.test.main()
44.506198
80
0.611346
2,849
21,541
4.316251
0.107055
0.016752
0.020493
0.022445
0.73644
0.708547
0.6909
0.665447
0.649671
0.634382
0
0.060344
0.270693
21,541
483
81
44.598344
0.722406
0.064389
0
0.625628
0
0
0.027141
0.012129
0
0
0
0.00207
0.067839
1
0.062814
false
0.007538
0.015075
0.017588
0.125628
0.045226
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
ebc8be7f524d02d68beecb4c56841bf72041c9e6
1,148
py
Python
tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial3_Solution_b972f241.py
eduardojdiniz/CompNeuro
20269e66540dc4e802273735c97323020ee37406
[ "CC-BY-4.0", "BSD-3-Clause" ]
2,294
2020-05-11T12:05:35.000Z
2022-03-28T21:23:34.000Z
tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial3_Solution_b972f241.py
pellet/course-content
bb383857992469e0e7a9c36639ac0d05e842d9bd
[ "CC-BY-4.0", "BSD-3-Clause" ]
629
2020-05-11T15:42:26.000Z
2022-03-29T12:23:35.000Z
tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial3_Solution_b972f241.py
pellet/course-content
bb383857992469e0e7a9c36639ac0d05e842d9bd
[ "CC-BY-4.0", "BSD-3-Clause" ]
917
2020-05-11T12:47:53.000Z
2022-03-31T12:14:41.000Z
def ddm(T, x0, xinfty, lam, sig): t = np.arange(0, T, 1.) x = np.zeros_like(t) x[0] = x0 for k in range(len(t)-1): x[k+1] = xinfty + lam * (x[k] - xinfty) + sig * np.random.standard_normal(size=1) return t, x # computes equilibrium variance of ddm # returns variance def ddm_eq_var(T, x0, xinfty, lam, sig): t, x = ddm(T, x0, xinfty, lam, sig) # returns variance of the second half of the simulation # this is a hack: assumes system has settled by second half return x[-round(T/2):].var() np.random.seed(2020) # set random seed # sweep through values for lambda lambdas = np.arange(0.05, 0.95, 0.01) empirical_variances = np.zeros_like(lambdas) analytical_variances = np.zeros_like(lambdas) sig = 0.87 # compute empirical equilibrium variance for i, lam in enumerate(lambdas): empirical_variances[i] = ddm_eq_var(5000, x0, xinfty, lambdas[i], sig) # Hint: you can also do this in one line outside the loop! analytical_variances = sig**2 / (1 - lambdas**2) # Plot the empirical variance vs analytical variance with plt.xkcd(): var_comparison_plot(empirical_variances, analytical_variances)
30.210526
89
0.691638
190
1,148
4.094737
0.421053
0.041131
0.034704
0.046272
0.137532
0.068123
0
0
0
0
0
0.037594
0.189024
1,148
38
90
30.210526
0.798067
0.313589
0
0
0
0
0
0
0
0
0
0
0
1
0.1
false
0
0
0
0.2
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebca2dfaedf3312d1fbafc3bbd62cb313012d5a5
129
py
Python
yatr/__init__.py
mbodenhamer/yatr
9ebc0de3e474e38d9d79effe3e9c0fec11053024
[ "MIT" ]
null
null
null
yatr/__init__.py
mbodenhamer/yatr
9ebc0de3e474e38d9d79effe3e9c0fec11053024
[ "MIT" ]
1
2021-03-31T18:51:21.000Z
2021-03-31T18:51:21.000Z
yatr/__init__.py
mbodenhamer/yatr
9ebc0de3e474e38d9d79effe3e9c0fec11053024
[ "MIT" ]
1
2017-12-13T13:30:29.000Z
2017-12-13T13:30:29.000Z
__version__ = '0.0.11b' from .base import * from .context import * from .task import * from .env import * from .parse import *
14.333333
23
0.689922
19
129
4.473684
0.526316
0.470588
0
0
0
0
0
0
0
0
0
0.038462
0.193798
129
8
24
16.125
0.778846
0
0
0
0
0
0.054688
0
0
0
0
0
0
1
0
false
0
0.833333
0
0.833333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
ebcd555eb93d05d6fa9188af7a083640c3b0d772
2,829
py
Python
paginas/p1_vrc.py
Blake-SRTC/tv-digital-deteccion
c55798714d1d1829471e31d3d8aa612739a8193c
[ "MIT" ]
null
null
null
paginas/p1_vrc.py
Blake-SRTC/tv-digital-deteccion
c55798714d1d1829471e31d3d8aa612739a8193c
[ "MIT" ]
null
null
null
paginas/p1_vrc.py
Blake-SRTC/tv-digital-deteccion
c55798714d1d1829471e31d3d8aa612739a8193c
[ "MIT" ]
null
null
null
from tkinter import * from tkinter.font import BOLD from algoritmos.vrc import * from algoritmos.noise_1_bits import * ###################################################################################################### # Pagina 1 def tab1(root, common_img, bits_normales, bits_desglosados): pagina1 = Toplevel(root) pagina1.geometry("1200x800") pagina1.title('VRC') # TITULO label1=Label(pagina1,text='VRC', font=('Times_New_Roman',20), width=1000 , height=50, image=common_img, compound='c') label1.place(x=100, y=10) lbl_bits_normales = Label(pagina1, text='Bits: ',font=('Times_New_Roman',20, BOLD), image=common_img, compound='c', height=50) lbl_bits_normales.place(x=100, y=100) lbl_bits_codificados = Label(pagina1, text='VRC: ',font=('Times_New_Roman',20, BOLD), image=common_img, compound='c', height=50) lbl_bits_codificados.place(x=100, y=175) # Codificacion VRC b_separados = list(bits_desglosados) # Funcion / metodo VRC vrc_1 = vrc(b_separados) print('vrc') print(vrc_1) vrc_codificado = [] for i in vrc_1: y = "".join(i) vrc_codificado.append(y) # Label bits originales txt1 = 'Bits: ' for i in bits_normales: txt1 = txt1 + i + ' : ' lbl_bits_normales['text']=txt1 # Label bits condificados txt2 = 'VRC: ' for i in vrc_codificado: txt2 = txt2 + i + ' : ' lbl_bits_codificados['text']=txt2 # Funcion que envia los datos y posible ruido def transmitir(): # Tramas con posibles errores trama_errada = [] trama_errada = noise(vrc_1,'vrc') print('Grupo Transmitido') print(trama_errada) trama_errada2 = [] for i in trama_errada: y = "".join(i) trama_errada2.append(y) # Label bits condificados txt3 = 'Transmitidos: ' for i in trama_errada2: txt3 = txt3 + i + ' : ' lbl_errados['text']=txt3 # Comprobacion VRC comprobado = comprobacion_vrc(trama_errada) print(comprobado) lbl_comprobado['text']='Comprobacion: ' + comprobado #print(vrc_1) # BOTON DE TRANSMITIR btn_transmitir = Button(pagina1, text='Transmitir', font=('Times_New_Roman',20, BOLD), image=common_img, compound='c', 
height=50, width=200, command=transmitir) btn_transmitir.place(x=500, y=250) # Label del Ruido lbl_errados = Label(pagina1, text='Posible trama errada',font=('Times_New_Roman',20, BOLD), image=common_img, compound='c', height=50) lbl_errados.place(x=100, y=325) # Label Comprobacion VRC lbl_comprobado = Label(pagina1, text='Comprobacion',font=('Times_New_Roman',20, BOLD), image=common_img, compound='c', height=50) lbl_comprobado.place(x=100, y=400)
34.084337
164
0.618593
359
2,829
4.696379
0.264624
0.037367
0.042705
0.060498
0.219454
0.205813
0.205813
0.205813
0.205813
0.172005
0
0.048792
0.224814
2,829
82
165
34.5
0.720018
0.100035
0
0.038462
0
0
0.102923
0
0
0
0
0.012195
0
1
0.038462
false
0
0.076923
0
0.115385
0.096154
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
ebcd560a4b989401a8f15f7d602324e8d9dfe946
889
py
Python
tests/dna_builders_test.py
auxein/auxein
5388cb572b65aecc282f915515c35dc3b987154c
[ "Apache-2.0" ]
1
2019-05-08T14:53:27.000Z
2019-05-08T14:53:27.000Z
tests/dna_builders_test.py
auxein/auxein
5388cb572b65aecc282f915515c35dc3b987154c
[ "Apache-2.0" ]
2
2020-08-26T09:16:47.000Z
2020-10-30T16:47:03.000Z
tests/dna_builders_test.py
auxein/auxein
5388cb572b65aecc282f915515c35dc3b987154c
[ "Apache-2.0" ]
null
null
null
from unittest.mock import patch import numpy as np from auxein.population.dna_builders import UniformRandomDnaBuilder, NormalRandomDnaBuilder def test_uniform_random_dna_builder_instantiation(): builder = UniformRandomDnaBuilder(interval=(-5, 0)) assert builder.get_distribution() == 'uniform' assert len(builder.get(10)) == 10 def test_uniform_random_dna_builder_values(): builder = UniformRandomDnaBuilder() for _ in range(0, 100): dna: np.ndarray = builder.get(2) assert -1 < dna[0] < 1 assert -1 < dna[1] < 1 @patch('numpy.random.normal') def test_normal_random_dna_builder_instantiation(mock_np_normal): mock_np_normal.return_value = [0.5, -1.3] builder = NormalRandomDnaBuilder() assert builder.get_distribution() == 'normal' assert len(builder.get(2)) == 2 mock_np_normal.assert_called_once_with(0.0, 1.0, 2)
29.633333
90
0.725534
118
889
5.220339
0.364407
0.081169
0.077922
0.064935
0.097403
0.097403
0
0
0
0
0
0.037736
0.165354
889
29
91
30.655172
0.792453
0
0
0
0
0
0.035996
0
0
0
0
0
0.35
1
0.15
false
0
0.15
0
0.3
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebcf09fa50c43679cec20b34ad768687dfbb66a1
322
py
Python
Interesting Python Questions/add four integers together.py
liu-yunfei/Python
314cdc98f32f4f0de2c0904279865b944e34dd75
[ "MIT" ]
1
2020-10-08T09:29:59.000Z
2020-10-08T09:29:59.000Z
Interesting Python Questions/add four integers together.py
liu-yunfei/Python
314cdc98f32f4f0de2c0904279865b944e34dd75
[ "MIT" ]
1
2021-01-30T12:04:51.000Z
2021-01-30T12:05:37.000Z
Interesting Python Questions/add four integers together.py
liu-yunfei/Python
314cdc98f32f4f0de2c0904279865b944e34dd75
[ "MIT" ]
null
null
null
# Written by Yunfei LIU # Sep 23, 2020 # Please obey the license GPLv3 # This allows to input four integers with space between them number1,number2,number3,number4 = map(int,input().split()) # Add them up sum = number1 + number2 + number3 + number4 # Get the unit digit result = sum%10 # Output the result print(result)
24.769231
61
0.736025
50
322
4.74
0.78
0.118143
0.177215
0.236287
0
0
0
0
0
0
0
0.064151
0.177019
322
12
62
26.833333
0.830189
0.537267
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.25
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
ebd069b822c585c6ef2aa742b9a5c038a0110139
445
py
Python
math/champernowne-constant.py
qeedquan/misc_utilities
94c6363388662ac8ebbf075b9c853ce6defbb5b3
[ "MIT" ]
8
2018-10-17T18:17:25.000Z
2022-03-18T09:02:53.000Z
math/champernowne-constant.py
qeedquan/misc_utilities
94c6363388662ac8ebbf075b9c853ce6defbb5b3
[ "MIT" ]
null
null
null
math/champernowne-constant.py
qeedquan/misc_utilities
94c6363388662ac8ebbf075b9c853ce6defbb5b3
[ "MIT" ]
3
2020-07-01T13:52:42.000Z
2022-03-18T09:10:59.000Z
#!/usr/bin/env python # https://en.wikipedia.org/wiki/Champernowne_constant # sequences below are related to these constants # counts how many numbers between two 10**n and 10**(n-1) def A0(n): return (10**n - 10**(n-1))*n # http://oeis.org/A033714 # This sequence also gives the total count of digits of n below 10^n. def A033714(n): r = 1 for i in range(1, n): r += A0(i) return r for i in range(0, 32): print(i, A0(i), A033714(i))
22.25
69
0.669663
86
445
3.453488
0.593023
0.050505
0.026936
0.074074
0
0
0
0
0
0
0
0.103542
0.175281
445
19
70
23.421053
0.705722
0.6
0
0
0
0
0
0
0
0
0
0
0
1
0.222222
false
0
0
0.111111
0.444444
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
2
ebd0835b63b438a8287b71effbf6286cc7da50d9
5,393
py
Python
distributed_model.py
mknw/mask-rcnn
0e7d14abeecb208e63dc5a9f7c05dbd0419afbe7
[ "MIT" ]
null
null
null
distributed_model.py
mknw/mask-rcnn
0e7d14abeecb208e63dc5a9f7c05dbd0419afbe7
[ "MIT" ]
null
null
null
distributed_model.py
mknw/mask-rcnn
0e7d14abeecb208e63dc5a9f7c05dbd0419afbe7
[ "MIT" ]
null
null
null
from model import * from config import * from utils import * if __name__ == "__main__": ''' GPU(s) ''' gpus = tf.config.experimental.list_physical_devices('GPU') GPU_N = 3 if gpus: try: tf.config.experimental.set_visible_devices(gpus[GPU_N:], 'GPU') logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs") except RuntimeError as e: print(e) import ipdb; ipdb.set_trace() np.random.seed(420) tf.random.set_seed(420) ''' loss and gradient function. ''' # loss_object = tf.losses.SparseCategoricalCrossentropy() @tf.function def loss(model, x, y): y_ = model(x) return loss_object(y_true=y, y_pred=y_) @tf.function def smooth_l1_loss(y_true, y_pred): """Implements Smooth-L1 loss. y_true and y_pred are typically: [N, 4], but could be any shape. """ diff = tf.abs(y_true - y_pred) less_than_one = K.cast(tf.less(diff, 1.0), "float32") loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5) return loss @tf.function def grad(model, inputs, targets): with tf.GradientTape() as tape: loss_value = loss(model, inputs, targets) return loss_value, tape.gradient(loss_value, model.trainable_variables) ''' dataset and dataset iterator''' ## cifar100 is likey too small. Switching to imagenet2012 # cifar100 = tf.keras.datasets.cifar100 # (x_train, y_train), (x_test, y_test) = cifar100.load_data(label_mode='fine') import tensorflow_datasets as tfds import ipdb tfds.list_builders() imagenet2012_builder = tfds.builder("imagenet2012") train_set, test_set = imagenet2012_builder.as_dataset(split=["train", "validation"]) def onetwentyseven(x): # normalizing between 1 and -1. 
x['image'] = tf.image.resize(x['image'], size=(256, 256)) x['image'] = tf.cast(x['image'], tf.float32) / 127.5 - 1 return x train_set = train_set.shuffle(1024).map(onetwentyseven) train_set = train_set.batch(32) test_set = test_set.shuffle(1024).map(onetwentyseven) test_set = test_set.batch(32) import ipdb; ipdb.set_trace() # preprocess ''' x_train = (x_train.reshape(-1, 32, 32, 3) / 255).astype(np.float32) x_test = (x_test.reshape(-1, 32, 32, 3) / 255).astype(np.float32) # create tf.data.Dataset train_set = tf.data.Dataset.from_tensor_slices((x_train, y_train)) test_set = tf.data.Dataset.from_tensor_slices((x_test, y_test)) # now train_set and test_set are Dataset objects. # we return the dataset iterator by calling the # __iter__() method # # Alternatively, we can just iterate over the Datasets # iff eager mode is on (i.e. by default). train_set = train_set.shuffle(10000) test_set.shuffle(10000) b_train_set = train_set.batch(256) b_test_set = test_set.batch(256) ''' ''' model ''' # from config import Config from viz import * from utils import test_model class Config(object): def __init__(self): self.BATCH_SIZE=256 self.BACKBONE = 'resnet51' mycon = Config() model = ResNet((None, None, 3), 1000, mycon) model.build(input_shape=(256, None, None, 3)) # place correct shape from imagenet ''' initialize ''' # Reduce LR with *0.1 when plateau is detected adapt_lr = LearningRateReducer(init_lr=0.1, factor=0.1, patience=10, refractory_interval=20) # wait 20 epochs from last update loss_object = tf.losses.SparseCategoricalCrossentropy() optimizer = tf.keras.optimizers.SGD(adapt_lr.monitor(), momentum = 0.9) train_loss_results = [] train_accuracy_results = [] test_loss_results, test_acc_results = [], [] num_epochs = 300 ''' train ''' for epoch in range(num_epochs): epoch_loss_avg = tf.keras.metrics.Mean() epoch_accuracy = tf.keras.metrics.SparseCategoricalAccuracy() k = 0 optimizer = tf.keras.optimizers.SGD(adapt_lr.monitor(train_loss_results), momentum = 0.9) for batch in 
train_set: # img_btch, lab_btch, fn_btch = batch img_btch = batch['image'] lab_btch = batch['label'] loss_value, grads = grad(model, img_btch, lab_btch) optimizer.apply_gradients(zip(grads, model.trainable_variables)) epoch_loss_avg(loss_value) epoch_accuracy(lab_btch, model(img_btch)) if epoch < 1: print("Epoch {:03d}: Batch: {:03d} Loss: {:.3%}, Accuracy: {:.3%}".format(epoch, k, epoch_loss_avg.result(), epoch_accuracy.result())) k+=1 print("Trainset >> Epoch {:03d}: Loss: {:.3%}, Accuracy: {:.3%}".format(epoch, epoch_loss_avg.result(), epoch_accuracy.result())) # end epoch #if int(epoch_accuracy.result() > 70): test_loss, test_accuracy = test_model(model, test_set) test_loss_results.append(test_loss) test_acc_results.append(test_accuracy) train_loss_results.append(epoch_loss_avg.result()) train_accuracy_results.append(epoch_accuracy.result()) # import ipdb; ipdb.set_trace() if epoch % 100 == 0: fname = 'imgs/Test_Acc_Loss_IN2012_' + str(epoch) + '.png' # here we should plot metrics and loss for test too. # hence TODO: update save_plot loss_l = [train_loss_results, test_loss_results] acc_l = [train_accuracy_results, test_acc_results] save_plot(loss_l, acc_l, fname) #if train_loss_results[-1] > train_loss_results[-2]: # was if epoch == 10: # learning_rate /= 10 # optimizer = tf.keras.optimizers.SGD(lr=learning_rate, momentum=0.9) # print("Sir, we just updated the learning rate Sir.") import ipdb; ipdb.set_trace()
29.631868
139
0.708511
809
5,393
4.488257
0.284302
0.026439
0.026439
0.018728
0.226659
0.09474
0.09474
0.058937
0.017075
0
0
0.038893
0.156128
5,393
181
140
29.79558
0.758954
0.167625
0
0.067416
0
0
0.070668
0.007094
0
0
0
0.005525
0
1
0.05618
false
0
0.11236
0
0.224719
0.044944
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebd099ec453489fa4bd88bc8afe52c94a368779d
1,862
py
Python
ReverseInteger/ReverseInteger.py
pauldoust/Competitive-Programming
e824485d0bb3a83b7fba3a80f17639771f32e1e7
[ "Apache-2.0" ]
null
null
null
ReverseInteger/ReverseInteger.py
pauldoust/Competitive-Programming
e824485d0bb3a83b7fba3a80f17639771f32e1e7
[ "Apache-2.0" ]
null
null
null
ReverseInteger/ReverseInteger.py
pauldoust/Competitive-Programming
e824485d0bb3a83b7fba3a80f17639771f32e1e7
[ "Apache-2.0" ]
null
null
null
# https://leetcode.com/problems/reverse-integer/ # Runtime: 32 ms, faster than 96.20% of Python3 online submissions for Reverse Integer. # Runtime: 36 ms, faster than 84.28% of Python3 online submissions for Reverse Integer. # Runtime: 44 ms, faster than 22.92% of Python3 online submissions for Reverse Integer. class Solution: def reverse1(self, x: int): if x >= 0: strX = str(x)[::-1] else: strX = "-"+ str(x)[::-1][:-1] result = int(strX) bits = result.bit_length() if bits > 31: result = 0 return result def reverse2(self, x: int): if x >= 0: strX = str(x)[::-1] else: # This modification increase the performance from 44ms to 36 ms strX = "-" + str(- x)[::-1] result = int(strX) bits = result.bit_length() if bits > 31: result = 0 def reverse3(self, x: int): if x >= 0: strX = str(x)[::-1] else: strX = "-" + str(- x)[::-1] result = int(strX) # This modification increase the performance from 36 ms to 32 ms if result in range(-2**31, 2**31 - 1 ): return result return 0 class Solution: def reverse(self, x: int): if x >= 0: strX = str(x)[::-1] else: # This modification increase the performance from 44ms to 36 ms strX = "-" + str(- x)[::-1] result = int(strX) # bits = result.bit_length() # if bits > 31: # result = 0 if result in range(-2**31, 2**31 - 1 ): return result return 0 x = -1 # x = 1563847412 # print("s" , x.bit_length()) print ( Solution().reverse(x))
25.506849
87
0.486574
232
1,862
3.887931
0.25431
0.019956
0.070953
0.079823
0.733925
0.733925
0.687361
0.625277
0.514412
0.514412
0
0.074758
0.389366
1,862
73
88
25.506849
0.718558
0.318475
0
0.829268
0
0
0.003182
0
0
0
0
0
0
1
0.097561
false
0
0
0
0.268293
0.02439
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
ebd3c4127f7770e12f634b24cbfe903d62dc536d
57
py
Python
LUCI/LuciConvenience.py
benjaminvigneron/LUCI
227bb1e0b87aa06012b2809d9af7767a261e0f00
[ "MIT" ]
null
null
null
LUCI/LuciConvenience.py
benjaminvigneron/LUCI
227bb1e0b87aa06012b2809d9af7767a261e0f00
[ "MIT" ]
null
null
null
LUCI/LuciConvenience.py
benjaminvigneron/LUCI
227bb1e0b87aa06012b2809d9af7767a261e0f00
[ "MIT" ]
null
null
null
""" A hodge-podge of convenience functions for luci """
11.4
47
0.701754
8
57
5
1
0
0
0
0
0
0
0
0
0
0
0
0.175439
57
4
48
14.25
0.851064
0.824561
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
ebd48457bf3399a79fee4cd86c9c5e15db08cd7f
235
py
Python
Practice/Beginner/Hard Cash(CASH)/solution.py
DipanjanDasIT/CodeChefCodes
f3d6c9ee6598b1c873d614c4aff005c2971a4fc0
[ "MIT" ]
null
null
null
Practice/Beginner/Hard Cash(CASH)/solution.py
DipanjanDasIT/CodeChefCodes
f3d6c9ee6598b1c873d614c4aff005c2971a4fc0
[ "MIT" ]
null
null
null
Practice/Beginner/Hard Cash(CASH)/solution.py
DipanjanDasIT/CodeChefCodes
f3d6c9ee6598b1c873d614c4aff005c2971a4fc0
[ "MIT" ]
null
null
null
testcases = int(input()) for _ in range(testcases): main_details = list(map(int, input().split())) coin_details = list(map(lambda x: int(x)%main_details[-1], input().split())) print(sum(coin_details)%main_details[-1])
39.166667
81
0.659574
34
235
4.382353
0.5
0.221477
0.187919
0
0
0
0
0
0
0
0
0.01
0.148936
235
5
82
47
0.735
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.2
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
ebd4b3b718d780d64a2bf799562bbb822aab0940
16,347
py
Python
testscripts/RDKB/component/WAN_MANAGER/TS_WANMANAGER_DSL_CheckInternetConnectivity_PrimaryWanForDSLAndWANOE.py
rdkcmf/rdkb-tools-tdkb
9f9c3600cd701d5fc90ac86a6394ebd28d49267e
[ "Apache-2.0" ]
null
null
null
testscripts/RDKB/component/WAN_MANAGER/TS_WANMANAGER_DSL_CheckInternetConnectivity_PrimaryWanForDSLAndWANOE.py
rdkcmf/rdkb-tools-tdkb
9f9c3600cd701d5fc90ac86a6394ebd28d49267e
[ "Apache-2.0" ]
null
null
null
testscripts/RDKB/component/WAN_MANAGER/TS_WANMANAGER_DSL_CheckInternetConnectivity_PrimaryWanForDSLAndWANOE.py
rdkcmf/rdkb-tools-tdkb
9f9c3600cd701d5fc90ac86a6394ebd28d49267e
[ "Apache-2.0" ]
null
null
null
########################################################################## # If not stated otherwise in this file or this component's Licenses.txt # file the following copyright and licenses apply: # # Copyright 2021 RDK Management # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ########################################################################## ''' <?xml version="1.0" encoding="UTF-8"?><xml> <id/> <version>2</version> <name>TS_WANMANAGER_DSL_CheckInternetConnectivity_PrimaryWanForDSLAndWANOE</name> <primitive_test_id/> <primitive_test_name>wanmanager_DoNothing</primitive_test_name> <primitive_test_version>1</primitive_test_version> <status>FREE</status> <synopsis>With active DSL connection ,primary WAN Type and priority as 1 only DSL is expected to have active connection</synopsis> <groups_id/> <execution_time>30</execution_time> <long_duration>false</long_duration> <advanced_script>false</advanced_script> <remarks/> <skip>false</skip> <box_types> <box_type>Broadband</box_type> </box_types> <rdk_versions> <rdk_version>RDKB</rdk_version> </rdk_versions> <test_cases> <test_case_id>TC_WANMANAGER_48</test_case_id> <test_objective>This test case is to check with active DSL connection ,primary WAN Type and priority as 1 only DSL is expected to have active connection</test_objective> <test_type>Positive</test_type> <test_setup>Broadband</test_setup> <pre_requisite>1.Ccsp Components should be in a running state else invoke cosa_start.sh manually that includes all the ccsp 
components and TDK Component 2.TDK Agent should be in running state or invoke it through StartTdk.sh script 3.WAN Manager should be enabled 4.DSL Line Should be Enabled. </pre_requisite> <api_or_interface_used>none</api_or_interface_used> <input_parameters>Device.X_RDK_WanManager.CPEInterface.1.Wan.Type Device.X_RDK_WanManager.CPEInterface.2.Wan.Type Device.X_RDK_WanManager.CPEInterface.1.Wan.Priority Device.X_RDK_WanManager.CPEInterface.2.Wan.Priority</input_parameters> <automation_approch>1.Load the module 2.Check for active DSL line connection and disable WANOE if enabled 3.Get the default WANOE,DSL WAN type and priority 4.Set WANOE,DSL WAN type and priority as primary,secondary,0,1 respectively 5.reboot the device 6.Check if DSL Line is active and WANOE is inactive 7.Revert the set values 8.Unload the module</automation_approch> <expected_output>with WANOE,DSL WAN type and priority as primary,secondary,0,1 respectively ,DSL Line is expected to be active and WANOE is inactive</expected_output> <priority>High</priority> <test_stub_interface>WAN_MANAGER</test_stub_interface> <test_script>TS_WANMANAGER_DSL_CheckInternetConnectivity_PrimaryWanForDSLAndWANOE</test_script> <skipped>No</skipped> <release_version>M89</release_version> <remarks>None</remarks> </test_cases> <script_tags/> </xml> ''' # tdklib library,which provides a wrapper for tdk testcase script import tdklib; from tdkbVariables import *; from time import sleep; from WanManager_Utility import *; obj = tdklib.TDKScriptingLibrary("tdkbtr181","RDKB"); obj1 = tdklib.TDKScriptingLibrary("sysutil","1"); tadobj = tdklib.TDKScriptingLibrary("tad","1"); #IP and Port of box, No need to change, #This will be replaced with correspoing Box Ip and port while executing script ip = <ipaddress> port = <port> obj.configureTestCase(ip,port,'TS_WANMANAGER_DSL_CheckInternetConnectivity_PrimaryWanForDSLAndWANOE'); obj1.configureTestCase(ip,port,'TS_WANMANAGER_DSL_CheckInternetConnectivity_PrimaryWanForDSLAndWANOE'); 
tadobj.configureTestCase(ip,port,'TS_WANMANAGER_DSL_CheckInternetConnectivity_PrimaryWanForDSLAndWANOE'); #Get the result of connection with test component and DUT loadmodulestatus =obj.getLoadModuleResult(); loadmodulestatus1 =obj1.getLoadModuleResult(); loadmodulestatus2 = tadobj.getLoadModuleResult(); print "[LIB LOAD STATUS] : %s" %loadmodulestatus; print "[LIB LOAD STATUS] : %s" %loadmodulestatus1; print "[LIB LOAD STATUS] : %s" %loadmodulestatus2; if "SUCCESS" in (loadmodulestatus.upper() and loadmodulestatus1.upper() and loadmodulestatus2.upper()): #Set the result status of execution obj.setLoadModuleStatus("SUCCESS"); obj1.setLoadModuleStatus("SUCCESS"); tadobj.setLoadModuleStatus("SUCCESS"); revertWANOE =0; revertpriority =0; objReturned,dsl_wan,active = getDSLWANStatus(tadobj,1); if active == 0: expectedresult="SUCCESS"; actualresult = "SUCCESS"; print "******performing a pre-requisite where in WANOE inteface is expected to be disabled ***"; tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Get'); tdkTestObj.addParameter("ParamName","Device.X_RDK_WanManager.CPEInterface.2.Wan.Enable"); tdkTestObj.executeTestCase(expectedresult); actualresult = tdkTestObj.getResult(); details = tdkTestObj.getResultDetails().strip().replace("\\n", ""); if expectedresult in actualresult and details == "true": print "WANOE is enabled and disabling it "; tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Set'); result,tdkTestObj = EnableDisableInterafce(2,"false",tdkTestObj); revertWANOE = 1; if expectedresult in actualresult: paramList = ["Device.X_RDK_WanManager.CPEInterface.1.Wan.Type","Device.X_RDK_WanManager.CPEInterface.2.Wan.Type"]; defaults = []; flag =0; print "The Default WAN Values are being fetched"; for item in paramList: tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Get'); tdkTestObj.addParameter("ParamName",item); #Execute the test case in DUT tdkTestObj.executeTestCase(expectedresult); actualresult = tdkTestObj.getResult(); details = 
tdkTestObj.getResultDetails().strip().replace("\\n", ""); if expectedresult in actualresult: defaults.append(details); else: flag = 1; break; if flag == 0: #Set the result status of execution tdkTestObj.setResultStatus("SUCCESS"); print "TEST STEP 2: Get the Default WAN and priority values for DSL and WANOE"; print "EXPECTED RESULT 2: Should get the default WAN and priority values for DSL and WANOE" print "ACTUAL RESULT 2 :The defaults for %s are %s:"%(paramList,defaults); #Get the result of execution print "[TEST EXECUTION RESULT] : SUCCESS"; tdkTestObj_Get = obj.createTestStep('TDKB_TR181Stub_Get'); tdkTestObj_Set = obj.createTestStep('TDKB_TR181Stub_Set'); revertpriority,defPriority,actualresult = MakePriorityUnEqual(tdkTestObj_Get,tdkTestObj_Set); if expectedresult in actualresult: for items in paramList: tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Set'); tdkTestObj.addParameter("ParamName",items) tdkTestObj.addParameter("ParamValue","Primary"); tdkTestObj.addParameter("Type","string"); expectedresult= "SUCCESS"; #Execute testcase on DUT tdkTestObj.executeTestCase(expectedresult); actualresult = tdkTestObj.getResult(); Setresult = tdkTestObj.getResultDetails(); if expectedresult in actualresult: print "Set operation successful for :" ,items; else: flag =1; break; if flag == 1: tdkTestObj.setResultStatus("FAILURE"); print "TEST STEP 3: Setting WAN type as Primary for DSL and WANOE"; print "EXPECTED RESULT 3: Should set WAN type as Primary for DSL and WANOE"; print "ACTUAL RESULT 3: set operation failed for %s"%item; print "[TEST EXECUTION RESULT] : FAILURE"; else: #Set the result status of execution tdkTestObj.setResultStatus("SUCCESS"); print "TEST STEP 3: Setting WAN type as Primary for DSL and WANOE"; print "EXPECTED RESULT 3: Should set WAN type as Primary for DSL and WANOE"; print "ACTUAL RESULT 3: Set operation is successful"; #Get the result of execution print "[TEST EXECUTION RESULT] : SUCCESS"; print "Rebooting the device to verify the set 
operations done are working as expected"; obj1.initiateReboot();(); sleep(300); tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Get'); tdkTestObj.addParameter("ParamName","Device.X_RDK_WanManager.CPEInterface.1.Wan.ActiveLink"); #Execute the test case in DUT tdkTestObj.executeTestCase(expectedresult); actualresult1 = tdkTestObj.getResult(); activeDSL = tdkTestObj.getResultDetails().strip().replace("\\n", ""); tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Get'); tdkTestObj.addParameter("ParamName","Device.X_RDK_WanManager.CPEInterface.2.Wan.ActiveLink"); #Execute the test case in DUT tdkTestObj.executeTestCase(expectedresult); actualresult2 = tdkTestObj.getResult(); activeWANOE = tdkTestObj.getResultDetails().strip().replace("\\n", ""); if expectedresult in (actualresult1 and actualresult2): if activeDSL == "true" and activeWANOE == "false": #Set the result status of execution tdkTestObj.setResultStatus("SUCCESS"); print "TEST STEP 4: Get the Active link status of DSL and WANOE"; print "EXPECTED RESULT 4: Active link status of DSL is expected to be true and WANOE as false"; print "ACTUAL RESULT 4: DSL status :%s, WANOE status : %s" %(activeDSL,activeWANOE); #Get the result of execution print "[TEST EXECUTION RESULT] : SUCCESS"; else: #Set the result status of execution tdkTestObj.setResultStatus("FAILURE"); print "TEST STEP 4: Get the Active link status of DSL and WANOE"; print "EXPECTED RESULT 4: Active link status of DSL is expected to be true and WANOE as false"; print "ACTUAL RESULT 4: DSL status :%s, WANOE status : %s" %(activeDSL,activeWANOE); #Get the result of execution print "[TEST EXECUTION RESULT] : FAILURE"; else: #Set the result status of execution tdkTestObj.setResultStatus("FAILURE"); print "TEST STEP 4: Get the Active link status of DSL and WANOE"; print "EXPECTED RESULT 4: Active link status of DSL is expected to be true and WANOE as false"; #Get the result of execution print "[TEST EXECUTION RESULT] : FAILURE"; revflg =0; index =0; for item in 
paramList: tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Set'); tdkTestObj.addParameter("ParamName",item) tdkTestObj.addParameter("ParamValue",defaults[index]); tdkTestObj.addParameter("Type","string"); expectedresult= "SUCCESS"; #Execute testcase on DUT tdkTestObj.executeTestCase(expectedresult); actualresult = tdkTestObj.getResult(); Setresult = tdkTestObj.getResultDetails(); index = index +1; if expectedresult in actualresult: print "Reverting %s"%item; else: revflg =1; if revflg == 0: tdkTestObj.setResultStatus("SUCCESS"); print "revert operation sucessful"; print "rebooting the device to apply the set operations done as apart of revert"; obj1.initiateReboot();(); sleep(300); else: tdkTestObj.setResultStatus("FAILURE"); print "revert operation failed"; else: print "[TEST EXECUTION RESULT] : FAILURE"; print "Failed to make the priorities unequal"; else: #Set the result status of execution tdkTestObj.setResultStatus("FAILURE"); print "TEST STEP 2: Get the Default WAN and priority values for DSL and WANOE"; print "EXPECTED RESULT 2: Should get the default WAN and priority values for DSL and WANOE" print "ACTUAL RESULT 2 :The defaults for %s are %s:"%(paramList,defaults); #Get the result of execution print "[TEST EXECUTION RESULT] : FAILURE"; print "#####Performing revert operation for interafce disabling and priorities if set######"; #Revert operations if revertWANOE == 1: tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Set'); result,tdkTestObj = EnableDisableInterafce(2,"true",tdkTestObj); if expectedresult in result: tdkTestObj.setResultStatus("SUCCESS"); else: tdkTestObj.setResultStatus("FAILURE"); print "Enabling the WNOE interafce failed"; if revertpriority ==1: tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Set'); tdkTestObj.addParameter("ParamName","Device.X_RDK_WanManager.CPEInterface.2.Wan.Priority"); tdkTestObj.addParameter("ParamValue",defPriority[1]); tdkTestObj.addParameter("Type","int"); expectedresult= "SUCCESS"; #Execute testcase on DUT 
tdkTestObj.executeTestCase(expectedresult); result = tdkTestObj.getResult(); Setresult = tdkTestObj.getResultDetails(); index =index +1; if expectedresult in result: print "Reverted the unequal priority"; tdkTestObj.setResultStatus("SUCCESS"); else: tdkTestObj.setResultStatus("FAILURE"); print "failed to revert the changed priority"; else: objReturned.setResultStatus("FAILURE"); print "*********DSL is not active please have a active connection********"; obj.unloadModule("tdkbtr181"); obj1.unloadModule("sysutil"); tadobj.unloadModule("tad"); else: print "Failed to load module"; obj.setLoadModuleStatus("FAILURE"); obj1.setLoadModuleStatus("FAILURE"); tadobj.setLoadModuleStatus("FAILURE");
53.596721
174
0.603475
1,652
16,347
5.891041
0.184625
0.013152
0.023736
0.033909
0.561241
0.512022
0.490341
0.486334
0.433313
0.408138
0
0.013464
0.304827
16,347
304
175
53.773026
0.842925
0.089986
0
0.53125
0
0
0.280409
0.041591
0
0
0
0
0
0
null
null
0
0.020833
null
null
0.244792
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
ebd4fa6e495803588cbdc17f69290991cb8d3593
2,409
py
Python
tests/conftest.py
frnsys/maup
2cd12184e3a0d60128c4991e7e8eb706e3a227bd
[ "MIT" ]
50
2019-05-10T22:38:14.000Z
2022-03-17T20:11:43.000Z
tests/conftest.py
frnsys/maup
2cd12184e3a0d60128c4991e7e8eb706e3a227bd
[ "MIT" ]
42
2019-03-23T13:03:47.000Z
2022-03-14T22:23:57.000Z
tests/conftest.py
frnsys/maup
2cd12184e3a0d60128c4991e7e8eb706e3a227bd
[ "MIT" ]
16
2019-03-25T18:03:04.000Z
2021-09-27T08:16:30.000Z
import geopandas as gp
import pytest
from shapely.geometry import Polygon

import maup

# Shared coordinate reference system used by every fixture below.
CRS = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"


@pytest.fixture
def crs():
    """Return the shared CRS string."""
    return CRS


@pytest.fixture
def four_square_grid():
    """
    b d
    a c
    """
    # Four unit squares tiling the region [0, 2] x [0, 2].
    a = Polygon([(0, 0), (0, 1), (1, 1), (1, 0)])
    b = Polygon([(0, 1), (0, 2), (1, 2), (1, 1)])
    c = Polygon([(1, 0), (1, 1), (2, 1), (2, 0)])
    d = Polygon([(1, 1), (1, 2), (2, 2), (2, 1)])
    df = gp.GeoDataFrame(
        {"ID": ["a", "b", "c", "d"], "geometry": [a, b, c, d]}, crs=CRS
    )
    return df


@pytest.fixture
def square():
    # Unit square centered at (1, 1); overlaps all four grid cells.
    return Polygon([(0.5, 0.5), (0.5, 1.5), (1.5, 1.5), (1.5, 0.5)])


@pytest.fixture
def distant_polygon():
    # Triangle far from the grid (no intersection with it).
    return Polygon([(100, 101), (101, 101), (101, 100)])


@pytest.fixture
def diamond():
    return Polygon([(100, 0), (0, 100), (-100, 0), (0, -100)])


@pytest.fixture
def polygon_inside_diamond_bounds():
    # Inside the diamond's bounding box but outside the diamond itself.
    return Polygon([(90, 90), (91, 90), (91, 91), (90, 91)])


@pytest.fixture
def squares_within_four_square_grid():
    return gp.GeoSeries(
        [
            # both fit inside a:
            Polygon([(0, 0), (0, 0.5), (0.5, 0.5), (0.5, 0)]),
            Polygon([(0.5, 0.5), (1, 0.5), (1, 1), (0.5, 1)]),
            # is exactly b:
            Polygon([(0, 1), (0, 2), (1, 2), (1, 1)]),
            # fits neatly inside d:
            Polygon([(1.25, 1.25), (1.25, 1.75), (1.75, 1.75), (1.75, 1.25)]),
        ],
        crs=CRS,
    )


@pytest.fixture
def left_half_of_square_grid(four_square_grid):
    # Only cells "a" and "b" — the left column of the grid.
    return four_square_grid[four_square_grid["ID"].isin(["a", "b"])]


@pytest.fixture
def squares_df(squares_within_four_square_grid):
    return gp.GeoDataFrame(
        {
            "geometry": squares_within_four_square_grid,
            "data": [1, 1, 1, 1],
            "ID": ["01", "02", "03", "04"],
        },
        crs=CRS,
    )


@pytest.fixture
def square_mostly_in_top_left():
    return gp.GeoSeries([Polygon([(1.5, 0.5), (1.5, 2), (0, 2), (0, 0.5)])], crs=CRS)


@pytest.fixture
def squares_some_neat_some_overlapping(
    square_mostly_in_top_left, squares_within_four_square_grid
):
    # NOTE(review): GeoSeries.append is deprecated/removed in newer
    # geopandas (pd.concat is the modern equivalent) — verify the pinned
    # geopandas version before upgrading.
    result = squares_within_four_square_grid.append(
        square_mostly_in_top_left, ignore_index=True
    )
    result.crs = CRS
    return result


@pytest.fixture
def big_square():
    return gp.GeoSeries([Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])], crs=CRS)
23.617647
85
0.550851
373
2,409
3.407507
0.201072
0.02203
0.151062
0.018883
0.354839
0.108576
0.104642
0.025177
0.025177
0.025177
0
0.103939
0.241179
2,409
101
86
23.851485
0.591357
0.026152
0
0.202899
0
0
0.037866
0
0
0
0
0
0
1
0.173913
false
0
0.057971
0.144928
0.405797
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
0
0
0
1
ebd5e7cdc22ace82a24160df0b1f3fbcd2c0b44d
1,389
py
Python
plot_benchmark.py
99991/ParallelZipFile
070e54845ac9c42805c1e211992f26b719ed3920
[ "MIT" ]
null
null
null
plot_benchmark.py
99991/ParallelZipFile
070e54845ac9c42805c1e211992f26b719ed3920
[ "MIT" ]
null
null
null
plot_benchmark.py
99991/ParallelZipFile
070e54845ac9c42805c1e211992f26b719ed3920
[ "MIT" ]
null
null
null
""" Plot results of benchmark. Need to run benchmark.py first to generate the file benchmark_results.json. """ import json from collections import defaultdict import matplotlib.pyplot as plt def groupby(values, keyfunc): """Group values by key returned by keyfunc.""" groups = defaultdict(list) for value in values: key = keyfunc(value) groups[key].append(value) return groups def main() -> None: """Plot benchmark results.""" with open("benchmark_results.json", encoding="utf-8") as f: results = json.load(f) def keyfunc(result): return (result["zipfile"], result["threadcount"]) for (zipfile, threadcount), results in groupby(results, keyfunc).items(): results.sort(key=lambda result: result["filesize"]) filesizes = [] timings = [] for result in results: filesizes.append(result["filesize"]) timings.append(1000 * sum(result["timings"]) / len(result["timings"])) plt.loglog( filesizes, timings, label=f"{zipfile} - {threadcount} Thread{'s'[:threadcount-1]}", ) plt.legend() plt.xlabel("File size [bytes]") plt.ylabel("Milliseconds to process a 10 MB zip file (lower is better)") plt.tight_layout() plt.savefig("benchmark.png") plt.show() if __name__ == "__main__": main()
25.254545
82
0.62275
163
1,389
5.239264
0.509202
0.056206
0.046838
0
0
0
0
0
0
0
0
0.007626
0.24478
1,389
54
83
25.722222
0.806482
0.12023
0
0
1
0
0.186047
0.040698
0
0
0
0
0
1
0.088235
false
0
0.088235
0.029412
0.235294
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
ebd7565d7fb3e2e6e97cd012dbbf6e7433713b29
872
py
Python
tests/test_dedge.py
GiliardGodoi/edgesets
b59a600400972ccc82e5e17f2acbb2b45045b40b
[ "MIT" ]
null
null
null
tests/test_dedge.py
GiliardGodoi/edgesets
b59a600400972ccc82e5e17f2acbb2b45045b40b
[ "MIT" ]
20
2021-11-08T13:02:33.000Z
2021-11-29T01:03:40.000Z
tests/test_dedge.py
GiliardGodoi/edgesets
b59a600400972ccc82e5e17f2acbb2b45045b40b
[ "MIT" ]
null
null
null
from edgesets import UEdge, DEdge


def test_repr():
    """repr() output must eval() back to an equal edge of the same type."""
    e1 = DEdge(7, 8)
    text = repr(e1)
    assert text == "DEdge(7, 8, weight=1)"
    e2 = eval(text)
    # Bug fix: this previously asserted type(e1) == type(e1), which is
    # always true; the round-tripped object's type is what needs checking.
    assert type(e1) == type(e2)
    assert e1 == e2


def test_if_directions_are_differents_with_same_nodes():
    """Swapping the endpoints of a directed edge yields a distinct edge."""
    d1 = DEdge(10, 15)
    d2 = DEdge(15, 10)
    assert d1 != d2
    assert hash(d1) != hash(d2)


def test_if_DEdge_is_differente_from_UEdge():
    """A directed edge never equals an undirected edge on the same nodes."""
    d1 = DEdge(10, 15)
    d2 = UEdge(15, 10)
    assert d1 != d2
    assert hash(d1) != hash(d2)


def test_DEdge_is_different_from_tuple():
    """A DEdge is not interchangeable with the tuple it was built from."""
    param = (25, 42)
    edge = DEdge(*param)
    assert edge != param
    assert hash(edge) != hash(param)


def test_DEdge_is_different_from_list():
    """A DEdge is not interchangeable with the list it was built from."""
    param = [24, 25]
    edge = DEdge(*param)
    assert edge != param
    # assert hash(edge) != hash(param)  # list is not hashable
24.222222
61
0.603211
128
872
3.921875
0.328125
0.069721
0.027888
0.043825
0.507968
0.456175
0.36255
0.36255
0.36255
0.36255
0
0.075591
0.271789
872
36
61
24.222222
0.714961
0.061927
0
0.37037
0
0
0.026889
0
0
0
0
0
0.37037
1
0.185185
false
0
0.037037
0
0.222222
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebd871d4dbf6a21fb3d86e6cb1fedb9b96ed2220
1,889
py
Python
pred.py
amoshyc/tthl-code
d00ba5abd2ade5b55db6a6b95d136041022e3150
[ "Apache-2.0" ]
null
null
null
pred.py
amoshyc/tthl-code
d00ba5abd2ade5b55db6a6b95d136041022e3150
[ "Apache-2.0" ]
null
null
null
pred.py
amoshyc/tthl-code
d00ba5abd2ade5b55db6a6b95d136041022e3150
[ "Apache-2.0" ]
null
null
null
import argparse
from pathlib import Path

import numpy as np
import scipy
import keras
from keras.models import load_model
from moviepy.editor import VideoFileClip, concatenate_videoclips
from tqdm import tqdm


def main():
    """Classify video frames as highlight/non-highlight and write the
    concatenated highlight segments to an output video."""
    # yapf: disable
    parser = argparse.ArgumentParser(description='Video Highlight')
    parser.add_argument('model', type=str, help='Path to model')
    parser.add_argument('video', type=str, help='Path to video to highlight')
    parser.add_argument('--out', '-o', type=str, default='./hl.mp4', help='output name')
    parser.add_argument('--fps', type=int, default=2, help='fps')
    parser.add_argument('--itv', type=int, default=6, help='interval of adjusting')
    parser.add_argument('--bs', type=int, default=80, help='batch size')
    args = parser.parse_args()
    # yapf: enable

    print('Loading model & video', end='...')
    model = load_model(args.model)
    video = VideoFileClip(args.video)
    print('ok')

    # Sample frames at args.fps per second, resized to 224x224 (the
    # model's input size, per the xs array shape below).
    n_frames = int(video.duration) * args.fps
    xs = np.zeros((n_frames, 224, 224, 3), dtype=np.float32)
    for f in tqdm(range(n_frames), desc='Loading Video Frames', ascii=True):
        img = video.get_frame(f / args.fps)
        # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 —
        # requires an old scipy (or a port to PIL/skimage); confirm pin.
        xs[f] = scipy.misc.imresize(img, (224, 224))

    # Predicting
    pred = model.predict(xs, args.bs, verbose=1)
    # Round the per-frame scores to 0/1 highlight labels.
    pred = pred.round().astype(np.uint8).flatten()
    print(pred[:500])

    # Gap closing: if both ends of an args.itv-frame window are highlights,
    # mark the whole window as highlight.
    for i in range(n_frames - args.itv):
        s, t = i, i + args.itv
        if pred[s] == 1 and pred[t - 1] == 1:
            pred[s:t] = 1

    # Rising (+1) / falling (-1) edges of the padded 0/1 sequence give
    # segment boundaries, converted to seconds by dividing by fps.
    # NOTE(review): the trailing pad is [1], so a highlight that runs to
    # the end of the video never gets a falling edge and is silently
    # dropped by zip() below — a [0] pad looks intended; confirm.
    diff = np.diff(np.concatenate([[0], pred, [1]]))
    starts = (diff == +1).nonzero()[0] / args.fps
    ends = (diff == -1).nonzero()[0] / args.fps

    # Cut each highlight segment and concatenate into a single clip.
    segs = [video.subclip(s, e) for s, e in zip(starts, ends)]
    out = concatenate_videoclips(segs)
    out.write_videofile(args.out, fps=video.fps, threads=4, audio=True)


if __name__ == '__main__':
    main()
32.568966
88
0.644256
277
1,889
4.303249
0.397112
0.045302
0.08557
0.043624
0.062081
0.033557
0
0
0
0
0
0.023669
0.194812
1,889
57
89
33.140351
0.760026
0.019587
0
0
0
0
0.103896
0
0
0
0
0
0
1
0.02439
false
0
0.195122
0
0.219512
0.073171
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebd878270215dfbcc338d538a43df8bec58e8bb9
4,389
py
Python
blog/views.py
captainxavier/AutoBlog
44fb23628fe0210a3dcec80b91e1217d27ee9462
[ "MIT" ]
null
null
null
blog/views.py
captainxavier/AutoBlog
44fb23628fe0210a3dcec80b91e1217d27ee9462
[ "MIT" ]
null
null
null
blog/views.py
captainxavier/AutoBlog
44fb23628fe0210a3dcec80b91e1217d27ee9462
[ "MIT" ]
null
null
null
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.utils import timezone
from django.db.models import Count, Q
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.contrib.auth import login, authenticate, logout
from django.contrib.contenttypes.models import ContentType
from taggit.models import Tag
from accounts.models import Account
from blog.models import BlogPost, Category, BlogPicture
from comments.forms import CommentForm
from comments.models import Comments
# NOTE(review): this import shadows blog.models.Category above — confirm
# which Category model the templates/views actually rely on.
from category.models import Category

BLOG_POST_PER_PAGE = 3
RESULT_POST_PER_PAGE = 17


# Category Count
def get_category_count():
    """Return a queryset of per-category post counts (title, id, Count)."""
    questSet = BlogPost \
        .objects \
        .values('categories__title', 'categories__id') \
        .annotate(Count('categories__title'))
    return questSet


# Blog Page.
def blog_screen_view(request):
    """Render the blog index: featured, paginated, and recent posts."""
    category_count = get_category_count()
    super_featured = BlogPost.objects.filter(super_featured=True).order_by('-date_published')[:3]
    blogPosts = BlogPost.objects.order_by('-date_published')
    recentPosts = BlogPost.objects.order_by('-date_published')[:4]

    # Pagination
    page = request.GET.get('page', 1)
    blog_posts_paginator = Paginator(blogPosts, BLOG_POST_PER_PAGE)
    try:
        blogPosts = blog_posts_paginator.page(page)
    except PageNotAnInteger:
        # NOTE(review): falls back to page number BLOG_POST_PER_PAGE (3),
        # not page 1 — looks unintended but preserved; confirm.
        blogPosts = blog_posts_paginator.page(BLOG_POST_PER_PAGE)
    except EmptyPage:
        blogPosts = blog_posts_paginator.page(blog_posts_paginator.num_pages)

    context = {
        'super_featured_posts': super_featured,
        'posts': blogPosts,
        'recent_posts': recentPosts,
        'categories': category_count,
    }
    return render(request, 'blog/blog.html', context)


# Single Post
def post_screen_view(request, slug):
    """Render one post and handle comment submissions (incl. replies)."""
    post = get_object_or_404(BlogPost, slug=slug)
    post_related = post.tags.similar_objects()[:3]
    app_url = request.get_full_path
    category_count = get_category_count()
    recentPosts = BlogPost.objects.order_by('-date_published')[:4]
    comments = post.comments
    # NOTE(review): initial_data is currently unused — kept in case a
    # CommentForm(initial=...) call was planned.
    initial_data = {
        'content_type': post.get_content_type,
        'object_id': post.id,
    }
    if request.method == 'POST':
        form = CommentForm(request.POST or None)
        if form.is_valid():
            com = form.save(commit=False)
            com.user = request.user
            com.content_type = post.get_content_type
            com.object_id = post.id
            parent_obj = None
            # Bug fix: a bare `except:` here also swallowed SystemExit and
            # KeyboardInterrupt; int() on a missing/non-numeric value can
            # only raise TypeError or ValueError.
            try:
                parent_id = int(request.POST.get("parent_id"))
            except (TypeError, ValueError):
                parent_id = None
            if parent_id:
                parent_qs = Comments.objects.filter(id=parent_id)
                if parent_qs.exists() and parent_qs.count() == 1:
                    # Threaded reply: attach to the (unique) parent comment.
                    parent_obj = parent_qs.first()
            com.parent = parent_obj
            com.save()
            return HttpResponseRedirect(com.content_object.get_absolute_url())
        else:
            print('error')
    else:
        form = CommentForm()
    context = {
        'post': post,
        'recent_posts': recentPosts,
        'categories': category_count,
        'post_url': app_url,
        'comments': comments,
        'comment_form': form,
        'related_posts': post_related,
    }
    return render(request, 'blog/post.html', context)


# Search Page
def search_screen_view(request):
    """Search posts by title/description substrings, paginated."""
    query_set = BlogPost.objects.all()
    category_count = get_category_count()
    query = request.GET.get('q')
    if query:
        query_set = query_set.filter(
            Q(title__icontains=query) |
            Q(description_one__icontains=query) |
            Q(description_two__icontains=query)
        ).distinct()
    paginator = Paginator(query_set, RESULT_POST_PER_PAGE)  # 6 posts per page
    page = request.GET.get('page', 1)
    try:
        posts = paginator.page(page)
    except PageNotAnInteger:
        posts = paginator.page(1)
    except EmptyPage:
        posts = paginator.page(paginator.num_pages)
    context = {
        'query_sets': posts,
        'categories': category_count,
    }
    return render(request, 'blog/result_search.html', context)
33.503817
98
0.642971
497
4,389
5.430584
0.253521
0.052983
0.040015
0.029641
0.240089
0.203038
0.068914
0.034828
0
0
0
0.005576
0.264525
4,389
131
99
33.503817
0.830545
0.017316
0
0.222222
0
0
0.081618
0.005505
0
0
0
0
0
1
0.037037
false
0
0.12037
0
0.203704
0.009259
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebd938bccdfd5e3d285fcfe2b39abb7192868335
1,408
py
Python
data_loader/data_set_loader.py
ys10/WaveRNN
dc4eec65bc1eec59ebc533469d40f072df3a6be6
[ "MIT" ]
6
2018-11-15T05:48:02.000Z
2021-06-18T02:22:31.000Z
data_loader/data_set_loader.py
ys10/WaveRNN
dc4eec65bc1eec59ebc533469d40f072df3a6be6
[ "MIT" ]
null
null
null
data_loader/data_set_loader.py
ys10/WaveRNN
dc4eec65bc1eec59ebc533469d40f072df3a6be6
[ "MIT" ]
1
2021-04-02T11:53:52.000Z
2021-04-02T11:53:52.000Z
# coding=utf-8 import tensorflow as tf class DataSetLoader(object): def __init__(self, config, generators, default_set_name='train'): self.config = config self.generators = generators self.data_sets = dict() self.data_set_init_ops = dict() with tf.variable_scope("data"): for k in self.generators.keys(): self.data_sets[k] = self.get_data_set_from_generator(self.generators[k].next, epochs=self.config.epochs, batch_size=self.config.batch_size) self.iterator = self.data_sets[default_set_name].make_one_shot_iterator() features, labels = self.iterator.get_next() self.next_data = {'features': features, 'labels': labels} for k in self.data_sets.keys(): self.data_set_init_ops[k] = self.iterator.make_initializer(self.data_sets[k]) @staticmethod def get_data_set_from_generator(generator_func, epochs=1, batch_size=16): data_set = tf.data.Dataset.from_generator(generator_func, output_types=(tf.int32, tf.int32), output_shapes=(tf.TensorShape([64]), tf.TensorShape([1]))) data_set = data_set.repeat(epochs) data_set = data_set.batch(batch_size) return data_set
48.551724
120
0.598011
169
1,408
4.692308
0.337278
0.088272
0.075662
0.037831
0.103405
0
0
0
0
0
0
0.011179
0.301136
1,408
28
121
50.285714
0.794715
0.008523
0
0
0
0
0.016499
0
0
0
0
0
0
1
0.083333
false
0
0.041667
0
0.208333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebda183d9ae687038f19ffe815f5ccbfc5935c5a
1,323
py
Python
ssht00ls/classes/connections/__init__.py
vandenberghinc/ssht00ls
e08081773c8da7dfac0764170bfeacb4bf421ec1
[ "CNRI-Python" ]
5
2021-02-18T17:46:39.000Z
2021-12-29T15:48:07.000Z
ssht00ls/classes/connections/__init__.py
vandenberghinc/ssht00ls
e08081773c8da7dfac0764170bfeacb4bf421ec1
[ "CNRI-Python" ]
null
null
null
ssht00ls/classes/connections/__init__.py
vandenberghinc/ssht00ls
e08081773c8da7dfac0764170bfeacb4bf421ec1
[ "CNRI-Python" ]
2
2021-03-19T14:06:20.000Z
2021-09-26T14:08:34.000Z
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# imports.
from ssht00ls.classes.config import *
from ssht00ls.classes import utils


# the ssh connections object class.
class Connections(Traceback):
    """Inspect established SSH connections on the local machine (Linux only)."""

    def __init__(self):

        # docs.
        DOCS = {
            "module": "ssht00ls.connections",
            "initialized": True,
            "description": [],
            "chapter": "Connections",
        }

        # defaults.
        Traceback.__init__(self,
                           traceback="ssht00ls.connections",
                           raw_traceback="ssht00ls.classes.connections.Connections")

        #

    def list(self, filter="ssh"):
        """Return a success response holding ESTABlished ssh connections,
        keyed by remote address (parsed from `ss` output)."""
        if dev0s.defaults.vars.os not in ["linux"]:
            # Bug fix: the f-string referenced `dev0s.defauls` (typo), which
            # raised AttributeError instead of returning this error response.
            return dev0s.response.error(f"Unsupported operating system [{dev0s.defaults.vars.os}].")
        output = dev0s.utils.__execute_script__("""ss | grep ssh | awk '{print $1","$2","$3","$4","$5","$6}' """)
        connections = {}
        for line in output.split("\n"):
            if line not in [""]:
                net_id, state, recvq, sendq, local_address, remote_address = line.split(",")
                # Keep only fully established sessions.
                if state == "ESTAB":
                    connections[remote_address] = {
                        "remote_address": remote_address,
                        "local_address": local_address,
                        "recvq": recvq,
                        "sendq": sendq,
                        "net_id": net_id,
                    }
        return dev0s.response.success(f"Successfully listed {len(connections)} ssh connection(s).", {
            "connections": connections,
        })

    #


# Initialized objects.
connections = Connections()
28.76087
118
0.665911
155
1,323
5.529032
0.522581
0.060677
0.070012
0.060677
0
0
0
0
0
0
0
0.020833
0.165533
1,323
45
119
29.4
0.755435
0.092971
0
0
0
0.033333
0.307305
0.079765
0
0
0
0
0
1
0.066667
false
0
0.066667
0
0.233333
0.033333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebdc4515eef225eea20fa462e0ef3055b00b55ae
193
py
Python
_solutions/pandas/series/pandas_series_create_even.py
sages-pl/2022-01-pythonsqlalchemy-aptiv
1d6d856608e9dbe25b139e8968c48b7f46753b84
[ "MIT" ]
null
null
null
_solutions/pandas/series/pandas_series_create_even.py
sages-pl/2022-01-pythonsqlalchemy-aptiv
1d6d856608e9dbe25b139e8968c48b7f46753b84
[ "MIT" ]
null
null
null
_solutions/pandas/series/pandas_series_create_even.py
sages-pl/2022-01-pythonsqlalchemy-aptiv
1d6d856608e9dbe25b139e8968c48b7f46753b84
[ "MIT" ]
null
null
null
# Build a Series of the even numbers 0..18.
data = np.arange(start=0, stop=20, step=2)
result = pd.Series(data=data)

# Alternative Solution
# s = pd.Series(range(0, 20, 2))

# Alternative Solution
# s = pd.Series([x for x in range(0, 20) if x % 2 == 0])
16.083333
56
0.61658
35
193
3.4
0.485714
0.07563
0.067227
0.369748
0.470588
0
0
0
0
0
0
0.084967
0.207254
193
11
57
17.545455
0.69281
0.658031
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
ebde515b95808947b4370dd040bc2675c00b8d5a
907
py
Python
ycyc/tests/__init__.py
MrLYC/ycyc
1938493294fbad3a461cc3a752c5385d30a6e51d
[ "MIT" ]
22
2015-07-21T03:15:36.000Z
2021-02-23T07:58:03.000Z
ycyc/tests/__init__.py
MrLYC/ycyc
1938493294fbad3a461cc3a752c5385d30a6e51d
[ "MIT" ]
3
2016-03-20T12:06:07.000Z
2018-01-16T10:34:19.000Z
ycyc/tests/__init__.py
MrLYC/ycyc
1938493294fbad3a461cc3a752c5385d30a6e51d
[ "MIT" ]
3
2015-05-08T00:55:38.000Z
2017-02-25T03:30:14.000Z
#!/usr/bin/env python
# encoding: utf-8

from contextlib import contextmanager
# Improvement: use the stdlib unittest.mock instead of the third-party
# `mock` backport (identical API on Python 3.3+).
from unittest import mock

__author__ = 'Liu Yicong'
__email__ = 'imyikong@gmail.com'


@contextmanager
def mock_patches(*patches, **named_patches):
    """
    A context manager to help create mock patches.

    >>> with mock_patches("package.module.cls", cls2="package.cls") as mocks:
    ...     mocks.cls()  #=> package.module.cls
    ...     mocks.cls2()  #=> package.cls
    """
    # Attribute name on the yielded mock: last dotted component for
    # positional targets, the keyword itself for named targets.
    attrs = list(i.split(".")[-1] for i in patches)
    attrs.extend(list(named_patches.keys()))
    targets = list(patches)
    targets.extend(list(named_patches.values()))

    # Bug fix: this local list was previously named `mock_patches`,
    # shadowing the function itself inside its own body.
    active_patches = []
    mocks = mock.Mock()
    for attr, target in zip(attrs, targets):
        patch = mock.patch(target)
        active_patches.append(patch)
        setattr(mocks, attr, patch.start())
    try:
        yield mocks
    finally:
        # Always stop every started patch, even if the body raised.
        for p in active_patches:
            p.stop()
24.513514
77
0.62183
115
907
4.765217
0.486957
0.120438
0.058394
0.080292
0
0
0
0
0
0
0
0.005764
0.23484
907
36
78
25.194444
0.783862
0.261301
0
0
0
0
0.045242
0
0
0
0
0
0
1
0.047619
false
0
0.095238
0
0.142857
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebe0baf06bf88a67d2daea2010828c40f054c75a
558
py
Python
corehq/apps/data_interfaces/migrations/0016_createscheduleinstanceactiondefinition_specific_start_date.py
kkrampa/commcare-hq
d64d7cad98b240325ad669ccc7effb07721b4d44
[ "BSD-3-Clause" ]
1
2020-05-05T13:10:01.000Z
2020-05-05T13:10:01.000Z
corehq/apps/data_interfaces/migrations/0016_createscheduleinstanceactiondefinition_specific_start_date.py
kkrampa/commcare-hq
d64d7cad98b240325ad669ccc7effb07721b4d44
[ "BSD-3-Clause" ]
1
2019-12-09T14:00:14.000Z
2019-12-09T14:00:14.000Z
corehq/apps/data_interfaces/migrations/0016_createscheduleinstanceactiondefinition_specific_start_date.py
MaciejChoromanski/commcare-hq
fd7f65362d56d73b75a2c20d2afeabbc70876867
[ "BSD-3-Clause" ]
5
2015-11-30T13:12:45.000Z
2019-07-01T19:27:07.000Z
# -*- coding: utf-8 -*- # Generated by Django 1.11.10 on 2018-03-11 12:37 from __future__ import absolute_import from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('data_interfaces', '0015_automaticupdaterule_locked_for_editing'), ] operations = [ migrations.AddField( model_name='createscheduleinstanceactiondefinition', name='specific_start_date', field=models.DateField(null=True), ), ]
25.363636
75
0.682796
58
558
6.258621
0.775862
0.055096
0.088154
0
0
0
0
0
0
0
0
0.050691
0.222222
558
21
76
26.571429
0.785714
0.123656
0
0
1
0
0.236626
0.166667
0
0
0
0
0
1
0
false
0
0.214286
0
0.428571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
ebe2b6defd1e57b30e21b97498d0791cda38301d
353
py
Python
src/output/console.py
fufuok/PyAgent
f04b0d05e733b9989562934f2cdf1fa7a6af947a
[ "MIT" ]
2
2021-11-19T04:10:18.000Z
2021-12-01T02:39:56.000Z
src/output/console.py
huyingjun/PyAgent
ff7096634aa8deb617d2fe9d47fd2c6fbf8ff9a4
[ "MIT" ]
1
2021-12-22T14:12:42.000Z
2022-02-07T12:44:59.000Z
src/output/console.py
huyingjun/PyAgent
ff7096634aa8deb617d2fe9d47fd2c6fbf8ff9a4
[ "MIT" ]
3
2021-12-04T02:50:07.000Z
2021-12-27T03:49:35.000Z
# -*- coding:utf-8 -*- """ console.py ~~~~~~~~ 数据发布插件 - 输出到控制台 :author: Fufu, 2021/6/7 """ from . import OutputPlugin from ..libs.metric import Metric class Console(OutputPlugin): """数据发布 - 输出到控制台""" name = 'console' async def write(self, metric: Metric) -> None: """写入数据""" print('>>>', metric.as_text)
16.809524
50
0.552408
39
353
4.974359
0.74359
0
0
0
0
0
0
0
0
0
0
0.026515
0.252125
353
20
51
17.65
0.708333
0.271955
0
0
0
0
0.045872
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.666667
0.166667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
ebe364266010bc086bf3592d22da99f969271db1
699
py
Python
tests/slack_sdk/web/test_web_client_issue_1049.py
priya1puresoftware/python-slack-sdk
3503182feaaf4d41b57fd8bf10038ebc99f1f3c7
[ "MIT" ]
2,486
2016-11-03T14:31:43.000Z
2020-10-26T23:07:44.000Z
tests/slack_sdk/web/test_web_client_issue_1049.py
priya1puresoftware/python-slack-sdk
3503182feaaf4d41b57fd8bf10038ebc99f1f3c7
[ "MIT" ]
721
2016-11-03T21:26:56.000Z
2020-10-26T12:41:29.000Z
tests/slack_sdk/web/test_web_client_issue_1049.py
priya1puresoftware/python-slack-sdk
3503182feaaf4d41b57fd8bf10038ebc99f1f3c7
[ "MIT" ]
627
2016-11-02T19:04:19.000Z
2020-10-25T19:21:13.000Z
import json
import unittest

from slack_sdk.web import WebClient
from tests.slack_sdk.web.mock_web_api_server import (
    setup_mock_web_api_server,
    cleanup_mock_web_api_server,
)


class TestWebClient_Issue_1049(unittest.TestCase):
    """Regression test for issue #1049: iterating a paginated
    admin.conversations.search response."""

    def setUp(self):
        # Starts a local mock of the Slack Web API (reachable on :8888).
        setup_mock_web_api_server(self)

    def tearDown(self):
        cleanup_mock_web_api_server(self)

    def test_the_pattern(self):
        client = WebClient(
            base_url="http://localhost:8888",
            # The token selects the mock server's pagination scenario —
            # presumably configured to serve two pages; see mock server.
            token="xoxb-admin_convo_pagination",
        )
        pages = []
        # Iterating the response object follows cursor pagination until
        # the server returns no next cursor.
        for page in client.admin_conversations_search(query="announcement"):
            pages.append(page)
        self.assertEqual(len(pages), 2)
25.888889
76
0.688126
89
699
5.067416
0.539326
0.077605
0.110865
0.177384
0.226164
0.101996
0
0
0
0
0
0.016636
0.226037
699
26
77
26.884615
0.817006
0
0
0
0
0
0.085837
0.038627
0
0
0
0
0.047619
1
0.142857
false
0
0.190476
0
0.380952
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
ebe421e20d63c5797e08d5c60d06d2e3c0b635d7
734
py
Python
netforce_service/netforce_service/models/task_list.py
nfco/netforce
35252eecd0a6633ab9d82162e9e3ff57d4da029a
[ "MIT" ]
27
2015-09-30T23:53:30.000Z
2021-06-07T04:56:25.000Z
netforce_service/netforce_service/models/task_list.py
nfco/netforce
35252eecd0a6633ab9d82162e9e3ff57d4da029a
[ "MIT" ]
191
2015-10-08T11:46:30.000Z
2019-11-14T02:24:36.000Z
netforce_service/netforce_service/models/task_list.py
nfco/netforce
35252eecd0a6633ab9d82162e9e3ff57d4da029a
[ "MIT" ]
32
2015-10-01T03:59:43.000Z
2022-01-13T07:31:05.000Z
from netforce.model import Model, fields, get_model, clear_cache from netforce.database import get_connection from datetime import * import time from netforce import access class TaskList(Model): _name = "task.list" _string = "Task List" _fields = { "name": fields.Char("Name",required=True), "date_created": fields.Date("Date Created",required=True), "project_id": fields.Many2One("project","Project"), "milestone_id": fields.Many2One("project.milestone","Milestone"), "tasks": fields.One2Many("task","task_list_id","Tasks"), } _order = "date_created desc,id desc" _defaults ={ "date_created": lambda *a: time.strftime("%Y-%m-%d"), } TaskList.register()
30.583333
73
0.667575
88
734
5.397727
0.454545
0.092632
0.067368
0.096842
0
0
0
0
0
0
0
0.005025
0.186649
734
23
74
31.913043
0.79062
0
0
0
0
0
0.249319
0
0
0
0
0
0
1
0
false
0
0.25
0
0.55
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
ebe505828b5e47c65eb660e7346462d4ff167740
696
py
Python
startpara.py
bhsnsrma/Cisco_Python
e92e272d8335f6b7fef27466a085430dd1694396
[ "Apache-2.0" ]
null
null
null
startpara.py
bhsnsrma/Cisco_Python
e92e272d8335f6b7fef27466a085430dd1694396
[ "Apache-2.0" ]
null
null
null
startpara.py
bhsnsrma/Cisco_Python
e92e272d8335f6b7fef27466a085430dd1694396
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python3 import paramiko,time #using as ssh client client=paramiko.SSHClient() #auto adjut host kue verification with yes or no client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) #time to connect to remote Cisco IOS addr=input("Enter your Router IP :") u='root' p='cisco' #connected with SSh session client.connect(addr,username=u,password=p,allow_agent=False, look_for_keys=False) #we have to ask for shell device_access=client.invoke_shell() #now sending command device_access.send("show ip int br \n") time.sleep(1) ## assuming command got executed, receive data. output=device_access.recv(65000) #deconding byte - like string into staring print(output.decode('ascii'))
27.84
81
0.787356
111
696
4.837838
0.747748
0.067039
0
0
0
0
0
0
0
0
0
0.011236
0.104885
696
24
82
29
0.850722
0.392241
0
0
0
0
0.128641
0
0
0
0
0
0
1
0
false
0.083333
0.083333
0
0.083333
0.083333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
ebe611c4dd602efaddd3352116b777a3b429c7f6
16,830
py
Python
sdkcore/SdkCoreTesting/scripts/arsdkgenobjc.py
papachuj/groundsdk-ios
f205f75b11a57f49b39ee558b2e8e39f59a15963
[ "BSD-3-Clause" ]
2
2020-03-30T00:06:43.000Z
2021-07-18T18:07:15.000Z
sdkcore/SdkCoreTesting/scripts/arsdkgenobjc.py
papachuj/groundsdk-ios
f205f75b11a57f49b39ee558b2e8e39f59a15963
[ "BSD-3-Clause" ]
null
null
null
sdkcore/SdkCoreTesting/scripts/arsdkgenobjc.py
papachuj/groundsdk-ios
f205f75b11a57f49b39ee558b2e8e39f59a15963
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python3 import sys, os import arsdkparser #=============================================================================== class Writer(object): def __init__(self, fileobj): self.fileobj = fileobj def write(self, fmt, *args): if args: self.fileobj.write(fmt % (args)) else: self.fileobj.write(fmt % ()) #=============================================================================== def class_name(name): splitted_name = name.split('_') return "ArsdkFeature" + "".join(x.capitalize() for x in splitted_name) def enum_class_name(feature_strict_name, enum_name): splitted_name = enum_name.split('_') return class_name(feature_strict_name) + "".join(x.capitalize() for x in splitted_name) def multiset_class_name(feature_strict_name, multiset_name): splitted_name = multiset_name.split('_') return class_name(feature_strict_name) + "".join(x.capitalize() for x in splitted_name) def param_name(name): components = name.split('_') return components[0].lower() + "".join(x[0].upper() + x[1:] for x in components[1:]) def arg_type(feature_strict_name, arg, is_fun_arg=False): args = { arsdkparser.ArArgType.I8: "NSInteger", arsdkparser.ArArgType.U8: "NSUInteger", arsdkparser.ArArgType.I16: "NSInteger", arsdkparser.ArArgType.U16: "NSUInteger", arsdkparser.ArArgType.I32: "NSInteger", arsdkparser.ArArgType.U32: "NSUInteger", arsdkparser.ArArgType.I64: "int64_t", arsdkparser.ArArgType.U64: "uint64_t", arsdkparser.ArArgType.FLOAT: "float", arsdkparser.ArArgType.DOUBLE: "double", arsdkparser.ArArgType.STRING: "NSString*" } if isinstance(arg.argType, arsdkparser.ArEnum): argType = enum_class_name(feature_strict_name, arg.argType.name) elif isinstance(arg.argType, arsdkparser.ArBitfield): if arg.argType.btfType == arsdkparser.ArArgType.I64 or \ arg.argType.btfType == arsdkparser.ArArgType.U64: argType = args[arsdkparser.ArArgType.U64] else: argType = args[arsdkparser.ArArgType.U32] elif isinstance(arg.argType, arsdkparser.ArMultiSetting): if is_fun_arg: argType = 
multiset_class_name(feature_strict_name, arg.argType.name) + ' *' else: argType = multiset_class_name(feature_strict_name, arg.argType.name) else: argType = args[arg.argType] return argType def multiset_c_name(ftr, multiset): return "struct arsdk_%s_%s" % (ftr, multiset) def arg_c_type(arg, is_fun_arg=False): args = { arsdkparser.ArArgType.I8: "int8_t", arsdkparser.ArArgType.U8: "uint8_t", arsdkparser.ArArgType.I16: "int16_t", arsdkparser.ArArgType.U16: "uint16_t", arsdkparser.ArArgType.I32: "int32_t", arsdkparser.ArArgType.U32: "uint32_t", arsdkparser.ArArgType.I64: "int64_t", arsdkparser.ArArgType.U64: "uint64_t", arsdkparser.ArArgType.FLOAT: "float", arsdkparser.ArArgType.DOUBLE: "double", arsdkparser.ArArgType.STRING: "const char*" } if isinstance(arg.argType, arsdkparser.ArEnum): argType = args[arsdkparser.ArArgType.I32] elif isinstance(arg.argType, arsdkparser.ArBitfield): argType = args[arg.argType.btfType] elif isinstance(arg.argType, arsdkparser.ArMultiSetting): if is_fun_arg: argType = multiset_c_name("generic", arg.argType.name.lower()) + ' *' else: argType = multiset_c_name("generic", arg.argType.name.lower()) else: argType = args[arg.argType] return argType def arg_name(arg): if isinstance(arg.argType, arsdkparser.ArEnum): argName = param_name(arg.name) elif isinstance(arg.argType, arsdkparser.ArBitfield): argName = param_name(arg.name) + "BitField" elif isinstance(arg.argType, arsdkparser.ArMultiSetting): argName = param_name(arg.name) else: argName = param_name(arg.name) return argName def arg_value_from_obj_c_to_c(feature_strict_name, arg): if arg.argType == arsdkparser.ArArgType.STRING: return "[" + arg_name(arg) + " UTF8String]" elif isinstance(arg.argType, arsdkparser.ArMultiSetting): return "[%s getNativeSettings]" % arg_name(arg) elif arg_c_type(arg) != arg_type(feature_strict_name, arg): return "(" + arg_c_type(arg) + ")" + arg_name(arg) else: return arg_name(arg) def c_name(val): return val[0].upper() + val[1:] 
#=============================================================================== def expected_cmd_class(): return "ExpectedCmd" def command_name(feature_name, cmd): command_name_str = feature_name + "_" + cmd.name splitted_name = command_name_str.split('_') command_name_str = "".join(x.capitalize() for x in splitted_name) # lower first letter return command_name_str[0].lower() + command_name_str[1:] def static_initializer_method_name(feature_obj, feature_name, cmd, with_swift_name=False): return_part = "+ (" + expected_cmd_class() + "*)" method_root_name = command_name(feature_name, cmd) method_name = return_part + method_root_name if cmd.args: # the first arg is special as the arg name is not part of the method name arg = cmd.args[0] method_name += ":(" + arg_type(feature_obj.name, arg, True) + ")" + arg_name(arg) for arg in cmd.args[1:]: method_name += " " + arg_name(arg) + ":(" + arg_type(feature_obj.name, arg, True) + ")" + arg_name(arg) if with_swift_name: method_name += "\nNS_SWIFT_NAME(" + method_root_name + "(" for arg in cmd.args: method_name += arg_name(arg) + ":" method_name += "))" return method_name def command_class_name(feature_name, cmd): command_name_str = command_name(feature_name, cmd) return expected_cmd_class() + command_name_str[0].upper() + command_name_str[1:] def match_command_name(): return "- (BOOL)match:(struct arsdk_cmd*)cmd checkParams:(BOOL)checkParams" def gen_expected_header_file(ctx, out): out.write("/** Generated, do not edit ! 
*/\n") out.write("\n") out.write("#import <Foundation/Foundation.h>\n") out.write("#import <SdkCore/Arsdk.h>\n") out.write("\n") out.write("struct arsdk_cmd;\n") out.write("\n") out.write("@interface %s : NSObject\n", expected_cmd_class()) out.write("\n") out.write("%s;\n", match_command_name()) out.write("- (NSString*)describe;\n"); out.write("\n") for feature_id in sorted(ctx.featuresById.keys()): feature_obj = ctx.featuresById[feature_id] for cmd in feature_obj.cmds: feature_name = feature_obj.name + ("_" + cmd.cls.name if cmd.cls else "") out.write("%s;\n", static_initializer_method_name(feature_obj, feature_name, cmd, True)) out.write("@end\n") out.write("\n") for feature_id in sorted(ctx.featuresById.keys()): feature_obj = ctx.featuresById[feature_id] for cmd in feature_obj.cmds: feature_name = feature_obj.name + ("_" + cmd.cls.name if cmd.cls else "") out.write("@interface %s : %s\n", command_class_name(feature_name, cmd), expected_cmd_class()) out.write("@end\n") out.write("\n") def gen_expected_source_file(ctx, out): out.write("/** Generated, do not edit ! 
*/\n") out.write("\n") out.write("#import \"" + expected_cmd_class() + ".h\"\n") out.write("#import <arsdk/arsdk.h>\n") out.write("\n") out.write("@interface %s ()\n", expected_cmd_class()) out.write("\n") out.write("@property (nonatomic, assign) struct arsdk_cmd* cmd;\n") out.write("@end\n") out.write("\n") out.write("@implementation %s\n", expected_cmd_class()) out.write("\n") out.write("%s {return false;}\n", match_command_name()) out.write("\n") out.write("- (NSString*)describe {\n"); out.write(" return [ArsdkCommand describe:self.cmd];\n"); out.write("}\n"); out.write("\n") for feature_id in sorted(ctx.featuresById.keys()): feature_obj = ctx.featuresById[feature_id] for cmd in feature_obj.cmds: feature_name = feature_obj.name + ("_" + cmd.cls.name if cmd.cls else "") out.write("%s {\n", static_initializer_method_name(feature_obj, feature_name, cmd)) out.write(" %s *expectedCmd = [[%s alloc] init];\n", command_class_name(feature_name, cmd), command_class_name(feature_name, cmd)) out.write(" expectedCmd.cmd = calloc(1, sizeof(*expectedCmd.cmd));\n") out.write(" arsdk_cmd_init(expectedCmd.cmd);\n") out.write("\n") if cmd.args: out.write(" int res = arsdk_cmd_enc_%s_%s(expectedCmd.cmd, %s);\n", c_name(feature_name), c_name(cmd.name), ", ".join(arg_value_from_obj_c_to_c(feature_obj.name, arg) for arg in cmd.args)) else: out.write(" int res = arsdk_cmd_enc_%s_%s(expectedCmd.cmd);\n", c_name(feature_name), c_name(cmd.name)) out.write(" if (res < 0) {\n") out.write(" return nil;\n") out.write(" }\n") out.write(" return expectedCmd;\n") out.write("}\n") out.write("\n") out.write("@end\n") out.write("\n") for feature_id in sorted(ctx.featuresById.keys()): feature_obj = ctx.featuresById[feature_id] for cmd in feature_obj.cmds: feature_name = feature_obj.name + ("_" + cmd.cls.name if cmd.cls else "") out.write("@implementation %s\n", command_class_name(feature_name, cmd)) out.write("\n") out.write("%s {\n", match_command_name()) out.write(" if (self.cmd->id != cmd->id) 
return false;\n") out.write("\n") if cmd.args: out.write(" if (checkParams) {\n") for arg in cmd.args: out.write(" %s _%s;\n", arg_c_type(arg), arg_name(arg)) out.write(" int res = arsdk_cmd_dec_%s_%s(cmd, %s);\n", c_name(feature_name), c_name(cmd.name), ", ".join("&_" + arg_name(arg) for arg in cmd.args)) out.write(" if (res < 0) {\n") out.write(" return false;\n") out.write(" }\n") out.write("\n") for arg in cmd.args: out.write(" %s my%s;\n", arg_c_type(arg), arg_name(arg).title()) out.write(" res = arsdk_cmd_dec_%s_%s(self.cmd, %s);\n", c_name(feature_name), c_name(cmd.name), ", ".join("&my" + arg_name(arg).title() for arg in cmd.args)) out.write(" if (res < 0) {\n") out.write(" return false;\n") out.write(" }\n") out.write("\n") for arg in cmd.args: if arg.argType == arsdkparser.ArArgType.STRING: out.write(" NSString* %sObj = [NSString stringWithUTF8String:_%s];\n", arg_name(arg), arg_name(arg)) out.write(" NSString* my%sObj = [NSString stringWithUTF8String:my%s];\n", arg_name(arg).title(), arg_name(arg).title()) out.write(" if (![%sObj isEqual:my%sObj]) return false;\n", arg_name(arg), arg_name(arg).title()) elif isinstance(arg.argType, arsdkparser.ArMultiSetting): out.write(" res = memcmp(&_%s, &my%s, sizeof(my%s));\n", arg.name, arg_name(arg).title(), arg_name(arg).title()) out.write(" if (res != 0) {\n") out.write(" return false;\n") out.write(" }\n") else: out.write(" if (_%s != my%s) return false;\n", arg_name(arg), arg_name(arg).title()) out.write("\n") out.write(" }\n") out.write(" return true;\n") out.write("}\n") out.write("@end\n") out.write("\n") #=============================================================================== def cmd_encoder_class(): return "CmdEncoder" def encoder_function_signature(feature_obj, msg, with_swift_name=False): feature_name = feature_obj.name + ("_" + msg.cls.name if msg.cls else "") function_underscored = command_name(feature_name, msg) + "_encoder" components = function_underscored.split('_') func_name = 
components[0][0].lower() + components[0][1:] + "".join(x[0].upper() + x[1:] for x in components[1:]) function_signature = "+ (int (^)(struct arsdk_cmd *))" + func_name if msg.args: # the first arg is special as the arg name is not part of the method name arg = msg.args[0] function_signature += ":(" + arg_type(feature_obj.name, arg, True) + ")" + arg_name(arg) for arg in msg.args[1:]: function_signature += " " + arg_name(arg) + ":(" + arg_type(feature_obj.name, arg, True) + ")" + arg_name(arg) if with_swift_name: function_signature += "\nNS_SWIFT_NAME(" + func_name + "(" for arg in msg.args: function_signature += arg_name(arg) + ":" function_signature += "))" return function_signature def gen_encoder_header_file(ctx, out): out.write("/** Generated, do not edit ! */\n") out.write("\n") out.write("#import <Foundation/Foundation.h>\n") out.write("#import <SdkCore/Arsdk.h>\n") out.write("\n") out.write("struct arsdk_cmd;\n") out.write("\n") out.write("@interface %s : NSObject\n", cmd_encoder_class()) out.write("\n") for feature_id in sorted(ctx.featuresById.keys()): feature_obj = ctx.featuresById[feature_id] for evt in feature_obj.evts: out.write("%s;\n", encoder_function_signature(feature_obj, evt, True)) out.write("@end\n") out.write("\n") def gen_encoder_source_file(ctx, out): out.write("/** Generated, do not edit ! 
*/\n") out.write("\n") out.write("#import \"%s.h\"\n", cmd_encoder_class()) out.write("#import <arsdk/arsdk.h>\n") out.write("\n") out.write("@implementation %s\n", cmd_encoder_class()) out.write("\n") for feature_id in sorted(ctx.featuresById.keys()): feature_obj = ctx.featuresById[feature_id] for evt in feature_obj.evts: feature_name = feature_obj.name + ("_" + evt.cls.name if evt.cls else "") out.write("%s {\n", encoder_function_signature(feature_obj, evt)) out.write(" return ^(struct arsdk_cmd* cmd) {\n") if evt.args: out.write(" return arsdk_cmd_enc_%s_%s(cmd, %s);\n", c_name(feature_name), c_name(evt.name), ", ".join(arg_value_from_obj_c_to_c(feature_obj.name, arg) for arg in evt.args)) else: out.write(" return arsdk_cmd_enc_%s_%s(cmd);\n", c_name(feature_name), c_name(evt.name)) out.write(" };\n") out.write("}\n") out.write("\n") out.write("@end\n") out.write("\n") #=============================================================================== def list_files(ctx, outdir, extra): None #=============================================================================== #=============================================================================== def generate_files(ctx, outdir, extra): if not os.path.exists (outdir): os.mkdirs (outdir) else: filelist = os.listdir(outdir) for f in filelist: os.remove(outdir + "/" + f) filepath = os.path.join(outdir, expected_cmd_class() + ".h") with open(filepath, "w") as file_obj: gen_expected_header_file(ctx, Writer(file_obj)) filepath = os.path.join(outdir, expected_cmd_class() + ".m") with open(filepath, "w") as file_obj: gen_expected_source_file(ctx, Writer(file_obj)) filepath = os.path.join(outdir, cmd_encoder_class() + ".h") with open(filepath, "w") as file_obj: gen_encoder_header_file(ctx, Writer(file_obj)) filepath = os.path.join(outdir, cmd_encoder_class() + ".m") with open(filepath, "w") as file_obj: gen_encoder_source_file(ctx, Writer(file_obj)) print("Done generating test features files.")
39.048724
128
0.57148
2,090
16,830
4.396172
0.091388
0.096648
0.06367
0.037005
0.732695
0.666086
0.602743
0.555181
0.512407
0.450044
0
0.00683
0.251812
16,830
430
129
39.139535
0.72284
0.043791
0
0.45045
0
0
0.157878
0.026365
0
0
0
0
0
1
0.075075
false
0
0.03003
0.015015
0.168168
0.003003
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ebe8666cf6e33ea8f8a62d695ff363e1865d7f05
4,998
py
Python
game.py
TheAmmiR/snake-game
7a95a36c1ef7c0e9064bad3976f14b25bdb19f2a
[ "MIT" ]
null
null
null
game.py
TheAmmiR/snake-game
7a95a36c1ef7c0e9064bad3976f14b25bdb19f2a
[ "MIT" ]
null
null
null
game.py
TheAmmiR/snake-game
7a95a36c1ef7c0e9064bad3976f14b25bdb19f2a
[ "MIT" ]
null
null
null
import numpy import pygame import random from pygame import gfxdraw pygame.init() config_instance = open('settings.txt', 'r', encoding = 'utf-8') class Settings: def __init__(self, settings: dict): def str_to_rgb(sequence): r, g, b = sequence.split(' ') r, g, b = int(r), int(g), int(b) if (any([r not in range(0, 255), g not in range(0, 255), b not in range(0, 255)])): raise ValueError(f'You set wrong colour values, check your settings! ({r, g, b})') # wrong rgb color values return (r, g, b) setting_names = { 'size of cell': ('cellsize', int), 'size of grid': ('gridsize', int), 'snake colour': 'snake_color', 'apple colour': 'apple_color', 'default length': ('snake_len', int) } for key, value in settings.items(): if (setting_names.get(key)): if (isinstance(setting_names[key], tuple)): setattr(self, setting_names[key][0], setting_names[key][1](value)) else: setattr(self, setting_names[key], value) if (getattr(self, 'snake_color', None)): self.snake_color = str_to_rgb(self.snake_color) else: self.snake_color = (10, 240, 100) # default color if (getattr(self, 'apple_color', None)): self.apple_color = str_to_rgb(self.apple_color) else: self.apple_color = (240, 10, 10) # default color def file_handler(instance): text = instance.read().split('\n') settings = {} for line in text: line = line.split(' - ') line[0] = line[0].strip(); line[1] = line[1].strip() settings[line[0]] = line[1] return Settings(settings) settings = file_handler(config_instance) class Game: def __init__(self, settings): self.settings = settings self.clock = pygame.time.Clock() self.loop = False self.display = pygame.display.set_mode((self.settings.gridsize * self.settings.cellsize, self.settings.gridsize * self.settings.cellsize)) self.snake: list = [] self.apple: list = [] self.direction: str = 'right' middle = self.settings.gridsize // 2 xcoords = [middle + i for i in range(self.settings.snake_len)] ycoords = [middle for _ in range(self.settings.snake_len)] # default snake position for x, y in 
zip(xcoords, ycoords): self.snake.append((x, y)) pygame.display.set_caption('Snake Game') def start(self): self.loop = True self.spawn_apple() while (self.loop): for e in pygame.event.get(): if (e.type == pygame.QUIT): self.loop = False if (e.type == pygame.KEYDOWN): if (e.key in [pygame.K_w, pygame.K_UP] and self.direction != 'down'): self.direction = 'up' elif (e.key in [pygame.K_s, pygame.K_DOWN] and self.direction != 'up'): self.direction = 'down' elif (e.key in [pygame.K_d, pygame.K_RIGHT] and self.direction != 'left'): self.direction = 'right' elif (e.key in [pygame.K_a, pygame.K_LEFT] and self.direction != 'right'): self.direction = 'left' self.clock.tick(15) self.display.fill((0, 0, 0)) self.move_snake() self.draw() pygame.display.update() def move_snake(self): self.snake.pop(0) if (self.direction == 'left'): self.snake.append((self.snake[-1][0] - 1, self.snake[-1][1])) elif (self.direction == 'right'): self.snake.append((self.snake[-1][0] + 1, self.snake[-1][1])) elif (self.direction == 'up'): self.snake.append((self.snake[-1][0], self.snake[-1][1] - 1)) elif (self.direction == 'down'): self.snake.append((self.snake[-1][0], self.snake[-1][1] + 1)) if (self.snake[-1] == tuple(self.apple)): self.add_snakes_length(self.direction) self.spawn_apple() if (self.snake[-1] in self.snake[:-1]): self.loop = False print(f'You lose. Score: {len(self.snake) - self.settings.snake_len}') if (self.snake[-1][0] < 0 or self.snake[-1][1] < 0 or self.snake[-1][0] > self.settings.cellsize or self.snake[-1][1] > self.settings.cellsize): self.loop = False print(f'You lose. 
Score: {len(self.snake) - self.settings.snake_len}') def spawn_apple(self): in_snake = True while (in_snake): apple_x = random.randint(0, self.settings.gridsize - 1) apple_y = random.randint(0, self.settings.gridsize - 1) if ((apple_x, apple_y) not in self.snake and (apple_x, apple_y) != self.apple): in_snake = False self.apple = [apple_x, apple_y] def add_snakes_length(self, direction): if (direction == 'up'): self.snake.insert(0, (self.snake[0][0], self.snake[0][1] + 1)) elif (direction == 'down'): self.snake.insert(0, (self.snake[0][0], self.snake[0][1] - 1)) elif (direction == 'left'): self.snake.insert(0, (self.snake[0][0], self.snake[0][1] + 1)) elif (direction == 'right'): self.snake.insert(0, (self.snake[0][0], self.snake[0][1] - 1)) def draw(self): cellsize = self.settings.cellsize gfxdraw.box(self.display, (self.apple[0] * cellsize, self.apple[1] * cellsize, cellsize, cellsize), self.settings.apple_color) for x, y in self.snake: gfxdraw.box(self.display, (x * cellsize, y * cellsize, cellsize, cellsize), self.settings.snake_color) game = Game(settings) game.start()
33.543624
146
0.652461
772
4,998
4.130829
0.178756
0.118532
0.047037
0.027595
0.343995
0.240201
0.207275
0.158984
0.158984
0.158984
0
0.02553
0.169268
4,998
149
147
33.543624
0.742534
0.014606
0
0.121951
0
0
0.08352
0.009754
0
0
0
0
0
1
0.073171
false
0
0.03252
0
0.138211
0.01626
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ccd7c7cddab93c97ea1df7fdefa52d6f4a71efc0
484
py
Python
static/python/demo.py
Nota-Bene/Nota-Bene.github.io
57c0a25176627263bb9403e8f660d36cffa9882b
[ "MIT" ]
null
null
null
static/python/demo.py
Nota-Bene/Nota-Bene.github.io
57c0a25176627263bb9403e8f660d36cffa9882b
[ "MIT" ]
null
null
null
static/python/demo.py
Nota-Bene/Nota-Bene.github.io
57c0a25176627263bb9403e8f660d36cffa9882b
[ "MIT" ]
null
null
null
import time import random def parse(input): tokens = input.split(" ") parsedTokens = [] time.sleep(5) for i in range(200000): test = random.randint(1, 8) + random.randint(-4, 90) for token in tokens: if token == "": continue parsedTokens.append({ "text": token, "lemma": token, "pos": "verb", "decl": "3rd person singular future tense", "gloss": ["a test definition", "a second test definition"] }) return parsedTokens
23.047619
64
0.60124
59
484
4.932203
0.694915
0.089347
0
0
0
0
0
0
0
0
0
0.036011
0.254132
484
20
65
24.2
0.770083
0
0
0
0
0
0.204545
0
0
0
0
0
0
1
0.052632
false
0
0.105263
0
0.210526
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ccdb25e847708f6452a1e362c123c06d0c2c27e2
2,961
py
Python
pytorch/torchnet.py
sjliu68/Remote-Sensing-Image-Classification
9bd5ec28380961c9e66288dd75c998425622043e
[ "MIT" ]
32
2020-09-10T12:54:09.000Z
2022-03-21T08:55:29.000Z
pytorch/torchnet.py
sjliu68/Remote-Sensing-Image-Classification
9bd5ec28380961c9e66288dd75c998425622043e
[ "MIT" ]
null
null
null
pytorch/torchnet.py
sjliu68/Remote-Sensing-Image-Classification
9bd5ec28380961c9e66288dd75c998425622043e
[ "MIT" ]
19
2020-08-10T10:16:47.000Z
2022-02-17T06:52:14.000Z
# -*- coding: utf-8 -*- """ Created on Mon Jan 6 10:07:13 2020 @author: sjliu.me@gmail.com """ import torch import torch.nn as nn import torch.nn.functional as F class wcrn(nn.Module): def __init__(self, num_classes=9): super(wcrn, self).__init__() self.conv1a = nn.Conv2d(103,64,kernel_size=3,stride=1,padding=0,groups=1) self.conv1b = nn.Conv2d(103,64,kernel_size=1,stride=1,padding=0,groups=1) self.maxp1 = nn.MaxPool2d(kernel_size=3) self.maxp2 = nn.MaxPool2d(kernel_size=5) # self.bn1 = nn.BatchNorm2d(128,eps=0.001,momentum=0.9) self.bn1 = nn.BatchNorm2d(128) self.conv2a = nn.Conv2d(128,128,kernel_size=1,stride=1,padding=0,groups=1) self.conv2b = nn.Conv2d(128,128,kernel_size=1,stride=1,padding=0,groups=1) self.fc = nn.Linear(128, num_classes) # torch.nn.init.normal_(self.fc.weight, mean=0, std=0.01) def forward(self, x): out = self.conv1a(x) out1 = self.conv1b(x) out = self.maxp1(out) out1 = self.maxp2(out1) out = torch.cat((out,out1),1) out1 = self.bn1(out) out1 = nn.ReLU()(out1) out1 = self.conv2a(out1) out1 = nn.ReLU()(out1) out1 = self.conv2b(out1) out = torch.add(out,out1) out = out.reshape(out.size(0), -1) out = self.fc(out) return out class resnet99_avg(nn.Module): def __init__(self, num_classes=9): super(resnet99_avg, self).__init__() self.conv1a = nn.Conv2d(103,32,kernel_size=3,stride=1,padding=0,groups=1) self.conv1b = nn.Conv2d(103,32,kernel_size=3,stride=1,padding=0,groups=1) self.bn1 = nn.BatchNorm2d(64,eps=0.001,momentum=0.9) self.conv2a = nn.Conv2d(64,64,kernel_size=3,stride=1,padding=1,groups=1) self.conv2b = nn.Conv2d(64,64,kernel_size=3,stride=1,padding=1,groups=1) self.bn2 = nn.BatchNorm2d(64,eps=0.001,momentum=0.9) self.conv3a = nn.Conv2d(64,64,kernel_size=3,stride=1,padding=1,groups=1) self.conv3b = nn.Conv2d(64,64,kernel_size=3,stride=1,padding=1,groups=1) self.fc = nn.Linear(64, num_classes) def forward(self, x): x1 = self.conv1a(x) x2 = self.conv1b(x) x1 = torch.cat((x1,x2),axis=1) x2 = self.bn1(x1) x2 = nn.ReLU()(x2) x2 = self.conv2a(x2) 
x2 = nn.ReLU()(x2) x2 = self.conv2b(x2) x1 = torch.add(x1,x2) x2 = self.bn2(x1) x2 = nn.ReLU()(x2) x2 = self.conv3a(x2) x2 = nn.ReLU()(x2) x2 = self.conv3b(x2) x1 = torch.add(x1,x2) x1 = nn.AdaptiveAvgPool2d((1,1))(x1) x1 = x1.reshape(x1.size(0), -1) out = self.fc(x1) return out
32.538462
83
0.55049
440
2,961
3.625
0.190909
0.075235
0.087774
0.074608
0.602508
0.576176
0.488401
0.399373
0.399373
0.355486
0
0.121622
0.300236
2,961
90
84
32.9
0.648166
0.07126
0
0.229508
0
0
0
0
0
0
0
0
0
1
0.065574
false
0
0.04918
0
0.180328
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ccdce89434f75942295d9616fd82c9bb36f9f529
27
py
Python
src/lib/Bcfg2/Server/Reports/reports/__init__.py
amplify-education/bcfg2
02d7f574babfeb2da99e2aad3a92b4e8d6494f07
[ "mpich2" ]
null
null
null
src/lib/Bcfg2/Server/Reports/reports/__init__.py
amplify-education/bcfg2
02d7f574babfeb2da99e2aad3a92b4e8d6494f07
[ "mpich2" ]
null
null
null
src/lib/Bcfg2/Server/Reports/reports/__init__.py
amplify-education/bcfg2
02d7f574babfeb2da99e2aad3a92b4e8d6494f07
[ "mpich2" ]
null
null
null
__all__ = ['templatetags']
13.5
26
0.703704
2
27
7.5
1
0
0
0
0
0
0
0
0
0
0
0
0.111111
27
1
27
27
0.625
0
0
0
0
0
0.444444
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
ccdd11c45e65428dfc4eae76238a33996803a95a
12,491
py
Python
py/manipulation/props/parametric_object/parametric_object_test.py
LaudateCorpus1/dm_robotics
647bc810788c74972c1684a8d2e4d2dfd2791485
[ "Apache-2.0" ]
null
null
null
py/manipulation/props/parametric_object/parametric_object_test.py
LaudateCorpus1/dm_robotics
647bc810788c74972c1684a8d2e4d2dfd2791485
[ "Apache-2.0" ]
1
2021-10-05T16:07:56.000Z
2021-10-05T16:07:56.000Z
py/manipulation/props/parametric_object/parametric_object_test.py
LaudateCorpus1/dm_robotics
647bc810788c74972c1684a8d2e4d2dfd2791485
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for parametric_object.py.""" from absl.testing import absltest from absl.testing import parameterized from dm_robotics.manipulation.props.parametric_object import parametric_object class PropertyTest(parameterized.TestCase): def test_size_mismatch_in_init(self): with self.assertRaises(ValueError): _ = parametric_object.ParametricMinMaxBounds({ 'p': [[0, 255]], 'q': [[0, 255]], 'r': [[1, 2, 3]]}) with self.assertRaises(ValueError): _ = parametric_object.ParametricMinMaxBounds({ 'p': [[0, 255]], 'q': [[0, 255]], 'r': [[1]]}) with self.assertRaises(ValueError): _ = parametric_object.ParametricMinMaxBounds({ 'p': [[0, 255]], 'q': [[0, 255]], 'r': [[]]}) with self.assertRaises(ValueError): _ = parametric_object.ParametricMinMaxBounds({ 'p': [[0, 255]], 'q': [[0, 255]], 'r': []}) def test_check_instance_assertions(self): param_names = ('p', 'q', 'r') param_check = parametric_object.ParametricMinMaxBounds({ 'p': [[0, 255]], 'q': [[0, 255]], 'r': [[0, 255]]}) prop = parametric_object.ParametricProperties(param_names, param_check) prop.check_instance({'p': 122, 'q': 122, 'r': 122}) self.assertEqual(prop._param_names, ('p', 'q', 'r')) reply = prop.check_instance({'p': 500, 'q': 0, 'r': 0}) self.assertEqual(False, reply) reply = prop.check_instance({'p': 0, 'q': -500, 'r': 0}) self.assertEqual(False, reply) param_check = parametric_object.ParametricMinMaxBounds({ 'p': [[0, 255]], 'q': 
[['p', 'r']], 'r': [[0, 255]]}) prop = parametric_object.ParametricProperties(param_names, param_check) prop.check_instance({'p': 0, 'q': 122, 'r': 255}) reply = prop.check_instance({'p': 0, 'q': 255, 'r': 122}) self.assertEqual(False, reply) reply = prop.check_instance({'p': 122, 'q': 0, 'r': 255}) self.assertEqual(False, reply) with self.assertRaises(ValueError): prop.check_instance({'p': 0, 'q': 255}) param_names = ('p0', 'p1', 'p2') with self.assertRaises(ValueError): param_check = parametric_object.ParametricMinMaxBounds({ 'p0': [[0, 255]], 'p1': [[0, 255]], 'p2': [[0, 255]]}).check_instance def test_get_dict(self): names = ('first', 'second', 'third') checks = parametric_object.ParametricMinMaxBounds({ 'first': [[0, 255]], 'second': [[0, 255]], 'third': [[0, 255]]}) prop = parametric_object.ParametricProperties(names, checks) _ = prop.get_dict('first0_second0_third0') with self.assertRaises(ValueError): _ = prop.get_dict('first0_second0') with self.assertRaises(ValueError): _ = prop.get_dict('first0_second0_fourth0') with self.assertRaises(ValueError): _ = prop.get_dict('first0_second0_') def test_set_types(self): names = ('first', 'second', 'third') types = {'first': parametric_object.ParametersTypes.INTEGER, 'second': parametric_object.ParametersTypes.INTEGER, 'third': parametric_object.ParametersTypes.INTEGER} checks = parametric_object.ParametricMinMaxBounds({ 'first': [[0, 255]], 'second': [[0, 255]], 'third': [[0, 255]]}, types) prop = parametric_object.ParametricProperties(names, checks) reply = prop.check_instance({'first': 0, 'second': 255, 'third': 122}) self.assertEqual(True, reply) reply = prop.check_instance({'first': 0.0, 'second': 0.0, 'third': 0.0}) self.assertEqual(False, reply) prop_shape = parametric_object.ParametricProperties(names, checks) prop_texture = parametric_object.ParametricProperties(names, checks) prop = parametric_object.ParametricObject(prop_shape, prop_texture) reply = prop.check_instance({'first': 0, 'second': 255, 'third': 
122}, {'first': 0, 'second': 255, 'third': 122}) self.assertEqual(True, reply) names = ('first', 'second', 'third') types = {'first': parametric_object.ParametersTypes.FLOAT, 'second': parametric_object.ParametersTypes.FLOAT, 'third': parametric_object.ParametersTypes.FLOAT} checks = parametric_object.ParametricMinMaxBounds({ 'first': [[0, 255]], 'second': [[0, 255]], 'third': [[0, 255]]}, types) prop = parametric_object.ParametricProperties(names, checks) _ = prop.check_instance({'first': 0.0, 'second': 0.0, 'third': 0.0}) reply = prop.check_instance({'first': 0, 'second': 255, 'third': 122}) self.assertEqual(False, reply) types = {'first': parametric_object.ParametersTypes.FLOAT, 'second': parametric_object.ParametersTypes.INTEGER, 'third': parametric_object.ParametersTypes.FLOAT} checks = parametric_object.ParametricMinMaxBounds({ 'first': [[0, 255]], 'second': [[0, 255]], 'third': [[0, 255]]}, types) prop = parametric_object.ParametricProperties(names, checks) _ = prop.check_instance({'first': 0.0, 'second': 0, 'third': 0.0}) reply = prop.check_instance({'first': 0, 'second': 255, 'third': 122}) self.assertEqual(False, reply) names = ('p', 'q', 'r') checks = parametric_object.ParametricMinMaxBounds({ 'p': [[0, 255]], 'q': [[0, 255]], 'r': [[0, 255]]}) prop = parametric_object.ParametricProperties(names, checks) _ = prop.check_instance({'p': 0.0, 'q': 0, 'r': 0.0}) _ = prop.check_instance({'p': 0, 'q': 255.0, 'r': 122}) def test_parameters_min_max_tuples(self): # 0 <= p0, p1, p2 <=1 # 3 <= p0, p1, p2 <=4 names = ('first', 'second', 'third') checks = parametric_object.ParametricMinMaxBounds({ 'first': [[0, 1], [3, 4]], 'second': [[0, 1], [3, 4]], 'third': [[0, 1], [3, 4]]}) prop = parametric_object.ParametricProperties(names, checks) _ = prop.check_instance({'first': 0, 'second': 0, 'third': 0}) _ = prop.check_instance({'first': 1, 'second': 1, 'third': 1}) _ = prop.check_instance({'first': 3, 'second': 3, 'third': 3}) _ = prop.check_instance({'first': 4, 
'second': 4, 'third': 4}) reply = prop.check_instance({'first': 2, 'second': 2, 'third': 2}) self.assertEqual(False, reply) reply = prop.check_instance({'first': 2, 'second': 3, 'third': 3}) self.assertEqual(False, reply) reply = prop.check_instance({'first': 2, 'second': 3, 'third': 2}) self.assertEqual(False, reply) reply = prop.check_instance({'first': 3, 'second': 3, 'third': 2}) self.assertEqual(False, reply) reply = prop.check_instance({'first': 5, 'second': 3, 'third': 3}) self.assertEqual(False, reply) reply = prop.check_instance({'first': 1, 'second': 3, 'third': 3}) self.assertEqual(False, reply) # if a == 2, 0 <= b <= c, 0 <= c <=10 # if 3 <= a <= 10, 0 <= b <= 10, 0 <= c <=10 names = ('a', 'b', 'c') checks = parametric_object.ParametricMinMaxBounds({ 'a': [[2, 2], [3, 10]], 'b': [[0, 'c'], [0, 10]], 'c': [[0, 10], [0, 10]]}) prop = parametric_object.ParametricProperties(names, checks) # if a == 2, 0 <= b <= c, 0 <= c <=10 # if 3 <= a <= 10, 0 <= b <= 10, 0 <= c <=10 with self.assertRaises(ValueError): checks = parametric_object.ParametricMinMaxBounds({ 'a': [[2, 2], [3, 10]], 'b': [[0, 'c'], [0, 10]], 'c': [[0, 10]]}) _ = prop.check_instance({'a': 2, 'b': 2, 'c': 10}) _ = prop.check_instance({'a': 3, 'b': 5, 'c': 2}) reply = prop.check_instance({'a': 2, 'b': 5, 'c': 2}) self.assertEqual(False, reply) def test_add_parametric_dict(self): a = parametric_object.ParametersDict({'k1': 1, 'k2': 2}) b = parametric_object.ParametersDict({'k1': 3, 'k2': 4}) c = parametric_object.ParametersDict({'k3': 5, 'k4': 6}) d = parametric_object.ParametersDict({'k1': 7, 'k4': 8}) r = a + b self.assertEqual(r['k1'], 4) self.assertEqual(r['k2'], 6) with self.assertRaises(TypeError): r = a + 1 with self.assertRaises(ValueError): r = a + c with self.assertRaises(ValueError): r = a + d def test_sub_parametric_dict(self): a = parametric_object.ParametersDict({'k1': 1, 'k2': 2}) b = parametric_object.ParametersDict({'k1': 3, 'k2': 4}) c = parametric_object.ParametersDict({'k3': 5, 
'k4': 6}) d = parametric_object.ParametersDict({'k1': 7, 'k4': 8}) r = a - b self.assertEqual(r['k1'], -2) self.assertEqual(r['k2'], -2) with self.assertRaises(TypeError): r = a - 1 with self.assertRaises(ValueError): r = a - c with self.assertRaises(ValueError): r = a - d def test_mult_parametric_dict(self): a = parametric_object.ParametersDict({'k1': 1, 'k2': 2}) b = parametric_object.ParametersDict({'k1': 3, 'k2': 4}) r = a * 0.5 self.assertEqual(r['k1'], int(a['k1']*1/2)) self.assertEqual(r['k2'], int(a['k2']*1/2)) with self.assertRaises(TypeError): r = a * b with self.assertRaises(TypeError): r = 0.5 * b def test_truediv_parametric_dict(self): a = parametric_object.ParametersDict({'k1': 1, 'k2': 2}) b = parametric_object.ParametersDict({'k1': 3, 'k2': 4}) r = a // 2 self.assertEqual(r['k1'], int(a['k1'] // 2)) self.assertEqual(r['k2'], int(a['k2'] // 2)) with self.assertRaises(TypeError): r = a // b with self.assertRaises(TypeError): r = 0.5 // b def test_types_conversion(self): names = ('first', 'second', 'third') checks = parametric_object.ParametricMinMaxBounds({ 'first': [[0, 255]], 'second': [[0, 255]], 'third': [[0, 255]]}) prop = parametric_object.ParametricProperties(names, checks) dictionary = {'first': 0, 'second': 3, 'third': 2} param_dict = parametric_object.ParametersDict(dictionary) name = prop.get_name(param_dict) self.assertEqual(name, 'first0_second3_third2') dictionary = {'first': 0.0, 'second': 0.1, 'third': 2.0} param_dict = parametric_object.ParametersDict(dictionary) name = prop.get_name(param_dict) self.assertEqual(name, 'first0.0_second0.1_third2.0') dictionary = {'first': 1.0, 'second': 3.0, 'third': 4} param_dict = parametric_object.ParametersDict(dictionary) name = prop.get_name(param_dict) reconstruction = prop.get_dict(name) self.assertEqual(dictionary, reconstruction) def test_types_algebra(self): dictionary = {'first': 0, 'second': 3, 'third': 2} types = (parametric_object.ParametersTypes.INTEGER,)*3 param_dict = 
parametric_object.ParametersDict(dictionary, param_types=types) param_half = param_dict * 1.1 self.assertAlmostEqual(param_half['first'], 0) self.assertAlmostEqual(param_half['second'], 3) self.assertAlmostEqual(param_half['third'], 2) types = (parametric_object.ParametersTypes.FLOAT,)*3 param_dict = parametric_object.ParametersDict(dictionary, param_types=types) param_half = param_dict * 1.1 self.assertAlmostEqual(param_half['first'], 0) self.assertAlmostEqual(param_half['second'], 3.3) self.assertAlmostEqual(param_half['third'], 2.2) dictionary = {'first': 0, 'second': 3, 'third': 2} types = (parametric_object.ParametersTypes.INTEGER,)*3 param_dict = parametric_object.ParametersDict(dictionary, param_types=types) param_half = param_dict / 3 self.assertAlmostEqual(param_half['first'], 0) self.assertAlmostEqual(param_half['second'], int(3/3)) self.assertAlmostEqual(param_half['third'], int(2/3)) types = (parametric_object.ParametersTypes.FLOAT,)*3 param_dict = parametric_object.ParametersDict(dictionary, param_types=types) param_half = param_dict / 3 self.assertAlmostEqual(param_half['first'], 0) self.assertAlmostEqual(param_half['second'], float(3/3)) self.assertAlmostEqual(param_half['third'], float(2/3)) if __name__ == '__main__': absltest.main()
41.636667
80
0.629173
1,558
12,491
4.903081
0.105905
0.134049
0.064537
0.048959
0.807566
0.775101
0.737138
0.693415
0.67679
0.608195
0
0.052193
0.193179
12,491
299
81
41.77592
0.705795
0.063966
0
0.558333
0
0
0.081076
0.007799
0
0
0
0
0.254167
1
0.045833
false
0
0.0125
0
0.0625
0
0
0
0
null
0
0
0
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
4
ccdd4e7946bbbb66bc7ecddf26b85179c631159d
1,012
py
Python
rpmreq/actions.py
softwarefactory-project/rpmreq
b9b30cf6a184929db23ac86c8cc037592ee8b6be
[ "Apache-2.0" ]
null
null
null
rpmreq/actions.py
softwarefactory-project/rpmreq
b9b30cf6a184929db23ac86c8cc037592ee8b6be
[ "Apache-2.0" ]
null
null
null
rpmreq/actions.py
softwarefactory-project/rpmreq
b9b30cf6a184929db23ac86c8cc037592ee8b6be
[ "Apache-2.0" ]
1
2019-03-10T10:07:04.000Z
2019-03-10T10:07:04.000Z
import hawkey import logging from rpmreq import graph from rpmreq import query log = logging.getLogger(__name__) def build_requires(specs, repos, base_repos=None, out_data=None, out_image=None, cache_ttl=3600): dep_graph = graph.build_requires_graph( specs=specs, repos=repos, base_repos=base_repos, cache_ttl=cache_ttl) graph.break_dep_graph_cycles(dep_graph) if out_data or out_image: graph.dump_dep_graph(dep_graph, out_data=out_data, out_image=out_image) return graph.parse_dep_graph(dep_graph) def last_version(dep, repos): """ Return latest package meeting dep or latest version of dep regardless of version range. :param dep: dependency to meet :param repos: repos to query :return: DepQueryResult, see rpmreq.query.query_dep """ sack = query.fetch_repos_sack(repos) q = hawkey.Query(sack) return query.query_dep(q, dep)
27.351351
57
0.666008
137
1,012
4.649635
0.350365
0.087912
0.065934
0.050235
0
0
0
0
0
0
0
0.005355
0.261858
1,012
36
58
28.111111
0.84739
0.197628
0
0
0
0
0
0
0
0
0
0
0
1
0.095238
false
0
0.190476
0
0.380952
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ccde37610c8f0bf0da8d0c2c4fba8732e91c7b0e
1,033
py
Python
app/utils/path_utils.py
Tim-ty-tang/mlflow-fastapi-deploy
c8884a0462fc9f1ce3aa47f9d000af2bffa82123
[ "MIT" ]
null
null
null
app/utils/path_utils.py
Tim-ty-tang/mlflow-fastapi-deploy
c8884a0462fc9f1ce3aa47f9d000af2bffa82123
[ "MIT" ]
null
null
null
app/utils/path_utils.py
Tim-ty-tang/mlflow-fastapi-deploy
c8884a0462fc9f1ce3aa47f9d000af2bffa82123
[ "MIT" ]
null
null
null
from mlflow.tracking import MlflowClient from urllib.parse import urlparse def get_prod_path_mlflow_model_mlflow_query(model_name, version, new_bucket, new_path): client = MlflowClient() artifact_path_original = None for mv in client.search_model_versions(f"name='{model_name}'"): if mv.version == str(version): artifact_path_original = mv.source new_mflow_path = None if artifact_path_original: if new_bucket and new_path: o = urlparse(artifact_path_original, allow_fragments=False) new_mflow_path = f"s3://{new_bucket.strip('/')}/{new_path.strip('/')}/{o.path.strip('/')}" return {"old_mlflow_path": artifact_path_original, "new_mflow_path": new_mflow_path} def get_prod_path_mlflow_model_explicit(model_name, version, new_bucket, new_path): new_mflow_path = f"s3://{new_bucket.strip('/')}/{new_path.strip('/')}/{model_name}/{version}" return {"old_mlflow_path": None, "new_mflow_path": new_mflow_path}
38.259259
110
0.693127
140
1,033
4.714286
0.3
0.084848
0.127273
0.072727
0.369697
0.369697
0.221212
0.124242
0.124242
0.124242
0
0.002378
0.185866
1,033
26
111
39.730769
0.782402
0
0
0.105263
0
0
0.213385
0.1387
0
0
0
0
0
1
0.105263
false
0
0.105263
0
0.315789
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ccde416df2e474e2d91671ed935f8eaa2f12d8eb
5,350
py
Python
25h8_service.py
openprocurement/robot_tests.broker.25h8
619ffd180a8f051ef46d62767d54f4796baa122c
[ "Apache-2.0" ]
null
null
null
25h8_service.py
openprocurement/robot_tests.broker.25h8
619ffd180a8f051ef46d62767d54f4796baa122c
[ "Apache-2.0" ]
1
2017-12-18T13:44:01.000Z
2017-12-18T13:44:01.000Z
25h8_service.py
openprocurement/robot_tests.broker.25h8
619ffd180a8f051ef46d62767d54f4796baa122c
[ "Apache-2.0" ]
3
2018-06-11T10:30:05.000Z
2019-08-07T07:55:40.000Z
#!/usr/bin/python # -*- coding: utf-8 -*- from datetime import datetime, timedelta from iso8601 import parse_date from pytz import timezone import urllib import json import os def convert_time(date): date = datetime.strptime(date, "%d/%m/%Y %H:%M:%S") return timezone('Europe/Kiev').localize(date).strftime('%Y-%m-%dT%H:%M:%S.%f%z') def subtract_min_from_date(date, minutes): date_obj = datetime.strptime(date.split("+")[0], '%Y-%m-%dT%H:%M:%S.%f') return "{}+{}".format(date_obj - timedelta(minutes=minutes), date.split("+")[1]) def convert_datetime_to_25h8_format(isodate): iso_dt = parse_date(isodate) day_string = iso_dt.strftime("%d/%m/%Y %H:%M") return day_string def convert_string_from_dict_25h8(string): return { u"грн.": u"UAH", u"True": u"1", u"False": u"0", u"Відкриті торги": u"aboveThresholdUA", u"Відкриті торги з публікацією англ. мовою": u"aboveThresholdEU", u'Код ДК 021-2015 (CPV)': u'CPV', u'Код ДК (ДК003)': u'ДК003', u'Код ДК (ДК018)': u'ДК018', u'з урахуванням ПДВ': True, u'з ПДВ': True, u'без урахуванням ПДВ': False, u'ОЧIКУВАННЯ ПРОПОЗИЦIЙ': u'active.tendering', u'ПЕРIОД УТОЧНЕНЬ': u'active.enquiries', u'АУКЦIОН': u'active.auction', u'ПРЕКВАЛІФІКАЦІЯ': u'active.pre-qualification', u'ОСКАРЖЕННЯ ПРЕКВАЛІФІКАЦІЇ': u'active.pre-qualification.stand-still', u'вимога': u'claim', u'дано відповідь': u'answered', u'вирішено': u'resolved', u'Так': True, u'Ні': False, u'на розглядi': u'pending', u'На розгляді': u'pending', u'не вирішено(обробляється)': u'pending', u'відмінено': u'cancelled', u'відмінена': u'cancelled', u'Переможець': u'active', }.get(string, string) def adapt_procuringEntity(role_name, tender_data): if role_name == 'tender_owner': tender_data['data']['procuringEntity']['name'] = u"Ольмек" tender_data['data']['procuringEntity']['address']['postalCode'] = u"01100" tender_data['data']['procuringEntity']['address']['region'] = u"місто Київ" tender_data['data']['procuringEntity']['address']['locality'] = u"Київ" 
tender_data['data']['procuringEntity']['address']['streetAddress'] = u"вул. Фрунзе 77" tender_data['data']['procuringEntity']['identifier']['legalName'] = u"Ольмек" tender_data['data']['procuringEntity']['identifier']['id'] = u"01234567" if tender_data['data'].has_key('procurementMethodType'): if "above" in tender_data['data']['procurementMethodType']: tender_data['data']['tenderPeriod']['startDate'] = subtract_min_from_date( tender_data['data']['tenderPeriod']['startDate'], 1) return tender_data def adapt_delivery_data(tender_data): for index in range(len(tender_data['data']['items'])): value = tender_data['data']['items'][index]['deliveryAddress']['region'] if value == u"місто Київ": tender_data['data']['items'][index]['deliveryAddress']['region'] = u"Київ" return tender_data def adapt_view_data(value, field_name): if 'value.amount' in field_name: value = float(value.split(' ')[0]) elif 'currency' in field_name: value = value.split(' ')[1] elif 'valueAddedTaxIncluded' in field_name: value = ' '.join(value.split(' ')[2:]) elif 'minimalStep.amount' in field_name: value = float(value.split(' ')[0]) elif 'unit.name' in field_name: value = value.split(' ')[1] elif 'quantity' in field_name: value = float(value.split(' ')[0]) elif 'questions' in field_name and '.date' in field_name: value = convert_time(value.split(' - ')[0]) elif 'Date' in field_name: value = convert_time(value) return convert_string_from_dict_25h8(value) def adapt_view_item_data(value, field_name): if 'unit.name' in field_name: value = ' '.join(value.split(' ')[1:]) elif 'quantity' in field_name: value = float(value.split(' ')[0]) elif 'Date' in field_name: value = convert_time(value) return convert_string_from_dict_25h8(value) def get_related_elem_description(tender_data, feature, item_id): if item_id == "": for elem in tender_data['data']['{}s'.format(feature['featureOf'])]: if feature['relatedItem'] == elem['id']: return elem['description'] else: return item_id def custom_download_file(url, 
file_name, output_dir): urllib.urlretrieve(url, ('{}/{}'.format(output_dir, file_name))) def add_second_sign_after_point(amount): amount = str(repr(amount)) if '.' in amount and len(amount.split('.')[1]) == 1: amount += '0' return amount def get_bid_phone(internal_id, bid_index): r = urllib.urlopen('https://lb.api-sandbox.openprocurement.org/api/2.3/tenders/{}'.format(internal_id)).read() tender = json.loads(r) bid_id = tender['data']['qualifications'][int(bid_index)]["bidID"] for bid in tender['data']['bids']: if bid['id'] == bid_id: return bid['tenderers'][0]['contactPoint']['telephone'] def get_upload_file_path(): return os.path.join(os.getcwd(), 'src/robot_tests.broker.25h8/testFileForUpload.txt')
37.152778
114
0.629533
697
5,350
4.677188
0.308465
0.067485
0.064417
0.053988
0.328221
0.240798
0.184356
0.136503
0.112577
0.112577
0
0.017294
0.200187
5,350
144
115
37.152778
0.744566
0.007103
0
0.13913
0
0
0.281868
0.04067
0
0
0
0
0
1
0.113043
false
0
0.052174
0.017391
0.278261
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ccdea5fc87e2dce7da82287cb335d1d1997b89b2
125
py
Python
barbearia/email_config.py
FabioMarquesArao/OPE_BARBEARIA
867e7d4b67d9d70b6056b2d817cd3d2561ca7131
[ "MIT" ]
null
null
null
barbearia/email_config.py
FabioMarquesArao/OPE_BARBEARIA
867e7d4b67d9d70b6056b2d817cd3d2561ca7131
[ "MIT" ]
null
null
null
barbearia/email_config.py
FabioMarquesArao/OPE_BARBEARIA
867e7d4b67d9d70b6056b2d817cd3d2561ca7131
[ "MIT" ]
null
null
null
EMAIL_ADDRESS = 'domfigarobarbearia@gmail.com' EMAIL_PASSWORD = 'barbeariadomfigaro' HEROKU_PASSWORD = "Barbeariadomfigaro!"
31.25
46
0.832
11
125
9.181818
0.727273
0.514851
0
0
0
0
0
0
0
0
0
0
0.072
125
4
47
31.25
0.87069
0
0
0
0
0
0.515873
0.222222
0
0
0
0
0
1
0
false
0.666667
0
0
0
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
4
ccdfefff2e5b0b2164f247904feff97a33210609
47
py
Python
src/week_5/evaluation/__init__.py
Gost2017/text-analytics-lecture
a51976ba7cf7a992af4c0fba52dc9eef57a8155b
[ "MIT" ]
2
2018-03-05T20:45:11.000Z
2019-03-17T02:59:57.000Z
src/week_5/evaluation/__init__.py
Gost2017/text-analytics-lecture
a51976ba7cf7a992af4c0fba52dc9eef57a8155b
[ "MIT" ]
null
null
null
src/week_5/evaluation/__init__.py
Gost2017/text-analytics-lecture
a51976ba7cf7a992af4c0fba52dc9eef57a8155b
[ "MIT" ]
8
2018-03-02T14:12:35.000Z
2018-04-21T10:31:25.000Z
''' Created on 15/03/2018 @author: pelejaf '''
9.4
21
0.638298
7
47
4.285714
1
0
0
0
0
0
0
0
0
0
0
0.2
0.148936
47
5
22
9.4
0.55
0.829787
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
cce06a46f4f819aee94f70073960cb4bc2718754
76
py
Python
config/prod.py
Cjwpython/WordlessBook
3426ccf3ab2f8848caef98bbc7635407774d32b2
[ "MIT" ]
2
2021-05-19T10:53:25.000Z
2022-01-20T01:20:08.000Z
config/prod.py
Cjwpython/WordlessBook
3426ccf3ab2f8848caef98bbc7635407774d32b2
[ "MIT" ]
null
null
null
config/prod.py
Cjwpython/WordlessBook
3426ccf3ab2f8848caef98bbc7635407774d32b2
[ "MIT" ]
1
2022-01-20T01:19:56.000Z
2022-01-20T01:19:56.000Z
# coding: utf-8 from config.base import * DEBUG = False SERVER_PORT = 8899
12.666667
25
0.723684
12
76
4.5
1
0
0
0
0
0
0
0
0
0
0
0.080645
0.184211
76
5
26
15.2
0.790323
0.171053
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
cce181f9f38d4d3c462cfc4fd68ac4c8d8aebe76
6,574
py
Python
connect_four.py
seanstappas/dynamic-connect-4
f6106f71ac8779513cd80a2f46397bb778e21018
[ "MIT" ]
1
2020-08-21T03:05:08.000Z
2020-08-21T03:05:08.000Z
connect_four.py
seanstappas/dynamic-connect-4
f6106f71ac8779513cd80a2f46397bb778e21018
[ "MIT" ]
null
null
null
connect_four.py
seanstappas/dynamic-connect-4
f6106f71ac8779513cd80a2f46397bb778e21018
[ "MIT" ]
null
null
null
from __future__ import print_function NUM_ROWS = 7 NUM_COLS = 7 DIRECTIONS = ('E', 'W', 'N', 'S') MOVEMENT_DIFFS = { 'N': (0, -1), 'S': (0, 1), 'E': (1, 0), 'W': (-1, 0) } X_MOVEMENT_DIFFS = { 'N': 0, 'S': 0, 'E': 1, 'W': -1 } Y_MOVEMENT_DIFFS = { 'N': -1, 'S': 1, 'E': 0, 'W': 0 } def actions_and_successors(state, white_player=True): """ Returns a list of action, successor tuples resulting from the given state. :param state: the state to get successors of :param white_player: True if the current player is white, False otherwise :return: a list of action, successor tuples resulting from the given state. """ return [(a, result(state, a, white_player)) for a in actions(state, white_player)] def print_state(state): """ Prints the given state. :param state: the state to print """ print(' ', end=' ') for col in range(NUM_COLS): print(col + 1, end=' ') print() for row in range(NUM_ROWS): print(row + 1, end=' ') for col in range(NUM_COLS): if (col + 1, row + 1) in state[0]: print('O', end='') elif (col + 1, row + 1) in state[1]: print('X', end='') else: print(' ', end='') if col < NUM_COLS - 1: print(',', end='') print() def str_to_state(str_state): """ Returns a state corresponding to the provided string representation. 
Here is an example of a valid state: , , , , , ,X , , , , ,X, , , , , ,O,X ,X,O, , , ,X , , , , ,O, ,O,X, , , , O, , , ,O, , :param str_state: a string representation of the board :return: the corresponding state """ white_squares = [] black_squares = [] y = 1 for row in str_state.splitlines(): x = 1 for square in row.split(','): if square == ',': continue if square == 'O': white_squares.append((x, y)) elif square == 'X': black_squares.append((x, y)) x += 1 y += 1 return tuple(white_squares), tuple(black_squares) def is_within_bounds(x, y): """ :return: True if the given x, y coordinates are within the bounds of the board """ return 0 < x <= NUM_COLS and 0 < y <= NUM_ROWS def is_free_square(state, x, y): """ :return: True if the given x, y coordinates are free spots, given the provided state """ return (x, y) not in state[0] and (x, y) not in state[1] def is_valid_action(state, x, y, direction): """ Checks if moving the piece at given x, y coordinates in the given direction is valid, given the current state. :param state: the current state :param x: the x coordinate of the piece :param y: the y coordinate of the piece :param direction: the direction to travel with this action :return: True if the action is valid, False otherwise """ new_x = x + X_MOVEMENT_DIFFS[direction] new_y = y + Y_MOVEMENT_DIFFS[direction] return is_within_bounds(new_x, new_y) and is_free_square(state, new_x, new_y) def occupied_squares_by_player(state, white_player): """ Returns the the x, y coordinates of the squares occupied by the given player. :param state: the given state :param white_player: True if the current player is white, False otherwise :return: the x, y coordinates of the squares occupied by the given player. """ return state[0] if white_player else state[1] def actions(state, white_player=True): """ Returns the actions available to the given player in the given state. 
:param state: the current state :param white_player: True if the current player is white, False otherwise :return: the actions available to the given player in the given state """ return [(x, y, direction) for (x, y) in occupied_squares_by_player(state, white_player) for direction in DIRECTIONS if is_valid_action(state, x, y, direction)] def action_str_to_tuple(a): """ Converts the provided action string to a tuple :param a: the action, in string form. For example: '13E'. :return: the action in tuple form """ if a is not None and '1' <= a[0] <= '7' and '1' <= a[1] <= '7' and a[2] in DIRECTIONS: return int(a[0]), int(a[1]), a[2] else: return None def action_tuple_to_str(action): """ Converts the provided action tuple to a string. :param action: the action :return: a string representation of the action tuple """ if action is None: return None return str(action[0]) + str(action[1]) + action[2] def result(state, action, white_player=True): """ Returns the resulting state when the given action is applied to the given state. :param state: the current state :param action: the action to apply :param white_player: True if the current player is white, False otherwise :return: the resulting state when the given action is applied to the given state """ if white_player: return result_tuple(state, action, white_player), state[1] else: return state[0], result_tuple(state, action, white_player) def result_tuple(s, a, white_player): """ Returns the x, y coordinates of the pieces of the given player when the given action is applied to the given state. 
:param s: the current state :param a: the action to apply :param white_player: True if the current player is white, False otherwise :return: the x, y coordinates of the pieces of the given player when the given action is applied to the given state """ old_x = a[0] old_y = a[1] direction = a[2] new_x = old_x + X_MOVEMENT_DIFFS[direction] new_y = old_y + Y_MOVEMENT_DIFFS[direction] return tuple((x, y) if x != old_x or y != old_y else (new_x, new_y) for (x, y) in occupied_squares_by_player(s, white_player)) def file_to_state(file_name): """ Converts the board given by the provided file to a state. Here is an example of a valid state: , , , , , ,X , , , , ,X, , , , , ,O,X ,X,O, , , ,X , , , , ,O, ,O,X, , , , O, , , ,O, , :param file_name: the name of the file containing the state :return: a state corresponding to the board """ with open(file_name, 'r') as state_file: string_state = state_file.read() state = str_to_state(string_state) return state
29.479821
119
0.605872
979
6,574
3.95097
0.117467
0.04757
0.033609
0.027921
0.505946
0.441313
0.400982
0.33635
0.307135
0.295502
0
0.012102
0.283541
6,574
222
120
29.612613
0.80913
0.434134
0
0.111111
0
0
0.009868
0
0
0
0
0
0
1
0.131313
false
0
0.010101
0
0.292929
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cce3ac8194087917625821b84302b29b6337c46a
419
py
Python
web/src/schemas/database.py
disteLLL/saltyboy
d1c777c31d82f3b6e2126d8170446d9b028c37ab
[ "MIT" ]
null
null
null
web/src/schemas/database.py
disteLLL/saltyboy
d1c777c31d82f3b6e2126d8170446d9b028c37ab
[ "MIT" ]
null
null
null
web/src/schemas/database.py
disteLLL/saltyboy
d1c777c31d82f3b6e2126d8170446d9b028c37ab
[ "MIT" ]
null
null
null
from dataclasses import dataclass from typing import Dict from dataclasses_jsonschema import JsonSchemaMixin @dataclass class DatabaseStatsBreakdown(JsonSchemaMixin): """Database stats broken down by tier""" breakdown: Dict[str, int] total: int @dataclass class DatabaseStatsSchema(JsonSchemaMixin): """All database stats""" matches: DatabaseStatsBreakdown fighters: DatabaseStatsBreakdown
22.052632
50
0.78043
40
419
8.15
0.6
0.092025
0
0
0
0
0
0
0
0
0
0
0.155131
419
18
51
23.277778
0.920904
0.126492
0
0.181818
0
0
0
0
0
0
0
0
0
1
0
true
0
0.272727
0
0.818182
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
1
0
0
2
cce3bf69f8b9a7979f41b539d693ed801301a438
3,781
py
Python
pyampute/tests/test_mapping.py
RianneSchouten/pyampute
98de0d5591546f958b0106217f60df92dc00fbb9
[ "BSD-3-Clause" ]
3
2022-02-14T02:02:23.000Z
2022-02-20T09:52:41.000Z
pyampute/tests/test_mapping.py
flacle/pyampute
8785f62c52a762dfc3113abe3610ba4893ef5f4b
[ "BSD-3-Clause" ]
24
2022-01-26T15:42:13.000Z
2022-03-12T15:49:56.000Z
pyampute/tests/test_mapping.py
flacle/pyampute
8785f62c52a762dfc3113abe3610ba4893ef5f4b
[ "BSD-3-Clause" ]
1
2022-02-15T19:15:42.000Z
2022-02-15T19:15:42.000Z
import numpy as np import pandas as pd import unittest from pyampute.ampute import MultivariateAmputation from pyampute.exploration.md_patterns import mdPatterns class TestMapping(unittest.TestCase): ''' This class tests the example code in the blogpost "A mapping from R-function ampute to pyampute" ''' def setUp(self) -> None: super().setUp() self.n = 10000 self.nhanes2_sim = np.random.randn(10000, 4) try: self.nhanes2_orig = pd.read_csv("data/nhanes2.csv") except: print("CSV file failed to load.") def test_patterns(self): mdp = mdPatterns() mypatterns = mdp.get_patterns(self.nhanes2_orig, show_plot=False) self.assertEqual(mypatterns.shape, (6, 6)) self.assertListEqual( mypatterns.iloc[1:-1, 1:-1].values.tolist(), [[1, 1, 1, 0], [1, 1, 0, 1], [1, 0, 0, 1], [1, 0, 0, 0]]) ma = MultivariateAmputation( patterns=[ {'incomplete_vars': [3]}, {'incomplete_vars': [2]}, {'incomplete_vars': [1, 2]}, {'incomplete_vars': [1, 2, 3]} ] ) nhanes2_incomplete = ma.fit_transform(self.nhanes2_sim) mdp = mdPatterns() mypatterns = mdp.get_patterns(nhanes2_incomplete, show_plot=False) self.assertEqual(mypatterns.shape, (6, 6)) self.assertListEqual( mypatterns["n_missing_values"].values[:-1].astype(int).tolist(), [0, 1, 1, 2, 3]) def test_proportions(self): ma = MultivariateAmputation( patterns=[ {'incomplete_vars': [3], 'freq': 0.1}, {'incomplete_vars': [2], 'freq': 0.6}, {'incomplete_vars': [1, 2], 'freq': 0.2}, {'incomplete_vars': [1, 2, 3], 'freq': 0.1} ], prop=0.3) nhanes2_incomplete = ma.fit_transform(self.nhanes2_sim) mdp = mdPatterns() mypatterns = mdp.get_patterns(nhanes2_incomplete, show_plot=False) self.assertListEqual( mypatterns.columns.values.tolist(), ["row_count", 0, 3, 1, 2, "n_missing_values"] ) self.assertAlmostEqual( mypatterns.loc[1, "row_count"], 0.3 * 0.6 * self.n, delta=0.05 * self.n, ) def test_mechanisms(self): ma = MultivariateAmputation( patterns=[ {'incomplete_vars': [3], 'mechanism': "MCAR"}, {'incomplete_vars': [2]}, {'incomplete_vars': [1, 2], 
'mechanism': "MNAR"}, {'incomplete_vars': [1, 2, 3]} ] ) nhanes2_incomplete = ma.fit_transform(self.nhanes2_sim) self.assertEqual(ma.patterns[0]['mechanism'], "MCAR") self.assertEqual(ma.patterns[2]['mechanism'], "MNAR") self.assertListEqual(ma.mechanisms.tolist(), ["MCAR", "MAR", "MNAR", "MAR"]) def test_weights(self): ma = MultivariateAmputation( patterns=[ {'incomplete_vars': [3], 'weights': [0, 4, 1, 0]}, {'incomplete_vars': [2]}, {'incomplete_vars': [1, 2], 'mechanism': "MNAR"}, {'incomplete_vars': [1, 2, 3], 'weights': {0: -2, 3: 1}, 'mechanism': "MAR+MNAR"} ] ) nhanes2_incomplete = ma.fit_transform(self.nhanes2_sim) mdp = mdPatterns() mypatterns = mdp.get_patterns(nhanes2_incomplete, show_plot=False) self.assertListEqual( ma.weights.tolist(), [[0, 4, 1, 0], [1, 1, 0, 1], [0, 1, 1, 0], [-2, 0, 0, 1]] ) self.assertTrue(len(ma.wss_per_pattern), 4) if __name__ == "__main__": unittest.main()
31.773109
100
0.539275
420
3,781
4.702381
0.242857
0.113418
0.060759
0.06481
0.508354
0.503797
0.444557
0.351392
0.351392
0.351392
0
0.051056
0.311029
3,781
118
101
32.042373
0.707102
0.02539
0
0.359551
0
0
0.126465
0
0
0
0
0
0.123596
1
0.05618
false
0
0.05618
0
0.123596
0.011236
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cce820db4737d5f4fc516df5c5d36a6fb126649f
1,560
py
Python
classification.py
alisher0717/one-shot-learning
b1a0814df045e4a9c3bf8e0d7cbc7a0979e9ceb0
[ "MIT" ]
null
null
null
classification.py
alisher0717/one-shot-learning
b1a0814df045e4a9c3bf8e0d7cbc7a0979e9ceb0
[ "MIT" ]
null
null
null
classification.py
alisher0717/one-shot-learning
b1a0814df045e4a9c3bf8e0d7cbc7a0979e9ceb0
[ "MIT" ]
null
null
null
from __future__ import absolute_import from __future__ import print_function import numpy as np import random import cv2 from keras.utils import to_categorical from keras.models import Model from keras.layers import Dropout, Lambda, Dense, Conv2D, Flatten, Input, MaxPooling2D from keras.optimizers import RMSprop from keras import backend as K import os from numpy.random import permutation class Classification: def __init__(self, x_train, y_train, x_test, y_test, input_shape, num_classes): self.x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], 1) self.y_train = to_categorical(y_train) self.x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], 1) self.y_test = to_categorical(y_test) self.input_shape = input_shape self.num_classes = num_classes def build_model(self): input = Input(shape=(self.input_shape[0], self.input_shape[1], 1)) x = Flatten()(input) x = Dense(128, activation='relu')(x) x = Dropout(0.1)(x) x = Dense(128, activation='relu')(x) x = Dropout(0.1)(x) x = Dense(128, activation='relu')(x) x = Dense(self.num_classes, activation='softmax')(x) model = Model(input, x) return model def train(self, epochs): model = self.build_model() model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) model.fit(self.x_train, self.y_train, validation_data=(self.x_test, self.y_test), epochs=epochs)
38.04878
104
0.688462
233
1,560
4.386266
0.274678
0.041096
0.029354
0.055773
0.116438
0.092955
0.092955
0.092955
0.092955
0.092955
0
0.021514
0.195513
1,560
41
104
38.04878
0.792829
0
0
0.142857
0
0
0.035234
0.015375
0
0
0
0
0
1
0.085714
false
0
0.342857
0
0.485714
0.028571
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
cce86a805da411d647ff0f543066e46f420f6023
596
py
Python
setup.py
holtrop/simple-svn-browser
62641493b95535bf0efb9855fec3dd496b37764e
[ "Zlib" ]
null
null
null
setup.py
holtrop/simple-svn-browser
62641493b95535bf0efb9855fec3dd496b37764e
[ "Zlib" ]
null
null
null
setup.py
holtrop/simple-svn-browser
62641493b95535bf0efb9855fec3dd496b37764e
[ "Zlib" ]
null
null
null
import sys if sys.version_info < (3, 5): print("Python 3.5 is required for this package") sys.exit(1) from setuptools import setup setup(name = "simplesvnbrowser", version = "0.0.1", description = "A simple subversion repository browser application", url = "https://github.com/holtrop/simple-svn-browser", author = "Josh Holtrop", author_email = "jholtrop@gmail.com", license = "zlib", packages = ["simplesvnbrowser"], zip_safe = False, scripts = ["bin/simple-svn-browser"], install_requires = ["pygobject", "pyxdg"], )
29.8
73
0.634228
70
596
5.342857
0.742857
0.010695
0.085562
0
0
0
0
0
0
0
0
0.017391
0.228188
596
19
74
31.368421
0.795652
0
0
0
0
0
0.404362
0.036913
0
0
0
0
0
1
0
true
0
0.117647
0
0.117647
0.058824
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
cce90a7d4c65cdb80621e7e28495129d068861f2
2,092
py
Python
Synchro-Update-Vols.py
Apoorb/HCS_Synchro-Reader
d89428069584f420d584e2011a5cf21cd0a51f8b
[ "MIT" ]
null
null
null
Synchro-Update-Vols.py
Apoorb/HCS_Synchro-Reader
d89428069584f420d584e2011a5cf21cd0a51f8b
[ "MIT" ]
null
null
null
Synchro-Update-Vols.py
Apoorb/HCS_Synchro-Reader
d89428069584f420d584e2011a5cf21cd0a51f8b
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Wed Oct 30 10:02:36 2019 @author: abibeka Purpose: Batch update synchro volumes """ # 0.0 Housekeeping. Clear variable space #****************************************************************************************** from IPython import get_ipython #run magic commands ipython = get_ipython() ipython.magic("reset -f") ipython = get_ipython() import os import pandas as pd import numpy as np import csv os.chdir(r'C:\Users\abibeka\OneDrive - Kittelson & Associates, Inc\Documents\RampMetering\operations\Synchro') # Read the volume data dat = pd.read_csv('VOLUME.CSV',skiprows=2) dat.fillna('',inplace=True) dat2 = dat dat2 = dat2.drop(columns = 'DATE') dat2.rename(columns = {'TIME': 'RECORDNAME'},inplace=True) dat2.RECORDNAME = 'Volume' # Scale the volume data #Number of Years = 2040 - 2016 NumYears = 2040 - 2016 GrowthRates = [0,1,2] # percent per year NetGrowthCalc = lambda x: (1+x/100)**NumYears NetGrowthRate = list(map(NetGrowthCalc,GrowthRates)) NetGrowthRate def Output2040Vols(datCp = dat2, NetGrowthRt = 1): datCp.iloc[:,2:] = datCp.iloc[:,2:].applymap(lambda x: x if not x else round(x*NetGrowthRt)) #Change volume data and columns to list --- so it can be written dat2Write = datCp.values.tolist() #Read the two 2 lines of the csv file separately with open('VOLUME.csv', 'r') as readFile: reader = csv.reader(readFile) lines = list(reader) Header = lines[0:3] Header[0] = ['[Lanes]'] Header[1] =['Lane Group Data'] Header[2][0] = 'RECORDNAME' Header[2].remove('TIME') #Write the top 2 lines of the csv file, column name and data with open('Volume2040_NetGrwRt_{}.csv'.format(round(NetGrowthRt,2)), 'w', newline = '') as writeFile: writer = csv.writer(writeFile) writer.writerows(Header) writer.writerows(dat2Write) writeFile.close() Output2040Vols(datCp = dat2, NetGrowthRt = NetGrowthRate[0]) Output2040Vols(datCp = dat2, NetGrowthRt = NetGrowthRate[1]) Output2040Vols(datCp = dat2, NetGrowthRt = NetGrowthRate[2])
31.223881
110
0.663002
273
2,092
5.058608
0.494505
0.055033
0.066618
0.098479
0.128168
0.026068
0
0
0
0
0
0.049397
0.167782
2,092
66
111
31.69697
0.743825
0.25
0
0.051282
0
0.025641
0.137864
0.062136
0
0
0
0
0
1
0.025641
false
0
0.128205
0
0.153846
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ccea292c2e611183f7753e3277974ac6da80216f
5,695
py
Python
matplotlib.indigoPlugin/Contents/Server Plugin/chart_multiline.py
DaveL17/matplotlib
857daf4222390d021defb87b57c3360fa12af5ab
[ "MIT" ]
4
2017-08-27T16:53:56.000Z
2022-03-27T10:48:02.000Z
matplotlib.indigoPlugin/Contents/Server Plugin/chart_multiline.py
DaveL17/matplotlib
857daf4222390d021defb87b57c3360fa12af5ab
[ "MIT" ]
3
2019-01-30T20:04:00.000Z
2021-06-21T02:11:17.000Z
matplotlib.indigoPlugin/Contents/Server Plugin/chart_multiline.py
DaveL17/matplotlib
857daf4222390d021defb87b57c3360fa12af5ab
[ "MIT" ]
null
null
null
#! /usr/bin/env python # -*- coding: utf-8 -*- """ Creates the multiline text charts Given the unique nature of multiline text charts, we use a separate method to construct them. ----- """ # Built-in Modules import pickle import sys import textwrap import traceback # Third-party Modules # Note the order and structure of matplotlib imports is intentional. import matplotlib matplotlib.use('AGG') # Note: this statement must be run before any other matplotlib imports are done. import matplotlib.pyplot as plt import matplotlib.patches as patches # My modules import chart_tools log = chart_tools.log payload = chart_tools.payload p_dict = payload['p_dict'] k_dict = payload['k_dict'] props = payload['props'] chart_name = props['name'] plug_dict = payload['prefs'] text_to_plot = payload['data'] log['Threaddebug'].append(u"chart_multiline.py called.") if plug_dict['verboseLogging']: chart_tools.log['Threaddebug'].append(u"{0}".format(payload)) try: def __init__(): pass def clean_string(val): """ Cleans long strings of whitespace and formats certain characters The clean_string(self, val) method is used to scrub multiline text elements in order to try to make them more presentable. The need is easily seen by looking at the rough text that is provided by the U.S. National Weather Service, for example. ----- :param unicode val: :return val: """ # List of (elements, replacements) clean_list = ((' am ', ' AM '), (' pm ', ' PM '), ('*', ' '), ('\u000A', ' '), ('...', ' '), ('/ ', '/'), (' /', '/'), ('/', ' / ') ) # Take the old, and replace it with the new. for (old, new) in clean_list: val = val.replace(old, new) val = ' '.join(val.split()) # Eliminate spans of whitespace. 
return val p_dict['figureWidth'] = float(props['figureWidth']) p_dict['figureHeight'] = float(props['figureHeight']) try: height = int(props.get('figureHeight', 300)) / int(plt.rcParams['savefig.dpi']) if height < 1: height = 1 chart_tools.log['Warning'].append(u"[{n}] Height: Pixels / DPI can not be less than one. Coercing to " u"one.".format(n=chart_name) ) except ValueError: height = 3 try: width = int(props.get('figureWidth', 500)) / int(plt.rcParams['savefig.dpi']) if width < 1: width = 1 chart_tools.log['Warning'].append(u"[{n}] Width: Pixels / DPI can not be less than one. Coercing to " u"one.".format(n=chart_name) ) except ValueError: width = 5 fig = plt.figure(figsize=(width, height)) ax = fig.add_subplot(111) ax.axis('off') # If the value to be plotted is empty, use the default text from the device # configuration. if len(text_to_plot) <= 1: text_to_plot = unicode(p_dict['defaultText']) else: # The clean_string method tries to remove some potential ugliness from the text # to be plotted. It's optional--defaulted to on. No need to call this if the # default text is used. if p_dict['cleanTheText']: text_to_plot = clean_string(val=text_to_plot) if plug_dict['verboseLogging']: chart_tools.log['Threaddebug'].append(u"[{n}] Data: {t}".format(n=chart_name, t=text_to_plot)) # Wrap the text and prepare it for plotting. 
text_to_plot = textwrap.fill(text=text_to_plot, width=int(p_dict['numberOfCharacters']), replace_whitespace=p_dict['cleanTheText'] ) ax.text(0.01, 0.95, text_to_plot, transform=ax.transAxes, color=p_dict['textColor'], fontname=p_dict['fontMain'], fontsize=p_dict['multilineFontSize'], verticalalignment='top' ) ax.axes.get_xaxis().set_visible(False) ax.axes.get_yaxis().set_visible(False) if not p_dict['textAreaBorder']: [s.set_visible(False) for s in ax.spines.values()] # Transparent Charts Fill if p_dict['transparent_charts'] and p_dict['transparent_filled']: ax.add_patch(patches.Rectangle((0, 0), 1, 1, transform=ax.transAxes, facecolor=p_dict['faceColor'], zorder=1 ) ) # =============================== Format Title ================================ chart_tools.format_title(p_dict=p_dict, k_dict=k_dict, loc=(0.5, 0.98), align='center') # Note that subplots_adjust affects the space surrounding the subplots and not # the fig. plt.subplots_adjust(top=0.98, bottom=0.05, left=0.02, right=0.98, hspace=None, wspace=None ) chart_tools.save(logger=log) except (KeyError, IndexError, ValueError, UnicodeEncodeError) as sub_error: tb = traceback.format_exc() chart_tools.log['Critical'].append(u"[{n}] {s}".format(n=chart_name, s=tb)) pickle.dump(chart_tools.log, sys.stdout)
33.5
114
0.545391
666
5,695
4.537538
0.37988
0.028127
0.029782
0.021178
0.121774
0.121774
0.104567
0.104567
0.085374
0.085374
0
0.012881
0.332046
5,695
169
115
33.698225
0.781546
0.231958
0
0.126214
0
0
0.139442
0
0
0
0
0
0
1
0.019417
false
0.009709
0.07767
0
0.106796
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
cceb89993da7a1b69dbd4927d78012815ff5b4de
1,814
py
Python
sdks/apigw-manager/tests/apigw_manager/apigw/test_command.py
IMBlues/bkpaas-python-sdk
a87bee3d26f0ddeac124c7a4679cd3eff4abb8fc
[ "MIT" ]
17
2021-08-03T03:15:35.000Z
2022-03-18T06:10:04.000Z
sdks/apigw-manager/tests/apigw_manager/apigw/test_command.py
piglei/bkpaas-python-sdk
3dfea8be5702ccea1228691c6c1c3e87a27238d2
[ "MIT" ]
7
2021-08-03T07:10:12.000Z
2022-03-23T04:47:22.000Z
sdks/apigw-manager/tests/apigw_manager/apigw/test_command.py
piglei/bkpaas-python-sdk
3dfea8be5702ccea1228691c6c1c3e87a27238d2
[ "MIT" ]
9
2021-08-03T03:20:36.000Z
2022-03-08T13:47:50.000Z
# -*- coding: utf-8 -*- """ * TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-蓝鲸 PaaS 平台(BlueKing-PaaS) available. * Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. * Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. * You may obtain a copy of the License at http://opensource.org/licenses/MIT * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. """ import pytest from apigw_manager.apigw import command class TestApiCommand: @pytest.fixture(autouse=True) def setup_command(self): self.command = command.ApiCommand() def test_get_configuration(self, configuration): result = self.command.get_configuration() assert configuration.api_name == result.api_name assert configuration.host == result.host def test_get_configuration_with_args(self, faker): api_name = faker.color host = faker.url() result = self.command.get_configuration(api_name=api_name, host=host) assert api_name == result.api_name assert host.startswith(result.host) class TestDefinitionCommand: @pytest.fixture(autouse=True) def setup_command(self): self.command = command.DefinitionCommand() def test_get_context(self): context = self.command.get_context(["a:1", "b:2"]) assert "settings" in context assert "environ" in context assert context["data"]["a"] == 1 assert context["data"]["b"] == 2
37.791667
118
0.711136
242
1,814
5.243802
0.5
0.038613
0.023641
0.025217
0.189125
0.137116
0.096139
0.096139
0.096139
0.096139
0
0.010316
0.198456
1,814
47
119
38.595745
0.862448
0.400772
0
0.153846
0
0
0.028918
0
0
0
0
0
0.307692
1
0.192308
false
0
0.076923
0
0.346154
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ccece4c2f4cefc9d065c0f4184c45e006ab941e8
1,342
py
Python
tests/test_common.py
tussedrotten/pylie
df34b820b9d9273bc9c4287e559e5d5837faf794
[ "BSD-3-Clause" ]
6
2020-09-25T07:38:00.000Z
2022-03-11T09:09:22.000Z
tests/test_common.py
tussedrotten/pylie
df34b820b9d9273bc9c4287e559e5d5837faf794
[ "BSD-3-Clause" ]
1
2020-10-29T08:39:34.000Z
2021-02-09T14:07:09.000Z
tests/test_common.py
tussedrotten/pylie
df34b820b9d9273bc9c4287e559e5d5837faf794
[ "BSD-3-Clause" ]
1
2020-11-21T12:33:44.000Z
2020-11-21T12:33:44.000Z
from pylie.common import * from pylie import SO3 def test_to_rotation_matrix_results_in_valid_rotation(): # Test 2x2 matrix. A = np.random.rand(2, 2) R = to_rotation_matrix(A) np.testing.assert_almost_equal(R @ R.T, np.identity(2), 14) np.testing.assert_almost_equal(np.linalg.det(R), 1, 14) # Test 3x3 matrix. A = np.random.rand(3, 3) R = to_rotation_matrix(A) np.testing.assert_almost_equal(R @ R.T, np.identity(3), 14) np.testing.assert_almost_equal(np.linalg.det(R), 1, 14) def test_to_rotation_matrix_results_in_close_rotation(): angle = 0.5 * np.pi axis = np.array([[1, 0, 0]]).T R = SO3.Exp(angle * axis).matrix # Invalidate a valid rotation matrix by scaling it. R_scaled = 3 * R # Fit to SO(3). R_closest = to_rotation_matrix(R_scaled) # Result should be the same rotation matrix. np.testing.assert_almost_equal(R_closest, R, 14) # Perturb the rotation matrix with random noise. R_noisy = R + 0.01 * np.random.rand(3, 3) # Fit to SO(3) so3_closest = SO3(R_noisy) # Extract angle-axis representation. angle_closest, axis_closest = so3_closest.Log(True) # Result should be close to the same rotation. np.testing.assert_almost_equal(angle_closest, angle, 2) np.testing.assert_almost_equal(axis_closest, axis, 2)
29.822222
63
0.688525
221
1,342
3.972851
0.280543
0.127563
0.11959
0.167426
0.452164
0.331435
0.300683
0.22779
0.22779
0.22779
0
0.03907
0.198957
1,342
44
64
30.5
0.777674
0.208644
0
0.173913
0
0
0
0
0
0
0
0
0.304348
1
0.086957
false
0
0.086957
0
0.173913
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
ccedbc549ffc192d24b74e12a04695a4d740a2b9
1,409
py
Python
sources/base.py
chenders/hours
878e0fa57ad4810851fd2bab529e7e1525cf9fbb
[ "MIT" ]
null
null
null
sources/base.py
chenders/hours
878e0fa57ad4810851fd2bab529e7e1525cf9fbb
[ "MIT" ]
6
2015-01-28T00:48:39.000Z
2015-01-28T00:51:48.000Z
sources/base.py
chenders/hours
878e0fa57ad4810851fd2bab529e7e1525cf9fbb
[ "MIT" ]
null
null
null
import pytz from datetime import timedelta from dateutil import parser from django.utils.text import Truncator from django.db import IntegrityError from core.models import Data class HoursDataSource(object): def __init__(self, start_date, end_date): self.entries = [] self.start_date = start_date self.end_date = end_date def truncate(self, text, length): return Truncator(text).chars(length) def date_within_bounds(self, date, give_or_take=None): start_date = self.start_date end_date = self.end_date if give_or_take is not None: start_date -= give_or_take end_date += give_or_take return start_date <= date <= end_date def get_group_date(self, date): return date + timedelta(days=-date.weekday()) # return date.replace(day=1) def add_entry(self, date, title, mouseover, url, css_class): try: Data.objects.create(date=date, title=title, mouseover=mouseover, url=url, css_class=css_class) except IntegrityError: pass def date_within_bounds(self, date, give_or_take=None): start_date = self.start_date end_date = self.end_date if give_or_take is not None: start_date -= give_or_take end_date += give_or_take return start_date <= date <= end_date
31.311111
76
0.648687
190
1,409
4.536842
0.3
0.114849
0.092807
0.097448
0.408353
0.408353
0.38051
0.38051
0.38051
0.38051
0
0.000978
0.274663
1,409
44
77
32.022727
0.842466
0.018453
0
0.4
0
0
0
0
0
0
0
0
0
1
0.171429
false
0.028571
0.171429
0.057143
0.485714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ccede3910d44ac17adace50d3005cb0696103c2a
398
py
Python
roadmaps/signal_listeners.py
adambober/roadmap_builder
6d48fc4ff65de0bb667f2495004d55f6398b5536
[ "MIT" ]
3
2021-12-04T00:25:34.000Z
2022-03-20T17:33:52.000Z
roadmaps/signal_listeners.py
adambober/roadmap_builder
6d48fc4ff65de0bb667f2495004d55f6398b5536
[ "MIT" ]
null
null
null
roadmaps/signal_listeners.py
adambober/roadmap_builder
6d48fc4ff65de0bb667f2495004d55f6398b5536
[ "MIT" ]
null
null
null
from django.db.models.signals import post_save from django.dispatch import receiver from roadmaps.models import RoadmapNode from roadmaps.services.progress import ProgressPropagator @receiver(post_save, sender=RoadmapNode) def propagate_completion_to_descendant_nodes(sender, **kwargs): roadmap: RoadmapNode = kwargs.get('instance') ProgressPropagator.propagate_completion_desc(roadmap)
33.166667
63
0.836683
47
398
6.914894
0.574468
0.061538
0
0
0
0
0
0
0
0
0
0
0.092965
398
11
64
36.181818
0.900277
0
0
0
0
0
0.020101
0
0
0
0
0
0
1
0.125
false
0
0.5
0
0.625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
ccf0191c3e20264408fdd3e37fe77537c8aef935
7,725
py
Python
tests/datastructures_tests/intensity_data_test.py
czbiohub/reconstruct-order
e729ae3871aea0a5ec2d42744a9448c7f0a93037
[ "Unlicense" ]
6
2019-10-30T23:00:01.000Z
2021-03-02T19:09:07.000Z
tests/datastructures_tests/intensity_data_test.py
czbiohub/ReconstructOrder
e729ae3871aea0a5ec2d42744a9448c7f0a93037
[ "Unlicense" ]
14
2019-07-08T22:51:29.000Z
2019-07-13T15:44:01.000Z
tests/datastructures_tests/intensity_data_test.py
mehta-lab/reconstruct-order
e729ae3871aea0a5ec2d42744a9448c7f0a93037
[ "Unlicense" ]
2
2020-05-02T23:28:36.000Z
2020-07-16T23:46:46.000Z
import numpy as np import pytest from numpy.testing import assert_array_equal from ReconstructOrder.datastructures.intensity_data import IntensityData # ==== test basic construction ===== def test_basic_constructor_nparray(): """ test assignment using numpy arrays """ int_data = IntensityData() a = np.ones((512, 512)) b = 2*np.ones((512, 512)) c = 3*np.ones((512, 512)) d = 4*np.ones((512, 512)) e = 5*np.ones((512, 512)) int_data.append_image(a) int_data.append_image(b) int_data.append_image(c) int_data.append_image(d) int_data.append_image(e) assert_array_equal(int_data.get_image(0), a) assert_array_equal(int_data.get_image(1), b) assert_array_equal(int_data.get_image(2), c) assert_array_equal(int_data.get_image(3), d) assert_array_equal(int_data.get_image(4), e) assert_array_equal(int_data.data, np.array([a, b, c, d, e])) def test_basic_constructor_memap(setup_temp_data): """ test assignment using memory mapped files """ mm = setup_temp_data int_data = IntensityData() int_data.append_image(mm) int_data.append_image(2 * mm) int_data.append_image(3 * mm) int_data.append_image(4 * mm) int_data.append_image(5 * mm) assert_array_equal(int_data.get_image(0), mm) assert_array_equal(int_data.get_image(1), 2*mm) assert_array_equal(int_data.get_image(2), 3*mm) assert_array_equal(int_data.get_image(3), 4*mm) assert_array_equal(int_data.get_image(4), 5*mm) assert_array_equal(int_data.data, np.array([mm, 2*mm, 3*mm, 4*mm, 5*mm])) def test_basic_constructor_with_names(): """ test construction with channel names Returns ------- """ int_data = IntensityData() int_data.channel_names = ['IExt', 'I0', 'I45', 'I90', 'I135'] a = np.ones((512, 512)) b = 2 * np.ones((512, 512)) c = 3 * np.ones((512, 512)) d = 4 * np.ones((512, 512)) e = 5 * np.ones((512, 512)) int_data.replace_image(a, 'IExt') int_data.replace_image(b, 'I0') int_data.replace_image(c, 'I45') int_data.replace_image(d, 'I90') int_data.replace_image(e, 'I135') assert_array_equal(int_data.get_image("IExt"), a) def 
test_basic_constructor_without_names(): """ test construction with channel names Returns ------- """ int_data = IntensityData() # int_data.channel_names = ['IExt', 'I0', 'I45', 'I90', 'I135'] a = np.ones((512, 512)) b = 2 * np.ones((512, 512)) c = 3 * np.ones((512, 512)) d = 4 * np.ones((512, 512)) e = 5 * np.ones((512, 512)) int_data.append_image(a) int_data.append_image(b) int_data.append_image(c) int_data.append_image(d) int_data.append_image(e) assert_array_equal(int_data.get_image(0), a) # ==== test instances and private/public access ===== def test_instances(): """ test instance attributes """ I1 = IntensityData() I2 = IntensityData() with pytest.raises(AssertionError): assert(I1 == I2) with pytest.raises(AssertionError): I1.append_image(np.ones((32, 32))) I2.append_image(np.ones((64, 64))) assert_array_equal(I1.get_image(0),I2.get_image(0)) def test_private_access(setup_intensity_data): """ should not have access to private variables access is restricted to setters/getters """ int_data, a, b, c, d, e = setup_intensity_data with pytest.raises(AttributeError): print(int_data.__IExt) with pytest.raises(AttributeError): print(int_data.__I0) # ==== test methods ===== # replace_image method def test_replace_image_shape(setup_intensity_data): int_data, a, b, c, d, e = setup_intensity_data newim = np.ones((5,5)) with pytest.raises(ValueError): int_data.replace_image(newim, 0) def test_replace_image_dtype(setup_intensity_data): int_data, a, b, c, d, e = setup_intensity_data newim = 0 with pytest.raises(TypeError): int_data.replace_image(newim, 0) def test_replace_image_by_index(setup_intensity_data): int_data, a, b, c, d, e = setup_intensity_data newim = np.ones((512, 512)) int_data.replace_image(newim, 0) assert_array_equal(int_data.data[0], newim) def test_replace_image_by_string(setup_intensity_data): int_data, a, b, c, d, e = setup_intensity_data int_data.channel_names = ['IExt', 'I0', 'I45', 'I90', 'I135'] newim = np.ones((512,512)) int_data.replace_image(newim, 
'I90') assert_array_equal(int_data.get_image('I90'), newim) # channel_names property def test_channel_names(setup_intensity_data): int_data, a, b, c, d, e = setup_intensity_data names = ['a','b','c','d','e'] int_data.channel_names = names # get_image method def test_get_image_str(setup_intensity_data): """ test query by string channel name """ int_data, a, b, c, d, e = setup_intensity_data names = ['a','b','c','d','e'] int_data.channel_names = names dat = int_data.get_image('e') assert(dat.shape, (512,512)) assert(dat[0][0], 5) def test_get_img_str_undef(setup_intensity_data): """ test exception handling of query by string channel name """ int_data, a, b, c, d, e = setup_intensity_data names = ['a','b','c','d','e','f','g','h'] int_data.channel_names = names with pytest.raises(ValueError): dat = int_data.get_image('q') def test_get_image_int(setup_intensity_data): """ test query by int channel index """ int_data, a, b, c, d, e = setup_intensity_data names = ['a','b','c','d','e'] int_data.channel_names = names dat = int_data.get_image(4) assert(dat.shape, (512,512)) assert(dat[0][0], 5) # axis_names property def test_axis_names(setup_intensity_data): int_data, a, b, c, d, e = setup_intensity_data names = ['c', 'x', 'y', 'z', 't'] int_data.axis_names = names assert(int_data.axis_names, names) # ==== test data dimensions ===== def test_ndims_1(setup_ndarrays): """ test that shape is preserved """ p, q, r = setup_ndarrays int_data = IntensityData() int_data.append_image(p) int_data.append_image(p) int_data.append_image(p) assert(int_data.data[0].shape == p.shape) assert(int_data.data.shape == (3,)+p.shape) def test_ndims_2(setup_ndarrays): """ test exception handling for image data that is not \ numpy array or numpy memmap """ int_data = IntensityData() with pytest.raises(TypeError): int_data.append_image(1) with pytest.raises(TypeError): int_data.append_image([1, 2, 3]) with pytest.raises(TypeError): int_data.append_image({1, 2, 3}) with pytest.raises(TypeError): 
int_data.append_image((1, 2, 3)) def test_ndims_3(setup_ndarrays): """ test exception handling upon assignment of dim mismatch image """ p, q, r = setup_ndarrays int_data = IntensityData() int_data.append_image(p) with pytest.raises(ValueError): int_data.append_image(q) # ==== Attribute assignment ========== def test_assignment(setup_intensity_data): """ test exception handling of improper assignment """ int_data, a, b, c, d, e = setup_intensity_data with pytest.raises(TypeError): int_data.Iext = a with pytest.raises(TypeError): int_data.__IExt = a def test_set_data(setup_intensity_data): """ test that neither data nor frames are set-able attributes """ int_data, a, b, c, d, e = setup_intensity_data with pytest.raises(AttributeError): int_data.data = 0 with pytest.raises(AttributeError): int_data.num_channels = 0
24.759615
77
0.655016
1,159
7,725
4.099223
0.11648
0.129657
0.06567
0.090928
0.669964
0.615028
0.571248
0.520732
0.428962
0.412334
0
0.03838
0.207379
7,725
311
78
24.839228
0.737547
0.13178
0
0.52439
0
0
0.013616
0
0
0
0
0
0.170732
1
0.121951
false
0
0.02439
0
0.146341
0.012195
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
ccf118cf4e661c54ae6e3a8fa5192d55fe0bbd47
1,722
py
Python
configs/mario_pg_config.py
Shiien/verify_rl_torch
45866609ac55fcf99aaaa89df94573acf35580d2
[ "MIT" ]
1
2022-03-22T14:59:01.000Z
2022-03-22T14:59:01.000Z
configs/mario_pg_config.py
Shiien/verify_rl_torch
45866609ac55fcf99aaaa89df94573acf35580d2
[ "MIT" ]
null
null
null
configs/mario_pg_config.py
Shiien/verify_rl_torch
45866609ac55fcf99aaaa89df94573acf35580d2
[ "MIT" ]
null
null
null
import torch class MarioConfig: def __init__(self): # hyper config self.max_num_gpus = 1 self.num_workers = 32 self.discount = 0.999 self.observation_space = (84, 84, 3) self.action_space = 256 + 20 + 8 import os import datetime self.results_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../results", os.path.basename(__file__)[:-3], datetime.datetime.now().strftime( "%Y-%m-%d--%H-%M-%S")) # Path to store the model weights and TensorBoard logs self.save_log = True # Save the checkpoint in results_path as model.checkpoint self.training_steps = int(100 * 1e6) # Total number of training steps (ie weights update according to a batch) # Alg config self.lambda_ = 0.95 # Actor config # Learner config self.train_on_gpu = torch.cuda.is_available() # Train on GPU if available self.batch_size = 32 # Number of parts of games to train on at each training step self.checkpoint_interval = int(8) # Number of training steps before using the model for self-playing self.optimizer = "Adam" # "Adam" or "SGD". Paper uses SGD self.weight_decay = 1e-4 # L2 weights regularization self.momentum = 0.9 # Used only if optimizer is SGD self.cofentropy = 1e-3 self.v_scaling = 0.5 self.clip_param = 0.15 self.lr_init = 5e-4 # Initial learning rate self.replay_buffer_size = int(1e3) # Number of self-play games to keep in the replay buffer self.num_unroll_steps = 16 # Number of game moves to keep for every batch element
43.05
119
0.624855
243
1,722
4.283951
0.539095
0.038425
0.03074
0.040346
0
0
0
0
0
0
0
0.039216
0.289199
1,722
39
120
44.153846
0.811275
0.348432
0
0
0
0
0.028959
0
0
0
0
0
0
1
0.035714
false
0
0.107143
0
0.178571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0