hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
4156d6278870fbb774b81e7ffbdf14d0c4744d9b
2,490
py
Python
tf_encrypted/keras/layers/layers_utils.py
wqruan/tf-encrypted
50ee4ae3ba76b7c1f70a90e18f875191adea0a07
[ "Apache-2.0" ]
825
2019-04-18T09:21:32.000Z
2022-03-30T05:55:26.000Z
tf_encrypted/keras/layers/layers_utils.py
wqruan/tf-encrypted
50ee4ae3ba76b7c1f70a90e18f875191adea0a07
[ "Apache-2.0" ]
354
2019-04-18T08:42:40.000Z
2022-03-31T18:06:31.000Z
tf_encrypted/keras/layers/layers_utils.py
wqruan/tf-encrypted
50ee4ae3ba76b7c1f70a90e18f875191adea0a07
[ "Apache-2.0" ]
161
2019-05-02T16:43:31.000Z
2022-03-31T01:35:03.000Z
"""TF Encrypted Keras layers utils""" import inspect import tensorflow as tf class UnknownLayerArgError(ValueError): """Raise error for unknown layer arguments. Args: arg_name: TF Encrypted Keras layer argument name (string) layer_sign: TensorFlow Keras layer signature (dict) tf_layer_name: TensorFlow Keras layer name (string) """ def __init__(self, arg_name, layer_sign, layer_name): super(UnknownLayerArgError, self).__init__() self.arg_name = arg_name self.layer_sign = layer_sign self.layer_name = layer_name def __str__(self): msg = ( "Argument '{arg_name}' is not part of the " "signature for '{layer_name}' layers: {layer_sign}" ) return msg.format( arg_name=self.arg_name, layer_name=self.layer_name, layer_sign=self.layer_sign.keys(), ) class LayerArgNotImplementedError(NotImplementedError): """Raise error when layer argument is not yet supported in TFE. Args: arg: TFE layer argument arg_name: TFE layer argument name (string) tf_layer_name: Tensorflow keras layer name (string) """ def __init__(self, arg_name, tf_layer_name, tf_default_arg): super(LayerArgNotImplementedError, self).__init__() self.arg_name = arg_name self.tf_layer_name = tf_layer_name self.tf_default_arg = tf_default_arg def __str__(self): arg_not_impl_msg = ( "`{}` argument is not implemented for layer {}. " "Please use the default value of {}." ) return arg_not_impl_msg.format( self.arg_name, self.tf_layer_name, self.tf_default_arg ) def default_args_check(arg, arg_name, tf_layer_name): """Check if the layer is using the dfault argument Args: arg: TFE layer argument arg_name: TFE layer argument name (string) tf_layer_name: Tensorflow keras layer name (string) Raises: NotImplementedError: if this argument is not implemented for this `layer`. 
""" tf_layer_cls = getattr(tf.keras.layers, tf_layer_name) layer_sign = inspect.signature(tf_layer_cls.__init__).parameters if arg_name not in layer_sign: raise UnknownLayerArgError(arg_name, layer_sign, tf_layer_name) tf_default_arg = layer_sign[arg_name].default if arg != tf_default_arg: raise LayerArgNotImplementedError(arg_name, tf_layer_name, tf_default_arg)
32.337662
82
0.675904
323
2,490
4.866873
0.19195
0.114504
0.076972
0.038168
0.353053
0.307252
0.278626
0.244275
0.172392
0.172392
0
0
0.245382
2,490
76
83
32.763158
0.836615
0.283534
0
0.1
0
0
0.101296
0
0
0
0
0
0
1
0.125
false
0
0.05
0
0.275
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4158338da63ba358220ee7b0c0e8ce7b54fd01ff
5,850
py
Python
vharfbuzz.py
KazunariTsuboi/font-engineering
2b80182d9cdfebf9853c01295ab13046f2ccb5a1
[ "Apache-2.0" ]
null
null
null
vharfbuzz.py
KazunariTsuboi/font-engineering
2b80182d9cdfebf9853c01295ab13046f2ccb5a1
[ "Apache-2.0" ]
null
null
null
vharfbuzz.py
KazunariTsuboi/font-engineering
2b80182d9cdfebf9853c01295ab13046f2ccb5a1
[ "Apache-2.0" ]
null
null
null
"""A user-friendlier way to use Harfbuzz in Python.""" import uharfbuzz as hb from fontTools.ttLib import TTFont import re class Vharfbuzz: def __init__(self, filename): """Opens a font file and gets ready to shape text.""" self.filename = filename with open(self.filename, "rb") as fontfile: self.fontdata = fontfile.read() self.ttfont = TTFont(filename) self.glyphOrder = self.ttfont.getGlyphOrder() self.prepare_shaper() self.shapers = None self.drawfuncs = None def prepare_shaper(self): face = hb.Face(self.fontdata) font = hb.Font(face) upem = face.upem font.scale = (upem, upem) hb.ot_font_set_funcs(font) self.hbfont = font def make_message_handling_function(self, buf, onchange): self.history = {"GSUB": [], "GPOS": []} self.lastLookupID = None def handle_message(msg, buf2): m = re.match("start lookup (\\d+)", msg) if m: lookupid = int(m[1]) self.history[self.stage].append(self.serialize_buf(buf2)) m = re.match("end lookup (\\d+)", msg) if m: lookupid = int(m[1]) if self.serialize_buf(buf2) != self.history[self.stage][-1]: onchange(self, self.stage, lookupid, self._copy_buf(buf2)) self.history[self.stage].pop() if msg.startswith("start GPOS stage"): self.stage = "GPOS" return handle_message def shape(self, text, onchange=None): """Shapes a text This shapes a piece of text, return a uharfbuzz `Buffer` object. Additionally, if an `onchange` function is provided, this will be called every time the buffer changes *during* shaping, with the following arguments: - ``self``: the vharfbuzz object. 
- ``stage``: either "GSUB" or "GPOS" - ``lookupid``: the current lookup ID - ``buffer``: a copy of the buffer as a list of lists (glyphname, cluster, position) """ self.prepare_shaper() buf = hb.Buffer() buf.add_str(text) buf.guess_segment_properties() self.stage = "GSUB" if onchange: f = self.make_message_handling_function(buf, onchange) buf.set_message_func(f) hb.shape(self.hbfont, buf, shapers=self.shapers) self.stage = "GPOS" return buf def _copy_buf(self, buf): # Or at least the bits we care about outs = [] for info, pos in zip(buf.glyph_infos, buf.glyph_positions): l = [self.glyphOrder[info.codepoint], info.cluster] if self.stage == "GPOS": l.append(pos.position) else: l.append(None) outs.append(l) return outs def serialize_buf(self, buf): """Returns the contents of the given buffer in a string format similar to that used by hb-shape.""" outs = [] for info, pos in zip(buf.glyph_infos, buf.glyph_positions): outs.append("%s=%i" % (self.glyphOrder[info.codepoint], info.cluster)) if self.stage == "GPOS": outs[-1] = outs[-1] + "+%i" % (pos.position[2]) if self.stage == "GPOS" and (pos.position[0] != 0 or pos.position[1] != 0): outs[-1] = outs[-1] + "@<%i,%i>" % (pos.position[0], pos.position[1]) return "|".join(outs) def setup_svg_draw_funcs(self): if self.drawfuncs: return def move_to(x, y, c): c["output_string"] = c["output_string"] + f"M{x},{y}" def line_to(x, y, c): c["output_string"] = c["output_string"] + f"L{x},{y}" def cubic_to(c1x, c1y, c2x, c2y, x, y, c): c["output_string"] = ( c["output_string"] + f"C{c1x},{c1y} {c2x},{c2y} {x},{y}" ) def quadratic_to(c1x, c1y, x, y, c): c["output_string"] = c["output_string"] + f"Q{c1x},{c1y} {x},{y}" def close_path(c): c["output_string"] = c["output_string"] + "Z" self.drawfuncs = hb.DrawFuncs.create() self.drawfuncs.set_move_to_func(move_to) self.drawfuncs.set_line_to_func(line_to) self.drawfuncs.set_cubic_to_func(cubic_to) self.drawfuncs.set_quadratic_to_func(quadratic_to) 
self.drawfuncs.set_close_path_func(close_path) def glyph_to_svg_path(self, gid): if not hasattr(hb, "DrawFuncs"): raise ValueError( "glyph_to_svg_path requires uharfbuzz with draw function support" ) self.setup_svg_draw_funcs() container = {"output_string": ""} self.drawfuncs.draw_glyph(self.hbfont, gid, container) return container["output_string"] def buf_to_svg(self, buf): x_cursor = 0 y_cursor = 0 paths = [] svg = "" for info, pos in zip(buf.glyph_infos, buf.glyph_positions): glyph_path = self.glyph_to_svg_path(info.codepoint) dx, dy = pos.position[0], pos.position[1] p = ( f'<path d="{glyph_path}" ' + f' transform="translate({x_cursor+dx}, {y_cursor+dy})"/>\n' ) svg += p x_cursor += pos.position[2] y_cursor += pos.position[3] svg = ( ( f'<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 {x_cursor} 2000"' + ' transform="matrix(1 0 0 -1 0 1000)">\n' ) + svg + "</svg>\n" ) return svg # v = Vharfbuzz("/Users/simon/Library/Fonts/SourceSansPro-Regular.otf") # buf = v.shape("ABCj") # svg = v.buf_to_svg(buf) # import cairosvg # cairosvg.svg2png(bytestring=svg, write_to="foo.png")
34.411765
88
0.558462
753
5,850
4.201859
0.280212
0.045512
0.041087
0.022124
0.190265
0.183312
0.142857
0.134324
0.134324
0.117889
0
0.014081
0.308034
5,850
169
89
34.615385
0.76754
0.143248
0
0.122951
0
0.008197
0.120226
0.007072
0
0
0
0
0
1
0.122951
false
0
0.02459
0
0.213115
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
415a0184fc0cf2acf5e7fa555af6365ef819e0ca
2,581
py
Python
preprocessing/fields/amazon_fields.py
stungkit/Copycat-abstractive-opinion-summarizer
04fe5393a7bb6883516766b762f6a0c530e95375
[ "MIT" ]
51
2020-09-25T07:05:01.000Z
2022-03-17T12:07:40.000Z
preprocessing/fields/amazon_fields.py
stungkit/Copycat-abstractive-opinion-summarizer
04fe5393a7bb6883516766b762f6a0c530e95375
[ "MIT" ]
4
2020-10-19T10:00:22.000Z
2022-03-14T17:02:47.000Z
preprocessing/fields/amazon_fields.py
stungkit/Copycat-abstractive-opinion-summarizer
04fe5393a7bb6883516766b762f6a0c530e95375
[ "MIT" ]
22
2020-09-22T01:06:47.000Z
2022-01-26T14:20:09.000Z
class AmazonFields(object): PROD_ID = "asin" REV_TEXT = "reviewText" OVERALL = 'overall' # group ids there are used to produce summaries EXCLUDED_GROUP_IDS = {'B005085X5Y', 'B0052PKEYK', 'B009L0KU46', 'B006QM4HOG', 'B005DO7R3G', 'B007I8G8A8', 'B004X86A86', 'B0040EIHQQ', 'B002AROW78', 'B005BQ6YYO', 'B0013EQ20Y', 'B000A2FTN6', 'B006UF64CW', 'B00AZ6WVQU', 'B001QCZ3E4', 'B001TKE25E', 'B0051PJHDK', 'B005ZA2W42', 'B003U8K0N6', 'B003YJ5LLM', 'B000BYGGBW', 'B004IV8Y50', 'B00DB94FB2', 'B002RL8IGI', 'B004G8EKIA', 'B000V7CPJG', 'B001H9O71Q', 'B00BOZCSQE', 'B000EB7OTU', 'B005MZO5TA', 'B000EZUQK0', 'B005H86LZ8', 'B005JS7KME', 'B000YA8NYQ', 'B002BJU8YQ', 'B00GW5NX88', 'B004OQQLTU', 'B008Z8W714', 'B003HT9W32', 'B00028QA72', 'B000EXUB3E', 'B000N245Y2', 'B0039PH51K', 'B006ZO110I', 'B004U3Y9FU', 'B006NNLX2C', 'B006Y401K0', 'B00006IUVM', 'B004R9X6VU', 'B000COQT0Y', 'B0002U34HY', 'B000N6MI7E', 'B0000CCY1S', 'B001EQJ5AU', 'B00442C8RS', 'B0036SGFXO', 'B0025OO8DO', 'B001GCV0FK', 'B00455NTOU', 'B006CO18RM' }
37.955882
51
0.279737
82
2,581
8.756098
0.963415
0.022284
0
0
0
0
0
0
0
0
0
0.321701
0.644711
2,581
67
52
38.522388
0.461287
0.017435
0
0
0
0
0.245067
0
0
0
0
0
0
1
0
false
0
0
0
0.076923
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
415b5b68914faf3e3638db9ddfedb6c109eb3f7e
9,973
py
Python
habitat_baselines/config/default.py
rpartsey/habitat-pointnav-aux
03a24ddca8ab257f64092c70d4f2ff6805287b40
[ "MIT", "Unlicense" ]
15
2020-07-10T15:43:02.000Z
2022-03-09T03:11:30.000Z
habitat_baselines/config/default.py
rpartsey/habitat-pointnav-aux
03a24ddca8ab257f64092c70d4f2ff6805287b40
[ "MIT", "Unlicense" ]
2
2020-09-09T19:09:19.000Z
2020-10-21T16:30:23.000Z
habitat_baselines/config/default.py
rpartsey/habitat-pointnav-aux
03a24ddca8ab257f64092c70d4f2ff6805287b40
[ "MIT", "Unlicense" ]
1
2021-02-05T14:50:30.000Z
2021-02-05T14:50:30.000Z
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List, Optional, Union import numpy as np from habitat import get_config as get_task_config from habitat.config import Config as CN DEFAULT_CONFIG_DIR = "configs/" CONFIG_FILE_SEPARATOR = "," # ----------------------------------------------------------------------------- # EXPERIMENT CONFIG # ----------------------------------------------------------------------------- _C = CN() _C.BASE_TASK_CONFIG_PATH = "configs/tasks/pointnav.yaml" _C.TASK_CONFIG = CN() # task_config will be stored as a config node _C.CMD_TRAILING_OPTS = [] # store command line options as list of strings _C.TRAINER_NAME = "ppo" _C.ENV_NAME = "NavRLEnv" _C.SIMULATOR_GPU_ID = 0 _C.TORCH_GPU_ID = 0 _C.VIDEO_OPTION = ["disk", "tensorboard"] _C.TENSORBOARD_DIR = "tb" _C.VIDEO_DIR = "video_dir" _C.TEST_EPISODE_COUNT = -1 _C.EVAL_CKPT_PATH_DIR = "data/checkpoints" # path to ckpt or path to ckpts dir _C.NUM_PROCESSES = 16 _C.SENSORS = ["RGB_SENSOR", "DEPTH_SENSOR"] _C.CHECKPOINT_FOLDER = "data/checkpoints" _C.NUM_UPDATES = 10000 _C.LOG_INTERVAL = 10 _C.LOG_FILE = "train.log" _C.CHECKPOINT_INTERVAL = 50 # ----------------------------------------------------------------------------- # EVAL CONFIG # ----------------------------------------------------------------------------- _C.EVAL = CN() # The split to evaluate on _C.EVAL.SPLIT = "val" _C.EVAL.USE_CKPT_CONFIG = True # ----------------------------------------------------------------------------- # REINFORCEMENT LEARNING (RL) ENVIRONMENT CONFIG # ----------------------------------------------------------------------------- _C.RL = CN() _C.RL.REWARD_MEASURE = "distance_to_goal" _C.RL.SUCCESS_MEASURE = "spl" _C.RL.SUCCESS_REWARD = 2.5 _C.RL.SLACK_REWARD = -0.01 # ----------------------------------------------------------------------------- # 
PROXIMAL POLICY OPTIMIZATION (PPO) # ----------------------------------------------------------------------------- _C.RL.PPO = CN() _C.RL.PPO.clip_param = 0.2 _C.RL.PPO.ppo_epoch = 4 _C.RL.PPO.num_mini_batch = 16 _C.RL.PPO.value_loss_coef = 0.5 _C.RL.PPO.entropy_coef = 0.01 _C.RL.PPO.aux_loss_coef = 1.0 _C.RL.PPO.lr = 7e-4 _C.RL.PPO.eps = 1e-5 _C.RL.PPO.max_grad_norm = 0.5 _C.RL.PPO.num_steps = 5 _C.RL.PPO.use_gae = True _C.RL.PPO.use_linear_lr_decay = False _C.RL.PPO.use_linear_clip_decay = False _C.RL.PPO.gamma = 0.99 _C.RL.PPO.tau = 0.95 _C.RL.PPO.reward_window_size = 50 # Policy _C.RL.PPO.policy = "BASELINE" _C.RL.PPO.POLICY = CN() _C.RL.PPO.POLICY.name = "BASELINE" _C.RL.PPO.POLICY.use_mean_and_var = False _C.RL.PPO.POLICY.pretrained_encoder = False _C.RL.PPO.POLICY.pretrained_weights = "/srv/share/ewijmans3/resnet-18-mp3d-rgbd-100m.pth" _C.RL.PPO.POLICY.midlevel_medium = 'curvature' # "depth_zbuffer" _C.RL.PPO.POLICY.HIERARCHICAL = CN() _C.RL.PPO.POLICY.HIERARCHICAL.type = "linear" # linear, custom, all_for_one _C.RL.PPO.POLICY.HIERARCHICAL.dependencies = () # A tuple representing a DAG OR a string representing a type _C.RL.PPO.POLICY.IM = CN() _C.RL.PPO.POLICY.IM.comm_interval = 16 # Auxiliary Tasks _C.RL.AUX_TASKS = CN() _C.RL.AUX_TASKS.tasks = [] _C.RL.AUX_TASKS.required_sensors = [] _C.RL.AUX_TASKS.distribution = "uniform" # one-hot, TODO gaussian _C.RL.AUX_TASKS.entropy_coef = 0.0 _C.RL.AUX_TASKS.InverseDynamicsTask = CN() _C.RL.AUX_TASKS.InverseDynamicsTask.loss_factor = 0.01 _C.RL.AUX_TASKS.InverseDynamicsTask.subsample_rate = 0.1 _C.RL.AUX_TASKS.ActionPrediction = CN() _C.RL.AUX_TASKS.ActionPrediction.loss_factor = 0.01 _C.RL.AUX_TASKS.ActionPrediction.subsample_rate = 0.1 _C.RL.AUX_TASKS.ActionPrediction.num_steps = 3 _C.RL.AUX_TASKS.ActionRecall = CN() _C.RL.AUX_TASKS.ActionRecall.loss_factor = 0.01 _C.RL.AUX_TASKS.ActionRecall.subsample_rate = 0.1 _C.RL.AUX_TASKS.ActionRecall.num_steps = 3 _C.RL.AUX_TASKS.TemporalDistanceTask = CN() 
_C.RL.AUX_TASKS.TemporalDistanceTask.loss_factor = 0.1 _C.RL.AUX_TASKS.TemporalDistanceTask.num_pairs = 1 # in lieu of subsample rate _C.RL.AUX_TASKS.TemporalReachTask = CN() _C.RL.AUX_TASKS.TemporalReachTask.loss_factor = 1.0 _C.RL.AUX_TASKS.TemporalReachTask.threshold = 10 _C.RL.AUX_TASKS.TemporalReachTask.num_pairs = 1 _C.RL.AUX_TASKS.ForwardDynamicsTask = CN() _C.RL.AUX_TASKS.ForwardDynamicsTask.loss_factor = .0002 _C.RL.AUX_TASKS.ForwardDynamicsTask.subsample_rate = 0.1 _C.RL.AUX_TASKS.CPCA_Single = CN() _C.RL.AUX_TASKS.CPCA_Single.loss_factor = 0.05 _C.RL.AUX_TASKS.CPCA_Single.num_steps = 8 _C.RL.AUX_TASKS.CPCA_Single.subsample_rate = 0.2 _C.RL.AUX_TASKS.CPCA_Single_A = _C.RL.AUX_TASKS.CPCA_Single.clone() _C.RL.AUX_TASKS.CPCA_Single_A.num_steps = 2 _C.RL.AUX_TASKS.CPCA_Single_B = _C.RL.AUX_TASKS.CPCA_Single.clone() _C.RL.AUX_TASKS.CPCA_Single_B.num_steps = 4 _C.RL.AUX_TASKS.CPCA_Single_C = _C.RL.AUX_TASKS.CPCA_Single.clone() _C.RL.AUX_TASKS.CPCA_Single_C.num_steps = 8 _C.RL.AUX_TASKS.CPCA_Single_D = _C.RL.AUX_TASKS.CPCA_Single.clone() _C.RL.AUX_TASKS.CPCA_Single_D.num_steps = 16 _C.RL.AUX_TASKS.CPCA = CN() _C.RL.AUX_TASKS.CPCA.loss_factor = 0.05 _C.RL.AUX_TASKS.CPCA.num_steps = 1 _C.RL.AUX_TASKS.CPCA.subsample_rate = 0.2 _C.RL.AUX_TASKS.CPCA_A = _C.RL.AUX_TASKS.CPCA.clone() _C.RL.AUX_TASKS.CPCA_A.num_steps = 2 _C.RL.AUX_TASKS.CPCA_B = _C.RL.AUX_TASKS.CPCA.clone() _C.RL.AUX_TASKS.CPCA_B.num_steps = 4 _C.RL.AUX_TASKS.CPCA_C = _C.RL.AUX_TASKS.CPCA.clone() _C.RL.AUX_TASKS.CPCA_C.num_steps = 8 _C.RL.AUX_TASKS.CPCA_D = _C.RL.AUX_TASKS.CPCA.clone() _C.RL.AUX_TASKS.CPCA_D.num_steps = 16 _C.RL.AUX_TASKS.CPCA_Weighted = CN() _C.RL.AUX_TASKS.CPCA_Weighted.loss_factor = 0.05 _C.RL.AUX_TASKS.CPCA_Weighted.subsample_rate = 0.2 _C.RL.AUX_TASKS.GID = CN() _C.RL.AUX_TASKS.GID.loss_factor = 0.2 _C.RL.AUX_TASKS.GID.num_steps = 4 _C.RL.AUX_TASKS.GID.subsample_rate = 0.2 _C.RL.AUX_TASKS.ActionDist = CN() _C.RL.AUX_TASKS.ActionDist.loss_factor = 0.2 
_C.RL.AUX_TASKS.ActionDist.num_steps = 4 _C.RL.AUX_TASKS.ActionDist.subsample_rate = 0.2 _C.RL.AUX_TASKS.ActionDist_A = _C.RL.AUX_TASKS.ActionDist.clone() _C.RL.AUX_TASKS.ActionDist_A.num_steps = 2 _C.RL.AUX_TASKS.SensorPrediction = CN() _C.RL.AUX_TASKS.SensorPrediction.loss_factor = 0.05 _C.RL.AUX_TASKS.SensorPrediction.subsample_rate = 0.2 _C.RL.AUX_TASKS.SensorPrediction.goal = "objectgoal" _C.RL.AUX_TASKS.VisionContrastedSP = CN() _C.RL.AUX_TASKS.VisionContrastedSP.loss_factor = 0.05 _C.RL.AUX_TASKS.VisionContrastedSP.subsample_rate = 0.2 _C.RL.AUX_TASKS.VisionContrastedSP.sensor = "semantic" _C.RL.AUX_TASKS.Dummy = CN() _C.RL.PPO.use_normalized_advantage = True _C.RL.PPO.hidden_size = 512 # ----------------------------------------------------------------------------- # DECENTRALIZED DISTRIBUTED PROXIMAL POLICY OPTIMIZATION (DD-PPO) # ----------------------------------------------------------------------------- _C.RL.DDPPO = CN() _C.RL.DDPPO.sync_frac = 0.6 _C.RL.DDPPO.distrib_backend = "GLOO" _C.RL.DDPPO.rnn_type = "LSTM" _C.RL.DDPPO.num_recurrent_layers = 2 _C.RL.DDPPO.backbone = "resnet50" _C.RL.DDPPO.pretrained_weights = "data/ddppo-models/gibson-2plus-resnet50.pth" # Loads pretrained weights _C.RL.DDPPO.pretrained = False # Loads just the visual encoder backbone weights _C.RL.DDPPO.pretrained_encoder = False # Whether or not the visual encoder backbone will be trained _C.RL.DDPPO.train_encoder = True # Whether or not to reset the critic linear layer _C.RL.DDPPO.reset_critic = True # ----------------------------------------------------------------------------- # ORBSLAM2 BASELINE # ----------------------------------------------------------------------------- _C.ORBSLAM2 = CN() _C.ORBSLAM2.SLAM_VOCAB_PATH = "habitat_baselines/slambased/data/ORBvoc.txt" _C.ORBSLAM2.SLAM_SETTINGS_PATH = ( "habitat_baselines/slambased/data/mp3d3_small1k.yaml" ) _C.ORBSLAM2.MAP_CELL_SIZE = 0.1 _C.ORBSLAM2.MAP_SIZE = 40 _C.ORBSLAM2.CAMERA_HEIGHT = 
get_task_config().SIMULATOR.DEPTH_SENSOR.POSITION[ 1 ] _C.ORBSLAM2.BETA = 100 _C.ORBSLAM2.H_OBSTACLE_MIN = 0.3 * _C.ORBSLAM2.CAMERA_HEIGHT _C.ORBSLAM2.H_OBSTACLE_MAX = 1.0 * _C.ORBSLAM2.CAMERA_HEIGHT _C.ORBSLAM2.D_OBSTACLE_MIN = 0.1 _C.ORBSLAM2.D_OBSTACLE_MAX = 4.0 _C.ORBSLAM2.PREPROCESS_MAP = True _C.ORBSLAM2.MIN_PTS_IN_OBSTACLE = ( get_task_config().SIMULATOR.DEPTH_SENSOR.WIDTH / 2.0 ) _C.ORBSLAM2.ANGLE_TH = float(np.deg2rad(15)) _C.ORBSLAM2.DIST_REACHED_TH = 0.15 _C.ORBSLAM2.NEXT_WAYPOINT_TH = 0.5 _C.ORBSLAM2.NUM_ACTIONS = 3 _C.ORBSLAM2.DIST_TO_STOP = 0.05 _C.ORBSLAM2.PLANNER_MAX_STEPS = 500 _C.ORBSLAM2.DEPTH_DENORM = get_task_config().SIMULATOR.DEPTH_SENSOR.MAX_DEPTH def get_config( config_paths: Optional[Union[List[str], str]] = None, opts: Optional[list] = None, ) -> CN: r"""Create a unified config with default values overwritten by values from `config_paths` and overwritten by options from `opts`. Args: config_paths: List of config paths or string that contains comma separated list of config paths. opts: Config options (keys, values) in a list (e.g., passed from command line into the config. For example, `opts = ['FOO.BAR', 0.5]`. Argument can be used for parameter sweeping or quick tests. """ config = _C.clone() if config_paths: if isinstance(config_paths, str): if CONFIG_FILE_SEPARATOR in config_paths: config_paths = config_paths.split(CONFIG_FILE_SEPARATOR) else: config_paths = [config_paths] for config_path in config_paths: config.merge_from_file(config_path) if opts: for k, v in zip(opts[0::2], opts[1::2]): if k == "BASE_TASK_CONFIG_PATH": config.BASE_TASK_CONFIG_PATH = v config.TASK_CONFIG = get_task_config(config.BASE_TASK_CONFIG_PATH) if opts: config.CMD_TRAILING_OPTS = opts config.merge_from_list(opts) config.freeze() return config
36.531136
108
0.684348
1,562
9,973
4.046095
0.214469
0.060759
0.076899
0.140981
0.446994
0.268671
0.205854
0.187184
0.137025
0.051899
0
0.024071
0.112704
9,973
272
109
36.665441
0.690134
0.227314
0
0.010101
0
0
0.057266
0.030664
0
0
0
0.003676
0
1
0.005051
false
0
0.020202
0
0.030303
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
415c300c1fc04be956dd24ffadcfc44181fd9b54
10,681
py
Python
backup_client.py
evermind/restic-backupclient
347fd6bfae0f967adac1b65775245f6e87a8c554
[ "MIT" ]
null
null
null
backup_client.py
evermind/restic-backupclient
347fd6bfae0f967adac1b65775245f6e87a8c554
[ "MIT" ]
null
null
null
backup_client.py
evermind/restic-backupclient
347fd6bfae0f967adac1b65775245f6e87a8c554
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 from os import environ import logging as log import argparse from crontab import CronTab from datetime import datetime,timedelta import time import subprocess import os.path import re import yaml import shutil import elasticdump import mysqldump import pgdump import mongodump import influxdump def fail(msg,args): log.error(msg,args) quit(1) def resolve_env_placeholders(template): origTemplate = template resolveDepth = 0 while resolveDepth < 10: resolveDepth += 1 changed = False for placeholder, key in re.findall('(\$\(([a-zA-Z0-9_-]+)\))', template): if key in environ: template = template.replace(placeholder, environ[key]) changed = True if not changed: break return template UNDEFINED=object() def get_env(name,default=UNDEFINED): if name in environ: return resolve_env_placeholders(environ[name]) if default != UNDEFINED: return default fail('Please set the environment variable %s',name) class ParseCronExpressions(argparse.Action): def __init__(self, option_strings, dest, **kwargs): super(ParseCronExpressions, self).__init__(option_strings, dest, **kwargs) def __call__(self, parser, namespace, values, option_string=None): items=[] for value in values: try: items.append(CronTab(value)) except ValueError as e: raise argparse.ArgumentError(self,'%s: %s'%(value,e)) setattr(namespace, self.dest, items) def get_next_schedule(crontab): now=datetime.now() delay=-1 for cron in crontab: cron_delay=cron.next(now,default_utc=False) if delay<0 or cron_delay<delay: delay=cron_delay return now+timedelta(seconds=delay) def load_config(): config_file=get_env('BACKUP_CONFIG',None) if config_file is None: return {} if not os.path.exists(config_file): log.error('Config does not exist: %s'%config_file) try: log.info('Using extra config from %s'%config_file) with open(config_file,'r') as config: return yaml.load(config) except: log.exception('Unable to read config file %s'%config_file) return None def run_pre_backup_script(scriptinfo): if type(scriptinfo) is not 
dict: log.error("Expected pre-backup-script to be a dict, got: %s",type(scriptinfo).__name__) return False if not 'script' in scriptinfo: log.error("Pre-backup-script does not contain a 'script' property.") return False script=scriptinfo['script'] fail_on_error=bool(scriptinfo['fail-on-error']) if 'fail-on-error' in scriptinfo else True description=("Executing pre-backup-script: %s"%scriptinfo['description']) if 'description' in scriptinfo else "Executing pre-backup-script" log.info(description) try: subprocess.check_call(script,stderr=subprocess.STDOUT,shell=True) log.info("Pre-backup-script succeeded") except subprocess.CalledProcessError as e: if (fail_on_error): log.error("Pre-backup-script failed: %s",e) return False log.warning("Pre-backup-script failed: %s",e) return True def init_restic_repo(): log.info('Initializing repository') try: subprocess.check_output([ 'restic', 'init' ],stderr=subprocess.STDOUT) log.info('Repository initialized.') except subprocess.CalledProcessError as e: output=e.output.decode() if 'repository master key and config already initialized' in output or 'config file already exists' in output: log.info('Repository was already initialized.') else: log.error('Initializing repository failed: %s'%output) return False def run_backup(): backup_root=get_env('BACKUP_ROOT') init_restic_repo() config=load_config() if config is None: return False if not (os.path.exists(backup_root)): log.info('Backup mount point not found %s. Creating internal mount point for dump jobs. 
This might be ok if you only backup database dumps.'%backup_root) os.mkdir(backup_root) if 'pre-backup-scripts' in config: for script in config['pre-backup-scripts']: if not run_pre_backup_script(script): log.error('Stopped due to pre-backup script failures') return False if 'elasticdump' in config: elasticdump_dir=os.path.join(backup_root,'elasticdump') try: shutil.rmtree(elasticdump_dir) except: pass if os.path.exists(elasticdump_dir): log.error('Unable to delete old elasticdump dir at %s'%elasticdump_dir) os.mkdir(elasticdump_dir) log.info('Running elasticdump to %s'%elasticdump_dir) elasticdump_ok=elasticdump.es_dump_with_config(elasticdump_dir,config['elasticdump']) if not elasticdump_ok: log.error('Elasticdump failed. Backup canceled.') return False if 'mysqldump' in config: mysqldump_dir=os.path.join(backup_root,'mysqldump') try: shutil.rmtree(mysqldump_dir) except: pass if os.path.exists(mysqldump_dir): log.error('Unable to delete old mysqldump dir at %s'%mysqldump_dir) os.mkdir(mysqldump_dir) log.info('Running mysqldump to %s'%mysqldump_dir) mysqldump_ok=mysqldump.mysql_dump_with_config(mysqldump_dir,config['mysqldump']) if not mysqldump_ok: log.error('Mysqldump failed. Backup canceled.') return False if 'pgdump' in config: pgdump_dir=os.path.join(backup_root,'pgdump') try: shutil.rmtree(pgdump_dir) except: pass if os.path.exists(pgdump_dir): log.error('Unable to delete old pgdump dir at %s'%pgdump_dir) os.mkdir(pgdump_dir) log.info('Running pgdump to %s'%pgdump_dir) pgdump_ok=pgdump.pg_dump_with_config(pgdump_dir,config['pgdump']) if not pgdump_ok: log.error('Pgdump failed. 
Backup canceled.') return False if 'mongodump' in config: mongodump_dir=os.path.join(backup_root,'mongodump') try: shutil.rmtree(mongodump_dir) except: pass if os.path.exists(mongodump_dir): log.error('Unable to delete old mongodump dir at %s'%mongodump_dir) os.mkdir(mongodump_dir) log.info('Running mongodump to %s'%mongodump_dir) mongodump_ok=mongodump.mongodump_with_config(mongodump_dir,config['mongodump']) if not mongodump_ok: log.error('Mongodump failed. Backup canceled.') return False if 'influxdump' in config: influxdump_dir=os.path.join(backup_root,'influxdump') try: shutil.rmtree(influxdump_dir) except: pass if os.path.exists(influxdump_dir): log.error('Unable to delete old influxdump dir at %s'%influxdump_dir) os.mkdir(influxdump_dir) log.info('Running influxdump to %s'%influxdump_dir) influxdump_ok=influxdump.influxdump_with_config(influxdump_dir,config['influxdump']) if not influxdump_ok: log.error('Influxdump failed. Backup canceled.') return False cmd=[ 'nice','-n19', 'ionice','-c3', 'restic', 'backup', '--host',get_env('BACKUP_HOSTNAME'), ] # exclude caches (http://bford.info/cachedir/spec.html) if not ('exclude-caches' in config and bool(config['exclude-caches'])): cmd.append('--exclude-caches') # ignore inode for changed-file checks (default is true) if not ('ignore-inode' in config and bool(config['ignore-inode'])): cmd.append('--ignore-inode') # set cacheDir if not default one should be used if ('cache-dir' in config ): log.info("cache-dir is: "+config['cache-dir']) cmd.append('--cache-dir') cmd.append(config['cache-dir']) # include files to backupset from given files if 'include-from' in config: includes=config['include-from'] if type(includes) is not list: includes=[includes] for include in includes: log.info("Use include from: %s"%include) cmd.append('--files-from') cmd.append(include) # exclude other files if 'exclude' in config: excludes=config['exclude'] if type(excludes) is not list: excludes=[excludes] for exclude in excludes: 
log.info("Excluding: %s"%exclude) cmd.append('--exclude') cmd.append(exclude) # if include is set no backuproot should given as argument if 'include-from' not in config: cmd.append(backup_root) log.info('Starting backup') try: subprocess.check_call(cmd,stderr=subprocess.STDOUT) log.info('Backup finished.') except subprocess.CalledProcessError as e: log.info('Backup failed.') return False clean_old_backups(config, True) def clean_old_backups(config=None, prune = False): if config is None: # direct call, init first config=load_config() init_restic_repo() if config is None: return False cleanup_command=[ 'restic', 'forget', '--prune' ] if 'keep' in config: keep=config['keep'] keep_is_valid=False for keep_type in ['last','hourly','daily','weekly','monthly','yearly']: if keep_type in keep: keep_is_valid=True cleanup_command+=['--keep-%s'%keep_type,str(keep[keep_type])] if not keep_is_valid: log.warn('Keep configuration is invalid - not deleting old backups.') return else: keep_is_valid=False for keep_type in ['last','hourly','daily','weekly','monthly','yearly']: keep_env='KEEP_%s' % (keep_type.upper()) if keep_env in environ: keep_is_valid=True cleanup_command+=['--keep-%s'%keep_type,str(environ[keep_env])] if not keep_is_valid: log.warn('Rotation not configured. 
Keeping backups forever.') return log.info('Unlocking repository') subprocess.check_call(['restic','unlock'],stderr=subprocess.STDOUT) log.info('Deleting old backups') try: subprocess.check_call(cleanup_command,stderr=subprocess.STDOUT) log.info('Backup finished.') except subprocess.CalledProcessError as e: log.warn('Cleanup failed!') def schedule_backup(crontab): while True: next_schedule=get_next_schedule(crontab) log.info('Scheduling next backup at %s'%next_schedule) while True: now=datetime.now() if now>=next_schedule: break time.sleep(10) try: run_backup() except: log.exception("Something went unexpectedly wrong!") def main(): log.basicConfig(level=log.INFO,format='%(asctime)s %(levelname)7s: %(message)s') parser = argparse.ArgumentParser(description='Perform backups with restic') subparsers = parser.add_subparsers(help='sub-command help',dest='cmd') subparsers.required = True parser_run = subparsers.add_parser('run', help='Run a backup now and rotate afterwards.') parser_run = subparsers.add_parser('rotate', help='Rotate backups now.') parser_schedule = subparsers.add_parser('schedule', help='Schedule backups.') parser_schedule.add_argument('cronexpression',nargs='+',action=ParseCronExpressions, help='Time to schedule the backup (cron expression, see https://pypi.org/project/crontab/)') args=parser.parse_args() get_env('RESTIC_REPOSITORY') get_env('RESTIC_PASSWORD') get_env('BACKUP_HOSTNAME') get_env('BACKUP_ROOT') if args.cmd=='run': result=run_backup() if not result: quit(1) if args.cmd=='rotate': result=clean_old_backups(None, True) if not result: quit(1) else: schedule_backup(args.cronexpression) if __name__ == '__main__': main()
28.712366
155
0.735137
1,522
10,681
5.02431
0.187911
0.021054
0.019616
0.0085
0.185171
0.138355
0.099516
0.049954
0.049954
0.049954
0
0.001961
0.14053
10,681
371
156
28.789757
0.831046
0.030147
0
0.248387
0
0.003226
0.247416
0.002319
0
0
0
0
0
1
0.041935
false
0.019355
0.051613
0
0.170968
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
415db15c97efdf73489f6ae6ea62535109579cca
3,883
py
Python
lib/ezdxf/__init__.py
tapnair/DXFer
8ec957d80c2f251bb78440147d1478106f99b3eb
[ "MIT" ]
4
2019-03-31T00:41:13.000Z
2021-07-31T05:09:07.000Z
lib/ezdxf/__init__.py
tapnair/DXFer
8ec957d80c2f251bb78440147d1478106f99b3eb
[ "MIT" ]
null
null
null
lib/ezdxf/__init__.py
tapnair/DXFer
8ec957d80c2f251bb78440147d1478106f99b3eb
[ "MIT" ]
5
2018-03-29T06:28:07.000Z
2021-07-31T05:09:08.000Z
# Purpose: ezdxf package
# Created: 10.03.2011
# Copyright (C) 2011, Manfred Moitzi
# License: MIT License
from __future__ import unicode_literals
version = (0, 8, 2)  # also update VERSION in setup.py
VERSION = "%d.%d.%d" % version
__version__ = VERSION
__author__ = "mozman <mozman@gmx.at>"

import sys
if sys.version_info[:2] < (2, 7):
    raise ImportError("Package 'ezdxf' requires Python 2.7 or later!")

import io
import codecs
from .lldxf.encoding import dxfbackslashreplace
codecs.register_error('dxfreplace', dxfbackslashreplace)  # setup DXF unicode encoder -> '\U+nnnn'

from .options import options  # example: ezdxf.options.template_dir = 'c:\templates'
from .lldxf.tags import dxf_info
from .lldxf.tagger import stream_tagger, skip_comments
from .tools.importer import Importer
from .tools.codepage import is_supported_encoding
from .lldxf.const import DXFStructureError, DXFVersionError
from .tools.zipmanager import ctxZipReader
from .tools import transparency2float, float2transparency  # convert transparency integer values to floats 0..1
from .tools.rgb import int2rgb, rgb2int
from .tools.pattern import PATTERN
from .lldxf import const  # restore module structure ezdxf.const


def new(dxfversion='AC1009'):
    """Create a new DXF drawing.

    :param dxfversion: DXF version specifier, default is 'AC1009'

    new() can create drawings for following DXF versions:
    - 'AC1009': AutoCAD R12 (DXF12)
    - 'AC1015': AutoCAD 2000
    - 'AC1018': AutoCAD 2004
    - 'AC1021': AutoCAD 2007
    - 'AC1024': AutoCAD 2010
    - 'AC1027': AutoCAD 2013
    """
    # imported lazily (presumably to avoid a circular import at package
    # init — TODO confirm)
    from .drawing import Drawing
    return Drawing.new(dxfversion)


def read(stream):
    """Read DXF drawing from a text *stream*, which only needs a readline() method.

    read() can open drawings of following DXF versions:
    - pre 'AC1009' DXF versions will be upgraded to 'AC1009', requires encoding set by header var $DWGCODEPAGE
    - 'AC1009': AutoCAD R12 (DXF12), requires encoding set by header var $DWGCODEPAGE
    - 'AC1012': AutoCAD R12 upgraded to AC1015, requires encoding set by header var $DWGCODEPAGE
    - 'AC1014': AutoCAD R14 upgraded to AC1015, requires encoding set by header var $DWGCODEPAGE
    - 'AC1015': AutoCAD 2000, requires encoding set by header var $DWGCODEPAGE
    - 'AC1018': AutoCAD 2004, requires encoding set by header var $DWGCODEPAGE
    - 'AC1021': AutoCAD 2007, requires encoding='utf-8'
    - 'AC1024': AutoCAD 2010, requires encoding='utf-8'
    - 'AC1027': AutoCAD 2013, requires encoding='utf-8'
    """
    # imported lazily (presumably to avoid a circular import — TODO confirm)
    from .drawing import Drawing
    return Drawing.read(stream)


def readfile(filename, encoding='auto'):
    """Read DXF drawing from file *filename*.

    :param filename: path of the DXF file to open
    :param encoding: 'auto' detects the encoding from the DXF header
        ($DWGCODEPAGE / version); any other value forces that encoding.
    :raises IOError: if *filename* is not a DXF file
    """
    if not is_dxf_file(filename):
        raise IOError("File '{}' is not a DXF file.".format(filename))
    # First pass: sniff DXF version and declared codepage. Decoding errors
    # are ignored because the true encoding is not known yet.
    with io.open(filename, mode='rt', encoding='utf-8', errors='ignore') as fp:
        info = dxf_info(fp)
    if encoding != 'auto':  # override encoding detection and $DWGCODEPAGE
        enc = encoding
    elif info.version >= 'AC1021':  # R2007 files and later are always encoded as UTF-8
        enc = 'utf-8'
    else:
        enc = info.encoding
    # Second pass: re-open with the resolved encoding and parse the drawing.
    with io.open(filename, mode='rt', encoding=enc, errors='ignore') as fp:
        dwg = read(fp)
    dwg.filename = filename
    if encoding != 'auto' and is_supported_encoding(encoding):
        # remember the explicitly forced encoding on the drawing object
        dwg.encoding = encoding
    return dwg


def readzip(zipfile, filename=None):
    """Reads the DXF file *filename* from *zipfile* or the first DXF file in
    *zipfile* if *filename* is *None*.
    """
    with ctxZipReader(zipfile, filename) as zipstream:
        dwg = read(zipstream)
        dwg.filename = zipstream.dxf_file_name
    return dwg


def is_dxf_file(filename):
    """Return True if *filename* starts with a DXF (0, 'SECTION') tag."""
    with io.open(filename, errors='ignore') as fp:
        reader = skip_comments(stream_tagger(fp))
        return next(reader) == (0, 'SECTION')
35.3
112
0.701262
510
3,883
5.276471
0.34902
0.053512
0.042363
0.046823
0.167224
0.154589
0.12709
0.042363
0.042363
0.042363
0
0.055342
0.194952
3,883
109
113
35.623853
0.805502
0.43111
0
0.072727
0
0
0.08409
0
0
0
0
0
0
1
0.090909
false
0
0.345455
0
0.527273
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
415dfc81e1ad703ea86b474b35a040d48a33ce28
646
py
Python
books/masteringPython/cp11/cp11_manual_trace.py
Bingwen-Hu/hackaway
69727d76fd652390d9660e9ea4354ba5cc76dd5c
[ "BSD-2-Clause" ]
null
null
null
books/masteringPython/cp11/cp11_manual_trace.py
Bingwen-Hu/hackaway
69727d76fd652390d9660e9ea4354ba5cc76dd5c
[ "BSD-2-Clause" ]
null
null
null
books/masteringPython/cp11/cp11_manual_trace.py
Bingwen-Hu/hackaway
69727d76fd652390d9660e9ea4354ba5cc76dd5c
[ "BSD-2-Clause" ]
null
null
null
# manual trace import sys import trace as trace_module import contextlib @contextlib.contextmanager def trace(count=False, trace=True, timing=True): tracer = trace_module.Trace( count=count, trace=trace, timing=timing) sys.settrace(tracer.globaltrace) yield tracer sys.settrace(None) result = tracer.results() result.write_results(show_missing=False, summary=True) def eggs_generator(): yield 'eggs' yield 'EGGS!' def spam_generator(): yield 'spam' yield 'spam!' yield 'SPAM!' with trace(): generator = spam_generator() print(next(generator)) print(next(generator)) generator = eggs_generator() print(next(generator))
19.575758
55
0.750774
84
646
5.678571
0.357143
0.056604
0.113208
0.169811
0
0
0
0
0
0
0
0
0.130031
646
33
56
19.575758
0.848754
0.018576
0
0.12
0
0
0.036335
0
0
0
0
0
0
1
0.12
false
0
0.12
0
0.24
0.12
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
415fd9ca7dc3dd7e0ad28f54f3222836570e36ec
229
py
Python
helpers/version.py
Dabalon/blues_bot.py
b153f65054ce973e16c5fd1e2061ce1fe50145d1
[ "MIT" ]
5
2020-01-05T18:53:20.000Z
2022-03-19T13:01:24.000Z
helpers/version.py
Dabalon/blues_bot.py
b153f65054ce973e16c5fd1e2061ce1fe50145d1
[ "MIT" ]
22
2019-10-27T00:56:30.000Z
2021-07-13T16:42:24.000Z
helpers/version.py
Dabalon/blues_bot.py
b153f65054ce973e16c5fd1e2061ce1fe50145d1
[ "MIT" ]
11
2020-01-05T18:53:22.000Z
2022-03-30T22:20:13.000Z
# Version command helper def get_version(): """ Opens version file and returns it as a string """ file = open("assets/version", "r") ret = '' for line in file: ret += line file.close() return ret
20.818182
57
0.585153
31
229
4.290323
0.709677
0
0
0
0
0
0
0
0
0
0
0
0.292576
229
10
58
22.9
0.820988
0.305677
0
0
0
0
0.098684
0
0
0
0
0
0
1
0.142857
false
0
0
0
0.285714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
416352a765ad965b8902ff0ad42dd03f68d57af2
3,542
py
Python
code/tfidf.py
yekang/ccf_sougou
1c65cd78407f14646bbf52140051fc5f239c31d3
[ "MIT" ]
null
null
null
code/tfidf.py
yekang/ccf_sougou
1c65cd78407f14646bbf52140051fc5f239c31d3
[ "MIT" ]
null
null
null
code/tfidf.py
yekang/ccf_sougou
1c65cd78407f14646bbf52140051fc5f239c31d3
[ "MIT" ]
null
null
null
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer,CountVectorizer
from sklearn.externals import joblib as jl
import data_preprocess


def tfidf_feature():
    """Build one bag-of-words matrix per demographic target.

    Loads the preprocessed train/test token lists from joblib dumps, drops
    train rows whose label is 0, appends all test rows, and vectorizes each
    of the three resulting corpora (gender, age, education).
    """
    gender_label_cleaned_list = []
    age_label_cleaned_list = []
    education_label_cleaned_list = []
    gender_feature_cleaned_list = []
    age_feature_cleaned_list = []
    education_feature_cleaned_list = []
    gender_label_list = data_preprocess.get_gender_label_list()
    age_label_list = data_preprocess.get_age_label_list()
    education_label_list = data_preprocess.get_education_label_list()
    # assumes each element of seg_train/seg_test is a list of tokens
    # (joined with ' ' below) — TODO confirm against data_preprocess
    seg_train = jl.load("train_no_number_list")
    seg_test = jl.load("test_no_number_list")
    for x in range(len(seg_train)):
        # label value 0 marks a missing/unknown label; such rows are skipped
        if gender_label_list[x] != 0:
            gender_feature_cleaned_list.append(seg_train[x])
            # gender_feature_cleaned_list.extend(seg_test)
            gender_label_cleaned_list.append(gender_label_list[x])
        if age_label_list[x] != 0:
            age_feature_cleaned_list.append(seg_train[x])
            # age_feature_cleaned_list.extend(seg_test)
            age_label_cleaned_list.append(age_label_list[x])
        if education_label_list[x] != 0:
            education_feature_cleaned_list.append(seg_train[x])
            # education_feature_cleaned_list.extend(seg_test)
            education_label_cleaned_list.append(education_label_list[x])
    # test documents are appended to every corpus (no labels for them)
    gender_feature_cleaned_list.extend(seg_test)
    age_feature_cleaned_list.extend(seg_test)
    education_feature_cleaned_list.extend(seg_test)
    # print(type(seg_test))
    # for x in range(len(seg_test)):
    #     seg_train.extend(seg_test[x])
    # # print(len(seg_all))
    sentence_gender_list = []
    sentence_age_list = []
    sentence_education_list = []
    # re-join token lists into whitespace-separated sentences for sklearn
    for i in range(len(gender_feature_cleaned_list)):
        sentence_gender_list.append(' '.join(gender_feature_cleaned_list[i]))
    for i in range(len(age_feature_cleaned_list)):
        sentence_age_list.append(' '.join(age_feature_cleaned_list[i]))
    for i in range(len(education_feature_cleaned_list)):
        sentence_education_list.append(' '.join(education_feature_cleaned_list[i]))
    # NOTE(review): despite the name, this is a CountVectorizer (raw term
    # counts, not tf-idf), and the same instance is refit three times, so
    # each fit_transform discards the previous vocabulary — the three
    # matrices use unrelated column spaces. Verify this is intended.
    ctf_idf = CountVectorizer()
    gender_matrix = ctf_idf.fit_transform(sentence_gender_list)
    age_matrix = ctf_idf.fit_transform(sentence_age_list)
    education_matrix = ctf_idf.fit_transform(sentence_education_list)
    return gender_matrix, age_matrix, education_matrix


def tfidf_label():
    """Return the label lists matching the train rows kept by tfidf_feature().

    Applies the same "label != 0" filter to the training labels only (test
    rows have no labels), so indices align with the training portion of the
    feature matrices above.
    """
    gender_label_list = data_preprocess.get_gender_label_list()
    age_label_list = data_preprocess.get_age_label_list()
    education_label_list = data_preprocess.get_education_label_list()
    gender_label_cleaned_list = []
    age_label_cleaned_list = []
    education_label_cleaned_list = []
    seg_train = jl.load("train_no_number_list")
    # seg_test=jl.load("docs.test")
    for x in range(len(seg_train)):
        if gender_label_list[x] != 0:
            # gender_feature_cleaned_list.append(seg_train[x])
            gender_label_cleaned_list.append(gender_label_list[x])
        if age_label_list[x] != 0:
            # age_feature_cleaned_list.append(seg_train[x])
            age_label_cleaned_list.append(age_label_list[x])
        if education_label_list[x] != 0:
            # education_feature_cleaned_list.append(seg_train[x])
            education_label_cleaned_list.append(education_label_list[x])
    return gender_label_cleaned_list, age_label_cleaned_list, education_label_cleaned_list
49.887324
88
0.732919
482
3,542
4.896266
0.105809
0.167797
0.160169
0.071186
0.742797
0.736864
0.696186
0.583051
0.554237
0.487712
0
0.002065
0.179842
3,542
71
88
49.887324
0.810327
0.118295
0
0.474576
0
0
0.020388
0
0
0
0
0
0
1
0.033898
false
0
0.067797
0
0.135593
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
4165009c85e483f589a199ebd8b12e333a80178e
1,168
py
Python
yqml/select.py
maztohir/YQML
6e5eb06f2869a7309614120cc56ac04f5a689c9f
[ "MIT" ]
2
2021-03-11T16:44:02.000Z
2021-04-17T17:57:38.000Z
yqml/select.py
maztohir/yqml
6e5eb06f2869a7309614120cc56ac04f5a689c9f
[ "MIT" ]
null
null
null
yqml/select.py
maztohir/yqml
6e5eb06f2869a7309614120cc56ac04f5a689c9f
[ "MIT" ]
null
null
null
from yqml.keys import Keys from .where import Where from .join import Join, RightJoin, LeftJoin from .fields import Fields class Select: def __init__(self, dict): self._content = dict def to_sql(self): query = f"SELECT {self.field_clause} {self.from_clause} {self.join_clause} {self.left_join_clause} {self.right_join_clause} {self.where_clause}" return query @property def field_clause(self): return Fields(self._content.get(Keys.SELECT)).to_raw_sql() @property def from_clause(self): return From(self._content).to_raw_sql() @property def join_clause(self): return Join(self._content).to_raw_sql() @property def right_join_clause(self): return RightJoin(self._content).to_raw_sql() @property def left_join_clause(self): return LeftJoin(self._content).to_raw_sql() @property def where_clause(self): return Where(self._content).to_raw_sql() class From: def __init__(self, dict): self._content = dict def to_raw_sql(self): table_id = self._content.get(Keys.FROM) return f'FROM {table_id}'
26.545455
152
0.668664
160
1,168
4.56875
0.1875
0.150479
0.076607
0.109439
0.311902
0.259918
0.259918
0.095759
0.095759
0
0
0
0.22774
1,168
44
153
26.545455
0.810421
0
0
0.294118
0
0.029412
0.126604
0.040205
0
0
0
0
0
1
0.294118
false
0
0.117647
0.176471
0.705882
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
4165053f103fa07c4e3af8d74e151dd12dc1f794
904
py
Python
manu_sawyer/src/GelSightA.py
robertocalandra/the-feeling-of-success
7bb895897e369ae9f5fcaeed61d401e019a9cdf1
[ "MIT" ]
10
2018-05-31T04:57:25.000Z
2021-05-28T11:22:29.000Z
manu_sawyer/src/GelSightA.py
robertocalandra/the-feeling-of-success
7bb895897e369ae9f5fcaeed61d401e019a9cdf1
[ "MIT" ]
null
null
null
manu_sawyer/src/GelSightA.py
robertocalandra/the-feeling-of-success
7bb895897e369ae9f5fcaeed61d401e019a9cdf1
[ "MIT" ]
3
2018-05-31T05:00:08.000Z
2019-02-25T06:32:45.000Z
#!/usr/bin/env python import rospy from cv_bridge import CvBridge, CvBridgeError from sensor_msgs.msg import Image import multiprocessing class GelSightA: def __init__(self, topic='/gelsightA/image_raw'): # '/gelsightA/image_raw' /image_view_A/output # Variable self.img = None # Used to convert image from ROS to cv2 self.bridge = CvBridge() # The subscriber self.subscriber = rospy.Subscriber(topic, Image, self.update_image) def spin_thread(): rospy.spin() self.gelsight_process = multiprocessing.Process(target=spin_thread) self.gelsight_process.start() def get_image(self): return self.img def update_image(self, data): self.img = self.bridge.imgmsg_to_cv2(data, "bgr8") def end_process(self): self.gelsight_process.terminate() self.gelsight_process.join()
26.588235
100
0.67146
112
904
5.223214
0.455357
0.082051
0.129915
0
0
0
0
0
0
0
0
0.004335
0.234513
904
33
101
27.393939
0.84104
0.139381
0
0
0
0
0.031048
0
0
0
0
0
0
1
0.25
false
0
0.2
0.05
0.55
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
4165c6d113e9bcb67cd9c5283014beedef28535b
2,142
py
Python
ingest/common/python/package/streamsx_health/test/TestObservation.py
markheger/streamsx.health
14c27e377aa5947e76b0b681c1d8f5350a56323c
[ "Apache-2.0" ]
45
2016-06-23T21:47:52.000Z
2021-09-06T02:44:51.000Z
ingest/common/python/package/streamsx_health/test/TestObservation.py
markheger/streamsx.health
14c27e377aa5947e76b0b681c1d8f5350a56323c
[ "Apache-2.0" ]
107
2016-06-23T21:49:16.000Z
2020-07-10T13:01:28.000Z
ingest/common/python/package/streamsx_health/test/TestObservation.py
markheger/streamsx.health
14c27e377aa5947e76b0b681c1d8f5350a56323c
[ "Apache-2.0" ]
48
2016-12-01T23:05:38.000Z
2021-12-14T17:11:56.000Z
#**************************************************************************** # Copyright (C) 2017 International Business Machines Corporation # All Rights Reserved # **************************************************************************** import unittest import json from streamsx_health.ingest.Observation import * class TestObservation(unittest.TestCase): def getObservation(self): jsonStr = '{"patientId":"patient-1", "device":{"id":"VitalsGenerator", "locationId":"bed1"}, "readingSource": {"id":123, "deviceId":"VitalsGenerator", "sourceType":"generated"}, "reading": {"ts": 605, "uom":"bpm", "value":82.56785326532197, "readingType": {"code":"8867-4", "system":"streamsx.heath/1.0"}}}' dictObj = json.loads(jsonStr) return dictObj def test_getPatientReadingCode(self): obx = self.getObservation() self.assertEqual(getReadingCode(obx), '8867-4') def test_geReadingCodeSystem(self): obx = self.getObservation() self.assertEqual(getReadingCodeSystem(obx), 'streamsx.heath/1.0') def test_geReadingValue(self): obx = self.getObservation() self.assertEqual(getReadingValue(obx), 82.56785326532197) def test_getUom(self): obx = self.getObservation() self.assertEqual(getUom(obx), 'bpm') def test_getReadingTs(self): obx = self.getObservation() self.assertEqual(getReadingTs(obx), 605) def test_getReadingSourceId(self): obx = self.getObservation() self.assertEqual(getReadingSourceId(obx), '123') def test_getReadingSourceType(self): obx = self.getObservation() self.assertEqual(getReadingSourceType(obx), 'generated') def test_getDeviceId(self): obx = self.getObservation() self.assertEqual(getDeviceId(obx), 'VitalsGenerator') def test_getLocationId(self): obx = self.getObservation() self.assertEqual(getLocationId(obx), 'bed1') def test_getPatientId(self): obx = self.getObservation() self.assertEqual(getPatientId(obx), 'patient-1') if __name__ == '__main__': unittest.main()
34.548387
315
0.625117
197
2,142
6.700508
0.380711
0.15
0.083333
0.189394
0.30303
0.30303
0
0
0
0
0
0.037479
0.177871
2,142
61
316
35.114754
0.712095
0.110177
0
0.25
0
0.025
0.194532
0.100946
0
0
0
0
0.25
1
0.275
false
0
0.075
0
0.4
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
2
416700a551016e3c3062ddbef52da0802e586ce3
1,849
py
Python
podcastapi.py
rexxars/kodi-vg-podcasts
f5151b78717533e97a3d70439946654228adca70
[ "MIT" ]
null
null
null
podcastapi.py
rexxars/kodi-vg-podcasts
f5151b78717533e97a3d70439946654228adca70
[ "MIT" ]
null
null
null
podcastapi.py
rexxars/kodi-vg-podcasts
f5151b78717533e97a3d70439946654228adca70
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # # Copyright (C) 2015-2016 Espen Hovlandsdal from requests import Session API_URL = 'http://api.vg.no/podcast'; session = Session() session.headers['User-Agent'] = 'kodi-vg-podcasts' session.headers['Accept'] = 'application/json' class Base(object): id = None title = None subtitle = None thumb = None logo = None def __init__(self, **kwargs): self.__dict__.update(kwargs) class Show(Base): @staticmethod def from_response(r): return Show( id=r['slug'], title=r['name'], subtitle=r['subtitle'], logo=r['logo'], thumb=r['logoThumb'] ) class Episode(Base): duration = 0 media_url = None year = 2015 @staticmethod def from_response(r): url = None for attachment in r['attachments']: if attachment['format'] == 'mp3': url = attachment['url'] break return Episode( id=r['slug'], title=r['title'], subtitle=r['subtitle'], logo=r['logo'], thumb=r['logoThumb'], year=get_year(r['pubDate']), duration=parse_duration(r['duration']), media_url=url ) def shows(): return [Show.from_response(item) for item in _get('/shows.json')] def episodes(slug): items = _get('/' + slug + '.json')['episodes'] return [Episode.from_response(item) for item in items] def parse_duration(dur): parts = dur.split(':') multiplier = 1 seconds = 0 for part in reversed(parts): seconds += int(part) * multiplier multiplier *= 60 return seconds def get_year(date): return int(date[:4]) def _get(path): r = session.get(API_URL + path) r.raise_for_status() return r.json()
22.277108
69
0.559221
218
1,849
4.62844
0.389908
0.047572
0.037661
0.053518
0.212091
0.130823
0.081269
0.081269
0.081269
0
0
0.01542
0.29854
1,849
82
70
22.54878
0.762529
0.034072
0
0.15873
0
0
0.109428
0
0
0
0
0
0
1
0.126984
false
0
0.015873
0.047619
0.428571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4168384f50e5cc2f231cecb8b748342434bfed92
554
py
Python
LOGGING/logger.py
Couso99/EEG-Environment
d67de00c08c5892baebe5bf993cac0a5db6e70b1
[ "MIT" ]
null
null
null
LOGGING/logger.py
Couso99/EEG-Environment
d67de00c08c5892baebe5bf993cac0a5db6e70b1
[ "MIT" ]
null
null
null
LOGGING/logger.py
Couso99/EEG-Environment
d67de00c08c5892baebe5bf993cac0a5db6e70b1
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ @author: %(Mikel Val Calvo)s @email: %(mikel1982mail@gmail.com) @institution: %(Dpto. de Inteligencia Artificial, Universidad Nacional de Educación a Distancia (UNED)) @DOI: 10.5281/zenodo.3759306 """ class logger(): # Inicializa la aplicación GUI def __init__(self, gui): self.text = '' self.gui = gui # Actualiza el texto del logger que aparece en la visualización def update_text(self, text): #Recibe el texto de FILEIO self.gui.bci_graph.logger.appendPlainText(text)
29.157895
103
0.666065
71
554
5.112676
0.732394
0.057851
0
0
0
0
0
0
0
0
0
0.041284
0.212996
554
18
104
30.777778
0.791284
0.606498
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
416ad546e81165ecbce7d3668b0084d159819a2c
442
py
Python
src/year2020/day05b.py
lancelote/advent_of_code
06dda6ca034bc1e86addee7798bb9b2a34ff565b
[ "Unlicense" ]
10
2017-12-11T17:54:52.000Z
2021-12-09T20:16:30.000Z
src/year2020/day05b.py
lancelote/advent_of_code
06dda6ca034bc1e86addee7798bb9b2a34ff565b
[ "Unlicense" ]
260
2015-12-09T11:03:03.000Z
2021-12-12T14:32:23.000Z
src/year2020/day05b.py
lancelote/advent_of_code
06dda6ca034bc1e86addee7798bb9b2a34ff565b
[ "Unlicense" ]
null
null
null
"""2020 - Day 5 Part 2: Binary Boarding.""" from src.year2020.day05a import process_data def solve(task: str) -> int: """Find an empty seat.""" seats = process_data(task) first = min(seats).pk last = max(seats).pk ideal = set(range(first, last)) real = set(seat.pk for seat in seats) difference = ideal.difference(real) assert len(difference), "difference is not a single seat" return difference.pop()
26
61
0.658371
64
442
4.515625
0.6875
0.076125
0
0
0
0
0
0
0
0
0
0.034483
0.21267
442
16
62
27.625
0.795977
0.128959
0
0
0
0
0.082888
0
0
0
0
0
0.1
1
0.1
false
0
0.1
0
0.3
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
416ba491b8205e861203d98c397a2176ef1e5e99
6,451
py
Python
app/src/original.py
chrisjim316/Amazon-Alexa-Hack
8180520b3feb9ef8dc55aa69119b21436ee109d6
[ "MIT" ]
null
null
null
app/src/original.py
chrisjim316/Amazon-Alexa-Hack
8180520b3feb9ef8dc55aa69119b21436ee109d6
[ "MIT" ]
null
null
null
app/src/original.py
chrisjim316/Amazon-Alexa-Hack
8180520b3feb9ef8dc55aa69119b21436ee109d6
[ "MIT" ]
null
null
null
from __future__ import print_function import json import urllib2 # properties needed to interact with the api apiToken = //intentionally obfuscated seToken = //intentionally obfuscated baseUrl = 'https://www.secretescapes.com/v3' numberOfRooms = 1 APP_ID = "amzn1.ask.skill.258c1e36-afd4-4d5b-8f50-fbab4ed83475" def lambda_handler(event, context): if (event['session']['application']['applicationId'] != APP_ID): raise ValueError("Invalid Application ID") if event['session']['new']: on_session_started({'requestId': event['request']['requestId']}, event['session']) if event['request']['type'] == "LaunchRequest": return on_launch(event['request'], event['session']) elif event['request']['type'] == "IntentRequest": return on_intent(event['request'], event['session']) elif event['request']['type'] == "SessionEndedRequest": return on_session_ended(event['request'], event['session']) def on_session_started(session_started_request, session): """ Called when the session starts """ print("on_session_started requestId=" + session_started_request['requestId'] + ", sessionId=" + session['sessionId']) def on_launch(launch_request, session): """ Called when the user launches the skill without specifying what they want """ return get_welcome_response() def on_intent(intent_request, session): """ Called when the user specifies an intent for this skill """ print("on_intent requestId=" + intent_request['requestId'] + ", sessionId=" + session['sessionId']) intent = intent_request['intent'] intent_name = intent_request['intent']['name'] # Dispatch to your skill's intent handlers if intent_name == "SearchHolidays": return search_holiday(intent, session) elif intent_name == "AMAZON.HelpIntent": return get_welcome_response() elif intent_name == "AMAZON.StopIntent": return get_stop_response() else: raise ValueError("Invalid intent") def on_session_ended(session_ended_request, session): """ Called when the user ends the session. 
Is not called when the skill returns should_end_session=true """ print("on_session_ended requestId=" + session_ended_request['requestId'] + ", sessionId=" + session['sessionId']) # add cleanup logic here # --------------- Functions that control the skill's behavior ------------------ def get_welcome_response(): """ If we wanted to initialize the session to have some attributes we could add those here """ session_attributes = {} card_title = "Secret Escapes" speech_output = "Welcome to secret escapes, what would you like to search for? or say, inspire me, for some ideas" reprompt_text = "say what you would like to search for, or say, inspire me, for some ideas" should_end_session = False return build_response(session_attributes, build_speechlet_response( card_title, speech_output, reprompt_text, should_end_session)) def get_stop_response(): session_attributes = {} card_title = "Secret Escapes" speech_output = "Happy travles!" reprompt_text = None should_end_session = True return build_response(session_attributes, build_speechlet_response( card_title, speech_output, reprompt_text, should_end_session)) def doCall(urlPath, data): if data is None: # create GET request req = urllib2.Request(baseUrl + urlPath) else: # create POST request req = urllib2.Request(baseUrl + urlPath, json.dumps(data)) req.add_header("Content-type", "application/json") req.add_header("se-api-token", apiToken) req.add_header("se-token", seToken) # execute request to get response response = urllib2.urlopen(req).read() # all responses are JSON so can be parsed accordingly parsed = json.loads(response) return parsed def doSearchQuery(query): # Search for sale for sales ... 
searchData = { "query" : query } # Run search call with params specified above sales = doCall('/search/sales/flash', searchData) matches = len(sales["match"]) print("Found " + str(matches) + " matching sale(s)") if (matches > 0): sale = sales["match"][0] return { "hotelDetails": sale["title"], "location": query, "price": str(int(sale["price"]["discounted"])) + " pounds " + sale["price"]["description"] } return None def search_holiday(intent, session): card_title = "Secret Escapes" city = 'Berlin' slots = intent['slots'] if (slots): city = slots['city']['value'] card_title = "Results for " + city session_attributes = {"lastresult" : city} should_end_session = False reprompt_text = None result = doSearchQuery(city) session_attributes = { "hotelDetails":result["hotelDetails"], "location":result["location"], "price":result["price"] } speech_output = "Sorry, I can't find anything that matches " + city if (result): speech_output = "How about, " + result['hotelDetails'] + " with prices from " + result['price'] + "?" reprompt_text = "Would you like to favourite this, have it sent to you in an email or find something else?" return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session)) # --------------- Helpers that build all of the responses ---------------------- def build_speechlet_response(title, output, reprompt_text, should_end_session): return { 'outputSpeech': { 'type': 'PlainText', 'text': output }, 'card': { 'type': 'Simple', 'title': title, 'content': output }, 'reprompt': { 'outputSpeech': { 'type': 'PlainText', 'text': reprompt_text } }, 'shouldEndSession': should_end_session, "directives": [ { "type": "Dialog.ElicitSlot", "slotToElicit": "saleAction" } ] } def build_response(session_attributes, speechlet_response): return { 'version': '1.0', 'sessionAttributes': session_attributes, 'response': speechlet_response }
31.622549
118
0.637421
709
6,451
5.629055
0.310296
0.020296
0.036081
0.024054
0.238537
0.200952
0.150088
0.150088
0.102481
0.102481
0
0.005897
0.237638
6,451
203
119
31.778325
0.805612
0.072082
0
0.189781
0
0.007299
0.25301
0.009344
0
0
0
0
0
0
null
null
0
0.021898
null
null
0.036496
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
416bf5d355c191581abaaf5baae4b6d02f53cd4e
4,338
py
Python
src/tools/files.py
cowboysmall/rosalind
021e4392a8fc946b97bbf86bbb8227b28bb5e462
[ "MIT" ]
null
null
null
src/tools/files.py
cowboysmall/rosalind
021e4392a8fc946b97bbf86bbb8227b28bb5e462
[ "MIT" ]
null
null
null
src/tools/files.py
cowboysmall/rosalind
021e4392a8fc946b97bbf86bbb8227b28bb5e462
[ "MIT" ]
null
null
null
def write_lines(file_path, lines):
    """Write each string in *lines* to *file_path*, one string per line."""
    with open(file_path, 'w') as file:
        file.writelines(line + '\n' for line in lines)


def _nonblank_lines(file_path):
    """Yield the stripped, non-empty lines of *file_path*, in file order."""
    with open(file_path) as file:
        for raw in file:
            stripped = raw.strip()
            if stripped:
                yield stripped


def _parse_edge(tokens):
    """Turn a sequence of string tokens into a tuple of ints (one edge record)."""
    return tuple(int(tok) for tok in tokens)


def read_line(file_path):
    """Return the first line of *file_path*, stripped (may be '' if blank)."""
    with open(file_path) as file:
        return file.readline().strip()


def read_lines(file_path):
    """Return all non-empty lines of *file_path*, stripped."""
    return list(_nonblank_lines(file_path))


def read_float(file_path):
    """Return the first line of *file_path* parsed as a float."""
    return float(read_line(file_path))


def read_floats(file_path):
    """Return every non-empty line of *file_path* parsed as a float."""
    return [float(line) for line in _nonblank_lines(file_path)]


def read_int(file_path):
    """Return the first line of *file_path* parsed as an int."""
    return int(read_line(file_path))


def read_ints(file_path):
    """Return every non-empty line of *file_path* parsed as an int."""
    return [int(line) for line in _nonblank_lines(file_path)]


def read_line_of_words(file_path):
    """Return the whitespace-separated words of the first line of *file_path*."""
    # str.split() with no argument already ignores surrounding whitespace,
    # so no additional strip of the line is needed.
    return read_line(file_path).split()


def read_lines_of_words(file_path):
    """Return a list of word lists, one per non-empty line of *file_path*."""
    return [line.split() for line in _nonblank_lines(file_path)]


def read_line_of_floats(file_path):
    """Return the floats on the first line of *file_path*."""
    return [float(tok) for tok in read_line(file_path).split()]


def read_lines_of_floats(file_path):
    """Return a list of float lists, one per non-empty line of *file_path*."""
    return [[float(tok) for tok in line.split()]
            for line in _nonblank_lines(file_path)]


def read_line_of_ints(file_path):
    """Return the ints on the first line of *file_path*."""
    return [int(tok) for tok in read_line(file_path).split()]


def read_lines_of_ints(file_path):
    """Return a list of int lists, one per non-empty line of *file_path*."""
    return [[int(tok) for tok in line.split()]
            for line in _nonblank_lines(file_path)]


def read_graph(file_path):
    """Read a graph file: a header line of ints, then one edge per line.

    Each edge line holds '<tail> <head>'. Returns the header ints as a list
    with the edge list appended as the final element, e.g.
    ``[n, m, [(t1, h1), ...]]``. Blank trailing lines are ignored.
    """
    with open(file_path) as file:
        graph = [int(tok) for tok in file.readline().split()]
        graph.append([_parse_edge(line.split()) for line in file if line.strip()])
        return graph


def read_weighted_graph(file_path):
    """Like :func:`read_graph`, but edge lines are '<tail> <head> <weight>'."""
    with open(file_path) as file:
        graph = [int(tok) for tok in file.readline().split()]
        graph.append([_parse_edge(line.split()) for line in file if line.strip()])
        return graph


def _read_graph_record(file):
    """Read one graph record from the open *file*.

    Skips any blank separator lines, reads a header line of ints, then
    ``header[1]`` edge lines (each a run of ints). Returns the header list
    with the edge list appended.
    """
    line = file.readline().strip()
    while not line:
        line = file.readline().strip()
    graph = [int(tok) for tok in line.split()]
    # header[1] is, by the file format, the number of edge lines to consume.
    graph.append([_parse_edge(file.readline().split())
                  for _ in range(graph[1])])
    return graph


def read_graphs(file_path):
    """Read ``k`` graphs: a count line, then ``k`` records (blank-separated).

    Returns ``(k, graphs)`` where each graph is shaped as in
    :func:`_read_graph_record` with 2-tuple '(tail, head)' edges.
    """
    with open(file_path) as file:
        k = int(file.readline().strip())
        return k, [_read_graph_record(file) for _ in range(k)]


def read_weighted_graphs(file_path):
    """Weighted variant of :func:`read_graphs`; edges carry a third int weight."""
    with open(file_path) as file:
        k = int(file.readline().strip())
        return k, [_read_graph_record(file) for _ in range(k)]
22.59375
68
0.542416
562
4,338
4.049822
0.074733
0.119508
0.089631
0.119508
0.880492
0.84007
0.84007
0.808875
0.777241
0.708699
0
0.003511
0.343476
4,338
191
69
22.712042
0.795646
0
0
0.6875
0
0
0.000692
0
0
0
0
0
0
1
0.132813
false
0
0
0
0.257813
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
416ecd1bf60b2f5dfac4a13faf4ce1dc4d266fd7
603
py
Python
ebl/tests/transliteration/test_atf.py
ElectronicBabylonianLiterature/dictionary
5977a57314cf57f94f75cd12520f178b1d6a6555
[ "MIT" ]
4
2020-04-12T14:24:51.000Z
2020-10-15T15:48:15.000Z
ebl/tests/transliteration/test_atf.py
ElectronicBabylonianLiterature/dictionary
5977a57314cf57f94f75cd12520f178b1d6a6555
[ "MIT" ]
200
2019-12-04T09:53:20.000Z
2022-03-30T20:11:31.000Z
ebl/tests/transliteration/test_atf.py
ElectronicBabylonianLiterature/dictionary
5977a57314cf57f94f75cd12520f178b1d6a6555
[ "MIT" ]
1
2021-09-06T16:22:39.000Z
2021-09-06T16:22:39.000Z
import pytest

from ebl.transliteration.domain.atf import to_sub_index, sub_index_to_int

# Pairs of (integer value, expected subscript rendering).
# None renders as the "unknown index" sign and 1 renders as the empty string.
SUB_INDICES = [
    (None, "ₓ"),
    (1, ""),
    (2, "₂"),
    (3, "₃"),
    (4, "₄"),
    (5, "₅"),
    (6, "₆"),
    (7, "₇"),
    (8, "₈"),
    (9, "₉"),
    (0, "₀"),
    (1024, "₁₀₂₄"),
]


@pytest.mark.parametrize("number,expected", SUB_INDICES)
def test_to_sub_index(number, expected):
    assert to_sub_index(number) == expected


# Parsing additionally accepts the explicit "₁" spelling of 1.
# (Fixed: the argnames string had a stray trailing comma.)
@pytest.mark.parametrize("expected,sub_index", [*SUB_INDICES, (1, "₁")])
def test_sub_index_to_int(sub_index, expected):
    assert sub_index_to_int(sub_index) == expected
20.793103
73
0.60199
87
603
3.908046
0.367816
0.211765
0.088235
0.114706
0.358824
0.170588
0.170588
0
0
0
0
0.059671
0.19403
603
28
74
21.535714
0.639918
0
0
0
0
0
0.08126
0
0
0
0
0
0.090909
1
0.090909
false
0
0.090909
0
0.181818
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
416fc4132f82c94ae93a71e4d7f00ce6d134cda7
764
py
Python
youtube/forms.py
shravs-cell/video
0bc56062f8200049c85dd35eb0e07447289bc902
[ "MIT" ]
null
null
null
youtube/forms.py
shravs-cell/video
0bc56062f8200049c85dd35eb0e07447289bc902
[ "MIT" ]
null
null
null
youtube/forms.py
shravs-cell/video
0bc56062f8200049c85dd35eb0e07447289bc902
[ "MIT" ]
null
null
null
from django import forms


class LoginForm(forms.Form):
    """Credentials form for signing an existing user in."""

    username = forms.CharField(label='Username', max_length=300)
    password = forms.CharField(label='Password', max_length=300)


class RegisterForm(forms.Form):
    """Sign-up form collecting a username, password and email address."""

    username = forms.CharField(label='Username', max_length=300)
    password = forms.CharField(label='Password', max_length=20)
    email = forms.CharField(label='Email', max_length=300)


class CommentForm(forms.Form):
    """Free-text comment on a video."""

    text = forms.CharField(label='Text', max_length=500)
    #video = forms.IntegerField(widget=forms.HiddenInput(), initial=1)


class NewVideoForm(forms.Form):
    """Upload form for a new video: title, description and the media file."""

    title = forms.CharField(label='Title', max_length=700)
    description = forms.CharField(label='Description', max_length=300)
    file = forms.FileField()
36.380952
71
0.732984
95
764
5.810526
0.336842
0.202899
0.275362
0.07971
0.362319
0.362319
0.362319
0.362319
0.362319
0.362319
0
0.036199
0.132199
764
20
72
38.2
0.79638
0.085079
0
0.142857
0
0
0.081779
0
0
0
0
0
0
1
0
false
0.142857
0.071429
0
1
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
4
416fdbf082852439f1fed8fca80d233764759242
1,381
py
Python
regcore/migrations/0015_auto_20210429_1708.py
cgodwin1/regulations-core
1afad359f33655946d93da069bf58bb12ec4805e
[ "CC0-1.0" ]
1
2021-04-14T06:08:10.000Z
2021-04-14T06:08:10.000Z
regcore/migrations/0015_auto_20210429_1708.py
cgodwin1/regulations-core
1afad359f33655946d93da069bf58bb12ec4805e
[ "CC0-1.0" ]
2
2021-02-16T18:23:56.000Z
2021-04-26T14:27:17.000Z
regcore/migrations/0015_auto_20210429_1708.py
cgodwin1/regulations-core
1afad359f33655946d93da069bf58bb12ec4805e
[ "CC0-1.0" ]
2
2020-12-07T16:46:30.000Z
2021-02-23T14:28:35.000Z
# Generated by Django 3.2 on 2021-04-29 17:08
from django.db import migrations, models


class Migration(migrations.Migration):
    """Make the MPTT bookkeeping fields non-editable and add the Part model."""

    dependencies = [
        ('regcore', '0014_auto_20160504_0101'),
    ]

    operations = [
        # level/lft/rght are tree-structure bookkeeping columns; mark them
        # non-editable so they never appear in model forms or the admin.
        migrations.AlterField(
            model_name='document',
            name='level',
            field=models.PositiveIntegerField(editable=False),
        ),
        migrations.AlterField(
            model_name='document',
            name='lft',
            field=models.PositiveIntegerField(editable=False),
        ),
        migrations.AlterField(
            model_name='document',
            name='rght',
            field=models.PositiveIntegerField(editable=False),
        ),
        migrations.CreateModel(
            name='Part',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True,
                                        serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=8)),
                ('title', models.CharField(max_length=8)),
                ('date', models.DateField()),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('document', models.JSONField()),
                ('structure', models.JSONField()),
            ],
            options={
                'unique_together': {('name', 'title', 'date')},
            },
        ),
    ]
31.386364
114
0.534395
117
1,381
6.188034
0.538462
0.082873
0.103591
0.120166
0.435083
0.366022
0.234807
0.234807
0.234807
0.234807
0
0.034632
0.33092
1,381
43
115
32.116279
0.748918
0.031137
0
0.351351
1
0
0.107784
0.017216
0
0
0
0
0
1
0
false
0
0.027027
0
0.108108
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
4170081c348b25a347acdd29605bf96b4bb56f60
59
py
Python
testing/resources/test_pvector_in_imported_module.py
timgates42/processing.py
78a237922c2a928b83f4ad579dbf8d32c0099890
[ "Apache-2.0" ]
1,224
2015-01-01T22:09:23.000Z
2022-03-29T19:43:56.000Z
testing/resources/test_pvector_in_imported_module.py
timgates42/processing.py
78a237922c2a928b83f4ad579dbf8d32c0099890
[ "Apache-2.0" ]
253
2015-01-14T03:45:51.000Z
2022-02-08T01:18:19.000Z
testing/resources/test_pvector_in_imported_module.py
timgates42/processing.py
78a237922c2a928b83f4ad579dbf8d32c0099890
[ "Apache-2.0" ]
225
2015-01-13T18:38:33.000Z
2022-03-30T20:27:39.000Z
import sys

import imported_module_with_pvector as m

# Exercise a module that itself uses PVector, then terminate explicitly.
m.sayok()
# sys.exit() rather than exit(): the exit() builtin is an interactive-shell
# helper injected by the site module and is not guaranteed in all run modes.
sys.exit()
11.8
40
0.79661
10
59
4.4
0.9
0
0
0
0
0
0
0
0
0
0
0
0.118644
59
4
41
14.75
0.846154
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
41716acd74ef124d89f5dce40e0eba84b378df21
43,180
py
Python
src/pylogit/mixed_logit.py
mathijsvdv/pylogit
2e7a06907d11b6fe02d3f3f9df91d374ed8a0c6d
[ "BSD-3-Clause" ]
153
2016-03-22T05:52:41.000Z
2022-02-09T13:33:20.000Z
src/pylogit/mixed_logit.py
mathijsvdv/pylogit
2e7a06907d11b6fe02d3f3f9df91d374ed8a0c6d
[ "BSD-3-Clause" ]
63
2016-03-22T05:47:56.000Z
2021-12-23T12:01:29.000Z
src/pylogit/mixed_logit.py
mathijsvdv/pylogit
2e7a06907d11b6fe02d3f3f9df91d374ed8a0c6d
[ "BSD-3-Clause" ]
91
2016-05-27T06:04:38.000Z
2022-03-13T20:00:15.000Z
# -*- coding: utf-8 -*- """ Created on Mon Jul 18 18:15:50 2016 @name: Mixed MultiNomial Logit @author: Timothy Brathwaite @summary: Contains functions necessary for estimating mixed multinomial logit models (with the help of the "base_multinomial_cm.py" file). Version 1 only works for MNL kernels and only for mixing of index coefficients. General References ------------------ Train, K., 2009. Discrete Choice Models With Simulation. 2 ed., Cambridge University Press, New York, NY, USA. """ from __future__ import absolute_import import warnings import numpy as np from scipy.sparse import csr_matrix from . import base_multinomial_cm_v2 as base_mcm from . import choice_calcs as cc from . import mixed_logit_calcs as mlc from .choice_tools import get_dataframe_from_data from .choice_tools import create_design_matrix from .choice_tools import create_long_form_mappings from .display_names import model_type_to_display_name from .estimation import EstimationObj from .estimation import estimate # Alias necessary functions for model estimation general_calc_probabilities = cc.calc_probabilities general_sequence_probs = mlc.calc_choice_sequence_probs general_log_likelihood = mlc.calc_mixed_log_likelihood general_gradient = mlc.calc_mixed_logit_gradient general_bhhh = mlc.calc_bhhh_hessian_approximation_mixed_logit _msg_1 = "The Mixed MNL Model has no shape parameters. " _msg_2 = "shape_names and shape_ref_pos will be ignored if passed." _shape_ignore_msg = _msg_1 + _msg_2 # Create a warning string that will be issued if ridge regression is performed. _msg_3 = "NOTE: An L2-penalized regression is being performed. The " _msg_4 = "reported standard errors and robust standard errors " _msg_5 = "***WILL BE INCORRECT***." _ridge_warning_msg = _msg_3 + _msg_4 + _msg_5 def split_param_vec(beta, return_all_types=False, *args, **kwargs): """ Parameters ---------- beta : 1D numpy array. All elements should by ints, floats, or longs. 
Should have 1 element for each utility coefficient being estimated (i.e. num_features). return_all_types : bool, optional. Determines whether or not a tuple of 4 elements will be returned (with one element for the nest, shape, intercept, and index parameters for this model). If False, a tuple of 3 elements will be returned, as described below. Returns ------- tuple. `(None, None, beta)`. This function is merely for compatibility with the other choice model files. Note ---- If `return_all_types == True` then the function will return a tuple of `(None, None, None, beta)`. These values represent the nest, shape, outside intercept, and index coefficients for the mixed logit model. """ if return_all_types: return None, None, None, beta else: return None, None, beta def mnl_utility_transform(sys_utility_array, *args, **kwargs): """ Parameters ---------- sys_utility_array : ndarray. Should have 1D or 2D. Should have been created by the dot product of a design matrix and an array of index coefficients. Returns ------- systematic_utilities : 2D ndarray. The input systematic utilities. If `sys_utility_array` is 2D, then `sys_utility_array` is returned. Else, returns `sys_utility_array[:, None]`. """ # Return a 2D array of systematic utility values if len(sys_utility_array.shape) == 1: systematic_utilities = sys_utility_array[:, np.newaxis] else: systematic_utilities = sys_utility_array return systematic_utilities def check_length_of_init_values(design_3d, init_values): """ Ensures that the initial values are of the correct length, given the design matrix that they will be dot-producted with. Raises a ValueError if that is not the case, and provides a useful error message to users. Parameters ---------- init_values : 1D ndarray. 1D numpy array of the initial values to start the optimizatin process with. There should be one value for each index coefficient being estimated. design_3d : 2D ndarray. 2D numpy array with one row per observation per available alternative. 
There should be one column per index coefficient being estimated. All elements should be ints, floats, or longs. Returns ------- None. """ if init_values.shape[0] != design_3d.shape[2]: msg_1 = "The initial values are of the wrong dimension. " msg_2 = "They should be of dimension {}".format(design_3d.shape[2]) raise ValueError(msg_1 + msg_2) return None def add_mixl_specific_results_to_estimation_res(estimator, results_dict): """ Stores particular items in the results dictionary that are unique to mixed logit-type models. In particular, this function calculates and adds `sequence_probs` and `expanded_sequence_probs` to the results dictionary. The `constrained_pos` object is also stored to the results_dict. Parameters ---------- estimator : an instance of the MixedEstimator class. Should contain a `choice_vector` attribute that is a 1D ndarray representing the choices made for this model's dataset. Should also contain a `rows_to_mixers` attribute that maps each row of the long format data to a unit of observation that the mixing is being performed over. results_dict : dict. This dictionary should be the dictionary returned from scipy.optimize.minimize. In particular, it should have the following `long_probs` key. Returns ------- results_dict. """ # Get the probability of each sequence of choices, given the draws prob_res = mlc.calc_choice_sequence_probs(results_dict["long_probs"], estimator.choice_vector, estimator.rows_to_mixers, return_type='all') # Add the various items to the results_dict. results_dict["simulated_sequence_probs"] = prob_res[0] results_dict["expanded_sequence_probs"] = prob_res[1] return results_dict class MixedEstimator(EstimationObj): """ Estimation object for the Mixed Logit Model. Parameters ---------- model_obj : a pylogit.base_multinomial_cm_v2.MNDC_Model instance. Should contain the following attributes: - alt_IDs - choices - design - intercept_ref_position - shape_ref_position - utility_transform - design_3d mapping_res : dict. 
Should contain the scipy sparse matrices that map the rows of the long format dataframe to various other objects such as the available alternatives, the unique observations, etc. The keys that it must have are `['rows_to_obs', 'rows_to_alts', 'chosen_row_to_obs']` ridge : int, float, long, or None. Determines whether or not ridge regression is performed. If a scalar is passed, then that scalar determines the ridge penalty for the optimization. The scalar should be greater than or equal to zero.. zero_vector : 1D ndarray. Determines what is viewed as a "null" set of parameters. It is explicitly passed because some parameters (e.g. parameters that must be greater than zero) have their null values at values other than zero. split_params : callable. Should take a vector of parameters, `mapping_res['rows_to_alts']`, and model_obj.design as arguments. Should return a tuple containing separate arrays for the model's shape, outside intercept, and index coefficients. For each of these arrays, if this model does not contain the particular type of parameter, the callable should place a `None` in its place in the tuple. constrained_pos : list or None, optional. Denotes the positions of the array of estimated parameters that are not to change from their initial values. If a list is passed, the elements are to be integers where no such integer is greater than `init_values.size.` Default == None. weights : 1D ndarray or None, optional. Allows for the calculation of weighted log-likelihoods. The weights can represent various things. In stratified samples, the weights may be the proportion of the observations in a given strata for a sample in relation to the proportion of observations in that strata in the population. In latent class models, the weights may be the probability of being a particular class. 
""" def __init__(self, model_obj, mapping_dict, ridge, zero_vector, split_params, constrained_pos=None, weights=None): super(MixedEstimator, self).__init__(model_obj, mapping_dict, ridge, zero_vector, split_params, constrained_pos=constrained_pos, weights=weights) # Add the 3d design matrix to the object self.design_3d = model_obj.design_3d return None def convenience_split_params(self, params, return_all_types=False): """ Splits parameter vector into shape, intercept, and index parameters. Parameters ---------- params : 1D ndarray. The array of parameters being estimated or used in calculations. return_all_types : bool, optional. Determines whether or not a tuple of 4 elements will be returned (with one element for the nest, shape, intercept, and index parameters for this model). If False, a tuple of 3 elements will be returned with one element for the shape, intercept, and index parameters. Returns ------- tuple. Will have 4 or 3 elements based on `return_all_types`. """ return self.split_params(params, return_all_types=return_all_types) def check_length_of_initial_values(self, init_values): """ Ensures that the initial values are of the correct length. """ return check_length_of_init_values(self.design_3d, init_values) def convenience_calc_probs(self, params): """ Calculates the probabilities of the chosen alternative, and the long format probabilities for this model and dataset. """ shapes, intercepts, betas = self.convenience_split_params(params) prob_args = (betas, self.design_3d, self.alt_id_vector, self.rows_to_obs, self.rows_to_alts, self.utility_transform) prob_kwargs = {"chosen_row_to_obs": self.chosen_row_to_obs, "return_long_probs": True} probability_results = general_calc_probabilities(*prob_args, **prob_kwargs) return probability_results def convenience_calc_log_likelihood(self, params): """ Calculates the log-likelihood for this model and dataset. 
""" shapes, intercepts, betas = self.convenience_split_params(params) args = [betas, self.design_3d, self.alt_id_vector, self.rows_to_obs, self.rows_to_alts, self.rows_to_mixers, self.choice_vector, self.utility_transform] kwargs = {"ridge": self.ridge, "weights": self.weights} log_likelihood = general_log_likelihood(*args, **kwargs) return log_likelihood def convenience_calc_gradient(self, params): """ Calculates the gradient of the log-likelihood for this model / dataset. """ shapes, intercepts, betas = self.convenience_split_params(params) args = [betas, self.design_3d, self.alt_id_vector, self.rows_to_obs, self.rows_to_alts, self.rows_to_mixers, self.choice_vector, self.utility_transform] return general_gradient(*args, ridge=self.ridge, weights=self.weights) def convenience_calc_hessian(self, params): """ Calculates the hessian of the log-likelihood for this model / dataset. Note that this function name is INCORRECT with regard to the actual actions performed. The Mixed Logit model uses the BHHH approximation to the Fisher Information Matrix in place of the actual hessian. """ shapes, intercepts, betas = self.convenience_split_params(params) args = [betas, self.design_3d, self.alt_id_vector, self.rows_to_obs, self.rows_to_alts, self.rows_to_mixers, self.choice_vector, self.utility_transform] approx_hess =\ general_bhhh(*args, ridge=self.ridge, weights=self.weights) # Account for the constrained position when presenting the results of # the approximate hessian. if self.constrained_pos is not None: for idx_val in self.constrained_pos: approx_hess[idx_val, :] = 0 approx_hess[:, idx_val] = 0 approx_hess[idx_val, idx_val] = -1 return approx_hess def convenience_calc_fisher_approx(self, params): """ Calculates the BHHH approximation of the Fisher Information Matrix for this model / dataset. Note that this function name is INCORRECT with regard to the actual actions performed. 
The Mixed Logit model uses a placeholder for the BHHH approximation of the Fisher Information Matrix because the BHHH approximation is already being used to approximate the hessian. This placeholder allows calculation of a value for the 'robust' standard errors, even though such a value is not useful since it is not correct... """ shapes, intercepts, betas = self.convenience_split_params(params) placeholder_bhhh = np.diag(-1 * np.ones(betas.shape[0])) return placeholder_bhhh class MixedLogit(base_mcm.MNDC_Model): """ Parameters ---------- data : string or pandas dataframe. If string, data should be an absolute or relative path to a CSV file containing the long format data for this choice model. Note long format has one row per available alternative for each observation. If pandas dataframe, the dataframe should be the long format data for the choice model. alt_id_col : str. Should denote the column in data which contains the alternative identifiers for each row. obs_id_col : str. Should denote the column in data which contains the observation identifiers for each row. choice_col : str. Should denote the column in data which contains the ones and zeros that denote whether or not the given row corresponds to the chosen alternative for the given individual. specification : OrderedDict. Keys are a proper subset of the columns in long_form_df. Values are either a list or a single string, `all_diff` or `all_same`. If a list, the elements should be one of the following: - single objects that are within the alternative ID column of long_form_df - lists of objects that are within the alternative ID column of long_form_df. For each single object in the list, a unique column will be created (i.e. there will be a unique coefficient for that variable in the corresponding utility equation of the corresponding alternative). For lists within the specification_dict values, a single column will be created for all the alternatives within iterable (i.e. 
there will be one common coefficient for the variables in the iterable). names : OrderedDict, optional. Should have the same keys as `specification_dict`. For each key: - if the corresponding value in specification_dict is "all_same", then there should be a single string as the value in names. - if the corresponding value in specification_dict is "all_diff", then there should be a list of strings as the value in names. There should be one string in the value in names for each - if the corresponding value in specification_dict is a list, then there should be a list of strings as the value in names. There should be one string in the value in names per item in the value in specification_dict. Default == None. mixing_id_col : str, or None, optional. Should be a column heading in `data`. Should denote the column in `data` which contains the identifiers of the units of observation over which the coefficients of the model are thought to be randomly distributed. If `model_type == "Mixed Logit"`, then `mixing_id_col` must be passed. Default == None. mixing_vars : list, or None, optional. All elements of the list should be strings. Each string should be present in the values of `names.values()` and they're associated variables should only be index variables (i.e. part of the design matrix). If `model_type == "Mixed Logit"`, then `mixing_vars` must be passed. Default == None. Methods ------- panel_predict(new_data, num_draws, return_long_probs, choice_col, seed) Predicts the probability of each individual in `new_data` making each possible choice in each choice situation they are faced with. This method differs from the `predict()` function by using 'individualized coefficient distributions' that are conditioned on each person's past choices and choice situations (if there are any). 
""" def __init__(self, data, alt_id_col, obs_id_col, choice_col, specification, names=None, mixing_id_col=None, mixing_vars=None, *args, **kwargs): ########## # Print a helpful message for users who have included shape parameters # or shape names unneccessarily ########## for keyword in ["shape_names", "shape_ref_pos"]: if keyword in kwargs and kwargs[keyword] is not None: warnings.warn(_shape_ignore_msg) break if "intercept_ref_pos" in kwargs: if kwargs["intercept_ref_pos"] is not None: msg = "All Mixed Logit intercepts should be in the index. " msg_2 = "intercept_ref_pos should be None." raise ValueError(msg + msg_2) # Carry out the common instantiation process for all choice models model_name = model_type_to_display_name["Mixed Logit"] super(MixedLogit, self).__init__(data, alt_id_col, obs_id_col, choice_col, specification, names=names, model_type=model_name, mixing_id_col=mixing_id_col, mixing_vars=mixing_vars) # Store the utility transform function self.utility_transform = mnl_utility_transform return None def fit_mle(self, init_vals, num_draws, seed=None, constrained_pos=None, print_res=True, method="BFGS", loss_tol=1e-06, gradient_tol=1e-06, maxiter=1000, ridge=None, just_point=False, **kwargs): """ Parameters ---------- init_vals : 1D ndarray. Should contain the initial values to start the optimization process with. There should be one value for each utility coefficient and shape parameter being estimated. num_draws : int. Should be greater than zero. Denotes the number of draws that we are making from each normal distribution. seed : int or None, optional. If an int is passed, it should be greater than zero. Denotes the value to be used in seeding the random generator used to generate the draws from the normal distribution. Default == None. constrained_pos : list or None, optional. Denotes the positions of the array of estimated parameters that are not to change from their initial values. 
If a list is passed, the elements are to be integers where no such integer is greater than `init_values.size.` Default == None. print_res : bool, optional. Determines whether the timing and initial and final log likelihood results will be printed as they they are determined. method : str, optional. Should be a valid string which can be passed to scipy.optimize.minimize. Determines the optimization algorithm that is used for this problem. loss_tol : float, optional. Determines the tolerance on the difference in objective function values from one iteration to the next which is needed to determine convergence. Default = 1e-06. gradient_tol : float, optional. Determines the tolerance on the difference in gradient values from one iteration to the next which is needed to determine convergence. Default = 1e-06. maxiter : int, optional. Denotes the maximum number of iterations of the algorithm specified by `method` that will be used to estimate the parameters of the given model. Default == 1000. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a float is passed, then that float determines the ridge penalty for the optimization. Default = None. just_point : bool, optional. Determines whether (True) or not (False) calculations that are non- critical for obtaining the maximum likelihood point estimate will be performed. If True, this function will return the results dictionary from scipy.optimize. Default == False. Returns ------- None. Estimation results are saved to the model instance. 
""" # Check integrity of passed arguments kwargs_to_be_ignored = ["init_shapes", "init_intercepts", "init_coefs"] if any([x in kwargs for x in kwargs_to_be_ignored]): msg = "MNL model does not use of any of the following kwargs:\n{}" msg_2 = "Remove such kwargs and pass a single init_vals argument" raise ValueError(msg.format(kwargs_to_be_ignored) + msg_2) # Store the optimization method self.optimization_method = method # Store the ridge parameter self.ridge_param = ridge if ridge is not None: warnings.warn(_ridge_warning_msg) # Construct the mappings from alternatives to observations and from # chosen alternatives to observations mapping_res = self.get_mappings_for_fit() rows_to_mixers = mapping_res["rows_to_mixers"] # Get the draws for each random coefficient num_mixing_units = rows_to_mixers.shape[1] draw_list = mlc.get_normal_draws(num_mixing_units, num_draws, len(self.mixing_pos), seed=seed) # Create the 3D design matrix self.design_3d = mlc.create_expanded_design_for_mixing(self.design, draw_list, self.mixing_pos, rows_to_mixers) # Create the estimation object zero_vector = np.zeros(init_vals.shape) mixl_estimator = MixedEstimator(self, mapping_res, ridge, zero_vector, split_param_vec, constrained_pos=constrained_pos) # Perform one final check on the length of the initial values mixl_estimator.check_length_of_initial_values(init_vals) # Get the estimation results estimation_res = estimate(init_vals, mixl_estimator, method, loss_tol, gradient_tol, maxiter, print_res, use_hessian=True, just_point=just_point) if not just_point: # Store the mixed logit specific estimation results args = [mixl_estimator, estimation_res] estimation_res = add_mixl_specific_results_to_estimation_res(*args) # Store the estimation results self.store_fit_results(estimation_res) return None else: return estimation_res def __filter_past_mappings(self, past_mappings, long_inclusion_array): """ Parameters ---------- past_mappings : dict. 
All elements should be None or compressed sparse row matrices from scipy.sparse. The following keys should be in past_mappings: - "rows_to_obs", - "rows_to_alts", - "chosen_rows_to_obs", - "rows_to_nests", - "rows_to_mixers" The values that are not None should be 'mapping' matrices that denote which rows of the past long-format design matrix belong to which unique object such as unique observations, unique alternatives, unique nests, unique 'mixing' units etc. long_inclusion_array : 1D ndarray. Should denote, via a `1`, the rows of the past mapping matrices that are to be included in the filtered mapping matrices. Returns ------- new_mappings : dict. The returned dictionary will be the same as `past_mappings` except that all the mapping matrices will have been filtered according to `long_inclusion_array`. """ new_mappings = {} for key in past_mappings: if past_mappings[key] is None: new_mappings[key] = None else: mask_array = long_inclusion_array[:, None] orig_map = past_mappings[key] # Initialize the resultant array that is desired new_map = orig_map.multiply(np.tile(mask_array, (1, orig_map.shape[1]))).A # Perform the desired filtering current_filter = (new_map.sum(axis=1) != 0) if current_filter.shape[0] > 0: current_filter = current_filter.ravel() new_map = new_map[current_filter, :] # Do the second filtering current_filter = (new_map.sum(axis=0) != 0) if current_filter.shape[0] > 0: current_filter = current_filter.ravel() new_map = new_map[:, current_filter] new_mappings[key] = csr_matrix(new_map) return new_mappings def panel_predict(self, data, num_draws, return_long_probs=True, choice_col=None, seed=None): """ Parameters ---------- data : string or pandas dataframe. If string, data should be an absolute or relative path to a CSV file containing the long format data to be predicted with this choice model. Note long format has one row per available alternative for each observation. If pandas dataframe, the dataframe should be in long format. num_draws : int. 
Should be greater than zero. Denotes the number of draws being made from each mixing distribution for the random coefficients. return_long_probs : bool, optional. Indicates whether or not the long format probabilites (a 1D numpy array with one element per observation per available alternative) should be returned. Default == True. choice_col : str or None, optonal. Denotes the column in long_form which contains a one if the alternative pertaining to the given row was the observed outcome for the observation pertaining to the given row and a zero otherwise. If passed, then an array of probabilities of just the chosen alternative for each observation will be returned. Default == None. seed : int or None, optional. If an int is passed, it should be greater than zero. Denotes the value to be used in seeding the random generator used to generate the draws from the mixing distributions of each random coefficient. Default == None. Returns ------- numpy array or tuple of two numpy arrays. - If `choice_col` is passed AND `return_long_probs` is True, then the tuple `(chosen_probs, pred_probs_long)` is returned. - If `return_long_probs` is True and `choice_col` is None, then only `pred_probs_long` is returned. - If `choice_col` is passed and `return_long_probs` is False then `chosen_probs` is returned. `chosen_probs` is a 1D numpy array of shape (num_observations,). Each element is the probability of the corresponding observation being associated with its realized outcome. `pred_probs_long` is a 1D numpy array with one element per observation per available alternative for that observation. Each element is the probability of the corresponding observation being associated with that row's corresponding alternative. Notes ----- It is NOT valid to have `choice_col == None` and `return_long_probs == False`. 
""" # Ensure that the function arguments are valid if choice_col is None and not return_long_probs: msg = "choice_col is None AND return_long_probs == False" raise ValueError(msg) # Get the dataframe of observations we'll be predicting on dataframe = get_dataframe_from_data(data) # Determine the conditions under which we will add an intercept column # to our long format dataframe. condition_1 = "intercept" in self.specification condition_2 = "intercept" not in dataframe.columns if condition_1 and condition_2: dataframe["intercept"] = 1.0 # Make sure the necessary columns are in the long format dataframe for column in [self.alt_id_col, self.obs_id_col, self.mixing_id_col]: if column is not None and column not in dataframe.columns: msg = "{} not in data.columns".format(column) raise ValueError(msg) # Get the new column of alternative IDs and get the new design matrix new_alt_IDs = dataframe[self.alt_id_col].values new_design_res = create_design_matrix(dataframe, self.specification, self.alt_id_col, names=self.name_spec) new_design_2d = new_design_res[0] # Get the new mappings between the alternatives and observations mapping_res = create_long_form_mappings(dataframe, self.obs_id_col, self.alt_id_col, choice_col=choice_col, nest_spec=self.nest_spec, mix_id_col=self.mixing_id_col) new_rows_to_obs = mapping_res["rows_to_obs"] new_rows_to_alts = mapping_res["rows_to_alts"] new_chosen_to_obs = mapping_res["chosen_row_to_obs"] new_rows_to_mixers = mapping_res["rows_to_mixers"] # Determine the coefficients being used for prediction. # Note that I am making an implicit assumption (for now) that the # kernel probabilities are coming from a logit-type model. 
new_index_coefs = self.coefs.values new_intercepts = (self.intercepts.values if self.intercepts is not None else None) new_shape_params = (self.shapes.values if self.shapes is not None else None) # Get the draws for each random coefficient num_mixing_units = new_rows_to_mixers.shape[1] draw_list = mlc.get_normal_draws(num_mixing_units, num_draws, len(self.mixing_pos), seed=seed) # Calculate the 3D design matrix for the prediction. design_args = (new_design_2d, draw_list, self.mixing_pos, new_rows_to_mixers) new_design_3d = mlc.create_expanded_design_for_mixing(*design_args) # Calculate the desired probabilities for the mixed logit model. prob_args = (new_index_coefs, new_design_3d, new_alt_IDs, new_rows_to_obs, new_rows_to_alts, mnl_utility_transform) prob_kwargs = {"intercept_params": new_intercepts, "shape_params": new_shape_params, "return_long_probs": True} # Note that I am making an implicit assumption (for now) that the # kernel probabilities are coming from a logit-type model. new_kernel_probs = general_calc_probabilities(*prob_args, **prob_kwargs) # Initialize and calculate the weights needed for prediction with # "individualized" coefficient distributions. 
Should have shape # (new_row_to_mixer.shape[1], num_draws) weights_per_ind_per_draw = (1.0 / num_draws * np.ones((new_rows_to_mixers.shape[1], num_draws))) ########## # Create an array denoting the observation ids that are present in both # the dataset to be predicted and the dataset used for model estimation ########## # Get the old mixing ids old_mixing_id_long = self.data[self.mixing_id_col].values # Get the new mixing ids new_mixing_id_long = dataframe[self.mixing_id_col].values # Get the unique individual ids from the original and preserve order orig_unique_id_idx_old = np.sort(np.unique(old_mixing_id_long, return_index=True)[1]) orig_unique_id_idx_new = np.sort(np.unique(new_mixing_id_long, return_index=True)[1]) # Get the unique ids, in their original order of appearance orig_order_unique_ids_old = old_mixing_id_long[orig_unique_id_idx_old] orig_order_unique_ids_new = new_mixing_id_long[orig_unique_id_idx_new] # Figure out which long format rows have ids are common to both # datasets old_repeat_mixing_id_idx = np.in1d(old_mixing_id_long, orig_order_unique_ids_new) # Figure out which unique ids are in both datasets old_unique_mix_id_repeats = np.in1d(orig_order_unique_ids_old, orig_order_unique_ids_new) new_unique_mix_id_repeats = np.in1d(orig_order_unique_ids_new, orig_order_unique_ids_old) # Get the 2d design matrix used to estimate the model, and filter it # to only those individuals for whom we are predicting new choice # situations. past_design_2d = self.design[old_repeat_mixing_id_idx, :] ########## # Appropriately filter the old mapping matrix that maps rows of the # long format design matrix to unique mixing units. ########## orig_mappings = self.get_mappings_for_fit() past_mappings = self.__filter_past_mappings(orig_mappings, old_repeat_mixing_id_idx) # Create the 3D design matrix for those choice situations, using the # draws that were just taken from the mixing distributions of interest. 
past_draw_list = [x[new_unique_mix_id_repeats, :] for x in draw_list] design_args = (past_design_2d, past_draw_list, self.mixing_pos, past_mappings["rows_to_mixers"]) past_design_3d = mlc.create_expanded_design_for_mixing(*design_args) # Get the kernel probabilities of each of the alternatives for each # each of the previoius choice situations, given the current draws of # of the random coefficients prob_args = (new_index_coefs, past_design_3d, self.alt_IDs[old_repeat_mixing_id_idx], past_mappings["rows_to_obs"], past_mappings["rows_to_alts"], mnl_utility_transform) prob_kwargs = {"return_long_probs": True} past_kernel_probs = mlc.general_calc_probabilities(*prob_args, **prob_kwargs) ########## # Calculate the old sequence probabilities of all the individual's # for whom we have recorded observations and for whom we are predicting # future choice situations ########## past_choices = self.choices[old_repeat_mixing_id_idx] sequence_args = (past_kernel_probs, past_choices, past_mappings["rows_to_mixers"]) seq_kwargs = {"return_type": 'all'} old_sequence_results = mlc.calc_choice_sequence_probs(*sequence_args, **seq_kwargs) # Note sequence_probs_per_draw should have shape past_sequence_probs_per_draw = old_sequence_results[1] # Calculate the weights for each individual who has repeat observations # in the previously observed dataset past_weights = (past_sequence_probs_per_draw / past_sequence_probs_per_draw.sum(axis=1)[:, None]) # Rearrange the past weights to match the current ordering of the # unique observations rel_new_ids = orig_order_unique_ids_new[new_unique_mix_id_repeats] num_rel_new_id = rel_new_ids.shape[0] new_unique_mix_id_repeats_2d = rel_new_ids.reshape((num_rel_new_id, 1)) rel_old_ids = orig_order_unique_ids_old[old_unique_mix_id_repeats] num_rel_old_id = rel_old_ids.shape[0] old_unique_mix_id_repeats_2d = rel_old_ids.reshape((1, num_rel_old_id)) new_to_old_repeat_ids = csr_matrix(new_unique_mix_id_repeats_2d == old_unique_mix_id_repeats_2d) 
past_weights = new_to_old_repeat_ids.dot(past_weights) # Map these weights to earlier initialized weights weights_per_ind_per_draw[new_unique_mix_id_repeats, :] = past_weights # Create a 'long' format version of the weights array. This version # should have the same number of rows as the new kernel probs but the # same number of columns as the weights array (aka the number of draws) weights_per_draw = new_rows_to_mixers.dot(weights_per_ind_per_draw) # Calculate the predicted probabilities of each alternative for each # choice situation being predicted pred_probs_long = (weights_per_draw * new_kernel_probs).sum(axis=1) # Note I am assuming pred_probs_long should be 1D (as should be the # case if we are predicting with one set of betas and one 2D data # object) pred_probs_long = pred_probs_long.ravel() # Format the returned objects according to the user's desires. if new_chosen_to_obs is None: chosen_probs = None else: # chosen_probs will be of shape (num_observations,) chosen_probs = new_chosen_to_obs.transpose().dot(pred_probs_long) if len(chosen_probs.shape) > 1 and chosen_probs.shape[1] > 1: pass else: chosen_probs = chosen_probs.ravel() # Return the long form and chosen probabilities if desired if return_long_probs and chosen_probs is not None: return chosen_probs, pred_probs_long # If working with predictions, return just the long form probabilities elif return_long_probs and chosen_probs is None: return pred_probs_long # If estimating the model and storing fitted probabilities or # testing the model on data for which we know the chosen alternative, # just return the chosen probabilities. elif chosen_probs is not None: return chosen_probs
44.979167
79
0.612297
5,293
43,180
4.788022
0.118458
0.009707
0.008523
0.007103
0.360178
0.283905
0.261571
0.225861
0.206092
0.194847
0
0.005837
0.333488
43,180
959
80
45.026069
0.874739
0.482145
0
0.28836
0
0
0.050646
0.002343
0
0
0
0
0
1
0.042328
false
0.007937
0.034392
0
0.134921
0.005291
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4174d02ae022627c9a3fdf728ab8801e521bd891
7,836
py
Python
node/blockchain/tests/test_models/test_signed_change_request/test_node_declaration.py
thenewboston-developers/Node
e71a405f4867786a54dd17ddd97595dd3a630018
[ "MIT" ]
18
2021-11-30T04:02:13.000Z
2022-03-24T12:33:57.000Z
node/blockchain/tests/test_models/test_signed_change_request/test_node_declaration.py
thenewboston-developers/Node
e71a405f4867786a54dd17ddd97595dd3a630018
[ "MIT" ]
1
2022-02-04T17:07:38.000Z
2022-02-04T17:07:38.000Z
node/blockchain/tests/test_models/test_signed_change_request/test_node_declaration.py
thenewboston-developers/Node
e71a405f4867786a54dd17ddd97595dd3a630018
[ "MIT" ]
5
2022-01-31T05:28:13.000Z
2022-03-08T17:25:31.000Z
import json import re import pytest from pydantic import ValidationError from node.blockchain.inner_models import ( NodeDeclarationSignedChangeRequest, NodeDeclarationSignedChangeRequestMessage, SignedChangeRequest ) from node.blockchain.mixins.crypto import HashableStringWrapper from node.blockchain.tests.test_models.base import CREATE, VALID, node_declaration_message_type_validation_parametrizer def test_create_from_node_declaration_signed_change_request_message( node_declaration_signed_change_request_message, regular_node_key_pair ): signed_change_request = SignedChangeRequest.create_from_signed_change_request_message( message=node_declaration_signed_change_request_message, signing_key=regular_node_key_pair.private, ) assert isinstance(signed_change_request, NodeDeclarationSignedChangeRequest) assert signed_change_request.message == node_declaration_signed_change_request_message assert signed_change_request.signer == regular_node_key_pair.public assert signed_change_request.signature == ( 'e6f950cce5fbe79ebc58dbd317ba7dec5baf6387bfeeb4656d73c8790d2564a4' '44f8c702b3e3ca931b5bb6e534781a135d5c17c4ff03886a80f32643dbd8fe0d' ) def test_serialize_and_deserialize_node_declaration( regular_node_declaration_signed_change_request, regular_node_key_pair ): assert isinstance(regular_node_declaration_signed_change_request, NodeDeclarationSignedChangeRequest) serialized = regular_node_declaration_signed_change_request.json() deserialized = SignedChangeRequest.parse_raw(serialized) assert isinstance(deserialized, NodeDeclarationSignedChangeRequest) assert deserialized.signer == regular_node_declaration_signed_change_request.signer assert deserialized.signature == regular_node_declaration_signed_change_request.signature assert deserialized.message == regular_node_declaration_signed_change_request.message assert deserialized == regular_node_declaration_signed_change_request serialized2 = deserialized.json() assert serialized == serialized2 def 
test_node_does_not_serialize_identifier(regular_node_declaration_signed_change_request, regular_node_key_pair): assert isinstance(regular_node_declaration_signed_change_request, NodeDeclarationSignedChangeRequest) serialized = regular_node_declaration_signed_change_request.dict() assert 'identifier' not in serialized['message']['node'] serialized_json = regular_node_declaration_signed_change_request.json() serialized = json.loads(serialized_json) assert 'identifier' not in serialized['message']['node'] def test_signature_validation_node_declaration( node_declaration_signed_change_request_message, primary_validator_key_pair ): signed_change_request_template = NodeDeclarationSignedChangeRequest.create_from_signed_change_request_message( message=node_declaration_signed_change_request_message, signing_key=primary_validator_key_pair.private, ) with pytest.raises(ValidationError) as exc_info: NodeDeclarationSignedChangeRequest( signer=signed_change_request_template.signer, signature='0' * 128, message=signed_change_request_template.message, ) assert re.search(r'__root__.*Invalid signature', str(exc_info.value), flags=re.DOTALL) with pytest.raises(ValidationError) as exc_info: NodeDeclarationSignedChangeRequest( signer='0' * 64, signature=signed_change_request_template.signature, message=signed_change_request_template.message, ) assert re.search(r'__root__.*Invalid signature', str(exc_info.value), flags=re.DOTALL) message = NodeDeclarationSignedChangeRequestMessage( node=signed_change_request_template.message.node, account_lock='0' * 64, type=signed_change_request_template.message.type, ) with pytest.raises(ValidationError) as exc_info: NodeDeclarationSignedChangeRequest( signer=signed_change_request_template.signer, signature=signed_change_request_template.signature, message=message, ) assert re.search(r'__root__.*Invalid signature', str(exc_info.value), flags=re.DOTALL) @node_declaration_message_type_validation_parametrizer def 
test_type_validation_for_node_declaration_message_on_parsing( id_, regular_node, node, node_identifier, node_addresses, node_fee, account_lock, search_re ): if node is CREATE and node_identifier is not VALID: # Skip not applicable tests return regular_node_dict = regular_node.dict() del regular_node_dict['identifier'] serialized = { 'signer': '0' * 64, 'signature': '0' * 128, 'message': { 'type': 1, 'account_lock': regular_node.identifier if account_lock is VALID else account_lock, 'node': regular_node_dict if node is VALID else ({ 'addresses': regular_node.addresses if node_addresses is VALID else node_addresses, 'fee': regular_node.fee if node_fee is VALID else node_fee, } if node is CREATE else node) } } serialized_json = json.dumps(serialized) with pytest.raises(ValidationError) as exc_info: SignedChangeRequest.parse_raw(serialized_json) assert re.search(search_re, str(exc_info.value), flags=re.DOTALL) @pytest.mark.parametrize( 'id_, signer, signature, type_, search_re', ( # signer (1, None, '0' * 128, 1, r'signer.*none is not an allowed value'), (2, 1, '0' * 128, 1, r'signer.*str type expected'), (3, '', '0' * 128, 1, r'signer.*ensure this value has at least 64 characters'), (4, 'ab', '0' * 128, 1, r'signer.*ensure this value has at least 64 characters'), # signature (5, '0' * 64, None, 1, r'signature.*none is not an allowed value'), (6, '0' * 64, 1, 1, r'signature.*str type expected'), (7, '0' * 64, '', 1, r'signature.*ensure this value has at least 128 characters'), (8, '0' * 64, 'ab', 1, r'signature.*ensure this value has at least 128 characters'), # type_ (9, '0' * 64, '0' * 128, None, r'type.*none is not an allowed value'), (10, '0' * 64, '0' * 128, '', r'type.*value is not a valid integer'), (11, '0' * 64, '0' * 128, '1', r'type.*value is not a valid integer'), (12, '0' * 64, '0' * 128, 0, r'GenesisSignedChangeRequest.*field required'), (13, '0' * 64, '0' * 128, 1000, r'type.*value is not a valid enumeration member'), (14, '0' * 64, '0' * 128, -1, 
r'type.*value is not a valid enumeration member'), ) ) def test_type_validation_for_node_declaration_on_parsing(id_, regular_node, signer, signature, type_, search_re): node = regular_node.dict() del node['identifier'] serialized = { 'signer': signer, 'signature': signature, 'message': { 'type': type_, 'account_lock': regular_node.identifier, 'node': node } } serialized_json = json.dumps(serialized) with pytest.raises(ValidationError) as exc_info: SignedChangeRequest.parse_raw(serialized_json) assert re.search(search_re, str(exc_info.value), flags=re.DOTALL) def test_hashing_does_not_include_node_identifier(regular_node_declaration_signed_change_request): request_dict = regular_node_declaration_signed_change_request.dict() assert 'identifier' not in request_dict['message']['node'] hashing_string = json.dumps(request_dict, separators=(',', ':'), sort_keys=True) expected_hash = HashableStringWrapper(hashing_string).make_hash() assert regular_node_declaration_signed_change_request.make_hash() == expected_hash
44.777143
119
0.727667
911
7,836
5.92865
0.149287
0.079985
0.126643
0.099981
0.591372
0.534716
0.479911
0.408628
0.381781
0.356786
0
0.032712
0.184661
7,836
174
120
45.034483
0.812647
0.006126
0
0.256944
0
0
0.133368
0.020686
0
0
0
0
0.145833
1
0.048611
false
0
0.048611
0
0.104167
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
417535dcc542f83c434ea5ed0d71b62a8ea4e3dc
1,552
py
Python
playerdo/backends/socket.py
spookylukey/playerdo
d020ae43ddee1d77382c20a66b647489f366a9e9
[ "BSD-2-Clause" ]
5
2020-09-21T13:42:24.000Z
2021-02-03T17:12:33.000Z
playerdo/backends/socket.py
spookylukey/playerdo
d020ae43ddee1d77382c20a66b647489f366a9e9
[ "BSD-2-Clause" ]
1
2020-01-13T12:31:07.000Z
2020-01-13T12:34:21.000Z
playerdo/backends/socket.py
spookylukey/playerdo
d020ae43ddee1d77382c20a66b647489f366a9e9
[ "BSD-2-Clause" ]
null
null
null
from __future__ import absolute_import import socket class SocketPlayerBase(object): def send_socket_command(self, command): s = self.get_open_socket() s.send((command + "\n").encode("ascii")) # We never know how much to receive, most of these # protocols send very little data back for the commands # we use. # It's also easier to write both Python 2 and 3 compatible # if we convert to unicode strings everywhere. # Usually we are getting back ASCII. return s.recv(2048).decode('utf-8') def get_open_socket(self): if hasattr(self, '_socket'): return self._socket s = self.create_socket() self.connect_socket(s) self._socket = s # We'll leave it to Python to clean this up when # the script exits... return s def create_socket(self): raise NotImplementedError() def connect_socket(self, socket): raise NotImplementedError() class TcpSocketPlayerMixin(SocketPlayerBase): def create_socket(self): return socket.socket() def connect_socket(self, socket): socket.connect(self.socket_address()) def socket_address(self): raise NotImplementedError() class UnixSocketPlayerMixin(SocketPlayerBase): def create_socket(self): return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) def connect_socket(self, socket): socket.connect(self.socket_path()) def socket_path(self): raise NotImplentedError()
27.22807
66
0.657861
190
1,552
5.226316
0.436842
0.080564
0.064451
0.057402
0.231621
0.205438
0.205438
0.205438
0.098691
0
0
0.006071
0.257088
1,552
56
67
27.714286
0.85516
0.20232
0
0.28125
0
0
0.01546
0
0
0
0
0
0
1
0.3125
false
0
0.0625
0.0625
0.625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
41754b7be3194cb3183aea7d9f558b7b18c2dc8f
1,742
py
Python
CodeComb_Core/config_shell.py
amartyaamp/CodeComb
33fd50b91edd60dd08b1f4decc35e2fcf5f1a78d
[ "MIT" ]
1
2019-09-06T07:14:57.000Z
2019-09-06T07:14:57.000Z
CodeComb_Core/config_shell.py
amartyaamp/CodeComb
33fd50b91edd60dd08b1f4decc35e2fcf5f1a78d
[ "MIT" ]
12
2019-09-10T04:07:51.000Z
2019-12-13T03:04:49.000Z
CodeComb_Core/config_shell.py
amartyaamp/CodeComb
33fd50b91edd60dd08b1f4decc35e2fcf5f1a78d
[ "MIT" ]
1
2019-09-11T04:12:03.000Z
2019-09-11T04:12:03.000Z
import os from pyfiglet import figlet_format import cutie import configparser ## Either colorama or termcolor try: import colorama colorama.init() except ImportError: colorama = None try: from termcolor import colored except ImportError: colored = None ## Set the format config def set_format(): format_opts = {"C++":"cpp", "Python":"py", "C#":"cs", "Java":"java"} print(colored('Choose filetype (use up/down keys):', 'yellow')) format_keys = list(format_opts.keys()) answers = cutie.select_multiple(format_keys) ## Store the config file config = configparser.ConfigParser() home = os.path.expanduser("~") config_file = os.path.join(home, "codecomb_config.ini") config.read(config_file) config['FORMAT'] = dict((format_keys[ans], format_opts[format_keys[ans]]) \ for ans in answers) with open(config_file, "w") as fmtFile: config.write(fmtFile) ## Set the Editor def set_editor(): editor_opts = {"Vim":"vim ", "VSCode":"start code ", "Notepad++":"Notepad++", "Sublime Text": "subl", "Atom":"atom"} print(colored('Editor selection (should be launchable from terminal)', 'yellow')) print(colored('Choose editor (use up/down keys):', 'yellow')) editor_keys = list(editor_opts.keys()) answer = cutie.select(editor_keys, selected_index=0) ## Store the config file config = configparser.ConfigParser() home = os.path.expanduser("~") config_file = os.path.join(home, "codecomb_config.ini") config.read(config_file) config['EDITOR'] = {"startcmd": editor_opts[editor_keys[answer]]} with open(config_file, "w") as fmtFile: config.write(fmtFile) def config_shell(): #os.system("cls") #os.system("clear") set_format() set_editor() if __name__ == "__main__": config_shell()
23.226667
82
0.702641
233
1,742
5.090129
0.373391
0.067454
0.053963
0.021922
0.337268
0.305228
0.305228
0.305228
0.305228
0.305228
0
0.000668
0.141217
1,742
75
83
23.226667
0.792112
0.08209
0
0.355556
0
0
0.189666
0
0
0
0
0
0
1
0.066667
false
0
0.177778
0
0.244444
0.066667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41757415060bd40b7bf8385c7ab3e828141ce2df
637
py
Python
Candlestick-mpl_finance.py
anablima/Python-Studies
958e181a7b9ce0569259f67f2d87d78b90cb5aa1
[ "MIT" ]
null
null
null
Candlestick-mpl_finance.py
anablima/Python-Studies
958e181a7b9ce0569259f67f2d87d78b90cb5aa1
[ "MIT" ]
null
null
null
Candlestick-mpl_finance.py
anablima/Python-Studies
958e181a7b9ce0569259f67f2d87d78b90cb5aa1
[ "MIT" ]
null
null
null
import matplotlib.pyplot as fig import datetime as dt import mpl_finance as mpf import matplotlib.dates as mdates import pandas_datareader.data as web inicio=dt.datetime(2019,1,1) fim=dt.datetime(2022,2,2) df=web.DataReader('MGLU3.SA','yahoo',inicio,fim) df['med_mov']=df['Close'].rolling(window=20,min_periods=0).mean() df_ohlc=df['Close'].resample('7D').ohlc() df_ohlc['Volume']=df['Volume'].resample('7D').sum() df_ohlc.reset_index(inplace=True) df_ohlc['Date']=df_ohlc['Date'].map(mdates.date2num) ax1=fig.subplot(211) ax1.xaxis_date() mpf.candlestick_ohlc(ax1,df_ohlc.values,width=2,colorup='g') ax1.plot(df.index,df['med_mov'])
30.333333
65
0.758242
112
637
4.1875
0.517857
0.076759
0.034115
0
0
0
0
0
0
0
0
0.044851
0.054945
637
21
66
30.333333
0.734219
0
0
0
0
0
0.097179
0
0
0
0
0
0
1
0
false
0
0.294118
0
0.294118
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4177179531f58a2be53761395add53901bd1e218
1,901
py
Python
components/eastereggs.py
n8wachT/BotListBot
457160498a90c8d0a63d5a9f7400227e35431b6d
[ "MIT" ]
null
null
null
components/eastereggs.py
n8wachT/BotListBot
457160498a90c8d0a63d5a9f7400227e35431b6d
[ "MIT" ]
null
null
null
components/eastereggs.py
n8wachT/BotListBot
457160498a90c8d0a63d5a9f7400227e35431b6d
[ "MIT" ]
null
null
null
import random from pprint import pprint from peewee import fn from model import Bot from telegram import ReplyKeyboardMarkup import util from telegram import KeyboardButton import captions from model import track_activity @track_activity('easteregg', '"crappy troll markup"') def _crapPy_Tr0ll_kbmarkup(rows=None): if rows is None: rows = 4 first = ['Gay', 'Pony', 'Dick', 'Telegram', 'Milk', 'WhatsApp', 'Daniils', 'T3CHNOs', 'Adult', 'ThirdWorld', 'Asian', 'Mexican', 'SM', 'Russian', 'Chinese', 'Gonzo', 'Anime'] second = ['Tales', 'Porn', 'Rice', 'Bugs', 'Whores', 'Pigs', 'Alternatives', 'Pics', 'Penetrator', 'Addiction', 'Ducks', 'Slaves'] third = ['Collection', 'Channel', 'Bot', 'Radio', 'Chat', 'Discuss ion', 'Conversation', 'Voting', 'ForPresident'] def compound(): choices = [ '{} {} {}'.format(random.choice(first), random.choice(second), random.choice(third)), '@{}{}{}'.format(random.choice(first), random.choice(second), ''.join(random.choice(third).split(' '))), ] return random.choice(choices) buttons = [[KeyboardButton(compound()) for x in range(2)] for y in range(rows)] return buttons def send_next(bot, update, args=None): uid = util.uid_from_update(update) rows = None if args: try: rows = int(args[0]) except: rows = None reply_markup = ReplyKeyboardMarkup(_crapPy_Tr0ll_kbmarkup(rows), one_time_keyboard=True, per_user=True) text = 'ɹoʇɐɹǝuǝb ǝɯɐuɹǝsn ɯɐɹbǝןǝʇ' util.send_md_message(bot, uid, text, reply_markup=reply_markup) def send_random_bot(bot, update): from components.explore import send_bot_details random_bot = Bot.select().where((Bot.approved == True), (Bot.description.is_null(False))).order_by(fn.Random()).limit(1)[0] send_bot_details(bot, update, random_bot)
34.563636
127
0.652288
229
1,901
5.28821
0.50655
0.069364
0.024773
0.037985
0.067713
0.067713
0.067713
0
0
0
0
0.005219
0.193582
1,901
55
128
34.563636
0.784736
0
0
0.04878
0
0
0.165089
0
0
0
0
0
0
1
0.097561
false
0
0.243902
0
0.390244
0.02439
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4177f78e0fda7c1ab99934917f15b1e0a6282a28
16,492
py
Python
heat/tests/api/openstack_v1/test_routes.py
whitepages/heat
4da2e1262fa42e6107389ba7a0e72ade024e316f
[ "Apache-2.0" ]
null
null
null
heat/tests/api/openstack_v1/test_routes.py
whitepages/heat
4da2e1262fa42e6107389ba7a0e72ade024e316f
[ "Apache-2.0" ]
null
null
null
heat/tests/api/openstack_v1/test_routes.py
whitepages/heat
4da2e1262fa42e6107389ba7a0e72ade024e316f
[ "Apache-2.0" ]
1
2021-03-21T11:37:03.000Z
2021-03-21T11:37:03.000Z
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import heat.api.openstack.v1 as api_v1 from heat.tests import common class RoutesTest(common.HeatTestCase): def assertRoute(self, mapper, path, method, action, controller, params=None): params = params or {} route = mapper.match(path, {'REQUEST_METHOD': method}) self.assertIsNotNone(route) self.assertEqual(action, route['action']) self.assertEqual( controller, route['controller'].controller.__class__.__name__) del(route['action']) del(route['controller']) self.assertEqual(params, route) def setUp(self): super(RoutesTest, self).setUp() self.m = api_v1.API({}).map def test_template_handling(self): self.assertRoute( self.m, '/aaaa/resource_types', 'GET', 'list_resource_types', 'StackController', { 'tenant_id': 'aaaa', }) self.assertRoute( self.m, '/aaaa/resource_types/test_type', 'GET', 'resource_schema', 'StackController', { 'tenant_id': 'aaaa', 'type_name': 'test_type' }) self.assertRoute( self.m, '/aaaa/resource_types/test_type/template', 'GET', 'generate_template', 'StackController', { 'tenant_id': 'aaaa', 'type_name': 'test_type' }) self.assertRoute( self.m, '/aaaa/validate', 'POST', 'validate_template', 'StackController', { 'tenant_id': 'aaaa' }) def test_stack_collection(self): self.assertRoute( self.m, '/aaaa/stacks', 'GET', 'index', 'StackController', { 'tenant_id': 'aaaa' }) self.assertRoute( self.m, '/aaaa/stacks', 'POST', 'create', 'StackController', { 'tenant_id': 'aaaa' }) self.assertRoute( self.m, '/aaaa/stacks/preview', 
'POST', 'preview', 'StackController', { 'tenant_id': 'aaaa' }) self.assertRoute( self.m, '/aaaa/stacks/detail', 'GET', 'detail', 'StackController', { 'tenant_id': 'aaaa' }) def test_stack_data(self): self.assertRoute( self.m, '/aaaa/stacks/teststack', 'GET', 'lookup', 'StackController', { 'tenant_id': 'aaaa', 'stack_name': 'teststack' }) self.assertRoute( self.m, '/aaaa/stacks/arn:openstack:heat::6548ab64fbda49deb188851a3b7d8c8b' ':stacks/stack-1411-06/1c5d9bb2-3464-45e2-a728-26dfa4e1d34a', 'GET', 'lookup', 'StackController', { 'tenant_id': 'aaaa', 'stack_name': 'arn:openstack:heat:' ':6548ab64fbda49deb188851a3b7d8c8b:stacks/stack-1411-06/' '1c5d9bb2-3464-45e2-a728-26dfa4e1d34a' }) self.assertRoute( self.m, '/aaaa/stacks/teststack/resources', 'GET', 'lookup', 'StackController', { 'tenant_id': 'aaaa', 'stack_name': 'teststack', 'path': 'resources' }) self.assertRoute( self.m, '/aaaa/stacks/teststack/events', 'GET', 'lookup', 'StackController', { 'tenant_id': 'aaaa', 'stack_name': 'teststack', 'path': 'events' }) self.assertRoute( self.m, '/aaaa/stacks/teststack/bbbb', 'GET', 'show', 'StackController', { 'tenant_id': 'aaaa', 'stack_name': 'teststack', 'stack_id': 'bbbb', }) def test_stack_snapshot(self): self.assertRoute( self.m, '/aaaa/stacks/teststack/bbbb/snapshots', 'POST', 'snapshot', 'StackController', { 'tenant_id': 'aaaa', 'stack_name': 'teststack', 'stack_id': 'bbbb', }) self.assertRoute( self.m, '/aaaa/stacks/teststack/bbbb/snapshots/cccc', 'GET', 'show_snapshot', 'StackController', { 'tenant_id': 'aaaa', 'stack_name': 'teststack', 'stack_id': 'bbbb', 'snapshot_id': 'cccc' }) self.assertRoute( self.m, '/aaaa/stacks/teststack/bbbb/snapshots/cccc', 'DELETE', 'delete_snapshot', 'StackController', { 'tenant_id': 'aaaa', 'stack_name': 'teststack', 'stack_id': 'bbbb', 'snapshot_id': 'cccc' }) self.assertRoute( self.m, '/aaaa/stacks/teststack/bbbb/snapshots', 'GET', 'list_snapshots', 'StackController', { 'tenant_id': 'aaaa', 'stack_name': 'teststack', 'stack_id': 
'bbbb' }) self.assertRoute( self.m, '/aaaa/stacks/teststack/bbbb/snapshots/cccc/restore', 'POST', 'restore_snapshot', 'StackController', { 'tenant_id': 'aaaa', 'stack_name': 'teststack', 'stack_id': 'bbbb', 'snapshot_id': 'cccc' }) def test_stack_outputs(self): self.assertRoute( self.m, '/aaaa/stacks/teststack/bbbb/outputs', 'GET', 'list_outputs', 'StackController', { 'tenant_id': 'aaaa', 'stack_name': 'teststack', 'stack_id': 'bbbb' } ) self.assertRoute( self.m, '/aaaa/stacks/teststack/bbbb/outputs/cccc', 'GET', 'show_output', 'StackController', { 'tenant_id': 'aaaa', 'stack_name': 'teststack', 'stack_id': 'bbbb', 'output_key': 'cccc' } ) def test_stack_data_template(self): self.assertRoute( self.m, '/aaaa/stacks/teststack/bbbb/template', 'GET', 'template', 'StackController', { 'tenant_id': 'aaaa', 'stack_name': 'teststack', 'stack_id': 'bbbb', }) self.assertRoute( self.m, '/aaaa/stacks/teststack/template', 'GET', 'lookup', 'StackController', { 'tenant_id': 'aaaa', 'stack_name': 'teststack', 'path': 'template' }) def test_stack_post_actions(self): self.assertRoute( self.m, '/aaaa/stacks/teststack/bbbb/actions', 'POST', 'action', 'ActionController', { 'tenant_id': 'aaaa', 'stack_name': 'teststack', 'stack_id': 'bbbb', }) def test_stack_post_actions_lookup_redirect(self): self.assertRoute( self.m, '/aaaa/stacks/teststack/actions', 'POST', 'lookup', 'StackController', { 'tenant_id': 'aaaa', 'stack_name': 'teststack', 'path': 'actions' }) def test_stack_update_delete(self): self.assertRoute( self.m, '/aaaa/stacks/teststack/bbbb', 'PUT', 'update', 'StackController', { 'tenant_id': 'aaaa', 'stack_name': 'teststack', 'stack_id': 'bbbb', }) self.assertRoute( self.m, '/aaaa/stacks/teststack/bbbb', 'DELETE', 'delete', 'StackController', { 'tenant_id': 'aaaa', 'stack_name': 'teststack', 'stack_id': 'bbbb', }) def test_resources(self): self.assertRoute( self.m, '/aaaa/stacks/teststack/bbbb/resources', 'GET', 'index', 'ResourceController', { 'tenant_id': 'aaaa', 'stack_name': 
'teststack', 'stack_id': 'bbbb' }) self.assertRoute( self.m, '/aaaa/stacks/teststack/bbbb/resources/cccc', 'GET', 'show', 'ResourceController', { 'tenant_id': 'aaaa', 'stack_name': 'teststack', 'stack_id': 'bbbb', 'resource_name': 'cccc' }) self.assertRoute( self.m, '/aaaa/stacks/teststack/bbbb/resources/cccc/metadata', 'GET', 'metadata', 'ResourceController', { 'tenant_id': 'aaaa', 'stack_name': 'teststack', 'stack_id': 'bbbb', 'resource_name': 'cccc' }) self.assertRoute( self.m, '/aaaa/stacks/teststack/bbbb/resources/cccc/signal', 'POST', 'signal', 'ResourceController', { 'tenant_id': 'aaaa', 'stack_name': 'teststack', 'stack_id': 'bbbb', 'resource_name': 'cccc' }) def test_events(self): self.assertRoute( self.m, '/aaaa/stacks/teststack/bbbb/events', 'GET', 'index', 'EventController', { 'tenant_id': 'aaaa', 'stack_name': 'teststack', 'stack_id': 'bbbb' }) self.assertRoute( self.m, '/aaaa/stacks/teststack/bbbb/resources/cccc/events', 'GET', 'index', 'EventController', { 'tenant_id': 'aaaa', 'stack_name': 'teststack', 'stack_id': 'bbbb', 'resource_name': 'cccc' }) self.assertRoute( self.m, '/aaaa/stacks/teststack/bbbb/resources/cccc/events/dddd', 'GET', 'show', 'EventController', { 'tenant_id': 'aaaa', 'stack_name': 'teststack', 'stack_id': 'bbbb', 'resource_name': 'cccc', 'event_id': 'dddd' }) def test_software_configs(self): self.assertRoute( self.m, '/aaaa/software_configs', 'GET', 'index', 'SoftwareConfigController', { 'tenant_id': 'aaaa' }) self.assertRoute( self.m, '/aaaa/software_configs', 'POST', 'create', 'SoftwareConfigController', { 'tenant_id': 'aaaa' }) self.assertRoute( self.m, '/aaaa/software_configs/bbbb', 'GET', 'show', 'SoftwareConfigController', { 'tenant_id': 'aaaa', 'config_id': 'bbbb' }) self.assertRoute( self.m, '/aaaa/software_configs/bbbb', 'DELETE', 'delete', 'SoftwareConfigController', { 'tenant_id': 'aaaa', 'config_id': 'bbbb' }) def test_software_deployments(self): self.assertRoute( self.m, '/aaaa/software_deployments', 'GET', 'index', 
'SoftwareDeploymentController', { 'tenant_id': 'aaaa' }) self.assertRoute( self.m, '/aaaa/software_deployments', 'POST', 'create', 'SoftwareDeploymentController', { 'tenant_id': 'aaaa' }) self.assertRoute( self.m, '/aaaa/software_deployments/bbbb', 'GET', 'show', 'SoftwareDeploymentController', { 'tenant_id': 'aaaa', 'deployment_id': 'bbbb' }) self.assertRoute( self.m, '/aaaa/software_deployments/bbbb', 'PUT', 'update', 'SoftwareDeploymentController', { 'tenant_id': 'aaaa', 'deployment_id': 'bbbb' }) self.assertRoute( self.m, '/aaaa/software_deployments/bbbb', 'DELETE', 'delete', 'SoftwareDeploymentController', { 'tenant_id': 'aaaa', 'deployment_id': 'bbbb' }) def test_build_info(self): self.assertRoute( self.m, '/fake_tenant/build_info', 'GET', 'build_info', 'BuildInfoController', {'tenant_id': 'fake_tenant'} ) def test_405(self): self.assertRoute( self.m, '/fake_tenant/validate', 'GET', 'reject', 'DefaultMethodController', {'tenant_id': 'fake_tenant', 'allowed_methods': 'POST'} ) self.assertRoute( self.m, '/fake_tenant/stacks', 'PUT', 'reject', 'DefaultMethodController', {'tenant_id': 'fake_tenant', 'allowed_methods': 'GET,POST'} ) self.assertRoute( self.m, '/fake_tenant/stacks/fake_stack/stack_id', 'POST', 'reject', 'DefaultMethodController', {'tenant_id': 'fake_tenant', 'stack_name': 'fake_stack', 'stack_id': 'stack_id', 'allowed_methods': 'GET,PUT,PATCH,DELETE'} ) def test_options(self): self.assertRoute( self.m, '/fake_tenant/validate', 'OPTIONS', 'options', 'DefaultMethodController', {'tenant_id': 'fake_tenant', 'allowed_methods': 'POST'} ) self.assertRoute( self.m, '/fake_tenant/stacks/fake_stack/stack_id', 'OPTIONS', 'options', 'DefaultMethodController', {'tenant_id': 'fake_tenant', 'stack_name': 'fake_stack', 'stack_id': 'stack_id', 'allowed_methods': 'GET,PUT,PATCH,DELETE'} ) def test_services(self): self.assertRoute( self.m, '/aaaa/services', 'GET', 'index', 'ServiceController', { 'tenant_id': 'aaaa' })
28.288165
79
0.431057
1,227
16,492
5.619397
0.136919
0.108774
0.135025
0.142132
0.751269
0.742857
0.732995
0.656853
0.60145
0.519797
0
0.010723
0.445853
16,492
582
80
28.33677
0.743736
0.033107
0
0.724584
0
0
0.320276
0.120213
0
0
0
0
0.099815
1
0.03512
false
0
0.003697
0
0.040665
0
0
0
0
null
0
0
0
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
417897748e026f4f02ed88c0117e7837109fc500
185
py
Python
test_django/hello/urls.py
YEZHIAN1996/pythonstudy
a8774ef97266e8ab0289484ef190d2ee55f1d37c
[ "Apache-2.0" ]
1
2022-02-22T01:54:56.000Z
2022-02-22T01:54:56.000Z
test_django/hello/urls.py
YEZHIAN1996/pythonstudy
a8774ef97266e8ab0289484ef190d2ee55f1d37c
[ "Apache-2.0" ]
null
null
null
test_django/hello/urls.py
YEZHIAN1996/pythonstudy
a8774ef97266e8ab0289484ef190d2ee55f1d37c
[ "Apache-2.0" ]
null
null
null
from django.urls import path from hello.views import is_odd, http_req urlpatterns = [ path('<int:num>/', is_odd, name='is_odd'), path('http/req/', http_req, name='http_req') ]
23.125
48
0.681081
30
185
4
0.5
0.233333
0
0
0
0
0
0
0
0
0
0
0.151351
185
7
49
26.428571
0.764331
0
0
0
0
0
0.179348
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
417d85f2f2634da06e6ff80737c0f455994abeac
5,398
py
Python
sets/generator/nopattern/remote.py
ignacio-gallego/tbcnn_skill_pill
66c3939e2944160c864b61495ac4c7aaa56acd18
[ "MIT" ]
null
null
null
sets/generator/nopattern/remote.py
ignacio-gallego/tbcnn_skill_pill
66c3939e2944160c864b61495ac4c7aaa56acd18
[ "MIT" ]
null
null
null
sets/generator/nopattern/remote.py
ignacio-gallego/tbcnn_skill_pill
66c3939e2944160c864b61495ac4c7aaa56acd18
[ "MIT" ]
null
null
null
from pandas import DataFrame as PandasDataFrame from optimus.engines.base.basedataframe import BaseDataFrame class RemoteDummyAttribute: def __init__(self, name, names, dummy_id, op): self.__names = [*names, name] self.__op = op self.__id = dummy_id def __getattr__(self, item): return RemoteDummyAttribute(item, self.__names, self.__id, self.__op) def __call__(self, *args, **kwargs): if kwargs.get("client_submit"): client_submit = kwargs["client_submit"] del kwargs["client_submit"] else: client_submit = False def _f(op, unique_id, method, *args, **kwargs): obj = op.get_var(unique_id) if obj is None: op.del_var(unique_id) raise Exception("Remote variable with id " + unique_id + " not found or null") func = obj for me in method: func = getattr(func, me) if callable(func): result = func(*args, **kwargs) else: result = func return result if client_submit: return self.__op.remote_submit(_f, self.__id, self.__names, *args, **kwargs) else: return self.__op.remote_run(_f, self.__id, self.__names, *args, **kwargs) class RemoteDummyVariable: def __init__(self, op, unique_id, *args, **kwargs): self.op = op self.id = unique_id def __getattr__(self, item): if item.startswith('_'): raise AttributeError(item) return RemoteDummyAttribute(item, [], self.id, self.op) def __getstate__(self): return {"op": self.op, "id": self.id} def __setstate__(self, d): self.op = d.op self.id = d.id return def __del__(self): self.op.remote.del_var(self.id).result(180) class RemoteDummyDataFrame(RemoteDummyVariable): print = BaseDataFrame.print table = BaseDataFrame.table display = BaseDataFrame.display def __repr__(self): return self.ascii() def _repr_html_(self): return self.table() @property def meta(self): def _get_attr(op, unique_id, attr): df = op.get_var(unique_id) if df is None: op.del_var(unique_id) raise Exception("Remote variable with id " + unique_id + " not found or null") return getattr(df, attr) return self.op.remote_run(_get_attr, self.id, "meta") class ClientActor: op = {} _vars = {} 
_del_next = [] def __init__(self, engine=False): if not engine: from optimus.optimus import Engine engine = Engine.DASK.value from optimus import Optimus self.op = Optimus(engine) self.op.set_var = self.set_var self.op.get_var = self.get_var self.op.del_var = self.del_var self.op.list_vars = self.list_vars self.op.update_vars = self.update_vars self.set_var("_load", self.op.load) self.set_var("_create", self.op.create) def list_vars(self): return list(self._vars.keys()) def update_vars(self, values): self._vars.update(values) def _del_var(self, name): try: del self._vars[name] except: print(name + " not found") def del_var(self, name): for _name in self._del_next: self._del_var(_name) self._del_next = [] if not name.startswith("_"): if self._vars[name] is None: print(name + " not found") else: self._del_next.append(name) def set_var(self, name, value): self._vars[name] = value def get_var(self, name): return self._vars.get(name, None) def _return(self, value): import cupy as cp import numpy as np if isinstance(value, (dict,)): for key in value: value[key] = self._return(value[key]) return value elif isinstance(value, (list,)): return list(map(self._return, value)) elif isinstance(value, (set,)): return set(map(self._return, value)) elif isinstance(value, (tuple,)): return tuple(map(self._return, value)) elif isinstance(value, (PandasDataFrame,)): return value.head() elif not isinstance(value, (str, bool, int, float, complex, np.generic, cp.generic)) and value is not None: import uuid unique_id = str(uuid.uuid4()) self.set_var(unique_id, value) if isinstance(value, (BaseDataFrame,)): return {"dummy": unique_id, "dataframe": True} else: return {"dummy": unique_id, "dataframe": False} else: return value def submit(self, callback, *args, **kwargs): try: result = callback(self.op, *args, **kwargs) except Exception as err: import traceback error_class = err.__class__.__name__ detail = err.args[0] tb = traceback.format_exc() error = "%s: %s\n%s" % (error_class, detail, 
tb) return {"status": "error", "error": error} return self._return(result) if result is not None else None
30.156425
115
0.567803
646
5,398
4.490712
0.190402
0.041365
0.018959
0.034471
0.235436
0.122372
0.109962
0.053775
0.053775
0.053775
0
0.001372
0.32475
5,398
178
116
30.325843
0.794513
0
0
0.128571
0
0
0.040571
0
0
0
0
0
0
1
0.157143
false
0
0.057143
0.042857
0.45
0.021429
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
417e3d6a00fec073fc8e5b110ada2e2d5309582c
1,919
py
Python
app/util/ML/dataset.py
SoftwareEngineerUB/SmartEnergy
8893728eaf989a3b8bd2c1b3a8a1a5e6c4ce9c10
[ "Apache-2.0" ]
null
null
null
app/util/ML/dataset.py
SoftwareEngineerUB/SmartEnergy
8893728eaf989a3b8bd2c1b3a8a1a5e6c4ce9c10
[ "Apache-2.0" ]
null
null
null
app/util/ML/dataset.py
SoftwareEngineerUB/SmartEnergy
8893728eaf989a3b8bd2c1b3a8a1a5e6c4ce9c10
[ "Apache-2.0" ]
null
null
null
import torch as T import numpy as np from app.util.ML.constants import * DEVICE = T.device("cpu") GPU_ENABLED = False class DeviceMeterDataset(T.utils.data.Dataset): @staticmethod def createDatasets(device_id, mul_factor=1): allData = np.load(BASE_PATH + TRAIN_FOLDER + DEVICE_BASE_NAME + str(device_id) + ".npy") data_length = allData.shape[0] used_data = min(data_length // 2, 50000) increasing_factor = min(int(data_length * 2 / 10), 8000) np.random.shuffle(allData) ans = { "train": DeviceMeterDataset(allData[:used_data], mul_factor), "validation": DeviceMeterDataset(allData[used_data:used_data + increasing_factor], mul_factor), "test": DeviceMeterDataset(allData[used_data + increasing_factor:used_data + 2 * increasing_factor], mul_factor) } return ans # we need to generate mean error to have a comparasion basis for anomaly detections @staticmethod def createEvalData(device_id, mul_factor=1): allData = np.load(BASE_PATH + TRAIN_FOLDER + DEVICE_BASE_NAME + str(device_id) + ".npy") index = np.random.choice(allData.shape[0], 1000, replace=False) max_index = allData.shape[0] - 12 index = index[index < max_index] evalData = [] for id in index: evalData.append(allData[id:id + 12, :].copy()) for data in evalData: data[:, 3] *= mul_factor return evalData def __init__(self, data, mul_factor=1): self.allData = data self.allData[:, 3] *= mul_factor self.xy_data = T.tensor(self.allData, dtype=T.float32).to(DEVICE) def __len__(self): return len(self.xy_data) def __getitem__(self, idx): data = self.xy_data[idx, :3] value = self.xy_data[idx, 3].reshape((1)) return data, value
31.983333
112
0.625847
244
1,919
4.709016
0.368852
0.062663
0.034813
0.086162
0.160139
0.13577
0.13577
0.13577
0.13577
0.13577
0
0.024788
0.2642
1,919
59
113
32.525424
0.788952
0.042209
0
0.095238
0
0
0.016349
0
0
0
0
0
0
1
0.119048
false
0
0.071429
0.02381
0.309524
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
417ee4d8de3b41665d6dba7dbc8fa1ba85341638
1,903
py
Python
resource/v2.py
NuenoB/TheTeleop
57e3f745d391743fac408fb44bf20ffad945aa19
[ "BSD-3-Clause" ]
null
null
null
resource/v2.py
NuenoB/TheTeleop
57e3f745d391743fac408fb44bf20ffad945aa19
[ "BSD-3-Clause" ]
null
null
null
resource/v2.py
NuenoB/TheTeleop
57e3f745d391743fac408fb44bf20ffad945aa19
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'AddBag.ui' # # Created: Mon Nov 30 10:24:59 2015 # by: PyQt4 UI code generator 4.10.4 # # WARNING! All changes made in this file will be lost! from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s try: _encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig, _encoding) except AttributeError: def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig) class Ui_Form2(object): def setupUi(self, Form): Form.setObjectName(_fromUtf8("Form")) Form.resize(243, 210) self.label = QtGui.QLabel(Form) self.label.setGeometry(QtCore.QRect(50, 30, 121, 17)) self.label.setObjectName(_fromUtf8("label")) self.lineEdit = QtGui.QLineEdit(Form) self.lineEdit.setGeometry(QtCore.QRect(60, 60, 113, 27)) self.lineEdit.setObjectName(_fromUtf8("lineEdit")) self.pushButton = QtGui.QPushButton(Form) self.pushButton.setGeometry(QtCore.QRect(10, 140, 98, 27)) self.pushButton.setObjectName(_fromUtf8("pushButton")) self.pushButton_2 = QtGui.QPushButton(Form) self.pushButton_2.setGeometry(QtCore.QRect(120, 140, 98, 27)) self.pushButton_2.setObjectName(_fromUtf8("pushButton_2")) self.retranslateUi(Form) QtCore.QMetaObject.connectSlotsByName(Form) def retranslateUi(self, Form): Form.setWindowTitle(_translate("Form", "Form", None)) self.label.setText(_translate("Form", "Insert New Robot", None)) self.pushButton.setText(_translate("Form", "Cancel", None)) self.pushButton_2.setText(_translate("Form", "Create", None))
36.596154
79
0.687861
224
1,903
5.75
0.397321
0.086957
0.062112
0.086957
0.212733
0.127329
0.127329
0.127329
0.127329
0.127329
0
0.05
0.190751
1,903
51
80
37.313725
0.786364
0.110878
0
0.166667
1
0
0.051693
0
0
0
0
0
0
1
0.138889
false
0
0.027778
0.083333
0.277778
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
417fc0767f9f3045bfabe98057d3d6ae8df77d25
59,087
py
Python
src/rgt/viz/Main.py
mguo123/pan_omics
e1cacd543635b398fb08c0b31d08fa6b7c389658
[ "MIT" ]
null
null
null
src/rgt/viz/Main.py
mguo123/pan_omics
e1cacd543635b398fb08c0b31d08fa6b7c389658
[ "MIT" ]
null
null
null
src/rgt/viz/Main.py
mguo123/pan_omics
e1cacd543635b398fb08c0b31d08fa6b7c389658
[ "MIT" ]
null
null
null
# Python Libraries from __future__ import division from __future__ import print_function import os import sys import time import getpass import argparse import datetime import matplotlib matplotlib.use('Agg', warn=False) from .boxplot import Boxplot from .lineplot import Lineplot from .jaccard_test import Jaccard from .projection_test import Projection from .intersection_test import Intersect from .bed_profile import BedProfile from .shared_function import check_dir, print2, output_parameters, \ copy_em, list_all_index, output from .plotTools import Venn from .. import __version__ current_dir = os.getcwd() """ Statistical analysis methods and plotting tools for ExperimentalMatrix Author: Joseph C.C. Kuo """ def main(): ############################################################################### ##### PARAMETERS ############################################################## ############################################################################### # Some general help descriptions ######### Some general plotting arguments descriptions ############### helpinput = 'The file name of the input Experimental Matrix file. Recommended to add more columns for more information for ploting. For example, cell type or factors. (default: %(default)s)' helpoutput = 'The directory name for the output files. For example, project name. (default: %(default)s)' helptitle = 'The title shown on the top of the plot and also the folder name. (default: %(default)s)' helpgroup = "Group the data by reads(needs 'factor' column), regions(needs 'factor' column), another name of column (for example, 'cell')in the header of experimental matrix, or None. (default: %(default)s)" helpgroupbb = "Group the data by any optional column (for example, 'cell') of experimental matrix, or None. (default: %(default)s)" helpsort = "Sort the data by reads(needs 'factor' column), regions(needs 'factor' column), another name of column (for example, 'cell')in the header of experimental matrix, or None. 
(default: %(default)s)" helpcolor = "Color the data by reads(needs 'factor' column), regions(needs 'factor' column), another name of column (for example, 'cell')in the header of experimental matrix, or None. (default: %(default)s)" helpcolorbb = "Color the data by any optional column (for example, 'cell') of experimental matrix, or None. (default: %(default)s)" help_define_color = 'Define the specific colors with the given column "color" in experimental matrix. The color should be in the format of matplotlib.colors. For example, "r" for red, "b" for blue, or "(100, 35, 138)" for RGB. (default: %(default)s)' helpreference = 'The file name of the reference Experimental Matrix. Multiple references are acceptable. (default: %(default)s)' helpquery = 'The file name of the query Experimental Matrix. Multiple queries are acceptable. (default: %(default)s)' helpcol = "Group the data in columns by reads(needs 'factor' column), regions(needs 'factor' column), another name of column (for example, 'cell')in the header of experimental matrix, or None. (default: %(default)s)" helprow = "Group the data in rows by reads(needs 'factor' column), regions(needs 'factor' column), another name of column (for example, 'cell')in the header of experimental matrix, or None. (default: %(default)s)" helpmp = "Define the number of cores for parallel computation. (default: %(default)s)" version_message = "viz - Regulatory Analysis Toolbox (RGT). Version: " + str(__version__) parser = argparse.ArgumentParser(description='Provides various Statistical analysis methods and plotting tools for ExperimentalMatrix.\ \nAuthor: Joseph C.C. 
Kuo, Ivan Gesteira Costa Filho', formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=True, version=version_message) subparsers = parser.add_subparsers(help='sub-command help', dest='mode') ################### BED profile ########################################## parser_bedprofile = subparsers.add_parser('bed_profile', help='BED profile analyzes the given BED file(s) by their length, distribution and composition of the sequences.') parser_bedprofile.add_argument('-i', metavar=' ', help="Input experimental matrix or Input BED file or Input directory which contains BED files") parser_bedprofile.add_argument('-o', metavar=' ', help=helpoutput) parser_bedprofile.add_argument('-t', metavar=' ', default='bed_profile', help=helptitle) parser_bedprofile.add_argument('-organism', metavar=' ', default=None, help='Define the organism. (default: %(default)s)') parser_bedprofile.add_argument('-biotype', metavar=' ', default=False, help='Define the directory for biotype BED files.') parser_bedprofile.add_argument('-repeats', metavar=' ', default=False, help='Define the directory for repeats BED files.') parser_bedprofile.add_argument('-genposi', metavar=' ', default=False, help='Define the directory for the generic position BED files. 
(exons, introns, and intergenic regions)') parser_bedprofile.add_argument('-labels', metavar=' ', default=None, help='Define the labels for more BED sets') parser_bedprofile.add_argument('-sources', metavar=' ', default=None, help='Define the directories for more BED sets corresponding to the labels') parser_bedprofile.add_argument('-strand', metavar=' ', default=None, help='Define whether to perform strand-specific comparison for each reference corresponding to the labels (T or F)') parser_bedprofile.add_argument('-other', metavar=' ', default=None, help='Define whether to count "else" for each reference corresponding to the labels (T or F)') parser_bedprofile.add_argument('-background', metavar=' ', default=None, help='Add the background to the first row of the figures (T or F)') parser_bedprofile.add_argument('-coverage', action="store_true", default=False, help='Calculate the overlapping region by coverage in bp instead of simple counting') parser_bedprofile.add_argument('-test', action="store_true", default=False, help='test script') ################### Projection test ########################################## parser_projection = subparsers.add_parser('projection', help='Projection test evaluates the association level by comparing to the random binomial model.') parser_projection.add_argument('-r', metavar=' ', help=helpreference) parser_projection.add_argument('-q', metavar=' ', help=helpquery) parser_projection.add_argument('-o', metavar=' ', help=helpoutput) parser_projection.add_argument('-t', metavar=' ', default='projection_test', help=helptitle) parser_projection.add_argument('-g', metavar=' ', default=None, help=helpgroupbb) parser_projection.add_argument('-c', metavar=' ', default="regions", help=helpcolorbb) parser_projection.add_argument('-bg', metavar=' ', type=str, default=None, help="Define a BED file as background. If not defined, the background is whole genome according to the given organism. 
(default: %(default)s)") parser_projection.add_argument('-union', action="store_true", help='Take the union of references as background for binominal test. (default: %(default)s)') parser_projection.add_argument('-organism', metavar=' ', default='hg19', help='Define the organism. (default: %(default)s)') parser_projection.add_argument('-log', action="store_true", help='Set y axis of the plot in log scale. (default: %(default)s)') parser_projection.add_argument('-color', action="store_true", help=help_define_color) parser_projection.add_argument('-show', action="store_true", help='Show the figure in the screen. (default: %(default)s)') parser_projection.add_argument('-table', action="store_true", help='Store the tables of the figure in text format. (default: %(default)s)') parser_projection.add_argument('-bed', action="store_true", default=False, help='Output BED files for the regions of query which overlap the reference. (default: %(default)s)') parser_projection.add_argument('-pw', metavar=' ', type=int, default=5, help='Define the width of single panel. (default: %(default)s)') parser_projection.add_argument('-ph', metavar=' ', type=int, default=3, help='Define the height of single panel. (default: %(default)s)') parser_projection.add_argument('-cfp', metavar=' ', type=float, default=0, help='Define the cutoff of the proportion. 
(default: %(default)s)') parser_projection.add_argument('-load', action="store_false", default=True, help='Load the BED files later during processing, which saves memory usage when dealing with large number of BED files.') ################### Intersect Test ########################################## parser_intersect = subparsers.add_parser('intersect', help='Intersection test provides various modes of intersection to test the association between references and queries.') parser_intersect.add_argument('-r', metavar=' ', help=helpreference) parser_intersect.add_argument('-q', metavar=' ', help=helpquery) parser_intersect.add_argument('-o', help=helpoutput) parser_intersect.add_argument('-t', metavar=' ', default='intersection_test', help=helptitle) parser_intersect.add_argument('-g', metavar=' ', default=None, help=helpgroupbb) parser_intersect.add_argument('-c', metavar=' ', default="regions", help=helpcolorbb) parser_intersect.add_argument('-organism', metavar=' ', default='hg19', help='Define the organism. (default: %(default)s)') parser_intersect.add_argument('-bg', metavar=' ', help="Define a BED file as background. If not defined, the background is whole genome according to the given organism. (default: %(default)s)") parser_intersect.add_argument('-m', metavar=' ', default="count", choices=['count', 'bp'], help="Define the mode of calculating intersection. 'count' outputs the number of overlapped regions.'bp' outputs the coverage(basepair) of intersection. (default: %(default)s)") parser_intersect.add_argument('-tc', metavar=' ', type=int, default=False, help="Define the threshold(in percentage) of reference length for intersection counting. For example, '20' means that the query which overlaps more than 20%% of reference is counted as intersection. (default: %(default)s)") parser_intersect.add_argument('-ex', metavar=' ', type=int, default=0, help="Define the extension(in bp) of reference length for intersection counting. 
For example, '20' means that each region of reference is extended by 20 bp in order to include proximal queries. (default: %(default)s)") parser_intersect.add_argument('-log', action="store_true", help='Set y axis of the plot in log scale.') parser_intersect.add_argument('-color', action="store_true", help=help_define_color) parser_intersect.add_argument('-show', action="store_true", help='Show the figure in the screen. (default: %(default)s)') parser_intersect.add_argument('-stest', metavar=' ', type=int, default=0, help='Define the repetition time of random subregion test between reference and query. (default: %(default)s)') parser_intersect.add_argument('-mp', metavar=' ', default=4, type=int, help=helpmp) parser_intersect.add_argument('-pw', metavar=' ', type=int, default=3, help='Define the width of single panel. (default: %(default)s)') parser_intersect.add_argument('-ph', metavar=' ', type=int, default=3, help='Define the height of single panel. (default: %(default)s)') ################### Jaccard test ########################################## parser_jaccard = subparsers.add_parser('jaccard', help='Jaccard test evaluates the association level by comparing with jaccard index from repeating randomization.') parser_jaccard.add_argument('-o', help=helpoutput) parser_jaccard.add_argument('-r', metavar=' ', help=helpreference) parser_jaccard.add_argument('-q', metavar=' ', help=helpquery) parser_jaccard.add_argument('-t', metavar=' ', default='jaccard_test', help=helptitle) parser_jaccard.add_argument('-rt', metavar=' ', type=int, default=500, help='Define how many times to run the randomization. (default: %(default)s)') parser_jaccard.add_argument('-g', default=None, help=helpgroupbb) parser_jaccard.add_argument('-c', default="regions", help=helpcolorbb) parser_jaccard.add_argument('-organism', default='hg19', help='Define the organism. 
(default: %(default)s)') parser_jaccard.add_argument('-nlog', action="store_false", help='Set y axis of the plot not in log scale. (default: %(default)s)') parser_jaccard.add_argument('-color', action="store_true", help=help_define_color) parser_jaccard.add_argument('-show', action="store_true", help='Show the figure in the screen. (default: %(default)s)') parser_jaccard.add_argument('-table', action="store_true", help='Store the tables of the figure in text format. (default: %(default)s)') parser_jaccard.add_argument('-pw', metavar=' ', type=int, default=3, help='Define the width of single panel. (default: %(default)s)') parser_jaccard.add_argument('-ph', metavar=' ', type=int, default=3, help='Define the height of single panel. (default: %(default)s)') ################### Combinatorial Test ########################################## parser_combinatorial = subparsers.add_parser('combinatorial', help='Combinatorial test compare all combinatorial possibilities from reference to test the association between references and queries.') parser_combinatorial.add_argument('-o', help=helpoutput) parser_combinatorial.add_argument('-r', metavar=' ', help=helpreference) parser_combinatorial.add_argument('-q', metavar=' ', help=helpquery) parser_combinatorial.add_argument('-t', metavar=' ', default='combinatorial_test', help=helptitle) parser_combinatorial.add_argument('-g', default=None, help=helpgroupbb) parser_combinatorial.add_argument('-c', default="regions", help=helpcolorbb) parser_combinatorial.add_argument('-organism', default='hg19', help='Define the organism. (default: %(default)s)') parser_combinatorial.add_argument('-bg', help="Define a BED file as background. If not defined, the background is whole genome according to the given organism. (default: %(default)s)") parser_combinatorial.add_argument('-m', default="count", choices=['count', 'bp'], help="Define the mode of calculating intersection. 
'count' outputs the number of overlapped regions.'bp' outputs the coverage(basepair) of intersection. (default: %(default)s)") parser_combinatorial.add_argument('-tc', type=int, default=False, help="Define the threshold(in percentage) of reference length for intersection counting. For example, '20' means that the query which overlaps more than 20%% of reference is counted as intersection. (default: %(default)s)") parser_combinatorial.add_argument('-ex', type=int, default=0, help="Define the extension(in percentage) of reference length for intersection counting. For example, '20' means that each region of reference is extended by 20%% in order to include proximal queries. (default: %(default)s)") parser_combinatorial.add_argument('-log', action="store_true", help='Set y axis of the plot in log scale. (default: %(default)s)') parser_combinatorial.add_argument('-color', action="store_true", help=help_define_color) parser_combinatorial.add_argument('-venn', action="store_true", help='Show the Venn diagram of the combinatorials of references. (default: %(default)s)') parser_combinatorial.add_argument('-show', action="store_true", help='Show the figure in the screen. (default: %(default)s)') parser_combinatorial.add_argument('-stest', type=int, default=0, help='Define the repetition time of random subregion test between reference and query. (default: %(default)s)') parser_combinatorial.add_argument('-pw', metavar=' ', type=int, default=3, help='Define the width of single panel. (default: %(default)s)') parser_combinatorial.add_argument('-ph', metavar=' ', type=int, default=3, help='Define the height of single panel. 
(default: %(default)s)') ################### Boxplot ########################################## parser_boxplot = subparsers.add_parser('boxplot', help='Boxplot based on the BAM and BED files for gene association analysis.') parser_boxplot.add_argument('input', help=helpinput) parser_boxplot.add_argument('-o', metavar=' ', help=helpoutput) parser_boxplot.add_argument('-t', metavar=' ', default='boxplot', help=helptitle) parser_boxplot.add_argument('-g', metavar=' ', default='reads', help=helpgroup) parser_boxplot.add_argument('-c', metavar=' ', default='regions', help=helpcolor) parser_boxplot.add_argument('-s', metavar=' ', default='None', help=helpsort) parser_boxplot.add_argument('-scol', action="store_true", help="Share y axis among columns. (default: %(default)s)") parser_boxplot.add_argument('-nlog', action="store_false", help='Set y axis of the plot not in log scale. (default: %(default)s)') parser_boxplot.add_argument('-color', action="store_true", help=help_define_color) parser_boxplot.add_argument('-pw', metavar=' ', type=int, default=3, help='Define the width of single panel. (default: %(default)s)') parser_boxplot.add_argument('-ph', metavar=' ', type=int, default=3, help='Define the height of single panel. (default: %(default)s)') parser_boxplot.add_argument('-nqn', action="store_true", help='No quantile normalization in calculation. (default: %(default)s)') parser_boxplot.add_argument('-df', action="store_true", help="Show the difference of the two signals which share the same labels.The result is the subtraction of the first to the second. (default: %(default)s)") parser_boxplot.add_argument('-ylim', metavar=' ', type=int, default=None, help="Define the limit of y axis. (default: %(default)s)") parser_boxplot.add_argument('-p', metavar=' ', type=float, default=0.05, help='Define the significance level for multiple test. (default: %(default)s)') parser_boxplot.add_argument('-show', action="store_true", help='Show the figure in the screen. 
(default: %(default)s)') parser_boxplot.add_argument('-table', action="store_true", help='Store the tables of the figure in text format. (default: %(default)s)') ################### Lineplot ########################################## parser_lineplot = subparsers.add_parser('lineplot', help='Generate lineplot with various modes.') choice_center = ['midpoint', 'bothends', 'upstream', 'downstream'] # Be consist as the arguments of GenomicRegionSet.relocate_regions parser_lineplot.add_argument('input', help=helpinput) parser_lineplot.add_argument('-o', help=helpoutput) parser_lineplot.add_argument('-ga', action="store_true", help="Use genetic annotation data as input regions (e.g. TSS, TTS, exons and introns) instead of the BED files in the input matrix.") parser_lineplot.add_argument('-t', metavar=' ', default='lineplot', help=helptitle) parser_lineplot.add_argument('-center', metavar=' ', choices=choice_center, default='midpoint', help='Define the center to calculate coverage on the regions. Options are: ' + ', '.join( choice_center) + '. (default: %(default)s) The bothend mode will flap the right end region for calculation.') parser_lineplot.add_argument('-g', metavar=' ', default='None', help=helpgroup) parser_lineplot.add_argument('-row', metavar=' ', default='None', help=helprow) parser_lineplot.add_argument('-col', metavar=' ', default='regions', help=helpcol) parser_lineplot.add_argument('-c', metavar=' ', default='reads', help=helpcolor) parser_lineplot.add_argument('-e', metavar=' ', type=int, default=2000, help='Define the extend length of interested region for plotting. (default: %(default)s)') parser_lineplot.add_argument('-rs', metavar=' ', type=int, default=200, help='Define the readsize for calculating coverage. (default: %(default)s)') parser_lineplot.add_argument('-ss', metavar=' ', type=int, default=50, help='Define the stepsize for calculating coverage. 
(default: %(default)s)') parser_lineplot.add_argument('-bs', metavar=' ', type=int, default=100, help='Define the binsize for calculating coverage. (default: %(default)s)') parser_lineplot.add_argument('-log', action="store_true", help="Take log for the value before calculating average. (default: %(default)s)") parser_lineplot.add_argument('-scol', action="store_true", help="Share y axis among columns. (default: %(default)s)") parser_lineplot.add_argument('-srow', action="store_true", help="Share y axis among rows. (default: %(default)s)") parser_lineplot.add_argument('-organism', metavar=' ', help='Define the organism. (default: %(default)s)') parser_lineplot.add_argument('-color', action="store_true", help=help_define_color) parser_lineplot.add_argument('-pw', metavar=' ', type=int, default=3, help='Define the width of single panel. (default: %(default)s)') parser_lineplot.add_argument('-ph', metavar=' ', type=int, default=3, help='Define the height of single panel. (default: %(default)s)') parser_lineplot.add_argument('-test', action="store_true", help="Sample only the first 10 regions in all BED files for testing. (default: %(default)s)") parser_lineplot.add_argument('-mp', metavar=' ', type=int, default=0, help="Perform multiprocessing for faster computation. (default: %(default)s)") parser_lineplot.add_argument('-df', action="store_true", help="Show the difference of the two signals which share the same labels.The result is the subtraction of the first to the second. (default: %(default)s)") parser_lineplot.add_argument('-dft', metavar=' ', default=None, help="Add one more tag for calculating difference. (default: %(default)s)") parser_lineplot.add_argument('-show', action="store_true", help='Show the figure in the screen. (default: %(default)s)') parser_lineplot.add_argument('-table', action="store_true", help='Store the tables of the figure in text format. 
(default: %(default)s)') parser_lineplot.add_argument('-sense', action="store_true", help='Set the plot sense-specific. (default: %(default)s)') parser_lineplot.add_argument('-strand', action="store_true", help='Set the plot strand-specific. (default: %(default)s)') parser_lineplot.add_argument('-average', action="store_true", help='Show only the average of the replicates. (default: %(default)s)') parser_lineplot.add_argument('-flip_negative', action="store_true", default=False, help='Flip the negative strand (default: %(default)s)') parser_lineplot.add_argument('-extend_outside', action="store_true", default=False, help='Extend the window outside of the given regions and compress the given region into fixed internal. (default: %(default)s)') parser_lineplot.add_argument('-add_region_number', action="store_true", default=False, help="Add the number of regions in the axis label. (default: %(default)s)") ################### Heatmap ########################################## parser_heatmap = subparsers.add_parser('heatmap', help='Generate heatmap with various modes.') choice_center = ['midpoint', 'bothends', 'upstream', 'downstream'] # Be consist as the arguments of GenomicRegionSet.relocate_regions parser_heatmap.add_argument('input', help=helpinput) parser_heatmap.add_argument('-o', metavar=' ', help=helpoutput) parser_heatmap.add_argument('-ga', action="store_true", help="Use genetic annotation data as input regions (e.g. TSS, TTS, exons and introns) instead of the BED files in the input matrix. (default: %(default)s)") parser_heatmap.add_argument('-t', metavar=' ', default='heatmap', help=helptitle) parser_heatmap.add_argument('-center', metavar=' ', choices=choice_center, default='midpoint', help='Define the center to calculate coverage on the regions. Options are: ' + ', '.join( choice_center) + '.(Default:midpoint) The bothend mode will flap the right end region for calculation. 
(default: %(default)s)') parser_heatmap.add_argument('-sort', metavar=' ', type=int, default=None, help='Define the way to sort the signals.' + 'Default is no sorting at all, the signals arrange in the order of their position; ' + '"0" is sorting by the average ranking of all signals; ' + '"1" is sorting by the ranking of 1st column; "2" is 2nd and so on... (default: %(default)s)') parser_heatmap.add_argument('-col', metavar=' ', default='regions', help=helpcol) parser_heatmap.add_argument('-c', metavar=' ', default='reads', help=helpcolor) parser_heatmap.add_argument('-row', metavar=' ', default='None', help=helprow) parser_heatmap.add_argument('-e', metavar=' ', type=int, default=2000, help='Define the extend length of interested region for plotting. (default: %(default)s)') parser_heatmap.add_argument('-rs', metavar=' ', type=int, default=200, help='Define the readsize for calculating coverage. (default: %(default)s)') parser_heatmap.add_argument('-ss', metavar=' ', type=int, default=50, help='Define the stepsize for calculating coverage. (default: %(default)s)') parser_heatmap.add_argument('-bs', metavar=' ', type=int, default=100, help='Define the binsize for calculating coverage. (default: %(default)s)') parser_heatmap.add_argument('-organism', metavar=' ', default='hg19', help='Define the organism. (default: %(default)s)') parser_heatmap.add_argument('-color', action="store_true", help=help_define_color) parser_heatmap.add_argument('-log', action="store_true", help='Set colorbar in log scale. (default: %(default)s)') parser_heatmap.add_argument('-mp', action="store_true", help="Perform multiprocessing for faster computation. (default: %(default)s)") parser_heatmap.add_argument('-show', action="store_true", help='Show the figure in the screen. (default: %(default)s)') parser_heatmap.add_argument('-table', action="store_true", help='Store the tables of the figure in text format. 
(default: %(default)s)') ################### Venn Diagram ######################################## parser_venn = subparsers.add_parser('venn', help='Generate Venn Diagram with peaks of gene list.') parser_venn.add_argument('-s1', metavar=' ', default=None, help="Define the file for gene set 1 (BED or gene list)") parser_venn.add_argument('-s2', metavar=' ', default=None, help="Define the file for gene set 2 (BED or gene list)") parser_venn.add_argument('-s3', metavar=' ', default=None, help="Define the file for gene set 3 (BED or gene list)") parser_venn.add_argument('-s4', metavar=' ', default=None, help="Define the file for gene set 3 (BED or gene list)") parser_venn.add_argument('-l1', metavar=' ', default=None, help="Define label on venn diagram for set 1") parser_venn.add_argument('-l2', metavar=' ', default=None, help="Define label on venn diagram for set 2") parser_venn.add_argument('-l3', metavar=' ', default=None, help="Define label on venn diagram for set 3") parser_venn.add_argument('-l4', metavar=' ', default=None, help="Define label on venn diagram for set 4") parser_venn.add_argument('-o', metavar=' ', help=helpoutput) parser_venn.add_argument('-t', metavar=' ', default='venn_diagram', help=helptitle) parser_venn.add_argument('-organism', metavar=' ', help='Define the organism. 
') ################### Integration ########################################## parser_integration = subparsers.add_parser('integration', help='Provides some tools to deal with experimental matrix or other purposes.') parser_integration.add_argument('-ihtml', action="store_true", help='Integrate all the html files within the given directory and generate index.html for all plots.') parser_integration.add_argument('-l2m', help='Convert a given file list in txt format into a experimental matrix.') parser_integration.add_argument('-o', help='Define the folder of the output file.') ################### Parsing the arguments ################################ # print(sys.argv) if len(sys.argv) == 1: parser.print_help() sys.exit(0) elif len(sys.argv) == 2: if sys.argv[1] == "-h" or sys.argv[1] == "--help": parser.print_help() sys.exit(0) elif sys.argv[1] == "-v" or sys.argv[1] == "--version": print(version_message) sys.exit(0) else: # retrieve subparsers from parser subparsers_actions = [action for action in parser._actions if isinstance(action, argparse._SubParsersAction)] # there will probably only be one subparser_action,but better save than sorry for subparsers_action in subparsers_actions: # get all subparsers and print help for choice, subparser in subparsers_action.choices.items(): if choice == sys.argv[1]: print("\nYou need more arguments.") print("\nSubparser '{}'".format(choice)) subparser.print_help() sys.exit(1) else: args = parser.parse_args() if args.mode != 'integration': if not args.o: print("** Error: Please define the output directory (-o).") sys.exit(1) t0 = time.time() # Normalised output path args.o = os.path.normpath(os.path.join(current_dir, args.o)) check_dir(args.o) check_dir(os.path.join(args.o, args.t)) # Input parameters dictionary parameter = ["Time: " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "User: " + getpass.getuser(), "\nCommand:\n\t$ " + " ".join(sys.argv)] 
################################################################################################# ##### Main ##################################################################################### ################################################################################################# if args.mode == 'bed_profile': ################### BED profile ########################################## print2(parameter, "\n############# BED profile #############") print2(parameter, "\tInput path:\t" + args.i) print2(parameter, "\tOutput path:\t" + os.path.join(args.o, args.t)) if not args.organism: print("Please define organism...") sys.exit(1) else: print2(parameter, "\tOrganism:\t" + args.organism) if args.labels: args.labels = args.labels.split(",") args.sources = args.sources.split(",") if not args.sources: print("Please define the sources files corresponding to the the labels.") sys.exit(1) elif len(args.labels) != len(args.sources): print("The number of labels doesn't match the number of sources.") sys.exit(1) if args.strand: strands = [] for i, bss in enumerate(args.strand.split(",")): if bss == "T": strands.append(True) args.labels[i] += "(strand-specific)" elif bss == "F": strands.append(False) args.strand = strands else: args.strand = [True for i in args.labels] if args.other: others = [] for i, bss in enumerate(args.other.split(",")): if bss == "T": others.append(True) elif bss == "F": others.append(False) args.other = others else: args.other = [True for i in args.labels] bed_profile = BedProfile(args.i, args.organism, args) bed_profile.cal_statistics() bed_profile.plot_distribution_length() bed_profile.plot_motif_composition() if args.biotype: bed_profile.plot_ref(ref_dir=args.biotype, tag="Biotype", other=True, strand=True, background=True) if args.repeats: bed_profile.plot_ref(ref_dir=args.repeats, tag="Repeats", other=True, background=True) if args.genposi: bed_profile.plot_ref(ref_dir=args.genposi, tag="Genetic position", other=False, strand=False) if 
args.labels: for i, label in enumerate(args.labels): bed_profile.plot_ref(ref_dir=args.sources[i], tag=label, other=args.other[i], strand=args.strand[i], background=True) bed_profile.write_tables(args.o, args.t) bed_profile.save_fig(filename=os.path.join(args.o, args.t, "figure_" + args.t)) bed_profile.gen_html(args.o, args.t) ################### Projection test ########################################## elif args.mode == 'projection': # Fetching reference and query EM print2(parameter, "\n############# Projection Test #############") print2(parameter, "\tReference: " + args.r) print2(parameter, "\tQuery: " + args.q) print2(parameter, "\tOutput directory: " + os.path.basename(args.o)) print2(parameter, "\tExperiment title: " + args.t) projection = Projection(args.r, args.q, load_bed=args.load) projection.group_refque(args.g) projection.colors(args.c, args.color) if args.bg: print2(parameter, "\tBackground: " + args.bg) projection.set_background(bed_path=args.bg) if args.union: projection.ref_union() projection.projection_test(organism=args.organism) print2(parameter, "\tTaking union of references as the background. 
") else: projection.projection_test(organism=args.organism) # generate pdf projection.plot(args.log, args.pw, args.ph) output(f=projection.fig, directory=args.o, folder=args.t, filename="projection_test", extra=matplotlib.pyplot.gci(), pdf=True, show=args.show) if args.bed: print2(parameter, "\tOutput BED files: " + "/".join(os.path.join(args.o, args.t, "bed").split("/")[-3:])) projection.output_interq(directory=os.path.join(args.o, args.t, "bed")) # generate html projection.gen_html(args.o, args.t, args=args) if args.table: projection.table(directory=args.o, folder=args.t) print("\nAll related files are saved in: " + os.path.join(os.path.basename(args.o), args.t)) t1 = time.time() print2(parameter, "\nTotal running time is : " + str(datetime.timedelta(seconds=round(t1 - t0)))) output_parameters(parameter, directory=args.o, folder=args.t, filename="parameters.txt") copy_em(em=args.r, directory=args.o, folder=args.t, filename="reference_experimental_matrix.txt") copy_em(em=args.q, directory=args.o, folder=args.t, filename="query_experimental_matrix.txt") list_all_index(path=args.o) ########################################################################### ################### Intersect Test ########################################## if args.mode == 'intersect': print2(parameter, "\n############ Intersection Test ############") print2(parameter, "\tReference: " + args.r) print2(parameter, "\tQuery: " + args.q) print2(parameter, "\tOutput directory: " + os.path.basename(args.o)) print2(parameter, "\tExperiment title: " + args.t) # Fetching reference and query EM inter = Intersect(args.r, args.q, mode_count=args.m, organism=args.organism) # Grouping inter.group_refque(args.g) # Setting background inter.background(args.bg) # Extension if args.ex == 0: pass elif args.ex > 0: inter.extend_ref(args.ex) elif args.ex < 0: print("\n**** extension percentage(-ex) should be positive value, not negative.\n") sys.exit(1) inter.colors(args.c, args.color) print("\tProcessing 
data.", end="") sys.stdout.flush() inter.count_intersect(threshold=args.tc) # generate pdf print("\n\tGenerate graphics...") inter.barplot(logt=args.log) output(f=inter.bar, directory=args.o, folder=args.t, filename="intersection_bar", extra=matplotlib.pyplot.gci(), pdf=True, show=args.show) inter.stackedbar() output(f=inter.sbar, directory=args.o, folder=args.t, filename="intersection_stackedbar", extra=matplotlib.pyplot.gci(), pdf=True, show=args.show) inter.barplot(logt=args.log, percentage=True) output(f=inter.bar, directory=args.o, folder=args.t, filename="intersection_barp", extra=matplotlib.pyplot.gci(), pdf=True, show=args.show) if args.stest > 0: print("\tStatistical testing by randomizing the regions...") inter.stest(repeat=args.stest, threshold=args.tc, mp=args.mp) # generate html inter.gen_html(directory=args.o, title=args.t, align=50, args=args) t1 = time.time() print2(parameter, "\nAll related files are saved in: " + os.path.join(os.path.basename(args.o), args.t)) print2(parameter, "\nTotal running time is : " + str(datetime.timedelta(seconds=round(t1 - t0)))) output_parameters(parameter, directory=args.o, folder=args.t, filename="parameters.txt") copy_em(em=args.r, directory=args.o, folder=args.t, filename="reference_experimental_matrix.txt") copy_em(em=args.q, directory=args.o, folder=args.t, filename="query_experimental_matrix.txt") list_all_index(path=args.o) ########################################################################### ################### Jaccard test ########################################## if args.mode == "jaccard": """Return the jaccard test of every possible comparisons between two ExperimentalMatrix. Method: The distribution of random jaccard index is calculated by randomizing query for given times. 
Then, we compare the real jaccard index to the distribution and formulate p-value as p-value = (# random jaccard > real jaccard)/(# random jaccard) """ print("\n############## Jaccard Test ###############") jaccard = Jaccard(args.r, args.q) jaccard.group_refque(args.g) jaccard.colors(args.c, args.color) # jaccard test jaccard.jaccard_test(args.rt, args.organism) parameter = parameter + jaccard.parameter t1 = time.time() # ploting and generate pdf jaccard.plot(logT=args.nlog) for i, f in enumerate(jaccard.fig): output(f=f, directory=args.o, folder=args.t, filename="jaccard_test" + str(i + 1), extra=matplotlib.pyplot.gci(), pdf=True, show=args.show) # generate html jaccard.gen_html(args.o, args.t) if args.table: jaccard.table(directory=args.o, folder=args.t) print("\nAll related files are saved in: " + os.path.join(dir, args.o, args.t)) print2(parameter, "\nTotal running time is : " + str(datetime.timedelta(seconds=round(t1 - t0)))) output_parameters(parameter, directory=args.o, folder=args.t, filename="parameters.txt") copy_em(em=args.r, directory=args.o, folder=args.t, filename="Reference_experimental_matrix.txt") copy_em(em=args.q, directory=args.o, folder=args.t, filename="Query_experimental_matrix.txt") list_all_index(path=args.o) ########################################################################### ################### Combinatorial Test ########################################## if args.mode == 'combinatorial': print("\n############ Combinatorial Test ############") # Fetching reference and query EM # comb = Combinatorial(args.r,args.q, mode_count=args.m, organism=args.organism) inter = Intersect(args.r, args.q, mode_count=args.m, organism=args.organism) # Setting background inter.background(args.bg) # Grouping inter.group_refque(args.g) # Extension if args.ex == 0: pass elif args.ex > 0: inter.extend_ref(args.ex) elif args.ex < 0: print("\n**** extension percentage(-ex) should be positive value, not negative.\n") sys.exit(1) # Combinatorial 
print2(parameter, "Generating all combinatorial regions for further analysis...") inter.combinatorial() inter.count_intersect(threshold=args.tc, frequency=True) # generate pdf inter.colors_comb() # inter.barplot(args.log) # output(f=inter.bar, directory = args.output, folder = args.title, filename="intersection_bar",extra=matplotlib.pyplot.gci(),pdf=True,show=args.show) # if args.stackedbar: # inter.colors(args.c, args.color,ref_que = "ref") inter.comb_stacked_plot() output(f=inter.sbar, directory=args.o, folder=args.t, filename="intersection_stackedbar", extra=matplotlib.pyplot.gci(), pdf=True, show=args.show) if args.venn: inter.comb_venn(directory=os.path.join(args.o, args.t)) # if args.lineplot: # inter.comb_lineplot() if args.stest > 0: inter.stest(repeat=args.stest, threshold=args.tc, mp=args.mp) # generate html inter.gen_html_comb(directory=args.o, title=args.t, align=50, args=args) # parameter = parameter + inter.parameter t1 = time.time() print("\nAll related files are saved in: " + os.path.join(current_dir, args.o, args.t)) print2(parameter, "\nTotal running time is : " + str(datetime.timedelta(seconds=round(t1 - t0)))) output_parameters(parameter, directory=args.o, folder=args.t, filename="parameters.txt") copy_em(em=args.r, directory=args.o, folder=args.t, filename="Reference_experimental_matrix.txt") copy_em(em=args.q, directory=args.o, folder=args.t, filename="Query_experimental_matrix.txt") # list_all_index(path=args.o) ########################################################################### ################### Boxplot ########################################## if args.mode == 'boxplot': print("\n################# Boxplot #################") boxplot = Boxplot(args.input, fields=[args.g, args.s, args.c], title=args.t, df=args.df) print2(parameter, "\nStep 1/5: Combining all regions") boxplot.combine_allregions() print2(parameter, " " + str(len(boxplot.all_bed)) + " regions from all bed files are combined.") t1 = time.time() print2(parameter, " --- 
finished in {0} secs\n".format(round(t1 - t0))) # Coverage of reads on all_bed print2(parameter, "Step 2/5: Calculating coverage of each bam file on all regions") boxplot.bedCoverage() t2 = time.time() print2(parameter, " --- finished in {0} (H:M:S)\n".format(datetime.timedelta(seconds=round(t2 - t1)))) # Quantile normalization print2(parameter, "Step 3/5: Quantile normalization of all coverage table") if args.nqn: print2(parameter, " No quantile normalization.") boxplot.norm_table = boxplot.all_table else: boxplot.quantile_normalization() t3 = time.time() print2(parameter, " --- finished in {0} secs\n".format(round(t3 - t2))) # Generate individual table for each bed print2(parameter, "Step 4/5: Constructing different tables for box plot") boxplot.tables_for_plot() # if args.table: boxplot.print_plot_table(directory = args.o, folder = args.t) t4 = time.time() print2(parameter, " --- finished in {0} secs\n".format(round(t4 - t3))) # Plotting print2(parameter, "Step 5/5: Plotting") boxplot.group_tags(groupby=args.g, sortby=args.s, colorby=args.c) boxplot.group_data(directory=args.o, folder=args.t, log=args.nlog) boxplot.color_map(colorby=args.c, definedinEM=args.color) boxplot.plot(title=args.t, logT=args.nlog, scol=args.scol, ylim=args.ylim, pw=args.pw, ph=args.ph) if args.table: boxplot.print_plot_table(directory=args.o, folder=args.t) output(f=boxplot.fig, directory=args.o, folder=args.t, filename="boxplot", extra=matplotlib.pyplot.gci(), pdf=True, show=args.show) # HTML boxplot.gen_html(args.o, args.t, align=50) t5 = time.time() print2(parameter, " --- finished in {0} secs\n".format(round(t5 - t4))) print2(parameter, "Total running time is: " + str(datetime.timedelta(seconds=round(t5 - t0))) + " (H:M:S)\n") print("\nAll related files are saved in: " + os.path.join(current_dir, args.o, args.t)) output_parameters(parameter, directory=args.o, folder=args.t, filename="parameters.txt") copy_em(em=args.input, directory=args.o, folder=args.t) list_all_index(path=args.o) 
################### Lineplot ######################################### if args.mode == 'lineplot': if args.scol and args.srow: print("** Err: -scol and -srow cannot be used simutaneously.") sys.exit(1) print("\n################ Lineplot #################") # Read experimental matrix t0 = time.time() if "reads" not in (args.g, args.col, args.c, args.row): print("Please add 'reads' tag as one of grouping, sorting, or coloring argument.") sys.exit(1) # if "regions" not in (args.col, args.c, args.row): # print("Please add 'regions' tag as one of grouping, sorting, or coloring argument.") # sys.exit(1) if not os.path.isfile(args.input): print("Please check the input experimental matrix again. The given path is wrong.") sys.exit(1) print2(parameter, "Parameters:\tExtend length:\t" + str(args.e)) print2(parameter, "\t\tRead size:\t" + str(args.rs)) print2(parameter, "\t\tBin size:\t" + str(args.bs)) print2(parameter, "\t\tStep size:\t" + str(args.ss)) print2(parameter, "\t\tCenter mode:\t" + str(args.center + "\n")) lineplot = Lineplot(em_path=args.input, title=args.t, annotation=args.ga, organism=args.organism, center=args.center, extend=args.e, rs=args.rs, bs=args.bs, ss=args.ss, df=args.df, dft=args.dft, fields=[args.g, args.col, args.row, args.c], test=args.test, sense=args.sense, strand=args.strand, flipnegative=args.flip_negative, outside=args.extend_outside, add_number=args.add_region_number) # Processing the regions by given parameters print2(parameter, "Step 1/3: Processing regions by given parameters") lineplot.relocate_bed() t1 = time.time() print2(parameter, "\t--- finished in {0} secs".format(str(round(t1 - t0)))) if args.mp > 0: print2(parameter, "\nStep 2/3: Calculating the coverage to all reads and averaging with multiprocessing ") else: print2(parameter, "\nStep 2/3: Calculating the coverage to all reads and averaging") lineplot.group_tags(groupby=args.g, rowby=args.row, columnby=args.col, colorby=args.c) lineplot.gen_cues() 
lineplot.coverage(sortby=args.row, mp=args.mp, log=args.log, average=args.average) t2 = time.time() print2(parameter, "\t--- finished in {0} (H:M:S)".format(str(datetime.timedelta(seconds=round(t2 - t1))))) # Plotting print2(parameter, "\nStep 3/3: Plotting the lineplots") lineplot.colormap(colorby=args.c, definedinEM=args.color) lineplot.plot(output=args.o, printtable=args.table, ylog=args.log, scol=args.scol, srow=args.srow, w=args.pw, h=args.ph) for i, f in enumerate(lineplot.fig): output(f=f, directory=args.o, folder=args.t, filename="lineplot_" + lineplot.group_tags[i], extra=matplotlib.pyplot.gci(), pdf=True, show=args.show) lineplot.gen_html(args.o, args.t) t3 = time.time() print2(parameter, "\t--- finished in {0} secs".format(str(round(t3 - t2)))) print2(parameter, "\nTotal running time is : " + str(datetime.timedelta(seconds=round(t3 - t0))) + "(H:M:S)\n") print("\nAll related files are saved in: " + os.path.join(dir, args.o, args.t)) output_parameters(parameter, directory=args.o, folder=args.t, filename="parameters.txt") copy_em(em=args.input, directory=args.o, folder=args.t) list_all_index(path=args.o) ################### Heatmap ########################################## if args.mode == 'heatmap': print("\n################# Heatmap #################") # Most part of heat map are the same as lineplot, so it share the same class as lineplot # Read experimental matrix t0 = time.time() if "reads" not in (args.g, args.col, args.c, args.row): print("Please add 'reads' tag as one of grouping, sorting, or coloring argument.") sys.exit(1) # if "regions" not in (args.g, args.col, args.c, args.row): # print("Please add 'regions' tag as one of grouping, sorting, or coloring argument.") # sys.exit(1) print2(parameter, "Parameters:\tExtend length:\t" + str(args.e)) print2(parameter, "\t\tRead size:\t" + str(args.rs)) print2(parameter, "\t\tBin size:\t" + str(args.bs)) print2(parameter, "\t\tStep size:\t" + str(args.ss)) print2(parameter, "\t\tCenter mode:\t" + 
str(args.center + "\n")) lineplot = Lineplot(em_path=args.input, title=args.t, annotation=args.ga, organism=args.organism, center=args.center, extend=args.e, rs=args.rs, bs=args.bs, ss=args.ss, df=False, fields=[args.col, args.row, args.c], dft=args.dft, flipnegative=False, sense=False, strand=False, test=False) # Processing the regions by given parameters print2(parameter, "Step 1/4: Processing regions by given parameters") lineplot.relocate_bed() t1 = time.time() print2(parameter, " --- finished in {0} secs".format(str(round(t1 - t0)))) if args.mp: print2(parameter, "\nStep 2/4: Calculating the coverage to all reads and averaging with multiprocessing ") else: print2(parameter, "\nStep 2/4: Calculating the coverage to all reads and averaging") lineplot.group_tags(groupby=args.col, sortby=args.row, colorby=args.c) lineplot.gen_cues() lineplot.coverage(sortby=args.s, heatmap=True, logt=args.log, mp=args.mp) t2 = time.time() print2(parameter, " --- finished in {0} (h:m:s)".format(str(datetime.timedelta(seconds=round(t2 - t1))))) # Sorting print2(parameter, "\nStep 3/4: Sorting the data for heatmap") lineplot.hmsort(sort=args.sort) t3 = time.time() print2(parameter, " --- finished in {0} (h:m:s)".format(str(datetime.timedelta(seconds=round(t3 - t2))))) # Plotting print2(parameter, "\nStep 4/4: Plotting the heatmap") lineplot.hmcmlist(colorby=args.c, definedinEM=args.color) lineplot.heatmap(args.log) for i, name in enumerate(lineplot.hmfiles): output(f=lineplot.figs[i], directory=args.o, folder=args.t, filename=name, pdf=True, show=args.show) lineplot.gen_htmlhm(args.o, args.t) t4 = time.time() print2(parameter, " --- finished in {0} secs".format(str(round(t4 - t3)))) print2(parameter, "\nTotal running time is : " + str(datetime.timedelta(seconds=round(t4 - t0))) + "(H:M:S)\n") print("\nAll related files are saved in: " + os.path.join(current_dir, args.o, args.t)) output_parameters(parameter, directory=args.o, folder=args.t, filename="parameters.txt") 
copy_em(em=args.input, directory=args.o, folder=args.t) list_all_index(path=args.o) ################### Venn Diagram ########################################## if args.mode == 'venn': print("\n################# Venn Diagram ###############") if not os.path.exists(os.path.join(args.o, args.t)): os.makedirs(os.path.join(args.o, args.t)) sets = [s for s in [args.s1, args.s2, args.s3, args.s4] if s] venn = Venn(sets=sets, organism=args.organism) f = venn.venn_diagram(directory=args.o, title=args.t, labels=[args.l1, args.l2, args.l3, args.l4]) output(f=f, directory=args.o, folder=args.t, filename="venn", pdf=True) ################### Integration ########################################## if args.mode == 'integration': print("\n################# Integration ###############") if args.ihtml: list_all_index(path=args.o)
66.764972
263
0.581803
6,795
59,087
4.966004
0.091685
0.053461
0.043119
0.047297
0.656176
0.62989
0.58849
0.519115
0.476944
0.452495
0
0.007264
0.261462
59,087
884
264
66.840498
0.766007
0.037995
0
0.309353
0
0.051799
0.318442
0.00543
0.002878
0
0
0
0
1
0.001439
false
0.005755
0.025899
0
0.027338
0.143885
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
417ffbd4ba1e57bb20138f983895a6ec4d8a43b6
324
py
Python
server/quiz/urls.py
namanwfhsolve/Quiz-API
dcac94da3fb6ae514bbb4feb1abeb595903ce9aa
[ "MIT" ]
null
null
null
server/quiz/urls.py
namanwfhsolve/Quiz-API
dcac94da3fb6ae514bbb4feb1abeb595903ce9aa
[ "MIT" ]
1
2021-07-11T18:37:34.000Z
2021-07-11T18:37:34.000Z
server/quiz/urls.py
namanwfhsolve/Quiz-API
dcac94da3fb6ae514bbb4feb1abeb595903ce9aa
[ "MIT" ]
null
null
null
from django.urls import path from .views import QuizView, QuizDetailView, QuizCreateView urlpatterns = [ path("quizes/", QuizView.as_view(), name="quiz_list_create"), path("create/", QuizCreateView.as_view(), name="quiz_create"), path("quiz/<int:pk>/", QuizDetailView.as_view(), name="quiz_retrive_update"), ]
32.4
81
0.722222
40
324
5.65
0.525
0.079646
0.132743
0.185841
0
0
0
0
0
0
0
0
0.114198
324
9
82
36
0.787456
0
0
0
0
0
0.228395
0
0
0
0
0
0
1
0
false
0
0.285714
0
0.285714
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
41801ea050275d49201c9b87f491278b3faaa4f9
23
py
Python
mGdi32/__init__.py
SkyLined/mWindowsSDK
931cc9d30316893662a3dc4e200dabe97122d216
[ "CC-BY-4.0" ]
2
2019-08-01T15:08:25.000Z
2021-01-30T07:29:34.000Z
mGdi32/__init__.py
SkyLined/mWindowsSDK
931cc9d30316893662a3dc4e200dabe97122d216
[ "CC-BY-4.0" ]
null
null
null
mGdi32/__init__.py
SkyLined/mWindowsSDK
931cc9d30316893662a3dc4e200dabe97122d216
[ "CC-BY-4.0" ]
null
null
null
from .mGdi32 import *;
11.5
22
0.695652
3
23
5.333333
1
0
0
0
0
0
0
0
0
0
0
0.105263
0.173913
23
1
23
23
0.736842
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
418053ee5e7fc1cc778869d9ebfee56bc7e30f8e
190
py
Python
built-in/TensorFlow/Official/nlp/Transformer_for_TensorFlow/noahnmt/attentions/__init__.py
Huawei-Ascend/modelzoo
df51ed9c1d6dbde1deef63f2a037a369f8554406
[ "Apache-2.0" ]
null
null
null
built-in/TensorFlow/Official/nlp/Transformer_for_TensorFlow/noahnmt/attentions/__init__.py
Huawei-Ascend/modelzoo
df51ed9c1d6dbde1deef63f2a037a369f8554406
[ "Apache-2.0" ]
3
2021-03-31T20:15:40.000Z
2022-02-09T23:50:46.000Z
built-in/TensorFlow/Official/nlp/Transformer_for_TensorFlow/noahnmt/attentions/__init__.py
Huawei-Ascend/modelzoo
df51ed9c1d6dbde1deef63f2a037a369f8554406
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # Copyright Huawei Noah's Ark Lab. from noahnmt.attentions import dot_attention from noahnmt.attentions import dot_prod_attention from noahnmt.attentions import sum_attention
31.666667
49
0.847368
28
190
5.607143
0.607143
0.210191
0.401274
0.515924
0.66879
0
0
0
0
0
0
0.005882
0.105263
190
6
50
31.666667
0.917647
0.236842
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
4180dd4796db00aa27180113a9c270adce7f7aee
8,390
py
Python
viz/uncertainty.py
WadhwaniAI/covid-modelling
db9f89bfbec392ad4de6b4583cfab7c3d823c1c9
[ "MIT" ]
3
2021-06-23T10:27:11.000Z
2022-02-09T07:50:42.000Z
viz/uncertainty.py
WadhwaniAI/covid-modelling
db9f89bfbec392ad4de6b4583cfab7c3d823c1c9
[ "MIT" ]
3
2021-06-23T09:36:29.000Z
2022-01-13T03:38:16.000Z
viz/uncertainty.py
WadhwaniAI/covid-modelling
db9f89bfbec392ad4de6b4583cfab7c3d823c1c9
[ "MIT" ]
null
null
null
import datetime from copy import copy from datetime import timedelta import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sns from adjustText import adjust_text from matplotlib.lines import Line2D from matplotlib.patches import Patch from utils.generic.enums import Columns from viz.utils import axis_formatter def plot_ptiles(predictions_dict, vline=None, which_compartments=[Columns.active], plot_individual_curves=True, log_scale=False, truncate_series=True, left_truncation_buffer=30, ci_lb=2.5, ci_ub=97.5): predictions = copy(predictions_dict['forecasts']) try: del predictions['best'] except: pass df_master = list(predictions.values())[0] for df in list(predictions.values())[1:]: if isinstance(df, pd.DataFrame): df = df.reset_index() else: df = df['df_prediction'] df_master = pd.concat([df_master, df], ignore_index=True) train_period = predictions_dict['run_params']['split']['train_period'] val_period = predictions_dict['run_params']['split']['val_period'] val_period = 0 if val_period is None else val_period df_true = predictions_dict['df_district'] if truncate_series: df_true = df_true[df_true['date'] > (list(predictions.values())[0]['date'].iloc[0] - timedelta(days=left_truncation_buffer))] df_true.reset_index(drop=True, inplace=True) plots = {} for compartment in which_compartments: fig, ax = plt.subplots(figsize=(12, 12)) texts = [] ax.plot(df_true[Columns.date.name].to_numpy(), df_true[compartment.name].to_numpy(), '-o', color='C0', label=f'{compartment.label} (Observed)') if plot_individual_curves: for _, (ptile, df_prediction) in enumerate(predictions.items()): sns.lineplot(x=Columns.date.name, y=compartment.name, data=df_prediction, ls='-', label=f'{compartment.label} Percentile :{ptile}') texts.append(plt.text( x=df_prediction[Columns.date.name].iloc[-1], y=df_prediction[compartment.name].iloc[-1], s=ptile)) else: ax.plot(df_master[Columns.date.name], df_master[compartment.name], ls='-', label=f'{compartment.label}') 
ax.fill_between(predictions[ci_lb][Columns.date.name], predictions[ci_lb][compartment.name], predictions[ci_ub][compartment.name], ls='-', label=f'{compartment.label}') if vline: plt.axvline(datetime.datetime.strptime(vline, '%Y-%m-%d')) ax.axvline(x=list(predictions.values())[0].iloc[0, :]['date'], ls=':', color='brown', label='Train starts') ax.axvline(x=list(predictions.values())[0].iloc[train_period+val_period-1, :]['date'], ls=':', color='black', label='Data Last Date') ax.set_xlim(ax.get_xlim()[0], ax.get_xlim()[1] + 10) adjust_text(texts, arrowprops=dict(arrowstyle="->", color='r', lw=0.5)) axis_formatter(ax, log_scale=log_scale) fig.suptitle('Forecast of all deciles for {} '.format(compartment.name), fontsize=16) plots[compartment] = fig return plots def plot_ptiles_reichlab(df_comb, model, location, target='inc death', plot_true=False, plot_point=True, plot_individual_curves=True, ci_lb=2.5, ci_ub=97.5, color='C0', ax=None, ): compartment = 'deceased' if 'death' in target else 'total' mode = 'inc' if 'inc' in target else 'cum' compartment = Columns.from_name(compartment) df_plot = copy(df_comb.loc[(df_comb['model'] == model) & ( df_comb['location'] == location), :]) df_plot = df_plot[[target in x for x in df_plot['target']]] if ax is None: fig, ax = plt.subplots(figsize=(12, 12)) else: fig = None if plot_true: df_true = df_plot.groupby('target_end_date').mean().reset_index() ax.plot(df_true['target_end_date'].to_numpy(), df_true['true_value'].to_numpy(), '--o', color=compartment.color) if plot_point: df_point = df_plot[df_plot['type'] == 'point'] ax.plot(df_point['target_end_date'].to_numpy(), df_point['forecast_value'].to_numpy(), '-o', color=color) texts = [] df_quantiles = df_plot[df_plot['type'] == 'quantile'] quantiles = df_quantiles.groupby('quantile').sum().index if plot_individual_curves: for _, qtile in enumerate(quantiles): df_qtile = df_quantiles[df_quantiles['quantile'] == qtile].infer_objects() label = round(qtile*100) if qtile * \ 100 % 1 < 
1e-8 else round(qtile*100, 1) sns.lineplot(x='target_end_date', y='value', data=df_qtile, ls='-') texts.append(plt.text( x=df_qtile['target_end_date'].iloc[-1], y=df_qtile['value'].iloc[-1], s=label)) else: df_ci_lb = df_quantiles[df_quantiles['quantile'] == ci_lb*0.01].infer_objects() df_ci_ub = df_quantiles[df_quantiles['quantile'] == ci_ub*0.01].infer_objects() ax.fill_between(df_ci_ub['target_end_date'], df_ci_lb['forecast_value'], df_ci_ub['forecast_value'], color=color, alpha=0.1, label=f'{model} 95% CI') ax.set_xlim(ax.get_xlim()[0], ax.get_xlim()[1] + 10) adjust_text(texts, arrowprops=dict(arrowstyle="->", color='r', lw=0.5)) axis_formatter(ax) legend_elements = [] if plot_true: legend_elements += [ Line2D([0], [0], ls='--', marker='o', color=compartment.color, label=f'{target.title()} (Observed)')] if plot_point: legend_elements += [ Line2D([0], [0], ls='-', marker='o', color=color, label=f'{model} {target.title()} Point Forecast')] if plot_individual_curves: legend_elements += [ Line2D([0], [0], ls='-', color='blue', label=f'{model} {target.title()} Percentiles'), ] else: legend_elements += [ Patch(facecolor=color, edgecolor=color, alpha=0.1, label=f'{model} {target.title()} 95% CI'), ] ax.legend(handles=legend_elements) ax.set_title('Forecast for {}, {}, {} {}'.format(model, location, mode.title(), compartment.label), fontsize=16) return fig, ax def plot_beta_loss(dict_of_trials): fig, ax = plt.subplots(figsize=(12, 8)) ax.plot(list(dict_of_trials.keys()), list(dict_of_trials.values())) ax.set_ylabel('Loss value') ax.set_xlabel('Beta value') ax.set_title('How the beta loss changes with beta') return fig, ax def plot_chains(mcmc, figsize=(20, 20)): """Summary Args: mcmc (MCMC): Description out_dir (str): Description """ params = [*mcmc.prior_ranges.keys()] for param in params: plt.figure(figsize=figsize) plt.subplot(2,1,1) for i, chain in enumerate(mcmc.chains): df = pd.DataFrame(chain[0]) samples = np.array(df[param]) plt.plot(list(range(len(samples))), 
samples, label='chain {}'.format(i+1)) plt.xlabel("iterations") plt.title("Accepted {} samples".format(param)) plt.legend() plt.subplot(2,1,2) for i, chain in enumerate(mcmc.chains): df = pd.DataFrame(chain[1]) try: samples = np.array(df[param]) plt.scatter(list(range(len(samples))), samples, s=4, label='chain {}'.format(i+1)) except: continue plt.xlabel("iterations") plt.title("Rejected {} samples".format(param)) plt.legend() for param in params: plt.figure(figsize=(20, 10)) for i, chain in enumerate(mcmc.chains): df = pd.DataFrame(chain[0]) samples = np.array(df[param]) mean = np.mean(samples) sns.kdeplot(np.array(samples), bw=0.005*mean) plt.title("Density plot of {} samples".format(param)) plt.show()
40.728155
104
0.586651
1,069
8,390
4.434986
0.213283
0.012656
0.016452
0.018561
0.325881
0.229066
0.166632
0.115587
0.094917
0.07973
0
0.018687
0.266508
8,390
205
105
40.926829
0.751706
0.00882
0
0.269006
0
0
0.109556
0
0
0
0
0
0
1
0.023392
false
0.005848
0.070175
0
0.111111
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
418199c796f2780deea7cf4549a397f4a304c2c1
3,299
py
Python
bookqlub_api/tests/test_user_schema.py
danifujii/bookqlub
c6543df63ccc4f403a86121224b26b12d587553b
[ "MIT" ]
null
null
null
bookqlub_api/tests/test_user_schema.py
danifujii/bookqlub
c6543df63ccc4f403a86121224b26b12d587553b
[ "MIT" ]
26
2020-06-27T19:16:13.000Z
2022-02-27T05:48:21.000Z
bookqlub_api/tests/test_user_schema.py
danifujii/bookqlub
c6543df63ccc4f403a86121224b26b12d587553b
[ "MIT" ]
null
null
null
import bcrypt import jwt from bookqlub_api import utils from bookqlub_api.schema import models from tests import base_test class TestUserSchema(base_test.BaseTestSchema): mutation = """ mutation CreateUser($full_name: String!, $username: String!, $pass: String!) { createUser(fullName: $full_name, username: $username, password: $pass) { token } } """ def test_user_creation(self): # Create new user variables = {"full_name": "Daniel", "username": "daniel", "pass": "hello"} resp = self.graphql_request(self.mutation, variables) token = resp.get("data", {}).get("createUser").get("token") self.assertTrue(token) self.assertIn( "userId", jwt.decode(token, utils.config["app"]["secret"], algorithms=["HS256"]) ) # Check user was saved correctly resp_data = self.graphql_request( "{ user { username } }", # Cannot use response token because SQLAlchemy mock doesn't set the new user ID headers=self.get_headers_with_auth(), ) resp_data = resp_data.get("data", {}) user = resp_data.get("user") self.assertTrue(user) self.assertEqual(user.get("username"), variables.get("username")) def test_user_already_exists(self): variables = {"full_name": "Daniel", "username": "daniel", "pass": "hello"} _ = self.graphql_request(self.mutation, variables) variables = {"full_name": "Gabe", "username": "daniel", "pass": "hello2"} errors = self.graphql_request(self.mutation, variables).get("errors") self.assertTrue(errors) self.assertEqual(errors[0].get("message"), "Username already exists") class TestLoginSchema(base_test.BaseTestSchema): login_mutation = """ mutation Login($username: String!, $pass: String!) 
{ login(username: $username, password: $pass) { token } } """ password = "hello" def setUp(self): super().setUp() password = bcrypt.hashpw(self.password.encode(), bcrypt.gensalt()).decode("utf8") self.session.add(models.User(username="dan", full_name="Daniel", password=password)) self.session.commit() def test_user_login(self): variables = {"username": "dan", "pass": self.password} resp = self.graphql_request(self.login_mutation, variables) token = resp.get("data", {}).get("login").get("token") self.assertIn( "userId", jwt.decode(token, utils.config["app"]["secret"], algorithms=["HS256"]) ) def test_invalid_user(self): variables = {"username": "daniel", "pass": self.password} resp = self.graphql_request(self.login_mutation, variables) errors = resp.get("errors") self.assertTrue(errors) self.assertEqual(errors[0].get("message"), "Invalid username or password") def test_invalid_password(self): variables = {"username": "dan", "pass": "someInvalidPassword"} resp = self.graphql_request(self.login_mutation, variables) errors = resp.get("errors") self.assertTrue(errors) self.assertEqual(errors[0].get("message"), "Invalid username or password")
36.252747
92
0.61746
354
3,299
5.638418
0.251412
0.038577
0.063126
0.066132
0.471944
0.410822
0.360721
0.333166
0.287074
0.287074
0
0.004376
0.237951
3,299
90
93
36.655556
0.789578
0.037587
0
0.289855
0
0
0.260801
0.006938
0
0
0
0
0.15942
1
0.086957
false
0.231884
0.072464
0
0.231884
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
4182ec15967a7a53ca35c791ffcf894022ac1a7d
618
py
Python
maya/app/renderSetup/views/lightEditor/utils.py
arjun-namdeo/py_stubs
605bb167e239978f5417f3f1fc1f5c12e2a243cc
[ "MIT" ]
null
null
null
maya/app/renderSetup/views/lightEditor/utils.py
arjun-namdeo/py_stubs
605bb167e239978f5417f3f1fc1f5c12e2a243cc
[ "MIT" ]
null
null
null
maya/app/renderSetup/views/lightEditor/utils.py
arjun-namdeo/py_stubs
605bb167e239978f5417f3f1fc1f5c12e2a243cc
[ "MIT" ]
null
null
null
def createDynamicAttribute(nodeName, attrName, attrType, value): pass def findNodeFromMaya(nodeName): pass def findSelectedNodeFromMaya(): pass def resolveIconFile(filename): """ Resolve filenames using the XBMLANGPATH icon searchpath or look through the embedded Qt resources (if the path starts with a ':'). :Parameters: filename (string) filename path or resource path (uses embedded Qt resources if starts with a ':' :Return: (string) Fully resolved filename, or empty string if file is not resolved. """ pass
20.6
99
0.65534
67
618
6.044776
0.597015
0.051852
0.093827
0.103704
0
0
0
0
0
0
0
0
0.278317
618
29
100
21.310345
0.908072
0.580906
0
0.5
0
0
0
0
0
0
0
0
0
1
0.5
false
0.5
0
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
2
41846f748394eaaab9d178f6abb98d1845018fdd
395
py
Python
app/core/migrations/0003_auto_20201129_1357.py
bondeveloper/maischool
16bf2afe99d26caa067b7912e88839639cf2191e
[ "MIT" ]
null
null
null
app/core/migrations/0003_auto_20201129_1357.py
bondeveloper/maischool
16bf2afe99d26caa067b7912e88839639cf2191e
[ "MIT" ]
null
null
null
app/core/migrations/0003_auto_20201129_1357.py
bondeveloper/maischool
16bf2afe99d26caa067b7912e88839639cf2191e
[ "MIT" ]
null
null
null
# Generated by Django 3.1.3 on 2020-11-29 13:57 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0002_auto_20201129_1325'), ] operations = [ migrations.AlterField( model_name='attachment', name='file', field=models.FileField(upload_to='attachments'), ), ]
20.789474
60
0.602532
42
395
5.547619
0.833333
0
0
0
0
0
0
0
0
0
0
0.109155
0.281013
395
18
61
21.944444
0.711268
0.113924
0
0
1
0
0.149425
0.066092
0
0
0
0
0
1
0
false
0
0.083333
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
41850d2f20a25e9d6a0751208d08a9bcdd244bde
3,429
py
Python
knowledge/migrations/0001_initial.py
darth-dodo/hustlers-den
74c375da1b82c6ad4d4f2b763bc8e47a3f73417d
[ "MIT" ]
3
2019-07-27T08:42:27.000Z
2020-04-02T14:26:35.000Z
knowledge/migrations/0001_initial.py
darth-dodo/hustlers-den
74c375da1b82c6ad4d4f2b763bc8e47a3f73417d
[ "MIT" ]
34
2019-08-14T19:05:56.000Z
2022-02-10T10:54:45.000Z
knowledge/migrations/0001_initial.py
darth-dodo/hustlers-den
74c375da1b82c6ad4d4f2b763bc8e47a3f73417d
[ "MIT" ]
null
null
null
# Generated by Django 2.0.6 on 2018-06-10 06:46 import django.db.models.deletion from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [] operations = [ migrations.CreateModel( name="Category", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("is_active", models.BooleanField(default=True)), ("created_at", models.DateTimeField(auto_now_add=True)), ("modified_at", models.DateTimeField(auto_now=True)), ("name", models.CharField(max_length=100)), ("description", models.TextField(blank=True, null=True)), ("slug", models.CharField(max_length=100)), ], options={"verbose_name_plural": "categories", "db_table": "category",}, ), migrations.CreateModel( name="ExpertiseLevel", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("is_active", models.BooleanField(default=True)), ("created_at", models.DateTimeField(auto_now_add=True)), ("modified_at", models.DateTimeField(auto_now=True)), ("name", models.CharField(max_length=100)), ("slug", models.CharField(max_length=100)), ], options={"db_table": "expertise_level",}, ), migrations.CreateModel( name="KnowledgeStore", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("is_active", models.BooleanField(default=True)), ("created_at", models.DateTimeField(auto_now_add=True)), ("modified_at", models.DateTimeField(auto_now=True)), ("name", models.CharField(max_length=100)), ("url", models.URLField(blank=True, null=True)), ("description", models.TextField(blank=True, null=True)), ("difficulty_sort", models.PositiveIntegerField(default=1)), ( "categories", models.ManyToManyField( related_name="knowledge_store", to="knowledge.Category" ), ), ( "expertise_level", models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="knowledge_store", 
to="knowledge.ExpertiseLevel", ), ), ], options={"db_table": "knowledge_store",}, ), ]
36.870968
83
0.449694
257
3,429
5.817121
0.29572
0.032107
0.084281
0.100334
0.616054
0.616054
0.567893
0.459532
0.459532
0.459532
0
0.016087
0.438029
3,429
92
84
37.271739
0.75973
0.013123
0
0.658824
1
0
0.114134
0.007096
0
0
0
0
0
1
0
false
0
0.023529
0
0.070588
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
418670d002d663322a8f470f2873865170a66103
1,908
py
Python
ScoringEngine/ScoringEngine/engine/options/__init__.py
norserage/scoring
b3f5c199b64bb3a78342924349fe6a713257b1a0
[ "MIT" ]
1
2020-07-29T16:12:02.000Z
2020-07-29T16:12:02.000Z
ScoringEngine/ScoringEngine/engine/options/__init__.py
norserage/scoring
b3f5c199b64bb3a78342924349fe6a713257b1a0
[ "MIT" ]
null
null
null
ScoringEngine/ScoringEngine/engine/options/__init__.py
norserage/scoring
b3f5c199b64bb3a78342924349fe6a713257b1a0
[ "MIT" ]
null
null
null
""" Copyright 2016 Brandon Warner Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ class Option(): def __init__(self, optional=True): self.optional = optional self.type = self.__class__.__name__ def value(self, input): return input def parse(self, input): return input class String(Option): pass class Integer(Option): pass class EnumS(String): def __init__(self, options, optional=True): self.optional = optional self.options = options self.type = 'Enum' class EnumI(Integer): def __init__(self, options, optional=True): self.optional = optional self.options = options self.type = 'Enum' class JSON(Option): def value(self, input): import json import pprint try: pprint.pprint(input) return json.dumps(input) except Exception as e: return "" def parse(self, input): import json return json.loads(input) class PasswordDB(EnumS): def __init__(self, optional=True): import ScoringEngine.core.db.tables session = ScoringEngine.core.db.Session() self.optional = optional self.options = [] for o in session.query(ScoringEngine.core.db.tables.PasswordDatabase.name).distinct(): self.options.append(o[0]) self.type = 'Enum'
25.783784
94
0.648847
238
1,908
5.10084
0.428571
0.049423
0.036244
0.079077
0.223229
0.169687
0.140033
0.140033
0.140033
0.140033
0
0.006433
0.266771
1,908
73
95
26.136986
0.86133
0.290356
0
0.534884
0
0
0.008922
0
0
0
0
0
0
1
0.186047
false
0.093023
0.093023
0.046512
0.55814
0.046512
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
2
4186a83376db6719d09806172772089d522d0c98
12,828
py
Python
Game-TestHuman.py
brian1985/rl-wall-avoider
c6c5d87f3693bfd27f39a4015f361773bf219cd3
[ "MIT" ]
null
null
null
Game-TestHuman.py
brian1985/rl-wall-avoider
c6c5d87f3693bfd27f39a4015f361773bf219cd3
[ "MIT" ]
null
null
null
Game-TestHuman.py
brian1985/rl-wall-avoider
c6c5d87f3693bfd27f39a4015f361773bf219cd3
[ "MIT" ]
null
null
null
from random import randint, choice from collections import deque from time import sleep import pygame, time import numpy as np """ Code to use arduino as input import serial import re board = serial.Serial("/dev/ttyACM0") data = board.readline() data = data.decode() data = re.sub("[^0-9|]", "", data) xyz = data.split("|") x/y default is 512, 0 is left/down, 1024 right/up z=0/1 with 1 unpressed and 0 pressed """ pygame.init() ###################################################################################### class Field: def __init__(self, height=10, width=5): self.width = width self.height = height self.body = np.zeros(shape=(self.height, self.width)) def update_field(self,walls, player): try: # Clear the field: self.body = np.zeros(shape=(self.height, self.width)) # Put the walls on the field: for wall in walls: if not wall.out_of_range : self.body[wall.y:min(wall.y+wall.height,self.height),:] = wall.body # Put the player on the field: self.body[player.y:player.y+player.height, player.x:player.x+player.width] += player.body except : pass ###################################################################################### class Wall: def __init__(self, height = 5, width=100, hole_width = 20, y = 0, speed = 1, field = None): self.height = height self.width = width self.hole_width = hole_width self.y = y self.speed = speed self.field = field self.body_unit = 1 self.body = np.ones(shape = (self.height, self.width))*self.body_unit self.out_of_range = False self.create_hole() def create_hole(self): hole = np.zeros(shape = (self.height, self.hole_width)) hole_pos = randint(0,self.width-self.hole_width) self.body[ : , hole_pos:hole_pos+self.hole_width] = 0 def move(self): self.y += self.speed self.out_of_range = True if ((self.y + self.height) > self.field.height) else False ###################################################################################### class Player: def __init__(self, height = 5, max_width = 10 , width=2, x = 0, y = 0, speed = 2): self.height 
= height self.max_width = max_width self.width = width self.x = x self.y = y self.speed = speed self.body_unit = 2 self.body = np.ones(shape = (self.height, self.width))*self.body_unit self.stamina = 20 self.max_stamina = 20 def move(self, field, direction = 0 ): ''' Moves the player : - No change = 0 - left, if direction = 1 - right, if direction = 2 ''' val2dir = {0:0 , 1:-1 , 2:1} direction = val2dir[direction] next_x = (self.x + self.speed*direction) if not (next_x + self.width > field.width or next_x < 0): self.x += self.speed*direction self.stamina -= 1 def change_width(self, action = 0): ''' Change the player's width: - No change = 0 - narrow by one unit = 3 - widen by one unit = 4 ''' val2act = {0:0 , 3:-1 , 4:1} action = val2act[action] new_width = self.width+action player_end = self.x + new_width if new_width <= self.max_width and new_width > 0 and player_end <= self.max_width: self.width = new_width self.body = np.ones(shape = (self.height, self.width))*self.body_unit ###################################################################################### class Environment: P_HEIGHT = 2 # Height of the player F_HEIGHT = 20 # Height of the field W_HEIGHT = 2 # Height of the walls WIDTH = 10 # Width of the field and the walls MIN_H_WIDTH = 2 # Minimum width of the holes MAX_H_WIDTH = 6 # Maximum width of the holes MIN_P_WIDTH = 2 # Minimum Width of the player MAX_P_WIDTH = 6 # Maximum Width of the player HEIGHT_MUL = 30 # Height Multiplier (used to draw np.array as blocks in pygame ) WIDTH_MUL = 40 # Width Multiplier (used to draw np.array as blocks in pygame ) WINDOW_HEIGHT = (F_HEIGHT+1) * HEIGHT_MUL # Height of the pygame window WINDOW_WIDTH = (WIDTH) * WIDTH_MUL # Widh of the pygame window ENVIRONMENT_SHAPE = (F_HEIGHT,WIDTH,1) ACTION_SPACE = [0,1,2,3,4] ACTION_SPACE_SIZE = len(ACTION_SPACE) PUNISHMENT = -100 # Punishment increment REWARD = 10 # Reward increment score = 0 # Initial Score MOVE_WALL_EVERY = 4 # Every how many frames the wall moves. 
MOVE_PLAYER_EVERY = 1 # Every how many frames the player moves. frames_counter = 0 def __init__(self): # Colors: self.BLACK = (25,25,25) self.WHITE = (255,255,255) self.RED = (255, 80, 80) self.BLUE = (80, 80, 255) self.field = self.walls = self.player = None self.current_state = self.reset() self.val2color = {0:self.WHITE, self.walls[0].body_unit:self.BLACK, self.player.body_unit:self.BLACK, self.MAX_VAL:self.RED} def reset(self): self.score = 0 self.frames_counter = 0 self.game_over = False self.field = Field(height=self.F_HEIGHT, width=self.WIDTH ) w1 = Wall( height = self.W_HEIGHT, width=self.WIDTH, hole_width = randint(self.MIN_H_WIDTH,self.MAX_H_WIDTH), field = self.field) self.walls = deque([w1]) p_width = randint(self.MIN_P_WIDTH,self.MAX_P_WIDTH) self.player = Player( height = self.P_HEIGHT, max_width = self.WIDTH, width = p_width, x = randint(0,self.field.width-p_width), y = int(self.field.height*0.7), speed = 1) self.MAX_VAL = self.player.body_unit + w1.body_unit # Update the field : self.field.update_field(self.walls, self.player) observation = self.field.body/self.MAX_VAL return observation def print_text(self, WINDOW = None, text_cords = (0,0), center = False, text = "", color = (0,0,0), size = 32): pygame.init() font = pygame.font.Font('freesansbold.ttf', size) text_to_print = font.render(text, True, color) textRect = text_to_print.get_rect() if center: textRect.center = text_cords else: textRect.x = text_cords[0] textRect.y = text_cords[1] WINDOW.blit(text_to_print, textRect) def step(self, action): global score_increased self.frames_counter += 1 reward = 0 # If the performed action is (move) then player.move method is called: if action in [1,2]: self.player.move(direction = action, field = self.field) # If the performed action is (change_width) then player.change_width method is called: if action in [3,4]: self.player.change_width(action = action) # Move the wall one step (one step every MOVE_WALL_EVERY frames): if self.frames_counter % 
self.MOVE_WALL_EVERY == 0: # move the wall one step self.walls[-1].move() # reset the frames counter self.frames_counter = 0 # Update the field : self.field.update_field(self.walls, self.player) # If the player passed a wall successfully increase the reward +1 if ((self.walls[-1].y) == (self.player.y + self.player.height)) and not score_increased : reward += self.REWARD self.score += self.REWARD # Increase player's stamina every time it passed a wall successfully self.player.stamina = min(self.player.max_stamina, self.player.stamina+10) # score_increased : a flag to make sure that reward increases once per wall score_increased = True # Lose Conditions : # C1 : The player hits a wall # C2 : Player's width was far thinner than hole's width # C3 : Player fully consumed its stamina (energy) lose_conds = [self.MAX_VAL in self.field.body, ((self.player.y == self.walls[-1].y) and (self.player.width < (self.walls[-1].hole_width-1))), self.player.stamina <=0] # If one lose condition or more happend, the game ends: if True in lose_conds: self.game_over = True reward = self.PUNISHMENT return self.field.body/self.MAX_VAL, reward, self.game_over # Check if a wall moved out of the scene: if self.walls[-1].out_of_range: # Create a new wall self.walls[-1] = Wall( height = self.W_HEIGHT, width = self.WIDTH, hole_width = randint(self.MIN_H_WIDTH,self.MAX_H_WIDTH), field = self.field) score_increased = False # Return New Observation , reward, game_over(bool) return self.field.body/self.MAX_VAL, reward, self.game_over def render(self, WINDOW = None, human=False): if human: ################ Check Actions ##################### action = 0 events = pygame.event.get() for event in events: if event.type == pygame.QUIT: self.game_over = True if event.type == pygame.KEYDOWN: if event.key == pygame.K_LEFT: action = 1 if event.key == pygame.K_RIGHT: action = 2 if event.key == pygame.K_UP: action = 4 if event.key == pygame.K_DOWN: action = 3 ################## Step ############################ 
_,reward, self.game_over = self.step(action) ################ Draw Environment ################### WINDOW.fill(self.WHITE) self.field.update_field(self.walls, self.player) for r in range(self.field.body.shape[0]): for c in range(self.field.body.shape[1]): pygame.draw.rect(WINDOW, self.val2color[self.field.body[r][c]], (c*self.WIDTH_MUL, r*self.HEIGHT_MUL, self.WIDTH_MUL, self.HEIGHT_MUL)) self.print_text(WINDOW = WINDOW, text_cords = (self.WINDOW_WIDTH // 2, int(self.WINDOW_HEIGHT*0.1)), text = str(self.score), color = self.RED, center = True) self.print_text(WINDOW = WINDOW, text_cords = (0, int(self.WINDOW_HEIGHT*0.9)), text = str(self.player.stamina), color = self.RED) pygame.display.update() ###################################################################################### # Make an environment object env = Environment() # Change wall speed to 3 (one step every 3 frames) env.MOVE_WALL_EVERY = 3 # Initialize some variables WINDOW = pygame.display.set_mode((env.WINDOW_WIDTH, env.WINDOW_HEIGHT)) clock = pygame.time.Clock() win = False winning_score = 100 # Repeaat the game untill the player win (got a score of winning_score) or quits the game. while not win: score_increased = False game_over = False _ = env.reset() pygame.display.set_caption("Game") while not game_over: clock.tick(27) env.render(WINDOW = WINDOW, human=True) game_over = env.game_over ##################################################### sleep(0.5) WINDOW.fill(env.WHITE) if env.score >= winning_score: win = True env.print_text(WINDOW = WINDOW, text_cords = (env.WINDOW_WIDTH // 2, env.WINDOW_HEIGHT// 2), text = f"You Win - Score : {env.score}", color = env.RED, center = True) else: env.print_text(WINDOW = WINDOW, text_cords = (env.WINDOW_WIDTH // 2, env.WINDOW_HEIGHT// 2), text = f"Game Over - Score : {env.score}", color = env.RED, center = True) pygame.display.update() ######################################################################################
41.514563
116
0.528687
1,590
12,828
4.138994
0.159748
0.032822
0.014891
0.017323
0.26774
0.198146
0.16183
0.144203
0.127944
0.116092
0
0.022829
0.323901
12,828
308
117
41.649351
0.735962
0.145385
0
0.17757
0
0
0.008123
0
0
0
0
0
0
1
0.060748
false
0.004673
0.023364
0
0.214953
0.037383
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4186fcc1ce90afaf587b5f5bd0c8099ee8a70d32
26,536
py
Python
backend/models_test.py
OmarThinks/PIM-API
b7259dd64b397844b26d5e190df5a8701be0ff85
[ "MIT" ]
null
null
null
backend/models_test.py
OmarThinks/PIM-API
b7259dd64b397844b26d5e190df5a8701be0ff85
[ "MIT" ]
null
null
null
backend/models_test.py
OmarThinks/PIM-API
b7259dd64b397844b26d5e190df5a8701be0ff85
[ "MIT" ]
null
null
null
import json import unittest from models import (NotReceived, validate_key, MyModel, Product,Category,ProductCategory, populate_tables, db_drop_and_create_all,get_dict,) #from app import create_app from __init__ import session unittest.TestLoader.sortTestMethodsUsing = None class modelsTestCase(unittest.TestCase): """This class represents the trivia test case""" def setUp(self): #db_drop_and_create_all() #create_app() # create and configure the app #self.app = create_app(testing=True) #Flask(__name__) #self.client = self.app.test_client #db.app = self.app #db.init_app(self.app) #db.create_all() pass def tearDown(self): """Executed after reach test""" print("_+++++++++++++++++++++++++++++++++_") #Note: Tests are run alphapetically def test_001_test(self): self.assertEqual(1,1) print("Test 1:Hello, Tests!") def test_002_test(self): db_drop_and_create_all() print("Test 2:db_drop_and_create_all") def test_0a_1_1_1_validate_key(self): the_dict = {"id":41,"password":"abc","username":"tryu","bool1":True, "bool2":False, "nr":NotReceived()} validated = [] for key in the_dict: validated.append(validate_key(the_dict,key)) self.assertEqual([False,False,True,True,True,False],validated) print("Test 0a_1_1_1 : validate_key: success") def test_0a_1_1_2_validate_key(self): the_dict = {"id":41,"password":"abc","username":"tryu","bool1":True, "bool2":False, "nr":NotReceived()} validated = [] for key in the_dict: validated.append(validate_key(the_dict,key,id=True)) self.assertEqual([True,False,True,True,True,False],validated) print("Test 0a_1_1_2 : validate_key: success") def test_0a_1_1_3_validate_key(self): the_dict = {"id":41,"password":"abc","username":"tryu","bool1":True, "bool2":False, "nr":NotReceived()} validated = [] for key in the_dict: validated.append(validate_key(the_dict,key,dangerous = True)) self.assertEqual([False,True,True,True,True,False],validated) print("Test 0a_1_1_3 : validate_key: success") def test_0a_1_1_4_validate_key(self): the_dict = 
{"id":41,"password":"abc","username":"tryu","bool1":True, "bool2":False, "nr":NotReceived()} validated = [] for key in the_dict: validated.append(validate_key(the_dict,key,dangerous = True)) self.assertEqual([False,True,True,True,True,False],validated) print("Test 0a_1_1_4 : validate_key: success") def test_0a_1_1_5_validate_key(self): the_dict = {"iD":41,"password":"abc","username":"tryu","bool1":True, "bool2":False, "nr":NotReceived(), "unsupported":{}} validated = [] for key in the_dict: validated.append(validate_key(the_dict,key,dangerous = True, unsupported=True)) #print(validated) self.assertEqual([False,True,True,True,True,False,False],validated) print("Test 0a_1_1_5 : validate_key: success") def test_0a_1_1_6_validate_key(self): product = Product(name="Cheese",price=50.4, quantity=7.89, code=789456611) #print(type(type(product))) the_dict = {"ID":41,"password":"abc","username":"tryu","bool1":True, "bool2":False, "nr":NotReceived(), "unsupported1":{}, "unsupported2":product} validated = [] for key in the_dict: validated.append(validate_key(the_dict,key,dangerous = True, unsupported=True)) #print(validated) self.assertEqual([False,True,True,True,True,False,False,True],validated) print("Test 0a_1_1_6 : validate_key: success") def test_0a_1_1_7_validate_key(self): product = Product(name="Cheese",price=50.4, quantity=7.89, code=789456611) the_dict = {"Id":41,"paSSword":"abc","username":"tryu", "bool1":True,"bool2":False, "nr":NotReceived(), "unsupported1":{}, "unsupported2":product} validated = [] for key in the_dict: validated.append(validate_key(the_dict,key, unsupported=False)) self.assertEqual([False,False,True,True,True,False,False,False],validated) print("Test 0a_1_1_7 : validate_key: success") def test_0a_1_1_8_validate_key(self): product = Product(name="Cheese",price=50.4, quantity=7.89, code=789456611) class tst(object): def __init__(self): self.Id = 41 self.paSSword = "abc" self.username = "tryu" self.bool1 = True self.bool2 = False self.nr = 
NotReceived() self.unsupported1 = {} self.unsupported2 = product validation_obj = tst() validated = [] for key in ["Id","paSSword","username","bool1","bool2","nr","unsupported1", "unsupported2"]: validated.append(validate_key(validation_obj,key, unsupported=False)) self.assertEqual([False,False,True,True,True,False,False,False],validated) print("Test 0a_1_1_8 : validate_key: with object") def test_0a_1_2_1_get_dict(self): product = Product(name="Cheese",price=50.4, quantity=7.89, code=789456611) class tst(object): def __init__(self): self.Id = 41 self.paSSword = "abc" self.username = "tryu" self.bool1 = True self.bool2 = False self.nr = NotReceived() self.unsupported1 = {} self.unsupported2 = product validation_obj = tst() the_dict = get_dict(validation_obj) self.assertEqual(the_dict,{"username":"tryu","bool1":True,"bool2":False}) the_dict = get_dict(validation_obj, id=True,dangerous=True) self.assertEqual(the_dict,{"username":"tryu","bool1":True,"bool2":False, "paSSword":"abc","Id":41}) print("Test 0a_1_2_1 : get_dict: with object") def test_0a_1_2_2_get_dict(self): product = Product(name="Cheese",price=50.4, quantity=7.89, code=789456611) the_dict = get_dict(product, id=True,dangerous=True) product.insert() the_dict = get_dict(product, id=True,dangerous=True) self.assertEqual(the_dict,{"name":"Cheese", "price":50.4,"id":1,"quantity":7.89,"code":789456611}) product.delete() print("Test 0a_1_2_2 : get_dict: with object") def test_0a_1_2_3_get_dict(self): product = Product(name="Cheese",price=50.4, quantity=7.89, code=789456611) the_dict = {"Id":41,"paSSword":"abc","username":"tryu","bool1":True, "bool2":False, "nr":NotReceived(),"unsupported1":{},"unsupported2":product} validated = get_dict(the_dict, id=True,dangerous=True) self.assertEqual(validated,{"username":"tryu","bool1":True,"bool2":False, "paSSword":"abc","Id":41}) validated = get_dict(the_dict) self.assertEqual(validated,{"username":"tryu","bool1":True,"bool2":False}) print("Test 0a_1_2_3 : get_dict: with 
dict") def test_0a_1_2_1_MyModel(self): product = Product(name="Cheese",price=50.4, quantity=7.89, code=789456611) self.assertEqual(product.name,"Cheese") self.assertEqual(product.price,50.4) self.assertEqual(product.quantity,7.89) self.assertEqual(product.code,789456611) print("Test 0a_1_2_1 : MyModel: success") def test_0a_1_2_2_MyModel(self): try: product = Product(name="Cheese",price=50.4, quantity=7.89, code=789456611, bla="123") except Exception as e: self.assertEqual(str(e),"'bla' is an invalid "+ "keyword argument for Product") print("Test 0a_1_2_2 : MyModel: success") def test_0a_1_2_3_MyModel(self): product = Product(name="Cheese",price=50.4, quantity=7.89, code=789456611) self.assertEqual(product.simple(),{"id":None,"name":"Cheese", "price":50.4, "quantity":7.89, "code":789456611}) product.insert() self.assertEqual(product.simple(),{"name":"Cheese", "price":50.4, "quantity":7.89, "code":789456611, "id":1}) #prod = Product(name="789",price=123,seller_id=1) #self.assertEqual(prod.simple(),{"name":"789","price":123, # "seller_id":1,"id":None,"in_stock":None,"seller":None}) #prod.insert() #self.assertEqual(prod.simple(),{"name":"789","price":123, # "seller_id":1,"id":1,"in_stock":True}) #prod.delete() product.delete() print("Test 0a_1_2_3 : MyModel: success") def test_0a_1_2_4_MyModel(self): #Trying to add the product with id, and seeing how the d will be neglected product = Product(name="Cheese",price=50.4, quantity=7.89, id=10000000,code=789456611) self.assertEqual(product.simple(),{"id":None,"name":"Cheese", "price":50.4, "quantity":7.89, "code":789456611}) print("Test 0a_1_2_4 : MyModel: success") def test_0a_1_3_1_MyModel(self): db_drop_and_create_all() # Creating the product product_to_del = Product(name="Cheese",price=50.4, quantity=7.89, id=10000000,code=789456611) product_to_del.insert() #self.assertEqual(len(session.query(Product).all()),1) self.assertEqual(len(Product.query().all()),1) """prod_to_del1 = Product(name = 
"abc",price=456,seller_id=user_to_del.id) prod_to_del2 = Product(name = "abcdef",price=4567,seller_id=user_to_del.id) db.session.add_all([prod_to_del1,prod_to_del2]) db.session.commit() self.assertEqual(len(Product.query.all()),2) order_to_del1 = Order( user_id = user_to_del.id,product_id=prod_to_del1.id,amount=1) order_to_del2 = Order( user_id = user_to_del.id,product_id=prod_to_del2.id,amount=3) order_to_del3 = Order( user_id = user_to_del.id,product_id=prod_to_del2.id,amount=5) db.session.add_all([order_to_del1,order_to_del2,order_to_del3]) db.session.commit() self.assertEqual(len(Order.query.all()),3)""" #img_to_delete1=Image(seller_id=1,name="abc",formatting = "png") #img_to_delete2=Image(seller_id=1,name="abce",formatting = "jpg") #db.session.add_all([img_to_delete1,img_to_delete2]) #db.session.commit() #self.assertEqual(len(Image.query.all()),2) # Trying to delete #img_to_delete2.delete() #self.assertEqual(len(Image.query.all()),1) """order_to_del3.delete() self.assertEqual(len(Order.query.all()),2) prod_to_del2.delete() self.assertEqual(len(Order.query.all()),1) self.assertEqual(len(Product.query.all()),1)""" product_to_del.delete() #self.assertEqual(len(Image.query.all()),0) """self.assertEqual(len(Order.query.all()),0) self.assertEqual(len(Product.query.all()),0) self.assertEqual(len(Product.query.all()),0)""" print("Test 0a_1_3_1 : MyModel: relationships") def test_0a_1_4_1_MyModel(self): # Testing update # Creating the product product_to_del = Product(name="Cheese",price=50.4, quantity=7.89, id=10000000,code=789456611) product_to_del.insert() product_dict = get_dict(product_to_del,id=True,dangerous=True) self.assertEqual(product_dict,{"id":1,"name":"Cheese", "price":50.4, "quantity":7.89,"code":789456611}) product_to_del.update(id=14,name="QUU",price=90, quantity=7000,code=0) product_dict = get_dict(product_to_del,id=True,dangerous=True) self.assertEqual(product_dict,{"id":1,"name":"QUU", "price":90, "quantity":7000,"code":0}) product_to_del.delete() 
print("Test 0a_1_4_1 : MyModel: update") def test_0a_1_5_1_MyModel_deep(self): # Testing update # Creating the product product_to_del = Product(name="Cheese",price=50.4, quantity=7.89, id=10000000,code=789456611) product_to_del.insert() #prod = Product(name="789",price=123,seller_id=1) #prod.insert() #print(product_to_del.deep()) self.assertEqual(product_to_del.deep(), {'categories': [], 'code': 789456611, 'id': 1, 'name': 'Cheese', 'price': 50.4, 'quantity': 7.89}) """self.assertEqual(prod.deep(),{'id': 1, 'in_stock': True, 'name': '789', 'orders': [], 'price': 123.0, 'seller': {'id': 1, 'username': 'abc'}, 'seller_id': 1})""" print("Test 0a_1_5_1 : MyModel: deep") def test_a_1_000_product_intro(self): print("") print("") print("_+++++++++++++++++++++++++++++++++_") print("_+++++++++++++++++++ Models : 1 ) Product ++_") print("_+++++++++++++++++++++++++++++++++_") print("") print("") def test_a_1_001_product_insert(self): db_drop_and_create_all() product1 = Product(name="Cheese",price=50.4, quantity=7.89, id=10000000,code=789456611) product1.insert() products = Product.query().all() self.assertEqual(len(products),1) print("Test a_1_1: product insert") def test_a_1_002_product_update(self): product1 = Product.query().get(1) #product1.name = "modified" product1.update(name="modified") product_1 = Product.query().get(1) self.assertEqual(product_1.name,"modified") print("Test a_1_2: product update") def test_a_1_003_product_delete(self): product1 = Product.query().get(1) product1.delete() products = Product.query().all() self.assertEqual(len(products),0) print("Test a_1_3: product delete") def test_a_1_004_populate(self): populate_tables() products = Product.query().all() self.assertEqual(len(products),5) print("Test a_1_4: Populate Tables") def test_a_1_005_product_values(self): product = Product.query().get(1) self.assertEqual(product.id,1) self.assertEqual(product.name,"Cheese") self.assertEqual(product.price,50.4) self.assertEqual(product.quantity,7.89) 
self.assertEqual(product.code,789456611) #print(product.categories) self.assertEqual(json.loads(str(product.categories)), [{"category_id": 1, "id": 1, "product_id": 1}, {"category_id": 2, "id": 2, "product_id": 1}, {"category_id": 3, "id": 3, "product_id": 1}, {"category_id": 4, "id": 4, "product_id": 1}, {"category_id": 5, "id": 5, "product_id": 1}]) """for prod in user.products: self.assertEqual(type(prod.id),int) self.assertEqual(type(prod.price),float) self.assertEqual(type(prod.in_stock),bool) self.assertEqual(type(prod.seller_id),int) for order in user.orders: self.assertEqual(type(order.id),int) self.assertEqual(type(order.user_id),int) self.assertEqual(type(order.product_id),int) self.assertEqual(type(order.amount),int)""" """for image in user.images: self.assertEqual(type(image.id),int) self.assertEqual(type(image.seller_id),int) self.assertEqual(type(image.name),str) self.assertEqual(type(image.formatting),str)""" print("Test a_1_5: product values") def test_a_1_006_product_insert_wrong(self): products = Product.query().all() old_records_number = len(products) try: #This code will not be executed #There are missing required parameters product = Product() product.insert() self.assertEqual(True,False) except Exception as e: self.assertEqual(str(e),"True != False") products = Product.query().all() new_records_number = len(products) self.assertEqual(old_records_number, new_records_number) print("Test a_1_6: product insert with missing"+ "required parameters") def test_a_1_007_product_delete_wrong(self): products = Product.query().all() old_records_number = len(products) try: #This code will not be executed #There is no product with the number 0 product1 = Product.query().get(0) product1.delete() self.assertEqual(True,False) except Exception as e: self.assertEqual(str(e),"'NoneType' "+ "object has no attribute 'delete'") #print(str(e)) products = Product.query().all() new_records_number = len(products) self.assertEqual(old_records_number, new_records_number) 
print("Test a_1_7: product delete mistake, non-existent"+ "product id") def test_a_1_008_product_simple(self): product = Product.query().get(1).simple() #print(product) self.assertEqual(product,{'code': 789456611, 'id': 1, 'price': 50.4, 'name': 'Cheese', 'quantity': 7.89}) print("Test a_1_8: product simple") def test_a_1_009_product_relationship_order(self): product = Product.query().get(1) """orders=user.orders orders_ids=[order.id for order in orders] self.assertEqual(1 in orders_ids,True) self.assertEqual(2 in orders_ids,False) self.assertEqual(3 in orders_ids,False) self.assertEqual(4 in orders_ids,True)""" print("Test a_1_9:product relationship_order") def test_a_1_010_product_delete_relationships(self): #measuring lengths beofre actions #populate_tables() products_before = len(Product.query().all()) categories_before = len(Category.query().all()) pc_before = len(ProductCategory.query().all()) # deleting the product prod_to_del = Product.query().get(1) prod_to_del.delete() self.assertEqual(len(Product.query().all()),products_before-1) self.assertEqual(len(Category.query().all()),categories_before) self.assertEqual(len(ProductCategory.query().all()),pc_before-5) print("Test a_1_10: product delete relationships") def test_a_1_011_product_deep(self): #measuring lengths beofre actions product = Product.query().get(4) #print(product.deep()) self.assertEqual(product.deep(), {'categories': [], 'code': 8444441, 'id': 4, 'name': 'Mobile', 'price': 20.1, 'quantity': 9.0}) print("Test a_1_11: product deep") def test_a_2_000_category_intro(self): print("") print("") print("_+++++++++++++++++++++++++++++++++_") print("_+++++++++++++++++++ Models : 2 ) Category ++_") print("_+++++++++++++++++++++++++++++++++_") print("") print("") def test_a_2_001_category_insert(self): db_drop_and_create_all() category1 = Category(name="Cheese") category1.insert() categories = Category.query().all() self.assertEqual(len(categories),1) print("Test a_2_1: category insert") def 
test_a_2_002_category_update(self): category1 = Category.query().get(1) #category1.name = "modified" category1.update(name="modified") category_1 = Category.query().get(1) self.assertEqual(category_1.name,"modified") print("Test a_2_2: category update") def test_a_2_003_category_delete(self): category1 = Category.query().get(1) category1.delete() categories = Category.query().all() self.assertEqual(len(categories),0) print("Test a_2_3: category delete") def test_a_2_004_populate(self): populate_tables() categories = Category.query().all() self.assertEqual(len(categories),13) print("Test a_2_4: Populate Tables") def test_a_2_005_category_values(self): category = Category.query().get(1) self.assertEqual(category.id,1) self.assertEqual(category.name,"Electronics") self.assertEqual(category.parent_id,None) self.assertEqual(category.parent,None) self.assertEqual(str(category.children), '[{"id": 2, "name": "Camera", "parent_id": 1}]') #print(category.products) self.assertEqual(json.loads(str(category.products)), [{"category_id": 1, "id": 1, "product_id": 1}, {"category_id": 1, "id": 6, "product_id": 2}, {"category_id": 1, "id": 11, "product_id": 3}]) category = Category.query().get(4) self.assertEqual(category.id,4) self.assertEqual(category.name,"Manual Cameras") self.assertEqual(category.parent_id,2) self.assertEqual(category.parent,Category.query().get(2)) #print(category.children) self.assertEqual(str(category.children), '[]') #print(category.products) self.assertEqual(json.loads(str(category.products)), [{"category_id": 4, "id": 4, "product_id": 1}, {"category_id": 4, "id": 9, "product_id": 2}, {"category_id": 4, "id": 14, "product_id": 3}]) print("Test a_2_5: category values") def test_a_2_006_category_insert_wrong(self): categories = Category.query().all() old_records_number = len(categories) try: #This code will not be executed #There are missing required parameters category = Category() category.insert() self.assertEqual(True,False) except Exception as e: 
self.assertEqual(str(e),"True != False") categories = Category.query().all() new_records_number = len(categories) self.assertEqual(old_records_number, new_records_number) print("Test a_2_6: category insert with missing"+ "required parameters") def test_a_2_007_category_delete_wrong(self): categories = Category.query().all() old_records_number = len(categories) try: #This code will not be executed #There is no category with the number 0 category1 = Category.query().get(0) category1.delete() self.assertEqual(True,False) except Exception as e: self.assertEqual(str(e),"'NoneType' "+ "object has no attribute 'delete'") #print(str(e)) categories = Category.query().all() new_records_number = len(categories) self.assertEqual(old_records_number, new_records_number) print("Test a_2_7: category delete mistake, non-existent"+ "category id") def test_a_2_008_category_simple(self): category = Category.query().get(1).simple() #print(category) self.assertEqual(category,{'id': 1, 'name': 'Electronics', 'parent_id': None}) category = Category.query().get(4).simple() #print(category) self.assertEqual(category,{'id': 4, 'name': 'Manual Cameras', 'parent_id': 2}) print("Test a_2_8: category simple") def test_a_2_009_category_relationship_order(self): category = Category.query().get(1) category.parent=None category = Category.query().get(4) print("Test a_2_9:category relationship") def test_a_2_010_category_delete_relationships(self): products_before = len(Product.query().all()) categories_before = len(Category.query().all()) pc_before = len(ProductCategory.query().all()) # deleting the product category_to_del = Category.query().get(1) category_to_del.delete() self.assertEqual(len(Product.query().all()),products_before) self.assertEqual(len(Category.query().all()),categories_before-1) self.assertEqual(len(ProductCategory.query().all()),pc_before-3) print("Test a_2_10: category delete relationships") def test_a_2_011_category_deep(self): #measuring lengths beofre actions category = 
Category.query().get(5) #print(category.deep()) self.assertEqual(category.deep(), {'children': [{'id': 6, 'name': 'Sport Cars', 'parent_id': 5}, {'id': 7, 'name': 'Electric Cars', 'parent_id': 5}, {'id': 8, 'name': 'Tractors', 'parent_id': 5}], 'id': 5, 'name': 'Cars', 'parent': None, 'parent_id': None, 'products': [{'category_id': 5, 'id': 5, 'product_id': 1}, {'category_id': 5, 'id': 10, 'product_id': 2}, {'category_id': 5, 'id': 15, 'product_id': 3}]}) category = Category.query().get(4) #print(category.deep()) self.assertEqual(category.deep(), {'children': [], 'id': 4, 'name': 'Manual Cameras', 'parent': {'id': 2, 'name': 'Camera', 'parent_id': None}, 'parent_id': 2, 'products': [{'category_id': 4, 'id': 4, 'product_id': 1}, {'category_id': 4, 'id': 9, 'product_id': 2}, {'category_id': 4, 'id': 14, 'product_id': 3}]}) print("Test a_2_11: category deep") def test_a_3_000_pc_intro(self): print("") print("") print("_+++++++++++++++++++++++++++++++++_") print("_+++++++++++++++++++ Models : 3 ) ProductCategory ++_") print("_+++++++++++++++++++++++++++++++++_") print("") print("") def test_a_3_001_pc_insert(self): db_drop_and_create_all() populate_tables() pc1 = ProductCategory(product_id=3,category_id=7) pc1.insert() pcs = ProductCategory.query().all() self.assertEqual(len(pcs),16) print("Test a_3_1: pc insert") def test_a_3_002_pc_update(self): pc1 = ProductCategory.query().get(1) #pc1.name = "modified" pc1.update(name="modified") pc_1 = ProductCategory.query().get(1) self.assertEqual(pc_1.name,"modified") print("Test a_3_2: pc update") def test_a_3_003_pc_delete(self): pc1 = ProductCategory.query().get(1) pc1.delete() pcs = ProductCategory.query().all() self.assertEqual(len(pcs),15) print("Test a_3_3: pc delete") def test_a_3_004_populate(self): populate_tables() pcs = ProductCategory.query().all() self.assertEqual(len(pcs),15) print("Test a_3_4: Populate Tables") def test_a_3_005_pc_values(self): pc = ProductCategory.query().get(1) self.assertEqual(pc.id,1) 
self.assertEqual(pc.product_id,1) self.assertEqual(pc.category_id,1) #self.assertEqual(pc.parent,None) #self.assertEqual(str(pc.children), # '[{"id": 2, "name": "Camera", "parent_id": 1}]') #print(pc.product.simple()) self.assertEqual(pc.product.simple(), {'code': 789456611, 'id': 1, 'name': 'Cheese', 'price': 50.4, 'quantity': 7.89}) #print(pc.category.simple()) self.assertEqual(pc.category.simple(), {'id': 1, 'name': 'Electronics', 'parent_id': None}) print("Test a_3_5: pc values") def test_a_3_006_pc_insert_wrong(self): pcs = ProductCategory.query().all() old_records_number = len(pcs) try: #This code will not be executed #There are missing required parameters pc = ProductCategory() pc.insert() self.assertEqual(True,False) except Exception as e: self.assertEqual(str(e),"True != False") pcs = ProductCategory.query().all() new_records_number = len(pcs) self.assertEqual(old_records_number, new_records_number) print("Test a_3_6: pc insert with missing"+ "required parameters") def test_a_3_007_pc_delete_wrong(self): pcs = ProductCategory.query().all() old_records_number = len(pcs) try: #This code will not be executed #There is no pc with the number 0 pc1 = ProductCategory.query().get(0) pc1.delete() self.assertEqual(True,False) except Exception as e: self.assertEqual(str(e),"'NoneType' "+ "object has no attribute 'delete'") #print(str(e)) pcs = ProductCategory.query().all() new_records_number = len(pcs) self.assertEqual(old_records_number, new_records_number) print("Test a_3_7: pc delete mistake, non-existent"+ "pc id") def test_a_3_008_pc_simple(self): pc = ProductCategory.query().get(1).simple() #print(pc) self.assertEqual(pc,{'category_id': 1, 'id': 1, 'product_id': 1}) print("Test a_3_8: pc simple") def test_a_3_009_pc_relationship_order(self): pc = ProductCategory.query().get(1) #pc.parent=None #pc = ProductCategory.query().get(4) print("Test a_3_9:pc relationship") def test_a_3_010_pc_delete_relationships(self): products_before = len(Product.query().all()) 
categories_before = len(Category.query().all()) pc_before = len(ProductCategory.query().all()) # deleting the product pc_to_del = ProductCategory.query().get(1) pc_to_del.delete() self.assertEqual(len(Product.query().all()),products_before) self.assertEqual(len(Category.query().all()),categories_before) self.assertEqual(len(ProductCategory.query().all()),pc_before-1) print("Test a_3_10: pc delete relationships") def test_a_3_011_pc_deep(self): #measuring lengths beofre actions pc = ProductCategory.query().get(5) #print(pc.deep()) self.assertEqual(pc.deep(), {'category': {'id': 5, 'name': 'Cars', 'parent_id': None}, 'category_id': 5, 'id': 5, 'product': {'code': 789456611, 'id': 1, 'name': 'Cheese', 'price': 50.4, 'quantity': 7.89}, 'product_id': 1}) print("Test a_3_11: pc deep") # Make the tests conveniently executable if __name__ == "__main__": unittest.main()
28.021119
77
0.690496
3,831
26,536
4.568781
0.061864
0.110552
0.014398
0.021368
0.742273
0.684454
0.603668
0.531395
0.471691
0.437868
0
0.050464
0.134497
26,536
946
78
28.05074
0.71163
0.096096
0
0.494526
0
0
0.193759
0.012307
0
0
0
0
0.175182
1
0.109489
false
0.025547
0.007299
0
0.122263
0.136861
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4188e0bdb0246bed574781d83fd5dfac338b69bd
218
py
Python
compiler/python_compiler/engines/py3_8/Compiler.py
unknowncoder05/app-architect
083278e1386562797614f320649ca85d1c44e009
[ "MIT" ]
3
2021-08-12T12:59:27.000Z
2021-08-29T15:30:49.000Z
compiler/python_compiler/engines/py3_8/Compiler.py
unknowncoder05/app-architect
083278e1386562797614f320649ca85d1c44e009
[ "MIT" ]
null
null
null
compiler/python_compiler/engines/py3_8/Compiler.py
unknowncoder05/app-architect
083278e1386562797614f320649ca85d1c44e009
[ "MIT" ]
null
null
null
from utils.flags import * from .get_fragment_class import get_fragment_class def compile(blueprint:dict, *, level = 0)->str: build = get_fragment_class(blueprint, compile, level=level) return build.compile()
27.25
63
0.756881
30
218
5.3
0.533333
0.207547
0.301887
0
0
0
0
0
0
0
0
0.005348
0.142202
218
7
64
31.142857
0.84492
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.4
0
0.8
0.4
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
41894b431af0af154e50d2e462dae772eb97b482
1,349
py
Python
Programs/day_12.py
Yunram/python_training
be3fbab05511716757ecdacef827a16329a85e90
[ "Apache-2.0" ]
null
null
null
Programs/day_12.py
Yunram/python_training
be3fbab05511716757ecdacef827a16329a85e90
[ "Apache-2.0" ]
null
null
null
Programs/day_12.py
Yunram/python_training
be3fbab05511716757ecdacef827a16329a85e90
[ "Apache-2.0" ]
null
null
null
import random def generate_number(): return random.randint(1, 101) def user_number(): return int(input("Make a guess: ")) def compare(random_number, user_guess): if random_number == user_guess: return "Wow, you win." elif random_number > user_guess: return f"{user_guess} is Too Low" elif random_number < user_guess: return f"{user_guess} is Too High" def play_game(): is_game_over = False random_number = generate_number() print("Welcome to the NUMBER GUESSING GAME") print("I am thinking of a number between 1 and 100") difficulty_level = input("Choose a difficulty. Type 'easy' or 'hard': ") if difficulty_level == "easy": print(f"You have 10 attempts remaining to guess the number") lives = 10 elif difficulty_level == "hard": print(f"You have 5 attempts remaining to guess the number") lives = 5 while not is_game_over: user_guess = user_number() print(compare(random_number, user_guess)) if user_guess != random_number: lives -= 1 print(f"You have {lives} attempts remaining to guess the number") if lives == 0: print("You lose") is_game_over = True if random_number == user_guess: is_game_over = True play_game()
29.326087
77
0.630096
185
1,349
4.405405
0.324324
0.110429
0.117791
0.154601
0.381595
0.320245
0.206135
0.112883
0.112883
0.112883
0
0.016495
0.280949
1,349
45
78
29.977778
0.823711
0
0
0.111111
1
0
0.271715
0
0
0
0
0
0
1
0.111111
false
0
0.027778
0.055556
0.277778
0.194444
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
418a73ccd36a29573b20675d0e5a6f1331e0f75a
1,986
py
Python
jenskipper/cli/patch.py
flupke/jenskipper
bb3de3745142a5b1bf3df40409711ae74fdb07ea
[ "Apache-2.0" ]
4
2016-04-30T12:43:01.000Z
2016-12-02T17:42:47.000Z
jenskipper/cli/patch.py
Stupeflix/jenskipper
bb3de3745142a5b1bf3df40409711ae74fdb07ea
[ "Apache-2.0" ]
null
null
null
jenskipper/cli/patch.py
Stupeflix/jenskipper
bb3de3745142a5b1bf3df40409711ae74fdb07ea
[ "Apache-2.0" ]
null
null
null
import subprocess import click from .. import utils from .. import exceptions from .. import jenkins_api from . import decorators from . import diff @click.command() @decorators.repos_command @decorators.jobs_command(num_jobs=1) @decorators.handle_all_errors() @click.argument('fname', type=click.Path(exists=True, dir_okay=False, writable=True)) @click.pass_context def patch(context, jobs_names, base_dir, fname): """ Try to patch FNAME with the diff between local and remote versions of a job. WARNING: this may not always work and does not take into account the Jinja macros. Always check your diffs before commiting changes made by this command. """ session = jenkins_api.auth(base_dir) # Get diff job_name = jobs_names[0] try: diff_lines = diff.get_job_diff(session, base_dir, job_name, {}, reverse=True) except exceptions.JobNotFound: utils.sechowrap('') utils.sechowrap('Unknown job: %s' % job_name, fg='red', bold=True) utils.sechowrap('Job is present in the local repository, but not ' 'on the Jenkins server.', fg='red') context.exit(1) # Patch output file patch_proc = subprocess.Popen(['patch', '--no-backup-if-mismatch', fname], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) # It's important to add a newline at the end of the patch, so patch can # distinguate the end of the file patch = ''.join(diff_lines).encode('utf8') + b'\n' patch_stdout, patch_stderr = patch_proc.communicate(patch) if patch_proc.returncode != 0: click.secho('Patch failed:', fg='red', bold=True) click.secho(patch_stdout.strip().decode('utf8')) click.secho(patch_stderr.strip().decode('utf8')) context.exit(1)
35.464286
78
0.624371
253
1,986
4.790514
0.478261
0.041254
0.037129
0.021452
0
0
0
0
0
0
0
0.005533
0.271903
1,986
55
79
36.109091
0.832642
0.181772
0
0.054054
0
0
0.096855
0.014465
0
0
0
0
0
1
0.027027
false
0.027027
0.189189
0
0.216216
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
418c971816ed357865734aae67a50b1c996358f8
592
py
Python
tests/test_planning_graph_planner.py
debbynirwan/planning-graph
2eb376fc29973e92d123adfd15640935214d3b40
[ "Apache-2.0" ]
5
2021-03-05T14:42:40.000Z
2022-02-02T20:23:33.000Z
tests/test_planning_graph_planner.py
debbynirwan/planning-graph
2eb376fc29973e92d123adfd15640935214d3b40
[ "Apache-2.0" ]
null
null
null
tests/test_planning_graph_planner.py
debbynirwan/planning-graph
2eb376fc29973e92d123adfd15640935214d3b40
[ "Apache-2.0" ]
2
2021-08-23T12:21:48.000Z
2021-12-27T16:27:44.000Z
from planning_graph.planning_graph_planner import GraphPlanner from planning_graph.planning_graph import PlanningGraph class TestGraphPlanner: def test_plan(self): planning_graph = PlanningGraph( 'domain/dock-worker-robot-domain.pddl', 'domain/dock-worker-robot-problem.pddl') graph = planning_graph.create(max_num_of_levels=100) assert graph.fixed_point is False goal = planning_graph.goal graph_planner = GraphPlanner() layered_plan = graph_planner.plan(graph, goal) assert len(layered_plan.data) == 4
32.888889
62
0.711149
71
592
5.690141
0.492958
0.225248
0.133663
0.123762
0.148515
0
0
0
0
0
0
0.008584
0.212838
592
17
63
34.823529
0.858369
0
0
0
0
0
0.123311
0.123311
0
0
0
0
0.153846
1
0.076923
false
0
0.153846
0
0.307692
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
418dbdc2ab0a6e83c7236c8b810a43bb21ee2351
2,783
py
Python
src/mykrobe/stats/stats.py
Phelimb/mykrobe-atlas-cli
866471d0c2d7030698d37f5c90fd232cafc261d5
[ "MIT" ]
1
2020-01-10T06:43:22.000Z
2020-01-10T06:43:22.000Z
src/mykrobe/stats/stats.py
Phelimb/mykrobe-atlas-cli
866471d0c2d7030698d37f5c90fd232cafc261d5
[ "MIT" ]
null
null
null
src/mykrobe/stats/stats.py
Phelimb/mykrobe-atlas-cli
866471d0c2d7030698d37f5c90fd232cafc261d5
[ "MIT" ]
null
null
null
from math import exp from math import factorial from math import log import logging logger = logging.getLogger(__name__) def percent_coverage_from_expected_coverage(coverage): # With low coverage we expect a lower percent of the sequence to be # coverage. return 1 - exp(-coverage) def log_lik_probability_of_N_gaps(depth, percent_coverage): L = 32 percent_coverage = float(percent_coverage)/100 n_gaps = int(round(L-(L*percent_coverage))) expected_n_gaps = exp(-depth) * L return log_poisson_prob(expected_n_gaps, n_gaps) def log_poisson_prob(lam, k): return -lam + k * log(lam) - log_factorial(k) def log_factorial(n): assert n >= 0 out = 0 for i in range(int(n)): out += log(i + 1) return out def log_lik_depth(depth, expected_depth): if expected_depth <= 0: raise ValueError("Expected depth must be greater than 0") if depth < 0: raise ValueError("Depth must not be negative") return log_poisson_prob(lam=expected_depth, k=depth) def log_lik_R_S_coverage(observed_alternate_depth, observed_reference_depth, expected_alternate_depth, expected_reference_depth): lne = log_poisson_prob( lam=expected_alternate_depth, k=observed_alternate_depth) le = log_poisson_prob( lam=expected_reference_depth, k=observed_reference_depth) return lne + le def depth_to_expected_kmer_count(depth): return 32*depth+0.01 def log_lik_R_S_kmer_count(observed_reference_kmer_count, observed_alternate_kmer_count, expected_reference_depth, expected_alternate_depth): expected_reference_kmer_count = depth_to_expected_kmer_count( expected_reference_depth) expected_alternate_kmer_count = depth_to_expected_kmer_count( expected_alternate_depth) # logger.debug("%f, %f, %f" % (expected_reference_depth, # expected_reference_kmer_count, observed_reference_kmer_count)) # logger.debug("%f, %f, %f" % (expected_alternate_depth, # expected_alternate_kmer_count, observed_alternate_kmer_count)) lne = log_poisson_prob( lam=expected_reference_kmer_count, k=observed_reference_kmer_count) le = log_poisson_prob( 
lam=expected_alternate_kmer_count, k=observed_alternate_kmer_count) # logger.debug("%i, %i, %i, %f" % (expected_reference_depth, # expected_reference_kmer_count, observed_reference_kmer_count, lne)) # logger.debug("%i, %i, %i, %f" % (expected_alternate_depth, # expected_alternate_kmer_count, observed_alternate_kmer_count, le)) return lne + le
33.939024
106
0.684513
359
2,783
4.902507
0.192201
0.102273
0.081818
0.057955
0.488068
0.447727
0.320455
0.217045
0.170455
0.170455
0
0.008023
0.238591
2,783
81
107
34.358025
0.822558
0.224937
0
0.113208
0
0
0.029357
0
0
0
0
0
0.018868
1
0.150943
false
0
0.075472
0.056604
0.377358
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
418fb24b012a503b2e7384627bb584b231c134f7
6,660
py
Python
keras/utils/losses_utils.py
PJmouraocs/keras
7a39b6c62d43c25472b2c2476bd2a8983ae4f682
[ "MIT" ]
259
2016-02-09T09:06:29.000Z
2021-07-29T05:27:40.000Z
keras/utils/losses_utils.py
PJmouraocs/keras
7a39b6c62d43c25472b2c2476bd2a8983ae4f682
[ "MIT" ]
50
2016-02-24T14:46:57.000Z
2020-01-20T07:34:19.000Z
keras/utils/losses_utils.py
PJmouraocs/keras
7a39b6c62d43c25472b2c2476bd2a8983ae4f682
[ "MIT" ]
94
2016-02-17T20:59:27.000Z
2021-04-19T08:18:16.000Z
"""Utilities related to losses.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from .. import backend as K class Reduction(object): """Types of loss reduction. Contains the following values: * `NONE`: Un-reduced weighted losses with the same shape as input. When this reduction type used with built-in Keras training loops like `fit`/`evaluate`, the unreduced vector loss is passed to the optimizer but the reported loss will be a scalar value. * `SUM`: Scalar sum of weighted losses. * `SUM_OVER_BATCH_SIZE`: Scalar `SUM` divided by number of elements in losses. """ NONE = 'none' SUM = 'sum' SUM_OVER_BATCH_SIZE = 'sum_over_batch_size' @classmethod def all(cls): return (cls.NONE, cls.SUM, cls.SUM_OVER_BATCH_SIZE) @classmethod def validate(cls, key): if key not in cls.all(): raise ValueError('Invalid Reduction Key %s.' % key) def squeeze_or_expand_dimensions(y_pred, y_true=None, sample_weight=None): """Squeeze or expand last dimension if needed. 1. Squeezes last dim of `y_pred` or `y_true` if their rank differs by 1. 2. Squeezes or expands last dim of `sample_weight` if its rank differs by 1 from the new rank of `y_pred`. If `sample_weight` is scalar, it is kept scalar. # Arguments y_pred: Predicted values, a `Tensor` of arbitrary dimensions. y_true: Optional label `Tensor` whose dimensions match `y_pred`. sample_weight: Optional weight scalar or `Tensor` whose dimensions match `y_pred`. # Returns Tuple of `y_pred`, `y_true` and `sample_weight`. Each of them possibly has the last dimension squeezed, `sample_weight` could be extended by one dimension. 
""" if y_true is not None: y_pred_rank = K.ndim(y_pred) y_pred_shape = K.int_shape(y_pred) y_true_rank = K.ndim(y_true) y_true_shape = K.int_shape(y_true) if (y_pred_rank - y_true_rank == 1) and (y_pred_shape[-1] == 1): y_pred = K.squeeze(y_pred, -1) elif (y_true_rank - y_pred_rank == 1) and (y_true_shape[-1] == 1): y_true = K.squeeze(y_true, -1) if sample_weight is None: return y_pred, y_true y_pred_rank = K.ndim(y_pred) weights_rank = K.ndim(sample_weight) if weights_rank != 0: if y_pred_rank == 0 and weights_rank == 1: y_pred = K.expand_dims(y_pred, -1) elif weights_rank - y_pred_rank == 1: sample_weight = K.squeeze(sample_weight, -1) elif y_pred_rank - weights_rank == 1: sample_weight = K.expand_dims(sample_weight, -1) return y_pred, y_true, sample_weight def _num_elements(losses): """Computes the number of elements in `losses` tensor.""" with K.name_scope('num_elements') as scope: return K.cast(K.size(losses, name=scope), losses.dtype) def reduce_weighted_loss(weighted_losses, reduction=Reduction.SUM_OVER_BATCH_SIZE): """Reduces the individual weighted loss measurements.""" if reduction == Reduction.NONE: loss = weighted_losses else: loss = K.sum(weighted_losses) if reduction == Reduction.SUM_OVER_BATCH_SIZE: loss = loss / _num_elements(weighted_losses) return loss def broadcast_weights(values, sample_weight): # Broadcast weights if possible. weights_shape = K.int_shape(sample_weight) values_shape = K.int_shape(values) if values_shape != weights_shape: weights_rank = K.ndim(sample_weight) values_rank = K.ndim(values) # Raise error if ndim of weights is > values. if weights_rank > values_rank: raise ValueError( 'Incompatible shapes: `values` {} vs `sample_weight` {}'.format( values_shape, weights_shape)) # Expand dim of weights to match ndim of values, if required. 
for i in range(weights_rank, values_rank): sample_weight = K.expand_dims(sample_weight, axis=i) if weights_shape is not None and values_shape is not None: for i in range(weights_rank): if (weights_shape[i] is not None and values_shape[i] is not None and weights_shape[i] != values_shape[i]): # Cannot be broadcasted. if weights_shape[i] != 1: raise ValueError( 'Incompatible shapes: `values` {} vs ' '`sample_weight` {}'.format( values_shape, weights_shape)) sample_weight = K.repeat_elements( sample_weight, values_shape[i], axis=i) return sample_weight def compute_weighted_loss(losses, sample_weight=None, reduction=Reduction.SUM_OVER_BATCH_SIZE, name=None): """Computes the weighted loss. # Arguments losses: `Tensor` of shape `[batch_size, d1, ... dN]`. sample_weight: Optional `Tensor` whose rank is either 0, or the same rank as ` losses`, or be broadcastable to `losses`. reduction: (Optional) Type of Reduction to apply to loss. Default value is `SUM_OVER_BATCH_SIZE`. name: Optional name for the op. # Raises ValueError: If the shape of `sample_weight` is not compatible with `losses`. # Returns Weighted loss `Tensor` of the same type as `losses`. If `reduction` is `NONE`, this has the same shape as `losses`; otherwise, it is scalar. """ Reduction.validate(reduction) if sample_weight is None: sample_weight = 1.0 with K.name_scope(name or 'weighted_loss'): input_dtype = K.dtype(losses) losses = K.cast(losses, K.floatx()) sample_weight = K.cast(sample_weight, K.floatx()) # Update dimensions of `sample_weight` to match with `losses` if possible. losses, _, sample_weight = squeeze_or_expand_dimensions( losses, None, sample_weight) # Broadcast weights if possible. sample_weight = broadcast_weights(losses, sample_weight) # Apply weights to losses. weighted_losses = sample_weight * losses # Apply reduction function to the individual weighted losses. loss = reduce_weighted_loss(weighted_losses, reduction) # Convert the result back to the input type. 
loss = K.cast(loss, input_dtype) return loss
37.206704
84
0.633333
893
6,660
4.5028
0.19037
0.107436
0.023875
0.031833
0.256155
0.201442
0.067645
0.040786
0.040786
0.040786
0
0.005237
0.283183
6,660
178
85
37.41573
0.837034
0.344144
0
0.152174
0
0
0.043956
0
0
0
0
0
0
1
0.076087
false
0
0.054348
0.01087
0.25
0.01087
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
418fce67b285434917158142db550cc3c4912d7c
671
py
Python
tests/test_utils.py
kelvinxu/blocks
8081c0f00926e86d20731eb24cf8a61cab3a7529
[ "BSD-3-Clause" ]
1
2015-10-19T07:54:34.000Z
2015-10-19T07:54:34.000Z
tests/test_utils.py
kelvinxu/blocks
8081c0f00926e86d20731eb24cf8a61cab3a7529
[ "BSD-3-Clause" ]
null
null
null
tests/test_utils.py
kelvinxu/blocks
8081c0f00926e86d20731eb24cf8a61cab3a7529
[ "BSD-3-Clause" ]
null
null
null
from numpy.testing import assert_raises from theano import tensor from blocks.utils import check_theano_variable, unpack def test_unpack(): assert unpack((1, 2)) == [1, 2] assert unpack([1, 2]) == [1, 2] assert unpack([1]) == 1 test = object() assert unpack(test) is test assert_raises(ValueError, unpack, [1, 2], True) def test_check_theano_variable(): check_theano_variable(None, 3, 'float') check_theano_variable([[1, 2]], 2, 'int') assert_raises(ValueError, check_theano_variable, tensor.vector(), 2, 'float') assert_raises(ValueError, check_theano_variable, tensor.vector(), 1, 'int')
29.173913
54
0.657228
89
671
4.752809
0.292135
0.156028
0.269504
0.066194
0.356974
0.356974
0.356974
0.356974
0.106383
0
0
0.034091
0.213115
671
22
55
30.5
0.767045
0
0
0.117647
0
0
0.023845
0
0
0
0
0
0.470588
1
0.117647
false
0
0.176471
0
0.294118
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
2
4191ed02b99fe80f0d4118fb2e70bad9e2f6b771
18,531
py
Python
zeropdk/layout/waveguide_rounding.py
lightwave-lab/zeropdk
cc49eb1008c449185cf9dcdbb283ba086ebd8de0
[ "MIT" ]
17
2019-08-22T15:55:50.000Z
2022-02-02T20:52:00.000Z
zeropdk/layout/waveguide_rounding.py
lightwave-lab/zeropdk
cc49eb1008c449185cf9dcdbb283ba086ebd8de0
[ "MIT" ]
1
2020-09-29T00:43:38.000Z
2020-10-27T07:15:01.000Z
zeropdk/layout/waveguide_rounding.py
lightwave-lab/zeropdk
cc49eb1008c449185cf9dcdbb283ba086ebd8de0
[ "MIT" ]
3
2019-09-04T07:48:35.000Z
2021-06-16T09:39:42.000Z
""" Straight waveguide rounding algorithms""" from functools import lru_cache from math import atan2, tan, inf import numpy as np import klayout.db as kdb from zeropdk.layout.geometry import rotate, fix_angle, cross_prod from zeropdk.layout.algorithms.sampling import sample_function from zeropdk.layout.waveguides import layout_waveguide def angle_between(v1, v0): """Compute angle in radians between v1 and v0. Rotation angle from v0 to v1 counter-clockwise. """ return fix_angle(atan2(v1.y, v1.x) - atan2(v0.y, v0.x)) def project(P, A, B): """Projects a point P into a line defined by A and B""" AB = B - A eAB = AB / AB.norm() Pproj = A + (P - A) * eAB * eAB return Pproj def bisect(V1, V2): """Bisects two vectors V1 and V2. Returns a vector.""" # from https://math.stackexchange.com/questions/2285965/how-to-find-the-vector-formula-for-the-bisector-of-given-two-vectors V = V1.norm() * V2 + V2.norm() * V1 return V / V.norm() def intersect(A, eA, B, eB): """Computes intersection between lines defined by points A/B and vectors eA/eB""" # from http://mathforum.org/library/drmath/view/62814.html assert abs(cross_prod(eA, eB)) > 0, "Vectors must not be parallel" a = cross_prod(B - A, eB) / cross_prod(eA, eB) return A + a * eA @lru_cache(maxsize=5) def _min_clearance(angle_rad, radius): """ Compute the minimum clearance for a tangent arc given an vertex angle.""" try: return abs(radius / tan(angle_rad / 2)) except ZeroDivisionError: return inf def _solve_Z_angle(α1, α2, BC, R): from math import sin, cos, tan, atan, acos assert α1 * α2 # they should have the same sign sign = α1 / abs(α1) α1, α2 = abs(α1), abs(α2) αprime = atan(0.5 / tan(α1) + 0.5 / tan(α2)) A = 2 / cos(αprime) γ = -αprime + acos(1 / A * (1 / sin(α1) + 1 / sin(α2) - BC / R)) return γ * sign class ClearanceRewind(Exception): pass class ClearanceForward(Exception): pass class _Arc: def __init__(self, P1, C, P2, ccw): from math import isclose assert isclose( (P2 - C).norm(), (P1 - C).norm(), abs_tol=1e-9 ), "Invalid 
Arc" # inconsistent radius self.P1 = P1 # first point self.C = C # center self.P2 = P2 # second point self.ccw = ccw # True if counter-clockwise def get_points(self): from math import atan2, pi P1, C, P2 = self.P1, self.C, self.P2 r = (P2 - C).norm() theta_start = atan2((P1 - C).y, (P1 - C).x) theta_end = atan2((P2 - C).y, (P2 - C).x) if self.ccw: theta_end = (theta_end - theta_start) % (2 * pi) + theta_start else: theta_start = (theta_start - theta_end) % (2 * pi) + theta_end theta_start, theta_end = theta_end, theta_start arc_function = lambda t: np.array([r * np.cos(t), r * np.sin(t)]) # in the function below, theta_start must be smaller than theta_end t, coords = sample_function(arc_function, [theta_start, theta_end], tol=0.002 / r) # This yields a better polygon # The idea is to place a point right after the first one, to # make sure the arc starts in the right direction insert_at = np.argmax(theta_start + 0.001 <= t) t = np.insert(t, insert_at, theta_start + 0.001) coords = np.insert(coords, insert_at, arc_function(theta_start + 0.001), axis=1) insert_at = np.argmax(theta_end - 0.001 <= t) coords = np.insert( coords, insert_at, arc_function(theta_end - 0.001), axis=1 ) # finish the waveguide a little bit after # create original waveguide poligon prior to clipping and rotation dpoints_list = [C + kdb.DPoint(x, y) for x, y in zip(*coords)] if not self.ccw: dpoints_list = list(reversed(dpoints_list)) return dpoints_list def __repr__(self): return "Arc({P1}, {C}, {P2}, {CCW})".format(P1=self.P1, C=self.C, P2=self.P2, CCW=self.ccw) class _Line: def __init__(self, P1, P2): self.P1 = P1 self.P2 = P2 def get_points(self): return [self.P1, self.P2] def get_length(self): return (self.P2 - self.P1).norm() def __repr__(self): return "Line({P1}, {P2})".format(P1=self.P1, P2=self.P2) def solve_Z(A, B, C, D, radius): from math import sin, pi, copysign AB = B - A BC = C - B CD = D - C α1 = angle_between(-BC, AB) α2 = angle_between(-BC, CD) # print("AB, BC, CD=", AB, BC, CD) 
# print("α1, α2=", degrees(α1), degrees(α2)) γ = _solve_Z_angle(α1, α2, BC.norm(), radius) # print("γ=", degrees(γ)) eX1X2 = rotate(-BC, -γ) / BC.norm() # print("eX1X2=", eX1X2) x = radius / BC.norm() * (1 - sin(abs(α1 - γ))) / sin(abs(α1)) # print("x=", x) X = B + x * BC # print("X=", X) X1 = X - eX1X2 * radius X2 = X + eX1X2 * radius Aprime = X1 + rotate(X - X1, copysign(pi / 2, α1) + γ - α1) Dprime = X2 + rotate(X - X2, copysign(pi / 2, α2) + γ - α2) # print("line", A, Aprime) # print("arc2", Aprime, X1, X) # print("arc2", X, X2, Dprime) # print("line", Dprime, D) return ( [_Line(A, Aprime), _Arc(Aprime, X1, X, α1 < 0), _Arc(X, X2, Dprime, α1 > 0)], [Dprime, D], ) def solve_U(A, B, C, D, radius): # TODO: known bug. This assumes that there is enough space between # A and B / C and D to perform the turn. Suggestion: if there isn't, # abort or move Eprime and Gprime accordingly. XB = bisect(A - B, C - B) XC = bisect(B - C, D - C) orientation = cross_prod(XB, XC) > 0 # positive if CCW waveguide turn X = intersect(B, XB, C, XC) XB, XC = B - X, C - X Fprime = project(X, B, C) h = (Fprime - X).norm() # if h is too close to R, we will have extra unnecessary arcs # use two solve_3 with h as a radius instead if h >= radius - 0.001: solution1, rest_points = solve_3(A, B, C, h) solution2, rest_points = solve_3(rest_points[0], C, D, h) return solution1 + solution2, rest_points # F = X + (Fprime - X) * radius / h # Bprime = X + XB * radius / h # Cprime = X + XC * radius / h eAB = B - A eAB /= eAB.norm() eDC = C - D eDC /= eDC.norm() Eprime = project(X, A, B) Gprime = project(X, D, C) E = X + (Eprime - X) * radius / h G = X + (Gprime - X) * radius / h def compute_A_prime(E, Eprime, eAB): from math import sqrt D = (E - Eprime).norm() L = sqrt(D * (4 * radius - D)) Aprime = Eprime - eAB * L return Aprime Aprime = compute_A_prime(E, Eprime, eAB) Dprime = compute_A_prime(G, Gprime, eDC) Asec = Aprime + (E - X) Dsec = Dprime + (G - X) H = 0.5 * (Asec + X) II = 0.5 * (Dsec + X) 
return ( [ _Line(A, Aprime), _Arc(Aprime, Asec, H, not orientation), _Arc(H, X, II, orientation), _Arc(II, Dsec, Dprime, not orientation), ], [Dprime, D], ) def solve_2(A, B): return [_Line(A, B)], [] def solve_V(A, B, C, radius): XB = bisect(A - B, C - B) isCCW = cross_prod(C - B, A - B) > 0 Aprime = project(A, B, XB + B) Cprime = project(C, B, XB + B) rA = (A - Aprime).norm() rC = (C - Cprime).norm() if rA > rC: Csec = project(Cprime, A, B) return [_Line(A, Csec), _Arc(Csec, Cprime, C, isCCW)], [] else: Asec = project(Aprime, B, C) return [_Arc(A, Aprime, Asec, isCCW)], [Asec, C] def solve_3(A, B, C, radius): from math import cos, pi p0, p1, p2 = A, B, C α = angle_between(p0 - p1, p2 - p1) if α % (2 * pi) == pi: # if points are collinear, just ignore middle point return ([], [p0, p2]) # sometimes users pick len1 and len2 to be exactly 1 radius. # in that case, numerical errors might result in a ClearanceRewind # or ClearanceForward. # I am adding this 0.001 fix to correct that. clear = _min_clearance(α, radius - 0.001) len1 = (p1 - p0).norm() len2 = (p2 - p1).norm() if len1 < clear: raise ClearanceRewind() if len2 < clear: raise ClearanceForward() e1 = (p1 - p0) / len1 e2 = (p2 - p1) / len2 arc_center = p1 + 0.5 * (-e1 * clear + e2 * clear) / cos(α / 2) ** 2 return ( [ _Line(p0, p1 - e1 * clear), _Arc(p1 - e1 * clear, arc_center, p1 + e2 * clear, α > 0), ], [p1 + e2 * clear, p2], ) def solve_4(A, B, C, D, radius): AB = B - A BC = C - B CD = D - C α1 = angle_between(-BC, AB) α2 = angle_between(-BC, CD) if α1 * α2 > 0: return solve_Z(A, B, C, D, radius) else: return solve_U(A, B, C, D, radius) def compute_rounded_path(points, radius): """Transforms a list of points into sections of arcs and straight lines. Approach: - Go through the list of points in triplets (A, B, C). - Call solve3 in (A,B,C), which returns a rounded path plus (Bprime, C) - Continue. 
- If solve3 cannot solve because AB is too short, raise a ClearanceRewind error - Conversely, if solve3 cannot solve because BC is too short, raise a ClearanceForward error - In the case of ClearanceForward, call solve4 on (A,B,C,D) - In the case of ClearanceForward, call solve4 on (O,A,B,C), where O is the previous point Returns: - A list of _Line and _Arc objects """ points_list = list(points) # in case points_list is an iterator N = len(points_list) if N == 2: return [_Line(*points)] # Sanity checks assert N >= 3, "Insufficient number of points, N = {N} < 3".format(N=N) old_rounded_path = rounded_path = list() old_points_left = points_left = list(points) can_rewind = False while len(points_left) > 2: try: solution, rest_points = solve_3(*points_left[0:3], radius) old_points_left = points_left[:] points_left = rest_points + points_left[3:] can_rewind = True except ClearanceRewind: if not can_rewind: raise RuntimeError( "Not enough space for enough turns: Cannot solve:", *points_left[0:3] ) points_left = old_points_left rounded_path = old_rounded_path if len(points_left[0:4]) < 4: raise RuntimeError( "Not enough space for enough turns: Cannot solve:", *points_left[0:4] ) solution, rest_points = solve_4(*points_left[0:4], radius) old_points_left = points_left[:] points_left = rest_points + points_left[4:] can_rewind = False except ClearanceForward: if len(points_left[0:4]) < 4: raise RuntimeError( "Not enough space for enough turns: Cannot solve:", *points_left[0:4] ) solution, rest_points = solve_4(*points_left[0:4], radius) old_points_left = points_left[:] points_left = rest_points + points_left[4:] can_rewind = False old_rounded_path = rounded_path[:] rounded_path += solution # there should be 2 points left in points_left solution, rest_points = solve_2(*points_left[0:2]) rounded_path += solution points_left = rest_points + points_left[2:] assert len(points_left) == 0 return rounded_path class _Path: """ Object holding path plus width information""" def 
__init__(self, points, widths): self.points = points # This can be a single width or a list of widths, just like in layout_waveguide() self.widths = widths def layout(self, cell, layer): layout_waveguide(cell, layer, self.points, self.widths, smooth=False) def __repr__(self): return "Path({point1}...{pointN}, {widths})".format( point1=self.points[0], pointN=self.points[-1], widths=self.widths ) class _Taper(_Path): def __init__(self, P1, P2, w1, w2): self.P1 = P1 self.P2 = P2 self.w1 = w1 self.w2 = w2 self.points = [P1, P2] self.widths = [w1, w2] def __repr__(self): return "Taper({P1}, {P2}, w1={w1}, w2={w2})".format( P1=self.P1, P2=self.P2, w1=self.w1, w2=self.w2 ) def _compute_tapered_line(line, waveguide_width, taper_width, taper_length): """Takes a _Line object and computes two tapers with taper_width and taper_length""" minimum_length = 30 + 2 * taper_length # don't bother tapering waveguides beyond this length P1, P2 = line.get_points() if line.get_length() < minimum_length: return [_Path([P1, P2], waveguide_width)] u = P2 - P1 u /= u.norm() return [ _Taper(P1, P1 + u * taper_length, waveguide_width, taper_width), _Path([P1 + u * taper_length, P2 - u * taper_length], taper_width), _Taper(P2 - u * taper_length, P2, taper_width, waveguide_width), ] def compute_untapered_path(path, waveguide_width): return [_Path(element.get_points(), waveguide_width) for element in path] def compute_tapered_path(path, waveguide_width, taper_width, taper_length): tapered_path = [] for element in path: if isinstance(element, _Line): tapered_path += _compute_tapered_line( element, waveguide_width, taper_width, taper_length ) elif isinstance(element, _Arc): tapered_path += [_Path(element.get_points(), waveguide_width)] return tapered_path def unique_points(point_list): if len(point_list) < 2: return point_list unique_points = [point_list[0]] previous_point = point_list[0] for point in point_list[1:]: if (point - previous_point).norm() > 1e-4: unique_points.append(point) 
previous_point = point return unique_points def layout_waveguide_from_points( cell, layer, points, width, radius, taper_width=None, taper_length=None ): assert radius > width / 2, "Please use a radius larger than the half-width" points = unique_points(points) if len(points) < 2: # Nothing to do return cell # First, get the list of lines and arcs try: rounded_path = compute_rounded_path(points, radius) except Exception as e: print("ERROR:", e) print("Continuing...") layout_waveguide(cell, layer, points, 0.1) return cell # Taper path if necessary if taper_width is not None and taper_length is not None: waveguide_path = compute_tapered_path(rounded_path, width, taper_width, taper_length) else: waveguide_path = compute_untapered_path(rounded_path, width) # creating a single path _draw_points = [] _draw_widths = [] for element in waveguide_path: points, width = element.points, element.widths n_points = len(points) try: if len(width) == n_points: _draw_points.extend(points) _draw_widths.extend(width) elif len(width) == 2: _draw_widths.extend(np.linspace(width[0], width[1], n_points)) _draw_points.extend(points) else: raise RuntimeError("Internal error detected. 
Debug please.") except TypeError: _draw_points.extend(points) _draw_widths.extend(np.ones(n_points) * width) # deleting repeated points _cur_point = None _draw_points2 = [] _draw_widths2 = [] for p, w in zip(_draw_points, _draw_widths): if _cur_point and p == _cur_point: continue _draw_points2.append(p) _draw_widths2.append(w) _cur_point = p layout_waveguide(cell, layer, _draw_points2, _draw_widths2, smooth=False) return cell def main(): def trace_rounded_path(cell, layer, rounded_path, width): points = [] for item in rounded_path: points.extend(item.get_points()) dpath = kdb.DPath(points, width, 0, 0) cell.shapes(layer).insert(dpath) def trace_reference_path(cell, layer, points, width): dpath = kdb.DPath(points, width, 0, 0) cell.shapes(layer).insert(dpath) layout = kdb.Layout() TOP = layout.create_cell("TOP") layer = kdb.LayerInfo(10, 0) layerRec = kdb.LayerInfo(1001, 0) ex, ey = kdb.DPoint(1, 0), kdb.DPoint(0, 1) points = [0 * ex, 10 * ex, 10 * (ex + ey), 30 * ex] origin = 0 * ey points = [origin + point for point in points] x = compute_rounded_path(points, 3) trace_rounded_path(TOP, layer, x, 0.5) trace_reference_path(TOP, layerRec, points, 0.5) points = [0 * ex, 10 * ex, 5 * (ex - ey), 17 * ex, 30 * ex] origin = 30 * ey points = [origin + point for point in points] x = compute_rounded_path(points, 3) trace_rounded_path(TOP, layer, x, 0.5) trace_reference_path(TOP, layerRec, points, 0.5) radius = 3 for ex2 in (ex, -ex): points = [2 * ex2] for d in np.arange(1, 10, 2.5): origin = points[-1] displacements = [ 4 * radius * ex2, 4 * radius * ex2 + d * ey - 1 * d * ex2, d * ey, (d + 2 * radius) * ey, ] points += [origin + displacement for displacement in displacements] origin = 15 * ex + 40 * ey points = [origin + point for point in points] x = compute_rounded_path(points, radius) trace_rounded_path(TOP, layer, x, 0.5) trace_reference_path(TOP, layerRec, points, 0.5) # Layout tapered waveguide points = [ 0 * ex, 100 * ex, 100 * ex + 20 * ey, 10 * ex + 5 * ey, 10 * 
ex + 25 * ey, 100 * ex + 30 * ey, ] # Untapered origin = 40 * ex points_ = [origin + point for point in points] layout_waveguide_from_points(TOP, layer, points_, 0.5, 5) # Tapered origin = 40 * ex + 40 * ey points_ = [origin + point for point in points] layout_waveguide_from_points(TOP, layer, points_, 0.5, 5, taper_width=3, taper_length=10) print("Wrote waveguide_rounding.gds") TOP.write("waveguide_rounding.gds") if __name__ == "__main__": main()
29.744783
128
0.585667
2,641
18,531
3.957592
0.159031
0.029659
0.004305
0.013395
0.237467
0.199101
0.153272
0.135668
0.128205
0.119786
0
0.035761
0.295289
18,531
622
129
29.792605
0.764607
0.162431
0
0.210396
0
0
0.032665
0.00449
0
0
0
0.001608
0.014851
1
0.084158
false
0.004951
0.032178
0.019802
0.220297
0.007426
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
419209cfa7ba87d275b63ba33aa6f02f592a320e
3,616
py
Python
PyHART_tutorial/05_device_specific_commands_for_DDS.py
wdehoog/PyHART
abe410e45d66710f65d5499165aab066c9ad9fa3
[ "MIT" ]
null
null
null
PyHART_tutorial/05_device_specific_commands_for_DDS.py
wdehoog/PyHART
abe410e45d66710f65d5499165aab066c9ad9fa3
[ "MIT" ]
null
null
null
PyHART_tutorial/05_device_specific_commands_for_DDS.py
wdehoog/PyHART
abe410e45d66710f65d5499165aab066c9ad9fa3
[ "MIT" ]
null
null
null
# # In this module is shown how to send a command to an HART device. # Encode/decode data, logging and manage responses codes. # ''' ------------------------------------------------------------------------------- SAME CODE OF EXAMPLE 01 - IGNORE THIS SECTION This is included to test the example ------------------------------------------------------------------------------- ''' # # Standard import. Append the path of PyHART. Since this file is in the folder PyHART_tutorial, # just go back one folder. # import sys sys.path.append('../') from PyHART.COMMUNICATION.CommCore import * from PyHART.COMMUNICATION.Types import * from PyHART.COMMUNICATION.Utils import * from PyHART.COMMUNICATION.Common import * # # Procedure to list communication ports # count, listOfComPorts = ListCOMPort(True) comport = None selection = 0 while (comport == None) and (selection != (count + 1)): print('\nSelect the communication port.') print('Insert the number related to your choice and press enter.') try: selection = int(input()) except: selection = 0 if (selection == (count + 1)): print('Leaving application...') sys.exit() comport = GetCOMPort(selection, listOfComPorts) # # Instantiates and starts the communication object # hart = HartMaster(comport, \ MASTER_TYPE.PRIMARY, \ num_retry = 2, \ retriesOnPolling = False, \ autoPrintTransactions = True, \ whereToPrint = WhereToPrint.BOTH, \ logFile = 'terminalLog.log', \ rt_os = False, \ manageRtsCts = None) hart.Start() # # Polling connected devices in range [0..EndPollingAddress] and # print identification data of the first device found. # FoundDevice = None pollAddress = 0 EndPollingAddress = 3 while (FoundDevice == None) and (pollAddress < EndPollingAddress): CommunicationResult, SentPacket, RecvPacket, FoundDevice = hart.LetKnowDevice(pollAddress) pollAddress += 1 if (FoundDevice is not None): PrintDevice(FoundDevice, hart) else: print ('Device not found. 
Leaving Application...') sys.exit() ''' ------------------------------------------------------------------------------- END OF EXAMPLE 01 CODE ------------------------------------------------------------------------------- ''' # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # Command 240 # # Send command 240 with slot 8 retStatus, CommunicationResult, SentPacket, RecvPacket = HartCommand(hart, 240, bytearray([8])) # Send command 240 with slot 26 retStatus, CommunicationResult, SentPacket, RecvPacket = HartCommand(hart, 240, bytearray([26])) # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # Command 79 Simulation Enable # slot = 0 # Pressure simulationEnable = 1 # enable unit = GetUnitCode('Kilopascal') status = 0 txdata = bytearray(8) txdata[0] = slot txdata[1] = simulationEnable txdata[2] = unit txdata[3:6] = FloatToBytearray(34.734) txdata[7] = status retStatus, CommunicationResult, SentPacket, RecvPacket = HartCommand(hart, 79, txdata) # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # Command 79 Simulation Disable # txdata = bytearray([slot, not simulationEnable, unit, 0, 0, 0, 0, 0]) retStatus, CommunicationResult, SentPacket, RecvPacket = HartCommand(hart, 79, txdata) # # Kills all threads # hart.Stop()
29.16129
97
0.555586
334
3,616
6.002994
0.45509
0.072319
0.097257
0.095761
0.167581
0.145636
0.145636
0.145636
0
0
0
0.022636
0.242533
3,616
123
98
29.398374
0.709383
0.268529
0
0.105263
0
0
0.082375
0
0
0
0
0
0
1
0
false
0
0.087719
0
0.087719
0.070175
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
4192d1cec463e5f4665e763436ee29fbd56e053f
9,315
py
Python
productporter/product/views.py
kamidox/weixin_producthunt
24269da93e75374ee481b1b78257b18abda4d0c7
[ "BSD-3-Clause" ]
10
2015-01-07T06:01:13.000Z
2021-02-14T09:11:10.000Z
productporter/product/views.py
kamidox/weixin_producthunt
24269da93e75374ee481b1b78257b18abda4d0c7
[ "BSD-3-Clause" ]
3
2015-01-01T09:56:04.000Z
2015-01-06T01:34:44.000Z
productporter/product/views.py
kamidox/weixin_producthunt
24269da93e75374ee481b1b78257b18abda4d0c7
[ "BSD-3-Clause" ]
5
2015-01-01T10:31:50.000Z
2018-03-09T05:22:16.000Z
#!/bin/env python
# -*- coding: utf-8 -*-
"""
    productporter.product.views
    ~~~~~~~~~~~~~~~~~~~~~~~~~

    product blueprint

    :copyright: (c) 2014 by the ProductPorter Team.
    :license: BSD, see LICENSE for more details.
"""
import datetime
import json

from flask import Blueprint, request, current_app, flash, redirect, \
    url_for, jsonify, make_response
from flask.ext.login import current_user
from qiniu import Auth

from productporter.product.phapi import ProductHuntAPI
from productporter.product.models import Product, Tag
from productporter.utils.helper import render_template, pull_and_save_posts, render_markup, \
    query_products, can_translate, can_review, is_online
from productporter.utils.decorators import moderator_required
from productporter.user.models import User

product = Blueprint('product', __name__)


def _tag_names(post):
    """Return this post's tag names joined as 'a; b; c'."""
    tagnames = []
    for tag in post.tags:
        if len(tagnames) == 0:
            tagnames.append(tag.name)
        else:
            tagnames.append('; ' + tag.name)
    return ''.join(tagnames)


def _render_tags(post):
    """render tags. MUST BE THE SAME of macro 'render_tags' in macro.jinja.html"""
    tag_template = '<a class="label label-default" href="%s">%s</a>'
    tag_html = []
    for tag in post.tags:
        tag_html.append(tag_template % \
            (url_for('product.tags', tag=tag.name), tag.name))
        # NOTE(review): spacing appended per tag — confirm against macro.jinja.html
        tag_html.append('<br/><br/>')
    return '\n'.join(tag_html)


def _render_contributors(contributers, postid, locked_by, field):
    """render contributors, MUST BE THE SAME of macro 'contributors'
    in macro.jinja.html"""
    div_template = "<div class='translaters-list' data-postid='%s' field='%s'>edit by %s</div>"
    user_template = "<a href='%s'>@%s</a>"
    user_htmls = []
    users = contributers.all()
    for user in users:
        # Prefer nickname for display, fall back to login name.
        nickname = user.nickname if user.nickname else user.username
        user_htmls.append(user_template % \
            (url_for('user.profile', username=user.username), nickname))
    if locked_by:
        nickname = locked_by.nickname if locked_by.nickname else locked_by.username
        user_htmls.append((' - locked by ' + user_template) % \
            (url_for('user.profile', username=locked_by.username), nickname))
    return div_template % (postid, field, '\n'.join(user_htmls))


def _post_aquire_translate(request):
    """aquire to translate post

    Returns JSON: 401 when not signed in / not allowed, 403 when the field
    is locked, 400 when another online user is editing it, otherwise marks
    the current user as editor and returns the field's current value.
    """
    postid = request.args.get('postid')
    field = request.args.get('field', 'ctagline')
    current_app.logger.info('aquire translate %s for post %s' % (field, str(postid)))
    if not can_translate(current_user):
        ret = {
            'status': 'error',
            'postid': postid,
            'error': 'Please sign in first'
        }
        return make_response(jsonify(**ret), 401)
    post = Product.query.filter(Product.postid==postid).first_or_404()
    if getattr(post, field + '_locked'):
        ret = {
            'status': 'error',
            'postid': postid,
            # Fix: the placeholder was never interpolated (the literal '%s'
            # reached the client) and 'administrator' was misspelled.
            'error': '%s is locked. Please contact administrator.' % field
        }
        return make_response(jsonify(**ret), 403)
    editing_user = getattr(post, 'editing_' + field + '_user')
    # Reject only when someone *else* is editing and still online.
    if (editing_user) and \
        (editing_user.username != current_user.username) and \
        (is_online(editing_user)):
        ret = {
            'status': 'error',
            'postid': post.postid,
            'error': '%s is editing by %s' % \
                (field, editing_user.username)
        }
        return make_response(jsonify(**ret), 400)
    setattr(post, 'editing_' + field + '_user_id', current_user.id)
    post.save()
    ret = {
        'status': 'success',
        'postid': post.postid,
        'field': field,
        'value': getattr(post, field),
        'tags': _tag_names(post)
    }
    return jsonify(**ret)


# translate detail
@product.route('/translate', methods=["GET", "PUT", "POST"])
def translate():
    """ use GET to aquire translation
    use PUT/POST to commit translation

    :param postid: The postid of product
    :param field: The field of operation, could be 'ctagline' or 'cintro'
    :param value: The value of translate field
    """
    if request.method == 'GET':
        return _post_aquire_translate(request)
    jsondata = None
    try:
        jsondata = json.loads(request.data)
    except ValueError:
        ret = {
            'status': 'error',
            'message': "invalid json data"
        }
        return make_response(jsonify(**ret), 405)
    postid = jsondata['postid']
    field = jsondata['field']
    if not can_translate(current_user):
        ret = {
            'status': 'error',
            'postid': postid,
            'field': field,
            'error': 'Please sign in first'
        }
        return make_response(jsonify(**ret), 401)
    post = Product.query.filter(Product.postid==postid).first_or_404()
    # A 'canceled' commit just releases the editing lock without saving text.
    try:
        canceled = jsondata['canceled']
        if canceled:
            setattr(post, 'editing_' + field + '_user_id', None)
            post.save()
            ret = {
                'status': 'success',
                'postid': post.postid,
                'field': field
            }
            return jsonify(**ret)
    except KeyError:
        pass
    current_app.logger.info('commit %s for post %s' % (field, str(postid)))
    # deal with tags
    if field == 'ctagline':
        post.set_tags(jsondata['tags'])
    # deal with other filed data
    setattr(post, field, jsondata['value'])
    setattr(post, 'editing_' + field + '_user_id', None)
    post.save()
    # Record the current user as a contributor for this field.
    getattr(current_user, 'add_' + field + '_product')(post)
    ret = {
        'status': 'success',
        'postid': post.postid,
        'field': field,
        'value': render_markup(getattr(post, field)),
        'contributors': _render_contributors( \
            getattr(post, field + '_editors'), post.postid, \
            getattr(post, field + '_locked_user'), field),
        'tags': _render_tags(post)
    }
    return jsonify(**ret)


# posts list
@product.route('/', methods=["GET"])
def index():
    """ product posts home dashboard """
    return redirect(url_for('product.posts'))


# posts list
@product.route('/posts/', methods=["GET"])
def posts():
    """ product posts home dashboard """
    spec_day = request.args.get('day', '')
    day, posts = query_products(spec_day)
    post_count = len(posts)
    tags = Tag.names()
    return render_template('product/posts.jinja.html',
        post_count=post_count, posts=posts, day=day, tags=tags)


# posts list
@product.route('/posts/<postid>', methods=["GET"])
def post_intro(postid):
    """ product detail information page """
    post = Product.query.filter(Product.postid==postid).first_or_404()
    tags = Tag.names()
    return render_template('product/post_intro.jinja.html',
        post=post, tags=tags)


#pull products
@product.route('/pull')
def pull():
    """ pull data from producthunt.com """
    day = request.args.get('day', '')
    count = pull_and_save_posts(day)
    return "pulled %d posts " % (count)


@product.route('/lock', methods=['GET'])
@moderator_required
def lock():
    """ lock product

    :param postid: The postid of product
    :param op: Operation, clould be 'lock' or 'unlock'
    :param field: Field, could be 'ctagline' or 'cintro'
    """
    postid = request.args.get('postid', '')
    op = request.args.get('op', 'lock')
    field = request.args.get('field', 'ctagline')
    post = Product.query.filter(Product.postid==postid).first_or_404()
    if op.lower() == 'lock':
        setattr(post, field + '_locked', True)
        setattr(post, field + '_locked_user_id', current_user.id)
        op = 'Unlock'
    else:
        setattr(post, field + '_locked', False)
        setattr(post, field + '_locked_user_id', None)
        op = 'Lock'
    post.save()
    ret = {
        'status': 'success',
        'postid': post.postid,
        'contributors': _render_contributors( \
            getattr(post, field + '_editors'), post.postid, \
            getattr(post, field + '_locked_user'), field)
    }
    return jsonify(**ret)


@product.route('/tags/', methods=["GET"])
def tags():
    """show all products"""
    return "under construction"


@product.route('/tags/<tagname>', methods=["GET"])
def tags_name(tagname):
    """show all products by selected tag"""
    return "under construction"


@product.route('/dailybriefing/<day>', methods=['GET'])
@moderator_required
def dailybriefing(day):
    """ Generate daily briefing """
    qday, posts = query_products(day)
    post_count = len(posts)
    # Thanks to contributors
    editors = []
    for post in posts:
        # Only thank editors of taglines that were finalized (locked).
        if post.ctagline and post.ctagline_locked:
            editors += post.ctagline_editors
    # Thank once is enough
    editors = {}.fromkeys(editors).keys()
    return render_template('product/dailybriefing.jinja.html',
        post_count=post_count, posts=posts, day=qday, editors=editors)


@product.route('/qiniutoken', methods=['GET'])
def get_qiniu_token():
    """Issue a qiniu upload token from the app's configured credentials."""
    q = Auth(current_app.config["QINIU_ACCESS_KEY"],
        current_app.config["QINIU_SECRET_KEY"])
    token = q.upload_token(current_app.config["QINIU_BUCKET"])
    ret = {'uptoken': token}
    return jsonify(**ret)
32.231834
95
0.615137
1,099
9,315
5.070974
0.196542
0.019379
0.017585
0.02243
0.362462
0.258748
0.219271
0.171541
0.164364
0.1274
0
0.004664
0.240365
9,315
288
96
32.34375
0.782928
0.122061
0
0.338164
0
0.004831
0.152599
0.013589
0
0
0
0
0
1
0.067633
false
0.004831
0.048309
0
0.217391
0.009662
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41937960edb49006b8dce4b16d90c212ae69d8f7
672
py
Python
tests/wasp1/AllAnswerSets/choice_28.test.py
bernardocuteri/wasp
05c8f961776dbdbf7afbf905ee00fc262eba51ad
[ "Apache-2.0" ]
19
2015-12-03T08:53:45.000Z
2022-03-31T02:09:43.000Z
tests/wasp1/AllAnswerSets/choice_28.test.py
bernardocuteri/wasp
05c8f961776dbdbf7afbf905ee00fc262eba51ad
[ "Apache-2.0" ]
80
2017-11-25T07:57:32.000Z
2018-06-10T19:03:30.000Z
tests/wasp1/AllAnswerSets/choice_28.test.py
bernardocuteri/wasp
05c8f961776dbdbf7afbf905ee00fc262eba51ad
[ "Apache-2.0" ]
6
2015-01-15T07:51:48.000Z
2020-06-18T14:47:48.000Z
input = """ % Similar to choice.12. % Triggers a bug if we have positive possibly true atoms and no final % totalised satisfied check for rules and constraints (nor anything % equivalent). % Using positive PTs, a, b and c are never PTs. c :- a,b. a :- c, not b. b :- c, not a. % Both d and e are positive PTs. % If we take both (order doesn't matter), we get the correct model {d,e}. % However, once we assume one of d and e false and keep the other one true, % the rules are satisfied for the partial interpretations <{d},{a,b,c},{e}> % and <{e},{a,b,c},{d}>, but not for the totalised ones! d v e. e :- not a, not b. d :- not a, not b. """ output = """ {d, e} """
24
75
0.64881
128
672
3.40625
0.46875
0.018349
0.022936
0.036697
0
0
0
0
0
0
0
0.003781
0.212798
672
27
76
24.888889
0.820416
0
0
0.095238
0
0.047619
0.952381
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
41938ce4b42c56cae2f2acd965aaf210dc6bfc5f
415
py
Python
origins/migrations/0055_alter_activenomen_options.py
dennereed/paleocore
d6da6c39cde96050ee4b9e7213ec1200530cbeee
[ "MIT" ]
1
2021-02-05T19:50:13.000Z
2021-02-05T19:50:13.000Z
origins/migrations/0055_alter_activenomen_options.py
dennereed/paleocore
d6da6c39cde96050ee4b9e7213ec1200530cbeee
[ "MIT" ]
59
2020-06-17T22:21:51.000Z
2022-02-10T05:00:01.000Z
origins/migrations/0055_alter_activenomen_options.py
dennereed/paleocore
d6da6c39cde96050ee4b9e7213ec1200530cbeee
[ "MIT" ]
2
2020-07-01T14:11:09.000Z
2020-08-10T17:27:26.000Z
# Generated by Django 3.2.5 on 2021-07-29 21:01 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('origins', '0054_activenomen'), ] operations = [ migrations.AlterModelOptions( name='activenomen', options={'ordering': ['name'], 'verbose_name': 'Actice Nomen', 'verbose_name_plural': 'Active Nomina'}, ), ]
23.055556
115
0.614458
42
415
5.97619
0.785714
0.087649
0
0
0
0
0
0
0
0
0
0.061093
0.250602
415
17
116
24.411765
0.745981
0.108434
0
0
1
0
0.277174
0
0
0
0
0
0
1
0
false
0
0.090909
0
0.363636
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
419429d46af11923cde1d968263d403240c3dfee
2,241
py
Python
clintk/cat2vec/feature_selection.py
DITEP/db-cleansing
9a2360d45bc250b9b1ec73ba7efc2d14b3250c74
[ "MIT" ]
5
2019-04-19T05:45:20.000Z
2021-11-16T13:22:07.000Z
clintk/cat2vec/feature_selection.py
DITEP/db-cleansing
9a2360d45bc250b9b1ec73ba7efc2d14b3250c74
[ "MIT" ]
null
null
null
clintk/cat2vec/feature_selection.py
DITEP/db-cleansing
9a2360d45bc250b9b1ec73ba7efc2d14b3250c74
[ "MIT" ]
null
null
null
""" selects parameters with L1 logistic regression """ import pandas as pd from sklearn.base import BaseEstimator class LassoSelector(BaseEstimator): """ This class is made to be used after cat2vec.lasso_gridsearch since it selects the features from a dataframe that have the most weighted coefficients (according to a L1-penalized linear model) It inherits from sklearn.base.BaseEstimator to allow gridsearching the best `n_features` using a pipeline and a basline classifier Parameters ---------- n_features : int number of top features to keep lasso_coefs : pd.DataFrame each row is the name of a category and its coef weight in LASSO model feature_col : str name of the feature col (ie name of the categorical variable) coef_col : str name of the column of the LASSO coefficients in lasso_coefs dataframe Examples -------- >>> dico = {'coef': [0, 4.5, 1.2, 0.3], \ 'colnames': ['feat1', 'feat2', 'feat3', 'feat4']} >>> df = pd.DataFrame(dico) keeps only feat2 and feat3 >>> selector = LassoSelector(2).fit(df['colnames'], df['coef']) >>> X = [[0, 0, 1, 0], [1, 1, 0, 0], [0, 1, 0, 0]] >>> selector.transform(X) [[0, 1], [1, 0], [1, 0]] """ def __init__(self, lasso_coefs, feature_col, coef_col, n_features=64): self.n_features = n_features self.feature_col = feature_col self.lasso_coefs = lasso_coefs self.coef_col = coef_col def fit(self, X, y): return self def transform(self, X): """ Parameters ---------- X : pd.DataFrame contains only features Returns ------- ndarray contains the best n_features """ self.lasso_coefs['abs_coef'] = abs(self.lasso_coefs[self.coef_col]) self.lasso_coefs.sort_values(['abs_coef'], ascending=False, inplace=True) # keeping top features according to lasso coefs_to_keep = self.lasso_coefs.iloc[:self.n_features, :] coefs_to_keep = coefs_to_keep[self.feature_col] return X[coefs_to_keep.values].values
29.486842
77
0.604641
294
2,241
4.465986
0.37415
0.076161
0.063976
0.024372
0.054836
0
0
0
0
0
0
0.023154
0.286925
2,241
75
78
29.88
0.798498
0.551986
0
0
0
0
0.019925
0
0
0
0
0
0
1
0.166667
false
0
0.111111
0.055556
0.444444
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
419618fdd8ed14916e411a16d95f8129f3073e40
163
py
Python
src/controllers/__init__.py
dennismalmgren/marl
baa846dc4144cf6f53e51d8cf1e2fcf5800c9f95
[ "Apache-2.0" ]
null
null
null
src/controllers/__init__.py
dennismalmgren/marl
baa846dc4144cf6f53e51d8cf1e2fcf5800c9f95
[ "Apache-2.0" ]
null
null
null
src/controllers/__init__.py
dennismalmgren/marl
baa846dc4144cf6f53e51d8cf1e2fcf5800c9f95
[ "Apache-2.0" ]
null
null
null
REGISTRY = {} from .basic_controller import BasicMAC from .distri_controller import DistriMAC REGISTRY["basic_mac"] = BasicMAC REGISTRY["distri_mac"] = DistriMAC
23.285714
40
0.797546
19
163
6.631579
0.473684
0.253968
0
0
0
0
0
0
0
0
0
0
0.110429
163
7
41
23.285714
0.868966
0
0
0
0
0
0.115854
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4
4197fe5fe8d16fcca6d82bcf73ee4d6614030b79
3,407
py
Python
src/onevision/nn/layer/padding.py
phlong3105/onevision
90552b64df7213e7fbe23c80ffd8a89583289433
[ "MIT" ]
2
2022-03-28T09:46:38.000Z
2022-03-28T14:12:32.000Z
src/onevision/nn/layer/padding.py
phlong3105/onevision
90552b64df7213e7fbe23c80ffd8a89583289433
[ "MIT" ]
null
null
null
src/onevision/nn/layer/padding.py
phlong3105/onevision
90552b64df7213e7fbe23c80ffd8a89583289433
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- """Padding Layers. """ from __future__ import annotations import math from typing import Union import torch.nn.functional as F from torch import nn from torch import Tensor from onevision.factory import PADDING_LAYERS from onevision.type import Int2T __all__ = [ "autopad", "get_padding", "get_padding_value", "get_same_padding", "is_static_pad", "pad_same" ] # MARK: - Functional def autopad(kernel_size: Int2T, padding: Union[str, Int2T, None] = None): """Pad to `same`.""" if padding is None: padding = (kernel_size // 2 if isinstance(kernel_size, int) else [input // 2 for input in kernel_size]) # auto-pad return padding def pad_same( x : Tensor, kernel_size: Int2T, stride : Int2T, dilation : Int2T = (1, 1), value : float = 0 ): """Dynamically pad input with 'same' padding for conv with specified args. """ ih, iw = x.size()[-2:] pad_h = get_same_padding(ih, kernel_size[0], stride[0], dilation[0]) pad_w = get_same_padding(iw, kernel_size[1], stride[1], dilation[1]) if pad_h > 0 or pad_w > 0: x = F.pad( x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value ) return x def get_padding_value( padding: Union[str, Int2T, None], kernel_size: Int2T, **kwargs ) -> tuple[(tuple, int), bool]: dynamic = False if isinstance(padding, str): # For any string padding, the padding will be calculated for you, one # of three ways padding = padding.lower() if padding == "same": # TF compatible 'SAME' padding, has a performance and GPU memory # allocation impact if is_static_pad(kernel_size, **kwargs): # static case, no extra overhead padding = get_padding(kernel_size, **kwargs) else: # Dynamic 'SAME' padding, has runtime/GPU memory overhead padding = 0 dynamic = True elif padding == "valid": # 'VALID' padding, same as padding=0 padding = 0 else: # Default to PyTorch style 'same'-ish symmetric padding padding = get_padding(kernel_size, **kwargs) return padding, dynamic def get_padding(kernel_size: int, stride: int = 1, 
dilation: int = 1, **_) -> int: """Calculate symmetric padding for a convolution. FYI: `**_` mean ignore the rest of the args. """ padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 return padding def get_same_padding(x: int, kernel_size: int, stride: int, dilation: int) -> int: """Calculate asymmetric TensorFlow-like 'same' padding for a convolution. """ return max((math.ceil(x / stride) - 1) * stride + (kernel_size - 1) * dilation + 1 - x, 0) def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> bool: """Can `same` padding for given args be done statically?.""" return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0 # MARK: - Register PADDING_LAYERS.register(name="zero", module=nn.ZeroPad2d) PADDING_LAYERS.register(name="reflection", module=nn.ReflectionPad2d) PADDING_LAYERS.register(name="replication", module=nn.ReplicationPad2d)
29.885965
85
0.606692
441
3,407
4.537415
0.297052
0.084958
0.027986
0.029985
0.138931
0.067966
0.034983
0.034983
0.034983
0
0
0.019433
0.275022
3,407
113
86
30.150442
0.790688
0.221016
0
0.121212
0
0
0.040895
0
0
0
0
0
0
1
0.090909
false
0
0.121212
0
0.30303
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
419b7fd6852c60efa1fd82faf32167dd58882039
236
py
Python
demo/use_pickle.py
1987539447/start-python
06ee5eb30e7395cd8432e8e33d7209fa855f4ad9
[ "Apache-2.0" ]
null
null
null
demo/use_pickle.py
1987539447/start-python
06ee5eb30e7395cd8432e8e33d7209fa855f4ad9
[ "Apache-2.0" ]
null
null
null
demo/use_pickle.py
1987539447/start-python
06ee5eb30e7395cd8432e8e33d7209fa855f4ad9
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 # FileName:use_pickle.py # -*- coding: utf-8 -*- """ 通过pickle序列化对象""" import pickle bob = dict(name='Bob', age=20, score=88) data = pickle.dumps(bob) print(data) re_bob = pickle.loads(data) print(re_bob)
11.8
40
0.661017
36
236
4.25
0.694444
0.065359
0
0
0
0
0
0
0
0
0
0.029851
0.148305
236
19
41
12.421053
0.731343
0.338983
0
0
0
0
0.021277
0
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0.333333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
419d836e21b88898e0497e1625d6eddb5fed1199
5,349
py
Python
pycfmodel/model/cf_model.py
donatoaz/pycfmodel
1586e290b67d2347493dd4a77d2b0c8ee6c0936b
[ "Apache-2.0" ]
null
null
null
pycfmodel/model/cf_model.py
donatoaz/pycfmodel
1586e290b67d2347493dd4a77d2b0c8ee6c0936b
[ "Apache-2.0" ]
null
null
null
pycfmodel/model/cf_model.py
donatoaz/pycfmodel
1586e290b67d2347493dd4a77d2b0c8ee6c0936b
[ "Apache-2.0" ]
null
null
null
from datetime import date
from typing import Any, ClassVar, Collection, Dict, List, Optional, Type, Union

from pycfmodel.action_expander import expand_actions
from pycfmodel.constants import AWS_NOVALUE
from pycfmodel.model.base import CustomModel
from pycfmodel.model.parameter import Parameter
from pycfmodel.model.resources.generic_resource import GenericResource
from pycfmodel.model.resources.resource import Resource
from pycfmodel.model.resources.types import ResourceModels
from pycfmodel.model.types import Resolvable
from pycfmodel.resolver import _extended_bool, resolve


class CFModel(CustomModel):
    """
    Template that describes AWS infrastructure.

    Properties:

    - AWSTemplateFormatVersion
    - Conditions: Conditions that control behaviour of the template.
    - Description: Description for the template.
    - Mappings: A 3 level mapping of keys and associated values.
    - Metadata: Additional information about the template.
    - Outputs: Output values of the template.
    - Parameters: Parameters to the template.
    - Resources: Stack resources and their properties.
    - Rules
    - Transform: For serverless applications, specifies the version of the AWS Serverless Application Model
      (AWS SAM) to use.

    More info at [AWS Docs](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html)
    """

    AWSTemplateFormatVersion: Optional[date]
    Conditions: Optional[Dict] = {}
    Description: Optional[str] = None
    Mappings: Optional[Dict[str, Dict[str, Dict[str, Any]]]] = {}
    Metadata: Optional[Dict[str, Dict]] = None
    Outputs: Optional[Dict[str, Dict[str, Union[str, Dict]]]] = {}
    Parameters: Optional[Dict[str, Parameter]] = {}
    Resources: Dict[str, Resolvable[Union[ResourceModels, GenericResource]]] = {}
    Rules: Optional[Dict] = {}
    Transform: Optional[List]

    # Default values substituted for AWS pseudo parameters when resolving
    # intrinsic functions (callers may override any of them via extra_params).
    PSEUDO_PARAMETERS: ClassVar[Dict[str, Union[str, List[str]]]] = {
        # default pseudo parameters
        "AWS::AccountId": "123456789012",
        "AWS::NotificationARNs": [],
        "AWS::NoValue": AWS_NOVALUE,
        "AWS::Partition": "aws",
        "AWS::Region": "eu-west-1",
        "AWS::StackId": "",
        "AWS::StackName": "",
        "AWS::URLSuffix": "amazonaws.com",
    }

    def resolve(self, extra_params=None) -> "CFModel":
        """
        Resolve all intrinsic functions on the template.

        Arguments:
            extra_params: Values of parameters passed to the Cloudformation.

        Returns:
            A new CFModel.
        """
        # BUGFIX: copy the caller's dict — we pop() from it below, and the
        # original implementation mutated the argument in place.
        extra_params = {} if extra_params is None else dict(extra_params)
        # default parameters
        params = {}
        for key, parameter in self.Parameters.items():
            passed_value = extra_params.pop(key, None)
            ref_value = parameter.get_ref_value(passed_value)
            if ref_value is not None:
                params[key] = ref_value
        extended_parameters = {**self.PSEUDO_PARAMETERS, **params, **extra_params}
        dict_value = self.dict()
        # BUGFIX: always remove "Conditions" from dict_value. The original
        # only popped it when self.Conditions was truthy, so a falsy value
        # (e.g. {}) left the key in dict_value and the CFModel(**dict_value,
        # Conditions=...) call below raised "got multiple values for keyword
        # argument 'Conditions'".
        conditions = dict_value.pop("Conditions", None)
        if not conditions:
            conditions = {}
        resolved_conditions = {
            key: _extended_bool(resolve(value, extended_parameters, self.Mappings, {}))
            for key, value in conditions.items()
        }
        resources = dict_value.pop("Resources")
        resolved_resources = {
            key: resolve(value, extended_parameters, self.Mappings, resolved_conditions)
            for key, value in resources.items()
        }
        return CFModel(**dict_value, Conditions=resolved_conditions, Resources=resolved_resources)

    def expand_actions(self) -> "CFModel":
        """
        Returns a model which has expanded all wildcards (`*`) to get all implied actions for every resource.

        For example:\n
        - a model containing `s3:*` will be expanded to list all the possible S3 actions.
        - a model containing `s3:Get*` will be expanded to all the `Get*` actions only.

        This method can handle the cases of both `Action` and `NotAction`.

        [Known AWS Actions](https://github.com/Skyscanner/pycfmodel/blob/master/pycfmodel/cloudformation_actions.py).
        These known actions can be updated by executing:
        ```
        python3 scripts/generate_cloudformation_actions_file.py
        ```
        """
        dict_value = self.dict()
        resources = dict_value.pop("Resources")
        expanded_resources = {key: expand_actions(value) for key, value in resources.items()}
        return CFModel(**dict_value, Resources=expanded_resources)

    def resources_filtered_by_type(
        self, allowed_types: Collection[Union[str, Type[Resource]]]
    ) -> Dict[str, Dict[str, Resource]]:
        """
        Filtered resources based on types.

        Arguments:
            allowed_types: Collection of desired types (resource classes and/or
                `Type` strings; both kinds may be mixed in one collection).

        Returns:
            Dictionary where key is the logical id and value is the resource.
        """
        result = {}
        # Split class-based filters from string `Type` filters: isinstance()
        # needs a tuple of classes, while strings are matched on resource.Type.
        allowed_resource_classes = tuple(x for x in allowed_types if isinstance(x, type))
        for resource_name, resource in self.Resources.items():
            if isinstance(resource, allowed_resource_classes) or resource.Type in allowed_types:
                result[resource_name] = resource
        return result
39.043796
125
0.665919
602
5,349
5.815615
0.303987
0.021994
0.030848
0.015995
0.081691
0.051985
0.027992
0.027992
0.027992
0.027992
0
0.00442
0.238736
5,349
136
126
39.330882
0.855354
0.306786
0
0.057143
0
0
0.055572
0.00611
0
0
0
0
0
1
0.042857
false
0.028571
0.157143
0
0.414286
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
419de91687fa41f1d418876dd9614a95ee81af4f
39,788
py
Python
nessus/scans.py
tharvik/nessus
4551c319ac6cb3026ddb096a0f6f71f060a578ab
[ "CC0-1.0" ]
null
null
null
nessus/scans.py
tharvik/nessus
4551c319ac6cb3026ddb096a0f6f71f060a578ab
[ "CC0-1.0" ]
null
null
null
nessus/scans.py
tharvik/nessus
4551c319ac6cb3026ddb096a0f6f71f060a578ab
[ "CC0-1.0" ]
null
null
null
""" sub modules for everything about the scans """ from enum import Enum from uuid import uuid4 from typing import Iterable, Mapping, Union, Optional, MutableMapping from nessus.base import LibNessusBase from nessus.editor import NessusTemplate from nessus.model import lying_exist, lying_type, Object, lying_exist_and_type, allow_to_exist from nessus.permissions import NessusPermission from nessus.policies import NessusPolicy class NessusScanType(Enum): """ type of scan """ local = 'local' remote = 'remote' agent = 'agent' class NessusScanStatus(Enum): """ current status of scan lies: - `empty` was added because sometimes, nessus return it (but it is not documented) - `canceled` is returned instead of `cancelled` - `processing` was added because sometimes, nessus return it (but it is not documented) """ completed = 'completed' aborted = 'aborted' imported = 'imported' pending = 'pending' running = 'running' resuming = 'resuming' canceling = 'canceling' cancelled = 'cancelled' pausing = 'pausing' paused = 'paused' stopping = 'stopping' stopped = 'stopped' empty = 'empty' canceled = 'canceled' processing = 'processing' class NessusScan(Object): """ nessus is lying with: - `type` which is none but should be NessusScanType (str) - `status` which can be 'empty' but should be one of NessusScanStatus - `use_dashboard` which do not always exists """ def __init__(self, scan_id: int, uuid: str, name: str, type: NessusScanType, owner: str, enabled: bool, folder_id: int, read: bool, status: NessusScanStatus, shared: bool, user_permissions: int, creation_date: int, last_modification_date: int, control: bool, starttime: str, timezone: str, rrules: str, use_dashboard: bool) -> None: self.id = scan_id self.uuid = uuid self.name = name self.type = type self.owner = owner self.enabled = enabled self.folder_id = folder_id self.read = read self.status = status self.shared = shared self.user_permissions = user_permissions self.creation_date = creation_date self.last_modification_date = 
last_modification_date self.control = control self.starttime = starttime self.timezone = timezone self.rrules = rrules self.use_dashboard = use_dashboard def __eq__(self, other): return isinstance(other, NessusScan) and self.id == other.id def __hash__(self): return hash(self.id) @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScan': scan_id = int(json_dict['id']) uuid = str(json_dict['uuid']) name = str(json_dict['name']) scan_type = lying_type(json_dict['type'], NessusScanType) owner = str(json_dict['owner']) enabled = bool(json_dict['enabled']) folder_id = int(json_dict['folder_id']) read = bool(json_dict['read']) status = NessusScanStatus(json_dict['status']) shared = bool(json_dict['shared']) user_permissions = int(json_dict['user_permissions']) creation_date = int(json_dict['creation_date']) last_modification_date = int(json_dict['last_modification_date']) control = bool(json_dict['control']) starttime = str(json_dict['starttime']) timezone = str(json_dict['timezone']) rrules = str(json_dict['rrules']) use_dashboard = lying_exist(json_dict, 'use_dashboard', bool) return NessusScan(scan_id, uuid, name, scan_type, owner, enabled, folder_id, read, status, shared, user_permissions, creation_date, last_modification_date, control, starttime, timezone, rrules, use_dashboard) class NessusScanCreated(Object): """ lies: - `notification_filter_type` does not always exist - `tag_id` does not always exist """ def __init__(self, creation_date: int, custom_targets: str, default_permisssions: int, description: str, emails: str, scan_id: int, last_modification_date: int, name: str, notification_filter_type: str, notification_filters: str, owner: str, owner_id: int, policy_id: int, enabled: bool, rrules: str, scanner_id: int, shared: int, starttime: str, tag_id: int, timezone: str, scan_type: str, user_permissions: int, uuid: str, use_dashboard: bool) -> None: self.creation_date = creation_date self.custom_targets = custom_targets 
self.default_permisssions = default_permisssions self.description = description self.emails = emails self.id = scan_id self.last_modification_date = last_modification_date self.name = name self.notification_filter_type = notification_filter_type self.notification_filters = notification_filters self.owner = owner self.owner_id = owner_id self.policy_id = policy_id self.enabled = enabled self.rrules = rrules self.scanner_id = scanner_id self.shared = shared self.starttime = starttime self.tag_id = tag_id self.timezone = timezone self.type = scan_type self.user_permissions = user_permissions self.uuid = uuid self.use_dashboard = use_dashboard @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanCreated': creation_date = int(json_dict['creation_date']) custom_targets = str(json_dict['custom_targets']) default_permisssions = int(json_dict['default_permisssions']) description = str(json_dict['description']) emails = str(json_dict['emails']) scan_id = int(json_dict['id']) last_modification_date = int(json_dict['last_modification_date']) name = str(json_dict['name']) notification_filter_type = lying_exist(json_dict, 'notification_filter_type', str) notification_filters = str(json_dict['notification_filters']) owner = str(json_dict['owner']) owner_id = int(json_dict['owner_id']) policy_id = int(json_dict['policy_id']) enabled = bool(json_dict['enabled']) rrules = str(json_dict['rrules']) scanner_id = int(json_dict['scanner_id']) shared = int(json_dict['shared']) starttime = str(json_dict['starttime']) tag_id = lying_exist(json_dict, 'tag_id', int) timezone = str(json_dict['timezone']) scan_type = str(json_dict['type']) user_permissions = int(json_dict['user_permissions']) uuid = str(json_dict['uuid']) use_dashboard = bool(json_dict['use_dashboard']) return NessusScanCreated(creation_date, custom_targets, default_permisssions, description, emails, scan_id, last_modification_date, name, notification_filter_type, notification_filters, 
owner, owner_id, policy_id, enabled, rrules, scanner_id, shared, starttime, tag_id, timezone, scan_type, user_permissions, uuid, use_dashboard) class NessusScanDetailsInfo(Object): """ lies: - `edit_allowed` is not always existing - `policy` is not always existing - `pci_can_upload` is not always existing - `hasaudittrail` is not always existing - `folder_id` is sometimes None - `targets` is not always existing - `timestamp` is not always existing - `haskb` is not always existing - `uuid` is not always existing - `hostcount` is not always existing - `scan_end` is not always existing """ def __init__(self, acls: Iterable[NessusPermission], edit_allowed: bool, status: str, policy: str, pci_can_upload: bool, hasaudittrail: bool, scan_start: str, folder_id: int, targets: str, timestamp: int, object_id: int, scanner_name: str, haskb: bool, uuid: str, hostcount: int, scan_end: str, name: str, user_permissions: int, control: bool) -> None: self.acls = acls self.edit_allowed = edit_allowed self.status = status self.policy = policy self.pci_can_upload = pci_can_upload self.hasaudittrail = hasaudittrail self.scan_start = scan_start self.folder_id = folder_id self.targets = targets self.timestamp = timestamp self.object_id = object_id self.scanner_name = scanner_name self.haskb = haskb self.uuid = uuid self.hostcount = hostcount self.scan_end = scan_end self.name = name self.user_permissions = user_permissions self.control = control @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanDetailsInfo': acls = {NessusPermission.from_json(acl) for acl in json_dict['acls']} edit_allowed = lying_exist(json_dict, 'edit_allowed', bool) status = str(json_dict['status']) policy = lying_exist(json_dict, 'policy', str) pci_can_upload = lying_exist(json_dict, 'pci-can-upload', bool) hasaudittrail = lying_exist(json_dict, 'hasaudittrail', bool) scan_start = str(json_dict['scan_start']) folder_id = lying_type(json_dict['folder_id'], int) # it's None 
actually targets = lying_exist(json_dict, 'targets', str) timestamp = lying_exist(json_dict, 'timestamp', int) object_id = int(json_dict['object_id']) scanner_name = str(json_dict['scanner_name']) haskb = lying_exist(json_dict, 'haskb', bool) uuid = lying_exist(json_dict, 'uuid', str) hostcount = lying_exist(json_dict, 'hostcount', int) scan_end = lying_exist(json_dict, 'scan_end', str) name = str(json_dict['name']) user_permissions = int(json_dict['user_permissions']) control = bool(json_dict['control']) return NessusScanDetailsInfo(acls, edit_allowed, status, policy, pci_can_upload, hasaudittrail, scan_start, folder_id, targets, timestamp, object_id, scanner_name, haskb, uuid, hostcount, scan_end, name, user_permissions, control) class NessusScanHost(Object): """ lies: - `hostname` can be str """ def __init__(self, host_id: int, host_index: str, hostname: int, progress: str, critical: int, high: int, medium: int, low: int, info: int, totalchecksconsidered: int, numchecksconsidered: int, scanprogresstotal: int, scanprogresscurrent: int, score: int) -> None: self.host_id = host_id self.host_index = host_index self.hostname = hostname self.progress = progress self.critical = critical self.high = high self.medium = medium self.low = low self.info = info self.totalchecksconsidered = totalchecksconsidered self.numchecksconsidered = numchecksconsidered self.scanprogresstotal = scanprogresstotal self.scanprogresscurrent = scanprogresscurrent self.score = score @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanHost': host_id = int(json_dict['host_id']) host_index = str(json_dict['host_index']) hostname = lying_type(json_dict['hostname'], int, str) progress = str(json_dict['progress']) critical = int(json_dict['critical']) high = int(json_dict['high']) medium = int(json_dict['medium']) low = int(json_dict['low']) info = int(json_dict['info']) totalchecksconsidered = int(json_dict['totalchecksconsidered']) numchecksconsidered = 
int(json_dict['numchecksconsidered']) scanprogresstotal = int(json_dict['scanprogresstotal']) scanprogresscurrent = int(json_dict['scanprogresscurrent']) score = int(json_dict['score']) return NessusScanHost(host_id, host_index, hostname, progress, critical, high, medium, low, info, totalchecksconsidered, numchecksconsidered, scanprogresstotal, scanprogresscurrent, score) class NessusScanNote(Object): def __init__(self, title: str, message: str, severity: int) -> None: self.title = title self.message = message self.severity = severity @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanNote': title = str(json_dict['title']) message = str(json_dict['message']) severity = int(json_dict['severity']) return NessusScanNote(title, message, severity) class NessusScanRemediation(Object): def __init__(self, value: str, remediation: str, hosts: int, vulns: int) -> None: self.value = value self.remediation = remediation self.hosts = hosts self.vulns = vulns @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanRemediation': value = str(json_dict['value']) remediation = str(json_dict['remediation']) hosts = int(json_dict['hosts']) vulns = int(json_dict['vulns']) return NessusScanRemediation(value, remediation, hosts, vulns) class NessusScanDetailsRemediations(Object): """ lies: - `remediations` can be None """ def __init__(self, remediations: Iterable[NessusScanRemediation], num_hosts: int, num_cves: int, num_impacted_hosts: int, num_remediated_cves: int) -> None: self.remediations = remediations self.num_hosts = num_hosts self.num_cves = num_cves self.num_impacted_hosts = num_impacted_hosts self.num_remediated_cves = num_remediated_cves @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanDetailsRemediations': remediations = {NessusScanRemediation(remediation) for remediation in lying_type(json_dict['remediations'], list, lambda x: None, list())} num_hosts = 
int(json_dict['num_hosts']) num_cves = int(json_dict['num_cves']) num_impacted_hosts = int(json_dict['num_impacted_hosts']) num_remediated_cves = int(json_dict['num_remediated_cves']) return NessusScanDetailsRemediations(remediations, num_hosts, num_cves, num_impacted_hosts, num_remediated_cves) class NessusScanVulnerability(Object): def __init__(self, plugin_id: int, plugin_name: str, plugin_family: str, count: int, vuln_index: int, severity_index: int) -> None: self.plugin_id = plugin_id self.plugin_name = plugin_name self.plugin_family = plugin_family self.count = count self.vuln_index = vuln_index self.severity_index = severity_index @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanVulnerability': plugin_id = int(json_dict['plugin_id']) plugin_name = str(json_dict['plugin_name']) plugin_family = str(json_dict['plugin_family']) count = int(json_dict['count']) vuln_index = int(json_dict['vuln_index']) severity_index = int(json_dict['severity_index']) return NessusScanVulnerability(plugin_id, plugin_name, plugin_family, count, vuln_index, severity_index) class NessusScanHistory(Object): def __init__(self, history_id: int, uuid: str, owner_id: int, status: str, creation_date: int, last_modification_date: int) -> None: self.history_id = history_id self.uuid = uuid self.owner_id = owner_id self.status = status self.creation_date = creation_date self.last_modification_date = last_modification_date @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanHistory': history_id = int(json_dict['history_id']) uuid = str(json_dict['uuid']) owner_id = int(json_dict['owner_id']) status = str(json_dict['status']) creation_date = int(json_dict['creation_date']) last_modification_date = int(json_dict['last_modification_date']) return NessusScanHistory(history_id, uuid, owner_id, status, creation_date, last_modification_date) class NessusScanFilterControl(Object): """ lies: - `readable_regest` is not always 
there - `regex` is not always there - `options` is not always there """ # FIXME what is the type of `options`? def __init__(self, type: str, readable_regest: str, regex: str, options: Iterable) -> None: self.type = type self.readable_regest = readable_regest self.regex = regex self.options = options @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanFilterControl': type = str(json_dict['type']) readable_regest = lying_exist(json_dict, 'readable_regest', str) regex = lying_exist(json_dict, 'regex', str) options = lying_exist(json_dict, 'options', str) return NessusScanFilterControl(type, readable_regest, regex, options) class NessusScanFilterOperator(Enum): eq = 'eq' neq = 'neq' lt = 'lt' gt = 'gt' match = 'match' nmatch = 'nmatch' date_eq = 'date-eq' date_neq = 'date-neq' date_lt = 'date-lt' date_gt = 'date-gt' class NessusScanFilter(Object): def __init__(self, name: str, readable_name: str, operators: Iterable[NessusScanFilterOperator], control: NessusScanFilterControl) -> None: self.name = name self.readable_name = readable_name self.operators = operators self.control = control @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanFilter': name = str(json_dict['name']) readable_name = str(json_dict['readable_name']) operators = {NessusScanFilterOperator(operator) for operator in json_dict['operators']} control = NessusScanFilterControl.from_json(json_dict['control']) return NessusScanFilter(name, readable_name, operators, control) class NessusScanDetails(Object): """ we currently drop the `dashboard` field, is it needed? 
lies: - `hosts` not always existing - `comphosts` not always existing - `notes` not always existing - `notes` is sometimes None - `remediations` not always existing - `vulnerabilities` not always existing - `compliance` not always existing - `history` is sometimes None - `filters` not always existing """ def __init__(self, info: NessusScanDetailsInfo, hosts: Iterable[NessusScanHost], comphosts: Iterable[NessusScanHost], notes: Iterable[NessusScanNote], remediations: NessusScanDetailsRemediations, vulnerabilites: Iterable[NessusScanVulnerability], compliance: Iterable[NessusScanVulnerability], history: Iterable[NessusScanHistory], filters: Iterable[NessusScanFilter]) -> None: self.info = info self.hosts = hosts self.comphosts = comphosts self.notes = notes self.remediations = remediations self.vulnerabilites = vulnerabilites self.compliance = compliance self.history = history self.filters = filters @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanDetails': info = NessusScanDetailsInfo.from_json(json_dict['info']) hosts = {NessusScanHost.from_json(host) for host in lying_exist(json_dict, 'hosts', list)} comphosts = {NessusScanHost.from_json(host) for host in lying_exist(json_dict, 'comphosts', list)} notes = {NessusScanNote.from_json(note) for note in lying_exist_and_type(json_dict, 'notes', list, lambda x: list(), list())} remediations = lying_exist(json_dict, 'remediations', NessusScanDetailsRemediations.from_json, None) vulnerabilities = {NessusScanVulnerability.from_json(vulnerability) for vulnerability in lying_exist(json_dict, 'vulnerabilities', list)} compliance = {NessusScanVulnerability.from_json(vulnerability) for vulnerability in lying_exist(json_dict, 'compliance', list)} history = {NessusScanHistory.from_json(history) for history in lying_type(json_dict['history'], list, lambda x: list())} filters = {NessusScanFilter.from_json(filtered) for filtered in lying_exist(json_dict, 'filters', list)} return 
NessusScanDetails(info, hosts, comphosts, notes, remediations, vulnerabilities, compliance, history, filters) class NessusScanHostDetailsInfo(Object): """ lies: - `mac-address` not always existing - `host-fqdn` not always existing """ def __init__(self, host_start: str, mac_address: str, host_fqdn: str, host_end: str, operating_system: str, host_ip: str) -> None: self.host_start = host_start self.mac_address = mac_address self.host_fqdn = host_fqdn self.host_end = host_end self.operating_system = operating_system self.host_ip = host_ip @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanHostDetailsInfo': host_start = str(json_dict['host_start']) mac_address = lying_exist(json_dict, 'mac-address', str) host_fqdn = lying_exist(json_dict, 'host-fqdn', str) host_end = str(json_dict['host_end']) operating_system = lying_exist(json_dict, 'operating-system', str) host_ip = str(json_dict['host-ip']) return NessusScanHostDetailsInfo(host_start, mac_address, host_fqdn, host_end, operating_system, host_ip) class NessusScanHostCompliance(Object): def __init__(self, host_id: int, hostname: str, plugin_id: int, plugin_name: str, plugin_family: str, count: int, severity_index: int, severity: int) -> None: self.host_id = host_id self.hostname = hostname self.plugin_id = plugin_id self.plugin_name = plugin_name self.plugin_family = plugin_family self.count = count self.severity_index = severity_index self.severity = severity @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanHostCompliance': host_id = int(json_dict['host_id']) hostname = str(json_dict['hostname']) plugin_id = int(json_dict['plugin_id']) plugin_name = str(json_dict['plugin_name']) plugin_family = str(json_dict['plugin_family']) count = int(json_dict['count']) severity_index = int(json_dict['severity_index']) severity = int(json_dict['severity']) return NessusScanHostCompliance(host_id, hostname, plugin_id, plugin_name, plugin_family, count, 
severity_index, severity) class NessusScanHostVulnerability(Object): def __init__(self, host_id: int, hostname: str, plugin_id: int, plugin_name: str, plugin_family: str, count: int, vuln_index: int, severity_index: int, severity: int) -> None: self.host_id = host_id self.hostname = hostname self.plugin_id = plugin_id self.plugin_name = plugin_name self.plugin_family = plugin_family self.count = count self.vuln_index = vuln_index self.severity_index = severity_index self.severity = severity @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanHostVulnerability': host_id = int(json_dict['host_id']) hostname = str(json_dict['hostname']) plugin_id = int(json_dict['plugin_id']) plugin_name = str(json_dict['plugin_name']) plugin_family = str(json_dict['plugin_family']) count = int(json_dict['count']) vuln_index = int(json_dict['vuln_index']) severity_index = int(json_dict['severity_index']) severity = int(json_dict['severity']) return NessusScanHostVulnerability(host_id, hostname, plugin_id, plugin_name, plugin_family, count, vuln_index, severity_index, severity) class NessusScanHostDetails(Object): def __init__(self, info: NessusScanHostDetailsInfo, compliance: Iterable[NessusScanHostCompliance], vulnerabilities: Iterable[NessusScanHostVulnerability]) -> None: self.info = info self.compliance = compliance self.vulnerabilities = vulnerabilities @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanHostDetails': info = NessusScanHostDetailsInfo.from_json(json_dict['info']) compliance = {NessusScanHostCompliance.from_json(compliance) for compliance in json_dict['compliance']} vulnerabilities = {NessusScanHostVulnerability.from_json(vulnerability) for vulnerability in json_dict['vulnerabilities']} return NessusScanHostDetails(info, compliance, vulnerabilities) class NessusScanPluginOutputInfoDescriptionAttributesRiskInformation(Object): """ lies: - there is more than simply risk_factor - 
`cvss_base_score`: str (but could be float, we use that) - `cvss_score`: str (but could be float, we use that) - `cvss_vector`: str - `cvss_temporal_score`: str (but could be float, we use that) - `cvss_temporal_vector`: str """ def __init__(self, risk_factor: str, cvss_base_score: Optional[float], cvss_score: Optional[float], cvss_vector: Optional[str], cvss_temporal_score: Optional[float], cvss_temporal_vector: Optional[str]) -> None: self.risk_factor = risk_factor self.cvss_base_score = cvss_base_score self.cvss_score = cvss_score self.cvss_vector = cvss_vector self.cvss_temporal_score = cvss_temporal_score self.cvss_temporal_vector = cvss_temporal_vector @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) \ -> 'NessusScanPluginOutputInfoDescriptionAttributesRiskInformation': risk_factor = str(json_dict['risk_factor']) cvss_base_score = allow_to_exist(json_dict, 'cvss_base_score', float) cvss_score = allow_to_exist(json_dict, 'cvss_score', float) cvss_vector = allow_to_exist(json_dict, 'cvss_vector', str) cvss_temporal_score = allow_to_exist(json_dict, 'cvss_temporal_score', float) cvss_temporal_vector = allow_to_exist(json_dict, 'cvss_temporal_vector', str) args = [risk_factor, cvss_base_score, cvss_score, cvss_vector, cvss_temporal_score, cvss_temporal_vector] return NessusScanPluginOutputInfoDescriptionAttributesRiskInformation(*args) class NessusScanPluginOutputInfoDescriptionAttributesPluginInformation(Object): def __init__(self, plugin_id: int, plugin_type: str, plugin_family: str, plugin_modification_date: str) -> None: self.plugin_id = plugin_id self.plugin_type = plugin_type self.plugin_family = plugin_family self.plugin_modification_date = plugin_modification_date @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) \ -> 'NessusScanPluginOutputInfoDescriptionAttributesPluginInformation': plugin_id = int(json_dict['plugin_id']) plugin_type = str(json_dict['plugin_type']) plugin_family = 
str(json_dict['plugin_family']) plugin_modification_date = str(json_dict['plugin_modification_date']) return NessusScanPluginOutputInfoDescriptionAttributesPluginInformation(plugin_id, plugin_type, plugin_family, plugin_modification_date) class NessusScanPluginOutputInfoDescriptionAttributesRefInformationRefValues(Object): def __init__(self, value: Iterable[str]) -> None: self.value = value # TODO can be tight by type @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) \ -> 'NessusScanPluginOutputInfoDescriptionAttributesRefInformationRefValues': value = {str(value) for value in json_dict['value']} return NessusScanPluginOutputInfoDescriptionAttributesRefInformationRefValues(value) class NessusScanPluginOutputInfoDescriptionAttributesRefInformationRef(Object): def __init__(self, name: str, values: NessusScanPluginOutputInfoDescriptionAttributesRefInformationRefValues, url: Optional[str]) -> None: self.name = name self.values = values self.url = url @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) \ -> 'NessusScanPluginOutputInfoDescriptionAttributesRefInformationRef': name = str(json_dict['name']) # TODO can be tight by enum? 
values = NessusScanPluginOutputInfoDescriptionAttributesRefInformationRefValues.from_json(json_dict['values']) url = allow_to_exist(json_dict, 'url', str) return NessusScanPluginOutputInfoDescriptionAttributesRefInformationRef(name, values, url) class NessusScanPluginOutputInfoDescriptionAttributesRefInformation(Object): def __init__(self, ref: Iterable[NessusScanPluginOutputInfoDescriptionAttributesRefInformationRef]) -> None: self.ref = ref @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) \ -> 'NessusScanPluginOutputInfoDescriptionAttributesRefInformation': ref = {NessusScanPluginOutputInfoDescriptionAttributesRefInformationRef.from_json(ref) for ref in json_dict['ref']} return NessusScanPluginOutputInfoDescriptionAttributesRefInformation(ref) class NessusScanPluginOutputInfoDescriptionAttributes(Object): """ lies: - `ref_information` is not documented but is present """ def __init__(self, risk_information: NessusScanPluginOutputInfoDescriptionAttributesRiskInformation, plugin_name: str, plugin_information: NessusScanPluginOutputInfoDescriptionAttributesPluginInformation, solution: Optional[str], fname: str, synopsis: str, description: str, ref_information: Optional[NessusScanPluginOutputInfoDescriptionAttributesRefInformation]) -> None: self.risk_information = risk_information self.plugin_name = plugin_name self.plugin_information = plugin_information self.solution = solution self.fname = fname self.synopsis = synopsis self.description = description self.ref_information = ref_information @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanPluginOutputInfoDescriptionAttributes': risk_information = \ NessusScanPluginOutputInfoDescriptionAttributesRiskInformation.from_json(json_dict['risk_information']) plugin_name = str(json_dict['plugin_name']) plugin_information = \ NessusScanPluginOutputInfoDescriptionAttributesPluginInformation.from_json(json_dict['plugin_information']) if json_dict['solution'] 
is None: solution = None else: solution = json_dict['solution'] fname = str(json_dict['fname']) synopsis = str(json_dict['synopsis']) description = str(json_dict['description']) ref_information = allow_to_exist(json_dict, 'ref_information', NessusScanPluginOutputInfoDescriptionAttributesRefInformation.from_json) return NessusScanPluginOutputInfoDescriptionAttributes(risk_information, plugin_name, plugin_information, solution, fname, synopsis, description, ref_information) class NessusScanPluginOutputInfoDescription(Object): def __init__(self, severity: int, pluginname: str, pluginattributes: NessusScanPluginOutputInfoDescriptionAttributes, pluginfamily: str, pluginid: int) -> None: self.severity = severity self.pluginname = pluginname self.pluginattributes = pluginattributes self.pluginfamily = pluginfamily self.pluginid = pluginid @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanPluginOutputInfoDescription': severity = int(json_dict['severity']) pluginname = str(json_dict['pluginname']) pluginattributes = NessusScanPluginOutputInfoDescriptionAttributes.from_json(json_dict['pluginattributes']) pluginfamily = str(json_dict['pluginfamily']) pluginid = int(json_dict['pluginid']) return NessusScanPluginOutputInfoDescription(severity, pluginname, pluginattributes, pluginfamily, pluginid) class Transport(Enum): icmp = 'icmp' tcp = 'tcp' udp = 'udp' class Protocol(Enum): ajp13 = 'ajp13' cifs = 'cifs' dns = 'dns' irc = 'irc' ftp = 'ftp' mysql = 'mysql' netbios_ns = 'netbios-ns' postgresql = 'postgresql' rlogin = 'rlogin' rmi_registry = 'rmi_registry' rpc_portmapper = 'rpc-portmapper' rpc_nfs = 'rpc-nfs' rpc_nlockmgr = 'rpc-nlockmgr' rpc_status = 'rpc-status' rpc_mountd = 'rpc-mountd' rsh = 'rsh' smb = 'smb' smtp = 'smtp' ssh = 'ssh' telnet = 'telnet' tftpd = 'tftpd' vnc = 'vnc' wild_shell = 'wild_shell' www = 'www' x11 = 'x11' class NessusScanPluginOutputPort(Object): def __init__(self, number: int, transport: Transport, protocol: 
Optional[Protocol], hosts: Iterable[str]) -> None: self.number = number self.transport = transport self.protocol = protocol self.hosts = hosts @staticmethod def from_json(port_packed: str, json_list: Iterable[MutableMapping[str, Union[int, str, bool]]]) \ -> 'NessusScanPluginOutputPort': port_splited = port_packed.split(' / ') number = int(port_splited[0]) transport = Transport(port_splited[1]) protocol = (port_splited[2] != '' and Protocol(port_splited[2])) or None hosts = {host['hostname'] for host in json_list} return NessusScanPluginOutputPort(number=number, transport=transport, protocol=protocol, hosts=hosts) class NessusScanPluginOutput(Object): def __init__(self, plugin_output: str, hosts: str, severity: int, ports) -> None: self.plugin_output = plugin_output self.hosts = hosts self.severity = severity self.ports = ports @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanPluginOutput': plugin_output = str(json_dict['plugin_output']) hosts = str(json_dict['hosts']) severity = int(json_dict['severity']) ports = [NessusScanPluginOutputPort.from_json(k, v) for k, v in json_dict['ports'].items()] return NessusScanPluginOutput(plugin_output, hosts, severity, ports) class NessusScanPluginOutputInfo(Object): def __init__(self, plugindescription: NessusScanPluginOutputInfoDescription) -> None: self.plugindescription = plugindescription @staticmethod def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanPluginOutputInfo': plugindescription = NessusScanPluginOutputInfoDescription.from_json(json_dict['plugindescription']) return NessusScanPluginOutputInfo(plugindescription) class NessusScanPluginOutputDetails(Object): """ lies: - `outputs` is typo'ed as `output` """ def __init__(self, info: NessusScanPluginOutputInfo, output: Iterable[NessusScanPluginOutput]) -> None: self.info = info self.output = output @staticmethod def from_json(json_dict: MutableMapping[str, Union[int, str, bool]]) -> 
'NessusScanPluginOutputDetails': info = NessusScanPluginOutputInfo.from_json(json_dict.pop('info')) output = {NessusScanPluginOutput.from_json(output) for output in json_dict.pop('outputs')} return NessusScanPluginOutputDetails(info, output) class LibNessusScans(LibNessusBase): """ module handling /scans """ # pylint: disable=bad-whitespace def create(self, policy: NessusPolicy, name: Optional[str] = None, template: Optional[NessusTemplate] = None, default_targets: Iterable[str] = ('localhost',)) -> NessusScanCreated: """ Creates a scan. :param policy: policy to use :param name: name you want for the scan :param template: template will be taken from policy if not given :param default_targets: need to have at least an element :return: created scan """ if name is None: name = str(uuid4()) if template is None: template_uuid = policy.template_uuid else: template_uuid = template.uuid json = { 'uuid': template_uuid, 'settings': { 'name': name, 'policy_id': policy.id, 'enabled': False, 'text_targets': ','.join(default_targets), }, } ans = self._post('scans', json=json) return NessusScanCreated.from_json(ans.json()['scan']) def list(self) -> Iterable[NessusScan]: ans = self._get('scans') if ans.json()['scans'] is None: return set() return {NessusScan.from_json(elem) for elem in ans.json()['scans']} def delete(self, scan: NessusScan) -> None: """ Deletes a scan. Scans in running, paused or stopping states can not be deleted. :param scan: the soon-to-be-deleted """ url = 'scans/{}'.format(scan.id) self._delete(url) def launch(self, scan: NessusScan, alt_targets: Optional[Iterable[str]] = None) -> str: """ Launches a scan. 
:param scan: the soon-to-be-launch :param alt_targets: target to scan, if not given, default to the one set during scan creation :return: uuid of the launched scan """ url = 'scans/{scan_id}/launch'.format(scan_id=scan.id) json = alt_targets and {'alt_targets': alt_targets} ans = self._post(url, json=json) return ans.json()['scan_uuid'] def details(self, scan: NessusScan) -> NessusScanDetails: url = 'scans/{scan_id}'.format(scan_id=scan.id) ans = self._get(url) return NessusScanDetails.from_json(ans.json()) def host_details(self, scan: NessusScan, host: NessusScanHost) -> NessusScanHostDetails: url = 'scans/{scan_id}/hosts/{host_id}'.format(scan_id=scan.id, host_id=host.host_id) ans = self._get(url) return NessusScanHostDetails.from_json(ans.json()) def plugin_output(self, scan: NessusScan, host: NessusScanHost, plugin_id: int) -> NessusScanPluginOutputDetails: url = 'scans/{scan_id}/hosts/{host_id}/plugins/{plugin_id}'.format(scan_id=scan.id, host_id=host.host_id, plugin_id=plugin_id) ans = self._get(url) return NessusScanPluginOutputDetails.from_json(ans.json())
41.970464
120
0.663894
4,298
39,788
5.905537
0.085621
0.064298
0.024703
0.022063
0.310299
0.237452
0.196793
0.169963
0.160586
0.158577
0
0.000462
0.238238
39,788
947
121
42.014784
0.836979
0.068111
0
0.290087
0
0
0.08506
0.026309
0
0
0
0.002112
0
1
0.091837
false
0
0.01312
0.002915
0.287172
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41a06f10c100f3cd38eb1a9e5dbb23f8546f5139
10,444
py
Python
panopticon/wme.py
scyrusm/panopticon
bb28deffb97fd7c983a5abb8c2626c24d9f25e48
[ "BSD-3-Clause" ]
3
2021-01-14T13:38:32.000Z
2021-09-07T12:18:48.000Z
panopticon/wme.py
scyrusm/panopticon
bb28deffb97fd7c983a5abb8c2626c24d9f25e48
[ "BSD-3-Clause" ]
null
null
null
panopticon/wme.py
scyrusm/panopticon
bb28deffb97fd7c983a5abb8c2626c24d9f25e48
[ "BSD-3-Clause" ]
2
2020-12-22T03:15:27.000Z
2020-12-22T03:16:50.000Z
""" wme.py ==================================== wme """ # second version import numpy as np from tqdm import tqdm import pandas as pd from scipy import stats from itertools import islice from scipy.sparse import coo_matrix, save_npz from panopticon.utilities import get_valid_gene_info def get_list_of_gene_windows(genes, window_size=400, window_step=50, release=102, species='homo sapiens'): """ Parameters ---------- genes : param window_size: (Default value = 200) window_step : Default value = 1) window_size : (Default value = 200) Returns ------- """ gene_names, gene_contigs, gene_starts, gene_ends = get_valid_gene_info(genes, release=release, species=species) gene_df = pd.DataFrame(gene_names) gene_df.columns = ['name'] gene_df['contig'] = gene_contigs gene_df['start'] = gene_starts gene_df['end'] = gene_ends gene_df_groupby = gene_df.set_index('name').sort_values('start').groupby( 'contig') list_of_gene_windows = [] for chromosome in gene_df['contig'].unique(): list_of_gene_windows += [ list(gene_df_groupby.groups[chromosome])[i:(i + window_size)] for i in np.arange( 0, len(gene_df_groupby.groups[chromosome]) - window_size + 1, window_step) ] return list_of_gene_windows def robust_mean_windowed_expressions(genes, list_of_gene_windows, expression_data, upper_cut=5, windsor=False, tqdm_desc=''): """ Produces an arithmetic mean over expression in windows determined by list_of_gene_windows. Highest-expression genes in each window are discarded. Can be made more memory-friendly, by implementing a map function over expression_data--I still haven't done this. S Markson 4 June 2020. 
Parameters ---------- genes : param list_of_gene_windows: expression_data : param upper_cut: (Default value = 0) windsor : Default value = False) tqdm_desc : Default value = '') list_of_gene_windows : upper_cut : (Default value = 5) Returns ------- """ gene_to_index = {gene: i for i, gene in enumerate(genes)} mean_window_expressions = np.zeros((len(list_of_gene_windows), expression_data.shape[1])) with tqdm(total=len(list_of_gene_windows), desc=tqdm_desc) as pbar: for i, window in enumerate(list_of_gene_windows): window_expression_indices = np.array( [gene_to_index[gene] for gene in window]) exprs = expression_data[window_expression_indices, :] robust_cell_means = np.zeros(exprs.shape[1]) for icell in range(exprs.shape[1]): cell_exprs = exprs[:, icell] truncated = np.sort(cell_exprs)[::-1][upper_cut::] if windsor: robust_cell_means[icell] = np.hstack( ([truncated[0]] * upper_cut, truncated)).mean() else: robust_cell_means[icell] = truncated.mean() mean_window_expressions[i, :] = robust_cell_means pbar.update(1) return mean_window_expressions def get_windowed_mean_expression(loom, list_of_gene_windows, patient_column='Patient_ID', patient=0, cell_type_column=None, cell_type=None, complexity_column='nGene', complexity_cutoff=0, upper_cut=5, log2=False): """ THIS IS DEPRECATED--S. 
Markson 4 June 2020 Parameters ---------- genes : param metadata: expression_data : param list_of_gene_windows: patient : param cell_type: (Default value = 'tumor') complexity_cutoff : Default value = 1000) cell_type_col_name : Default value = 'cell.type') patient_col_name : Default value = 'patient_ID') complexity_col_name : Default value = 'nGene') metadata : list_of_gene_windows : cell_type : (Default value = 'tumor') patient_columns : (Default value = 'Patient_ID') cell_type_column : (Default value = 'cell.type') Returns ------- """ # Nota bene: patient id gets cast to string below genes = loom.ra['gene'] # This is very inefficient--make a general function for loom copy-over metadata = pd.DataFrame(loom.ca['patient_ID']) metadata.columns = ['patient_ID'] metadata['complexity'] = loom.ca['complexity'] metadata['cell_type'] = loom.ca['cell_type'] # metadata['cell_name'] = loom.ca['cell_names'] # I hate this if complexity_cutoff > 0: metadata = metadata[metadata[complexity_column]>complexity_cutoff] if type(patient) not in [tuple, list]: patient = [str(patient)] else: patient = list(patient) patient = [str(x) for x in patient] print("debug", patient) if cell_type_column==None and cell_type == None: relevant_indices = metadata[(metadata[patient_column].astype(str).isin(patient)) ].index.values else: relevant_indices = metadata[(metadata[cell_type_column].astype(str) == str(cell_type)) & (metadata[patient_column].astype(str).isin(patient))].index.values if log2: relevant_expression_data = 2**loom[:, relevant_indices] - 1 else: relevant_expression_data = loom[:, relevant_indices] mean_window_expressions = robust_mean_windowed_expressions( genes, list_of_gene_windows, relevant_expression_data, tqdm_desc='Calculating Mean Window Expressions, with "Robustification"', upper_cut=upper_cut ) return mean_window_expressions, metadata.loc[relevant_indices] def get_ranks(mean_window_expressions): """ Parameters ---------- mean_window_expressions : Returns ------- """ 
mean_window_expression_ranks = np.zeros(mean_window_expressions.shape) for icell in range(mean_window_expressions.shape[1]): mean_window_expression_ranks[:, icell] = stats.rankdata( mean_window_expressions[:, icell]) return mean_window_expression_ranks def convert_to_sparse(dense_file, sparse_file=None, genes_not_present=False, genelist_file=None, delimiter='\t'): """ Parameters ---------- dense_file : sparse_file : (Default value = None) genelist_file : (Default value = None) delimiter : (Default value = '\t') Returns ------- """ N = 20 iterator = 0 row = [] col = [] data = [] genes = [] with open(dense_file, 'r') as infile: firstline = islice(infile, 1) headings = np.genfromtxt(firstline, dtype=None) with tqdm( unit=' rows completed', unit_scale=True, unit_divisor=1024, desc='Converting dense matrix to sparse: ') as pbar: while True: gen = islice(infile, N) chunk = np.genfromtxt(gen, dtype=str, delimiter=delimiter) if genes_not_present: expressions = chunk.astype(float) else: genes += list(chunk[:, 0]) expressions = chunk[:, 1::].astype(float) #print(chunk) x, y = np.where(expressions > 0) for i, j in zip(x, y): row.append(i + iterator) col.append(j) data.append(expressions[i, j]) if chunk.shape[0] < N: iterator += chunk.shape[0] break else: iterator += N pbar.update(N) expr_mat = coo_matrix((data, (row, col)), shape=(iterator, len(headings))) if sparse_file: save_npz(sparse_file, expr_mat) if genelist_file and not genes_not_present: np.savetxt(genelist_file,np.array(genes),delimiter=',',fmt='%s') return expr_mat, genes def get_masked_wme(loom, layername, mask=None, gene_ra='gene',species='homo sapiens', release=102, window_step=50, window_size=50, return_principal_components=None, upper_cut=0, mask_option='load_full'): from panopticon.wme import get_list_of_gene_windows, robust_mean_windowed_expressions from tqdm import tqdm gene_windows = get_list_of_gene_windows(loom.ra[gene_ra], species=species, window_step=window_step, window_size=window_size, 
release=release) if mask is None: X = loom[layername][:,:] else: if mask_option == 'load_full': # this is to address an h5py performance bog X = loom[layername][:,:][:,mask] elif mask_option == 'mask_first': X = loom[layername][:,mask] #if mask_option not in ['load_full','mask_first','scan']: else: raise Exception("mask_option must be one of: load_full, mask_first, scan") if mask_option == 'scan': mwe_parts = [] for (ix, selection, view) in loom.scan(items=mask, axis=1): mwe_parts.append(robust_mean_windowed_expressions(view.ra[gene_ra], gene_windows, view[layername][:,:], upper_cut=upper_cut, ).T) mwe = np.vstack(mwe_parts).T else: mwe = robust_mean_windowed_expressions(loom.ra[gene_ra], gene_windows, X, upper_cut=upper_cut, ) if return_principal_components is not None: if type(return_principal_components)!=int: raise Exception("type of return_principal_components must be None or int") from sklearn.decomposition import PCA pca = PCA(n_components=return_principal_components) return pca.fit_transform(mwe.T) else: return mwe.T
33.155556
203
0.578514
1,186
10,444
4.840641
0.215008
0.038321
0.029612
0.05034
0.127156
0.06201
0.048772
0.048772
0.035882
0
0
0.010108
0.317982
10,444
314
204
33.261147
0.795873
0.188051
0
0.110465
0
0
0.050192
0.003338
0
0
0
0
0
1
0.034884
false
0
0.05814
0
0.133721
0.005814
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41a106d05012e4ff5dbc04ccdc03a0e70f7b8fee
4,669
py
Python
cogs/rr.py
D3monEmper0r/CA-Discord-Bot
1d38e00582cd0ea84af72a39daedc963256fd57a
[ "MIT" ]
null
null
null
cogs/rr.py
D3monEmper0r/CA-Discord-Bot
1d38e00582cd0ea84af72a39daedc963256fd57a
[ "MIT" ]
1
2021-03-26T15:41:07.000Z
2021-03-26T15:41:07.000Z
cogs/rr.py
D3monEmper0r/CA-Discord-Bot
1d38e00582cd0ea84af72a39daedc963256fd57a
[ "MIT" ]
null
null
null
##### Imports ##### import discord import sqlite3 from .__init__ import c from discord.ext import commands def create(db): conn = sqlite3.connect(db) c = conn.cursor() newDbTable = """CREATE TABLE IF NOT EXISTS reactionRole(role TEXT PRIMARY KEY, emote TEXT UNIQUE)""" c.execute(newDbTable) conn.commit() conn.close() def fill(db, role, emote): conn = sqlite3.connect(db) c = conn.cursor() c.execute(f'INSERT INTO reactionRole VALUES ("{role}", "{emote}")') conn.commit() conn.close() def delete(db, role): conn = sqlite3.connect(db) c = conn.cursor() c.execute(f'DELETE FROM reactionRole WHERE role = "{role}"') conn.commit() conn.close() def data(db): conn = sqlite3.connect(db) c = conn.cursor() c.execute(f'SELECT * FROM reactionRole') result = c.fetchall() conn.close() return(result) def search(db, emote): conn = sqlite3.connect(db) c = conn.cursor() c.execute(f'SELECT * FROM reactionRole WHERE emote = "{emote}"') result = c.fetchall() conn.close() return(result) class ReactRole(commands.Cog): ##### Initalization ##### def __init__(self, client): self.client = client ##### events ##### @commands.Cog.listener() async def on_raw_reaction_add(self, payload): reactUser = payload.member g = self.client.get_guild(c.serverId) emoji = payload.emoji tmp = search(c.DB, emoji)[0][0] for role in await g.fetch_roles(): if role.mention == tmp: r = role if r != None and payload.channel_id == c.reactRoleId: if reactUser != self.client.user: await reactUser.add_roles(r) @commands.Cog.listener() async def on_raw_reaction_remove(self, payload): reactUser = discord.utils.get(self.client.get_all_members(), id=payload.user_id) g = self.client.get_guild(c.serverId) emoji = payload.emoji tmp = search(c.DB, emoji)[0][0] for role in await g.fetch_roles(): if role.mention == tmp: r = role if r != None and payload.channel_id == c.reactRoleId: if reactUser != self.client.user: await reactUser.remove_roles(r) ##### commands ##### @commands.has_any_role('Café Antik Geschäftsführung', 'Jonnys Bot 
test') @commands.command() async def rrCreate(self, ctx): create(c.DB) await ctx.channel.purge(limit = 1) embed = discord.Embed(title='React to give yourself a role.', description='', color=0xa0089b) await ctx.send(embed=embed) @commands.has_any_role('Café Antik Geschäftsführung', 'Jonnys Bot test') @commands.command() async def rrAdd(self, ctx, *, reactRole): await ctx.channel.purge(limit = 1) g = self.client.get_guild(c.serverId) role = reactRole.split(' ')[0] emoji = reactRole.split(' ')[1] fill(c.DB, role, emoji) @commands.has_any_role('Café Antik Geschäftsführung', 'Jonnys Bot test') @commands.command() async def rrUpdate(self, ctx): await ctx.channel.purge(limit = 1) channel = await self.client.fetch_channel(c.reactRoleId) message = await channel.fetch_message(c.reactMsgId) desc = '' for item in data(c.DB): desc += item[0] + ': ' + item[1] + '\n' embed = discord.Embed(title='React to give yourself a role.', description=desc, color=0xa0089b) await message.edit(embed=embed) await message.clear_reactions() for item in data(c.DB): await message.add_reaction(item[1]) @commands.has_any_role('Café Antik Geschäftsführung', 'Jonnys Bot test') @commands.command() async def rrRemove(self, ctx, role): await ctx.channel.purge(limit = 1) g = self.client.get_guild(c.serverId) delete(c.DB, role) @commands.has_any_role('Café Antik Geschäftsführung', 'Jonnys Bot test') @commands.command(aliases=['e']) async def get_e(self, ctx): g = self.client.get_guild(c.serverId) for e in await g.fetch_emojis(): await ctx.send(e) @commands.has_any_role('Café Antik Geschäftsführung', 'Jonnys Bot test') @commands.command(aliases=['r']) async def get_r(self, ctx, role): g = self.client.get_guild(c.serverId) print(role) for r in await g.fetch_roles(): print('CA role: ', r.mention) if r.mention == role: await ctx.send(r.id) ##### Finalize and run ##### def setup(client): client.add_cog(ReactRole(client))
31.126667
103
0.610409
603
4,669
4.643449
0.212272
0.042857
0.0325
0.03
0.604643
0.574643
0.544643
0.498929
0.457857
0.457857
0
0.008331
0.254444
4,669
150
104
31.126667
0.796036
0.012422
0
0.512821
0
0
0.12928
0
0
0
0.003512
0
0
1
0.059829
false
0
0.034188
0
0.102564
0.017094
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41a206702b125bc76a8512deb0a07371c7d2b9ff
1,922
py
Python
tools/create_transaction_volumes_csv.py
alphagov/backdrop-transactions-explorer-collector
60aeac8ddaebf46d24e662d907ffa6bdc9d2dfe7
[ "MIT" ]
null
null
null
tools/create_transaction_volumes_csv.py
alphagov/backdrop-transactions-explorer-collector
60aeac8ddaebf46d24e662d907ffa6bdc9d2dfe7
[ "MIT" ]
2
2015-07-22T11:15:08.000Z
2015-07-22T12:10:52.000Z
tools/create_transaction_volumes_csv.py
alphagov/backdrop-transactions-explorer-collector
60aeac8ddaebf46d24e662d907ffa6bdc9d2dfe7
[ "MIT" ]
2
2019-08-29T11:35:44.000Z
2021-04-10T19:42:13.000Z
#!/usr/bin/env python import argparse import csv import os import sys from distutils import dir_util import unicodecsv # from lib.filters import digest from helper import create_parser from helper import create_directory from helper import map_services_to_csv_data from service import Service path_prefix = '/' asset_prefix = '/transactions-explorer/' static_prefix = 'https://assets.digital.cabinet-office.gov.uk/static' def parse_args_for_create(args): parser = create_parser() parser.add_argument('--services-data', help='Services CSV datafile', default='data/services.csv') parser.add_argument('--path-prefix', help='Prefix for generated URL paths', default=path_prefix) parser.add_argument('--asset-prefix', help='Prefix for generated asset URLs', default=asset_prefix) parser.add_argument('--static-prefix', help='Prefix for generated GOV.UK static URLs', default=static_prefix) parser.add_argument('--static-digests', help='Path to manifest file containing assets digests', type=argparse.FileType()) return parser.parse_args(args) def render_csv(maps, out): with _output_file(out) as output: writer = csv.writer(output, dialect="excel") writer.writerows(maps) def _output_file(path): print path output_path = os.path.join('', path) create_directory(os.path.dirname(output_path)) return open(output_path, 'w') arguments = parse_args_for_create(sys.argv[1:]) input = arguments.services_data data = open(input) reader = unicodecsv.DictReader(data) services = [Service(details=row) for row in reader] csv_map = map_services_to_csv_data(services) render_csv(csv_map, 'data/transaction-volumes.csv')
28.686567
79
0.663371
236
1,922
5.220339
0.360169
0.036526
0.068994
0.046266
0.147727
0
0
0
0
0
0
0.000683
0.238293
1,922
66
80
29.121212
0.840847
0.026535
0
0
0
0
0.196362
0.027287
0
0
0
0
0
0
null
null
0
0.212766
null
null
0.021277
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
41a2184952e142a5afb3083c0dbf463ce07bbe9d
462
py
Python
eu/softfire/tub/exceptions/exceptions.py
softfire-eu/experiment-manager
3e44c917e956bac0359a0303f9a3d0708481e1c9
[ "Apache-2.0" ]
3
2017-05-08T10:45:28.000Z
2017-08-08T13:18:54.000Z
eu/softfire/tub/exceptions/exceptions.py
softfire-eu/experiment-manager
3e44c917e956bac0359a0303f9a3d0708481e1c9
[ "Apache-2.0" ]
null
null
null
eu/softfire/tub/exceptions/exceptions.py
softfire-eu/experiment-manager
3e44c917e956bac0359a0303f9a3d0708481e1c9
[ "Apache-2.0" ]
1
2018-07-12T08:19:14.000Z
2018-07-12T08:19:14.000Z
class _BaseException(Exception): def __init__(self, message, *args) -> None: self.message = message super().__init__(*args) class ManagerNotFound(_BaseException): pass class RpcFailedCall(_BaseException): pass class ExperimentValidationError(_BaseException): pass class ResourceNotFound(_BaseException): pass class ResourceAlreadyBooked(_BaseException): pass class ExperimentNotFound(_BaseException): pass
15.931034
48
0.733766
39
462
8.307692
0.435897
0.314815
0.339506
0
0
0
0
0
0
0
0
0
0.186147
462
28
49
16.5
0.861702
0
0
0.375
0
0
0
0
0
0
0
0
0
1
0.0625
false
0.375
0
0
0.5
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
3
41a3c225eedd6acf9dfa7630da6b90b21ff018d4
18,539
py
Python
python/erdos/__init__.py
objorkman/erdos
13b3be477d6674e9e377a56dec484f80ba41e915
[ "Apache-2.0" ]
null
null
null
python/erdos/__init__.py
objorkman/erdos
13b3be477d6674e9e377a56dec484f80ba41e915
[ "Apache-2.0" ]
null
null
null
python/erdos/__init__.py
objorkman/erdos
13b3be477d6674e9e377a56dec484f80ba41e915
[ "Apache-2.0" ]
null
null
null
import logging import multiprocessing as mp import signal import sys from functools import wraps from typing import Optional, Tuple, Type import erdos.context import erdos.internal as _internal import erdos.operator import erdos.utils from erdos.message import Message, WatermarkMessage from erdos.profile import Profile from erdos.streams import ( ExtractStream, IngestStream, LoopStream, OperatorStream, ReadStream, Stream, WriteStream, ) from erdos.timestamp import Timestamp _num_py_operators = 0 # Set the top-level logger for ERDOS logging. # Users can change the logging level to the required level by calling setLevel # erdos.logger.setLevel(logging.DEBUG) FORMAT = "%(asctime)s.%(msecs)03d %(name)s %(levelname)s: %(message)s" DATE_FORMAT = "%Y-%m-%d,%H:%M:%S" formatter = logging.Formatter(FORMAT, datefmt=DATE_FORMAT) default_handler = logging.StreamHandler(sys.stderr) default_handler.setFormatter(formatter) logger = logging.getLogger(__name__) logger.addHandler(default_handler) logger.setLevel(logging.WARNING) logger.propagate = False def connect_source( op_type: Type[erdos.operator.Source], config: erdos.operator.OperatorConfig, *args, **kwargs, ) -> OperatorStream: """Registers a :py:class:`.Source` operator to the dataflow graph, and returns the :py:class:`OperatorStream` that the operator will write the data on. Args: op_type: The :py:class:`.Source` operator that needs to be added to the graph. config: Configuration details required by the operator. *args: Arguments passed to the operator during initialization. **kwargs: Keyword arguments passed to the operator during initialization. Returns: An :py:class:`OperatorStream` corresponding to the :py:class:`WriteStream` made available to :py:meth:`.Source.run`. 
""" if not issubclass(op_type, erdos.operator.Source): raise TypeError("{} must subclass erdos.operator.Source".format(op_type)) if op_type.run.__code__.co_code == erdos.operator.Source.run.__code__.co_code: logger.warn( "The operator {} does not " "implement the `run` method.".format(op_type) ) # 1-index operators because node 0 is preserved for the current process, # and each node can only run 1 python operator. global _num_py_operators _num_py_operators += 1 node_id = _num_py_operators logger.debug( "Connecting operator #{num} ({name}) to the graph.".format( num=node_id, name=config.name ) ) internal_stream = _internal.connect_source(op_type, config, args, kwargs, node_id) return OperatorStream(internal_stream) def connect_sink( op_type: Type[erdos.operator.Sink], config: erdos.operator.OperatorConfig, read_stream: Stream, *args, **kwargs, ): """Registers a :py:class:`.Sink` operator to the dataflow graph. Args: op_type: The :py:class:`.Sink` operator that needs to be added to the graph. config: Configuration details required by the operator. read_stream: The :py:class:`Stream` instance from where the operator reads its data. *args: Arguments passed to the operator during initialization. **kwargs: Keyword arguments passed to the operator during initialization. 
""" if not issubclass(op_type, erdos.operator.Sink): raise TypeError("{} must subclass erdos.operator.Sink".format(op_type)) if not isinstance(read_stream, Stream): raise TypeError("{} must subclass `Stream`.".format(read_stream)) if ( op_type.run.__code__.co_code == erdos.operator.Sink.run.__code__.co_code and op_type.on_data.__code__.co_code == erdos.operator.Sink.on_data.__code__.co_code and op_type.on_watermark.__code__.co_code == erdos.operator.Sink.on_watermark.__code__.co_code ): logger.warn( "The operator {} does not implement any of the " "`run`, `on_data` or `on_watermark` methods.".format(op_type) ) # 1-index operators because node 0 is preserved for the current process, # and each node can only run 1 python operator. global _num_py_operators _num_py_operators += 1 node_id = _num_py_operators logger.debug( "Connecting operator #{num} ({name}) to the graph.".format( num=node_id, name=config.name ) ) _internal.connect_sink( op_type, config, read_stream._internal_stream, args, kwargs, node_id ) def connect_one_in_one_out( op_type: Type[erdos.operator.OneInOneOut], config: erdos.operator.OperatorConfig, read_stream: Stream, *args, **kwargs, ) -> OperatorStream: """Registers a :py:class:`.OneInOneOut` operator to the dataflow graph that receives input from the given :code:`read_stream`, and returns the :py:class:`OperatorStream` that the operator will write the data on. Args: op_type: The :py:class:`.OneInOneOut` operator that needs to be added to the graph. config: Configuration details required by the operator. read_stream: The :py:class:`Stream` instance from where the operator reads its data. *args: Arguments passed to the operator during initialization. **kwargs: Keyword arguments passed to the operator during initialization. Returns: An :py:class:`OperatorStream` corresponding to the :py:class:`WriteStream` made available to :py:meth:`.OneInOneOut.run`, or to the operator's callbacks via the :py:class:`.OneInOneOutContext`. 
""" if not issubclass(op_type, erdos.operator.OneInOneOut): raise TypeError("{} must subclass erdos.operator.OneInOneOut".format(op_type)) if not isinstance(read_stream, Stream): raise TypeError("{} must subclass `Stream`.".format(read_stream)) if ( op_type.run.__code__.co_code == erdos.operator.OneInOneOut.run.__code__.co_code and op_type.on_data.__code__.co_code == erdos.operator.OneInOneOut.on_data.__code__.co_code and op_type.on_watermark.__code__.co_code == erdos.operator.OneInOneOut.on_watermark.__code__.co_code ): logger.warn( "The operator {} does not implement any of the " "`run`, `on_data` or `on_watermark` methods.".format(op_type) ) # 1-index operators because node 0 is preserved for the current process, # and each node can only run 1 python operator. global _num_py_operators _num_py_operators += 1 node_id = _num_py_operators logger.debug( "Connecting operator #{num} ({name}) to the graph.".format( num=node_id, name=config.name ) ) internal_stream = _internal.connect_one_in_one_out( op_type, config, read_stream._internal_stream, args, kwargs, node_id ) return OperatorStream(internal_stream) def connect_two_in_one_out( op_type: Type[erdos.operator.TwoInOneOut], config: erdos.operator.OperatorConfig, left_read_stream: Stream, right_read_stream: Stream, *args, **kwargs, ) -> OperatorStream: """Registers a :py:class:`.TwoInOneOut` operator to the dataflow graph that receives input from the given :code:`left_read_stream` and :code:`right_read_stream`, and returns the :py:class:`OperatorStream` that the operator sends messages on. Args: op_type: The :py:class:`.TwoInOneOut` operator to add to the graph. config: Configuration details required by the operator. left_read_stream: The first :py:class:`Stream` instance from where the operator reads its data. right_read_stream: The second :py:class:`Stream` instance from where the operator reads its data. *args: Arguments passed to the operator during initialization. 
**kwargs: Keyword arguments passed to the operator during initialization. Returns: An :py:class:`OperatorStream` corresponding to the :py:class:`WriteStream` made available to :py:meth:`.TwoInOneOut.run`, or to the operator's callbacks via the :py:class:`.TwoInOneOutContext`. """ if not issubclass(op_type, erdos.operator.TwoInOneOut): raise TypeError("{} must subclass erdos.operator.TwoInOneOut".format(op_type)) if not isinstance(left_read_stream, Stream): raise TypeError("{} must subclass `Stream`.".format(left_read_stream)) if not isinstance(right_read_stream, Stream): raise TypeError("{} must subclass `Stream`.".format(right_read_stream)) if ( op_type.run.__code__.co_code == erdos.operator.TwoInOneOut.run.__code__.co_code and op_type.on_left_data.__code__.co_code == erdos.operator.TwoInOneOut.on_left_data.__code__.co_code and op_type.on_right_data.__code__.co_code == erdos.operator.TwoInOneOut.on_right_data.__code__.co_code and op_type.on_watermark.__code__.co_code == erdos.operator.TwoInOneOut.on_watermark.__code__.co_code ): logger.warn( "The operator {} does not implement any of the `run`, " "`on_left_data`, `on_right_data` or `on_watermark` " "methods.".format(op_type) ) # 1-index operators because node 0 is preserved for the current process, # and each node can only run 1 python operator. 
global _num_py_operators _num_py_operators += 1 node_id = _num_py_operators logger.debug( "Connecting operator #{num} ({name}) to the graph.".format( num=node_id, name=config.name ) ) internal_stream = _internal.connect_two_in_one_out( op_type, config, left_read_stream._internal_stream, right_read_stream._internal_stream, args, kwargs, node_id, ) return OperatorStream(internal_stream) def connect_one_in_two_out( op_type: Type[erdos.operator.OneInTwoOut], config: erdos.operator.OperatorConfig, read_stream: Stream, *args, **kwargs, ) -> Tuple[OperatorStream, OperatorStream]: """Registers a :py:class:`.OneInTwoOut` operator to the dataflow graph that receives input from the given :code:`read_stream`, and returns the pair of :py:class:`OperatorStream` instances that the operator will write data on. Args: op_type: The :py:class:`.OneInTwoOut` operator that needs to be added to the graph. config: Configuration details required by the operator. read_stream: The :py:class:`Stream` instance from where the operator reads its data. *args: Arguments passed to the operator during initialization. **kwargs: Keyword arguments passed to the operator during initialization. Returns: A pair of :py:class:`OperatorStream` instances corresponding to the :py:class:`WriteStream` instances made available to :py:meth:`.OneInOneOut.run`, or to the operator's callbacks via the :py:class:`.OneInTwoOutContext`. 
""" if not issubclass(op_type, erdos.operator.OneInTwoOut): raise TypeError("{} must subclass erdos.operator.OneInTwoOut".format(op_type)) if not isinstance(read_stream, Stream): raise TypeError("{} must subclass `Stream`.".format(read_stream)) if ( op_type.run.__code__.co_code == erdos.operator.OneInTwoOut.run.__code__.co_code and op_type.on_data.__code__.co_code == erdos.operator.OneInTwoOut.on_data.__code__.co_code and op_type.on_watermark.__code__.co_code == erdos.operator.OneInTwoOut.on_watermark.__code__.co_code ): logger.warn( "The operator {} does not implement any of the " "`run`, `on_data` or `on_watermark` methods.".format(op_type) ) # 1-index operators because node 0 is preserved for the current process, # and each node can only run 1 python operator. global _num_py_operators _num_py_operators += 1 node_id = _num_py_operators logger.debug( "Connecting operator #{num} ({name}) to the graph.".format( num=node_id, name=config.name ) ) left_stream, right_stream = _internal.connect_one_in_two_out( op_type, config, read_stream._internal_stream, args, kwargs, node_id ) return OperatorStream(left_stream), OperatorStream(right_stream) def reset(): """Create a new dataflow graph. Note: A call to this function renders the previous dataflow graph unsafe to use. """ logger.info("Resetting the default graph.") global _num_py_operators _num_py_operators = 0 _internal.reset() # TODO (Sukrit) : Should this be called a GraphHandle? # What is the significance of the "Node" here? class NodeHandle: """A handle to the dataflow graph returned by the :py:func:`run_async` function. The handle exposes functions to :py:func:`shutdown` the dataflow, or :py:func:`wait` for its completion. Note: This structure should not be initialized by the users. 
""" def __init__(self, py_node_handle, processes): self.py_node_handle = py_node_handle self.processes = processes def shutdown(self): """Shuts down the dataflow.""" logger.info("Shutting down other processes") for p in self.processes: p.terminate() p.join() logger.info("Shutting down node.") self.py_node_handle.shutdown_node() def wait(self): """Waits for the completion of all the operators in the dataflow""" for p in self.processes: p.join() logger.debug("Finished waiting for the dataflow graph processes.") def run(graph_filename: Optional[str] = None, start_port: Optional[int] = 9000): """Instantiates and runs the dataflow graph. ERDOS will spawn 1 process for each python operator, and connect them via TCP. Args: graph_filename: The filename to which to write the dataflow graph as a DOT file. start_port: The port on which to start. The start port is the lowest port ERDOS will use to establish TCP connections between operators. """ driver_handle = run_async(graph_filename, start_port) logger.debug("Waiting for the dataflow to complete ...") driver_handle.wait() def _run_node(node_id, data_addresses, control_addresses): _internal.run(node_id, data_addresses, control_addresses) def run_async( graph_filename: Optional[str] = None, start_port: Optional[int] = 9000 ) -> NodeHandle: """Instantiates and runs the dataflow graph asynchronously. ERDOS will spawn 1 process for each python operator, and connect them via TCP. Args: graph_filename: The filename to which to write the dataflow graph as a DOT file. start_port: The port on which to start. The start port is the lowest port ERDOS will use to establish TCP connections between operators. Returns: A :py:class:`.NodeHandle` that allows the driver to interface with the dataflow graph. 
""" data_addresses = [ "127.0.0.1:{port}".format(port=start_port + i) for i in range(_num_py_operators + 1) ] control_addresses = [ "127.0.0.1:{port}".format(port=start_port + len(data_addresses) + i) for i in range(_num_py_operators + 1) ] logger.debug("Running the dataflow graph on addresses: {}".format(data_addresses)) # Fix for macOS where mulitprocessing defaults # to spawn() instead of fork() in Python 3.8+ # https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods # Warning: may lead to crashes # https://bugs.python.org/issue33725 ctx = mp.get_context("fork") processes = [ ctx.Process(target=_run_node, args=(i, data_addresses, control_addresses)) for i in range(1, _num_py_operators + 1) ] # Needed to shut down child processes def sigint_handler(sig, frame): for p in processes: p.terminate() sys.exit(0) signal.signal(signal.SIGINT, sigint_handler) for p in processes: p.start() # The driver must always be on node 0 otherwise ingest and extract streams # will break py_node_handle = _internal.run_async( 0, data_addresses, control_addresses, graph_filename ) return NodeHandle(py_node_handle, processes) def profile(event_name, operator, event_data=None): return Profile(event_name, operator, event_data) def profile_method(**decorator_kwargs): def decorator(func): @wraps(func) def wrapper(*args, **kwargs): if isinstance(args[0], erdos.operator.BaseOperator): # The func is an operator method. op_name = args[0].config.name cb_name = func.__name__ if "event_name" in decorator_kwargs: event_name = decorator_kwargs["event_name"] else: # Set the event name to the operator name and the callback # name if it's not passed by the user. event_name = op_name + "." + cb_name timestamp = None if len(args) > 1: if isinstance(args[1], Timestamp): # The func is a watermark callback. timestamp = args[1] elif isinstance(args[1], Message): # The func is a callback. 
timestamp = args[1].timestamp else: raise TypeError("@erdos.profile can only be used on operator methods") with erdos.profile( event_name, args[0], event_data={"timestamp": str(timestamp)} ): return func(*args, **kwargs) return wrapper return decorator __all__ = [ "Stream", "ReadStream", "WriteStream", "LoopStream", "IngestStream", "ExtractStream", "Profile", "Message", "WatermarkMessage", "Timestamp", "connect_source", "connect_sink", "connect_one_in_one_out", "connect_two_in_one_out", "connect_one_in_two_out", "reset", "run", "run_async", "profile_method", "NodeHandle", ]
34.847744
87
0.663089
2,358
18,539
5.000848
0.126378
0.022388
0.023745
0.017809
0.654003
0.629834
0.572676
0.538755
0.520353
0.490926
0
0.004936
0.246022
18,539
531
88
34.913371
0.838675
0.349426
0
0.348684
0
0.003289
0.14256
0.016302
0
0
0
0.001883
0
1
0.055921
false
0
0.046053
0.003289
0.134868
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41a3f81d4c1f4c13d941abba84f2c1450266e8a1
7,951
py
Python
compyler/node.py
Fogelman/compyler
76c6ba12f264131b6a5d800dd40bb76fe3155900
[ "MIT" ]
null
null
null
compyler/node.py
Fogelman/compyler
76c6ba12f264131b6a5d800dd40bb76fe3155900
[ "MIT" ]
null
null
null
compyler/node.py
Fogelman/compyler
76c6ba12f264131b6a5d800dd40bb76fe3155900
[ "MIT" ]
null
null
null
from llvmlite import ir from rply.token import BaseBox from abc import ABC, abstractmethod import operator as op from compyler.symboltable import FunctionSymbol, SymbolTable class Node(BaseBox, ABC): def __init__(self, value, children=None): self.value = value self.children = children if children is None: self.children = list() @abstractmethod def Evaluate(self, context): pass class Context(object): def __init__(self, st, builder, module, env=dict()): self.st = st self.builder = builder self.module = module self.env = env self.local = dict() def new(self): st = SymbolTable(parent=self.st) builder = self.builder module = self.module env = self.env return Context(st, builder, module, env) def declare(self, name): """Create an alloca in the entry BB of the current function.""" int32 = ir.IntType(32) return self.builder.alloca(int32, name=name) class UnOp(Node): op_map = { '+': lambda builder, x: x, '-': lambda builder, x: builder.neg(x, "unoptmp"), '~': lambda builder, x: builder.not_(x, "unoptmp"), 'not': lambda builder, x: builder.not_(x, "unoptmp"), } def Evaluate(self, context): return self.op_map[self.value](context.builder, self.children[0].Evaluate(context)) class BinOp(Node): op_map = { '+': lambda builder, x, y: builder.add(x, y, "optmp"), '-': lambda builder, x, y: builder.sub(x, y, "optmp"), '*': lambda builder, x, y: builder.mul(x, y, "optmp"), '^': lambda builder, x, y: builder.xor(x, y, "optmp"), '/': lambda builder, x, y: builder.sdiv(x, y, "optmp"), '//': lambda builder, x, y: builder.sdiv(x, y, "optmp"), '%': lambda builder, x, y: builder.srem(x, y, "optmp"), '&': lambda builder, x, y: builder.and_(x, y, "optmp"), '|': lambda builder, x, y: builder.or_(x, y, "optmp"), '<': lambda builder, x, y: builder.icmp_signed("<", x, y, "optmp"), '>': lambda builder, x, y: builder.icmp_signed(">", x, y, "optmp"), '<=': lambda builder, x, y: builder.icmp_signed("<=", x, y, "optmp"), '>=': lambda builder, x, y: builder.icmp_signed(">=", x, y, "optmp"), '==': lambda 
builder, x, y: builder.icmp_signed("==", x, y, "optmp"), '!=': lambda builder, x, y: builder.icmp_signed("!=", x, y, "optmp"), 'and': lambda builder, x, y: builder.and_(x, y, "optmp"), } def Evaluate(self, context): return self.op_map[self.value](context.builder, self.children[0].Evaluate(context), self.children[1].Evaluate(context)) class IntVal(Node): def Evaluate(self, context): int32 = ir.IntType(32) return ir.Constant(int32, int(self.value)) class BoolVal(Node): def Evaluate(self, context): int32 = ir.IntType(32) return ir.Constant(int32, int(self.value == "True")) class AnyVal(Node): def Evaluate(self, context): return (self.value) class NoOp(Node): def Evaluate(self, context): pass class Assignment(Node): def Evaluate(self, context): addr = context.st.contains(self.value) if not addr: addr = context.declare(self.value) x = self.children[0].Evaluate(context) context.builder.store(x, addr) context.st.set(self.value, addr) class Identifier(Node): def Evaluate(self, context): addr = context.st.get(self.value) return context.builder.load(addr) class Print(Node): def Evaluate(self, context): int8 = ir.IntType(8).as_pointer() printf = context.env["printf"] ftm = context.env["ftm"] if context.local.__contains__("print"): arg = context.local["print"] else: arg = context.builder.bitcast(ftm, int8) context.local["print"] = arg result = self.children[0].Evaluate(context) context.builder.call(printf, [arg, result]) class If(Node): def Evaluate(self, context): int32 = ir.IntType(32) condition = self.children[0].Evaluate(context) pred = context.builder.icmp_signed( '!=', condition, ir.Constant(int32, 0)) with context.builder.if_else(pred) as (then, otherwise): with then: self.children[1].Evaluate(context) with otherwise: if len(self.children) > 2: self.children[2].Evaluate(context) class While(Node): def Evaluate(self, context): int32 = ir.IntType(32) loop = context.builder.function.append_basic_block('loop') context.builder.branch(loop) 
context.builder.position_at_start(loop) self.children[1].Evaluate(context) endcond = self.children[0].Evaluate(context) cmp = context.builder.icmp_signed( '!=', endcond, ir.Constant(int32, 0), 'loopcond') after = context.builder.function.append_basic_block('afterloop') context.builder.cbranch(cmp, loop, after) context.builder.position_at_start(after) class ReadLine(Node): def Evaluate(self, context): return int(input()) class Commands(Node): def Evaluate(self, context, check=False): for child in self.children: child.Evaluate(context) if check and len(self.children) > 0 and isinstance(self.children[-1], (Return)): context.local["ret"] = "" def append(self, child): self.children.append(child) class FuncAssignment(Node): def _create(self, context): int32 = ir.IntType(32) args, _ = self.children ty = ir.FunctionType(int32, [int32 for i in range(len(args))]) if self.value in context.module.globals: existing_func = context.module[self.value] if not isinstance(existing_func, ir.Function): raise Exception('Function/Global name collision', self.value) if not existing_func.is_declaration(): raise Exception('Redifinition of {0}'.format(self.value)) if len(existing_func.function_type.args) != len(ty.args): raise Exception( 'Redifinition with different number of arguments') func = context.module.globals[self.value] else: # Otherwise create a new function func = ir.Function(context.module, ty, self.value) return func def Evaluate(self, parent): args, body = self.children context = parent.new() func = self._create(context) block = func.append_basic_block('entry') context.builder = ir.IRBuilder(block) for i, arg in enumerate(func.args): arg.name = args[i] addr = context.declare(arg.name) context.builder.store(arg, addr) context.st.set(arg.name, addr) body.Evaluate(context, True) if not context.local.__contains__("ret"): context.builder.ret(ir.Constant(ir.IntType(32), 0)) return func class FuncCall(Node): def Evaluate(self, context): arguments = self.children func = 
context.module.get_global(self.value) if func is None or not isinstance(func, ir.Function): raise Exception('Call to unknown function', self.value) if len(func.args) != len(arguments): raise Exception('Call argument length mismatch', self.value) call_args = [argument.Evaluate(context) for argument in arguments] return context.builder.call(func, call_args, 'calltmp') class Return(Node): def Evaluate(self, context): if self.children is None or len(self.children) == 0: return context.builder.ret_void() return context.builder.ret(self.children[0].Evaluate(context))
30.003774
127
0.599673
973
7,951
4.840699
0.169579
0.013588
0.059448
0.074735
0.392994
0.303822
0.244586
0.213163
0.171975
0.140552
0
0.010604
0.264621
7,951
264
128
30.117424
0.794938
0.011319
0
0.169399
0
0
0.045837
0
0
0
0
0
0
1
0.125683
false
0.010929
0.027322
0.021858
0.338798
0.027322
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41a41dfe04ba1695c5ba79a312ffef30febd8cc6
2,617
py
Python
tests/test_gnmi.py
dmulyalin/nornir-salt
184002995515dddc802b578400370c2219e94957
[ "MIT" ]
5
2021-01-22T09:34:55.000Z
2021-12-22T08:12:34.000Z
tests/test_gnmi.py
dmulyalin/nornir-salt
184002995515dddc802b578400370c2219e94957
[ "MIT" ]
2
2022-01-27T14:46:40.000Z
2022-02-28T16:59:01.000Z
tests/test_gnmi.py
dmulyalin/nornir-salt
184002995515dddc802b578400370c2219e94957
[ "MIT" ]
1
2021-01-10T04:37:08.000Z
2021-01-10T04:37:08.000Z
""" At the moment this does not tests apat from testing import of PyGNMI library and gNMI connecton and task plugins. Was not able to find always-on endpoints that can test using gNMI, Cisco sandboxes has gRPC API available but that is different. """ import sys import pprint import logging import yaml import pytest import socket sys.path.insert(0, "..") try: from nornir import InitNornir from nornir.core.plugins.inventory import InventoryPluginRegister from nornir.core.plugins.connections import ConnectionPluginRegister from nornir.core.task import Result HAS_NORNIR = True except ImportError: HAS_NORNIR = False from nornir_salt import ( ResultSerializer, DictInventory, nr_test, DataProcessor, netmiko_send_commands, PyGNMIPlugin, pygnmi_call ) logging.basicConfig(level=logging.ERROR) InventoryPluginRegister.register("DictInventory", DictInventory) ConnectionPluginRegister.register("pygnmi", PyGNMIPlugin) skip_if_no_nornir = pytest.mark.skipif( HAS_NORNIR == False, reason="Failed to import all required Nornir modules and plugins", ) # --------------------------------------------------- # cisco always on ios xr lab details # --------------------------------------------------- cisco_iosxr_always_on_router = """ hosts: sandbox-iosxr-1.cisco.com: hostname: "sandbox-iosxr-1.cisco.com" platform: iosxr username: admin password: C1sco12345 port: 57777 connection_options: pygnmi: extras: insecure: True """ try: s = socket.socket() s.settimeout(1) s.connect(("sandbox-iosxr-1.cisco.com", 22)) has_connection_to_cisco_iosxr_always_on_router = True except: has_connection_to_cisco_iosxr_always_on_router = False skip_if_has_no_cisco_iosxr_always_on_router = pytest.mark.skipif( has_connection_to_cisco_iosxr_always_on_router == False, reason="Has no connection to sandbox-iosxr-1.cisco.com router", ) cisco_iosxr_always_on_router_dict = yaml.safe_load(cisco_iosxr_always_on_router) def init(opts): """ Initiate nornir by calling InitNornir() """ nr = InitNornir( logging={"enabled": 
False}, runner={"plugin": "serial"}, inventory={ "plugin": "DictInventory", "options": { "hosts": opts["hosts"], "groups": opts.get("groups", {}), "defaults": opts.get("defaults", {}), }, }, ) return nr nr = init(cisco_iosxr_always_on_router_dict) @skip_if_no_nornir def test_gnmi_capabilities_check(): pass # test_gnmi_capabilities_check()
25.910891
91
0.680168
313
2,617
5.466454
0.41853
0.046756
0.07481
0.084161
0.19813
0.106955
0.074226
0.074226
0.051432
0
0
0.008996
0.192969
2,617
100
92
26.17
0.801136
0.173481
0
0.028571
0
0
0.22321
0.048198
0
0
0
0
0
1
0.028571
false
0.028571
0.185714
0
0.228571
0.014286
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41a45c2a64fe8ee34481934f5986ff7aa8e03131
6,886
py
Python
bentoctl_aws_ec2/ec2/ec2_cloudformation_template.py
jjmachan/aws-ec2-deploy
e8a20816a84656502d9c6eb03e944e739efa0c1f
[ "Apache-2.0" ]
5
2021-06-22T10:21:00.000Z
2021-08-16T12:57:35.000Z
ec2/ec2_cloudformation_template.py
bentoml/aws-ec2-deploy
6d453fd1afbdb07a080d9d38dcc89f9f6768603a
[ "Apache-2.0" ]
15
2021-08-05T10:29:56.000Z
2022-03-04T01:35:47.000Z
bentoctl_aws_ec2/ec2/ec2_cloudformation_template.py
jjmachan/aws-ec2-deploy
e8a20816a84656502d9c6eb03e944e739efa0c1f
[ "Apache-2.0" ]
5
2021-06-22T14:39:48.000Z
2022-03-25T09:19:03.000Z
EC2_CLOUDFORMATION_TEMPLATE = """\ AWSTemplateFormatVersion: 2010-09-09 Transform: AWS::Serverless-2016-10-31 Description: BentoML load balanced template Parameters: AmazonLinux2LatestAmiId: Type: AWS::SSM::Parameter::Value<AWS::EC2::Image::Id> Default: {ami_id} Resources: SecurityGroupResource: Type: AWS::EC2::SecurityGroup Properties: GroupDescription: "security group for bentoservice" SecurityGroupIngress: - IpProtocol: tcp CidrIp: 0.0.0.0/0 FromPort: 5000 ToPort: 5000 - IpProtocol: tcp CidrIp: 0.0.0.0/0 FromPort: 22 ToPort: 22 VpcId: !Ref Vpc1 Ec2InstanceECRProfile: Type: AWS::IAM::InstanceProfile Properties: Path: / Roles: [!Ref EC2Role] EC2Role: Type: AWS::IAM::Role Properties: AssumeRolePolicyDocument: Statement: - Effect: Allow Principal: Service: [ec2.amazonaws.com] Action: ['sts:AssumeRole'] Path: / Policies: - PolicyName: ecs-service PolicyDocument: Statement: - Effect: Allow Action: - 'ecr:GetAuthorizationToken' - 'ecr:BatchGetImage' - 'ecr:GetDownloadUrlForLayer' Resource: '*' LaunchTemplateResource: Type: AWS::EC2::LaunchTemplate Properties: LaunchTemplateName: {template_name} LaunchTemplateData: IamInstanceProfile: Arn: !GetAtt Ec2InstanceECRProfile.Arn ImageId: !Ref AmazonLinux2LatestAmiId InstanceType: {instance_type} UserData: "{user_data}" SecurityGroupIds: - !GetAtt SecurityGroupResource.GroupId TargetGroup: Type: AWS::ElasticLoadBalancingV2::TargetGroup Properties: VpcId: !Ref Vpc1 Protocol: HTTP Port: 5000 TargetType: instance HealthCheckEnabled: true HealthCheckIntervalSeconds: {target_health_check_interval_seconds} HealthCheckPath: {target_health_check_path} HealthCheckPort: {target_health_check_port} HealthCheckProtocol: HTTP HealthCheckTimeoutSeconds: {target_health_check_timeout_seconds} HealthyThresholdCount: {target_health_check_threshold_count} LoadBalancerSecurityGroup: Type: AWS::EC2::SecurityGroup Properties: GroupDescription: "security group for loadbalancing" VpcId: !Ref Vpc1 SecurityGroupIngress: - IpProtocol: tcp CidrIp: 
0.0.0.0/0 FromPort: 80 ToPort: 80 InternetGateway: Type: AWS::EC2::InternetGateway Gateway: Type: AWS::EC2::VPCGatewayAttachment Properties: InternetGatewayId: !Ref InternetGateway VpcId: !Ref Vpc1 PublicRouteTable: Type: AWS::EC2::RouteTable Properties: VpcId: !Ref Vpc1 PublicRoute: Type: AWS::EC2::Route DependsOn: Gateway Properties: DestinationCidrBlock: 0.0.0.0/0 GatewayId: !Ref InternetGateway RouteTableId: !Ref PublicRouteTable RouteTableSubnetTwoAssociationOne: Type: AWS::EC2::SubnetRouteTableAssociation Properties: RouteTableId: !Ref PublicRouteTable SubnetId: !Ref Subnet1 RouteTableSubnetTwoAssociationTwo: Type: AWS::EC2::SubnetRouteTableAssociation Properties: RouteTableId: !Ref PublicRouteTable SubnetId: !Ref Subnet2 Vpc1: Type: AWS::EC2::VPC Properties: CidrBlock: 172.31.0.0/16 EnableDnsHostnames: true EnableDnsSupport: true InstanceTenancy: default Subnet1: Type: AWS::EC2::Subnet Properties: VpcId: !Ref Vpc1 AvailabilityZone: Fn::Select: - 0 - Fn::GetAZs: "" CidrBlock: 172.31.16.0/20 MapPublicIpOnLaunch: true Subnet2: Type: AWS::EC2::Subnet Properties: VpcId: !Ref Vpc1 AvailabilityZone: Fn::Select: - 1 - Fn::GetAZs: "" CidrBlock: 172.31.0.0/20 MapPublicIpOnLaunch: true LoadBalancer: Type: AWS::ElasticLoadBalancingV2::LoadBalancer Properties: IpAddressType: ipv4 Name: {elb_name} Scheme: internet-facing SecurityGroups: - !Ref LoadBalancerSecurityGroup Subnets: - !Ref Subnet1 - !Ref Subnet2 Type: application Listener: Type: AWS::ElasticLoadBalancingV2::Listener Properties: DefaultActions: - Type: forward TargetGroupArn: !Ref TargetGroup LoadBalancerArn: !Ref LoadBalancer Port: 80 Protocol: HTTP AutoScalingGroup: Type: AWS::AutoScaling::AutoScalingGroup DependsOn: Gateway Properties: MinSize: {autoscaling_min_size} MaxSize: {autoscaling_max_size} DesiredCapacity: {autoscaling_desired_capacity} AvailabilityZones: - Fn::Select: - 0 - Fn::GetAZs: "" - Fn::Select: - 1 - Fn::GetAZs: "" LaunchTemplate: LaunchTemplateId: !Ref LaunchTemplateResource Version: 
!GetAtt LaunchTemplateResource.LatestVersionNumber TargetGroupARNs: - !Ref TargetGroup VPCZoneIdentifier: - !Ref Subnet1 - !Ref Subnet2 UpdatePolicy: AutoScalingReplacingUpdate: WillReplace: true Outputs: AutoScalingGroup: Value: !Ref AutoScalingGroup Description: Autoscaling group name TargetGroup: Value: !Ref TargetGroup Description: Target group for load balancer Url: Value: !Join ['', ['http://', !GetAtt [LoadBalancer, DNSName]]] Description: URL of the bento service """
31.587156
78
0.523671
479
6,886
7.463466
0.39666
0.037203
0.033566
0.008951
0.187692
0.158881
0.158881
0.158881
0.158881
0.113566
0
0.032139
0.408074
6,886
217
79
31.732719
0.844946
0
0
0.376884
0
0
0.994627
0.189951
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
41a614b41c6ed87485f48e036058ce573a7b945d
690
py
Python
src/tests/benchmarks/tools/bench/AnTuTu6.py
VirtualVFix/AndroidTestFramework
1feb769c6aca39a78e6daefd6face0a1e4d62cd4
[ "MIT" ]
null
null
null
src/tests/benchmarks/tools/bench/AnTuTu6.py
VirtualVFix/AndroidTestFramework
1feb769c6aca39a78e6daefd6face0a1e4d62cd4
[ "MIT" ]
null
null
null
src/tests/benchmarks/tools/bench/AnTuTu6.py
VirtualVFix/AndroidTestFramework
1feb769c6aca39a78e6daefd6face0a1e4d62cd4
[ "MIT" ]
null
null
null
# All rights reserved by forest fairy. # You cannot modify or share anything without sacrifice. # If you don't agree, keep calm and don't look at code bellow! __author__ = "VirtualV <https://github.com/virtualvfix>" __date__ = "$Apr 12, 2014 4:40:25 PM$" import ast from tests.benchmarks.tools.base import App class AnTuTu6(App): """ AnTuTu 6 """ def __init__(self, attributes, serial): App.__init__(self, attributes, serial) def collect_results(self, res_doc): raw_res = ast.literal_eval(self.getResults()) for name, value in raw_res: res_doc.add_name(name.replace('[','').replace(']','')) res_doc.add_result(value)
31.363636
66
0.665217
97
690
4.474227
0.731959
0.041475
0.082949
0.110599
0
0
0
0
0
0
0
0.023853
0.210145
690
21
67
32.857143
0.772477
0.236232
0
0
0
0
0.131274
0
0
0
0
0
0
1
0.166667
false
0
0.166667
0
0.416667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41a6d2898476c90a1f687ee05cacf8a8f146ec52
1,496
py
Python
osc_bge/users/admin.py
jisuhan3201/osc-bge
125c441d23d7f1fdb2d9b8f42f859082e757e25a
[ "MIT" ]
null
null
null
osc_bge/users/admin.py
jisuhan3201/osc-bge
125c441d23d7f1fdb2d9b8f42f859082e757e25a
[ "MIT" ]
5
2020-06-05T19:49:47.000Z
2021-09-08T00:50:55.000Z
osc_bge/users/admin.py
jisuhan3201/osc-bge
125c441d23d7f1fdb2d9b8f42f859082e757e25a
[ "MIT" ]
null
null
null
from django.contrib import admin from django.contrib.auth import admin as auth_admin from django.contrib.auth import get_user_model from osc_bge.users.forms import UserChangeForm, UserCreationForm from . import models User = get_user_model() @admin.register(User) class UserAdmin(auth_admin.UserAdmin): form = UserChangeForm add_form = UserCreationForm fieldsets = (("User", {"fields": ("username", "image", "type")}),) + auth_admin.UserAdmin.fieldsets list_display = ["username", "is_superuser", "type", "image"] search_fields = ["username"] @admin.register(models.BgeAdminUser) class BgeAdminUserAdmin(admin.ModelAdmin): list_display = ( "user", "partition", ) @admin.register(models.BgeBranchAdminUser) class BgeBranchAdminUserAdmin(admin.ModelAdmin): list_display = ( "user", "branch", ) @admin.register(models.BgeBranchCoordinator) class BgeBranchCoordinatorAdmin(admin.ModelAdmin): list_display = ( "user", "branch", "position", ) @admin.register(models.AgencyHeadAdminUser) class AgencyHeadAdminUserAdmin(admin.ModelAdmin): list_display = ( "user", "agency_head", ) @admin.register(models.AgencyAdminUser) class AgencyAdminUserAdmin(admin.ModelAdmin): list_display = ( "user", "agency", ) @admin.register(models.Counselor) class CounselorAdmin(admin.ModelAdmin): list_display = ( "user", "agency", )
22
103
0.681818
144
1,496
6.951389
0.347222
0.090909
0.113886
0.155844
0.273726
0.243756
0
0
0
0
0
0
0.198529
1,496
67
104
22.328358
0.834862
0
0
0.32
0
0
0.093583
0
0
0
0
0
0
1
0
false
0
0.1
0
0.46
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41a79266ccd514b38d38bed6f38f0c721bb4fe9e
3,949
py
Python
scripts/ros_tensorflow_classify.py
xuanlvxin/blog_backup
691c040efe4d752b4c4badbdd5dd78960ed966e2
[ "Apache-2.0" ]
45
2018-05-13T01:55:40.000Z
2022-03-28T15:20:48.000Z
scripts/ros_tensorflow_classify.py
xuanlvxin/blog_backup
691c040efe4d752b4c4badbdd5dd78960ed966e2
[ "Apache-2.0" ]
1
2018-06-06T10:39:02.000Z
2018-09-05T01:52:19.000Z
scripts/ros_tensorflow_classify.py
xuanlvxin/blog_backup
691c040efe4d752b4c4badbdd5dd78960ed966e2
[ "Apache-2.0" ]
17
2018-05-14T12:17:57.000Z
2020-03-29T09:41:07.000Z
#!/usr/bin/env python import rospy from sensor_msgs.msg import Image from std_msgs.msg import String from cv_bridge import CvBridge import cv2 import numpy as np import tensorflow as tf import os import re class RosTensorFlow(): def __init__(self): self._session = tf.Session() self._cv_bridge = CvBridge() self._sub = rospy.Subscriber('usb_cam/image_raw', Image, self.callback, queue_size=1) self._pub = rospy.Publisher('/result_ripe', String, queue_size=1) self.score_threshold = rospy.get_param('~score_threshold', 0.1) self.use_top_k = rospy.get_param('~use_top_k', 5) def load(self, label_lookup_path, uid_lookup_path): if not tf.gfile.Exists(uid_lookup_path): tf.logging.fatal('File does not exist %s', uid_lookup_path) if not tf.gfile.Exists(label_lookup_path): tf.logging.fatal('File does not exist %s', label_lookup_path) # Loads mapping from string UID to human-readable string proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines() uid_to_human = {} p = re.compile(r'[n\d]*[ \S,]*') for line in proto_as_ascii_lines: parsed_items = p.findall(line) uid = parsed_items[0] human_string = parsed_items[2] uid_to_human[uid] = human_string # Loads mapping from string UID to integer node ID. 
node_id_to_uid = {} proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines() for line in proto_as_ascii: if line.startswith(' target_class:'): target_class = int(line.split(': ')[1]) if line.startswith(' target_class_string:'): target_class_string = line.split(': ')[1] node_id_to_uid[target_class] = target_class_string[1:-2] # Loads the final mapping of integer node ID to human-readable string node_id_to_name = {} for key, val in node_id_to_uid.items(): if val not in uid_to_human: tf.logging.fatal('Failed to locate: %s', val) name = uid_to_human[val] node_id_to_name[key] = name return node_id_to_name def callback(self, image_msg): cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8") image_data = cv2.imencode('.jpg', cv_image)[1].tostring() # Creates graph from saved GraphDef. softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0') predictions = self._session.run( softmax_tensor, {'DecodeJpeg/contents:0': image_data}) predictions = np.squeeze(predictions) # Creates node ID --> English string lookup. 
node_lookup = self.load(PATH_TO_LABELS, PATH_TO_UID) top_k = predictions.argsort()[-self.use_top_k:][::-1] for node_id in top_k: if node_id not in node_lookup: human_string = '' else: human_string = node_lookup[node_id] score = predictions[node_id] if score > self.score_threshold: rospy.loginfo('%s (score = %.5f)' % (human_string, score)) self._pub.publish(human_string) def main(self): rospy.spin() if __name__ == '__main__': ROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) PATH_TO_CKPT = ROOT_PATH + '/include/classifier/classify_image_graph_def.pb' PATH_TO_LABELS = ROOT_PATH + '/include/classifier/imagenet_2012_challenge_label_map_proto.pbtxt' PATH_TO_UID = ROOT_PATH + '/include/classifier/imagenet_synset_to_human_label_map.txt' with tf.gfile.FastGFile(PATH_TO_CKPT, 'rb') as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) _ = tf.import_graph_def(graph_def, name='') rospy.init_node('ros_tensorflow_classify') tensor = RosTensorFlow() tensor.main()
37.971154
100
0.640669
541
3,949
4.35305
0.286506
0.033121
0.023779
0.014013
0.152866
0.101911
0.061147
0.061147
0.03482
0.03482
0
0.008144
0.253735
3,949
103
101
38.339806
0.790974
0.068625
0
0
0
0
0.117375
0.058279
0
0
0
0
0
1
0.051948
false
0
0.12987
0
0.207792
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41a7a2ca9d0dcc0caead7e8e09caea9c36c46387
3,075
py
Python
python/v1/generate_default_line_item.py
googleads/googleads-displayvideo-examples
cd1b4b3bc63e068fef4ff23264232a65f70207b5
[ "Apache-2.0" ]
2
2021-10-08T12:10:38.000Z
2022-01-23T16:00:12.000Z
python/v1/generate_default_line_item.py
googleads/googleads-displayvideo-examples
cd1b4b3bc63e068fef4ff23264232a65f70207b5
[ "Apache-2.0" ]
1
2021-04-09T16:34:06.000Z
2021-04-12T14:42:00.000Z
python/v1/generate_default_line_item.py
googleads/googleads-displayvideo-examples
cd1b4b3bc63e068fef4ff23264232a65f70207b5
[ "Apache-2.0" ]
4
2021-05-20T17:55:54.000Z
2022-02-10T14:13:40.000Z
#!/usr/bin/python # # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This example generates a default line item under the given insertion order. The line item will inherit settings, including targeting, from the insertion order. If generating a Mobile App Install line item, an app ID must be provided. """ import argparse import os import sys from googleapiclient.errors import HttpError sys.path.insert(0, os.path.abspath('..')) import samples_util # Declare command-line flags. argparser = argparse.ArgumentParser(add_help=False) argparser.add_argument( 'advertiser_id', help='The ID of the parent advertiser of the line item to be created.') argparser.add_argument( 'insertion_order_id', help='The ID of the insertion order of the line item to be created.') argparser.add_argument( 'display_name', help='The display name of the line item to be created.') argparser.add_argument( 'line_item_type', help='The type of the line item to be created.') argparser.add_argument( '--app_id', help='The app ID of the mobile app promoted by the line item. Required and only valid if line ' 'item type is either LINE_ITEM_TYPE_DISPLAY_MOBILE_APP_INSTALL or ' 'LINE_ITEM_TYPE_VIDEO_MOBILE_APP_INSTALL.') def main(service, flags): # Create and populate the generateDefault request body. 
generate_default_line_item_request = { 'insertionOrderId': flags.insertion_order_id, 'displayName': flags.display_name, 'lineItemType': flags.line_item_type } # Add Mobile App object to request generating a Mobile App Install # line item. if flags.line_item_type in [ 'LINE_ITEM_TYPE_DISPLAY_MOBILE_APP_INSTALL', 'LINE_ITEM_TYPE_VIDEO_MOBILE_APP_INSTALL' ]: if not flags.app_id: print('Error: No app ID given for Mobile App Install line item. Exiting.') sys.exit(1) generate_default_line_item_request['mobileApp'] = {'appId': flags.app_id} try: # Build and execute request. response = service.advertisers().lineItems().generateDefault( advertiserId=flags.advertiser_id, body=generate_default_line_item_request).execute() except HttpError as e: print(e) sys.exit(1) # Display the new line item resource name. print(f'Line Item {response["name"]} was created.') if __name__ == '__main__': # Retrieve command line arguments. flags = samples_util.get_arguments(sys.argv, __doc__, parents=[argparser]) # Authenticate and construct service. service = samples_util.get_service(version='v1') main(service, flags)
33.791209
99
0.744715
446
3,075
4.964126
0.363229
0.083108
0.04336
0.036134
0.242096
0.217706
0.172538
0.079494
0.079494
0.079494
0
0.004706
0.170732
3,075
90
100
34.166667
0.863529
0.355772
0
0.155556
0
0
0.369365
0.08248
0
0
0
0
0
1
0.022222
false
0
0.111111
0
0.133333
0.066667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41a92c8d5a10768f359c0ce9e4aa075658259077
3,897
py
Python
datacombine/tests/test_models.py
Crimson-Star-Software/data-combine
3209ae2316afc38417e51c3261494d6e7d2e4e2a
[ "MIT" ]
null
null
null
datacombine/tests/test_models.py
Crimson-Star-Software/data-combine
3209ae2316afc38417e51c3261494d6e7d2e4e2a
[ "MIT" ]
3
2020-02-11T23:14:53.000Z
2021-06-10T18:32:57.000Z
datacombine/tests/test_models.py
Crimson-Star-Software/data-combine
3209ae2316afc38417e51c3261494d6e7d2e4e2a
[ "MIT" ]
null
null
null
from django.test import TestCase from datacombine import models as dcmodels from collections import namedtuple from django.core.exceptions import FieldError import re Match = namedtuple("Match", ["object", "regex", "match"]) class PhoneTestCase(TestCase): def setUp(self): dcmodels.Phone.objects.create(area_code="407", number="5559999") dcmodels.Phone.objects.create(number="1234567") dcmodels.Phone.objects.create(number="3141592", extension="48") dcmodels.Phone.objects.create( area_code="904", number="3141592", extension="2" ) def test_str(self): all_phone_nums = dcmodels.Phone.objects.all() matches = [] for num in all_phone_nums: regex_str = "" if getattr(num, 'area_code', None): regex_str += "\([0-9]{3}\)\-" regex_str += "[0-9]{3}\-[0-9]{4}" if getattr(num, "extension", None): regex_str += " x [0-9]+" match = True if re.match(regex_str, str(num)) else False matches.append(Match(num, regex_str, match)) ms = all([m.match for m in matches]) if not ms: for m in matches: if not m.match: print(f"Failure on {m.object} with {m.regex}") self.assertTrue(ms) def test_phone_create_from_str_1_block_7_digit(self): ph = dcmodels.Phone() ph.create_from_str("1234567") self.assertEqual(ph.number, "1234567") def test_phone_create_from_str_2_block_7_digit(self): ph = dcmodels.Phone() ph.create_from_str("123-4567") self.assertEqual(ph.number, "1234567") def test_phone_create_from_str_2_block_bad_7_digit(self): ph = dcmodels.Phone() ph.create_from_str("12-34567") self.assertEqual(ph.number, "1234567") def test_phone_create_from_str_3_block_bad_7_digit(self): ph = dcmodels.Phone() ph.create_from_str("(123)-45-67") self.assertEqual(ph.number, "1234567") def test_phone_create_from_average_str(self): ph = dcmodels.Phone() ph.create_from_str("(407)-666-9999") self.assertTrue(ph.area_code == "407" and ph.number == "6669999") def test_phone_create_from_average_str_with_ext(self): ph = dcmodels.Phone() ph.create_from_str("(407)-666-9999 x 49") self.assertTrue(ph.area_code == "407" and 
ph.number == "6669999"\ and ph.extension == "49") def test_phone_create_from_str_too_few_numbers(self): ph = dcmodels.Phone() with self.assertRaises(FieldError): ph.create_from_str("1") def test_phone_create_from_str_null(self): ph = dcmodels.Phone() ph.create_from_str("") self.assertTrue(ph.area_code == ph.number == ph.extension == None) def test_null_phone_is_none(self): ph = dcmodels.Phone() ph.create_from_str("") self.assertTrue(ph == None) def tearDown(self): dcmodels.Phone.objects.all().delete() class ContactTestCase(TestCase): def setUp(self): dcmodels.Phone.objects.create(area_code="407", number="5559999") dcmodels.Phone.objects.create(number="1234567") dcmodels.Phone.objects.create(number="3141592", extension="48") dcmodels.Phone.objects.create( area_code="904", number="3141592", extension="2" ) dcmodels.EmailAddress.objects.create( confirm_status=dcmodels.NO_CONFIRMATION_REQUIRED, email_address='pastor@stnerp.org', cc_id='a09d1c20-6aac-11e3-8c26-982bcb740129', opt_in_date='2011-06-27T18:47:16.000Z', opt_in_source=dcmodels.ACTION_BY_OWNER, status=dcmodels.ACTIVE ) def test_get_email_addresses(self): self.assertTrue(len(dcmodels.EmailAddress.objects.all()) == 1)
36.083333
74
0.628689
501
3,897
4.664671
0.255489
0.105691
0.08344
0.073171
0.560548
0.547711
0.500214
0.482242
0.482242
0.482242
0
0.079037
0.243521
3,897
107
75
36.420561
0.713704
0
0
0.303371
0
0
0.098794
0.015396
0
0
0
0
0.123596
1
0.157303
false
0
0.05618
0
0.235955
0.011236
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41a96ce1cac04188c0692b34694f59bc91f568fc
560
py
Python
tests/jdi_uitests_webtests/main/page_objects/pages/home_page.py
jdi-testing/jdi-python
7c0607b97d4d44b27ea8f532d47c68b8dd00e6f7
[ "MIT" ]
5
2020-02-14T10:32:01.000Z
2021-07-22T08:20:28.000Z
tests/jdi_uitests_webtests/main/page_objects/pages/home_page.py
jdi-testing/jdi-python
7c0607b97d4d44b27ea8f532d47c68b8dd00e6f7
[ "MIT" ]
54
2018-07-27T14:07:33.000Z
2021-11-08T09:24:16.000Z
tests/jdi_uitests_webtests/main/page_objects/pages/home_page.py
jdi-testing/jdi-python
7c0607b97d4d44b27ea8f532d47c68b8dd00e6f7
[ "MIT" ]
1
2021-01-20T14:31:52.000Z
2021-01-20T14:31:52.000Z
from JDI.web.selenium.elements.api_interact.find_element_by import By from JDI.web.selenium.elements.common.image import Image from JDI.web.selenium.elements.common.link import Link from JDI.web.selenium.elements.common.text import Text from JDI.web.selenium.elements.composite.web_page import WebPage class HomePage(WebPage): def __init__(self, url, title): super(HomePage, self).__init__(url=url, title=title) about = Link(By.link_text("About")) logo_image = Image(By.css(".epam-logo img")) text_item = Text(By.css(".main-txt"))
32.941176
69
0.753571
86
560
4.732558
0.395349
0.085995
0.12285
0.22113
0.363636
0.235872
0
0
0
0
0
0
0.121429
560
16
70
35
0.827236
0
0
0
0
0
0.05
0
0
0
0
0
0
1
0.090909
false
0
0.454545
0
0.909091
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
41a96db7893cd10b83b375c1bfc15b8901d9c8f4
207
py
Python
api/routes.py
santiagosilas/flask_sample_app
1a2f19439c1efb25abdccdb26f303f9cff95c911
[ "MIT" ]
null
null
null
api/routes.py
santiagosilas/flask_sample_app
1a2f19439c1efb25abdccdb26f303f9cff95c911
[ "MIT" ]
null
null
null
api/routes.py
santiagosilas/flask_sample_app
1a2f19439c1efb25abdccdb26f303f9cff95c911
[ "MIT" ]
null
null
null
from flask import render_template from api import app @app.route('/') @app.route('/home') def home(): return render_template('index.html', answer = 42) @app.route('/test') def test(): return 'test'
18.818182
53
0.676329
30
207
4.6
0.533333
0.173913
0
0
0
0
0
0
0
0
0
0.011364
0.149758
207
11
54
18.818182
0.772727
0
0
0
0
0
0.120192
0
0
0
0
0
0
1
0.222222
true
0
0.222222
0.222222
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
1
1
0
0
4
41a9a2d818ad02b61cfd18575ed9cdd4ecccdb57
36,453
py
Python
Capture the Flag/FirstRound.py
yokesh-git/Quiz-Application
2b990ee9f711d05956e76ade0550bfa1abd86b08
[ "MIT" ]
null
null
null
Capture the Flag/FirstRound.py
yokesh-git/Quiz-Application
2b990ee9f711d05956e76ade0550bfa1abd86b08
[ "MIT" ]
null
null
null
Capture the Flag/FirstRound.py
yokesh-git/Quiz-Application
2b990ee9f711d05956e76ade0550bfa1abd86b08
[ "MIT" ]
null
null
null
from tkinter import * from firebase import firebase from PIL import Image, ImageTk fbconn = firebase.FirebaseApplication('https://samplefbtest-266bd.firebaseio.com/',None) global crtans crtans = 0 class FirstRound: print("Done") def __init__(self, master): global w,h,ws,hs,x,y w = 1000 h = 650 ws = root.winfo_screenwidth() # width of the screen hs = root.winfo_screenheight() # height of the screen x = (ws/4) - (w/4) y = (hs/4) - (h/4) global answer1,answer2,anslist anslist = [] answer1 = '2' answer2 = '1' answer3 = '2' answer4 = '2' answer5 = '1' answer6 = '2' answer7 = '3' answer8 = '4' answer9 = '3' answer10 = '3' answer11 = '1' answer12 = '2' answer13 = '3' answer14 = '3' answer15 = '3' answer16 = '4' answer17 = '2' answer18 = '4' answer19 = '3' answer20 = '3' self.master=master master.title('First Round') self.frame = Frame(master,width=1000, height=600, bg='black') self.frame.pack() self.heading = Label(self.frame, text="Kalasalingam Institute of Technology", font=('arial 30 bold'), fg='black', bg='lightgreen') self.heading.place(x=180, y=20) self.title = Label(self.frame, text="Cybertron'20", font=('arial 30 bold'), fg='black', bg='lightgreen') self.title.place(x=350, y=100) self.name = Label(self.frame, text="Name :", font=('arial 13'), fg='black', bg='lightgreen') self.name.place(x=300, y=200) self.nameentry = Entry(self.frame,width=50) self.nameentry.place(x=400,y=200) self.clg = Label(self.frame, text="College :", font=('arial 13'), fg='black', bg='lightgreen') self.clg.place(x=300, y=250) self.clgentry = Entry(self.frame,width=50) self.clgentry.place(x=400,y=250) self.mail = Label(self.frame, text="Mail :", font=('arial 13'), fg='black', bg='lightgreen') self.mail.place(x=300, y=300) self.mailentry = Entry(self.frame,width=50) self.mailentry.place(x=400,y=300) self.start = Button(self.frame,width=10,text="Start",command = self.start) self.start.place(x=380,y=350) self.secondwin = Toplevel() self.secondwin.geometry('%dx%d+%d+%d' % (w, h, x, y)) 
self.frame1 = Frame(self.secondwin,width=1000, height=600, bg='black') self.frame1.pack() self.event = Label(self.secondwin, text="Capture The Flag", font=('arial 30 bold'), fg='black', bg='lightgreen') self.event.place(x=300, y=20) self.q1 = Image.open("images/q1-small.png") self.render = ImageTk.PhotoImage(self.q1) self.img = Label(self.secondwin, image=self.render) self.img.image = self.render self.img.place(x=50, y=100) def q1(): global ans,crtans ans = str(var.get()) if ans == answer1: crtans = crtans+1 #Question 1 var = IntVar() self.R1 = Radiobutton(self.secondwin, text="A) 6, 10, 8 ", variable=var, value=1,bg='lightgreen', command=q1) self.R1.place(x=500,y=100) self.R2 = Radiobutton(self.secondwin, text="B) 4, 8, 4", variable=var, value=2,bg='lightgreen', command=q1) self.R2.place(x=500,y=150) self.R3 = Radiobutton(self.secondwin, text="C) 2, 4, 4", variable=var, value=3,bg='lightgreen', command=q1) self.R3.place(x=500,y=200) self.R4 = Radiobutton(self.secondwin, text="D) 2, 8, 4", variable=var, value=4,bg='lightgreen', command=q1) self.R4.place(x=500,y=250) self.q2 = Image.open("images/q2-small.png") self.render = ImageTk.PhotoImage(self.q2) self.img = Label(self.secondwin, image=self.render) self.img.image = self.render self.img.place(x=50, y=350) '''self.q2 = Label(self.secondwin, text="2)How to declare a variable?", font=('arial 15'), fg='black', bg='lightgreen') self.q2.place(x=20, y=300)''' def q2(): global ans1,crtans ans1 = str(var1.get()) print(ans1) if ans1 == answer2: crtans = crtans+1 #Question 2 var1 = IntVar() self.R5 = Radiobutton(self.secondwin, text="A) 101010", variable=var1, value=1,bg='lightgreen', command=q2) self.R5.place(x=500,y=350) self.R6 = Radiobutton(self.secondwin, text="B) 0xxa5f1010", variable=var1, value=2,bg='lightgreen', command=q2) self.R6.place(x=500,y=400) self.R7 = Radiobutton(self.secondwin, text="C) Run time error", variable=var1, value=3,bg='lightgreen', command=q2) self.R7.place(x=500,y=450) self.R8 = 
Radiobutton(self.secondwin, text="D) No Output", variable=var1, value=4,bg='lightgreen', command=q2) self.R8.place(x=500,y=500) self.secondnext = Button(self.secondwin,width=10,text="NEXT",command = self.secondnext) self.secondnext.place(x=800,y=550) self.secondback = Button(self.secondwin,width=10,text="BACK",command = self.secondback) self.secondback.place(x=700,y=550) self.secondwin.withdraw() self.thirdwin = Toplevel() self.thirdwin.geometry('%dx%d+%d+%d' % (w, h, x, y)) self.frame2 = Frame(self.thirdwin,width=1000, height=600, bg='black') self.frame2.pack() self.q3 = Image.open("images/q3-small.png") self.render = ImageTk.PhotoImage(self.q3) self.img = Label(self.thirdwin, image=self.render) self.img.image = self.render self.img.place(x=50, y=100) def q3(): global ans2,crtans ans2 = str(var2.get()) if ans2 == answer3: crtans = crtans+1 #Question 3 var2 = IntVar() self.R9 = Radiobutton(self.thirdwin, text="A) 0", variable=var2, value=1,bg='lightgreen', command=q3) self.R9.place(x=500,y=100) self.R10 = Radiobutton(self.thirdwin, text="B) Error because of incorrect line-1 only.", variable=var2, value=2,bg='lightgreen', command=q3) self.R10.place(x=500,y=150) self.R11 = Radiobutton(self.thirdwin, text="C) Error because of incorrect line-1 and line-2.", variable=var2, value=3,bg='lightgreen', command=q3) self.R11.place(x=500,y=200) self.R12 = Radiobutton(self.thirdwin, text="D) Error because of incorrect line-2 only.", variable=var2, value=4,bg='lightgreen', command=q3) self.R12.place(x=500,y=250) self.q4 = Image.open("images/q4-small.png") self.render = ImageTk.PhotoImage(self.q4) self.img = Label(self.thirdwin, image=self.render) self.img.image = self.render self.img.place(x=50, y=350) '''self.q2 = Label(self.secondwin, text="2)How to declare a variable?", font=('arial 15'), fg='black', bg='lightgreen') self.q2.place(x=20, y=300)''' def q4(): global ans3,crtans ans3 = str(var3.get()) print(ans3) if ans3 == answer4: crtans = crtans+1 #Question 4 var3 = IntVar() 
self.R13 = Radiobutton(self.thirdwin, text="A) 0", variable=var3, value=1,bg='lightgreen', command=q4) self.R13.place(x=500,y=350) self.R14 = Radiobutton(self.thirdwin, text="B) Runtime error", variable=var3, value=2,bg='lightgreen', command=q4) self.R14.place(x=500,y=400) self.R15 = Radiobutton(self.thirdwin, text="C) 5", variable=var3, value=3,bg='lightgreen', command=q4) self.R15.place(x=500,y=450) self.R16 = Radiobutton(self.thirdwin, text="D) compilation error", variable=var3, value=4,bg='lightgreen', command=q4) self.R16.place(x=500,y=500) self.thirdnext = Button(self.thirdwin,width=10,text="NEXT",command = self.thirdnext) self.thirdnext.place(x=800,y=550) self.thirdback = Button(self.thirdwin,width=10,text="BACK",command = self.thirdback) self.thirdback.place(x=700,y=550) self.thirdwin.withdraw() self.forthwin = Toplevel() self.forthwin.geometry('%dx%d+%d+%d' % (w, h, x, y)) self.frame3 = Frame(self.forthwin,width=1000, height=600, bg='black') self.frame3.pack() self.q5 = Image.open("images/q5-small.png") self.render = ImageTk.PhotoImage(self.q5) self.img = Label(self.forthwin, image=self.render) self.img.image = self.render self.img.place(x=50, y=100) def q5(): global ans4,crtans ans4 = str(var4.get()) print(ans4) if ans4 == answer5: crtans = crtans+1 #Question 5 var4 = IntVar() self.R17 = Radiobutton(self.forthwin, text="A) address address value", variable=var4, value=1,bg='lightgreen', command=q5) self.R17.place(x=500,y=100) self.R18 = Radiobutton(self.forthwin, text="B) address value value", variable=var4, value=2,bg='lightgreen', command=q5) self.R18.place(x=500,y=150) self.R19 = Radiobutton(self.forthwin, text="C) address address address", variable=var4, value=3,bg='lightgreen', command=q5) self.R19.place(x=500,y=200) self.R20 = Radiobutton(self.forthwin, text="D) compilation error", variable=var4, value=4,bg='lightgreen', command=q5) self.R20.place(x=500,y=250) self.q6 = Image.open("images/q6-small.png") self.render = ImageTk.PhotoImage(self.q6) 
self.img = Label(self.forthwin, image=self.render) self.img.image = self.render self.img.place(x=50, y=350) def q6(): global ans5,crtans ans5 = str(var5.get()) print(ans5) if ans5 == answer6: crtans = crtans+1 #Question 6 var5 = IntVar() self.R21 = Radiobutton(self.forthwin, text="A) No output", variable=var5, value=1,bg='lightgreen', command=q6) self.R21.place(x=500,y=350) self.R22 = Radiobutton(self.forthwin, text="B) compile time error", variable=var5, value=2,bg='lightgreen', command=q6) self.R22.place(x=500,y=400) self.R23 = Radiobutton(self.forthwin, text="C) 1", variable=var5, value=3,bg='lightgreen', command=q6) self.R23.place(x=500,y=450) self.R24 = Radiobutton(self.forthwin, text="D) 4", variable=var5, value=4,bg='lightgreen', command=q6) self.R24.place(x=500,y=500) self.forthnext = Button(self.forthwin,width=10,text="NEXT",command = self.forthnext) self.forthnext.place(x=800,y=550) self.forthback = Button(self.forthwin,width=10,text="BACK",command = self.forthback) self.forthback.place(x=700,y=550) self.forthwin.withdraw() self.fifthwin = Toplevel() self.fifthwin.geometry('%dx%d+%d+%d' % (w, h, x, y)) self.frame3 = Frame(self.fifthwin,width=1000, height=600, bg='black') self.frame3.pack() self.q7 = Image.open("images/q7-small.png") self.render = ImageTk.PhotoImage(self.q7) self.img = Label(self.fifthwin, image=self.render) self.img.image = self.render self.img.place(x=50, y=100) def q7(): global ans6,crtans ans6 = str(var6.get()) print(ans6) if ans6 == answer7: crtans = crtans+1 #Question 7 var6 = IntVar() self.R25 = Radiobutton(self.fifthwin, text="A) The control won’t fall into the for loop", variable=var6, value=1,bg='lightgreen', command=q7) self.R25.place(x=500,y=100) self.R26 = Radiobutton(self.fifthwin, text="B) Numbers will be displayed until the signed limit of short and throw a run time error", variable=var6, value=2,bg='lightgreen', command=q7) self.R26.place(x=500,y=150) self.R27 = Radiobutton(self.fifthwin, text="C) ) Numbers will be 
displayed until the signed limit of short and program will \nsuccessfully terminate", variable=var6, value=3,bg='lightgreen', command=q7) self.R27.place(x=500,y=200) self.R28 = Radiobutton(self.fifthwin, text="D) This program will get into an infinite loop and keep printing numbers with no errors", variable=var6, value=4,bg='lightgreen', command=q7) self.R28.place(x=500,y=250) self.q8 = Image.open("images/q8-small.png") self.render = ImageTk.PhotoImage(self.q8) self.img = Label(self.fifthwin, image=self.render) self.img.image = self.render self.img.place(x=50, y=350) def q8(): global ans7,crtans ans7 = str(var7.get()) print(ans7) if ans7 == answer8: crtans = crtans+1 #Question 8 var7 = IntVar() self.R21 = Radiobutton(self.fifthwin, text="A) 0.000000 1.000000 2.000000", variable=var7, value=1,bg='lightgreen', command=q8) self.R21.place(x=500,y=350) self.R22 = Radiobutton(self.fifthwin, text="B) 2.000000", variable=var7, value=2,bg='lightgreen', command=q8) self.R22.place(x=500,y=400) self.R23 = Radiobutton(self.fifthwin, text="C) Compile time error", variable=var7, value=3,bg='lightgreen', command=q8) self.R23.place(x=500,y=450) self.R24 = Radiobutton(self.fifthwin, text="D) 3.000000", variable=var7, value=4,bg='lightgreen', command=q8) self.R24.place(x=500,y=500) self.fifthnext = Button(self.fifthwin,width=10,text="NEXT",command = self.fifthnext) self.fifthnext.place(x=800,y=550) self.fifthback = Button(self.fifthwin,width=10,text="BACK",command = self.fifthback) self.fifthback.place(x=700,y=550) self.fifthwin.withdraw() self.sixthwin = Toplevel() self.sixthwin.geometry('%dx%d+%d+%d' % (w, h, x, y)) self.frame4 = Frame(self.sixthwin,width=1000, height=600, bg='black') self.frame4.pack() self.q9 = Image.open("images/q9-small.png") self.render = ImageTk.PhotoImage(self.q9) self.img = Label(self.sixthwin, image=self.render) self.img.image = self.render self.img.place(x=50, y=100) def q9(): global ans8,crtans ans8 = str(var8.get()) print(ans8) if ans8 == answer9: 
crtans = crtans+1 #Question 9 var8 = IntVar() self.R29 = Radiobutton(self.sixthwin, text="A) 5", variable=var8, value=1,bg='lightgreen', command=q9) self.R29.place(x=500,y=100) self.R30 = Radiobutton(self.sixthwin, text="B) 0", variable=var8, value=2,bg='lightgreen', command=q9) self.R30.place(x=500,y=150) self.R31 = Radiobutton(self.sixthwin, text="C) Syntax Error", variable=var8, value=3,bg='lightgreen', command=q9) self.R31.place(x=500,y=200) self.R32 = Radiobutton(self.sixthwin, text="D) 05", variable=var8, value=4,bg='lightgreen', command=q9) self.R32.place(x=500,y=250) self.q10 = Image.open("images/q10-small.png") self.render = ImageTk.PhotoImage(self.q10) self.img = Label(self.sixthwin, image=self.render) self.img.image = self.render self.img.place(x=50, y=350) def q10(): global ans9,crtans ans9 = str(var9.get()) print(ans9) if ans9 == answer10: crtans = crtans+1 #Question 10 var9 = IntVar() self.R33 = Radiobutton(self.sixthwin, text="A) 11 33", variable=var9, value=1,bg='lightgreen', command=q10) self.R33.place(x=500,y=350) self.R34 = Radiobutton(self.sixthwin, text="B) Error", variable=var9, value=2,bg='lightgreen', command=q10) self.R34.place(x=500,y=400) self.R35 = Radiobutton(self.sixthwin, text="C) exception", variable=var9, value=3,bg='lightgreen', command=q10) self.R35.place(x=500,y=450) self.R36 = Radiobutton(self.sixthwin, text="D) 11 -33", variable=var9, value=4,bg='lightgreen', command=q10) self.R36.place(x=500,y=500) self.fifthnext = Button(self.sixthwin,width=10,text="NEXT",command = self.sixthnext) self.fifthnext.place(x=800,y=550) self.sixthback = Button(self.sixthwin,width=10,text="BACK",command = self.sixthback) self.sixthback.place(x=700,y=550) self.sixthwin.withdraw() self.seventhwin = Toplevel() self.seventhwin.geometry('%dx%d+%d+%d' % (w, h, x, y)) self.frame5 = Frame(self.seventhwin,width=1000, height=600, bg='black') self.frame5.pack() self.q11 = Image.open("images/q11-small.png") self.render = ImageTk.PhotoImage(self.q11) self.img = 
Label(self.seventhwin, image=self.render) self.img.image = self.render self.img.place(x=50, y=100) def q11(): global ans10,crtans ans10 = str(var10.get()) print(ans10) if ans10 == answer11: crtans = crtans+1 #Question 11 var10 = IntVar() self.R37 = Radiobutton(self.seventhwin, text="A) Garbage value", variable=var10, value=1,bg='lightgreen', command=q11) self.R37.place(x=500,y=100) self.R38 = Radiobutton(self.seventhwin, text="B) 1", variable=var10, value=2,bg='lightgreen', command=q11) self.R38.place(x=500,y=150) self.R39 = Radiobutton(self.seventhwin, text="C) 0", variable=var10, value=3,bg='lightgreen', command=q11) self.R39.place(x=500,y=200) self.R40 = Radiobutton(self.seventhwin, text="D) Error", variable=var10, value=4,bg='lightgreen', command=q11) self.R40.place(x=500,y=250) self.q12 = Image.open("images/q12-small.png") self.render = ImageTk.PhotoImage(self.q12) self.img = Label(self.seventhwin, image=self.render) self.img.image = self.render self.img.place(x=50, y=350) def q12(): global ans11,crtans ans11 = str(var11.get()) print(ans11) if ans11 == answer12: crtans = crtans+1 #Question 12 var11 = IntVar() self.R41 = Radiobutton(self.seventhwin, text="A) 5", variable=var11, value=1,bg='lightgreen', command=q12) self.R41.place(x=500,y=350) self.R42 = Radiobutton(self.seventhwin, text="B) 6", variable=var11, value=2,bg='lightgreen', command=q12) self.R42.place(x=500,y=400) self.R43 = Radiobutton(self.seventhwin, text="C) 14", variable=var11, value=3,bg='lightgreen', command=q12) self.R43.place(x=500,y=450) self.R44 = Radiobutton(self.seventhwin, text="D) Compilation Error", variable=var11, value=4,bg='lightgreen', command=q12) self.R44.place(x=500,y=500) self.seventhnext = Button(self.seventhwin,width=10,text="NEXT",command = self.seventhnext) self.seventhnext.place(x=800,y=550) self.seventhback = Button(self.seventhwin,width=10,text="BACK",command = self.seventhback) self.seventhback.place(x=700,y=550) self.seventhwin.withdraw() 
#----------------------------------------------------------------------# self.eighthwin = Toplevel() self.eighthwin.geometry('%dx%d+%d+%d' % (w, h, x, y)) self.frame6 = Frame(self.eighthwin,width=1000, height=600, bg='black') self.frame6.pack() self.q13 = Image.open("images/q13-small.png") self.render = ImageTk.PhotoImage(self.q13) self.img = Label(self.eighthwin, image=self.render) self.img.image = self.render self.img.place(x=50, y=100) def q13(): global ans12,crtans ans12 = str(var12.get()) print(ans12) if ans12 == answer13: crtans = crtans+1 #Question 13 var12 = IntVar() self.R45 = Radiobutton(self.eighthwin, text="A) The program has a compile error because the size of the array \nwasn’t specified when declaring the array.", variable=var12, value=1,bg='lightgreen', command=q13) self.R45.place(x=500,y=100) self.R46 = Radiobutton(self.eighthwin, text="B) The program has a runtime error because the array elements are not initialized.", variable=var12, value=2,bg='lightgreen', command=q13) self.R46.place(x=500,y=150) self.R47 = Radiobutton(self.eighthwin, text="C) The program runs fine and displays x[0] is 0.", variable=var12, value=3,bg='lightgreen', command=q13) self.R47.place(x=500,y=200) self.R48 = Radiobutton(self.eighthwin, text="D) The program has a runtime error because the array element x[0] is not defined.", variable=var12, value=4,bg='lightgreen', command=q13) self.R48.place(x=500,y=250) self.q14 = Image.open("images/q14-small.png") self.render = ImageTk.PhotoImage(self.q14) self.img = Label(self.eighthwin, image=self.render) self.img.image = self.render self.img.place(x=50, y=350) def q14(): global ans13,crtans ans13 = str(var13.get()) print(ans13) if ans13 == answer14: crtans = crtans+1 #Question 14 var13 = IntVar() self.R49 = Radiobutton(self.eighthwin, text="A) 0", variable=var13, value=1,bg='lightgreen', command=q14) self.R49.place(x=500,y=350) self.R50 = Radiobutton(self.eighthwin, text="B) 5", variable=var13, value=2,bg='lightgreen', command=q14) 
self.R50.place(x=500,y=400) self.R51 = Radiobutton(self.eighthwin, text="C) Exception is thrown", variable=var13, value=3,bg='lightgreen', command=q14) self.R51.place(x=500,y=450) self.R52 = Radiobutton(self.eighthwin, text="D) Returns the index of “Hari”", variable=var13, value=4,bg='lightgreen', command=q14) self.R52.place(x=500,y=500) self.eighthnext = Button(self.eighthwin,width=10,text="NEXT",command = self.eighthnext) self.eighthnext.place(x=800,y=550) self.eighthback = Button(self.eighthwin,width=10,text="BACK",command = self.eighthback) self.eighthback.place(x=700,y=550) self.eighthwin.withdraw() #=======================================================================================# self.ninthwin = Toplevel() self.ninthwin.geometry('%dx%d+%d+%d' % (w, h, x, y)) self.frame7 = Frame(self.ninthwin,width=1000, height=600, bg='black') self.frame7.pack() self.q15 = Image.open("images/q15-small.png") self.render = ImageTk.PhotoImage(self.q15) self.img = Label(self.ninthwin, image=self.render) self.img.image = self.render self.img.place(x=50, y=100) def q15(): global ans14,crtans ans14 = str(var14.get()) print(ans14) if ans14 == answer15: crtans = crtans+1 #Question 15 var14 = IntVar() self.R53 = Radiobutton(self.ninthwin, text="A) 123", variable=var14, value=1,bg='lightgreen', command=q15) self.R53.place(x=500,y=100) self.R54 = Radiobutton(self.ninthwin, text="B) 1", variable=var14, value=2,bg='lightgreen', command=q15) self.R54.place(x=500,y=150) self.R55 = Radiobutton(self.ninthwin, text="C) Error", variable=var14, value=3,bg='lightgreen', command=q15) self.R55.place(x=500,y=200) self.R56 = Radiobutton(self.ninthwin, text="D) 1 2 3", variable=var14, value=4,bg='lightgreen', command=q15) self.R56.place(x=500,y=250) self.q16 = Image.open("images/q16-small.png") self.render = ImageTk.PhotoImage(self.q16) self.img = Label(self.ninthwin, image=self.render) self.img.image = self.render self.img.place(x=50, y=350) def q16(): global ans15,crtans ans15 = 
str(var15.get()) print(ans15) if ans15 == answer16: crtans = crtans+1 #Question 16 var15 = IntVar() self.R57 = Radiobutton(self.ninthwin, text="A) Cybertron", variable=var15, value=1,bg='lightgreen', command=q16) self.R57.place(x=500,y=350) self.R58 = Radiobutton(self.ninthwin, text="B) CYBERTRON", variable=var15, value=2,bg='lightgreen', command=q16) self.R58.place(x=500,y=400) self.R59 = Radiobutton(self.ninthwin, text="C) False", variable=var15, value=3,bg='lightgreen', command=q16) self.R59.place(x=500,y=450) self.R60 = Radiobutton(self.ninthwin, text="D) True", variable=var15, value=4,bg='lightgreen', command=q16) self.R60.place(x=500,y=500) self.ninthnext = Button(self.ninthwin,width=10,text="NEXT",command = self.ninthnext) self.ninthnext.place(x=800,y=550) self.ninthback = Button(self.ninthwin,width=10,text="BACK",command = self.ninthback) self.ninthback.place(x=700,y=550) self.ninthwin.withdraw() #______________________________________________________________________________________________# self.tenthwin = Toplevel() self.tenthwin.geometry('%dx%d+%d+%d' % (w, h, x, y)) self.frame8 = Frame(self.tenthwin,width=1000, height=600, bg='black') self.frame8.pack() self.q17 = Image.open("images/q17-small.png") self.render = ImageTk.PhotoImage(self.q17) self.img = Label(self.tenthwin, image=self.render) self.img.image = self.render self.img.place(x=50, y=100) def q17(): global ans16,crtans ans16 = str(var16.get()) print(ans16) if ans16 == answer17: crtans = crtans+1 #Question 17 var16 = IntVar() self.R61 = Radiobutton(self.tenthwin, text="A) Type Error: can only concatenate list (not “int”) to list", variable=var16, value=1,bg='lightgreen', command=q17) self.R61.place(x=500,y=100) self.R62 = Radiobutton(self.tenthwin, text="B) 11", variable=var16, value=2,bg='lightgreen', command=q17) self.R62.place(x=500,y=150) self.R63 = Radiobutton(self.tenthwin, text="C) 12", variable=var16, value=3,bg='lightgreen', command=q17) self.R63.place(x=500,y=200) self.R64 = 
Radiobutton(self.tenthwin, text="D) 38", variable=var16, value=4,bg='lightgreen', command=q17) self.R64.place(x=500,y=250) self.q18 = Image.open("images/q18-small.png") self.render = ImageTk.PhotoImage(self.q18) self.img = Label(self.tenthwin, image=self.render) self.img.image = self.render self.img.place(x=50, y=350) def q18(): global ans17,crtans ans17 = str(var17.get()) print(ans17) if ans17 == answer18: crtans = crtans+1 #Question 18 var17 = IntVar() self.R65 = Radiobutton(self.tenthwin, text="A) [5, 2, 3, 4] [5, 2, 3, 4] [1, 2, 3, 4] [1, 2, 3, 4]", variable=var17, value=1,bg='lightgreen', command=q18) self.R65.place(x=500,y=350) self.R66 = Radiobutton(self.tenthwin, text="B) [[5], 2, 3, 4] [[5], 2, 3, 4] [[5], 2, 3, 4] [1, 2, 3, 4]", variable=var17, value=2,bg='lightgreen', command=q18) self.R66.place(x=500,y=400) self.R67 = Radiobutton(self.tenthwin, text="C) [5, 2, 3, 4] [5, 2, 3, 4] [5, 2, 3, 4] [1, 2, 3, 4]", variable=var17, value=3,bg='lightgreen', command=q18) self.R67.place(x=500,y=450) self.R68 = Radiobutton(self.tenthwin, text="D) [[5], 2, 3, 4] [[5], 2, 3, 4] [1, 2, 3, 4] [1, 2, 3, 4]", variable=var17, value=4,bg='lightgreen', command=q18) self.R68.place(x=500,y=500) self.tenthnext = Button(self.tenthwin,width=10,text="NEXT",command = self.tenthnext) self.tenthnext.place(x=800,y=550) self.tenthback = Button(self.tenthwin,width=10,text="BACK",command = self.tenthback) self.tenthback.place(x=700,y=550) self.tenthwin.withdraw() #____________________________________________---------------------------------------------# self.lastwin = Toplevel() self.lastwin.geometry('%dx%d+%d+%d' % (w, h, x, y)) self.frame9 = Frame(self.lastwin,width=1000, height=600, bg='black') self.frame9.pack() self.q19 = Image.open("images/q19-small.png") self.render = ImageTk.PhotoImage(self.q19) self.img = Label(self.lastwin, image=self.render) self.img.image = self.render self.img.place(x=50, y=100) def q19(): global ans18,crtans ans18 = str(var18.get()) print(ans18) if ans18 == 
answer19: crtans = crtans+1 #Question 19 var18 = IntVar() self.R69 = Radiobutton(self.lastwin, text="A) KeyError", variable=var18, value=1,bg='lightgreen', command=q19) self.R69.place(x=500,y=100) self.R70 = Radiobutton(self.lastwin, text="B) {0: 1, 7: 0, 1: 1, 8: 0}", variable=var18, value=2,bg='lightgreen', command=q19) self.R70.place(x=500,y=150) self.R71 = Radiobutton(self.lastwin, text="C) {0: 0, 7: 0, 1: 1, 8: 1}", variable=var18, value=3,bg='lightgreen', command=q19) self.R71.place(x=500,y=200) self.R72 = Radiobutton(self.lastwin, text="D) {1: 1, 7: 2, 0: 1, 8: 1}", variable=var18, value=4,bg='lightgreen', command=q19) self.R72.place(x=500,y=250) self.q20 = Image.open("images/q20-small.png") self.render = ImageTk.PhotoImage(self.q20) self.img = Label(self.lastwin, image=self.render) self.img.image = self.render self.img.place(x=50, y=350) def q20(): global ans19,crtans ans19 = str(var19.get()) print(ans19) if ans19 == answer20: crtans = crtans+1 #Question 20 var19 = IntVar() self.R73 = Radiobutton(self.lastwin, text="A) 100", variable=var19, value=1,bg='lightgreen', command=q20) self.R73.place(x=500,y=350) self.R74 = Radiobutton(self.lastwin, text="B) Compilation error", variable=var19, value=2,bg='lightgreen', command=q20) self.R74.place(x=500,y=400) self.R75 = Radiobutton(self.lastwin, text="C) Runtime error", variable=var19, value=3,bg='lightgreen', command=q20) self.R75.place(x=500,y=450) self.R76 = Radiobutton(self.lastwin, text="D) None of these", variable=var19, value=4,bg='lightgreen', command=q20) self.R76.place(x=500,y=500) self.save = Button(self.lastwin,width=10,text="Save",command = self.save) self.save.place(x=800,y=550) self.lastback = Button(self.lastwin,width=10,text="BACK",command = self.lastback) self.lastback.place(x=700,y=550) self.lastwin.withdraw() def save(self): print("Done") self.name = self.nameentry.get() self.clg = self.clgentry.get() self.mail = self.mailentry.get() data_to_upload = {'Name' : self.name, 'College' : self.clg, 
'Mail' : self.mail, 'Correct' : crtans} result = fbconn.post('/candidate/',data_to_upload) self.save.config(state="disabled") self.answer() def answer(self): if answer1==ans: anslist.append('1') else: anslist.append('0') if answer2==ans1: anslist.append('1') else: anslist.append('0') print(anslist) def start(self): self.master.withdraw() self.secondwin.deiconify() def secondnext(self): print("Done") self.secondwin.withdraw() self.thirdwin.deiconify() def thirdnext(self): print("Third Next") self.thirdwin.withdraw() self.forthwin.deiconify() def forthnext(self): print("Forth Next") self.forthwin.withdraw() self.fifthwin.deiconify() def fifthnext(self): print("Fifth Next") self.fifthwin.withdraw() self.sixthwin.deiconify() def sixthnext(self): print("Sixth Next") self.sixthwin.withdraw() self.seventhwin.deiconify() def seventhnext(self): print("Seventh Next") self.seventhwin.withdraw() self.eighthwin.deiconify() def eighthnext(self): print("Eighth Next") self.eighthwin.withdraw() self.ninthwin.deiconify() def ninthnext(self): print("Ninth Next") self.ninthwin.withdraw() self.tenthwin.deiconify() def tenthnext(self): print("Tentn Next") self.tenthwin.withdraw() self.lastwin.deiconify() def lastnext(self): print("Last Next") def secondback(self): print("Back") self.secondwin.withdraw() self.master.deiconify() def thirdback(self): self.thirdwin.withdraw() self.secondwin.deiconify() def forthback(self): self.forthwin.withdraw() self.thirdwin.deiconify() def fifthback(self): self.fifthwin.withdraw() self.forthwin.deiconify() def sixthback(self): self.sixthwin.withdraw() self.fifthwin.deiconify() def seventhback(self): self.seventhwin.withdraw() self.sixthwin.deiconify() def eighthback(self): self.eighthwin.withdraw() self.seventhwin.deiconify() def ninthback(self): self.ninthwin.withdraw() self.eighthwin.deiconify() def tenthback(self): self.tenthwin.withdraw() self.ninthwin.deiconify() def lastback(self): self.lastwin.withdraw() self.tenthwin.deiconify() root = Tk() obj 
= FirstRound(root) root.geometry('%dx%d+%d+%d' % (w, h, x, y)) root.mainloop()
37.853583
206
0.555373
4,576
36,453
4.391827
0.097028
0.039409
0.075633
0.039807
0.627457
0.341942
0.215704
0.157038
0.147435
0.133254
0
0.086289
0.297726
36,453
962
207
37.892931
0.69875
0.016213
0
0.313187
0
0.006868
0.108442
0
0
0
0
0
0
1
0.06044
false
0
0.004121
0
0.065934
0.04533
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41a9d57be733f7ec06133940f72c9adc60cb07fd
2,491
py
Python
swap.py
garrettkatz/ghu
2bf25ac6f8e82d3e7231c3381f7a4946db6dc59f
[ "MIT" ]
null
null
null
swap.py
garrettkatz/ghu
2bf25ac6f8e82d3e7231c3381f7a4946db6dc59f
[ "MIT" ]
null
null
null
swap.py
garrettkatz/ghu
2bf25ac6f8e82d3e7231c3381f7a4946db6dc59f
[ "MIT" ]
null
null
null
""" Swap input (rinp) on output (rout) with one extra registers (rtmp) """ import numpy as np import torch as tr import matplotlib.pyplot as pt from ghu import * from codec import Codec from controller import Controller from lvd import lvd from reinforce import reinforce if __name__ == "__main__": print("*******************************************************") # Configuration num_symbols = 4 layer_sizes = {"rinp": 64, "rout":64, "rtmp": 64} hidden_size = 32 rho = .99 plastic = [] num_episodes = 1000 # Setup GHU symbols = [str(a) for a in range(num_symbols)] pathways, associations = default_initializer( # all to all layer_sizes.keys(), symbols) codec = Codec(layer_sizes, symbols, rho=rho) controller = Controller(layer_sizes, pathways, hidden_size, plastic) ghu = GatedHebbianUnit( layer_sizes, pathways, controller, codec, batch_size = num_episodes, plastic = plastic) ghu.associate(associations) # Initialize layers separator = "0" ghu.fill_layers(separator) # training example generation def training_example(): # Randomly choose swap symbols (excluding 0 separator) inputs = np.random.choice(symbols[1:], size=2, replace=False) targets = inputs[::-1] return inputs, targets # reward calculation based on leading LVD at individual steps def reward(ghu, targets, outputs): idx = [i for i, out in enumerate(outputs) if out != separator] outputs_ = [out for out in outputs if out != separator] _, d = lvd(outputs_, targets) r = np.zeros(len(outputs)) for i in range(1,d.shape[0]): r[idx[i-1]] = +1. if (i < d.shape[1] and d[i,i] == d[i-1,i-1]) else -1. 
return r # Run optimization avg_rewards, grad_norms = reinforce(ghu, num_epochs = 100, episode_duration = 3, training_example = training_example, reward = reward, task = "swap", learning_rate = .2, # line_search_iterations = 5, # distribution_cap = .1, # likelihood_cap = .7, distribution_variance_coefficient = 0.01, verbose = 1) pt.figure(figsize=(4,3)) pt.subplot(2,1,1) pt.plot(avg_rewards) pt.title("Learning curve") pt.ylabel("Avg Reward") pt.subplot(2,1,2) pt.plot(grad_norms) pt.xlabel("Epoch") pt.ylabel("||Grad||") pt.tight_layout() pt.show()
30.753086
83
0.608591
316
2,491
4.664557
0.439873
0.033921
0.024423
0.028494
0
0
0
0
0
0
0
0.02603
0.259735
2,491
80
84
31.1375
0.773319
0.140506
0
0
0
0
0.055111
0.025907
0
0
0
0
0
1
0.033898
false
0
0.135593
0
0.20339
0.016949
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41aa2d6b40a21820a8c6f0096cf82ccb5a78479c
1,103
py
Python
face-alignment.py
binhmuc/faced
cbc18f552da9c53628d61d56de7dfda451a6e25f
[ "MIT" ]
null
null
null
face-alignment.py
binhmuc/faced
cbc18f552da9c53628d61d56de7dfda451a6e25f
[ "MIT" ]
null
null
null
face-alignment.py
binhmuc/faced
cbc18f552da9c53628d61d56de7dfda451a6e25f
[ "MIT" ]
null
null
null
import face_alignment from skimage import io import cv2 from skimage import img_as_float from skimage import io import matplotlib.pyplot as plt from faced import FaceDetector from faced.utils import annotate_image import time fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, face_detector='sfd') video = cv2.VideoCapture(0) def draw(fr,Z): for i in Z: cv2.circle(fr,i, 2, (225,255,255), -1) return fr frame_count = 0 tt_opencvHaar = 0 while True: _, fr = video.read() predss = fa.get_landmarks(fr) if predss is not None: for preds in predss: Z = zip(preds[0:68,0], preds[0:68,1]) fr = draw(fr,Z) ##GET fps frame_count += 1 t = time.time() tt_opencvHaar += time.time() - t fpsOpencvHaar = frame_count / tt_opencvHaar label = "FPS : {:.2f}".format(fpsOpencvHaar) cv2.putText(fr, label, (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.4, (0, 0, 255), 3, cv2.LINE_AA) if frame_count == 1: tt_opencvHaar = 0 #---------------------------# cv2.imshow('image',fr) if cv2.waitKey(1) == 27: break cv2.destroyAllWindows()
23.978261
94
0.664551
170
1,103
4.188235
0.464706
0.05618
0.071629
0.053371
0.070225
0
0
0
0
0
0
0.057239
0.192203
1,103
45
95
24.511111
0.741863
0.030825
0
0.111111
0
0
0.019608
0
0
0
0
0
0
1
0.027778
false
0
0.25
0
0.305556
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41ac00fd9043a9b17ba6e49efc0299f2d40364b0
847
py
Python
tests/test_fingers/test_theme/test_models.py
sonirico/wpoke
be193a41159dabf912d793eb5a6ebf2f0e9440bb
[ "MIT" ]
4
2019-08-19T12:32:40.000Z
2019-10-25T20:57:29.000Z
tests/test_fingers/test_theme/test_models.py
sonirico/wpoke
be193a41159dabf912d793eb5a6ebf2f0e9440bb
[ "MIT" ]
15
2019-07-15T18:30:43.000Z
2020-09-25T08:10:05.000Z
tests/test_fingers/test_theme/test_models.py
sonirico/wpoke
be193a41159dabf912d793eb5a6ebf2f0e9440bb
[ "MIT" ]
null
null
null
import unittest from wpoke.fingers.theme.models import WPThemeMetadata from wpoke.fingers.theme.serializers import WPThemeMetadataSerializer class TestWPThemeMetadata(unittest.TestCase): def test_serialize_empty_values(self): wp_metadata_model = WPThemeMetadata() serializer = WPThemeMetadataSerializer(wp_metadata_model) w_serialized = serializer.data self.assertIsInstance(w_serialized["tags"], list) self.assertIsNone(w_serialized["theme_name"]) def test_serialize_tags_field(self): wp_metadata_model = WPThemeMetadata() wp_metadata_model.tags = "hacking, programming , devops" serializer = WPThemeMetadataSerializer(wp_metadata_model) w_serialized = serializer.data self.assertListEqual(w_serialized["tags"], ["hacking", "programming", "devops"])
35.291667
88
0.747344
85
847
7.188235
0.423529
0.081833
0.12275
0.06874
0.369885
0.258592
0.258592
0.258592
0.258592
0.258592
0
0
0.168831
847
23
89
36.826087
0.867898
0
0
0.375
0
0
0.085006
0
0
0
0
0
0.1875
1
0.125
false
0
0.1875
0
0.375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41ae040201f6c28a176e8746c32a9793421e405a
2,989
py
Python
src/Config.py
albertomn86/Weather-Station-Receiver
53745e51e8227ab40ced665ec0083bfd62a951da
[ "Apache-2.0" ]
1
2020-01-13T20:56:49.000Z
2020-01-13T20:56:49.000Z
src/Config.py
albertomn86/Weather-Station-Receiver
53745e51e8227ab40ced665ec0083bfd62a951da
[ "Apache-2.0" ]
null
null
null
src/Config.py
albertomn86/Weather-Station-Receiver
53745e51e8227ab40ced665ec0083bfd62a951da
[ "Apache-2.0" ]
null
null
null
from yaml import safe_load, YAMLError from os import path from src.Device import Device from typing import Any, Optional class Config(object): def __init__(self, file: str): if not path.exists(file): raise FileNotFoundError(f"Config file not found: {file}") with open(file, 'r') as stream: try: self.__config = safe_load(stream) except YAMLError: raise ConfigException(f"Invalid configuration file: {file}") if self.__config is None: raise ConfigException(f"Empty configuration file: {file}") self.__serial_port = Config.__parse_receiver(self.__config) self.__upload_addres, \ self.__upload_api_key = Config.__parse_upload(self.__config) self.__devices_list, \ self.__allowed_devices_id_list, \ self.__devices_with_subsciption = \ Config.__parse_devices(self.__config) @staticmethod def __parse_receiver(config: dict) -> str: receiver = config.get("Receiver") if receiver is not None: serial_port = receiver.get("SerialPort") if serial_port is not None: return serial_port raise ConfigException("Serial port not specified") @staticmethod def __parse_upload(config: dict) -> tuple[Optional[Any], Optional[Any]]: address = None api_key = None upload = config.get("Upload") if upload is not None: address = upload.get("Address") api_key = upload.get("ApiKey") return address, api_key @staticmethod def __parse_devices(config: dict) -> \ tuple[list[Device], list[Any], list[Any]]: devices = config.get("Devices") if devices is None: raise ConfigException("No devices found") device_list = [] allowed_id_list = [] devices_with_subscription = [] for item in devices: device = Device(item) if device.id in allowed_id_list: continue device_list.append(device) allowed_id_list.append(device.id) if device.subscription_device is not None: devices_with_subscription.append(device.subscription_device) return device_list, allowed_id_list, devices_with_subscription def get_valid_devices_id_list(self) -> list: return self.__allowed_devices_id_list def get_device_by_id(self, id: str) -> Device: return [x for x 
in self.__devices_list if x.id == id][0] def get_devices_with_subscription(self) -> list: return self.__devices_with_subsciption def get_receiver_serial_port(self) -> str: return self.__serial_port def get_upload_address(self) -> Optional[Any]: return self.__upload_addres def get_upload_api_key(self) -> Optional[Any]: return self.__upload_api_key class ConfigException(Exception): pass
31.797872
76
0.636333
354
2,989
5.031073
0.214689
0.039304
0.020213
0.029197
0.113419
0.086468
0.051656
0.051656
0
0
0
0.000465
0.280027
2,989
93
77
32.139785
0.827138
0
0
0.042254
0
0
0.060555
0
0
0
0
0
0
1
0.140845
false
0.014085
0.056338
0.084507
0.352113
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41aeaa22875724364cf9ef95a6d424d9dc584608
3,687
py
Python
LinkedInViewer.py
Glavin001/LinkedInProfileViewer
a743385a88b2a38b5d4fcae94b08d27dee43759a
[ "CC0-1.0" ]
1
2015-07-25T16:20:18.000Z
2015-07-25T16:20:18.000Z
LinkedInViewer.py
Glavin001/LinkedInProfileViewer
a743385a88b2a38b5d4fcae94b08d27dee43759a
[ "CC0-1.0" ]
null
null
null
LinkedInViewer.py
Glavin001/LinkedInProfileViewer
a743385a88b2a38b5d4fcae94b08d27dee43759a
[ "CC0-1.0" ]
null
null
null
import cookielib import os import urllib import urllib2 import re import string from BeautifulSoup import BeautifulSoup from random import random from time import sleep cookie_filename = "parser.cookies.txt" #The script will go through this persons 'also viewed' people. Change this to a profile of any person in the following format PUT_ANY_LINK_HERE="http://www.linkedin.com/profile/view?id=000001&" class LinkedInParser(object): def __init__(self, login, password): """ Start up... """ self.login = login self.password = password # Simulate browser with cookies enabled self.cj = cookielib.MozillaCookieJar(cookie_filename) if os.access(cookie_filename, os.F_OK): self.cj.load() self.opener = urllib2.build_opener( urllib2.HTTPRedirectHandler(), urllib2.HTTPHandler(debuglevel=0), urllib2.HTTPSHandler(debuglevel=0), urllib2.HTTPCookieProcessor(self.cj) ) self.opener.addheaders = [ ('User-agent', ('Mozilla/4.0 (compatible; MSIE 6.0; ' 'Windows NT 5.2; .NET CLR 1.1.4322)')) ] # Login self.loginPage() self.queue = [] self.collectedPeople = [] #collect from db f = open('db_looked.txt') lines = map(lambda x: x.strip('\n'), f.readlines()) f.close() self.collectedPeople.extend(lines) currentPerson_url = PUT_ANY_LINK_HERE while True: while self.grabNewPerson(currentPerson_url)==False: currentPerson_url = "http://www.linkedin.com"+self.queue.pop(0) print self.extractProfileId(currentPerson_url) self.collectedPeople.append(self.extractProfileId(currentPerson_url)) currentPerson_url = "http://www.linkedin.com"+self.queue.pop(0) sleep(3+random()*2) self.cj.save() def loadPage(self, url, data=None): """ Utility function to load HTML from URLs for us with hack to continue despite 404 """ # We'll print the url in case of infinite loop # print "Loading URL: %s" % url try: if data is not None: response = self.opener.open(url, data) else: response = self.opener.open(url) return ''.join(response.readlines()) except: # If URL doesn't load for ANY reason, try again... 
# Quick and dirty solution for 404 returns because of network problems # However, this could infinite loop if there's an actual problem return self.loadPage(url, data) def loginPage(self): """ Handle login. This should populate our cookie jar. """ login_data = urllib.urlencode({ 'session_key': self.login, 'session_password': self.password, }) html = self.loadPage("https://www.linkedin.com/uas/login-submit", login_data) return def extractProfileId(self,s): return s[s.index("id=")+3:s.index("&")] def grabNewPerson(self, currentPerson_url): if self.extractProfileId(currentPerson_url) in self.collectedPeople: return False html = self.loadPage(currentPerson_url) soup = BeautifulSoup(html) raw_cards = soup.findAll("li",{"class": "with-photo"}) cards = [] for i in raw_cards: cards.append(i.find("a")["href"]) self.queue.extend(cards) return True #For the scraper to work, you need to be loggedin to LinkedIn... parser = LinkedInParser("email", "password")
32.342105
125
0.606455
433
3,687
5.092379
0.448037
0.065306
0.025397
0.02449
0.065306
0.04263
0.04263
0.04263
0.04263
0.04263
0
0.014101
0.28831
3,687
114
126
32.342105
0.82622
0.136154
0
0.027027
0
0
0.104733
0
0
0
0
0
0
0
null
null
0.054054
0.121622
null
null
0.013514
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
1
0
0
0
0
0
2
41af722e2abcb956634da9556cf5120ca6d46ddf
6,316
py
Python
cwVQ.py
USC-MCL/Func-Pool
20c43df0eb2da68d8d2e01c03d66a1a4e4e06081
[ "MIT" ]
3
2020-01-24T19:03:44.000Z
2021-04-13T17:22:36.000Z
cwVQ.py
USC-MCL/Func-Pool
20c43df0eb2da68d8d2e01c03d66a1a4e4e06081
[ "MIT" ]
null
null
null
cwVQ.py
USC-MCL/Func-Pool
20c43df0eb2da68d8d2e01c03d66a1a4e4e06081
[ "MIT" ]
3
2020-01-24T19:03:45.000Z
2020-04-13T08:27:13.000Z
# 2020.10.19 # @yifan # channel-wise VQ # input is asumed to be DCT/PCA coefficients # import numpy as np from sklearn import cluster import copy from skimage.metrics import mean_squared_error from sklearn.metrics.pairwise import euclidean_distances from myPCA import myPCA from util import * def check_mse(X, km, PSNR_TH): mse_TH = 255**2 / pow(10, PSNR_TH / 10) idx = km.predict(X) res = km.cluster_centers_[idx] mse = mean_squared_error(X, res) if mse > mse_TH: return mse, False return mse, True class cwVQ(): # cw_idx: splitting point # cw_N: num codeword for each cluster def __init__(self, cw_idx, cw_N, PSNR_TH): self.cw_idx = cw_idx self.cw_N = cw_N self.PSNR_TH = PSNR_TH self.km_list = [] self.cent_list = [] self.dim = 0 self.trained = False def fit(self, X): self.dim = X.shape[-1] print(" \033[32m---> cwVQ, num of raining smaples: %d"%(X.shape[0])) for i in range(1, len(self.cw_idx)): tmp = X[:, self.cw_idx[i-1]:self.cw_idx[i]] N = self.cw_N[i-1] while N < 200 * self.cw_N[i-1]: km = cluster.KMeans(n_clusters=int(N), n_init=7) print(np.std(tmp)) km.fit(tmp) mse, flag = check_mse(tmp, km, self.PSNR_TH) #flag = True if flag == True: print(" ---> MSE=%3f nice, stop"%(mse)) break N += 1 print(" ---> MSE=%3f too large, increase N to %2d"%(mse, N)) self.cw_N[i-1] = N print(" <INFO> Finish training feature idx %d - %d, with N=%d" %(self.cw_idx[i-1], self.cw_idx[i], self.cw_N[i-1])) km.cluster_centers_.sort(axis=0) self.km_list.append(km) self.cent_list.append(km.cluster_centers_) print("\033[0m") self.trained = True def encode(self, X): assert (self.trained == True), " \033[0;91m<ERROR> Call fit first!\033[0m" idx = [] for i in range(1, len(self.cw_idx)): tmp = X[:, self.cw_idx[i-1]:self.cw_idx[i]] tmp_idx = np.argmin(euclidean_distances(tmp, self.cent_list[i-1]), axis=1)#self.km_list[i-1].predict(tmp) idx.append(tmp_idx) return idx def decode(self, idx): assert (self.trained == True), " \033[0;91m<ERROR> Call fit first!\033[0m" res = [] print(idx[1][:10], 
self.cent_list[1][idx[1][:10]]) for i in range(len(idx)): tmp = self.cent_list[i][idx[i]] res.append(tmp) res = np.concatenate(res, axis=1) if res.shape[-1] < self.dim: res = np.concatenate((res, np.zeros((res.shape[0], self.dim-res.shape[-1]))), axis=1) return res class cwVQ4D(cwVQ): def __init__(self, cw_idx, cw_N, PSNR_TH, win, mode=0): super().__init__(cw_idx, cw_N, PSNR_TH) self.win = win self.mode = mode self.pca = myPCA(n_components=-1) def to2D(self, X, train=True): X = Shrink(X, {'win':self.win}) if self.mode == 1: X = DCT(X) X = ZigZag().transform(X) elif self.mode == 2: if train == True: self.pca.fit(X) self.pca.transform(X) return X.reshape(-1, self.win**2), X.shape def to4D(self, X, S): X = X.reshape(S) if self.mode == 1: X = ZigZag().inverse_transform(X) X = IDCT(X) elif self.mode == 2: self.pca.inverse_transform(X) return invShrink(X, {'win':self.win}) def fit(self, X): X, _ = self.to2D(X, train=True) super().fit(X) def encode(self, X): X, S = self.to2D(X, train=False) return super().encode(X), S def decode(self, idx, S): res = super().decode(idx) return self.to4D(res, S) class kmVQ(): def __init__(self, N): self.km = cluster.KMeans(n_clusters=int(N), n_init=7) self.cent = [] def fit(self, X): print(" \033[32m---> VQ, num of raining smaples: %d"%(X.shape[0])) self.km.fit(X) self.cent = self.km.cluster_centers_ def encode(self, X): return self.km.predict(X) def decode(self, idx): return self.cent[idx] class kmVQ4D(kmVQ): def __init__(self, N, win, mode=0): super().__init__(N) self.win = win self.mode = mode self.mode = mode self.pca = myPCA(n_components=32) def to2D(self, X, train=True): X = Shrink(X, {'win':self.win}) if self.mode == 1: X = DCT(X) X = ZigZag().transform(X) elif self.mode == 2: if train == True: self.pca.fit(X) self.pca.transform(X) return X.reshape(-1, self.win**2), X.shape def to4D(self, X, S): X = X.reshape(S) if self.mode == 1: X = ZigZag().inverse_transform(X) X = IDCT(X) elif self.mode == 2: self.pca.inverse_transform(X) return 
invShrink(X, {'win':self.win}) def fit(self, X): X, _ = self.to2D(X, train=True) super().fit(X) def encode(self, X): X, S = self.to2D(X, train=False) return super().encode(X), S def decode(self, idx, S): res = super().decode(idx) return self.to4D(res, S) if __name__ == "__main__": import time from evaluate import * import cv2 X = cv2.imread("/Users/alex/Desktop/proj/compression/data/Kodak/kodim01.png", 0) X = X.reshape(1, X.shape[0], X.shape[1], 1) t0 = time.time() vq = cwVQ4D(cw_idx=[0, 1, 2, 3, 4], cw_N=[6, 7, 7, 7], PSNR_TH=30, win=2) vq.fit(X) idx, S = vq.encode(copy.deepcopy(X)) iX = vq.decode(idx, S) print(' \033[37m-->cwVQ using %s codewords, PSNR=%f, using time %5f sec'%(str(vq.cw_N), PSNR(X, iX), time.time()-t0)) t0 = time.time() km = kmVQ4D(np.sum(vq.cw_N), 2) km.fit(X) idx, S = km.encode(X) iX = km.decode(idx, S) print(' -->VQ using %d codewords, PSNR=%f, using time %5f sec\033[0m'%(np.sum(vq.cw_N), PSNR(X, iX), time.time()-t0))
33.417989
131
0.528816
957
6,316
3.375131
0.174504
0.029721
0.03065
0.018576
0.501858
0.466873
0.460681
0.419505
0.381115
0.345511
0
0.036128
0.316339
6,316
188
132
33.595745
0.711904
0.028024
0
0.439759
0
0
0.088922
0.009626
0
0
0
0
0.012048
1
0.126506
false
0
0.060241
0.012048
0.295181
0.060241
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41b0f6b2f2c3b8ff76594cc3ca1e5683847bdfa3
3,324
py
Python
venv/Lib/site-packages/tests/unit/driver/test_base_generic_driver.py
melihteke/ebook_study
4848ea42e37ee1d6ec777bfc33f49984653ace34
[ "MIT" ]
null
null
null
venv/Lib/site-packages/tests/unit/driver/test_base_generic_driver.py
melihteke/ebook_study
4848ea42e37ee1d6ec777bfc33f49984653ace34
[ "MIT" ]
null
null
null
venv/Lib/site-packages/tests/unit/driver/test_base_generic_driver.py
melihteke/ebook_study
4848ea42e37ee1d6ec777bfc33f49984653ace34
[ "MIT" ]
null
null
null
from pathlib import Path import pytest import scrapli from scrapli.response import MultiResponse, Response TEST_DATA_DIR = f"{Path(scrapli.__file__).parents[1]}/tests/test_data/" def test_pre_send_command_exception(sync_cisco_iosxe_conn): with pytest.raises(TypeError) as exc: sync_cisco_iosxe_conn._pre_send_command( host=sync_cisco_iosxe_conn.transport.host, command=[] ) assert ( str(exc.value) == "`send_command` expects a single string, got <class 'list'>, to send a list of commands use the `send_commands` method instead." ) def test_pre_send_command(sync_cisco_iosxe_conn): response = sync_cisco_iosxe_conn._pre_send_command( host=sync_cisco_iosxe_conn.transport.host, command="show version", failed_when_contains=["something"], ) assert isinstance(response, Response) assert response.host == sync_cisco_iosxe_conn.transport.host assert response.failed_when_contains == ["something"] assert response.channel_input == "show version" def test_post_send_command(sync_cisco_iosxe_conn): response = sync_cisco_iosxe_conn._pre_send_command( host=sync_cisco_iosxe_conn.transport.host, command="show version", failed_when_contains=["something"], ) final_response = sync_cisco_iosxe_conn._post_send_command( raw_response=b"blah", processed_response=b"blahx2", response=response ) # generic driver doesnt know/care about textfsm_platform assert final_response.textfsm_platform == "" assert final_response.start_time == response.start_time assert final_response.elapsed_time > 0 def test_pre_send_commands_exception(sync_cisco_iosxe_conn): with pytest.raises(TypeError) as exc: sync_cisco_iosxe_conn._pre_send_commands(commands="boo") assert ( str(exc.value) == "`send_commands` expects a list of strings, got <class 'str'>, to send a single command use the `send_command` method instead." 
) def test_pre_send_commands(sync_cisco_iosxe_conn): responses = sync_cisco_iosxe_conn._pre_send_commands(commands=["show version"]) assert isinstance(responses, MultiResponse) assert responses.failed is False def test_pre_send_commands_from_file_exception(sync_cisco_iosxe_conn): with pytest.raises(TypeError) as exc: sync_cisco_iosxe_conn._pre_send_commands_from_file(file=[]) assert ( str(exc.value) == "`send_commands_from_file` expects a string path to a file, got <class 'list'>" ) def test_pre_send_commands_from_file(sync_cisco_iosxe_conn): commands = sync_cisco_iosxe_conn._pre_send_commands_from_file( file=f"{TEST_DATA_DIR}/files/vrnetlab_key" ) assert commands[0] == "-----BEGIN OPENSSH PRIVATE KEY-----" assert commands[-1] == "-----END OPENSSH PRIVATE KEY-----" def test_pre_send_commands_interactive(sync_cisco_iosxe_conn): response = sync_cisco_iosxe_conn._pre_send_interactive( host=sync_cisco_iosxe_conn.transport.host, interact_events=[("input1", "expected1"), ("input2", "expected2")], ) assert isinstance(response, Response) assert response.host == sync_cisco_iosxe_conn.transport.host assert response.failed_when_contains is None assert response.channel_input == "input1, input2"
36.933333
139
0.737365
440
3,324
5.181818
0.220455
0.090789
0.141228
0.181579
0.624561
0.524123
0.476754
0.435088
0.413596
0.413596
0
0.003983
0.169073
3,324
89
140
37.348315
0.821506
0.016245
0
0.318841
0
0.028986
0.187883
0.033966
0
0
0
0
0.26087
1
0.115942
false
0
0.057971
0
0.173913
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
41b1d06924eae19655ba9eb1faf5d08b5dbcc871
2,384
py
Python
polsalt/scrunch1d.py
Richard-Tarbell/polsalt
e953985ffbc786fd071d0b48ebca5bd1dac9a960
[ "BSD-3-Clause" ]
1
2017-09-22T17:04:06.000Z
2017-09-22T17:04:06.000Z
polsalt/scrunch1d.py
Richard-Tarbell/polsalt
e953985ffbc786fd071d0b48ebca5bd1dac9a960
[ "BSD-3-Clause" ]
14
2015-12-22T17:56:38.000Z
2021-07-30T15:36:23.000Z
polsalt/scrunch1d.py
Richard-Tarbell/polsalt
e953985ffbc786fd071d0b48ebca5bd1dac9a960
[ "BSD-3-Clause" ]
12
2015-12-21T15:12:44.000Z
2021-08-12T18:58:12.000Z
#! /usr/bin/env python # Resample data into new bins, preserving flux # New version 150912, much faster # New version 170504, fixed case where output bin coverage is larger than input bin coverage # New version 170909, again fixed case where output bin coverage is larger than input bin coverage import os, sys, time, glob, shutil import numpy as np def scrunch1d(input,binedge): # new binedges are in coordinate system x where the left edge of the 0th input bin is at 0.0 na = input.size nx = binedge.size - 1 input_a = np.append(input,0) # deal with edge of array # okxbin = ((binedge>=0) & (binedge<=na)) okxbin = ((binedge[1:]>0) & (binedge[:-1]<na)) okxedge = np.zeros(binedge.size,dtype=bool) okxedge[:-1] |= okxbin okxedge[1:] |= okxbin output_x = np.zeros(nx) # _s: subbins divided by both new and old bin edges ixmin,ixmax = np.where(okxedge)[0][[0,-1]] iamin = int(binedge[ixmin]) iamax = int(binedge[ixmax]) x_s = np.append(binedge[okxedge],range(int(np.ceil(binedge[ixmin])),iamax+1)) x_s,argsort_s = np.unique(x_s,return_index=True) x_s = np.maximum(x_s,0.) 
# 20170909: deal with edge of array x_s = np.minimum(x_s,na) # 20170909: deal with edge of array ia_s = x_s.astype(int) ix_s = np.append(np.arange(ixmin,ixmax+1),-1*np.ones(iamax-iamin+1))[argsort_s].astype(int) while (ix_s==-1).sum(): ix_s[ix_s==-1] = ix_s[np.where(ix_s==-1)[0] - 1] # np.savetxt("scrout_s.txt",np.vstack((ia_s,ix_s,x_s)).T,fmt="%5i %5i %10.4f") # divide data into subbins, preserving flux ix_x = np.zeros(nx+1).astype(int) s_x = np.zeros(nx+1).astype(int) input_s = input_a[ia_s[:-1]]*(x_s[1:] - x_s[:-1]) ix_x[ixmin:(ixmax+1)], s_x[ixmin:(ixmax+1)] = np.unique(ix_s,return_index=True) ns_x = s_x[1:] - s_x[:-1] # np.savetxt("scrout_x.txt",np.vstack((ix_x,np.append(ns_x,[0]),s_x)).T,fmt="%5i") # sum it into the new bins for s in range(ns_x.max()): output_x[ns_x > s] += input_s[s_x[:nx][ns_x > s]+s] return output_x if __name__=='__main__': input=np.loadtxt(sys.argv[1]) binedge=np.loadtxt(sys.argv[2]) # for n in range(1000): scrunch1d(input,binedge) np.savetxt('outputfile.txt',scrunch1d(input,binedge),fmt="%14.8f")
40.40678
98
0.622903
411
2,384
3.472019
0.294404
0.019622
0.044149
0.029432
0.167484
0.148563
0.110722
0.082691
0.082691
0.082691
0
0.046088
0.217282
2,384
58
99
41.103448
0.71865
0.362836
0
0
0
0
0.018629
0
0
0
0
0
0
1
0.029412
false
0
0.058824
0
0.117647
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41b20f790b6985bfa1f3757a75cfaedc3ed0233b
177
py
Python
yosim/rules/apps.py
thoongnv/yosim
22bcaceb2c40735363496d9404970a73c4b944bc
[ "MIT" ]
2
2022-02-15T03:41:13.000Z
2022-02-15T03:44:46.000Z
yosim/rules/apps.py
thoongnv/yosim
22bcaceb2c40735363496d9404970a73c4b944bc
[ "MIT" ]
5
2021-06-08T22:26:24.000Z
2022-03-12T00:21:35.000Z
yosim/rules/apps.py
thoongnv/yosim
22bcaceb2c40735363496d9404970a73c4b944bc
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from django.apps import AppConfig class RulesConfig(AppConfig): name = 'yosim.rules' verbose_name = "Rules" def ready(self): pass
16.090909
33
0.627119
21
177
5.238095
0.857143
0
0
0
0
0
0
0
0
0
0
0.007463
0.242938
177
10
34
17.7
0.813433
0.118644
0
0
0
0
0.103896
0
0
0
0
0
0
1
0.166667
false
0.166667
0.166667
0
0.833333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
3
41b2fdd3fcfe0d572fb266442a0cede294698396
1,157
py
Python
pacote-download/Ex115a.py
nkonai/Curso-em-video-Python
c05a60b3daa7d448e1e7f0d4d23f62df5d2c8df2
[ "MIT" ]
null
null
null
pacote-download/Ex115a.py
nkonai/Curso-em-video-Python
c05a60b3daa7d448e1e7f0d4d23f62df5d2c8df2
[ "MIT" ]
null
null
null
pacote-download/Ex115a.py
nkonai/Curso-em-video-Python
c05a60b3daa7d448e1e7f0d4d23f62df5d2c8df2
[ "MIT" ]
null
null
null
print('-'*30) print('MENU PRINCIPAL'.center(30)) print('-'*30) print('\033[1;33m1\033[m - \033[1;34mVer pessoas cadastradas\033[m') print('\033[1;33m2\033[m - \033[1;34mCadastrar nova pessoa\033[m') print('\033[1;33m3\033[m - \033[1;34mSair do sistema\033[m') print('-'*30) def leiaopcao(msg): while True: try: opcao = int(input(msg)) while opcao > 3 or opcao < 1: print('ERRO! Digite uma opcao valida: ') opcao = int(input(msg)) if opcao == 1: print('-' * 30) print('OPCAO 1'.center(30)) print('-' * 30) elif opcao == 2: print('-' * 30) print('OPCAO 2'.center(30)) print('-' * 30) else: print('-' * 30) print('Saindo do sistema...Ate logo!'.center(30)) print('-' * 30) break except (ValueError, TypeError): print('\033[1;31mERRO! Digite uma opcao valida: \033[m') continue else: return opcao n = leiaopcao('\033[1;33mSua opcao: \033[m')
31.27027
68
0.475367
139
1,157
3.956835
0.366906
0.114545
0.109091
0.109091
0.047273
0
0
0
0
0
0
0.14479
0.361279
1,157
36
69
32.138889
0.599459
0
0
0.393939
0
0.090909
0.292388
0
0
0
0
0
0
1
0.030303
false
0
0
0
0.060606
0.545455
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
1
41b34f1ab0810f5d2d8176ae3c4ecf1349c413df
2,169
py
Python
PosPy/frame/form.py
richierh/PointofSalePy
54fc11b5f167d361c75b6b1cb890c7020393d46c
[ "Apache-2.0" ]
null
null
null
PosPy/frame/form.py
richierh/PointofSalePy
54fc11b5f167d361c75b6b1cb890c7020393d46c
[ "Apache-2.0" ]
null
null
null
PosPy/frame/form.py
richierh/PointofSalePy
54fc11b5f167d361c75b6b1cb890c7020393d46c
[ "Apache-2.0" ]
null
null
null
"""Subclass of FForm, which is generated by wxFormBuilder.""" import wx import frame.mainframe as mainframe # Implementing FForm class FFormEvent( mainframe.FForm ): def __init__( self, parent ): mainframe.FForm.__init__( self, parent ) self.custom_event() def custom_event(self): # Connect Events self.m_panel2.Bind(wx.EVT_CHAR_HOOK,self.m_panelOnChar) self.m_textCtrl1.Bind( wx.EVT_TEXT, self.datatext1 ) self.m_textCtrl2.Bind( wx.EVT_TEXT, self.datatext2 ) self.m_textCtrl3.Bind( wx.EVT_TEXT, self.datatext3 ) self.m_textCtrl4.Bind( wx.EVT_TEXT, self.datatext4 ) self.m_textCtrl5.Bind( wx.EVT_TEXT, self.datatext5 ) self.m_textCtrl6.Bind( wx.EVT_TEXT, self.datatext5 ) self.m_button1.Bind( wx.EVT_BUTTON, self.batal ) self.m_button2.Bind( wx.EVT_BUTTON, self.ok ) # Handlers for FForm events. def m_panelOnChar(self,event): print (event.GetKeyCode) if event.GetKeyCode() == wx.WXK_ESCAPE: self.Destroy() print ("okay") else: print ("wrong") event.Skip() def datatext1( self, event ): # TODO: Implement datatext1 print(self.m_textCtrl1.GetValue()) pass def datatext2( self, event ): # TODO: Implement datatext2 pass def datatext3( self, event ): # TODO: Implement datatext3 pass def datatext4( self, event ): # TODO: Implement datatext4 pass def datatext5( self, event ): # TODO: Implement datatext5 pass def datatext5( self, event ): # TODO: Implement datatext5 pass def batal( self, event ): # TODO: Implement batal pass def ok( self, event ): # TODO: Implement ok print(self.m_textCtrl1.GetValue()) print(self.m_textCtrl2.GetValue()) print(self.m_textCtrl3.GetValue()) print(self.m_textCtrl4.GetValue()) print(self.m_textCtrl5.GetValue()) print(self.m_textCtrl6.GetValue()) print("okay") pass
28.539474
63
0.600738
250
2,169
5.056
0.256
0.067247
0.064082
0.139241
0.255538
0.128956
0.128956
0.128956
0.079905
0.079905
0
0.022354
0.298755
2,169
75
64
28.92
0.808679
0.144767
0
0.291667
1
0
0.007069
0
0
0
0
0.013333
0
1
0.229167
false
0.166667
0.041667
0
0.291667
0.229167
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
1
0
0
1
0
1
0
0
0
0
0
4
41b47d2a2fab71fffa7fa7dc8edb5358a58ea4bf
1,686
py
Python
neutron_fwaas/tests/unit/cmd/upgrade_checks/test_checks.py
sapcc/neutron-fwaas
59bad17387d15f86ea7d08f8675208160a999ffe
[ "Apache-2.0" ]
null
null
null
neutron_fwaas/tests/unit/cmd/upgrade_checks/test_checks.py
sapcc/neutron-fwaas
59bad17387d15f86ea7d08f8675208160a999ffe
[ "Apache-2.0" ]
null
null
null
neutron_fwaas/tests/unit/cmd/upgrade_checks/test_checks.py
sapcc/neutron-fwaas
59bad17387d15f86ea7d08f8675208160a999ffe
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from oslo_upgradecheck.upgradecheck import Code from neutron_fwaas.cmd.upgrade_checks import checks from neutron_fwaas.tests import base class TestChecks(base.BaseTestCase): def setUp(self): super(TestChecks, self).setUp() self.checks = checks.Checks() def test_get_checks_list(self): self.assertIsInstance(self.checks.get_checks(), list) def test_fwaas_v1_check_sucess(self): cfg.CONF.set_override('service_plugins', ['l3', 'qos']) check_result = checks.Checks.fwaas_v1_check(mock.Mock()) self.assertEqual(Code.SUCCESS, check_result.code) def test_fwaas_v1_check_warning(self): plugins_to_check = [ ['l3', 'firewall', 'qos'], ['l3', 'neutron_fwaas.services.firewall.fwaas_plugin:FirewallPlugin', 'qos']] for plugins in plugins_to_check: cfg.CONF.set_override('service_plugins', plugins) check_result = checks.Checks.fwaas_v1_check(mock.Mock()) self.assertEqual(Code.FAILURE, check_result.code)
35.87234
75
0.707592
228
1,686
5.078947
0.47807
0.051813
0.041451
0.027634
0.195164
0.162349
0.107081
0.107081
0.107081
0.107081
0
0.011128
0.200475
1,686
46
76
36.652174
0.847923
0.326216
0
0.08
0
0
0.099822
0.052585
0
0
0
0
0.12
1
0.16
false
0
0.2
0
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41b838c1916c793c834d6840367d6af260c3208a
4,822
py
Python
licensegh/licensegh.py
sauljabin/licensegh
01dad5a8934869423feb9bc59854631ab1cb6e08
[ "MIT" ]
null
null
null
licensegh/licensegh.py
sauljabin/licensegh
01dad5a8934869423feb9bc59854631ab1cb6e08
[ "MIT" ]
null
null
null
licensegh/licensegh.py
sauljabin/licensegh
01dad5a8934869423feb9bc59854631ab1cb6e08
[ "MIT" ]
null
null
null
import os import re import shutil import git import yaml from rich import box from rich.console import Console from rich.prompt import Prompt from rich.table import Table class Licensegh: def __init__(self): self.repository = TemplatesRepository() self.licenses = [] def init(self): self.repository.init() self.load_licenses() def load_licenses(self): for dirpath, dirnames, filenames in os.walk(self.repository.licenses_path): filenames = [ filename for filename in filenames if filename.endswith(".txt") ] filenames.sort() for license_path in filenames: self.licenses.append(License(os.path.join(dirpath, license_path))) def print_all_licenses(self): self.print_licenses(self.licenses) def print_license_by_id(self, license_id): licenses = [license for license in self.licenses if license_id == license.id] if len(licenses) == 0: console = Console() console.print("[red]License not found[red]") else: licenses[0].load() licenses[0].print() def print_licenses_by_id(self, license_id): licenses = [ license for license in self.licenses if re.match(".*({}).*".format(license_id), license.id) ] if len(licenses) == 0: console = Console() console.print("[red]Licenses not found[red]") else: self.print_licenses( licenses, True, ) def print_licenses(self, licenses, print_description=False): console = Console() table = Table(box=box.HORIZONTALS) table.add_column("Id", style="cyan", justify="right") table.add_column("Name", style="magenta") for license in licenses: license.load() if print_description: table.add_row( license.id, "{}\n[white]{}[white]".format(license.name, license.description), ) else: table.add_row(license.id, license.name) console.print(table) def save_license_by_id(self, license_id): licenses = [license for license in self.licenses if license_id == license.id] if len(licenses) == 0: console = Console() console.print("[red]License not found[red]") else: licenses[0].load() licenses[0].save() def reset_repository(self): self.repository.remove() class License: def 
__init__(self, path): self.path = path self.directory, self.file_name = os.path.split(self.path) self.id = self.file_name.replace(".txt", "") self.description = "" self.name = "" self.text = "" self.arguments = [] def load(self): with open(self.path, "r") as file: full_text = file.read() cut_index = full_text.find("---", 3) file_parts = { "metadata": full_text[:cut_index], "text": full_text[cut_index + 3 :], } metadata = yaml.safe_load(file_parts["metadata"]) self.description = metadata["description"].strip() self.name = metadata["title"].strip() self.text = file_parts["text"].strip() self.arguments = list(set(re.findall(r"\[([a-z]+)\]", self.text))) def print(self): console = Console() console.print( "[green]Name:[green]\t[magenta bold]{}[magenta bold]".format(self.name) ) console.print( "[green]Id:[green]\t[magenta bold]{}[magenta bold]".format(self.id) ) console.rule() console.print(self.text.replace("[", r"\[")) def save(self): text_to_save = self.text for argument in self.arguments: value = Prompt.ask( f"[magenta]Enter argument[magenta] [cyan]{argument}[cyan]" ) text_to_save = text_to_save.replace(f"[{argument}]", value) with open("LICENSE", "w") as file: file.write(text_to_save) def __eq__(self, o): return self.id == o.id class TemplatesRepository: def __init__(self): self.path = os.path.expanduser("~/.licensegh/choosealicense") self.licenses_path = os.path.join(self.path, "_licenses") self.remote = "https://github.com/github/choosealicense.com.git" def init(self): if os.path.isdir(self.path): repo = git.Repo(self.path) repo.remotes.origin.pull() else: git.Repo.clone_from(self.remote, self.path) def remove(self): shutil.rmtree(self.path)
29.950311
85
0.562837
549
4,822
4.812386
0.218579
0.037472
0.020818
0.039364
0.236563
0.202498
0.202498
0.202498
0.173732
0.173732
0
0.002708
0.310659
4,822
160
86
30.1375
0.792118
0
0
0.193798
0
0
0.094981
0.021775
0
0
0
0
0
1
0.131783
false
0
0.069767
0.007752
0.232558
0.124031
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41ba22bd38c8d51f5bf64e1e12f05338b9a305b8
247
py
Python
onegreek/universities/views.py
goldhand/onegreek
1ad105f15608284a9e80802734f0c6222413a4a0
[ "BSD-3-Clause" ]
1
2019-06-13T11:46:08.000Z
2019-06-13T11:46:08.000Z
onegreek/universities/views.py
goldhand/onegreek
1ad105f15608284a9e80802734f0c6222413a4a0
[ "BSD-3-Clause" ]
null
null
null
onegreek/universities/views.py
goldhand/onegreek
1ad105f15608284a9e80802734f0c6222413a4a0
[ "BSD-3-Clause" ]
null
null
null
from rest_framework import viewsets from .serializers import UniversitySerializer from .models import University class UniversityViewSet(viewsets.ModelViewSet): queryset = University.objects.all() serializer_class = UniversitySerializer
27.444444
47
0.82996
24
247
8.458333
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.121457
247
8
48
30.875
0.935484
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
41bb07f1d7bec345e20d3accf21f60a21b94cceb
192
py
Python
ABC104/ABC104a.py
VolgaKurvar/AtCoder
21acb489f1594bbb1cdc64fbf8421d876b5b476d
[ "Unlicense" ]
null
null
null
ABC104/ABC104a.py
VolgaKurvar/AtCoder
21acb489f1594bbb1cdc64fbf8421d876b5b476d
[ "Unlicense" ]
null
null
null
ABC104/ABC104a.py
VolgaKurvar/AtCoder
21acb489f1594bbb1cdc64fbf8421d876b5b476d
[ "Unlicense" ]
null
null
null
# ABC104a import sys input = sys.stdin.readline sys.setrecursionlimit(10**6) r = int(input()) if r < 1200: print('ABC') exit(0) if r < 2800: print('ARC') exit(0) print('AGC')
13.714286
28
0.609375
30
192
3.9
0.666667
0.051282
0
0
0
0
0
0
0
0
0
0.10596
0.213542
192
13
29
14.769231
0.668874
0.036458
0
0.181818
0
0
0.04918
0
0
0
0
0
0
1
0
false
0
0.090909
0
0.090909
0.272727
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41bc1f9037b836003a997838387cdbfb375a905b
1,509
py
Python
com test.py
brainelectronics/Python-Arduino-Communicator
e721c06e4e2731402c61f553c780f6e6ad996587
[ "FSFAP" ]
null
null
null
com test.py
brainelectronics/Python-Arduino-Communicator
e721c06e4e2731402c61f553c780f6e6ad996587
[ "FSFAP" ]
1
2015-10-24T14:33:11.000Z
2015-10-24T14:38:09.000Z
com test.py
brainelectronics/Python-Arduino-Communicator
e721c06e4e2731402c61f553c780f6e6ad996587
[ "FSFAP" ]
null
null
null
# -*- coding: UTF-8 -*- #!/usr/bin/env python # ---------------------------------------------------------------------------- # Tower Defense # Copyright (c) 2015 brainelectronics # Scharpf, Jonas # # All rights reserved. # # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # ---------------------------------------------------------------------------- # # 's1D12' to set digital Pin D12 to 1 (HIGH) # 's0D9' to set digital Pin D9 to 0 (LOW) # 's217D3' to set digital Pin D3 to 217 (PWM) # 'gD1' to get the value of digital Pin D1 # 'gA0' to get the value of analog Pin A0 # '#d1' to switch serial debug output off # '#f0' to switch serial feedback output off import serial import time port = serial.Serial("/dev/tty.usbmodem1411", 19200, timeout=3.0) port.write("s1d13\n")
37.725
78
0.665341
211
1,509
4.758294
0.625592
0.039841
0.035857
0.044821
0.077689
0
0
0
0
0
0
0.033858
0.158383
1,509
39
79
38.692308
0.756693
0.878728
0
0
0
0
0.190476
0.142857
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
41be27ee6f0699ebf633e908194ed90a7940707d
13,708
py
Python
low_level_simulation/src/path_utilities/src/simulation_util.py
abiantorres/autonomous-vehicles-system-simulation
3f0112036b2b270f5055729c648a1310976df933
[ "Apache-2.0" ]
null
null
null
low_level_simulation/src/path_utilities/src/simulation_util.py
abiantorres/autonomous-vehicles-system-simulation
3f0112036b2b270f5055729c648a1310976df933
[ "Apache-2.0" ]
null
null
null
low_level_simulation/src/path_utilities/src/simulation_util.py
abiantorres/autonomous-vehicles-system-simulation
3f0112036b2b270f5055729c648a1310976df933
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python import rosbag, rospy, actionlib, time, sys, csv, rospkg, re, os from gazebo_msgs.msg import ModelState from std_msgs.msg import Empty from std_srvs.srv import Empty from gazebo_msgs.srv import SetModelState from geometry_msgs.msg import PoseArray from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal from actionlib_msgs.msg import * from obstacles_util import ObstaclesModelGenerator from results_util import SimulationResults from db_client import DBClient # Path information messages from costum_msgs.msg import SimulationMsg import Tkinter import tkMessageBox class SimulationExecutor(): def __init__(self): self.rospack = rospkg.RosPack() self.navigation_pkg_path = str(self.rospack.get_path('navigation')) self.csv_path = re.sub("navigation","", self.navigation_pkg_path) self.csv_path = re.sub("/src/","/csv/",self.csv_path) self.db_client = DBClient() # Get some parameters self.plan_file = rospy.get_param('~plan_file') self.n_iterations = int(rospy.get_param('~n_iterations')) self.distance_between_obstacles = \ float(rospy.get_param('~distance_between_obstacles')) self.robot_radius = float(rospy.get_param('~robot_radius')) self.obstacle_length = float(rospy.get_param('~obstacle_length')) self.max_obstacle_shiftment = \ float(rospy.get_param('~max_obstacle_shiftment')) self.timeout_factor = int(rospy.get_param('~timeout_factor')) self.max_robot_speed = float(rospy.get_param('~max_robot_speed')) self.simulation_data_pub = \ rospy.Publisher('/simulation_data', SimulationMsg, queue_size=1) self.poseArray_publisher = rospy.Publisher('/waypoints', PoseArray, queue_size=1) self.frame_id = rospy.get_param('~goal_frame_id','map') # List of 2D points that describe de trajectory of the robot self.points_2d = [] # Initial robot state self.initial_state = ModelState() # Trayectory goals self.waypoints = [] # Get plan self.get_plan_from_file() # Build an obstacles model generator self.obstacles_model_generator = \ ObstaclesModelGenerator("MySimulation", 
self.obstacle_length,\ self.robot_radius, self.points_2d[0][0], self.points_2d[0][1],\ self.distance_between_obstacles, self.max_obstacle_shiftment) i = 0 # Append a segment for point in self.points_2d: if(i != 0): self.obstacles_model_generator.append_point(\ str(i), point[0], point[1]) i += 1 self.n_segments = len(self.waypoints) # Buil a results listener self.simulation_results_listener = \ SimulationResults(self.n_segments, self.n_iterations) # Set some metadata for each segment for i in range(0, self.n_segments): if(i != 0): self.simulation_results_listener.set_segment_metadata(i, \ self.points_2d[i][0], self.points_2d[i][1], \ self.points_2d[i+1][0], self.points_2d[i+1][1], self.distance_between_obstacles, \ self.obstacles_model_generator.segments[i].get_segment_timeout(\ self.max_robot_speed, self.timeout_factor)) else: self.simulation_results_listener.set_segment_metadata(0, \ self.points_2d[0][0], self.points_2d[0][1], \ self.points_2d[1][0], self.points_2d[1][1], self.distance_between_obstacles, \ self.obstacles_model_generator.segments[0].get_segment_timeout(\ self.max_robot_speed, self.timeout_factor)) i += 1 def reset_gazebo_world(self): # reset the gazebo world to the initial state rospy.wait_for_service('/gazebo/reset_world') reset_world = rospy.ServiceProxy('/gazebo/reset_world', Empty) try: res = reset_world() except rospy.ServiceException as exc: rospy.loginfo("Service did not process request: " + str(exc)) def set_vehicle_model_state(self): # Se the initial robot model state rospy.wait_for_service('gazebo/set_model_state') set_model_state = rospy.ServiceProxy('gazebo/set_model_state', SetModelState) try: set_model_state(self.initial_state) except rospy.ServiceException as exc: rospy.loginfo("Service did not process request: " + str(exc)) def get_plan_from_file(self): """ Function with allows us to get a path pre-configured from file and load it to be used in the simulation. 
""" self.waypoints = [] self.points_2d = [] # Read the ros bag file from ~/.ros/ bag = rosbag.Bag(self.plan_file) # Get the robot initial state for topic, msg, t in bag.read_messages(topics=['initial_model_state']): self.initial_state = msg self.points_2d.append((round(float(msg.pose.position.x),2),\ round(float(msg.pose.position.y),2))) # Get the trayectory goals for topic, msg, t in bag.read_messages(topics=['path_goals_bag']): self.waypoints.append(msg) self.points_2d.append((round(float(msg.pose.pose.position.x), 2),\ round(float(msg.pose.pose.position.y),2))) bag.close() def convert_PoseWithCovArray_to_PoseArray(self): """Used to publish waypoints as pose array so that you can see them in rviz, etc.""" poses = PoseArray() poses.header.frame_id = 'map' poses.poses = [pose.pose.pose for pose in self.waypoints] return poses def msg_to_csv(self, msg): with open(self.csv_path + msg.metadata.simulation_hash + "_" + msg.metadata.date + ".csv", 'wb') as csvfile: fieldnames_global_segments_results = ['segment_index', 'n_failures', \ 'time_mean', 'time_stdev', \ 'time_max', 'time_min', \ 'distance_mean', 'distance_stdev', \ 'distance_max', 'distance_min', \ 'speed_mean', 'speed_stdev', \ 'speed_max', 'speed_min'] fieldnames_global_simulation_results = ['n_failures', \ 'time_mean', 'time_stdev', \ 'time_max', 'time_min', \ 'distance_mean', 'distance_stdev', \ 'distance_max', 'distance_min', \ 'speed_mean', 'speed_stdev', \ 'speed_max', 'speed_min'] fieldnames_segments_metadata = ['segment_index', 'initial_point', \ 'end_point', 'distance_between_obstacles', \ 'segment_simulation_timeout'] fieldnames_simulation_metadata = ['simulation_hash', 'robot_file', \ 'world_file', 'plan_file', \ 'map_file', 'date', \ 'n_segments', 'n_iterations', \ 'timeout_factor', 'useful_simulation', \ 'local_planner', 'global_planner'] # Simulation metadata writer = csv.DictWriter(csvfile, fieldnames=['Simulation metadata'], delimiter=';', quotechar='"') writer.writeheader() writer = 
csv.DictWriter(csvfile, fieldnames=fieldnames_simulation_metadata, delimiter=';', quotechar='"') writer.writeheader() writer.writerow({'simulation_hash':msg.metadata.simulation_hash, 'robot_file':msg.metadata.robot_file, \ 'world_file':msg.metadata.world_file, 'plan_file':msg.metadata.plan_file, 'map_file':msg.metadata.map_file, \ 'date':msg.metadata.date, 'n_segments':msg.metadata.n_segments, \ 'n_iterations':msg.metadata.n_iterations, 'timeout_factor':msg.metadata.timeout_factor, \ 'useful_simulation':msg.metadata.useful_simulation, 'useful_simulation':msg.metadata.useful_simulation, \ 'local_planner':msg.metadata.local_planner, 'global_planner':msg.metadata.global_planner}) # Segments metadata writer = csv.DictWriter(csvfile, fieldnames=['Segments metadata'], delimiter=';', quotechar='"') writer.writeheader() writer = csv.DictWriter(csvfile, fieldnames=fieldnames_segments_metadata, delimiter=';', quotechar='"') writer.writeheader() for i in msg.metadata.segments_metadata.segments_metadata: writer.writerow({'segment_index':i.segment_index, 'initial_point':i.initial_point, \ 'end_point':i.end_point, 'distance_between_obstacles':i.distance_between_obstacles, \ 'segment_simulation_timeout':i.segment_simulation_timeout}) # Global Simulation Results writer = csv.DictWriter(csvfile, fieldnames=['Global simulation results'], delimiter=';', quotechar='"') writer.writeheader() writer = csv.DictWriter(csvfile, fieldnames=fieldnames_global_simulation_results, delimiter=';', quotechar='"') writer.writeheader() writer.writerow({'n_failures':msg.global_simulation_results.n_failures, \ 'time_mean':msg.global_simulation_results.time_mean, 'time_stdev':msg.global_simulation_results.time_stdev, \ 'time_max':msg.global_simulation_results.time_max, 'time_min':msg.global_simulation_results.time_min, \ 'distance_mean':msg.global_simulation_results.distance_mean, 'distance_stdev':msg.global_simulation_results.distance_stdev, \ 'distance_max':msg.global_simulation_results.distance_max, 
'distance_min':msg.global_simulation_results.distance_min, \ 'speed_mean':msg.global_simulation_results.speed_mean, 'speed_stdev':msg.global_simulation_results.speed_stdev, \ 'speed_max':msg.global_simulation_results.speed_max, 'speed_min':msg.global_simulation_results.speed_min}) # Global Segments Results writer = csv.DictWriter(csvfile, fieldnames=['Global segments results'], delimiter=';', quotechar='"') writer.writeheader() writer = csv.DictWriter(csvfile, fieldnames=fieldnames_global_segments_results, delimiter=';', quotechar='"') writer.writeheader() for i in msg.global_segments_results: writer.writerow({'segment_index':i.segment_index, 'n_failures':i.n_failures, \ 'time_mean':i.time_mean, 'time_stdev':i.time_stdev, \ 'time_max':i.time_max, 'time_min':i.time_min, \ 'distance_mean':i.distance_mean, 'distance_stdev':i.distance_stdev, \ 'distance_max':i.distance_max, 'distance_min':i.distance_min, \ 'speed_mean':i.speed_mean, 'speed_stdev':i.speed_stdev, \ 'speed_max':i.speed_max, 'speed_min':i.speed_min}) tkMessageBox.showinfo('Results', "A CSV file has been generated behind the path "+self.csv_path + msg.metadata.simulation_hash + "_" + msg.metadata.date + ".csv") def start(self): """ Low level information publisher. High level should be subscribed to the simulation_data topic. 
""" # Get a move_base action client client = actionlib.SimpleActionClient('move_base', MoveBaseAction) client.wait_for_server() # Start publishing goals for i in range(0, self.n_iterations): self.poseArray_publisher.publish(self.convert_PoseWithCovArray_to_PoseArray()) # Initialize the simulation for each iteration self.reset_gazebo_world() self.set_vehicle_model_state() self.obstacles_model_generator.spawn_obstacles() time.sleep(3) for j in range(0, self.n_segments): # Build goal goal = MoveBaseGoal() goal.target_pose.header.frame_id = self.frame_id goal.target_pose.header.stamp = rospy.Time.now() goal.target_pose.pose.position = self.waypoints[j].pose.pose.position goal.target_pose.pose.orientation = self.waypoints[j].pose.pose.orientation self.simulation_results_listener.start(j, i) # send the goal client.send_goal(goal) finished_within_time = client.wait_for_result(\ rospy.Duration(self.simulation_results_listener.segments_metadata[j].segment_simulation_timeout)) # Check simulation state if not finished_within_time: client.cancel_goal() self.simulation_results_listener.stop(j, i, True) break else: state = client.get_state() if state == GoalStatus.SUCCEEDED: self.simulation_results_listener.stop(j, i, False) else: self.simulation_results_listener.stop(j, i, True) break time.sleep(3) msg = self.simulation_results_listener.get_msg(\ self.plan_file, self.timeout_factor) self.simulation_data_pub.publish(msg) self.msg_to_csv(msg) self.db_client.insert_simulation_results(msg)
55.723577
170
0.619273
1,537
13,708
5.251139
0.1581
0.05687
0.048445
0.041878
0.424731
0.322637
0.270103
0.220047
0.187337
0.147937
0
0.005444
0.276408
13,708
245
171
55.95102
0.808247
0.070032
0
0.223881
0
0
0.126747
0.015636
0
0
0
0
0
1
0.034826
false
0
0.069652
0
0.114428
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41bf0aa7aa8657c18ffec31a5eceb725b9f22e18
141
py
Python
Chapter 07/Chap07_Example7.23.py
bpbpublications/Programming-Techniques-using-Python
49b785f37e95a3aad1d36cef51e219ac56e5e9f0
[ "MIT" ]
null
null
null
Chapter 07/Chap07_Example7.23.py
bpbpublications/Programming-Techniques-using-Python
49b785f37e95a3aad1d36cef51e219ac56e5e9f0
[ "MIT" ]
null
null
null
Chapter 07/Chap07_Example7.23.py
bpbpublications/Programming-Techniques-using-Python
49b785f37e95a3aad1d36cef51e219ac56e5e9f0
[ "MIT" ]
null
null
null
myl1 = [10, 20, -30.1, 'Hello'] list_length = len(myl1) count = 0 while count<list_length: print(count, myl1[count]) count +=1
20.142857
32
0.609929
22
141
3.818182
0.590909
0.238095
0
0
0
0
0
0
0
0
0
0.110092
0.22695
141
6
33
23.5
0.66055
0
0
0
0
0
0.037037
0
0
0
0
0
0
1
0
false
0
0
0
0
0.166667
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
41c495323f09d70b9547788b90e0defddbe36bbc
2,371
py
Python
plugins/intern/markov.py
rbracken/internbot
58b802e0dd7597ace12acd9342bb938e2f33c25d
[ "BSD-2-Clause" ]
1
2016-09-24T16:00:06.000Z
2016-09-24T16:00:06.000Z
plugins/intern/markov.py
rbracken/internbot
58b802e0dd7597ace12acd9342bb938e2f33c25d
[ "BSD-2-Clause" ]
null
null
null
plugins/intern/markov.py
rbracken/internbot
58b802e0dd7597ace12acd9342bb938e2f33c25d
[ "BSD-2-Clause" ]
null
null
null
import random """ Credits for this code go to Shabda Raaj, pulled from the article 'Generating pseudo-random text with Markov chains using Python', which can be found at: http://agiliq.com/blog/2009/06/generating-pseudo-random-text-with-markov-chains-u/ """ class Markov(object): def __init__(self, open_file): self.cache = {} self.open_file = open_file self.words = self.file_to_words() self.word_size = len(self.words) self.database() def file_to_words(self): self.open_file.seek(0) data = self.open_file.read() words = data.lower().split() return words def triples(self): """ Generates triples from the given data string. So if our string were "What a lovely day", we'd generate (What, a, lovely) and then (a, lovely, day). """ if len(self.words) < 3: return for i in range(len(self.words) - 2): yield (self.words[i], self.words[i+1], self.words[i+2]) def database(self): for w1, w2, w3 in self.triples(): key = (w1, w2) if key in self.cache: self.cache[key].append(w3) else: self.cache[key] = [w3] def generate_markov_text(self, size=25): seed = random.randint(0, self.word_size-3) seed_word, next_word = self.words[seed], self.words[seed+1] w1, w2 = seed_word, next_word gen_words = [] for i in xrange(size): gen_words.append(w1) w1, w2 = w2, random.choice(self.cache[(w1, w2)]) gen_words.append(w2) return ' '.join(gen_words) def generate_markov_response(self, seed_word=None, next_word=None, size=25): w1, w2 = seed_word, next_word gen_words = [] try: for i in xrange(size): gen_words.append(w1) w1, w2 = w2, random.choice(self.cache[(w1, w2)]) gen_words.append(w2) except: seed = self.words.index(next_word) seed_word = self.words[seed-1] w1, w2 = seed_word, next_word for i in xrange(size): gen_words.append(w1) w1, w2 = w2, random.choice(self.cache[(w1, w2)]) gen_words.append(w2) return ' '.join(gen_words)
32.040541
90
0.557992
325
2,371
3.944615
0.298462
0.077223
0.065523
0.049922
0.359594
0.359594
0.359594
0.294072
0.265991
0.265991
0
0.033022
0.32307
2,371
73
91
32.479452
0.765732
0.061999
0
0.365385
0
0
0.001042
0
0
0
0
0
0
1
0.115385
false
0
0.019231
0
0.230769
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41c53301d55d3133fde81eb12b2a9be32599efe5
3,590
py
Python
isee/infrastructure/mdengine.py
team-mayes/isEE
c22d7cc78a43f0c0a7b2ec18fbc3b628ddef8d54
[ "BSD-3-Clause" ]
null
null
null
isee/infrastructure/mdengine.py
team-mayes/isEE
c22d7cc78a43f0c0a7b2ec18fbc3b628ddef8d54
[ "BSD-3-Clause" ]
1
2021-09-17T18:20:36.000Z
2021-10-06T16:56:34.000Z
isee/infrastructure/mdengine.py
team-mayes/isEE
c22d7cc78a43f0c0a7b2ec18fbc3b628ddef8d54
[ "BSD-3-Clause" ]
null
null
null
""" Interface for MDEngine objects. New MDEngines can be implemented by constructing a new class that inherits from MDEngine and implements its abstract methods. """ import abc import os import pytraj import mdtraj class MDEngine(abc.ABC): """ Abstract base class for molecular dynamics engines. Implements methods for all of the engine-specific tasks that isEE might need. """ @abc.abstractmethod def get_frame(self, trajectory, frame, settings): """ Return a new file containing just the frame'th frame of a trajectory in Amber .rst7 format Parameters ---------- trajectory : str Name of trajectory file to obtain last frame from frame : int Index of frame to return; 1-indexed, -1 gives last frame, 0 is invalid settings : argparse.Namespace Settings namespace object Returns ------- last_frame : str Name of .rst7 format coordinate file corresponding to desired frame of trajectory, if it exists; an empty string otherwise """ pass class AdaptAmber(MDEngine): """ Adapter class for Amber MDEngine. 
""" def get_frame(self, trajectory, frame, settings): new_restart_name = trajectory + '_frame_' + str(frame) + '.rst7' if not os.path.exists(trajectory): return '' # since it's possible to call this before the trajectory file has been initialized if frame >= 1: shift_frame = frame - 1 # because write_traj is 0-indexed but get_frame is 1-indexed elif frame == -1: shift_frame = -1 else: raise IndexError('invalid frame index for get_frame: ' + str(frame) + ' (must be >= 1, or exactly -1)') # Use mdtraj to check for non-zero trajectory length (pytraj gives an error below if n_frames = 0) try: traj = mdtraj.load(trajectory, top=settings.topology) if traj.n_frames == 0: del traj return '' except ValueError: # sometimes this is the result of trying to load a trajectory too early return '' traj = pytraj.iterload(trajectory, settings.topology) try: pytraj.write_traj(new_restart_name, traj, format='rst7', frame_indices=[shift_frame], options='multi', overwrite=True, velocity=True) except ValueError: # pytraj raises a ValueError if frame index is out of range raise IndexError('frame index ' + str(frame) + ' is out of range for trajectory: ' + trajectory) except AssertionError: # sometimes there's an assertion error when shift_frame = -1; cause unknown, but this fixes it if shift_frame == -1: shift_frame = traj.n_frames - 1 try: pytraj.write_traj(new_restart_name, traj, format='rst7', frame_indices=[shift_frame], options='multi', overwrite=True, velocity=True) except ValueError: # pytraj raises a ValueError if frame index is out of range raise IndexError('frame index ' + str(frame) + ' is out of range for trajectory: ' + trajectory) try: os.rename(new_restart_name + '.1', new_restart_name) except OSError: if not os.path.exists(new_restart_name): raise OSError('expected pytraj to write either ' + new_restart_name + ' or ' + new_restart_name + '.1, ' 'but found neither.') return new_restart_name
39.888889
145
0.618106
443
3,590
4.918736
0.34763
0.041303
0.057825
0.022028
0.267095
0.251492
0.251492
0.216613
0.216613
0.216613
0
0.009204
0.3039
3,590
89
146
40.337079
0.862745
0.372145
0
0.304348
0
0
0.118015
0
0
0
0
0
0.021739
1
0.043478
false
0.021739
0.086957
0
0.26087
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41c6f084ce0e27af11e57d81c680c580e271abf4
3,899
py
Python
data/feature_extraction.py
mlcruw/covid-news-analyzer
76c6a906de20633ff0e325f1ebe669c1e0eac325
[ "MIT" ]
8
2020-04-11T23:13:58.000Z
2021-05-06T02:39:45.000Z
data/feature_extraction.py
mlcruw/covid-news-analyzer
76c6a906de20633ff0e325f1ebe669c1e0eac325
[ "MIT" ]
null
null
null
data/feature_extraction.py
mlcruw/covid-news-analyzer
76c6a906de20633ff0e325f1ebe669c1e0eac325
[ "MIT" ]
1
2020-05-26T01:56:24.000Z
2020-05-26T01:56:24.000Z
import gensim from gensim.models import Word2Vec from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer import numpy as np class FeatureExtractor: """ Description =========== Tranforms text to features """ def __init__(self, X_train, X_val, feat='none'): feat_mapping_dict = { 'bow': self.bow, 'word2vec': self.word2vec, 'tfidf': self.tfidf, 'ngram': self.ngram } self.out = feat_mapping_dict[feat](X_train, X_val) def bow(self, X_train, X_val): """ [sentence_1, sentence_2, ..., sentence_n] => dictionary """ vectorizer = CountVectorizer() #vectorizer.fit(X) X_train_counts = vectorizer.fit_transform(X_train) #return vectorizer.vocabulary_ X_val_counts = vectorizer.transform(X_val) return X_train_counts.toarray(), X_val_counts.toarray() #Fit requires dense def tfidf(self, X_train, X_val): """ [sentence_1, sentence_2, ..., sentence_n] => dictionary """ vectorizer = TfidfVectorizer() X_train_counts = vectorizer.fit_transform(X_train) X_val_counts = vectorizer.transform(X_val) return X_train_counts.toarray(), X_val_counts.toarray() #Fit requires dense def ngram(self, X_train, X_val): """ [sentence_1, sentence_2, ..., sentence_n] => dictionary """ #TODO 5 is large I guess? 
vectorizer = CountVectorizer(analyzer='char_wb', ngram_range=(5, 5)) X_train_counts = vectorizer.fit_transform(X_train) #print(vectorizer.get_feature_names()) X_val_counts = vectorizer.transform(X_val) return X_train_counts.toarray(), X_val_counts.toarray() #Fit requires dense #TODO: load a pretrained embedding model def word2vec(self, X_train, X_val): """ nested list of words => nested list of "word embedding vector" """ embed_dim = 30 max_word = 30 model = Word2Vec(sentences = X_train, size = embed_dim, sg = 1, window = 3, min_count = 1, iter = 30) pretrained_weights = model.wv.syn0 vocab_size, embedding_size = pretrained_weights.shape print('Vocab size ', vocab_size, ' embed shape ', embedding_size) #https://github.com/buomsoo-kim/Word-embedding-with-Python/blob/master/word2vec/source%20code/word2vec.ipynb embed_train = np.zeros((len(X_train), max_word, embed_dim)) for i in range(len(X_train)): for j in range(max_word): for k in range(embed_dim): if j < len(X_train[i]): embed_train[i][j][k] = model[X_train[i][j]][k] embed_train = embed_train.reshape((len(X_train), max_word * embed_dim)) #embed_train = [[[model[X_train[i][j]] if j < len(X_train[i]) else 0] for j in range(56)] for i in range(len(X_train))] #embed_train = np.array(embed_train) print('Embed train . shape' , embed_train.shape) #embed_val = [[[(model[X_val[i][j]] if X_val[i][j] in model.wv.vocab else 0) if j < len(X_val[i]) else 0] for j in range(56)] for i in range(len(X_val))] embed_val = np.zeros((len(X_val), max_word, embed_dim)) for i in range(len(X_train), len(X_train) + len(X_val)): for j in range(max_word): for k in range(embed_dim): if j < len(X_val[i]): if X_val[i][j] in model.wv.vocab: embed_val[i - len(X_train)][j][k] = model[X_val[i][j]][k] embed_val = embed_val.reshape((len(X_val), max_word * embed_dim)) print('Embed val . shape' , embed_val.shape) return embed_train, embed_val
41.042105
161
0.588356
522
3,899
4.149425
0.216475
0.074792
0.037396
0.032318
0.475531
0.434441
0.419668
0.388735
0.333333
0.313019
0
0.012681
0.292126
3,899
94
162
41.478723
0.772101
0.22929
0
0.245283
0
0
0.031967
0
0
0
0
0.010638
0
1
0.09434
false
0
0.075472
0
0.264151
0.056604
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
41c72ad44a90d0da8b7d83a6acf768aead8bfbf7
180
py
Python
student/filters.py
Floran-Github/Project-code
186ea185f1a5e57bf3fdc34b31b9979bafef4cfc
[ "Apache-2.0" ]
3
2021-02-02T12:24:44.000Z
2021-12-28T11:23:11.000Z
student/filters.py
Floran-Github/Project-code
186ea185f1a5e57bf3fdc34b31b9979bafef4cfc
[ "Apache-2.0" ]
8
2021-03-20T18:51:54.000Z
2021-09-08T02:39:21.000Z
student/filters.py
Floran-Github/Project-code
186ea185f1a5e57bf3fdc34b31b9979bafef4cfc
[ "Apache-2.0" ]
1
2020-11-16T11:29:23.000Z
2020-11-16T11:29:23.000Z
import django_filters from .models import * class StudentFilter(django_filters.FilterSet): class Meta: model = Student fields = ['current_year','current_dept']
25.714286
48
0.711111
20
180
6.2
0.75
0.209677
0
0
0
0
0
0
0
0
0
0
0.2
180
7
48
25.714286
0.861111
0
0
0
0
0
0.132597
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
4