hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
5ca225f5afe43c654b35619caef218fd0f7a679c
3,286
py
Python
promort/predictions_manager/serializers.py
lucalianas/ProMort
63702e1b573025e1f956f7d7a0e829f655e728f9
[ "MIT" ]
3
2016-12-28T08:12:51.000Z
2020-07-08T21:03:48.000Z
promort/predictions_manager/serializers.py
lucalianas/ProMort
63702e1b573025e1f956f7d7a0e829f655e728f9
[ "MIT" ]
37
2016-11-11T09:57:45.000Z
2022-03-31T16:04:53.000Z
promort/predictions_manager/serializers.py
lucalianas/ProMort
63702e1b573025e1f956f7d7a0e829f655e728f9
[ "MIT" ]
4
2016-04-22T07:49:40.000Z
2021-09-22T08:09:44.000Z
# Copyright (c) 2021, CRS4 # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of # the Software, and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
try: import simplejson as json except ImportError: import json from rest_framework import serializers from predictions_manager.models import Prediction, TissueFragmentsCollection, TissueFragment from slides_manager.serializers import SlideSerializer class PredictionSerializer(serializers.ModelSerializer): class Meta: model = Prediction fields = ('id', 'label', 'creation_date', 'slide', 'type', 'omero_id', 'provenance') read_only_fields = ('id', 'creation_date') def validate_provenance(self, value): try: json.loads(value) return value except ValueError: raise serializers.ValidationError('Not a valid JSON in \'provenance\' field') class PredictionDetailsSerializer(serializers.ModelSerializer): slide = SlideSerializer(many=False, read_only=True) class Meta: model = Prediction fields = ('id', 'label', 'creation_date', 'slide', 'type', 'omero_id', 'provenance') read_only_fields = ('id', 'label', 'creation_date', 'slide', 'type', 'omero_id', 'provenance') class TissueFragmentsCollectionSerializer(serializers.ModelSerializer): class Meta: model = TissueFragmentsCollection fields = ('id', 'prediction', 'creation_date') read_only_fields = ('id', 'creation_date') class TissueFragmentSerializer(serializers.ModelSerializer): class Meta: model = TissueFragment fields = ('id', 'collection', 'shape_json', 'creation_date') read_only_fields = ('id', 'creation_date') def validate_shape_json(self, value): try: json.loads(value) return value except ValueError: raise serializers.ValidationError('Not a valid JSON in \'shape_json\' field') class TissueFragmentsCollectionDetailsSerializer(serializers.ModelSerializer): fragments = TissueFragmentSerializer(many=True, read_only=True) prediction = PredictionSerializer(many=False, read_only=True) class Meta: model = TissueFragmentsCollection fields = ('id', 'prediction', 'creation_date', 'fragments') read_only_fields = ('id', 'creation_date')
38.209302
102
0.714242
378
3,286
6.119048
0.391534
0.034587
0.030264
0.034587
0.35668
0.316904
0.304799
0.304799
0.242542
0.182879
0
0.0019
0.19933
3,286
85
103
38.658824
0.877233
0.322276
0
0.510638
0
0
0.158133
0
0
0
0
0
0
1
0.042553
false
0
0.12766
0
0.489362
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5ca5b710e576de9630f78f2e41bbd7b1030e61bd
327
py
Python
tests/test_tweet.py
dashtagger/sentiment_analysis_covid
b1e948ad83d573b2fa82be778ed04f989f062de5
[ "MIT" ]
null
null
null
tests/test_tweet.py
dashtagger/sentiment_analysis_covid
b1e948ad83d573b2fa82be778ed04f989f062de5
[ "MIT" ]
null
null
null
tests/test_tweet.py
dashtagger/sentiment_analysis_covid
b1e948ad83d573b2fa82be778ed04f989f062de5
[ "MIT" ]
null
null
null
import os import pytest from src.modules.tweet_module import Twitter_api class TestTweet: def test_tokens(self): with pytest.raises(ValueError) as e: api = Twitter_api() err_msg = 'Provide the correct tokens and keys' assert e.match(err_msg), 'All credentials are given and accepted'
25.153846
73
0.69419
46
327
4.804348
0.76087
0.090498
0
0
0
0
0
0
0
0
0
0
0.238532
327
12
74
27.25
0.88755
0
0
0
0
0
0.223242
0
0
0
0
0
0.111111
1
0.111111
false
0
0.333333
0
0.555556
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
5cabf0aecc3c712f0928ee78a802672a8b445c83
657
py
Python
src/gameslib/app.py
inigoini/GamesLib
c2476c1872acc610a559707cccab2d2eac3f15cd
[ "MIT" ]
null
null
null
src/gameslib/app.py
inigoini/GamesLib
c2476c1872acc610a559707cccab2d2eac3f15cd
[ "MIT" ]
null
null
null
src/gameslib/app.py
inigoini/GamesLib
c2476c1872acc610a559707cccab2d2eac3f15cd
[ "MIT" ]
null
null
null
import pygame, gameslib class App(): def __init__(self, size: gameslib.Size = (600, 300), title: str = 'GamesLib'): self._screen = pygame.display.set_mode(size) self._scene = None pygame.display.set_caption(title) @property def screen(self) -> pygame.Surface: return self._screen @property def size(self) -> gameslib.Size: return self.screen.get_size() @property def scene(self) -> object: return self._scene @scene.setter def scene(self, scene: object) -> None: if self._scene is not None: self._scene.stop() self._scene = scene if self._scene is not None: self._scene.start(self)
19.909091
80
0.663623
89
657
4.719101
0.348315
0.171429
0.07619
0.061905
0.138095
0.138095
0.138095
0.138095
0
0
0
0.011628
0.214612
657
32
81
20.53125
0.802326
0
0
0.227273
0
0
0.0128
0
0
0
0
0
0
1
0.227273
false
0
0.045455
0.136364
0.454545
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
2
5cac943593c7fe65cc7bacbda5a3b4ec95fe7dc4
3,764
py
Python
research/im2txt/im2txt/gradcam_wrapper.py
dorazhao99/women-snowboard
9cb2569d7a3cbb846d10aabae825ead9a6e1de29
[ "Apache-2.0" ]
19
2018-09-26T03:52:59.000Z
2021-08-19T08:41:06.000Z
research/im2txt/im2txt/gradcam_wrapper.py
dorazhao99/women-snowboard
9cb2569d7a3cbb846d10aabae825ead9a6e1de29
[ "Apache-2.0" ]
13
2020-06-29T03:53:45.000Z
2022-03-11T23:28:19.000Z
research/im2txt/im2txt/gradcam_wrapper.py
dorazhao99/women-snowboard
9cb2569d7a3cbb846d10aabae825ead9a6e1de29
[ "Apache-2.0" ]
6
2018-09-19T17:07:00.000Z
2021-03-21T14:20:25.000Z
"""Model wrapper class for performing GradCam visualization with a ShowAndTellModel.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from im2txt import show_and_tell_model from im2txt.inference_utils import inference_wrapper_base import numpy as np import matplotlib # Fix to run remotely (with no display) # matplotlib.use('agg') import tensorflow as tf import PIL.Image from matplotlib import pylab as P import pickle import matplotlib.pyplot as plt plt.switch_backend('agg') import matplotlib.colors as mcolors import os import os.path as osp slim=tf.contrib.slim import scipy import sys sys.path.append('gradcam') def transparent_cmap(cmap, N=255): "Copy colormap and set alpha values" mycmap = cmap mycmap._init() mycmap._lut[:,-1] = np.linspace(0, 0.8, N+4) return mycmap class GradCamWrapper(inference_wrapper_base.InferenceWrapperBase): """Model wrapper class for performing inference with a ShowAndTellModel.""" def __init__(self): super(GradCamWrapper, self).__init__() def build_model(self, model_config): model = show_and_tell_model.ShowAndTellModel(model_config, mode="gradcam") model.build() return model def process_image(self, sess, encoded_image, input_feed, filename, vocab, word_index=1, word_id=None, save_path=None): graph = tf.get_default_graph() softmax = sess.run(fetches=["softmax:0"], feed_dict={"image_feed:0": encoded_image, "input_feed:0": input_feed}) logits = graph.get_tensor_by_name('softmax:0') neuron_selector = tf.placeholder(tf.int32) neuron_pred = logits[0,word_index][neuron_selector] pred_max = np.argmax(softmax[0][0][word_index]) if word_id != None: print('%s\tpredicted: %s with prob %f , given: %s with prob %.10f' % (filename, vocab.id_to_word(pred_max), np.max(softmax[0][0][word_index]), vocab.id_to_word(word_id), softmax[0][0][word_index][word_id])) pred_max = word_id from grad_cam import GradCam grad_cam = GradCam(graph, sess, neuron_pred, graph.get_tensor_by_name('concat:0'), 
conv_layer = graph.get_tensor_by_name('InceptionV3/InceptionV3/Mixed_7c/concat:0')) input_image = PIL.Image.open(filename) input_image = input_image.convert('RGB') im = np.asarray(input_image) im_resized = scipy.misc.imresize(im, (299, 299), interp='bilinear', mode=None) im_resized = im_resized / 127.5 - 1.0 grad_mask_2d = grad_cam.GetMask(im_resized, feed_dict = {neuron_selector: pred_max, "input_feed:0": input_feed}, should_resize = False, three_dims = False) # if np.min(grad_mask_2d) == np.max(grad_mask_2d): grad_mask_2d[0,0]=1.0000001 # Fix for a bug that happens very rarely mycmap = transparent_cmap(plt.cm.jet) w = im_resized.shape[0] h = im_resized.shape[1] y, x = np.mgrid[0:h, 0:w] grad_mask_2d_norm = grad_mask_2d / np.max(grad_mask_2d) grad_mask_2d_upscaled = scipy.misc.imresize(grad_mask_2d_norm, (w, h), interp='bilinear', mode='F') percentile = 99 vmax = np.percentile(grad_mask_2d_upscaled, percentile) vmin = np.min(grad_mask_2d_upscaled) mask_grayscale_upscaled = np.clip((grad_mask_2d_upscaled - vmin) / (vmax - vmin), 0, 1) fig, ax = plt.subplots(1, 1) plt.axis('off') ax.imshow( ((im_resized + 1.0) * 127.5)/255.0) cb = ax.contourf(x, y, mask_grayscale_upscaled, 15, cmap=mycmap) if save_path != None and save_path != '': np.save(save_path + osp.basename(filename)[0:-4] + '_' + vocab.id_to_word(pred_max) + '.npy', grad_mask_2d) plt.savefig(save_path + osp.basename(filename)[0:-4] + '_' + vocab.id_to_word(pred_max) + '.jpg', bbox_inches='tight') plt.close() else: plt.show()
36.192308
212
0.7144
578
3,764
4.385813
0.32699
0.041026
0.051282
0.020513
0.16568
0.074162
0.066272
0.066272
0.066272
0.066272
0
0.029477
0.161796
3,764
103
213
36.543689
0.77401
0.096706
0
0
0
0
0.073121
0.011992
0
0
0
0
0
1
0.056338
false
0
0.253521
0
0.352113
0.028169
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cb00102137240f57c13ca8984615debc98d7b6c
1,212
py
Python
Leak #5 - Lost In Translation/windows/Resources/Ops/PyScripts/lib/ops/cmd/groups.py
bidhata/EquationGroupLeaks
1ff4bc115cb2bd5bf2ed6bf769af44392926830c
[ "Unlicense" ]
9
2019-11-22T04:58:40.000Z
2022-02-26T16:47:28.000Z
Python.Fuzzbunch/Resources/Ops/PyScripts/lib/ops/cmd/groups.py
010001111/Vx-Suites
6b4b90a60512cce48aa7b87aec5e5ac1c4bb9a79
[ "MIT" ]
null
null
null
Python.Fuzzbunch/Resources/Ops/PyScripts/lib/ops/cmd/groups.py
010001111/Vx-Suites
6b4b90a60512cce48aa7b87aec5e5ac1c4bb9a79
[ "MIT" ]
8
2017-09-27T10:31:18.000Z
2022-01-08T10:30:46.000Z
import ops.cmd import ops import ops.env import ops.cmd.safetychecks import ops.security.auditing from ops.cmd import getBoolOption, setBoolOption, getValueOption, setListOption OpsCommandException = ops.cmd.OpsCommandException VALID_OPTIONS = ['user', 'network', 'local', 'target'] class GroupsCommand(ops.cmd.DszCommand, ): optgroups = {} reqgroups = [] reqopts = [] defopts = {} def __init__(self, plugin='groups', netmap_type=None, **optdict): ops.cmd.DszCommand.__init__(self, plugin, **optdict) def validateInput(self): for opt in self.optdict: if (opt not in VALID_OPTIONS): return False return True local = property((lambda x: getBoolOption(x, 'local')), (lambda x, y: setBoolOption(x, y, 'local'))) remote = property((lambda x: getBoolOption(x, 'remote')), (lambda x, y: setBoolOption(x, y, 'remote'))) target = property((lambda x: getValueOption(x, 'target')), (lambda x, y: setStringOption(x, y, 'target'))) user = property((lambda x: getValueOption(x, 'user')), (lambda x, y: setStringOption(x, y, 'user'))) ops.cmd.command_classes['groups'] = GroupsCommand ops.cmd.aliasoptions['groups'] = VALID_OPTIONS
40.4
110
0.679043
145
1,212
5.586207
0.365517
0.059259
0.074074
0.069136
0.264198
0.118519
0
0
0
0
0
0
0.175743
1,212
30
111
40.4
0.810811
0
0
0
0
0
0.067657
0
0
0
0
0
0
1
0.076923
false
0
0.230769
0
0.730769
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
5cb01f72c2e1f02740ee366e9419f4fc76761ed9
1,249
py
Python
run/f1score.py
osmanbaskaya/wsid
b486fb907a85981aa0d0c30210df1d0c33fcbff2
[ "MIT" ]
1
2016-12-22T21:41:21.000Z
2016-12-22T21:41:21.000Z
run/f1score.py
osmanbaskaya/mapping-impact
8024dd3b916ac2dfc336221dd32faba4c0a98442
[ "MIT" ]
4
2015-06-20T14:09:35.000Z
2015-06-26T18:29:26.000Z
run/f1score.py
osmanbaskaya/mapping-impact
8024dd3b916ac2dfc336221dd32faba4c0a98442
[ "MIT" ]
1
2018-05-11T00:12:59.000Z
2018-05-11T00:12:59.000Z
#! /usr/bin/python # -*- coding: utf-8 -*- __author__ = "Osman Baskaya" import os import subprocess import sys def s10_f1_run(): systems = os.listdir('../../systems') command = "./sup_eval.sh ../../systems/%s . ../80_20/all/mapping.%d.key ../80_20/all/test.%d.key 2>/dev/null | tail -1 | grep -oP '0.\d+'" for system in systems: scores = [] if system != 'filesToSystemsMap': for i in range(1,6): c = command % (system, i, i) s = subprocess.Popen(c, shell=True, stdout=subprocess.PIPE).stdout.read().strip() scores.append(float(s)) print >> sys.stderr, system, scores print "%s\t%f" % (system, sum(scores) / len(scores)) def s07_f1_run(): systems = os.listdir('../../s07/systems') command = "./sup_eval.sh ../../s07/systems/%s . ../../s07/keys/random_split/82_18/senseinduction.random82train.key ../../s07/keys/random_split/82_18/senseinduction.random82test.key 2>/dev/null | tail -1 | grep -oP '0.\d+'" for system in systems: c = command % (system) s = subprocess.Popen(c, shell=True, stdout=subprocess.PIPE).stdout.read().strip() s = float(s) print "%s\t%f" % (system, s) s07_f1_run()
35.685714
226
0.583667
173
1,249
4.109827
0.421965
0.021097
0.033755
0.039381
0.554149
0.390999
0.390999
0.289733
0.289733
0.289733
0
0.047619
0.226581
1,249
34
227
36.735294
0.688406
0.031225
0
0.16
0
0.08
0.337748
0.150662
0
0
0
0
0
0
null
null
0
0.12
null
null
0.12
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
5cb1d1c1b2d0620a0d048a8f6d7b4fcc7668f049
1,851
py
Python
tests/tcn/test_keras_onnx.py
ggardiakos/timemachines
845001fc6ca3005d3612ef8f44040f5d1e15d9b8
[ "MIT" ]
null
null
null
tests/tcn/test_keras_onnx.py
ggardiakos/timemachines
845001fc6ca3005d3612ef8f44040f5d1e15d9b8
[ "MIT" ]
null
null
null
tests/tcn/test_keras_onnx.py
ggardiakos/timemachines
845001fc6ca3005d3612ef8f44040f5d1e15d9b8
[ "MIT" ]
null
null
null
# SPDX-License-Identifier: Apache-2.0 from timemachines.skaters.tcn.tcninclusiontraining import using_tcntraining if using_tcntraining: from onnxruntime import InferenceSession import numpy as np from tensorflow import keras from tensorflow.keras import layers, Input def test_keras_onnx_runtime(): """ :return: test if onnx and keras seem to be working """ # adapted from https://github.com/microprediction/tensorflow-onnx/blob/master/examples/end2end_tfkeras.py # Creates the model. model = keras.Sequential() model.add(Input((4, 4))) model.add(layers.SimpleRNN(8)) model.add(layers.Dense(2)) print(model.summary()) input_names = [n.name for n in model.inputs] output_names = [n.name for n in model.outputs] print('inputs:', input_names) print('outputs:', output_names) ######################################## # Training # .... # Skipped. ######################################## # Testing the model. input = np.random.randn(2, 4, 4).astype(np.float32) expected = model.predict(input) print(expected) ######################################## # Serialize but do not save the model from tf2onnx.keras2onnx_api import convert_keras onnx_model = convert_keras(model=model,name='example') onnx_model_as_byte_string = onnx_model.SerializeToString() ######################################## # Runs onnxruntime. session = InferenceSession(onnx_model_as_byte_string) got = session.run(None, {'input_1': input}) print(got[0]) ######################################## # Measures the differences. assert (np.abs(got[0] - expected).max())<1e-5
34.277778
113
0.558617
196
1,851
5.153061
0.505102
0.035644
0.027723
0.025743
0.083168
0.041584
0.041584
0
0
0
0
0.013591
0.244733
1,851
53
114
34.924528
0.70887
0.179363
0
0
0
0
0.022551
0
0
0
0
0
0.038462
1
0.038462
false
0
0.230769
0
0.269231
0.192308
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cb446a4e4e2baeadec21e9cc78c16dd22923e50
2,905
py
Python
utest/controller/test_rename_keywords.py
hoteltianya/RIDE
dcdccfec631517743d24c8e31fc6687fd29338a0
[ "ECL-2.0", "Apache-2.0" ]
1
2020-04-11T12:55:29.000Z
2020-04-11T12:55:29.000Z
utest/controller/test_rename_keywords.py
hoteltianya/RIDE
dcdccfec631517743d24c8e31fc6687fd29338a0
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
utest/controller/test_rename_keywords.py
hoteltianya/RIDE
dcdccfec631517743d24c8e31fc6687fd29338a0
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
import unittest from robotide.controller.commands import * from nose.tools import assert_true, assert_false, assert_equals from base_command_test import TestCaseCommandTest class TestRenameKeywords(TestCaseCommandTest): def test_test_is_gerkin_kw(self): observer = NullObserver() myobject = RenameKeywordOccurrences("Step 1", "My New Keyword", observer) # ._get_gherkin("keyword value") is_gherkin, kw_value = myobject._get_gherkin("Given a Keyword") assert_true(is_gherkin) assert_equals(kw_value, "a Keyword") is_gherkin, kw_value = myobject._get_gherkin("Then a Keyword") assert_true(is_gherkin) assert_equals(kw_value, "a Keyword") is_gherkin, kw_value = myobject._get_gherkin("And a Keyword") assert_true(is_gherkin) assert_equals(kw_value, "a Keyword") is_gherkin, kw_value = myobject._get_gherkin("When a Keyword") assert_true(is_gherkin) assert_equals(kw_value, "a Keyword") is_gherkin, kw_value = myobject._get_gherkin("But a Keyword") assert_true(is_gherkin) assert_equals(kw_value, "a Keyword") is_gherkin, kw_value = myobject._get_gherkin("But Given a Keyword") assert_true(is_gherkin) assert_equals(kw_value, "Given a Keyword") is_gherkin, kw_value = myobject._get_gherkin("If a Keyword") assert_false(is_gherkin) assert_equals(kw_value, "If a Keyword") def test_check_gerkin_kw(self): observer = NullObserver() myobject = RenameKeywordOccurrences("Step 1", "My New Keyword", observer) # ._check_gherkin("new keyword value", "original keyword value") original_kw, new_kw = myobject._check_gherkin("Given a Keyword", "a Keyword") assert_equals(new_kw, "Given a Keyword") assert_equals(original_kw, "a Keyword") original_kw, new_kw = myobject._check_gherkin("a Keyword", "Given a Keyword") assert_equals(new_kw, "a Keyword") assert_equals(original_kw, "Given a Keyword") original_kw, new_kw = myobject._check_gherkin("When a Keyword", "Given a Keyword") assert_equals(new_kw, "When a Keyword") assert_equals(original_kw, "Given a Keyword") original_kw, new_kw = 
myobject._check_gherkin("My new Keyword", "Old Keyword") assert_equals(new_kw, "My new Keyword") assert_equals(original_kw, "Old Keyword") original_kw, new_kw = myobject._check_gherkin("But Given a new Keyword", "Given a new Keyword") assert_equals(new_kw, "But Given a new Keyword") assert_equals(original_kw, "Given a new Keyword") original_kw, new_kw = myobject._check_gherkin("Given a new Keyword", "Given an old Keyword") assert_equals(new_kw, "a new Keyword") assert_equals(original_kw, "an old Keyword") if __name__ == "__main__": unittest.main()
47.622951
103
0.692255
383
2,905
4.916449
0.125326
0.110462
0.096654
0.05948
0.787573
0.762613
0.655337
0.599044
0.573022
0.443972
0
0.000873
0.21136
2,905
60
104
48.416667
0.821039
0.032014
0
0.326923
0
0
0.204343
0
0
0
0
0
0.519231
1
0.038462
false
0
0.076923
0
0.134615
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
2
5cb48a34918725ed322fa84f1070cebbca501f40
488
py
Python
bin/uber.py
taps1197/Traahi
52765e26b844169349de7c5a13da8edcbd6e7d47
[ "MIT" ]
1
2019-03-29T11:38:03.000Z
2019-03-29T11:38:03.000Z
bin/uber.py
taps1197/Traahi
52765e26b844169349de7c5a13da8edcbd6e7d47
[ "MIT" ]
null
null
null
bin/uber.py
taps1197/Traahi
52765e26b844169349de7c5a13da8edcbd6e7d47
[ "MIT" ]
null
null
null
import os import json from uber_rides.session import Session from uber_rides.client import UberRidesClient from pprint import pprint session = Session(server_token="P0xeLpEgkQct68R3USut3nBst62X83Tz4V8BT7CR") client = UberRidesClient(session) response = client.get_products(37.77, -122.41) products = response.json.get('products') with open('data.json','w') as outfile: json.dump(products, outfile) with open('data.json') as inFile: data = json.load(inFile) pprint(data)
21.217391
74
0.770492
64
488
5.8125
0.453125
0.064516
0.069892
0.086022
0
0
0
0
0
0
0
0.04918
0.125
488
22
75
22.181818
0.822014
0
0
0
0
0
0.137295
0.081967
0
0
0
0
0
1
0
false
0
0.357143
0
0.357143
0.142857
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
5cb4f3d7bb829612fae0f449b3de61ecc0409094
1,476
py
Python
software/Opal/spud/diamond/build/lib.linux-x86_64-2.7/diamond/databuttonswidget.py
msc-acse/acse-9-independent-research-project-Wade003
cfcba990d52ccf535171cf54c0a91b184db6f276
[ "MIT" ]
2
2020-05-11T02:39:46.000Z
2020-05-11T03:08:38.000Z
software/multifluids_icferst/libspud/diamond/build/lib.linux-x86_64-2.7/diamond/databuttonswidget.py
msc-acse/acse-9-independent-research-project-Wade003
cfcba990d52ccf535171cf54c0a91b184db6f276
[ "MIT" ]
null
null
null
software/multifluids_icferst/libspud/diamond/build/lib.linux-x86_64-2.7/diamond/databuttonswidget.py
msc-acse/acse-9-independent-research-project-Wade003
cfcba990d52ccf535171cf54c0a91b184db6f276
[ "MIT" ]
2
2020-05-21T22:50:19.000Z
2020-10-28T17:16:31.000Z
#!/usr/bin/env python # This file is part of Diamond. # # Diamond is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Diamond is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Diamond. If not, see <http://www.gnu.org/licenses/>. import gobject import gtk class DataButtonsWidget(gtk.HBox): __gsignals__ = { "revert" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()), "store" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ())} def __init__(self): gtk.HBox.__gobject_init__(self) revertButton = gtk.Button() revertButton.set_label("Revert data") revertButton.connect("clicked", self._revert) storeButton = gtk.Button() storeButton.set_label("Store data") storeButton.connect("clicked", self._store) self.pack_start(revertButton) self.pack_end(storeButton) return def _revert(self, widget = None): self.emit("revert") def _store(self, widget = None): self.emit("store") gobject.type_register(DataButtonsWidget)
30.75
79
0.70664
199
1,476
5.100503
0.517588
0.014778
0.038424
0.056158
0.193103
0.124138
0.068966
0
0
0
0
0.000845
0.197832
1,476
47
80
31.404255
0.856419
0.45935
0
0
0
0
0.072797
0
0
0
0
0
0
1
0.142857
false
0
0.095238
0
0.380952
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cb56963b7976f720d715207e1996f45b2421639
149
py
Python
shelf/link_title.py
not-nexus/shelf
ea59703082402ad3b6454482f0487418295fbd19
[ "MIT" ]
4
2016-11-07T13:02:18.000Z
2019-09-03T02:04:05.000Z
shelf/link_title.py
not-nexus/shelf
ea59703082402ad3b6454482f0487418295fbd19
[ "MIT" ]
21
2016-11-30T20:44:52.000Z
2017-05-02T15:38:56.000Z
shelf/link_title.py
not-nexus/shelf
ea59703082402ad3b6454482f0487418295fbd19
[ "MIT" ]
2
2017-01-24T14:36:04.000Z
2020-01-13T16:10:05.000Z
class LinkTitle(object): ARTIFACT_LIST = "artifact-list" ARTIFACT_ROOT = "artifact-root" ARTIFACT = "artifact" METADATA = "metadata"
24.833333
35
0.684564
15
149
6.666667
0.466667
0.24
0.4
0
0
0
0
0
0
0
0
0
0.201342
149
5
36
29.8
0.840336
0
0
0
0
0
0.281879
0
0
0
0
0
0
1
0
false
0
0
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
5
5cb5891ba04d0b3c481ca3b5fc5261fc95bd5a1e
5,274
py
Python
metapose/launch_iterative_solver.py
dumpmemory/google-research
bc87d010ab9086b6e92c3f075410fa6e1f27251b
[ "Apache-2.0" ]
null
null
null
metapose/launch_iterative_solver.py
dumpmemory/google-research
bc87d010ab9086b6e92c3f075410fa6e1f27251b
[ "Apache-2.0" ]
null
null
null
metapose/launch_iterative_solver.py
dumpmemory/google-research
bc87d010ab9086b6e92c3f075410fa6e1f27251b
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Launch script for running a full probabilistic iterative solver baseline.""" from absl import app from absl import flags import tensorflow as tf import tensorflow_datasets as tfds from metapose import data_utils from metapose import inference_time_optimization as inf_opt _INPUT_PATH = flags.DEFINE_string( 'input_path', '', 'path to an folder containing a tfrec file and a features.json file') _OUTPUT_PATH = flags.DEFINE_string( 'output_path', None, 'path to the output a dataset with refined 3d poses') _N_STEPS = flags.DEFINE_integer('n_steps', 100, 'optimizer (adam) steps') _DEBUG_FIRST_N = flags.DEFINE_integer( 'debug_first_n', None, 'read only first n records') _LEARNING_RATE = flags.DEFINE_float( 'learning_rate', 1e-2, 'optimizer (adam) learning rate') _REPORT_N_APPROX = flags.DEFINE_integer( 'report_n_approx', 50, 'number of intermediate optimization results to report') _CAM_SUBSET = flags.DEFINE_list( 'cam_subset', list(map(str, range(4))), 'comma-separated list of camera ids to use, e.g. 
3,4,5') _GT_HEATMAPS = flags.DEFINE_bool( 'gt_heatmaps', False, 'whether to replace heatmaps with fake ground truth heatmaps') _FAKE_GT_HT_STD = flags.DEFINE_float( 'fake_gt_ht_std', 0.0, 'how much noise to add to positions of means of fake gt heatmaps') _USE_WEAK_REPR = flags.DEFINE_bool( 'use_weak_repr', False, 'whether to use weak projection to get ground truth heatmaps') _FAKE_GT_INIT = flags.DEFINE_bool( 'fake_gt_init', False, 'whether to use ground truth instead of monocular 3d predictions') _RANDOM_INIT = flags.DEFINE_bool( 'random_init', False, 'whether to use random noise instead of monocular 3d predictions') _EDGE_LENS_LAMBDA = flags.DEFINE_float( 'edge_lens_lambda', 0.0, 'weight of the normalized limb length loss during refinement') flags.mark_flag_as_required('output_path') def main(_): cam_subset = list(map(int, _CAM_SUBSET.value)) n_cam = len(cam_subset) report_n = ( _N_STEPS.value // (_N_STEPS.value // (_REPORT_N_APPROX.value - 1)) + 1) output_shape_dtype = { # optimization results 'loss': ([report_n], tf.float32), 'iters': ([report_n], tf.int32), 'pose3d_opt_preds': ([report_n, 17, 3], tf.float32), 'cam_rot_opt_preds': ([report_n, n_cam, 3, 3], tf.float32), 'scale_opt_preds': ([report_n, n_cam], tf.float32), 'shift_opt_preds': ([report_n, n_cam, 3], tf.float32), # metrics 'pose2d_opt_preds': ([report_n, n_cam, 17, 2], tf.float32), 'pose3d_gt_aligned_pred_3d_proj': ([report_n, n_cam, 17, 2], tf.float32), 'pose3d_pred_pmpjpe': ([report_n], tf.float32), 'pose2d_pred_err': ([report_n], tf.float32), 'pose2d_pred_vs_posenet_err': ([report_n], tf.float32), 'pose2d_gt_posenet_err_mean': ([], tf.float32), 'pose3d_gt_backaligned_pose2d_gt_err': ([report_n], tf.float32), # input data 'pose3d': ([17, 3], tf.float64), 'cam_pose3d': ([n_cam, 3], tf.float64), 'cam_rot': ([n_cam, 3, 3], tf.float64), 'cam_intr': ([n_cam, 4], tf.float64), 'cam_kd': ([n_cam, 5], tf.float64), 'pose2d_gt': ([n_cam, 17, 2], tf.float64), 'pose2d_repr': ([n_cam, 17, 2], tf.float64), 
'heatmaps': ([n_cam, 17, 4, 4], tf.float64), # note! pose2d_pred is actually the "mean heatmap" 2D pred 'pose2d_pred': ([n_cam, 17, 2], tf.float64), 'keys': ([n_cam], tf.string), 'bboxes': ([n_cam, 4], tf.int32), 'pose3d_epi_pred': ([n_cam, 17, 3], tf.float32), 'cam_subset': ([n_cam], tf.int32), } output_spec = tfds.features.FeaturesDict({ k: tfds.features.Tensor(shape=s, dtype=d) for k, (s, d) in output_shape_dtype.items() }) ds = data_utils.read_tfrec_feature_dict_ds(_INPUT_PATH.value) if _DEBUG_FIRST_N.value is not None: ds = ds.take(_DEBUG_FIRST_N.value) dataset = [] for _, data_rec in ds: opt_stats = inf_opt.run_inference_optimization( data_rec=data_rec, opt_steps=_N_STEPS.value, report_n_results=_REPORT_N_APPROX.value, cam_subset=cam_subset, edge_lens_lambda=_EDGE_LENS_LAMBDA.value, fake_gt_heatmaps=_GT_HEATMAPS.value, fake_gt_ht_std=_FAKE_GT_HT_STD.value, fake_gt_init=_FAKE_GT_INIT.value, random_init=_RANDOM_INIT.value, recompute_weak_repr=_USE_WEAK_REPR.value, learning_rate=_LEARNING_RATE.value) print('pmpjpe', opt_stats['pose3d_pred_pmpjpe'][-1]) dataset.append(opt_stats) data_utils.write_tfrec_feature_dict_ds( dataset, output_spec, _OUTPUT_PATH.value) if __name__ == '__main__': app.run(main)
37.140845
79
0.695108
795
5,274
4.296855
0.300629
0.036885
0.012295
0.023419
0.147248
0.074063
0.028689
0.016979
0.016979
0
0
0.031591
0.183732
5,274
141
80
37.404255
0.761905
0.142207
0
0
0
0
0.269496
0.025994
0
0
0
0
0
1
0.009804
false
0
0.058824
0
0.068627
0.009804
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cb679704e23a1b0acbe6405efd9aa5634185c0e
3,450
py
Python
models.py
RCSnyder/subreddit_scraper
17062c585f2dc0136e6e4ecb914d1ff456c80069
[ "MIT" ]
null
null
null
models.py
RCSnyder/subreddit_scraper
17062c585f2dc0136e6e4ecb914d1ff456c80069
[ "MIT" ]
null
null
null
models.py
RCSnyder/subreddit_scraper
17062c585f2dc0136e6e4ecb914d1ff456c80069
[ "MIT" ]
null
null
null
from abc import ABC, abstractmethod import nltk nltk.download('vader_lexicon') from nltk.sentiment.vader import SentimentIntensityAnalyzer s = SentimentIntensityAnalyzer() import flair flair_sentiment = flair.models.TextClassifier.load('en-sentiment') """ from azure.ai.textanalytics import TextAnalyticsClient from azure.core.credentials import AzureKeyCredential azureclient = TextAnalyticsClient(endpoint="https://textsentimentcheck.cognitiveservices.azure.com/", credential=AzureKeyCredential("")) """ # add an instance of your model to this once you have defined it models = [] # all added sentiment analysis models must be wrapped # in a class that inherits from this class to enforce # a common api between different models class baseSentimentModel(ABC): def __init__(self, name, model): self.name = name self.model = model # this is the only required method # it should take the text and return the predicted # sentiment as a number between [-1, 1] where # 1 is maximally positive, 0 is nuetral, and -1 is maximally negative @abstractmethod def predict(self, texts): pass class nltkModel(baseSentimentModel): def predict(self, texts): return [self.parsePolarity(self.model.polarity_scores(text)) for text in texts] def parsePolarity(self, polarity): if polarity['neg'] > polarity['pos'] and polarity['neg'] > polarity['neu']: return -1.0 elif polarity['pos'] > polarity['neg'] and polarity['pos'] > polarity['neu']: return 1.0 return 0.0 models.append(nltkModel('nltkVader', s)) class flairModel(baseSentimentModel): def __init__(self, name, model): self.sentMapping = {'NEGATIVE' : -1.0, 'NEUTRAL': 0.0, 'POSITIVE': 1.0} super().__init__(name, model) def predict(self, texts): sents = [flair.data.Sentence(text) for text in texts] self.model.predict(sents) result = [] for i, t in enumerate(sents): try: result.append(self.sentMapping[t.labels[0].value]) except: print(texts[i]) return result models.append(flairModel('flair', flair_sentiment)) """ class azureModel(baseSentimentModel): def 
predict(self, texts): responses = self.model.analyze_sentiment(documents=texts) return list(map(self.parseResponses, responses)) def parseResponses(self, responses): totals = [0.0, 0.0, 0.0] for response in responses: totals[0] += response.confidence_scores.positive totals[1] += response.confidence_scores.neutral totals[2] += response.confidence_scores.negative max_idx = 0 if totals[1] > totals[0]: max_idx = 1 if totals[2] > totals[max_idx]: max_idx = 2 return 1.0 - max_idx # this returns 1.0 for pos, 0.0 for neutral, and -1.0 for negative models.append(azureModel('azureModel', azureclient)) """ """ example of this: class myModel(baseSentimentModel): # this example is a categorical model # so the values must be converted to numbers def predict(self, text): pred = self.model.evaluateSentiment(text) if pred == 'positive': return 1.0 elif pred == 'nuetral': return 0.0 else: return -1.0 models.append(myModel('example model', somePackage.model)) """
31.944444
140
0.655072
419
3,450
5.334129
0.334129
0.008054
0.03132
0.034004
0.09038
0.021477
0
0
0
0
0
0.017898
0.238841
3,450
107
141
32.242991
0.833206
0.116232
0
0.125
0
0
0.055556
0
0
0
0
0
0
1
0.15
false
0.025
0.1
0.025
0.45
0.025
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cb8c0c2be663ce0d5770be43328e7edfd49f0dd
384
py
Python
marioshop/tools/migrations/0008_auto_20210509_1401.py
alpinista06/GBB-Store-project
55a8c614923e770eb747c37e145a9f422d1bac2a
[ "MIT" ]
null
null
null
marioshop/tools/migrations/0008_auto_20210509_1401.py
alpinista06/GBB-Store-project
55a8c614923e770eb747c37e145a9f422d1bac2a
[ "MIT" ]
null
null
null
marioshop/tools/migrations/0008_auto_20210509_1401.py
alpinista06/GBB-Store-project
55a8c614923e770eb747c37e145a9f422d1bac2a
[ "MIT" ]
null
null
null
# Generated by Django 3.0.14 on 2021-05-09 17:01 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('tools', '0007_auto_20210429_1427'), ] operations = [ migrations.AlterField( model_name='tool', name='quantity', field=models.IntegerField(null=True), ), ]
20.210526
49
0.596354
41
384
5.487805
0.853659
0
0
0
0
0
0
0
0
0
0
0.117216
0.289063
384
18
50
21.333333
0.70696
0.119792
0
0
1
0
0.119048
0.068452
0
0
0
0
0
1
0
false
0
0.083333
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
5cbd0d3f9fd7d3affecf3aeb1717980ce341da1d
12,141
py
Python
tests/unit/data/test_datamodule.py
pietrolesci/pytorch-energizer
31b23347967963cda704bda8b05f3e567368c9bb
[ "MIT" ]
null
null
null
tests/unit/data/test_datamodule.py
pietrolesci/pytorch-energizer
31b23347967963cda704bda8b05f3e567368c9bb
[ "MIT" ]
null
null
null
tests/unit/data/test_datamodule.py
pietrolesci/pytorch-energizer
31b23347967963cda704bda8b05f3e567368c9bb
[ "MIT" ]
null
null
null
import pytest from pytorch_lightning.utilities.exceptions import MisconfigurationException from torch.utils.data.sampler import SequentialSampler from energizer.data import ActiveDataModule from energizer.data.datamodule import FixedLengthSampler @pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True) def test_len(dataset_arg): """Test that measures of length are consistent.""" # no instances ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg) ads.prepare_data() # useless: just pass but for coverage ads.setup() # useless: just pass but for coverage assert ads.total_labelled_size == ads.train_size + ads.val_size assert len(ads.train_dataset) == ads.train_size == ads.val_size == ads.total_labelled_size == 0 assert len(dataset_arg) == len(ads.pool_dataset) == ads.pool_size assert len(dataset_arg) == ads.total_labelled_size + ads.pool_size # one instance in the train dataset ads.label(0) assert ads.total_labelled_size == ads.train_size + ads.val_size assert len(ads.train_dataset) == ads.train_size == ads.total_labelled_size == 1 assert ads.val_dataset is None assert len(dataset_arg) - ads.total_labelled_size == len(ads.pool_dataset) == ads.pool_size assert len(dataset_arg) == ads.total_labelled_size + ads.pool_size # one instance in the train dataset and one in the val dataset ads.val_split = 0.5 # hack ads.label([0, 1]) assert ads.total_labelled_size == ads.train_size + ads.val_size assert len(ads.train_dataset) == ads.train_size == 2 assert len(ads.val_dataset) == ads.val_size == 1 assert len(dataset_arg) - ads.total_labelled_size == len(ads.pool_dataset) == ads.pool_size assert len(dataset_arg) == ads.total_labelled_size + ads.pool_size @pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True) def test_indexing(dataset_arg): """Test that ActiveDataModule is not indexable directly.""" ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg) with pytest.raises(TypeError): assert 
ads[0] @pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True) def test_labelling(dataset_arg): """Test that labelling changes all the required states.""" ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg) len_dataset_arg = len(dataset_arg) assert ads.last_labelling_step == 0 assert ads.train_size == 0 assert ads.pool_size == len_dataset_arg assert ads.has_labelled_data is False assert ads.has_unlabelled_data is True assert ads.train_dataset.indices == [] for i in range(1, len_dataset_arg + 1): ads.label(0) # always label the first instance in the pool assert ads.last_labelling_step == i assert ads.train_size == i assert ads.pool_size == len_dataset_arg - ads.train_size assert ads.has_labelled_data is True if i < len_dataset_arg: assert ads.has_unlabelled_data is True else: assert ads.has_unlabelled_data is False assert ads.train_dataset.indices == list(range(i)) assert ads.last_labelling_step == len_dataset_arg assert ads.train_size == len_dataset_arg assert ads.pool_size == len_dataset_arg - ads.train_size assert ads.has_labelled_data is True assert ads.has_unlabelled_data is False assert ads.train_dataset.indices == list(range(len_dataset_arg)) @pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True) def test_labelling_multiple_indices(dataset_arg): """Test labelling multiple instances at once.""" ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg) pool_ids = [0, 8, 7] # they are the first to be labelled so correspond to ids in oracle ads.label(pool_ids) assert ads.train_dataset.indices == sorted(pool_ids) @pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True) def test_labelling_duplicates(dataset_arg): """Test that labelling duplicate indices results in a single instance to be labelled.""" # check behaviour when batch of indices contains ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg) pool_ids = [0, 0] # they 
are the first to be labelled so correspond to ids in oracle ads.label(pool_ids) assert ads.train_size == 1 # check behaviour when batch of indices contains ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg, val_split=0.5) pool_ids = [0, 0, 1] # they are the first to be labelled so correspond to ids in oracle ads.label(pool_ids) assert ads.train_size == ads.val_size == 1 @pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True) def test_labelling_val_split(dataset_arg): """Test that labelling with val_split works.""" # check split works ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg, val_split=0.5) pool_ids = [0, 1] # they are the first to be labelled so correspond to ids in oracle ads.label(pool_ids) assert ads.train_size == ads.val_size == 1 # check that val_split receives at least 1 instance when there are two labelled instances # and the probability is too small that it randomly would receive just one ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg, val_split=0.0001) pool_ids = [0, 1] # they are the first to be labelled so correspond to ids in oracle ads.label(pool_ids) assert ads.train_size == ads.val_size == 1 # check behaviour when there is only one instance (bonus: using a duplicate) ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg, val_split=0.99) pool_ids = [0, 0] # they are the first to be labelled so correspond to ids in oracle ads.label(pool_ids) assert ads.train_size == 1 @pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True) def test_reset_at_labelling_step(dataset_arg): """Test that resetting the labelling steps sets the correct states.""" ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg) len_dataset_arg = len(dataset_arg) ads.label(0) # label first assert ads.last_labelling_step == 1 assert ads.train_size == 1 assert ads.pool_size == len_dataset_arg - ads.train_size assert ads.has_labelled_data is True 
assert ads.has_unlabelled_data is True assert ads.train_dataset.indices == [0] ads.label(list(range(len_dataset_arg - 1))) # label the rest assert ads.train_size == len_dataset_arg assert ads.pool_size == len_dataset_arg - ads.train_size assert ads.has_labelled_data is True assert ads.has_unlabelled_data is False assert ads.train_dataset.indices == list(range(len_dataset_arg)) ads.reset_at_labelling_step(1) # go back to when there was one instance assert ads.train_size == 1 assert ads.pool_size == len_dataset_arg - ads.train_size assert ads.has_labelled_data is True assert ads.has_unlabelled_data is True assert ads.train_dataset.indices == [0] ads.reset_at_labelling_step(0) # go back to when there was nothing labelled assert ads.last_labelling_step == 2 assert ads.train_size == 0 assert ads.pool_size == len_dataset_arg - ads.train_size assert ads.has_labelled_data is False assert ads.has_unlabelled_data is True assert ads.train_dataset.indices == [] ads.reset_at_labelling_step(ads.last_labelling_step) # reset to the last step assert ads.train_size == len_dataset_arg assert ads.pool_size == len_dataset_arg - ads.train_size assert ads.has_labelled_data is True assert ads.has_unlabelled_data is False assert ads.train_dataset.indices == list(range(len_dataset_arg)) with pytest.raises(ValueError): assert ads.reset_at_labelling_step(100) @pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True) def test_sample_pool_indices(dataset_arg): ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg) with pytest.raises(ValueError): assert ads.sample_pool_idx(-1) with pytest.raises(ValueError): assert ads.sample_pool_idx(0) with pytest.raises(ValueError): assert ads.sample_pool_idx(ads.pool_size + 1) assert len(ads.sample_pool_idx(ads.pool_size)) == ads.pool_size assert len(ads.sample_pool_idx(1)) == 1 @pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True) def test_curriculum(dataset_arg): ads = 
ActiveDataModule(num_classes=2, train_dataset=dataset_arg) for _ in range(5): ads.label(0) assert ads.curriculum_dataset().indices == list(range(5)) @pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True) def test_initial_labelling(dataset_arg): ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg) assert ads.train_size == 0 ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg, initial_labels=[0]) assert ads.train_size == 1 ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg, initial_labels=2) assert ads.train_size == 2 ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg, initial_labels=2, val_split=0.5) assert ads.train_size == ads.val_size == 1 @pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True) def test_dataloader_len(dataset_arg): for batch_size in range(1, len(dataset_arg) + 1): ads = ActiveDataModule( num_classes=2, train_dataset=dataset_arg, initial_labels=2, batch_size=batch_size, ) assert ads.train_dataloader().batch_size is None assert ads.train_dataloader().batch_sampler.batch_size == batch_size assert len(ads.train_dataloader().batch_sampler) == len(ads.train_dataloader()) # min_steps_per_epoch for shuffle in (True, False): ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg, initial_labels=2, shuffle=shuffle) ads._min_steps_per_epoch = 1 assert len(ads.train_dataloader().batch_sampler) == len(ads.train_dataloader()) == 2 for _ in range(2): assert next(iter(ads.train_dataloader())) ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg, initial_labels=2, shuffle=shuffle) ads._min_steps_per_epoch = 10 assert len(ads.train_dataloader().batch_sampler) == len(ads.train_dataloader()) == 10 for _ in range(10): assert next(iter(ads.train_dataloader())) @pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True) def test_sampler_type(dataset_arg): ads = ActiveDataModule( num_classes=2, 
train_dataset=dataset_arg, test_dataset=dataset_arg, predict_dataset=dataset_arg, val_dataset=dataset_arg, initial_labels=2, batch_size=1, ) assert isinstance(ads.train_dataloader().batch_sampler.sampler, FixedLengthSampler) assert isinstance(ads.pool_dataloader().batch_sampler.sampler, SequentialSampler) assert isinstance(ads.val_dataloader().batch_sampler.sampler, SequentialSampler) assert isinstance(ads.test_dataloader().batch_sampler.sampler, SequentialSampler) assert isinstance(ads.predict_dataloader().batch_sampler.sampler, SequentialSampler) @pytest.mark.parametrize("dataset_arg", ["mock_dataset", "mock_hf_dataset"], indirect=True) def test_raise_errors(dataset_arg): for i in (-0.5, 1.0): with pytest.raises(MisconfigurationException): ActiveDataModule(num_classes=2, train_dataset=dataset_arg, val_split=i) with pytest.raises(MisconfigurationException): ActiveDataModule(num_classes=2, train_dataset=dataset_arg, val_split=i, val_dataset=dataset_arg) with pytest.raises(RuntimeError): ads = ActiveDataModule(num_classes=2, train_dataset=dataset_arg) next(iter(ads.train_dataloader()))
43.206406
107
0.732889
1,748
12,141
4.828375
0.093822
0.097156
0.047749
0.073578
0.817062
0.738863
0.706043
0.697749
0.659242
0.631754
0
0.012018
0.170744
12,141
280
108
43.360714
0.826281
0.123878
0
0.55665
0
0
0.046705
0
0
0
0
0
0.44335
1
0.064039
false
0
0.024631
0
0.08867
0
0
0
0
null
0
0
0
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
5
5cc027e9ce7d216791c0e4db8b4a2c118390640d
1,086
py
Python
apis/invoice-generator/utlis/utlis.py
sats268842/billie-invoice
f10a365acd4c12369466dd5c03a813bcf997c87b
[ "Apache-2.0" ]
null
null
null
apis/invoice-generator/utlis/utlis.py
sats268842/billie-invoice
f10a365acd4c12369466dd5c03a813bcf997c87b
[ "Apache-2.0" ]
null
null
null
apis/invoice-generator/utlis/utlis.py
sats268842/billie-invoice
f10a365acd4c12369466dd5c03a813bcf997c87b
[ "Apache-2.0" ]
null
null
null
# import os # from typing import List # # import aiofiles # from fastapi import UploadFile # from datetime import datetime # import shutil # # from app.model.emails import EmailRequest # # from app.utils.mail_utils import send_mail # def get_datetime(): # # return a current datetime string # return datetime.now().strftime("%Y%m%d_%H%M%S") # async def save_uploaded_files_to_wkdir(files): # # Create temporary folder for storing uploaded files # file_path = f"app/data/temp/upload_{get_datetime()}" # os.mkdir(file_path) # # save the file in local directory and get the list of files # list_files = [] # for file in files: # _file_name = os.path.join(file_path, file.filename) # print("File Name: ", _file_name) # async with aiofiles.open(_file_name, "wb") as out_file: # content = await file.read() # async read # await out_file.write(content) # async write # list_files.append(_file_name) # return { # "path_to_folder": file_path, # "list_files": list_files, # }
31.028571
66
0.656538
148
1,086
4.614865
0.459459
0.058565
0.040996
0
0
0
0
0
0
0
0
0
0.235727
1,086
34
67
31.941176
0.822892
0.934622
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
2
5cc062591cfe12fa5a06316443ec900f3dbf315c
1,139
py
Python
sagemaker-pyspark-sdk/tests/wrapper_test.py
hyandell/sagemaker-spark
0149cf0f52562008a1a163e455207bb6d00d3e4a
[ "Apache-2.0" ]
261
2017-11-30T04:53:01.000Z
2022-03-27T14:52:46.000Z
sagemaker-pyspark-sdk/tests/wrapper_test.py
hyandell/sagemaker-spark
0149cf0f52562008a1a163e455207bb6d00d3e4a
[ "Apache-2.0" ]
114
2017-12-15T23:10:09.000Z
2022-01-07T18:52:30.000Z
sagemaker-pyspark-sdk/tests/wrapper_test.py
hyandell/sagemaker-spark
0149cf0f52562008a1a163e455207bb6d00d3e4a
[ "Apache-2.0" ]
127
2017-11-30T18:53:51.000Z
2022-03-13T18:58:10.000Z
import os import pytest from pyspark import SparkConf, SparkContext from sagemaker_pyspark import classpath_jars from sagemaker_pyspark.wrapper import Option, ScalaMap, ScalaList @pytest.fixture(autouse=True) def with_spark_context(): os.environ['SPARK_CLASSPATH'] = ":".join(classpath_jars()) conf = (SparkConf() .set("spark.driver.extraClassPath", os.environ['SPARK_CLASSPATH'])) if SparkContext._active_spark_context is None: SparkContext(conf=conf) yield SparkContext._active_spark_context # TearDown SparkContext.stop(SparkContext._active_spark_context) def test_convert_dictionary(): dictionary = {"key": "value"} map = ScalaMap(dictionary)._to_java() assert map.apply("key") == "value" def test_convert_list(): list = ["features", "label", "else"] s_list = ScalaList(list)._to_java() assert s_list.apply(0) == "features" assert s_list.apply(1) == "label" assert s_list.apply(2) == "else" def test_convert_option(): list = ["features", "label", "else"] option = Option(list)._to_java() assert option.get().apply(0) == "features"
24.76087
79
0.697103
138
1,139
5.514493
0.384058
0.063075
0.09067
0.118265
0
0
0
0
0
0
0
0.004228
0.169447
1,139
45
80
25.311111
0.800211
0.007024
0
0.071429
0
0
0.117803
0.023915
0
0
0
0
0.178571
1
0.142857
false
0
0.178571
0
0.321429
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cc07fe278fde69f39bb0b891fbc01b7d26bc5c0
645
py
Python
Proj2/neuralnetworks/dropout.py
sebemery/EE559-DeepLearning-MiniProjects
34ea114884e2e4f1416d63fa53466619e6a5c5fd
[ "MIT" ]
null
null
null
Proj2/neuralnetworks/dropout.py
sebemery/EE559-DeepLearning-MiniProjects
34ea114884e2e4f1416d63fa53466619e6a5c5fd
[ "MIT" ]
null
null
null
Proj2/neuralnetworks/dropout.py
sebemery/EE559-DeepLearning-MiniProjects
34ea114884e2e4f1416d63fa53466619e6a5c5fd
[ "MIT" ]
1
2020-03-18T18:58:02.000Z
2020-03-18T18:58:02.000Z
import torch import numpy as np from .base import Module class Dropout(Module): def __init__(self, p, input_size, seed=0): self.p = p self.generator = np.random.RandomState(seed) self.activation = self.generator.binomial(size=input_size, n=1, p=1-p) self.activation = torch.from_numpy(self.activation).float() self.train = True def set_training(self, b): self.train = b def forward(self, input): if self.train: self.output = input*self.activation else: self.output = input def backward(self, grad): return self.activation*grad
24.807692
78
0.624806
86
645
4.593023
0.44186
0.177215
0.075949
0
0
0
0
0
0
0
0
0.006383
0.271318
645
25
79
25.8
0.834043
0
0
0
0
0
0
0
0
0
0
0
0
1
0.210526
false
0
0.157895
0.052632
0.473684
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
5cc149375b44096dc121a4abf69408bc16d3f4e2
401
py
Python
python/controls/datepicker/datepicker_with_change_event.py
pglet/pglet-samples
ab47e797a4daccfa4779daa3d1fd1cc27d92e7f9
[ "MIT" ]
null
null
null
python/controls/datepicker/datepicker_with_change_event.py
pglet/pglet-samples
ab47e797a4daccfa4779daa3d1fd1cc27d92e7f9
[ "MIT" ]
null
null
null
python/controls/datepicker/datepicker_with_change_event.py
pglet/pglet-samples
ab47e797a4daccfa4779daa3d1fd1cc27d92e7f9
[ "MIT" ]
null
null
null
from datetime import datetime import pglet from pglet import DatePicker, Text with pglet.page("datepicker-with-change-event") as page: def datepicker_changed(e): t.value = f"DatePicker value changed to {dp.value}" t.update() now = datetime.now() t = Text() dp = DatePicker(label="Start date", value=now, width=150, on_change=datepicker_changed) page.add(dp, t) input()
28.642857
89
0.698254
58
401
4.775862
0.5
0.101083
0
0
0
0
0
0
0
0
0
0.009146
0.182045
401
14
90
28.642857
0.835366
0
0
0
0
0
0.189055
0.069652
0
0
0
0
0
1
0.083333
false
0
0.25
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cc2c89702cb6bfef8fed47f8984c04ad8a693d6
250
py
Python
ExPy/ExPy/module30.py
brad-h/expy
d3f3dfbbdae31ab8c7e134a5ce9d5f6adf94b516
[ "MIT" ]
null
null
null
ExPy/ExPy/module30.py
brad-h/expy
d3f3dfbbdae31ab8c7e134a5ce9d5f6adf94b516
[ "MIT" ]
null
null
null
ExPy/ExPy/module30.py
brad-h/expy
d3f3dfbbdae31ab8c7e134a5ce9d5f6adf94b516
[ "MIT" ]
null
null
null
""" Multiplication Table """ def ex30(): """Generate a multiplication table""" for left in range(13): for right in range(13): print('{} X {} = {}'.format(left, right, left * right)) if __name__ == '__main__': ex30()
22.727273
67
0.556
29
250
4.517241
0.62069
0.290076
0.137405
0
0
0
0
0
0
0
0
0.043478
0.264
250
10
68
25
0.668478
0.208
0
0
1
0
0.108108
0
0
0
0
0
0
1
0.166667
false
0
0
0
0.166667
0.166667
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
5cc5b7cbbc5f291340922f4de02d42c9001eb684
5,312
py
Python
interactive_bots/test/test_commons/test_form_crawler.py
dmitrijbozhkov/emergenecy-medicine-data
7fea6b2c76a180c5e4c145a7fa6c83ae3e7af7bc
[ "Apache-2.0" ]
null
null
null
interactive_bots/test/test_commons/test_form_crawler.py
dmitrijbozhkov/emergenecy-medicine-data
7fea6b2c76a180c5e4c145a7fa6c83ae3e7af7bc
[ "Apache-2.0" ]
null
null
null
interactive_bots/test/test_commons/test_form_crawler.py
dmitrijbozhkov/emergenecy-medicine-data
7fea6b2c76a180c5e4c145a7fa6c83ae3e7af7bc
[ "Apache-2.0" ]
null
null
null
""" Tests for form crawlers """ from unittest import TestCase, main from unittest.mock import Mock from functools import partial from interactive_bots.commons.form_crawler import FormActionOptions, FormCrawler class FormActionOptionsTestCase(TestCase): """ Test case for FormActionOptions class """ def setUp(self): self.driver_mock = Mock() self.form_action = FormActionOptions(self.driver_mock) self.navigate_mock = Mock() self.action_mock = Mock() self.data_mock = Mock() def test_set_actions_should_set_navigate(self): """ set_actions should take function for navigate and make partial with driver """ self.navigate_mock.side_effect = lambda x: self.assertTrue(self.driver_mock is x) self.form_action.set_actions(self.navigate_mock, self.action_mock, self.data_mock) self.form_action.navigate() def test_set_actions_should_set_data(self): """ set_actions should take function for data and make partial with driver """ self.data_mock.side_effect = lambda x: self.assertTrue(self.driver_mock is x) self.form_action.set_actions(self.navigate_mock, self.action_mock, self.data_mock) self.form_action.data() def test_set_actions_should_set_action(self): """ set_actions should take function for action and make partial with driver """ self.action_mock = lambda x: self.assertTrue(self.driver_mock is x) self.form_action.set_actions(self.navigate_mock, self.action_mock, self.action_mock) self.form_action.action() def test_reset_accumulator_should_set_acc_to_0(self): """ reset_accumulator should set acc to 0 """ self.form_action.acc = 12 self.form_action.reset_accumulator() self.assertEqual(self.form_action.acc, 0) def test_iteration_should_stop_iteration_if_acc_is_False(self): """ Iteration through actions should stop if accumulator passed from action is false """ self.navigate_mock.return_value = [] self.action_mock.return_value = False self.form_action.set_actions(self.navigate_mock, self.action_mock, self.data_mock) self.assertRaises(StopIteration, partial(next, self.form_action)) def 
test_iteration_should_pass_acc_to_data(self): """ acc should be passed to data if True """ acc = ["stuff"] self.navigate_mock.return_value = [1] self.action_mock.return_value = acc self.data_mock.side_effect = lambda d, a: self.assertTrue(a is acc) self.form_action.set_actions(self.navigate_mock, self.action_mock, self.data_mock) next(self.form_action) def test_iteration_should_return_from_data(self): """ Iteration through FormActionOptions should return wahtever data returned """ val = 1 self.navigate_mock.return_value = [1] self.data_mock.return_value = val self.form_action.set_actions(self.navigate_mock, self.action_mock, self.data_mock) self.assertEqual(next(self.form_action), val) class FormCrawlerTestCase(TestCase): """ Test case for FormCrawler """ def setUp(self): self.form_crawler = FormCrawler() def test_add_action_should_add_action_to_list(self): """ add_action method should append action to actions list """ act = Mock() self.form_crawler.add_action(act) self.assertTrue(act is self.form_crawler.actions[0]) def test_remove_action_should_remove_action(self): """ remove_action should remove action from actions list by given index """ act = Mock() self.form_crawler.add_action(act) self.form_crawler.remove_action(0) self.assertEqual(len(self.form_crawler.actions), 0) def test_crawl_should_set_header(self): """ crawl should call writeheader before writing anything else """ writer = Mock() option = FormActionOptions(Mock()) option.set_actions(Mock(return_value=[]), Mock(return_value=False), Mock()) self.form_crawler.add_action(option) self.form_crawler.crawl(writer) writer.writeheader.assert_called_once() def test_crawl_should_write_row_of_all_values(self): """ crawl should write row from dictionary with all the fields passed by actions data function """ write_dict = {"foo": 1, "bar": 2} writer = Mock() writer.writerow = lambda d: self.assertEqual(d, write_dict) def counter(d, l, a): if not a: return True else: return False option1 = FormActionOptions(Mock()) 
option2 = FormActionOptions(Mock()) option1.set_actions(Mock(return_value=[1]), Mock(side_effect=counter), Mock(return_value={"foo": write_dict["foo"]})) option2.set_actions(Mock(return_value=[1]), Mock(side_effect=counter), Mock(return_value=[{"bar": write_dict["bar"]}])) self.form_crawler.add_action(option1) self.form_crawler.add_action(option2) self.form_crawler.crawl(writer) def test_crawl_should_throw_exception_if_actions_list_is_empty(self): """ crawl should throw IndexError if actions is empty """ self.assertRaises(IndexError, partial(self.form_crawler.crawl, Mock())) if __name__ == "__main__": main()
46.191304
127
0.69823
703
5,312
5.002845
0.167852
0.063691
0.063691
0.040944
0.466022
0.388684
0.325562
0.260449
0.217799
0.19619
0
0.004974
0.205196
5,312
114
128
46.596491
0.828044
0.164157
0
0.202381
0
0
0.00713
0
0
0
0
0
0.142857
1
0.178571
false
0.011905
0.047619
0
0.27381
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cc84883f5a1ed2810b170d833c1b4dcfd7d3d83
872
py
Python
django/stock/urls.py
phantomhieve/cashcenter
642a496c5e60847f4c4a6cc2c80b957ae2d6285b
[ "MIT" ]
null
null
null
django/stock/urls.py
phantomhieve/cashcenter
642a496c5e60847f4c4a6cc2c80b957ae2d6285b
[ "MIT" ]
4
2021-04-08T22:00:11.000Z
2021-09-22T19:42:08.000Z
django/stock/urls.py
phantomhieve/cashcenter
642a496c5e60847f4c4a6cc2c80b957ae2d6285b
[ "MIT" ]
null
null
null
from django.urls import path from django.contrib.auth.decorators import login_required from .views import ( StockListView, stockAddView, stockDeleteView, autoComplete ) urlpatterns = [ path( '', login_required(StockListView.as_view(), login_url='/user/login/'), name='stock_list' ), path( 'add/', login_required(stockAddView, login_url='/user/login/'), name='stock_add' ), path( 'add/<int:id>/', login_required(stockAddView, login_url='/user/login/'), name='stock_update' ), path( 'delete/<int:id>/', login_required(stockDeleteView, login_url='/user/login/'), name='stock_delete' ), path( 'autocomplete/', login_required(autoComplete, login_url='/user/login/'), name='stock_autocomplete' ), ]
24.222222
74
0.595183
86
872
5.837209
0.325581
0.155378
0.119522
0.169323
0.358566
0.358566
0.203187
0.203187
0.203187
0
0
0
0.263761
872
36
75
24.222222
0.781931
0
0
0.342857
0
0
0.191294
0
0
0
0
0
0
1
0
false
0
0.085714
0
0.085714
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
5cc96bccafaee3453353f6c6ebd4f4f2e2c88027
1,828
py
Python
tests/test_reader.py
hsuchristine/code_challenge
de82243a685e465a01445cd700f156cbf9b89572
[ "MIT" ]
1
2020-06-29T01:53:30.000Z
2020-06-29T01:53:30.000Z
tests/test_reader.py
hsuchristine/code_challenge
de82243a685e465a01445cd700f156cbf9b89572
[ "MIT" ]
2
2022-01-13T01:54:57.000Z
2022-03-12T00:07:09.000Z
tests/test_reader.py
hsuchristine/code_challenge
de82243a685e465a01445cd700f156cbf9b89572
[ "MIT" ]
null
null
null
"""Unit test for DataReader (public methods only)""" import unittest import numpy as np import os from dicom_data_preprocess import parsing from dicom_data_preprocess.reader import DataReader __author__ = 'Christine Hsu' class TestReader(unittest.TestCase): @classmethod def setUpClass(TestReader): TestReader.download_data_path = 'tests/data/sample-batchset/' TestReader.data_basepath = 'tests/data/output_data/' TestReader.logs_path = 'tests/logs/', TestReader.plots_path = 'tests/plots/' TestReader.contour_type = 'i-contours' TestReader.save_plot = False TestReader.dicoms_basepath = os.path.join(TestReader.download_data_path, 'dicoms') TestReader.contours_basepath = os.path.join(TestReader.download_data_path, 'contourfiles') TestReader.link_filepath = os.path.join(TestReader.download_data_path, 'link.csv') link_tuples = DataReader._read_link(TestReader, TestReader.link_filepath) TestReader.sample_tuples = DataReader._assemble_link(TestReader, link_tuples) def test_load_samples(self): print('\nTesting the loading of eight assembled samples...') reader = DataReader(download_data_path=TestReader.download_data_path, data_basepath=TestReader.data_basepath, logs_path=TestReader.logs_path, plots_path=TestReader.plots_path, contour_type=TestReader.contour_type, save_plot=TestReader.save_plot) images, masks, metadata = reader.load_samples(TestReader.sample_tuples) self.assertTrue(isinstance(images, list)) self.assertTrue(isinstance(masks, list)) self.assertTrue(isinstance(metadata, list)) self.assertTrue(isinstance(images[0], np.ndarray)) self.assertEqual(masks[0].dtype, np.bool) self.assertTrue(isinstance(metadata[0], str)) reader.plot_samples(images, masks, metadata, 'test_load_samples.jpg') if __name__ == "__main__": unittest.main()
35.153846
92
0.791028
231
1,828
5.991342
0.341991
0.052023
0.069364
0.093931
0.089595
0.089595
0.089595
0.063584
0
0
0
0.001827
0.101751
1,828
51
93
35.843137
0.841048
0.025164
0
0
0
0
0.113739
0.039977
0
0
0
0
0.157895
1
0.052632
false
0
0.131579
0
0.210526
0.026316
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cc99818ef3d420ba12270f6fd7f4e8403fb924e
4,064
py
Python
lib/dashboard/logger.py
hexueyuan/Adanos
b35873fc88b61dabda49c85f0e2b2d126731d34f
[ "MIT" ]
null
null
null
lib/dashboard/logger.py
hexueyuan/Adanos
b35873fc88b61dabda49c85f0e2b2d126731d34f
[ "MIT" ]
8
2020-07-17T01:49:53.000Z
2022-02-17T22:58:31.000Z
lib/dashboard/logger.py
hexueyuan/Adanos
b35873fc88b61dabda49c85f0e2b2d126731d34f
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import logging import logging.config import logging.handlers class Logger: _default_conf = { "version": 1, "disable_existing_loggers": False, "formatters": { "default": { "format": "[%(asctime)s][%(name)s][%(levelname)s][%(filename)s:%(lineno)d]: %(message)s", "datefmt": "%d-%M-%Y %H:%M:%S" } }, "handlers": { "defaultHandler": { "class":"logging.StreamHandler", "level":"DEBUG", "formatter":"default", "stream":"ext://sys.stdout" } }, "root": { "level": "DEBUG", "handlers": ['defaultHandler'] } } _current_conf = None _logger = None _register_loggers = ['root'] def __init__(self, conf=None): if conf is not None and not getattr(conf, 'get'): raise TypeError("conf has no get method") self._current_conf = self._default_conf if conf is not None: self._current_conf['formatters'].update(conf.get('formatters', {})) self._current_conf['handlers'].update(conf.get('handlers', {})) self._current_conf['loggers'] = conf.get('loggers', {}) #set default propagate = 0 for logger in self._current_conf['loggers'].values(): logger['propagate'] = 0 try: logging.config.dictConfig(self._current_conf) except ValueError: self._current_conf = self._default_conf logging.config.dictConfig(self._current_conf) logging.getLogger("defaultLogger").exception("logger config error.") finally: self._logger = logging.getLogger("defaultLogger") for key in self._current_conf.get('loggers', {}).keys(): self._register_loggers.append(key) def getLogger(self, name): if name == "root": return self._logger if name in self._register_loggers: return logging.getLogger(name) else: raise NameError("No this logger: {}".format(name)) if __name__ == "__main__": conf = { "formatters": { "default": { "format": "[%(asctime)s][%(name)s][%(levelname)s][%(filename)s:%(lineno)d]: %(message)s", "datefmt": "%d-%M-%Y %H:%M:%S" } }, "handlers": { "consoleHandler": { "class":"logging.StreamHandler", "level":"NOTSET", "formatter":"default", "stream":"ext://sys.stdout" }, "fileHandler": { "class": 
"logging.FileHandler", "level": "NOTSET", "formatter": "default", "filename": "testHandler2.log" } }, "loggers": { "testLogger1": { "handlers": ["consoleHandler"], "level": "INFO" }, "testLogger2": { "handlers": ["fileHandler"], "level": "DEBUG" } } } loggerHome = Logger(conf) #root = loggerHome.getLogger('root') #root.debug('this is a debug message') #root.info('this is a info message') #root.warn('this is a warning message') #root.error('this is a error message') #root.fatal('this is a fatal message') testLogger1 = loggerHome.getLogger('testLogger1') testLogger1.debug('this is a debug message') testLogger1.info('this is a info message') testLogger1.warn('this is a warning message') testLogger1.error('this is a error message') testLogger1.fatal('this is a fatal message') testLogger2 = loggerHome.getLogger('testLogger2') testLogger2.debug('this is a debug message') testLogger2.info('this is a info message') testLogger2.warn('this is a warning message') testLogger2.error('this is a error message') testLogger2.fatal('this is a fatal message')
33.04065
105
0.532726
397
4,064
5.329975
0.239295
0.042533
0.049622
0.017013
0.365312
0.351134
0.086011
0.086011
0.086011
0.086011
0
0.007631
0.322835
4,064
122
106
33.311475
0.761265
0.065207
0
0.245098
0
0.019608
0.293668
0.051187
0
0
0
0
0
1
0.019608
false
0
0.029412
0
0.117647
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5ccbceb319266f245b6042a34c87089c38999e11
846
py
Python
FileFolder/tempFile.py
jwannebo3524/Attendence
e8ff3f7457337c0516b1e53f2918b9a87f3f1de4
[ "Unlicense", "MIT" ]
null
null
null
FileFolder/tempFile.py
jwannebo3524/Attendence
e8ff3f7457337c0516b1e53f2918b9a87f3f1de4
[ "Unlicense", "MIT" ]
2
2021-09-17T16:56:28.000Z
2021-11-02T00:57:32.000Z
FileFolder/tempFile.py
jwannebo3524/Attendence
e8ff3f7457337c0516b1e53f2918b9a87f3f1de4
[ "Unlicense", "MIT" ]
null
null
null
from tempfile import NamedTemporaryFile import shutil import csv import datetime import time #filename = 'tmpEmployeeDatabase.csv' tempfile = NamedTemporaryFile('w+t', newline='', delete=False) class tempFile: def __init__ (): filename = "" + str(datetime.date().month) + str(datetime.date().day) + str((datetime.date().year) - 2000) + "-WildStang_Attendance.csv" def createTemp (): tempfile = NamedTemporaryFile('w+t', newline='', delete=False) def findID (): x=2 with open(filename, 'r', newline='') as csvFile, tempfile: reader = csv.reader(csvFile, delimiter=',', quotechar='"') writer = csv.writer(tempfile, delimiter=',', quotechar='"') for row in reader: row[1] = row[1].title() writer.writerow(row) shutil.move(tempfile.name, filename)
26.4375
144
0.637116
93
846
5.741935
0.505376
0.061798
0.08427
0.104869
0.172285
0.172285
0.172285
0
0
0
0
0.010495
0.211584
846
31
145
27.290323
0.790105
0.042553
0
0.1
0
0
0.044499
0.030902
0
0
0
0
0
1
0.15
false
0
0.25
0
0.45
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cce94b475009bd0fb2fec116696fc64c40388d6
121
py
Python
Pacote Download/Python/OlaMundo.py
DiegoGomesFerreira/Python
bcc33f8c1c77078e8be49333c008d74c59fe0221
[ "MIT" ]
null
null
null
Pacote Download/Python/OlaMundo.py
DiegoGomesFerreira/Python
bcc33f8c1c77078e8be49333c008d74c59fe0221
[ "MIT" ]
null
null
null
Pacote Download/Python/OlaMundo.py
DiegoGomesFerreira/Python
bcc33f8c1c77078e8be49333c008d74c59fe0221
[ "MIT" ]
1
2022-03-28T17:38:52.000Z
2022-03-28T17:38:52.000Z
from tkinter import * janela = Tk() botao = Button(janela, text="Click") botao.grid(row=0, column=0) janela.mainloop()
15.125
36
0.702479
18
121
4.722222
0.777778
0
0
0
0
0
0
0
0
0
0
0.019048
0.132231
121
8
37
15.125
0.790476
0
0
0
0
0
0.040984
0
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
5ccfae5fa95146d589ab65e046f6f95b2dcb1775
3,205
py
Python
imagingresponse/explore_layouts.py
marivasq/gamma-ai
735953e80901afea3e5cdeb2a7b27c9ab5725434
[ "MIT" ]
6
2020-01-29T07:24:14.000Z
2022-03-16T10:05:25.000Z
imagingresponse/explore_layouts.py
marivasq/gamma-ai
735953e80901afea3e5cdeb2a7b27c9ab5725434
[ "MIT" ]
6
2020-07-03T00:31:10.000Z
2021-09-10T07:45:01.000Z
imagingresponse/explore_layouts.py
marivasq/gamma-ai
735953e80901afea3e5cdeb2a7b27c9ab5725434
[ "MIT" ]
5
2019-02-27T22:56:49.000Z
2019-08-24T19:01:41.000Z
################################################################################################### # # # Copyright (C) by Shivani Kishnani & Andreas Zoglauer. # All rights reserved. # # Please see the file License.txt in the main repository for the copyright-notice. # ################################################################################################### ################################################################################################### import os import sys import argparse import itertools from ToyModel3DCone import ToyModel3DCone import signal ################################################################################################### """ This program loops over different layout and determines their performance For all the command line options, try: python3 explorelayouts.py --help """ parser = argparse.ArgumentParser(description='Passing in values to run ToyModel3DCone to test different layouts') parser.add_argument('-f', '--file', default='changethis.txt', help='File name used for training/testing') parser.add_argument('-o', '--output', default='output.txt', help='The output file name where the final results will be stored') parser.add_argument('-l', '--hiddenlayers', default='3', help='Number of hidden layers. Default: 3') parser.add_argument('-n', '--startingnode', default='10', help='Number of nodes to start with. 
Default: 50') parser.add_argument('-m', '--multfactor', default='10', help='Number that is to be multiplied to starting nodes to get layers of new file') parser.add_argument('-a', '--activation', default='relu', help='Name of default activation layer to be applied') parser.add_argument('-mn', '--maxNode', default='50', help='Maximum number of nodes in a layer') parser.add_argument('-t', '--time', default='600', help='Time in seconds to run the model for') args = parser.parse_args() hiddenLayers = int(args.hiddenlayers) multFactor = int(args.multfactor) startingNode = int(args.startingnode) maxNode = int(args.maxNode) LayoutList = [] output = args.output filew = open(output,"w+") #Step 0: Take care of Ctrl+C Interrupted = False NInterrupts = 0 def signal_handler(signal, frame): print("You pressed Ctrl+C! inside explore_layouts!") global Interrupted Interrupted = True global NInterrupts NInterrupts += 1 if NInterrupts >= 3: print("Aborting!") filew.close() System.exit(0) signal.signal(signal.SIGINT, signal_handler) # Step 1: Create function to get layout def create_layout(node, numLayers): layer_list = [node] while numLayers > 0 and node!= 0: add = node*multFactor layer_list.append(node*multFactor) node = add numLayers -= 1 return layer_list # Step 2: Create list of layouts for NN for Layout in list(create_layout(x, hiddenLayers) for x in range(startingNode, maxNode+1, 10)): LayoutList.append(Layout) print(Layout) # Step 3: Loop over all layouts and record performance for Layout in LayoutList: ToyModel3DCone(filew, Layout, args.activation) filew.close() print("Finished!") # END ###################################################################################################
33.041237
139
0.60936
372
3,205
5.204301
0.419355
0.03719
0.070248
0.019628
0
0
0
0
0
0
0
0.011565
0.136661
3,205
96
140
33.385417
0.68811
0.099532
0
0.039216
0
0
0.281601
0
0
0
0
0
0
1
0.039216
false
0.019608
0.117647
0
0.176471
0.078431
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cd04bfaaa27c8c69618c51262862dca892f4515
4,013
py
Python
examples/cloudml-sentiment-analysis/preprocessing/preprocess.py
ruchirjain86/professional-services
739ac0f5ffc8237f750804fa9f0f14d4d918a0fa
[ "Apache-2.0" ]
2,116
2017-05-18T19:33:05.000Z
2022-03-31T13:34:48.000Z
examples/cloudml-sentiment-analysis/preprocessing/preprocess.py
ruchirjain86/professional-services
739ac0f5ffc8237f750804fa9f0f14d4d918a0fa
[ "Apache-2.0" ]
548
2017-05-20T05:05:35.000Z
2022-03-28T16:38:12.000Z
examples/cloudml-sentiment-analysis/preprocessing/preprocess.py
ruchirjain86/professional-services
739ac0f5ffc8237f750804fa9f0f14d4d918a0fa
[ "Apache-2.0" ]
1,095
2017-05-19T00:02:36.000Z
2022-03-31T05:21:39.000Z
# Copyright 2018 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Defines data preprocessing pipeline.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import enum import os import random import apache_beam as beam from apache_beam.io import tfrecordio from apache_beam.pvalue import TaggedOutput from tensorflow import gfile from tensorflow import logging from tensorflow_transform.coders import example_proto_coder from tensorflow_transform.tf_metadata import dataset_schema from constants import constants from utils import utils class _DatasetType(enum.Enum): """Encodes integer values to differentiate train, validation, test sets.""" UNSPECIFIED = 0 TRAIN = 1 VAL = 2 class _SplitData(beam.DoFn): """DoFn that randomly splits records in training / validation sets.""" def process(self, element, train_size, val_label): """Randomly assigns element to training or validation set.""" if random.random() > train_size: yield TaggedOutput(val_label, element) else: yield element class ReadFile(beam.DoFn): """DoFn to read and label files.""" def process(self, element): labels = { constants.SUBDIR_POSITIVE: constants.POSITIVE_SENTIMENT_LABEL, constants.SUBDIR_NEGATIVE: constants.NEGATIVE_SENTIMENT_LABEL } found_labels = [labels[l] for l in labels if l in element] if len(found_labels) > 1: raise ValueError('Incompatible path: `{}`.'.format(element)) if found_labels: with gfile.GFile(element, 'r') as single_file: for line 
in single_file: yield {constants.LABELS: found_labels[0], constants.REVIEW: line} else: logging.debug('Label not found for file: `%s`.', element) @beam.ptransform_fn def shuffle(p): """Shuffles data from PCollection. Args: p: PCollection. Returns: PCollection of shuffled data. """ class _AddRandomKey(beam.DoFn): def process(self, element): yield random.random(), element shuffled_data = ( p | 'PairWithRandom' >> beam.ParDo(_AddRandomKey()) | 'GroupByRandom' >> beam.GroupByKey() | 'DropRandom' >> beam.FlatMap(lambda (k, vs): vs)) return shuffled_data def run(p, params): """Defines Beam preprocessing pipeline. Performs the following: - Reads text files from pattern. - Split text files in train and validation sets. Args: p: PCollection, initial pipeline. params: Object holding a set of parameters as name-value pairs. """ path_pattern = os.path.join(params.input_dir, '*', '*{}'.format( constants.FILE_EXTENSION)) data = ( p | 'ListFiles' >> beam.Create(gfile.Glob(path_pattern)) | 'ReadFiles' >> beam.ParDo(ReadFile()) | 'SplitData' >> beam.ParDo( _SplitData(), train_size=params.train_size, val_label=_DatasetType.VAL.name).with_outputs( _DatasetType.VAL.name, main=_DatasetType.TRAIN.name)) schema = dataset_schema.from_feature_spec(utils.get_processed_data_schema()) for dataset in _DatasetType: if not dataset.value: continue _ = ( data[dataset.name] | 'Shuffle{}'.format(dataset.name) >> shuffle() # pylint: disable=no-value-for-parameter | 'WriteFiles{}'.format(dataset.name) >> tfrecordio.WriteToTFRecord( os.path.join(params.output_dir, dataset.name + constants.TFRECORD), coder=example_proto_coder.ExampleProtoCoder(schema)))
30.172932
97
0.705956
505
4,013
5.473267
0.411881
0.021708
0.017366
0.022793
0
0
0
0
0
0
0
0.004029
0.195863
4,013
132
98
30.401515
0.852495
0.146275
0
0.054795
0
0
0.052997
0
0
0
0
0
0
0
null
null
0
0.205479
null
null
0.013699
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
5cd096bb54ddc46b06b8bf177a453f80287a8129
24,252
py
Python
inspect4py/utils.py
SoftwareUnderstanding/inspect4py
9c4d7252535082ad938b26baf281d93f3a27285e
[ "BSD-3-Clause" ]
2
2022-02-15T20:30:57.000Z
2022-03-17T00:50:37.000Z
inspect4py/utils.py
SoftwareUnderstanding/inspect4py
9c4d7252535082ad938b26baf281d93f3a27285e
[ "BSD-3-Clause" ]
14
2022-01-25T14:03:50.000Z
2022-03-28T13:21:08.000Z
inspect4py/utils.py
SoftwareUnderstanding/inspect4py
9c4d7252535082ad938b26baf281d93f3a27285e
[ "BSD-3-Clause" ]
null
null
null
import ast import os import subprocess from pathlib import Path from json2html import * from inspect4py.parse_setup_files import inspect_setup from inspect4py.structure_tree import DisplayablePath, get_directory_structure def print_summary(json_dict): """ This method prints a small summary of the classes and properties recognized during the analysis. At the moment this method is only invoked when a directory with multiple files is passed. """ folders = 0 files = 0 dependencies = 0 functions = 0 classes = 0 for key, value in json_dict.items(): if "/" in key: folders += 1 if isinstance(value, list): for element in value: files += 1 if "dependencies" in element: dependencies += len(element["dependencies"]) if "functions" in element: functions += len(element["functions"]) if "classes" in element: classes += len(element["classes"]) print("Analysis completed") print("Total number of folders processed (root folder is considered a folder):", folders) print("Total number of files found: ", files) print("Total number of classes found: ", classes) print("Total number of dependencies found in those files", dependencies) print("Total number of functions parsed: ", functions) def extract_directory_tree(input_path, ignore_dirs, ignore_files, visual=0): """ Method to obtain the directory tree of a repository. The ignored directories and files that were inputted are also ignored. :input_path path of the repo to """ ignore_set = ['.git', '__pycache__', '.idea', '.pytest_cache'] ignore_set = tuple(list(ignore_dirs) + list(ignore_files) + ignore_set) if visual: paths = DisplayablePath.make_tree(Path(input_path), criteria=lambda path: True if path.name not in ignore_set and not os.path.join("../", path.name).endswith(".pyc") else False) for path in paths: print(path.displayable()) return get_directory_structure(input_path, ignore_set) def prune_json(json_dict): """ Method that given a JSON object, removes all its empty fields. This method simplifies the resultant JSON. 
:param json_dict input JSON file to prune :return JSON file removing empty values """ final_dict = {} if not (isinstance(json_dict, dict)): # Ensure the element provided is a dict return json_dict else: for a, b in json_dict.items(): if b or isinstance(b, bool): if isinstance(b, dict): aux_dict = prune_json(b) if aux_dict: # Remove empty dicts final_dict[a] = aux_dict elif isinstance(b, list): aux_list = list(filter(None, [prune_json(i) for i in b])) if len(aux_list) > 0: # Remove empty lists final_dict[a] = aux_list else: final_dict[a] = b return final_dict def extract_requirements(input_path): print("Finding the requirements with the pigar package for %s" % input_path) try: file_name = 'requirements_' + os.path.basename(input_path) + '.txt' # Attention: we can modify the output of pigar, if we use echo N. # Answering yes (echo y), we allow searching for PyPI # for the missing modules and filter some unnecessary modules. cmd = 'echo y | pigar -P ' + input_path + ' --without-referenced-comments -p ' + file_name # cmd = 'echo n | pigar -P ' + input_path + ' --without-referenced-comments -p ' + file_name # print("cmd: %s" %cmd) proc = subprocess.Popen(cmd.encode('utf-8'), shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = proc.communicate() req_dict = {} with open(file_name, "r") as file: lines = file.readlines()[1:] file.close() for line in lines: try: if line != "\n": splitLine = line.split(" == ") req_dict[splitLine[0]] = splitLine[1].split("\n")[0] except: pass # Note: Pigar requirement file is being deleted # in the future we might want to keep it (just commenting the line bellow) os.system('rm ' + file_name) return req_dict except: print("Error finding the requirements in" % input_path) def extract_software_invocation(dir_info, dir_tree_info, input_path, call_list, readme): """ Method to detect the directory type of a software project. 
This method also detects tests We distinguish four main types: script, package, library and service. Some can be more than one. :dir_info json containing all the extracted information about the software repository :dir_tree_info json containing the directory information of the target repo :input_path path of the repository to analyze :call_list json file containing the list of calls per file and functions or methods. :readme content of the readme file of the project (if any) """ software_invocation_info = [] setup_files = ("setup.py", "setup.cfg") server_dependencies = ("flask", "flask_restful", "falcon", "falcon_app", "aiohttp", "bottle", "django", "fastapi", "locust", "pyramid", "hug", "eve", "connexion") # Note: other server dependencies are missing here. More testing is needed. flag_package_library = 0 for directory in dir_tree_info: for elem in setup_files: # first check setup.py, then cfg if elem in dir_tree_info[directory]: # 1. Exploration for package or library software_invocation_info.append(inspect_setup(input_path, elem)) flag_package_library = 1 break # We continue exploration to make sure we continue exploring mains even after detecting this is a # library # Looping across all mains # to decide if it is a service (main + server dep) or just a script (main without server dep) main_files = [] # new list to store the "mains that have been previously classified as "test". test_files_main = [] test_files_no_main = [] # new list to store files without mains body_only_files = [] flag_service_main = 0 for key in dir_info: # filter (lambda key: key not in "directory_tree", dir_info): if key!="requirements": for elem in dir_info[key]: if elem["main_info"]["main_flag"]: flag_service_main = 0 flag_service = 0 main_stored = 0 if elem["is_test"]: test_files_main.append(elem["file"]["path"]) main_stored = 1 else: try: # 2. 
Exploration for services in files with "mains" flag_service, software_invocation_info = service_check(elem, software_invocation_info, server_dependencies, "main", readme) except: main_files.append(elem["file"]["path"]) if flag_service: flag_service_main = 1 if not flag_service and not main_stored: main_files.append(elem["file"]["path"]) elif elem["is_test"]: test_files_no_main.append(elem["file"]["path"]) # Filtering scripts with just body in software invocation elif elem['body']['calls']: body_only_files.append(elem) m_secondary = [0] * len(main_files) flag_script_main = 0 # this list (of lists) stores the mains that each main import import_mains = [] # this list (of lists) stores the mains that each main is imported by imported_by = [None]*len(main_files) # 3. Exploration for main scripts for m in range(0, len(main_files)): m_calls = find_file_calls(main_files[m], call_list) # HERE I STORE WHICH OTHER MAIN FILES CALLS EACH "M" MAIN_FILE m_imports = extract_relations(main_files[m], m_calls, main_files, call_list) # storing those m_imports in the import_mains[m] import_mains.append(m_imports) for m_i in m_imports: m_secondary[main_files.index(m_i)] = 1 if not imported_by[main_files.index(m_i)]: imported_by[main_files.index(m_i)] = [] imported_by[main_files.index(m_i)].append(main_files[m]) for m in range(0, len(main_files)): soft_info = {"type": "script", "run": "python " + main_files[m], "has_structure": "main", "mentioned_in_readme": os.path.basename(os.path.normpath(main_files[m])) in readme, "imports": import_mains[m], "imported_by": imported_by[m]} software_invocation_info.append(soft_info) flag_script_main = 1 # tests with main. 
for t in range(0, len(test_files_main)): # Test files do not have help, they are usually run by themselves soft_info = {"type": "test", "run": "python " + test_files_main[t], "has_structure": "main", "mentioned_in_readme": os.path.basename(os.path.normpath(test_files_main[t])) in readme} software_invocation_info.append(soft_info) # tests with no main. for t in range(0, len(test_files_no_main)): # Test files do not have help, they are usually run by themselves soft_info = {"type": "test", "run": "python " + test_files_no_main[t], "has_structure": "body", "mentioned_in_readme": os.path.basename(os.path.normpath(test_files_no_main[t])) in readme} software_invocation_info.append(soft_info) flag_service_body = 0 flag_script_body = 0 for elem in body_only_files: # 4. Exploration for services in files with body flag_service, software_invocation_info = service_check(elem, software_invocation_info, server_dependencies, "body", readme) if flag_service: flag_service_body = 1 # Only adding this information if we haven't not found libraries, packages, services or scripts with mains. # 5. Exploration for script without main in files with body if not flag_service_main and not flag_service_body and not flag_package_library and not flag_script_main: soft_info = {"type": "script", "run": "python " + elem["file"]["path"], "has_structure": "body", "mentioned_in_readme": elem["file"]["fileNameBase"] + "." + elem["file"][ "extension"] in readme} software_invocation_info.append(soft_info) flag_script_body = 1 # Only adding this information if we haven't not found libraries, packages, services or scripts with mains # or bodies. # 6. 
Exploration for script without main or body in files with body if not flag_script_body and not flag_service_main and not flag_service_body and not flag_package_library \ and not flag_script_main: python_files = [] for directory in dir_tree_info: for elem in dir_tree_info[directory]: if ".py" in elem: python_files.append(os.path.abspath(input_path + "/" + directory + "/" + elem)) for f in range(0, len(python_files)): soft_info = {"type": "script without main", "import": python_files[f], "has_structure": "without_body", "mentioned_in_readme": os.path.basename(os.path.normpath(python_files[f])) in readme} software_invocation_info.append(soft_info) return software_invocation_info def generate_output_html(pruned_json, output_file_html): """ Method to generate a simple HTML view of the obtained JSON. :pruned_json JSON to print out :output_file_html path where to write the HTML """ html = json2html.convert(json=pruned_json) with open(output_file_html, "w") as ht: ht.write(html) def top_level_functions(body): return (f for f in body if isinstance(f, ast.FunctionDef)) def top_level_classes(body): return (c for c in body if isinstance(c, ast.ClassDef)) def parse_module(filename): with open(filename, "rt") as file: return ast.parse(file.read(), filename=filename) def list_functions_classes_from_module(m, path): functions_classes = [] try: # to open a module inside a directory m = m.replace(".", "/") repo_path = Path(path).parent.absolute() abs_repo_path = os.path.abspath(repo_path) file_module = abs_repo_path + "/" + m + ".py" tree = parse_module(file_module) for func in top_level_functions(tree.body): functions_classes.append(func.name) for cl in top_level_classes(tree.body): functions_classes.append(cl.name) type = "internal" except: #module = __import__(m) #functions = dir(module) type = "external" return functions_classes, type def type_module(m, i, path): repo_path = Path(path).parent.absolute() abs_repo_path = os.path.abspath(repo_path) if m: m = m.replace(".", "/") 
file_module = abs_repo_path + "/" + m + "/" + i + ".py" else: file_module = abs_repo_path + "/" + i + ".py" file_module_path = Path(file_module) if file_module_path.is_file(): return "internal" else: return "external" def extract_call_functions(funcs_info, body=0): call_list = {} if body: if funcs_info["body"]["calls"]: call_list["local"] = funcs_info["body"]["calls"] else: for funct in funcs_info: if funcs_info[funct]["calls"]: call_list[funct] = {} call_list[funct]["local"] = funcs_info[funct]["calls"] if funcs_info[funct]["functions"]: call_list[funct]["nested"] = extract_call_functions(funcs_info[funct]["functions"]) return call_list def extract_call_methods(classes_info): call_list = {} for method in classes_info: if classes_info[method]["calls"]: call_list[method] = {} call_list[method]["local"] = classes_info[method]["calls"] if classes_info[method]["functions"]: call_list[method]["nested"] = extract_call_methods(classes_info[method]["functions"]) return call_list def call_list_file(code_info): call_list = {} call_list["functions"] = extract_call_functions(code_info.funcsInfo) call_list["body"] = extract_call_functions(code_info.bodyInfo, body=1) for class_n in code_info.classesInfo: call_list[class_n] = extract_call_methods(code_info.classesInfo[class_n]["methods"]) return call_list def call_list_dir(dir_info): call_list = {} for dir in dir_info: call_list[dir] = {} for file_info in dir_info[dir]: file_path = file_info["file"]["path"] call_list[dir][file_path] = extract_call_functions(file_info["functions"]) for class_n in file_info["classes"]: call_list[dir][file_path][class_n] = extract_call_methods(file_info["classes"][class_n]["methods"]) return call_list def find_file_calls(file_name, call_list): for dir in call_list: for elem in call_list[dir]: if elem in file_name: return call_list[dir][elem] def find_module_calls(module, call_list): for dir in call_list: for elem in call_list[dir]: if "/"+module+"." 
in elem: #print("---MODULE %s, elem %s, giving call_list[%s][%s]" %(module, elem, dir, elem)) return call_list[dir][elem] # DFS algorithm - Allowing up to 2 levels of depth. def file_in_call(base, call, file, m_imports, call_list, orig_base, level): ### NOTE: LEVEL is a parameter very important here! ### It allows us to track how deep we are inside the recursivity search. ### If we want to modify the depth of the recursity, we just need to change the level_depth. level_depth = 2 ## For each call, we extract all its sub_calls (level 1), ## and for each sub_call we extract all its sub_sub_calls (level 2) #### if base in call and m_imports.count(file) == 0 and orig_base not in call: m_imports.append(file) return 1 elif orig_base in call: return 0 elif level < level_depth and call!="": m_calls_extern = {} module_base = call.split(".")[0] module_base = module_base + "." m_calls_extern = find_module_calls(module_base, call_list) # Note: Here is when we increase the level of recursivity level += 1 if m_calls_extern: for m_c in m_calls_extern: flag_found = extract_data(base, m_calls_extern[m_c], file, m_imports, 0, call_list, orig_base, level) if flag_found: return 1 return 0 else: return 0 def extract_local_function(base, m_calls_local, file, m_imports, flag_found, call_list, orig_base, level): for call in m_calls_local: flag_found = file_in_call(base, call, file, m_imports, call_list, orig_base, level) if flag_found: return flag_found return flag_found def extract_nested_function(base, m_calls_nested, file, m_imports, flag_found, call_list, orig_base, level): for call in m_calls_nested: flag_found = extract_data(base, m_calls_nested, file, m_imports, flag_found, call_list, orig_base, level) if flag_found: return flag_found return flag_found def extract_data(base, m_calls, file, m_imports, flag_found, call_list, orig_base, level): for elem in m_calls: if elem == "local": flag_found = extract_local_function(base, m_calls[elem], file, m_imports, flag_found, call_list, 
orig_base, level) elif elem == "nested": flag_found = extract_nested_function(base, m_calls[elem], file, m_imports, flag_found, call_list, orig_base, level) else: flag_found = extract_data(base, m_calls[elem], file, m_imports, flag_found, call_list, orig_base, level) if flag_found: return flag_found return flag_found # We will apply the DFS strategy later to find the external relationships. def extract_relations(file_name, m_calls, main_files, call_list): m_imports = [] orig_base = os.path.basename(file_name) orig_base = os.path.splitext(orig_base)[0] orig_base = orig_base + "." for file in main_files: if file not in file_name: flag_found = 0 base = os.path.basename(file) base = os.path.splitext(base)[0] base = base + "." for m_c in m_calls: level = 0 flag_found = extract_data(base, m_calls[m_c], file, m_imports, flag_found, call_list, orig_base, level) if flag_found: return m_imports return m_imports def service_check(elem, software_invocation_info, server_dependencies, has_structure, readme): flag_service = 0 for dep in elem["dependencies"]: imports = dep["import"] flag_service, software_invocation_info = service_in_set(imports, server_dependencies, elem, software_invocation_info, has_structure, readme) if flag_service: return flag_service, software_invocation_info else: modules = dep["from_module"] flag_service, software_invocation_info = service_in_set(modules, server_dependencies, elem, software_invocation_info, has_structure, readme) if flag_service: return flag_service, software_invocation_info return flag_service, software_invocation_info def service_in_set(data, server_dependencies, elem, software_invocation_info, has_structure, readme): flag_service = 0 if isinstance(data, list): for data_dep in data: if data_dep.lower() in server_dependencies: soft_info = {"type": "service", "run": "python " + elem["file"]["path"], "has_structure": has_structure, "mentioned_in_readme": elem["file"]["fileNameBase"] + "." 
+ elem["file"][ "extension"] in readme} flag_service = 1 if soft_info not in software_invocation_info: software_invocation_info.append(soft_info) else: if data: if data.lower() in server_dependencies: soft_info = {"type": "service", "run": "python " + elem["file"]["path"], "has_structure": has_structure, "mentioned_in_readme": elem["file"]["fileNameBase"] + "." + elem["file"][ "extension"] in readme} flag_service = 1 if soft_info not in software_invocation_info: software_invocation_info.append(soft_info) return flag_service, software_invocation_info def rank_software_invocation(soft_invocation_info_list): """ Function to create a ranking over the different ways of executing a program. If two elements have the same position in the ranking, it means that there is no priority among them. Heuristic to order the invocation list is as follows, in decreasing order of prioritization: - If package or library is detected, this will be always first. - If something (script or service) is mentioned in the readme file, it is considered a priority. - Services are prioritized over scripts - Scripts with main are prioritized over script with body. - Scripts with body are prioritized over scripts with no body. TO DOs: - If a script imports other scripts (or service), it gets prioritized (TO DO when examples are available) - If several scripts are available, those at root level are prioritized (TO DO when examples are available) :param soft_invocation_info_list JSON list with the different ways to execute a program. 
""" if len(soft_invocation_info_list) == 0: return soft_invocation_info_list # Calculate score for every entry in the list for entry in soft_invocation_info_list: score = 0 if "library" in entry["type"] or "package" in entry["type"]: score += 100 try: if entry["mentioned_in_readme"]: score += 10 except: pass if "service" in entry["type"]: score += 5 try: if "main" in entry["has_structure"]: score += 2 if "body" in entry["has_structure"]: score += 1 except: pass entry["ranking"] = score # Reorder vector and assign ranking soft_invocation_info_list.sort(key=lambda x: x["ranking"], reverse=True) # Replace score by number (but keep those with same score with the same ranking) position = 1 previous_score = soft_invocation_info_list[0]["ranking"] for entry in soft_invocation_info_list: current_score = entry["ranking"] if previous_score > current_score: # Ordered in descending order position += 1 previous_score = current_score entry["ranking"] = position return soft_invocation_info_list
41.813793
121
0.616485
3,128
24,252
4.557545
0.144182
0.027497
0.040123
0.012346
0.388608
0.319304
0.282057
0.255261
0.229026
0.210227
0
0.004732
0.294161
24,252
579
122
41.88601
0.828076
0.204643
0
0.285354
0
0
0.084796
0.001524
0
0
0
0
0
1
0.063131
false
0.007576
0.088384
0.005051
0.242424
0.025253
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cd359d31bdc95e8c8ed20bfb7e74a2a5dfecd3f
582
py
Python
PythonStarter/ClassOne.py
fsbd1285228/PythonCodes
e71fcd695e19e3d286ec249113791d0729bec751
[ "MIT" ]
null
null
null
PythonStarter/ClassOne.py
fsbd1285228/PythonCodes
e71fcd695e19e3d286ec249113791d0729bec751
[ "MIT" ]
null
null
null
PythonStarter/ClassOne.py
fsbd1285228/PythonCodes
e71fcd695e19e3d286ec249113791d0729bec751
[ "MIT" ]
null
null
null
class Bird(object): have_feather = True way_of_reproduction = 'egg' def move(self, dx, dy): position = [0,0] position[0] = position[0] + dx position[1] = position[1] + dy return position class Chicken(Bird): way_of_move = 'walk' possible_in_KFC = True class Oriole(Bird): way_of_move = 'fly' possible_in_KFC = False class happyBird(Bird): def __init__(self,more_words): print 'We are happy birds: ', more_words winter = happyBird('Happy') summer = Chicken() print summer.have_feather print summer.move(5,8)
23.28
48
0.647766
81
582
4.432099
0.493827
0.041783
0.05571
0.072423
0
0
0
0
0
0
0
0.018059
0.238832
582
25
49
23.28
0.792325
0
0
0
0
0
0.060034
0
0
0
0
0
0
0
null
null
0
0
null
null
0.142857
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
5cd539226446c2a2f5d8bb7fd1ebb3684b3007da
3,353
py
Python
toolkit/cmds/ls_users.py
suraj-testing2/Flowers_Toilet
21c981531a505a8b74ee42c33a3f4d68ef72d7f3
[ "Apache-2.0" ]
22
2015-01-22T12:10:50.000Z
2021-10-12T03:30:56.000Z
toolkit/cmds/ls_users.py
suraj-testing2/Flowers_Toilet
21c981531a505a8b74ee42c33a3f4d68ef72d7f3
[ "Apache-2.0" ]
null
null
null
toolkit/cmds/ls_users.py
suraj-testing2/Flowers_Toilet
21c981531a505a8b74ee42c33a3f4d68ef72d7f3
[ "Apache-2.0" ]
17
2016-01-28T04:54:39.000Z
2021-10-12T03:30:49.000Z
#!/usr/bin/python # # Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Show a list of users in an Apps Domain. Tool to show usage of Admin SDK Directory APIs. APIs Used: Admin SDK Directory API: user management """ import sys # setup_path required to allow imports from component dirs (e.g. utils) # and lib (where the OAuth and Google API Python Client modules reside). import setup_path # pylint: disable=unused-import,g-bad-import-order from admin_sdk_directory_api import users_api from utils import admin_api_tool_errors from utils import auth_helper from utils import common_flags from utils import file_manager from utils import log_utils FILE_MANAGER = file_manager.FILE_MANAGER def AddFlags(arg_parser): """Handle command line flags unique to this script. Args: arg_parser: object from argparse.ArgumentParser() to accumulate flags. 
""" common_flags.DefineAppsDomainFlagWithDefault(arg_parser) common_flags.DefineForceFlagWithDefaultFalse(arg_parser) common_flags.DefineVerboseFlagWithDefaultFalse(arg_parser) arg_parser.add_argument('--json', action='store_true', default=False, help='Output results to a json file.') arg_parser.add_argument('--first_n', type=int, default=0, help='Show the first n users in the list.') def main(argv): """A script to test Admin SDK Directory APIs.""" flags = common_flags.ParseFlags(argv, 'List domain users.', AddFlags) if flags.json: FILE_MANAGER.ExitIfCannotOverwriteFile(FILE_MANAGER.USERS_FILE_NAME, overwrite_ok=flags.force) http = auth_helper.GetAuthorizedHttp(flags) api_wrapper = users_api.UsersApiWrapper(http) max_results = flags.first_n if flags.first_n > 0 else None try: if flags.json: user_list = api_wrapper.GetDomainUsers(flags.apps_domain, max_results=max_results) else: api_wrapper.PrintDomainUsers(flags.apps_domain, max_results=max_results) except admin_api_tool_errors.AdminAPIToolUserError as e: log_utils.LogError( 'Unable to enumerate users from domain %s.' % flags.apps_domain, e) sys.exit(1) if flags.json: try: filename_path = FILE_MANAGER.WriteJsonFile(FILE_MANAGER.USERS_FILE_NAME, user_list, overwrite_ok=flags.force) except admin_api_tool_errors.AdminAPIToolFileError as e: # This usually means the file already exists and --force not supplied. log_utils.LogError('Unable to write the domain users file.', e) sys.exit(1) print 'Users list written to %s.' % filename_path if __name__ == '__main__': main(sys.argv[1:])
35.294737
78
0.70176
451
3,353
5.04878
0.405765
0.038647
0.032938
0.023715
0.093983
0.030742
0.030742
0
0
0
0
0.004996
0.223979
3,353
94
79
35.670213
0.8701
0.252311
0
0.234043
0
0
0.102373
0
0
0
0
0
0
0
null
null
0
0.170213
null
null
0.021277
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
5cd6e644acdbb15b82dfab8d7e7022c02998853f
2,151
py
Python
days/day10.py
Kurocon/AdventOfCode2020
40ae8e604eb0e3bc0967c220cf868a8194769a6b
[ "BSD-3-Clause" ]
null
null
null
days/day10.py
Kurocon/AdventOfCode2020
40ae8e604eb0e3bc0967c220cf868a8194769a6b
[ "BSD-3-Clause" ]
null
null
null
days/day10.py
Kurocon/AdventOfCode2020
40ae8e604eb0e3bc0967c220cf868a8194769a6b
[ "BSD-3-Clause" ]
null
null
null
from functools import lru_cache from typing import List from days import AOCDay, day @day(10) class Day10(AOCDay): print_debug = "c12" test_input = """16 10 15 5 1 11 7 19 6 12 4""".split("\n") test_input2 = """28 33 18 42 31 14 46 20 48 47 24 23 49 45 19 38 39 11 1 32 25 35 8 17 7 9 4 2 34 10 3""".split("\n") def common(self, input_data): # input_data = self.test_input2 self.input_data = list(map(int, input_data)) def check_smallest_adapter_recurse(self, current_rating, target_rating, adapters_left) -> List[int]: options = [current_rating + i for i in range(1, 4)] for option in options: if option in adapters_left: difference = option - current_rating current_rating = option if current_rating + 3 == target_rating: return [difference, 3] new_adapters = adapters_left[:] new_adapters.remove(option) return self.check_smallest_adapter_recurse(current_rating, target_rating, new_adapters) + [difference] def part1(self, input_data): current_rating = 0 target_rating = max(self.input_data) + 3 adapters_left = self.input_data[:] differences = self.check_smallest_adapter_recurse(current_rating, target_rating, adapters_left) yield len([x for x in differences if x == 1]) * len([x for x in differences if x == 3]) @lru_cache def check_adapter_recurse(self, current_rating, target_rating, adapters) -> int: if current_rating == target_rating: return 1 options = [i for i in adapters if 1 <= i - current_rating <= 3] count = 0 for option in options: count += self.check_adapter_recurse(option, target_rating, adapters) return count def part2(self, input_data): current_rating = 0 target_rating = max(self.input_data) + 3 adapters_plus_builtin = tuple(self.input_data[:] + [target_rating]) differences = self.check_adapter_recurse(current_rating, target_rating, adapters_plus_builtin) yield differences
23.637363
118
0.645281
298
2,151
4.432886
0.315436
0.127933
0.078728
0.11355
0.342922
0.342922
0.336866
0.295231
0.181681
0.096896
0
0.061108
0.269642
2,151
90
119
23.9
0.779758
0.013482
0
0.146341
0
0
0.056132
0
0
0
0
0
0
1
0.060976
false
0
0.036585
0
0.195122
0.012195
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cd8b1b9a2e75158a87d41bc9d2a842af4dc3ce7
642
py
Python
problems/287_find_dup_number.py
apoorvkk/LeetCodeSolutions
1c3461cfc05deb930d0866428eb00362b4338aab
[ "MIT" ]
1
2018-02-03T14:17:18.000Z
2018-02-03T14:17:18.000Z
problems/287_find_dup_number.py
apoorvkk/LeetCodeSolutions
1c3461cfc05deb930d0866428eb00362b4338aab
[ "MIT" ]
null
null
null
problems/287_find_dup_number.py
apoorvkk/LeetCodeSolutions
1c3461cfc05deb930d0866428eb00362b4338aab
[ "MIT" ]
null
null
null
''' URL: https://leetcode.com/problems/find-the-duplicate-number/ Time complexity: O(nlogn) Space complexity: O(1) ''' class Solution(object): def findDuplicate(self, nums): """ :type nums: List[int] :rtype: int """ if len(nums) < 2: return -1 lo, hi = 1, len(nums) - 1 while lo < hi: mid = (lo + hi) // 2 count = 0 for num in nums: if num <= mid: count += 1 if count <= mid: lo = mid + 1 else: # count > mid hi = mid return lo
20.0625
61
0.423676
73
642
3.726027
0.547945
0.044118
0
0
0
0
0
0
0
0
0
0.025788
0.456386
642
31
62
20.709677
0.753582
0.244548
0
0
0
0
0
0
0
0
0
0
0
1
0.0625
false
0
0
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5cda1b0a013daae06c8aa16db5b80089deaa912f
626
py
Python
src/digi_edit/routes.py
simple-digital-edition/digital-edition-editor
89c409a20bc947f576176061f9aa96de0efe4123
[ "MIT" ]
null
null
null
src/digi_edit/routes.py
simple-digital-edition/digital-edition-editor
89c409a20bc947f576176061f9aa96de0efe4123
[ "MIT" ]
8
2021-12-17T16:35:07.000Z
2022-03-03T10:57:13.000Z
src/digi_edit/routes.py
modern-digital-edition/digital-edition-editor
fdf477ff015ceffc691d541110387ee18ee561bc
[ "MIT" ]
null
null
null
def includeme(config): config.add_static_view('static', 'static', cache_max_age=3600) config.add_route('root', '/') config.add_route('ui', '/ui*path') config.add_route('webhooks.github', '/webhooks/github', request_method='POST') config.add_route('webhooks.gitlab', '/webhooks/gitlab', request_method='POST') config.add_route('config', '/config') config.add_route('config.ui', '/config/ui') config.add_route('config.tei_schema', '/config/tei-schema') config.add_route('theme.css', '/theme/theme.css') config.add_route('theme.files', '/theme/*path') config.include('.views.api')
36.823529
82
0.682109
83
626
4.951807
0.337349
0.218978
0.306569
0.145985
0.150852
0.150852
0
0
0
0
0
0.007246
0.118211
626
16
83
39.125
0.737319
0
0
0
0
0
0.354633
0
0
0
0
0
0
1
0.083333
false
0
0
0
0.083333
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
5cdf0538bafcb5aaa18058d4c0a939f600b83ff4
46,926
py
Python
tests/server/services/RecommendReviewers_test.py
elifesciences/peerscout
2e899f268b4712ffb7a09b171de3f3841337d65d
[ "MIT" ]
3
2018-09-10T16:33:14.000Z
2021-04-06T06:09:23.000Z
tests/server/services/RecommendReviewers_test.py
elifesciences/peerscout
2e899f268b4712ffb7a09b171de3f3841337d65d
[ "MIT" ]
247
2018-03-01T12:06:07.000Z
2019-10-25T05:25:16.000Z
tests/server/services/RecommendReviewers_test.py
elifesciences/peerscout
2e899f268b4712ffb7a09b171de3f3841337d65d
[ "MIT" ]
3
2018-03-01T12:25:44.000Z
2019-09-04T00:43:34.000Z
import pprint import logging from contextlib import contextmanager import pytest import pandas as pd from peerscout.shared.database import populated_in_memory_database from peerscout.server.services.ManuscriptModel import ManuscriptModel from peerscout.server.services.DocumentSimilarityModel import DocumentSimilarityModel from peerscout.server.services.manuscript_person_relationship_service import RelationshipTypes from peerscout.server.services.RecommendReviewers import RecommendReviewers, set_debugv_enabled from .test_data import ( PERSON_ID, PERSON_ID1, PERSON_ID2, PERSON_ID3, PERSON1, PERSON2, PERSON3, MANUSCRIPT_VERSION1, MANUSCRIPT_ID1, MANUSCRIPT_ID2, MANUSCRIPT_ID_FIELDS1, MANUSCRIPT_ID_FIELDS2, MANUSCRIPT_ID_FIELDS3, MANUSCRIPT_ID_FIELDS4, MANUSCRIPT_ID_FIELDS5, MANUSCRIPT_VERSION_ID1, MANUSCRIPT_VERSION_ID2, MANUSCRIPT_VERSION_ID3, MANUSCRIPT_TITLE2, MANUSCRIPT_TITLE3, MANUSCRIPT_KEYWORD1, VALID_DECISIONS, VALID_MANUSCRIPT_TYPES, PUBLISHED_DECISIONS, PUBLISHED_MANUSCRIPT_TYPES, Decisions, KEYWORD1 ) MANUSCRIPT_ID = 'manuscript_id' VERSION_ID = 'version_id' MANUSCRIPT_ID_COLUMNS = [VERSION_ID] PERSON_ID_COLUMNS = [PERSON_ID] LDA_DOCVEC_COLUMN = 'lda_docvec' EMAIL_1 = 'email1' ROLE_1 = 'role1' PERSON1_RESULT = { **PERSON1, 'memberships': [], 'dates_not_available': [], 'stats': { 'overall': None, 'last_12m': None } } PERSON2_RESULT = { **PERSON1_RESULT, **PERSON2 } PERSON3_RESULT = { **PERSON1_RESULT, **PERSON3 } MEMBERSHIP1_RESULT = { 'member_type': 'memberme', 'member_id': '12345' } MEMBERSHIP1 = { **MEMBERSHIP1_RESULT, PERSON_ID: PERSON_ID1, } MANUSCRIPT_VERSION1_RESULT = { **MANUSCRIPT_VERSION1, 'authors': [], 'senior_editors': [], 'subject_areas': [], 'is_published': True } MANUSCRIPT_VERSION2_RESULT = { **MANUSCRIPT_VERSION1_RESULT, **MANUSCRIPT_ID_FIELDS2, 'title': MANUSCRIPT_TITLE2 } MANUSCRIPT_VERSION2 = MANUSCRIPT_VERSION2_RESULT MANUSCRIPT_VERSION3_RESULT = { **MANUSCRIPT_VERSION1_RESULT, **MANUSCRIPT_ID_FIELDS3, 'title': MANUSCRIPT_TITLE3 } 
MANUSCRIPT_VERSION3 = MANUSCRIPT_VERSION3_RESULT MANUSCRIPT_VERSION4_RESULT = { **MANUSCRIPT_VERSION1_RESULT, **MANUSCRIPT_ID_FIELDS4 } MANUSCRIPT_VERSION4 = MANUSCRIPT_VERSION4_RESULT MANUSCRIPT_VERSION5_RESULT = { **MANUSCRIPT_VERSION1_RESULT, **MANUSCRIPT_ID_FIELDS5 } MANUSCRIPT_VERSION5 = MANUSCRIPT_VERSION5_RESULT SUBJECT_AREA1 = 'Subject Area 1' SUBJECT_AREA2 = 'Subject Area 2' MANUSCRIPT_SUBJECT_AREA1 = { **MANUSCRIPT_ID_FIELDS1, 'subject_area': SUBJECT_AREA1 } MANUSCRIPT_SUBJECT_AREA2 = { **MANUSCRIPT_ID_FIELDS1, 'subject_area': SUBJECT_AREA2 } DOCVEC1 = [1, 1] DOCVEC2 = [2, 2] ABSTRACT_DOCVEC1 = { **MANUSCRIPT_ID_FIELDS1, LDA_DOCVEC_COLUMN: DOCVEC1 } ABSTRACT_DOCVEC2 = { **MANUSCRIPT_ID_FIELDS2, LDA_DOCVEC_COLUMN: DOCVEC2 } DOI1 = 'doi/1' AUTHOR1 = { **MANUSCRIPT_ID_FIELDS1, PERSON_ID: PERSON_ID1, 'seq': 0, 'is_corresponding_author': False } AUTHOR2 = { **AUTHOR1, **MANUSCRIPT_ID_FIELDS1, PERSON_ID: PERSON_ID2 } AUTHOR3 = { **AUTHOR1, **MANUSCRIPT_ID_FIELDS1, PERSON_ID: PERSON_ID3 } STAGE_CONTACTING_REVIEWERS = 'Contacting Reviewers' STAGE_REVIEW_ACCEPTED = 'Reviewers Accept' STAGE_REVIEW_DECLINE = 'Reviewers Decline' STAGE_REVIEW_COMPLETE = 'Review Received' MANUSCRIPT_HISTORY_REVIEW_COMPLETE1 = { **MANUSCRIPT_ID_FIELDS1, 'stage_name': STAGE_REVIEW_COMPLETE, 'stage_timestamp': pd.Timestamp('2017-01-01'), PERSON_ID: PERSON_ID1 } KEYWORD_SEARCH1 = { 'keywords': [KEYWORD1] } EARLY_CAREER_RESEARCHER_WITH_SUBJECT_AREAS_DATASET = { 'person': [{ **PERSON1, 'is_early_career_researcher': True }, { **PERSON2, 'is_early_career_researcher': True }], 'person_subject_area': [{ 'person_id': PERSON_ID1, 'subject_area': SUBJECT_AREA1 }, { 'person_id': PERSON_ID2, 'subject_area': SUBJECT_AREA2 }] } class PersonRoles: SENIOR_EDITOR = 'Senior Editor' OTHER = 'Other' PP = pprint.PrettyPrinter(indent=2, width=40) def setup_module(): logging.basicConfig(level=logging.DEBUG) set_debugv_enabled(True) logging.getLogger().setLevel(logging.DEBUG) def get_logger(): return 
logging.getLogger('test') @pytest.fixture(name='logger') def _logger_fixture(): return get_logger() @contextmanager def create_recommend_reviewers(dataset, filter_by_subject_area_enabled=False): logger = get_logger() with populated_in_memory_database(dataset) as db: logger.debug("view manuscript_person_review_times:\n%s", db.manuscript_person_review_times.read_frame()) logger.debug("view person_review_stats_overall:\n%s", db.person_review_stats_overall.read_frame()) manuscript_model = ManuscriptModel( db, valid_decisions=VALID_DECISIONS, valid_manuscript_types=VALID_MANUSCRIPT_TYPES, published_decisions=PUBLISHED_DECISIONS, published_manuscript_types=PUBLISHED_MANUSCRIPT_TYPES ) similarity_model = DocumentSimilarityModel( db, manuscript_model=manuscript_model ) yield RecommendReviewers( db, manuscript_model=manuscript_model, similarity_model=similarity_model, filter_by_subject_area_enabled=filter_by_subject_area_enabled ) def recommend_for_dataset(dataset, filter_by_subject_area_enabled=False, **kwargs): with create_recommend_reviewers( dataset, filter_by_subject_area_enabled=filter_by_subject_area_enabled) as recommend_reviewers: result = recommend_reviewers.recommend(**kwargs) get_logger().debug("result: %s", PP.pformat(result)) return result def _potential_reviewers_person_ids(potential_reviewers): return [r['person'][PERSON_ID] for r in potential_reviewers] def _potential_reviewer_scores_by_person_id(potential_reviewers): return {r['person'][PERSON_ID]: r['scores']['keyword'] for r in potential_reviewers} def _potential_reviewer_related_version_ids(potential_reviewers, relationship_type): return { r['person'][PERSON_ID]: set( r.get('related_manuscript_version_ids_by_relationship_type', {}) .get(relationship_type, []) ) for r in potential_reviewers } def _review_complete_stages(id_fields, contacted, accepted, reviewed): return [{ **id_fields, 'stage_name': STAGE_CONTACTING_REVIEWERS, 'stage_timestamp': contacted }, { **id_fields, 'stage_name': 
STAGE_REVIEW_ACCEPTED, 'stage_timestamp': accepted }, { **id_fields, 'stage_name': STAGE_REVIEW_COMPLETE, 'stage_timestamp': reviewed }] def _declined_stages(id_fields, contacted, declined): return [{ **id_fields, 'stage_name': STAGE_CONTACTING_REVIEWERS, 'stage_timestamp': contacted }, { **id_fields, 'stage_name': STAGE_REVIEW_DECLINE, 'stage_timestamp': declined }] def _awaiting_accept_stages(id_fields, contacted): return [{ **id_fields, 'stage_name': STAGE_CONTACTING_REVIEWERS, 'stage_timestamp': contacted }] def _awaiting_review_stages(id_fields, contacted, accepted): return [{ **id_fields, 'stage_name': STAGE_CONTACTING_REVIEWERS, 'stage_timestamp': contacted }, { **id_fields, 'stage_name': STAGE_REVIEW_ACCEPTED, 'stage_timestamp': accepted }] @pytest.mark.slow class TestRecommendReviewers: class TestRecommendReviewersRegular: def test_no_match(self): dataset = { 'person': [PERSON1], 'manuscript_version': [MANUSCRIPT_VERSION1], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1] } result = recommend_for_dataset(dataset, keywords='', manuscript_no='unknown') assert result['matching_manuscripts'] == [] assert result['potential_reviewers'] == [] def test_matching_manuscript(self): dataset = { 'person': [PERSON1], 'manuscript_version': [MANUSCRIPT_VERSION1], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1] } result = recommend_for_dataset(dataset, keywords='', manuscript_no=MANUSCRIPT_ID1) assert result == { 'potential_reviewers': [], 'related_manuscript_by_version_id': {}, 'matching_manuscripts': [{ **MANUSCRIPT_VERSION1_RESULT }] } def test_matching_manuscript_should_include_subject_areas(self): dataset = { 'person': [PERSON1], 'manuscript_version': [MANUSCRIPT_VERSION1], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1], 'manuscript_subject_area': [ MANUSCRIPT_SUBJECT_AREA1, MANUSCRIPT_SUBJECT_AREA2 ] } result = recommend_for_dataset(dataset, keywords='', manuscript_no=MANUSCRIPT_ID1) subject_areas = result['matching_manuscripts'][0]['subject_areas'] assert subject_areas == 
[SUBJECT_AREA1, SUBJECT_AREA2] def test_should_not_fail_for_manuscript_with_docvecs(self): dataset = { 'person': [PERSON1], 'manuscript_version': [MANUSCRIPT_VERSION1], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1], 'ml_manuscript_data': [ABSTRACT_DOCVEC1] } recommend_for_dataset(dataset, keywords='', manuscript_no=MANUSCRIPT_ID1) def test_should_not_fail_for_manuscript_with_partial_docvecs(self): dataset = { 'person': [PERSON1], 'manuscript_version': [MANUSCRIPT_VERSION1], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1], 'ml_manuscript_data': [ ABSTRACT_DOCVEC1, { **ABSTRACT_DOCVEC2, LDA_DOCVEC_COLUMN: None } ] } recommend_for_dataset(dataset, keywords='', manuscript_no=MANUSCRIPT_ID1) def test_search_should_filter_early_career_reviewer_by_subject_area(self): dataset = EARLY_CAREER_RESEARCHER_WITH_SUBJECT_AREAS_DATASET result = recommend_for_dataset( dataset, subject_area=SUBJECT_AREA1, keywords=None, manuscript_no=None ) recommended_person_ids = [ (r['person'][PERSON_ID], r['person'].get('is_early_career_researcher')) for r in result['potential_reviewers'] ] assert recommended_person_ids == [(PERSON_ID1, True)] def test_search_should_not_filter_early_career_reviewer_by_subject_area_if_blank(self): dataset = EARLY_CAREER_RESEARCHER_WITH_SUBJECT_AREAS_DATASET result = recommend_for_dataset( dataset, subject_area=None, keywords=KEYWORD1, manuscript_no=None ) recommended_person_ids = [ (r['person'][PERSON_ID], r['person'].get('is_early_career_researcher')) for r in result['potential_reviewers'] ] assert ( set(recommended_person_ids) == {(PERSON_ID1, True), (PERSON_ID2, True)} ) def test_matching_manuscript_should_filter_early_career_reviewer_by_subject_area(self): dataset = { **EARLY_CAREER_RESEARCHER_WITH_SUBJECT_AREAS_DATASET, 'person': ( EARLY_CAREER_RESEARCHER_WITH_SUBJECT_AREAS_DATASET['person'] + [PERSON3] ), 'manuscript_version': [MANUSCRIPT_VERSION1], 'manuscript_author': [{**AUTHOR3, **MANUSCRIPT_ID_FIELDS1}], 'manuscript_subject_area': [MANUSCRIPT_SUBJECT_AREA1] 
} result = recommend_for_dataset( dataset, filter_by_subject_area_enabled=False, subject_area=None, keywords=None, manuscript_no=MANUSCRIPT_ID1 ) recommended_person_ids = [ (r['person'][PERSON_ID], r['person'].get('is_early_career_researcher')) for r in result['potential_reviewers'] ] assert recommended_person_ids == [(PERSON_ID1, True)] def test_matching_manuscript_should_return_draft_version_with_authors(self): dataset = { 'person': [PERSON1], 'manuscript_version': [{ **MANUSCRIPT_VERSION1, 'decision': Decisions.REJECTED }], 'manuscript_author': [AUTHOR1], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1] } result = recommend_for_dataset(dataset, keywords='', manuscript_no=MANUSCRIPT_ID1) assert [m[MANUSCRIPT_ID] for m in result['matching_manuscripts']] == [MANUSCRIPT_ID1] assert [p[PERSON_ID] for p in result['matching_manuscripts'][0]['authors']] == [PERSON_ID1] def test_matching_manuscript_should_return_multiple_authors(self): dataset = { 'person': [PERSON1, PERSON2], 'manuscript_version': [MANUSCRIPT_VERSION1, MANUSCRIPT_VERSION2], 'manuscript_author': [ AUTHOR1, {**AUTHOR1, **MANUSCRIPT_ID_FIELDS2}, {**AUTHOR2, **MANUSCRIPT_ID_FIELDS1} ] } result = recommend_for_dataset(dataset, keywords='', manuscript_no=MANUSCRIPT_ID1) author_person_ids = [p[PERSON_ID] for p in result['matching_manuscripts'][0]['authors']] assert set(author_person_ids) == set([PERSON_ID1, PERSON_ID2]) def test_matching_manuscript_should_indicate_corresponding_authors(self): dataset = { 'person': [PERSON1, PERSON2], 'manuscript_version': [MANUSCRIPT_VERSION1, MANUSCRIPT_VERSION2], 'manuscript_author': [ { **AUTHOR1, 'is_corresponding_author': True }, { **AUTHOR2, **MANUSCRIPT_ID_FIELDS1, 'is_corresponding_author': False }, { # make author1 not the corresponding author of another manuscript **AUTHOR1, **MANUSCRIPT_ID_FIELDS2, 'is_corresponding_author': False } ] } result = recommend_for_dataset(dataset, keywords='', manuscript_no=MANUSCRIPT_ID1) authors = sorted(result['matching_manuscripts'][0] 
['authors'], key=lambda p: p[PERSON_ID]) author_summary = [(p[PERSON_ID], p.get('is_corresponding_author')) for p in authors] assert author_summary == [(PERSON_ID1, True), (PERSON_ID2, False)] def test_matching_manuscript_should_not_recommend_its_authors(self): dataset = { 'person': [PERSON1, PERSON2], 'manuscript_version': [MANUSCRIPT_VERSION1, MANUSCRIPT_VERSION2], 'manuscript_keyword': [ MANUSCRIPT_KEYWORD1, {**MANUSCRIPT_KEYWORD1, **MANUSCRIPT_ID_FIELDS2} ], 'manuscript_author': [ AUTHOR1, {**AUTHOR1, **MANUSCRIPT_ID_FIELDS2}, {**AUTHOR2, **MANUSCRIPT_ID_FIELDS2} ] } result = recommend_for_dataset(dataset, keywords='', manuscript_no=MANUSCRIPT_ID1) recommended_person_ids = [r['person'][PERSON_ID] for r in result['potential_reviewers']] assert recommended_person_ids == [PERSON_ID2] def _do_test_matching_manuscript_should_filter_by_subject_areas_if_enabled( self, filter_by_subject_area_enabled): dataset = { 'person': [PERSON1, PERSON2, PERSON3], 'manuscript_version': [ MANUSCRIPT_VERSION1, MANUSCRIPT_VERSION2, MANUSCRIPT_VERSION3 ], 'manuscript_keyword': [ MANUSCRIPT_KEYWORD1, {**MANUSCRIPT_KEYWORD1, **MANUSCRIPT_ID_FIELDS2}, {**MANUSCRIPT_KEYWORD1, **MANUSCRIPT_ID_FIELDS3} ], 'manuscript_subject_area': [ MANUSCRIPT_SUBJECT_AREA1, {**MANUSCRIPT_SUBJECT_AREA2, **MANUSCRIPT_ID_FIELDS2}, {**MANUSCRIPT_SUBJECT_AREA1, **MANUSCRIPT_ID_FIELDS3} ], 'manuscript_author': [ AUTHOR1, {**AUTHOR2, **MANUSCRIPT_ID_FIELDS2}, {**AUTHOR3, **MANUSCRIPT_ID_FIELDS3} ] } result = recommend_for_dataset( dataset, filter_by_subject_area_enabled=filter_by_subject_area_enabled, keywords='', manuscript_no=MANUSCRIPT_ID1 ) recommended_person_ids = [r['person'][PERSON_ID] for r in result['potential_reviewers']] if filter_by_subject_area_enabled: assert recommended_person_ids == [PERSON_ID3] else: assert set(recommended_person_ids) == {PERSON_ID2, PERSON_ID3} def test_matching_manuscript_should_filter_by_subject_areas_if_enabled(self): 
self._do_test_matching_manuscript_should_filter_by_subject_areas_if_enabled( filter_by_subject_area_enabled=True ) def test_matching_manuscript_should_not_filter_by_subject_areas_if_disabled(self): self._do_test_matching_manuscript_should_filter_by_subject_areas_if_enabled( filter_by_subject_area_enabled=False ) def test_matching_manuscript_should_filter_by_search_subject_area_only(self): dataset = { 'person': [PERSON2, PERSON3], 'manuscript_version': [MANUSCRIPT_VERSION2, MANUSCRIPT_VERSION3], 'manuscript_subject_area': [ MANUSCRIPT_SUBJECT_AREA1, { **MANUSCRIPT_SUBJECT_AREA2, **MANUSCRIPT_ID_FIELDS2 }, { **MANUSCRIPT_SUBJECT_AREA1, **MANUSCRIPT_ID_FIELDS3 } ], 'manuscript_author': [ { **AUTHOR2, **MANUSCRIPT_ID_FIELDS2 }, { **AUTHOR3, **MANUSCRIPT_ID_FIELDS3 } ] } result = recommend_for_dataset( dataset, filter_by_subject_area_enabled=False, keywords='', subject_area=SUBJECT_AREA1 ) recommended_person_ids = [r['person'][PERSON_ID] for r in result['potential_reviewers']] assert recommended_person_ids == [PERSON_ID3] def test_matching_one_keyword_author_should_return_author(self): dataset = { 'person': [PERSON1], 'manuscript_version': [MANUSCRIPT_VERSION1], 'manuscript_author': [AUTHOR1], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1] } result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='') assert [r['person'][PERSON_ID] for r in result['potential_reviewers']] == [PERSON_ID1] def test_matching_one_keyword_should_not_fail_on_unset_first_and_last_name(self): # Note: use two persons to trigger sort dataset = { 'person': [ PERSON1, {**PERSON2, 'first_name': None, 'last_name': None} ], 'manuscript_version': [MANUSCRIPT_VERSION1], 'manuscript_author': [AUTHOR1, AUTHOR2], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1] } result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='') assert ( {r['person'][PERSON_ID] for r in result['potential_reviewers']} == {PERSON_ID1, PERSON_ID2} ) def 
test_matching_one_keyword_author_should_not_suggest_authors_of_rejected_manuscripts( self): dataset = { 'person': [PERSON1], 'manuscript_version': [{ **MANUSCRIPT_VERSION1, 'decision': Decisions.REJECTED, 'is_published': None }], 'manuscript_author': [AUTHOR1], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1] } result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='') assert result['potential_reviewers'] == [] def test_matching_one_keyword_author_should_suggest_reviewers_of_rejected_manuscripts(self): dataset = { 'person': [PERSON1], 'manuscript_version': [{ **MANUSCRIPT_VERSION1, 'decision': Decisions.REJECTED }], 'manuscript_stage': _review_complete_stages( {**MANUSCRIPT_ID_FIELDS1, PERSON_ID: PERSON_ID1}, contacted=pd.Timestamp('2017-01-01'), accepted=pd.Timestamp('2017-01-02'), reviewed=pd.Timestamp('2017-01-03') ), 'manuscript_keyword': [MANUSCRIPT_KEYWORD1] } result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='') assert _potential_reviewers_person_ids(result['potential_reviewers']) == [PERSON_ID1] def test_matching_author_should_suggest_authors_with_unknown_decision_if_published( self): dataset = { 'person': [PERSON1], 'manuscript_version': [{ **MANUSCRIPT_VERSION1, 'decision': None, 'is_published': True }], 'manuscript_author': [AUTHOR1], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1] } result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='') assert _potential_reviewers_person_ids(result['potential_reviewers']) == [PERSON_ID1] def test_should_return_manuscript_scores_by_version_id(self): dataset = { 'person': [PERSON1], 'manuscript_version': [MANUSCRIPT_VERSION1], 'manuscript_author': [AUTHOR1], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1] } result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='') assert _potential_reviewers_person_ids(result['potential_reviewers']) == [PERSON_ID1] related_manuscript_by_version_id = result['related_manuscript_by_version_id'] assert 
related_manuscript_by_version_id[MANUSCRIPT_VERSION_ID1].get('score') == { 'combined': 1.0, 'keyword': 1.0, 'similarity': None } def test_should_return_decision_timestamp_as_published_timestamp(self): dataset = { 'person': [PERSON1], 'manuscript_version': [{ **MANUSCRIPT_VERSION1, 'decision_timestamp': pd.Timestamp('2017-01-01') }], 'manuscript_author': [AUTHOR1], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1] } result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='') assert _potential_reviewers_person_ids(result['potential_reviewers']) == [PERSON_ID1] related_manuscript_by_version_id = result['related_manuscript_by_version_id'] assert ( related_manuscript_by_version_id[ MANUSCRIPT_VERSION_ID1 ].get('published_timestamp') == pd.Timestamp('2017-01-01') ) def test_should_return_created_timestamp_as_published_timestamp(self): dataset = { 'person': [PERSON1], 'manuscript_version': [{ **MANUSCRIPT_VERSION1, 'created_timestamp': pd.Timestamp('2017-01-01') }], 'manuscript_author': [AUTHOR1], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1] } result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='') assert _potential_reviewers_person_ids(result['potential_reviewers']) == [PERSON_ID1] related_manuscript_by_version_id = result['related_manuscript_by_version_id'] assert ( related_manuscript_by_version_id[ MANUSCRIPT_VERSION_ID1 ].get('published_timestamp') == pd.Timestamp('2017-01-01') ) def test_matching_one_keyword_author_should_return_stats(self, logger): dataset = { 'person': [PERSON1], 'manuscript_version': [ MANUSCRIPT_VERSION1, MANUSCRIPT_VERSION2, MANUSCRIPT_VERSION3, MANUSCRIPT_VERSION4, MANUSCRIPT_VERSION5 ], 'manuscript_author': [AUTHOR1], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1], # add two review durations (two stages each) # also add an open review (accepted) 'manuscript_stage': ( _review_complete_stages( { **MANUSCRIPT_ID_FIELDS1, PERSON_ID: PERSON_ID1 }, contacted=pd.Timestamp('2017-01-01'), accepted=pd.Timestamp('2017-01-02'), 
reviewed=pd.Timestamp('2017-01-03') ) + _review_complete_stages( { **MANUSCRIPT_ID_FIELDS2, PERSON_ID: PERSON_ID1 }, contacted=pd.Timestamp('2017-02-01'), accepted=pd.Timestamp('2017-02-02'), reviewed=pd.Timestamp('2017-02-04') ) + _awaiting_accept_stages( { **MANUSCRIPT_ID_FIELDS3, PERSON_ID: PERSON_ID1 }, contacted=pd.Timestamp('2017-02-01') ) + _awaiting_review_stages( { **MANUSCRIPT_ID_FIELDS4, PERSON_ID: PERSON_ID1 }, contacted=pd.Timestamp('2017-02-01'), accepted=pd.Timestamp('2017-02-02') ) + _declined_stages( { **MANUSCRIPT_ID_FIELDS5, PERSON_ID: PERSON_ID1 }, contacted=pd.Timestamp('2017-02-01'), declined=pd.Timestamp('2017-02-02') ) ) } result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='') review_duration = { 'min': 1.0, 'mean': 1.5, 'max': 2, 'count': 2 } overall_stats = { 'review_duration': review_duration, 'reviews_in_progress': 1, 'waiting_to_be_accepted': 1, 'declined': 1 } result_person = result['potential_reviewers'][0]['person'] logger.debug("result_person: %s", PP.pformat(result_person)) assert result_person['stats'] == { 'overall': overall_stats, 'last_12m': overall_stats } def test_matching_one_keyword_author_should_return_memberships(self, logger): dataset = { 'person': [PERSON1], 'person_membership': [MEMBERSHIP1], 'manuscript_version': [MANUSCRIPT_VERSION1], 'manuscript_author': [AUTHOR1], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1] } result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='') result_person = result['potential_reviewers'][0]['person'] logger.debug("result_person: %s", PP.pformat(result_person)) assert result_person.get('memberships') == [MEMBERSHIP1_RESULT] def test_matching_one_keyword_author_should_return_other_accepted_papers(self, logger): dataset = { 'person': [PERSON1], 'manuscript_version': [ MANUSCRIPT_VERSION1, { **MANUSCRIPT_VERSION2, 'decision': Decisions.ACCEPTED } ], 'manuscript_author': [ AUTHOR1, { **AUTHOR1, **MANUSCRIPT_ID_FIELDS2 } ], 'manuscript_keyword': 
[MANUSCRIPT_KEYWORD1] } result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='') potential_reviewers = result['potential_reviewers'] author_of_manuscript_ids_by_person_id = _potential_reviewer_related_version_ids( potential_reviewers, RelationshipTypes.AUTHOR ) logger.debug("author_of_manuscript_ids_by_person_id: %s", author_of_manuscript_ids_by_person_id) assert author_of_manuscript_ids_by_person_id == { PERSON_ID1: { MANUSCRIPT_VERSION_ID1, MANUSCRIPT_VERSION_ID2 } } assert result['related_manuscript_by_version_id'].keys() == { MANUSCRIPT_VERSION_ID1, MANUSCRIPT_VERSION_ID2 } def test_matching_one_keyword_author_should_not_return_other_draft_papers(self, logger): dataset = { 'person': [PERSON1], 'manuscript_version': [ MANUSCRIPT_VERSION1, { **MANUSCRIPT_VERSION2, 'decision': Decisions.REJECTED, 'is_published': None } ], 'manuscript_author': [ AUTHOR1, { **AUTHOR1, **MANUSCRIPT_ID_FIELDS2 } ], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1] } result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='') potential_reviewers = result['potential_reviewers'] author_of_manuscript_ids_by_person_id = _potential_reviewer_related_version_ids( potential_reviewers, RelationshipTypes.AUTHOR ) logger.debug( "author_of_manuscript_ids_by_person_id: %s", author_of_manuscript_ids_by_person_id ) assert author_of_manuscript_ids_by_person_id == { PERSON_ID1: { MANUSCRIPT_VERSION_ID1 } } assert result['related_manuscript_by_version_id'].keys() == { MANUSCRIPT_VERSION_ID1 } def test_should_consider_previous_reviewer_as_potential_reviewer(self): dataset = { 'person': [PERSON1], 'manuscript_version': [MANUSCRIPT_VERSION1], 'manuscript_stage': [MANUSCRIPT_HISTORY_REVIEW_COMPLETE1], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1] } result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='') assert [r['person'][PERSON_ID] for r in result['potential_reviewers']] == [PERSON_ID1] def test_should_return_reviewer_as_potential_reviewer_only_once(self): 
dataset = { 'person': [PERSON1], 'manuscript_version': [MANUSCRIPT_VERSION1], 'manuscript_stage': [ { **MANUSCRIPT_HISTORY_REVIEW_COMPLETE1, 'stage_timestamp': pd.Timestamp('2017-01-01'), }, { **MANUSCRIPT_HISTORY_REVIEW_COMPLETE1, 'stage_timestamp': pd.Timestamp('2017-01-02'), } ], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1] } result = recommend_for_dataset(dataset, keywords=KEYWORD1, manuscript_no='') assert [ r['person'][PERSON_ID] for r in result['potential_reviewers'] ] == [PERSON_ID1] class TestRecommendReviewersByRole: def test_should_not_recommend_regular_reviewer_when_searching_for_senior_editor_via_keyword( self): # a regular reviewer doesn't have a role dataset = { 'person': [PERSON1], 'manuscript_version': [MANUSCRIPT_VERSION1], 'manuscript_author': [AUTHOR1], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1] } result = recommend_for_dataset( dataset, keywords=KEYWORD1, manuscript_no=None, role=PersonRoles.SENIOR_EDITOR ) person_ids = _potential_reviewers_person_ids(result['potential_reviewers']) assert person_ids == [] def test_should_not_recommend_regular_reviewer_when_searching_for_senior_editor_via_man_no( self): # a regular reviewer doesn't have a role dataset = { 'person': [PERSON1], 'manuscript_version': [MANUSCRIPT_VERSION1, MANUSCRIPT_VERSION2], 'manuscript_author': [AUTHOR1], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1, { **MANUSCRIPT_KEYWORD1, **MANUSCRIPT_ID_FIELDS2 }] } result = recommend_for_dataset( dataset, keywords=None, manuscript_no=MANUSCRIPT_ID2, role=PersonRoles.SENIOR_EDITOR ) person_ids = _potential_reviewers_person_ids(result['potential_reviewers']) assert person_ids == [] def test_should_not_recommend_reviewer_with_other_role_when_searching_for_senior_editor( self): dataset = { 'person': [PERSON1], 'person_role': [{PERSON_ID: PERSON_ID1, 'role': PersonRoles.OTHER}], 'manuscript_version': [MANUSCRIPT_VERSION1], 'manuscript_author': [AUTHOR1], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1] } result = recommend_for_dataset( dataset, 
keywords=KEYWORD1, manuscript_no=None, role=PersonRoles.SENIOR_EDITOR ) person_ids = _potential_reviewers_person_ids(result['potential_reviewers']) assert person_ids == [] def test_should_recommend_senior_editor_based_on_manuscript_keyword(self): dataset = { 'person': [PERSON1], 'person_role': [{PERSON_ID: PERSON_ID1, 'role': PersonRoles.SENIOR_EDITOR}], 'manuscript_version': [MANUSCRIPT_VERSION1], 'manuscript_author': [AUTHOR1], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1] } result = recommend_for_dataset( dataset, keywords=KEYWORD1, manuscript_no=None, role=PersonRoles.SENIOR_EDITOR ) person_ids = _potential_reviewers_person_ids(result['potential_reviewers']) assert person_ids == [PERSON_ID1] def test_should_recommend_senior_editor_based_on_manuscript_keyword_via_manuscript_no(self): dataset = { 'person': [PERSON1], 'person_role': [{PERSON_ID: PERSON_ID1, 'role': PersonRoles.SENIOR_EDITOR}], 'manuscript_version': [MANUSCRIPT_VERSION1, MANUSCRIPT_VERSION2], 'manuscript_author': [AUTHOR1], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1, { **MANUSCRIPT_KEYWORD1, **MANUSCRIPT_ID_FIELDS2 }] } result = recommend_for_dataset( dataset, keywords=None, manuscript_no=MANUSCRIPT_ID2, role=PersonRoles.SENIOR_EDITOR ) person_ids = _potential_reviewers_person_ids(result['potential_reviewers']) assert person_ids == [PERSON_ID1] def test_should_recommend_previous_senior_editors_and_reflect_in_score(self): dataset = { 'person': [PERSON1], 'person_role': [{PERSON_ID: PERSON_ID1, 'role': PersonRoles.SENIOR_EDITOR}], 'manuscript_version': [MANUSCRIPT_VERSION1, MANUSCRIPT_VERSION2], 'manuscript_senior_editor': [{**MANUSCRIPT_ID_FIELDS1, 'person_id': PERSON_ID1}], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1, { **MANUSCRIPT_KEYWORD1, **MANUSCRIPT_ID_FIELDS2 }] } result = recommend_for_dataset( dataset, keywords=None, manuscript_no=MANUSCRIPT_ID2, role=PersonRoles.SENIOR_EDITOR, recommend_relationship_types=[ RelationshipTypes.AUTHOR, RelationshipTypes.EDITOR, RelationshipTypes.SENIOR_EDITOR, 
RelationshipTypes.REVIEWER ] ) potential_reviewers = result['potential_reviewers'] person_ids = _potential_reviewers_person_ids(potential_reviewers) assert person_ids == [PERSON_ID1] assert _potential_reviewer_scores_by_person_id(potential_reviewers) == { PERSON_ID1: 1.0 } def test_should_return_manuscript_ids_person_has_senior_editor_of(self): dataset = { 'person': [PERSON1, PERSON2, PERSON3], 'person_role': [{PERSON_ID: PERSON_ID1, 'role': PersonRoles.SENIOR_EDITOR}], 'manuscript_version': [ MANUSCRIPT_VERSION1, MANUSCRIPT_VERSION2, MANUSCRIPT_VERSION3 ], 'manuscript_author': [{**MANUSCRIPT_ID_FIELDS3, 'person_id': PERSON_ID1}], 'manuscript_editor': [{**MANUSCRIPT_ID_FIELDS2, 'person_id': PERSON_ID1}], 'manuscript_senior_editor': [{**MANUSCRIPT_ID_FIELDS1, 'person_id': PERSON_ID1}], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1, { **MANUSCRIPT_KEYWORD1, **MANUSCRIPT_ID_FIELDS2 }] } result = recommend_for_dataset( dataset, keywords=KEYWORD1, manuscript_no=None, role=PersonRoles.SENIOR_EDITOR, recommend_relationship_types=[ RelationshipTypes.SENIOR_EDITOR ], return_relationship_types=[ RelationshipTypes.SENIOR_EDITOR, RelationshipTypes.EDITOR, RelationshipTypes.AUTHOR ] ) potential_reviewers = result['potential_reviewers'] person_ids = _potential_reviewers_person_ids(potential_reviewers) assert person_ids == [PERSON_ID1] assert _potential_reviewer_related_version_ids( potential_reviewers, RelationshipTypes.SENIOR_EDITOR ) == {PERSON_ID1: {MANUSCRIPT_VERSION_ID1}} assert _potential_reviewer_related_version_ids( potential_reviewers, RelationshipTypes.EDITOR ) == {PERSON_ID1: {MANUSCRIPT_VERSION_ID2}} assert _potential_reviewer_related_version_ids( potential_reviewers, RelationshipTypes.AUTHOR ) == {PERSON_ID1: {MANUSCRIPT_VERSION_ID3}} assert result['related_manuscript_by_version_id'].keys() == { MANUSCRIPT_VERSION_ID1, MANUSCRIPT_VERSION_ID2, MANUSCRIPT_VERSION_ID3 } def test_should_recommend_based_on_stage_name(self): custom_stage = 'custom_stage' dataset = { 
'person': [PERSON1], 'person_role': [{PERSON_ID: PERSON_ID1, 'role': PersonRoles.SENIOR_EDITOR}], 'manuscript_version': [MANUSCRIPT_VERSION1, MANUSCRIPT_VERSION2], 'manuscript_stage': [ { **MANUSCRIPT_ID_FIELDS1, 'person_id': PERSON_ID1, 'stage_timestamp': pd.Timestamp('2017-01-01'), 'stage_name': custom_stage } ], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1, { **MANUSCRIPT_KEYWORD1, **MANUSCRIPT_ID_FIELDS2 }] } result = recommend_for_dataset( dataset, keywords=None, manuscript_no=MANUSCRIPT_ID2, role=PersonRoles.SENIOR_EDITOR, recommend_stage_names=[ custom_stage ] ) potential_reviewers = result['potential_reviewers'] person_ids = _potential_reviewers_person_ids(potential_reviewers) assert person_ids == [PERSON_ID1] def test_should_recommend_senior_editor_based_on_person_keyword_and_reflect_in_score(self): dataset = { 'person': [PERSON1], 'person_role': [{PERSON_ID: PERSON_ID1, 'role': PersonRoles.SENIOR_EDITOR}], 'person_keyword': [{PERSON_ID: PERSON_ID1, 'keyword': KEYWORD1}] } result = recommend_for_dataset( dataset, keywords=KEYWORD1, manuscript_no=None, role=PersonRoles.SENIOR_EDITOR ) potential_reviewers = result['potential_reviewers'] person_ids = _potential_reviewers_person_ids(potential_reviewers) assert person_ids == [PERSON_ID1] assert _potential_reviewer_scores_by_person_id(potential_reviewers) == { PERSON_ID1: 1.0 } class TestAllKeywords: def test_should_include_manuscript_keywords_in_all_keywords(self): dataset = { 'manuscript_version': [MANUSCRIPT_VERSION1], 'manuscript_keyword': [MANUSCRIPT_KEYWORD1] } with create_recommend_reviewers(dataset) as recommend_reviewers: assert recommend_reviewers.get_all_keywords() == [KEYWORD1] def test_should_include_person_keywords_in_all_keywords(self): dataset = { 'person': [PERSON1], 'person_keyword': [{PERSON_ID: PERSON_ID1, 'keyword': KEYWORD1}] } with create_recommend_reviewers(dataset) as recommend_reviewers: assert recommend_reviewers.get_all_keywords() == [KEYWORD1] class TestUserHasRoleByEmail: def 
test_should_return_wether_user_has_role(self): dataset = { 'person': [{**PERSON1, 'email': EMAIL_1}], 'person_role': [{PERSON_ID: PERSON_ID1, 'role': ROLE_1}] } with create_recommend_reviewers(dataset) as recommend_reviewers: assert recommend_reviewers.user_has_role_by_email( email=EMAIL_1, role=ROLE_1) is True assert recommend_reviewers.user_has_role_by_email( email=EMAIL_1, role='other') is False assert recommend_reviewers.user_has_role_by_email( email='other', role=ROLE_1) is False class TestGetUserRolesByEmail: def test_should_return_roles_of_existing_user(self): dataset = { 'person': [{**PERSON1, 'email': EMAIL_1}], 'person_role': [{PERSON_ID: PERSON_ID1, 'role': ROLE_1}] } with create_recommend_reviewers(dataset) as recommend_reviewers: assert recommend_reviewers.get_user_roles_by_email(email=EMAIL_1) == {ROLE_1} class TestGetManuscriptDetails: def test_should_return_none_if_version_id_is_invalid(self): dataset = { 'manuscript_version': [MANUSCRIPT_VERSION2] } with create_recommend_reviewers(dataset) as recommend_reviewers: assert recommend_reviewers.get_manuscript_details(MANUSCRIPT_VERSION_ID1) is None def test_should_return_details_if_version_id_is_valid(self): dataset = { 'manuscript_version': [MANUSCRIPT_VERSION1] } with create_recommend_reviewers(dataset) as recommend_reviewers: manuscript_details = recommend_reviewers.get_manuscript_details( MANUSCRIPT_VERSION_ID1) assert manuscript_details is not None assert manuscript_details.get(VERSION_ID) == MANUSCRIPT_VERSION_ID1 assert manuscript_details.get('manuscript_id') == MANUSCRIPT_ID1 assert manuscript_details.get('title') == MANUSCRIPT_VERSION1['title']
40.279828
100
0.578315
4,084
46,926
6.186337
0.070029
0.049159
0.028577
0.039105
0.767584
0.717989
0.677696
0.642312
0.609104
0.579497
0
0.021924
0.337084
46,926
1,164
101
40.314433
0.790247
0.005477
0
0.521033
0
0
0.11309
0.01768
0
0
0
0
0.057361
1
0.054493
false
0
0.010516
0.008604
0.08413
0.001912
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
5ce1c1e45e684257c61a22c09e36b324c5a48fc3
263
py
Python
Mundo 3_Estruturas Compostas/Desafio_77.py
VictorOliveira02/Desafios-Python3-Curso-em-Video
53ee8bd814b816f3a21936677ef3f155b582843f
[ "MIT" ]
null
null
null
Mundo 3_Estruturas Compostas/Desafio_77.py
VictorOliveira02/Desafios-Python3-Curso-em-Video
53ee8bd814b816f3a21936677ef3f155b582843f
[ "MIT" ]
null
null
null
Mundo 3_Estruturas Compostas/Desafio_77.py
VictorOliveira02/Desafios-Python3-Curso-em-Video
53ee8bd814b816f3a21936677ef3f155b582843f
[ "MIT" ]
null
null
null
# Challenge 77: for each word in the tuple, print the vowels it contains.
VOGAIS = 'AEIOU'

palavra = ('MACACO', 'ARROZ', 'AZEITONA', 'LASANHA', 'PIZZA',
           'CANETA', 'PARALELEPIPEDO', 'ONZE', 'FERNANDO', 'CAIO')

for termo in palavra:
    # Header for this word; vowels are appended on the same line below.
    print(f'\nNA PALAVRA {termo} TEMOS AS VOGAIS ', end='')
    for caractere in termo:
        if caractere in VOGAIS:
            print(caractere, end=' ')
37.571429
106
0.60076
35
263
4.514286
0.685714
0.088608
0
0
0
0
0
0
0
0
0
0
0.201521
263
6
107
43.833333
0.752381
0
0
0
0
0
0.403042
0
0
0
0
0
0
1
0
false
0
0
0
0
0.333333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
5ce1c9f7533769cd42ba4b49d7b1951d113a40f2
2,218
py
Python
sms/apps/products/models.py
vyshakTs/STORE_MANAGEMENT_SYSTEM
b6b82a02c0b512083c35a8656e191436552569a9
[ "CC0-1.0" ]
null
null
null
sms/apps/products/models.py
vyshakTs/STORE_MANAGEMENT_SYSTEM
b6b82a02c0b512083c35a8656e191436552569a9
[ "CC0-1.0" ]
null
null
null
sms/apps/products/models.py
vyshakTs/STORE_MANAGEMENT_SYSTEM
b6b82a02c0b512083c35a8656e191436552569a9
[ "CC0-1.0" ]
null
null
null
import uuid

from autoslug import AutoSlugField
from django.db import models

from sms.apps.accounts.models import TimeStampedModel

# from django_resized import ResizedImageField

# Create your models here.
class Product(models.Model):
    """A sellable product with pricing, media files and category links."""

    # Values stored in the ``status`` field.
    IN_STOCK = 1
    OUT_OF_STOCK = 0
    STATUS_CHOICES = (
        (IN_STOCK, 'In stock'),
        (OUT_OF_STOCK, 'Out of stock')
    )
    # Single-letter display label codes; presumably map to UI/CSS styles
    # (primary/secondary/danger) — TODO confirm with the templates.
    LABEL_CHOICES = (
        ('P', 'primary'),
        ('S', 'secondary'),
        ('D', 'danger'),
    )

    status = models.PositiveIntegerField(
        choices=STATUS_CHOICES,
        default=IN_STOCK
    )
    # Public identifier, distinct from the implicit auto-increment primary key.
    product_id = models.UUIDField(default=uuid.uuid4)
    name = models.CharField(max_length=200,)
    description = models.TextField(blank=True, null=True)
    label = models.CharField(choices=LABEL_CHOICES, max_length=1)
    # Both category links are optional; deleting a category cascades to its products.
    category = models.ForeignKey('Category', null=True, blank=True, on_delete=models.CASCADE)
    sub_category = models.ForeignKey('SubCategory', null=True, blank=True, on_delete=models.CASCADE)
    product_image = models.FileField(upload_to='products', null=True, blank=True)
    product_video = models.FileField(upload_to='products', null=True, blank=True)
    # Monetary fields; NOTE(review): FloatField for money is lossy — a
    # DecimalField would be the usual choice, confirm before changing.
    tax = models.IntegerField(null=True, blank=True)
    price = models.FloatField(null=True, blank=True)
    shipping_charge = models.FloatField(null=True, blank=True)
    net_amount = models.FloatField(null=True, blank=True)
    offer_percentage = models.IntegerField(null=True, blank=True)
    # category = models.ForeignKey(category)

    def __str__(self):
        return self.name


class Category(TimeStampedModel):
    """Top-level product category; slug is generated automatically from the title."""

    title = models.CharField(max_length=200, blank=True, null=True)
    slug = AutoSlugField(unique=True,)

    class Meta:
        ordering = ('-created_date',)
        verbose_name = 'category'
        verbose_name_plural = 'categories'

    def __str__(self):
        return self.title


class SubCategory(TimeStampedModel):
    """Secondary product category, referenced by ``Product.sub_category``."""

    title = models.CharField(max_length=200, blank=True, null=True)
    slug = AutoSlugField(unique=True,)

    class Meta:
        ordering = ('-created_date',)
        verbose_name = 'sub-category'
        verbose_name_plural = 'sub-categories'

    def __str__(self):
        return self.title
31.685714
100
0.688909
263
2,218
5.631179
0.330798
0.072924
0.079001
0.103309
0.486158
0.454423
0.340311
0.293045
0.241729
0.176907
0
0.007307
0.197926
2,218
70
101
31.685714
0.825183
0.048693
0
0.25
0
0
0.071191
0
0
0
0
0
0
1
0.057692
false
0
0.076923
0.057692
0.711538
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
5ce2e2885759b71f922db45b1b250447965a354c
14,927
py
Python
certifire/cli.py
CertiFire/certifire
722da20bade41b8cc8553177e70e1f56015fe335
[ "MIT" ]
null
null
null
certifire/cli.py
CertiFire/certifire
722da20bade41b8cc8553177e70e1f56015fe335
[ "MIT" ]
null
null
null
certifire/cli.py
CertiFire/certifire
722da20bade41b8cc8553177e70e1f56015fe335
[ "MIT" ]
1
2021-02-06T03:29:56.000Z
2021-02-06T03:29:56.000Z
import argparse
import logging
import os
import sys

from certifire import app, auth, config, database, db, get_version
from certifire.errors import CertifireError
from certifire.plugins.acme import crypto
from certifire.plugins.acme.models import Account, Certificate, Order
from certifire.plugins.acme.plugin import (create_order, register, reorder,
                                           revoke_certificate)
from certifire.plugins.destinations.models import Destination

logger = logging.getLogger(__name__)

# Text

DESCRIPTION = \
"""
Certifire {}.

Interact with ACME certification authorities such as Let's Encrypt.

No idea what you're doing? Register an account, authorize your domains and
issue a certificate or two. Call a command with -h for more instructions.
""".format(get_version())

DESCRIPTION_REGISTER = \
"""
Creates a new account key and registers on the server. The resulting --account
is saved in the database, and required for most other operations.

Takes email as required argument

You can pass arguments like organization, organizational_unit, country, state,
and location for csr generations from this account. if not provided, default
values from the config file will be used

You can also pass your own RSA private key if needed
(Provide key size 2048 and above, otherwise the server won't accept it.)

You only have to do this once.
"""

DESCRIPTION_ISSUE = \
"""
Issues a certificate for one or more domains.

Firstly, domains passed will be authorized by the type of authentication
specified. If dns authentication is used, also provide the dns provider.
If type and dns provider not passed, default values will be used from the
config file

Takes account_id as required argument

You can pass arguments like organization, organizational_unit, country, state,
and location for csr generations from this account. if not provided, default
values from the account will be used

This will generate a new RSA key and CSR for you. But if you want, you can
bring your own with the --key-file and --csr-file attributes.
(Provide key size 2048 and above, otherwise the server won't accept it.)

The resulting key and certificate are written into the database. A chained
certificate with the intermediate included is also written to databse.
(If you're passing your own CSR, the given domains can be whatever you want.)

Note that unlike many other certification authorities, ACME does not add a
non-www or www alias to certificates. If you want this to happen, add it
yourself. You need to authorize both as well.

Certificate issuance has a server-side rate limit. Don't overdo it.
"""

DESCRIPTION_REVOKE = \
"""
Revokes a certificate. The certificate must have been issued using the
current account.

Takes account_id and certificate_id as required arguments
"""

# Command handlers

def _register(args):
    """Handle ``certifire register``: create an ACME account and store it."""
    key = None
    if args.key_file:
        # Bring-your-own account key; otherwise the plugin generates one.
        with open(args.key_file, 'rb') as f:
            key = crypto.load_private_key(f.read())
    with app.app_context():
        ret, act_id = register(
            user_id=1,
            email=args.email,
            server=args.server,
            rsa_key=key,
            organization=args.organization,
            organizational_unit=args.organizational_unit,
            country=args.country,
            state=args.state,
            location=args.location)
    if ret:
        print("Account created with account id: {}".format(act_id))
        print("Pass this account id for issue, revoke, etc...")
    else:
        print("Account with same email exists: account id: {}".format(act_id))


def _issue(args):
    """Handle ``certifire issue``: authorize domains and order a certificate."""
    key = None
    if args.key_file:
        with open(args.key_file, 'rb') as f:
            key = crypto.load_private_key(f.read())
    csr = None
    if args.csr_file:
        with open(args.csr_file, 'rb') as f:
            # Bug fix: this previously assigned the loaded CSR to ``key``,
            # silently discarding the user's CSR and clobbering the key.
            csr = crypto.load_csr(f.read())
    with app.app_context():
        ret, order_id = create_order(
            account_id=args.account,
            destination_id=args.destination,
            domains=args.domains,
            type=args.type,
            provider=args.provider,
            email=args.email,
            organization=args.organization,
            organizational_unit=args.organizational_unit,
            country=args.country,
            state=args.state,
            location=args.location,
            reissue=args.reissue,
            csr=csr,
            key=key)
    if ret:
        print("Order created with order id: {}".format(order_id))
    else:
        print("Order creation failed.")


def _revoke(args):
    """Handle ``certifire revoke``: revoke the certificate through its order's account."""
    with app.app_context():
        certdb = Certificate.query.get(args.certificate)
        if not certdb:
            print("There is no such certificate {}".format(args.certificate))
            return
        order = Order.query.get(certdb.order_id)
        if not order:
            print("Order for this certificate not found")
            return
        revoke_certificate(order.account_id, certdb.id)


def _create_dest(args):
    """Handle ``certifire destination create``: register an SSH push target."""
    pkey = None
    if args.pkey:
        with open(args.pkey, 'rb') as f:
            pkey = crypto.load_private_key(f.read())
    with app.app_context():
        dest = Destination(
            user_id=1,
            host=args.host,
            port=args.port,
            user=args.user,
            password=args.pwd,
            ssh_priv_key=pkey,
            ssh_priv_key_pass=args.pkeypass,
            challengeDestinationPath=args.challengePath,
            certDestinationPath=args.certPath,
            exportFormat=args.exportFormat,
            no_check=args.nocheck)
        if dest.create():
            print("Destination: {} created".format(dest.id))
            print(dest.json)
        else:
            print("Error creating destination with given data. Check hostname, password, private key")
            print(dest.json)


def _update_dest(args):
    """Handle ``certifire destination update``: modify an existing destination."""
    with app.app_context():
        dest = Destination.query.get(args.id)
        if not dest:
            print("There is no such destination {}".format(args.id))
            return
        # Only admin-owned (user_id == 1) destinations may be changed here.
        if dest.user_id != 1:
            print("This destination does not belong to the admin")
            return
        pkey = None
        if args.pkey:
            with open(args.pkey, 'rb') as f:
                pkey = crypto.load_private_key(f.read())
        if dest.update(
                user_id=1,
                host=args.host,
                port=args.port,
                user=args.user,
                password=args.pwd,
                ssh_priv_key=pkey,
                ssh_priv_key_pass=args.pkeypass,
                challengeDestinationPath=args.challengePath,
                certDestinationPath=args.certPath,
                exportFormat=args.exportFormat,
                no_check=args.nocheck):
            print("Destination: {} updated".format(dest.id))
            print(dest.json)
        else:
            print("Error updating destination with given data. Check hostname, password, private key")
            print(dest.json)


def _delete_dest(args):
    """Handle ``certifire destination delete``: remove an admin-owned destination."""
    with app.app_context():
        dest = Destination.query.get(args.id)
        if not dest:
            print("There is no such destination {}".format(args.id))
            return
        if dest.user_id != 1:
            print("This destination does not belong to the admin")
            return
        # NOTE(review): relies on delete() returning the deleted object so
        # dest.id remains readable afterwards — confirm in Destination.delete.
        dest = dest.delete()
        print("Destination {} deleted from database".format(dest.id))


class Formatter(argparse.ArgumentDefaultsHelpFormatter,
                argparse.RawDescriptionHelpFormatter):
    """Help formatter that shows defaults and keeps raw description text."""
    pass


def certifire_main():
    """CLI entry point: build the argparse tree, dispatch, and report errors."""
    parser = argparse.ArgumentParser(
        description=DESCRIPTION,
        formatter_class=Formatter,
    )
    subparsers = parser.add_subparsers()

    # Account creation. (Renamed local from ``register`` to avoid shadowing
    # the imported ``register`` plugin function.)
    register_parser = subparsers.add_parser(
        'register',
        help="Create a new account and register",
        description=DESCRIPTION_REGISTER,
        formatter_class=Formatter,
    )
    register_parser.add_argument('email', type=str, help="Account email address")
    register_parser.add_argument('--server', '-i', help="ACME Server url")
    register_parser.add_argument('--key-file', '-k',
                                 help="Existing key file to use for the account")
    register_parser.add_argument('--organization', '-o', help="Name of organization")
    register_parser.add_argument('--organizational_unit', '-u',
                                 help="Name of organizational unit")
    register_parser.add_argument('--country', '-c', help="Name of country")
    register_parser.add_argument('--state', '-s', help="Name of state")
    register_parser.add_argument('--location', '-l', help="Name of location")
    register_parser.set_defaults(func=_register)

    # Certificate issuance
    issue = subparsers.add_parser(
        'issue',
        help="Authorize and Request a new certificate",
        description=DESCRIPTION_ISSUE,
        formatter_class=Formatter,
    )
    issue.add_argument('--account', '-a', help="The acme account id to use",
                       required=True)
    issue.add_argument('--destination',
                       help="Destination to authorize/push certificates")
    issue.add_argument('--domains', help="One or more domain names to authorize",
                       nargs='+')
    issue.add_argument('--type', '-t', help="Authorization type",
                       choices=('dns', 'sftp'), default='dns')
    issue.add_argument('--provider', '-p', help="DNS Provider",
                       choices=config.VALID_DNS_PROVIDERS,
                       default=config.VALID_DNS_PROVIDERS[0])
    issue.add_argument('--key-file', '-k',
                       help="Existing key file to use for the certificate")
    issue.add_argument('--csr-file', help="Existing signing request to use")
    issue.add_argument('--email', '-e', help="email address for CSR")
    issue.add_argument('--organization', '-o', help="Name of organization")
    issue.add_argument('--organizational_unit', '-u',
                       help="Name of organizational unit")
    issue.add_argument('--country', '-c', help="Name of country")
    issue.add_argument('--state', '-s', help="Name of state")
    issue.add_argument('--location', '-l', help="Name of location")
    issue.add_argument('--reissue', dest='reissue', help="Reissue certificate",
                       action='store_true')
    issue.set_defaults(func=_issue, reissue=False)

    # Certificate revocation
    revoke = subparsers.add_parser(
        'revoke',
        help="Revoke an issued certificate",
        description=DESCRIPTION_REVOKE,
        formatter_class=Formatter,
    )
    revoke.add_argument("certificate", help="The certificate id to revoke")
    revoke.add_argument('--account', '-a', help="The acme account id to use",
                        required=True)
    revoke.set_defaults(func=_revoke)

    destination = subparsers.add_parser(
        'destination',
        help="Manage Destinations",
        # description=DESCRIPTION_REVOKE,  #TODO: Destinations description
        formatter_class=Formatter,
    )
    destination_subparsers = destination.add_subparsers()

    create_dest = destination_subparsers.add_parser(
        'create',
        help='Create a Destination',
        formatter_class=Formatter
    )
    create_dest.add_argument("host", help="Host FQDN. eg: api.certifire.xyz")
    create_dest.add_argument('--port', '-p', help="SSH port", default=22)
    create_dest.add_argument('--user', '-u', help="SSH user", default='root')
    create_dest.add_argument('--pwd', '-s', help="SSH password")
    create_dest.add_argument('--pkey', '-k', help="SSH private key file")
    create_dest.add_argument('--pkeypass', '-c', help="SSH private key password")
    create_dest.add_argument('--challengePath',
                             help="HTTP-01 Challenge destination path",
                             default='/var/www/html')
    create_dest.add_argument('--certPath',
                             help="Certificate push destination path",
                             default='/etc/nginx/certs')
    create_dest.add_argument('--exportFormat', help="Certificate export format",
                             choices=('NGINX', 'Apache'), default='NGINX')
    create_dest.add_argument('--nocheck',
                             help="Pass this flag to skip SSH initial checks",
                             dest='nocheck', action='store_true')
    create_dest.set_defaults(func=_create_dest, nocheck=False)

    update_dest = destination_subparsers.add_parser(
        'update',
        help='Update a Destination',
        formatter_class=Formatter
    )
    update_dest.add_argument("id", help="Destination id")
    update_dest.add_argument("--host", '-f', help="Host FQDN. eg: api.certifire.xyz")
    update_dest.add_argument('--port', '-p', help="SSH port")
    update_dest.add_argument('--user', '-u', help="SSH user")
    update_dest.add_argument('--pwd', '-s', help="SSH password")
    update_dest.add_argument('--pkey', '-k', help="SSH private key file")
    update_dest.add_argument('--pkeypass', '-c', help="SSH private key password")
    update_dest.add_argument('--challengePath',
                             help="HTTP-01 Challenge destination path")
    update_dest.add_argument('--certPath',
                             help="Certificate push destination path")
    update_dest.add_argument('--exportFormat', help="Certificate export format",
                             choices=('NGINX', 'Apache'))
    update_dest.add_argument('--nocheck',
                             help="Pass this flag to skip SSH initial checks",
                             dest='nocheck', action='store_true')
    update_dest.set_defaults(func=_update_dest, nocheck=False)

    delete_dest = destination_subparsers.add_parser(
        'delete',
        help='Delete a Destination',
        formatter_class=Formatter
    )
    delete_dest.add_argument("id", help="Destination id")
    delete_dest.set_defaults(func=_delete_dest)

    # Version
    version = subparsers.add_parser("version", help="Show the version number")
    version.set_defaults(func=lambda *args: print(
        "certifire {}\n".format(get_version())))

    # Parse
    args = parser.parse_args()
    if not hasattr(args, 'func'):
        # No sub-command given: show help instead of crashing on dispatch.
        parser.print_help()
        sys.exit()

    # Set up logging
    root = logging.getLogger('certifire')
    root.setLevel(logging.INFO)
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(logging.Formatter("%(message)s"))
    root.addHandler(handler)

    # Let's encrypt
    try:
        args.func(args)
    except CertifireError as e:
        if str(e):
            logger.error(e)
        sys.exit()
    except KeyboardInterrupt:
        logger.error("")
        logger.error("Interrupted.")
        sys.exit()
    except Exception as e:
        logger.error("Oops! An unhandled error occurred. Please file a bug.")
        logger.exception(e)
        sys.exit()


if __name__ == "__main__":
    certifire_main()
36.85679
128
0.638239
1,793
14,927
5.192415
0.182376
0.05435
0.035446
0.024812
0.435768
0.40247
0.39957
0.380666
0.32739
0.306767
0
0.001795
0.253567
14,927
404
129
36.94802
0.833782
0.012595
0
0.318339
0
0
0.213956
0.003342
0
0
0
0.002475
0
1
0.024221
false
0.048443
0.038062
0
0.086505
0.076125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5ce43cf725e3bf8836e708394822671100ec1604
2,783
py
Python
data_gen/gen_p_e_m/gen_p_e_m_from_wiki.py
EMBEDDIA/multilingual_entity_linking
9042259dd72ae85d94a460a981e9716df4eac203
[ "Apache-2.0" ]
null
null
null
data_gen/gen_p_e_m/gen_p_e_m_from_wiki.py
EMBEDDIA/multilingual_entity_linking
9042259dd72ae85d94a460a981e9716df4eac203
[ "Apache-2.0" ]
2
2021-04-20T13:30:09.000Z
2021-05-03T14:24:06.000Z
data_gen/gen_p_e_m/gen_p_e_m_from_wiki.py
EMBEDDIA/multilingual_entity_linking
9042259dd72ae85d94a460a981e9716df4eac203
[ "Apache-2.0" ]
null
null
null
# Build the Wikipedia p(e|m) table: for every anchor-text mention, count how
# often it links to each entity (wikidata id) across a full Wikipedia dump,
# then write the per-mention entity frequencies sorted by count.
import argparse, os
from urllib.parse import unquote
import os.path
from os import path
import pickle

ap = argparse.ArgumentParser()
ap.add_argument("-l", "--language", default='en',type = str, help="path")
args = ap.parse_args()

# NOTE(review): helpers such as extract_text_and_hyp() and
# get_ent_name_from_wikiid() are injected into this module's globals by the
# exec() calls below rather than by a regular import — the paths are
# presumably relative to the repository root; confirm before running elsewhere.
exec(open("utils/utils.py").read())
exec(open("data_gen/parse_wiki_dump/parse_wiki_dump_tools.py").read())

print('Computing Wikipedia p_e_m')

# mention text -> {entity wikidata id -> number of hyperlinks seen}
wiki_e_m_counts = {}

# Running totals used for the periodic progress report.
num_lines = 0
parsing_errors = 0
list_ent_errors = 0
diez_ent_errors = 0
disambiguation_ent_errors = 0
num_valid_hyperlinks = 0

with open('wiki_data/' + args.language + '/' + args.language +
          '-wikidataid-TextWithAnchorsFromAllWikipedia.txt',
          encoding="utf-8") as f:
    for line in f:
        # Anchors in the dump are percent-encoded; decode before parsing.
        line = unquote(line.strip())
        num_lines += 1
        if num_lines % 5000000 == 0:
            # Progress report every 5M lines (the dump is very large).
            print('Processed ' + str(num_lines) + ' lines. Parsing errs = ' +\
                str(parsing_errors) + ' List ent errs = ' + \
                str(list_ent_errors) + ' diez errs = ' + str(diez_ent_errors) +\
                ' disambig errs = ' + str(disambiguation_ent_errors) + \
                ' . Num valid hyperlinks = ' + str(num_valid_hyperlinks))
        if not '<doc id="' in line:
            # Document-header lines carry no anchors; everything else may.
            list_hyp, text, le_errs, p_errs, dis_errs, diez_errs = extract_text_and_hyp(line, False)
            parsing_errors += p_errs
            list_ent_errors += le_errs
            disambiguation_ent_errors += dis_errs
            diez_ent_errors += diez_errs
            for el in list_hyp:
                mention = el
                ent_wikiid = list_hyp[el]['ent_wikiid']
                num_valid_hyperlinks += 1
                # Nested counter: create the inner dict/entry on first sight.
                if mention not in wiki_e_m_counts:
                    wiki_e_m_counts[mention] = {}
                if ent_wikiid not in wiki_e_m_counts[mention]:
                    wiki_e_m_counts[mention][ent_wikiid] = 0
                wiki_e_m_counts[mention][ent_wikiid] += 1

print(' Done computing Wikipedia p(e|m). Num valid hyperlinks = ', num_valid_hyperlinks)
print('Now sorting and writing ..')

with open('generated/' + args.language + '/wikipedia_p_e_m.txt', "w", encoding="utf-8") as f:
    # Output format, one line per mention:
    #   mention \t total_freq \t ent_id,count,ent_name \t ent_id,count,ent_name \t ...
    for mention in wiki_e_m_counts:
        tbl = {}
        for ent_wikiid in wiki_e_m_counts[mention]:
            tbl[ent_wikiid] = wiki_e_m_counts[mention][ent_wikiid]
        # Re-build the table ordered by descending link count.
        tbl = {k: v for k, v in sorted(tbl.items(), key=lambda item: item[1], reverse=True)}
        text = ''
        total_freq = 0
        for ent_wikiid in tbl:
            text += str(ent_wikiid) + ',' + str(tbl[ent_wikiid])
            text += ',' + get_ent_name_from_wikiid(ent_wikiid).replace(' ', '_') + '\t'
            total_freq = total_freq + tbl[ent_wikiid]
        f.write(mention + '\t' + str(total_freq) + '\t' + text + '\n')

print(' Done sorting and writing.')
37.106667
137
0.610492
381
2,783
4.170604
0.275591
0.073631
0.033984
0.067967
0.161737
0.114537
0.052863
0
0
0
0
0.010779
0.266619
2,783
74
138
37.608108
0.767761
0
0
0
0
0
0.164211
0.034495
0
0
0
0
0
1
0
false
0
0.083333
0
0.083333
0.083333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5ce52894ee777c761bbe80312dc0afb967f4d975
141
py
Python
homie/models/__init__.py
timpur/homie-discovery-python
f157e3843cae7b1ad3e4fd810b340c35c34473eb
[ "MIT" ]
null
null
null
homie/models/__init__.py
timpur/homie-discovery-python
f157e3843cae7b1ad3e4fd810b340c35c34473eb
[ "MIT" ]
null
null
null
homie/models/__init__.py
timpur/homie-discovery-python
f157e3843cae7b1ad3e4fd810b340c35c34473eb
[ "MIT" ]
null
null
null
"""Homie Models module""" from .homie_device import HomieDevice from .homie_node import HomieNode from .homie_property import HomieProperty
23.5
41
0.822695
18
141
6.277778
0.611111
0.238938
0
0
0
0
0
0
0
0
0
0
0.113475
141
5
42
28.2
0.904
0.134752
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
5ce7079747845637d28230c2367ed9d83c91e81e
1,865
py
Python
setup.py
Pranavj94/All-things-NLP
009e63e35611679afb54ca981675019679179fd3
[ "Apache-2.0" ]
null
null
null
setup.py
Pranavj94/All-things-NLP
009e63e35611679afb54ca981675019679179fd3
[ "Apache-2.0" ]
null
null
null
setup.py
Pranavj94/All-things-NLP
009e63e35611679afb54ca981675019679179fd3
[ "Apache-2.0" ]
1
2021-07-27T05:53:36.000Z
2021-07-27T05:53:36.000Z
############################################################################################ #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. #You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License. ############################################################################################ from setuptools import setup, find_packages def readme(): with open("README.md") as f: README = f.read() return README #with open("requirements.txt") as f: # required = f.read().splitlines() #with open("requirements-optional.txt") as f: # optional_required = f.read().splitlines() setup( name="allthingsnlp", version="0.0.4", description="All things NLP - An open source, low-code NLP library in Python.", long_description=readme(), long_description_content_type="text/markdown", url="https://github.com/Pranavj94/all-things-nlp", author="Pranav J", author_email="pranavj13594@gmail.com", license="MIT", classifiers=[ "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", ], packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]), include_package_data=True, install_requires=['pandas','numpy','tqdm','nltk','wordcloud','matplotlib','IPython'] #extras_require={"full": optional_required,}, )
38.061224
92
0.619839
221
1,865
5.171946
0.588235
0.052493
0.065617
0.068241
0
0
0
0
0
0
0
0.012739
0.158177
1,865
49
93
38.061224
0.715287
0.384987
0
0
0
0
0.422996
0.023207
0
0
0
0
0
1
0.04
false
0
0.04
0
0.12
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
5ce7ad53f5d3fe95cc2e2d929219a1298a0e9cfe
533
py
Python
src/jobs/migrations/0005_auto_20200507_0951.py
earth-emoji/infotechia
44ed7aecf052001573b47320e6a1239968d2a067
[ "BSD-2-Clause" ]
null
null
null
src/jobs/migrations/0005_auto_20200507_0951.py
earth-emoji/infotechia
44ed7aecf052001573b47320e6a1239968d2a067
[ "BSD-2-Clause" ]
11
2019-10-27T23:41:10.000Z
2022-02-10T10:30:00.000Z
src/jobs/migrations/0005_auto_20200507_0951.py
earth-emoji/august
065d4b449a138ead1557293bffcb20cd2db90a41
[ "BSD-2-Clause" ]
null
null
null
# Generated by Django 2.2.12 on 2020-05-07 09:51 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('jobs', '0004_application_qualifications'), ] operations = [ migrations.AlterField( model_name='application', name='applicant', field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='applications', to='accounts.Professional'), ), ]
26.65
150
0.666041
58
533
6.034483
0.689655
0.068571
0.08
0.125714
0
0
0
0
0
0
0
0.047962
0.217636
533
19
151
28.052632
0.791367
0.086304
0
0
1
0
0.181443
0.107216
0
0
0
0
0
1
0
false
0
0.153846
0
0.384615
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
5ce7f8d38fe3fa47eb76e2373cb55bdd2e43f061
4,257
py
Python
learn_python/hexlet/data_abstraction/lessons/rectangle.py
PavliukKonstantin/learn-python
3319f60dea252927deadad8a02d24617dbdc6f37
[ "MIT" ]
null
null
null
learn_python/hexlet/data_abstraction/lessons/rectangle.py
PavliukKonstantin/learn-python
3319f60dea252927deadad8a02d24617dbdc6f37
[ "MIT" ]
null
null
null
learn_python/hexlet/data_abstraction/lessons/rectangle.py
PavliukKonstantin/learn-python
3319f60dea252927deadad8a02d24617dbdc6f37
[ "MIT" ]
null
null
null
# Реализуйте абстракцию (набор функций) для работы с прямоугольниками, # стороны которого всегда параллельны осям. Прямоугольник может # располагаться в любом месте координатной плоскости. # # При такой постановке, достаточно знать только три параметра для однозначного # задания прямоугольника на плоскости: координаты левой-верхней точки, ширину # и высоту. Зная их, мы всегда можем построить прямоугольник # одним единственным способом. # # | # 4 | точка ширина # | *------------- # 3 | | | # | | | высота # 2 | | | # | -------------- # 1 | # | # ------|--------------------------- # 0 | 1 2 3 4 5 6 7 # | # | # | # Основной интерфейс: # # make_rectangle (конструктор) – создает прямоугольник. # Принимает параметры: левую-верхнюю точку, ширину и высоту. # Ширина и высота – положительные числа. # # Селекторы get_start_point, get_width и get_height # # contains_origin – проверяет, принадлежит ли центр координат прямоугольнику # (не лежит на границе прямоугольника, а находится внутри). # Чтобы в этом убедиться, достаточно проверить, # что все вершины прямоугольника лежат в разных квадрантах # (их можно высчитать в момент проверки). # # # Создание прямоугольника: # # p - левая верхняя точка # # 4 - ширина # # 5 - высота # # # # p 4 # # ----------- # # | | # # | | 5 # # | | # # ----------- # # >>> p = make_decart_point(0, 1) # >>> rectangle = make_rectangle(p, 4, 5) # # >>> contains_origin(rectangle) # False # # >>> rectangle2 = make_rectangle(make_decart_point(-4, 3), 5, 4) # >>> contains_origin(rectangle2) # True # Подсказки # Квадрант плоскости — любая из 4 областей (углов), # на которые плоскость делится двумя взаимно перпендикулярными прямыми, # принятыми в качестве осей координат. # Для определения квадранта, в которой лежит точка, # используйте функцию get_quadrant. 
def make_rectangle(start_point, width, height):
    """Construct a rectangle from its top-left point, width and height."""
    return {
        "start_point": start_point,
        "width": width,
        "height": height,
    }


def get_start_point(rectangle):
    """Selector: the top-left point of *rectangle*."""
    return rectangle["start_point"]


def get_width(rectangle):
    """Selector: the width of *rectangle*."""
    return rectangle["width"]


def get_height(rectangle):
    """Selector: the height of *rectangle*."""
    return rectangle["height"]


def get_ur_rectangle_point(rectangle):
    """Upper-right corner: start point shifted right by the width."""
    origin = get_start_point(rectangle)
    return make_decart_point(get_x(origin) + get_width(rectangle), get_y(origin))


def get_dl_rectangle_point(rectangle):
    """Lower-left corner: start point shifted down by the height."""
    origin = get_start_point(rectangle)
    return make_decart_point(get_x(origin), get_y(origin) - get_height(rectangle))


def get_dr_rectangle_point(rectangle):
    """Lower-right corner: start point shifted right and down."""
    origin = get_start_point(rectangle)
    return make_decart_point(
        get_x(origin) + get_width(rectangle),
        get_y(origin) - get_height(rectangle),
    )


def contains_origin(rectangle):
    """Return True when (0, 0) lies strictly inside *rectangle*.

    The origin is strictly inside exactly when the four corners fall into
    four distinct quadrants; a corner lying on an axis maps to None and
    collapses the quadrant set below four entries.
    """
    corners = (
        get_start_point(rectangle),
        get_ur_rectangle_point(rectangle),
        get_dl_rectangle_point(rectangle),
        get_dr_rectangle_point(rectangle),
    )
    quadrants = {get_quadrant(corner) for corner in corners}
    return len(quadrants) == 4


# ______________________________________________________________________


def make_decart_point(x, y):
    """Construct a Cartesian point."""
    return {"x": x, "y": y}


def get_x(point):
    """Selector: the x coordinate of *point*."""
    return point["x"]


def get_y(point):
    """Selector: the y coordinate of *point*."""
    return point["y"]


def get_quadrant(point):
    """Return the quadrant number (1-4) of *point*, or None on an axis."""
    x = get_x(point)
    y = get_y(point)
    if x > 0:
        if y > 0:
            return 1
        if y < 0:
            return 4
    elif x < 0:
        if y > 0:
            return 2
        if y < 0:
            return 3
    return None


def test_rectangle():
    top_left = make_decart_point(-4, 3)
    assert contains_origin(make_rectangle(top_left, 5, 4))
    assert not contains_origin(make_rectangle(top_left, 5, 2))
    assert not contains_origin(make_rectangle(top_left, 2, 2))
    assert not contains_origin(make_rectangle(top_left, 4, 3))


def test_cross_zero():
    rect = make_rectangle(make_decart_point(0, 1), 4, 5)
    assert not contains_origin(rect)


test_cross_zero()
test_rectangle()
24.75
78
0.651633
512
4,257
5.082031
0.3125
0.075327
0.044965
0.06764
0.274404
0.223674
0.181399
0.169869
0.139892
0.139892
0
0.018321
0.230679
4,257
171
79
24.894737
0.774962
0.446324
0
0.101449
0
0
0.021053
0
0
0
0
0
0.072464
1
0.202899
false
0
0
0.144928
0.434783
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
2
7a20dd4f2ffe3e31b486d6e52ca9dfffca42838b
5,622
py
Python
tests/test_routes.py
girardinsamuel/masonite-js-routes
8918b874ff1ca64069745ca5fbb9d4b464ceaf74
[ "MIT" ]
4
2020-11-01T22:51:01.000Z
2021-09-23T23:12:53.000Z
tests/test_routes.py
girardinsamuel/masonite-js-routes
8918b874ff1ca64069745ca5fbb9d4b464ceaf74
[ "MIT" ]
17
2021-02-07T17:32:15.000Z
2022-03-21T22:08:31.000Z
tests/test_routes.py
girardinsamuel/masonite-js-routes
8918b874ff1ca64069745ca5fbb9d4b464ceaf74
[ "MIT" ]
null
null
null
from masonite.testing import TestCase
from masonite.routes import Get, Post

from src.masonite.js_routes.routes import Routes as JsRoutes

# Every named route registered in setUp(), keyed by route name.
all_expected_routes = {
    "home": {"uri": "home", "methods": ["GET"]},
    "posts.show": {"uri": "posts/{post}", "methods": ["GET"]},
    "posts.store": {"uri": "posts", "methods": ["POST"]},
    "posts.index": {"uri": "posts", "methods": ["GET"]},
    "postComments.index": {"uri": "posts/{post}/comments", "methods": ["GET"]},
    "postComments.show": {
        "uri": "posts/{post}/comments/{comment}",
        "methods": ["GET"],
    },
    "admin.users.index": {"uri": "admin/users", "methods": ["GET"]},
}


def _subset(*names):
    """Expected-route mapping restricted to the given route names."""
    return {name: all_expected_routes[name] for name in names}


class TestRoutes(TestCase):
    """Exercise JsRoutes route collection, filtering and group config."""

    sqlite = False

    def setUp(self):
        super().setUp()
        named_routes = [
            Get("home", "tests.TestController@show").name("home"),
            Get("posts", "tests.TestController@show").name("posts.index"),
            Get("posts/@post", "tests.TestController@show").name("posts.show"),
            Get("posts/@post/comments", "tests.TestController@show").name(
                "postComments.index"
            ),
            Get(
                "posts/@post/comments/@comment:int", "tests.TestController@show"
            ).name("postComments.show"),
            Post("posts", "tests.TestController@show").name("posts.store"),
            Get("admin/users", "tests.TestController@show").name(
                "admin.users.index"
            ),
        ]
        self.routes(only=named_routes)
        self.buildOwnContainer()

    def test_basic_routes_generation(self):
        self.assertEqual(all_expected_routes, JsRoutes().routes)

    def test_can_filter_to_only_include_routes_matching_a_pattern(self):
        routes = JsRoutes().filter_routes(["posts.s*", "home"])
        self.assertEqual(_subset("home", "posts.show", "posts.store"), routes)

    def test_can_filter_to_exclude_routes_matching_a_pattern(self):
        routes = JsRoutes().filter_routes(["posts.s*", "home", "admin.*"], False)
        self.assertEqual(
            _subset("posts.index", "postComments.index", "postComments.show"),
            routes,
        )

    def test_can_set_included_routes_using_only_config(self):
        from config.js_routes import FILTERS

        FILTERS["except"] = []
        FILTERS["only"] = ["posts.s*", "home"]
        routes = JsRoutes().to_dict()["routes"]
        self.assertEqual(_subset("home", "posts.show", "posts.store"), routes)

    def test_can_set_included_routes_using_except_config(self):
        from config.js_routes import FILTERS

        FILTERS["only"] = []
        FILTERS["except"] = ["posts.s*", "home"]
        routes = JsRoutes().to_dict()["routes"]
        self.assertEqual(
            _subset(
                "posts.index",
                "postComments.index",
                "postComments.show",
                "admin.users.index",
            ),
            routes,
        )

    def test_returns_unfiltered_routes_when_both_only_and_except_configs_set(self):
        from config.js_routes import FILTERS

        FILTERS["except"] = ["posts.s*", "home"]
        FILTERS["only"] = ["some.other.routes"]
        self.assertEqual(all_expected_routes, JsRoutes().to_dict()["routes"])

    def test_can_set_included_routes_using_groups_config(self):
        from config.js_routes import FILTERS

        FILTERS["groups"] = {"posts": ["posts.s*"]}
        routes = JsRoutes("posts").to_dict()["routes"]
        self.assertEqual(_subset("posts.show", "posts.store"), routes)

    def test_can_set_included_routes_using_groups_array_config(self):
        from config.js_routes import FILTERS

        FILTERS["groups"] = {"posts": ["posts.s*"], "admin": ["admin.*"]}
        routes = JsRoutes(["posts", "admin"]).to_dict()["routes"]
        self.assertEqual(
            _subset("posts.show", "posts.store", "admin.users.index"), routes
        )

    # NOTE(review): the three methods below do not start with "test_" and so
    # are never collected by the test runner — presumably intentional stubs.
    def can_ignore_passed_group_not_set_in_config(self):
        from config.js_routes import FILTERS

        FILTERS["groups"] = {"posts": ["posts.s*"]}
        routes = JsRoutes(["unknown_group"]).to_dict()["routes"]
        self.assertEqual(all_expected_routes, routes)

    def can_include_middleware(self):
        pass

    def can_include_only_middleware_set_in_config(self):
        pass
38.244898
87
0.556208
576
5,622
5.248264
0.135417
0.066159
0.043665
0.042342
0.75951
0.710883
0.681111
0.669864
0.668541
0.583857
0
0
0.258449
5,622
146
88
38.506849
0.725114
0
0
0.540984
0
0
0.273212
0.064746
0
0
0
0
0.07377
1
0.098361
false
0.02459
0.07377
0
0.188525
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
7a228588da1a1865d35b49af11b46dd3d71bba03
3,532
py
Python
pysph/examples/sphysics/dambreak_sphysics.py
nauaneed/pysph
9cb9a859934939307c65a25cbf73e4ecc83fea4a
[ "BSD-3-Clause" ]
293
2017-05-26T14:41:15.000Z
2022-03-28T09:56:16.000Z
pysph/examples/sphysics/dambreak_sphysics.py
nauaneed/pysph
9cb9a859934939307c65a25cbf73e4ecc83fea4a
[ "BSD-3-Clause" ]
217
2017-05-29T15:48:14.000Z
2022-03-24T16:16:55.000Z
pysph/examples/sphysics/dambreak_sphysics.py
nauaneed/pysph
9cb9a859934939307c65a25cbf73e4ecc83fea4a
[ "BSD-3-Clause" ]
126
2017-05-25T19:17:32.000Z
2022-03-25T11:23:24.000Z
"""Dam break past an obstacle with data from SPHysics. (40 minutes) For benchmarking, we use the input geometry and discretization as the SPHYSICS Case 5 (https://wiki.manchester.ac.uk/sphysics/index.php/SPHYSICS_Home_Page) We only require the INDAT and IPART files generated by SPHysics. These define respectively, the numerical parameters and the initial particle data used for the run. The rest of the problem is set-up in the usual way. """ import os import numpy from pysph.sph.equation import Group from pysph.base.kernels import CubicSpline from pysph.sph.wc.basic import TaitEOS, TaitEOSHGCorrection, MomentumEquation from pysph.sph.basic_equations import ContinuityEquation, XSPHCorrection from pysph.solver.solver import Solver from pysph.solver.application import Application from pysph.sph.integrator import EPECIntegrator, PECIntegrator from pysph.sph.integrator_step import WCSPHStep from pysph.tools.sphysics import sphysics2pysph MY_DIR = os.path.dirname(__file__) INDAT = os.path.join(MY_DIR, 'INDAT.gz') IPART = os.path.join(MY_DIR, 'IPART.gz') # problem dimensionality dim = 3 # suggested initial time step and final time dt = 1e-5 tf = 2.0 # physical constants for the run loaded from SPHysics INDAT indat = numpy.loadtxt(INDAT) H = float( indat[10] ) B = float( indat[11] ) gamma = float( indat[12] ) eps = float( indat[14] ) rho0 = float( indat[15] ) alpha = float( indat[16] ) beta = 0.0 c0 = numpy.sqrt( B*gamma/rho0 ) class DamBreak3DSPhysics(Application): def add_user_options(self, group): group.add_argument( "--test", action="store_true", dest="test", default=False, help="For use while testing of results, uses PEC integrator." 
) def create_particles(self): return sphysics2pysph(IPART, INDAT, vtk=False) def create_solver(self): kernel = CubicSpline(dim=3) if self.options.test: integrator = PECIntegrator(fluid=WCSPHStep(),boundary=WCSPHStep()) adaptive, n_damp = False, 0 else: integrator = EPECIntegrator(fluid=WCSPHStep(),boundary=WCSPHStep()) adaptive, n_damp = True, 0 solver = Solver(dim=dim, kernel=kernel, integrator=integrator, adaptive_timestep=adaptive, tf=tf, dt=dt, n_damp=n_damp) return solver def create_equations(self): equations = [ # Equation of state Group(equations=[ TaitEOS(dest='fluid', sources=None, rho0=rho0, c0=c0, gamma=gamma), TaitEOSHGCorrection(dest='boundary', sources=None, rho0=rho0, c0=c0, gamma=gamma), ], real=False), # Continuity Momentum and XSPH equations Group(equations=[ ContinuityEquation(dest='fluid', sources=['fluid', 'boundary']), ContinuityEquation(dest='boundary', sources=['fluid']), MomentumEquation( dest='fluid', sources=['fluid', 'boundary'], c0=c0, alpha=alpha, beta=beta, gz=-9.81, tensile_correction=True), # Position step with XSPH XSPHCorrection(dest='fluid', sources=['fluid'], eps=eps) ]) ] return equations if __name__ == '__main__': app = DamBreak3DSPhysics() app.run()
33.009346
79
0.625708
410
3,532
5.312195
0.417073
0.03719
0.027548
0.028926
0.111111
0.070707
0.070707
0.030303
0
0
0
0.017633
0.277463
3,532
106
80
33.320755
0.835815
0.182616
0
0.057971
0
0
0.059151
0
0
0
0
0
0
1
0.057971
false
0
0.15942
0.014493
0.275362
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a25c60958291760ba28905779810fb418738cea
396
py
Python
RaspberryPi/work1.py
DTK-Creaters/Course
eb6518306482d21cc6e5848a783ffc0820b017fd
[ "Apache-2.0" ]
3
2020-05-15T15:14:17.000Z
2021-04-05T11:39:53.000Z
RaspberryPi/work1.py
DTK-Creaters/Course
eb6518306482d21cc6e5848a783ffc0820b017fd
[ "Apache-2.0" ]
null
null
null
RaspberryPi/work1.py
DTK-Creaters/Course
eb6518306482d21cc6e5848a783ffc0820b017fd
[ "Apache-2.0" ]
1
2020-05-17T02:48:13.000Z
2020-05-17T02:48:13.000Z
# -*- coding: utf-8 -*-
'''
Lesson 1

Write a program that blinks an LED three times.
Then make a version that uses three LEDs.
'''
import RPi.GPIO as GPIO
import time

LED_PINS = [10, 11, 12]

# Standard GPIO boilerplate that must run before any pin access.
GPIO.setmode(GPIO.BCM)
GPIO.setup(LED_PINS, GPIO.OUT)

for _ in range(3):
    GPIO.output(LED_PINS, GPIO.HIGH)  # drive pins 10, 11, 12 HIGH
    time.sleep(2)
    GPIO.output(LED_PINS, GPIO.LOW)   # set the current on pins 10, 11, 12 back to zero (LOW)
    time.sleep(2)

GPIO.cleanup()
17.217391
62
0.684343
58
396
4.672414
0.586207
0.088561
0.103321
0.132841
0
0
0
0
0
0
0
0.077381
0.151515
396
22
63
18
0.729167
0.35101
0
0.181818
0
0
0
0
0
0
0
0
0
1
0
false
0
0.181818
0
0.181818
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a267bf982c64e072a8e94dbfc7827cb9a57a32f
87
py
Python
intro-python/section4_answer.py
DanielMichelson/AusOpenRadar2019
e340ceca7dee9f064a5146876234fda04802e696
[ "BSD-2-Clause" ]
12
2019-11-03T10:17:32.000Z
2022-03-10T09:44:10.000Z
intro-python/section4_answer.py
DanielMichelson/AusOpenRadar2019
e340ceca7dee9f064a5146876234fda04802e696
[ "BSD-2-Clause" ]
2
2019-11-07T02:41:23.000Z
2019-11-11T01:04:47.000Z
intro-python/section4_answer.py
DanielMichelson/AusOpenRadar2019
e340ceca7dee9f064a5146876234fda04802e696
[ "BSD-2-Clause" ]
8
2019-09-17T00:28:46.000Z
2021-06-27T19:38:28.000Z
# NOTE(review): this answer snippet assumes `np` (numpy) and `plt`
# (matplotlib.pyplot) were imported by an earlier section — confirm.

# Sample the line y = 2x + 1 over [-100, 100) and plot it with axis labels.
X = np.arange(-100, 100, 0.1)
Y = X * 2 + 1
plt.plot(X, Y)
plt.xlabel('X')
plt.ylabel('Y')
17.4
29
0.574713
21
87
2.380952
0.571429
0
0
0
0
0
0
0
0
0
0
0.133333
0.137931
87
5
30
17.4
0.533333
0
0
0
0
0
0.022727
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
7a2796507aa8a3ec5cbd031ab878c3b23d7bbd5a
1,654
py
Python
tests/test_reset.py
embray/amqp-mock
dfcf50a4a455063331fa334b19db98cf59d88ea9
[ "Apache-2.0" ]
6
2021-01-13T08:32:16.000Z
2022-03-23T08:19:47.000Z
tests/test_reset.py
embray/amqp-mock
dfcf50a4a455063331fa334b19db98cf59d88ea9
[ "Apache-2.0" ]
20
2020-12-02T09:44:15.000Z
2022-01-04T16:33:09.000Z
tests/test_reset.py
embray/amqp-mock
dfcf50a4a455063331fa334b19db98cf59d88ea9
[ "Apache-2.0" ]
3
2020-08-20T13:21:13.000Z
2021-11-05T19:14:58.000Z
import pytest

from amqp_mock import Message

from ._test_utils.fixtures import amqp_client, mock_client, mock_server
from ._test_utils.helpers import random_uuid, to_binary
from ._test_utils.steps import given, then, when

# Re-export the fixtures so pytest discovers them from this module.
__all__ = ("mock_client", "mock_server", "amqp_client",)


@pytest.mark.asyncio
async def test_reset_exchanges(*, mock_server, mock_client, amqp_client):
    with given:
        exchange = "test_exchange"
        payload = {"id": random_uuid()}
        await amqp_client.publish(to_binary(payload), exchange)

    with when:
        outcome = await mock_client.reset()

    with then:
        assert outcome is None
        # The exchange must hold no messages after a reset.
        remaining = await mock_client.get_exchange_messages(exchange)
        assert len(remaining) == 0


@pytest.mark.asyncio
async def test_reset_queues(*, mock_server, mock_client, amqp_client):
    with given:
        queue = "test_queue"
        await mock_client.publish_message(queue, Message("text"))

    with when:
        outcome = await mock_client.reset()

    with then:
        assert outcome is None
        # Consuming the queue after a reset must yield nothing.
        await amqp_client.consume(queue)
        await amqp_client.wait(seconds=0.1)
        assert len(amqp_client.get_consumed_messages()) == 0


@pytest.mark.asyncio
async def test_reset_history(*, mock_server, mock_client, amqp_client):
    with given:
        queue = "test_queue"
        await mock_client.publish_message(queue, Message("text"))
        await amqp_client.consume(queue)

    with when:
        outcome = await mock_client.reset()

    with then:
        assert outcome is None
        # The delivery history is wiped together with everything else.
        log = await mock_client.get_queue_message_history(queue)
        assert len(log) == 0
27.114754
73
0.692866
216
1,654
5.027778
0.226852
0.110497
0.096685
0.060773
0.542357
0.492634
0.492634
0.461326
0.425414
0.346225
0
0.003867
0.218259
1,654
60
74
27.566667
0.83604
0
0
0.571429
0
0
0.045949
0
0
0
0
0
0.142857
1
0
false
0
0.119048
0
0.119048
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a27a7a55e2650fe9334f43d5b8dce70103b5737
12,749
py
Python
karta/raster/band.py
fortyninemaps/karta
b35d8cbcfb62e9f1d826a5c73605d34a0c0990b6
[ "MIT" ]
84
2016-03-18T15:42:02.000Z
2022-02-20T15:12:28.000Z
karta/raster/band.py
fortyninemaps/karta
b35d8cbcfb62e9f1d826a5c73605d34a0c0990b6
[ "MIT" ]
21
2016-03-06T01:47:38.000Z
2019-01-13T20:33:52.000Z
karta/raster/band.py
fortyninemaps/karta
b35d8cbcfb62e9f1d826a5c73605d34a0c0990b6
[ "MIT" ]
12
2016-03-18T15:33:53.000Z
2022-03-02T08:18:22.000Z
""" Band implementations for storing data in Karta Grid instances Overview -------- `BandIndexer` interface for accessing data from one or more bands `SimpleBand` use numpy arrays for data storage `CompressedBand` uses blosc compression to reduce in-memory footprint Implementation -------------- Bands are expected to implement the following methods: - `__init__(self, size, dtype, initval=None)` - `getblock(self, yoff, xoff, ny, nx)` - `setblock(self, yoff, xoff, array)` Attributes: - `dtype` - `size` The following methods are deprecated: - `__getitem__(self, key)`, accepting as *key* any of - an int - a slice - a 2-tuple of ints - a 2-tuple of slices - `__setitem__(self, key, value)`, accepting as *key* the same possibilities as __getitem__ """ import blosc import numpy as np from numbers import Real, Integral from math import ceil class BandIndexer(object): def __init__(self, bands): self.bands = bands def __getitem__(self, key): if isinstance(key, np.ndarray): return self._get_from_array_mask(key) if isinstance(key, slice): key = (key, slice(None, None, None), slice(None, None, None)) if not isinstance(key, tuple): raise TypeError("key should be an array or a tuple") collapse_rows = collapse_cols = collapse_bands = False ny, nx = self.bands[0].size if isinstance(key[0], Integral): collapse_rows = True r = key[0] % ny ystart, yend, ystep = (r, r+1, 1) elif isinstance(key[0], slice): ystart, yend, ystep = key[0].indices(ny) else: raise TypeError("first key item should be an integer or a slice") if isinstance(key[1], Integral): collapse_cols = True r = key[1] % nx xstart, xend, xstep = (r, r+1, 1) elif isinstance(key[1], slice): xstart, xend, xstep = key[1].indices(nx) else: raise TypeError("second key item should be an integer or a slice") if len(key) == 2: bands = list(range(len(self.bands))) elif len(key) == 3 and isinstance(key[2], Integral): collapse_bands = True bands = [key[2] % len(self.bands)] elif len(key) == 3 and isinstance(key[2], slice): bands = 
list(range(*key[2].indices(len(self.bands)))) else: raise TypeError("third key item should be an integer or a slice") if ystep < 0: ystart, yend = yend+1, ystart+1 if xstep < 0: xstart, xend = xend+1, xstart+1 shape = [1 + (yend-ystart-1) // abs(ystep), 1 + (xend-xstart-1) // abs(xstep), len(bands)] out = np.empty(shape, dtype = self.bands[0].dtype) for i, iband in enumerate(bands): band = self.bands[iband] band_values = band.getblock(ystart, xstart, yend-ystart, xend-xstart) out[:,:,i] = band_values[::ystep,::xstep] if collapse_bands: out = out[:,:,0] if collapse_cols: out = out[:,0] if collapse_rows: out = out[0] return out def __setitem__(self, key, value): if isinstance(key, np.ndarray): return self._set_from_array_mask(key, value) if isinstance(key, slice): key = (key, slice(None, None, None), slice(None, None, None)) if not isinstance(key, tuple): raise TypeError("key should be an array or a tuple") ny, nx = self.bands[0].size if isinstance(key[0], Integral): r = key[0] % ny ystart, yend, ystep = (r, r+1, 1) elif isinstance(key[0], slice): ystart, yend, ystep = key[0].indices(ny) else: raise TypeError("first key item should be an integer or a slice") if isinstance(key[1], Integral): r = key[1] % nx xstart, xend, xstep = (r, r+1, 1) elif isinstance(key[1], slice): xstart, xend, xstep = key[1].indices(nx) else: raise TypeError("second key item should be an integer or a slice") if len(key) == 2: bands = list(range(len(self.bands))) elif len(key) == 3 and isinstance(key[2], Integral): collapse_bands = True bands = [key[2] % len(self.bands)] elif len(key) == 3 and isinstance(key[2], slice): bands = list(range(*key[2].indices(len(self.bands)))) else: raise TypeError("third key item should be an integer or a slice") if not (xstep == ystep == 1): raise NotImplementedError("setting band values with stepped slices") #if ystep < 0: # ystart, yend = yend+1, ystart+1 #if xstep < 0: # xstart, xend = xend+1, xstart+1 shape = [1 + (yend-ystart-1) // abs(ystep), 1 + 
(xend-xstart-1) // abs(xstep), len(bands)] if isinstance(value, np.ndarray) and (value.ndim == 1) and (shape[0] == shape[1] == 1): val_array = np.reshape(np.atleast_3d(value), shape) else: val_array = np.broadcast_to(np.atleast_3d(value), shape) for i, iband in enumerate(bands): band = self.bands[iband] band.setblock(ystart, xstart, val_array[:,:,i]) return def _get_from_array_mask(self, mask): # The mask is assumed to be in (row, column[, band]) order # TODO: make this memory efficient if mask.ndim == 2: return self[:,:,:][mask] elif mask.ndim == 3: return self[:,:,:][mask] else: raise IndexError("masking array must have two or three dimensions") def _set_from_array_mask(self, mask, value): # The mask is assumed to be in (row, column[, band]) order # TODO: make this memory efficient for i, band in enumerate(self.bands): if mask.ndim == 3: mask_ = mask[:,:,i] else: mask_ = mask tmp = band.getblock(0, 0, *band.size) if isinstance(value, Real) or (value.ndim == 1): tmp[mask_] = value else: tmp[mask_] = value[:,i] band.setblock(0, 0, tmp) def __iter__(self): nx = self.bands[0].size[1] for i in range(self.bands[0].size[0]): if len(self.bands) == 1: yield self.bands[0].getblock(i, 0, 1, nx) else: yield np.vstack([b.getblock(i, 0, 1, nx) for b in self.bands]) @property def shape(self): """ Returns the dimensions of raster bands. If there is a single (m x n) band, output is a tuple (m, n). If there are N>1 bands, output is a tuple (N, m, n). """ if len(self.bands) == 0: raise ValueError("no bands") else: return self.bands[0].size @property def dtype(self): """ Returns bands' dtype """ return self.bands[0].dtype class SimpleBand(object): """ SimpleBand wraps a numpy.ndarray for storage. 
""" def __init__(self, size, dtype, initval=None): self.size = size if initval is None: self._array = np.empty(size, dtype=dtype) else: self._array = np.full(size, initval, dtype=dtype) self.dtype = dtype def getblock(self, yoff, xoff, ny, nx): return self._array[yoff:yoff+ny, xoff:xoff+nx] def setblock(self, yoff, xoff, array): (ny, nx) = array.shape self._array[yoff:yoff+ny, xoff:xoff+nx] = array return class CompressedBand(object): """ CompressedBand is a chunked, blosc-compressed array. """ CHUNKSET = 1 CHUNKUNSET = 0 def __init__(self, size, dtype, chunksize=(256, 256), initval=0): """ Initialize a CompressedBand instance. Parameters ---------- size : tuple of two ints size of band in pixels dtype : type data type of pixel values chunksize : tuple of two ints, optional size of compressed chunks, default (256, 256) initval : value, optional if set, the entire grid is initialized with this value, which should be of *dtype* """ assert len(size) == 2 self.size = size self.dtype = dtype self._chunksize = chunksize self._initval = initval self.nchunkrows = int(ceil(float(size[0])/float(chunksize[0]))) self.nchunkcols = int(ceil(float(size[1])/float(chunksize[1]))) nchunks = self.nchunkrows * self.nchunkcols # Data store self._data = [None for i in range(nchunks)] # 0 => unset # 1 => set self.chunkstatus = np.zeros(nchunks, dtype=np.int8) return def _store(self, array, index): self._data[index] = blosc.compress(array.tostring(), np.dtype(self.dtype).itemsize) self.chunkstatus[index] = self.CHUNKSET return def _retrieve(self, index): bytestr = blosc.decompress(self._data[index], as_bytearray=True) return np.frombuffer(bytestr, dtype=self.dtype).reshape(self._chunksize) def _getchunks(self, yoff, xoff, ny, nx): """ Return a generator returning tuples identifying chunks covered by a range. The tuples contain (chunk_number, ystart, yend, xstart, xend) for each chunk touched by a region defined by corner indices and region size. 
""" chunksize = self._chunksize ystart = yoff // chunksize[0] yend = ceil(float(yoff+ny) / chunksize[0]) xstart = xoff // chunksize[1] xend = ceil(float(xoff+nx) / chunksize[1]) nxchunks = int(ceil(float(self.size[1])/float(chunksize[1]))) i = ystart while i < yend: j = xstart while j < xend: chunk_number = i*nxchunks + j chunk_ystart = i*chunksize[0] chunk_xstart = j*chunksize[1] chunk_yend = min((i+1)*chunksize[0], self.size[0]) chunk_xend = min((j+1)*chunksize[1], self.size[1]) yield (chunk_number, chunk_ystart, chunk_yend, chunk_xstart, chunk_xend) j += 1 i+= 1 def setblock(self, yoff, xoff, array): """ Store block of values in *array* starting at offset *yoff*, *xoff*. """ size = array.shape[:2] chunksize = self._chunksize for i, yst, yen, xst, xen in self._getchunks(yoff, xoff, *size): # Get from data store if self.chunkstatus[i] == self.CHUNKSET: chunkdata = self._retrieve(i) else: chunkdata = np.full(self._chunksize, self._initval, dtype=self.dtype) # Compute region within chunk to place data in cy0 = max(0, yoff-yst) cy1 = min(chunksize[0], yoff+size[0]-yst) cx0 = max(0, xoff-xst) cx1 = min(chunksize[1], xoff+size[1]-xst) # Compute region to slice from data dy0 = max(0, yst-yoff) dy1 = min(size[0], yen-yoff) dx0 = max(0, xst-xoff) dx1 = min(size[1], xen-xoff) chunkdata[cy0:cy1, cx0:cx1] = array[dy0:dy1, dx0:dx1] # Return to data store self._store(chunkdata, i) return def getblock(self, yoff, xoff, ny, nx): """ Retrieve values with dimensions *size*, starting at offset *yoff*, *xoff*. 
""" result = np.empty([ny, nx], self.dtype) for i, yst, yen, xst, xen in self._getchunks(yoff, xoff, ny, nx): # Compute the bounds in the output oy0 = max(0, yst-yoff) oy1 = min(ny, yen-yoff) ox0 = max(0, xst-xoff) ox1 = min(nx, xen-xoff) if self.chunkstatus[i] == self.CHUNKUNSET: result[oy0:oy1, ox0:ox1] = np.full((oy1-oy0, ox1-ox0), self._initval, dtype=self.dtype) else: # Compute the extents from the chunk to retain cy0 = max(yoff, yst) - yst cy1 = min(yoff+ny, yen) - yst cx0 = max(xoff, xst) - xst cx1 = min(xoff+nx, xen) - xst result[oy0:oy1, ox0:ox1] = self._retrieve(i)[cy0:cy1, cx0:cx1] return result
33.287206
95
0.543337
1,618
12,749
4.211372
0.158838
0.029058
0.013208
0.013208
0.395656
0.33079
0.306721
0.288817
0.280305
0.280305
0
0.022151
0.33783
12,749
382
96
33.374346
0.785004
0.183073
0
0.412017
0
0
0.04303
0
0
0
0
0.002618
0.004292
1
0.072961
false
0
0.017167
0.004292
0.175966
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a2908a25439c4d98ae8dc45dda5930a19f48d95
16,580
py
Python
engine/views.py
Hetvi07/Keep--Blogging
89359f3d6bffda4e622bd293f815271fc9dd67b3
[ "MIT" ]
4
2020-10-07T15:08:24.000Z
2021-11-10T08:27:58.000Z
engine/views.py
Hetvi07/Keep--Blogging
89359f3d6bffda4e622bd293f815271fc9dd67b3
[ "MIT" ]
1
2018-03-13T16:52:35.000Z
2018-03-15T15:10:08.000Z
engine/views.py
briefausde/djs
aeb44debaa9708090e7e343da45de8fa9d6ba8e8
[ "MIT" ]
4
2020-10-09T19:36:00.000Z
2021-11-07T19:49:40.000Z
from math import floor from django.contrib.auth.mixins import LoginRequiredMixin from django.db.models import Q from django.http import HttpResponseRedirect, HttpResponse, HttpResponseForbidden from django.shortcuts import get_object_or_404 from django.template.context_processors import csrf from django.views import generic from django.urls import reverse_lazy from engine.utils import paginator from django.utils.html import strip_tags from .forms import * from .models import * from rest_framework import viewsets, permissions from rest_framework.permissions import IsAdminUser, IsAuthenticatedOrReadOnly from engine.serializers import * # TODO hide user email from api and from profile # TODO fix password_reset_confirm, post moderation, images upload # TODO convert notifications to socket # Mixin views class StaffRequiredMixin(LoginRequiredMixin): raise_exception = True def dispatch(self, request, *args, **kwargs): if not request.user.is_staff: return self.handle_no_permission() return super(StaffRequiredMixin, self).dispatch(request, *args, **kwargs) class AuthorRequiredMixin(LoginRequiredMixin): raise_exception = True def dispatch(self, request, *args, **kwargs): obj = self.get_object() if not obj.author == self.request.user and not self.request.user.is_staff: return self.handle_no_permission() return super(AuthorRequiredMixin, self).dispatch(request, *args, **kwargs) # Register view class RegisterView(generic.CreateView): model = User form_class = RegisterForm template_name = "registration/_register.html" def get_success_url(self): return reverse_lazy("accounts:register_done") # Logs views class LogMixin(object): def dispatch(self, request, *args, **kwargs): ip = request.META.get('REMOTE_ADDR', '') or request.META.get('HTTP_X_FORWARDED_FOR', '') Log.objects.create(ip=ip, author=request.user, method=request.method, path=request.path, body=str(request.body).strip(), cookies=str(request.COOKIES), meta=str(request.META), date=timezone.now() ) return super(LogMixin, 
self).dispatch(request, *args, **kwargs) class LogsView(StaffRequiredMixin, generic.TemplateView): template_name = 'engine/logs_view.html' def get_context_data(self, **kwargs): context = super(LogsView, self).get_context_data() context['filter'] = self.request.GET.get('filter') context['path'] = self.request.GET.get('path') context['ip'] = self.request.GET.get('ip') context['author'] = self.request.GET.get('author') context['data'] = self.request.GET.get('data') return context class LogsListView(StaffRequiredMixin, generic.ListView): model = Log context_object_name = 'logs' template_name = 'engine/logs_list.html' def get_queryset(self): filters = self.request.GET.get('filter') if filters == "path": return self.model.objects.filter(path=self.request.GET.get(filters)).order_by('-date')[0:500] if filters == "ip": return self.model.objects.filter(ip=self.request.GET.get(filters)).order_by('-date')[0:500] if filters == "author": return self.model.objects.filter(author=self.request.GET.get(filters)).order_by('-date')[0:500] if filters == "data": return self.model.objects.filter(data=self.request.GET.get(filters)).order_by('-date')[0:500] return self.model.objects.all().order_by('-date')[0:500] class LogDetailsView(StaffRequiredMixin, generic.DetailView): model = Log context_object_name = 'log' template_name = 'engine/logs_detail.html' # Feedback views class FeedbackSendView(LogMixin, generic.CreateView): model = Feedback fields = ['email', 'subject', 'message'] template_name = "engine/form_default.html" success_url = "/" class FeedbackListView(StaffRequiredMixin, LogMixin, generic.ListView): model = Feedback context_object_name = 'feedback_list' template_name = "engine/feedback_list.html" def get_queryset(self): return self.model.objects.all().order_by('-date')[0:100] class FeedbackDetailsView(StaffRequiredMixin, LogMixin, generic.DetailView): model = Feedback context_object_name = 'feedback' template_name = "engine/feedback_detail.html" class 
FeedbackAnsweredView(StaffRequiredMixin, LogMixin, generic.UpdateView): model = Feedback fields = ['status'] success_url = "/" template_name = "engine/base.html" def get_object(self, queryset=None): return get_object_or_404(self.model, pk=self.request.POST.get('pk')) def post(self, *args, **kwargs): feedback = self.get_object() feedback.status = not feedback.status feedback.save() return self.get(self, *args, **kwargs) # Users views class UserDetailsView(LogMixin, generic.DetailView): model = User context_object_name = 'user' template_name = 'engine/user.html' def get_context_data(self, **kwargs): context = super(UserDetailsView, self).get_context_data(**kwargs) context['posts'] = Post.objects.filter(author__username=self.kwargs['username']) if self.request.user.is_authenticated: if self.request.user.author_subscriber.filter(author__username=self.kwargs['username']): context['subscribe'] = True return context def get_object(self): return get_object_or_404(self.model, username=self.kwargs['username']) class UserEditView(LoginRequiredMixin, LogMixin, generic.UpdateView): model = Profile fields = ['description', 'img'] template_name = 'engine/form_default.html' def get_object(self, queryset=None): return self.model.objects.get(user__username=self.request.user) def get_success_url(self): return reverse('user_detail', args=(self.object.user.username,)) class UserChangeEmailView(LoginRequiredMixin, LogMixin, generic.UpdateView): model = User fields = ['email'] template_name = 'engine/form_default.html' def get_object(self, queryset=None): return self.model.objects.get(username=self.request.user) def get_success_url(self): return reverse('user_detail', args=(self.object.username,)) # Notifications views class SubscribeOnUserNotificationsView(LoginRequiredMixin, LogMixin, generic.View): def post(self, *args, **kwargs): if not self.request.user.author_subscriber.filter(author__username=self.request.POST.get('author')): AuthorSubscriber.objects.create( 
author__username=self.request.POST.get('author'), subscriber=self.request.user ) return HttpResponseRedirect('/') class UnSubscribeFromUserNotificationsView(LoginRequiredMixin, LogMixin, generic.DeleteView): model = AuthorSubscriber success_url = '/' def get_object(self, queryset=None): return get_object_or_404(AuthorSubscriber, subscriber=self.request.user, author__username=self.request.POST.get('author')) class SubscribeOnPostNotificationsView(LoginRequiredMixin, LogMixin, generic.View): def post(self, *args, **kwargs): if not self.request.user.post_subscriber.filter(post__pk=self.request.POST.get('pk')): PostSubscriber.objects.create( post__pk=self.request.POST.get('pk'), subscriber=self.request.user ) return HttpResponseRedirect('/') class UnSubscribeFromPostNotificationsView(LoginRequiredMixin, LogMixin, generic.DeleteView): model = PostSubscriber success_url = '/' def get_object(self, queryset=None): return get_object_or_404(PostSubscriber, subscriber=self.request.user, post__pk=self.request.POST.get('pk')) class NotificationsListView(LoginRequiredMixin, LogMixin, generic.ListView): model = Notification context_object_name = 'notifications' template_name = 'engine/notifications.html' def get_queryset(self): posts = self.model.objects.filter(author_subscriber__subscriber=self.request.user).order_by('-pk') comments = self.model.objects.filter(post_subscriber__subscriber=self.request.user).order_by('-pk') notifications = posts | comments return notifications[0:100] class NotificationsCountView(LoginRequiredMixin, generic.View): def post(self, *args, **kwargs): notifications = Notification.objects.filter(status=False).filter( Q(post_subscriber__subscriber=self.request.user) | Q(author_subscriber__subscriber=self.request.user) ).count() return HttpResponse(notifications) class NotificationViewedView(LoginRequiredMixin, LogMixin, generic.UpdateView): model = Notification fields = ['status'] success_url = "/" template_name = "engine/base.html" def get_object(self, 
queryset=None): notification = get_object_or_404(Notification, pk=self.request.POST.get("pk")) if notification.post: owner = notification.author_subscriber.subscriber.username if notification.comment: owner = notification.post_subscriber.subscriber.username if owner == self.request.user.username: return notification return HttpResponseForbidden() def post(self, *args, **kwargs): notification = self.get_object() notification.status = True notification.save() return self.get(self, *args, **kwargs) class NotificationDeleteView(LoginRequiredMixin, LogMixin, generic.DeleteView): model = Notification success_url = '/' def get_object(self, queryset=None): notification = get_object_or_404(Notification, pk=self.request.POST.get("pk")) if notification.post: owner = notification.author_subscriber.subscriber.username if notification.comment: owner = notification.post_subscriber.subscriber.username if owner == self.request.user.username: return notification return HttpResponseForbidden() # Comments views class CommentsListView(generic.ListView): model = Comment context_object_name = 'comments' template_name = 'engine/comments.html' def get_queryset(self): return self.model.objects.filter(post__pk=self.kwargs['post_id']).order_by('-pk') class CommentAddView(LoginRequiredMixin, LogMixin, generic.CreateView): model = Comment fields = ['text'] template_name = "engine/comments.html" success_url = '/' def form_valid(self, form): form.instance.author = self.request.user form.instance.post = get_object_or_404(Post, pk=self.kwargs['post_id']) return super(CommentAddView, self).form_valid(form) class CommentDeleteView(AuthorRequiredMixin, LogMixin, generic.DeleteView): model = Comment success_url = '/' def get_object(self, queryset=None): return get_object_or_404(Comment, pk=self.request.POST.get("id")) # Posts views class PostMixin: form_class = PostForm model = Post class PostCreateView(PostMixin, LoginRequiredMixin, LogMixin, generic.CreateView): template_name = 
'engine/form_default.html' def get_context_data(self, **kwargs): context = super(PostCreateView, self).get_context_data() context.update(csrf(self.request)) return context def form_valid(self, form): form.instance.author = self.request.user return super(PostCreateView, self).form_valid(form) class PostEditView(PostMixin, AuthorRequiredMixin, LogMixin, generic.UpdateView): template_name = 'engine/form_default.html' def get_context_data(self, **kwargs): context = super(PostEditView, self).get_context_data(**kwargs) context['button_delete_show'] = True return context def get_success_url(self): return reverse('post_detail', args=(self.object.url,)) class PostDeleteView(PostMixin, AuthorRequiredMixin, LogMixin, generic.DeleteView): success_url = '/' def post(self, request, *args, **kwargs): post = self.get_object() if self.request.POST.get("confirm_delete"): post.delete() return HttpResponseRedirect(self.success_url) elif self.request.POST.get("cancel"): return HttpResponseRedirect(post.get_absolute_url()) return self.get(self, *args, **kwargs) def get_object(self, queryset=None): return get_object_or_404(Post, pk=self.kwargs['pk']) # Main page class PostsListView(PostMixin, LogMixin, generic.ListView): context_object_name = 'posts' template_name = 'engine/post_list.html' def get_context_data(self, **kwargs): context = super(PostsListView, self).get_context_data(**kwargs) category = self.kwargs.get('category_name', 'all') pk = self.kwargs.get('pk', 1) posts = [] if category != "all": context['category'] = category if get_object_or_404(Category, name=category): posts = Post.objects.filter(category__name=category, created_date__lte=timezone.now()).order_by('category', '-created_date') else: if pk != 1: context['category'] = "all" posts = Post.objects.filter(created_date__lte=timezone.now()).order_by('-created_date') context['posts'] = paginator(posts, pk, 15) return context class PostDetailsView(PostMixin, LogMixin, generic.DetailView): context_object_name = 'post' 
template_name = 'engine/post_detail.html' def get_context_data(self, **kwargs): post = self.get_object() post.update_views() context = super(PostDetailsView, self).get_context_data() time = floor(len(post.text_big) * 0.075 / 60) + 1 # move to models context['read_time'] = time context['user'] = self.request.user context['text_big'] = strip_tags(post.text_big).replace('\r\n', '<br>') if self.request.user.is_authenticated: if self.request.user.post_subscriber.filter(post__pk=post.pk): context['subscribe'] = True return context def get_object(self): return get_object_or_404(Post, url=serialize_url(self.kwargs['name'])) # Search view class SearchListView(LogMixin, generic.ListView): model = Post context_object_name = 'posts' template_name = "engine/search.html" def get_context_data(self, **kwargs): context = super(SearchListView, self).get_context_data() word = self.request.GET.get('q', '') context['search_text'] = word if (len(word) < 3) or (len(word) > 120): context['text'] = "Search query should be from 3 to 120 characters" else: posts = Index.find(word) if posts: self.template_name = "engine/post_list.html" context['posts'] = paginator(posts, self.request.GET.get('pk', 1), 15) context['query'] = word else: context['text'] = "Nothing found" return context # API class IsOwnerOrIsStaffOrReadOnly(permissions.BasePermission): def has_object_permission(self, request, view, obj): if request.method in permissions.SAFE_METHODS: return True return obj.author == request.user or request.user.is_staff class UserViewSet(viewsets.ModelViewSet): queryset = User.objects.all().order_by('-date_joined') serializer_class = UserSerializer class GroupViewSet(viewsets.ModelViewSet): permission_classes = (IsAdminUser,) queryset = Group.objects.all() serializer_class = GroupSerializer class PostViewSet(viewsets.ModelViewSet): permission_classes = (IsOwnerOrIsStaffOrReadOnly, IsAuthenticatedOrReadOnly,) queryset = Post.objects.all().order_by('-pk') serializer_class = PostSerializer def 
perform_create(self, serializer): serializer.save(author=self.request.user) def perform_update(self, serializer): serializer.save(author=self.request.user) class CategoryViewSet(viewsets.ModelViewSet): queryset = Category.objects.all().order_by('-pk') serializer_class = CategorySerializer
34.541667
115
0.679614
1,856
16,580
5.917565
0.137392
0.055085
0.034144
0.015296
0.486206
0.393881
0.342711
0.278886
0.252117
0.216425
0
0.006555
0.208745
16,580
479
116
34.613779
0.830627
0.018034
0
0.354167
0
0
0.069966
0.023117
0
0
0
0.002088
0
1
0.119048
false
0
0.044643
0.044643
0.654762
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
7a293597c12a4586fa8facd552e68582538dca9a
798
py
Python
main_boyd.py
Ratchet-Studios/HashCode2019Qualifier
44af942ac48212632d1f0fb1d6408a91975153ed
[ "MIT" ]
null
null
null
main_boyd.py
Ratchet-Studios/HashCode2019Qualifier
44af942ac48212632d1f0fb1d6408a91975153ed
[ "MIT" ]
null
null
null
main_boyd.py
Ratchet-Studios/HashCode2019Qualifier
44af942ac48212632d1f0fb1d6408a91975153ed
[ "MIT" ]
null
null
null
import InputOutput files = ["a_example.txt", "b_lovely_landscape.txt", "c_memorable_moments.txt", "d_pet_pictures.txt", "e_shiny_selfies.txt"] data = InputOutput.Data("input_data/" + files[0]) """ data.photos[photo_id][0] - photo id data.photos[photo_id][1] - 'h' or 'v' data.photos[photo_id][2] - number of tags of this photo data.photos[photo_id][3] - a list, all the tags of that photo data.num_photos - the number of photos in the collection in total data.tags[my_tag][0] - unique id for each tag data.tags[my_tag][1] - counter for how often that tag occurs """ output = InputOutput.Output() """ output.add_slide(index_of_slide_in_show, [photo_id_0, optional_id_1]) output.write(my_output_file_name) """ # Boyd's solution def get_score(slide1, slide2):
27.517241
69
0.70802
133
798
4.022556
0.511278
0.078505
0.11215
0.127103
0
0
0
0
0
0
0
0.016369
0.157895
798
28
70
28.5
0.779762
0.018797
0
0
0
0
0.359322
0.152542
0
0
0
0
0
0
null
null
0
0.111111
null
null
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
7a29de7a43e2c76cd682a22718fc79113ae69a57
2,789
py
Python
Kepler.py
wongongv/scholarship_wonjun
b46a621a756782bf0929ef96738bf484afd1708e
[ "MIT" ]
null
null
null
Kepler.py
wongongv/scholarship_wonjun
b46a621a756782bf0929ef96738bf484afd1708e
[ "MIT" ]
null
null
null
Kepler.py
wongongv/scholarship_wonjun
b46a621a756782bf0929ef96738bf484afd1708e
[ "MIT" ]
null
null
null
import tensorflow as tf import matplotlib.pyplot as plt import pdb import numpy as np import pandas as pd from tensorflow.keras import layers sample_num = 500000 # coeff = tf.cast(4*np.pi*np.pi/(6.673*10**-11), dtype = tf.float32) coeff = tf.cast(1, dtype = tf.float32) #try the range of 10**5 ~10**7 for both mass and radius # radius = tf.random.normal(shape = [sample_num,1], mean = 0, dtype = tf.float32) # massinv = tf.random.normal(shape = [sample_num,1], mean = 0, dtype = tf.float32) radius = tf.random.truncated_normal(shape = [sample_num,1], mean = 2, stddev = 0.5, dtype = tf.float32) massinv = tf.random.truncated_normal(shape = [sample_num,1], mean = 2, stddev = 0.5, dtype = tf.float32) period = radius ** 3 * massinv * coeff def normalize(data): if isinstance(data, tf.Tensor): data = data.numpy() data = (data - np.mean(data)) / np.std(data) return tf.cast(data, dtype = tf.float64) def denorm(data, denorm_factor): # denorm_factor is a tuple of (mean, std) return data * denorm_factor[1] + denorm_factor[0] data = tf.stack([radius, massinv], axis = 1) data = tf.squeeze(data) normed_label = normalize(period) denorm_factor = (np.mean(period.numpy()), np.std(period.numpy())) def build_model(): model = tf.keras.Sequential([layers.Dense(17), layers.BatchNormalization(), layers.Activation('sigmoid'), layers.Dense(17), layers.BatchNormalization(), layers.Activation('sigmoid'), layers.Dense(1)]) model.compile(optimizer = tf.keras.optimizers.Adam(0.0001), loss = 'mse', metrics = ['mape', 'mae', 'mse']) return model model = build_model() history = model.fit(data, normed_label, epochs = 50, validation_split = 0.2, batch_size = 64, verbose =1) def plot_history(history): hist = pd.DataFrame(history.history) hist['epochs'] = history.epoch plt.figure() plt.xlabel('epochs') plt.ylabel('mae') plt.plot(hist['epochs'], hist['mae'], label = 'train_mae') plt.plot(hist['epochs'], hist['val_mae'], label = 'val_mae') plt.legend() plt.figure() plt.xlabel('epochs') plt.ylabel('mse') 
plt.plot(hist['epochs'], hist['mse'], label = 'train_mse') plt.plot(hist['epochs'], hist['val_mse'], label = 'val_mse') plt.legend() plt.show() plot_history(history) sun_earth = {'radius': [2440*10**6, 3390*10**6, 6052*10**6],'mass':[(3.3*10**23)**-1, (6.4*10**23)**-1, (4.87*10**24)**-1]} sun_earth_data = np.stack([sun_earth['radius'], sun_earth['mass']], axis = 1) result1 = model.predict(sun_earth_data) result = denorm(result1,denorm_factor) print(result) #수 화 금 # 수성 0.2409 # 화성 1.8809 # 금성 0.6102 # 지구 1.0000
34.432099
123
0.630333
407
2,789
4.235872
0.324324
0.028422
0.048724
0.046404
0.312645
0.312645
0.24536
0.207077
0.207077
0.207077
0
0.06062
0.201506
2,789
80
124
34.8625
0.713516
0.131947
0
0.172414
0
0
0.061021
0
0
0
0
0
0
1
0.068966
false
0
0.103448
0.017241
0.224138
0.017241
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a2ad667b00c796f41fd7d66fe4913a3f7cf25d9
16,727
py
Python
ScPlayback.py
karinharp/EM-uNetPi
a2bf585a039ef228bc34d558941865088da259b9
[ "MIT" ]
42
2018-08-21T02:49:52.000Z
2022-02-07T15:49:05.000Z
ScPlayback.py
karinharp/EM-uNetPi
a2bf585a039ef228bc34d558941865088da259b9
[ "MIT" ]
null
null
null
ScPlayback.py
karinharp/EM-uNetPi
a2bf585a039ef228bc34d558941865088da259b9
[ "MIT" ]
6
2018-10-05T14:32:58.000Z
2022-02-07T06:09:22.000Z
import sys, getopt, struct, time, termios, fcntl, sys, os, colorsys, threading, time, datetime, subprocess, random, os.path, math, json sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/fbtft') from RenderManager import RenderManager from WanemManager import WanemManager from ScBase import ScBase from gfx import Rect from DataAsset import CTX from SeekManager import SeekManager class ScPlayback(ScBase): def __init__(self, pCTX, pRender, pWanem): super(ScPlayback, self).__init__(pCTX, pRender, pWanem) self.STATE_IDLE = 1 self.STATE_PLAY = 2 self.STATE_PAUSE = 3 self.ptDef.insert( 0, self.CreateTocuhDef("BtBack", 468, 29, 62, 42, self.BtHandler)) self.ptDef.insert( 1, self.CreateTocuhDef("BtPrev", 470, 95, 43, 90, self.BtHandler)) self.ptDef.insert( 2, self.CreateTocuhDef("BtNext", 65, 95, 43, 90, self.BtHandler)) #self.ptDef.insert(3, self.CreateTocuhDef("BtAuto", 460, 268, 80, 50, self.BtHandler)) self.ptDef.insert( 4, self.CreateTocuhDef("BtStop", 370, 268, 80, 50, self.BtHandler)) self.ptDef.insert( 5, self.CreateTocuhDef("BtPlay", 280, 268, 80, 50, self.BtHandler)) self.ptDef.insert( 6, self.CreateTocuhDef("BtPause", 190, 268, 80, 50, self.BtHandler)) self.ptDef.insert( 7, self.CreateTocuhDef("BtRepeat", 100, 268, 80, 50, self.BtHandler)) #self.ptDef.insert(3, self.CreateTocuhDef("BtTargetL", 430, 95, 120, 90, self.BtHandler)) #self.ptDef.insert(4, self.CreateTocuhDef("BtTargetC", 430, 95, 120, 90, self.BtHandler)) #self.ptDef.insert(5, self.CreateTocuhDef("BtTargetR", 430, 95, 120, 90, self.BtHandler)) def BtHandler(self, key): print "BtHandler" + key + " @ " + str(self.state) if key == "BtBack": if self.state == self.STATE_IDLE: self.pWanem.Clear() self.nextScene = "Replay" self.state = self.STATE_TERM elif key == "BtPrev": if self.state == self.STATE_IDLE: self.UpdatePanel(-1) elif key == "BtNext": if self.state == self.STATE_IDLE: self.UpdatePanel(1) elif key == "BtStop": if self.state == self.STATE_PLAY or self.state == self.STATE_PAUSE: 
self.StopHandler() elif key == "BtPlay": if self.state == self.STATE_IDLE: self.RenderCurrentInfo("PLAYING") self.PlayHandler() elif key == "BtPause": if self.state == self.STATE_PLAY: self.seekManager.isPause = True elif self.state == self.STATE_PAUSE: self.seekManager.isPause = False elif key == "BtRepeat": self.seekManager.isRepeat = not self.seekManager.isRepeat self.RenderToggleFocus(4, self.seekManager.isRepeat) def RenderPanel(self, panelIdx, isActive, isFocus=False, datPath=""): offsetX = 128 * panelIdx if isActive == False: c = self.pRender.ConvRgb(0.31, 0.2, 0.2) self.pRender.fb.draw.rect(c, Rect(52 + offsetX, 84, 120, 90), 0) return targetPath = self.pCTX.currentReplayData + "/" + datPath file = open(targetPath) dat = json.load(file) file.close() mtime = os.path.getmtime(targetPath) t = datetime.datetime.fromtimestamp(mtime) datMtime = t.strftime("%y/%m/%d") c = self.pRender.ConvRgb(0.31, 0.2, 0.8) self.pRender.fb.draw.rect(c, Rect(52 + offsetX, 84, 120, 90), 0) c = self.pRender.ConvRgb(0.31, 0.2, 0.1) self.pRender.fb.putstr(52 + 10 + offsetX, 84 + 10, datPath[0:8], c, 2) c = self.pRender.ConvRgb(0.31, 0.2, 0.1) self.pRender.fb.putstr(52 + 10 + offsetX, 84 + 10 + 12 * 2, "Modify", c, 1) self.pRender.fb.putstr(52 + 10 + 70 + offsetX, 84 + 10 + 12 * 2, "Time", c, 1) self.pRender.fb.putstr(52 + 10 + offsetX, 84 + 10 + 12 * 3, datMtime, c, 1) self.pRender.fb.putstr( 52 + 10 + 70 + offsetX, 84 + 10 + 12 * 3, self.seekManager.Conv2FormatedTime(dat["dps"], dat["duration"]), c, 1) self.pRender.fb.putstr(52 + 10 + offsetX, 84 + 10 + 12 * 4, "Memo", c, 1) self.pRender.fb.putstr(52 + 10 + offsetX, 84 + 10 + 12 * 5, dat["memo"][0:17], c, 1) if isFocus: self.RenderGraph(dat["graph"]) self.seekManager.Setup(dat["dps"], dat["duration"]) self.RenderSeekInfo() self.dat = dat["dat"] def UpdatePanel(self, vec, forceClear=False): prevPageIdx = self.datPageIdx prevFocusIdx = self.datFocusIdx isPageSwitch = forceClear if (self.datPageIdx * 3 + self.datFocusIdx) == 0 and vec 
== -1: return if (self.datPageIdx * 3 + self.datFocusIdx) == (self.datNr - 1) and vec == 1: return if (self.datFocusIdx % 3) == 0 and vec == -1: self.datPageIdx -= 1 isPageSwitch = True elif (self.datFocusIdx % 3) == 2 and vec == 1: self.datPageIdx += 1 isPageSwitch = True self.datFocusIdx = (self.datFocusIdx + vec) % 3 self.ClearFocus(prevFocusIdx) if isPageSwitch: self.datFocusIdx = 0 # Render List # currentIdx = self.datPageIdx * 3 + self.datFocusIdx currentIdxTop = self.datPageIdx * 3 focusIdx = 0 for file in self.datList[currentIdxTop:currentIdxTop + 3]: if focusIdx == self.datFocusIdx: self.RenderPanel(focusIdx, True, True, file) else: self.RenderPanel(focusIdx, True, False, file) focusIdx += 1 for idx in range(focusIdx, 3): self.RenderPanel(idx, False) else: currentIdxTop = self.datPageIdx * 3 focusIdx = 0 for file in self.datList[currentIdxTop:currentIdxTop + 3]: if focusIdx == self.datFocusIdx: targetPath = self.pCTX.currentReplayData + "/" + file file = open(targetPath) dat = json.load(file) file.close() self.RenderGraph(dat["graph"]) self.seekManager.Setup(dat["dps"], dat["duration"]) self.RenderSeekInfo() self.dat = dat["dat"] focusIdx += 1 self.RenderFocus(self.datFocusIdx) def RenderFocus(self, idx): c = self.pRender.ConvRgb(1.00, 0.9, 0.8) self.pRender.fb.draw.rect(c, Rect(48 + 128 * idx, 84 - 4, 128, 4), 0) self.pRender.fb.draw.rect( c, Rect(48 + 128 * idx, 84 - 4 + 90 + 4, 128, 4), 0) self.pRender.fb.draw.rect(c, Rect(48 + 128 * idx, 84, 4, 90), 0) self.pRender.fb.draw.rect(c, Rect(48 + 128 * idx + 124, 84, 4, 90), 0) def RenderToggleFocus(self, idx, isActivey): if idx == 3: xoffset = 0 elif idx == 4: xoffset = 90 else: return if isActivey: c = self.pRender.ConvRgb(1.00, 0.9, 0.8) else: c = self.pRender.N self.pRender.fb.draw.rect(c, Rect(288 + xoffset, 264 - 2, 84, 2), 0) self.pRender.fb.draw.rect(c, Rect(288 + xoffset, 264 - 2 + 50 + 2, 84, 2), 0) self.pRender.fb.draw.rect(c, Rect(288 + xoffset, 264, 2, 50), 0) self.pRender.fb.draw.rect(c, 
Rect(288 + xoffset + 82, 264, 2, 50), 0) def ClearFocus(self, idx): c = self.pRender.ConvRgb(0, 0, 0) self.pRender.fb.draw.rect(c, Rect(48 + 128 * idx, 84 - 4, 128, 4), 0) self.pRender.fb.draw.rect( c, Rect(48 + 128 * idx, 84 - 4 + 90 + 4, 128, 4), 0) self.pRender.fb.draw.rect(c, Rect(48 + 128 * idx, 84, 4, 90), 0) self.pRender.fb.draw.rect(c, Rect(48 + 128 * idx + 124, 84, 4, 90), 0) def RenderFootBt(self, idx, label, h): if idx == 0: x = 200 - 180 elif idx == 1: x = 200 - 90 elif idx == 2: x = 200 + 0 elif idx == 3: x = 200 + 90 elif idx == 4: x = 200 + 180 c = self.pRender.ConvRgb(h, 0.6, 0.6) self.pRender.fb.draw.rect(c, Rect(x, 264, 80, 44), 0) c = self.pRender.ConvRgb(h, 0.6, 0.2) self.pRender.fb.draw.rect(c, Rect(x, 264 + 44, 80, 6), 0) if idx == 3: self.pRender.fb.putstr(x + 4 + 7, 278, label, c, 2) else: self.pRender.fb.putstr(x + 4, 278, label, c, 2) def RenderSeekInfo(self): self.pRender.fb.draw.rect(self.pRender.N, Rect(445, 219, 30, 7), 0) self.pRender.fb.putstr(445, 240 - 21, self.seekManager.GetTotalFormatTime(), self.pRender.W, 1) ################################################################################ def Update(self): isRender = False if self.pCTX.tick == 1: isRender = True if self.state == self.STATE_PLAY: ####################################### if isRender: if self.seekManager.isPause: self.RenderToggleFocus(3, self.seekManager.isPause) self.state = self.STATE_PAUSE return self.seekManager.seekSec += 1 if self.seekManager.seekSec < 0: return if self.seekManager.IsTerm(): if self.seekManager.isRepeat: self.RenderDotAll() self.PlayHandler() self.UpdateSeekTime() else: self.StopHandler() return ####################################### # check Seek diff and force loop and apply. 
# @todo variable fps if self.pCTX.tick % self.seekManager.updateInterval == 0: #datSeek = self.seekSec * 30 + int(self.pCTX.tick / 2) #if self.pCTX.tick >= 60: # print str(self.pCTX.tick) + ":" + str(self.seekManager.seekFrame) + ":" + str(self.seekManager.updateInterval) self.pWanem.DirectApply(self.dat[self.seekManager.seekFrame]) if (self.pCTX.tick % 15) == 0: self.RenderCurrentInfo( "", self.dat[self.seekManager.seekFrame]) self.seekManager.Update(isRender) # nnn.... if isRender: self.UpdateSeekTime() elif self.state == self.STATE_PAUSE: if not self.seekManager.isPause: self.RenderToggleFocus(3, self.seekManager.isPause) self.state = self.STATE_PLAY return ################################################################################ def RenderDotAll(self): for idx in range(0, self.seekManager.progressBarResolution): self.RenderDot(idx, False) def RenderDot(self, idx, isFlush): w = 10 h = 10 if isFlush: c = self.pRender.ConvRgb(0.4, 1, 1) else: c = self.pRender.ConvRgb(0.4, 0.3, 0.3) xoffset = 11 * idx + 20 self.pRender.fb.draw.rect(c, Rect(xoffset, 238, w, h), 0) def RenderGraph(self, graphDat): c = self.pRender.ConvRgb(0, 0, 0) self.pRender.fb.draw.rect(c, Rect(20, 186, 440, 30), 0) for idx in range(0, 440): xoffset = idx + 20 h = graphDat[idx] #c = self.pRender.ConvRgb(1.0/440.0*idx,0.8,0.8) c = self.pRender.ConvRgb(1.0 / 30.0 * h, 0.8, 0.8) self.pRender.fb.draw.rect(c, Rect(xoffset, 216 - h, 1, h), 0) # Update block and seek string def UpdateSeekTime(self): if self.seekManager.seekLap >= self.seekManager.progressBarResolution: return self.pRender.fb.draw.rect(self.pRender.N, Rect(224, 219, 30, 7), 0) self.pRender.fb.putstr(224, 219, self.seekManager.GetCurrentFormatTime(), self.pRender.W, 1) while self.seekManager.IsSeekSecOverCurrentLap(): self.RenderDot(self.seekManager.seekLap, True) self.seekManager.seekLap += 1 if self.seekManager.seekLap >= self.seekManager.progressBarResolution: return self.isBlockFlash = not self.isBlockFlash 
self.RenderDot(self.seekManager.seekLap, self.isBlockFlash) def PlayHandler(self): self.seekManager.Start() self.state = self.STATE_PLAY self.RenderDotAll() def StopHandler(self): self.seekManager.Stop() self.state = self.STATE_IDLE self.UpdateSeekTime() self.RenderDotAll() self.RenderToggleFocus(3, self.seekManager.isPause) self.RenderCurrentInfo("STOP", 0) self.pWanem.DirectApply(0) def Start(self): super(ScPlayback, self).Start() ##[ INIT STATE ]################################################################ self.progressBarResolution = 40 self.seekManager = SeekManager(self.pCTX, self.progressBarResolution) self.state = self.STATE_IDLE self.isBlockFlash = False self.dat = None ##[ Get DataDir Info ]###################################################### self.datList = os.listdir(self.pCTX.currentReplayData) self.datList.sort() self.datPageIdx = 0 self.datNr = len(self.datList) self.datFocusIdx = 0 ##[ RENDER ]################################################################ self.pRender.UpdateTitle("WAN Emulation - Replay") self.pRender.UpdateSubTitle("Dat Path : " + self.pCTX.currentReplayData) c = yellow = self.pRender.fb.rgb(255, 255, 0) self.pRender.fb.draw.rect(c, Rect(0, 54, self.pRender.xres, 1), 0) self.pRender.fb.draw.rect(c, Rect(0, 74, self.pRender.xres, 1), 0) self.pRender.fb.draw.rect(c, Rect(0, 54, 10 + 60, 20), 0) self.pRender.fb.draw.rect(c, Rect(480 - 10, 54, 10, 20), 0) self.pRender.fb.putstr(26, 54 + 7, ">>>", self.pRender.N, 1) ###################### self.UpdatePanel(0, True) c = self.pRender.ConvRgb(0.31, 0.2, 0.2) self.pRender.fb.draw.rect(c, Rect(1, 84, 43, 90), 0) self.pRender.fb.draw.rect(c, Rect(480 - 44, 84, 43, 90), 0) self.pRender.fb.putstr(10, 84 + 29, '<', 0, 4) self.pRender.fb.putstr(480 - 34, 84 + 29, '>', 0, 4) ###################### c = self.pRender.ConvRgb(0.16, 1, 0.6) #self.pRender.fb.draw.rect(c, Rect(1, 240 - 54, self.pRender.xres-2, 1), 0) self.pRender.fb.putstr(5, 240 - 21, "00:00", self.pRender.W, 1) 
self.UpdateSeekTime() self.pRender.fb.draw.rect(c, Rect(1, 240 - 12, self.pRender.xres - 2, 1), 0) self.pRender.fb.draw.rect(c, Rect(1, 240 + 18, self.pRender.xres - 2, 1), 0) #self.RenderFootBt(0, " Auto", 0.16) self.RenderFootBt(1, " Stop", 0.36) self.RenderFootBt(2, " Play", 0.36) self.RenderFootBt(3, "Pause", 0.16) self.RenderFootBt(4, "Repeat", 0.16) self.RenderDotAll() self.RenderBackBt(True) self.RenderCurrentInfo("STOP", 0) self.pRender.fb.putstr(12 + 54, 268 + 24 + 6, "msec", self.pRender.W, 1) self.pWanem.InitSingle() def RenderCurrentInfo(self, state="", delay=-1): if state != "": self.pRender.fb.draw.rect(self.pRender.N, Rect(12, 268, 84, 16), 0) self.pRender.fb.putstr(12, 268, state, self.pRender.W, 2) if delay >= 0: self.pRender.fb.draw.rect(self.pRender.N, Rect(12, 268 + 24, 50, 16), 0) self.pRender.fb.putstr(12, 268 + 24, "%04d" % delay, self.pRender.W, 2)
38.9
135
0.511987
1,956
16,727
4.362474
0.140082
0.110864
0.077698
0.063752
0.512364
0.465604
0.404195
0.386617
0.313489
0.232509
0
0.078169
0.335386
16,727
429
136
38.990676
0.689395
0.057512
0
0.366972
0
0
0.018195
0
0
0
0
0.002331
0
0
null
null
0
0.021407
null
null
0.003058
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
7a2b60eeeb9c5a441e5b481a07de842558d9a0f8
1,589
py
Python
IOPool/Output/test/PoolOutputTestUnscheduled_cfg.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
852
2015-01-11T21:03:51.000Z
2022-03-25T21:14:00.000Z
IOPool/Output/test/PoolOutputTestUnscheduled_cfg.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
30,371
2015-01-02T00:14:40.000Z
2022-03-31T23:26:05.000Z
IOPool/Output/test/PoolOutputTestUnscheduled_cfg.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
3,240
2015-01-02T05:53:18.000Z
2022-03-31T17:24:21.000Z
# CMSSW job configuration: exercises PoolOutputModule with unscheduled
# (Task-driven) production and EDAlias-mediated product access.
import FWCore.ParameterSet.Config as cms

process = cms.Process("TESTOUTPUT")

# Make framework exceptions fatal so test failures abort the job.
process.load("FWCore.Framework.test.cmsExceptionsFatal_cff")

# Process 20 events (source is an EmptySource, defined below).
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(20)
)

# Producers; none is on a Path, so they run unscheduled via the Task below.
process.Thing = cms.EDProducer("ThingProducer")
process.OtherThing = cms.EDProducer("OtherThingProducer")
process.thingWithMergeProducer = cms.EDProducer("ThingWithMergeProducer")

process.intProducer1 = cms.EDProducer("IntProducer",
    ivalue = cms.int32(7)
)

process.intProducer2 = cms.EDProducer("IntProducer",
    ivalue = cms.int32(11)
)

# Aliases exposing each producer's edmtestIntProduct under an alternate label.
process.aliasForInt1 = cms.EDAlias(
    intProducer1 = cms.VPSet(
        cms.PSet(type = cms.string('edmtestIntProduct'))
    )
)

process.aliasForInt2 = cms.EDAlias(
    intProducer2 = cms.VPSet(
        cms.PSet(type = cms.string('edmtestIntProduct'))
    )
)

# Output keeps everything except the int producers and aliasForInt1;
# note aliasForInt2 is deliberately NOT dropped.
process.output = cms.OutputModule("PoolOutputModule",
    fileName = cms.untracked.string('file:PoolOutputTestUnscheduled.root'),
    outputCommands = cms.untracked.vstring(
        'keep *',
        'drop *_intProducer1_*_*',
        'drop *_aliasForInt1_*_*',
        'drop *_intProducer2_*_*'
    )
)

# Consumes via aliasForInt1, triggering unscheduled running of intProducer1.
# Expected sum: ivalue 7 per event x 20 events = 140.
process.getInt = cms.EDAnalyzer("TestFindProduct",
    inputTags = cms.untracked.VInputTag(
        cms.InputTag("aliasForInt1"),
    ),
    expectedSum = cms.untracked.int32(140)
)

process.source = cms.Source("EmptySource")

# Task makes the producers available for unscheduled execution.
process.t = cms.Task(process.Thing, process.OtherThing, process.thingWithMergeProducer, process.intProducer1, process.intProducer2)
process.path1 = cms.Path(process.getInt, process.t)
process.ep = cms.EndPath(process.output)
26.04918
131
0.721838
159
1,589
7.150943
0.408805
0.063325
0.029903
0.05277
0.158311
0.158311
0.091469
0.091469
0.091469
0
0
0.021339
0.144745
1,589
60
132
26.483333
0.815305
0
0
0.045455
0
0
0.206049
0.063642
0
0
0
0
0
1
0
false
0
0.022727
0
0.022727
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a2b807cb521dd6964689b0a0c90c6958262c724
70,390
py
Python
pysnmp-with-texts/A3COM-HUAWEI-SYS-MAN-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
8
2019-05-09T17:04:00.000Z
2021-06-09T06:50:51.000Z
pysnmp-with-texts/A3COM-HUAWEI-SYS-MAN-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
4
2019-05-31T16:42:59.000Z
2020-01-31T21:57:17.000Z
pysnmp-with-texts/A3COM-HUAWEI-SYS-MAN-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
10
2019-04-30T05:51:36.000Z
2022-02-16T03:33:41.000Z
# # PySNMP MIB module A3COM-HUAWEI-SYS-MAN-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/A3COM-HUAWEI-SYS-MAN-MIB # Produced by pysmi-0.3.4 at Wed May 1 11:07:12 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # h3cCommon, = mibBuilder.importSymbols("A3COM-HUAWEI-OID-MIB", "h3cCommon") OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection") SnmpTagValue, SnmpTagList = mibBuilder.importSymbols("SNMP-TARGET-MIB", "SnmpTagValue", "SnmpTagList") ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance") IpAddress, NotificationType, Counter32, Gauge32, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, iso, Integer32, ModuleIdentity, MibIdentifier, Counter64, ObjectIdentity, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "NotificationType", "Counter32", "Gauge32", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "iso", "Integer32", "ModuleIdentity", "MibIdentifier", "Counter64", "ObjectIdentity", "Bits") TextualConvention, DisplayString, RowStatus, DateAndTime = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "RowStatus", "DateAndTime") h3cSystemMan = ModuleIdentity((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3)) h3cSystemMan.setRevisions(('2004-04-08 13:45',)) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): if mibBuilder.loadTexts: 
h3cSystemMan.setRevisionsDescriptions((' ',)) if mibBuilder.loadTexts: h3cSystemMan.setLastUpdated('200906070000Z') if mibBuilder.loadTexts: h3cSystemMan.setOrganization('Hangzhou H3C Tech. Co., Ltd.') if mibBuilder.loadTexts: h3cSystemMan.setContactInfo('Platform Team Hangzhou H3C Tech. Co., Ltd. Hai-Dian District Beijing P.R. China http://www.h3c.com Zip:100085') if mibBuilder.loadTexts: h3cSystemMan.setDescription('This MIB contains objects to manage the system. It focuses on the display of current configure file and image file,and the definition of reloading image. Add the support for XRN. ') h3cSystemManMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1)) h3cSysClock = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 1)) h3cSysLocalClock = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 1, 1), DateAndTime()).setMaxAccess("readwrite") if mibBuilder.loadTexts: h3cSysLocalClock.setStatus('current') if mibBuilder.loadTexts: h3cSysLocalClock.setDescription(' This node gives the current local time of the system. The unit of it is DateAndTime. ') h3cSysSummerTime = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 1, 2)) h3cSysSummerTimeEnable = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 1, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysSummerTimeEnable.setStatus('current') if mibBuilder.loadTexts: h3cSysSummerTimeEnable.setDescription('This node indicates the status of summer time. If the value of this node is enable, means that summer time is enabled. If the value is disable, means that summer time is disabled. 
') h3cSysSummerTimeZone = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 1, 2, 2), DisplayString()).setMaxAccess("readwrite") if mibBuilder.loadTexts: h3cSysSummerTimeZone.setStatus('current') if mibBuilder.loadTexts: h3cSysSummerTimeZone.setDescription(' This node describes the name of time zone in summer. The string is only used to display in local time when summer time is running. That the value of h3cSysLocalClock has the time zone information means that summer time is running. ') h3cSysSummerTimeMethod = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 1, 2, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("oneOff", 1), ("repeating", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: h3cSysSummerTimeMethod.setStatus('current') if mibBuilder.loadTexts: h3cSysSummerTimeMethod.setDescription(' This node provides the execute method of summer time. oneOff(1): means that summer time only takes effect at specified time. repeating(2): means that summer time takes effect in specified month/day once a year. ') h3cSysSummerTimeStart = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 1, 2, 4), DateAndTime().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readwrite") if mibBuilder.loadTexts: h3cSysSummerTimeStart.setStatus('current') if mibBuilder.loadTexts: h3cSysSummerTimeStart.setDescription(' This node provides the start time of summer time. ') h3cSysSummerTimeEnd = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 1, 2, 5), DateAndTime().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readwrite") if mibBuilder.loadTexts: h3cSysSummerTimeEnd.setStatus('current') if mibBuilder.loadTexts: h3cSysSummerTimeEnd.setDescription(' This node provides the end time of summer time. The end time must be more than start time one day and less than start time one year. 
') h3cSysSummerTimeOffset = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 1, 2, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 86399))).setMaxAccess("readwrite") if mibBuilder.loadTexts: h3cSysSummerTimeOffset.setStatus('current') if mibBuilder.loadTexts: h3cSysSummerTimeOffset.setDescription(' This node provides the offset time of summer time. The offset time(in seconds) means that how much time need to be appended to the local time. ') h3cSysLocalClockString = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(16, 24))).setMaxAccess("readwrite") if mibBuilder.loadTexts: h3cSysLocalClockString.setStatus('current') if mibBuilder.loadTexts: h3cSysLocalClockString.setDescription('This node gives the current local time of the system. For example, Tuesday May 26, 2002 at 1:30:15 would be displayed as: 2002-5-26T13:30:15.0Z') h3cSysCurrent = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 2)) h3cSysCurTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 2, 1), ) if mibBuilder.loadTexts: h3cSysCurTable.setStatus('current') if mibBuilder.loadTexts: h3cSysCurTable.setDescription(' The current status of system. A configuration file, an image file and bootrom information are used to describe the current status. ') h3cSysCurEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 2, 1, 1), ).setIndexNames((0, "A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysCurEntPhysicalIndex")) if mibBuilder.loadTexts: h3cSysCurEntry.setStatus('current') if mibBuilder.loadTexts: h3cSysCurEntry.setDescription(' An entry of h3cSysCurTable. 
') h3cSysCurEntPhysicalIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))) if mibBuilder.loadTexts: h3cSysCurEntPhysicalIndex.setStatus('current') if mibBuilder.loadTexts: h3cSysCurEntPhysicalIndex.setDescription('The value of this object is the entity index which depends on the implementation of ENTITY-MIB. If ENTITY-MIB is not supported, the value for this object is the unit ID for XRN devices , 0 for non-XRN device which has only one mainboard, the board number for non-XRN device which have several mainboards. ') h3cSysCurCFGFileIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysCurCFGFileIndex.setStatus('current') if mibBuilder.loadTexts: h3cSysCurCFGFileIndex.setDescription(' The startup configuration file currently used by the specified entity. If the value of it is zero, no configuration file is used. It will be the value of corresponding h3cSysCFGFileIndex in h3cSysCFGFileTable. ') h3cSysCurImageIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysCurImageIndex.setStatus('current') if mibBuilder.loadTexts: h3cSysCurImageIndex.setDescription('The image file currently used by the specified entity. 
It will be the value of corresponding h3cSysImageIndex in h3cSysImageTable.') h3cSysCurBtmFileName = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 2, 1, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysCurBtmFileName.setStatus('current') if mibBuilder.loadTexts: h3cSysCurBtmFileName.setDescription('The bootrom file currently used by the specified entity.') h3cSysCurUpdateBtmFileName = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 2, 1, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysCurUpdateBtmFileName.setStatus('current') if mibBuilder.loadTexts: h3cSysCurUpdateBtmFileName.setDescription(' The default value of this object is the same as the value of h3cSysCurBtmFileName. The value will be changed after updating the bootrom successfully. This bootrom will take effect on next startup. ') h3cSysReload = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3)) h3cSysReloadSchedule = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite") if mibBuilder.loadTexts: h3cSysReloadSchedule.setStatus('current') if mibBuilder.loadTexts: h3cSysReloadSchedule.setDescription(' The object points one row in h3cSysReloadScheduleTable. Its value is equal to the value of h3cSysReloadScheduleIndex. When a reload action is finished, the value of it would be zero which means no any reload schedule is selected. 
') h3cSysReloadAction = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("reloadUnavailable", 1), ("reloadOnSchedule", 2), ("reloadAtOnce", 3), ("reloadCancel", 4)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: h3cSysReloadAction.setStatus('current') if mibBuilder.loadTexts: h3cSysReloadAction.setDescription(" Writing reloadOnSchedule(2) to this object performs the reload operation on schedule. If h3cSysReloadScheduleTime is not set, the value of h3cSysReloadAction can't be set to 'reloadOnSchedule(2)'. Writing reloadAtOnce(3)to this object performs the reload operation at once, regardless of the h3cSysReloadScheduleTime. When reloadCancel(4)is set, the scheduled reload action will be cancelled and the value of h3cSysReloadAction will be 'reloadUnavailable(1)',the value of h3cSysReloadSchedule will be 0, h3cSysReloadTag will be given a value of zero length, but the content of h3cSysReloadScheduleTable will remain. The h3cSysReloadSchedule and h3cSysReloadTag determine the reload entity(ies) in mutually exclusive way. And the h3cSysReloadSchedule will be handled at first. If the value of h3cSysReloadSchedule is invalid, then the h3cSysReloadTag will be handled. If the value of h3cSysReloadSchedule is valid, the value of h3cSysReloadTag is ignored and a reload action will be implemented to the entity specified by h3cSysReloadEntity in the entry pointed by h3cSysReloadSchedule. If h3cSysReloadSchedule is valid, but the entry h3cSysReloadSchedule pointing to is not active, the reload action will be ignored , and an inconsistent value will be returned. If multiple entities are required to be reloaded at the same time, the value of h3cSysReloadTag must be specified to select the reload parameters in the h3cSysReloadSceduelTable, and h3cSysReloadSchedule must have the value of '0'. 
If the whole fabric is to be reloaded in an XRN device, all the units in the fabric must have at least one entry in the h3cSysReloadSceduelTable with the same tag in h3cSysReloadSceduelTagList. When a reload action is done, or there is no reload action, the value should be reloadUnavailable(1). ") h3cSysReloadScheduleTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 3), ) if mibBuilder.loadTexts: h3cSysReloadScheduleTable.setStatus('current') if mibBuilder.loadTexts: h3cSysReloadScheduleTable.setDescription(' A reload parameters set table. The table is exclusively used for reloading. When reloading action finished, the value of the table may be empty or still exist. If the mainboard in non-XRN device or all the units of the fabric in XRN device are reloaded,then the table will be refreshed. ') h3cSysReloadScheduleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 3, 1), ).setIndexNames((0, "A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadScheduleIndex")) if mibBuilder.loadTexts: h3cSysReloadScheduleEntry.setStatus('current') if mibBuilder.loadTexts: h3cSysReloadScheduleEntry.setDescription('Entry of h3cSysReloadScheduleTable.') h3cSysReloadScheduleIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))) if mibBuilder.loadTexts: h3cSysReloadScheduleIndex.setStatus('current') if mibBuilder.loadTexts: h3cSysReloadScheduleIndex.setDescription('The index of h3cSysReloadScheduleTable. There are two parts for this index depicted as follows: 31 15 0 +++++++++++++++++++++++++++++++++++++++++ + physical index + random index + ( bit 16..31 ) ( bit 0..15 ) +++++++++++++++++++++++++++++++++++++++++ From bit0 to bit15 (two bytes), if the row is automatic created, the value is zero, and if the row is created by users, then the value is determined by the users. From bit16 to bit31 (two bytes) is the physical index the same as the entPhysicalIndex specified in ENTITY-MIB. 
For XRN devices, physical index is the value of a chassis entPhysicalIndex. 0 for non-XRN device which has only one main board, the board number for non-XRN device which have multiple main boards.') h3cSysReloadEntity = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readcreate") if mibBuilder.loadTexts: h3cSysReloadEntity.setStatus('current') if mibBuilder.loadTexts: h3cSysReloadEntity.setDescription(' The value of h3cSysReloadEntity indicates an entry in entPhysicalTable, which is the physical entity to be reloaded. If ENTITY-MIB is not supported,the value for this object is the unit ID for XRN devices , 0 for non-XRN device which has only one mainboard, the board number for non-XRN device which have several mainboards. Each entity has only one row in h3cSysReloadScheduleTable. ') h3cSysReloadCfgFile = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 3, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readcreate") if mibBuilder.loadTexts: h3cSysReloadCfgFile.setStatus('current') if mibBuilder.loadTexts: h3cSysReloadCfgFile.setDescription(' The value indicates an entry in h3cSysCFGFileTable. It defines a configuration file for reload action. It is the value of corresponding h3cSysCFGFileIndex in h3cSysCFGFileTable. The zero value means no configuration file has been set for this entry, and no configuration file is used during system reloading. ') h3cSysReloadImage = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 3, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readcreate") if mibBuilder.loadTexts: h3cSysReloadImage.setStatus('current') if mibBuilder.loadTexts: h3cSysReloadImage.setDescription(' The value indicates an entry in h3cSysImageTable. It defines an image file for reload action. 
It is the value of corresponding h3cSysImageIndex in h3cSysImageTable. If dual image is supported, the main image attribute can be set through this object or by h3cSysImageType of h3cSysImageTable of the entity. It is strongly suggested to set this attribute by the latter. If main image attribute is set here, the h3cSysImageType in h3cSysImageTable of the corresponding entity will be updated, and vice versa. Before reboot, the device will check the validation of the entry. If the file does not exist, the device will not reboot and a trap will be send to NMS. ') h3cSysReloadReason = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 3, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate") if mibBuilder.loadTexts: h3cSysReloadReason.setStatus('current') if mibBuilder.loadTexts: h3cSysReloadReason.setDescription(" The reason of system's reloading. It is a zero length octet string when not set. ") h3cSysReloadScheduleTime = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 3, 1, 6), DateAndTime().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readcreate") if mibBuilder.loadTexts: h3cSysReloadScheduleTime.setStatus('current') if mibBuilder.loadTexts: h3cSysReloadScheduleTime.setDescription(' Specify the local time at which the reload action will occur. we will only take octet strings with length 8 for this object which indicates the local time of the switch. The maximum scheduled interval between the specified time and the current system clock time is 24 days . field octets contents range ----- ------ -------- ----- 1 1-2 year 0..65536 2 3 month 1..12 3 4 day 1..31 4 5 hour 0..23 5 6 minutes 0..59 6 7 seconds 0..60 For example, Tuesday May 26, 1992 at 1:30:15 PM would be displayed as: 1992-5-26,13:30:15 If the set value is less than the value of h3cSysLocalClock or beyond the maximum scheduled time limit, a bad value error occurred. 
The value of all-zero octet strings indicates system reload at once if the reload action is reloadOnSchedule(2). ') h3cSysReloadRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 3, 1, 7), RowStatus()).setMaxAccess("readcreate") if mibBuilder.loadTexts: h3cSysReloadRowStatus.setStatus('current') if mibBuilder.loadTexts: h3cSysReloadRowStatus.setDescription(' If one of the value of h3cSysReloadEntity,h3cSysReloadImage is invalid, the value of h3cSysReloadRowStatus can not be set to the value of ACTIVE. A valid entry means the specified element is available in current system. ') h3cSysReloadScheduleTagList = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 3, 1, 8), SnmpTagList()).setMaxAccess("readcreate") if mibBuilder.loadTexts: h3cSysReloadScheduleTagList.setStatus('current') if mibBuilder.loadTexts: h3cSysReloadScheduleTagList.setDescription(' It specifies a tag list for the entry. ') h3cSysReloadTag = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 3, 4), SnmpTagValue()).setMaxAccess("readwrite") if mibBuilder.loadTexts: h3cSysReloadTag.setStatus('current') if mibBuilder.loadTexts: h3cSysReloadTag.setDescription("This object contains a single tag value which is used to select entries in the h3cSysReloadScheduleTable. In the h3cSysReloadScheduleTable,any entry that contains a tag value which is equal to the value of this object is selected. For example, the value of h3cSysReloadTag is 'TOM',and the h3cSysReloadScheduleTagList of each h3cSysReloadScheduleTable entry are as follows: 1)'TOM,ROBERT,MARY' 2)'TOM,DAVE' 3)'DAVE,MARY' Since there are 'TOM' in 1) and 2),so 1) and 2) are selected. If this object contains a value of zero length, no entries are selected. 
") h3cSysImage = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 4)) h3cSysImageNum = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 4, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysImageNum.setStatus('current') if mibBuilder.loadTexts: h3cSysImageNum.setDescription(' The number of system images. It indicates the total entries of h3cSysImageTable. ') h3cSysImageTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 4, 2), ) if mibBuilder.loadTexts: h3cSysImageTable.setStatus('current') if mibBuilder.loadTexts: h3cSysImageTable.setDescription("The system image management table. When 'copy srcfile destfile' is executed via the CLI, if destfile is not existed, then h3cSysImageType of the new file will be 'none'; otherwise h3cSysImageType keeps its current value. When 'move srcfile destfile' is executed via the CLI, h3cSysImageType and h3cSysImageIndex remain the same while h3cSysImageLocation changes. When 'rename srcfile' is executed via the CLI,h3cSysImageType and h3cSysImageIndex remain the same while h3cSysImageName changes. When 'delete srcfile' is executed via the CLI, the file is deleted from h3cSysImageTable while index of the file keeps and will not be allocated. ") h3cSysImageEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 4, 2, 1), ).setIndexNames((0, "A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysImageIndex")) if mibBuilder.loadTexts: h3cSysImageEntry.setStatus('current') if mibBuilder.loadTexts: h3cSysImageEntry.setDescription(' An entity image entry. Each entry consists of information of an entity image. The h3cSysImageIndex exclusively defines an image file. 
') h3cSysImageIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 4, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))) if mibBuilder.loadTexts: h3cSysImageIndex.setStatus('current') if mibBuilder.loadTexts: h3cSysImageIndex.setDescription("There are two parts for the index depicted as follows: 31 15 0 +++++++++++++++++++++++++++++++++++ + physical index + image index + +++++++++++++++++++++++++++++++++++ From bit0 to bit15 (two bytes) is the image index;Image file Index is a monotonically increasing integer for the sole purpose of indexing events. When it reaches the maximum value, an extremely unlikely event, the agent wraps the value back to 1 and may flush existing entries. From bit16 to bit31 (two bytes) is the physical index the same as the entPhysicalIndex specified in ENTITY-MIB. If ENTITY-MIB is not supported,the value for this object is the unit ID for XRN devices ,0 for non-XRN device which has only one main board,the board number for non-XRN device which have several main boards. Any index beyond the above range will not be supported. If a file is added in, its h3cSysImageIndex will be the maximum image index plus one. If the image file is removed, renamed, or moved from one place to another, its h3cSysImageIndex is not reallocated. If the image file's content is replaced, its h3cSysImageIndex will not change. ") h3cSysImageName = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 4, 2, 1, 2), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysImageName.setStatus('current') if mibBuilder.loadTexts: h3cSysImageName.setDescription('The file name of the image. 
It MUST NOT contain the path of the file.') h3cSysImageSize = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 4, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysImageSize.setStatus('current') if mibBuilder.loadTexts: h3cSysImageSize.setDescription(' Size of the file in bytes. ') h3cSysImageLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 4, 2, 1, 4), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysImageLocation.setStatus('current') if mibBuilder.loadTexts: h3cSysImageLocation.setDescription(' The directory path of the image. Its form should be the same as what defined in file system. Currently it is defined as follows: For mainboard: flash:/ For slave mainboard and subboards: slotN#flash:/ For XRN devices: unitN>slotN#flash:/ ') h3cSysImageType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 4, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("main", 1), ("backup", 2), ("none", 3), ("secure", 4), ("main-backup", 5), ("main-secure", 6), ("backup-secure", 7), ("main-backup-secure", 8)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: h3cSysImageType.setStatus('current') if mibBuilder.loadTexts: h3cSysImageType.setDescription(" It indicates the reloading sequence attribute of the image. For devices which support dual image: If the value is 'main(1)',the image will be the first image in the next reloading procedure. If the value is 'backup(2)', the image will be used if the main image fails. If the value is 'secure(4)', the image will be used if the main image and backup image both fail. If the value is 'none(3)',the image will not be used in the next reloading procedure. At the same time,you also can specify the main image by h3cSysReloadImage in h3cSysReloadScheduleTable. 
If the image is different from previous main image, the previous main image will not be main image again. And the image table will update with this variation. Vice versa, if you have defined the reload schedule, and then you define a new main image through h3cSysImageType when you are waiting the reload schedule to be executed, the real main image will be the latest one. It is strongly suggested to define the main image here, not by h3cSysReloadImage in h3cSysReloadScheduleTable. There are some rules for setting the value of h3cSysImageType: a)When a new image file is defined as 'main' or 'backup' file,the h3cSysImageType of old 'main' or 'backup' file will automatically be 'none'. b)It is forbidden to set 'none' attribute manually. c)It is forbidden to set 'secure' attribute manually. d)If 'main' image is set to 'backup', the file keeps 'main'. And vice versa. At this time, the file has 'main-backup' property. e)If the secure image is set to 'main' or 'backup', the file has 'main-secure' or 'backup-secure'property. f)If the secure image is set to 'main' and 'backup', the file has the 'main-backup-secure' property. g)If the none image is set to 'main' or 'backup', the file has the 'main' or 'backup' property. The following table describes whether it is ok to set to another state directly from original state. 
+--------------+-----------+-------------+-------------+ | set to | set to | set to | set to | | | | | | original | 'main' | 'backup' | 'none' | 'secure' | state | | | | | --------------+--------------+-----------+-------------+-------------+ | | | | | main | --- | yes | no | no | | | | | | | | | | | --------------+--------------+-----------+-------------|-------------+ | | | | | backup | yes | --- | no | no | | | | | | --------------+--------------+-----------+-------------|-------------+ | | | | | | | | | | none | yes | yes | --- | no | | | | | | --------------+--------------+-----------+-------------+-------------+ | | | | | secure | yes | yes | no | --- | | | | | | | | | | | --------------+--------------+-----------+-------------+-------------+ If there is one main image in the system, one row of H3cSysReloadScheduleEntry whose h3cSysReloadImage is equal to the main image's h3cSysImageIndex will be created automatically. But if any row is deleted, it will not be created automatically in h3cSysReloadScheduleTable. For the device which doesn't support dual image(main/backup): Only 'main' and 'none' is supported and it only can be set from none to main. When a new image file is defined as 'main' file,the h3cSysImageType of old 'main' file will automatically be 'none'. ") h3cSysCFGFile = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 5)) h3cSysCFGFileNum = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 5, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysCFGFileNum.setStatus('current') if mibBuilder.loadTexts: h3cSysCFGFileNum.setDescription(' The number of the configuration files in the system. It indicates the total entries of h3cSysCFGFileTable. 
')
# h3cSysCFGFileTable / h3cSysCFGFileEntry: one row per configuration file, indexed by
# h3cSysCFGFileIndex (high 16 bits: physical index per ENTITY-MIB; low 16 bits: a
# monotonically increasing configuration-file index).
h3cSysCFGFileTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 5, 2), ) if mibBuilder.loadTexts: h3cSysCFGFileTable.setStatus('current') if mibBuilder.loadTexts: h3cSysCFGFileTable.setDescription("A table of configuration files in this system. At present, the system doesn't support dual configure file, it should act as 'dual image' if dual configure file is supported. ") h3cSysCFGFileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 5, 2, 1), ).setIndexNames((0, "A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysCFGFileIndex")) if mibBuilder.loadTexts: h3cSysCFGFileEntry.setStatus('current') if mibBuilder.loadTexts: h3cSysCFGFileEntry.setDescription(' A configuration file entry. Each entry consists of information of a configuration file. h3cSysCFGFileIndex exclusively decides a configuration file. ') h3cSysCFGFileIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 5, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))) if mibBuilder.loadTexts: h3cSysCFGFileIndex.setStatus('current') if mibBuilder.loadTexts: h3cSysCFGFileIndex.setDescription('There are two parts for the index depicted as follows: 31 15 0 +++++++++++++++++++++++++++++++++++ + physical index + cfgFile index + +++++++++++++++++++++++++++++++++++ From bit0 to bit15 (two bytes) is the configuration file index; the configuration file index is a monotonically increasing integer for the sole purpose of indexing events. When it reaches the maximum value, an extremely unlikely event, the agent wraps the value back to 1 and may flush existing entries. From bit16 to bit31 (two bytes) is the physical index the same as the entPhysicalIndex specified in ENTITY-MIB. If ENTITY-MIB is not supported, the value for this object is the unit ID for XRN devices ,0 for non-XRN device which has only one slot,the board number for non-XRN device which have several slots. Any index beyond the above range will not be supported. 
')
# Read-only columns: file name (must not contain ':'), size in bytes (excluding the
# filesystem file header), and directory path in filesystem notation.
h3cSysCFGFileName = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 5, 2, 1, 2), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysCFGFileName.setStatus('current') if mibBuilder.loadTexts: h3cSysCFGFileName.setDescription(' Configuration file name. The name should not include the colon (:) character as it is a special separator character used to delineate the device name, partition name and the file name. ') h3cSysCFGFileSize = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 5, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysCFGFileSize.setStatus('current') if mibBuilder.loadTexts: h3cSysCFGFileSize.setDescription(' Size of the file in bytes. Note that it does not include the size of the filesystem file header. File size will always be non-zero. ') h3cSysCFGFileLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 5, 2, 1, 4), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysCFGFileLocation.setStatus('current') if mibBuilder.loadTexts: h3cSysCFGFileLocation.setDescription(' The directory path of the image. Its form should be the same as what defined in filesystem. Currently it is defined as follows: For mainboard: flash:/ For slave mainboard and subboards: slotN#flash:/ For XRN devices: unitN>slotN#flash:/ ')
# --- h3cSysBtmFile subtree (1.3.6.1.4.1.43.45.1.10.2.3.1.6): bootrom file update ---
# h3cSysBtmLoadMaxNumber: read-only cap on h3cSysBtmLoadEntry rows per device/unit.
h3cSysBtmFile = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 6)) h3cSysBtmFileLoad = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 6, 1)) h3cSysBtmLoadMaxNumber = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 6, 1, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysBtmLoadMaxNumber.setStatus('current') if mibBuilder.loadTexts: h3cSysBtmLoadMaxNumber.setDescription(' This object shows the maximum number of h3cSysBtmLoadEntry in each device/unit. 
')
# h3cSysBtmLoadTable: rows drive bootrom update operations (files listed in
# h3cFlhFileTable); rows are created/deleted via h3cSysBtmRowStatus, and when the
# table is full the oldest row whose file type is none(2) is destroyed automatically.
h3cSysBtmLoadTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 6, 2), ) if mibBuilder.loadTexts: h3cSysBtmLoadTable.setStatus('current') if mibBuilder.loadTexts: h3cSysBtmLoadTable.setDescription(' This table is used to update the bootrom and show the results of the update operation. The bootrom files are listed at the h3cFlhFileTable. These files are used to update bootrom. ') h3cSysBtmLoadEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 6, 2, 1), ).setIndexNames((0, "A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysBtmLoadIndex")) if mibBuilder.loadTexts: h3cSysBtmLoadEntry.setStatus('current') if mibBuilder.loadTexts: h3cSysBtmLoadEntry.setDescription(' Entries in the h3cSysBtmLoadTable are created and deleted using the h3cSysBtmRowStatus object. When a new row is being created and the number of entries is h3cSysBtmLoadMaxNumber, the row with minimal value of h3cSysBtmLoadTime and the value of h3cSysBtmFileType is none(2), should be destroyed automatically. ') h3cSysBtmLoadIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 6, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))) if mibBuilder.loadTexts: h3cSysBtmLoadIndex.setStatus('current') if mibBuilder.loadTexts: h3cSysBtmLoadIndex.setDescription(' The index of h3cSysBtmLoadTable. There are two parts for this index depicted as follows: 31 15 0 +++++++++++++++++++++++++++++++++++++++++ + physical index + random index + ( bit 16..31 ) ( bit 0..15 ) +++++++++++++++++++++++++++++++++++++++++ From bit0 to bit15 (two bytes), if the row is created by command line, the value is determined by system, and if the row is created by SNMP, the value is determined by users. From bit16 to bit31 (two bytes) is the physical index the same as the entPhysicalIndex specified in ENTITY-MIB. 
If ENTITY-MIB is not supported, the value of this object is the unit ID for XRN devices, 0 for non-XRN device which has only one main board, the board number for non-XRN device which has multiple main boards. ')
# h3cSysBtmFileName: bootrom file to load (validated by the system); h3cSysBtmFileType:
# main(1) = effective bootrom file, none(2) = noneffective; h3cSysBtmRowStatus supports
# only active(1)/createAndGo(4)/destroy(6).
h3cSysBtmFileName = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 6, 2, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readcreate") if mibBuilder.loadTexts: h3cSysBtmFileName.setStatus('current') if mibBuilder.loadTexts: h3cSysBtmFileName.setDescription(' The bootrom file name is determined by the users. The file must exist in corresponding entity. The validity of the bootrom file will be identified by system. If the file is invalid, the bootrom should fail to be updated, and the value of h3cSysBtmErrorStatus should be failed(4). ') h3cSysBtmFileType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 6, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("main", 1), ("none", 2)))).setMaxAccess("readcreate") if mibBuilder.loadTexts: h3cSysBtmFileType.setStatus('current') if mibBuilder.loadTexts: h3cSysBtmFileType.setDescription(' main(1) - The effective bootrom file. none(2) - The noneffective file. When bootrom is being updated, this object must be set to main(1). When bootrom is updated successfully, this object should be main(1), and the former object with the same physical index should be none(2). When bootrom failed to be updated, this object should be none(2). ') h3cSysBtmRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 6, 2, 1, 4), RowStatus()).setMaxAccess("readcreate") if mibBuilder.loadTexts: h3cSysBtmRowStatus.setStatus('current') if mibBuilder.loadTexts: h3cSysBtmRowStatus.setDescription(' Only support active(1), createAndGo(4), destroy(6). When a row is created successfully, the value of this object should be active(1), the value of h3cSysBtmFileName and h3cSysBtmFileType can not be modified by users. 
When bootrom is being updated, the value of h3cSysBtmErrorStatus is inProgress(2). When bootrom failed to be updated, the value of h3cSysBtmErrorStatus should be failed(4). When bootrom is updated successfully, the value of h3cSysBtmErrorStatus should be success(3). The value of h3cSysCurUpdateBtmFileName should change to the new bootrom file name. When another row is created successfully with the same physical index, and the update is successful, then the value of former h3cSysBtmFileType should be none(2) automatically. If a row is destroyed, h3cSysCurUpdateBtmFileName should not change. If a device/unit reboots, h3cSysBtmLoadTable should be empty. ')
# h3cSysBtmErrorStatus: read-only per-row result code; h3cSysBtmLoadTime: operation time.
h3cSysBtmErrorStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 6, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("invalidFile", 1), ("inProgress", 2), ("success", 3), ("failed", 4)))).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysBtmErrorStatus.setStatus('current') if mibBuilder.loadTexts: h3cSysBtmErrorStatus.setDescription(' This object shows the status of the specified operation after creating a row. invalidFile(1) - file is invalid. inProgress(2) - the operation is in progress. success(3) - the operation was done successfully. failed(4) - the operation failed. ') h3cSysBtmLoadTime = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 6, 2, 1, 6), TimeTicks()).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysBtmLoadTime.setStatus('current') if mibBuilder.loadTexts: h3cSysBtmLoadTime.setDescription(' This object indicates operation time. 
')
# --- h3cSysPackage subtree (1.3.6.1.4.1.43.45.1.10.2.3.1.7): software packages ---
# h3cSysPackageNum: read-only row count of h3cSysPackageTable; the table lists one
# row per package, indexed by h3cSysPackageIndex (high 16 bits: entPhysicalIndex,
# low 16 bits: monotonically increasing package index).
h3cSysPackage = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7)) h3cSysPackageNum = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysPackageNum.setStatus('current') if mibBuilder.loadTexts: h3cSysPackageNum.setDescription(' The number of software packages. It indicates the total entries of h3cSysPackageTable. ') h3cSysPackageTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 2), ) if mibBuilder.loadTexts: h3cSysPackageTable.setStatus('current') if mibBuilder.loadTexts: h3cSysPackageTable.setDescription('The system package management table. ') h3cSysPackageEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 2, 1), ).setIndexNames((0, "A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysPackageIndex")) if mibBuilder.loadTexts: h3cSysPackageEntry.setStatus('current') if mibBuilder.loadTexts: h3cSysPackageEntry.setDescription(' An software package entry. Each entry consists of information of an software package. ') h3cSysPackageIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))) if mibBuilder.loadTexts: h3cSysPackageIndex.setStatus('current') if mibBuilder.loadTexts: h3cSysPackageIndex.setDescription("There are two parts for the index depicted as follows: 31 15 0 +++++++++++++++++++++++++++++++++++ + physical index + package index + +++++++++++++++++++++++++++++++++++ From bit0 to bit15 (two bytes) is the Package index; Package file Index is a monotonically increasing integer for the sole purpose of indexing events. When it reaches the maximum value, an extremely unlikely event, the agent wraps the value back to 1 and may flush existing entries. From bit16 to bit31 (two bytes) is the physical index the same as the entPhysicalIndex specified in ENTITY-MIB. Any index beyond the above range will not be supported. 
If a file is added in, its h3cSysPackageIndex will be the maximum image index plus one. If the package file is removed, renamed, or moved from one place to another, its h3cSysPackageIndex is not reallocated. If the package file's content is replaced, its h3cSysPackageIndex will not change. ")
# Read-only columns: package file name (no path), size in bytes, directory path,
# and package type (boot/system/feature/patch).
h3cSysPackageName = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 2, 1, 2), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysPackageName.setStatus('current') if mibBuilder.loadTexts: h3cSysPackageName.setDescription(' The file name of the package. It MUST NOT contain the path of the file. ') h3cSysPackageSize = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 2, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysPackageSize.setStatus('current') if mibBuilder.loadTexts: h3cSysPackageSize.setDescription(' Size of the file in bytes. ') h3cSysPackageLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 2, 1, 4), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysPackageLocation.setStatus('current') if mibBuilder.loadTexts: h3cSysPackageLocation.setDescription(' The directory path of the package. Its form should be the same as what defined in file system. Currently it is defined as follows: For mainboard: flash:/ For slave mainboard and subboards: slotN#flash:/ ') h3cSysPackageType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("boot", 1), ("system", 2), ("feature", 3), ("patch", 4)))).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysPackageType.setStatus('current') if mibBuilder.loadTexts: h3cSysPackageType.setDescription(' It indicates the type of the package file. boot : kernel, file system, memory management and other core components. 
system : interface management, configuration management and other basic system package. feature : feature packages, providing different services. patch : patch file contains fixes for a specific defect. ')
# h3cSysPackageAttribute (read-write): role of the package in the next reload
# (none/primary/secondary/primarySecondary); h3cSysPackageStatus: 'active' when the
# file is used by the current system; h3cSysPackageDescription: free-text description.
h3cSysPackageAttribute = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("primary", 2), ("secondary", 3), ("primarySecondary", 4)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: h3cSysPackageAttribute.setStatus('current') if mibBuilder.loadTexts: h3cSysPackageAttribute.setDescription(" It indicates the attribute of the package file. If the value is 'primary', the package will be the first package in the next reloading procedure. If the value is 'secondary', the package will be used if the primary package fails. If the value is 'none', it will not be used in the next reloading procedure. ") h3cSysPackageStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("active", 1), ("inactive", 2)))).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysPackageStatus.setStatus('current') if mibBuilder.loadTexts: h3cSysPackageStatus.setDescription(" It indicates the status of the package file. If this file is used in the current system, its status is 'active'. ") h3cSysPackageDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 2, 1, 8), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysPackageDescription.setStatus('current') if mibBuilder.loadTexts: h3cSysPackageDescription.setDescription(' It is the description of the package. 
')
# Remaining package columns (feature name, version), the read-write limit on operate
# rows, and the operate table whose rows activate/deactivate packages.
h3cSysPackageFeature = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 2, 1, 9), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysPackageFeature.setStatus('current') if mibBuilder.loadTexts: h3cSysPackageFeature.setDescription(' Indicate the feature of the package. Different package files could be the same feature. ') h3cSysPackageVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 2, 1, 10), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysPackageVersion.setStatus('current') if mibBuilder.loadTexts: h3cSysPackageVersion.setDescription(' Indicate the version of the package. ') h3cSysPackageOperateEntryLimit = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite") if mibBuilder.loadTexts: h3cSysPackageOperateEntryLimit.setStatus('current') if mibBuilder.loadTexts: h3cSysPackageOperateEntryLimit.setDescription(' The maximum number of the entries in h3cSysPackageOperateTable. ') h3cSysPackageOperateTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 4), ) if mibBuilder.loadTexts: h3cSysPackageOperateTable.setStatus('current') if mibBuilder.loadTexts: h3cSysPackageOperateTable.setDescription('A table of package file operate.') h3cSysPackageOperateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 4, 1), ).setIndexNames((0, "A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysPackageOperateIndex")) if mibBuilder.loadTexts: h3cSysPackageOperateEntry.setStatus('current') if mibBuilder.loadTexts: h3cSysPackageOperateEntry.setDescription(' An operate request entry. 
')
# Operate-row columns: unique row index, target package (an h3cSysPackageTable index),
# requested state (active/inactive), and RowStatus controlling the row's lifetime.
h3cSysPackageOperateIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))) if mibBuilder.loadTexts: h3cSysPackageOperateIndex.setStatus('current') if mibBuilder.loadTexts: h3cSysPackageOperateIndex.setDescription(' The unique index value of a row in this table. ') h3cSysPackageOperatePackIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readcreate") if mibBuilder.loadTexts: h3cSysPackageOperatePackIndex.setStatus('current') if mibBuilder.loadTexts: h3cSysPackageOperatePackIndex.setDescription(' Specify the package file in the h3cSysPackageTable. ') h3cSysPackageOperateStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("active", 1), ("inactive", 2)))).setMaxAccess("readcreate") if mibBuilder.loadTexts: h3cSysPackageOperateStatus.setStatus('current') if mibBuilder.loadTexts: h3cSysPackageOperateStatus.setDescription(' activate or deactivate a package in the h3cSysPackageTable. ') h3cSysPackageOperateRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 4, 1, 4), RowStatus()).setMaxAccess("readcreate") if mibBuilder.loadTexts: h3cSysPackageOperateRowStatus.setStatus('current') if mibBuilder.loadTexts: h3cSysPackageOperateRowStatus.setDescription(" the status of this table entry. When the status is active all the object's value in the entry is not allowed to modified. 
")
# h3cSysPackageOperateResult: read-only outcome of the requested operation.
h3cSysPackageOperateResult = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 7, 4, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("opInProgress", 1), ("opSuccess", 2), ("opUnknownFailure", 3), ("opInvalidFile", 4), ("opNotSupport", 5)))).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysPackageOperateResult.setStatus('current') if mibBuilder.loadTexts: h3cSysPackageOperateResult.setDescription(' the result of the operation. ')
# --- h3cSysIpeFile subtree (1.3.6.1.4.1.43.45.1.10.2.3.1.8): IPE files ---
# h3cSysIpeFileNum: read-only count of IPE (Image Package Envelop) files, i.e. the
# number of rows in h3cSysIpeFileTable.
h3cSysIpeFile = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8)) h3cSysIpeFileNum = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysIpeFileNum.setStatus('current') if mibBuilder.loadTexts: h3cSysIpeFileNum.setDescription(' The number of software IPE(Image Package Envelop) files. It indicates the total entries of h3cSysIpeFileTable. ') h3cSysIpeFileTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 2), ) if mibBuilder.loadTexts: h3cSysIpeFileTable.setStatus('current') if mibBuilder.loadTexts: h3cSysIpeFileTable.setDescription('The system IPE file manage table. ') h3cSysIpeFileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 2, 1), ).setIndexNames((0, "A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysIpeFileIndex")) if mibBuilder.loadTexts: h3cSysIpeFileEntry.setStatus('current') if mibBuilder.loadTexts: h3cSysIpeFileEntry.setDescription(' An IPE package file entry. Each entry consists of information of an IPE package file. h3cSysIpeFileIndex exclusively decides an IPE file. 
')
# h3cSysIpeFileIndex: composite index (high 16 bits: entPhysicalIndex, low 16 bits:
# monotonically increasing IPE file index), followed by name and size columns.
h3cSysIpeFileIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))) if mibBuilder.loadTexts: h3cSysIpeFileIndex.setStatus('current') if mibBuilder.loadTexts: h3cSysIpeFileIndex.setDescription("There are two parts for the index depicted as follows: 31 15 0 +++++++++++++++++++++++++++++++++++ + physical index + IPE index + +++++++++++++++++++++++++++++++++++ From bit0 to bit15 (two bytes) is the IPE file index; IPE file Index is a monotonically increasing integer for the sole purpose of indexing events. When it reaches the maximum value, an extremely unlikely event, the agent wraps the value back to 1 and may flush existing entries. From bit16 to bit31 (two bytes) is the physical index the same as the entPhysicalIndex specified in ENTITY-MIB. Any index beyond the above range will not be supported. If a file is added in, its h3cSysIpeFileIndex will be the maximum image ndex plus one. If the IPE file is removed, renamed, or moved from one place to another, its h3cSysIpeFileIndex is not reallocated. If the IPE file's content is replaced, its h3cSysIpeFileIndex will not change. ") h3cSysIpeFileName = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 2, 1, 2), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysIpeFileName.setStatus('current') if mibBuilder.loadTexts: h3cSysIpeFileName.setDescription(' The file name of the IPE file. It MUST NOT contain the path of the file. ') h3cSysIpeFileSize = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 2, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysIpeFileSize.setStatus('current') if mibBuilder.loadTexts: h3cSysIpeFileSize.setDescription(' Size of the file in bytes. 
')
# h3cSysIpeFileLocation: directory path of the IPE file; h3cSysIpePackageTable lists
# the package files inside each IPE file, indexed by (h3cSysIpeFileIndex,
# h3cSysIpePackageIndex).
h3cSysIpeFileLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 2, 1, 4), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysIpeFileLocation.setStatus('current') if mibBuilder.loadTexts: h3cSysIpeFileLocation.setDescription(' The directory path of the IPE file. Its form should be the same as what defined in file system. Currently it is defined as follows: For mainboard: flash:/ For slave mainboard and subboards: slotN#flash:/ ') h3cSysIpePackageTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 3), ) if mibBuilder.loadTexts: h3cSysIpePackageTable.setStatus('current') if mibBuilder.loadTexts: h3cSysIpePackageTable.setDescription(' The IPE package file table. It shows the package files in the IPE file. ') h3cSysIpePackageEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 3, 1), ).setIndexNames((0, "A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysIpeFileIndex"), (0, "A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysIpePackageIndex")) if mibBuilder.loadTexts: h3cSysIpePackageEntry.setStatus('current') if mibBuilder.loadTexts: h3cSysIpePackageEntry.setDescription(' An entry of the h3cIpePackageTable. Indexed by h3cSysIpeFileIndex and h3cSysIpePackageIndex. ') h3cSysIpePackageIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))) if mibBuilder.loadTexts: h3cSysIpePackageIndex.setStatus('current') if mibBuilder.loadTexts: h3cSysIpePackageIndex.setDescription(' It is the IPE package index; IPE Package Index is a monotonically increasing integer for the sole purpose of indexing events. When it reaches the maximum value, an extremely unlikely event, the agent wraps the value back to 1 and may flush existing entries. Any index beyond the above range will not be supported. 
')
# Per-package read-only columns inside an IPE file: name, size, type, description.
h3cSysIpePackageName = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 3, 1, 2), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysIpePackageName.setStatus('current') if mibBuilder.loadTexts: h3cSysIpePackageName.setDescription('The file name of the package file. ') h3cSysIpePackageSize = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 3, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysIpePackageSize.setStatus('current') if mibBuilder.loadTexts: h3cSysIpePackageSize.setDescription(' Size of the package file in bytes. ') h3cSysIpePackageType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("boot", 1), ("system", 2), ("feature", 3), ("patch", 4)))).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysIpePackageType.setStatus('current') if mibBuilder.loadTexts: h3cSysIpePackageType.setDescription(' It indicates the type of the package file. boot : kernel, file system, memory management and other core components. system : interface management, configuration management and other basic system package. feature : feature packages, providing different services. patch : patch file contains fixes for a specific defect. ') h3cSysIpePackageDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 3, 1, 5), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysIpePackageDescription.setStatus('current') if mibBuilder.loadTexts: h3cSysIpePackageDescription.setDescription(' It is the description of the package. 
')
# Remaining IPE package columns (feature, version) and the IPE operate table, whose
# rows request unpacking an IPE file (from h3cSysIpeFileTable) into package files.
h3cSysIpePackageFeature = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 3, 1, 6), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysIpePackageFeature.setStatus('current') if mibBuilder.loadTexts: h3cSysIpePackageFeature.setDescription(' Indicate the feature of the package. ') h3cSysIpePackageVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 3, 1, 7), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysIpePackageVersion.setStatus('current') if mibBuilder.loadTexts: h3cSysIpePackageVersion.setDescription(' The version of the package. ') h3cSysIpeFileOperateTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 4), ) if mibBuilder.loadTexts: h3cSysIpeFileOperateTable.setStatus('current') if mibBuilder.loadTexts: h3cSysIpeFileOperateTable.setDescription('A table of IPE file operate.') h3cSysIpeFileOperateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 4, 1), ).setIndexNames((0, "A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysIpeFileOperateIndex")) if mibBuilder.loadTexts: h3cSysIpeFileOperateEntry.setStatus('current') if mibBuilder.loadTexts: h3cSysIpeFileOperateEntry.setDescription(' An operate request entry. ') h3cSysIpeFileOperateIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))) if mibBuilder.loadTexts: h3cSysIpeFileOperateIndex.setStatus('current') if mibBuilder.loadTexts: h3cSysIpeFileOperateIndex.setDescription(' The unique index value of a row in this table. ') h3cSysIpeFileOperateFileIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readcreate") if mibBuilder.loadTexts: h3cSysIpeFileOperateFileIndex.setStatus('current') if mibBuilder.loadTexts: h3cSysIpeFileOperateFileIndex.setDescription(' Specify the IPE file in the h3cSysIpeFileTable. 
This IPE file will be unpacked to package files. ')
# h3cSysIpeFileOperateAttribute: how the unpacked packages are used at next reload
# (none = unpack only, primary, secondary, primarySecondary); RowStatus column guards
# the row; Result column reports the outcome.
h3cSysIpeFileOperateAttribute = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("primary", 2), ("secondary", 3), ("primarySecondary", 4)))).setMaxAccess("readcreate") if mibBuilder.loadTexts: h3cSysIpeFileOperateAttribute.setStatus('current') if mibBuilder.loadTexts: h3cSysIpeFileOperateAttribute.setDescription(" It indicates the attribute of the IPE file when it is used in the reloading. If the value is 'primary', the packages in the IPE file will be the first packages in the next reloading procedure. If the value is 'secondary', the package in the IPE file will be used if the primary packages fails. If the value is 'none', the IPE file is only unpacked, will not be used in the reloading procedure. ") h3cSysIpeFileOperateRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 4, 1, 4), RowStatus()).setMaxAccess("readcreate") if mibBuilder.loadTexts: h3cSysIpeFileOperateRowStatus.setStatus('current') if mibBuilder.loadTexts: h3cSysIpeFileOperateRowStatus.setDescription(" the status of this table entry. When the status is active all the object's value in the entry is not allowed to modified. ") h3cSysIpeFileOperateResult = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 1, 8, 4, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("opInProgress", 1), ("opSuccess", 2), ("opUnknownFailure", 3), ("opInvalidFile", 4), ("opDeviceFull", 5), ("opFileOpenError", 6)))).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cSysIpeFileOperateResult.setStatus('current') if mibBuilder.loadTexts: h3cSysIpeFileOperateResult.setDescription(' the result of the operation. 
')
# --- Notifications (1.3.6.1.4.1.43.45.1.10.2.3.2): manual clock change, scheduled
# reload (sent before reboot, or when reboot fails due to a clock change), and
# startup fallback when the 'main' image failed to boot.
h3cSystemManMIBNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 2)) h3cSysClockChangedNotification = NotificationType((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 2, 1)).setObjects(("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysLocalClock")) if mibBuilder.loadTexts: h3cSysClockChangedNotification.setStatus('current') if mibBuilder.loadTexts: h3cSysClockChangedNotification.setDescription(' A clock changed notification is generated when the current local date and time for the system has been manually changed. The value of h3cSysLocalClock reflects new date and time. ') h3cSysReloadNotification = NotificationType((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 2, 2)).setObjects(("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadImage"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadCfgFile"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadReason"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadScheduleTime"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadAction")) if mibBuilder.loadTexts: h3cSysReloadNotification.setStatus('current') if mibBuilder.loadTexts: h3cSysReloadNotification.setDescription(' A h3cSysReloadNotification will be sent before the corresponding entity is rebooted. It will also be sent if the entity fails to reboot because the clock has changed. ') h3cSysStartUpNotification = NotificationType((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 2, 3)).setObjects(("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysImageType")) if mibBuilder.loadTexts: h3cSysStartUpNotification.setStatus('current') if mibBuilder.loadTexts: h3cSysStartUpNotification.setDescription(" a h3cSysStartUpNotification trap will be sent when the system starts up with 'main' image file failed, a trap will be sent to indicate which type the current image file (I.e backup or secure)is. 
") h3cSystemManMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 3)) h3cSystemManMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 3, 1)) h3cSystemManMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 3, 1, 1)).setObjects(("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysClockGroup"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadGroup"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysImageGroup"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysCFGFileGroup"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSystemManNotificationGroup"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysCurGroup"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSystemBtmLoadGroup")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): h3cSystemManMIBCompliance = h3cSystemManMIBCompliance.setStatus('current') if mibBuilder.loadTexts: h3cSystemManMIBCompliance.setDescription(' The compliance statement for entities which implement the system management MIB. ') h3cSystemManMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 3, 2)) h3cSysClockGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 3, 2, 1)).setObjects(("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysLocalClock"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysSummerTimeEnable"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysSummerTimeZone"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysSummerTimeMethod"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysSummerTimeStart"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysSummerTimeEnd"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysSummerTimeOffset")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): h3cSysClockGroup = h3cSysClockGroup.setStatus('current') if mibBuilder.loadTexts: h3cSysClockGroup.setDescription('A collection of objects providing mandatory system clock information.') h3cSysReloadGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 3, 2, 2)).setObjects(("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadSchedule"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadAction"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadImage"), 
("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadCfgFile"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadReason"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadScheduleTagList"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadTag"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadScheduleTime"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadEntity"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadRowStatus")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): h3cSysReloadGroup = h3cSysReloadGroup.setStatus('current') if mibBuilder.loadTexts: h3cSysReloadGroup.setDescription('A collection of objects providing mandatory system reload.') h3cSysImageGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 3, 2, 3)).setObjects(("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysImageNum"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysImageName"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysImageSize"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysImageLocation"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysImageType")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): h3cSysImageGroup = h3cSysImageGroup.setStatus('current') if mibBuilder.loadTexts: h3cSysImageGroup.setDescription('A collection of objects providing mandatory system image information.') h3cSysCFGFileGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 3, 2, 4)).setObjects(("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysCFGFileNum"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysCFGFileName"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysCFGFileSize"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysCFGFileLocation")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): h3cSysCFGFileGroup = h3cSysCFGFileGroup.setStatus('current') if mibBuilder.loadTexts: h3cSysCFGFileGroup.setDescription(' A collection of objects providing mandatory system configuration file information. 
') h3cSysCurGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 3, 2, 5)).setObjects(("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysCurCFGFileIndex"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysCurImageIndex")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): h3cSysCurGroup = h3cSysCurGroup.setStatus('current') if mibBuilder.loadTexts: h3cSysCurGroup.setDescription('A collection of system current status.') h3cSystemManNotificationGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 3, 2, 6)).setObjects(("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysClockChangedNotification"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysReloadNotification"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysStartUpNotification")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): h3cSystemManNotificationGroup = h3cSystemManNotificationGroup.setStatus('current') if mibBuilder.loadTexts: h3cSystemManNotificationGroup.setDescription('A collection of notifications.') h3cSystemBtmLoadGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 3, 3, 2, 7)).setObjects(("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysCurBtmFileName"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysCurUpdateBtmFileName"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysBtmLoadMaxNumber"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysBtmFileName"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysBtmFileType"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysBtmRowStatus"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysBtmErrorStatus"), ("A3COM-HUAWEI-SYS-MAN-MIB", "h3cSysBtmLoadTime")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): h3cSystemBtmLoadGroup = h3cSystemBtmLoadGroup.setStatus('current') if mibBuilder.loadTexts: h3cSystemBtmLoadGroup.setDescription('A collection of objects providing system update bootrom information.') mibBuilder.exportSymbols("A3COM-HUAWEI-SYS-MAN-MIB", h3cSysPackageOperateEntry=h3cSysPackageOperateEntry, h3cSysPackageOperateStatus=h3cSysPackageOperateStatus, h3cSysPackageFeature=h3cSysPackageFeature, h3cSysIpeFileIndex=h3cSysIpeFileIndex, 
h3cSysIpeFileOperateRowStatus=h3cSysIpeFileOperateRowStatus, h3cSysIpePackageIndex=h3cSysIpePackageIndex, h3cSysCurEntry=h3cSysCurEntry, h3cSysReload=h3cSysReload, h3cSysClockChangedNotification=h3cSysClockChangedNotification, h3cSysPackageStatus=h3cSysPackageStatus, h3cSysIpeFileOperateTable=h3cSysIpeFileOperateTable, h3cSysPackageType=h3cSysPackageType, h3cSysReloadReason=h3cSysReloadReason, h3cSysReloadImage=h3cSysReloadImage, h3cSysCurImageIndex=h3cSysCurImageIndex, h3cSysImageGroup=h3cSysImageGroup, h3cSysPackageDescription=h3cSysPackageDescription, h3cSysSummerTimeZone=h3cSysSummerTimeZone, h3cSysClockGroup=h3cSysClockGroup, h3cSysIpeFile=h3cSysIpeFile, h3cSysPackageOperateTable=h3cSysPackageOperateTable, h3cSysIpeFileOperateFileIndex=h3cSysIpeFileOperateFileIndex, h3cSysPackageOperateEntryLimit=h3cSysPackageOperateEntryLimit, h3cSystemManMIBCompliance=h3cSystemManMIBCompliance, h3cSysIpePackageSize=h3cSysIpePackageSize, h3cSysSummerTimeMethod=h3cSysSummerTimeMethod, h3cSysBtmLoadIndex=h3cSysBtmLoadIndex, h3cSysIpeFileNum=h3cSysIpeFileNum, h3cSysClock=h3cSysClock, h3cSystemManMIBNotifications=h3cSystemManMIBNotifications, h3cSysReloadSchedule=h3cSysReloadSchedule, h3cSysImageTable=h3cSysImageTable, h3cSysBtmRowStatus=h3cSysBtmRowStatus, h3cSysIpePackageEntry=h3cSysIpePackageEntry, h3cSysBtmErrorStatus=h3cSysBtmErrorStatus, h3cSysSummerTimeOffset=h3cSysSummerTimeOffset, h3cSysReloadGroup=h3cSysReloadGroup, h3cSysIpeFileOperateIndex=h3cSysIpeFileOperateIndex, h3cSysCFGFileIndex=h3cSysCFGFileIndex, h3cSysCFGFileName=h3cSysCFGFileName, h3cSysReloadScheduleTime=h3cSysReloadScheduleTime, h3cSysPackageTable=h3cSysPackageTable, h3cSysReloadScheduleIndex=h3cSysReloadScheduleIndex, h3cSysReloadScheduleTable=h3cSysReloadScheduleTable, h3cSysReloadEntity=h3cSysReloadEntity, h3cSysLocalClockString=h3cSysLocalClockString, h3cSysPackageOperateIndex=h3cSysPackageOperateIndex, h3cSystemManMIBConformance=h3cSystemManMIBConformance, h3cSystemBtmLoadGroup=h3cSystemBtmLoadGroup, 
h3cSysPackageOperatePackIndex=h3cSysPackageOperatePackIndex, h3cSysIpeFileOperateAttribute=h3cSysIpeFileOperateAttribute, h3cSysPackage=h3cSysPackage, h3cSysIpePackageName=h3cSysIpePackageName, h3cSysPackageNum=h3cSysPackageNum, h3cSysCurEntPhysicalIndex=h3cSysCurEntPhysicalIndex, h3cSysReloadCfgFile=h3cSysReloadCfgFile, h3cSysCFGFileNum=h3cSysCFGFileNum, h3cSystemManNotificationGroup=h3cSystemManNotificationGroup, h3cSysSummerTimeStart=h3cSysSummerTimeStart, h3cSysIpePackageVersion=h3cSysIpePackageVersion, h3cSystemManMIBObjects=h3cSystemManMIBObjects, h3cSysSummerTime=h3cSysSummerTime, h3cSysReloadAction=h3cSysReloadAction, h3cSysImageEntry=h3cSysImageEntry, h3cSystemManMIBCompliances=h3cSystemManMIBCompliances, h3cSysIpeFileTable=h3cSysIpeFileTable, h3cSysCFGFileSize=h3cSysCFGFileSize, h3cSysImageSize=h3cSysImageSize, h3cSysStartUpNotification=h3cSysStartUpNotification, h3cSysBtmLoadTable=h3cSysBtmLoadTable, h3cSysIpePackageDescription=h3cSysIpePackageDescription, PYSNMP_MODULE_ID=h3cSystemMan, h3cSysReloadNotification=h3cSysReloadNotification, h3cSysPackageVersion=h3cSysPackageVersion, h3cSysIpeFileOperateResult=h3cSysIpeFileOperateResult, h3cSysReloadTag=h3cSysReloadTag, h3cSysPackageOperateRowStatus=h3cSysPackageOperateRowStatus, h3cSysPackageAttribute=h3cSysPackageAttribute, h3cSysImage=h3cSysImage, h3cSysPackageOperateResult=h3cSysPackageOperateResult, h3cSysReloadRowStatus=h3cSysReloadRowStatus, h3cSysReloadScheduleTagList=h3cSysReloadScheduleTagList, h3cSysCurrent=h3cSysCurrent, h3cSysBtmFileName=h3cSysBtmFileName, h3cSysCFGFileTable=h3cSysCFGFileTable, h3cSysBtmFile=h3cSysBtmFile, h3cSysIpePackageType=h3cSysIpePackageType, h3cSystemManMIBGroups=h3cSystemManMIBGroups, h3cSysPackageSize=h3cSysPackageSize, h3cSysBtmLoadEntry=h3cSysBtmLoadEntry, h3cSysImageName=h3cSysImageName, h3cSysIpeFileOperateEntry=h3cSysIpeFileOperateEntry, h3cSysImageIndex=h3cSysImageIndex, h3cSysCurCFGFileIndex=h3cSysCurCFGFileIndex, h3cSysCurBtmFileName=h3cSysCurBtmFileName, 
h3cSysCFGFileEntry=h3cSysCFGFileEntry, h3cSysPackageEntry=h3cSysPackageEntry, h3cSysIpeFileName=h3cSysIpeFileName, h3cSysBtmFileType=h3cSysBtmFileType, h3cSysImageType=h3cSysImageType, h3cSysCurUpdateBtmFileName=h3cSysCurUpdateBtmFileName, h3cSysCFGFile=h3cSysCFGFile, h3cSysCurTable=h3cSysCurTable, h3cSysPackageLocation=h3cSysPackageLocation, h3cSysBtmLoadTime=h3cSysBtmLoadTime, h3cSysReloadScheduleEntry=h3cSysReloadScheduleEntry, h3cSysIpeFileEntry=h3cSysIpeFileEntry, h3cSysCFGFileGroup=h3cSysCFGFileGroup, h3cSysIpeFileLocation=h3cSysIpeFileLocation, h3cSysPackageIndex=h3cSysPackageIndex, h3cSysLocalClock=h3cSysLocalClock, h3cSysCurGroup=h3cSysCurGroup, h3cSysCFGFileLocation=h3cSysCFGFileLocation, h3cSysBtmFileLoad=h3cSysBtmFileLoad, h3cSysImageLocation=h3cSysImageLocation, h3cSysImageNum=h3cSysImageNum, h3cSysSummerTimeEnable=h3cSysSummerTimeEnable, h3cSysIpePackageTable=h3cSysIpePackageTable, h3cSysIpeFileSize=h3cSysIpeFileSize, h3cSysPackageName=h3cSysPackageName, h3cSystemMan=h3cSystemMan, h3cSysSummerTimeEnd=h3cSysSummerTimeEnd, h3cSysIpePackageFeature=h3cSysIpePackageFeature, h3cSysBtmLoadMaxNumber=h3cSysBtmLoadMaxNumber)
189.730458
5,380
0.76427
9,176
70,390
5.862576
0.083152
0.047068
0.082368
0.009146
0.503244
0.376448
0.342504
0.320587
0.306571
0.277795
0
0.065673
0.11199
70,390
370
5,381
190.243243
0.794951
0.004859
0
0.024931
0
0.157895
0.447251
0.054998
0
0
0
0
0
1
0
false
0
0.022161
0
0.022161
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
7a2e304b9a1e71032f824334c3a9e4c7ae95323f
3,748
py
Python
pineapple/pineapple_core/core/flows.py
societe-generale/Pineapple
2aad2fbd0d7e231608d1d02bce45a28cd22f82a8
[ "MIT" ]
null
null
null
pineapple/pineapple_core/core/flows.py
societe-generale/Pineapple
2aad2fbd0d7e231608d1d02bce45a28cd22f82a8
[ "MIT" ]
null
null
null
pineapple/pineapple_core/core/flows.py
societe-generale/Pineapple
2aad2fbd0d7e231608d1d02bce45a28cd22f82a8
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""This module contains everything that is related to flows.

This module contains one class : Flow.
"""
from typing import Any, Dict

from pineapple_core.utils.serialization import make_value_serializable


class Flow:
    """Class that represents a flow

    Attributes
    ==========
    name: Any
        Name of the Flow, it can be of any type
    priority: int
        Number that defines the order of execution of the flow in a set
        of flows.  If the number is positive, the flow is enabled,
        otherwise it's disabled
    node: Node
        The next node to execute with this flow
    """

    @staticmethod
    def from_reference(flow: "Flow"):
        """Creates a fake exact copy of a Flow by usurping its hash

        Parameters
        ==========
        flow: Flow
            Reference to the flow you want to create an exact copy of
        """
        new_flow = flow.copy()
        new_flow.inject_reference(flow)
        return new_flow

    def __init__(self, node: "Node", name: Any, priority: int):
        """Flow constructor

        Parameters
        ==========
        node: Node
            Node that the flow will trigger
        name: Any
            Name of the flow
        priority: int
            Priority of the flow
        """
        self.node = node
        self.priority = priority
        self.name = name
        self.pretty_name = name
        # Flow whose identity (hash) this one usurps; None when this
        # Flow stands on its own.
        self._ref = None

    def toggle(self):
        """Toggle a flow (enables it if it was disabled, disables it
        otherwise)

        NOTE(review): a priority of 0 is unchanged by the sign flip, so
        a zero-priority flow cannot be toggled — confirm whether 0 is a
        legal priority value.
        """
        self.priority *= -1

    def disable(self):
        """Disables a flow (make its priority negative)"""
        if self.priority > 0:
            self.toggle()

    def enable(self):
        """Enables a flow (gives an absolute priority to the flow)"""
        self.priority = abs(self.priority)

    def increase_priority(self):
        """Increases the priority of the flow"""
        self.priority += 1

    def decrease_priority(self):
        """Decreases the priority of the flow"""
        self.priority -= 1

    def inject_reference(self, ref: "Flow"):
        """Stores a reference of the Flow to usurp

        Parameters
        ==========
        ref: Flow
            Reference to the Flow you want to usurp
        """
        self._ref = ref

    def __eq__(self, other) -> bool:
        # Equality is deliberately hash-based so that a copy created by
        # from_reference() compares equal to the Flow it usurps.
        return hash(self) == hash(other)

    def __hash__(self) -> int:
        """Implementation of the special method __hash__

        It allows a Flow to usurp another one by delegating to the
        underlying _ref's hash, with _ref being the Flow you want to
        usurp

        Returns
        =======
        int:
            Hash of the Flow object
        """
        if self._ref is None:
            return super().__hash__()
        return hash(self._ref)

    def copy(self) -> "Flow":
        """Copies a Flow, name and priority will be copied while the
        Node inside the flow will be the same reference

        Returns
        =======
        Flow:
            Reference to the newly copied Flow
        """
        flow_copy = Flow(self.node, self.name, self.priority)
        flow_copy.pretty_name = self.pretty_name
        return flow_copy

    def dump(self) -> Dict[str, Any]:
        """Dumps a flow

        Returns
        =======
        dict:
            A dictionary of attributes representing its state
        """
        return {
            "name": make_value_serializable(self.name),
            "node": str(self.node.id),
            "priority": self.priority,
        }

    def __repr__(self):
        return f"Flow(name='{self.name}', priority={self.priority}, node={self.node})"
27.15942
87
0.567236
458
3,748
4.5
0.290393
0.050946
0.034935
0.026201
0.112567
0.112567
0.080543
0.080543
0.050461
0.050461
0
0.002012
0.33698
3,748
137
88
27.357664
0.827364
0.445571
0
0
0
0
0.059952
0.029377
0
0
0
0
0
1
0.288889
false
0
0.044444
0.044444
0.511111
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
7a2fe90ca9c1760303393523fbb9dfacdfef8238
11,435
py
Python
tests/test_main.py
Asday/ytdl
96a51ba3589e855b27f75095b0cd4a6f00f8eefa
[ "MIT" ]
null
null
null
tests/test_main.py
Asday/ytdl
96a51ba3589e855b27f75095b0cd4a6f00f8eefa
[ "MIT" ]
1
2019-04-15T02:09:37.000Z
2019-04-15T02:09:37.000Z
tests/test_main.py
Asday/ytdl
96a51ba3589e855b27f75095b0cd4a6f00f8eefa
[ "MIT" ]
null
null
null
import datetime
import os
import subprocess

from django.apps import apps
from django.core.management import call_command

import attr
from freezegun import freeze_time
import pytest
import pytz

from downloader.exceptions import (
    NoFilesCreatedError,
    TooManyFilesCreatedError,
    YoutubeDLError,
)
from playlists.models import Playlist, Video


def test_server_starts(client):
    # Smoke test: the root URL is reachable.
    client.get('/')


def test_checks_pass():
    # Django system checks must be clean.
    call_command('check')


def test_get_playlist_info_raises_for_garbage_playlist():
    downloader = apps.get_app_config('downloader')
    with pytest.raises(YoutubeDLError):
        downloader.get_playlist_info('asdf')


# Real YouTube ids used by the tests below.
_TEST_PLAYLIST_ID = 'PL59FEE129ADFF2B12'
_TEST_VIDEO_ID = '007VM8NZxkI'


def test_get_playlist_info_returns_iterable():
    downloader = apps.get_app_config('downloader')
    results = downloader.get_playlist_info(_TEST_PLAYLIST_ID)
    iter(results)


def test_get_playlist_info_returns_id_and_title_for_all_results():
    downloader = apps.get_app_config('downloader')
    results = downloader.get_playlist_info(_TEST_PLAYLIST_ID)
    for result in results:
        assert 'id' in result
        assert 'title' in result


def test_download_video_raises_for_garbage_video(tmp_path):
    downloader = apps.get_app_config('downloader')
    with pytest.raises(YoutubeDLError):
        downloader.download_video('asdf', tmp_path)


def test_download_video_creates_a_file(tmp_path):
    downloader = apps.get_app_config('downloader')
    filename = downloader.download_video(_TEST_VIDEO_ID, tmp_path)
    expected_path = os.path.join(tmp_path, filename)
    assert os.path.exists(expected_path)
    os.remove(expected_path)


def test_download_video_raises_when_youtube_dl_misbehaves(tmp_path, mocker):
    downloader = apps.get_app_config('downloader')

    def run_factory(files_to_create):
        # Fake subprocess.run that creates a controlled number of files
        # in the working directory.
        def run(*args, cwd, **kwargs):
            for i in range(files_to_create):
                open(os.path.join(cwd, str(i)), 'w').close()
        return run

    mocker.patch.object(subprocess, 'run', run_factory(0))
    with pytest.raises(NoFilesCreatedError):
        downloader.download_video(_TEST_VIDEO_ID, tmp_path)

    mocker.patch.object(subprocess, 'run', run_factory(2))
    with pytest.raises(TooManyFilesCreatedError):
        downloader.download_video(_TEST_VIDEO_ID, tmp_path)


@attr.s
class Params(object):
    # One parametrized scenario: DB rows before the sync, playlist info
    # fed in, and the expected Video rows afterwards.
    preexisting = attr.ib()
    playlist_info = attr.ib()
    expected = attr.ib()


now = datetime.datetime(2018, 12, 2, 0, 0, 0, tzinfo=pytz.UTC)
yesterday = datetime.datetime(2018, 12, 1, 0, 0, 0, tzinfo=pytz.UTC)


@freeze_time('2018-12-02 00:00:00.0')
@pytest.mark.django_db
@pytest.mark.parametrize(
    'params',
    [
        Params(  # None preexisting, none new.
            preexisting=[],
            playlist_info=[],
            expected=[],
        ),
        Params(  # None preexisting, one new.
            preexisting=[],
            playlist_info=[{'id': 'testID', 'title': 'Test Title'}],
            expected=[
                {
                    'youtube_id': 'testID',
                    'title': 'Test Title',
                    'added': now,
                    'removed': None,
                },
            ]
        ),
        Params(  # None preexisting, some new.
            preexisting=[],
            playlist_info=[
                {'id': 'testID1', 'title': 'Test Title 1'},
                {'id': 'testID2', 'title': 'Test Title 2'},
            ],
            expected=[
                {
                    'youtube_id': 'testID1',
                    'title': 'Test Title 1',
                    'added': now,
                    'removed': None,
                },
                {
                    'youtube_id': 'testID2',
                    'title': 'Test Title 2',
                    'added': now,
                    'removed': None,
                },
            ],
        ),
        Params(  # Some preexisting, none new.
            preexisting=[{
                'youtube_id': 'testID',
                'title': 'Test Title',
                'added': now,
                'removed': None,
            }],
            playlist_info=[{'id': 'testID', 'title': 'Test Title'}],
            expected=[{
                'youtube_id': 'testID',
                'title': 'Test Title',
                'added': now,
                'removed': None,
            }],
        ),
        Params(  # Some preexisting, one new.
            preexisting=[{
                'youtube_id': 'testID1',
                'title': 'Test Title 1',
                'added': yesterday,
                'removed': None,
            }],
            playlist_info=[
                {'id': 'testID1', 'title': 'Test Title 1'},
                {'id': 'testID2', 'title': 'Test Title 2'},
            ],
            expected=[
                {
                    'youtube_id': 'testID1',
                    'title': 'Test Title 1',
                    'added': yesterday,
                    'removed': None,
                },
                {
                    'youtube_id': 'testID2',
                    'title': 'Test Title 2',
                    'added': now,
                    'removed': None,
                },
            ],
        ),
        Params(  # Some preexisting, one removed.
            preexisting=[
                {
                    'youtube_id': 'testID1',
                    'title': 'Test Title 1',
                    'added': yesterday,
                    'removed': None,
                },
                {
                    'youtube_id': 'testID2',
                    'title': 'Test Title 2',
                    'added': yesterday,
                    'removed': None,
                },
            ],
            playlist_info=[{'id': 'testID1', 'title': 'Test Title 1'}],
            expected=[
                {
                    'youtube_id': 'testID1',
                    'title': 'Test Title 1',
                    'added': yesterday,
                    'removed': None,
                },
                {
                    'youtube_id': 'testID2',
                    'title': 'Test Title 2',
                    'added': yesterday,
                    'removed': now,
                },
            ],
        ),
        Params(  # Some preexisting, one new, one removed.
            preexisting=[
                {
                    'youtube_id': 'testID1',
                    'title': 'Test Title 1',
                    'added': yesterday,
                    'removed': None,
                },
                {
                    'youtube_id': 'testID2',
                    'title': 'Test Title 2',
                    'added': yesterday,
                    'removed': None,
                },
            ],
            playlist_info=[
                {'id': 'testID1', 'title': 'Test Title 1'},
                {'id': 'testID3', 'title': 'Test Title 3'},
            ],
            expected=[
                {
                    'youtube_id': 'testID1',
                    'title': 'Test Title 1',
                    'added': yesterday,
                    'removed': None,
                },
                {
                    'youtube_id': 'testID2',
                    'title': 'Test Title 2',
                    'added': yesterday,
                    'removed': now,
                },
                {
                    'youtube_id': 'testID3',
                    'title': 'Test Title 3',
                    'added': now,
                    'removed': None,
                },
            ],
        ),
        Params(  # Some preexisting, one renamed.
            preexisting=[{
                'youtube_id': 'testID',
                'title': 'Test Title',
                'added': yesterday,
                'removed': None,
            }],
            playlist_info=[{'id': 'testID', 'title': 'Renamed'}],
            expected=[{
                'youtube_id': 'testID',
                'title': 'Renamed',
                'added': yesterday,
                'removed': None,
            }],
        ),
        Params(  # Some preexisting, one deleted.
            preexisting=[{
                'youtube_id': 'testID',
                'title': 'Test Title',
                'added': yesterday,
                'removed': None,
                'deleted': False,
            }],
            playlist_info=[{'id': 'testID', 'title': '[Deleted video]'}],
            expected=[{
                'youtube_id': 'testID',
                'title': 'Test Title',
                'deleted': True,
                'privated': False,
            }],
        ),
        Params(  # Some preexisting, one made private.
            preexisting=[{
                'youtube_id': 'testID',
                'title': 'Test Title',
                'added': yesterday,
                'removed': None,
                'privated': False,
            }],
            playlist_info=[{'id': 'testID', 'title': '[Private video]'}],
            expected=[{
                'youtube_id': 'testID',
                'title': 'Test Title',
                'deleted': False,
                'privated': True,
            }],
        ),
        Params(  # Some preexisting private, one made public.
            preexisting=[{
                'youtube_id': 'testID',
                'title': '[Private video]',
                'added': yesterday,
                'removed': None,
                'privated': True,
            }],
            playlist_info=[{'id': 'testID', 'title': 'Test Title'}],
            expected=[{
                'youtube_id': 'testID',
                'title': 'Test Title',
                'deleted': False,
                'privated': False,
            }],
        ),
        Params(  # None preexisting, one new private, one new deleted.
            preexisting=[],
            playlist_info=[
                {'id': 'testID1', 'title': '[Private video]'},
                {'id': 'testID2', 'title': '[Deleted video]'},
            ],
            expected=[
                {
                    'youtube_id': 'testID1',
                    'title': '[Private video]',
                    'added': now,
                    'removed': None,
                    'deleted': False,
                    'privated': True,
                },
                {
                    'youtube_id': 'testID2',
                    'title': '[Deleted video]',
                    'added': now,
                    'removed': None,
                    'deleted': True,
                    'privated': False,
                },
            ],
        ),
    ],
)
def test_create_and_update_videos(params, mocker):
    """create_and_update_videos syncs Video rows with the playlist info."""
    playlist = Playlist.objects.create(youtube_id='playlistID')
    for details in params.preexisting:
        Video.objects.create(playlist=playlist, **details)

    downloader = apps.get_app_config('downloader')
    mocker.patch.object(downloader, 'get_playlist_info')
    downloader.get_playlist_info.return_value = (
        item for item in params.playlist_info
    )

    playlist.create_and_update_videos()

    videos = playlist.videos.all()
    for details in params.expected:
        video = videos.get(youtube_id=details['youtube_id'])
        for attr_name, value in details.items():
            assert getattr(video, attr_name) == value

    assert playlist.videos.count() == len(params.expected)
30.412234
76
0.457718
949
11,435
5.336143
0.14647
0.058649
0.091232
0.064179
0.624605
0.549566
0.459913
0.439179
0.390798
0.380134
0
0.014275
0.418015
11,435
375
77
30.493333
0.746657
0.035068
0
0.631902
0
0
0.168315
0
0
0
0
0
0.015337
1
0.033742
false
0.003067
0.033742
0
0.082822
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a307f2f55d5637cf1662f83fbfb6b7206eaf55e
4,012
py
Python
Tests/test_doc.py
seadavis/StoryNode
e863e68e4b95b92a074554c2399492dbfb54cbab
[ "Apache-2.0" ]
null
null
null
Tests/test_doc.py
seadavis/StoryNode
e863e68e4b95b92a074554c2399492dbfb54cbab
[ "Apache-2.0" ]
null
null
null
Tests/test_doc.py
seadavis/StoryNode
e863e68e4b95b92a074554c2399492dbfb54cbab
[ "Apache-2.0" ]
null
null
null
import spacy
import sys

from spacy.matcher import Matcher
from src.core.document import Document
from src.core.relation_extraction import *

# Every test swaps spans into the same base sentence.
SAMPLE_TEXT = ("Python code is the cleanest code around unlike C++ "
               "which is garbage nonsense")


def _swap_and_print(src_span, other_text, other_span, show_original=False):
    """Build a SAMPLE_TEXT document, swap one of its spans with a span
    of a document built from *other_text*, and return the printed text
    (or the preserved original text when *show_original* is true)."""
    doc = Document(SAMPLE_TEXT)
    other = Document(other_text)
    doc.swap(doc.span(*src_span), other.span(*other_span))
    if show_original:
        return doc.print(True)
    return doc.print()


def test_one_to_one_replacement():
    new_text = _swap_and_print(
        (2, 3), "word word word word word word hello", (6, 7))
    assert new_text == ("Python code hello the cleanest code around "
                        "unlike C++ which is garbage nonsense")


def test_multiple_replacements():
    # Kept explicit: all spans are taken on the document *before* any
    # swap happens, then three swaps are applied in order.
    doc = Document(SAMPLE_TEXT)
    span = doc.span(5, 6)
    span2 = doc.span(12, 13)
    span3 = doc.span(8, 10)
    doc2 = Document("the joker fights guys batman is evil")
    doc3 = Document("highlighting textbooks for fun")
    span_swap = doc2.span(4, 5)
    span_swap2 = doc3.span(0, 1)
    span_swap3 = doc3.span(1, 3)
    doc.swap(span, span_swap)
    doc.swap(span2, span_swap2)
    doc.swap(span3, span_swap3)
    assert doc.print() == ("Python code is the cleanest batman around "
                           "unlike textbooks for is garbage highlighting")


def test_one_to_one_replacement_same_start_end():
    new_text = _swap_and_print(
        (5, 6), "the joker fights guys batman is evil", (4, 5))
    assert new_text == ("Python code is the cleanest batman around "
                        "unlike C++ which is garbage nonsense")


def test_n_to_n_replacement():
    new_text = _swap_and_print(
        (0, 7),
        "my adult nephew enjoys crafts and cutting paper like a two year old",
        (6, 14))
    assert new_text == ("cutting paper like a two year old unlike C++ "
                        "which is garbage nonsense")


def test_n_to_n_replacement_same_start_end():
    new_text = _swap_and_print(
        (6, 9), "my mom told me to button up jacket young man its cold outside",
        (5, 9))
    assert new_text == ("Python code is the cleanest code button up "
                        "jacket young which is garbage nonsense")


def test_shorter_replacement():
    new_text = _swap_and_print(
        (4, 9), "down dog, get off of the couch", (0, 2))
    assert new_text == "Python code is the down dog which is garbage nonsense"


def test_longer_replacement():
    new_text = _swap_and_print(
        (8, 10), "my mom told me to button up jacket young man its cold outside",
        (5, 12))
    assert new_text == ("Python code is the cleanest code around unlike "
                        "button up jacket young man its cold is garbage "
                        "nonsense")


def test_original_still_prints_after_replacement():
    old_text = _swap_and_print(
        (8, 10), "my mom told me to button up jacket young man its cold outside",
        (5, 12), show_original=True)
    assert old_text == SAMPLE_TEXT
39.722772
127
0.709123
637
4,012
4.32967
0.169545
0.061639
0.071066
0.075417
0.799492
0.762872
0.714286
0.684191
0.684191
0.646846
0
0.023489
0.204138
4,012
100
128
40.12
0.840276
0
0
0.52381
0
0
0.392323
0
0
0
0
0
0.095238
1
0.095238
false
0
0.059524
0
0.154762
0.107143
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
7a31027b5ed0e4096f71764a61c58585d6898d31
116
py
Python
kigo/bpmn/elements/definitions.py
AsyncMicroStack/kigo-bpmn
dfe8c312399f86067393973a0c28a8695bb5e07a
[ "Apache-2.0" ]
null
null
null
kigo/bpmn/elements/definitions.py
AsyncMicroStack/kigo-bpmn
dfe8c312399f86067393973a0c28a8695bb5e07a
[ "Apache-2.0" ]
null
null
null
kigo/bpmn/elements/definitions.py
AsyncMicroStack/kigo-bpmn
dfe8c312399f86067393973a0c28a8695bb5e07a
[ "Apache-2.0" ]
null
null
null
from kigo.bpmn.elements.element import Element


class BpmnDefinitions(Element):
    """Element mapped to the BPMN ``bpmn:definitions`` tag."""

    # Tag name this element is registered under (see Element).
    item_name = "bpmn:definitions"
19.333333
46
0.784483
14
116
6.428571
0.785714
0
0
0
0
0
0
0
0
0
0
0
0.12931
116
5
47
23.2
0.891089
0
0
0
0
0
0.137931
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
7a324221f1f84a57129bf65acf3d694eadd4186b
900
py
Python
where/parsers/vascc_crf.py
ingridfausk/where
b65398911075b7ddef3a3a1146efa428eae498fe
[ "MIT" ]
16
2018-08-31T10:31:11.000Z
2022-03-15T16:07:24.000Z
where/parsers/vascc_crf.py
ingridfausk/where
b65398911075b7ddef3a3a1146efa428eae498fe
[ "MIT" ]
5
2018-07-13T14:04:24.000Z
2021-06-17T02:14:44.000Z
where/parsers/vascc_crf.py
ingridfausk/where
b65398911075b7ddef3a3a1146efa428eae498fe
[ "MIT" ]
15
2018-06-07T05:45:24.000Z
2022-03-15T16:07:27.000Z
"""A parser for reading radio source coordinates from VASCC apriori crf Description: ------------ Reads radio source coordinates from VASCC (VLBI Software Analysis Comparison Campaign) apriori file. """ # Midgard imports from midgard.dev import plugins from midgard.parsers._parser_line import LineParser @plugins.register class VasccCrfParser(LineParser): """A parser for reading source coordinates from ICRF files """ def setup_parser(self): return dict(usecols=(0, 3, 4), dtype="U8, f8, f8", skip_header=1) def structure_data(self): self.data = { name: { "ra": ra, "dec": dec, "special": False, "undefined": True, "non_vcs": False, "vcs": False, "defining": False, } for name, ra, dec in self._array }
25.714286
100
0.576667
99
900
5.171717
0.616162
0.099609
0.123047
0.066406
0.121094
0
0
0
0
0
0
0.011382
0.316667
900
34
101
26.470588
0.821138
0.305556
0
0
0
0
0.080065
0
0
0
0
0
0
1
0.105263
false
0
0.105263
0.052632
0.315789
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a32575dae7683b788c3a10ea2963e61f6b9dee6
1,520
py
Python
data_engineering/opcua/client.py
croidzen/playground
37dfe861cdc1803b0f51a0ee623f42c450e75f04
[ "MIT" ]
null
null
null
data_engineering/opcua/client.py
croidzen/playground
37dfe861cdc1803b0f51a0ee623f42c450e75f04
[ "MIT" ]
null
null
null
data_engineering/opcua/client.py
croidzen/playground
37dfe861cdc1803b0f51a0ee623f42c450e75f04
[ "MIT" ]
null
null
null
import asyncio
import sys
# sys.path.insert(0, "..")
import logging

from asyncua import Client, Node, ua

logging.basicConfig(level=logging.INFO)
_logger = logging.getLogger('asyncua')


async def main():
    """Connect to the demo OPC UA server and read one example variable."""
    url = 'opc.tcp://localhost:4840/freeopcua/server/'
    # url = 'opc.tcp://commsvr.com:51234/UA/CAS_UA_Server'
    async with Client(url=url) as client:
        # The client exposes proxies to nodes that should always exist in
        # the address space (Root, Objects); Node objects can read/write
        # attributes and browse or populate the address space.
        root_children = await client.nodes.root.get_children()
        _logger.info('Children of root are: %r', root_children)

        uri = 'http://examples.freeopcua.github.io'
        idx = await client.get_namespace_index(uri)

        # Alternative look-ups by node id:
        #   client.get_node(ua.NodeId(1002, 2))
        #   client.get_node("ns=3;i=2002")
        browse_path = ["0:Objects", f"{idx}:MyObject", f"{idx}:MyVariable"]
        var = await client.nodes.root.get_child(browse_path)
        print("My variable", var, await var.read_value())

        # Other node operations (kept from the original example):
        #   await var.read_data_value()  # value as a DataValue object
        #   await var.read_value()       # value as a python builtin
        #   await var.write_value(ua.Variant([23], ua.VariantType.Int64))
        #   await var.write_value(3.9)   # implicit data type


if __name__ == '__main__':
    asyncio.run(main())
44.705882
122
0.678289
229
1,520
4.39738
0.484716
0.039722
0.03575
0.039722
0.089374
0.043694
0.043694
0
0
0
0
0.022388
0.206579
1,520
33
123
46.060606
0.812604
0.476974
0
0
0
0
0.213368
0.053985
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0.0625
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a335c589184f653a55c2e135b27fd8fe0eb98ba
2,783
py
Python
elliptic_moab/Mesh/Selector.py
padmec-reservoir/elliptic_moab
a3b15f29a621c35a8279fd163326a0895aa67f30
[ "MIT" ]
null
null
null
elliptic_moab/Mesh/Selector.py
padmec-reservoir/elliptic_moab
a3b15f29a621c35a8279fd163326a0895aa67f30
[ "MIT" ]
null
null
null
elliptic_moab/Mesh/Selector.py
padmec-reservoir/elliptic_moab
a3b15f29a621c35a8279fd163326a0895aa67f30
[ "MIT" ]
null
null
null
from typing import Type

from elliptic.Kernel.Context import ContextDelegate
from elliptic_meshql.Selector import SelectorImplementationBase


class LoopDelegate(ContextDelegate):
    """Base context delegate for loop-generating selector templates.

    Subclasses set ``loop_name``; every variable the loop declares is
    namespaced with ``loop_name + unique_id``.
    """

    loop_name = ''

    def __init__(self, context, unique_id):
        super().__init__(context, unique_id)
        # Prefix used to namespace all variables declared by this loop.
        self.loop_var_prefix = self.loop_name + str(self.unique_id)

    def template_kwargs(self):
        """Keyword arguments handed to this loop's template."""
        ctx = self.context
        prefix = self.loop_var_prefix
        return {
            'current_entity': ctx.get_value('current_entity_name'),
            'current_range': ctx.get_value('current_range_name'),
            'current_index': ctx.get_value('current_index_name'),
            'reduced_variables': ctx.context[prefix + 'reduced'],
            'mapped_variables': ctx.context[prefix + 'mapped'],
            'reduce_nested_children': ctx.context[prefix + 'nested_children'],
        }

    def context_enter(self):
        """Declare this loop's range/entity/index variables on the context."""
        prefix = self.loop_var_prefix
        put = self.context.put_value
        put('current_loop', prefix)
        for declare_key, current_key, suffix in (
                ('declare_range', 'current_range_name', 'range'),
                ('declare_entityhandle', 'current_entity_name', 'entity'),
                ('declare_index', 'current_index_name', 'index')):
            put(declare_key, prefix + suffix)
            put(current_key, prefix + suffix)

    def context_exit(self):
        """Pop the names pushed in ``context_enter`` (same order as before)."""
        for key in ('current_range_name',
                    'current_entity_name',
                    'current_index_name'):
            self.context.pop_value(key)


class SelectorImplementation(SelectorImplementationBase):
    """Selector implementation producing loop delegates for templates."""

    def by_ent_delegate(self, dim: int) -> Type[ContextDelegate]:
        class ByEntDelegate(LoopDelegate):
            loop_name = 'by_ent'

            def get_template_file(self):
                return 'Selector/by_ent.pyx.etp'

            def template_kwargs(self):
                return {'dim': dim, **super().template_kwargs()}

        return ByEntDelegate

    def by_adj_delegate(self, bridge_dim: int,
                        to_dim: int) -> Type[ContextDelegate]:
        class ByAdjDelegate(LoopDelegate):
            loop_name = 'by_adj'

            def get_template_file(self):
                return 'Selector/by_adj.pyx.etp'

            def template_kwargs(self):
                return {
                    'bridge_dim': bridge_dim,
                    'to_dim': to_dim,
                    # [-2]: presumably the entity name of the enclosing
                    # loop — confirm against Context semantics.
                    'old_entity':
                        self.context.context['current_entity_name'][-2],
                    **super().template_kwargs(),
                }

        return ByAdjDelegate

    def where_delegate(self, conditions):
        pass
36.618421
105
0.657923
321
2,783
5.361371
0.199377
0.115049
0.070308
0.108658
0.525276
0.285299
0.250436
0.191749
0.096456
0
0
0.000469
0.234639
2,783
75
106
37.106667
0.807512
0
0
0.14
0
0
0.173913
0.024434
0
0
0
0
0
1
0.22
false
0.02
0.06
0.1
0.52
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
7a33a1943f4be516367d61e93a16ed20c91bac15
100
py
Python
brain_training/programming_challenges/leetcode/easy/T58_Length_of_Last_Word.py
kuzxnia/algoritms
eda3185f39d79a2657b7ef0da869fcc6b825889d
[ "MIT" ]
null
null
null
brain_training/programming_challenges/leetcode/easy/T58_Length_of_Last_Word.py
kuzxnia/algoritms
eda3185f39d79a2657b7ef0da869fcc6b825889d
[ "MIT" ]
null
null
null
brain_training/programming_challenges/leetcode/easy/T58_Length_of_Last_Word.py
kuzxnia/algoritms
eda3185f39d79a2657b7ef0da869fcc6b825889d
[ "MIT" ]
null
null
null
def lengthOfLastWord_(s): words = s.split() return 0 if len(words) == 0 else len(words[-1])
25
51
0.63
16
100
3.875
0.6875
0.258065
0
0
0
0
0
0
0
0
0
0.037975
0.21
100
3
52
33.333333
0.746835
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
5
7a33a8a8695860ddb6163f6f5857cdcec9984aa3
822
py
Python
cmskit/recipes/menu.py
ozgurgunes/django-cmskit
19d14fbb57702a6c56b6b3a5d859c93533ff1535
[ "MIT" ]
1
2015-09-28T10:10:34.000Z
2015-09-28T10:10:34.000Z
cmskit/recipes/menu.py
ozgurgunes/django-cmskit
19d14fbb57702a6c56b6b3a5d859c93533ff1535
[ "MIT" ]
null
null
null
cmskit/recipes/menu.py
ozgurgunes/django-cmskit
19d14fbb57702a6c56b6b3a5d859c93533ff1535
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from django.utils.translation import ugettext_lazy as _ from django.utils.translation import get_language from cms.menu_bases import CMSAttachMenu from menus.base import Menu, NavigationNode from menus.menu_pool import menu_pool from cmskit.recipes.models import Recipe class RecipesMenu(CMSAttachMenu): name = _("Recipes menu") def get_nodes(self, request): nodes = [] for recipe in Recipe.objects.published().select_related(): try: node = NavigationNode( recipe.title, recipe.get_absolute_url(), recipe.pk ) nodes.append(node) except: pass return nodes menu_pool.register_menu(RecipesMenu)
27.4
66
0.600973
86
822
5.593023
0.569767
0.049896
0.06237
0.108108
0.133056
0
0
0
0
0
0
0.001805
0.326034
822
29
67
28.344828
0.866426
0.025547
0
0
0
0
0.015019
0
0
0
0
0
0
1
0.045455
false
0.045455
0.272727
0
0.454545
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a33ddcfa2b3e722458ff7163d0ad438baf597ed
6,218
py
Python
code/results-past/figure-3-b.py
shaifulcse/codemetrics-with-context-replication
9f0fe6e840d204b70efc9610e6887a64f9a51ce7
[ "MIT" ]
null
null
null
code/results-past/figure-3-b.py
shaifulcse/codemetrics-with-context-replication
9f0fe6e840d204b70efc9610e6887a64f9a51ce7
[ "MIT" ]
null
null
null
code/results-past/figure-3-b.py
shaifulcse/codemetrics-with-context-replication
9f0fe6e840d204b70efc9610e6887a64f9a51ce7
[ "MIT" ]
null
null
null
""" """ import re import os import matplotlib.pyplot as plt import re import numpy as np import math from scipy.stats.stats import pearsonr from scipy.stats.stats import kendalltau import scipy from matplotlib.patches import Rectangle from scipy import stats import seaborn as sns import pandas as pd sns.set(font_scale = 1.2) fig = plt.figure() ax = fig.add_subplot(111) PROJECTS_LIST = "../../info/settings-project.txt" RESULT_PATH="../../data/complexity-and-change-data/" styles=['-', '--','-.',':'] colors = ['r', 'g','b','y'] styles=["-", "--","-.", ":", "-", "--","-.", ":"] marks=["^", "d", "o", "v", "p", "s", "<", ">"] #marks_size=[15, 17, 10, 15, 17, 10, 12,15] marks_size=[15, 17, 10, 15, 17, 10, 12,15] marker_color=['#0F52BA','#ff7518','#6CA939','#e34234','#756bb1','brown','#c994c7', '#636363'] gap = [5,5,3,4,4,3] PROJECTS = {} STATS = {} correl_type = {} def list_projects(): fr = open(PROJECTS_LIST,"r") lines = fr.readlines() fr.close() projects = [] c = 0 for line in lines: c+=1 #if c>2: # break line = line.strip() data = re.findall("[^\t]+",line) if data[0] not in PROJECTS: PROJECTS[data[0]]=1 ### to help step2 def find_index(feature, project): fr = open(RESULT_PATH+project+".txt") line = fr.readline() ## header line = line.strip() data = re.findall("[^\t]+",line) for i in range(len(data)): if data[i] == feature: return i def parse_data(): global STATS for project in PROJECTS: list_indexes(feature,"checkstyle") STATS[project]={} fr = open(RESULT_PATH+project+".txt") line = fr.readline() ## header lines = fr.readlines() fr.close() for line in lines: line = line.strip() data = re.findall("[^\t]+",line) age = int(data[0]) if apply_age_restriction == 1 and age < age_restriction: continue method = data[len(data)-1] if method not in STATS[project]: STATS[project][method]={} feature_values = re.findall("[^,]+",data[feature_index]) date_values = re.findall("[^,]+",data[date_index]) diff_values = re.findall("[^,]+",data[diff_index]) addition_values = 
re.findall("[^,]+",data[addition_index]) edit_values = re.findall("[^,]+",data[edit_index]) track = 0 for i in range(1, len(diff_values)): if int(date_values[i]) > age_restriction: ## change not within time break if int(diff_values[i]) == 0: ## no change in content continue track = 1 feature_value = int(feature_values[i-1]) ## current change happened because of the previous state if feature_value not in STATS[project][method]: STATS[project][method][feature_value]=build_dic() update_stats(project, method, feature_value, 1, int(addition_values[i]), int(diff_values[i]), int(edit_values[i])) if track == 0: ## there was no change feature_value = int(feature_values[0]) ## if feature_value not in STATS[project][method]: STATS[project][method][feature_value]=build_dic() update_stats(project, method, feature_value, 0, 0, 0, 0) def update_stats(project, method, feature_value, rev, add, diff, edit): # print project, method STATS[project][method][feature_value][changeTypes[0]] += rev ### STATS[project][method][feature_value][changeTypes[1]] += add STATS[project][method][feature_value][changeTypes[2]] += diff STATS[project][method][feature_value][changeTypes[3]] += edit def build_dic(): dic = {} for t in changeTypes: dic[t]=0 return dic def list_indexes(feature, project): global feature_index global date_index global diff_index global addition_index global edit_index feature_index = find_index(feature, project) date_index = find_index("ChangeDates", project) diff_index = find_index("DiffSizes", project) addition_index = find_index("NewAdditions", project) edit_index = find_index("EditDistances", project) def correlation(): for project in STATS: for type in changeTypes: X=[] Y=[] for method in STATS[project]: for feature_value in STATS[project][method]: X.append(feature_value) Y.append(STATS[project][method][feature_value][type]) cr = kendalltau(X, Y) #print project, type, cr, cr[0] if type not in correl_type: correl_type[type] = [] correl_type[type].append(float(cr[0])) def 
draw_graph(): index = 0 for type in changeTypes: X,Y = build_cdf(correl_type[type]) #print Y line=(plt.plot(X,Y)) plt.setp(line, linewidth=3,ls=styles[index], marker=marks[index], markerfacecolor=marker_color[index], markersize = 12, color=marker_color[index],markevery=gap[index]) index += 1 plt.legend(changeTypes,loc=0,fontsize=17) plt.xlabel("Correlation",fontsize=20) plt.ylabel("CDF",fontsize=18) for label in ax.get_xticklabels(): label.set_fontsize(19) for label in ax.get_yticklabels(): label.set_fontsize(18) plt.tight_layout() plt.show() def build_cdf(ls): X = [] Y = [] prev = 0.0 total = len(ls) dic = {} for key in ls: if key not in dic: dic[key] = 0.0 dic[key] += 1.0 tracked = {} for key in sorted(ls): if key in tracked: continue tracked[key] = 1 X.append(key) prob = dic[key]/total Y.append(prob + prev) prev = prob + prev return X,Y if __name__ == "__main__": global feature global age_restriction global changeTypes global risks apply_age_restriction = 1 age_restriction = 730 risks =["Low", "Medium", "High", "Very High"] changeTypes =["#Revisions", "NewAdditions", "DiffSizes", "EditDistances"] ### will change based on feature feature = "SLOCStandard" ### changeDates for #revisions list_projects() #list_indexes(feature) parse_data() correlation() draw_graph()
24.772908
122
0.603731
811
6,218
4.499383
0.240444
0.055906
0.06906
0.075363
0.257605
0.199507
0.140861
0.129076
0.10359
0.10359
0
0.027546
0.246864
6,218
250
123
24.872
0.751655
0.06063
0
0.19186
0
0
0.062069
0.011897
0
0
0
0
0
1
0.052326
false
0
0.075581
0
0.145349
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a34367f5bced4609356f0cde39e3a13b062d891
4,585
py
Python
ppmi_tweet_collector.py
wolferobert3/uweat_icsc_2022
172f4cdd737f7405bdc83357e3dabeee94251efc
[ "MIT" ]
null
null
null
ppmi_tweet_collector.py
wolferobert3/uweat_icsc_2022
172f4cdd737f7405bdc83357e3dabeee94251efc
[ "MIT" ]
null
null
null
ppmi_tweet_collector.py
wolferobert3/uweat_icsc_2022
172f4cdd737f7405bdc83357e3dabeee94251efc
[ "MIT" ]
null
null
null
import pandas as pd from os import path, listdir import pickle from nltk import word_tokenize import json import re import emoji import string TARGET_STATE = 'OR' source_dir = f'' twitter_files = [i for i in list(listdir(source_dir)) if TARGET_STATE in i] punctuation_list = list(string.punctuation) + ['....','...', '..', '\"', '\'', '“','”','`','``','…'] tweets_by_date = {} #Collect and clean tweets for idx, tfile in enumerate(twitter_files): with open(path.join(source_dir, tfile), 'r') as i: t_f = [json.loads(line) for line in i] en_tweets = [] for tweet in t_f: en_tweets.append((tweet['id'],tweet['text'],tweet['created_at'][:10])) cleaned_string = {} for tweet in en_tweets: if tweet[0] in cleaned_string: continue refined_tweet = tweet[1].lower() refined_tweet = re.sub(emoji.get_emoji_regexp(), r'', refined_tweet) refined_tweet = re.sub(r'http\S+', '', refined_tweet) refined_tweet = re.sub(r'@\S+', '', refined_tweet) refined_tweet = re.sub(r'#', '', refined_tweet) refined_tweet = re.sub(r'&amp;', '&', refined_tweet) refined_tweet = re.sub(r'\s+', ' ', refined_tweet) refined_tweet = re.sub(r'^rts*\s+', '', refined_tweet) refined_tweet = re.sub(r'^\s+', '', refined_tweet) refined_tweet = re.sub(r'\S+…','',refined_tweet) refined_tweet = ' '.join([i for i in word_tokenize(refined_tweet) if i not in punctuation_list]) refined_tweet = refined_tweet.replace(' \' ','\'') if tweet[2] in tweets_by_date: tweets_by_date[tweet[2]].append(refined_tweet) else: tweets_by_date[tweet[2]] = [refined_tweet] cleaned_string[tweet[0]] = refined_tweet print(idx) with open(f'E:\\state_corpora\\tweets_by_date\\{TARGET_STATE}_tweets_by_date.pkl','wb') as pkl_writer: pickle.dump(tweets_by_date,pkl_writer) #Obtain ground truth dates covid_timeline = pd.read_csv(f'Public_Health_Measures.csv') start_dates = covid_timeline['Start_Date'].tolist() start_dates = list({date:'' for date in start_dates}.keys()) end_dates = covid_timeline['End_Date'].tolist() end_dates = list({date:'' for date in 
end_dates}.keys()) start_end_dict = {start_dates[idx]:[start_dates[idx],end_dates[idx]] for idx in range(len(start_dates))} for key in start_end_dict.keys(): start_year_int = int(key[:4]) start_month_int = int(key[5:7]) start_day_int = int(key[-2:]) end_year_int = int(start_end_dict[key][-1][:4]) end_month_int = int(start_end_dict[key][-1][5:7]) end_day_int = int(start_end_dict[key][-1][-2:]) if start_month_int == end_month_int: days_to_add = [i for i in range(start_day_int,end_day_int+1)] start_end_dict[key] = [f'{start_year_int}-{start_month_int}-{idx}' for idx in days_to_add] else: month1_to_add = [f'{start_year_int}-{start_month_int}-{idx}' for idx in range(start_day_int,32)] month2_to_add = [f'{end_year_int}-{end_month_int}-{idx}' for idx in range(1,end_day_int+1)] start_end_dict[key] = month1_to_add + month2_to_add for idx, date in enumerate(start_end_dict[key]): if date[-2] == '-': start_end_dict[key][idx] = date[:-1] + '0' + date[-1] for idx, date in enumerate(start_end_dict[key]): if date[-5] == '-': start_end_dict[key][idx] = date[:5] + '0' + date[5:] #Process with newlines for PPMI for key in tweets_by_date.keys(): tweets = tweets_by_date[key] tweets_by_date[key] = ['\n'.join(i.split(' ')) for i in tweets] #Join into time period corpora tweets_by_time_period = {} for key in start_end_dict.keys(): time_range_tweets = [] for date in start_end_dict[key]: if date in tweets_by_date: if time_range_tweets: time_range_tweets.extend(tweets_by_date[date]) else: time_range_tweets = tweets_by_date[date] tweets_by_time_period[key] = time_range_tweets if time_range_tweets: with open(f'E:\\state_corpora\\strict_divisions\\corpus_lists\\{TARGET_STATE}_{key}_tweet_list.pkl','wb') as pkl_writer: pickle.dump(time_range_tweets,pkl_writer) corpus_string = '\n\n\n\n\n\n\n\n'.join(time_range_tweets) with open(f'E:\\state_corpora\\strict_divisions\\corpus_strings\\{TARGET_STATE}_{key}_tweet_corpus.txt','w',encoding='utf8') as writer: writer.write(corpus_string)
39.188034
144
0.638168
694
4,585
3.920749
0.18732
0.114664
0.057332
0.088203
0.362367
0.332598
0.284454
0.209849
0.169055
0.168688
0
0.010261
0.213522
4,585
117
145
39.188034
0.742651
0.023555
0
0.102273
0
0.011364
0.118173
0.088573
0
0
0
0
0
1
0
false
0
0.090909
0
0.090909
0.011364
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a38fc376b5e5fd6f4153aa152aa553785289bf1
911
py
Python
all/054.py
brenodt/Desafio-365-dias-programando
6c899b4f2e314b9a3a75734f39509a665016e206
[ "MIT" ]
null
null
null
all/054.py
brenodt/Desafio-365-dias-programando
6c899b4f2e314b9a3a75734f39509a665016e206
[ "MIT" ]
null
null
null
all/054.py
brenodt/Desafio-365-dias-programando
6c899b4f2e314b9a3a75734f39509a665016e206
[ "MIT" ]
null
null
null
from collections import OrderedDict # OrderedDict deve ser importado! def enesimo_nao_repetido(entrada: str, n: int): # Cria o dicionário usando OrderedDict e inicializa # cada chave com o valor 0 dicionario = OrderedDict.fromkeys(entrada, 0) # Conta a ocorrência de cada letra for letra in entrada: dicionario[letra] += 1 # Usando List Comprehension, elimina letras duplicadas elementos_nao_repetidos = [ chave for (chave, valor) in \ dicionario.items()if valor == 1 ] # Se N é maior do que o número de letras não repetidas if len(elementos_nao_repetidos) < n: return -1 else: # Senão, retorne a n-ésima letra não repetida return elementos_nao_repetidos[n-1] if __name__ == "__main__": texto = "AA BB CC DD EE F GG H II JJ KK L M N OO" letra = 5 # -- SAÍDA --: # N print(enesimo_nao_repetido(texto, letra))
28.46875
70
0.668496
130
911
4.546154
0.615385
0.060914
0.106599
0.07445
0
0
0
0
0
0
0
0.010324
0.255763
911
32
71
28.46875
0.861357
0.333699
0
0
0
0
0.078859
0
0
0
0
0
0
1
0.066667
false
0
0.066667
0
0.266667
0.066667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a397491b82c42c5a64d723d577f9c2c7718bf3a
187
py
Python
testPins.py
brammieman1/MazeSolver
22d5a91e95fe88aecd4d4b1f75218d628d2e5fe4
[ "MIT" ]
null
null
null
testPins.py
brammieman1/MazeSolver
22d5a91e95fe88aecd4d4b1f75218d628d2e5fe4
[ "MIT" ]
null
null
null
testPins.py
brammieman1/MazeSolver
22d5a91e95fe88aecd4d4b1f75218d628d2e5fe4
[ "MIT" ]
null
null
null
import wiringpi2 as wiringpi import time wiringpi.wiringPiSetupGpio() wiringpi.pinMode(17,1) wiringpi.digitalWrite(17,1) time.sleep(4) wiringpi.digitalWrite(17,0) wiringpi.pinMode(17,0)
18.7
28
0.812834
27
187
5.62963
0.481481
0.197368
0.223684
0
0
0
0
0
0
0
0
0.08046
0.069519
187
9
29
20.777778
0.793103
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.25
0
0.25
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
7a3acfc511ddee7da218b4ddb4970a9394b9e44a
5,946
py
Python
venv/lib/python3.7/site-packages/rqdatac/services/orm/financial_indicator_sql.py
CatTiger/vnpy
7901a0fb80a5b44d6fc752bd4b2b64ec62c8f84b
[ "MIT" ]
null
null
null
venv/lib/python3.7/site-packages/rqdatac/services/orm/financial_indicator_sql.py
CatTiger/vnpy
7901a0fb80a5b44d6fc752bd4b2b64ec62c8f84b
[ "MIT" ]
1
2020-04-21T02:42:32.000Z
2020-04-21T02:42:32.000Z
venv/lib/python3.7/site-packages/rqdatac/services/orm/financial_indicator_sql.py
CatTiger/vnpy
7901a0fb80a5b44d6fc752bd4b2b64ec62c8f84b
[ "MIT" ]
null
null
null
# coding: utf-8 from sqlalchemy import Numeric, Column from .fundamental_base_sql import FundamentalBase class AnaStkFinIdx(FundamentalBase): earnings_per_share = Column(Numeric(18, 4)) fully_diluted_earnings_per_share = Column(Numeric(18, 4)) diluted_earnings_per_share = Column(Numeric(18, 4)) adjusted_earnings_per_share = Column(Numeric(18, 4)) adjusted_fully_diluted_earnings_per_share = Column(Numeric(18, 4)) adjusted_diluted_earnings_per_share = Column(Numeric(18, 4)) book_value_per_share = Column(Numeric(18, 4)) operating_cash_flow_per_share = Column(Numeric(18, 4)) operating_total_revenue_per_share = Column(Numeric(18, 4)) operating_revenue_per_share = Column(Numeric(18, 4)) capital_reserve_per_share = Column(Numeric(18, 4)) earned_reserve_per_share = Column(Numeric(18, 4)) undistributed_profit_per_share = Column(Numeric(18, 4)) retained_earnings_per_share = Column(Numeric(18, 4)) cash_flow_from_operations_per_share = Column(Numeric(18, 4)) ebit_per_share = Column(Numeric(18, 4)) free_cash_flow_company_per_share = Column(Numeric(18, 4)) free_cash_flow_equity_per_share = Column(Numeric(18, 4)) dividend_per_share = Column(Numeric(18, 4)) return_on_equity = Column(Numeric(18, 4)) return_on_equity_weighted_average = Column(Numeric(18, 4)) return_on_equity_diluted = Column(Numeric(18, 4)) adjusted_return_on_equity_average = Column(Numeric(18, 4)) adjusted_return_on_equity_weighted_average = Column(Numeric(18, 4)) adjusted_return_on_equity_diluted = Column(Numeric(18, 4)) return_on_asset = Column(Numeric(18, 4)) return_on_asset_net_profit = Column(Numeric(18, 4)) return_on_invested_capital = Column(Numeric(18, 4)) annual_return_on_equity = Column(Numeric(18, 4)) annual_return_on_asset = Column(Numeric(18, 4)) annual_return_on_asset_net_profit = Column(Numeric(18, 4)) net_profit_margin = Column(Numeric(18, 4)) gross_profit_margin = Column(Numeric(18, 4)) cost_to_sales = Column(Numeric(18, 4)) net_profit_to_revenue = Column(Numeric(18, 4)) 
profit_from_operation_to_revenue = Column(Numeric(18, 4)) ebit_to_revenue = Column(Numeric(18, 4)) expense_to_revenue = Column(Numeric(18, 4)) operating_profit_to_profit_before_tax = Column(Numeric(18, 4)) invesment_profit_to_profit_before_tax = Column(Numeric(18, 4)) non_operating_profit_to_profit_before_tax = Column(Numeric(18, 4)) income_tax_to_profit_before_tax = Column(Numeric(18, 4)) adjusted_profit_to_total_profit = Column(Numeric(18, 4)) debt_to_asset_ratio = Column(Numeric(18, 4)) equity_multiplier = Column(Numeric(18, 4)) current_asset_to_total_asset = Column(Numeric(18, 4)) non_current_asset_to_total_asset = Column(Numeric(18, 4)) interest_bearing_debt_to_capital = Column(Numeric(18, 4)) current_debt_to_total_debt = Column(Numeric(18, 4)) non_current_debt_to_total_debt = Column(Numeric(18, 4)) current_ratio = Column(Numeric(18, 4)) quick_ratio = Column(Numeric(18, 4)) super_quick_ratio = Column(Numeric(18, 4)) debt_to_equity_ratio = Column(Numeric(18, 4)) equity_to_debt_ratio = Column(Numeric(18, 4)) equity_to_interest_bearing_debt = Column(Numeric(18, 4)) ebit_to_debt = Column(Numeric(18, 4)) ocf_to_debt = Column(Numeric(18, 4)) ocf_to_interest_bearing_debt = Column(Numeric(18, 4)) ocf_to_current_ratio = Column(Numeric(18, 4)) ocf_to_net_debt = Column(Numeric(18, 4)) time_interest_earned_ratio = Column(Numeric(18, 4)) long_term_debt_to_working_capital = Column(Numeric(18, 4)) account_payable_turnover_rate = Column(Numeric(18, 4)) account_payable_turnover_days = Column(Numeric(18, 4)) account_receivable_turnover_days = Column(Numeric(18, 4)) inventory_turnover = Column(Numeric(18, 4)) account_receivable_turnover_rate = Column(Numeric(18, 4)) current_asset_turnover = Column(Numeric(18, 4)) fixed_asset_turnover = Column(Numeric(18, 4)) total_asset_turnover = Column(Numeric(18, 4)) inc_earnings_per_share = Column(Numeric(18, 4)) inc_diluted_earnings_per_share = Column(Numeric(18, 4)) inc_revenue = Column(Numeric(18, 4)) inc_operating_revenue = 
Column(Numeric(18, 4)) inc_gross_profit = Column(Numeric(18, 4)) inc_profit_before_tax = Column(Numeric(18, 4)) inc_net_profit = Column(Numeric(18, 4)) inc_adjusted_net_profit = Column(Numeric(18, 4)) inc_cash_from_operations = Column(Numeric(18, 4)) inc_return_on_equity = Column(Numeric(18, 4)) inc_book_per_share = Column(Numeric(18, 4)) inc_total_asset = Column(Numeric(18, 4)) du_return_on_equity = Column(Numeric(18, 4)) du_equity_multiplier = Column(Numeric(18, 4)) du_asset_turnover_ratio = Column(Numeric(18, 4)) du_profit_margin = Column(Numeric(18, 4)) du_return_on_sales = Column(Numeric(18, 4)) non_recurring_profit_and_loss = Column(Numeric(18, 4)) adjusted_net_profit = Column(Numeric(18, 4)) ebit = Column(Numeric(18, 4)) ebitda = Column(Numeric(18, 4)) invested_capital = Column(Numeric(18, 4)) working_capital = Column(Numeric(18, 4)) net_working_capital = Column(Numeric(18, 4)) retained_earnings = Column(Numeric(18, 4)) interest_bearing_debt = Column(Numeric(18, 4)) net_debt = Column(Numeric(18, 4)) non_interest_bearing_current_debt = Column(Numeric(18, 4)) non_interest_bearing_non_current_debt = Column(Numeric(18, 4)) fcff = Column(Numeric(18, 4)) fcfe = Column(Numeric(18, 4)) depreciation_and_amortization = Column(Numeric(18, 4)) ev = Column(Numeric(21, 4)) ev_2 = Column(Numeric(21, 4)) ev_to_ebit = Column(Numeric(18, 4)) ev_to_ebitda = Column(Numeric(19, 4)) tangible_assets = Column(Numeric(19, 4)) tangible_asset_to_debt = Column(Numeric(19, 4)) tangible_asset_to_interest_bearing_debt = Column(Numeric(19, 4))
50.389831
71
0.736798
881
5,946
4.614075
0.111237
0.351784
0.383764
0.409348
0.868143
0.793604
0.550308
0.308241
0.178352
0.023616
0
0.065756
0.150858
5,946
117
72
50.820513
0.739354
0.002186
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.017699
0
1
0
0
0
0
null
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
6
7a3e3c0c5a227d2ab34cab782fab2492bb8ae99d
35,990
py
Python
teospy/iceliq4.py
jarethholt/teospy
3bb23e67bbb765c0842aa8d4a73c1d55ea395d2f
[ "MIT" ]
null
null
null
teospy/iceliq4.py
jarethholt/teospy
3bb23e67bbb765c0842aa8d4a73c1d55ea395d2f
[ "MIT" ]
null
null
null
teospy/iceliq4.py
jarethholt/teospy
3bb23e67bbb765c0842aa8d4a73c1d55ea395d2f
[ "MIT" ]
null
null
null
"""Ice-liquid water equilibrium functions. This module provides thermodynamic properties of ice and liquid water in equilibrium, e.g. the enthalpy of melting. :Examples: >>> pressure(temp=270.) 39313338.8825 >>> densityliq(temp=270.) 1019.05568894 >>> enthalpymelt(temp=270.) 325166.686739 >>> entropymelt(temp=270.) 1204.32106199 >>> volumemelt(temp=270.) -1.04052121182e-4 >>> temperature(pres=1e7) 272.401648868 >>> densityliq(pres=1e7) 1004.79353660 >>> enthalpymelt(pres=1e7) 331548.910815 >>> entropymelt(pres=1e7) 1217.13254010 >>> volumemelt(pres=1e7) -9.4217890326e-05 :Functions: * :func:`eq_tp`: Calculate ice-liquid water equilibrium properties at either temperature or pressure. * :func:`temperature`: Temperature at ice-liquid water equilibrium. * :func:`pressure`: Pressure at ice-liquid water equilibrium. * :func:`densityliq`: Liquid water density at ice-liquid water equilibrium. * :func:`chempot`: Chemical potential at ice-liquid water equilibrium. * :func:`densityice`: Ice density at ice-liquid water equilibrium. * :func:`enthalpyice`: Ice enthalpy at ice-liquid water equilibrium. * :func:`enthalpyliq`: Liquid water enthalpy at ice-liquid water equilibrium. * :func:`enthalpymelt`: Enthalpy of melting. * :func:`entropyice`: Ice entropy at ice-liquid water equilibrium. * :func:`entropyliq`: Liquid water entropy at ice-liquid water equilibrium. * :func:`entropymelt`: Entropy of melting. * :func:`volumemelt`: Specific volume of melting. 
""" __all__ = ['eq_tp','temperature','pressure','densityliq','chempot','densityice', 'enthalpyice','enthalpyliq','enthalpymelt','entropyice','entropyliq', 'entropymelt','volumemelt'] import warnings import numpy from teospy import constants0 from teospy import ice1 from teospy import flu2 from teospy import ice2 from teospy import maths3 _CHKTOL = constants0.CHKTOL _TTP = constants0.TTP _PTPI = constants0.PTPI _DLTP = constants0.DLTP _LILTP = constants0.LILTP _chkflubnds = constants0.chkflubnds _chkicebnds = constants0.chkicebnds _ice_g = ice1.ice_g _eq_chempot = flu2.eq_chempot _eq_pressure = flu2.eq_pressure _newton = maths3.newton _C_APPS = ((-1.78582981492113,-12.2325084306734,-52.8236936433529), (-1.67329759176351e-7,-2.02262929999658e-13)) ## Equilibrium functions def _approx_t(temp): """Approximate PDl at T. Approximate the pressure and liquid water density for ice and liquid water in equilibrium at the given temperature. This approximation is based on an empirical polynomial for density. :arg float temp: Temperature in K. :returns: Pressure in Pa and liquid water density in kg/m3. """ tau = temp/_TTP - 1 dta = 0. for (i,a) in enumerate(_C_APPS[0]): dta += a * tau**(i+1) dliq = _DLTP * (1 + dta) pres = flu2.pressure(temp,dliq) return pres, dliq def _approx_p(pres): """Approximate TDl at P. Approximate the temperature and liquid water density for ice and liquid water in equilibrium at the given pressure. This approximation is based on empirical polynomials for temperature and density. :arg float pres: Pressure in Pa. :returns: Temperature in K and liquid water density in kg/m3. """ a1, a2 = _C_APPS[1] psi = pres/_PTPI - 1 tau = a1*psi + a2*psi**2 temp = _TTP * (1 + tau) dta = 0. for (i,a) in enumerate(_C_APPS[0]): dta += a * tau**(i+1) dliq = _DLTP * (1 + dta) return temp, dliq def _diff_t(p,dl,temp): """Calculate ice-liquid disequilibrium at T. 
Calculate both sides of the equations given pressure = pressure of liquid water chemical potential of ice = potential of liquid water and their Jacobians with respect to pressure and liquid water density. Solving these equations gives the pressure and liquid water density at the given temperature. :arg float p: Pressure in Pa. :arg float dl: Liquid water density in kg/m3. :arg float temp: Temperature in K. :returns: Left-hand side of the equation, right-hand side, Jacobian of LHS, and Jacobian of RHS. :rtype: tuple(array(float)) """ pl = _eq_pressure(0,0,temp,dl) gi = _ice_g(0,0,temp,p) gl = _eq_chempot(0,0,temp,dl) lhs = numpy.array([p, gi]) rhs = numpy.array([pl, gl]) pl_d = _eq_pressure(0,1,temp,dl) gi_p = _ice_g(0,1,temp,p) gl_d = _eq_chempot(0,1,temp,dl) dlhs = numpy.array([[1.,0.], [gi_p,0.]]) drhs = numpy.array([[0.,pl_d], [0.,gl_d]]) return lhs, rhs, dlhs, drhs def _diff_p(t,dl,pres): """Calculate ice-liquid disequilibrium at P. Calculate both sides of the equations given pressure = pressure of liquid water chemical potential of ice = potential of liquid water and their Jacobians with respect to temperature and liquid water density. Solving these equations gives the temperature and liquid water density at the given temperature. :arg float t: Temperature in K. :arg float dl: Liquid water density in kg/m3. :arg float pres: Pressure in Pa. :returns: Left-hand side of the equation, right-hand side, Jacobian of LHS, and Jacobian of RHS. 
:rtype: tuple(array(float)) """ pl = _eq_pressure(0,0,t,dl) gi = _ice_g(0,0,t,pres) gl = _eq_chempot(0,0,t,dl) lhs = numpy.array([pres, gi]) rhs = numpy.array([pl, gl]) pl_t = _eq_pressure(1,0,t,dl) pl_d = _eq_pressure(0,1,t,dl) gi_t = _ice_g(1,0,t,pres) gl_t = _eq_chempot(1,0,t,dl) gl_d = _eq_chempot(0,1,t,dl) dlhs = numpy.array([[0.,0.], [gi_t,0.]]) drhs = numpy.array([[pl_t,pl_d], [gl_t,gl_d]]) return lhs, rhs, dlhs, drhs def eq_tp(temp=None,pres=None,dliq=None,chkvals=False,chktol=_CHKTOL, temp0=None,pres0=None,dliq0=None,chkbnd=False,mathargs=None): """Get primary ice-liquid variables at T or P. Get the values of all primary variables for ice and liquid water in equilibrium at either of a given temperature or pressure. If the calculation has already been done, the results can be passed to avoid unnecessary repeat calculations. If enough values are passed, they will be checked for consistency if chkvals is True. :arg temp: Temperature in K. :type temp: float or None :arg pres: Pressure in Pa. :type pres: float or None :arg dliq: Liquid water density in kg/m3. If unknown, pass None (default) and it will be calculated. :type dliq: float or None :arg bool chkvals: If True (default False) and all values are given, this function will calculate the disequilibrium and raise a warning if the results are not within a given tolerance. :arg float chktol: Tolerance to use when checking values (default _CHKTOL). :arg temp0: Initial guess for the temperature in K. If None (default) then `_approx_p` is used. :type temp0: float or None :arg pres0: Initial guess for the pressure in Pa. If None (default) then `_approx_t` is used. :type pres0: float or None :arg dliq0: Initial guess for the liquid water density in kg/m3. If None (default) then `_approx_t` or `_approx_p` is used. :type dliq0: float or None :arg bool chkbnd: If True then warnings are raised when the given values are valid but outside the recommended bounds (default False). 
:arg mathargs: Keyword arguments to the root-finder :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None (default) then no arguments are passed and default parameters will be used. :returns: Temperature, pressure, and liquid water density (all in SI units). :raises ValueError: If neither of temp or pres is provided. :raises RuntimeWarning: If the relative disequilibrium is more than chktol, if chkvals is True and all values are given. """ if temp is None and pres is None: errmsg = 'One of temp or pres must be provided' raise ValueError(errmsg) if temp is not None: if any(val is None for val in (pres,dliq)): x0 = (pres0,dliq0) fargs = (temp,) if mathargs is None: mathargs = dict() x1 = _newton(_diff_t,x0,_approx_t,fargs=fargs,**mathargs) pres, dliq = x1 else: x0 = (temp0,dliq0) fargs = (pres,) if mathargs is None: mathargs = dict() x1 = _newton(_diff_p,x0,_approx_p,fargs=fargs,**mathargs) temp, dliq = x1 _chkflubnds(temp,dliq,chkbnd=chkbnd) _chkicebnds(temp,pres,chkbnd=chkbnd) if not chkvals: return temp, pres, dliq lhs, rhs, __, __ = _diff_p(temp,dliq,pres) errs = list() for (l,r) in zip(lhs,rhs): if abs(r) >= chktol: errs.append(abs(l/r-1)) else: errs.append(abs(l-r)) if max(errs) > chktol: warnmsg = ('Given values {0} and solutions {1} disagree to more than ' 'the tolerance {2}').format(lhs,rhs,chktol) warnings.warn(warnmsg,RuntimeWarning) return temp, pres, dliq ## Thermodynamic properties def temperature(temp=None,pres=None,dliq=None,chkvals=False, chktol=_CHKTOL,temp0=None,pres0=None,dliq0=None,chkbnd=False, mathargs=None): """Calculate ice-liquid temperature. Calculate the temperature of ice and liquid water in equilibrium. :arg temp: Temperature in K. :type temp: float or None :arg pres: Pressure in Pa. :type pres: float or None :arg dliq: Liquid water density in kg/m3. If unknown, pass None (default) and it will be calculated. 
:type dliq: float or None :arg bool chkvals: If True (default False) and all values are given, this function will calculate the disequilibrium and raise a warning if the results are not within a given tolerance. :arg float chktol: Tolerance to use when checking values (default _CHKTOL). :arg temp0: Initial guess for the temperature in K. If None (default) then `_approx_p` is used. :type temp0: float or None :arg pres0: Initial guess for the pressure in Pa. If None (default) then `_approx_t` is used. :type pres0: float or None :arg dliq0: Initial guess for the liquid water density in kg/m3. If None (default) then `_approx_t` or `_approx_p` is used. :type dliq0: float or None :arg bool chkbnd: If True then warnings are raised when the given values are valid but outside the recommended bounds (default False). :arg mathargs: Keyword arguments to the root-finder :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None (default) then no arguments are passed and default parameters will be used. :returns: Temperature in K. :raises ValueError: If neither of temp or pres is provided. :raises RuntimeWarning: If the relative disequilibrium is more than chktol, if chkvals is True and all values are given. :Examples: >>> temperature(pres=1e7) 272.40164887 """ temp, pres, dliq = eq_tp(temp=temp,pres=pres,dliq=dliq,chkvals=chkvals, chktol=chktol,temp0=temp0,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd, mathargs=mathargs) return temp def pressure(temp=None,pres=None,dliq=None,chkvals=False, chktol=_CHKTOL,temp0=None,pres0=None,dliq0=None,chkbnd=False, mathargs=None): """Calculate ice-liquid pressure. Calculate the pressure of ice and liquid water in equilibrium. :arg temp: Temperature in K. :type temp: float or None :arg pres: Pressure in Pa. :type pres: float or None :arg dliq: Liquid water density in kg/m3. If unknown, pass None (default) and it will be calculated. 
:type dliq: float or None :arg bool chkvals: If True (default False) and all values are given, this function will calculate the disequilibrium and raise a warning if the results are not within a given tolerance. :arg float chktol: Tolerance to use when checking values (default _CHKTOL). :arg temp0: Initial guess for the temperature in K. If None (default) then `_approx_p` is used. :type temp0: float or None :arg pres0: Initial guess for the pressure in Pa. If None (default) then `_approx_t` is used. :type pres0: float or None :arg dliq0: Initial guess for the liquid water density in kg/m3. If None (default) then `_approx_t` or `_approx_p` is used. :type dliq0: float or None :arg bool chkbnd: If True then warnings are raised when the given values are valid but outside the recommended bounds (default False). :arg mathargs: Keyword arguments to the root-finder :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None (default) then no arguments are passed and default parameters will be used. :returns: Pressure in Pa. :raises ValueError: If neither of temp or pres is provided. :raises RuntimeWarning: If the relative disequilibrium is more than chktol, if chkvals is True and all values are given. :Examples: >>> pressure(temp=270.) 39313338.8825 """ temp, pres, dliq = eq_tp(temp=temp,pres=pres,dliq=dliq,chkvals=chkvals, chktol=chktol,temp0=temp0,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd, mathargs=mathargs) return pres def densityliq(temp=None,pres=None,dliq=None,chkvals=False, chktol=_CHKTOL,temp0=None,pres0=None,dliq0=None,chkbnd=False, mathargs=None): """Calculate ice-liquid liquid water density. Calculate the density of liquid water for ice and liquid water in equilibrium. :arg temp: Temperature in K. :type temp: float or None :arg pres: Pressure in Pa. :type pres: float or None :arg dliq: Liquid water density in kg/m3. If unknown, pass None (default) and it will be calculated. 
:type dliq: float or None :arg bool chkvals: If True (default False) and all values are given, this function will calculate the disequilibrium and raise a warning if the results are not within a given tolerance. :arg float chktol: Tolerance to use when checking values (default _CHKTOL). :arg temp0: Initial guess for the temperature in K. If None (default) then `_approx_p` is used. :type temp0: float or None :arg pres0: Initial guess for the pressure in Pa. If None (default) then `_approx_t` is used. :type pres0: float or None :arg dliq0: Initial guess for the liquid water density in kg/m3. If None (default) then `_approx_t` or `_approx_p` is used. :type dliq0: float or None :arg bool chkbnd: If True then warnings are raised when the given values are valid but outside the recommended bounds (default False). :arg mathargs: Keyword arguments to the root-finder :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None (default) then no arguments are passed and default parameters will be used. :returns: Liquid water density in kg/m3. :raises ValueError: If neither of temp or pres is provided. :raises RuntimeWarning: If the relative disequilibrium is more than chktol, if chkvals is True and all values are given. :Examples: >>> densityliq(pres=1e7) 1004.79353660 >>> densityliq(temp=270.) 1019.05568894 """ temp, pres, dliq = eq_tp(temp=temp,pres=pres,dliq=dliq,chkvals=chkvals, chktol=chktol,temp0=temp0,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd, mathargs=mathargs) return dliq def chempot(temp=None,pres=None,dliq=None,chkvals=False,chktol=_CHKTOL, temp0=None,pres0=None,dliq0=None,chkbnd=False,mathargs=None): """Calculate ice-liquid chemical potential. Calculate the chemical potential of ice and liquid water in equilibrium. :arg temp: Temperature in K. :type temp: float or None :arg pres: Pressure in Pa. :type pres: float or None :arg dliq: Liquid water density in kg/m3. If unknown, pass None (default) and it will be calculated. 
:type dliq: float or None :arg bool chkvals: If True (default False) and all values are given, this function will calculate the disequilibrium and raise a warning if the results are not within a given tolerance. :arg float chktol: Tolerance to use when checking values (default _CHKTOL). :arg temp0: Initial guess for the temperature in K. If None (default) then `_approx_p` is used. :type temp0: float or None :arg pres0: Initial guess for the pressure in Pa. If None (default) then `_approx_t` is used. :type pres0: float or None :arg dliq0: Initial guess for the liquid water density in kg/m3. If None (default) then `_approx_t` or `_approx_p` is used. :type dliq0: float or None :arg bool chkbnd: If True then warnings are raised when the given values are valid but outside the recommended bounds (default False). :arg mathargs: Keyword arguments to the root-finder :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None (default) then no arguments are passed and default parameters will be used. :returns: Chemical potential in J/kg. :raises ValueError: If neither of temp or pres is provided. :raises RuntimeWarning: If the relative disequilibrium is more than chktol, if chkvals is True and all values are given. :Examples: >>> chempot(pres=1e7) 9972.8817069 >>> chempot(temp=270.) 38870.0605192 """ temp, pres, dliq = eq_tp(temp=temp,pres=pres,dliq=dliq,chkvals=chkvals, chktol=chktol,temp0=temp0,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd, mathargs=mathargs) g = _ice_g(0,0,temp,pres) return g def densityice(temp=None,pres=None,dliq=None,chkvals=False, chktol=_CHKTOL,temp0=None,pres0=None,dliq0=None,chkbnd=False, mathargs=None): """Calculate ice-liquid ice density. Calculate the density of ice for ice and liquid water in equilibrium. :arg temp: Temperature in K. :type temp: float or None :arg pres: Pressure in Pa. :type pres: float or None :arg dliq: Liquid water density in kg/m3. If unknown, pass None (default) and it will be calculated. 
:type dliq: float or None :arg bool chkvals: If True (default False) and all values are given, this function will calculate the disequilibrium and raise a warning if the results are not within a given tolerance. :arg float chktol: Tolerance to use when checking values (default _CHKTOL). :arg temp0: Initial guess for the temperature in K. If None (default) then `_approx_p` is used. :type temp0: float or None :arg pres0: Initial guess for the pressure in Pa. If None (default) then `_approx_t` is used. :type pres0: float or None :arg dliq0: Initial guess for the liquid water density in kg/m3. If None (default) then `_approx_t` or `_approx_p` is used. :type dliq0: float or None :arg bool chkbnd: If True then warnings are raised when the given values are valid but outside the recommended bounds (default False). :arg mathargs: Keyword arguments to the root-finder :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None (default) then no arguments are passed and default parameters will be used. :returns: Ice density in kg/m3. :raises ValueError: If neither of temp or pres is provided. :raises RuntimeWarning: If the relative disequilibrium is more than chktol, if chkvals is True and all values are given. :Examples: >>> densityice(pres=1e7) 917.896690830 >>> densityice(temp=270.) 921.359428514 """ temp, pres, dliq = eq_tp(temp=temp,pres=pres,dliq=dliq,chkvals=chkvals, chktol=chktol,temp0=temp0,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd, mathargs=mathargs) dice = ice2.density(temp,pres) return dice def enthalpyice(temp=None,pres=None,dliq=None,chkvals=False, chktol=_CHKTOL,temp0=None,pres0=None,dliq0=None,chkbnd=False, mathargs=None): """Calculate liquid-ice ice enthalpy. Calculate the specific enthalpy of ice for ice and liquid water in equilibrium. :arg temp: Temperature in K. :type temp: float or None :arg pres: Pressure in Pa. :type pres: float or None :arg dliq: Liquid water density in kg/m3. If unknown, pass None (default) and it will be calculated. 
:type dliq: float or None :arg bool chkvals: If True (default False) and all values are given, this function will calculate the disequilibrium and raise a warning if the results are not within a given tolerance. :arg float chktol: Tolerance to use when checking values (default _CHKTOL). :arg temp0: Initial guess for the temperature in K. If None (default) then `_approx_p` is used. :type temp0: float or None :arg pres0: Initial guess for the pressure in Pa. If None (default) then `_approx_t` is used. :type pres0: float or None :arg dliq0: Initial guess for the liquid water density in kg/m3. If None (default) then `_approx_t` or `_approx_p` is used. :type dliq0: float or None :arg bool chkbnd: If True then warnings are raised when the given values are valid but outside the recommended bounds (default False). :arg mathargs: Keyword arguments to the root-finder :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None (default) then no arguments are passed and default parameters will be used. :returns: Enthalpy in J/kg. :raises ValueError: If neither of temp or pres is provided. :raises RuntimeWarning: If the relative disequilibrium is more than chktol, if chkvals is True and all values are given. :Examples: >>> enthalpyice(pres=1e7) -324602.983822 >>> enthalpyice(temp=270.) -299055.938629 """ temp, pres, dliq = eq_tp(temp=temp,pres=pres,dliq=dliq,chkvals=chkvals, chktol=chktol,temp0=temp0,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd, mathargs=mathargs) hi = ice2.enthalpy(temp,pres) return hi def enthalpyliq(temp=None,pres=None,dliq=None,chkvals=False, chktol=_CHKTOL,temp0=None,pres0=None,dliq0=None,chkbnd=False, mathargs=None): """Calculate ice-liquid liquid water enthalpy. Calculate the specific enthalpy of liquid water for ice and liquid water in equilibrium. :arg temp: Temperature in K. :type temp: float or None :arg pres: Pressure in Pa. :type pres: float or None :arg dliq: Liquid water density in kg/m3. If unknown, pass None (default) and it will be calculated. 
:type dliq: float or None :arg bool chkvals: If True (default False) and all values are given, this function will calculate the disequilibrium and raise a warning if the results are not within a given tolerance. :arg float chktol: Tolerance to use when checking values (default _CHKTOL). :arg temp0: Initial guess for the temperature in K. If None (default) then `_approx_p` is used. :type temp0: float or None :arg pres0: Initial guess for the pressure in Pa. If None (default) then `_approx_t` is used. :type pres0: float or None :arg dliq0: Initial guess for the liquid water density in kg/m3. If None (default) then `_approx_t` or `_approx_p` is used. :type dliq0: float or None :arg bool chkbnd: If True then warnings are raised when the given values are valid but outside the recommended bounds (default False). :arg mathargs: Keyword arguments to the root-finder :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None (default) then no arguments are passed and default parameters will be used. :returns: Enthalpy in J/kg. :raises ValueError: If neither of temp or pres is provided. :raises RuntimeWarning: If the relative disequilibrium is more than chktol, if chkvals is True and all values are given. :Examples: >>> enthalpyliq(pres=1e7) 6945.9269937 >>> enthalpyliq(temp=270.) 26110.7481094 """ temp, pres, dliq = eq_tp(temp=temp,pres=pres,dliq=dliq,chkvals=chkvals, chktol=chktol,temp0=temp0,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd, mathargs=mathargs) hl = flu2.enthalpy(temp,dliq) return hl def enthalpymelt(temp=None,pres=None,dliq=None,chkvals=False, chktol=_CHKTOL,temp0=None,pres0=None,dliq0=None,chkbnd=False, mathargs=None): """Calculate enthalpy of melting. Calculate the specific enthalpy of melting. :arg temp: Temperature in K. :type temp: float or None :arg pres: Pressure in Pa. :type pres: float or None :arg dliq: Liquid water density in kg/m3. If unknown, pass None (default) and it will be calculated. 
:type dliq: float or None :arg bool chkvals: If True (default False) and all values are given, this function will calculate the disequilibrium and raise a warning if the results are not within a given tolerance. :arg float chktol: Tolerance to use when checking values (default _CHKTOL). :arg temp0: Initial guess for the temperature in K. If None (default) then `_approx_p` is used. :type temp0: float or None :arg pres0: Initial guess for the pressure in Pa. If None (default) then `_approx_t` is used. :type pres0: float or None :arg dliq0: Initial guess for the liquid water density in kg/m3. If None (default) then `_approx_t` or `_approx_p` is used. :type dliq0: float or None :arg bool chkbnd: If True then warnings are raised when the given values are valid but outside the recommended bounds (default False). :arg mathargs: Keyword arguments to the root-finder :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None (default) then no arguments are passed and default parameters will be used. :returns: Enthalpy in J/kg. :raises ValueError: If neither of temp or pres is provided. :raises RuntimeWarning: If the relative disequilibrium is more than chktol, if chkvals is True and all values are given. :Examples: >>> enthalpymelt(pres=1e7) 331548.910815 >>> enthalpymelt(temp=270.) 325166.686739 """ temp, pres, dliq = eq_tp(temp=temp,pres=pres,dliq=dliq,chkvals=chkvals, chktol=chktol,temp0=temp0,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd, mathargs=mathargs) hl = flu2.enthalpy(temp,dliq) hi = ice2.enthalpy(temp,pres) hmelt = hl - hi return hmelt def entropyice(temp=None,pres=None,dliq=None,chkvals=False, chktol=_CHKTOL,temp0=None,pres0=None,dliq0=None,chkbnd=False, mathargs=None): """Calculate ice-liquid ice entropy. Calculate the specific entropy of ice for ice and liquid water in equilibrium. :arg temp: Temperature in K. :type temp: float or None :arg pres: Pressure in Pa. :type pres: float or None :arg dliq: Liquid water density in kg/m3. 
If unknown, pass None (default) and it will be calculated. :type dliq: float or None :arg bool chkvals: If True (default False) and all values are given, this function will calculate the disequilibrium and raise a warning if the results are not within a given tolerance. :arg float chktol: Tolerance to use when checking values (default _CHKTOL). :arg temp0: Initial guess for the temperature in K. If None (default) then `_approx_p` is used. :type temp0: float or None :arg pres0: Initial guess for the pressure in Pa. If None (default) then `_approx_t` is used. :type pres0: float or None :arg dliq0: Initial guess for the liquid water density in kg/m3. If None (default) then `_approx_t` or `_approx_p` is used. :type dliq0: float or None :arg bool chkbnd: If True then warnings are raised when the given values are valid but outside the recommended bounds (default False). :arg mathargs: Keyword arguments to the root-finder :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None (default) then no arguments are passed and default parameters will be used. :returns: Entropy in J/kg/K. :raises ValueError: If neither of temp or pres is provided. :raises RuntimeWarning: If the relative disequilibrium is more than chktol, if chkvals is True and all values are given. :Examples: >>> entropyice(pres=1e7) -1228.24464139 >>> entropyice(temp=270.) -1251.57777462 """ temp, pres, dliq = eq_tp(temp=temp,pres=pres,dliq=dliq,chkvals=chkvals, chktol=chktol,temp0=temp0,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd, mathargs=mathargs) si = ice2.entropy(temp,pres) return si def entropyliq(temp=None,pres=None,dliq=None,chkvals=False, chktol=_CHKTOL,temp0=None,pres0=None,dliq0=None,chkbnd=False, mathargs=None): """Calculate ice-liquid liquid entropy. Calculate the specific entropy of liquid water for ice and liquid water in equilibrium. :arg temp: Temperature in K. :type temp: float or None :arg pres: Pressure in Pa. :type pres: float or None :arg dliq: Liquid water density in kg/m3. 
If unknown, pass None (default) and it will be calculated. :type dliq: float or None :arg bool chkvals: If True (default False) and all values are given, this function will calculate the disequilibrium and raise a warning if the results are not within a given tolerance. :arg float chktol: Tolerance to use when checking values (default _CHKTOL). :arg temp0: Initial guess for the temperature in K. If None (default) then `_approx_p` is used. :type temp0: float or None :arg pres0: Initial guess for the pressure in Pa. If None (default) then `_approx_t` is used. :type pres0: float or None :arg dliq0: Initial guess for the liquid water density in kg/m3. If None (default) then `_approx_t` or `_approx_p` is used. :type dliq0: float or None :arg bool chkbnd: If True then warnings are raised when the given values are valid but outside the recommended bounds (default False). :arg mathargs: Keyword arguments to the root-finder :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None (default) then no arguments are passed and default parameters will be used. :returns: Entropy in J/kg/K. :raises ValueError: If neither of temp or pres is provided. :raises RuntimeWarning: If the relative disequilibrium is more than chktol, if chkvals is True and all values are given. :Examples: >>> entropyliq(pres=1e7) -11.11210129 >>> entropyliq(temp=270.) -47.2567126291 """ temp, pres, dliq = eq_tp(temp=temp,pres=pres,dliq=dliq,chkvals=chkvals, chktol=chktol,temp0=temp0,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd, mathargs=mathargs) sl = flu2.entropy(temp,dliq) return sl def entropymelt(temp=None,pres=None,dliq=None,chkvals=False, chktol=_CHKTOL,temp0=None,pres0=None,dliq0=None,chkbnd=False, mathargs=None): """Calculate entropy of melting. Calculate the specific entropy of melting. :arg temp: Temperature in K. :type temp: float or None :arg pres: Pressure in Pa. :type pres: float or None :arg dliq: Liquid water density in kg/m3. If unknown, pass None (default) and it will be calculated. 
:type dliq: float or None :arg bool chkvals: If True (default False) and all values are given, this function will calculate the disequilibrium and raise a warning if the results are not within a given tolerance. :arg float chktol: Tolerance to use when checking values (default _CHKTOL). :arg temp0: Initial guess for the temperature in K. If None (default) then `_approx_p` is used. :type temp0: float or None :arg pres0: Initial guess for the pressure in Pa. If None (default) then `_approx_t` is used. :type pres0: float or None :arg dliq0: Initial guess for the liquid water density in kg/m3. If None (default) then `_approx_t` or `_approx_p` is used. :type dliq0: float or None :arg bool chkbnd: If True then warnings are raised when the given values are valid but outside the recommended bounds (default False). :arg mathargs: Keyword arguments to the root-finder :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None (default) then no arguments are passed and default parameters will be used. :returns: Entropy in J/kg/K. :raises ValueError: If neither of temp or pres is provided. :raises RuntimeWarning: If the relative disequilibrium is more than chktol, if chkvals is True and all values are given. :Examples: >>> entropymelt(pres=1e7) 1217.13254010 >>> entropymelt(temp=270.) 1204.32106199 """ temp, pres, dliq = eq_tp(temp=temp,pres=pres,dliq=dliq,chkvals=chkvals, chktol=chktol,temp0=temp0,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd, mathargs=mathargs) sl = flu2.entropy(temp,dliq) si = ice2.entropy(temp,pres) smelt = sl - si return smelt def volumemelt(temp=None,pres=None,dliq=None,chkvals=False, chktol=_CHKTOL,temp0=None,pres0=None,dliq0=None,chkbnd=False, mathargs=None): """Calculate specific volume of melting. Calculate the specific volume of melting. :arg temp: Temperature in K. :type temp: float or None :arg pres: Pressure in Pa. :type pres: float or None :arg dliq: Liquid water density in kg/m3. If unknown, pass None (default) and it will be calculated. 
:type dliq: float or None :arg bool chkvals: If True (default False) and all values are given, this function will calculate the disequilibrium and raise a warning if the results are not within a given tolerance. :arg float chktol: Tolerance to use when checking values (default _CHKTOL). :arg temp0: Initial guess for the temperature in K. If None (default) then `_approx_p` is used. :type temp0: float or None :arg pres0: Initial guess for the pressure in Pa. If None (default) then `_approx_t` is used. :type pres0: float or None :arg dliq0: Initial guess for the liquid water density in kg/m3. If None (default) then `_approx_t` or `_approx_p` is used. :type dliq0: float or None :arg bool chkbnd: If True then warnings are raised when the given values are valid but outside the recommended bounds (default False). :arg mathargs: Keyword arguments to the root-finder :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None (default) then no arguments are passed and default parameters will be used. :returns: Specific volume in m3/kg. :raises ValueError: If neither of temp or pres is provided. :raises RuntimeWarning: If the relative disequilibrium is more than chktol, if chkvals is True and all values are given. :Examples: >>> volumemelt(pres=1e7) -9.4217890326e-05 >>> volumemelt(temp=270.) -1.04052121182e-4 """ temp, pres, dliq = eq_tp(temp=temp,pres=pres,dliq=dliq,chkvals=chkvals, chktol=chktol,temp0=temp0,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd, mathargs=mathargs) vi = _ice_g(0,1,temp,pres,chkbnd=chkbnd) vl = dliq**(-1) vmelt = vl - vi return vmelt
39.549451
80
0.676077
5,225
35,990
4.605933
0.057799
0.022688
0.035652
0.045375
0.858223
0.822571
0.799717
0.78077
0.777154
0.769675
0
0.031991
0.23915
35,990
909
81
39.592959
0.846876
0.703362
0
0.437811
0
0
0.029086
0
0
0
0
0
0
1
0.084577
false
0
0.034826
0
0.208955
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
7a40b49d80fe3632355e2539a5130f841bba126c
1,091
py
Python
zipline/gens/composites.py
dxcv/zipline_chstock
6cbd7e0c7d70d55d55edf86dc3f206f15a8bddc9
[ "Apache-2.0" ]
16
2019-10-15T08:35:36.000Z
2021-12-10T14:43:49.000Z
zipline/gens/composites.py
dxcv/zipline_chstock
6cbd7e0c7d70d55d55edf86dc3f206f15a8bddc9
[ "Apache-2.0" ]
2
2021-03-31T19:14:15.000Z
2021-12-13T20:20:50.000Z
zipline/gens/composites.py
fangshi1991/zipline_chstock
7911642780fa57f92e1705b9c0acaeb837b3d98f
[ "Apache-2.0" ]
11
2019-10-15T14:25:00.000Z
2021-12-09T15:42:06.000Z
#encoding:utf-8 # Copyright 2013 Quantopian, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import heapq def _decorate_source(source): for message in source: yield ((message.dt, message.source_id), message) def date_sorted_sources(*sources): """ Takes an iterable of sources, generating namestrings and piping their output into date_sort. # 获取可迭代的源,生成名称字符串并将其输出传递到date_sort。 """ sorted_stream = heapq.merge(*(_decorate_source(s) for s in sources)) # Strip out key decoration,脱去主键装饰 for _, message in sorted_stream: yield message
32.088235
74
0.734189
155
1,091
5.090323
0.625806
0.076046
0.032953
0.040558
0
0
0
0
0
0
0
0.010193
0.190651
1,091
33
75
33.060606
0.883352
0.668194
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.125
0
0.375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
7a418c864b72cb7a9e6938b4593fb63b01445188
716
py
Python
sdap/studies/migrations/0026_auto_20191128_1041.py
umr1085-irset/reproGenomicsViewer
187ea320668e567d01572bfbf9497bebd691569a
[ "MIT" ]
null
null
null
sdap/studies/migrations/0026_auto_20191128_1041.py
umr1085-irset/reproGenomicsViewer
187ea320668e567d01572bfbf9497bebd691569a
[ "MIT" ]
1
2020-02-16T10:48:55.000Z
2020-02-16T11:06:36.000Z
sdap/studies/migrations/0026_auto_20191128_1041.py
umr1085-irset/reproGenomicsViewer
187ea320668e567d01572bfbf9497bebd691569a
[ "MIT" ]
4
2019-11-04T15:00:55.000Z
2020-03-02T13:36:17.000Z
# Generated by Django 2.2.5 on 2019-11-28 10:41 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('auth', '0011_update_proxy_permissions'), ('studies', '0025_auto_20191128_0958'), ] operations = [ migrations.AddField( model_name='expressionstudy', name='edit_groups', field=models.ManyToManyField(blank=True, related_name='edit_access_to', to='auth.Group'), ), migrations.AddField( model_name='expressionstudy', name='read_groups', field=models.ManyToManyField(blank=True, related_name='read_access_to', to='auth.Group'), ), ]
28.64
101
0.622905
76
716
5.657895
0.592105
0.083721
0.106977
0.125581
0.544186
0.455814
0.24186
0.24186
0
0
0
0.065543
0.25419
716
24
102
29.833333
0.7397
0.062849
0
0.333333
1
0
0.243647
0.077728
0
0
0
0
0
1
0
false
0
0.055556
0
0.222222
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
7a43eae4859f7e4a0ab17cb3b690aad1f9bb0c07
7,430
py
Python
fpga/fourteensegmentdisplay.py
renzenicolai/nmigen-experiments
c6048ad9cbb29d5b478538ef04997a00c0e5dd1b
[ "Unlicense" ]
null
null
null
fpga/fourteensegmentdisplay.py
renzenicolai/nmigen-experiments
c6048ad9cbb29d5b478538ef04997a00c0e5dd1b
[ "Unlicense" ]
null
null
null
fpga/fourteensegmentdisplay.py
renzenicolai/nmigen-experiments
c6048ad9cbb29d5b478538ef04997a00c0e5dd1b
[ "Unlicense" ]
null
null
null
from nmigen import * from nmigen.build import Platform, ResourceError from nmigen.back.pysim import Simulator, Delay, Settle # Small helper classes to simulate the structure of the platform device class _OutputSimulator(): def __init__(self, signal): self.signal = signal self.eq = self.signal.eq class _SegmentSimulator(): def __init__(self, signal): self.o = _OutputSimulator(signal) class FourteenSegmentDisplay(Elaboratable): """ This submodule shows the provided ASCII character on a 14 segment display. The eight bit of the input data is used to switch the dot on or off. """ def __init__(self, deviceType="alnum_led", deviceId=0, simulation=False): # Public self.data = Signal(8, reset=0) self.simulation = simulation self.simSignals = [] # Private self._device = None self._deviceType = deviceType self._deviceId = deviceId self._segments = ['a','b','c','d','e','f','g','h','j','k','l','m','n','p'] self._dotSegment = 'dp' self._lut = [ [0,0,0,0,0,0, 0,0,0, 0, 0,0,0, 0], # (0x20) [0,0,0,0,1,1, 0,0,0, 0, 0,0,0, 0], # ! (0x21) [0,1,0,0,0,1, 0,0,0, 0, 0,0,0, 0], # " (0x22) [0,1,1,1,0,0, 0,1,0, 1, 0,1,0, 1], # # (0x23) [1,0,1,1,0,1, 0,1,0, 1, 0,1,0, 1], # $ (0x24) [0,0,1,0,0,1, 0,0,1, 0, 0,0,1, 0], # % (0x25) [1,0,0,1,1,0, 1,0,1, 0, 1,0,0, 1], # & (0x26) [0,1,0,0,0,0, 0,1,0, 0, 0,0,0, 0], # ' (0x27) [1,0,0,1,1,1, 0,0,0, 0, 0,0,0, 0], # ( (0x28) [1,1,1,1,0,0, 0,0,0, 0, 0,0,0, 0], # ) (0x29) [0,0,0,0,0,0, 1,1,1, 1, 1,1,1, 1], # * (0x2A) [0,0,0,0,0,0, 0,1,0, 1, 0,1,0, 1], # + (0x2B) [0,0,0,0,0,0, 0,0,0, 0, 1,0,0, 0], # , (0x2C) [0,0,0,0,0,0, 0,0,0, 1, 0,0,0, 1], # - (0x2D) [0,0,0,0,0,0, 0,0,0, 0, 0,1,0, 0], # . 
(0x2E) [0,0,0,0,0,0, 0,0,1, 0, 0,0,1, 0], # / (0x2F) [1,1,1,1,1,1, 0,0,0, 0, 0,0,0, 0], # 0 (0x30) [0,1,1,0,0,0, 0,0,1, 0, 0,0,0, 0], # 1 (0x31) [1,1,0,1,1,0, 0,0,0, 1, 0,0,0, 1], # 2 (0x32) [1,1,1,1,0,0, 0,0,0, 1, 0,0,0, 0], # 3 (0x33) [0,1,1,0,0,1, 0,0,0, 1, 0,0,0, 1], # 4 (0x34) [1,0,1,1,0,1, 0,0,0, 1, 0,0,0, 1], # 5 (0x35) [1,0,1,1,1,1, 0,0,0, 1, 0,0,0, 1], # 6 (0x36) [1,0,0,0,0,0, 0,0,1, 0, 0,1,0, 0], # 7 (0x37) [1,1,1,1,1,1, 0,0,0, 1, 0,0,0, 1], # 8 (0x38) [1,1,1,0,0,1, 0,0,0, 1, 0,0,0, 1], # 9 (0x39) [0,0,0,0,0,0, 0,1,0, 0, 0,1,0, 0], # : (0x3A) [0,0,0,0,0,0, 0,1,0, 0, 0,0,1, 0], # ; (0x3B) [0,0,0,0,0,0, 0,0,1, 0, 1,0,0, 0], # < (0x3C) [0,0,0,1,0,0, 0,0,0, 1, 0,0,0, 1], # = (0x3D) [0,0,0,0,0,0, 1,0,0, 0, 0,0,1, 0], # > (0x3E) [1,0,0,0,0,1, 0,0,1, 0, 0,1,0, 0], # ? (0x3F) [1,1,1,1,1,1, 1,0,1, 0, 1,0,1, 0], # @ (0x40) [1,1,1,0,1,1, 0,0,0, 1, 0,0,0, 1], # A (0x41) [1,1,1,1,0,0, 0,1,0, 1, 0,1,0, 0], # B (0x42) [1,0,0,1,1,1, 0,0,0, 0, 0,0,0, 0], # C (0x43) [1,1,1,1,0,0, 0,1,0, 0, 0,1,0, 0], # D (0x44) [1,0,0,1,1,1, 0,0,0, 1, 0,0,0, 1], # E (0x45) [1,0,0,0,1,1, 0,0,0, 1, 0,0,0, 1], # F (0x46) [1,0,1,1,1,1, 0,0,0, 1, 0,0,0, 0], # G (0x47) [0,1,1,0,1,1, 0,0,0, 1, 0,0,0, 1], # H (0x48) [1,0,0,1,0,0, 0,1,0, 0, 0,1,0, 0], # I (0x49) [0,1,1,1,1,0, 0,0,0, 0, 0,0,0, 0], # J (0x4A) [0,0,0,0,1,1, 0,0,1, 0, 1,0,0, 1], # K (0x4B) [0,0,0,1,1,1, 0,0,0, 0, 0,0,0, 0], # L (0x4C) [0,1,1,0,1,1, 1,0,1, 0, 0,0,0, 0], # M (0x4D) [0,1,1,0,1,1, 1,0,0, 0, 1,0,0, 0], # N (0x4E) [1,1,1,1,1,1, 0,0,0, 0, 0,0,0, 0], # O (0x4F) [1,1,0,0,1,1, 0,0,0, 1, 0,0,0, 1], # P (0x50) [1,1,1,1,1,1, 0,0,0, 0, 1,0,0, 0], # Q (0x51) [1,1,0,0,1,1, 0,0,0, 1, 1,0,0, 1], # R (0x52) [1,0,1,1,0,0, 1,0,0, 1, 0,0,0, 0], # S (0x53) [1,0,0,0,0,0, 0,1,0, 0, 0,1,0, 0], # T (0x54) [0,1,1,1,1,1, 0,0,0, 0, 0,0,0, 0], # U (0x55) [0,0,0,0,1,1, 0,0,1, 0, 0,0,1, 0], # V (0x56) [0,1,1,0,1,1, 0,0,0, 0, 1,0,1, 0], # W (0x57) [0,0,0,0,0,0, 1,0,1, 0, 1,0,1, 0], # X (0x58) [0,0,0,0,0,0, 1,0,1, 0, 0,1,0, 0], # Y (0x59) [1,0,0,1,0,0, 
0,0,1, 0, 0,0,1, 0], # Z (0x5A) [1,0,0,1,1,1, 0,0,0, 0, 0,0,0, 0], # [ (0x5B) [0,0,0,0,0,0, 1,0,0, 0, 1,0,0, 0], # \ (0x5C) [1,1,1,1,0,0, 0,0,0, 0, 0,0,0, 0], # ] (0x5D) [1,1,0,0,0,1, 0,0,0, 0, 0,0,0, 0], # ^ (0x5E) [0,0,0,1,0,0, 0,0,0, 0, 0,0,0, 0], # _ (0x5F) [0,0,0,0,0,0, 1,0,0, 0, 0,0,0, 0], # ` (0x60) [1,1,1,1,1,0, 0,0,0, 1, 0,0,0, 1], # a (0x61) [0,0,0,1,1,1, 0,0,0, 0, 1,0,0, 1], # b (0x62) [0,0,0,1,1,0, 0,0,0, 1, 0,0,0, 1], # c (0x63) [0,1,1,1,0,0, 0,0,0, 1, 0,0,1, 0], # d (0x64) [1,0,0,1,1,1, 0,0,0, 0, 0,0,0, 1], # e (0x65) [1,0,0,0,1,1, 0,0,0, 0, 0,0,0, 1], # f (0x66) [1,1,1,1,0,0, 1,0,0, 1, 0,0,0, 0], # g (0x67) [0,0,1,0,1,1, 0,0,0, 1, 0,0,0, 1], # h (0x68) [0,0,0,0,0,0, 0,0,0, 0, 0,1,0, 0], # i (0x69) [0,1,1,1,0,0, 0,0,0, 0, 0,0,0, 0], # j (0x6A) [0,0,0,0,1,1, 0,0,1, 0, 1,0,0, 0], # k (0x6B) [0,0,0,0,0,0, 0,1,0, 0, 0,1,0, 0], # l (0x6C) [0,0,1,0,1,0, 0,0,0, 1, 0,1,0, 1], # m (0x6D) [0,0,0,0,1,0, 0,0,0, 0, 1,0,0, 1], # n (0x6E) [0,0,1,1,1,0, 0,0,0, 1, 0,0,0, 1], # o (0x6F) [1,0,0,0,1,1, 0,0,1, 0, 0,0,0, 1], # p (0x70) [1,1,0,0,0,1, 0,0,0, 1, 1,0,0, 1], # q (0x71) [0,0,0,0,1,0, 0,0,0, 0, 0,0,0, 1], # r (0x72) [1,0,1,1,0,0, 1,0,0, 1, 0,0,0, 0], # s (0x73) [0,0,0,1,1,1, 0,0,0, 0, 0,0,0, 1], # t (0x74) [0,0,1,1,1,0, 0,0,0, 0, 0,0,0, 0], # u (0x75) [0,0,0,0,1,0, 0,0,0, 0, 0,0,1, 0], # v (0x76) [0,0,1,0,1,0, 0,0,0, 0, 1,0,1, 0], # w (0x77) [0,0,0,0,0,0, 1,0,1, 0, 1,0,1, 0], # x (0x78) [0,1,1,1,0,0, 0,1,0, 1, 0,0,0, 0], # y (0x79) [1,0,0,1,0,0, 0,0,1, 0, 0,0,1, 0], # z (0x7A) [1,0,0,1,0,0, 1,0,0, 0, 0,0,1, 1], # { (0x7B) [0,0,0,0,0,0, 0,1,0, 0, 0,1,0, 0], # | (0x7C) [1,0,0,1,0,0, 0,0,1, 1, 1,0,0, 0], # } (0x7D) [0,0,0,0,0,0, 0,0,0, 1, 0,0,0, 1], # ~ (0x7E) ] def elaborate(self, platform: Platform) -> Module: m = Module() if self.simulation: self._device = {} for segment in self._segments + [self._dotSegment]: s = Signal(1) s.name = segment self.simSignals.append(s) self._device[segment] = _SegmentSimulator(s) else: self._device = 
platform.request(self._deviceType, self._deviceId) # Remove the eighth bit from the data signal and map the seven remaining bits onto the LUT data7 = Signal(unsigned(7)) with m.If(self.data[0:7] < 0x20): # Out of range m.d.comb += data7.eq(0) # Set to SPACE (0x20), 0 in our LUT, when data is out of range with m.Else(): m.d.comb += data7.eq(self.data[0:7]-0x20) # Drive the dot segment using the eighth bit of the data signal m.d.comb += self._device[self._dotSegment].o.eq(self.data[7]) # Drive the other fourteen segments using the LUT with m.Switch(data7): for i in range(len(self._lut)): with m.Case(i): # (SPACE to ~) for j in range(len(self._segments)): m.d.comb += self._device[self._segments[j]].o.eq(self._lut[i][j]) with m.Default(): # (0x7F / DEL) for j in range(len(self._segments)): m.d.comb += self._device[self._segments[j]].o.eq(1) return m def ports(self): ports = [self.data] if self.simulation: ports.extend(self.simSignals) return ports if __name__ == "__main__": m = FourteenSegmentDisplay(simulation = True) sim = Simulator(m) def process(): # This design consist purely of combinational logic # so we just loop through all possible input values for i in range(256): yield m.data.eq(i) yield Delay(1e-6) yield Settle() sim.add_process(process) with sim.write_vcd("test.vcd", "test.gtkw", traces=m.ports()): sim.run()
40.162162
92
0.490983
1,903
7,430
1.890173
0.137677
0.35196
0.353628
0.302474
0.442591
0.4134
0.406172
0.397554
0.389491
0.361412
0
0.277635
0.212248
7,430
184
93
40.380435
0.336921
0.199462
0
0.163522
0
0
0.008579
0
0
0
0.001373
0
0
1
0.037736
false
0
0.018868
0
0.08805
0
0
0
1
null
1
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
7a4693933697b877ba83ef27e07a84e5fbb283c6
884
py
Python
motorway/contrib/amazon_kinesis/intersections.py
alesdotio/motorway
8514f9e6494c9e55576705b72dda306c175e62dc
[ "Apache-2.0" ]
1
2016-09-16T14:51:59.000Z
2016-09-16T14:51:59.000Z
motorway/contrib/amazon_kinesis/intersections.py
alesdotio/motorway
8514f9e6494c9e55576705b72dda306c175e62dc
[ "Apache-2.0" ]
null
null
null
motorway/contrib/amazon_kinesis/intersections.py
alesdotio/motorway
8514f9e6494c9e55576705b72dda306c175e62dc
[ "Apache-2.0" ]
1
2020-12-12T17:35:55.000Z
2020-12-12T17:35:55.000Z
import json import boto.kinesis from motorway.intersection import Intersection class KinesisInsertIntersection(Intersection): stream_name = None def __init__(self, **kwargs): super(KinesisInsertIntersection, self).__init__(**kwargs) self.conn = boto.kinesis.connect_to_region(**self.connection_parameters()) assert self.stream_name, "Please define attribute stream_name on your KinesisInsertIntersection" def connection_parameters(self): return { 'region_name': 'eu-west-1', # Add this or use ENV VARS # 'aws_access_key_id': '', # 'aws_secret_access_key': '' } def process(self, message): self.conn.put_record( self.stream_name, json.dumps(message.content), message.grouping_value ) self.ack(message) yield
29.466667
104
0.640271
93
884
5.817204
0.580645
0.073937
0.051756
0
0
0
0
0
0
0
0
0.001546
0.2681
884
29
105
30.482759
0.834621
0.087104
0
0
0
0
0.110834
0.031133
0
0
0
0
0.047619
1
0.142857
false
0
0.142857
0.047619
0.428571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a469ca4efeb9a5e00a92c23d28f63e455d24a3f
266
py
Python
image processing/4/1/1.py
DzmitrySakalenka/stepik_courses
7c43ac35cd921e8f6f96fb4f15f77ace38cc2d21
[ "MIT" ]
null
null
null
image processing/4/1/1.py
DzmitrySakalenka/stepik_courses
7c43ac35cd921e8f6f96fb4f15f77ace38cc2d21
[ "MIT" ]
null
null
null
image processing/4/1/1.py
DzmitrySakalenka/stepik_courses
7c43ac35cd921e8f6f96fb4f15f77ace38cc2d21
[ "MIT" ]
null
null
null
from skimage.io import imread, imsave from numpy import ones from scipy.signal import convolve2d import warnings warnings.filterwarnings("ignore") img = imread('img.png') img = convolve2d(img, ones((5, 5), dtype=int), mode='valid') // 25 imsave('out_img.png', img)
26.6
66
0.744361
40
266
4.925
0.575
0.060914
0.091371
0
0
0
0
0
0
0
0
0.025532
0.116541
266
10
67
26.6
0.812766
0
0
0
0
0
0.108614
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
7a4736a2415c86f69bf9548f7c81b587162aa65a
2,087
py
Python
embedKB/datatools/__init__.py
zafarali/embedKB
52d7b65cd47caebb01498826a4c67c76e48ed2c7
[ "MIT" ]
1
2018-04-29T18:43:56.000Z
2018-04-29T18:43:56.000Z
embedKB/datatools/__init__.py
zafarali/embedKB
52d7b65cd47caebb01498826a4c67c76e48ed2c7
[ "MIT" ]
null
null
null
embedKB/datatools/__init__.py
zafarali/embedKB
52d7b65cd47caebb01498826a4c67c76e48ed2c7
[ "MIT" ]
1
2020-01-08T03:01:09.000Z
2020-01-08T03:01:09.000Z
from .knowledgebase import KnowledgeBase from .dataset import Dataset from .dataset import SmartNegativeSampling, NegativeSampling def get_data(data_path, batch_size): train_path = data_path + 'train.txt' valid_path = data_path + 'valid.txt' test_path = data_path + 'test.txt' # load knowledge base of train data kb_train = KnowledgeBase.load_from_raw_data(train_path) kb_train.convert_triples() dset_train = Dataset(kb_train, batch_size=batch_size) # derive a knowledge base of validation data kb_val = KnowledgeBase.derive_from(kb_train) kb_val.load_raw_triples(valid_path) kb_val.convert_triples() dset_val = Dataset(kb_val, batch_size=batch_size) # derive a knowledge base of testing data kb_test = KnowledgeBase.derive_from(kb_train) kb_test.load_raw_triples(test_path) kb_test.convert_triples() dset_test = Dataset(kb_test, batch_size=batch_size) return kb_train, dset_train, kb_val, dset_val, kb_test, dset_test def load_saved_data(folder): #currently only the kb for the training data was saved. kb_train = KnowledgeBase() kb_train.load_converted_triples(folder + '/triples.npy') kb_train.load_mappings_from_json(folder + '/entity2id.json', folder + '/relation2id.json') # have to do this temporarily. should be fixed to be more general batch_size = 32 data_path = '../data/Release/' valid_path = data_path + 'valid.txt' test_path = data_path + 'test.txt' dset_train = Dataset(kb_train, batch_size=batch_size) # derive a knowledge base of validation data kb_val = KnowledgeBase.derive_from(kb_train) kb_val.load_raw_triples(valid_path) kb_val.convert_triples() dset_val = Dataset(kb_val, batch_size=batch_size) # derive a knowledge base of testing data kb_test = KnowledgeBase.derive_from(kb_train) kb_test.load_raw_triples(test_path) kb_test.convert_triples() dset_test = Dataset(kb_test, batch_size=batch_size) return kb_train, dset_train, kb_val, dset_val, kb_test, dset_test
35.372881
69
0.735985
306
2,087
4.679739
0.186275
0.087989
0.058659
0.075419
0.625698
0.625698
0.625698
0.625698
0.625698
0.625698
0
0.00235
0.184475
2,087
58
70
35.982759
0.839013
0.151893
0
0.631579
0
0
0.058456
0
0
0
0
0
0
1
0.052632
false
0
0.078947
0
0.184211
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
7a4758fe075141b33a34759f92db71968b493b1d
3,034
py
Python
middle-tier/app/interact_contract.py
Surfndez/Microsoft-Identities-on-the-Ethereum-Blockchain
35fc2ac329f3a17b59d5bb90115e1e8f7e2ae501
[ "MIT" ]
4
2018-07-23T22:01:16.000Z
2020-09-22T11:15:39.000Z
middle-tier/app/interact_contract.py
Surfndez/Microsoft-Identities-on-the-Ethereum-Blockchain
35fc2ac329f3a17b59d5bb90115e1e8f7e2ae501
[ "MIT" ]
1
2021-01-21T13:15:16.000Z
2021-01-21T13:15:16.000Z
middle-tier/app/interact_contract.py
Surfndez/Microsoft-Identities-on-the-Ethereum-Blockchain
35fc2ac329f3a17b59d5bb90115e1e8f7e2ae501
[ "MIT" ]
5
2018-07-20T00:23:11.000Z
2020-09-22T11:15:47.000Z
import json import time import web3 import sha3 from os import environ from web3 import Web3, HTTPProvider from web3.contract import ConciseContract from contract import IDENTITY_STORE_JSON API_KEY = environ.get('API_KEY') PRIVATE_KEY = environ.get('PRIVATE_KEY') CONTRACT_ADDRESS = environ.get('CONTRACT_ADDRESS') NETWORK_ENDPOINT = "https://ropsten.infura.io/v3/{}".format(API_KEY) w3 = Web3(HTTPProvider(NETWORK_ENDPOINT)) w3.eth.enable_unaudited_features() #known_nonce = set() def setTenant(hashObject, address, timestamp, tenantId): #global known_nonce contract = load_contract() account = w3.eth.account.privateKeyToAccount(PRIVATE_KEY) get_data = contract.encodeABI( fn_name='setTenant', args=[ hashObject, address, timestamp, tenantId ]) trans_count = w3.eth.getTransactionCount(account.address) nonce = trans_count #while nonce in known_nonce: # nonce += 1 print("transaction count=%d nonce=%d" %(trans_count, nonce)) price = w3.toWei('21', 'gwei') success = False retry = 100 while not success and retry > 0: retry -= 1 try: transaction = { 'to': contract.address, 'data': get_data, 'gas': 1728712, 'gasPrice': price, 'nonce': nonce } signed = w3.eth.account.signTransaction(transaction, PRIVATE_KEY) txn_hash = w3.eth.sendRawTransaction(signed.rawTransaction) txn = w3.eth.getTransaction(txn_hash) print('Contract Transaction Hash {}'.format(txn_hash)) print('Transaction {}'.format(txn)) #known_nonce.add(nonce) success = True except ValueError as err: err_msg = err.args[0]['message'] print('web3 error:: %s' % err_msg) if 'replacement transaction underpriced' in err_msg: price += 1 retry += 1 # underprice doesn't count for retrying print('increase price to %d' % price) elif 'nonce too low' in err_msg or 'known transaction' in err_msg: #known_nonce.add(nonce) nonce += 1 print('increase nonce to %d' % nonce) else: raise err if retry <= 0: print('stop retrying') return txn def get_deloyed_contract(contract_definition, contract_address): contract_abi = contract_definition['abi'] 
contract = w3.eth.contract(abi=contract_abi, address=contract_address) return contract def load_contract(): contract_definition = json.loads(IDENTITY_STORE_JSON) return get_deloyed_contract(contract_definition, CONTRACT_ADDRESS) def is_valid(tenant_id, user_address): contract = load_contract() timestamp = int(time.time()) # call isValid on contract isValid = contract.functions.isValid(tenant_id, user_address, timestamp).call() return isValid
30.34
83
0.627884
343
3,034
5.387755
0.35277
0.018939
0.012987
0.036797
0.055195
0.055195
0.055195
0
0
0
0
0.016856
0.276533
3,034
99
84
30.646465
0.825057
0.060976
0
0.027027
0
0
0.111228
0
0
0
0
0
0
1
0.054054
false
0
0.108108
0
0.216216
0.094595
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a4900d73a1ad7f743a5146f147752cc74077496
2,227
py
Python
startup/90-functions.py
NSLS-II-XPD-tomo/profile_collection
a960faa6bd24dc87bd094399f2124f80159207be
[ "BSD-3-Clause" ]
null
null
null
startup/90-functions.py
NSLS-II-XPD-tomo/profile_collection
a960faa6bd24dc87bd094399f2124f80159207be
[ "BSD-3-Clause" ]
null
null
null
startup/90-functions.py
NSLS-II-XPD-tomo/profile_collection
a960faa6bd24dc87bd094399f2124f80159207be
[ "BSD-3-Clause" ]
2
2021-11-08T19:13:50.000Z
2022-01-08T16:17:01.000Z
print(f'Loading {__file__}') def configure_area_det(det,acq_time,acq_period=None,exposure=None,num_exposures=1): if det.name == 'prosilica': acq_time = min(acq_time,25) if det.cam.acquire.get() == 0: yield from bps.abs_set(det.cam.acquire, 1, wait=True) if det.name == 'dexela': yield from bps.abs_set(det.cam.acquire_time, max(acq_time,0.1), wait=True) acq_time_rbv = det.cam.acquire_time.get() else: yield from bps.abs_set(det.cam.acquire_time, acq_time, wait=True) acq_time_rbv = det.cam.acquire_time.get() if det.name == 'dexela': yield from bps.abs_set(det.cam.acquire_period, acq_time_rbv+0.005, wait=True) acq_period_rbv = det.cam.acquire_period.get() else: if acq_period is None: if det.name == 'blackfly': yield from bps.abs_set(det.cam.acquire_period, 0.1, wait=False) else: yield from bps.abs_set(det.cam.acquire_period, acq_time_rbv, wait=True) acq_period_rbv = det.cam.acquire_period.get() else: if det.name == 'blackfly': yield from bps.abs_set(det.cam.acquire_period, min(1,acq_period), wait=False) else: yield from bps.abs_set(det.cam.acquire_period, acq_period, wait=True) acq_period_rbv = det.cam.acquire_period.get() if exposure is None: exposure = acq_time_rbv*10 num_frames = np.ceil(exposure / acq_time_rbv) yield from bps.abs_set(det.images_per_set, num_frames, wait=True) yield from bps.abs_set(det.number_of_sets, num_exposures, wait=True) if det.name == 'emergent': print(">>>%s is configured as:\n acq_time = %.3fmsec; acq_period = %.3fmsec; exposure = %.3fmsec \ (num frames = %.2f); num_exposures = %d"%(det.name,acq_time_rbv,acq_period_rbv,exposure,num_frames,num_exposures)) else: print(">>>%s is configured as:\n acq_time = %.3fsec; acq_period = %.3fsec; exposure = %.3fsec \ (num frames = %.2f); num_exposures = %d"%(det.name,acq_time_rbv,acq_period_rbv,exposure,num_frames,num_exposures)) return
40.490909
118
0.624158
327
2,227
4.003058
0.180428
0.080214
0.139037
0.114591
0.689076
0.669977
0.637892
0.637892
0.571429
0.538579
0
0.014414
0.252357
2,227
54
119
41.240741
0.771772
0
0
0.425
0
0.05
0.028289
0
0
0
0
0
0
1
0.025
false
0
0
0
0.05
0.075
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a4982c716e53e21237cd2d0f5536f385174c347
15,085
py
Python
same-same.py
gwk/same-same
065674668bf26dd2bcc62ab7a556629d21647fe4
[ "CC0-1.0" ]
22
2018-05-25T20:45:46.000Z
2021-01-24T23:26:20.000Z
same-same.py
gwk/same-same
065674668bf26dd2bcc62ab7a556629d21647fe4
[ "CC0-1.0" ]
null
null
null
same-same.py
gwk/same-same
065674668bf26dd2bcc62ab7a556629d21647fe4
[ "CC0-1.0" ]
null
null
null
#!/usr/bin/env python3 # Dedicated to the public domain under CC0: https://creativecommons.org/publicdomain/zero/1.0/. import re from argparse import ArgumentParser from difflib import SequenceMatcher from itertools import chain, groupby from os import environ from sys import stderr, stdout from typing import * from typing import Match class DiffLine: def __init__(self, kind:str, match:Match, rich_text:str) -> None: self.kind = kind # The name from `diff_pat` named capture groups. self.match = match self.rich_text = rich_text # Original colorized text from git. self.old_num = 0 # 1-indexed. self.new_num = 0 # ". self.chunk_idx = 0 # Positive for rem/add. self.is_src = False # True for ctx/rem/add. self.text = '' # Final text for ctx/rem/add. @property def plain_text(self) -> str: return self.match.string # type: ignore def main() -> None: arg_parser = ArgumentParser(prog='same-same', description='Git diff filter.') arg_parser.add_argument('-interactive', action='store_true', help="Accommodate git's interactive mode.") args = arg_parser.parse_args() # Git can generate utf8-illegal sequences; ignore them. stdin = open(0, errors='replace') if 'SAME_SAME_OFF' in environ: for line in stdin: stdout.write(line) exit(0) dbg = ('SAME_SAME_DBG' in environ) buffer:List[DiffLine] = [] def flush_buffer() -> None: nonlocal buffer if buffer: handle_file_lines(buffer, interactive=args.interactive) buffer = [] try: for rich_text in stdin: rich_text = rich_text.rstrip('\n') plain_text = sgr_pat.sub('', rich_text) # remove colors. match = diff_pat.match(plain_text) assert match is not None kind = match.lastgroup assert kind is not None, match if dbg: print(kind, ':', repr(plain_text)) continue if kind == 'diff': flush_buffer() buffer.append(DiffLine(kind, match, rich_text)) flush_buffer() except BrokenPipeError: stderr.close() # Prevents warning message. 
def handle_file_lines(lines:List[DiffLine], interactive:bool) -> None: first = lines[0] kind = first.kind skip = False # Detect if we should skip these lines. if kind not in ('diff', 'loc'): skip = True elif graph_pat.match(first.plain_text).end(): skip = True # type: ignore if skip: for line in lines: print(line.rich_text) return old_ctx_nums:Set[int] = set() # Line numbers of context lines. new_ctx_nums:Set[int] = set() # ". old_lines:Dict[int, DiffLine] = {} # Maps of line numbers to line structs. new_lines:Dict[int, DiffLine] = {} # ". old_uniques:Dict[str, Optional[int]] = {} # Maps unique line bodies to line numbers. new_uniques:Dict[str, Optional[int]] = {} # ". old_num = 0 # 1-indexed source line number. new_num = 0 # ". chunk_idx = 0 # Counter to differentiate chunks; becomes part of the groupby key. # Accumulate source lines into structures. old_path = '<OLD_PATH>' new_path = '<NEW_PATH>' is_prev_add_rem = False for line in lines: match = line.match kind = line.kind is_add_rem = (kind in ('rem', 'add')) if not is_prev_add_rem and is_add_rem: chunk_idx += 1 is_prev_add_rem = is_add_rem if kind in ('ctx', 'rem', 'add'): line.is_src = True if kind == 'ctx': line.text = match['ctx_text'] elif kind == 'rem': line.text = match['rem_text'] line.chunk_idx = chunk_idx insert_unique_line(old_uniques, line.text, old_num) elif kind == 'add': line.text = match['add_text'] line.chunk_idx = chunk_idx insert_unique_line(new_uniques, line.text, new_num) if kind in ('ctx', 'rem'): assert old_num not in old_lines assert old_num not in old_ctx_nums line.old_num = old_num old_lines[old_num] = line old_ctx_nums.add(old_num) old_num += 1 if kind in ('ctx', 'add'): assert new_num not in new_lines assert new_num not in new_ctx_nums line.new_num = new_num new_lines[new_num] = line new_ctx_nums.add(new_num) new_num += 1 elif kind == 'loc': o = int(match['old_num']) if o > 0: assert o > old_num, (o, old_num, match.string) old_num = o n = int(match['new_num']) if n > 0: assert n > 
new_num new_num = n elif kind == 'old': old_path = vscode_path(match['old_path'].rstrip('\t')) elif kind == 'new': new_path = vscode_path(match['new_path'].rstrip('\t')) # Not sure why this trailing tab appears. # Detect moved lines. def diff_lines_match(old_idx:int, new_idx:int) -> bool: if old_idx in old_ctx_nums or new_idx in new_ctx_nums: return False try: return old_lines[old_idx].text.strip() == new_lines[new_idx].text.strip() except KeyError: return False old_moved_nums:Set[int] = set() new_moved_nums:Set[int] = set() for body, new_idx in new_uniques.items(): if new_idx is None: continue old_idx = old_uniques.get(body) if old_idx is None: continue p_o = old_idx p_n = new_idx while diff_lines_match(p_o-1, p_n-1): p_o -= 1 p_n -= 1 e_o = old_idx + 1 e_n = new_idx + 1 while diff_lines_match(e_o, e_n): e_o += 1 e_n += 1 old_moved_nums.update(range(p_o, e_o)) new_moved_nums.update(range(p_n, e_n)) # Break lines into rem/add chunks. # While a "hunk" is a series of (possibly many) ctx/rem/add lines provided by git diff, # a "chunk" is either a contiguous block of rem/add lines, or else any other single line. # This approach simplifies the token diffing process so that it is a reasonably # straightforward comparison of a rem block to an add block. def chunk_key(line:DiffLine) -> Tuple[int, bool]: return (line.is_src, line.chunk_idx, (line.old_num in old_moved_nums or line.new_num in new_moved_nums)) for ((is_src, chunk_idx, is_moved), _chunk) in groupby(lines, key=chunk_key): chunk = list(_chunk) # We iterate over the sequence several times. if chunk_idx and not is_moved: # Chunk should be diffed by tokens. # We must ensure that the same number of lines is output, at least for `-interactive` mode. # Currently, we do not reorder lines at all, but that is an option for the future. rem_lines = [l for l in chunk if l.old_num] add_lines = [l for l in chunk if l.new_num] add_token_diffs(rem_lines, add_lines) elif is_src: # ctx or moved. 
for l in chunk: l.text = highlight_strange_chars(l.text) # Print lines. for line in chunk: kind = line.kind match = line.match text = line.text if kind == 'ctx': print(text) elif kind == 'rem': m = C_REM_MOVED if line.old_num in old_moved_nums else '' print(C_REM_LINE, m, text, C_END, sep='') elif kind == 'add': m = C_ADD_MOVED if line.new_num in new_moved_nums else '' print(C_ADD_LINE, m, text, C_END, sep='') elif kind == 'loc': new_num = match['new_num'] snippet = match['parent_snippet'] s = ' ' + C_SNIPPET if snippet else '' print(C_LOC, new_path, ':', new_num, ':', s, snippet, C_END, sep='') elif kind == 'diff': msg = new_path if (old_path == new_path) else '{} -> {}'.format(old_path, new_path) print(C_FILE, msg, ':', C_END, sep='') elif kind == 'meta': print(C_MODE, new_path, ':', RST, ' ', line.rich_text, sep='') elif kind in dropped_kinds: if interactive: # cannot drop lines, becasue interactive mode slices the diff by line counts. print(C_DROPPED, line.plain_text, RST, sep='') elif kind in pass_kinds: print(line.rich_text) else: raise Exception('unhandled kind: {}\n{!r}'.format(kind, text)) def insert_unique_line(d:Dict[str, Optional[int]], line:str, idx:int) -> None: 'For the purpose of movement detection, lines are tested for uniqueness after stripping leading and trailing whitespace.' body = line.strip() if body in d: d[body] = None else: d[body] = idx def add_token_diffs(rem_lines:List[DiffLine], add_lines:List[DiffLine]) -> None: 'Rewrite DiffLine.text values to include per-token diff highlighting.' # Get lists of tokens for the entire chunk. r_tokens = tokenize_difflines(rem_lines) a_tokens = tokenize_difflines(add_lines) m = SequenceMatcher(isjunk=is_token_junk, a=r_tokens, b=a_tokens, autojunk=True) r_frags:List[List[str]] = [[] for _ in rem_lines] # Accumulate highlighted tokens. a_frags:List[List[str]] = [[] for _ in add_lines] r_line_idx = 0 # Step through the accumulators. a_line_idx = 0 r_d = 0 # Token index of previous/next diff. 
a_d = 0 # TODO: r_lit, a_lit flags could slightly reduce emission of color sequences. blocks = m.get_matching_blocks() # last block is the sentinel: (len(a), len(b), 0). for r_p, a_p, l in m.get_matching_blocks(): # Highlight the differing tokens. r_line_idx = append_frags(r_frags, r_tokens, r_line_idx, r_d, r_p, C_REM_TOKEN) a_line_idx = append_frags(a_frags, a_tokens, a_line_idx, a_d, a_p, C_ADD_TOKEN) r_d = r_p+l # update to end of match / beginning of next diff. a_d = a_p+l # Do not highlight the matching tokens. r_line_idx = append_frags(r_frags, r_tokens, r_line_idx, r_p, r_d, C_RST_TOKEN) a_line_idx = append_frags(a_frags, a_tokens, a_line_idx, a_p, a_d, C_RST_TOKEN) for rem_line, frags in zip(rem_lines, r_frags): rem_line.text = ''.join(frags) for add_line, frags in zip(add_lines, a_frags): add_line.text = ''.join(frags) def tokenize_difflines(lines:List[DiffLine]) -> List[str]: 'Convert the list of line texts into a single list of tokens, including newline tokens.' tokens:List[str] = [] for i, line in enumerate(lines): if i: tokens.append('\n') tokens.extend(m[0] for m in token_pat.finditer(line.text)) return tokens def is_token_junk(token:str) -> bool: ''' Treate newlines as tokens, but all other whitespace as junk. This forces the diff algorithm to respect line breaks but not get distracted aligning to whitespace. 
''' return token.isspace() and token != '\n' def append_frags(frags:List[List[str]], tokens:List[str], line_idx:int, pos:int, end:int, highlight:str) -> int: for frag in tokens[pos:end]: if frag == '\n': line_idx += 1 else: line_frags = frags[line_idx] line_frags.append(highlight) line_frags.append(highlight_strange_chars(frag)) return line_idx def highlight_strange_chars(string:str) -> str: return strange_char_pat.sub( lambda m: '{}{}{}'.format(C_STRANGE, m[0].translate(strange_char_trans_table), C_RST_STRANGE), string) dropped_kinds = { 'idx', 'old', 'new' } pass_kinds = { 'empty', 'other' } sgr_pat = re.compile(r'\x1B\[[0-9;]*m') graph_pat = re.compile(r'(?x) [ /\*\|\\]*') # space is treated as literal inside of brackets, even in extended mode. diff_pat = re.compile(r'''(?x) (?: (?P<empty> $) | (?P<commit> commit\ [0-9a-z]{40} ) | (?P<author> Author: ) | (?P<date> Date: ) | (?P<diff> diff\ --git ) | (?P<idx> index ) | (?P<old> --- \ (?P<old_path>.+) ) | (?P<new> \+\+\+ \ (?P<new_path>.+) ) | (?P<loc> @@\ -(?P<old_num>\d+)(?P<old_len>,\d+)?\ \+(?P<new_num>\d+)(?P<new_len>,\d+)?\ @@\ ?(?P<parent_snippet>.*) ) | (?P<ctx> \ (?P<ctx_text>.*) ) | (?P<rem> - (?P<rem_text>.*) ) | (?P<add> \+ (?P<add_text>.*) ) | (?P<meta> ( old\ mode | new\ mode | deleted\ file\ mode | new\ file\ mode | copy\ from | copy\ to | rename\ from | rename\ to | similarity\ index | dissimilarity\ index ) ) | (?P<other> .* ) ) ''') token_pat = re.compile(r'''(?x) \w[\w\d]* # Symbol token. | \d+ # Number token. | \ + # Spaces; distinct from other whitespace. | \t+ # Tabs; distinct from other whitespace. | \s+ # Other whitespace. | . # Any other single character; newlines are never present so DOTALL is irrelevant. 
''') # Unicode ranges for strange characters: # C0: \x00 - \x1F # \n: \x0A # C0 !\n: [ \x00-\x09 \x0B-\x1F ] # SP: \x20 # DEL: \x7F # C1: \x80 - \x9F # NBSP: \xA0 (nonbreaking space) # SHY: \xAD (soft hyphen) strange_char_re = r'(?x) [\x00-\x09\x0B-\x1F\x7F\x80-\x9F\xA0\xAD]+' strange_char_pat = re.compile(strange_char_re) assert not strange_char_pat.match(' ') strange_char_ords = chain(range(0, 0x09+1), range(0x0B, 0x1F+1), range(0x7F, 0x7F+1), range(0x80, 0x9F+1), range(0xA0, 0xA0+1), range(0xAD, 0xAD+1)) assert ord(' ') not in strange_char_ords strange_char_names = { i : '\\x{:02x}'.format(i) for i in strange_char_ords } strange_char_names.update({ '\0' : '\\0', '\a' : '\\a', '\b' : '\\b', '\f' : '\\f', '\r' : '\\r', '\t' : '\\t', '\v' : '\\v', }) strange_char_trans_table = str.maketrans(strange_char_names) # ANSI control sequence indicator. CSI = '\x1b[' ERASE_LINE_F = CSI + 'K' # Sending erase line forward while background color is set colors to end of line. def sgr(*codes:Any) -> str: 'Select Graphic Rendition control sequence string.' code = ';'.join(str(c) for c in codes) return '\x1b[{}m'.format(code) RST = sgr() RST_BOLD, RST_ULINE, RST_BLINK, RST_INVERT, RST_TXT, RST_BG = (22, 24, 25, 27, 39, 49) BOLD, ULINE, BLINK, INVERT = (1, 4, 5, 7) # xterm-256 sequence initiators; these should be followed by a single color index. # both text and background can be specified in a single sgr call. TXT = '38;5' BG = '48;5' # RGB6 color cube: 6x6x6, from black to white. K = 16 # black. W = 231 # white. # Grayscale: the 24 palette values have a suggested 8 bit grayscale range of [8, 238]. middle_gray_indices = range(232, 256) def gray26(n:int) -> int: assert 0 <= n < 26 if n == 0: return K if n == 25: return W return W + n def rgb6(r:int, g:int, b:int) -> int: 'index RGB triples into the 256-color palette (returns 16 for black, 231 for white).' assert 0 <= r < 6 assert 0 <= g < 6 assert 0 <= b < 6 return (((r * 6) + g) * 6) + b + 16 # same-same colors. 
C_FILE = sgr(BG, rgb6(1, 0, 1)) C_MODE = sgr(BG, rgb6(1, 0, 1)) C_LOC = sgr(BG, rgb6(0, 1, 2)) C_UNKNOWN = sgr(BG, rgb6(5, 0, 5)) C_SNIPPET = sgr(TXT, gray26(22)) C_DROPPED = sgr(TXT, gray26(10)) C_REM_LINE = sgr(BG, rgb6(1, 0, 0)) C_ADD_LINE = sgr(BG, rgb6(0, 1, 0)) C_REM_MOVED = sgr(TXT, rgb6(4, 2, 0)) C_ADD_MOVED = sgr(TXT, rgb6(2, 4, 0)) C_REM_TOKEN = sgr(TXT, rgb6(5, 2, 3), BOLD) C_ADD_TOKEN = sgr(TXT, rgb6(2, 5, 3), BOLD) C_RST_TOKEN = sgr(RST_TXT, RST_BOLD) C_STRANGE = sgr(INVERT) C_RST_STRANGE = sgr(RST_INVERT) C_END = ERASE_LINE_F + RST def vscode_path(path:str) -> str: 'VSCode will only recognize source locations if the path contains a slash; add "./" to plain file names.' if '/' in path or '<' in path or '>' in path: return path # Do not alter pseudo-names like <stdin>. return './' + path def errL(*items:Any) -> None: print(*items, sep='', file=stderr) def errSL(*items:Any) -> None: print(*items, file=stderr) if __name__ == '__main__': main()
33.300221
124
0.646868
2,431
15,085
3.828466
0.203209
0.012894
0.007091
0.005587
0.121092
0.072634
0.058236
0.038036
0.028581
0.019985
0
0.019585
0.214717
15,085
452
125
33.373894
0.766081
0.214915
0
0.058997
0
0.0059
0.163408
0.011241
0
0
0.003584
0.002212
0.041298
1
0.056047
false
0.0059
0.023599
0.00885
0.115044
0.035398
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a49b0b0bcc9d432f9a0ecf41ea8e6994b42eabc
2,443
py
Python
hyperglass/models/commands/__init__.py
blkmajik/hyperglass
c52a6f609843177671d38bcad59b8bd658f46b64
[ "BSD-3-Clause-Clear" ]
298
2019-06-17T13:51:46.000Z
2021-06-23T18:09:51.000Z
hyperglass/models/commands/__init__.py
blkmajik/hyperglass
c52a6f609843177671d38bcad59b8bd658f46b64
[ "BSD-3-Clause-Clear" ]
137
2019-06-18T12:59:37.000Z
2021-06-19T05:50:58.000Z
hyperglass/models/commands/__init__.py
blkmajik/hyperglass
c52a6f609843177671d38bcad59b8bd658f46b64
[ "BSD-3-Clause-Clear" ]
42
2019-06-18T07:25:23.000Z
2021-06-18T17:40:20.000Z
"""Validate command configuration variables.""" # Local from .frr import FRRCommands from .bird import BIRDCommands from .tnsr import TNSRCommands from .vyos import VyosCommands from ..main import HyperglassModelExtra from .common import CommandGroup from .huawei import HuaweiCommands from .juniper import JuniperCommands from .cisco_xr import CiscoXRCommands from .cisco_ios import CiscoIOSCommands from .arista_eos import AristaEOSCommands from .cisco_nxos import CiscoNXOSCommands from .nokia_sros import NokiaSROSCommands from .mikrotik_routeros import MikrotikRouterOS from .mikrotik_switchos import MikrotikSwitchOS _NOS_MAP = { "arista_eos": AristaEOSCommands, "bird": BIRDCommands, "cisco_ios": CiscoIOSCommands, "cisco_nxos": CiscoNXOSCommands, "cisco_xr": CiscoXRCommands, "frr": FRRCommands, "huawei": HuaweiCommands, "juniper": JuniperCommands, "mikrotik_routeros": MikrotikRouterOS, "mikrotik_switchos": MikrotikSwitchOS, "nokia_sros": NokiaSROSCommands, "tnsr": TNSRCommands, "vyos": VyosCommands, } class Commands(HyperglassModelExtra): """Base class for command definitions.""" arista_eos: CommandGroup = AristaEOSCommands() bird: CommandGroup = BIRDCommands() cisco_ios: CommandGroup = CiscoIOSCommands() cisco_nxos: CommandGroup = CiscoNXOSCommands() cisco_xr: CommandGroup = CiscoXRCommands() frr: CommandGroup = FRRCommands() huawei: CommandGroup = HuaweiCommands() juniper: CommandGroup = JuniperCommands() mikrotik_routeros: CommandGroup = MikrotikRouterOS() mikrotik_switchos: CommandGroup = MikrotikSwitchOS() nokia_sros: CommandGroup = NokiaSROSCommands() tnsr: CommandGroup = TNSRCommands() vyos: CommandGroup = VyosCommands() @classmethod def import_params(cls, **input_params): """Import loaded YAML, initialize per-command definitions. Dynamically set attributes for the command class. 
Arguments: input_params {dict} -- Unvalidated command definitions Returns: {object} -- Validated commands object """ obj = Commands() for nos, cmds in input_params.items(): nos_cmd_set = _NOS_MAP.get(nos, CommandGroup) nos_cmds = nos_cmd_set(**cmds) setattr(obj, nos, nos_cmds) return obj class Config: """Override pydantic config.""" validate_all = False
31.727273
66
0.720016
232
2,443
7.426724
0.344828
0.01567
0.023215
0
0
0
0
0
0
0
0
0
0.198526
2,443
76
67
32.144737
0.87998
0.139173
0
0
0
0
0.053615
0
0
0
0
0
0
1
0.018519
false
0
0.296296
0
0.611111
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
7a4aca28089648fa6cf04d319fbef84a5a8cbf94
1,912
py
Python
ED_Chapter4.py
Wang-ZhengYi/ED_Chapter5_code
f63861ab8b6bce4756b7f0e1fd15041a976c1a38
[ "MIT" ]
1
2019-12-19T11:04:49.000Z
2019-12-19T11:04:49.000Z
ED_Chapter4.py
Wang-ZhengYi/ED_assignment
f63861ab8b6bce4756b7f0e1fd15041a976c1a38
[ "MIT" ]
null
null
null
ED_Chapter4.py
Wang-ZhengYi/ED_assignment
f63861ab8b6bce4756b7f0e1fd15041a976c1a38
[ "MIT" ]
null
null
null
#!\usr\bin\python3 # -*- coding: utf-8 -*- ''' Created on Oct. 2019 ED_Chapter4 @author: ZYW @ BNU ''' import numpy as np import matplotlib.pyplot as plt from matplotlib import cm from scipy import interpolate from mpl_toolkits.mplot3d import Axes3D import os from matplotlib import font_manager as fm, rcParams import astropy.units as u ##------------parameters settings-----------------## pixel_sides = 10#pixels per cm N = np.array([3,3,3])#wave node numbers L = np.array([100,100,100])#unit:cm A = np.array([2,12,5])#initial intensities pi = np.pi K_0 = np.array([N[0]*pi/L[0],N[1]*pi/L[1],N[2]*pi/L[2]])/pixel_sides#wave vector fpath = os.path.join(rcParams["datapath"], "fonts/ttf/cmr10.ttf") prop = fm.FontProperties(fname=fpath) xx = np.linspace(0,L[0]*pixel_sides,L[0]*pixel_sides) yy = np.linspace(0,L[1]*pixel_sides,L[1]*pixel_sides) zz = np.zeros(0,L[1]*pixel_sides,L[1]*pixel_sides) ##------------functions settings-----------------## ''' def E_x(x,y,z): return A[0]*np.cos(x*K_0[0])*np.sin(y*K_0[1])*np.sin(z*K_0[2]) def E_y(x,y,z): return A[1]*np.sin(x*K_0[0])*np.cos(y*K_0[1])*np.sin(z*K_0[2]) ''' def E_z(x,y,z): return A[2]*np.sin(x*K_0[0])*np.sin(y*K_0[1])*np.cos(z*K_0[2]) #Intensities of 3 directions in Cartissian coordinate xx, yy= np.meshgrid(xx, yy) zz = 11 E = E_z(xx,yy,zz) def draw3D(X,Y,Z,angle): fig = plt.figure(figsize=(15,7)) ax1 = fig.add_subplot(121) ax1.imshow(Z,cmap='YlGnBu') ax2 = fig.add_subplot(122,projection='3d') ax2.view_init(angle[0],angle[1]) ax2.plot_surface(X, Y, Z, rstride=1, cstride=1,cmap='rainbow',alpha=0.8) surf = ax2.contourf(X,Y,Z,zdir='z',offset=-5,cmap='rainbow') ax1.set_title(r'$E_z-plane-figure$') ax2.set_title(r'$E_z-hologram$') plt.tight_layout() plt.savefig('ED_4.png',dpi=600) plt.show() ##------------data writting & figures making-----------------## draw3D(xx,yy,E,(45,45)) exit()
29.875
80
0.643828
368
1,912
3.25
0.394022
0.016722
0.01505
0.040134
0.147157
0.103679
0.103679
0.090301
0.090301
0.048495
0
0.061429
0.11454
1,912
63
81
30.349206
0.645009
0.190377
0
0
0
0
0.066617
0
0
0
0
0
0
1
0.051282
false
0
0.205128
0.025641
0.282051
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a4b1a9183a636b76d0da0668faad0f0e939fdff
25
py
Python
dataloaders/__init__.py
RishiTejaMadduri/pyramid-fuse
a8bad9adc2734572c87c5ee4c2a956aa2d04fb97
[ "MIT" ]
null
null
null
dataloaders/__init__.py
RishiTejaMadduri/pyramid-fuse
a8bad9adc2734572c87c5ee4c2a956aa2d04fb97
[ "MIT" ]
null
null
null
dataloaders/__init__.py
RishiTejaMadduri/pyramid-fuse
a8bad9adc2734572c87c5ee4c2a956aa2d04fb97
[ "MIT" ]
null
null
null
from .voc1 import VOC
5
21
0.68
4
25
4.25
1
0
0
0
0
0
0
0
0
0
0
0.055556
0.28
25
4
22
6.25
0.888889
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
7a4c25d4caa2651ec1f11e6dec4ad5d6ef3519b1
6,618
py
Python
test/dramatis/actor/name_test.py
dramatis/dramatis
1a43a6be1d7e7e9fd2cde052430d6e84700dc822
[ "MIT" ]
5
2015-11-05T01:51:29.000Z
2019-04-16T09:09:19.000Z
test/dramatis/actor/name_test.py
halorgium/dramatis
50b35c4e79c33e438cb9f5eeab51ab73119bd75d
[ "MIT" ]
null
null
null
test/dramatis/actor/name_test.py
halorgium/dramatis
50b35c4e79c33e438cb9f5eeab51ab73119bd75d
[ "MIT" ]
1
2022-03-03T19:51:04.000Z
2022-03-03T19:51:04.000Z
#!/bin/env python import inspect import sys import os.path import threading from logging import warning sys.path[0:0] = [ os.path.join( os.path.dirname( inspect.getabsfile( inspect.currentframe() ) ), '..', '..', 'lib' ) ] from inspect import currentframe from inspect import getframeinfo from traceback import format_list from traceback import extract_tb from traceback import print_exc from sys import exc_info import dramatis import dramatis.error from dramatis import interface Actor = dramatis.Actor sys.path[0:0] = [ os.path.join( os.path.dirname( inspect.getabsfile( inspect.currentframe() ) ), '..', '..' ) ] from test_helper import DramatisTestHelper class Name_Test ( DramatisTestHelper ): def teardown(self): self.runtime_check() def test_attribute_error_no_atts(self): "should return AttributeError as appropriate" actor = dramatis.Actor( object() ) okay = False try: actor.foo() raise Exception("should not be reached") except AttributeError, ae: assert str(ae) == "'object' object has no attribute 'foo'" okay = True assert okay def test_attribute_error(self): "should return AttributeError as appropriate" o = object() actor = dramatis.Actor( o ) okay = False try: actor.foo() raise Exception("should not be reached") except AttributeError, ae: assert str(ae) == "'object' object has no attribute 'foo'" okay = True assert okay def test_recreate_errors(self): "should recreate errors rather just forward them(?)" def test_block_methods_during_cont(self): "should block other methods during a continuation" def test_unbound(self): "should be creatable unbound" dramatis.Actor() def test_msg_unbound(self): "should allow messages to unbound" okay = False try: dramatis.Actor().foo() raise Exception("should not be reached") except dramatis.Deadlock: okay = True assert okay def test_creatable_bound(self): "should be creatable bound" name = dramatis.Actor( object() ) assert isinstance(name,dramatis.Actor.Name) def test_allow_and_exec_msgs(self): "should allow and execute messages to 
bound names" class o ( object ): def foo(self,arg): assert arg == "bar" return "foobar" name = dramatis.Actor( o() ) result = name.foo("bar") assert result == "foobar" def test_delv_releases(self): class O (object): def foo(self,arg): assert arg == "bar" name = dramatis.Actor( O() ) dramatis.interface( name ).continuation(None).foo("bar") def test_short_release(self): "should have a nice short method for casts" class O (object): def foo(self,arg): assert arg == "bar" name = dramatis.Actor( O() ) dramatis.release( name ).foo( "bar" ) def test_release_from_interface(self): "should suport cast from the object interface" def test_no_double_binding(self): "shouldn't be possible to bind twice" name = dramatis.Actor() dramatis.interface( name ).bind( object() ) okay = False try: dramatis.interface( name ).bind( object() ) raise Exception("should not be reached") except dramatis.error.Bind: okay = True assert okay def test_allow_exec_blocks(self): "should allow and execute block continuations" class O (object): def foo(self,arg): assert arg == "bar" return "foobar" actor = O() name = dramatis.Actor(actor) result = [] def block(value): result[:] = [value] retval = dramatis.interface( name ).continuation(block).foo( "bar" ) assert retval == None assert result == [] assert result == [] dramatis.Runtime.current.quiesce() assert result == ["foobar"] def test_exec_tasks_after_binding(self): "should execute messages to unbound names once bound" name = dramatis.Actor() class O(object): def foo(self,arg): assert arg == "bar" return "foobar" result = [] def block(value): result[:] = [ value ] retval = dramatis.interface( name ).continuation(block).foo("bar") assert retval == None assert result == [] dramatis.Runtime.current.quiesce() assert result == [] dramatis.interface( name ).bind( O() ) dramatis.Runtime.current.quiesce() assert result == [ "foobar" ] def test_rpc_binds_return_name(self): "rpc binds should return an actor name" name = dramatis.Actor() retval = 
dramatis.interface( name ).bind( dict() ) assert isinstance(retval,dramatis.Actor.Name) def test_bind_with_release(self): "should be possible to bind with a non-rpc continuation" name = dramatis.Actor() result = [] def block(v): result[:] = [ v ] name = dramatis.interface( name ).continuation(block) retval = dramatis.interface( name ).bind( object() ) assert retval == None assert result == [] dramatis.Runtime.current.quiesce() assert result != [] def test_url(self): "should provide a url, if asked" def test_unboudn_queue_ordered(self): "unbound names should queue messages and deliver them in order" def test_sometimes_out_of_order(self): "messages should be delivered out of order sometimes" def test_flush_quarantees_order(self): "flushing should guarantee message order" def test_can_use_call_sytanx(self): class Foo( dramatis.Actor ): def __call__( self, arg, foo, bar ): assert arg == "foobar" assert foo == "foo" assert bar == "bar" return "okay" actor = Foo() assert actor("foobar", "foo", bar = "bar" ) == "okay" def test_can_use_left_shift_sytanx(self): class Foo( dramatis.Actor ): def __lshift__( self, arg ): assert arg == "foobar" return "okay" actor = Foo() assert actor << "foobar" == "okay"
29.154185
118
0.586129
740
6,618
5.133784
0.214865
0.040537
0.044749
0.02527
0.473282
0.396683
0.360884
0.324559
0.307186
0.266649
0
0.000881
0.313841
6,618
226
119
29.283186
0.835719
0.002418
0
0.473988
0
0
0.165429
0
0
0
0
0
0.179191
0
null
null
0
0.086705
null
null
0.00578
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
7a4c4b862367ea16f7d5febe0d7eca4640eb21bd
6,281
py
Python
ReanalysisRetrieval/NARR_RetrieveLocation_Variable.py
shaunwbell/FOCI_Analysis
dde4a5f0badd76fe5719575d5c138813ab156b70
[ "MIT" ]
null
null
null
ReanalysisRetrieval/NARR_RetrieveLocation_Variable.py
shaunwbell/FOCI_Analysis
dde4a5f0badd76fe5719575d5c138813ab156b70
[ "MIT" ]
null
null
null
ReanalysisRetrieval/NARR_RetrieveLocation_Variable.py
shaunwbell/FOCI_Analysis
dde4a5f0badd76fe5719575d5c138813ab156b70
[ "MIT" ]
null
null
null
#!/usr/bin/env python """ Background: -------- NARR_RetrieveLocation_Variable.py Purpose: -------- Routines to retrieve, output NARR data from a single point over time to combine for analysis History: -------- 2016-09-20 : Bell - simplify existing multiple routines for various locations into one package """ #System Stack import datetime import sys #Science Stack import numpy as np from netCDF4 import num2date #User Stack from io_utils.EcoFOCI_netCDF_read import EcoFOCI_netCDF from calc.EPIC2Datetime import EPIC2Datetime, get_UDUNITS, Datetime2EPIC import calc.haversine as sphered __author__ = 'Shaun Bell' __email__ = 'shaun.bell@noaa.gov' __created__ = datetime.datetime(2016, 9, 20) __modified__ = datetime.datetime(2016, 9, 20) __version__ = "0.1.0" __status__ = "Development" __keywords__ = 'NARR' "---" def rotate_coord(angle_rot, mag, dir): """ converts math coords to along/cross shelf. + onshore / along coast with land to right (right handed) - offshore / along coast with land to left Todo: convert met standard for winds (left handed coordinate system """ dir = dir - angle_rot along = mag * np.sin(np.deg2rad(dir)) cross = mag * np.cos(np.deg2rad(dir)) return (along, cross) def triangle_smoothing(data_in): weights=np.array([0.25,0.5,0.25]) filtered_data = np.convolve(data_in,np.array(weights),'same') #edge effects return filtered_data def from_netcdf_1dsplice(infile, height_ind, lat_ind, lon_ind): """ Uses ncreadfile_dic which returns a dictionary of all data from netcdf""" ###nc readin/out df = EcoFOCI_netCDF(infile) nchandle = df._getnchandle_() params = df.get_vars() #gets all of them print "Parameters available: " #print params ncdata = ncreadfile_dic_slice(nchandle, params, height_ind=height_ind, lat_ind=lat_ind, lon_ind=lon_ind) df.close() return ncdata def get_geocoords(infile, lat='lat', lon='lon'): df = EcoFOCI_netCDF(infile) nchandle = df._getnchandle_() data = {} for j, v in enumerate([lat, lon]): data[v] = nchandle.variables[v][:] df.close() return 
(data) def ncreadfile_dic_slice(nchandle, params, height_ind=None, lat_ind=None, lon_ind=None): """returns slice of data for all times but for specified height/lat/lon indicies""" data = {} if height_ind == None: for j, v in enumerate(params): try: #check for nc variable data[v] = nchandle.variables[v][:,lat_ind,lon_ind] except ValueError: #if parameter is not of expected dimensions data[v] = nchandle.variables[v][:] else: for j, v in enumerate(params): try: #check for nc variable data[v] = nchandle.variables[v][:,:,lat_ind,lon_ind] except ValueError: #if parameter is not of expected dimensions data[v] = nchandle.variables[v][:] return data """--------------------------------main Routines---------------------------------------""" """ currently hard coded - variables and ranges """ ### Grab grid points for future slicing - assume grid is same in all model output NARR = '/Volumes/WDC_internal/Users/bell/Data_Local/Reanalysis_Files/NARR/daily/' infile = [NARR + 'uwnd.10m.2016.nc'] lat_lon = get_geocoords(infile[0]) #stn ['1','2'] station_name = ['UP stn_1'] sta_lat = [54.5] sta_long = [161.0] #Find NARR nearest point to moorings - haversine formula # NARR data is -180->180 (positive east), Moorings are usually expressed +W for FOCI station_1 = sphered.nearest_point([sta_lat[0],-1 * sta_long[0]],lat_lon['lat'],lat_lon['lon'], '2d') stn1_modelpt = [lat_lon['lat'][station_1[3],station_1[4]],lat_lon['lon'][station_1[3],station_1[4]]] print "stn1 nearest point to %s, %s which is lat:%s , lon:%s" \ % (sta_lat[0], sta_long[0], stn1_modelpt[0], stn1_modelpt[1]) """ #loop over all requested data years = range(2010,2017) years = ['mon.mean'] for yy in years: # retrieve only these location's data # uwnd infile = NARR + 'uwnd.10m.'+ str(yy) + '.nc' print "Working on file " + infile stn1_data = from_netcdf_1dsplice(infile, None, station_1[3], station_1[4]) #filter data stn1u_f = triangle_smoothing(stn1_data['uwnd']) stn1u = stn1_data['uwnd'] # retrieve only these location's data # 
vwnd infile = NARR + 'vwnd.10m.'+ str(yy) + '.nc' print "Working on file " + infile stn1_data = from_netcdf_1dsplice(infile, None, station_1[3], station_1[4]) #filter data stn1v_f = triangle_smoothing(stn1_data['vwnd']) stn1v = stn1_data['vwnd'] #convert to EPIC time #epic_time, epic_time1 = Datetime2EPIC(num2date(stn1_data['time'], "hours since 1800-1-1 00:00:0.0")) Datetime2EPIC(num2date(x, "hours since 1800-1-1 00:00:0.0")) for x in stn1_data['time'] ### #output 0,6,12,18 UTC #subsample data # time_ind = np.where(pydate%0.25 == 0)[0] # output u,v wind components from model grid points save_to_nc = False if save_to_nc: # write to NetCDF outfile = 'data/NARR_stn1_' + str(yy) + '.nc' print "Writing to Epic NetCDF " + outfile # write2epic( outfile, station_name[1], [epic_time[time_ind], epic_time1[time_ind]], stn1_modelpt, [stn1u_f[time_ind], stn1v_f[time_ind]]) write2epic( outfile, station_name[1], [epic_time, epic_time1], stn1_modelpt, [stn1u, stn1v]) """ """-----------using xarray---------""" import pandas as pd import xarray as xa #index = [station_1[3],station_1[4]] index=[195,76] ufilein='/Volumes/WDC_internal/Users/bell/Data_Local/Reanalysis_Files/NARR/daily/uwnd.10m.2016.nc' udata = xa.open_dataset(ufilein, decode_cf=False) udata = xa.decode_cf(udata,mask_and_scale=False) dum = udata.uwnd[:443,195,76].resample('D', udata.time, how='mean') print dum.to_pandas().to_csv() vfilein='/Volumes/WDC_internal/Users/bell/Data_Local/Reanalysis_Files/NARR/daily/vwnd.10m.2016.nc' vdata = xa.open_dataset(vfilein, decode_cf=False) vdata = xa.decode_cf(vdata,mask_and_scale=False) dvm = vdata.vwnd[:443,195,76].resample('D', vdata.time, how='mean') print dvm.to_pandas().to_csv()
30.940887
145
0.660723
906
6,281
4.397351
0.317881
0.022088
0.011295
0.02761
0.331576
0.271084
0.24247
0.182229
0.182229
0.171185
0
0.041362
0.19551
6,281
202
146
31.094059
0.747081
0.079764
0
0.217949
0
0.025641
0.1305
0.074744
0
0
0
0.004951
0
0
null
null
0
0.115385
null
null
0.051282
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
7a4dcae230249ada20dad7e710e2b47b9491c925
710
py
Python
rootUtilLib.py
fermi-lat/rootUtil
3d326b1c35284080b3049cdcee8107205851048b
[ "BSD-3-Clause" ]
null
null
null
rootUtilLib.py
fermi-lat/rootUtil
3d326b1c35284080b3049cdcee8107205851048b
[ "BSD-3-Clause" ]
null
null
null
rootUtilLib.py
fermi-lat/rootUtil
3d326b1c35284080b3049cdcee8107205851048b
[ "BSD-3-Clause" ]
null
null
null
# $Header: /nfs/slac/g/glast/ground/cvs/GlastRelease-scons/rootUtil/rootUtilLib.py,v 1.3 2008/10/27 17:49:11 ecephas Exp $ def generate(env, **kw): if not kw.get('depsOnly', 0): env.Tool('addLibrary', library = ['rootUtil']) if env['PLATFORM']=='win32' and env.get('CONTAINERNAME','')=='GlastRelease': env.Tool('findPkgPath', package = 'rootUtil') env.Tool('addLibrary', library = env['rootLibs']) env.Tool('addLibrary', library = env['minuitLibs']) env.Tool('addLibrary', library = env['rootGuiLibs']) if env['PLATFORM']=='win32' and env.get('CONTAINERNAME','')=='GlastRelease': env.Tool('findPkgPath', package = 'facilities') def exists(env): return 1;
50.714286
122
0.649296
89
710
5.179775
0.52809
0.091106
0.147505
0.208243
0.509761
0.334056
0.334056
0.334056
0.334056
0.334056
0
0.036484
0.150704
710
13
123
54.615385
0.728027
0.169014
0
0.166667
1
0
0.341837
0
0
0
0
0
0
0
null
null
0
0
null
null
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
7a4f041d9b0a170d8f07557ba868331447b86391
3,748
py
Python
experiments/prediction_similarity.py
CrispyHarder/deep-weight-prior
b87e61d6ad590c61b90e188ec86bfb956073be65
[ "MIT" ]
null
null
null
experiments/prediction_similarity.py
CrispyHarder/deep-weight-prior
b87e61d6ad590c61b90e188ec86bfb956073be65
[ "MIT" ]
null
null
null
experiments/prediction_similarity.py
CrispyHarder/deep-weight-prior
b87e61d6ad590c61b90e188ec86bfb956073be65
[ "MIT" ]
null
null
null
import os import torch import argparse import yaml import utils import matplotlib.pyplot as plt import seaborn as sns import math import numpy as np from datetime import date def plot_matrix_as_heatmap(matrix,show=False,title='',xlabel='',ylabel='',save_path=''): '''plots the cosine similariy matrix of a number of models or model configurations''' n = np.shape(np.array(matrix))[0] ticks = math.floor(n/4) sns.set_theme() ax = sns.heatmap(matrix,xticklabels=ticks,yticklabels=ticks,cmap='bwr') ax.invert_yaxis() ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) ax.set_title(title) if save_path: plt.savefig(save_path) if show: plt.show() logs_path = os.path.join('logs','exman-train-net.py','runs') runs = [os.path.join(logs_path,run) for run in os.listdir(logs_path) if run[:6] not in ['000001','000002']] INIT_NAMES = [['vae'],['ghn_default']] SAVE_PATH = os.path.join('..','..','small-results',str(date.today()),'prediction_similarity') if not os.path.exists(SAVE_PATH): os.makedirs(SAVE_PATH) parser = argparse.ArgumentParser() parser.add_argument('--init',type=str) parser.add_argument('--device') parser.add_argument('--sim',choices=['pred','logits']) args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id use_cuda = torch.cuda.is_available() device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") torch.cuda.manual_seed_all(42) torch.manual_seed(42) init = args.init model_paths = [] for run in runs: file = os.path.join(run,'net_params.torch') yaml_p = os.path.join(run,'params.yaml') with open(yaml_p) as f: dict = yaml.full_load(f) if not 'mult_init_prior' in dict: if dict['mult_init_mode'] == init: model_paths.append(file) _, testloader = utils.load_dataset(data='cifar', train_bs=64, test_bs=500, num_examples=None, seed=42,augmentation=False) if args.sim == 'pred': all_predictions = [] for model_path in model_paths: if init == 'vae': model = utils.load_vae(model_path,device) predictions = [] for x,_ in testloader: x = x.to(device) p = model(x) 
predictions.append(p.max(1)[1]) predictions = torch.cat(predictions) all_predictions.append(predictions) all_predictions = torch.stack(all_predictions) length_data = all_predictions.shape[1] matrix = torch.zeros(length_data,length_data) for i in range(length_data): for j in range(i+1): pred_sim = torch.sum(all_predictions[i] == all_predictions[j])/length_data matrix[i,j] = matrix[j,i] = pred_sim if args.sim == 'logits': CosineSimilarity = torch.nn.CosineSimilarity(dim=0) all_predictions = [] for model_path in model_paths: if init == 'vae': model = utils.load_vae(model_path,device) predictions = [] for x,_ in testloader: x = x.to(device) p = model(x) predictions.append(torch.flatten(p)) predictions = torch.cat(predictions) all_predictions.append(predictions) all_predictions = torch.stack(all_predictions) length_data = all_predictions.shape[1] matrix = torch.zeros(length_data,length_data) for i in range(length_data): for j in range(i+1): cos_sim = CosineSimilarity(all_predictions[i],all_predictions[j]) matrix[i,j] = matrix[j,i] = cos_sim title = f'{args.sim} Similarity of {args.init} inits' save_path = os.path.join(SAVE_PATH,title) plot_matrix_as_heatmap(matrix,title=title,save_path=save_path)
33.765766
122
0.648346
526
3,748
4.444867
0.30038
0.083832
0.025663
0.017964
0.366125
0.329341
0.289991
0.289991
0.289991
0.289991
0
0.011648
0.221185
3,748
110
123
34.072727
0.789311
0.021078
0
0.326087
0
0
0.076078
0.005917
0
0
0
0
0
1
0.01087
false
0
0.108696
0
0.119565
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a4f2b510df94c47aa5b903e4031611038525ce6
425
py
Python
website/models/user.py
olaruandreea/flaskbootstrapblog
462f2f700c3b73a794842f1fad4b318c6406c678
[ "MIT" ]
null
null
null
website/models/user.py
olaruandreea/flaskbootstrapblog
462f2f700c3b73a794842f1fad4b318c6406c678
[ "MIT" ]
4
2020-05-03T11:45:01.000Z
2020-06-13T19:43:26.000Z
website/models/user.py
olaruandreea/flaskbootstrapblog
462f2f700c3b73a794842f1fad4b318c6406c678
[ "MIT" ]
null
null
null
from datetime import datetime from flask_login import UserMixin from flask import Flask, current_app from website.routes import get_db db = get_db() class User(db.Model, UserMixin): id = db.Column(db.Integer, primary_key=True) username = db.Column(db.String(20), unique=True, nullable=False) email = db.Column(db.String(120), unique=True, nullable=False) password = db.Column(db.String(60), nullable=False)
32.692308
68
0.748235
65
425
4.815385
0.476923
0.102236
0.127796
0.153355
0
0
0
0
0
0
0
0.019126
0.138824
425
12
69
35.416667
0.836066
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0.1
0.4
0
0.9
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
1
0
0
3
7a4fe2e81bcaaac8a994f19cd3b167cfb47d0dfa
2,116
py
Python
plugins/pattern_navigate.py
OdatNurd/SublimeScraps
e9f97d2abc2a182ccec4a7ac8fa56e0911ef4c5c
[ "MIT" ]
2
2017-01-26T06:27:58.000Z
2017-07-13T22:48:19.000Z
plugins/pattern_navigate.py
OdatNurd/SublimeScraps
e9f97d2abc2a182ccec4a7ac8fa56e0911ef4c5c
[ "MIT" ]
null
null
null
plugins/pattern_navigate.py
OdatNurd/SublimeScraps
e9f97d2abc2a182ccec4a7ac8fa56e0911ef4c5c
[ "MIT" ]
null
null
null
import sublime import sublime_plugin # Related Reading: # https://forum.sublimetext.com/t/find-for-a-macro/57387/ # # This example command allows you to jump the cursor to the next or previous # location of a given pattern of text, which can be either a regex or not and # case sensitive or not based on command arguments. # # A use case for this is implementing a specific Find operation in a macro in # a repeatable way. class PatternNavigateCommand(sublime_plugin.TextCommand): """ Jump the selection in the file to the next or previous location of the given textual pattern based on the current cursor location. The search direction is controlled by the forward argument, and will wrap around the ends of the buffer. """ def run(self, edit, pattern, literal=True, ignorecase=False, forward=True): # Convert the incoming arguments to the appropriate search flags. flags = ((sublime.LITERAL if literal else 0) | (sublime.IGNORECASE if ignorecase else 0)) # Find the locations where this pattern occurs; leave if none regions = self.view.find_all(pattern, flags) if not regions: return # Get a starting point for our search, and where we should jump to if # there are no matches in the specified direction. point = self.view.sel()[0].b fallback = regions[-1] if not forward else regions[0] # Remove all selections. self.view.sel().clear() # Look in the given direction for the first match from the current # position; if one is found jump there. pick = lambda p: (point < p.a) if forward else (point > p.a) for pos in regions if forward else reversed(regions): if pick(pos): return self.jump(pos.a) # No matches in the search direction, so wrap around. self.jump(fallback.a) def jump(self, point): # Add in the given position as a selection and ensure that it's # visible. self.view.sel().add(sublime.Region(point)) self.view.show(point, True)
37.785714
79
0.666352
308
2,116
4.568182
0.435065
0.017768
0.023454
0.015636
0.041222
0.041222
0.041222
0
0
0
0
0.006406
0.262287
2,116
55
80
38.472727
0.894939
0.518904
0
0
0
0
0
0
0
0
0
0
0
1
0.1
false
0
0.1
0
0.35
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a50d6dea9ac0f55af007919a82e31ba7ff4734d
6,440
py
Python
main.py
gaelfargeas/markdown_to_static_site
8f270ab38e7cf93f74b58bd64c96e7571f8c5262
[ "BSD-3-Clause" ]
1
2021-12-13T12:00:21.000Z
2021-12-13T12:00:21.000Z
main.py
gaelfargeas/markdown_to_static_site
8f270ab38e7cf93f74b58bd64c96e7571f8c5262
[ "BSD-3-Clause" ]
null
null
null
main.py
gaelfargeas/markdown_to_static_site
8f270ab38e7cf93f74b58bd64c96e7571f8c5262
[ "BSD-3-Clause" ]
null
null
null
import argparse from pathlib import Path import markdown2 import jinja2 import os import shutil parser = argparse.ArgumentParser() parser.add_argument("-i", help="Chemin du/des source.", type=str) parser.add_argument("-o", help="Chemin du dossier des fichiers générés.", type=str) parser.add_argument("-t", help="Chemin du dossier des fichiers modeles.", type=str) parser.add_argument( "-s", help="type de sources (fichier ou dossier).", action="store_true" ) parser.add_argument("-v", "--verbose", help="Verbose mode.", action="store_true") args = parser.parse_args() VERBOSE = args.verbose if VERBOSE: print("input :", args.i) print("output :", args.o) print("template:", args.t) print("type input", args.s) def add_image(html_test): src_path = Path(args.o + "/src") if not os.path.exists(src_path): os.makedirs(src_path) if args.s: input_path = os.fspath(args.i) else: input_path = os.path.dirname(args.i) result_string = "" for line in html_test.split("\n"): line = str(line) if "<img " in line: image_path = line.split('src="')[1].split('" ')[0] if VERBOSE: print("image_path", image_path) image_name = image_path.split("/")[-1].split("\\")[-1] if VERBOSE: print("image_name", image_name) shutil.copyfile( str(input_path) + "/" + image_path, str(src_path) + "/" + image_name ) line = ( line.split('src="')[0] + 'src="./src/' + image_name + '" ' + line.split('src="')[-1].split('" ')[-1] ) result_string += line + "\n" else: result_string += line + "\n" return result_string if __name__ == "__main__": if args.s: if args.i != None and args.o != None: with Path(args.i) as directory: for file in list(directory.glob("*_main.md")): config_dict = {} with open(file, "r") as input_file: if VERBOSE: print("intput file :", input_file.name) file_name = ( input_file.name.split(".")[-2] .split("/")[-1] .split("\\")[-1] .split("_main")[0] ) with open( str(args.o) + "/" + str(file_name) + ".html", "w" ) as output_file: if VERBOSE: print("output file :", output_file.name) html = 
markdown2.markdown(input_file.read()) config_dict["main"] = html if args.t != None: for config_file in list( directory.glob(file_name + "*.md") ): config_name = ( config_file.name.split(".")[-2] .split(file_name + "_")[-1] .lower() ) if config_name != "main": with open(config_file, "r") as open_config_file: config_dict[ config_name ] = open_config_file.read() with open(args.t) as template_file: resutl = jinja2.Template( template_file.read() ).render(config_dict) else: resutl = html if VERBOSE: print("template file :", args.t) resutl = add_image(resutl) output_file.write(resutl) else: if args.i != None and args.o != None: config_dict = {} with open(args.i, "r") as input_file: if VERBOSE: print("intput file :", input_file.name) file_name = ( input_file.name.split(".")[-2] .split("/")[-1] .split("\\")[-1] .split("_main")[0] ) with open( str(args.o) + "/" + str(file_name) + ".html", "w" ) as output_file: if VERBOSE: print("output file :", output_file.name) html = markdown2.markdown(input_file.read()) config_dict["main"] = html if args.t != None: path_directory = args.i.split(file_name + "_main.md")[0] with Path(path_directory) as directory: # recupe le dossier ou est le fichier for config_file in list(directory.glob(file_name + "*.md")): config_name = ( config_file.name.split(".")[-2] .split(file_name + "_")[-1] .lower() ) if config_name != "main": with open(config_file, "r") as open_config_file: config_dict[ config_name ] = open_config_file.read() with open(args.t) as template_file: resutl = jinja2.Template(template_file.read()).render( config_dict ) else: resutl = html if VERBOSE: print("template file :", args.t) resutl = add_image(resutl) output_file.write(resutl)
34.438503
88
0.39472
576
6,440
4.230903
0.164931
0.055806
0.051703
0.029545
0.579811
0.526057
0.501436
0.501436
0.482561
0.482561
0
0.007998
0.495186
6,440
186
89
34.623656
0.741618
0.005435
0
0.535211
0
0
0.072634
0
0
0
0
0
0
1
0.007042
false
0
0.042254
0
0.056338
0.084507
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a574b70d61a04a7120b0d3ff05a024266c78785
5,837
py
Python
src/rics/cardinality/_enum.py
rsundqvist/rics
c67ff6703facb3170535dcf173d7e55734cedbc6
[ "MIT" ]
1
2022-02-24T22:12:13.000Z
2022-02-24T22:12:13.000Z
src/rics/cardinality/_enum.py
rsundqvist/rics
c67ff6703facb3170535dcf173d7e55734cedbc6
[ "MIT" ]
26
2022-02-24T21:08:51.000Z
2022-03-19T19:55:26.000Z
src/rics/cardinality/_enum.py
rsundqvist/rics
c67ff6703facb3170535dcf173d7e55734cedbc6
[ "MIT" ]
null
null
null
from enum import Enum from typing import Tuple, Union # CardinalityLiteral = Literal["1:1", "1:N", "N:1", "M:N"] CardinalityT = Union[str, "Cardinality"] class Cardinality(Enum): """Enumeration type for cardinality relationships. Cardinalities are comparable using numerical operators, and can be thought of as comparing "preciseness". The less ambiguity there is for a given cardinality, the smaller it is in comparison to the others. The hierarchy is given by ``1:1 < 1:N = N:1 < M:N``. Note that ``1:N`` and ``N:1`` are considered equally precise. Examples: Comparing cardinalities >>> from rics.cardinality import Cardinality >>> Cardinality.ManyToOne <Cardinality.ManyToOne: 'N:1'> >>> Cardinality.OneToOne <Cardinality.OneToOne: '1:1'> >>> Cardinality.ManyToOne < Cardinality.OneToOne False """ OneToOne = "1:1" OneToMany = "1:N" ManyToOne = "N:1" ManyToMany = "M:N" @property def many_left(self) -> bool: """Many-relationship on the right, True for ``N:1`` and ``M:N``.""" return self == Cardinality.ManyToMany or self == Cardinality.ManyToOne # pragma: no cover @property def many_right(self) -> bool: """Many-relationship on the right, True for ``1:N`` and ``M:N``.""" return self == Cardinality.ManyToMany or self == Cardinality.OneToMany # pragma: no cover @property def one_left(self) -> bool: """One-relationship on the right, True for ``1:1`` and ``1:N``.""" return not self.many_left # pragma: no cover @property def one_right(self) -> bool: """One-relationship on the right, True for ``1:1`` and ``N:1``.""" return not self.many_right # pragma: no cover @property def inverse(self) -> "Cardinality": """Inverse cardinality. For symmetric cardinalities, ``self.inverse == self``. Returns: Inverse cardinality. See Also: :attr:`symmetric` """ if self == Cardinality.OneToMany: return Cardinality.ManyToOne if self == Cardinality.ManyToOne: return Cardinality.OneToMany return self @property def symmetric(self) -> bool: """Symmetry flag. 
For symmetric cardinalities, ``self.inverse == self``. Returns: Symmetry flag. See Also: :attr:`inverse` """ return self == Cardinality.OneToOne or self == Cardinality.ManyToMany def __ge__(self, other: "Cardinality") -> bool: """Equivalent to :meth:`set.issuperset`.""" return _is_superset(self, other) def __lt__(self, other: "Cardinality") -> bool: return not self >= other @classmethod def from_counts(cls, left_count: int, right_count: int) -> "Cardinality": """Derive a `Cardinality` from counts. Args: left_count: Number of elements on the left-hand side. right_count: Number of elements on the right-hand side. Returns: A :class:`Cardinality`. Raises: ValueError: For counts < 1. """ return _from_counts(left_count, right_count) @classmethod def parse(cls, arg: CardinalityT, strict: bool = False) -> "Cardinality": """Convert to cardinality. Args: arg: Argument to parse. strict: If True, `arg` must match exactly when it is given as a string. Returns: A :class:`Cardinality`. Raises: ValueError: If the argument could not be converted. """ return arg if isinstance(arg, Cardinality) else _from_generous_string(arg, strict) ######################################################################################################################## # Supporting functions # # Would rather have this in a "friend module", but that's not practical (before 3.10?) ######################################################################################################################## def _parsing_failure_message(arg: str, strict: bool) -> str: options = tuple([c.value for c in Cardinality]) alternatively = tuple([c.name for c in Cardinality]) strict_hint = "." if strict: try: strict = False Cardinality.parse(arg, strict=strict) strict_hint = f". Hint: set {strict=} to allow this input." 
except ValueError: pass return f"Could not convert {arg=} to Cardinality{strict_hint} Correct input {options=} or {repr(alternatively)}" _MATRIX = ( (Cardinality.ManyToMany, Cardinality.ManyToOne), (Cardinality.OneToMany, Cardinality.OneToOne), ) def _is_superset(c0: Cardinality, c1: Cardinality) -> bool: if c0 == c1: return True c0_i, c0_j = _pos(c0) c1_i, c1_j = _pos(c1) return c0_i <= c1_i and c0_j <= c1_j def _pos(cardinality: Cardinality) -> Tuple[int, int]: for i in range(2): for j in range(2): if _MATRIX[i][j] == cardinality: return i, j raise AssertionError("This should be impossible.") def _from_counts(left_count: int, right_count: int) -> Cardinality: if left_count < 1: raise ValueError(f"{left_count=} < 1") if right_count < 1: raise ValueError(f"{right_count=} < 1") one_left = left_count == 1 one_right = right_count == 1 return _MATRIX[int(one_left)][int(one_right)] def _from_generous_string(s: str, strict: bool) -> Cardinality: if not strict: s = s.strip().upper().replace("-", ":", 1).replace("*", "N", 2) if s == "N:N": s = "M:N" for c in Cardinality: if c.value == s: return c raise ValueError(_parsing_failure_message(s, strict))
31.38172
120
0.582662
687
5,837
4.848617
0.244541
0.040528
0.015011
0.026418
0.221855
0.194236
0.138397
0.088262
0.083458
0.058841
0
0.012314
0.262635
5,837
185
121
31.551351
0.761617
0.328422
0
0.094118
0
0.011765
0.087578
0.013405
0
0
0
0
0.011765
1
0.176471
false
0.011765
0.023529
0.011765
0.470588
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a5751b3bd6e1acf691790d0550d0f5c13fdbbcc
5,443
py
Python
salt-pillar-linter.py
Noah-Huppert/salt-pillar-linter
a18d5504a46fe314e729ed611a0d8b29a2ea97aa
[ "MIT" ]
1
2019-10-13T18:52:25.000Z
2019-10-13T18:52:25.000Z
salt-pillar-linter.py
Noah-Huppert/salt-pillar-linter
a18d5504a46fe314e729ed611a0d8b29a2ea97aa
[ "MIT" ]
null
null
null
salt-pillar-linter.py
Noah-Huppert/salt-pillar-linter
a18d5504a46fe314e729ed611a0d8b29a2ea97aa
[ "MIT" ]
1
2019-03-18T16:51:40.000Z
2019-03-18T16:51:40.000Z
#!/usr/bin/env python3 import argparse import os import sys import re import yaml import jinja2 # {{{1 Parse arguments parser = argparse.ArgumentParser(description="Lints Salt states to ensure " + "pillars are used correctly") parser.prog = 'salt-pillar-linter' parser.add_argument('-p', action='append', metavar='PILLARS_ROOT', required=True, dest='pillar_roots', help="Directories where pillars are present, can be " + "specified multiple times") parser.add_argument('-s', action='append', metavar='STATES_ROOT', required=True, dest='state_roots', help="Directories where states are located, can be " + "specified multiple times") parser.add_argument('-f', action='append', metavar='TMPL_FILE', dest='template_files', help="Non state files which uses Jinja templating to " + "check, can be specified multiple times") parser.add_argument('-d', action='store_true', default=False, dest='debug', help="Print additional debug information") args = parser.parse_args() # {{{1 Locate all state and pillar files def gather_sls_files(initial_dirs): """ Walks directories to find locations of all sls files """ dirs = set() dirs.update(initial_dirs) sls_files = set() while dirs: root = dirs.pop() for top_dir, sub_dirs, files in os.walk(root): sls_files.update([os.path.join(top_dir, f) for f in files if f != 'top.sls' and os.path.splitext(f)[1] == '.sls']) dirs.update([os.path.join(top_dir, sub_dir) for sub_dir in sub_dirs]) return sls_files pillar_files = gather_sls_files(args.pillar_roots) state_files = gather_sls_files(args.state_roots) if args.template_files: state_files.update(args.template_files) # {{{1 Get all pillar keys def flatten_dict(d, parent_key=''): """ Return array of flattened dict keys """ keys = [] for k in d: combined_key = k if parent_key: combined_key = "{}.{}".format(parent_key, k) if type(d[k]) == dict: keys.extend(flatten_dict(d[k], parent_key=combined_key)) else: keys.append(combined_key) return keys pillar_keys = {} loader = 
jinja2.FileSystemLoader(searchpath=os.getcwd()) env = jinja2.Environment(loader=loader) if args.debug: print("###################") print("# PARSING PILLARS #") print("###################") for pillar_file in pillar_files: template = env.get_template(pillar_file) template_str = None try: template_str = template.render() except Exception as e: raise ValueError("Failed to render Jinja template: {}".format(e)) value = yaml.load(template_str) flat_keys = flatten_dict(value) if args.debug: print() print ("{} keys:".format(pillar_file)) print() for k in flat_keys: print(" {}".format(k)) for k in flat_keys: pillar_keys[k] = True if args.debug: print() # {{{1 Lint states if args.debug: print("##################") print("# LINTING STATES #") print("##################") jinja_pattern = re.compile(r"{{\s*pillar\.([0-9a-zA-Z\._]*)\s*}}") for state_file in state_files: with open(state_file, 'r') as f: line_num = 1 not_keys = {} if args.debug: print("{} keys used by state:".format(state_file)) print() # For each line in a state for line in f: # For each Jinja pillar usage in state for match in re.finditer(jinja_pattern, line): # Get groups from match for pillar_str in match.groups(): if args.debug: print(" {}".format(pillar_str)) # Check if pillar key used exists if pillar_str not in pillar_keys: # Create entry in not_keys dict for line if this is the # first item on this line if line_num not in not_keys: not_keys[line_num] = [] # Add pillar key to dict so we can tell user about # improper usage later not_keys[line_num].append(pillar_str) # Increment line number so we can keep track of where errors are line_num += 1 if args.debug: print() # If any errors if not_keys: common_prefix = os.path.commonprefix([os.getcwd(), state_file]) pretty_file_name = os.path.relpath(state_file, common_prefix) print("{} uses pillar keys which do not exist".format(pretty_file_name)) for line_num in not_keys: print(" Line {}:".format(line_num)) for k in not_keys[line_num]: print (" {}".format(k)) 
print()
29.263441
84
0.529855
634
5,443
4.395899
0.291798
0.017223
0.027628
0.040187
0.112307
0.06315
0.047363
0.047363
0
0
0
0.003721
0.358075
5,443
185
85
29.421622
0.793932
0.103619
0
0.229508
0
0
0.152514
0.007214
0
0
0
0
0
1
0.016393
false
0
0.04918
0
0.081967
0.155738
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a58c9e4d6b79dfb6949d7b8df14eeeba0805cf6
7,493
py
Python
tests/unit/dataactvalidator/test_b9_award_financial.py
COEJKnight/one
6a5f8cd9468ab368019eb2597821b7837f74d9e2
[ "CC0-1.0" ]
1
2018-10-29T12:54:44.000Z
2018-10-29T12:54:44.000Z
tests/unit/dataactvalidator/test_b9_award_financial.py
COEJKnight/one
6a5f8cd9468ab368019eb2597821b7837f74d9e2
[ "CC0-1.0" ]
null
null
null
tests/unit/dataactvalidator/test_b9_award_financial.py
COEJKnight/one
6a5f8cd9468ab368019eb2597821b7837f74d9e2
[ "CC0-1.0" ]
null
null
null
from tests.unit.dataactcore.factories.staging import AwardFinancialFactory from tests.unit.dataactcore.factories.domain import ProgramActivityFactory from tests.unit.dataactcore.factories.job import SubmissionFactory from tests.unit.dataactvalidator.utils import number_of_errors, query_columns _FILE = 'b9_award_financial' def test_column_headers(database): expected_subset = {'row_number', 'agency_identifier', 'main_account_code', 'program_activity_name', 'program_activity_code'} actual = set(query_columns(_FILE, database)) assert (actual & expected_subset) == expected_subset def test_success(database): """ Testing valid program activity name for the corresponding TAS/TAFS as defined in Section 82 of OMB Circular A-11. """ af_1 = AwardFinancialFactory(row_number=1, agency_identifier='test', main_account_code='test', program_activity_name='test', program_activity_code='test') af_2 = AwardFinancialFactory(row_number=2, agency_identifier='test', main_account_code='test', program_activity_name='test', program_activity_code='test') pa = ProgramActivityFactory(budget_year=2016, agency_id='test', allocation_transfer_id='test', account_number='test', program_activity_name='test', program_activity_code='test') assert number_of_errors(_FILE, database, models=[af_1, af_2, pa]) == 0 def test_success_null(database): """Program activity name/code as null""" af = AwardFinancialFactory(row_number=1, agency_identifier='test', main_account_code='test', program_activity_name=None, program_activity_code=None) pa = ProgramActivityFactory(budget_year=2016, agency_id='test', allocation_transfer_id='test', account_number='test') assert number_of_errors(_FILE, database, models=[af, pa]) == 0 def test_success_fiscal_year(database): """ Testing valid name for FY that matches with budget_year""" af_1 = AwardFinancialFactory(row_number=1, submission_id='1', agency_identifier='test', main_account_code='test', program_activity_name='test', program_activity_code='test') af_2 = 
AwardFinancialFactory(row_number=1, submission_id='1', agency_identifier='test2', main_account_code='test2', program_activity_name='test2', program_activity_code='test2') pa_1 = ProgramActivityFactory(budget_year=2016, agency_id='test', allocation_transfer_id='test', account_number='test', program_activity_name='test', program_activity_code='test') pa_2 = ProgramActivityFactory(budget_year=2017, agency_id='test2', allocation_transfer_id='test2', account_number='test2', program_activity_name='test2', program_activity_code='test2') submission = SubmissionFactory(submission_id='1', reporting_fiscal_year='2017') assert number_of_errors(_FILE, database, models=[af_1, af_2, pa_1, pa_2], submission=submission) == 0 def test_failure_fiscal_year(database): """ Testing invalid name for FY, not matches with budget_year""" af = AwardFinancialFactory(row_number=1, submission_id='1', agency_identifier='test4', main_account_code='test4', program_activity_name='test4', program_activity_code='test4') pa_1 = ProgramActivityFactory(budget_year=2016, agency_id='test', allocation_transfer_id='test', account_number='test', program_activity_name='test', program_activity_code='test') pa_2 = ProgramActivityFactory(budget_year=2017, agency_id='test2', allocation_transfer_id='test2', account_number='test2', program_activity_name='test2', program_activity_code='test2') pa_3 = ProgramActivityFactory(budget_year=2018, agency_id='test3', allocation_transfer_id='test3', account_number='test3', program_activity_name='test3', program_activity_code='test3') pa_4 = ProgramActivityFactory(budget_year=2019, agency_id='test4', allocation_transfer_id='test4', account_number='test4', program_activity_name='test4', program_activity_code='test4') submission = SubmissionFactory(submission_id='1', reporting_fiscal_year='2017') assert number_of_errors(_FILE, database, models=[af, pa_1, pa_2, pa_3, pa_4], submission=submission) == 1 def test_success_ignore_case(database): """ Testing program activity validation 
to ignore case """ af = AwardFinancialFactory(row_number=1, agency_identifier='test', main_account_code='test', program_activity_name='TEST', program_activity_code='test') pa = ProgramActivityFactory(budget_year=2016, agency_id='test', allocation_transfer_id='test', account_number='test', program_activity_name='test', program_activity_code='test') assert number_of_errors(_FILE, database, models=[af, pa]) == 0 def test_failure_program_activity_name(database): """ Testing invalid program activity name for the corresponding TAS/TAFS as defined in Section 82 of OMB Circular A-11. """ af_1 = AwardFinancialFactory(row_number=1, agency_identifier='test', main_account_code='test', program_activity_name='test_wrong', program_activity_code='test') af_2 = AwardFinancialFactory(row_number=1, agency_identifier='test', main_account_code='test', program_activity_name='test_wrong', program_activity_code='0000') pa = ProgramActivityFactory(budget_year=2016, agency_id='test', allocation_transfer_id='test', account_number='test', program_activity_name='test', program_activity_code='test') assert number_of_errors(_FILE, database, models=[af_1, af_2, pa]) == 1 def test_failure_program_activity_code(database): """Failure where the program _activity_code does not match""" af_1 = AwardFinancialFactory(row_number=1, agency_identifier='test', main_account_code='test', program_activity_name='test', program_activity_code='test_wrong') af_2 = AwardFinancialFactory(row_number=1, agency_identifier='test', main_account_code='test', program_activity_name='Unknown/Other', program_activity_code='12345') pa = ProgramActivityFactory(budget_year=2016, agency_id='test', allocation_transfer_id='test', account_number='test', program_activity_name='test', program_activity_code='test') assert number_of_errors(_FILE, database, models=[af_1, af_2, pa]) == 1 def test_success_null_program_activity(database): """program activity name/code as null""" af = AwardFinancialFactory(row_number=1, 
agency_identifier='test_wrong', main_account_code='test', program_activity_name=None, program_activity_code=None) pa = ProgramActivityFactory(budget_year=2016, agency_id='test', allocation_transfer_id='test', account_number='test') assert number_of_errors(_FILE, database, models=[af, pa]) == 0
52.034722
119
0.680101
859
7,493
5.571595
0.119907
0.172378
0.111158
0.076891
0.775387
0.731718
0.731091
0.731091
0.731091
0.680944
0
0.026469
0.218471
7,493
143
120
52.398601
0.790813
0.069798
0
0.47561
0
0
0.083719
0.006073
0
0
0
0
0.109756
1
0.109756
false
0
0.04878
0
0.158537
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
7a58e81c41213397323d2016af44248b99f994ce
15,107
py
Python
machine.py
rkronberg/ml-project
3129beb4ad1c92d7813df1611ccebdeca591b513
[ "MIT" ]
null
null
null
machine.py
rkronberg/ml-project
3129beb4ad1c92d7813df1611ccebdeca591b513
[ "MIT" ]
null
null
null
machine.py
rkronberg/ml-project
3129beb4ad1c92d7813df1611ccebdeca591b513
[ "MIT" ]
null
null
null
''' Optimal hyperparameters for CM + Laplacian kernel Ea: alpha 1e-11, gamma 1e-4 polarizability: alpha 1e-3, gamma 1e-4 HOMO-LUMO gap: alpha 1e-2, gamma 1e-4 Dipole moment: alpha 1e-1, gamma 1e-3 Optimal hyperparameters for BoB + Laplacian kernel Ea: alpha 1e-11, gamma 1e-5 polarizability: alpha 1e-3, gamma 1e-4 HOMO-LUMO gap: alpha 1e-3, gamma 1e-4 Dipole moment: alpha 1e-1, gamma 1e-3 Optimal hyperparameters for MBTR + Gaussian kernel Ea: alpha 1e-7, gamma 1e-8 polarizability: alpha 1e-6, gamma 1e-7 HOMO-LUMO gap: alpha 1e-3, gamma 1e-6 Dipole moment: alpha 1e-2, gamma 1e-5 Results for CM + Laplacian kernel Ea: MAE 0.38, RMSE 0.55, R2 0.9977 polarizability: MAE 0.12, RMSE 0.18, R2 0.9828 HOMO-LUMO gap: MAE 0.56, RMSE 0.70, R2 0.7203 Dipole moment: MAE 0.14, RMSE 0.19, R2 0.5901 Results for BoB + Laplacian kernel Ea: MAE 0.08, RMSE 0.13, R2 0.9998 polarizability: MAE 0.06, RMSE 0.09, R2 0.9952 HOMO-LUMO gap: MAE 0.23, RMSE 0.31, R2 0.9465 Dipole moment: MAE 0.11, RMSE 0.16, R2 0.7327 Results for MBTR + Gaussian kernel Ea: MAE 0.04, RMSE 0.06, R2 0.9999 polarizability: MAE 0.02, RMSE 0.04, R2 0.9993 HOMO-LUMO gap: MAE 0.17, RMSE 0.23, R2 0.9686 Dipole moment: MAE 0.08, RMSE 0.11, R2 0.8508 ''' import numpy as np import matplotlib.pyplot as plt import os from sklearn.kernel_ridge import KernelRidge from sklearn.model_selection import GridSearchCV, train_test_split from sklearn.metrics import mean_absolute_error as MAE from sklearn.metrics import mean_squared_error as MSE from sklearn.metrics import r2_score as R2 from scipy.special import comb from itertools import combinations, permutations ## This part of the code reads the raw data (.xyz files) and returns the central quantities stored in arrays def preprocess(datasize,atoms): # Selects all molecules with 7 or fewer non-H atoms (3963) and (datasize - 3963) molecules with 8 non-H atoms at random. 
# This compensates the underrepresentation of small molecules (molecules with 9 non-H atoms are excluded) ind = np.concatenate((np.arange(1,3964),np.random.randint(3964,21989,size=datasize-3963))) # Initialize the variables as empty lists # natoms = number of atoms in a given molecule # nonHatoms = number of non-H atoms in a given molecule 21989 # Ea = Atomization energy (Ha) # dipmom = Dipole moment (D) # polar = Isotropic polarizability (bohr^3) # atomlist = list of the atoms constituting a given molecule (e.g. ['C','H','H','H'] for methane) # coords = xyz coordinates of each atom in a given molecule natoms,nonHatoms,Ea,polar,dipmom,gap,atomlist,coords=[],[],[],[],[],[],[],[] # Energies (Ha) of single atoms [H,C,N,O,F] atomref=[-0.500273,-37.846772,-54.583861,-75.064579,-99.718730] # Loop over all selected indices (molecules) for i in ind: # Initialize list that will contain coordinates and element types of ith molecule xyz,elemtype,mulliken,nnonH=[],[],[],0 # This pads the index with zeros so that all contain 6 digits (e.g. index 41 -> 000041) i = str(i).zfill(6) # Define the path to the .xyz file of ith molecule. Here it is assumed that the dataset is stored in a # subdirectory "xyz" within the one containing machine.py # xyz/*.xyz fpath = os.path.join('xyz',"dsgdb9nsd_%s.xyz" % i) # Open the file and loop over the lines with open(fpath) as f: for j, line in enumerate(f): if j == 0: # Number of atoms in molecule na = int(line) natoms.append(na) elif j == 1: # Properties written on second line. 
Atomization energy, dipole moment, polarizability, HOMO-LUMO gap E = float(line.split()[12]) dipmom.append(float(line.split()[5])*0.20819) polar.append(float(line.split()[6])*0.14818) gap.append(float(line.split()[9])*27.21139) elif 2 <= j <= na+1: # Lines 2 -> na+1 contains element types, coordinates and charges parts = line.split() # Index 0 = element type, 1 = x, 2 = y, 3 = z elemtype.append(parts[0]) # Subtract energy of isolated atom from total energy E = E - atomref[atoms.index(parts[0])] if parts[0] != 'H': nnonH += 1 xyz.append(np.array([float(parts[1]),float(parts[2]),float(parts[3])])) Ea.append(-E*27.21139) atomlist.append(elemtype) coords.append(xyz) nonHatoms.append(nnonH) # Return all lists in the form of numpy arrays return np.array(natoms),np.array(Ea),np.array(dipmom),np.array(polar),np.array(gap), \ np.array(atomlist),np.array(coords),np.array(nonHatoms) def gauss(x,weight,sigma,mu): return weight/(sigma*np.sqrt(2*np.pi))*np.exp(-((x-mu)**2)/(2*sigma**2)) # The many-body tensor representation (MBTR) descriptor def mbtr(atomlist,coords,atoms,Z): # Decay factor (d) and sigmas are roughly optimal d=0.5 w1=1 sigma1,sigma2,sigma3=0.1,0.01,0.05 x1=np.linspace(0,10,201) x2=np.linspace(0,1.25,201) x3=np.linspace(-1,1,201) mbtr_output=[] atoms = list(set([''.join(p) for p in combinations('CHONF',1)])) pairs = list(set([''.join(p) for p in combinations('CCHHOONNFF',2)])) triples = list(set([''.join(p) for p in permutations('CCCHHHOOONNNFFF',3)])) for i in range(len(atomlist)): bag1=dict((k,np.zeros(len(x1))) for k in atoms) bag2=dict((k,np.zeros(len(x2))) for k in pairs) bag3=dict((k,np.zeros(len(x3))) for k in triples) MBTRvec=np.array([]) for j in range(len(atomlist[i])): g1=Z[atoms.index(atomlist[i][j])] bag1[atomlist[i][j]]+=gauss(x1,w1,sigma1,g1) for k in range(len(atomlist[i])): if k > j: Rjk=np.linalg.norm(coords[i][j]-coords[i][k]) w2=np.exp(-d*Rjk) g2=1/Rjk try: bag2[atomlist[i][j]+atomlist[i][k]]+=gauss(x2,w2,sigma2,g2) except KeyError: 
bag2[atomlist[i][k]+atomlist[i][j]]+=gauss(x2,w2,sigma2,g2) for l in range(len(atomlist[i])): if l > k: Rjl=np.linalg.norm(coords[i][j]-coords[i][l]) Rkl=np.linalg.norm(coords[i][k]-coords[i][l]) w3=np.exp(-d*(Rjk+Rjl+Rkl)) g3=np.dot(coords[i][j]-coords[i][l],coords[i][k]-coords[i][l])/(Rjl*Rkl) try: bag3[atomlist[i][j]+atomlist[i][l]+atomlist[i][k]]+=gauss(x3,w3,sigma3,g3) except KeyError: bag3[atomlist[i][k]+atomlist[i][l]+atomlist[i][j]]+=gauss(x3,w3,sigma3,g3) for atom in bag1: MBTRvec = np.concatenate((MBTRvec,bag1[atom])) for pair in bag2: MBTRvec = np.concatenate((MBTRvec,bag2[pair])) for triple in bag3: MBTRvec = np.concatenate((MBTRvec,bag3[triple])) mbtr_output.append(MBTRvec) return mbtr_output ## The bag-of-bonds (BOB) descriptor def bob(atomlist,coords,atoms,Z): bob_output = [] # 18 H atoms in octane -> comb(18,2) H-H pairs (max. size of a bond vector in a bag of bonds) dim = int(comb(18,2)) perms = list(set([''.join(p) for p in combinations('CCHHOONNFF',2)])) for i in range(len(atomlist)): bag=dict((k,dim*[0]) for k in perms) BoBvec = np.array([]) for j in range(len(atomlist[i])): for k in range(len(atomlist[i])): if j > k: try: bag[atomlist[i][j]+atomlist[i][k]].insert(0,Z[atoms.index(atomlist[i][j])]* \ Z[atoms.index(atomlist[i][k])]/np.linalg.norm(coords[i][j]-coords[i][k])) del bag[atomlist[i][j]+atomlist[i][k]][-1] except KeyError: bag[atomlist[i][k]+atomlist[i][j]].insert(0,Z[atoms.index(atomlist[i][j])]* \ Z[atoms.index(atomlist[i][k])]/np.linalg.norm(coords[i][j]-coords[i][k])) # Avoid KeyError raised by "wrong" order of atoms in a bond (e.g. 
'CH' -> 'HC') del bag[atomlist[i][k]+atomlist[i][j]][-1] for pair in bag: BoBvec = np.concatenate((BoBvec,np.array(sorted(bag[pair],reverse=True)))) bob_output.append(BoBvec) return bob_output ## The following function takes the number of atoms in each molecule, the atom types and corresponding coordinates ## and returns an array of corresponding Coulomb matrices (CM) def coulomb(natoms,atomlist,coords,atoms,Z): # Specify the dimensions of the Coulomb matrices based on the largest molecule dim = natoms.max() # Initialize an array of all Coulomb matrices CM = np.zeros((len(natoms),dim,dim)) CMvec = [] # Loop over all molecules for i in range(len(natoms)): for j in range(len(atomlist[i])): # Loop over all atom pairs (j,k) in molecule i for k in range(len(atomlist[i])): if j == k: CM[i][j][k] = 0.5*Z[atoms.index(atomlist[i][j])]**2.4 else: CM[i][j][k] = Z[atoms.index(atomlist[i][j])]*Z[atoms.index(atomlist[i][k])]/ \ np.linalg.norm(coords[i][j]-coords[i][k]) # Sort Coulomb matrix according to descending row norm # Get the indices in the sorted order indexlist = np.argsort(-np.linalg.norm(CM[i],axis=1)) # Rearrange the matrix CM[i] = CM[i][indexlist] # Convert the lower triangular matrix into a vector and append to a list of Coulomb 'vectors' CMvec.append(CM[i][np.tril_indices(dim,k=0)]) return CMvec ## Do grid search (if optimal hyperparameters are not known), then training and prediction using KRR ## If doing grid search for optimal parameters use small training set size, like 1k (takes forever otherwise) def krr(x,y,nonHatoms): inp4 = input('Do grid search for optimal hyperparameters? [True/False]\n') if inp4 == True: inp5 = raw_input('Provide kernel. 
[laplacian/rbf]\n').split() x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.9,stratify=nonHatoms) kr = GridSearchCV(KernelRidge(kernel=inp5[0]),cv=5,param_grid={"alpha": np.logspace(-11,-1,11), \ "gamma": np.logspace(-9,-3,7)}) kr.fit(x_train,y_train) print(kr.best_params_) elif inp4 == False: inp5 = raw_input('Provide kernel and hyperparameters. [kernel alpha gamma]\n').split() x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.1,stratify=nonHatoms) kr = KernelRidge(kernel=inp5[0],alpha=float(inp5[1]),gamma=float(inp5[2])) kr.fit(x_train,y_train) y_pred = kr.predict(x_test) mae = MAE(y_test,y_pred) rmse = np.sqrt(MSE(y_test,y_pred)) r2 = R2(y_test,y_pred) # Print mean absolute error and root mean squared error print('Mean absolute error: ' + repr(mae) + ', Root mean squared error: ' + repr(rmse) + \ ', R2-score: ' + repr(r2)) return y_pred,y_test def learning_curve(x,y,nonHatoms): # Do training with different sample sizes and see how the MAE behaves (learning curve) inp5 = raw_input('Provide kernel and hyperparameters. 
[kernel alpha gamma]\n').split() mae,rmse,r2=[],[],[] sample_sizes = [50,200,1000,3000,9000] kr = KernelRidge(kernel=inp5[0],alpha=float(inp5[1]),gamma=float(inp5[2])) for i in sample_sizes: x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=1-float(i)/len(y),stratify=nonHatoms) kr.fit(x_train,y_train) y_pred = kr.predict(x_test) mae.append(MAE(y_test,y_pred)) rmse.append(np.sqrt(MSE(y_test,y_pred))) r2.append(R2(y_test,y_pred)) print('Mean absolute error: ' + repr(mae[-1]) + ', Root mean squared error: ' \ + repr(rmse[-1]) + ', R2-score: ' + repr(r2[-1])) return y_pred,y_test,mae,rmse,sample_sizes ## The main routine and plotting def main(): # Just some plot settings plt.rc('text', usetex=True) plt.rc('font', family='serif', size=14) plt.rc('xtick', direction='in') # Preprocess data datasize=10000 atoms = ['H','C','N','O','F'] Z = [1,6,7,8,9] natoms,Ea,dipmom,polar,gap,atomlist,coords,nonHatoms = preprocess(datasize,atoms) inp1 = raw_input('Which descriptor? [CM/BoB/MBTR]\n') if inp1 == 'CM': descriptor = coulomb(natoms,atomlist,coords,atoms,Z) elif inp1 == 'BoB': descriptor = bob(atomlist,coords,atoms,Z) elif inp1 == 'MBTR': descriptor = mbtr(atomlist,coords,atoms,Z) inp2 = raw_input('Which property? [Ea/gap/polar/dipmom]\n') plt.figure() if inp2 == 'Ea': prop = Ea plt.title(r'Atomization energy (eV)') plt.xlabel(r'$\Delta_\mathrm{at}E^\mathrm{DFT}$ (eV)') plt.ylabel(r'$\Delta_\mathrm{at}E^\mathrm{KRR}$ (eV)') elif inp2 == 'gap': prop = gap plt.title(r'HOMO-LUMO gap (eV)') plt.xlabel(r'$\Delta\varepsilon^\mathrm{DFT}$ (eV)') plt.ylabel(r'$\Delta\varepsilon^\mathrm{KRR}$ (eV)') elif inp2 == 'polar': prop = polar plt.title(r'Isotropic polarizability (\r{A}$^3$)') plt.xlabel(r'$\alpha^\mathrm{DFT}$ (\r{A}$^3$)') plt.ylabel(r'$\alpha^\mathrm{KRR}$ (\r{A}$^3$)') elif inp2 == 'dipmom': prop = dipmom plt.title(r'Dipole moment (e\r{A})') plt.xlabel(r'$\mu^\mathrm{DFT}$ (e\r{A})') plt.ylabel(r'$\mu^\mathrm{KRR}$ (e\r{A})') inp3 = input('Plot learning curve? 
[True/False]\n') if inp3 == True: # Train y_pred,y_test,mae,rmse,sample_sizes=learning_curve(descriptor,prop,nonHatoms) np.savetxt('dipmom_BoB.dat',np.c_[y_test,y_pred]) np.savetxt('dipmom_BoB_lc.dat',np.c_[sample_sizes,mae]) # Plot learning curve plt.semilogx(sample_sizes,mae,'o-',color='blue') plt.xlabel(r'Training set size') plt.ylabel(r'MAE') elif inp3 == False: # Train y_pred,y_test=krr(descriptor,prop,nonHatoms) #Plot results plt.plot(y_test,y_pred,'.',color='blue') plt.plot(np.linspace(y_test.min(),y_test.max(),1000),np.linspace(y_test.min(),y_test.max(),1000),'k--') plt.show() if __name__ == '__main__': main()
41.05163
131
0.585358
2,235
15,107
3.908725
0.205369
0.035027
0.016026
0.018544
0.330243
0.252518
0.196314
0.175137
0.143887
0.119277
0
0.048628
0.271728
15,107
367
132
41.163488
0.74541
0.284702
0
0.116279
0
0
0.092924
0.018343
0
0
0
0
0
1
0.037209
false
0
0.046512
0.004651
0.116279
0.013953
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a59ef54c76376084770c691cbba94348cea232b
244
py
Python
Core/CoreException.py
gengyuming/Foresee
78b61c276a4e859e996c9ec39040f2145d16b49b
[ "Apache-2.0" ]
null
null
null
Core/CoreException.py
gengyuming/Foresee
78b61c276a4e859e996c9ec39040f2145d16b49b
[ "Apache-2.0" ]
null
null
null
Core/CoreException.py
gengyuming/Foresee
78b61c276a4e859e996c9ec39040f2145d16b49b
[ "Apache-2.0" ]
null
null
null
from Core.Logger import log class MethodException(Exception): def __init__(self, method): self.method = method log('Request method error, not support {} method, please choose ["GET", "POST","PUT","DELETE"]'.format(method))
34.857143
119
0.680328
30
244
5.4
0.766667
0.123457
0
0
0
0
0
0
0
0
0
0
0.180328
244
7
119
34.857143
0.81
0
0
0
0
0.2
0.363265
0.089796
0
0
0
0
0
1
0.2
false
0
0.2
0
0.6
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
2
7a5a3364c51511dc6143a8fdb73fad93ea8e274b
981
py
Python
allegro_notify/soap/response/__init__.py
marlowww/AllegroNotify
5fa8581debacbfa6e28cb2f3a0e6c177c88bc724
[ "MIT" ]
null
null
null
allegro_notify/soap/response/__init__.py
marlowww/AllegroNotify
5fa8581debacbfa6e28cb2f3a0e6c177c88bc724
[ "MIT" ]
null
null
null
allegro_notify/soap/response/__init__.py
marlowww/AllegroNotify
5fa8581debacbfa6e28cb2f3a0e6c177c88bc724
[ "MIT" ]
null
null
null
from rinse import NS_MAP from rinse.util import safe_parse_string from soap import SoapFault class SoapResponse(): def __init__(self, response): self._response = response # Parse response try: self._doc = safe_parse_string(response.content) self._body = self._doc.xpath( "/soapenv:Envelope/soapenv:Body", namespaces=NS_MAP)[0] except: raise SoapFault("ResponseParseError", "Cannot parse response") self._fault = self._body.find("soapenv:Fault", NS_MAP) if self._fault is not None: raise SoapFault(self._fault.find("faultcode").text, self._fault.find("faultstring").text) # Get and set Allegro API namespaces self._ns = NS_MAP.copy() for i, v in enumerate(self._doc.nsmap.values()): if v != NS_MAP["soapenv"]: self._ns["ns{}".format(i)] = v from soap.response.item_list import *
31.645161
74
0.607543
119
981
4.789916
0.462185
0.04386
0.052632
0
0
0
0
0
0
0
0
0.001425
0.284404
981
30
75
32.7
0.810541
0.049949
0
0
0
0
0.121636
0.032293
0
0
0
0
0
1
0.047619
false
0
0.190476
0
0.285714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a5b5fa3f82709fc1c5b8a1b0be0a9e5c031ce11
16,969
py
Python
ecn.py
asappinc/emergent_comms_negotiation
19ad405dcb83a3a521b6e1752cec075b69aa164b
[ "MIT" ]
9
2020-03-04T13:24:25.000Z
2022-03-15T09:52:37.000Z
ecn.py
asappinc/emergent_comms_negotiation
19ad405dcb83a3a521b6e1752cec075b69aa164b
[ "MIT" ]
2
2019-12-30T07:28:33.000Z
2020-10-13T11:38:34.000Z
ecn.py
asappinc/emergent_comms_negotiation
19ad405dcb83a3a521b6e1752cec075b69aa164b
[ "MIT" ]
6
2018-03-15T18:08:45.000Z
2019-07-15T06:49:16.000Z
import json import time import argparse import os import datetime from os import path import numpy as np import torch from torch import autograd, optim, nn from torch.autograd import Variable import torch.nn.functional as F import nets import sampling import rewards_lib import alive_sieve def render_action(t, s, prop, term): agent = t % 2 speaker = 'A' if agent == 0 else 'B' utility = s.utilities[:, agent] print(' ', end='') if speaker == 'B': print(' ', end='') if term[0][0]: print(' ACC') else: print(' ' + ''.join([str(v) for v in s.m_prev[0].view(-1).tolist()]), end='') print(' %s:%s/%s %s:%s/%s %s:%s/%s' % ( utility[0][0], prop[0][0], s.pool[0][0], utility[0][1], prop[0][1], s.pool[0][1], utility[0][2], prop[0][2], s.pool[0][2], ), end='') print('') if t + 1 == s.N[0]: print(' [out of time]') def save_model(model_file, agent_models, agent_opts, start_time, episode): state = {} for i in range(2): state['agent%s' % i] = {} state['agent%s' % i]['model_state'] = agent_models[i].state_dict() state['agent%s' % i]['opt_state'] = agent_opts[i].state_dict() state['episode'] = episode state['elapsed_time'] = time.time() - start_time with open(model_file + '.tmp', 'wb') as f: torch.save(state, f) os.rename(model_file + '.tmp', model_file) def load_model(model_file, agent_models, agent_opts): with open(model_file, 'rb') as f: state = torch.load(f) for i in range(2): agent_models[i].load_state_dict(state['agent%s' % i]['model_state']) agent_opts[i].load_state_dict(state['agent%s' % i]['opt_state']) episode = state['episode'] # create a kind of 'virtual' start_time start_time = time.time() - state['elapsed_time'] return episode, start_time class State(object): def __init__(self, N, pool, utilities): batch_size = N.size()[0] self.N = N self.pool = pool self.utilities = torch.zeros(batch_size, 2, 3).long() self.utilities[:, 0] = utilities[0] self.utilities[:, 1] = utilities[1] self.last_proposal = torch.zeros(batch_size, 3).long() self.m_prev = torch.zeros(batch_size, 
6).long() def cuda(self): self.N = self.N.cuda() self.pool = self.pool.cuda() self.utilities = self.utilities.cuda() self.last_proposal = self.last_proposal.cuda() self.m_prev = self.m_prev.cuda() def sieve_(self, still_alive_idxes): self.N = self.N[still_alive_idxes] self.pool = self.pool[still_alive_idxes] self.utilities = self.utilities[still_alive_idxes] self.last_proposal = self.last_proposal[still_alive_idxes] self.m_prev = self.m_prev[still_alive_idxes] def run_episode( batch, enable_cuda, enable_comms, enable_proposal, prosocial, agent_models, # batch_size, testing, render=False): """ turning testing on means, we disable stochasticity: always pick the argmax """ type_constr = torch.cuda if enable_cuda else torch batch_size = batch['N'].size()[0] s = State(**batch) if enable_cuda: s.cuda() sieve = alive_sieve.AliveSieve(batch_size=batch_size, enable_cuda=enable_cuda) actions_by_timestep = [] alive_masks = [] # next two tensofrs wont be sieved, they will stay same size throughout # entire batch, we will update them using sieve.out_idxes[...] 
rewards = type_constr.FloatTensor(batch_size, 3).fill_(0) num_steps = type_constr.LongTensor(batch_size).fill_(10) term_matches_argmax_count = 0 utt_matches_argmax_count = 0 utt_stochastic_draws = 0 num_policy_runs = 0 prop_matches_argmax_count = 0 prop_stochastic_draws = 0 entropy_loss_by_agent = [ Variable(type_constr.FloatTensor(1).fill_(0)), Variable(type_constr.FloatTensor(1).fill_(0)) ] if render: print(' ') for t in range(10): agent = t % 2 agent_model = agent_models[agent] if enable_comms: _prev_message = s.m_prev else: # we dont strictly need to blank them, since they'll be all zeros anyway, # but defense in depth and all that :) _prev_message = type_constr.LongTensor(sieve.batch_size, 6).fill_(0) if enable_proposal: _prev_proposal = s.last_proposal else: # we do need to blank this one though :) _prev_proposal = type_constr.LongTensor(sieve.batch_size, 3).fill_(0) nodes, term_a, s.m_prev, this_proposal, _entropy_loss, \ _term_matches_argmax_count, _utt_matches_argmax_count, _utt_stochastic_draws, \ _prop_matches_argmax_count, _prop_stochastic_draws = agent_model( pool=Variable(s.pool), utility=Variable(s.utilities[:, agent]), m_prev=Variable(s.m_prev), prev_proposal=Variable(_prev_proposal), testing=testing ) entropy_loss_by_agent[agent] += _entropy_loss actions_by_timestep.append(nodes) term_matches_argmax_count += _term_matches_argmax_count num_policy_runs += sieve.batch_size utt_matches_argmax_count += _utt_matches_argmax_count utt_stochastic_draws += _utt_stochastic_draws prop_matches_argmax_count += _prop_matches_argmax_count prop_stochastic_draws += _prop_stochastic_draws if render and sieve.out_idxes[0] == 0: render_action( t=t, s=s, term=term_a, prop=this_proposal ) new_rewards = rewards_lib.calc_rewards( t=t, s=s, term=term_a ) rewards[sieve.out_idxes] = new_rewards s.last_proposal = this_proposal sieve.mark_dead(term_a) sieve.mark_dead(t + 1 >= s.N) alive_masks.append(sieve.alive_mask.clone()) sieve.set_dead_global(num_steps, t + 1) if 
sieve.all_dead(): break s.sieve_(sieve.alive_idxes) sieve.self_sieve_() if render: print(' r: %.2f' % rewards[0].mean()) print(' ') return actions_by_timestep, rewards, num_steps, alive_masks, entropy_loss_by_agent, \ term_matches_argmax_count, num_policy_runs, utt_matches_argmax_count, utt_stochastic_draws, \ prop_matches_argmax_count, prop_stochastic_draws def safe_div(a, b): """ returns a / b, unless b is zero, in which case returns 0 this is primarily for usage in cases where b might be systemtically zero, eg because comms are disabled or similar """ return 0 if b == 0 else a / b def run(enable_proposal, enable_comms, seed, prosocial, logfile, model_file, batch_size, term_entropy_reg, utterance_entropy_reg, proposal_entropy_reg, enable_cuda, no_load, testing, test_seed, render_every_seconds): """ testing option will: - use argmax, ie disable stochastic draws - not run optimizers - not save model """ type_constr = torch.cuda if enable_cuda else torch if seed is not None: np.random.seed(seed) torch.manual_seed(seed) train_r = np.random.RandomState(seed) else: train_r = np.random test_r = np.random.RandomState(test_seed) test_batches = sampling.generate_test_batches(batch_size=batch_size, num_batches=5, random_state=test_r) test_hashes = sampling.hash_batches(test_batches) episode = 0 start_time = time.time() agent_models = [] agent_opts = [] for i in range(2): model = nets.AgentModel( enable_comms=enable_comms, enable_proposal=enable_proposal, term_entropy_reg=term_entropy_reg, utterance_entropy_reg=utterance_entropy_reg, proposal_entropy_reg=proposal_entropy_reg ) if enable_cuda: model = model.cuda() agent_models.append(model) agent_opts.append(optim.Adam(params=agent_models[i].parameters())) if path.isfile(model_file) and not no_load: episode, start_time = load_model( model_file=model_file, agent_models=agent_models, agent_opts=agent_opts) print('loaded model') elif testing: print('') print('ERROR: must have loadable model to use --testing option') print('') 
return last_print = time.time() rewards_sum = type_constr.FloatTensor(3).fill_(0) steps_sum = 0 count_sum = 0 for d in ['logs', 'model_saves']: if not path.isdir(d): os.makedirs(d) f_log = open(logfile, 'w') f_log.write('meta: %s\n' % json.dumps({ 'enable_proposal': enable_proposal, 'enable_comms': enable_comms, 'prosocial': prosocial, 'seed': seed })) last_save = time.time() baseline = type_constr.FloatTensor(3).fill_(0) term_matches_argmax_count = 0 num_policy_runs = 0 utt_matches_argmax_count = 0 utt_stochastic_draws = 0 prop_matches_argmax_count = 0 prop_stochastic_draws = 0 while True: render = time.time() - last_print >= render_every_seconds # render = True batch = sampling.generate_training_batch(batch_size=batch_size, test_hashes=test_hashes, random_state=train_r) actions, rewards, steps, alive_masks, entropy_loss_by_agent, \ _term_matches_argmax_count, _num_policy_runs, _utt_matches_argmax_count, _utt_stochastic_draws, \ _prop_matches_argmax_count, _prop_stochastic_draws = run_episode( batch=batch, enable_cuda=enable_cuda, enable_comms=enable_comms, enable_proposal=enable_proposal, agent_models=agent_models, prosocial=prosocial, # batch_size=batch_size, render=render, testing=testing) term_matches_argmax_count += _term_matches_argmax_count utt_matches_argmax_count += _utt_matches_argmax_count utt_stochastic_draws += _utt_stochastic_draws num_policy_runs += _num_policy_runs prop_matches_argmax_count += _prop_matches_argmax_count prop_stochastic_draws += _prop_stochastic_draws if not testing: for i in range(2): agent_opts[i].zero_grad() reward_loss_by_agent = [0, 0] baselined_rewards = rewards - baseline rewards_by_agent = [] for i in range(2): if prosocial: rewards_by_agent.append(baselined_rewards[:, 2]) else: rewards_by_agent.append(baselined_rewards[:, i]) sieve_playback = alive_sieve.SievePlayback(alive_masks, enable_cuda=enable_cuda) for t, global_idxes in sieve_playback: agent = t % 2 if len(actions[t]) > 0: for action in actions[t]: _rewards = 
rewards_by_agent[agent] _reward = _rewards[global_idxes].float().contiguous().view( sieve_playback.batch_size, 1) _reward_loss = - (action * Variable(_reward)) _reward_loss = _reward_loss.sum() reward_loss_by_agent[agent] += _reward_loss for i in range(2): loss = entropy_loss_by_agent[i] + reward_loss_by_agent[i] loss.backward() agent_opts[i].step() rewards_sum += rewards.sum(0) steps_sum += steps.sum() baseline = 0.7 * baseline + 0.3 * rewards.mean(0) count_sum += batch_size if render: """ run the test batches, print the results """ test_rewards_sum = 0 for test_batch in test_batches: actions, test_rewards, steps, alive_masks, entropy_loss_by_agent, \ _term_matches_argmax_count, _num_policy_runs, _utt_matches_argmax_count, _utt_stochastic_draws, \ _prop_matches_argmax_count, _prop_stochastic_draws = run_episode( batch=test_batch, enable_cuda=enable_cuda, enable_comms=enable_comms, enable_proposal=enable_proposal, agent_models=agent_models, prosocial=prosocial, render=True, testing=True) test_rewards_sum += test_rewards[:, 2].mean() print('test reward=%.3f' % (test_rewards_sum / len(test_batches))) time_since_last = time.time() - last_print if prosocial: baseline_str = '%.2f' % baseline[2] # rewards_str = '%.2f' % (rewards_sum[2] / count_sum) else: baseline_str = '%.2f,%.2f' % (baseline[0], baseline[1]) rewards_str = '%.2f,%.2f,%.2f' % (rewards_sum[0] / count_sum, rewards_sum[1] / count_sum, rewards_sum[2] / count_sum) print('e=%s train=%s b=%s games/sec %s avg steps %.4f argmaxp term=%.4f utt=%.4f prop=%.4f' % ( episode, rewards_str, baseline_str, int(count_sum / time_since_last), steps_sum / count_sum, term_matches_argmax_count / num_policy_runs, safe_div(utt_matches_argmax_count, utt_stochastic_draws), prop_matches_argmax_count / prop_stochastic_draws )) f_log.write(json.dumps({ 'episode': episode, 'avg_reward_0': rewards_sum[2] / count_sum, 'test_reward': test_rewards_sum / len(test_batches), 'avg_steps': steps_sum / count_sum, 'games_sec': count_sum / 
time_since_last, 'elapsed': time.time() - start_time, 'argmaxp_term': (term_matches_argmax_count / num_policy_runs), 'argmaxp_utt': safe_div(utt_matches_argmax_count, utt_stochastic_draws), 'argmaxp_prop': (prop_matches_argmax_count / prop_stochastic_draws) }) + '\n') f_log.flush() last_print = time.time() steps_sum = 0 rewards_sum.fill_(0) term_matches_argmax_count = 0 num_policy_runs = 0 utt_matches_argmax_count = 0 utt_stochastic_draws = 0 prop_matches_argmax_count = 0 prop_stochastic_draws = 0 count_sum = 0 if not testing and time.time() - last_save >= 30.0: save_model( model_file=model_file, agent_models=agent_models, agent_opts=agent_opts, start_time=start_time, episode=episode) print('saved model') last_save = time.time() episode += 1 f_log.close() if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--model-file', type=str, default='model_saves/model.dat') parser.add_argument('--batch-size', type=int, default=128) parser.add_argument('--test-seed', type=int, default=123, help='used for generating test game set') parser.add_argument('--seed', type=int, help='optional') parser.add_argument('--term-entropy-reg', type=float, default=0.05) parser.add_argument('--utterance-entropy-reg', type=float, default=0.001) parser.add_argument('--proposal-entropy-reg', type=float, default=0.05) parser.add_argument('--disable-proposal', action='store_true') parser.add_argument('--disable-comms', action='store_true') parser.add_argument('--disable-prosocial', action='store_true') parser.add_argument('--render-every-seconds', type=int, default=30) parser.add_argument('--testing', action='store_true', help='turn off learning; always pick argmax') parser.add_argument('--enable-cuda', action='store_true') parser.add_argument('--no-load', action='store_true') parser.add_argument('--name', type=str, default='', help='used for logfile naming') parser.add_argument('--logfile', type=str, default='logs/log_%Y%m%d_%H%M%S{name}.log') args = 
parser.parse_args() args.enable_comms = not args.disable_comms args.enable_proposal = not args.disable_proposal args.prosocial = not args.disable_prosocial args.logfile = args.logfile.format(**args.__dict__) args.logfile = datetime.datetime.strftime(datetime.datetime.now(), args.logfile) del args.__dict__['disable_comms'] del args.__dict__['disable_proposal'] del args.__dict__['disable_prosocial'] del args.__dict__['name'] run(**args.__dict__)
38.830664
129
0.613236
2,145
16,969
4.522611
0.142657
0.052263
0.072364
0.029482
0.353366
0.327801
0.273271
0.231213
0.199464
0.183074
0
0.012472
0.281808
16,969
436
130
38.919725
0.78354
0.0452
0
0.245283
0
0.002695
0.067497
0.007472
0
0
0
0
0
1
0.024259
false
0
0.040431
0
0.078167
0.056604
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a5cd88fd8c0d16613c9c5ac3d7d23b3867ecc8f
2,724
py
Python
nexar_requests.py
NexarDeveloper/nexar-examples-py
9a55964d9a847cf124e8928308369074e67d1dfe
[ "MIT" ]
null
null
null
nexar_requests.py
NexarDeveloper/nexar-examples-py
9a55964d9a847cf124e8928308369074e67d1dfe
[ "MIT" ]
null
null
null
nexar_requests.py
NexarDeveloper/nexar-examples-py
9a55964d9a847cf124e8928308369074e67d1dfe
[ "MIT" ]
null
null
null
"""Resources for making Nexar requests.""" import os, requests, re from typing import Callable, Dict, Iterator from requests_toolbelt import MultipartEncoder NEXAR_URL = "https://api.nexar.com/graphql" NEXAR_FILE_URL = "https://files.nexar.com/Upload/WorkflowAttachment" class NexarClient: def __init__(self, token) -> None: self.s = requests.session() self.s.headers.update({"token": token}) self.s.keep_alive = False def get_query(self, query: str, variables: Dict) -> dict: """Return Nexar response for the query.""" try: r = self.s.post( NEXAR_URL, json={"query": query, "variables": variables}, ) except Exception as e: print(e) raise Exception("Error while getting Nexar response") response = r.json() if ("errors" in response): for error in response["errors"]: print(error["message"]) raise SystemExit return response["data"] def upload_file(self, workspaceUrl: str, path: str, container: str) -> str: """Return Nexar response for the file upload.""" try: multipart_data = MultipartEncoder( fields = { 'file': (os.path.basename(path), open(path, 'rb'), 'text/plain'), 'workspaceUrl': workspaceUrl, 'container': container, } ) r = self.s.post( NEXAR_FILE_URL, data = multipart_data, headers = { 'Content-Type': multipart_data.content_type, } ) except Exception as e: print(e) raise Exception("Error while uploading file to Nexar") return r.text class Node: def __init__(self, client, query: str, variables: Dict, f: Callable) -> None: self.client = client self.query = query self.variables = variables self.f = f self.name = re.search("after[\s]*:[\s]*\$([\w]*)", query).group(1) def __iter__(self) -> Iterator: self.pageInfo = {"hasNextPage": True} return self def __next__(self): if (not self.pageInfo["hasNextPage"]): raise StopIteration data = self.client.get_query(self.query, self.variables) self.pageInfo = self.f(data)["pageInfo"] self.variables[self.name] = self.pageInfo["endCursor"] return self.f(data)["nodes"] def NodeIter(self, query: str, variables: dict, f: Callable) -> 
Iterator: return NexarClient.Node(self, query, variables, f)
32.819277
85
0.552863
289
2,724
5.103806
0.33564
0.016949
0.034576
0.042712
0.179661
0.105763
0.065085
0.065085
0.065085
0.065085
0
0.000547
0.329295
2,724
82
86
33.219512
0.806787
0.042584
0
0.129032
0
0
0.118441
0.009645
0
0
0
0
0
1
0.112903
false
0
0.048387
0.016129
0.274194
0.048387
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a5e39fe1eef3901acdd4ef96bf4275f32e60fa9
756
py
Python
{{cookiecutter.project_slug}}/sources/app/utils/tests/backends/test_admin_backend.py
AsheKR/cookiecutter-django
d0402aefcc2eeaffa747faa7ef50ad97286bfcca
[ "BSD-3-Clause" ]
null
null
null
{{cookiecutter.project_slug}}/sources/app/utils/tests/backends/test_admin_backend.py
AsheKR/cookiecutter-django
d0402aefcc2eeaffa747faa7ef50ad97286bfcca
[ "BSD-3-Clause" ]
null
null
null
{{cookiecutter.project_slug}}/sources/app/utils/tests/backends/test_admin_backend.py
AsheKR/cookiecutter-django
d0402aefcc2eeaffa747faa7ef50ad97286bfcca
[ "BSD-3-Clause" ]
null
null
null
from django.contrib.auth import authenticate from django.test import TestCase, override_settings class TestAdminBackend(TestCase): @override_settings( AUTHENTICATION_BACKENDS=[ "utils.backend.admin_backends.SettingsBackend", "django.contrib.auth.backends.ModelBackend", ], ADMIN_LOGIN="admin", ADMIN_PASSWORD="admin", ) def test_local_settings_admin_login(self): user = authenticate(username="admin", password="admin") self.assertIsNotNone(user, "The user must be returned.") self.assertIsNotNone( user.is_superuser, "User must have is_superuser permission" ) self.assertIsNotNone(user.is_staff, "User must have is_staff permission")
36
81
0.687831
79
756
6.405063
0.455696
0.112648
0.136364
0.098814
0
0
0
0
0
0
0
0
0.220899
756
20
82
37.8
0.859083
0
0
0
0
0
0.268519
0.112434
0
0
0
0
0.166667
1
0.055556
false
0.111111
0.111111
0
0.222222
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
7a5ea12ecc4b49137705efc9bc329a10e0a04827
1,560
py
Python
lab3/test.py
dssgabriel/obhpc
6358dffc85b18e1e65cb80f8e9b11b191b86463d
[ "MIT" ]
null
null
null
lab3/test.py
dssgabriel/obhpc
6358dffc85b18e1e65cb80f8e9b11b191b86463d
[ "MIT" ]
null
null
null
lab3/test.py
dssgabriel/obhpc
6358dffc85b18e1e65cb80f8e9b11b191b86463d
[ "MIT" ]
null
null
null
import numpy as np import sys import time import mblas # Python matrix multiplication def sgemm_py(A, B, C, n): for i in range(0, n): for j in range(0, n): loc = A[i * n + j] for k in range(0, n): C[i * n + k] += loc * B[j * n + k] # Time measurement of Python sgemm def measure_py(A, B, n): C = np.zeros((n * n,), dtype=np.float32) before = time.perf_counter() sgemm_py(A, B, C, n) after = time.perf_counter() return after - before # Time measurement of Numpy sgemm def measure_np(A, B, n): C = np.zeros((n, n), dtype=np.float32) A = A.reshape(n, n) B = B.reshape(n, n) before = time.perf_counter() C = np.dot(A, B) after = time.perf_counter() return after - before # Time measurement of C sgemm def measure_c(A, B, n): C = np.zeros((n * n,), dtype=np.float32) before = time.perf_counter() mblas.sgemm_c(A, B, C, n) after = time.perf_counter() return after - before # Main function def main(): size = int(sys.argv[1]) A = np.random.rand(size * size).astype(np.float32) B = np.random.rand(size * size).astype(np.float32) elapsed_py = measure_py(A, B, size) elapsed_np = measure_np(A, B, size) elapsed_c = measure_c(A, B, size) #elapsed_avx2 = measure_avx(A, B, size) #elapsed_avx512 = measure_avx(A, B, size) print("py / c: ", elapsed_py / elapsed_c); print("py / np: ", elapsed_py / elapsed_np); print("np / c: ", elapsed_np / elapsed_c); if __name__ == "__main__": main()
23.636364
54
0.592949
257
1,560
3.459144
0.210117
0.026997
0.101237
0.058493
0.445444
0.409449
0.389201
0.389201
0.310461
0.310461
0
0.015598
0.260256
1,560
65
55
24
0.754766
0.136538
0
0.261905
0
0
0.026139
0
0
0
0
0
0
1
0.119048
false
0
0.095238
0
0.285714
0.071429
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7a5ef0d1d60184e8ef2a2b4a6360f59137607317
488
py
Python
tests/conftest.py
odra/kelo
22930954c6a75ba3e60ec07d258d65d13533b5b0
[ "MIT" ]
null
null
null
tests/conftest.py
odra/kelo
22930954c6a75ba3e60ec07d258d65d13533b5b0
[ "MIT" ]
null
null
null
tests/conftest.py
odra/kelo
22930954c6a75ba3e60ec07d258d65d13533b5b0
[ "MIT" ]
null
null
null
import pytest @pytest.fixture def hello_world_fn(): def fn(): return 'hello world' return fn @pytest.fixture def greetings_fn(): def fn(name): return 'hello %s' % name return fn @pytest.fixture def greetings_default_fn(): def fn(name='nobody'): return 'hello %s' % name return fn @pytest.fixture def complex_fn(): def fn(name, age=32, **kwargs): return '%s is %s years old and lives in %s' % (name, age, kwargs.get('country', 'nowhere')) return fn
16.266667
95
0.655738
74
488
4.243243
0.351351
0.165605
0.203822
0.200637
0.388535
0.388535
0.254777
0.254777
0.254777
0
0
0.005155
0.204918
488
29
96
16.827586
0.804124
0
0
0.47619
0
0
0.166324
0
0
0
0
0
0
1
0.380952
false
0
0.047619
0.190476
0.809524
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5