hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
45502c36194120822d47a489bb35792d7a4b090d | 754 | py | Python | matplotlib/axessetting.py | mk43/python-practice-project | 4260456c1006c1f3e2a6f00bcb2639d6e8a71e5e | [
"Apache-2.0"
] | 7 | 2018-05-29T07:14:22.000Z | 2020-03-05T06:45:04.000Z | matplotlib/axessetting.py | mk43/python-practice-project | 4260456c1006c1f3e2a6f00bcb2639d6e8a71e5e | [
"Apache-2.0"
] | null | null | null | matplotlib/axessetting.py | mk43/python-practice-project | 4260456c1006c1f3e2a6f00bcb2639d6e8a71e5e | [
"Apache-2.0"
] | 5 | 2018-11-08T04:03:48.000Z | 2020-03-05T06:45:06.000Z | import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(-10, 10, 40)
y1 = 10 * x + 50
y2 = x**2
plt.figure()
plt.plot(x, y1, 'b-')
plt.plot(x, y2, 'b--')
plt.xlim((-20, 20))
plt.ylim((-60, 160))
plt.xlabel('I am x')
plt.ylabel('I am y')
plt.xticks(np.linspace(-20, 20, 5))
plt.yticks([0, 50, 100], [r'$bad$', r'$normal$', r'$good$'])
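# Grab the current Axes so the plot border (spines) can be restyled below:
# the right/top spines are hidden, and the left/bottom spines are moved to
# the data origin so the axes cross at (0, 0).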
borderparameter = plt.gca()
borderparameter.spines['right'].set_color('none')
borderparameter.spines['top'].set_color('none')
borderparameter.xaxis.set_ticks_position('top')
borderparameter.spines['left'].set_position(('data',0))
borderparameter.spines['bottom'].set_position(('data',0))
borderparameter.xaxis.set_ticks_position('bottom')
borderparameter.set_xlabel('')
borderparameter.set_ylabel('')
plt.show() | 24.322581 | 60 | 0.690981 | 119 | 754 | 4.294118 | 0.436975 | 0.156556 | 0.031311 | 0.101761 | 0.227006 | 0 | 0 | 0 | 0 | 0 | 0 | 0.053857 | 0.088859 | 754 | 31 | 61 | 24.322581 | 0.689956 | 0 | 0 | 0 | 0 | 0 | 0.104636 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4552844d1989cd409ab97253e0c1d21d415c08b6 | 1,943 | py | Python | Xgam/__init__.py | aurelio-amerio/Xgam | fb65ed009bb35984eadd0c576aa385ca3702c8ce | [
"MIT"
] | 1 | 2021-06-14T20:27:30.000Z | 2021-06-14T20:27:30.000Z | Xgam/__init__.py | aurelio-amerio/Xgam | fb65ed009bb35984eadd0c576aa385ca3702c8ce | [
"MIT"
] | null | null | null | Xgam/__init__.py | aurelio-amerio/Xgam | fb65ed009bb35984eadd0c576aa385ca3702c8ce | [
"MIT"
] | 1 | 2021-06-14T20:27:55.000Z | 2021-06-14T20:27:55.000Z | #!/usr/bin/env python #
# #
# Author: Michela Negro, GSFC/CRESST/UMBC.                                     #
# On behalf of the Fermi-LAT Collaboration. #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by        #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
#------------------------------------------------------------------------------#
"""Xgam: Framework for Gamma-ray X-correlation Analysis
"""
import os
PACKAGE_NAME = 'Xgam'
"""Basic folder structure of the package.
"""
X_ROOT = os.path.abspath(os.path.dirname(__file__))
X_BIN = os.path.join(X_ROOT, 'bin')
X_CONFIG = os.path.join(X_ROOT, 'config')
X_UTILS = os.path.join(X_ROOT, 'utils')
""" This is where we put the actual (FT1 and FT2) data sets.
"""
from Xgam.utils.logging_ import logger
try:
FT_DATA_FOLDER = os.environ['P8_DATA']
logger.info('Base data folder set to $P8_DATA = %s...' % FT_DATA_FOLDER)
except KeyError:
FT_DATA_FOLDER = '/Users/mnegro/data/Fermi-LAT'
logger.info('$P8_DATA not set, base data folder set to %s...' %\
FT_DATA_FOLDER)
""" This is the output directory.
"""
try:
X_OUT = os.environ['X_OUT']
X_OUT_FIG = os.environ['X_OUT_FIG']
except KeyError:
X_OUT = os.path.join(X_ROOT, 'output')
X_OUT_FIG = os.path.join(X_ROOT, 'output/figures')
if __name__ == '__main__':
from Xgam.utils.logging_ import startmsg
startmsg()
print(('X_ROOT: %s' % X_ROOT))
| 36.660377 | 80 | 0.507463 | 230 | 1,943 | 4.086957 | 0.452174 | 0.042553 | 0.053191 | 0.058511 | 0.188298 | 0.044681 | 0 | 0 | 0 | 0 | 0 | 0.004747 | 0.34946 | 1,943 | 52 | 81 | 37.365385 | 0.738924 | 0.468863 | 0 | 0.083333 | 0 | 0 | 0.224299 | 0.03271 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4555f8bdea823e3c6bf72f128d96fb3a61fb035f | 6,542 | py | Python | day_10.py | JeffHanna/Advent_of_Code_2018 | a47f7c5dc1ef28df41a26a21fc16626cb2a9c922 | [
"MIT"
] | null | null | null | day_10.py | JeffHanna/Advent_of_Code_2018 | a47f7c5dc1ef28df41a26a21fc16626cb2a9c922 | [
"MIT"
] | null | null | null | day_10.py | JeffHanna/Advent_of_Code_2018 | a47f7c5dc1ef28df41a26a21fc16626cb2a9c922 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
--- Day 10: The Stars Align ---
It's no use; your navigation system simply isn't capable of providing walking directions in the arctic circle, and certainly not in 1018.
The Elves suggest an alternative. In times like these, North Pole rescue operations will arrange points of light in the sky to guide missing Elves back to base. Unfortunately, the message is easy to miss: the points move slowly enough that it takes hours to align them, but have so much momentum that they only stay aligned for a second. If you blink at the wrong time, it might be hours before another message appears.
You can see these points of light floating in the distance, and record their position in the sky and their velocity, the relative change in position per second (your puzzle input). The coordinates are all given from your perspective; given enough time, those positions and velocities will move the points into a cohesive message!
Rather than wait, you decide to fast-forward the process and calculate what the points will eventually spell.
For example, suppose you note the following points:
position=< 9, 1> velocity=< 0, 2>
position=< 7, 0> velocity=<-1, 0>
position=< 3, -2> velocity=<-1, 1>
position=< 6, 10> velocity=<-2, -1>
position=< 2, -4> velocity=< 2, 2>
position=<-6, 10> velocity=< 2, -2>
position=< 1, 8> velocity=< 1, -1>
position=< 1, 7> velocity=< 1, 0>
position=<-3, 11> velocity=< 1, -2>
position=< 7, 6> velocity=<-1, -1>
position=<-2, 3> velocity=< 1, 0>
position=<-4, 3> velocity=< 2, 0>
position=<10, -3> velocity=<-1, 1>
position=< 5, 11> velocity=< 1, -2>
position=< 4, 7> velocity=< 0, -1>
position=< 8, -2> velocity=< 0, 1>
position=<15, 0> velocity=<-2, 0>
position=< 1, 6> velocity=< 1, 0>
position=< 8, 9> velocity=< 0, -1>
position=< 3, 3> velocity=<-1, 1>
position=< 0, 5> velocity=< 0, -1>
position=<-2, 2> velocity=< 2, 0>
position=< 5, -2> velocity=< 1, 2>
position=< 1, 4> velocity=< 2, 1>
position=<-2, 7> velocity=< 2, -2>
position=< 3, 6> velocity=<-1, -1>
position=< 5, 0> velocity=< 1, 0>
position=<-6, 0> velocity=< 2, 0>
position=< 5, 9> velocity=< 1, -2>
position=<14, 7> velocity=<-2, 0>
position=<-3, 6> velocity=< 2, -1>
Each line represents one point. Positions are given as <X, Y> pairs: X represents how far left (negative) or right (positive) the point appears, while Y represents how far up (negative) or down (positive) the point appears.
At 0 seconds, each point has the position given. Each second, each point's velocity is added to its position. So, a point with velocity <1, -2> is moving to the right, but is moving upward twice as quickly. If this point's initial position were <3, 9>, after 3 seconds, its position would become <6, 3>.
Over time, the points listed above would move like this:
Initially:
........#.............
................#.....
.........#.#..#.......
......................
#..........#.#.......#
...............#......
....#.................
..#.#....#............
.......#..............
......#...............
...#...#.#...#........
....#..#..#.........#.
.......#..............
...........#..#.......
#...........#.........
...#.......#..........
After 1 second:
......................
......................
..........#....#......
........#.....#.......
..#.........#......#..
......................
......#...............
....##.........#......
......#.#.............
.....##.##..#.........
........#.#...........
........#...#.....#...
..#...........#.......
....#.....#.#.........
......................
......................
After 2 seconds:
......................
......................
......................
..............#.......
....#..#...####..#....
......................
........#....#........
......#.#.............
.......#...#..........
.......#..#..#.#......
....#....#.#..........
.....#...#...##.#.....
........#.............
......................
......................
......................
After 3 seconds:
......................
......................
......................
......................
......#...#..###......
......#...#...#.......
......#...#...#.......
......#####...#.......
......#...#...#.......
......#...#...#.......
......#...#...#.......
......#...#..###......
......................
......................
......................
......................
After 4 seconds:
......................
......................
......................
............#.........
........##...#.#......
......#.....#..#......
.....#..##.##.#.......
.......##.#....#......
...........#....#.....
..............#.......
....#......#...#......
.....#.....##.........
...............#......
...............#......
......................
......................
After 3 seconds, the message appeared briefly: HI. Of course, your message will be much longer and will take many more seconds to appear.
What message will eventually appear in the sky?
"""
from collections import namedtuple
from itertools import count
import numpy
import re
def _parse( filepath ):
nums = re.compile( R'[+-]?\d+(?:\.\d+)?' )
Light = namedtuple( 'Light', 'p_x p_y v_x v_y' )
with open( filepath, 'r' ) as f:
lines = f.readlines( )
lights = [ ]
for line in lines:
vals = [ int( x ) for x in nums.findall( line ) ]
lights.append( Light( vals[ 0 ], vals[ 1 ], vals[ 2 ], vals[ 3 ] ) )
return lights
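# Illustrative sketch (not part of the original solution): what _parse's
# number regex pulls out of a single line of puzzle input. The helper name
# _demo_parse_line is hypothetical.
def _demo_parse_line():
    line = 'position=< 9,  1> velocity=< 0,  2>'
    vals = [ int( x ) for x in re.findall( R'[+-]?\d+(?:\.\d+)?', line ) ]
    return vals # [9, 1, 0, 2] -> p_x, p_y, v_x, v_y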
def _simulate( lights ) -> tuple:
sky_height = 0
Light_Position = namedtuple( 'Light_Position', 'x, y' )
light_positions = [ ]
for time in count( ):
new_time = time + 1
new_light_positions = [ Light_Position( x = l.p_x + l.v_x * new_time, y = l.p_y + l.v_y * new_time ) for l in lights ]
new_light_positions = sorted( new_light_positions, key = lambda l: l.y )
min_y = new_light_positions[ 0 ].y
max_y = new_light_positions[ -1 ].y
new_sky_height = max_y - min_y
if not sky_height or new_sky_height <= sky_height:
sky_height = new_sky_height
light_positions = new_light_positions
else:
break
xs, ys = list( zip( *light_positions ) )
xs = sorted( xs )
min_x = xs[ 0 ]
max_x = xs[ -1 ]
x_range = range( min_x - 1, max_x + 2 )
ys = sorted( ys )
min_y = ys[ 0 ]
max_y = ys[ -1 ]
y_range = range( min_y - 1, max_y + 2 )
return '\n'.join( ''.join( '#' if ( i, j ) in light_positions else ' ' for i in x_range ) for j in y_range ), time
if __name__ == '__main__':
lights = _parse( r'day_10_input.txt' )
message, time = _simulate( lights )
print( 'The message {0} will appear after {1} seconds.'.format( message, time ))
| 32.068627 | 419 | 0.486854 | 790 | 6,542 | 3.943038 | 0.298734 | 0.046228 | 0.019262 | 0.034671 | 0.097271 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030822 | 0.151941 | 6,542 | 203 | 420 | 32.226601 | 0.530642 | 0.752369 | 0 | 0 | 0 | 0 | 0.081316 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.090909 | 0 | 0.181818 | 0.022727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45563edf7177293ae810f1142fc2457d7d0c9f98 | 170,658 | py | Python | TACT.py | CFARS/TACT | 1b2bbf1f9d0a45cff232ec447286419faac66b58 | [
"BSD-3-Clause"
] | 1 | 2022-03-23T11:50:53.000Z | 2022-03-23T11:50:53.000Z | TACT.py | CFARS/TACT | 1b2bbf1f9d0a45cff232ec447286419faac66b58 | [
"BSD-3-Clause"
] | 4 | 2021-12-18T04:01:41.000Z | 2022-03-10T16:13:18.000Z | TACT.py | CFARS/TACT | 1b2bbf1f9d0a45cff232ec447286419faac66b58 | [
"BSD-3-Clause"
] | null | null | null | """
This is the main script to analyze projects without an NDA in place.
Authors: Nikhil Kondabala, Alexandra Arntsen, Andrew Black, Barrett Goudeau, Nigel Swytink-Binnema, Nicolas Jolin
Updated: 7/01/2021
Example command line execution:
python TACT.py -in /Users/aearntsen/cfarsMASTER/CFARSPhase3/test/518Tower_Windcube_Filtered_subset.csv -config /Users/aearntsen/cfarsMASTER/CFARSPhase3/test/configuration_518Tower_Windcube_Filtered_subset_ex.xlsx -rtd /Volumes/New\ P/DataScience/CFARS/WISE_Phase3_Implementation/RTD_chunk -res /Users/aearntsen/cfarsMASTER/CFARSPhase3/test/out.xlsx --timetestFlag
python phase3_implementation_noNDA.py -in /Users/aearntsen/cfarsMaster/cfarsMASTER/CFARSPhase3/test/NRG_canyonCFARS_data.csv -config /Users/aearntsen/cfarsMaster/CFARSPhase3/test/Configuration_template_phase3_NRG_ZX.xlsx -rtd /Volumes/New\ P/DataScience/CFARS/WISE_Phase3_Implementation/RTD_chunk -res /Users/aearntsen/cfarsMaster/CFARSPhase3/test/out.xlsx --timetestFlag
"""
try:
from TACT import logger
except ImportError:
pass
from TACT.computation.adjustments import Adjustments
from TACT.computation.methods.GC import perform_G_C_adjustment
# from TACT.computation.methods.GLTERRAWC1HZ import perform_G_LTERRA_WC_1HZ_adjustment
from TACT.computation.methods.GSa import perform_G_Sa_adjustment
from TACT.computation.methods.GSFc import perform_G_SFc_adjustment
from TACT.computation.methods.SSLTERRAML import perform_SS_LTERRA_ML_adjustment
from TACT.computation.methods.SSLTERRASML import perform_SS_LTERRA_S_ML_adjustment
from TACT.computation.methods.SSNN import perform_SS_NN_adjustment
from TACT.computation.methods.SSSS import perform_SS_SS_adjustment
from TACT.computation.methods.SSWS import perform_SS_WS_adjustment
from TACT.computation.methods.SSWSStd import perform_SS_WS_Std_adjustment
from TACT.computation.match import perform_match, perform_match_input
from TACT.computation.TI import get_count_per_WSbin, get_TI_MBE_Diff_j, get_TI_Diff_r, get_representative_TI, get_TI_bybin, get_TI_byTIrefbin, get_description_stats, Dist_stats
from TACT.extrapolation.extrapolation import perform_TI_extrapolation, extrap_configResult
from TACT.extrapolation.calculations import log_of_ratio
from TACT.readers.windcube import import_WC_file_VAD, get_10min_spectrum_WC_raw
from TACT.readers.config import Config
from TACT.readers.data import Data
from TACT.writers.files import write_all_resultstofile
import pandas as pd
import numpy as np
import sys
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score
import os
import math
import datetime
def get_modelRegression(inputdata, column1, column2, fit_intercept=True):
'''
:param inputdata: input data (dataframe)
:param column1: string, column name for x-variable
:param column2: string, column name for y-variable
    :return: list of regression results [slope, intercept, r2, mean difference, mse, rmse]
'''
x = inputdata[column1].values.astype(float)
y = inputdata[column2].values.astype(float)
mask = ~np.isnan(x) & ~np.isnan(y)
x = x[mask]
y = y[mask]
x = x.reshape(len(x), 1)
y = y.reshape(len(y), 1)
regr = linear_model.LinearRegression(fit_intercept=fit_intercept)
regr.fit(x, y)
slope = regr.coef_[0][0]
intercept = regr.intercept_[0]
predict = regr.predict(x)
    y = y.astype(float)
    r = np.corrcoef(x.ravel(), y.ravel())[0, 1]
r2 = r2_score(y, predict) # coefficient of determination, explained variance
mse = mean_squared_error(y, predict, multioutput='raw_values')[0]
rmse = np.sqrt(mse)
difference = abs((x - y).mean())
resultsDict = {'c': intercept, 'm': slope, 'r': r, 'r2': r2, 'mse': mse, 'rmse': rmse, 'predicted': predict,
'difference': difference}
results = [slope, intercept , r2 , difference, mse, rmse]
return results
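# Minimal usage sketch (illustrative only): fit one sensor column against a
# reference column and unpack the summary list that get_modelRegression
# returns. The DataFrame values below are hypothetical.
def _demo_get_modelRegression():
    df = pd.DataFrame({'Ref_WS': [4.0, 5.0, 6.0, 7.0],
                       'RSD_WS': [4.2, 5.1, 5.9, 7.2]})
    slope, intercept, r2, difference, mse, rmse = get_modelRegression(df, 'Ref_WS', 'RSD_WS')
    return slope, intercept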
def get_all_regressions(inputdata, title=None):
# get the ws regression results for all the col required pairs. Title is the name of subset of data being evaluated
# Note the order in input to regression function. x is reference.
pairList = [['Ref_WS','RSD_WS'],['Ref_WS','Ane2_WS'],['Ref_TI','RSD_TI'],['Ref_TI','Ane2_TI'],['Ref_SD','RSD_SD'],['Ref_SD','Ane2_SD']]
lenFlag = False
if len(inputdata) < 2:
lenFlag = True
columns = [title, 'm', 'c', 'rsquared', 'mean difference', 'mse', 'rmse']
results = pd.DataFrame(columns=columns)
logger.debug(f"getting regr for {title}")
for p in pairList:
res_name = str(p[0].split('_')[1] + '_regression_' + p[0].split('_')[0] + '_' + p[1].split('_')[0])
if p[1] in inputdata.columns and lenFlag == False:
_adjuster = Adjustments(inputdata)
results_regr = [res_name] + _adjuster.get_regression(inputdata[p[0]], inputdata[p[1]])
else:
results_regr = [res_name, 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN']
_results = pd.DataFrame(columns=columns, data=[results_regr])
results = pd.concat([results, _results], ignore_index=True, axis=0, join='outer')
# labels not required
labelsExtra = ['RSD_SD_Ht1','RSD_TI_Ht1', 'RSD_WS_Ht1','RSD_SD_Ht2', 'RSD_TI_Ht2',
'RSD_WS_Ht2', 'RSD_SD_Ht3', 'RSD_TI_Ht3', 'RSD_WS_Ht3',
'RSD_WS_Ht4', 'RSD_SD_Ht4', 'RSD_TI_Ht4']
labelsRef = ['Ref_WS', 'Ref_TI', 'Ref_SD']
labelsAne = ['Ane_SD_Ht1', 'Ane_TI_Ht1', 'Ane_WS_Ht1', 'Ane_SD_Ht2', 'Ane_TI_Ht2', 'Ane_WS_Ht2',
'Ane_SD_Ht3', 'Ane_TI_Ht3', 'Ane_WS_Ht3', 'Ane_WS_Ht4', 'Ane_SD_Ht4','Ane_TI_Ht4']
for l in labelsExtra:
parts = l.split('_')
reg_type = list(set(parts).intersection(['WS', 'TI', 'SD']))
if 'RSD' in l:
ht_type = parts[2]
ref_type = [s for s in labelsAne if reg_type[0] in s]
ref_type = [s for s in ref_type if ht_type in s]
res_name = str(reg_type[0] + '_regression_' + parts[0])
if 'Ht' in parts[2]:
res_name = res_name + parts[2] + '_' + ref_type[0].split('_')[0] + ref_type[0].split('_')[2]
else:
res_name = res_name + '_Ref'
logger.debug(res_name)
if l in inputdata.columns and lenFlag == False:
_adjuster = Adjustments(inputdata)
res = [res_name] + _adjuster.get_regression(inputdata[ref_type[0]],inputdata[l])
else:
res = [res_name, 'NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN']
logger.debug(res)
_results = pd.DataFrame(columns=columns, data=[res])
results = pd.concat([results, _results], ignore_index=True, axis=0, join='outer')
return results
def min_diff(array_orig,array_to_find,tol):
#Finds indices in array_orig that correspond to values closest to numbers in array_to_find with tolerance tol
#Inputs
#array_orig: Original array where you want to find matching values
#array_to_find: Array of numbers to find in array_orig
#tol: Tolerance to find matching value
#Outputs
#found_indices: Indices corresponding to matching values. If no values matched with desired tolerance, index will be filled by NaN.
import numpy as np
found_indices = []
if not np.shape(array_to_find):
array_to_find = [array_to_find]
for i in array_to_find:
min_difference = tol
found_index_temp = np.nan
for j in range(0,len(array_orig)):
diff_temp = abs(i-array_orig[j])
if diff_temp < min_difference:
min_difference = diff_temp
found_index_temp = j
found_indices.append(found_index_temp)
return np.array(found_indices)
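# Illustrative usage sketch (not part of the original TACT code); the heights
# and target values below are hypothetical.
def _demo_min_diff():
    heights = np.array([40., 60., 80., 100.])
    #Indices of the heights closest to 61 m and 97 m, within a 5 m tolerance
    return min_diff(heights, [61., 97.], 5) # -> array([1., 3.])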
def var_adjustment(vr_n,vr_e,vr_s,vr_w,vr_z,wd,U,height_needed,frequency_vert_beam,el_angle,mode):
#Uses Taylor's frozen turbulence hypothesis with data from the vertically
#pointing beam to estimate new values of the u and v variance.
#Inputs
#vr_n, vr_e, vr_s, vr_w, vr_z: Time series of radial velocity from north-, east-, south, west-, and
#vertically pointing beams, respectively, at height of interest.
#wd: 10-min. Mean wind direction
#U: 10-min. Mean horizontal wind speed
#height_needed: Measurement height corresponding to velocity data
#frequency_vert_beam: Sampling frequency of data from vertically pointing beam
#el_angle: Elevation angle of off-vertical beam positions (in degrees, measured from the ground)
#mode: Type of variance contamination adjustment to be applied. Options are taylor_ws and taylor_var.
#Outputs
#var_diff: Estimate of increase in streamwise variance due to variance contamination
import numpy as np
w_N = np.zeros(len(vr_z))
w_N[:] = np.nan
w_E = np.zeros(len(vr_z))
w_E[:] = np.nan
w_S = np.zeros(len(vr_z))
w_S[:] = np.nan
w_W = np.zeros(len(vr_z))
w_W[:] = np.nan
u_bar = np.sin(np.radians(wd - 180))*U
v_bar = np.cos(np.radians(wd - 180))*U
delta_t_vert_beam = 1./frequency_vert_beam
#Calculate the number of time steps needed for eddies to travel from one
#side of the scanning circle to the other
dist = height_needed/np.tan(np.radians(el_angle))
delta_t_u = dist/u_bar
interval_u = round(delta_t_u/delta_t_vert_beam)
delta_t_v = dist/v_bar
interval_v = round(delta_t_v/delta_t_vert_beam)
#Estimate values of w at different sides of the scanning circle by using
#Taylor's frozen turbulence hypothesis
for i in range(len(vr_z)):
try:
w_N[i] = vr_z[i-interval_v]
w_E[i] = vr_z[i-interval_u]
except:
w_N[i] = np.nan
w_E[i] = np.nan
try:
w_S[i] = vr_z[i+interval_v]
w_W[i] = vr_z[i+interval_u]
except:
w_S[i] = np.nan
w_W[i] = np.nan
if "taylor_ws" in mode:
#Use the new values of w to estimate the u and v components using the DBS technique
#and calculate the variance
u_DBS_new = ((vr_e-vr_w) - (w_E-w_W)*np.sin(np.radians(el_angle)))/(2*np.cos(np.radians(el_angle)))
v_DBS_new = ((vr_n-vr_s) - (w_N-w_S)*np.sin(np.radians(el_angle)))/(2*np.cos(np.radians(el_angle)))
u_var_lidar_new = get_10min_var(u_DBS_new,frequency_vert_beam)
v_var_lidar_new = get_10min_var(v_DBS_new,frequency_vert_beam)
else:
#Calculate change in w across the scanning circle in north-south and east-west directions
dw_est1 = w_S - w_N
dw_est2 = w_W - w_E
vr1_var = get_10min_var(vr_n,1./4)
vr2_var = get_10min_var(vr_e,1./4)
vr3_var = get_10min_var(vr_s,1./4)
vr4_var = get_10min_var(vr_w,1./4)
dw_var1 = get_10min_var(dw_est1,1./4)
dw_var2 = get_10min_var(dw_est2,1./4)
vr1_vr3_var = get_10min_covar(vr_n,vr_s,1./4)
vr2_vr4_var = get_10min_covar(vr_e,vr_w,1./4)
vr1_dw_var = get_10min_covar(vr_n,dw_est1,1./4)
vr3_dw_var = get_10min_covar(vr_s,dw_est1,1./4)
vr2_dw_var = get_10min_covar(vr_e,dw_est2,1./4)
vr4_dw_var = get_10min_covar(vr_w,dw_est2,1./4)
#These equations are adapted from Newman et al. (2016), neglecting terms involving
#du or dv, as these terms are expected to be small compared to dw
#Reference: Newman, J. F., P. M. Klein, S. Wharton, A. Sathe, T. A. Bonin,
#P. B. Chilson, and A. Muschinski, 2016: Evaluation of three lidar scanning
#strategies for turbulence measurements, Atmos. Meas. Tech., 9, 1993-2013.
u_var_lidar_new = (1./(4*np.cos(np.radians(el_angle))**2))*(vr2_var + vr4_var- 2*vr2_vr4_var + 2*vr2_dw_var*np.sin(np.radians(el_angle)) \
- 2*vr4_dw_var*np.sin(np.radians(el_angle)) + dw_var2*np.sin(np.radians(el_angle))**2)
v_var_lidar_new = (1./(4*np.cos(np.radians(el_angle))**2))*(vr1_var + vr3_var- 2*vr1_vr3_var + 2*vr1_dw_var*np.sin(np.radians(el_angle)) \
- 2*vr3_dw_var*np.sin(np.radians(el_angle)) + dw_var1*np.sin(np.radians(el_angle))**2)
#Rotate the variance into the mean wind direction
#Note: The rotation should include a term with the uv covariance, but the
#covariance terms are also affected by variance contamination. In Newman
#et al. (2016), it was found that the uv covariance is usually close to 0 and
#can safely be neglected.
#Reference: Newman, J. F., P. M. Klein, S. Wharton, A. Sathe, T. A. Bonin,
#P. B. Chilson, and A. Muschinski, 2016: Evaluation of three lidar scanning
#strategies for turbulence measurements, Atmos. Meas. Tech., 9, 1993-2013.
u_rot_var_new = u_var_lidar_new*(np.sin(np.radians(wd)))**2 + v_var_lidar_new*(np.cos(np.radians(wd)))**2
#Calculate the wind speed and variance if w is assumed to be the same on all
#sides of the scanning circle
u_DBS = (vr_e-vr_w)/(2*np.cos(np.radians(el_angle)))
v_DBS = (vr_n-vr_s)/(2*np.cos(np.radians(el_angle)))
u_var_DBS = get_10min_var(u_DBS,frequency_vert_beam)
v_var_DBS = get_10min_var(v_DBS,frequency_vert_beam)
u_rot_var = u_var_DBS*(np.sin(np.radians(wd)))**2 + v_var_DBS*(np.cos(np.radians(wd)))**2
return u_rot_var-u_rot_var_new
def acvf(ts):
#Calculate autocovariance function for a time series
#Inputs
#ts: Time series of data
#Outputs
    #ts_adj: Values of autocovariance function starting from lag 0
import numpy as np
lags = range(0,len(ts))
ts_adj = []
for i in lags:
ts_subset_temp = ts[i:len(ts)]
ts_subset_temp2 = ts[0:len(ts)-i]
ts_adj.append(np.nanmean((ts_subset_temp-np.nanmean(ts_subset_temp))*(ts_subset_temp2-np.nanmean(ts_subset_temp2))))
return ts_adj
def inertial_subrange_func(t, b, C):
#Inertial subrange fit for autocovariance function
#t is lag time, b is the variance at lag 0, and C is a parameter corresponding to eddy dissipation
return -C*t**(2./3) + b
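# Illustrative sketch of how this fit is used by lenschow_technique below:
# fit inertial_subrange_func to the first non-zero lags of the autocovariance
# function and read the lag-0 intercept b as the noise-free variance estimate.
# All values here are synthetic and the helper name is hypothetical.
def _demo_inertial_subrange_fit():
    from scipy.optimize import curve_fit
    lags = np.array([0.25, 0.5, 0.75]) #first three non-zero lags at 4 Hz
    acvf_vals = np.array([0.95, 0.92, 0.90]) #synthetic ACVF values
    popt, pcov = curve_fit(inertial_subrange_func, lags, acvf_vals, p0=[1.0, 0.002])
    return popt[0] #b: extrapolated lag-0 (noise-free) variance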
def lenschow_technique(ts,frequency,mode_ws,option):
#Apply different forms of the Lenschow et al. (2000) technique
    #Reference: Lenschow, D. H., V. Wulfmeyer, and C. Senff, 2000: Measuring second- through fourth-order moments in noisy data. J. Atmos. Oceanic Technol., 17, 1330–1347.
#Inputs
#ts: Time series of data
#frequency: Sampling frequency of data
#mode_ws: raw_WC, VAD, or raw_ZephIR
    #option: Type of Lenschow noise adjustment to be applied. Options are linear, subrange, and spectrum.
#Outputs
#new_ts_var: 10-min. variance after noise adjustment has been applied
import numpy as np
from scipy.optimize import curve_fit
ts = fill_nan(ts)
#Number of samples in a 10-min period
ten_min_count = int(frequency*60*10)
var_diff = []
var_orig = []
lags = np.arange(0,ten_min_count)/float(frequency)
for i in np.arange(0,len(ts)-ten_min_count+1,ten_min_count):
#10-min. window of data
ts_window = ts[i:i+ten_min_count]
        ten_min_index = i//ten_min_count
var_orig.append(get_10min_var(ts_window,frequency))
if 'linear' in option:
            #Use values of the ACVF from the first three non-zero lags to linearly
            #extrapolate the ACVF to lag 0
            ts_adj = acvf(ts_window)
            x_vals = lags[1:4]
            y_vals = ts_adj[1:4]
            p = np.polyfit(x_vals,y_vals,1)
            var_diff.append(var_orig[ten_min_index]-p[1])
if 'subrange' in option:
            #Use values of the ACVF from the first three non-zero lags to fit the
            #inertial subrange function. The value of the function at lag 0 is
            #assumed to be the true variance.
            ts_adj = acvf(ts_window)
            x_vals = lags[1:4]
            y_vals = ts_adj[1:4]
try:
popt, pcov = curve_fit(inertial_subrange_func, x_vals, y_vals,\
p0 = [np.mean((ts_window-np.mean(ts_window))**2),0.002])
var_diff.append(var_orig[ten_min_index]-popt[0])
except:
var_diff.append(np.nan)
if 'spectrum' in option:
#Assume spectral power at high frequencies is due only to noise. Average
#the spectral power at the highest 20% of frequencies in the time series
#and integrate the average power across all frequencies to estimate the
#noise floor
import numpy.ma as ma
if "raw_WC" in mode_ws:
[S,fr] = get_10min_spectrum_WC_raw(ts_window,frequency)
else:
[S,fr] = get_10min_spectrum(ts_window,frequency)
x = ma.masked_inside(fr,0.8*fr[-1],fr[-1])
func_temp = []
for j in range(len(fr)):
func_temp.append(np.mean(S[x.mask]))
noise_floor = np.trapz(func_temp,fr)
var_diff.append(noise_floor)
var_diff = np.array(var_diff)
#Only use var_diff values where the noise variance is positive
var_diff[var_diff < 0] = 0
new_ts_var = np.array(var_orig)-var_diff
return new_ts_var
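# Illustrative sketch (synthetic data): contaminate a smooth 1 Hz signal with
# white noise and apply the linear Lenschow technique to estimate the
# noise-free 10-min. variance. The seed and amplitudes are arbitrary.
def _demo_lenschow_linear():
    rng = np.random.default_rng(0)
    frequency = 1. #1 Hz -> 600 samples per 10-min. window
    t = np.arange(600)/frequency
    signal = np.sin(2*np.pi*t/120.)
    noisy = signal + rng.normal(0, 0.1, len(t))
    return lenschow_technique(noisy, frequency, 'VAD', 'linear')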
def get_10min_spectrum(ts,frequency):
#Calculate power spectrum for 10-min. period
#Inputs
#ts: Time series of data
#frequency: Sampling frequency of data
#Outputs
#S_A_fast: Spectral power
    #frequency_fft: Frequencies corresponding to spectral power values
import numpy as np
N = len(ts)
delta_f = float(frequency)/N
    frequency_fft = np.linspace(0,float(frequency)/2,N//2)
    F_A_fast = np.fft.fft(ts)/N
    E_A_fast = 2*abs(F_A_fast[0:N//2]**2)
S_A_fast = (E_A_fast)/delta_f
return S_A_fast,frequency_fft
def get_10min_covar(ts1,ts2,frequency):
#Calculate the covariance of two variables
#Inputs
#ts1: Time series of variable 1
#ts2: Time series of variable 2
#frequency: Sampling frequency
#Outputs
#ts_covar: 10-min. covariance of variables 1 and 2
import numpy as np
import functools
#Number of samples in a 10-min period
ten_min_count = int(frequency*60*10)
ts_covar = []
for i in np.arange(0,len(ts1)-ten_min_count+1,ten_min_count):
ts_temp1 = ts1[i:i+ten_min_count]
ts_temp2 = ts2[i:i+ten_min_count]
mask = [~np.isnan(ts_temp1),~np.isnan(ts_temp2)]
total_mask = functools.reduce(np.logical_and, mask)
ts_temp1 = ts_temp1[total_mask]
ts_temp2 = ts_temp2[total_mask]
ts_covar.append(np.nanmean((ts_temp1-np.nanmean(ts_temp1))*(ts_temp2-np.nanmean(ts_temp2))))
return np.array(ts_covar)
def fill_nan(A):
'''
interpolate to fill nan values
'''
#Adapted from code posted on Stack Overflow: http://stackoverflow.com/a/9815522
#1-D linear interpolation to fill missing values
#Inputs
#A: Time series where NaNs need to be filled
#Outputs
#B: Time series with NaNs filled
from scipy import interpolate
import numpy as np
inds = np.arange(A.shape[0])
good = np.where(np.isfinite(A))
#Only perform interpolation if more than 75% of the data are valid
if float(len(np.array(good).ravel()))/len(A) > 0.75:
f = interpolate.interp1d(inds[good], A[good],bounds_error=False,fill_value='extrapolate')
B = np.where(np.isfinite(A),A,f(inds))
else:
B = A
return B
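# Illustrative sketch: fill_nan linearly interpolates interior gaps when more
# than 75% of the points are valid. The array below is synthetic.
def _demo_fill_nan():
    a = np.array([1., np.nan, 3., 4., 5., 6., 7., 8.])
    return fill_nan(a) #gap at index 1 is filled with 2.0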
def spike_filter(ts,frequency):
#Spike filter based on procedure used in Wang et al. (2015)
#Reference: Wang, H., R. J. Barthelmie, A. Clifton, and S. C. Pryor, 2015:
#Wind measurements from arc scans with Doppler wind lidar, J. Atmos.
#Ocean. Tech., 32, 2024–2040.
#Inputs
#ts: Time series of data
#frequency: Sampling frequency of data
#Outputs
#ts_filtered_interp: Filtered time series with NaNs filled in
import numpy as np
#Number of samples in a 10-min period
ten_min_count = int(frequency*60*10)
ts_filtered = np.copy(ts)
ts_filtered_interp = np.copy(ts)
for i in np.arange(0,len(ts)-ten_min_count+1,ten_min_count):
ts_window = ts_filtered[i:i+ten_min_count]
#Calculate delta_v, difference between adjacent velocity values
delta_v = np.zeros(len(ts_window)-1)
for j in range(len(ts_window)-1):
delta_v[j] = ts_window[j+1] - ts_window[j]
q75, q25 = np.percentile(delta_v, [75 ,25])
IQR= q75 - q25
#If abs(delta_v) at times i and i-1 are larger than twice the interquartile
#range (IQR) and the delta_v values at times i and i-1 are of opposite sign,
#the velocity at time i is considered a spike and set to NaN.
for j in range(1,len(ts_window)-1):
if abs(delta_v[j]) > 2*IQR and abs(delta_v[j-1]) > 2*IQR:
if np.sign(delta_v[j]) != np.sign(delta_v[j-1]):
ts_window[j] = np.nan
ts_filtered[i+j] = np.nan
#Set entire 10-min. period to NaN if more than 40% of the velocity points
#are already NaN.
if (float(len(ts_window[np.isnan(ts_window)]))/len(ts_window)) > 0.4:
ts_filtered[i:i+ten_min_count] = np.nan
#Use a 1-D linear interpolation to fill in missing values
ts_filtered_interp[i:i+ten_min_count] = fill_nan(ts_filtered[i:i+ten_min_count])
return ts_filtered_interp
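# Illustrative sketch (synthetic data): inject a single spike into a smooth
# 1 Hz series; spike_filter flags it with the delta-v/IQR test above and
# fills the gap by interpolation. Values are arbitrary.
def _demo_spike_filter():
    frequency = 1.
    ts = np.sin(2*np.pi*np.arange(600)/300.)
    ts[300] += 5. #artificial spike
    return spike_filter(ts, frequency)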
def lidar_processing_noise(ts,frequency,mode_ws,mode_noise):
#Function to apply noise adjustment to time series. Outputs new variance after
#noise adjustment has been applied.
#Inputs
#ts: Time series of data
#frequency: Sampling frequency of data
#mode_ws: raw_WC, VAD, or raw_ZephIR
#mode_noise: Type of noise adjustment to be applied. Options are spike, lenschow_linear, lenschow_subrange, and lenschow_spectrum.
#Outputs
#new_ts_var: New 10-min. variance values after noise adjustment has been applied
if "spike" in mode_noise:
ts_filtered = spike_filter(ts,frequency)
new_ts_var = get_10min_var(ts_filtered,frequency)
if "lenschow_linear" in mode_noise:
new_ts_var = lenschow_technique(ts,frequency,mode_ws,'linear')
if "lenschow_subrange" in mode_noise:
new_ts_var = lenschow_technique(ts,frequency,mode_ws,'subrange')
if "lenschow_spectrum" in mode_noise:
new_ts_var = lenschow_technique(ts,frequency,mode_ws,'spectrum')
return new_ts_var
def Kaimal_spectrum_func(X, L):
#Given values of frequency (fr), mean horizontal wind speed (U), streamwise
#variance (u_var), and length scale (L), calculate idealized Kaimal spectrum
#This uses the form given by Eq. 2.24 in Burton et al. (2001)
#Reference: Burton, T., D. Sharpe, N. Jenkins, N., and E. Bossanyi, 2001:
#Wind Energy Handbook, John Wiley & Sons, Ltd., 742 pp.
fr,U,u_var = X
return u_var*fr*((4*(L/U)/((1+6*(fr*L/U))**(5./3))))
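# Illustrative check of the Kaimal form above: the function returns the
# frequency-weighted spectrum fr*S(fr), so integrating S(fr) over frequency
# should approximately recover u_var. The parameter values are arbitrary.
def _demo_kaimal_spectrum():
    fr = np.linspace(0.001, 5., 5000)
    U, u_var, L = 8., 0.5, 300. #mean wind speed, variance, length scale
    fS = Kaimal_spectrum_func((fr, U, u_var), L)
    return np.trapz(fS/fr, fr) #approaches u_var as the frequency range widens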
def Kaimal_spectrum_func2(pars, x, data=None):
#Kaimal spectrum function for fitting. Trying to minimize the difference
#between the actual spectrum (data) and the modeled spectrum (model)
vals = pars.valuesdict()
L = vals['L']
U = vals['U']
u_var = vals['u_var']
model = u_var*x*((4*(L/U)/((1+6*(x*L/U))**(5./3))))
if data is None:
return model
return model-data
def spectral_adjustment(u_rot,frequency,mode_ws,option):
#Estimates loss of variance due to volume averaging by extrapolating spectrum
#out to higher frequencies and integrating spectrum over higher frequencies
#Inputs
#u_rot: Time series of streamwise wind speed
#frequency: Sampling frequency of time series
#mode_ws: raw_WC, VAD, or raw_ZephIR
#option: Type of volume averaging adjustment to be applied. Options are spectral_adjustment_fit and acf.
#Outputs
#var_diff: Estimate of loss of streamwise variance due to volume averaging
import numpy as np
import scipy.signal
from lmfit import minimize,Parameters
    ten_min_count = int(frequency*60*10)
var_diff = []
for i in np.arange(0,len(u_rot)-ten_min_count+1,ten_min_count):
u_temp = u_rot[i:i+ten_min_count]
U = np.mean(u_temp)
u_var = get_10min_var(u_temp,frequency)
#Detrend time series before estimating parameters for modeled spectrum
u_temp = scipy.signal.detrend(u_temp)
if "raw_WC" in mode_ws:
[S,fr] = get_10min_spectrum_WC_raw(u_temp,frequency)
else:
[S,fr] = get_10min_spectrum(u_temp,frequency)
if "spectral_adjustment_fit" in option:
#Find value of length scale that produces best fit to idealized
#Kaimal spectrum
fit_params = Parameters()
fit_params.add('L', value=500,min=0,max=1500)
fit_params.add('U', value=U,vary=False)
fit_params.add('u_var', value=u_var,vary=False)
out = minimize(Kaimal_spectrum_func2, fit_params, args=(fr,), kws={'data':fr*S})
L = out.params['L'].value
else:
#Otherwise, use ACF to estimate integral length scale and use this
#value for the length scale in the Kaimal modeled spectrum
lags = np.arange(0,ten_min_count)/float(frequency)
u_adj = acvf(u_temp)
u_acf = u_adj/u_adj[0]
indices = np.arange(0,len(u_acf))
x = indices[np.array(u_acf)<=0]
            #The ACF is integrated to the first zero crossing to estimate the integral
            #time scale, then multiplied by the mean horizontal wind speed to estimate
            #the integral length scale
L = np.trapz(u_acf[:x[0]],lags[:x[0]])*U
        fr2 = np.linspace(0,5.,3000)
#Calculate Kaimal spectrum from 0 to 5 Hz
S_model = Kaimal_spectrum_func((fr2,U,u_var),L)
#Integrate spectrum for frequency values higher than those in the original
#spectrum from the lidar
var_diff.append(np.trapz((S_model[fr2 > fr[-1]]/fr2[fr2 > fr[-1]]),fr2[fr2 > fr[-1]]))
return np.array(var_diff)
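#Note: var_adjustment and get_10min_spectrum are each defined twice in this
#file; Python keeps the most recent definition, so the copies below are the
#ones actually used at runtime.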
def var_adjustment(vr_n,vr_e,vr_s,vr_w,vr_z,wd,U,height_needed,frequency_vert_beam,el_angle,mode):
#Uses Taylor's frozen turbulence hypothesis with data from the vertically
#pointing beam to estimate new values of the u and v variance.
#Inputs
#vr_n, vr_e, vr_s, vr_w, vr_z: Time series of radial velocity from north-, east-, south, west-, and
#vertically pointing beams, respectively, at height of interest.
#wd: 10-min. Mean wind direction
#U: 10-min. Mean horizontal wind speed
#height_needed: Measurement height corresponding to velocity data
#frequency_vert_beam: Sampling frequency of data from vertically pointing beam
#el_angle: Elevation angle of off-vertical beam positions (in degrees, measured from the ground)
#mode: Type of variance contamination adjustment to be applied. Options are taylor_ws and taylor_var.
#Outputs
#var_diff: Estimate of increase in streamwise variance due to variance contamination
import numpy as np
w_N = np.zeros(len(vr_z))
w_N[:] = np.nan
w_E = np.zeros(len(vr_z))
w_E[:] = np.nan
w_S = np.zeros(len(vr_z))
w_S[:] = np.nan
w_W = np.zeros(len(vr_z))
w_W[:] = np.nan
u_bar = np.sin(np.radians(wd - 180))*U
v_bar = np.cos(np.radians(wd - 180))*U
delta_t_vert_beam = 1./frequency_vert_beam
#Calculate the number of time steps needed for eddies to travel from one
#side of the scanning circle to the other
dist = height_needed/np.tan(np.radians(el_angle))
delta_t_u = dist/u_bar
    #Use built-in round() (as in the earlier copy of this function) so the
    #intervals are integers that can be used as array indices
    interval_u = round(delta_t_u/delta_t_vert_beam)
    delta_t_v = dist/v_bar
    interval_v = round(delta_t_v/delta_t_vert_beam)
#Estimate values of w at different sides of the scanning circle by using
#Taylor's frozen turbulence hypothesis
for i in range(len(vr_z)):
try:
w_N[i] = vr_z[i-interval_v]
w_E[i] = vr_z[i-interval_u]
except:
w_N[i] = np.nan
w_E[i] = np.nan
try:
w_S[i] = vr_z[i+interval_v]
w_W[i] = vr_z[i+interval_u]
except:
w_S[i] = np.nan
w_W[i] = np.nan
if "taylor_ws" in mode:
#Use the new values of w to estimate the u and v components using the DBS technique
#and calculate the variance
u_DBS_new = ((vr_e-vr_w) - (w_E-w_W)*np.sin(np.radians(el_angle)))/(2*np.cos(np.radians(el_angle)))
v_DBS_new = ((vr_n-vr_s) - (w_N-w_S)*np.sin(np.radians(el_angle)))/(2*np.cos(np.radians(el_angle)))
u_var_lidar_new = get_10min_var(u_DBS_new,frequency_vert_beam)
v_var_lidar_new = get_10min_var(v_DBS_new,frequency_vert_beam)
else:
#Calculate change in w across the scanning circle in north-south and east-west directions
dw_est1 = w_S - w_N
dw_est2 = w_W - w_E
vr1_var = get_10min_var(vr_n,1./4)
vr2_var = get_10min_var(vr_e,1./4)
vr3_var = get_10min_var(vr_s,1./4)
vr4_var = get_10min_var(vr_w,1./4)
dw_var1 = get_10min_var(dw_est1,1./4)
dw_var2 = get_10min_var(dw_est2,1./4)
vr1_vr3_var = get_10min_covar(vr_n,vr_s,1./4)
vr2_vr4_var = get_10min_covar(vr_e,vr_w,1./4)
vr1_dw_var = get_10min_covar(vr_n,dw_est1,1./4)
vr3_dw_var = get_10min_covar(vr_s,dw_est1,1./4)
vr2_dw_var = get_10min_covar(vr_e,dw_est2,1./4)
vr4_dw_var = get_10min_covar(vr_w,dw_est2,1./4)
#These equations are adapted from Newman et al. (2016), neglecting terms involving
#du or dv, as these terms are expected to be small compared to dw
#Reference: Newman, J. F., P. M. Klein, S. Wharton, A. Sathe, T. A. Bonin,
#P. B. Chilson, and A. Muschinski, 2016: Evaluation of three lidar scanning
#strategies for turbulence measurements, Atmos. Meas. Tech., 9, 1993-2013.
u_var_lidar_new = (1./(4*np.cos(np.radians(el_angle))**2))*(vr2_var + vr4_var- 2*vr2_vr4_var + 2*vr2_dw_var*np.sin(np.radians(el_angle)) \
- 2*vr4_dw_var*np.sin(np.radians(el_angle)) + dw_var2*np.sin(np.radians(el_angle))**2)
v_var_lidar_new = (1./(4*np.cos(np.radians(el_angle))**2))*(vr1_var + vr3_var- 2*vr1_vr3_var + 2*vr1_dw_var*np.sin(np.radians(el_angle)) \
- 2*vr3_dw_var*np.sin(np.radians(el_angle)) + dw_var1*np.sin(np.radians(el_angle))**2)
#Rotate the variance into the mean wind direction
#Note: The rotation should include a term with the uv covariance, but the
#covariance terms are also affected by variance contamination. In Newman
#et al. (2016), it was found that the uv covariance is usually close to 0 and
#can safely be neglected.
#Reference: Newman, J. F., P. M. Klein, S. Wharton, A. Sathe, T. A. Bonin,
#P. B. Chilson, and A. Muschinski, 2016: Evaluation of three lidar scanning
#strategies for turbulence measurements, Atmos. Meas. Tech., 9, 1993-2013.
u_rot_var_new = u_var_lidar_new*(np.sin(np.radians(wd)))**2 + v_var_lidar_new*(np.cos(np.radians(wd)))**2
#Calculate the wind speed and variance if w is assumed to be the same on all
#sides of the scanning circle
u_DBS = (vr_e-vr_w)/(2*np.cos(np.radians(el_angle)))
v_DBS = (vr_n-vr_s)/(2*np.cos(np.radians(el_angle)))
u_var_DBS = get_10min_var(u_DBS,frequency_vert_beam)
v_var_DBS = get_10min_var(v_DBS,frequency_vert_beam)
u_rot_var = u_var_DBS*(np.sin(np.radians(wd)))**2 + v_var_DBS*(np.cos(np.radians(wd)))**2
return u_rot_var-u_rot_var_new
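# Illustrative sketch of the DBS retrieval used above: for a uniform eastward
# wind with no vertical motion, the east/west radial velocities alone recover
# the u component. The elevation angle and wind values are arbitrary.
def _demo_dbs_retrieval():
    el_angle = 62. #elevation angle in degrees
    u_true, w_true = 8., 0.
    vr_e = u_true*np.cos(np.radians(el_angle)) + w_true*np.sin(np.radians(el_angle))
    vr_w = -u_true*np.cos(np.radians(el_angle)) + w_true*np.sin(np.radians(el_angle))
    return (vr_e - vr_w)/(2*np.cos(np.radians(el_angle))) # -> 8.0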
def lidar_processing_vol_averaging(u,frequency,mode_ws,mode_vol):
#Function to estimate variance lost due to volume/temporal averaging
#Inputs
#u: Time series of streamwise wind speed
#frequency: Sampling frequency of time series
#mode_ws: raw_WC, VAD, or raw_ZephIR
#mode_vol: Type of volume averaging adjustment to be applied. Options are spectral_adjustment_fit and acf.
#Outputs
#var_diff: Estimate of loss of streamwise variance due to volume averaging
var_diff = spectral_adjustment(u,frequency,mode_ws,mode_vol)
return var_diff
def lidar_processing_var_contam(vr_n,vr_e,vr_s,vr_w,vr_z,wd,U,height_needed,frequency_vert_beam,el_angle,mode):
#Function to estimate additional variance that results from variance contamination
#Inputs
#vr_n, vr_e, vr_s, vr_w, vr_z: Time series of radial velocity from north-, east-, south, west-, and
#vertically pointing beams, respectively, at height of interest.
#wd: 10-min. Mean wind direction
#U: 10-min. Mean horizontal wind speed
#height_needed: Measurement height corresponding to velocity data
#frequency_vert_beam: Sampling frequency of data from vertically pointing beam
#el_angle: Elevation angle of off-vertical beam positions (in degrees, measured from the ground)
#mode: Type of variance contamination adjustment to be applied. Options are taylor_ws and taylor_var.
#Outputs
#var_diff: Estimate of increase in streamwise variance due to variance contamination
var_diff = var_adjustment(vr_n,vr_e,vr_s,vr_w,vr_z,wd,U,height_needed,frequency_vert_beam,el_angle,mode)
#Set negative values of var_diff to 0 as they would increase the corrected variance
#Note: This is not the best procedure and should probably be fixed at some point.
#It's possible that at times, the change in w across the scanning circle could
#decrease, rather than increase, the u and v variance.
    try:
        if var_diff < 0:
            var_diff = 0.
        return var_diff
    except:
        var_diff = 0.
        return var_diff
def VAD_func(az, x1, x2, x3):
import numpy as np
return np.array(x3+x1*np.cos(np.radians(az)-x2))
def get_10min_var(ts,frequency):
#Calculates variance for each 10-min. period
#Inputs
#ts: Time series of data
#frequency: Sampling frequency of data
#Outputs
#ts_var: 10-min. variance values from time series
import numpy as np
#Number of samples in a 10-min period
ten_min_count = int(frequency*60*10)
ts_var = []
for i in np.arange(0,len(ts)-ten_min_count+1,ten_min_count):
ts_temp = ts[i:i+ten_min_count]
ts_var.append(np.nanmean((ts_temp-np.nanmean(ts_temp))**2))
return np.array(ts_var)
def get_10min_spectrum(ts,frequency):
#Calculate power spectrum for 10-min. period
#Inputs
#ts: Time series of data
#frequency: Sampling frequency of data
#Outputs
#S_A_fast: Spectral power
    #frequency_fft: Frequencies corresponding to spectral power values
import numpy as np
N = len(ts)
delta_f = float(frequency)/N
    frequency_fft = np.linspace(0,float(frequency)/2,N//2)
    F_A_fast = np.fft.fft(ts)/N
    E_A_fast = 2*abs(F_A_fast[0:N//2]**2)
S_A_fast = (E_A_fast)/delta_f
return S_A_fast,frequency_fft
def rotate_ws(u,v,w,frequency):
#Performs coordinate rotation according to Eqs. 22-29 in Wilczak et al. (2001)
#Reference: Wilczak, J. M., S. P. Oncley, and S. A. Stage, 2001: Sonic anemometer tilt adjustment algorithms.
#Bound.-Layer Meteor., 99, 127–150.
#Inputs
#u, v, w: Time series of east-west, north-south, and vertical wind speed components, respectively
#frequency: Sampling frequency of velocity
#Outputs
#u_rot, v_rot, w_rot: Rotated u, v, and w wind speed, with u rotated into the 10-min. mean wind direction and
#the 10-min. mean of v and w forced to 0
import numpy as np
#Number of samples in a 10-min period
ten_min_count = int(frequency*60*10)
u_rot = []
v_rot = []
w_rot = []
#Perform coordinate rotation. First rotation rotates u into the mean wind direction and forces the mean v to 0.
#Second rotation forces the mean w to 0.
for i in np.arange(0,len(u)-ten_min_count+1,ten_min_count):
u_temp = u[i:i+ten_min_count]
v_temp = v[i:i+ten_min_count]
w_temp = w[i:i+ten_min_count]
phi_temp = np.arctan2(np.nanmean(v_temp),np.nanmean(u_temp))
u1_temp = u_temp*np.cos(phi_temp) + v_temp*np.sin(phi_temp)
v1_temp = -u_temp*np.sin(phi_temp) + v_temp*np.cos(phi_temp)
        w1_temp = w_temp
phi_temp2 = np.arctan2(np.nanmean(w1_temp),np.nanmean(u1_temp))
u_rot.append(u1_temp*np.cos(phi_temp2) + w1_temp*np.sin(phi_temp2))
v_rot.append(v1_temp)
w_rot.append(-u1_temp*np.sin(phi_temp2) + w1_temp*np.cos(phi_temp2))
return np.array(u_rot).ravel(),np.array(v_rot).ravel(),np.array(w_rot).ravel()
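# Illustrative sketch (synthetic data): after rotate_ws, the 10-min. means of
# the rotated v and w components are forced to ~0 and u carries the mean
# horizontal wind. The seed and magnitudes are arbitrary.
def _demo_rotate_ws():
    rng = np.random.default_rng(0)
    n = 600 #one 10-min. window at 1 Hz
    u = 6. + 0.1*rng.standard_normal(n)
    v = 3. + 0.1*rng.standard_normal(n)
    w = 0.5 + 0.1*rng.standard_normal(n)
    u_rot, v_rot, w_rot = rotate_ws(u, v, w, 1.)
    return np.nanmean(u_rot), np.nanmean(v_rot), np.nanmean(w_rot)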
def get_10min_mean_ws_wd(u,v,time,frequency):
#Calculates the 10-min. scalar average wind speed and wind direction at all measurement heights
#Inputs
#u: East-west velocity time series
#v: North-south velocity time series
#time: Timestamps in datetime format
#frequency: Sampling frequency of velocity data
#Outputs
#U: 10-min. mean horizontal wind speeds
#wd: 10-min. mean wind direction
#time_datenum_10min: Timestamp corresponding to the start of each 10-min. averaging period
import numpy as np
ten_min_count = int(frequency*60*10)
U = []
wd = []
time_datenum_10min = []
for i in np.arange(0,len(u)-ten_min_count+1,ten_min_count):
U_height = []
wd_height = []
#10-min. window of data
if len(np.shape(u)) > 1:
u_temp = u[i:i+ten_min_count,:]
v_temp = v[i:i+ten_min_count,:]
        else:
            #Promote a 1-D series to a single-column 2-D array so the height loop below works
            u_temp = np.reshape(u[i:i+ten_min_count], (-1, 1))
            v_temp = np.reshape(v[i:i+ten_min_count], (-1, 1))
for j in range(np.shape(u_temp)[1]):
            U_height.append(np.nanmean((u_temp[:,j]**2 + v_temp[:,j]**2)**0.5,axis=0))
u_bar = np.nanmean(u_temp[:,j])
v_bar = np.nanmean(v_temp[:,j])
wd_height.append((180./np.pi)*(np.arctan2(u_bar,v_bar) + np.pi))
U.append(U_height)
wd.append(wd_height)
time_datenum_10min.append(time[i])
return np.array(U),np.array(wd),time_datenum_10min
def get_10min_shear_parameter(U,heights,height_needed):
import functools
#Calculates the shear parameter for every 10-min. period of data by fitting power law equation to
#10-min. mean wind speeds
#Inputs
#U: 10-min. mean horizontal wind speed at all measurement heights
#heights: Measurement heights
#height_needed: Height where TI is being extracted - values used to calculate shear parameter
#should be centered around this height
#Outputs
#p: 10-min. values of shear parameter
import warnings
p = []
#Set heights for calculation of shear parameter and find corresponding indices
zprofile = np.arange(0.5*height_needed,1.5*height_needed + 10,10)
height_indices = np.unique(min_diff(heights,zprofile,5))
height_indices = height_indices[~np.isnan(height_indices)]
#Arrays of height and mean wind speed to use for calculation
heights_temp = np.array([heights[int(i)] for i in height_indices])
U_temp = np.array([U[:,int(i)] for i in height_indices])
mask = [~np.isnan(U_temp)]
mask = functools.reduce(np.logical_and, mask)
with warnings.catch_warnings():
warnings.filterwarnings('error')
#For each set of 10-min. U values, use linear fit to determine value of shear parameter
for i in range(0,len(U)):
try:
try:
p_temp = np.polyfit(np.log(heights_temp[mask[:,i]]),np.log(U_temp[mask[:,i],i]),1)
p.append(p_temp[0])
except np.RankWarning:
p.append(np.nan)
except:
p.append(np.nan)
return np.array(p)
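# Illustrative sketch (synthetic data): wind speeds following a power law
# U(z) = U_ref*(z/z_ref)**0.2 at four heights should give a shear parameter
# of ~0.2 for each 10-min. record. The heights and speeds are hypothetical.
def _demo_shear_parameter():
    heights = np.array([40., 60., 80., 120.])
    U_profile = 8.*(heights/80.)**0.2
    U = np.tile(U_profile, (3, 1)) #three identical 10-min. records
    return get_10min_shear_parameter(U, heights, 80.)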
def interp_ts(ts,time_datenum,interval):
#Interpolates time series ts with timestamps time_datenum to a grid with constant temporal spacing of "interval"
#Inputs
#ts: Time series for interpolation
#time_datenum: Original timestamps for time series in datetime format
#interval: Temporal interval to use for interpolation
#Outputs
#ts_interp: Interpolated time series
#time_interp: Timestamps of interpolated time series in datetime format
import numpy as np
from datetime import datetime
import calendar as cal
#Convert timestamps to unix time (seconds after 1970 01-01) as it's easier to perform the interpolation
unix_time = []
for i in range(0,len(time_datenum)):
unix_time.append(cal.timegm(datetime.timetuple(time_datenum[i])) + (time_datenum[i].microsecond/1e6))
unix_time = np.array(unix_time)
#Select the start and end time for the interpolation
#The starting minute value of the interpolation should be the next multiple of 10
if time_datenum[0].minute%10 == 0:
start_minute = str((time_datenum[0].minute//10)*10)
else:
start_minute = str((time_datenum[0].minute//10 + 1)*10)
start_hour = str(time_datenum[0].hour)
if int(start_minute) == 60:
start_minute = '00'
start_hour = str(time_datenum[0].hour + 1)
end_hour = str(time_datenum[-1].hour)
#The ending minute value of the interpolation should end with a 9
if (time_datenum[-1].minute-9)%10 == 0:
end_minute = str((time_datenum[-1].minute//10*10) + 9)
else:
end_minute = str((time_datenum[-1].minute//10)*10 - 1)
if int(end_minute) < 0:
end_minute = '59'
end_hour = str(time_datenum[-1].hour - 1)
#Convert start and end times into unix time and get interpolation times in unix time
timestamp_start = str(time_datenum[0].year) + "/" + str(time_datenum[0].month) + "/" + str(time_datenum[0].day) + \
" " + start_hour + ":" + start_minute + ":00"
time_datenum_start = datetime.strptime(timestamp_start,"%Y/%m/%d %H:%M:%S")
unix_time_start = cal.timegm(datetime.timetuple(time_datenum_start))
timestamp_end = str(time_datenum[-1].year) + "/" + str(time_datenum[-1].month) + "/" + str(time_datenum[-1].day) + \
" " + end_hour + ":" + end_minute + ":59"
time_datenum_end = datetime.strptime(timestamp_end,"%Y/%m/%d %H:%M:%S")
unix_time_end = cal.timegm(datetime.timetuple(time_datenum_end))
time_interp_unix = np.arange(unix_time_start,unix_time_end+1,interval)
#Interpolate time series
ts_interp = []
#If more than 75% of the data are valid, perform interpolation using only non-NaN data. (Every fifth point of the
#u and v data will be NaNs because of the vertically pointing beam.)
if float(len(ts[~np.isnan(ts)])/float(len(ts))) > 0.75:
ts_temp = ts[~np.isnan(ts)]
time_temp = unix_time[~np.isnan(ts)]
else:
ts_temp = ts
time_temp = unix_time
ts_interp = np.interp(time_interp_unix,time_temp,ts_temp)
#If several points in a row have the same value, set these points to NaN. This can occur when the interpolation is
#performed on a dataset with one valid value surrounded by several NaNs.
for i in range(2,len(ts_interp)-2):
if ts_interp[i-2] == ts_interp[i] and ts_interp[i+2] == ts_interp[i]:
ts_interp[i-2:i+2] = np.nan
time_interp = [datetime.utcfromtimestamp(int(i) + round(i-int(i),10)) for i in time_interp_unix]
return np.transpose(ts_interp),time_interp
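# Illustrative sketch (synthetic data): resample an irregularly spaced series
# onto a regular 1-s grid with interp_ts. The start time and spacing are
# arbitrary.
def _demo_interp_ts():
    from datetime import datetime, timedelta
    t0 = datetime(2020, 1, 1, 0, 0, 0)
    time_datenum = [t0 + timedelta(seconds=1.1*i) for i in range(1200)]
    ts = np.sin(np.arange(1200)/100.)
    ts_interp, time_interp = interp_ts(ts, time_datenum, 1.)
    return ts_interp, time_interp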
def calculate_stability_alpha(inputdata, config_file, RSD_alphaFlag, Ht_1_rsd, Ht_2_rsd):
'''
from Wharton and Lundquist 2012
stability class from shear exponent categories:
[1] strongly stable -------- alpha > 0.3
[2] stable -------- 0.2 < alpha < 0.3
    [3] near-neutral -------- 0.1 < alpha < 0.2
    [4] convective -------- 0.0 < alpha < 0.1
[5] strongly convective -------- alpha < 0.0
'''
regimeBreakdown_ane = pd.DataFrame()
#check for 2 anemometer heights (use furthest apart) for cup alpha calculation
configHtData = pd.read_excel(config_file, usecols=[3, 4], nrows=17).iloc[[3,12,13,14,15]]
primaryHeight = configHtData['Selection'].to_list()[0]
all_heights, ane_heights, RSD_heights, ane_cols, RSD_cols = config.check_for_additional_heights(primaryHeight)
if len(list(ane_heights))> 1:
all_keys = list(all_heights.values())
max_key = list(all_heights.keys())[all_keys.index(max(all_heights.values()))]
min_key = list(all_heights.keys())[all_keys.index(min(all_heights.values()))]
if max_key == 'primary':
max_cols = [s for s in inputdata.columns.to_list() if 'Ref' in s and 'WS' in s]
else:
subname = str('Ht' + str(max_key))
max_cols = [s for s in inputdata.columns.to_list() if subname in s and 'Ane' in s and 'WS' in s]
if min_key == 'primary':
min_cols = [s for s in inputdata.columns.to_list() if 'Ref' in s and 'WS' in s]
else:
subname = str('Ht' + str(min_key))
min_cols = [s for s in inputdata.columns.to_list() if subname in s and 'Ane' in s and 'WS' in s]
# Calculate shear exponent
tmp = pd.DataFrame(None)
baseName = str(max_cols + min_cols)
tmp[str(baseName + '_y')] = [val for sublist in log_of_ratio(inputdata[max_cols].values.astype(float),
inputdata[min_cols].values.astype(float)) for val in sublist]
tmp[str(baseName + '_alpha')] = tmp[str(baseName + '_y')] / (log_of_ratio(max(all_heights.values()), min(all_heights.values())))
stabilityMetric_ane = tmp[str(baseName + '_alpha')]
Ht_2_ane = max(all_heights.values())
Ht_1_ane = min(all_heights.values())
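        # NOTE: the class breakpoints below (0.4/0.7/1.0/1.4) mirror the TKE limits from
        # Wharton and Lundquist (2012) rather than the alpha limits listed in the docstring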
tmp[str(baseName + 'stabilityClass')] = tmp[str(baseName + '_alpha')]
tmp.loc[(tmp[str(baseName + '_alpha')] <= 0.4), str(baseName + 'stabilityClass')] = 1
tmp.loc[(tmp[str(baseName + '_alpha')] > 0.4) & (tmp[str(baseName + '_alpha')] <= 0.7), str(baseName + 'stabilityClass')] = 2
tmp.loc[(tmp[str(baseName + '_alpha')] > 0.7) & (tmp[str(baseName + '_alpha')] <= 1.0), str(baseName + 'stabilityClass')] = 3
tmp.loc[(tmp[str(baseName + '_alpha')] > 1.0) & (tmp[str(baseName + '_alpha')] <= 1.4), str(baseName + 'stabilityClass')] = 4
tmp.loc[(tmp[str(baseName + '_alpha')] > 1.4), str(baseName + 'stabilityClass')] = 5
# get count and percent of data in each class
numNans = tmp[str(baseName) + '_alpha'].isnull().sum()
totalCount = len(inputdata) - numNans
name_class = str('stability_shear' + '_class')
name_stabilityClass = str(baseName + 'stabilityClass')
regimeBreakdown_ane[name_class] = ['1 (strongly stable)', '2 (stable)', '3 (near-neutral)', '4 (convective)', '5 (strongly convective)']
name_count = str('stability_shear_obs' + '_count')
regimeBreakdown_ane[name_count] = [len(tmp[(tmp[name_stabilityClass] == 1)]), len(tmp[(tmp[name_stabilityClass] == 2)]),
len(tmp[(tmp[name_stabilityClass] == 3)]), len(tmp[(tmp[name_stabilityClass] == 4)]),
len(tmp[(tmp[name_stabilityClass] == 5)])]
name_percent = str('stability_shear_obs' + '_percent')
regimeBreakdown_ane[name_percent] = [len(tmp[(tmp[name_stabilityClass] == 1)])/totalCount, len(tmp[(tmp[name_stabilityClass] == 2)])/totalCount,
len(tmp[(tmp[name_stabilityClass] == 3)])/totalCount, len(tmp[(tmp[name_stabilityClass] == 4)])/totalCount,
len(tmp[(tmp[name_stabilityClass] == 5)])/totalCount]
stabilityClass_ane = tmp[name_stabilityClass]
cup_alphaFlag = True
else:
stabilityClass_ane = None
stabilityMetric_ane = None
regimeBreakdown_ane = None
Ht_1_ane = None
Ht_2_ane = None
cup_alphaFlag = False
# If possible, perform stability calculation with RSD data
if RSD_alphaFlag:
regimeBreakdown_rsd = pd.DataFrame()
tmp = pd.DataFrame(None)
baseName = str('WS_' + str(Ht_1_rsd) + '_' + 'WS_' + str(Ht_2_rsd))
max_col = 'RSD_alpha_lowHeight'
min_col = 'RSD_alpha_highHeight'
tmp[str(baseName + '_y')] = log_of_ratio(inputdata[max_col].values.astype(float),inputdata[min_col].values.astype(float))
tmp[str(baseName + '_alpha')] = tmp[str(baseName + '_y')] / (log_of_ratio(Ht_2_rsd, Ht_1_rsd))
stabilityMetric_rsd = tmp[str(baseName + '_alpha')]
tmp[str(baseName + 'stabilityClass')] = tmp[str(baseName + '_alpha')]
tmp.loc[(tmp[str(baseName + '_alpha')] <= 0.4), str(baseName + 'stabilityClass')] = 1
tmp.loc[(tmp[str(baseName + '_alpha')] > 0.4) & (tmp[str(baseName + '_alpha')] <= 0.7), str(baseName + 'stabilityClass')] = 2
tmp.loc[(tmp[str(baseName + '_alpha')] > 0.7) & (tmp[str(baseName + '_alpha')] <= 1.0), str(baseName + 'stabilityClass')] = 3
tmp.loc[(tmp[str(baseName + '_alpha')] > 1.0) & (tmp[str(baseName + '_alpha')] <= 1.4), str(baseName + 'stabilityClass')] = 4
tmp.loc[(tmp[str(baseName + '_alpha')] > 1.4), str(baseName + 'stabilityClass')] = 5
# get count and percent of data in each class
numNans = tmp[str(baseName) + '_alpha'].isnull().sum()
totalCount = len(inputdata) - numNans
        name_stabilityClass = str(baseName + 'stabilityClass')
        name_class = str('stability_shear' + '_class')
regimeBreakdown_rsd[name_class] = ['1 (strongly stable)', '2 (stable)', '3 (near-neutral)', '4 (convective)', '5 (strongly convective)']
name_count = str('stability_shear_obs' + '_count')
regimeBreakdown_rsd[name_count] = [len(tmp[(tmp[name_stabilityClass] == 1)]), len(tmp[(tmp[name_stabilityClass] == 2)]),
len(tmp[(tmp[name_stabilityClass] == 3)]), len(tmp[(tmp[name_stabilityClass] == 4)]),
len(tmp[(tmp[name_stabilityClass] == 5)])]
name_percent = str('stability_shear_obs' + '_percent')
regimeBreakdown_rsd[name_percent] = [len(tmp[(tmp[name_stabilityClass] == 1)])/totalCount, len(tmp[(tmp[name_stabilityClass] == 2)])/totalCount,
len(tmp[(tmp[name_stabilityClass] == 3)])/totalCount, len(tmp[(tmp[name_stabilityClass] == 4)])/totalCount,
len(tmp[(tmp[name_stabilityClass] == 5)])/totalCount]
stabilityClass_rsd = tmp[name_stabilityClass]
else:
stabilityClass_rsd = None
stabilityMetric_rsd = None
regimeBreakdown_rsd = None
Ht_1_rsd = None
Ht_2_rsd = None
return cup_alphaFlag,stabilityClass_ane, stabilityMetric_ane, regimeBreakdown_ane, Ht_1_ane, Ht_2_ane, stabilityClass_rsd, stabilityMetric_rsd, regimeBreakdown_rsd
def calculate_stability_TKE(inputdata):
'''
from Wharton and Lundquist 2012
stability class from TKE categories:
    [1] strongly stable -------- TKE < 0.4 m^2/s^2
    [2] stable -------- 0.4 < TKE < 0.7 m^2/s^2
    [3] near-neutral -------- 0.7 < TKE < 1.0 m^2/s^2
    [4] convective -------- 1.0 < TKE < 1.4 m^2/s^2
    [5] strongly convective -------- TKE > 1.4 m^2/s^2
'''
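    # Worked example (sketch, hypothetical values): with sigma_u = 0.8, sigma_v = 0.6 and
    # sigma_w = 0.4 m/s, TKE = 0.5 * (0.8**2 + 0.6**2 + 0.4**2) = 0.58 m^2/s^2, the 'stable' class.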
regimeBreakdown = pd.DataFrame()
# check to see if instrument type allows the calculation
if RSDtype['Selection']=='Triton':
print ('Triton TKE calc')
elif 'ZX' in RSDtype['Selection']:
# look for pre-calculated TKE column
TKE_cols = [s for s in inputdata.columns.to_list() if 'TKE' in s or 'tke' in s]
if len(TKE_cols) < 1:
            print ('!!!!!!!!!!!!!!!!!!!!!!!! Warning: Input data does not include calculated TKE. Exiting tool. Either add TKE to input data or contact aea@nrgsystems.com for assistance !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
sys.exit()
else:
for t in TKE_cols:
name_stabilityClass = str(t + '_class')
inputdata[name_stabilityClass] = inputdata[t]
inputdata.loc[(inputdata[t] <= 0.4), name_stabilityClass] = 1
inputdata.loc[(inputdata[t] > 0.4) & (inputdata[t] <= 0.7), name_stabilityClass] = 2
inputdata.loc[(inputdata[t] > 0.7) & (inputdata[t] <= 1.0), name_stabilityClass] = 3
inputdata.loc[(inputdata[t] > 1.0) & (inputdata[t] <= 1.4), name_stabilityClass] = 4
inputdata.loc[(inputdata[t] > 1.4), name_stabilityClass] = 5
# get count and percent of data in each class
numNans = inputdata[t].isnull().sum()
totalCount = len(inputdata) - numNans
regimeBreakdown[name_stabilityClass] = ['1 (strongly stable)', '2 (stable)', '3 (near-neutral)', '4 (convective)', '5 (strongly convective)']
name_count = str(name_stabilityClass.split('_class')[0] + '_count')
regimeBreakdown[name_count] = [len(inputdata[(inputdata[name_stabilityClass] == 1)]), len(inputdata[(inputdata[name_stabilityClass] == 2)]),
len(inputdata[(inputdata[name_stabilityClass] == 3)]), len(inputdata[(inputdata[name_stabilityClass] == 4)]),
len(inputdata[(inputdata[name_stabilityClass] == 5)])]
name_percent = str(name_stabilityClass.split('_class')[0] + '_percent')
regimeBreakdown[name_percent] = [len(inputdata[(inputdata[name_stabilityClass] == 1)])/totalCount, len(inputdata[(inputdata[name_stabilityClass] == 2)])/totalCount,
len(inputdata[(inputdata[name_stabilityClass] == 3)])/totalCount, len(inputdata[(inputdata[name_stabilityClass] == 4)])/totalCount,
len(inputdata[(inputdata[name_stabilityClass] == 5)])/totalCount]
elif 'WindCube' in RSDtype['Selection']:
# convert to radians
dir_cols = [s for s in inputdata.columns.to_list() if 'Direction' in s]
if len(dir_cols)==0:
stabilityClass = None
stabilityMetric = None
regimeBreakdown = None
            print ('Warning: Could not find direction columns in configuration key for TKE-derived stability. Check data.')
sys.exit()
else:
for c in dir_cols:
name_radians = str(c + '_radians')
inputdata[name_radians] = inputdata[c] * (math.pi/180)
if name_radians.split('_')[2] == 'radians':
name_u_std = str(name_radians.split('_')[0] + '_u_std')
name_v_std = str(name_radians.split('_')[0] + '_v_std')
else:
name_u_std = str(name_radians.split('_')[0] + '_' + name_radians.split('_')[2] + '_u_std')
name_v_std = str(name_radians.split('_')[0] + '_' + name_radians.split('_')[2] + '_v_std')
name_dispersion = None
name_std = c.replace('Direction','SD')
inputdata[name_u_std] = inputdata[name_std] * np.cos(inputdata[name_radians])
inputdata[name_v_std] = inputdata[name_std] * np.sin(inputdata[name_radians])
name_tke = str(name_u_std.split('_u')[0] + '_LidarTKE')
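                # Note: cos**2 + sin**2 = 1, so u_std**2 + v_std**2 equals inputdata[name_std]**2
                # and the expression below simplifies to inputdata[name_std]**2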
inputdata[name_tke] = 0.5 * (inputdata[name_u_std]**2 + inputdata[name_v_std]**2 + inputdata[name_std]**2)
name_stabilityClass = str(name_tke + '_class')
inputdata[name_stabilityClass] = inputdata[name_tke]
inputdata.loc[(inputdata[name_tke] <= 0.4), name_stabilityClass] = 1
inputdata.loc[(inputdata[name_tke] > 0.4) & (inputdata[name_tke] <= 0.7), name_stabilityClass] = 2
inputdata.loc[(inputdata[name_tke] > 0.7) & (inputdata[name_tke] <= 1.0), name_stabilityClass] = 3
inputdata.loc[(inputdata[name_tke] > 1.0) & (inputdata[name_tke] <= 1.4), name_stabilityClass] = 4
inputdata.loc[(inputdata[name_tke] > 1.4), name_stabilityClass] = 5
# get count and percent of data in each class
numNans = inputdata[name_tke].isnull().sum()
totalCount = len(inputdata) - numNans
name_class = str(name_u_std.split('_u')[0] + '_class')
regimeBreakdown[name_class] = ['1 (strongly stable)', '2 (stable)', '3 (near-neutral)', '4 (convective)', '5 (strongly convective)']
name_count = str(name_u_std.split('_u')[0] + '_count')
regimeBreakdown[name_count] = [len(inputdata[(inputdata[name_stabilityClass] == 1)]), len(inputdata[(inputdata[name_stabilityClass] == 2)]),
len(inputdata[(inputdata[name_stabilityClass] == 3)]), len(inputdata[(inputdata[name_stabilityClass] == 4)]),
len(inputdata[(inputdata[name_stabilityClass] == 5)])]
name_percent = str(name_u_std.split('_u')[0] + '_percent')
regimeBreakdown[name_percent] = [len(inputdata[(inputdata[name_stabilityClass] == 1)])/totalCount, len(inputdata[(inputdata[name_stabilityClass] == 2)])/totalCount,
len(inputdata[(inputdata[name_stabilityClass] == 3)])/totalCount, len(inputdata[(inputdata[name_stabilityClass] == 4)])/totalCount,
len(inputdata[(inputdata[name_stabilityClass] == 5)])/totalCount]
else:
        print ('Warning: Due to sensor type, TKE is not being calculated.')
stabilityClass = None
stabilityMetric = None
regimeBreakdown = None
classCols = [s for s in inputdata.columns.to_list() if '_class' in s]
stabilityClass = inputdata[classCols]
tkeCols = [s for s in inputdata.columns.to_list() if '_LidarTKE' in s or 'TKE' in s or 'tke' in s]
tkeCols = [s for s in tkeCols if '_class' not in s]
stabilityMetric = inputdata[tkeCols]
return stabilityClass, stabilityMetric, regimeBreakdown
def initialize_resultsLists(appendString):
resultsLists = {}
resultsLists[str('TI_MBEList' + '_' + appendString)] = []
resultsLists[str('TI_DiffList' + '_' + appendString)] = []
resultsLists[str('TI_DiffRefBinsList' + '_' + appendString)] = []
resultsLists[str('TI_RMSEList' + '_' + appendString)] = []
resultsLists[str('RepTI_MBEList' + '_' + appendString)] = []
resultsLists[str('RepTI_DiffList' + '_' + appendString)] = []
resultsLists[str('RepTI_DiffRefBinsList' + '_' + appendString)] = []
resultsLists[str('RepTI_RMSEList' + '_' + appendString)] = []
resultsLists[str('rep_TI_results_1mps_List' + '_' + appendString)] = []
resultsLists[str('rep_TI_results_05mps_List' + '_' + appendString)] = []
resultsLists[str('TIBinList' + '_' + appendString)] = []
resultsLists[str('TIRefBinList' + '_' + appendString)] = []
resultsLists[str('total_StatsList' + '_' + appendString)] = []
resultsLists[str('belownominal_statsList' + '_' + appendString)] = []
resultsLists[str('abovenominal_statsList' + '_' + appendString)] = []
resultsLists[str('lm_adjList' + '_' + appendString)] = []
resultsLists[str('adjustmentTagList' + '_' + appendString)] = []
resultsLists[str('Distribution_statsList' + '_' + appendString)] = []
resultsLists[str('sampleTestsLists' + '_' + appendString)] = []
return resultsLists
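# Usage sketch: keys are the metric name joined to the append string, so
#   baseResultsLists = initialize_resultsLists('')
# creates e.g. baseResultsLists['TI_MBEList_'], while the class- and stability-level
# variants ('class_', 'stability_') reuse the same naming pattern.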
def train_test_split(trainPercent, inputdata, stepOverride = False):
    '''
    Split inputdata into train and test sets; rows with 'split' == True form the training set.
    '''
import copy
import numpy as np
_inputdata = pd.DataFrame(columns=inputdata.columns, data=copy.deepcopy(inputdata.values))
if stepOverride:
msk = [False] * len(inputdata)
_inputdata['split'] = msk
_inputdata.loc[stepOverride[0]:stepOverride[1], 'split'] = True
    else:
        msk = np.random.rand(len(_inputdata)) < float(trainPercent/100)
        _inputdata['split'] = msk
return _inputdata
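# Usage sketch (hypothetical values): a random split vs. a fixed observation window,
#   df = train_test_split(80.0, inputdata.copy())                      # ~80% of rows get split == True
#   df = train_test_split(0, inputdata.copy(), stepOverride=[0, 144])  # only the first ~day is True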
def quick_metrics(inputdata, results_df, lm_adj_dict, testID):
""""""
from TACT.computation.adjustments import Adjustments
_adjuster = Adjustments(raw_data=inputdata)
inputdata_train = inputdata[inputdata['split'] == True].copy()
inputdata_test = inputdata[inputdata['split'] == False].copy()
# baseline results
results_ = get_all_regressions(inputdata_test, title='baselines')
results_RSD_Ref = results_.loc[results_['baselines'].isin(['TI_regression_Ref_RSD'])].reset_index()
results_Ane2_Ref = results_.loc[results_['baselines'].isin(['TI_regression_Ref_Ane2'])].reset_index()
results_RSD_Ref_SD = results_.loc[results_['baselines'].isin(['SD_regression_Ref_RSD'])].reset_index()
results_Ane2_Ref_SD = results_.loc[results_['baselines'].isin(['SD_regression_Ref_Ane2'])].reset_index()
results_RSD_Ref_WS = results_.loc[results_['baselines'].isin(['WS_regression_Ref_RSD'])].reset_index()
results_Ane2_Ref_WS = results_.loc[results_['baselines'].isin(['WS_regression_Ref_Ane2'])].reset_index()
results_RSD_Ref.loc[0,'testID'] = [testID]
results_Ane2_Ref.loc[0,'testID'] = [testID]
results_RSD_Ref_SD.loc[0,'testID'] = [testID]
results_Ane2_Ref_SD.loc[0,'testID'] = [testID]
results_RSD_Ref_WS.loc[0,'testID'] = [testID]
results_Ane2_Ref_WS.loc[0,'testID'] = [testID]
results_df = pd.concat([results_df,results_RSD_Ref,results_Ane2_Ref,results_RSD_Ref_SD,results_Ane2_Ref_SD,
results_RSD_Ref_WS,results_Ane2_Ref_WS],axis = 0)
    # Run a few adjustments with this timing test as well
inputdata_adj, lm_adj, m, c = _adjuster.perform_SS_S_adjustment(inputdata.copy())
lm_adj_dict[str(str(testID) + ' :SS_S' )] = lm_adj
inputdata_adj, lm_adj, m, c = _adjuster.perform_SS_SF_adjustment(inputdata.copy())
lm_adj_dict[str(str(testID) + ' :SS_SF' )] = lm_adj
inputdata_adj, lm_adj, m, c = perform_SS_WS_adjustment(inputdata.copy())
lm_adj_dict[str(str(testID) + ' :SS_WS-Std' )] = lm_adj
inputdata_adj, lm_adj = perform_match(inputdata.copy())
lm_adj_dict[str(str(testID) + ' :Match' )] = lm_adj
inputdata_adj, lm_adj = perform_match_input(inputdata.copy())
    lm_adj_dict[str(str(testID) + ' :SS_Match_input' )] = lm_adj
override = False
inputdata_adj, lm_adj, m, c = perform_G_Sa_adjustment(inputdata.copy(),override,RSDtype)
lm_adj_dict[str(str(testID) + ' :SS_G_SFa' )] = lm_adj
return results_df, lm_adj_dict
def block_print():
'''
disable print statements
'''
sys.stdout = open(os.devnull, 'w')
def enable_print():
'''
restore printing statements
'''
sys.stdout = sys.__stdout__
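# Usage sketch: wrap a noisy call to suppress its console output, e.g.
#   block_print()
#   reg_results = get_all_regressions(inputdata, title='Full comparison')
#   enable_print()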
def record_TIadj(adjustment_name, inputdata_adj, Timestamps, method, TI_10minuteAdjusted, emptyclassFlag=False):
    # Record adjusted TI columns (skipping diff/error columns) only when an adjusted DataFrame exists
    if isinstance(inputdata_adj, pd.DataFrame):
        adj_cols = [s for s in inputdata_adj.columns.to_list() if 'adj' in s]
        adj_cols = [s for s in adj_cols if not ('diff' in s or 'Diff' in s or 'error' in s)]
        for c in adj_cols:
            TI_10minuteAdjusted[str(c + '_' + method)] = inputdata_adj[c]
    return TI_10minuteAdjusted
def populate_resultsLists(resultDict, appendString, adjustment_name, lm_adj, inputdata_adj,
Timestamps, method, emptyclassFlag = False):
""""""
if isinstance(inputdata_adj, pd.DataFrame) == False:
emptyclassFlag = True
elif inputdata_adj.empty:
emptyclassFlag = True
else:
try:
TI_MBE_j_, TI_Diff_j_, TI_RMSE_j_, RepTI_MBE_j_, RepTI_Diff_j_, RepTI_RMSE_j_ = get_TI_MBE_Diff_j(inputdata_adj)
TI_Diff_r_, RepTI_Diff_r_ = get_TI_Diff_r(inputdata_adj)
rep_TI_results_1mps, rep_TI_results_05mps = get_representative_TI(inputdata_adj) # char TI but at bin level
TIbybin = get_TI_bybin(inputdata_adj)
TIbyRefbin = get_TI_byTIrefbin(inputdata_adj)
total_stats, belownominal_stats, abovenominal_stats = get_description_stats(inputdata_adj)
except:
emptyclassFlag = True
if emptyclassFlag == True:
resultDict[str('TI_MBEList' + '_' + appendString)].append(None)
resultDict[str('TI_DiffList' + '_' + appendString)].append(None)
resultDict[str('TI_DiffRefBinsList' + '_' + appendString)].append(None)
resultDict[str('TI_RMSEList' + '_' + appendString)].append(None)
resultDict[str('RepTI_MBEList' + '_' + appendString)].append(None)
resultDict[str('RepTI_DiffList' + '_' + appendString)].append(None)
resultDict[str('RepTI_DiffRefBinsList' + '_' + appendString)].append(None)
resultDict[str('RepTI_RMSEList' + '_' + appendString)].append(None)
resultDict[str('rep_TI_results_1mps_List' + '_' + appendString)].append(None)
resultDict[str('rep_TI_results_05mps_List' + '_' + appendString)].append(None)
resultDict[str('TIBinList' + '_' + appendString)].append(None)
resultDict[str('TIRefBinList' + '_' + appendString)].append(None)
resultDict[str('total_StatsList' + '_' + appendString)].append(None)
resultDict[str('belownominal_statsList' + '_' + appendString)].append(None)
resultDict[str('abovenominal_statsList' + '_' + appendString)].append(None)
resultDict[str('lm_adjList' + '_' + appendString)].append(lm_adj)
resultDict[str('adjustmentTagList' + '_' + appendString)].append(method)
resultDict[str('Distribution_statsList' + '_' + appendString)].append(None)
resultDict[str('sampleTestsLists' + '_' + appendString)].append(None)
else:
resultDict[str('TI_MBEList' + '_' + appendString)].append(TI_MBE_j_)
resultDict[str('TI_DiffList' + '_' + appendString)].append(TI_Diff_j_)
resultDict[str('TI_DiffRefBinsList' + '_' + appendString)].append(TI_Diff_r_)
resultDict[str('TI_RMSEList' + '_' + appendString)].append(TI_RMSE_j_)
resultDict[str('RepTI_MBEList' + '_' + appendString)].append(RepTI_MBE_j_)
resultDict[str('RepTI_DiffList' + '_' + appendString)].append(RepTI_Diff_j_)
resultDict[str('RepTI_DiffRefBinsList' + '_' + appendString)].append(RepTI_Diff_r_)
resultDict[str('RepTI_RMSEList' + '_' + appendString)].append(RepTI_RMSE_j_)
resultDict[str('rep_TI_results_1mps_List' + '_' + appendString)].append(rep_TI_results_1mps)
resultDict[str('rep_TI_results_05mps_List' + '_' + appendString)].append(rep_TI_results_05mps)
resultDict[str('TIBinList' + '_' + appendString)].append(TIbybin)
resultDict[str('TIRefBinList' + '_' + appendString)].append(TIbyRefbin)
resultDict[str('total_StatsList' + '_' + appendString)].append(total_stats)
resultDict[str('belownominal_statsList' + '_' + appendString)].append(belownominal_stats)
resultDict[str('abovenominal_statsList' + '_' + appendString)].append(abovenominal_stats)
resultDict[str('lm_adjList' + '_' + appendString)].append(lm_adj)
resultDict[str('adjustmentTagList' + '_' + appendString)].append(method)
try:
Distribution_stats, sampleTests = Dist_stats(inputdata_adj, Timestamps,adjustment_name)
resultDict[str('Distribution_statsList' + '_' + appendString)].append(Distribution_stats)
resultDict[str('sampleTestsLists' + '_' + appendString)].append(sampleTests)
except:
resultDict[str('Distribution_statsList' + '_' + appendString)].append(None)
resultDict[str('sampleTestsLists' + '_' + appendString)].append(None)
return resultDict
def populate_resultsLists_stability(ResultsLists_stability, ResultsLists_class, appendString):
ResultsLists_stability[str('TI_MBEList_stability' + '_' + appendString)].append(ResultsLists_class[str('TI_MBEList_class_' + appendString)])
ResultsLists_stability[str('TI_DiffList_stability' + '_' + appendString)].append(ResultsLists_class[str('TI_DiffList_class_' + appendString)])
ResultsLists_stability[str('TI_DiffRefBinsList_stability' + '_' + appendString)].append(ResultsLists_class[str('TI_DiffRefBinsList_class_' + appendString)])
ResultsLists_stability[str('TI_RMSEList_stability' + '_' + appendString)].append(ResultsLists_class[str('TI_RMSEList_class_' + appendString)])
ResultsLists_stability[str('RepTI_MBEList_stability' + '_' + appendString)].append(ResultsLists_class[str('RepTI_MBEList_class_' + appendString)])
ResultsLists_stability[str('RepTI_DiffList_stability' + '_' + appendString)].append(ResultsLists_class[str('RepTI_DiffList_class_' + appendString)])
ResultsLists_stability[str('RepTI_DiffRefBinsList_stability' + '_' + appendString)].append(ResultsLists_class[str('RepTI_DiffRefBinsList_class_' + appendString)])
ResultsLists_stability[str('RepTI_RMSEList_stability' + '_' + appendString)].append(ResultsLists_class[str('RepTI_RMSEList_class_' + appendString)])
ResultsLists_stability[str('rep_TI_results_1mps_List_stability' + '_' + appendString)].append(ResultsLists_class[str('rep_TI_results_1mps_List_class_' + appendString)])
ResultsLists_stability[str('rep_TI_results_05mps_List_stability' + '_' + appendString)].append(ResultsLists_class[str('rep_TI_results_05mps_List_class_' + appendString)])
ResultsLists_stability[str('TIBinList_stability' + '_' + appendString)].append(ResultsLists_class[str('TIBinList_class_' + appendString)])
ResultsLists_stability[str('TIRefBinList_stability' + '_' + appendString)].append(ResultsLists_class[str('TIRefBinList_class_' + appendString)])
ResultsLists_stability[str('total_StatsList_stability' + '_' + appendString)].append(ResultsLists_class[str('total_StatsList_class_' + appendString)])
ResultsLists_stability[str('belownominal_statsList_stability' + '_' + appendString)].append(ResultsLists_class[str('belownominal_statsList_class_' + appendString)])
ResultsLists_stability[str('abovenominal_statsList_stability' + '_' + appendString)].append(ResultsLists_class[str('abovenominal_statsList_class_' + appendString)])
ResultsLists_stability[str('lm_adjList_stability' + '_' + appendString)].append(ResultsLists_class[str('lm_adjList_class_' + appendString)])
ResultsLists_stability[str('adjustmentTagList_stability' + '_' + appendString)].append(ResultsLists_class[str('adjustmentTagList_class_' + appendString)])
ResultsLists_stability[str('Distribution_statsList_stability' + '_' + appendString)].append(ResultsLists_class[str('Distribution_statsList_class_' + appendString)])
ResultsLists_stability[str('sampleTestsLists_stability' + '_' + appendString)].append(ResultsLists_class[str('sampleTestsLists_class_' + appendString)])
return ResultsLists_stability
if __name__ == '__main__':
    # Python 2 caveat: this tool currently only runs under Python 3
    if sys.version_info[0] < 3:
        raise Exception("This tool requires Python 3; running under Python 2 will encounter errors.")
# ------------------------
# set up and configuration
# ------------------------
"""parser get_input_files"""
config = Config()
input_filename = config.input_filename
config_file = config.config_file
rtd_files = config.rtd_files
results_filename = config.results_file
saveModel = config.save_model_location
timetestFlag = config.time_test_flag
globalModel = config.global_model
"""config object assignments"""
outpath_dir = config.outpath_dir
outpath_file = config.outpath_file
"""metadata parser"""
config.get_site_metadata()
siteMetadata = config.site_metadata
config.get_filtering_metadata()
filterMetadata = config.config_metadata
config.get_adjustments_metadata()
adjustments_metadata = config.adjustments_metadata
RSDtype = config.RSDtype
extrap_metadata = config.extrap_metadata
extrapolation_type = config.extrapolation_type
"""data object assignments"""
data=Data(input_filename, config_file)
data.get_inputdata()
data.get_refTI_bins() # >> to data_file.py
data.check_for_alphaConfig()
inputdata = data.inputdata
Timestamps = data.timestamps
a = data.a
lab_a = data.lab_a
RSD_alphaFlag = data.RSD_alphaFlag
Ht_1_rsd = data.Ht_1_rsd
Ht_2_rsd = data.Ht_2_rsd
"""sensor, height"""
sensor = config.model
height = config.height
print ('%%%%%%%%%%%%%%%%%%%%%%%%% Processing Data %%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
# -------------------------------
# special handling for data types
# -------------------------------
stabilityFlag = False
if RSDtype['Selection'][0:4] == 'Wind':
stabilityFlag = True
if RSDtype['Selection']=='ZX':
stabilityFlag = True
TI_computed = inputdata['RSD_SD']/inputdata['RSD_WS']
RepTI_computed = TI_computed + 1.28 * inputdata['RSD_SD']
inputdata = inputdata.rename(columns={'RSD_TI':'RSD_TI_instrument'})
inputdata = inputdata.rename(columns={'RSD_RepTI':'RSD_RepTI_instrument'})
inputdata['RSD_TI'] = TI_computed
inputdata['RSD_RepTI'] = RepTI_computed
elif RSDtype['Selection']=='Triton':
        print ('RSD type is Triton; note that the output uncorrected TI is instrument corrected')
# ------------------------
# Baseline Results
# ------------------------
# Get all regressions available
reg_results = get_all_regressions(inputdata, title='Full comparison')
stabilityClass_tke, stabilityMetric_tke, regimeBreakdown_tke = calculate_stability_TKE(inputdata)
cup_alphaFlag, stabilityClass_ane, stabilityMetric_ane, regimeBreakdown_ane, Ht_1_ane, Ht_2_ane, stabilityClass_rsd, stabilityMetric_rsd, regimeBreakdown_rsd = calculate_stability_alpha(inputdata, config_file, RSD_alphaFlag, Ht_1_rsd, Ht_2_rsd)
#------------------------
    # Time Sensitivity Analysis
#------------------------
# TimeTestA = pd.DataFrame()
# TimeTestB = pd.DataFrame()
# TimeTestC = pd.DataFrame()
if timetestFlag == True:
        # A) Increase % of the train/test split -- check for convergence -- basic metrics recorded for the baseline and for every adjustment
splitList = np.linspace(0.0, 100.0, num = 20, endpoint =False)
print ('Testing model generation time period sensitivity...% of data')
time_test_A_adjustment_df = {}
TimeTestA_baseline_df = pd.DataFrame()
for s in splitList[1:]:
sys.stdout.write("\r")
sys.stdout.write(f"{str(s).rjust(10, ' ')} % ")
inputdata_test = train_test_split(s,inputdata.copy())
TimeTestA_baseline_df, time_test_A_adjustment_df = quick_metrics(inputdata_test, TimeTestA_baseline_df, time_test_A_adjustment_df,str(100-s))
sys.stdout.flush()
print()
        # B) Incrementally add days to the training set sequentially -- check for convergence
numberofObsinOneDay = 144
numberofDaysInTest = int(round(len(inputdata)/numberofObsinOneDay))
print ('Testing model generation time period sensitivity...days to train model')
print ('Number of days in the study ' + str(numberofDaysInTest))
time_test_B_adjustment_df = {}
TimeTestB_baseline_df = pd.DataFrame()
for i in range(0,numberofDaysInTest):
sys.stdout.write("\r")
sys.stdout.write(f"{str(i).rjust(10, ' ')} of {str(numberofDaysInTest)} days ")
windowEnd = (i+1)*(numberofObsinOneDay)
inputdata_test = train_test_split(i,inputdata.copy(), stepOverride = [0,windowEnd])
TimeTestB_baseline_df, time_test_B_adjustment_df = quick_metrics(inputdata_test,TimeTestB_baseline_df, time_test_B_adjustment_df,str(numberofDaysInTest-i))
sys.stdout.flush()
print()
# C) If experiment is greater than 3 months, slide a 6 week window (1 week step)
if len(inputdata) > (numberofObsinOneDay*90): # check to see if experiment is greater than 3 months
print ('Testing model generation time period sensitivity...6 week window pick')
windowStart = 0
windowEnd = (numberofObsinOneDay*42)
time_test_C_adjustment_df = {}
TimeTestC_baseline_df = pd.DataFrame()
while windowEnd < len(inputdata):
print (str('After observation #' + str(windowStart) + ' ' + 'Before observation #' + str(windowEnd)))
windowStart += numberofObsinOneDay*7
windowEnd = windowStart + (numberofObsinOneDay*42)
inputdata_test = train_test_split(i,inputdata.copy(), stepOverride = [windowStart,windowEnd])
TimeTestC_baseline_df, time_test_C_adjustment_df = quick_metrics(inputdata_test, TimeTestC_baseline_df, time_test_C_adjustment_df,
str('After_' + str(windowStart) + '_' + 'Before_' + str(windowEnd)))
else:
TimeTestA_baseline_df = pd.DataFrame()
TimeTestB_baseline_df = pd.DataFrame()
TimeTestC_baseline_df = pd.DataFrame()
time_test_A_adjustment_df = {}
time_test_B_adjustment_df = {}
time_test_C_adjustment_df = {}
#-----------------------
# Test - Train split
#-----------------------
# random 80-20 split
inputdata = train_test_split(80.0, inputdata.copy())
inputdata_train = inputdata[inputdata['split'] == True].copy().join(Timestamps)
inputdata_test = inputdata[inputdata['split'] == False].copy().join(Timestamps)
timestamp_train = inputdata_train['Timestamp']
timestamp_test = inputdata_test['Timestamp']
#-----------------------------
# stability class subset lists
#-----------------------------
# get reg_results by stability class: list of df's for each height
reg_results_class1 = []
reg_results_class2 = []
reg_results_class3 = []
reg_results_class4 = []
reg_results_class5 = []
reg_results_class1_alpha = {}
reg_results_class2_alpha = {}
reg_results_class3_alpha = {}
reg_results_class4_alpha = {}
reg_results_class5_alpha = {}
if RSDtype['Selection'][0:4] == 'Wind' or 'ZX' in RSDtype['Selection']:
inputdata_class1 = []
inputdata_class2 = []
inputdata_class3 = []
inputdata_class4 = []
inputdata_class5 = []
RSD_h = []
Alldata_inputdata = inputdata.copy()
for h in stabilityClass_tke.columns.to_list():
RSD_h.append(h)
inputdata_class1.append(Alldata_inputdata[Alldata_inputdata[h] == 1])
inputdata_class2.append(Alldata_inputdata[Alldata_inputdata[h] == 2])
inputdata_class3.append(Alldata_inputdata[Alldata_inputdata[h] == 3])
inputdata_class4.append(Alldata_inputdata[Alldata_inputdata[h] == 4])
inputdata_class5.append(Alldata_inputdata[Alldata_inputdata[h] == 5])
All_class_data = [inputdata_class1,inputdata_class2, inputdata_class3,
inputdata_class4, inputdata_class5]
All_class_data_clean = [inputdata_class1, inputdata_class2, inputdata_class3,
inputdata_class4, inputdata_class5]
for h in RSD_h:
idx = RSD_h.index(h)
df = inputdata_class1[idx]
reg_results_class1.append(get_all_regressions(df, title = str('TKE_stability_' + h + 'class1')))
df = inputdata_class2[idx]
reg_results_class2.append(get_all_regressions(df, title = str('TKE_stability_' + h + 'class2')))
df = inputdata_class3[idx]
reg_results_class3.append(get_all_regressions(df, title = str('TKE_stability_' + h + 'class3')))
df = inputdata_class4[idx]
reg_results_class4.append(get_all_regressions(df, title = str('TKE_stability_' + h + 'class4')))
df = inputdata_class5[idx]
reg_results_class5.append(get_all_regressions(df, title = str('TKE_stability_' + h + 'class5')))
if RSD_alphaFlag:
del inputdata_class1, inputdata_class2, inputdata_class3, inputdata_class4, inputdata_class5
Alldata_inputdata = inputdata.copy()
colName = stabilityClass_rsd.name
Alldata_inputdata[colName] = stabilityClass_rsd.values
inputdata_class1=Alldata_inputdata[Alldata_inputdata[stabilityClass_rsd.name] == 1.0]
inputdata_class2=Alldata_inputdata[Alldata_inputdata[stabilityClass_rsd.name] == 2.0]
inputdata_class3=Alldata_inputdata[Alldata_inputdata[stabilityClass_rsd.name] == 3.0]
inputdata_class4=Alldata_inputdata[Alldata_inputdata[stabilityClass_rsd.name] == 4.0]
inputdata_class5=Alldata_inputdata[Alldata_inputdata[stabilityClass_rsd.name] == 5.0]
All_class_data_alpha_RSD = [inputdata_class1,inputdata_class2, inputdata_class3,
inputdata_class4, inputdata_class5]
All_class_data_alpha_RSD_clean = [inputdata_class1.copy(),inputdata_class2.copy(), inputdata_class3.copy(),
inputdata_class4.copy(), inputdata_class5.copy()]
reg_results_class1_alpha['RSD'] = get_all_regressions(inputdata_class1, title = str('alpha_stability_RSD' + 'class1'))
reg_results_class2_alpha['RSD'] = get_all_regressions(inputdata_class2, title = str('alpha_stability_RSD' + 'class2'))
reg_results_class3_alpha['RSD'] = get_all_regressions(inputdata_class3, title = str('alpha_stability_RSD' + 'class3'))
reg_results_class4_alpha['RSD'] = get_all_regressions(inputdata_class4, title = str('alpha_stability_RSD' + 'class4'))
reg_results_class5_alpha['RSD'] = get_all_regressions(inputdata_class5, title = str('alpha_stability_RSD' + 'class5'))
if cup_alphaFlag:
del inputdata_class1, inputdata_class2, inputdata_class3, inputdata_class4, inputdata_class5
Alldata_inputdata = inputdata.copy()
colName = stabilityClass_ane.name
Alldata_inputdata[colName] = stabilityClass_ane.values
inputdata_class1 = Alldata_inputdata[Alldata_inputdata[stabilityClass_ane.name] == 1.0]
inputdata_class2 = Alldata_inputdata[Alldata_inputdata[stabilityClass_ane.name] == 2.0]
inputdata_class3 = Alldata_inputdata[Alldata_inputdata[stabilityClass_ane.name] == 3.0]
inputdata_class4 = Alldata_inputdata[Alldata_inputdata[stabilityClass_ane.name] == 4.0]
inputdata_class5 = Alldata_inputdata[Alldata_inputdata[stabilityClass_ane.name] == 5.0]
All_class_data_alpha_Ane = [inputdata_class1,inputdata_class2, inputdata_class3,
inputdata_class4, inputdata_class5]
All_class_data_alpha_Ane_clean = [inputdata_class1.copy(),inputdata_class2.copy(), inputdata_class3.copy(),
inputdata_class4.copy(), inputdata_class5.copy()]
reg_results_class1_alpha['Ane'] = get_all_regressions(inputdata_class1, title = str('alpha_stability_Ane' + 'class1'))
reg_results_class2_alpha['Ane'] = get_all_regressions(inputdata_class2, title = str('alpha_stability_Ane' + 'class2'))
reg_results_class3_alpha['Ane'] = get_all_regressions(inputdata_class3, title = str('alpha_stability_Ane' + 'class3'))
reg_results_class4_alpha['Ane'] = get_all_regressions(inputdata_class4, title = str('alpha_stability_Ane' + 'class4'))
reg_results_class5_alpha['Ane'] = get_all_regressions(inputdata_class5, title = str('alpha_stability_Ane' + 'class5'))
# ------------------------
# TI Adjustments
# ------------------------
from TACT.computation.adjustments import Adjustments
baseResultsLists = initialize_resultsLists('')
# get number of observations in each bin
count_1mps, count_05mps = get_count_per_WSbin(inputdata, 'RSD_WS')
inputdata_train = inputdata[inputdata['split'] == True].copy().join(Timestamps)
inputdata_test = inputdata[inputdata['split'] == False].copy().join(Timestamps)
timestamp_train = inputdata_train['Timestamp']
timestamp_test = inputdata_test['Timestamp']
count_1mps_train, count_05mps_train = get_count_per_WSbin(inputdata_train, 'RSD_WS')
count_1mps_test, count_05mps_test = get_count_per_WSbin(inputdata_test, 'RSD_WS')
if RSDtype['Selection'][0:4] == 'Wind' or 'ZX' in RSDtype['Selection']:
primary_c = [h for h in RSD_h if 'Ht' not in h]
primary_idx = RSD_h.index(primary_c[0])
ResultsLists_stability = initialize_resultsLists('stability_')
if cup_alphaFlag:
ResultsLists_stability_alpha_Ane = initialize_resultsLists('stability_alpha_Ane')
if RSD_alphaFlag:
ResultsLists_stability_alpha_RSD = initialize_resultsLists('stability_alpha_RSD')
name_1mps_tke = []
name_1mps_alpha_Ane = []
name_1mps_alpha_RSD = []
name_05mps_tke = []
name_05mps_alpha_Ane = []
name_05mps_alpha_RSD = []
count_1mps_tke = []
count_1mps_alpha_Ane = []
count_1mps_alpha_RSD = []
count_05mps_tke = []
count_05mps_alpha_Ane = []
count_05mps_alpha_RSD = []
for c in range(0,len(All_class_data)):
name_1mps_tke.append(str('count_1mps_class_' + str(c) + '_tke'))
name_1mps_alpha_Ane.append(str('count_1mps_class_' + str(c) + '_alpha_Ane'))
name_1mps_alpha_RSD.append(str('count_1mps_class_' + str(c) + '_alpha_RSD'))
name_05mps_tke.append(str('count_05mps_class_' + str(c) + '_tke'))
name_05mps_alpha_Ane.append(str('count_05mps_class_' + str(c) + '_alpha_Ane'))
name_05mps_alpha_RSD.append(str('count_05mps_class_' + str(c) + '_alpha_RSD'))
try:
c_1mps_tke, c_05mps_tke = get_count_per_WSbin(All_class_data[c][primary_idx], 'RSD_WS')
count_1mps_tke.append(c_1mps_tke)
count_05mps_tke.append(c_05mps_tke)
except:
count_1mps_tke.append(None)
count_05mps_tke.append(None)
try:
c_1mps_alpha_Ane, c_05mps_alpha_Ane = get_count_per_WSbin(All_class_data_alpha_Ane[c], 'RSD_WS')
count_1mps_alpha_Ane.append(c_1mps_alpha_Ane)
count_05mps_alpha_Ane.append(c_05mps_alpha_Ane)
except:
count_1mps_alpha_Ane.append(None)
count_05mps_alpha_Ane.append(None)
try:
c_1mps_alpha_RSD, c_05mps_alpha_RSD = get_count_per_WSbin(All_class_data_alpha_RSD[c], 'RSD_WS')
count_1mps_alpha_RSD.append(c_1mps_alpha_RSD)
count_05mps_alpha_RSD.append(c_05mps_alpha_RSD)
except:
count_1mps_alpha_RSD.append(None)
count_05mps_alpha_RSD.append(None)
    # initialize 10 minute output
TI_10minuteAdjusted = pd.DataFrame()
# initialize Adjustments object
adjuster = Adjustments(inputdata.copy(), adjustments_metadata, baseResultsLists)
for method in adjustments_metadata:
# ************************************ #
# Site Specific Simple Adjustment (SS-S)
if method != 'SS-S':
pass
elif method == 'SS-S' and adjustments_metadata['SS-S'] == False:
pass
else:
print('Applying Adjustment Method: SS-S')
logger.info('Applying Adjustment Method: SS-S')
inputdata_adj, lm_adj, m, c = adjuster.perform_SS_S_adjustment(inputdata.copy())
print("SS-S: y = " + str(m) + " * x + " + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = 'SS-S'
adjustment_name = 'SS_S'
baseResultsLists = populate_resultsLists(baseResultsLists, '', adjustment_name, lm_adj, inputdata_adj,
Timestamps, method)
TI_10minuteAdjusted = record_TIadj(adjustment_name,inputdata_adj,Timestamps, method, TI_10minuteAdjusted, emptyclassFlag=False)
if RSDtype['Selection'][0:4] == 'Wind':
print('Applying Adjustment Method: SS-S by stability class (TKE)')
logger.info('Applying Adjustment Method: SS-S by stability class (TKE)')
# stability subset output for primary height (all classes)
ResultsLists_class = initialize_resultsLists('class_')
className = 1
for item in All_class_data:
inputdata_adj, lm_adj, m, c = adjuster.perform_SS_S_adjustment(item[primary_idx].copy())
print("SS-S: y = " + str(m) + " * x + " + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-S' + '_TKE_' + 'class_' + str(className))
adjustment_name = str('SS-S'+ '_TKE_' + str(className))
ResultsLists_class = populate_resultsLists(ResultsLists_class, 'class_', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
                ResultsLists_stability = populate_resultsLists_stability(ResultsLists_stability, ResultsLists_class, '')
if RSD_alphaFlag:
print('Applying Adjustment Method: SS-S by stability class Alpha w/ RSD')
logger.info('Applying Adjustment Method: SS-S by stability class Alpha w/ RSD')
ResultsLists_class_alpha_RSD = initialize_resultsLists('class_alpha_RSD')
className = 1
print (str('class ' + str(className)))
for item in All_class_data_alpha_RSD:
inputdata_adj, lm_adj, m, c = adjuster.perform_SS_S_adjustment(item.copy())
print ("SS-S: y = " + str(m) + "* x +" + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-S' + '_' + 'class_' + str(className))
adjustment_name = str('SS-S' + '_alphaRSD_' + str(className))
ResultsLists_class_alpha_RSD = populate_resultsLists(ResultsLists_class_alpha_RSD, 'class_alpha_RSD', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_RSD = populate_resultsLists_stability(ResultsLists_stability_alpha_RSD, ResultsLists_class_alpha_RSD, 'alpha_RSD')
if cup_alphaFlag:
print('Applying Adjustment Method: SS-S by stability class Alpha w/cup')
logger.info('Applying Adjustment Method: SS-S by stability class Alpha w/cup')
ResultsLists_class_alpha_Ane = initialize_resultsLists('class_alpha_Ane')
className = 1
for item in All_class_data_alpha_Ane:
inputdata_adj, lm_adj, m, c = adjuster.perform_SS_S_adjustment(item.copy())
print ("SS-S: y = " + str(m) + "* x +" + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-S' + '_alphaCup_' + 'class_' + str(className))
                    adjustment_name = str('SS-S' + '_alphaCup_' + str(className))
ResultsLists_class_alpha_Ane = populate_resultsLists(ResultsLists_class_alpha_Ane, 'class_alpha_Ane', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_Ane = populate_resultsLists_stability(ResultsLists_stability_alpha_Ane, ResultsLists_class_alpha_Ane, 'alpha_Ane')
# ********************************************** #
# Site Specific Simple + Filter Adjustment (SS-SF)
if method != 'SS-SF':
pass
elif method == 'SS-SF' and adjustments_metadata['SS-SF'] == False:
pass
else:
print('Applying Adjustment Method: SS-SF')
logger.info('Applying Adjustment Method: SS-SF')
# inputdata_adj, lm_adj, m, c = perform_SS_SF_adjustment(inputdata.copy())
inputdata_adj, lm_adj, m, c = adjuster.perform_SS_SF_adjustment(inputdata.copy())
print("SS-SF: y = " + str(m) + " * x + " + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = 'SS-SF'
adjustment_name = 'SS_SF'
baseResultsLists = populate_resultsLists(baseResultsLists, '', adjustment_name, lm_adj, inputdata_adj,
Timestamps, method)
TI_10minuteAdjusted = record_TIadj(adjustment_name,inputdata_adj,Timestamps, method, TI_10minuteAdjusted, emptyclassFlag=False)
if RSDtype['Selection'][0:4] == 'Wind' or 'ZX' in RSDtype['Selection']:
print('Applying Adjustment Method: SS-SF by stability class (TKE)')
logger.info('Applying Adjustment Method: SS-SF by stability class (TKE)')
# stability subset output for primary height (all classes)
ResultsLists_class = initialize_resultsLists('class_')
className = 1
for item in All_class_data:
inputdata_adj, lm_adj, m, c = adjuster.perform_SS_SF_adjustment(item[primary_idx].copy())
print("SS-SF: y = " + str(m) + " * x + " + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-SF' + '_' + 'class_' + str(className))
adjustment_name = str('SS_SF' + '_TKE_' + str(className))
ResultsLists_class = populate_resultsLists(ResultsLists_class, 'class_', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
                ResultsLists_stability = populate_resultsLists_stability(ResultsLists_stability, ResultsLists_class, '')
if RSD_alphaFlag:
print('Applying Adjustment Method: SS-SF by stability class Alpha w/ RSD')
logger.info('Applying Adjustment Method: SS-SF by stability class Alpha w/ RSD')
ResultsLists_class_alpha_RSD = initialize_resultsLists('class_alpha_RSD')
className = 1
for item in All_class_data_alpha_RSD:
inputdata_adj, lm_adj, m, c = adjuster.perform_SS_SF_adjustment(item.copy())
print ("SS-SF: y = " + str(m) + "* x +" + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-SF' + '_' + 'class_' + str(className))
adjustment_name = str('SS_SF' + '_alphaRSD_' + str(className))
ResultsLists_class_alpha_RSD = populate_resultsLists(ResultsLists_class_alpha_RSD, 'class_alpha_RSD', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_RSD = populate_resultsLists_stability(ResultsLists_stability_alpha_RSD,
ResultsLists_class_alpha_RSD, 'alpha_RSD')
if cup_alphaFlag:
print('Applying Adjustment Method: SS-SF by stability class Alpha w/cup')
logger.info('Applying Adjustment Method: SS-SF by stability class Alpha w/cup')
ResultsLists_class_alpha_Ane = initialize_resultsLists('class_alpha_Ane')
className = 1
for item in All_class_data_alpha_Ane:
inputdata_adj, lm_adj, m, c = adjuster.perform_SS_SF_adjustment(item.copy())
print ("SS-SF: y = " + str(m) + "* x +" + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-SF' + '_' + 'class_' + str(className))
adjustment_name = str('SS_SF' + '_alphaCup_' + str(className))
ResultsLists_class_alpha_Ane = populate_resultsLists(ResultsLists_class_alpha_Ane, 'class_alpha_Ane', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_Ane = populate_resultsLists_stability(ResultsLists_stability_alpha_Ane,
ResultsLists_class_alpha_Ane, 'alpha_Ane')
# ************************************ #
# Site Specific Simple Adjustment (SS-SS) combining stability classes adjusted differently
if method != 'SS-SS':
pass
elif method == 'SS-SS' and adjustments_metadata['SS-SS'] == False:
pass
elif RSDtype['Selection'][0:4] != 'Wind' and 'ZX' not in RSDtype['Selection']:
pass
else:
print('Applying Adjustment Method: SS-SS')
logger.info('Applying Adjustment Method: SS-SS')
inputdata_adj, lm_adj, m, c = perform_SS_SS_adjustment(inputdata.copy(),All_class_data,primary_idx)
print("SS-SS: y = " + str(m) + " * x + " + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = 'SS-SS'
adjustment_name = 'SS_SS'
baseResultsLists = populate_resultsLists(baseResultsLists, '', adjustment_name, lm_adj, inputdata_adj,
Timestamps, method)
TI_10minuteAdjusted = record_TIadj(adjustment_name,inputdata_adj,Timestamps, method, TI_10minuteAdjusted, emptyclassFlag=False)
if RSDtype['Selection'][0:4] == 'Wind':
print('Applying Adjustment Method: SS-SS by stability class (TKE). SAME as Baseline')
logger.info('Applying Adjustment Method: SS-SS by stability class (TKE). SAME as Baseline')
ResultsLists_class = initialize_resultsLists('class_')
className = 1
for item in All_class_data:
print("SS-SS: y = " + str(m) + " * x + " + str(c))
adjustment_name = str('SS_SS' + '_TKE_' + str(className))
ResultsLists_class = populate_resultsLists(ResultsLists_class, 'class_', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
                ResultsLists_stability = populate_resultsLists_stability(ResultsLists_stability, ResultsLists_class, '')
if RSD_alphaFlag:
                print('Applying Adjustment Method: SS-SS by stability class Alpha w/ RSD. SAME as Baseline')
                logger.info('Applying Adjustment Method: SS-SS by stability class Alpha w/ RSD. SAME as Baseline')
ResultsLists_class_alpha_RSD = initialize_resultsLists('class_alpha_RSD')
className = 1
for item in All_class_data_alpha_RSD:
print ("SS-SS: y = " + str(m) + "* x +" + str(c))
adjustment_name = str('SS_SS' + '_alphaRSD_' + str(className))
ResultsLists_class_alpha_RSD = populate_resultsLists(ResultsLists_class_alpha_RSD, 'class_alpha_RSD', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_RSD = populate_resultsLists_stability(ResultsLists_stability_alpha_RSD, ResultsLists_class_alpha_RSD, 'alpha_RSD')
if cup_alphaFlag:
print('Applying Adjustment Method: SS-SS by stability class Alpha w/cup')
logger.info('Applying Adjustment Method: SS-SS by stability class Alpha w/cup')
ResultsLists_class_alpha_Ane = initialize_resultsLists('class_alpha_Ane')
className = 1
for item in All_class_data_alpha_Ane:
print ("SS-SS: y = " + str(m) + "* x +" + str(c))
emptyclassFlag = False
adjustment_name = str('SS_SS' + '_alphaCup_' + str(className))
ResultsLists_class_alpha_Ane = populate_resultsLists(ResultsLists_class_alpha_Ane, 'class_alpha_Ane', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_Ane = populate_resultsLists_stability(ResultsLists_stability_alpha_Ane, ResultsLists_class_alpha_Ane, 'alpha_Ane')
# ******************************************* #
# Site Specific WindSpeed Adjustment (SS-WS)
if method != 'SS-WS':
pass
elif method == 'SS-WS' and adjustments_metadata['SS-WS'] == False:
pass
else:
print('Applying Adjustment Method: SS-WS')
logger.info('Applying Adjustment Method: SS-WS')
inputdata_adj, lm_adj, m, c = perform_SS_WS_adjustment(inputdata.copy())
print("SS-WS: y = " + str(m) + " * x + " + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = 'SS-WS'
adjustment_name = 'SS_WS'
baseResultsLists = populate_resultsLists(baseResultsLists, '', adjustment_name, lm_adj, inputdata_adj,
Timestamps, method)
TI_10minuteAdjusted = record_TIadj(adjustment_name,inputdata_adj,Timestamps, method, TI_10minuteAdjusted, emptyclassFlag=False)
if RSDtype['Selection'][0:4] == 'Wind' or 'ZX' in RSDtype['Selection']:
print('Applying Adjustment Method: SS-WS by stability class (TKE)')
logger.info('Applying Adjustment Method: SS-WS by stability class (TKE)')
# stability subset output for primary height (all classes)
ResultsLists_class = initialize_resultsLists('class_')
className = 1
for item in All_class_data:
inputdata_adj, lm_adj, m, c = perform_SS_WS_adjustment(item[primary_idx].copy())
print("SS-WS: y = " + str(m) + " * x + " + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-WS' + '_' + 'class_' + str(className))
adjustment_name = str('SS_WS' + '_TKE_' + str(className))
ResultsLists_class = populate_resultsLists(ResultsLists_class, 'class_', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
                ResultsLists_stability = populate_resultsLists_stability(ResultsLists_stability, ResultsLists_class, '')
if RSD_alphaFlag:
print('Applying Adjustment Method: SS-WS by stability class Alpha w/ RSD')
logger.info('Applying Adjustment Method: SS-WS by stability class Alpha w/ RSD')
ResultsLists_class_alpha_RSD = initialize_resultsLists('class_alpha_RSD')
className = 1
for item in All_class_data_alpha_RSD:
inputdata_adj, lm_adj, m, c = perform_SS_WS_adjustment(item.copy())
print ("SS-WS: y = " + str(m) + "* x +" + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-WS' + '_' + 'class_' + str(className))
adjustment_name = str('SS_WS' + '_alphaRSD_' + str(className))
ResultsLists_class_alpha_RSD = populate_resultsLists(ResultsLists_class_alpha_RSD, 'class_alpha_RSD', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_RSD = populate_resultsLists_stability(ResultsLists_stability_alpha_RSD, ResultsLists_class_alpha_RSD, 'alpha_RSD')
if cup_alphaFlag:
print('Applying Adjustment Method: SS-WS by stability class Alpha w/cup')
logger.info('Applying Adjustment Method: SS-WS by stability class Alpha w/cup')
ResultsLists_class_alpha_Ane = initialize_resultsLists('class_alpha_Ane')
className = 1
for item in All_class_data_alpha_Ane:
inputdata_adj, lm_adj, m, c = perform_SS_WS_adjustment(item.copy())
print ("SS-WS: y = " + str(m) + "* x +" + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-WS' + '_' + 'class_' + str(className))
emptyclassFlag = False
adjustment_name = str('SS_WS' + '_alphaCup_' + str(className))
ResultsLists_class_alpha_Ane = populate_resultsLists(ResultsLists_class_alpha_Ane, 'class_alpha_Ane', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_Ane = populate_resultsLists_stability(ResultsLists_stability_alpha_Ane, ResultsLists_class_alpha_Ane, 'alpha_Ane')
# ******************************************* #
# Site Specific Comprehensive Adjustment (SS-WS-Std)
if method != 'SS-WS-Std':
pass
elif method == 'SS-WS-Std' and adjustments_metadata['SS-WS-Std'] == False:
pass
else:
print('Applying Adjustment Method: SS-WS-Std')
logger.info('Applying Adjustment Method: SS-WS-Std')
inputdata_adj, lm_adj, m, c = perform_SS_WS_Std_adjustment(inputdata.copy())
print("SS-WS-Std: y = " + str(m) + " * x + " + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = 'SS-WS-Std'
adjustment_name = 'SS_WS_Std'
baseResultsLists = populate_resultsLists(baseResultsLists, '', adjustment_name, lm_adj, inputdata_adj,
Timestamps, method)
TI_10minuteAdjusted = record_TIadj(adjustment_name,inputdata_adj,Timestamps, method, TI_10minuteAdjusted, emptyclassFlag=False)
if RSDtype['Selection'][0:4] == 'Wind' or 'ZX' in RSDtype['Selection']:
print('Applying Adjustment Method: SS-WS-Std by stability class (TKE)')
logger.info('Applying Adjustment Method: SS-WS-Std by stability class (TKE)')
# stability subset output for primary height (all classes)
ResultsLists_class = initialize_resultsLists('class_')
className = 1
for item in All_class_data:
inputdata_adj, lm_adj, m, c = perform_SS_WS_Std_adjustment(item[primary_idx].copy())
print("SS-WS-Std: y = " + str(m) + " * x + " + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-WS-Std' + '_' + 'class_' + str(className))
adjustment_name = str('SS_WS_Std' + '_TKE_' + str(className))
ResultsLists_class = populate_resultsLists(ResultsLists_class, 'class_', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
                ResultsLists_stability = populate_resultsLists_stability(ResultsLists_stability, ResultsLists_class, '')
if RSD_alphaFlag:
print('Applying Adjustment Method: SS-WS-Std by stability class Alpha w/ RSD')
logger.info('Applying Adjustment Method: SS-WS-Std by stability class Alpha w/ RSD')
ResultsLists_class_alpha_RSD = initialize_resultsLists('class_alpha_RSD')
className = 1
for item in All_class_data_alpha_RSD:
inputdata_adj, lm_adj, m, c = perform_SS_WS_Std_adjustment(item.copy())
print ("SS-WS-Std: y = " + str(m) + "* x +" + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-WS-Std' + '_' + 'class_' + str(className))
adjustment_name = str('SS_WS_Std' + '_alphaRSD_' + str(className))
ResultsLists_class_alpha_RSD = populate_resultsLists(ResultsLists_class_alpha_RSD, 'class_alpha_RSD', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_RSD = populate_resultsLists_stability(ResultsLists_stability_alpha_RSD, ResultsLists_class_alpha_RSD, 'alpha_RSD')
if cup_alphaFlag:
print('Applying Adjustment Method: SS-WS-Std by stability class Alpha w/cup')
logger.info('Applying Adjustment Method: SS-WS-Std by stability class Alpha w/cup')
ResultsLists_class_alpha_Ane = initialize_resultsLists('class_alpha_Ane')
className = 1
for item in All_class_data_alpha_Ane:
inputdata_adj, lm_adj, m, c = perform_SS_WS_Std_adjustment(item.copy())
print ("SS-WS-Std: y = " + str(m) + "* x +" + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-WS-Std' + '_' + 'class_' + str(className))
emptyclassFlag = False
adjustment_name = str('SS_WS_Std' + '_alphaCup_' + str(className))
ResultsLists_class_alpha_Ane = populate_resultsLists(ResultsLists_class_alpha_Ane, 'class_alpha_Ane', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_Ane = populate_resultsLists_stability(ResultsLists_stability_alpha_Ane, ResultsLists_class_alpha_Ane, 'alpha_Ane')
# **************************************************************** #
        # Site Specific LTERRA for WC 1HZ Data Adjustment (SS-LTERRA-WC-1HZ)
if method != 'SS-LTERRA-WC-1HZ':
pass
elif method == 'SS-LTERRA-WC-1HZ' and adjustments_metadata['SS-LTERRA-WC-1HZ'] == False:
pass
else:
print('Applying Adjustment Method: SS-LTERRA-WC-1HZ')
logger.info('Applying Adjustment Method: SS-LTERRA-WC-1HZ')
# ******************************************************************* #
# Site Specific LTERRA WC Machine Learning Adjustment (SS-LTERRA-MLa)
        # Random Forest Regression with no ancillary columns
if method != 'SS-LTERRA-MLa':
pass
elif method == 'SS-LTERRA-MLa' and adjustments_metadata['SS-LTERRA-MLa'] == False:
pass
else:
print('Applying Adjustment Method: SS-LTERRA-MLa')
logger.info('Applying Adjustment Method: SS-LTERRA-MLa')
inputdata_adj, lm_adj, m, c = perform_SS_LTERRA_ML_adjustment(inputdata.copy())
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = 'SS_LTERRA_MLa'
adjustment_name = 'SS_LTERRA_MLa'
baseResultsLists = populate_resultsLists(baseResultsLists, '', adjustment_name, lm_adj, inputdata_adj,
Timestamps, method)
TI_10minuteAdjusted = record_TIadj(adjustment_name,inputdata_adj,Timestamps, method, TI_10minuteAdjusted, emptyclassFlag=False)
if RSDtype['Selection'][0:4] == 'Wind':
print('Applying Adjustment Method: SS-LTERRA MLa by stability class (TKE)')
logger.info('Applying Adjustment Method: SS-LTERRA MLa by stability class (TKE)')
# stability subset output for primary height (all classes)
ResultsLists_class = initialize_resultsLists('class_')
className = 1
for item in All_class_data:
inputdata_adj, lm_adj, m, c= perform_SS_LTERRA_ML_adjustment(item[primary_idx].copy())
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS_LTERRA_MLa' + '_' + 'class_' + str(className))
adjustment_name = str('SS_LTERRA_MLa' + '_TKE_' + str(className))
ResultsLists_class = populate_resultsLists(ResultsLists_class, 'class_', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
                ResultsLists_stability = populate_resultsLists_stability(ResultsLists_stability, ResultsLists_class, '')
if RSD_alphaFlag:
print('Applying Adjustment Method: SS-LTERRA MLa by stability class Alpha w/ RSD')
logger.info('Applying Adjustment Method: SS-LTERRA MLa by stability class Alpha w/ RSD')
ResultsLists_class_alpha_RSD = initialize_resultsLists('class_alpha_RSD')
className = 1
for item in All_class_data_alpha_RSD:
inputdata_adj, lm_adj, m, c = perform_SS_LTERRA_ML_adjustment(item.copy())
lm_adj['sensor'] = sensor
lm_adj['height'] = height
                    lm_adj['adjustment'] = str('SS_LTERRA_MLa' + '_' + 'class_' + str(className))
                    adjustment_name = str('SS_LTERRA_MLa' + '_alphaRSD_' + str(className))
ResultsLists_class_alpha_RSD = populate_resultsLists(ResultsLists_class_alpha_RSD, 'class_alpha_RSD', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_RSD = populate_resultsLists_stability(ResultsLists_stability_alpha_RSD, ResultsLists_class_alpha_RSD, 'alpha_RSD')
if cup_alphaFlag:
print('Applying Adjustment Method: SS-LTERRA_MLa by stability class Alpha w/cup')
logger.info('Applying Adjustment Method: SS-LTERRA_MLa by stability class Alpha w/cup')
ResultsLists_class_alpha_Ane = initialize_resultsLists('class_alpha_Ane')
className = 1
for item in All_class_data_alpha_Ane:
inputdata_adj, lm_adj, m, c = perform_SS_LTERRA_ML_adjustment(item.copy())
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS_LTERRA_MLa' + '_' + 'class_' + str(className))
emptyclassFlag = False
adjustment_name = str('SS_LTERRA_MLa' + '_alphaCup_' + str(className))
ResultsLists_class_alpha_Ane = populate_resultsLists(ResultsLists_class_alpha_Ane, 'class_alpha_Ane', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_Ane = populate_resultsLists_stability(ResultsLists_stability_alpha_Ane, ResultsLists_class_alpha_Ane, 'alpha_Ane')
# ************************************************************************************ #
# Site Specific LTERRA WC (w/ stability) Machine Learning Adjustment (SS-LTERRA_MLc)
if method != 'SS-LTERRA-MLc':
pass
elif method == 'SS-LTERRA-MLc' and adjustments_metadata['SS-LTERRA-MLc'] == False:
pass
else:
print('Applying Adjustment Method: SS-LTERRA-MLc')
logger.info('Applying Adjustment Method: SS-LTERRA-MLc')
all_trainX_cols = ['x_train_TI', 'x_train_TKE','x_train_WS','x_train_DIR','x_train_Hour']
all_trainY_cols = ['y_train']
all_testX_cols = ['x_test_TI','x_test_TKE','x_test_WS','x_test_DIR','x_test_Hour']
all_testY_cols = ['y_test']
inputdata_adj, lm_adj, m, c = perform_SS_LTERRA_S_ML_adjustment(inputdata.copy(),all_trainX_cols,all_trainY_cols,all_testX_cols,all_testY_cols)
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = 'SS_LTERRA_MLc'
adjustment_name = 'SS_LTERRA_MLc'
baseResultsLists = populate_resultsLists(baseResultsLists, '', adjustment_name, lm_adj, inputdata_adj, Timestamps, method)
TI_10minuteAdjusted = record_TIadj(adjustment_name,inputdata_adj,Timestamps, method, TI_10minuteAdjusted, emptyclassFlag=False)
if RSDtype['Selection'][0:4] == 'Wind':
print('Applying Adjustment Method: SS-LTERRA_MLc by stability class (TKE)')
logger.info('Applying Adjustment Method: SS-LTERRA_MLc by stability class (TKE)')
# stability subset output for primary height (all classes)
ResultsLists_class = initialize_resultsLists('class_')
className = 1
for item in All_class_data:
inputdata_adj, lm_adj, m, c= perform_SS_LTERRA_S_ML_adjustment(item[primary_idx].copy(),all_trainX_cols,all_trainY_cols,all_testX_cols,all_testY_cols)
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS_LTERRA_MLc' + '_' + 'class_' + str(className))
adjustment_name = str('SS_LTERRA_MLc' + '_TKE_' + str(className))
ResultsLists_class = populate_resultsLists(ResultsLists_class, 'class_', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsList_stability = populate_resultsLists_stability(ResultsLists_stability, ResultsLists_class, '')
if RSD_alphaFlag:
print('Applying Adjustment Method: SS-LTERRA_MLc by stability class Alpha w/ RSD')
logger.info('Applying Adjustment Method: SS-LTERRA_MLc by stability class Alpha w/ RSD')
ResultsLists_class_alpha_RSD = initialize_resultsLists('class_alpha_RSD')
className = 1
for item in All_class_data_alpha_RSD:
inputdata_adj, lm_adj, m, c = perform_SS_LTERRA_S_ML_adjustment(item.copy(),all_trainX_cols,all_trainY_cols,all_testX_cols,all_testY_cols)
lm_adj['sensor'] = sensor
lm_adj['height'] = height
                lm_adj['adjustment'] = str('SS_LTERRA_MLc' + '_' + 'class_' + str(className))
                adjustment_name = str('SS_LTERRA_MLc' + '_alphaRSD_' + str(className))
ResultsLists_class_alpha_RSD = populate_resultsLists(ResultsLists_class_alpha_RSD, 'class_alpha_RSD', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_RSD = populate_resultsLists_stability(ResultsLists_stability_alpha_RSD, ResultsLists_class_alpha_RSD, 'alpha_RSD')
if cup_alphaFlag:
print('Applying Adjustment Method: SS-LTERRA_MLc by stability class Alpha w/cup')
logger.info('Applying Adjustment Method: SS-LTERRA_MLc by stability class Alpha w/cup')
ResultsLists_class_alpha_Ane = initialize_resultsLists('class_alpha_Ane')
className = 1
for item in All_class_data_alpha_Ane:
inputdata_adj, lm_adj, m, c = perform_SS_LTERRA_S_ML_adjustment(item.copy(),all_trainX_cols,all_trainY_cols,all_testX_cols,all_testY_cols)
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS_LTERRA_MLc' + '_' + 'class_' + str(className))
emptyclassFlag = False
adjustment_name = str('SS_LTERRA_MLc' + '_alphaCup_' + str(className))
ResultsLists_class_alpha_Ane = populate_resultsLists(ResultsLists_class_alpha_Ane, 'class_alpha_Ane', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_Ane = populate_resultsLists_stability(ResultsLists_stability_alpha_Ane, ResultsLists_class_alpha_Ane, 'alpha_Ane')
# *********************** #
# Site Specific SS-LTERRA-MLb
if method != 'SS-LTERRA-MLb':
pass
elif method == 'SS-LTERRA-MLb' and adjustments_metadata['SS-LTERRA-MLb'] == False:
pass
else:
print('Applying Adjustment Method: SS-LTERRA-MLb')
logger.info('Applying Adjustment Method: SS-LTERRA-MLb')
all_trainX_cols = ['x_train_TI', 'x_train_TKE']
all_trainY_cols = ['y_train']
all_testX_cols = ['x_test_TI','x_test_TKE']
all_testY_cols = ['y_test']
inputdata_adj, lm_adj, m, c = perform_SS_LTERRA_S_ML_adjustment(inputdata.copy(),all_trainX_cols,all_trainY_cols,all_testX_cols,all_testY_cols)
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = 'SS_LTERRA_MLb'
adjustment_name = 'SS_LTERRA_MLb'
baseResultsLists = populate_resultsLists(baseResultsLists, '', adjustment_name, lm_adj, inputdata_adj, Timestamps, method)
TI_10minuteAdjusted = record_TIadj(adjustment_name,inputdata_adj,Timestamps, method, TI_10minuteAdjusted, emptyclassFlag=False)
if RSDtype['Selection'][0:4] == 'Wind':
print('Applying Adjustment Method: SS-LTERRA_MLb by stability class (TKE)')
logger.info('Applying Adjustment Method: SS-LTERRA_MLb by stability class (TKE)')
# stability subset output for primary height (all classes)
ResultsLists_class = initialize_resultsLists('class_')
className = 1
for item in All_class_data:
inputdata_adj, lm_adj, m, c= perform_SS_LTERRA_S_ML_adjustment(item[primary_idx].copy(),all_trainX_cols,all_trainY_cols,all_testX_cols,all_testY_cols)
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS_LTERRA_MLb' + '_' + 'class_' + str(className))
adjustment_name = str('SS_LTERRA_MLb' + '_TKE_' + str(className))
ResultsLists_class = populate_resultsLists(ResultsLists_class, 'class_', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsList_stability = populate_resultsLists_stability(ResultsLists_stability, ResultsLists_class, '')
if RSD_alphaFlag:
print('Applying Adjustment Method: SS-LTERRA_MLb by stability class Alpha w/ RSD')
logger.info('Applying Adjustment Method: SS-LTERRA_MLb by stability class Alpha w/ RSD')
ResultsLists_class_alpha_RSD = initialize_resultsLists('class_alpha_RSD')
className = 1
for item in All_class_data_alpha_RSD:
inputdata_adj, lm_adj, m, c = perform_SS_LTERRA_S_ML_adjustment(item.copy(),all_trainX_cols,all_trainY_cols,all_testX_cols,all_testY_cols)
lm_adj['sensor'] = sensor
lm_adj['height'] = height
                lm_adj['adjustment'] = str('SS_LTERRA_MLb' + '_' + 'class_' + str(className))
adjustment_name = str('SS_LTERRA_MLb' + '_alphaRSD_' + str(className))
ResultsLists_class_alpha_RSD = populate_resultsLists(ResultsLists_class_alpha_RSD, 'class_alpha_RSD', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_RSD = populate_resultsLists_stability(ResultsLists_stability_alpha_RSD, ResultsLists_class_alpha_RSD, 'alpha_RSD')
if cup_alphaFlag:
print('Applying Adjustment Method: SS-LTERRA_MLb by stability class Alpha w/cup')
logger.info('Applying Adjustment Method: SS-LTERRA_MLb by stability class Alpha w/cup')
ResultsLists_class_alpha_Ane = initialize_resultsLists('class_alpha_Ane')
className = 1
for item in All_class_data_alpha_Ane:
inputdata_adj, lm_adj, m, c = perform_SS_LTERRA_S_ML_adjustment(item.copy(),all_trainX_cols,all_trainY_cols,all_testX_cols,all_testY_cols)
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS_LTERRA_MLb' + '_' + 'class_' + str(className))
emptyclassFlag = False
adjustment_name = str('SS_LTERRA_MLb' + '_alphaCup_' + str(className))
ResultsLists_class_alpha_Ane = populate_resultsLists(ResultsLists_class_alpha_Ane, 'class_alpha_Ane', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_Ane = populate_resultsLists_stability(ResultsLists_stability_alpha_Ane, ResultsLists_class_alpha_Ane, 'alpha_Ane')
# *********************** #
# TI Extrapolation (TI-Ext)
if method != 'TI-Extrap':
pass
elif method == 'TI-Extrap' and adjustments_metadata['TI-Extrap'] == False:
pass
else:
print ('Found enough data to perform extrapolation comparison')
block_print()
# Get extrapolation height
height_extrap = float(extrap_metadata['height'][extrap_metadata['type'] == 'extrap'])
# Extrapolate
inputdata_adj, lm_adj, shearTimeseries= perform_TI_extrapolation(inputdata.copy(), extrap_metadata,
extrapolation_type, height)
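        # perform_TI_extrapolation (defined elsewhere) appears to return the
        # adjusted data, the regression fit, and a time series of shear values
        # used to project TI up to height_extrap.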
adjustment_name = 'TI_EXTRAP'
lm_adj['adjustment'] = adjustment_name
inputdataEXTRAP = inputdata_adj.copy()
inputdataEXTRAP, baseResultsLists = extrap_configResult(extrapolation_type, inputdataEXTRAP, baseResultsLists, method,lm_adj)
if RSDtype['Selection'][0:4] == 'Wind':
# stability subset output for primary height (all classes)
ResultsLists_class = initialize_resultsLists('class_')
className = 1
for item in All_class_data:
inputdata_adj, lm_adj, shearTimeseries= perform_TI_extrapolation(item[primary_idx].copy(), extrap_metadata,
extrapolation_type, height)
lm_adj['adjustment'] = str('TI_EXT_class1' + '_TKE_' + 'class_' + str(className))
inputdataEXTRAP = inputdata_adj.copy()
inputdataEXTRAP, ResultsLists_class = extrap_configResult(extrapolation_type, inputdataEXTRAP, ResultsLists_class,
method, lm_adj, appendString = 'class_')
className += 1
ResultsList_stability = populate_resultsLists_stability(ResultsLists_stability, ResultsLists_class, '')
if cup_alphaFlag:
ResultsLists_class_alpha_Ane = initialize_resultsLists('class_alpha_Ane')
className = 1
for item in All_class_data_alpha_Ane:
inputdata_adj, lm_adj, shearTimeseries= perform_TI_extrapolation(item.copy(), extrap_metadata,
extrapolation_type, height)
lm_adj['adjustment'] = str('TI_Ane_class1' + '_alphaCup_' + 'class_' + str(className))
inputdataEXTRAP = inputdata_adj.copy()
inputdataEXTRAP, ResultsLists_class_alpha_Ane = extrap_configResult(extrapolation_type, inputdataEXTRAP,
ResultsLists_class_alpha_Ane, method,
lm_adj, appendString = 'class_alpha_Ane')
className += 1
ResultsLists_stability_alpha_Ane = populate_resultsLists_stability(ResultsLists_stability_alpha_Ane,
ResultsLists_class_alpha_Ane, 'alpha_Ane')
if RSD_alphaFlag:
ResultsLists_class_alpha_RSD = initialize_resultsLists('class_alpha_RSD')
className = 1
for item in All_class_data_alpha_RSD:
inputdata_adj, lm_adj, shearTimeseries= perform_TI_extrapolation(item.copy(), extrap_metadata,
extrapolation_type, height)
lm_adj['adjustment'] = str('TI_RSD_class1' + '_alphaRSD_' + 'class_' + str(className))
inputdataEXTRAP = inputdata_adj.copy()
inputdataEXTRAP, ResultsLists_class_alpha_RSD = extrap_configResult(extrapolation_type, inputdataEXTRAP,
ResultsLists_class_alpha_RSD, method,
lm_adj, appendString = 'class_alpha_RSD')
className += 1
ResultsLists_stability_alpha_RSD = populate_resultsLists_stability(ResultsLists_stability_alpha_RSD,
ResultsLists_class_alpha_RSD, 'alpha_RSD')
# Add extra info to meta data and reformat
if extrapolation_type == 'simple':
desc = 'No truth measurement at extrapolation height'
else:
desc = 'Truth measurement available at extrapolation height'
extrap_metadata = (extrap_metadata
.append({'type': np.nan, 'height': np.nan, 'num': np.nan},
ignore_index=True)
.append(pd.DataFrame([['extrapolation type', extrapolation_type, desc]],
columns=extrap_metadata.columns))
.rename(columns={'type': 'Type',
'height': 'Height (m)',
'num': 'Comparison Height Number'}))
enable_print()
# ************************************************** #
# Histogram Matching
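    # SS-Match (perform_match, defined elsewhere) presumably remaps the RSD TI
    # values so their histogram matches the reference sensor's; the SS-Match2
    # section below applies the same idea to the input-corrected data.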
if method != 'SS-Match':
pass
elif method == 'SS-Match' and adjustments_metadata['SS-Match'] == False:
pass
else:
print('Applying Match algorithm: SS-Match')
logger.info('Applying Match algorithm: SS-Match')
inputdata_adj, lm_adj = perform_match(inputdata.copy())
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = 'SS-Match'
adjustment_name = 'SS_Match'
baseResultsLists = populate_resultsLists(baseResultsLists, '', adjustment_name, lm_adj, inputdata_adj,
Timestamps, method)
TI_10minuteAdjusted = record_TIadj(adjustment_name,inputdata_adj,Timestamps, method, TI_10minuteAdjusted, emptyclassFlag=False)
if RSDtype['Selection'][0:4] == 'Wind':
print('Applying Adjustment Method: SS-Match by stability class (TKE)')
logger.info('Applying Adjustment Method: SS-Match by stability class (TKE)')
ResultsLists_class = initialize_resultsLists('class_')
className = 1
for item in All_class_data:
inputdata_adj, lm_adj = perform_match(item[primary_idx].copy())
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-Match' + '_' + 'class_' + str(className))
adjustment_name = str('SS_Match' + '_TKE_' + str(className))
ResultsLists_class = populate_resultsLists(ResultsLists_class, 'class_', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsList_stability = populate_resultsLists_stability(ResultsLists_stability, ResultsLists_class, '')
if RSD_alphaFlag:
print('Applying Adjustment Method: SS-Match by stability class Alpha w/ RSD')
logger.info('Applying Adjustment Method: SS-Match by stability class Alpha w/ RSD')
ResultsLists_class_alpha_RSD = initialize_resultsLists('class_alpha_RSD')
className = 1
for item in All_class_data_alpha_RSD:
inputdata_adj, lm_adj = perform_match(item.copy())
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-Match' + '_' + 'class_' + str(className))
adjustment_name = str('SS_Match' + '_alphaRSD_' + str(className))
ResultsLists_class_alpha_RSD = populate_resultsLists(ResultsLists_class_alpha_RSD, 'class_alpha_RSD', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_RSD = populate_resultsLists_stability(ResultsLists_stability_alpha_RSD, ResultsLists_class_alpha_RSD, 'alpha_RSD')
if cup_alphaFlag:
print('Applying Adjustment Method: SS-Match by stability class Alpha w/cup')
logger.info('Applying Adjustment Method: SS-Match by stability class Alpha w/cup')
ResultsLists_class_alpha_Ane = initialize_resultsLists('class_alpha_Ane')
className = 1
for item in All_class_data_alpha_Ane:
inputdata_adj, lm_adj = perform_match(item.copy())
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-Match' + '_' + 'class_' + str(className))
adjustment_name = str('SS_Match' + '_alphaCup_' + str(className))
ResultsLists_class_alpha_Ane = populate_resultsLists(ResultsLists_class_alpha_Ane, 'class_alpha_Ane', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_Ane = populate_resultsLists_stability(ResultsLists_stability_alpha_Ane, ResultsLists_class_alpha_Ane, 'alpha_Ane')
# ************************************************** #
# Histogram Matching Input Corrected
if method != 'SS-Match2':
pass
elif method == 'SS-Match2' and adjustments_metadata['SS-Match2'] == False:
pass
else:
print('Applying input match algorithm: SS-Match2')
logger.info('Applying input match algorithm: SS-Match2')
inputdata_adj, lm_adj = perform_match_input(inputdata.copy())
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = 'SS-Match2'
adjustment_name = 'SS_Match2'
baseResultsLists = populate_resultsLists(baseResultsLists, '', adjustment_name, lm_adj, inputdata_adj,
Timestamps, method)
TI_10minuteAdjusted = record_TIadj(adjustment_name,inputdata_adj,Timestamps, method, TI_10minuteAdjusted, emptyclassFlag=False)
if RSDtype['Selection'][0:4] == 'Wind':
print('Applying Adjustment Method: SS-Match2 by stability class (TKE)')
logger.info('Applying Adjustment Method: SS-Match2 by stability class (TKE)')
ResultsLists_class = initialize_resultsLists('class_')
className = 1
for item in All_class_data:
inputdata_adj, lm_adj = perform_match_input(item[primary_idx].copy())
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-Match2' + '_' + 'class_' + str(className))
adjustment_name = str('SS_Match2' + '_TKE_' + str(className))
ResultsLists_class = populate_resultsLists(ResultsLists_class, 'class_', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsList_stability = populate_resultsLists_stability(ResultsLists_stability, ResultsLists_class, '')
if RSD_alphaFlag:
print('Applying Adjustment Method: SS-Match2 by stability class Alpha w/ RSD')
logger.info('Applying Adjustment Method: SS-Match2 by stability class Alpha w/ RSD')
ResultsLists_class_alpha_RSD = initialize_resultsLists('class_alpha_RSD')
className = 1
for item in All_class_data_alpha_RSD:
inputdata_adj, lm_adj = perform_match_input(item.copy())
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-Match2' + '_' + 'class_' + str(className))
adjustment_name = str('SS_Match2' + '_alphaRSD_' + str(className))
ResultsLists_class_alpha_RSD = populate_resultsLists(ResultsLists_class_alpha_RSD, 'class_alpha_RSD', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_RSD = populate_resultsLists_stability(ResultsLists_stability_alpha_RSD, ResultsLists_class_alpha_RSD, 'alpha_RSD')
if cup_alphaFlag:
print('Applying Adjustment Method: SS-Match2 by stability class Alpha w/cup')
logger.info('Applying Adjustment Method: SS-Match2 by stability class Alpha w/cup')
ResultsLists_class_alpha_Ane = initialize_resultsLists('class_alpha_Ane')
className = 1
for item in All_class_data_alpha_Ane:
inputdata_adj, lm_adj = perform_match_input(item.copy())
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('SS-Match2' + '_' + 'class_' + str(className))
adjustment_name = str('SS_Match2' + '_alphaCup_' + str(className))
ResultsLists_class_alpha_Ane = populate_resultsLists(ResultsLists_class_alpha_Ane, 'class_alpha_Ane', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_Ane = populate_resultsLists_stability(ResultsLists_stability_alpha_Ane, ResultsLists_class_alpha_Ane, 'alpha_Ane')
# ************************************************** #
    # Global Simple Phase II mean Linear Regressions (G-Sa) + project
    # Phase II global regression: RSD_TI = 0.984993 * RSD_TI + 0.087916
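    # i.e. perform_G_Sa_adjustment appears to apply one global linear map of the
    # form TI_adj = m * TI + c; the fitted (m, c) are printed below.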
if method != 'G-Sa':
pass
elif method == 'G-Sa' and adjustments_metadata['G-Sa'] == False:
pass
else:
print('Applying Adjustment Method: G-Sa')
logger.info('Applying Adjustment Method: G-Sa')
override = False
inputdata_adj, lm_adj, m, c = perform_G_Sa_adjustment(inputdata.copy(),override,RSDtype)
print("G-Sa: y = " + str(m) + " * x + " + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = 'G-Sa'
adjustment_name = 'G_Sa'
baseResultsLists = populate_resultsLists(baseResultsLists, '', adjustment_name, lm_adj, inputdata_adj,
Timestamps, method)
TI_10minuteAdjusted = record_TIadj(adjustment_name,inputdata_adj,Timestamps, method, TI_10minuteAdjusted, emptyclassFlag=False)
if RSDtype['Selection'][0:4] == 'Wind':
print('Applying Adjustment Method: G-Sa by stability class (TKE)')
logger.info('Applying Adjustment Method: G-Sa by stability class (TKE)')
# stability subset output for primary height (all classes)
ResultsLists_class = initialize_resultsLists('class_')
className = 1
for item in All_class_data:
inputdata_adj, lm_adj, m, c = perform_G_Sa_adjustment(item[primary_idx].copy(),override,RSDtype)
print("G-Sa: y = " + str(m) + " * x + " + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('G-Sa' + '_TKE_' + 'class_' + str(className))
adjustment_name = str('G-Sa'+ '_TKE_' + str(className))
ResultsLists_class = populate_resultsLists(ResultsLists_class, 'class_', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsList_stability = populate_resultsLists_stability(ResultsLists_stability, ResultsLists_class, '')
if RSD_alphaFlag:
print('Applying Adjustment Method: G-Sa by stability class Alpha w/ RSD')
logger.info('Applying Adjustment Method: G-Sa by stability class Alpha w/ RSD')
ResultsLists_class_alpha_RSD = initialize_resultsLists('class_alpha_RSD')
className = 1
for item in All_class_data_alpha_RSD:
inputdata_adj, lm_adj, m, c = perform_G_Sa_adjustment(item.copy(),override,RSDtype)
print ("G-Sa: y = " + str(m) + "* x +" + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('G-Sa' + '_' + 'class_' + str(className))
adjustment_name = str('G-Sa' + '_alphaRSD_' + str(className))
ResultsLists_class_alpha_RSD = populate_resultsLists(ResultsLists_class_alpha_RSD, 'class_alpha_RSD', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_RSD = populate_resultsLists_stability(ResultsLists_stability_alpha_RSD, ResultsLists_class_alpha_RSD, 'alpha_RSD')
if cup_alphaFlag:
print('Applying Adjustment Method: G-Sa by stability class Alpha w/cup')
logger.info('Applying Adjustment Method: G-Sa by stability class Alpha w/cup')
ResultsLists_class_alpha_Ane = initialize_resultsLists('class_alpha_Ane')
className = 1
for item in All_class_data_alpha_Ane:
inputdata_adj, lm_adj, m, c = perform_G_Sa_adjustment(item.copy(),override,RSDtype)
print ("G-Sa: y = " + str(m) + "* x +" + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('G-Sa' + '_alphaCup_' + 'class_' + str(className))
                adjustment_name = str('G-Sa' + '_alphaCup_' + str(className))
ResultsLists_class_alpha_Ane = populate_resultsLists(ResultsLists_class_alpha_Ane, 'class_alpha_Ane', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_Ane = populate_resultsLists_stability(ResultsLists_stability_alpha_Ane, ResultsLists_class_alpha_Ane, 'alpha_Ane')
# ******************************************************** #
# Global Simple w/filter Phase II Linear Regressions (G-SFa) + project
    # Override the slope/intercept with the Phase II WindCube (WC) fit,
    # m = 0.7086 and c = 0.0225 (these values should still be verified)
if method != 'G-SFa':
pass
elif method == 'G-SFa' and adjustments_metadata['G-SFa'] == False:
pass
elif RSDtype['Selection'][0:4] != 'Wind':
pass
else:
print('Applying Adjustment Method: G-SFa')
logger.info('Applying Adjustment Method: G-SFa')
override = [0.7086, 0.0225]
inputdata_adj, lm_adj, m, c = perform_G_Sa_adjustment(inputdata.copy(),override,RSDtype)
print("G-SFa: y = " + str(m) + " * x + " + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = 'G-SFa'
adjustment_name = 'G_SFa'
baseResultsLists = populate_resultsLists(baseResultsLists, '', adjustment_name, lm_adj, inputdata_adj,
Timestamps, method)
TI_10minuteAdjusted = record_TIadj(adjustment_name,inputdata_adj,Timestamps, method, TI_10minuteAdjusted, emptyclassFlag=False)
if RSDtype['Selection'][0:4] == 'Wind':
print('Applying Adjustment Method: G-SFa by stability class (TKE)')
logger.info('Applying Adjustment Method: G-SFa by stability class (TKE)')
# stability subset output for primary height (all classes)
ResultsLists_class = initialize_resultsLists('class_')
className = 1
for item in All_class_data:
inputdata_adj, lm_adj, m, c = perform_G_Sa_adjustment(item[primary_idx].copy(),override,RSDtype)
print("G-SFa: y = " + str(m) + " * x + " + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('G-SFa' + '_TKE_' + 'class_' + str(className))
adjustment_name = str('G-SFa'+ '_TKE_' + str(className))
ResultsLists_class = populate_resultsLists(ResultsLists_class, 'class_', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsList_stability = populate_resultsLists_stability(ResultsLists_stability, ResultsLists_class, '')
if RSD_alphaFlag:
print('Applying Adjustment Method: G-SFa by stability class Alpha w/ RSD')
logger.info('Applying Adjustment Method: G-SFa by stability class Alpha w/ RSD')
ResultsLists_class_alpha_RSD = initialize_resultsLists('class_alpha_RSD')
className = 1
for item in All_class_data_alpha_RSD:
inputdata_adj, lm_adj, m, c = perform_G_Sa_adjustment(item.copy(),override,RSDtype)
print ("G-SFa: y = " + str(m) + "* x +" + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
                lm_adj['adjustment'] = str('G-SFa' + '_' + 'class_' + str(className))
adjustment_name = str('G-SFa' + '_alphaRSD_' + str(className))
ResultsLists_class_alpha_RSD = populate_resultsLists(ResultsLists_class_alpha_RSD, 'class_alpha_RSD', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_RSD = populate_resultsLists_stability(ResultsLists_stability_alpha_RSD, ResultsLists_class_alpha_RSD, 'alpha_RSD')
if cup_alphaFlag:
print('Applying Adjustment Method: G-SFa by stability class Alpha w/cup')
logger.info('Applying Adjustment Method: G-SFa by stability class Alpha w/cup')
ResultsLists_class_alpha_Ane = initialize_resultsLists('class_alpha_Ane')
className = 1
for item in All_class_data_alpha_Ane:
inputdata_adj, lm_adj, m, c = perform_G_Sa_adjustment(item.copy(),override,RSDtype)
print ("G-SFa: y = " + str(m) + "* x +" + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('G-SFa' + '_alphaCup_' + 'class_' + str(className))
                adjustment_name = str('G-SFa' + '_alphaCup_' + str(className))
ResultsLists_class_alpha_Ane = populate_resultsLists(ResultsLists_class_alpha_Ane, 'class_alpha_Ane', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_Ane = populate_resultsLists_stability(ResultsLists_stability_alpha_Ane, ResultsLists_class_alpha_Ane, 'alpha_Ane')
# ************************************************ #
    # Global Standard Deviation and WS adjustment (G-SFc)
if method != 'G-SFc':
pass
elif method == 'G-SFc' and adjustments_metadata['G-SFc'] == False:
pass
elif RSDtype['Selection'][0:4] != 'Wind':
pass
else:
        print('Applying Adjustment Method: G-SFc')
        logger.info('Applying Adjustment Method: G-SFc')
inputdata_adj, lm_adj, m, c = perform_G_SFc_adjustment(inputdata.copy())
print("G-SFc: y = " + str(m) + " * x + " + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = 'G-SFc'
adjustment_name = 'G_SFc'
baseResultsLists = populate_resultsLists(baseResultsLists, '', adjustment_name, lm_adj, inputdata_adj,
Timestamps, method)
TI_10minuteAdjusted = record_TIadj(adjustment_name,inputdata_adj,Timestamps, method, TI_10minuteAdjusted, emptyclassFlag=False)
if RSDtype['Selection'][0:4] == 'Wind':
            print('Applying Adjustment Method: G-SFc by stability class (TKE)')
            logger.info('Applying Adjustment Method: G-SFc by stability class (TKE)')
# stability subset output for primary height (all classes)
ResultsLists_class = initialize_resultsLists('class_')
className = 1
for item in All_class_data:
inputdata_adj, lm_adj, m, c = perform_G_SFc_adjustment(item[primary_idx].copy())
print("G-SFc: y = " + str(m) + " * x + " + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('G-SFc' + '_TKE_' + 'class_' + str(className))
adjustment_name = str('G-SFc'+ '_TKE_' + str(className))
ResultsLists_class = populate_resultsLists(ResultsLists_class, 'class_', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsList_stability = populate_resultsLists_stability(ResultsLists_stability, ResultsLists_class, '')
if RSD_alphaFlag:
print('Applying Adjustment Method: G-SFc by stability class Alpha w/ RSD')
logger.info('Applying Adjustment Method: G-SFc by stability class Alpha w/ RSD')
ResultsLists_class_alpha_RSD = initialize_resultsLists('class_alpha_RSD')
className = 1
for item in All_class_data_alpha_RSD:
inputdata_adj, lm_adj, m, c = perform_G_SFc_adjustment(item.copy())
print ("G-SFc: y = " + str(m) + "* x +" + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('G-SFc' + '_' + 'class_' + str(className))
adjustment_name = str('G-SFc' + '_alphaRSD_' + str(className))
ResultsLists_class_alpha_RSD = populate_resultsLists(ResultsLists_class_alpha_RSD, 'class_alpha_RSD', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_RSD = populate_resultsLists_stability(ResultsLists_stability_alpha_RSD, ResultsLists_class_alpha_RSD, 'alpha_RSD')
if cup_alphaFlag:
print('Applying Adjustment Method: G-SFc by stability class Alpha w/cup')
logger.info('Applying Adjustment Method: G-SFc by stability class Alpha w/cup')
ResultsLists_class_alpha_Ane = initialize_resultsLists('class_alpha_Ane')
className = 1
for item in All_class_data_alpha_Ane:
inputdata_adj, lm_adj, m, c = perform_G_SFc_adjustment(item.copy())
print ("G-SFc: y = " + str(m) + "* x +" + str(c))
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('G-SFc' + '_alphaCup_' + 'class_' + str(className))
                adjustment_name = str('G-SFc' + '_alphaCup_' + str(className))
ResultsLists_class_alpha_Ane = populate_resultsLists(ResultsLists_class_alpha_Ane, 'class_alpha_Ane', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_Ane = populate_resultsLists_stability(ResultsLists_stability_alpha_Ane, ResultsLists_class_alpha_Ane, 'alpha_Ane')
# ************************ #
# Global Comprehensive (G-C)
'''
based on empirical calibrations by EON
'''
if method != 'G-C':
pass
elif method == 'G-C' and adjustments_metadata['G-C'] == False:
pass
else:
print('Applying Adjustment Method: G-C')
logger.info('Applying Adjustment Method: G-C')
inputdata_adj, lm_adj, m, c = perform_G_C_adjustment(inputdata.copy())
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = 'G-C'
adjustment_name = 'G_C'
baseResultsLists = populate_resultsLists(baseResultsLists, '', adjustment_name, lm_adj, inputdata_adj,
Timestamps, method)
TI_10minuteAdjusted = record_TIadj(adjustment_name,inputdata_adj,Timestamps, method, TI_10minuteAdjusted, emptyclassFlag=False)
if RSDtype['Selection'][0:4] == 'Wind':
print('Applying Adjustment Method: G-C by stability class (TKE)')
logger.info('Applying Adjustment Method: G-C by stability class (TKE)')
# stability subset output for primary height (all classes)
ResultsLists_class = initialize_resultsLists('class_')
className = 1
for item in All_class_data:
                print('class ' + str(className))
inputdata_adj, lm_adj, m, c = perform_G_C_adjustment(item[primary_idx].copy())
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('G-C' + '_TKE_' + 'class_' + str(className))
adjustment_name = str('G-C'+ '_TKE_' + str(className))
ResultsLists_class = populate_resultsLists(ResultsLists_class, 'class_', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsList_stability = populate_resultsLists_stability(ResultsLists_stability, ResultsLists_class, '')
if RSD_alphaFlag:
print('Applying Adjustment Method: G-C by stability class Alpha w/ RSD')
logger.info('Applying Adjustment Method: G-C by stability class Alpha w/ RSD')
ResultsLists_class_alpha_RSD = initialize_resultsLists('class_alpha_RSD')
className = 1
for item in All_class_data_alpha_RSD:
                print('class ' + str(className))
inputdata_adj, lm_adj, m, c = perform_G_C_adjustment(item.copy())
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('G-C' + '_' + 'class_' + str(className))
adjustment_name = str('G-C' + '_alphaRSD_' + str(className))
ResultsLists_class_alpha_RSD = populate_resultsLists(ResultsLists_class_alpha_RSD, 'class_alpha_RSD', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_RSD = populate_resultsLists_stability(ResultsLists_stability_alpha_RSD, ResultsLists_class_alpha_RSD, 'alpha_RSD')
if cup_alphaFlag:
print('Applying Adjustment Method: G-C by stability class Alpha w/cup')
logger.info('Applying Adjustment Method: G-C by stability class Alpha w/cup')
ResultsLists_class_alpha_Ane = initialize_resultsLists('class_alpha_Ane')
className = 1
for item in All_class_data_alpha_Ane:
                print('class ' + str(className))
inputdata_adj, lm_adj, m, c = perform_G_C_adjustment(item.copy())
lm_adj['sensor'] = sensor
lm_adj['height'] = height
lm_adj['adjustment'] = str('G-C' + '_alphaCup_' + 'class_' + str(className))
                adjustment_name = str('G-C' + '_alphaCup_' + str(className))
ResultsLists_class_alpha_Ane = populate_resultsLists(ResultsLists_class_alpha_Ane, 'class_alpha_Ane', adjustment_name, lm_adj,
inputdata_adj, Timestamps, method)
className += 1
ResultsLists_stability_alpha_Ane = populate_resultsLists_stability(ResultsLists_stability_alpha_Ane, ResultsLists_class_alpha_Ane, 'alpha_Ane')
# ************************ #
# Global Comprehensive (G-Match)
if method != 'G-Match':
pass
elif method == 'G-Match' and adjustments_metadata['G-Match'] == False:
pass
else:
print('Applying Adjustment Method: G-Match')
logger.info('Applying Adjustment Method: G-Match')
# ************************ #
# Global Comprehensive (G-Ref-S)
if method != 'G-Ref-S':
pass
elif method == 'G-Ref-S' and adjustments_metadata['G-Ref-S'] == False:
pass
else:
print('Applying Adjustment Method: G-Ref-S')
logger.info('Applying Adjustment Method: G-Ref-S')
# ************************ #
# Global Comprehensive (G-Ref-Sf)
if method != 'G-Ref-Sf':
pass
elif method == 'G-Ref-Sf' and adjustments_metadata['G-Ref-Sf'] == False:
pass
else:
print('Applying Adjustment Method: G-Ref-Sf')
logger.info('Applying Adjustment Method: G-Ref-Sf')
# ************************ #
# Global Comprehensive (G-Ref-SS)
if method != 'G-Ref-SS':
pass
elif method == 'G-Ref-SS' and adjustments_metadata['G-Ref-SS'] == False:
pass
else:
print('Applying Adjustment Method: G-Ref-SS')
logger.info('Applying Adjustment Method: G-Ref-SS')
# ************************ #
# Global Comprehensive (G-Ref-SS-S)
if method != 'G-Ref-SS-S':
pass
elif method == 'G-Ref-SS-S' and adjustments_metadata['G-Ref-SS-S'] == False:
pass
else:
print('Applying Adjustment Method: G-Ref-SS-S')
logger.info('Applying Adjustment Method: G-Ref-SS-S')
# ************************ #
# Global Comprehensive (G-Ref-WS-Std)
if method != 'G-Ref-WS-Std':
pass
elif method == 'G-Ref-WS-Std' and adjustments_metadata['G-Ref-WS-Std'] == False:
pass
else:
print('Applying Adjustment Method: G-Ref-WS-Std')
logger.info('Applying Adjustment Method: G-Ref-WS-Std')
# ***************************************** #
# Global LTERRA WC 1Hz Data (G-LTERRA_WC_1Hz)
if method != 'G-LTERRA_WC_1Hz':
pass
elif method == 'G-LTERRA_WC_1Hz' and adjustments_metadata['G-LTERRA_WC_1Hz'] == False:
pass
else:
print('Applying Adjustment Method: G-LTERRA_WC_1Hz')
logger.info('Applying Adjustment Method: G-LTERRA_WC_1Hz')
# ************************************************ #
# Global LTERRA ZX Machine Learning (G-LTERRA_ZX_ML)
if method != 'G-LTERRA_ZX_ML':
pass
elif adjustments_metadata['G-LTERRA_ZX_ML'] == False:
pass
else:
print('Applying Adjustment Method: G-LTERRA_ZX_ML')
logger.info('Applying Adjustment Method: G-LTERRA_ZX_ML')
# ************************************************ #
# Global LTERRA WC Machine Learning (G-LTERRA_WC_ML)
if method != 'G-LTERRA_WC_ML':
pass
elif adjustments_metadata['G-LTERRA_WC_ML'] == False:
pass
else:
print('Applying Adjustment Method: G-LTERRA_WC_ML')
logger.info('Applying Adjustment Method: G-LTERRA_WC_ML')
# ************************************************** #
# Global LTERRA WC w/Stability 1Hz (G-LTERRA_WC_S_1Hz)
if method != 'G-LTERRA_WC_S_1Hz':
pass
elif method == 'G-LTERRA_WC_S_1Hz' and adjustments_metadata['G-LTERRA_WC_S_1Hz'] == False:
pass
else:
print('Applying Adjustment Method: G-LTERRA_WC_S_1Hz')
logger.info('Applying Adjustment Method: G-LTERRA_WC_S_1Hz')
# ************************************************************** #
# Global LTERRA WC w/Stability Machine Learning (G-LTERRA_WC_S_ML)
if method != 'G-LTERRA_WC_S_ML':
pass
elif method == 'G-LTERRA_WC_S_ML' and adjustments_metadata['G-LTERRA_WC_S_ML'] == False:
pass
else:
print('Applying Adjustment Method: G-LTERRA_WC_S_ML')
logger.info('Applying Adjustment Method: G-LTERRA_WC_S_ML')
    if not RSD_alphaFlag:
        ResultsLists_stability_alpha_RSD = ResultsList_stability
    if not cup_alphaFlag:
        ResultsLists_stability_alpha_Ane = ResultsList_stability
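    # Without a WindCube-type RSD there is no TKE-based stability breakdown, so
    # every stability-binned output below is written as a NaN placeholder.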
if RSDtype['Selection'][0:4] != 'Wind':
reg_results_class1 = np.nan
reg_results_class2 = np.nan
reg_results_class3 = np.nan
reg_results_class4 = np.nan
reg_results_class5 = np.nan
TI_MBEList_stability = np.nan
TI_DiffList_stability = np.nan
TI_DiffRefBinsList_stability = np.nan
TI_RMSEList_stability = np.nan
RepTI_MBEList_stability = np.nan
RepTI_DiffList_stability = np.nan
RepTI_DiffRefBinsList_stability = np.nan
RepTI_RMSEList_stability = np.nan
rep_TI_results_1mps_List_stability = np.nan
rep_TI_results_05mps_List_stability = np.nan
TIBinList_stability = np.nan
TIRefBinList_stability = np.nan
total_StatsList_stability = np.nan
belownominal_statsList_stability = np.nan
abovenominal_statsList_stability = np.nan
lm_adjList_stability = np.nan
adjustmentTagList_stability = np.nan
Distibution_statsList_stability = np.nan
sampleTestsLists_stability = np.nan
# Write 10 minute Adjusted data to a csv file
outpath_dir = os.path.dirname(results_filename)
outpath_file = os.path.basename(results_filename)
outpath_file = str('TI_10minuteAdjusted_' + outpath_file.split('.xlsx')[0] + '.csv')
out_dir = os.path.join(outpath_dir,outpath_file)
TI_10minuteAdjusted.to_csv(out_dir)
write_all_resultstofile(reg_results, baseResultsLists, count_1mps, count_05mps, count_1mps_train, count_05mps_train,
count_1mps_test, count_05mps_test, name_1mps_tke, name_1mps_alpha_Ane, name_1mps_alpha_RSD,
name_05mps_tke, name_05mps_alpha_Ane, name_05mps_alpha_RSD, count_05mps_tke, count_05mps_alpha_Ane, count_05mps_alpha_RSD,
count_1mps_tke, count_1mps_alpha_Ane, count_1mps_alpha_RSD,results_filename, siteMetadata, filterMetadata,
Timestamps,timestamp_train,timestamp_test,regimeBreakdown_tke, regimeBreakdown_ane, regimeBreakdown_rsd,
Ht_1_ane, Ht_2_ane, extrap_metadata, reg_results_class1, reg_results_class2, reg_results_class3,
reg_results_class4, reg_results_class5,reg_results_class1_alpha, reg_results_class2_alpha, reg_results_class3_alpha,
reg_results_class4_alpha, reg_results_class5_alpha, Ht_1_rsd, Ht_2_rsd, ResultsLists_stability, ResultsLists_stability_alpha_RSD,
ResultsLists_stability_alpha_Ane, stabilityFlag, cup_alphaFlag, RSD_alphaFlag, TimeTestA_baseline_df, TimeTestB_baseline_df,
TimeTestC_baseline_df,time_test_A_adjustment_df,time_test_B_adjustment_df,time_test_C_adjustment_df)
# ******************************************************************************** #
# File: FileStorage/language/LanguagePack.py  (repo: Thiefxt/FileStorage, MIT)
# ******************************************************************************** #
"""
@Author        : xiaotao
@Email         : 18773993654@163.com
@Last modified : 2020/4/24 10:18
@Filename      : LanguagePack.py
@Description   : Status-code language pack with localized messages
@Software      : PyCharm
"""
class RET:
"""
    Language pack of status codes.
"""
OK = "200"
DBERR = "501"
NODATA = "462"
DATAEXIST = "433"
DATAERR = "499"
REQERR = "521"
IPERR = "422"
THIRDERR = "431"
IOERR = "502"
SERVERERR = "500"
UNKNOWERR = "451"
USER_STATUS = "465"
# In each tuple: the first entry is Simplified Chinese, the second English,
# the third Traditional Chinese
language_pack = {
    RET.OK: ("成功", "Success"),
    RET.DBERR: ("数据库查询错误", "Database query error"),
    RET.NODATA: ("数据不存在", "Data does not exist"),
    RET.DATAEXIST: ("数据已存在", "Data already exists"),
    RET.DATAERR: ("数据格式错误", "Data format error"),
    RET.REQERR: ("非法请求或请求次数受限", "Illegal request or request rate limited"),
    RET.IPERR: ("IP受限", "IP restricted"),
    RET.THIRDERR: ("第三方系统错误", "Third-party system error"),
    RET.IOERR: ("文件读写错误", "File read/write error"),
    RET.SERVERERR: ("内部错误", "Internal error"),
    RET.UNKNOWERR: ("未知错误", "Unknown error"),
    RET.USER_STATUS: ("账号已被禁用,如有疑义请联系平台客服", "Account disabled; please contact platform support"),
}
class Language(object):
    _lang = 'zh_cn'
@classmethod
def init(cls, lang):
cls._lang = lang
@classmethod
def get(cls, value):
lang = language_pack.get(value)
if not lang:
return None
if cls._lang == 'zh_cn' and len(lang) > 0:
return lang[0]
elif cls._lang == 'en_US' and len(lang) > 1:
return lang[1]
elif cls._lang == 'zh_F' and len(lang) > 2:
return lang[2]
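

# Minimal usage sketch (not part of the original module); assumes each
# language_pack tuple carries at least (Simplified Chinese, English) entries.
if __name__ == '__main__':
    Language.init('zh_cn')
    print(Language.get(RET.OK))   # -> "成功"
    Language.init('en_US')
    print(Language.get(RET.OK))   # -> "Success"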
# ******************************************************************************** #
# File: validate_binary_tree/solution_2.py  (repo: nunezpaul/practice_problems, MIT)
# ******************************************************************************** #
class TreeNode(object):
def __str__(self):
left = self.left.val if self.left else 'N'
right = self.right.val if self.right else 'N'
return "{left} {val} {right}".format(val=self.val, left=left, right=right)
def __init__(self, val):
self.val = val
self.right = None
self.left = None
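

# Validation strategy: a binary search tree is valid exactly when its in-order
# traversal visits values in strictly increasing order, so we flatten the tree
# (O(n) time and space) and check that the resulting sequence is strictly
# increasing.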
def is_valid_tree(root):
flattened_tree = []
_traverse_tree(root, flattened_tree)
return _is_in_order(flattened_tree)
def _traverse_tree(root, flattened_tree):
if root:
_traverse_tree(root.left, flattened_tree)
flattened_tree.append(root.val)
_traverse_tree(root.right, flattened_tree)
return
def _is_in_order(flattened_tree):
for idx, num in enumerate(flattened_tree):
if idx == 0:
prev_num = num
continue
if prev_num >= num:
return False
prev_num = num
return True
def construct_bad_tree():
root = TreeNode(4)
root.left = TreeNode(3)
root.left.left = TreeNode(1)
root.left.right = TreeNode(2)
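    # ^ invalid: 2 is the right child of 3, but a right child must exceed its parent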
root.right = TreeNode(10)
root.right.left = TreeNode(6)
root.right.right = TreeNode(12)
root.right.left.left = TreeNode(5)
root.right.left.right = TreeNode(8)
root.right.right.right = TreeNode(14)
root.right.right.left = TreeNode(11)
return root
def construct_good_tree():
root = TreeNode(4)
root.left = TreeNode(3)
root.left.left = TreeNode(1)
# root.left.right = TreeNode(2)
root.right = TreeNode(10)
root.right.left = TreeNode(6)
root.right.right = TreeNode(12)
root.right.left.left = TreeNode(5)
root.right.left.right = TreeNode(8)
root.right.right.right = TreeNode(14)
root.right.right.left = TreeNode(11)
return root
def construct_bad_tree2():
root = TreeNode(5)
root.left = TreeNode(1)
root.right = TreeNode(4)
root.right.left = TreeNode(3)
root.right.right = TreeNode(6)
return root
def construct_good_tree2():
root = TreeNode(2)
root.left = TreeNode(1)
    root.right = TreeNode(3)
    return root
if __name__ == '__main__':
bad_root = construct_bad_tree()
assert not is_valid_tree(bad_root)
good_root = construct_good_tree()
assert is_valid_tree(good_root)
bad_root2 = construct_bad_tree2()
assert not is_valid_tree(bad_root2)
good_root2 = construct_good_tree2()
    assert is_valid_tree(good_root2)


# ******************************************************************************** #
# File: aoc_cqkh42/year_2020/day_12.py  (repo: cqkh42/advent-of-code, MIT)
# ******************************************************************************** #
"""
Solutions for day 12 of 2020's Advent of Code
"""
from typing import Tuple
def _rotate_right(n, e) -> Tuple[int, int]:
return -e, n
def _rotate_left(n, e) -> Tuple[int, int]:
return e, -n
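

# Rotating the (north, east) vector 90 degrees clockwise maps (n, e) -> (-e, n);
# rotating it 90 degrees counter-clockwise maps (n, e) -> (e, -n).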
def part_a(data) -> int:
"""
Solution for part a
Parameters
----------
data: str
Returns
-------
answer: int
"""
directions = ['N', 'E', 'S', 'W']
direction = 'E'
n = 0
e = 0
for instruction in data.split('\n'):
action = instruction[0]
number = int(instruction[1:])
if action == 'F':
action = direction
if action == 'N':
n += number
elif action == 'S':
n -= number
elif action == 'E':
e += number
elif action == 'W':
e -= number
elif action == 'R':
turns = number / 90
new_index = (directions.index(direction) + turns) % 4
direction = directions[int(new_index)]
elif action == 'L':
turns = number / 90
new_index = (directions.index(direction) - turns) % 4
direction = directions[int(new_index)]
return abs(n) + abs(e)
def part_b(data, **_) -> int:
"""
Solution for part b
Parameters
----------
data: str
Returns
-------
answer: int
"""
w_n = 1
w_e = 10
n = 0
e = 0
for instruction in data.split('\n'):
action = instruction[0]
number = int(instruction[1:])
if action == 'F':
n += (w_n*number)
e += (w_e*number)
if action == 'N':
w_n += number
elif action == 'S':
w_n -= number
elif action == 'E':
w_e += number
elif action == 'W':
w_e -= number
elif action == 'R':
for _ in range(number // 90):
w_n, w_e = _rotate_right(w_n, w_e)
elif action == 'L':
for _ in range(number // 90):
w_n, w_e = _rotate_left(w_n, w_e)
return abs(n) + abs(e)
# ******************************************************************************** #
# File: src/main/resources/python/stack.py  (repo: VAlgoLang/ManimDSLCompiler,
# BSD-3-Clause)
# ******************************************************************************** #
class Stack(DataStructure, ABC):
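    # NOTE: this file ships as a template that the compiler injects into generated
    # Manim scenes, so Manim globals (WHITE, UP, np, VGroup, Line, Text, ApplyMethod,
    # FadeIn/FadeOut, ShowCreation, ...) and the DataStructure/ABC bases are assumed
    # to be provided by the surrounding scene file rather than imported here.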
def __init__(self, ul, ur, ll, lr, aligned_edge, color=WHITE, text_color=WHITE, text_weight=NORMAL,
font="Times New Roman"):
super().__init__(ul, ur, ll, lr, aligned_edge, color, text_color, text_weight, font)
self.empty = None
def create_init(self, text=None, creation_style=None):
if not creation_style:
creation_style = "ShowCreation"
empty = InitStructure(text, 0, self.max_width - 2 * MED_SMALL_BUFF, color=self.color,
text_color=self.text_color)
self.empty = empty.all
empty.all.move_to(np.array([self.width_center, self.lr[1], 0]), aligned_edge=self.aligned_edge)
self.all.add(empty.all)
creation_transform = globals()[creation_style]
return [creation_transform(empty.text), ShowCreation(empty.shape)]
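
    # push(): place the new element just above the stack, shrink the whole stack
    # first if the element would cross the top boundary, then animate the element
    # onto the top.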
def push(self, obj, creation_style=None):
if not creation_style:
creation_style = "FadeIn"
animations = []
obj.all.move_to(np.array([self.width_center, self.ul[1] - 0.1, 0]), UP)
shrink, scale_factor = self.shrink_if_cross_boundary(obj.all)
if shrink:
animations.append([shrink])
target_width = self.all.get_width() * (scale_factor if scale_factor else 1)
obj.all.scale(target_width / obj.all.get_width())
creation_transform = globals()[creation_style]
animations.append([creation_transform(obj.all)])
animations.append([ApplyMethod(obj.all.next_to, self.all, np.array([0, 0.25, 0]))])
return animations
def pop(self, obj, fade_out=True):
self.all.remove(obj.all)
animation = [[ApplyMethod(obj.all.move_to, np.array([self.width_center, self.ul[1] - 0.1, 0]), UP)]]
if fade_out:
animation.append([FadeOut(obj.all)])
enlarge, scale_factor = self.shrink(new_width=self.all.get_width(), new_height=self.all.get_height() + 0.25)
if enlarge:
animation.append([enlarge])
return animation
def shrink_if_cross_boundary(self, new_obj):
height = new_obj.get_height()
if self.will_cross_boundary(height, "TOP"):
return self.shrink(new_width=self.all.get_width(), new_height=self.all.get_height() + height + 0.4)
return 0, 1
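
    # push_existing(): move an element that already belongs to another structure
    # onto this stack, apparently letting its former owner resize back and
    # rescaling the element to this stack's width.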
def push_existing(self, obj):
animation = [[ApplyMethod(obj.all.move_to, np.array([self.width_center, self.ul[1] - 0.1, 0]), UP)]]
enlarge, scale_factor = obj.owner.shrink(new_width=obj.owner.all.get_width(),
new_height=obj.owner.all.get_height() + 0.25)
sim_list = list()
if enlarge:
sim_list.append(enlarge)
scale_factor = self.all.get_width() / obj.all.get_width()
if scale_factor != 1:
sim_list.append(ApplyMethod(obj.all.scale, scale_factor, {"about_edge": UP}))
if len(sim_list) != 0:
animation.append(sim_list)
animation.append([ApplyMethod(obj.all.next_to, self.all, np.array([0, 0.25, 0]))])
return animation
def clean_up(self):
return [FadeOut(self.all)]
# Object representing a stack instantiation.
class InitStructure:
def __init__(self, text, angle, length=1.5, color=WHITE, text_color=WHITE, text_weight=NORMAL,
font="Times New Roman"):
self.shape = Line(color=color)
self.shape.set_length(length)
self.shape.set_angle(angle)
if text is not None:
self.text = Text(text, color=text_color, weight=text_weight, font=font)
self.text.next_to(self.shape, DOWN, SMALL_BUFF)
self.all = VGroup(self.text, self.shape)
else:
self.all = VGroup(self.shape)
# ******************************************************************************** #
# File: notebooks_for_development/phase_fold_bh_peg.py  (repo: mwanakijiji/rrlfe2, MIT)
# ******************************************************************************** #
#!/usr/bin/env python
# coding: utf-8
# Reads in photometry from different sources, normalizes them, and puts them
# onto a BJD time scale
# Created 2021 Dec. 28 by E.S.
import numpy as np
import pandas as pd
from astropy.time import Time
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
file_name_photometry_input = "./all_photometry_program_stars/polished/bh_peg_aavso_polished_ingest.txt"
period_input = 0.640993 # Monson 2017 period of BH Peg
# read in photometry
df_test2 = pd.read_csv(file_name_photometry_input,
names=["jd","mag","error"], delim_whitespace=True)
# phase-folded data
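# phase = fractional part of (t - t_min) / P, which collapses every epoch onto a
# single cycle in [0, 1)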
df_test2["epoch_start_zero"] = np.subtract(df_test2["jd"],np.min(df_test2["jd"]))
df_test2["baseline_div_period"] = np.divide(df_test2["epoch_start_zero"],period_input)
df_phase_folded = pd.DataFrame(data = [t%1. for t in df_test2["baseline_div_period"]], columns=["phase"])
df_phase_folded["mag"] = df_test2["mag"]
# find where maximum brightness is (i.e., minimum magnitude), and set the phase there to be zero
idx_max = df_phase_folded["mag"] == np.min(df_phase_folded["mag"])
df_phase_folded["phase"] = np.mod(np.subtract(df_phase_folded["phase"],df_phase_folded["phase"].loc[idx_max].values),1.)
# quick plot
plt.clf()
plt.scatter(df_phase_folded["phase"], df_phase_folded["mag"], s=2)
plt.title("Phase-folded curve using ")
plt.gca().invert_yaxis()
plt.show()
# write out
file_name_out = "./data/phase_folded_curves/junk.csv"
df_phase_folded.to_csv(file_name_out)
print("Wrote ", file_name_out)
# ******************************************************************************** #
# File: history/predict.py  (repo: Snipa22/pytrader, MIT)
# ******************************************************************************** #
from history.tools import normalization, filter_by_mins, create_sample_row
from history.models import Price, PredictionTest
import time
from history.tools import print_and_log
def predict_v2(ticker, hidden_layers=15, NUM_MINUTES_BACK=1000, NUM_EPOCHS=1000, granularity_minutes=15,
               datasetinputs=5, learningrate=0.005, bias=False, momentum=0.1, weightdecay=0.0,
               recurrent=False, timedelta_back_in_granularity_increments=0):
#setup
print_and_log( "(p)starting ticker:{} hidden:{} min:{} epoch:{} gran:{} dsinputs:{} learningrate:{} bias:{} momentum:{} weightdecay:{} recurrent:{}, timedelta_back_in_granularity_increments:{} ".format(ticker,hidden_layers,NUM_MINUTES_BACK,NUM_EPOCHS,granularity_minutes,datasetinputs,learningrate,bias,momentum,weightdecay,recurrent,timedelta_back_in_granularity_increments))
pt = PredictionTest()
pt.type = 'mock'
pt.symbol = ticker
pt.datasetinputs = datasetinputs
pt.hiddenneurons = hidden_layers
pt.minutes_back = NUM_MINUTES_BACK
pt.epochs = NUM_EPOCHS
pt.momentum = momentum
pt.granularity = granularity_minutes
pt.bias = bias
pt.bias_chart = -1 if pt.bias is None else ( 1 if pt.bias else 0 )
pt.learningrate = learningrate
pt.weightdecay = weightdecay
pt.recurrent = recurrent
pt.recurrent_chart = -1 if pt.recurrent is None else ( 1 if pt.recurrent else 0 )
pt.timedelta_back_in_granularity_increments = timedelta_back_in_granularity_increments
all_output = ""
start_time = int(time.time())
#get neural network & data
nn = pt.get_nn()
sample_data, test_data = pt.get_train_and_test_data()
#output / testing
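    # Walk-forward evaluation: each window of `datasetinputs` consecutive test
    # prices is fed to the network to predict the next price; a recommendation is
    # directionally correct when the predicted and actual moves share a sign.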
round_to = 2
num_times_directionally_correct = 0
num_times = 0
diffs = []
profitloss_pct = []
for i,val in enumerate(test_data):
try:
# get NN projection
sample = create_sample_row(test_data,i,datasetinputs)
recommend, nn_price, last_sample, projected_change_pct = pt.predict(sample)
## calculate profitability
actual_price = test_data[i+datasetinputs]
diff = nn_price - actual_price
diff_pct = 100 * diff / actual_price
directionally_correct = ( (actual_price - last_sample) > 0 and (nn_price - last_sample) > 0 ) or ( (actual_price - last_sample) < 0 and (nn_price - last_sample) < 0 )
if recommend != 'HOLD':
profitloss_pct = profitloss_pct + [abs( (actual_price - last_sample) / last_sample ) * ( 1 if directionally_correct else -1 )]
if directionally_correct:
num_times_directionally_correct = num_times_directionally_correct + 1
num_times = num_times + 1
diffs.append(diff)
output = "{}) seq ending in {} => {} (act {}, {}/{} pct off); Recommend: {}; Was Directionally Correct:{}".format(i,round(actual_price,round_to),round(nn_price,round_to),round(actual_price,round_to),round(diff,round_to),round(diff_pct,1),recommend,directionally_correct)
all_output = all_output + "\n" + output
except Exception as e:
if "list index out of range" not in str(e):
print_and_log("(p)"+str(e))
pass;
avg_diff = sum([abs(diff[0]) for diff in diffs]) / num_times
pct_correct = 100 * num_times_directionally_correct / num_times
modeled_profit_loss = sum(profitloss_pct) / len(profitloss_pct)
output = 'directionally correct {} of {} times. {}%. avg diff={}, profit={}'.format(num_times_directionally_correct,num_times,round(pct_correct,0),round(avg_diff,4),round(modeled_profit_loss,3))
print_and_log("(p)"+output)
all_output = all_output + "\n" + output
end_time = int(time.time())
pt.time = end_time - start_time
pt.prediction_size = len(diffs)
pt.output = all_output
pt.percent_correct = pct_correct
pt.avg_diff = avg_diff
pt.profitloss = modeled_profit_loss
pt.profitloss_int = int(pt.profitloss * 100)
pt.save()
return pt.pk
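
# A minimal standalone sketch (hypothetical helper, not used by predict_v2):
# the directional-correctness rule above is equivalent to requiring that the
# predicted and actual moves share a sign relative to the last sample.
def _directionally_correct_sketch(last_sample, nn_price, actual_price):
    """Return True when the predicted and actual price moves share a sign."""
    return (nn_price - last_sample) * (actual_price - last_sample) > 0
# e.g. _directionally_correct_sketch(100.0, 103.0, 101.5) -> True (both up)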
| 48.337349 | 380 | 0.692423 | 532 | 4,012 | 4.943609 | 0.255639 | 0.03346 | 0.034221 | 0.04943 | 0.242586 | 0.201521 | 0.093536 | 0.093536 | 0.093536 | 0.093536 | 0 | 0.016886 | 0.202891 | 4,012 | 82 | 381 | 48.926829 | 0.805503 | 0.021934 | 0 | 0.029851 | 0 | 0.029851 | 0.096988 | 0.010975 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014925 | false | 0.014925 | 0.059701 | 0 | 0.089552 | 0.059701 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
456368c28972d08c5777b27f3941bd69bf9f4b4c | 2,786 | py | Python | rl_teacher/selector.py | oguzserbetci/rl-teacher-atari | fd6c399921d347333d7c5b4b12c63f1a955cea5c | [
"MIT"
] | null | null | null | rl_teacher/selector.py | oguzserbetci/rl-teacher-atari | fd6c399921d347333d7c5b4b12c63f1a955cea5c | [
"MIT"
] | 5 | 2018-10-15T11:52:05.000Z | 2018-10-30T12:58:53.000Z | rl_teacher/selector.py | oguzserbetci/rl-teacher-atari | fd6c399921d347333d7c5b4b12c63f1a955cea5c | [
"MIT"
] | null | null | null | from rl_teacher.segment_sampling import segments_from_rand_rollout, sample_segment_from_path, basic_segment_from_null_action
import numpy as np
class Selector(object):
def __init__(self):
print("Selector initialized")
def select(self, segments):
print("Selector.select()")
return segments[:2], 0
class MinMaxSelector(object):
def __init__(self):
print("Selector initialized")
def select(self, segments):
print("Selector.select()")
sort = np.argsort([sum(segment['rewards']) for segment in segments])
return [segments[sort[0]], segments[sort[-1]]], 0
class VarianceSelector(object):
def __init__(self):
print("Selector initialized")
def select(self, segments):
print("Selector.select()")
variances = [segment['variance'] for segment in segments]
sort = np.argsort(variances)
return [segments[sort[-1]]], 0
class ClipSelector(object):
""" Wraps a reward model's path_callback to sample, select and record segments for human to annotate. """
def __init__(self, model, env_id, make_env, save_dir, paths_per_selection=500):
self.model = model
self.selector = VarianceSelector()
self.env_id = env_id
self.make_env = make_env
self.save_dir = save_dir
self.paths_per_wait = 1
self.clip_length = 90
self.stacked_frames = 4
self.workers = 4
self.paths_per_selection = paths_per_selection
self._num_paths_seen = 0 # Internal counter of how many paths we've seen
self.collected_paths = []
def path_callback(self, path):
# Video recording to elicit human feedback every x steps.
        if (self._num_paths_seen % self.paths_per_wait <= self.paths_per_selection) and \
                (self.model.clip_manager.total_number_of_clips < self.model.label_schedule.n_desired_labels):
if (len(self.collected_paths) < self.paths_per_selection):
self.collected_paths.append(path)
elif (len(self.collected_paths) == self.paths_per_selection):
selected_paths, selection_time = self.selector.select(self.collected_paths)
for selected_path in selected_paths:
segment = sample_segment_from_path(selected_path, int(self.model._frames_per_segment))
if segment:
self.model.clip_manager.add(segment, source="on-policy callback")
self.model.clip_manager.sort_clips(wait_until_database_fully_sorted=True)
self.collected_paths = []
print("clips sorted.")
self._num_paths_seen += 1
self.model.path_callback(path)
def predict_reward(self, path):
return self.model.predict_reward(path)
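
# Illustrative usage (toy values, assuming each segment dict carries a
# 'variance' field as above): VarianceSelector returns the single segment the
# reward model is least certain about, a standard active-learning heuristic.
# >>> selector = VarianceSelector()
# >>> chosen, _ = selector.select([{'variance': 0.1}, {'variance': 0.9}, {'variance': 0.4}])
# >>> chosen[0]['variance']
# 0.9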
| 37.648649 | 169 | 0.66224 | 348 | 2,786 | 5.014368 | 0.321839 | 0.036676 | 0.058453 | 0.048138 | 0.209169 | 0.187393 | 0.187393 | 0.187393 | 0.139255 | 0.139255 | 0 | 0.008057 | 0.242642 | 2,786 | 73 | 170 | 38.164384 | 0.818957 | 0.072146 | 0 | 0.259259 | 0 | 0 | 0.060924 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.037037 | 0.018519 | 0.351852 | 0.12963 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45636a74d98ccc7ab3ff22be9c83602f958559c0 | 9,474 | py | Python | pynet/models/vae/vunet.py | CorentinAmbroise/pynet | c353e5f80e75f785a460422ab7b39fa8f776991a | [
"CECILL-B"
] | null | null | null | pynet/models/vae/vunet.py | CorentinAmbroise/pynet | c353e5f80e75f785a460422ab7b39fa8f776991a | [
"CECILL-B"
] | null | null | null | pynet/models/vae/vunet.py | CorentinAmbroise/pynet | c353e5f80e75f785a460422ab7b39fa8f776991a | [
"CECILL-B"
] | null | null | null | # -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2020
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
The Variational U-Net auto-encoder.
"""
# Imports
import logging
import torch
import torch.nn as nn
import torch.nn.functional as func
from pynet.interfaces import DeepLearningDecorator
from pynet.utils import Networks
import numpy as np
from .base import BaseVAE
from ..unet import Down, Up, Conv1x1x1
# Global parameters
logger = logging.getLogger("pynet")
@Networks.register
@DeepLearningDecorator(family=("encoder", "vae"))
class VUNet(BaseVAE):
""" VUNet.
The Variational U-Net is a convolutional encoder-decoder neural network.
The convolutional encoding/decoding parts are the same as the UNet.
The model is composed of two sub-networks:
1. Given x (image), encode it into a distribution over the latent space -
referred to as Q(z|x).
2. Given z in latent space (code representation of an image), decode it
into the image it represents - referred to as f(z).
"""
def __init__(self, latent_dim, in_channels=1, depth=5,
start_filts=64, up_mode="transpose",
batchnorm=True, dim="3d", input_shape=None,
num_classes=None):
""" Init class.
Parameters
----------
latent_dim: int
the latent dimension.
in_channels: int, default 1
number of channels in the input tensor.
depth: int, default 5
number of layers in the U-Net.
start_filts: int, default 64
number of convolutional filters for the first conv.
up_mode: string, default 'transpose'
type of upconvolution. Choices: 'transpose' for transpose
convolution, 'upsample' for nearest neighbour upsampling.
batchnorm: bool, default False
normalize the inputs of the activation function.
dim: str, default '3d'
'3d' or '2d' input data.
input_shape: uplet
the tensor data shape (X, Y, Z) used during upsample (by default
use a scale factor of 2).
num_classes: int, default None
the number of classes for the conditioning.
"""
# Inheritance
nn.Module.__init__(self)
# Check inputs
if dim in ("2d", "3d"):
self.dim = dim
else:
raise ValueError(
"'{}' is not a valid mode for merging up and down paths. Only "
"'3d' and '2d' are allowed.".format(dim))
if up_mode in ("transpose", "upsample"):
self.up_mode = up_mode
else:
raise ValueError(
"'{}' is not a valid mode for upsampling. Only 'transpose' "
"and 'upsample' are allowed.".format(up_mode))
# Declare class parameters
self.latent_dim = latent_dim
self.num_classes = num_classes
self.in_channels = in_channels
self.start_filts = start_filts
self.depth = depth
self.down = []
self.up = []
self.shapes = None
if input_shape is not None:
self.shapes = self.downsample_shape(
input_shape, nb_iterations=(depth - 1))
self.shapes = self.shapes[::-1]
# Create the encoder pathway
self.hidden_dims = []
for cnt in range(depth):
in_channels = self.in_channels if cnt == 0 else out_channels
out_channels = self.start_filts * (2**cnt)
self.hidden_dims.append(out_channels)
pooling = False if cnt == 0 else True
self.down.append(
Down(in_channels, out_channels, self.dim, pooling=pooling,
batchnorm=batchnorm))
# Create the decoder pathway
# - careful! decoding only requires depth-1 blocks
for cnt in range(depth - 1):
in_channels = out_channels
out_channels = in_channels // 2
shape = None
if self.shapes is not None:
shape = self.shapes[cnt + 1]
self.up.append(
Up(in_channels, out_channels, up_mode=up_mode, dim=self.dim,
merge_mode="none", batchnorm=batchnorm, shape=shape))
# Add the list of modules to current module
self.down = nn.Sequential(*self.down)
hidden_dim = self.hidden_dims[-1] * np.prod(self.shapes[0])
self.mu = nn.Linear(hidden_dim, latent_dim)
self.var = nn.Linear(hidden_dim, latent_dim)
self.latent_to_hidden = nn.Linear(latent_dim, hidden_dim)
self.up = nn.Sequential(*self.up)
self.conv_final = Conv1x1x1(out_channels, self.in_channels, self.dim)
self.logit = nn.Tanh()
# Kernel initializer
self.kernel_initializer()
def encode(self, x):
""" Encodes the input by passing through the encoder network
and returns the latent codes.
Parameters
----------
x: Tensor, (N, C, X, Y, Z)
input tensor to encode.
Returns
-------
mu: Tensor (N, D)
mean of the latent Gaussian.
        logvar: Tensor (N, D)
            log-variance of the latent Gaussian.
"""
logger.debug("Encode...")
self.debug("input", x)
x = self.down(x)
self.debug("down", x)
x = torch.flatten(x, start_dim=1)
self.debug("flatten", x)
# Split x into mu and var components of the latent Gaussian
# distribution
z_mu = self.mu(x)
z_logvar = self.var(x)
self.debug("z_mu", z_mu)
self.debug("z_logvar", z_logvar)
return z_mu, z_logvar
def decode(self, x_sample):
""" Maps the given latent codes onto the image space.
Parameters
----------
x_sample: Tensor (N, D)
sample from the distribution having latent parameters mu, var.
Returns
-------
x: Tensor, (N, C, X, Y, Z)
the prediction.
"""
logger.debug("Decode...")
self.debug("x sample", x_sample)
x = self.latent_to_hidden(x_sample)
self.debug("hidden", x)
x = x.view(-1, self.hidden_dims[-1], *self.shapes[0])
self.debug("view", x)
x = self.up(x)
self.debug("up", x)
x = self.conv_final(x)
self.debug("final", x)
return self.logit(x)
def reparameterize(self, z_mu, z_logvar):
""" Reparameterization trick to sample from N(mu, var) from
N(0,1).
Parameters
----------
        z_mu: Tensor (N, D)
            mean of the latent Gaussian.
        z_logvar: Tensor (N, D)
            log-variance of the latent Gaussian.
Returns
-------
x_sample: Tensor (N, D)
sample from the distribution having latent parameters mu, var.
"""
logger.debug("Reparameterize...")
self.debug("z_mu", z_mu)
self.debug("z_logvar", z_logvar)
std = torch.exp(0.5 * z_logvar)
eps = torch.randn_like(std)
x_sample = eps.mul(std).add_(z_mu)
self.debug("x sample", x_sample)
return x_sample
def forward(self, x):
logger.debug("VUnet...")
z_mu, z_logvar = self.encode(x)
x_sample = self.reparameterize(z_mu, z_logvar)
predicted = self.decode(x_sample)
return predicted, {"z_mu": z_mu, "z_logvar": z_logvar}
class DecodeLoss(object):
""" VAE consists of two loss functions:
1. Reconstruction loss: how well we can reconstruct the image
2. KL divergence loss: how off the distribution over the latent space is
from the prior. Given the prior is a standard Gaussian and the inferred
distribution is a Gaussian with a diagonal covariance matrix,
the KL-divergence becomes analytically solvable.
loss = REC_loss + k1 * KL_loss.
"""
def __init__(self, k1=1, rec_loss="mse", nodecoding=False):
super(DecodeLoss, self).__init__()
if rec_loss not in ("mse", "bce"):
raise ValueError("Requested loss not yet supported.")
self.layer_outputs = None
self.k1 = k1
self.rec_loss = rec_loss
self.nodecoding = nodecoding
def __call__(self, x_sample, x):
if self.nodecoding:
return -1
if self.layer_outputs is None:
raise ValueError("The model needs to return the latent space "
"distribution parameters z_mu, z_logvar.")
z_mu = self.layer_outputs["z_mu"]
z_logvar = self.layer_outputs["z_logvar"]
if self.rec_loss == "bce":
recon_loss = func.binary_cross_entropy(
x_sample, x, reduction="sum")
else:
recon_loss = func.mse_loss(
x_sample, x, reduction="mean")
# kld_loss = 0.5 * torch.sum(
# torch.exp(z_logvar) + z_mu**2 - 1.0 - z_logvar)
kld_loss = torch.mean(-0.5 * torch.sum(
1 + z_logvar - z_mu ** 2 - z_logvar.exp(), dim=-1), dim=0)
return recon_loss + self.k1 * kld_loss
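
# Quick sanity check of the analytic KL term (toy tensors, illustrative only):
# when the posterior equals the prior (z_mu = 0, z_logvar = 0), the
# KL-divergence used in DecodeLoss must vanish.
def _kld_sanity_check():
    z_mu = torch.zeros(4, 8)
    z_logvar = torch.zeros(4, 8)
    kld = torch.mean(-0.5 * torch.sum(
        1 + z_logvar - z_mu ** 2 - z_logvar.exp(), dim=-1), dim=0)
    return kld  # tensor(0.) for a standard-normal posterior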
| 34.830882 | 79 | 0.576947 | 1,212 | 9,474 | 4.378713 | 0.237624 | 0.025061 | 0.007537 | 0.01319 | 0.140946 | 0.10929 | 0.100622 | 0.084794 | 0.084794 | 0.07085 | 0 | 0.010666 | 0.307262 | 9,474 | 271 | 80 | 34.95941 | 0.797958 | 0.329533 | 0 | 0.081481 | 0 | 0 | 0.088324 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051852 | false | 0 | 0.074074 | 0 | 0.185185 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
456499cb6f4f56978c9d9e33ec8c47f7702e105f | 1,161 | py | Python | api/subparts/alpha/alpha.py | schana/swagger-based-api | 964cd549e73a81a0a72037cb8f75271708d061db | [
"Apache-2.0"
] | null | null | null | api/subparts/alpha/alpha.py | schana/swagger-based-api | 964cd549e73a81a0a72037cb8f75271708d061db | [
"Apache-2.0"
] | null | null | null | api/subparts/alpha/alpha.py | schana/swagger-based-api | 964cd549e73a81a0a72037cb8f75271708d061db | [
"Apache-2.0"
] | null | null | null | import collections
import flask_restplus
from flask_restplus import fields
from flask_restplus import reqparse
from api import util
api = util.build_api('alpha', __name__, url_prefix='/subparts/alpha')
v1 = util.build_namespace(api, 'v1', description='Version 1')
AlphaSpec = collections.namedtuple('Alpha', ['x_and_y', 'z'])
alpha_model = v1.model(AlphaSpec.__name__, AlphaSpec(
x_and_y=fields.Integer(description='x plus y', required=True),
z=fields.String(description='z-e-d', required=True)
)._asdict())
alpha_params = reqparse.RequestParser()
alpha_params.add_argument('x', type=int, required=True)
alpha_params.add_argument('y', type=int, required=False, default=0)
alpha_params.add_argument('z', type=str, required=True)
@v1.route('/alphas')
class Alpha(flask_restplus.Resource):
@v1.expect(alpha_params)
@v1.marshal_with(alpha_model)
@v1.doc(description='A super-helpful description as to what is going on',
params={'x': 'The best x of them all'})
def get(self):
args = alpha_params.parse_args()
return AlphaSpec(x_and_y=args['x'] + args['y'],
z=args['z'])._asdict()
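
# Illustrative request/response (hypothetical values), assuming the blueprint
# is registered under its url_prefix:
#   GET /subparts/alpha/v1/alphas?x=2&y=3&z=zed
#   -> {"x_and_y": 5, "z": "zed"}
# Omitting the optional 'y' falls back to its default of 0; omitting the
# required 'x' or 'z' makes reqparse return a 400 error.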
| 32.25 | 77 | 0.707149 | 166 | 1,161 | 4.728916 | 0.439759 | 0.084076 | 0.019108 | 0.084076 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009109 | 0.149009 | 1,161 | 35 | 78 | 33.171429 | 0.785425 | 0 | 0 | 0 | 0 | 0 | 0.12317 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.192308 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
456b9d36efdf097837bfdedc583dac092dcbd61c | 4,927 | py | Python | EDGAR/Generic_Parser.py | laurakchen/Intended-Use-Of_Proceeds | 4d958fbeddb3eb20b6a3ab1166ad918673408ddc | [
"Apache-2.0"
] | null | null | null | EDGAR/Generic_Parser.py | laurakchen/Intended-Use-Of_Proceeds | 4d958fbeddb3eb20b6a3ab1166ad918673408ddc | [
"Apache-2.0"
] | null | null | null | EDGAR/Generic_Parser.py | laurakchen/Intended-Use-Of_Proceeds | 4d958fbeddb3eb20b6a3ab1166ad918673408ddc | [
"Apache-2.0"
] | 4 | 2021-01-10T02:22:24.000Z | 2021-01-29T07:01:16.000Z | """
Program to provide generic parsing for all files in user-specified directory.
The program assumes the input files have been scrubbed,
i.e., HTML, ASCII-encoded binary, and any other embedded document structures that are not
intended to be analyzed have been deleted from the file.
Dependencies:
Python: Load_MasterDictionary.py
Data: LoughranMcDonald_MasterDictionary_XXXX.csv
The program outputs:
1. File name
2. File size (in bytes)
3. Number of words (based on LM_MasterDictionary)
4. Proportion of positive words (use with care - see LM, JAR 2016)
5. Proportion of negative words
6. Proportion of uncertainty words
7. Proportion of litigious words
8. Proportion of modal-weak words
9. Proportion of modal-moderate words
10. Proportion of modal-strong words
11. Proportion of constraining words (see Bodnaruk, Loughran and McDonald, JFQA 2015)
12. Number of alphanumeric characters (a-z, A-Z)
13. Number of digits (0-9)
14. Number of numbers (collections of digits)
15. Average number of syllables
16. Average word length
17. Vocabulary (see Loughran-McDonald, JF, 2015)
ND-SRAF
McDonald 2016/06 : updated 2018/03
"""
import csv
import glob
import re
import string
import sys
import time
sys.path.append('/Users/laurachen/Desktop/FinProject/')
# sys.path.append('D:\GD\Python\TextualAnalysis\Modules') # Modify to identify path for custom modules
import Load_MasterDictionary as LM
# User defined directory for files to be parsed
TARGET_FILES = r'/Users/laurachen/Desktop/FinProject/fbCleaned.txt'
# User defined file pointer to LM dictionary
MASTER_DICTIONARY_FILE = r'/Users/laurachen/Desktop/FinProject/' + \
'LoughranMcDonald_MasterDictionary_2018.csv'
# User defined output file
OUTPUT_FILE = r'/Users/laurachen/Desktop/FinProject/Parser.csv'
# Setup output
OUTPUT_FIELDS = ['file name,', 'file size,', 'number of words,', '% positive,', '% negative,',
'% uncertainty,', '% litigious,', '% modal-weak,', '% modal moderate,',
'% modal strong,', '% constraining,', '# of alphabetic,', '# of digits,',
'# of numbers,', 'avg # of syllables per word,', 'average word length,', 'vocabulary']
lm_dictionary = LM.load_masterdictionary(MASTER_DICTIONARY_FILE, True)
def main():
f_out = open(OUTPUT_FILE, 'w')
wr = csv.writer(f_out, lineterminator='\n')
wr.writerow(OUTPUT_FIELDS)
file_list = glob.glob(TARGET_FILES)
for file in file_list:
print(file)
with open(file, 'r', encoding='UTF-8', errors='ignore') as f_in:
doc = f_in.read()
doc_len = len(doc)
doc = re.sub('(May|MAY)', ' ', doc) # drop all May month references
doc = doc.upper() # for this parse caps aren't informative so shift
output_data = get_data(doc)
output_data[0] = file
output_data[1] = doc_len
wr.writerow(output_data)
def get_data(doc):
vdictionary = {}
_odata = [0] * 17
total_syllables = 0
word_length = 0
    tokens = re.findall(r'\w+', doc)  # Note that \w+ splits hyphenated words
for token in tokens:
if not token.isdigit() and len(token) > 1 and token in lm_dictionary:
_odata[2] += 1 # word count
word_length += len(token)
if token not in vdictionary:
vdictionary[token] = 1
if lm_dictionary[token].positive: _odata[3] += 1
if lm_dictionary[token].negative: _odata[4] += 1
if lm_dictionary[token].uncertainty: _odata[5] += 1
if lm_dictionary[token].litigious: _odata[6] += 1
if lm_dictionary[token].weak_modal: _odata[7] += 1
if lm_dictionary[token].moderate_modal: _odata[8] += 1
if lm_dictionary[token].strong_modal: _odata[9] += 1
if lm_dictionary[token].constraining: _odata[10] += 1
total_syllables += lm_dictionary[token].syllables
_odata[11] = len(re.findall('[A-Z]', doc))
_odata[12] = len(re.findall('[0-9]', doc))
# drop punctuation within numbers for number count
    doc = re.sub(r'(?<=[0-9])[.,](?=[0-9])', '', doc)
doc = doc.translate(str.maketrans(string.punctuation, " " * len(string.punctuation)))
_odata[13] = len(re.findall(r'\b[-+\(]?[$€£]?[-+(]?\d+\)?\b', doc))
_odata[14] = total_syllables / _odata[2]
_odata[15] = word_length / _odata[2]
_odata[16] = len(vdictionary)
# Convert counts to %
for i in range(3, 10 + 1):
_odata[i] = (_odata[i] / _odata[2]) * 100
# Vocabulary
return _odata
if __name__ == '__main__':
print('\n' + time.strftime('%c') + '\nGeneric_Parser.py\n')
main()
print('\n' + time.strftime('%c') + '\nNormal termination.')
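
# Illustrative arithmetic (hypothetical counts): for a filing containing 200
# dictionary words of which 6 are negative, get_data reports
# (6 / 200) * 100 = 3.0 in the '% negative' column -- i.e. the sentiment
# fields are percentages of the dictionary word count, not raw counts.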
| 38.795276 | 104 | 0.629592 | 645 | 4,927 | 4.683721 | 0.336434 | 0.047666 | 0.050645 | 0.039722 | 0.102615 | 0.03906 | 0 | 0 | 0 | 0 | 0 | 0.030213 | 0.247615 | 4,927 | 126 | 105 | 39.103175 | 0.784192 | 0.331642 | 0 | 0 | 0 | 0 | 0.191929 | 0.090562 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028986 | false | 0 | 0.101449 | 0 | 0.144928 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
456d5f84b2506b512723f3a37ab5b953c8eded00 | 4,882 | py | Python | Code/techne_library_code.py | uk-gov-mirror/nationalarchives.TechneTraining | aabb15f2bfe6bbbcc824dbdaa7f8c59632fea21a | [
"MIT"
] | null | null | null | Code/techne_library_code.py | uk-gov-mirror/nationalarchives.TechneTraining | aabb15f2bfe6bbbcc824dbdaa7f8c59632fea21a | [
"MIT"
] | null | null | null | Code/techne_library_code.py | uk-gov-mirror/nationalarchives.TechneTraining | aabb15f2bfe6bbbcc824dbdaa7f8c59632fea21a | [
"MIT"
] | null | null | null | import os
from sklearn.model_selection import train_test_split
import numpy as np
from operator import itemgetter
from math import log
import random
from gensim.summarization.summarizer import summarize
from sklearn.naive_bayes import BernoulliNB
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.neighbors import KDTree
from matplotlib import pyplot
from sklearn.feature_extraction.text import TfidfVectorizer
import seaborn as sns
import pandas as pd
import ipywidgets as widgets
from matplotlib.colors import LogNorm
def add_to_dict(D, k, v=1):
if k in D:
D[k] += v
else:
D[k] = v
def clean_string(string):
out_string = ""
for c in string:
if c.isalpha():
out_string += c
else:
if len(out_string) > 0 and out_string[-1] != " ":
out_string += " "
return out_string
def read_topic_list(file_name):
topic_words = {}
topic_file = open(file_name, 'r')
for row in topic_file:
fields = row[:-1].split("|")
topic_id = int(fields[0])
words = fields[1].split(",")
topic_words[topic_id] = words
topic_file.close()
return topic_words
def read_doc_topics(file_name):
topics_per_doc = {}
doc_topics = open(file_name, 'r')
for row in doc_topics:
fields = row[:-1].split("|")
file_name = fields[0]
topic_probs = [float(x) for x in fields[1:]]
topics_per_doc[file_name] = topic_probs
doc_topics.close()
return topics_per_doc
def normalise_vector(v):
norm = np.linalg.norm(v, ord=1)
if norm == 0:
return v
return v / norm
def plot_doc_topics(doc_ids, doc_topic_lookup, topic_count, normalise=True):
fig, ax = pyplot.subplots(2,2)
fig.set_size_inches(8.5,5)
for i, file_number in enumerate(doc_ids):
topic_probs = doc_topic_lookup["file_" + str(file_number) + ".txt"]
if normalise:
topic_probs = normalise_vector(topic_probs)
ax[int(i/2), i % 2].bar(x = [str(x) for x in range(topic_count)], height = topic_probs)
return fig, ax
def filter_topics_by_threshold(topic_dict, threshold):
filtered_dict = {}
for k,v in topic_dict.items():
scores = [x if x >= threshold else 0.0 for i,x in enumerate(v)]
filtered_dict[k] = scores
return filtered_dict
def topic_to_class_scores(topic_scores, topic_class_map):
file_class_scores = {}
max_class = max([v for v in topic_class_map.values()])
for doc_id,scores in topic_scores.items():
class_scores = np.zeros(max_class+1)
for t,s in enumerate(scores):
class_scores[topic_class_map[t]] += s
file_class_scores[doc_id] = class_scores
return file_class_scores
def load_content_file_map(file_name):
    file_domain = {}
    file_url = {}
    file_map = open(file_name, "r")
    for row in file_map:
        fields = row[:-1].split("|")
        file_url[fields[0]] = fields[1]
        file_domain[fields[0]] = fields[1].split("/")[0]
    file_map.close()
    return file_url, file_domain
def load_content(file_name):
content_file = open(file_name, "r")
file_contents = {}
for row in content_file:
fields = row[:-1].split("|")
file_contents[fields[0]] = fields[1]
content_file.close()
return file_contents
def load_summaries(file_name):
summary_file = open(file_name, 'r')
file_summaries = {}
for row in summary_file:
fields = row[:-1].split("|")
file_summaries[fields[0]] = fields[1]
summary_file.close()
return file_summaries
def prepare_for_ml(tfidf_features, classes_per_doc, file_to_idx_map):
training_files = []
training_features = []
training_class = []
feature_matrix = tfidf_features.todense()
for filename, scores in classes_per_doc.items():
norm_scores = normalise_vector(scores)
highest = np.argmax(norm_scores)
training_files.append(filename)
training_class.append(highest)
training_features.append(feature_matrix[file_to_idx_map[filename]])
training_features = np.vstack(training_features)
return training_files, training_features, training_class
def draw_confusion(y_true, y_pred, model, class_names):
fig, ax = pyplot.subplots(1,1,figsize=(7, 7))
N = len(model.classes_)
sns.heatmap(pd.DataFrame(confusion_matrix(y_true, y_pred, normalize=None),
range(N), range(N)), cmap='magma', annot=True, annot_kws={"size": 15}, fmt='g', ax = ax) #, norm=LogNorm())
#ax.table(cellText=topN[{'TaxonomyCategory','TAXID'}].sort_values(by='TAXID').values, colLabels=['TaxonomyCategory','TAXID'], loc='top')
ax.set_xticklabels([class_names[c] for c in model.classes_])
ax.set_yticklabels([class_names[c] for c in model.classes_], rotation = 30)
return fig, ax
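
# Illustrative mini-example (toy inputs): topic_to_class_scores folds
# per-topic scores into per-class scores through the topic -> class map.
# >>> topic_to_class_scores({'doc_1': [0.2, 0.5, 0.3]}, {0: 0, 1: 1, 2: 1})
# {'doc_1': array([0.2, 0.8])}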
| 33.902778 | 140 | 0.670012 | 704 | 4,882 | 4.396307 | 0.255682 | 0.028433 | 0.012924 | 0.024233 | 0.107593 | 0.087884 | 0.03231 | 0.01874 | 0 | 0 | 0 | 0.010694 | 0.214666 | 4,882 | 143 | 141 | 34.13986 | 0.796557 | 0.031135 | 0 | 0.071429 | 0 | 0 | 0.011844 | 0.004865 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103175 | false | 0 | 0.134921 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
456e07cc6bba172145470a41d9133e6c7f291230 | 2,701 | py | Python | sensor/src/upload_metrics.py | tubone24/raspi_plant_checker | e80ccd61c50cbba883f4af8fafafc0404bdf8978 | [
"MIT"
] | null | null | null | sensor/src/upload_metrics.py | tubone24/raspi_plant_checker | e80ccd61c50cbba883f4af8fafafc0404bdf8978 | [
"MIT"
] | null | null | null | sensor/src/upload_metrics.py | tubone24/raspi_plant_checker | e80ccd61c50cbba883f4af8fafafc0404bdf8978 | [
"MIT"
] | 1 | 2021-12-03T05:28:20.000Z | 2021-12-03T05:28:20.000Z | from gql import gql, Client
from gql.transport.requests import RequestsHTTPTransport
import requests
from datetime import datetime, timedelta, timezone
import os
from os.path import join, dirname
from dotenv import load_dotenv
dotenv_path = join(dirname(__file__), "../../", '.env')
load_dotenv(dotenv_path)
RASPI_URL = os.environ.get("RASPI_URL")
HASURA_URL = os.environ.get("HASURA_URL")
HASURA_SECRET = os.environ.get("HASURA_SECRET")
def get_metrics(url: str):
moisture = requests.get(url=f"{url}/moisture").json()
light = requests.get(url=f"{url}/light").json()
return {"moisture": moisture["value"], "light": light["value"]}
def upload_metric_to_hasura(moisture, light):
client = Client(
transport=RequestsHTTPTransport(
url=HASURA_URL,
use_json=True,
headers={
"Content-type": "application/json",
"x-hasura-admin-secret": HASURA_SECRET
},
retries=3,
),
fetch_schema_from_transport=True,
)
query = gql(
"""
mutation MyMutation ($light: numeric!, $moisture: numeric!){
insert_raspi_plant_checker_one(object: {light: $light, moisture: $moisture}) {
id
light
moisture
timestamp
}
}
"""
)
params = {"light": light, "moisture": moisture}
result = client.execute(query, variable_values=params)
print(result)
def delete_old_metrics_to_hasura(days_before=7):
dt_now = datetime.now(timezone.utc)
before_day = dt_now - timedelta(days=days_before)
dt = before_day.astimezone().isoformat(timespec='microseconds')
client = Client(
transport=RequestsHTTPTransport(
url=HASURA_URL,
use_json=True,
headers={
"Content-type": "application/json",
"x-hasura-admin-secret": HASURA_SECRET
},
retries=3,
),
fetch_schema_from_transport=True,
)
query = gql(
"""
mutation MyMutation ($dt: timestamptz){
delete_raspi_plant_checker(where: {timestamp: {_lt: $dt}}) {
returning {
id
light
moisture
timestamp
}
}
}
"""
)
params = {"dt": dt}
result = client.execute(query, variable_values=params)
print(result)
def main():
metrics = get_metrics(RASPI_URL)
upload_metric_to_hasura(moisture=metrics["moisture"], light=metrics["light"])
delete_old_metrics_to_hasura()
if __name__ == "__main__":
main()
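
# Illustrative payload (hypothetical sensor readings): upload_metric_to_hasura
# executes the insert mutation with variables like
#   {"light": 412, "moisture": 57.3}
# and Hasura returns the inserted row under the mutation's field name, e.g.
#   {"insert_raspi_plant_checker_one": {"id": 1, "light": 412,
#    "moisture": 57.3, "timestamp": "2021-01-01T00:00:00+00:00"}}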
| 28.135417 | 90 | 0.583858 | 277 | 2,701 | 5.451264 | 0.306859 | 0.023841 | 0.023841 | 0.02649 | 0.460927 | 0.328477 | 0.328477 | 0.328477 | 0.328477 | 0.328477 | 0 | 0.00159 | 0.30137 | 2,701 | 95 | 91 | 28.431579 | 0.798622 | 0 | 0 | 0.42623 | 0 | 0 | 0.112595 | 0.020038 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065574 | false | 0 | 0.114754 | 0 | 0.196721 | 0.032787 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4571c0f5a311380362a2ed94f464b2d2bf10f14f | 1,162 | py | Python | project/concept_recog.py | hskang9/HelloWorld | 50b09f710176f082c5dac955bc7ea3578a42bd40 | [
"MIT"
] | null | null | null | project/concept_recog.py | hskang9/HelloWorld | 50b09f710176f082c5dac955bc7ea3578a42bd40 | [
"MIT"
] | null | null | null | project/concept_recog.py | hskang9/HelloWorld | 50b09f710176f082c5dac955bc7ea3578a42bd40 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import json
from watson_developer_cloud import NaturalLanguageUnderstandingV1 as nlu
import watson_developer_cloud.natural_language_understanding.features.v1 \
as Features
def concept_recog(path='./test.txt'):
#with open(path, 'rt', encoding='utf-8') as f:
# inputs = f.read()
with open(path, 'rb') as f:
inputs = f.read().decode("UTF-8")
natural_language_understanding = nlu(
url=("https://gateway.aibril-watson.kr/" +
"natural-language-understanding/api"),
username="01fc633a-01c2-486e-a202-44a3b7653a1d",
password="wwbFwHfLV4jK",
version="2017-02-27")
response = natural_language_understanding.analyze(
text=inputs,
features=[
Features.Concepts(
# Concepts options
limit=3
)
]
)
# print(json.dumps(response))
texts = response['concepts']
text_list = []
for text in texts:
text_list.append(text['text'])
for text in text_list:
print(text)
return ' '.join(text_list)
if __name__ == '__main__':
concept_recog()
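
# Illustrative response shape (hypothetical values): the NLU 'concepts'
# feature returns entries such as
#   {"concepts": [{"text": "Machine learning", "relevance": 0.98}, ...]}
# and concept_recog keeps only the 'text' fields, joined with spaces.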
| 28.341463 | 74 | 0.605852 | 128 | 1,162 | 5.3125 | 0.539063 | 0.088235 | 0.164706 | 0.029412 | 0.041176 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043581 | 0.269363 | 1,162 | 40 | 75 | 29.05 | 0.757362 | 0.113597 | 0 | 0 | 0 | 0 | 0.15918 | 0.068359 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0.033333 | 0.1 | 0 | 0.166667 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4573153b3fe512c2a5e16ccae54eb2974cf42494 | 6,242 | py | Python | i7app/__main__.py | eblade/images7 | 7fa7c961e046a178243c866fd1f3b82f7e58c73d | [
"BSD-3-Clause",
"MIT"
] | null | null | null | i7app/__main__.py | eblade/images7 | 7fa7c961e046a178243c866fd1f3b82f7e58c73d | [
"BSD-3-Clause",
"MIT"
] | null | null | null | i7app/__main__.py | eblade/images7 | 7fa7c961e046a178243c866fd1f3b82f7e58c73d | [
"BSD-3-Clause",
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
import os
import zmq
import time
import logging
from PyQt5 import QtWidgets as W, QtCore as C, QtGui as G
from qtzevents.bus import Pub, Push
from qtzevents.background import Background
from images7.config import Config
from images7.system import System
from .grid import ThumbView
from .browser import BrowserWidget, DateItem
from images7 import (
date,
entry,
files,
importer,
job,
)
from images7.job import (
register,
transcode,
to_cut,
to_main,
calculate_hash,
read_metadata,
create_proxy,
clean_cut,
)
from images7.analyse import exif
from images7.job.transcode import imageproxy
# Logging
FORMAT = '%(asctime)s [%(threadName)s] %(filename)s +%(lineno)s ' + \
         '%(funcName)s %(levelname)s %(message)s'
logging.basicConfig(
format=FORMAT,
level=logging.DEBUG if '-g' in sys.argv else logging.INFO,
filename='log',
filemode='w',
)
class View(W.QMainWindow):
def __init__(self):
super().__init__()
self.context = zmq.Context(1)
self.command = Push(self.context, 'command')
self.system = None
self.control = Control(self.system)
self.setWindowTitle('Images7')
self.setup_menu()
self.setup_layout()
#self.setup_model_event_handler()
self.setup_control_event_handler()
self.on_open('images.ini')
logging.getLogger().setLevel(logging.DEBUG)
self.show()
def setup_menu(self):
import_action = W.QAction('&Import', self)
import_action.setShortcut('Ctrl+I')
import_action.setStatusTip('Triggers an import of files from known cards')
import_action.triggered.connect(self.on_import)
reload_action = W.QAction('&Reload', self)
reload_action.setShortcut('Ctrl+R')
reload_action.setStatusTip('Reload the data browser')
reload_action.triggered.connect(self.on_reload)
m = self.menuBar()
file_menu = m.addMenu('&File')
file_menu.addAction(import_action)
file_menu.addAction(reload_action)
def setup_layout(self):
self.setGeometry(100, 100, 1000, 800)
splitter = W.QSplitter(C.Qt.Horizontal)
self.tree = BrowserWidget()
self.tree.currentItemChanged.connect(self.on_browser_selection_changed)
self.main = W.QStackedWidget()
empty = W.QFrame()
self.main.addWidget(empty)
self.main.setCurrentWidget(empty)
splitter.addWidget(self.tree)
splitter.addWidget(self.main)
splitter.setSizes([300, 700])
self.setCentralWidget(splitter)
def setup_model_event_handler(self):
self.model_event_handler = ModelEventHandler.as_thread(
self.system.event.subscriber('state', 'system', 'error'))
self.model_event_handler.message.connect(self.on_message)
self.model_event_handler.error.connect(self.on_error)
def setup_control_event_handler(self):
self.control_event_handler = ControlEventHandler.as_thread(self.control, self.command.puller())
self.control_event_handler.error.connect(self.on_error)
self.control_event_handler.model_changed.connect(self.on_model_changed)
def new_main_frame(self, widget):
self.main.removeWidget(self.main.currentWidget())
self.main.addWidget(widget)
self.main.setCurrentWidget(widget)
def on_open(self, path):
self.command.send({
'command': 'load',
'path': path,
})
def on_import(self):
self.command.send({
'command': 'import',
})
def on_reload(self):
self.command.send({
'command': 'reload',
})
def on_model_changed(self):
self.system = self.control.system
self.tree.load()
def on_browser_selection_changed(self, current, previous):
if isinstance(current, DateItem):
widget = ThumbView()
self.new_main_frame(widget)
query = entry.EntryQuery(date=current.date.date)
feed = entry.get_entries(query)
widget.populate(feed)
def on_message(self, state):
self.state_label.setText(state)
def on_error(self, message):
W.QMessageBox.information(self, 'Error', message)
def closeEvent(self, event):
self.command.send({'command': 'quit'})
class ModelEventHandler(Background):
message = C.pyqtSignal(str)
error = C.pyqtSignal(str)
def on_state(self, message):
self.message.emit(message)
def on_system(self, message):
if message == 'quit':
self.running = False
self.quit_and_wait()
def on_error(self, message):
self.error.emit(message)
class ControlEventHandler(Background):
enable = C.pyqtSignal(bool)
error = C.pyqtSignal(str)
model_changed = C.pyqtSignal()
def __init__(self, control, *args):
super().__init__(*args)
self.control = control
self.running = True
def on_message(self, message):
if message['command'] == 'load':
try:
self.control.load_config(message['path'])
self.model_changed.emit()
except ValueError as e:
self.error.emit(str(e))
if message['command'] == 'reload':
self.model_changed.emit()
elif message['command'] == 'import':
try:
logging.info('Importing...')
from images7.importer import trig_import
trig_import()
self.model_changed.emit()
logging.info('Imported.')
except ValueError as e:
self.error.emit(str(e))
elif message['command'] == 'quit':
self.running = False
class Control:
def __init__(self, system):
self.system = system
def load_config(self, path):
config = Config(path)
self.system = System(config)
importer.App.run(workers=1)
job.App.run(workers=4)
if __name__ == '__main__':
app = W.QApplication(sys.argv)
main = View()
logging.getLogger().setLevel(logging.DEBUG)
sys.exit(app.exec_())
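
# Illustrative command protocol (hypothetical session): the View pushes plain
# dict messages over the zmq Push socket and ControlEventHandler pulls them:
#   {'command': 'load', 'path': 'images.ini'}  # build System from the config
#   {'command': 'import'}                      # trigger a card import
#   {'command': 'reload'}                      # refresh the browser tree
#   {'command': 'quit'}                        # stop the handler thread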
| 27.257642 | 103 | 0.630567 | 723 | 6,242 | 5.278008 | 0.272476 | 0.014413 | 0.023847 | 0.023061 | 0.138889 | 0.037212 | 0.037212 | 0.018868 | 0.018868 | 0 | 0 | 0.006892 | 0.256168 | 6,242 | 228 | 104 | 27.377193 | 0.81499 | 0.009773 | 0 | 0.132948 | 0 | 0 | 0.061347 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121387 | false | 0 | 0.17341 | 0 | 0.346821 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45754cc8939f2ca8f5551df60c1a102df61680b5 | 2,049 | py | Python | week01/day01/shopping.py | GsQxZz/fullstack | 9010c0c69aec901fd0e0b4434445e822f682c367 | [
"Apache-2.0"
] | null | null | null | week01/day01/shopping.py | GsQxZz/fullstack | 9010c0c69aec901fd0e0b4434445e822f682c367 | [
"Apache-2.0"
] | null | null | null | week01/day01/shopping.py | GsQxZz/fullstack | 9010c0c69aec901fd0e0b4434445e822f682c367 | [
"Apache-2.0"
] | null | null | null |
iphone_price = 5800
mac_price = 9000
coffee_price = 32
python_price = 80
bicycle_price = 1500
cart = []
salary = int(input("Please input your salary:"))
print("********** Welcome to the shopping cart system **********")
while True:
    print("1. iPhone 11 ---- %d yuan" % iphone_price)
    print("2. Mac book ---- %d yuan" % mac_price)
    print("3. coffee ---- %d yuan" % coffee_price)
    print("4. python book ---- %d yuan" % python_price)
    print("5. bicycle ---- %d yuan" % bicycle_price)
    print("0. quit")
    operator = int(input("Please input your choice:"))
    if operator == 1:
        if salary >= iphone_price:
            cart.append(["iPhone", iphone_price])
            salary -= iphone_price
            print("iPhone 11 added to your cart, remaining balance: %d" % salary)
        else:
            print("Insufficient balance, %d left" % salary)
    elif operator == 2:
        if salary >= mac_price:
            cart.append(["Mac Book", mac_price])
            salary -= mac_price
            print("Mac Book added to your cart, remaining balance: %d" % salary)
        else:
            print("Insufficient balance, %d left" % salary)
    elif operator == 3:
        if salary >= coffee_price:
            cart.append(["Coffee", coffee_price])
            salary -= coffee_price
            print("Coffee added to your cart, remaining balance: %d" % salary)
        else:
            print("Insufficient balance, %d left" % salary)
    elif operator == 4:
        if salary >= python_price:
            cart.append(["Python Book", python_price])
            salary -= python_price
            print("Python Book added to your cart, remaining balance: %d" % salary)
        else:
            print("Insufficient balance, %d left" % salary)
    elif operator == 5:
        if salary >= bicycle_price:
            cart.append(["Bicycle", bicycle_price])
            salary -= bicycle_price
            print("Bicycle added to your cart, remaining balance: %d" % salary)
        else:
            print("Insufficient balance, %d left" % salary)
    elif operator == 0:
        break
    else:
        print("Invalid input, please try again")
if cart:
    print("You have purchased the following items:")
    for i in cart:
        print("%s %d" % (i[0], i[1]))
    print("Your remaining balance: %d" % salary)
print("Thanks for visiting, see you next time")
45756f7f3335ea9abab94df669ff19ff707b1db9 | 1,079 | py | Python | test/test_edit_contact.py | Zaichkov/python_training | be8aff0b38c5a93c5574762ce5c8c27e6fe11b5a | [
"Apache-2.0"
] | null | null | null | test/test_edit_contact.py | Zaichkov/python_training | be8aff0b38c5a93c5574762ce5c8c27e6fe11b5a | [
"Apache-2.0"
] | null | null | null | test/test_edit_contact.py | Zaichkov/python_training | be8aff0b38c5a93c5574762ce5c8c27e6fe11b5a | [
"Apache-2.0"
] | null | null | null | from model.contact import Contact
import random
def test_edit_some_contact(app, orm, check_ui):
if len(orm.get_contact_list()) == 0:
app.contact.create(Contact(firstname="St_Claus"))
old_contacts = orm.get_contact_list()
contact_for_edit = random.choice(old_contacts)
contact = Contact(firstname="edited_firstname", lastname="edited_lastname",
address="edited_address", mobile_phone="edited_phone", email="edited_email",
title="new_title", bday="19", bmonth="October", byear="1988", id=contact_for_edit.id)
app.contact.edit_contact_by_id(contact.id, contact)
new_contacts = orm.get_contact_list()
old_contacts.remove(contact_for_edit)
old_contacts.append(contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
if check_ui:
ui_list = app.contact.get_contact_list()
orm_list = app.contact.make_list_like_ui(new_contacts)
assert sorted(orm_list, key=Contact.id_or_max) == sorted(ui_list, key=Contact.id_or_max)
| 46.913043 | 107 | 0.720111 | 154 | 1,079 | 4.701299 | 0.331169 | 0.075967 | 0.077348 | 0.077348 | 0.212707 | 0.143646 | 0 | 0 | 0 | 0 | 0 | 0.007778 | 0.165894 | 1,079 | 22 | 108 | 49.045455 | 0.796667 | 0 | 0 | 0 | 0 | 0 | 0.091752 | 0 | 0 | 0 | 0 | 0 | 0.105263 | 1 | 0.052632 | false | 0 | 0.105263 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
457919564bdf80b71f716bf686e0ed54de0bb593 | 9,424 | py | Python | pytype/overlays/typed_dict.py | hboshnak/pytype | b6b6448dc562a7200326c92e75efeed203984e16 | [
"Apache-2.0"
] | null | null | null | pytype/overlays/typed_dict.py | hboshnak/pytype | b6b6448dc562a7200326c92e75efeed203984e16 | [
"Apache-2.0"
] | null | null | null | pytype/overlays/typed_dict.py | hboshnak/pytype | b6b6448dc562a7200326c92e75efeed203984e16 | [
"Apache-2.0"
] | null | null | null | """Implementation of TypedDict."""
import dataclasses
from typing import Any, Dict, Set
from pytype.abstract import abstract
from pytype.abstract import abstract_utils
from pytype.abstract import function
from pytype.overlays import classgen
from pytype.pytd import pytd
@dataclasses.dataclass
class TypedDictProperties:
"""Collection of typed dict properties passed between various stages."""
name: str
fields: Dict[str, Any]
required: Set[str]
total: bool
@property
def keys(self):
return set(self.fields.keys())
@property
def optional(self):
return self.keys - self.required
def add(self, k, v, total):
self.fields[k] = v
if total:
self.required.add(k)
def check_keys(self, keys):
keys = set(keys)
missing = (self.keys - keys) & self.required
extra = keys - self.keys
return missing, extra
class TypedDictBuilder(abstract.PyTDClass):
"""Factory for creating typing.TypedDict classes."""
def __init__(self, ctx):
typing_ast = ctx.loader.import_name("typing")
pyval = typing_ast.Lookup("typing._TypedDict")
pyval = pyval.Replace(name="typing.TypedDict")
super().__init__("TypedDict", pyval, ctx)
def call(self, node, *args):
details = ("Use the class definition form of TypedDict instead.")
self.ctx.errorlog.not_supported_yet(
self.ctx.vm.frames, "TypedDict functional constructor", details)
return node, self.ctx.new_unsolvable(node)
def _validate_bases(self, cls_name, bases):
"""Check that all base classes are valid."""
for base_var in bases:
for base in base_var.data:
if not isinstance(base, (TypedDictClass, TypedDictBuilder)):
details = (f"TypedDict {cls_name} cannot inherit from "
"a non-TypedDict class.")
self.ctx.errorlog.base_class_error(
self.ctx.vm.frames, base_var, details)
def _merge_base_class_fields(self, bases, props):
"""Add the merged list of base class fields to the fields dict."""
# Updates props in place, raises an error if a duplicate key is encountered.
provenance = {k: props.name for k in props.fields}
for base_var in bases:
for base in base_var.data:
if not isinstance(base, TypedDictClass):
continue
for k, v in base.props.fields.items():
if k in props.fields:
classes = f"{base.name} and {provenance[k]}"
details = f"Duplicate TypedDict key {k} in classes {classes}"
self.ctx.errorlog.base_class_error(
self.ctx.vm.frames, base_var, details)
else:
props.add(k, v, base.props.total)
provenance[k] = base.name
def make_class(self, node, bases, f_locals, total):
# If BuildClass.call() hits max depth, f_locals will be [unsolvable]
# See comment in NamedTupleClassBuilder.make_class(); equivalent logic
# applies here.
if isinstance(f_locals.data[0], abstract.Unsolvable):
return node, self.ctx.new_unsolvable(node)
f_locals = abstract_utils.get_atomic_python_constant(f_locals)
# retrieve __qualname__ to get the name of class
name_var = f_locals["__qualname__"]
cls_name = abstract_utils.get_atomic_python_constant(name_var)
if "." in cls_name:
cls_name = cls_name.rsplit(".", 1)[-1]
if total is None:
total = True
else:
total = abstract_utils.get_atomic_python_constant(total, bool)
props = TypedDictProperties(
name=cls_name, fields={}, required=set(), total=total)
# Collect the key types defined in the current class.
cls_locals = classgen.get_class_locals(
cls_name,
allow_methods=False,
ordering=classgen.Ordering.FIRST_ANNOTATE,
ctx=self.ctx)
for k, local in cls_locals.items():
assert local.typ
props.add(k, local.typ, total)
# Process base classes and generate the __init__ signature.
self._validate_bases(cls_name, bases)
self._merge_base_class_fields(bases, props)
cls = TypedDictClass(props, self, self.ctx)
cls_var = cls.to_variable(node)
return node, cls_var
def make_class_from_pyi(self, cls_name, pytd_cls, total=True):
"""Make a TypedDictClass from a pyi class."""
# NOTE: Returns the abstract class, not a variable.
if total is None:
total = True
props = TypedDictProperties(
name=cls_name, fields={}, required=set(), total=total)
for c in pytd_cls.constants:
typ = self.ctx.convert.constant_to_var(c.type)
props.add(c.name, typ, total)
# Process base classes and generate the __init__ signature.
bases = [self.ctx.convert.constant_to_var(x)
for x in pytd_cls.bases]
self._validate_bases(cls_name, bases)
self._merge_base_class_fields(bases, props)
cls = TypedDictClass(props, self, self.ctx)
return cls
class TypedDictClass(abstract.PyTDClass):
"""A template for typed dicts."""
def __init__(self, props, base_cls, ctx):
self.props = props
self._base_cls = base_cls # TypedDictBuilder for constructing subclasses
super().__init__(props.name, ctx.convert.dict_type.pytd_cls, ctx)
self.init_method = self._make_init(props)
def __repr__(self):
return f"TypedDictClass({self.name})"
def _make_init(self, props):
# __init__ method for type checking signatures.
# We construct this here and pass it to TypedDictClass because we need
# access to abstract.SignedFunction.
sig = function.Signature.from_param_names(
f"{props.name}.__init__", props.fields.keys(),
kind=pytd.ParameterKind.KWONLY)
sig.annotations = {k: abstract_utils.get_atomic_value(v)
for k, v in props.fields.items()}
sig.defaults = {k: self.ctx.new_unsolvable(self.ctx.root_node)
for k in props.optional}
return abstract.SignedFunction(sig, self.ctx)
def _new_instance(self, container, node, args):
self.init_method.match_and_map_args(node, args, {})
ret = TypedDict(self.props, self.ctx)
for (k, v) in args.namedargs.items():
ret.set_str_item(node, k, v)
return ret
def instantiate(self, node, container):
del container
return TypedDict(self.props, self.ctx).to_variable(node)
def make_class(self, *args, **kwargs):
return self._base_cls.make_class(*args, **kwargs)
class TypedDict(abstract.Dict):
"""Representation of TypedDict instances.
Internally, a TypedDict is a dict with a restricted set of string keys
allowed, each with a fixed type. We implement it as a subclass of Dict, with
some type checks wrapped around key accesses. If a check fails, we simply add
an error to the logs and then continue processing the method as though it were
a regular dict.
"""
def __init__(self, props, ctx):
super().__init__(ctx)
self.props = props
self.set_slot("__delitem__", self.delitem_slot)
@property
def fields(self):
return self.props.fields
@property
def class_name(self):
return self.props.name
def __repr__(self):
return f"<TypedDict {self.class_name}>"
def _check_str_key(self, name):
if name not in self.fields:
self.ctx.errorlog.typed_dict_error(self.ctx.vm.frames, self, name)
return False
return True
def _check_str_key_value(self, node, name, value_var):
if not self._check_str_key(name):
return
typ = abstract_utils.get_atomic_value(self.fields[name])
bad = self.ctx.matcher(node).bad_matches(value_var, typ)
for view, error_details in bad:
binding = view[value_var]
self.ctx.errorlog.annotation_type_mismatch(
self.ctx.vm.frames, typ, binding, name, error_details,
typed_dict=self
)
def _check_key(self, name_var):
"""Check that key is in the typed dict."""
try:
name = abstract_utils.get_atomic_python_constant(name_var, str)
except abstract_utils.ConversionError:
self.ctx.errorlog.typed_dict_error(self.ctx.vm.frames, self, name=None)
return False
return self._check_str_key(name)
def _check_value(self, node, name_var, value_var):
"""Check that value has the right type."""
# We have already called check_key so name is in fields
name = abstract_utils.get_atomic_python_constant(name_var, str)
self._check_str_key_value(node, name, value_var)
def getitem_slot(self, node, name_var):
# A typed dict getitem should have a concrete string arg. If we have a var
# with multiple bindings just fall back to Any.
if not self._check_key(name_var):
return node, self.ctx.new_unsolvable(node)
name = abstract_utils.get_atomic_python_constant(name_var, str)
typ = self.fields[name]
ret = [v.instantiate(node) for v in typ.data]
return node, self.ctx.join_variables(node, ret)
def setitem_slot(self, node, name_var, value_var):
if self._check_key(name_var):
self._check_value(node, name_var, value_var)
return super().setitem_slot(node, name_var, value_var)
def set_str_item(self, node, name, value_var):
self._check_str_key_value(node, name, value_var)
return super().set_str_item(node, name, value_var)
def delitem_slot(self, node, name_var):
self._check_key(name_var)
return self.call_pytd(node, "__delitem__", name_var)
def pop_slot(self, node, key_var, default_var=None):
self._check_key(key_var)
return super().pop_slot(node, key_var, default_var)
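
# Illustrative behaviour (hypothetical TypedDict with a single 'name: str'
# field): getitem_slot falls back to Any when the key variable has no single
# concrete string value; setitem_slot checks the assigned value against the
# declared field type; delitem_slot flags unknown keys. In every failure case
# an error is logged and analysis continues as for a plain dict.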
| 34.520147 | 80 | 0.694822 | 1,344 | 9,424 | 4.65253 | 0.194196 | 0.031345 | 0.02047 | 0.028146 | 0.303534 | 0.225972 | 0.184391 | 0.168079 | 0.168079 | 0.149048 | 0 | 0.0004 | 0.205008 | 9,424 | 272 | 81 | 34.647059 | 0.834223 | 0.170204 | 0 | 0.221053 | 0 | 0 | 0.049864 | 0.006201 | 0 | 0 | 0 | 0 | 0.005263 | 1 | 0.152632 | false | 0 | 0.042105 | 0.036842 | 0.373684 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4579450f7ab9dcd55c2527f2db1b882e121fbb78 | 4,962 | py | Python | esmvaltool/cmorizers/obs/cmorize_obs_aphro_ma.py | cffbots/ESMValTool | a9b6592a02f2085634a214ff5f36a736fa18ff47 | [
"Apache-2.0"
] | 148 | 2017-02-07T13:16:03.000Z | 2022-03-26T02:21:56.000Z | esmvaltool/cmorizers/obs/cmorize_obs_aphro_ma.py | cffbots/ESMValTool | a9b6592a02f2085634a214ff5f36a736fa18ff47 | [
"Apache-2.0"
] | 2,026 | 2017-02-03T12:57:13.000Z | 2022-03-31T15:11:51.000Z | esmvaltool/cmorizers/obs/cmorize_obs_aphro_ma.py | cffbots/ESMValTool | a9b6592a02f2085634a214ff5f36a736fa18ff47 | [
"Apache-2.0"
] | 113 | 2017-01-27T13:10:19.000Z | 2022-02-03T13:42:11.000Z | """ESMValTool CMORizer for APHRODITE Monsoon Asia (APHRO-MA) data.
Tier
Tier 3: restricted dataset.
Source
http://aphrodite.st.hirosaki-u.ac.jp/download/
Last access
20200306
Download and processing instructions
Register at
http://aphrodite.st.hirosaki-u.ac.jp/download/create/
Download the following files from
http://aphrodite.st.hirosaki-u.ac.jp/product/:
APHRO_V1808_TEMP/APHRO_MA
025deg_nc/APHRO_MA_TAVE_025deg_V1808.nc.tgz
050deg_nc/APHRO_MA_TAVE_050deg_V1808.nc.tgz
APHRO_V1101/APHRO_MA
025deg_nc/APHRO_MA_025deg_V1101.1951-2007.nc.gz.tar
050deg_nc/APHRO_MA_050deg_V1101.1951-2007.nc.gz.tar
APHRO_V1101EX_R1/APHRO_MA
025deg_nc/APHRO_MA_025deg_V1101_EXR1.nc.tgz
050deg_nc/APHRO_MA_050deg_V1101_EXR1.nc.tgz
Please untar / unzip all *.tar *.tgz *.gz files in the same directory
(no subdirectories!) prior to running the cmorizer!
Issues:
In input file APHRO_MA_TAVE_050deg_V1808.2015.nc the input variable is
called ta instead of tave as in the other files.
Currently resolved using raw_fallback: ta in case of thrown
iris.exceptions.ConstraintMismatchError
Refs:
APHRO_V1101 and APHRO_V1101EX_R1
Yatagai, A., K. Kamiguchi, O. Arakawa, A. Hamada, N. Yasutomi, and
A. Kitoh, 2012: APHRODITE: Constructing a Long-Term Daily Gridded
Precipitation Dataset for Asia Based on a Dense Network of Rain Gauges.
Bull. Amer. Meteor. Soc., 93, 1401–1415
https://doi.org/10.1175/BAMS-D-11-00122.1
APHRO_V1808_TEMP
Yasutomi, N., Hamada, A., Yatagai, A. (2011) Development of a long-term
daily gridded temperature dataset and its application to rain/snow
discrimination of daily precipitation,
Global Environmental Research 15 (2), 165-172
"""
import logging
from warnings import catch_warnings, filterwarnings
from pathlib import Path
import iris
from esmvalcore.preprocessor import monthly_statistics
from . import utilities as utils
logger = logging.getLogger(__name__)
def _extract_variable(short_name, var, cfg, filepath, out_dir, version):
"""Extract variable."""
logger.info("CMORizing variable '%s' from input file '%s'", short_name,
filepath)
with catch_warnings():
filterwarnings(
action='ignore',
message="Skipping global attribute 'calendar': 'calendar' is .*",
category=UserWarning,
module='iris',
)
try:
cube = iris.load_cube(
str(filepath),
constraint=utils.var_name_constraint(var['raw']),
)
except iris.exceptions.ConstraintMismatchError:
cube = iris.load_cube(
str(filepath),
constraint=utils.var_name_constraint(var['raw_fallback']),
)
# Fix var units
cmor_info = cfg['cmor_table'].get_variable(var['mip'], short_name)
cube.units = var.get('raw_units', short_name)
cube.convert_units(cmor_info.units)
utils.fix_var_metadata(cube, cmor_info)
# fix coordinates
if 'height2m' in cmor_info.dimensions:
utils.add_height2m(cube)
utils.fix_coords(cube)
# Fix metadata
attrs = cfg['attributes'].copy()
attrs['mip'] = var['mip']
attrs['version'] = version.replace('_', '-')
attrs['reference'] = var['reference']
attrs['source'] = attrs['source']
utils.set_global_atts(cube, attrs)
# Save variable
utils.save_variable(cube,
short_name,
out_dir,
attrs,
unlimited_dimensions=['time'])
if 'add_mon' in var.keys():
if var['add_mon']:
logger.info("Building monthly means")
# Calc monthly
cube = monthly_statistics(cube)
cube.remove_coord('month_number')
cube.remove_coord('year')
# Fix metadata
attrs['mip'] = 'Amon'
# Fix coordinates
utils.fix_coords(cube)
# Save variable
utils.save_variable(cube,
short_name,
out_dir,
attrs,
unlimited_dimensions=['time'])
def cmorization(in_dir, out_dir, cfg, _):
"""Cmorization func call."""
raw_filename = cfg['filename']
# Run the cmorization
for (short_name, var) in cfg['variables'].items():
for version in var['version'].values():
logger.info("CMORizing variable '%s'", short_name)
filenames = raw_filename.format(raw_file_var=var['raw_file_var'],
version=version)
for filepath in sorted(Path(in_dir).glob(filenames)):
_extract_variable(short_name, var, cfg, filepath, out_dir,
version)
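
# Illustrative filename expansion (hypothetical cfg values): with
# cfg['filename'] = 'APHRO_MA_{raw_file_var}_{version}.*.nc', the loop above
# globs e.g. 'APHRO_MA_TAVE_050deg_V1808.*.nc' for each configured version
# and cmorizes every matching yearly file in turn.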
| 33.08 | 77 | 0.620919 | 598 | 4,962 | 4.964883 | 0.386288 | 0.025935 | 0.018188 | 0.02324 | 0.274166 | 0.228697 | 0.185248 | 0.175817 | 0.129336 | 0.129336 | 0 | 0.046963 | 0.283353 | 4,962 | 149 | 78 | 33.302013 | 0.787683 | 0.403265 | 0 | 0.235294 | 0 | 0 | 0.11376 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.088235 | 0 | 0.117647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
457e2b38d8a451c8e775c1c394ef7b2670d5e8bd | 5,074 | py | Python | tests/walkers/enum_values_test.py | yyang08/swagger-spec-compatibility | e7a6ba6fc53c6a8a92ba26016219a595a8cecbbe | [
"Apache-2.0"
] | 18 | 2019-04-30T21:07:30.000Z | 2021-12-16T17:56:08.000Z | tests/walkers/enum_values_test.py | yyang08/swagger-spec-compatibility | e7a6ba6fc53c6a8a92ba26016219a595a8cecbbe | [
"Apache-2.0"
] | 30 | 2019-02-26T11:25:44.000Z | 2021-04-16T00:12:11.000Z | tests/walkers/enum_values_test.py | yyang08/swagger-spec-compatibility | e7a6ba6fc53c6a8a92ba26016219a595a8cecbbe | [
"Apache-2.0"
] | 6 | 2019-02-25T22:12:29.000Z | 2020-12-23T00:24:48.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from copy import deepcopy
import mock
import pytest
from swagger_spec_compatibility.spec_utils import load_spec_from_spec_dict
from swagger_spec_compatibility.util import EntityMapping
from swagger_spec_compatibility.walkers.enum_values import _different_enum_values_mapping
from swagger_spec_compatibility.walkers.enum_values import EnumValuesDiff
from swagger_spec_compatibility.walkers.enum_values import EnumValuesDifferWalker
@pytest.mark.parametrize(
'left_dict, right_dict, expected_value',
[
(None, None, None),
({}, {}, None),
({'type': 'object'}, {}, None),
({'enum': ['v1']}, {}, None),
({'type': 'string', 'enum': ['v1']}, {}, EntityMapping({'v1'}, set())),
({}, {'type': 'string', 'enum': ['v1']}, EntityMapping(set(), {'v1'})),
({'type': 'string', 'enum': ['v1']}, {'type': 'string', 'enum': ['v1']}, None),
({'type': 'string', 'enum': ['v1', 'v2']}, {'type': 'string', 'enum': ['v2', 'v1']}, None),
({'type': 'string', 'enum': ['old', 'common']}, {'type': 'string', 'enum': ['common', 'new']}, EntityMapping({'old'}, {'new'})),
],
)
def test__different_enum_values_mapping(left_dict, right_dict, expected_value):
assert _different_enum_values_mapping(
left_spec=mock.sentinel.LEFT_SPEC,
right_spec=mock.sentinel.RIGHT_SPEC,
left_schema=left_dict,
right_schema=right_dict,
) == expected_value
def test_EnumValuesDifferWalker_returns_no_paths_if_no_endpoints_defined(minimal_spec):
assert EnumValuesDifferWalker(minimal_spec, minimal_spec).walk() == []
def test_EnumValuesDifferWalker_returns_paths_of_endpoints_responses(minimal_spec_dict):
old_spec_dict = dict(
minimal_spec_dict,
definitions={
'enum_1': {
'type': 'string',
'enum': ['value_to_remove', 'E2', 'E3'],
'x-model': 'enum_1',
},
'enum_2': {
'type': 'string',
'enum': ['E1', 'E2', 'E3'],
'x-model': 'enum_2',
},
'object': {
'properties': {
'enum_1': {'$ref': '#/definitions/enum_1'},
'enum_2': {'$ref': '#/definitions/enum_2'},
},
'type': 'object',
'x-model': 'object',
},
},
paths={
'/endpoint': {
'get': {
'parameters': [{
'in': 'body',
'name': 'body',
'required': True,
'schema': {
'$ref': '#/definitions/object',
},
}],
'responses': {
'200': {
'description': '',
'schema': {
'$ref': '#/definitions/object',
},
},
},
},
},
},
)
new_spec_dict = deepcopy(old_spec_dict)
del new_spec_dict['definitions']['enum_1']['enum'][0]
new_spec_dict['definitions']['enum_2']['enum'].append('new_value')
old_spec = load_spec_from_spec_dict(old_spec_dict)
new_spec = load_spec_from_spec_dict(new_spec_dict)
assert sorted(EnumValuesDifferWalker(old_spec, new_spec).walk()) == sorted([
EnumValuesDiff(
path=('definitions', 'enum_2'),
mapping=EntityMapping(old=set(), new={'new_value'}),
),
EnumValuesDiff(
path=('definitions', 'object', 'properties', 'enum_2'),
mapping=EntityMapping(old=set(), new={'new_value'}),
),
EnumValuesDiff(
path=('definitions', 'object', 'properties', 'enum_1'),
mapping=EntityMapping(old={'value_to_remove'}, new=set()),
),
EnumValuesDiff(
path=('definitions', 'enum_1'),
mapping=EntityMapping(old={'value_to_remove'}, new=set()),
),
EnumValuesDiff(
path=('paths', '/endpoint', 'get', 'responses', '200', 'schema', 'properties', 'enum_2'),
mapping=EntityMapping(old=set(), new={'new_value'}),
),
EnumValuesDiff(
path=('paths', '/endpoint', 'get', 'responses', '200', 'schema', 'properties', 'enum_1'),
mapping=EntityMapping(old={'value_to_remove'}, new=set()),
),
EnumValuesDiff(
path=('paths', '/endpoint', 'get', 'parameters', 0, 'schema', 'properties', 'enum_2'),
mapping=EntityMapping(old=set(), new={'new_value'}),
),
EnumValuesDiff(
path=('paths', '/endpoint', 'get', 'parameters', 0, 'schema', 'properties', 'enum_1'),
mapping=EntityMapping(old={'value_to_remove'}, new=set()),
),
])
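# Reading the expectations above (semantics inferred from this module's
# assertions): EntityMapping(old, new) pairs enum values present only in the
# old spec with those present only in the new one, so a removed value shows up
# as EntityMapping({'value'}, set()) and an added one as
# EntityMapping(set(), {'value'}).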
| 38.732824 | 136 | 0.522862 | 467 | 5,074 | 5.379015 | 0.188437 | 0.038217 | 0.055732 | 0.055732 | 0.52707 | 0.407245 | 0.364252 | 0.343551 | 0.282643 | 0.282643 | 0 | 0.013383 | 0.307844 | 5,074 | 130 | 137 | 39.030769 | 0.701879 | 0.004139 | 0 | 0.252101 | 0 | 0 | 0.192635 | 0 | 0 | 0 | 0 | 0 | 0.02521 | 1 | 0.02521 | false | 0 | 0.092437 | 0 | 0.117647 | 0.008403 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
458209565dea8013a3f4eb68bbd3789f17400e4a | 2,458 | py | Python | igvjs/main.py | Sparktx-Data-Science/igv.js-flask | 1fc3a6623b6a747830ea0b4c3636adb4e6134c9f | [
"MIT"
] | 25 | 2017-07-20T07:41:30.000Z | 2021-12-30T15:49:03.000Z | igvjs/main.py | kkapuria3/igv.js-flask | f3cde4547eaa29133535f95501778d9fe492532d | [
"MIT"
] | 10 | 2017-09-07T23:30:26.000Z | 2021-08-06T05:08:32.000Z | igvjs/main.py | kkapuria3/igv.js-flask | f3cde4547eaa29133535f95501778d9fe492532d | [
"MIT"
] | 12 | 2017-10-05T15:00:13.000Z | 2021-12-30T15:49:07.000Z | import requests
import re
import os
from flask import Response, request, abort, render_template, url_for, Blueprint
from igvjs._config import basedir
seen_tokens = set()
igvjs_blueprint = Blueprint('igvjs', __name__)
# give blueprint access to app config
@igvjs_blueprint.record
def record_igvjs(setup_state):
    igvjs_blueprint.config = setup_state.app.config
# routes
@igvjs_blueprint.route('/')
def show_vcf():
return render_template('igv.html')
@igvjs_blueprint.before_app_request
def before_request():
if igvjs_blueprint.config['USES_OAUTH'] and (not igvjs_blueprint.config['PUBLIC_DIR'] or \
not os.path.exists('.'+igvjs_blueprint.config['PUBLIC_DIR']) or \
not request.path.startswith(igvjs_blueprint.config['PUBLIC_DIR'])):
auth = request.headers.get("Authorization", None)
if auth:
token = auth.split()[1]
if token not in seen_tokens:
google_url = 'https://www.googleapis.com/oauth2/v1/userinfo'
params = {'access_token':token}
res = requests.get(google_url, params=params)
email = res.json()['email']
if email in allowed_emails():
seen_tokens.add(token)
else:
abort(403)
else:
if "static/data" in request.path and "data/static/data" not in request.path:
abort(401)
return ranged_data_response(request.headers.get('Range', None), request.path[1:])
def allowed_emails():
emails = []
    if os.path.isfile(igvjs_blueprint.config['ALLOWED_EMAILS']):
        with open(igvjs_blueprint.config['ALLOWED_EMAILS'], 'r') as f:
for line in f:
emails.append(line.strip())
return emails
def ranged_data_response(range_header, rel_path):
path = os.path.join(basedir, rel_path)
if not range_header:
return None
    m = re.search(r'(\d+)-(\d*)', range_header)
if not m:
return "Error: unexpected range header syntax: {}".format(range_header)
size = os.path.getsize(path)
offset = int(m.group(1))
length = int(m.group(2) or size) - offset + 1
data = None
with open(path, 'rb') as f:
f.seek(offset)
data = f.read(length)
rv = Response(data, 206, mimetype="application/octet-stream", direct_passthrough=True)
rv.headers['Content-Range'] = 'bytes {0}-{1}/{2}'.format(offset, offset + length-1, size)
return rv
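# Usage sketch (hypothetical file path): serving the first kilobyte of a file.
# rv = ranged_data_response('bytes=0-1023', 'static/data/example.bam')
# yields a 206 response carrying bytes 0-1023 plus a matching
# 'Content-Range: bytes 0-1023/<size>' header.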
| 35.114286 | 94 | 0.634662 | 321 | 2,458 | 4.707165 | 0.376947 | 0.083388 | 0.066181 | 0.051621 | 0.064196 | 0.045003 | 0.045003 | 0 | 0 | 0 | 0 | 0.010707 | 0.240033 | 2,458 | 69 | 95 | 35.623188 | 0.79818 | 0.021155 | 0 | 0.034483 | 0 | 0 | 0.12448 | 0.009992 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086207 | false | 0.017241 | 0.086207 | 0.017241 | 0.275862 | 0.155172 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4584c9acdb19bbdce86f86b2d488f787d8a6387a | 5,068 | py | Python | kraken/shut_down/common_shut_down_func.py | Sau1506mya/kraken-1 | 4f7616a1508e0f1f64356512aeda265e1dce5144 | [
"Apache-2.0"
] | 83 | 2021-01-15T10:42:22.000Z | 2022-03-23T16:01:22.000Z | kraken/shut_down/common_shut_down_func.py | Sau1506mya/kraken-1 | 4f7616a1508e0f1f64356512aeda265e1dce5144 | [
"Apache-2.0"
] | 126 | 2021-01-19T07:41:06.000Z | 2022-03-31T16:27:02.000Z | kraken/shut_down/common_shut_down_func.py | Sau1506mya/kraken-1 | 4f7616a1508e0f1f64356512aeda265e1dce5144 | [
"Apache-2.0"
] | 26 | 2021-01-27T19:34:33.000Z | 2022-03-18T21:18:35.000Z | #!/usr/bin/env python
import sys
import yaml
import logging
import time
from multiprocessing.pool import ThreadPool
import kraken.cerberus.setup as cerberus
import kraken.kubernetes.client as kubecli
import kraken.post_actions.actions as post_actions
from kraken.node_actions.aws_node_scenarios import AWS
from kraken.node_actions.openstack_node_scenarios import OPENSTACKCLOUD
from kraken.node_actions.az_node_scenarios import Azure
from kraken.node_actions.gcp_node_scenarios import GCP
def multiprocess_nodes(cloud_object_function, nodes):
try:
        # Create a thread pool with one worker per node
pool = ThreadPool(processes=len(nodes))
logging.info("nodes type " + str(type(nodes[0])))
if type(nodes[0]) is tuple:
node_id = []
node_info = []
for node in nodes:
node_id.append(node[0])
node_info.append(node[1])
logging.info("node id " + str(node_id))
logging.info("node info" + str(node_info))
pool.starmap(cloud_object_function, zip(node_info, node_id))
else:
logging.info("pool type" + str(type(nodes)))
pool.map(cloud_object_function, nodes)
pool.close()
except Exception as e:
logging.info("Error on pool multiprocessing: " + str(e))
# Inject the cluster shut down scenario
def cluster_shut_down(shut_down_config):
runs = shut_down_config["runs"]
shut_down_duration = shut_down_config["shut_down_duration"]
cloud_type = shut_down_config["cloud_type"]
timeout = shut_down_config["timeout"]
if cloud_type.lower() == "aws":
cloud_object = AWS()
elif cloud_type.lower() == "gcp":
cloud_object = GCP()
elif cloud_type.lower() == "openstack":
cloud_object = OPENSTACKCLOUD()
elif cloud_type.lower() in ["azure", "az"]:
cloud_object = Azure()
else:
logging.error("Cloud type " + cloud_type + " is not currently supported for cluster shut down")
sys.exit(1)
nodes = kubecli.list_nodes()
node_id = []
for node in nodes:
instance_id = cloud_object.get_instance_id(node)
node_id.append(instance_id)
logging.info("node id list " + str(node_id))
for _ in range(runs):
logging.info("Starting cluster_shut_down scenario injection")
stopping_nodes = set(node_id)
multiprocess_nodes(cloud_object.stop_instances, node_id)
stopped_nodes = stopping_nodes.copy()
while len(stopping_nodes) > 0:
for node in stopping_nodes:
if type(node) is tuple:
node_status = cloud_object.wait_until_stopped(node[1], node[0], timeout)
else:
node_status = cloud_object.wait_until_stopped(node, timeout)
# Only want to remove node from stopping list when fully stopped/no error
if node_status:
stopped_nodes.remove(node)
stopping_nodes = stopped_nodes.copy()
logging.info("Shutting down the cluster for the specified duration: %s" % (shut_down_duration))
time.sleep(shut_down_duration)
logging.info("Restarting the nodes")
restarted_nodes = set(node_id)
multiprocess_nodes(cloud_object.start_instances, node_id)
logging.info("Wait for each node to be running again")
not_running_nodes = restarted_nodes.copy()
while len(not_running_nodes) > 0:
for node in not_running_nodes:
if type(node) is tuple:
node_status = cloud_object.wait_until_running(node[1], node[0], timeout)
else:
node_status = cloud_object.wait_until_running(node, timeout)
if node_status:
restarted_nodes.remove(node)
not_running_nodes = restarted_nodes.copy()
logging.info("Waiting for 150s to allow cluster component initialization")
time.sleep(150)
logging.info("Successfully injected cluster_shut_down scenario!")
def run(scenarios_list, config, wait_duration):
failed_post_scenarios = []
for shut_down_config in scenarios_list:
if len(shut_down_config) > 1:
pre_action_output = post_actions.run("", shut_down_config[1])
else:
pre_action_output = ""
with open(shut_down_config[0], "r") as f:
shut_down_config_yaml = yaml.full_load(f)
shut_down_config_scenario = shut_down_config_yaml["cluster_shut_down_scenario"]
start_time = int(time.time())
cluster_shut_down(shut_down_config_scenario)
logging.info("Waiting for the specified duration: %s" % (wait_duration))
time.sleep(wait_duration)
failed_post_scenarios = post_actions.check_recovery(
"", shut_down_config, failed_post_scenarios, pre_action_output
)
end_time = int(time.time())
cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
| 40.870968 | 103 | 0.653907 | 641 | 5,068 | 4.892356 | 0.215289 | 0.063776 | 0.0625 | 0.026786 | 0.214286 | 0.153061 | 0.103954 | 0.103954 | 0.065689 | 0.065689 | 0 | 0.005325 | 0.258879 | 5,068 | 123 | 104 | 41.203252 | 0.829606 | 0.032557 | 0 | 0.142857 | 0 | 0 | 0.108798 | 0.005307 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0 | 0.114286 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4588e1870c69bb48a27afabbf83f4786a23a64c3 | 16,539 | py | Python | game_seq_embedder/app/app_utils.py | Anonymous9999999/Pretrain-via-MTC | bc47c162aecb68708b68d8ff7c9bfd54b0fc485e | [
"Artistic-1.0-Perl"
] | null | null | null | game_seq_embedder/app/app_utils.py | Anonymous9999999/Pretrain-via-MTC | bc47c162aecb68708b68d8ff7c9bfd54b0fc485e | [
"Artistic-1.0-Perl"
] | null | null | null | game_seq_embedder/app/app_utils.py | Anonymous9999999/Pretrain-via-MTC | bc47c162aecb68708b68d8ff7c9bfd54b0fc485e | [
"Artistic-1.0-Perl"
] | null | null | null | import h5py
import collections
import numpy as np
import os
import math
import time
import datetime
import random
import torch
from torch.utils.data.dataset import Dataset
def hdf5_load_dataset(hdf5_file_path, all_indices, step_size, large_batch=1024, is_decode_utf8=False):
random.shuffle(all_indices)
for step_i in range(step_size):
hdf5_file = h5py.File(hdf5_file_path, 'r')
next_indices = all_indices[step_i * large_batch:(step_i + 1) * large_batch]
next_data = collections.defaultdict(lambda: [])
for x in next_indices:
*dataset_name, index = x.split('_')
dataset_name = '_'.join(dataset_name)
next_data[dataset_name].append(int(index))
large_batch_data = []
print(f"Load dataset from hdf5 step {step_i}, size next indices: {len(next_indices)}")
for dataset_name, dataset_indices in next_data.items():
if dataset_name == 'nsh_2020-04-04':
print(f"Skip for {dataset_name}")
continue
else:
print(f"Read from {dataset_name} done, size: {len(dataset_indices)}")
if is_decode_utf8:
temp_indices_data = hdf5_file[dataset_name][sorted(dataset_indices)]
temp_indices_data_str = []
for i, temp_line in enumerate(temp_indices_data):
temp_line = [x.decode('utf-8') for x in temp_line]
temp_indices_data_str.append(temp_line)
temp_indices_data_str = np.stack(temp_indices_data_str)
large_batch_data.append(temp_indices_data_str)
else:
large_batch_data.append(hdf5_file[dataset_name][sorted(dataset_indices)])
large_batch_data = np.concatenate(large_batch_data).astype(str)
hdf5_file.close()
yield large_batch_data
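# Usage sketch (file name and consumer are hypothetical): stream shuffled
# large batches out of the HDF5 store, one generator step at a time.
# indices, total = get_all_indices('corpus.h5', debug_N=None)
# for batch in hdf5_load_dataset('corpus.h5', indices, step_size=total // 1024 + 1):
#     process(batch)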
class TextDataset(Dataset):
"""
This will be superseded by a framework-agnostic approach
soon.
"""
def __init__(
self,
hdf_data,
tokenizer,
block_size: int,
use_time_embed: bool = False,
use_bpe=False,
debugN=None,
max_time_gap=None,
use_sinusoidal=False,
behave_tokenizer=None,
design_tokenizer=None,
):
if tokenizer is None:
assert use_time_embed
assert behave_tokenizer and design_tokenizer
assert not use_bpe
self.tokenizer = tokenizer
self.behave_tokenizer = behave_tokenizer
self.design_tokenizer = design_tokenizer
self.use_time_embed = use_time_embed
self.examples = []
self.time_gaps = []
self.design_ids = []
for sample_i, sample in enumerate(hdf_data):
if debugN:
if sample_i >= debugN:
print(f"[DEBUG N] Stop loading data, debug N is set to {debugN}")
break
sample = [x for x in sample if x != '[PAD]']
text_block = ' '.join(sample)
# --------------------------------------------------------------------------------------------------
# CODE BY PJS
# --------------------------------------------------------------------------------------------------
if use_time_embed:
pure_text_block = [x for i, x in enumerate(text_block.split(' ')) if (i + 1) % 3 != 0]
time_gap_block = text_block.split(' ')[2::3]
# This is only for data assertion
temp_test_time_gap = time_gap_block[0]
temp_date_obj = datetime.datetime.fromtimestamp(int(temp_test_time_gap))
assert 2019 < temp_date_obj.year < 2022
# compute the time gap if sinusoidal is not used
if not use_sinusoidal:
                    # TODO: I think this still needs changing: unify on a maximum of 1024 seconds with the second as the base unit, though the smallest step here is 1 second * 100
time_gap_block = list(zip(time_gap_block, time_gap_block[1:] + [0]))
time_gap_block = [math.ceil((int(t2) - int(t1)) / 100) for t1, t2 in time_gap_block]
time_gap_block[-1] = 0
assert min(time_gap_block) >= 0
if tokenizer is not None:
time_gap_block = [y for x in zip(time_gap_block, time_gap_block) for y in x][:block_size]
else:
time_gap_block = [y for x in zip(time_gap_block, time_gap_block) for y in x][:int(block_size * 2)]
# assert len(pure_text_block) == len(time_gap_block)
if use_bpe:
text_block = ''.join(pure_text_block)
else:
text_block = ' '.join(pure_text_block)
else:
pure_text_block = [x for i, x in enumerate(text_block.split(' ')) if (i + 1) % 3 != 0]
if use_bpe:
text_block = ''.join(pure_text_block)
else:
text_block = ' '.join(pure_text_block)
if tokenizer is not None:
output = tokenizer.encode(text_block)
tokenized_ids = output.ids
tokenized_texts = output.tokens
design_tokenized_ids = None
else:
# get behave token
behave_output = behave_tokenizer.encode(' '.join(text_block.split()[::2]))
behave_tokenized_ids = behave_output.ids
behave_texts = behave_output.tokens
# get design token
design_output = design_tokenizer.encode(' '.join(text_block.split()[1::2]))
design_tokenized_ids = design_output.ids
design_texts = design_output.tokens
# combine them all
assert len(behave_tokenized_ids) == len(design_tokenized_ids) == len(behave_texts) == len(design_texts)
tokenized_ids = behave_tokenized_ids
tokenized_texts = None
# if use_bpe:
# # assert len(''.join([y for x in output.tokens for y in x]).replace('_', '').replace('▁', '')) == len(
# # text_block), print(f"len of tokenized_texts no equal to origin, text: {tokenized_texts}")
# assert len(''.join([y for x in output.tokens for y in x]).replace('_', '').replace('▁', '')) == len(
# text_block), print(f"len of tokenized_texts no equal to origin, text: {tokenized_texts}")
tokenized_ids = tokenized_ids[:block_size]
if design_tokenized_ids:
design_tokenized_ids = design_tokenized_ids[:block_size]
if tokenized_texts:
tokenized_texts = tokenized_texts[:block_size]
example = np.array(tokenized_ids)
if use_time_embed:
time_gaps = np.array([int(x) for x in time_gap_block], dtype=int)
if use_bpe:
new_time_gaps = []
start_index = 0
for word in tokenized_texts:
word = word.replace('_', '').replace('▁', '')
new_time_gap = time_gaps[start_index:start_index + len(word)]
new_time_gaps.append(sum(new_time_gap))
start_index += len(word)
new_time_gaps = np.array(new_time_gaps)
time_gaps = new_time_gaps
# cut off max time gap
if not use_sinusoidal:
time_gaps = np.array([x if x <= max_time_gap - 1 else max_time_gap - 1 for x in time_gaps])
else:
                    # Do a conversion here; one day has 86400 seconds
time_gap0 = datetime.datetime.fromtimestamp(time_gaps[0])
today_start = datetime.datetime(year=time_gap0.year, month=time_gap0.month, day=time_gap0.day)
today_start_timestamp = int(time.mktime(today_start.timetuple()))
time_gaps = time_gaps - today_start_timestamp
# recover the length of time gaps for sperate ids
if tokenizer is None:
time_gaps = time_gaps[::2]
assert example.shape == time_gaps.shape
if tokenizer is None:
assert example.shape == time_gaps.shape == np.array(design_tokenized_ids).shape
# --------------------------------------------------------------------------------------------------
if len(example) < block_size:
# pad example
if tokenizer:
all_pad_example = np.full(block_size, tokenizer.pad_token_id)
else:
all_pad_example = np.full(block_size, behave_tokenizer.pad_token_id)
all_pad_example[:len(example)] = example
example = all_pad_example
# pad design_id
if design_tokenized_ids:
all_pad_design_ids = np.full(block_size, design_tokenizer.pad_token_id)
all_pad_design_ids[:len(design_tokenized_ids)] = design_tokenized_ids
design_tokenized_ids = all_pad_design_ids
if use_time_embed:
all_pad_time_gap = np.full(block_size, 0)
all_pad_time_gap[:len(time_gaps)] = time_gaps
time_gaps = all_pad_time_gap
# add example
self.examples.append(example)
# add design id
if not tokenizer:
self.design_ids.append(np.array(design_tokenized_ids))
if use_time_embed:
self.time_gaps.append(time_gaps)
# Note that we are losing the last truncated example here for the sake of simplicity (no padding)
        # If your dataset is small, first you should look for a bigger one :-) and second you
# can change this behavior by adding (model specific) padding.
# with open(cached_features_file, "wb") as handle:
# pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
# logger.info(
# "Saving features into cached file %s [took %.3f s]", cached_features_file, time.time() - start
# )
def __len__(self):
return len(self.examples)
def __getitem__(self, i):
# Mode: three in one
if self.tokenizer is None:
cat_arr = np.concatenate([self.examples[i], self.design_ids[i], self.time_gaps[i]])
return torch.tensor(cat_arr, dtype=torch.long)
else:
# Mode: General
if self.use_time_embed:
cat_arr = np.concatenate([self.examples[i], self.time_gaps[i]])
return torch.tensor(cat_arr, dtype=torch.long)
else:
return torch.tensor(self.examples[i], dtype=torch.long)
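# Layout note (inferred from the concatenations above): in "three in one" mode
# each item is [behave_ids | design_ids | time_gaps], each block_size long;
# with a tokenizer plus time embeddings it is [token_ids | time_gaps];
# otherwise it is the token ids alone.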
def get_dataset(
tokenizer,
block_size,
behave_tokenizer=None,
design_tokenizer=None,
use_time_embed=False,
debugN=None,
hdf_data=None,
use_bpe=False,
max_time_gap=None,
use_sinusoidal=False
):
return TextDataset(
hdf_data=hdf_data,
tokenizer=tokenizer,
behave_tokenizer=behave_tokenizer,
design_tokenizer=design_tokenizer,
block_size=block_size,
use_time_embed=use_time_embed,
debugN=debugN,
use_bpe=use_bpe,
max_time_gap=max_time_gap,
use_sinusoidal=use_sinusoidal
)
def get_all_indices(h5_data_file_path, debug_N):
hdf5_file = h5py.File(h5_data_file_path, 'r')
all_indices = []
all_keys = sorted(hdf5_file.keys())
total_num = 0
for key in all_keys:
data = hdf5_file[key]
shape = data.shape
total_num += shape[0]
all_indices.extend([f'{key}_{x}' for x in range(shape[0])])
if debug_N:
all_indices, total_num = all_indices[:debug_N], debug_N
return all_indices, total_num
def load_dataset_from_hdf5_by_indices(hdf5_file_path, indices, is_decode_utf8=False):
hdf5_file = h5py.File(hdf5_file_path, 'r')
next_data = collections.defaultdict(lambda: [])
for x in indices:
*dataset_name, index = x.split('_')
dataset_name = '_'.join(dataset_name)
next_data[dataset_name].append(int(index))
large_batch_data = []
for dataset_name, dataset_indices in next_data.items():
if is_decode_utf8:
temp_indices_data = hdf5_file[dataset_name][sorted(dataset_indices)]
temp_indices_data_str = []
for i, temp_line in enumerate(temp_indices_data):
temp_line = [x.decode('utf-8') for x in temp_line]
temp_indices_data_str.append(temp_line)
temp_indices_data_str = np.stack(temp_indices_data_str)
large_batch_data.append(temp_indices_data_str)
else:
large_batch_data.append(hdf5_file[dataset_name][sorted(dataset_indices)])
large_batch_data = np.concatenate(large_batch_data).astype(str)
hdf5_file.close()
return large_batch_data
def _convert_token_to_id_with_added_voc(token, added_tokens_encoder):
if token is None:
return None
if token in added_tokens_encoder:
return added_tokens_encoder[token]
def create_func1(sep_token_id, cls_token_id):
def get_special_tokens_mask(token_ids_0, token_ids_1=None, already_has_special_tokens=False):
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.
Args:
token_ids_0: list of ids (must not contain special tokens)
token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids
for sequence pairs
            already_has_special_tokens: (default False) Set to True if the token list is already formatted with
                special tokens for the model
Returns:
A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formated with special tokens for the model."
)
return list(map(lambda x: 1 if x in [sep_token_id, cls_token_id] else 0, token_ids_0))
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1]
return get_special_tokens_mask
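# Example (the ids are assumptions): with sep id 2 and cls id 0, wrapping a
# 3-token sequence marks the two added special positions, so:
# mask_fn = create_func1(sep_token_id=2, cls_token_id=0)
# mask_fn([5, 6, 7]) == [1, 0, 0, 0, 1]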
def create_func2(added_tokens_encoder, mask_token_id):
def convert_tokens_to_ids(tokens):
""" Converts a single token, or a sequence of tokens, (str) in a single integer id
(resp. a sequence of ids), using the vocabulary.
"""
if tokens == '[MASK]':
return mask_token_id
if tokens is None:
return None
if isinstance(tokens, str):
return _convert_token_to_id_with_added_voc(tokens, added_tokens_encoder)
ids = []
for token in tokens:
ids.append(_convert_token_to_id_with_added_voc(token, added_tokens_encoder))
return ids
return convert_tokens_to_ids
def tokenizer_post_process(tokenizer, block_size, type):
if type == 'whitespace':
tokenizer.max_len = block_size
tokenizer.get_special_tokens_mask = create_func1(tokenizer.pad_token_id, tokenizer.cls_token_id)
tokenizer.convert_tokens_to_ids = create_func2(tokenizer.added_tokens_encoder, tokenizer.mask_token_id)
elif type == 'bpe':
tokenizer.max_len = block_size
tokenizer.cls_token_id = 0
tokenizer.pad_token_id = 1
tokenizer.sep_token_id = 2
tokenizer.unk_token_id = 3
tokenizer.mask_token_id = 4
tokenizer.get_special_tokens_mask = create_func1(tokenizer.pad_token_id, tokenizer.cls_token_id)
tokenizer.added_tokens_encoder = {}
tokenizer.convert_tokens_to_ids = create_func2(tokenizer.added_tokens_encoder, tokenizer.mask_token_id)
tokenizer.mask_token = '[MASK]'
tokenizer._pad_token = '[PAD]'
else:
raise NotImplementedError
return tokenizer
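# Usage sketch: adapt a raw `tokenizers` tokenizer to the interface the dataset
# code above expects. The special-token ids hard-coded in the 'bpe' branch are
# assumptions about this project's vocabulary layout, not a library guarantee.
# tokenizer = tokenizer_post_process(tokenizer, block_size=512, type='bpe')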
| 39.949275 | 120 | 0.589697 | 2,063 | 16,539 | 4.41493 | 0.146389 | 0.024594 | 0.022398 | 0.019763 | 0.417435 | 0.388669 | 0.339152 | 0.306324 | 0.271959 | 0.242973 | 0 | 0.012125 | 0.311869 | 16,539 | 413 | 121 | 40.046005 | 0.787892 | 0.147953 | 0 | 0.339161 | 0 | 0 | 0.030469 | 0.001581 | 0 | 0 | 0 | 0.002421 | 0.027972 | 1 | 0.045455 | false | 0 | 0.038462 | 0.006993 | 0.153846 | 0.013986 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
458a74a131096b9821f4d2a10c82a473fe5352fd | 30,220 | py | Python | src/pagure/forms.py | yifengyou/learn-pagure | e54ba955368918c92ad2be6347b53bb2c24a228c | [
"Unlicense"
] | null | null | null | src/pagure/forms.py | yifengyou/learn-pagure | e54ba955368918c92ad2be6347b53bb2c24a228c | [
"Unlicense"
] | null | null | null | src/pagure/forms.py | yifengyou/learn-pagure | e54ba955368918c92ad2be6347b53bb2c24a228c | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
"""
(c) 2014-2016 - Copyright Red Hat Inc
Authors:
Pierre-Yves Chibon <pingou@pingoured.fr>
"""
# pylint: disable=too-few-public-methods
# pylint: disable=no-init
# pylint: disable=super-on-old-class
from __future__ import unicode_literals, absolute_import
import datetime
import re
import flask
import flask_wtf as wtf
try:
from flask_wtf import FlaskForm
except ImportError:
from flask_wtf import Form as FlaskForm
import six
import wtforms
import pagure.lib.query
import pagure.validators
from pagure.config import config as pagure_config
from pagure.utils import urlpattern, is_admin
STRICT_REGEX = "^[a-zA-Z0-9-_]+$"
# This regex is used when creating tags, there we do not want to allow ','
# as otherwise it breaks the UI.
TAGS_REGEX = "^[a-zA-Z0-9][a-zA-Z0-9-_ .:]+$"
TAGS_REGEX_RE = re.compile(TAGS_REGEX)
# In the issue page tags are sent as a comma-separated list, so in order to
# allow having multiple tags in an issue, we need to allow ',' in them.
TAGS_REGEX_MULTI = "^[a-zA-Z0-9][a-zA-Z0-9-_, .:]+$"
FALSE_VALUES = ("false", "", False, "False", 0, "0")
WTF_VERSION = tuple()
if hasattr(wtf, "__version__"):
WTF_VERSION = tuple(int(v) for v in wtf.__version__.split("."))
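# e.g. flask-wtf "0.14.3" parses to (0, 14, 3), so the tuple comparisons used
# below act as a simple version check.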
class PagureForm(FlaskForm):
""" Local form allowing us to form set the time limit. """
def __init__(self, *args, **kwargs):
delta = pagure_config.get("WTF_CSRF_TIME_LIMIT", 3600)
if delta and WTF_VERSION < (0, 10, 0):
self.TIME_LIMIT = datetime.timedelta(seconds=delta)
else:
self.TIME_LIMIT = delta
if "csrf_enabled" in kwargs and kwargs["csrf_enabled"] is False:
kwargs["meta"] = {"csrf": False}
if WTF_VERSION >= (0, 14, 0):
kwargs.pop("csrf_enabled")
super(PagureForm, self).__init__(*args, **kwargs)
def convert_value(val):
""" Convert the provided values to strings when possible. """
if val:
if not isinstance(val, (list, tuple, six.text_type)):
return val.decode("utf-8")
elif isinstance(val, six.string_types):
return val
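# Note (added for clarity): falsy values and lists/tuples fall through and
# yield None here; only byte strings are decoded, and text is returned as-is.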
class MultipleEmail(wtforms.validators.Email):
"""Split the value by comma and run them through the email validator
of wtforms.
"""
def __call__(self, form, field):
message = field.gettext("One or more invalid email address.")
for data in field.data.split(","):
data = data.strip()
if not self.regex.match(data or ""):
raise wtforms.validators.ValidationError(message)
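# Example (hypothetical data): "a@example.com, b@example.com" validates, while
# "a@example.com, not-an-email" fails the whole field with the single message
# "One or more invalid email address."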
def user_namespace_if_private(form, field):
"""Check if the data in the field is the same as in the password field."""
if form.private.data:
field.data = flask.g.fas_user.username
def file_virus_validator(form, field):
"""Checks for virus in the file from flask request object,
raises wtf.ValidationError if virus is found else None."""
if not pagure_config["VIRUS_SCAN_ATTACHMENTS"]:
return
from pyclamd import ClamdUnixSocket
if (
field.name not in flask.request.files
or flask.request.files[field.name].filename == ""
):
# If no file was uploaded, this field is correct
return
uploaded = flask.request.files[field.name]
clam = ClamdUnixSocket()
if not clam.ping():
raise wtforms.ValidationError(
"Unable to communicate with virus scanner"
)
results = clam.scan_stream(uploaded.stream.read())
if results is None:
uploaded.stream.seek(0)
return
else:
        result = list(results.values())[0]
res_type, res_msg = result
if res_type == "FOUND":
raise wtforms.ValidationError("Virus found: %s" % res_msg)
else:
raise wtforms.ValidationError("Error scanning uploaded file")
def ssh_key_validator(form, field):
""" Form for ssh key validation """
if not pagure.lib.query.are_valid_ssh_keys(field.data):
raise wtforms.ValidationError("Invalid SSH keys")
class ProjectFormSimplified(PagureForm):
""" Form to edit the description of a project. """
description = wtforms.StringField(
"Description",
[wtforms.validators.DataRequired()],
)
url = wtforms.StringField(
"URL",
[
wtforms.validators.optional(),
wtforms.validators.Regexp(urlpattern, flags=re.IGNORECASE),
],
)
avatar_email = wtforms.StringField(
"Avatar email",
[
pagure.validators.EmailValidator("avatar_email must be an email"),
wtforms.validators.optional(),
],
)
tags = wtforms.StringField(
"Project tags",
[wtforms.validators.optional(), wtforms.validators.Length(max=255)],
)
private = wtforms.BooleanField(
"Private", [wtforms.validators.Optional()], false_values=FALSE_VALUES
)
mirrored_from = wtforms.StringField(
"Mirrored from",
[wtforms.validators.optional(), wtforms.validators.Length(max=255)],
)
class ProjectForm(ProjectFormSimplified):
""" Form to create or edit project. """
name = wtforms.StringField("Project name")
mirrored_from = wtforms.StringField(
"Mirror from URL",
[
wtforms.validators.optional(),
wtforms.validators.Regexp(urlpattern, flags=re.IGNORECASE),
],
)
create_readme = wtforms.BooleanField(
"Create README",
[wtforms.validators.optional()],
false_values=FALSE_VALUES,
)
namespace = wtforms.SelectField(
"Project Namespace",
[user_namespace_if_private, wtforms.validators.optional()],
choices=[],
coerce=convert_value,
)
ignore_existing_repos = wtforms.BooleanField(
"Ignore existing repositories",
[wtforms.validators.optional()],
false_values=FALSE_VALUES,
)
repospanner_region = wtforms.SelectField(
"repoSpanner Region",
[wtforms.validators.optional()],
choices=(
[("none", "Disabled")]
+ [
(region, region)
for region in pagure_config["REPOSPANNER_REGIONS"].keys()
]
),
coerce=convert_value,
default=pagure_config["REPOSPANNER_NEW_REPO"],
)
default_branch = wtforms.StringField(
"Default branch",
[wtforms.validators.optional()],
)
def __init__(self, *args, **kwargs):
"""Calls the default constructor with the normal argument but
uses the list of collection provided to fill the choices of the
drop-down list.
"""
super(ProjectForm, self).__init__(*args, **kwargs)
# set the name validator
regex = pagure_config.get(
"PROJECT_NAME_REGEX", "^[a-zA-z0-9_][a-zA-Z0-9-_.+]*$"
)
self.name.validators = [
wtforms.validators.DataRequired(),
wtforms.validators.Regexp(regex, flags=re.IGNORECASE),
]
# Set the list of namespace
if "namespaces" in kwargs:
self.namespace.choices = [
(namespace, namespace) for namespace in kwargs["namespaces"]
]
if not pagure_config.get("USER_NAMESPACE", False):
self.namespace.choices.insert(0, ("", ""))
if not (
is_admin()
and pagure_config.get("ALLOW_ADMIN_IGNORE_EXISTING_REPOS")
) and (
flask.g.fas_user.username
not in pagure_config["USERS_IGNORE_EXISTING_REPOS"]
):
self.ignore_existing_repos = None
if not (
is_admin()
and pagure_config.get("REPOSPANNER_NEW_REPO_ADMIN_OVERRIDE")
):
self.repospanner_region = None
class IssueFormSimplied(PagureForm):
""" Form to create or edit an issue. """
title = wtforms.StringField(
"Title",
[wtforms.validators.DataRequired()],
)
issue_content = wtforms.TextAreaField(
"Content",
[wtforms.validators.DataRequired()],
)
private = wtforms.BooleanField(
"Private", [wtforms.validators.optional()], false_values=FALSE_VALUES
)
milestone = wtforms.SelectField(
"Milestone",
[wtforms.validators.Optional()],
choices=[],
coerce=convert_value,
)
priority = wtforms.SelectField(
"Priority",
[wtforms.validators.Optional()],
choices=[],
coerce=convert_value,
)
def __init__(self, *args, **kwargs):
"""Calls the default constructor with the normal argument but
uses the list of collection provided to fill the choices of the
drop-down list.
"""
super(IssueFormSimplied, self).__init__(*args, **kwargs)
self.priority.choices = []
if "priorities" in kwargs:
for key in sorted(kwargs["priorities"]):
self.priority.choices.append((key, kwargs["priorities"][key]))
self.milestone.choices = []
if "milestones" in kwargs and kwargs["milestones"]:
for key in kwargs["milestones"]:
self.milestone.choices.append((key, key))
self.milestone.choices.insert(0, ("", ""))
class IssueForm(IssueFormSimplied):
""" Form to create or edit an issue. """
status = wtforms.SelectField(
"Status", [wtforms.validators.DataRequired()], choices=[]
)
def __init__(self, *args, **kwargs):
"""Calls the default constructor with the normal argument but
uses the list of collection provided to fill the choices of the
drop-down list.
"""
super(IssueForm, self).__init__(*args, **kwargs)
if "status" in kwargs:
self.status.choices = [
(status, status) for status in kwargs["status"]
]
class RequestPullForm(PagureForm):
""" Form to create a pull request. """
title = wtforms.StringField(
"Title",
[wtforms.validators.DataRequired()],
)
initial_comment = wtforms.TextAreaField(
"Initial Comment", [wtforms.validators.Optional()]
)
allow_rebase = wtforms.BooleanField(
"Allow rebasing",
[wtforms.validators.Optional()],
false_values=FALSE_VALUES,
)
class RequestPullEditForm(RequestPullForm):
""" Form to edit a pull request. """
branch_to = wtforms.SelectField(
"Target branch",
[wtforms.validators.Required()],
choices=[],
coerce=convert_value,
)
def __init__(self, *args, **kwargs):
"""Calls the default constructor with the normal argument but
uses the list of collection provided to fill the choices of the
drop-down list.
"""
super(RequestPullEditForm, self).__init__(*args, **kwargs)
if "branches" in kwargs:
self.branch_to.choices = [
(branch, branch) for branch in kwargs["branches"]
]
class RemoteRequestPullForm(RequestPullForm):
""" Form to create a remote pull request. """
git_repo = wtforms.StringField(
"Git repo address",
[
wtforms.validators.DataRequired(),
wtforms.validators.Regexp(urlpattern, flags=re.IGNORECASE),
],
)
branch_from = wtforms.StringField(
"Git branch",
[wtforms.validators.DataRequired()],
)
branch_to = wtforms.StringField(
"Git branch to merge in",
[wtforms.validators.DataRequired()],
)
class DeleteIssueTagForm(PagureForm):
""" Form to remove a tag to from a project. """
tag = wtforms.StringField(
"Tag",
[
wtforms.validators.Optional(),
wtforms.validators.Regexp(TAGS_REGEX, flags=re.IGNORECASE),
wtforms.validators.Length(max=255),
],
)
class AddIssueTagForm(DeleteIssueTagForm):
""" Form to add a tag to a project. """
tag_description = wtforms.StringField(
"Tag Description", [wtforms.validators.Optional()]
)
tag_color = wtforms.StringField(
"Tag Color", [wtforms.validators.DataRequired()]
)
class ApiAddIssueTagForm(PagureForm):
""" Form to add a tag to a project from the API endpoint """
tag = wtforms.StringField(
"Tag",
[
wtforms.validators.DataRequired(),
wtforms.validators.Regexp(TAGS_REGEX, flags=re.IGNORECASE),
wtforms.validators.Length(max=255),
],
)
tag_description = wtforms.StringField(
"Tag Description", [wtforms.validators.Optional()]
)
tag_color = wtforms.StringField(
"Tag Color", [wtforms.validators.DataRequired()]
)
class StatusForm(PagureForm):
""" Form to add/change the status of an issue. """
status = wtforms.SelectField(
"Status", [wtforms.validators.DataRequired()], choices=[]
)
close_status = wtforms.SelectField(
"Closed as", [wtforms.validators.Optional()], choices=[]
)
def __init__(self, *args, **kwargs):
"""Calls the default constructor with the normal argument but
uses the list of collection provided to fill the choices of the
drop-down list.
"""
super(StatusForm, self).__init__(*args, **kwargs)
if "status" in kwargs:
self.status.choices = [
(status, status) for status in kwargs["status"]
]
self.close_status.choices = []
if "close_status" in kwargs:
for key in sorted(kwargs["close_status"]):
self.close_status.choices.append((key, key))
self.close_status.choices.insert(0, ("", ""))
class MilestoneForm(PagureForm):
""" Form to change the milestone of an issue. """
milestone = wtforms.SelectField(
"Milestone",
[wtforms.validators.Optional()],
choices=[],
coerce=convert_value,
)
def __init__(self, *args, **kwargs):
"""Calls the default constructor with the normal argument but
uses the list of collection provided to fill the choices of the
drop-down list.
"""
super(MilestoneForm, self).__init__(*args, **kwargs)
self.milestone.choices = []
if "milestones" in kwargs and kwargs["milestones"]:
for key in kwargs["milestones"]:
self.milestone.choices.append((key, key))
self.milestone.choices.insert(0, ("", ""))
class NewTokenForm(PagureForm):
""" Form to add a new token. """
description = wtforms.StringField(
"description", [wtforms.validators.Optional()]
)
expiration_date = wtforms.DateField(
"expiration date",
[wtforms.validators.DataRequired()],
default=datetime.date.today() + datetime.timedelta(days=(30 * 6)),
)
acls = wtforms.SelectMultipleField(
"ACLs", [wtforms.validators.DataRequired()], choices=[]
)
def __init__(self, *args, **kwargs):
"""Calls the default constructor with the normal argument but
uses the list of collection provided to fill the choices of the
drop-down list.
"""
super(NewTokenForm, self).__init__(*args, **kwargs)
if "acls" in kwargs:
self.acls.choices = [
(acl.name, acl.name) for acl in kwargs["acls"]
]
if "sacls" in kwargs:
self.acls.choices = [(acl, acl) for acl in kwargs["sacls"]]
class UpdateIssueForm(PagureForm):
""" Form to add a comment to an issue. """
tag = wtforms.StringField(
"tag",
[
wtforms.validators.Optional(),
wtforms.validators.Regexp(TAGS_REGEX_MULTI, flags=re.IGNORECASE),
wtforms.validators.Length(max=255),
],
)
depending = wtforms.StringField(
"depending issue", [wtforms.validators.Optional()]
)
blocking = wtforms.StringField(
"blocking issue", [wtforms.validators.Optional()]
)
comment = wtforms.TextAreaField("Comment", [wtforms.validators.Optional()])
assignee = wtforms.TextAreaField(
"Assigned to", [wtforms.validators.Optional()]
)
status = wtforms.SelectField(
"Status", [wtforms.validators.Optional()], choices=[]
)
priority = wtforms.SelectField(
"Priority", [wtforms.validators.Optional()], choices=[]
)
milestone = wtforms.SelectField(
"Milestone",
[wtforms.validators.Optional()],
choices=[],
coerce=convert_value,
)
private = wtforms.BooleanField(
"Private", [wtforms.validators.optional()], false_values=FALSE_VALUES
)
close_status = wtforms.SelectField(
"Closed as",
[wtforms.validators.Optional()],
choices=[],
coerce=convert_value,
)
def __init__(self, *args, **kwargs):
"""Calls the default constructor with the normal argument but
uses the list of collection provided to fill the choices of the
drop-down list.
"""
super(UpdateIssueForm, self).__init__(*args, **kwargs)
if "status" in kwargs:
self.status.choices = [
(status, status) for status in kwargs["status"]
]
self.priority.choices = []
if "priorities" in kwargs:
for key in sorted(kwargs["priorities"]):
self.priority.choices.append((key, kwargs["priorities"][key]))
self.milestone.choices = []
if "milestones" in kwargs and kwargs["milestones"]:
for key in kwargs["milestones"]:
self.milestone.choices.append((key, key))
self.milestone.choices.insert(0, ("", ""))
self.close_status.choices = []
if "close_status" in kwargs:
for key in sorted(kwargs["close_status"]):
self.close_status.choices.append((key, key))
self.close_status.choices.insert(0, ("", ""))
class AddPullRequestCommentForm(PagureForm):
""" Form to add a comment to a pull-request. """
commit = wtforms.HiddenField("commit identifier")
filename = wtforms.HiddenField("file changed")
row = wtforms.HiddenField("row")
requestid = wtforms.HiddenField("requestid")
tree_id = wtforms.HiddenField("treeid")
comment = wtforms.TextAreaField(
"Comment",
[wtforms.validators.DataRequired()],
)
class AddPullRequestFlagFormV1(PagureForm):
""" Form to add a flag to a pull-request or commit. """
username = wtforms.StringField(
"Username", [wtforms.validators.DataRequired()]
)
percent = wtforms.StringField(
"Percentage of completion", [wtforms.validators.optional()]
)
comment = wtforms.TextAreaField(
"Comment", [wtforms.validators.DataRequired()]
)
url = wtforms.StringField(
"URL",
[
wtforms.validators.DataRequired(),
wtforms.validators.Regexp(urlpattern, flags=re.IGNORECASE),
],
)
uid = wtforms.StringField("UID", [wtforms.validators.optional()])
class AddPullRequestFlagForm(AddPullRequestFlagFormV1):
""" Form to add a flag to a pull-request or commit. """
def __init__(self, *args, **kwargs):
# we need to instantiate dynamically because the configuration
# values may change during tests and we want to always respect
# the currently set value
super(AddPullRequestFlagForm, self).__init__(*args, **kwargs)
self.status.choices = list(
zip(
pagure_config["FLAG_STATUSES_LABELS"].keys(),
pagure_config["FLAG_STATUSES_LABELS"].keys(),
)
)
status = wtforms.SelectField(
"status", [wtforms.validators.DataRequired()], choices=[]
)
class AddSSHKeyForm(PagureForm):
""" Form to add a SSH key to a user. """
ssh_key = wtforms.StringField(
"SSH Key",
[wtforms.validators.DataRequired()]
# TODO: Add an ssh key validator?
)
class AddDeployKeyForm(AddSSHKeyForm):
""" Form to add a deploy key to a project. """
pushaccess = wtforms.BooleanField(
"Push access",
[wtforms.validators.optional()],
false_values=FALSE_VALUES,
)
class AddUserForm(PagureForm):
""" Form to add a user to a project. """
user = wtforms.StringField(
"Username",
[wtforms.validators.DataRequired()],
)
access = wtforms.StringField(
"Access Level",
[wtforms.validators.DataRequired()],
)
branches = wtforms.StringField(
"Git branches",
[wtforms.validators.Optional()],
)
class AddUserToGroupForm(PagureForm):
""" Form to add a user to a pagure group. """
user = wtforms.StringField(
"Username",
[wtforms.validators.DataRequired()],
)
class AssignIssueForm(PagureForm):
""" Form to assign an user to an issue. """
assignee = wtforms.StringField(
"Assignee",
[wtforms.validators.Optional()],
)
class AddGroupForm(PagureForm):
""" Form to add a group to a project. """
group = wtforms.StringField(
"Group",
[
wtforms.validators.DataRequired(),
wtforms.validators.Regexp(STRICT_REGEX, flags=re.IGNORECASE),
],
)
access = wtforms.StringField(
"Access Level",
[wtforms.validators.DataRequired()],
)
branches = wtforms.StringField(
"Git branches",
[wtforms.validators.Optional()],
)
class ConfirmationForm(PagureForm):
""" Simple form used just for CSRF protection. """
pass
class ModifyACLForm(PagureForm):
""" Form to change ACL of a user or a group to a project. """
user_type = wtforms.SelectField(
"User type",
[wtforms.validators.DataRequired()],
choices=[("user", "User"), ("group", "Group")],
)
name = wtforms.StringField(
"User- or Groupname",
[wtforms.validators.DataRequired()],
)
acl = wtforms.SelectField(
"ACL type",
[wtforms.validators.Optional()],
choices=[
("admin", "Admin"),
("ticket", "Ticket"),
("commit", "Commit"),
(None, None),
],
coerce=convert_value,
)
class UploadFileForm(PagureForm):
""" Form to upload a file. """
filestream = wtforms.FileField(
"File", [wtforms.validators.DataRequired(), file_virus_validator]
)
class UserEmailForm(PagureForm):
""" Form to edit the description of a project. """
email = wtforms.StringField("email", [wtforms.validators.DataRequired()])
def __init__(self, *args, **kwargs):
super(UserEmailForm, self).__init__(*args, **kwargs)
if "emails" in kwargs:
if kwargs["emails"]:
self.email.validators.append(
wtforms.validators.NoneOf(kwargs["emails"])
)
else:
self.email.validators = [wtforms.validators.DataRequired()]
class ProjectCommentForm(PagureForm):
""" Form to represent project. """
objid = wtforms.StringField(
"Ticket/Request id", [wtforms.validators.DataRequired()]
)
useremail = wtforms.StringField(
"Email", [wtforms.validators.DataRequired()]
)
class CommentForm(PagureForm):
""" Form to upload a file. """
comment = wtforms.FileField(
"Comment", [wtforms.validators.DataRequired(), file_virus_validator]
)
class EditGroupForm(PagureForm):
""" Form to ask for a password change. """
display_name = wtforms.StringField(
"Group name to display",
[
wtforms.validators.DataRequired(),
wtforms.validators.Length(max=255),
],
)
description = wtforms.StringField(
"Description",
[
wtforms.validators.DataRequired(),
wtforms.validators.Length(max=255),
],
)
class NewGroupForm(EditGroupForm):
""" Form to ask for a password change. """
group_name = wtforms.StringField(
"Group name",
[
wtforms.validators.DataRequired(),
wtforms.validators.Length(max=255),
wtforms.validators.Regexp(STRICT_REGEX, flags=re.IGNORECASE),
],
)
group_type = wtforms.SelectField(
"Group type", [wtforms.validators.DataRequired()], choices=[]
)
def __init__(self, *args, **kwargs):
"""Calls the default constructor with the normal argument but
uses the list of collection provided to fill the choices of the
drop-down list.
"""
super(NewGroupForm, self).__init__(*args, **kwargs)
if "group_types" in kwargs:
self.group_type.choices = [
(grptype, grptype) for grptype in kwargs["group_types"]
]
class EditFileForm(PagureForm):
""" Form used to edit a file. """
content = wtforms.TextAreaField("content", [wtforms.validators.Optional()])
commit_title = wtforms.StringField(
"Title", [wtforms.validators.DataRequired()]
)
commit_message = wtforms.TextAreaField(
"Commit message", [wtforms.validators.optional()]
)
email = wtforms.SelectField(
"Email", [wtforms.validators.DataRequired()], choices=[]
)
branch = wtforms.StringField("Branch", [wtforms.validators.DataRequired()])
def __init__(self, *args, **kwargs):
"""Calls the default constructor with the normal argument but
uses the list of collection provided to fill the choices of the
drop-down list.
"""
super(EditFileForm, self).__init__(*args, **kwargs)
if "emails" in kwargs:
self.email.choices = [
(email.email, email.email) for email in kwargs["emails"]
]
class DefaultBranchForm(PagureForm):
"""Form to change the default branh for a repository"""
branches = wtforms.SelectField(
"default_branch", [wtforms.validators.DataRequired()], choices=[]
)
def __init__(self, *args, **kwargs):
"""Calls the default constructor with the normal argument but
uses the list of collection provided to fill the choices of the
drop-down list.
"""
super(DefaultBranchForm, self).__init__(*args, **kwargs)
if "branches" in kwargs:
self.branches.choices = [
(branch, branch) for branch in kwargs["branches"]
]
class DefaultPriorityForm(PagureForm):
"""Form to change the default priority for a repository"""
priority = wtforms.SelectField(
"default_priority", [wtforms.validators.optional()], choices=[]
)
def __init__(self, *args, **kwargs):
"""Calls the default constructor with the normal argument but
uses the list of collection provided to fill the choices of the
drop-down list.
"""
super(DefaultPriorityForm, self).__init__(*args, **kwargs)
if "priorities" in kwargs:
self.priority.choices = [
(priority, priority) for priority in kwargs["priorities"]
]
class EditCommentForm(PagureForm):
"""Form to verify that comment is not empty"""
update_comment = wtforms.TextAreaField(
"Comment ",
[wtforms.validators.DataRequired()],
)
class ForkRepoForm(PagureForm):
""" Form to fork a project in the API. """
repo = wtforms.StringField(
"The project name", [wtforms.validators.DataRequired()]
)
username = wtforms.StringField(
"User who forked the project", [wtforms.validators.optional()]
)
namespace = wtforms.StringField(
"The project namespace", [wtforms.validators.optional()]
)
class AddReportForm(PagureForm):
"""Form to verify that comment is not empty"""
report_name = wtforms.TextAreaField(
"Report name",
[wtforms.validators.DataRequired()],
)
class PublicNotificationForm(PagureForm):
"""Form to verify that comment is not empty"""
issue_notifs = wtforms.TextAreaField(
"Public issue notification",
[wtforms.validators.optional(), MultipleEmail()],
)
pr_notifs = wtforms.TextAreaField(
"Public PR notification",
[wtforms.validators.optional(), MultipleEmail()],
)
class SubscribtionForm(PagureForm):
""" Form to subscribe to or unsubscribe from an issue or a PR. """
status = wtforms.BooleanField(
"Subscription status",
[wtforms.validators.optional()],
false_values=FALSE_VALUES,
)
class MergePRForm(PagureForm):
delete_branch = wtforms.BooleanField(
"Delete branch after merging",
[wtforms.validators.optional()],
false_values=FALSE_VALUES,
)
class TriggerCIPRForm(PagureForm):
def __init__(self, *args, **kwargs):
# we need to instantiate dynamically because the configuration
# values may change during tests and we want to always respect
# the currently set value
super(TriggerCIPRForm, self).__init__(*args, **kwargs)
choices = []
trigger_ci = pagure_config["TRIGGER_CI"]
if isinstance(trigger_ci, dict):
            # make sure to preserve compatibility with older configs
# which had TRIGGER_CI as a list
for comment, meta in trigger_ci.items():
if meta is not None:
choices.append((comment, comment))
self.comment.choices = choices
comment = wtforms.SelectField(
"comment", [wtforms.validators.Required()], choices=[]
)
class AddGitTagForm(PagureForm):
""" Form to create a new git tag. """
tagname = wtforms.StringField(
"Name of the tag",
[wtforms.validators.DataRequired()],
)
commit_hash = wtforms.StringField(
"Hash of the commit to tag", [wtforms.validators.DataRequired()]
)
message = wtforms.TextAreaField(
"Annotation message", [wtforms.validators.Optional()]
)
force = wtforms.BooleanField(
"Force the creation of the git tag",
[wtforms.validators.optional()],
false_values=FALSE_VALUES,
)
| 30.556117 | 79 | 0.615321 | 3,120 | 30,220 | 5.852244 | 0.141346 | 0.111726 | 0.068459 | 0.013144 | 0.522537 | 0.4865 | 0.446903 | 0.402048 | 0.333096 | 0.296566 | 0 | 0.003344 | 0.26777 | 30,220 | 988 | 80 | 30.587045 | 0.821809 | 0.151621 | 0 | 0.363768 | 0 | 0 | 0.088881 | 0.007851 | 0 | 0 | 0 | 0.001012 | 0 | 1 | 0.030435 | false | 0.001449 | 0.021739 | 0 | 0.272464 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
458b9c87e0590b71665c205e005ed91d9aac38d7 | 5,285 | py | Python | docknv/shell/handlers/service.py | sharingcloud/docknv | 6eec6a576a32cb05278b7af045f90859066c9f1d | [
"MIT"
] | null | null | null | docknv/shell/handlers/service.py | sharingcloud/docknv | 6eec6a576a32cb05278b7af045f90859066c9f1d | [
"MIT"
] | null | null | null | docknv/shell/handlers/service.py | sharingcloud/docknv | 6eec6a576a32cb05278b7af045f90859066c9f1d | [
"MIT"
] | null | null | null | """Service sub commands."""
from docknv.shell.common import exec_handler, load_project
def _init(subparsers):
cmd = subparsers.add_parser(
"service", help="manage one service at a time (service mode)"
)
cmd.add_argument(
"-c", "--config", help="configuration name (swap)", default=None
)
subs = cmd.add_subparsers(dest="service_cmd", metavar="")
# Start
start_cmd = subs.add_parser("start", help="start a container")
start_cmd.add_argument("service", help="service name")
# Stop
stop_cmd = subs.add_parser("stop", help="stop a container")
stop_cmd.add_argument("service", help="service name")
# Restart
restart_cmd = subs.add_parser("restart", help="restart a container")
restart_cmd.add_argument("service", help="service name")
restart_cmd.add_argument(
"-f", "--force", action="store_true", help="force restart"
)
# Run
run_cmd = subs.add_parser("run", help="run a command on a container")
run_cmd.add_argument("service", help="service name")
run_cmd.add_argument("run_command", help="command to run")
run_cmd.add_argument(
    "-d", "--daemon", action="store_true", help="run in background"
)

# Exec
exec_cmd = subs.add_parser(
    "exec", help="execute command on a running container"
)
exec_cmd.add_argument("service", help="service name")
exec_cmd.add_argument("run_command", help="command to run")

# Shell
shell_cmd = subs.add_parser("shell", help="run shell")
shell_cmd.add_argument("service", help="service name")
shell_cmd.add_argument(
    "shell", help="shell executable", default="/bin/bash", nargs="?"
)

# Logs
logs_cmd = subs.add_parser("logs", help="show container logs")
logs_cmd.add_argument("service", help="service name")
logs_cmd.add_argument(
    "-t", "--tail", type=int, help="tail logs", default=0
)
logs_cmd.add_argument(
    "-f",
    "--follow",
    help="follow logs",
    action="store_true",
    default=False,
)

# Push
push_cmd = subs.add_parser("push", help="push a file to a container")
push_cmd.add_argument("service", help="service name")
push_cmd.add_argument("host_path", help="host path")
push_cmd.add_argument("container_path", help="container path")

# Pull
pull_cmd = subs.add_parser("pull", help="pull a file from a container")
pull_cmd.add_argument("service", help="service name")
pull_cmd.add_argument("container_path", help="container path")
pull_cmd.add_argument("host_path", help="host path")

# Build
build_cmd = subs.add_parser("build", help="build a service")
build_cmd.add_argument("service", help="service name")
build_cmd.add_argument("-b", "--build-args", nargs="+", help="build args")
build_cmd.add_argument(
    "--no-cache", help="build without cache", action="store_true"
)


def _handle(args):
    return exec_handler("service", args, globals())


def _handle_build(args):
    project = load_project(args.project)
    project.lifecycle.service.build(
        args.service,
        config_name=args.config,
        build_args=args.build_args,
        no_cache=args.no_cache,
        dry_run=args.dry_run,
    )


def _handle_run(args):
    project = load_project(args.project)
    project.lifecycle.service.run(
        args.service,
        args.run_command,
        daemon=args.daemon,
        config_name=args.config,
        dry_run=args.dry_run,
    )


def _handle_exec(args):
    project = load_project(args.project)
    project.lifecycle.service.execute(
        args.service,
        cmds=[args.run_command],
        config_name=args.config,
        dry_run=args.dry_run,
    )


def _handle_shell(args):
    project = load_project(args.project)
    project.lifecycle.service.shell(
        args.service,
        config_name=args.config,
        shell=args.shell,
        dry_run=args.dry_run,
    )


def _handle_restart(args):
    project = load_project(args.project)
    project.lifecycle.service.restart(
        args.service,
        config_name=args.config,
        force=args.force,
        dry_run=args.dry_run,
    )


def _handle_stop(args):
    project = load_project(args.project)
    project.lifecycle.service.stop(
        args.service, config_name=args.config, dry_run=args.dry_run
    )


def _handle_start(args):
    project = load_project(args.project)
    project.lifecycle.service.start(
        args.service, config_name=args.config, dry_run=args.dry_run
    )


def _handle_push(args):
    project = load_project(args.project)
    project.lifecycle.service.push(
        args.service,
        args.host_path,
        args.container_path,
        config_name=args.config,
        dry_run=args.dry_run,
    )


def _handle_pull(args):
    project = load_project(args.project)
    project.lifecycle.service.pull(
        args.service,
        args.container_path,
        args.host_path,
        config_name=args.config,
        dry_run=args.dry_run,
    )


def _handle_logs(args):
    project = load_project(args.project)
    project.lifecycle.service.logs(
        args.service,
        config_name=args.config,
        tail=args.tail,
        follow=args.follow,
        dry_run=args.dry_run,
    )
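
# --- Editor-added usage sketch (hypothetical CLI name and service; this file
# --- fragment assumes `subs`, `load_project`, and `exec_handler` were defined
# --- earlier in the module):
#
#   $ mycli service run web "echo hello" --daemon
#   $ mycli service logs web --tail 100 --follow
#   $ mycli service push web ./local.cfg /etc/app/app.cfg
#
# Each subcommand is dispatched through exec_handler(), which looks up the
# matching _handle_<subcommand> function in globals().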
| 28.111702 | 78 | 0.653359 | 686 | 5,285 | 4.80758 | 0.123907 | 0.045482 | 0.10188 | 0.048514 | 0.513341 | 0.50849 | 0.470891 | 0.360825 | 0.31413 | 0.090964 | 0 | 0.000241 | 0.216083 | 5,285 | 187 | 79 | 28.262032 | 0.7958 | 0.01457 | 0 | 0.297872 | 0 | 0 | 0.174658 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085106 | false | 0 | 0.007092 | 0.007092 | 0.099291 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
458efec9cad1dcaa401e2261be2ecb2c2d569528 | 1,466 | py | Python | canvasaio/bookmark.py | spapadim/canvasaio | a17e60447acd45cdbd6e4f0f24f3c9ae03a58ca8 | [
"MIT"
] | null | null | null | canvasaio/bookmark.py | spapadim/canvasaio | a17e60447acd45cdbd6e4f0f24f3c9ae03a58ca8 | [
"MIT"
] | null | null | null | canvasaio/bookmark.py | spapadim/canvasaio | a17e60447acd45cdbd6e4f0f24f3c9ae03a58ca8 | [
"MIT"
] | null | null | null | from canvasaio.canvas_object import CanvasObject
from canvasaio.util import combine_kwargs


class Bookmark(CanvasObject):
    def __str__(self):
        return "{} ({})".format(self.name, self.id)

    async def delete(self, **kwargs):
        """
        Delete this bookmark.

        :calls: `DELETE /api/v1/users/self/bookmarks/:id \
        <https://canvas.instructure.com/doc/api/bookmarks.html#method.bookmarks/bookmarks.destroy>`_

        :rtype: :class:`canvasaio.bookmark.Bookmark`
        """
        response = await self._requester.request(
            "DELETE",
            "users/self/bookmarks/{}".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )
        return Bookmark(self._requester, await response.json())

    async def edit(self, **kwargs):
        """
        Modify this bookmark.

        :calls: `PUT /api/v1/users/self/bookmarks/:id \
        <https://canvas.instructure.com/doc/api/bookmarks.html#method.bookmarks/bookmarks.update>`_

        :rtype: :class:`canvasaio.bookmark.Bookmark`
        """
        response = await self._requester.request(
            "PUT",
            "users/self/bookmarks/{}".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )
        response_json = await response.json()

        if "name" in response_json and "url" in response_json:
            super(Bookmark, self).set_attributes(response_json)

        return Bookmark(self._requester, response_json)
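
# --- Editor-added usage sketch (hypothetical; assumes an authenticated
# --- canvasaio client and an existing Bookmark object, awaited inside an
# --- async context):
#
#   updated = await bookmark.edit(bookmark={"name": "Docs", "url": "https://example.com"})
#   await updated.delete()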
| 32.577778 | 100 | 0.621419 | 157 | 1,466 | 5.66242 | 0.318471 | 0.094488 | 0.08099 | 0.031496 | 0.485939 | 0.485939 | 0.485939 | 0.485939 | 0.485939 | 0.485939 | 0 | 0.001807 | 0.244884 | 1,466 | 44 | 101 | 33.318182 | 0.801265 | 0 | 0 | 0.272727 | 0 | 0 | 0.072632 | 0.048421 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.090909 | 0.045455 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4590230328e5c1d8487fcc4dc18325b330d72349 | 2,712 | py | Python | sublime/Packages/BracketHighlighter/bh_modules/tagattrselect.py | herove/dotfiles | 5f08d4d6f518758b3ad0516d9e704edf251e8ff3 | [
"MIT"
] | 1 | 2018-06-23T08:07:39.000Z | 2018-06-23T08:07:39.000Z | sublime/Packages/BracketHighlighter/bh_modules/tagattrselect.py | herove/dotfiles | 5f08d4d6f518758b3ad0516d9e704edf251e8ff3 | [
"MIT"
] | null | null | null | sublime/Packages/BracketHighlighter/bh_modules/tagattrselect.py | herove/dotfiles | 5f08d4d6f518758b3ad0516d9e704edf251e8ff3 | [
"MIT"
] | null | null | null | import bh_plugin


class SelectAttr(bh_plugin.BracketPluginCommand):
    def run(self, edit, name, direction='right'):
        """
        Select next attribute in the given direction.
        Wrap when the end is hit.
        """

        if self.left.size() <= 1:
            return
        tag_name = r'[\w\:\.\-]+'
        attr_name = r'''([\w\-\.:]+)(?:\s*=\s*(?:(?:"((?:\.|[^"])*)")|(?:'((?:\.|[^'])*)')|([^>\s]+)))?'''
        tname = self.view.find(tag_name, self.left.begin)
        current_region = self.selection[0]
        current_pt = self.selection[0].b
        region = self.view.find(attr_name, tname.b)
        selection = self.selection

        if direction == 'left':
            last = None

            # Keep track of last attr
            if region is not None and current_pt <= region.b and region.b < self.left.end:
                last = region

            while region is not None and region.b < self.left.end:
                # Select attribute until you have closest to the left of selection
                if (
                    current_pt > region.b or
                    (
                        current_pt <= region.b and current_region.a >= region.a and not
                        (
                            region.a == current_region.a and region.b == current_region.b
                        )
                    )
                ):
                    selection = [region]
                    last = None
                # Update last attr
                elif last is not None:
                    last = region
                region = self.view.find(attr_name, region.b)

            # Wrap right
            if last is not None:
                selection = [last]
        else:
            first = None

            # Keep track of first attr
            if region is not None and region.b < self.left.end:
                first = region

            while region is not None and region.b < self.left.end:
                # Select closest attr to the right of the selection
                if (
                    current_pt < region.b or
                    (
                        current_pt <= region.b and current_region.a >= region.a and not
                        (
                            region.a == current_region.a and region.b == current_region.b
                        )
                    )
                ):
                    selection = [region]
                    first = None
                    break
                region = self.view.find(attr_name, region.b)

            # Wrap left
            if first is not None:
                selection = [first]

        self.selection = selection


def plugin():
    return SelectAttr
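
# --- Editor-added behaviour sketch (illustrative, not part of the plugin):
# --- with the caret on class="y" inside <a href="x" class="y" id="z">,
# --- direction='right' selects id="z" (wrapping back to href="x" once the
# --- last attribute is passed), while direction='left' selects href="x"
# --- (wrapping to id="z" past the start).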
| 35.220779 | 106 | 0.450959 | 285 | 2,712 | 4.217544 | 0.224561 | 0.087354 | 0.052413 | 0.066556 | 0.481697 | 0.468386 | 0.429285 | 0.404326 | 0.404326 | 0.342762 | 0 | 0.002004 | 0.448009 | 2,712 | 76 | 107 | 35.684211 | 0.800935 | 0.101032 | 0 | 0.357143 | 0 | 0 | 0.04125 | 0.032917 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.017857 | 0.017857 | 0.107143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4591e96ca8eecc10cdc680e19398cdebfb252e8a | 5,189 | py | Python | interpro7dw/interpro/oracle/clans.py | matthiasblum/i7dw | b40e5b9984dec2895956828ddf9db8af4a8ec932 | [
"Apache-2.0"
] | null | null | null | interpro7dw/interpro/oracle/clans.py | matthiasblum/i7dw | b40e5b9984dec2895956828ddf9db8af4a8ec932 | [
"Apache-2.0"
] | null | null | null | interpro7dw/interpro/oracle/clans.py | matthiasblum/i7dw | b40e5b9984dec2895956828ddf9db8af4a8ec932 | [
"Apache-2.0"
] | null | null | null | import json
import pickle
from typing import Dict
import cx_Oracle
from interpro7dw import pfam
from interpro7dw.utils import logger
from interpro7dw.utils.oracle import lob_as_str
from interpro7dw.utils.store import BasicStore


def get_clans(cur: cx_Oracle.Cursor) -> Dict[str, dict]:
    cur.execute(
        """
        SELECT
          C.CLAN_AC, C.NAME, C.DESCRIPTION, D.DBSHORT, CM.MEMBER_AC,
          M.NAME, M.DESCRIPTION, CM.LEN, CM.SCORE
        FROM INTERPRO.CLAN C
        INNER JOIN INTERPRO.CV_DATABASE D
          ON C.DBCODE = D.DBCODE
        INNER JOIN INTERPRO.CLAN_MEMBER CM
          ON C.CLAN_AC = CM.CLAN_AC
        INNER JOIN INTERPRO.METHOD M
          ON CM.MEMBER_AC = M.METHOD_AC
        """
    )

    clans = {}
    for row in cur:
        clan_acc = row[0]
        clan_name = row[1]
        clan_desc = row[2]
        database = row[3]
        member_acc = row[4]
        if row[5] and row[5] != member_acc:
            member_name = row[5]
        else:
            member_name = None
        member_desc = row[6]
        seq_length = row[7]
        score = row[8]

        try:
            c = clans[clan_acc]
        except KeyError:
            c = clans[clan_acc] = {
                "accession": clan_acc,
                "name": clan_name,
                "description": clan_desc,
                "database": database,
                "members": []
            }
        finally:
            c["members"].append((member_acc, member_name, member_desc, score,
                                 seq_length))

    return clans


def iter_alignments(cur: cx_Oracle.Cursor):
    # Fetching DOMAINS (LOB object) as a string
    cur.outputtypehandler = lob_as_str

    cur.execute(
        """
        SELECT QUERY_AC, TARGET_AC, EVALUE, DOMAINS
        FROM INTERPRO.CLAN_MATCH
        """
    )

    for query, target, evalue, json_domains in cur:
        domains = []
        for start, end in json.loads(json_domains):
            domains.append({
                "start": start,
                "end": end
            })

        yield query, target, evalue, domains


def export_clans(ipr_uri: str, pfam_uri: str, clans_file: str,
                 alignments_file: str, **kwargs):
    threshold = kwargs.get("threshold", 1e-2)

    logger.info("loading clans")
    con = cx_Oracle.connect(ipr_uri)
    cur = con.cursor()

    clans = get_clans(cur)

    clan_links = {}
    entry2clan = {}
    for accession, clan in clans.items():
        clan_links[accession] = {}
        for member_acc, _, _, _, seq_length in clan["members"]:
            entry2clan[member_acc] = (accession, seq_length)

    logger.info("exporting alignments")
    with BasicStore(alignments_file, "w") as store:
        alignments = iter_alignments(cur)
        for i, (query, target, evalue, domains) in enumerate(alignments):
            if evalue > threshold:
                continue

            try:
                query_clan_acc, seq_length = entry2clan[query]
            except KeyError:
                continue

            try:
                target_clan_acc, _ = entry2clan[target]
            except KeyError:
                target_clan_acc = None

            store.write((query_clan_acc, query, target, target_clan_acc,
                         evalue, seq_length, json.dumps(domains)))

            if query_clan_acc == target_clan_acc:
                # Query and target from the same clan: update clan's links
                links = clan_links[query_clan_acc]

                if query > target:
                    query, target = target, query

                try:
                    targets = links[query]
                except KeyError:
                    links[query] = {target: evalue}
                else:
                    if target not in targets or evalue < targets[target]:
                        targets[target] = evalue

            if (i + 1) % 1e7 == 0:
                logger.info(f"{i + 1:>15,}")

        logger.info(f"{i + 1:>15,}")

    cur.close()
    con.close()

    logger.info("loading additional details for Pfam clans")
    pfam_clans = pfam.get_clans(pfam_uri)

    logger.info("finalizing")
    for clan_acc, clan in clans.items():
        nodes = []
        for member_acc, member_name, member_desc, score, _ in clan["members"]:
            nodes.append({
                "accession": member_acc,
                "short_name": member_name,
                "name": member_desc,
                "type": "entry",
                "score": score
            })

        links = []
        for query_acc, targets in clan_links[clan_acc].items():
            for target_acc, score in targets.items():
                links.append({
                    "source": query_acc,
                    "target": target_acc,
                    "score": score
                })

        clan["relationships"] = {
            "nodes": nodes,
            "links": links
        }

        if clan_acc in pfam_clans:
            # Replace `description`, add `authors` and `literature`
            clan.update(pfam_clans[clan_acc])

    with open(clans_file, "wb") as fh:
        pickle.dump(clans, fh)

    logger.info("complete")
| 28.827778 | 78 | 0.533629 | 582 | 5,189 | 4.582474 | 0.238832 | 0.041995 | 0.025497 | 0.021372 | 0.036745 | 0.036745 | 0.025497 | 0 | 0 | 0 | 0 | 0.009457 | 0.368279 | 5,189 | 179 | 79 | 28.988827 | 0.804149 | 0.029293 | 0 | 0.166667 | 0 | 0 | 0.060372 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02381 | false | 0 | 0.063492 | 0 | 0.095238 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
459361bbb100cff1efc28163721eb85c951bcf87 | 4,054 | py | Python | tests/test_core/test_controller.py | aaron-parsons/pymalcolm | 4e7ebd6b09382ab7e013278a81097d17873fa5c4 | [
"Apache-2.0"
] | null | null | null | tests/test_core/test_controller.py | aaron-parsons/pymalcolm | 4e7ebd6b09382ab7e013278a81097d17873fa5c4 | [
"Apache-2.0"
] | null | null | null | tests/test_core/test_controller.py | aaron-parsons/pymalcolm | 4e7ebd6b09382ab7e013278a81097d17873fa5c4 | [
"Apache-2.0"
] | null | null | null | import unittest
from annotypes import add_call_types, Anno
from malcolm.core import Controller, Part, PartRegistrar, StringMeta, \
Process, Queue, Get, Return, Put, Error, Post, Subscribe, Update, \
Unsubscribe

with Anno("The return value"):
    AWorld = str


class MyPart(Part):
    my_attribute = None
    exception = None
    context = None

    @add_call_types
    def method(self):
        # type: () -> AWorld
        return 'world'

    def setup(self, registrar):
        # type: (PartRegistrar) -> None
        self.my_attribute = StringMeta(
            description="MyString"
        ).create_attribute_model('hello_block')
        registrar.add_attribute_model(
            "myAttribute", self.my_attribute, self.my_attribute.set_value)
        registrar.add_method_model(self.method)


class TestController(unittest.TestCase):
    maxDiff = None

    def setUp(self):
        self.process = Process("proc")
        self.part = MyPart("test_part")
        self.o = Controller("mri")
        self.o.add_part(self.part)
        self.process.add_controller(self.o)
        self.process.start()

    def tearDown(self):
        self.process.stop(timeout=1)

    def test_init(self):
        assert self.o.mri == "mri"
        assert self.o.process == self.process

    def test_make_view(self):
        b = self.process.block_view("mri")
        method_view = b.method
        attribute_view = b.myAttribute
        dict_view = b.method.returns.elements
        list_view = b.method.returns.required
        assert method_view() == 'world'
        assert attribute_view.value == "hello_block"
        assert dict_view['return'].description == "The return value"
        assert list_view[0] == "return"

    def test_handle_request(self):
        q = Queue()

        request = Get(id=41, path=["mri", "myAttribute"])
        request.set_callback(q.put)
        self.o.handle_request(request)
        response = q.get(timeout=.1)
        self.assertIsInstance(response, Return)
        assert response.id == 41
        assert response.value["value"] == "hello_block"

        self.part.my_attribute.meta.writeable = False
        request = Put(
            id=42, path=["mri", "myAttribute"], value='hello_block2', get=True)
        request.set_callback(q.put)
        self.o.handle_request(request)
        response = q.get(timeout=.1)
        self.assertIsInstance(response, Error)  # not writeable
        assert response.id == 42

        self.part.my_attribute.meta.writeable = True
        self.o.handle_request(request)
        response = q.get(timeout=.1)
        self.assertIsInstance(response, Return)
        assert response.id == 42
        assert response.value == "hello_block2"

        request = Post(id=43, path=["mri", "method"])
        request.set_callback(q.put)
        self.o.handle_request(request)
        response = q.get(timeout=.1)
        self.assertIsInstance(response, Return)
        assert response.id == 43
        assert response.value == "world"

        # cover the controller._handle_post path for parameters
        request = Post(id=43, path=["mri", "method"], parameters={'dummy': 1})
        request.set_callback(q.put)
        self.o.handle_request(request)
        response = q.get(timeout=.1)
        self.assertIsInstance(response, Error)
        assert response.id == 43
        assert str(response.message) == "Method passed argument 'dummy' which is not in []"

        request = Subscribe(id=44, path=["mri", "myAttribute"], delta=False)
        request.set_callback(q.put)
        self.o.handle_request(request)
        response = q.get(timeout=.1)
        self.assertIsInstance(response, Update)
        assert response.id == 44
        assert response.value["typeid"] == "epics:nt/NTScalar:1.0"
        assert response.value["value"] == "hello_block2"

        request = Unsubscribe(id=44)
        request.set_callback(q.put)
        self.o.handle_request(request)
        response = q.get(timeout=.1)
        self.assertIsInstance(response, Return)
        assert response.id == 44
| 33.783333 | 91 | 0.629008 | 483 | 4,054 | 5.165631 | 0.221532 | 0.024048 | 0.030862 | 0.050501 | 0.381162 | 0.34509 | 0.319439 | 0.296994 | 0.296994 | 0.296994 | 0 | 0.013509 | 0.251357 | 4,054 | 119 | 92 | 34.067227 | 0.808567 | 0.028614 | 0 | 0.333333 | 0 | 0 | 0.081363 | 0.005339 | 0 | 0 | 0 | 0 | 0.270833 | 1 | 0.072917 | false | 0.010417 | 0.03125 | 0.010417 | 0.177083 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4594f72e405e0824b4be1fe0fefd39e46766217c | 684 | py | Python | dado.py | crysller/Dado | 63c77bb81bec6fe9d2add2b0d14ba5d399b30018 | [
"MIT"
] | null | null | null | dado.py | crysller/Dado | 63c77bb81bec6fe9d2add2b0d14ba5d399b30018 | [
"MIT"
] | null | null | null | dado.py | crysller/Dado | 63c77bb81bec6fe9d2add2b0d14ba5d399b30018 | [
"MIT"
] | null | null | null | from random import randint
from time import sleep

continuar = 'Yy'
while continuar in 'Yy':
    print("\033[1;32mLet's play. Example: 1d20 or 5d10\033[m")
    opc = input()
    # parse "<count>d<sides>", e.g. "5d10" -> 5 dice with 10 sides each
    qntDados, valorDado = opc.lower().split('d')
    qntDados = int(qntDados)
    valorDado = int(valorDado)
    if qntDados > 10 or valorDado not in [4, 6, 8, 10, 12, 20, 100]:
        print('\033[1;31mError! The number of dice or the die value is invalid!\033[m')
    elif valorDado in [4, 6, 8, 10, 12, 20, 100]:
        dado = 1
        for l in range(qntDados):
            sleep(0.5)
            print(f'Value of die 0{dado}: {randint(1, valorDado)}')
            dado = dado + 1
    continuar = str(input('Play again? Y or N? '))
sleep(1)
print('Até! o/') | 34.2 | 89 | 0.599415 | 110 | 684 | 3.727273 | 0.536364 | 0.039024 | 0.043902 | 0.02439 | 0.068293 | 0.068293 | 0.068293 | 0.068293 | 0 | 0 | 0 | 0.115686 | 0.254386 | 684 | 20 | 90 | 34.2 | 0.688235 | 0 | 0 | 0 | 0 | 0 | 0.29635 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.105263 | 0 | 0.105263 | 0.210526 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4595f169799444e02f534a9747ed945159901694 | 1,094 | py | Python | test/signature.py | orbs-network/orbs-client-sdk-python | 1e8d5699ee98e2ff59d36081eb569237949a558b | [
"MIT"
] | 5 | 2018-08-10T15:39:46.000Z | 2020-02-10T03:14:51.000Z | test/signature.py | orbs-network/orbs-client-sdk-python | 1e8d5699ee98e2ff59d36081eb569237949a558b | [
"MIT"
] | 3 | 2018-06-22T07:32:46.000Z | 2018-12-13T14:16:56.000Z | test/signature.py | orbs-network/orbs-client-sdk-python | 1e8d5699ee98e2ff59d36081eb569237949a558b | [
"MIT"
] | 2 | 2018-07-01T12:45:38.000Z | 2020-04-13T11:09:36.000Z | import unittest
from os import sys, path

sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))

from crypto.signature import Signature


class TestSignatureFunctions(unittest.TestCase):
    def test_sign_ed25519(self):
        private_key = bytes.fromhex('3b24b5f9e6b1371c3b5de2e402a96930eeafe52111bb4a1b003e5ecad3fab53892d469d7c004cc0b24a192d9457836bf38effa27536627ef60718b00b0f33152')
        public_key = bytes.fromhex('92d469d7c004cc0b24a192d9457836bf38effa27536627ef60718b00b0f33152')
        data = b'This is what we want to sign'

        sig = Signature.sign_ed25519(private_key, data)
        self.assertEqual(Signature.ED25519_SIGNATURE_SIZE_BYTES, len(sig), 'signature length should equal 64 bytes')
        self.assertEqual(Signature.verify_ed25519(public_key, data, sig), True, 'signature cannot be verified')

        modified_sig = bytearray(sig)
        modified_sig[0] += 1  # corrupt the signature
        sig = bytes(modified_sig)
        self.assertEqual(Signature.verify_ed25519(public_key, data, sig), False)


if __name__ == '__main__':
    unittest.main()
| 42.076923 | 167 | 0.761426 | 116 | 1,094 | 6.939655 | 0.5 | 0.03354 | 0.089441 | 0.074534 | 0.131677 | 0.131677 | 0.131677 | 0.131677 | 0.131677 | 0 | 0 | 0.167568 | 0.154479 | 1,094 | 25 | 168 | 43.76 | 0.702703 | 0.019196 | 0 | 0 | 0 | 0 | 0.27451 | 0.179272 | 0 | 0 | 0 | 0 | 0.166667 | 1 | 0.055556 | false | 0 | 0.166667 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
459719287cbd8d4d670931c7497d555a6c24e23b | 318 | py | Python | q3volume.py | tiwa2022/Lab2 | 22ebbe4367316a969c0a1642747516b606618ca5 | [
"MIT"
] | 1 | 2022-01-27T16:57:53.000Z | 2022-01-27T16:57:53.000Z | q3volume.py | tiwa2022/Lab2 | 22ebbe4367316a969c0a1642747516b606618ca5 | [
"MIT"
] | null | null | null | q3volume.py | tiwa2022/Lab2 | 22ebbe4367316a969c0a1642747516b606618ca5 | [
"MIT"
] | null | null | null | # input
print("This program finds the volume of a cylinder")
PI = 3.14159265359
diameter = float(input("Enter diameter: "))
height = float(input("Enter height: "))
# processing: V = PI * r**2 * h, with r = diameter / 2
volume = PI * (diameter / 2) ** 2 * height
# output
print("The volume of a cylinder with a diameter of", diameter, "and a height of", height, "is", volume)
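
# Editor-added sanity check (illustrative values):
# diameter = 10, height = 5  ->  volume = PI * (10 / 2) ** 2 * 5 ~= 392.70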
| 26.5 | 105 | 0.72327 | 47 | 318 | 4.893617 | 0.489362 | 0.078261 | 0.095652 | 0.104348 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043956 | 0.141509 | 318 | 11 | 106 | 28.909091 | 0.798535 | 0.069182 | 0 | 0 | 0 | 0 | 0.453925 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.333333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
459b4482cc67e06b82631e6053118e1bd6455789 | 2,219 | py | Python | ebay_accounts/test_settings.py | luke-dixon/django-ebay-accounts | 54cf0e90b75dfbdd63bcd588f3c4771ebe1297c0 | [
"BSD-3-Clause"
] | 4 | 2018-01-28T20:10:11.000Z | 2020-09-06T14:30:36.000Z | ebay_accounts/test_settings.py | luke-dixon/django-ebay-accounts | 54cf0e90b75dfbdd63bcd588f3c4771ebe1297c0 | [
"BSD-3-Clause"
] | 7 | 2017-06-04T08:50:06.000Z | 2020-09-06T16:03:53.000Z | ebay_accounts/test_settings.py | luke-dixon/django-ebay-accounts | 54cf0e90b75dfbdd63bcd588f3c4771ebe1297c0 | [
"BSD-3-Clause"
] | 7 | 2017-06-01T09:51:35.000Z | 2021-05-25T16:01:53.000Z | # -*- coding: utf-8 -*-
"""
Test Settings
"""
import django
APP_NAME = 'ebay_accounts'

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'ebay_accounts',
)

MIDDLEWARE = (
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

if django.VERSION[:2] >= (1, 8):
    TEMPLATES = [
        {
            'BACKEND': 'django.template.backends.django.DjangoTemplates',
            'DIRS': [
            ],
            'APP_DIRS': True,
            'OPTIONS': {
                'context_processors': [
                    'django.template.context_processors.debug',
                    'django.template.context_processors.request',
                    'django.contrib.auth.context_processors.auth',
                    'django.contrib.messages.context_processors.messages',
                ],
            },
        },
    ]

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
    },
}

SECRET_KEY = '^&*TESTING123^&*'

ROOT_URLCONF = APP_NAME + '.urls'

LOGGING = {
    'version': 1,
    'formatters': {
        'simple': {
            'format': '%(levelname)s %(module)s: %(message)s'
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple',
        }
    },
    'loggers': {
        'ebay_accounts': {
            'handlers': ['console'],
            'level': 'DEBUG',
        }
    }
}

EBAY_SANDBOX_DEVID = 'TEST_SANDBOX_DEVID'
EBAY_SANDBOX_APPID = 'TEST_SANDBOX_APPID'
EBAY_SANDBOX_CERTID = 'TEST_SANDBOX_CERTID'
EBAY_SANDBOX_RU_NAME = 'TEST_SANDBOX_RU_NAME'
EBAY_PRODUCTION_DEVID = 'TEST_PRODUCTION_DEVID'
EBAY_PRODUCTION_APPID = 'TEST_PRODUCTION_APPID'
EBAY_PRODUCTION_CERTID = 'TEST_PRODUCTION_CERTID'
EBAY_PRODUCTION_RU_NAME = 'TEST_PRODUCTION_RU_NAME'

TIME_ZONE = 'Europe/London'
USE_TZ = True

# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
| 26.105882 | 74 | 0.605228 | 206 | 2,219 | 6.262136 | 0.42233 | 0.08062 | 0.039535 | 0.048062 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007884 | 0.256872 | 2,219 | 84 | 75 | 26.416667 | 0.774409 | 0.04822 | 0 | 0.084507 | 0 | 0 | 0.482644 | 0.308131 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.014085 | 0 | 0.014085 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
459db3d81ac904122652cb0bff922fb7f630a44a | 5,475 | py | Python | export/views.py | felix-engelmann/badgecc | 5bc0ced339f18737e24cc34935a87e96ae14a825 | [
"MIT"
] | null | null | null | export/views.py | felix-engelmann/badgecc | 5bc0ced339f18737e24cc34935a87e96ae14a825 | [
"MIT"
] | null | null | null | export/views.py | felix-engelmann/badgecc | 5bc0ced339f18737e24cc34935a87e96ae14a825 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.template.loader import render_to_string
from django.http import HttpResponse
from django.conf import settings
from persons.models import Role, Right, Department, Person
from math import floor
import os
from subprocess import Popen, PIPE
import tempfile
# Create your views here.
def _render_tex(texcode):
with tempfile.TemporaryDirectory(dir=os.path.join(settings.BASE_DIR, 'tex')) as tempdir:
# Create subprocess, supress output with PIPE and
# run latex twice to generate the TOC properly.
# Finally read the generated pdf.
env = os.environ.copy()
#env["TEXINPUTS"] = env["TEXINPUTS"]+":"+os.path.join(settings.BASE_DIR, 'media')
for i in range(1):
process = Popen(
['pdflatex', '-output-directory', tempdir, '-halt-on-error'],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
env=env,
)
out, err = process.communicate(bytes(texcode, 'UTF-8'))
if process.returncode:
return HttpResponse(err.decode('UTF-8')+'\n\n\n'+out.decode('UTF-8')+'\n\n\n'+texcode)
with open(os.path.join(tempdir, 'texput.pdf'), 'rb') as f:
pdf = f.read()
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'filename="{}"'.format("badges.pdf")
response.write(pdf)
return response
def _make_side(x,y,person,template):
render_to_string("export/tex/front.tex",{'x':"felix"})
def _make_sheet(front,back):
"""
Arranges a double sided sheet
Args:
front: plain tikz commands for front sides
back: plain tikz commands for back sides
Returns:
String of self contained TeX commands
"""
sheet=""
sheet+="\\centering\\begin{tikzpicture}[font=\\sffamily]\n"
sheet+=front
sheet+="\\end{tikzpicture}\n"
sheet+="\\newpage\n"
sheet+="\\centering\\begin{tikzpicture}[font=\\sffamily]\n"
sheet+=back
sheet+="\\end{tikzpicture}\n"
sheet+="\\newpage\n"
return sheet
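
# --- Editor-added usage sketch (illustrative): each call pairs one page of
# --- fronts with one page of backs, e.g.
# ---   _make_sheet("\\node at (0,0) {front};", "\\node at (0,0) {back};")
# --- returns two tikzpicture pages separated by \newpage, ready to be fed to
# --- the export/tex/wrapper.tex template used by texit() below.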


def texit(persons):
    for p in persons:
        p.printed = True
        p.save()
        r = set(p.extra_rights.all())
        r = r | set(p.department.rights.all())
        if p.role:
            r = r | set(p.role.rights.all())
        rslug = []
        for ro in list(r):
            rslug.append(ro.slug)
        print(rslug)
        p.calc_rights = list(rslug)

    # sheet layout in cm
    badge_height = 6
    badge_width = 9.5
    # count of badges
    badge_rows = 4
    badge_cols = 2

    document = ""
    evenpage = ""
    oddpage = ""
    for idx, person in enumerate(persons):
        evenpage += render_to_string(
            "export/tex/front.tex",
            {'x': (idx % badge_cols) * badge_width,
             'y': floor((idx % (badge_cols * badge_rows)) / badge_cols) * badge_height,
             'person': person})
        oddpage += render_to_string(
            "export/tex/back.tex",
            {'x': badge_width - (idx % badge_cols) * badge_width,
             'y': floor((idx % (badge_cols * badge_rows)) / badge_cols) * badge_height,
             'person': person})
        # last badge of a full sheet (badge_cols * badge_rows - 1 == 7)
        if idx % (badge_cols * badge_rows) == 7:
            document += _make_sheet(evenpage, oddpage)
            evenpage = ""
            oddpage = ""

    if evenpage != "":
        document += _make_sheet(evenpage, oddpage)

    # return document
    return _render_tex(render_to_string("export/tex/wrapper.tex", {'content': document}))


def index(request):
    if request.method == 'POST':
        persons = Person.objects.filter(id__in=request.POST.getlist('print')).order_by("department")
        return texit(persons)
    else:
        persons = Person.objects.order_by("department")
        for p in persons:
            r = set(p.extra_rights.all())
            r = r | set(p.department.rights.all())
            if p.role:
                r = r | set(p.role.rights.all())
            p.calc_rights = list(r)
        return render(request, "export/index.html", {'person': persons})


def update(request):
    if request.method == 'POST':
        persons = Person.objects.filter(id__in=request.POST.getlist('print')).order_by("department")
        return texit(persons)
    else:
        persons = Person.objects.order_by("department")
        for p in persons:
            r = set(p.extra_rights.all())
            r = r | set(p.department.rights.all())
            if p.role:
                r = r | set(p.role.rights.all())
            p.calc_rights = list(r)
        return render(request, "export/updates.html", {'person': persons})


def dep(request):
    if request.method == 'POST':
        persons = Person.objects.filter(id__in=request.POST.getlist('print')).order_by("department")
        # print(persons)
        # print(request.GET['id'])
        return texit(persons)
        # return render(request, "export/departments.html", {'departments': None})
    else:
        departments = Department.objects.order_by("name")
        for d in departments:
            d.persons = Person.objects.filter(department=d)
            for p in d.persons:
                r = set(p.extra_rights.all())
                r = r | set(p.department.rights.all())
                if p.role:
                    r = r | set(p.role.rights.all())
                p.calc_rights = list(r)
        return render(request, "export/departments.html", {'departments': departments})
| 29.755435 | 184 | 0.588493 | 673 | 5,475 | 4.692422 | 0.273403 | 0.015199 | 0.018999 | 0.015199 | 0.479417 | 0.437935 | 0.41387 | 0.368588 | 0.317923 | 0.317923 | 0 | 0.002509 | 0.271963 | 5,475 | 183 | 185 | 29.918033 | 0.789764 | 0.103744 | 0 | 0.424779 | 0 | 0 | 0.117901 | 0.029835 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061947 | false | 0 | 0.079646 | 0 | 0.230089 | 0.044248 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
459fe241fa9cf83ded4a40b6c2040315ffceaa98 | 7,118 | py | Python | Gems/Atom/Feature/Common/Editor/Scripts/ColorGrading/lut_helper.py | fromasmtodisasm/o3de | 0d728a76778cb0ca88caa5c07f17162fac668b2a | [
"Apache-2.0",
"MIT"
] | null | null | null | Gems/Atom/Feature/Common/Editor/Scripts/ColorGrading/lut_helper.py | fromasmtodisasm/o3de | 0d728a76778cb0ca88caa5c07f17162fac668b2a | [
"Apache-2.0",
"MIT"
] | null | null | null | Gems/Atom/Feature/Common/Editor/Scripts/ColorGrading/lut_helper.py | fromasmtodisasm/o3de | 0d728a76778cb0ca88caa5c07f17162fac668b2a | [
"Apache-2.0",
"MIT"
] | null | null | null | # coding:utf-8
#!/usr/bin/python
#
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
# lut_helper.py

import sys
import os
import argparse
import math
import site
import pathlib
from pathlib import Path
import logging as _logging

import numpy as np

# ------------------------------------------------------------------------
_MODULENAME = 'ColorGrading.lut_helper'

import ColorGrading.initialize
ColorGrading.initialize.start()

_LOGGER = _logging.getLogger(_MODULENAME)
_LOGGER.debug('Initializing: {0}.'.format(_MODULENAME))

try:
    import OpenImageIO as oiio
except ImportError as e:
    _LOGGER.error(f"invalid import: {e}")
    sys.exit(1)
# ------------------------------------------------------------------------


# ------------------------------------------------------------------------
# Transform from high dynamic range to normalized
from ColorGrading import inv_shaper_transform

# Transform from normalized range to high dynamic range
from ColorGrading import shaper_transform

# utils
from ColorGrading import get_uv_coord
from ColorGrading import log2
from ColorGrading import is_power_of_two

shaper_presets = {"Log2-48nits": (-6.5, 6.5),
                  "Log2-1000nits": (-12.0, 10.0),
                  "Log2-2000nits": (-12.0, 11.0),
                  "Log2-4000nits": (-12.0, 12.0)}


def transform_exr(image_buffer, out_image_buffer, op, out_path, write_exr):
    # Set the destination image pixels by applying the shaper function.
    # NOTE: `bias` and `scale` are module-level globals computed in the
    # __main__ block below.
    for y in range(out_image_buffer.ybegin, out_image_buffer.yend):
        for x in range(out_image_buffer.xbegin, out_image_buffer.xend):
            src_pixel = image_buffer.getpixel(x, y)
            # _LOGGER.debug(f'src_pixel is: {src_pixel}')
            if op == 0:
                dst_pixel = (inv_shaper_transform(bias, scale, src_pixel[0]),
                             inv_shaper_transform(bias, scale, src_pixel[1]),
                             inv_shaper_transform(bias, scale, src_pixel[2]))
                out_image_buffer.setpixel(x, y, dst_pixel)
            elif op == 1:
                dst_pixel = (shaper_transform(bias, scale, src_pixel[0]),
                             shaper_transform(bias, scale, src_pixel[1]),
                             shaper_transform(bias, scale, src_pixel[2]))
                out_image_buffer.setpixel(x, y, dst_pixel)
            else:
                # Unspecified operation. Just write zeroes
                out_image_buffer.setpixel(x, y, 0.0, 0.0, 0.0)

    if write_exr:
        _LOGGER.info(f"writing {out_path}.exr ...")
        out_image_buffer.write(out_path + '.exr', "float")

    return out_image_buffer
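
# --- Editor-added worked example (illustrative numbers): for the
# --- "Log2-48nits" preset the stops are (-6.5, 6.5), so with
# --- middle_grey = log2(0.18) ~ -2.474:
# ---   log_min ~ -8.974, log_max ~ 4.026,
# ---   scale = 1 / (log_max - log_min) ~ 0.0769, bias = -scale * log_min ~ 0.690
# --- With these values shaper_transform() and inv_shaper_transform() should
# --- invert each other to within floating-point error.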


###########################################################################
# Main Code Block, runs this script as main (testing)
# -------------------------------------------------------------------------
if __name__ == '__main__':
    """Run this file as main"""

    operations = {"pre-grading": 0, "post-grading": 1}

    parser = argparse.ArgumentParser()
    parser.add_argument('--i', type=str, required=True, help='input file')
    parser.add_argument('--shaper', type=str, required=True,
                        help='shaper preset. Should be one of \'Log2-48nits\', \'Log2-1000nits\', \'Log2-2000nits\', \'Log2-4000nits\'')
    parser.add_argument('--op', type=str, required=True, help='operation. Should be \'pre-grading\' or \'post-grading\'')
    parser.add_argument('--o', type=str, required=True, help='output file')
    parser.add_argument('-e', dest='writeExr', action='store_true', help='output lut as exr file (float)')
    parser.add_argument('-l', dest='write3dl', action='store_true', help='output lut as .3dl file')
    parser.add_argument('-a', dest='writeAsset', action='store_true', help='write out lut as O3DE .azasset file')
    args = parser.parse_args()

    # Check for valid shaper type
    invalid_shaper = (0, 0)
    invalid_op = -1

    shaper_limits = shaper_presets.get(args.shaper, invalid_shaper)
    if shaper_limits == invalid_shaper:
        _LOGGER.error("invalid shaper")
        sys.exit(1)

    op = operations.get(args.op, invalid_op)
    if op == invalid_op:
        _LOGGER.error("invalid operation")
        sys.exit(1)

    # input validation
    input_file = Path(args.i)
    if input_file.is_file():
        # file exists
        pass
    else:
        FILE_ERROR_MSG = f'File does not exist: {input_file}'
        _LOGGER.error(FILE_ERROR_MSG)
        # raise FileNotFoundError(FILE_ERROR_MSG)
        sys.exit(1)

    # Read input image
    # buf = oiio.ImageBuf("linear_lut.exr")
    image_buffer = oiio.ImageBuf(args.i)
    image_spec = image_buffer.spec()

    _LOGGER.info(f"Resolution is x:{image_spec.height}, y:{image_spec.width}")

    if image_spec.height < 16:
        _LOGGER.info(f"invalid input file dimensions: x is {image_spec.height}. Expected LUT with height dimension >= 16 pixels")
        sys.exit(1)
    if not is_power_of_two(image_buffer.spec().height):
        _LOGGER.info(f"invalid input file dimensions: {image_buffer.spec().height}. Expected LUT dimensions power of 2: 16, 32, or 64 height")
        sys.exit(1)
    elif image_spec.width != image_spec.height * image_spec.height:
        _LOGGER.info("invalid input file dimensions. Expect lengthwise LUT with dimension W: s*s X H: s, where s is the size of the LUT")
        sys.exit(1)

    lut_size = image_spec.height

    middle_grey = 0.18
    lower_stops = shaper_limits[0]
    upper_stops = shaper_limits[1]

    middle_grey = math.log(middle_grey, 2.0)
    log_min = middle_grey + lower_stops
    log_max = middle_grey + upper_stops
    scale = 1.0 / (log_max - log_min)
    bias = -scale * log_min

    _LOGGER.info("Shaper: range in stops %.1f -> %.1f (linear: %.3f -> %.3f) logMin %.3f logMax %.3f scale %.3f bias %.3f\n" %
                 (lower_stops, upper_stops, middle_grey * math.pow(2.0, lower_stops), middle_grey * math.pow(2.0, upper_stops),
                  log_min, log_max, scale, bias))

    buffer_name = Path(args.o).name

    # Create a writing image
    out_image_spec = oiio.ImageSpec(image_buffer.spec().width, image_buffer.spec().height, 3, "float")
    out_image_buffer = oiio.ImageBuf(out_image_spec)

    # write out the modified exr file
    write_exr = False
    if args.writeExr:
        write_exr = True

    out_image_buffer = transform_exr(image_buffer, out_image_buffer, op, args.o, write_exr)

    from ColorGrading.exr_to_3dl_azasset import generate_lut_values
    lut_intervals, lut_values = generate_lut_values(image_spec, out_image_buffer)

    if args.write3dl:
        from ColorGrading.exr_to_3dl_azasset import write_3DL
        write_3DL(args.o, lut_size, lut_intervals, lut_values)

    if args.writeAsset:
        from ColorGrading import AZASSET_LUT
        from ColorGrading.from_3dl_to_azasset import write_azasset
        write_azasset(args.o, lut_intervals, lut_values, AZASSET_LUT)
| 38.064171 | 139 | 0.631779 | 944 | 7,118 | 4.54661 | 0.246822 | 0.056384 | 0.045666 | 0.033551 | 0.179171 | 0.146552 | 0.14096 | 0.048462 | 0.030289 | 0.030289 | 0 | 0.023542 | 0.212279 | 7,118 | 186 | 140 | 38.268817 | 0.74193 | 0.147232 | 0 | 0.12605 | 0 | 0.02521 | 0.16807 | 0.007578 | 0 | 0 | 0 | 0 | 0 | 1 | 0.008403 | false | 0.016807 | 0.193277 | 0 | 0.210084 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45a097cba2dbbe048aae9c8954d61af23516651a | 7,560 | py | Python | recon/nmap.py | bbhunter/recon-pipeline | 234fe5e639c2a6ef7573410eb4765df07fb352d1 | [
"MIT"
] | null | null | null | recon/nmap.py | bbhunter/recon-pipeline | 234fe5e639c2a6ef7573410eb4765df07fb352d1 | [
"MIT"
] | null | null | null | recon/nmap.py | bbhunter/recon-pipeline | 234fe5e639c2a6ef7573410eb4765df07fb352d1 | [
"MIT"
] | 1 | 2020-01-30T10:27:30.000Z | 2020-01-30T10:27:30.000Z | import pickle
import logging
import subprocess
import concurrent.futures
from pathlib import Path
import luigi
from luigi.util import inherits
from recon.config import defaults
from recon.masscan import ParseMasscanOutput


@inherits(ParseMasscanOutput)
class ThreadedNmapScan(luigi.Task):
    """ Run nmap against specific targets and ports gained from the ParseMasscanOutput Task.

    nmap commands are structured like the example below.

    nmap --open -sT -sC -T 4 -sV -Pn -p 43,25,21,53,22 -oA htb-targets-nmap-results/nmap.10.10.10.155-tcp 10.10.10.155

    The corresponding luigi command is shown below.

    PYTHONPATH=$(pwd) luigi --local-scheduler --module recon.nmap ThreadedNmapScan --target-file htb-targets --top-ports 5000

    Args:
        threads: number of threads for parallel nmap command execution
        rate: desired rate for transmitting packets (packets per second) *--* Required by upstream Task
        interface: use the named raw network interface, such as "eth0" *--* Required by upstream Task
        top_ports: Scan top N most popular ports *--* Required by upstream Task
        ports: specifies the port(s) to be scanned *--* Required by upstream Task
        target_file: specifies the file on disk containing a list of ips or domains *--* Required by upstream Task
        results_dir: specifies the directory on disk to which all Task results are written *--* Required by upstream Task
    """

    threads = luigi.Parameter(default=defaults.get("threads", ""))

    def requires(self):
        """ ThreadedNmapScan depends on ParseMasscanOutput to run.

        TargetList expects target_file as a parameter.
        Masscan expects rate, target_file, interface, and either ports or top_ports as parameters.

        Returns:
            luigi.Task - ParseMasscanOutput
        """
        args = {
            "results_dir": self.results_dir,
            "rate": self.rate,
            "target_file": self.target_file,
            "top_ports": self.top_ports,
            "interface": self.interface,
            "ports": self.ports,
        }
        return ParseMasscanOutput(**args)

    def output(self):
        """ Returns the target output for this task.

        Naming convention for the output folder is TARGET_FILE-nmap-results.

        The output folder will be populated with all of the output files generated by
        any nmap commands run.  Because the nmap command uses -oA, there will be three
        files per target scanned: .xml, .nmap, .gnmap.

        Returns:
            luigi.local_target.LocalTarget
        """
        return luigi.LocalTarget(f"{self.results_dir}/nmap-{self.target_file}-results")

    def run(self):
        """ Parses pickled target info dictionary and runs targeted nmap scans against only open ports. """
        try:
            self.threads = abs(int(self.threads))
        except TypeError:
            return logging.error("The value supplied to --threads must be a non-negative integer.")

        ip_dict = pickle.load(open(self.input().path, "rb"))

        nmap_command = [  # placeholders will be overwritten with appropriate info in loop below
            "nmap",
            "--open",
            "PLACEHOLDER-IDX-2",
            "-n",
            "-sC",
            "-T",
            "4",
            "-sV",
            "-Pn",
            "-p",
            "PLACEHOLDER-IDX-10",
            "-oA",
        ]

        commands = list()

        """
        ip_dict structure
        {
            "IP_ADDRESS":
                {'udp': {"161", "5000", ... },
                ...
                i.e. {protocol: set(ports) }
        }
        """
        for target, protocol_dict in ip_dict.items():
            for protocol, ports in protocol_dict.items():
                tmp_cmd = nmap_command[:]
                tmp_cmd[2] = "-sT" if protocol == "tcp" else "-sU"

                # arg to -p, the comma-separated list of open ports
                tmp_cmd[10] = ",".join(ports)

                # arg to -oA, will drop into subdir off curdir
                tmp_cmd.append(f"{self.output().path}/nmap.{target}-{protocol}")
                tmp_cmd.append(target)  # target as final arg to nmap

                commands.append(tmp_cmd)

        # basically mkdir -p, won't error out if already there
        Path(self.output().path).mkdir(parents=True, exist_ok=True)

        with concurrent.futures.ThreadPoolExecutor(max_workers=self.threads) as executor:
            executor.map(subprocess.run, commands)
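
# --- Editor-added example (illustrative): for
# ---   ip_dict = {"10.0.0.5": {"tcp": {"22", "80"}}}
# --- the loop above builds a command equivalent to
# ---   nmap --open -sT -n -sC -T 4 -sV -Pn -p 22,80 \
# ---       -oA <results-dir>/nmap.10.0.0.5-tcp 10.0.0.5
# --- (the port order may vary, since ports are stored in a set).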


@inherits(ThreadedNmapScan)
class SearchsploitScan(luigi.Task):
    """ Run searchsploit against each nmap*.xml file in the TARGET-nmap-results directory and write results to disk.

    searchsploit commands are structured like the example below.

    searchsploit --nmap htb-targets-nmap-results/nmap.10.10.10.155-tcp.xml

    The corresponding luigi command is shown below.

    PYTHONPATH=$(pwd) luigi --local-scheduler --module recon.nmap SearchsploitScan --target-file htb-targets --top-ports 5000

    Args:
        threads: number of threads for parallel nmap command execution *--* Required by upstream Task
        rate: desired rate for transmitting packets (packets per second) *--* Required by upstream Task
        interface: use the named raw network interface, such as "eth0" *--* Required by upstream Task
        top_ports: Scan top N most popular ports *--* Required by upstream Task
        ports: specifies the port(s) to be scanned *--* Required by upstream Task
        target_file: specifies the file on disk containing a list of ips or domains *--* Required by upstream Task
        results_dir: specifies the directory on disk to which all Task results are written *--* Required by upstream Task
    """

    def requires(self):
        """ SearchsploitScan depends on ThreadedNmapScan to run.

        TargetList expects target_file as a parameter.
        Masscan expects rate, target_file, interface, and either ports or top_ports as parameters.
        ThreadedNmapScan expects threads

        Returns:
            luigi.Task - ThreadedNmapScan
        """
        args = {
            "rate": self.rate,
            "ports": self.ports,
            "threads": self.threads,
            "top_ports": self.top_ports,
            "interface": self.interface,
            "target_file": self.target_file,
            "results_dir": self.results_dir,
        }
        return ThreadedNmapScan(**args)

    def output(self):
        """ Returns the target output for this task.

        Naming convention for the output folder is TARGET_FILE-searchsploit-results.

        The output folder will be populated with all of the output files generated by
        any searchsploit commands run.

        Returns:
            luigi.local_target.LocalTarget
        """
        return luigi.LocalTarget(f"{self.results_dir}/searchsploit-{self.target_file}-results")

    def run(self):
        """ Grabs the xml files created by ThreadedNmapScan and runs searchsploit --nmap on each one, saving the output. """
        for entry in Path(self.input().path).glob("nmap*.xml"):
            proc = subprocess.run(["searchsploit", "--nmap", str(entry)], stderr=subprocess.PIPE)

            if proc.stderr:
                Path(self.output().path).mkdir(parents=True, exist_ok=True)

                # change wall-searchsploit-results/nmap.10.10.10.157-tcp to 10.10.10.157
                target = entry.stem.replace("nmap.", "").replace("-tcp", "").replace("-udp", "")

                Path(
                    f"{self.output().path}/searchsploit.{target}-{entry.stem[-3:]}.txt"
                ).write_bytes(proc.stderr)
| 38.571429 | 121 | 0.626852 | 925 | 7,560 | 5.069189 | 0.261622 | 0.034122 | 0.049904 | 0.060994 | 0.517594 | 0.493495 | 0.489657 | 0.459373 | 0.441459 | 0.441459 | 0 | 0.014585 | 0.274471 | 7,560 | 195 | 122 | 38.769231 | 0.840292 | 0.497884 | 0 | 0.275 | 0 | 0 | 0.156693 | 0.066934 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075 | false | 0 | 0.1125 | 0 | 0.2875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45a34389a4196f702824ce54a8d761be8862131f | 3,041 | py | Python | docs/source/generate_cli_help.py | Conzel/CompressAI | 55be017e93e25fc936fe0fb4fa5851b3c1032dfc | [
"BSD-3-Clause-Clear"
] | 515 | 2020-06-24T23:48:02.000Z | 2022-03-31T08:09:19.000Z | docs/source/generate_cli_help.py | Conzel/CompressAI | 55be017e93e25fc936fe0fb4fa5851b3c1032dfc | [
"BSD-3-Clause-Clear"
] | 102 | 2020-08-12T15:13:19.000Z | 2022-03-30T22:28:16.000Z | docs/source/generate_cli_help.py | Conzel/CompressAI | 55be017e93e25fc936fe0fb4fa5851b3c1032dfc | [
"BSD-3-Clause-Clear"
] | 123 | 2020-06-25T00:32:29.000Z | 2022-03-28T19:19:16.000Z | # Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Based on https://github.com/facebookresearch/ParlAI/tree/c06c40603f45918f58cb09122fa8c74dd4047057/docs/source
import importlib
import io
from pathlib import Path
import compressai.utils


def get_utils():
    rootdir = Path(compressai.utils.__file__).parent
    for d in rootdir.iterdir():
        if d.is_dir() and (d / "__main__.py").is_file():
            yield d


def main():
    fout = open("cli_usage.inc", "w")

    for p in get_utils():
        try:
            m = importlib.import_module(f"compressai.utils.{p.name}.__main__")
        except ImportError:
            continue

        if not hasattr(m, "setup_args"):
            continue

        fout.write(p.name)
        fout.write("\n")
        fout.write("-" * len(p.name))
        fout.write("\n")

        doc = m.__doc__
        if doc:
            fout.write(doc)
            fout.write("\n")

        fout.write(".. code-block:: text\n\n")

        capture = io.StringIO()
        parser = m.setup_args()
        if isinstance(parser, tuple):
            parser = parser[0]
        parser.prog = f"python -m compressai.utils.{p.name}"
        parser.print_help(capture)

        for line in capture.getvalue().split("\n"):
            fout.write(f"\t{line}\n")
        fout.write("\n\n")

    fout.close()


if __name__ == "__main__":
    main()
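
# --- Editor-added output sketch (hypothetical util name "eval_model"):
# --- cli_usage.inc receives one reStructuredText section per discovered
# --- util, e.g.
# ---
# ---   eval_model
# ---   ----------
# ---   .. code-block:: text
# ---
# ---       usage: python -m compressai.utils.eval_model [-h] ...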
| 34.954023 | 111 | 0.694837 | 413 | 3,041 | 5.03632 | 0.479419 | 0.038942 | 0.019231 | 0.022115 | 0.116346 | 0.065385 | 0.065385 | 0.065385 | 0.065385 | 0.065385 | 0 | 0.016007 | 0.219336 | 3,041 | 86 | 112 | 35.360465 | 0.860152 | 0.583032 | 0 | 0.128205 | 0 | 0 | 0.128122 | 0.047542 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051282 | false | 0 | 0.153846 | 0 | 0.205128 | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45a3d4d8a1f127580265a0a3c5979a90f99be58c | 2,219 | py | Python | implementation/data_io.py | rpalo/masters-thesis | fcc0beb933634b17dbe41bde982e947204fd498b | [
"MIT"
] | null | null | null | implementation/data_io.py | rpalo/masters-thesis | fcc0beb933634b17dbe41bde982e947204fd498b | [
"MIT"
] | null | null | null | implementation/data_io.py | rpalo/masters-thesis | fcc0beb933634b17dbe41bde982e947204fd498b | [
"MIT"
] | null | null | null | """Data I/O: Import and export data to other usable formats."""
import csv
from pathlib import Path
from model import Job


def import_csv(filename, base_dir=Path("data/")):
    """Converts CSV files with the relevant data (see columns below) to
    a list of Jobs.
    """
    datafile = base_dir / filename
    with open(datafile, "r", newline="", encoding="utf-8-sig") as csvfile:
        reader = csv.DictReader(csvfile)
        return [
            Job(
                line["part number"],
                int(line["quantity"]),
                float(line["cycle"]),
                int(line["cavities"]),
                float(line["due date"]),
                line["mold"],
                line["material"],
                [int(num) for num in line["machines"].split(",")],
                float(line["setup"]),
                float(line["teardown"])
            ) for i, line in enumerate(reader, start=2)
        ]
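
# --- Editor-added usage sketch (hypothetical file contents): given
# --- data/jobs.csv with the header
# ---   part number,quantity,cycle,cavities,due date,mold,material,machines,setup,teardown
# --- and rows such as
# ---   P-100,500,30.0,2,72.0,M7,ABS,"1,3",1.5,0.5
# --- the call
# ---   jobs = import_csv("jobs.csv")
# --- returns one Job per data row.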


def export_csv(schedule, fitness, time_elapsed, filename, base_dir=Path("results/")):
    """Exports a generated schedule to CSV in a format where each machine
    has its jobs listed with start and end dates in order of operation.

    Each machine separated by a blank line.
    """
    outfile = base_dir / filename
    with open(outfile, "w") as csvfile:
        fieldnames = ["part number", "due date", "material", "start", "end"]
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for machine in schedule:
            writer.writerow({"part number": f"Machine {machine.number}"})
            for assignment in machine.queue:
                writer.writerow({
                    "part number": assignment.job.number,
                    "due date": assignment.job.due_date,
                    "material": assignment.job.material,
                    "start": assignment.start,
                    "end": assignment.end,
                })
            writer.writerow({})
        writer.writerow({})
        writer.writerow({
            "part number": "Total fitness:",
            "due date": fitness
        })
        writer.writerow({
            "part number": "Time elapsed:",
            "due date": time_elapsed
}) | 36.377049 | 85 | 0.545291 | 240 | 2,219 | 5.004167 | 0.4125 | 0.049958 | 0.05995 | 0.079933 | 0.038301 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001348 | 0.33123 | 2,219 | 61 | 86 | 36.377049 | 0.807951 | 0.141505 | 0 | 0.170213 | 0 | 0 | 0.143469 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042553 | false | 0 | 0.085106 | 0 | 0.148936 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45a6eb2cafb85abd876f4684bf56dbe0066463d9 | 16,694 | py | Python | research/cv/eppmvsnet/src/networks.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | research/cv/eppmvsnet/src/networks.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | research/cv/eppmvsnet/src/networks.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""sub-networks of EPP-MVSNet"""
import numpy as np
import mindspore
import mindspore.ops as P
from mindspore import nn
from mindspore import Tensor, Parameter
from src.modules import depth_regression, soft_argmin, entropy


class BasicBlockA(nn.Cell):
    """BasicBlockA"""

    def __init__(self, in_channels, out_channels, stride):
        super(BasicBlockA, self).__init__()
        self.conv2d_0 = nn.Conv2d(in_channels, out_channels, 3, stride=stride, padding=1, pad_mode="pad")
        self.conv2d_1 = nn.Conv2d(in_channels, out_channels, 1, stride=stride, padding=0, pad_mode="valid")
        self.batchnorm2d_2 = nn.BatchNorm2d(out_channels, eps=9.999999747378752e-06, momentum=0.8999999761581421)
        self.batchnorm2d_3 = nn.BatchNorm2d(out_channels, eps=9.999999747378752e-06, momentum=0.8999999761581421)
        self.relu_4 = nn.ReLU()
        self.conv2d_5 = nn.Conv2d(out_channels, out_channels, 3, stride=1, padding=(1, 1, 1, 1), pad_mode="pad")
        self.batchnorm2d_6 = nn.BatchNorm2d(out_channels, eps=9.999999747378752e-06, momentum=0.8999999761581421)
        self.relu_8 = nn.ReLU()

    def construct(self, x):
        """construct"""
        x1 = self.conv2d_0(x)
        x1 = self.batchnorm2d_2(x1)
        x1 = self.relu_4(x1)
        x1 = self.conv2d_5(x1)
        x1 = self.batchnorm2d_6(x1)
        res = self.conv2d_1(x)
        res = self.batchnorm2d_3(res)
        out = P.Add()(x1, res)
        out = self.relu_8(out)
        return out


class BasicBlockB(nn.Cell):
    """BasicBlockB"""

    def __init__(self, in_channels, out_channels):
        super(BasicBlockB, self).__init__()
        self.conv2d_0 = nn.Conv2d(in_channels, out_channels, 3, stride=1, padding=1, pad_mode="pad")
        self.batchnorm2d_1 = nn.BatchNorm2d(out_channels, eps=9.999999747378752e-06, momentum=0.8999999761581421)
        self.relu_2 = nn.ReLU()
        self.conv2d_3 = nn.Conv2d(in_channels, out_channels, 3, stride=1, padding=1, pad_mode="pad")
        self.batchnorm2d_4 = nn.BatchNorm2d(out_channels, eps=9.999999747378752e-06, momentum=0.8999999761581421)
        self.relu_6 = nn.ReLU()

    def construct(self, x):
        """construct"""
        x1 = self.conv2d_0(x)
        x1 = self.batchnorm2d_1(x1)
        x1 = self.relu_2(x1)
        x1 = self.conv2d_3(x1)
        x1 = self.batchnorm2d_4(x1)
        res = x
        out = P.Add()(x1, res)
        out = self.relu_6(out)
        return out


class UNet2D(nn.Cell):
    """UNet2D"""

    def __init__(self):
        super(UNet2D, self).__init__()
        self.conv2d_0 = nn.Conv2d(3, 16, 5, stride=2, padding=2, pad_mode="pad")
        self.batchnorm2d_1 = nn.BatchNorm2d(16, eps=9.999999747378752e-06, momentum=0.8999999761581421)
        self.leakyrelu_2 = nn.LeakyReLU(alpha=0.009999999776482582)
        self.convblocka_0 = BasicBlockA(16, 32, 1)
        self.convblockb_0 = BasicBlockB(32, 32)
        self.convblocka_1 = BasicBlockA(32, 64, 2)
        self.convblockb_1 = BasicBlockB(64, 64)
        self.convblocka_2 = BasicBlockA(64, 128, 2)
        self.convblockb_2 = BasicBlockB(128, 128)
        self.conv2dbackpropinput_51 = P.Conv2DBackpropInput(64, 3, stride=2, pad=1, pad_mode="pad")
        self.conv2dbackpropinput_51_weight = Parameter(Tensor(
            np.random.uniform(0, 1, (128, 64, 3, 3)).astype(np.float32)))
        self.conv2d_54 = nn.Conv2d(128, 64, 3, stride=1, padding=1, pad_mode="pad")
        self.convblockb_3 = BasicBlockB(64, 64)
        self.conv2dbackpropinput_62 = P.Conv2DBackpropInput(32, 3, stride=2, pad=1, pad_mode="pad")
        self.conv2dbackpropinput_62_weight = Parameter(Tensor(
            np.random.uniform(0, 1, (64, 32, 3, 3)).astype(np.float32)))
        self.conv2d_65 = nn.Conv2d(64, 32, 3, stride=1, padding=1, pad_mode="pad")
        self.convblockb_4 = BasicBlockB(32, 32)
        self.conv2d_52 = nn.Conv2d(128, 32, 3, stride=1, padding=1, pad_mode="pad")
        self.conv2d_63 = nn.Conv2d(64, 32, 3, stride=1, padding=1, pad_mode="pad")
        self.conv2d_73 = nn.Conv2d(32, 32, 3, stride=1, padding=1, pad_mode="pad")
        self.concat = P.Concat(axis=1)
        param_dict = mindspore.load_checkpoint("./ckpts/feat_ext.ckpt")
        params_not_loaded = mindspore.load_param_into_net(self, param_dict, strict_load=True)
        print(params_not_loaded)

    def construct(self, imgs):
        """construct"""
        _, _, h, w = imgs.shape
        x = self.conv2d_0(imgs)
        x = self.batchnorm2d_1(x)
        x = self.leakyrelu_2(x)
        x1 = self.convblocka_0(x)
        x1 = self.convblockb_0(x1)
        x2 = self.convblocka_1(x1)
        x2 = self.convblockb_1(x2)
        x3 = self.convblocka_2(x2)
        x3 = self.convblockb_2(x3)
        x2_upsample = self.conv2dbackpropinput_51(x3, self.conv2dbackpropinput_51_weight,
                                                  (x2.shape[0], x2.shape[1], h // 4, w // 4))
        x2_upsample = self.concat((x2_upsample, x2,))
        x2_upsample = self.conv2d_54(x2_upsample)
        x2_upsample = self.convblockb_3(x2_upsample)
        x1_upsample = self.conv2dbackpropinput_62(x2_upsample, self.conv2dbackpropinput_62_weight,
                                                  (x1.shape[0], x1.shape[1], h // 2, w // 2))
        x1_upsample = self.concat((x1_upsample, x1,))
        x1_upsample = self.conv2d_65(x1_upsample)
        x1_upsample = self.convblockb_4(x1_upsample)
        x3_final = self.conv2d_52(x3)
        x2_final = self.conv2d_63(x2_upsample)
        x1_final = self.conv2d_73(x1_upsample)
        return x3_final, x2_final, x1_final
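
# --- Editor-added shape sketch (illustrative; assumes the checkpoint
# --- ./ckpts/feat_ext.ckpt that UNet2D loads on init is available):
#
#   net = UNet2D()
#   imgs = Tensor(np.zeros((1, 3, 64, 80), np.float32))
#   x3, x2, x1 = net(imgs)
#   # x3: (1, 32, 8, 10)   1/8 resolution
#   # x2: (1, 32, 16, 20)  1/4 resolution
#   # x1: (1, 32, 32, 40)  1/2 resolution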
class ConvBnReLu(nn.Cell):
"""ConvBnReLu"""
def __init__(self, in_channels, out_channels):
super(ConvBnReLu, self).__init__()
self.conv3d_0 = nn.Conv3d(in_channels, out_channels, (3, 1, 1), stride=1, padding=(1, 1, 0, 0, 0, 0),
pad_mode="pad")
self.batchnorm3d_1 = nn.BatchNorm3d(out_channels, eps=9.999999747378752e-06, momentum=0.8999999761581421)
self.leakyrelu_2 = nn.LeakyReLU(alpha=0.009999999776482582)
def construct(self, x):
"""construct"""
x = self.conv3d_0(x)
x = self.batchnorm3d_1(x)
x = self.leakyrelu_2(x)
return x
class CostCompression(nn.Cell):
"""CostCompression"""
def __init__(self):
super(CostCompression, self).__init__()
self.basicblock_0 = ConvBnReLu(8, 64)
self.basicblock_1 = ConvBnReLu(64, 64)
self.basicblock_2 = ConvBnReLu(64, 8)
param_dict = mindspore.load_checkpoint("./ckpts/stage1_cost_compression.ckpt")
params_not_loaded = mindspore.load_param_into_net(self, param_dict, strict_load=True)
print(params_not_loaded)
def construct(self, x):
"""construct"""
x = self.basicblock_0(x)
x = self.basicblock_1(x)
x = self.basicblock_2(x)
return x
class Pseudo3DBlock_A(nn.Cell):
"""Pseudo3DBlock_A"""
def __init__(self, in_channels, out_channels):
super(Pseudo3DBlock_A, self).__init__()
self.conv3d_0 = nn.Conv3d(in_channels, out_channels, (1, 3, 3), stride=1, padding=(0, 0, 1, 1, 1, 1),
pad_mode="pad")
self.conv3d_1 = nn.Conv3d(out_channels, out_channels, (3, 1, 1), stride=1, padding=(1, 1, 0, 0, 0, 0),
pad_mode="pad")
self.batchnorm3d_2 = nn.BatchNorm3d(out_channels, eps=9.999999747378752e-06, momentum=0.8999999761581421)
self.relu_3 = nn.ReLU()
self.conv3d_4 = nn.Conv3d(out_channels, out_channels, (1, 3, 3), stride=1, padding=(0, 0, 1, 1, 1, 1),
pad_mode="pad")
self.conv3d_5 = nn.Conv3d(out_channels, out_channels, (3, 1, 1), stride=1, padding=(1, 1, 0, 0, 0, 0),
pad_mode="pad")
self.batchnorm3d_6 = nn.BatchNorm3d(out_channels, eps=9.999999747378752e-06, momentum=0.8999999761581421)
self.relu_8 = nn.ReLU()
def construct(self, x):
"""construct"""
x1 = self.conv3d_0(x)
x1 = self.conv3d_1(x1)
x1 = self.batchnorm3d_2(x1)
x1 = self.relu_3(x1)
x1 = self.conv3d_4(x1)
x1 = self.conv3d_5(x1)
x1 = self.batchnorm3d_6(x1)
res = x
out = P.Add()(x1, res)
out = self.relu_8(out)
return out
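# Pseudo-3D factorization sketch: a dense 3x3x3 convolution is approximated
# by a spatial (1, 3, 3) kernel followed by a temporal/depth (3, 1, 1) kernel,
# which reduces parameters and compute relative to the full 3-D kernel.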
class Pseudo3DBlock_B(nn.Cell):
"""Pseudo3DBlock_B"""
def __init__(self):
super(Pseudo3DBlock_B, self).__init__()
self.conv3d_0 = nn.Conv3d(8, 8, (1, 3, 3), stride=(1, 2, 2), padding=(0, 0, 1, 1, 1, 1), pad_mode="pad")
self.conv3d_1 = nn.Conv3d(8, 16, (1, 1, 1), stride=2, padding=0, pad_mode="valid")
self.conv3d_2 = nn.Conv3d(8, 16, (3, 1, 1), stride=(2, 1, 1), padding=(1, 1, 0, 0, 0, 0), pad_mode="pad")
self.batchnorm3d_3 = nn.BatchNorm3d(16, eps=9.999999747378752e-06, momentum=0.8999999761581421)
self.batchnorm3d_4 = nn.BatchNorm3d(16, eps=9.999999747378752e-06, momentum=0.8999999761581421)
self.relu_5 = nn.ReLU()
self.conv3d_6 = nn.Conv3d(16, 16, (1, 3, 3), stride=1, padding=(0, 0, 1, 1, 1, 1), pad_mode="pad")
self.conv3d_7 = nn.Conv3d(16, 16, (3, 1, 1), stride=1, padding=(1, 1, 0, 0, 0, 0), pad_mode="pad")
self.batchnorm3d_8 = nn.BatchNorm3d(16, eps=9.999999747378752e-06, momentum=0.8999999761581421)
self.relu_10 = nn.ReLU()
def construct(self, x):
"""construct"""
x1 = self.conv3d_0(x)
x1 = self.conv3d_2(x1)
x1 = self.batchnorm3d_4(x1)
x1 = self.relu_5(x1)
x1 = self.conv3d_6(x1)
x1 = self.conv3d_7(x1)
x1 = self.batchnorm3d_8(x1)
res = self.conv3d_1(x)
res = self.batchnorm3d_3(res)
out = P.Add()(x1, res)
out = self.relu_10(out)
return out
class CoarseStageRegFuse(nn.Cell):
"""CoarseStageRegFuse"""
def __init__(self):
super(CoarseStageRegFuse, self).__init__()
self.basicblocka_0 = Pseudo3DBlock_A(8, 8)
self.basicblockb_0 = Pseudo3DBlock_B()
self.conv3dtranspose_21 = nn.Conv3dTranspose(16, 8, 3, stride=2, padding=1, pad_mode="pad", output_padding=1)
self.conv3d_23 = nn.Conv3d(16, 8, (1, 3, 3), stride=1, padding=(0, 0, 1, 1, 1, 1), pad_mode="pad")
self.conv3d_24 = nn.Conv3d(8, 8, (3, 1, 1), stride=1, padding=(1, 1, 0, 0, 0, 0), pad_mode="pad")
self.conv3d_25 = nn.Conv3d(8, 1, 3, stride=1, padding=1, pad_mode="pad")
self.concat_1 = P.Concat(axis=1)
self.squeeze_1 = P.Squeeze(axis=1)
param_dict = mindspore.load_checkpoint("./ckpts/stage1_reg_fuse.ckpt")
params_not_loaded = mindspore.load_param_into_net(self, param_dict, strict_load=True)
print(params_not_loaded)
def construct(self, fused_interim, depth_values):
"""construct"""
x1 = self.basicblocka_0(fused_interim)
x2 = self.basicblockb_0(x1)
x1_upsample = self.conv3dtranspose_21(x2)
cost_volume = self.concat_1((x1_upsample, x1))
cost_volume = self.conv3d_23(cost_volume)
cost_volume = self.conv3d_24(cost_volume)
score_volume = self.conv3d_25(cost_volume)
score_volume = self.squeeze_1(score_volume)
prob_volume, _, prob_map = soft_argmin(score_volume, dim=1, keepdim=True, window=2)
est_depth = depth_regression(prob_volume, depth_values, keep_dim=True)
return est_depth, prob_map, prob_volume
class CoarseStageRegPair(nn.Cell):
"""CoarseStageRegPair"""
def __init__(self):
super(CoarseStageRegPair, self).__init__()
self.basicblocka_0 = Pseudo3DBlock_A(8, 8)
self.basicblockb_0 = Pseudo3DBlock_B()
self.conv3dtranspose_21 = nn.Conv3dTranspose(16, 8, 3, stride=2, padding=1, pad_mode="pad", output_padding=1)
self.concat_22 = P.Concat(axis=1)
self.conv3d_23 = nn.Conv3d(16, 8, (1, 3, 3), stride=1, padding=(0, 0, 1, 1, 1, 1), pad_mode="pad")
self.conv3d_24 = nn.Conv3d(8, 8, (3, 1, 1), stride=1, padding=(1, 1, 0, 0, 0, 0), pad_mode="pad")
self.conv3d_25 = nn.Conv3d(8, 1, 3, stride=1, padding=1, pad_mode="pad")
self.conv2d_38 = nn.Conv2d(1, 8, 3, stride=1, padding=1, pad_mode="pad")
self.batchnorm2d_39 = nn.BatchNorm2d(num_features=8, eps=9.999999747378752e-06, momentum=0.8999999761581421)
self.leakyrelu_40 = nn.LeakyReLU(alpha=0.009999999776482582)
self.conv2d_41 = nn.Conv2d(8, 8, 3, stride=1, padding=1, pad_mode="pad")
self.batchnorm2d_42 = nn.BatchNorm2d(num_features=8, eps=9.999999747378752e-06, momentum=0.8999999761581421)
self.leakyrelu_43 = nn.LeakyReLU(alpha=0.009999999776482582)
self.conv2d_45 = nn.Conv2d(8, 1, 3, stride=1, padding=1, pad_mode="pad")
self.conv2d_46 = nn.Conv2d(8, 1, 3, stride=1, padding=1, pad_mode="pad")
self.concat_1 = P.Concat(axis=1)
self.squeeze_1 = P.Squeeze(axis=1)
param_dict = mindspore.load_checkpoint("./ckpts/stage1_reg_pair.ckpt")
params_not_loaded = mindspore.load_param_into_net(self, param_dict, strict_load=True)
print(params_not_loaded)
def construct(self, cost_volume, depth_values):
"""construct"""
x1 = self.basicblocka_0(cost_volume)
x2 = self.basicblockb_0(x1)
x1_upsample = self.conv3dtranspose_21(x2)
interim = self.concat_1((x1_upsample, x1))
interim = self.conv3d_23(interim)
interim = self.conv3d_24(interim)
score_volume = self.conv3d_25(interim)
score_volume = self.squeeze_1(score_volume)
prob_volume, _ = soft_argmin(score_volume, dim=1, keepdim=True)
est_depth = depth_regression(prob_volume, depth_values, keep_dim=True)
entropy_ = entropy(prob_volume, dim=1, keepdim=True)
x = self.conv2d_38(entropy_)
x = self.batchnorm2d_39(x)
x = self.leakyrelu_40(x)
x = self.conv2d_41(x)
x = self.batchnorm2d_42(x)
x = self.leakyrelu_43(x)
out = P.Add()(x, entropy_)
uncertainty_map = self.conv2d_45(out)
occ = self.conv2d_46(out)
return interim, est_depth, uncertainty_map, occ
class StageRegFuse(nn.Cell):
"""StageRegFuse"""
def __init__(self, ckpt_path):
super(StageRegFuse, self).__init__()
self.basicblocka_0 = Pseudo3DBlock_A(8, 8)
self.basicblocka_1 = Pseudo3DBlock_A(8, 8)
self.basicblockb_0 = Pseudo3DBlock_B()
self.basicblocka_2 = Pseudo3DBlock_A(16, 16)
self.conv3dtranspose_38 = nn.Conv3dTranspose(16, 8, 3, stride=2, padding=1, pad_mode="pad", output_padding=1)
self.concat_39 = P.Concat(axis=1)
self.conv3d_40 = nn.Conv3d(16, 8, (1, 3, 3), stride=1, padding=(0, 0, 1, 1, 1, 1), pad_mode="pad")
self.conv3d_41 = nn.Conv3d(8, 8, (3, 1, 1), stride=1, padding=(1, 1, 0, 0, 0, 0), pad_mode="pad")
self.conv3d_42 = nn.Conv3d(8, 1, 3, stride=1, padding=1, pad_mode="pad")
self.concat_1 = P.Concat(axis=1)
self.squeeze_1 = P.Squeeze(axis=1)
param_dict = mindspore.load_checkpoint(ckpt_path)
params_not_loaded = mindspore.load_param_into_net(self, param_dict, strict_load=True)
print(params_not_loaded)
def construct(self, fused_interim, depth_values):
"""construct"""
x1 = self.basicblocka_0(fused_interim)
x1 = self.basicblocka_1(x1)
x2 = self.basicblockb_0(x1)
x2 = self.basicblocka_2(x2)
x1_upsample = self.conv3dtranspose_38(x2)
cost_volume = self.concat_1((x1_upsample, x1))
cost_volume = self.conv3d_40(cost_volume)
cost_volume = self.conv3d_41(cost_volume)
score_volume = self.conv3d_42(cost_volume)
score_volume = self.squeeze_1(score_volume)
prob_volume, _, prob_map = soft_argmin(score_volume, dim=1, keepdim=True, window=2)
est_depth = depth_regression(prob_volume, depth_values, keep_dim=True)
return est_depth, prob_map, prob_volume
| 41.424318 | 117 | 0.636157 | 2,369 | 16,694 | 4.257915 | 0.093288 | 0.008526 | 0.036681 | 0.047189 | 0.651135 | 0.637752 | 0.608803 | 0.567067 | 0.531674 | 0.527114 | 0 | 0.120376 | 0.229184 | 16,694 | 402 | 118 | 41.527363 | 0.663506 | 0.054331 | 0 | 0.333333 | 0 | 0 | 0.014935 | 0.007212 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070922 | false | 0 | 0.021277 | 0 | 0.163121 | 0.017731 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45aa13a44c60964639568833f156cf26339f8f91 | 9,162 | py | Python | master-v0/todolist_seperated.py | kaixindelele/Study-System | d64ed15d425064445e2ca6a4bb89515dec68d19c | [
"Apache-2.0"
] | null | null | null | master-v0/todolist_seperated.py | kaixindelele/Study-System | d64ed15d425064445e2ca6a4bb89515dec68d19c | [
"Apache-2.0"
] | null | null | null | master-v0/todolist_seperated.py | kaixindelele/Study-System | d64ed15d425064445e2ca6a4bb89515dec68d19c | [
"Apache-2.0"
] | null | null | null | import tkinter
from tkinter import messagebox
import random
WINDOW_HEIGHT=1200
WINDOW_WIDTH=600
BUTTON_HEIGHT=2
BUTTON_WIDTH=10
class Item:
def __init__(self, id, content):
self.index=id
if len(content.split(" ")) == 1:
self.text=content.split(" ")[0]
elif not content.split(" ")[-1].isdigit():
self.text=content.split(" ")[:]
else:
self.text=content.split(" ")[:-1]
self.text=''.join(self.text)
score=content.split(" ")[-1]
if score.isdigit():
self.score=score
else:
self.score=0
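# Parsing sketch: Item(1, "看一篇paper 5") yields index=1,
# text="看一篇paper" (fields are re-joined without spaces) and score="5";
# a missing or non-numeric trailing score falls back to 0.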
class Project:
def __init__(self, root, start_column, title):
self.root=root
self.title=title
self.items_list=[]
self.total_score=0
try:
self.read_file()
except Exception:
pass # tolerate a missing or corrupt save file on first run
self._build_window(start_column, start_column+1, start_column+2)
self.update_listbox()
def read_file(self):
file_name=self.title+".txt"
f=open(file_name, 'r', encoding='utf-8')
sourceInLines=f.readlines() # read the file contents line by line
f.close()
new=[] # an empty list to collect the parsed rows
for line in sourceInLines:
temp1=line.strip('\n') # strip the trailing newline from the line
temp2=temp1.split(' ') # split the line on spaces into a list of fields
self.total_score += int(temp2[-1])
new.append(temp2) # collect the parsed fields
for n in new:
content=str(n[1]) + " " + str(n[2])
new_item=Item(n[0],content)
self.items_list.append(new_item)
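# The backing file ("task.txt"/"wish.txt") is expected to hold one item
# per line as "<index> <text> <score>", the same format save_to_local() writes.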
def _build_window(self, c1, c2, c3):
self._build_index(column=c1)
self._build_list(c2)
self._build_score_list(column=c3)
self._build_score_analysis(5)
def _build_index(self, column):
self.num_label=tkinter.Label(self.root, text="序号", width=3, bg="white")
self.num_label.grid(row=1, column=column)
# listbox that displays the item indices
self.index_display=tkinter.Listbox(self.root, width=3, height=20)
self.index_display.grid(row=2, column=column, rowspan=12)
def _build_list(self, column):
if self.title == "task":
text="任务清单"
default_text="输入模板:看一篇paper 5"
if self.title == "wish":
text="愿望商店"
default_text="输入模板:看十分钟小说 2"
self.top_display=tkinter.Label(self.root, width=36, text=text, padx=10, pady=0, bg="SkyBlue", )
self.top_display.grid(row=0, column=column)
self.input=tkinter.Entry(self.root, width=36, bg="SkyBlue", )
self.input.bind('<Return>', self.event_Save)
self.input.bind('<ButtonPress>', self.event_Disappear)
self.input.bind("<FocusOut>", self.event_Leave)
self.input.insert(0, default_text)
self.input.grid(row=1, column=column)
# listbox that displays the item text
self.display=tkinter.Listbox(self.root, width=40, height=20)
self.display.bind('<Delete>', self.eventDeleteOne)
self.display.grid(row=2, column=column, rowspan=7)
# lb for listbox
def _build_score_list(self, column):
self.score_label=tkinter.Label(self.root, text="对应积分", width=4, height=BUTTON_HEIGHT, padx=10, pady=0, bg="lightCyan")
self.score_label.grid(row=1, column=column)
# listbox that displays the item scores
self.score_display=tkinter.Listbox(self.root, width=6, height=20)
self.score_display.grid(row=2, column=column, rowspan=12)
def _build_score_analysis(self, column):
if self.title == "task":
default_text="累计积分"
label_row=1
if self.title == "wish":
default_text="愿望积分"
label_row=3
self.score_label=tkinter.Label(self.root, text=default_text, width=8, height=BUTTON_HEIGHT, padx=10, pady=0,
bg="Violet")
self.score_label.grid(row=label_row, column=column)
self.score_value=tkinter.Label(self.root, text=self.total_score, width=8, height=BUTTON_HEIGHT, padx=10,
pady=0, bg="cornsilk")
self.score_value.grid(row=label_row+1, column=column)
def add_item(self):
# Get the task to add
self.item_content=self.input.get()
self.items_num=len(self.items_list)
print("num:", self.items_num)
self.item_id=self.items_num + 1
self.new_item=Item(self.item_id, self.item_content)
# Make sure the task is not empty
if self.new_item.text != "":
# Append to the list
self.items_list.append(self.new_item)
self.total_score += int(self.new_item.score)
self.score_value['text']=self.total_score
# Update the listbox
self.update_listbox()
else:
# tkinter.messagebox.showwarning("Warning", "Please enter a task.")
pass
self.input.delete(0, "end") # clears the input box after a new task is entered
def eventDeleteOne(self, event):
self.del_one()
def event_Save(self, event):
self.add_item()
def event_Disappear(self, event):
self.input.delete(0, last='end')
def event_Leave(self, event):
# restore this project's own placeholder text (was hard-coded to the wish template)
self.input.insert(0, self.default_text)
def clear_listbox(self):
self.display.delete(0, "end")
def clear_index(self):
self.index_display.delete(0, 'end')
def clear_score_index(self):
self.score_display.delete(0, 'end')
def update_listbox(self):
# Clear the current list to keep from adding the same tasks over and over again
self.clear_listbox()
self.clear_index()
self.clear_score_index()
# Populate the Listbox
for item in self.items_list:
self.display.insert("end", item.text)
self.index_display.insert("end", item.index)
self.score_display.insert("end", item.score)
def clear_wishes_index(self):
self.index_display.delete(0, 'end')
def clear_wishes_score_index(self):
self.score_display.delete(0, 'end')
def del_all(self):
confirmed=tkinter.messagebox.askyesno("Please Confirm", "Do you really want to delete all?")
if confirmed == True:
# Clear the items list
self.items_list=[]
self.total_score=0
self.score_value['text']=self.total_score
# Update the listbox
self.update_listbox()
self.sort_asc()
def del_one(self):
# Get the text of the currently selected item
text=self.display.get("active")
for t in self.items_list:
if text == t.text:
self.total_score -= int(t.score)
self.score_value['text']=self.total_score
self.items_list.remove(t)
self.sort_asc()
# TODO: re-sort the items by ID
def sort_asc(self):
# sort the list
tem_list=[]
for i in range(len(self.items_list)):
tem_task=self.items_list[i]
tem_task.index=i+1
tem_list.append(tem_task)
self.items_list=tem_list
#update the listbox
self.update_listbox()
def save_to_local(self):
file_name=self.title + ".txt"
f=open(file_name, 'w', encoding='utf-8')
for t in self.items_list:
f.write(str(t.index))
f.write(" ")
f.write(t.text)
f.write(" ")
f.write(str(t.score))
f.write("\n")
f.close()
class TODO_list:
def __init__(self):
#Create root window
self.root=tkinter.Tk()
# Change root window background color
self.root.configure(bg="white")
# Change the title
self.root.title("骆永乐的任务清单商店")
# Change the window size
self.root.geometry("1200x600")
# Create an empty list
self.project_list=[]
self._build_window()
def _build_window(self):
self.task_project=Project(self.root, 2, "task")
self.wish_project=Project(self.root, 7, "wish")
self.project_list.append(self.task_project)
self.project_list.append(self.wish_project)
self.check=tkinter.Label(self.root, text="账单", width=8, height=BUTTON_HEIGHT, padx=10, pady=0,
bg="White")
self.check.grid(row=0, column=5)
self.check.bind_all('<Escape>', self.eventEsc)
# Start the main events loop
self.root.mainloop()
def save_to_all(self):
for p in self.project_list:
p.save_to_local()
def eventEsc(self, event):
self.save_to_all()
exit()
def main():
todo_list=TODO_list()
if __name__ == "__main__":
main() | 34.186567 | 127 | 0.573456 | 1,174 | 9,162 | 4.308348 | 0.189949 | 0.030051 | 0.028272 | 0.023725 | 0.28964 | 0.234085 | 0.184263 | 0.175168 | 0.132661 | 0.132661 | 0 | 0.017801 | 0.307138 | 9,162 | 268 | 128 | 34.186567 | 0.778986 | 0.081314 | 0 | 0.179104 | 0 | 0 | 0.043103 | 0 | 0 | 0 | 0 | 0.003731 | 0 | 1 | 0.144279 | false | 0.00995 | 0.014925 | 0 | 0.174129 | 0.004975 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45aafdc53dd12d0a6a957bd81175c72fb3ad70c0 | 2,665 | py | Python | src/settings/base.py | Hammerstad/blog | 3075e59c46321fd5fd5ccafbfb36e6d59b765259 | [
"MIT"
] | null | null | null | src/settings/base.py | Hammerstad/blog | 3075e59c46321fd5fd5ccafbfb36e6d59b765259 | [
"MIT"
] | null | null | null | src/settings/base.py | Hammerstad/blog | 3075e59c46321fd5fd5ccafbfb36e6d59b765259 | [
"MIT"
] | null | null | null | import os, re
from django.contrib.messages import constants as messages
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
makepath = lambda *f: os.path.join(BASE_DIR, *f)
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')2x-r4x+4oxdmvvxenj*dhq##uxrgl%f3=#+l*1s32y=f^51hz'
ALLOWED_HOSTS = []
TIME_ZONE = 'Europe/Oslo'
# Application definition
INSTALLED_APPS = (
'grappelli',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'app.blog',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages'
)
ROOT_URLCONF = 'urls'
WSGI_APPLICATION = 'wsgi.application'
LANGUAGE_CODE = 'en-gb'
STANDARD_USER_LANGUAGE = 'en-gb'
DATE_FORMAT = 'd.m.Y'
TIME_FORMAT = 'H.i'
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
USE_I18N = True
USE_L10N = True
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
MEDIA_ROOT = makepath("media")
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
makepath("static"),
)
# Used by collect static and nginx
STATIC_ROOT = os.path.join(BASE_DIR, '../static')
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
GRAPPELLI_ADMIN_TITLE = "Eirik M Hammerstad - Blog"
## Translates message tags into our correct CSS classes
MESSAGE_TAGS = {
messages.DEBUG: 'alert-debug',
messages.INFO: 'alert-info',
messages.SUCCESS: 'alert-success',
messages.WARNING: 'alert-info',
messages.ERROR: 'alert-error',
}
SITE_ID = 1
# Login specific
LOGIN_URL = "/login/"
LOGIN_REDIRECT_URL = "/"
LOGOUT_URL = "/logout/" | 26.386139 | 73 | 0.734709 | 317 | 2,665 | 6.037855 | 0.485804 | 0.115465 | 0.043887 | 0.048589 | 0.017764 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006092 | 0.137711 | 2,665 | 101 | 74 | 26.386139 | 0.826806 | 0.127955 | 0 | 0 | 0 | 0 | 0.507991 | 0.397408 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.028169 | 0 | 0.028169 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45af13113748b76dff6737ce6168b834565ba369 | 737 | py | Python | cpias/cli/__init__.py | CellProfiling/cpias | e2d9426436573b40625287101570b849ce9f4a38 | [
"Apache-2.0"
] | null | null | null | cpias/cli/__init__.py | CellProfiling/cpias | e2d9426436573b40625287101570b849ce9f4a38 | [
"Apache-2.0"
] | null | null | null | cpias/cli/__init__.py | CellProfiling/cpias | e2d9426436573b40625287101570b849ce9f4a38 | [
"Apache-2.0"
] | null | null | null | # type: ignore
"""Provide a CLI."""
import logging
import click
from cpias import __version__
from cpias.cli.client import run_client
from cpias.cli.server import start_server
SETTINGS = dict(help_option_names=["-h", "--help"])
@click.group(
options_metavar="", subcommand_metavar="<command>", context_settings=SETTINGS
)
@click.option("--debug", is_flag=True, help="Start server in debug mode.")
@click.version_option(__version__)
@click.pass_context
def cli(ctx, debug):
"""Run CPIAS server."""
ctx.obj = {}
ctx.obj["debug"] = debug
if debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
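# register subcommands provided by cpias.cli.server and cpias.cli.client;
# the group-level --debug flag sets DEBUG logging and is also passed to
# the subcommands via ctx.obj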
cli.add_command(start_server)
cli.add_command(run_client)
| 23.03125 | 81 | 0.717775 | 99 | 737 | 5.121212 | 0.434343 | 0.053254 | 0.047337 | 0.118343 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.143826 | 737 | 31 | 82 | 23.774194 | 0.803487 | 0.062415 | 0 | 0 | 0 | 0 | 0.082353 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0.047619 | 0.238095 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45b1387d07ded17b27c38254744ea03c4ab7dcf9 | 1,368 | py | Python | server/config.py | knrdl/casa | 411a71f2fcc7b2f7c6cd33973ce6f5919c9f7180 | [
"MIT"
] | 40 | 2022-01-08T18:33:33.000Z | 2022-01-17T11:52:40.000Z | server/config.py | knrdl/casa | 411a71f2fcc7b2f7c6cd33973ce6f5919c9f7180 | [
"MIT"
] | null | null | null | server/config.py | knrdl/casa | 411a71f2fcc7b2f7c6cd33973ce6f5919c9f7180 | [
"MIT"
] | 3 | 2022-01-10T03:05:45.000Z | 2022-01-10T16:17:29.000Z | import os
from typing import Literal, get_args, Final, Set, Dict
AUTH_API_URL = os.getenv('AUTH_API_URL')
AUTH_API_FIELD_USERNAME = os.getenv('AUTH_API_FIELD_USERNAME', 'username')
AUTH_API_FIELD_PASSWORD = os.getenv('AUTH_API_FIELD_PASSWORD', 'password')
if not AUTH_API_URL:
raise Exception('please provide AUTH_API_URL env var')
PermissionType = Literal[
'info', 'info-annotations', 'state', 'logs', 'term', 'procs', 'files', 'files-read', 'files-write']
PERMISSIONS: Final[Set[PermissionType]] = set(get_args(PermissionType))
ROLES_PERMS: Dict[str, Set[PermissionType]] = {}
for key, value in os.environ.items():
if key.startswith('ROLES_'):
role_name = key.removeprefix('ROLES_').strip().replace('_', '.')
if role_name:
permissions = {p.strip() for p in value.split(',')}
permissions = {p for p in permissions if p}
unknown_permission = next((p for p in permissions if p not in PERMISSIONS), None)
if unknown_permission:
raise Exception(f'unknown permission "{unknown_permission}" for role "{role_name}"')
ROLES_PERMS[role_name] = permissions
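# Example (illustrative): ROLES_devs_ops="info,logs" defines the role
# "devs.ops" with permissions {"info", "logs"}; underscores in the env
# var name become dots in the role name.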
if not ROLES_PERMS:
raise Exception('no roles defined, please set ROLES_* env vars')
print('Roles:')
for role in sorted(ROLES_PERMS):
print('*', role, '->', ', '.join(sorted(ROLES_PERMS[role])))
print()
| 41.454545 | 103 | 0.681287 | 183 | 1,368 | 4.885246 | 0.349727 | 0.06264 | 0.044743 | 0.050336 | 0.091723 | 0.04698 | 0.04698 | 0 | 0 | 0 | 0 | 0 | 0.179094 | 1,368 | 32 | 104 | 42.75 | 0.796082 | 0 | 0 | 0 | 0 | 0 | 0.225146 | 0.049708 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.037037 | 0.074074 | 0 | 0.074074 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45b98152e1ec2d40f01789e515fcc68c070f6c42 | 2,452 | py | Python | src/python/lessons/Pokedex/Club Leader Resources/pokedex-finished.py | arve0/example_lessons | 56ce7f386df2478165f4583a86b462974f5e19ec | [
"CC0-1.0"
] | 2 | 2017-02-19T21:31:34.000Z | 2019-06-27T07:55:50.000Z | src/python/lessons/Pokedex/Club Leader Resources/pokedex-finished.py | arve0/example_lessons | 56ce7f386df2478165f4583a86b462974f5e19ec | [
"CC0-1.0"
] | null | null | null | src/python/lessons/Pokedex/Club Leader Resources/pokedex-finished.py | arve0/example_lessons | 56ce7f386df2478165f4583a86b462974f5e19ec | [
"CC0-1.0"
] | null | null | null | from random import *
import tkinter
from pokeapi import *
smallFont = ["Ariel" , 10]
mediumFont = ["Ariel" , 14]
bigFont = ["Ariel" , 24]
#function to display data for a pokemon number
def showPokemonData():
#pick a random pokemon number between 1 and 178
pokemonNumber = randint(1,178)
#fetch that pokemon's data via pokeapi's getPokemonData
pokemonDictionary = getPokemonData(pokemonNumber)
#get the data from the dictionary and add it to the labels
lblNameValue.configure(text = pokemonDictionary["name"])
lblHPValue.configure(text = pokemonDictionary["hp"])
lblAttackValue.configure(text = pokemonDictionary["attack"])
lblDefenceValue.configure(text = pokemonDictionary["defense"])
lblSpeedValue.configure(text = pokemonDictionary["speed"])
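# getPokemonData (imported from pokeapi) is assumed to return a dict
# with the keys "name", "hp", "attack", "defense" and "speed".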
#create the main window
window = tkinter.Tk()
window.config(bg="#e0e0ff")
window.title("Pokedex")
#button to show a random pokemon
btnGo = tkinter.Button(window,text="Get Random Pokemon!", command=showPokemonData, font=smallFont)
btnGo.pack()
#pokemon name
lblNameText = tkinter.Label(window,text="Name:", font=mediumFont)
lblNameText.config(bg="#e0e0ff", fg="#111111")
lblNameText.pack()
lblNameValue = tkinter.Label(window,text="?", font=bigFont)
lblNameValue.config(bg="#e0e0ff", fg="#111111")
lblNameValue.pack()
#pokemon hp
lblHPText = tkinter.Label(window,text="HP:", font=mediumFont)
lblHPText.config(bg="#e0e0ff", fg="#111111")
lblHPText.pack()
lblHPValue = tkinter.Label(window,text="?", font=bigFont)
lblHPValue.config(bg="#e0e0ff", fg="#111111")
lblHPValue.pack()
#pokemon attack
lblAttackText = tkinter.Label(window,text="Attack:", font=mediumFont)
lblAttackText.config(bg="#e0e0ff", fg="#111111")
lblAttackText.pack()
lblAttackValue = tkinter.Label(window,text="?", font=bigFont)
lblAttackValue.config(bg="#e0e0ff", fg="#111111")
lblAttackValue.pack()
#pokemon defence
lblDefenceText = tkinter.Label(window,text="Defence:", font=mediumFont)
lblDefenceText.config(bg="#e0e0ff", fg="#111111")
lblDefenceText.pack()
lblDefenceValue = tkinter.Label(window,text="?", font=bigFont)
lblDefenceValue.config(bg="#e0e0ff", fg="#111111")
lblDefenceValue.pack()
#pokemon speed
lblSpeedText = tkinter.Label(window,text="Speed:", font=mediumFont)
lblSpeedText.config(bg="#e0e0ff", fg="#111111")
lblSpeedText.pack()
lblSpeedValue = tkinter.Label(window,text="?", font=bigFont)
lblSpeedValue.config(bg="#e0e0ff", fg="#111111")
lblSpeedValue.pack()
window.mainloop()
| 31.844156 | 98 | 0.745514 | 289 | 2,452 | 6.32526 | 0.269896 | 0.04814 | 0.084245 | 0.12035 | 0.210613 | 0.090263 | 0 | 0 | 0 | 0 | 0 | 0.041742 | 0.101142 | 2,452 | 76 | 99 | 32.263158 | 0.787659 | 0.12398 | 0 | 0 | 0 | 0 | 0.115115 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02 | false | 0 | 0.06 | 0 | 0.08 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45bb4339df10b2c504d23e3c5f87aad73571f0d8 | 551 | py | Python | PythonExercicios/ex092.py | lordvinick/Python | c03fd08d4c204104bf0196b0bd129427fd2067ae | [
"MIT"
] | null | null | null | PythonExercicios/ex092.py | lordvinick/Python | c03fd08d4c204104bf0196b0bd129427fd2067ae | [
"MIT"
] | null | null | null | PythonExercicios/ex092.py | lordvinick/Python | c03fd08d4c204104bf0196b0bd129427fd2067ae | [
"MIT"
] | null | null | null | from datetime import date
info = dict()
info['nome'] = str(input('Nome: '))
ano = int(input('Ano de Nascimento: '))
info['idade'] = date.today().year - ano
info['ctps'] = int(input('Carteira de Trabalho (0 não tem): '))
if info['ctps'] > 0:
info['contratação'] = int(input('Ano de contratação: '))
info['Salário'] = float(input('Salário: R$ '))
anostrabalho = date.today().year - info['contratação']
info['aposentadoria'] = 35 - anostrabalho + info['idade']
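# Worked example: hired in 2010, born in 1990, current year 2021 ->
# anostrabalho = 11, idade = 31, aposentadoria = 35 - 11 + 31 = 55.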
print('-='*30)
for k, v in info.items():
print(f'- {k} tem o valor {v}')
| 36.733333 | 63 | 0.618875 | 77 | 551 | 4.428571 | 0.519481 | 0.070381 | 0.064516 | 0.076246 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013015 | 0.163339 | 551 | 14 | 64 | 39.357143 | 0.726681 | 0 | 0 | 0 | 0 | 0 | 0.323049 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.071429 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45bb5f01972a0422e197939fbc86dcc3efd9e8cf | 2,547 | py | Python | src/words/compiler/compile.py | DavidStrootman/ATP | 72005be0ac75339bb5da037a7e98573e338d16db | [
"MIT"
] | 2 | 2021-08-20T17:56:15.000Z | 2021-08-21T01:04:08.000Z | src/words/compiler/compile.py | DavidStrootman/Words | 72005be0ac75339bb5da037a7e98573e338d16db | [
"MIT"
] | null | null | null | src/words/compiler/compile.py | DavidStrootman/Words | 72005be0ac75339bb5da037a7e98573e338d16db | [
"MIT"
] | null | null | null | from pathlib import Path
from typing import Callable, Dict, List, Type, Iterator
from words.lexer.lex import Lexer
from words.parser.parse import Parser
from words.parser.parse_util import Program
from words.token_types.lexer_token import LexerToken
from words.token_types.parser_token import ParserToken, VariableParserToken
class Compiler:
"""
The compiler is used to compile the code to run natively on a piece of hardware.
The only supported hardware is the Arduino Due.
"""
@staticmethod
def compile(ast: Program, target: str = "arduino_due") -> str:
selected_target = target.lower()
if selected_target not in platform_compilers:
raise NotImplementedError(f"{target} not supported")
return platform_compilers[selected_target](ast)
@staticmethod
def build_asm(sections: List[str]) -> str:
return "\n".join(sections) + "\n"
@staticmethod
def find_token_in_ast(ast: List[ParserToken], token: Type[ParserToken]) -> List[ParserToken]:
"""Recursively find token in ast tree"""
def _find_token_in_token(token: Type[ParserToken]) -> ParserToken:
pass
return [_find_token_in_token(token)]
@staticmethod
def compile_file(file_path: Path) -> str:
"""
Compile from a file; this is the most common entry point for the Compiler.
:param file_path: Path to the source file to compile.
:return: The compiled assembly as a string.
"""
lexed_tokens: Iterator[LexerToken] = Lexer.lex_file(file_path)
program = Parser.parse(lexed_tokens)
return Compiler.compile(program)
class M0Compiler:
@staticmethod
def _compile_cpu_directive():
return ".cpu cortex-m0"
@staticmethod
def _compile_bss_segment(ast: Program):
bytes_to_reserve = len(Compiler.find_token_in_ast(ast.tokens, VariableParserToken))
bss_segment = ".bss \n" \
".byte " + ",".join(["0" for byte in range(bytes_to_reserve)]) + "\n" \
"test:\n" \
".byte 0"
return bss_segment
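# One .bss byte is reserved per VariableParserToken found in the program;
# with the current stub finder this is always a single placeholder byte.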
@staticmethod
def compile(ast: Program):
cpu_directive = M0Compiler._compile_cpu_directive()
bss_segment = M0Compiler._compile_bss_segment(ast)
return Compiler.build_asm(
[
cpu_directive,
bss_segment
]
)
platform_compilers: Dict[str, Callable[[Program], str]] = {
"arduino_due": M0Compiler.compile
}
| 29.964706 | 97 | 0.650962 | 302 | 2,547 | 5.301325 | 0.307947 | 0.065584 | 0.068707 | 0.026234 | 0.087445 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003704 | 0.257951 | 2,547 | 84 | 98 | 30.321429 | 0.843386 | 0.135846 | 0 | 0.134615 | 0 | 0 | 0.043987 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0.019231 | 0.134615 | 0.038462 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45bb63a16d11fc1ffe3d85d5f0903a0791235525 | 666 | py | Python | digger/modules/load.py | fxxkrlab/Digger | b69e23aee1f2a8eac4989badd354bd128d35100e | [
"MIT"
] | null | null | null | digger/modules/load.py | fxxkrlab/Digger | b69e23aee1f2a8eac4989badd354bd128d35100e | [
"MIT"
] | null | null | null | digger/modules/load.py | fxxkrlab/Digger | b69e23aee1f2a8eac4989badd354bd128d35100e | [
"MIT"
] | null | null | null | import os, logging, toml
_cfgFile_RAW = os.path.abspath(os.path.join("conf.toml"))
_cfg = toml.load(_cfgFile_RAW)
'''
log setting
'''
if _cfg['Servers']['server']['CONSOLE'] == 'DEBUG':
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=logging.DEBUG,
)
else:
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
path_regex_1 = r'([\s\S]+)\/([^\/][\s\S]+)\/$'
tv_folders = r'^[\s\S]+[\.|\s]([se]\d{1,2}|[se]\d{1,2}\-*[se]\d{1,2}|complete|ep\d{1,2}\-ep\d{1,2}|ep\d{1,2})[\.|\s][\s\S]+$' | 28.956522 | 125 | 0.564565 | 102 | 666 | 3.558824 | 0.401961 | 0.038567 | 0.049587 | 0.041322 | 0.451791 | 0.451791 | 0.451791 | 0.451791 | 0.369146 | 0.369146 | 0 | 0.022928 | 0.148649 | 666 | 23 | 125 | 28.956522 | 0.617284 | 0 | 0 | 0.25 | 0 | 0.0625 | 0.424383 | 0.21142 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
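# Illustrative matches: path_regex_1 splits "path/to/folder/" into the
# groups ("path/to", "folder"); tv_folders matches TV folder names such
# as "my.show.s01.1080p" or "my.show.ep01-ep10.x264".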
45bbcaa18c9d8b0af41194ae911dd0604064bd55 | 11,982 | py | Python | rados_deploy/internal/remoto/modules/rados_install.py | MariskaIJpelaar/rados-deploy | 4ffb467211c2b05d17d76c2423c72c0ee4d4ec99 | [
"MIT"
] | null | null | null | rados_deploy/internal/remoto/modules/rados_install.py | MariskaIJpelaar/rados-deploy | 4ffb467211c2b05d17d76c2423c72c0ee4d4ec99 | [
"MIT"
] | null | null | null | rados_deploy/internal/remoto/modules/rados_install.py | MariskaIJpelaar/rados-deploy | 4ffb467211c2b05d17d76c2423c72c0ee4d4ec99 | [
"MIT"
] | 1 | 2022-02-08T10:47:14.000Z | 2022-02-08T10:47:14.000Z | import os
import subprocess
import tempfile
import urllib.request
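# Note: helpers used below (join, rm, mkdir, ls, mv, unpack, exists, isdir,
# printw/printe/prints, Executor, Environment, library_exists, pip_install)
# are not defined in this file; remoto is assumed to concatenate the
# companion modules in this package before executing it on the remote host.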
def _get_ceph_deploy(location, silent=False, retries=5):
url = 'https://github.com/ceph/ceph-deploy/archive/refs/heads/master.zip'
with tempfile.TemporaryDirectory() as tmpdir: # We use a tempfile to store the downloaded archive.
archiveloc = join(tmpdir, 'ceph-deploy.zip')
if not silent:
print('Fetching ceph-deploy from {}'.format(url))
for x in range(retries):
try:
try:
rm(archiveloc)
except Exception as e:
pass
urllib.request.urlretrieve(url, archiveloc)
break
except Exception as e:
if x == 0:
printw('Could not download ceph-deploy. Retrying...')
elif x == retries-1:
printe('Could not download ceph-deploy: {}'.format(e))
return False
try:
extractloc = join(tmpdir, 'extracted')
mkdir(extractloc, exist_ok=True)
unpack(archiveloc, extractloc)
extracted_dir = next(ls(extractloc, only_dirs=True, full_paths=True)) # find out what the extracted directory is called. There will be only 1 extracted directory.
rm(location, ignore_errors=True)
mkdir(location)
for x in ls(extracted_dir, full_paths=True): # Move every file and directory to the final location.
mv(x, location)
return True
except Exception as e:
printe('Could not extract ceph-deploy zip file correctly: ', e)
return False
def _get_rados_dev(location, arrow_url, silent=False, retries=5):
with tempfile.TemporaryDirectory() as tmpdir: # We use a tempfile to store the downloaded archive.
archiveloc = join(tmpdir, 'rados-arrow.zip')
if not silent:
print('Fetching RADOS-arrow from {}'.format(arrow_url))
for x in range(retries):
try:
try:
rm(archiveloc)
except Exception as e:
pass
urllib.request.urlretrieve(arrow_url, archiveloc)
break
except Exception as e:
if x == 0:
printw('Could not download RADOS-arrow. Retrying...')
elif x == retries-1:
printe('Could not download RADOS-arrow: {}'.format(e))
return False
try:
extractloc = join(tmpdir, 'extracted')
mkdir(extractloc, exist_ok=True)
unpack(archiveloc, extractloc)
extracted_dir = next(ls(extractloc, only_dirs=True, full_paths=True)) # find out what the extracted directory is called. There will be only 1 extracted directory.
rm(location, ignore_errors=True)
mkdir(location)
for x in ls(extracted_dir, full_paths=True): # Move every file and directory to the final location.
mv(x, location)
return True
except Exception as e:
printe('Could not extract RADOS-arrow zip file correctly: {}'.format(e))
return False
def install_ceph_deploy(location, silent=False):
'''Install ceph-deploy on the admin node. Warning: Assumes `pip3` is installed and available (ceph-deploy is fetched as a zip archive, not via git).
Warning: This only has to be executed on 1 node, which will be designated the `ceph admin node`.
Args:
location (str): Location to install ceph-deploy in. Ceph-deploy root will be`location/ceph-deploy`.
Returns:
`True` on success, `False` on failure.'''
if library_exists('ceph_deploy'):
return True
if not pip_install(py='python3'):
return False
if not exists(location):
if not _get_ceph_deploy(location, silent=silent):
return False
kwargs = {'shell': True}
if silent:
kwargs['stderr'] = subprocess.DEVNULL
kwargs['stdout'] = subprocess.DEVNULL
return subprocess.call('pip3 install . --user', cwd=location, **kwargs) == 0
def install_ceph(hosts_designations_mapping, silent=False):
'''Installs required ceph daemons on all nodes. Requires updated package manager.
Warning: This only has to be executed on 1 node, which will be designated the `ceph admin node`.
Warning: Expects to find a 'designations' extra-info key, with as value a comma-separated string for each node in the reservation, listing its designations.
Daemons for the given designations will be installed. E.g. node.extra_info['designations'] = 'mon,mds,osd,osd' will install the monitor, metadata-server and osd daemons.
Note: Designations may be repeated, which will not change behaviour from listing designations once.
Warning: We assume apt package manager.
Note: If a host has an empty list as specification, we ignore it and do not install anything.
Args:
hosts_designations_mapping (dict(str, list(str))): Dict with key=hostname and value=list of hostname's `Designations` as strings.
hosts_user_mapping (dict(str, str)): Dict with key=hostname and val=username for host.
silent (optional bool): If set, does not print compilation progress, output, etc. Otherwise, all output will be available.
Returns:
`True` on success, `False` on failure.'''
ceph_deploypath = join(os.path.expanduser('~/'), '.local', 'bin', 'ceph-deploy')
kwargs = {'shell': True, 'stderr': subprocess.DEVNULL, 'stdout': subprocess.DEVNULL}
if subprocess.call('sudo apt update -y', **kwargs) != 0:
return False
if subprocess.call('{} install --common localhost'.format(ceph_deploypath), **kwargs) != 0:
return False
executors = []
for hostname, designations in hosts_designations_mapping.items():
if not any(designations): # If no designation given for node X, we skip installation of Ceph for X.
continue
designation_out = '--'+' --'.join([x.lower() for x in set(designations)])
executors.append(Executor('{} --overwrite-conf install --release octopus {} {}'.format(ceph_deploypath, designation_out, hostname), shell=True))
Executor.run_all(executors)
return Executor.wait_all(executors, print_on_error=True)
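# Example (illustrative): designations ['mon', 'osd', 'osd'] collapse
# through set() into a flag string such as '--mon --osd' (order not
# guaranteed) that is passed to ceph-deploy install.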
def install_rados(location, hosts_designations_mapping, arrow_url, force_reinstall=False, debug=False, silent=False, cores=16):
'''Installs RADOS-arrow, which we need for bridging with Arrow. This function should be executed from the admin node.
Warning: This only has to be executed on 1 node, which will be designated the `ceph admin node`.
Warning: Assumes apt package manager.
Args:
location (str): Location to install RADOS-arrow in. Ceph-deploy root will be`location/ceph-deploy`.
hosts_designations_mapping (dict(str, list(str))): Dict with key=hostname and value=list of hostname's `Designations` as strings.
arrow_url (str): Download URL for Arrow library to use with RADOS-Ceph.
force_reinstall (optional bool): If set, we always will re-download and install Arrow. Otherwise, we will skip installing if we already have installed Arrow.
debug (optional bool): If set, we compile Arrow using debug flags.
silent (optional bool): If set, does not print compilation progress, output, etc. Otherwise, all output will be available.
cores (optional int): Number of cores to use for compiling (default=16).
Note: Do not set this to a higher value than the number of available cores, as it would only lead to slowdowns.
If set too high, it may happen that RAM consumption is much too high, leading to kernel panic and termination of critical processes.
Returns:
`True` on success, `False` on failure.'''
kwargs = {'shell': True}
if silent:
kwargs['stderr'] = subprocess.DEVNULL
kwargs['stdout'] = subprocess.DEVNULL
if force_reinstall or not (exists('{}/cpp/build/latest'.format(location)) and any(ls('{}/cpp/build/latest'.format(location)))):
if subprocess.call('sudo rm -rf {}'.format(location), shell=True, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) != 0:
printe('Could not remove all files at {}'.format(location))
return False
if not silent:
print('Installing required libraries for RADOS-Ceph.\nPatience...')
cmd = 'sudo apt install libradospp-dev rados-objclass-dev openjdk-8-jdk openjdk-11-jdk default-jdk libboost-all-dev automake bison flex g++ libevent-dev libssl-dev libtool make pkg-config maven cmake thrift-compiler -y'
if subprocess.call(cmd, shell=True, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) != 0:
printe('Failed to install all required libraries. Command used: {}'.format(cmd))
return False
if not silent:
prints('Installed required libraries.')
if (not isdir(location)) and not _get_rados_dev(location, arrow_url, silent=silent, retries=5):
return False
cmake_cmd = 'cmake . -DARROW_PARQUET=ON -DARROW_DATASET=ON -DARROW_JNI=ON -DARROW_ORC=ON -DARROW_CSV=ON -DARROW_CLS=ON'
if debug:
cmake_cmd += ' -DCMAKE_BUILD_TYPE=Debug'
print ("!!!! " + cmake_cmd + " !!!!!")
my_env = os.environ.copy()
my_env["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
print(my_env)
subprocess.call(cmake_cmd+' 1>&2', cwd='{}/cpp'.format(location), env=my_env, **kwargs)
if subprocess.call(cmake_cmd+' 1>&2', cwd='{}/cpp'.format(location), env=my_env, **kwargs) != 0:
return False
if subprocess.call('sudo make install -j{} 1>&2'.format(cores), cwd='{}/cpp'.format(location), **kwargs) != 0:
return False
hosts = [key for key, value in hosts_designations_mapping.items() if any(value)] # Only nodes joining the ceph cluster will receive the libraries
executors = [Executor('ssh {} "mkdir -p ~/.arrow-libs/ && sudo mkdir -p /usr/lib/rados-classes/"'.format(x), shell=True, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) for x in hosts]
Executor.run_all(executors)
if not Executor.wait_all(executors, print_on_error=True):
printe('Could not create required directories on all nodes.')
return False
executors = [Executor('scp {}/cpp/build/latest/libcls* {}:~/.arrow-libs/'.format(location, x), **kwargs) for x in hosts]
executors += [Executor('scp {}/cpp/build/latest/libarrow* {}:~/.arrow-libs/'.format(location, x), **kwargs) for x in hosts]
executors += [Executor('scp {}/cpp/build/latest/libparquet* {}:~/.arrow-libs/'.format(location, x), **kwargs) for x in hosts]
Executor.run_all(executors)
if not Executor.wait_all(executors, print_on_error=True):
printe('Could not scp Arrow libraries to all nodes.')
return False
executors = [Executor('ssh {} "sudo cp ~/.arrow-libs/libcls* /usr/lib/rados-classes/"'.format(x), **kwargs) for x in hosts]
executors += [Executor('ssh {} "sudo cp ~/.arrow-libs/libarrow* /usr/lib/"'.format(x), **kwargs) for x in hosts]
executors += [Executor('ssh {} "sudo cp ~/.arrow-libs/libparquet* /usr/lib/"'.format(x), **kwargs) for x in hosts]
Executor.run_all(executors)
if not Executor.wait_all(executors, print_on_error=True):
printe('Could not copy libraries to destinations on all nodes.')
return False
env = Environment()
env.load_to_env()
libpath = env.get('LD_LIBRARY_PATH')
if not libpath:
libpath = ''
if not libpath or not '/usr/local/lib' in libpath.strip().split(':'):
env.set('LD_LIBRARY_PATH', '/usr/local/lib:'+libpath)
os.environ['LD_LIBRARY_PATH'] = '/usr/local/lib:'+libpath
return subprocess.call('sudo cp /usr/local/lib/libparq* /usr/lib/', **kwargs) == 0
| 54.963303 | 227 | 0.647304 | 1,562 | 11,982 | 4.898207 | 0.220871 | 0.022219 | 0.009411 | 0.010064 | 0.547641 | 0.509868 | 0.467521 | 0.424781 | 0.405176 | 0.381127 | 0 | 0.004078 | 0.242864 | 11,982 | 217 | 228 | 55.21659 | 0.839286 | 0.284176 | 0 | 0.509554 | 0 | 0.031847 | 0.235818 | 0.036554 | 0.006369 | 0 | 0 | 0 | 0 | 1 | 0.031847 | false | 0.012739 | 0.025478 | 0 | 0.197452 | 0.133758 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45bc17edfd4d56c459c035506d016dde933b4b38 | 1,539 | py | Python | segment.py | jorjao81/zh-learn | 9d14033361918a759dd245984c35c1cb8aa3f24f | [
"MIT"
] | 2 | 2020-12-04T16:23:02.000Z | 2021-12-25T12:54:45.000Z | segment.py | jorjao81/zh-learn | 9d14033361918a759dd245984c35c1cb8aa3f24f | [
"MIT"
] | null | null | null | segment.py | jorjao81/zh-learn | 9d14033361918a759dd245984c35c1cb8aa3f24f | [
"MIT"
] | 1 | 2021-12-22T20:17:08.000Z | 2021-12-22T20:17:08.000Z | from pysubparser import parser
from pysubparser.util import time_to_millis
from pydub import AudioSegment
# find segments of conversation
import sys
FIVE_SECONDS = 5000
def get_segments(subtitles):
segments = []
prev_end = -1000000
curr_segment = None
for subtitle in subtitles:
this_start = time_to_millis(subtitle.start)
if this_start - prev_end > FIVE_SECONDS:
if curr_segment != None:
segments.append(curr_segment)
curr_segment = []
curr_segment.append(subtitle)
prev_end = time_to_millis(subtitle.end)
# append the last segment, if any subtitles were parsed
if curr_segment is not None:
segments.append(curr_segment)
return segments
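# Example: subtitles ending at 2s and 4s followed by one starting at 20s
# yield two segments, since the 16s gap exceeds FIVE_SECONDS (5000 ms).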
def print_segment(seg):
print(seg[0].start)
for sub in seg:
print(sub.text)
print(seg[-1].end)
print("------------------------------------")
print("Segment duration: " + str((time_to_millis(seg[-1].end) - time_to_millis(seg[0].start))/1000))
print("====================================")
audio_filename = sys.argv[1]
subtitle_filename = sys.argv[2]
subtitles = parser.parse(subtitle_filename)
segments = get_segments(subtitles)
song = AudioSegment.from_mp3(audio_filename)
folder = "out/"
episode = "e01"
n = 1
for seg in segments:
start = time_to_millis(seg[0].start) - 1000
end = time_to_millis(seg[-1].end) + 1500
cut = song[start:end]
cut.export(folder + episode + "_seg" + str(n) + ".mp3", format="mp3")
print("===== Segment " + str(n) + " ========")
print_segment(seg)
n += 1
| 22.632353 | 104 | 0.622482 | 195 | 1,539 | 4.723077 | 0.312821 | 0.045603 | 0.091205 | 0.065147 | 0.098806 | 0.095548 | 0.054289 | 0 | 0 | 0 | 0 | 0.031379 | 0.213125 | 1,539 | 67 | 105 | 22.970149 | 0.729149 | 0.031839 | 0 | 0.046512 | 0 | 0 | 0.088156 | 0.048452 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0 | 0.093023 | 0 | 0.162791 | 0.209302 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45bc4038c9107e889c840e27905e254cabf514d3 | 4,766 | py | Python | bot.py | BinaryWorld0101201/RSS_Feederbot | 177c175fc309645661d2dd804e76f5dc8c5d726e | [
"MIT"
] | 1 | 2019-05-14T11:34:40.000Z | 2019-05-14T11:34:40.000Z | bot.py | BinaryWorld0101201/RSS_Feederbot | 177c175fc309645661d2dd804e76f5dc8c5d726e | [
"MIT"
] | null | null | null | bot.py | BinaryWorld0101201/RSS_Feederbot | 177c175fc309645661d2dd804e76f5dc8c5d726e | [
"MIT"
] | null | null | null | try:
import feedparser, html2text, asyncio, json, datetime, telepot
from loguru import logger
from telepot.aio.loop import MessageLoop
from telepot.aio.delegate import per_chat_id, create_open, pave_event_space
except ImportError:
print("Failed to import required modules.")
class RSS(telepot.aio.helper.ChatHandler):
def __init__(self, *args, **kwargs):
super(RSS, self).__init__(*args, **kwargs)
async def date_title(self, file_name, object_name, date_title: str):
"""Set the date/title of latest post from a source.
file_name: File name to open.
object_name: Name of the object: feed name or twitter screen name.
date_title: Date/title of the object being posted."""
try:
with open(file_name, "r+") as data_file:
# Load json structure into memory.
items = json.load(data_file)
for name, data in items.items():
if name == object_name:
# Replace value of date/title with date_title
data["date_title"] = date_title
# Go to the top of feeds.json file.
data_file.seek(0)
# Dump the new json structure to the file.
json.dump(items, data_file, indent=2)
data_file.truncate()
data_file.close()
except IOError:
logger.debug("date_title(): Failed to open requested file.")
async def feed_to_md(self, state, name, feed_data):
"""A Function for converting rss feeds into markdown text.
state: Either `set` or `None`: To execute date_title()
name: Name of RSS feed object: eg: hacker_news
feed_data: Data of the feed: URL and post_date from feeds.json"""
# Parse rss feed.
d = feedparser.parse(feed_data["url"])
# Target the first post.
first_post = d["entries"][0]
title = first_post["title"]
summary = first_post["summary"]
post_date = first_post["published"]
link = first_post["link"]
h = html2text.HTML2Text()
h.ignore_images = True
h.ignore_links = True
summary = h.handle(summary)
if ((state) == ("set")):
logger.debug(f"Running date_title for feeds.json at {datetime.datetime.now()}")
# persist the new title via self.date_title()
await self.date_title("feeds.json", name, title)
results = []
result = {"title": title, "summary": summary,
"url": link, "post_date": post_date}
results.append(result)
# A list containing the dict object result.
return results
async def file_reader(self, path, mode):
"""Loads json data from path specified.
path: Path to target_file.
mode: Mode for file to be opened in."""
try:
with open(path, mode) as target_file:
data = json.load(target_file)
target_file.close()
return data
except IOError:
logger.debug(f"Failed to open {path}")
async def on_chat_message(self, msg):
if msg["text"] == "/start":
logger.start("file_{time}.log", rotation="300 MB")
while True:
logger.debug("Checking Feeds!")
feeds = await self.file_reader("feeds.json", "r")
for name, feed_data in feeds.items():
results = await self.feed_to_md(None, name, feed_data)
# Checking if title is the same as date in feeds.json file.
# If the same, pass; do nothing.
if ((feed_data["date_title"]) == (results[0]["title"])):
pass
elif ((feed_data["date_title"]) != (results[0]["title"])):
results = await self.feed_to_md("set", name, feed_data)
logger.debug(f"Running feed_to_md at {datetime.datetime.now()}")
rss_msg = f"""[{results[0]["title"]}]({results[0]["url"]})\n{results[0]["summary"]}"""
await self.bot.sendMessage(msg["chat"]["id"], rss_msg, parse_mode="Markdown")
# Sleep for 30 mins before re-checking.
logger.debug("Sleeping for 30 mins.")
await asyncio.sleep(1800)
if __name__ == "__main__":
TOKEN = "Insert Key Here."
bot = telepot.aio.DelegatorBot(TOKEN, [
pave_event_space()(
per_chat_id(), create_open, RSS, timeout=10),
])
loop = asyncio.get_event_loop()
loop.create_task(MessageLoop(bot).run_forever())
print('Listening ...')
loop.run_forever()
| 42.936937 | 110 | 0.563785 | 585 | 4,766 | 4.42906 | 0.304274 | 0.055577 | 0.01235 | 0.011579 | 0.056349 | 0.041683 | 0.023157 | 0 | 0 | 0 | 0 | 0.007442 | 0.323332 | 4,766 | 110 | 111 | 43.327273 | 0.795969 | 0.080781 | 0 | 0.064935 | 0 | 0.012987 | 0.13869 | 0.031557 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012987 | false | 0.012987 | 0.077922 | 0 | 0.12987 | 0.025974 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45be769f411fd4437219b4961db9cd7cef98434f | 1,423 | py | Python | Scripts/005_pyo/scripts/tutorial/s042_hilbert_transform.py | OrangePeelFX/Python-Tutorial | 0d47f194553666304765f5bbc928374b7aec8a48 | [
"MIT"
] | null | null | null | Scripts/005_pyo/scripts/tutorial/s042_hilbert_transform.py | OrangePeelFX/Python-Tutorial | 0d47f194553666304765f5bbc928374b7aec8a48 | [
"MIT"
] | 1 | 2021-06-02T00:28:17.000Z | 2021-06-02T00:28:17.000Z | Scripts/005_pyo/scripts/tutorial/s042_hilbert_transform.py | florianwns/python-scripts | 0d47f194553666304765f5bbc928374b7aec8a48 | [
"MIT"
] | 1 | 2020-01-13T11:08:18.000Z | 2020-01-13T11:08:18.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Barberpole-style phasing effect.

This example uses two frequency shifters (based on complex modulation)
that linearly shift the frequency content of a sound.

Frequency shifting is similar to ring modulation, except that the upper
and lower sidebands are split into individual outputs.
"""
from pyo import *
class ComplexMod:
"""
Complex modulation used to shift the frequency
spectrum of the input sound.
"""
def __init__(self, hilb, freq):
# Quadrature oscillator (sine, cosine).
self._quad = Sine(freq, [0, 0.25])
# real * cosine.
self._mod1 = hilb['real'] * self._quad[1]
# imaginary * sine.
self._mod2 = hilb['imag'] * self._quad[0]
# Up shift corresponds to the sum frequencies.
self._up = (self._mod1 + self._mod2) * 0.7
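# Trig sketch: with real = cos(2*pi*f*t) and imag = +/-sin(2*pi*f*t)
# (the sign depends on pyo's Hilbert convention),
# real*cos(2*pi*g*t) + imag*sin(2*pi*g*t) = cos(2*pi*(f -/+ g)*t),
# so a single sideband survives, at the sum or difference frequency.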
def out(self, chnl=0):
self._up.out(chnl)
return self
s = Server().boot().start()
# Large spectrum source.
src = PinkNoise(.2)
# Apply the Hilbert transform.
hilb = Hilbert(src)
# LFOs controlling the amount of frequency shifting.
lf1 = Sine(.03, mul=6)
lf2 = Sine(.05, mul=6)
# Stereo Single-Sideband Modulation.
wetl = ComplexMod(hilb, lf1).out()
wetr = ComplexMod(hilb, lf2).out(1)
# Mixed with the dry sound.
dry = src.mix(2).out()
s.gui(locals())
| 25.410714 | 79 | 0.668306 | 200 | 1,423 | 4.69 | 0.625 | 0.025586 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025067 | 0.215039 | 1,423 | 55 | 80 | 25.872727 | 0.814682 | 0.511595 | 0 | 0 | 0 | 0 | 0.012158 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.142857 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45bebfee1fc6c99b8d76516c463bc09140a29f6d | 8,885 | py | Python | odfdo/paragraph_base.py | mat-m/odfdo | a4a509a056517ecf91449e029b36fe9a8ffa8ed0 | [
"Apache-2.0"
] | null | null | null | odfdo/paragraph_base.py | mat-m/odfdo | a4a509a056517ecf91449e029b36fe9a8ffa8ed0 | [
"Apache-2.0"
] | null | null | null | odfdo/paragraph_base.py | mat-m/odfdo | a4a509a056517ecf91449e029b36fe9a8ffa8ed0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Jérôme Dumonteil
# Copyright (c) 2009-2013 Ars Aperta, Itaapy, Pierlis, Talend.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Authors (odfdo project): jerome.dumonteil@gmail.com
# The odfdo project is a derivative work of the lpod-python project:
# https://github.com/lpod/lpod-python
# Authors: David Versmisse <david.versmisse@itaapy.com>
# Hervé Cauwelier <herve@itaapy.com>
# Romain Gauthier <romain@itaapy.com>
# Jerome Dumonteil <jerome.dumonteil@itaapy.com>
import re
from .element import Element, register_element_class, to_str
from .element import Text
_rsplitter = re.compile(r'(\n|\t| +)')
_rspace = re.compile(r'^ +$')
def _get_formatted_text(element, context, with_text=True):
document = context.get('document', None)
rst_mode = context.get('rst_mode', False)
result = []
if with_text:
objects = element.xpath('*|text()')
else:
objects = element.children
for obj in objects:
if isinstance(obj, Text):
result.append(obj)
continue
tag = obj.tag
# Good tags with text
if tag in ('text:a', 'text:p'):
result.append(_get_formatted_text(obj, context, with_text=True))
continue
# Try to convert some styles in rst_mode
if tag == 'text:span':
text = _get_formatted_text(obj, context, with_text=True)
if not rst_mode:
result.append(text)
continue
if not text.strip():
result.append(text)
continue
style = obj.style
if not style:
result.append(text)
continue
if document:
style = document.get_style('text', style)
properties = style.get_properties()
else:
properties = None
if properties is None:
result.append(text)
continue
# Compute before, text and after
before = ''
for c in text:
if c.isspace():
before += c
else:
break
after = ''
for c in reversed(text):
if c.isspace():
after = c + after
else:
break
text = text.strip()
# Bold ?
if properties.get('fo:font-weight') == 'bold':
result.append(before)
result.append('**')
result.append(text)
result.append('**')
result.append(after)
continue
# Italic ?
if properties.get('fo:font-style') == 'italic':
result.append(before)
result.append('*')
result.append(text)
result.append('*')
result.append(after)
continue
# Unknown style, ...
result.append(before)
result.append(text)
result.append(after)
continue
# Footnote or endnote
if tag == 'text:note':
note_class = obj.note_class
container = {
'footnote': context['footnotes'],
'endnote': context['endnotes']
}[note_class]
citation = obj.citation
if not citation:
# Would only happen with hand-made documents
citation = len(container)
body = obj.note_body
container.append((citation, body))
if rst_mode:
marker = {
'footnote': " [#]_ ",
'endnote': " [*]_ "
}[note_class]
else:
marker = {
'footnote': "[{citation}]",
'endnote': "({citation})"
}[note_class]
result.append(marker.format(citation=citation))
continue
# Annotations
if tag == 'office:annotation':
context['annotations'].append(obj.note_body)
if rst_mode:
result.append(' [#]_ ')
else:
result.append('[*]')
continue
# Tabulation
if tag == 'text:tab':
result.append('\t')
continue
# Line break
if tag == 'text:line-break':
if rst_mode:
result.append('\n|')
else:
result.append('\n')
continue
# other cases:
result.append(obj.get_formatted_text(context))
return ''.join(result)
class Spacer(Element):
    """This element shall be used to represent the second and all following “ “
    (U+0020, SPACE) characters in a sequence of “ “ (U+0020, SPACE) characters.
    Note: It is not an error if the character preceding the element is not a
    white space character, but it is good practice to use this element only for
    the second and all following SPACE characters in a sequence.
    """
    _tag = 'text:s'
    _properties = (('number', 'text:c'), )

    def __init__(self, number=1, **kwargs):
        """
        Arguments:

            number -- int

        Return: Space
        """
        super().__init__(**kwargs)
        if self._do_init:
            self.number = str(number)


Spacer._define_attribut_property()


class Tab(Element):
    """This element represents the [UNICODE] tab character (HORIZONTAL
    TABULATION, U+0009).
    The position attribute contains the number of the tab-stop to which
    a tab character refers. The position 0 marks the start margin of a
    paragraph. Note: The position attribute is only a hint to help non-layout
    oriented consumers to determine the tab/tab-stop association. Layout
    oriented consumers should determine the tab positions based on the style
    information.
    """
    _tag = 'text:tab'
    _properties = (('position', 'text:tab-ref'), )

    def __init__(self, position=None, **kwargs):
        """
        Arguments:

            position -- int

        Return: Tab
        """
        super().__init__(**kwargs)
        if self._do_init:
            if position is not None:
                if position >= 0:
                    self.position = str(position)


Tab._define_attribut_property()


class LineBreak(Element):
    """This element represents a line break "text:line-break".

    Return: LineBreak
    """
    _tag = 'text:line-break'

    def __init__(self, **kwargs):
        super().__init__(**kwargs)


class ParagraphBase(Element):
    """Base class for Paragraph like classes."""
    _tag = 'text:p-odfdo-notodf'
    _properties = (('style', 'text:style-name'), )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def get_formatted_text(self, context=None, simple=False):
        if not context:
            context = {
                'document': None,
                'footnotes': [],
                'endnotes': [],
                'annotations': [],
                'rst_mode': False,
                'img_counter': 0,
                'images': [],
                'no_img_level': 0
            }
        content = _get_formatted_text(self, context, with_text=True)
        if simple:
            return content
        else:
            return content + '\n\n'

    def append_plain_text(self, text=''):
        """Append plain text to the paragraph, replacing <CR>, <TAB>
        and multiple spaces by the corresponding ODF tags.
        """
        text = to_str(text)
        blocs = _rsplitter.split(text)
        for b in blocs:
            if not b:
                continue
            if b == '\n':
                self.append(LineBreak())
                continue
            if b == '\t':
                self.append(Tab())
                continue
            if _rspace.match(b):
                # follow the ODF standard: n spaces => one space + spacer(n-1)
                self.append(' ')
                self.append(Spacer(len(b) - 1))
                continue
            # standard piece of text:
            self.append(b)


ParagraphBase._define_attribut_property()

register_element_class(Spacer)
register_element_class(Tab)
register_element_class(LineBreak)
register_element_class(ParagraphBase)
| 31.39576 | 79 | 0.538323 | 942 | 8,885 | 4.956476 | 0.279193 | 0.066824 | 0.023988 | 0.016278 | 0.152495 | 0.077104 | 0.077104 | 0.051831 | 0.035554 | 0.035554 | 0 | 0.006149 | 0.35937 | 8,885 | 282 | 80 | 31.507092 | 0.814125 | 0.270906 | 0 | 0.346369 | 0 | 0 | 0.074181 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039106 | false | 0 | 0.01676 | 0 | 0.134078 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45c2d3a01088393fc4acdc16b9de54511df97676 | 17,834 | py | Python | reveries/maya/pipeline.py | davidlatwe/reveries-config | 4a282dd64a32a9b87bd1a070759b6425ff785d68 | [
"MIT"
] | 3 | 2020-04-01T10:51:17.000Z | 2021-08-05T18:35:23.000Z | reveries/maya/pipeline.py | davidlatwe/reveries-config | 4a282dd64a32a9b87bd1a070759b6425ff785d68 | [
"MIT"
] | null | null | null | reveries/maya/pipeline.py | davidlatwe/reveries-config | 4a282dd64a32a9b87bd1a070759b6425ff785d68 | [
"MIT"
] | 1 | 2020-07-05T12:06:30.000Z | 2020-07-05T12:06:30.000Z |
import os
import logging

import avalon.api  # referenced below via avalon.api.registered_root()
import avalon.maya
import avalon.io

from collections import defaultdict

from avalon.maya.pipeline import (
    AVALON_CONTAINER_ID,
    AVALON_CONTAINERS,
    containerise,
)
from maya import cmds

from . import lib
from .vendor import sticker
from .capsule import namespaced, nodes_locker
from .. import REVERIES_ICONS, utils


AVALON_GROUP_ATTR = "subsetGroup"
AVALON_CONTAINER_ATTR = "container"

log = logging.getLogger(__name__)

_node_lock_state = {"_": None}


def is_editable():
    return _node_lock_state["_"] is None


def reset_edit_lock():
    _node_lock_state["_"] = None


def lock_edit():
    """Restrict scene modifications

    All nodes will be locked, except:
        * default nodes
        * startup cameras
        * renderLayerManager

    """
    all_nodes = set(cmds.ls(objectsOnly=True, long=True))
    defaults = set(cmds.ls(defaultNodes=True))
    cameras = set(lib.ls_startup_cameras())
    materials = set(cmds.ls(materials=True))

    nodes_to_lock = list((all_nodes - defaults - cameras).union(materials))
    nodes_to_lock.remove("renderLayerManager")

    # Save current lock state
    _node_lock_state["_"] = lib.acquire_lock_state(nodes_to_lock)

    # Lock
    lib.lock_nodes(nodes_to_lock)


def unlock_edit():
    """Unleash scene modifications

    Restore all nodes' previous lock states

    """
    lib.restore_lock_state(_node_lock_state["_"])
    reset_edit_lock()
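
# A minimal usage sketch (hypothetical, not part of the original module):
# the two functions above form a save/restore pair around any operation that
# must not modify the scene.
#
#   >>> lock_edit()              # snapshot lock states, then lock everything
#   >>> assert not is_editable()
#   >>> ...                      # run the protected operation here
#   >>> unlock_edit()            # restore the snapshot taken by lock_edit()
#   >>> assert is_editable()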
def env_embedded_path(path):
    """Embed the environment vars `$AVALON_PROJECTS` and `$AVALON_PROJECT` into path

    This ensures reference or cache paths stay resolvable when the project
    root moves to another place.

    """
    path = path.replace(
        avalon.api.registered_root(), "$AVALON_PROJECTS", 1
    )
    path = path.replace(
        avalon.Session["AVALON_PROJECT"], "$AVALON_PROJECT", 1
    )

    return path


def subset_group_name(namespace, name):
    return "{}:{}".format(namespace, name)


def container_naming(namespace, name, suffix):
    return "%s_%s_%s" % (namespace, name, suffix)


def unique_root_namespace(asset_name, family_name, parent_namespace=""):
    unique = avalon.maya.lib.unique_namespace(
        asset_name + "_" + family_name + "_",
        prefix=parent_namespace + ("_" if asset_name[0].isdigit() else ""),
        suffix="_",
    )
    return ":" + unique  # Ensure in root
def get_container_from_namespace(namespace):
    """Return container node from namespace

    Raise `RuntimeError` if no container is matched; warn if more than one
    is matched.

    Arguments:
        namespace (str): Namespace string

    Returns a str (or a list of nodes when `namespace` contains a wildcard)

    """
    nodes = lib.lsAttrs({"id": AVALON_CONTAINER_ID},
                        # (TODO) This `namespace` should be attribute, not
                        #        node's namespace.
                        namespace=namespace)
    if "*" in namespace:
        return nodes

    if not nodes:
        raise RuntimeError("No matched container, this is a bug.")

    if len(nodes) > 1:
        cmds.warning("Has more than one matched container, "
                     "returning first matched.")

    return nodes[0]


def iter_containers_from_namespace(namespace):
    """Yield container nodes from namespace

    Arguments:
        namespace (str): Namespace string

    Yields str

    """
    for node in lib.lsAttrs({"id": AVALON_CONTAINER_ID,
                             "namespace": namespace}):
        yield node


def get_container_from_group(group):
    """Return container node from subset group

    If the `group` is not a subset group node, return `None`.

    Args:
        group (str): Subset group node name

    Return:
        str or None

    """
    if not cmds.objExists(group):
        return None

    nodes = list()
    for node in cmds.listConnections(group + ".message",
                                     destination=True,
                                     source=False,
                                     type="objectSet") or []:
        if not cmds.objExists(node + ".id"):
            continue
        if cmds.getAttr(node + ".id") == AVALON_CONTAINER_ID:
            nodes.append(node)

    assert len(nodes) == 1, ("Group node has none or more than one container, "
                             "this is a bug.")

    return nodes[0]


def get_group_from_container(container, long=True):
    """Get top group node name from container node

    Arguments:
        container (str): Name of container node

    """
    try:
        group = cmds.listConnections(container + ".subsetGroup",
                                     source=True,
                                     destination=False,
                                     plugs=False)
        return cmds.ls(group, long=long)[0]
    except ValueError:
        # The subset of family 'look' does not have a subsetGroup.
        return None
    except IndexError:
        raise Exception("Container '%s' exists but subsetGroup does not, "
                        "possibly a dirty scene." % container)
def apply_namespace_wrapper(namespace, nodes):
    """Put nodes into a namespace wrapper objectSet

    For nodes that could not or do not have a namespace: by putting them
    into a special objectSet, those nodes can still be found by the tools
    which require a namespace to work with.

    That special objectSet node is the actual member of the namespace,
    and its AvalonID value must be `lib.AVALON_NAMESPACE_WRAPPER_ID`.

    Those tools must use `.lib.ls_nodes_by_id` to find nodes; that
    function implements reading the nodes inside the wrapper.

    (NOTE): Nodes that already have a namespace will be ignored.

    Example use case:
        Currently used by XGen Legacy type subsets, since XGen Legacy does
        not fully support namespaces.

    Args:
        namespace (str): Namespace string that will apply to the wrapper
        nodes (list): A list of nodes that need to be wrapped

    Returns:
        (list): A list of wrapper nodes

    """
    from .utils import get_id_namespace, id_namespace, upsert_id

    id_cache = dict()
    wrapper_group = defaultdict(list)

    for node in nodes:
        # Ignore nodes that already have a namespace
        if lib.get_ns(node) != ":":
            continue

        asset_id = get_id_namespace(node)
        if asset_id is None:
            continue

        if asset_id not in id_cache:
            id_cache[asset_id] = avalon.io.find_one(
                {"_id": avalon.io.ObjectId(asset_id)},
                projection={"name": True})["name"]

        asset_name = id_cache[asset_id]
        wrapper = namespace + ":wrapper_" + asset_name
        wrapper_group[(wrapper, asset_id)].append(node)

    wrappers = list()
    for (wrapper, asset_id), nodes in wrapper_group.items():
        if not cmds.objExists(wrapper):
            cmds.createNode("objectSet", name=wrapper)
            with id_namespace(asset_id):
                upsert_id(wrapper, id=lib.AVALON_NAMESPACE_WRAPPER_ID)

        cmds.sets(nodes, forceElement=wrapper)
        wrappers.append(wrapper)

    return wrappers
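
# Hypothetical walk-through of the grouping above (node and asset names
# invented for illustration): two namespace-less nodes tagged with the same
# asset id end up grouped under one shared wrapper objectSet.
#
#   >>> apply_namespace_wrapper(":hero_xgen_01_", ["hairSystem1", "hairSystem2"])
#   [':hero_xgen_01_:wrapper_hero']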
def container_metadata(container):
    """Get additional data from container node

    Arguments:
        container (str): Name of container node

    Returns:
        (dict)

    """
    return {}


def parse_container(container):
    """Parse data from container node with additional data

    Arguments:
        container (str): Name of container node

    Returns:
        data (dict)

    """
    data = avalon.maya.pipeline.parse_container(container)
    data.update(container_metadata(container))

    return data


def update_container(container, asset, subset, version, representation,
                     rename_group=True):
    """Update container node attributes' value and namespace

    Arguments:
        container (dict): container document
        asset (dict): asset document
        subset (dict): subset document
        version (dict): version document
        representation (dict): representation document
        rename_group (bool): rename group

    """
    container_node = container["objectName"]

    namespace = container["namespace"]
    child_namespace = namespace.rsplit(":", 1)[-1]
    # This relies on the unique namespace's naming rule
    origin_family = child_namespace.rsplit("_", 3)[1]

    if subset["schema"] == "avalon-core:subset-3.0":
        family = subset["data"]["families"][0]
    else:
        family = version["data"]["families"][0]
    family_name = family.split(".")[-1]

    log.info("Updating container '%s'..." % container_node)

    # Update namespace
    asset_changed = container["assetId"] != str(asset["_id"])
    family_changed = origin_family != family_name

    if (asset_changed or family_changed):
        parent_namespace = namespace.rsplit(":", 1)[0] + ":"
        with namespaced(parent_namespace, new=False) as parent_namespace:
            parent_namespace = parent_namespace[1:]
            asset_name = asset["data"].get("shortName", asset["name"])
            new_namespace = unique_root_namespace(asset_name,
                                                  family_name,
                                                  parent_namespace)
            cmds.namespace(parent=":" + parent_namespace,
                           rename=(child_namespace,
                                   new_namespace[1:].rsplit(":", 1)[-1]))

        namespace = new_namespace

    # Update data
    for key, value in {
        "name": subset["name"],
        "namespace": namespace,
        "assetId": str(asset["_id"]),
        "subsetId": str(subset["_id"]),
        "versionId": str(version["_id"]),
        "representation": str(representation["_id"]),
    }.items():
        cmds.setAttr(container_node + "." + key, value, type="string")

    name = subset["name"]

    # Rename group node
    if rename_group:
        group = get_group_from_container(container_node)
        new_name = subset_group_name(namespace, name)
        if group and group != new_name and cmds.objExists(group):
            group = cmds.rename(group, new_name)
            log.info("Subset group renamed to '%s'." % group)

    # Rename container
    container_node = cmds.rename(
        container_node, container_naming(namespace, name, "CON"))
    log.info("Container renamed to '%s'." % container_node)

    # Rename reference node
    members = cmds.sets(container_node, query=True)
    reference_node = next(iter(lib.get_reference_nodes(members)), None)
    if reference_node:
        with nodes_locker(reference_node, False, False, False):
            cmds.rename(reference_node, namespace + "RN")
def subset_containerising(name,
                          namespace,
                          container_id,
                          nodes,
                          context,
                          cls_name,
                          group_name):
    """Containerise loaded subset

    Containerizing imported/referenced nodes and connect subset group
    node's `message` attribute to container node.

    Arguments:
        name (str): Name of resulting assembly
        namespace (str): Namespace under which to host container
        container_id (str): Container UUID
        nodes (list): Long names of imported/referenced nodes
        context (dict): Asset information
        cls_name (str): avalon Loader class name
        group_name (str): Top group node of imported/referenced new nodes

    """
    container = containerise(name=name,
                             namespace=namespace,
                             nodes=nodes,
                             context=context,
                             loader=cls_name)

    # Add additional data
    for key, value in {
        "containerId": container_id,
        "assetId": str(context["asset"]["_id"]),
        "subsetId": str(context["subset"]["_id"]),
        "versionId": str(context["version"]["_id"]),
    }.items():
        cmds.addAttr(container, longName=key, dataType="string")
        cmds.setAttr(container + "." + key, value, type="string")

    # Connect subset group
    if group_name and cmds.objExists(group_name):
        lib.connect_message(group_name, container, AVALON_GROUP_ATTR)

    # Put icon to main container
    main_container = cmds.ls(AVALON_CONTAINERS, type="objectSet")[0]
    _icon = os.path.join(REVERIES_ICONS, "container_main-01.png")
    sticker.put(main_container, _icon)

    # Apply icons
    container_icon = os.path.join(REVERIES_ICONS, "container-01.png")
    sticker.put(container, container_icon)
    if cmds.objExists(group_name):
        package_icon = os.path.join(REVERIES_ICONS, "package-01.png")
        sticker.put(group_name, package_icon)

    return parse_container(container)


def put_instance_icon(instance):
    instance_icon = os.path.join(REVERIES_ICONS, "instance-01.png")
    sticker.put(instance, instance_icon)
    return instance
def find_stray_textures(nodes=lib._no_val):
    """Find file nodes which point to files that are not in published space
    """
    stray = list()
    containers = lib.lsAttr("id", AVALON_CONTAINER_ID)

    args = (nodes, ) if nodes is not lib._no_val else ()
    for file_node in cmds.ls(*args, type="file"):
        # Not in published space
        file_path = cmds.getAttr(file_node + ".fileTextureName")
        if file_path and not lib.is_versioned_texture_path(file_path):
            stray.append(file_node)
            continue
        # OR
        # Not containerized
        sets = cmds.listSets(object=file_node) or []
        if not any(s in containers for s in sets):
            stray.append(file_node)

    return stray


_uuid_required_node_types = {
    "reveries.model": {
        "transform",
    },
    "reveries.rig": {
        "transform",
    },
    "reveries.look": {
        "transform",
        # (TODO): Map shaders with shadingEngine id
        "shadingEngine",
        # "shadingDependNode",
        # "THdependNode",
        "uvChooser",
    },
    "reveries.setdress": {
        "transform",
    },
    "reveries.camera": {
        "transform",
        "camera",
    },
    "reveries.lightset": {
        "transform",
        "light",
        "locator",
    },
    "reveries.xgen": {
        "transform",
        # Listed from cmds.listNodeTypes("xgen/spline")
        # "xgmCurveToSpline",
        "xgmModifierClump",
        "xgmModifierCollision",
        "xgmModifierCut",
        "xgmModifierDisplacement",
        "xgmModifierGuide",
        "xgmModifierLinearWire",
        "xgmModifierNoise",
        "xgmModifierScale",
        "xgmModifierSculpt",
        "xgmSeExpr",
        "xgmSplineBase",
        "xgmSplineCache",
        "xgmSplineDescription",
        "xgmPalette",
        "xgmDescription",
    },
}


def uuid_required_node_types(family):
    try:
        types = _uuid_required_node_types[family]
    except KeyError:
        if family == "reveries.mayashare":
            types = set()
            for typ in _uuid_required_node_types.values():
                types.update(typ)
        else:
            raise

    return types
def has_turntable():
    """Return turntable asset name if scene has loaded one

    Returns:
        str: turntable asset name if the scene has a turntable asset loaded,
             else `None`

    """
    project = avalon.io.find_one({"type": "project"},
                                 {"data.pipeline.maya": True})
    turntable = project["data"]["pipeline"]["maya"].get("turntable")

    if turntable is None:
        return None

    if get_container_from_namespace(":{}_*".format(turntable)):
        return turntable


_current_fps = {"_": None}


def set_scene_timeline(project=None, asset_name=None, strict=True):
    """Set timeline to the correct frame range for the asset

    Args:
        project (dict, optional): Project document, queried from the database
            if not provided.
        asset_name (str, optional): Asset name, taken from `avalon.Session` if
            not provided.
        strict (bool, optional): Whether or not to set exactly the frame range
            pre-defined for the asset, or leave the scene start/end untouched
            as long as the start/end frames cover the pre-defined range.
            Default `True`.

    """
    log.info("Timeline setting...")

    current_fps = _current_fps["_"] or lib.current_fps()
    _current_fps["_"] = None

    start_frame, end_frame, fps = utils.compose_timeline_data(project,
                                                              asset_name,
                                                              current_fps)
    fps = lib.FPS_MAP.get(fps)
    if fps is None:
        raise ValueError("Unsupported FPS value: {}".format(fps))
    cmds.currentUnit(time=fps)

    if not strict:
        scene_start = cmds.playbackOptions(query=True, minTime=True)
        if start_frame < scene_start:
            cmds.playbackOptions(animationStartTime=start_frame)
            cmds.playbackOptions(minTime=start_frame)

        scene_end = cmds.playbackOptions(query=True, maxTime=True)
        if end_frame > scene_end:
            cmds.playbackOptions(animationEndTime=end_frame)
            cmds.playbackOptions(maxTime=end_frame)
    else:
        cmds.playbackOptions(animationStartTime=start_frame)
        cmds.playbackOptions(minTime=start_frame)
        cmds.playbackOptions(animationEndTime=end_frame)
        cmds.playbackOptions(maxTime=end_frame)

    cmds.currentTime(start_frame)


def set_resolution(project=None, asset_name=None):
    width, height = utils.get_resolution_data(project, asset_name)
    cmds.setAttr("defaultResolution.width", width)
    cmds.setAttr("defaultResolution.height", height)


def set_linear_unit(project=None, asset_name=None):
    unit = utils.get_linear_unit_data(project, asset_name)
    cmds.currentUnit(linear=unit)
| 29.236066 | 79 | 0.618313 | 1,993 | 17,834 | 5.364777 | 0.196187 | 0.015152 | 0.00795 | 0.007108 | 0.151048 | 0.078002 | 0.06229 | 0.055556 | 0.050505 | 0.040591 | 0 | 0.002499 | 0.282046 | 17,834 | 609 | 80 | 29.284072 | 0.832552 | 0.232533 | 0 | 0.114907 | 0 | 0 | 0.111094 | 0.010148 | 0 | 0 | 0 | 0.003284 | 0.003106 | 1 | 0.074534 | false | 0 | 0.037267 | 0.009317 | 0.173913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45c440eae6695f819733ad7666e871c73dcc0582 | 2,697 | py | Python | app.py | kmr2/vefferk5 | 5fe8604781ed65f62a6be8e71381296693215343 | [
"MIT"
] | null | null | null | app.py | kmr2/vefferk5 | 5fe8604781ed65f62a6be8e71381296693215343 | [
"MIT"
] | null | null | null | app.py | kmr2/vefferk5 | 5fe8604781ed65f62a6be8e71381296693215343 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request, session, redirect, url_for
import pyrebase
app = Flask(__name__)
app.config['SECRET_KEY'] = 'covid_19'
config = {
"apiKey": "AIzaSyB6L9PLOIa-y8spupA2UFiegQGN7gmp12E",
"authDomain": "vefferk5.firebaseapp.com",
"databaseURL": "https://vefferk5.firebaseio.com",
"projectId": "vefferk5",
"storageBucket": "vefferk5.appspot.com",
"messagingSenderId": "683227377425",
"appId": "1:683227377425:web:1317e8ebac70100ee126b0",
"measurementId": "G-0BBSXPYZF8"
}
fb = pyrebase.initialize_app(config)
db = fb.database()
# Test route til að setja gögn í db
@app.route('/')
def index():
return render_template("index.html")
# Test route til að sækja öll gögn úr db
@app.route('/login', methods=['GET', 'POST'])
def login():
login = False
if request.method == 'POST':
notendanafn = request.form['uname']
lykilorð = request.form['psw']
u = db.child("notandi").get().val()
lst = list(u.items())
for i in lst:
if notendanafn == i[1]['notendanafn'] and lykilorð == i[1]['lykilorð']:
login = True
break
if login:
session['logged_in'] = notendanafn
return redirect("/topsecret")
else:
return render_template("nologin.html")
else:
return render_template("no_method.html")
@app.route('/register')
def register():
return render_template('register.html')
# Test route til að sækja öll gögn úr db
@app.route('/doregister', methods=['GET', 'POST'])
def doregister():
usernames = []
if request.method == 'POST':
notendanafn = request.form['uname']
lykilorð = request.form['psw']
u = db.child("notandi").get().val()
lst = list(u.items())
for i in lst:
usernames.append(i[1]['notendanafn'])
if notendanafn not in usernames:
db.child("notandi").push({"notendanafn": notendanafn, "lykilorð": lykilorð})
return render_template("registered.html")
else:
return render_template("userexists.html")
@app.route('/logout')
def logout():
session.pop("logged_in", None)
return render_template("index.html")
@app.route('/topsecret')
def topsecret():
if 'logged_in' in session:
return render_template("topsecret.html")
else:
return redirect("/")
if __name__ == "__main__":
app.run(debug=True)
# skrifum nýjan í grunn hnútur sem heitir notandi
# db.child("notandi").push({"notendanafn":"dsg", "lykilorð":1234})
# # förum í grunn og sækjum allar raðir ( öll gögn )
# u = db.child("notandi").get().val()
# lst = list(u.items())
| 26.70297 | 88 | 0.622914 | 314 | 2,697 | 5.257962 | 0.363057 | 0.076317 | 0.096911 | 0.025439 | 0.307692 | 0.205936 | 0.205936 | 0.205936 | 0.205936 | 0.205936 | 0 | 0.029117 | 0.223211 | 2,697 | 100 | 89 | 26.97 | 0.75895 | 0.123841 | 0 | 0.264706 | 0 | 0 | 0.250744 | 0.044199 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088235 | false | 0 | 0.029412 | 0.029412 | 0.264706 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45c45593e29f001e140d37954419e1ef43c370f9 | 2,511 | py | Python | pax/utils.py | jacr20/pax | d64d0ae4e4ec3e9bb3e61065ed92e9ea23328940 | [
"BSD-3-Clause"
] | 17 | 2016-04-24T12:02:03.000Z | 2021-07-19T19:39:47.000Z | pax/utils.py | jacr20/pax | d64d0ae4e4ec3e9bb3e61065ed92e9ea23328940 | [
"BSD-3-Clause"
] | 300 | 2016-04-01T15:29:57.000Z | 2021-01-03T23:59:45.000Z | pax/utils.py | jacr20/pax | d64d0ae4e4ec3e9bb3e61065ed92e9ea23328940 | [
"BSD-3-Clause"
] | 20 | 2016-04-14T15:11:26.000Z | 2021-09-18T06:39:09.000Z | """Helper routines needed in pax
Please only put stuff here that you *really* can't find any other place for!
e.g. a list clustering routine that isn't in some standard, library but several plugins depend on it
"""
import re
import sys
import inspect
import random
import string
import logging
import time
import os
import glob
log = logging.getLogger('pax_utils')
##
# Utilities for finding files inside pax.
##
# Store the directory of pax (i.e. this file's directory) as PAX_DIR
PAX_DIR = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
def data_file_name(filename):
"""Returns filename if a file exists there, else returns PAX_DIR/data/filename"""
if os.path.exists(filename):
return filename
new_filename = os.path.join(PAX_DIR, 'data', filename)
if os.path.exists(new_filename):
return new_filename
else:
raise ValueError('File name or path %s not found!' % filename)
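
# Hypothetical usage (paths invented for illustration): callers may pass
# either an existing absolute path or a bare name that lives under
# PAX_DIR/data.
#
#   >>> data_file_name('/tmp/my_map.json')      # existing path wins
#   '/tmp/my_map.json'
#   >>> data_file_name('pmt_positions.json')    # else resolved inside pax
#   '<PAX_DIR>/data/pmt_positions.json'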
def get_named_configuration_options():
    """ Return the names of all working named configurations
    """
    config_files = []
    for filename in glob.glob(os.path.join(PAX_DIR, 'config', '*.ini')):
        filename = os.path.basename(filename)
        m = re.match(r'(\w+)\.ini', filename)
        if m is None:
            print("Weird file in config dir: %s" % filename)
            continue    # skip it, otherwise m.group(1) below would crash
        filename = m.group(1)
        # Config files starting with '_' won't appear in the usage list (they won't work by themselves)
        if filename[0] == '_':
            continue
        config_files.append(filename)

    return config_files
# Caching decorator
# Stolen from http://avinashv.net/2008/04/python-decorators-syntactic-sugar/
class Memoize:

    def __init__(self, function):
        self.function = function
        self.memoized = {}

    def __call__(self, *args):
        try:
            return self.memoized[args]
        except KeyError:
            self.memoized[args] = self.function(*args)
            return self.memoized[args]
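
# A minimal sketch of how the decorator is meant to be used (example function
# invented for illustration): repeated calls with the same positional
# arguments hit the cache instead of recomputing.
#
#   >>> @Memoize
#   ... def slow_square(x):
#   ...     return x * x
#   >>> slow_square(4)   # computed, then stored in self.memoized
#   16
#   >>> slow_square(4)   # served from the cache
#   16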
class Timer:
    """Simple stopwatch timer
    punch() returns ms since timer creation or last punch
    """
    last_t = 0

    def __init__(self):
        self.punch()

    def punch(self):
        now = time.time()
        result = (now - self.last_t) * 1000
        self.last_t = now
        return result
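
# Usage sketch (sleep duration invented for illustration):
#
#   >>> t = Timer()          # __init__ punches once to start the clock
#   >>> time.sleep(0.05)
#   >>> t.punch()            # ~50.0 -- milliseconds since the last punch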
def randomstring(n):
    return ''.join(random.choice(string.ascii_letters) for _ in range(n))


def refresh_status_line(text):
    sys.stdout.write('\r')
    sys.stdout.write(text)
    sys.stdout.flush()
| 26.15625 | 103 | 0.657905 | 346 | 2,511 | 4.66474 | 0.479769 | 0.026022 | 0.02974 | 0.022305 | 0.055762 | 0.039653 | 0.039653 | 0.039653 | 0 | 0 | 0 | 0.006767 | 0.234966 | 2,511 | 95 | 104 | 26.431579 | 0.83342 | 0.284349 | 0 | 0.035714 | 0 | 0 | 0.054639 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.160714 | 0.017857 | 0.482143 | 0.017857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45c8bb81a03ef5a3614a21fdea7815af182bb4cc | 674 | py | Python | tock/session/memory.py | elebescond/tock-py | 2addcaa671be4a7af7cdf9bc061c707bbcf7128a | [
"MIT"
] | 4 | 2020-09-05T10:08:34.000Z | 2021-10-05T05:38:59.000Z | tock/session/memory.py | elebescond/tock-py | 2addcaa671be4a7af7cdf9bc061c707bbcf7128a | [
"MIT"
] | 16 | 2020-09-03T14:13:24.000Z | 2021-03-22T09:54:08.000Z | tock/session/memory.py | elebescond/tock-py | 2addcaa671be4a7af7cdf9bc061c707bbcf7128a | [
"MIT"
] | 3 | 2020-09-15T09:04:06.000Z | 2021-03-04T12:40:27.000Z | # -*- coding: utf-8 -*-
from typing import List

from tock.session.storage import Storage
from tock.session.session import Session
from tock.models import UserId


class MemoryStorage(Storage):

    def __init__(self):
        self.__sessions: List[Session] = []

    def get_session(self, user_id: UserId) -> Session:
        for session in self.__sessions:
            if session.user_id == user_id:
                return session
        return Session(user_id)

    def save(self, session: Session):
        for item in self.__sessions:
            if item.user_id == session.user_id:
                self.__sessions.remove(item)
        self.__sessions.append(session)
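
# Rough usage sketch (assuming Session(user_id) builds an empty session;
# the exact constructors live in tock.models / tock.session.session):
#
#   >>> storage = MemoryStorage()
#   >>> s = storage.get_session(some_user_id)  # unknown id -> fresh Session
#   >>> storage.save(s)                        # save() replaces any older
#   >>> storage.save(s)                        # session with the same user_id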
| 26.96 | 54 | 0.648368 | 84 | 674 | 4.952381 | 0.345238 | 0.086538 | 0.09375 | 0.076923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002004 | 0.259644 | 674 | 24 | 55 | 28.083333 | 0.831663 | 0.031157 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.176471 | false | 0 | 0.235294 | 0 | 0.588235 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45c8f3a70c973964d6c049db2cb9fbfddac2a6fa | 2,607 | py | Python | tests/tests.py | hamedrb/pystripe | 8ffd6f64f9074562d2c8b293b57cc795bfdcc196 | [
"MIT"
] | null | null | null | tests/tests.py | hamedrb/pystripe | 8ffd6f64f9074562d2c8b293b57cc795bfdcc196 | [
"MIT"
] | null | null | null | tests/tests.py | hamedrb/pystripe | 8ffd6f64f9074562d2c8b293b57cc795bfdcc196 | [
"MIT"
] | null | null | null | import numpy as np
import unittest
import os
import tempfile
# import matplotlib.pyplot as plt

from pystripe import core


class TestWavedec(unittest.TestCase):

    def test(self):
        img = np.eye(5)
        coeffs = core.wavedec(img, wavelet='db1', level=None)
        approx = coeffs[0]
        self.assertEqual(len(coeffs), 3)
        self.assertTrue(np.allclose(approx, np.array([[1, 0], [0, 4]])))


class TestWaverec(unittest.TestCase):

    def test(self):
        img = np.eye(6)
        wavelet = 'db1'
        coeffs = core.wavedec(img, wavelet=wavelet, level=None)
        recon = core.waverec(coeffs, wavelet=wavelet)
        self.assertTrue(np.allclose(img, recon))


# def plot_fft(data, fdata):
#     plt.subplot(121)
#     plt.imshow(data)
#     plt.subplot(122)
#     plt.imshow(np.sqrt(np.real(fdata) ** 2 + np.imag(fdata) ** 2))
#     plt.show()


class TestFFT(unittest.TestCase):

    def setUp(self):
        self.data = np.zeros((64, 64))
        self.data[12, :] = 10  # thin horizontal stripe, should show up as high frequency vertical component

    def test_shift(self):
        fdata = core.fft(self.data)
        self.assertAlmostEqual(fdata[44, 32], 640.0)

    def test_noshift(self):
        fdata = core.fft(self.data, shift=False)
        self.assertAlmostEqual(fdata[12, 0], 640.0)


class TestFFT2(unittest.TestCase):

    def setUp(self):
        self.data = np.zeros((64, 64))
        self.data[12, :] = 10  # thin horizontal stripe, should show up as high frequency vertical component

    def test_shift(self):
        fdata = core.fft2(self.data)
        # builtin complex() is used here; np.complex was deprecated and later
        # removed from NumPy
        self.assertAlmostEqual(fdata[44, 32], complex(0, -640.0))


class TestNotch(unittest.TestCase):

    def test(self):
        g = core.notch(n=4, sigma=1)
        self.assertTrue(np.allclose(g, np.array([0, 0.39346934, 0.86466472, 0.988891])))
        g = core.notch(n=4, sigma=2)
        self.assertTrue(np.allclose(g, np.array([0, 0.1175031, 0.39346934, 0.67534753])))

    def test_zero(self):
        with self.assertRaises(ValueError):
            g = core.notch(n=4, sigma=0.0)
        with self.assertRaises(ValueError):
            g = core.notch(n=0, sigma=1.0)


class TestGaussianFilter(unittest.TestCase):

    def test(self):
        m = 10
        res = core.gaussian_filter(shape=(m, 4), sigma=1)
        self.assertTrue(np.allclose(res, np.array(m*[[0, 0.39346934, 0.86466472, 0.988891]])))
        res = core.gaussian_filter(shape=(m, 4), sigma=2)
        self.assertTrue(np.allclose(res, np.array(m*[[0, 0.1175031, 0.39346934, 0.67534753]])))


if __name__ == '__main__':
    unittest.main() | 31.409639 | 108 | 0.628308 | 366 | 2,607 | 4.434426 | 0.281421 | 0.034504 | 0.07024 | 0.088725 | 0.621688 | 0.555145 | 0.521257 | 0.40419 | 0.269871 | 0.227973 | 0 | 0.089832 | 0.222862 | 2,607 | 83 | 109 | 31.409639 | 0.711254 | 0.136172 | 0 | 0.25 | 0 | 0 | 0.006242 | 0 | 0 | 0 | 0 | 0 | 0.214286 | 1 | 0.178571 | false | 0 | 0.089286 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
45cd8cd79b7985a77b217cfc932114de4167b993 | 8,854 | py | Python | third_party/lid_adversarial_subspace_detection/adaptive_attacks.py | ptrcarta/neural-fingerprinting | 01fa8cb592f6fa7497c6884861adf7680ffa7f29 | [
"BSD-3-Clause"
] | 29 | 2018-03-10T04:33:25.000Z | 2022-03-18T13:03:37.000Z | third_party/lid_adversarial_subspace_detection/adaptive_attacks.py | ptrcarta/neural-fingerprinting | 01fa8cb592f6fa7497c6884861adf7680ffa7f29 | [
"BSD-3-Clause"
] | 2 | 2019-07-22T20:59:01.000Z | 2019-11-17T07:00:00.000Z | third_party/lid_adversarial_subspace_detection/adaptive_attacks.py | StephanZheng/neural-fingerprinting | 57e93e487ef324427456b14d1d81bc9e08483d27 | [
"BSD-3-Clause"
] | 20 | 2018-03-14T14:01:55.000Z | 2021-09-17T19:19:56.000Z | from __future__ import absolute_import
from __future__ import print_function
import copy
from collections import defaultdict
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from six.moves import xrange
import sys
sys.path.append('../../.')
from cleverhans.utils import other_classes
from cleverhans.utils_tf import model_argmax
from cleverhans.evaluation import batch_eval
from cleverhans.attacks_tf import (jacobian_graph, jacobian,
apply_perturbations, saliency_map)
import keras.backend as K
import os
import pickle
def adaptive_fgsm(x, predictions, eps, clip_min=None, clip_max=None,
log_dir=None, y=None, model_logits = None,
alpha = None, dataset=None
):
"""
Computes symbolic TF tensor for the adversarial samples. This must
be evaluated with a session.run call.
:param x: the input placeholder
:param predictions: the model's output tensor
:param eps: the epsilon (input variation parameter)
:param clip_min: optional parameter that can be used to set a minimum
value for components of the example returned
:param clip_max: optional parameter that can be used to set a maximum
value for components of the example returned
:param y: the output placeholder. Use None (the default) to avoid the
label leaking effect.
:return: a tensor for the adversarial example
"""
# Compute loss]
logits, = predictions.op.inputs
fingerprint_dir = log_dir
fixed_dxs = pickle.load(open(os.path.join(fingerprint_dir, "fp_inputs_dx.pkl"), "rb"))
fixed_dys = pickle.load(open(os.path.join(fingerprint_dir, "fp_outputs.pkl"), "rb"))
if y is None:
# In this case, use model predictions as ground truth
y = tf.to_float(
tf.equal(predictions,
tf.reduce_max(predictions, 1, keep_dims=True)))
output = logits
pred_class = tf.argmax(y,axis=1)
loss_fp = 0
[a,b,c] = np.shape(fixed_dys)
num_dx = b
target_dys = tf.convert_to_tensor(fixed_dys)
target_dys = (tf.gather(target_dys,pred_class))
norms = tf.sqrt(tf.reduce_sum(tf.square(output), axis=1, keep_dims=True))
norm_logits = output/norms
for i in range(num_dx):
logits_p = model_logits(x + fixed_dxs[i])
p_norm = tf.sqrt(tf.reduce_sum(tf.square(logits_p), axis=1, keep_dims=True))
logits_p_norm = logits_p/p_norm
loss_fp = loss_fp + tf.losses.mean_squared_error((logits_p_norm - norm_logits),target_dys[:,i,:])
#self appropriate fingerprint
y = y / tf.reduce_sum(y, 1, keep_dims=True)
loss_ce = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y)
)
## Tune this alpha!!
loss = loss_ce - alpha*loss_fp
# Define gradient of loss wrt input
grad, = tf.gradients(loss, x)
# Take sign of gradient
signed_grad = tf.sign(grad)
# Multiply by constant epsilon
scaled_signed_grad = eps * signed_grad
# Add perturbation to original example to obtain adversarial example
adv_x = tf.stop_gradient(x + scaled_signed_grad)
# If clipping is needed, reset all values outside of [clip_min, clip_max]
if (clip_min is not None) and (clip_max is not None):
adv_x = tf.clip_by_value(adv_x, clip_min, clip_max)
return adv_x
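
# Intuition for the combined objective above, as a small worked sketch
# (numbers invented for illustration): ascending
#     loss = loss_ce - alpha * loss_fp
# pushes the input toward misclassification (larger cross-entropy) while
# keeping the fingerprint mismatch loss_fp small, so the fingerprint
# detector still sees responses close to the expected target_dys.
# With eps = 0.03, a single step perturbs each input component by +/- 0.03:
#
#   x_adv = clip(x + 0.03 * sign(d loss / d x), clip_min, clip_max)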
def adaptive_fast_gradient_sign_method(sess, model, X, Y, eps, clip_min=None,
                                       clip_max=None, batch_size=256, log_dir=None,
                                       model_logits=None, binary_steps=2,
                                       dataset="cifar"):
    """
    TODO
    :param sess:
    :param model: predictions or after-softmax
    :param X:
    :param Y:
    :param eps:
    :param clip_min:
    :param clip_max:
    :param batch_size:
    :return:
    """
    # Define TF placeholders for the input and output
    x = tf.placeholder(tf.float32, shape=(None,) + X.shape[1:])
    y = tf.placeholder(tf.float32, shape=(None,) + Y.shape[1:])
    alpha = tf.placeholder(tf.float32, shape=(None,) + (1,))

    num_samples = np.shape(X)[0]
    ALPHA = 0.1*np.ones((num_samples, 1))
    ub = 10.0*np.ones(num_samples)
    lb = 0.0*np.ones(num_samples)
    Best_X_adv = None

    for i in range(binary_steps):
        adv_x = adaptive_fgsm(
            x, model(x), eps=eps,
            clip_min=clip_min,
            clip_max=clip_max, y=y,
            log_dir=log_dir,
            model_logits=model_logits,
            alpha=alpha
        )
        X_adv = batch_eval(
            sess, [x, y, alpha], [adv_x],
            [X, Y, ALPHA], feed={},
            args={'batch_size': batch_size}
        )
        X_adv = np.array(X_adv[0])
        if(i == 0):
            Best_X_adv = X_adv
        ALPHA, Best_X_adv = binary_refinement(sess, Best_X_adv,
                                              X_adv, Y, ALPHA, ub, lb, model, dataset)
    return Best_X_adv
def binary_refinement(sess, Best_X_adv,
                      X_adv, Y, ALPHA, ub, lb, model, dataset='cifar'):
    num_samples = np.shape(X_adv)[0]
    print(dataset)
    if(dataset == "mnist"):
        X_place = tf.placeholder(tf.float32, shape=[1, 1, 28, 28])
    else:
        X_place = tf.placeholder(tf.float32, shape=[1, 3, 32, 32])
    pred = model(X_place)

    for i in range(num_samples):
        logits_op = sess.run(pred, feed_dict={X_place: X_adv[i:i+1, :, :, :]})
        if(not np.argmax(logits_op) == np.argmax(Y[i, :])):
            # Success, increase alpha
            Best_X_adv[i, :, :, :] = X_adv[i, :, :, ]
            lb[i] = ALPHA[i, 0]
        else:
            ub[i] = ALPHA[i, 0]
        ALPHA[i] = 0.5*(lb[i] + ub[i])
    return ALPHA, Best_X_adv
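
# Per-sample bisection walk-through (values invented for illustration).
# alpha starts at 0.1 inside the bracket [lb, ub] = [0, 10]; each binary
# step halves the bracket around the largest alpha that still fools the model:
#
#   step 1: attack fools model -> lb = 0.1,  alpha = 0.5 * (0.1 + 10)   = 5.05
#   step 2: attack fails       -> ub = 5.05, alpha = 0.5 * (0.1 + 5.05) ~ 2.58
#
# Larger alpha weighs the fingerprint term more, so the search converges on
# the strongest fingerprint-evasion pressure that keeps the sample adversarial.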
def adaptive_basic_iterative_method(sess, model, X, Y, eps, eps_iter, nb_iter=50,
                                    clip_min=None, clip_max=None, batch_size=256,
                                    log_dir=None, model_logits=None,
                                    binary_steps=2, attack_type="bim-b",
                                    dataset="cifar"):
    """
    TODO
    :param sess:
    :param model: predictions or after-softmax
    :param X:
    :param Y:
    :param eps:
    :param eps_iter:
    :param nb_iter:
    :param clip_min:
    :param clip_max:
    :param batch_size:
    :return:
    """
    print("nb_iter", nb_iter)
    # Define TF placeholders for the input and output
    x = tf.placeholder(tf.float32, shape=(None,)+X.shape[1:])
    y = tf.placeholder(tf.float32, shape=(None,)+Y.shape[1:])
    alpha = tf.placeholder(tf.float32, shape=(None,) + (1,))

    num_samples = np.shape(X)[0]
    ALPHA = 0.1*np.ones((num_samples, 1))
    ub = 10.0*np.ones(num_samples)
    lb = 0.0*np.ones(num_samples)
    Best_X_adv = None

    results = np.zeros((nb_iter, X.shape[0],) + X.shape[1:])

    # Initialize adversarial samples as the original samples, set upper and
    # lower bounds
    X_adv = X
    X_min = X_adv - eps
    X_max = X_adv + eps
    print('Running BIM iterations...')

    # "its" is a dictionary that keeps track of the iteration at which each
    # sample becomes misclassified. The default value will be (nb_iter-1), the
    # very last iteration.
    def f(val):
        return lambda: val
    its = defaultdict(f(nb_iter-1))
    # Out keeps track of which samples have already been misclassified
    out = set()

    for j in range(binary_steps):
        for i in tqdm(range(nb_iter)):
            adv_x = adaptive_fgsm(
                x, model(x), eps=eps_iter,
                clip_min=clip_min, clip_max=clip_max, y=y,
                log_dir=log_dir,
                model_logits=model_logits,
                alpha=alpha
            )
            X_adv, = batch_eval(
                sess, [x, y, alpha], [adv_x],
                [X_adv, Y, ALPHA], feed={K.learning_phase(): 0},
                args={'batch_size': batch_size}
            )
            X_adv = np.maximum(np.minimum(X_adv, X_max), X_min)
            results[i] = X_adv
            # check misclassifieds
            predictions = model.predict_classes(X_adv, batch_size=512, verbose=0)
            misclassifieds = np.where(predictions != Y.argmax(axis=1))[0]
            for elt in misclassifieds:
                if elt not in out:
                    its[elt] = i
                    out.add(elt)
            print(i)
        X_adv = results[-1]
        if(j == 0):
            Best_X_adv = X_adv
        ALPHA, Best_X_adv = binary_refinement(sess, Best_X_adv,
                                              X_adv, Y, ALPHA, ub, lb, model, dataset)
    return Best_X_adv
| 35.99187 | 106 | 0.583465 | 1,228 | 8,854 | 4.010586 | 0.219055 | 0.027614 | 0.021117 | 0.035736 | 0.39797 | 0.381726 | 0.374213 | 0.358376 | 0.315736 | 0.273096 | 0 | 0.014595 | 0.311272 | 8,854 | 245 | 107 | 36.138776 | 0.793047 | 0.200474 | 0 | 0.253165 | 0 | 0 | 0.017758 | 0 | 0 | 0 | 0 | 0.008163 | 0 | 1 | 0.031646 | false | 0 | 0.101266 | 0.006329 | 0.164557 | 0.050633 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45cdb841e19bbd7ee0c6ca21d0ef0accbb5b6f9d | 2,009 | py | Python | pykit/ir/pretty.py | ContinuumIO/pyk | 1730d7b831e0cf12a641ac23b5cf03e17e0dc550 | [
"BSD-3-Clause"
] | 9 | 2015-06-23T00:13:49.000Z | 2022-02-23T02:46:43.000Z | pykit/ir/pretty.py | ContinuumIO/pyk | 1730d7b831e0cf12a641ac23b5cf03e17e0dc550 | [
"BSD-3-Clause"
] | 1 | 2017-08-30T08:13:12.000Z | 2017-08-31T06:36:32.000Z | pykit/ir/pretty.py | ContinuumIO/pyk | 1730d7b831e0cf12a641ac23b5cf03e17e0dc550 | [
"BSD-3-Clause"
] | 7 | 2015-05-08T10:17:47.000Z | 2021-04-01T15:00:57.000Z | # -*- coding: utf-8 -*-
"""
Pretty print pykit IR.
"""
from __future__ import print_function, division, absolute_import
from pykit.utils import hashable
prefix = lambda s: '%' + s
indent = lambda s: '\n'.join(' ' + s for s in s.splitlines())
ejoin = "".join
sjoin = " ".join
ajoin = ", ".join
njoin = "\n".join
parens = lambda s: '(' + s + ')'
compose = lambda f, g: lambda x: f(g(x))
def pretty(value):
formatter = formatters[type(value).__name__]
return formatter(value)
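
# The dispatch above keys on the *class name* of the IR value, so pretty()
# works for any node registered in the `formatters` table at the bottom of
# this module. Hypothetical usage (assuming `func` is a pykit ir.Function;
# names and types invented for illustration):
#
#   >>> print(pretty(func))
#   function Int32 fib(Int32 %n) {
#   entry:
#       %0 = (Int32) lt(%n, %2)
#   ...
#   }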
def fmod(mod):
    gs, fs = mod.globals.values(), mod.functions.values()
    return njoin([njoin(map(pretty, gs)), "", njoin(map(pretty, fs))])


def ffunc(f):
    restype = ftype(f.type.restype)
    types, names = map(ftype, f.type.argtypes), map(prefix, f.argnames)
    args = ajoin(map(sjoin, zip(types, names)))
    header = sjoin(["function", restype, f.name + parens(args)])
    return njoin([header + " {", njoin(map(fblock, f.blocks)), "}"])


def farg(func_arg):
    return "%" + func_arg.result


def fblock(block):
    body = njoin(map(compose(indent, fop), block))
    return njoin([block.name + ':', body, ''])


def _farg(oparg):
    from pykit import ir
    if isinstance(oparg, ir.Function):
        return oparg.name
    else:
        return str(oparg)


def fop(op):
    return '%{0} = ({1}) {2}({3})'.format(op.result, ftype(op.type), op.opcode,
                                          ajoin(map(prefix, map(_farg, op.operands))))


def fconst(c):
    return 'const(%s, %s)' % (ftype(c.type), c.const)


def fglobal(val):
    return "global %{0} = {1}".format(val.name, ftype(val.type))


def fundef(val):
    return 'Undef'


def ftype(val):
    from pykit import types
    if hashable(val) and val in types.type2name:
        return types.type2name[val]
    return str(val)


formatters = {
    'Module': fmod,
    'GlobalValue': fglobal,
    'Function': ffunc,
    'FuncArg': farg,
    'Block': fblock,
    'Operation': fop,
    'Constant': fconst,
    'Undef': fundef,
} | 25.75641 | 86 | 0.594823 | 265 | 2,009 | 4.456604 | 0.358491 | 0.027096 | 0.013548 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005818 | 0.229965 | 2,009 | 78 | 87 | 25.75641 | 0.757595 | 0.022399 | 0 | 0 | 0 | 0 | 0.07256 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.192982 | false | 0 | 0.070175 | 0.087719 | 0.491228 | 0.017544 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45cea41fbca701da0133eaa849e6902c4fa1ad42 | 9,059 | py | Python | parser.py | JonahSussman/closure-language | 9cbe9e381e81dc645c8e82096366fd50a60a6d32 | [
"MIT"
] | null | null | null | parser.py | JonahSussman/closure-language | 9cbe9e381e81dc645c8e82096366fd50a60a6d32 | [
"MIT"
] | null | null | null | parser.py | JonahSussman/closure-language | 9cbe9e381e81dc645c8e82096366fd50a60a6d32 | [
"MIT"
] | null | null | null | from expr import Expr
from stmt import Stmt


class Parser:
    class ParserError(Exception):
        pass

    def __init__(self, tokens):
        self.tokens = tokens
        self.c_tok = 0

    def match(self, *kinds):
        if self.c_tok == len(self.tokens):
            return False
        for kind in kinds:
            if kind == self.tokens[self.c_tok].kind:
                return True
        return False

    def error(self, token):
        print('Parser Error! Invalid token: %s' % (token))
        raise Parser.ParserError

    def declaration(self):
        try:
            if self.match('FUNCTION'):
                self.c_tok += 1
                return self.function('function')
            elif self.match('LET'):
                self.c_tok += 1
                return self.var_declaration()
            else:
                return self.statement()
        except Parser.ParserError:
            print(self.tokens)
            exit()
    def statement(self):
        if self.match('IF'):
            self.c_tok += 1
            return self.if_statement()
        elif self.match('WHILE'):
            self.c_tok += 1
            return self.while_statement()
        elif self.match('PRINT'):
            self.c_tok += 1
            return self.print_statement()
        elif self.match('RETURN'):
            self.c_tok += 1
            return self.return_statment()
        elif self.match('L_BRACE'):
            self.c_tok += 1
            return Stmt.Block(self.block())
        return self.expression_statment()

    def block(self):
        statements = []
        while not self.match('R_BRACE') and self.c_tok < len(self.tokens):
            statements.append(self.declaration())
        self.c_tok += 1
        return statements

    def function(self, like):
        if not self.match('ID'):
            raise Parser.ParserError
        name = self.tokens[self.c_tok].value
        self.c_tok += 1

        if not self.match('L_PAREN'):
            print('Expected \'(\' after function name')
            raise Parser.ParserError
        self.c_tok += 1

        params = []
        if not self.match('R_PAREN'):
            while True:
                if not self.match('ID'):
                    print('Expected identifier in parameters.')
                    raise Parser.ParserError
                params.append(self.tokens[self.c_tok])
                self.c_tok += 1
                if not self.match('COMMA'):
                    break
                self.c_tok += 1

        if not self.match('R_PAREN'):
            print('Expected \')\' after function params')
            raise Parser.ParserError
        self.c_tok += 1

        if not self.match('L_BRACE'):
            print('Expected \'{\' before body')
            raise Parser.ParserError
        self.c_tok += 1

        body = self.block()
        return Stmt.Fn(name, params, body)
    def print_statement(self):
        value = self.expression()
        if not self.match('ENDLINE'):
            raise Parser.ParserError
        self.c_tok += 1
        return Stmt.Print(value)

    def return_statment(self):
        value = None
        if not self.match('ENDLINE'):
            value = self.expression()
        if not self.match('ENDLINE'):
            print('\\n must follow return value')
            raise Parser.ParserError
        self.c_tok += 1
        return Stmt.Return('return', value)

    def if_statement(self):
        if not self.match('L_PAREN'):
            raise Parser.ParserError
        self.c_tok += 1
        expression = self.expression()
        if not self.match('R_PAREN'):
            raise Parser.ParserError
        self.c_tok += 1
        then_branch = self.statement()
        else_branch = None
        if self.match('ELSE'):
            self.c_tok += 1
            else_branch = self.statement()
        return Stmt.If(expression, then_branch, else_branch)

    def while_statement(self):
        if not self.match('L_PAREN'):
            raise Parser.ParserError
        self.c_tok += 1
        expression = self.expression()
        if not self.match('R_PAREN'):
            raise Parser.ParserError
        self.c_tok += 1
        body = self.statement()
        return Stmt.While(expression, body)

    def expression_statment(self):
        value = self.expression()
        if not self.match('ENDLINE'):
            raise Parser.ParserError
        self.c_tok += 1
        return Stmt.Expression(value)

    def var_declaration(self):
        if not self.match('ID'):
            raise Parser.ParserError
        name = self.tokens[self.c_tok].value
        self.c_tok += 1
        initalizer = None
        if self.match('EQUAL'):
            self.c_tok += 1
            initalizer = self.expression()
        if not self.match('ENDLINE'):
            raise Parser.ParserError
        self.c_tok += 1
        return Stmt.Let(name, initalizer)

    def expression(self):
        return self.assignment()
    def assignment(self):
        expr = self.cast()
        if self.match('EQUAL'):
            self.c_tok += 1
            value = self.assignment()
            if isinstance(expr, Expr.Variable):
                return Expr.Assign(expr.name, value)
            else:
                raise Parser.ParserError
        return expr

    def cast(self):
        expr = self.equality()
        if self.match('CAST'):
            self.c_tok += 1
            kind = self.cast()
            return Expr.Cast(expr, kind)
        return expr

    def equality(self):
        expr = self.comparison()
        while self.match('BANG_EQUAL', 'EQUAL_EQUAL', 'AND', 'OR'):
            operator = self.tokens[self.c_tok].value
            self.c_tok += 1
            right = self.comparison()
            expr = Expr.Listed(operator, [expr, right])
        return expr

    def comparison(self):
        expr = self.addition()
        while self.match('LESS', 'GREATER', 'LESS_EQUAL', 'GREATER_EQUAL'):
            operator = self.tokens[self.c_tok].value
            self.c_tok += 1
            right = self.addition()
            expr = Expr.Listed(operator, [expr, right])
        return expr

    def addition(self):
        expr = self.multiplication()
        while self.match('PLUS', 'MINUS'):
            operator = self.tokens[self.c_tok].value
            self.c_tok += 1
            right = self.multiplication()
            expr = Expr.Listed(operator, [expr, right])
        return expr
        # NOTE: the two lines below are unreachable dead code (they sit
        # after the return), apparently left over from an earlier version.
        self.c_tok += 1
        return Expr.Literal(self.tokens[self.c_tok - 1].value)

    def multiplication(self):
        expr = self.exponentiation()
        while self.match('STAR', 'SLASH', 'MOD'):
            operator = self.tokens[self.c_tok].value
            self.c_tok += 1
            right = self.exponentiation()
            expr = Expr.Listed(operator, [expr, right])
        return expr
        # NOTE: unreachable dead code, same leftover as in addition().
        self.c_tok += 1
        return Expr.Literal(self.tokens[self.c_tok - 1].value)

    def exponentiation(self):
        stack = [self.negation()]
        while self.match('CARET'):
            self.c_tok += 1
            stack.append(self.negation())
        while len(stack) > 1:
            right = stack.pop()
            left = stack.pop()
            stack.append(Expr.Listed('^', [left, right]))
        return stack[0]
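
    # Worked example of why exponentiation uses a stack: '^' must be
    # right-associative. For the token stream `2 ^ 3 ^ 2` the first loop
    # collects [2, 3, 2]; the second loop folds from the right:
    #
    #   pop 2, pop 3        -> stack is [2, (3 ^ 2)]
    #   pop (3 ^ 2), pop 2  -> (2 ^ (3 ^ 2)), i.e. 2^9 = 512, not (2^3)^2 = 64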
    def negation(self):
        if self.match('MINUS', 'NOT', 'LN', 'LOG_10', 'SQRT', 'INPUT'):
            operator = self.tokens[self.c_tok].value
            self.c_tok += 1
            right = self.negation()
            return Expr.Listed(operator, [right])
        else:
            return self.custom_root()

    def custom_root(self):
        expr = self.logbase()
        while self.match('ROOT'):
            operator = self.tokens[self.c_tok].value
            self.c_tok += 1
            right = self.logbase()
            expr = Expr.Listed(operator, [expr, right])
        return expr

    def logbase(self):
        if self.match('LOG'):
            operator = self.tokens[self.c_tok].value
            self.c_tok += 1
            base = self.logbase()
            argument = self.logbase()
            return Expr.Listed(operator, [base, argument])
        else:
            return self.factorial()

    def factorial(self):
        expr = self.call()
        while self.match('BANG'):
            operator = self.tokens[self.c_tok].value
            self.c_tok += 1
            expr = Expr.Listed(operator, [expr])
        return expr

    def call(self):
        expr = self.primary()
        while True:
            if self.match('L_PAREN'):
                self.c_tok += 1
                expr = self.finish_call(expr)
            else:
                break
        return expr

    def finish_call(self, callee):
        arguments = []
        if not self.match('R_PAREN'):
            while True:
                arguments.append(self.expression())
                if not self.match('COMMA'):
                    break
                self.c_tok += 1
        if not self.match('R_PAREN'):
            print('No \')\' after arguments!')
            raise Parser.ParserError
        paren = self.tokens[self.c_tok]
        self.c_tok += 1
        return Expr.Call(callee, paren, arguments)

    def primary(self):
        expr = None
        token_value = self.tokens[self.c_tok].value
        if self.match('ENDLINE'):
            self.c_tok -= 1
            expr = Expr.Literal(None)
        elif self.match('TRUE'): expr = Expr.Literal(True)
        elif self.match('FALSE'): expr = Expr.Literal(False)
        elif self.match('NIL'): expr = Expr.Literal(None)
        elif self.match('STRING'): expr = Expr.Literal(token_value[1:len(token_value)-1])
        elif self.match('NUM'): expr = Expr.Literal(float(token_value))
        elif self.match('KIND'): expr = Expr.Literal(token_value)
        elif self.match('ID'): expr = Expr.Variable(token_value)
        elif self.match('L_PAREN'):
            self.c_tok += 1
            expr = Expr.Grouping(self.expression())
            if not self.match('R_PAREN'):
                raise Parser.ParserError
        if not expr:
            raise Parser.ParserError
        self.c_tok += 1
        return expr

    def parse(self):
        statements = []
        while self.c_tok < len(self.tokens):
            statements.append(self.declaration())
        return statements
| 24.286863 | 85 | 0.610001 | 1,196 | 9,059 | 4.524247 | 0.10786 | 0.059139 | 0.094622 | 0.076511 | 0.494548 | 0.445759 | 0.403068 | 0.381445 | 0.347256 | 0.279061 | 0 | 0.007887 | 0.258196 | 9,059 | 372 | 86 | 24.352151 | 0.797321 | 0 | 0 | 0.467128 | 0 | 0 | 0.063142 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.100346 | false | 0.00346 | 0.00692 | 0.00346 | 0.259516 | 0.034602 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45d7d0388364df4712cd375ea072f369e100ecba | 3,542 | py | Python | oocran/django/operators/views.py | howls90/oocran | 9951f3ff752f9f6517a4d016476c1d1e2bb44a4d | [
"Apache-2.0",
"BSD-3-Clause"
] | 3 | 2018-12-12T10:32:16.000Z | 2022-02-07T19:46:10.000Z | oocran/django/operators/views.py | howls90/oocran | 9951f3ff752f9f6517a4d016476c1d1e2bb44a4d | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2017-01-11T06:56:35.000Z | 2017-01-11T06:58:44.000Z | oocran/django/operators/views.py | howls90/OOCRAN | 9951f3ff752f9f6517a4d016476c1d1e2bb44a4d | [
"Apache-2.0",
"BSD-3-Clause"
] | 6 | 2017-05-29T03:34:23.000Z | 2022-02-07T19:46:11.000Z | """
Open Orchestrator Cloud Radio Access Network

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.shortcuts import render, get_object_or_404, redirect
from .forms import OperatorForm, ChangeCredenForm
from .models import Operator
from vims.models import Vim
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from oocran.global_functions import paginator
from scenarios.models import Scenario
from django.http import HttpResponse


def update_scenarios(id):
    operator = Operator.objects.get(id=id)
    scenarios = Scenario.objects.filter(operator__user__is_staff=True)
    for scenario in scenarios:
        scenario.update_operators(operator)


@staff_member_required
def add(request):
    form = OperatorForm(request.POST or None)

    if form.is_valid():
        if form.cleaned_data['password'] == form.cleaned_data['password_confirmation']:
            operator = form.save(commit=False)
            if operator.check_used_name():
                operator.create(form.cleaned_data['email'])
                update_scenarios(id=operator.id)
                operator.create_influxdb_user()
                messages.success(request, "Operator successfully created!", extra_tags="alert alert-success")
                return redirect("operators:list")
            else:
                messages.success(request, "Username is already in use!", extra_tags="alert alert-danger")
        else:
            messages.success(request, "Password and confirmation are different!", extra_tags="alert alert-danger")
        return redirect("operators:list")

    if form.errors:
        messages.success(request, form.errors, extra_tags="alert alert-danger")
        return redirect("operators:list")

    context = {
        "user": request.user,
        "form": form,
    }
    return render(request, "operators/form.html", context)


@staff_member_required
def list(request):
    operators = Operator.objects.filter().exclude(user__is_staff=True)
    operators = paginator(request, operators)

    context = {
        "user": request.user,
        "object_list": operators,
    }
    return render(request, "operators/list.html", context)


@staff_member_required
def delete(request, id=None):
    operator = get_object_or_404(Operator, id=id)
    operator.delete_influxdb_user()
    operator.remove()
    operator.user.delete()
    messages.success(request, "Operator successfully deleted!", extra_tags="alert alert-success")
    return redirect("operators:list")


@login_required(login_url='/login/')
def home(request):
    operator = get_object_or_404(Operator, name=request.user.username)

    context = {
        "user": request.user,
        "operator": operator,
    }
    return render(request, "operators/home.html", context)


@login_required(login_url='/login/')
def state(request, id=None):
    operator = get_object_or_404(Operator, id=id)
    return HttpResponse(operator.state) | 34.38835 | 115 | 0.709486 | 436 | 3,542 | 5.639908 | 0.341743 | 0.0244 | 0.044734 | 0.038634 | 0.230582 | 0.186255 | 0.123627 | 0.123627 | 0.123627 | 0.038227 | 0 | 0.005618 | 0.195935 | 3,542 | 103 | 116 | 34.38835 | 0.857795 | 0.160361 | 0 | 0.275362 | 0 | 0 | 0.142174 | 0.007177 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0.028986 | 0.144928 | 0 | 0.347826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
45d7eeb536d9afa54b424f98a4bd81f25efce2fc | 2,066 | py | Python | ad2dispatch/news/views.py | cetaSYN/ad2dispatch | a3fa7585aaf18905d7914faaa5d1d6f584c2dab4 | [
"Apache-2.0"
] | null | null | null | ad2dispatch/news/views.py | cetaSYN/ad2dispatch | a3fa7585aaf18905d7914faaa5d1d6f584c2dab4 | [
"Apache-2.0"
] | 29 | 2019-01-25T16:05:58.000Z | 2020-03-21T21:18:58.000Z | ad2dispatch/news/views.py | cetaSYN/ad2dispatch | a3fa7585aaf18905d7914faaa5d1d6f584c2dab4 | [
"Apache-2.0"
] | 4 | 2019-01-25T15:55:04.000Z | 2019-01-25T17:33:02.000Z | import markdown
from django.http import Http404
from django.shortcuts import render
from pages.models import get_top_pages
from userprofiles.models import Volunteer
from .models import Article
def index(request):
try:
article_list = Article.objects.values(
'id', 'title', 'created_date').order_by('-created_date')
selected = Article.objects.latest('created_date')
except Article.DoesNotExist:
selected = Article(
created_by=None,
created_date=None,
title='Placeholder',
content='You are seeing this page because you do not ' +
'have any other pages created.<br> Please add content in ' +
'the <a href="/admin/">admin panel</a>.')
# Parse Markdown
try:
selected.content = markdown.markdown(selected.content)
except AttributeError:
pass
if hasattr(selected, 'created_by'):
creator = Volunteer.objects.get(user=selected.created_by)
else:
creator = None
top_pages = get_top_pages()
context = {
'top_pages': top_pages,
'article_list': article_list,
'selected': selected,
'creator': creator,
'loc': 'news:index',
}
return render(request, 'news/article.html', context)
def article(request, article_id):
try:
article_list = Article.objects.values('id', 'title', 'created_date').order_by('-created_date')
selected = Article.objects.get(id=article_id)
# Parse Markdown
try:
selected.content = markdown.markdown(selected.content)
except AttributeError:
pass
top_pages = get_top_pages()
except Article.DoesNotExist:
raise Http404("Article does not exist.")
context = {
'top_pages': top_pages,
'article_list': article_list,
'selected': selected,
'creator': Volunteer.objects.get(user=selected.created_by),
'loc': 'news:article:' + str(article_id),
}
return render(request, 'news/article.html', context)
| 29.514286 | 102 | 0.631171 | 231 | 2,066 | 5.502165 | 0.311688 | 0.056648 | 0.056648 | 0.033045 | 0.549961 | 0.520063 | 0.520063 | 0.455547 | 0.387097 | 0.387097 | 0 | 0.003924 | 0.259923 | 2,066 | 69 | 103 | 29.942029 | 0.827338 | 0.014037 | 0 | 0.436364 | 0 | 0 | 0.193215 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036364 | false | 0.036364 | 0.109091 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |

# src/python/zquantum/core/history/recording_functions_with_gradient_test.py
# (repo: kottmanj/z-quantum-core, license: Apache-2.0)
"""Test cases for recording functions with gradients."""
import pytest
import numpy as np
from .example_functions import function_1, Function2, Function5
from .recorder import recorder
from ..interfaces.functions import CallableWithGradient
@pytest.mark.parametrize(
"function,params",
[
(function_1, np.array([3, 4])),
(Function2(5), np.array([-1, 0, 1])),
(Function5(10), np.array([1, 2, 3])),
],
)
def test_recorder_propagates_calls_to_wrapped_functions_and_its_gradient(
function: CallableWithGradient, params: np.ndarray
):
target = recorder(function)
assert target(params) == function(params)
assert np.array_equal(target.gradient(params), function.gradient(params))

# optical/converter/utils.py (repo: hashtagml/optical, license: MIT)
"""
__author__: HashTagML
license: MIT
Created: Sunday, 28th March 2021
"""
import json
import io
import os
import shutil
import warnings
from pathlib import Path, PosixPath
from typing import Any, Callable, Dict, Optional, Union
import pandas as pd
from lxml import etree as xml
from PIL import Image
import xml.etree.ElementTree as ET
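# NOTE: ``import xml.etree.ElementTree as ET`` binds only the name ``ET``, so
# ``xml`` above still refers to ``lxml.etree`` (confusing, but works as written).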
_TF_INSTALLED = True
try:
import tensorflow as tf
except ImportError:
_TF_INSTALLED = False
def ifnone(x: Any, y: Any, transform: Optional[Callable] = None, type_safe: bool = False):
"""if x is None return y otherwise x after applying transofrmation ``transform`` and
casting the result back to original type if ``type_safe``
Args:
x (Any): returns x if x is not none
y (Any): returns y if x is none
transform (Optional[Callable], optional): applies transform to the output. Defaults to None.
type_safe (bool, optional): if true, tries casting the output to the original type. Defaults to False.
"""
if transform is not None:
assert callable(transform), "`transform` should be either `None` or instance of `Callable`"
else:
def transform(x):
return x
if x is None:
orig_type = type(y)
out = transform(y)
else:
orig_type = type(x)
out = transform(x)
if type_safe:
try:
out = orig_type(out)
except (ValueError, TypeError):
warnings.warn(f"output could not be casted as type {orig_type.__name__}")
pass
return out
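
# Illustrative behaviour of ``ifnone`` (examples are mine, not from the source):
#   ifnone(None, "5", transform=int)                  -> 5
#   ifnone(None, "5", transform=int, type_safe=True)  -> "5"  (cast back to str)
#   ifnone(3, 7, transform=lambda v: v * 2)           -> 6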
def exists(path: Union[str, os.PathLike]):
"""checks for whether a directory or file exists in the specified path"""
if Path(path).is_dir():
return "dir"
if Path(path).is_file():
return "file"
return
def get_image_dir(root: Union[str, os.PathLike]):
"""returns image directory given a root directory"""
return Path(root) / "images"
def get_annotation_dir(root: Union[str, os.PathLike]):
"""returns annotation directory given a root directory"""
return Path(root) / "annotations"
def find_job_metadata_key(json_data: Dict):
"""finds metadata key for sagemaker manifest format"""
for key in json_data.keys():
if key.split("-")[-1] == "metadata":
return key
def read_coco(coco_json: Union[str, os.PathLike]):
"""read a coco json and returns the images, annotations and categories dict separately"""
with open(coco_json, "r") as f:
coco = json.load(f)
return coco["images"], coco["annotations"], coco["categories"]
def write_json(data_dict: Dict, filename: Union[str, os.PathLike]):
"""writes json to disk"""
with open(filename, "w") as f:
json.dump(data_dict, f, indent=2)
def filter_split_category(
df: pd.DataFrame, split: Optional[str] = None, category: Optional[str] = None
) -> pd.DataFrame:
"""given the label df, filters the dataframe by split and/or label category
Args:
df (pd.DataFrame): the label dataframe.
split (Optional[str], optional): the dataset split e.g., ``train``, ``test`` etc. Defaults to None.
category (Optional[str], optional): the label category. Defaults to None.
Raises:
ValueError: if an unknown category is specified.
Returns:
pd.DataFrame: the filtered dataframe.
"""
if split is not None:
df = df.query("split == @split").copy()
if category is not None:
if category not in df.category.unique():
raise ValueError(f"class `{category}` is not present in annotations")
df = df.query("category == @category").copy()
return df
def copyfile(
src: Union[str, os.PathLike], dest: Union[str, os.PathLike], filename: Optional[Union[str, os.PathLike]] = None
) -> None:
"""copies a file from one path to another
Args:
src (Union[str, os.PathLike]): either a directory containing files or any filepath.
dest (Union[str, os.PathLike]): the output directory for the copy.
filename (Optional[Union[str, os.PathLike]], optional): If ``src`` is a directory, the name of the
file to copy. Defaults to None.
"""
if filename is not None:
filename = Path(src) / filename
else:
filename = Path(src)
dest = Path(dest) / filename.name
try:
shutil.copyfile(filename, dest)
except FileNotFoundError:
pass
def write_xml(
df: pd.DataFrame,
image_root: Union[str, os.PathLike, PosixPath],
output_dir: Optional[Union[str, os.PathLike, PosixPath]] = None,
) -> None:
"""write xml files in PASCAL VOC format given a label dataframe
Args:
df (pd.DataFrame): dataframe of the single image with multiple objects in it.
image_root (Union[str, os.PathLike, PosixPath]): path to image directory.
output_dir (Optional[Union[str, os.PathLike, PosixPath]], optional): output directory
"""
root = xml.Element("annotation")
folder = xml.Element("folder")
folder.text = ""
root.append(folder)
filename = xml.Element("filename")
filename.text = df.iloc[0]["image_id"]
root.append(filename)
path = xml.Element("path")
path.text = str(Path(image_root) / "images" / df.iloc[0]["split"] / df.iloc[0]["image_id"])
root.append(path)
source = xml.Element("source")
root.append(source)
database = xml.Element("database")
database.text = "UNKNOWN"
source.append(database)
size = xml.Element("size")
root.append(size)
width = xml.Element("width")
width.text = str(df.iloc[0]["image_width"])
size.append(width)
height = xml.Element("height")
height.text = str(df.iloc[0]["image_height"])
size.append(height)
depth = xml.Element("depth")
depth.text = "3"
size.append(depth)
segmented = xml.Element("segmented")
segmented.text = "0"
root.append(segmented)
for _, objec in df.iterrows():
obj = xml.Element("object")
root.append(obj)
name = xml.Element("name")
name.text = objec["category"]
obj.append(name)
pose = xml.Element("pose")
pose.text = "Unspecified"
obj.append(pose)
truncated = xml.Element("truncated")
truncated.text = "0"
obj.append(truncated)
difficult = xml.Element("difficult")
difficult.text = "0"
obj.append(difficult)
occluded = xml.Element("occluded")
occluded.text = "0"
obj.append(occluded)
bndbox = xml.Element("bndbox")
obj.append(bndbox)
xmin = xml.Element("xmin")
xmin.text = str(objec["x_min"])
bndbox.append(xmin)
xmax = xml.Element("xmax")
xmax.text = str(objec["x_max"])
bndbox.append(xmax)
ymin = xml.Element("ymin")
ymin.text = str(objec["y_min"])
bndbox.append(ymin)
ymax = xml.Element("ymax")
ymax.text = str(objec["y_max"])
bndbox.append(ymax)
tree = xml.ElementTree(root)
f_name = Path(output_dir).joinpath(df.iloc[0]["split"], Path(df.iloc[0]["image_id"]).stem + ".xml")
with open(f_name, "wb") as files:
tree.write(files, pretty_print=True)
def get_id_to_class_map(df: pd.DataFrame):
"""This function return the class_id to class name mapping
Args:
df (pd.DataFrame): master dataframe
Returns:
Dict: mapping dictionary
"""
set_df = df.drop_duplicates(subset="class_id")[["category", "class_id"]]
return set_df.set_index("class_id")["category"].to_dict()
def find_splits(image_dir: Union[str, os.PathLike], annotation_dir: Union[str, os.PathLike], format: str):
"""find the splits in the dataset, will ignore splits for which no annotation is found"""
# print(f"passed format: {format}")
exts = {
"coco": "json",
"csv": "csv",
"pascal": "xml",
"yolo": "txt",
"sagemaker": "manifest",
"createml": "json",
"simple_json": "json",
}
ext = exts[format]
im_splits = [x.name for x in Path(image_dir).iterdir() if x.is_dir() and not x.name.startswith(".")]
if format in ("yolo", "pascal"):
ann_splits = [x.name for x in Path(annotation_dir).iterdir() if x.is_dir()]
if not ann_splits:
files = list(Path(annotation_dir).glob(f"*.{ext}"))
if len(files):
ann_splits = ["main"]
else:
raise ValueError("No annotation found. Please check the directory specified.")
else:
ann_splits = [x.stem for x in Path(annotation_dir).glob(f"*.{ext}")]
no_anns = set(im_splits).difference(ann_splits)
if no_anns:
warnings.warn(f"no annotation found for {', '.join(list(no_anns))}")
return ann_splits, len(im_splits) > 0
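
# Expected layout (illustrative COCO case; directory and split names assumed):
#   root/images/train/*.jpg    root/annotations/train.json
#   root/images/valid/*.jpg    root/annotations/valid.json
# -> find_splits(...) returns (["train", "valid"], True)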
def _tf_parse_example(example):
"""parse tf examples"""
features = {
"image/height": tf.io.FixedLenFeature([], tf.int64),
"image/width": tf.io.FixedLenFeature([], tf.int64),
"image/filename": tf.io.FixedLenFeature([], tf.string),
"image/encoded": tf.io.FixedLenFeature([], tf.string),
"image/format": tf.io.FixedLenFeature([], tf.string),
"image/object/bbox/xmin": tf.io.VarLenFeature(tf.float32),
"image/object/bbox/xmax": tf.io.VarLenFeature(tf.float32),
"image/object/bbox/ymin": tf.io.VarLenFeature(tf.float32),
"image/object/bbox/ymax": tf.io.VarLenFeature(tf.float32),
"image/object/class/text": tf.io.VarLenFeature(tf.string),
"image/object/class/label": tf.io.VarLenFeature(tf.int64),
}
return tf.io.parse_single_example(example, features)
def _tf_int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _tf_bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _tf_float_list_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _tf_bytes_list_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def _tf_int64_list_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def create_tf_example(df: pd.DataFrame, root: Union[str, os.PathLike, PosixPath]):
"""returns protobuf for a given image
Args:
df (pd.DataFrame): Dataframe of a single image with multiple records of objects
root (Union[str, os.PathLike, PosixPath]): root of the Image path
Returns:
protobuf: protobuf of each Image
"""
img_path = str(df["image_path"].iloc[0])
with tf.io.gfile.GFile(img_path, "rb") as fid:
encoded_jpg = fid.read()
width = df.iloc[0]["image_width"]
height = df.iloc[0]["image_height"]
filename = df["image_id"].iloc[0].encode("utf8")
image_format = b"jpg"
xmins = list(df["x_min"] / width)
xmaxs = list((df["x_max"]) / width)
ymins = list(df["y_min"] / height)
ymaxs = list((df["y_max"]) / height)
classes_text = [s.encode("utf8") for s in df["category"]]
classes = list(df["class_id"].astype(int))
tf_example = tf.train.Example(
features=tf.train.Features(
feature={
"image/height": _tf_int64_feature(height),
"image/width": _tf_int64_feature(width),
"image/filename": _tf_bytes_feature(filename),
"image/source_id": _tf_bytes_feature(filename),
"image/encoded": _tf_bytes_feature(encoded_jpg),
"image/format": _tf_bytes_feature(image_format),
"image/object/bbox/xmin": _tf_float_list_feature(xmins),
"image/object/bbox/xmax": _tf_float_list_feature(xmaxs),
"image/object/bbox/ymin": _tf_float_list_feature(ymins),
"image/object/bbox/ymax": _tf_float_list_feature(ymaxs),
"image/object/class/text": _tf_bytes_list_feature(classes_text),
"image/object/class/label": _tf_int64_list_feature(classes),
}
)
)
return tf_example
def write_label_map(id_to_class_map: Dict, output_dir: Union[str, os.PathLike, PosixPath]):
"""writes label_map used in tf object detection
Args:
id_to_class_map (Dict): mapping dictionary
        output_dir (Union[str, os.PathLike, PosixPath]): output path
"""
with open(output_dir.joinpath("label_map.pbtxt"), "w") as f:
for id, cl in id_to_class_map.items():
f.write("item\n")
f.write("{\n")
f.write("name :'{0}'".format(str(cl)))
f.write("\n")
f.write("id :{}".format(int(id)))
f.write("\n")
f.write("display_name:'{0}'".format(str(cl)))
f.write("\n")
f.write("}\n")
def tf_decode_image(root: Union[str, os.PathLike, PosixPath], data, split: Union[str, os.PathLike, PosixPath]):
"""Decodes images and save in images folder under root
Args:
root (Union[str, os.PathLike, PosixPath]): path to root directory
data (tf.train.Example): single image example
split (Union[str, os.PathLike, PosixPath]): split directory
"""
img_filename = data["image/filename"].numpy().decode("utf-8")
img = data["image/encoded"].numpy()
im = Image.open(io.BytesIO(img))
im.save(str(Path(root) / "images" / split / img_filename))
def read_xml(xml_folder: Union[str, os.PathLike, PosixPath], img_path: Union[str, os.PathLike, PosixPath]):
"""read xml files in the folder and return list's of information used to construct master_df
Args:
xml_folder (Union[str, os.PathLike, PosixPath]): Xml file folder
img_path (Union[str, os.PathLike, PosixPath]): Image Directory
"""
img_filenames = []
img_widths = []
img_heights = []
cls_names = []
x_mins = []
y_mins = []
box_widths = []
box_heights = []
img_paths = []
xml_files = [x for x in Path(xml_folder).glob("*.xml")]
for fxml in xml_files:
tree = ET.parse(fxml)
root = tree.getroot()
img_filename = root.find("filename").text
img_width = root.find("size").find("width").text
img_height = root.find("size").find("height").text
for obj in root.findall("object"):
cls_name = obj.find("name").text
x_min = int(obj.find("bndbox").find("xmin").text)
y_min = int(obj.find("bndbox").find("ymin").text)
box_width = int(obj.find("bndbox").find("xmax").text) - int(x_min)
box_height = int(obj.find("bndbox").find("ymax").text) - int(y_min)
img_filenames.append(img_filename)
img_widths.append(img_width)
img_heights.append(img_height)
cls_names.append(cls_name)
x_mins.append(x_min)
y_mins.append(y_min)
box_widths.append(box_width)
box_heights.append(box_height)
img_paths.append(str(img_path.joinpath(img_filename)))
return img_filenames, img_widths, img_heights, cls_names, x_mins, y_mins, box_widths, box_heights, img_paths

# bad_requests/request.py (repo: Ben435/BensLoadTestTool, license: Apache-2.0)
import socket
import ssl
from bad_requests.response import Response
HTTP_STANDARD_PORTS = [80]
HTTPS_STANDARD_PORTS = [443, 8080]
BUFFERSIZE = 1024
class Request:
def __init__(self, host, message, get_body=True):
self.host = host
self.message = message
self.get_body = get_body
def send(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(5)
resource = self.host.split("/")
# Enable SSL if needed.
if "https" in resource[0].lower():
sock = ssl.wrap_socket(sock)
https = True
else:
https = False
# Connect and send message.
worked = False
for port in HTTPS_STANDARD_PORTS if https else HTTP_STANDARD_PORTS:
try:
sock.connect((self.host, port))
worked = True
except socket.error as e:
print(e)
print("Failed on port: " + str(port))
                continue  # try the next standard port instead of aborting the loop
finally:
if worked:
break
if not worked:
print("Failed all ports.")
return None
# Send message.
sock.send(self.message.encode("UTF-8"))
# Get headers
headers = ""
body = ""
received = sock.recv(BUFFERSIZE)
while len(received) == BUFFERSIZE and "\r\n\r\n" not in received.decode("UTF-8"):
headers += received.decode("UTF-8")
received = sock.recv(BUFFERSIZE)
snip = received.decode("UTF-8").split("\r\n\r\n")
if len(snip) >= 2:
headers += snip[0]
body += "\r\n\r\n".join(snip[1:])
else:
headers += "\r\n\r\n".join(snip)
# Parse headers.
lines = headers.split("\r\n")
status = lines[0].split(" ")
proto = status[0]
status_code = status[1]
status_message = status[2]
dict_headers = {}
for line in lines[1:]: # Skip "HTTP/1.1 NUM MSG" line.
if len(line) <= 1:
continue
data = line.split(": ", 1)
key = data[0]
vals = data[1].strip()
dict_headers[key] = vals
if "Content-Length" in dict_headers:
total_body = int(dict_headers["Content-Length"])
else:
total_body = 0
# Get body.
total_received = len(body)
while total_received < total_body:
body += received.decode("UTF-8")
received = sock.recv(BUFFERSIZE)
total_received += len(received)
body += received.decode("UTF-8")
return Response(status_code, status_message, proto, dict_headers, body, init_req=self)
def __str__(self):
return "Host: {}\nGet Body: {}\nMessage: {{\n{}\n}}".format(
self.host, self.get_body, "\n".join(map(lambda line: "\t" + line, self.message.strip().split("\n"))))
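
# Minimal usage sketch (illustrative; assumes the host answers plain HTTP on 80):
#   req = Request("example.com", "GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
#   resp = req.send()
#   print(resp)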

# tests/status_test.py (repo: araines/supervisor-newrelic, license: MIT)
import mock
import unittest
from StringIO import StringIO
from supervisor_newrelic.status import Status
def mock_request(*args, **kwargs):
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
if (args[0] == 'https://insights-collector.newrelic.com/v1/accounts/123/events' and
kwargs.get('headers').get('X-Insert-Key') == 'abc'):
return MockResponse({'foo': 'bar'}, 200)
return MockResponse({}, 404)
class StatusTests(unittest.TestCase):
def _get_mock(self, account='123', key='abc'):
prog = Status(account, key)
prog.stdin = StringIO()
prog.stdout = StringIO()
return prog
def test_run_not_process_state(self):
prog = self._get_mock()
prog.stdin.write('eventname:TICK len:0\n')
prog.stdin.seek(0)
prog.run(runonce=True)
self.assertEqual(prog.stdout.getvalue(), 'READY\nRESULT 2\nOK')
@mock.patch('supervisor_newrelic.status.requests.post', side_effect=mock_request)
def test_run_successful_fatal_state_report(self, m):
payload = 'processname:foo groupname:bar from_state:BACKOFF'
prog = self._get_mock()
prog.stdin.write('eventname:PROCESS_STATE_FATAL len:%d\n' % len(payload))
prog.stdin.write(payload)
prog.stdin.seek(0)
prog.run(runonce=True)
self.assertEqual(prog.stdout.getvalue(), 'READY\nRESULT 2\nOK')
@mock.patch('supervisor_newrelic.status.requests.post', side_effect=mock_request)
def test_run_unsuccessful_fatal_state_report(self, m):
payload = 'processname:foo groupname:bar from_state:BACKOFF'
prog = self._get_mock('234')
prog.stdin.write('eventname:PROCESS_STATE_FATAL len:%d\n' % len(payload))
prog.stdin.write(payload)
prog.stdin.seek(0)
prog.run(runonce=True)
self.assertEqual(prog.stdout.getvalue(), 'READY\nRESULT 4\nFAIL')

# tuning.py (repo: grdavis/college-basketball-elo, license: MIT)
import numpy as np
from tqdm import tqdm
import elo
import utils
import random
import plotly.graph_objects as go
from sklearn.metrics import r2_score
from scipy.stats import linregress
import pandas as pd
from predictions import predict_tournament, ROUNDS
ERRORS_START = 4 #after 4 seasons (starts counting errors 20141114)
class Tuning_ELO_Sim(elo.ELO_Sim):
'''
This class is an extension of ELO_Sim that allows us to keep track of errors and extra metrics through the
simulation process which are useful for tuning. Errors tracked are...
error1: calculated (1 - predicted win probability)^2 for each game and add them up. More commonly known as Brier score (https://en.wikipedia.org/wiki/Brier_score). This is the primary error of interest
error2: calculated at the end of a simulation as the average absolute difference between predicted win probability and actual win probability for teams who were given that prediction
'''
def __init__(self):
super().__init__()
self.predict_tracker = {}
self.win_tracker = {0.0: 0}
self.elo_margin_tracker = {}
self.MoV_tracker = {}
self.error1 = []
def update_errors(self, w_winp):
if self.season_count >= ERRORS_START:
rounded, roundedL = round(w_winp, 2), round(1 - w_winp, 2)
self.error1.append((1 - w_winp)**2)
self.win_tracker[rounded] = self.win_tracker.get(rounded, 0) + 1
self.predict_tracker[rounded] = self.predict_tracker.get(rounded, 0) + 1
self.predict_tracker[roundedL] = self.predict_tracker.get(roundedL, 0) + 1
def update_MoVs(self, elo_margin, MoV):
if self.season_count >= ERRORS_START:
rounded = round(elo_margin/25) * 25 #round to nearest 25
self.elo_margin_tracker[rounded] = self.elo_margin_tracker.get(rounded, 0) + 1
self.MoV_tracker[rounded] = self.MoV_tracker.get(rounded, 0) + MoV
self.elo_margin_tracker[-rounded] = self.elo_margin_tracker.get(-rounded, 0) + 1
self.MoV_tracker[-rounded] = self.MoV_tracker.get(-rounded, 0) - MoV
def get_errors(self):
error2 = 0
total_games = sum(self.predict_tracker.values())
for i in sorted(self.predict_tracker):
result = self.win_tracker.get(i, 0)/self.predict_tracker[i]
error2 += self.predict_tracker[i] * abs(result - i)
return (sum(self.error1), error2 / total_games)
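
# Sanity check (illustrative): one game in which the eventual winner was given a
# 0.75 win probability adds (1 - 0.75)**2 = 0.0625 to error1 (the Brier score).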
def tuning_sim(data, k_factor, new_season_carry, home_elo, new_team_elo):
'''
This function runs through all of the data and updates elo and errors along the way
It is a simplified version of the official sim function used in elo.py
'''
this_sim = Tuning_ELO_Sim()
    this_month = int(data[0][-1][4:6])  # cast so the int comparison below works
elo.NEW_ELO = new_team_elo
for row in data:
row_month = int(row[-1][4:6])
if this_month in [3, 4] and row_month == 11:
this_sim.season_count += 1
this_sim.season_reset(new_season_carry)
this_sim.date = row[-1]
this_month = row_month
elo_margin, MoV = elo.step_elo(this_sim, row, k_factor, home_elo)
this_sim.update_errors(elo.winp(elo_margin))
this_sim.update_MoVs(elo_margin, MoV)
return this_sim
def random_tune(data, number):
'''
Use this function to repeatedly narrow down tighter and tighter ranges of possible optimal values for k, carry, and home elo advantage
Start with wide ranges, then use the outputs (which are sorted by their errors) to inform a tighter range for the next iteration
Once windows are small enough, switch to brute_tune
'''
k_range = [44, 45, 46, 47]
carry_range = np.arange(.87, .93, .01)
home_range = np.arange(77, 85, 1)
new_team_range = [900]
errors = []
for i in tqdm(range(number)):
k_factor, new_season_carry, home_elo, nte = random.choice(k_range), random.choice(carry_range), random.choice(home_range), random.choice(new_team_range)
error1, error2 = tuning_sim(data, k_factor, new_season_carry, home_elo, nte).get_errors()
errors.append((error1, error2, k_factor, new_season_carry, home_elo, nte))
return errors
def brute_tune(data):
'''
Use this function to cycle through all possible combinations of the 4 variables within the defined ranges and find the optimal solution
Since brute force can take some time to run, random_tune first to help narrow possible ranges
'''
k_range = [42, 43, 44, 45]#[64, 65, 66]
carry_range = [.9, .91, .92, .93]#np.arange(.7, 1.00, .05)
home_range = [80, 81, 82, 83]#[110, 111, 112]
new_team_range = [925, 950, 975, 1000] #np.arange(750, 1250, 50)
errors = []
for k in tqdm(k_range):
for c in tqdm(carry_range, leave = False):
for h in tqdm(home_range, leave = False):
for n in tqdm(new_team_range, leave = False):
error1, error2 = tuning_sim(data, k, c, h, n).get_errors()
errors.append((error1, error2, k, c, h, n))
return errors
def tune(data):
#start with random_tune, then switch to brute_tune when the ranges for values are tight enough so as not to take too long to run
# errors = random_tune(data, 50)
errors = brute_tune(data)
print(sorted(errors, key = lambda x: x[0]))
print(sorted(errors, key = lambda x: x[1]))
filepath = utils.get_latest_data_filepath()
data = utils.read_csv(filepath)
explore = tuning_sim(data, elo.K_FACTOR, elo.SEASON_CARRY, elo.HOME_ADVANTAGE, elo.NEW_ELO)
print(explore.get_errors())
###########################TUNING############################
# tune(data)
# # start measuring after season 3 (start fall 2014), errors as of games through 11/8/2021
# # best: (error1 = 6887, error2 = 0.0097, k_factor = 43, carryover = .9, home_elo = 81, new_team = 925)
################Brier (Error 1) Over Time####################
# size = 5700 #roughly the number of games per season if we have ~40K errors over the course of 7 seasons (Fall 2014 - Spring 2021)
# leftover = len(explore.error1) % size
# y_vals = [sum(explore.error1[i*size:(i*size)+size])/size for i in range(len(explore.error1)//size)] + [sum(explore.error1[-leftover:])/leftover]
# sizes = [size for i in range(len(explore.error1)//size)] + [leftover]
# x_vals = [i for i in range(len(sizes))]
# fig = go.Figure([go.Bar(x = x_vals, y = y_vals, text = ['n = ' + str(size) for size in sizes], textposition = 'auto')])
# fig.update_layout(title_text = 'Brier Score Over Time: Fall 2014 - Fall 2021', xaxis_title = 'Bucket of Chronological Games', yaxis_title = 'Avg. Brier Score in Bucket')
# fig.show()
###################Visualizing Error 2#######################
# x_vals = [i for i in explore.predict_tracker]
# y_vals = [explore.win_tracker[i]/explore.predict_tracker[i] for i in x_vals]
# sizes = [explore.predict_tracker[i] for i in x_vals]
# fig = go.Figure()
# fig.add_trace(go.Scatter(x = x_vals, y = y_vals, mode = 'markers', name = 'Predictions', text = ['n = ' + str(size) for size in sizes]))
# fig.add_trace(go.Scatter(x = [0, 1], y = [0, 1], mode = 'lines', name = 'Perfect Line'))
# r2 = r2_score(x_vals, y_vals)
# fig.update_layout(title_text = 'Predicted vs. Actual Win Probability (R^2 = 0.99)', xaxis_title = 'Predicted Win Probability', yaxis_title = 'Actual Win Probability')
# fig.show()
##############Elo margin vs. Margin of Victory################
# x_vals = [i for i in explore.elo_margin_tracker]
# y_vals = [explore.MoV_tracker[i]/explore.elo_margin_tracker[i] for i in x_vals]
# sizes = [explore.elo_margin_tracker[i] for i in x_vals]
# fig = go.Figure()
# fig.add_trace(go.Scatter(x = x_vals, y = y_vals, mode = 'markers', name = 'Results', text = ['n = ' + str(size) for size in sizes], marker=dict(size=[s/120 for s in sizes])))
# #fit a line to middle 80% of data (Pareto principle)
# target_points = sum(sizes)*.8*.5 #I want to reach 80% of points on the positive side. They are duplicated on the negative side, so really 40% of total points
# points_reached = explore.elo_margin_tracker[0]
# for i in range(1, int(max(x_vals)/25)):
# points_reached += explore.elo_margin_tracker.get(i*25, 0)
# if points_reached > target_points: break
# x_trimmed = [j*25 for j in range(-i, i+1)]
# y_trimmed = [explore.MoV_tracker[i]/explore.elo_margin_tracker[i] for i in x_trimmed]
# slope, intercept, r, p, se = linregress(x_trimmed, y_trimmed)
# # print(slope, r)
# # slope: 0.03914846 -> 1/slope: 25.5 elo difference / point difference
# fig.add_trace(go.Scatter(x = x_trimmed, y = [i*slope + intercept for i in x_trimmed], mode = 'lines', name = 'LSRL for Middle 80% of Games (R^2 > 0.99)'))
# fig.update_layout(title_text = 'Elo Margin vs. Average Scoring Margin: 1 game point = 25.5 Elo points', xaxis_title = 'Elo Margin', yaxis_title = 'Average Actual Scoring Margin')
# fig.show()
#################Elo Season-over-Season########################
# season_totals = {}
# season_teams = {}
# years = ['20110404', '20120402', '20130408', '20140407', '20150406', '20160404', '20170403', '20180402', '20190408', '20200311', '20210405']
# for team in explore.teams:
# for year in years:
# for date, snap in explore.teams[team].snapshots:
# if date == year:
# season_totals[year] = season_totals.get(year, 0) + snap
# season_teams[year] = season_teams.get(year, 0) + 1
# y_vals = [round(season_totals[i]/season_teams[i]) for i in season_teams]
# x_vals = [i[:4] for i in season_teams]
# sizes = ['teams = ' + str(season_teams[i]) for i in season_teams]
# fig = go.Figure([go.Bar(x = x_vals, y = y_vals, text = sizes, textposition = 'auto')])
# fig.update_layout(title_text = 'Average End-of-Season Elo over Time: Spring 2011 - Spring 2021', xaxis_title = 'Year', yaxis_title = 'End of Season Elo')
# fig.show()
##################LATEST DISTRIBUTION##########################
# bucketing = {}
# for team in explore.teams:
# rounded = round(explore.get_elo(team) / 50) * 50
# bucketing[rounded] = bucketing.get(rounded, 0) + 1
# x_vals = [i for i in range(min(bucketing), max(bucketing) + 1, 10)]
# y_vals = [bucketing.get(i, 0) for i in x_vals]
# fig = go.Figure([go.Bar(x = x_vals, y = y_vals)])
# fig.update_layout(title_text = 'Elo Distribution through ' + explore.date, xaxis_title = 'Elo Rating', yaxis_title = 'Number of Teams')
# fig.show()
###############HISTORICAL BRACKET PERFORMANCE##################
# scores = [10, 20, 40, 80, 160, 320] #ESPN scoring system for correct game in round
# def evaluate_brackets(predictions, real_results):
# predictions_score = 0
# for index in range(len(ROUNDS)):
# predictions_score += sum([scores[index] if predictions[ROUNDS[index]][i] == real_results[ROUNDS[index]][i] else 0 for i in range(len(predictions[ROUNDS[index]]))])
# return predictions_score
# for stop_date, tourney_filepath in [('20190320', 'tournament_results_2019.csv'), ('20180314', 'tournament_results_2018.csv'), ('20170315', 'tournament_results_2017.csv')]:
# elo_state = elo.main(stop_short = stop_date)
# df = pd.read_csv(utils.DATA_FOLDER + tourney_filepath)
# tournamant_teams = list(df['first'].dropna())
# results = {'first': tournamant_teams}
# for r in ROUNDS:
# results[r] = df[r].dropna().values
# best_bracket = predict_tournament(elo_state, tournamant_teams, pick_mode = 1)
# print(evaluate_brackets(best_bracket, results))
# remaining = [32, 16, 8, 4, 2, 1]
# print(sum([remaining[index]*scores[index]*(.5**(index + 1)) for index in range(6)]))
# #2019: 1260
# #2018: 830
# #2017: 720
# #Random: 315

# aula4/classifica_junto.py (repo: davidpvilaca/TEP, license: MIT)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 19 13:21:45 2017
@author: davidpvilaca
"""
import matplotlib.pyplot as plt
import numpy as np
import cv2
def getHist(arr_img):
hists = []
sm = []
for img in arr_img:
hist = cv2.calcHist([img], [0, 1, 2], None, [8, 8, 8],
[0, 256, 0, 256, 0, 256])
hist = cv2.normalize(hist, hist).flatten()
hists.append(hist)
sm.append(np.average(hist[:]))
    # a bare ``return`` here dropped the computed result; returning the mean
    # histogram (assumed intent) gives callers something compareHist can use
    return np.mean(hists, axis=0).astype('float32')
def compareHist(hist1, hist2):
OPENCV_METHODS = ( ("Correlation", cv2.HISTCMP_CORREL), ("Intersection", cv2.HISTCMP_INTERSECT) )
    return cv2.compareHist(hist1, hist2, OPENCV_METHODS[0][1])  # correlation
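
# Note: HISTCMP_CORREL scores lie in [-1, 1] and higher means more similar,
# which is why main() below sorts the comparison results with reverse=True.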
def showImages(imgArr, titleArr):
lenImgArr = len(imgArr)
assert lenImgArr == len(titleArr)
plt.figure(figsize = (8,8))
p = int(str(lenImgArr//2) + "20" if lenImgArr > 2 and ( (lenImgArr % 2) == 0 ) else str(lenImgArr//3) + "30")
i = 0
for img in imgArr:
plt.subplot(p + i + 1)
plt.title(titleArr[i])
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
i = i + 1
def main():
imageSource = {
'Goblin Town': {
'images': [
cv2.imread('Goblin_town1.jpg'),
cv2.imread('Goblin_town2.jpg'),
cv2.imread('Goblin_town3.jpg'),
cv2.imread('Goblin_town4.jpg')
],
'hist': None
},
'Mordor': {
'images': [
cv2.imread('mordor1.jpg'),
cv2.imread('mordor2.jpg'),
cv2.imread('Mordor3.jpg'),
cv2.imread('Mordor4.jpg')
],
'hist': None
},
'Rivendell': {
'images': [
cv2.imread('Rivendell1.jpg'),
cv2.imread('Rivendell2.jpg'),
cv2.imread('Rivendell3.jpg'),
cv2.imread('Rivendell4.jpg')
],
'hist': None
},
'Shire': {
'images': [
cv2.imread('Shire1.jpg'),
cv2.imread('Shire2.jpg'),
cv2.imread('Shire3.jpg'),
cv2.imread('Shire4.jpg')
],
'hist': None
}
}
# calc histogram
for name,data in imageSource.items():
imageSource[name]['hist'] = getHist(imageSource[name]['images'])
imagesOnde = [
cv2.imread('Onde1.jpg'),
cv2.imread('Onde2.jpg'),
cv2.imread('Onde3.jpg'),
cv2.imread('Onde4.jpg')
]
imgs,titles = [],[]
i = 1
for ondeImg in imagesOnde:
ondeHist = getHist([ondeImg])
results = []
for imgSrcName, srcData in imageSource.items():
results.append((compareHist(ondeHist, srcData['hist']), imgSrcName))
result = sorted(results, reverse = True)[0]
imgs.append(ondeImg)
titles.append(result[1] + " (" + "Onde" + str(i) + ")")
i += 1
showImages(imgs, titles)
plt.show()
return 0
if __name__ == '__main__':
    main()

# package_management/package_manager.py (repo: m-j/ziprepo-server, license: MIT)
import copy
import json
import logging
import os
import shutil
from distutils.version import LooseVersion
from threading import Lock
from typing import List, Dict, Optional
from zipfile import ZipFile
import aiofiles
from tornado.ioloop import IOLoop
from package_management.constants import zpspec_filename, package_name_key, version_key
from package_management.data_scanning import scan_data_directory
from errors.errors import PackageAlreadyExistsError, PackageDoesntExistError, MaliciousDataError
from package_management.model import PackageMetadata, PackageInfo
from package_management.package_validation import validate_package_name, validate_package_version
from package_management.paths_util import PathsUtil
from package_management.utils import fullname
read_chunk_size = 3*1024*1024*10
def packages_metadata_from_versions(name: str, semvers: List[str]):
return [PackageMetadata(name=name, semver=semver) for semver in semvers]
def parse_zpfile(temp_file_path: str):
with ZipFile(temp_file_path) as zip_file:
print(zip_file.namelist())
zpspec_contents = zip_file.read(zpspec_filename)
json_dict = json.loads(zpspec_contents)
return json_dict
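
# Illustrative (assumed) .zp layout: a zip archive containing a ``zpspec`` JSON
# manifest whose keys are addressed via ``package_name_key``/``version_key``
# from constants; parse_zpfile() reads only that manifest, not the payload.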
class PackageManager:
_paths_util: PathsUtil
_data_dir_path: str
_package_infos: Dict[str, PackageInfo]
_packages_in_processing_fullnames: List[str]
_package_infos_lock: Lock
def __init__(self, data_dir_path: str, paths_util: PathsUtil):
self._paths_util = paths_util
self._data_dir_path = data_dir_path
self._packages_in_processing_fullnames = []
self._package_infos_lock = Lock()
def scan(self):
package_infos = scan_data_directory(self._data_dir_path)
        # todo: validate integrity here in the future
self._package_infos = package_infos
def query_all(self) -> Dict[str, PackageInfo]:
return self._package_infos
def query(self, name: str) -> Optional[PackageInfo]:
if name is None:
raise ValueError('You have to provide package name')
if name in self._package_infos:
package_info = self._package_infos[name]
return package_info
else:
return None
def remove_package_sync(self, package_name: str, package_version: str):
validate_package_name(package_name)
validate_package_version(package_version)
package_version_dir_path = self._paths_util.get_package_version_dir_path(package_name, package_version)
try:
dir_exists = os.path.isdir(package_version_dir_path)
if dir_exists:
print('Removing package ' + package_name + ' in version ' + package_version)
shutil.rmtree(package_version_dir_path)
with self._package_infos_lock:
                    self._remove_version_from_package_info(package_name, package_version)
logging.info(f'Successfully removed package: {fullname(package_name, package_version)}')
except OSError as err:
logging.error(f'Error occurred while removing package: {fullname(package_name, package_version)}!')
def add_package_sync(self, temp_file_path: str):
json_dict = parse_zpfile(temp_file_path)
name = json_dict[package_name_key]
version = json_dict[version_key]
validate_package_name(name)
validate_package_version(version)
package_version_dir_path = self._paths_util.get_package_version_dir_path(name, version)
package_version_file_path = self._paths_util.get_package_version_file_path(name, version)
package_version_zpspec_path = self._paths_util.get_package_version_zpspec_path(name, version)
if not self._paths_util.paths_are_valid([package_version_dir_path, package_version_file_path, package_version_zpspec_path]):
logging.error(f'Tried to create package in folder {package_version_dir_path} which is outside data directory')
raise MaliciousDataError()
self._add_fullname_to_in_processing_or_raise_exception(name, version)
try:
try:
os.makedirs(package_version_dir_path, exist_ok=False)
except OSError as err:
raise PackageAlreadyExistsError(package_name=name, package_version=version)
shutil.move(temp_file_path, package_version_file_path)
# what if we fail here? it will violate integrity
with open(package_version_zpspec_path, mode='wt') as zpspec_file:
json.dump(json_dict, zpspec_file)
with self._package_infos_lock:
self._add_version_to_package_info(name, version)
logging.info(f'Successfully added new package: {fullname(name, version)}')
finally:
with self._package_infos_lock:
self._packages_in_processing_fullnames.remove(fullname(name, version))
def _add_version_to_package_info(self, name, version):
package_infos_clone = copy.deepcopy(self._package_infos)
if name not in self._package_infos:
package_infos_clone[name] = PackageInfo(name=name, versions=[])
package_infos_clone[name].versions.append(version)
package_infos_clone[name].versions.sort(key=LooseVersion)
self._package_infos = package_infos_clone
    def _remove_version_from_package_info(self, name, version):
package_infos_clone = copy.deepcopy(self._package_infos)
if name not in self._package_infos:
package_infos_clone[name] = PackageInfo(name=name, versions=[])
package_infos_clone[name].versions.remove(version)
package_infos_clone[name].versions.sort(key=LooseVersion)
if len(package_infos_clone[name].versions) == 0:
del package_infos_clone[name]
self._package_infos = package_infos_clone
def _add_fullname_to_in_processing_or_raise_exception(self, name, version):
with self._package_infos_lock:
if name in self._package_infos and version in self._package_infos[name].versions:
raise PackageAlreadyExistsError(package_name=name, package_version=version)
if fullname(name, version) in self._packages_in_processing_fullnames:
raise PackageAlreadyExistsError(package_name=name, package_version=version)
self._packages_in_processing_fullnames.append(fullname(name, version))
async def add_package(self, temp_file_path: str):
return await IOLoop.current().run_in_executor(None, self.add_package_sync, temp_file_path)
async def remove_package(self, package_name: str, package_version: str):
return await IOLoop.current().run_in_executor(None, self.remove_package_sync, package_name, package_version)
async def read_package(self, name: str, version: str):
if name is None or version is None:
raise ValueError('You have to specify both name and version')
# todo: protect from deleting package when it is being read
package_info = self.query(name=name)
if (package_info is None) or (version not in package_info.versions):
raise PackageDoesntExistError(name, version)
package_file_path = self._paths_util.get_package_version_file_path(name, version)
if not self._paths_util.path_is_valid(package_file_path):
logging.error(f'Tried to read data from file {package_file_path} which is outside data directory')
raise MaliciousDataError()
try:
async with aiofiles.open(package_file_path, mode='rb') as file:
while True:
chunk_bytes = await file.read(read_chunk_size)
if len(chunk_bytes) > 0:
yield chunk_bytes
else:
return
except OSError as oserr:
logging.exception(f'Failed to open file {package_file_path} for reading')
raise PackageDoesntExistError(name, version)

# cell2location/models/base/_pyro_mixin.py (repo: jjhong922/cell2location, license: Apache-2.0)
from datetime import date
from functools import partial
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyro
import torch
from pyro import poutine
from pyro.infer.autoguide import AutoNormal, init_to_mean
from scipy.sparse import issparse
from scvi import _CONSTANTS
from scvi.data._anndata import get_from_registry
from scvi.dataloaders import AnnDataLoader
from scvi.model._utils import parse_use_gpu_arg
from ...distributions.AutoNormalEncoder import AutoGuideList, AutoNormalEncoder
def init_to_value(site=None, values={}):
if site is None:
return partial(init_to_value, values=values)
if site["name"] in values:
return values[site["name"]]
else:
return init_to_mean(site)
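
# Illustrative use (names assumed, not from the original source):
#   guide = AutoNormal(model, init_loc_fn=init_to_value(values={"w": torch.ones(5)}))
# sites named in ``values`` start at the given tensors; all others use init_to_mean.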
class AutoGuideMixinModule:
"""
This mixin class provides methods for:
- initialising standard AutoNormal guides
- initialising amortised guides (AutoNormalEncoder)
- initialising amortised guides with special additional inputs
"""
def _create_autoguide(
self,
model,
amortised,
encoder_kwargs,
data_transform,
encoder_mode,
init_loc_fn=init_to_mean,
n_cat_list: list = [],
encoder_instance=None,
):
if not amortised:
_guide = AutoNormal(
model,
init_loc_fn=init_loc_fn,
create_plates=model.create_plates,
)
else:
encoder_kwargs = encoder_kwargs if isinstance(encoder_kwargs, dict) else dict()
n_hidden = encoder_kwargs["n_hidden"] if "n_hidden" in encoder_kwargs.keys() else 200
init_param_scale = (
encoder_kwargs["init_param_scale"] if "init_param_scale" in encoder_kwargs.keys() else 1 / 50
)
if "init_param_scale" in encoder_kwargs.keys():
del encoder_kwargs["init_param_scale"]
amortised_vars = self.list_obs_plate_vars
_guide = AutoGuideList(model, create_plates=model.create_plates)
_guide.append(
AutoNormal(
pyro.poutine.block(model, hide=list(amortised_vars["sites"].keys())),
init_loc_fn=init_loc_fn,
)
)
if isinstance(data_transform, np.ndarray):
# add extra info about gene clusters to the network
self.register_buffer("gene_clusters", torch.tensor(data_transform.astype("float32")))
n_in = model.n_vars + data_transform.shape[1]
                data_transform = self._data_transform_clusters()
elif data_transform == "log1p":
# use simple log1p transform
data_transform = torch.log1p
n_in = self.model.n_vars
elif (
isinstance(data_transform, dict)
and "var_std" in list(data_transform.keys())
and "var_mean" in list(data_transform.keys())
):
# use data transform by scaling
n_in = model.n_vars
self.register_buffer(
"var_mean",
torch.tensor(data_transform["var_mean"].astype("float32").reshape((1, n_in))),
)
self.register_buffer(
"var_std",
torch.tensor(data_transform["var_std"].astype("float32").reshape((1, n_in))),
)
                data_transform = self._data_transform_scale()
else:
# use custom data transform
data_transform = data_transform
n_in = model.n_vars
if len(amortised_vars["input"]) >= 2:
encoder_kwargs["n_cat_list"] = n_cat_list
amortised_vars["input_transform"][0] = data_transform
_guide.append(
AutoNormalEncoder(
pyro.poutine.block(model, expose=list(amortised_vars["sites"].keys())),
amortised_plate_sites=amortised_vars,
n_in=n_in,
n_hidden=n_hidden,
init_param_scale=init_param_scale,
encoder_kwargs=encoder_kwargs,
encoder_mode=encoder_mode,
encoder_instance=encoder_instance,
)
)
return _guide
def _data_transform_clusters(self):
def _data_transform(x):
return torch.log1p(torch.cat([x, x @ self.gene_clusters], dim=1))
return _data_transform
def _data_transform_scale(self):
def _data_transform(x):
# return (x - self.var_mean) / self.var_std
return x / self.var_std
return _data_transform
class QuantileMixin:
"""
This mixin class provides methods for:
- computing median and quantiles of the posterior distribution using both direct and amortised inference
"""
def _optim_param(
self,
lr: float = 0.01,
autoencoding_lr: float = None,
clip_norm: float = 200,
module_names: list = ["encoder", "hidden2locs", "hidden2scales"],
):
# TODO implement custom training method that can use this function.
# create function which fetches different lr for autoencoding guide
def optim_param(module_name, param_name):
# detect variables in autoencoding guide
if autoencoding_lr is not None and np.any([n in module_name + "." + param_name for n in module_names]):
return {
"lr": autoencoding_lr,
# limit the gradient step from becoming too large
"clip_norm": clip_norm,
}
else:
return {
"lr": lr,
# limit the gradient step from becoming too large
"clip_norm": clip_norm,
}
return optim_param
@torch.no_grad()
def _posterior_quantile_amortised(self, q: float = 0.5, batch_size: int = 2048, use_gpu: bool = None):
"""
Compute median of the posterior distribution of each parameter, separating local (minibatch) variable
and global variables, which is necessary when performing amortised inference.
Note for developers: requires model class method which lists observation/minibatch plate
variables (self.module.model.list_obs_plate_vars()).
Parameters
----------
q
quantile to compute
batch_size
number of observations per batch
use_gpu
Bool, use gpu?
Returns
-------
dictionary {variable_name: posterior median}
"""
gpus, device = parse_use_gpu_arg(use_gpu)
self.module.eval()
train_dl = AnnDataLoader(self.adata, shuffle=False, batch_size=batch_size)
# sample local parameters
i = 0
for tensor_dict in train_dl:
args, kwargs = self.module._get_fn_args_from_batch(tensor_dict)
args = [a.to(device) for a in args]
kwargs = {k: v.to(device) for k, v in kwargs.items()}
self.to_device(device)
if i == 0:
means = self.module.guide.quantiles([q], *args, **kwargs)
means = {
k: means[k].cpu().numpy()
for k in means.keys()
if k in self.module.model.list_obs_plate_vars()["sites"]
}
# find plate dimension
trace = poutine.trace(self.module.model).get_trace(*args, **kwargs)
# print(trace.nodes[self.module.model.list_obs_plate_vars()['name']])
obs_plate = {
name: site["cond_indep_stack"][0].dim
for name, site in trace.nodes.items()
if site["type"] == "sample"
if any(f.name == self.module.model.list_obs_plate_vars()["name"] for f in site["cond_indep_stack"])
}
else:
means_ = self.module.guide.quantiles([q], *args, **kwargs)
means_ = {
k: means_[k].cpu().numpy()
for k in means_.keys()
if k in list(self.module.model.list_obs_plate_vars()["sites"].keys())
}
means = {
k: np.concatenate([means[k], means_[k]], axis=list(obs_plate.values())[0]) for k in means.keys()
}
i += 1
# sample global parameters
tensor_dict = next(iter(train_dl))
args, kwargs = self.module._get_fn_args_from_batch(tensor_dict)
args = [a.to(device) for a in args]
kwargs = {k: v.to(device) for k, v in kwargs.items()}
self.to_device(device)
global_means = self.module.guide.quantiles([q], *args, **kwargs)
global_means = {
k: global_means[k].cpu().numpy()
for k in global_means.keys()
if k not in list(self.module.model.list_obs_plate_vars()["sites"].keys())
}
for k in global_means.keys():
means[k] = global_means[k]
self.module.to(device)
return means
@torch.no_grad()
def _posterior_quantile(self, q: float = 0.5, batch_size: int = 2048, use_gpu: bool = None):
"""
Compute median of the posterior distribution of each parameter pyro models trained without amortised inference.
Parameters
----------
q
quantile to compute
use_gpu
Bool, use gpu?
Returns
-------
dictionary {variable_name: posterior median}
"""
self.module.eval()
gpus, device = parse_use_gpu_arg(use_gpu)
train_dl = AnnDataLoader(self.adata, shuffle=False, batch_size=batch_size)
# sample global parameters
tensor_dict = next(iter(train_dl))
args, kwargs = self.module._get_fn_args_from_batch(tensor_dict)
args = [a.to(device) for a in args]
kwargs = {k: v.to(device) for k, v in kwargs.items()}
self.to_device(device)
means = self.module.guide.quantiles([q], *args, **kwargs)
means = {k: means[k].cpu().detach().numpy() for k in means.keys()}
return means
def posterior_quantile(self, q: float = 0.5, batch_size: int = 2048, use_gpu: bool = None):
"""
Compute median of the posterior distribution of each parameter.
Parameters
----------
q
quantile to compute
use_gpu
Returns
-------
"""
if self.module.is_amortised:
return self._posterior_quantile_amortised(q=q, batch_size=batch_size, use_gpu=use_gpu)
else:
return self._posterior_quantile(q=q, batch_size=batch_size, use_gpu=use_gpu)
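
# e.g. (illustrative): medians = model.posterior_quantile(q=0.5, batch_size=2048)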
class PltExportMixin:
r"""
This mixing class provides methods for common plotting tasks and data export.
"""
@staticmethod
def plot_posterior_mu_vs_data(mu, data):
r"""Plot expected value of the model (e.g. mean of NB distribution) vs observed data
:param mu: expected value
:param data: data value
"""
plt.hist2d(
np.log10(data.flatten() + 1),
np.log10(mu.flatten() + 1),
bins=50,
norm=matplotlib.colors.LogNorm(),
)
plt.gca().set_aspect("equal", adjustable="box")
plt.xlabel("Data, log10")
plt.ylabel("Posterior expected value, log10")
plt.title("Reconstruction accuracy")
plt.tight_layout()
def plot_history(self, iter_start=0, iter_end=-1, ax=None):
r"""Plot training history
Parameters
----------
iter_start
omit initial iterations from the plot
iter_end
omit last iterations from the plot
ax
matplotlib axis
"""
        if ax is None:
            ax = plt
            ax.set_xlabel = plt.xlabel
            ax.set_ylabel = plt.ylabel
            ax.set_xlim = plt.xlim
if iter_end == -1:
iter_end = len(self.history_["elbo_train"])
ax.plot(
self.history_["elbo_train"].index[iter_start:iter_end],
np.array(self.history_["elbo_train"].values.flatten())[iter_start:iter_end],
label="train",
)
ax.legend()
ax.xlim(0, len(self.history_["elbo_train"]))
ax.set_xlabel("Training epochs")
ax.set_ylabel("-ELBO loss")
plt.tight_layout()
def _export2adata(self, samples):
r"""
Export key model variables and samples
Parameters
----------
samples
dictionary with posterior mean, 5%/95% quantiles, SD, samples, generated by ``.sample_posterior()``
Returns
-------
Updated dictionary with additional details is saved to ``adata.uns['mod']``.
"""
# add factor filter and samples of all parameters to unstructured data
results = {
"model_name": str(self.module.__class__.__name__),
"date": str(date.today()),
"factor_filter": list(getattr(self, "factor_filter", [])),
"factor_names": list(self.factor_names_),
"var_names": self.adata.var_names.tolist(),
"obs_names": self.adata.obs_names.tolist(),
"post_sample_means": samples["post_sample_means"],
"post_sample_stds": samples["post_sample_stds"],
"post_sample_q05": samples["post_sample_q05"],
"post_sample_q95": samples["post_sample_q95"],
}
return results
def sample2df_obs(
self,
samples: dict,
site_name: str = "w_sf",
summary_name: str = "means",
name_prefix: str = "cell_abundance",
):
"""Export posterior distribution summary for observation-specific parameters
(e.g. spatial cell abundance) as Pandas data frame
(means, 5%/95% quantiles or sd of posterior distribution).
Parameters
----------
samples
dictionary with posterior mean, 5%/95% quantiles, SD, samples, generated by ``.sample_posterior()``
site_name
name of the model parameter to be exported
summary_name
posterior distribution summary to return ['means', 'stds', 'q05', 'q95']
name_prefix
prefix to add to column names (f'{summary_name}{name_prefix}_{site_name}_{self\.factor_names_}')
Returns
-------
Pandas data frame corresponding to either means, 5%/95% quantiles or sd of the posterior distribution
"""
return pd.DataFrame(
samples[f"post_sample_{summary_name}"].get(site_name, None),
index=self.adata.obs_names,
columns=[f"{summary_name}{name_prefix}_{site_name}_{i}" for i in self.factor_names_],
)
def sample2df_vars(
self,
samples: dict,
site_name: str = "gene_factors",
summary_name: str = "means",
name_prefix: str = "",
):
r"""Export posterior distribution summary for variable-specific parameters as Pandas data frame
(means, 5%/95% quantiles or sd of posterior distribution).
Parameters
----------
samples
dictionary with posterior mean, 5%/95% quantiles, SD, samples, generated by ``.sample_posterior()``
site_name
name of the model parameter to be exported
summary_name
posterior distribution summary to return ('means', 'stds', 'q05', 'q95')
name_prefix
prefix to add to column names (f'{summary_name}{name_prefix}_{site_name}_{self\.factor_names_}')
Returns
-------
Pandas data frame corresponding to either means, 5%/95% quantiles or sd of the posterior distribution
"""
return pd.DataFrame(
samples[f"post_sample_{summary_name}"].get(site_name, None),
columns=self.adata.var_names,
index=[f"{summary_name}{name_prefix}_{site_name}_{i}" for i in self.factor_names_],
).T
def plot_QC(self, summary_name: str = "means", use_n_obs: int = 1000):
"""
Show quality control plots:
1. Reconstruction accuracy to assess if there are any issues with model training.
The plot should be roughly diagonal, strong deviations signal problems that need to be investigated.
Plotting is slow because expected value of mRNA count needs to be computed from model parameters. Random
observations are used to speed up computation.
Parameters
----------
summary_name
posterior distribution summary to use ('means', 'stds', 'q05', 'q95')
Returns
-------
"""
if getattr(self, "samples", False) is False:
raise RuntimeError("self.samples is missing, please run self.export_posterior() first")
if use_n_obs is not None:
ind_x = np.random.choice(self.adata.n_obs, np.min((use_n_obs, self.adata.n_obs)), replace=False)
else:
ind_x = None
self.expected_nb_param = self.module.model.compute_expected(
self.samples[f"post_sample_{summary_name}"], self.adata, ind_x=ind_x
)
x_data = get_from_registry(self.adata, _CONSTANTS.X_KEY)[ind_x, :]
if issparse(x_data):
x_data = np.asarray(x_data.toarray())
self.plot_posterior_mu_vs_data(self.expected_nb_param["mu"], x_data)
| 35.263473 | 119 | 0.582159 | 2,068 | 17,667 | 4.758221 | 0.182785 | 0.03435 | 0.009756 | 0.011382 | 0.430996 | 0.367988 | 0.32002 | 0.302033 | 0.273984 | 0.273984 | 0 | 0.009895 | 0.319296 | 17,667 | 500 | 120 | 35.334 | 0.808332 | 0.25058 | 0 | 0.254417 | 0 | 0 | 0.077186 | 0.015209 | 0 | 0 | 0 | 0.002 | 0 | 1 | 0.060071 | false | 0 | 0.056537 | 0.007067 | 0.190813 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45e2441449b8a57f4c898888fafc70417c101445 | 570 | py | Python | mayan/apps/sources/literals.py | atitaya1412/Mayan-EDMS | bda9302ba4b743e7d829ad118b8b836221888172 | [
"Apache-2.0"
] | 343 | 2015-01-05T14:19:35.000Z | 2018-12-10T19:07:48.000Z | mayan/apps/sources/literals.py | atitaya1412/Mayan-EDMS | bda9302ba4b743e7d829ad118b8b836221888172 | [
"Apache-2.0"
] | 191 | 2015-01-03T00:48:19.000Z | 2018-11-30T09:10:25.000Z | mayan/apps/sources/literals.py | atitaya1412/Mayan-EDMS | bda9302ba4b743e7d829ad118b8b836221888172 | [
"Apache-2.0"
] | 114 | 2015-01-08T20:21:05.000Z | 2018-12-10T19:07:53.000Z | import os
from django.conf import settings
DEFAULT_BINARY_SCANIMAGE_PATH = '/usr/bin/scanimage'
DEFAULT_SOURCES_BACKEND_ARGUMENTS = {
'mayan.apps.sources.source_backends.SourceBackendSANEScanner': {
'scanimage_path': DEFAULT_BINARY_SCANIMAGE_PATH
}
}
DEFAULT_SOURCES_CACHE_STORAGE_BACKEND = 'django.core.files.storage.FileSystemStorage'
DEFAULT_SOURCES_CACHE_STORAGE_BACKEND_ARGUMENTS = {
'location': os.path.join(settings.MEDIA_ROOT, 'source_cache')
}
DEFAULT_SOURCES_LOCK_EXPIRE = 600
STORAGE_NAME_SOURCE_CACHE_FOLDER = 'sources__source_cache'
| 30 | 85 | 0.815789 | 68 | 570 | 6.367647 | 0.5 | 0.12933 | 0.101617 | 0.120092 | 0.152425 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005859 | 0.101754 | 570 | 18 | 86 | 31.666667 | 0.839844 | 0 | 0 | 0 | 0 | 0 | 0.307018 | 0.215789 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45e3e43df6217404133f05bdb8876753107b0f86 | 3,050 | py | Python | test/test14_advanced_search.py | kyu-su/pixplusPlus | 2813771c8292ea05d0fd9e7e6b1f4a71e2aba1f8 | [
"MIT"
] | null | null | null | test/test14_advanced_search.py | kyu-su/pixplusPlus | 2813771c8292ea05d0fd9e7e6b1f4a71e2aba1f8 | [
"MIT"
] | null | null | null | test/test14_advanced_search.py | kyu-su/pixplusPlus | 2813771c8292ea05d0fd9e7e6b1f4a71e2aba1f8 | [
"MIT"
] | null | null | null | import warnings
import time
import random
import util
from test_base import TestCase
class Test_AdvancedSearch(TestCase):
def get_radio(self, name):
return self.q('#search-option .content form input[type="radio"][name="%s"]:checked' % name)
def set_size(self, wlt, hlt, wgt, hgt):
for name in 'wlt', 'hlt', 'wgt', 'hgt':
value = locals()[name]
e = self.q('#pp-search-size-custom-' + name)
e.clear()
if value is not None:
e.send_keys(str(value))
radio = self.get_radio('size')
value = '%sx%s-%sx%s' % tuple(map(lambda a: '' if a is None else str(a), [wlt, hlt, wgt, hgt]))
self.assertEqual(radio.get_attribute('value'), value)
def check_size(self, wlt, hlt, wgt, hgt):
self.open('/search.php?s_mode=s_tag&word=pixiv')
self.click(self.q('.search-option'))
self.set_size(wlt, hlt, wgt, hgt)
self.q('#search-option .content form').submit()
self.wait_page_load()
self.assertTrue(self.url.startswith('http://www.pixiv.net/search.php?'))
url = util.urlparse(self.url)
query = dict(util.parse_qsl(url.query))
for name in 'wlt', 'hlt', 'wgt', 'hgt':
value = locals()[name]
self.assertEqual(name in query, value is not None)
if value is not None:
self.assertEqual(query[name], str(value))
# def test_size(self):
# r = lambda: random.randint(1, 2000)
# self.check_size(*sorted(random.sample(range(2000), 4)))
# self.check_size(r(), None, r(), None)
# self.check_size(None, r(), None, r())
def check_slider(self, slider, knob, text):
sx, sy, sw, sh = self.geom(slider)
self.ac().click_and_hold(knob or slider).move_by_offset(-sw, 0).release().perform()
self.assertEqual(text.get_attribute('value'), '-1.5')
self.assertEqual(self.get_radio('ratio').get_attribute('value'), '-1.5')
if knob:
kx, ky, kw, kh = self.geom(knob)
self.assertEqual(kx, sx)
self.assertEqual(ky, sy)
ac = self.ac()
if knob:
ac.click_and_hold(knob)
else:
ac.move_to_element_with_offset(slider, 4, int(sh / 2)).click_and_hold()
ac.move_by_offset(sw * 2, 0).release().perform()
self.assertEqual(text.get_attribute('value'), '1.5')
self.assertEqual(self.get_radio('ratio').get_attribute('value'), '1.5')
if knob:
kx, ky, kw, kh = self.geom(knob)
self.assertEqual(kx, sx + sw - kw)
self.assertEqual(ky, sy)
def test_ratio(self):
self.open('/search.php?s_mode=s_tag&word=pixiv')
self.click(self.q('.search-option'))
slider = self.q('#pp-search-ratio-custom-slider')
if slider.tag_name.lower() != 'input':
            self.skipTest('%s does not seem to support <input type=range>' % self.b.name)
return
text = self.q('#pp-search-ratio-custom-text')
self.assertEqual(slider.get_attribute('min'), '-1.5')
self.assertEqual(slider.get_attribute('max'), '1.5')
self.check_slider(slider, None, text)
text.clear()
text.send_keys('123')
self.assertEqual(self.get_radio('ratio').get_attribute('value'), '123')
| 32.795699 | 99 | 0.639672 | 464 | 3,050 | 4.099138 | 0.262931 | 0.11041 | 0.028391 | 0.037855 | 0.445321 | 0.360147 | 0.284437 | 0.284437 | 0.284437 | 0.258675 | 0 | 0.013242 | 0.182951 | 3,050 | 92 | 100 | 33.152174 | 0.75 | 0.064262 | 0 | 0.253731 | 0 | 0 | 0.164326 | 0.066362 | 0 | 0 | 0 | 0 | 0.223881 | 1 | 0.074627 | false | 0 | 0.074627 | 0.014925 | 0.19403 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
45e52e60e920306770881bdab0e81dba6a9ab8d4 | 511 | py | Python | cogs/help/onHelpMessage.py | Narzaru/AiShindou | 01daa4e51c5d19b0fa39ab7dff24adad6d764976 | [
"BSD-2-Clause"
] | null | null | null | cogs/help/onHelpMessage.py | Narzaru/AiShindou | 01daa4e51c5d19b0fa39ab7dff24adad6d764976 | [
"BSD-2-Clause"
] | null | null | null | cogs/help/onHelpMessage.py | Narzaru/AiShindou | 01daa4e51c5d19b0fa39ab7dff24adad6d764976 | [
"BSD-2-Clause"
] | null | null | null | import discord
from discord.ext import commands
from service.utils import TemplateColours
class Help_command(commands.MinimalHelpCommand):
async def send_pages(self):
ctx = self.get_destination()
self.paginator.suffix = "\nif something is wrong, he is to blame\n ---> <@202011264589758464>"
for page in self.paginator.pages:
embed = discord.Embed(description=page, color=TemplateColours("service\\templateColours.json").Yellow)
await ctx.send(embed=embed)
| 36.5 | 114 | 0.716243 | 61 | 511 | 5.95082 | 0.655738 | 0.071625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043269 | 0.18591 | 511 | 13 | 115 | 39.307692 | 0.829327 | 0 | 0 | 0 | 0 | 0 | 0.189824 | 0.097847 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.3 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
afd77d47937da5ad76e9e4fe3eba28dc639a3d40 | 3,587 | py | Python | tools/WBDSP/UI_InputSignal.py | ptracton/wb_dsp | 73586b10141952e26bbbfb2213b2ccaa1ddbcd39 | [
"MIT"
] | null | null | null | tools/WBDSP/UI_InputSignal.py | ptracton/wb_dsp | 73586b10141952e26bbbfb2213b2ccaa1ddbcd39 | [
"MIT"
] | null | null | null | tools/WBDSP/UI_InputSignal.py | ptracton/wb_dsp | 73586b10141952e26bbbfb2213b2ccaa1ddbcd39 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
from PyQt4 import QtGui
import Signal
class UI_InputSignal(QtGui.QDialog):
"""
"""
def __init__(self, parent=None):
super(UI_InputSignal, self).__init__(parent)
vbox = QtGui.QVBoxLayout()
label = QtGui.QLabel("Input Signal")
vbox.addWidget(label)
self.Signal = Signal.Signal()
self.StartTimeLabel = QtGui.QLabel("Start Time:")
self.StartTimeInput = QtGui.QLineEdit("-3")
self.StartTimeHBox = QtGui.QHBoxLayout()
self.StartTimeHBox.addWidget(self.StartTimeLabel)
self.StartTimeHBox.addWidget(self.StartTimeInput)
vbox.addLayout(self.StartTimeHBox)
self.EndTimeLabel = QtGui.QLabel("End Time:")
self.EndTimeInput = QtGui.QLineEdit("3")
self.EndTimeHBox = QtGui.QHBoxLayout()
self.EndTimeHBox.addWidget(self.EndTimeLabel)
self.EndTimeHBox.addWidget(self.EndTimeInput)
vbox.addLayout(self.EndTimeHBox)
self.SampleFrequencyLabel = QtGui.QLabel("Sample Frequency:")
self.SampleFrequencyInput = QtGui.QLineEdit("1000")
self.SampleFrequencyHBox = QtGui.QHBoxLayout()
self.SampleFrequencyHBox.addWidget(self.SampleFrequencyLabel)
self.SampleFrequencyHBox.addWidget(self.SampleFrequencyInput)
vbox.addLayout(self.SampleFrequencyHBox)
self.SignalTypeLabel = QtGui.QLabel("Signal Type:")
self.SignalTypeHBox = QtGui.QHBoxLayout()
self.SignalTypeComboBox = QtGui.QComboBox()
self.SignalTypeComboBox.addItem("sine")
self.SignalTypeComboBox.addItem("square")
self.SignalTypeComboBox.addItem("triangle")
self.SignalTypeHBox.addWidget(self.SignalTypeLabel)
self.SignalTypeHBox.addWidget(self.SignalTypeComboBox)
vbox.addLayout(self.SignalTypeHBox)
self.AmplitudeLabel = QtGui.QLabel("Amplitude:")
self.AmplitudeInput = QtGui.QLineEdit("0.75")
self.AmplitudeHBox = QtGui.QHBoxLayout()
self.AmplitudeHBox.addWidget(self.AmplitudeLabel)
self.AmplitudeHBox.addWidget(self.AmplitudeInput)
vbox.addLayout(self.AmplitudeHBox)
self.FrequencyLabel = QtGui.QLabel("Signal Frequency:")
self.FrequencyInput = QtGui.QLineEdit("1")
self.FrequencyHBox = QtGui.QHBoxLayout()
self.FrequencyHBox.addWidget(self.FrequencyLabel)
self.FrequencyHBox.addWidget(self.FrequencyInput)
vbox.addLayout(self.FrequencyHBox)
self.PhaseLabel = QtGui.QLabel("Phase:")
self.PhaseInput = QtGui.QLineEdit("0")
self.PhaseHBox = QtGui.QHBoxLayout()
self.PhaseHBox.addWidget(self.PhaseLabel)
self.PhaseHBox.addWidget(self.PhaseInput)
vbox.addLayout(self.PhaseHBox)
self.DataLabel = QtGui.QLabel("Data Size:")
self.DataInput = QtGui.QLineEdit("0")
self.DataHBox = QtGui.QHBoxLayout()
self.DataHBox.addWidget(self.DataLabel)
self.DataHBox.addWidget(self.DataInput)
vbox.addLayout(self.DataHBox)
self.GraphPushButton = QtGui.QPushButton("Graph It")
vbox.addWidget(self.GraphPushButton)
self.MixSignalsPushButton = QtGui.QPushButton("Mix Signals")
vbox.addWidget(self.MixSignalsPushButton)
self.RemovePushButton = QtGui.QPushButton("Remove Last Graph")
vbox.addWidget(self.RemovePushButton)
self.WriteDataPushButton = QtGui.QPushButton("Write File")
vbox.addWidget(self.WriteDataPushButton)
self.setLayout(vbox)
return
| 39.417582 | 70 | 0.681628 | 328 | 3,587 | 7.42378 | 0.265244 | 0.106776 | 0.065708 | 0.015606 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004954 | 0.212155 | 3,587 | 90 | 71 | 39.855556 | 0.856688 | 0.006133 | 0 | 0 | 0 | 0 | 0.051253 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014286 | false | 0 | 0.028571 | 0 | 0.071429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
afd967c3188fbb324693e3925d0624ce1f560347 | 1,044 | py | Python | ichnaea/alembic/versions/cad2875fd8cb_extend_api_keys.py | crankycoder/ichnaea | fb54000e92c605843b7a41521e36fd648c11ae94 | [
"Apache-2.0"
] | 1 | 2018-01-18T16:02:43.000Z | 2018-01-18T16:02:43.000Z | ichnaea/alembic/versions/cad2875fd8cb_extend_api_keys.py | crankycoder/ichnaea | fb54000e92c605843b7a41521e36fd648c11ae94 | [
"Apache-2.0"
] | null | null | null | ichnaea/alembic/versions/cad2875fd8cb_extend_api_keys.py | crankycoder/ichnaea | fb54000e92c605843b7a41521e36fd648c11ae94 | [
"Apache-2.0"
] | 1 | 2018-01-19T17:56:48.000Z | 2018-01-19T17:56:48.000Z | """Extend api keys with sample_store columns.
Revision ID: cad2875fd8cb
Revises: 385f842b2526
Create Date: 2017-02-22 11:52:47.837989
"""
import logging
from alembic import op
import sqlalchemy as sa
log = logging.getLogger('alembic.migration')
revision = 'cad2875fd8cb'
down_revision = '385f842b2526'
def upgrade():
log.info('Add store_sample_* columns to api_key table.')
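    # The backfill below sets both new columns to 100 for existing keys; these
    # values are presumably sampling percentages, so 100 keeps the prior
    # store-everything behavior (an assumption based on the column names).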
op.execute(sa.text(
'ALTER TABLE api_key '
'ADD COLUMN `store_sample_locate` TINYINT(4) '
'AFTER `fallback_cache_expire`, '
'ADD COLUMN `store_sample_submit` TINYINT(4) '
'AFTER `store_sample_locate`'
))
op.execute(sa.text(
'UPDATE api_key SET store_sample_locate = 100'
))
op.execute(sa.text(
'UPDATE api_key SET store_sample_submit = 100'
))
def downgrade():
log.info('Drop store_sample_* columns from api_key table.')
op.execute(sa.text(
'ALTER TABLE api_key '
'DROP COLUMN `store_sample_locate`, '
'DROP COLUMN `store_sample_submit`'
))
| 24.27907 | 63 | 0.672414 | 137 | 1,044 | 4.919708 | 0.416058 | 0.146884 | 0.065282 | 0.089021 | 0.246291 | 0.246291 | 0.246291 | 0.246291 | 0.246291 | 0.246291 | 0 | 0.071253 | 0.220307 | 1,044 | 42 | 64 | 24.857143 | 0.756757 | 0.125479 | 0 | 0.357143 | 0 | 0 | 0.523179 | 0.143488 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.107143 | 0 | 0.178571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
afdbf4b87bacf33c9e03d4659cab0141ad61a3a5 | 1,870 | py | Python | sdk/policyinsights/azure-mgmt-policyinsights/azure/mgmt/policyinsights/models/tracked_resource_modification_details_py3.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | sdk/policyinsights/azure-mgmt-policyinsights/azure/mgmt/policyinsights/models/tracked_resource_modification_details_py3.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | sdk/policyinsights/azure-mgmt-policyinsights/azure/mgmt/policyinsights/models/tracked_resource_modification_details_py3.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TrackedResourceModificationDetails(Model):
"""The details of the policy triggered deployment that created or modified the
tracked resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar policy_details: The details of the policy that created or modified
the tracked resource.
:vartype policy_details: ~azure.mgmt.policyinsights.models.PolicyDetails
:ivar deployment_id: The ID of the deployment that created or modified the
tracked resource.
:vartype deployment_id: str
:ivar deployment_time: Timestamp of the deployment that created or
modified the tracked resource.
:vartype deployment_time: datetime
"""
_validation = {
'policy_details': {'readonly': True},
'deployment_id': {'readonly': True},
'deployment_time': {'readonly': True},
}
_attribute_map = {
'policy_details': {'key': 'policyDetails', 'type': 'PolicyDetails'},
'deployment_id': {'key': 'deploymentId', 'type': 'str'},
'deployment_time': {'key': 'deploymentTime', 'type': 'iso-8601'},
}
def __init__(self, **kwargs) -> None:
super(TrackedResourceModificationDetails, self).__init__(**kwargs)
self.policy_details = None
self.deployment_id = None
self.deployment_time = None
| 37.4 | 82 | 0.642781 | 200 | 1,870 | 5.88 | 0.485 | 0.055272 | 0.044218 | 0.071429 | 0.237245 | 0.201531 | 0.201531 | 0.201531 | 0.120748 | 0.120748 | 0 | 0.003305 | 0.190909 | 1,870 | 49 | 83 | 38.163265 | 0.773959 | 0.567914 | 0 | 0 | 0 | 0 | 0.259459 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
afdccd4d644296589a91c7378dac649e2358df5a | 11,059 | py | Python | backUp/jd_jxz.py | ZelZhu/faker3 | 4d1f2aff532211da9f356119c9f8273c3d0796dd | [
"MIT"
] | 68 | 2021-11-19T09:28:04.000Z | 2022-03-25T07:06:01.000Z | backUp/jd_jxz.py | ZelZhu/faker3 | 4d1f2aff532211da9f356119c9f8273c3d0796dd | [
"MIT"
] | 2 | 2022-03-09T12:26:22.000Z | 2022-03-10T03:00:49.000Z | backUp/jd_jxz.py | ZelZhu/faker3 | 4d1f2aff532211da9f356119c9f8273c3d0796dd | [
"MIT"
] | 83 | 2021-11-19T08:27:05.000Z | 2022-03-23T07:32:01.000Z | #!/usr/bin/python3
# -*- coding: utf8 -*-
"""
cron: 30 9 * * *
new Env('集勋章');
活动入口:东东农场->东东乐园(点大风车)->集勋章 500豆
"""
# 是否开启通知,Ture:发送通知,False:不发送
isNotice = True
# UA 可自定义你的, 默认随机生成UA。
UserAgent = ''
import asyncio
import json
import random
import os, re, sys
try:
    import requests
except Exception as e:
    print(e, "\nMissing the requests module, install it with: python3 -m pip install requests")
    exit(3)
try:
    import aiohttp
except Exception as e:
    print(e, "\nMissing the aiohttp module, install it with: python3 -m pip install aiohttp")
    exit(3)
##############
requests.packages.urllib3.disable_warnings()
# host_api = 'https://api.m.jd.com/client.action'
pwd = os.path.dirname(os.path.abspath(__file__)) + os.sep
def userAgent():
"""
    Randomly generate a User-Agent string.
:return: jdapp;iPhone;9.4.8;14.3;xxxx;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/2455696156;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1
"""
if not UserAgent:
uuid = ''.join(random.sample('123456789abcdef123456789abcdef123456789abcdef123456789abcdef', 40))
addressid = ''.join(random.sample('1234567898647', 10))
iosVer = ''.join(
random.sample(["14.5.1", "14.4", "14.3", "14.2", "14.1", "14.0.1", "13.7", "13.1.2", "13.1.1"], 1))
iosV = iosVer.replace('.', '_')
iPhone = ''.join(random.sample(["8", "9", "10", "11", "12", "13"], 1))
ADID = ''.join(random.sample('0987654321ABCDEF', 8)) + '-' + ''.join(
random.sample('0987654321ABCDEF', 4)) + '-' + ''.join(random.sample('0987654321ABCDEF', 4)) + '-' + ''.join(
random.sample('0987654321ABCDEF', 4)) + '-' + ''.join(random.sample('0987654321ABCDEF', 12))
return f'jdapp;iPhone;10.0.4;{iosVer};{uuid};network/wifi;ADID/{ADID};supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone{iPhone},1;addressid/{addressid};supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS {iosV} like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1'
else:
return UserAgent
## Load the notification service
class msg(object):
def __init__(self, m=''):
self.str_msg = m
self.message()
def message(self):
global msg_info
print(self.str_msg)
try:
msg_info = "{}\n{}".format(msg_info, self.str_msg)
except:
msg_info = "{}".format(self.str_msg)
sys.stdout.flush()
def getsendNotify(self, a=0):
if a == 0:
a += 1
try:
url = 'https://gitee.com/curtinlv/Public/raw/master/sendNotify.py'
response = requests.get(url)
if 'curtinlv' in response.text:
with open('sendNotify.py', "w+", encoding="utf-8") as f:
f.write(response.text)
else:
if a < 5:
a += 1
return self.getsendNotify(a)
else:
pass
except:
if a < 5:
a += 1
return self.getsendNotify(a)
else:
pass
def main(self):
global send
cur_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(cur_path)
if os.path.exists(cur_path + "/sendNotify.py"):
try:
from sendNotify import send
except:
self.getsendNotify()
try:
from sendNotify import send
except:
print("加载通知服务失败~")
else:
self.getsendNotify()
try:
from sendNotify import send
except:
print("加载通知服务失败~")
###################
msg().main()
# @logger.catch
async def get_headers():
"""
    Build the request headers.
:return:
"""
headers = {
'Host': 'api.m.jd.com',
'Connection': 'keep-alive',
'Accept': 'application/json, text/plain, */*',
'Origin': 'https://h5.m.jd.com',
'User-Agent': userAgent(),
'content-type': 'application/x-www-form-urlencoded',
'Referer': 'https://gongyi.m.jd.com/m/welfare/donate/index.html',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
'X-Requested-With': 'com.jingdong.app.mall',
}
return headers
async def post(session, url, body=None):
try:
if body is None:
body = {}
response = await session.post(url=url, data=body)
await asyncio.sleep(1)
text = await response.text()
data = json.loads(text)
return data
except Exception as e:
        print('Server request error, {}!'.format(e.args))
return {
'success': False
}
async def get(session, url):
try:
response = await session.get(url=url)
await asyncio.sleep(1)
text = await response.text()
data = json.loads(text)
return data
except Exception as e:
        print('Server request error, {}!'.format(e.args))
return {
'success': False
}
# POST: check the medal task list
async def collect_Init_task(session):
"""
    Fetch the medal task list and process each task.
:return:
"""
url = 'https://api.m.jd.com/client.action'
body = {"channel": 1}
params = f'functionId=collect_Init&body={json.dumps(body)}&client=wh5&clientVersion=1.0.0'
data = await post(session, url, params)
if data['code'] == '0' and data['success'] == True:
if data['result']['activityStatus'] == 2:
print(f"""完成任务得对应勋章""")
taskInfo = data['result']['taskInfo']
for task in taskInfo:
                if task['status'] == 4:
                    print(f"""Medal: {task['medalName']}, progress {task['currentTaskCount']}/{task['maxTaskCount']}, already lit""")
                    continue
                if task['status'] == 1:
                    print(f"""Medal: {task['medalName']}, progress {task['currentTaskCount']}/{task['maxTaskCount']}, no progress yet, keep going""")
                    continue
                if task['status'] == 2:
                    print(f"""Medal: {task['medalName']}, progress {task['currentTaskCount']}/{task['maxTaskCount']}, come back tomorrow""")
                    continue
                if task['status'] == 3:
                    print(f"""Medal: {task['medalName']}, progress {task['currentTaskCount']}/{task['maxTaskCount']}, lighting it up""")
await asyncio.sleep(1)
await collect_taskAward(session, task)
        elif data['result']['activityStatus'] == 3:
            print(f"""All medals lit, combining them to claim the reward""")
            await asyncio.sleep(1)
            await collect_getAwardInfo(session)
        elif data['result']['activityStatus'] == 4:
            msg(f"""All medals lit, the combine reward has already been claimed!""")
        else:
            msg(f"""Unexpected medal status: {data}""")
    else:
        print(f"""Failed to fetch the medal list: {data}""")
return 999
# Query the combinable awards
async def collect_getAwardInfo(session):
"""
    Query the award info and combine the 500-bean reward.
:return:
"""
url = 'https://api.m.jd.com/client.action'
body = {}
params = f'functionId=collect_getAwardInfo&body={json.dumps(body)}&client=wh5&clientVersion=1.0.0'
data = await post(session, url, params)
if data['code'] == '0' and data['success'] == True:
print(f"""合成领奖""")
awardList = data['result']['awardList']
for i in awardList:
if i['awardValue'] == '500':
await collect_exchangeAward(session, i['awardType'])
else:
print(f"""获取勋章列表异常{data}""")
return 999
# Perform the combine
async def collect_exchangeAward(session, awardType):
"""
    Combine the medals and claim the chosen award.
:return:
"""
url = 'https://api.m.jd.com/client.action'
body = {"type": awardType}
params = f'functionId=collect_exchangeAward&body={json.dumps(body)}&client=wh5&clientVersion=1.0.0'
data = await post(session, url, params)
print(data)
    if data['code'] == '1' and data['success'] == False:
        print(f"""Combine reward result: {data['message']}""")
    elif data['code'] == '0' and data['success'] == True:
        msg(f"""Combine reward received: {data['result']['awardValue']} JD beans""")
    else:
        print(f"""Combine reward error: {data}""")
return 999
# POST: fetch the task list
async def collect_Init(session):
"""
    Fetch the raw task list.
:return:
"""
await asyncio.sleep(0.5)
try:
url = 'https://api.m.jd.com/client.action'
body = {"channel": 1}
params = f'functionId=collect_Init&body={json.dumps(body)}&client=wh5&clientVersion=1.0.0'
data = await post(session, url, params)
return data
except Exception as e:
print(e.args)
# POST: light up a medal
async def collect_taskAward(session, task):
"""
    Claim the award for a completed task (light up its medal).
:return:
"""
taskType = task['taskType']
medalName = task['medalName']
url = 'https://api.m.jd.com/client.action'
body = {"taskType": taskType}
params = f'functionId=collect_taskAward&body={json.dumps(body)}&client=wh5&clientVersion=1.0.0'
data = await post(session, url, params)
    if data['code'] == '1' and data['success'] == False:
        print(f"""Lit medal {medalName}, got: {data['message']}""")
    elif data['code'] == '0' and data['success'] == True:
        msg(f"""Lit medal {medalName}, got: {data['result']['awardValue']}g of water drops""")
    else:
        print(f"""Lighting medal {medalName} failed: {data}""")
return 999
# POST: claim the newcomer reward
async def collect_newUserAward(session):
"""
    Claim the one-time newcomer reward.
:return:
"""
url = 'https://api.m.jd.com/client.action'
body = {}
params = f'functionId=collect_newUserAward&body={json.dumps(body)}&client=wh5&clientVersion=1.0.0'
data = await post(session, url, params)
    if data['code'] == '1' and data['success'] == False:
        print(f"""Newcomer reward: {data['msg']}""")
    elif data['code'] == '0' and data['success'] == True:
        msg(f"""Newcomer reward: {data['msg']}""")
    else:
        print(f"""Newcomer reward: {data}""")
return 999
async def run():
"""
    Program entry point.
:return:
"""
scriptName = '集勋章'
print(scriptName)
headers = await get_headers()
cks = os.environ["JD_COOKIE"].split("&")
for ck in cks:
ptpin = re.findall(r"pt_pin=(.*?);", ck)[0]
print("--------开始京东账号" + ptpin + "--------")
ck = ck.rstrip(';')
ck = dict(item.split("=") for item in ck.split(";"))
async with aiohttp.ClientSession(headers=headers, cookies=ck) as session:
await collect_Init(session)
await asyncio.sleep(1)
await collect_newUserAward(session)
await asyncio.sleep(1)
await collect_Init_task(session)
if isNotice:
send(scriptName, msg_info)
else:
print("\n", scriptName, "\n", msg_info)
if __name__ == '__main__':
# from config import JD_COOKIES
#
# app = JdDdWorld()
# asyncio.run(run())
loop = asyncio.get_event_loop()
loop.run_until_complete(run())
# from utils.process import process_start
# process_start(JdDdWorld, '东东世界', code_key=CODE_KEY)
| 31.962428 | 353 | 0.559996 | 1,306 | 11,059 | 4.688361 | 0.239663 | 0.014699 | 0.009799 | 0.011759 | 0.44586 | 0.44586 | 0.430508 | 0.385922 | 0.385922 | 0.380532 | 0 | 0.042487 | 0.270006 | 11,059 | 345 | 354 | 32.055072 | 0.715967 | 0.072068 | 0 | 0.440816 | 0 | 0.032653 | 0.286089 | 0.139915 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020408 | false | 0.008163 | 0.036735 | 0 | 0.122449 | 0.110204 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
afdcedd441d4f48bbd1a4d60e657e73de3e6409b | 1,726 | py | Python | SRTM/convert_hgt.py | iDigBio/guoda-datasets | abcba7b03b27e641cd96825dde64f2180a65d978 | [
"MIT"
] | 6 | 2016-06-24T09:47:22.000Z | 2018-04-10T20:04:58.000Z | SRTM/convert_hgt.py | iDigBio/guoda-datasets | abcba7b03b27e641cd96825dde64f2180a65d978 | [
"MIT"
] | 14 | 2016-06-17T20:29:21.000Z | 2019-06-13T13:17:21.000Z | SRTM/convert_hgt.py | iDigBio/guoda-datasets | abcba7b03b27e641cd96825dde64f2180a65d978 | [
"MIT"
] | null | null | null | from __future__ import division, absolute_import, print_function
import os
import csv
import re
import sys
import numpy as np
import multiprocessing
re_split = re.compile(r"([NS])(\d+)([EW])(\d+)")
SAMPLES = 1201
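# Assumed input format (standard for SRTM3 data): each .hgt tile is a 1201x1201
# grid of big-endian signed 16-bit elevations at 3 arc-second spacing, and
# filenames such as "N37W123.hgt" encode the tile's south-west corner.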
def read_hgt_file(f):
fname = os.path.split(f)[-1][:-4].upper()
with open(f, "rb") as hgt_data:
m = re_split.match(fname)
if os.path.exists(fname + ".csv"):
print(fname + " SKIP")
return
elif m is None:
print(fname + " BAD MATCH")
return
print(fname)
g = m.groups()
base_lat = int(g[1])
base_lon = int(g[3])
if g[0] == "N":
lat_sign = 1
else:
lat_sign = -1
if g[2] == "E":
lon_sign = 1
else:
lon_sign = -1
try:
elevations = np.fromfile(
hgt_data,
np.dtype('>i2'),
SAMPLES*SAMPLES
).reshape((SAMPLES, SAMPLES))
with open(fname + ".csv", "w") as outf:
cw = csv.writer(outf)
for x in range(0, SAMPLES):
for y in range(0, SAMPLES):
lat = lat_sign * (base_lat + (1200-y)/1200)
lon = lon_sign * (base_lon + x/1200)
hgt = elevations[y, x].astype(int)
cw.writerow([lat - 1/2400, lat + 1/2400, lon - 1/2400, lon + 1/2400, hgt])
except Exception:
print(fname + "FAIL")
def main():
p = multiprocessing.Pool()
for root, dirs, files in os.walk("dem3"):
p.map(read_hgt_file, [root + "/" + f for f in files])
if __name__ == '__main__':
main()
| 25.382353 | 98 | 0.479722 | 219 | 1,726 | 3.634703 | 0.415525 | 0.050251 | 0.027638 | 0.037688 | 0.026382 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047214 | 0.386443 | 1,726 | 67 | 99 | 25.761194 | 0.704438 | 0 | 0 | 0.075472 | 0 | 0 | 0.040556 | 0.012746 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037736 | false | 0 | 0.132075 | 0 | 0.207547 | 0.09434 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
afe45f4906350be6c265277d8a549116790aeb73 | 895 | py | Python | iv/Leetcode/easy/530_min_absolute_diff_binary_search_tree.py | iamsuman/iv | bf68d3fd45455b6041e74b09272f69503bf7a8ac | [
"MIT"
] | 2 | 2020-09-19T22:28:15.000Z | 2020-10-03T01:44:53.000Z | iv/Leetcode/easy/530_min_absolute_diff_binary_search_tree.py | iamsuman/iv | bf68d3fd45455b6041e74b09272f69503bf7a8ac | [
"MIT"
] | null | null | null | iv/Leetcode/easy/530_min_absolute_diff_binary_search_tree.py | iamsuman/iv | bf68d3fd45455b6041e74b09272f69503bf7a8ac | [
"MIT"
] | 1 | 2020-10-03T01:43:30.000Z | 2020-10-03T01:43:30.000Z | class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def getMinimumDifference(self, root: TreeNode) -> int:
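        # In-order traversal of a BST visits values in ascending order, so the
        # minimum absolute difference must occur between two adjacent values.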
nodes = []
def traversal(root, nodes: list):
if not root:
return
if root.left:
traversal(root.left, nodes)
nodes.append(root.val)
if root.right:
traversal(root.right, nodes)
traversal(root, nodes)
# print(nodes)
mindiff = 2 ** 31 - 1
for i in range(len(nodes) - 1):
diff = abs(nodes[i] - nodes[i + 1])
if mindiff > diff:
mindiff = diff
return mindiff
root = TreeNode(1)
root.right = TreeNode(3)
root.right.left = TreeNode(2)
s = Solution()
print(s.getMinimumDifference(root))
| 24.189189 | 58 | 0.532961 | 104 | 895 | 4.548077 | 0.336538 | 0.109937 | 0.07611 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017422 | 0.358659 | 895 | 36 | 59 | 24.861111 | 0.80662 | 0.013408 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0 | 0 | 0.25 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
afe5cfafb8008bc831b95a3a68e90c4bb129626a | 2,964 | py | Python | barbante/utils/tests/test_text.py | hypermindr/barbante | 40056e9e4f4564461294b3a1d9afc855062350ac | [
"MIT"
] | 10 | 2015-06-01T21:48:16.000Z | 2021-08-20T20:18:48.000Z | barbante/utils/tests/test_text.py | hypermindr/barbante | 40056e9e4f4564461294b3a1d9afc855062350ac | [
"MIT"
] | null | null | null | barbante/utils/tests/test_text.py | hypermindr/barbante | 40056e9e4f4564461294b3a1d9afc855062350ac | [
"MIT"
] | 2 | 2015-06-03T21:54:32.000Z | 2015-11-24T23:13:05.000Z | """ Test module for barbante.text.
"""
import nose.tools
import barbante.utils.text as text
def test_calculate_tf_en():
""" Tests calculate_tf for English contents.
"""
language = "english"
contents = "Cooks who don't love cooking don't cook well."
results = text.calculate_tf(language, contents)
nose.tools.eq_(results['cook'], 3, "Wrong TF")
nose.tools.eq_(results['love'], 1, "Wrong TF")
nose.tools.eq_(results['well'], 1, "Wrong TF")
def test_calculate_tf_pt():
""" Tests calculate_tf for Portuguese contents.
"""
language = "portuguese"
contents = "Eu não gostava do gosto gasto do gesto de agosto."
results = text.calculate_tf(language, contents)
nose.tools.eq_(results['gost'], 2, "Wrong TF")
nose.tools.eq_(results['gast'], 1, "Wrong TF")
nose.tools.eq_(results['gest'], 1, "Wrong TF")
nose.tools.eq_(results['agost'], 1, "Wrong TF")
def test_performance():
""" Tests calculate_tf for huge texts.
"""
import random
palavras = ["zero", "one", "two", "three", "four", "five", "six",
"seven", "eight", "nine", "R$1000.00"]
contents = ""
language = 'english'
for _ in range(10000): # increase number and measure time when necessary
contents += palavras[random.randint(0, len(palavras) - 1)] + " "
text.calculate_tf(language, contents)
def test_tokenize():
""" Tests tokenization.
"""
actual = text.tokenize("The car is going to Mountain View. You! You \
should go too... Or, maybe, shouldn't!?")
expected = ["The", "car", "is", "going", "to", "Mountain", "View", "You",
"You", "should", "go", "too", "Or", "maybe", "shouldn", "\'",
"t"]
nose.tools.eq_(actual, expected)
def test_remove_stopwords():
""" Tests removal of stopwords.
"""
actual = text.remove_stopwords(["The", "car", "is", "going", "to",
"crash", "or", "going", "to", "win"],
"english", 3)
expected = ['The', 'car', 'going', 'crash', 'going', 'win']
nose.tools.eq_(actual, expected)
def test_count_common_terms_English():
""" Tests common terms counting.
"""
language = "english"
text1 = "Just a test sentence for the purpose of just testing common terms counting."
text2 = "This is just a sentence for tests purposes."
text1_tokens = text.tokenize(text1)
text2_tokens = text.tokenize(text2)
text1_stems = text.get_stems(text1_tokens, language)
text2_stems = text.get_stems(text2_tokens, language)
text1_stems_no_stopwords = set(text.remove_stopwords(text1_stems, language))
text2_stems_no_stopwords = set(text.remove_stopwords(text2_stems, language))
nose.tools.eq_(text.count_common_terms(text1_stems_no_stopwords,
text2_stems_no_stopwords),
                   3)  # common stems: sentenc, purpos, test
| 36.146341 | 89 | 0.610999 | 366 | 2,964 | 4.789617 | 0.330601 | 0.056475 | 0.06275 | 0.071877 | 0.327439 | 0.284084 | 0.255562 | 0.131204 | 0.131204 | 0.131204 | 0 | 0.01731 | 0.239879 | 2,964 | 81 | 90 | 36.592593 | 0.760763 | 0.111673 | 0 | 0.115385 | 0 | 0 | 0.193972 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.057692 | 0 | 0.173077 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
afe5dba68241cb5dd2453af805ca160811228622 | 3,365 | py | Python | YouTubeDownloader/views.py | gabzin/django-ytdownloader | e59e728aeac459b73fd4fb9ca663560855af19fd | [
"MIT"
] | 27 | 2021-11-18T22:01:26.000Z | 2022-01-08T14:10:32.000Z | YouTubeDownloader/views.py | gabzin/django-ytdownloader | e59e728aeac459b73fd4fb9ca663560855af19fd | [
"MIT"
] | 1 | 2021-11-21T13:28:00.000Z | 2021-11-21T15:05:42.000Z | YouTubeDownloader/views.py | gabzin/django-ytdownloader | e59e728aeac459b73fd4fb9ca663560855af19fd | [
"MIT"
] | 5 | 2021-11-20T07:16:54.000Z | 2021-12-16T10:44:38.000Z | #Imports
from django.http.response import HttpResponse
from django.shortcuts import render
from django.contrib import messages
from .forms import DownloadForm
from pytube import YouTube
from math import pow, floor, log
from datetime import timedelta
from requests import get
# Your YouTube V3 Api Key
KEY = ""
# Convert from bytes
def convertsize(size_bytes):
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(floor(log(size_bytes, 1024)))
p = pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (s, size_name[i])
# Convert long numbers
def humanformat(number):
units = ['', 'K', 'M', 'B', 'T', 'Q']
k = 1000.0
magnitude = int(floor(log(number, k)))
return '%.2f%s' % (number / k**magnitude, units[magnitude])
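# Illustrative example (not from the original source):
# humanformat(1234567) -> "1.23M"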
# Called when the search button is clicked
def download_video(request, string=""):
global video_url
form = DownloadForm(request.POST or None)
if form.is_valid():
video_url = form.cleaned_data.get("url")
try:
yt_obj = YouTube(video_url)
videos = yt_obj.streams.filter(is_dash=False).desc()
audios = yt_obj.streams.filter(only_audio=True).order_by('abr').desc()
except Exception as e:
#messages.error(request, 'Invalid URL.')
messages.error(request, e)
return render(request, 'home.html',{ 'form': form })
video_audio_streams = []
audio_streams = []
try:
url = f"https://www.googleapis.com/youtube/v3/videos?id={yt_obj.video_id}&key={KEY}&part=statistics"
video_stats = get(url).json()
video_likes = video_stats['items'][0]['statistics']['likeCount']
video_favs = video_stats['items'][0]['statistics']['favoriteCount']
except:
video_likes = 0
# List of video streams dictionaries
for s in videos:
video_audio_streams.append({
'resolution' : s.resolution,
'extension' : s.mime_type.replace('video/',''),
'file_size' : convertsize(s.filesize),
'video_url' : s.url, 'file_name' : yt_obj.title + '.' + s.mime_type.replace('video/','')
})
# List of audio streams dictionaries
for s in audios:
audio_streams.append({
'resolution' : s.abr,
'extension' : s.mime_type.replace('audio/',''),
'file_size' : convertsize(s.filesize),
'video_url' : s.url, 'file_name' : yt_obj.title + '.' + s.mime_type.replace('video/','')
})
        if yt_obj.rating is None:
rating = 5
else:
rating = yt_obj.rating
# Full content to render
context = {
'form' : form,'title' : yt_obj.title,
'rating': humanformat(int(video_likes)),
'thumb' : yt_obj.thumbnail_url, 'author' : yt_obj.author,
'author_url' : yt_obj.channel_url,
'duration' : str(timedelta(seconds=yt_obj.length)), 'views' : humanformat(yt_obj.views) if yt_obj.views >= 1000 else yt_obj.views,
'stream_audio' : audio_streams, 'streams' : video_audio_streams
}
return render(request, 'home.html', context)
return render(request, 'home.html',{ 'form': form }) | 37.388889 | 142 | 0.581278 | 413 | 3,365 | 4.585956 | 0.37046 | 0.042239 | 0.019007 | 0.033791 | 0.248152 | 0.12038 | 0.12038 | 0.083421 | 0.083421 | 0.083421 | 0 | 0.01112 | 0.278455 | 3,365 | 90 | 143 | 37.388889 | 0.768946 | 0.067459 | 0 | 0.140845 | 0 | 0.014085 | 0.129393 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042254 | false | 0 | 0.112676 | 0 | 0.239437 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
afe75e37643caa6a9af81c6d249b336b0e5aca17 | 45,429 | py | Python | fhirzeug/fhirspec.py | skalarsystems/fhir-zeug | 19973438823c41247e3efb5b1d35e8942ae01fdb | [
"Apache-2.0"
] | 10 | 2020-04-23T18:13:13.000Z | 2020-11-25T07:45:26.000Z | fhirzeug/fhirspec.py | skalarsystems/fhir-zeug | 19973438823c41247e3efb5b1d35e8942ae01fdb | [
"Apache-2.0"
] | 71 | 2020-05-20T09:11:22.000Z | 2020-10-26T14:01:03.000Z | fhirzeug/fhirspec.py | skalarsystems/fhir-zeug | 19973438823c41247e3efb5b1d35e8942ae01fdb | [
"Apache-2.0"
] | 1 | 2020-06-03T11:55:47.000Z | 2020-06-03T11:55:47.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import re
import json
import datetime
from pathlib import Path
import stringcase # type: ignore
from typing import Any, Dict, List, Optional, Union, TYPE_CHECKING
from .logger import logger
from . import fhirclass
if TYPE_CHECKING:
from .generators.yaml_model import GeneratorConfig
# TODO: check
# allows skipping some profiles by matching against their URL (used while work is in progress)
skip_because_unsupported = [
r"SimpleQuantity",
]
class FHIRSpec(object):
""" The FHIR specification.
"""
def __init__(self, directory: Path, generator_config: "GeneratorConfig"):
assert directory.is_dir()
self.directory = directory
self.generator_config = generator_config
self.info = FHIRVersionInfo(self, directory)
# system-url: FHIRValueSet()
self.valuesets: Dict[str, "FHIRValueSet"] = {}
# system-url: FHIRCodeSystem()
self.codesystems: Dict[str, "FHIRCodeSystem"] = {}
# profile-name: FHIRStructureDefinition()
self.profiles: Dict[str, "FHIRStructureDefinition"] = {}
# Load profiles
self.prepare()
self.read_profiles()
self.finalize()
def prepare(self):
""" Run actions before starting to parse profiles.
"""
self.read_valuesets()
self.handle_manual_profiles()
def read_bundle_resources(self, filename: str):
""" Return an array of the Bundle's entry's "resource" elements.
"""
logger.info("Reading {}".format(filename))
filepath = os.path.join(self.directory, filename)
with io.open(filepath, encoding="utf-8") as handle:
parsed = json.load(handle)
if "resourceType" not in parsed:
raise Exception(
'Expecting "resourceType" to be present, but is not in {}'.format(
filepath
)
)
if "Bundle" != parsed["resourceType"]:
raise Exception('Can only process "Bundle" resources')
if "entry" not in parsed:
raise Exception(
"There are no entries in the Bundle at {}".format(filepath)
)
return [e["resource"] for e in parsed["entry"]]
# MARK: Managing ValueSets and CodeSystems
def read_valuesets(self):
resources = self.read_bundle_resources("valuesets.json")
for resource in resources:
if "ValueSet" == resource["resourceType"]:
assert "url" in resource
valueset = FHIRValueSet(self, resource)
self.valuesets[valueset.url] = valueset
if valueset.dstu2_inlined_codesystem:
codesystem = FHIRCodeSystem(self, valueset.dstu2_inlined_codesystem)
codesystem.valueset_url = valueset.url
self.found_codesystem(codesystem)
elif "CodeSystem" == resource["resourceType"]:
assert "url" in resource
if "content" in resource and "concept" in resource:
codesystem = FHIRCodeSystem(self, resource)
self.found_codesystem(codesystem)
else:
logger.warning(f"CodeSystem with no concepts: {resource['url']}")
logger.info(
f"Found {len(self.valuesets)} ValueSets and {len(self.codesystems)} CodeSystems"
)
def found_codesystem(self, codesystem):
if codesystem.url not in self.generator_config.mapping_rules.enum_ignore:
self.codesystems[codesystem.url] = codesystem
def valueset_with_uri(self, uri) -> Optional["FHIRValueSet"]:
assert uri
if uri not in self.valuesets:
logger.warning(f"Valueset not found for URI : {uri}")
return None
return self.valuesets[uri]
def codesystem_with_uri(self, uri) -> Optional["FHIRCodeSystem"]:
assert uri
if uri not in self.codesystems:
logger.warning(f"Codesystem not found for URI : {uri}")
return None
return self.codesystems[uri]
# MARK: Handling Profiles
def read_profiles(self):
""" Find all (JSON) profiles and instantiate into FHIRStructureDefinition.
"""
resources = []
for filename in [
"profiles-types.json",
"profiles-resources.json",
]: # , 'profiles-others.json']:
bundle_res = self.read_bundle_resources(filename)
for resource in bundle_res:
if "StructureDefinition" == resource["resourceType"]:
resources.append(resource)
else:
logger.debug(
"Not handling resource of type {}".format(
resource["resourceType"]
)
)
# create profile instances
for resource in resources:
profile = FHIRStructureDefinition(self, resource)
for pattern in skip_because_unsupported:
if re.search(pattern, profile.url) is not None:
logger.info('Skipping "{}"'.format(resource["url"]))
profile = None
break
if profile is not None and self.found_profile(profile):
profile.process_profile()
def found_profile(self, profile):
if not profile or not profile.name:
raise Exception("No name for profile {}".format(profile))
if profile.name.lower() in self.profiles:
logger.debug('Already have profile "{}", discarding'.format(profile.name))
return False
self.profiles[profile.name.lower()] = profile
return True
def handle_manual_profiles(self):
""" Creates in-memory representations for all our manually defined
profiles.
"""
for manual_profile in self.generator_config.manual_profiles:
for contained in manual_profile.contains:
profile = FHIRStructureDefinition(self, None)
profile.manual_module = manual_profile.module
prof_dict = {
"name": contained,
"differential": {"element": [{"path": contained}]},
}
profile.structure = FHIRStructureDefinitionStructure(profile, prof_dict)
if self.found_profile(profile):
profile.process_profile()
def finalize(self):
""" Should be called after all profiles have been parsed and allows
to perform additional actions, like looking up class implementations
from different profiles.
"""
for _, prof in self.profiles.items():
prof.finalize()
# MARK: Naming Utilities
def as_module_name(self, name: str) -> str:
if self.generator_config.naming_rules.resource_modules_lowercase:
return name.lower()
else:
return name
def as_class_name(
self, classname: Optional[str], parent_name: Optional[str] = None
) -> Optional[str]:
""" This method formulates a class name from the given arguments,
applying formatting according to config.
"""
if classname is None or len(classname) == 0:
return None
classmap = self.generator_config.mapping_rules.classmap
if parent_name is not None:
# if we have a parent, do we have a mapped class?
pathname = f"{parent_name}.{classname}"
if pathname in classmap:
return classmap[pathname]
# is our plain class mapped?
if classname in classmap:
return classmap[classname]
# CamelCase or just plain
if self.generator_config.naming_rules.camelcase_classes:
return stringcase.pascalcase(classname) # upper camelcase
return classname
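    # Illustrative (hypothetical input): with camelcase_classes enabled and no
    # mapping hit, as_class_name("backboneElement") returns "BackboneElement"
    # via stringcase.pascalcase.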
def class_name_for_type(
self, type_name: str, parent_name: Optional[str] = None
) -> Optional[str]:
return self.as_class_name(type_name, parent_name)
def class_name_for_type_if_property(self, type_name: str) -> Optional[str]:
classname = self.class_name_for_type(type_name)
if not classname:
return None
return self.generator_config.mapping_rules.replacemap.get(classname, classname)
def class_name_for_profile(
self, profile_name: Optional[Union[List[str], str]]
) -> Optional[Union[List[Optional[str]], str]]:
if not profile_name:
return None
        # TODO: figure out what to do with this later. Annotation.author supports multiple types, which caused this to fail
if isinstance(profile_name, (list,)):
classnames = []
for name_part in profile_name:
classnames.append(
self.as_class_name(name_part.split("/")[-1])
) # may be the full Profile URI, like http://hl7.org/fhir/Profile/MyProfile
return classnames
type_name = profile_name.split("/")[
-1
] # may be the full Profile URI, like http://hl7.org/fhir/Profile/MyProfile
return self.as_class_name(type_name)
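    # Illustrative (hypothetical input): a full profile URI such as
    # "http://hl7.org/fhir/Profile/MyProfile" resolves to the class name for "MyProfile".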
def class_name_is_native(self, class_name: str) -> bool:
return class_name in self.generator_config.mapping_rules.natives
def safe_property_name(self, prop_name: str) -> str:
return self.generator_config.mapping_rules.reservedmap.get(prop_name, prop_name)
def safe_enum_name(self, enum_name: str, ucfirst: bool = False) -> str:
assert enum_name, "Must have a name"
name = self.generator_config.mapping_rules.enum_map.get(enum_name, enum_name)
parts = re.split(r"[\W_]+", name)
# /!\ "CamelCase" term here is misleading.
# "CamelCase" is not opposed to "snake_case", at least here.
# See tests to see real cases.
if self.generator_config.naming_rules.camelcase_enums:
name = "".join([n[:1].upper() + n[1:] for n in parts])
if not ucfirst and name.upper() != name:
name = name[:1].lower() + name[1:]
else:
            # /!\ Not a true snake_case conversion:
            # e.g. HTTPVerb remains HTTPVerb.
name = "_".join(parts)
if re.match(r"^\d", name):
name = f"_{name}"
return self.generator_config.mapping_rules.reservedmap.get(name, name)
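    # Illustrative (hypothetical input, assuming no mapping or reserved-word hit):
    # with camelcase_enums enabled, "entered-in-error" becomes "enteredInError";
    # with it disabled, it becomes "entered_in_error".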
def json_class_for_class_name(self, class_name: str) -> str:
return self.generator_config.mapping_rules.jsonmap.get(
class_name, self.generator_config.mapping_rules.jsonmap_default
)
# MARK: Writing Data
def writable_profiles(self):
""" Returns a list of `FHIRStructureDefinition` instances.
"""
return [
profile
for profile in self.profiles.values()
if profile.manual_module is None
]
class FHIRVersionInfo(object):
""" The version of a FHIR specification.
"""
def __init__(self, spec, directory):
self.spec = spec
now = datetime.date.today()
self.date = now.isoformat()
self.year = now.year
infofile = os.path.join(directory, "version.info")
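        # version.info ships with the spec download and contains a line like
        # "FhirVersion=4.0.1" (the exact value depends on the downloaded release).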
self.version = self.read_version(infofile)
def read_version(self, filepath):
assert os.path.isfile(filepath)
with io.open(filepath, "r", encoding="utf-8") as handle:
for line in handle.readlines():
if line.startswith("FhirVersion"):
return line.split("=", 2)[1].strip()
class FHIRValueSetEnum(object):
""" Holds on to parsed `FHIRValueSet` properties.
"""
def __init__(
self,
name: str,
restricted_to: List[str],
value_set: "FHIRValueSet",
is_codesystem_known: bool,
):
self.name = name
self.restricted_to = restricted_to if len(restricted_to) > 0 else None
self.value_set = value_set
self.is_codesystem_known = is_codesystem_known
self.represents_class = True # required for FHIRClass compatibility
self.module = name # required for FHIRClass compatibility
self.name_if_class = name # required for FHIRClass compatibility
self.superclass_name = None # required for FHIRClass compatibility
self.path = None # required for FHIRClass compatibility
@property
def definition(self) -> "FHIRValueSet":
return self.value_set
def name_of_resource(self) -> None: # required for FHIRClass compatibility
return None
class FHIRValueSet(object):
""" Holds on to ValueSets bundled with the spec.
"""
def __init__(self, spec: "FHIRSpec", set_dict: Dict[str, Any]):
self.spec = spec
self.definition = set_dict
self.url = set_dict.get("url")
self.dstu2_inlined_codesystem = self.definition.get("codeSystem")
if self.dstu2_inlined_codesystem is not None:
self.dstu2_inlined_codesystem["url"] = self.dstu2_inlined_codesystem[
"system"
]
self.dstu2_inlined_codesystem["content"] = "complete"
self.dstu2_inlined_codesystem["name"] = self.definition.get("name")
self.dstu2_inlined_codesystem["description"] = self.definition.get(
"description"
)
self._enum: Optional["FHIRValueSetEnum"] = None
@property
def short(self):
return self.definition.get("title")
@property
def formal(self):
return self.definition.get("description")
@property
def enum(self) -> Optional[FHIRValueSetEnum]:
""" Returns FHIRValueSetEnum if this valueset can be represented by one.
"""
if self._enum is not None:
return self._enum
include = self.__safely_get_single_include()
if include is None:
return None
system = include.get("system")
if system is None:
return None
# alright, this is a ValueSet with 1 include and a system, is there a CodeSystem?
cs = self.spec.codesystem_with_uri(system)
is_codesystem_known = True
if cs is None or not cs.generate_enum:
# If no CodeSystem is found, we build an unofficial enum
# Example : system = "http://unitsofmeasure.org" is not defined in FHIR
is_codesystem_known = False
cs_name = "unknown_codesystem_enum"
else:
cs_name = cs.name
# Restrict CodeSystem to subset of concepts
restricted_to = []
for concept in include.get("concept", []):
assert "code" in concept
restricted_to.append(concept["code"])
self._enum = FHIRValueSetEnum(
name=cs_name,
restricted_to=restricted_to,
value_set=self,
is_codesystem_known=is_codesystem_known,
)
return self._enum
def __safely_get_single_include(self) -> Optional[Dict[str, Any]]:
include = None
if self.dstu2_inlined_codesystem is not None:
include = [self.dstu2_inlined_codesystem]
else:
compose = self.definition.get("compose")
if compose is None:
msg = f"Currently only composed ValueSets are supported. {self.definition}"
raise Exception(msg)
if "exclude" in compose:
msg = "Not currently supporting 'exclude' on ValueSet"
raise Exception(msg)
# "import" is for DSTU-2 compatibility
include = compose.get("include") or compose.get("import") or []
if len(include) != 1:
logger.warning(
f"Ignoring ValueSet with more than 1 includes ({len(include)}: {include})"
)
return None
return include[0]
class FHIRCodeSystem(object):
""" Holds on to CodeSystems bundled with the spec.
"""
def __init__(self, spec: FHIRSpec, resource):
assert "content" in resource
self.spec = spec
self.definition = resource
self.url = resource.get("url")
if self.url in self.spec.generator_config.mapping_rules.enum_namemap:
self.name = self.spec.generator_config.mapping_rules.enum_namemap[self.url]
else:
self.name = self.spec.safe_enum_name(resource.get("name"), ucfirst=True)
if len(self.name) < 1:
raise Exception(
f"Unable to create a name for enum of system {self.url}. You may need to specify a name explicitly in mappings.enum_namemap. Code system content: {resource}"
)
self.description = resource.get("description")
self.valueset_url = resource.get("valueSet")
self.codes = None
self.generate_enum = False
concepts = resource.get("concept", [])
if resource.get("experimental"):
return
if resource["content"] == "complete":
self.generate_enum = True
if not self.generate_enum:
logger.warning(
f"Will not generate enum for CodeSystem '{self.url}' whose content is {resource['content']}"
)
return
assert concepts, 'Expecting at least one code for "complete" CodeSystem'
if len(concepts) > 200:
self.generate_enum = False
logger.info(
f"Will not generate enum for CodeSystem '{self.url}' because it has > 200 ({len(concepts)}) concepts"
)
return
self.codes = self.parsed_codes(concepts)
def parsed_codes(self, codes, prefix=None):
found = []
for c in codes:
if c["code"][:1].isdigit():
self.generate_enum = False
logger.info(
f"Will not generate enum for CodeSystem '{self.url}' because at least one concept code starts with a number"
)
return None
cd = c["code"]
# name = (
# "{}-{}".format(prefix, cd)
# if prefix and not cd.startswith(prefix)
# else cd
# )
code_name = self.spec.safe_enum_name(cd)
if len(code_name) < 1:
raise Exception(
f"Unable to create a member name for enum '{cd}' in {self.url}. You may need to add '{cd}' to mappings.enum_map"
)
c["name"] = code_name
c["definition"] = c.get("definition") or c["name"]
found.append(c)
# nested concepts?
if "concept" in c:
fnd = self.parsed_codes(c["concept"])
if fnd is None:
return None
found.extend(fnd)
return found
class FHIRStructureDefinition(object):
""" One FHIR structure definition.
"""
def __init__(self, spec, profile):
self.manual_module = None
self.spec = spec
self.url = None
self.targetname = None
self.structure = None
self.elements = None
self.main_element = None
self._class_map = {}
self.classes: List[fhirclass.FHIRClass] = []
self._did_finalize = False
if profile is not None:
self.parse_profile(profile)
def __repr__(self):
return f"<{self.__class__.__name__}> name: {self.name}, url: {self.url}"
@property
def name(self):
return self.structure.name if self.structure is not None else None
def read_profile(self, filepath):
""" Read the JSON definition of a profile from disk and parse.
Not currently used.
"""
profile = None
with io.open(filepath, "r", encoding="utf-8") as handle:
profile = json.load(handle)
self.parse_profile(profile)
def parse_profile(self, profile):
""" Parse a JSON profile into a structure.
"""
assert profile
assert "StructureDefinition" == profile["resourceType"]
# parse structure
self.url = profile.get("url")
logger.info('Parsing profile "{}"'.format(profile.get("name")))
self.structure = FHIRStructureDefinitionStructure(self, profile)
def process_profile(self):
""" Extract all elements and create classes.
"""
struct = self.structure.differential # or self.structure.snapshot
if struct is not None:
mapped = {}
self.elements = []
for elem_dict in struct:
element = FHIRStructureDefinitionElement(
self, elem_dict, self.main_element is None
)
self.elements.append(element)
mapped[element.path] = element
# establish hierarchy (may move to extra loop in case elements are no longer in order)
if element.is_main_profile_element:
self.main_element = element
parent = mapped.get(element.parent_name)
if parent:
parent.add_child(element)
# resolve element dependencies
for element in self.elements:
element.resolve_dependencies()
# run check: if n_min > 0 and parent is in summary, must also be in summary
for element in self.elements:
if element.n_min is not None and element.n_min > 0:
if (
element.parent is not None
and element.parent.is_summary
and not element.is_summary
):
logger.error(
"n_min > 0 but not summary: `{}`".format(element.path)
)
element.summary_n_min_conflict = True
# create classes and class properties
if self.main_element is not None:
snap_class, subs = self.main_element.create_class()
if snap_class is None:
raise Exception(
'The main element for "{}" did not create a class'.format(self.url)
)
self.found_class(snap_class)
for sub in subs:
self.found_class(sub)
self.targetname = snap_class.name
def element_with_id(self, ident):
""" Returns a FHIRStructureDefinitionElementDefinition with the given
id, if found. Used to retrieve elements defined via `contentReference`.
"""
if self.elements is not None:
for element in self.elements:
if element.definition.id == ident:
return element
return None
def dstu2_element_with_name(self, name):
""" Returns a FHIRStructureDefinitionElementDefinition with the given
name, if found. Used to retrieve elements defined via `nameReference`
used in DSTU-2.
"""
if self.elements is not None:
for element in self.elements:
if element.definition.name == name:
return element
return None
# MARK: Class Handling
def found_class(self, klass):
self.classes.append(klass)
def needed_external_classes(self):
""" Returns a unique list of class items that are needed for any of the
receiver's classes' properties and are not defined in this profile.
:raises: Will raise if called before `finalize` has been called.
"""
if not self._did_finalize:
raise Exception("Cannot use `needed_external_classes` before finalizing")
internal = set([c.name for c in self.classes])
needed = set()
needs = []
for klass in self.classes:
# are there superclasses that we need to import?
sup_cls = klass.superclass
if (
sup_cls is not None
and sup_cls.name not in internal
and sup_cls.name not in needed
):
needed.add(sup_cls.name)
needs.append(sup_cls)
# look at all properties' classes and assign their modules
for prop in klass.properties:
prop_cls_name = prop.class_name
if prop.enum is not None:
enum_cls, did_create = fhirclass.FHIRClass.for_element(prop.enum)
enum_cls.module = prop.enum.name
prop.module_name = enum_cls.module
if enum_cls.name not in needed:
needed.add(enum_cls.name)
needs.append(enum_cls)
elif (
prop_cls_name not in internal
and not self.spec.class_name_is_native(prop_cls_name)
):
prop_cls = fhirclass.FHIRClass.with_name(prop_cls_name)
if prop_cls is None:
raise Exception(
'There is no class "{}" for property "{}" on "{}" in {}'.format(
prop_cls_name, prop.name, klass.name, self.name
)
)
else:
prop.module_name = prop_cls.module
if prop_cls_name not in needed:
needed.add(prop_cls_name)
needs.append(prop_cls)
return sorted(needs, key=lambda n: n.module or n.name)
def referenced_classes(self):
""" Returns a unique list of **external** class names that are
referenced from at least one of the receiver's `Reference`-type
properties.
:raises: Will raise if called before `finalize` has been called.
"""
if not self._did_finalize:
raise Exception("Cannot use `referenced_classes` before finalizing")
references = set()
for klass in self.classes:
for prop in klass.properties:
if len(prop.reference_to_names) > 0:
references.update(prop.reference_to_names)
# no need to list references to our own classes, remove them
for klass in self.classes:
references.discard(klass.name)
return sorted(references)
def writable_classes(self):
return [klass for klass in self.classes if klass.should_write()]
# MARK: Finalizing
def finalize(self):
""" Our spec object calls this when all profiles have been parsed.
"""
# assign all super-classes as objects
for cls in self.classes:
if cls.superclass is None:
super_cls = fhirclass.FHIRClass.with_name(cls.superclass_name)
if super_cls is None and cls.superclass_name is not None:
raise Exception(
'There is no class implementation for class named "{}" in profile "{}"'.format(
cls.superclass_name, self.url
)
)
else:
cls.superclass = super_cls
self._did_finalize = True
class FHIRStructureDefinitionStructure(object):
""" The actual structure of a complete profile.
"""
def __init__(self, profile, profile_dict):
self.profile = profile
self.name = None
self.base = None
self.kind = None
self.subclass_of = None
self.snapshot = None
self.differential = None
self.parse_from(profile_dict)
def parse_from(self, json_dict):
name = json_dict.get("name")
if not name:
raise Exception("Must find 'name' in profile dictionary but found nothing")
self.name = self.profile.spec.class_name_for_profile(name)
self.base = json_dict.get("baseDefinition")
self.kind = json_dict.get("kind")
if self.base:
self.subclass_of = self.profile.spec.class_name_for_profile(self.base)
# find element definitions
if "snapshot" in json_dict:
self.snapshot = json_dict["snapshot"].get("element", [])
if "differential" in json_dict:
self.differential = json_dict["differential"].get("element", [])
class FHIRStructureDefinitionElement(object):
""" An element in a profile's structure.
"""
def __init__(self, profile, element_dict, is_main_profile_element=False):
assert isinstance(profile, FHIRStructureDefinition)
self.profile = profile
self.path = None
self.parent = None
self.children = None
self.parent_name = None
self.definition = None
self.n_min = None
self.n_max = None
self.is_summary = False
# to mark conflicts, see #13215 (http://gforge.hl7.org/gf/project/fhir/tracker/?action=TrackerItemEdit&tracker_item_id=13125)
self.summary_n_min_conflict = False
self.valueset = None
self.enum = None # assigned if the element has a binding to a ValueSet that is a CodeSystem generating an enum
self.is_main_profile_element = is_main_profile_element
self.represents_class = False
self._superclass_name = None
self._name_if_class = None
self._did_resolve_dependencies = False
if element_dict is not None:
self.parse_from(element_dict)
else:
self.definition = FHIRStructureDefinitionElementDefinition(self, None)
def parse_from(self, element_dict):
self.path = element_dict["path"]
parts = self.path.split(".")
self.parent_name = ".".join(parts[:-1]) if len(parts) > 0 else None
prop_name = parts[-1]
if "-" in prop_name:
prop_name = "".join([n[:1].upper() + n[1:] for n in prop_name.split("-")])
self.definition = FHIRStructureDefinitionElementDefinition(self, element_dict)
self.definition.prop_name = prop_name
self.n_min = element_dict.get("min")
self.n_max = element_dict.get("max")
self.is_summary = element_dict.get("isSummary")
def resolve_dependencies(self):
if self.is_main_profile_element:
self.represents_class = True
if (
not self.represents_class
and self.children is not None
and len(self.children) > 0
):
self.represents_class = True
if self.definition is not None:
self.definition.resolve_dependencies()
self._did_resolve_dependencies = True
# MARK: Hierarchy
def add_child(self, element):
assert isinstance(element, FHIRStructureDefinitionElement)
element.parent = self
if self.children is None:
self.children = [element]
else:
self.children.append(element)
def create_class(self, module=None):
""" Creates a FHIRClass instance from the receiver, returning the
created class as the first and all inline defined subclasses as the
second item in the tuple.
"""
assert self._did_resolve_dependencies
if not self.represents_class:
return None, None
subs = []
cls, did_create = fhirclass.FHIRClass.for_element(self)
if did_create: # manual_profiles
if module is None:
if self.profile.manual_module is not None:
module = self.profile.manual_module
elif self.is_main_profile_element:
module = self.profile.spec.as_module_name(cls.name)
cls.module = module
logger.debug('Created class "{}", module {}'.format(cls.name, module))
# child classes
if self.children is not None:
for child in self.children:
properties = child.as_properties()
if properties is not None:
# collect subclasses
sub, subsubs = child.create_class(module)
if sub is not None:
subs.append(sub)
if subsubs is not None:
subs.extend(subsubs)
# add properties to class
if did_create:
for prop in properties:
cls.add_property(prop)
return cls, subs
def as_properties(self):
""" If the element describes a *class property*, returns a list of
FHIRClassProperty instances, None otherwise.
"""
assert self._did_resolve_dependencies
if self.is_main_profile_element or self.definition is None:
return None
# TODO: handle slicing information (not sure why these properties were
# omitted previously)
# if self.definition.slicing:
# logger.debug('Omitting property "{}" for slicing'.format(self.definition.prop_name))
# return None
# this must be a property
if self.parent is None:
raise Exception(
'Element reports as property but has no parent: "{}"'.format(self.path)
)
# create a list of FHIRClassProperty instances (usually with only 1 item)
if len(self.definition.types) > 0:
props = []
for type_obj in self.definition.types:
# an inline class
if (
"BackboneElement" == type_obj.code or "Element" == type_obj.code
): # data types don't use "BackboneElement"
props.append(
fhirclass.FHIRClassProperty(self, type_obj, self.name_if_class)
)
# TODO: look at http://hl7.org/fhir/StructureDefinition/structuredefinition-explicit-type-name ?
else:
props.append(fhirclass.FHIRClassProperty(self, type_obj))
return props
# no `type` definition in the element: it's a property with an inline class definition
type_obj = FHIRElementType()
return [fhirclass.FHIRClassProperty(self, type_obj, self.name_if_class)]
# MARK: Name Utils
def name_of_resource(self):
assert self._did_resolve_dependencies
if (
not self.is_main_profile_element
or self.profile.structure.kind is None
or self.profile.structure.kind != "resource"
):
return None
return self.profile.name
@property
def name_if_class(self):
if self._name_if_class is None:
self._name_if_class = self.definition.name_if_class()
return self._name_if_class
@property
def superclass_name(self):
""" Determine the superclass for the element (used for class elements).
"""
if self._superclass_name is None:
tps = self.definition.types
if len(tps) > 1:
raise Exception(
'Have more than one type to determine superclass in "{}": "{}"'.format(
self.path, tps
)
)
type_code = None
if (
self.is_main_profile_element
and self.profile.structure.subclass_of is not None
):
type_code = self.profile.structure.subclass_of
elif len(tps) > 0:
type_code = tps[0].code
elif self.profile.structure.kind:
type_code = self.profile.spec.generator_config.default_base[
self.profile.structure.kind
]
self._superclass_name = self.profile.spec.class_name_for_type(type_code)
return self._superclass_name
def __repr__(self):
return f"<{self.__class__.__name__}> path: {self.path}"
class FHIRStructureDefinitionElementDefinition(object):
""" The definition of a FHIR element.
"""
def __init__(self, element, definition_dict):
self.id = None
self.element = element
self.types = []
self.name = None
self.prop_name = None
self.content_reference = None
self._content_referenced = None
self.short = None
self.formal = None
self.comment = None
self.binding = None
self.constraint = None
self.mapping = None
self.slicing = None
self.representation = None
# TODO: extract "defaultValue[x]", "fixed[x]", "pattern[x]"
# TODO: handle "slicing"
if definition_dict is not None:
self.parse_from(definition_dict)
def parse_from(self, definition_dict):
self.id = definition_dict.get("id")
self.types = []
for type_dict in definition_dict.get("type", []):
self.types.append(FHIRElementType(type_dict))
self.name = definition_dict.get("name")
self.content_reference = definition_dict.get("contentReference")
self.dstu2_name_reference = definition_dict.get("nameReference")
self.short = definition_dict.get("short")
self.formal = definition_dict.get("definition")
if (
self.formal and self.short == self.formal[:-1]
): # formal adds a trailing period
self.formal = None
self.comment = definition_dict.get("comments")
if "binding" in definition_dict:
self.binding = FHIRElementBinding(definition_dict["binding"])
if "constraint" in definition_dict:
self.constraint = FHIRElementConstraint(definition_dict["constraint"])
if "mapping" in definition_dict:
self.mapping = FHIRElementMapping(definition_dict["mapping"])
if "slicing" in definition_dict:
self.slicing = definition_dict["slicing"]
self.representation = definition_dict.get("representation")
def resolve_dependencies(self):
# update the definition from a reference, if there is one
if self.content_reference is not None:
if "#" != self.content_reference[:1]:
raise Exception(
"Only relative 'contentReference' element definitions are supported right now"
)
elem = self.element.profile.element_with_id(self.content_reference[1:])
            if elem is None:
                raise Exception(
                    f'There is no element definition with id "{self.content_reference}", as referenced by {self.element.path} in {self.element.profile.url}'
                )
self._content_referenced = elem.definition
elif self.dstu2_name_reference is not None: # DSTU-2 backwards-compatibility
elem = self.element.profile.dstu2_element_with_name(
self.dstu2_name_reference
)
            if elem is None:
                raise Exception(
                    f'There is no element definition with name "{self.dstu2_name_reference}", as referenced by {self.element.path} in {self.element.profile.url}'
                )
self._content_referenced = elem.definition
# resolve bindings
if (
self.binding is not None
and self.binding.is_required
and self.binding.has_valueset
):
uri = self.binding.valueset_uri
if not uri.startswith("http://hl7.org/fhir"):
logger.debug('Ignoring foreign ValueSet "{}"'.format(uri))
return
# remove version from canonical URI, if present, e.g. "http://hl7.org/fhir/ValueSet/name-use|4.0.0"
uri = uri.split("|")[0]
valueset = self.element.profile.spec.valueset_with_uri(uri)
if valueset is None:
logger.error(
'There is no ValueSet for required binding "{}" on {} in {}'.format(
uri, self.name or self.prop_name, self.element.profile.name
)
)
else:
self.element.valueset = valueset
self.element.enum = valueset.enum
def name_if_class(self):
""" Determines the class-name that the element would have if it was
defining a class. This means it uses "name", if present, and the last
"path" component otherwise. It also detects if the definition is a
reference and will re-use the class name defined by the referenced
element (such as `ValueSet.codeSystem.concept.concept`).
"""
# This Element is a reference, pick up the original name
if self._content_referenced is not None:
return self._content_referenced.name_if_class()
with_name = self.name or self.prop_name
parent_name = (
self.element.parent.name_if_class
if self.element.parent is not None
else None
)
classname = self.element.profile.spec.class_name_for_type(
with_name, parent_name
)
if (
parent_name is not None
and self.element.profile.spec.generator_config.naming_rules.backbone_class_adds_parent
):
classname = parent_name + classname
return classname
class FHIRElementType(object):
"""Representing a type of an element.
https://www.hl7.org/fhir/element.html
"""
def __init__(self, type_dict=None):
self.code = None
self.profile = None
if type_dict is not None:
self.parse_from(type_dict)
def parse_from(self, type_dict):
self.code = type_dict.get("code")
# Look for the "structuredefinition-fhir-type" extension, introduced after R4
ext_type = type_dict.get("extension")
# http://hl7.org/fhir/2020Feb/extensibility.html#Extension
if ext_type is not None:
fhir_ext = [
e
for e in ext_type
if e.get("url")
== "http://hl7.org/fhir/StructureDefinition/structuredefinition-fhir-type"
]
if len(fhir_ext) == 1: # This may hit after R4
if "valueUri" in fhir_ext[0]:
self.code = fhir_ext[0].get("valueUri")
if "valueUrl" in fhir_ext[0]:
self.code = fhir_ext[0].get("valueUrl")
# This may hit on R4 or earlier
ext_code = type_dict.get("_code")
if self.code is None and ext_code is not None:
json_ext = [
e
for e in ext_code.get("extension", [])
if e.get("url")
== "http://hl7.org/fhir/StructureDefinition/structuredefinition-json-type"
]
if len(json_ext) < 1:
raise Exception(
f'Expecting either "code" or "_code" and a JSON type extension, found neither in {type_dict}'
)
if len(json_ext) > 1:
raise Exception(
f"Found more than one structure definition JSON type in {type_dict}"
)
self.code = json_ext[0].get("valueString")
if self.code is None:
raise Exception(f"No element type code found in {type_dict}")
if not isinstance(self.code, str):
raise Exception(
"Expecting a string for 'code' definition of an element type, got {} as {}".format(
self.code, type(self.code)
)
)
        # `targetProfile` became a list of canonicals in R4; only keep the
        # string form used by earlier releases and skip the list form
        target_profile = type_dict.get("targetProfile")
        if not isinstance(target_profile, list):
            self.profile = target_profile
        if self.profile is not None and not isinstance(self.profile, str):
            raise Exception(
                "Expecting a string for 'targetProfile' definition of an element type, got {} as {}".format(
                    self.profile, type(self.profile)
                )
            )
class FHIRElementBinding(object):
""" The "binding" element in an element definition
"""
def __init__(self, binding_obj):
self.strength = binding_obj.get("strength")
self.description = binding_obj.get("description")
self.valueset = binding_obj.get("valueSet")
self.legacy_uri = binding_obj.get("valueSetUri")
self.legacy_canonical = binding_obj.get("valueSetCanonical")
self.dstu2_reference = binding_obj.get("valueSetReference", {}).get("reference")
self.is_required = "required" == self.strength
@property
def has_valueset(self):
return self.valueset_uri is not None
@property
def valueset_uri(self):
return (
self.valueset
or self.legacy_uri
or self.legacy_canonical
or self.dstu2_reference
)
class FHIRElementConstraint(object):
""" Constraint on an element.
"""
def __init__(self, constraint_arr):
pass
class FHIRElementMapping(object):
""" Mapping FHIR to other standards.
"""
def __init__(self, mapping_arr):
pass
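# Usage sketch (illustrative, not part of this module): assumes the surrounding
# fhirspec package provides the `spec` object with the mapping/naming helpers
# used above, and that `profile_json` is a parsed StructureDefinition resource.
#
#   sd = FHIRStructureDefinition(spec, profile_json)
#   sd.process_profile()     # build FHIRClass instances from the elements
#   sd.finalize()            # resolve superclass objects
#   imports = sd.needed_external_classes()
#
# Note: finalize() must run before needed_external_classes() or
# referenced_classes(); both raise if called earlier.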
| 36.964199 | 173 | 0.585133 | 5,104 | 45,429 | 5.063284 | 0.112265 | 0.014859 | 0.013234 | 0.011492 | 0.250938 | 0.175328 | 0.134311 | 0.097318 | 0.074527 | 0.046047 | 0 | 0.003476 | 0.328689 | 45,429 | 1,228 | 174 | 36.9943 | 0.843919 | 0.143323 | 0 | 0.192661 | 0 | 0.009174 | 0.105339 | 0.008263 | 0 | 0 | 0 | 0.003257 | 0.019495 | 1 | 0.079128 | false | 0.002294 | 0.013761 | 0.016055 | 0.183486 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
afecd187c7058a489009b41d8ff819203ecdac2f | 1,868 | py | Python | src/sims4communitylib/dialogs/_common_ui_dialog_text_input_ok_cancel.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | 118 | 2019-08-31T04:33:18.000Z | 2022-03-28T21:12:14.000Z | src/sims4communitylib/dialogs/_common_ui_dialog_text_input_ok_cancel.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | 15 | 2019-12-05T01:29:46.000Z | 2022-02-18T17:13:46.000Z | src/sims4communitylib/dialogs/_common_ui_dialog_text_input_ok_cancel.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | 28 | 2019-09-07T04:11:05.000Z | 2022-02-07T18:31:40.000Z | """
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from typing import Tuple, Any, Callable
from protocolbuffers.Localization_pb2 import LocalizedString
from sims.sim_info import SimInfo
from sims4communitylib.utils.localization.common_localization_utils import CommonLocalizationUtils
from ui.ui_dialog_generic import UiDialogTextInputOkCancel
class _CommonUiDialogTextInputOkCancel(UiDialogTextInputOkCancel):
def __init__(
self,
sim_info: SimInfo,
*args,
title: Callable[..., LocalizedString]=None,
text: Callable[..., LocalizedString]=None,
**kwargs
):
super().__init__(
sim_info,
*args,
title=title,
text=text,
**kwargs
)
self.text_input_responses = {}
def on_text_input(self, text_input_name: str='', text_input: str='') -> bool:
"""A callback that occurs upon text being entered.
"""
self.text_input_responses[text_input_name] = text_input
return False
def build_msg(self, text_input_overrides=None, additional_tokens: Tuple[Any]=(), **kwargs):
"""Build the message.
"""
from sims4communitylib.dialogs.utils.common_dialog_utils import CommonDialogUtils
msg = super().build_msg(additional_tokens=(), **kwargs)
text_input_msg = msg.text_input.add()
text_input_msg.text_input_name = CommonDialogUtils.TEXT_INPUT_NAME
if additional_tokens and additional_tokens[0] is not None:
text_input_msg.initial_value = CommonLocalizationUtils.create_localized_string(str(additional_tokens[0]))
return msg
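# Usage sketch (requires a live Sims 4 game context, so not runnable standalone;
# `sim_info`, `title` and `text` are assumed to come from the calling mod code):
#
#   dialog = _CommonUiDialogTextInputOkCancel(sim_info, title=title, text=text)
#   # after the player submits, the entered value is stored under the input name
#   # that build_msg registered:
#   entered = dialog.text_input_responses.get(CommonDialogUtils.TEXT_INPUT_NAME)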
| 36.627451 | 125 | 0.697002 | 212 | 1,868 | 5.886792 | 0.419811 | 0.100962 | 0.041667 | 0.014423 | 0.059295 | 0.059295 | 0.059295 | 0.059295 | 0.059295 | 0.059295 | 0 | 0.009504 | 0.211456 | 1,868 | 50 | 126 | 37.36 | 0.837746 | 0.181478 | 0 | 0.121212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.181818 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
afee559bb9619fc76eddbb0de5034b9cc836e90b | 1,923 | py | Python | build_an_ai_startup_demo/app/views/main.py | bbueno5000/BuildAnAIStartUpDemo | f70371802a2546530c34b7f04e2b644cd1faec8a | [
"MIT"
] | null | null | null | build_an_ai_startup_demo/app/views/main.py | bbueno5000/BuildAnAIStartUpDemo | f70371802a2546530c34b7f04e2b644cd1faec8a | [
"MIT"
] | null | null | null | build_an_ai_startup_demo/app/views/main.py | bbueno5000/BuildAnAIStartUpDemo | f70371802a2546530c34b7f04e2b644cd1faec8a | [
"MIT"
] | null | null | null | """
Flask view functions for the demo app: static page routes, a map-refresh JSON
endpoint, and an image-upload route that classifies the image with a
pretrained ResNet50.
"""
import app
import flask
import keras
import numpy
import os
import random
#disease_list = [
# 'Atelectasis',
# 'Consolidation',
# 'Infiltration',
# 'Pneumothorax',
# 'Edema',
# 'Emphysema',
# 'Fibrosis',
# 'Effusion',
# 'Pneumonia',
# 'Pleural_Thickening',
# 'Cardiomegaly',
# 'Nodule',
# 'Mass',
# 'Hernia']
@app.app.route('/contact')
def contact():
return flask.render_template('contact.html', title='Contact')
@app.app.route('/')
@app.app.route('/index')
def index():
return flask.render_template('index.html', title='Home')
@app.app.route('/map')
def map():
return flask.render_template('map.html', title='Map')
@app.app.route('/map/refresh', methods=['POST'])
def map_refresh():
points = [(
random.uniform(48.8434100, 48.8634100),
random.uniform(2.3388000, 2.3588000)) for _ in range(random.randint(2, 9))]
return flask.jsonify({'points': points})
@app.app.route('/uploaded', methods = ['GET', 'POST'])
def upload_file():
if flask.request.method == 'POST':
f = flask.request.files['file']
path = os.path.join(app.app.config['UPLOAD_FOLDER'], f.filename)
model = keras.applications.resnet50.ResNet50(weights='imagenet')
img = keras.preprocessing.image.load_img(path, target_size=(224, 224))
x = keras.preprocessing.image.img_to_array(img)
x = numpy.expand_dims(x, axis=0)
x = keras.applications.resnet50.preprocess_input(x)
preds = model.predict(x)
preds_decoded = keras.applications.resnet50.decode_predictions(preds, top=3)[0]
        print(preds_decoded)
f.save(path)
return flask.render_template(
'uploaded.html', title='Success',
predictions=preds_decoded, user_image=f.filename)
@app.app.route('/upload')
def upload_file2():
return flask.render_template('index.html')
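# Local smoke-test sketch using Flask's built-in test client (assumes `app.app`
# is the Flask instance, as the decorators above suggest, and that the templates
# referenced by the views exist):
#
#   client = app.app.test_client()
#   assert client.get('/index').status_code == 200
#   print(client.post('/map/refresh').get_json())   # -> {'points': [[lat, lng], ...]}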
| 27.869565 | 86 | 0.658346 | 237 | 1,923 | 5.244726 | 0.421941 | 0.038616 | 0.061947 | 0.100563 | 0.138375 | 0.138375 | 0.083669 | 0.083669 | 0.083669 | 0 | 0 | 0.036318 | 0.169527 | 1,923 | 68 | 87 | 28.279412 | 0.742016 | 0.133645 | 0 | 0 | 0 | 0 | 0.101582 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0.095238 | 0.428571 | 0.02381 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
aff5059c5098386517f47226d8e0e39141c4a8f9 | 3,001 | py | Python | MGCosmoPop/posteriors/prior.py | nicoborghi/MGCosmoPop | ebf07744caed1ac6694e7c750c1147ac30442fe5 | [
"BSD-3-Clause"
] | 4 | 2022-01-31T02:00:30.000Z | 2022-03-22T08:00:00.000Z | MGCosmoPop/posteriors/prior.py | nicoborghi/MGCosmoPop | ebf07744caed1ac6694e7c750c1147ac30442fe5 | [
"BSD-3-Clause"
] | null | null | null | MGCosmoPop/posteriors/prior.py | nicoborghi/MGCosmoPop | ebf07744caed1ac6694e7c750c1147ac30442fe5 | [
"BSD-3-Clause"
] | 5 | 2021-12-13T03:33:48.000Z | 2022-03-22T08:00:02.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 4 11:31:43 2021
@author: Michi
"""
import numpy as np
class Prior(object):
'''
Class implementing the prior. At the moment it only supports disjoint priors.
    Contains a method logPrior that returns the sum of the log priors for each
    variable in the inference.
'''
def __init__(self, priorLimits, params_inference, priorNames, priorParams):
'''
Parameters
----------
        priorLimits : dict
            Dictionary mapping each parameter used in the inference to the
            (min, max) range of its prior, as enforced in logPrior.
            Example: for inference on H0, lambda:
                { 'H0': (20, 140), 'lambdaRedshift': (-10, 10) }
        params_inference : list
            List of names of the parameters used in the inference.
            Example: ['H0', 'lambdaRedshift']
        priorNames : dict
            Dictionary specifying the type of prior used for each parameter.
            Supported so far are 'flat', 'flatLog', 'gauss'.
            Example: gaussian prior on H0, flat on lambda:
                {'H0': 'gauss', 'lambdaRedshift': 'flat'}
        priorParams : dict
            If any of the prior types requires parameters (e.g. mu and sigma for
            the gaussian), they are passed through this argument, keyed by
            parameter name.
            Example: mu and sigma for a gaussian prior on H0:
                { 'H0': {'mu': 67.9, 'sigma': 0.1} }
'''
self.priorLimits = priorLimits
self.params_inference = params_inference
self.priorNames = priorNames
self.priorParams = priorParams
def _logGauss(self, x, mu, sigma):
'''
gaussian prior
'''
if np.abs(x-mu)>7*sigma:
return np.NINF
return (-np.log(sigma)-(x-mu)**2/(2*sigma**2))
def _flatLog(self, x):
'''
1/x prior
'''
return -np.log(x)
def logPrior(self, Lambda_test):
if np.isscalar(Lambda_test):
limInf, limSup = self.priorLimits[self.params_inference[0]]
condition = limInf < Lambda_test < limSup
else:
condition = True
for i,param in enumerate(self.params_inference):
limInf, limSup = self.priorLimits[param]
condition &= limInf < Lambda_test[i] < limSup
if not condition:
return np.NINF
        lp = 0
        for i, param in enumerate(self.params_inference):
            pname = self.priorNames[param]
            if np.isscalar(Lambda_test):
                x = Lambda_test
            else:
                x = Lambda_test[i]
            if pname == 'flatLog':
                lp += self._flatLog(x)
            elif pname == 'gauss':
                mu, sigma = self.priorParams[param]['mu'], self.priorParams[param]['sigma']
                lp += self._logGauss(x, mu, sigma)
        return lp
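# Minimal self-check sketch (illustrative values, not from any run config):
# a gaussian prior on H0 and a flat prior on lambdaRedshift.
if __name__ == '__main__':
    prior = Prior(
        priorLimits={'H0': (20, 140), 'lambdaRedshift': (-10, 10)},
        params_inference=['H0', 'lambdaRedshift'],
        priorNames={'H0': 'gauss', 'lambdaRedshift': 'flat'},
        priorParams={'H0': {'mu': 67.9, 'sigma': 0.1}},
    )
    print(prior.logPrior([67.9, 0.0]))   # finite: inside limits, at the gaussian peak
    print(prior.logPrior([150.0, 0.0]))  # -inf: H0 outside its prior range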
| 30.313131 | 94 | 0.539154 | 340 | 3,001 | 4.694118 | 0.367647 | 0.065789 | 0.047619 | 0.016291 | 0.076441 | 0.048872 | 0.048872 | 0.048872 | 0 | 0 | 0 | 0.020526 | 0.366878 | 3,001 | 99 | 95 | 30.313131 | 0.819474 | 0.373209 | 0 | 0.216216 | 0 | 0 | 0.011853 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108108 | false | 0 | 0.027027 | 0 | 0.297297 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
aff8c4003c708639ad7c4f72b3b09d1135fb4817 | 4,058 | py | Python | mmhuman3d/core/visualization/renderer/torch3d_renderer/depth_renderer.py | ykk648/mmhuman3d | 26af92bcf6abbe1855e1a8a48308621410f9c047 | [
"Apache-2.0"
] | 472 | 2021-12-03T03:12:55.000Z | 2022-03-31T01:33:13.000Z | mmhuman3d/core/visualization/renderer/torch3d_renderer/depth_renderer.py | ykk648/mmhuman3d | 26af92bcf6abbe1855e1a8a48308621410f9c047 | [
"Apache-2.0"
] | 127 | 2021-12-03T05:00:14.000Z | 2022-03-31T13:47:33.000Z | mmhuman3d/core/visualization/renderer/torch3d_renderer/depth_renderer.py | ykk648/mmhuman3d | 26af92bcf6abbe1855e1a8a48308621410f9c047 | [
"Apache-2.0"
] | 37 | 2021-12-03T03:23:22.000Z | 2022-03-31T08:41:58.000Z | from typing import Iterable, Optional, Tuple, Union
import torch
from pytorch3d.structures import Meshes
from mmhuman3d.core.cameras import MMCamerasBase
from .base_renderer import BaseRenderer
from .builder import RENDERER, build_shader
from .utils import normalize
@RENDERER.register_module(
name=['Depth', 'depth', 'depth_renderer', 'DepthRenderer'])
class DepthRenderer(BaseRenderer):
"""Render depth map with the help of camera system."""
shader_type = 'DepthShader'
def __init__(
self,
resolution: Tuple[int, int] = None,
device: Union[torch.device, str] = 'cpu',
output_path: Optional[str] = None,
out_img_format: str = '%06d.png',
depth_max: Union[int, float, torch.Tensor] = None,
**kwargs,
) -> None:
"""Renderer for depth map of meshes.
Args:
            resolution (Tuple[int, int]):
                (width, height) resolution of the rendered images.
device (Union[torch.device, str], optional):
You can pass a str or torch.device for cpu or gpu render.
Defaults to 'cpu'.
output_path (Optional[str], optional):
Output path of the video or images to be saved.
Defaults to None.
out_img_format (str, optional): The image format string for
saving the images.
Defaults to '%06d.png'.
depth_max (Union[int, float, torch.Tensor], optional):
The max value for normalize depth range. Defaults to None.
Returns:
None
"""
super().__init__(
resolution=resolution,
device=device,
output_path=output_path,
out_img_format=out_img_format,
**kwargs)
self.depth_max = depth_max
def _init_renderer(self,
rasterizer=None,
shader=None,
materials=None,
lights=None,
blend_params=None,
**kwargs):
shader = build_shader(dict(
type='DepthShader')) if shader is None else shader
return super()._init_renderer(rasterizer, shader, materials, lights,
blend_params, **kwargs)
def forward(self,
meshes: Optional[Meshes] = None,
cameras: Optional[MMCamerasBase] = None,
indexes: Optional[Iterable[int]] = None,
backgrounds: Optional[torch.Tensor] = None,
**kwargs):
"""Render depth map.
Args:
meshes (Optional[Meshes], optional): meshes to be rendered.
Defaults to None.
cameras (Optional[MMCamerasBase], optional): cameras for rendering.
Defaults to None.
indexes (Optional[Iterable[int]], optional): indexes for the
images.
Defaults to None.
backgrounds (Optional[torch.Tensor], optional): background images.
Defaults to None.
Returns:
Union[torch.Tensor, None]: return tensor or None.
"""
meshes = meshes.to(self.device)
self._update_resolution(cameras, **kwargs)
fragments = self.rasterizer(meshes_world=meshes, cameras=cameras)
depth_map = self.shader(
fragments=fragments, meshes=meshes, cameras=cameras)
        if self.output_path is not None:
            rgba = self.tensor2rgba(depth_map)
            self._write_images(rgba, backgrounds, indexes)
return depth_map
def tensor2rgba(self, tensor: torch.Tensor):
rgbs, valid_masks = tensor.repeat(1, 1, 1, 3), (tensor > 0) * 1.0
depth_max = self.depth_max if self.depth_max is not None else rgbs.max(
)
rgbs = normalize(
rgbs, origin_value_range=(0, depth_max), out_value_range=(0, 1))
return torch.cat([rgbs, valid_masks], -1)
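# Construction sketch (assumes pytorch3d Meshes and an MMCamerasBase camera are
# available; all values here are illustrative):
#
#   renderer = DepthRenderer(resolution=(512, 512), device='cuda',
#                            output_path='out/', depth_max=5.0)
#   depth = renderer(meshes=meshes, cameras=cameras)   # i.e. forward(...); (N, H, W, 1)
#   rgba = renderer.tensor2rgba(depth)                 # normalized RGB + validity mask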
| 36.558559 | 79 | 0.575899 | 438 | 4,058 | 5.207763 | 0.260274 | 0.028058 | 0.036826 | 0.01929 | 0.170978 | 0.055239 | 0.055239 | 0.033319 | 0.033319 | 0 | 0 | 0.007034 | 0.334401 | 4,058 | 110 | 80 | 36.890909 | 0.837468 | 0.294973 | 0 | 0.065574 | 0 | 0 | 0.026892 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065574 | false | 0 | 0.114754 | 0 | 0.262295 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
aff8e664de9e8a592be6d93982dd50a72c78e151 | 7,131 | py | Python | bot.py | 0xsmoos/PMD | 049ef60f9a4c44b635fc4dc88c5096685d78b5b7 | [
"MIT"
] | 3 | 2021-04-22T17:00:05.000Z | 2021-08-19T05:33:37.000Z | bot.py | 0xsmoos/PMD | 049ef60f9a4c44b635fc4dc88c5096685d78b5b7 | [
"MIT"
] | 4 | 2021-04-24T10:46:03.000Z | 2022-01-06T14:36:00.000Z | bot.py | 0xsmoos/PMD | 049ef60f9a4c44b635fc4dc88c5096685d78b5b7 | [
"MIT"
] | 1 | 2021-05-06T17:12:52.000Z | 2021-05-06T17:12:52.000Z | # -*- coding: utf-8 -*-
# filename : bot.py
# description : Discord bot interface for interacting with the server
# author : LikeToAccess
# email : liketoaccess@protonmail.com
# date : 08-01-2021
# version : v2.0
# usage : python main.py
# notes :
# license : MIT
# py version : 3.8.2 (must run on 3.6 or higher)
#==============================================================================
import time
from threading import Thread
import discord
from requests.exceptions import MissingSchema
from discord.ext import commands, tasks
from scraper import Scraper
from errors import NoResults
import config as cfg
import media
import download
credentials = media.read_file("credentials.md", filter=True)
scraper = Scraper()
token = credentials[0]
allowed_users = credentials[1:]
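# Expected credentials.md layout (inferred from the reads above; the file itself
# is not part of this repo): one value per line, the bot token first, then one
# allowed user id per line, e.g.
#   <DISCORD_BOT_TOKEN>
#   123456789012345678
#   234567890123456789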
channel_id = {
"commands": 776367990560129066,
"log": 776354053222826004,
"spam": 780948981299150888,
}
bot = commands.Bot(command_prefix=
[
"beta ",
"Beta ",
"BETA ",
"test ",
],
help_command=None, case_insensitive=True)
# |
# Discord Functions |
# V
@bot.event
async def on_ready():
check_logs.start()
    print(f"{bot.user} successfully connected!")
await set_status("Free Movies on Plex!", discord.Status.online)
@bot.listen("on_message")
async def on_message(message):
if not message.content.startswith("https://gomovies-online."): return
if message.channel.id != channel_id["commands"]: return
if message.author == bot.user: return
await send("Testing link...", silent=False)
# if "--res=" in message.content:
# forced_resolution = message.content.split("--res=")[1]
# cfg.write_attempts(int(forced_resolution))
author = message.author
source_url = message.content
download_queue = scraper.get_download_link(source_url)
for data in download_queue:
target_url, metadata, *_ = data
run_download(target_url, metadata, author.id)
@tasks.loop(seconds=0.5)
async def check_logs(filename="log.txt"):
log_data = media.read_file(filename, filter=True)
if log_data:
media.write_file(filename, "### Beginning of message buffer from server ###\n")
bulk_message = []
for message in log_data:
if "--embed" in message:
metadata = eval(message.replace("--embed",""))
await create_embed(metadata)
elif "--channel=" in message:
message = message.split("--channel=")
await send(message[0], channel=message[1])
elif "--file" in message:
await send(message)
# elif "--res=" in message:
# forced_resolution = message.split("--res=")[1]
# cfg.write_attempts(int(forced_resolution))
# bulk_message.append(message.split("--res=")[0])
else:
bulk_message.append(message)
if bulk_message: await send("\n".join(bulk_message))
# |
# Discord Commands |
# V
@bot.command()
async def downloads(ctx, user: discord.User, *flags):
total_size = 0 # This is in MB
movies = []
user_id = user.id
lines = media.read_file(f"{user_id}.txt", filter=True)
for line in lines:
line = line.split("|")
movies.append(line[0])
total_size += float(line[2])
if "--list" in flags:
await send("{}".format("\n".join(movies)))
author = user.display_name
total_size = (
f"{int(round(total_size, 0))} MB" if total_size < 2048 else f"{round(total_size/1024, 2)} GB"
)
await send(
f"{author} has downloaded {len(movies)} movies/episodes totaling {total_size}."
)
@bot.command(aliases=["add", "download"])
async def download_first_result(ctx, *movie_name):
movie_name = " ".join(movie_name)
author = ctx.author.id
scraper.author = author
if "https://gomovies-online." in movie_name:
await send("Downloading via direct link...")
download_queue = scraper.get_download_link(movie_name) # This would be a link not a query
else:
await send("Searching for matches...")
try:
download_queue = scraper.download_first_from_search(movie_name) # Searches using a movie title
except NoResults:
download_queue = None
if download_queue:
for data in download_queue:
url, metadata, author = data
if url:
# If there were results and there is a valid URL, then download
                await send("Link found, download starting...")
                print(f"DEBUG: {metadata}")
                first_result = metadata[list(metadata)[0]]
                await create_embed(first_result)
                run_download(url, first_result, author)
else:
await send("**ERROR**: No search results found!")
else:
await send("No results!", silent=False)
@bot.command()
async def search(ctx, *search_query):
search_query = " ".join(search_query)
author = ctx.author.id
scraper.author = author
start_time = time.time()
if search_query:
results, metadata = scraper.search(
"https://gomovies-online.cam/search/" + \
"-".join(search_query.split())
)
print(f"Finished scraping search results in {round(time.time()-start_time,2)} seconds!")
if results and metadata:
for description in metadata:
# print(description)
await create_embed(metadata[description])
else:
await send("**ERROR**: No search results found!")
@bot.command()
async def react(ctx):
await ctx.message.add_reaction("\U0001F44D")
@bot.command(aliases=["status", "validate"])
async def validate_url(ctx, *url):
url = " ".join(url)
try:
status_code = download.validate_url(url)[0]
await send(f"Status for URL: {status_code}")
except MissingSchema as error:
await send(str(error))
@bot.command()
async def solve(ctx, captcha_solution):
await ctx.message.delete()
filename = "solved_captcha.txt"
media.write_file(filename, captcha_solution)
await ctx.send("Attempting captcha solve...")
# |
# Async Functions |
# V
async def create_embed(metadata, color=0xcbaf2f, channel="commands"):
embed = discord.Embed(
title=metadata["data-filmname"],
description=metadata["data-genre"],
color=color
)
embed.set_footer(text=metadata["data-descript"])
embed.set_thumbnail(url=metadata["img"])
embed.add_field(name="\U0001F4C5", value=metadata["data-year"], inline=True)
embed.add_field(name="IMDb", value=metadata["data-imdb"], inline=True)
embed.add_field(name="\U0001F554", value=metadata["data-duration"], inline=True)
await bot.get_channel(channel_id[channel]).send(embed=embed)
async def send(msg, channel="commands", silent=True):
channel = bot.get_channel(channel_id[channel])
if "--file" in msg:
msg = msg.split("--file=")
print(f"DEBUG: msg contains \"--file\" and the filename is \"{msg[1]}\"")
await channel.send(msg[0].strip())
await channel.send(file=discord.File(msg[1]))
else:
await channel.send(msg)
if not silent: print(msg)
async def set_status(activity, status=discord.Status.online):
await bot.change_presence(status=status, activity=discord.Game(activity))
# |
# Functions |
# V
def run_download(url, metadata, author):
download_function = download.Download(url, metadata, author)
threaded_download = Thread(target=download_function.run)
threaded_download.start()
def run():
return bot.run(token)
if __name__ == "__main__":
run()
| 29.345679 | 98 | 0.680409 | 944 | 7,131 | 5.020127 | 0.261653 | 0.026588 | 0.014349 | 0.015193 | 0.097489 | 0.088204 | 0.0498 | 0.034606 | 0.018569 | 0 | 0 | 0.02026 | 0.169401 | 7,131 | 242 | 99 | 29.466942 | 0.779841 | 0.160567 | 0 | 0.115607 | 0 | 0 | 0.172802 | 0.013111 | 0 | 0 | 0.001345 | 0 | 0 | 1 | 0.011561 | false | 0 | 0.057803 | 0.00578 | 0.075145 | 0.028902 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
affae23e9d7b59854ff8ab5e58706d9252734abf | 12,277 | py | Python | rq1and3_localness/happiness/compute_happiness.py | joh12041/chi-2016-localness | a7048015aac417217d23fccb5f49971922af6322 | [
"MIT"
] | 4 | 2016-11-06T21:55:51.000Z | 2019-07-23T19:39:00.000Z | rq1and3_localness/happiness/compute_happiness.py | joh12041/chi-2016-localness | a7048015aac417217d23fccb5f49971922af6322 | [
"MIT"
] | null | null | null | rq1and3_localness/happiness/compute_happiness.py | joh12041/chi-2016-localness | a7048015aac417217d23fccb5f49971922af6322 | [
"MIT"
] | null | null | null | """RQ3: Happiness algorithm as impacted by localness"""
import csv
import os
import argparse
import sys
from collections import OrderedDict
import numpy
from scipy.stats import spearmanr
from scipy.stats import wilcoxon
sys.path.append("./utils")
import bots
LOCALNESS_METRICS = ['nday','plurality']
HAPPINESS_EVALUATIONS_FN = "../resources/happiness_evaluations.txt"
def build_happiness_dict():
"""Return dictionary containing word : happiness."""
with open(HAPPINESS_EVALUATIONS_FN, 'r') as fin:
csvreader = csv.reader(fin, delimiter='\t')
# Clear out metadata
for i in range(0, 3):
next(csvreader)
assert next(csvreader) == ['word', 'happiness_rank', 'happiness_average', 'happiness_standard_deviation', 'twitter_rank', 'google_rank', 'nyt_rank', 'lyrics_rank']
happy_dict = {}
for line in csvreader:
word = line[0]
h_avg = float(line[2])
if h_avg > 6 or h_avg < 4:
happy_dict[word] = h_avg
return happy_dict
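# Worked example of the neutral-band filter above (hypothetical scores; the real
# ones come from happiness_evaluations.txt): words whose average rating falls in
# the 4-6 band are dropped, so a tweet is scored only on emotionally loaded words.
#   happy_dict = {'love': 8.42, 'hate': 2.34}     # a neutral word like 'the' (~4.98) is filtered out
#   "I love the rain" -> one scored word -> h_avg_txt = 8.42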
def compute_happiness(scale='counties'):
"""Compute happiness by county based on localness-processed CSV from localness.py."""
# generate word -> happiness dictionary
happy_dict = build_happiness_dict()
bots_filter = bots.build_bots_filter()
# directory containing all of the tweets sorted by state or county depending on scale - one file for each region
tweets_dir = './{0}'.format(scale)
tweets_fns = os.listdir(tweets_dir)
output_fn = "./raw_happiness_results_{0}.csv".format(scale)
with open(output_fn, "w") as fout:
csvwriter = csv.writer(fout)
for localness in LOCALNESS_METRICS:
csvwriter.writerow(['{0}_fips'.format(scale), '{0}_med_h'.format(localness), '{0}_avg_h'.format(localness),
'nonlocal_med_h', 'nonlocal_avg_h', 'unfiltered_med_h', 'unfiltered_avg_h',
'total_local', 'total_nonlocal', 'local_excluded', 'nonlocal_excluded'])
local_filtered_out = 0
nonlocal_filtered_out = 0
for file in tweets_fns:
with open(os.path.join(tweets_dir, file), 'r') as fin:
fips = os.path.splitext(file)[0] # files named by <FIPS-CODE>.csv
csvreader = csv.reader(fin)
header = ['text','uid','nday','plurality']
txt_idx = header.index('text')
uid_idx = header.index('uid')
localness_idx = header.index(localness)
assert next(csvreader) == header
local_tweets = []
lt_no_happy_words = 0
non_local = []
nl_no_happy_words = 0
for line in csvreader:
txt = line[txt_idx]
uid = line[uid_idx]
if not line[localness_idx]:
continue
local = (line[localness_idx] == 'True')
if uid in bots_filter:
if local:
local_filtered_out += 1
else:
nonlocal_filtered_out += 1
continue
total_happ = 0.0
count_words = 0
for word in txt.split():
cleaned = word.lower().strip('?!.,;:()[]{}"\'')
if cleaned in happy_dict:
count_words += 1
total_happ += happy_dict[cleaned]
if count_words > 0:
h_avg_txt = total_happ / count_words
if local:
local_tweets.append(h_avg_txt)
else:
non_local.append(h_avg_txt)
else:
if local:
lt_no_happy_words += 1
else:
nl_no_happy_words += 1
local_med_h = numpy.median(local_tweets)
local_avg_h = numpy.average(local_tweets)
nonlocal_med_h = numpy.median(non_local)
nonlocal_avg_h = numpy.average(non_local)
unfiltered_med_h = numpy.median(local_tweets + non_local)
unfiltered_avg_h = numpy.average(local_tweets + non_local)
csvwriter.writerow([fips, local_med_h, local_avg_h, nonlocal_med_h, nonlocal_avg_h, unfiltered_med_h,
unfiltered_avg_h, len(local_tweets), len(non_local), lt_no_happy_words, nl_no_happy_words])
print("{0} 'local' tweets and {1} 'nonlocal' tweets filtered out from organizations for {2}.".format(local_filtered_out, nonlocal_filtered_out, localness))
process_happiness_results(scale, output_fn)
def process_happiness_results(scale, input_fn):
"""
    Go through all counties/states happiness results and filter for regions with
    sufficient tweets to produce rankings.
    :param scale: counties or states
    :param input_fn: path to the raw happiness results CSV written by compute_happiness
    :return: writes rankings to CSV
"""
tweet_threshold = 3000 # minimum "happiness" tweets for county to be considered
output_fn = "happiness_rankings_{0}_min{1}tweets.csv".format(scale, tweet_threshold)
# include county/state names for easier evaluation of results
fips_to_county = {}
with open('../resources/fips_to_names.csv', 'r') as fin:
csvreader = csv.reader(fin)
assert next(csvreader) == ['FIPS','STATE','COUNTY']
for line in csvreader:
fips = line[0]
if scale == 'counties':
if len(fips) == 4:
fips = '0' + fips
fips_to_county[fips] = '{0}, {1}'.format(line[2], line[1])
else:
fips = fips[:2]
fips_to_county[fips] = line[1]
# read in raw results by county/state from analyzing all tweets - four tables in succession for each localness metric
with open(input_fn, "r") as fin:
csvreader = csv.reader(fin)
idx = 0
localness = LOCALNESS_METRICS[idx]
header = ['{0}_fips'.format(scale), '{0}_med_h'.format(localness), '{0}_avg_h'.format(localness),
'nonlocal_med_h', 'nonlocal_avg_h', 'unfiltered_med_h', 'unfiltered_avg_h', 'total_local',
'total_nonlocal', 'local_excluded', 'nonlocal_excluded']
assert next(csvreader) == header
total_local_idx = header.index('total_local')
total_nonlocal_idx = header.index('total_nonlocal')
fips_idx = header.index('counties_fips')
local_havg_idx = header.index('{0}_avg_h'.format(localness))
nonlocal_havg_idx = header.index('nonlocal_avg_h')
unfiltered_havg_idx = header.index('unfiltered_avg_h')
# aggregate unfiltered, local, and nonlocal happiness by county/state for generating rankings
data = {}
for line in csvreader:
if line[0] == header[0]: # have reached next localness metric
idx += 1
localness = LOCALNESS_METRICS[idx]
else:
total_local = float(line[total_local_idx])
total_nonlocal = float(line[total_nonlocal_idx])
fips = fips_to_county[line[fips_idx]]
local_havg = line[local_havg_idx]
nonlocal_havg = line[nonlocal_havg_idx]
unfiltered_havg = line[unfiltered_havg_idx]
if total_local + total_nonlocal >= tweet_threshold: # if sufficiently robust number of tweets for comparing to other counties/states
pct_local = total_local / (total_local + total_nonlocal)
if fips in data:
data[fips]['{0}_local'.format(localness)] = local_havg
data[fips]['{0}_nonlocal'.format(localness)] = nonlocal_havg
data[fips]['{0}_pct_local'.format(localness)] = pct_local
data[fips]['total_local_{0}'.format(localness)] = total_local
data[fips]['total_nonlocal_{0}'.format(localness)] = total_nonlocal
else:
data[fips] = {'county' : fips,
'total_tweets' : total_local + total_nonlocal,
'total_local_{0}'.format(localness) : total_local,
'total_nonlocal_{0}'.format(localness) : total_nonlocal,
'{0}_local'.format(localness) : local_havg,
'{0}_nonlocal'.format(localness) : nonlocal_havg,
'unfiltered' : unfiltered_havg,
'{0}_pct_local'.format(localness) : pct_local}
ranks = []
unfiltered = {}
    for _ in range(len(data)):
        ranks.append({})
# sort results by unfiltered happiest to saddest
sd = OrderedDict(sorted(data.items(), key=lambda x: x[1]['unfiltered'], reverse=True))
for i, fips in enumerate(sd):
ranks[i]['county'] = fips
ranks[i]['unfiltered'] = i + 1
ranks[i]['total_tweets'] = sd[fips]['total_tweets']
unfiltered[fips] = i
for localness in LOCALNESS_METRICS:
for property in ['local','nonlocal']:
sd = {}
for k in data:
if '{0}_{1}'.format(localness, property) in data[k]:
sd[k] = data[k]
# sort happiest to saddest for localness metric + local or nonlocal
sd = OrderedDict(sorted(sd.items(), key=lambda x: x[1]['{0}_{1}'.format(localness, property)], reverse=True))
# write ranking for that metric and (non)local to the row where the unfiltered county name is (so sorting any given column by rankings has the correct county labels to understand it)
for i, fips in enumerate(sd):
ranks[unfiltered[fips]]['{0}_{1}'.format(localness, property)] = i + 1
# write out rankings
with open(output_fn, 'w') as fout:
header = ['county', 'total_tweets', 'unfiltered']
for property in ['local','nonlocal']:
for localness in LOCALNESS_METRICS:
header.append('{0}_{1}'.format(localness, property))
csvwriter = csv.DictWriter(fout, fieldnames=header, extrasaction='ignore')
csvwriter.writeheader()
for rank in ranks:
csvwriter.writerow(rank)
# generate Spearman's rho comparing unfiltered to each localness metric and counting geographies that changed dramatically
ten_pct_threshold = int(len(ranks) * 0.1)
for localness in LOCALNESS_METRICS:
for property in ['local','nonlocal']:
metric = []
uf = []
ten_pct_diff = 0
name = '{0}_{1}'.format(localness, property)
for rank in ranks:
if name in rank:
uf.append(rank['unfiltered'])
metric.append(rank[name])
if abs(rank[name] - rank['unfiltered']) >= ten_pct_threshold:
ten_pct_diff += 1
rho, pval = spearmanr(metric,uf)
print('{0}:'.format(name))
print("Spearman's rho between {0} and unfiltered rankings is {1} with a p-value of {2}.".format(name, rho, pval))
print("{0} counties out of {1} were more than {2} rankings different than the unfiltered results.".format(ten_pct_diff, len(ranks), ten_pct_threshold))
stat, pval = wilcoxon(metric, uf, zero_method="pratt")
print("Wilcoxon statistic between {0} and unfiltered rankings is {1} with a p-value of {2}.\n".format(name, stat, pval))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--scale", default = "counties", help = "compute happiness by either 'states' or 'counties'")
args = parser.parse_args()
compute_happiness(scale = args.scale)
if __name__ == "__main__":
    main()
| 49.108 | 194 | 0.564144 | 1,394 | 12,277 | 4.753228 | 0.186514 | 0.045276 | 0.019016 | 0.024298 | 0.235738 | 0.193027 | 0.147751 | 0.095684 | 0.08693 | 0.08693 | 0 | 0.011243 | 0.333469 | 12,277 | 250 | 195 | 49.108 | 0.798485 | 0.116315 | 0 | 0.165854 | 0 | 0.004878 | 0.137538 | 0.015375 | 0 | 0 | 0 | 0 | 0.019512 | 1 | 0.019512 | false | 0 | 0.043902 | 0 | 0.068293 | 0.02439 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
affb5675ca38a4b1d3c3c74ee6838fd7720f8076 | 4,239 | py | Python | rubix-stress/workload_runner.py | raunaqmorarka/presto-rubix | 3149b6385f6685f5fe934551126b6593f59da9c8 | [
"Apache-2.0"
] | 162 | 2016-07-04T05:03:52.000Z | 2022-03-29T03:31:59.000Z | rubix-stress/workload_runner.py | raunaqmorarka/presto-rubix | 3149b6385f6685f5fe934551126b6593f59da9c8 | [
"Apache-2.0"
] | 381 | 2016-07-25T04:09:36.000Z | 2022-02-11T11:39:27.000Z | rubix-stress/workload_runner.py | raunaqmorarka/presto-rubix | 3149b6385f6685f5fe934551126b6593f59da9c8 | [
"Apache-2.0"
] | 64 | 2016-07-13T05:47:14.000Z | 2022-03-10T09:03:35.000Z | import logging
import random
import time
import threading
from threading import Event
from patched_commands import PrestoCommand
class WorkloadRunner(threading.Thread):
log = logging.getLogger(__name__)
def __init__(self, exitEvent, silencePeriodEvent, tid, queries, cluster_label):
threading.Thread.__init__(self)
self.tid = "thread-" + str(tid)
self.queries = queries
self.cluster_label = cluster_label
self.exitEvent = exitEvent
self.silencePeriodEvent = silencePeriodEvent
self.failures = []
self.backOffTime = 0 # This serves as wait time in case silence period is selected
self.current_cmd = None
# Kill ongoing command to start silence period
    def kill_ongoing_command(self):
        if self.current_cmd is not None:
            # TODO: add synchronization to ensure 100% of cancellations land
self.log.warning("Thread %s Cancelling %s" % (self.tid, self.current_cmd.id))
self.current_cmd.cancel()
# Kill ongoing command for exit
def interrupt(self):
self.log.warning("Thread %s interrupted" %self.tid)
        self.kill_ongoing_command()
# Sleep when main thread has started silence period
# Or, Randomly backoff for some time to get random downscaling events
# Or, Run some query
def run(self):
while not self.exitEvent.is_set():
while self.silencePeriodEvent.is_set():
self.exitEvent.wait(5)
if self.exitEvent.is_set():
return
should_wait = random.choice([True, False, True, False, False, False])
if (should_wait) and self.backOffTime != 0:
# If decided to backoff then backoff between [10s, 120s]
timeToBackOff = min(max(10, self.backOffTime), 120)
                self.log.warning("Thread %s backing off for %d seconds" % (self.tid, timeToBackOff))
self.exitEvent.wait(timeToBackOff)
# reset backOffTime to avoid back to back backOffs
self.backOffTime = 0
else:
self.run_query()
# Run a query randomly selected from the query pool
# Sometimes cancel the submitted query after some random time
# Collect failures
def run_query(self):
idx = random.randint(0, len(self.queries) - 1)
queryName = self.queries[idx][0]
queryString = self.queries[idx][1]
start = time.time()
shouldCancelQuery = random.randint(0, 500) < 25 # cancel with very less chance
cancelTime = random.randint(10, 500) # lot of times query will finish before this time, so there will be even fewer cancellations
queryStartTime = time.time()
self.current_cmd = PrestoCommand.create(name="%s_%s" %(self.tid, queryName),
label=self.cluster_label,
query=queryString)
self.log.warning("Thread %s running query %s via Command %s" % (self.tid, queryName, self.current_cmd.id))
while not self.current_cmd.is_done(self.current_cmd.status):
if shouldCancelQuery and (time.time() - queryStartTime) > cancelTime:
self.current_cmd.cancel()
self.log.warning("Thread %s cancelled Command %s" % (self.tid, self.current_cmd.id))
self.exitEvent.wait(1)
self.current_cmd = self.current_cmd.find(self.current_cmd.id)
elapsed = time.time() - start
if self.current_cmd.status == "cancelled":
# expected
pass
elif not self.current_cmd.is_success(self.current_cmd.status):
# TODO: get actual error code and classify failures as per codes
self.failures.append(self.current_cmd.id)
self.log.warning("Thread %s Command failed %s" %(self.tid, self.current_cmd.id))
else:
self.backOffTime = elapsed
self.current_cmd = None
def log_failures(self):
if len(self.failures) == 0:
return
message = "Failures in " + self.tid + ":\n"
for failure in self.failures:
message = message + str(failure) + "\n"
message += "\n"
self.log.warning(message)
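# Driver sketch (assumes qds-sdk credentials are configured for PrestoCommand and
# that `queries` is a pool of (name, sql) tuples; the cluster label is illustrative):
#
#   exit_event, silence_event = Event(), Event()
#   runners = [WorkloadRunner(exit_event, silence_event, i, queries, 'presto-x')
#              for i in range(4)]
#   for r in runners:
#       r.start()
#   ...
#   silence_event.set()                  # pause; threads idle in run()
#   for r in runners:
#       r.kill_ongoing_command()
#   silence_event.clear()                # resume
#   exit_event.set()                     # shut down
#   for r in runners:
#       r.join()
#       r.log_failures()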
| 39.616822 | 137 | 0.624912 | 510 | 4,239 | 5.094118 | 0.317647 | 0.080447 | 0.102386 | 0.046189 | 0.100077 | 0.030793 | 0.030793 | 0.021555 | 0 | 0 | 0 | 0.011195 | 0.283557 | 4,239 | 106 | 138 | 39.990566 | 0.844254 | 0.176457 | 0 | 0.135135 | 0 | 0 | 0.062482 | 0 | 0 | 0 | 0 | 0.009434 | 0 | 1 | 0.081081 | false | 0.013514 | 0.081081 | 0 | 0.216216 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b30543d6bc96edaac43450d48578207ded1283f4 | 2,585 | py | Python | src/commands/debug.py | mdabessa/discordbot | 37f605c5218f55365d4a82914ba604f8c62266e3 | [
"MIT"
] | 5 | 2021-03-11T01:47:12.000Z | 2022-01-18T05:33:18.000Z | src/commands/debug.py | mdabessa/discordbot | 37f605c5218f55365d4a82914ba604f8c62266e3 | [
"MIT"
] | null | null | null | src/commands/debug.py | mdabessa/discordbot | 37f605c5218f55365d4a82914ba604f8c62266e3 | [
"MIT"
] | null | null | null | import modules.database as db
import modules.entity as entity
category = 'Depuração'
entity.Command.newcategory(category, 'Depuração',is_visible=False)
async def exe(message, commandpar, bot):
if commandpar != None:
cont = commandpar.split()
text = f'Executando: {cont[0]}'
if len(cont) > 1:
text += ' [' + ' '.join(cont[1:]) + ']'
m = await message.channel.send(text)
await entity.Command.trycommand(m, commandpar, bot)
else:
raise entity.CommandError('Falta algo nesse comando!')
entity.Command(name='exec', func=exe , category=category, desc=f'Executar um comando através do bot.', args=[['comando', '*'], ['parametros do comando', '']], perm=2)
async def get_all_scripts(message, commandpar, bot):
scripts = entity.Script.get_scripts()
text = 'Scripts infos:\n'
for script in scripts:
text += f''' Nome: {script.name}\n Cache: {script.cache}\n'''
text += '==========\n'
await message.channel.send(text)
entity.Command(name='get_all_scripts', func=get_all_scripts, category=category, desc='Listar todos os scripts rodando.', aliases=['gas'], perm=2)
async def get_allowed_bots(message, commandpar, bot):
bots = db.get_allowed_bots(bot.db_connection)
await message.channel.send('Bots(ids) permitidos:\n'+' ,'.join(bots))
entity.Command(name='get_allowed_bots', func=get_allowed_bots, category=category, desc='Listar todos os bots permitidos.', aliases=['gab'], perm=2)
async def add_allowed_bot(message, commandpar, bot):
if commandpar != None:
bots = db.get_allowed_bots(bot.db_connection)
if str(commandpar) in bots:
raise entity.CommandError('Esse id de bot, ja esta registrado como um `allowed_bot`')
db.add_bot(commandpar, bot.db_connection)
await message.add_reaction('✅')
else:
raise entity.CommandError('Falta parametros nesse comando!')
entity.Command(name='add_allowed_bot', func=add_allowed_bot, category=category, desc='Permitir com que um bot especifico seja respondido.', aliases=['aab'], args=[['bot_id', '*']], perm=2)
async def del_allowed_bot(message, commandpar, bot):
if commandpar != None:
db.del_bot(commandpar, bot.db_connection)
await message.add_reaction('✅')
else:
raise entity.CommandError('Falta parametros nesse comando!')
entity.Command(name='del_allowed_bot', func=del_allowed_bot, category=category, desc='Remover um bot especifico da lista de bots permitidos.', aliases=['dab'], args=[['bot_id', '*']], perm=2)
| 41.031746 | 191 | 0.677369 | 343 | 2,585 | 4.991254 | 0.28863 | 0.060748 | 0.058411 | 0.030374 | 0.442173 | 0.297897 | 0.238318 | 0.238318 | 0.143692 | 0.143692 | 0 | 0.003763 | 0.177563 | 2,585 | 62 | 192 | 41.693548 | 0.800564 | 0 | 0 | 0.272727 | 0 | 0 | 0.238298 | 0 | 0.022727 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.045455 | 0 | 0.045455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b30ec77ef09bdffed154fe1328bf919b56c53f19 | 2,877 | py | Python | tests/test_amqp_transport.py | 0x1EE7/tomodachi | 8147b16d8be19b80b3bd7c5d8ed21c9863eaaa83 | [
"MIT"
] | null | null | null | tests/test_amqp_transport.py | 0x1EE7/tomodachi | 8147b16d8be19b80b3bd7c5d8ed21c9863eaaa83 | [
"MIT"
] | null | null | null | tests/test_amqp_transport.py | 0x1EE7/tomodachi | 8147b16d8be19b80b3bd7c5d8ed21c9863eaaa83 | [
"MIT"
] | null | null | null | import os
import signal
import pytest
from typing import Any
from tomodachi.transport.amqp import AmqpTransport, AmqpException
from run_test_service_helper import start_service
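# These tests exercise tomodachi's AMQP transport helpers: routing-key
# prefixing and encoding, deterministic queue naming, and the error raised
# when publishing to an unreachable broker.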
def test_routing_key(monkeypatch: Any) -> None:
routing_key = AmqpTransport.get_routing_key('test.topic', {})
assert routing_key == 'test.topic'
routing_key = AmqpTransport.get_routing_key('test.topic', {'options': {'amqp': {'routing_key_prefix': 'prefix-'}}})
assert routing_key == 'prefix-test.topic'
def test_encode_routing_key(monkeypatch: Any) -> None:
routing_key = AmqpTransport.encode_routing_key('test-topic')
assert routing_key == 'test-topic'
routing_key = AmqpTransport.encode_routing_key('test.topic')
assert routing_key == 'test.topic'
def test_decode_routing_key(monkeypatch: Any) -> None:
routing_key = AmqpTransport.decode_routing_key('test-topic')
assert routing_key == 'test-topic'
routing_key = AmqpTransport.decode_routing_key('test.topic')
assert routing_key == 'test.topic'
def test_queue_name(monkeypatch: Any) -> None:
_uuid = '5d0b530f-5c44-4981-b01f-342801bd48f5'
queue_name = AmqpTransport.get_queue_name('test.topic', 'func', _uuid, False, {})
assert queue_name == 'b444917b9b922e8c29235737c7775c823e092c2374d1bfde071d42c637e3b4fd'
queue_name = AmqpTransport.get_queue_name('test.topic', 'func2', _uuid, False, {})
assert queue_name != 'b444917b9b922e8c29235737c7775c823e092c2374d1bfde071d42c637e3b4fd'
queue_name = AmqpTransport.get_queue_name('test.topic', 'func', _uuid, False, {'options': {'amqp': {'queue_name_prefix': 'prefix-'}}})
assert queue_name == 'prefix-b444917b9b922e8c29235737c7775c823e092c2374d1bfde071d42c637e3b4fd'
queue_name = AmqpTransport.get_queue_name('test.topic', 'func', _uuid, True, {})
assert queue_name == '540e8e5bc604e4ea618f7e0517a04f030ad1dcbff2e121e9466ddd1c811450bf'
queue_name = AmqpTransport.get_queue_name('test.topic', 'func2', _uuid, True, {})
assert queue_name == '540e8e5bc604e4ea618f7e0517a04f030ad1dcbff2e121e9466ddd1c811450bf'
queue_name = AmqpTransport.get_queue_name('test.topic', 'func', _uuid, True, {'options': {'amqp': {'queue_name_prefix': 'prefix-'}}})
assert queue_name == 'prefix-540e8e5bc604e4ea618f7e0517a04f030ad1dcbff2e121e9466ddd1c811450bf'
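# A minimal illustrative sketch (an assumption, not tomodachi's actual
# implementation) of the behaviour the asserts above pin down: competing
# consumers derive the queue name from the topic alone, so every handler
# shares one queue, while non-competing consumers also include the handler
# name, keeping queues distinct per function.
import hashlib

def _illustrative_queue_name(topic: str, func_name: str, competing: bool) -> str:
    key = topic if competing else '{}.{}'.format(topic, func_name)
    return hashlib.sha256(key.encode('utf-8')).hexdigest()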
def test_publish_invalid_credentials(monkeypatch: Any, capsys: Any, loop: Any) -> None:
services, future = start_service('tests/services/dummy_service.py', monkeypatch)
instance = services.get('test_dummy')
with pytest.raises(AmqpException):
loop.run_until_complete(AmqpTransport.publish(instance, 'data', 'test.topic', wait=True))
os.kill(os.getpid(), signal.SIGINT)
loop.run_until_complete(future)
out, err = capsys.readouterr()
assert 'Unable to connect [amqp] to 127.0.0.1:54321' in err
assert out == ''
| 42.308824 | 138 | 0.75113 | 323 | 2,877 | 6.427245 | 0.229102 | 0.105973 | 0.074181 | 0.100674 | 0.638728 | 0.638728 | 0.638728 | 0.638728 | 0.575145 | 0.575145 | 0 | 0.112306 | 0.127216 | 2,877 | 67 | 139 | 42.940299 | 0.714456 | 0 | 0 | 0.155556 | 0 | 0 | 0.295794 | 0.161627 | 0 | 0 | 0 | 0 | 0.311111 | 1 | 0.111111 | false | 0 | 0.133333 | 0 | 0.244444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b30f1e158c5e65ac948bbdec93ad290aa5c96a51 | 3,451 | py | Python | enteletaor_lib/modules/redis/__init__.py | Seabreg/enteletaor | d1fbda5fcd68677fbce76e3ed4e79a886b8ad9db | [
"BSD-3-Clause"
] | 159 | 2016-03-05T09:57:19.000Z | 2022-02-20T02:45:03.000Z | enteletaor_lib/modules/redis/__init__.py | Seabreg/enteletaor | d1fbda5fcd68677fbce76e3ed4e79a886b8ad9db | [
"BSD-3-Clause"
] | 8 | 2016-03-06T13:02:45.000Z | 2020-06-12T08:19:16.000Z | enteletaor_lib/modules/redis/__init__.py | Seabreg/enteletaor | d1fbda5fcd68677fbce76e3ed4e79a886b8ad9db | [
"BSD-3-Clause"
] | 30 | 2016-03-06T16:52:42.000Z | 2021-03-31T09:46:39.000Z | # -*- coding: utf-8 -*-
#
# Enteletaor - https://github.com/cr0hn/enteletaor
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import logging
from .. import IModule
from ...libs.core.structs import CommonData
from ...libs.core.models import StringField, IntegerField
from .redis_dump import action_redis_dump
from .redis_shell import action_redis_shell
from .redis_info import action_redis_server_info
from .redis_cache import action_redis_cache_poison
from .redis_discover_db import action_redis_discover_dbs
from .redis_clients import action_redis_server_connected
from .redis_disconnect import action_redis_server_disconnect
from .cmd_actions import parser_redis_dump, parser_redis_server_disconnect, parser_redis_server_cache_poison
log = logging.getLogger()
# ----------------------------------------------------------------------
class ModuleModel(CommonData):
target = StringField(required=True)
port = IntegerField(default=6379)
db = IntegerField(default=0)
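# Defaults above: 6379 is the standard Redis port, and db 0 is Redis'
# default logical database.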
# ----------------------------------------------------------------------
class RedisModule(IModule):
"""
Try to extract information from remote processes
"""
__model__ = ModuleModel
__submodules__ = {
'dump': dict(
help="dumps all keys in Redis database",
cmd_args=parser_redis_dump,
action=action_redis_dump
),
		'shell': dict(
			help="open a remote shell through the Redis server",
			action=action_redis_shell
		),
		'info': dict(
			help="get information from the remote Redis server",
			action=action_redis_server_info
		),
'connected': dict(
help="get connected users to Redis server",
action=action_redis_server_connected
),
'disconnect': dict(
help="disconnect one or all users from Redis server",
cmd_args=parser_redis_server_disconnect,
action=action_redis_server_disconnect
),
'discover-dbs': dict(
help="discover all Redis DBs at server",
action=action_redis_discover_dbs
),
'cache': dict(
help="poison remotes cache using Redis server",
action=action_redis_cache_poison,
cmd_args=parser_redis_server_cache_poison
),
}
name = "redis"
description = "some attacks over Redis service"
| 38.344444 | 118 | 0.753115 | 460 | 3,451 | 5.48913 | 0.419565 | 0.06099 | 0.047129 | 0.036436 | 0.145347 | 0.080792 | 0.053861 | 0.053861 | 0.053861 | 0.053861 | 0 | 0.003388 | 0.144596 | 3,451 | 89 | 119 | 38.775281 | 0.851965 | 0.488554 | 0 | 0.12 | 0 | 0 | 0.177457 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.24 | 0 | 0.42 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b31211dcdebcd9d087794655b7c39c4690abb81c | 1,110 | py | Python | handler.py | altbdoor/legend-of-8ball-bot | d6be034bc6e440c2b99cfbf2c7f608f15af3f537 | [
"WTFPL"
] | null | null | null | handler.py | altbdoor/legend-of-8ball-bot | d6be034bc6e440c2b99cfbf2c7f608f15af3f537 | [
"WTFPL"
] | null | null | null | handler.py | altbdoor/legend-of-8ball-bot | d6be034bc6e440c2b99cfbf2c7f608f15af3f537 | [
"WTFPL"
] | null | null | null | import random
import time
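# Minimal IRC-style chat handlers for a magic 8-ball bot: send_bytes frames a
# PRIVMSG line, and async_fn answers any message starting with '!8ball'.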
def send_bytes(con, channel, msg):
con.send((f'PRIVMSG #{channel} : {msg}\r\n').encode('utf-8'))
def sync_fn(con, channel, epoch, user, msg):
pass
def async_fn(con, channel, epoch, user, msg):
if msg.startswith('!8ball'):
answer_list = [
'it is certain',
'it is decidedly so',
'without a doubt',
'yes - definitely',
'you may rely on it',
'as I see it, yes',
'most likely',
'outlook good',
'yes',
'signs point to yes',
'reply hazy, try again',
'ask again later',
'better not tell you now',
'cannot predict now',
'concentrate and ask again',
"don't count on it",
'my reply is no',
'my sources say no',
'outlook not so good',
'very doubtful',
]
time.sleep(1)
        answer = random.choice(answer_list)  # pick one response uniformly at random
        send_bytes(con, channel, f'[8ball] @{user}, {answer}')
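# A minimal, self-contained smoke test (assumption: any object exposing a
# .send(bytes) method can stand in for the real IRC socket connection):
if __name__ == '__main__':
    class _FakeCon:
        def send(self, payload):
            print(payload.decode('utf-8'), end='')

    async_fn(_FakeCon(), 'somechannel', time.time_ns(), 'someuser', '!8ball will it work?')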
| 27.073171 | 81 | 0.502703 | 136 | 1,110 | 4.036765 | 0.588235 | 0.07286 | 0.043716 | 0.069217 | 0.087432 | 0.087432 | 0 | 0 | 0 | 0 | 0 | 0.008584 | 0.37027 | 1,110 | 40 | 82 | 27.75 | 0.776824 | 0 | 0 | 0 | 0 | 0 | 0.366667 | 0.024324 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0.030303 | 0.060606 | 0 | 0.151515 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3129e2827a5d1653cdcb5e64182818266d524f0 | 518 | py | Python | main.py | gruporofex/alexa_rofex | 338eb1e08da37a45f44eaab70a633e255d4b2be7 | [
"Apache-2.0"
] | null | null | null | main.py | gruporofex/alexa_rofex | 338eb1e08da37a45f44eaab70a633e255d4b2be7 | [
"Apache-2.0"
] | null | null | null | main.py | gruporofex/alexa_rofex | 338eb1e08da37a45f44eaab70a633e255d4b2be7 | [
"Apache-2.0"
] | 1 | 2019-06-02T14:17:14.000Z | 2019-06-02T14:17:14.000Z | import logging
from configuration.config import LOGGING_LEVEL
from alexa_handlers.AlexaForRFXHandler import AlexaForRFXHandler
"""
Main entry point for the Lambda function.
"""
logging.basicConfig(format='%(asctime)s %(message)s')
logging.getLogger().setLevel(LOGGING_LEVEL)
def lambda_handler(event, context):
logging.info("Executing main lambda_handler for AlexaForRFXHandler class")
alexa = AlexaForRFXHandler()
alexa_response = alexa.process_request(event, context)
return alexa_response
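# A minimal local invocation sketch (the event payload below is an illustrative
# Alexa request skeleton, not taken from this project; running it also needs
# the project's configuration and handler modules to be importable):
if __name__ == '__main__':
    sample_event = {'request': {'type': 'LaunchRequest'}, 'session': {'new': True}}
    print(lambda_handler(sample_event, None))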
| 21.583333 | 78 | 0.787645 | 59 | 518 | 6.779661 | 0.576271 | 0.065 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.127413 | 518 | 23 | 79 | 22.521739 | 0.884956 | 0 | 0 | 0 | 0 | 0 | 0.17382 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.3 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b314cd99dbfe61e94cec61ba01708bcc90cd79e7 | 236 | py | Python | course/source/exercises/E001/test.py | sebastian-mutz/integrate | ce2a83358e2eb7f482d4fb70d167b1eba2abf2a8 | [
"MIT"
] | 2 | 2021-05-17T14:23:50.000Z | 2021-08-24T13:07:42.000Z | course/source/exercises/E001/test.py | sebastian-mutz/integrate | ce2a83358e2eb7f482d4fb70d167b1eba2abf2a8 | [
"MIT"
] | null | null | null | course/source/exercises/E001/test.py | sebastian-mutz/integrate | ce2a83358e2eb7f482d4fb70d167b1eba2abf2a8 | [
"MIT"
] | 1 | 2021-08-24T13:04:01.000Z | 2021-08-24T13:04:01.000Z | # wow. such script. many calculation. wow.
# let's do some operations and save the results in variables
a = 20 + 22
b = 2077 - 93
c = 578 * 4
d = 1332 / 2
e = 16 ** 2
print(a, b, c, d, e) # tell the computer to show us the values of each variable
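# expected output: 42 1984 2312 666.0 256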
| 21.454545 | 80 | 0.686441 | 49 | 236 | 3.306122 | 0.816327 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 0.207627 | 236 | 10 | 81 | 23.6 | 0.748663 | 0.661017 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b3171b0e547151b36dca4c7d117c611b0459a447 | 5,070 | py | Python | scripts/test_script.py | OmoooJ/gluon-facex | c5606fc9e2223c6d6dce2aaf2858d83f5eac1d54 | [
"MIT"
] | 257 | 2018-12-28T12:02:28.000Z | 2021-11-25T08:43:52.000Z | scripts/test_script.py | OmoooJ/gluon-facex | c5606fc9e2223c6d6dce2aaf2858d83f5eac1d54 | [
"MIT"
] | 37 | 2019-01-10T02:31:12.000Z | 2020-11-09T03:09:40.000Z | scripts/test_script.py | OmoooJ/gluon-facex | c5606fc9e2223c6d6dce2aaf2858d83f5eac1d54 | [
"MIT"
] | 57 | 2018-12-29T01:18:31.000Z | 2021-09-14T14:41:35.000Z | # -*- coding: utf-8 -*-
# Author: pistonyang@gmail.com
import argparse
import os
import mxnet as mx
import sklearn
import numpy as np
from mxnet import gluon, nd
from mxnet.gluon.data import DataLoader
from mxnet.gluon.data.vision import transforms
from gluonfr.model_zoo import get_model
from gluonfr.data import get_recognition_dataset
from gluonfr.metrics.verification import FaceVerification
parser = argparse.ArgumentParser(description='Test a model for face recognition.')
parser.add_argument('--batch-size', type=int, default=512,
help='Test batch size.')
parser.add_argument('-n', '--model', type=str, default='l_se_resnet50v2',
help='Model to test.')
parser.add_argument('--model-params', type=str, required=True,
help='Model params to load.')
parser.add_argument('-t', '--val-dataset', dest='target', type=str, default='lfw',
                    help='Val datasets, default is lfw. '
                         'Options are lfw, calfw, cplfw, agedb_30, cfp_ff, vgg2_fp.')
parser.add_argument('--export', action='store_true',
help='Whether to export model.')
parser.add_argument('--export-path', type=str, default='',
help='Path to save export files.')
parser.add_argument('--dtype', type=str, default='float32',
help='data type for training. default is float32')
parser.add_argument('--ctx', type=str, default="0",
help='Use GPUs to train.')
parser.add_argument('--hybrid', action='store_true',
help='Whether to use hybrid.')
opt = parser.parse_args()
assert opt.batch_size % len(opt.ctx.split(",")) == 0, "Per batch on each GPU must be same."
assert opt.dtype in ('float32', 'float16'), "Data type only support FP16/FP32."
transform_test = transforms.Compose([
transforms.ToTensor()
])
def transform_test_flip(data, isf=False):
flip_data = nd.flip(data, axis=1)
if isf:
data = nd.transpose(data, (2, 0, 1)).astype('float32')
flip_data = nd.transpose(flip_data, (2, 0, 1)).astype('float32')
return data, flip_data
return transform_test(data), transform_test(flip_data)
export_path = os.path.dirname(opt.model_params) if opt.export_path == '' else opt.export_path
ctx = [mx.gpu(int(i)) for i in opt.ctx.split(",")]
batch_size = opt.batch_size
targets = opt.target
val_sets = [get_recognition_dataset(name, transform=transform_test_flip) for name in targets.split(",")]
val_datas = [DataLoader(dataset, batch_size, last_batch='keep') for dataset in val_sets]
test_net = get_model(opt.model, need_cls_layer=False)
test_net.cast(opt.dtype)
test_net.load_parameters(opt.model_params, ctx=ctx, ignore_extra=True)
def validate(nfolds=10):
metric = FaceVerification(nfolds)
metric_flip = FaceVerification(nfolds)
for loader, name in zip(val_datas, targets.split(",")):
metric.reset()
for i, batch in enumerate(loader):
data0s = gluon.utils.split_and_load(batch[0][0][0], ctx, even_split=False)
data1s = gluon.utils.split_and_load(batch[0][1][0], ctx, even_split=False)
data0s_flip = gluon.utils.split_and_load(batch[0][0][1], ctx, even_split=False)
data1s_flip = gluon.utils.split_and_load(batch[0][1][1], ctx, even_split=False)
issame_list = gluon.utils.split_and_load(batch[1], ctx, even_split=False)
embedding0s = [test_net(X) for X in data0s]
embedding1s = [test_net(X) for X in data1s]
embedding0s_flip = [test_net(X) for X in data0s_flip]
embedding1s_flip = [test_net(X) for X in data1s_flip]
emb0s = [nd.L2Normalization(e, mode='instance') for e in embedding0s]
emb1s = [nd.L2Normalization(e, mode='instance') for e in embedding1s]
for embedding0, embedding1, issame in zip(emb0s, emb1s, issame_list):
metric.update(issame, embedding0, embedding1)
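            # Flip-augmented evaluation: concatenate each embedding with the
            # embedding of its horizontally flipped image, then L2-normalize
            # the fused vector before verification.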
emb0s_flip = [nd.L2Normalization(nd.concatenate([e, ef], 1), mode='instance')
for e, ef in zip(embedding0s, embedding0s_flip)]
emb1s_flip = [nd.L2Normalization(nd.concatenate([e, ef], 1), mode='instance')
for e, ef in zip(embedding1s, embedding1s_flip)]
for embedding0, embedding1, issame in zip(emb0s_flip, emb1s_flip, issame_list):
metric_flip.update(issame, embedding0, embedding1)
        _, _, accuracy, _, _, _, accuracy_std = metric.get()
print("{}: \t{:.6f}+-{:.6f}".format(name, accuracy, accuracy_std))
_, _, accuracy, _, _, _, accuracy_std = metric_flip.get()
print("{}-flip: {:.6f}+-{:.6f}".format(name, accuracy, accuracy_std))
if __name__ == '__main__':
if opt.hybrid:
test_net.hybridize()
validate()
if opt.export:
assert opt.hybrid is True, 'Export need --hybrid.'
        export_name = os.path.join(export_path, opt.model)
        test_net.export(export_name)
        print('export model is saved at {}'.format(export_name))
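# Example invocation (paths and values are illustrative):
#   python scripts/test_script.py -n l_se_resnet50v2 --model-params model-best.params \
#       -t lfw --ctx 0 --hybrid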
| 44.867257 | 104 | 0.65641 | 690 | 5,070 | 4.652174 | 0.268116 | 0.025234 | 0.047664 | 0.028037 | 0.241433 | 0.209657 | 0.17134 | 0.099065 | 0.038006 | 0.038006 | 0 | 0.023471 | 0.210059 | 5,070 | 112 | 105 | 45.267857 | 0.778027 | 0.009862 | 0 | 0 | 0 | 0 | 0.135738 | 0 | 0 | 0 | 0 | 0 | 0.032967 | 1 | 0.021978 | false | 0 | 0.120879 | 0 | 0.164835 | 0.032967 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b317c5d6b720147d41037a618b469ae428deb28e | 7,043 | py | Python | gamepyd/readPad.py | Marceline/PyXinput | fa60e215e99b8c0fe95767c21fd9ba239a0719bd | [
"Unlicense"
] | 2 | 2020-11-26T09:23:35.000Z | 2020-11-27T13:36:46.000Z | gamepyd/readPad.py | Marceline/PyXinput | fa60e215e99b8c0fe95767c21fd9ba239a0719bd | [
"Unlicense"
] | 7 | 2020-10-03T16:38:26.000Z | 2020-10-03T17:17:00.000Z | gamepyd/readPad.py | Marceline/PyXinput | fa60e215e99b8c0fe95767c21fd9ba239a0719bd | [
"Unlicense"
] | 1 | 2021-06-04T17:44:55.000Z | 2021-06-04T17:44:55.000Z | """Read the current state of Xbox Controllers"""
from ctypes import *
import pandas as pd
from time import time_ns
# Xinput DLL
try:
_xinput = windll.xinput1_4
except OSError:  # xinput1_4 unavailable; fall back to the older XInput 1.3 DLL
_xinput = windll.xinput1_3
class _xinput_gamepad(Structure):
"""CType XInput Gamepad Object"""
_fields_ = [
("wButtons",
c_ushort), #Contains all button information in one integer
("LT", c_ubyte), #Left Trigger
("RT", c_ubyte), #Right Trigger
("Lx", c_short), #Right stick horizontal movement
("Ly", c_short), #Right stick vertical movement
("Rx", c_short), #Left stick horizontal movement
("Ry", c_short)
] #Left stick vertical movement
fields = [f[0] for f in _fields_]
def __dict__(self):
return {field: self.__getattribute__(field) for field in self.fields}
def __str__(self):
return str(self.__dict__())
def __getitem__(self, string):
return self.__dict__()[string]
class _xinput_state(Structure):
"""CType XInput State Object"""
_fields_ = [("dwPacketNumber", c_uint),
("XINPUT_GAMEPAD", _xinput_gamepad)]
    fields = [f[0] for f in _fields_]
def __dict__(self):
return {field: self.__getattribute__(field) for field in self.fields}
def __str__(self):
return str(self.__dict__())
def __getitem__(self, string):
return self.__dict__()[string]
class rPad(object):
"""XInput Controller State reading object"""
_buttons = { # All possible button values
'UP': 0x0001,
'DOWN': 0x0002,
'LEFT': 0x0004,
'RIGHT': 0x0008,
'START': 0x0010,
'SELECT': 0x0020,
'L3': 0x0040,
'R3': 0x0080,
'LB': 0x0100,
'RB': 0x0200,
'A': 0x1000,
'B': 0x2000,
'X': 0x4000,
'Y': 0x8000
}
def __init__(self, ControllerID: int = 1, absolute: bool = False):
"""
Initialise Controller object.
ControllerID Int Position of gamepad.
"""
        self.ControllerID = ControllerID
        self.absolute = absolute
        self.dwPacketNumber = c_uint()
        if self.absolute:
            print(f"Now reading gamepad#{ControllerID} as ABSOLUTE values")
        else:
            print(f"Now reading gamepad#{ControllerID}")
@property
def read(self):
"""
Returns the current gamepad state.
"""
"""If you wanna optimize reading, this is THE method to look at first"""
state = _xinput_state()
_xinput.XInputGetState(self.ControllerID - 1, pointer(state))
self.dwPacketNumber = state.dwPacketNumber
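        # Each named button occupies one bit of wButtons; AND-ing with the mask
        # and comparing back tests whether that single bit is set.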
check = lambda x: (state.XINPUT_GAMEPAD.wButtons & x) == x
buttons = {name: check(value) for name, value in rPad._buttons.items()}
analogs = state.XINPUT_GAMEPAD.__dict__()
del analogs['wButtons']
return {**analogs, **buttons}
def __loop(self, line, start, wait_ns, i): #Provides an easy loop
if (time_ns() >= start[0] + wait_ns):
moment = self.read # will return a dictionary for instantaneous state of the controller
moment['time(ns)'] = time_ns() #store current time in nanoseconds
            moment['timeDelta(ms)'] = (time_ns() - start[0]) / 10**6  # time difference in milliseconds
moment['error(ms)'] = moment['timeDelta(ms)'] - wait_ns / 10**6
line.append(moment)
i[0] += 1
#print(f"time elapsed={((time_ns()-start)/10**6)/1000}")
start[0] = time_ns()
def __write(
self, line, type: str,
dest: str): #Provides writing facility given a type and location
supportedTypes = ["df"]
        if type not in supportedTypes:
            print(
                f"sorry, currently supported types are: {str(supportedTypes)[1:-1]}"
            )
            return None
if (type == "df"):
output = pd.DataFrame(line)
if not self.absolute:
#The following line is technically inaccurate as Bryan says "Axis are -32768 to 32767"
output[['Lx', 'Ly', 'Rx',
'Ry']] = output[['Lx', 'Ly', 'Rx', 'Ry']] / 32768
output[['LT', 'RT']] = output[['LT', 'RT']] / 255
        # save to disk if required
        if (len(dest) > 0 and type == "df"):
            output.to_feather(dest)
        return output
def record(self,
duration: float = 5,
rate: float = float(1 / 120),
file: str = "",
type="df"):
"""
Records for a given duration at a fixed rate, possibly to a file
"""
#Setup loop parameters
line = []
start = [time_ns()]
count = duration // rate
wait_ns = rate * 10**9
i = [0]
        # run the timed loop
while (i[0] < count):
self.__loop(line, start, wait_ns, i)
        # write to disk if wanted, then return the recording
        return self.__write(line, type, file)
def capture(self,
stopper,
rate: float = float(1 / 120),
file: str = "",
type="df"):
"""
Records till mentioned button is pressed at a fixed rate, possibly to a file
"""
if stopper not in self._buttons:
print("Choose a button label to end recording please")
print(f"Your choices are ${self._buttons}")
return 1
#Setup loop parameters
line = [self.read]
start = [time_ns()]
wait_ns = rate * 10**9
i = [0]
while not bool((line[-1])[stopper]):
self.__loop(line, start, wait_ns, i)
#write to disk if wanted
return self.__write(line, type, file)
def main():
"""Test the functionality of the rPad object"""
from time import sleep
print('Testing controller in position 1:')
print(
"This will just take a second. We'll look at the controller values in 200 milli-second intervals:"
)
# Initialise Controller
con = rPad(1)
# Loop printing controller state and buttons held
for i in range(5):
print(f"{i}---------------------------------------------")
print(f'State:{con.read}')
print("---------------------------------------------")
sleep(0.2)
    print(
        "Better yet, you can use record() to sample as many times as desired for any required duration."
    )
    print(
        f"It returns a dataframe, and can even write it to a file if you supply a filename.\n {con.record(1).head()}"
    )
print("Do note that the final three columns are metadata.")
if __name__ == '__main__':
main()
| 31.868778 | 129 | 0.552321 | 832 | 7,043 | 4.519231 | 0.325721 | 0.014362 | 0.01516 | 0.011968 | 0.207979 | 0.170745 | 0.137766 | 0.117021 | 0.10266 | 0.10266 | 0 | 0.031198 | 0.317336 | 7,043 | 220 | 130 | 32.013636 | 0.750832 | 0.192106 | 0 | 0.22973 | 0 | 0.013514 | 0.167094 | 0.034773 | 0 | 0 | 0.015373 | 0 | 0 | 1 | 0.087838 | false | 0 | 0.027027 | 0.040541 | 0.25 | 0.087838 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |