hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bf0736d5b20e13bca565ff9aa02b56f4f4cdfd07 | 1,708 | py | Python | randomwordz/words.py | noyoshi/randomwordz | cc785456465abd2234a4449bb559374c28660814 | [
"BSD-3-Clause"
] | null | null | null | randomwordz/words.py | noyoshi/randomwordz | cc785456465abd2234a4449bb559374c28660814 | [
"BSD-3-Clause"
] | null | null | null | randomwordz/words.py | noyoshi/randomwordz | cc785456465abd2234a4449bb559374c28660814 | [
"BSD-3-Clause"
] | null | null | null | # https://stackoverflow.com/questions/6028000/how-to-read-a-static-file-from-inside-a-python-package
# see above how to read in words from the files...
try:
import importlib.resources as pkg_resources
except ImportError:
# Try backported to PY<37 `importlib_resources`.
import importlib_resources as pkg_resources
from random import choice as randchoice
from random import shuffle
class WordGenerator:
def __init__(self, debug=False):
self.data = {}
self.codes = [
"ADJ", "ADP", "CONJ", "DET", "NOUN",
"NUM", "PRON", "PRT", "VERB", "NAME"
]
self.debug = debug
self._load()
def _load(self):
for name in self.codes:
self.data[name] = pkg_resources.read_text(
"randomwordz.data",
f"{name}.txt"
).split('\n')
def _debug_msg(self, code):
if code not in self.data and self.debug:
print(f"Code {code} is invalid, try one of these:")
print(self.codes)
def get_all(self, code):
"""Returns all of a certain word type"""
code = code.upper()
self._debug_msg(code)
return list(self.data.get(code, []))
def get_all_random(self, code):
"""Returns all of a certain word type, randomized"""
code = code.upper()
self._debug_msg(code)
l = list(self.data.get(code, []))
shuffle(l)
return l
def get_random(self, code):
"""Returns a random word from the specified code"""
code = code.upper()
self._debug_msg(code)
        try:
            return randchoice(self.data.get(code, []))
        except IndexError:
            # choice() raises IndexError on an empty list (e.g. an unknown code)
            return ""
| 28.949153 | 100 | 0.580211 | 218 | 1,708 | 4.43578 | 0.394495 | 0.055843 | 0.046536 | 0.05274 | 0.282316 | 0.24302 | 0.164426 | 0.074457 | 0.074457 | 0 | 0 | 0.0075 | 0.297424 | 1,708 | 58 | 101 | 29.448276 | 0.798333 | 0.18911 | 0 | 0.190476 | 0 | 0 | 0.076135 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.119048 | 0 | 0.380952 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
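A quick usage sketch for `WordGenerator`, assuming the `randomwordz` package is installed with its `randomwordz/data/*.txt` word lists in place:

```python
from randomwordz.words import WordGenerator

gen = WordGenerator(debug=True)
print(gen.get_random("noun"))          # one random noun (codes are case-insensitive)
print(gen.get_all_random("adj")[:5])   # five entries from a shuffled copy of ADJ.txt
print(gen.get_random("xyz"))           # unknown code: debug prints the valid codes, returns ""
```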
bf0a0817fceb6cb3452738d6ba612af45126de33 | 588 | py | Python | python/Exercicios/ex079.py | Robert-Marchinhaki/primeiros-passos-Python | 515c2c418bfb941bd9af14cf598eca7fe2985592 | [
"MIT"
] | null | null | null | python/Exercicios/ex079.py | Robert-Marchinhaki/primeiros-passos-Python | 515c2c418bfb941bd9af14cf598eca7fe2985592 | [
"MIT"
] | null | null | null | python/Exercicios/ex079.py | Robert-Marchinhaki/primeiros-passos-Python | 515c2c418bfb941bd9af14cf598eca7fe2985592 | [
"MIT"
] | null | null | null | numeros = list()
while True:
    digitado = int(input('Enter a value: '))
    if digitado not in numeros:
        numeros.append(digitado)
        print('Value added successfully...')
    else:
        print('Error! Value already entered')
    parada = str(input('Do you want to add more values [Y/N]? ')).upper().strip()
    while parada not in ('Y', 'N'):
        parada = str(input('Do you want to add more values [Y/N]? ')).upper().strip()
    if parada == 'N':
        break
numeros.sort()
print(f'The values entered were: {numeros} (in sorted order)') | 29.4 | 81 | 0.627551 | 75 | 588 | 4.92 | 0.56 | 0.0271 | 0.075881 | 0.097561 | 0.271003 | 0.271003 | 0.271003 | 0.271003 | 0.271003 | 0.271003 | 0 | 0 | 0.221088 | 588 | 20 | 82 | 29.4 | 0.805677 | 0 | 0 | 0.125 | 0 | 0 | 0.339559 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.1875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bf0e7375f7aee88a0021d0ee0b3932c8be01309f | 3,403 | py | Python | eureka/S4_generate_lightcurves/clipping.py | afeinstein20/Eureka | 7c330086ff7978b81d0f6ebb83a88c0bee01dc50 | [
"MIT"
] | null | null | null | eureka/S4_generate_lightcurves/clipping.py | afeinstein20/Eureka | 7c330086ff7978b81d0f6ebb83a88c0bee01dc50 | [
"MIT"
] | null | null | null | eureka/S4_generate_lightcurves/clipping.py | afeinstein20/Eureka | 7c330086ff7978b81d0f6ebb83a88c0bee01dc50 | [
"MIT"
] | null | null | null | import numpy as np
from astropy.convolution import Box1DKernel, convolve
from astropy.stats import sigma_clip
def clip_outliers(data, log, wavelength, sigma=10, box_width=5, maxiters=5, fill_value='mask', verbose=False):
'''Find outliers in 1D time series.
Be careful when using this function on a time-series with known astrophysical variations. The variable
box_width should be set to be significantly smaller than any astrophysical variation timescales otherwise
these signals may be clipped.
Parameters
----------
data: ndarray (1D, float)
The input array in which to identify outliers
log: logedit.Logedit
The open log in which notes from this step can be added.
wavelength: float
The wavelength currently under consideration.
sigma: float
The number of sigmas a point must be from the rolling mean to be considered an outlier
box_width: int
The width of the box-car filter (used to calculated the rolling median) in units of number of data points
maxiters: int
The number of iterations of sigma clipping that should be performed.
fill_value: string or float
        Either the string 'mask' to mask the outlier values, 'boxcar' to replace data with the mean from the box-car filter, or a constant float-type fill value.
    verbose: bool
        If True, log the number of outliers identified at this wavelength.

    Returns
    -------
    data: ndarray (1D, float)
        An array with the same dimensions as the input array with outliers replaced with fill_value.
    n_outliers: int
        The number of data points that were flagged as outliers.
Notes
-----
History:
- Jan 29-31, 2022 Taylor Bell
Initial version, added logging
'''
kernel = Box1DKernel(box_width)
# Compute the moving mean
smoothed_data = convolve(data, kernel, boundary='extend')
# Compare data to the moving mean (to remove astrophysical signals)
residuals = data-smoothed_data
# Sigma clip residuals to find bad points in data
residuals = sigma_clip(residuals, sigma=sigma, maxiters=maxiters, cenfunc=np.ma.median)
outliers = np.ma.getmaskarray(residuals)
if np.any(outliers) and verbose:
log.writelog('Identified {} outliers for wavelength {}'.format(np.sum(outliers), wavelength))
# Replace clipped data
    if fill_value == 'mask':
        data = np.ma.masked_where(outliers, data)
    elif fill_value == 'boxcar':
data = replace_moving_mean(data, outliers, kernel)
else:
data[outliers] = fill_value
return data, np.sum(outliers)
def replace_moving_mean(data, outliers, kernel):
'''Replace clipped values with the mean from a moving mean.
Parameters
----------
data: ndarray (1D, float)
The input array in which to replace outliers
    outliers: ndarray (1D, bool)
        Boolean mask marking which elements of data are outliers to replace
kernel: astropy.convolution.Kernel1D
The kernel used to compute the moving mean.
Returns
-------
    data: ndarray (1D, float)
        An array with the same dimensions as the input array with outliers replaced by the moving mean.
Notes
-----
History:
- Jan 29, 2022 Taylor Bell
Initial version
'''
# First set outliers to NaN so they don't bias moving mean
data[outliers] = np.nan
smoothed_data = convolve(data, kernel, boundary='extend')
# Replace outliers with value of moving mean
data[outliers] = smoothed_data[outliers]
return data
| 36.202128 | 161 | 0.685572 | 459 | 3,403 | 5.03268 | 0.337691 | 0.031169 | 0.028139 | 0.038095 | 0.245022 | 0.220779 | 0.190476 | 0.152381 | 0.12987 | 0.12987 | 0 | 0.010085 | 0.242433 | 3,403 | 94 | 162 | 36.202128 | 0.885958 | 0.600353 | 0 | 0.086957 | 0 | 0 | 0.05815 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.130435 | 0 | 0.304348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
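A sanity check for `clip_outliers` on synthetic data; `log` is only used when `verbose=True`, so a `None` placeholder stands in for Eureka's `logedit` object here:

```python
import numpy as np
from eureka.S4_generate_lightcurves.clipping import clip_outliers

rng = np.random.default_rng(0)
flux = 1.0 + 1e-4 * rng.standard_normal(1000)
flux[123] += 0.05   # inject one obvious outlier into the time series
clipped, n = clip_outliers(flux, log=None, wavelength=1.0, sigma=10,
                           box_width=5, fill_value='boxcar', verbose=False)
print(n, clipped[123])  # the spike (and possibly its smoothed neighbours) is replaced
```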
bf101eee860699b761e7cd830f6392a50c600f29 | 3,164 | py | Python | EduSim/Envs/KSS/meta/Learner.py | bigdata-ustc/EduSim | 849eed229c24615e5f2c3045036311e83c22ea68 | [
"MIT"
] | 18 | 2019-11-11T03:45:35.000Z | 2022-02-09T15:31:51.000Z | EduSim/Envs/KSS/meta/Learner.py | ghzhao78506/EduSim | cb10e952eb212d8a9344143f889207b5cd48ba9d | [
"MIT"
] | 3 | 2020-10-23T01:05:57.000Z | 2021-03-16T12:12:24.000Z | EduSim/Envs/KSS/meta/Learner.py | bigdata-ustc/EduSim | 849eed229c24615e5f2c3045036311e83c22ea68 | [
"MIT"
] | 6 | 2020-06-09T21:32:00.000Z | 2022-03-12T00:25:18.000Z | # coding: utf-8
# 2019/11/26 @ tongshiwei
import numpy as np
import random
import math
import networkx as nx
from EduSim.Envs.meta import MetaLearner, MetaInfinityLearnerGroup, MetaLearningModel, Item
from EduSim.Envs.shared.KSS_KES.KS import influence_control
__all__ = ["Learner", "LearnerGroup"]
class LearningModel(MetaLearningModel):
def __init__(self, state, learning_target, knowledge_structure, last_visit=None):
self._state = state
self._target = learning_target
self._ks = knowledge_structure
self._ks_last_visit = last_visit
def step(self, state, knowledge):
if self._ks_last_visit is not None:
if knowledge not in influence_control(
self._ks, state, self._ks_last_visit, allow_shortcut=False, target=self._target,
)[0]:
return
self._ks_last_visit = knowledge
# capacity growth function
discount = math.exp(sum([(5 - state[node]) for node in self._ks.predecessors(knowledge)] + [0]))
ratio = 1 / discount
inc = (5 - state[knowledge]) * ratio * 0.5
def _promote(_ind, _inc):
state[_ind] += _inc
if state[_ind] > 5:
state[_ind] = 5
for node in self._ks.successors(_ind):
_promote(node, _inc * 0.5)
_promote(knowledge, inc)
class Learner(MetaLearner):
def __init__(self,
initial_state,
knowledge_structure: nx.DiGraph,
learning_target: set,
_id=None,
seed=None):
super(Learner, self).__init__(user_id=_id)
self.learning_model = LearningModel(
initial_state,
learning_target,
knowledge_structure,
)
self.structure = knowledge_structure
self._state = initial_state
self._target = learning_target
self._logs = []
self.random_state = np.random.RandomState(seed)
def update_logs(self, logs):
self._logs = logs
@property
def profile(self):
return {
"id": self.id,
"logs": self._logs,
"target": self.target
}
def learn(self, learning_item: Item):
self.learning_model.step(self._state, learning_item.knowledge)
@property
def state(self):
return self._state
def response(self, test_item: Item) -> ...:
return self._state[test_item.knowledge]
@property
def target(self):
return self._target
class LearnerGroup(MetaInfinityLearnerGroup):
def __init__(self, knowledge_structure, seed=None):
super(LearnerGroup, self).__init__()
self.knowledge_structure = knowledge_structure
self.random_state = np.random.RandomState(seed)
def __next__(self):
knowledge = self.knowledge_structure.nodes
return Learner(
[self.random_state.randint(-3, 0) - (0.1 * i) for i, _ in enumerate(knowledge)],
self.knowledge_structure,
set(self.random_state.choice(len(knowledge), self.random_state.randint(3, len(knowledge)))),
)
| 29.849057 | 104 | 0.617573 | 355 | 3,164 | 5.202817 | 0.267606 | 0.097455 | 0.040606 | 0.032485 | 0.161343 | 0.08013 | 0.044396 | 0.044396 | 0 | 0 | 0 | 0.011082 | 0.286979 | 3,164 | 105 | 105 | 30.133333 | 0.807624 | 0.019595 | 0 | 0.113924 | 0 | 0 | 0.010006 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.151899 | false | 0 | 0.075949 | 0.050633 | 0.341772 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
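A sketch of how these classes compose on a toy four-node prerequisite chain; `StubItem` is a stand-in for the imported `EduSim.Envs.meta.Item`, assumed here to only need a `knowledge` attribute:

```python
import networkx as nx

ks = nx.DiGraph([(0, 1), (1, 2), (2, 3)])  # 0 is a prerequisite of 1, etc.
learner = next(LearnerGroup(ks, seed=42))  # random initial state and learning target

class StubItem:  # minimal stand-in for EduSim's Item
    def __init__(self, knowledge):
        self.knowledge = knowledge

learner.learn(StubItem(0))                 # practice the root knowledge point
print(learner.state, learner.target)
```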
bf1590338f5ebdeb7ad55fecd78c338fd33c5131 | 1,492 | py | Python | 2021/4a.py | DanielDionne/advent_of_code | 891fd46b29a4eac2ef4ec1402df69dda10bd6c5d | [
"MIT"
] | null | null | null | 2021/4a.py | DanielDionne/advent_of_code | 891fd46b29a4eac2ef4ec1402df69dda10bd6c5d | [
"MIT"
] | null | null | null | 2021/4a.py | DanielDionne/advent_of_code | 891fd46b29a4eac2ef4ec1402df69dda10bd6c5d | [
"MIT"
] | null | null | null | import re
def mark(number, board):
for row in board:
for element in row:
if element[0] == number:
element[1] = True
def check(board):
# check rows
for row in board:
if all([e[1] == True for e in row]):
return True
# check columns
for colIndex in range(len(board)):
if all([board[rowIndex][colIndex][1] for rowIndex in range(len(board))]):
return True
return False
def sumUnmarked(board):
    # [e[0], 0][e[1]] picks 0 when the cell is marked, the cell's value otherwise
    return sum([e[0], 0][e[1]] for row in board for e in row)
def solve():
with open('4.input') as inputFile:
lines = inputFile.readlines()
numbers = map(int, lines[0].split(','))
boards = []
boardLines = []
for line in lines[2:]:
if len(line) == 1:
boards += [boardLines]
boardLines = []
continue
lineNumbers = [[x,False] for x in map(int,[line[0:2], line[3:5], line[6:8], line[9:11], line[12:14]])]
boardLines += [lineNumbers]
boards += [boardLines]
for number in numbers:
for board in boards:
mark(number, board)
if check(board):
# calculate result
# sum of all unmarked numbers
s = sumUnmarked(board)
# multiply that sum by the number that was just called
print(s*number)
return
solve()
| 29.84 | 114 | 0.502681 | 181 | 1,492 | 4.143646 | 0.375691 | 0.032 | 0.032 | 0.052 | 0.042667 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026172 | 0.385389 | 1,492 | 49 | 115 | 30.44898 | 0.791712 | 0.081769 | 0 | 0.210526 | 0 | 0 | 0.005865 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.026316 | 0.026316 | 0.263158 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
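The board representation above stores each cell as a `[number, marked]` pair; a toy 2x2 board (the real puzzle uses 5x5, and `check` assumes a square board) shows the helpers at work:

```python
board = [[[22, False], [13, False]],
         [[17, False], [11, False]]]
mark(13, board)
mark(11, board)
print(check(board))        # True: the right-hand column is fully marked
print(sumUnmarked(board))  # 39 == 22 + 17
```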
bf18bd5e5fa5284c1ee0a56f9ee6c1dd7b2dc30a | 5,509 | py | Python | scripts/python/plot_radiation_advection.py | lanl/phoebus | c570f42882c1c9e01e3bfe4b00b22e15a7a9992b | [
"BSD-3-Clause"
] | 3 | 2022-03-24T22:09:12.000Z | 2022-03-29T23:16:21.000Z | scripts/python/plot_radiation_advection.py | lanl/phoebus | c570f42882c1c9e01e3bfe4b00b22e15a7a9992b | [
"BSD-3-Clause"
] | 8 | 2022-03-15T20:49:43.000Z | 2022-03-29T17:45:04.000Z | scripts/python/plot_radiation_advection.py | lanl/phoebus | c570f42882c1c9e01e3bfe4b00b22e15a7a9992b | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# © 2022. Triad National Security, LLC. All rights reserved. This
# program was produced under U.S. Government contract
# 89233218CNA000001 for Los Alamos National Laboratory (LANL), which
# is operated by Triad National Security, LLC for the U.S. Department
# of Energy/National Nuclear Security Administration. All rights in
# the program are reserved by Triad National Security, LLC, and the
# U.S. Department of Energy/National Nuclear Security
# Administration. The Government is granted for itself and others
# acting on its behalf a nonexclusive, paid-up, irrevocable worldwide
# license in this material to reproduce, prepare derivative works,
# distribute copies to the public, perform publicly and display
# publicly, and to permit others to do so.
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import glob
import argparse
from parthenon_tools import phdf
# Boosted diffusion solution for given frame 3-velocity and opacity
# initial time and position in boosted frame are t0p and x0p
# x and t are position in time in the observer frame, returns J at
# these position
def BoostedDiffusion(kappa, x0p, v, t0p, J0, x, t):
gamma = 1/np.sqrt(1-v*v)
tp = gamma*(t - v*x)
xp = gamma*(x - v*t)
return J0*np.sqrt(t0p/tp)*np.exp(-3*kappa*(xp-x0p)**2/(4*tp))
parser = argparse.ArgumentParser(description='Plot a boosted diffusion wave.')
parser.add_argument('-f', '--files', dest='files', nargs='*', default='rad_adv*.phdf', help='List of input Parthenon hdf files to plot')
parser.add_argument('-o', '--out', dest='out_file', default='rad_adv_J.pdf', help='Plot output file')
# Set the parameters defining the initial conditions
# Defaults should be consistent with inputs/radiation_advection.pin
parser.add_argument('-v', dest='v', default=0.3, action="store", type=float)
parser.add_argument('-k', '--kappa', dest='kappa', default=1e3, action="store", type=float,
help='Background opacity in comoving frame')
parser.add_argument('-s', '--sigma', dest='sigma', default=0.03333, action="store", type=float,
help='Initial spread of gaussian pulse in comoving frame')
parser.add_argument('--J0', dest='J0', default=1.0, action="store", type=float,
help='Height of pulse at t=t0 in comoving frame')
parser.add_argument('--savefig', type=bool, default=True, help='Whether to save figure')
parser.add_argument('--analytic', type=bool, default=True, help='Whether to include analytic boosted diffusion in plot')
args = parser.parse_args()
v = args.v
kappa = args.kappa
sigma = args.sigma
J0 = args.J0
# Calculate the initial time in the primed frame based on the initial spread
# since at t=0 J \propto \delta(x)
t0p = 3/2*kappa*sigma**2
# Initial time in the observer frame is defined to be equal to the initial
# time in the co-moving frame
t0 = t0p
# Get the central position of the gaussian in the observer frame at t0
x0p = (0.5 - v*t0)/np.sqrt(1-v*v)
# Lorentz factor
W = 1/np.sqrt(1-v*v)
# Read in the files
files = []
for file in args.files:
files += glob.glob(file)
files = sorted(files)
# Set up unit conversions
file0 = phdf.phdf(files[0])
L_unit = file0.Params['phoebus/LengthCodeToCGS']
T_unit = file0.Params['phoebus/TimeCodeToCGS']
M_unit = file0.Params['phoebus/MassCodeToCGS']
scale_free = True
if not np.isclose(L_unit, 1.) or not np.isclose(T_unit, 1.) or not np.isclose(M_unit, 1.):
scale_free = False
E_unit = M_unit*L_unit**2/T_unit**2
UE_unit = E_unit / L_unit**3
J0 *= UE_unit
# Find the minimum and maximum times of the data
minTime = sys.float_info.max
maxTime = -sys.float_info.max
for file in files:
dfile = phdf.phdf(file)
minTime = min([dfile.Time, minTime])
maxTime = max([dfile.Time, maxTime])
maxTime = max([maxTime, minTime + 0.01])
# Set up the axes with a time colorbar
cmap = cm.get_cmap('viridis')
fig = plt.figure(figsize=[20,8])
plt_ax = fig.add_axes([0.15, 0.15, 0.68, 0.8])
c_map_ax = fig.add_axes([0.86, 0.2, 0.03, 0.7])
mpl.colorbar.ColorbarBase(c_map_ax, cmap=cmap,
norm=mpl.colors.Normalize(minTime, maxTime),
orientation = 'vertical',
label='Time')
# Plot the data (should work for refinement, but untested)
# Choose the species and y and z locations
# ispec currently has to be fixed to 0 because of tensor issues in Parthenon output
ispec = 0
iz = 0
iy = 0
for file in files[0::1]:
dfile = phdf.phdf(file)
J = dfile.Get("r.p.J", flatten=False)*UE_unit
x = dfile.x*L_unit
t = dfile.Time
if (t>maxTime): continue
color = cmap((t - minTime)/(maxTime - minTime))
for block in range(dfile.NumBlocks):
plt_ax.plot(x[block, :], J[block, iz, iy, :, ispec], color=color)
xmin = np.amin(x)
xmax = np.amax(x)
xgrid = np.arange(xmin, xmax, (xmax-xmin)/1000)
if args.analytic:
plt_ax.plot(xgrid, BoostedDiffusion(kappa, x0p, v, t0p, J0, xgrid/L_unit, t + t0p), linestyle='--', color='k')
xl = v*L_unit # 0.3
xh = 1.0*L_unit
yl = -0.1
yh = 1.05*J0
if scale_free:
plt_ax.set_ylabel('J (arb. units)')
plt_ax.set_xlabel('x (arb. units)')
else:
plt_ax.set_ylabel('J (erg cm^-3)')
plt_ax.set_xlabel('x (cm)')
plt_ax.set_xlim([xl, xh])
plt_ax.set_ylim([yl, yh])
etot = sum(J[0, iz, iy, :, ispec])
print("etot: ", etot)
plt_ax.text(0.05*(xh-xl)+xl, 0.95*(yh-yl)+yl, r'$\kappa={}$'.format(kappa))
if args.savefig:
plt.savefig(args.out_file)
else:
plt.show()
| 35.541935 | 136 | 0.697223 | 921 | 5,509 | 4.103149 | 0.335505 | 0.013231 | 0.035988 | 0.02117 | 0.18497 | 0.105319 | 0.04869 | 0.031754 | 0.031754 | 0.031754 | 0 | 0.029649 | 0.167363 | 5,509 | 154 | 137 | 35.772727 | 0.793983 | 0.305319 | 0 | 0.04 | 0 | 0 | 0.152082 | 0.017132 | 0 | 0 | 0 | 0 | 0 | 1 | 0.01 | false | 0 | 0.08 | 0 | 0.1 | 0.01 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
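For reference, `BoostedDiffusion` is a direct transcription (in c = 1 units) of the comoving-frame diffusion solution evaluated at boosted coordinates:

```latex
J(x, t) = J_0 \sqrt{\frac{t_0'}{t'}}
          \exp\!\left(-\frac{3 \kappa \, (x' - x_0')^2}{4 t'}\right),
\qquad
t' = \gamma (t - v x), \quad
x' = \gamma (x - v t), \quad
\gamma = \frac{1}{\sqrt{1 - v^2}}
```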
bf1d0a2600c1e32dca7dd7713967984af9f10c7e | 7,731 | py | Python | emma/utils/visualizations.py | rpp0/emma | fab81e1c66b8a88d14e68b8878ddbb5ee6528de2 | [
"MIT"
] | 36 | 2019-01-08T12:49:36.000Z | 2022-03-31T08:11:48.000Z | emma/utils/visualizations.py | rpp0/emma | fab81e1c66b8a88d14e68b8878ddbb5ee6528de2 | [
"MIT"
] | 6 | 2020-01-28T22:59:05.000Z | 2022-02-10T00:14:43.000Z | emma/utils/visualizations.py | rpp0/emma | fab81e1c66b8a88d14e68b8878ddbb5ee6528de2 | [
"MIT"
] | 3 | 2019-02-12T11:55:42.000Z | 2020-08-12T23:30:05.000Z | import matplotlib.pyplot as plt
import os
import numpy as np
from datetime import datetime
from matplotlib.backends.backend_pdf import PdfPages
from emma.io.traceset import TraceSet
from emma.utils.utils import MaxPlotsReached, EMMAException
#plt.rcParams['axes.prop_cycle'] = plt.cycler(color=plt.get_cmap('flag').colors) # Use different cycling colors
#plt.style.use('bmh') # Use different style
def plt_save_pdf(path):
"""
Save plot as pdf to path
:param path:
:return:
"""
pp = PdfPages(path)
pp.savefig(dpi=300)
pp.close()
plt.clf()
plt.cla()
def plot_spectogram(trace_set,
sample_rate,
nfft=2**10,
noverlap=0,
cmap='plasma',
params=None,
num_traces=1024):
if not trace_set.windowed:
raise EMMAException("Trace set should be windowed")
# Check params
if params is not None:
if len(params) == 1:
nfft = int(params[0])
elif len(params) == 2:
nfft = int(params[0])
noverlap = int(nfft * int(params[1]) / 100.0)
all_signals = np.array([trace.signal for trace in trace_set.traces[0:num_traces]]).flatten()
"""
# Old style
for trace in trace_set.traces[0:num_traces]:
plt.specgram(trace.signal, NFFT=nfft, Fs=sample_rate, noverlap=noverlap, cmap=cmap)
"""
plt.specgram(all_signals, NFFT=nfft, Fs=sample_rate, noverlap=noverlap, cmap=cmap, mode='psd', scale='dB')
plt.tight_layout()
plt.show()
def plot_colormap(inputs,
show=True,
cmap='inferno',
draw_axis=True,
title='',
xlabel='',
ylabel='',
colorbar_label='',
save=False,
**kwargs):
"""
Plot signals given in the inputs numpy array in a colormap.
:param inputs:
:param show:
:param cmap:
:param draw_axis:
:param title:
:param cmap:
:param xlabel:
:param ylabel:
:param colorbar_label:
:param save:
:param kwargs:
:return:
"""
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
if inputs.dtype == np.complex64 or inputs.dtype == np.complex128:
inputs = np.real(inputs)
print("Warning: converting colormap to np.real(complex)")
#inputs += 0.01
vmin = inputs.min()
vmax = inputs.max()
colorplot = plt.imshow(inputs,
vmin=vmin,
vmax=vmax,
interpolation='nearest',
# norm=LogNorm(vmin=vmin, vmax=vmax),
cmap=cmap,
**kwargs)
if draw_axis:
# https://stackoverflow.com/questions/18195758/set-matplotlib-colorbar-size-to-match-graph
from mpl_toolkits.axes_grid1 import make_axes_locatable
axis = plt.gca()
figure = plt.gcf()
divider = make_axes_locatable(axis)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = figure.colorbar(colorplot, cax=cax)
cbar.set_label(colorbar_label)
plt.tight_layout()
if save:
if title:
plt_save_pdf('/tmp/%s.pdf' % title)
else:
plt_save_pdf('/tmp/%s.pdf' % str(datetime.now()))
if show:
plt.show()
def _get_x_axis_values(signal, time_domain=True, sample_rate=1.0):
if not time_domain:
freqs = np.fft.fftfreq(len(signal), d=1.0/sample_rate)
x = np.fft.fftshift(freqs)
else:
x = range(0, len(signal))
return x
def plot_trace_sets(reference_signal,
trace_sets,
params=None,
no_reference_plot=False,
num_traces=1024,
title='',
xlabel='',
ylabel='',
colorbar_label='',
time_domain=True,
sample_rate=1.0):
"""
Plot num_traces signals from a list of trace sets using matplotlib
"""
saveplot = False
colormap = False
# Check params
if params is not None:
if len(params) >= 1:
if 'save' in params:
saveplot = True
if '2d' in params:
colormap = True
if not isinstance(trace_sets, list) or isinstance(trace_sets, TraceSet):
raise ValueError("Expected list of TraceSets")
if len(trace_sets) == 0:
return
# Make title
common_path = os.path.commonprefix([trace_set.name for trace_set in trace_sets])
if title == '':
title = "%d trace sets from %s" % (len(trace_sets), common_path)
if reference_signal.dtype == np.complex64 or reference_signal.dtype == np.complex128:
title += " (complex, only real values plotted)"
# Make plots
count = 0
all_signals = []
try:
for trace_set in trace_sets:
for trace in trace_set.traces:
all_signals.append(trace.signal)
count += 1
if count >= num_traces:
raise MaxPlotsReached
except MaxPlotsReached:
pass
finally:
if xlabel == '':
if time_domain:
xlabel = 'Samples'
else:
xlabel = 'Frequency (assuming sample rate %.2f)' % sample_rate
if colormap:
plot_colormap(np.array(all_signals),
show=False,
title=title,
xlabel=xlabel,
ylabel=ylabel,
colorbar_label=colorbar_label)
else:
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
for signal in all_signals:
x = _get_x_axis_values(signal, sample_rate=sample_rate, time_domain=time_domain)
plt.plot(x, signal)
if not no_reference_plot:
x = _get_x_axis_values(reference_signal, sample_rate=sample_rate, time_domain=time_domain)
plt.plot(x, reference_signal, linewidth=2, linestyle='dashed')
if saveplot:
plt_save_pdf('/tmp/plotted_trace_sets.pdf')
plt.clf()
else:
plt.show()
def plot_correlations(values1, values2, label1="", label2="", show=False):
values1 = np.reshape(values1, (-1,)) # TODO doesnt account for numkeys. Use only for a single key byte!
values2 = np.reshape(values2, (-1,))
    # With two 1-D inputs, corrcoef already treats each array as one variable;
    # rowvar=False would instead correlate per-sample columns.
    correlation = np.corrcoef(values1, values2)[1, 0]
mean_values1 = np.mean(values1, axis=0)
mean_values2 = np.mean(values2, axis=0)
plt.title("Correlation: " + str(correlation))
plt.plot(values1, "o", label=label1, markersize=5.0)
plt.plot(values2, "o", label=label2, markersize=5.0)
#plt.plot(values1, values2, "o", label=label2, markersize=5.0)
plt.gca().legend()
if show:
plt.show()
def plot_keyplot(keyplot, time_domain=True, sample_rate=1.0, show=False):
plt.title("Keyplot")
if time_domain:
plt.xlabel("Samples")
else:
plt.xlabel("Frequency assuming sample rate of %.2f" % sample_rate)
plt.ylabel("Amplitude")
color_cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
for value, mean_signal in sorted(keyplot.items()):
color = color_cycle[int(value.rpartition(',')[2].strip(')'), 16) % len(color_cycle)]
x = _get_x_axis_values(mean_signal, sample_rate=sample_rate, time_domain=time_domain)
plt.plot(x, mean_signal, label=value, color=color)
plt.legend()
if show:
plt.show()
| 31.555102 | 112 | 0.567844 | 926 | 7,731 | 4.600432 | 0.2473 | 0.039906 | 0.00939 | 0.013146 | 0.24061 | 0.165728 | 0.1277 | 0.10939 | 0.093427 | 0.056808 | 0 | 0.02 | 0.320916 | 7,731 | 244 | 113 | 31.684426 | 0.791429 | 0.10542 | 0 | 0.231214 | 0 | 0 | 0.059391 | 0.00407 | 0 | 0 | 0 | 0.004098 | 0 | 1 | 0.040462 | false | 0.00578 | 0.046243 | 0 | 0.098266 | 0.00578 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
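A minimal call into `plot_colormap`, the one routine here that needs no EMMA `TraceSet` objects, just a 2-D array:

```python
import numpy as np

# 64 fading copies of a sine wave, one per row of the colormap
signals = np.linspace(1, 0, 64)[:, None] * np.sin(np.linspace(0, 8 * np.pi, 512))[None, :]
plot_colormap(signals, show=True, title='demo',
              xlabel='sample', ylabel='trace', colorbar_label='amplitude')
```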
bf1eec4185206fab6a61df1d56c8c21212cdfa42 | 2,031 | py | Python | list-manual-packages.py | qidydl/debian-package-scripts | a68f2d4c493e00761cc7d6cdc11ca1e661684741 | [
"MIT"
] | null | null | null | list-manual-packages.py | qidydl/debian-package-scripts | a68f2d4c493e00761cc7d6cdc11ca1e661684741 | [
"MIT"
] | null | null | null | list-manual-packages.py | qidydl/debian-package-scripts | a68f2d4c493e00761cc7d6cdc11ca1e661684741 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""List manually-installed Debian packages
This script can be used to see which packages are flagged as having been installed manually. Manually-installed
packages are not eligible for autoremove. Managing this flag will ensure that libraries are cleaned up when no longer
needed.
This script outputs two parts: first, a list of one package name per line for packages that are manually-installed and
also a "root" (see list-root-packages.py). Second, a single big line listing packages that are manually-installed but
not a "root". This output is not designed or intended to be machine-readable; this script is just a heuristic, it does
not even attempt to be bulletproof."""
__author__ = "David Osolkowski"
__copyright__ = "Copyright 2020 David Osolkowski"
__license__ = "MIT"
__status__ = "Development"
__version__ = "1.1.0"
from apt import cache
aptCache = cache.Cache()
# All installed packages
installed = {
pkg
for pkg in aptCache
if pkg.is_installed
}
installedNames = {pkg.name for pkg in installed}
# All installed dependencies of installed packages
depends = {
dep_pkg.name
for pkg in installed
for dep in pkg.installed.get_dependencies('PreDepends', 'Depends', 'Recommends')
for dep_pkg in dep
if dep_pkg.name in installedNames
}
# All installed suggestions of installed packages
suggests = {
dep_pkg.name
for pkg in installed
for dep in pkg.installed.get_dependencies('Suggests')
for dep_pkg in dep
if dep_pkg.name in installedNames
}
# All manually-installed packages that nothing installed depends on
manualRoots = [
pkg.name + (" (SUGGESTED)" if pkg.name in suggests else "")
for pkg in installed
if not pkg.is_auto_installed and pkg.name not in depends
]
manualRoots.sort()
print('\n'.join(manualRoots))
manualDepends = [
pkg.name
for pkg in installed
if not pkg.is_auto_installed and pkg.name in depends
]
manualDepends.sort()
print("\nManual depended on by something: " + ', '.join(manualDepends))
| 28.605634 | 118 | 0.746923 | 296 | 2,031 | 5.013514 | 0.39527 | 0.04717 | 0.032345 | 0.057278 | 0.275606 | 0.23248 | 0.21159 | 0.21159 | 0.21159 | 0.21159 | 0 | 0.004808 | 0.180699 | 2,031 | 70 | 119 | 29.014286 | 0.887019 | 0.432792 | 0 | 0.243902 | 0 | 0 | 0.1331 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.02439 | 0 | 0.02439 | 0.04878 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bf2071740b73e90331bef4394f5adc4ea46bf3e5 | 1,847 | py | Python | machine_learning/boston.py | zmaas/scratch | ee996bb6a15e3eb322a07b8637f8eb0046ec9d89 | [
"MIT"
] | null | null | null | machine_learning/boston.py | zmaas/scratch | ee996bb6a15e3eb322a07b8637f8eb0046ec9d89 | [
"MIT"
] | null | null | null | machine_learning/boston.py | zmaas/scratch | ee996bb6a15e3eb322a07b8637f8eb0046ec9d89 | [
"MIT"
] | null | null | null | '''Boston Housing Regression'''
import numpy as np
from keras.datasets import boston_housing
from keras import models
from keras import layers
(train_data, train_targets), (test_data,
test_targets) = boston_housing.load_data()
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
def build_model():
model = models.Sequential()
model.add(
layers.Dense(
64, activation='relu', input_shape=(train_data.shape[1], )))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
return model
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
print('processing fold #', i)
val_data = train_data[i * num_val_samples:(i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples:(i + 1) * num_val_samples]
partial_train_data = np.concatenate(
[
train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]
],
axis=0)
partial_train_targets = np.concatenate(
[
train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]
],
axis=0)
model = build_model()
model.fit(
partial_train_data,
partial_train_targets,
epochs=num_epochs,
batch_size=1,
verbose=0)
val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
    all_scores.append(val_mae)
print('mean cross-validation MAE:', np.mean(all_scores))
# We figured out that we only need ~80 epochs
model = build_model()
model.fit(train_data, train_targets, epochs=80, batch_size=16)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
| 28.859375 | 78 | 0.655658 | 258 | 1,847 | 4.414729 | 0.290698 | 0.102722 | 0.102722 | 0.049166 | 0.258121 | 0.208955 | 0.147498 | 0.050922 | 0.050922 | 0 | 0 | 0.018961 | 0.22902 | 1,847 | 63 | 79 | 29.31746 | 0.780899 | 0.040606 | 0 | 0.113208 | 0 | 0 | 0.021518 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018868 | false | 0 | 0.075472 | 0 | 0.113208 | 0.018868 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
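The "~80 epochs" figure comes from inspecting per-fold validation curves; a sketch of collecting them with the same `fit` call (the `val_mae` key assumes the label recent Keras versions give a `'mae'` metric):

```python
# Inside the k-fold loop, keep the full validation history instead of one score:
history = model.fit(partial_train_data, partial_train_targets,
                    validation_data=(val_data, val_targets),
                    epochs=num_epochs, batch_size=1, verbose=0)
mae_history = history.history['val_mae']
# Averaging the k curves epoch-by-epoch then reveals where the MAE stops improving.
```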
bf231f29d738cf1a12e825364e3815688ca78557 | 4,456 | py | Python | zorg/buildbot/builders/LibCXXBuilder.py | antiagainst/llvm-zorg | a5b58cdd800d0d45b1bdd1f7fe058db6acbfd918 | [
"Apache-2.0"
] | 1 | 2019-02-10T03:05:05.000Z | 2019-02-10T03:05:05.000Z | zorg/buildbot/builders/LibCXXBuilder.py | antiagainst/llvm-zorg | a5b58cdd800d0d45b1bdd1f7fe058db6acbfd918 | [
"Apache-2.0"
] | null | null | null | zorg/buildbot/builders/LibCXXBuilder.py | antiagainst/llvm-zorg | a5b58cdd800d0d45b1bdd1f7fe058db6acbfd918 | [
"Apache-2.0"
] | null | null | null |
import os
import buildbot
import buildbot.process.factory
import buildbot.steps.shell
import buildbot.steps.source as source
import buildbot.steps.source.svn as svn
import buildbot.process.properties as properties
import zorg.buildbot.commands.LitTestCommand as lit_test_command
import zorg.buildbot.util.artifacts as artifacts
import zorg.buildbot.util.phasedbuilderutils as phased_builder_utils
reload(lit_test_command)
reload(artifacts)
reload(phased_builder_utils)
def getLibCXXBuilder(f=None, source_path=None,
lit_dir=None):
if f is None:
f = buildbot.process.factory.BuildFactory()
# Find the build directory. We assume if f is passed in that the build
# directory has already been found.
f = phased_builder_utils.getBuildDir(f)
# Grab the sources if we are not passed in any.
if source_path is None:
source_path = 'sources'
src_url = 'http://llvm.org/svn/llvm-project/libcxx/trunk'
f = phased_builder_utils.SVNCleanupStep(f, source_path)
f.addStep(svn.SVN(name='pull.src', mode='full', repourl=src_url,
workdir=source_path, method='fresh',
alwaysUseLatest=False, retry = (60, 5),
description='pull.src'))
# Grab the artifacts for our build.
f = artifacts.GetCompilerArtifacts(f)
host_compiler_dir = properties.WithProperties('%(builddir)s/host-compiler')
f = artifacts.GetCCFromCompilerArtifacts(f, host_compiler_dir)
f = artifacts.GetCXXFromCompilerArtifacts(f, host_compiler_dir)
# Build libcxx.
CC = properties.WithProperties('%(cc_path)s')
CXX = properties.WithProperties('%(cxx_path)s')
HEADER_INCLUDE = \
properties.WithProperties('-I %s' % os.path.join('%(builddir)s',
source_path,
'include'))
SOURCE_LIB = \
properties.WithProperties(os.path.join('%(builddir)s',
source_path, 'lib',
'libc++.1.dylib'))
f.addStep(buildbot.steps.shell.ShellCommand(
name='build.libcxx', command=['./buildit'], haltOnFailure=True,
workdir=os.path.join(source_path, 'lib'),
env={ 'CC' : CC, 'CXX' : CXX, 'TRIPLE' : '-apple-'}))
# Get the 'lit' sources if we need to.
if lit_dir is None:
lit_dir = 'lit.src'
f.addStep(svn.SVN(
name='pull.lit', mode='incremental', method='fresh',
repourl='http://llvm.org/svn/llvm-project/llvm/trunk/utils/lit',
workdir=lit_dir, alwaysUseLatest=False))
# Install a copy of 'lit' in a virtualenv.
f.addStep(buildbot.steps.shell.ShellCommand(
name='venv.lit.clean',
command=['rm', '-rf', 'lit.venv'],
workdir='.', haltOnFailure=True))
f.addStep(buildbot.steps.shell.ShellCommand(
name='venv.lit.make',
command=['/usr/local/bin/virtualenv', 'lit.venv'],
workdir='.', haltOnFailure=True))
f.addStep(buildbot.steps.shell.ShellCommand(
name='venv.lit.install',
command=[
properties.WithProperties('%(builddir)s/lit.venv/bin/python'),
'setup.py', 'install'],
workdir=lit_dir, haltOnFailure=True))
# Run the tests with the system's dylib
f.addStep(lit_test_command.LitTestCommand(
name='test.libcxx.system',
command=[
properties.WithProperties('%(builddir)s/lit.venv/bin/lit'),
'-v', '--show-xfail', '--show-unsupported',
properties.WithProperties(
'--param=cxx_under_test=%(cxx_path)s'),
'--param=use_system_lib=true',
'sources/test'],
workdir='.'))
# Run the tests with the newly built dylib
f.addStep(lit_test_command.LitTestCommand(
name='test.libcxx.new',
command=[
properties.WithProperties('%(builddir)s/lit.venv/bin/lit'),
'-v', '--show-xfail', '--show-unsupported',
properties.WithProperties(
'--param=cxx_under_test=%(cxx_path)s'),
'--param=use_system_lib=false',
'sources/test'],
workdir='.'))
return f
| 41.259259 | 79 | 0.5864 | 491 | 4,456 | 5.217923 | 0.281059 | 0.093677 | 0.035129 | 0.051522 | 0.346214 | 0.332162 | 0.295472 | 0.25644 | 0.236924 | 0.217799 | 0 | 0.001262 | 0.288824 | 4,456 | 107 | 80 | 41.64486 | 0.807195 | 0.079219 | 0 | 0.270588 | 0 | 0 | 0.18304 | 0.065005 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011765 | false | 0 | 0.117647 | 0 | 0.141176 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bf23269a20352bd1e5bf1a525b6d3e5fd76ba4f9 | 1,760 | py | Python | pydeps/rules/ruleResultChecker.py | enableiot/iotanalytics-rule-engine | 8f7c0e00f3f534944af21255cf7d98fc632b08b2 | [
"Apache-2.0"
] | 3 | 2015-12-15T10:17:10.000Z | 2016-01-19T15:24:51.000Z | pydeps/rules/ruleResultChecker.py | enableiot/iotanalytics-rule-engine | 8f7c0e00f3f534944af21255cf7d98fc632b08b2 | [
"Apache-2.0"
] | null | null | null | pydeps/rules/ruleResultChecker.py | enableiot/iotanalytics-rule-engine | 8f7c0e00f3f534944af21255cf7d98fc632b08b2 | [
"Apache-2.0"
] | 2 | 2015-12-15T10:17:11.000Z | 2018-11-01T12:40:49.000Z | # Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rules.conditions.conditionsBuilder import ConditionsBuilder
class RuleResultChecker(object):
@staticmethod
def __join_all_results__(list_of_results):
return reduce(lambda array_1, array_2: array_1 + array_2, list_of_results)
def __init__(self, rule):
self.rule = rule
if 'operator' in rule['conditions']:
self.rule_operator = rule['conditions']['operator']
else:
self.rule_operator = None
self.list_of_results = None
def is_fulfilled(self, list_of_results):
self.list_of_results = list_of_results
if self.rule_operator == ConditionsBuilder.OR:
return reduce(lambda x, y: x or y, self.__get_result_status_list__())
if self.rule_operator == ConditionsBuilder.AND or self.rule_operator is None:
return reduce(lambda x, y: x and y, self.__get_result_status_list__())
raise AttributeError("Unknown rule operator: " + str(self.rule_operator))
def __get_result_status_list__(self):
return map(lambda single_result: single_result.is_passed,
RuleResultChecker.__join_all_results__(self.list_of_results))
| 39.111111 | 85 | 0.718182 | 240 | 1,760 | 4.995833 | 0.425 | 0.053378 | 0.075897 | 0.056714 | 0.173478 | 0.075063 | 0 | 0 | 0 | 0 | 0 | 0.008571 | 0.204545 | 1,760 | 44 | 86 | 40 | 0.847857 | 0.316477 | 0 | 0 | 0 | 0 | 0.049622 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0.045455 | 0.045455 | 0.090909 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
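A sketch of the checker on fabricated results: each single result only needs an `is_passed` attribute, and `ConditionsBuilder.OR` is assumed to be the tag stored in the rule's `conditions.operator` field:

```python
from collections import namedtuple
from rules.conditions.conditionsBuilder import ConditionsBuilder

Result = namedtuple('Result', ['is_passed'])

rule = {'conditions': {'operator': ConditionsBuilder.OR}}
checker = RuleResultChecker(rule)
# Two condition groups, each contributing a list of single results:
print(checker.is_fulfilled([[Result(False)], [Result(True)]]))  # True under OR
```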
bf236a0773de3a927230fe201180262fee783588 | 6,372 | py | Python | extviews/viewset.py | BilalAlpaslan/fastapi-extviews | e3ce1c4916d86009705a09e165e5ee21a197962f | [
"MIT"
] | 16 | 2022-01-01T16:00:58.000Z | 2022-03-21T09:42:35.000Z | extviews/viewset.py | BilalAlpaslan/fastapi-extviews | e3ce1c4916d86009705a09e165e5ee21a197962f | [
"MIT"
] | null | null | null | extviews/viewset.py | BilalAlpaslan/fastapi-extviews | e3ce1c4916d86009705a09e165e5ee21a197962f | [
"MIT"
] | null | null | null | from typing import Callable, List, Sequence, Union
from fastapi import APIRouter, Header
from fastapi.params import Depends
from pydantic import BaseModel
from .crudset import BaseCrudSet
__all__ = ['ViewSet', 'CrudViewSet']
supported_methods_names: List[str] = [
'list', 'retrieve', 'create', 'update', 'partial_update', 'destroy']
class ViewSet:
""" router: APIRouter = None
base_path: str = None
class_tag: str = None
path_key: str = "id"
response_model: BaseModel = None
dependencies: Sequence[Depends] = None
"""
router: APIRouter = None
base_path: str = None
class_tag: str = None
path_key: str = "id"
response_model: BaseModel = None
dependencies: Sequence[Depends] = None
marked_functions: List = []
def __init__(self) -> APIRouter:
self.functions: List[Callable] = []
self.extra_functions: List[List] = []
self.execute()
def get_response_model(self, action: str) -> Union[BaseModel, None]:
""" if override this method, you can return different response model for different action """
if self.response_model is not None:
return self.response_model
return None
def get_dependencies(self, action: str) -> Sequence[Depends]:
""" if override this method, you can return different dependencies for different action """
if self.dependencies is not None:
return self.dependencies
return None
def execute(self) -> APIRouter:
if self.router is None:
self.router = APIRouter()
if self.base_path is None:
self.base_path = '/' + self.__class__.__name__.lower()
if self.class_tag is None:
self.class_tag = self.__class__.__name__
for func in supported_methods_names:
if hasattr(self, func):
self.functions.append(getattr(self, func))
for func in self.functions:
self._register_route(func)
for func, methods, path in self.find_marked_functions():
self._register_extra_route(func, methods=methods, path=path)
def _register_route(self, func: Callable, hidden_params: List[str] = ["self"]):
# hidden_params TODO: add support for hidden params
extras = {}
extras['response_model'] = self.get_response_model(func.__name__)
extras['dependencies'] = self.get_dependencies(func.__name__)
if func.__name__ == 'list':
self.router.add_api_route(self.base_path, func, tags=[
self.class_tag], methods=['GET'], **extras)
elif func.__name__ == 'retrieve':
self.router.add_api_route(f"{self.base_path}/\u007b{self.path_key}\u007d", func, tags=[
self.class_tag], methods=['GET'], **extras)
elif func.__name__ == 'create':
self.router.add_api_route(self.base_path, func, tags=[
self.class_tag], methods=['POST'], **extras)
elif func.__name__ == 'update':
self.router.add_api_route(f"{self.base_path}/\u007b{self.path_key}\u007d", func, tags=[
self.class_tag], methods=['PUT'], **extras)
elif func.__name__ == 'partial_update':
self.router.add_api_route(f"{self.base_path}/\u007b{self.path_key}\u007d", func, tags=[
self.class_tag], methods=['PATCH'], **extras)
elif func.__name__ == 'destroy':
self.router.add_api_route(f"{self.base_path}/\u007b{self.path_key}\u007d", func, tags=[
self.class_tag], methods=['DELETE'], **extras)
else:
print(f"Method {func.__name__} is not supported")
def _register_extra_route(self, func: Callable, methods: List[str] = ["GET"], path: str = None):
extras = {}
extras['response_model'] = self.get_response_model(func.__name__)
extras['dependencies'] = self.get_dependencies(func.__name__)
        if path is None:
            # default to the function name; prefix '/' so it concatenates
            # cleanly with base_path below
            path = '/' + func.__name__
self.router.add_api_route(f"{self.base_path}{path}", func, tags=[
self.class_tag], methods=methods, **extras)
@classmethod
def extra_method(cls, methods: List[str] = ["GET"], path_key: str = None):
""" if you want to add extra method to the viewset, you can use this decorator """
def decorator(func):
cls.marked_functions.append([func, methods, path_key])
return func
return decorator
def find_marked_functions(self):
for func in dir(self):
for marked_func in self.marked_functions:
if func == marked_func[0].__name__:
self.extra_functions.append(marked_func)
self.marked_functions.remove(marked_func)
break
return self.extra_functions
class CrudViewSet(ViewSet):
"""
This is the base viewset for CRUD operations.
"""
crud: BaseCrudSet = None
model: BaseModel = None
async_db = False
def __init__(self):
assert self.crud is not None, "You must define crud model"
assert self.model is not None, "You must define model"
self._crud = self.crud()
super().__init__()
async def list(self) -> List[model]:
if self.async_db:
return await self._crud.list()
return self._crud.list()
async def retrieve(self, id: int) -> model:
if self.async_db:
return await self._crud.retrieve(id)
return self._crud.retrieve(id)
async def create(self, data: model) -> model:
if self.async_db:
return await self._crud.create(data)
return self._crud.create(data)
async def update(self, id: int, data: model) -> model:
if self.async_db:
return await self._crud.update(id, data)
return self._crud.update(id, data)
async def partial_update(self, id: int, data: model) -> model:
if self.async_db:
return await self._crud.partial_update(id, data)
return self._crud.partial_update(id, data)
async def destroy(self, id: int) -> model:
if self.async_db:
return await self._crud.destroy(id)
return self._crud.destroy(id)
| 37.704142 | 101 | 0.608443 | 772 | 6,372 | 4.762953 | 0.141192 | 0.032635 | 0.029372 | 0.03046 | 0.451455 | 0.409029 | 0.37585 | 0.367419 | 0.345118 | 0.325809 | 0 | 0.005453 | 0.280446 | 6,372 | 168 | 102 | 37.928571 | 0.79651 | 0.07941 | 0 | 0.181818 | 0 | 0 | 0.082917 | 0.034132 | 0 | 0 | 0 | 0.005952 | 0.016529 | 1 | 0.082645 | false | 0 | 0.041322 | 0 | 0.380165 | 0.008264 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
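A sketch of wiring a `CrudViewSet` subclass into a FastAPI app; `UserCrud` (a `BaseCrudSet` implementation) and the pydantic `User` model are hypothetical:

```python
from fastapi import FastAPI

class UserViewSet(CrudViewSet):
    crud = UserCrud           # hypothetical BaseCrudSet subclass
    model = User              # hypothetical pydantic model
    response_model = User
    base_path = "/users"

app = FastAPI()
app.include_router(UserViewSet().router)  # GET/POST /users, GET/PUT/PATCH/DELETE /users/{id}
```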
bf24664ff4f061ae7a934264b0579dd203c773d6 | 6,456 | py | Python | wouldyouci_database/recommendation/contents_based_filtering.py | jhee514/wouldYouCi | 54793401fb51356587e5a4460eb606ed9943b30c | [
"MIT"
] | 1 | 2020-06-18T08:40:47.000Z | 2020-06-18T08:40:47.000Z | wouldyouci_database/recommendation/contents_based_filtering.py | jhee514/wouldYouCi | 54793401fb51356587e5a4460eb606ed9943b30c | [
"MIT"
] | 14 | 2021-03-19T08:55:06.000Z | 2022-03-12T00:37:51.000Z | wouldyouci_database/recommendation/contents_based_filtering.py | jhee514/wouldYouCi | 54793401fb51356587e5a4460eb606ed9943b30c | [
"MIT"
] | 1 | 2021-05-27T08:52:01.000Z | 2021-05-27T08:52:01.000Z | import os
import time
import pymysql
import pandas as pd
from decouple import config
from datetime import datetime
from sklearn.linear_model import Lasso
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import uniform as sp_rand
def contentsbased1(user_id, movie_id, genres_p):
    print('======== Predicted ratings for all movies - genres ===========')
print('START TIME : ', str(datetime.now())[10:19])
start = time.time()
conn = pymysql.connect(host=config('HOST'), port=3306, user=config('USER'),
password=config('PASSWORD'), db=config('DB'))
sql = 'SELECT * FROM wouldyouci.accounts_rating where user_id=' + str(user_id)
ratings = pd.read_sql_query(sql, conn)
genres = genres_p
conn.close()
user_profile = ratings.merge(genres, left_on='movie_id', right_index=True)
model = Lasso()
param_grid = {'alpha': sp_rand()}
research = RandomizedSearchCV(estimator=model,
param_distributions=param_grid,
n_iter=20,
cv=5,
random_state=406)
research.fit(user_profile[genres.columns], user_profile['score'])
predictions = research.best_estimator_.predict(genres)
genres.reset_index()
genres['predict'] = predictions
predicted_score = genres.at[movie_id, 'predict']
print('END TIME : ', str(datetime.now())[10:19])
end = time.time()
print('TOTAL TIME : ', end-start)
print('PREDICTED SCORE : ', predicted_score)
print()
return pd.DataFrame.to_json(genres['predict'])
def contentsbased2(user_id, movie_id, movies_p):
print('======== 전체 영화 예상평점 - 장르 & 감독 & 배우 ===========')
print('START TIME : ', str(datetime.now())[10:19])
start = time.time()
conn = pymysql.connect(host=config('HOST'), port=3306, user=config('USER'),
password=config('PASSWORD'), db=config('DB'))
sql = 'SELECT * FROM wouldyouci.accounts_rating where user_id=' + str(user_id)
ratings = pd.read_sql_query(sql, conn)
movies = movies_p
conn.close()
ratings = ratings.merge(movies, left_on='movie_id', right_index=True)
x_train, x_test, y_train, y_test = train_test_split(ratings[movies.columns],
ratings['score'],
random_state=406,
test_size=.1)
reg = LinearRegression()
reg.fit(x_train, y_train)
predictions = reg.predict(movies)
movies.reset_index()
movies['predict'] = predictions
print('END TIME : ', str(datetime.now())[10:19])
predicted_score = movies.at[movie_id, 'predict']
end = time.time()
print('TOTAL TIME : ', end-start)
print('PREDICTED SCORE : ', predicted_score)
print()
return pd.DataFrame.to_json(movies['predict'])
def contentsbased3(user_id, movie_id, movies_p):
    print('======== Predicted rating for a specific movie - genres & directors & actors ===========')
print('START TIME : ', str(datetime.now())[10:19])
start = time.time()
conn = pymysql.connect(host=config('HOST'), port=3306, user=config('USER'),
password=config('PASSWORD'), db=config('DB'))
sql = 'SELECT * FROM wouldyouci.accounts_rating where user_id=' + str(user_id)
ratings = pd.read_sql_query(sql, conn)
movies = movies_p
conn.close()
ratings = ratings.merge(movies, left_on='movie_id', right_index=True)
train, test = train_test_split(ratings, test_size=0.1, random_state=406)
x_train = train[movies.columns]
y_train = train['score']
reg = Lasso(alpha=0.03)
reg.fit(x_train, y_train)
user_profile = []
user_profile.append([reg.intercept_, *reg.coef_])
user_profile = pd.DataFrame(user_profile,
index=train['user_id'].unique(),
columns=['intercept', *movies.columns])
intercept = user_profile.loc[user_id, 'intercept']
columns_score = sum(user_profile.loc[user_id, movies.columns] * movies.loc[movie_id, movies.columns])
predicted_score = intercept + columns_score
print('END TIME : ', str(datetime.now())[10:19])
end = time.time()
print('TOTAL TIME : ', end-start)
print('PREDICTED SCORE : ', predicted_score)
print()
return predicted_score
def contentsbased4(user_id, movie_id, movies_p):
print('======== 전체 영화 예상평점 - 장르 & 감독 ===========')
print('START TIME : ',str(datetime.now())[10:19] )
start = time.time()
conn = pymysql.connect(host=config('HOST'), port=3306, user=config('USER'),
password=config('PASSWORD'), db=config('DB'))
sql = 'SELECT * FROM wouldyouci.accounts_rating where user_id=' + str(user_id)
ratings = pd.read_sql_query(sql, conn)
movies = movies_p
conn.close()
ratings = ratings.merge(movies, left_on='movie_id', right_index=True)
x_train, x_test, y_train, y_test = train_test_split(ratings[movies.columns],
ratings['score'],
random_state=406,
test_size=0.1)
reg = LinearRegression()
reg.fit(x_train, y_train)
predictions = reg.predict(movies)
movies.reset_index()
movies['predict'] = predictions
predicted_score = movies.at[movie_id, 'predict']
print('END TIME : ', str(datetime.now())[10:19])
end = time.time()
print('TOTAL TIME : ', end-start)
print('PREDICTED SCORE : ', predicted_score)
return pd.DataFrame.to_json(movies['predict'])
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
a = time.time()
genres = pd.read_pickle(os.path.join(BASE_DIR, 'movie_director_train.p'))
b = time.time()
print('Time to read pickle file 1: ', b - a)
movies = pd.read_pickle(os.path.join(BASE_DIR, 'movie_train.p'))
c = time.time()
print('Time to read pickle file 2: ', c - b)
directors = pd.read_pickle(os.path.join(BASE_DIR, 'movie_director_train.p'))
d = time.time()
print('Time to read pickle file 3: ', d - c)
print()
contentsbased1(9000007, 10016, genres)
contentsbased2(9000007, 10016, movies)
contentsbased3(9000007, 10016, movies)
contentsbased4(9000007, 10016, directors)
| 34.340426 | 105 | 0.614777 | 806 | 6,456 | 4.748139 | 0.167494 | 0.023517 | 0.031356 | 0.037627 | 0.688268 | 0.640449 | 0.635746 | 0.594722 | 0.561014 | 0.55213 | 0 | 0.026656 | 0.244579 | 6,456 | 187 | 106 | 34.524064 | 0.758048 | 0 | 0 | 0.539568 | 0 | 0 | 0.149628 | 0.022924 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028777 | false | 0.028777 | 0.079137 | 0 | 0.136691 | 0.194245 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bf25dc21b097b4039654bff5dabf2d9a9ccf1daa | 830 | py | Python | netrd/utilities/__init__.py | sdmccabe/netrd | f703c19b02f42c9f54bcab57014381da11dd58da | [
"MIT"
] | 116 | 2019-01-17T18:31:43.000Z | 2022-03-31T13:37:21.000Z | netrd/utilities/__init__.py | sdmccabe/netrd | f703c19b02f42c9f54bcab57014381da11dd58da | [
"MIT"
] | 175 | 2019-01-15T01:19:13.000Z | 2021-05-25T16:51:26.000Z | netrd/utilities/__init__.py | sdmccabe/netrd | f703c19b02f42c9f54bcab57014381da11dd58da | [
"MIT"
] | 36 | 2019-01-14T20:38:32.000Z | 2022-01-21T20:58:38.000Z | """
utilities
----------
Common utilities for use within ``netrd``.
"""
from .threshold import threshold
from .graph import (
create_graph,
ensure_undirected,
undirected,
ensure_unweighted,
unweighted,
)
from .read import read_time_series
from .cluster import clusterGraph
from .standardize import mean_GNP_distance
from .entropy import (
js_divergence,
entropy_from_seq,
joint_entropy,
conditional_entropy,
categorized_data,
linear_bins,
)
__all__ = [
'threshold',
'clusterGraph',
'js_divergence',
'entropy_from_seq',
'joint_entropy',
'conditional_entropy',
'categorized_data',
'linear_bins',
'create_graph',
'undirected',
'ensure_undirected',
'unweighted',
'ensure_unweighted',
'read_time_series',
'mean_GNP_distance',
]
| 18.444444 | 42 | 0.683133 | 84 | 830 | 6.369048 | 0.404762 | 0.041122 | 0.052336 | 0.085981 | 0.302804 | 0.302804 | 0.302804 | 0.302804 | 0.302804 | 0.302804 | 0 | 0 | 0.208434 | 830 | 44 | 43 | 18.863636 | 0.814307 | 0.077108 | 0 | 0 | 0 | 0 | 0.274769 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bf292f506793261af16ff7db52fd7fc8b2dbbe15 | 697 | py | Python | python/leetcode/1706-Where_Will_the_Ball_Fall-M.py | levendlee/leetcode | 35e274cb4046f6ec7112cd56babd8fb7d437b844 | [
"Apache-2.0"
] | 1 | 2020-03-02T10:56:22.000Z | 2020-03-02T10:56:22.000Z | python/leetcode/1706-Where_Will_the_Ball_Fall-M.py | levendlee/leetcode | 35e274cb4046f6ec7112cd56babd8fb7d437b844 | [
"Apache-2.0"
] | null | null | null | python/leetcode/1706-Where_Will_the_Ball_Fall-M.py | levendlee/leetcode | 35e274cb4046f6ec7112cd56babd8fb7d437b844 | [
"Apache-2.0"
] | null | null | null | class Solution:
def findBall(self, grid: List[List[int]]) -> List[int]:
m, n = len(grid), len(grid[0])
# fall[c] = index of the starting ball currently occupying column c (-1 if none)
fall = list(range(n))
for i in range(m):
next_fall = [-1 for _ in range(n)]
for j in range(n):
if grid[i][j] == 1:
# '\' board: a ball lands in column j from j-1, but only if the board at
# j-1 also points right (two opposing boards form a V and trap the ball)
if j > 0 and grid[i][j-1] == 1:
next_fall[j] = fall[j-1]
else:
# '/' board: symmetric case, the ball arrives from column j+1
if j + 1 < n and grid[i][j+1] == -1:
next_fall[j] = fall[j+1]
fall = next_fall
# invert the mapping: res[ball] = final column, or -1 if the ball got stuck
res = [-1 for _ in range(n)]
for j, ball in enumerate(fall):
if ball != -1:
res[ball] = j
return res
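# Illustrative check: Solution().findBall([[1, 1, 1], [-1, -1, -1]]) -> [0, 1, -1]
# (balls 0 and 1 zigzag right then left; ball 2 gets stuck against the right wall)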
| 34.85 | 59 | 0.400287 | 99 | 697 | 2.757576 | 0.272727 | 0.043956 | 0.098901 | 0.076923 | 0.307692 | 0.307692 | 0.307692 | 0.190476 | 0.190476 | 0.190476 | 0 | 0.034483 | 0.45911 | 697 | 19 | 60 | 36.684211 | 0.689655 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bf29b24d082fbb7129c2bab18736d49934635b11 | 5,518 | py | Python | vae/train_vae.py | mazrk7/tf_playground | 81ad741f2bfe439bab85783ccf82d4715a3adef6 | [
"MIT"
] | null | null | null | vae/train_vae.py | mazrk7/tf_playground | 81ad741f2bfe439bab85783ccf82d4715a3adef6 | [
"MIT"
] | null | null | null | vae/train_vae.py | mazrk7/tf_playground | 81ad741f2bfe439bab85783ccf82d4715a3adef6 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tensorflow as tf
from dataset import load_data
from vae import VAE
from conv_vae import ConvVAE
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
# Define the VAE network architecture
def network_architecture(vae_type, latent_dim):
if vae_type == 'conv':
network_architecture = \
{'n_input': 1, # Number of input channels
'kernel_outer': 5, # Convolution kernel sizes for outer layers
'kernel_inner': 3, # Convolution kernel sizes for inner layers
'n_filters_1': 64, # Number of output convolution filters at layer 1
'n_filters_2': 64, # Number of output convolution filters at layer 2
'n_filters_3': 64, # Number of output convolution filters at layer 3
'n_filters_4': 64, # Number of output convolution filters at layer 4
'n_hidden': 500, # Dimensionality of intermediate layer
'n_z': latent_dim} # Dimensionality of latent space
else:
network_architecture = \
{'n_input': IMAGE_PIXELS, # MNIST data input
'n_hidden_1': 500, # Dimensionality of hidden layer 1
'n_hidden_2': 500, # Dimensionality of hidden layer 2
'n_z': latent_dim} # Dimensionality of latent space
return network_architecture
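# Minimal sketch (illustrative): reading fields back out of the architecture dict above.
def _architecture_demo():
    arch = network_architecture('vae', latent_dim=2)
    return arch['n_input'], arch['n_z']  # -> (784, 2) for flattened 28x28 MNIST images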
def main(_):
model_path = 'models/' + FLAGS.name
data = load_data(FLAGS.dataset, one_hot=True, validation_size=10000)
# Define and instantiate VAE model
if FLAGS.vae_type == 'vae':
vae = VAE(network_architecture=network_architecture(FLAGS.vae_type, FLAGS.latent_dim), batch_size=FLAGS.batch_size, learn_rate=FLAGS.learn_rate)
elif FLAGS.vae_type == 'conv':
vae = ConvVAE(network_architecture=network_architecture(FLAGS.vae_type, FLAGS.latent_dim), batch_size=FLAGS.batch_size, learn_rate=FLAGS.learn_rate)
else:
raise ValueError("Autoencoder type should be either conv or vae. Received: {}.".format(FLAGS.vae_type))
# Wish to allocate approximately gpu_memory_frac% of GPU memory
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_memory_frac)
with tf.device('/gpu:%d' % FLAGS.gpu_device):
sess = tf.Session(config=tf.ConfigProto(log_device_placement=FLAGS.log_device_placement, gpu_options=gpu_options))
tf.set_random_seed(FLAGS.seed)
# Initialise tf variables
init = tf.global_variables_initializer()
# Launch session
sess.run(init)
num_samples = data.train.num_examples
### Training cycle ###
for epoch in range(FLAGS.n_epochs):
avg_cost = 0.
avg_recon = 0.
avg_latent = 0.
total_batch = int(num_samples / FLAGS.batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs, _ = data.train.next_batch(FLAGS.batch_size)
# Fit training using batch data
if FLAGS.vae_type == 'conv':
cost, recon, latent = vae.partial_fit(sess, batch_xs, FLAGS.keep_prob)
else:
cost, recon, latent = vae.partial_fit(sess, batch_xs)
# Compute average losses
avg_cost += (cost / num_samples) * FLAGS.batch_size
avg_recon += (recon / num_samples) * FLAGS.batch_size
avg_latent += (latent / num_samples) * FLAGS.batch_size
# Display logs per epoch step
if epoch % FLAGS.display_step == 0:
print("Epoch: %04d / %04d, Cost= %04f, Recon= %04f, Latent= %04f" % \
(epoch, FLAGS.n_epochs, avg_cost, avg_recon, avg_latent))
# Create a saver object that will store all the parameter variables
saver = tf.train.Saver()
saver.save(sess, model_path)
print("Model saved as: %s" % model_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--name', type=str, default='digit_model_all', help='Name of model to train')
parser.add_argument('--seed', type=int, default='0', help='Sets the random seed for both numpy and tf')
parser.add_argument('--dataset', type=str, default='mnist', help='Name of dataset to load')
parser.add_argument('--vae_type', type=str, default='vae', help='Either a standard VAE (vae) or a convolutational VAE (conv)')
parser.add_argument('--batch_size', type=int, default='100', help='Sets the batch size')
parser.add_argument('--learn_rate', type=float, default='1e-5', help='Sets the learning rate')
parser.add_argument('--n_epochs', type=int, default='50', help='Number of training epochs')
parser.add_argument('--latent_dim', type=int, default='2', help='Latent dimensionality of the VAE')
parser.add_argument('--keep_prob', type=float, default='1.0', help='Sets the dropout rate')
parser.add_argument('--gpu_device', type=int, default=0, help='Specifying which GPU device to use')
parser.add_argument('--log_device_placement', type=bool, default=False, help='Logs the devices that operations and tensors are assigned to')
parser.add_argument('--gpu_memory_frac', type=float, default=0.8, help='Specifying what fraction of your GPU memory to occupy')
parser.add_argument('--display_step', type=int, default='5', help='Display step during training')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 45.229508 | 153 | 0.677419 | 753 | 5,518 | 4.743692 | 0.273572 | 0.032755 | 0.06187 | 0.017917 | 0.217245 | 0.165733 | 0.150616 | 0.150616 | 0.083427 | 0.06159 | 0 | 0.015972 | 0.217108 | 5,518 | 121 | 154 | 45.603306 | 0.81088 | 0.151685 | 0 | 0.084337 | 0 | 0.012048 | 0.197592 | 0.00473 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024096 | false | 0 | 0.108434 | 0 | 0.144578 | 0.036145 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bf2bb0444957a564a4fef3e32be0670e3dc59829 | 15,049 | py | Python | DPPO/dppo_cont_gae_dist_gpu.py | ChuaCheowHuan/reinforcement_learning | 037c292e5d81cd6d302566969c0391aba47d0343 | [
"MIT"
] | 32 | 2019-06-01T18:10:12.000Z | 2021-12-17T08:12:48.000Z | DPPO/dppo_cont_gae_dist_gpu.py | ChuaCheowHuan/reinforcement_learning | 037c292e5d81cd6d302566969c0391aba47d0343 | [
"MIT"
] | 9 | 2020-03-24T18:21:20.000Z | 2022-02-10T01:41:29.000Z | DPPO/dppo_cont_gae_dist_gpu.py | ChuaCheowHuan/reinforcement_learning | 037c292e5d81cd6d302566969c0391aba47d0343 | [
"MIT"
] | 9 | 2019-05-05T12:04:30.000Z | 2021-11-13T12:14:56.000Z | """
Distributed Proximal Policy Optimization (Distributed PPO or DPPO) continuous
version implementation with distributed Tensorflow and Python’s multiprocessing
package. This implementation uses normalized running rewards with GAE. The code
is tested with Gym’s continuous action space environment, Pendulum-v0 on Colab.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
#!pip install -q tf-nightly
import tensorflow as tf
tf.reset_default_graph()
import numpy as np
import matplotlib.pyplot as plt
import gym
import time
from multiprocessing import Process
# The following class is adapted from OpenAI's baseline:
# https://github.com/openai/baselines/blob/master/baselines/common/running_mean_std.py
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
# This class is used for the normalization of rewards in this program before GAE computation.
class RunningStats(object):
def __init__(self, epsilon=1e-4, shape=()):
self.mean = np.zeros(shape, 'float64')
self.var = np.ones(shape, 'float64')
self.std = np.ones(shape, 'float64')
self.count = epsilon
def update(self, x):
batch_mean = np.mean(x, axis=0)
batch_var = np.var(x, axis=0)
batch_count = x.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean, batch_var, batch_count):
delta = batch_mean - self.mean
new_mean = self.mean + delta * batch_count / (self.count + batch_count)
m_a = self.var * self.count
m_b = batch_var * batch_count
M2 = m_a + m_b + np.square(delta) * self.count * batch_count / (self.count + batch_count)
new_var = M2 / (self.count + batch_count)
self.mean = new_mean
self.var = new_var
self.std = np.maximum(np.sqrt(self.var), 1e-6)
self.count = batch_count + self.count
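def _running_stats_demo():
    """Minimal sketch (illustrative, not used by training): normalizing a reward
    batch with RunningStats, as done before the GAE computation in Worker.work()."""
    rs = RunningStats()
    rewards = np.array([1.0, -0.5, 2.0, 0.3])
    rs.update(rewards)
    return (rewards - rs.mean) / rs.std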
class PPO(object):
def __init__(self, scope, sess, env, global_PPO=None):
self.sess = sess
self.env = env
#OPT_A = tf.train.AdamOptimizer(A_LR, beta1=0.99, beta2=0.999, name='OPT_A')
#OPT_C = tf.train.AdamOptimizer(C_LR, beta1=0.99, beta2=0.999, name='OPT_C')
OPT_A = tf.train.AdamOptimizer(A_LR, name='OPT_A')
OPT_C = tf.train.AdamOptimizer(C_LR, name='OPT_C')
with tf.variable_scope(scope): # scope is either global or wid
self.state = tf.placeholder(tf.float32, [None, S_DIM], 'state')
# critic
with tf.variable_scope('critic'):
h1 = tf.layers.dense(self.state, hidden, tf.nn.relu, name='hidden', trainable=True)
self.val = tf.layers.dense(h1, 1, name='val', trainable=True)
self.critic_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
self.discounted_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
self.advantage = self.discounted_r - self.val
self.closs = tf.reduce_mean(tf.square(self.advantage))
self.ctrain_op = OPT_C.minimize(self.closs)
with tf.variable_scope('cgrads'):
self.critic_grad_op = tf.gradients(self.closs, self.critic_params)
# actor
self.pi, self.pi_params = self._build_anet(scope, 'pi', self.env, trainable=True)
self.oldpi, self.oldpi_params = self._build_anet(scope, 'oldpi', self.env, trainable=True) # originally trainable=False
with tf.variable_scope('sample_action'):
self.sample_op = tf.squeeze(self.pi.sample(1), axis=0) # choosing action
with tf.variable_scope('update_oldpi'):
self.update_oldpi_op = [oldp.assign(p) for p, oldp in zip(self.pi_params, self.oldpi_params)]
self.act = tf.placeholder(tf.float32, [None, A_DIM], 'action')
self.adv = tf.placeholder(tf.float32, [None, 1], 'advantage')
with tf.variable_scope('loss'):
with tf.variable_scope('surrogate'):
ratio = self.pi.prob(self.act) / self.oldpi.prob(self.act)
surr = ratio * self.adv
self.aloss = -tf.reduce_mean(tf.minimum(surr, tf.clip_by_value(ratio, 1.-epsilon, 1.+epsilon)*self.adv))
with tf.variable_scope('atrain'):
self.atrain_op = OPT_A.minimize(self.aloss)
with tf.variable_scope('agrads'):
self.pi_grad_op = tf.gradients(self.aloss, self.pi_params)
if scope != net_scope: # not global
with tf.name_scope('params'): # push/pull from local/worker perspective
with tf.name_scope('push_to_global'):
self.push_actor_pi_params = OPT_A.apply_gradients(zip(self.pi_grad_op, global_PPO.pi_params))
self.push_critic_params = OPT_C.apply_gradients(zip(self.critic_grad_op, global_PPO.critic_params))
with tf.name_scope('pull_fr_global'):
self.pull_actor_pi_params = [local_params.assign(global_params) for local_params, global_params in zip(self.pi_params, global_PPO.pi_params)]
self.pull_critic_params = [local_params.assign(global_params) for local_params, global_params in zip(self.critic_params, global_PPO.critic_params)]
def update(self, s, a, r, adv):
self.sess.run(self.update_oldpi_op)
for _ in range(A_EPOCH): # train actor
self.sess.run(self.atrain_op, {self.state: s, self.act: a, self.adv: adv})
# update actor
self.sess.run([self.push_actor_pi_params,
self.pull_actor_pi_params],
{self.state: s, self.act: a, self.adv: adv})
for _ in range(C_EPOCH): # train critic
# update critic
self.sess.run(self.ctrain_op, {self.state: s, self.discounted_r: r})
self.sess.run([self.push_critic_params,
self.pull_critic_params],
{self.state: s, self.discounted_r: r})
def _build_anet(self, scope, name, env, trainable):
with tf.variable_scope(name):
h1 = tf.layers.dense(self.state, hidden, tf.nn.relu, name='hidden', trainable=trainable)
mu = self.env.action_space.high * tf.layers.dense(h1, A_DIM, tf.nn.tanh, name='mu', trainable=trainable)
sigma = tf.layers.dense(h1, A_DIM, tf.nn.softplus, name='sigma', trainable=trainable)
norm_dist = tf.distributions.Normal(loc=mu, scale=sigma)
params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/' + name)
return norm_dist, params
def choose_action(self, s):
s = s[None, :]
a = self.sess.run(self.sample_op, {self.state: s})[0]
return np.clip(a, self.env.action_space.low, self.env.action_space.high)
def get_val(self, s):
if s.ndim < 2: s = s[None, :]
return self.sess.run(self.val, {self.state: s})[0, 0]
# This function is adapted from OpenAI's Baseline
# GAE computation
# returns TD lamda return & advantage
def add_vtarg_and_adv(self, R, done, V, v_s_, gamma, lam):
# Compute target value using TD(lambda) estimator, and advantage with GAE(lambda)
# last element is only used for last vtarg, but we already zeroed it if last new = 1
done = np.append(done, 0)
V_plus = np.append(V, v_s_)
T = len(R)
adv = gaelam = np.empty(T, 'float32')
lastgaelam = 0
for t in reversed(range(T)):
nonterminal = 1-done[t+1]
delta = R[t] + gamma * V_plus[t+1] * nonterminal - V_plus[t]
gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
#print("adv=", adv.shape)
#print("V=", V.shape)
#print("V_plus=", V_plus.shape)
tdlamret = np.vstack(adv) + V
#print("tdlamret=", tdlamret.shape)
return tdlamret, adv # tdlamret is critic_target or Qs
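def _gae_demo():
    """Minimal sketch (illustrative, hypothetical numbers) of the GAE recursion in
    add_vtarg_and_adv: delta_t = r_t + gamma*V_{t+1}*(1-done_{t+1}) - V_t and
    A_t = delta_t + gamma*lam*(1-done_{t+1})*A_{t+1}, computed backwards in time."""
    R = np.array([1.0, 0.5])
    done = np.append(np.array([0.0, 0.0]), 0)
    V_plus = np.append(np.array([0.2, 0.3]), 0.4)  # 0.4 plays the role of v_s_
    gamma, lam = 0.9, 0.95
    adv = np.empty(2, 'float32')
    lastgaelam = 0
    for t in reversed(range(2)):
        nonterminal = 1 - done[t + 1]
        delta = R[t] + gamma * V_plus[t + 1] * nonterminal - V_plus[t]
        adv[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
    return adv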
class Worker(object):
def __init__(self, wid, GLOBAL_PPO, GLOBAL_EP, GLOBAL_RUNNING_R, sess):
self.wid = wid
self.env = gym.make(GAME).unwrapped
self.g_ppo = GLOBAL_PPO
self.ppo = PPO(wid, sess, self.env, GLOBAL_PPO)
self.running_stats_r = RunningStats()
self.sess = sess
self.GLOBAL_EP = GLOBAL_EP
self.GLOBAL_RUNNING_R = GLOBAL_RUNNING_R
def work(self):
T = 0
t = 0
SESS = self.sess
GLOBAL_EP = self.GLOBAL_EP
GLOBAL_RUNNING_R = self.GLOBAL_RUNNING_R
while SESS.run(GLOBAL_EP) < EP_MAX:
s = self.env.reset()
buffer_s, buffer_a, buffer_r, buffer_done, buffer_V = [], [], [], [], []
ep_r = 0
for t in range(EP_LEN):
a = self.ppo.choose_action(s)
s_, r, done, _ = self.env.step(a)
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append(r)
buffer_done.append(done)
v = self.ppo.get_val(s)
buffer_V.append(v)
s = s_
ep_r += r
# update ppo
if (t+1) % BATCH == 0 or t == EP_LEN-1:
self.running_stats_r.update(np.array(buffer_r))
buffer_r = np.clip( (np.array(buffer_r) - self.running_stats_r.mean) / self.running_stats_r.std, -stats_CLIP, stats_CLIP )
v_s_ = self.ppo.get_val(s_)
tdlamret, adv = self.ppo.add_vtarg_and_adv(np.vstack(buffer_r), np.vstack(buffer_done), np.vstack(buffer_V), v_s_, GAMMA, lamda)
bs, ba, br, b_adv = np.vstack(buffer_s), np.vstack(buffer_a), tdlamret, np.vstack(adv)
buffer_s, buffer_a, buffer_r, buffer_done, buffer_V = [], [], [], [], []
self.ppo.update(bs, ba, br, b_adv)
SESS.run(GLOBAL_EP.assign_add(1.0))
qe = GLOBAL_RUNNING_R.enqueue(ep_r)
SESS.run(qe)
GAME = 'Pendulum-v0'
env = gym.make(GAME).unwrapped
net_scope = 'global'
EP_MAX = 500 #500 # max number of episodes
EP_LEN = 200 # episode length
GAMMA = 0.9
lamda = 0.95 #0.95
hidden = 50 #100
A_LR = 0.0001 # actor's learning rate
C_LR = 0.0002 # critic's learning rate
BATCH = 32 # minibatch size
A_EPOCH = 10 # number of epoch
C_EPOCH = 10 # number of epoch
S_DIM, A_DIM = 3, 1 # state, action dimension
stats_CLIP = 10 # upper bound of RunningStats
epsilon=0.2
cluster = tf.train.ClusterSpec({
"worker": ["localhost:3331",
"localhost:3332",
"localhost:3333",
"localhost:3334"
],
"ps": ["localhost:3330"]
})
def parameter_server():
#tf.reset_default_graph()
server = tf.train.Server(cluster,
job_name="ps",
task_index=0)
sess = tf.Session(target=server.target)
with tf.device("/job:ps/task:0"):
GLOBAL_PPO = PPO(net_scope, sess, env, global_PPO=None) # only need its params
GLOBAL_EP = tf.Variable(0.0, name='GLOBAL_EP') # num of global episodes
# a queue of ep_r
GLOBAL_RUNNING_R = tf.FIFOQueue(EP_MAX, tf.float32, shared_name="GLOBAL_RUNNING_R")
print("Parameter server: waiting for cluster connection...")
sess.run(tf.report_uninitialized_variables())
print("Parameter server: cluster ready!")
print("Parameter server: initializing variables...")
sess.run(tf.global_variables_initializer())
print("Parameter server: variables initialized")
while True:
time.sleep(1.0)
if sess.run(GLOBAL_RUNNING_R.size()) >= EP_MAX: # GLOBAL_EP starts from 0, hence +1 to max_global_episodes
time.sleep(10.0)
GLOBAL_RUNNING_R_list = []
ep_r_prev = 0.0
for i in range(sess.run(GLOBAL_RUNNING_R.size())):
ep_r = sess.run(GLOBAL_RUNNING_R.dequeue())
if i==0:
GLOBAL_RUNNING_R_list.append(ep_r) # for display
else:
GLOBAL_RUNNING_R_list.append(GLOBAL_RUNNING_R_list[-1]*0.9 + ep_r*0.1) # for display
break
# display
plt.plot(np.arange(len(GLOBAL_RUNNING_R_list)), GLOBAL_RUNNING_R_list)
plt.xlabel('episode')
plt.ylabel('reward')
plt.show()
#print("Parameter server: blocking...")
#server.join() # currently blocks forever
print("Parameter server: ended...")
def worker(worker_n):
#tf.reset_default_graph()
server = tf.train.Server(cluster,
job_name="worker",
task_index=worker_n)
sess = tf.Session(target=server.target)
with tf.device("/job:ps/task:0"):
GLOBAL_PPO = PPO(net_scope, sess, env, global_PPO=None) # only need its params
GLOBAL_EP = tf.Variable(0.0, name='GLOBAL_EP') # num of global episodes
# a queue of ep_r
GLOBAL_RUNNING_R = tf.FIFOQueue(EP_MAX, tf.float32, shared_name="GLOBAL_RUNNING_R")
"""
with tf.device(tf.train.replica_device_setter(
worker_device='/job:worker/task:' + str(worker_n),
cluster=cluster)):
"""
print("Worker %d: waiting for cluster connection..." % worker_n)
sess.run(tf.report_uninitialized_variables())
print("Worker %d: cluster ready!" % worker_n)
#while sess.run(tf.report_uninitialized_variables()):
while (sess.run(tf.report_uninitialized_variables())).any(): # ********** .any() .all() **********
print("Worker %d: waiting for variable initialization..." % worker_n)
time.sleep(1.0)
print("Worker %d: variables initialized" % worker_n)
w = Worker(str(worker_n), GLOBAL_PPO, GLOBAL_EP, GLOBAL_RUNNING_R, sess)
print("Worker %d: created" % worker_n)
sess.run(tf.global_variables_initializer()) # got to initialize after Worker creation
w.work()
print("Worker %d: w.work()" % worker_n)
#print("Worker %d: blocking..." % worker_n)
server.join() # currently blocks forever
print("Worker %d: ended..." % worker_n)
start_time = time.time()
ps_proc = Process(target=parameter_server, daemon=True)
w1_proc = Process(target=worker, args=(0, ), daemon=True)
w2_proc = Process(target=worker, args=(1, ), daemon=True)
w3_proc = Process(target=worker, args=(2, ), daemon=True)
w4_proc = Process(target=worker, args=(3, ), daemon=True)
ps_proc.start()
w1_proc.start()
w2_proc.start()
w3_proc.start()
w4_proc.start()
# if we do not join, the parent will terminate before the children,
# and the children will terminate as well because they are daemons
ps_proc.join()
#w1_proc.join()
#w2_proc.join()
#w3_proc.join()
#w4_proc.join()
for proc in [w1_proc,
w2_proc,
w3_proc,
w4_proc,
ps_proc]:
proc.terminate() # only way to kill server is to kill it's process
print('All done.')
print("--- %s seconds ---" % (time.time() - start_time))
| 41.802778 | 171 | 0.618779 | 2,091 | 15,049 | 4.254902 | 0.184601 | 0.029223 | 0.031471 | 0.021356 | 0.337305 | 0.231876 | 0.18377 | 0.147128 | 0.13409 | 0.122963 | 0 | 0.01677 | 0.259021 | 15,049 | 359 | 172 | 41.91922 | 0.781096 | 0.156024 | 0 | 0.086614 | 0 | 0 | 0.06597 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051181 | false | 0 | 0.027559 | 0 | 0.106299 | 0.059055 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bf2ca4850e1601c96655725a34150bcc61effb04 | 7,417 | py | Python | main_entry.py | LuxxxLucy/text_generation_SeqRNN | 27676f8c0844a69dd9c68ef6ff519b7ff03e50b9 | [
"MIT"
] | null | null | null | main_entry.py | LuxxxLucy/text_generation_SeqRNN | 27676f8c0844a69dd9c68ef6ff519b7ff03e50b9 | [
"MIT"
] | 1 | 2017-08-28T18:45:04.000Z | 2017-08-30T09:27:37.000Z | main_entry.py | LuxxxLucy/text_generation_SeqRNN | 27676f8c0844a69dd9c68ef6ff519b7ff03e50b9 | [
"MIT"
] | null | null | null | # utility modules
import os
from os import path
import shutil
import sys
import time
import json
import argparse
import numpy as np
from pprint import pprint as pr
ITEM_DIM = 100
dir_path = path.dirname(path.dirname(path.dirname(path.realpath(__file__))))
sys.path.append(dir_path)
import settings
# -----------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser()
# data I/O
parser.add_argument('--model_directory', type=str, default=settings.MODEL_STORE_PATH,
help='Location for parameter checkpoints and samples')
parser.add_argument('--model_file_name', type=str, default='seq_rnn',
help='model file name (will create a separated folder)')
parser.add_argument('--data_set', type=str, default='linux_data',
help='Can be fake_seq | quick_draw')
parser.add_argument('--checkpoint_interval', type=int, default=1,
help='Every how many epochs to write checkpoint/samples?')
parser.add_argument('--report_interval', type=int, default=20,
help='Every how many epochs to report current situation?')
parser.add_argument('--validation_interval', type=int, default=50,
help='Every how many epochs to do validation current situation?')
parser.add_argument('--load_params', dest='load_params', action='store_true',
help='Restore training from previous model checkpoint')
# model
parser.add_argument('--hist_length', type=int, default=5,
help='The minimum length of history sequence')
parser.add_argument('--training_num', type=int, default=None,
help='number of training samples')
parser.add_argument('--training_epoch', type=int, default=1,
help='number of training epoch')
parser.add_argument('--val_portion', type=float, default=0.4,
help='The portion of data to be validation data')
parser.add_argument('--shuffle', dest='shuffle', action='store_true',
help='shuffle the training samples or not')
# hyper-parameter for optimization
parser.add_argument('-l', '--learning_rate', type=float,
default=0.01, help='Base learning rate')
parser.add_argument('-e', '--lr_decay', type=float, default=0.999995,
help='Learning rate decay, applied every step of the optimization')
parser.add_argument('-b', '--batch_size', type=int, default=128,
help='Batch size during training per GPU')
parser.add_argument('-p', '--dropout_rate', type=float, default=0.2,
help='Dropout strength, where 0 = No dropout, higher = more dropout.')
parser.add_argument('-x', '--max_epochs', type=int, default=5000,
help='The maximum epochs to run')
parser.add_argument('-g', '--nr_gpu', type=int, default=1,
help='The number GPUs to distribute the training across')
# reproducibility:random seed
parser.add_argument('-s', '--random_seed', type=int, default=42,
help='Random seed to use')
args = parser.parse_args()
print('INFO CHECK!\ninput args:\n', json.dumps(vars(args), indent=4, separators=(',', ':')))
################################################
# The main program starts
################################################
# fix random seed for reproducibility
args.random_state = np.random.RandomState(args.random_seed)
# tf.set_random_seed(args.random_seed)
train(args)
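# Illustrative invocation (hypothetical paths and values; all flags are defined in main() above):
#   python main_entry.py --data_set linux_data --batch_size 64 --max_epochs 100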
def train(args):
class_num = {'quick_draw': 10, 'fake_seq': 1, 'linux_data': 101}[args.data_set]
args.class_num=class_num
# initialize data loaders for train/test splits
# data loader
print(args.data_set)
if args.data_set == 'linux_data':
import data.linux_code_data as linux_code_data
print('start loading dataset',args.data_set)
train_data = linux_code_data.DataLoader(args,'train')
print('dataset',args.data_set,'loading completed')
from learner_model.SeqRNN import Sequence_RNN_Model_Session as model_session
print('import seq RNN model okay')
elif args.data_set == 'shakespeare_data':
import data.shakespeare_data as shakespeare_data
print('start loading dataset',args.data_set)
train_data = shakespeare_data.DataLoader(args, 'train')
test_data = shakespeare_data.DataLoader(args, 'test')
print('dataset',args.data_set,'loading completed')
from learner_model.SeqRNN import Sequence_RNN_Model_Session as model_session
print('import seq RNN model okay')
else:
print('this dataset is not available , or the dataset name not correct')
quit()
model_path_name=path.join(args.model_directory,args.model_file_name)
print(model_path_name)
file_path_name=path.join(args.model_directory,args.model_file_name+"Gen")
if os.path.exists(model_path_name) and args.load_params:
try:
model = model_session.restore(model_path_name)
except:
print("error happens, now remove the original folder name from",model_path_name)
shutil.rmtree(model_path_name)
os.makedirs(model_path_name)
model = model_session.create(class_num=len(train_data.dictionary))
session = model_session(model,args)
else:
try:
os.makedirs(model_path_name)
except:
print("directory okay")
if not os.path.exists(model_path_name):
print("there is no previous file")
if not args.load_params:
print("deliberately chose not to load a previous model")
print("create a new model")
model = model_session.create(class_num=len(train_data.dictionary))
session = model_session(model,args)
print(model)
session.register_dictionary(train_data.dictionary)
session.register_index(train_data.index)
if args.training_num is None:
args.training_num = train_data.record_num
print('Last Check :overall training number',train_data.record_num)
# Train the model, iterating on the data in batches of 32 samples
iteration=0
for iEpoch in range(args.training_epoch):
for data in train_data:
# x, y = training_data.next_batch(args.batch_size)
x=data
session.train(x)
if iteration % args.report_interval == 0:
score = session.evaluate(data, batch_size=args.batch_size)
# print(" training batch score" , score)
if iteration % args.validation_interval == 0:
session.generate(random_sentence_start=x,file_directory=file_path_name)
if iteration % args.checkpoint_interval == 0:
session.save(model_path_name)
iteration+=1
print("Final model %s" % model)
model_session.save(model,model_path_name)
def test(args):
model_path_name = path.join(args.model_directory, args.model_file_name)
model = model_session.restore(model_path_name)
print(model)
accuracy = model.test(test_data.X_data, test_data.Y_data)
print("Test accuracy %0.4f" % accuracy)
if __name__ == "__main__":
main()
| 41.903955 | 96 | 0.637994 | 936 | 7,417 | 4.855769 | 0.25641 | 0.037624 | 0.071067 | 0.014962 | 0.259846 | 0.19846 | 0.175798 | 0.163916 | 0.163916 | 0.163916 | 0 | 0.008996 | 0.235675 | 7,417 | 176 | 97 | 42.142045 | 0.792732 | 0.065121 | 0 | 0.198473 | 0 | 0 | 0.238018 | 0.006156 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022901 | false | 0 | 0.122137 | 0 | 0.145038 | 0.160305 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bf2e31dad78bdc69eb2a8bd13549245a36b50b40 | 4,741 | py | Python | data_process/face_rectify.py | ZephyrDu/Face-Sketch-Wild | 4e967e58b8cb95af74d5fb26a4f9761a966c04bd | [
"MIT"
] | 73 | 2018-11-13T09:32:31.000Z | 2022-02-25T13:28:29.000Z | data_process/face_rectify.py | ZephyrDu/Face-Sketch-Wild | 4e967e58b8cb95af74d5fb26a4f9761a966c04bd | [
"MIT"
] | 4 | 2019-10-22T09:15:21.000Z | 2021-12-02T08:21:54.000Z | data_process/face_rectify.py | ZephyrDu/Face-Sketch-Wild | 4e967e58b8cb95af74d5fb26a4f9761a966c04bd | [
"MIT"
] | 22 | 2018-11-04T00:08:30.000Z | 2022-03-07T00:50:13.000Z | """
Rectify the face photo according to the paper: Real-Time Exemplar-Based Face Sketch Synthesis.
shape: h=250, w=200
position: left eye (x=75,y=125), right eye (x=125, y=125)
This module use similarity transformation to roughly align the two eyes.
Specifically, the transformation matrix can be written as:
S = | s_x cos(\theta)    sin(\theta)      t_x |
    | -sin(\theta)       s_y cos(\theta)  t_y |
There are 5 degrees of freedom in the matrix above, so at least 3 points (x, y) are needed to solve for it.
Since only the two eye centers are known, we can simply hallucinate a third point such that it forms an equilateral triangle with the two known points.
Reference:
http://www.learnopencv.com/average-face-opencv-c-python-tutorial/
http://blog.csdn.net/GraceDD/article/details/51382952
"""
import math
import numpy as np
import os
import dlib
import cv2 as cv
from PIL import Image
import matplotlib.pyplot as plt
from natsort import natsorted
def detect_fiducial_points(img, predictor_path):
"""
Detect face landmarks and return the mean points of left and right eyes.
If there are multiple faces in one image, only select the first one.
"""
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
dets = detector(img, 1)
if len(dets) < 1:
return []
for k, d in enumerate(dets):
shape = predictor(img, d)
break
landmarks = []
for i in range(68):
landmarks.append([shape.part(i).x, shape.part(i).y])
landmarks = np.array(landmarks)
left_eye = landmarks[36:42]
right_eye = landmarks[42:48]
mouth = landmarks[48:68]
return np.array([np.mean(left_eye, 0), np.mean(right_eye, 0)]).astype('int')
def similarityTransform(inPoints, outPoints) :
"""
Calculate similarity transform:
Input:
(left eye, right eye) in (x, y)
inPoints: (2, 2), numpy array.
outPoints: (2, 2), numpy array
Return:
A partial affine transform.
"""
s60 = math.sin(60*math.pi/180)
c60 = math.cos(60*math.pi/180)
inPts = np.copy(inPoints).tolist()
outPts = np.copy(outPoints).tolist()
xin = c60*(inPts[0][0] - inPts[1][0]) - s60*(inPts[0][1] - inPts[1][1]) + inPts[1][0]
yin = s60*(inPts[0][0] - inPts[1][0]) + c60*(inPts[0][1] - inPts[1][1]) + inPts[1][1]
inPts.append([np.int(xin), np.int(yin)])
xout = c60*(outPts[0][0] - outPts[1][0]) - s60*(outPts[0][1] - outPts[1][1]) + outPts[1][0]
yout = s60*(outPts[0][0] - outPts[1][0]) + c60*(outPts[0][1] - outPts[1][1]) + outPts[1][1]
outPts.append([np.int(xout), np.int(yout)])
tform = cv.estimateRigidTransform(np.array([inPts]), np.array([outPts]), False)
return tform
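def _similarity_transform_demo():
    """Minimal sketch (illustrative, hypothetical eye coordinates): aligning a
    detected eye pair onto the 200x250 template positions used below. Note that
    cv.estimateRigidTransform was removed in OpenCV 4; on recent versions
    cv.estimateAffinePartial2D is the usual replacement."""
    detected = np.array([[60, 110], [140, 112]])
    template = np.array([[75, 125], [125, 125]])
    return similarityTransform(detected, template)  # 2x3 partial affine matrix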
def rectify_img(img_path, predictor_path):
template_eye_pos = np.array([[75, 125], [125, 125]])
template_size = (200, 250)
img = cv.imread(img_path)
detected_eyes = detect_fiducial_points(np.array(img), predictor_path)
if not len(detected_eyes):
return None
trans = similarityTransform(detected_eyes, template_eye_pos)
rect_img = cv.warpAffine(img, trans, template_size)
return rect_img
def align_img(ref_path, src_path, predictor_path):
ref_img = cv.imread(ref_path)
src_img = cv.imread(src_path)
ref_eyes = detect_fiducial_points(np.array(ref_img), predictor_path)
src_eyes = detect_fiducial_points(np.array(src_img), predictor_path)
trans = similarityTransform(src_eyes, ref_eyes)
rect_img = cv.warpAffine(src_img, trans, (200, 250))
return rect_img
if __name__ == '__main__':
src_dir = '../result_ours/CUFSF_intersect/ours_result'
ref_dir = '../result_ours/CUFSF_intersect/gt_sketch'
save_dir = '../result_ours/CUFSF_intersect/ours_warp'
if not os.path.exists(save_dir): os.mkdir(save_dir)
ref_img_list = natsorted(os.listdir(ref_dir))
src_img_list = natsorted(os.listdir(src_dir))
for i in range(len(ref_img_list)):
ref_path = os.path.join(ref_dir, ref_img_list[i])
src_path = os.path.join(src_dir, src_img_list[i])
save_path = os.path.join(save_dir, ref_img_list[i])
warp_src = align_img(ref_path, src_path, './shape_predictor_68_face_landmarks.dat')
cv.imwrite(save_path, warp_src)
# template_eye_pos = np.array([[75, 125], [125, 125]])
# template_size = (200, 250)
# img_path = '/disk1/cfchen/data/FERET/original_photo/00001.jpg'
# img = cv.imread(img_path)
# detected_eyes = detect_fiducial_points(np.array(img), './shape_predictor_68_face_landmarks.dat')
# trans = similarityTransform(detected_eyes, template_eye_pos)
# rect_img = cv.warpAffine(img, trans, template_size)
# cv.imshow('test', rect_img)
# cv.waitKey()
| 37.928 | 109 | 0.669479 | 720 | 4,741 | 4.230556 | 0.295833 | 0.022981 | 0.03283 | 0.031517 | 0.299081 | 0.255745 | 0.159882 | 0.159882 | 0.131648 | 0.131648 | 0 | 0.044514 | 0.194474 | 4,741 | 124 | 110 | 38.233871 | 0.753077 | 0.322506 | 0 | 0.028986 | 0 | 0 | 0.054638 | 0.051144 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057971 | false | 0 | 0.115942 | 0 | 0.26087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bf2ea22b630b974814db2da578466418ae00ae5f | 643 | py | Python | old_app.py | stimura/interactive_visualizations_and_dashboards_plotly.js | 980ee5078b2fc93fc2f906769b97b013885d5580 | [
"ADSL"
] | null | null | null | old_app.py | stimura/interactive_visualizations_and_dashboards_plotly.js | 980ee5078b2fc93fc2f906769b97b013885d5580 | [
"ADSL"
] | null | null | null | old_app.py | stimura/interactive_visualizations_and_dashboards_plotly.js | 980ee5078b2fc93fc2f906769b97b013885d5580 | [
"ADSL"
] | null | null | null | from data_wrangling import *
from flask import Flask, jsonify, render_template
app = Flask(__name__)
@app.route("/")
def index():
return render_template('index.html')
@app.route("/names")
def names():
# Store results into a dictionary
forecast = get_samples()
return jsonify(forecast)
# Redirect back to home page
# return redirect("http://localhost:5000/", code=302)
@app.route("/pie")
def make_pie_chart():
data = [{
"labels": get_otu_pie_labels(),
"values": get_otu_pie_values(),
"type": "pie"}]
return jsonify(data)
if __name__ == "__main__":
app.run(debug=True)
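# Illustrative requests against a local run (hypothetical host/port):
#   GET http://localhost:5000/names -> JSON forecast from get_samples()
#   GET http://localhost:5000/pie   -> labels/values/type payload for a Plotly pie chart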
| 18.371429 | 57 | 0.642302 | 81 | 643 | 4.802469 | 0.567901 | 0.061697 | 0.046272 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013834 | 0.213064 | 643 | 34 | 58 | 18.911765 | 0.754941 | 0.171073 | 0 | 0 | 0 | 0 | 0.090737 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.157895 | false | 0 | 0.105263 | 0.052632 | 0.421053 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bf34bb0f6cb77d847d353e75322326b1e613f85e | 1,484 | py | Python | word2vec/data_test.py | luozhouyang/machine-learning-notes | 332bea905398891fed4a98aa139eac02c88cb5ae | [
"Apache-2.0"
] | 73 | 2018-09-07T06:47:18.000Z | 2022-01-25T06:14:41.000Z | word2vec/data_test.py | luozhouyang/machine-learning-notes | 332bea905398891fed4a98aa139eac02c88cb5ae | [
"Apache-2.0"
] | 2 | 2018-10-18T06:40:19.000Z | 2019-11-16T01:48:39.000Z | word2vec/data_test.py | luozhouyang/machine-learning-notes | 332bea905398891fed4a98aa139eac02c88cb5ae | [
"Apache-2.0"
] | 47 | 2018-09-27T10:50:21.000Z | 2022-01-25T06:20:23.000Z | # Copyright 2018 luozhouyang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
from .data import SkipGramDataSet
import os
_CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
LICENSE_FILE = os.path.join(os.path.curdir, "LICENSE")
INIT_FILE = os.path.join(_CURRENT_DIR, "__init__.py")
TEST_FILE = os.path.join(_CURRENT_DIR, "test.txt")
class TestDataSet(unittest.TestCase):
def testGenBatchInputs(self):
ds = SkipGramDataSet(file=TEST_FILE)
BATCH_SIZE = 16
features, labels = ds.gen_batch_inputs(BATCH_SIZE, 1)
for i in range(BATCH_SIZE):
print("%s --> %s" % (ds.id2word[features[i]], ds.id2word[labels[i]]))
for _ in range(16):
features, labels = ds.gen_batch_inputs(BATCH_SIZE, 1)
for i in range(BATCH_SIZE):
print("%s --> %s" % (ds.id2word[features[i]], ds.id2word[labels[i]]))
if __name__ == "__main__":
unittest.main()
| 31.574468 | 80 | 0.679919 | 210 | 1,484 | 4.638095 | 0.485714 | 0.061602 | 0.030801 | 0.043121 | 0.26078 | 0.26078 | 0.211499 | 0.211499 | 0.211499 | 0.211499 | 0 | 0.0144 | 0.157682 | 1,484 | 46 | 81 | 32.26087 | 0.7648 | 0.422507 | 0 | 0.3 | 0 | 0 | 0.061758 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.15 | 0 | 0.25 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bf355dafca1416e7627639d652c8fed0b68000f6 | 11,140 | py | Python | scripts/elasticArchive.py | softandbyte/elasticArchive | 90a2bbe28eca8ee4f8bdeca560e2202ceeef2ba4 | [
"MIT"
] | 16 | 2020-06-09T03:29:03.000Z | 2022-03-12T05:05:54.000Z | scripts/elasticArchive.py | softandbyte/elasticArchive | 90a2bbe28eca8ee4f8bdeca560e2202ceeef2ba4 | [
"MIT"
] | 2 | 2021-11-09T20:50:58.000Z | 2022-03-23T05:17:50.000Z | scripts/elasticArchive.py | softandbyte/elasticArchive | 90a2bbe28eca8ee4f8bdeca560e2202ceeef2ba4 | [
"MIT"
] | 5 | 2021-02-25T08:43:18.000Z | 2022-03-12T05:05:54.000Z | """
This script serializes the entire traffic dump, including websocket traffic,
as JSON, and sends it to an Elasticsearch endpoint for permanent storage.
Unlike some plugins, this one sends all requests and responses to elasticsearch
in real-time.
This script is based on the original mitmproxy scripts jsondump.py and har_dump.py
Usage:
mitmproxy
-s elasticArchive.py
--set elasticsearch_URL=http://<your elasticsearch server>:9200/mitmproxy/_doc
OPTIONAL
--set storeBinaryContent=true
--set elastic_username=<username>
--set elastic_password=<password>
You can also put those --set options inside ~/.mitmproxy/config.yaml but I prefer setting
them at startup
"""
from threading import Thread
from queue import Queue
import base64
import json
import requests
from mitmproxy import ctx
from mitmproxy.net.http import encoding
from mitmproxy.utils import strutils  # used by the ws_messages transformation
HTTP_WORKERS = 10
class elasticArchive:
"""
elasticArchive performs JSON serialization and some extra processing
for out-of-the-box Elasticsearch support, and then either writes
the result to a file or sends it to a URL.
"""
def __init__(self):
self.transformations = None
self.storeBinaryContent = None
self.url = None
self.auth = None
self.queue = Queue()
print("elasticArchive loaded")
def done(self):
self.queue.join()
fields = {
'timestamp': (
('error', 'timestamp'),
('request', 'timestamp_start'),
('request', 'timestamp_end'),
('response', 'timestamp_start'),
('response', 'timestamp_end'),
('client_conn', 'timestamp_start'),
('client_conn', 'timestamp_end'),
('client_conn', 'timestamp_tls_setup'),
('server_conn', 'timestamp_start'),
('server_conn', 'timestamp_end'),
('server_conn', 'timestamp_tls_setup'),
('server_conn', 'timestamp_tcp_setup'),
),
'ip': (
('server_conn', 'source_address'),
('server_conn', 'ip_address'),
('server_conn', 'address'),
('client_conn', 'address'),
),
'ws_messages': (
('messages', ),
),
'headers': (
('request', 'headers'),
('response', 'headers'),
),
'content': (
('request', 'content'),
('response', 'content'),
),
'tls': (
('client_conn', 'tls_extensions'),
),
}
def _init_transformations(self):
self.transformations = [
{
'fields': self.fields['headers'],
'func': dict,
},
{
'fields': self.fields['tls'],
'func': lambda exts: [{
str(ext[0]): str(ext[1]),
} for ext in exts],
},
{
'fields': self.fields['timestamp'],
'func': lambda t: int(t * 1000),
},
{
'fields': self.fields['ip'],
'func': lambda addr: {
'host': addr[0].replace('::ffff:', ''),
'port': addr[1],
},
},
{
'fields': self.fields['ws_messages'],
'func': lambda ms: [{
'type': m[0],
'from_client': m[1],
'content': base64.b64encode(bytes(m[2], 'utf-8')) if strutils.is_mostly_bin(m[2]) else m[2],
'timestamp': int(m[3] * 1000),
} for m in ms],
}
]
@staticmethod
def transform_field(obj, path, func):
"""
Apply a transformation function `func` to a value
under the specified `path` in the `obj` dictionary.
"""
for key in path[:-1]:
if not (key in obj and obj[key]):
return
obj = obj[key]
if path[-1] in obj and obj[path[-1]]:
obj[path[-1]] = func(obj[path[-1]])
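# Illustrative: transform_field({'a': {'b': 2}}, ('a', 'b'), lambda v: v * 10)
# mutates the nested dict in place to {'a': {'b': 20}}; missing or falsy keys are skipped.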
@classmethod
def convert_to_strings(cls, obj):
"""
Recursively convert all list/dict elements of type `bytes` into strings.
"""
if isinstance(obj, dict):
return {cls.convert_to_strings(key): cls.convert_to_strings(value)
for key, value in obj.items()}
elif isinstance(obj, list) or isinstance(obj, tuple):
return [cls.convert_to_strings(element) for element in obj]
elif isinstance(obj, bytes):
return str(obj)[2:-1]
return obj
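# Illustrative: convert_to_strings({b'key': [b'value', 1]}) -> {'key': ['value', 1]}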
def worker(self):
while True:
frame = self.queue.get()
self.dump(frame)
self.queue.task_done()
def dump(self, frame):
"""
Transform and dump (write / send) a data frame.
"""
#print('Frame= %s' % frame)
requestContentType = None
responseContentType = None
requestContentEncoding = None
responseContentEncoding = None
for header in frame["request"]["headers"]:
h = header[0].decode('utf-8')
#print(h)
if h.lower() == "content-type":
requestContentType = header[1].decode("utf-8")
if h.lower() == "content-encoding":
requestContentEncoding = header[1].decode("utf-8")
for header in frame["response"]["headers"]:
h = header[0].decode('utf-8')
#print(h)
if h.lower() == "content-type":
responseContentType = header[1].decode("utf-8")
if h.lower() == "content-encoding":
responseContentEncoding = header[1].decode("utf-8")
for tfm in self.transformations:
for field in tfm['fields']:
self.transform_field(frame, field, tfm['func'])
#print("requestContentType %s" % requestContentType)
#print("responseContentType %s" % responseContentType)
#print("requestContentEncoding %s" % requestContentEncoding)
#print("responseContentEncoding %s" % responseContentEncoding)
if responseContentEncoding:
rawContent = frame["response"]["content"]
#print(type(rawContent))
#print("rawContent %s " % rawContent)
#print("decoding content of type %s" % responseContentEncoding)
#print("decoding with input string of type %s" % type(responseContentEncoding))
decodedContent = encoding.decode(rawContent, responseContentEncoding)
#print("decodedContent %s" % decodedContent)
frame["response"]["content"] = decodedContent
if self.storeBinaryContent:
if self.isBinaryContent(requestContentType):
frame["request"]["content"] = base64.b64encode(frame["request"]["content"])
if self.isBinaryContent(responseContentType):
frame["response"]["content"] = base64.b64encode(frame["response"]["content"])
else:
if self.isBinaryContent(requestContentType):
frame["request"]["content"] = "Binary content removed"
if self.isBinaryContent(responseContentType):
frame["response"]["content"] = "Binary content removed"
frame = self.convert_to_strings(frame)
print("Sending frame to Elasticsearch")
# If you need to debug this, print/log frame and result, as they will show you
# what was sent and what errors you got back. This generates a lot of noise though...
# Pass the dict via `json=` so requests serializes it exactly once; wrapping it in
# json.dumps() would double-encode the body into a JSON string.
result = requests.post(self.url, json=frame, auth=self.auth)
print(result.text)
@staticmethod
def isBinaryContent(contentType):
if contentType is None:
print("Check is None")
return False
else:
print(contentType)
if contentType.startswith("text/"):
return False
elif contentType.startswith("multipart/form-data"):
return False
elif contentType.startswith("application/json"):
return False
elif contentType.startswith("application/xml"):
return False
else:
return True
@staticmethod
def load(loader):
"""
Extra options to be specified in `~/.mitmproxy/config.yaml`.
"""
loader.add_option('elasticsearch_URL', str, 'http://localhost:9200/mitmproxy/_doc',
'Elasticsearch resource path including index (mitmproxy) and type (usually _doc) ')
loader.add_option('storeBinaryContent', bool, False,
'Store binary content in Elasticsearch. If true, it will get pretty big pretty fast. Text is always stored.')
loader.add_option('elastic_username', str, '',
'Basic auth username for URL destinations.')
loader.add_option('elastic_password', str, '',
'Basic auth password for URL destinations.')
def configure(self, _):
"""
Determine the destination type and path, initialize the output
transformation rules.
"""
self.storeBinaryContent = ctx.options.storeBinaryContent
print('storeBinaryContent set to %s' % self.storeBinaryContent)
print('Sending all data frames to %s' % ctx.options.elasticsearch_URL)
if ctx.options.elasticsearch_URL.startswith('http'):
self.url = ctx.options.elasticsearch_URL
ctx.log.info('Sending all data frames to %s' % self.url)
if ctx.options.elastic_username and ctx.options.elastic_password:
self.auth = (ctx.options.elastic_username, ctx.options.elastic_password)
ctx.log.info('HTTP Basic auth enabled.')
else:
print("Invalid elasticsearch_URL. Exiting.")
exit()
self._init_transformations()
for i in range(HTTP_WORKERS):
print("Start of create worker loop")
t = Thread(target=self.worker)
t.daemon = True
t.start()
print("Started HTTP worker")
def response(self, flow):
"""
Dump request/response pairs.
"""
self.queue.put(flow.get_state())
print("Put frame on queue (response)")
def error(self, flow):
"""
Dump errors.
"""
self.queue.put(flow.get_state())
def websocket_end(self, flow):
"""
Dump websocket messages once the connection ends.
Alternatively, you can replace `websocket_end` with
`websocket_message` if you want the messages to be
dumped one at a time with full metadata. Warning:
this takes up _a lot_ of space.
"""
self.queue.put(flow.get_state())
def websocket_error(self, flow):
"""
Dump websocket errors.
"""
self.queue.put(flow.get_state())
addons = [elasticArchive()] # pylint: disable=invalid-name
| 34.382716 | 135 | 0.560503 | 1,136 | 11,140 | 5.417254 | 0.260563 | 0.0117 | 0.00975 | 0.00975 | 0.153234 | 0.131297 | 0.098148 | 0.040949 | 0.029249 | 0.029249 | 0 | 0.008232 | 0.323878 | 11,140 | 323 | 136 | 34.489164 | 0.808816 | 0.203591 | 0 | 0.156098 | 0 | 0.004878 | 0.190737 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068293 | false | 0.019512 | 0.034146 | 0 | 0.165854 | 0.053659 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bf35df664a6f006ae32195314f2da6cc632d3aa5 | 1,425 | py | Python | app_covid19data/tests/test_forms.py | falken20/covid19web | 3826e5cc51dc24d373a1f614ccdb7c30993312ce | [
"MIT"
] | null | null | null | app_covid19data/tests/test_forms.py | falken20/covid19web | 3826e5cc51dc24d373a1f614ccdb7c30993312ce | [
"MIT"
] | null | null | null | app_covid19data/tests/test_forms.py | falken20/covid19web | 3826e5cc51dc24d373a1f614ccdb7c30993312ce | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.utils import timezone
from model_bakery import baker
from app_covid19data.models import DataCovid19Item
class Covid19dataTest(TestCase):
def create_DataCovid19Item(self, country='countryTest', state='stateTest', latitude=1, longitude=1):
return DataCovid19Item.objects.create(country=country, state=state, latitude=latitude,
longitude=longitude, date=timezone.now())
def test_covid19data_creation(self):
# w = self.create_DataCovid19Item()
w = baker.make(DataCovid19Item)
self.assertTrue(isinstance(w, DataCovid19Item))
r = f'Daily data from {w.country}/{w.state} at {w.date}' \
f'Lat/Long: {w.latitude}/{w.longitude}' \
f'\nConfirmed: {w.confirmed_cases}' \
f'\nDeaths: {w.dead_cases}' \
f'\nRecovered: {w.recovered_cases}' \
f'\nActive: {w.active_cases}' \
f'\nIncidence: {w.incidence_rate}' \
f'\nFatality Ratio: {w.case_fatality_ratio}'
self.assertEqual(w.__str__(), r)
def test_covid19data_exception(self):
self.assertRaises(Exception, self.create_DataCovid19Item, latitude='1')
def test_covid19data_save(self):
# w = self.create_DataCovid19Item()
w = baker.make(DataCovid19Item)
w.latitude = '1'
self.assertRaises(Exception, w.save) | 39.583333 | 104 | 0.648421 | 159 | 1,425 | 5.666667 | 0.396226 | 0.09323 | 0.059933 | 0.033296 | 0.122087 | 0.122087 | 0.122087 | 0.122087 | 0.122087 | 0 | 0 | 0.029304 | 0.233684 | 1,425 | 36 | 105 | 39.583333 | 0.795788 | 0.047018 | 0 | 0.076923 | 0 | 0 | 0.216077 | 0.051622 | 0 | 0 | 0 | 0 | 0.153846 | 1 | 0.153846 | false | 0 | 0.153846 | 0.038462 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bf365d62fde31326141c9b2b13f45dee2f7dc651 | 823 | py | Python | python/2015/Day 1 Not Quite Lisp/main.py | FirinKinuo/advent-of-code | 97059fc2832b224c24e80bdb658c668bcbb1cb12 | [
"MIT"
] | null | null | null | python/2015/Day 1 Not Quite Lisp/main.py | FirinKinuo/advent-of-code | 97059fc2832b224c24e80bdb658c668bcbb1cb12 | [
"MIT"
] | null | null | null | python/2015/Day 1 Not Quite Lisp/main.py | FirinKinuo/advent-of-code | 97059fc2832b224c24e80bdb658c668bcbb1cb12 | [
"MIT"
] | null | null | null | from python import SolvingBase
class Solving(SolvingBase):
def first_problem(self):
floor = 0
with open(self.test_case, 'r', encoding='utf-8') as file:
instructions = file.read()
for command in instructions:
floor += 1 if command == '(' else -1
return floor
def second_problem(self):
floor = 0
with open(self.test_case, 'r', encoding='utf-8') as file:
instructions = file.read()
for command_index, command in enumerate(instructions):
floor += 1 if command == '(' else -1
if floor == -1:
return command_index + 1
if __name__ == "__main__":
solve = Solving(test_case=False)
print(f"First Problem: {solve.first_problem()}\nSecond Problem: {solve.second_problem()}")
| 24.939394 | 94 | 0.589307 | 99 | 823 | 4.727273 | 0.414141 | 0.025641 | 0.068376 | 0.07265 | 0.504274 | 0.504274 | 0.504274 | 0.367521 | 0.367521 | 0.367521 | 0 | 0.017241 | 0.295261 | 823 | 32 | 95 | 25.71875 | 0.789655 | 0 | 0 | 0.4 | 0 | 0 | 0.123937 | 0.066829 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.05 | 0 | 0.3 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bf366ea4faa9d83ef47009d3ecfc52584f3dc9bd | 10,032 | py | Python | downscale_/downscale/utils/utils_func.py | louisletoumelin/wind_downscaling_cnn | 9d08711620db1ee1f472847f0e822c5f4eb1d300 | [
"W3C"
] | null | null | null | downscale_/downscale/utils/utils_func.py | louisletoumelin/wind_downscaling_cnn | 9d08711620db1ee1f472847f0e822c5f4eb1d300 | [
"W3C"
] | 12 | 2021-11-30T16:56:05.000Z | 2021-12-13T16:26:31.000Z | downscale_/downscale/utils/utils_func.py | louisletoumelin/wind_downscaling_cnn | 9d08711620db1ee1f472847f0e822c5f4eb1d300 | [
"W3C"
] | null | null | null | import numpy as np
import pandas as pd
import datetime
from downscale.utils.decorators import timer_decorator
def select_range(month_begin, month_end, year_begin, year_end, date_begin, date_end):
if (month_end != month_begin) or (year_begin != year_end):
dates = pd.date_range(date_begin, date_end, freq='M')
iterator = zip(dates.day, dates.month, dates.year)
else:
dates = pd.to_datetime(date_end)
iterator = zip([dates.day], [dates.month], [dates.year])
return iterator
def select_range_7days_for_long_periods_prediction(begin="2017-8-2", end="2020-6-30", prm=None):
"""
Takes a date range (begin and end) and splits it into 7-day windows, cutting any
window that contains one of the hard-coded splitting dates in two.
Only works if at most one splitting date falls within a given week.
"""
begin = np.datetime64(pd.to_datetime(begin))
end = np.datetime64(pd.to_datetime(end))
# Define 7 days periods within date range
dates = pd.date_range(start=begin, end=end, freq="7D")
dates_shift = pd.date_range(start=begin, end=end, freq="7D").shift()
dates_shift = dates_shift.where(dates_shift <= end, [end])
# Split range around selected dates
if prm["GPU"]:
d1 = datetime.datetime(2017, 8, 1, 6)
d2 = datetime.datetime(2018, 8, 1, 6)
d3 = datetime.datetime(2019, 5, 1, 6)
d4 = datetime.datetime(2019, 6, 1, 6)
d5 = datetime.datetime(2020, 6, 2, 6)
splitting_dates = [np.datetime64(date) for date in [d1, d2, d3, d4, d5]]
else:
d1 = datetime.datetime(2017, 8, 1, 6)
d2 = datetime.datetime(2018, 8, 1, 6)
d3 = datetime.datetime(2019, 6, 1, 6)
d6 = datetime.datetime(2020, 7, 1, 6)
splitting_dates = [np.datetime64(date) for date in [d1, d2, d3, d6]]
begins = []
ends = []
for index, (begin, end) in enumerate(zip(dates.values, dates_shift.values)):
# Add one day to begin after first element
begin = begin if index == 0 else begin + np.timedelta64(1, "D")
end = end + np.timedelta64(23, "h")
if begin > end:
continue
split = False
for splt_date in splitting_dates:
# If date range needs to be splitted
if begin <= splt_date < end:
begins.append(begin)
ends.append(splt_date - np.timedelta64(1, "h"))
begins.append(splt_date)
ends.append(end)
split = True
# If we didn't split date range
if not split:
begins.append(begin)
ends.append(end)
begins = [pd.to_datetime(begin) for begin in begins]
ends = [pd.to_datetime(end) for end in ends]
return begins, ends
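# Illustrative call (hypothetical prm dict): split one month into 7-day windows,
# cutting any window that contains one of the hard-coded splitting dates in two.
#   begins, ends = select_range_7days_for_long_periods_prediction(
#       begin="2017-8-2", end="2017-9-1", prm={"GPU": False})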
def select_range_30_days_for_long_periods_prediction(begin="2017-8-2", end="2020-6-30", GPU=False):
begin = np.datetime64(pd.to_datetime(begin))
end = np.datetime64(pd.to_datetime(end))
# Define 30 days periods within date range
dates = pd.date_range(start=begin, end=end, freq="MS")
dates_shift = pd.date_range(start=begin, end=end, freq="M", closed='right').shift()
dates_shift = dates_shift.where(dates_shift <= end, [end])
# Split range around selected dates
if not GPU:
d1 = datetime.datetime(2017, 8, 1, 6)
d2 = datetime.datetime(2018, 8, 1, 6)
d3 = datetime.datetime(2019, 6, 1, 6)
d6 = datetime.datetime(2020, 7, 1, 6)
splitting_dates = [np.datetime64(date) for date in [d1, d2, d3, d6]]
else:
d1 = datetime.datetime(2017, 8, 1, 6)
d2 = datetime.datetime(2018, 8, 1, 6)
d3 = datetime.datetime(2019, 5, 1, 6)
d4 = datetime.datetime(2019, 6, 1, 6)
d5 = datetime.datetime(2020, 6, 2, 6)
splitting_dates = [np.datetime64(date) for date in [d1, d2, d3, d4, d5]]
begins = []
ends = []
for index, (begin, end) in enumerate(zip(dates.values, dates_shift.values)):
        # Extend each period's end to the last hour of the day
end = end + np.timedelta64(23, "h")
split = False
for splt_date in splitting_dates:
            # If the date range needs to be split
if begin <= splt_date < end:
begins.append(begin)
ends.append(splt_date - np.timedelta64(1, "h"))
begins.append(splt_date)
ends.append(end)
split = True
# If we didn't split date range
if not split:
begins.append(begin)
ends.append(end)
# begins = [pd.to_datetime(begin) for begin in begins]
for index, begin in enumerate(begins):
if not isinstance(begin, str):
begins[index] = pd.to_datetime(begin)
# ends = [pd.to_datetime(end) for end in ends]
for index, end in enumerate(ends):
if not isinstance(end, str):
ends[index] = pd.to_datetime(end)
return begins, ends
def print_current_line(time_step, nb_sim, division):
nb_sim_divided = nb_sim // division
for k in range(1, division + 1):
print(f" {k}/{division}") if (time_step == k * nb_sim_divided) else True
def change_dtype_if_required(variable, dtype):
if variable.dtype != dtype:
variable = variable.astype(dtype, copy=False)
return variable
def change_several_dtype_if_required(list_variable, dtypes):
result = []
for variable, dtype in zip(list_variable, dtypes):
if isinstance(variable, (list, int, float)):
variable = np.array(variable)
result.append(change_dtype_if_required(variable, dtype))
return result
def change_dtype_decorator(dtype):
"""Timer decorator"""
def decorator(function):
def wrapper(*args, **kwargs):
result = function(*args, **kwargs)
result = change_dtype_if_required(result, dtype)
return result
return wrapper
return decorator
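# Example (a sketch): using the decorator above to force a function's output
# to float32.
#
# >>> @change_dtype_decorator(np.float32)
# ... def ones(shape):
# ...     return np.ones(shape)
# >>> ones((2, 2)).dtype
# dtype('float32')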
def assert_equal_shapes(arrays, shape):
    for k in range(len(arrays)):
assert arrays[k].shape == shape
def round(t1, t2):
    """Return the elapsed time t2 - t1 rounded to two decimals (note: this shadows the built-in round)."""
    return np.round(t2 - t1, 2)
def reshape_list_array(list_array=None, shape=None):
"""
Utility function that takes as input a list of arrays to reshape to the same shape
Parameters
----------
list_array : list
List of arrays
shape : tuple
        Target shape for every array
Returns
-------
result : list
List of reshaped arrays
"""
result = []
for array in list_array:
result.append(np.reshape(array, shape))
return result
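# Example (a sketch): reshaping two flat arrays to 2 x 3.
#
# >>> a, b = reshape_list_array([np.arange(6), np.arange(6)], shape=(2, 3))
# >>> a.shape
# (2, 3)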
def several_empty_like(array_like, nb_empty_arrays=None):
result = []
    for _ in range(nb_empty_arrays):
        result.append(np.empty_like(array_like))
return result
def _list_to_array_if_required(list_or_array):
if isinstance(list_or_array, list):
return np.array(list_or_array)
else:
return list_or_array
def lists_to_arrays_if_required(lists_or_arrays):
if np.ndim(lists_or_arrays) > 1:
return (_list_to_array_if_required(list_or_array) for list_or_array in lists_or_arrays)
else:
return _list_to_array_if_required(lists_or_arrays)
@timer_decorator("statistical description array", unit="minute", level="")
def print_statistical_description_array(array, name="Acceleration CNN", level="________"):
print(f"{level}{name} min", np.nanmin(array))
print(f"\n{level}{name} q0.10", np.nanquantile(array, 0.1))
print(f"\n{level}{name} q0.25", np.nanquantile(array, 0.25))
print(f"\n{level}{name} median", np.nanmedian(array))
print(f"\n{level}{name} q0.75", np.nanquantile(array, 0.75))
print(f"\n{level}{name} q0.90", np.nanquantile(array, 0.9))
print(f"\n{level}{name} q0.95", np.nanquantile(array, 0.95))
print(f"\n{level}{name} q0.99", np.nanquantile(array, 0.99))
print(f"\n{level}{name} maximum", np.nanmax(array))
return None
def print_with_frame(text):
print('\n\n__________________________')
print('__________________________\n')
print(f'_______{text}_______\n')
print('__________________________')
print('__________________________\n\n')
def print_begin_end(begin, end):
print('\n\n__________________________')
print('__________________________\n')
print(f'_______{begin}___________\n')
print(f'_______{end}___________\n')
print('__________________________')
print('__________________________\n\n')
def print_second_begin_end(begin, end):
print('\n__________________________')
print(f'____{begin}___')
print(f'____{end}___')
print('__________________________')
def print_intro():
intro = """
''' '
' ' '
''' ''' '''
+ hs ' ''''' '.' '
'shh ho ' '
.yhhh hh+ ' ''
/hhhs +hhh/
hhhh' hhhh '''
ohhho +hhh: '. '.'
'yhhh: ohhh: ''''' ''' .
.+. -hhhy. ohhh: ' ''''' ''
-hhho' /hhhs' ohhh: '''''''''
:hhhhhhyhhh+ ohhh/ .' ''
/hhho+hhhhh: +hhh+ '. '.'
+hhh+ '+hy /hhho ''
ohhh/ ' :hhhs'
'shhh: :yhhy-
gyhhhg Wind speed 'shhh/
hyhhyf +hhhs'
:hhhs' Downscaling -hhhh:
+hhho 'ohhhsg
hhh/ using CNN :yhhh
hy- '+hh
o' by Louis Le Toumelin .s
CEN - Meteo-France
"""
print(intro)
| 33.66443 | 112 | 0.572269 | 1,250 | 10,032 | 4.1976 | 0.1816 | 0.054889 | 0.025157 | 0.018296 | 0.509053 | 0.493044 | 0.444444 | 0.420431 | 0.393749 | 0.38117 | 0 | 0.039533 | 0.309111 | 10,032 | 297 | 113 | 33.777778 | 0.717501 | 0.086324 | 0 | 0.412935 | 0 | 0 | 0.229532 | 0.039229 | 0 | 0 | 0 | 0 | 0.00995 | 1 | 0.099502 | false | 0 | 0.024876 | 0.004975 | 0.20398 | 0.159204 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bf3812d6d4f4e3479497b05d3cbefb4d7e0abe08 | 11,979 | py | Python | assetfactory/images/2021/08/30/base.py | reinhrst/reinhrst.github.io | 3e9dce26c923fca54589ffd1d19d56af0dd27910 | [
"CC0-1.0"
] | null | null | null | assetfactory/images/2021/08/30/base.py | reinhrst/reinhrst.github.io | 3e9dce26c923fca54589ffd1d19d56af0dd27910 | [
"CC0-1.0"
] | 6 | 2021-07-01T19:35:47.000Z | 2022-02-06T10:30:35.000Z | assetfactory/images/2021/08/30/base.py | reinhrst/reinhrst.github.io | 3e9dce26c923fca54589ffd1d19d56af0dd27910 | [
"CC0-1.0"
] | 1 | 2021-08-11T22:46:47.000Z | 2021-08-11T22:46:47.000Z | from __future__ import annotations
import pathlib
import typing as t
import numpy as np
import math
def rgb_to_hsv(r, g, b):
r = float(r)
g = float(g)
b = float(b)
high = max(r, g, b)
low = min(r, g, b)
h, s, v = high, high, high
d = high - low
s = 0 if high == 0 else d/high
if high == low:
h = 0.0
else:
h = {
r: (g - b) / d + (6 if g < b else 0),
g: (b - r) / d + 2,
b: (r - g) / d + 4,
}[high]
h /= 6
return h, s, v
def hsv_to_rgb(h, s, v):
i = math.floor(h*6)
f = h*6 - i
p = v * (1-s)
q = v * (1-f*s)
t = v * (1-(1-f)*s)
r, g, b = [
(v, t, p),
(q, v, p),
(p, v, t),
(p, q, v),
(t, p, v),
(v, p, q),
][int(i%6)]
return r, g, b
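# Round-trip sanity sketch (illustrative, not part of the original file):
# converting RGB -> HSV -> RGB recovers the input for in-range values.
#
# >>> r, g, b = hsv_to_rgb(*rgb_to_hsv(0.2, 0.4, 0.6))
# >>> round(r, 6), round(g, 6), round(b, 6)
# (0.2, 0.4, 0.6)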
def rgb_to_hsl(r, g, b):
r = float(r)
g = float(g)
b = float(b)
high = max(r, g, b)
low = min(r, g, b)
h, s, l = ((high + low) / 2,)*3
if high == low:
h = 0.0
s = 0.0
else:
d = high - low
s = d / (2 - high - low) if l > 0.5 else d / (high + low)
h = {
r: (g - b) / d + (6 if g < b else 0),
g: (b - r) / d + 2,
b: (r - g) / d + 4,
}[high]
h /= 6
return h, s, l
def hsl_to_rgb(h, s, l):
def hue_to_rgb(p, q, t):
t += 1 if t < 0 else 0
t -= 1 if t > 1 else 0
if t < 1/6: return p + (q - p) * 6 * t
if t < 1/2: return q
if t < 2/3: return p + (q - p) * (2/3 - t) * 6
return p
if s == 0:
r, g, b = l, l, l
else:
q = l * (1 + s) if l < 0.5 else l + s - l * s
p = 2 * l - q
r = hue_to_rgb(p, q, h + 1/3)
g = hue_to_rgb(p, q, h)
b = hue_to_rgb(p, q, h - 1/3)
return r, g, b
def hex_to_rgb(rgb: str) -> t.Tuple[float, float, float]:
assert rgb[0] == "#"
if len(rgb) == len("#rgb"):
elementlength = 1
else:
assert len(rgb) == len("#rrggbb")
elementlength = 2
    return tuple(int(rgb[i:i + elementlength], 16) / (16 ** elementlength - 1)
                 for i in range(1, len(rgb), elementlength))
def rgb_to_hex(r: float, g: float, b: float) -> str:
assert 0 <= r <= 1
assert 0 <= g <= 1
assert 0 <= b <= 1
return "#" + "".join(
"%02x" % int(round(c * 255)) for c in (r, g, b))
def lighter(rgb: str, pct: float):
assert 0 < pct <= 100
r, g, b = hex_to_rgb(rgb)
h, s, l = rgb_to_hsl(r, g, b)
l = 1 - (1 - l) / (1 + pct / 100)
return rgb_to_hex(*hsl_to_rgb(h, s, l))
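# Example (a sketch, colour values are illustrative): the hex helpers
# round-trip, and lighter() returns a lighter shade of the same hue.
#
# >>> rgb_to_hex(*hex_to_rgb("#003f5c"))
# '#003f5c'
# >>> lighter("#003f5c", 50)  # returns a lighter hex shade (output omitted)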
class RunDataNoMatchException(Exception):
def __init__(self, runData: RunData):
self.runData = runData
super().__init__("Ran out of data to match")
class RunDataOutOfLinesException(Exception):
def __init__(self):
super().__init__("No more lines")
TYPE_MAP = {
"go-native": "Go (native)",
"go": "Go (WebAssembly)",
"tinygo": "TinyGo",
"fzf-for-js": "fzf-for-js",
"gopherjs": "GopherJS",
"go-debugnogc": "Go (WebAssembly; no GC)",
"tinygo-leakinggc": "TinyGo (no GC)",
"go-native-nogc": "Go (native; no GC)",
}
COLOUR_MAP = {
"Go (native)": ["#003f5c", "#668eaa", "#002633"],
"Go (native; no GC)": ["#ffa600", "#ffcc33"],
"Go (WebAssembly)": ["#58508d", "#9e94c5", "#262145"],
"Go (WebAssembly; no GC)": ["#9e94ff", "#b7b2ff"],
"TinyGo": ["#bc5090", "#df94be", "#8F2464"],
"TinyGo (no GC)": ["#00c786", "#33ffa0"],
"fzf-for-js": ["#ff6361", "#ffa097", "#cc2020"],
"GopherJS": ["#ffa600", "#ffc171", "#cc5000"],
}
COLOUR_MAP = {
**COLOUR_MAP,
**{f"{key} - {browser}": [lighter(v, pct) for v in value]
for key, value in COLOUR_MAP.items()
for browser, pct in [
("Firefox", 20),
("Chrome", 40),
("Safari", 60),
("Edge", 80),
]}
}
LOG2_MAP = {2**i: i for i in range(40)}
class RunData:
fzf_type: str = None
nrlines: int = None
lines_load_time_ms: int = None
fzf_init_time_ms: int = None
memory_used_mib: float = None
search_times_ms_nr_results: t.MutableMapping[str, t.Tuple[int, int]] = None
aborted: bool
browser: bool
def popuntilstartmatch(self, lines: t.MutableSequence[str], start: str) -> str:
try:
while not (line := lines.pop()).startswith(start):
if line.startswith("******"):
self.aborted = True
lines.append(line)
raise RunDataNoMatchException(self)
except IndexError:
self.aborted = True
raise RunDataNoMatchException(self)
return line
def __init__(self, lines: t.MutableSequence[str]):
self.aborted = False
self.search_times_ms_nr_results = {}
try:
while not (line := lines.pop()).startswith("******"):
pass
except IndexError:
raise RunDataOutOfLinesException()
line = self.popuntilstartmatch(lines, "fzf-type: ")
raw_fzf_type = line.split()[1]
if any(raw_fzf_type.endswith(f"-{x}")
for x in ("edge", "safari", "firefox", "chrome")):
base, browser = raw_fzf_type.rsplit("-", 1)
self.fzf_type = TYPE_MAP[base] + f" - {browser.capitalize()}"
self.browser = True
else:
self.fzf_type = TYPE_MAP[raw_fzf_type]
self.browser = False
line = self.popuntilstartmatch(lines, "lines.txt loaded:")
_, _, nrlines, _, _, lines_load_time_ms = line.split()
self.nrlines = int(nrlines)
self.lines_load_time_ms = int(lines_load_time_ms)
line = self.popuntilstartmatch(lines, "Fzf initialized ")
self.fzf_init_time_ms = int(line.split()[-1]) - self.lines_load_time_ms
while "hello world" not in self.search_times_ms_nr_results:
line = self.popuntilstartmatch(lines, "Searching for '")
nrresults = int(line.split()[-2])
line = lines.pop()
assert line.startswith(f"--- ../{self.nrlines}.txt "), line
searchtime = int(line.split()[2])
searchterm = line.split(" ", 4)[-1]
if lines[-1].startswith("hash: "):
line = lines.pop()
assert line.startswith("hash: ")
hash = line.split()[1][:5]
else:
hash = None
if lines[-1].startswith("+++ filename "):
line = lines.pop()
gosearchtime = int(line.split()[2])
else:
gosearchtime = None
self.search_times_ms_nr_results[searchterm] = (searchtime, gosearchtime, hash, nrresults)
if self.browser:
self.memory_used_mib = None
else:
line = self.popuntilstartmatch(lines, " Maximum resident set size (kbytes):")
self.memory_used_mib = float(line.split()[-1]) / 1024
def __repr__(self):
aborted = "<aborted>" if self.aborted else ""
memused = self.memory_used_mib and round(self.memory_used_mib, 1)
return (
f"RunData{aborted}: {self.fzf_type}({self.nrlines}). "
f"load {self.lines_load_time_ms} ms; "
f"fzf init {self.fzf_init_time_ms} ms; "
f"search results: {self.search_times_ms_nr_results}; "
f"memory used results: {memused} MiB; "
)
def loadRunData() -> t.Sequence[RunData]:
runDatas: t.MutableSequence[RunData] = []
for filename in (
pathlib.Path(__file__).parent / "results-native-2.txt",
pathlib.Path(__file__).parent / "results-native-nogc-2.txt",
pathlib.Path(__file__).parent / "results.browsers.txt",
pathlib.Path(__file__).parent / "results-new.txt",
pathlib.Path(__file__).parent / "results-debugnogc.txt",
):
data = pathlib.Path(filename).read_text()
lines = list(reversed(data.splitlines()))
while True:
try:
runData = RunData(lines)
runDatas.append(runData)
except RunDataNoMatchException as e:
runDatas.append(e.runData)
except RunDataOutOfLinesException:
break
hashes = {}
for runData in runDatas:
if runData.aborted:
continue
key = runData.nrlines
myhashes = tuple([i[2] for i in runData.search_times_ms_nr_results.values()])
if key in hashes:
if hashes[key][0] != myhashes:
print(f"For {key}:\n {hashes[key][0]} ({hashes[key][1]}) !=\n {myhashes} {runData.fzf_type}")
breakpoint()
else:
hashes[key] = (myhashes, runData.fzf_type)
return t.cast(t.Sequence[RunData], runDatas)
def markdown_table(data, large_small_multiplier) -> str:
totaldata = {
key: {
nr: np.sum(data[key][nr]) for nr in data[key]
} for key in data
}
return "\n".join(
[
"|".join(["Haystack size", *[key for key in data]]),
"|".join(["---"] * (len(data) + 1)),
*[
"|".join([
f"2<sup>{LOG2_MAP[nr]}</sup> = {nr}",
*["---" if np.isnan(dfk[nr])
else
f"{dfk[nr]:.2f} ({dfk[nr] * large_small_multiplier / nr:.1f})"
for key, dfk in totaldata.items()],
])
for nr in list(data.values())[0]
]
]
)
def do_create_table_and_plot(
ax,
nrlinesexp: t.Sequence[int],
data_element_getter: t.Callable[[RunData], t.Sequence[float]],
to_show: t.Sequence[t.Optional[str]],
colourmap: t.Sequence[int],
ylim: t.Tuple[float, float],
large_small_multiplier: float=1e6,
):
runDatas = loadRunData()
fzf_types = {r.fzf_type for r in runDatas}
assert all(key in fzf_types for key in to_show if key is not None)
data = {key: {2**nr: [] for nr in nrlinesexp}
for key in to_show if key is not None}
runData: RunData
datalength = len(colourmap)
for runData in runDatas:
if (
runData.aborted
or (key := runData.fzf_type) not in data
or (nrlines := runData.nrlines) not in data[key]):
continue
data[key][nrlines].append(data_element_getter(runData))
assert datalength == len(data[key][nrlines][-1])
# calculate averages
for key in data:
for nr in data[key]:
if data[key][nr]:
data[key][nr] = np.mean(data[key][nr], axis=0)
else:
data[key][nr] = np.full((datalength, ), np.nan)
xaxis = nrlinesexp - nrlinesexp[0]
ax.set_xticks(xaxis)
ax.set_xticklabels([f"$2^{{{exp}}}$" for exp in nrlinesexp], rotation=45)
gap = (ylim[1] - ylim[0]) / 500
bargap = "".join(" " if k is None else "b" for k in to_show)
nrbars = bargap.count("b")
nrgaps = len(bargap.strip()) - nrbars
width = 0.9 / (nrbars + nrgaps / 2)
x_offset = -0.45
for label in to_show:
if label is None:
x_offset += width / 2
continue
bottom = np.zeros((len(xaxis), ))
for a in range(datalength):
itemdata = np.array([data[label][2**exp][a] for exp in nrlinesexp]
) / 2**nrlinesexp * large_small_multiplier
colour = COLOUR_MAP[label][colourmap[a]]
ax.bar(xaxis[:] + x_offset,
itemdata - (gap if a < datalength - 1 else 0),
bottom=bottom,
width=width,
color=colour,
label = label if a == min(1, datalength - 1) else None)
bottom += itemdata
x_offset += width
ax.set_ylim(*ylim)
return markdown_table(data, large_small_multiplier)
| 32.201613 | 115 | 0.515986 | 1,569 | 11,979 | 3.810707 | 0.173996 | 0.007025 | 0.007526 | 0.015053 | 0.227128 | 0.145844 | 0.075598 | 0.041813 | 0.037464 | 0.037464 | 0 | 0.029073 | 0.333834 | 11,979 | 371 | 116 | 32.28841 | 0.720175 | 0.001503 | 0 | 0.187117 | 0 | 0.003067 | 0.108119 | 0.020988 | 0 | 0 | 0 | 0 | 0.030675 | 1 | 0.04908 | false | 0.003067 | 0.015337 | 0 | 0.138037 | 0.003067 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bf3a8b4843ac801538ccc5ef189a2684601a4cd6 | 10,419 | py | Python | SemanticCopyandPaste.py | WeiChihChern/Copy-Paste-Semantic-Segmentation | f7725bb385b6decc4e139262fc1c6e3ba30255a3 | [
"MIT"
] | 3 | 2021-08-19T20:08:27.000Z | 2021-09-25T04:12:59.000Z | SemanticCopyandPaste.py | WeiChihChern/Copy-Paste-Semantic-Segmentation | f7725bb385b6decc4e139262fc1c6e3ba30255a3 | [
"MIT"
] | null | null | null | SemanticCopyandPaste.py | WeiChihChern/Copy-Paste-Semantic-Segmentation | f7725bb385b6decc4e139262fc1c6e3ba30255a3 | [
"MIT"
] | 1 | 2022-01-03T07:53:34.000Z | 2022-01-03T07:53:34.000Z | import albumentations as A
import random
import cv2
import os
import numpy as np
import matplotlib.pyplot as plt
class SemanticCopyandPaste(A.DualTransform):
def __init__(self,
nClass,
path2rgb,
path2mask,
shift_x_limit = [0,0],
shift_y_limit = [0,0],
rotate_limit = [0,0],
scale = [0,0],
class_weights = [],
always_apply = False,
show_stats = False,
auto_weights = False,
p=0.5):
super().__init__(always_apply=always_apply, p=p)
self.nClass = nClass
self.rgb_base = path2rgb
self.mask_base = path2mask
self.rgbs = os.listdir(path2rgb)
self.masks = os.listdir(path2mask)
self.nImages = len(self.rgbs)
self.threshold = 30
self.targetClass= 0
self.c_image = None # candidate image
self.c_mask = None # candidate mask
self.found = False
self.imgRow = None # for image translation
self.imgCol = None # for image translation
self.shift_x_limit = shift_x_limit
self.shift_y_limit = shift_y_limit
self.rotate_limit = rotate_limit
self.scale = scale
self.transformation_matrix = None
self.translated_mask = None
self.counter = 0
self.class_counter = np.zeros(self.nClass, dtype=np.int64)
# Class weights is used to control what classes to be augmented more than the others
self.class_weights = [abs(ele) for ele in class_weights]
self.class_pool = []
self.img_pool = np.zeros((self.nClass, len(self.masks))) - 1 # Use -1 as the flag of empty
self.class_pixels_statistics = np.zeros((self.nClass,1), dtype=np.float64)
self.auto_weights = auto_weights
# Image pool initialization for fast image lookup
# Go through all masks, and find out what class(es) each mask has
        class_count_tmp = np.zeros((self.nClass, 1), dtype=int)
for i in range(len(self.masks)):
c_mask = cv2.imread(os.path.join(self.mask_base, self.masks[i]))
assert c_mask is not None, "Your image directories may contain some non-image hidden files. Image is empty!"
for j in range(self.nClass):
if self.target_class_in_image(c_mask, j):
self.img_pool[j, class_count_tmp[j, 0]] = i
class_count_tmp[j, 0] += 1
# Initialization for weighted class augmentation
if self.auto_weights:
print('- Copy and Paste: Auto weights calculation used -')
tmp = np.copy(self.class_pixels_statistics)
tmp = 1 / tmp
tmp[0,0] = 0
self.class_weights = np.round(tmp / np.sum(tmp) * 100) # Normalized
for i in range(nClass):
                for j in range(int(self.class_weights[i])):
self.class_pool.append(i)
else:
if not class_weights:
print('- Copy and Paste: Using equal weights for all classes (background not included) -')
for i in range(1,self.nClass): self.class_pool.append(i)
else:
print('- Copy and Paste: Using user defined class weights -')
self.class_weights = np.round(self.class_weights / np.sum(self.class_weights) * 100) # Normalized
assert len(class_weights) == nClass, "class_weights' length != nClass, nClass should also include the background class."
for i in range(nClass):
                    for j in range(int(self.class_weights[i])):
self.class_pool.append(i)
# Params checking
assert len(self.rgbs) == len(self.masks), "rgb path's file count != mask path's file count"
assert self.nClass > 0, "Incorrect class number"
if shift_x_limit is not None:
assert type(shift_x_limit) == list and type(shift_y_limit) == list and type(rotate_limit) == list and type(scale) == list
assert abs(shift_x_limit[0]) <= 1 and abs(shift_y_limit[0]) <= 1 and abs(rotate_limit[0]) <= 1 and abs(rotate_limit[1]) <= 1 and scale[0] >= 0 and scale[1] >= scale[0] and scale[1] >= 1, 'The range for shift_x/y_limit and rotate is [-1 to 1], and [0 to 1] for scale'
if show_stats: print('Pixel Count for Each Class: \n', self.class_pixels_statistics)
def apply(self, image, **params):
'''
Args:
image: 3-channel RGB images
        This function first randomly picks a class to be copied (excluding 0, the background class). It then randomly picks a mask from the provided path and checks whether it contains the chosen target class, repeating until a match is found. Finally, it performs the copy-and-paste process.
        Since semantic segmentation annotations may not be labeled in the same way as instance segmentation ones, we currently copy and paste the entire mask without further processing.
'''
self.targetClass = random.choice(self.class_pool)
# Finding candidates with the target class
ret = -1
while ret == -1:
ret = int(random.choice(self.img_pool[self.targetClass, :]))
c_image = cv2.imread(os.path.join(self.rgb_base, self.rgbs[ret]))
c_mask = cv2.imread(os.path.join(self.mask_base, self.masks[ret]))
c_image = cv2.cvtColor(c_image, cv2.COLOR_BGR2RGB)
self.found = True
self.c_mask = c_mask
self.c_image = c_image
return self.copy_and_paste_image(self.c_image, self.c_mask, image, self.targetClass)
def apply_to_mask(self, mask, **params):
assert self.found == True
return self.copy_and_paste_mask(self.c_mask, mask, self.targetClass)
    # Augmentation is added to rgb2 (content is extracted from rgb1).
    # mask1 is needed to know where to extract pixels for the colour-image copy and paste.
def copy_and_paste_image(self, rgb1, mask1, rgb2, targetClassForAug):
assert rgb1 is not None
assert rgb2 is not None
assert mask1 is not None
        assert mask1.shape[2] == 3  # We imread it without further processing, so it is a 3-channel mask
if rgb2.shape != rgb1.shape:
r, c, _ = rgb2.shape
rgb1 = cv2.resize(rgb1, (c,r), interpolation = cv2.INTER_NEAREST)
mask1 = cv2.resize(mask1, (c,r), interpolation = cv2.INTER_NEAREST)
tmp = mask1[...,1] # All 3 channels have same content, we take 1 to process
masks = [(tmp == v) for v in range(self.nClass)]
masks = np.stack(masks, axis=-1).astype('float') # mask.shape = (x,y,ClassNums)
self.c_mask = masks
masks[..., targetClassForAug] = self.imgTransform(masks[..., targetClassForAug], self.shift_x_limit, self.shift_y_limit)
self.translated_mask = masks[..., targetClassForAug]
rgb1 = cv2.warpAffine(rgb1, self.transformation_matrix, (self.imgCol, self.imgRow))
# Pasting
mask_3channel = np.stack((self.translated_mask,self.translated_mask,self.translated_mask),axis=2)
idxs = mask_3channel > 0
rgb2[idxs] = rgb1[idxs]
return rgb2.astype('uint8')
def copy_and_paste_mask(self, mask1, mask2, targetClassForAug):
'''
Args:
mask1 = randomly picked qualified mask from apply(), has shape = (x, y, nClasses)
mask2 = dataloader loaded mask, aug is added to mask2
'''
        assert mask2.shape[2] == self.nClass  # Processed by the dataloader, so it is an nClass-channel mask
assert self.translated_mask is not None
mask2_1channel = np.argmax(mask2, axis=2)
# Pasting augmentation
mask2_1channel[self.translated_mask > 0] = targetClassForAug
masks = [(mask2_1channel == v) for v in range(self.nClass)] # mask.shape = (x,y,ClassNums)
masks = np.stack(masks, axis=-1).astype('float')
# Reset
self.c_mask = None
        self.found = False
self.transformation_matrix = None
self.translated_mask = None
return masks
# We imread the mask, so it's a 3-channel mask (not one-hot encoded)
def target_class_in_image(self, mask, targetClassIdx):
        # Hard-coded pixel threshold
s = np.sum(mask[..., 0] == targetClassIdx)
self.class_pixels_statistics[targetClassIdx, 0] += s
if s > self.threshold:
return True
return False
def imgTransform(self, image, offset_x_limit, offset_y_limit ):
'''
Args:
image: it can be mask or rgb image
offset_x_limt: x-axis shift limit [-1,1]
offset_y_limt: y-axis shift limit [-1,1]
'''
self.imgRow, self.imgCol = image.shape
col_shift = random.uniform(offset_x_limit[0], offset_x_limit[1])*self.imgCol
row_shift = random.uniform(offset_y_limit[0], offset_y_limit[1])*self.imgRow
rotate_deg= random.uniform(self.rotate_limit[0], self.rotate_limit[1])*180
scale_coef= random.uniform(self.scale[0] , self.scale[1])
self.transformation_matrix = cv2.getRotationMatrix2D((self.imgRow//2, self.imgCol//2), rotate_deg, scale_coef)
self.transformation_matrix[0,2] += col_shift
self.transformation_matrix[1,2] += row_shift
return cv2.warpAffine(image, self.transformation_matrix, (self.imgCol, self.imgRow))
def apply_to_bbox(self, bbox, **params):
return bbox
def apply_to_keypoint(self, keypoint, **params):
return keypoint
def get_transform_init_args_names(self):
return ("image", "mask")
| 38.164835 | 335 | 0.585181 | 1,330 | 10,419 | 4.443609 | 0.21203 | 0.025888 | 0.020305 | 0.011506 | 0.21489 | 0.141794 | 0.113875 | 0.067005 | 0.038917 | 0.038917 | 0 | 0.022118 | 0.323064 | 10,419 | 272 | 336 | 38.305147 | 0.815823 | 0.167099 | 0 | 0.106667 | 0 | 0.006667 | 0.063923 | 0 | 0 | 0 | 0 | 0 | 0.086667 | 1 | 0.066667 | false | 0 | 0.04 | 0.02 | 0.18 | 0.026667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
170d3e0c7bb509155665bc1b0e23573bfaf15d45 | 1,214 | py | Python | _vcs/git.py | devsetup/devsetup_framework | 6ccd59dab83bc4305e8ff18321bfc14a4e7e79ca | [
"BSD-3-Clause"
] | null | null | null | _vcs/git.py | devsetup/devsetup_framework | 6ccd59dab83bc4305e8ff18321bfc14a4e7e79ca | [
"BSD-3-Clause"
] | null | null | null | _vcs/git.py | devsetup/devsetup_framework | 6ccd59dab83bc4305e8ff18321bfc14a4e7e79ca | [
"BSD-3-Clause"
] | null | null | null | # -*- coding:utf8 -*-
import os
import re
import dsf
def change_branch(branch, cwd=None):
# checkout the branch
dsf.core.shell.run(['git', 'checkout', branch], cwd=cwd)
def get_current_branch(cwd=None):
output = dsf.core.shell.get_output_from_command(['git', 'branch'], cwd=cwd)
for line in output:
if line[0:2] == '* ':
branch = line[2:].rstrip()
return branch
# if we get here, we are not on a current branch
return "* HEADLESS"
def is_repository(cwd=None):
# make sure we have a current working directory
if not cwd:
cwd = os.getcwd()
# quickest test - is there a .git folder?
dotgit_folder = os.path.join(cwd, '.git')
if not os.path.isdir(dotgit_folder):
return False
# is the folder a real git repo?
output = dsf.core.shell.get_output_from_command(['git', 'status'], cwd=cwd)
regex=re.compile("fatal: Not a git repository")
if any(regex.match(line) for line in output):
return False
# what about when self.repodir is a subfolder of a git repo?
output = dsf.core.shell.get_output_from_command(['git', 'rev-parse', '--show-toplevel'], cwd=cwd)
if output[0].rstrip() != dsf.core.fs.get_realpath(cwd):
return False
# if we get here, then it is a git repo
return True | 26.977778 | 98 | 0.696046 | 202 | 1,214 | 4.10396 | 0.386139 | 0.04222 | 0.057901 | 0.065139 | 0.165259 | 0.165259 | 0.165259 | 0.165259 | 0.165259 | 0.115802 | 0 | 0.004936 | 0.165568 | 1,214 | 45 | 99 | 26.977778 | 0.813425 | 0.247117 | 0 | 0.115385 | 0 | 0 | 0.109272 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.115385 | 0 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
170d6548b3dc09065e688e302239c6b72d24faa5 | 243 | py | Python | 7KYU/is_prime.py | yaznasivasai/python_codewars | 25493591dde4649dc9c1ec3bece8191a3bed6818 | [
"MIT"
] | 4 | 2021-07-17T22:48:03.000Z | 2022-03-25T14:10:58.000Z | 7KYU/is_prime.py | yaznasivasai/python_codewars | 25493591dde4649dc9c1ec3bece8191a3bed6818 | [
"MIT"
] | null | null | null | 7KYU/is_prime.py | yaznasivasai/python_codewars | 25493591dde4649dc9c1ec3bece8191a3bed6818 | [
"MIT"
] | 3 | 2021-06-14T14:18:16.000Z | 2022-03-16T06:02:02.000Z | def is_prime(n: int) -> bool:
'''
This function returns True if n is a prime number otherwise return False.
'''
if n <= 1:
return False
d = 2
while d * d <= n and n % d != 0:
d += 1
return d * d > n
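# Examples:
# >>> is_prime(7)
# True
# >>> is_prime(8)
# False
# >>> is_prime(1)
# False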
| 22.090909 | 77 | 0.497942 | 40 | 243 | 3 | 0.55 | 0.05 | 0.05 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026846 | 0.386831 | 243 | 10 | 78 | 24.3 | 0.778523 | 0.300412 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1710ee96f11f5467c52c0c184c4411c5e7e24339 | 1,628 | py | Python | HSM/load_data.py | 18F/10x-MLaaS | 3e1df3bbd88037c20e916fab2c07117a63e3c639 | [
"CC0-1.0"
] | 13 | 2019-03-15T20:30:35.000Z | 2022-02-19T08:05:10.000Z | HSM/load_data.py | 18F/10x-MLaaS | 3e1df3bbd88037c20e916fab2c07117a63e3c639 | [
"CC0-1.0"
] | 106 | 2018-11-28T21:17:55.000Z | 2022-03-25T09:18:27.000Z | HSM/load_data.py | 18F/10x-MLaaS | 3e1df3bbd88037c20e916fab2c07117a63e3c639 | [
"CC0-1.0"
] | 8 | 2019-01-05T16:31:02.000Z | 2022-03-20T15:35:06.000Z | import json
from argparse import ArgumentParser
import pandas as pd
from utils import db, db_utils
from utils.db import Data, SupportData
filter_feature = 'Comments Concatenated'
validation = 'Validation'
def main(file):
db_utils.create_postgres_db()
db.dal.connect()
session = db.dal.Session()
df = pd.read_excel(file)
data_columns = [filter_feature, validation]
data = df[data_columns]
support_data = json.loads(df[df.columns.difference(data_columns)].to_json(orient='records'))
for i in range(len(data)):
data_row = data.iloc[i]
support_data_row = support_data[i]
data_obj = Data(filter_feature=str(data_row[filter_feature]), validation=int(data_row[validation]))
session.add(data_obj)
session.flush()
support_data_obj = SupportData(support_data=support_data_row)
data_obj.support_data = support_data_obj
support_data_obj.data = data_obj
support_data_obj.data_id = support_data_obj.data.id
session.add(support_data_obj)
session.commit()
print(f'Loaded {len(data)} records of data and support_data.')
if __name__ == '__main__':
    program_desc = '''This application reads the spreadsheet and pulls out the essential data to fill
                      the database. It populates the `data` table and puts all the remaining columns
                      into the `support_data` table.'''
parser = ArgumentParser(description=program_desc)
parser.add_argument("file", help="specify path to file")
args = parser.parse_args()
main(file=args.file)
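# Example invocation (the file name is hypothetical):
#   python load_data.py responses.xlsx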
| 30.716981 | 107 | 0.699017 | 226 | 1,628 | 4.792035 | 0.376106 | 0.142198 | 0.077562 | 0.049862 | 0.066482 | 0.046168 | 0 | 0 | 0 | 0 | 0 | 0 | 0.210074 | 1,628 | 52 | 108 | 31.307692 | 0.842146 | 0 | 0 | 0 | 0 | 0 | 0.237715 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.138889 | 0 | 0.166667 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
17111432226dd73d653390de29656f4d3d8a9a11 | 2,882 | py | Python | tools/optimization/driver/ask_tell_parallel_driver.py | MRossol/HOPP | c8bcf610fdd2cbb27a807ddaf444684ef1aab7e8 | [
"BSD-3-Clause"
] | 3 | 2021-03-10T20:03:42.000Z | 2022-03-18T17:10:04.000Z | tools/optimization/driver/ask_tell_parallel_driver.py | MRossol/HOPP | c8bcf610fdd2cbb27a807ddaf444684ef1aab7e8 | [
"BSD-3-Clause"
] | 14 | 2020-12-28T22:32:07.000Z | 2022-03-17T15:33:04.000Z | tools/optimization/driver/ask_tell_parallel_driver.py | MRossol/HOPP | c8bcf610fdd2cbb27a807ddaf444684ef1aab7e8 | [
"BSD-3-Clause"
] | 8 | 2021-01-19T02:39:01.000Z | 2022-01-31T18:04:39.000Z | import multiprocessing
from typing import (
Callable,
Tuple,
)
from ..data_logging.data_recorder import DataRecorder
from ..driver.ask_tell_driver import AskTellDriver
from ..optimizer.ask_tell_optimizer import AskTellOptimizer
from .ask_tell_parallel_driver_fns import *
class AskTellParallelDriver(AskTellDriver):
def __init__(self,
nprocs: int = multiprocessing.cpu_count()):
self._num_evaluations: int = 0
self._num_iterations: int = 0
self._nprocs = nprocs
self._pool = None
# self.evaluations = []
def __getstate__(self):
"""
This prevents the pool from being pickled when using the pool...
"""
self_dict = self.__dict__.copy()
        if '_pool' in self_dict:
            del self_dict['_pool']
return self_dict
def __setstate__(self, state):
"""
        Restores instance state after unpickling (the worker pool itself is not restored).
"""
self.__dict__.update(state)
def __del__(self):
"""
        Closes the worker pool when the driver is garbage collected.
"""
        if hasattr(self, '_pool') and self._pool is not None:
self._pool.close()
def setup(
self,
objective: Callable[[any], Tuple[float, float, any]],
recorder: DataRecorder,
) -> None:
"""
Must be called before calling step() or run().
Sets the objective function for this driver and the data recorder.
:param objective: objective function for evaluating candidate solutions
:param recorder: data recorder
:return:
"""
self._pool = multiprocessing.Pool(
initializer=make_initializer(objective),
processes=self._nprocs)
def step(self,
optimizer: AskTellOptimizer,
) -> bool:
"""
Steps the optimizer through one iteration of generating candidates, evaluating them, and updating with their
evaluations.
:param optimizer: the optimizer to use
:return: True if the optimizer reached a stopping point (via calling optimizer.stop())
"""
# print('step()')
num_candidates = optimizer.get_num_candidates()
candidates = optimizer.ask(num_candidates)
evaluations = self._pool.map(evaluate, candidates)
num_candidates = len(evaluations)
# print('telling')
# self.evaluations = list(evaluations)
optimizer.tell(evaluations)
self._num_evaluations += num_candidates
self._num_iterations += 1
# print('done')
return optimizer.stop()
def get_num_evaluations(self) -> int:
return self._num_evaluations
def get_num_iterations(self) -> int:
return self._num_iterations
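# Usage sketch (illustrative; `my_objective`, `optimizer`, and `recorder` are
# assumed to be constructed elsewhere in the project):
#
# driver = AskTellParallelDriver(nprocs=4)
# driver.setup(my_objective, recorder)   # must be called before step()
# while not driver.step(optimizer):
#     pass                               # iterate until optimizer.stop()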
| 32.022222 | 116 | 0.611381 | 304 | 2,882 | 5.569079 | 0.351974 | 0.024808 | 0.031896 | 0.033668 | 0.128175 | 0.104548 | 0.104548 | 0.104548 | 0.104548 | 0.104548 | 0 | 0.001496 | 0.304303 | 2,882 | 89 | 117 | 32.382022 | 0.842893 | 0.26891 | 0 | 0 | 0 | 0 | 0.006214 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.163265 | false | 0 | 0.122449 | 0.040816 | 0.387755 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1711d50df16cf6bbbc6b89a5b298a52cab9c6c7f | 4,255 | py | Python | main.py | BenAsaf/moodle-attendance-bot | bd27263fbb57badbe0ec622b8f72f507795591a2 | [
"MIT"
] | null | null | null | main.py | BenAsaf/moodle-attendance-bot | bd27263fbb57badbe0ec622b8f72f507795591a2 | [
"MIT"
] | null | null | null | main.py | BenAsaf/moodle-attendance-bot | bd27263fbb57badbe0ec622b8f72f507795591a2 | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.common import exceptions
import time
import sched
MOODLE_USER_NAME = ""
MOODLE_PASSWORD = ""
MOODLE_HOME_PAGE = "Moodle Address" # THe moodle main homepage
COURSE_TITLE = "Name of the course as it shows up on the left" # What course to search for in the list
START_HOUR, START_MINUTE, START_SECS = 16, 0, 0 # Starts at 16:00:00
SLEEP_INTERVAL = 60 # For what duration to sleep in-between attempts (in seconds)
WEBDRIVER_EXECUTABLE_PATH = "./chromedriver" # Path to the Chrome WebDriver
CHROME_EXECUTABLE_PATH = "/usr/bin/google-chrome" # Path to the Chrome browser
IS_HEADLESS = True # Whether or not to display the Chrome GUI.
def init_browser():
options = webdriver.ChromeOptions()
options.add_argument('--ignore-certificate-errors')
options.add_argument("--test-type")
if IS_HEADLESS:
options.add_argument("--headless")
options.binary_location = CHROME_EXECUTABLE_PATH
browser = webdriver.Chrome(executable_path=WEBDRIVER_EXECUTABLE_PATH, chrome_options=options)
browser.get(MOODLE_HOME_PAGE)
return browser
def login_to_moodle(browser):
# <input type="text" name="username" id="login_username" class="form-control" value="" autocomplete="username">
user_input = browser.find_element_by_id("login_username")
user_input.send_keys(MOODLE_USER_NAME)
# <input type="password" name="password" id="login_password" class="form-control" value="" autocomplete="current-password">
password_input = browser.find_element_by_id("login_password")
password_input.send_keys(MOODLE_PASSWORD)
# <input type="submit" class="btn btn-primary btn-block" value="Log in">
all_btns = browser.find_elements_by_class_name("btn-primary")
for x in all_btns:
if x.get_attribute("type") == "submit" and x.get_attribute("value") == "Log in":
x.click()
return
raise Exception("Could not find the submit button for some reason.")
def go_to_course_page(browser):
all_links = browser.find_elements_by_tag_name("a")
for x in all_links:
if x.get_attribute("title") == COURSE_TITLE:
x.click()
return
def go_to_attendance(browser):
all_links = browser.find_elements_by_tag_name("a")
for x in all_links:
if x.text == "Attendance":
x.click()
return
def handle_attendance(browser):
all_links = browser.find_elements_by_tag_name("a")
found_submit_attendance = False
for x in all_links:
if x.text == "Submit attendance":
x.click()
found_submit_attendance = True
break
if not found_submit_attendance:
return False
all_spans = browser.find_elements_by_tag_name("span")
for x in all_spans:
if x.text == "Present":
x.click()
break
submit_btn = browser.find_element_by_id("id_submitbutton")
submit_btn.click()
return True
def create_time_today(hour, min, sec):
now = time.localtime()
when = time.mktime((now.tm_year, now.tm_mon, now.tm_mday, hour, min, sec, 1, 85, 0))
return when
def log(msg):
print(time.asctime()+": %s" % msg)
def wait_until_lesson_starts_and_launch_job(job):
log("Waiting for the time %02d:%02d:%02d" % (START_HOUR, START_MINUTE, START_SECS))
s = sched.scheduler(time.time, time.sleep)
s.enterabs(create_time_today(START_HOUR, START_MINUTE, START_SECS), 1, job)
s.run()
def main():
log("The time has come! starting attempts.")
is_successful = False
browser = init_browser()
while not is_successful:
try:
login_to_moodle(browser)
go_to_course_page(browser)
go_to_attendance(browser)
is_successful = handle_attendance(browser)
except exceptions.WebDriverException as e:
log("Error WebDriverException...")
browser.close()
if is_successful:
log("Done!") # Will stop iterating afterwards (while condition)
else:
log("Failed. Sleeping now. will try again in %d seconds" % SLEEP_INTERVAL)
time.sleep(SLEEP_INTERVAL)
if __name__ == '__main__':
# main()
wait_until_lesson_starts_and_launch_job(job=main)
| 32.984496 | 127 | 0.683901 | 578 | 4,255 | 4.775087 | 0.307958 | 0.031884 | 0.03442 | 0.038043 | 0.211957 | 0.167391 | 0.125725 | 0.102536 | 0.067391 | 0.067391 | 0 | 0.006876 | 0.213866 | 4,255 | 128 | 128 | 33.242188 | 0.818236 | 0.140541 | 0 | 0.168421 | 0 | 0 | 0.131449 | 0.01921 | 0 | 0 | 0 | 0 | 0 | 1 | 0.094737 | false | 0.031579 | 0.042105 | 0 | 0.210526 | 0.010526 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
17134739b8596a864d6e6e37035825d72ffbe045 | 3,119 | py | Python | securetea/lib/antivirus/scanner/yara_scanner.py | neerajv18/SecureTea-Project | e999cbe7c8e497c69b76b4c886de0d063169ea03 | [
"MIT"
] | 257 | 2018-03-28T12:43:20.000Z | 2022-03-29T07:07:23.000Z | securetea/lib/antivirus/scanner/yara_scanner.py | neerajv18/SecureTea-Project | e999cbe7c8e497c69b76b4c886de0d063169ea03 | [
"MIT"
] | 155 | 2018-03-31T14:57:46.000Z | 2022-03-17T18:12:41.000Z | securetea/lib/antivirus/scanner/yara_scanner.py | neerajv18/SecureTea-Project | e999cbe7c8e497c69b76b4c886de0d063169ea03 | [
"MIT"
] | 132 | 2018-03-27T06:25:20.000Z | 2022-03-28T11:32:45.000Z | # -*- coding: utf-8 -*-
u"""Yara Scanner module for SecureTea AntiVirus.
Project:
╔═╗┌─┐┌─┐┬ ┬┬─┐┌─┐╔╦╗┌─┐┌─┐
╚═╗├┤ │ │ │├┬┘├┤ ║ ├┤ ├─┤
╚═╝└─┘└─┘└─┘┴└─└─┘ ╩ └─┘┴ ┴
Author: Abhishek Sharma <abhishek_official@hotmail.com> , Jul 4 2019
Version: 1.4
Module: SecureTea
"""
from securetea.lib.antivirus.scanner.scanner_parent import Scanner
import sys
import os
yara_status = True
try:
import yara
except ImportError:
yara_status = False
print("[-] Yara not installed")
except AttributeError:
yara_status = False
print("[-] Yara not configured: libyara.so not found")
except Exception as e:
yara_status = False
print(e)
class YaraScanner(Scanner):
"""YaraScanner class."""
def __init__(self, debug=False, config_path=None, vt_api_key=None, file_list=None):
"""
Initialize YaraEngine.
Args:
debug (bool): Log on terminal or not
config_path (str): Configuration JSON file path
vt_api_key (str): VirusTotal API Key
file_list (list): List of files to scan
Raises:
None
Returns:
None
"""
# Initialize parent class
super().__init__(debug, config_path, file_list, vt_api_key)
if self.os_name:
try:
# Load threads
self._WORKERS = self.config_dict[self.os_name]["scanner"]["yara"]["threads"]
# Load Yara rules storage path
self._YARA_STORAGE = self.config_dict[self.os_name]["update"]["yara"]["storage"]
except KeyError:
self.logger.log(
"Could not load configuration for: {}".format(self.os_name),
logtype="error"
)
sys.exit(0)
else:
self.logger.log(
"Could not determine the OS",
logtype="error"
)
sys.exit(0)
def scan_file(self, file_path):
"""
Scan file using Yara rules.
Args:
file_path (str): Path of the file to scan
Raises:
None
Returns:
None
"""
if yara_status:
yara_files_list = os.listdir(self._YARA_STORAGE)
for yara_file in yara_files_list:
if yara_file.endswith(".yar") or yara_file.endswith(".yara"):
yara_file_path = os.path.join(self._YARA_STORAGE, yara_file)
rule_compile = yara.compile(yara_file_path)
matches = rule_compile.match(file_path)
if matches:
self.logger.log(
"Possible malicious file detected: {0}".format(file_path),
logtype="warning"
)
if file_path not in self.malicious_file_list:
self.malicious_file_list.append(file_path)
super().check_virus_total(file_path)
return
return
| 30.281553 | 96 | 0.520359 | 342 | 3,119 | 4.748538 | 0.377193 | 0.049261 | 0.024631 | 0.036946 | 0.146552 | 0.096059 | 0 | 0 | 0 | 0 | 0 | 0.005624 | 0.372876 | 3,119 | 102 | 97 | 30.578431 | 0.789366 | 0.235011 | 0 | 0.264151 | 0 | 0 | 0.102995 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037736 | false | 0 | 0.09434 | 0 | 0.169811 | 0.056604 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
171703df70423764cdb150d7746703a8105fdfbc | 5,823 | py | Python | stable_baselines3/common/buffer_multi_level.py | atishdixit16/stable-baselines3 | 0188d6a7b0c905693f41a68484d71b02faee6146 | [
"MIT"
] | null | null | null | stable_baselines3/common/buffer_multi_level.py | atishdixit16/stable-baselines3 | 0188d6a7b0c905693f41a68484d71b02faee6146 | [
"MIT"
] | null | null | null | stable_baselines3/common/buffer_multi_level.py | atishdixit16/stable-baselines3 | 0188d6a7b0c905693f41a68484d71b02faee6146 | [
"MIT"
] | null | null | null | from os import times
from typing import Generator, Optional, Union, NamedTuple
import numpy as np
import torch as th
from gym import spaces
from stable_baselines3.common.type_aliases import RolloutBufferSamples
from stable_baselines3.common.buffers import RolloutBuffer
from stable_baselines3.common.vec_env import VecNormalize
class AnalysisRolloutBufferSamples(NamedTuple):
observations: th.Tensor
actions: th.Tensor
old_values: th.Tensor
old_log_prob: th.Tensor
advantages: th.Tensor
returns: th.Tensor
times: th.Tensor
class RolloutBufferMultiLevel(RolloutBuffer):
"""
Rollout buffer used in on-policy algorithm PPO_SL.
It corresponds to ``buffer_size`` transitions collected
using the current policy.
This experience will be discarded after the policy update.
In order to use PPO objective, we also store the current value of each state
and the log probability of each taken action.
The term rollout here refers to the model-free notion and should not
be used with the concept of rollout used in model-based RL or planning.
Hence, it is only involved in policy and value function training but not action selection.
:param buffer_size: Max number of element in the buffer
:param observation_space: Observation space
:param action_space: Action space
:param device:
:param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
Equivalent to classic advantage when set to 1.
:param gamma: Discount factor
:param n_envs: Number of parallel environments
"""
def __init__(
self,
buffer_size: int,
observation_space: spaces.Space,
action_space: spaces.Space,
device: Union[th.device, str] = "cpu",
gae_lambda: float = 1,
gamma: float = 0.99,
n_envs: int = 1,
):
super(RolloutBufferMultiLevel, self).__init__(buffer_size, observation_space, action_space, device, gae_lambda, gamma, n_envs=n_envs)
self.times = None
self.reset()
def reset(self) -> None:
self.times = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
super(RolloutBufferMultiLevel, self).reset()
def record_times(self, comp_times: np.ndarray) -> None:
        """Warning: only valid if this function is executed right before the `add` function."""
self.times[self.pos] = comp_times
def swap_and_flatten_for_analysis(self, arr: np.ndarray) -> np.ndarray:
"""
Swap and then flatten axes 0 (buffer_size) and 1 (n_envs)
to convert shape from [n_steps, n_envs, ...] (when ... is the shape of the features)
to [n_steps * n_envs, ...] (which maintain the order)
:param arr:
:return:
"""
shape = arr.shape
if len(shape) < 3:
shape = shape + (1,)
return arr.swapaxes(0, 1).reshape(shape[0] * shape[1], *shape[2:], order='F')
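    # Shape sketch: an array of shape (n_steps, n_envs, F) flattens to
    # (n_steps * n_envs, F); e.g. (5, 2, 3) -> (10, 3).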
def get_analysis_batch(self, batch_size: Optional[int] = None) -> Generator[RolloutBufferSamples, None, None]:
# Return everything, don't create minibatches
if batch_size is None:
batch_size = self.buffer_size * self.n_envs
indices = np.random.permutation(self.buffer_size * self.n_envs)
# Prepare the data
if not self.generator_ready:
_tensor_names = [
"observations",
"actions",
"values",
"log_probs",
"advantages",
"returns",
"times",
]
for tensor in _tensor_names:
self.__dict__[tensor] = self.swap_and_flatten_for_analysis(self.__dict__[tensor])
self.generator_ready = True
start_idx = 0
while start_idx < batch_size:
yield self._get_analysis_samples(indices[start_idx : start_idx + batch_size])
start_idx += batch_size
def _get_analysis_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None) -> RolloutBufferSamples:
data = (
self.observations[batch_inds],
self.actions[batch_inds],
self.values[batch_inds].flatten(),
self.log_probs[batch_inds].flatten(),
self.advantages[batch_inds].flatten(),
self.returns[batch_inds].flatten(),
self.times[batch_inds].flatten(),
)
return AnalysisRolloutBufferSamples(*tuple(map(self.to_torch, data)))
def get_sync(self, sync_rollout_buffer, batch_size: Optional[int] = None) -> Generator[RolloutBufferSamples, None, None]:
        assert self.full, "Rollout buffer must be full before sampling"
indices = np.random.permutation(self.buffer_size * self.n_envs)
# Prepare the data
if not self.generator_ready:
_tensor_names = [
"observations",
"actions",
"values",
"log_probs",
"advantages",
"returns",
]
for tensor in _tensor_names:
self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor])
sync_rollout_buffer.__dict__[tensor] = sync_rollout_buffer.swap_and_flatten(sync_rollout_buffer.__dict__[tensor])
self.generator_ready = True
sync_rollout_buffer.generator_ready = True
# Return everything, don't create minibatches
if batch_size is None:
batch_size = self.buffer_size * self.n_envs
start_idx = 0
while start_idx < self.buffer_size * self.n_envs:
yield self._get_samples(indices[start_idx : start_idx + batch_size])
yield sync_rollout_buffer._get_samples(indices[start_idx : start_idx + batch_size])
start_idx += batch_size
| 37.567742 | 141 | 0.644341 | 714 | 5,823 | 5.02381 | 0.285714 | 0.018121 | 0.027321 | 0.030109 | 0.318093 | 0.293003 | 0.237803 | 0.237803 | 0.237803 | 0.191246 | 0 | 0.005187 | 0.271681 | 5,823 | 154 | 142 | 37.811688 | 0.840604 | 0.230981 | 0 | 0.309278 | 0 | 0 | 0.043419 | 0 | 0 | 0 | 0 | 0 | 0.010309 | 1 | 0.072165 | false | 0 | 0.082474 | 0 | 0.268041 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
171b77518c7cb37c9e4c98fa3f24461a2dd6e589 | 3,032 | py | Python | Scripts/plot_ArcticSystemsWorkshop_Fig2.py | zmlabe/ThicknessSensitivity | 6defdd897a61d7d1a02f34a9f4ec92b2b17b3075 | [
"MIT"
] | 1 | 2017-10-22T02:22:14.000Z | 2017-10-22T02:22:14.000Z | Scripts/plot_ArcticSystemsWorkshop_Fig2.py | zmlabe/ThicknessSensitivity | 6defdd897a61d7d1a02f34a9f4ec92b2b17b3075 | [
"MIT"
] | null | null | null | Scripts/plot_ArcticSystemsWorkshop_Fig2.py | zmlabe/ThicknessSensitivity | 6defdd897a61d7d1a02f34a9f4ec92b2b17b3075 | [
"MIT"
] | 4 | 2018-04-05T17:55:36.000Z | 2022-03-31T07:05:01.000Z | """
Plot for NCAR Arctic Systems workshop poster. Graph is DJF sea ice volume
from PIOMAS over the satellite era.
Notes
-----
Author : Zachary Labe
Date : 4 April 2018
"""
### Import modules
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import nclcmaps as ncm
import datetime
import read_MonthlyOutput as MO
import calc_Utilities as UT
import cmocean
import itertools
### Directories
directorydata = '/home/zlabe/Documents/Projects/Tests/SIV_animate/Data/'
directoryfigure = '/home/zlabe/Desktop/'
### Define time
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
titletime = currentmn + '/' + currentdy + '/' + currentyr
print('\n' '----Plotting Poster Figure 2 - %s----' % titletime)
### Read data
years,j,f,d = np.genfromtxt(directorydata + 'monthly_piomas.txt',
unpack=True,delimiter='',usecols=[0,1,2,12])
siv = (j[1:] + f[1:] + d[:-1])/3
### Plot Figure
def adjust_spines(ax, spines):
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', 5))
else:
spine.set_color('none')
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks([])
fig = plt.figure()
ax = plt.subplot()
adjust_spines(ax, ['left', 'bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_color('dimgrey')
ax.spines['bottom'].set_color('dimgrey')
ax.spines['left'].set_linewidth(2)
ax.spines['bottom'].set_linewidth(2)
ax.tick_params('both',length=4,width=2,which='major',color='dimgrey',pad=1)
ax.yaxis.grid(zorder=1,color='darkgrey',alpha=1,linewidth=0.4)
plt.plot(years[1:],siv,color=cmocean.cm.balance(0.78),linewidth=3.5,marker='o',markersize=7,
label=r'\textbf{PIOMAS v2.1 [Zhang and Rothrock, 2003]}')
plt.xticks(np.arange(1980,2021,10),list(map(str,np.arange(1980,2021,10))),
fontsize=13,color='dimgrey')
plt.yticks(np.arange(14,29,2),list(map(str,np.arange(14,29,2))),fontsize=13,
color='dimgrey')
plt.ylabel(r'\textbf{VOLUME [$\times$1000 km$^{3}$]}',
color='k',fontsize=16)
plt.title(r'\textbf{DEC-FEB : ARCTIC SEA ICE}',color='k',fontsize=27)
le = plt.legend(shadow=False,fontsize=8,loc='upper center',
bbox_to_anchor=(0.27, 0.07),fancybox=True,frameon=False,ncol=1)
for text in le.get_texts():
text.set_color('dimgrey')
plt.xlim([1980,2020])
plt.ylim([14,28])
plt.savefig(directoryfigure + 'PosterFig2.png',dpi=1000) | 30.626263 | 92 | 0.663918 | 443 | 3,032 | 4.480813 | 0.44921 | 0.032242 | 0.018136 | 0.022166 | 0.189421 | 0.079597 | 0.079597 | 0.079597 | 0 | 0 | 0 | 0.045904 | 0.166557 | 3,032 | 99 | 93 | 30.626263 | 0.739612 | 0.083773 | 0 | 0.132353 | 0 | 0 | 0.151977 | 0.019587 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014706 | false | 0 | 0.147059 | 0 | 0.161765 | 0.014706 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
171f19a1c9e621f8b1151c6b7c0684a1d90d12d0 | 552 | py | Python | src/ch6/heap_sort.py | tchitchikov/algorithms_practice | 6a1ab226b4eb664a8a46853c94148a1ad0e0a558 | [
"MIT"
] | null | null | null | src/ch6/heap_sort.py | tchitchikov/algorithms_practice | 6a1ab226b4eb664a8a46853c94148a1ad0e0a558 | [
"MIT"
] | null | null | null | src/ch6/heap_sort.py | tchitchikov/algorithms_practice | 6a1ab226b4eb664a8a46853c94148a1ad0e0a558 | [
"MIT"
] | null | null | null | import random
from heaps import max_heaps, min_heaps
def heap_sort(array):
array = max_heaps.build_max_heap(array)
i = len(array) - 1
output = []
while i >= 0:
output.insert(0, array[0])
        # Standard extract-max (assuming max_heaps.max_heap sifts down from
        # the given index): move the last leaf to the root, shrink the heap,
        # then restore the heap property. Slicing off the root directly does
        # not leave a valid heap.
        array[0] = array[-1]
        array = array[:-1]
        if array:
            array = max_heaps.max_heap(array, 0)
i = i - 1
return output
if __name__ == '__main__':
# array = [5, 1, 3, 4, 2]
array = [random.randrange(0,10) for x in range(15)]
print(max_heaps.build_max_heap(array))
print(heap_sort(array))
print(min_heaps.build_min_heap(array))
| 21.230769 | 55 | 0.615942 | 85 | 552 | 3.729412 | 0.4 | 0.100946 | 0.113565 | 0.100946 | 0.157729 | 0.157729 | 0 | 0 | 0 | 0 | 0 | 0.041363 | 0.255435 | 552 | 25 | 56 | 22.08 | 0.729927 | 0.041667 | 0 | 0 | 0 | 0 | 0.01518 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.117647 | 0 | 0.235294 | 0.176471 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
17211f08965cfcc8becd31fe9182b1682d452336 | 2,110 | py | Python | setup.py | nickhand/jupyter-panel-proxy | 55f405b7292df281dfd0306f1154fb31992eef19 | [
"BSD-3-Clause"
] | 3 | 2020-04-17T19:54:48.000Z | 2021-03-07T17:08:06.000Z | setup.py | nickhand/jupyter-panel-proxy | 55f405b7292df281dfd0306f1154fb31992eef19 | [
"BSD-3-Clause"
] | 12 | 2020-03-04T13:45:26.000Z | 2022-01-14T04:01:53.000Z | setup.py | nickhand/jupyter-panel-proxy | 55f405b7292df281dfd0306f1154fb31992eef19 | [
"BSD-3-Clause"
] | 3 | 2021-03-08T13:26:50.000Z | 2021-12-20T01:02:00.000Z | import param
from setuptools import find_packages, setup
extras_require = {
'build': ['param >=1.7.0', 'setuptools'],
'tests': [
'flake8',
'twine',
'rfc3986',
'keyring'
],
}
setup_args = dict(
name="jupyter-panel-proxy",
description='Jupyter Server Proxy for Panel applications',
version=param.version.get_setup_version(
__file__,
"panel_server",
archive_commit="$Format:%h$",
),
long_description=open('README.md').read(),
long_description_content_type="text/markdown",
author="Julia Signell",
author_email= "developers@holoviz.org",
maintainer= "HoloViz developers",
maintainer_email= "developers@pyviz.org",
url="https://github.com/holoviz/jupyter-panel-proxy",
project_urls = {
"Bug Tracker": "http://github.com/holoviz/jupyter-panel-proxy/issues",
"Documentation": "https://github.com/holoviz/jupyter-panel-proxy/blob/master/README.md",
"Source Code": "https://github.com/holoviz/jupyter-panel-proxy",
},
platforms=['Windows', 'Mac OS X', 'Linux'],
license='BSD',
classifiers = [
"License :: OSI Approved :: BSD License",
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Operating System :: OS Independent",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Natural Language :: English",
"Topic :: Scientific/Engineering",
"Topic :: Software Development :: Libraries"
],
python_requires=">=3.6",
install_requires=['jupyter-server-proxy', 'panel >=0.11'],
extras_require=extras_require,
packages=find_packages(),
entry_points={
'jupyter_serverproxy_servers': [
'panel = panel_server:setup_panel_server',
]
},
)
if __name__ == '__main__':
setup(**setup_args)
| 31.969697 | 96 | 0.619905 | 216 | 2,110 | 5.87963 | 0.527778 | 0.047244 | 0.066929 | 0.072441 | 0.115748 | 0.115748 | 0.089764 | 0 | 0 | 0 | 0 | 0.013522 | 0.22891 | 2,110 | 65 | 97 | 32.461538 | 0.767056 | 0 | 0 | 0.033333 | 0 | 0.016667 | 0.511848 | 0.048341 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.033333 | 0 | 0.033333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
172229cdfd779697e208462fd6b7eaf4fd180fa2 | 4,579 | py | Python | cli/api.py | chacreton190/covid-data-model | 10e86dee0aa17e9a4261787203d30c4631b5afb1 | [
"MIT"
] | null | null | null | cli/api.py | chacreton190/covid-data-model | 10e86dee0aa17e9a4261787203d30c4631b5afb1 | [
"MIT"
] | null | null | null | cli/api.py | chacreton190/covid-data-model | 10e86dee0aa17e9a4261787203d30c4631b5afb1 | [
"MIT"
] | null | null | null | import api
import logging
import pathlib
import click
import itertools
import us
from api.can_api_definition import CovidActNowAreaTimeseries
from api.can_api_definition import CovidActNowBulkTimeseries
from libs.pipelines import api_pipeline
from libs.datasets.dataset_utils import AggregationLevel
from libs.datasets import combined_datasets
from libs.enums import Intervention
PROD_BUCKET = "data.covidactnow.org"
_logger = logging.getLogger(__name__)
@click.group("api")
def main():
pass
@main.command()
@click.option(
"--output-dir",
"-o",
type=pathlib.Path,
help="Output directory to save schemas in.",
default="api/schemas",
)
def update_schemas(output_dir):
"""Updates all public facing API schemas."""
schemas = api.find_public_model_classes()
for schema in schemas:
path = output_dir / f"{schema.__name__}.json"
_logger.info(f"Updating schema {schema} to {path}")
path.write_text(schema.schema_json(indent=2))
@main.command()
@click.option(
"--input-dir",
"-i",
default="results",
help="Input directory of state projections",
type=pathlib.Path,
)
@click.option(
"--output",
"-o",
default="results/output/states",
help="Output directory for artifacts",
type=pathlib.Path,
)
@click.option(
"--summary-output",
default="results/output",
help="Output directory for state summaries.",
type=pathlib.Path,
)
@click.option("--aggregation-level", "-l", type=AggregationLevel)
@click.option("--state")
@click.option("--fips")
def generate_api(input_dir, output, summary_output, aggregation_level, state, fips):
"""The entry function for invocation"""
active_states = [state.abbr for state in us.STATES]
us_latest = combined_datasets.build_us_latest_with_all_fields().get_subset(
aggregation_level, state=state, fips=fips, states=active_states
)
us_timeseries = combined_datasets.build_us_timeseries_with_all_fields().get_subset(
aggregation_level, state=state, fips=fips, states=active_states
)
for intervention in list(Intervention):
_logger.info(f"Running intervention {intervention.name}")
all_timeseries = api_pipeline.run_on_all_fips_for_intervention(
us_latest, us_timeseries, intervention, input_dir
)
county_timeseries = [
output for output in all_timeseries if output.aggregate_level is AggregationLevel.COUNTY
]
api_pipeline.deploy_single_level(intervention, county_timeseries, summary_output, output)
state_timeseries = [
output for output in all_timeseries if output.aggregate_level is AggregationLevel.STATE
]
api_pipeline.deploy_single_level(intervention, state_timeseries, summary_output, output)
@main.command("generate-top-counties")
@click.option(
"--disable-validation", "-dv", is_flag=True, help="Run the validation on the deploy command",
)
@click.option(
"--input-dir", "-i", default="results", help="Input directory of state/county projections",
)
@click.option(
"--output",
"-o",
default="results/top_counties",
help="Output directory for artifacts",
type=pathlib.Path,
)
@click.option("--state")
@click.option("--fips")
def generate_top_counties(disable_validation, input_dir, output, state, fips):
"""The entry function for invocation"""
intervention = Intervention.SELECTED_INTERVENTION
active_states = [state.abbr for state in us.STATES]
us_latest = combined_datasets.build_us_latest_with_all_fields().get_subset(
AggregationLevel.COUNTY, states=active_states, state=state, fips=fips
)
us_timeseries = combined_datasets.build_us_timeseries_with_all_fields().get_subset(
AggregationLevel.COUNTY, states=active_states, state=state, fips=fips
)
def sort_func(output: CovidActNowAreaTimeseries):
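        # Negative so that counties with the largest projected hospital-bed
        # shortfall sort first (ascending sort on the negated value).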
return -output.projections.totalHospitalBeds.peakShortfall
all_timeseries = api_pipeline.run_on_all_fips_for_intervention(
us_latest,
us_timeseries,
Intervention.SELECTED_INTERVENTION,
input_dir,
sort_func=sort_func,
limit=100,
)
bulk_timeseries = CovidActNowBulkTimeseries(__root__=all_timeseries)
api_pipeline.deploy_json_api_output(
intervention, bulk_timeseries, output, filename_override="counties_top_100.json"
)
# top_counties_pipeline.deploy_results(county_results_api, "counties_top_100", output)
# _logger.info("finished top counties job")
| 33.181159 | 100 | 0.72767 | 551 | 4,579 | 5.791289 | 0.22686 | 0.041366 | 0.023504 | 0.025071 | 0.505798 | 0.492949 | 0.429646 | 0.374491 | 0.351614 | 0.351614 | 0 | 0.002623 | 0.167504 | 4,579 | 137 | 101 | 33.423358 | 0.83447 | 0.051103 | 0 | 0.336207 | 0 | 0 | 0.147643 | 0.01964 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043103 | false | 0.008621 | 0.112069 | 0.008621 | 0.163793 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
17227cf30b721c2e199e41d4753d20961284c5a7 | 2,335 | py | Python | WilliamWallace/choose_db_dialog.py | gilliM/wallace | 59202aefb6375f23fa6be72c13969bfe36614433 | [
"Apache-2.0"
] | null | null | null | WilliamWallace/choose_db_dialog.py | gilliM/wallace | 59202aefb6375f23fa6be72c13969bfe36614433 | [
"Apache-2.0"
] | null | null | null | WilliamWallace/choose_db_dialog.py | gilliM/wallace | 59202aefb6375f23fa6be72c13969bfe36614433 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
/***************************************************************************
WilliamWallaceDialog
A QGIS plugin
This plugin do a supervised classification
-------------------
begin : 2016-05-17
git sha : $Format:%H$
copyright : (C) 2016 by Gillian
email : gillian.milani@geo.uzh.ch
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os
from PyQt4 import QtGui, uic, QtCore, QtSql
s = QtCore.QSettings()
FORM_CLASS, _ = uic.loadUiType(os.path.join(
os.path.dirname(__file__), 'choose_db_dialog_base.ui'))
class ChooseDbDialog(QtGui.QDialog, FORM_CLASS):
    def __init__(self, parent=None):
"""Constructor."""
super(ChooseDbDialog, self).__init__(parent)
self.setupUi(self)
listOfConnections = self.getPostgisConnections()
self.fillComboBox(listOfConnections)
currentConnection = s.value('WallacePlugins/connectionName')
if currentConnection is not None:
index = self.comboBox.findData(currentConnection)
self.comboBox.setCurrentIndex(index)
    def fillComboBox(self, names):
        self.comboBox.addItem('', None)
        for name in names:
            self.comboBox.addItem(name, name)
def getPostgisConnections(self):
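        # QGIS stores PostGIS connections in QSettings under
        # 'PostgreSQL/connections/<name>/...'; the '<name>/database' key is
        # used as a marker that a connection entry exists.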
keyList = []
for key in s.allKeys():
if key.startswith('PostgreSQL/connections'):
if key.endswith('database'):
connectionName = key.split('/')[2]
keyList.append(connectionName)
return keyList
| 40.258621 | 77 | 0.468951 | 191 | 2,335 | 5.638743 | 0.623037 | 0.044568 | 0.029712 | 0.042711 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010038 | 0.317345 | 2,335 | 57 | 78 | 40.964912 | 0.665621 | 0.498073 | 0 | 0 | 0 | 0 | 0.072476 | 0.064711 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.074074 | 0 | 0.259259 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
172496d4e1a8fa80c8e14527010611354beb9faf | 20,039 | py | Python | PyYDLidar/PyX.py | SweiLz/PyYDLidar | ce2d916904af6cad3f64c6a48f7e6534b952d9a9 | [
"MIT"
] | 6 | 2020-09-02T15:32:22.000Z | 2021-12-12T08:27:10.000Z | PyYDLidar/PyX.py | SweiLz/PyYDLidar | ce2d916904af6cad3f64c6a48f7e6534b952d9a9 | [
"MIT"
] | 1 | 2020-03-21T15:24:30.000Z | 2020-03-22T16:50:33.000Z | PyYDLidar/PyX.py | SweiLz/PyYDLidar | ce2d916904af6cad3f64c6a48f7e6534b952d9a9 | [
"MIT"
] | null | null | null | import threading
import time
from math import atan, pi
import numpy as np
from serial import Serial
class Lidar:
RESULT_OK = 0
RESULT_TIMEOUT = -1
RESULT_FAIL = -2
DEFAULT_TIMEOUT = 2000
cmd_stop = 0x65
cmd_scan = 0x60
cmd_force_scan = 0x61
cmd_reset = 0x80
cmd_force_stop = 0x00
cmd_get_eai = 0x55
cmd_get_device_info = 0x90
cmd_get_device_health = 0x92
ans_type_devinfo = 0x4
    ans_type_devhealth = 0x6
cmd_sync_byte = 0xA5
cmdflag_has_payload = 0x80
ans_sync_byte1 = 0xA5
ans_sync_byte2 = 0x5A
ans_type_measurement = 0x81
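    # Measurement responses pack distance in quarter-millimetre units
    # (distance_q2) and angle in 1/64-degree units behind a check bit
    # (angle_q6_checkbit); the shift constants below unpack those fields.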
resp_measurement_syncbit = (0x1 << 0)
resp_measurement_quality_shift = 2
resp_measurement_sync_quality_shift = 8
resp_measurement_checkbit = (0x1 << 0)
resp_measurement_angle_shift = 1
resp_measurement_angle_sample_shift = 8
resp_measurement_distance_shift = 2
resp_measurement_distance_half_shift = 1
class LaserScan:
class LaserConfig:
min_angle = -pi # Start angle for the laser scan [rad]
max_angle = pi # Stop angle for the laser scan [rad]
ang_increment = None # Scan resolution [rad]
time_increment = None # Scan resolution [s]
scan_time = None # Time between scans
min_range = 0.15 # Minimum range [m]
max_range = 10.0 # Maximum range [m]
range_res = None # Range resolution [m]
ranges = [] # Array of ranges
intensities = [] # Array of intensities
self_time_stamp = None # Self reported time stamp [ns]
system_time_stamp = None # System time when first range was measured [ns]
config = LaserConfig()
class YDLidarX4:
def __init__(self, port):
self._port = port
self._baudrate = 128000
self._isConnected = False
self._isScanning = False
try:
self._serial = Serial(self._port, self._baudrate, timeout=2.0)
self._isConnected = True
self._serial.reset_input_buffer()
self._serial.write([0xA5, 0x00])
self._serial.write([0xA5, 0x65])
self._serial.setDTR(0)
self._serial.flush()
except Exception as e:
print("Cannot open port {}".format(self._port))
return
self._scan = LaserScan()
self.thread = threading.Thread(target=self.cacheScanData)
def cacheScanData(self):
index = 0
self._scan.ranges = np.zeros(20)
# print(self.scan.ranges.shape)
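        # Placeholder acquisition loop: a full implementation would parse
        # scan packets from the serial port here (see YDLidarX42.waitPackage
        # for the packet format); this stub only fills the buffer with its
        # own indices.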
while self._isScanning:
for pos in range(self._scan.ranges.shape[0]):
self._scan.ranges[pos] = pos
pass
    def getScanData(self, raw_nodes=None):
        # `raw_nodes` is assumed to be an (N, 2) integer array of
        # [distance_q2, angle_q6_checkbit] samples, as in
        # YDLidarX42.doProcessSimple(); when omitted, an empty buffer sized
        # from the cached scan is used.
        nodes = self._scan
        count = nodes.ranges.shape[0]
        if raw_nodes is None:
            raw_nodes = np.zeros((count, 2), dtype=int)
        all_nodes_counts = count
        each_angle = 360.0 / all_nodes_counts
        angle_compensate_nodes = np.zeros((all_nodes_counts, 2), dtype=int)
        for i in range(all_nodes_counts):
            if raw_nodes[i, 0] != 0:
                angle = (raw_nodes[i, 1] >>
                         Lidar.resp_measurement_angle_shift) / 64.0
inter = int(angle/each_angle)
angle_pre = angle - inter * each_angle
angle_next = (inter+1)*each_angle - angle
if angle_pre < angle_next:
if inter < all_nodes_counts:
                        angle_compensate_nodes[inter] = raw_nodes[i]
else:
if inter < all_nodes_counts - 1:
                        angle_compensate_nodes[inter+1] = raw_nodes[i]
diff_angle = nodes.config.max_angle - nodes.config.min_angle
counts = int(all_nodes_counts * (diff_angle / (2*pi)))
angle_start = int(pi + nodes.config.min_angle)
node_start = int(all_nodes_counts * (angle_start / (2*pi)))
nodes.ranges = np.zeros(counts)
index = 0
for i in range(all_nodes_counts):
dist_range = angle_compensate_nodes[i, 0] / 4000
if i < all_nodes_counts // 2:
index = all_nodes_counts // 2 - 1 - i
else:
index = all_nodes_counts - 1 - (i-all_nodes_counts//2)
if dist_range > nodes.config.max_range or dist_range < nodes.config.min_range:
dist_range = 0.0
pos = index - node_start
if 0 <= pos and pos < counts:
                nodes.ranges[pos] = dist_range
if diff_angle == 2*pi:
nodes.config.ang_increment = diff_angle / counts
else:
nodes.config.ang_increment = diff_angle / (counts - 1)
# for i in range(0, self.scan.ranges.shape[0], 3):
# pass
# for i in range(20):
# angle = self.scan.config.min_angle + i * self.scan.config.ang_increment
# dist = self.scan.ranges[i]
# print("{}: {}".format(angle, dist))
# print("\n\n")
return nodes
def startScanning(self):
if not self._isConnected:
return
self._serial.setDTR(1)
self._serial.flush()
self._serial.write([0xA5, 0x60])
time.sleep(0.1)
header = list(self._serial.read(7)) # read lidar_ans_header
# print(header)
self._isScanning = True
self.thread.start()
def stopScanning(self):
if not self._isConnected:
return
self._serial.setDTR(0)
self._serial.flush()
self._isScanning = False
class YDLidarX42:
def __init__(self, port):
self._port = port
self._baudrate = 128000
self.scan = LaserScan()
self.scan.config.min_angle = -pi
self.scan.config.max_angle = pi
self.scan.config.min_range = 0.25
self.scan.config.max_range = 10.0
self._intensities = False
self._auto_reconnect = True
self._resolution_fixed = True
self._reversion = False
self._low_exposure = False
self._samp_rate = 4
self._frequency = 7
self._node_counts = 720
self._each_angle = 0.5
self._isConnect = False
self._isScanning = False
self.device_info = {
"Model": None,
"Firmware version": None,
"Hardware version": None,
"Serial number": None
}
self.device_health = {
"Status": None,
"Error code": None
}
self.thread = threading.Thread(target=self.cacheScanData)
self.laser = LaserScan()
self.count = 3600
self.scan_node_buf = np.zeros((self.count, 2), dtype=int)
self._package_sample_index = 0
def initialize(self):
try:
if not self._isConnect:
self._serial = Serial(self._port, self._baudrate, timeout=2.0)
self._isConnect = True
self._serial.reset_input_buffer()
self._serial.write([Lidar.cmd_sync_byte, Lidar.cmd_force_stop])
self._serial.write([Lidar.cmd_sync_byte, Lidar.cmd_stop])
self.clearDTR()
# self.setDTR()
else:
raise Exception("Already connected")
if self._isScanning:
return True
else:
if not self.getDeviceHealth():
return False
if not self.getDeviceInfo():
return False
except Exception as e:
print(e)
return False
def startScan(self):
self._serial.reset_input_buffer()
self._serial.write([Lidar.cmd_sync_byte, Lidar.cmd_force_stop])
self._serial.write([Lidar.cmd_sync_byte, Lidar.cmd_stop])
m_pointTime = 1e9 / 5000
self.setDTR()
self._serial.write([Lidar.cmd_sync_byte, Lidar.cmd_scan])
time.sleep(0.1)
header = list(self._serial.read(7)) # read lidar_ans_header
# data = list(self._serial.read(10)) # read data
print(header)
# print(data)
self._isScanning = True
self.thread.start()
def stopScan(self):
self._isScanning = False
self.thread.join()
def getDeviceHealth(self):
self._serial.reset_input_buffer()
self._serial.write([Lidar.cmd_sync_byte, Lidar.cmd_get_device_health])
time.sleep(0.1)
header = list(self._serial.read(7)) # read lidar_ans_header
data = list(self._serial.read(header[2])) # read data
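        # An all-zero health payload (status byte and error code both 0)
        # indicates no error, so the device is reported as healthy.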
if not any(data):
return True
return False
def getDeviceInfo(self):
self._serial.reset_input_buffer()
self._serial.write([Lidar.cmd_sync_byte, Lidar.cmd_get_device_info])
time.sleep(0.1)
header = list(self._serial.read(7)) # read lidar_ans_header
data = list(self._serial.read(header[2])) # read data
if data[0] == 6:
self.device_info["Model"] = "X4"
ver = int.from_bytes(data[1:3], byteorder='little', signed=False)
self.device_info["Firmware version"] = "{}.{}.{}".format(
ver >> 8, (ver & 0xff)//10, (ver & 0xff) % 10)
self.device_info["Hardware version"] = str(data[3])
self.device_info["Serial number"] = "".join(map(str, data[4:]))
print(self.device_info)
return True
@classmethod
def _AngleCorr(cls, dist):
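        # Distance-dependent angle correction:
        #   atan(21.8 * (155.3 - d_mm) / (155.3 * d_mm))
        # converted to degrees and scaled to 1/64-degree units; `dist` is the
        # raw quarter-millimetre value, hence the /4.0 to get millimetres.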
if dist == 0:
return 0
else:
return int((atan(((21.8 * (155.3 - (dist / 4.0))) / 155.3) / (dist / 4.0)) * 180.0/pi)*64.0)
def waitScanData(self, nodebuffer, count):
if not self._isConnect:
count = 0
recvNodeCount = 0
while recvNodeCount < count:
node = self.waitPackage()
nodebuffer[recvNodeCount] = node
recvNodeCount += 1
if recvNodeCount == count:
break
return nodebuffer, count
def waitPackage(self):
node = np.array([0, Lidar.resp_measurement_checkbit], dtype=int)
packageSampleDistance = []
recvPos = 0
recvBuffer = []
packageBuffer = []
CheckSum = 0
CheckSumCal = 0
CheckSumResult = False
SampleNumlAndCTCal = 0
LastSampleAngleCal = 0
package_sample_num = 0
FirstSampleAngle = 0
LastSampleAngle = 0
IntervalSampleAngle = 0
package_type = 0
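        # X4 scan packet header (10 bytes): 0xAA 0x55 sync, CT (bit 0 is the
        # packet type, upper bits carry the scan frequency for type 1), LSN
        # (sample count), FSA/LSA (first/last sample angles, little-endian),
        # and a 16-bit checksum; 2*LSN bytes of little-endian uint16
        # distance samples follow.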
if self._package_sample_index == 0:
recvPos = 0
while self._isScanning:
currentByte = ord(self._serial.read())
if recvPos == 0:
if currentByte == 0xAA:
pass
else:
continue
elif recvPos == 1:
CheckSumCal = 0x55AA
if currentByte == 0x55:
pass
else:
recvPos = 0
continue
elif recvPos == 2:
SampleNumlAndCTCal = currentByte
package_type = currentByte & 0x01
if package_type == 0 or package_type == 1:
if package_type == 1:
scan_frequence = (currentByte & 0xFE) >> 1
else:
recvPos = 0
continue
elif recvPos == 3:
SampleNumlAndCTCal += (currentByte * 0x100)
package_sample_num = currentByte
elif recvPos == 4:
if currentByte & Lidar.resp_measurement_checkbit:
FirstSampleAngle = currentByte
else:
recvPos = 0
continue
elif recvPos == 5:
FirstSampleAngle += currentByte * 0x100
CheckSumCal ^= FirstSampleAngle
FirstSampleAngle = FirstSampleAngle >> 1
elif recvPos == 6:
if currentByte & Lidar.resp_measurement_checkbit:
LastSampleAngle = currentByte
else:
recvPos = 0
continue
elif recvPos == 7:
LastSampleAngle = currentByte * 0x100 + LastSampleAngle
LastSampleAngleCal = LastSampleAngle
LastSampleAngle = LastSampleAngle >> 1
if package_sample_num == 1:
IntervalSampleAngle = 0
else:
if LastSampleAngle < FirstSampleAngle:
if (FirstSampleAngle >= 180 * 64) and (LastSampleAngle <= 180*64):
IntervalSampleAngle = float(
(360 * 64 + LastSampleAngle - FirstSampleAngle) / (package_sample_num - 1))
else:
if FirstSampleAngle > 360:
IntervalSampleAngle = float(
LastSampleAngle-FirstSampleAngle)/(package_sample_num - 1)
else:
temp = FirstSampleAngle
FirstSampleAngle = LastSampleAngle
LastSampleAngle = temp
IntervalSampleAngle = float(
(LastSampleAngle - FirstSampleAngle)/(package_sample_num-1))
else:
IntervalSampleAngle = float(
(LastSampleAngle - FirstSampleAngle)/(package_sample_num-1))
IntervalSampleAngle_LastPackage = IntervalSampleAngle
elif recvPos == 8:
CheckSum = currentByte
elif recvPos == 9:
CheckSum += (currentByte*0x100)
packageBuffer.append(currentByte)
recvPos += 1
if recvPos == 10:
package_recvPos = recvPos
break
if recvPos == 10:
recvPos = 0
packageSampleDistance.clear()
inComingByte = self._serial.inWaiting()
recvBuffer = list(self._serial.read(inComingByte))
Valu8Tou16 = 0
for i in range(inComingByte):
if recvPos % 2 == 1:
Valu8Tou16 += recvBuffer[i] * 0x100
CheckSumCal ^= Valu8Tou16
packageSampleDistance.append(Valu8Tou16)
else:
Valu8Tou16 = recvBuffer[i]
packageBuffer.append(recvBuffer[i])
recvPos += 1
if package_sample_num * 2 == recvPos:
package_recvPos += recvPos
else:
recvBuffer.clear()
CheckSumCal ^= SampleNumlAndCTCal
CheckSumCal ^= LastSampleAngleCal
if CheckSumCal != CheckSum:
CheckSumResult = False
else:
CheckSumResult = True
sync_flag = 0
if package_type == 0:
sync_flag = 2
else:
sync_flag = 1
sync_quality = 10
if CheckSumResult and recvBuffer != []:
node[0] = packageSampleDistance[self._package_sample_index]
AngleCorrectForDistance = self._AngleCorr(node[0])
temp = FirstSampleAngle + IntervalSampleAngle * \
self._package_sample_index + AngleCorrectForDistance
if temp < 0:
node[1] = (int(temp + 360 * 64) << 1) + \
Lidar.resp_measurement_checkbit
else:
if temp > 360 * 64:
node[1] = (int(temp - 360*64) << 1) + \
Lidar.resp_measurement_checkbit
else:
node[1] = (int(temp) << 1)+Lidar.resp_measurement_checkbit
else:
sync_flag = 2
sync_quality = 10
self._package_sample_index += 1
if self._package_sample_index >= package_sample_num:
self._package_sample_index = 0
return node
def cacheScanData(self):
count = 128
local_buff = np.zeros((count, 2), dtype=int)
local_scan = np.zeros((3600, 2), dtype=int)
scan_count = 0
while self._isScanning:
local_buff, count = self.waitScanData(local_buff, count)
print(local_buff)
# package_sample_index = 0
# package_sample_num = 0
# recvPos = 0
# packageBuffer = []
# CheckSumCal = 0x55AA
# CheckSum = 0
# SampleNumlAndCTCal = 0
# LastSampleAngleCal = 0
# FirstSampleAngle = 0
# LastSampleAngle = 0
# IntervalSampleAngle = 0
# IntervalSampleAngle_LastPackage = 0
def doProcessSimple(self):
# node [ $distance_q2$, $angle_q6_checkbit$ ]
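        # Angle compensation: each raw sample is re-binned into a fixed
        # 720-slot (0.5 degree) grid by its decoded angle, converted from
        # quarter-millimetres to metres, and reordered so that index 0
        # corresponds to the configured minimum angle.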
nodes = self.scan_node_buf
all_nodes_counts = self._node_counts
each_angle = 360.0 / all_nodes_counts
angle_compensate_nodes = np.zeros((all_nodes_counts, 2), dtype=int)
for i in range(self.count):
if nodes[i, 0] != 0:
angle = (nodes[i, 1] >>
Lidar.resp_measurement_angle_shift)/64.0
inter = int(angle/each_angle)
angle_pre = angle - inter * each_angle
angle_next = (inter+1)*each_angle - angle
if angle_pre < angle_next:
if inter < all_nodes_counts:
angle_compensate_nodes[inter] = nodes[i]
else:
if inter < all_nodes_counts - 1:
angle_compensate_nodes[inter+1] = nodes[i]
# print(nodes[i], angle, inter, angle_pre, angle_next)
# print("\n")
diff_angle = self.scan.config.max_angle - self.scan.config.min_angle
counts = int(all_nodes_counts * (diff_angle / (2*pi)))
angle_start = int(pi + self.scan.config.min_angle)
node_start = int(all_nodes_counts * (angle_start / (2*pi)))
self.scan.ranges = np.zeros(counts)
index = 0
for i in range(all_nodes_counts):
dist_range = angle_compensate_nodes[i, 0] / 4000
if i < all_nodes_counts // 2:
index = all_nodes_counts // 2 - 1 - i
else:
index = all_nodes_counts - 1 - (i-all_nodes_counts//2)
if dist_range > self.scan.config.max_range or dist_range < self.scan.config.min_range:
dist_range = 0.0
pos = index - node_start
if 0 <= pos and pos < counts:
self.scan.ranges[pos] = dist_range
if diff_angle == 2*pi:
self.scan.config.ang_increment = diff_angle / counts
else:
self.scan.config.ang_increment = diff_angle / (counts - 1)
# for i in range(0, self.scan.ranges.shape[0], 3):
# pass
# for i in range(20):
# angle = self.scan.config.min_angle + i * self.scan.config.ang_increment
# dist = self.scan.ranges[i]
# print("{}: {}".format(angle, dist))
# print("\n\n")
def setDTR(self):
if not self._isConnect:
return
else:
self._serial.setDTR(1)
self._serial.flush()
def clearDTR(self):
if not self._isConnect:
return
else:
self._serial.setDTR(0)
self._serial.flush()
# if __name__ == "__main__":
# lidar = YDLidarX4("/dev/ttyUSB0")
# lidar.initialize()
# lidar.startScan()
# # for i in range(5):
# # lidar.doProcessSimple()
# # print(lidar._serial.inWaiting())
# time.sleep(1)
# lidar.stopScan()
# while True:
# scan = LaserScan()
| 33.849662 | 111 | 0.532062 | 2,071 | 20,039 | 4.928054 | 0.134718 | 0.036253 | 0.034294 | 0.010778 | 0.471977 | 0.417402 | 0.389379 | 0.333235 | 0.32138 | 0.28885 | 0 | 0.035867 | 0.380857 | 20,039 | 591 | 112 | 33.906937 | 0.786733 | 0.08199 | 0 | 0.433036 | 0 | 0 | 0.009166 | 0 | 0 | 0 | 0.007802 | 0 | 0 | 1 | 0.040179 | false | 0.006696 | 0.011161 | 0 | 0.171875 | 0.011161 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1725e9716ced32c4e508d22f5f3d4b1ba272e429 | 44,983 | py | Python | Icarus/Photometry/Photometry_legacy.py | mhvk/Icarus | bd07ba440cc82d4374e90d6d95dc8844fd82ff19 | [
"BSD-3-Clause"
] | 10 | 2016-03-01T10:12:30.000Z | 2021-08-02T02:36:53.000Z | Icarus/Photometry/Photometry_legacy.py | mhvk/Icarus | bd07ba440cc82d4374e90d6d95dc8844fd82ff19 | [
"BSD-3-Clause"
] | 2 | 2016-03-30T07:13:09.000Z | 2016-04-15T08:54:09.000Z | Icarus/Photometry/Photometry_legacy.py | mhvk/Icarus | bd07ba440cc82d4374e90d6d95dc8844fd82ff19 | [
"BSD-3-Clause"
] | 13 | 2016-02-29T19:20:01.000Z | 2017-05-21T15:25:32.000Z | # Licensed under a 3-clause BSD style license - see LICENSE
from __future__ import print_function, division
__all__ = ["Photometry_legacy"]
from ..Utils.import_modules import *
from .. import Utils
from .. import Core
from .. import Atmosphere
######################## class Photometry ########################
class Photometry_legacy(object):
"""Photometry_legacy
This class allows to fit the flux from the primary star
of a binary system, assuming it is heated by the secondary
(which in most cases will be a pulsar).
It is meant to deal with photometry data. Many sets of photometry
data (i.e. different filters) are read. For each data set, one can
calculate the predicted flux of the model at every data point (i.e.
for a given orbital phase).
"""
def __init__(self, atmo_fln, data_fln, ndiv, porb, x2sini, edot=1., read=True):
"""__init__(atmo_fln, data_fln, ndiv, porb, x2sini, edot=1., read=True)
This class allows to fit the flux from the primary star
of a binary system, assuming it is heated by the secondary
(which in most cases will be a pulsar).
It is meant to deal with photometry data. Many sets of photometry
data (i.e. different filters) are read. For each data set, one can
calculate the predicted flux of the model at every data point (i.e.
for a given orbital phase).
atmo_fln (str): A file containing the grid model information for each
data set. The format of each line of the file is as follows:
Col 0: band name
Col 1: band filename
data_fln (str): A file containing the information for each data set.
Three formats are currently supported.
9-column (preferred):
Col 0: band name
Col 1: column id for orbital phase. Orbital phases must be 0-1.
Phase 0 is defined as the primary star (the one modelled),
located at inferior conjunction.
Col 2: column id for flux/magnitude
Col 3: column id for flux/magnitude error
Col 4: shift to phase zero. Sometimes people use other
definition for orbital phases, so this allows to correct for
it.
Col 5: band calibration error, in magnitude
Col 6: softening parameter for asinh magnitude conversion. If
the value is 0., then standard magnitudes are used.
Col 7: flux or mag flag. Currently, all the data must be in the
same format.
'mag' means magnitude system
'flux' means flux system
Col 8: filename
8-column (support for asinh magnitudes, no fluxes input):
Col 0: band name
Col 1: column id for orbital phase. Orbital phases must be 0-1.
Phase 0 is defined as the primary star (the one modelled),
located at inferior conjunction.
Col 2: column id for magnitude
Col 3: column id for magnitude error
Col 4: shift to phase zero. Sometimes people use other
definition for orbital phases, so this allows to correct for
it.
Col 5: band calibration error, in magnitude
Col 6: softening parameter for asinh magnitude conversion. If
the value is 0., then standard magnitudes are used.
Col 7: filename
7-column (only support standard magnitude input):
Col 0: band name
Col 1: column id for orbital phase. Orbital phases must be 0-1.
Phase 0 is defined as the primary star (the one modelled),
located at inferior conjunction.
Col 2: column id for magnitude
Col 3: column id for magnitude error
Col 4: shift to phase zero. Sometimes people use other
definition for orbital phases, so this allows to correct for
it.
Col 5: band calibration error, in magnitude
Col 6: filename
ndiv (int): The number of surface slice. Defines how coarse/fine the
surface grid is.
porb (float): Orbital period of the system in seconds.
x2sini (float): Projected semi-major axis of the secondary (pulsar)
in light-second.
edot (float): Irradiated energy from the secondary, aka pulsar (i.e.
spin-down luminosity) in erg/s. This is only used for the
calculation of the irradiation efficiency so it does not
enter in the modeling itself.
read (bool): If True, Icarus will use the pre-calculated geodesic
primitives. This is the recommended option, unless you have the
pygts package installed to calculate it on the spot.
>>> fit = Photometry(atmo_fln, data_fln, ndiv, porb, x2sini)
"""
        import warnings
        warnings.warn("This is the old Photometry class. Use the one from the Photometry module instead.", DeprecationWarning)
# We define some class attributes.
self.porb = porb
self.x2sini = x2sini
self.edot = edot
# We read the data.
self._Read_data(data_fln)
# We read the atmosphere models with the atmo_grid class
self._Read_atmo(atmo_fln)
# We make sure that the length of data and atmo_dict are the same
if len(self.atmo_grid) != len(self.data['id']):
print('The number of atmosphere grids and data sets '
'(i.e. photometric bands) do not match!!!')
return
else:
# We keep in mind the number of datasets
self.ndataset = len(self.atmo_grid)
# We initialize some important class attributes.
self._Init_lightcurve(ndiv, read=read)
self._Setup()
def Calc_chi2(self, par, offset_free=1, func_par=None, nsamples=None, influx=False, full_output=False, verbose=False):
"""Calc_chi2(par, offset_free=1, func_par=None, nsamples=None, influx=False, full_output=False, verbose=False)
Returns the chi-square of the fit of the data to the model.
par (list/array): Parameter list.
[0]: Orbital inclination in radians.
[1]: Corotation factor.
[2]: Roche-lobe filling.
[3]: Companion temperature.
[4]: Gravity darkening coefficient.
[5]: K (projected velocity semi-amplitude) in m/s.
[6]: Front side temperature or irradiation temperature.
The irradiation temperature is in the case of the
photometry_modeling_temperature class.
[7]: Distance modulus (can be None).
[8]: Absorption A_V (can be None).
Note: DM and A_V can be set to None. In which case, if
offset_free = 1, these parameters will be fit for.
Note: Can also be a dictionary:
par.keys() = ['av','corotation','dm','filling','gravdark','incl','k1','tday','tnight']
offset_free (int):
1) offset_free = 0:
If the offset is not free and the DM and A_V are specified, the chi2
is calculated directly without allowing an offset between the data and
the bands.
The full chi2 should be:
chi2 = sum[ w_i*(off_i-dm-av*C_i)**2]
+ w_dm*(dm-dm_obs)**2
+ w_av*(av-av_obs)**2, with w = 1/sigma**2
The extra terms (i.e. dm-dm_obs and av-av_obs) should be included
as priors.
            2) offset_free = 1:
The model light curves are fitted to the data with an arbitrary offset
for each band. After, a post-fit is performed in order to adjust the offsets
of the curves accounting for the fact that the absolute calibration of the
photometry may vary.
Note:
The errors should be err**2 = calib_err**2 + 1/sum(flux_err)**2
                but we neglect the second term because it is negligible.
func_par (None): Function that takes the parameter vector and
returns the parameter vector. This allow for possible constraints
on the parameters. The vector returned by func_par must have a length
equal to the number of expected parameters.
nsamples (None): Number of points for the lightcurve sampling.
If None, the lightcurve will be sampled at the observed data
points.
influx (False): If true, will calculate the fit between the data and the
model in the flux domain.
full_output (bool): If true, will output a dictionnary of additional parameters.
'offset' (array): the calculated offset for each band.
'par' (array): the input parameters (useful if one wants to get the optimized
values of DM and A_V.
'res' (array): the fit residuals.
verbose (bool): If true will display the list of parameters and fit information.
>>> chi2 = self.Calc_chi2([PIBYTWO,1.,0.9,4000.,0.08,300e3,5000.,10.,0.])
"""
# We can provide a function that massages the input parameters and returns them.
# This function can, for example, handle fixed parameters or boundary limits.
if func_par is not None:
par = func_par(par)
# check if we are dealing with a dictionary
if isinstance(par, dict):
par = [par['incl'], par['corotation'], par['filling'], par['tnight'], par['gravdark'], par['k1'], par['tday'], par['dm'], par['av']]
if offset_free == 0:
pred_flux = self.Get_flux(par, flat=True, nsamples=nsamples, verbose=verbose)
((par[7],par[8]), chi2_data, rank, s) = Utils.Misc.Fit_linear(self.mag-pred_flux, x=self.ext, err=self.mag_err, b=par[7], m=par[8])
if full_output:
residuals = ( (self.mag-pred_flux) - (self.ext*par[8] + par[7]) ) / self.mag_err
offset = np.zeros(self.ndataset)
chi2_band = 0.
chi2 = chi2_data + chi2_band
else:
# Calculate the theoretical flux
pred_flux = self.Get_flux(par, flat=False, nsamples=nsamples, verbose=verbose)
# Calculate the residuals between observed and theoretical flux
if influx: # Calculate the residuals in the flux domain
res1 = np.array([ Utils.Misc.Fit_linear(self.data['flux'][i], x=Utils.Flux.Mag_to_flux(pred_flux[i], flux0=self.atmo_grid[i].flux0), err=self.data['flux_err'][i], b=0., inline=True) for i in np.arange(self.ndataset) ])
offset = -2.5*np.log10(res1[:,1])
if full_output:
print( "Impossible to return proper residuals" )
residuals = None
else: # Calculate the residuals in the magnitude domain
res1 = np.array([ Utils.Misc.Fit_linear(self.data['mag'][i]-pred_flux[i], err=self.data['mag_err'][i], m=0., inline=True) for i in np.arange(self.ndataset) ])
offset = res1[:,0]
if full_output:
residuals = [ ((self.data['mag'][i]-pred_flux[i]) - offset[i])/self.data['mag_err'][i] for i in np.arange(self.ndataset) ]
chi2_data = res1[:,2].sum()
# Fit for the best offset between the observed and theoretical flux given the DM and A_V
res2 = Utils.Misc.Fit_linear(offset, x=self.data['ext'], err=self.data['calib'], b=par[7], m=par[8], inline=True)
par[7], par[8] = res2[0], res2[1]
chi2_band = res2[2]
# Here we add the chi2 of the data from that of the offsets for the bands.
chi2 = chi2_data + chi2_band
# Update the offset to be the actual offset between the data and the band (i.e. minus the DM and A_V contribution)
offset -= self.data['ext']*par[8] + par[7]
# Output results
if verbose:
print('chi2: {:.3f}, chi2 (data): {:.3f}, chi2 (band offset): {:.3f}, DM: {:.3f}, A_V: {:.3f}'.format(chi2, chi2_data, chi2_band, par[7], par[8]))
if full_output:
return chi2, {'offset':offset, 'par':par, 'res':residuals}
else:
return chi2
def Get_flux(self, par, flat=False, func_par=None, DM_AV=False, nsamples=None, verbose=False):
"""Get_flux(par, flat=False, func_par=None, DM_AV=False, nsamples=None, verbose=False)
Returns the predicted flux (in magnitude) by the model evaluated
at the observed values in the data set.
par: Parameter list.
[0]: Orbital inclination in radians.
[1]: Corotation factor.
[2]: Roche-lobe filling.
[3]: Companion temperature.
[4]: Gravity darkening coefficient.
[5]: K (projected velocity semi-amplitude) in m/s.
[6]: Front side temperature or irradiation temperature.
The irradiation temperature is in the case of the
photometry_modeling_temperature class.
[7]: Distance modulus (optional).
[8]: Absorption A_V (optional).
Note: Can also be a dictionary:
par.keys() = ['av', 'corotation', 'dm', 'filling',
'gravdark', 'incl','k1','tday','tnight']
flat (False): If True, the values are returned in a 1D vector.
If False, predicted values are grouped by data set left in a list.
func_par (None): Function that takes the parameter vector and
returns the parameter vector. This allow for possible constraints
on the parameters. The vector returned by func_par must have a length
equal to the number of expected parameters.
DM_AV (False): If true, will include the DM and A_V in the flux.
nsamples (None): Number of points for the lightcurve sampling.
If None, the lightcurve will be sampled at the observed data
points.
Note: tirr = (par[6]**4 - par[3]**4)**0.25
>>> self.Get_flux([PIBYTWO,1.,0.9,4000.,0.08,300e3,5000.,10.,0.])
"""
# func_par
if func_par is not None:
par = func_par(par)
# check if we are dealing with a dictionary
if isinstance(par, dict):
par = [par['incl'], par['corotation'], par['filling'], par['tnight'], par['gravdark'], par['k1'], par['tday'], par['dm'], par['av']]
# We call Make_surface to make the companion's surface.
self.Make_surface(par, verbose=verbose)
# If nsamples is None we evaluate the lightcurve at each data point.
if nsamples is None:
phases = self.data['phase']
# If nsamples is set, we evaluate the lightcurve at nsamples
else:
phases = (np.arange(nsamples, dtype=float)/nsamples).repeat(self.ndataset).reshape((nsamples,self.ndataset)).T
# If DM_AV, we take into account the DM and AV into the flux here.
if DM_AV:
DM_AV = self.data['ext']*par[8] + par[7]
else:
DM_AV = self.data['ext']*0.
# Calculate the actual lightcurves
flux = []
for i in np.arange(self.ndataset):
# If we use the interpolation method and if the filter is the same as a previously
# calculated one, we do not recalculate the fluxes and simply copy them.
if nsamples is not None and self.grouping[i] < i:
flux.append(flux[self.grouping[i]])
else:
flux.append( np.array([self.star.Mag_flux(phase, atmo_grid=self.atmo_grid[i]) for phase in phases[i]]) + DM_AV[i] )
# If nsamples is set, we interpolate the lightcurve at nsamples.
if nsamples is not None:
for i in np.arange(self.ndataset):
ws, inds = Utils.Series.Getaxispos_vector(phases[i], self.data['phase'][i])
flux[i] = flux[i][inds]*(1-ws) + flux[i][inds+1]*ws
# We can flatten the flux array to simplify some of the calculations in the Calc_chi2 function
if flat:
return np.hstack(flux)
else:
return flux
def Get_flux_theoretical(self, par, phases, func_par=None, verbose=False):
"""Get_flux_theoretical(par, phases, func_par=None, verbose=False)
Returns the predicted flux (in magnitude) by the model evaluated at the
observed values in the data set.
par: Parameter list.
[0]: Orbital inclination in radians.
[1]: Corotation factor.
[2]: Roche-lobe filling.
[3]: Companion temperature.
[4]: Gravity darkening coefficient.
[5]: K (projected velocity semi-amplitude) in m/s.
[6]: Front side temperature or irradiation temperature.
The irradiation temperature is in the case of the
photometry_modeling_temperature class.
[7]: Distance modulus.
[8]: Absorption A_V.
Note: Can also be a dictionary:
par.keys() = ['av','corotation','dm','filling','gravdark','incl','k1','tday','tnight']
phases: A list of orbital phases at which the model should be
evaluated. The list must have the same length as the
number of data sets, each element can contain many phases.
func_par (None): Function that takes the parameter vector and
returns the parameter vector. This allow for possible constraints
on the parameters. The vector returned by func_par must have a length
equal to the number of expected parameters.
verbose (False)
Note: tirr = (par[6]**4 - par[3]**4)**0.25
>>> self.Get_flux_theoretical([PIBYTWO,1.,0.9,4000.,0.08,300e3,5000.,10.,0.], [[0.,0.25,0.5,0.75]]*4)
"""
# func_par
if func_par is not None:
par = func_par(par)
# check if we are dealing with a dictionary
if isinstance(par, dict):
par = [par['incl'], par['corotation'], par['filling'], par['tnight'], par['gravdark'], par['k1'], par['tday'], par['dm'], par['av']]
# We call Make_surface to make the companion's surface.
self.Make_surface(par, verbose=verbose)
DM_AV = self.data['ext']*par[8] + par[7]
flux = []
for i in np.arange(self.ndataset):
# If the filter is the same as a previously calculated one
# we do not recalculate the fluxes and simply copy them.
if self.grouping[i] < i:
flux.append( flux[self.grouping[i]] )
else:
flux.append( np.array([self.star.Mag_flux(phase, atmo_grid=self.atmo_grid[i]) for phase in phases[i]]) + DM_AV[i] )
return flux
def Get_Keff(self, par, nphases=20, atmo_grid=0, func_par=None, make_surface=False, verbose=False):
"""
Returns the effective projected velocity semi-amplitude of the star in m/s.
The luminosity-weighted average velocity of the star is returned for
nphases, for the specified dataset, and a sin wave is fitted to them.
par: Parameter list.
[0]: Orbital inclination in radians.
[1]: Corotation factor.
[2]: Roche-lobe filling.
[3]: Companion temperature.
[4]: Gravity darkening coefficient.
[5]: K (projected velocity semi-amplitude) in m/s.
[6]: Front side temperature.
[7]: Distance modulus.
[8]: Absorption A_V.
nphases (int): Number of phases to evaluate the velocity at.
atmo_grid (int, AtmoGridPhot): The atmosphere grid to use for the velocity
calculation. Can be an integer that represents the index of the atmosphere
grid object in self.atmo_grid, and it can be an AtmoGridPhot instance.
func_par (function): Function that takes the parameter vector and
returns the parameter vector. This allow for possible constraints
on the parameters. The vector returned by func_par must have a length
equal to the number of expected parameters.
make_surface (bool): Whether lightcurve.make_surface should be called
or not. If the flux has been evaluate before and the parameters have
not changed, False is fine.
verbose (bool): Verbosity. Will plot the velocities and the sin fit.
"""
# If it is required to recalculate the stellar surface.
if make_surface:
self.Make_surface(par, func_par=func_par, verbose=verbose)
# Deciding which atmosphere grid we use to evaluate Keff
if isinstance(atmo_grid, int):
atmo_grid = self.atmo_grid[atmo_grid]
# Get the Keffs and fluxes
phases = np.arange(nphases)/float(nphases)
Keffs = np.array( [self.star.Keff(phase, atmo_grid=atmo_grid) for phase in phases] )
tmp = Utils.Misc.Fit_linear(Keffs, np.sin(cts.TWOPI*(phases)), inline=True)
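        # Fit Keff(phase) = b + m*sin(2*pi*phase); the slope m is the
        # effective projected velocity semi-amplitude.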
if verbose:
pylab.plot(np.linspace(0.,1.), tmp[1]*np.sin(np.linspace(0.,1.)*cts.TWOPI)+tmp[0])
pylab.scatter(phases, Keffs)
Keff = tmp[1]
return Keff
def _Init_lightcurve(self, ndiv, read=False):
"""_Init_lightcurve(ndiv, read=False)
Call the appropriate Lightcurve class and initialize
the stellar array.
>>> self._Init_lightcurve(ndiv)
"""
self.star = Core.Star(ndiv, read=read)
return
def Make_surface(self, par, func_par=None, verbose=False):
"""Make_surface(par, func_par=None, verbose=False)
This function gets the parameters to construct to companion
surface model and calls the Make_surface function from the
Lightcurve object.
par: Parameter list.
[0]: Orbital inclination in radians.
[1]: Corotation factor.
[2]: Roche-lobe filling.
[3]: Companion temperature.
[4]: Gravity darkening coefficient.
[5]: K (projected velocity semi-amplitude) in m/s.
[6]: Front side temperature or irradiation temperature.
The irradiation temperature is in the case of the
photometry_modeling_temperature class.
[7]: Distance modulus (optional). Not needed here.
[8]: Absorption A_V (optional). Not needed here.
Note: Can also be a dictionary:
par.keys() = ['av','corotation','dm','filling','gravdark','incl','k1','tday','tnight']
func_par (None): Function that takes the parameter vector and
returns the parameter vector. This allow for possible constraints
on the parameters. The vector returned by func_par must have a length
equal to the number of expected parameters.
>>> self.Make_surface([PIBYTWO,1.,0.9,4000.,0.08,300e3,5000.,10.,0.])
"""
# Apply a function that can modify the value of parameters.
if func_par is not None:
par = func_par(par)
# check if we are dealing with a dictionary
if isinstance(par, dict):
par = [par['incl'], par['corotation'], par['filling'], par['tnight'], par['gravdark'], par['k1'], par['tday'], par['dm'], par['av']]
# Verify parameter values to make sure they make sense.
#if par[6] < par[3]: par[6] = par[3]
# Let's move on with the flux calculation.
q = par[5] * self.K_to_q
tirr = (par[6]**4 - par[3]**4)**0.25
if verbose:
print( "#####\n" + str(par[0]) + ", " + str(par[1]) + ", " + str(par[2]) + ", " + str(par[3]) + ", " + str(par[4]) + ", " + str(par[5]) + ", " + str(par[6]) + ", " + str(par[7]) + ", " + str(par[8]) + "\n" + "q: " + str(q) + ", tirr: " + str(tirr) )
self.star.Make_surface(q=q, omega=par[1], filling=par[2], temp=par[3], tempgrav=par[4], tirr=tirr, porb=self.porb, k1=par[5], incl=par[0])
return
def Plot(self, par, nphases=51, verbose=True, func_par=None, nsamples=None, output=False):
"""
Plots the observed and predicted values along with the
light curve.
par (list): Parameter list.
[0]: Orbital inclination in radians.
[1]: Corotation factor.
[2]: Roche-lobe filling.
[3]: Companion temperature.
[4]: Gravity darkening coefficient.
[5]: K (projected velocity semi-amplitude) in m/s.
[6]: Front side temperature or irradiation temperature.
The irradiation temperature is in the case of the
photometry_modeling_temperature class.
[7]: Distance modulus.
[8]: Absorption A_V.
Note: Can also be a dictionary:
par.keys() = ['av','corotation','dm','filling','gravdark','incl','k1','tday','tnight']
nphases (int): Orbital phase resolution of the model
light curve.
verbose (bool): verbosity.
func_par (function): Function that takes the parameter vector and
returns the parameter vector. This allow for possible constraints
on the parameters. The vector returned by func_par must have a length
equal to the number of expected parameters.
nsamples (int): Number of points for the lightcurve sampling.
If None, the lightcurve will be sampled at the observed data
points.
output (bool): If true, will return the model flux values and the offsets.
>>> self.Plot([PIBYTWO,1.,0.9,4000.,0.08,300e3,5000.,10.,0.])
"""
# Calculate the orbital phases at which the flux will be evaluated
phases = np.resize(np.linspace(0.,1.,nphases), (self.ndataset, nphases))
# Fit the data in order to get the offset
chi2, extras = self.Calc_chi2(par, offset_free=1, verbose=verbose, func_par=func_par, nsamples=nsamples, full_output=True)
offset = extras['offset']
par = extras['par']
# Calculate the theoretical flux at the orbital phases.
pred_flux = self.Get_flux_theoretical(par, phases)
# Calculating the min and the max
tmp = []
for i in np.arange(self.ndataset):
tmp = np.r_[tmp, pred_flux[i]+offset[i]]
minmag = tmp.min()
maxmag = tmp.max()
deltamag = (maxmag - minmag)
spacing = 0.2
#---------------------------------
##### Plot using matplotlib
        try:
            fig = pylab.gcf()
            try:
                ax = pylab.gca()
            except Exception:
                ax = fig.add_subplot(1, 1, 1)
        except Exception:
            fig, ax = pylab.subplots(nrows=1, ncols=1)
ncolors = self.ndataset - 1
if ncolors == 0:
ncolors = 1
for i in np.arange(self.ndataset):
color = np.ones((self.data['mag'][i].size,1), dtype=float) * matplotlib.cm.jet(float(i)/ncolors)
ax.errorbar(self.data['phase'][i], self.data['mag'][i], yerr=self.data['mag_err'][i], fmt='none', ecolor=color[0])
ax.scatter(self.data['phase'][i], self.data['mag'][i], edgecolor=color, facecolor=color)
ax.plot(phases[i], pred_flux[i], 'k--')
ax.plot(phases[i], pred_flux[i]+offset[i], 'k-')
ax.text(1.01, pred_flux[i].max(), self.data['id'][i])
ax.set_xlim([0,1])
ax.set_ylim([maxmag+spacing*deltamag, minmag-spacing*deltamag])
ax.set_xlabel( "Orbital Phase" )
ax.set_ylabel( "Magnitude" )
pylab.draw()
if output:
return pred_flux, offset
return
def Plot_theoretical(self, par, nphases=31, verbose=False, device='/XWIN', func_par=None, output=False):
"""Plot_theoretical(par, nphases=31, verbose=False, device='/XWIN', func_par=None, output=False)
Plots the predicted light curves.
par: Parameter list.
[0]: Orbital inclination in radians.
[1]: Corotation factor.
[2]: Roche-lobe filling.
[3]: Companion temperature.
[4]: Gravity darkening coefficient.
[5]: K (projected velocity semi-amplitude) in m/s.
[6]: Front side temperature or irradiation temperature.
The irradiation temperature is in the case of the
photometry_modeling_temperature class.
[7]: Distance modulus.
[8]: Absorption A_V.
Note: Can also be a dictionary:
par.keys() = ['av','corotation','dm','filling','gravdark','incl','k1','tday','tnight']
nphases (31): Orbital phase resolution of the model
light curve.
verbose (False): verbosity.
device ('/XWIN'): Device driver for Pgplot (can be '/XWIN',
'filename.ps/PS', 'filename.ps./CPS', '/AQT' (on mac only)).
func_par (None): Function that takes the parameter vector and
returns the parameter vector. This allow for possible constraints
on the parameters. The vector returned by func_par must have a length
equal to the number of expected parameters.
output (False): If true, will return the model flux values and the offsets.
>>> self.Plot_theoretical([PIBYTWO,1.,0.9,4000.,0.08,300e3,5000.,10.,0.])
"""
# Calculate the orbital phases at which the flux will be evaluated
phases = np.resize(np.linspace(0.,1.,nphases), (self.ndataset, nphases))
# Calculate the theoretical flux at the orbital phases.
pred_flux = self.Get_flux_theoretical(par, phases, func_par=func_par, verbose=verbose)
# Loop over the data set and plot the flux, theoretical flux and offset theoretical flux
for i in np.arange(self.ndataset):
plotxy(pred_flux[i], phases[i], color=1+i, line=1, rangey=[np.max(pred_flux)+0.5,np.min(pred_flux)-0.5], rangex=[0.,1.], device=device)
if output:
return pred_flux
return
def Pretty_print(self, par, make_surface=True, verbose=True):
"""Pretty_print(par, make_surface=True, verbose=True)
Return a nice representation of the important
parameters.
par: Parameter list.
[0]: Orbital inclination in radians.
[1]: Corotation factor.
[2]: Roche-lobe filling.
[3]: Companion temperature.
[4]: Gravity darkening coefficient.
[5]: K (projected velocity semi-amplitude) in m/s.
[6]: Front side temperature or irradiation temperature.
The irradiation temperature is in the case of the
photometry_modeling_temperature class.
[7]: Distance modulus.
[8]: Absorption A_V.
Note: Can also be a dictionary:
par.keys() = ['av','corotation','dm','filling','gravdark','incl','k1','tday','tnight']
make_surface (True): Whether to recalculate the
surface of the star or not.
verbose (True): Output the nice representation
of the important parameters or just return them
as a list.
>>> self.Pretty_print([PIBYTWO,1.,0.9,4000.,0.08,300e3,5000.,10.,0.])
"""
# check if we are dealing with a dictionary
if isinstance(par, dict):
par = [par['incl'], par['corotation'], par['filling'], par['tnight'], par['gravdark'], par['k1'], par['tday'], par['dm'], par['av']]
incl = par[0]
corot = par[1]
fill = par[2]
temp_back = par[3]
gdark = par[4]
K = par[5]
temp_front = par[6]
DM = par[7]
A_V = par[8]
if DM is None: DM = 0.
if A_V is None: A_V = 0.
q = K * self.K_to_q
tirr = (temp_front**4 - temp_back**4)**0.25
if make_surface:
self.star.Make_surface(q=q, omega=corot, filling=fill, temp=temp_back, tempgrav=gdark, tirr=tirr, porb=self.porb, k1=K, incl=incl)
separation = self.star.separation
roche = self.star.Roche()
Mwd = self.star.mass1
Mns = self.star.mass2
# below we transform sigma from W m^-2 K^-4 to erg s^-1 cm^-2 K^-4
# below we transform the separation from m to cm
Lirr = tirr**4 * (cts.sigma*1e3) * (separation*100)**2 * 4*cts.PI
eff = Lirr/self.edot
# we convert Lirr in Lsun units
Lirr /= 3.839e33
if verbose:
print( "##### Pretty Print #####" )
print( "%9.7f, %3.1f, %9.7f, %10.5f, %4.2f, %9.2f, %9.7f, %6.3f, %6.3f" %tuple(par) )
print( "" )
print( "Corotation factor: %4.2f" %corot )
print( "Gravity Darkening: %5.3f" %gdark )
print( "" )
print( "Filling factor: %6.4f" %fill )
print( "Orbital separation: %5.4e km" %(separation/1000) )
print( "Roche lobe size: %6.4f (orb. sep.)" %roche )
print( "" )
print( "Irradiation efficiency: %6.4f" %eff )
print( "Irration luminosity: %5.4e Lsun" %Lirr )
print( "Backside temperature: %7.2f K" %temp_back )
print( "Frontside temperature: %7.2f (tabul.), %7.2f (approx.) K" %(np.exp(self.star.logteff.max()),temp_front) )
print( "" )
print( "Distance Modulus: %6.3f" %DM )
print( "Absorption (V band): %6.3f" %A_V )
print( "" )
print( "Inclination: %5.3f rad (%6.2f deg)" %(incl,incl*cts.RADTODEG) )
print( "K: %7.3f km/s" %(K/1000) )
print( "" )
print( "Mass ratio: %6.3f" %q )
print( "Mass NS: %5.3f Msun" %Mns )
print( "Mass Comp: %5.3f Msun" %Mwd )
return np.r_[corot,gdark,fill,separation,roche,eff,tirr,temp_back,np.exp(self.star.logteff.max()),temp_front,DM,A_V,incl,incl*cts.RADTODEG,K,q,Mns,Mwd]
def _Read_atmo(self, atmo_fln):
"""_Read_atmo(atmo_fln)
Reads the atmosphere model data.
atmo_fln (str): A file containing the grid model information for each
data set. The format of each line of the file is as follows:
Col 0: band name
Col 1: band filename
>>> self._Read_atmo(atmo_fln)
"""
        with open(atmo_fln, 'r') as f:
            lines = f.readlines()
self.atmo_grid = []
for line in lines:
if (line[0] != '#') and (line[0] != '\n'):
tmp = line.split()
self.atmo_grid.append(Atmosphere.AtmoGridPhot.ReadHDF5(tmp[1]))
return
def _Read_data(self, data_fln):
"""_Read_data(data_fln)
Reads the photometric data.
data_fln (str): A file containing the information for each data set.
Three formats are currently supported.
9-column (preferred):
Col 0: band name
Col 1: column id for orbital phase. Orbital phases must be 0-1.
Phase 0 is defined as the primary star (the one modelled),
located at inferior conjunction.
Col 2: column id for flux/magnitude
Col 3: column id for flux/magnitude error
Col 4: shift to phase zero. Sometimes people use other
definition for orbital phases, so this allows to correct for
it.
Col 5: band calibration error, in magnitude
Col 6: softening parameter for asinh magnitude conversion. If
the value is 0., then standard magnitudes are used.
Col 7: flux or mag flag. Currently, all the data must be in the
same format.
'mag' means magnitude system
'flux' means flux system
Col 8: filename
8-column (support for asinh magnitudes, no fluxes input):
Col 0: band name
Col 1: column id for orbital phase. Orbital phases must be 0-1.
Phase 0 is defined as the primary star (the one modelled),
located at inferior conjunction.
Col 2: column id for magnitude
Col 3: column id for magnitude error
Col 4: shift to phase zero. Sometimes people use other
definition for orbital phases, so this allows to correct for
it.
Col 5: band calibration error, in magnitude
Col 6: softening parameter for asinh magnitude conversion. If
the value is 0., then standard magnitudes are used.
Col 7: filename
7-column (only support standard magnitude input):
Col 0: band name
Col 1: column id for orbital phase. Orbital phases must be 0-1.
Phase 0 is defined as the primary star (the one modelled),
located at inferior conjunction.
Col 2: column id for magnitude
Col 3: column id for magnitude error
Col 4: shift to phase zero. Sometimes people use other
definition for orbital phases, so this allows to correct for
it.
Col 5: band calibration error, in magnitude
Col 6: filename
>>> self._Read_data(data_fln)
"""
        with open(data_fln, 'r') as f:
            lines = f.readlines()
self.data = {'phase':[], 'mag':[], 'mag_err':[], 'flux':[], 'flux_err':[], 'calib':[], 'fln':[], 'id':[], 'softening':[]}
for line in lines:
if (line[0] != '#') and (line[0] != '\n'):
tmp = line.split()
## Old version of the data files
if len(tmp) == 7:
d = np.loadtxt(tmp[-1], usecols=[int(tmp[1]),int(tmp[2]),int(tmp[3])], unpack=True)
## With the flag '_' in the observation id, we do not take %1 so that
## we preserve the long-term phase coherence.
if tmp[0].find('_') != -1:
self.data['phase'].append( np.atleast_1d(d[0] - float(tmp[4])) )
else:
self.data['phase'].append( np.atleast_1d((d[0] - float(tmp[4]))%1.) )
self.data['mag'].append( np.atleast_1d(d[1]) )
self.data['mag_err'].append( np.atleast_1d(d[2]) )
self.data['calib'].append( float(tmp[5]) )
self.data['fln'].append( tmp[-1] )
self.data['id'].append( tmp[0] )
self.data['softening'].append( 0. )
## Old version of the data files including asinh magnitudes
elif len(tmp) == 8:
d = np.loadtxt(tmp[-1], usecols=[int(tmp[1]),int(tmp[2]),int(tmp[3])], unpack=True)
# With the flag '_' in the observation id, we do not take %1 so that
# we preserve the long-term phase coherence.
if tmp[0].find('_') != -1:
self.data['phase'].append( np.atleast_1d(d[0] - float(tmp[4])) )
else:
self.data['phase'].append( np.atleast_1d((d[0] - float(tmp[4]))%1.) )
self.data['mag'].append( np.atleast_1d(d[1]) )
self.data['mag_err'].append( np.atleast_1d(d[2]) )
self.data['calib'].append( float(tmp[5]) )
self.data['fln'].append( tmp[-1] )
self.data['id'].append( tmp[0] )
self.data['softening'].append( float(tmp[6]) )
## Current version of the data files including asinh magnitudes
elif len(tmp) == 9:
d = np.loadtxt(tmp[-1], usecols=[int(tmp[1]),int(tmp[2]),int(tmp[3])], unpack=True)
## Data can be set in magnitude
if tmp[-2] == 'mag':
# With the flag '_' in the observation id, we do not take %1 so that
# we preserve the long-term phase coherence.
if tmp[0].find('_') != -1:
self.data['phase'].append( np.atleast_1d(d[0] - float(tmp[4])) )
else:
self.data['phase'].append( np.atleast_1d((d[0] - float(tmp[4]))%1.) )
self.data['mag'].append( np.atleast_1d(d[1]) )
self.data['mag_err'].append( np.atleast_1d(d[2]) )
self.data['calib'].append( float(tmp[5]) )
self.data['fln'].append( tmp[-1] )
self.data['id'].append( tmp[0] )
self.data['softening'].append( float(tmp[6]) )
## Data can be set in flux
elif tmp[-2] == 'flux':
# With the flag '_' in the observation id, we do not take %1 so that
# we preserve the long-term phase coherence.
if tmp[0].find('_') != -1:
self.data['phase'].append( np.atleast_1d(d[0] - float(tmp[4])) )
else:
self.data['phase'].append( np.atleast_1d((d[0] - float(tmp[4]))%1.) )
self.data['flux'].append( np.atleast_1d(d[1]) )
self.data['flux_err'].append( np.atleast_1d(d[2]) )
self.data['calib'].append( float(tmp[5]) )
self.data['fln'].append( tmp[-1] )
self.data['id'].append( tmp[0] )
self.data['softening'].append( float(tmp[6]) )
## Current version of the data files including asinh magnitudes
else:
raise Exception("The data file does not have the expected number of columns.")
return
def _Setup(self):
"""_Setup()
Stores some important information in class variables.
>>> self._Setup()
"""
# We calculate the constant for the conversion of K to q (observed
# velocity semi-amplitude to mass ratio, with K in m/s)
self.K_to_q = Utils.Binary.Get_K_to_q(self.porb, self.x2sini)
# Storing values in 1D arrays.
# The V band extinction will be extracted from the atmosphere_grid class
ext = []
self.data['ext'] = []
# Converting magnitudes <-> fluxes in case this would be needed for upper limits
if len(self.data['flux']) == 0:
has_mag = True
else:
has_mag = False
# The grouping will define datasets that are in the same band and can be evaluated only once in order to save on computation.
grouping = np.arange(self.ndataset)
for i in np.arange(self.ndataset):
ext.extend(self.data['phase'][i]*0.+self.atmo_grid[i].meta['ext'])
self.data['ext'].append(self.atmo_grid[i].meta['ext'])
if self.data['softening'][i] == 0:
if has_mag:
flux,flux_err = Utils.Flux.Mag_to_flux(self.data['mag'][i], mag_err=self.data['mag_err'][i], flux0=self.atmo_grid[i].meta['zp'])
self.data['flux'].append( flux )
self.data['flux_err'].append( flux_err )
else:
mag,mag_err = Utils.Flux.Flux_to_mag(self.data['flux'][i], flux_err=self.data['flux_err'][i], flux0=self.atmo_grid[i].meta['zp'])
self.data['mag'].append( mag )
self.data['mag_err'].append( mag_err )
else:
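# A non-zero softening indicates asinh ('luptitude') magnitudes; convert them
# to fluxes using the stored softening parameter.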
flux,flux_err = Utils.Flux.Asinh_to_flux(self.data['mag'][i], mag_err=self.data['mag_err'][i], flux0=self.atmo_grid[i].meta['zp'], softening=self.data['softening'][i])
self.data['flux'].append( flux )
self.data['flux_err'].append( flux_err )
for j in np.arange(i+1):
if self.data['id'][i] == self.data['id'][j]:
grouping[i] = j
break
self.ext = np.asarray(ext)
self.grouping = np.asarray(grouping)
self.data['ext'] = np.asarray(self.data['ext'])
self.data['calib'] = np.asarray(self.data['calib'])
self.mag = np.hstack(self.data['mag'])
self.mag_err = np.hstack(self.data['mag_err'])
self.phase = np.hstack(self.data['phase'])
self.flux = np.hstack(self.data['flux'])
self.flux_err = np.hstack(self.data['flux_err'])
self.ndata = self.flux.size
return
######################## class Photometry ########################
| 52.003468 | 262 | 0.569415 | 5,983 | 44,983 | 4.217617 | 0.106301 | 0.026948 | 0.009154 | 0.010779 | 0.626575 | 0.584687 | 0.561306 | 0.543037 | 0.536181 | 0.526234 | 0 | 0.023577 | 0.319232 | 44,983 | 864 | 263 | 52.063657 | 0.800444 | 0.505013 | 0 | 0.407186 | 0 | 0.005988 | 0.087424 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038922 | false | 0 | 0.01497 | 0 | 0.107784 | 0.08982 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
17271fe6063a50ed2061ed3af93c658a70080d06 | 3,952 | py | Python | stayhome/business/forms/add_form.py | mageo/stayhomech | 5afe922b13f0350a79eaff0401709f99c5a31e8b | [
"MIT"
] | 3 | 2020-03-20T11:01:57.000Z | 2020-03-20T16:29:12.000Z | stayhome/business/forms/add_form.py | stayhomech/stayhomech | 5afe922b13f0350a79eaff0401709f99c5a31e8b | [
"MIT"
] | 74 | 2020-03-23T21:35:07.000Z | 2020-04-27T12:55:50.000Z | stayhome/business/forms/add_form.py | mageo/stayhomech | 5afe922b13f0350a79eaff0401709f99c5a31e8b | [
"MIT"
] | 3 | 2020-03-20T11:02:35.000Z | 2020-03-20T16:29:23.000Z | import json
import os
import uuid
from django import forms
from captcha.fields import ReCaptchaField
from phonenumber_field.formfields import PhoneNumberField
from django.utils.translation import gettext_lazy as _
from django.utils.translation import get_language
from geodata.models import NPA
from business.models import Request
class BusinessAddForm(forms.Form):
name = forms.CharField(
label=_('Company name'),
max_length=255,
help_text=_('The name of the company.'),
widget=forms.TextInput(attrs={
'class': 'form-control form-control-sm'
})
)
description = forms.CharField(
label=_('Description'),
help_text=_('A short description of the services of the company.'),
widget=forms.Textarea(attrs={
'class': 'form-control form-control-sm',
'rows': 5
})
)
address = forms.CharField(
label=_('Street and number'),
max_length=255,
help_text=_('The street and street number of your address'),
widget=forms.TextInput(attrs={
'class': 'form-control form-control-sm'
})
)
location = forms.ModelChoiceField(
label=_('City'),
help_text=_('Where is the company based?'),
queryset=NPA.objects.all(),
widget=forms.Select(attrs={
'class': 'form-control form-control-sm'
})
)
category = forms.CharField(
label=_('Categories'),
max_length=255,
help_text=_('List possible categories of the service that the company provides (eg. Food, Books, Drinks, Music, Games, Mobility)'),
widget=forms.TextInput(attrs={
'class': 'form-control form-control-sm'
})
)
delivery = forms.CharField(
label=_('Delivery locations'),
max_length=255,
help_text=_('Where are you delivering ? Whole Switzerland, cantons, districts, municipalities, be as precise as possible.'),
widget=forms.TextInput(attrs={
'class': 'form-control form-control-sm'
})
)
website = forms.CharField(
label=_('Website'),
max_length=255,
help_text=_('Company website, if any.'),
required=False,
widget=forms.TextInput(attrs={
'class': 'form-control form-control-sm'
})
)
phone = forms.CharField(
label=_('Phone number'),
max_length=100,
help_text=_('Company phone number, if any.'),
required=False,
widget=forms.TextInput(attrs={
'class': 'form-control form-control-sm'
})
)
email = forms.CharField(
label=_('Email address'),
max_length=255,
help_text=_('Company email address, if any.'),
required=False,
widget=forms.TextInput(attrs={
'class': 'form-control form-control-sm'
})
)
if os.environ.get("RUNNING_ENV", default='dev') != 'dev':
captcha = ReCaptchaField(
label=''
)
def get_location_choices(self):
return [
(0, 'Test')
]
def save_request(self):
location = str(self.cleaned_data['location']) + ' [PK:' + str(self.cleaned_data['location'].pk) + ']'
# Create request
r = Request(
name=self.cleaned_data['name'],
description=self.cleaned_data['description'],
address=self.cleaned_data['address'],
location=location,
website=self.cleaned_data['website'],
phone=self.cleaned_data['phone'],
email=self.cleaned_data['email'],
category=self.cleaned_data['category'],
delivery=self.cleaned_data['delivery'],
source=1,
checksum='Web Form',
source_uuid=str(uuid.uuid4()),
lang=get_language()
)
r.save()
# Set status
r.set_status(r.events.NEW)
| 29.274074 | 139 | 0.585779 | 420 | 3,952 | 5.380952 | 0.307143 | 0.087611 | 0.066372 | 0.083628 | 0.35177 | 0.290265 | 0.221239 | 0.19115 | 0.19115 | 0.19115 | 0 | 0.008909 | 0.28998 | 3,952 | 134 | 140 | 29.492537 | 0.796507 | 0.006326 | 0 | 0.292035 | 0 | 0.00885 | 0.245413 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017699 | false | 0 | 0.088496 | 0.00885 | 0.20354 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
172924a6a260577eee406ab8308643a692027ef4 | 1,897 | py | Python | src/management_tools.py | EOEPCA/um-pep-engine | efdf87027b54efc5686b3abd3882095441994978 | [
"Apache-2.0"
] | null | null | null | src/management_tools.py | EOEPCA/um-pep-engine | efdf87027b54efc5686b3abd3882095441994978 | [
"Apache-2.0"
] | 3 | 2021-04-12T11:40:39.000Z | 2022-03-08T17:04:03.000Z | src/management_tools.py | EOEPCA/um-pep-engine | efdf87027b54efc5686b3abd3882095441994978 | [
"Apache-2.0"
] | 1 | 2020-07-22T10:35:58.000Z | 2020-07-22T10:35:58.000Z | #!/usr/local/bin/python3
import argparse
import sys
from handlers.mongo_handler import Mongo_Handler
from bson.json_util import dumps
custom_mongo = Mongo_Handler("resource_db", "resources")
def list_resources(user,resource):
if resource is not None:
return custom_mongo.get_from_mongo("resource_id", resource)
if user is not None:
resources=custom_mongo.get_all_resources()
return list(filter(lambda x: x["ownership_id"] == user,resources))
return custom_mongo.get_all_resources()
def remove_resources(user,resource,all):
if resource is not None:
return custom_mongo.delete_in_mongo("resource_id", resource)
if user is not None and all:
return custom_mongo.remove_resources("ownership_id",user)
if user is None and all:
return custom_mongo.remove_resources()
return "No action taken (missing --all flag?)"
parser = argparse.ArgumentParser(description='Operational management of resources.')
parser.add_argument('action', metavar='action', type=str,
help='Operation to perform: list/remove')
parser.add_argument('-u',
'--user',
help='Filter action by user ID')
parser.add_argument('-r',
'--resource',
help='Filter action by resource ID')
parser.add_argument('-a',
'--all',
action='store_true',
help='Apply action to all resources.')
args = vars(parser.parse_args())
if args["action"] == "list":
result = dumps(list_resources(args['user'],args['resource']))
elif args["action"] == "remove":
if args["resource"] is not None:
args["all"] = False
result = remove_resources(args['user'],args['resource'],args['all'])
else:
print("Allowed actions are 'remove' or 'list'")
sys.exit(-1)
print(result)
| 33.875 | 84 | 0.641539 | 237 | 1,897 | 4.983122 | 0.333333 | 0.065199 | 0.038103 | 0.043184 | 0.286198 | 0.193057 | 0.193057 | 0.193057 | 0.064352 | 0 | 0 | 0.00138 | 0.236162 | 1,897 | 55 | 85 | 34.490909 | 0.813665 | 0.012124 | 0 | 0.045455 | 0 | 0 | 0.214095 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.090909 | 0 | 0.295455 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
172a85f182f53fccf2d57615f7dc161c29331240 | 2,310 | py | Python | python/speaker_2_sound.py | now-start/20_HI001L_- | 494607c21a17c093e0d6bad102416c1afa348982 | [
"MIT"
] | null | null | null | python/speaker_2_sound.py | now-start/20_HI001L_- | 494607c21a17c093e0d6bad102416c1afa348982 | [
"MIT"
] | null | null | null | python/speaker_2_sound.py | now-start/20_HI001L_- | 494607c21a17c093e0d6bad102416c1afa348982 | [
"MIT"
] | null | null | null | # speaker_2_sound.py
# Records with one speaker, generates in-phase and phase-inverted wavs, then
# plays the in-phase signal on the left speaker and the inverted one on the right
# (the in-phase and inverted-phase files are merged into a single stereo wav)
# Package for recording and playing back audio/noise (wav files)
import pyaudio
import wave
# Packages for phase inversion, waveform merging, and playback
from pydub import AudioSegment
from pydub.playback import play
from scipy.io import wavfile
import matplotlib.pyplot as plt
CHUNK = 1024
FORMAT = pyaudio.paInt16 # PortAudio sample format
CHANNELS = 1 # Number of channels
RATE = 44100
RECORD_SECONDS = 5 # Recording time in seconds
thread = None
# Name of the recorded wav file
WAVE_OUTPUT_FILENAME = "originalAudio.wav"
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True, # open as an input (recording) stream
frames_per_buffer=CHUNK)
print("Start to record the audio.")
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
print("Recording is finished.")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
# Load the recorded wav file
originalSound = AudioSegment.from_file(WAVE_OUTPUT_FILENAME, format="wav")
# Generate the phase-inverted waveform from the original wav
reversedSound = originalSound.invert_phase()
# Save the inverted waveform as a wav file (optional)
reversedSound.export("reversedAudio.wav", format="wav")
# Play the in-phase signal
# play(originalSound)
# Play the phase-inverted signal
# play(reversedSound)
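# Sanity check (optional sketch): overlaying the original with its inverted
# copy should be near-silent, since the two waveforms cancel sample by sample.
# cancelled = originalSound.overlay(reversedSound)
# play(cancelled)  # expected: (almost) no audible output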
# Play the in-phase signal on the left (stereo, pan 100% left)
# pannedLeft = originalSound.pan(-1) # -1 means pan 100% to the left
# play(pannedLeft)
# Play the phase-inverted signal on the right (stereo, pan 100% right)
# pannedRight = reversedSound.pan(1) # +1 means pan 100% to the right
# play(pannedRight)
# Combine the two signals into one stereo file: the original as the left-only
# mono channel, the inverted as the right-only mono channel
stereo_sound = AudioSegment.from_mono_audiosegments(
originalSound, reversedSound)
play(stereo_sound)
stereo_sound.export("stereo_sound.wav", format="wav")
# Plot the waveform (graph)
sample_rate, audio_samples = wavfile.read("stereo_sound.wav", 'rb')
# Show some basic information about the audio.
duration = len(audio_samples)/sample_rate
print(f'Sample rate: {sample_rate} Hz')
print(f'Total duration: {duration:.2f}s')
print(f'Size of the input: {len(audio_samples)}')
plt.plot(audio_samples)
plt.show()
| 23.814433 | 74 | 0.713853 | 352 | 2,310 | 4.596591 | 0.508523 | 0.033993 | 0.033375 | 0.012361 | 0.024722 | 0.024722 | 0.024722 | 0 | 0 | 0 | 0 | 0.016719 | 0.171429 | 2,310 | 96 | 75 | 24.0625 | 0.828631 | 0.311688 | 0 | 0 | 0 | 0 | 0.144779 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.104167 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
172d04249db22c0e936f3d55daaddef381d26533 | 1,230 | py | Python | demo_wait_negative.py | rdagger/micropython-ads1220 | c90f939517c8163b234210b8cf91b3ce948b5b1c | [
"MIT"
] | 2 | 2021-08-25T11:40:23.000Z | 2022-02-28T05:31:18.000Z | demo_wait_negative.py | rdagger/micropython-ads1220 | c90f939517c8163b234210b8cf91b3ce948b5b1c | [
"MIT"
] | null | null | null | demo_wait_negative.py | rdagger/micropython-ads1220 | c90f939517c8163b234210b8cf91b3ce948b5b1c | [
"MIT"
] | 1 | 2021-08-08T11:39:47.000Z | 2021-08-08T11:39:47.000Z | """ADS1220 example (monitor for negative voltage)."""
from time import sleep
from machine import Pin, SPI # type: ignore
from ads1220 import ADC
cs = 15 # Chip select pin
drdy = 27 # Data ready pin
spi = SPI(1,
baudrate=10000000, # 10 MHz (try lower speed to troubleshoot)
sck=Pin(14),
mosi=Pin(13),
miso=Pin(12),
phase=1) # ADS1220 uses SPI mode 1
adc = ADC(spi, cs, drdy)
def test():
"""Test code."""
adc.conversion_continuous() # Set continuous conversion mode
adc.pga_off() # Disable gain
adc.fir_filter(1) # Simultaneous 50-Hz and 60-Hz rejection
adc.operating_mode(2) # Turbo mode
adc.data_rate(2) # 180 SPS
adc.start_conversion() # Start conversions
adc.select_channel(0) # Select channel 0 (0 to 3 ADC channels)
sleep(.1) # Ensure ADC ready
try:
while True:
result = adc.read_wait_negative(timeout=1000)
if result:
print("Negative voltage acquired.")
else:
print("Timeout.")
sleep(3)
except KeyboardInterrupt:
print("\nCtrl-C pressed to exit.")
finally:
adc.power_down()
spi.deinit()
test()
| 26.73913 | 72 | 0.598374 | 161 | 1,230 | 4.509317 | 0.590062 | 0.041322 | 0.038567 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.063657 | 0.297561 | 1,230 | 45 | 73 | 27.333333 | 0.77662 | 0.279675 | 0 | 0 | 0 | 0 | 0.068366 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0 | 0.085714 | 0 | 0.114286 | 0.085714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
172ec1ffd20a09752459f22ce0256f74dd0dd346 | 10,026 | py | Python | models/unet.py | c22n/unet-pytorch | fc0db7ca69d4149c736b8d0923272f14fb5693fe | [
"MIT"
] | 3 | 2018-03-10T05:48:42.000Z | 2018-07-25T01:18:30.000Z | models/unet.py | c22n/unet-pytorch | fc0db7ca69d4149c736b8d0923272f14fb5693fe | [
"MIT"
] | null | null | null | models/unet.py | c22n/unet-pytorch | fc0db7ca69d4149c736b8d0923272f14fb5693fe | [
"MIT"
] | null | null | null | ### Class to define 3D U-Net.
from typing import List, Tuple
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from models.custom_layers import Softmax3d
class AnalysisLayer(nn.Module):
"""Module for analysis layer of U-Net architecture."""
def __init__(self, n_features: int,
conv_size: int = 3,
first: bool = False,
pooling: nn.MaxPool3d = None,
upconv: nn.ConvTranspose3d = None):
"""Initialisation of layer.
Args:
n_features: Number of input features (output will be double).
conv_size: Size of convolution kernel.
first: Whether this is the first layer in the U-Net.
pooling: Pooling layer (if supplied).
upconv : Upconvolution layer (for bottom layer of U-Net).
"""
super(AnalysisLayer, self).__init__()
if first:
features_in = 1 # TODO adapt for RGB images
else:
features_in = n_features
self.pooling = pooling
self.conv1 = nn.Conv3d(features_in, n_features,
kernel_size=conv_size)
self.bn1 = nn.BatchNorm3d(n_features)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv3d(n_features, n_features*2,
kernel_size=conv_size)
self.bn2 = nn.BatchNorm3d(n_features*2)
self.upconv = upconv
def forward(self, x: Variable) -> Variable:
"""Forward pass through layer."""
if self.pooling is not None:
x = self.pooling(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
if self.upconv is not None:
x = self.upconv(x)
return x
class SynthesisLayer(nn.Module):
"""Module for synthesis layer of U-Net architecture."""
def __init__(self, n_features: int, conv_size: int = 3,
upconv_size: int = 2, last: bool = False):
"""Initialisation.
Args:
n_features: Number of input features (remember shortcut layers!).
conv_size: Size of convolution layer kernel.
upconv_size: Size and stride of upconvolution layer kernel.
last: Whether this is the final layer in the network.
"""
super(SynthesisLayer, self).__init__()
features_out = n_features // 3
self.conv1 = nn.Conv3d(n_features, features_out,
kernel_size=conv_size)
self.bn1 = nn.BatchNorm3d(features_out)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv3d(features_out, features_out,
kernel_size=conv_size)
self.bn2 = nn.BatchNorm3d(features_out)
if last:
self.upconv = None
else:
self.upconv = nn.ConvTranspose3d(features_out, features_out,
kernel_size=upconv_size,
stride=upconv_size)
def forward(self, x: Variable) -> Variable:
"""Forward pass through layer."""
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
if self.upconv is not None:
x = self.upconv(x)
return x
class FinalLayer(nn.Module):
"""Final layer to reduce to classed pixels."""
def __init__(self, n_features: int, n_classes: int):
"""Initilisation.
Args:
n_features: Number of input features.
n_classes: Final number of classes.
"""
super(FinalLayer, self).__init__()
self.conv_fc = nn.Conv3d(n_features, n_classes, kernel_size=1)
self.softmax = Softmax3d()
def forward(self, x: Variable) -> Variable:
"""Forward pass through layer."""
x = self.conv_fc(x)
x = self.softmax(x)
return x
class UNet3D(nn.Module):
"""3D U-Net network architecture."""
def __init__(self, n_layer: int, n_class: int, features_root: int,
input_size: Tuple[int], pool_size: int = 2,
conv_size: int = 3, upconv_size: int = 2):
"""Initialisation of network.
Args:
n_layer: Number of U-Net resolution steps, equivalent to number of
analysis layers.
n_class: Number of output classes.
features_root: Number of features in the first layer of the network.
input_size: Size of 3D input image to network.
pool_size: Size and stride of the max pooling window.
conv_size: Size of the convolution kernel.
upconv_size: Size and stride of the upconvolution kernel.
"""
super(UNet3D, self).__init__()
self.n_layer = n_layer
self.n_class = n_class
self.features_root = features_root
self.input_size = input_size
self.pool_size = pool_size
self.conv_size = conv_size
self.upconv_size = upconv_size
self.pool = nn.MaxPool3d(kernel_size=self.pool_size,
stride=self.pool_size)
self.layers = self.__construct_layers()
# Crop sizes for concatenation at shortcut connections.
self.dimen_diff = [self.calc_dimen_diff(i)
for i in range(self.n_layer-1)]
def __construct_layers(self) -> nn.ModuleList:
"""Instantiates layers for network.
Returns:
A module list of layers in the network.
"""
n_features = self.features_root
layers = nn.ModuleList([])
# Analysis path
for i in range(self.n_layer):
if i == 0:
layers.append(AnalysisLayer(n_features, first=True))
elif i == self.n_layer-1:
# lowest layer
upconv = nn.ConvTranspose3d(n_features*2, n_features*2,
kernel_size=self.upconv_size,
stride=self.upconv_size)
layers.append(AnalysisLayer(n_features, pooling=self.pool,
upconv=upconv))
else:
layers.append(AnalysisLayer(n_features, pooling=self.pool))
n_features *= 2
# Synthesis path
for i in range(self.n_layer-1, 0, -1):
n_features += n_features // 2 # shortcut connection
if i == 1:
layers.append(SynthesisLayer(n_features, last=True))
else:
layers.append(SynthesisLayer(n_features))
n_features //= 3
# Final layer
layers.append(FinalLayer(n_features, self.n_class))
return layers
def calc_layer_dimension(self, n: int) -> np.ndarray:
"""Calculates the shape of a U-Net layer for shortcut connections.
If the layer is an analysis (downward) resolution step, calculates
the output of that layer before max pooling. If the layer is a
synthesis step, calculates the input before the first convolution.
Args:
n: Layer number (first analysis layer is 0).
Returns:
The shape of the output Tensor.
"""
if n > self.n_layer-1: # this is a synthesis path layer
shape = self.calc_layer_dimension(self.n_layer-1)
num_operations = n - self.n_layer + 1
for i in range(num_operations):
if i != 0:
shape -= (2 * (self.conv_size - 1))
shape *= self.upconv_size
else: # this is an analysis path layer
shape = np.array(self.input_size)
for i in range(n+1):
if i != 0:
shape //= self.pool_size
shape -= (2 * (self.conv_size - 1))
return shape
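# Worked example (hypothetical sizes): with input_size=(132, 132, 132),
# conv_size=3 and pool_size=2, the analysis layers have per-axis shapes
# 132 - 4 = 128 (n=0), 128//2 - 4 = 60 (n=1), 60//2 - 4 = 26 (n=2).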
def calc_dimen_diff(self, res_step: int) -> List[int]:
"""Calculate dimension difference between up and down layers.
The difference is the size difference (in pixels) between the
input to the `n`th layer of the U-Net and the corresponding
layer in the synthesis path. Used for concatenation in
shortcut connections.
Args:
res_step: Resolution step of network (max is self.n_layer-1).
Returns:
A list of the shape difference in each axis.
"""
shape_analysis = self.calc_layer_dimension(res_step)
shape_synthesis = self.calc_layer_dimension(2 * (self.n_layer-1)
- res_step)
return (shape_analysis - shape_synthesis)
def forward(self, x: Variable) -> Variable:
"""Forward pass through network.
Args:
x: Network input.
Returns:
The output of the network.
"""
dw_features = []
shortcut_count = 0
for i, layer in enumerate(self.layers):
if i > self.n_layer-1 and i < len(self.layers)-1:
# Concatenate shortcut connection.
i_short = 2 * (self.n_layer-1) - i # shortcut index
difference = self.dimen_diff[i_short]
crop = [(di // 2 + (di % 2 > 0), di // 2)
for di in difference]
shortcut = dw_features[i_short][:,:,
(crop[0][0]):(dw_features[i_short].size()[2] - crop[0][1]),
(crop[1][0]):(dw_features[i_short].size()[3] - crop[1][1]),
(crop[2][0]):(dw_features[i_short].size()[4] - crop[2][1])]
x = torch.cat((shortcut, x), dim=1)
shortcut_count += 1
x = layer(x)
if i < self.n_layer-1:
# Save for shortcut connection.
dw_features.append(x.clone())
return x
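# Minimal usage sketch (sizes are illustrative assumptions and may need tuning
# so that every resolution step stays positive after the valid convolutions):
#   net = UNet3D(n_layer=3, n_class=2, features_root=32,
#                input_size=(132, 132, 132))
#   x = Variable(torch.randn(1, 1, 132, 132, 132))
#   out = net(x)  # shape (batch, n_class, d, h, w) with d, h, w < 132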
| 35.679715 | 80 | 0.555855 | 1,228 | 10,026 | 4.381922 | 0.142508 | 0.046831 | 0.026017 | 0.022487 | 0.328749 | 0.259245 | 0.218361 | 0.183795 | 0.116335 | 0.107229 | 0 | 0.015579 | 0.353381 | 10,026 | 280 | 81 | 35.807143 | 0.814438 | 0.264014 | 0 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003571 | 0 | 1 | 0.070513 | false | 0 | 0.044872 | 0 | 0.185897 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1731602b3f7eea63308baa2e86c34fe547fe10ff | 812 | py | Python | matrix/matrix.py | GlibGozer/Matrix | 5bc978a47a63ccf51b0be01ab28f5eca5819bcba | [
"Apache-2.0"
] | null | null | null | matrix/matrix.py | GlibGozer/Matrix | 5bc978a47a63ccf51b0be01ab28f5eca5819bcba | [
"Apache-2.0"
] | null | null | null | matrix/matrix.py | GlibGozer/Matrix | 5bc978a47a63ccf51b0be01ab28f5eca5819bcba | [
"Apache-2.0"
] | null | null | null | import os
import random
from discord.ext import commands
from dotenv import load_dotenv
load_dotenv()
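# The .env file is expected to define DISCORD_TOKEN=<your bot token>.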
TOKEN = os.getenv('DISCORD_TOKEN')
bot = commands.Bot(command_prefix='m!')
@bot.command(name='compliment', help='Makes you feel better')
async def nine_nine(ctx):
compliments = [
'Everyone has imperfections, you\'re great!',
'Don\'t be pessimistic, go enjoy life!',
(
'Think of the worst person you\'ve ever met and be happy you\'re not like him. '
'Think of @! Allen#0001 giving you mod perms'
),
]
response = random.choice(compliments)
await ctx.send(response)
@bot.event
async def on_message_delete(message):
await message.channel.send('Some mf deleted a message, who spotted him')
bot.run(TOKEN) | 27.066667 | 92 | 0.652709 | 111 | 812 | 4.711712 | 0.657658 | 0.038241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006504 | 0.242611 | 812 | 30 | 93 | 27.066667 | 0.843902 | 0 | 0 | 0 | 0 | 0 | 0.271684 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.173913 | 0 | 0.173913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
173361be9e30006fffba077574f0e94fd3d9ef74 | 1,602 | py | Python | utils/plotting.py | marcelo-santos-12/lbp_paper | 56d2457dce2c97a16de9e034b1a87ef0ceb9446a | [
"MIT"
] | null | null | null | utils/plotting.py | marcelo-santos-12/lbp_paper | 56d2457dce2c97a16de9e034b1a87ef0ceb9446a | [
"MIT"
] | null | null | null | utils/plotting.py | marcelo-santos-12/lbp_paper | 56d2457dce2c97a16de9e034b1a87ef0ceb9446a | [
"MIT"
] | null | null | null | '''
Module implementing the ROC curve plotting functions
'''
import matplotlib.pyplot as plt
import numpy as np
import os
from sklearn.metrics import plot_roc_curve
plt.style.use('ggplot')
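# Example call (argument values below are hypothetical):
#   plot_results('sample1', best_clf, x_test, y_test, 'lbp', 'uniform', 8, 1, 'out')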
def plot_results(_id, best_clf, x_test, y_test, method, variant, P, R, output):
if not os.path.exists(output + '/ARR_ROC/'+variant+'/y_pred'):
os.makedirs(output + '/ARR_ROC/'+variant+'/y_pred')
if not os.path.exists(output + '/ARR_ROC/'+variant+'/y_pred_roc'):
os.makedirs(output + '/ARR_ROC/'+variant+'/y_pred_roc')
from sklearn.metrics._plot.base import _get_response
y_pred_roc, _ = _get_response(x_test, best_clf, 'auto', pos_label=None)
arr_roc = output + '/ARR_ROC/{}/y_pred_roc/{}_{}_{}_{}.txt'.format(variant, method, _id.replace(' ', ''), str(P), str(R))
np.savetxt(arr_roc, y_pred_roc)
y_predict = best_clf.predict(x_test)
arr_roc = output + '/ARR_ROC/{}/y_pred/{}_{}_{}_{}.txt'.format(variant, method, _id.replace(' ', ''), str(P), str(R))
np.savetxt(arr_roc, y_predict)
plot_roc_curve(best_clf, x_test, y_test)
# Plot the diagonal reference line y = x
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='g', alpha=.8)
# Plot basic chart information (title and legend)
plt.title('{} - {}/{} - P: {}, R:{}'.format(_id, method, variant, P, R))
plt.legend(loc="lower right")
if not os.path.exists(output + '/ROC_'+variant):
os.makedirs(output + '/ROC_'+variant)
plt.savefig(output + '/ROC_{}/{}_{}_{}_{}_{}.png'.format(variant, variant, method, _id.replace(' ', ''), P, R))
plt.close() | 36.409091 | 125 | 0.632959 | 238 | 1,602 | 3.987395 | 0.340336 | 0.063224 | 0.075869 | 0.080084 | 0.399368 | 0.38883 | 0.328767 | 0.273973 | 0.202318 | 0.202318 | 0 | 0.004535 | 0.174157 | 1,602 | 44 | 126 | 36.409091 | 0.712774 | 0.078652 | 0 | 0 | 0 | 0 | 0.157357 | 0.066757 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.2 | 0 | 0.24 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
17352702a9e2965909b6092dc8c8e7e3c676d60c | 552 | py | Python | tests/algorithms/test_rice_siff.py | rvyjidacek/fcapsy | 6d531a337b0e65cac10e41b84d232498f3a05b76 | [
"MIT"
] | null | null | null | tests/algorithms/test_rice_siff.py | rvyjidacek/fcapsy | 6d531a337b0e65cac10e41b84d232498f3a05b76 | [
"MIT"
] | null | null | null | tests/algorithms/test_rice_siff.py | rvyjidacek/fcapsy | 6d531a337b0e65cac10e41b84d232498f3a05b76 | [
"MIT"
] | null | null | null | from fcapsy import Lattice, Context, Concept
from fcapsy.similarity import jaccard
from fcapsy.algorithms.rice_siff import concept_subset
object_labels = tuple(range(5))
attribute_labels = tuple(range(4))
bools = [
[1, 0, 0, 0],
[1, 1, 1, 0],
[0, 1, 0, 1],
[1, 1, 0, 0],
[0, 0, 1, 0],
]
context = Context(bools, object_labels, attribute_labels)
def test_rice_siff_algorithm():
lattice = Lattice(context)
concepts = concept_subset(context, jaccard)
for concept in concepts:
assert concept in lattice.concepts
| 24 | 57 | 0.682971 | 79 | 552 | 4.64557 | 0.367089 | 0.032698 | 0.024523 | 0.021798 | 0.032698 | 0.032698 | 0 | 0 | 0 | 0 | 0 | 0.049774 | 0.199275 | 552 | 22 | 58 | 25.090909 | 0.780543 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.055556 | 1 | 0.055556 | false | 0 | 0.166667 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
17363b957c4247dd1d67870327e5ffafa8e428ab | 17,766 | py | Python | examples/direct_fidelity_estimation.py | Apratim7py/Cirq | 90bbd14f352980cc222b1b3c05a40d09734b9070 | [
"Apache-2.0"
] | null | null | null | examples/direct_fidelity_estimation.py | Apratim7py/Cirq | 90bbd14f352980cc222b1b3c05a40d09734b9070 | [
"Apache-2.0"
] | null | null | null | examples/direct_fidelity_estimation.py | Apratim7py/Cirq | 90bbd14f352980cc222b1b3c05a40d09734b9070 | [
"Apache-2.0"
] | null | null | null | """Implements direct fidelity estimation.
Fidelity between the desired pure state rho and the actual state sigma is
defined as:
F(rho, sigma) = Tr (rho sigma)
It is a unit-less measurement between 0.0 and 1.0. The following two papers
independently described a faster way to estimate its value:
Direct Fidelity Estimation from Few Pauli Measurements
https://arxiv.org/abs/1104.4695
Practical characterization of quantum devices without tomography
https://arxiv.org/abs/1104.3835
This code implements the algorithm proposed for an example circuit (defined in
the function build_circuit()) and a noise model (defined in the variable noise).
"""
import argparse
import asyncio
from dataclasses import dataclass
import itertools
from typing import cast
from typing import List
from typing import Optional
from typing import Tuple
import sys
import numpy as np
import cirq
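# For intuition: the fidelity this module estimates can also be computed
# directly (but exponentially expensively) from the two density matrices.
# A minimal reference sketch, not used by the estimation algorithm below:
def _fidelity_exact(rho: np.ndarray, sigma: np.ndarray) -> float:
    """Computes F(rho, sigma) = Tr(rho sigma) directly."""
    return float(np.real(np.trace(rho @ sigma)))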
def build_circuit() -> Tuple[cirq.Circuit, List[cirq.Qid]]:
# Builds an arbitrary circuit to test. Do not include a measurement gate.
# The circuit need not be Clifford, but if it is, simulations will be
# faster.
qubits: List[cirq.Qid] = cast(List[cirq.Qid], cirq.LineQubit.range(3))
circuit: cirq.Circuit = cirq.Circuit(cirq.CNOT(qubits[0], qubits[2]),
cirq.Z(qubits[0]), cirq.H(qubits[2]),
cirq.CNOT(qubits[2], qubits[1]),
cirq.X(qubits[0]), cirq.X(qubits[1]),
cirq.CNOT(qubits[0], qubits[2]))
print('Circuit used:')
print(circuit)
return circuit, qubits
def compute_characteristic_function(circuit: cirq.Circuit,
pauli_string: cirq.PauliString,
qubits: List[cirq.Qid],
density_matrix: np.ndarray):
n_qubits = len(qubits)
d = 2**n_qubits
qubit_map = dict(zip(qubits, range(n_qubits)))
# rho_i or sigma_i in https://arxiv.org/abs/1104.3835
trace = pauli_string.expectation_from_density_matrix(
density_matrix, qubit_map)
assert np.isclose(trace.imag, 0.0, atol=1e-6)
trace = trace.real
prob = trace * trace / d # Pr(i) in https://arxiv.org/abs/1104.3835
return trace, prob
async def estimate_characteristic_function(circuit: cirq.Circuit,
pauli_string: cirq.PauliString,
qubits: List[cirq.Qid],
sampler: cirq.Sampler,
samples_per_term: int):
"""
Estimates the characteristic function using a (noisy) circuit simulator by
sampling the results.
Args:
circuit: The circuit to run the simulation on.
pauli_string: The Pauli string.
qubits: The list of qubits.
sampler: Either a noisy simulator or an engine.
samples_per_term: An integer greater than 0, the number of samples.
Returns:
The estimated characteristic function.
"""
p = cirq.PauliSumCollector(circuit=circuit,
observable=pauli_string,
samples_per_term=samples_per_term)
await p.collect_async(sampler=sampler)
sigma_i = p.estimated_energy()
assert np.isclose(sigma_i.imag, 0.0, atol=1e-6)
sigma_i = sigma_i.real
return sigma_i
def _randomly_sample_from_stabilizer_bases(
stabilizer_basis: List[cirq.DensePauliString], n_clifford_trials: int,
n_qubits: int):
"""
Given a stabilizer basis, randomly creates Pauli states by including each
basis vector or not.
Args:
stabilizer_basis: A list of Pauli strings that is the stabilizer basis
to sample from.
n_clifford_trials: An integer that is the number of samples to return.
n_qubits: An integer that is the number of qubits.
Returns:
A list of Pauli strings representing the sampled Pauli states.
"""
dense_pauli_strings = []
for _ in range(n_clifford_trials):
# Build the Pauli string as a random sample of the basis elements.
dense_pauli_string = cirq.DensePauliString.eye(n_qubits)
for stabilizer in stabilizer_basis:
if np.random.randint(2) == 1:
dense_pauli_string *= stabilizer
dense_pauli_strings.append(dense_pauli_string)
return dense_pauli_strings
def _enumerate_all_from_stabilizer_bases(
stabilizer_basis: List[cirq.DensePauliString], n_qubits: int):
"""
Given a stabilizer basis, creates the exhaustive list of Pauli states that
are spanned by the basis.
Args:
stabilizer_basis: A list of Pauli strings that is the stabilizer basis
to build all the Pauli strings.
n_qubits: An integer that is the number of qubits.
Returns:
A list of Pauli strings representing all the spanned Pauli states.
"""
dense_pauli_strings = []
for coefficients in itertools.product([False, True], repeat=n_qubits):
dense_pauli_string = cirq.DensePauliString.eye(n_qubits)
for (keep, stabilizer) in zip(coefficients, stabilizer_basis):
if keep:
dense_pauli_string *= stabilizer
dense_pauli_strings.append(dense_pauli_string)
return dense_pauli_strings
@dataclass
class PauliTrace:
"""
A class that contains the Pauli states as described on page 2 of:
https://arxiv.org/abs/1104.3835
"""
# Pauli string.
P_i: cirq.PauliString
# Coefficient of the ideal pure state expanded in the Pauli basis scaled by
# sqrt(dim H), formally defined at bottom of left column of page 2.
rho_i: float
# A probability (between 0.0 and 1.0) that is the relevance distribution,
# formally defined at top of right column of page 2.
Pr_i: float
def _estimate_pauli_traces_clifford(n_qubits: int,
clifford_state: cirq.CliffordState,
n_clifford_trials: Optional[int]):
"""
Estimates the Pauli traces in case the circuit is Clifford. When we have a
Clifford circuit, there are 2**n Pauli traces that have probability 1/2**n
and all the other traces have probability 0. In addition, there is a fast
way to find out what the traces are. See the documentation of
cirq.CliffordState for more detail. This function uses the speedup to sample
the Pauli states with non-zero probability.
Args:
n_qubits: An integer that is the number of qubits.
clifford_state: The Clifford state from which the basis of the Pauli
states with non-zero probability is extracted.
n_clifford_trials: An integer that is the number of Pauli states to
sample. If set to None, we do an exhaustive search.
Returns:
A list of Pauli states (represented as PauliTrace objects holding the
Pauli string, rho_i, and probability).
"""
# When the circuit consists of Clifford gates only, we can sample the
# Pauli states more efficiently as described on page 4 of:
# https://arxiv.org/abs/1104.4695
d = 2**n_qubits
# The stabilizers_basis variable only contains basis vectors. For
# example, if we have n=3 qubits, then we should have 2**n=8 Pauli
# states that we can sample, but the basis will still have 3 entries. We
# must flip a coin for each, whether or not to include them.
stabilizer_basis: List[cirq.DensePauliString] = clifford_state.stabilizers()
if n_clifford_trials is not None:
dense_pauli_strings = _randomly_sample_from_stabilizer_bases(
stabilizer_basis, n_clifford_trials, n_qubits)
else:
dense_pauli_strings = _enumerate_all_from_stabilizer_bases(
stabilizer_basis, n_qubits)
pauli_traces: List[PauliTrace] = []
for dense_pauli_string in dense_pauli_strings:
# The code below is equivalent to calling
# clifford_state.wave_function() and then calling
# compute_characteristic_function() on the results (albeit with a
# wave function instead of a density matrix). It is, however,
# unnecessary to do so. Instead we directly obtain the scalar rho_i.
rho_i = dense_pauli_string.coefficient
assert np.isclose(rho_i.imag, 0.0, atol=1e-6)
rho_i = rho_i.real
dense_pauli_string *= rho_i
assert np.isclose(abs(rho_i), 1.0, atol=1e-6)
Pr_i = 1.0 / d
pauli_traces.append(
PauliTrace(P_i=dense_pauli_string.sparse(), rho_i=rho_i, Pr_i=Pr_i))
return pauli_traces
def _estimate_pauli_traces_general(qubits: List[cirq.Qid],
circuit: cirq.Circuit):
"""
Estimates the Pauli traces in case the circuit is not Clifford. In this case
we cannot use the speedup implemented in the function
_estimate_pauli_traces_clifford() above, and so do a slow, density matrix
simulation.
Args:
qubits: The list of qubits.
circuit: The (non Clifford) circuit.
Returns:
A list of Pauli states (represented as PauliTrace objects holding the
Pauli string, rho_i, and probability).
"""
n_qubits = len(qubits)
dense_simulator = cirq.DensityMatrixSimulator()
# rho in https://arxiv.org/abs/1104.3835
clean_density_matrix = cast(
cirq.DensityMatrixTrialResult,
dense_simulator.simulate(circuit)).final_density_matrix
pauli_traces: List[PauliTrace] = []
for P_i in itertools.product([cirq.I, cirq.X, cirq.Y, cirq.Z],
repeat=n_qubits):
pauli_string = cirq.PauliString(dict(zip(qubits, P_i)))
rho_i, Pr_i = compute_characteristic_function(circuit, pauli_string,
qubits,
clean_density_matrix)
pauli_traces.append(PauliTrace(P_i=pauli_string, rho_i=rho_i,
Pr_i=Pr_i))
return pauli_traces
@dataclass
class TrialResult:
"""
Contains the results of a trial, either by simulator or actual run
"""
# The index in the list of Pauli traces.
i: int
# Coefficient of the measured/simulated pure state expanded in the Pauli
# basis scaled by sqrt(dim H), formally defined at bottom of left column of
# second page of https://arxiv.org/abs/1104.3835
sigma_i: float
@dataclass
class DFEIntermediateResult:
"""
A container for the various debug and run data from calling the function
direct_fidelity_estimation(). This is useful when running a long computation
on an actual device, which is expensive. This way, runs can be more easily
debugged offline.
"""
# If the circuit is Clifford, the Clifford state from which we can extract
# a list of Pauli strings for a basis of the stabilizers.
clifford_state: Optional[cirq.CliffordState]
# The list of Pauli traces we can sample from.
pauli_traces: List[PauliTrace]
# Measurement results from sampling the circuit.
trial_results: List[TrialResult]
def direct_fidelity_estimation(circuit: cirq.Circuit, qubits: List[cirq.Qid],
sampler: cirq.Sampler, n_trials: int,
n_clifford_trials: Optional[int],
samples_per_term: int):
"""
Implementation of direct fidelity estimation, as per 'Direct Fidelity
Estimation from Few Pauli Measurements' https://arxiv.org/abs/1104.4695 and
'Practical characterization of quantum devices without tomography'
https://arxiv.org/abs/1104.3835.
Args:
circuit: The circuit to run the simulation on.
qubits: The list of qubits.
sampler: Either a noisy simulator or an engine.
n_trials: The total number of Pauli measurements.
n_clifford_trials: In case the circuit is Clifford, we specify the
number of trials to estimate the noise-free pauli traces.
samples_per_term: if set to 0, we use the 'sampler' parameter above as
a noisy simulator (must be of type cirq.DensityMatrixSimulator) and
simulate noise in the circuit. If greater than 0, we instead use the
'sampler' parameter directly to estimate the characteristic
function.
Returns:
The estimated fidelity and a log of the run.
"""
# n_trials is upper-case N in https://arxiv.org/abs/1104.3835
# Number of qubits, lower-case n in https://arxiv.org/abs/1104.3835
n_qubits = len(qubits)
d = 2**n_qubits
clifford_circuit = True
clifford_state: Optional[cirq.CliffordState] = None
try:
clifford_state = cirq.CliffordState(
qubit_map={qubits[i]: i for i in range(len(qubits))})
for gate in circuit.all_operations():
clifford_state.apply_unitary(gate)
except ValueError:
clifford_circuit = False
# Computes for every \hat{P_i} of https://arxiv.org/abs/1104.3835
# estimate rho_i and Pr(i). We then collect tuples (rho_i, Pr(i), \hat{Pi})
# inside the variable 'pauli_traces'.
if clifford_circuit:
assert clifford_state is not None
pauli_traces = _estimate_pauli_traces_clifford(
n_qubits, cast(cirq.CliffordState, clifford_state),
n_clifford_trials)
else:
pauli_traces = _estimate_pauli_traces_general(qubits, circuit)
p = np.asarray([x.Pr_i for x in pauli_traces])
if not clifford_circuit:
# For Clifford circuits, we do a Monte Carlo simulation, and thus there
# is no guarantee that it adds up to 1.0 (but it should in the limit).
assert np.isclose(np.sum(p), 1.0, atol=1e-6)
# The package np.random.choice() is quite sensitive to probabilities not
# summing up to 1.0. Even an absolute difference below 1e-6 (as checked just
# above) does bother it, so we re-normalize the probs.
p /= np.sum(p)
fidelity = 0.0
if samples_per_term == 0:
# sigma in https://arxiv.org/abs/1104.3835
if not isinstance(sampler, cirq.DensityMatrixSimulator):
raise TypeError('sampler is not a cirq.DensityMatrixSimulator '
'but samples_per_term is zero.')
noisy_simulator = cast(cirq.DensityMatrixSimulator, sampler)
noisy_density_matrix = cast(
cirq.DensityMatrixTrialResult,
noisy_simulator.simulate(circuit)).final_density_matrix
trial_results: List[TrialResult] = []
for _ in range(n_trials):
# Randomly sample as per probability.
i = np.random.choice(len(pauli_traces), p=p)
Pr_i = pauli_traces[i].Pr_i
measure_pauli_string: cirq.PauliString = pauli_traces[i].P_i
rho_i = pauli_traces[i].rho_i
if samples_per_term > 0:
sigma_i = asyncio.get_event_loop().run_until_complete(
estimate_characteristic_function(circuit, measure_pauli_string,
qubits, sampler,
samples_per_term))
else:
sigma_i, _ = compute_characteristic_function(
circuit, measure_pauli_string, qubits, noisy_density_matrix)
trial_results.append(TrialResult(i=i, sigma_i=sigma_i))
fidelity += Pr_i * sigma_i / rho_i
estimated_fidelity = fidelity / n_trials * d
dfe_intermediate_result = DFEIntermediateResult(
clifford_state=clifford_state,
pauli_traces=pauli_traces,
trial_results=trial_results)
return estimated_fidelity, dfe_intermediate_result
def parse_arguments(args):
"""Helper function that parses the given arguments."""
parser = argparse.ArgumentParser('Direct fidelity estimation.')
parser.add_argument('--n_trials',
default=10,
type=int,
help='Number of trials to run.')
# TODO(#2802): Offer some guidance on how to set this flag. Maybe have an
# option to do an exhaustive sample and do numerical studies to know which
# choice is the best.
parser.add_argument('--n_clifford_trials',
default=3,
type=int,
help='Number of trials for Clifford circuits. This is '
'in effect when the circuit is Clifford. In this '
'case, we randomly sample the Pauli traces with '
'non-zero probabilities. The higher the number, '
'the more accurate the overall fidelity '
'estimation, at the cost of extra computing and '
'measurements. If set to None, we exhaustively '
'enumerate all the Pauli traces.')
parser.add_argument('--samples_per_term',
default=0,
type=int,
help='Number of samples per trial or 0 if no sampling.')
return vars(parser.parse_args(args))
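# Example invocation (flag values are illustrative):
#   python direct_fidelity_estimation.py --n_trials 100 \
#       --n_clifford_trials 10 --samples_per_term 0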
def main(*, n_trials: int, n_clifford_trials: Optional[int],
samples_per_term: int):
circuit, qubits = build_circuit()
noise = cirq.ConstantQubitNoiseModel(cirq.depolarize(0.1))
print('Noise model: %s' % (noise))
noisy_simulator = cirq.DensityMatrixSimulator(noise=noise)
estimated_fidelity, _ = direct_fidelity_estimation(
circuit,
qubits,
noisy_simulator,
n_trials=n_trials,
n_clifford_trials=n_clifford_trials,
samples_per_term=samples_per_term)
print('Estimated fidelity: %f' % (estimated_fidelity))
if __name__ == '__main__':
main(**parse_arguments(sys.argv[1:]))
| 38.960526 | 80 | 0.647923 | 2,327 | 17,766 | 4.792866 | 0.179201 | 0.029588 | 0.016318 | 0.020084 | 0.354703 | 0.300188 | 0.25473 | 0.203622 | 0.194298 | 0.154667 | 0 | 0.015267 | 0.281042 | 17,766 | 455 | 81 | 39.046154 | 0.857903 | 0.373185 | 0 | 0.194444 | 0 | 0 | 0.061417 | 0.002628 | 0 | 0 | 0 | 0.002198 | 0.027778 | 1 | 0.041667 | false | 0 | 0.050926 | 0 | 0.185185 | 0.018519 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1738bf0d16768883469e7f50b35ee82154920665 | 3,364 | py | Python | large_data_analysis/treePlotter.py | Codefans-fan/p2pSpider | 2f76fb43f3527cea8ed208089153ec12660907f4 | [
"MIT"
] | null | null | null | large_data_analysis/treePlotter.py | Codefans-fan/p2pSpider | 2f76fb43f3527cea8ed208089153ec12660907f4 | [
"MIT"
] | null | null | null | large_data_analysis/treePlotter.py | Codefans-fan/p2pSpider | 2f76fb43f3527cea8ed208089153ec12660907f4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Created on Mar 21, 2016
@author: fky
'''
import matplotlib.pyplot as plt
decisionNode = dict(boxstyle='sawtooth',fc='0.8')
leafNode = dict(boxstyle='round4',fc='0.8')
arrow_args = dict(arrowstyle='<-')
def plotNode(nodeTxt,centerPt,parentPt,nodeType):
createPlot.ax1.annotate(nodeTxt,xy=parentPt,xycoords='axes fraction',xytext=centerPt,textcoords='axes fraction',
va='center',ha='center',bbox=nodeType,arrowprops=arrow_args)
def createPlot():
fig=plt.figure(1,facecolor='white')
fig.clf()
createPlot.ax1=plt.subplot(111,frameon=False)
plotNode('a decision node', (0.5,0.1), (0.1,0.5), decisionNode)
plotNode('a leaf node', (0.8,0.1), (0.3,0.8), leafNode)
plt.show()
def getNumLeafs(myTree):
numLeafs = 0
firstStr = list(myTree.keys())[0]
secondDict = myTree[firstStr]
for key in secondDict.keys():
if type(secondDict[key]).__name__ == 'dict':
numLeafs += getNumLeafs(secondDict[key])
else:
numLeafs += 1
return numLeafs
def getTreeDepth(myTree):
maxDepth = 0
firstStr = list(myTree.keys())[0]
secondDict = myTree[firstStr]
for key in secondDict.keys():
if type(secondDict[key]).__name__=='dict':
thisDepth = 1+ getTreeDepth(secondDict[key])
else:
thisDepth = 1
if thisDepth > maxDepth:
maxDepth = thisDepth
return maxDepth
def retrieveTree(i):
listOfTree = [{'no surfacing':{0:'no',1:{'flippers':{0:'no',1:'yes'}}}},
{'no surfacing':{0:'no',1:{'flippers':{0:{'head':{0:'no',1:'yes'}},1:'no'}}}}
]
return listOfTree[i]
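# Trees are nested dicts of the form {feature: {value: subtree_or_class_label}}.
# E.g. retrieveTree(0) reads: if 'no surfacing' == 0 -> 'no'; otherwise test
# 'flippers' (0 -> 'no', 1 -> 'yes').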
def plotMidText(cntrPt,parentPt,txtString):
xMid = (parentPt[0]-cntrPt[0])/2.0 + cntrPt[0]
yMid = (parentPt[1]-cntrPt[1])/2.0 + cntrPt[1]
createPlot.ax1.text(xMid,yMid,txtString)
def plotTree(myTree,parentPt,nodeTxt):
numLeafs = getNumLeafs(myTree)
depth = getTreeDepth(myTree)
firstStr = list(myTree.keys())[0]
cntrPt = (plotTree.xOff+(1.0+float(numLeafs))/2.0/plotTree.totalW,plotTree.yOff)
plotMidText(cntrPt, parentPt, nodeTxt)
plotNode(firstStr, cntrPt, parentPt, decisionNode)
secondDict = myTree[firstStr]
plotTree.yOff = plotTree.yOff - 1.0/plotTree.totalD
for key in secondDict.keys():
if type(secondDict[key]).__name__ == 'dict':
plotTree(secondDict[key], cntrPt, str(key))
else:
plotTree.xOff = plotTree.xOff + 1.0/plotTree.totalW
plotNode(secondDict[key],(plotTree.xOff,plotTree.yOff),cntrPt,leafNode)
plotMidText((plotTree.xOff,plotTree.yOff), cntrPt, str(key))
plotTree.yOff = plotTree.yOff + 1.0/plotTree.totalD
def createPlot_2(inTree):
fig=plt.figure(1,facecolor='white')
fig.clf()
axprops = dict(xticks=[],yticks=[])
createPlot.ax1=plt.subplot(111,frameon=False)
plotTree.totalW = float(getNumLeafs(inTree))
plotTree.totalD=float(getTreeDepth(inTree))
plotTree.xOff = -0.5/plotTree.totalW
plotTree.yOff = 1.0
plotTree(inTree,(0.5,1.0),'')
plt.show()
if __name__=='__main__':
mytree = retrieveTree(0)
createPlot_2(mytree) | 32.660194 | 117 | 0.612663 | 401 | 3,364 | 5.079801 | 0.274314 | 0.008837 | 0.007855 | 0.032401 | 0.303387 | 0.251841 | 0.228277 | 0.167403 | 0.095729 | 0.095729 | 0 | 0.034443 | 0.231867 | 3,364 | 103 | 118 | 32.660194 | 0.75387 | 0.017836 | 0 | 0.298701 | 0 | 0 | 0.055086 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103896 | false | 0 | 0.012987 | 0 | 0.155844 | 0.012987 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
173d794fa36219432d2bd1e7a2317117f2a2f269 | 2,617 | py | Python | protogen/core.py | connermarzen/proto_gen_compiler | 38c045dcf90dcf3122dcc389c9ff0e200f9ba21d | [
"MIT"
] | null | null | null | protogen/core.py | connermarzen/proto_gen_compiler | 38c045dcf90dcf3122dcc389c9ff0e200f9ba21d | [
"MIT"
] | null | null | null | protogen/core.py | connermarzen/proto_gen_compiler | 38c045dcf90dcf3122dcc389c9ff0e200f9ba21d | [
"MIT"
] | null | null | null | import glob
import os
import sys
from pprint import pprint
from typing import List
from lark import Lark
from protogen.grammar.transformer import PGTransformer
from protogen.util import PGFile
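# Typical usage (a sketch; the glob pattern is hypothetical):
#   parser = PGParser(['schemas/*.protogen'])
#   parser.parse()
#   out_files = parser.transform()  # -> List[PGFile]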
class PGParser(object):
def __init__(self, inputs: List[str],
syntaxPath: str = 'grammar/proto_gen.lark'):
# Clean up and list input files.
self._files = {}
for items in inputs:
for item in glob.glob(items):
self._files[item] = None # Add placeholder in dict for parsing
if len(self._files) == 0:
print('No valid files were specified.')
print('Note: a glob pattern is acceptible for multiple files.\n')
print('Example:\n *.protogen\n')
print('You can also specify more than one file, '
'separated by spaces.\n')
print('Example:\n a.protogen b.protogen c.protogen')
sys.exit(1)
with open(os.path.join(os.path.dirname(__file__),
syntaxPath), 'r') as file:
grammar = file.read()
self._parser = Lark(grammar=grammar, parser='lalr',
propagate_positions=True)
def parse(self):
for item in self._files:
try:
with open(item, 'r') as data:
self._files[item] = self._parser.parse(data.read())
# MyTransformer().transform(parser._files[item])
except IsADirectoryError as e:
print('You must specify files. For multiple files in a '
'directory, a glob pattern may be used.')
print('Example: directory/*.protogen')
sys.exit(2)
def transform(self):
self._trees = {}
for file in self._files:
self._trees[file] = PGTransformer().transform(self._files[file])
# pprint(self._trees[file])
outfiles = []
for tree in self._trees:
# len(_files) == len(_trees) AND order == 'same'
outfiles.append(PGFile(tree, self._trees[tree]))
return outfiles
def display(self):
for item in self._files:
print("--- BEGIN FILE: {} ---".format(item))
print(self._files[item].pretty())
print("--- END FILE: {} ---".format(item))
def _display(self):
for item in self._files:
print("--- BEGIN FILE: {} ---".format(item))
print(self._files[item])
print("--- END FILE: {} ---".format(item))
def pretty(self):
pprint(self._files)
| 34.434211 | 79 | 0.549866 | 301 | 2,617 | 4.671096 | 0.365449 | 0.076814 | 0.025605 | 0.027738 | 0.154339 | 0.154339 | 0.105263 | 0.105263 | 0.105263 | 0.105263 | 0 | 0.001713 | 0.330913 | 2,617 | 75 | 80 | 34.893333 | 0.801256 | 0.071074 | 0 | 0.12069 | 0 | 0 | 0.184742 | 0.009072 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0 | 0.137931 | 0 | 0.275862 | 0.258621 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
173e0d807440fb436cd40c600fbbb74b5622d72c | 6,145 | py | Python | shell/packaging/setup.py | garyli1019/impala | ea0e1def6160d596082b01365fcbbb6e24afb21d | [
"Apache-2.0"
] | null | null | null | shell/packaging/setup.py | garyli1019/impala | ea0e1def6160d596082b01365fcbbb6e24afb21d | [
"Apache-2.0"
] | 1 | 2022-03-29T21:58:11.000Z | 2022-03-29T21:58:11.000Z | shell/packaging/setup.py | garyli1019/impala | ea0e1def6160d596082b01365fcbbb6e24afb21d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Set up the Impala shell python package."""
import datetime
import os
import re
import sys
import time
from impala_shell import impala_build_version
from setuptools import find_packages, setup
from textwrap import dedent
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
def parse_requirements(requirements_file='requirements.txt'):
"""
Parse requirements from the requirements file, stripping comments.
Args:
requirements_file: path to a requirements file
Returns:
a list of python packages
"""
lines = []
with open(requirements_file) as reqs:
for _ in reqs:
line = _.split('#')[0]
if line.strip():
lines.append(line)
return lines
def get_version():
"""Generate package version string when calling 'setup.py'.
When setup.py is being used to CREATE a distribution, e.g., via setup.py sdist
or setup.py bdist, then use the output from impala_build_version.get_version(),
and append modifiers as specified by the RELEASE_TYPE and OFFICIAL environment
variables. By default, the package created will be a dev release, designated
by timestamp. For example, if get_version() returns the string 3.0.0-SNAPSHOT,
the package version may be something like 3.0.0.dev20180322154653.
It's also possible to set an environment variable for BUILD_VERSION to override the
default build value returned from impala_build_version.get_version().
E.g., to specify an official 3.4 beta 2 release (3.4b2), one would call:
BUILD_VERSION=3.4 RELEASE_TYPE=b2 OFFICIAL=true python setup.py sdist
The generated version string will be written to a version.txt file to be
referenced when the distribution is installed.
When setup.py is invoked during installation, e.g., via pip install or
setup.py install, read the package version from the version.txt file, which
is presumed to contain a single line containing a valid PEP-440 version string.
The file should have been generated when the distribution being installed was
created. (Although a version.txt file can also be created manually.)
See https://www.python.org/dev/peps/pep-0440/ for more info on python
version strings.
Returns:
A package version string compliant with PEP-440
"""
version_file = os.path.join(CURRENT_DIR, 'version.txt')
if not os.path.isfile(version_file):
# If setup.py is being executed to create a distribution, e.g., via setup.py
# sdist or setup.py bdist, then derive the version and WRITE the version.txt
# file that will later be used for installations.
if os.getenv('BUILD_VERSION') is not None:
package_version = os.getenv('BUILD_VERSION')
else:
version_match = re.search('\d+\.\d+\.\d+', impala_build_version.get_version())
if version_match is None:
sys.exit('Unable to acquire Impala version.')
package_version = version_match.group(0)
# packages can be marked as alpha, beta, or rc RELEASE_TYPE
release_type = os.getenv('RELEASE_TYPE')
if release_type:
if not re.match('(a|b|rc)\d+?', release_type):
msg = """\
RELEASE_TYPE \'{0}\' does not conform to any PEP-440 release format:
aN (for alpha releases)
bN (for beta releases)
rcN (for release candidates)
where N is the number of the release"""
sys.exit(dedent(msg).format(release_type))
package_version += release_type
# packages that are not marked OFFICIAL have ".dev" + a timestamp appended
if os.getenv('OFFICIAL') != 'true':
epoch_t = time.time()
ts_fmt = '%Y%m%d%H%M%S'
timestamp = datetime.datetime.fromtimestamp(epoch_t).strftime(ts_fmt)
package_version = '{0}.dev{1}'.format(package_version, timestamp)
with open('version.txt', 'w') as version_file:
version_file.write(package_version)
else:
# If setup.py is being invoked during installation, e.g., via pip install
# or setup.py install, we expect a version.txt file from which to READ the
# version string.
with open(version_file) as version_file:
package_version = version_file.readline()
return package_version
setup(
name='impala_shell',
python_requires='>2.6, <3.0.0',
version=get_version(),
description='Impala Shell',
long_description_content_type='text/markdown',
long_description=open('README.md').read(),
author="Impala Dev",
author_email='dev@impala.apache.org',
url='https://impala.apache.org/',
license='Apache Software License',
packages=find_packages(),
include_package_data=True,
install_requires=parse_requirements(),
entry_points={
'console_scripts': [
'impala-shell = impala_shell.impala_shell:impala_shell_main'
]
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2 :: Only',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Database :: Front-Ends'
]
)
| 36.147059 | 84 | 0.706591 | 873 | 6,145 | 4.879725 | 0.34937 | 0.039437 | 0.016432 | 0.009859 | 0.092019 | 0.077934 | 0.052582 | 0.052582 | 0.052582 | 0.052582 | 0 | 0.012779 | 0.197722 | 6,145 | 169 | 85 | 36.360947 | 0.851318 | 0.479414 | 0 | 0.024096 | 0 | 0 | 0.347051 | 0.020739 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024096 | false | 0 | 0.096386 | 0 | 0.144578 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
17447407c571b28418fb7fbb866da851ca2f25a2 | 1,420 | py | Python | tests/surf_curl_methods.py | jlmaurer/tectosaur | 7cc5606d814f061395b19754e7a4b6c5e4c236e5 | [
"MIT"
] | 17 | 2017-06-29T16:48:56.000Z | 2021-10-03T18:31:41.000Z | tests/surf_curl_methods.py | jlmaurer/tectosaur | 7cc5606d814f061395b19754e7a4b6c5e4c236e5 | [
"MIT"
] | 4 | 2018-05-29T08:21:13.000Z | 2021-04-01T01:28:50.000Z | tests/surf_curl_methods.py | jlmaurer/tectosaur | 7cc5606d814f061395b19754e7a4b6c5e4c236e5 | [
"MIT"
] | 8 | 2019-06-10T22:19:40.000Z | 2022-01-12T20:55:37.000Z | import numpy as np
basis_gradient = [[-1.0, -1.0], [1.0, 0.0], [0.0, 1.0]]
e = [[[int((i - j) * (j - k) * (k - i) / 2) for k in range(3)]
for j in range(3)] for i in range(3)]
tri = np.random.rand(3,3)
# tri = np.array([[0,0,0],[1.1,0,0],[0,1.1,0]])
# tri = np.array([[0,0,0],[1,1,0],[0,1,0]])
surf_curl = np.empty((3,3))
g1 = tri[1] - tri[0]
g2 = tri[2] - tri[0]
unscaled_normal = np.cross(g1, g2)
jacobian_mag = np.linalg.norm(unscaled_normal)
normal = unscaled_normal / jacobian_mag
for basis_idx in range(3):
for s in range(3):
surf_curl[basis_idx][s] = (
+ basis_gradient[basis_idx][0] * g2[s]
- basis_gradient[basis_idx][1] * g1[s]
        ) / jacobian_mag
print(tri, jacobian_mag, normal)
print(basis_gradient)
jacobian = np.array([
g1, g2, unscaled_normal
]).T
inv_jacobian = np.linalg.inv(jacobian)
real_basis_gradient = np.zeros((3,3))
for basis_idx in range(3):
for j in range(3):
real_basis_gradient[basis_idx][j] = sum(
[basis_gradient[basis_idx][d] * inv_jacobian[d][j] for d in range(2)]
)
surf_curl2 = np.zeros((3,3))
for basis_idx in range(3):
for s in range(3):
for b in range(3):
for c in range(3):
surf_curl2[basis_idx][s] += e[b][c][s] * normal[b] * real_basis_gradient[basis_idx][c]
print(surf_curl)
print(surf_curl2)
np.testing.assert_almost_equal(surf_curl, surf_curl2)
| 28.4 | 102 | 0.607042 | 260 | 1,420 | 3.157692 | 0.192308 | 0.102314 | 0.107186 | 0.093788 | 0.320341 | 0.205847 | 0.200974 | 0.200974 | 0.165652 | 0.165652 | 0 | 0.063167 | 0.208451 | 1,420 | 49 | 103 | 28.979592 | 0.66726 | 0.061268 | 0 | 0.131579 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026316 | 1 | 0 | false | 0 | 0.026316 | 0 | 0.026316 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1745411ed8dd4e2c057611bffced47e22e3caab1 | 1,119 | py | Python | ga4gh/testbed/submit/report_submitter.py | ga4gh/ga4gh-testbed-lib | 599bb28e58c82e30058239e04525fba313a4bae4 | [
"Apache-2.0"
] | null | null | null | ga4gh/testbed/submit/report_submitter.py | ga4gh/ga4gh-testbed-lib | 599bb28e58c82e30058239e04525fba313a4bae4 | [
"Apache-2.0"
] | 3 | 2022-03-21T18:30:27.000Z | 2022-03-30T18:04:05.000Z | ga4gh/testbed/submit/report_submitter.py | ga4gh/ga4gh-testbed-lib | 599bb28e58c82e30058239e04525fba313a4bae4 | [
"Apache-2.0"
] | null | null | null | import requests
class ReportSubmitter():
    @staticmethod
    def submit_report(series_id, series_token, report, url="http://localhost:4500/reports"):
'''
Submits a report to the GA4GH testbed api.
Required arguments:
series_id - A series ID is needed by server to group the report
series_token - A token is needed to verify authenticity
report - GA4GH report in JSON format
url - URL of the testbed server
'''
header = {"GA4GH-TestbedReportSeriesId": series_id, "GA4GH-TestbedReportSeriesToken": series_token}
        submit_request = requests.post(url, headers=header, json=report)
results = {
"status_code": submit_request.status_code,
"error_message": None,
"report_id": None
}
if submit_request.status_code == 200:
results["report_id"] = submit_request.json()["id"]
else:
if "message" in submit_request.json().keys():
results["error_message"] = submit_request.json()["message"]
return results
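# --- Example usage (illustrative sketch, not part of the library) ---
# The series id, token, and report below are hypothetical placeholders; a real
# report must match the GA4GH testbed report schema, and the server must be
# reachable at the given URL.
if __name__ == "__main__":
    example_report = {"testbed_name": "example-testbed", "phases": []}
    results = ReportSubmitter.submit_report(
        series_id="series-1234",
        series_token="token-abcd",
        report=example_report,
    )
    print(results)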
| 33.909091 | 107 | 0.613941 | 126 | 1,119 | 5.293651 | 0.436508 | 0.116942 | 0.076462 | 0.068966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013942 | 0.294906 | 1,119 | 32 | 108 | 34.96875 | 0.831432 | 0.2395 | 0 | 0 | 0 | 0 | 0.20154 | 0.073171 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.117647 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
17458c0950b3d6fa10192f5ac3b3e23c730302f2 | 1,611 | py | Python | phovea_server/_utils.py | phovea/phovea_server | f83879f58669ff4d554efcb727b1c6fd0185041a | [
"BSD-3-Clause"
] | 3 | 2018-06-08T01:28:56.000Z | 2020-01-10T14:17:34.000Z | phovea_server/_utils.py | phovea/phovea_server | f83879f58669ff4d554efcb727b1c6fd0185041a | [
"BSD-3-Clause"
] | 88 | 2016-11-06T08:28:21.000Z | 2022-03-22T07:18:59.000Z | phovea_server/_utils.py | phovea/phovea_server | f83879f58669ff4d554efcb727b1c6fd0185041a | [
"BSD-3-Clause"
] | 6 | 2017-06-06T20:43:00.000Z | 2020-02-13T18:23:46.000Z | ###############################################################################
# Caleydo - Visualization for Molecular Biology - http://caleydo.org
# Copyright (c) The Caleydo Team. All rights reserved.
# Licensed under the new BSD license, available at http://caleydo.org/license
###############################################################################
from builtins import str
import logging
_log = logging.getLogger(__name__)
# extend a dictionary recursively
def extend(target, w):
for k, v in w.items():
if isinstance(v, dict):
if k not in target:
target[k] = extend({}, v)
else:
target[k] = extend(target[k], v)
else:
target[k] = v
return target
def replace_variables_f(s, lookup):
import re
s = str(s)
if re.match(r'^\$\{([^}]+)\}$', s): # full string is a pattern
s = s[2:len(s) - 1]
v = lookup(s)
if v is None:
      _log.error("can't resolve " + s)
return '$unresolved$'
return v
def match(m):
v = lookup(m.group(1))
if v is None:
      _log.error("can't resolve " + m.group(1))
return '$unresolved$'
return str(v)
return re.sub(r'\$\{([^}]+)\}', match, s)
def replace_variables(s, variables):
return replace_variables_f(s, lambda x: variables.get(x, None))
def replace_nested_variables(obj, lookup):
if isinstance(obj, list):
return [replace_nested_variables(o, lookup) for o in obj]
elif isinstance(obj, str):
return replace_variables_f(obj, lookup)
elif isinstance(obj, dict):
return {k: replace_nested_variables(v, lookup) for k, v in obj.items()}
return obj
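# --- Example usage (illustrative, not part of the original module) ---
# A minimal sketch of extend() merging nested dicts and replace_variables()
# resolving ${...} placeholders.
if __name__ == '__main__':
  print(extend({'a': {'x': 1}}, {'a': {'y': 2}, 'b': 3}))  # {'a': {'x': 1, 'y': 2}, 'b': 3}
  print(replace_variables('${name}:${port}', {'name': 'phovea', 'port': 80}))  # phovea:80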
| 26.85 | 79 | 0.577902 | 216 | 1,611 | 4.217593 | 0.356481 | 0.008782 | 0.055982 | 0.015368 | 0.061471 | 0.061471 | 0.061471 | 0.061471 | 0 | 0 | 0 | 0.003101 | 0.199255 | 1,611 | 59 | 80 | 27.305085 | 0.703101 | 0.156425 | 0 | 0.15 | 0 | 0 | 0.065272 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.075 | 0.025 | 0.475 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
17479b6aa39e88b50ca6246c79ded1da0b011cc9 | 1,785 | py | Python | ait/core/server/plugins/PacketAccumulator.py | kmarwah/AIT-Core | c7af2ff58f51ba3c3d66cb28fbfe80c3b0712245 | [
"MIT"
] | 1 | 2022-01-22T13:55:49.000Z | 2022-01-22T13:55:49.000Z | ait/core/server/plugins/PacketAccumulator.py | kmarwah/AIT-Core | c7af2ff58f51ba3c3d66cb28fbfe80c3b0712245 | [
"MIT"
] | 2 | 2021-09-16T19:14:52.000Z | 2021-09-16T19:16:03.000Z | ait/core/server/plugins/PacketAccumulator.py | kmarwah/AIT-Core | c7af2ff58f51ba3c3d66cb28fbfe80c3b0712245 | [
"MIT"
] | null | null | null | from ait.core.server.plugins import Plugin
from gevent import Greenlet, sleep
class PacketAccumulator(Plugin):
def __init__(self, inputs=None, outputs=None, zmq_args=None, timer_seconds=1, max_size_octets=1024):
super().__init__(inputs, outputs, zmq_args)
self.packet_queue = []
self.size_packet_queue_octets = 0
self.glet = Greenlet.spawn(self.periodic_check)
if timer_seconds > 0:
self.timer_seconds = timer_seconds
else:
msg = f"PacketAccumulator -> timer value {timer_seconds} must be greater "
msg += "than or equal to 0! Defaulting to 1 second."
self.timer_seconds = 1
self.log.error(msg)
if max_size_octets > 0:
self.max_size_octets = max_size_octets
else:
msg = f"PacketAccumulator -> Maximum accumulation size {max_size_octets} octets must "
msg += "be greater than 0! Defaulting to 1024 octets."
self.max_size_octets = 1024
self.log.error(msg)
def periodic_check(self):
while True:
sleep(self.timer_seconds)
self.emit()
def process(self, data, topic=None):
data_len = len(data)
# Does not fit, need to emit
if self.size_packet_queue_octets + data_len > self.max_size_octets:
self.emit()
# It fits! Add and defer emission
self.packet_queue.append(data)
self.size_packet_queue_octets += data_len
def emit(self):
if self.packet_queue:
payload = self.packet_queue.pop(0)
for i in self.packet_queue:
payload += i
self.publish(payload)
self.size_packet_queue_octets = 0
self.packet_queue.clear()
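# --- Example configuration (illustrative sketch) ---
# In an AIT server config this plugin is typically wired up via YAML; the
# stream name below is a hypothetical placeholder:
#
#   - plugin:
#       name: ait.core.server.plugins.PacketAccumulator
#       inputs:
#         - telemetry_stream
#       timer_seconds: 1
#       max_size_octets: 1024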
| 34.326923 | 104 | 0.615126 | 228 | 1,785 | 4.578947 | 0.342105 | 0.105364 | 0.087165 | 0.072797 | 0.118774 | 0.118774 | 0.118774 | 0 | 0 | 0 | 0 | 0.017657 | 0.301961 | 1,785 | 51 | 105 | 35 | 0.820225 | 0.032493 | 0 | 0.2 | 0 | 0 | 0.133411 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.05 | 0 | 0.175 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
174851ae78f447b7133dd774eabe7edf75caa7c7 | 3,679 | py | Python | services/ui_backend_service/data/cache/generate_dag_action.py | runsascoded/metaflow-service | ac7770dfeae17fd060129d408fa3bb472fc00b86 | [
"Apache-2.0"
] | null | null | null | services/ui_backend_service/data/cache/generate_dag_action.py | runsascoded/metaflow-service | ac7770dfeae17fd060129d408fa3bb472fc00b86 | [
"Apache-2.0"
] | null | null | null | services/ui_backend_service/data/cache/generate_dag_action.py | runsascoded/metaflow-service | ac7770dfeae17fd060129d408fa3bb472fc00b86 | [
"Apache-2.0"
] | null | null | null | import hashlib
import json
from .client import CacheAction
from .utils import streamed_errors, DAGParsingFailed, DAGUnsupportedFlowLanguage
from .custom_flowgraph import FlowGraph
from metaflow import Run, Step, DataArtifact, namespace
from metaflow.exception import MetaflowNotFound
namespace(None) # Always use global namespace by default
class GenerateDag(CacheAction):
"""
Generates a DAG for a given Run.
Parameters
----------
flow_id : str
The flow id that this codepackage belongs to.
Required for finding the correct class inside the parser logic.
run_number : str
Run number to construct rest of the pathspec
Returns
--------
List or None
example:
[
boolean,
{
"step_name": {
'type': string,
'box_next': boolean,
'box_ends': string,
'next': list,
'doc': string
},
...
}
]
First field conveys whether dag generation was successful.
Second field contains the actual DAG.
"""
@classmethod
def format_request(cls, flow_id, run_number, invalidate_cache=False):
msg = {
'flow_id': flow_id,
'run_number': run_number
}
key_identifier = "{}/{}".format(flow_id, run_number)
result_key = 'dag:result:%s' % hashlib.sha1((key_identifier).encode('utf-8')).hexdigest()
stream_key = 'dag:stream:%s' % hashlib.sha1((key_identifier).encode('utf-8')).hexdigest()
return msg,\
[result_key],\
stream_key,\
[stream_key],\
invalidate_cache
@classmethod
def response(cls, keys_objs):
'''
Returns the generated DAG result
'''
return [json.loads(val) for key, val in keys_objs.items() if key.startswith('dag:result')][0]
@classmethod
def stream_response(cls, it):
for msg in it:
yield msg
@classmethod
def execute(cls,
message=None,
keys=None,
existing_keys={},
stream_output=None,
invalidate_cache=False,
**kwargs):
results = {}
flow_id = message['flow_id']
run_number = message['run_number']
result_key = [key for key in keys if key.startswith('dag:result')][0]
with streamed_errors(stream_output):
run = Run("{}/{}".format(flow_id, run_number))
param_step = Step("{}/_parameters".format(run.pathspec))
try:
dag = DataArtifact("{}/_graph_info".format(param_step.task.pathspec)).data
except MetaflowNotFound:
dag = generate_dag(run)
results[result_key] = json.dumps(dag)
return results
# Utilities
def generate_dag(run: Run):
try:
# Initialize a FlowGraph object
graph = FlowGraph(source=run.code.flowspec, name=run.parent.id)
# Build the DAG based on the DAGNodes given by the FlowGraph for the found FlowSpec class.
steps_info, graph_structure = graph.output_steps()
graph_info = {
"steps": steps_info,
"graph_structure": graph_structure,
"doc": graph.doc
}
return graph_info
except Exception as ex:
if ex.__class__.__name__ == 'KeyError' and "python" in str(ex):
raise DAGUnsupportedFlowLanguage(
'DAG parsing is not supported for the language used in this Flow.'
) from None
else:
raise DAGParsingFailed(f"DAG Parsing failed: {str(ex)}")
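# --- Example usage (illustrative sketch) ---
# The pathspec below is a hypothetical placeholder; generate_dag() needs a
# Metaflow Run whose code package is accessible from this process.
if __name__ == "__main__":
    example_run = Run("HelloFlow/1234")
    print(generate_dag(example_run))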
| 29.669355 | 101 | 0.583582 | 404 | 3,679 | 5.153465 | 0.381188 | 0.025937 | 0.021614 | 0.036023 | 0.113353 | 0.066282 | 0.042267 | 0.042267 | 0.042267 | 0 | 0 | 0.002392 | 0.318293 | 3,679 | 123 | 102 | 29.910569 | 0.827751 | 0.220712 | 0 | 0.088235 | 0 | 0 | 0.094818 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073529 | false | 0 | 0.102941 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1749ab03881bd2e5bd99504afb6c2d2a4c5881b2 | 3,803 | py | Python | examples/nontrivial/main.py | splnkit/splunk-tracer-python | 4be681cbb4156284daaaa35dcca8c8992f1aa191 | [
"MIT"
] | null | null | null | examples/nontrivial/main.py | splnkit/splunk-tracer-python | 4be681cbb4156284daaaa35dcca8c8992f1aa191 | [
"MIT"
] | null | null | null | examples/nontrivial/main.py | splnkit/splunk-tracer-python | 4be681cbb4156284daaaa35dcca8c8992f1aa191 | [
"MIT"
] | null | null | null | """
Synthetic example with high concurrency. Used primarily to stress test the
library.
"""
import argparse
import sys
import time
import threading
import random
# Comment out to test against the published copy
import os
sys.path.insert(1, os.path.dirname(os.path.realpath(__file__)) + '/../..')
import opentracing
import splunktracing
def sleep_dot():
"""Short sleep and writes a dot to the STDOUT.
"""
time.sleep(0.05)
sys.stdout.write('.')
sys.stdout.flush()
def add_spans():
"""Calls the opentracing API, doesn't use any LightStep-specific code.
"""
with opentracing.tracer.start_active_span('trivial/initial_request') as parent_scope:
parent_scope.span.set_tag('url', 'localhost')
parent_scope.span.log_event('All good here!', payload={'N': 42, 'pi': 3.14, 'abc': 'xyz'})
parent_scope.span.set_tag('span_type', 'parent')
parent_scope.span.set_baggage_item('checked', 'baggage')
rng = random.SystemRandom()
for i in range(50):
time.sleep(rng.random() * 0.2)
sys.stdout.write('.')
sys.stdout.flush()
# This is how you would represent starting work locally.
with opentracing.tracer.start_active_span('trivial/child_request') as child_scope:
child_scope.span.log_event('Uh Oh!', payload={'error': True})
child_scope.span.set_tag('span_type', 'child')
# Play with the propagation APIs... this is not IPC and thus not
# where they're intended to be used.
text_carrier = {}
opentracing.tracer.inject(child_scope.span.context, opentracing.Format.TEXT_MAP, text_carrier)
span_context = opentracing.tracer.extract(opentracing.Format.TEXT_MAP, text_carrier)
with opentracing.tracer.start_active_span(
'nontrivial/remote_span',
child_of=span_context) as remote_scope:
remote_scope.span.log_event('Remote!')
remote_scope.span.set_tag('span_type', 'remote')
time.sleep(rng.random() * 0.1)
opentracing.tracer.flush()
def splunk_tracer_from_args():
"""Initializes splunk from the commandline args.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--token', help='Your Splunk HEC token.',
default='{your_access_token}')
parser.add_argument('--host', help='The HEC host to contact.',
default='127.0.0.1')
parser.add_argument('--port', help='The Splunk HEC port.',
type=int, default=8088)
parser.add_argument('--no_tls', help='Disable TLS for reporting',
dest="no_tls", action='store_true')
parser.add_argument('--component_name', help='The Splunk component name',
default='NonTrivialExample')
args = parser.parse_args()
if args.no_tls:
collector_encryption = 'none'
else:
collector_encryption = 'tls'
return splunktracing.Tracer(
component_name=args.component_name,
access_token=args.token,
collector_host=args.host,
collector_port=args.port,
collector_encryption=collector_encryption)
if __name__ == '__main__':
    print('Hello ')
# Use LightStep's opentracing implementation
with splunk_tracer_from_args() as tracer:
opentracing.tracer = tracer
for j in range(20):
threads = []
for i in range(64):
t = threading.Thread(target=add_spans)
threads.append(t)
t.start()
for t in threads:
t.join()
print('\n')
print(' World!')
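# --- Example invocation (illustrative; the flags are defined in
# splunk_tracer_from_args() above, and the token value is a placeholder) ---
#   python main.py --token YOUR_HEC_TOKEN --host 127.0.0.1 --port 8088 --no_tls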
| 34.572727 | 110 | 0.608467 | 451 | 3,803 | 4.944568 | 0.399113 | 0.036323 | 0.026906 | 0.026906 | 0.1713 | 0.142152 | 0.038565 | 0 | 0 | 0 | 0 | 0.010549 | 0.27715 | 3,803 | 109 | 111 | 34.889908 | 0.800655 | 0.131475 | 0 | 0.054054 | 0 | 0 | 0.132784 | 0.020147 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040541 | false | 0 | 0.108108 | 0 | 0.162162 | 0.040541 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
174c839ee6b2c3dcc46328886c1e0ff5e1298c8c | 2,773 | py | Python | pyoti/plugins/datasources/generic.py | cellular-nanoscience/pyotic | 4cf68d4fd4efe2f1cbb4bb6fd61a66af0d15eaff | [
"Apache-2.0"
] | 1 | 2018-06-12T11:46:54.000Z | 2018-06-12T11:46:54.000Z | pyoti/plugins/datasources/generic.py | cellular-nanoscience/pyotic | 4cf68d4fd4efe2f1cbb4bb6fd61a66af0d15eaff | [
"Apache-2.0"
] | 6 | 2017-09-08T09:02:20.000Z | 2018-11-14T10:22:01.000Z | pyoti/plugins/datasources/generic.py | cellular-nanoscience/pyotic | 4cf68d4fd4efe2f1cbb4bb6fd61a66af0d15eaff | [
"Apache-2.0"
] | 3 | 2017-09-08T11:08:28.000Z | 2019-07-17T21:40:13.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 18 13:41:17 2016
@author: Tobias Jachowski
"""
import inspect
import numbers
from pyoti.data.datasource import DataSource
from pyoti.picklable import unboundfunction
class GenericDataFile(DataSource):
def __init__(self, load_data, filename, directory=None, samplingrate=1.0,
**kwargs):
"""
load_data : function
filename : str
directory : str
samplingrate : float
**kwargs
"""
super().__init__(filename=filename, directory=directory, **kwargs)
self.load_data = unboundfunction(load_data)
if isinstance(samplingrate, numbers.Number):
self.samplingrate = samplingrate
else:
samplingrate_args = {}
            for par in inspect.getfullargspec(samplingrate)[0]:
if par in kwargs: # par can be anything, except load_data,
# filename, directory or samplingrate
samplingrate_args[par] = kwargs.get(par)
if par == 'filename': # automatically use filename
samplingrate_args['filename'] = self.absfile
self.samplingrate = samplingrate(**samplingrate_args)
self.load_data_args = {}
        for par in inspect.getfullargspec(load_data)[0]:
if par in kwargs:
self.load_data_args[par] = kwargs.get(par)
self.name = ("Generic data originally loaded from \n"
" %s with \n"
" samplingrate %s Hz") % (self.absfile_orig,
self.samplingrate)
def as_array(self):
filename = self.absfile
data = self.load_data(filename, **self.load_data_args)
return data
class GenericData(DataSource):
def __init__(self, load_data, samplingrate=1.0, **kwargs):
"""
load_data : function
samplingrate : float
"""
self.load_data = unboundfunction(load_data)
if isinstance(samplingrate, numbers.Number):
self.samplingrate = samplingrate
else:
samplingrate_args = {}
            for par in inspect.getfullargspec(samplingrate)[0]:
if par in kwargs:
samplingrate_args[par] = kwargs.pop(par)
self.samplingrate = samplingrate(**samplingrate_args)
self.fun_args = {}
        for par in inspect.getfullargspec(load_data)[0]:
if par in kwargs:
self.fun_args[par] = kwargs.pop(par)
self.name = ("Generic data with \n"
" samplingrate %s Hz") % (self.samplingrate)
def as_array(self):
data = self.load_data(**self.fun_args)
return data
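# --- Example usage (illustrative sketch) ---
# load_zeros below is a hypothetical stand-in for a real loader; GenericData
# only needs a callable that returns the data as an array.
if __name__ == '__main__':
    import numpy as np

    def load_zeros():
        return np.zeros((10, 2))

    source = GenericData(load_zeros, samplingrate=1000.0)
    print(source.as_array().shape)  # (10, 2)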
| 33.011905 | 77 | 0.573026 | 286 | 2,773 | 5.402098 | 0.258741 | 0.082848 | 0.069903 | 0.031068 | 0.579935 | 0.533333 | 0.339159 | 0.292557 | 0.292557 | 0.292557 | 0 | 0.011357 | 0.333213 | 2,773 | 83 | 78 | 33.409639 | 0.824229 | 0.112153 | 0 | 0.470588 | 0 | 0 | 0.055814 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078431 | false | 0 | 0.078431 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
174dc1f508c7bead5580d0a22374724068ce4f6c | 4,721 | py | Python | src/matlab2cpp/rules/parallel.py | neilferg/matlab2cpp | aa26671fc73dad297c977511053b076e05bdd2df | [
"BSD-3-Clause"
] | null | null | null | src/matlab2cpp/rules/parallel.py | neilferg/matlab2cpp | aa26671fc73dad297c977511053b076e05bdd2df | [
"BSD-3-Clause"
] | null | null | null | src/matlab2cpp/rules/parallel.py | neilferg/matlab2cpp | aa26671fc73dad297c977511053b076e05bdd2df | [
"BSD-3-Clause"
] | null | null | null |
def variable_lists(node):
nodes = node.flatten(ordered=False, reverse=False, inverse=False)
#store some variable names, in private or shared
assigned_var = []
type_info = []
#get iterator name
iterator_name = node[0].name
for n in nodes:
if n.cls == "Assign":
#index = n.parent.children.index(n)
#lhs var of the assignment
if n[0].cls == "Var":
if n[0].name not in assigned_var:
assigned_var.append(n[0].name)
type_info.append(n[0].type)
"""
if n[0].cls == "Set":
var_name = n[0].name
#subnodes to Set
#index = n.parent.children.index(n)
#subnodes = n.parent[index].flatten(ordered=False, reverse=False, inverse=False)
subnodes = n[0].flatten(ordered=False, reverse=False, inverse=False)
for subnode in subnodes[1:]:
if subnode.name and subnode.name == iterator_name:
shared_variable.append(var_name)
#print(subnode.name)
"""
#multiple return from function are assigned to vars
if n.cls == "Assigns": #and n.backend == "func_returns":
for sub_node in n:
if sub_node.cls == "Var":
if sub_node.name not in assigned_var:
assigned_var.append(sub_node.name)
type_info.append(sub_node.type)
#get the iteration variable in the for loop
if n.cls == "Var" and n.parent.cls == "For":
if n.name not in assigned_var:
assigned_var.append(n.name)
type_info.append(n.type)
#shared_variable = list(set(shared_variable))
#print(shared_variable)
#for n in nodes:
# if (n.cls == "Var" or n.cls == "Get") and n.backend != "reserved" and n.name \
# not in [shared_variable, node[0].name]:
# private_variable.append(n.name)
#private_variable = list(set(private_variable))
#return private_variable, shared_variable, assigned_var, type_info
return assigned_var, type_info
def omp(node, start, stop, step):
assigned_var, type_info = variable_lists(node)
#out = "#pragma omp parallel for\nfor (%(0)s=" + start + \
# "; %(0)s<=" + stop + "; %(0)s"
temp_str = ""
if len(assigned_var) > 1:
temp_str = ", ".join(assigned_var[1:])
temp_str = "firstprivate(" + temp_str + ")"
out = "#pragma omp parallel for " + temp_str + "\nfor (%(0)s=" + start + \
"; %(0)s<=" + stop + "; %(0)s"
return out
def tbb(node, start, stop, step):
assigned_var, type_info = variable_lists(node)
any_vec_or_mat = False
for var, type in zip(assigned_var, type_info):
if type not in ["uword", "int", "float", "double"]:
any_vec_or_mat = True
#tbb.counter += 1
out = "{\n"
#str_val = str(tbb.counter)
if any_vec_or_mat:
declare_struct = "struct tbb_var_struct" + "\n{"
for var, type in zip(assigned_var, type_info):
if type not in ["uword", "int", "float", "double"]:
declare_struct += "\n" + type + " " + var + ";"
declare_struct += "\n} " + ";\n"
declare_struct += "tbb::combinable<struct tbb_var_struct" + "> tbb_per_thread_data" + " ;\n"
out += declare_struct
#for var, type in zip(assigned_var, type_info):
# out += "tbb::enumerable_thread_specific<" + type + "> " + "_" + var + " = " + var + " ;\n"
out += "tbb::parallel_for(tbb::blocked_range<size_t>(" + start + ", " + stop + "+1" + \
"),\n" + "[&]" + "(const tbb::blocked_range<size_t>& _range) \n{\n"
#assign to local L, x, y
for var, type in zip(assigned_var, type_info):
if type in ["uword", "int", "float", "double"]:
out += type + " " + var + ";\n"
if any_vec_or_mat:
out += "struct tbb_var_struct" + " tbb_struct_vars = tbb_per_thread_data" + ".local() ;\n"
for var, type in zip(assigned_var, type_info):
if type not in ["uword", "int", "float", "double"]:
out += type + "& " + var + " = " + "tbb_struct_vars." + var + ";\n"
#for var, type in zip(assigned_var, type_info):
# out += type + "& " + var + " = _" + var + ".local() ;\n"
out += "for (" + "%(0)s = _range.begin(); %(0)s != _range.end(); %(0)s"
# special case for '+= 1'
if step == "1":
out += "++"
else:
out += "+=" + step
out += ")\n{\n%(2)s\n}"
out += "\n}\n);\n"
out += "}"
return out
#tbb.counter = 0
| 33.964029 | 100 | 0.528278 | 610 | 4,721 | 3.913115 | 0.180328 | 0.087558 | 0.069124 | 0.087558 | 0.440721 | 0.344365 | 0.322581 | 0.254294 | 0.221617 | 0.173858 | 0 | 0.008359 | 0.315823 | 4,721 | 138 | 101 | 34.210145 | 0.73065 | 0.222622 | 0 | 0.2 | 0 | 0.015385 | 0.182708 | 0.030669 | 0.076923 | 0 | 0 | 0 | 0 | 1 | 0.046154 | false | 0 | 0 | 0 | 0.092308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
174dd47506793e6d8f38f7f917f3d5a1459219eb | 1,154 | py | Python | test/test_clock.py | Ham22/python-as1130-clock | a97fbdf3d0fe5a9cafa7392458c44782f688daa7 | [
"Apache-2.0"
] | null | null | null | test/test_clock.py | Ham22/python-as1130-clock | a97fbdf3d0fe5a9cafa7392458c44782f688daa7 | [
"Apache-2.0"
] | null | null | null | test/test_clock.py | Ham22/python-as1130-clock | a97fbdf3d0fe5a9cafa7392458c44782f688daa7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import unittest
from unittest.mock import MagicMock, call
from clock import clock
class TestClock(unittest.TestCase):
def setUp(self):
self.grid = MagicMock()
self.clock = clock.Clock(self.grid)
def test_grid_is_cleared_before_setting_new_led(self):
self.clock.update_time(1, 26)
calls = [call.clear(), call.set_led(0, 0)]
self.grid.assert_has_calls(calls, any_order=False)
def test_fades_out_before_clearing(self):
self.clock.update_time(1, 26)
calls = [call.fade_out(), call.clear(), call.set_led(0, 0)]
self.grid.assert_has_calls(calls, any_order=False)
def test_fades_in_after_setting_last_led(self):
self.clock.update_time(1, 26)
calls = [call.set_led(3, 4), call.fade_in()]
self.grid.assert_has_calls(calls, any_order=False)
def test_fade_can_be_disabled(self):
self.clock = clock.Clock(self.grid, animations_on=False)
self.clock.update_time(1, 26)
self.grid.fade_out.assert_not_called()
self.grid.fade_in.assert_not_called()
if __name__ == '__main__':
unittest.main()
| 26.227273 | 67 | 0.681109 | 171 | 1,154 | 4.280702 | 0.327485 | 0.087432 | 0.071038 | 0.103825 | 0.513661 | 0.513661 | 0.409836 | 0.409836 | 0.409836 | 0.362022 | 0 | 0.020585 | 0.200173 | 1,154 | 43 | 68 | 26.837209 | 0.772481 | 0.014731 | 0 | 0.269231 | 0 | 0 | 0.007042 | 0 | 0 | 0 | 0 | 0 | 0.192308 | 1 | 0.192308 | false | 0 | 0.115385 | 0 | 0.346154 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
174f039433c2e09bc4b24b82001c92bcb71f38f0 | 2,580 | py | Python | opencv/crop_youtube_video_screenshots.py | kinow/dork-scripts | a4fa7980a8cdff41df806bb4d4b70f7b4ac89349 | [
"CC-BY-4.0"
] | 1 | 2016-08-07T07:45:24.000Z | 2016-08-07T07:45:24.000Z | opencv/crop_youtube_video_screenshots.py | kinow/dork-scripts | a4fa7980a8cdff41df806bb4d4b70f7b4ac89349 | [
"CC-BY-4.0"
] | null | null | null | opencv/crop_youtube_video_screenshots.py | kinow/dork-scripts | a4fa7980a8cdff41df806bb4d4b70f7b4ac89349 | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/env python3
"""A script to iterate through directories and produce cropped images.
The images contain the video screen area of YouTube videos. The screenshots
were taken from my computer, with 900/1600 resolution, and the location is
always the same for the ROI.
Ideally a future version will automatically detect the location based on
some algorithm/strategy.
Free to use, under MIT License.
"""
import argparse
import asyncio
import logging
import os
import cv2
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=True, help="input directory", dest="input_directory")
args = ap.parse_args()
logging.basicConfig(format="[%(thread)-5d]%(asctime)s: %(message)s")
logger = logging.getLogger('async')
logger.setLevel(logging.INFO)
async def crop_image(image_file, image_index, semaphore):
""""
:type image_file: str
:type image_index: int
:type semaphore: asyncio.BoundedSemaphore
"""
async with semaphore:
img = cv2.imread(image_file, -1)
output_folder = os.path.dirname(image_file)
output_file = os.path.join(output_folder, "screenshot_{}.png".format(image_index))
logger.info("Writing file: {}".format(output_file))
video_screenshot = img[255:760, 125:1025]
cv2.imwrite(output_file, video_screenshot)
async def main():
"""Process directories recursively, creating cropped screen shots."""
tasks = list()
    # semaphore to process at most 6 files at a time
semaphore = asyncio.BoundedSemaphore(6)
for _, folders, _ in os.walk(args.input_directory):
for folder in folders:
image_index = 0
images_folder = os.path.join(args.input_directory, folder)
for _, _, image_files in os.walk(images_folder): # type: str
for image_file in image_files:
if os.path.isdir(os.path.join(images_folder, image_file)):
continue
if len(image_file) < len("_.___") or image_file[-4:] not in [".png", ".jpg"]:
continue
image_file = os.path.join(images_folder, image_file)
tasks.append(asyncio.ensure_future(crop_image(image_file, image_index, semaphore)))
image_index += 1
await asyncio.sleep(0)
await asyncio.gather(*tasks)
if __name__ == '__main__':
"""Start main loop."""
logger.info("Starting main loop")
loop = asyncio.get_event_loop()
loop.set_debug(True)
loop.run_until_complete(main())
logger.info("Completed")
| 32.25 | 103 | 0.660465 | 330 | 2,580 | 4.981818 | 0.460606 | 0.060219 | 0.024331 | 0.021898 | 0.082725 | 0.082725 | 0.082725 | 0 | 0 | 0 | 0 | 0.016129 | 0.231008 | 2,580 | 79 | 104 | 32.658228 | 0.8125 | 0.174031 | 0 | 0.046512 | 0 | 0 | 0.08494 | 0.013549 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.116279 | 0 | 0.116279 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1754c01d73f56bdac624a68f9dd5e0ed03393ed4 | 1,297 | py | Python | setup.py | carlosdamazio/python-aisweb | bc54e26b5ea758bcc69351d268a44e0b520d0956 | [
"MIT"
] | 8 | 2018-04-03T15:07:09.000Z | 2022-03-13T13:12:45.000Z | setup.py | carlosdamazio/python-aisweb | bc54e26b5ea758bcc69351d268a44e0b520d0956 | [
"MIT"
] | 5 | 2018-04-03T20:09:24.000Z | 2019-09-10T01:17:42.000Z | setup.py | carlosdamazio/python-aisweb | bc54e26b5ea758bcc69351d268a44e0b520d0956 | [
"MIT"
] | 1 | 2018-04-03T04:09:58.000Z | 2018-04-03T04:09:58.000Z | # -*- coding: utf-8 -*-
from setuptools import find_packages, setup
import os
import re
package = 'python_aisweb'
init_py = open(os.path.join(package, '__init__.py')).read()
version = re.search(
"^__version__ = ['\"]([^'\"]+)['\"]", init_py, re.MULTILINE).group(1)
author = re.search(
"^__author__ = ['\"]([^'\"]+)['\"]", init_py, re.MULTILINE).group(1)
email = re.search(
"^__email__ = ['\"]([^'\"]+)['\"]", init_py, re.MULTILINE).group(1)
try:
import pypandoc
readme = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
readme = ''
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setup(
name='python-aisweb',
packages=find_packages(),
version=version,
description='API Wrapper for brazilian AIS services.',
long_description=readme,
author=author,
author_email=email,
url='https://github.com/carlosdamazio/python-aisweb',
install_requires=requirements,
license="MIT",
keywords=['dev', 'api', 'aisweb', 'aeronautics'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
)
| 27.595745 | 75 | 0.619121 | 140 | 1,297 | 5.55 | 0.535714 | 0.03861 | 0.030888 | 0.065637 | 0.088803 | 0.088803 | 0 | 0 | 0 | 0 | 0 | 0.005709 | 0.189668 | 1,297 | 46 | 76 | 28.195652 | 0.733587 | 0.016191 | 0 | 0 | 0 | 0 | 0.324961 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.131579 | 0 | 0.131579 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
175519be4a7561f84e8552643f76c512dbaaf58b | 1,254 | py | Python | external_api_tests/test_weather.py | AbhinavTalari/SOAD-Project | aa89f481da2b6f29c8750d9c144f82368be81a7b | [
"MIT"
] | null | null | null | external_api_tests/test_weather.py | AbhinavTalari/SOAD-Project | aa89f481da2b6f29c8750d9c144f82368be81a7b | [
"MIT"
] | null | null | null | external_api_tests/test_weather.py | AbhinavTalari/SOAD-Project | aa89f481da2b6f29c8750d9c144f82368be81a7b | [
"MIT"
] | 2 | 2020-12-21T07:05:41.000Z | 2021-02-17T17:33:48.000Z | import pytest
import requests
MY_KEY = '02db6ca787d18d34175d3c7996cf193b'
@pytest.mark.parametrize("key , q , extras" , [
(MY_KEY , "London" , "okay") ,
('' , "London" , "Wrong key"),
('abc' , "London" , "Wrong key"),
(MY_KEY , "abc" , "Wrong city"),
(MY_KEY , " " , "blank city"),
    ('' , '' , 'Wrong all'),
])
def test_current_weather(key,q,extras):
url = "http://api.openweathermap.org/data/2.5/weather?q={}&appid={}".format(q,key)
response = requests.get(url)
response = response.json()
if(extras == "okay"):
assert response["cod"] == 200
assert response["name"] == q
if(extras == "blank city"):
assert response["cod"] == '404'
assert response["message"] == "city not found"
if(extras == "Wrong city"):
assert response["cod"] == '404'
assert response["message"] == "city not found"
if(extras == "Wrong key"):
assert response["cod"] == 401
assert response["message"] == "Invalid API key. Please see http://openweathermap.org/faq#error401 for more info."
if(extras == "Wrong all"):
assert response["cod"] == 401
assert response["message"] == "Invalid API key. Please see http://openweathermap.org/faq#error401 for more info."
| 20.225806 | 116 | 0.596491 | 150 | 1,254 | 4.946667 | 0.353333 | 0.188679 | 0.114555 | 0.056604 | 0.474394 | 0.474394 | 0.474394 | 0.474394 | 0.474394 | 0.474394 | 0 | 0.044761 | 0.216108 | 1,254 | 61 | 117 | 20.557377 | 0.710071 | 0 | 0 | 0.266667 | 0 | 0 | 0.39645 | 0.02705 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.033333 | false | 0 | 0.066667 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
175675d7610b11f7f5a72c44eb62e7eb639038db | 1,372 | py | Python | plugins/operators/vivareal_operator.py | lucaspfigueiredo/elt-pipeline | 71537f5c2bd2e6502ea44c8dab44cc4ba8919e7f | [
"MIT"
] | 2 | 2022-03-29T23:48:35.000Z | 2022-03-30T02:10:34.000Z | plugins/operators/vivareal_operator.py | lucaspfigueiredo/elt-pipeline | 71537f5c2bd2e6502ea44c8dab44cc4ba8919e7f | [
"MIT"
] | null | null | null | plugins/operators/vivareal_operator.py | lucaspfigueiredo/elt-pipeline | 71537f5c2bd2e6502ea44c8dab44cc4ba8919e7f | [
"MIT"
] | null | null | null | import json
import logging
from hooks.vivareal_hook import VivarealHook
from airflow.utils.decorators import apply_defaults
from airflow.models.baseoperator import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class VivarealOperator(BaseOperator):
template_fields = [
"s3_key",
"s3_bucket_name"
]
@apply_defaults
def __init__(self, s3_key, s3_bucket_name, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.s3_key = s3_key
self.s3_bucket_name = s3_bucket_name
def execute(self, context):
hook = VivarealHook()
s3_hook = S3Hook(aws_conn_id="s3_connection")
logger.info(f"Getting data")
with open("vivareal.json", "w") as fp:
for blocks in hook.run():
for ap in blocks:
json.dump(ap, fp, ensure_ascii=False)
fp.write(",\n")
logger.info(f"Uploading object in S3 {self.s3_bucket_name}")
s3_hook.load_file(
filename=fp.name,
key=self.s3_key,
bucket_name=self.s3_bucket_name
)
if __name__ == "__main__":
    # Hypothetical values for a local smoke test; in a real DAG these are
    # passed in as task arguments rather than hard-coded.
    operator = VivarealOperator(
        task_id="vivareal_to_s3",
        s3_key="vivareal.json",
        s3_bucket_name="my-bucket",
    )
    operator.execute(context={}) | 31.181818 | 80 | 0.625364 | 164 | 1,372 | 4.926829 | 0.45122 | 0.086634 | 0.089109 | 0.059406 | 0.086634 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018109 | 0.27551 | 1,372 | 44 | 81 | 31.181818 | 0.794769 | 0 | 0 | 0 | 0 | 0 | 0.093955 | 0.015295 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.166667 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1758ea53434e4693152e9c5c19753c1eb492d79b | 13,073 | py | Python | Artie/internals/rl/environment.py | MaxStrange/ArtieInfant | 1edbb171a5405d2971227f2d2d83acb523c70034 | [
"MIT"
] | 1 | 2018-04-28T16:55:05.000Z | 2018-04-28T16:55:05.000Z | Artie/internals/rl/environment.py | MaxStrange/ArtieInfant | 1edbb171a5405d2971227f2d2d83acb523c70034 | [
"MIT"
] | null | null | null | Artie/internals/rl/environment.py | MaxStrange/ArtieInfant | 1edbb171a5405d2971227f2d2d83acb523c70034 | [
"MIT"
] | null | null | null | """
This module provides the observations and rewards for testing and
for training the RL agent to vocalize.
"""
import collections
import logging
import math
import numpy as np
import random
import output.voice.synthesizer as synth # pylint: disable=locally-disabled, import-error
import warnings
Step = collections.namedtuple("Step", "state action")
ObservationSpace = collections.namedtuple("ObservationSpace", "dtype high low shape")
class TestEnvironment:
"""
The test environment.
"""
def __init__(self, behavior, nsteps, first_obs, action_shape, observation_space):
"""
        :param behavior: A callable of signature fn(obs, action, prev_obs) -> (obs, reward, done).
            This function is what our step() function actually calls under the hood.
:param nsteps: The number of steps before we return 'done' for step(). If this parameter
is None, an episode will only terminate if the behavior yields a done.
:param first_obs: The first observation that should be returned by calling reset(). This may be a callable.
:param action_shape: The shape of an action in this environment.
:param observation_space: An ObservationSpace.
"""
self.behavior = behavior
self.nsteps = nsteps
self.nsteps_so_far_taken = 0
self.first_obs = first_obs
self.most_recent_obs = first_obs
self.action_shape = action_shape
self.observation_space = observation_space
def reset(self):
"""
Reset the environment and give the first observation.
:returns: The first observation of the environment.
"""
self.nsteps_so_far_taken = 0
if callable(self.first_obs):
obs = self.first_obs()
else:
obs = self.first_obs
self.most_recent_obs = obs
return obs
def step(self, action):
"""
:param action: The action to take at the current step to derive the next
:returns: ob, rew, done, info; where:
- ob: The observation of the step that we are at in response to
taking `action`.
- rew: The reward we got for taking `action`.
- done: True or False, depending on if the episode is over.
- info: Dict of random crap. Currently always empty.
"""
if self.nsteps is not None and self.nsteps_so_far_taken >= self.nsteps:
# Terminate the episode
self.nsteps_so_far_taken = 0
return self.most_recent_obs, 0, True, {}
else:
# Increment the number of steps
self.nsteps_so_far_taken += 1
# Figure out our next step
obs, reward, done = self.behavior(self.most_recent_obs, action, self.most_recent_obs)
# update the most recent observation
self.most_recent_obs = obs
# Return the observation, reward, whether we are done or not, and the info dict
return obs, reward, done, {}
class SomEnvironment:
"""
This class provides the Environment for learning to vocalize and to produce sounds that are 'phoneme'-like.
The behavior of this environment is as follows. Each episode is exactly one step. The first (and only)
observation that is given is a random (uniform) scalar that represents the cluster index of a sound,
as clustered by: Sound -> Preprocessing -> VAE -> Mean Shift Clustering over all latent vectors produced during VAE training.
The action space is len(articularizers) (continuous). The reward function depends on if this environment
is in phase 0 or phase 1 of training. During phase 0, a reward is given based on whether or not an
audible sound is produced via the chosen action, as fed into the articulatory synthesizer controller.
During phase 1, the reward is conditioned on the resulting sound sounding like the prototype of the
cluster observed, probably via a correlation DSP function.
"""
def __init__(self, nclusters, articulation_duration_ms, time_points_ms, cluster_prototypes):
"""
:param nclusters: The number of clusters.
:param articulation_duration_ms: The total number of ms of each articulation. Currently we only support
making all articulations the same duration.
:param time_points_ms: The discrete time points of the articulation. This parameter
indicates the number of times we will move the articularizers and when.
The action space is of shape (narticularizers, ntime_points).
:param cluster_prototypes: A list or dict of the form [cluster_index => cluster_prototype]. Each cluster prototype
should be an AudioSegment object.
"""
for idx, tp in enumerate(time_points_ms):
if tp < 0:
raise ValueError("Time points cannot be negative. Got", tp, "at index", idx, "in 'time_points' parameter.")
elif tp > articulation_duration_ms:
raise ValueError("Time point", tp, "at index", idx, "is greater than the duration of the articulation:", articulation_duration_ms)
self.nclusters = nclusters
self._phase = 0
self._retain_audio = False
self._audio_buffer = []
self.observed_cluster_index = None
self.articulation_duration_ms = articulation_duration_ms
self.time_points_ms = time_points_ms
self.cluster_prototypes = cluster_prototypes
self.action_shape = (len(synth.articularizers), len(time_points_ms))
self.observation_space = ObservationSpace(dtype=np.int32,
high=np.array([(self.nclusters - 1)], dtype=np.int32),
low=np.array([0], dtype=np.int32),
shape=(1,))
self._inference_mode = False
self._previous_inferred_index = -1
self._xcor_component_max = 0.0
self._step = 0
@property
def inference_mode(self):
"""Inference mode = True means that we cycle through the observations rather than sampling them randomly"""
return self._inference_mode
@inference_mode.setter
def inference_mode(self, v):
"""Inference mode = True means that we cycle through the observations rather than sampling them randomly"""
self._inference_mode = v
@property
def phase(self):
return self._phase
@phase.setter
def phase(self, p):
"""Set the phase. Phase is zero or one. Any nonzero value will set phase to one."""
if p != 0:
self._phase = 1
else:
self._phase = 0
@property
def retain_audio(self):
"""
Set to True if you want to keep the audio that is generated by the Agent.
The audio will be retained in self.produced_audio. You may dump the audio to disk
with `env.dump_audio()` or you may clear the audio buffer with `env.clear_audio()`.
"""
return self._retain_audio
@retain_audio.setter
def retain_audio(self, retain):
self._retain_audio = retain
def clear_audio(self):
"""
Clear the audio buffer. The audio buffer is the audio generated by the agent if
this environment's `retain_audio` is set to True.
"""
self._audio_buffer.clear()
def dump_audio(self, basefname=None):
"""
Dumps each audio segment from the audio buffer to disk. Does not clear the buffer.
:param basefname: If not None, the base filename, which will have the audio segment
indexes appended starting from 0. Can be a file path. If None,
the default name of 'produced_sound' is used as the base.
"""
base = basefname if basefname is not None else "produced_sound"
for i, seg in enumerate(self._audio_buffer):
seg.export("{}{}.wav".format(base, i), format='wav')
def reset(self):
"""
Reset the environment and give the first observation.
Will NOT clear the audio buffer as well.
:returns: The first observation of the environment, a uniform random scalar
from the distribution [0, self.nclusters]. If we are in inference mode
however, we will return 0 first, then 1, ..., self.nclusters - 1, then 0, etc.
"""
if self.inference_mode:
self.observed_cluster_index = (self._previous_inferred_index + 1) % self.nclusters
else:
self.observed_cluster_index = random.choice([n for n in range(0, self.nclusters)])
self._previous_inferred_index = self.observed_cluster_index
return np.array([self.observed_cluster_index], dtype=self.observation_space.dtype)
def step(self, action):
"""
:param action: The action to take at the current step to derive the next
:returns: ob, rew, done, info; where:
- ob: The observation of the step that we are at in response to
taking `action`.
- rew: The reward we got for taking `action`.
- done: True or False, depending on if the episode is over.
- info: Dict of random crap. Currently always empty.
"""
action = np.reshape(action, self.action_shape)
# Just return the cluster index we generated at reset as the observation
obs = np.array([self.observed_cluster_index], dtype=self.observation_space.dtype)
done = True # We are always done after the first step in this environment
info = {} # Info dict is just an empty dict, kept for compliance with OpenAI Gym
warnings.simplefilter(action='ignore', category=ResourceWarning)
seg = synth.make_seg_from_synthmat(action, self.articulation_duration_ms / 1000.0, [tp / 1000.0 for tp in self.time_points_ms])
if self.retain_audio:
self._audio_buffer.append(seg)
##################### Used to Debug. Should not be retained while actually using this ########################
if self._step % 50 == 0:
seg.export("mimicry-{}-{}-debug-delete-me.wav".format(self.observed_cluster_index, self._step), format='wav')
##############################################################################################################
if self.phase == 0:
# During phase 0, the reward is based on whether or not we vocalized at all
arr = seg.to_numpy_array()
assert len(arr) > 0
squares = np.square(arr)
assert len(squares) == len(arr)
sum_of_squares = np.sum(squares[squares >= 0], axis=0)
assert sum_of_squares >= 0.0, "Len: {}, Sum of squares: {}".format(len(arr), np.sum(squares, axis=0))
mean_square = sum_of_squares / len(arr)
assert mean_square > 0.0
rms = np.sqrt(mean_square)
rew = rms
if math.isnan(rew):
rew = 0.0
rew /= 100.0 # Get it into a more reasonable domain
else:
# During phase 1, the reward is based on how well we match the prototype sound
# for the given cluster index
# Shift the wave form up by most negative value
ours = seg.to_numpy_array().astype(float)
most_neg_val = min(ours)
ours += abs(most_neg_val)
prototype = self.cluster_prototypes[int(self.observed_cluster_index)].to_numpy_array().astype(float)
most_neg_val = min(prototype)
prototype += abs(most_neg_val)
assert sum(ours[ours < 0]) == 0
assert sum(prototype[prototype < 0]) == 0
# Divide by the amplitude
if max(ours) != min(ours):
ours /= max(ours) - min(ours)
if max(prototype) != min(prototype):
prototype /= max(prototype) - min(prototype)
# Now you have values in the interval [0, 1]
# XCorr with some amount of zero extension
xcor = np.correlate(ours, prototype, mode='full')
# Find the single maximum value along the xcor vector
# This is the place at which the waves match each other best
# Take the xcor value at this location as the reward
rew = max(xcor)
logging.debug("Observation: {} -> Action: {} -> Reward: {}".format(obs, action, rew))
self._step += 1
return obs, rew, done, info
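# --- Example usage (illustrative sketch) ---
# A trivial behavior for TestEnvironment; the three-argument signature matches
# the call made in TestEnvironment.step(). Values here are hypothetical.
if __name__ == '__main__':
    def constant_behavior(obs, action, prev_obs):
        return obs, 1.0, False

    space = ObservationSpace(dtype=np.float32, high=np.array([1.0]),
                             low=np.array([0.0]), shape=(1,))
    env = TestEnvironment(constant_behavior, nsteps=3, first_obs=np.array([0.0]),
                          action_shape=(1,), observation_space=space)
    print(env.reset())
    print(env.step(np.array([0.5])))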
| 47.365942 | 146 | 0.599556 | 1,633 | 13,073 | 4.682792 | 0.219228 | 0.018831 | 0.019877 | 0.025108 | 0.197986 | 0.178632 | 0.145417 | 0.134432 | 0.134432 | 0.125016 | 0 | 0.008052 | 0.315995 | 13,073 | 275 | 147 | 47.538182 | 0.847126 | 0.448864 | 0 | 0.139706 | 0 | 0 | 0.051155 | 0.005115 | 0 | 0 | 0 | 0 | 0.044118 | 1 | 0.102941 | false | 0 | 0.051471 | 0.007353 | 0.227941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
175a3b4d2739554618c982905727d9731a509a3f | 934 | py | Python | boot.py | Ca11MeE/easy_frame | c3ec3069e3f61d1c01e5bd7ebbdf28e953a8ffa8 | [
"Apache-2.0"
] | null | null | null | boot.py | Ca11MeE/easy_frame | c3ec3069e3f61d1c01e5bd7ebbdf28e953a8ffa8 | [
"Apache-2.0"
] | null | null | null | boot.py | Ca11MeE/easy_frame | c3ec3069e3f61d1c01e5bd7ebbdf28e953a8ffa8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
from flask import Flask
import mysql,os,re
from mysql import Pool
import properties
# Create the web app (JSON_AS_ASCII=False keeps JSON responses from being ASCII-escaped)
app=Flask(__name__)
app.config['JSON_AS_ASCII'] = False
# Auto-register the blueprints found in each module.
# dir_path is the blueprint package path; e.g. pass '/routes' if the blueprints live in the routes folder.
def map_apps(dir_path):
path=os.getcwd()+dir_path
list=os.listdir(path)
    print('Blueprint folder:', '.', dir_path)
# list.remove('__pycache__')
while list:
try:
file=list.pop(0)
if file.startswith('__') and file.endswith('__'):
continue
            print('Loading blueprint module:', file)
            f_model = __import__(re.sub('/', '', dir_path) + '.' + re.sub(r'\.py', '', file), fromlist=True)
app.register_blueprint(f_model.app)
except:
pass
def get_app():
return app
print('Loading database module')
mysql.pool = Pool.Pool()
# print('Loading finished')
print('Initializing blueprints')
for path in properties.blueprint_path:
map_apps(path)
| 22.780488 | 96 | 0.639186 | 117 | 934 | 4.846154 | 0.555556 | 0.049383 | 0.038801 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002729 | 0.215203 | 934 | 40 | 97 | 23.35 | 0.770805 | 0.165953 | 0 | 0 | 0 | 0 | 0.063472 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0.037037 | 0.185185 | 0.037037 | 0.296296 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
175af7185594f72ab16004503b17449a4a18cdad | 3,213 | py | Python | chemprop/train/evaluate.py | wangdingyan/hybridUQ | c141a4bec0e716a12444f7e9ab0d7c975df93184 | [
"MIT"
] | 6 | 2021-10-01T10:17:29.000Z | 2021-12-29T04:37:10.000Z | chemprop/train/evaluate.py | wangdingyan/hybridUQ | c141a4bec0e716a12444f7e9ab0d7c975df93184 | [
"MIT"
] | null | null | null | chemprop/train/evaluate.py | wangdingyan/hybridUQ | c141a4bec0e716a12444f7e9ab0d7c975df93184 | [
"MIT"
] | 1 | 2021-09-21T17:39:03.000Z | 2021-09-21T17:39:03.000Z | from collections import defaultdict
import logging
from typing import Dict, List
from .predict import predict
from chemprop.data import MoleculeDataLoader, StandardScaler
from chemprop.utils.metrics import get_metric_func
from chemprop.models import MoleculeModel
def evaluate_predictions(preds: List[List[float]],
                         targets: List[List[float]],
                         num_tasks: int,
                         metrics: List[str],
                         dataset_type: str,
                         logger: logging.Logger = None) -> Dict[str, List[float]]:
info = logger.info if logger is not None else print
metric_to_func = {metric: get_metric_func(metric) for metric in metrics}
if len(preds) == 0:
return {metric: [float('nan')] * num_tasks for metric in metrics}
valid_preds = [[] for _ in range(num_tasks)]
valid_targets = [[] for _ in range(num_tasks)]
for i in range(num_tasks):
for j in range(len(preds)):
if targets[j][i] is not None: # Skip those without targets
valid_preds[i].append(preds[j][i])
valid_targets[i].append(targets[j][i])
results = defaultdict(list)
for i in range(num_tasks):
if dataset_type == 'classification':
nan = False
if all(target == 0 for target in valid_targets[i]) or all(target == 1 for target in valid_targets[i]):
nan = True
info('Warning: Found a task with targets all 0s or all 1s')
if all(pred == 0 for pred in valid_preds[i]) or all(pred == 1 for pred in valid_preds[i]):
nan = True
info('Warning: Found a task with predictions all 0s or all 1s')
if nan:
for metric in metrics:
results[metric].append(float('nan'))
continue
if len(valid_targets[i]) == 0:
continue
for metric, metric_func in metric_to_func.items():
if dataset_type == 'multiclass':
results[metric].append(metric_func(valid_targets[i],
valid_preds[i],
labels=list(range(len(valid_preds[i][0])))))
else:
results[metric].append(metric_func(valid_targets[i],
valid_preds[i]))
results = dict(results)
return results
def evaluate(model: MoleculeModel,
             data_loader: MoleculeDataLoader,
             num_tasks: int,
             metrics: List[str],
             dataset_type: str,
             scaler: StandardScaler = None,
             logger: logging.Logger = None) -> Dict[str, List[float]]:
preds, _ = predict(
model=model,
data_loader=data_loader,
scaler=scaler
)
results = evaluate_predictions(
preds=preds,
targets=data_loader.targets,
num_tasks=num_tasks,
metrics=metrics,
dataset_type=dataset_type,
logger=logger
)
return results
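# --- Example usage (illustrative sketch) ---
# Toy inputs for evaluate_predictions(); this assumes 'rmse' is one of the
# metric names known to chemprop.utils.metrics.get_metric_func.
if __name__ == '__main__':
    toy_preds = [[0.1], [0.9], [0.4]]
    toy_targets = [[0.0], [1.0], [0.5]]
    scores = evaluate_predictions(toy_preds, toy_targets, num_tasks=1,
                                  metrics=['rmse'], dataset_type='regression')
    print(scores)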
| 37.360465 | 114 | 0.545907 | 362 | 3,213 | 4.707182 | 0.218232 | 0.042254 | 0.038732 | 0.035211 | 0.304577 | 0.28169 | 0.192488 | 0.192488 | 0.146714 | 0.062207 | 0 | 0.005408 | 0.366947 | 3,213 | 85 | 115 | 37.8 | 0.83235 | 0.008092 | 0 | 0.253521 | 0 | 0 | 0.0427 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028169 | false | 0 | 0.098592 | 0 | 0.169014 | 0.014085 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
175dc9a3ede195f91638565c928e9e3207f1f8ca | 1,026 | py | Python | python/prototype-python2/test-libchewing-contrib/userphrase_enumerate.py | samwhelp/demo-libchewing | 13ce445cf1b71e42e765d1500d63234f88700835 | [
"MIT"
] | null | null | null | python/prototype-python2/test-libchewing-contrib/userphrase_enumerate.py | samwhelp/demo-libchewing | 13ce445cf1b71e42e765d1500d63234f88700835 | [
"MIT"
] | null | null | null | python/prototype-python2/test-libchewing-contrib/userphrase_enumerate.py | samwhelp/demo-libchewing | 13ce445cf1b71e42e765d1500d63234f88700835 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import chewing
import ctypes
chewing._libchewing.chewing_userphrase_has_next.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint)]
chewing._libchewing.chewing_userphrase_get.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_uint, ctypes.c_char_p, ctypes.c_uint]
phrase_len = ctypes.c_uint(0)
bopomofo_len = ctypes.c_uint(0)
#chewing.Init('/usr/share/chewing', '/tmp')
ctx = chewing.ChewingContext()
## https://github.com/chewing/libchewing/blob/v0.4.0/include/chewingio.h#L523
rtn = ctx.userphrase_enumerate()
## print(rtn)
## https://github.com/chewing/libchewing/blob/v0.4.0/include/chewingio.h#L525
while ctx.userphrase_has_next(phrase_len, bopomofo_len):
print('')
phrase = ctypes.create_string_buffer(phrase_len.value)
bopomofo = ctypes.create_string_buffer(bopomofo_len.value)
ctx.userphrase_get(phrase, phrase_len, bopomofo, bopomofo_len)
print('phrase: %s' % phrase.value)
print('bopomofo: %s' % bopomofo.value)
| 30.176471 | 138 | 0.770955 | 154 | 1,026 | 4.896104 | 0.331169 | 0.092838 | 0.087533 | 0.090186 | 0.366048 | 0.270557 | 0.209549 | 0.148541 | 0.148541 | 0.148541 | 0 | 0.015907 | 0.080897 | 1,026 | 33 | 139 | 31.090909 | 0.783669 | 0.236842 | 0 | 0 | 0 | 0 | 0.028497 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.133333 | 0 | 0.133333 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1761cb53f94a10c32873088a40b0c2dc8567f7e7 | 18,276 | py | Python | lib/rule_engine/types.py | rwspielman/rule-engine | 1d84d5599fe5ab34bc8d6fc00bbe00f847352428 | [
"BSD-3-Clause"
] | 149 | 2018-04-04T12:47:38.000Z | 2022-03-25T07:25:55.000Z | lib/rule_engine/types.py | rwspielman/rule-engine | 1d84d5599fe5ab34bc8d6fc00bbe00f847352428 | [
"BSD-3-Clause"
] | 26 | 2020-01-06T17:29:26.000Z | 2022-03-25T07:01:49.000Z | lib/rule_engine/types.py | rwspielman/rule-engine | 1d84d5599fe5ab34bc8d6fc00bbe00f847352428 | [
"BSD-3-Clause"
] | 24 | 2020-02-15T22:58:30.000Z | 2022-03-22T02:15:26.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# rule_engine/types.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
import collections.abc
import datetime
import decimal
import math
from . import errors
__all__ = (
'DataType',
'NoneType',
'coerce_value',
'is_integer_number',
'is_natural_number',
'is_numeric',
'is_real_number',
'iterable_member_value_type'
)
NoneType = type(None)
def _to_decimal(value):
if isinstance(value, decimal.Decimal):
return value
return decimal.Decimal(repr(value))
def coerce_value(value, verify_type=True):
"""
Take a native Python *value* and convert it to a value of a data type which can be represented by a Rule Engine
:py:class:`~.DataType`. This function is useful for converting native Python values at the engine boundaries such as
when resolving a symbol from an object external to the engine.
.. versionadded:: 2.0.0
:param value: The value to convert.
:param bool verify_type: Whether or not to verify the converted value's type.
:return: The converted value.
"""
# ARRAY
if isinstance(value, (list, range, tuple)):
value = tuple(coerce_value(v, verify_type=verify_type) for v in value)
# DATETIME
elif isinstance(value, datetime.date) and not isinstance(value, datetime.datetime):
value = datetime.datetime(value.year, value.month, value.day)
# FLOAT
elif isinstance(value, (float, int)) and not isinstance(value, bool):
value = _to_decimal(value)
# MAPPING
elif isinstance(value, (dict, collections.OrderedDict)):
value = collections.OrderedDict(
(coerce_value(k, verify_type=verify_type), coerce_value(v, verify_type=verify_type)) for k, v in value.items()
)
if verify_type:
DataType.from_value(value) # use this to raise a TypeError, if the type is incompatible
return value
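# Illustrative results of coerce_value (added; assumes only the conversions
# the docstring above describes):
#   coerce_value(3.0)            -> Decimal('3.0')
#   coerce_value([1, 'a', None]) -> (Decimal('1'), 'a', None)
#   coerce_value({'k': 1.5})     -> OrderedDict([('k', Decimal('1.5'))])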
def is_integer_number(value):
"""
	Check whether *value* is an integer number (i.e. a whole number). This can, for example, be used to check if a
floating point number such as ``3.0`` can safely be converted to an integer without loss of information.
.. versionadded:: 2.1.0
:param value: The value to check. This value is a native Python type.
:return: Whether or not the value is an integer number.
:rtype: bool
"""
if not is_real_number(value):
return False
if math.floor(value) != value:
return False
return True
def is_natural_number(value):
"""
Check whether *value* is a natural number (i.e. a whole, non-negative number). This can, for example, be used to
check if a floating point number such as ``3.0`` can safely be converted to an integer without loss of information.
:param value: The value to check. This value is a native Python type.
:return: Whether or not the value is a natural number.
:rtype: bool
"""
if not is_integer_number(value):
return False
if value < 0:
return False
return True
def is_real_number(value):
"""
Check whether *value* is a real number (i.e. capable of being represented as a floating point value without loss of
information as well as being finite). Despite being able to be represented as a float, ``NaN`` is not considered a
real number for the purposes of this function.
:param value: The value to check. This value is a native Python type.
	:return: Whether or not the value is a real number.
:rtype: bool
"""
if not is_numeric(value):
return False
if not math.isfinite(value):
return False
return True
def is_numeric(value):
"""
Check whether *value* is a numeric value (i.e. capable of being represented as a floating point value without loss
of information).
:param value: The value to check. This value is a native Python type.
:return: Whether or not the value is numeric.
:rtype: bool
"""
if not isinstance(value, (decimal.Decimal, float, int)):
return False
if isinstance(value, bool):
return False
return True
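# Added examples of how the numeric predicates relate (a sketch):
#   is_numeric(decimal.Decimal('nan'))  -> True   (representable, not finite)
#   is_real_number(float('inf'))        -> False  (fails the finite check)
#   is_integer_number(3.0)              -> True   (whole-valued float)
#   is_natural_number(-1)               -> False  (negative)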
def iterable_member_value_type(python_value):
"""
Take a native *python_value* and return the corresponding data type of each of its members if the types are either
the same or NULL. NULL is considered a special case to allow nullable-values. This by extension means that an
iterable may not be defined as only capable of containing NULL values.
:return: The data type of the sequence members. This will never be NULL, because that is considered a special case.
	It will either be UNDEFINED or one of the other types.
"""
subvalue_types = set()
for subvalue in python_value:
if DataType.is_definition(subvalue):
subvalue_type = subvalue
else:
subvalue_type = DataType.from_value(subvalue)
subvalue_types.add(subvalue_type)
if DataType.NULL in subvalue_types:
# treat NULL as a special case, allowing typed arrays to be a specified type *or* NULL
# this however makes it impossible to define an array with a type of NULL
subvalue_types.remove(DataType.NULL)
if len(subvalue_types) == 1:
subvalue_type = subvalue_types.pop()
else:
subvalue_type = DataType.UNDEFINED
return subvalue_type
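# e.g. iterable_member_value_type([1, 2, None]) is DataType.FLOAT (NULL members
# are tolerated), while mixed types such as [1, 'a'] yield DataType.UNDEFINED
# (added note).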
class _DataTypeDef(object):
__slots__ = ('name', 'python_type', 'is_scalar', 'iterable_type')
def __init__(self, name, python_type):
self.name = name
self.python_type = python_type
self.is_scalar = True
@property
def is_iterable(self):
return getattr(self, 'iterable_type', None) is not None
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.name == other.name
def __hash__(self):
return hash((self.python_type, self.is_scalar))
def __repr__(self):
return "<{} name={} python_type={} >".format(self.__class__.__name__, self.name, self.python_type.__name__)
@property
def is_compound(self):
return not self.is_scalar
_DATA_TYPE_UNDEFINED = _DataTypeDef('UNDEFINED', errors.UNDEFINED)
class _CollectionDataTypeDef(_DataTypeDef):
__slots__ = ('value_type', 'value_type_nullable')
def __init__(self, name, python_type, value_type=_DATA_TYPE_UNDEFINED, value_type_nullable=True):
# check these three classes individually instead of using Collection which isn't available before Python v3.6
if not issubclass(python_type, collections.abc.Container):
raise TypeError('the specified python_type is not a container')
if not issubclass(python_type, collections.abc.Iterable):
raise TypeError('the specified python_type is not an iterable')
if not issubclass(python_type, collections.abc.Sized):
raise TypeError('the specified python_type is not a sized')
super(_CollectionDataTypeDef, self).__init__(name, python_type)
self.is_scalar = False
self.value_type = value_type
self.value_type_nullable = value_type_nullable
@property
def iterable_type(self):
return self.value_type
def __call__(self, value_type, value_type_nullable=True):
return self.__class__(
self.name,
self.python_type,
value_type=value_type,
value_type_nullable=value_type_nullable
)
def __repr__(self):
return "<{} name={} python_type={} value_type={} >".format(
self.__class__.__name__,
self.name,
self.python_type.__name__,
self.value_type.name
)
def __eq__(self, other):
if not super().__eq__(other):
return False
return all((
self.value_type == other.value_type,
self.value_type_nullable == other.value_type_nullable
))
def __hash__(self):
return hash((self.python_type, self.is_scalar, hash((self.value_type, self.value_type_nullable))))
class _ArrayDataTypeDef(_CollectionDataTypeDef):
pass
class _SetDataTypeDef(_CollectionDataTypeDef):
pass
class _MappingDataTypeDef(_DataTypeDef):
__slots__ = ('key_type', 'value_type', 'value_type_nullable')
def __init__(self, name, python_type, key_type=_DATA_TYPE_UNDEFINED, value_type=_DATA_TYPE_UNDEFINED, value_type_nullable=True):
if not issubclass(python_type, collections.abc.Mapping):
raise TypeError('the specified python_type is not a mapping')
super(_MappingDataTypeDef, self).__init__(name, python_type)
self.is_scalar = False
		# ARRAY is the only compound data type that can be used as a mapping key. This is because ARRAYs are backed by
		# Python tuples, while SET and MAPPING objects are set and dict instances respectively, which are not hashable.
if key_type.is_compound and not isinstance(key_type, DataType.ARRAY.__class__):
raise errors.EngineError("the {} data type may not be used for mapping keys".format(key_type.name))
self.key_type = key_type
self.value_type = value_type
self.value_type_nullable = value_type_nullable
@property
def iterable_type(self):
return self.key_type
def __call__(self, key_type, value_type=_DATA_TYPE_UNDEFINED, value_type_nullable=True):
return self.__class__(
self.name,
self.python_type,
key_type=key_type,
value_type=value_type,
value_type_nullable=value_type_nullable
)
def __repr__(self):
return "<{} name={} python_type={} key_type={} value_type={} >".format(
self.__class__.__name__,
self.name,
self.python_type.__name__,
self.key_type.name,
self.value_type.name
)
def __eq__(self, other):
if not super().__eq__(other):
return False
return all((
self.key_type == other.key_type,
self.value_type == other.value_type,
self.value_type_nullable == other.value_type_nullable
))
def __hash__(self):
return hash((self.python_type, self.is_scalar, hash((self.key_type, self.value_type, self.value_type_nullable))))
class DataTypeMeta(type):
def __new__(metacls, cls, bases, classdict):
data_type = super().__new__(metacls, cls, bases, classdict)
data_type._member_map_ = collections.OrderedDict()
for key, value in classdict.items():
if not isinstance(value, _DataTypeDef):
continue
data_type._member_map_[key] = value
return data_type
def __contains__(self, item):
return item in self._member_map_
def __getitem__(cls, item):
return cls._member_map_[item]
def __iter__(cls):
yield from cls._member_map_
def __len__(cls):
return len(cls._member_map_)
class DataType(metaclass=DataTypeMeta):
"""
A collection of constants representing the different supported data types. There are three ways to compare data
types. All three are effectively the same when dealing with scalars.
Equality checking
.. code-block::
dt == DataType.TYPE
This is the most explicit form of testing and when dealing with compound data types, it recursively checks that
all of the member types are also equal.
Class checking
.. code-block::
isinstance(dt, DataType.TYPE.__class__)
This checks that the data types are the same but when dealing with compound data types, the member types are
ignored.
Compatibility checking
.. code-block::
DataType.is_compatible(dt, DataType.TYPE)
This checks that the types are compatible without any kind of conversion. When dealing with compound data types,
this ensures that the member types are either the same or :py:attr:`~.UNDEFINED`.
"""
ARRAY = _ArrayDataTypeDef('ARRAY', tuple)
"""
.. py:function:: __call__(value_type, value_type_nullable=True)
:param value_type: The type of the array members.
:param bool value_type_nullable: Whether or not array members are allowed to be :py:attr:`.NULL`.
"""
BOOLEAN = _DataTypeDef('BOOLEAN', bool)
DATETIME = _DataTypeDef('DATETIME', datetime.datetime)
FLOAT = _DataTypeDef('FLOAT', decimal.Decimal)
MAPPING = _MappingDataTypeDef('MAPPING', dict)
"""
.. py:function:: __call__(key_type, value_type, value_type_nullable=True)
:param key_type: The type of the mapping keys.
:param value_type: The type of the mapping values.
:param bool value_type_nullable: Whether or not mapping values are allowed to be :py:attr:`.NULL`.
"""
NULL = _DataTypeDef('NULL', NoneType)
SET = _SetDataTypeDef('SET', set)
"""
.. py:function:: __call__(value_type, value_type_nullable=True)
:param value_type: The type of the set members.
:param bool value_type_nullable: Whether or not set members are allowed to be :py:attr:`.NULL`.
"""
STRING = _DataTypeDef('STRING', str)
UNDEFINED = _DATA_TYPE_UNDEFINED
"""
	Undefined values. This constant can be used to indicate that a particular symbol is valid, but its data type is
currently unknown.
"""
@classmethod
def from_name(cls, name):
"""
Get the data type from its name.
.. versionadded:: 2.0.0
:param str name: The name of the data type to retrieve.
:return: One of the constants.
"""
if not isinstance(name, str):
raise TypeError('from_name argument 1 must be str, not ' + type(name).__name__)
dt = getattr(cls, name, None)
if not isinstance(dt, _DataTypeDef):
raise ValueError("can not map name {0!r} to a compatible data type".format(name))
return dt
@classmethod
def from_type(cls, python_type):
"""
Get the supported data type constant for the specified Python type. If the type can not be mapped to a supported
data type, then a :py:exc:`ValueError` exception will be raised. This function will not return
:py:attr:`.UNDEFINED`.
:param type python_type: The native Python type to retrieve the corresponding type constant for.
:return: One of the constants.
"""
if not isinstance(python_type, type):
raise TypeError('from_type argument 1 must be type, not ' + type(python_type).__name__)
if python_type in (list, range, tuple):
return cls.ARRAY
elif python_type is bool:
return cls.BOOLEAN
elif python_type is datetime.date or python_type is datetime.datetime:
return cls.DATETIME
elif python_type in (decimal.Decimal, float, int):
return cls.FLOAT
elif python_type is dict:
return cls.MAPPING
elif python_type is NoneType:
return cls.NULL
elif python_type is set:
return cls.SET
elif python_type is str:
return cls.STRING
raise ValueError("can not map python type {0!r} to a compatible data type".format(python_type.__name__))
@classmethod
def from_value(cls, python_value):
"""
Get the supported data type constant for the specified Python value. If the value can not be mapped to a
supported data type, then a :py:exc:`TypeError` exception will be raised. This function will not return
:py:attr:`.UNDEFINED`.
:param python_value: The native Python value to retrieve the corresponding data type constant for.
:return: One of the constants.
"""
if isinstance(python_value, bool):
return cls.BOOLEAN
elif isinstance(python_value, (datetime.date, datetime.datetime)):
return cls.DATETIME
elif isinstance(python_value, (decimal.Decimal, float, int)):
return cls.FLOAT
elif python_value is None:
return cls.NULL
elif isinstance(python_value, (set,)):
return cls.SET(value_type=iterable_member_value_type(python_value))
elif isinstance(python_value, (str,)):
return cls.STRING
elif isinstance(python_value, collections.abc.Mapping):
return cls.MAPPING(
key_type=iterable_member_value_type(python_value.keys()),
value_type=iterable_member_value_type(python_value.values())
)
elif isinstance(python_value, collections.abc.Sequence):
return cls.ARRAY(value_type=iterable_member_value_type(python_value))
raise TypeError("can not map python type {0!r} to a compatible data type".format(type(python_value).__name__))
@classmethod
def is_compatible(cls, dt1, dt2):
"""
Check if two data type definitions are compatible without any kind of conversion. This evaluates to ``True``
when one or both are :py:attr:`.UNDEFINED` or both types are the same. In the case of compound data types (such
as :py:attr:`.ARRAY`) the member types are checked recursively in the same manner.
.. versionadded:: 2.1.0
:param dt1: The first data type to compare.
:param dt2: The second data type to compare.
:return: Whether or not the two types are compatible.
:rtype: bool
"""
if not (cls.is_definition(dt1) and cls.is_definition(dt2)):
raise TypeError('argument is not a data type definition')
if dt1 is _DATA_TYPE_UNDEFINED or dt2 is _DATA_TYPE_UNDEFINED:
return True
if dt1.is_scalar and dt2.is_scalar:
return dt1 == dt2
elif dt1.is_compound and dt2.is_compound:
if isinstance(dt1, DataType.ARRAY.__class__) and isinstance(dt2, DataType.ARRAY.__class__):
return cls.is_compatible(dt1.value_type, dt2.value_type)
elif isinstance(dt1, DataType.MAPPING.__class__) and isinstance(dt2, DataType.MAPPING.__class__):
if not cls.is_compatible(dt1.key_type, dt2.key_type):
return False
if not cls.is_compatible(dt1.value_type, dt2.value_type):
return False
return True
elif isinstance(dt1, DataType.SET.__class__) and isinstance(dt2, DataType.SET.__class__):
return cls.is_compatible(dt1.value_type, dt2.value_type)
return False
@classmethod
def is_definition(cls, value):
"""
Check if *value* is a data type definition.
.. versionadded:: 2.1.0
:param value: The value to check.
:return: ``True`` if *value* is a data type definition.
:rtype: bool
"""
return isinstance(value, _DataTypeDef)
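# Example usage (added sketch of the three comparison styles from the DataType
# docstring):
#   dt = DataType.from_value((1, 2))             # ARRAY with FLOAT members
#   dt == DataType.ARRAY                         # False: member types differ
#   isinstance(dt, DataType.ARRAY.__class__)     # True: same compound kind
#   DataType.is_compatible(dt, DataType.ARRAY)   # True: UNDEFINED acts as a wildcard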
| 35.695313 | 129 | 0.749343 | 2,717 | 18,276 | 4.832536 | 0.13986 | 0.049353 | 0.033663 | 0.016451 | 0.444021 | 0.402209 | 0.3508 | 0.301447 | 0.263823 | 0.224752 | 0 | 0.003788 | 0.162289 | 18,276 | 511 | 130 | 35.765166 | 0.853821 | 0.376012 | 0 | 0.328571 | 0 | 0 | 0.087252 | 0.002526 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0.007143 | 0.021429 | 0.053571 | 0.442857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
17623540444eb9269b6276c7ee4a47d541023c28 | 2,437 | py | Python | data_pr_downloader.py | froi/data-pr | 07b386457f1868573181928cc8caecc970e7b44f | [
"MIT"
] | 6 | 2018-01-09T14:58:42.000Z | 2019-09-17T19:52:03.000Z | data_pr_downloader.py | froi/data-pr | 07b386457f1868573181928cc8caecc970e7b44f | [
"MIT"
] | 40 | 2019-08-21T12:05:26.000Z | 2021-07-14T10:39:27.000Z | data_pr_downloader.py | froi/data-pr | 07b386457f1868573181928cc8caecc970e7b44f | [
"MIT"
] | 1 | 2018-01-09T21:23:53.000Z | 2018-01-09T21:23:53.000Z | from datetime import datetime
import json
import logging
from mimetypes import guess_extension
import os
import requests
from slugify import slugify
FORMAT = '%(asctime)-15s - %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger('data_pr')
BASE_DATA_DIR = 'data_files'
DATA_PR_CATALOG_PATH = f'{BASE_DATA_DIR}/data_pr_catalog_{datetime.now()}.json'
DATA_PR_CATALOG_URL = 'https://data.pr.gov/data.json'
def get_new_data_pr_catalog(url):
    data_pr_json_meta_response = requests.get(url)
    os.makedirs(BASE_DATA_DIR, exist_ok=True)  # make sure the data dir exists
    with open(DATA_PR_CATALOG_PATH, 'w') as data_pr_catalog_json:
json.dump(data_pr_json_meta_response.json(), data_pr_catalog_json)
def get_datasets(data_pr_catalog, amount_to_download=None):
with open(f'{data_pr_catalog}') as data_catalog:
data_pr_json_meta = json.load(data_catalog)
    datasets = data_pr_json_meta['dataset']
    if amount_to_download is not None:  # honor the optional download limit
        datasets = datasets[:amount_to_download]
    for dataset in datasets:
folder_name = slugify(dataset['title'])
data_file_path = f'{BASE_DATA_DIR}/{folder_name}'
logger.info(f"Start download of {dataset['title']} to {data_file_path}")
if not os.path.exists(data_file_path):
os.makedirs(data_file_path)
for distribution in dataset['distribution']:
file_extension = guess_extension(distribution['mediaType'])
try:
response = requests.get(distribution['downloadURL'], stream=True)
except Exception as e:
logger.error('Error requesting data: %s', e)
continue
logger.debug(f"Downloading distribution: {distribution['mediaType']}")
with open(f'{data_file_path}/data{file_extension}', 'wb') as dataset_file:
for data in response.iter_content(chunk_size=100):
dataset_file.write(data)
logger.debug(f"Done downloading distribution: {distribution['mediaType']}")
logger.info(f"Finished download of {dataset['title']} to {data_file_path}")
def main():
try:
get_new_data_pr_catalog(DATA_PR_CATALOG_URL)
except Exception as e:
logger.error('Error at get_new_data_pr_catalog: %s', e)
try:
get_datasets(DATA_PR_CATALOG_PATH)
except Exception as e:
logger.error('Error at get_datasets: %s', e)
if __name__ == '__main__':
logger.setLevel(logging.DEBUG)
logger.info(f'process started: {datetime.now()}')
main()
logger.info(f'process finished: {datetime.now()}')
| 34.814286 | 87 | 0.685679 | 329 | 2,437 | 4.778116 | 0.270517 | 0.072519 | 0.107506 | 0.035623 | 0.23028 | 0.117048 | 0.117048 | 0.09542 | 0.049618 | 0 | 0 | 0.002572 | 0.202298 | 2,437 | 69 | 88 | 35.318841 | 0.80607 | 0 | 0 | 0.113208 | 0 | 0 | 0.264259 | 0.080837 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056604 | false | 0 | 0.132075 | 0 | 0.188679 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1764184768f6f8663aaf405c83cd769d6fb4865b | 1,735 | py | Python | CodeBERT/preprocess_data.py | code-backdoor/code-backdoor | 1eeb3d79aa8a54c8f08e8d0156b569de5edd974e | [
"MIT"
] | null | null | null | CodeBERT/preprocess_data.py | code-backdoor/code-backdoor | 1eeb3d79aa8a54c8f08e8d0156b569de5edd974e | [
"MIT"
] | null | null | null | CodeBERT/preprocess_data.py | code-backdoor/code-backdoor | 1eeb3d79aa8a54c8f08e8d0156b569de5edd974e | [
"MIT"
] | null | null | null | import gzip
import glob
import os
import json
DATA_DIR = '/mnt/wanyao/zsj/codesearchnet'
DEST_DIR = '/mnt/wanyao/zsj/CodeBERT/data/codesearch/train_valid'
def format_str(string):
for char in ['\r\n', '\r', '\n']:
string = string.replace(char, ' ')
return string
def read_tsv(input_file, quotechar=None):
with open(input_file, "r", encoding='utf-8') as f:
lines = []
for line in f.readlines():
line = line.strip().split('<CODESPLIT>')
if len(line) != 5:
continue
lines.append(line)
return lines
# preprocess the training data, without generating negative samples
def preprocess_train_data(lang):
path_list = glob.glob(os.path.join(DATA_DIR, '{}/train'.format(lang), '{}_train_*.jsonl.gz'.format(lang)))
path_list.sort(key=lambda t: int(t.split('_')[-1].split('.')[0]))
examples = []
for path in path_list:
print(path)
with gzip.open(path, 'r') as pf:
data = pf.readlines()
            for raw_line in data:  # avoid rebinding `data` while iterating over it
                line = json.loads(str(raw_line, encoding='utf-8'))
doc_token = ' '.join(line['docstring_tokens'])
code_token = ' '.join([format_str(token) for token in line['code_tokens']])
example = (str(1), line['url'], line['func_name'], doc_token, code_token)
example = '<CODESPLIT>'.join(example)
examples.append(example)
    dest_file = os.path.join(DEST_DIR, lang, 'raw_train.txt')
    os.makedirs(os.path.dirname(dest_file), exist_ok=True)  # ensure output dir
print(dest_file)
with open(dest_file, 'w', encoding='utf-8') as f:
f.writelines('\n'.join(examples))
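# Illustrative record layout (added): each line of raw_train.txt holds five
# fields joined by the '<CODESPLIT>' token:
#   1<CODESPLIT><url><CODESPLIT><func_name><CODESPLIT><doc tokens><CODESPLIT><code tokens>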
if __name__ == '__main__':
preprocess_train_data('python')
| 33.365385 | 110 | 0.616138 | 236 | 1,735 | 4.360169 | 0.419492 | 0.03207 | 0.034985 | 0.029155 | 0.029155 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005251 | 0.2317 | 1,735 | 51 | 111 | 34.019608 | 0.766692 | 0.035159 | 0 | 0 | 0 | 0 | 0.136962 | 0.048445 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.142857 | 0 | 0.261905 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
17690a767660c008e60f5e5fe1a87037ce7c37c2 | 28,559 | py | Python | bruhat/huffman.py | punkdit/bruhat | 3231eacc49fd3464542f7eb72684751371d9876c | [
"MIT"
] | 3 | 2020-04-07T13:21:30.000Z | 2020-07-15T02:07:20.000Z | bruhat/huffman.py | punkdit/bruhat | 3231eacc49fd3464542f7eb72684751371d9876c | [
"MIT"
] | null | null | null | bruhat/huffman.py | punkdit/bruhat | 3231eacc49fd3464542f7eb72684751371d9876c | [
"MIT"
] | null | null | null | #!/usr/bin/python3
"""
https://golem.ph.utexas.edu/category/2019/03/how_much_work_can_it_be_to_add.html#c055688
see also entropy.py
"""
import sys
from functools import reduce
from operator import add
from math import log, log2
from random import shuffle, choice, randint, seed
#import numpy
#from matplotlib import pyplot
#from bruhat.gelim import row_reduce, shortstr, kernel
#from qupy.dev import linalg
from bruhat.argv import argv
EPSILON = 1e-8
def is_close(a, b):
return abs(a-b) < EPSILON
def entropy(items, base=2):
"un-normalized entropy"
k = log(base)
sitems = sum(items)
r = 0.
for n in items:
r += n * log(n) / k
return -1*(r - sitems*log(sitems) / k)
# NB: this base-2 specialization intentionally shadows the definition above
def entropy(items):
"un-normalized entropy"
sitems = sum(items)
r = 0.
for n in items:
r += n * log2(n)
return -1*(r - sitems*log2(sitems))
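# Worked example (added): entropy([1, 3]) = 4*log2(4) - (1*log2(1) + 3*log2(3))
#   = 8 - 3*log2(3) ~= 3.245, i.e. 4 * H(1/4, 3/4) in normalized terms.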
class Multiset(object):
"un-normalized probability distribution"
def __init__(self, cs={}):
items = [(k, v) for (k, v) in cs.items() if v>0]
cs = dict(items)
self.cs = dict(cs) # map item -> count
self._len = sum(self.cs.values(), 0)
keys = list(cs.keys())
keys.sort() # canonicalize
self.keys = keys
def __str__(self):
cs = self.cs
keys = self.keys
items = reduce(add, [(str(key),)*cs[key] for key in keys], ())
items = ','.join(items)
return '{%s}'%items
__repr__ = __str__
def get_str(self):
cs = self.cs
keys = self.keys
items = [(key if cs[key]==1 else "%d%s"%(cs[key], key)) for key in keys]
items = '+'.join(items)
return items
def __eq__(self, other):
return self.cs == other.cs
def __ne__(self, other):
return self.cs != other.cs
def __mul__(X, Y):
"cartesian product of multisets"
if isinstance(Y, Multiset):
xcs, ycs = X.cs, Y.cs
cs = dict((x+y, xcs[x]*ycs[y]) for x in xcs for y in ycs)
return Multiset(cs)
return NotImplemented
def __rmul__(self, r):
"left multiplication by a number"
assert int(r) == r
assert r >= 0
cs = dict((k, r*v) for (k, v) in self.cs.items())
return Multiset(cs)
def __add__(X, Y):
# WARNING: not disjoint union (coproduct)
xcs, ycs = X.cs, Y.cs
cs = dict(xcs)
for k, v in ycs.items():
cs[k] = cs.get(k, 0) + v
return Multiset(cs)
def terms(self):
cs = self.cs
return [Multiset({k:cs[k]}) for k in self.keys]
def disjoint(X, Y):
# We only keep non-zero keys, so this works
lhs = set(X.cs.keys())
rhs = set(Y.cs.keys())
return not bool(lhs.intersection(rhs))
def contains(self, other):
"self contains other"
cs = self.cs
for k,v in other.cs.items():
if v > cs.get(k, 0):
return False
return True
def __len__(self):
return self._len
def isomorphic(self, other):
if self._len != other._len:
return False
lhs = set(self.cs.values())
rhs = set(other.cs.values())
return lhs == rhs
def entropy(self):
"un-normalized entropy"
cs = self.cs
items = [n for n in cs.values() if n>0]
return entropy(items)
def huffman(self, sort=False):
cs = self.cs
keys = list(cs.keys())
keys.sort()
if not keys:
return Node(self) # the empty tree
# build a tree, start with the leaves:
nodes = [Node(Multiset({key : cs[key]})) for key in keys]
while len(nodes) > 1:
if not sort:
shuffle(nodes)
else:
nodes.sort(key = str)
n = len(nodes)
best = (0, 1)
value = nodes[0].cost() + nodes[1].cost()
for i in range(n):
for j in range(i+1, n):
w = nodes[i].cost() + nodes[j].cost()
if w < value:
best = (i, j)
value = w
i, j = best
assert i < j, (i, j)
right = nodes.pop(j)
left = nodes.pop(i)
node = Node(left.X + right.X, left, right)
nodes.append(node)
return nodes[0]
def product_tree(self):
cs = self.cs
keys = list(cs.keys())
keys.sort()
for k in keys:
assert len(k) == 2
# fail..
def total_length(self):
n = sum([len(k)*v for (k, v) in self.cs.items()], 0)
return n
def W(self): # brain fart the name
return self.huffman().encode().total_length()
def W(item):
return item.W()
class Node(object):
"A tree over a multiset"
"mutable !!"
def __init__(self, X, left=None, right=None):
self.X = X
self._cost = len(X)
self.left = left
self.right = right
assert self.check(), str(self)
def cost(self):
return self._cost
def check(self):
X = self.X
left = self.left
right = self.right
if left is None and right is None:
return True
if not left.X.disjoint(right.X):
return False
return X == left.X + right.X and left.check() and right.check()
def __eq__(self, other):
X = self.X
left = self.left
right = self.right
if left is None and right is None:
return self.X == other.X
return self.X == other.X and (
self.left == other.left and self.right == other.right or
self.right == other.left and self.left == other.right)
def __ne__(self, other):
return not (self==other)
def clone(self):
X = self.X
left = self.left
right = self.right
if left is None and right is None:
return Node(self.X) # X is immutable.... for now..
return Node(self.X, left.clone(), right.clone())
def __getitem__(self, idx):
if type(idx) is int:
assert idx==0 or idx==1
node = [self.left, self.right][idx]
elif type(idx) is tuple:
node = self
for i in idx:
node = node[i] # recurse
else:
raise TypeError
return node
def __setitem__(self, idx, node):
assert isinstance(node, Node), node
if type(idx) is tuple and len(idx)==1:
idx = idx[0]
if type(idx) is int:
assert idx==0 or idx==1
assert self.has_children
child = [self.left, self.right][idx]
assert node.X == child.X
if idx==0:
self.left = node
else:
self.right = node
elif type(idx) is tuple:
assert len(idx)>1
child = self
for i in idx[:-1]:
child = child[i]
child[idx[-1]] = node # recurse
else:
raise TypeError
assert self.check()
@property
def has_children(self):
return self.left is not None and self.right is not None
def all_isos(self, other):
#if len(self.X) != len(other.X):
if not self.X.isomorphic(other.X):
return
if not self.has_children and not other.has_children:
yield 1
return
elif not self.has_children:
return
elif not other.has_children:
return
for l_isos in self.left.all_isos(other.left):
for r_isos in self.right.all_isos(other.right):
yield 1
for l_isos in self.right.all_isos(other.left):
for r_isos in self.left.all_isos(other.right):
yield 1
def isomorphic(self, other):
"up to multiset isomorphism.."
for iso in self.all_isos(other):
return True
return False
def _subtrees(self):
yield self
yield Node(self.X)
if not self.has_children:
return
X = self.X
left = self.left
right = self.right
lsubs = list(left.subtrees())
rsubs = list(right.subtrees())
for sub in lsubs:
yield sub
for sub in rsubs:
yield sub
for l in lsubs:
for r in rsubs:
if l.X + r.X == X:
yield Node(X, l, r)
def subtrees(self):
found = set()
for sub in self._subtrees():
key = str(sub)
if key in found:
continue
found.add(key)
yield sub
def encode(self):
" the (un-normalized) distribution of encoded words "
X = self.X
left = self.left
right = self.right
if left is None and right is None:
return Multiset({'' : len(X)})
left = left.encode()
right = right.encode()
left = Multiset(dict(('0'+k, v) for (k, v) in left.cs.items()))
right = Multiset(dict(('1'+k, v) for (k, v) in right.cs.items()))
return left + right
def W(self):
return self.encode().total_length()
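    # e.g. for T = ({a} : ({b} : {c,c})), T.encode() is {0,10,11,11} and
    # W(T) = 1 + 2 + 2 + 2 = 7: codeword lengths weighted by multiplicity
    # (added worked example).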
def __str__(self):
X = self.X
left = self.left
right = self.right
if left is None and right is None:
s = str(X)
assert s[0]=="{"
assert s[-1]=="}"
return "(%s)"%s[1:-1]
assert left and right
return "(%s : %s)" % (left, right)
__repr__ = __str__
def idxs(self): # dict .keys()
X = self.X
left = self.left
right = self.right
if left is None and right is None:
yield ()
else:
for idx, X in left.idxs():
yield (0,)+idx
for idx, X in right.idxs():
yield (1,)+idx
def leaves(self): # dict .values()
X = self.X
left = self.left
right = self.right
if left is None and right is None:
yield X
else:
for X in left.leaves():
yield X
for X in right.leaves():
yield X
def items(self): # dict .items()
X = self.X
left = self.left
right = self.right
if left is None and right is None:
yield ((), X)
else:
for idx, X in left.items():
yield ((0,)+idx, X)
for idx, X in right.items():
yield ((1,)+idx, X)
def __rmul__(self, r):
" left multiplication by a Multiset "
X = self.X
left = self.left
right = self.right
if left is None and right is None:
return Node(r*X)
return Node(r*X, r*left, r*right)
def __lmul__(self, r):
" right multiplication by a Multiset "
X = self.X
left = self.left
right = self.right
if left is None and right is None:
return Node(X*r)
return Node(X*r, left*r, right*r)
def __mul__(TX, TY):
if not isinstance(TY, Node):
return TX.__lmul__(TY)
X = TX.X
Y = TY.X
#print("__mul__", TX, TY)
XTY = X * TY
#print("__mul__", XTY)
#TXY = TX * Y
top = XTY
for (idx, r) in TY.items():
# glue
#print("glue", idx)
if not idx:
top = TX*r
else:
top[idx] = TX*r # recurse
return top
def __add__(self, other):
assert self.X.contains(other.X)
X = self.X
left = self.left
right = self.right
if not self.has_children:
assert self.X == other.X
return other
elif left.X == other.X:
assert not left.has_children
left = other
elif right.X == other.X:
assert not right.has_children
right = other
elif left.X.contains(other.X):
left = left+other # recurse
elif right.X.contains(other.X):
right = right+other # recurse
else:
assert 0, (self, other)
return Node(X, left, right)
# ------------- rendering ----------------------
def get_bbox(self, R=1.0):
"find (width,height) of bounding box for render"
X = self.X
left = self.left
right = self.right
if not self.has_children:
s = X.get_str()
W = (1 + 0.1*len(s)) * R # fudge this
return (W, R) # square
lbb = left.get_bbox(R)
rbb = right.get_bbox(R)
W = lbb[0] + rbb[0]
H = max(lbb[1], rbb[1]) + R
return (W, H)
def render(self, x=0, y=0, R=1.0, r=0.2, cvs=None, name=None):
"(x, y) is top center of this tree"
if cvs is None:
cvs = pyx.canvas.canvas()
X = self.X
left = self.left
right = self.right
cvs.fill(path.circle(x, y, r), [white])
cvs.stroke(path.circle(x, y, r))
if not self.has_children:
cvs.text(x, y-2.8*r, X.get_str(), south)
else:
w0, h0 = left.get_bbox(R)
w1, h1 = right.get_bbox(R)
w, h = self.get_bbox(R)
x0 = x-0.5*w+0.5*w0
x1 = x+0.5*w-0.5*w1
y0 = y1 = y-R
# render self...
cvs.fill(path.circle(x, y, 0.4*r), [black])
cvs.stroke(path.line(x0, y0, x, y), st_Thick)
cvs.stroke(path.line(x1, y1, x, y), st_Thick)
left.render(x0, y0, R=R, r=r, cvs=cvs)
right.render(x1, y1, R=R, r=r, cvs=cvs)
if name is not None:
cvs.writePDFfile(name)
return cvs
class Box(object):
pass
class HBox(Box):
def __init__(self, items, align="center"):
self.items = list(items)
self.align = align
def render(self, x=0., y=0., cvs=None, name=None, **kw):
"(x, y) is top center of this box"
if cvs is None:
cvs = pyx.canvas.canvas()
items = self.items
boxs = [item.get_bbox(**kw) for item in items]
w = sum(b[0] for b in boxs) # sum widths
h = max([b[1] for b in boxs]) # max of heights
x0 = x - 0.5*w
y0 = y - 0.5*h
align = self.align
for i, item in enumerate(self.items):
b = boxs[i]
if align == "center":
item.render(x0 + 0.5*b[0], y0+0.5*b[1], cvs=cvs, **kw)
x0 += b[0]
if name is not None:
cvs.writePDFfile(name)
class TextBox(Box):
def __init__(self, s, w=1, h=1):
self.s = s
self.w = w
self.h = h
def get_bbox(self):
return self.w, self.h
def render(self, x=0., y=0., cvs=None, name=None, **kw):
cvs.text(x, y-self.h, self.s, south)
def render():
# head = "transversal2018/"
head = "tmp/"
seed(0)
a = Multiset({"a" : 1})
b = Multiset({"b" : 1})
c = Multiset({"c" : 1})
d = Multiset({"d" : 1})
d = Multiset({"d" : 1})
e = Multiset({"e" : 1})
f = Multiset({"f" : 1})
g = Multiset({"g" : 1})
def mkrand(items, a=1, b=3):
Z = Multiset()
for A in items:
Z = Z + randint(a, b)*A
return Z
T = Node(a+b+2*c, Node(a+b, Node(a), Node(b)), Node(2*c))
#T.render(name="pic_a_b_2c.pdf")
S = ((a+b)*T)
S.render(name=head+"pic_left_a_b_2c.pdf")
#T = T*T
U = Node(a*a+b*a, Node(a*a), Node(b*a))
SU = S+U
box = HBox([S, TextBox("$+$"), U, TextBox("$=$"), SU])
box.render(name=head+"pic_add.pdf")
S = Node(a+b, Node(a), Node(b))
#(S*T).render(name=head+"pic_prod.pdf")
box = HBox([S, TextBox(r"$\times$"), T, TextBox("$=$"), S*T])
box.render(name=head+"pic_prod.pdf")
box = HBox([S, TextBox(r"$\otimes$"), T, TextBox("$=$"), S*T])
box.render(name=head+"pic_tensor.pdf")
X = mkrand([a,b,c,d,e,f,g])
TX = X.huffman(sort=True)
#print(W(TX))
if 0:
box = HBox([TX])
box.render(name=head+"pic_huffman.pdf")
X = 5*a+5*b+4*c+3*d+3*e
TX = X.huffman(sort=True)
print(W(TX))
box = HBox([TX])
box.render(name=head+"pic_huffman.pdf")
X = a + b + 2*c + 4*d
TX = X.huffman(sort=True)
box = HBox([TX])
box.render(name=head+"pic_dyadic.pdf")
print("OK")
def randmonomial(X):
"random monomial tree on X"
nodes = [Node(Y) for Y in X.terms()]
if not nodes:
return Node(X)
while len(nodes)>1:
left = nodes.pop(randint(0, len(nodes)-1))
right = nodes.pop(randint(0, len(nodes)-1))
node = Node(left.X+right.X, left, right)
nodes.append(node)
return nodes[0]
def randtree(X, monomial=True):
if monomial:
return randmonomial(X)
nodes = [Node(Y) for Y in X.terms()]
if not nodes:
return Node(X)
while len(nodes)>1:
left = nodes.pop(randint(0, len(nodes)-1))
right = nodes.pop(randint(0, len(nodes)-1))
if randint(0, 1):
node = Node(left.X+right.X, left, right)
nodes.append(node)
else:
node = Node(left.X+right.X)
nodes.append(node)
return nodes[0]
def main():
X = Multiset({"a":3, "b":1})
assert (X+X) == Multiset({"a":6, "b":2})
assert (X+X) == 2*X
#print(X, X.entropy())
XX = X*X
Y = Multiset({"a":2, "b":2})
#print(Y, Y.entropy())
assert str(Y) == "{a,a,b,b}"
A = Multiset({"a" : 1})
B = Multiset({"b" : 1})
C = Multiset({"c" : 1})
D = Multiset({"d" : 1})
E = Multiset({"e" : 1})
F = Multiset({"f" : 1})
G = Multiset({"g" : 1})
assert A.disjoint(B)
assert not (A+B).disjoint(B)
assert (A+2*B).terms() == [A, 2*B]
assert not A.contains(B)
assert (A+B).contains(B)
assert not (A+B).contains(2*B)
# ---------------------------------------------------------------------
assert Node(A+B, Node(A), Node(B)) == Node(B+A, Node(B), Node(A))
lhs, rhs = (Node(A+B+C, Node(A+B, Node(A), Node(B)), Node(C)),
Node(A+B+C, Node(A), Node(B+C, Node(B), Node(C))))
assert lhs.isomorphic(rhs)
T = Node(A+B+C, Node(A+B, Node(A), Node(B)), Node(C))
subs = list(T.subtrees())
assert len(subs) == 8
# test left multiplication
for r in [2, A, A+B]:
assert r*T == Node(r*A+r*B+r*C, Node(r*A+r*B, Node(r*A), Node(r*B)), Node(r*C))
T = Node(A+B+C+D, Node(A+B, Node(A), Node(B)), Node(C+D, Node(C), Node(D)))
subs = list(T.subtrees())
assert len(subs) == 13, len(subs)
S, T = Node(A+B, Node(A), Node(B)), Node(B)
assert S[0] != T
assert S[1] == T
U = S.clone()
assert U==S
U[1] = T
assert U[0] != T
assert U[1] == T
assert U==S
T = Node(A+B+C+D, Node(A+B, Node(A), Node(B)), Node(C+D, Node(C), Node(D)))
assert T[0] == S
T[0] = Node(A+B)
T = Node(2*A+B+C+D+E+F, Node(2*A+B+E), Node(C+D+F))
U = T.clone()
U[0] = Node(2*A+B+E, Node(2*A), Node(B+E))
U[0, 1] = Node(B+E, Node(B), Node(E))
assert U.clone() == U
T = Node(A+B, Node(A), Node(B))
S = Node(A+B+2*C, Node(A+B, Node(A), Node(B)), Node(2*C))
assert str(T*S) == "((((aa) : (ba)) : ((ab) : (bb))) : ((ac,ac) : (bc,bc)))"
def randmultiset(a=0, b=4):
Z = randint(a, b)*A + randint(a, b)*B + randint(a, b)*C + randint(a, b)*D + randint(a, b)*E
return Z
#seed(1)
for trial in range(1000):
X = randmultiset()
if not(len(X)):
continue
TX = randtree(X)
assert X.entropy() <= W(X) <= W(TX)
Y = randmultiset()
TY = randtree(Y)
# W is a derivation on monomial trees
assert W(TX*TY) == len(X)*W(TY) + W(TX)*len(Y)
HX = X.huffman()
HY = Y.huffman()
print(W(X*Y), W(HX*HY))
for trial in range(100):
X = randmultiset()
TX = randtree(X, False)
Y = randmultiset()
TY = randtree(Y, False)
assert W(Y*TX) == len(Y)*W(TX)
T = TX
for n in range(1, 4):
assert W(T) == n*len(X)**(n-1) * W(TX)
T = TX*T
#print("TX=%s, TY=%s"%(TX, TY))
# W is a derivation on non-monomial trees
assert W(TX*TY) == len(X)*W(TY) + W(TX)*len(Y)
return
# ---------------------------------------------------------------------
#print( ((X*Y).entropy(), len(X)*Y.entropy() + len(Y)*X.entropy()))
assert is_close((X*Y).entropy(), len(X)*Y.entropy() + len(Y)*X.entropy())
tree = X.huffman()
assert tree.X == X
#assert str(tree) == "({B} : {A,A,A})", repr(str(tree))
#assert str(tree.encode()) == "{0,1,1,1}"
tree = XX.huffman()
#assert str(tree.encode()) == "{0,0,0,0,0,0,0,0,0,10,10,10,110,110,110,111}"
assert XX.W() == 27
def mkrand(a=1, b=3):
Z = randint(a, b)*A + randint(a, b)*B + randint(a, b)*C #+ randint(a, b)*D + randint(a, b)*E
return Z
#seed(0)
for trial in range(1000):
X = mkrand(1, 5)
lhs = X.huffman()
rhs = X.huffman()
assert lhs.isomorphic(rhs) # huffman is unique up to isomorphism ? this can't be right..
for trial in range(100):
X = mkrand(1, 3)
T = X.huffman()
for S in T.subtrees():
assert S.check()
assert W(Multiset()) == 0
for trial in range(100):
a = randint(1, 3)
b = randint(1, 3)
c = randint(1, 3)
X = a*A + b*B + c*C
lhs = W(X*X)
rhs = 2*len(X)*W(X)
assert lhs <= rhs
#if lhs==rhs: # no nice characterization of this
# print(X)
#else:
# print("*")
for trial in range(100):
X = mkrand()
Y = mkrand()
S = X.huffman()
T = Y.huffman()
ST = (X*Y).huffman()
lhs = W(ST)
rhs = len(X)*W(T) + W(S)*len(Y)
#print(lhs, rhs)
assert lhs<=rhs
def mkdyadic(a=0, b=4, terms=[A, B, C, D, E]):
while 1:
cs = [2**randint(a, b) for t in terms]
c = sum(cs)
if bin(c).count('1')==1: # is power of 2
break
Z = reduce(add, [c*term for (c, term) in zip(cs, terms)])
return Z
for trial in range(100):
X = mkdyadic()
Y = mkdyadic()
#print(X, Y)
S = X.huffman()
T = Y.huffman()
ST = (X*Y).huffman()
lhs = W(ST)
rhs = len(X)*W(T) + W(S)*len(Y)
#print(lhs, rhs)
assert lhs==rhs
assert X.entropy() == W(X)
assert Y.entropy() == W(Y)
assert (X*Y).entropy() == lhs
return
for trial in range(1000):
a = randint(1, 3)
b = randint(1, 3)
c = randint(1, 3)
X = a*A + b*B + c*C
lhs = W(X)
for aa in range(a+1):
for bb in range(b+1):
for cc in range(c+1):
Y = aa*A + bb*B + cc*C
XY = (a-aa)*A + (b-bb)*B + (c-cc)*C
assert XY + Y == X
rhs = W(XY + len(Y)*D) + W(Y)
assert lhs <= rhs
if len(Y)==0:
assert XY == X
assert XY + len(Y)*D == X
assert lhs == rhs
return
for trial in range(100):
X = mkrand()
n = randint(2, 5)
assert n*X.W() == (n*X).W()
print(X)
lhs, rhs = n*X.huffman(), (n*X).huffman()
print(lhs, rhs)
print()
#assert n*X.huffman() == (n*X).huffman()
assert lhs.isomorphic(rhs)
assert X.huffman().check()
# print(Z.entropy(), Z.W())
X = 3*A + B
#print(X.huffman())
lhs, rhs = (X*X).W(), len(X)*X.W() + len(X)*X.W()
#print(lhs, rhs)
assert lhs < rhs
assert lhs == 27
assert rhs == 32
for trial in range(100):
X = mkrand(1, 3)
Y = mkrand(1, 3)
#assert (X*Y) == (Y*X) # nope ( not on the nose.. )
assert (X*Y).W() == (Y*X).W()
#assert (X*Y).huffman() == (Y*X).huffman() # nope ( not on the nose.. )
lhs, rhs = (X*Y).W(), len(X)*Y.W() + len(Y)*X.W()
assert lhs<=rhs
lhs, rhs = (X*Y).entropy(), len(X)*Y.entropy() + len(Y)*X.entropy()
assert is_close(lhs, rhs)
# Z = 25*A + 25*B + 20*C + 15*D + 15*E
def mkrand(items, a=1, b=3):
Z = Multiset()
for A in items:
Z = Z + randint(a, b)*A
return Z
for trial in range(100):
X = mkrand([A, B, C])
Y = mkrand([D, E, F])
#print(X, Y)
#print(X+Y)
lhs = W(X+Y)
rhs = W(X + len(Y)*D) + W(Y)
#print(lhs, rhs)
assert lhs <= rhs
lhs = (X+Y).entropy()
rhs = (X + len(Y)*D).entropy() + (Y).entropy()
assert is_close(lhs, rhs)
#print(lhs, rhs)
#print()
#break
for trial in range(100):
X0 = mkrand([A, B, C], 1, 3)
Y = mkrand([D, E, F], 1, 3)
print(X, Y)
for a in range(1, 10):
X = a*X0
lhs = W(X+Y)
rhs = W(X + len(Y)*G) + W(Y)
print(lhs, rhs)
assert lhs <= rhs
if lhs==rhs:
break
else:
fail
print()
return
seed(0)
while 1:
X = mkrand([A, B, C], 1, 3)
Y = mkrand([D, E, F], 1, 3)
print(X, Y)
a = 1
while 1:
print("[%s]"%a, end="", flush=True)
#aX = a*X
#lhs, rhs = W(aX*Y), len(aX)*W(Y) + W(aX)*len(Y)
aY = a*Y
lhs, rhs = W(X*aY), len(X)*W(aY) + W(X)*len(aY)
if lhs==rhs:
break
print(lhs, rhs)
assert lhs == a*W(X*Y)
assert rhs == a*(len(X)*W(Y) + W(X)*len(Y))
a += 1
#assert a<10
if a>10:break
print(".", end="", flush=True)
return
found = set()
#for trial in range(100):
while 1:
i = 2**randint(0, 3)
j = 2**randint(0, 3)
k = 2**randint(0, 3)
X = i*A + j*B + k*C
#X = mkrand([A, B, C], 1, 8)
lhs, rhs = X.entropy(), X.W()
if is_close(lhs, rhs):
#vals = list(X.cs.values())
vals = [i, j, k]
vals.sort()
vals = tuple(vals)
print(vals)
if vals not in found:
print(vals)
found.add(vals)
return
#X = 5*A + 1*B + C + 1*D
X = 1*A + 1*B + 1*C
X1 = 1
h = X.entropy()
print(h)
n = 1
while 1:
X1 = X1*X
lhs, rhs = (X1.W(), X1.entropy())
r = n * (len(X)**(n-1))
assert is_close(rhs/r, h)
print('\t', lhs / r)
# if len(X1) > 10000000:
# break
n += 1
print(len(X1))
try:
# yay globals...
import pyx
from pyx import path, deco, trafo, style, text, color, deformer
from pyx.color import rgb, cmyk
from pyx.color import rgbfromhexstring as rgbhex
black = rgb(0., 0., 0.)
blue = rgb(0., 0., 0.8)
lred = rgb(1., 0.4, 0.4)
red = rgb(1., 0.0, 0.0)
white = rgb(1., 1., 1.)
grey = rgb(0.75, 0.75, 0.75)
shade = grey
shade0 = rgb(0.25, 0.25, 0.25)
shade1 = rgb(0.80, 0.80, 0.80)
shade2 = rgb(0.85, 0.85, 0.85)
light_shade = rgb(0.85, 0.65, 0.1)
light_shade = rgb(0.9, 0.75, 0.4)
north = [text.halign.boxcenter, text.valign.top]
northeast = [text.halign.boxright, text.valign.top]
northwest = [text.halign.boxleft, text.valign.top]
south = [text.halign.boxcenter, text.valign.bottom]
southeast = [text.halign.boxright, text.valign.bottom]
southwest = [text.halign.boxleft, text.valign.bottom]
east = [text.halign.boxright, text.valign.middle]
west = [text.halign.boxleft, text.valign.middle]
center = [text.halign.boxcenter, text.valign.middle]
st_dashed = [style.linestyle.dashed]
st_dotted = [style.linestyle.dotted]
st_round = [style.linecap.round]
#st_mitre = [style.linecap.square]
st_thick = [style.linewidth.thick]
st_Thick = [style.linewidth.Thick]
st_THick = [style.linewidth.THick]
st_THIck = [style.linewidth.THIck]
st_THICk = [style.linewidth.THICk]
st_THICK = [style.linewidth.THICK]
except ImportError:
pass
if __name__ == "__main__":
if argv.render:
render()
else:
main()
| 26.105119 | 101 | 0.482755 | 4,274 | 28,559 | 3.178287 | 0.091951 | 0.008245 | 0.007951 | 0.010306 | 0.421084 | 0.340621 | 0.307126 | 0.283495 | 0.242933 | 0.207082 | 0 | 0.026142 | 0.357085 | 28,559 | 1,093 | 102 | 26.129003 | 0.713687 | 0.096607 | 0 | 0.362176 | 0 | 0.001236 | 0.02954 | 0 | 0 | 0 | 0 | 0 | 0.098888 | 1 | 0.075402 | false | 0.002472 | 0.013597 | 0.013597 | 0.190358 | 0.02225 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
176cfc7beca4767a39789010087bddac4d12b45d | 1,759 | py | Python | p384_Shuffle_an_Array.py | bzhou26/leetcode_sol | 82506521e2cc412f96cd1dfc3c8c3ab635f67f73 | [
"MIT"
] | null | null | null | p384_Shuffle_an_Array.py | bzhou26/leetcode_sol | 82506521e2cc412f96cd1dfc3c8c3ab635f67f73 | [
"MIT"
] | null | null | null | p384_Shuffle_an_Array.py | bzhou26/leetcode_sol | 82506521e2cc412f96cd1dfc3c8c3ab635f67f73 | [
"MIT"
] | null | null | null | '''
- Leetcode problem: 384
- Difficulty: Medium
- Brief problem description:
Shuffle a set of numbers without duplicates.
Example:
// Init an array with set 1, 2, and 3.
int[] nums = {1,2,3};
Solution solution = new Solution(nums);
// Shuffle the array [1,2,3] and return its result. Any permutation of [1,2,3] must equally likely to be returned.
solution.shuffle();
// Resets the array back to its original configuration [1,2,3].
solution.reset();
// Returns the random shuffling of array [1,2,3].
solution.shuffle();
- Solution Summary:
  Keep a copy of the original array for reset(); shuffle() swaps each index
  with a partner chosen at a random offset and direction.
- Used Resources:
--- Bo Zhou
'''
import random
from typing import List


class Solution:
def __init__(self, nums: List[int]):
self.origin = nums[:]
self.arr = nums
def reset(self) -> List[int]:
"""
Resets the array to its original configuration and return it.
"""
return self.origin
def shuffle(self) -> List[int]:
"""
Returns a random shuffling of the array.
"""
        # for each position i, pick a random offset and direction, then swap
        # with the resulting partner index j
        for i in range(len(self.arr)):
l = i
r = len(self.arr) - 1 - i
ranLen = random.randint(0, max(l, r))
ranDirect = 1
j = i
if ranLen <= min(l, r):
ranDirect = random.randint(0, 1)
if l > r:
if ranDirect == 1:
j = i - ranLen
else:
j = i + ranLen
else:
if ranDirect == 1:
j = i + ranLen
else:
j = i - ranLen
self.arr[i], self.arr[j] = self.arr[j], self.arr[i]
return self.arr
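    # Added note: a standard way to meet the "equally likely" requirement is
    # the Fisher-Yates shuffle; a minimal sketch (not the solution above):
    #
    #     def shuffle(self) -> List[int]:
    #         for i in range(len(self.arr) - 1, 0, -1):
    #             j = random.randint(0, i)
    #             self.arr[i], self.arr[j] = self.arr[j], self.arr[i]
    #         return self.arr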
# Your Solution object will be instantiated and called as such:
# obj = Solution(nums)
# param_1 = obj.reset()
# param_2 = obj.shuffle() | 23.77027 | 114 | 0.533258 | 229 | 1,759 | 4.069869 | 0.366812 | 0.060086 | 0.016094 | 0.035408 | 0.093348 | 0.06867 | 0.06867 | 0.06867 | 0.06867 | 0.06867 | 0 | 0.026247 | 0.350199 | 1,759 | 74 | 115 | 23.77027 | 0.789151 | 0.45992 | 0 | 0.333333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
17710ad793bc93dd8874daa574ca41c7a810cdf1 | 1,210 | py | Python | nestor/datasets/base.py | usnistgov/nestor-tmp | 6cc35dcf4dac029f94c4bc92783b5dc37bc205ce | [
"RSA-MD"
] | 16 | 2018-07-06T16:36:56.000Z | 2021-12-13T03:02:02.000Z | nestor/datasets/base.py | usnistgov/nestor-tmp | 6cc35dcf4dac029f94c4bc92783b5dc37bc205ce | [
"RSA-MD"
] | 46 | 2018-08-06T15:51:35.000Z | 2021-08-02T21:00:51.000Z | nestor/datasets/base.py | usnistgov/nestor-tmp | 6cc35dcf4dac029f94c4bc92783b5dc37bc205ce | [
"RSA-MD"
] | 7 | 2020-04-27T18:56:24.000Z | 2021-08-14T02:44:40.000Z | from pathlib import Path
import pandas as pd
def load_excavators(cleaned=False):
"""
Helper function to load excavator toy dataset.
Hodkiewicz, M., and Ho, M. (2016)
"Cleaning historical maintenance work order data for reliability analysis"
in Journal of Quality in Maintenance Engineering, Vol 22 (2), pp. 146-163.
    BscStartDate | Asset | OriginalShorttext | PMType | Cost
--- | --- | --- | --- | ---
initialization of MWO | which excavator this MWO concerns (A, B, C, D, E)| natural language description of the MWO| repair (PM01) or replacement (PM02) | MWO expense (AUD)
Args:
cleaned (bool): whether to return the original dataset (False) or the dataset with
keyword extraction rules applied (True), as described in Hodkiewicz and Ho (2016)
Returns:
pandas.DataFrame: raw data for use in testing nestor and subsequent workflows
"""
module_path = Path(__file__).parent
if cleaned:
csv_filename = module_path / "excavators-cleaned.csv"
else:
csv_filename = module_path / "excavators.csv"
df = pd.read_csv(csv_filename)
df["BscStartDate"] = pd.to_datetime(df.BscStartDate)
return df
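# Example usage (added sketch):
#   df = load_excavators(cleaned=True)
#   df.groupby('Asset')['Cost'].sum()  # total MWO expense (AUD) per excavator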
| 35.588235 | 175 | 0.676033 | 155 | 1,210 | 5.193548 | 0.63871 | 0.037267 | 0.042236 | 0.052174 | 0.077019 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022629 | 0.233058 | 1,210 | 33 | 176 | 36.666667 | 0.844828 | 0.631405 | 0 | 0 | 0 | 0 | 0.127321 | 0.058355 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.181818 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
177194c329dea268086e33e7d66d605e4b6732f1 | 2,094 | py | Python | server/test/test_server.py | byee123/battleships | 0cd527217c8727f7b3274d8a661cbf8d49aa4276 | [
"MIT"
] | 2 | 2020-11-02T08:04:06.000Z | 2020-11-25T12:20:45.000Z | server/test/test_server.py | byee123/battleships | 0cd527217c8727f7b3274d8a661cbf8d49aa4276 | [
"MIT"
] | 17 | 2020-11-02T04:30:17.000Z | 2020-12-07T00:35:34.000Z | server/test/test_server.py | byee123/battleships | 0cd527217c8727f7b3274d8a661cbf8d49aa4276 | [
"MIT"
] | 14 | 2020-10-05T08:15:59.000Z | 2020-11-28T10:31:40.000Z | import unittest
from game import Game
import server
REDIS_HOST = '192.168.20.50'
class TestServer(unittest.TestCase):
"""Please note that the tests in this suite only work if a Redis
host is available (see REDIS_HOST above).
"""
def test_redis_connection(self):
"""Test that the Battleship server can correctly find a Redis
instance.
"""
with server.Battleship(REDIS_HOST, db=1) as battleship:
self.assertTrue(battleship.ping_redis())
def test_redis_add_open_game(self):
"""Test that the Battleship server can add an open game to the
Redis instance.
"""
with server.Battleship(REDIS_HOST, db=1) as battleship:
con = battleship.redis_conn
con.flushdb()
result = con.get(battleship.OpenGames)
self.assertIsNone(result)
game = Game('New game!')
result = battleship.add_open_game(game)
self.assertTrue(result)
results = con.lrange(battleship.OpenGames, 0, -1)
self.assertEqual(len(results), 1)
self.assertEqual(results[0].decode('utf-8'), game.id)
result = battleship.close_open_game(game)
self.assertTrue(result)
results = con.lrange(battleship.OpenGames, 0, -1)
self.assertEqual(len(results), 0)
def test_find_game(self):
"""Test that the Battleship server can create a new game if no
open game is found and that it can find an open game in Redis
if one is actually there.
"""
with server.Battleship(REDIS_HOST, db=1) as battleship:
con = battleship.redis_conn
con.flushdb()
game, is_new = battleship.find_game_or_create()
self.assertTrue(is_new)
game = Game('new game')
result = battleship.add_open_game(game)
self.assertTrue(result)
found_game, is_new = battleship.find_game_or_create()
self.assertFalse(is_new)
self.assertEqual(game.id, found_game.id)
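# Entry point added so the suite can be run directly; it still requires a
# reachable Redis instance at REDIS_HOST.
if __name__ == '__main__':
    unittest.main()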
| 33.238095 | 70 | 0.617479 | 263 | 2,094 | 4.787072 | 0.292776 | 0.04448 | 0.028594 | 0.035743 | 0.554408 | 0.554408 | 0.554408 | 0.527403 | 0.467037 | 0.405083 | 0 | 0.014199 | 0.293696 | 2,094 | 62 | 71 | 33.774194 | 0.837052 | 0.189112 | 0 | 0.388889 | 0 | 0 | 0.021753 | 0 | 0 | 0 | 0 | 0 | 0.305556 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.194444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1772a508f28a639e2e47a3e3c85f76e1e18b4977 | 12,643 | py | Python | src/tenants/management/commands/update_lastseen.py | litedesk/litedesk-webserver-provision | 1576b9d3e5e2e64d1136d276767c2710cfb1938f | [
"Apache-2.0"
] | 1 | 2016-01-18T08:19:22.000Z | 2016-01-18T08:19:22.000Z | src/tenants/management/commands/update_lastseen.py | litedesk/litedesk-webserver-provision | 1576b9d3e5e2e64d1136d276767c2710cfb1938f | [
"Apache-2.0"
] | null | null | null | src/tenants/management/commands/update_lastseen.py | litedesk/litedesk-webserver-provision | 1576b9d3e5e2e64d1136d276767c2710cfb1938f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014, Deutsche Telekom AG - Laboratories (T-Labs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from optparse import make_option
from apiclient import errors
from apiclient.discovery import build
from dateutil import parser
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand
from oauth2client.client import SignedJwtAssertionCredentials
import httplib2
import pytz
from provisioning import models
class Command(BaseCommand):
# Check
# https://developers.google.com/admin-sdk/directory/v1/guides/authorizing
# for all available scopes
OAUTH_SCOPE = ['https://www.googleapis.com/auth/admin.directory.user',
'https://www.googleapis.com/auth/admin.directory.device.chromeos']
help = 'Updates the last seen timestamp for provisioned services.'
option_list = BaseCommand.option_list + (
make_option('--skip-okta',
action='store_true',
dest='skip-okta',
default=False,
help='Do not query Okta. Default=False'),
make_option('--skip-google',
action='store_true',
dest='skip-google',
default=False,
help='Do not query Google. Default=False'),
make_option('--skip-airwatch',
action='store_true',
dest='skip-airwatch',
default=False,
help='Do not query AirWatch. Default=False'),
make_option('--tenant',
dest='tenant',
default=1,
help='Tenant id to do this for. Default=1'),
)
def _parseDateTime(self, stamp):
parsed = parser.parse(stamp)
utc = parsed.astimezone(pytz.utc)
stripped = utc.replace(tzinfo=None)
return stripped
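    # e.g. _parseDateTime('2014-10-02T10:21:01.000Z') returns the naive UTC
    # datetime(2014, 10, 2, 10, 21, 1) (added note).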
def handle(self, *args, **options):
tenant = models.Tenant.objects.get(pk=options['tenant'])
okta_item = models.Okta.objects.get(tenant=tenant)
users = models.User.objects.filter(services=okta_item)
software_contenttype = ContentType.objects.get_for_model(
models.Software)
google_software = models.Software.objects.get(name='Google Account')
device_contenttype = ContentType.objects.get_for_model(
models.Device)
self.stdout.write("Okta users in database.")
user_dict = {}
for user in users:
username = '%s@%s' % (user.username, tenant.email_domain)
user_dict[username] = {'username': user.username, 'user': user}
self.stdout.write(username)
if not options['skip-okta']:
self.stdout.write("")
self.stdout.write("Get Okta user logins.")
okta_users = okta_item.get_users()
okta_item_type = ContentType.objects.get_for_model(okta_item)
for okta_user in okta_users:
okta_username = okta_user['profile']['login']
if okta_username in user_dict:
user_dict[okta_username].update(
{'okta_id': okta_user['id']})
if okta_user['lastLogin']:
models.LastSeenEvent.objects.create(
user=user_dict[okta_username]['user'],
item_type=okta_item_type,
object_id=okta_item.id,
last_seen=self._parseDateTime(okta_user['lastLogin']))
self.stdout.write(
'%s - %s' % (okta_username, okta_user['lastLogin']))
# Get Okta application SSO events
self.stdout.write("")
self.stdout.write("Get Okta SSO events.")
okta_client = okta_item.get_client()
usersoftwares = models.UserProvisionable.objects.filter(
user__tenant=tenant,
item_type=software_contenttype,
service=okta_item).exclude(
object_id=google_software.id)
            # Google account login is handled below, directly via the Google API
for usersoftware in usersoftwares:
oktatenantservice = usersoftware.item.tenantserviceasset_set.get(
service=okta_item)
event = okta_client.last_sso_event(
user_dict[usersoftware.user.tenant_email]['okta_id'],
oktatenantservice.get('application_id'))
if event:
models.LastSeenEvent.objects.create(
user=usersoftware.user,
item_type=software_contenttype,
object_id=usersoftware.object_id,
last_seen=self._parseDateTime(event['published']))
self.stdout.write(
'%s - %s -> %s' % (usersoftware.user.tenant_email,
usersoftware.item.name,
event and event['published'] or "never"))
if not options['skip-google']:
# Get Google lastseen
google_tenant_asset = tenant.tenantasset_set.get(
asset__name='Google Account')
# Run through the OAuth flow and retrieve credentials
certificate_file_path = os.path.join(
settings.CERTIFICATES_DIR, google_tenant_asset.get('CERTIFICATE_FILE_NAME')
)
with open(certificate_file_path) as f:
private_key = f.read()
credentials = SignedJwtAssertionCredentials(
google_tenant_asset.get('CLIENT_EMAIL'),
private_key,
scope=self.OAUTH_SCOPE,
sub=google_tenant_asset.get('ADMINISTRATOR')
)
# Create an httplib2.Http object and authorize it with our
# credentials
http = httplib2.Http()
http = credentials.authorize(http)
directory_service = build('admin', 'directory_v1', http=http)
# Get Google Account lastseen information
all_users = []
page_token = None
params = {'customer': 'my_customer'}
self.stdout.write("")
self.stdout.write("Get Google Account users")
while True:
try:
if page_token:
params['pageToken'] = page_token
current_page = directory_service.users().list(
**params).execute()
all_users.extend(current_page['users'])
page_token = current_page.get('nextPageToken')
if not page_token:
break
except errors.HttpError as error:
self.stderr.write('An error occurred: %s' % error)
break
for user in all_users:
if user['lastLoginTime'] == '1970-01-01T00:00:00.000Z':
continue
if models.UserProvisionable.objects.filter(
user__username=user['primaryEmail'].split('@')[0],
user__tenant=tenant,
item_type=software_contenttype,
object_id=google_software.id).exists():
models.LastSeenEvent.objects.create(
user=user_dict[user['primaryEmail']]['user'],
item_type=software_contenttype,
object_id=google_software.id,
last_seen=self._parseDateTime(user['lastLoginTime']))
self.stdout.write(
user['primaryEmail'] + " - " + user['lastLoginTime'])
# Get Google Device lastseen information
all_devices = []
page_token = None
params = {'customerId': 'my_customer'}
while True:
try:
if page_token:
params['pageToken'] = page_token
current_page = directory_service.chromeosdevices().list(
**params).execute()
all_devices.extend(current_page['chromeosdevices'])
page_token = current_page.get('nextPageToken')
if not page_token:
break
except errors.HttpError as error:
self.stderr.write('An error occurred: %s' % error)
break
self.stdout.write("")
self.stdout.write("Get Google Devices")
chromebook_device = models.Device.objects.get(name='Chromebook')
for device in all_devices:
if models.UserProvisionable.objects.filter(
user__username=device['annotatedUser'].split('@')[0],
user__tenant=tenant,
item_type=device_contenttype,
object_id=chromebook_device.id).exists():
models.LastSeenEvent.objects.create(
user=user_dict[device['annotatedUser']]['user'],
item_type=device_contenttype,
object_id=chromebook_device.id,
last_seen=self._parseDateTime(device['lastSync']))
self.stdout.write('%s - %s -> %s' % (device['annotatedUser'],
device[
'serialNumber'],
device['lastSync']))
if not options['skip-airwatch']:
self.stdout.write("")
self.stdout.write("Get AirWatch Devices & Platform usage")
airwatch_item = models.AirWatch.objects.get(tenant=tenant)
airwatch_client = airwatch_item.get_client()
endpoint = 'mdm/devices/search'
iPad_device = models.Device.objects.get(name='iPad')
iPhone_device = models.Device.objects.get(name='iPhone')
airwatch_item_type = ContentType.objects.get_for_model(airwatch_item)
airwatch_users = models.User.objects.filter(services=airwatch_item)
for user in airwatch_users:
response = airwatch_client.call_api(
'GET', endpoint, params={'user': user.username})
response.raise_for_status()
if response.status_code == 200:
devices = response.json().get('Devices')
newest_seen = parser.parse(devices[0]['LastSeen'])
for device in devices:
seen = parser.parse(device['LastSeen'])
if seen > newest_seen:
newest_seen = seen
if device['Model'].startswith(iPad_device.name):
device_item = iPad_device
elif device['Model'].startswith(iPhone_device.name):
device_item = iPhone_device
else:
device_item = None
models.LastSeenEvent.objects.create(
user=user,
item_type=device_contenttype,
object_id=device_item.id,
last_seen=seen)
self.stdout.write(
"%s - %s -> %s" % (user, device['SerialNumber'],
device['LastSeen']))
self.stdout.write("%s -> %s" % (user, newest_seen))
models.LastSeenEvent.objects.create(
user=user,
item_type=airwatch_item_type,
object_id=airwatch_item.id,
last_seen=newest_seen)
| 45.315412 | 91 | 0.536423 | 1,206 | 12,643 | 5.454395 | 0.237148 | 0.027364 | 0.041046 | 0.029188 | 0.331864 | 0.277896 | 0.232289 | 0.144117 | 0.106415 | 0.060201 | 0 | 0.005046 | 0.373013 | 12,643 | 278 | 92 | 45.478417 | 0.824776 | 0.081547 | 0 | 0.263393 | 0 | 0 | 0.111169 | 0.003884 | 0 | 0 | 0 | 0 | 0.008929 | 1 | 0.008929 | false | 0 | 0.053571 | 0 | 0.084821 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1773d93609383bcbb4fab691a8762940ea340e8e | 1,470 | py | Python | pset6/dna/dna.py | vipsum/cs50 | b3aa05ae470faea657343644a4073814825ceb83 | [
"MIT"
] | null | null | null | pset6/dna/dna.py | vipsum/cs50 | b3aa05ae470faea657343644a4073814825ceb83 | [
"MIT"
] | null | null | null | pset6/dna/dna.py | vipsum/cs50 | b3aa05ae470faea657343644a4073814825ceb83 | [
"MIT"
] | null | null | null | import csv
import sys
# error checking
if len(sys.argv) != 3:
print("ERROR. Usage: Usage: python dna.py data.csv sequence.txt")
sys.exit(1)
# open the CSV database and read the DNA sequence text
csvFile = open(sys.argv[1])
DNAsequence = open(sys.argv[2]).read()
# read the CSV rows into a list of dictionaries
csvReader = csv.DictReader(csvFile)
peopleList = []
for person in csvReader:
peopleList.append(person)
# Short Tandem Repeats (STRs). An STR is a short sequence of DNA bases that
# tends to repeat consecutively numerous times at specific locations inside of a person’s DNA.
# grab the STR keys from the first row and put them in a list
strList = list(peopleList[0].keys())
# remove the name key from the list since it is not an STR
strList.remove("name")
strCounter = {}
for strKey in strList:
tmpDNAsequence = DNAsequence
    strCounter[strKey] = '0'  # store counts as strings so the final comparison with the CSV rows works
counter = 0
strPosition = tmpDNAsequence.find(strKey)
while strPosition >= 0:
counter += 1
tmpDNAsequence = tmpDNAsequence.replace(strKey, "", 1)
tmpStrPosition = tmpDNAsequence.find(strKey)
if strPosition != tmpStrPosition:
if counter > int(strCounter[strKey]):
strCounter[strKey] = str(counter)
counter = 0
strPosition = tmpStrPosition
for person in peopleList:
clonePerson = person.copy()
del clonePerson["name"]
if clonePerson == strCounter:
print(person["name"])
sys.exit(0)
print("No match")
| 34.186047 | 94 | 0.687075 | 188 | 1,470 | 5.37234 | 0.494681 | 0.020792 | 0.021782 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010408 | 0.215646 | 1,470 | 42 | 95 | 35 | 0.865568 | 0.253742 | 0 | 0.057143 | 0 | 0 | 0.069789 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.057143 | 0 | 0.057143 | 0.085714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1777e634f97948573907da1787803d38c27f6c8f | 311 | py | Python | script_utils/read_file.py | img-caption-mania/img_caption_dl_textAnalytic | 444385d928687673b5286ebb0a9b598de019916f | [
"Apache-2.0"
] | null | null | null | script_utils/read_file.py | img-caption-mania/img_caption_dl_textAnalytic | 444385d928687673b5286ebb0a9b598de019916f | [
"Apache-2.0"
] | null | null | null | script_utils/read_file.py | img-caption-mania/img_caption_dl_textAnalytic | 444385d928687673b5286ebb0a9b598de019916f | [
"Apache-2.0"
] | 2 | 2020-06-30T20:53:09.000Z | 2021-09-30T10:29:45.000Z | import os, sys, re, natsort
ls = [this for this in os.listdir(os.getcwd() + '/' + str(sys.argv[1]))]
# ls = ls.sort(key=lambda f: int(re.sub('\D', '', f)))
ls = natsort.natsorted(ls,reverse=False)
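# What natsort buys over a plain sort (illustrative filenames, assumed data):
#   sorted(['img10.png', 'img2.png'])            -> ['img10.png', 'img2.png']
#   natsort.natsorted(['img10.png', 'img2.png']) -> ['img2.png', 'img10.png']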
with open(os.getcwd() + '/' + str(sys.argv[2]), 'w') as f:
for item in ls:
f.write("%s\n" % item)
| 25.916667 | 72 | 0.569132 | 55 | 311 | 3.218182 | 0.6 | 0.090395 | 0.124294 | 0.158192 | 0.20339 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007937 | 0.189711 | 311 | 11 | 73 | 28.272727 | 0.694444 | 0.167203 | 0 | 0 | 0 | 0 | 0.027344 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
177931d9295fee1cf93668dbfec3ca5bb4baf4e5 | 1,281 | py | Python | utils.py | sc2h6o/DT | d1cde39c778d827efeb89c1b1d4d54d13214b51f | [
"MIT"
] | null | null | null | utils.py | sc2h6o/DT | d1cde39c778d827efeb89c1b1d4d54d13214b51f | [
"MIT"
] | null | null | null | utils.py | sc2h6o/DT | d1cde39c778d827efeb89c1b1d4d54d13214b51f | [
"MIT"
] | null | null | null | import numpy as np
import cv2
import random
from math import exp  # needed by transform()/transform_inv(); missing from the original
def int_(x):
if x==int(x):
return int(x)
else:
return int(x+1)
def rdint(x):
return int(round(x))
def IoU(box1, box2):
x1 = max(box1[0], box2[0])
y1 = max(box1[1], box2[1])
x2 = min(box1[0]+box1[2], box2[0]+box2[2])
y2 = min(box1[1]+box1[3], box2[1]+box2[3])
if x1>=x2 or y1>=y2:
return 0
else:
        # area of each box; the original computed box1[2] * box2[3] for both,
        # a copy-paste bug
        a1 = box1[2] * box1[3]
        a2 = box2[2] * box2[3]
        inter = (x2 - x1) * (y2 - y1) * 1.0
        # true IoU divides by the union, not the plain sum of the areas
        return inter / (a1 + a2 - inter)
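# Quick sanity check for the fixed IoU (illustrative boxes, not from the
# original code): IoU((0, 0, 10, 10), (5, 5, 10, 10)) gives intersection 25,
# union 175, so roughly 0.143.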
def transform(bbox, delta=(0,0,-0,-0)):
(x,y,w,h) = bbox
cx = x + 0.5 * w
cy = y + 0.5 * h
w = w / exp(delta[2])
h = h / exp(delta[3])
cx -= delta[0] * w
cy -= delta[1] * h
x = cx - 0.5 * w
y = cy - 0.5 * h
return (int(round(x)),int(round(y)),int(round(w)),int(round(h)))
def transform_inv(bbox, delta):
(x,y,w,h) = bbox
cx = x + 0.5 * w
cy = y + 0.5 * h
cx += delta[0] * w
cy += delta[1] * h
w = w * exp(delta[2])
h = h * exp(delta[3])
x = cx - 0.5 * w
y = cy - 0.5 * h
return (int(round(x)),int(round(y)),int(round(w)),int(round(h)))
def padding(bbox, scale, minpad):
x,y,w,h = bbox
pad = max(int(scale*min(w,h)), minpad)
x -= pad
y -= pad
w += 2 * pad
h += 2 * pad
return (x,y,w,h)
def scaleBox(bbox, scale):
x,y,w,h = bbox
return (rdint(scale*x), rdint(scale*y), rdint(scale*w), rdint(scale*h))
| 20.333333 | 72 | 0.54879 | 274 | 1,281 | 2.558394 | 0.167883 | 0.10271 | 0.021398 | 0.028531 | 0.356633 | 0.333809 | 0.333809 | 0.333809 | 0.28388 | 0.28388 | 0 | 0.084325 | 0.213115 | 1,281 | 62 | 73 | 20.66129 | 0.611111 | 0 | 0 | 0.296296 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12963 | false | 0 | 0.055556 | 0.018519 | 0.351852 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
177b2b2fc328442186cfa83ce8437c5a552fcc24 | 1,651 | py | Python | models/conv_lstm.py | i1idan/schizophrenia-diagnosis-eeg-signals | 50e51c16c6df0a61b5e62223c404039d659c9d48 | [
"MIT"
] | 5 | 2021-12-24T18:20:16.000Z | 2022-01-27T19:45:28.000Z | models/conv_lstm.py | i1idan/schizophrenia-diagnosis-eeg-signals | 50e51c16c6df0a61b5e62223c404039d659c9d48 | [
"MIT"
] | null | null | null | models/conv_lstm.py | i1idan/schizophrenia-diagnosis-eeg-signals | 50e51c16c6df0a61b5e62223c404039d659c9d48 | [
"MIT"
] | 1 | 2021-12-24T18:19:23.000Z | 2021-12-24T18:19:23.000Z | from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import LSTM, Conv1D, GRU
from tensorflow.keras.layers import TimeDistributed
from tensorflow.keras.layers import MaxPooling1D
from tensorflow.keras.models import Model
class ConvLstm:
def __init__(self, **kwargs):
self.n_length = 100
self.n_features = 1
def get_model(self) -> Model:
model = Sequential()
model.add(TimeDistributed(Conv1D(filters=8, kernel_size=5, activation='relu'),
input_shape=(None, self.n_length, self.n_features)
)
)
model.add(TimeDistributed(Conv1D(filters=4, kernel_size=5, activation='relu'),
input_shape=(None, self.n_length, self.n_features)))
model.add(TimeDistributed(Conv1D(filters=2, kernel_size=5, activation='relu'),
input_shape=(None, self.n_length, self.n_features)))
model.add(Dropout(0.5))
model.add(TimeDistributed(MaxPooling1D(pool_size=2)))
model.add(TimeDistributed(Flatten()))
model.add(LSTM(512))
model.add(Dropout(0.25))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
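    # Note added for clarity: because every Conv1D is wrapped in
    # TimeDistributed, the compiled model expects batches shaped
    # (batch, n_steps, self.n_length, self.n_features) — here "n_steps" is an
    # assumed name for the unconstrained time axis — i.e. a signal split into
    # windows of 100 samples with a single feature each.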
if __name__ == '__main__':
model = ConvLstm().get_model()
model.summary()
| 39.309524 | 89 | 0.643852 | 193 | 1,651 | 5.352332 | 0.310881 | 0.085189 | 0.147144 | 0.145208 | 0.575992 | 0.301065 | 0.301065 | 0.260407 | 0.260407 | 0.260407 | 0 | 0.02476 | 0.241672 | 1,651 | 41 | 90 | 40.268293 | 0.800319 | 0 | 0 | 0.114286 | 0 | 0 | 0.037553 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.228571 | 0 | 0.342857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
177e8c1d5af7739ef2b33bcff39f04682a94721f | 2,368 | py | Python | Python/CoviDetect/covid_detect/sub_app/views.py | Idhant-6/HAcktoberfest-2021-With-Python | ae6f8b5aadeca5a7329fe8e8d7de3f5069e687a1 | [
"MIT"
] | 14 | 2021-10-01T16:53:27.000Z | 2021-10-17T13:15:44.000Z | Python/CoviDetect/covid_detect/sub_app/views.py | Idhant-6/HAcktoberfest-2021-With-Python | ae6f8b5aadeca5a7329fe8e8d7de3f5069e687a1 | [
"MIT"
] | 37 | 2021-10-01T17:14:52.000Z | 2021-10-21T17:26:14.000Z | Python/CoviDetect/covid_detect/sub_app/views.py | Idhant-6/Hacktoberfest-2021 | ae6f8b5aadeca5a7329fe8e8d7de3f5069e687a1 | [
"MIT"
] | 38 | 2021-10-01T16:59:16.000Z | 2021-10-30T16:05:31.000Z | from django.http import request
from django.shortcuts import render,HttpResponse
import cv2
import numpy as np
import base64
import joblib
from numpy.core.defchararray import join
import sklearn
import pywt
def w2d(img, mode='haar', level=1):
imArray = img
#Datatype conversions
#convert to grayscale
imArray = cv2.cvtColor( imArray,cv2.COLOR_RGB2GRAY )
#convert to float
imArray = np.float32(imArray)
    imArray /= 255
# compute coefficients
coeffs=pywt.wavedec2(imArray, mode, level=level)
#Process Coefficients
coeffs_H=list(coeffs)
    coeffs_H[0] *= 0
# reconstruction
    imArray_H = pywt.waverec2(coeffs_H, mode)
    imArray_H *= 255
imArray_H = np.uint8(imArray_H)
return imArray_H
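# Intuition (comment added, not in the original): zeroing the approximation
# coefficients (coeffs_H[0] *= 0) before reconstruction keeps only the wavelet
# detail, so w2d(img, 'db1', 5) acts as an edge/texture extractor whose output
# is later stacked with the raw pixels in clf() below.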
def get_cv2_image_from_base64_string(b64str):
encoded_data = b64str.split(',')[1]
nparr = np.frombuffer(base64.b64decode(encoded_data), np.uint8)
img = cv2.imdecode(nparr,cv2.IMREAD_COLOR)
return img
def clf(img_base64):
result =[]
img = get_cv2_image_from_base64_string(img_base64)
scalled_raw_img = cv2.resize(img, (32,32))
img_har = w2d(img,'db1',5)
scalled_har_img = cv2.resize(img_har,(32,32))
combined_img = np.vstack((scalled_raw_img.reshape(32*32*3,1),scalled_har_img.reshape(32*32*1,1)))
len_img_array = 32*32*3 + 32*32
final = combined_img.reshape(1,len_img_array).astype(float)
with open('static/model_svm.pkl','rb') as f:
model_ = joblib.load(f)
result.append({
'prediction':model_.predict(final)[0],
'probability': np.round(model_.predict_proba(final),2).tolist()[0]
})
return result
# Create your views here.
def home(request):
return render(request,'home.html')
def classify_xray(request):
    if request.method == "POST":
print('post')
img_string = request.POST.get('img_string','')
pred = clf(img_string)
keys_ = ['COVID-19', 'Other Lung Infection', 'Normal', 'Viral Pneumonia']
print(pred)
pred_no = pred[0]['prediction']
prediction = keys_[pred_no]
probability = int(float(pred[0]['probability'][pred_no])*100)
        context = {
'prediction':prediction,
'probability':probability
}
        return render(request, 'classify.html', context)
# def test_(request):
# return render(request,'test.html')
| 27.858824 | 101 | 0.669341 | 319 | 2,368 | 4.796238 | 0.38558 | 0.015686 | 0.037255 | 0.019608 | 0.035294 | 0.035294 | 0 | 0 | 0 | 0 | 0 | 0.046635 | 0.203125 | 2,368 | 84 | 102 | 28.190476 | 0.764176 | 0.082348 | 0 | 0 | 0 | 0 | 0.084142 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084746 | false | 0 | 0.152542 | 0.016949 | 0.322034 | 0.033898 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
17801b23c209f52e4859df74018bcf887604f24c | 5,060 | py | Python | tube_dl/formats.py | hahv/tube_dl | 4641c48d74c1881ddd6785784a3884bdea2ca674 | [
"MIT"
] | 17 | 2020-12-04T16:37:22.000Z | 2022-02-26T10:19:02.000Z | tube_dl/formats.py | hahv/tube_dl | 4641c48d74c1881ddd6785784a3884bdea2ca674 | [
"MIT"
] | 11 | 2020-12-13T01:41:33.000Z | 2022-02-19T12:58:50.000Z | tube_dl/formats.py | hahv/tube_dl | 4641c48d74c1881ddd6785784a3884bdea2ca674 | [
"MIT"
] | 10 | 2020-12-26T04:47:18.000Z | 2022-02-10T09:49:19.000Z | import string
import os
import requests
from tube_dl.extras import Output
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36', 'referer': 'https: //youtube.com'}
class Format:
def __init__(self, category, description, title, stream_data: dict):
self.data = stream_data
self.category = category
self.description = description
self.title = title
self.itag = self.data['itag']
self.mime = self.data['mimeType']
self.acodec = self.data['acodec']
self.vcodec = self.data['vcodec']
self.size = self.data['size']
self.fps = self.data['fps']
self.quality = self.data['quality']
self.abr = self.data['abr']
self.url = self.data['url']
self.adaptive = self.data['adaptive']
self.progressive = self.data['progressive']
    def safe_filename(self, name: str):
valid_chars = "-_() %s%s" % (string.ascii_letters, string.digits)
filename = ''.join(char for char in name if char in valid_chars)
return filename
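    # Illustrative example (input string and result assumed, not from the
    # original source):
    #   self.safe_filename('My: Video / Title?') -> 'My Video  Title'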
def download(self, force_filename=False, onprogress=None, path=None, file_name=None, skip_existing=False):
'''
        This function downloads the format selected by the user.
Params :
onprogress: Function - If defined, following data will be returned to the specified function
1. Chunk - The file Chunk
2. bytes_done - Total count of bytes done downloading
3. total_size - Total size of the file. Extracted from header
path : Str - Defines the path where to keep the file
            file_name : Str - Defines the name to save the file under. To avoid saving errors, safe_filename is used to strip unsafe characters from it.
'''
url = self.url
        if not isinstance(url, str):
            raise Exception('download() must be called on a single Format, not a list of Formats')
if file_name is None:
file_name = self.title
if force_filename is False:
file_name = self.safe_filename(file_name)
else:
file_name = file_name
_, extension = self.mime.split('/')
if path is None:
path = os.getcwd()
final_path = f'{path}{os.path.sep}{file_name}.{extension}'
def start():
response = requests.get(url, stream=True, headers=headers)
total_size_in_bytes = int(response.headers.get('content-length', 0))
block_size = 1024
bytes_done = 0
f = open(final_path, 'wb')
try:
for data in response.iter_content(block_size):
f.write(data)
bytes_done += block_size
if onprogress is not None:
onprogress(bytes_done=bytes_done, total_bytes=total_size_in_bytes)
f.close()
except Exception:
start()
if skip_existing is False:
start()
else:
if os.path.exists(final_path) is False:
start()
else:
print('Skipping Files : Existing check is True')
return Output(self.description, final_path)
def __repr__(self):
return f'<Format : itag={self.itag}, mimeType={self.mime}, size={self.size}, acodec={self.acodec}, vcodec={self.vcodec}, fps={self.fps}, quality={self.quality}, abr={self.abr}, progressive={self.progressive}, adaptive={self.adaptive} >'
class list_streams:
def __init__(self, data):
self.data = data
def __repr__(self):
return f'{self.data}'
def first(self):
return self.data[0]
def last(self):
return self.data[-1]
def filter_by(
self,
progressive=False,
only_audio=False,
adaptive=False,
itag=None,
fps=None,
quality=None,
no_audio=None
):
content = self.data
content_list = list()
for i in content:
if no_audio is True:
if i.acodec is None:
content_list.append(i)
if only_audio is True:
if i.mime.split('/')[0].lower() == 'audio':
content_list.append(i)
if quality is not None:
if i.quality.lower() == quality.lower():
content_list.append(i)
if fps is not None:
if i.fps == fps:
content_list.append(i)
if itag is not None:
if i.itag == itag:
content_list.append(i)
if adaptive is True:
if i.adaptive is True:
content_list.append(i)
if progressive is True:
if i.progressive is True:
content_list.append(i)
self.data = content_list
return content_list
| 37.481481 | 254 | 0.562055 | 619 | 5,060 | 4.470113 | 0.273021 | 0.054933 | 0.043007 | 0.045537 | 0.090351 | 0.017347 | 0 | 0 | 0 | 0 | 0 | 0.012261 | 0.33913 | 5,060 | 134 | 255 | 37.761194 | 0.815191 | 0.112451 | 0 | 0.136364 | 0 | 0.018182 | 0.14263 | 0.041497 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.036364 | 0.036364 | 0.209091 | 0.009091 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
17810377c3837f4b8580799808c6d976e9b3f30b | 619 | py | Python | hopperpw/hopperpw/urls.py | teddywest32/hopper.pw | ff80ded3d34a5f33a5770d5e69405a66ff649a62 | [
"BSD-3-Clause"
] | 81 | 2015-01-03T20:35:52.000Z | 2021-01-05T22:33:15.000Z | hopperpw/hopperpw/urls.py | teddywest32/hopper.pw | ff80ded3d34a5f33a5770d5e69405a66ff649a62 | [
"BSD-3-Clause"
] | 9 | 2015-02-04T15:14:12.000Z | 2021-09-19T22:49:27.000Z | hopperpw/hopperpw/urls.py | teddywest32/hopper.pw | ff80ded3d34a5f33a5770d5e69405a66ff649a62 | [
"BSD-3-Clause"
] | 34 | 2015-01-01T06:19:23.000Z | 2021-11-23T11:35:26.000Z | # coding=utf-8
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^allauth/', include('allauth.urls')),
url(r'^accounts/', include('accounts.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('main.urls')),
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
if settings.DEBUG:
import debug_toolbar
urlpatterns += patterns(
url(r'^__debug__/', include(debug_toolbar.urls)),
)
| 28.136364 | 67 | 0.701131 | 80 | 619 | 5.3125 | 0.3375 | 0.047059 | 0.098824 | 0.084706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00189 | 0.145396 | 619 | 21 | 68 | 29.47619 | 0.801512 | 0.019386 | 0 | 0 | 0 | 0 | 0.119008 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.294118 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
178374b80192e344de3181d10c48f542eee0cc58 | 1,828 | py | Python | export_name_list.py | brandyn-gilbert/DAT-281-CAPSTONE | d86dd62eab164e6845dee63954cacc0324a449bc | [
"MIT"
] | null | null | null | export_name_list.py | brandyn-gilbert/DAT-281-CAPSTONE | d86dd62eab164e6845dee63954cacc0324a449bc | [
"MIT"
] | null | null | null | export_name_list.py | brandyn-gilbert/DAT-281-CAPSTONE | d86dd62eab164e6845dee63954cacc0324a449bc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 12, 2021
Developed for UIF to more easily handle the growing number of alumni they have,
and to track interactions with said alumni.
Final Project for CCAC DAT-281
@author: BKG
"""
import os
import sqlite3
from sqlite3 import Error
import pandas as pd
def main(location):
"""
From the db, pulls the following columns from the listed tables, formats,
the dataframe, then saves it to a .csv file. (see: 'query_read')
Specifically, this gives the user a list of all alumni with their ID nums.
Parameters
----------
location : STR
String of the path to the folder the user previously selected.
Returns
-------
None.
"""
query = ''' SELECT Alumni_ID.ID_number, first_name, last_name,
graduation_year, CORE_student, birthday
FROM Alumni_ID
INNER JOIN Basic_Info on Basic_Info.ID_number = Alumni_ID.ID_number
ORDER BY last_name ASC
'''
connection = _db_connection()
output = pd.read_sql(query, con=connection)
connection.close()
col_names = ['ID Number', #Print friendly column names
'First Name',
'Last Name',
'Graduation Year',
'CORE?',
'Birthday']
    output.columns = col_names  # rename the df columns (the original's ".cloumns" typo silently did nothing)
file_name = 'Alumni List.csv'
os.chdir(location)
output.to_csv(file_name, index=False, encoding='utf-8')
def _db_connection():
'''
Connects to the .db file
Returns
-------
connection : sqlite db connection
'''
try:
connection = sqlite3.connect('Data\\UIF_Alumni_DB.db')
    except Error as err:
        print(err)
return connection
if __name__ == "__main__":
    main(os.getcwd())  # main() requires a location; the current directory is an assumed default, the original passed nothing
| 24.702703 | 83 | 0.609409 | 230 | 1,828 | 4.695652 | 0.530435 | 0.02963 | 0.018519 | 0.02963 | 0.064815 | 0.064815 | 0.064815 | 0 | 0 | 0 | 0 | 0.010895 | 0.297046 | 1,828 | 73 | 84 | 25.041096 | 0.829572 | 0.379103 | 0 | 0 | 0 | 0 | 0.372249 | 0.021053 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.125 | 0 | 0.21875 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
178891f94229c492f6f9626a23fde3974ba0c8f5 | 2,071 | py | Python | old/Graph.py | csdevto/tradingbot | b024236f0d2801380e7f86aa1660c44060728eb1 | [
"MIT"
] | null | null | null | old/Graph.py | csdevto/tradingbot | b024236f0d2801380e7f86aa1660c44060728eb1 | [
"MIT"
] | null | null | null | old/Graph.py | csdevto/tradingbot | b024236f0d2801380e7f86aa1660c44060728eb1 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#Data Source
import yfinance as yf
import time, datetime, math
from datetime import datetime
import sqlite3
#Interval required 5 minutes
con = sqlite3.connect("DB/stocks.db")
#con.row_factory = sqlite3.Row
stock = 'UBER'
data = pd.read_sql_query("SELECT * FROM stocks_hist WHERE symbol='" + stock + "' AND Datetime >= '2021-04-22' ORDER BY Datetime DESC limit 10000 ",con,index_col='Datetime')
data.index = pd.to_datetime(data.index)
data = data.sort_index()
print(data)
#RSI CALC
data['Return'] = np.log(data['Close'] / data['Close'].shift(1) )
data['Movement'] = data['Close'] - data['Close'].shift(1)
data['up'] = np.where((data['Movement'] > 0) ,data['Movement'],0)
data['down'] = np.where((data['Movement'] < 0) ,data['Movement'],0)
window_length = 14
#calculate moving average of the last 14 days gains
up = data['up'].rolling(window_length).mean()
#calculate moving average of the last 14 days losses
down = data['down'].abs().rolling(window_length).mean()
RS = up / down
#Bollinger bands, 1 std and 2 std
data['MA20'] = data['Close'].rolling(window=20).mean()
data['20dSTD'] = data['Close'].rolling(window=20).std()
data['Upper'] = data['MA20'] + (data['20dSTD'] * 2)
data['Lower'] = data['MA20'] - (data['20dSTD'] * 2)
'''data['Upper1s'] = data['MA20'] + (data['20dSTD'] * 1)
data['Lower1s'] = data['MA20'] - (data['20dSTD'] * 1)
data['LBPer']=(data['Close']/data['Lower'])-1
data['UBPer']=(data['Upper']/data['Close'])-1
data['UBPer1s']=(data['Close']/data['Upper1s'])-1'''
data['AD'] = 0
#ADL Line
data['CMFV'] = (((data['Close']-data['Low'])-(data['High']-data['Close']))/(data['High']-data['Low']))*data['Volume']
data['AD'] = data['CMFV'].rolling(14, min_periods=14).sum()
data['AD'] = data['AD'].shift(1)
data['RSI'] = 100.0 - (100.0 / (1.0 + RS))
#data = data[data.index.strftime('%Y-%m-%d') == '2021-02-27']
#Print data
print(data)
'''data[['Close','AD']].plot(figsize=(10,4))
plt.grid(True)
plt.title(stock + ' AD')
plt.axis('tight')
plt.ylabel('Price')
plt.show()'''
| 31.861538 | 172 | 0.652342 | 320 | 2,071 | 4.1875 | 0.378125 | 0.080597 | 0.058209 | 0.053731 | 0.250746 | 0.214925 | 0.146269 | 0.104478 | 0 | 0 | 0 | 0.050434 | 0.109609 | 2,071 | 64 | 173 | 32.359375 | 0.676247 | 0.139546 | 0 | 0.064516 | 0 | 0 | 0.222464 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.225806 | 0 | 0.225806 | 0.064516 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
178b8f7b8817c326e1125ab56cb91128e6dfcebb | 6,044 | py | Python | stable_baselines_model_based_rl/sampler/gym_sampler.py | micheltokic/stable_baselines_model_based_rl | 75bac906aeba69072878ceb15d9be459b1f436c3 | [
"Apache-2.0"
] | 1 | 2022-01-08T17:08:13.000Z | 2022-01-08T17:08:13.000Z | stable_baselines_model_based_rl/sampler/gym_sampler.py | micheltokic/stable_baselines_model_based_rl | 75bac906aeba69072878ceb15d9be459b1f436c3 | [
"Apache-2.0"
] | 5 | 2021-09-15T18:14:48.000Z | 2021-09-19T16:17:51.000Z | stable_baselines_model_based_rl/sampler/gym_sampler.py | micheltokic/stable_baselines_model_based_rl | 75bac906aeba69072878ceb15d9be459b1f436c3 | [
"Apache-2.0"
] | null | null | null | import csv
import datetime
import os
import gym
from gym.spaces import space
from gym.spaces.box import Box
from gym.spaces.discrete import Discrete
from gym.spaces.multi_discrete import MultiDiscrete
from definitions import ROOT_DIR
from stable_baselines_model_based_rl.utils.configuration import Configuration
from stable_baselines_model_based_rl.utils.spaces.base import SpaceType
from stable_baselines_model_based_rl.utils.spaces.factory import space_value_from_gym
def __update_action_input_config(config: Configuration, action_space: space, action_col_names):
if isinstance(action_space, Discrete):
action_type = 'DISCRETE'
elif isinstance(action_space, MultiDiscrete):
action_type = 'MULTI_DISCRETE'
raise NotImplementedError('Not yet supported!') # TODO
elif isinstance(action_space, Box):
action_type = 'BOX'
box_bounds = {
'low': [float(x) for x in list(action_space.low)],
'high': [float(x) for x in list(action_space.high)],
}
config.set('input_config.action_box_bounds', box_bounds)
config.set('input_config.action_type', action_type)
config.set('input_config.action_cols', action_col_names)
def __update_observation_input_config(config: Configuration, observation_cols, obs_space: Box):
config.set('input_config.observation_cols', observation_cols)
if isinstance(obs_space, Box):
config.set('input_config.observation_bounds.low', [float(x) for x in list(obs_space.low)])
config.set('input_config.observation_bounds.high',
[float(x) for x in list(obs_space.high)])
def __sample_gym_environment(gym_environment_name: str, data_file: str, episode_count=20,
max_steps=100):
"""Sample given gym environment, create proper config and store generated data in data_file."""
config = Configuration(os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../../example_usage/sample_config.yaml'))
env = gym.make(gym_environment_name)
env.np_random.seed(0)
env.action_space.np_random.seed(0)
action_col_names = space_value_from_gym(env.action_space, env.action_space.sample(),
SpaceType.ACTION).column_names
observation_col_names = space_value_from_gym(env.observation_space,
env.observation_space.sample()).column_names
__update_action_input_config(config, env.action_space, action_col_names)
__update_observation_input_config(config, observation_col_names, env.observation_space)
config.set('gym_sampling.gym_environment_name', gym_environment_name)
data_file_handle = open(data_file, mode='w', encoding='UTF-8', newline='')
csv_writer = csv.writer(data_file_handle, delimiter=',')
# CSV Header
csv_writer.writerow(['EPISODE', 'STEP', *action_col_names, *observation_col_names])
# SAMPLE DATA
for episode in range(episode_count):
print('Start of episode %d' % episode)
obs = env.reset()
step = 0
done = False
while step < max_steps and not done:
step += 1
action = env.action_space.sample()
action_sv = space_value_from_gym(env.action_space, action, SpaceType.ACTION)
obs_sv = space_value_from_gym(env.observation_space, obs)
# Append row to CSV file
csv_writer.writerow([int(episode), int(step), *action_sv.to_value_list(),
*obs_sv.to_value_list()])
obs, reward, done, _ = env.step(action)
print(' --> finished after %d steps' % step)
data_file_handle.close()
return config
def sample_gym_environment(gym_environment_name: str, episode_count=20, max_steps=100,
output_path=os.path.join(ROOT_DIR, 'sample_output'),
debug: bool = False):
"""
Sample the given gym environment with the given amount of episodes and maximum
steps per episode.
Two files are created:
- A CSV file, containing the sampled data.
- A YAML file, containing the configuration that results from the sampled gym
environment, based on the sample_config.yaml file.
    Both files are stored inside output_path, under subfolders named after the gym
    environment and the current time. E.g. the following folder structure
will be created within output_path: "CartPole-v1/sample_data/2021-05-01-10-00-30/data.csv".
Args:
gym_environment_name: Name of the Gym-Environment to sample.
        episode_count: Amount of episodes to use for the sampling.
max_steps: Maximum steps per episode allowed during sampling.
output_path: The directory the generated files are stored in.
debug: Flag whether to enable debugging features, such as naming the output folder based
on the amount of episodes and max steps.
Returns:
data_file: Path to the created data (csv) file.
config: Configuration object created for the sampled environment.
"""
time = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
dest_dir_name = time if not debug else f'{time}_episodes={episode_count}_max-step={max_steps}'
final_dir_path = os.path.join(output_path, gym_environment_name, 'sample_data', dest_dir_name)
os.makedirs(final_dir_path)
data_file = f'{final_dir_path}/data.csv'
config = __sample_gym_environment(gym_environment_name, data_file, episode_count=episode_count,
max_steps=max_steps)
config.set('input_config.input_file_name', os.path.abspath(data_file))
config.set('model_wrapping.reset.data_file', os.path.abspath(data_file))
config.set('model_wrapping.reset.type', 'EPISODE_START')
config.save_config(file=f'{final_dir_path}/config.yaml')
print(f'Data and config saved in: {final_dir_path}')
return data_file, config
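# Example call (parameter values are hypothetical, added for illustration):
#   data_file, config = sample_gym_environment('CartPole-v1', episode_count=5,
#                                              max_steps=50)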
| 43.797101 | 99 | 0.695069 | 814 | 6,044 | 4.891892 | 0.237101 | 0.056253 | 0.040683 | 0.035158 | 0.276745 | 0.200402 | 0.180563 | 0.118533 | 0.024108 | 0.024108 | 0 | 0.006325 | 0.215255 | 6,044 | 137 | 100 | 44.116788 | 0.833228 | 0.209464 | 0 | 0 | 0 | 0 | 0.138616 | 0.093336 | 0 | 0 | 0 | 0.007299 | 0 | 1 | 0.04878 | false | 0 | 0.146341 | 0 | 0.219512 | 0.036585 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
179047197bf91d3e14c6131b31ca97cf1748cbbb | 1,238 | py | Python | palaverapi/decorators.py | cocodelabs/api.palaverapp.com | cb517a2cd1dea12fadf4f72147fecf0105cbd717 | [
"BSD-3-Clause"
] | 3 | 2016-07-03T21:19:18.000Z | 2021-07-10T18:32:16.000Z | palaverapi/decorators.py | cocodelabs/api.palaverapp.com | cb517a2cd1dea12fadf4f72147fecf0105cbd717 | [
"BSD-3-Clause"
] | 66 | 2015-03-27T21:52:11.000Z | 2021-09-06T17:56:59.000Z | palaverapi/decorators.py | cocodelabs/api.palaverapp.com | cb517a2cd1dea12fadf4f72147fecf0105cbd717 | [
"BSD-3-Clause"
] | 1 | 2021-07-28T19:45:31.000Z | 2021-07-28T19:45:31.000Z | import json
from functools import wraps
from typing import Callable
from urllib.parse import parse_qsl
from rivr.http import Request, Response
from palaverapi.responses import ProblemResponse
def requires_body(func: Callable[..., Response]):
@wraps(func)
def wrapper(self, request: Request, *args, **kwargs) -> Response:
if request.content_type:
body = request.body.read()
content_type = request.content_type.split(';')[0]
try:
if content_type == 'application/json':
content = body.decode('utf-8')
payload = json.loads(content)
elif content_type == 'application/x-www-form-urlencoded':
content = body.decode('utf-8')
data = parse_qsl(content, True)
payload = dict((k, v) for k, v in data)
else:
return ProblemResponse(415, 'Unsupported Media Type')
except (UnicodeDecodeError, ValueError):
return ProblemResponse(400, 'Invalid request body')
return func(self, request, *args, payload, **kwargs)
return func(self, request, *args, {}, **kwargs)
return wrapper
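# Sketch of the intended usage (assumed, not from the original file): the
# parsed payload is injected as the last positional argument before **kwargs.
#
#   class TokenView:
#       @requires_body
#       def post(self, request, payload):
#           return Response(str(payload))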
| 34.388889 | 73 | 0.588853 | 131 | 1,238 | 5.503817 | 0.458015 | 0.076283 | 0.047157 | 0.055479 | 0.127601 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010526 | 0.30937 | 1,238 | 35 | 74 | 35.371429 | 0.832749 | 0 | 0 | 0.074074 | 0 | 0 | 0.082391 | 0.026656 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.222222 | 0 | 0.481481 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1790c8c518d20fe2e7697039e44e0d23b4a62c91 | 9,489 | py | Python | cli/medperf/utils.py | johnugeorge/medperf | 5bc3f643064df14e9476bd4d4c1a4c0cce5337d5 | [
"Apache-2.0"
] | 1 | 2021-09-24T18:09:53.000Z | 2021-09-24T18:09:53.000Z | cli/medperf/utils.py | johnugeorge/medperf | 5bc3f643064df14e9476bd4d4c1a4c0cce5337d5 | [
"Apache-2.0"
] | 2 | 2021-09-27T16:14:04.000Z | 2021-11-03T14:24:54.000Z | cli/medperf/utils.py | johnugeorge/medperf | 5bc3f643064df14e9476bd4d4c1a4c0cce5337d5 | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
from pexpect import spawn
import logging
from typing import List, Tuple
from datetime import datetime
import hashlib
import os
from shutil import rmtree
import tarfile
import yaml
from pathlib import Path
from colorama import Fore, Style
import re
import medperf.config as config
from medperf.ui import UI
def storage_path(subpath: str):
"""Helper funciton that converts a path to storage-related path"""
return os.path.join(config.storage, subpath)
def get_file_sha1(path: str) -> str:
"""Calculates the sha1 hash for a given file.
Args:
path (str): Location of the file of interest.
Returns:
str: Calculated hash
"""
BUF_SIZE = 65536
sha1 = hashlib.sha1()
with open(path, "rb") as f:
while True:
data = f.read(BUF_SIZE)
if not data:
break
sha1.update(data)
return sha1.hexdigest()
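# Usage sketch (file name is illustrative):
#   get_file_sha1('weights.tar.gz') -> 40-character hex digest
# Reading in 64 KiB chunks keeps memory usage flat for arbitrarily large files.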
def init_storage():
"""Builds the general medperf folder structure.
"""
parent = config.storage
data = storage_path(config.data_storage)
cubes = storage_path(config.cubes_storage)
results = storage_path(config.results_storage)
tmp = storage_path(config.tmp_storage)
dirs = [parent, data, cubes, results, tmp]
for dir in dirs:
if not os.path.isdir(dir):
logging.info(f"Creating {dir} directory")
os.mkdir(dir)
def cleanup():
"""Removes clutter and unused files from the medperf folder structure.
"""
if os.path.exists(storage_path(config.tmp_storage)):
logging.info("Removing temporary data storage")
rmtree(storage_path(config.tmp_storage), ignore_errors=True)
dsets = get_dsets()
prefix = config.tmp_reg_prefix
unreg_dsets = [dset for dset in dsets if dset.startswith(prefix)]
for dset in unreg_dsets:
logging.info("Removing unregistered dataset")
dset_path = os.path.join(storage_path(config.data_storage), dset)
if os.path.exists(dset_path):
rmtree(dset_path, ignore_errors=True)
def get_dsets() -> List[str]:
"""Retrieves the UID of all the datasets stored locally.
Returns:
List[str]: UIDs of prepared datasets.
"""
dsets = next(os.walk(storage_path(config.data_storage)))[1]
return dsets
def pretty_error(msg: str, ui: "UI", clean: bool = True, add_instructions=True):
"""Prints an error message with typer protocol and exits the script
Args:
msg (str): Error message to show to the user
clean (bool, optional):
Run the cleanup process before exiting. Defaults to True.
add_instructions (bool, optional):
            Show additional instructions to the user. Defaults to True.
"""
logging.warning(
"MedPerf had to stop execution. See logs above for more information"
)
if msg[-1] != ".":
msg = msg + "."
if add_instructions:
msg += f" See logs at {config.log_file} for more information"
ui.print_error(msg)
if clean:
cleanup()
exit(1)
def cube_path(uid: int) -> str:
"""Gets the path for a given cube.
Args:
uid (int): Cube UID.
Returns:
str: Location of the cube folder structure.
"""
return os.path.join(storage_path(config.cubes_storage), str(uid))
def generate_tmp_datapath() -> Tuple[str, str]:
"""Builds a temporary folder for prepared but yet-to-register datasets.
Returns:
str: General temporary folder location
str: Specific data path for the temporary dataset
"""
dt = datetime.utcnow()
ts = str(int(datetime.timestamp(dt)))
tmp = config.tmp_reg_prefix + ts
out_path = os.path.join(storage_path(config.data_storage), tmp)
out_path = os.path.abspath(out_path)
out_datapath = os.path.join(out_path, "data")
if not os.path.isdir(out_datapath):
logging.info(f"Creating temporary dataset path: {out_datapath}")
os.makedirs(out_datapath)
return out_path, out_datapath
def check_cube_validity(cube: "Cube", ui: "UI"):
"""Helper function for pretty printing the cube validity process.
Args:
cube (Cube): Cube to check for validity
ui (UI): Instance of an UI implementation
"""
logging.info(f"Checking cube {cube.name} validity")
ui.text = "Checking cube MD5 hash..."
if not cube.is_valid():
pretty_error("MD5 hash doesn't match")
logging.info(f"Cube {cube.name} is valid")
ui.print(f"> {cube.name} MD5 hash check complete")
def untar_additional(add_filepath: str) -> str:
"""Untars and removes the additional_files.tar.gz file
Args:
add_filepath (str): Path where the additional_files.tar.gz file can be found.
Returns:
str: location where the untared files can be found.
"""
logging.info(f"Uncompressing additional_files.tar.gz at {add_filepath}")
addpath = str(Path(add_filepath).parent)
tar = tarfile.open(add_filepath)
tar.extractall(addpath)
tar.close()
os.remove(add_filepath)
return addpath
def approval_prompt(msg: str, ui: "UI") -> bool:
"""Helper function for prompting the user for things they have to explicitly approve.
Args:
msg (str): What message to ask the user for approval.
Returns:
bool: Wether the user explicitly approved or not.
"""
logging.info("Prompting for user's approval")
approval = None
while approval is None or approval not in "yn":
approval = ui.prompt(msg.strip() + " ").lower()
logging.info(f"User answered approval with {approval}")
return approval == "y"
def dict_pretty_print(in_dict: dict, ui: "UI"):
"""Helper function for distinctively printing dictionaries with yaml format.
Args:
in_dict (dict): dictionary to print
"""
logging.debug(f"Printing dictionary to the user: {in_dict}")
ui.print()
ui.print("=" * 20)
in_dict = {k: v for (k, v) in in_dict.items() if v is not None}
ui.print(yaml.dump(in_dict))
ui.print("=" * 20)
def combine_proc_sp_text(proc: spawn, ui: "UI") -> str:
"""Combines the output of a process and the spinner.
Joins any string captured from the process with the
spinner current text. Any strings ending with any other
character from the subprocess will be returned later.
Args:
proc (spawn): a pexpect spawned child
ui (UI): An instance of an UI implementation
Returns:
str: all non-carriage-return-ending string captured from proc
"""
static_text = ui.text
proc_out = ""
while proc.isalive():
line = byte = proc.read(1)
while byte and not re.match(b"[\r\n]", byte):
byte = proc.read(1)
line += byte
if not byte:
break
line = line.decode("utf-8", "ignore")
if line:
# add to proc_out list for logging
proc_out += line
ui.text = (
f"{static_text} {Fore.WHITE}{Style.DIM}{line.strip()}{Style.RESET_ALL}"
)
return proc_out
def get_folder_sha1(path: str) -> str:
"""Generates a hash for all the contents of the folder. This procedure
hashes all of the files in the folder, sorts them and then hashes that list.
Args:
path (str): Folder to hash
Returns:
str: sha1 hash of the whole folder
"""
hashes = []
for root, _, files in os.walk(path, topdown=False):
for file in files:
filepath = os.path.join(root, file)
hashes.append(get_file_sha1(filepath))
hashes = sorted(hashes)
sha1 = hashlib.sha1()
for hash in hashes:
sha1.update(hash.encode("utf-8"))
return sha1.hexdigest()
def results_path(benchmark_uid, model_uid, data_uid):
out_path = storage_path(config.results_storage)
bmark_uid = str(benchmark_uid)
model_uid = str(model_uid)
data_uid = str(data_uid)
out_path = os.path.join(out_path, bmark_uid, model_uid, data_uid)
out_path = os.path.join(out_path, config.results_filename)
return out_path
def results_ids(ui: UI):
results_storage = storage_path(config.results_storage)
results_ids = []
try:
bmk_uids = next(os.walk(results_storage))[1]
for bmk_uid in bmk_uids:
bmk_storage = os.path.join(results_storage, bmk_uid)
model_uids = next(os.walk(bmk_storage))[1]
for model_uid in model_uids:
bmk_model_storage = os.path.join(bmk_storage, model_uid)
data_uids = next(os.walk(bmk_model_storage))[1]
bmk_model_data_list = [
(bmk_uid, model_uid, data_uid) for data_uid in data_uids
]
results_ids += bmk_model_data_list
except StopIteration:
msg = "Couldn't iterate over the results directory"
logging.warning(msg)
pretty_error(msg, ui)
return results_ids
def setup_logger(logger, log_lvl):
    fh = logging.FileHandler(config.log_file)  # config is a module, so attribute access, not subscripting
fh.setLevel(log_lvl)
logger.addHandler(fh)
def list_files(startpath):
tree_str = ""
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, "").count(os.sep)
indent = " " * 4 * (level)
tree_str += "{}{}/\n".format(indent, os.path.basename(root))
subindent = " " * 4 * (level + 1)
for f in files:
tree_str += "{}{}\n".format(subindent, f)
return tree_str
| 30.316294 | 89 | 0.644957 | 1,310 | 9,489 | 4.543511 | 0.238168 | 0.016129 | 0.034274 | 0.014113 | 0.123656 | 0.044187 | 0.030578 | 0.02453 | 0.02453 | 0 | 0 | 0.005355 | 0.252187 | 9,489 | 312 | 90 | 30.413462 | 0.833427 | 0.252503 | 0 | 0.045198 | 0 | 0 | 0.108885 | 0.011345 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101695 | false | 0 | 0.084746 | 0 | 0.254237 | 0.039548 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
17935aaebc6842fbcfb1d8f850ae573d75c68bf6 | 1,873 | py | Python | analytics_pipeline-master/log_generator.py | danielvdao/etl_ideas | 18062c92e6441eae5b316e9cc8f0f2d085636a42 | [
"MIT"
] | null | null | null | analytics_pipeline-master/log_generator.py | danielvdao/etl_ideas | 18062c92e6441eae5b316e9cc8f0f2d085636a42 | [
"MIT"
] | 1 | 2017-10-17T02:58:33.000Z | 2017-10-17T02:58:33.000Z | analytics_pipeline-master/log_generator.py | danielvdao/etl_ideas | 18062c92e6441eae5b316e9cc8f0f2d085636a42 | [
"MIT"
] | null | null | null | from faker import Faker
from datetime import datetime
import random
import time
LINE = """\
{remote_addr} - - [{time_local} +0000] "{request_type} {request_path} HTTP/1.1" {status} {body_bytes_sent} "{http_referer}" "{http_user_agent}"\
"""
LOG_FILE_A = "log_a.txt"
LOG_FILE_B = "log_b.txt"
LOG_MAX = 100
def generate_log_line():
fake = Faker()
now = datetime.now()
remote_addr = fake.ipv4()
time_local = now.strftime('%d/%b/%Y:%H:%M:%S')
request_type = random.choice(["GET", "POST", "PUT"])
request_path = "/" + fake.uri_path()
status = random.choice([200, 401, 404])
body_bytes_sent = random.choice(range(5, 1000, 1))
http_referer = fake.uri()
http_user_agent = fake.user_agent()
log_line = LINE.format(
remote_addr=remote_addr,
time_local=time_local,
request_type=request_type,
request_path=request_path,
status=status,
body_bytes_sent=body_bytes_sent,
http_referer=http_referer,
http_user_agent=http_user_agent
)
return log_line
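# Example of a generated line (values are illustrative; Faker randomizes them):
# 203.0.113.7 - - [01/May/2021:10:00:30 +0000] "GET /category/posts HTTP/1.1" 200 512 "https://example.com/" "Mozilla/5.0 ..."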
def write_log_line(log_file, line):
with open(log_file, "a") as f:
f.write(line)
f.write("\n")
def clear_log_file(log_file):
with open(log_file, "w+") as f:
f.write("")
if __name__ == "__main__":
current_log_file = LOG_FILE_A
lines_written = 0
clear_log_file(LOG_FILE_A)
clear_log_file(LOG_FILE_B)
while True:
line = generate_log_line()
write_log_line(current_log_file, line)
lines_written += 1
if lines_written % LOG_MAX == 0:
new_log_file = LOG_FILE_B
if current_log_file == LOG_FILE_B:
new_log_file = LOG_FILE_A
clear_log_file(new_log_file)
current_log_file = new_log_file
sleep_time = random.choice(range(1, 5, 1))
time.sleep(sleep_time)
| 24.973333 | 144 | 0.639616 | 274 | 1,873 | 3.974453 | 0.266423 | 0.15427 | 0.064279 | 0.089991 | 0.241506 | 0.10101 | 0.049587 | 0.049587 | 0 | 0 | 0 | 0.021862 | 0.242926 | 1,873 | 74 | 145 | 25.310811 | 0.746121 | 0 | 0 | 0 | 0 | 0.017857 | 0.110102 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053571 | false | 0 | 0.071429 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
17957d92994d6b6b24057eeb1c8fb73bd1d0a786 | 2,027 | py | Python | scripts/visualization/style.py | bo1929/basty | 3ef84578e0154509346fdc2c0c56261448d78276 | [
"MIT"
] | 5 | 2021-12-10T17:43:52.000Z | 2022-03-01T22:19:36.000Z | scripts/visualization/style.py | bo1929/basty | 3ef84578e0154509346fdc2c0c56261448d78276 | [
"MIT"
] | null | null | null | scripts/visualization/style.py | bo1929/basty | 3ef84578e0154509346fdc2c0c56261448d78276 | [
"MIT"
] | null | null | null | MAX_LIMIT = 9999
class StyleEmbedding:
colorscheme = "tableau20"
filled = True
sizeDefault = 7
sizeMin = 5
sizeMax = 25
opacityDefault = 0.05
opacityMin = 0.05
opacityMax = 0.5
tickMinStep = 5
    @staticmethod
    def get_embedding_style():
return {
"config": {
"axis": {
"labelFontSize": 20,
"labelSeparation": 10,
"titleFontSize": 24,
},
"mark": {"smooth": True},
"legend": {
"titleFontSize": 24,
"labelFontSize": 20,
"titleLimit": MAX_LIMIT,
"labelLimit": MAX_LIMIT,
"symbolLimit": MAX_LIMIT,
"orient": "right",
# "orient": "top",
# "columns": 3,
# "direction": "horizontal",
"titleAnchor": "middle",
"labelOpacity": 1,
"symbolOpacity": 1,
},
"title": {"anchor": "start", "color": "gray", "fontSize": 25},
}
}
class StyleEthogram:
colorscheme = "tableau20"
    @staticmethod
    def get_ethogram_style():
return {
"config": {
"view": {"continuousWidth": 400, "continuousHeight": 300},
"axis": {
"labelFontSize": 20,
"labelSeparation": 10,
"titleFontSize": 24,
},
"legend": {
"labelFontSize": 20,
"labelLimit": MAX_LIMIT,
"labelOpacity": 1,
"orient": "right",
"symbolLimit": MAX_LIMIT,
"symbolOpacity": 1,
"titleFontSize": 24,
"titleLimit": MAX_LIMIT,
},
"title": {"anchor": "start", "color": "gray", "fontSize": 25},
}
}
| 29.808824 | 78 | 0.391712 | 126 | 2,027 | 6.214286 | 0.515873 | 0.07152 | 0.043423 | 0.086845 | 0.219668 | 0.219668 | 0.219668 | 0 | 0 | 0 | 0 | 0.053898 | 0.48742 | 2,027 | 67 | 79 | 30.253731 | 0.699711 | 0.02812 | 0 | 0.586207 | 0 | 0 | 0.226857 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0 | 0.034483 | 0.275862 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
179a63e2713ef678248c5a0480244a33d8023a8f | 1,955 | py | Python | scripts/parse_aggregated_responses.py | cagnolone/openmic-2018 | 154dc425a4fac7ba45fb143ef75fa21189fc4d1c | [
"MIT"
] | 56 | 2018-08-27T15:48:37.000Z | 2021-12-25T11:01:23.000Z | scripts/parse_aggregated_responses.py | cagnolone/openmic-2018 | 154dc425a4fac7ba45fb143ef75fa21189fc4d1c | [
"MIT"
] | 37 | 2018-08-16T17:00:21.000Z | 2022-02-09T23:55:36.000Z | scripts/parse_aggregated_responses.py | cagnolone/openmic-2018 | 154dc425a4fac7ba45fb143ef75fa21189fc4d1c | [
"MIT"
] | 7 | 2018-10-09T14:48:01.000Z | 2020-06-06T12:03:15.000Z | #!/usr/bin/env python
# coding: utf8
'''Script to parse aggregated annotation responses into a CSV file of labels.
Example
-------
$ ./scripts/parse_aggregated_responses.py \
"path/to/dir/*.csv" \
openmic-2018-aggregated-labels.csv
'''
from __future__ import print_function
import argparse
import glob
import os
import pandas as pd
import sys
import tqdm
YN_MAP = {'no': -1, 'yes': 1}
COLUMNS = ['sample_key', 'instrument', 'relevance', 'num_responses']
CONF_COL = 'does_this_recording_contain_{}:confidence'
CONTAIN_COL = 'does_this_recording_contain_{}'
def parse_one(row):
sign = YN_MAP[row[CONTAIN_COL.format(row.instrument)]]
conf = row[CONF_COL.format(row.instrument)] / 2.0
proba = 0.5 + sign * conf
return dict(sample_key=row.sample_key, instrument=row.instrument,
relevance=proba, num_responses=row._trusted_judgments)
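# Worked example of the relevance mapping (numbers illustrative):
#   a 'yes' answer at confidence 0.8 -> 0.5 + 1 * (0.8 / 2) = 0.9
#   a 'no' answer at confidence 0.8  -> 0.5 - 0.4 = 0.1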
def main(csv_files, output_filename):
records = []
for csv_file in tqdm.tqdm(csv_files):
records += pd.read_csv(csv_file).apply(parse_one, axis=1).values.tolist()
df = pd.DataFrame.from_records(records)
print('Loaded {} records'.format(len(df)))
df.sort_values(by='sample_key', inplace=True)
df.to_csv(output_filename, columns=COLUMNS, index=None)
return os.path.exists(output_filename)
def process_args(args):
parser = argparse.ArgumentParser(description='Aggregated annotation results parser')
parser.add_argument('csv_pattern', type=str, action='store',
help='Glob-style file pattern for picking up CSV files.')
parser.add_argument(dest='output_filename', type=str, action='store',
help='Output filename for writing the sparse label CSV.')
return parser.parse_args(args)
if __name__ == '__main__':
args = process_args(sys.argv[1:])
csv_files = glob.glob(args.csv_pattern)
success = main(csv_files, args.output_filename)
sys.exit(0 if success else 1)
| 29.621212 | 88 | 0.703325 | 270 | 1,955 | 4.866667 | 0.444444 | 0.063927 | 0.028919 | 0.030441 | 0.074581 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009265 | 0.171867 | 1,955 | 65 | 89 | 30.076923 | 0.802347 | 0.117647 | 0 | 0 | 0 | 0 | 0.188668 | 0.041472 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078947 | false | 0 | 0.184211 | 0 | 0.342105 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
179adee8737327e1461212148f441c448d335da8 | 5,303 | py | Python | tests/test_looseserver/client/test_abstract_client.py | KillAChicken/loose-server | 082402f1fec94faea20343142b0c306dc5f86026 | [
"MIT"
] | 3 | 2019-04-21T13:10:34.000Z | 2019-10-08T05:20:04.000Z | tests/test_looseserver/client/test_abstract_client.py | KillAChicken/loose-server | 082402f1fec94faea20343142b0c306dc5f86026 | [
"MIT"
] | null | null | null | tests/test_looseserver/client/test_abstract_client.py | KillAChicken/loose-server | 082402f1fec94faea20343142b0c306dc5f86026 | [
"MIT"
] | null | null | null | """Test cases for abstract looseserver client."""
import uuid
from urllib.parse import urljoin
from looseserver.client.abstract import AbstractClient
def test_create_rule(client_rule_factory, client_response_factory, registered_rule):
"""Check request data that client uses to create a rule.
1. Create a subclass of the abstract client.
2. Implement send request so that it checks the request parameters.
3. Invoke the create_rule method.
4. Check the rule, returned by the method call.
"""
rule_id = str(uuid.uuid4())
class _Client(AbstractClient):
def _send_request(self, url, method="GET", json=None):
serialized_rule = self._rule_factory.serialize_rule(rule=registered_rule)
assert url == "rules", "Wrong url"
assert method == "POST", "Wrong method"
assert json == serialized_rule, "Wrong rule data"
response_json = {"rule_id": rule_id}
response_json.update(serialized_rule)
return response_json
client = _Client(
configuration_url="/",
rule_factory=client_rule_factory,
response_factory=client_response_factory,
)
created_rule = client.create_rule(rule=registered_rule)
assert created_rule.rule_id == rule_id, "Rule ID has not been set"
def test_get_rule(client_rule_factory, client_response_factory, registered_rule):
"""Check request data that client uses to get a rule.
1. Create a subclass of the abstract client.
2. Implement send request so that it checks the request parameters.
3. Invoke the get_rule method.
4. Check the rule, returned by the method call.
"""
rule_id = str(uuid.uuid4())
class _Client(AbstractClient):
def _send_request(self, url, method="GET", json=None):
assert url == "rule/{0}".format(rule_id), "Wrong url"
assert method == "GET", "Wrong method"
assert json is None, "Data has been specified"
response_json = {"rule_id": rule_id}
response_json.update(self._rule_factory.serialize_rule(rule=registered_rule))
return response_json
client = _Client(
configuration_url="/",
rule_factory=client_rule_factory,
response_factory=client_response_factory,
)
obtained_rule = client.get_rule(rule_id=rule_id)
assert obtained_rule.rule_id == rule_id, "Rule ID has not been set"
def test_delete_rule(client_rule_factory, client_response_factory):
"""Check request data that client uses to remove a rule.
1. Create a subclass of the abstract client.
2. Implement send request so that it checks the request parameters.
3. Invoke the remove_rule method.
"""
rule_id = str(uuid.uuid4())
class _Client(AbstractClient):
def _send_request(self, url, method="GET", json=None):
assert url == "rule/{0}".format(rule_id), "Wrong url"
assert method == "DELETE", "Wrong method"
assert json is None, "Data has been specified"
client = _Client(
configuration_url="/",
rule_factory=client_rule_factory,
response_factory=client_response_factory,
)
client.remove_rule(rule_id=rule_id)
def test_set_response(client_rule_factory, client_response_factory, registered_response):
"""Check request data that client uses to set a response.
1. Create a subclass of the abstract client.
2. Implement send request so that it checks the request parameters.
3. Invoke the set_response method.
4. Check the response, returned by the method call.
"""
rule_id = str(uuid.uuid4())
class _Client(AbstractClient):
def _send_request(self, url, method="GET", json=None):
serialized_response = self._response_factory.serialize_response(registered_response)
assert url == "response/{0}".format(rule_id), "Wrong url"
assert method == "POST", "Wrong method"
assert json == serialized_response, "Wrong response data"
return serialized_response
client = _Client(
configuration_url="/",
rule_factory=client_rule_factory,
response_factory=client_response_factory,
)
response = client.set_response(rule_id=rule_id, response=registered_response)
assert response.response_type == registered_response.response_type, "Wrong response is returned"
def test_build_url(client_rule_factory, client_response_factory):
"""Check method to build url.
1. Create a subclass of the abstract client.
2. Build url.
3. Check the built url.
"""
class _Client(AbstractClient):
def _send_request(self, url, method="GET", json=None):
pass
def exposed_build_url(self, relative_url):
"""Expose 'protected' _build_url method."""
return self._build_url(relative_url=relative_url)
configuration_endpoint = "/config/"
client = _Client(
configuration_url=configuration_endpoint,
rule_factory=client_rule_factory,
response_factory=client_response_factory,
)
relative_path = "test"
expected_url = urljoin(configuration_endpoint, relative_path)
assert client.exposed_build_url(relative_path) == expected_url, "Wrong url"
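

# For context, a minimal concrete client is sketched below; it is not part of
# the original test module. It assumes the third-party 'requests' package and
# reuses only the AbstractClient surface exercised above (_send_request and
# _build_url); the class name is hypothetical.
class HTTPClient(AbstractClient):
    """Hedged sketch of an HTTP-backed client."""

    def _send_request(self, url, method="GET", json=None):
        # Local import so the sketch does not affect test collection when
        # 'requests' is absent (assumption: it is an extra dependency).
        import requests

        # _build_url joins the configuration URL with the relative path,
        # mirroring the urljoin behaviour checked in test_build_url.
        response = requests.request(method, self._build_url(url), json=json)
        response.raise_for_status()
        return response.json()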
| 36.321918 | 100 | 0.686027 | 671 | 5,303 | 5.169896 | 0.128167 | 0.039781 | 0.049005 | 0.080715 | 0.712886 | 0.688095 | 0.688095 | 0.631306 | 0.602191 | 0.567599 | 0 | 0.006109 | 0.228361 | 5,303 | 145 | 101 | 36.572414 | 0.841642 | 0.215727 | 0 | 0.52439 | 0 | 0 | 0.084948 | 0 | 0 | 0 | 0 | 0 | 0.195122 | 1 | 0.134146 | false | 0.012195 | 0.036585 | 0 | 0.280488 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
179ae6f54ba09f051bfa483edda5d5eea6d22097 | 5,683 | py | Python | mlops-template-gitlab/lambda_functions/lambda-seedcode-checkin-gitlab/tests/functional/api/test_merge_requests.py | giuseppe-zappia/sagemaker-custom-project-templates | a160cf250dcabf8a9a14682e28d0a39df18e3a5c | [
"MIT-0"
] | 22 | 2021-08-24T13:43:55.000Z | 2022-03-25T06:18:19.000Z | mlops-template-gitlab/lambda_functions/lambda-seedcode-checkin-gitlab/tests/functional/api/test_merge_requests.py | giuseppe-zappia/sagemaker-custom-project-templates | a160cf250dcabf8a9a14682e28d0a39df18e3a5c | [
"MIT-0"
] | 3 | 2021-09-09T00:40:56.000Z | 2022-01-26T10:53:30.000Z | mlops-template-gitlab/lambda_functions/lambda-seedcode-checkin-gitlab/tests/functional/api/test_merge_requests.py | giuseppe-zappia/sagemaker-custom-project-templates | a160cf250dcabf8a9a14682e28d0a39df18e3a5c | [
"MIT-0"
] | 15 | 2021-08-19T23:53:24.000Z | 2022-03-28T22:26:04.000Z | import time

import pytest

import gitlab
import gitlab.v4.objects


def test_merge_requests(project):
    project.files.create(
        {
            "file_path": "README.rst",
            "branch": "master",
            "content": "Initial content",
            "commit_message": "Initial commit",
        }
    )

    source_branch = "branch1"
    project.branches.create({"branch": source_branch, "ref": "master"})

    project.files.create(
        {
            "file_path": "README2.rst",
            "branch": source_branch,
            "content": "Initial content",
            "commit_message": "New commit in new branch",
        }
    )
    project.mergerequests.create(
        {"source_branch": "branch1", "target_branch": "master", "title": "MR readme2"}
    )


def test_merge_request_discussion(project):
    mr = project.mergerequests.list()[0]
    size = len(mr.discussions.list())

    discussion = mr.discussions.create({"body": "Discussion body"})
    assert len(mr.discussions.list()) == size + 1

    note = discussion.notes.create({"body": "first note"})
    note_from_get = discussion.notes.get(note.id)
    note_from_get.body = "updated body"
    note_from_get.save()

    discussion = mr.discussions.get(discussion.id)
    assert discussion.attributes["notes"][-1]["body"] == "updated body"

    note_from_get.delete()
    discussion = mr.discussions.get(discussion.id)
    assert len(discussion.attributes["notes"]) == 1


def test_merge_request_labels(project):
    mr = project.mergerequests.list()[0]
    mr.labels = ["label2"]
    mr.save()
    events = mr.resourcelabelevents.list()
    assert events
    event = mr.resourcelabelevents.get(events[0].id)
    assert event


def test_merge_request_milestone_events(project, milestone):
    mr = project.mergerequests.list()[0]
    mr.milestone_id = milestone.id
    mr.save()
    milestones = mr.resourcemilestoneevents.list()
    assert milestones
    milestone = mr.resourcemilestoneevents.get(milestones[0].id)
    assert milestone


def test_merge_request_basic(project):
    mr = project.mergerequests.list()[0]
    # basic testing: only make sure that the methods exist
    mr.commits()
    mr.changes()
    assert mr.participants()


def test_merge_request_rebase(project):
    mr = project.mergerequests.list()[0]
    assert mr.rebase()


@pytest.mark.skip(reason="flaky test")
def test_merge_request_merge(project):
    mr = project.mergerequests.list()[0]
    mr.merge()
    project.branches.delete(mr.source_branch)

    with pytest.raises(gitlab.GitlabMRClosedError):
        # A second merge attempt should raise GitlabMRClosedError
        mr.merge()


def test_merge_request_should_remove_source_branch(
    project, merge_request, wait_for_sidekiq
) -> None:
    """Test to ensure
    https://github.com/python-gitlab/python-gitlab/issues/1120 is fixed.

    The reported bug was that 'should_remove_source_branch' could not be
    used in the mr.merge() call."""
    source_branch = "remove_source_branch"
    mr = merge_request(source_branch=source_branch)

    mr.merge(should_remove_source_branch=True)
    result = wait_for_sidekiq(timeout=60)
    assert result is True, "sidekiq process should have terminated but did not"

    # Wait until it is merged
    mr_iid = mr.iid
    for _ in range(60):
        mr = project.mergerequests.get(mr_iid)
        if mr.merged_at is not None:
            break
        time.sleep(0.5)
    assert mr.merged_at is not None
    time.sleep(0.5)

    # Ensure we can NOT get the MR branch
    with pytest.raises(gitlab.exceptions.GitlabGetError):
        project.branches.get(source_branch)


def test_merge_request_large_commit_message(
    project, merge_request, wait_for_sidekiq
) -> None:
    """Test to ensure https://github.com/python-gitlab/python-gitlab/issues/1452
    is fixed.

    The reported bug was that a very long 'merge_commit_message' in mr.merge()
    would cause an error: 414 Request too large
    """
    source_branch = "large_commit_message"
    mr = merge_request(source_branch=source_branch)

    merge_commit_message = "large_message\r\n" * 1_000
    assert len(merge_commit_message) > 10_000

    mr.merge(merge_commit_message=merge_commit_message)
    result = wait_for_sidekiq(timeout=60)
    assert result is True, "sidekiq process should have terminated but did not"

    # Wait until it is merged
    mr_iid = mr.iid
    for _ in range(60):
        mr = project.mergerequests.get(mr_iid)
        if mr.merged_at is not None:
            break
        time.sleep(0.5)
    assert mr.merged_at is not None
    time.sleep(0.5)

    # Ensure we can get the MR branch
    project.branches.get(source_branch)


def test_merge_request_merge_ref(merge_request) -> None:
    source_branch = "merge_ref_test"
    mr = merge_request(source_branch=source_branch)

    response = mr.merge_ref()
    assert response and "commit_id" in response


def test_merge_request_merge_ref_should_fail(
    project, merge_request, wait_for_sidekiq
) -> None:
    source_branch = "merge_ref_test2"
    mr = merge_request(source_branch=source_branch)

    # Create conflict
    project.files.create(
        {
            "file_path": f"README.{source_branch}",
            "branch": project.default_branch,
            "content": "Different initial content",
            "commit_message": "Another commit in main branch",
        }
    )
    result = wait_for_sidekiq(timeout=60)
    assert result is True, "sidekiq process should have terminated but did not"

    # Check for non-existing merge_ref for MR with conflicts
    with pytest.raises(gitlab.exceptions.GitlabGetError):
        response = mr.merge_ref()
        assert "commit_id" not in response
| 28.70202 | 86 | 0.682562 | 730 | 5,683 | 5.120548 | 0.217808 | 0.077047 | 0.035313 | 0.050829 | 0.498395 | 0.407437 | 0.324505 | 0.23114 | 0.23114 | 0.204922 | 0 | 0.012531 | 0.21362 | 5,683 | 197 | 87 | 28.847716 | 0.823898 | 0.115256 | 0 | 0.419847 | 0 | 0 | 0.139731 | 0.004417 | 0 | 0 | 0 | 0 | 0.129771 | 1 | 0.083969 | false | 0 | 0.030534 | 0 | 0.114504 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
179d043db725bce8f3b6948c83cd4e08552dd537 | 687 | py | Python | Python_OCR_JE/venv/Lib/site-packages/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | 1 | 2022-01-08T12:30:44.000Z | 2022-01-08T12:30:44.000Z | Python_OCR_JE/venv/Lib/site-packages/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | Python_OCR_JE/venv/Lib/site-packages/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | 1 | 2021-04-26T22:41:56.000Z | 2021-04-26T22:41:56.000Z | import numpy as np

nd1 = np.array([[1, 2], [3, 4]])

# reshape
nd1.reshape(4)
nd1.reshape(2, 2)
nd1.reshape((2, 2))
nd1.reshape((2, 2), order="C")
nd1.reshape(4, order="C")

# resize
nd1.resize()
nd1.resize(4)
nd1.resize(2, 2)
nd1.resize((2, 2))
nd1.resize((2, 2), refcheck=True)
nd1.resize(4, refcheck=True)

nd2 = np.array([[1, 2], [3, 4]])

# transpose
nd2.transpose()
nd2.transpose(1, 0)
nd2.transpose((1, 0))

# swapaxes
nd2.swapaxes(0, 1)

# flatten
nd2.flatten()
nd2.flatten("C")

# ravel
nd2.ravel()
nd2.ravel("C")

# squeeze
nd2.squeeze()

nd3 = np.array([[1, 2]])
nd3.squeeze(0)

nd4 = np.array([[[1, 2]]])
nd4.squeeze((0, 1))
| 14.3125 | 34 | 0.576419 | 114 | 687 | 3.473684 | 0.219298 | 0.030303 | 0.080808 | 0.090909 | 0.229798 | 0.229798 | 0.174242 | 0.174242 | 0 | 0 | 0 | 0.113924 | 0.195051 | 687 | 47 | 35 | 14.617021 | 0.60217 | 0.080058 | 0 | 0 | 0 | 0 | 0.006932 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.037037 | 0 | 0.037037 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
179d9ebc77da5b1737344a934257dda6f2f13fc3 | 3,128 | py | Python | survae/nn/layers/autoregressive/utils.py | alisiahkoohi/survae_flows | e1747b05524c7ab540a211ed360ab3e67bc3e96d | [
"MIT"
] | 262 | 2020-07-05T20:57:44.000Z | 2022-03-28T02:24:43.000Z | survae/nn/layers/autoregressive/utils.py | alisiahkoohi/survae_flows | e1747b05524c7ab540a211ed360ab3e67bc3e96d | [
"MIT"
] | 17 | 2020-08-15T05:43:34.000Z | 2022-01-31T12:24:21.000Z | survae/nn/layers/autoregressive/utils.py | alisiahkoohi/survae_flows | e1747b05524c7ab540a211ed360ab3e67bc3e96d | [
"MIT"
] | 35 | 2020-08-24T06:55:37.000Z | 2022-02-11T05:17:58.000Z | import torch


def mask_conv2d_spatial(mask_type, height, width):
    """
    Creates a mask for Conv2d such that it becomes autoregressive in
    the spatial dimensions.

    Input:
        mask_type: str
            Either 'A' or 'B'. 'A' for first layer of network, 'B' for all others.
        height: int
            Kernel height for layer.
        width: int
            Kernel width for layer.

    Output:
        mask: torch.FloatTensor
            Shape (1, 1, height, width).
            A mask with 0 in places for masked elements.
    """
    mask = torch.ones([1, 1, height, width])
    mask[:, :, height // 2, width // 2 + (mask_type == 'B'):] = 0
    mask[:, :, height // 2 + 1:] = 0
    return mask


def mask_channels(mask_type, in_channels, out_channels, data_channels=3):
    """
    Creates an autoregressive channel mask.

    Input:
        mask_type: str
            Either 'A' or 'B'. 'A' for first layer of network, 'B' for all others.
        in_channels: int
            Number of input channels to layer.
        out_channels: int
            Number of output channels of layer.
        data_channels: int
            Number of channels in the input data, e.g. 3 for RGB images. (default = 3).

    Output:
        mask: torch.FloatTensor
            Shape (out_channels, in_channels).
            A mask with 0 in places for masked elements.
    """
    in_factor = in_channels // data_channels + 1
    out_factor = out_channels // data_channels + 1

    base_mask = torch.ones([data_channels, data_channels])
    if mask_type == 'A':
        base_mask = base_mask.tril(-1)
    else:
        base_mask = base_mask.tril(0)

    mask_p1 = torch.cat([base_mask] * in_factor, dim=1)
    mask_p2 = torch.cat([mask_p1] * out_factor, dim=0)

    mask = mask_p2[0:out_channels, 0:in_channels]
    return mask


def mask_conv2d(mask_type, in_channels, out_channels, height, width, data_channels=3):
    r"""
    Creates a mask for Conv2d such that it becomes autoregressive in both
    the spatial dimensions and the channel dimension.

    Input:
        mask_type: str
            Either 'A' or 'B'. 'A' for first layer of network, 'B' for all others.
        in_channels: int
            Number of input channels to layer.
        out_channels: int
            Number of output channels of layer.
        height: int
            Kernel height for layer.
        width: int
            Kernel width for layer.
        data_channels: int
            Number of channels in the input data, e.g. 3 for RGB images. (default = 3).

    Output:
        mask: torch.FloatTensor
            Shape (out_channels, in_channels, height, width).
            A mask with 0 in places for masked elements.
    """
    mask = torch.ones([out_channels, in_channels, height, width])

    # RGB masking in central pixel
    mask[:, :, height // 2, width // 2] = mask_channels(mask_type, in_channels, out_channels, data_channels)

    # Masking all pixels to the right of the central pixel
    mask[:, :, height // 2, width // 2 + 1:] = 0

    # Masking all pixels below the central pixel
    mask[:, :, height // 2 + 1:] = 0

    return mask
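

if __name__ == "__main__":
    # Illustrative sanity check, not part of the original module. For a 3x3
    # kernel, the type-'A' spatial mask zeroes the centre pixel and everything
    # after it in raster order, while type 'B' keeps the centre itself.
    a_mask = mask_conv2d_spatial('A', 3, 3).squeeze()
    b_mask = mask_conv2d_spatial('B', 3, 3).squeeze()
    assert a_mask.tolist() == [[1, 1, 1], [1, 0, 0], [0, 0, 0]]
    assert b_mask.tolist() == [[1, 1, 1], [1, 1, 0], [0, 0, 0]]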
| 34 | 108 | 0.611573 | 434 | 3,128 | 4.276498 | 0.177419 | 0.053879 | 0.054957 | 0.061422 | 0.726293 | 0.688039 | 0.634159 | 0.578125 | 0.578125 | 0.559267 | 0 | 0.019964 | 0.295396 | 3,128 | 91 | 109 | 34.373626 | 0.822142 | 0.563619 | 0 | 0.2 | 0 | 0 | 0.001747 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.04 | 0 | 0.28 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
17a08dd6f2f3775ec7b362537d3a435fcbde0fb3 | 2,130 | py | Python | powerberry-app/src/services/config.py | Steckdoose4711/powerberry | 15c722ff66f0db5c00ddfb71ccc2c75d69b78d39 | [
"MIT"
] | null | null | null | powerberry-app/src/services/config.py | Steckdoose4711/powerberry | 15c722ff66f0db5c00ddfb71ccc2c75d69b78d39 | [
"MIT"
] | 20 | 2022-03-11T19:44:31.000Z | 2022-03-21T19:13:46.000Z | powerberry-app/src/services/config.py | Steckdoose4711/powerberry | 15c722ff66f0db5c00ddfb71ccc2c75d69b78d39 | [
"MIT"
] | null | null | null | import json
import os
import pathlib
import sys

from loguru import logger as log


class Config:
    """Retrieves configuration from environment variables or files or fails fast otherwise"""

    def __init__(self):
        self.keys = {}

        # read configuration file first
        self.from_env("CONFIG_PATH", cast=pathlib.Path)
        self._json_config = self._read_config(self.config_path)

        # query variables
        self.from_env("REDIS_HOST", default="localhost")
        self.from_env("REDIS_PORT", default=6379, cast=int)
        self.from_cfg("app_influx_host", default="localhost")
        self.from_cfg("app_influx_port", default=8086, cast=int)
        self.from_cfg("app_voltage_reference", default=228, cast=float)

    def from_env(self, key, default=None, cast=None):
        value = self._get_key(key, os.environ, "env variable", default, cast)
        self.keys[key.lower()] = value

    def from_cfg(self, key, default=None, cast=None):
        value = self._get_key(key, self._json_config, "config value", default, cast)
        self.keys[key.lower()] = value

    def _get_key(self, key, source, source_name, default=None, cast=None):
        value = None
        if key in source:
            value = source[key]
        elif default is not None:
            log.debug(f"{source_name} '{key}' not set, using default '{default}'")
            value = default
        else:
            log.error(f"required {source_name} '{key}' not set")
            sys.exit(1)

        # potentially cast retrieved value
        assert value is not None
        if cast is not None:
            value = cast(value)
        return value

    def _read_config(self, config_path):
        with open(config_path, "r") as f:
            jzon = json.load(f)
        log.debug(f"config file '{config_path}' loaded: {jzon}")
        return jzon

    def __getattr__(self, name: str):
        """Enables the retrieval of configuration keys as instance fields"""
        if name in self.keys:
            return self.keys[name]
        else:
            raise AttributeError(f"no configuration key '{name}'")
17a2dcce962acf6079243f734b05c4cedd810650 | 3,445 | py | Python | test/parser/test_tail_parser.py | BigDataBoutique/bogi | cc247df8d18ef00ebba7986a57fefbb2ad82a1e6 | [
"Apache-2.0"
] | null | null | null | test/parser/test_tail_parser.py | BigDataBoutique/bogi | cc247df8d18ef00ebba7986a57fefbb2ad82a1e6 | [
"Apache-2.0"
] | null | null | null | test/parser/test_tail_parser.py | BigDataBoutique/bogi | cc247df8d18ef00ebba7986a57fefbb2ad82a1e6 | [
"Apache-2.0"
] | null | null | null | import unittest
from test.utils import dedent
from bogi.parser.tail import TailParser
from bogi.parser.tail_transformer import MessageBody, ContentLine, InputFileRef, MultipartField, Header, ResponseHandler, ResponseReference
class TailParserTests(unittest.TestCase):
def test_content_lines(self):
body = dedent('''
{
"foo": "bar",
"param2": "value2"
}''')
tail = TailParser().parse(body)
self.assertEqual(tail.message_body, MessageBody([
ContentLine('{'),
ContentLine(' "foo": "bar",'),
ContentLine(' "param2": "value2"'),
ContentLine('}')
]))
def test_file_refs(self):
body = dedent('''
< body.json
< /home/file''')
tail = TailParser().parse(body)
self.assertEqual(tail.message_body, MessageBody([
InputFileRef('body.json'),
InputFileRef('/home/file')
]))
def test_message_body(self):
body = dedent('''
{
"key": "val"
}
< body.json
< /home/file
testing''')
tail = TailParser().parse(body)
self.assertEqual(tail.message_body, MessageBody([
ContentLine('{'),
ContentLine(' "key": "val"'),
ContentLine('}'),
InputFileRef('body.json'),
InputFileRef('/home/file'),
ContentLine('testing')
]))
def test_multipart(self):
body = dedent('''
--abcd
Content-Disposition: form-data; name="text"
Text
--abcd
Content-Disposition: form-data; name="file_to_send"; filename="input.txt"
< ./input.txt
--abcd--''')
tail = TailParser(multipart_boundary='abcd').parse(body)
self.assertEqual(tail.message_body, MessageBody([
MultipartField(
headers=[Header(field='Content-Disposition', value='form-data; name="text"')],
messages=[ContentLine(content='Text')]),
MultipartField(
headers=[Header(field='Content-Disposition', value='form-data; name="file_to_send"; filename="input.txt"')],
messages=[InputFileRef(path='./input.txt')])
]))
def test_response_handler_script(self):
script = dedent('''
console.log('Multiline script');
client.global.set("auth", response.body.token);
''')
body = dedent('''
> {% ''' + script + ''' %}''')
tail = TailParser().parse(body)
self.assertEqual(tail.response_handler, ResponseHandler(script=script.strip(), path=None, expected_status_code=None))
def test_response_handler_path(self):
body = dedent('''
> ./script.js''')
tail = TailParser().parse(body)
self.assertEqual(tail.response_handler, ResponseHandler(script=None, path='./script.js', expected_status_code=None))
def test_response_status_code(self):
body = dedent('''
>STATUS 301''')
tail = TailParser().parse(body)
self.assertEqual(tail.response_handler, ResponseHandler(script=None, path=None, expected_status_code=301))
def test_response_ref(self):
body = dedent('''
<> ./previous-response.200.json''')
tail = TailParser().parse(body)
self.assertEqual(tail.response_ref, ResponseReference(path='./previous-response.200.json'))
| 31.898148 | 139 | 0.576488 | 325 | 3,445 | 5.993846 | 0.243077 | 0.036961 | 0.053388 | 0.098563 | 0.525667 | 0.510267 | 0.440452 | 0.402464 | 0.351129 | 0.318275 | 0 | 0.00639 | 0.273149 | 3,445 | 107 | 140 | 32.196262 | 0.771566 | 0 | 0 | 0.383721 | 0 | 0 | 0.266183 | 0.035994 | 0 | 0 | 0 | 0 | 0.093023 | 1 | 0.093023 | false | 0 | 0.046512 | 0 | 0.151163 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
17a30d472fcfbe0abfef5d81ffe2790797697496 | 3,830 | py | Python | openbox-tmux-pipe-menu.py | pawel-slowik/openbox-tmux-pipe-menu | cda64994a893c76ba1af1eb1de9fe72d1ec79c04 | [
"MIT"
] | null | null | null | openbox-tmux-pipe-menu.py | pawel-slowik/openbox-tmux-pipe-menu | cda64994a893c76ba1af1eb1de9fe72d1ec79c04 | [
"MIT"
] | null | null | null | openbox-tmux-pipe-menu.py | pawel-slowik/openbox-tmux-pipe-menu | cda64994a893c76ba1af1eb1de9fe72d1ec79c04 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import subprocess
import re
import xml.etree.ElementTree as et
import datetime as dt
import pipes
import os
import configparser
import sys
from typing import Iterable, Optional, Dict
class TmuxError(Exception):
pass
class TmuxCommandError(TmuxError):
pass
class TmuxParseError(TmuxError):
pass
class ConfigError(Exception):
pass
def list_sessions_cmd() -> str:
command = [
'tmux',
'list-sessions',
'-F',
'#{session_attached} #{session_created} #{session_name}',
]
try:
process = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
out, err = process.communicate()
out = out.decode('utf-8')
err = err.decode('utf-8')
except Exception as ex:
raise TmuxCommandError(repr(ex).strip())
if process.returncode == 0:
return out # type: ignore
if 'no server running' in err:
return ''
if re.search(r'^error connecting to .+ \(No such file or directory\)$', err):
return ''
raise TmuxCommandError(err.strip())
def parse_sessions(text: str) -> Iterable[Dict[str, str]]:
return [parse_session_line(l) for l in text.splitlines()]
def parse_session_line(line: str) -> Dict[str, str]:
match = re.search(
'^(?P<attached>[0-9]+) (?P<timestamp>[0-9]+) (?P<name>.*)$',
line
)
if match is None:
raise TmuxParseError('parse error: ' + line)
return match.groupdict()
def session_list_to_xml(sessions: Iterable[dict]) -> bytes:
if not sessions:
return error_message_to_xml('no sessions')
root = et.Element('openbox_pipe_menu')
cmd_tpl = reattach_cmd_template()
for session in sessions:
item = et.SubElement(root, 'item')
item.attrib['label'] = session_label(session)
action = et.SubElement(item, 'action')
action.attrib['name'] = 'Execute'
command = et.SubElement(action, 'command')
# the command is parsed with the g_shell_parse_argv funcion
# and therefore must have shell quoting (even though it does
# not spawn a shell)
command.text = cmd_tpl % pipes.quote(session['name'])
return et.tostring(root) # type: ignore
def session_label(session: Dict[str, str]) -> str:
label = session['name'] + ' started at '
label += dt.datetime.fromtimestamp(float(session['timestamp'])).isoformat()
if int(session['attached']):
label += ' (attached)'
return label
def reattach_cmd_template() -> str:
config = configparser.RawConfigParser()
config.read(os.path.expanduser('~/.config/openbox/tmux.ini'))
try:
return config.get('pipe-menu', 'attach-command-template')
except (configparser.NoSectionError, configparser.NoOptionError):
pass
term = find_executable(['urxvt', 'xterm'])
if term is None:
raise ConfigError("can't find terminal emulator")
return term + ' -e tmux attach -d -t %s'
def error_message_to_xml(message: str) -> bytes:
root = et.Element('openbox_pipe_menu')
item = et.SubElement(root, 'item')
item.attrib['label'] = message
return et.tostring(root) # type: ignore
def find_executable(names: Iterable[str]) -> Optional[str]:
path = os.environ.get("PATH", os.defpath).split(os.pathsep)
for name in names:
for directory in path:
filename = os.path.join(directory, name)
if os.path.exists(filename):
return filename
return None
def main() -> None:
try:
xml = session_list_to_xml(parse_sessions(list_sessions_cmd()))
except (TmuxError, ConfigError) as err:
xml = error_message_to_xml(repr(err))
sys.stdout.buffer.write(xml)
if __name__ == '__main__':
main()
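
# A hedged configuration sketch; the values below are illustrative, not taken
# from this repository. The optional INI file read by reattach_cmd_template()
# could look like:
#
#   [pipe-menu]
#   attach-command-template = xterm -e tmux attach -d -t %s
#
# and a script like this is typically wired into Openbox as a pipe menu in
# menu.xml, roughly:
#
#   <menu id="tmux-menu" label="tmux" execute="openbox-tmux-pipe-menu.py" />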
| 27.956204 | 81 | 0.637859 | 473 | 3,830 | 5.052854 | 0.350951 | 0.01046 | 0.012552 | 0.021339 | 0.083682 | 0.083682 | 0.060251 | 0.032636 | 0 | 0 | 0 | 0.002733 | 0.23577 | 3,830 | 136 | 82 | 28.161765 | 0.813803 | 0.051175 | 0 | 0.15534 | 0 | 0.009709 | 0.135374 | 0.02509 | 0 | 0 | 0 | 0 | 0 | 1 | 0.087379 | false | 0.048544 | 0.087379 | 0.009709 | 0.339806 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |