hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
eb4b3a57c36fb29e43b2cced38581d7861670f8e | 1,639 | py | Python | utils.py | josslei/Gender-Detection | 51c122eddeb33caf9350a3d974b0842fdc329527 | [
"BSD-3-Clause"
] | null | null | null | utils.py | josslei/Gender-Detection | 51c122eddeb33caf9350a3d974b0842fdc329527 | [
"BSD-3-Clause"
] | null | null | null | utils.py | josslei/Gender-Detection | 51c122eddeb33caf9350a3d974b0842fdc329527 | [
"BSD-3-Clause"
] | null | null | null | import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import os
import sys
def export_sample_images(amount:int, export_dir:str, dataset, shuffle=True):
os.makedirs(export_dir, exist_ok=True)
loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=amount, shuffle=shuffle)
for images, _ in loader:
for i, img in enumerate(images):
img = img.squeeze(0)
img = transforms.ToPILImage()(img)
img.save(os.path.join(export_dir, str(i)) + '.png')
break
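# A minimal usage sketch for export_sample_images (the dataset path and
# transform below are illustrative assumptions, not part of this repo):
#
#   dataset = datasets.ImageFolder(root='/path/to/train',
#                                  transform=transforms.ToTensor())
#   export_sample_images(16, './samples', dataset)  # writes 0.png ... 15.png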
def getStat(train_data):
    print('Compute mean and std for training data.')
print(len(train_data))
train_loader = torch.utils.data.DataLoader(
train_data, batch_size=1, shuffle=False, num_workers=0,
pin_memory=True)
mean = torch.zeros(3)
std = torch.zeros(3)
for X, _ in train_loader:
for d in range(3):
mean[d] += X[:, d, :, :].mean()
std[d] += X[:, d, :, :].std()
mean.div_(len(train_data))
std.div_(len(train_data))
return list(mean.numpy()), list(std.numpy())
if __name__ == '__main__':
    if input('Are you sure to start calculating mean and std? [y/n] ') != 'y':
exit()
if len(sys.argv) != 2:
print('Please specify the path of the dataset')
exit(-1)
transform = transforms.Compose([
transforms.Resize((200, 200)),
transforms.ToTensor()
])
    train_dataset = datasets.ImageFolder(root=sys.argv[1], transform=transform)
mean, std = getStat(train_dataset)
print('mean = ', mean)
print('std = ', std)
| 34.145833 | 99 | 0.633923 | 222 | 1,639 | 4.540541 | 0.432432 | 0.044643 | 0.035714 | 0.039683 | 0.059524 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011085 | 0.229408 | 1,639 | 47 | 100 | 34.87234 | 0.787015 | 0 | 0 | 0 | 0 | 0 | 0.115995 | 0.017094 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0 | 0.116279 | 0 | 0.186047 | 0.116279 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb4eec18185b7f1ef93165dd8d50086af9250306 | 4,708 | py | Python | waveletcodec/wave.py | zenathark/jg.waveletcodec | 7994dd18ef5472e7e4d6447062cf4dc3c2f6463f | [
"MIT"
] | 1 | 2017-05-14T01:42:18.000Z | 2017-05-14T01:42:18.000Z | waveletcodec/wave.py | zenathark/jg.waveletcodec | 7994dd18ef5472e7e4d6447062cf4dc3c2f6463f | [
"MIT"
] | null | null | null | waveletcodec/wave.py | zenathark/jg.waveletcodec | 7994dd18ef5472e7e4d6447062cf4dc3c2f6463f | [
"MIT"
] | null | null | null | """Represent a Wavelet Coefficient Set.
.. module:: wave
   :platform: Unix, Windows
.. moduleauthor:: Juan C Galan-Hernandez <jcgalanh@gmail.com>
"""
import numpy as np
import waveletcodec.tools as tools
import waveletcodec.lwt as lwt
import cv2
import math
#Constant Section
CDF97 = 1
#End
class WCSet(np.ndarray):
"""
This object represents a wavelet.
    The fundamental element for signal processing using wavelets is an N-dimensional matrix
that holds the coefficients of a wavelet decomposition. This object extends
from numpy.ndarray and extends it to hold the extra values needed for a
wavelet data set
"""
level = 0
filter = None
def __new__(cls, array, level, filter_=None):
"""Create a wavelet.
This method creates a wavelet object using a numpy.ndarray as base
Args:
array. A numpy.ndarray as a base for this wavelet
level. Level of decomposition of this wavelet
filter. Filter bank name used
Return:
A Wavelet object with the same data as the numpy.ndarray object.
The data is shared between both objects
"""
obj = np.asarray(array).view(cls)
obj.level = level
obj.filter = filter_
return obj
def __array_finalize__(self, obj):
if obj is None:
return
self.level = getattr(obj, 'level', None)
self.filter = getattr(obj, 'filter', None)
def inverse(self):
"""Return the inverse of this wavelet coefficients.
This method returns the inverse transform of this wavelet
        as another numpy.ndarray matrix. The method chooses the appropriate
inverse transform filter using the class property filter.
Return:
An numpy.ndarray instance that holds the reconstructed signal
using the filter specified in the class property filter.
Raises:
AttributeError if the property filter is not set
"""
if self.filter is None:
msg = "filter property is not set, unable to determine the inverse"
raise AttributeError(msg)
        if self.filter == CDF97:
return icdf97(self)
def as_image(self):
dc_rows, dc_cols = self.shape
dc_rows //= 2 ** self.level
dc_cols //= 2 ** self.level
dc = self.copy()
ac = dc[:dc_rows, :dc_cols].copy()
dc[:dc_rows, :dc_cols] = 0
ac = tools.normalize(ac, upper_bound=255, dtype=np.uint8)
dc = np.abs(dc)
dc = tools.normalize(dc, upper_bound=255, dtype=np.uint8)
#ac = cv2.equalizeHist(ac)
dc = cv2.equalizeHist(dc)
dc[:dc_rows, :dc_cols] = ac
return dc
_CDF97 = lwt.FilterBank(
scale=1 / 1.149604398,
update=[-0.05298011854, 0.4435068522],
predict=[-1.586134342, 0.8829110762]
)
def cdf97(signal, level=1):
"""Calculate the Wavelet Transform of the signal using the CDF97 wavelet.
This method calculates the LWT of the signal given using the
Cohen-Daubechies-Feauveau wavelet using a filter bank of size 9,7
Args:
signal a 1D or 2D numpy.array instance
Returns:
An instance of Wavelet that holds the coefficients of the transform
"""
coeff = _CDF97.forward(signal, level)
wavelet = WCSet(coeff, level, CDF97)
return wavelet
def icdf97(wavelet):
"""Calculate the inverse Wavelet Transform using the CDF97 wavelet.
This method calculates the iLWT of the wavelet given using the
Cohen-Daubechies-Feauveau wavelet using a filter bank of size 9,7
Args:
wavelet a 1D or 2D Wavelet instance
Returns:
An instance of numpy.ndarray that holds the reconstructed signal
"""
signal = _CDF97.inverse(wavelet, wavelet.level)
return signal
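# A minimal round-trip sketch for cdf97/icdf97 (the random input below is an
# illustrative assumption; a 2D array with power-of-two sides is the typical
# use case for a level-2 lifting transform):
#
#   sig = np.random.rand(256, 256)
#   wc = cdf97(sig, level=2)   # WCSet with level=2 and filter=CDF97
#   rec = wc.inverse()         # dispatches to icdf97 via the filter tag
#   # rec should approximate sig up to floating-point error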
def get_z_order(dim):
mtx = []
n = int(math.log(dim, 2))
pows = range(int(n / 2))
for i in range(dim):
x = 0
y = 0
for j in pows:
x |= ((i >> 2 * j) & 1) << j
y |= ((i >> 2 * j + 1) & 1) << j
mtx += [(y, x)]
return mtx
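# Worked example (illustrative call): get_z_order(4) uses n = log2(4) = 2 and
# interleaves the bits of the linear index i into (y, x), producing the
# Z-order walk [(0, 0), (0, 1), (1, 0), (1, 1)] over a 2x2 coordinate block.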
# def get_morton_order(dim, idx = 0, size = -1):
# if size < 0:
# mtx = deque()
# else:
# mtx = deque([],size)
# if idx != 0:
# swp = idx
# idx = dim
# dim = swp
# n = int(math.log(dim,2))
# pows = range(int(n/2))
# for i in range(dim):
# x = 0
# y = 0
# for j in pows:
# x |= ((i >> 2*j) & 1) << j
# y |= ((i >> 2*j+1) & 1) << j
# if idx == 0:
# mtx += [vector((y,x))]
# else:
# idx -= 1
# return mtx
| 26.301676 | 79 | 0.591334 | 640 | 4,708 | 4.301563 | 0.271875 | 0.020341 | 0.017436 | 0.017436 | 0.228115 | 0.151834 | 0.133672 | 0.133672 | 0.102434 | 0.102434 | 0 | 0.039082 | 0.315208 | 4,708 | 178 | 80 | 26.449438 | 0.814826 | 0.512532 | 0 | 0 | 0 | 0 | 0.034948 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.079365 | 0 | 0.349206 | 0.015873 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb5060a785cfcf182925ba2ff985ffd151afd8b7 | 1,598 | py | Python | pr2roc/pr_curve.py | ameya98/roc2pr | ab19d7552e2e9ae32ca00a1be4a17b29a3f915fa | [
"MIT"
] | 1 | 2020-09-08T14:51:48.000Z | 2020-09-08T14:51:48.000Z | pr2roc/pr_curve.py | ameya98/pr2roc | ab19d7552e2e9ae32ca00a1be4a17b29a3f915fa | [
"MIT"
] | null | null | null | pr2roc/pr_curve.py | ameya98/pr2roc | ab19d7552e2e9ae32ca00a1be4a17b29a3f915fa | [
"MIT"
] | null | null | null | from __future__ import division
from .curve import Curve
from numpy import min, max, seterr
seterr(all='raise')
class PRCurve(Curve):
def __init__(self, points, pos_neg_ratio, label=None):
Curve.__init__(self, points, label)
self.pos_neg_ratio = pos_neg_ratio
if max([self.x_vals, self.y_vals]) > 1:
raise ValueError('Precision and recall cannot be greater than 1.')
if min([self.x_vals, self.y_vals]) < 0:
raise ValueError('Precision and recall cannot be lesser than 0.')
if self.pos_neg_ratio <= 0:
raise ValueError('\'pos_neg_ratio\' must be >= 0.')
for x, y in zip(self.x_vals, self.y_vals):
if x > 0 and y == 0:
raise ValueError('Precision cannot be 0 if recall is > 0.')
if x == 0 and y > 0:
raise ValueError('Precision cannot be > 0 if recall is 0. %s %s' % (self.x_vals, self.y_vals))
def compute_fpr_vals(self):
def compute_fpr_val(rec, prec):
try:
return rec * self.pos_neg_ratio * (1/prec - 1)
except (ZeroDivisionError, FloatingPointError):
return 1
return [compute_fpr_val(rec, prec) for rec, prec in zip(self.x_vals, self.y_vals)]
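    # Derivation sketch for compute_fpr_val (standard identities, not
    # project-specific): with P/N = pos_neg_ratio, prec = TP/(TP+FP) and
    # rec = TP/P, we get FP = TP*(1/prec - 1) = rec*P*(1/prec - 1), hence
    # FPR = FP/N = rec * (P/N) * (1/prec - 1), which is the expression above.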
def to_roc(self):
from .roc_curve import ROCCurve
fpr_vals = self.compute_fpr_vals()
tpr_vals = self.x_vals
points = zip(fpr_vals, tpr_vals)
return ROCCurve(points, self.pos_neg_ratio)
def resample(self, num_points):
return self.to_roc().resample(num_points).to_pr() | 35.511111 | 110 | 0.615144 | 234 | 1,598 | 3.965812 | 0.25641 | 0.068966 | 0.082974 | 0.070043 | 0.359914 | 0.31681 | 0.278017 | 0.163793 | 0.114224 | 0.114224 | 0 | 0.014821 | 0.282228 | 1,598 | 45 | 111 | 35.511111 | 0.794246 | 0 | 0 | 0 | 0 | 0 | 0.121951 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.147059 | false | 0 | 0.117647 | 0.029412 | 0.441176 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb50a4e886b2a04a462c9218f1a0436f4b1e8244 | 3,384 | py | Python | blg604ehw2/utils.py | cbekar/DRL_HW2 | 5ecb12ee1d5d545d5059afb4cf578881acb1f00e | [
"MIT"
] | null | null | null | blg604ehw2/utils.py | cbekar/DRL_HW2 | 5ecb12ee1d5d545d5059afb4cf578881acb1f00e | [
"MIT"
] | null | null | null | blg604ehw2/utils.py | cbekar/DRL_HW2 | 5ecb12ee1d5d545d5059afb4cf578881acb1f00e | [
"MIT"
] | null | null | null | """ Utilities for homework 2.
Function "log_progress" is adapted from:
https://github.com/kuk/log-progress
"""
import matplotlib.pyplot as plt
import numpy as np
import torch
from ipywidgets import IntProgress, HTML, VBox
from IPython.display import display
from blg604ehw2.atari_wrapper import LazyFrames
def comparison(*log_name_pairs, texts=[[""]*3], smooth_factor=3):
""" Plots the given logs. There will be as many plots as
the length of the texts argument. Logs will be plotted on
top of each other so that they can be compared. For each
log, mean value is plotted and the area between the
+std and -std of the mean will be shaded.
"""
plt.ioff()
plt.close()
def plot_texts(title, xlabel, ylabel):
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
for i, (title, xlabel, ylabel) in enumerate(texts):
for logs, name in log_name_pairs:
smoothed_logs = np.stack(
[smoother(log[i], smooth_factor) for log in logs])
std_logs = np.std(smoothed_logs, axis=0)
mean_logs = np.mean(smoothed_logs, axis=0)
max_logs = np.max(smoothed_logs, axis=0)
min_logs = np.min(smoothed_logs, axis=0)
plot_texts(title, xlabel, ylabel)
plt.plot(mean_logs, label=name)
plt.legend()
plt.fill_between(np.arange(len(mean_logs)),
np.minimum(mean_logs+std_logs, max_logs),
                         np.maximum(mean_logs-std_logs, min_logs),
alpha=0.4)
plt.show()
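# Illustrative call (assumed log layout: each log in a (logs, name) pair is a
# sequence of metric series, one per (title, xlabel, ylabel) triple in texts):
#
#   comparison((dqn_runs, "DQN"), (ddpg_runs, "DDPG"),
#              texts=[("Reward", "Episode", "Return")], smooth_factor=5)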
def smoother(array, ws):
""" Return smoothed array by the mean filter """
return np.array([sum(array[i:i+ws])/ws for i in range(len(array) - ws)])
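# Illustrative check (assumed example with window ws=2):
# smoother([1, 3, 5, 7], 2) -> array([2., 4.]); the result has
# len(array) - ws entries, each the mean of a sliding window of length ws.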
# Optional
def normalize(frame):
""" Return normalized frame """
frame -= 128.0
frame /= 128.0
return frame
# Optional
def process_state(state):
""" If the state is 4 dimensional image state
return transposed and normalized state otherwise
directly return the state. """
if len(state.shape) == 4:
state = torch.transpose(state, 2, 3)
state = torch.transpose(state, 1, 2)
return normalize(state)
return state
class LoadingBar:
""" Loading bar for ipython notebook """
def __init__(self, size, name):
self.size = size
self.name = name
self._progress = IntProgress(min=0, max=size, value=0)
self._label = HTML()
box = VBox(children=[self._label, self._progress])
display(box)
def success(self, reward):
""" Turn loading bar into "complete state" """
self._progress.bar_style = "success"
self._progress.value = self.size
self._label.value = (
"{name}: {size}/{index}, Best reward: {reward}".format(
name=self.name,
size=self.size,
index=self.size,
reward=reward
)
)
def progress(self, index, reward):
""" Update progress with given index and best reward """
self._progress.value = index
self._label.value = (
"{name}: {size}/{index}, Best reward: {reward}".format(
name=self.name,
size=self.size,
index=index,
reward=reward
)
)
| 31.333333 | 76 | 0.589835 | 432 | 3,384 | 4.520833 | 0.31713 | 0.021505 | 0.03277 | 0.034818 | 0.138249 | 0.138249 | 0.108551 | 0.079877 | 0.079877 | 0.079877 | 0 | 0.012283 | 0.302305 | 3,384 | 107 | 77 | 31.626168 | 0.814909 | 0.204787 | 0 | 0.142857 | 0 | 0 | 0.037265 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114286 | false | 0 | 0.085714 | 0 | 0.271429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb51d6d53256cba70fc82a414f813c5e24351542 | 2,189 | py | Python | proposals/management/commands/get_statistics.py | UiL-OTS-labs/etcl | a22df7ff78620b704a500354fb218fbe9bcabf5f | [
"MIT"
] | 2 | 2017-04-22T11:07:13.000Z | 2018-03-02T12:23:24.000Z | proposals/management/commands/get_statistics.py | UiL-OTS-labs/etcl | a22df7ff78620b704a500354fb218fbe9bcabf5f | [
"MIT"
] | 6 | 2017-07-24T09:59:13.000Z | 2019-04-01T15:15:57.000Z | proposals/management/commands/get_statistics.py | UiL-OTS-labs/etcl | a22df7ff78620b704a500354fb218fbe9bcabf5f | [
"MIT"
] | null | null | null | from django.contrib.auth.models import Group
from django.core.management.base import BaseCommand
from django.conf import settings
from proposals.utils.statistics_utils import get_average_turnaround_time, \
get_qs_for_long_route_reviews, get_qs_for_short_route_reviews, \
get_qs_for_year, \
get_qs_for_year_and_committee, get_review_qs_for_proposals, \
get_total_long_route_proposals, \
get_total_short_route_proposals, \
get_total_students, get_total_submitted_proposals
class Command(BaseCommand):
help = 'Calculate statistics for a given year'
def add_arguments(self, parser):
parser.add_argument('year', type=int)
def handle(self, *args, **options):
AK = Group.objects.get(name=settings.GROUP_GENERAL_CHAMBER)
LK = Group.objects.get(name=settings.GROUP_LINGUISTICS_CHAMBER)
datasets = {
'Total': get_qs_for_year(options['year']),
'AK': get_qs_for_year_and_committee(options['year'], AK),
'LK': get_qs_for_year_and_committee(options['year'], LK)
}
for name, dataset in datasets.items():
print(name)
print('Total submitted:', get_total_submitted_proposals(dataset))
print(
'Total short route:',
get_total_short_route_proposals(dataset)
)
print(
'Total long route:',
get_total_long_route_proposals(dataset)
)
print()
print('Total per relation:')
for relation, count in get_total_students(dataset).items():
print(count, relation)
print()
print("Turnaround times:")
print(
"Short route",
get_average_turnaround_time(
get_qs_for_short_route_reviews(dataset)
),
'days'
)
print(
"Long route",
get_average_turnaround_time(
get_qs_for_long_route_reviews(dataset)
),
'days'
)
print() | 33.166667 | 77 | 0.600731 | 240 | 2,189 | 5.1125 | 0.295833 | 0.04075 | 0.05868 | 0.0489 | 0.394458 | 0.275469 | 0.169519 | 0.169519 | 0.07824 | 0.07824 | 0 | 0.001338 | 0.31704 | 2,189 | 66 | 78 | 33.166667 | 0.819398 | 0 | 0 | 0.236364 | 0 | 0 | 0.081279 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036364 | false | 0 | 0.090909 | 0 | 0.163636 | 0.218182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb51ff6b9107cee84e763a4b3d50eade4083e26c | 4,299 | py | Python | src/latest.py | rharish101/dilbert-viewer-py | 07492822b74e5b9242f47bdf756e147bf792e5c8 | [
"MIT"
] | 5 | 2018-12-08T12:06:29.000Z | 2022-01-23T14:25:51.000Z | src/latest.py | rharish101/dilbert-viewer-py | 07492822b74e5b9242f47bdf756e147bf792e5c8 | [
"MIT"
] | 3 | 2021-11-01T18:19:11.000Z | 2021-11-01T18:23:08.000Z | src/latest.py | rharish101/dilbert-viewer-py | 07492822b74e5b9242f47bdf756e147bf792e5c8 | [
"MIT"
] | 1 | 2020-05-16T19:16:00.000Z | 2020-05-16T19:16:00.000Z | """Scraper to get info on the latest Dilbert comic."""
from datetime import timedelta
from typing import Optional
from constants import LATEST_DATE_REFRESH, SRC_PREFIX
from scraper import Scraper, ScrapingException
from utils import curr_date, date_to_str, str_to_date
class LatestDateScraper(Scraper[str, None]):
"""Class to scrape the date of the latest Dilbert comic.
This scraper returns that date in the format used by "dilbert.com".
Attributes:
pool: The database connection pool
sess: The HTTP client session
logger: The main app logger
"""
async def _get_cached_data(self, _: None = None, /) -> Optional[str]:
"""Get the cached latest date from the database.
If the latest date entry is stale (i.e. it was updated a long time
back), or it wasn't found in the cache, None is returned.
"""
async with self.pool.acquire() as conn:
# The interval for "freshness" of the entry has to be given this
# way instead of '$1 hours', because of PostgreSQL's syntax.
# All dates managed by asyncpg are set to UTC.
date = await conn.fetchval(
"""SELECT latest FROM latest_date
WHERE last_check >= CURRENT_TIMESTAMP - INTERVAL '1 hour' * $1;
""",
LATEST_DATE_REFRESH,
)
if date is not None:
# A "fresh" entry was found
date = date_to_str(date)
return date
async def _cache_data(self, date: str, _: None = None, /) -> None:
"""Cache the latest date into the database."""
# The WHERE condition is not required as there is always only one row
# in the `latest_date` table.
async with self.pool.acquire() as conn:
result = await conn.execute(
"UPDATE latest_date SET latest = $1;", str_to_date(date)
)
rows_updated = int(result.split()[1])
if rows_updated == 1:
self.logger.info("Successfully updated latest date in cache")
return
elif rows_updated > 1:
raise RuntimeError(
'The "latest_date" table has more than one row, '
"i.e. this table is corrupt"
)
# No rows were updated, so the "latest_date" table must be empty. This
# should only happen if this table was cleared manually, or this is the
# first run of this code on this database.
self.logger.info(
"Couldn't update latest date in cache; trying to insert it"
)
async with self.pool.acquire() as conn:
await conn.execute(
"INSERT INTO latest_date (latest) VALUES ($1);",
str_to_date(date),
)
async def _scrape_data(self, _: None = None, /) -> str:
"""Scrape the date of the latest comic from "dilbert.com"."""
# If there is no comic for this date yet, "dilbert.com" will
# auto-redirect to the homepage.
latest = date_to_str(curr_date())
url = SRC_PREFIX + latest
async with self.sess.get(url) as resp:
self.logger.debug(f"Got response for latest date: {resp.status}")
date = resp.url.path.split("/")[-1]
if date == "":
# Redirected to homepage, implying that there's no comic for this
# date. There must be a comic for the previous date, so use that.
date = date_to_str(curr_date() - timedelta(days=1))
self.logger.info(
f"No comic found for today ({latest}); using date: {date}"
)
else:
# Check to see if the scraped date is invalid
try:
str_to_date(date)
except ValueError:
raise ScrapingException(
"Error in scraping the latest date from the URL"
)
return date
async def get_latest_date(self) -> str:
"""Retrieve the date of the latest comic.
Returns:
The latest date
"""
return await super().get_data(None)
async def update_latest_date(self, date: str) -> None:
"""Update the latest date in the cache."""
await self._cache_data(date)
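# A minimal driver sketch (hypothetical wiring: the pool/session/logger
# keyword names below are assumptions about the Scraper base class, which is
# not shown in this file):
#
#   scraper = LatestDateScraper(pool=pool, sess=sess, logger=logger)
#   latest = await scraper.get_latest_date()      # e.g. "2021-11-01"
#   await scraper.update_latest_date(latest)      # refresh the cache entry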
| 37.060345 | 79 | 0.586183 | 564 | 4,299 | 4.374113 | 0.315603 | 0.08107 | 0.042156 | 0.015809 | 0.107013 | 0.067288 | 0.036482 | 0 | 0 | 0 | 0 | 0.003481 | 0.331705 | 4,299 | 115 | 80 | 37.382609 | 0.855204 | 0.237497 | 0 | 0.116667 | 0 | 0 | 0.14966 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb53bbfae8b29bdd3c9940753f30c643697fe9d2 | 742 | py | Python | tests/use_cases/test_fetch_playlists.py | eeng/montag | 8362c4bc6621e23d3b9b43990f9cf28a9e1c1c8a | [
"MIT"
] | null | null | null | tests/use_cases/test_fetch_playlists.py | eeng/montag | 8362c4bc6621e23d3b9b43990f9cf28a9e1c1c8a | [
"MIT"
] | null | null | null | tests/use_cases/test_fetch_playlists.py | eeng/montag | 8362c4bc6621e23d3b9b43990f9cf28a9e1c1c8a | [
"MIT"
] | null | null | null | from montag.domain.entities import Provider
from montag.use_cases.fetch_playlists import FetchPlaylists
from montag.use_cases.support import Failure, Success
from tests import factory
def test_fetch_playlists(repos, spotify_repo):
expected_playlists = factory.playlists(2)
spotify_repo.find_playlists.return_value = expected_playlists
response = FetchPlaylists(repos).execute(Provider.SPOTIFY)
assert response == Success(expected_playlists)
def test_error_handling_with_unexpected_errors(repos, spotify_repo):
error = ValueError("some message")
spotify_repo.find_playlists.side_effect = error
response = FetchPlaylists(repos).execute(Provider.SPOTIFY)
assert response == Failure("some message", error)
| 32.26087 | 68 | 0.801887 | 90 | 742 | 6.377778 | 0.433333 | 0.076655 | 0.045296 | 0.062718 | 0.219512 | 0.219512 | 0.219512 | 0.219512 | 0 | 0 | 0 | 0.001538 | 0.123989 | 742 | 22 | 69 | 33.727273 | 0.881538 | 0 | 0 | 0.142857 | 0 | 0 | 0.032345 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 1 | 0.142857 | false | 0 | 0.285714 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb598dc417f9c3b646b630ac18fa21625cf00654 | 443 | py | Python | forest_calculations.py | mpolinski/python-forest | 859238ab6a2e05e3479eda5d131f59b26a1f1a22 | [
"MIT"
] | null | null | null | forest_calculations.py | mpolinski/python-forest | 859238ab6a2e05e3479eda5d131f59b26a1f1a22 | [
"MIT"
] | null | null | null | forest_calculations.py | mpolinski/python-forest | 859238ab6a2e05e3479eda5d131f59b26a1f1a22 | [
"MIT"
] | null | null | null | from forest_constants import (LEAFY, CONIFEROUS)
def get_forest_dimensions(forest):
rows_num = len(forest)
cols_num = 0
if rows_num:
cols_num = len(forest[0])
return rows_num, cols_num
def get_tree_counts(forest):
leafy_count = 0
coniferous_count = 0
for row in forest:
leafy_count += row.count(LEAFY)
coniferous_count += row.count(CONIFEROUS)
return leafy_count, coniferous_count
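# Illustrative usage (the grid below is an assumed example; LEAFY and
# CONIFEROUS are the marker constants imported above):
#
#   forest = [[LEAFY, CONIFEROUS], [LEAFY, LEAFY]]
#   get_forest_dimensions(forest)  # -> (2, 2)
#   get_tree_counts(forest)        # -> (3, 1)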
| 21.095238 | 49 | 0.688488 | 61 | 443 | 4.721311 | 0.360656 | 0.072917 | 0.083333 | 0.097222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011799 | 0.234763 | 443 | 20 | 50 | 22.15 | 0.837758 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.071429 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb5c1e3cdc51524fbf91bb29a9b75350d7e28939 | 13,580 | py | Python | tests/test_locators.py | msabramo/distlib | 8c201484821e7cdfd52c560eac98b45439402f39 | [
"PSF-2.0"
] | null | null | null | tests/test_locators.py | msabramo/distlib | 8c201484821e7cdfd52c560eac98b45439402f39 | [
"PSF-2.0"
] | null | null | null | tests/test_locators.py | msabramo/distlib | 8c201484821e7cdfd52c560eac98b45439402f39 | [
"PSF-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import os
import sys
from compat import unittest
from distlib.compat import url2pathname, urlparse, urljoin
from distlib.database import DistributionPath, make_graph, make_dist
from distlib.locators import (SimpleScrapingLocator, PyPIRPCLocator,
                              PyPIJSONLocator, DirectoryLocator,
                              DistPathLocator, AggregatingLocator,
                              JSONLocator, DependencyFinder, locate,
                              get_all_distribution_names, default_locator)
HERE = os.path.abspath(os.path.dirname(__file__))
PYPI_RPC_HOST = 'http://python.org/pypi'
PYPI_WEB_HOST = os.environ.get('PYPI_WEB_HOST', 'https://pypi.python.org/simple/')
class LocatorTestCase(unittest.TestCase):
@unittest.skipIf('SKIP_SLOW' in os.environ, 'Skipping slow test')
def test_xmlrpc(self):
locator = PyPIRPCLocator(PYPI_RPC_HOST)
try:
result = locator.get_project('sarge')
except Exception: # pragma: no cover
raise unittest.SkipTest('PyPI XML-RPC not available')
self.assertIn('0.1', result)
dist = result['0.1']
self.assertEqual(dist.name, 'sarge')
self.assertEqual(dist.version, '0.1')
self.assertEqual(dist.source_url,
'https://pypi.python.org/packages/source/s/sarge/'
'sarge-0.1.tar.gz')
self.assertEqual(dist.digest,
('md5', '961ddd9bc085fdd8b248c6dd96ceb1c8'))
try:
names = locator.get_distribution_names()
except Exception: # pragma: no cover
raise unittest.SkipTest('PyPI XML-RPC not available')
self.assertGreater(len(names), 25000)
@unittest.skipIf('SKIP_SLOW' in os.environ, 'Skipping slow test')
def test_json(self):
locator = PyPIJSONLocator(PYPI_RPC_HOST)
result = locator.get_project('sarge')
self.assertIn('0.1.1', result)
dist = result['0.1.1']
self.assertEqual(dist.name, 'sarge')
self.assertEqual(dist.version, '0.1.1')
self.assertEqual(dist.source_url,
'https://pypi.python.org/packages/source/s/sarge/'
'sarge-0.1.1.tar.gz')
self.assertEqual(dist.digest,
('md5', '2a9b9d46e4ef6ae51e2a5ff7de93d9dd'))
self.assertRaises(NotImplementedError, locator.get_distribution_names)
@unittest.skipIf('SKIP_SLOW' in os.environ, 'Skipping slow test')
def test_scraper(self):
locator = SimpleScrapingLocator('https://pypi.python.org/simple/')
for name in ('sarge', 'Sarge'):
result = locator.get_project(name)
self.assertIn('0.1', result)
dist = result['0.1']
self.assertEqual(dist.name, 'sarge')
self.assertEqual(dist.version, '0.1')
self.assertEqual(dist.source_url,
'https://pypi.python.org/packages/source/s/sarge/'
'sarge-0.1.tar.gz')
self.assertEqual(dist.digest,
('md5', '961ddd9bc085fdd8b248c6dd96ceb1c8'))
return
# The following is too slow
names = locator.get_distribution_names()
self.assertGreater(len(names), 25000)
@unittest.skipIf('SKIP_SLOW' in os.environ, 'Skipping slow test')
def test_unicode_project_name(self):
# Just checking to see that no exceptions are raised.
NAME = '\u2603'
locator = SimpleScrapingLocator('https://pypi.python.org/simple/')
result = locator.get_project(NAME)
self.assertFalse(result)
locator = PyPIJSONLocator('https://pypi.python.org/pypi/')
result = locator.get_project(NAME)
self.assertFalse(result)
def test_dir(self):
d = os.path.join(HERE, 'fake_archives')
locator = DirectoryLocator(d)
expected = os.path.join(HERE, 'fake_archives', 'subdir',
'subsubdir', 'Flask-0.9.tar.gz')
def get_path(url):
t = urlparse(url)
return url2pathname(t.path)
for name in ('flask', 'Flask'):
result = locator.get_project(name)
self.assertIn('0.9', result)
dist = result['0.9']
self.assertEqual(dist.name, 'Flask')
self.assertEqual(dist.version, '0.9')
self.assertEqual(os.path.normcase(get_path(dist.source_url)),
os.path.normcase(expected))
names = locator.get_distribution_names()
expected = set(['Flask', 'python-gnupg', 'coverage', 'Django'])
if sys.version_info[:2] == (2, 7):
expected.add('config')
self.assertEqual(names, expected)
def test_dir_nonrecursive(self):
d = os.path.join(HERE, 'fake_archives')
locator = DirectoryLocator(d, recursive=False)
expected = os.path.join(HERE, 'fake_archives', 'subdir',
'subsubdir', 'Flask-0.9.tar.gz')
def get_path(url):
t = urlparse(url)
return url2pathname(t.path)
for name in ('flask', 'Flask'):
result = locator.get_project(name)
self.assertEqual(result, {})
names = locator.get_distribution_names()
expected = set(['coverage'])
self.assertEqual(names, expected)
def test_path(self):
fakes = os.path.join(HERE, 'fake_dists')
sys.path.insert(0, fakes)
try:
edp = DistributionPath(include_egg=True)
locator = DistPathLocator(edp)
cases = ('babar', 'choxie', 'strawberry', 'towel-stuff',
'coconuts-aster', 'bacon', 'grammar', 'truffles',
'banana', 'cheese')
for name in cases:
d = locator.locate(name, True)
r = locator.get_project(name)
self.assertIsNotNone(d)
self.assertEqual(r, { d.version: d })
d = locator.locate('nonexistent')
r = locator.get_project('nonexistent')
self.assertIsNone(d)
self.assertFalse(r)
finally:
sys.path.pop(0)
@unittest.skipIf('SKIP_SLOW' in os.environ, 'Skipping slow test')
def test_aggregation(self):
d = os.path.join(HERE, 'fake_archives')
loc1 = DirectoryLocator(d)
loc2 = SimpleScrapingLocator('https://pypi.python.org/simple/',
timeout=5.0)
locator = AggregatingLocator(loc1, loc2)
exp1 = os.path.join(HERE, 'fake_archives', 'subdir',
'subsubdir', 'Flask-0.9.tar.gz')
exp2 = 'https://pypi.python.org/packages/source/F/Flask/Flask-0.9.tar.gz'
result = locator.get_project('flask')
self.assertEqual(len(result), 1)
self.assertIn('0.9', result)
dist = result['0.9']
self.assertEqual(dist.name, 'Flask')
self.assertEqual(dist.version, '0.9')
scheme, _, path, _, _, _ = urlparse(dist.source_url)
self.assertEqual(scheme, 'file')
self.assertEqual(os.path.normcase(url2pathname(path)),
os.path.normcase(exp1))
locator.merge = True
locator._cache.clear()
result = locator.get_project('flask')
self.assertGreater(len(result), 1)
self.assertIn('0.9', result)
dist = result['0.9']
self.assertEqual(dist.name, 'Flask')
self.assertEqual(dist.version, '0.9')
self.assertEqual(dist.source_url, exp2)
return
# The following code is slow because it has
# to get all the dist names by scraping :-(
n1 = loc1.get_distribution_names()
n2 = loc2.get_distribution_names()
self.assertEqual(locator.get_distribution_names(), n1 | n2)
def test_dependency_finder(self):
locator = AggregatingLocator(
JSONLocator(),
SimpleScrapingLocator('https://pypi.python.org/simple/',
timeout=3.0),
scheme='legacy')
finder = DependencyFinder(locator)
dists, problems = finder.find('irc (== 5.0.1)')
self.assertFalse(problems)
actual = sorted([d.name for d in dists])
self.assertEqual(actual, ['hgtools', 'irc',
'pytest-runner'])
dists, problems = finder.find('irc (== 5.0.1)',
meta_extras=[':test:'])
self.assertFalse(problems)
actual = sorted([d.name for d in dists])
self.assertEqual(actual, ['hgtools', 'irc',
'py', 'pytest',
'pytest-runner'])
g = make_graph(dists)
slist, cycle = g.topological_sort()
self.assertFalse(cycle)
names = [d.name for d in slist]
expected = set([
('hgtools', 'py', 'pytest', 'pytest-runner', 'irc'),
('py', 'hgtools', 'pytest', 'pytest-runner', 'irc'),
('hgtools', 'py', 'pytest-runner', 'pytest', 'irc'),
('py', 'hgtools', 'pytest-runner', 'pytest', 'irc')
])
self.assertIn(tuple(names), expected)
# Test with extras
dists, problems = finder.find('Jinja2 (== 2.6)')
self.assertFalse(problems)
actual = sorted([d.name_and_version for d in dists])
self.assertEqual(actual, ['Jinja2 (2.6)'])
dists, problems = finder.find('Jinja2 [i18n] (== 2.6)')
self.assertFalse(problems)
actual = sorted([d.name_and_version for d in dists])
self.assertEqual(actual[-2], 'Jinja2 (2.6)')
self.assertTrue(actual[-1].startswith('pytz ('))
self.assertTrue(actual[0].startswith('Babel ('))
actual = [d.build_time_dependency for d in dists]
self.assertEqual(actual, [False, False, False])
# Now test with extra in dependency
locator.clear_cache()
dummy = make_dist('dummy', '0.1')
dummy.metadata.run_requires = [{'requires': ['Jinja2 [i18n]']}]
dists, problems = finder.find(dummy)
self.assertFalse(problems)
actual = sorted([d.name_and_version for d in dists])
self.assertTrue(actual[0].startswith('Babel ('))
locator.clear_cache()
dummy.metadata.run_requires = [{'requires': ['Jinja2']}]
dists, problems = finder.find(dummy)
self.assertFalse(problems)
actual = sorted([d.name_and_version for d in dists])
self.assertTrue(actual[0].startswith('Jinja2 ('))
def test_get_all_dist_names(self):
for url in (None, PYPI_RPC_HOST):
try:
all_dists = get_all_distribution_names(url)
except Exception: # pragma: no cover
raise unittest.SkipTest('PyPI XML-RPC not available')
self.assertGreater(len(all_dists), 0)
def test_url_preference(self):
cases = (('http://netloc/path', 'https://netloc/path'),
('http://pypi.python.org/path', 'http://netloc/path'),
('http://netloc/B', 'http://netloc/A'))
for url1, url2 in cases:
self.assertEqual(default_locator.prefer_url(url1, url2), url1)
def test_prereleases(self):
locator = AggregatingLocator(
JSONLocator(),
SimpleScrapingLocator('https://pypi.python.org/simple/',
timeout=3.0),
scheme='legacy')
REQT = 'SQLAlchemy (>0.5.8, < 0.6)'
finder = DependencyFinder(locator)
d = locator.locate(REQT)
self.assertIsNone(d)
d = locator.locate(REQT, True)
self.assertIsNotNone(d)
self.assertEqual(d.name_and_version, 'SQLAlchemy (0.6beta3)')
dist = make_dist('dummy', '0.1')
dist.metadata.run_requires = [{'requires': [REQT]}]
dists, problems = finder.find(dist, prereleases=True)
self.assertFalse(problems)
actual = sorted(dists, key=lambda o: o.name_and_version)
self.assertEqual(actual[0].name_and_version, 'SQLAlchemy (0.6beta3)')
dists, problems = finder.find(dist)
        # Test changed since now prereleases are found as a last resort.
#self.assertEqual(dists, set([dist]))
#self.assertEqual(len(problems), 1)
#problem = problems.pop()
#self.assertEqual(problem, ('unsatisfied', REQT))
self.assertEqual(dists, set([actual[0], dist]))
self.assertFalse(problems)
def test_dist_reqts(self):
r = 'config (<=0.3.5)'
dist = default_locator.locate(r)
self.assertIsNotNone(dist)
self.assertIsNone(dist.extras)
self.assertTrue(dist.matches_requirement(r))
self.assertFalse(dist.matches_requirement('config (0.3.6)'))
def test_dist_reqts_extras(self):
r = 'config[doc,test](<=0.3.5)'
dist = default_locator.locate(r)
self.assertIsNotNone(dist)
self.assertTrue(dist.matches_requirement(r))
self.assertEqual(dist.extras, ['doc', 'test'])
if __name__ == '__main__': # pragma: no cover
import logging
logging.basicConfig(level=logging.DEBUG, filename='test_locators.log',
filemode='w', format='%(message)s')
unittest.main()
| 42.704403 | 82 | 0.583726 | 1,496 | 13,580 | 5.195856 | 0.186497 | 0.07912 | 0.048887 | 0.025473 | 0.554741 | 0.489258 | 0.442429 | 0.397144 | 0.365496 | 0.365496 | 0 | 0.022758 | 0.284904 | 13,580 | 317 | 83 | 42.839117 | 0.777675 | 0.048233 | 0 | 0.490842 | 0 | 0.003663 | 0.15136 | 0.009378 | 0 | 0 | 0 | 0 | 0.278388 | 1 | 0.058608 | false | 0 | 0.029304 | 0 | 0.106227 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb5ccd73b62ac958c2eaef0bb0a5f829cbe6ee69 | 6,946 | py | Python | aws_s3/main.py | mayurdhamecha-crest/ta_cloud_exchange_plugins | 8d64c92909f28bcb2067587ec3361499de5d5723 | [
"BSD-3-Clause"
] | null | null | null | aws_s3/main.py | mayurdhamecha-crest/ta_cloud_exchange_plugins | 8d64c92909f28bcb2067587ec3361499de5d5723 | [
"BSD-3-Clause"
] | null | null | null | aws_s3/main.py | mayurdhamecha-crest/ta_cloud_exchange_plugins | 8d64c92909f28bcb2067587ec3361499de5d5723 | [
"BSD-3-Clause"
] | null | null | null | """AWS S3 Plugin."""
import os
from typing import List
from tempfile import NamedTemporaryFile
from netskope.integrations.cls.plugin_base import (
PluginBase,
ValidationResult,
PushResult,
)
from .utils.aws_s3_validator import (
AWSS3Validator,
)
from .utils.aws_s3_client import AWSS3Client
class AWSS3Plugin(PluginBase):
"""The AWS S3 plugin implementation class."""
def transform(self, raw_data, data_type, subtype) -> List:
"""Transform the raw netskope JSON data into target platform supported data formats.
Args:
            raw_data (list): The raw data to be transformed.
data_type (str): The type of data to be ingested (alert/event)
subtype (str): The subtype of data to be ingested (DLP, anomaly etc. in case of alerts)
Raises:
NotImplementedError: If the method is not implemented.
Returns:
List: list of transformed data.
"""
return raw_data
def push(self, transformed_data, data_type, subtype) -> PushResult:
"""Push the transformed_data to the 3rd party platform."""
try:
aws_client = AWSS3Client(
self.configuration, self.logger, self.proxy
)
temp_obj_file = NamedTemporaryFile("wb", delete=False)
for data in transformed_data:
temp_obj_file.write(data)
temp_obj_file.flush()
try:
aws_client.push(temp_obj_file.name, data_type, subtype)
except Exception:
raise
finally:
temp_obj_file.close()
os.unlink(temp_obj_file.name)
except Exception as e:
self.logger.error(f"Error while pushing to AWS S3: {e}")
raise
def validate(self, configuration: dict) -> ValidationResult:
"""Validate the configuration parameters dict."""
aws_validator = AWSS3Validator(self.logger, self.proxy)
if (
"aws_public_key" not in configuration
or type(configuration["aws_public_key"]) != str
or not configuration["aws_public_key"].strip()
):
self.logger.error(
"AWS S3 Plugin: Validation error occurred. Error: "
"Invalid AWS Access Key ID (Public Key) found in the configuration parameters."
)
return ValidationResult(
success=False,
message="Invalid AWS Access Key ID (Public Key) provided.",
)
if (
"aws_private_key" not in configuration
or type(configuration["aws_private_key"]) != str
or not configuration["aws_private_key"].strip()
):
self.logger.error(
"AWS S3 Plugin: Validation error occurred. Error: "
"Invalid AWS Secret Access Key (Private Key) found in the configuration parameters."
)
return ValidationResult(
success=False,
message="Invalid AWS Secret Access Key (Private Key) provided.",
)
if (
"region_name" not in configuration
or type(configuration["region_name"]) != str
or not aws_validator.validate_region_name(
configuration["region_name"]
)
):
self.logger.error(
"AWS S3 Plugin: Validation error occurred. Error: "
"Invalid Region Name found in the configuration parameters."
)
return ValidationResult(
success=False,
message="Invalid Region Name provided.",
)
if (
"bucket_name" not in configuration
or type(configuration["bucket_name"]) != str
or not configuration["bucket_name"].strip()
):
self.logger.error(
"AWS S3 Plugin: Validation error occurred. Error: "
"Invalid Bucket Name found in the configuration parameters."
)
return ValidationResult(
success=False, message="Invalid Bucket Name provided."
)
if (
"obj_prefix" not in configuration
or type(configuration["obj_prefix"]) != str
or not configuration["obj_prefix"].strip()
):
self.logger.error(
"AWS S3 Plugin: Validation error occurred. Error: "
"Invalid Object Prefix found in the configuration parameters."
)
return ValidationResult(
success=False, message="Invalid Object Prefix provided."
)
if (
"max_file_size" not in configuration
or not aws_validator.validate_max_file_size(
configuration["max_file_size"]
)
):
self.logger.error(
"AWS S3 Plugin: Validation error occurred. Error: "
"Invalid Max File Size found in the configuration parameters."
)
return ValidationResult(
success=False, message="Invalid Max File Size provided."
)
if (
"max_duration" not in configuration
or not aws_validator.validate_max_duration(
configuration["max_duration"]
)
):
self.logger.error(
"AWS S3 Plugin: Validation error occurred. Error: "
"Invalid Max File Size found in the configuration parameters."
)
return ValidationResult(
success=False, message="Invalid Max File Size provided."
)
try:
aws_validator.validate_credentials(
configuration["aws_public_key"].strip(),
configuration["aws_private_key"].strip(),
)
except Exception:
self.logger.error(
"AWS S3 Plugin: Validation error occurred. Error: "
"Invalid AWS Access Key ID (Public Key) and AWS Secret Access Key "
"(Private Key) found in the configuration parameters."
)
return ValidationResult(
success=False,
message="Invalid AWS Access Key ID (Public Key) or AWS Secret Access "
"Key (Private Key) found in the configuration parameters.",
)
try:
aws_client = AWSS3Client(configuration, self.logger, self.proxy)
aws_client.get_bucket()
except Exception as err:
self.logger.error(
f"AWS S3 Plugin: Validation error occurred. Error: {err}"
)
return ValidationResult(
success=False,
message="Validation Error. Check logs for more details.",
)
return ValidationResult(success=True, message="Validation successful.")
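# For reference, a configuration dict shaped to pass the checks in validate()
# would look like this (all values are illustrative placeholders, and the
# accepted ranges for max_file_size/max_duration live in AWSS3Validator):
#
#   {
#       "aws_public_key": "<access-key-id>",
#       "aws_private_key": "<secret-access-key>",
#       "region_name": "us-east-1",
#       "bucket_name": "my-cls-logs",
#       "obj_prefix": "netskope",
#       "max_file_size": 10,
#       "max_duration": 30,
#   }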
| 36.366492 | 100 | 0.565073 | 692 | 6,946 | 5.562139 | 0.184971 | 0.018187 | 0.031437 | 0.049104 | 0.544817 | 0.482463 | 0.45882 | 0.420889 | 0.398545 | 0.374643 | 0 | 0.004719 | 0.359344 | 6,946 | 190 | 101 | 36.557895 | 0.860225 | 0.081342 | 0 | 0.367742 | 0 | 0 | 0.278892 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019355 | false | 0 | 0.03871 | 0 | 0.135484 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb6026d99e12af7beda1b40e5de9c9f9ef6b3948 | 751 | py | Python | 09 Evaluate the Performance of Machine Learning Algorithms with Resampling/shuffle_split.py | IshmaelAsabere/Machine_Learning-Various-Topics | 2c663ab73e2631522dac0fa1ec49042aa2088da4 | [
"MIT"
] | null | null | null | 09 Evaluate the Performance of Machine Learning Algorithms with Resampling/shuffle_split.py | IshmaelAsabere/Machine_Learning-Various-Topics | 2c663ab73e2631522dac0fa1ec49042aa2088da4 | [
"MIT"
] | null | null | null | 09 Evaluate the Performance of Machine Learning Algorithms with Resampling/shuffle_split.py | IshmaelAsabere/Machine_Learning-Various-Topics | 2c663ab73e2631522dac0fa1ec49042aa2088da4 | [
"MIT"
] | null | null | null | # Evaluate using Shuffle Split Cross Validation
from pandas import read_csv
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
filename = 'pima-indians-diabetes.data.csv'
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
dataframe = read_csv(filename, names=names)
array = dataframe.values
X = array[:,0:8]
Y = array[:,8]
n_splits = 10
test_size = 0.33
seed = 7
kfold = ShuffleSplit(n_splits=n_splits, test_size=test_size, random_state=seed)
model = LogisticRegression(solver='liblinear')
results = cross_val_score(model, X, Y, cv=kfold)
print("Accuracy: %.3f%% (%.3f%%)" % (results.mean()*100.0, results.std()*100.0)) | 41.722222 | 80 | 0.747004 | 110 | 751 | 4.954545 | 0.554545 | 0.06055 | 0.058716 | 0.091743 | 0.113761 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028232 | 0.103862 | 751 | 18 | 81 | 41.722222 | 0.781575 | 0.05992 | 0 | 0 | 0 | 0 | 0.141844 | 0.042553 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.235294 | 0 | 0.235294 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb61d5d7e4bf28a8a1ca4a73ac750956135f0ec5 | 4,178 | py | Python | tests/test_s.py | Tygs/ww | 6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4 | [
"MIT"
] | 15 | 2016-10-15T10:15:08.000Z | 2021-04-06T08:31:02.000Z | tests/test_s.py | Tygs/ww | 6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4 | [
"MIT"
] | 7 | 2016-10-14T08:53:29.000Z | 2016-11-09T23:43:31.000Z | tests/test_s.py | Tygs/ww | 6a4b85141c9b74026abe8f3fa9bc7021f3c99fd4 | [
"MIT"
] | 3 | 2016-10-13T11:44:46.000Z | 2016-10-14T08:58:03.000Z | # coding: utf-8
from __future__ import (
unicode_literals, division, print_function, absolute_import
)
import re
import pytest
from ww import s, g, f
def test_lshift():
res = s >> """
This is a long text
And it's not indented
"""
assert isinstance(res, s)
s == "This is a long text\nAnd it's not indented"
def test_split():
gen = s('test').split(',')
assert isinstance(gen, g)
assert gen.list() == ['test']
assert s('test,test').split(',').list() == ['test', 'test']
assert s('a,b,c').split(',', maxsplit=1).list() == ['a', 'b,c']
assert s('a,b,c').split('b,').list() == ['a,', 'c']
assert s('a,b;c/d').split(',', ';', '/').list() == ['a', 'b', 'c', 'd']
assert s(r'a1b33c-d').split(r'\d+').list() == ['a', 'b', 'c-d']
assert s(r'a1b33c-d').split(r'\d+', '-').list() == ['a', 'b', 'c', 'd']
assert s(r'cAt').split('a', flags='i').list() == ['c', 't']
assert s(r'cAt').split('a', flags=re.I).list() == ['c', 't']
chunks = s('a,b;c/d=a,b;c/d').split(',', ';', '/', maxsplit=3)
assert chunks.list() == ['a', 'b', 'c', 'd=a,b;c/d']
with pytest.raises(TypeError):
s('foo').split(1)
def test_maxsplit_with_regex():
chunks = s('a,b;c/d=a,b;c/d').split(',', ';', '[/=]', maxsplit=4)
assert chunks.list() == ['a', 'b', 'c', 'd', 'a,b;c/d']
def test_replace():
st = s('test').replace(',', '')
assert isinstance(st, s)
assert st == 'test'
assert s('test,test').replace(',', ';') == 'test;test'
assert s('a,b,c').replace(',', ';', maxreplace=1) == 'a;b,c'
assert s('a,b,c').replace(',b,', ';') == 'a;c'
assert s('a,b;c/d').replace((',', ';', '/'), (',', ',', ',')) == 'a,b,c,d'
assert s('a,b;c/d').replace((',', ';', '/'), ',') == 'a,b,c,d'
assert s(r'a1b33c-d').replace(r'\d+', ',') == 'a,b,c-d'
assert s(r'a1b33c-d').replace((r'\d+', '-'), ',') == 'a,b,c,d'
assert s(r'cAt').replace('a', 'b', flags='i') == 'cbt'
assert s(r'cAt').replace('a', 'b', flags=re.I) == 'cbt'
with pytest.raises(ValueError):
s(r'cAt').replace(('a', 'b', 'c'), ('b', 'b'))
def test_replace_with_maxplit():
string = s(r'a-1,b-3,3c-d')
assert string.replace(('[,-]'), '', maxreplace=3) == 'a1b3,3c-d'
def test_replace_with_callback():
string = s(r'a-1,b-3,3c-d')
def upper(match):
return match.group().upper()
assert string.replace(('[ab]'), upper, maxreplace=3) == 'A-1,B-3,3c-d'
def test_join():
assert s(';').join('abc') == "a;b;c"
assert s(';').join(range(3)) == "0;1;2"
assert s(';').join(range(3), template="{:.1f}") == "0.0;1.0;2.0"
assert s(';').join(range(3), formatter=lambda s, t: "a") == "a;a;a"
def test_from_bytes():
assert isinstance(s.from_bytes(b'abc', 'ascii'), s)
assert s.from_bytes(b'abc', 'ascii') == 'abc'
assert s.from_bytes('é'.encode('utf8'), 'utf8') == 'é'
with pytest.raises(UnicodeDecodeError):
s.from_bytes('é'.encode('cp850'), 'ascii')
with pytest.raises(ValueError):
s.from_bytes('é'.encode('cp850'))
def test_format():
foo = 1
bar = [1]
string = s('{foo} {bar[0]:.1f}')
assert isinstance(string.format(foo=foo, bar=bar), s)
assert string.format(foo=foo, bar=bar) == "1 1.0"
assert f(string) == "1 1.0"
assert isinstance(f(string), s)
assert f('{foo} {bar[0]:.1f}') == "1 1.0"
def test_add():
string = s('foo')
assert string + 'bar' == 'foobar'
with pytest.raises(TypeError):
string + b'bar'
with pytest.raises(TypeError):
string + 1
assert 'bar' + string == 'barfoo'
with pytest.raises(TypeError):
b'bar' + string
with pytest.raises(TypeError):
1 + string
def test_tobool():
conversions = {
'1': True,
'0': False,
'true': True,
'false': False,
'on': True,
'off': False,
'yes': True,
'no': False,
'': False
}
for key, val in conversions.items():
assert s(key).to_bool() == val
assert s('foo').to_bool(default=True) is True
with pytest.raises(ValueError):
s('foo').to_bool()
| 23.60452 | 78 | 0.514361 | 634 | 4,178 | 3.337539 | 0.170347 | 0.026465 | 0.036862 | 0.034026 | 0.444707 | 0.324197 | 0.229679 | 0.190926 | 0.150284 | 0.136106 | 0 | 0.022072 | 0.219244 | 4,178 | 176 | 79 | 23.738636 | 0.626609 | 0.003112 | 0 | 0.09434 | 0 | 0 | 0.160221 | 0 | 0 | 0 | 0 | 0 | 0.396226 | 1 | 0.113208 | false | 0 | 0.04717 | 0.009434 | 0.169811 | 0.009434 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb64352202a6b429c03980e77f840c5767dfc418 | 513 | py | Python | antalla/migrations/versions/bfa53193d3bf_create_exchange_table.py | sambacha/antalla | 241f49058e4295aa9f5bc62efe517388d9256520 | [
"MIT"
] | null | null | null | antalla/migrations/versions/bfa53193d3bf_create_exchange_table.py | sambacha/antalla | 241f49058e4295aa9f5bc62efe517388d9256520 | [
"MIT"
] | null | null | null | antalla/migrations/versions/bfa53193d3bf_create_exchange_table.py | sambacha/antalla | 241f49058e4295aa9f5bc62efe517388d9256520 | [
"MIT"
] | null | null | null | """create exchange table
Revision ID: bfa53193d3bf
Revises: b97a89b20fa2
Create Date: 2019-09-22 01:17:06.735174
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bfa53193d3bf'
down_revision = 'b97a89b20fa2'
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
"exchanges",
sa.Column("id", sa.Integer, primary_key=True),
sa.Column("name", sa.String),
)
def downgrade():
op.drop_table("exchanges")
| 17.689655 | 54 | 0.695906 | 66 | 513 | 5.318182 | 0.666667 | 0.079772 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.110312 | 0.187135 | 513 | 28 | 55 | 18.321429 | 0.731415 | 0.292398 | 0 | 0 | 0 | 0 | 0.135211 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb6486b781e8bb7e961d09b57c4d80fc18e300da | 2,831 | py | Python | rpeak_seg_simple_v1.0.py | ziyi-bear/ECG-ML-DL-Algorithm-Python | e1cc28c09fcd9330470b30c240ab7fb331c8ea0c | [
"Apache-2.0"
] | 180 | 2018-05-18T12:18:53.000Z | 2022-03-30T11:02:48.000Z | rpeak_seg_simple_v1.0.py | Aiwiscal/ECG-ML-DL-Algorithm-Python-version | 23b24a965fdede3552d33943a26ad824a8c03325 | [
"Apache-2.0"
] | null | null | null | rpeak_seg_simple_v1.0.py | Aiwiscal/ECG-ML-DL-Algorithm-Python-version | 23b24a965fdede3552d33943a26ad824a8c03325 | [
"Apache-2.0"
] | 61 | 2018-06-08T08:19:34.000Z | 2022-03-18T10:34:02.000Z | #!/usr/bin/python
# -*- coding:utf-8 -*-
import sys
import time
import logging
import numpy as np
from biosppy.signals import ecg
from biosppy.storage import load_txt
import matplotlib.pyplot as plt
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def test_rpeaks_simple(data_path):
    signal, mdata = load_txt(data_path)
    logging.info("--------------------------------------------------")
    logging.info("Loaded signal %s, length = %d " % (data_path, len(signal)))
    fs = 360  # signal sampling rate: 360 Hz
    logging.info("Calling christov_segmenter for R-peak detection ...")
    tic = time.time()
    rpeaks = ecg.christov_segmenter(signal, sampling_rate=fs)
    toc = time.time()
    logging.info("Done. Elapsed time: %f seconds. " % (toc - tic))
    # Called this way, rpeaks has type biosppy.utils.ReturnTuple, a biosppy built-in class
    logging.info("christov_segmenter returned type " + str(type(rpeaks)))
    # Two ways to obtain the R-peak index sequence:
    # 1) Take the first item of the return value:
    logging.info("Extracting R-peak indices with method 1 ... ")
    rpeaks_indices_1 = rpeaks[0]
    logging.info("Done. Result type is " + str(type(rpeaks_indices_1)))
    # 2) Call ReturnTuple's as_dict() method to get a Python OrderedDict
    logging.info("Extracting R-peak indices with method 2 ... ")
    rpeaks_indices_2 = rpeaks.as_dict()
    # Then use the parameter name from the documentation (here: rpeaks) as the key.
    rpeaks_indices_2 = rpeaks_indices_2["rpeaks"]
    logging.info("Done. Result type is " + str(type(rpeaks_indices_2)))
    # Check that both methods give the same result:
    check_sum = np.sum(rpeaks_indices_1 == rpeaks_indices_2)
    if check_sum == len(rpeaks_indices_1):
        logging.info("Both extraction methods agree ... ")
    else:
        logging.info("Extraction methods disagree, exiting ...")
        sys.exit(1)
    # hamilton_segmenter offers the same interface as christov_segmenter
    logging.info("Calling hamilton_segmenter (same interface) for R-peak detection")
    tic = time.time()
    rpeaks = ecg.hamilton_segmenter(signal, sampling_rate=fs)
    toc = time.time()
    logging.info("Done. Elapsed time: %f seconds. " % (toc - tic))
    rpeaks_indices_3 = rpeaks.as_dict()["rpeaks"]
    # Plot the waveform together with the detected R-peak positions
    num_plot_samples = 3600
    logging.info("Plotting waveform and detected R-peak positions ...")
    sig_plot = signal[:num_plot_samples]
    rpeaks_plot_1 = rpeaks_indices_1[rpeaks_indices_1 <= num_plot_samples]
    plt.figure()
    plt.plot(sig_plot, "g", label="ECG")
    plt.grid(True)
    plt.plot(rpeaks_plot_1, sig_plot[rpeaks_plot_1], "ro", label="christov_segmenter")
    rpeaks_plot_3 = rpeaks_indices_3[rpeaks_indices_3 <= num_plot_samples]
    plt.plot(rpeaks_plot_3, sig_plot[rpeaks_plot_3], "b^", label="hamilton_segmenter")
    plt.legend()
    plt.title(data_path)
    plt.show()
    logging.info("Done.")
    return
if __name__ == '__main__':
test_rpeaks_simple("./data/ecg_records_117.txt")
test_rpeaks_simple("./data/ecg_records_103.txt")
test_rpeaks_simple("./data/ecg_records_119.txt")
| 36.766234 | 107 | 0.657718 | 362 | 2,831 | 4.867403 | 0.348066 | 0.093644 | 0.047673 | 0.045403 | 0.239501 | 0.208854 | 0.191827 | 0.114642 | 0.07151 | 0.07151 | 0 | 0.020542 | 0.191805 | 2,831 | 76 | 108 | 37.25 | 0.749563 | 0.106323 | 0 | 0.103448 | 0 | 0 | 0.20221 | 0.052395 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017241 | false | 0 | 0.12069 | 0 | 0.155172 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb68b9676e0d03d63bb33be1aab4650e1c5fefb7 | 3,126 | py | Python | mediaire_toolbox/queue/tasks.py | mediaire/mediaire_toolbox | 292a005247a25eb04eaa34fe5a8155422336d04b | [
"MIT"
] | null | null | null | mediaire_toolbox/queue/tasks.py | mediaire/mediaire_toolbox | 292a005247a25eb04eaa34fe5a8155422336d04b | [
"MIT"
] | 11 | 2019-09-27T15:19:28.000Z | 2022-01-04T13:27:19.000Z | mediaire_toolbox/queue/tasks.py | mediaire/mediaire_toolbox | 292a005247a25eb04eaa34fe5a8155422336d04b | [
"MIT"
] | 3 | 2019-05-07T09:42:56.000Z | 2022-01-27T13:14:59.000Z | import time
import json
from copy import deepcopy
class Task(object):
"""Defines task objects that can be handled by the task manager."""
def __init__(self, t_id=None, user_id=None, product_id=None,
tag=None, data=None,
timestamp=None, update_timestamp=None, error=None):
"""Initializes the Task object.
Parameters
----------
t_id: int
transaction id this task belongs to
user_id: int
user_id who submitted this task, if applicable.
product_id: int
product_id of the product
tag: str
String specifying the task. Unique for each task.
data: dict
Data for specific products
timestamp: float
            Timestamp of task creation from `time.time()`
update_timestamp: float
Timestamp of task update (via `create_child()`) from `time.time()`
error: str
a serialized error string in case the task failed while executing
"""
self.t_id = t_id
self.user_id = user_id
self.product_id = product_id
self.tag = tag
self.timestamp = timestamp or int(time.time())
self.update_timestamp = update_timestamp
self.data = data
self.error = error
# self.update = None
def to_dict(self):
return {'tag': self.tag,
'timestamp': self.timestamp,
'update_timestamp': self.update_timestamp,
'data': self.data,
't_id': self.t_id,
'user_id': self.user_id,
'product_id': self.product_id,
'error': self.error}
def to_json(self):
return json.dumps(self.to_dict())
def to_bytes(self):
return self.to_json().encode('utf-8')
def read_dict(self, d):
tag = d['tag']
timestamp = d['timestamp']
t_id = d.get('t_id', None)
user_id = d.get('user_id', None)
product_id = d.get('product_id', None)
update_timestamp = d.get('update_timestamp', None)
data = d.get('data', None)
error = d.get('error', None)
Task.__init__(
self, t_id=t_id, user_id=user_id,
product_id=product_id, tag=tag, data=data,
timestamp=timestamp, update_timestamp=update_timestamp,
error=error)
return self
def read_bytes(self, bytestring):
d = json.loads(bytestring.decode('utf-8'))
self.read_dict(d)
return self
def read_json(self, json_path):
with open(json_path, 'r') as f:
d = json.load(f)
self.read_dict(d)
return self
def create_child(self, tag=None):
"""Creates and returns a follow up task object."""
if tag is None:
tag = self.tag + '__child'
child_task = deepcopy(self)
child_task.tag = tag
child_task.update_timestamp = int(time.time())
return child_task
def __str__(self):
return str(self.to_dict())
def __repr__(self):
return self.__str__()
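# Hedged round-trip sketch (tag and payload values are illustrative):
#   task = Task(t_id=1, tag="preprocess", data={"case": "c-001"})
#   raw = task.to_bytes()           # JSON bytes, ready for a queue
#   same = Task().read_bytes(raw)   # reconstruct on the consumer side
#   child = same.create_child()     # tag becomes "preprocess__child"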
| 30.950495 | 78 | 0.570377 | 399 | 3,126 | 4.258145 | 0.22807 | 0.038846 | 0.01648 | 0.012949 | 0.110653 | 0.030606 | 0.030606 | 0 | 0 | 0 | 0 | 0.000953 | 0.328535 | 3,126 | 100 | 79 | 31.26 | 0.80848 | 0.21945 | 0 | 0.079365 | 0 | 0 | 0.059005 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15873 | false | 0 | 0.047619 | 0.079365 | 0.365079 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb69a63e704e44ba8a523693de06b874425f56d2 | 2,791 | py | Python | torch/legacy/nn/Linear.py | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 | [
"MIT"
] | 51 | 2020-01-26T23:32:57.000Z | 2022-03-20T14:49:57.000Z | torch/legacy/nn/Linear.py | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 | [
"MIT"
] | 2 | 2020-12-19T20:00:28.000Z | 2021-03-03T20:22:45.000Z | torch/legacy/nn/Linear.py | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 | [
"MIT"
] | 33 | 2020-02-18T16:15:48.000Z | 2022-03-24T15:12:05.000Z | import math
import torch
from .Module import Module
from .utils import clear
class Linear(Module):
def __init__(self, inputSize, outputSize, bias=True):
super(Linear, self).__init__()
self.weight = torch.Tensor(outputSize, inputSize)
self.gradWeight = torch.Tensor(outputSize, inputSize)
self.bias = torch.Tensor(outputSize) if bias else None
self.gradBias = torch.Tensor(outputSize) if bias else None
self.reset()
self.addBuffer = None
def noBias(self):
self.bias = None
self.gradBias = None
return self
def reset(self, stdv=None):
if stdv is not None:
stdv = stdv * math.sqrt(3)
else:
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.uniform_(-stdv, stdv)
return self
def _updateAddBuffer(self, input):
nframe = input.size(0)
if self.addBuffer is None:
self.addBuffer = input.new()
if self.addBuffer.nelement() != nframe:
self.addBuffer.resize_(nframe).fill_(1)
def updateOutput(self, input):
assert input.dim() == 2
nframe = input.size(0)
nelement = self.output.nelement()
self.output.resize_(nframe, self.weight.size(0))
if self.output.nelement() != nelement:
self.output.zero_()
self._updateAddBuffer(input)
self.output.addmm_(0, 1, input, self.weight.t())
if self.bias is not None:
self.output.addr_(self.addBuffer, self.bias)
return self.output
def updateGradInput(self, input, gradOutput):
if self.gradInput is None:
return
nelement = self.gradInput.nelement()
self.gradInput.resize_as_(input)
if self.gradInput.nelement() != nelement:
self.gradInput.zero_()
assert input.dim() == 2
self.gradInput.addmm_(0, 1, gradOutput, self.weight)
return self.gradInput
def accGradParameters(self, input, gradOutput, scale=1):
assert input.dim() == 2
self.gradWeight.addmm_(scale, gradOutput.t(), input)
if self.bias is not None:
# update the size of addBuffer if the input is not the same size as the one we had in last updateGradInput
self._updateAddBuffer(input)
self.gradBias.addmv_(scale, gradOutput.t(), self.addBuffer)
def clearState(self):
clear(self, 'addBuffer')
return super(Linear, self).clearState()
def __repr__(self):
return super(Linear, self).__repr__() + \
'({} -> {})'.format(self.weight.size(1), self.weight.size(0)) + \
(' without bias' if self.bias is None else '')
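# Hedged usage sketch of the legacy nn API (shapes are illustrative):
#   layer = Linear(10, 5)
#   out = layer.updateOutput(torch.randn(4, 10))                            # -> 4 x 5
#   grad_in = layer.updateGradInput(torch.randn(4, 10), torch.randn(4, 5))  # -> 4 x 10
#   layer.accGradParameters(torch.randn(4, 10), torch.randn(4, 5))          # accumulates gradWeight/gradBias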
| 31.715909 | 118 | 0.607668 | 337 | 2,791 | 4.934718 | 0.216617 | 0.032471 | 0.050511 | 0.028864 | 0.179796 | 0.116055 | 0.074564 | 0.046903 | 0 | 0 | 0 | 0.008462 | 0.280186 | 2,791 | 87 | 119 | 32.08046 | 0.819313 | 0.037263 | 0 | 0.179104 | 0 | 0 | 0.011918 | 0 | 0 | 0 | 0 | 0 | 0.044776 | 1 | 0.134328 | false | 0 | 0.059701 | 0.014925 | 0.313433 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb6c5cabf7aff4e46d0f69f924e5ecdb4193cfdc | 2,349 | py | Python | tables_io/testUtils.py | LSSTDESC/tables_io | 1c2f119c928d05d237b1c8509e340d29650ceb8b | [
"MIT"
] | 1 | 2021-08-13T15:41:58.000Z | 2021-08-13T15:41:58.000Z | tables_io/testUtils.py | LSSTDESC/tables_io | 1c2f119c928d05d237b1c8509e340d29650ceb8b | [
"MIT"
] | 18 | 2021-08-12T00:09:36.000Z | 2022-02-24T21:11:18.000Z | tables_io/testUtils.py | LSSTDESC/tables_io | 1c2f119c928d05d237b1c8509e340d29650ceb8b | [
"MIT"
] | null | null | null | """
Utilities for testing
"""
import numpy as np
from astropy.table import Table as apTable
from astropy.utils.diff import report_diff_values
def compare_tables(t1, t2):
""" Compare all the tables in two `astropy.table.Table`)
Parameters
----------
t1 : `astropy.table.Table`
One table
t2 : `astropy.table.Table`
Another tables
Returns
-------
identical : `bool`
True if the tables are identical, False otherwise
Notes
-----
For now this explicitly flattens each of the columns, to avoid issues with shape
"""
if sorted(t1.colnames) != sorted(t2.colnames): #pragma: no cover
return False
for cname in t1.colnames:
c1 = t1[cname]
c2 = t2[cname]
if not np.allclose(np.array(c1).flat, np.array(c2).flat): #pragma: no cover
return False
return True
def compare_table_dicts(d1, d2, strict=False):
""" Compare all the tables in two `OrderedDict`, (`str`, `astropy.table.Table`)
Parameters
----------
d1 : `OrderedDict`, (`str`, `astropy.table.Table`)
One dictionary of tables
d2 : `OrderedDict`, (`str`, `astropy.table.Table`)
        Another dictionary of tables
    strict : `bool`
        If True, compare the tables with `astropy`'s `report_diff_values`
        instead of the element-wise numeric check
Returns
-------
identical : `bool`
True if all the tables are identical, False otherwise
"""
identical = True
for k, v in d1.items():
try:
vv = d2[k]
except KeyError: #pragma: no cover
vv = d2[k.upper()]
if strict: #pragma: no cover
identical &= report_diff_values(v, vv)
else: #pragma: no cover
identical &= compare_tables(v, vv)
return identical
def make_test_data():
""" Make and return some test data """
nrow = 1000
vect_size = 20
mat_size = 5
scalar = np.random.uniform(size=nrow)
vect = np.random.uniform(size=nrow*vect_size).reshape(nrow, vect_size)
matrix = np.random.uniform(size=nrow*mat_size*mat_size).reshape(nrow, mat_size, mat_size)
data = dict(scalar=scalar, vect=vect, matrix=matrix)
table = apTable(data)
table.meta['a'] = 1
table.meta['b'] = None
table.meta['c'] = [3, 4, 5]
small_table = apTable(dict(a=np.ones(21), b=np.zeros(21)))
small_table.meta['small'] = True
tables = dict(data=table, md=small_table)
return tables
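# Hedged usage sketch tying the helpers together:
#   tables = make_test_data()
#   assert compare_tables(tables['data'], tables['data'])
#   assert compare_table_dicts(tables, tables)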
| 27.964286 | 93 | 0.61175 | 315 | 2,349 | 4.495238 | 0.342857 | 0.059322 | 0.072034 | 0.055085 | 0.305085 | 0.166667 | 0 | 0 | 0 | 0 | 0 | 0.020127 | 0.259685 | 2,349 | 83 | 94 | 28.301205 | 0.794135 | 0.355045 | 0 | 0.05 | 0 | 0 | 0.005785 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075 | false | 0 | 0.075 | 0 | 0.275 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb6e417ff1d33694d2432e18852dc6a0bbbf5837 | 746 | py | Python | tests/inheritance/test_constructor.py | sco1/pylox | b4820828306c20cee3f8533c2547fafb92c6c1bd | [
"MIT"
] | 2 | 2021-12-18T01:52:50.000Z | 2022-01-17T19:41:52.000Z | tests/inheritance/test_constructor.py | sco1/pylox | b4820828306c20cee3f8533c2547fafb92c6c1bd | [
"MIT"
] | 18 | 2021-11-30T04:05:53.000Z | 2022-02-01T03:30:04.000Z | tests/inheritance/test_constructor.py | sco1/pylox | b4820828306c20cee3f8533c2547fafb92c6c1bd | [
"MIT"
] | null | null | null | from textwrap import dedent
import pytest
from pylox.lox import Lox
# Base cases from https://github.com/munificent/craftinginterpreters/blob/master/test/inheritance/constructor.lox
TEST_SRC = dedent(
"""\
class A {
init(param) {
this.field = param;
}
test() {
print this.field;
}
}
class B < A {}
var b = B("value");
b.test(); // expect: value
"""
)
EXPECTED_STDOUTS = ["value"]
def test_constructor(capsys: pytest.CaptureFixture) -> None:
interpreter = Lox()
interpreter.run(TEST_SRC)
assert not interpreter.had_error
assert not interpreter.had_runtime_error
all_out = capsys.readouterr().out.splitlines()
assert all_out == EXPECTED_STDOUTS
| 19.128205 | 113 | 0.648794 | 88 | 746 | 5.386364 | 0.545455 | 0.029536 | 0.084388 | 0.097046 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.237265 | 746 | 38 | 114 | 19.631579 | 0.83304 | 0.148794 | 0 | 0 | 0 | 0 | 0.011933 | 0 | 0 | 0 | 0 | 0 | 0.230769 | 1 | 0.076923 | false | 0 | 0.230769 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb71982888d5f8510697907e9a0d0ca96fdd5ff9 | 1,196 | py | Python | dugaire/util.py | tadeugr/dugaire | 63e4964ed4b5016e9eb996612138c43fbcb81b53 | [
"Apache-2.0"
] | 4 | 2020-11-19T12:17:10.000Z | 2020-12-15T19:34:04.000Z | dugaire/util.py | tadeugr/dugaire | 63e4964ed4b5016e9eb996612138c43fbcb81b53 | [
"Apache-2.0"
] | 1 | 2020-11-26T01:25:28.000Z | 2020-11-26T01:25:28.000Z | dugaire/util.py | tadeugr/dugaire | 63e4964ed4b5016e9eb996612138c43fbcb81b53 | [
"Apache-2.0"
] | 1 | 2020-11-19T21:18:43.000Z | 2020-11-19T21:18:43.000Z | #!/usr/bin/env python3
""" Import comunity modules. """
import os
import sys
import jinja2
import re
HERE = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, f"{HERE}")
def string_is_latest_or_version(check_string):
    """ Return True if check_string is "latest" or a dotted version number. """
    prog = re.compile(r"^(\d+\.)?(\d+\.)?(\*|\d+)$")
    result = prog.match(check_string)
    if check_string != "latest" and not result:
        return False
    return True
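# Examples of accepted and rejected inputs:
#   string_is_latest_or_version("latest") -> True
#   string_is_latest_or_version("1.19.3") -> True
#   string_is_latest_or_version("v1.19")  -> False (a leading "v" is rejected)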
def get_template(file_name, searchpath=f"{HERE}/templates"):
""" Load and return a Jinja template file. """
templateLoader = jinja2.FileSystemLoader(searchpath=searchpath)
templateEnv = jinja2.Environment(loader=templateLoader)
template = templateEnv.get_template(file_name)
return template
def get_dugaire_image_label(return_format="string"):
""" Get the default label used when building images. """
default_label_key = "builtwith"
default_label_value = "dugaire"
default_label = {default_label_key: default_label_value}
if return_format == "string":
return f"{default_label_key}={default_label_value}"
if return_format == "dockerfile":
return f'{default_label_key}="{default_label_value}"'
return default_label
| 26 | 67 | 0.705686 | 152 | 1,196 | 5.296053 | 0.421053 | 0.163975 | 0.074534 | 0.081988 | 0.171429 | 0.171429 | 0.171429 | 0.171429 | 0.114286 | 0 | 0 | 0.005025 | 0.16806 | 1,196 | 45 | 68 | 26.577778 | 0.80402 | 0.114548 | 0 | 0 | 0 | 0 | 0.169231 | 0.105769 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.153846 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb736abfcc0d07e773852cad1abeb9d44fd78b29 | 2,770 | py | Python | ipaymu/tests.py | ekaputra07/django-ipaymu | 7ea946eaeba720a002d20ad5579575951a979347 | [
"BSD-3-Clause"
] | 2 | 2018-11-14T16:25:01.000Z | 2019-03-22T08:18:43.000Z | ipaymu/tests.py | ekaputra07/django-ipaymu | 7ea946eaeba720a002d20ad5579575951a979347 | [
"BSD-3-Clause"
] | 1 | 2018-10-31T08:44:10.000Z | 2018-11-03T08:37:35.000Z | ipaymu/tests.py | ekaputra07/django-ipaymu | 7ea946eaeba720a002d20ad5579575951a979347 | [
"BSD-3-Clause"
] | 1 | 2018-10-16T09:18:20.000Z | 2018-10-16T09:18:20.000Z | from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from ipaymu.forms import IpaymuForm
from ipaymu.models import IpaymuSessionID
from ipaymu.utils import save_session, verify_session, IpaymuParamsBuilder
class IpaymuTest(TestCase):
fixtures = ['ipaymu/fixtures/sessionID.json',]
def setUp(self):
self.c = Client()
self.good_sessid = 'ad05fd717b3bb836519df7c430f0db0801d347b34ea28e4f15bc6213b9f95772ff882808442e1a5275715f2895f3db8adbd95105147e9f0856c4c5ad7de24bab'
self.junk_sessid = 'this-sesssion-not-exists-in-database'
def test_forms(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2)
def test_urls(self):
# Test canceled page
resp = self.c.get(reverse('ipaymu_cancel_url'))
self.assertEqual(resp.status_code, 200)
# Test return page
resp = self.c.get(reverse('ipaymu_return_url'))
self.assertEqual(resp.status_code, 200)
# Test process url - GET
resp = self.c.get(reverse('ipaymu_process_url'))
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content, 'Invalid request.')
# Test process url - POST
# No data posted, will return invalid field.
resp = self.c.post(reverse('ipaymu_process_url'))
self.assertEqual(resp.status_code, 200)
self.assertTrue('valid' in resp.content)
# Test process url - POST
# With valid data, will redirected to Ipaymu
# resp = self.c.post(reverse('ipaymu_process_url'), {
# 'product': 'test product',
# 'quantity': 1,
# 'price': 5000,
# 'comments': 'this is comments',
# })
# self.assertEqual(resp.status_code, 302)
# Test notify url - GET
resp = self.c.get(reverse('ipaymu_notify_url'))
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content, '')
# Test notify url - POST
resp = self.c.post(reverse('ipaymu_notify_url'))
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content, '')
def test_functions(self):
# Test verify_session
verified = verify_session(self.good_sessid)
self.assertEqual(verified, True)
verified = verify_session(self.junk_sessid)
self.assertEqual(verified, False)
# Test save_session
save_session(self.junk_sessid)
try:
sess = IpaymuSessionID.objects.get(sessid=self.junk_sessid)
except IpaymuSessionID.DoesNotExist:
raise
else:
self.assertEqual(sess.sessid, self.junk_sessid)
| 33.373494 | 157 | 0.642599 | 308 | 2,770 | 5.655844 | 0.292208 | 0.120551 | 0.10907 | 0.100459 | 0.323192 | 0.306544 | 0.299082 | 0.265786 | 0.172216 | 0.172216 | 0 | 0.055474 | 0.258123 | 2,770 | 82 | 158 | 33.780488 | 0.792214 | 0.191336 | 0 | 0.186047 | 0 | 0 | 0.145066 | 0.088222 | 0 | 0 | 0 | 0 | 0.325581 | 1 | 0.093023 | false | 0 | 0.139535 | 0 | 0.27907 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb772658ca94856431b34ba889686158bd95aa78 | 1,884 | py | Python | qcdb/tests/nwchem_tests/test_tce_ccsd_pr_br_t.py | loriab/qccddb | d9e156ef8b313ac0633211fc6b841f84a3ddde24 | [
"BSD-3-Clause"
] | 8 | 2019-03-28T11:54:59.000Z | 2022-03-19T03:31:37.000Z | qcdb/tests/nwchem_tests/test_tce_ccsd_pr_br_t.py | loriab/qccddb | d9e156ef8b313ac0633211fc6b841f84a3ddde24 | [
"BSD-3-Clause"
] | 39 | 2018-10-31T23:02:18.000Z | 2021-12-12T22:11:37.000Z | qcdb/tests/nwchem_tests/test_tce_ccsd_pr_br_t.py | loriab/qccddb | d9e156ef8b313ac0633211fc6b841f84a3ddde24 | [
"BSD-3-Clause"
] | 9 | 2018-03-12T20:51:50.000Z | 2022-02-28T15:18:34.000Z | # TCE CCSD(T) and CCSD[T] calculations
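# CCSD[T] (bracket) and CCSD(T) (parenthesis) are two closely related perturbative
# triples corrections; both are checked against NWChem/TCE reference values below.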
import os
import sys
import qcdb
from ..utils import *
def check_ccsd_t_pr_br(return_value):
ccsd_tot = -76.240077811301250
ccsd_corl = -0.213269954065481
t_br_corr = -0.003139909173705
t_br_corl = -0.216409863239186
ccsd_t_br = -76.243217720474960
t_pr_corr = -0.003054718622142
t_pr_corl = -0.216324672687623
ccsd_t_pr = -76.243132529923390
assert compare_values(ccsd_tot, qcdb.variable("CCSD TOTAL ENERGY"), 5, "ccsd total")
assert compare_values(ccsd_corl, qcdb.variable("CCSD CORRELATION ENERGY"), 5, "ccsd corl")
assert compare_values(t_br_corr, qcdb.variable("T(CCSD) CORRECTION ENERGY"), 5, "[t] corr")
assert compare_values(t_br_corl, qcdb.variable("CCSD+T(CCSD) CORRELATION ENERGY"), 5, "ccsd[t] corl")
assert compare_values(ccsd_t_br, qcdb.variable("CCSD+T(CCSD) TOTAL ENERGY"), 5, "ccsd[t] total")
assert compare_values(t_pr_corr, qcdb.variable("(T) CORRECTION ENERGY"), 5, "(t) corr")
assert compare_values(t_pr_corl, qcdb.variable("CCSD(T) CORRELATION ENERGY"), 5, "ccsd(t) corl")
assert compare_values(ccsd_t_pr, qcdb.variable("CCSD(T) TOTAL ENERGY"), 5, "ccsd(t) tot")
@using("nwchem")
def test_1_ccsd_t():
h2o = qcdb.set_molecule(
"""
O 0.00000000 0.00000000 0.22138519
H 0.00000000 -1.43013023 -0.88554075
H 0.00000000 1.43013023 -0.88554075
units au"""
)
qcdb.set_options(
{
"basis": "cc-pvdz",
"nwchem_scf__rhf": True,
"nwchem_scf__thresh": 1.0e-10,
"nwchem_scf__tol2e": 1.0e-10,
"nwchem_scf__singlet": True,
"nwchem_tce__ccsd(t)": True,
"qc_module": "TCE",
"nwchem_tce__io": "ga",
}
)
val = qcdb.energy("nwc-ccsd(t)")
check_ccsd_t_pr_br(val)
| 34.254545 | 105 | 0.638004 | 267 | 1,884 | 4.243446 | 0.269663 | 0.083848 | 0.134157 | 0.0812 | 0.435128 | 0.213592 | 0.213592 | 0.164166 | 0.164166 | 0.090026 | 0 | 0.15931 | 0.230361 | 1,884 | 54 | 106 | 34.888889 | 0.622069 | 0.019108 | 0 | 0 | 0 | 0 | 0.250905 | 0 | 0 | 0 | 0 | 0 | 0.205128 | 1 | 0.051282 | false | 0 | 0.102564 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb7c5cbf4a4be6c62f0ee77abad0dd0ba1cedf55 | 6,246 | py | Python | autoload/squeeze.py | sangjinhan/squeeze | 0c1721724d181f146c4829afb58e78a596e38cb5 | [
"BSD-3-Clause"
] | 1 | 2018-07-08T10:39:51.000Z | 2018-07-08T10:39:51.000Z | autoload/squeeze.py | sangjinhan/squeeze | 0c1721724d181f146c4829afb58e78a596e38cb5 | [
"BSD-3-Clause"
] | null | null | null | autoload/squeeze.py | sangjinhan/squeeze | 0c1721724d181f146c4829afb58e78a596e38cb5 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import time
import multiprocessing
import os
import vim
from async_worker import AsyncWorker
import utils
# Global map for host (source code) window ID -> Squeezer instance
squeezers = {}
# set of Squeezer instances that are waiting for updates
polling_squeezers = set()
def create_window(buf_name):
vim.command('rightbelow vnew {}'.format(buf_name))
vim.command('let w:squeeze_args=""')
# Use vim.command(), not buf.options[], since the options may not exist
vim.command('setlocal nomodifiable')
vim.command('setlocal buftype=nofile')
vim.command('setlocal syntax=objdump')
vim.command('setlocal filetype=squeeze')
return vim.current.window
class Squeezer:
BUFNAME_PREFIX = '__Squeeze__'
def __init__(self, win):
self.host_win = win
self.host_winid = utils.win_to_winid(win)
self.host_buf = win.buffer
self.host_bufnr = win.buffer.number
guest_buf_name = '{}.{}.{}'.format(self.BUFNAME_PREFIX,
self.host_winid,
self.host_buf.name)
self.guest_win = create_window(guest_buf_name)
self.guest_winid = utils.win_to_winid(self.guest_win)
self.guest_buf = self.guest_win.buffer
self.guest_bufnr = self.guest_buf.number
self._add_autocmd('BufWritePost', self.host_bufnr,
'trigger_build({})'.format(self.host_winid))
self._add_autocmd('QuitPre', self.host_bufnr,
'cleanup_squeezer({})'.format(self.host_winid))
self._add_autocmd('BufUnload', self.guest_bufnr,
'cleanup_squeezer({})'.format(self.host_winid))
# focus back to the host window
vim.current.window = self.host_win
utils.log('object created for {}({})'.format(
win.buffer.name, win.number))
self.worker = None
self.async_build()
def __del__(self):
if self.host_winid in squeezers:
squeezers.pop(self.host_winid)
if self in polling_squeezers:
polling_squeezers.remove(self)
def _add_autocmd(self, ev, bufnr, py_stmt):
cmd = 'call s:Python("{}")'.format(py_stmt)
vim.command('augroup SqueezeAutoCmds{}'.format(self.host_winid))
vim.command(' autocmd {} <buffer={}> {}'.format(ev, bufnr, cmd))
vim.command('augroup END')
def _del_autocmd(self, ev, bufnr):
vim.command('augroup SqueezeAutoCmds{}'.format(self.host_winid))
vim.command(' autocmd! {} <buffer={}>'.format(ev, bufnr))
vim.command('augroup END')
# Close the guest window and destroy the outstanding worker
def cleanup(self):
if self.worker:
self.worker.terminate()
self.worker.join()
self.worker = None
if self.host_winid in squeezers:
squeezers.pop(self.host_winid)
if self.guest_win.valid:
vim.command('{}close'.format(self.guest_win.number))
self._del_autocmd('*', self.host_bufnr)
self._del_autocmd('*', self.guest_bufnr)
utils.log('object destroyed for {}({})'.format(self.host_buf.name,
self.host_winid))
def async_build(self):
if self.worker:
utils.log('killing existing thread')
self.worker.terminate()
self.worker.join()
script = utils.get_var('squeeze_c_script')
args = utils.get_var('squeeze_c_args')
if args:
self.guest_win.vars['squeeze_args'] = args
else:
self.guest_win.vars['squeeze_args'] = '<none>'
path_script = os.path.join(vim.eval('s:plugin_path'), 'scripts/',
script, 'objdump')
self.out_q = multiprocessing.Queue()
self.worker = AsyncWorker(self.out_q, self.host_win.buffer.name,
path_script, args)
self.worker.start()
if len(polling_squeezers) == 0:
vim.command('''
let g:squeeze_timer = timer_start(100, \
function('s:TimerHandler'), {'repeat': -1})
''')
else:
vim.command('call timer_pause(g:squeeze_timer, 0)')
polling_squeezers.add(self)
def update_result(self):
if not self.guest_win.valid:
self.cleanup()
return
if self.worker and not self.out_q.empty():
out, err = self.out_q.get()
output = out + '\n-------\n' + err
self.worker.join()
exit_code = self.worker.exitcode
self.worker = None
# temporarily make the buffer modifiable
self.guest_buf.options['modifiable'] = 1
self.guest_buf[:] = output.split('\n')
self.guest_buf.options['modifiable'] = 0
if self in polling_squeezers:
polling_squeezers.remove(self)
def _toggle_on(win):
obj = Squeezer(win)
squeezers[obj.host_winid] = obj
def _toggle_off(win):
squeezers[utils.win_to_winid(win)].cleanup()
def toggle():
win = vim.current.window
winid = utils.win_to_winid(win)
if winid in squeezers:
_toggle_off(win)
else:
# Toggle hit in a guest window?
for obj in list(squeezers.values()):
if obj.guest_winid == winid:
_toggle_off(obj.host_win)
return
        # Is it a regular file?
opts = win.buffer.options
if 'buftype' in opts and opts['buftype'] not in ['', b'']:
vim.command('echohl WarningMsg')
vim.command('echomsg "Not a regular file"')
vim.command('echohl None')
else:
_toggle_on(win)
def trigger_build(host_winid):
if host_winid in squeezers:
squeezers[host_winid].async_build()
def cleanup_squeezer(host_winid):
if host_winid in squeezers:
squeezers[host_winid].cleanup()
def poll_result():
for obj in list(polling_squeezers):
obj.update_result()
if len(polling_squeezers) == 0:
vim.command('call timer_pause(g:squeeze_timer, 1)')
| 30.468293 | 75 | 0.590618 | 749 | 6,246 | 4.727637 | 0.221629 | 0.049703 | 0.044055 | 0.026829 | 0.310929 | 0.255578 | 0.208698 | 0.158147 | 0.137249 | 0.137249 | 0 | 0.00249 | 0.292667 | 6,246 | 204 | 76 | 30.617647 | 0.799004 | 0.06244 | 0 | 0.241135 | 0 | 0 | 0.147597 | 0.014195 | 0 | 0 | 0 | 0 | 0 | 1 | 0.099291 | false | 0 | 0.042553 | 0 | 0.177305 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb823a3ab1cfe40fa4039cd8121d4b7741953f43 | 14,233 | py | Python | sirius-pipeline/SAI/sai_api_gen.py | mmiele/DASH | 30c65abc597b74a5794f9cc8f287f0febebc820c | [
"Apache-2.0"
] | 18 | 2021-09-22T04:50:09.000Z | 2022-03-26T03:54:26.000Z | sirius-pipeline/SAI/sai_api_gen.py | mmiele/DASH | 30c65abc597b74a5794f9cc8f287f0febebc820c | [
"Apache-2.0"
] | 62 | 2021-11-12T21:25:10.000Z | 2022-03-31T22:41:17.000Z | sirius-pipeline/SAI/sai_api_gen.py | mmiele/DASH | 30c65abc597b74a5794f9cc8f287f0febebc820c | [
"Apache-2.0"
] | 19 | 2021-09-22T22:05:59.000Z | 2022-03-29T04:37:54.000Z | #!/usr/bin/env python3
try:
import os
import json
import argparse
import shutil
from git import Repo
from jinja2 import Template, Environment, FileSystemLoader
except ImportError as ie:
print("Import failed for " + ie.name)
exit(1)
NAME_TAG = 'name'
TABLES_TAG = 'tables'
BITWIDTH_TAG = 'bitwidth'
ACTIONS_TAG = 'actions'
PREAMBLE_TAG = 'preamble'
OTHER_MATCH_TYPE_TAG = 'otherMatchType'
MATCH_TYPE_TAG = 'matchType'
PARAMS_TAG = 'params'
ACTION_REFS_TAG = 'actionRefs'
MATCH_FIELDS_TAG = 'matchFields'
NOACTION = 'NoAction'
STAGES_TAG = 'stages'
def get_sai_key_type(key_size, key_header, key_field):
if key_size == 1:
return 'bool', "booldata"
elif key_size <= 8:
return 'sai_uint8_t', "u8"
elif key_size == 16 and ('_id' in key_field):
return 'sai_object_id_t', "u16"
elif key_size <= 16:
return 'sai_uint16_t', "u16"
elif key_size == 32 and ('addr' in key_field or 'ip' in key_header):
return 'sai_ip_address_t', "ipaddr"
elif key_size == 32 and ('_id' in key_field):
return 'sai_object_id_t', "u32"
elif key_size <= 32:
return 'sai_uint32_t', "u32"
elif key_size == 48 and ('addr' in key_field or 'mac' in key_header):
return 'sai_mac_t', "mac"
elif key_size <= 64:
return 'sai_uint64_t', "u64"
elif key_size == 128:
return 'sai_ip_address_t', "ipaddr"
else:
raise ValueError(f'key_size={key_size} is not supported')
def get_sai_lpm_type(key_size, key_header, key_field):
if key_size == 32 and ('addr' in key_field or 'ip' in key_header):
return 'sai_ip_prefix_t', 'ipPrefix'
elif key_size == 128 and ('addr' in key_field or 'ip' in key_header):
return 'sai_ip_prefix_t', 'ipPrefix'
raise ValueError(f'key_size={key_size}, key_header={key_header}, and key_field={key_field} is not supported')
def get_sai_list_type(key_size, key_header, key_field):
if key_size <= 8:
return 'sai_u8_list_t', "u8list"
elif key_size <= 16:
return 'sai_u16_list_t', "u16list"
elif key_size == 32 and ('addr' in key_field or 'ip' in key_header):
return 'sai_ip_address_list_t', "ipaddrlist"
elif key_size <= 32:
return 'sai_u32_list_t', "u32list"
elif key_size <= 64:
ValueError(f'sai_u64_list_t is not supported')
return 'sai_u64_list_t', "no mapping"
raise ValueError(f'key_size={key_size} is not supported')
def get_sai_range_list_type(key_size, key_header, key_field):
if key_size <= 8:
return 'sai_u8_range_list_t', 'u8rangelist'
elif key_size <= 16:
return 'sai_u16_range_list_t', 'u16rangelist'
elif key_size == 32 and ('addr' in key_field or 'ip' in key_header):
return 'sai_ipaddr_range_list_t', 'ipaddrrangelist'
elif key_size <= 32:
return 'sai_u32_range_list_t', 'u32rangelist'
elif key_size <= 64:
return 'sai_u64_range_list_t', 'u64rangelist'
raise ValueError(f'key_size={key_size} is not supported')
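# Illustrative mappings, derived from the helper logic above (not from the SAI spec itself):
#   get_sai_key_type(32, 'ipv4', 'dst_addr')  -> ('sai_ip_address_t', 'ipaddr')
#   get_sai_key_type(48, 'mac', 'src')        -> ('sai_mac_t', 'mac')
#   get_sai_lpm_type(128, 'ipv6', 'dst_addr') -> ('sai_ip_prefix_t', 'ipPrefix')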
def get_sai_key_data(key):
sai_key_data = dict()
sai_key_data['id'] = key['id']
full_key_name, sai_key_name = key[NAME_TAG].split(':')
key_tuple = full_key_name.split('.')
if len(key_tuple) == 3:
key_struct, key_header, key_field = key_tuple
else:
key_header, key_field = key_tuple
sai_key_data['sai_key_name'] = sai_key_name
key_size = key[BITWIDTH_TAG]
if OTHER_MATCH_TYPE_TAG in key:
sai_key_data['match_type'] = key[OTHER_MATCH_TYPE_TAG].lower()
elif MATCH_TYPE_TAG in key:
sai_key_data['match_type'] = key[MATCH_TYPE_TAG].lower()
else:
raise ValueError(f'No valid match tag found')
if sai_key_data['match_type'] == 'exact':
sai_key_data['sai_key_type'], sai_key_data['sai_key_field'] = get_sai_key_type(key_size, key_header, key_field)
elif sai_key_data['match_type'] == 'lpm':
sai_key_data['sai_lpm_type'], sai_key_data['sai_lpm_field'] = get_sai_lpm_type(key_size, key_header, key_field)
elif sai_key_data['match_type'] == 'list':
sai_key_data['sai_list_type'], sai_key_data['sai_list_field'] = get_sai_list_type(key_size, key_header, key_field)
elif sai_key_data['match_type'] == 'range_list':
sai_key_data['sai_range_list_type'], sai_key_data['sai_range_list_field'] = get_sai_range_list_type(key_size, key_header, key_field)
else:
raise ValueError(f"match_type={sai_key_data['match_type']} is not supported")
sai_key_data['bitwidth'] = key_size
return sai_key_data
def extract_action_data(program):
action_data = {}
for action in program[ACTIONS_TAG]:
        preamble = action[PREAMBLE_TAG]
        id = preamble['id']
        name = preamble[NAME_TAG].split('.')[-1]
params = []
if PARAMS_TAG in action:
for p in action[PARAMS_TAG]:
param = dict()
param['id'] = p['id']
param[NAME_TAG] = p[NAME_TAG]
param['type'], param['field'] = get_sai_key_type(int(p[BITWIDTH_TAG]), p[NAME_TAG], p[NAME_TAG])
param['bitwidth'] = p[BITWIDTH_TAG]
params.append(param)
action_data[id] = {'id': id, NAME_TAG: name, PARAMS_TAG: params}
return action_data
def table_with_counters(program, table_id):
for counter in program['directCounters']:
if counter['directTableId'] == table_id:
return 'true'
return 'false'
def generate_sai_apis(program, ignore_tables):
sai_apis = []
all_actions = extract_action_data(program)
tables = sorted(program[TABLES_TAG], key=lambda k: k[PREAMBLE_TAG][NAME_TAG])
for table in tables:
sai_table_data = dict()
sai_table_data['keys'] = []
sai_table_data[ACTIONS_TAG] = []
sai_table_data[STAGES_TAG] = []
table_control, table_name = table[PREAMBLE_TAG][NAME_TAG].split('.', 1)
if table_name in ignore_tables:
continue
table_name, api_name = table_name.split('|')
sai_table_data[NAME_TAG] = table_name.replace('.' , '_')
sai_table_data['id'] = table[PREAMBLE_TAG]['id']
sai_table_data['with_counters'] = table_with_counters(program, sai_table_data['id'])
        # check if the table belongs to a group
is_new_group = True
if ':' in table_name:
stage, group_name = table_name.split(':')
table_name = group_name
stage = stage.replace('.' , '_')
for sai_api in sai_apis:
for sai_table in sai_api[TABLES_TAG]:
if sai_table['name'] == table_name:
sai_table[STAGES_TAG].append(stage)
is_new_group = False
break
if is_new_group:
sai_table_data[NAME_TAG] = table_name
sai_table_data[STAGES_TAG].append(stage)
else:
continue
for key in table[MATCH_FIELDS_TAG]:
# skip v4/v6 selector
if 'v4_or_v6' in key[NAME_TAG]:
continue
sai_table_data['keys'].append(get_sai_key_data(key))
for action in table[ACTION_REFS_TAG]:
action_id = action["id"]
if all_actions[action_id][NAME_TAG] != NOACTION:
sai_table_data[ACTIONS_TAG].append(all_actions[action_id])
if len(sai_table_data['keys']) == 1 and sai_table_data['keys'][0]['sai_key_name'].endswith(table_name.split('.')[-1] + '_id'):
sai_table_data['is_object'] = 'true'
# Object ID itself is a key
sai_table_data['keys'] = []
elif len(sai_table_data['keys']) > 5:
sai_table_data['is_object'] = 'true'
else:
sai_table_data['is_object'] = 'false'
sai_table_data['name'] = sai_table_data['name'] + '_entry'
is_new_api = True
for sai_api in sai_apis:
if sai_api['app_name'] == api_name:
sai_api[TABLES_TAG].append(sai_table_data)
is_new_api = False
break
if is_new_api:
new_api = dict()
new_api['app_name'] = api_name
new_api[TABLES_TAG] = [sai_table_data]
sai_apis.append(new_api)
return sai_apis
def write_sai_impl_files(sai_api):
env = Environment(loader=FileSystemLoader('.'), trim_blocks=True, lstrip_blocks=True)
sai_impl_tm = env.get_template('/templates/saiapi.cpp.j2')
sai_impl_str = sai_impl_tm.render(tables = sai_api[TABLES_TAG], app_name = sai_api['app_name'])
with open('./lib/sai' + sai_api['app_name'].replace('_', '') + '.cpp', 'w') as o:
o.write(sai_impl_str)
def write_sai_makefile(sai_api_name_list):
env = Environment(loader=FileSystemLoader('.'))
makefile_tm = env.get_template('/templates/Makefile.j2')
makefile_str = makefile_tm.render(api_names = sai_api_name_list)
with open('./lib/Makefile', 'w') as o:
o.write(makefile_str)
env = Environment(loader=FileSystemLoader('.'), trim_blocks=True, lstrip_blocks=True)
sai_impl_tm = env.get_template('/templates/utils.cpp.j2')
sai_impl_str = sai_impl_tm.render(tables = sai_api[TABLES_TAG], app_name = sai_api['app_name'])
with open('./lib/utils.cpp', 'w') as o:
o.write(sai_impl_str)
env = Environment(loader=FileSystemLoader('.'), trim_blocks=True, lstrip_blocks=True)
sai_impl_tm = env.get_template('/templates/utils.h.j2')
sai_impl_str = sai_impl_tm.render(tables = sai_api[TABLES_TAG], app_name = sai_api['app_name'])
with open('./lib/utils.h', 'w') as o:
o.write(sai_impl_str)
def write_sai_files(sai_api):
# The main file
with open('templates/saiapi.h.j2', 'r') as sai_header_tm_file:
sai_header_tm_str = sai_header_tm_file.read()
env = Environment(loader=FileSystemLoader('.'), trim_blocks=True, lstrip_blocks=True)
sai_header_tm = env.get_template('templates/saiapi.h.j2')
sai_header_str = sai_header_tm.render(sai_api = sai_api)
with open('./SAI/experimental/saiexperimental' + sai_api['app_name'].replace('_', '') + '.h', 'w') as o:
o.write(sai_header_str)
# The SAI Extensions
with open('./SAI/experimental/saiextensions.h', 'r') as f:
lines = f.readlines()
new_lines = []
for line in lines:
if 'Add new experimental APIs above this line' in line:
new_lines.append(' SAI_API_' + sai_api['app_name'].upper() + ',\n\n')
if 'new experimental object type includes' in line:
new_lines.append(line)
new_lines.append('#include "saiexperimental' + sai_api['app_name'].replace('_', '') + '.h"\n')
continue
new_lines.append(line)
with open('./SAI/experimental/saiextensions.h', 'w') as f:
f.write(''.join(new_lines))
# The SAI Type Extensions
with open('./SAI/experimental/saitypesextensions.h', 'r') as f:
lines = f.readlines()
new_lines = []
for line in lines:
if 'Add new experimental object types above this line' in line:
for table in sai_api[TABLES_TAG]:
new_lines.append(' SAI_OBJECT_TYPE_' + table[NAME_TAG].upper() + ',\n\n')
new_lines.append(line)
with open('./SAI/experimental/saitypesextensions.h', 'w') as f:
f.write(''.join(new_lines))
# The SAI object struct for entries
with open('./SAI/inc/saiobject.h', 'r') as f:
lines = f.readlines()
new_lines = []
for line in lines:
if 'Add new experimental entries above this line' in line:
for table in sai_api[TABLES_TAG]:
if table['is_object'] == 'false':
new_lines.append(' /** @validonly object_type == SAI_OBJECT_TYPE_' + table[NAME_TAG].upper() + ' */\n')
new_lines.append(' sai_' + table[NAME_TAG] + '_t ' + table[NAME_TAG] + ';\n\n')
if 'new experimental object type includes' in line:
new_lines.append(line)
new_lines.append('#include "../experimental/saiexperimental' + sai_api['app_name'].replace('_', '') + '.h"\n')
continue
new_lines.append(line)
with open('./SAI/inc/saiobject.h', 'w') as f:
f.write(''.join(new_lines))
# CLI
parser = argparse.ArgumentParser(description='P4 SAI API generator')
parser.add_argument('filepath', type=str, help='Path to P4 program RUNTIME JSON file')
parser.add_argument('apiname', type=str, help='Name of the new SAI API')
parser.add_argument('--print-sai-lib', type=bool)
parser.add_argument('--sai-git-url', type=str, default='https://github.com/Opencomputeproject/SAI')
parser.add_argument('--ignore-tables', type=str, default='', help='Comma separated list of tables to ignore')
parser.add_argument('--sai-git-branch', type=str, default='master')
parser.add_argument('--overwrite', type=bool, default=False, help='Overwrite the existing SAI repo')
args = parser.parse_args()
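# Example invocation (file and table names are illustrative):
#   ./sai_api_gen.py dash_pipeline.json dash --ignore-tables deny_table,accept_table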
if not os.path.isfile(args.filepath):
print('File ' + args.filepath + ' does not exist')
exit(1)
if os.path.exists('./SAI'):
if args.overwrite == False:
print('Directory ./SAI already exists. Please remove in order to proceed')
exit(1)
else:
shutil.rmtree('./SAI')
if os.path.exists('./lib'):
if args.overwrite == False:
print('Directory ./lib already exists. Please remove in order to proceed')
exit(1)
else:
shutil.rmtree('./lib')
# Get SAI dictionary from P4 dictionary
print("Generating SAI API...")
with open(args.filepath) as json_program_file:
json_program = json.load(json_program_file)
sai_apis = generate_sai_apis(json_program, args.ignore_tables.split(','))
# Clone a clean SAI repo
print("Cloning SAI repository...")
Repo.clone_from(args.sai_git_url, './SAI', branch=args.sai_git_branch)
os.mkdir("lib")
# Write SAI dictionary into SAI API headers
sai_api_name_list = []
for sai_api in sai_apis:
write_sai_files(sai_api)
write_sai_impl_files(sai_api)
sai_api_name_list.append(sai_api['app_name'].replace('_', ''))
write_sai_makefile(sai_api_name_list)
if args.print_sai_lib:
print(json.dumps(sai_api, indent=2))
| 38.158177 | 140 | 0.644488 | 2,047 | 14,233 | 4.16512 | 0.127504 | 0.032841 | 0.032372 | 0.019939 | 0.52076 | 0.438658 | 0.350575 | 0.317969 | 0.297443 | 0.292048 | 0 | 0.010457 | 0.227359 | 14,233 | 372 | 141 | 38.260753 | 0.764845 | 0.021078 | 0 | 0.271186 | 0 | 0 | 0.202069 | 0.036923 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037288 | false | 0 | 0.027119 | 0 | 0.155932 | 0.030508 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb87aab9a356ed1a7a06480dce0810bf77f27d97 | 1,658 | py | Python | cytoscapeMaker.py | admar505/python-tools | 743c0e41e6700efa3817fdb09c451f8fffccd1b3 | [
"Apache-2.0"
] | null | null | null | cytoscapeMaker.py | admar505/python-tools | 743c0e41e6700efa3817fdb09c451f8fffccd1b3 | [
"Apache-2.0"
] | null | null | null | cytoscapeMaker.py | admar505/python-tools | 743c0e41e6700efa3817fdb09c451f8fffccd1b3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import argparse
import csv
parser = argparse.ArgumentParser(description="takes a file of CYC data, and produces pairwise info for cytoscape network viewing" )
parser.add_argument("--fi",help="the file, must be headered as \"Pathway-id Pathway-name Gene-id Gene-name\"",required=True)
args = parser.parse_args()
vcffi = args.fi
full = csv.DictReader(open(vcffi,'r'),delimiter="\t")
# Parse results into a dict of gene lists keyed by pathway.
#------------------------------------- function definitions -------------------------------------#
def getGenes(pathid, pth):
    """Print pairwise rows: each gene paired with every gene after it (forward-only)."""
    count = 0
    (pwyid, pwyname) = pathid.split(':', 1)
    while count < len(pth):
        frontgene = pth[count]
        for genes in pth[count + 1:len(pth)]:
            (geneid, genename) = genes.split(":", 1)
            (frontid, frontname) = frontgene.split(":", 1)
            print(pwyid + "\t" + pwyname + "\t" + geneid + "\t" + genename + "\t" + frontid + "\t" + frontname)
        count = count + 1
#---------------------------------main-----------------------------------#
pre_dict = {}
for line in full:  # collect genes into one list per pathway
    pathkey = line['Pathway-id'] + ":" + line["Pathway-name"]
    if pathkey not in pre_dict:
        pre_dict[pathkey] = []
    if "unknown" not in line['Gene-id']:
        pre_dict[pathkey].append(line['Gene-id'] + ":" + line['Gene-name'])
print('Pathway-id\tPathway-name\tGene-id\tGene-name\tTarget-id\tTarget-name')
for path in pre_dict:
getGenes(path,pre_dict[path])
| 23.027778 | 131 | 0.575392 | 211 | 1,658 | 4.483412 | 0.50237 | 0.044397 | 0.019027 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002257 | 0.198432 | 1,658 | 71 | 132 | 23.352113 | 0.709556 | 0.199035 | 0 | 0 | 0 | 0.034483 | 0.195103 | 0.052028 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.103448 | 0 | 0.137931 | 0.068966 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb893239a685151332f8c8b4695d9a76a757792e | 1,402 | py | Python | algo/searching_and_sorting/binary_search.py | avi3tal/knowledgebase | fd30805aa94332a6c14c9d8631c7044673fb3e2c | [
"MIT"
] | null | null | null | algo/searching_and_sorting/binary_search.py | avi3tal/knowledgebase | fd30805aa94332a6c14c9d8631c7044673fb3e2c | [
"MIT"
] | null | null | null | algo/searching_and_sorting/binary_search.py | avi3tal/knowledgebase | fd30805aa94332a6c14c9d8631c7044673fb3e2c | [
"MIT"
] | 1 | 2021-11-19T13:45:59.000Z | 2021-11-19T13:45:59.000Z |
def binary_search(search_num, sorted_arr):
    """
    https://runestone.academy/runestone/books/published/pythonds/SortSearch/TheBinarySearch.html
    First Q at https://dev.to/javinpaul/20-basic-algorithms-problems-from-coding-interviews-4o76
    """
    if not sorted_arr:
        return False
    midpoint = len(sorted_arr) // 2  # integer division: a float index would raise TypeError
    mid_value = sorted_arr[midpoint]
    if mid_value == search_num:
        return True
    if mid_value < search_num:
        # recurse on the right half, excluding the midpoint
        return binary_search(search_num, sorted_arr[midpoint + 1:])
    # recurse on the left half, excluding the midpoint
    return binary_search(search_num, sorted_arr[:midpoint])
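# Quick sanity checks:
#   binary_search(5, [1, 3, 5, 7]) -> True
#   binary_search(4, [1, 3, 5, 7]) -> False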
def binary_search_no_rec(search_num, sorted_arr):
first = 0
last = len(sorted_arr) - 1
found = False
while first <= last and not found:
midpoint = (first + last) // 2
        print(midpoint, sorted_arr[midpoint], sorted_arr[first:last + 1])  # trace the current search window
if sorted_arr[midpoint] == search_num:
found = True
else:
if sorted_arr[midpoint] > search_num:
last = midpoint - 1
else:
first = midpoint + 1
return found
if __name__ == "__main__":
arr = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
print(binary_search_no_rec(5, arr))
| 26.45283 | 96 | 0.617689 | 192 | 1,402 | 4.239583 | 0.322917 | 0.143735 | 0.092138 | 0.088452 | 0.291155 | 0.291155 | 0.154791 | 0.154791 | 0.154791 | 0.154791 | 0 | 0.030632 | 0.278174 | 1,402 | 52 | 97 | 26.961538 | 0.773715 | 0.131954 | 0 | 0.147059 | 0 | 0 | 0.0067 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0 | 0 | 0.264706 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb89523d17723f3e6a6db454cea7c75b5abcf4eb | 3,292 | py | Python | axelrod/tests/test_resultset.py | DumisaniZA/Axelrod | e59fc40ebb705afe05cea6f30e282d1e9c621259 | [
"MIT"
] | 33 | 2015-02-20T11:36:48.000Z | 2022-02-16T17:02:06.000Z | axelrod/tests/test_resultset.py | DumisaniZA/Axelrod | e59fc40ebb705afe05cea6f30e282d1e9c621259 | [
"MIT"
] | 108 | 2015-02-18T14:15:44.000Z | 2020-05-08T10:39:58.000Z | axelrod/tests/test_resultset.py | DumisaniZA/Axelrod | e59fc40ebb705afe05cea6f30e282d1e9c621259 | [
"MIT"
] | 41 | 2015-02-18T13:40:04.000Z | 2021-05-31T06:08:10.000Z | import unittest
import axelrod
class TestResultSet(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.players = ('Player1', 'Player2', 'Player3')
cls.test_results = [
[[0, 0], [10, 10], [21, 21]],
[[10, 8], [0, 0], [16, 20]],
[[16, 16], [16, 16], [0, 0]],
]
cls.expected_scores = [
[3.1, 3.1],
[2.6, 2.8],
[3.2, 3.2],
]
cls.expected_payoffs = [
[0.0, 2.0, 4.2],
[1.8, 0.0, 3.6],
[3.2, 3.2, 0.0],
]
cls.test_payoffs_list = [
[[0, 10, 21], [10, 0, 16], [16, 16, 0]],
[[0, 10, 21], [8, 0, 20], [16, 16, 0]],
]
cls.expected_stddevs = [
[0.0, 0.0, 0.0],
[0.20, 0.0, 0.40],
[0.0, 0.0, 0.0],
]
cls.expected_ranking = [2, 0, 1]
cls.expected_ranked_names = ['Player3', 'Player1', 'Player2']
cls.expected_csv = 'Player3,Player1,Player2\n3.2,3.1,2.6\n3.2,3.1,2.8\n'
def test_init(self):
rs = axelrod.ResultSet(self.players, 5, 2)
expected_results = [[[0,0] for j in range(3)] for i in range(3)]
self.assertEquals(rs.nplayers, 3)
self.assertEquals(rs.players, self.players)
self.assertEquals(rs.turns, 5)
self.assertEquals(rs.repetitions, 2)
self.assertTrue(rs.results, expected_results)
self.assertFalse(rs.finalised)
def test_generate_scores(self):
rs = axelrod.ResultSet(self.players, 5, 2)
rs.results = self.test_results
self.assertEquals(rs.generate_scores(), self.expected_scores)
def test_generate_ranking(self):
rs = axelrod.ResultSet(self.players, 5, 2)
rs.results = self.test_results
scores = rs.generate_scores()
self.assertEquals(rs.generate_ranking(scores), self.expected_ranking)
def test_generate_ranked_names(self):
rs = axelrod.ResultSet(self.players, 5, 2)
rs.results = self.test_results
scores = rs.generate_scores()
rankings = rs.generate_ranking(scores)
self.assertEquals(rs.generate_ranked_names(rankings), self.expected_ranked_names)
def test_generate_payoff_matrix(self):
rs = axelrod.ResultSet(self.players, 5, 2)
rs.results = self.test_results
payoffs, stddevs = rs.generate_payoff_matrix()
stddevs = [[round(x, 1) for x in row] for row in stddevs]
self.assertEquals(payoffs, self.expected_payoffs)
self.assertEquals(stddevs, self.expected_stddevs)
def test_finalise(self):
rs = axelrod.ResultSet(self.players, 5, 2)
rs.finalise(self.test_payoffs_list)
self.assertEquals(rs.scores, self.expected_scores)
self.assertEquals(rs.ranking, self.expected_ranking)
self.assertEquals(rs.ranked_names, self.expected_ranked_names)
self.assertTrue(rs.finalised)
self.assertRaises(AttributeError, rs.finalise, self.test_payoffs_list)
def test_csv(self):
rs = axelrod.ResultSet(self.players, 5, 2)
self.assertRaises(AttributeError, rs.csv)
rs.finalise(self.test_payoffs_list)
rs.results = self.test_results
self.assertEquals(rs.csv(), self.expected_csv)
| 36.577778 | 89 | 0.59599 | 433 | 3,292 | 4.399538 | 0.152425 | 0.022047 | 0.103937 | 0.014698 | 0.349081 | 0.286089 | 0.234646 | 0.234646 | 0.166404 | 0.146982 | 0 | 0.063715 | 0.265796 | 3,292 | 89 | 90 | 36.988764 | 0.724452 | 0 | 0 | 0.230769 | 0 | 0.012821 | 0.02825 | 0.015492 | 0 | 0 | 0 | 0 | 0.230769 | 1 | 0.102564 | false | 0 | 0.025641 | 0 | 0.141026 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb89728654ff6fe0b167e3b43a91c36391a5c80e | 814 | py | Python | sparse_arrays.py | erjan/coding_exercises | 53ba035be85f1e7a12b4d4dbf546863324740467 | [
"Apache-2.0"
] | null | null | null | sparse_arrays.py | erjan/coding_exercises | 53ba035be85f1e7a12b4d4dbf546863324740467 | [
"Apache-2.0"
] | null | null | null | sparse_arrays.py | erjan/coding_exercises | 53ba035be85f1e7a12b4d4dbf546863324740467 | [
"Apache-2.0"
] | null | null | null | #There is a collection of
#input strings and a collection of query strings. For each query string, determine how many times it occurs in the list of input strings
def f():
strings = ['aba', 'baba', 'aba', 'xzxb']
queries = ['aba', 'xzxb', 'ab']
res = []
'''
for q in queries:
total= 0
for s in strings:
if s == q:
total+=1
res.append(total)
'''
for q in queries:
res.append(len(list(filter(lambda s: s == q, strings))))
print(res)
return res
f()
#2nd solution
def matchingStrings(strings, queries):
res = []
for q in queries:
total = 0
for s in strings:
if s == q:
total+=1
res.append(total)
return res
| 20.35 | 136 | 0.503686 | 105 | 814 | 3.904762 | 0.419048 | 0.029268 | 0.043902 | 0.095122 | 0.287805 | 0.287805 | 0.287805 | 0.287805 | 0.287805 | 0.287805 | 0 | 0.01002 | 0.386978 | 814 | 39 | 137 | 20.871795 | 0.811623 | 0.211302 | 0 | 0.333333 | 0 | 0 | 0.048729 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0 | 0 | 0.222222 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb8c05f5a993f660ca7de8d026f690e8d7461195 | 4,658 | py | Python | reactapp/FlowerBackend/strategies/FedOpt.py | ImperialAI-Blockchain-Team/group-project-decentralised | 27b4242aa850c5c32d3bdbe6e4c9e3e3c226e7d3 | [
"Apache-2.0"
] | 1 | 2022-01-03T15:15:58.000Z | 2022-01-03T15:15:58.000Z | reactapp/FlowerBackend/strategies/FedOpt.py | ImperialAI-Blockchain-Team/group-project-decentralised | 27b4242aa850c5c32d3bdbe6e4c9e3e3c226e7d3 | [
"Apache-2.0"
] | null | null | null | reactapp/FlowerBackend/strategies/FedOpt.py | ImperialAI-Blockchain-Team/group-project-decentralised | 27b4242aa850c5c32d3bdbe6e4c9e3e3c226e7d3 | [
"Apache-2.0"
] | null | null | null | '''Ref: https://arxiv.org/pdf/2003.00295.pdf'''
from typing import Callable, Dict, List, Optional, Tuple
import numpy as np
from flwr.common import (
EvaluateIns,
EvaluateRes,
FitIns,
FitRes,
Weights,
parameters_to_weights,
weights_to_parameters,
)
from flwr.server.strategy.aggregate import aggregate, weighted_loss_avg
from flwr.server.client_proxy import ClientProxy
import torch
from .FedStrategy import FedStrategy
import json
DEFAULT_SERVER_ADDRESS = "[::]:8080"
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
DATA_ROOT = "uploads/testset.csv"
class FedOpt(FedStrategy):
def __init__(
self,
*,
fraction_fit: float = 0.1,
fraction_eval: float = 0.1,
min_fit_clients: int = 2,
min_eval_clients: int = 2,
min_available_clients: int = 2,
        eval_fn=None,
        on_fit_config_fn=None,
        on_evaluate_config_fn=None,
        accept_failures=True,
        mode='adagrad',
        beta=0.99,
        initial_parameters=None,
eta: float = 1e-1,
eta_l: float = 1e-1,
tau: float = 1e-9,
) -> None:
super().__init__(
fraction_fit=fraction_fit,
fraction_eval=fraction_eval,
min_fit_clients=min_fit_clients,
min_eval_clients=min_eval_clients,
min_available_clients=min_available_clients,
eval_fn=eval_fn,
on_fit_config_fn=on_fit_config_fn,
on_evaluate_config_fn=on_evaluate_config_fn,
accept_failures=accept_failures,
initial_parameters=initial_parameters,
)
self.mode = mode
self.current_weights = initial_parameters
self.beta = beta
self.eta = eta
self.eta_l = eta_l
self.tau = tau
self.v_t: Optional[Weights] = None
def __repr__(self) -> str:
rep = f"FedOpt(accept_failures={self.accept_failures})"
return rep
def aggregate_fit(
self,
rnd: int,
results: List[Tuple[ClientProxy, FitRes]],
failures: List[BaseException],
) -> Optional[Weights]:
if not results:
return None
if not self.accept_failures and failures:
return None
net = self.model.Loader(DATA_ROOT).load_model()
testset, _ = self.model.Loader(DATA_ROOT).load_data()
testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False)
for client, fit_res in results:
self.set_weights(net, parameters_to_weights(fit_res.parameters))
net.to(DEVICE)
loss, acc = self.model.test(net, testloader, device=DEVICE)
self.contrib[fit_res.metrics['cid']].append(acc)
weights_results = [
(parameters_to_weights(fit_res.parameters), fit_res.num_examples)
for client, fit_res in results
]
fedavg_aggregate = aggregate(weights_results)
if fedavg_aggregate is None:
return None
aggregated_updates = [
subset_weights - self.current_weights[idx]
for idx, subset_weights in enumerate(fedavg_aggregate)
]
delta_t = aggregated_updates
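        # Server optimizer state update, following the FedOpt family
        # (Reddi et al., https://arxiv.org/pdf/2003.00295.pdf); writing d for delta_t:
        #   adagrad: v_t = v_{t-1} + d^2
        #   yogi:    v_t = v_{t-1} - (1 - beta) * d^2 * sign(v_{t-1} - d^2)
        #   adam:    v_t = beta * v_{t-1} + (1 - beta) * d^2
        # then x_t = x_{t-1} + eta * d / (sqrt(v_t) + tau).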
if not self.v_t:
self.v_t = [np.zeros_like(subset_weights) for subset_weights in delta_t]
if self.mode == 'adagrad':
self.v_t = [
self.v_t[idx] + np.multiply(subset_weights, subset_weights)
for idx, subset_weights in enumerate(delta_t)
]
if self.mode == 'yogi':
self.v_t = [
self.v_t[idx] - (1 - self.beta)*np.multiply(subset_weights, subset_weights)*np.sign(self.v_t[idx] - np.multiply(subset_weights, subset_weights))
for idx, subset_weights in enumerate(delta_t)
]
if self.mode == 'adam':
self.v_t = [
self.beta*self.v_t[idx] + (1 - self.beta)*np.multiply(subset_weights, subset_weights)
for idx, subset_weights in enumerate(delta_t)
]
new_weights = [
self.current_weights[idx]
+ self.eta * delta_t[idx] / (np.sqrt(self.v_t[idx]) + self.tau)
for idx in range(len(delta_t))
]
self.current_weights = new_weights
self.set_weights(net, new_weights)
if new_weights is not None:
print(f"Saving round {rnd} model...")
torch.save(net, f"round-{rnd}-model.pt")
with open('contrib.json', 'w') as outfile:
json.dump(self.contrib, outfile)
return self.current_weights | 34 | 160 | 0.607342 | 578 | 4,658 | 4.633218 | 0.262976 | 0.072816 | 0.024645 | 0.016804 | 0.271098 | 0.230022 | 0.126214 | 0.121733 | 0.121733 | 0.121733 | 0 | 0.010375 | 0.296479 | 4,658 | 137 | 161 | 34 | 0.806836 | 0.009017 | 0 | 0.090909 | 0 | 0 | 0.036435 | 0.009976 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024793 | false | 0 | 0.066116 | 0 | 0.140496 | 0.008264 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb8d0654d1d205d5f73cc68270c3fb56a4a831b9 | 786 | py | Python | setup.py | alekordESA/package-template | c95a64bf125d41f1bcfd50494dbd0daeb0b27fca | [
"MIT"
] | null | null | null | setup.py | alekordESA/package-template | c95a64bf125d41f1bcfd50494dbd0daeb0b27fca | [
"MIT"
] | null | null | null | setup.py | alekordESA/package-template | c95a64bf125d41f1bcfd50494dbd0daeb0b27fca | [
"MIT"
] | null | null | null | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
long_description = """
Short description...
"""
setuptools.setup(
name='test_package_kthdesa',
version='1.0.0',
author='Alexandros Korkovelos',
author_email='alekor@desa.kth.se',
description='This is a test package',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/alekordESA/package-template',
packages=['test_package_kthdesa'],
install_requires=[
'numpy>=1.16',
'pandas>=0.24'
],
classifiers=[
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
],
) | 27.103448 | 75 | 0.651399 | 90 | 786 | 5.544444 | 0.744444 | 0.150301 | 0.072144 | 0.12024 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020701 | 0.201018 | 786 | 29 | 76 | 27.103448 | 0.773885 | 0 | 0 | 0.076923 | 0 | 0 | 0.45108 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.038462 | 0 | 0.038462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
eb8d74b25be29cc19bd8b5df08384b2634717dc4 | 5,801 | py | Python | gan/Generator.py | leogeier/dl-2020-prog-gan | 12f28353548188af31cc14ee18a5444ad3d95a0c | [
"MIT"
] | null | null | null | gan/Generator.py | leogeier/dl-2020-prog-gan | 12f28353548188af31cc14ee18a5444ad3d95a0c | [
"MIT"
] | null | null | null | gan/Generator.py | leogeier/dl-2020-prog-gan | 12f28353548188af31cc14ee18a5444ad3d95a0c | [
"MIT"
] | null | null | null | import torch
from torch.nn import LeakyReLU
from torch.nn.functional import interpolate
from gan.EqualizedLayers import EqualizedConv2d, EqualizedDeconv2d
class PixelwiseNormalization(torch.nn.Module):
"""
Normalize feature vectors per pixel as suggested in section 4.2 of
https://research.nvidia.com/sites/default/files/pubs/2017-10_Progressive-Growing-of/karras2018iclr-paper.pdf.
For each pixel location (i,j) in the input image, takes the vector across all channels and normalizes it to
unit length.
"""
def __init__(self):
super(PixelwiseNormalization, self).__init__()
def forward(self, x, eps=1e-8):
"""
:param x: input with shape (batch_size x num_channels x img_width x img_height)
:param eps: small constant to avoid division by zero
:return:
"""
return x / x.pow(2).mean(dim=1, keepdim=True).add(eps).sqrt()
class GenInitialBlock(torch.nn.Module):
"""
Initial block of generator. Consisting of the following layers:
input: latent noise vector (latent_size x 1 x 1)
layer activation output shape
Convolution 4 x 4 LeakyReLU latent_size x 4 x 4
Convolution 3 x 3 LeakyReLU latent_size x 4 x 4
output: image with latent_size channels (latent_size x 4 x 4)
"""
def __init__(self, latent_size):
"""
:param latent_size: size of noise input for generator
"""
super(GenInitialBlock, self).__init__()
self.layer1 = EqualizedDeconv2d(in_channels=latent_size, out_channels=latent_size, kernel_size=(4, 4))
self.layer2 = EqualizedConv2d(in_channels=latent_size, out_channels=latent_size, kernel_size=(3, 3), padding=1)
self.pixel_normalization = PixelwiseNormalization()
self.activation = LeakyReLU(negative_slope=0.2)
def forward(self, x):
"""
:param x: input noise (batch_size x latent_size)
:return:
"""
# add image width and height dimensions:
# (batch_size x latent_size) --> (batch_size x latent_size x 1 x 1)
y = torch.unsqueeze(torch.unsqueeze(x, -1), -1)
y = self.activation(self.layer1(y))
y = self.activation(self.layer2(y))
return self.pixel_normalization(y)
class GenConvolutionalBlock(torch.nn.Module):
"""
Regular block of generator. Consisting of following layers:
input: image (in_channels x img_width x img_height)
layer activation output shape
Upsampling - in_channels x 2*img_width x 2*img_height
Convolution 3 x 3 LeakyReLU out_channels x 2*img_width x 2*img_height
Convolution 3 x 3 LeakyReLU out_channels x 2*img_width x 2*img_height
output: image with latent_size channels and doubled size (out_channels x 2*img_width x 2*img_height)
"""
def __init__(self, in_channels, out_channels):
super(GenConvolutionalBlock, self).__init__()
self.upsample = lambda x: interpolate(x, scale_factor=2)
self.layer1 = EqualizedConv2d(in_channels, out_channels, kernel_size=(3, 3), padding=1)
self.layer2 = EqualizedConv2d(out_channels, out_channels, kernel_size=(3, 3), padding=1)
self.pixel_normalization = PixelwiseNormalization()
self.activation = LeakyReLU(negative_slope=0.2)
def forward(self, x):
y = self.upsample(x)
y = self.pixel_normalization(self.activation(self.layer1(y)))
y = self.pixel_normalization(self.activation(self.layer2(y)))
return y
class Generator(torch.nn.Module):
@staticmethod
def __to_rgb(in_channels):
return EqualizedConv2d(in_channels, 3, (1, 1))
def __init__(self, depth, latent_size):
"""
:param depth: depth of the generator, i.e. number of blocks (initial + convolutional)
:param latent_size: size of input noise for the generator
"""
super(Generator, self).__init__()
self.depth = depth
self.latent_size = latent_size
self.initial_block = GenInitialBlock(self.latent_size)
self.blocks = torch.nn.ModuleList([])
# hold an rgb converter for every intermediate resolution to visualize intermediate results
self.rgb_converters = torch.nn.ModuleList([self.__to_rgb(self.latent_size)])
for i in range(self.depth - 1):
if i < 3:
# first three blocks do not reduce the number of channels
in_channels = self.latent_size
out_channels = self.latent_size
else:
# half number of channels in each block
in_channels = self.latent_size // pow(2, i - 3)
out_channels = self.latent_size // pow(2, i - 2)
block = GenConvolutionalBlock(in_channels, out_channels)
rgb = self.__to_rgb(out_channels)
self.blocks.append(block)
self.rgb_converters.append(rgb)
def forward(self, x, current_depth, alpha):
"""
:param x: input noise (batch_size x latent_size)
:param current_depth: depth at which to evaluate (maximum depth of the forward pass)
:param alpha: interpolation between current depth output (alpha) and previous depth output (1 - alpha)
:return:
"""
y = self.initial_block(x)
if current_depth == 0:
return self.rgb_converters[0](y)
for block in self.blocks[:current_depth - 1]:
y = block(y)
residual = self.rgb_converters[current_depth - 1](interpolate(y, scale_factor=2))
straight = self.rgb_converters[current_depth](self.blocks[current_depth - 1](y))
# fade in new layer
return alpha * straight + (1 - alpha) * residual
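# Hedged usage sketch (not part of the original file). It assumes the
# gan.EqualizedLayers module above is importable; depth, latent_size, batch
# size and the fade-in alpha are illustrative assumptions.
if __name__ == "__main__":
    gen = Generator(depth=5, latent_size=128)   # supports up to 4 * 2**4 = 64 px
    noise = torch.randn(4, 128)                 # batch of 4 latent vectors
    # Evaluate at depth 2 while fading in the newest block (alpha = 0.5):
    # blends the upsampled 8x8 output with the freshly added 16x16 block.
    imgs = gen(noise, current_depth=2, alpha=0.5)
    print(imgs.shape)                           # torch.Size([4, 3, 16, 16])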
# ===== Active-Contour-Loss.py (xuuuuuuchen/Active-Contour-Loss, MIT) =====
from keras import backend as K
import numpy as np
def Active_Contour_Loss(y_true, y_pred):
"""
length term
"""
x = y_pred[:,:,1:,:] - y_pred[:,:,:-1,:] # horizontal and vertical directions
y = y_pred[:,:,:,1:] - y_pred[:,:,:,:-1]
delta_x = x[:,:,1:,:-2]**2
delta_y = y[:,:,:-2,1:]**2
delta_u = K.abs(delta_x + delta_y)
lenth = K.mean(K.sqrt(delta_u + 0.00000001)) # equ.(11) in the paper
"""
region term
"""
C_1 = np.ones((256, 256))
C_2 = np.zeros((256, 256))
region_in = K.abs(K.mean( y_pred[:,0,:,:] * ((y_true[:,0,:,:] - C_1)**2) ) ) # equ.(12) in the paper
region_out = K.abs(K.mean( (1-y_pred[:,0,:,:]) * ((y_true[:,0,:,:] - C_2)**2) )) # equ.(12) in the paper
lambdaP = 1 # lambda parameter could be various.
mu = 1 # mu parameter could be various.
return lenth + lambdaP * (mu * region_in + region_out)
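# Hedged, self-contained check (not part of the original file): the loss
# expects channels-first tensors of shape (batch, 1, 256, 256), matching the
# hard-coded 256x256 C_1/C_2 masks above; the toy square mask is invented.
if __name__ == "__main__":
    y_true = np.zeros((1, 1, 256, 256), dtype="float32")
    y_true[:, :, 64:192, 64:192] = 1.0     # a filled square as ground truth
    y_pred = y_true.copy()                 # a perfect prediction
    loss = Active_Contour_Loss(K.constant(y_true), K.constant(y_pred))
    print(K.eval(loss))                    # only the length term remains non-zero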
# ===== edbo/objective.py (v13inc/edbo, MIT) =====
# -*- coding: utf-8 -*-
# Imports
import pandas as pd
from .pd_utils import load_csv_or_excel
from .pd_utils import load_experiment_results
from .pd_utils import to_torch
from .math_utils import standard
# Objective function class
class objective:
"""Objective funciton data container and operations.
Note
----
Objective internally standardizes response values to zero mean and unit
variance.
"""
def __init__(self,
results_path=None, results=pd.DataFrame(),
domain_path=None, domain=pd.DataFrame(),
exindex_path=None, exindex=pd.DataFrame(),
target=-1, gpu=False, computational_objective=None):
"""
Parameters
----------
results_path : str, optional
Path to experimental results.
results : pandas.DataFrame, optional
Experimental results with X values matching the domain.
domain_path : str, optional
Path to experimental domain.
Note
----
A domain_path or domain are required.
domain : pandas.DataFrame, optional
Experimental domain specified as a matrix of possible
configurations.
exindex_path : str, optional
Path to experiment results index if available.
exindex : pandas.DataFrame, optional
Experiment results index matching domain format. Used as lookup
table for simulations.
target : str
Column label of optimization objective. If set to -1, the last
column of the DataFrame will be set as the target.
gpu : bool
Carry out GPyTorch computations on a GPU if available.
computational_objective : function, optional
Function to be optimized for computational objectives.
"""
# Initialize
self.results_path = results_path
self.results = results
self.domain_path = domain_path
self.domain = domain
self.exindex_path = exindex_path
self.exindex = exindex
self.target = target
self.gpu = gpu
self.computational_objective = computational_objective
# Load domain
if domain_path != None:
self.domain = load_csv_or_excel(self.domain_path)
self.domain.reset_index(drop=True)
# Load results
if type(self.results) == type(pd.DataFrame()) and len(self.results) > 0:
if target == -1:
self.target = self.results.columns.values[-1]
elif results_path != None:
data = load_experiment_results(self.results_path)
self.results = data
if target == -1:
self.target = self.results.columns.values[-1]
# Load experiment index
if exindex_path != None:
self.exindex = load_csv_or_excel(exindex_path)
if target == -1:
self.target = self.exindex.columns.values[-1]
if type(exindex) == type(pd.DataFrame()) and len(exindex) > 0:
if target == -1:
self.target = exindex.columns.values[-1]
# Standardize targets (0 mean and unit variance)
self.scaler = standard()
self.results = self.scaler.standardize_target(self.results, self.target)
# Torch tensors and labeld external data
if len(self.results) > 0:
self.X = to_torch(self.results.drop(self.target,axis=1), gpu=gpu)
self.y = to_torch(self.results[self.target], gpu=gpu).view(-1)
index = ['external' + str(i) for i in range(len(self.results))]
self.results = pd.DataFrame(self.results.values,
columns=self.results.columns,
index=index)
else:
self.X = to_torch([], gpu=gpu)
self.y = to_torch([], gpu=gpu)
# Get results from the index
def get_results(self, domain_points, append=False):
"""Returns target values corresponding to domain_points.
Parameters
----------
domain_points : pandas.DataFrame
Points from experiment index to retrieve responses for. If the
objective is a computational function, run function and return
responses.
append : bool
If true append points to results and update X and y.
Returns
----------
pandas.DataFrame
Proposed experiments.
"""
# Computational objective
if self.computational_objective != None:
new_results = []
for point in domain_points.values:
result = self.computational_objective(point)
new_results.append(result)
batch = domain_points.copy()
batch[self.target] = new_results
if append == True:
# Unstandardize results and append to known outcomes
results = self.scaler.unstandardize_target(self.results, self.target)
data = pd.concat([results, batch])
# Restandardize
self.results = self.scaler.standardize_target(data, self.target)
self.X = to_torch(self.results.drop(self.target,axis=1), gpu=self.gpu)
self.y = to_torch(self.results[self.target], gpu=self.gpu).view(-1)
return batch
# Human in the loop objective
if type(self.exindex) == type(None):
return print("edbo bot: Error no experiment index")
# Retrieve domain points from index
index = self.exindex.drop(self.target, axis=1)
union_index = pd.merge(
index.reset_index(),
domain_points,
how='inner'
)['index']
batch = self.exindex.iloc[list(union_index)]
# Append to results
if append == True:
# Unstandardize results and append to known outcomes
results = self.scaler.unstandardize_target(self.results, self.target)
data = pd.concat([results, batch])
# Restandardize
self.results = self.scaler.standardize_target(data, self.target)
self.X = to_torch(self.results.drop(self.target,axis=1), gpu=self.gpu)
self.y = to_torch(self.results[self.target], gpu=self.gpu).view(-1)
return batch
# Clear results
def clear_results(self):
"""Clear results and reset X and y.
Returns
----------
None
"""
self.results = pd.DataFrame()
self.X = to_torch([], gpu=self.gpu)
self.y = to_torch([], gpu=self.gpu)
# Return unstandardized results
def results_input(self):
"""Return unstandardized results.
Returns
----------
pandas.DataFrame
Unstandardized results.
"""
if len(self.results) == 0:
results = self.results
else:
results = self.scaler.unstandardize_target(self.results, self.target)
return results
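# Hedged usage sketch (not part of the original file): a toy lookup-table
# simulation. Column names and values are illustrative, and it is assumed that
# edbo's standard() scaler tolerates the initially empty results frame.
if __name__ == "__main__":
    index = pd.DataFrame({"temp": [20, 40, 60], "yield": [0.2, 0.8, 0.5]})
    obj = objective(domain=index.drop("yield", axis=1), exindex=index)
    proposed = index.drop("yield", axis=1).iloc[[1]]   # "run" one experiment
    print(obj.get_results(proposed))                   # looks up yield = 0.8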
# ===== p002/solution.py (jcbrockschmidt/project_euler, MIT) =====
#!/usr/bin/env python3
from time import time
def fib_sum(limit):
prev2 = 1
prev1 = 2
fib_sum = 0
while prev2 < limit:
# There is probably a more clever solution that skips the calculation
# of every 1st and 3rd element (see the sketch after this function).
# For now, we will just cherry-pick the even values.
if prev1 % 2 == 0:
fib_sum += prev1
old_prev1 = prev1
prev1 = prev1 + prev2
prev2 = old_prev1
return fib_sum
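# Hedged sketch (not part of the original file) of the "more clever solution"
# hinted at above: every third Fibonacci number is even, and the even ones
# satisfy E(n) = 4*E(n-1) + E(n-2), so the odd terms can be skipped entirely.
def fib_sum_even_only(limit):
    prev2, prev1 = 2, 8                 # the first two even Fibonacci numbers
    total = 2 if limit > 2 else 0
    while prev1 < limit:
        total += prev1
        prev2, prev1 = prev1, 4 * prev1 + prev2
    return total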
if __name__ == '__main__':
start = time()
solu = fib_sum(4e6)
elapse = time() - start
print('Solution: {}'.format(solu))
print('Solution found in {:.8f}s'.format(elapse))
# ===== source/Gui.py (Faraphel/MKWF-Install, Apache-2.0) =====
from tkinter import filedialog, ttk, messagebox
from tkinter import *
import traceback
import requests
import zipfile
import json
import os
from source.Game import Game, RomAlreadyPatched, InvalidGamePath, InvalidFormat, in_thread, VERSION_FILE_URL
from source.Option import Option
from source.definition import get_version_from_string
with open("./translation.json", encoding="utf-8") as f:
translation_dict = json.load(f)
class Gui:
def __init__(self):
"""
Initialize program Gui
"""
self.root = Tk()
self.option = Option()
self.option.load_from_file("./option.json")
self.game = Game(gui=self)
self.game.ctconfig.load_ctconfig_file("./ct_config.json")
self.game.ctconfig.all_version.sort(key=get_version_from_string)
latest_version: str = self.game.ctconfig.all_version[-1]
self.is_dev_version = False # Is this installer version a dev ?
self.stringvar_language = StringVar(value=self.option.language)
self.stringvar_game_format = StringVar(value=self.option.format)
self.boolvar_disable_download = BooleanVar(value=self.option.disable_download)
self.boolvar_del_track_after_conv = BooleanVar(value=self.option.del_track_after_conv)
self.boolvar_dont_check_for_update = BooleanVar(value=self.option.dont_check_for_update)
self.intvar_process_track = IntVar(value=self.option.process_track)
self.boolvar_use_1star_track = BooleanVar(value=True)
self.boolvar_use_2star_track = BooleanVar(value=True)
self.boolvar_use_3star_track = BooleanVar(value=True)
self.stringvar_mark_track_from_version = StringVar(value=latest_version)
self.root.title(self.translate("MKWFaraphel Installer"))
self.root.resizable(False, False)
self.root.iconbitmap(bitmap="./icon.ico")
if not(self.boolvar_dont_check_for_update.get()): self.check_update()
self.menu_bar = Menu(self.root)
self.root.config(menu=self.menu_bar)
self.menu_language = Menu(self.menu_bar, tearoff=0)
self.menu_bar.add_cascade(label=self.translate("Language"), menu=self.menu_language)
self.menu_language.add_radiobutton(label="Français", variable=self.stringvar_language, value="fr", command=lambda: self.option.edit("language", "fr", need_restart=True))
self.menu_language.add_radiobutton(label="English", variable=self.stringvar_language, value="en", command=lambda: self.option.edit("language", "en", need_restart=True))
self.menu_format = Menu(self.menu_bar, tearoff=0)
self.menu_bar.add_cascade(label=self.translate("Format"), menu=self.menu_format)
self.menu_format.add_radiobutton(label=self.translate("FST (Directory)"), variable=self.stringvar_game_format, value="FST", command=lambda: self.option.edit("format", "FST"))
self.menu_format.add_radiobutton(label="ISO", variable=self.stringvar_game_format, value="ISO", command=lambda: self.option.edit("format", "ISO"))
self.menu_format.add_radiobutton(label="CISO", variable=self.stringvar_game_format, value="CISO", command=lambda: self.option.edit("format", "CISO"))
self.menu_format.add_radiobutton(label="WBFS", variable=self.stringvar_game_format, value="WBFS", command=lambda: self.option.edit("format", "WBFS"))
self.menu_trackselection = Menu(self.menu_bar, tearoff=0)
self.menu_bar.add_cascade(label=self.translate("Track selection"), menu=self.menu_trackselection)
self.menu_trackselection.add_checkbutton(label=self.translate("Select"," 1 ","star"), variable=self.boolvar_use_1star_track)
self.menu_trackselection.add_checkbutton(label=self.translate("Select"," 2 ","stars"), variable=self.boolvar_use_2star_track)
self.menu_trackselection.add_checkbutton(label=self.translate("Select"," 3 ","stars"), variable=self.boolvar_use_3star_track)
self.menu_trackselection.add_separator()
self.menu_marktrackversion = Menu(self.menu_trackselection, tearoff=0)
self.menu_trackselection.add_cascade(label=self.translate("Mark all tracks from version"), menu=self.menu_marktrackversion)
self.menu_marktrackversion.add_radiobutton(label=self.translate("None"), variable=self.stringvar_mark_track_from_version, value="None")
for version in self.game.ctconfig.all_version:
self.menu_marktrackversion.add_radiobutton(label=f"v{version}", variable=self.stringvar_mark_track_from_version, value=version)
self.menu_advanced = Menu(self.menu_bar, tearoff=0)
self.menu_bar.add_cascade(label=self.translate("Advanced"), menu=self.menu_advanced)
self.menu_advanced.add_checkbutton(label=self.translate("Disable downloads"), variable=self.boolvar_disable_download, command=lambda: self.option.edit("disable_download", self.boolvar_disable_download))
self.menu_advanced.add_checkbutton(label=self.translate("Delete track after wu8 to szs conversion"), variable=self.boolvar_del_track_after_conv, command=lambda: self.option.edit("del_track_after_conv", self.boolvar_del_track_after_conv))
self.menu_advanced.add_checkbutton(label=self.translate("Don't check for update"), variable=self.boolvar_dont_check_for_update, command=lambda: self.option.edit("dont_check_for_update", self.boolvar_dont_check_for_update))
self.menu_advanced.add_separator()
self.menu_trackconvprocess = Menu(self.menu_advanced, tearoff=0)
self.menu_advanced.add_cascade(label=self.translate("Number of track conversion process"), menu=self.menu_trackconvprocess)
for cpu in range(1, 9):
self.menu_trackconvprocess.add_radiobutton(label=self.translate(str(cpu), " ", "process"), variable=self.intvar_process_track, value=cpu, command=lambda: self.option.edit("process_track", self.intvar_process_track))
self.frame_language = Frame(self.root)
self.frame_language.grid(row=1, column=1, sticky="E")
self.frame_game_path = LabelFrame(self.root, text=self.translate("Original game"))
self.frame_game_path.grid(row=2, column=1)
entry_game_path = Entry(self.frame_game_path, width=50)
entry_game_path.grid(row=1, column=1, sticky="NEWS")
def select_path():
path = filedialog.askopenfilename(filetypes=((self.translate("Wii game"),
r"*.iso *.wbfs main.dol *.wia *.ciso"),))
if os.path.exists(path):
entry_game_path.delete(0, END)
entry_game_path.insert(0, path)
Button(self.frame_game_path, text="...", relief=RIDGE, command=select_path).grid(row=1, column=2, sticky="NEWS")
self.frame_game_path_action = Frame(self.frame_game_path) # Extract and do everything button
self.frame_game_path_action.grid(row=2, column=1, columnspan=2, sticky="NEWS")
self.frame_game_path_action.columnconfigure(1, weight=1)
@in_thread
def use_path(): nothread_use_path()
def nothread_use_path():
self.frame_action.grid_forget()
try:
self.game.set_path(entry_game_path.get())
self.progress(show=True, indeter=True, statut=self.translate("Extracting the game..."))
self.game.extract()
self.frame_action.grid(row=3, column=1, sticky="NEWS")
except RomAlreadyPatched:
messagebox.showerror(self.translate("Error"), self.translate("This game is already modded"))
raise RomAlreadyPatched
except InvalidGamePath:
messagebox.showerror(self.translate("Error"), self.translate("The file path in invalid"))
raise InvalidGamePath
except InvalidFormat:
messagebox.showerror(self.translate("Error"), self.translate("This game's format is invalid"))
raise InvalidFormat
except:
self.log_error()
raise Exception
finally:
self.progress(show=False)
self.button_game_extract = Button(self.frame_game_path_action, text=self.translate("Extract file"),
relief=RIDGE, command=use_path)
self.button_game_extract.grid(row=1, column=1, sticky="NEWS")
@in_thread
def do_everything():
nothread_use_path()
self.game.nothread_patch_file()
self.game.nothread_install_mod()
self.button_do_everything = Button(self.frame_game_path_action, text=self.translate("Do everything"), relief=RIDGE, command=do_everything)
self.button_do_everything.grid(row=1, column=2, sticky="NEWS")
self.frame_action = LabelFrame(self.root, text=self.translate("Action"))
self.button_prepare_file = Button(self.frame_action, text=self.translate("Prepare files"), relief=RIDGE, command=lambda: self.game.patch_file(), width=45)
self.button_prepare_file.grid(row=1, column=1, columnspan=2, sticky="NEWS")
self.button_install_mod = Button(self.frame_action, text=self.translate("Install mod"), relief=RIDGE, command=lambda: self.game.install_mod(), width=45)
# Install mod button will only appear after prepare file step
self.progressbar = ttk.Progressbar(self.root)
self.progresslabel = Label(self.root)
def check_update(self) -> None:
"""
Check if an update is available
"""
try:
github_version_data = requests.get(VERSION_FILE_URL, allow_redirects=True).json()
with open("./version", "rb") as f: local_version_data = json.load(f)
local_version = get_version_from_string(f"{local_version_data['version']}.{local_version_data['subversion']}")
github_version = get_version_from_string(f"{github_version_data['version']}.{github_version_data['subversion']}")
if github_version > local_version: # if github version is newer than local version
if messagebox.askyesno(
self.translate("Update available !"),
self.translate("An update is available, do you want to install it ?",
f"\n\nVersion : {local_version} -> {github_version}\n"
f"Changelog :\n{github_version_data['changelog']}")):
if not (os.path.exists("./Updater/Updater.exe")):
dl = requests.get(github_version_data["updater_bin"], allow_redirects=True)
with open("./download.zip", "wb") as file:
print(self.translate("Downloading the Updater..."))
file.write(dl.content)
print(self.translate("end of the download, extracting..."))
with zipfile.ZipFile("./download.zip") as file:
file.extractall("./Updater/")
print(self.translate("finished extracting"))
os.remove("./download.zip")
print(self.translate("starting application..."))
os.startfile(os.path.realpath("./Updater/Updater.exe"))
elif local_version > github_version:
self.is_dev_version = True
except requests.ConnectionError:
messagebox.showwarning(self.translate("Warning"),
self.translate("Can't connect to internet. Download will be disabled."))
self.option.disable_download = True
except:
self.log_error()
def log_error(self) -> None:
"""
When an error occurs, show it in a messagebox and write it to error.log
"""
error = traceback.format_exc()
with open("./error.log", "a") as f:
f.write(f"---\n"
f"For game version : {self.game.ctconfig.version}\n"
f"./file/ directory : {os.listdir('./file/')}"
f"GAME/files/ information : {self.game.path, self.game.region}"
f"{error}\n")
messagebox.showerror(self.translate("Error"), self.translate("An error occured", " :", "\n", error, "\n\n"))
def progress(self, show: bool = None, indeter: bool = None, step: int = None,
statut: str = None, max: int = None, add: int = None) -> None:
"""
configure the progress bar shown when doing a task
:param show: show or hide the progress bar
:param indeter: if indeter, the progress bar will do a infinite loop animation
:param step: set the progress of the bar
:param statut: text shown under the progress bar
:param max: set the maximum step
:param add: add to step of the progress bar
"""
if indeter is True:
self.progressbar.config(mode="indeterminate")
self.progressbar.start(50)
elif indeter is False:
self.progressbar.config(mode="determinate")
self.progressbar.stop()
if show is True:
self.state_button(enable=False)
self.progressbar.grid(row=100, column=1, sticky="NEWS")
self.progresslabel.grid(row=101, column=1, sticky="NEWS")
elif show is False:
self.state_button(enable=True)
self.progressbar.grid_forget()
self.progresslabel.grid_forget()
if statut: self.progresslabel.config(text=statut)
if step: self.progressbar["value"] = step
if max:
self.progressbar["maximum"] = max
self.progressbar["value"] = 0
if add: self.progressbar.step(add)
def state_button(self, enable: bool = True) -> None:
"""
used to enable or disable button when doing task
:param enable: are the button enabled ?
"""
button = [
self.button_game_extract,
self.button_install_mod,
self.button_prepare_file,
self.button_do_everything
]
for widget in button:
if enable:
widget.config(state=NORMAL)
else:
widget.config(state=DISABLED)
def translate(self, *texts, lang: str = None) -> str:
"""
translate text into an another language in translation.json file
:param texts: all text to convert
:param lang: force a destination language to convert track
:return: translated text
"""
if lang is None: lang = self.stringvar_language.get()
elif lang == "F": lang = "fr"
elif lang == "G": lang = "ge"
elif lang == "I": lang = "it"
elif lang == "S": lang = "sp"
if lang in translation_dict:
_lang_trad = translation_dict[lang]
translated_text = ""
for text in texts:
if text in _lang_trad:
translated_text += _lang_trad[text]
else:
translated_text += text
return translated_text
return "".join(texts) # if no translation language is found
# ===== titledb/update_db.py (EMUGamesDevTeam/TitleDB, Unlicense) =====
import os, sys, re, transaction, base64, zlib
from sqlalchemy import engine_from_config
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from .models import (
DBSession,
CIA,
Entry,
User,
Group,
Base,
)
from .security import hash_password
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri>\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def main(argv=sys.argv):
if len(argv) != 2:
usage(argv)
config_uri = argv[1]
setup_logging(config_uri)
settings = get_appsettings(config_uri)
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
with transaction.manager:
for cia in DBSession.query(CIA).all():
print(cia.icon_s)
icons1 = base64.b64decode(cia.icon_s)
try:
icons2 = zlib.decompress(icons1)
except zlib.error:
icons2 = icons1
iconl1 = base64.b64decode(cia.icon_l)
try:
iconl2 = zlib.decompress(iconl1)
except zlib.error:
iconl2 = iconl1
cia.icon_s = base64.b64encode(icons2)
cia.icon_l = base64.b64encode(iconl2)
DBSession.query(CIA).filter_by(id=cia.id).update(dict(icon_s=cia.icon_s,icon_l=cia.icon_l))
with transaction.manager:
for cia in DBSession.query(CIA).all():
m = re.search('(.*)#(.*)', cia.url.url)
if m:
cia.url = m.group(1)
cia.path = m.group(2)
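# Hedged illustration (not part of the original file) of the split above;
# the URL value is invented:
# re.search('(.*)#(.*)', 'http://host/file.cia#/cia/title.cia').groups()
# -> ('http://host/file.cia', '/cia/title.cia')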
# ===== src/modeling/models_cv.py (sebasjp/octopus-ml, MIT) =====
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
import numpy as np
# =============================================================
# Modeling tools for cross validation
# Reference: https://github.com/fmfn/BayesianOptimization/blob/master/examples/sklearn_example.py
# =============================================================
# ===================
# Random Forest
# ===================
def rfc_cv(n_estimators,
max_depth,
min_samples_split,
min_samples_leaf,
max_features,
metric,
X,
y,
preparessor):
"""
Random Forest cross validation.
This function will instantiate a random forest classifier with parameters
n_estimators, min_samples_split, max_depth, min_samples_leaf and max_features. Combined with X and
y this will in turn be used to perform cross validation. The result
of cross validation is returned.
Our goal is to find combinations of n_estimators, min_samples_split,
max_depth, min_samples_leaf and max_features that maximize the metric.
"""
preprocessor = preparessor
estimator = RandomForestClassifier(
n_estimators = n_estimators,
max_depth = max_depth,
min_samples_split = min_samples_split,
min_samples_leaf = min_samples_leaf,
max_features = max_features,
random_state = 42
)
# Append classifier to preparing pipeline. Now we have a full prediction pipeline.
clf = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', estimator)])
cval = cross_val_score(clf,
X,
y,
scoring = metric,
cv = 5)
return cval.mean()
# ===================
# XGBoost
# ===================
def xgb_cv(n_estimators,
max_depth,
colsample_bytree,
learning_rate,
metric,
X,
y,
preparessor):
"""
XGBoost cross validation.
This function will instantiate an XGBoost classifier and perform
cross validation. The result of cross validation is returned.
Our goal is to find combinations that maximizes the metric
"""
preprocessor = preparessor
PARAM_SCALE_POS = np.ceil( len(y[y == 0]) / len(y[y == 1]) )
estimator = xgb.XGBClassifier(
n_estimators = n_estimators,
max_depth = max_depth,
colsample_bytree = colsample_bytree,
learning_rate = learning_rate,
objective = 'binary:logistic',
scale_pos_weight = PARAM_SCALE_POS,
random_state = 42,
verbosity = 0
)
# Append classifier to preparing pipeline. Now we have a full prediction pipeline.
clf = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', estimator)])
cval = cross_val_score(clf,
X,
y,
scoring = metric,
cv = 5)
return cval.mean()
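# Hedged usage sketch (not part of the original file), in the spirit of the
# BayesianOptimization example referenced above. The bayes_opt package, the
# bounds and the default metric are assumptions; the integer hyperparameters
# are cast because the optimizer proposes floats.
from bayes_opt import BayesianOptimization

def optimize_rfc(X, y, preparessor, metric="roc_auc"):
    def rfc_crossval(n_estimators, max_depth, min_samples_split,
                     min_samples_leaf, max_features):
        return rfc_cv(n_estimators=int(n_estimators),
                      max_depth=int(max_depth),
                      min_samples_split=int(min_samples_split),
                      min_samples_leaf=int(min_samples_leaf),
                      max_features=max_features,
                      metric=metric, X=X, y=y, preparessor=preparessor)

    optimizer = BayesianOptimization(
        f=rfc_crossval,
        pbounds={"n_estimators": (50, 500),
                 "max_depth": (3, 20),
                 "min_samples_split": (2, 25),
                 "min_samples_leaf": (1, 25),
                 "max_features": (0.1, 0.999)},
        random_state=42,
    )
    optimizer.maximize(init_points=5, n_iter=20)
    return optimizer.max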
# ===== flask_makespc.py (OliWright/MakeSPC, MIT) =====
# MIT License
#
# Copyright (c) 2020 Oli Wright <oli.wright.github@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# flask_makespc.py
#
# Flask container for makespc.py
# Simple script to convert images to the Stop Press Canvas .SPC format which
# is used on Amstrad PCW8256 and friends.
import os
from flask import Flask, flash, request, redirect, send_from_directory
from werkzeug.utils import secure_filename
from makespc import convert_to_spc
APP_ROOT = os.path.dirname(os.path.abspath(__file__)) # refers to application_top
UPLOAD_FOLDER = 'uploads'
OUTPUT_FOLDER = 'output'
PREVIEW_FOLDER = 'previews'
APP_UPLOAD_FOLDER = os.path.join(APP_ROOT, UPLOAD_FOLDER)
APP_OUTPUT_FOLDER = os.path.join(APP_ROOT, OUTPUT_FOLDER)
APP_PREVIEW_FOLDER = os.path.join(APP_ROOT, PREVIEW_FOLDER)
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'bmp'}
app = Flask(__name__)
# flash() stores its messages in the session, which requires a secret key.
app.secret_key = os.urandom(16)
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/preview/<path:filename>', methods=['GET', 'POST'])
def preview(filename):
return send_from_directory(PREVIEW_FOLDER, filename=filename)
@app.route('/output/<path:filename>', methods=['GET', 'POST'])
def output(filename):
return send_from_directory(OUTPUT_FOLDER, filename=filename)
@app.route('/', methods=['GET', 'POST'])
def upload_file():
html = '''
<!doctype html>
<title>Convert an image to .SPC</title>
<h1>Make SPC Online</h1>
<p>This tool converts images to Stop Press Canvas .SPC format, popular on Amstrad PCW8256 computers.</p>
<form method=post enctype=multipart/form-data>
<input type=file name=file>
<input type=submit value=Convert to SPC>
</form>
'''
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
# if user does not select file, browser also
# submit an empty part without filename
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
input_filename = secure_filename(file.filename)
full_input_filename = os.path.join(APP_UPLOAD_FOLDER, input_filename)
file.save(full_input_filename)
basename, extension = os.path.splitext(input_filename)
preview_filename = basename + ".png"
full_preview_filename = os.path.join(APP_PREVIEW_FOLDER, preview_filename)
output_filename = basename + ".spc"
full_output_filename = os.path.join(APP_OUTPUT_FOLDER, output_filename)
convert_to_spc(full_input_filename, full_preview_filename, full_output_filename)
html += '''
<p>Click the image to download your SPC file.</p>
<a href="/output/%s"><img src="/preview/%s"/></a>
''' % (output_filename, preview_filename)
return html
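if __name__ == '__main__':
    # Hedged addition (not part of the original file): run the Flask
    # development server; host and port are illustrative defaults.
    app.run(host='127.0.0.1', port=5000, debug=True)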
# ===== pytherface/yamlFileReader.py (aseiger/pytherface-configurator, MIT) =====
# reads in the protocol requirements and stores the information in a class
import yaml
import logging
logger = logging.getLogger(__name__)
def loadYamlFile(filename):
#open up the filename
logger.debug("Opening file {}".format(filename))
try:
fObject = open(filename, 'r')
except FileNotFoundError:
logger.error("Config File {} not Found!".format(filename))
return []
else:
data = yaml.safe_load(fObject.read())  # safe_load avoids arbitrary object construction
fObject.close()
return data
def parseYamlConfig(data):
# we already have all of the information we need stored in the data from
# the YAML file. However, it's worthwhile to also generate a list of all
# incoming and outgoing variables. This allows checking for duplicates.
incomingVariables = []
outgoingVariables = []
# go through each message
for msg, metadata in data.items():
for k, v in metadata['variables'].items():
if metadata['type'] == 'incoming':
incomingVariables.append({k: v})
elif metadata['type'] == 'outgoing':
outgoingVariables.append({k: v})  # list.append takes a single argument
logger.debug(incomingVariables)
logger.debug(outgoingVariables)
return incomingVariables, outgoingVariables
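if __name__ == "__main__":
    # Hedged, self-contained demo (not part of the original file). The YAML
    # layout is inferred from parseYamlConfig: each message carries a 'type'
    # and a 'variables' mapping; the field names are invented.
    sample = yaml.safe_load(
        "status_msg:\n"
        "  type: incoming\n"
        "  variables:\n"
        "    battery_voltage: float\n"
    )
    print(parseYamlConfig(sample))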
# ===== docker/src/app_server/sse.py (ShenTengTu/leak_monitoring_app, MIT) =====
import logging
import io
from asyncio import Queue
from sse_starlette.sse import (
EventSourceResponse as _EventSourceResponse,
AppStatus,
ServerSentEvent,
)
from .endec import Encode
logger = logging.getLogger("app_server")
class EventSourceResponse(_EventSourceResponse):
"""Override original `EventSourceResponse`.
If data is `None`, send a comment to keep the connection alive.
"""
@staticmethod
def comment_encode(content: str = "", sep: str = None) -> bytes:
buffer = io.StringIO()
buffer.write(f": {content}")
buffer.write(sep if sep is not None else "\r\n")
return buffer.getvalue().encode("utf-8")
async def stream_response(self, send) -> None:
await send(
{
"type": "http.response.start",
"status": self.status_code,
"headers": self.raw_headers,
}
)
self._ping_task = self._loop.create_task(self._ping(send)) # type: ignore
async for data in self.body_iterator:
if AppStatus.should_exit:
logger.debug(f"Caught signal. Stopping stream_response loop.")
break
if isinstance(data, dict):
chunk = ServerSentEvent(**data).encode()
elif data is None:
chunk = self.comment_encode("NONE", sep=self.sep)
else:
chunk = ServerSentEvent(str(data), sep=self.sep).encode()
logger.debug(f"[EventSourceResponse] chunk: {chunk.decode()}")
await send({"type": "http.response.body", "body": chunk, "more_body": True})
await send({"type": "http.response.body", "body": b"", "more_body": False})
class SSEManager:
__queue = Queue()
@classmethod
def push_event(cls, event: str, data: dict):
cls.__queue.put_nowait(dict(event=event, data=Encode.json(data)))
@classmethod
async def next_event(cls):
q = cls.__queue
if q.empty():
return None
item = await q.get()
q.task_done()
return item
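# Hedged usage sketch (not part of the original file): wiring SSEManager into
# a route served with the EventSourceResponse subclass above. FastAPI, the
# route path and the 1-second poll interval are assumptions.
import asyncio
from fastapi import FastAPI

app = FastAPI()

@app.get("/events")
async def events():
    async def event_stream():
        while True:
            # A dict becomes a ServerSentEvent; None is sent as a keep-alive comment.
            yield await SSEManager.next_event()
            await asyncio.sleep(1)
    return EventSourceResponse(event_stream())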
# ===== github-json_to_xml/conv.py (alvarenga/github-json_to_xml, MIT) =====
def conv(user):
import requests
import json
import xmltodict
url = 'https://api.github.com/users/' + user
s = requests.get(url)
# Convert the JSON response into a dict
x = {}
x['wg'] = json.loads(s.text)
y = xmltodict.unparse(x, pretty=True)
return y
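if __name__ == "__main__":
    # Hedged demo (not part of the original file); "octocat" is GitHub's
    # well-known sample account.
    print(conv("octocat"))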
# ===== rail/creation/creator.py (LSSTDESC/RAIL, MIT) =====
import numpy as np
import pandas as pd
from rail.creation.engines import Engine
from typing import Callable
class Creator:
"""Object that supplies mock data for redshift estimation experiments.
The mock data is drawn from a probability distribution defined by the
generator, with an optional degrader applied.
"""
def __init__(self, engine: Engine, degrader: Callable = None, info: dict = None):
"""
Parameters
----------
engine: rail.Engine object
Object defining a redshift probability distribution.
Must have sample, log_prob and get_posterior methods (see engine.py)
degrader: callable, optional
A Degrader, function, or other callable that degrades the generated
sample. Must take a pandas DataFrame and a seed int, and return a
pandas DataFrame representing the degraded sample.
info: any, optional
Additional information desired to be stored with the instance
as a dictionary.
"""
self.engine = engine
self.degrader = degrader
self.info = info
def get_posterior(self, data: pd.DataFrame, column: str, grid: np.ndarray):
"""Calculate the posterior of the given column over the values in grid.
Parameters
----------
data : pd.DataFrame
Pandas dataframe of the data on which the posteriors are conditioned.
column : str
Name of the column for which the posterior is calculated.
grid : np.ndarray
Grid over which the posterior is calculated.
Returns
-------
np.ndarray
Array of posteriors, of shape (data.shape[0], grid.size).
"""
return self.engine.get_posterior(data, column, grid)
def sample(
self,
n_samples: int,
seed: int = None,
include_pdf: bool = False,
pz_grid: np.ndarray = None,
):
"""Draws n_samples from the engine
Parameters
----------
n_samples : int
Number of samples to draw
seed : int, optional
sets the random seed for drawing samples
include_pdf : boolean, optional
If True, redshift posteriors are returned for each galaxy.
The posteriors are saved in the column pz_pdf, and the
redshift grid saved as df.attrs['pz_grid'].
pz_grid : np.array, default=np.arange(0, 2.02, 0.02)
The grid over which to calculate the redshift posteriors.
Returns
-------
outputs : pd.DataFrame
samples from model, containing photometry, true redshift, and
redshift posterior PDF's if requested.
Notes
-----
Output posterior format is currently hardcoded to grid evaluations but could be integrated with qp.
We will probably change the output format to dovetail with the evaluation module when ready.
"""
if include_pdf is True and pz_grid is None:
pz_grid = np.arange(0, 2.02, 0.02)
rng = np.random.default_rng(seed)
# get samples
outputs = self.engine.sample(n_samples, seed=seed)
if self.degrader is not None:
# degrade sample
outputs = self.degrader(outputs, seed=seed)
# calculate fraction that survives the cut
selected_frac = len(outputs) / n_samples
# draw more samples and degrade until we have enough samples
while len(outputs) < n_samples:
# estimate how many extras to draw
n_supplement = int(1.1 / selected_frac * (n_samples - len(outputs)))
# draw new samples and apply cut
new_sample = self.engine.sample(n_supplement, seed=rng.integers(1e18))
new_sample = self.degrader(new_sample, seed=rng.integers(1e18))
# add these to the larger set
outputs = pd.concat((outputs, new_sample), ignore_index=True)
# cut out the extras
outputs = outputs[:n_samples]
# calculate posteriors
if include_pdf:
posteriors = self.get_posterior(outputs, column="redshift", grid=pz_grid)
outputs.attrs["pz_grid"] = pz_grid
outputs["pz_pdf"] = list(posteriors)
return outputs
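# Hedged usage sketch (not part of the original file): a stub engine showing
# the duck-typed interface Creator relies on (sample / get_posterior); the toy
# uniform distribution and column names are invented.
class _ToyEngine:
    def sample(self, n_samples, seed=None):
        rng = np.random.default_rng(seed)
        return pd.DataFrame({"redshift": rng.uniform(0, 2, n_samples),
                             "mag": rng.normal(24, 1, n_samples)})

    def get_posterior(self, data, column, grid):
        # Flat posterior over the grid for every galaxy.
        return np.full((data.shape[0], grid.size), 1.0 / grid.size)

if __name__ == "__main__":
    creator = Creator(_ToyEngine())
    sample = creator.sample(100, seed=0, include_pdf=True)
    print(sample.attrs["pz_grid"].shape, sample["pz_pdf"].iloc[0].shape)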
# ===== contrib/nchain/devops/pipe-unittests.py (Trackerming/bitcoin-sv, MIT) =====
#!/usr/bin/python3
# Perform the unit tests on SV
import subprocess
import os
import pathlib
import traceback
import pipetestutils
def main():
r1 = -1
try:
pathlib.Path("build/reports").mkdir(parents=True, exist_ok=True)
os.chdir("src/test")
except Exception as e:
print("Problem changing directory")
print("type error: " + str(e))
print(traceback.format_exc())
exit(-1)
try:
args = ["./test_bitcoin", "--log_format=JUNIT" \
, "--log_sink=../../build/reports/unittests.xml"]
r1 = subprocess.call(args)
except Exception as e:
print("Problem running tests")
print("type error: " + str(e))
print(traceback.format_exc())
exit(-2)
exit(abs(r1))
if __name__ == '__main__':
main()
# ===== tx_salaries/utils/transformers/ut_medical_branch.py (texastribune/tx_salaries, Apache-2.0) =====
from . import base
from . import mixins
from datetime import date
class TransformedRecord(
mixins.GenericCompensationMixin,
mixins.GenericDepartmentMixin, mixins.GenericIdentifierMixin,
mixins.GenericJobTitleMixin, mixins.GenericPersonMixin,
mixins.MembershipMixin, mixins.OrganizationMixin, mixins.PostMixin,
mixins.RaceMixin, mixins.LinkMixin, base.BaseTransformedRecord):
MAP = {
'last_name': 'FAMILY_NAME',
'first_name': 'GIVEN_NAME',
'department': 'DEPTID_DESCR',
'job_title': 'JOBTITLE',
'gender': 'GENDER',
'race': 'ETHNIC_GROUP_DESCR',
'hire_date': 'LAST_HIRE_DT',
'compensation': 'ANNUAL_PAY',
'longevity': 'ANNUALIZED_LONGEVITY',
'employee_type': 'FULL_PART_TIME',
}
NAME_FIELDS = ('first_name', 'last_name', )
gender_map = {'Female': 'F', 'Male': 'M'}
ORGANIZATION_NAME = 'The University of Texas Medical Branch at Galveston'
ORGANIZATION_CLASSIFICATION = 'University Hospital'
DATE_PROVIDED = date(2019, 7, 30)
URL = ('https://s3.amazonaws.com/raw.texastribune.org/ut_medical_branch/'
'salaries/2019/Response.xlsx')
@property
def compensation_type(self):
if self.employee_type == 'Part-time':
return 'PT'
else:
return 'FT'
@property
def description(self):
if self.employee_type == 'Part-time':
return "Part-time annual compensation"
else:
return "Annual compensation"
@property
def compensation(self):
#longevity is in addition to base annual_pay, add if applicable
if self.get_mapped_value('longevity') == '0':
return self.get_mapped_value('compensation')
else:
longevity = self.get_mapped_value('longevity')
salary = self.get_mapped_value('compensation')
return float(salary) + float(longevity)
@property
def is_valid(self):
# Adjust to return False on invalid fields. For example:
return self.last_name.strip() != ''
@property
def person(self):
data = {
'family_name': self.last_name,
'given_name': self.first_name,
'name': self.get_raw_name(),
'gender': self.gender_map[self.gender.strip()]
}
return data
transform = base.transform_factory(TransformedRecord)
# ===== utils/custombase.py (Zashel/utils, Apache-2.0) =====
class AttributedDict(dict):
def __dir__(self):
directory = dir(super())
directory.extend([str(key.replace(" ", "_")) for key in self])
return directory
def __getattr__(self, attr):
_dir_dict = dict()
[_dir_dict.update({key.replace(" ", "_"): key}) for key in self]
if attr in _dir_dict:
return self[_dir_dict[attr]]
else:
raise AttributeError(attr)
def __setattr__(self, attr, value):
_dir_dict = dict()
[_dir_dict.update({key.replace(" ", "_"): key}) for key in self]
if attr in _dir_dict:
self[_dir_dict[attr]] = value
elif attr in self:
self[attr] = value
else:
raise AttributeError(attr)
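if __name__ == "__main__":
    # Hedged demo (not part of the original file): keys containing spaces
    # become underscore-separated attribute names; the record is invented.
    record = AttributedDict({"first name": "Ada", "last name": "Lovelace"})
    print(record.first_name)        # "Ada"
    record.last_name = "Byron"      # writes back to the "last name" key
    print(record["last name"])      # "Byron"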
# ===== utils.py (val-iisc/ss_human_mesh, MIT) =====
""" General Utilities file. """
import sys
import os
############################ NON-TF UTILS ##########################
from skimage.util import img_as_float
import numpy as np
import cv2
import pickle
from PIL import Image
from io import BytesIO
import math
import tqdm
import scipy
import json
import matplotlib
gui_env = ['Agg','TKAgg','GTKAgg','Qt4Agg','WXAgg']
for gui in gui_env:
try:
print ("testing", gui)
matplotlib.use(gui,warn=False, force=True)
from matplotlib import pyplot as plt
break
except:
continue
print ("utils.py Using:",matplotlib.get_backend())
from matplotlib.backends.backend_agg import FigureCanvasAgg as Canvas
from mpl_toolkits.mplot3d import Axes3D
import config as cfg
######### Basic Utils #########
def adjust_gamma(image, gamma=1.0):
""" Gamma correct images. """
## Build a LUT mapping the pixel values [0, 255] to their adjusted gamma values
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
## Apply gamma correction using the LUT
return cv2.LUT(image, table)
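## Minimal usage sketch (illustrative only, not from the original file): gamma > 1
## brightens mid-tones and gamma < 1 darkens them; the LUT maps each of the 256
## intensity levels exactly once, so the call is cheap even for large images.
def _demo_adjust_gamma():
    img = np.full((4, 4, 3), 128, dtype=np.uint8)  # flat mid-grey test image
    bright = adjust_gamma(img, gamma=2.0)
    dark = adjust_gamma(img, gamma=0.5)
    assert bright.mean() > img.mean() > dark.mean()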
def scipy_sharpen(img_flt, alpha=30):
""" Sharpen images. """
from scipy import ndimage
blurred_f = ndimage.gaussian_filter(img_flt, 3)
filter_blurred_f = ndimage.gaussian_filter(blurred_f, 1)
img_flt = blurred_f + alpha * (blurred_f - filter_blurred_f)
return img_flt
def read_pickle(path):
""" Load Pickle file. """
with open(path, 'rb') as f:
data = pickle.load(f)
return data
def save_pickle(data, path):
""" Save Pickle file. """
with open(path, 'wb') as f:
pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
######### Pose quality and Metrics #########
def compute_similarity_transform(S1, S2):
""" Computes a similarity transform (sR, t) that takes
a set of 3D points S1 (3 x N) closest to a set of 3D points S2,
where R is an 3x3 rotation matrix, t 3x1 translation, s scale.
i.e. solves the orthogonal Procrutes problem. """
transposed = False
if S1.shape[0] != 3 and S2.shape[0] != 3:
S1 = S1.T
S2 = S2.T
transposed = True
assert(S2.shape[1] == S1.shape[1])
## Mean
mu1 = S1.mean(axis=1, keepdims=True)
mu2 = S2.mean(axis=1, keepdims=True)
X1 = S1 - mu1
X2 = S2 - mu2
## Compute variance of X1 used for scale
var1 = np.sum(X1**2)
## The outer product of X1 and X2
K = X1.dot(X2.T)
## Solution that Maximizes trace(R'K) is R=U*V', where U, V are
## Singular vectors of K
U, s, Vh = np.linalg.svd(K)
V = Vh.T
## Construct Z that fixes the orientation of R to get det(R)=1
Z = np.eye(U.shape[0])
Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T)))
## Construct R
R = V.dot(Z.dot(U.T))
## Recover scale
scale = np.trace(R.dot(K)) / var1
## Recover translation
t = mu2 - scale*(R.dot(mu1))
## Error
S1_hat = scale*R.dot(S1) + t
if transposed:
S1_hat = S1_hat.T
return S1_hat
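def _demo_compute_similarity_transform():
    """ Illustrative sanity check (not part of the original pipeline): a scaled
    and translated copy of a point set should be mapped back onto the original
    exactly, since the Procrustes solution recovers s, R and t. """
    S2 = np.random.randn(3, 10)
    S1 = 0.5 * S2 + np.array([[1.0], [2.0], [3.0]])  # scaled + translated copy
    S1_hat = compute_similarity_transform(S1, S2)
    assert np.allclose(S1_hat, S2, atol=1e-6)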
def compute_error(pred_3d_all, gt_3d_all, full_out=True):
""" MPJPE and PA_MPJPE metric computation. """
pred_3d_all_flat = pred_3d_all.copy()
pred_3d_all_flat = pred_3d_all_flat - pred_3d_all_flat[:, 0:1,:]
gt_3d_all_flat = gt_3d_all.copy()
gt_3d_all_flat = gt_3d_all_flat - gt_3d_all_flat[:, 0:1,:]
joint_wise_error = []
error = []
pa_joint_wise_error = []
pa_error = []
for i in range(len(pred_3d_all_flat)):
each_pred_3d = pred_3d_all_flat[i]
each_gt_3d = gt_3d_all_flat[i]
tmp_err = np.linalg.norm(each_pred_3d-each_gt_3d, axis=1)
joint_wise_error.append(tmp_err)
error.append(np.mean(tmp_err))
pred3d_sym = compute_similarity_transform(each_pred_3d.copy(), each_gt_3d.copy())
tmp_pa_err = np.linalg.norm(pred3d_sym-each_gt_3d, axis=1)
pa_joint_wise_error.append(tmp_pa_err)
pa_error.append(np.mean(tmp_pa_err))
joint_wise_error = np.array(joint_wise_error)
if(full_out):
mpjpe = np.mean(error)*1000 ### Note: unit is mm
pampjpe = np.mean(pa_error)*1000 ### Note: unit is mm
return mpjpe, pampjpe
else:
return error, pa_error
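def _demo_compute_error():
    """ Illustrative sketch (not from the original file): a constant per-joint
    offset is removed by the root-joint centring above, so both MPJPE and
    PA-MPJPE collapse to ~0 for a uniformly shifted prediction. """
    gt = np.random.randn(2, 17, 3)  # 2 poses, 17 joints, xyz
    pred = gt + 0.01                # uniform shift on every joint
    mpjpe, pampjpe = compute_error(pred, gt)
    assert mpjpe < 1e-6 and pampjpe < 1e-6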
###### Alternative manual regressors ######
def smplx45_to_17j(pose_smpl):
""" SMPLX 45 joint J3D to 17 joint J3D. """
## Remove fingers
pose_smpl = pose_smpl[:-10]
## Remove extra def feet
pose_smpl = pose_smpl[:-6]
## Remove face
pose_smpl = pose_smpl[:-5]
## Remove wrist
pose_smpl = pose_smpl[:-2]
## Remove extra def spine
pose_smpl = np.delete(pose_smpl, 3, 0) ## 3
pose_smpl = np.delete(pose_smpl, 5, 0) ## 6
pose_smpl = np.delete(pose_smpl, 7, 0) ## 9
## Remove torso
pose_smpl = np.delete(pose_smpl, 10, 0) ## 10
pose_smpl = np.delete(pose_smpl, 10, 0) ## 11
## Hip altitude increase and widen
alt_f = 0.8
wide_f = 8.0
pelvis = pose_smpl[0].copy()
r_hip = pose_smpl[2].copy()
l_hip = pose_smpl[1].copy()
## Alt inc
r_p_dir = pelvis - r_hip
l_p_dir = pelvis - l_hip
mag_rp = np.linalg.norm(r_p_dir)
r_p_dir /= mag_rp
mag_lp = np.linalg.norm(l_p_dir)
l_p_dir /= mag_lp
r_hip = r_hip + (r_p_dir*mag_rp*alt_f)
l_hip = l_hip + (l_p_dir*mag_lp*alt_f)
## H-Widen
hip_ctr = (r_hip + l_hip) / 2.0
r_dir = r_hip - hip_ctr
l_dir = l_hip - hip_ctr
## Unit vec
mag = np.linalg.norm(r_dir)
r_dir /= mag
l_dir /= np.linalg.norm(l_dir)
r_hip = r_hip + (r_dir*mag*wide_f)
l_hip = l_hip + (l_dir*mag*wide_f)
## place back
pose_smpl[2] = r_hip
pose_smpl[1] = l_hip
return pose_smpl
def smpl23_to_17j_3d(pose_smpl):
""" Simple SMPL 23 joint J3D to 17 joint J3D. """
smpl_to_17j = [ [0,1],[8,11],
[12],[17],[19], ### or 15 , 17
[13],[18], [20], ### or 16 , 18
[14],[0],[3],
[9,6],[9],[1],
[4],[10,7],[10] ]
pose_17j = np.zeros((len(smpl_to_17j),3))
for idx in range(len(smpl_to_17j)):
sel_idx = smpl_to_17j[idx]
if(len(sel_idx) == 2):
pose_17j[idx] = (pose_smpl[sel_idx[0]] + pose_smpl[sel_idx[1]]) / 2.0
else:
pose_17j[idx] = pose_smpl[sel_idx[0]]
return pose_17j
""" SMPL J17 reordering vec. """
smpl_reorder_vec = [0, 9,
12, 14, 16,
11, 13, 15,
10,
2, 4, 6, 8,
1, 3, 5, 7 ]
def reorder_smpl17_to_j17(pose_3d):
""" SMPL reorder SMPL J17 to standard J17. """
pose_3d = pose_3d[smpl_reorder_vec]
return pose_3d
def smpl24_to_17j_adv(pose_smpl):
""" Improved SMPL 23 joint J3D to 17 joint J3D. """
## Hip altitude increase and widen
alt_f = 0.8
wide_f = 8.0
pelvis = pose_smpl[0].copy()
r_hip = pose_smpl[2].copy()
l_hip = pose_smpl[1].copy()
## Alt inc
r_p_dir = pelvis - r_hip
l_p_dir = pelvis - l_hip
mag_rp = np.linalg.norm(r_p_dir)
r_p_dir /= mag_rp
mag_lp = np.linalg.norm(l_p_dir)
l_p_dir /= mag_lp
r_hip = r_hip + (r_p_dir*mag_rp*alt_f)
l_hip = l_hip + (l_p_dir*mag_lp*alt_f)
## H-Widen
hip_ctr = (r_hip + l_hip) / 2.0
r_dir = r_hip - hip_ctr
l_dir = l_hip - hip_ctr
## Unit vec
mag = np.linalg.norm(r_dir)
r_dir /= mag
l_dir /= np.linalg.norm(l_dir)
r_hip = r_hip + (r_dir*mag*wide_f)
l_hip = l_hip + (l_dir*mag*wide_f)
## Place back
pose_smpl[2] = r_hip
pose_smpl[1] = l_hip
## Neck to head raise with tilt towards nose
alt_f = 0.7
head = pose_smpl[15].copy()
neck = pose_smpl[12].copy()
## Alt inc
n_h_dir = head - neck
mag_nh = np.linalg.norm(n_h_dir)
n_h_dir /= mag_nh
head = head + (n_h_dir*mag_nh*alt_f)
## Place back
pose_smpl[15] = head
## Remove wrist
pose_smpl = pose_smpl[:-2]
## Remove extra def spine
pose_smpl = np.delete(pose_smpl, 3, 0) ## 3
pose_smpl = np.delete(pose_smpl, 5, 0) ## 6
pose_smpl = np.delete(pose_smpl, 7, 0) ## 9
## Remove torso
pose_smpl = np.delete(pose_smpl, 10, 0) ## 10
pose_smpl = np.delete(pose_smpl, 10, 0) ## 11
return pose_smpl
def hip_straighten(pose_smpl):
""" Straighten Hip in J17. """
#pelvis = pose_smpl[0].copy()
r_hip = pose_smpl[2].copy()
l_hip = pose_smpl[1].copy()
pelvis = (r_hip + l_hip) / 2
pose_smpl[0] = pelvis
return pose_smpl
""" Limb parents for SMPL joints. """
limb_parents = [ 0,
0, 0, 0,
1, 2, 3, 4,
5, 6, 7, 8,
9, 9, 9,
12,12,12,
16,17,18,19,20,21
]
""" 3D skeleton plot colours for SMPL joints. """
colors = np.array([[0,0,255], [0,255,0], [255,0,0], [255,0,255], [0,255,255], [255,255,0], [127,127,0], [0,127,0], [100,0,100],
[255,0,255], [0,255,0], [0,0,255], [255,255,0], [127,127,0], [100,0,100], [175,100,195],
[0,0,255], [0,255,0], [255,0,0], [255,0,255], [0,255,255], [255,255,0], [127,127,0], [0,127,0], [100,0,100],
[255,0,255], [0,255,0], [0,0,255], [255,255,0], [127,127,0], [100,0,100], [175,100,195]])
def fig2data(fig):
""" Convert a Matplotlib figure to a 4D numpy array with RGBA channels. """
## Draw the renderer
fig.canvas.draw()
## Get the RGBA buffer from the figure
w, h = fig.canvas.get_width_height()
    buf = np.frombuffer(fig.canvas.tostring_argb(), dtype=np.uint8)  # np.fromstring is deprecated
    buf = buf.reshape(h, w, 4)  # buffer is row-major: (height, width, ARGB)
## Roll the ALPHA channel to have it in RGBA mode
buf = np.roll(buf, 3, axis=2)
return buf
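def _demo_fig2data():
    """ Illustrative usage sketch: render an empty figure and check that the
    returned buffer is height x width x RGBA. """
    fig = plt.figure(figsize=(2, 2))
    buf = fig2data(fig)
    plt.close(fig)
    assert buf.ndim == 3 and buf.shape[2] == 4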
def draw_limbs_3d_plt(joints_3d, ax, limb_parents=limb_parents):
## Direct 3d plotting
for i in range(joints_3d.shape[0]):
x_pair = [joints_3d[i, 0], joints_3d[limb_parents[i], 0]]
y_pair = [joints_3d[i, 1], joints_3d[limb_parents[i], 1]]
z_pair = [joints_3d[i, 2], joints_3d[limb_parents[i], 2]]
#ax.text(joints_3d[i, 0], joints_3d[i, 1], joints_3d[i, 2], s=str(i))
ax.plot(x_pair, y_pair, z_pair, color=colors[i]/255.0, linewidth=3, antialiased=True)
def plot_skeleton_3d(joints_3d, flag=-1, limb_parents=limb_parents, title=""):
## 3D Skeleton plotting
fig = plt.figure(frameon=False, figsize=(7, 7))
ax = fig.add_subplot(1, 1, 1, projection='3d')
ax.clear()
## Axis setup
if (flag == 0):
ax.view_init(azim=0, elev=0)
elif (flag == 1):
ax.view_init(azim=90, elev=0)
ax.set_xlim(-200, 200)
ax.set_ylim(-200, 200)
ax.set_zlim(-200, 200)
scale = 1
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
draw_limbs_3d_plt(joints_3d * scale, ax, limb_parents)
ax.set_title(title)
plt_img = fig2data(fig)
plt.close(fig)
return plt_img
def skeleton_image(joints_2d, img):
""" 2D Joint skeleton Overlay. """
img_copy = img.copy()
for i in range(joints_2d.shape[0]):
x_pair = [joints_2d[i, 0], joints_2d[limb_parents[i], 0]]
y_pair = [joints_2d[i, 1], joints_2d[limb_parents[i], 1]]
img_copy = cv2.line(img_copy, (int(x_pair[0]),int(y_pair[0])), (int(x_pair[1]),int(y_pair[1])), colors[i],4)
return img_copy
def create_collage(img_list, axis=1):
""" Collage a set of images to form a panel. (numpy) """
np_new_array = np.concatenate([i for i in img_list], axis=axis)
return np_new_array
def align_by_pelvis(joints):
""" Center by pelvis joint. """
hip_id = 0
joints -= joints[hip_id, :]
return joints
def mesh2d_center_by_nose(mesh2d,w=224 ,h=224):
""" Simple mesh centering by nose/pelvis vtx. (numpy) """
#hip_id = 0
nose_id = 0
ctr = mesh2d[nose_id,:]
mesh_ret = mesh2d - ctr + np.array([ w/2, h/5 ])
return mesh_ret
def align_with_image_j2d(points2d, img_width, img_height):
""" Perform center alignment to image coordinate system. (numpy) """
points2d[:,0] += img_width/2
points2d[:,1] += img_height/2
return points2d
""" Input preprocess """
def get_transform(center, scale, res, rot=0):
""" Generate transformation matrix. """
h = 224 * scale
t = np.zeros((3, 3))
t[0, 0] = float(res[1]) / h
t[1, 1] = float(res[0]) / h
t[0, 2] = res[1] * (-float(center[0]) / h + .5)
t[1, 2] = res[0] * (-float(center[1]) / h + .5)
t[2, 2] = 1
if not rot == 0:
rot = -rot ## To match direction of rotation from cropping
rot_mat = np.zeros((3,3))
rot_rad = rot * np.pi / 180
sn,cs = np.sin(rot_rad), np.cos(rot_rad)
rot_mat[0,:2] = [cs, -sn]
rot_mat[1,:2] = [sn, cs]
rot_mat[2,2] = 1
## Need to rotate around center
t_mat = np.eye(3)
t_mat[0,2] = -res[1]/2
t_mat[1,2] = -res[0]/2
t_inv = t_mat.copy()
t_inv[:2,2] *= -1
t = np.dot(t_inv,np.dot(rot_mat,np.dot(t_mat,t)))
return t
def transform(pt, center, scale, res, invert=0, rot=0):
""" Transform pixel location to different reference. """
t = get_transform(center, scale, res, rot=rot)
if invert:
t = np.linalg.inv(t)
new_pt = np.array([pt[0] - 1, pt[1] - 1, 1.]).T
new_pt = np.dot(t, new_pt)
return new_pt[:2].astype(int) + 1
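def _demo_transform_roundtrip():
    """ Illustrative sanity check (not from the original file): mapping a pixel
    into the crop frame and back with invert=1 recovers the original location,
    up to the integer rounding in transform(). """
    center, scale, res = [100, 100], 1.0, [224, 224]
    pt = transform([50, 60], center, scale, res)
    back = transform(pt, center, scale, res, invert=1)
    assert np.all(np.abs(np.array(back) - np.array([50, 60])) <= 1)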
def crop(img, center, scale, res, rot=0):
""" Crop image according to the supplied bounding box. """
## Upper left point
ul = np.array(transform([1, 1], center, scale, res, invert=1)) - 1
## Bottom right point
br = np.array(transform([res[0]+1, res[1]+1], center, scale, res, invert=1)) - 1
## Padding so that when rotated proper amount of context is included
pad = int(np.linalg.norm(br - ul) / 2 - float(br[1] - ul[1]) / 2)
if not rot == 0:
ul -= pad
br += pad
new_shape = [br[1] - ul[1], br[0] - ul[0]]
if len(img.shape) > 2:
new_shape += [img.shape[2]]
new_img = np.zeros(new_shape)
## Range to fill new array
new_x = max(0, -ul[0]), min(br[0], len(img[0])) - ul[0]
new_y = max(0, -ul[1]), min(br[1], len(img)) - ul[1]
## Range to sample from original image
old_x = max(0, ul[0]), min(len(img[0]), br[0])
old_y = max(0, ul[1]), min(len(img), br[1])
new_img[new_y[0]:new_y[1], new_x[0]:new_x[1]] = img[old_y[0]:old_y[1], old_x[0]:old_x[1]]
if not rot == 0:
        ## Remove padding. Note: scipy.misc.imrotate/imresize were removed in SciPy >= 1.2;
        ## cv2 equivalents are used here as an assumed drop-in replacement.
        new_img = new_img.astype(np.float32)  # cv2 warp needs a supported dtype
        rot_mat = cv2.getRotationMatrix2D((new_img.shape[1] / 2, new_img.shape[0] / 2), rot, 1.0)
        new_img = cv2.warpAffine(new_img, rot_mat, (new_img.shape[1], new_img.shape[0]))
        new_img = new_img[pad:-pad, pad:-pad]
    new_img = cv2.resize(new_img.astype(np.float32), (res[1], res[0]))
return new_img
def j2d_crop(img, j2d_file, rescale=1.2, detection_thresh=0.2):
""" Get center and scale for Bbox from OpenPose/Centertrack detections."""
with open(j2d_file, 'r') as f:
keypoints = json.load(f)['people'][0]['pose_keypoints_2d']
keypoints = np.reshape(np.array(keypoints), (-1,3))
valid = keypoints[:,-1] > detection_thresh
valid_keypoints = keypoints[valid][:,:-1]
center = valid_keypoints.mean(axis=0)
bbox_size = (valid_keypoints.max(axis=0) - valid_keypoints.min(axis=0)).max()
## Adjust bounding box tightness
scale = bbox_size / 200.0
scale *= rescale
img = crop(img, center, scale, (cfg.IMG_W, cfg.IMG_H))
return img
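## Illustrative sketch of the JSON layout j2d_crop() expects (hypothetical values,
## not a real detection): OpenPose writes a flat [x1, y1, conf1, x2, y2, conf2, ...]
## list under people[0]['pose_keypoints_2d'], which is reshaped to (-1, 3) above.
def _demo_openpose_keypoint_layout():
    detection = {"people": [{"pose_keypoints_2d": [100.0, 50.0, 0.9,
                                                   120.0, 80.0, 0.1]}]}
    keypoints = np.reshape(np.array(detection["people"][0]["pose_keypoints_2d"]), (-1, 3))
    valid = keypoints[:, -1] > 0.2  # same detection_thresh default as above
    assert valid.tolist() == [True, False]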
def bbox_crop(img, bbox):
""" Crop, center and scale image based on BBox """
with open(bbox, 'r') as f:
bbox = np.array(json.load(f)['bbox']).astype(np.float32)
ul_corner = bbox[:2]
center = ul_corner + 0.5 * bbox[2:]
width = max(bbox[2], bbox[3])
scale = width / 200.0
img = crop(img, center, scale, (cfg.IMG_W, cfg.IMG_H))
return img
########################### TF UTILS #############################
import pickle as pkl
import tensorflow as tf
import tensorflow_graphics as tfg
from render.render_layer_ortho import RenderLayer
import render.vertex_normal_expose as dirt_expose
PI = np.pi
def tfread_image(image,fmt='png', channels=3):
""" Simple read and decode image. """
if (fmt == 'png'):
return tf.image.decode_png(image, channels=channels)
elif (fmt == 'jpg'):
return tf.image.decode_jpeg(image, channels=channels)
else:
print ("ERROR specified format not found....")
def tf_norm(tensor, axis=1):
""" Min-Max normalize image. """
min_val = tf.reduce_min(tensor, axis=axis, keepdims=True)
normalized_tensor = tf.div( tf.subtract(tensor, min_val), tf.subtract(tf.reduce_max(tensor, axis=axis, keepdims=True), min_val))
return normalized_tensor
def tfresize_image(image, size=(cfg.IMG_W, cfg.IMG_H)):
""" Resize image. """
return tf.image.resize(image, size)
def denormalize_image(image):
""" Undo normalization of image. """
image = (image / 2) + 0.5
return image
def unprocess_image(image):
""" Undo preprocess image. """
# Normalize image to [0, 1]
image = (image / 2) + 0.5
image = image * 255.0 #[0,1] to [0,255] range
return image
def preprocess_image(image, do_znorm=True):
""" Preprocess image. """
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize(image, (cfg.IMG_W, cfg.IMG_H))
image /= 255.0 # normalize to [0,1] range
if(do_znorm):
# Normalize image to [-1, 1]
image = 2 * (image - 0.5)
return image
def load_and_preprocess_image(path):
""" Simple read and preprocess for just image. """
image = tf.io.read_file(path)
processed_image = preprocess_image(image)
return processed_image
def load_and_preprocess_image_and_mask(path, j2d, j3d, beta, mask_path, pose, camera, data_id):
""" Simple read and preprocess for image and mask. """
image = tf.io.read_file(path)
proc_image = preprocess_image(image)
## For Mask
mask = tf.io.read_file(mask_path)
proc_mask = preprocess_image(mask, do_znorm=False)
return proc_image, j2d, j3d, beta, proc_mask, pose, camera, data_id
def tf_create_collage(img_list, axis=2):
""" Collage a set of images to form a panel. """
tf_new_array = tf.concat([i for i in img_list], axis=axis)
return tf_new_array
def log_images(tag, image, step, writer):
""" Logs a list of images to tensorboard. """
height, width, channel = image.shape
image = Image.fromarray(image)
output = BytesIO()
image.save(output, format='PNG')
image_string = output.getvalue()
output.close()
## Create an Image object
img_sum = tf.Summary.Image(height=height,
width=width,
colorspace=channel,
encoded_image_string=image_string)
## Create a Summary value
im_summary = tf.Summary.Value(tag='%s' % (tag), image=img_sum)
## Create and write Summary
summary = tf.Summary(value=[im_summary])
writer.add_summary(summary, step)
def get_network_params(scope):
""" Get all accessable variables. """
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)
def get_net_train_params(scope):
""" Get Trainable params. """
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
def copy_weights(iter_no, wt_dir, label='best'):
""" Backup the Weights to pretrained_weights/ given iteration number and label i.e 'iter' or 'best' """
    files = os.listdir(wt_dir)  # list the weights directory itself; filenames are filtered on label below
match_substr = '%s-%d' % (label, iter_no)
files = [f for f in files if match_substr in f]
for f in files:
cmd = 'cp %s%s pretrained_weights/' % (wt_dir, f)
print (cmd)
os.system(cmd)
def get_most_recent_iteration(wt_dir, label='iter'):
""" Gets the most recent iteration number from weights/ dir of given label: ('best' or 'iter') """
files = os.listdir(wt_dir)
files = [f for f in files if label in f]
    numbers = {int(f[f.index('-') + 1:f.index('.')]) for f in files}  # long() was Python 2 only
return max(numbers)
def copy_latest(wt_dir, wt_type='best'):
""" Backup latest weights. """
latest_iter = get_most_recent_iteration(label=wt_type, wt_dir=wt_dir)
copy_weights(latest_iter, label=wt_type, wt_dir=wt_dir)
return latest_iter
def get_latest_iter(wt_dir, wt_type='best'):
""" Get latest weights. """
latest_iter = get_most_recent_iteration(label=wt_type, wt_dir=wt_dir)
return latest_iter
def tf_align_by_pelvis(joints):
""" Simple centering by pelvis location. """
hip_id = 0
pelvis = joints[:, hip_id:hip_id+1, :]
return tf.subtract(joints, pelvis)
def tf_mesh2d_center_by_nose(mesh2d,w=224 ,h=224):
""" Simple mesh centering by nose/pelvis vtx. """
#hip_id = 0
nose_id = 0
ctr = mesh2d[nose_id:nose_id+1,:]
mesh_ret = tf.add(tf.subtract(mesh2d, ctr), [[ w/2, h/5 ]])
return mesh_ret
def tf_perspective_project(points3d, focal, prin_pt, name="perspective_project"):
""" Simple Perspective Projection. """
fx = focal[0]
fy = focal[1]
tx = prin_pt[0]
ty = prin_pt[1]
    intrin = tf.convert_to_tensor(np.array([ [fx, 0., tx],
                                             [0., fy, ty],
                                             [0., 0., 1.]]), dtype=points3d.dtype)
    intrin = tf.tile(tf.expand_dims(intrin, 0), [tf.shape(points3d)[0], 1, 1])  # batch the intrinsics
    p_cam3d = tf.matmul(points3d, intrin, transpose_b=True, name=name)
    points2d = p_cam3d[:,:,0:2] / p_cam3d[:,:,2:3]  ### perspective divide by depth
    return points2d
def tf_orthographic_project(points3d, name="orthographic_project"):
""" Simple Orthographic Projection. """
    return points3d[:,:,0:2]  ## keep X,Y and drop Z
def tf_dyn_scale_and_align(vertices, joints_3d, scale, add_trans):
""" Dynamic scale and trans adjust. """
xy_max = tf.expand_dims(tf.reduce_max(vertices, axis=1), axis=1)
xy_min = tf.expand_dims(tf.reduce_min(vertices, axis=1), axis=1)
#person_ctr = (xy_max + xy_min)/2.0
person_range = tf.abs(xy_max-xy_min)
person_sc = tf.expand_dims(tf.reduce_max(person_range[:,:,0:2], axis=2), axis=2)
### Scale person to detector scale
vertices = tf.div(vertices, person_sc)
vertices = vertices * scale
joints_3d = tf.div(joints_3d, person_sc)
joints_3d = joints_3d * scale
### Bbox center
xy_max = tf.expand_dims(tf.reduce_max(vertices, axis=1), axis=1)
xy_min = tf.expand_dims(tf.reduce_min(vertices, axis=1), axis=1)
person_ctr = (xy_max + xy_min)/2.0
add_trans = tf.concat([add_trans, tf.zeros_like(add_trans[:,:,0:1])], axis=2)
vertices = vertices - person_ctr + add_trans
joints_3d = joints_3d - person_ctr + add_trans
return vertices, joints_3d, scale[:,0], ((add_trans-person_ctr)[:,0,:2])
def tf_do_scale_and_align(vertices, joints_3d, scale, trans):
""" Perform Scale and trans. (in world space) """
scale = tf.reshape(scale, [-1, 1, 1])
trans = tf.reshape(trans, [-1, 1, 2])
z = tf.zeros_like(trans[:,:,0:1])
shift = tf.concat([trans, z], axis=2)
### Trans in world space
vertices = vertices + shift
joints_3d = joints_3d + shift
### Scale person
vertices = vertices * scale
joints_3d = joints_3d * scale
return vertices, joints_3d
def for_tpix_tf_do_scale_and_align(vertices, joints_3d, scale, trans):
""" Perform Scale and trans. (in Pixel space) """
xy_max = tf.expand_dims(tf.reduce_max(vertices, axis=1), axis=1)
xy_min = tf.expand_dims(tf.reduce_min(vertices, axis=1), axis=1)
#person_ctr = (xy_max + xy_min)/2.0
person_range = tf.abs(xy_max-xy_min)
person_sc = tf.expand_dims(tf.reduce_max(person_range[:,:,0:2], axis=2), axis=2) ##ignore z
### Unit scale
vertices = tf.div(vertices, person_sc)
joints_3d = tf.div(joints_3d, person_sc)
###
scale = tf.reshape(scale, [-1, 1, 1])
trans = tf.reshape(trans, [-1, 1, 2])
z = tf.zeros_like(trans[:,:,0:1])
shift = tf.concat([trans, z], axis=2)
### Scale person
vertices = vertices * scale
joints_3d = joints_3d * scale
### Trans in cam space
vertices = vertices + shift
joints_3d = joints_3d + shift
return vertices, joints_3d
def tf_align_with_image_j2d(points2d, img_width, img_height):
""" Perform center alignment to image coordinate system. (in Pixel space) """
if(img_width == img_height):
points2d = points2d + (img_width/2)
else:
        width_tf = tf.zeros((points2d.shape[0], points2d.shape[1], 1), dtype=points2d.dtype) + (img_width/2)  # match the points' dtype; int32 + float would fail
        height_tf = tf.zeros((points2d.shape[0], points2d.shape[1], 1), dtype=points2d.dtype) + (img_height/2)
concatd = tf.concat([width_tf, height_tf], axis=2)
points2d = points2d + concatd
return points2d
############ Render pipeline utils ############
MESH_PROP_FACES_FL = './assets/smpl_sampling.pkl'
""" Read face definition. Fixed for a SMPL model. """
with open(os.path.join(os.path.dirname(__file__), MESH_PROP_FACES_FL), 'rb') as f:
sampling = pkl.load(f)
M = sampling['meshes']
faces = M[0]['f'].astype(np.int32)
faces = tf.convert_to_tensor(faces,dtype=tf.int32)
def_bgcolor = tf.zeros(3) + [0, 0.5, 0] ## Green BG
def colour_pick_img(img_batch, vertices, batch_size):
""" Pick clr based on mesh registration. [Vtx, Img] -> [Vtx_clr] """
proj_verts = tf_orthographic_project(vertices)
verts_pix_space = tf_align_with_image_j2d(proj_verts, cfg.IMG_W, cfg.IMG_H)
#### Pick colours and resolve occlusion softly
verts_pix_space = tf.cast(verts_pix_space, dtype=tf.int32)
verts_pix_space = tf.concat([verts_pix_space[:,:,1:], verts_pix_space[:,:,0:1]], axis=2)
if(cfg.TF_version >= 1.14):
#### Alternative colour pick for TF 1.14 & above, faster inference.
        clr_picked = tf.gather_nd(params=img_batch, indices=verts_pix_space, batch_dims=1) ### NOTE: only for tf 1.14 and above; pick from img_batch, as in the loop below
else:
### For TF 1.13 and older
for b in range(batch_size):
if b == 0:
clr_picked = [tf.gather_nd(params=img_batch[b], indices=verts_pix_space[b])]
else:
curr_clr_pick = [tf.gather_nd(params=img_batch[b], indices=verts_pix_space[b])]
clr_picked = tf.concat([clr_picked, curr_clr_pick], axis=0)
img_clr_picked = tf.cast(clr_picked, dtype=tf.float32)
return img_clr_picked
def get_occ_aware_cam_facing_mask(vertices, batch_size, part_based_occlusion_resolve=False, bgcolor=def_bgcolor):
""" Occlusion-aware vtx weighting, depth based or part-based. [Vtx] -> [Vtx_occ_wtmap] """
if (part_based_occlusion_resolve):
vertex_colors = np.zeros((batch_size, 6890, 3))
### Part segmentation_generation
vtx_prts = np.load("vtx_clr_smpl_proj_final_part_segmentations.npy")
### Vertex parts modify for maximal seperation
vtx_prts = vtx_prts + 1
vtx_prts[vtx_prts == 2] = 5
vtx_prts[vtx_prts == 22] = 7
vtx_prts[vtx_prts == 8] = 22
vtx_prts[vtx_prts == 12] = 2
vtx_prts[vtx_prts == 23] = 13
vtx_prts[vtx_prts == 19] = 4
vtx_prts[vtx_prts == 21] = 18
#### part labelled
vtx_part_labels = np.zeros(vertices.shape)
vtx_prts = np.expand_dims(vtx_prts, axis=1)
vtx_prts = vtx_prts / 24.0
part_label = np.concatenate([vtx_prts, vtx_prts, vtx_prts], axis=1)
vtx_part_labels[:] = part_label ##broadcast to form batch
#### Render cam setup
fixed_rt = np.array([1.0, 0.0, 0.0]) ### tilt,pan,roll
angle = np.linalg.norm(fixed_rt)
axis = fixed_rt / angle
ang = np.pi
new_an_ax = axis * (ang)
fixed_rt = new_an_ax
fixed_t = [0., 0., 0.]
##
fixed_renderer = RenderLayer(cfg.IMG_W, cfg.IMG_H, 3, bgcolor=bgcolor, f=faces, camera_f=[cfg.IMG_W, cfg.IMG_H], camera_c=[cfg.IMG_W/2.0, cfg.IMG_H/2.0], camera_rt=fixed_rt, camera_t=fixed_t)
vert_norms = dirt_expose.get_vertex_normals(vertices, faces)
#### Verts selection based on norm
vert_norms_flat = tf.reshape(vert_norms, [-1, 3])
fake_angle = tf.ones_like(vert_norms_flat[:,0:1], dtype=tf.float32) ## unit mag
euler_angles = tfg.geometry.transformation.euler.from_axis_angle(axis=vert_norms_flat, angle=fake_angle)
vert_norms_euler = tf.reshape(euler_angles, [-1, 6890, 3])
### Diff. margin formulation
quant_sharpness_factor = 50
verts_ndiff = vert_norms_euler[:,:,2:] * -1 ## invert as cam faces
    verts_ndiff = verts_ndiff * quant_sharpness_factor ## push values away from 0.0 so the sigmoid approximates a hard step
#verts_ndiff = tf.math.sign(verts_ndiff)
#verts_ndiff = tf.nn.relu(verts_ndiff)
verts_ndiff = tf.nn.sigmoid(verts_ndiff)
if(part_based_occlusion_resolve):
vtx_part_labels= tf.convert_to_tensor(vtx_part_labels, dtype=tf.float32)
## Normal part based resolving occlusion based render
cam_facing_vtx_clrs = tf.multiply(vtx_part_labels, verts_ndiff)
else:
## Depth based occlusion aware picking to be debugged
depth_vertices = vertices[:,:,2:]
## Normalize the depth between 0 and 1
min_val = tf.reduce_min(depth_vertices, axis=1, keepdims=True)
normalized_depth_vertices = tf.div( tf.subtract(depth_vertices, min_val), tf.subtract(tf.reduce_max(depth_vertices, axis=1, keepdims=True), min_val))
cam_facing_vtx_clrs = tf.tile(normalized_depth_vertices, [1,1,3])
cam_facing_vtx_clrs = tf.multiply(cam_facing_vtx_clrs, verts_ndiff)
## Mask render for occlusion resolution
    occ_aware_mask = fixed_renderer.call(vertices, vc=cam_facing_vtx_clrs) ## occlusion-aware z-buffered part masks
clr_picked = colour_pick_img(occ_aware_mask, vertices, batch_size)
## Occlusion resolution based on z-buffered parts
if(part_based_occlusion_resolve):
occ_sel_diff = (vtx_part_labels[:,:,0:1] - clr_picked[:,:,0:1] ) * 10.0
else:
### Depth based colour pick
occ_sel_diff = (normalized_depth_vertices[:,:,0:1] - clr_picked[:,:,0:1] ) * 10.0
### Diff. margin soft selection
occ_sel = tf.nn.sigmoid(occ_sel_diff) * tf.nn.sigmoid(-1 * occ_sel_diff) * 4.0
#### Select front facing
final_front_facing_occ_resolved = tf.multiply(occ_sel, verts_ndiff)
return final_front_facing_occ_resolved
def apply_ref_symmetry(vclr_picked_resolved, front_facing_occ_resolved_mask, batch_size):
""" Reflectional symmetry module. [Vtx_clr, Vtx_wtmap] -> [Vtx_clr_symm] """
symm_arr = np.load("./assets/basic_vtx_clr_symm_map.npy")
symm_arr_transpose = np.transpose(symm_arr)
sym_map = tf.expand_dims(symm_arr, axis=0)
sym_map = tf.tile(sym_map, [batch_size,1,1])
sym_map_transpose = tf.expand_dims(symm_arr_transpose, axis=0)
sym_map_transpose = tf.tile(sym_map_transpose, [batch_size, 1, 1])
## Group clr value calc
num = tf.matmul(sym_map, vclr_picked_resolved)
den = tf.matmul(sym_map, front_facing_occ_resolved_mask)
den = den + 0.00001
calc_val = tf.truediv(num, den)
    ### Assign values using symmetry
vclr_symm = tf.matmul(sym_map_transpose, calc_val)
return vclr_symm
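def _demo_symmetry_averaging():
    """ Illustrative numpy sketch of the group-averaging trick above, using a toy
    2-vertex symmetry map rather than the real ./assets file: multiplying by a
    group-membership matrix sums member values, and dividing by the summed
    weights yields the weighted group mean, which the transpose broadcasts back. """
    sym = np.array([[1.0, 1.0]])        # one symmetry group containing both vertices
    clrs = np.array([[0.2], [0.8]])     # picked colour per vertex
    wts = np.array([[1.0], [1.0]])      # visibility weight per vertex
    group_val = sym.dot(clrs) / (sym.dot(wts) + 0.00001)
    vclr_symm = sym.T.dot(group_val)    # both vertices receive the mean ~0.5
    assert np.allclose(vclr_symm, 0.5, atol=1e-3)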
| 33.538462 | 195 | 0.618374 | 4,930 | 31,392 | 3.718256 | 0.146856 | 0.024876 | 0.003819 | 0.008401 | 0.330206 | 0.263543 | 0.222301 | 0.206917 | 0.187715 | 0.17615 | 0 | 0.04785 | 0.239074 | 31,392 | 935 | 196 | 33.574332 | 0.719555 | 0.161602 | 0 | 0.231561 | 0 | 0 | 0.013716 | 0.004217 | 0 | 0 | 0 | 0 | 0.001715 | 1 | 0.087479 | false | 0 | 0.039451 | 0 | 0.210978 | 0.006861 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ebb024cf3162a7ae7533de24c182385e63946496 | 8,389 | py | Python | netQuil/connections.py | att-innovate/qnetdes | 459d688e92139ab3219416cdb9e3b20ff082dc1d | [
"MIT"
] | 4 | 2019-11-14T21:30:35.000Z | 2021-12-13T08:34:33.000Z | netQuil/connections.py | att-innovate/qnetdes | 459d688e92139ab3219416cdb9e3b20ff082dc1d | [
"MIT"
] | null | null | null | netQuil/connections.py | att-innovate/qnetdes | 459d688e92139ab3219416cdb9e3b20ff082dc1d | [
"MIT"
] | null | null | null | import queue
import multiprocessing
import itertools
import sys
__all__ = ["QConnect", "CConnect"]
pulse_length_default = 10 * 10 ** -12 # 10 ps photon pulse length
signal_speed = 2.998 * 10 ** 5 #speed of light in km/s
fiber_length_default = 0.0
class QConnect:
def __init__(self, *args, transit_devices=[]):
'''
This is the base class for a quantum connection between multiple agents.
:param agents \*args: list of agents to connect
:param List<Devices> transit_devices: list of devices qubits travel through
'''
agents = list(args)
self.agents = {}
self.source_devices = {}
self.target_devices = {}
self.transit_devices = {}
'''
Create queue to keep track of multiple requests. Name of queue is name of
target agent.
'''
self.queues = {}
for agent in agents:
self.agents.update({agent.name: agent})
self.source_devices.update({agent.name: agent.source_devices})
self.target_devices.update({agent.name: agent.target_devices})
self.transit_devices.update({agent.name: transit_devices})
self.queues.update({agent.name: queue.Queue()})
for agentConnect in agents:
if agentConnect != agent:
agent.qconnections[agentConnect.name] = self
def put(self, source, target, qubits, source_time):
'''
Constructs full list of devices that each qubit must travel through. Sends the qubits
through source devices. Places qubits and a list of transit and target
devices on the queue. Queue is keyed on the target agent's name.
:param String source: name of agent where the qubits being sent originated
:param String target: name of agent receiving qubits
:param Array qubits: array of numbers corresponding to qubits the source is sending
:param Float source_time: time of source agent before sending qubits
:returns: time qubits took to pass through source devices
'''
source_devices = self.source_devices[source]
transit_devices = self.transit_devices[source]
target_devices = self.target_devices[target]
non_source_devices = {
"transit": transit_devices,
"target": target_devices,
}
program = self.agents[source].program
source_delay = 0
# Keep track of qubits remaining
traveling_qubits = qubits
if not source_devices:
source_delay += pulse_length_default
else:
# Keep track of qubits lost by each device
total_lost_qubits = []
for device in source_devices:
# If qubits are still remaining
if traveling_qubits:
res = device.apply(program, traveling_qubits)
if 'lost_qubits' in res.keys():
lost_qubits = res['lost_qubits']
# Remove lost qubits from traveling qubits
traveling_qubits = list(set(traveling_qubits) - set(lost_qubits))
# Add lost_qubits lost from current device to total_lost_qubits
total_lost_qubits += lost_qubits
if 'delay' in res.keys(): source_delay += res['delay']
else: break
            # Invert lost qubits into a new list and add to traveling qubits.
            # (Appending to total_lost_qubits while iterating over it would never terminate.)
            inverted_lost_qubits = []
            for q in total_lost_qubits:
                if q == 0: inverted_lost_qubits.append(float("-inf"))
                else: inverted_lost_qubits.append(-q)
            traveling_qubits += inverted_lost_qubits
# Scale source delay time according to number of qubits sent
scaled_source_delay = source_delay*len(qubits)
self.queues[target].put((traveling_qubits, non_source_devices, scaled_source_delay, source_time))
return scaled_source_delay
def get(self, agent):
'''
Pops qubits off of the agent's queue. Sends qubit through transit and target devices,
simulating a quantum network. Return an array of the qubits that have been altered, as well as
the time it took the qubit to travel through the network. Some qubits may be lost during transmission. If lost,
their value will switch to negative, or, in the case of 0, be set to -inf
:param Agent agent: agent receiving the qubits
:returns: list of qubits, time to pass through transit and target devices, and the source agent's time
'''
traveling_qubits, devices, source_delay, source_time = self.queues[agent.name].get()
agent.qubits = list(set(traveling_qubits + agent.qubits))
program = self.agents[agent.name].program
transit_devices = devices["transit"]
target_devices = devices["target"]
# Number of qubits before any are lost
num_travel_qubits = len(traveling_qubits)
travel_delay = 0
if not transit_devices:
travel_delay += fiber_length_default/signal_speed
if not target_devices:
travel_delay += 0
total_lost_qubits = [q for q in traveling_qubits if q < 0 or q == float("-inf")]
remaining_qubits = [q for q in traveling_qubits if q >= 0]
for device in list(itertools.chain(transit_devices, target_devices)):
# If qubits are remaining
if remaining_qubits:
                res = device.apply(program, remaining_qubits)  # apply only to qubits not yet lost
if 'lost_qubits' in res.keys():
lost_qubits = res['lost_qubits']
# Remove lost qubits from traveling qubits
remaining_qubits = list(set(remaining_qubits) - set(lost_qubits))
# Add lost_qubits lost from current device to total_lost_qubits
total_lost_qubits += lost_qubits
if 'delay' in res.keys(): travel_delay += res['delay']
else: break
# Remove traveling_qubits
agent.qubits = list(set(agent.qubits) - set(traveling_qubits))
lost_qubits_flipped = []
for q in total_lost_qubits:
if q == 0: lost_qubits_flipped.append(float("-inf"))
else:
lost_qubits_flipped.append(-q)
# Add inverted lost qubits to remaining qubits
traveling_qubits = remaining_qubits + lost_qubits_flipped
agent.qubits += traveling_qubits
scaled_delay = travel_delay*num_travel_qubits + source_delay
return traveling_qubits, scaled_delay, source_time
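# Illustrative sketch (not part of the API above): the queue encodes lost qubits
# by sign inversion so the receiver can distinguish them -- qubit n becomes -n,
# and qubit 0, which has no sign, becomes -inf.
def _demo_lost_qubit_encoding():
    lost = [0, 3, 7]
    encoded = [float("-inf") if q == 0 else -q for q in lost]
    assert encoded == [float("-inf"), -3, -7]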
class CConnect:
def __init__(self, *args, length=0.0):
'''
This is the base class for a classical connection between multiple agents.
:param agents \*args: list of agents to connect
:param Float length: distance between first and second agent
'''
agents = list(args)
self.agents = {}
'''
Create queue to keep track of multiple requests. Name of queue is name of
target agent.
'''
self.queues = {}
for agent in agents:
self.agents.update({agent.name: agent})
self.queues.update({agent.name: queue.Queue()})
for agentConnect in agents:
if agentConnect != agent:
agent.cconnections[agentConnect.name] = self
self.length = length
def put(self, target, cbits):
'''
Places cbits on queue keyed on the target Agent's name
:param String target: name of recipient of program
:param Array cbits: array of numbers corresponding to cbits agent is sending
:returns: time for cbits to travel
'''
csource_delay = pulse_length_default * 8 * sys.getsizeof(cbits)
self.queues[target].put((cbits, csource_delay))
return csource_delay
def get(self, agent):
'''
Pops cbits off of the agent's queue and adds travel delay
:param String agent: name of the agent receiving the cbits
:returns: cbits from source and time they took to travel
'''
cbits, source_delay = self.queues[agent].get()
travel_delay = self.length/signal_speed
scaled_delay = travel_delay*len(cbits) + source_delay
return cbits, scaled_delay | 39.947619 | 119 | 0.619383 | 1,023 | 8,389 | 4.924731 | 0.162268 | 0.061532 | 0.032751 | 0.015879 | 0.356689 | 0.26717 | 0.250099 | 0.241366 | 0.241366 | 0.216753 | 0 | 0.004829 | 0.308857 | 8,389 | 210 | 120 | 39.947619 | 0.864091 | 0.293003 | 0 | 0.290909 | 0 | 0 | 0.022023 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054545 | false | 0 | 0.036364 | 0 | 0.145455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ebb199161cfe0fe5c616a4c065ef5c14803d10c1 | 2,035 | py | Python | pqcli/ui/curses/views/game_view/character_sheet_window.py | tree-s/pq-cli | f5d0ed69a99c490a63f854442fba2b443e59a134 | [
"MIT"
] | 94 | 2018-11-17T22:40:16.000Z | 2022-03-28T05:09:16.000Z | pqcli/ui/curses/views/game_view/character_sheet_window.py | tree-s/pq-cli | f5d0ed69a99c490a63f854442fba2b443e59a134 | [
"MIT"
] | 17 | 2019-04-10T18:06:46.000Z | 2022-03-03T03:25:08.000Z | pqcli/ui/curses/views/game_view/character_sheet_window.py | tree-s/pq-cli | f5d0ed69a99c490a63f854442fba2b443e59a134 | [
"MIT"
] | 14 | 2019-04-10T21:33:14.000Z | 2022-02-16T14:42:56.000Z | import typing as T
from pqcli.mechanic import Player, StatType
from pqcli.ui.curses.widgets import Focusable
from .progress_bar_window import DataTableProgressBarWindow
class CharacterSheetWindow(Focusable, DataTableProgressBarWindow):
def __init__(
self, player: Player, parent: T.Any, h: int, w: int, y: int, x: int
) -> None:
super().__init__(
parent,
h,
w,
y,
x,
" Character Sheet ",
align_right=False,
show_time=True,
)
self._on_focus_change += self._render
self._focused = True
self._player = player
self._player.connect("level_up", self._sync_traits)
self._player.stats.connect("change", self._sync_traits)
self._player.exp_bar.connect("change", self._sync_exp)
self.sync()
def stop(self) -> None:
super().stop()
self._player.disconnect("level_up", self._sync_traits)
self._player.stats.disconnect("change", self._sync_traits)
self._player.exp_bar.disconnect("change", self._sync_exp)
def sync(self) -> None:
self._sync_traits()
self._sync_exp()
def _sync_traits(self) -> None:
if not self._win:
return
self._data_table.clear()
self._data_table.add("Name", self._player.name)
self._data_table.add("Race", self._player.race.name)
self._data_table.add("Class", self._player.class_.name)
self._data_table.add("Level", str(self._player.level))
self._data_table.add(" " * 15, "")
for stat in StatType:
self._data_table.add(stat.value, str(self._player.stats[stat]))
self._render_data_table()
def _sync_exp(self) -> None:
self._cur_pos = self._player.exp_bar.position
self._max_pos = self._player.exp_bar.max_
self._progress_title = (
f"Experience ({self._max_pos-self._cur_pos:.0f} XP to go)"
)
self._render_progress_bar()
| 30.373134 | 75 | 0.615725 | 252 | 2,035 | 4.615079 | 0.31746 | 0.128977 | 0.078246 | 0.082545 | 0.239037 | 0.123818 | 0.123818 | 0.123818 | 0 | 0 | 0 | 0.002011 | 0.26683 | 2,035 | 66 | 76 | 30.833333 | 0.77748 | 0 | 0 | 0 | 0 | 0 | 0.064373 | 0.016708 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096154 | false | 0 | 0.076923 | 0 | 0.211538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ebb3616f90465bce3896df5538302b23c8d738c6 | 1,668 | py | Python | src/precession/planet.py | kurlytail/precession | a5dd83f4fca4629de1f5759bb467183bda1a6506 | [
"MIT"
] | null | null | null | src/precession/planet.py | kurlytail/precession | a5dd83f4fca4629de1f5759bb467183bda1a6506 | [
"MIT"
] | null | null | null | src/precession/planet.py | kurlytail/precession | a5dd83f4fca4629de1f5759bb467183bda1a6506 | [
"MIT"
] | null | null | null | import yaml
import pathlib
import json
import math
class Planet(object):
def __init__(self, config):
self.GMS = 0
# mass
self.M = 0.
self.name = "unknown"
# period
self.T = 1.
# eccentricity
self.e = 0.
# semi major axis
self.a = 1.
# configuration
self.config = config
def fixup(self):
        self.M = self.M / 2.e+30  # normalize mass to solar masses (input assumed in kg)
self.RMin = self.a * (1 - self.e)
self.RMax = self.a * (1 + self.e)
self.R = self.a
self.V = (2 * math.pi * self.R) / self.T
self.GMS = self.R * self.V**2
self.vMax = math.sqrt(
(((1 + self.e) * (1 + self.M)) / self.RMin) * self.GMS)
self.L = self.a * (1 - self.e) * self.vMax
self.GM = self.GMS * self.M
@staticmethod
def load(config, data):
        if isinstance(data, pathlib.Path):  # Path covers both POSIX and Windows paths
data = str(data)
if isinstance(data, str):
with open(data, "r") as data_file:
data = yaml.safe_load(data_file)
if not isinstance(data, dict):
raise TypeError(f"data type {type(data)} cannot be loaded")
planet = Planet(config)
for k in data:
setattr(planet, k, data[k])
planet.fixup()
return planet
def get_dict(self):
data = self.__dict__.copy()
data.pop("config")
return data
def save(self, filename):
with open(filename, 'w') as file:
yaml.dump(self.get_dict(), file)
def __str__(self) -> str:
return f"planet {self.name} => {', '.join(yaml.safe_dump(self.get_dict()).splitlines())}"
| 27.8 | 97 | 0.522182 | 223 | 1,668 | 3.820628 | 0.336323 | 0.029343 | 0.028169 | 0.035211 | 0.052817 | 0.052817 | 0 | 0 | 0 | 0 | 0 | 0.013686 | 0.342926 | 1,668 | 59 | 98 | 28.271186 | 0.763686 | 0.032374 | 0 | 0 | 0 | 0.021277 | 0.082711 | 0.03296 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12766 | false | 0 | 0.085106 | 0.021277 | 0.297872 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ebb54d34edb14bf6d75544e3fae03ac69c069a8f | 15,289 | py | Python | jade/jobs/job_submitter.py | NREL/jade | 84d73f45e206c4a35e6483e6c1ce29ab7ac7e543 | [
"BSD-3-Clause"
] | 15 | 2021-05-15T21:58:26.000Z | 2022-03-17T08:26:48.000Z | jade/jobs/job_submitter.py | NREL/jade | 84d73f45e206c4a35e6483e6c1ce29ab7ac7e543 | [
"BSD-3-Clause"
] | 22 | 2021-02-04T20:02:33.000Z | 2021-09-14T13:29:30.000Z | jade/jobs/job_submitter.py | NREL/jade | 84d73f45e206c4a35e6483e6c1ce29ab7ac7e543 | [
"BSD-3-Clause"
] | 3 | 2021-01-11T15:11:31.000Z | 2021-06-07T17:36:51.000Z | """Provides ability to run jobs locally or on HPC."""
from collections import OrderedDict
import datetime
import fileinput
import importlib
import logging
import os
import shutil
import jade
from jade.common import (
CONFIG_FILE,
JOBS_OUTPUT_DIR,
OUTPUT_DIR,
RESULTS_FILE,
HPC_CONFIG_FILE,
)
from jade.enums import JobCompletionStatus, Status, ResourceMonitorType
from jade.events import (
EVENTS_FILENAME,
EVENT_NAME_ERROR_LOG,
StructuredLogEvent,
EVENT_CATEGORY_ERROR,
EVENT_CATEGORY_RESOURCE_UTIL,
EVENT_NAME_BYTES_CONSUMED,
EVENT_NAME_SUBMIT_STARTED,
EVENT_NAME_SUBMIT_COMPLETED,
)
from jade.exceptions import InvalidParameter
from jade.extensions.registry import Registry, ExtensionClassType
from jade.hpc.common import HpcType
from jade.hpc.hpc_manager import HpcManager
from jade.hpc.hpc_submitter import HpcSubmitter
from jade.jobs.cluster import Cluster
from jade.jobs.job_configuration_factory import create_config_from_previous_run
from jade.jobs.job_manager_base import JobManagerBase
from jade.jobs.job_runner import JobRunner
from jade.jobs.results_aggregator import ResultsAggregator
from jade.models import SubmitterParams
from jade.models.submission_group import make_submission_group_lookup
from jade.loggers import log_event
from jade.result import serialize_results, ResultsSummary
from jade.utils.repository_info import RepositoryInfo
from jade.utils.subprocess_manager import run_command
from jade.utils.utils import dump_data, get_directory_size_bytes
import jade.version
logger = logging.getLogger(__name__)
class JobSubmitter(JobManagerBase):
"""Submits jobs for execution locally or on an HPC."""
def __init__(self, config_file, output, is_new):
"""Internal constructor. Callers should use create() or load()."""
super().__init__(config_file, output)
self._hpc = None
self._config_file = config_file
self._is_new = is_new
@classmethod
def create(cls, config_file, params: SubmitterParams, output=OUTPUT_DIR):
"""Creates a new instance.
Parameters
----------
config_file : JobConfiguration
configuration for simulation
params: SubmitterParams
output : str
Output directory
"""
main_file = os.path.join(output, CONFIG_FILE)
shutil.copyfile(config_file, main_file)
mgr = cls(main_file, output, True)
mgr.run_checks(params)
return mgr
@classmethod
def load(cls, output):
"""Loads an instance from an existing directory."""
return cls(os.path.join(output, CONFIG_FILE), output, False)
def __repr__(self):
return f"""num_jobs={self.get_num_jobs()}
results_summary={self.get_results_summmary_report()}"""
def cancel_jobs(self, cluster):
"""Cancel running and pending jobs."""
groups = make_submission_group_lookup(cluster.config.submission_groups)
hpc = HpcManager(groups, self._output)
for job_id in cluster.job_status.hpc_job_ids:
hpc.cancel_job(job_id)
cluster.mark_complete(canceled=True)
def submit_jobs(self, cluster, force_local=False):
"""Submit simulations. Auto-detect whether the current system is an HPC
and submit to its queue. Otherwise, run locally.
Parameters
----------
cluster : Cluster
force_local : bool
If on HPC, run jobs through subprocess as if local.
Returns
-------
Status
"""
if self._is_new:
logger.info("Submit %s jobs for execution.", self._config.get_num_jobs())
logger.info("JADE version %s", jade.version.__version__)
registry = Registry()
loggers = registry.list_loggers()
logger.info("Registered modules for logging: %s", ", ".join(loggers))
self._save_repository_info(registry)
ResultsAggregator.create(self._output)
# If an events summary file exists, it is invalid.
events_file = os.path.join(self._output, EVENTS_FILENAME)
if os.path.exists(events_file):
os.remove(events_file)
event = StructuredLogEvent(
source="submitter",
category=EVENT_CATEGORY_RESOURCE_UTIL,
name=EVENT_NAME_SUBMIT_COMPLETED,
message="job submission started",
num_jobs=self.get_num_jobs(),
)
log_event(event)
else:
self._handle_submission_groups()
result = Status.IN_PROGRESS
group = self._config.get_default_submission_group()
groups = make_submission_group_lookup(cluster.config.submission_groups)
self._hpc = HpcManager(groups, self._output)
if self._hpc.hpc_type == HpcType.LOCAL or force_local:
runner = JobRunner(self._config_file, output=self._output)
num_processes = group.submitter_params.num_processes
verbose = group.submitter_params.verbose
result = runner.run_jobs(verbose=verbose, num_processes=num_processes)
agg = ResultsAggregator.load(self._output)
agg.process_results()
is_complete = True
else:
is_complete = self._submit_to_hpc(cluster)
if is_complete:
result = self._handle_completion(cluster)
return result
def _handle_completion(self, cluster):
result = Status.GOOD
self._results = ResultsAggregator.list_results(self._output)
if len(self._results) != self._config.get_num_jobs():
finished_jobs = {x.name for x in self._results}
all_jobs = {x.name for x in self._config.iter_jobs()}
missing_jobs = sorted(all_jobs.difference(finished_jobs))
logger.error(
"Error in result totals. num_results=%s total_num_jobs=%s",
len(self._results),
self._config.get_num_jobs(),
)
logger.error(
"These jobs did not finish: %s. Check for process crashes or HPC timeouts.",
missing_jobs,
)
result = Status.ERROR
else:
missing_jobs = []
self.write_results_summary(RESULTS_FILE, missing_jobs)
self._log_error_log_messages(self._output)
bytes_consumed = get_directory_size_bytes(self._output, recursive=False)
event = StructuredLogEvent(
source="submitter",
category=EVENT_CATEGORY_RESOURCE_UTIL,
name=EVENT_NAME_BYTES_CONSUMED,
message="main output directory size",
bytes_consumed=bytes_consumed,
)
log_event(event)
event = StructuredLogEvent(
source="submitter",
category=EVENT_CATEGORY_RESOURCE_UTIL,
name=EVENT_NAME_SUBMIT_COMPLETED,
message="job submission completed",
num_jobs=self.get_num_jobs(),
)
log_event(event)
group = self._config.get_default_submission_group()
if group.submitter_params.generate_reports:
self.generate_reports(self._output, group.submitter_params.resource_monitor_type)
cluster.mark_complete()
if cluster.config.pipeline_stage_num is not None:
# The pipeline directory must be the one above this one.
pipeline_dir = os.path.dirname(self._output)
next_stage = cluster.config.pipeline_stage_num + 1
cmd = (
f"jade pipeline submit-next-stage {pipeline_dir} "
f"--stage-num={next_stage} "
f"--return-code={result.value}"
)
run_command(cmd)
return result
def write_results_summary(self, filename, missing_jobs):
"""Write the results to filename in the output directory."""
data = OrderedDict()
data["jade_version"] = jade.version.__version__
now = datetime.datetime.now()
data["timestamp"] = now.strftime("%m/%d/%Y %H:%M:%S")
data["base_directory"] = os.getcwd()
results = self._build_results(missing_jobs)
data["results_summary"] = results["summary"]
data["missing_jobs"] = missing_jobs
data["results"] = results["results"]
output_file = os.path.join(self._output, filename)
dump_data(data, output_file)
logger.info("Wrote results to %s.", output_file)
num_successful = results["summary"]["num_successful"]
num_canceled = results["summary"]["num_canceled"]
num_failed = results["summary"]["num_failed"]
num_missing = len(missing_jobs)
total = num_successful + num_failed + num_missing
log_func = logger.info if num_successful == total else logger.warning
log_func(
"Successful=%s Failed=%s Canceled=%s Missing=%s Total=%s",
num_successful,
num_failed,
num_canceled,
num_missing,
total,
)
return output_file
def _build_results(self, missing_jobs):
num_successful = 0
num_failed = 0
num_canceled = 0
for result in self._results:
if result.is_successful():
num_successful += 1
elif result.is_failed():
num_failed += 1
else:
assert result.is_canceled(), str(result)
num_canceled += 1
return {
"results": serialize_results(self._results),
"summary": {
"num_successful": num_successful,
"num_failed": num_failed,
"num_canceled": num_canceled,
"num_missing": len(missing_jobs),
},
}
def _save_repository_info(self, registry):
extensions = registry.list_extensions()
extension_packages = set(["jade"])
for ext in extensions:
exec_module = ext[ExtensionClassType.EXECUTION].__module__
name = exec_module.split(".")[0]
extension_packages.add(name)
for name in extension_packages:
try:
package = importlib.import_module(name)
repo_info = RepositoryInfo(package)
patch = os.path.join(self._output, f"{name}-diff.patch")
repo_info.write_diff_patch(patch)
logger.info("%s repository information: %s", name, repo_info.summary())
except InvalidParameter:
pass
@staticmethod
def _log_error_log_messages(directory):
for event in JobSubmitter.find_error_log_messages(directory):
log_event(event)
@staticmethod
def find_error_log_messages(directory):
"""Parse output log files for error messages
Parameters
----------
directory : str
output directory
"""
substrings = (
"DUE TO TIME LIMIT", # includes slurmstepd, but check this first
"srun",
"slurmstepd",
"Traceback",
)
filenames = [os.path.join(directory, x) for x in os.listdir(directory) if x.endswith(".e")]
if not filenames:
return
for line in fileinput.input(filenames):
for substring in substrings:
if substring in line:
event = StructuredLogEvent(
source="submitter",
category=EVENT_CATEGORY_ERROR,
name=EVENT_NAME_ERROR_LOG,
message="Detected error message in log.",
error=substring,
filename=fileinput.filename(),
line_number=fileinput.lineno(),
text=line.strip(),
)
yield event
# Only find one match in a single line.
break
@staticmethod
def generate_reports(directory, resource_monitor_type):
"""Create reports summarizing the output results of a set of jobs.
Parameters
----------
directory : str
output directory
resource_monitor_type : ResourceMonitorType
"""
commands = [
(f"jade show-results -o {directory}", "results.txt"),
(f"jade show-events -o {directory} --categories Error", "errors.txt"),
]
if resource_monitor_type != ResourceMonitorType.NONE:
commands.append((f"jade stats show -o {directory}", "stats.txt"))
commands.append((f"jade stats show -o {directory} -j", "stats_summary.json"))
if resource_monitor_type == ResourceMonitorType.PERIODIC:
commands.append((f"jade stats plot -o {directory}", None))
reports = []
for cmd in commands:
output = {}
ret = run_command(cmd[0], output=output)
if ret != 0:
logger.error("Failed to run [%s]: %s: %s", cmd, ret, output["stderr"])
if cmd[1] is not None:
filename = os.path.join(directory, cmd[1])
with open(filename, "w") as f_out:
if "json" not in cmd[1]:
f_out.write(cmd[0] + "\n\n")
f_out.write(output["stdout"])
reports.append(filename)
logger.info("Generated reports %s.", " ".join(reports))
return 0
def _submit_to_hpc(self, cluster):
hpc_submitter = HpcSubmitter(
self._config,
self._config_file,
cluster,
self._output,
)
if hpc_submitter.run():
logger.info("All submitters have completed.")
return True
logger.debug("jobs are still pending")
return False
def run_checks(self, params: SubmitterParams):
"""Checks the configuration for errors. May mutate the config."""
self._config.check_job_dependencies(params)
self._config.check_submission_groups(params)
self._config.check_spark_config()
@staticmethod
def run_submit_jobs(config_file, output, params, pipeline_stage_num=None):
"""Allows submission from an existing Python process."""
os.makedirs(output, exist_ok=True)
mgr = JobSubmitter.create(config_file, params, output=output)
cluster = Cluster.create(
output,
mgr.config,
pipeline_stage_num=pipeline_stage_num,
)
local = params.hpc_config.hpc_type == HpcType.LOCAL
ret = 1
try:
status = mgr.submit_jobs(cluster, force_local=local)
if status == Status.IN_PROGRESS:
check_cmd = f"jade show-status -o {output}"
if not params.dry_run:
print(f"Jobs are in progress. Run '{check_cmd}' for updates.")
ret = 0
else:
ret = status.value
finally:
cluster.demote_from_submitter()
if local:
# These files were not used in this case.
cluster.delete_files_internal()
return ret
| 35.473318 | 99 | 0.608738 | 1,685 | 15,289 | 5.279525 | 0.197626 | 0.017986 | 0.007869 | 0.011241 | 0.176147 | 0.110612 | 0.093413 | 0.073516 | 0.057329 | 0.036196 | 0 | 0.001595 | 0.302832 | 15,289 | 430 | 100 | 35.555814 | 0.833005 | 0.086598 | 0 | 0.123839 | 0 | 0 | 0.098097 | 0.00981 | 0 | 0 | 0 | 0 | 0.003096 | 1 | 0.049536 | false | 0.003096 | 0.092879 | 0.003096 | 0.182663 | 0.003096 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ebb61e698b97baa6c67673863e148e21ab80d713 | 1,364 | py | Python | testing/MarketMaker_contract_test.py | SK1989sL/RYO | a0c89c694d9ad4aed9a9776937f2f73271b67f28 | [
"MIT"
] | null | null | null | testing/MarketMaker_contract_test.py | SK1989sL/RYO | a0c89c694d9ad4aed9a9776937f2f73271b67f28 | [
"MIT"
] | null | null | null | testing/MarketMaker_contract_test.py | SK1989sL/RYO | a0c89c694d9ad4aed9a9776937f2f73271b67f28 | [
"MIT"
] | null | null | null | import os
import pytest
from starkware.starknet.compiler.compile import (
compile_starknet_files)
from starkware.starknet.testing.starknet import Starknet
from starkware.starknet.testing.contract import StarknetContract
# The path to the contract source code.
CONTRACT_FILE = os.path.join(
os.path.dirname(__file__), "../contracts/MarketMaker.cairo")
# The testing library uses python's asyncio. So the following
# decorator and the ``async`` keyword are needed.
@pytest.mark.asyncio
async def test_record_items():
# Compile the contract.
contract_definition = compile_starknet_files(
[CONTRACT_FILE], debug_info=True)
# Create a new Starknet class that simulates the StarkNet
# system.
starknet = await Starknet.empty()
# Deploy the contract.
contract_address = await starknet.deploy(
contract_definition=contract_definition)
contract = StarknetContract(
starknet=starknet,
abi=contract_definition.abi,
contract_address=contract_address,
)
market_a_pre = 300
market_b_pre = 500
user_a_pre = 40 # User gives 40.
res = await contract.trade(market_a_pre, market_b_pre, user_a_pre).invoke()
(market_a_post, market_b_post, user_b_post, ) = res
assert market_a_post == market_a_pre + user_a_pre
assert market_b_post == market_b_pre - user_b_post
| 30.311111 | 79 | 0.73827 | 181 | 1,364 | 5.287293 | 0.38674 | 0.025078 | 0.065831 | 0.058516 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009001 | 0.185484 | 1,364 | 44 | 80 | 31 | 0.852385 | 0.195748 | 0 | 0 | 0 | 0 | 0.027624 | 0.027624 | 0 | 0 | 0 | 0 | 0.074074 | 1 | 0 | false | 0 | 0.185185 | 0 | 0.185185 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ebb6bb07248128010f898b3fb588b1cee8d3c6cc | 1,930 | py | Python | test/perform_additional_setup.py | aws/amazon-braket-containers | 44187fb4cc73e05bda3e361638d94b90f6e4c06a | [
"Apache-2.0"
] | 1 | 2022-03-22T23:49:17.000Z | 2022-03-22T23:49:17.000Z | test/perform_additional_setup.py | aws/amazon-braket-containers | 44187fb4cc73e05bda3e361638d94b90f6e4c06a | [
"Apache-2.0"
] | null | null | null | test/perform_additional_setup.py | aws/amazon-braket-containers | 44187fb4cc73e05bda3e361638d94b90f6e4c06a | [
"Apache-2.0"
] | 3 | 2021-11-29T21:19:31.000Z | 2022-01-13T16:31:06.000Z | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import shutil
import subprocess
import traceback
from urllib.parse import urlparse
import boto3
import tempfile
def download_s3_file(s3_uri: str, local_path: str) -> str:
"""
Downloads a file to a local path.
Args:
s3_uri (str): the S3 URI to get the file from.
        local_path (str): the local directory to download into.
    Returns:
        str: the local path of the downloaded file.
"""
s3_client = boto3.client("s3")
parsed_url = urlparse(s3_uri, allow_fragments=False)
s3_bucket = parsed_url.netloc
s3_key = parsed_url.path.lstrip("/")
local_s3_file = os.path.join(local_path, os.path.basename(s3_key))
s3_client.download_file(s3_bucket, s3_key, local_s3_file)
return local_s3_file
def perform_additional_setup() -> None:
lib_s3_uri = os.getenv('AMZN_BRAKET_IMAGE_SETUP_SCRIPT')
if lib_s3_uri:
try:
print("Getting setup script from ", lib_s3_uri)
with tempfile.TemporaryDirectory() as temp_dir:
script_to_run = download_s3_file(lib_s3_uri, temp_dir)
subprocess.run(["chmod", "+x", script_to_run])
subprocess.run(script_to_run)
except Exception as e:
print(f"Unable to install additional libraries.\nException: {e}")
if __name__ == "__main__":
perform_additional_setup()
| 33.275862 | 77 | 0.698964 | 283 | 1,930 | 4.55477 | 0.44523 | 0.031032 | 0.024825 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017857 | 0.21658 | 1,930 | 57 | 78 | 33.859649 | 0.834656 | 0.389637 | 0 | 0 | 0 | 0 | 0.114058 | 0.045977 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.25 | 0 | 0.357143 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ebb7280816985728a4a272af67774f62eef9667c | 1,146 | py | Python | experiments/optim.py | fbcotter/dtcwt_gainlayer | 32ec3e21066edc2a0d5edefaf70f43d031d1b4ac | [
"MIT"
] | 6 | 2018-11-14T22:41:58.000Z | 2021-12-08T11:01:32.000Z | experiments/optim.py | fbcotter/dtcwt_gainlayer | 32ec3e21066edc2a0d5edefaf70f43d031d1b4ac | [
"MIT"
] | null | null | null | experiments/optim.py | fbcotter/dtcwt_gainlayer | 32ec3e21066edc2a0d5edefaf70f43d031d1b4ac | [
"MIT"
] | 1 | 2020-05-22T16:10:00.000Z | 2020-05-22T16:10:00.000Z | import torch.optim
from numpy import ndarray
def get_optim(optim, params, init_lr, steps=1, wd=0, gamma=1,
momentum=0.9, max_epochs=120):
if optim == 'sgd':
optimizer = torch.optim.SGD(
params, lr=init_lr, momentum=momentum, weight_decay=wd)
elif optim == 'sgd_nomem':
optimizer = torch.optim.SGD(
params, lr=init_lr, momentum=0, weight_decay=wd)
elif optim == 'adam':
optimizer = torch.optim.Adam(
params, lr=init_lr, weight_decay=wd, # amsgrad=True,
betas=(0.9, .999))
else:
raise ValueError('Unknown optimizer')
# Set the learning rate decay
if isinstance(steps, (tuple, list, ndarray)) and len(steps) == 1:
steps = steps[0]
if isinstance(steps, int):
scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, int(max_epochs/steps), gamma=gamma)
elif isinstance(steps, (tuple, list, ndarray)):
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, steps, gamma=gamma)
else:
raise ValueError('Unknown lr schedule')
return optimizer, scheduler
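# Minimal usage sketch (hypothetical model and training loop, not part of this
# module):
#   optimizer, scheduler = get_optim('sgd', net.parameters(), init_lr=0.1,
#                                    steps=[60, 80], wd=5e-4, gamma=0.2)
#   for epoch in range(120):
#       train_one_epoch(net, optimizer)  # assumed helper
#       scheduler.step()
# Passing `steps` as a list selects MultiStepLR; an int selects StepLR with
# max_epochs/steps epochs per decay.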
| 33.705882 | 69 | 0.624782 | 144 | 1,146 | 4.881944 | 0.361111 | 0.085349 | 0.081081 | 0.059744 | 0.361309 | 0.125178 | 0.125178 | 0.125178 | 0.125178 | 0 | 0 | 0.018846 | 0.259162 | 1,146 | 33 | 70 | 34.727273 | 0.809187 | 0.035777 | 0 | 0.148148 | 0 | 0 | 0.047187 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.074074 | 0 | 0.148148 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ebb8162cb09b68c8030371823ad7d00d0561cc03 | 1,941 | py | Python | src/microengineclamav/tasks.py | polyswarm/microengine-clamav | 9427932cd35d4f8bfc7fe7877e90f518e7a3bfbb | [
"MIT"
] | 2 | 2018-05-20T00:08:14.000Z | 2018-06-13T22:42:14.000Z | src/microengineclamav/tasks.py | polyswarm/microengine-clamav | 9427932cd35d4f8bfc7fe7877e90f518e7a3bfbb | [
"MIT"
] | 1 | 2021-06-22T15:03:01.000Z | 2021-06-22T20:26:52.000Z | src/microengineclamav/tasks.py | polyswarm/microengine-clamav | 9427932cd35d4f8bfc7fe7877e90f518e7a3bfbb | [
"MIT"
] | 1 | 2019-02-21T20:22:32.000Z | 2019-02-21T20:22:32.000Z | from celery import Celery, Task
from microengine_utils import errors
from microengine_utils.datadog import configure_metrics
from microengine_utils.constants import SCAN_FAIL, SCAN_SUCCESS, SCAN_TIME, SCAN_VERDICT
from microengineclamav.models import Bounty, ScanResult, Verdict, Assertion, Phase
from microengineclamav import settings
from microengineclamav.scan import scan, compute_bid
celery_app = Celery('tasks', broker=settings.BROKER)
class MetricsTask(Task):
_metrics = None
@property
def metrics(self):
if self._metrics is None:
self._metrics = configure_metrics(
settings.DATADOG_API_KEY,
settings.DATADOG_APP_KEY,
settings.ENGINE_NAME,
poly_work=settings.POLY_WORK
)
return self._metrics
@celery_app.task(base=MetricsTask)
def handle_bounty(bounty):
bounty = Bounty(**bounty)
scan_result = ScanResult()
with handle_bounty.metrics.timer(SCAN_TIME):
try:
scan_result = scan(bounty)
handle_bounty.metrics.increment(SCAN_SUCCESS, tags=[f'type:{bounty.artifact_type}'])
handle_bounty.metrics.increment(SCAN_VERDICT, tags=[f'type:{bounty.artifact_type}',
f'verdict:{scan_result.verdict.value}'])
except errors.CalledProcessScanError:
handle_bounty.metrics.increment(
SCAN_FAIL, tags=[f'type:{bounty.artifact_type}', 'scan_error:calledprocess']
)
if bounty.phase == Phase.ARBITRATION:
scan_response = scan_result.to_vote()
else:
if scan_result.verdict in [Verdict.UNKNOWN, Verdict.SUSPICIOUS]:
# These results don't bid any NCT.
bid = 0
else:
bid = compute_bid(bounty, scan_result)
scan_response = scan_result.to_assertion(bid)
bounty.post_response(scan_response)
| 35.944444 | 104 | 0.663575 | 218 | 1,941 | 5.678899 | 0.344037 | 0.056543 | 0.061389 | 0.067851 | 0.181745 | 0.065428 | 0 | 0 | 0 | 0 | 0 | 0.000692 | 0.255023 | 1,941 | 53 | 105 | 36.622642 | 0.855463 | 0.016486 | 0 | 0.046512 | 0 | 0 | 0.076036 | 0.073414 | 0 | 0 | 0 | 0 | 0.046512 | 1 | 0.046512 | false | 0 | 0.162791 | 0 | 0.27907 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ebb9e939ebab9f1ac907089a906eafebd3d40188 | 18,835 | py | Python | app/main.py | rendybjunior/freddie-mercury | 1b6d1fe8c06f317e5fc8ab17afdfa0a8b90a7a75 | [
"Apache-2.0"
] | null | null | null | app/main.py | rendybjunior/freddie-mercury | 1b6d1fe8c06f317e5fc8ab17afdfa0a8b90a7a75 | [
"Apache-2.0"
] | 2 | 2019-05-11T16:25:54.000Z | 2019-05-13T01:19:16.000Z | app/main.py | rendybjunior/freddie-mercury | 1b6d1fe8c06f317e5fc8ab17afdfa0a8b90a7a75 | [
"Apache-2.0"
] | null | null | null | import datetime
import os, sys, six, base64, copy
from jinja2 import Environment, FileSystemLoader, Template
from google.auth.transport import requests
from google.cloud import datastore
from google.cloud import storage
from google.cloud import bigquery
import google.oauth2.id_token
from flask import Flask, render_template, request
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SubmitField, IntegerField
from wtforms.fields.html5 import DateField
from wtforms.validators import DataRequired, Email
import github3
DAG_FOLDER = 'dags/'
SQL_FOLDER = 'dags/sql/'
DAG_REPO_ORG = 'rendybjunior'
DAG_REPO_NAME = 'freddie-dags'
MASTER_BRANCH_NAME = 'master'
PROJECT = 'xxx'
BUCKET = 'xxx'
g = github3.login(token='xxx')
DOLLAR_TO_IDR = 14000
BQ_DOLLAR_PER_TB = 5
datastore_client = datastore.Client()
app = Flask(__name__)
SECRET_KEY = os.urandom(32)
app.config['SECRET_KEY'] = SECRET_KEY
class DagForm(FlaskForm):
dag_name = StringField('DAG Name', validators=[DataRequired()], render_kw={"placeholder": "lower_case_underscored"})
owner = StringField('Owner', validators=[DataRequired()], render_kw={"placeholder": "lower_case_underscored"})
start_date = DateField('Start Date', validators=[DataRequired()], format='%Y-%m-%d')
email = StringField('Email', validators=[DataRequired(), Email()], render_kw={"placeholder": "separate@bycomma.com,separate@bycomma2.com,"})
retries = IntegerField('Num of Retries', validators=[DataRequired()], default=1)
retry_delay_minutes = IntegerField('Retry Delay (in minutes)', validators=[DataRequired()], default=15)
schedule_interval = StringField('Schedule (in cron) UTC', validators=[DataRequired()], render_kw={"placeholder": "0 17 * * *"})
tasks = StringField('Tasks', validators=[DataRequired()], render_kw={"placeholder": "separated_by_comma, lower_case_underscored"})
dependencies = StringField('Dependencies', validators=[DataRequired()], render_kw={"placeholder": "eg. prev_task_id1,task_id1;prev_task_id1,task_id2)"})
submit = SubmitField('Save')
class TaskForm(FlaskForm):
task_id = StringField('Task ID', validators=[DataRequired()], render_kw={"placeholder": "lower_case_underscored"})
destination_table = StringField('Destination table', validators=[DataRequired()], render_kw={"placeholder": "my-project.test.freddie_mercury"})
sql = TextAreaField('SQL', validators=[DataRequired()])
sql_params = StringField('SQL Param to test SQL. THIS VALUE FOR TESTING ONLY', render_kw={"placeholder": "example: ds=2019-01-01,dsnodash=20190101"})
save = SubmitField('Save')
check_query = SubmitField('Check Query')
run_query = SubmitField('Run Query')
def store_task(task_id, destination_table, sql, sql_params, updated_by, type_):
entity = datastore.Entity(key=datastore_client.key('Task', task_id), exclude_from_indexes=['sql_base64'])
entity.update({
'type': type_,
'destination_table': destination_table,
'sql_base64' : base64.b64encode(sql.encode()),
'sql_params' : sql_params,
'updated_at' : datetime.datetime.now(),
'updated_by' : updated_by
})
datastore_client.put(entity)
return True, "{} saved".format(task_id) # todo check put return value
def fetch_task(task_id):
key = datastore_client.key('Task', task_id)
task = datastore_client.get(key=key)
task_obj = {
'type': task.get('type'),
'task_id': task.key.name,
'sql': base64.b64decode(task.get('sql_base64')).decode(),
'sql_params': task.get('sql_params'),
'destination_table': task.get('destination_table')
}
return task_obj
def fetch_tasks(limit=10):
query = datastore_client.query(kind='Task')
query.order = ['-updated_at']
tasks = query.fetch(limit=limit)
tasks_obj = []
for task in tasks:
tasks_obj.append({
'type': task.get('type'),
'task_id': task.key.name,
'sql': base64.b64decode(task.get('sql_base64')).decode(),
'sql_params': task.get('sql_params'),
'destination_table': task.get('destination_table')
})
return tasks_obj
def store_dag(dag_name, owner, start_date, retries, retry_delay_minutes, email, schedule_interval, tasks, dependencies, updated_by):
entity = datastore.Entity(key=datastore_client.key('Dag', dag_name))
entity.update({
'dag_name': dag_name,
'owner': owner,
'start_date' : start_date,
'retries': retries,
'retry_delay_minutes': retry_delay_minutes,
'email': email,
'schedule_interval': schedule_interval,
'tasks': tasks,
'dependencies': dependencies,
'updated_at' : datetime.datetime.now(),
'updated_by' : updated_by
})
datastore_client.put(entity)
return True, "{} saved".format(dag_name) # todo check put return value
def fetch_dags(limit=10):
query = datastore_client.query(kind='Dag')
query.order = ['-updated_at']
dags = query.fetch(limit=limit)
dags_obj = []
for dag in dags:
dags_obj.append({
'dag_name': dag.key.name,
'owner': dag.get('owner'),
'start_date' : dag.get('start_date'),
'retries': dag.get('retries'),
'retry_delay_minutes': dag.get('retry_delay_minutes'),
'email': dag.get('email'),
'schedule_interval': dag.get('schedule_interval'),
'tasks': dag.get('tasks'),
'dependencies': dag.get('dependencies'),
'updated_by' : dag.get('updated_by')
})
return dags_obj
def fetch_dag(dag_name):
key = datastore_client.key('Dag', dag_name)
dag = datastore_client.get(key=key)
dag_obj = {
'dag_name': dag.key.name,
'owner': dag.get('owner'),
'start_date' : dag.get('start_date'),
'retries': dag.get('retries'),
'retry_delay_minutes': dag.get('retry_delay_minutes'),
'email': dag.get('email'),
'schedule_interval': dag.get('schedule_interval'),
'tasks': dag.get('tasks'),
'dependencies': dag.get('dependencies'),
'updated_by' : dag.get('updated_by')
}
return dag_obj
def upload_sql(task_id, sql):
file_path = os.path.join(SQL_FOLDER, task_id + ".sql")
client = storage.Client(project=PROJECT)
bucket = client.get_bucket(BUCKET)
blob = bucket.blob(file_path)
blob.upload_from_string(sql)
url = blob.public_url
if isinstance(url, six.binary_type):
url = url.decode('utf-8')
print(url)
# todo return meaningful status & message
def upload_dag(dag_name, dag_text):
file_path = os.path.join(DAG_FOLDER, dag_name + ".py")
client = storage.Client(project=PROJECT)
bucket = client.get_bucket(BUCKET)
blob = bucket.blob(file_path)
blob.upload_from_string(dag_text)
url = blob.public_url
if isinstance(url, six.binary_type):
url = url.decode('utf-8')
print(url)
# todo return meaningful status & message
def check_query(sql):
job_config = bigquery.QueryJobConfig()
job_config.dry_run = True
job_config.use_query_cache = False
job_config.use_legacy_sql = False
client = bigquery.Client(project=PROJECT)
try:
query_job = client.query(sql, job_config)
query_size_megabyte = query_job.total_bytes_processed / 1024 / 1024
query_size_terabyte = query_size_megabyte / 1024 / 1024
dollar_est = BQ_DOLLAR_PER_TB * query_size_terabyte
rp_est = dollar_est * DOLLAR_TO_IDR
message = "Total MB that will be processed: {0:.2f}".format(query_size_megabyte)
message += ". Cost estimate: ${0:.2f}".format(dollar_est)
message += " or Rp{0:.2f})".format(rp_est)
return True, message
except:
return False, sys.exc_info()[1]
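# Illustrative cost arithmetic for check_query above (assumed example figures,
# not output of this app): a dry run reporting 1 TB scanned costs
# 1 * BQ_DOLLAR_PER_TB = $5.00, i.e. 5 * DOLLAR_TO_IDR = Rp70000.00; a 500 MB
# scan costs (500 / 1024 / 1024) TB * $5 ~= $0.0024.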
def run_query(sql, limit=25):
sql_with_limit = sql + "\n LIMIT {}".format(limit)
job_config = bigquery.QueryJobConfig()
job_config.flatten_results = True
job_config.use_query_cache = False
job_config.use_legacy_sql = False
client = bigquery.Client(project=PROJECT)
try:
query_job = client.query(sql_with_limit, job_config=job_config) # API request
rows = query_job.result()
return rows, "OK"
except:
return [], sys.exc_info()[1]
def create_branch(repository, dag_name):
branch_name = '-'.join([dag_name, datetime.datetime.now().strftime('%Y%m%d%H%M%S')])
master_branch = repository.branch(MASTER_BRANCH_NAME)
master_head_sha = master_branch.commit.sha
repository.create_branch_ref(branch_name, master_head_sha)
return branch_name
def create_github_pr(dag_name, dag_file_content, sql_file_contents, committer_name, committer_email):
repository = g.repository(DAG_REPO_ORG, DAG_REPO_NAME)
branch_name = create_branch(repository, dag_name)
dag_file_path = DAG_FOLDER + dag_name + '.py'
content = None
try:
content = repository.file_contents(path=dag_file_path, ref=branch_name)
except Exception:
pass
if content is None:
repository.create_file(path=dag_file_path,
message="Create DAG File {}".format(dag_name),
content=dag_file_content,
branch=branch_name,
committer={
"name": committer_name,
"email": committer_email
})
else:
content.update(
message="Update DAG File {}".format(dag_name),
content=dag_file_content,
branch=branch_name,
committer={
"name": committer_name,
"email": committer_email
})
for task_id, sql in sql_file_contents:
sql_file_path = SQL_FOLDER + task_id + '.sql'
content = None
try:
content = repository.file_contents(path=sql_file_path, ref=branch_name)
except Exception:
pass
if content is None:
repository.create_file(path=sql_file_path,
message="Create SQL for task {}".format(task_id),
content=sql,
branch=branch_name,
committer={
"name": committer_name,
"email": committer_email
})
else:
content.update(
message="Update SQL File for task {}".format(task_id),
content=sql,
branch=branch_name,
committer={
"name": committer_name,
"email": committer_email
})
pull_body="*test* _123_" #TODO
repository.create_pull(title=branch_name, base=MASTER_BRANCH_NAME, head=branch_name, body=pull_body)
firebase_request_adapter = requests.Request()
@app.route('/')
def root():
# Verify Firebase auth.
id_token = request.cookies.get("token")
error_message = None
claims = None
dags = None
tasks = None
if id_token:
try:
# Verify the token against the Firebase Auth API. This example
# verifies the token on each page load. For improved performance,
# some applications may wish to cache results in an encrypted
# session store (see for instance
# http://flask.pocoo.org/docs/1.0/quickstart/#sessions).
claims = google.oauth2.id_token.verify_firebase_token(
id_token, firebase_request_adapter)
tasks = fetch_tasks()
dags = fetch_dags()
except ValueError as exc:
# This will be raised if the token is expired or any other
# verification checks fail.
error_message = str(exc)
return render_template(
'index.html',
user_data=claims, error_message=error_message, dags=dags, tasks=tasks)
@app.route('/dag_form', methods=["GET", "POST"])
def dag_form():
# Verify Firebase auth.
id_token = request.cookies.get("token")
error_message = None
claims = None
if id_token:
claims = google.oauth2.id_token.verify_firebase_token(
id_token, firebase_request_adapter)
form = DagForm()
dag_text = ""
if form.validate_on_submit():
root = os.path.dirname(os.path.abspath(__file__))
templates_dir = os.path.join(root, 'templates')
env = Environment( loader = FileSystemLoader(templates_dir) )
template = env.get_template('dag_template.py')
store_dag(dag_name=form.dag_name.data,
owner=form.owner.data,
start_date=form.start_date.data.strftime("%Y-%m-%d"),
email=form.email.data,
retries=form.retries.data,
retry_delay_minutes=form.retry_delay_minutes.data,
schedule_interval=form.schedule_interval.data,
tasks=form.tasks.data,
dependencies=form.dependencies.data,
updated_by=claims['email'])
tasks = []
sql_file_contents = []
for task_id in form.tasks.data.replace(' ','').split(','):
task = fetch_task(task_id)
if task != "":
# upload_sql(task_id, task.get('sql'))
sql_file_contents.append((task_id, task.get('sql').encode()))
task_for_dag = copy.deepcopy(task)
task_for_dag['sql'] = 'sql/' + task_id + ".sql"
tasks.append(task_for_dag)
dependencies = []
for dependency in form.dependencies.data.replace(' ','').split(';'):
temp = dependency.split(',')
dependencies.append({
'preceding_task_id': temp[0],
'task_id': temp[1]
})
dag_text = template.render(
dag_name=form.dag_name.data,
owner=form.owner.data,
start_date=form.start_date.data.strftime('%Y-%m-%d'),
email=form.email.data,
retries=form.retries.data,
retry_delay_minutes=form.retry_delay_minutes.data,
schedule_interval=form.schedule_interval.data,
tasks=tasks,
dependencies=dependencies,
)
# upload_dag(dag_name=form.dag_name.data, dag_text=dag_text)
create_github_pr(dag_name=form.dag_name.data,
dag_file_content=dag_text.encode(),
sql_file_contents=sql_file_contents,
committer_name=claims['name'],
committer_email=claims['email'])
else:
if request.args.get('dag_name') is not None:
dag = fetch_dag(dag_name=request.args.get('dag_name'))
if dag is not None:
form.dag_name.data = dag.get('dag_name')
form.owner.data = dag.get('owner')
form.start_date.data = datetime.datetime.strptime(dag.get('start_date'),"%Y-%m-%d")
form.retries.data = dag.get('retries')
form.retry_delay_minutes.data = dag.get('retry_delay_minutes')
form.email.data = dag.get('email')
form.schedule_interval.data = dag.get('schedule_interval')
form.tasks.data = dag.get('tasks')
form.dependencies.data = dag.get('dependencies')
return render_template('dag_form.html', user_data=claims, title='DAG Form', form=form, dag_text=dag_text)
@app.route('/task_form', methods=["GET", "POST"])
def task_form():
# Verify Firebase auth.
id_token = request.cookies.get("token")
error_message = None
claims = None
times = None
if id_token:
claims = google.oauth2.id_token.verify_firebase_token(
id_token, firebase_request_adapter)
form = TaskForm()
is_save_ok, save_msg = None, None
is_query_ok, check_query_result = None, None
run_query_result, run_query_result_headers, run_query_result_msg = [], [], None
if form.validate_on_submit():
sql = form.sql.data
if form.sql_params.data:
params = form.sql_params.data.replace(' ','').split(',')
param_dict = {}
for param in params:
param_dict[param.split('=')[0]] = param.split('=')[1]
sql = Template(sql).render(param_dict)
is_query_ok, check_query_result = check_query(sql)
if form.save.data:
if is_query_ok:
is_save_ok, save_msg = store_task(task_id=form.task_id.data,
destination_table=form.destination_table.data,
sql=sql,
sql_params=form.sql_params.data,
type_='BQ_ETL',
updated_by=claims['email'])
else:
save_msg = "Can not save, something happened, see error msg"
# elif form.check_query.data:
# do nothing
elif form.run_query.data:
if is_query_ok:
run_query_result, run_query_result_msg = run_query(sql)
run_query_result_headers = [field.name for field in run_query_result.schema]
else:
run_query_result_msg = "Can not run, something happened, see error msg"
else:
if request.args.get('task_id') is not None:
task = fetch_task(task_id=request.args.get('task_id'))
if task is not None:
form.task_id.data = task.get('task_id')
form.destination_table.data = task.get('destination_table')
form.sql.data = task.get('sql')
form.sql_params.data = task.get('sql_params')
return render_template('task_form.html', user_data=claims, title='Task Form', form=form,
is_save_ok=is_save_ok, save_msg=save_msg,
is_query_ok=is_query_ok, check_query_result=check_query_result,
run_query_result_headers=run_query_result_headers,
run_query_result=run_query_result, run_query_result_msg=run_query_result_msg)
if __name__ == '__main__':
# This is used when running locally only. When deploying to Google App
# Engine, a webserver process such as Gunicorn will serve the app. This
# can be configured by adding an `entrypoint` to app.yaml.
# Flask's development server will automatically serve static files in
# the "static" directory. See:
# http://flask.pocoo.org/docs/1.0/quickstart/#static-files. Once deployed,
# App Engine itself will serve those files as configured in app.yaml.
app.run(host='127.0.0.1', port=8080, debug=True) | 40.945652 | 156 | 0.623042 | 2,287 | 18,835 | 4.880192 | 0.155225 | 0.020697 | 0.021324 | 0.018816 | 0.473255 | 0.379984 | 0.362423 | 0.328644 | 0.292089 | 0.276857 | 0 | 0.008483 | 0.261481 | 18,835 | 460 | 157 | 40.945652 | 0.793889 | 0.060473 | 0 | 0.379487 | 0 | 0 | 0.12396 | 0.013528 | 0 | 0 | 0 | 0.002174 | 0 | 1 | 0.038462 | false | 0.005128 | 0.035897 | 0 | 0.158974 | 0.005128 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ebbaa88673070e1877aabf5581d29c7b6749d413 | 982 | py | Python | ej2.py | NiopTres/Ejercicio-Herramientas-Computacionales | af97b810e1ade4fb2cdfa433e1e09ddc301b7dd3 | [
"Unlicense"
] | null | null | null | ej2.py | NiopTres/Ejercicio-Herramientas-Computacionales | af97b810e1ade4fb2cdfa433e1e09ddc301b7dd3 | [
"Unlicense"
] | null | null | null | ej2.py | NiopTres/Ejercicio-Herramientas-Computacionales | af97b810e1ade4fb2cdfa433e1e09ddc301b7dd3 | [
"Unlicense"
] | null | null | null | NotaParcial1 = int(input("Nota primer Parcial: "))
NotaParcial2 = int(input("Nota segundo Parcial: "))
NotaTaller = int(input("Nota del Taller: "))
NotaProyecto = int(input("Nota del Proyecto: "))
Parcial1 = NotaParcial1*(25/100)
Parcial2 = NotaParcial2*(25/100)
Taller = NotaTaller*(20/100)
Proyecto = NotaProyecto*(30/100)
nota_final = Parcial1 + Parcial2 + Taller + Proyecto
print (nota_final)
"""
Input
Read the grade values:
First Exam Grade
Second Exam Grade
Workshop Grade
Project Grade
Process
Compute the weighted percentage of each grade:
Exam 1 Percentage = First Exam Grade * 25%
Exam 2 Percentage = Second Exam Grade * 25%
Workshop Percentage = Workshop Grade * 20%
Project Percentage = Project Grade * 30%
Compute the final grade by summing the percentages:
Final Grade = Exam 1 Percentage + Exam 2 Percentage + Workshop Percentage + Project Percentage
Output
Return the Final Grade
"""
| 24.55 | 99 | 0.729124 | 126 | 982 | 5.666667 | 0.34127 | 0.063025 | 0.067227 | 0.042017 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.050063 | 0.186354 | 982 | 39 | 100 | 25.179487 | 0.843554 | 0 | 0 | 0 | 0 | 0 | 0.195062 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ebbeecd7804589e9d66d63e0bc0c0723583222d7 | 1,395 | py | Python | build/package.py | weijiekoh/malvarma | cc8b699b697a0735dd53af27ae4a23955b581f93 | [
"MIT"
] | 15 | 2018-01-15T14:22:46.000Z | 2022-03-20T19:05:27.000Z | build/package.py | stephensong/malvarma | cc8b699b697a0735dd53af27ae4a23955b581f93 | [
"MIT"
] | 1 | 2018-01-21T09:56:04.000Z | 2018-06-21T06:20:23.000Z | build/package.py | stephensong/malvarma | cc8b699b697a0735dd53af27ae4a23955b581f93 | [
"MIT"
] | 6 | 2018-01-21T10:00:48.000Z | 2021-07-26T00:03:45.000Z | #!/usr/bin/env python3
"""
This script checksums, signs, and compresses malvarma-<version>.img, and
creates malvarma-<version>.tar.bz2.
The author's GPG signature is hardcoded below.
"""
import os
import shutil
import sys
import subprocess
if __name__ == "__main__":
if len(sys.argv) == 1:
print("Usage: python3 package.py malvarma-<version>.img")
sys.exit(1)
imgfile = sys.argv[1]
folder_name = imgfile.split(".img")[0]
if not os.path.exists(imgfile):
print("Error: {imgfile} does not exist.".format(imgfile=imgfile))
sys.exit(1)
print("Checksumming...")
subprocess.check_call("sha256sum {imgfile} > {imgfile}.sha256".format(imgfile=imgfile),
shell=True, stderr=subprocess.STDOUT)
print("Signing...")
subprocess.check_call("gpg --detach-sign --default-key 0x90DB43617CCC1632 --sign {imgfile}".format(imgfile=imgfile),
shell=True, stderr=subprocess.STDOUT)
print("Compressing")
shutil.rmtree(folder_name, ignore_errors=True)
os.makedirs(folder_name)
shutil.move(imgfile, folder_name)
shutil.move(imgfile + ".sig", folder_name)
shutil.move(imgfile + ".sha256", folder_name)
subprocess.check_call("tar -cvjSf {folder_name}.tar.bz2 {folder_name}".format(folder_name=folder_name),
shell=True, stderr=subprocess.STDOUT)
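# Resulting layout for, e.g., malvarma-1.0.img (illustrative version number):
#   malvarma-1.0/
#       malvarma-1.0.img
#       malvarma-1.0.img.sig
#       malvarma-1.0.img.sha256
#   malvarma-1.0.tar.bz2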
| 31.704545 | 120 | 0.665233 | 171 | 1,395 | 5.298246 | 0.432749 | 0.110375 | 0.066225 | 0.082781 | 0.247241 | 0.12362 | 0.12362 | 0.12362 | 0.12362 | 0 | 0 | 0.026738 | 0.195699 | 1,395 | 43 | 121 | 32.44186 | 0.780749 | 0.127599 | 0 | 0.185185 | 0 | 0 | 0.239868 | 0.035567 | 0 | 0 | 0.014888 | 0 | 0 | 1 | 0 | false | 0 | 0.148148 | 0 | 0.148148 | 0.185185 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ebc2424c2e78916d3caf093c64b2223284f39d93 | 1,955 | py | Python | examples/recordRawFrames.py | OnionIoT/tau-lidar-camera | a70b24e18be8e4c5abfe525c6768fbc10a492fd8 | [
"MIT"
] | 31 | 2020-12-18T16:35:15.000Z | 2022-03-25T18:41:19.000Z | examples/recordRawFrames.py | OnionIoT/tau-lidar-camera | a70b24e18be8e4c5abfe525c6768fbc10a492fd8 | [
"MIT"
] | 17 | 2020-11-18T16:10:36.000Z | 2022-02-01T22:19:11.000Z | examples/recordRawFrames.py | OnionIoT/tau-lidar-camera | a70b24e18be8e4c5abfe525c6768fbc10a492fd8 | [
"MIT"
] | 4 | 2021-01-18T17:25:02.000Z | 2021-11-01T13:25:45.000Z | import os
import time
from signal import signal, SIGINT
from TauLidarCommon.frame import FrameType
from TauLidarCamera.camera import Camera
outputDir = './samples'
runLoop = True
def setup():
camera = None
ports = Camera.scan() ## Scan for available Tau Camera devices
if len(ports) > 0:
camera = Camera.open(ports[0]) ## Open the first available Tau Camera
camera.setModulationChannel(0) ## autoChannelEnabled: 0, channel: 0
camera.setIntegrationTime3d(0, 1000) ## set integration time 0: 1000
camera.setMinimalAmplitude(0, 10) ## set minimal amplitude 0: 10
cameraInfo = camera.info()
print("\nToF camera opened successfully:")
print(" model: %s" % cameraInfo.model)
print(" firmware: %s" % cameraInfo.firmware)
print(" uid: %s" % cameraInfo.uid)
print(" resolution: %s" % cameraInfo.resolution)
print(" port: %s" % cameraInfo.port)
print("\nPress Ctrl-c in terminal to shutdown ...")
return camera
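# Descriptive note: run() below writes each raw DISTANCE_AMPLITUDE frame as
# bytes to ./samples/<unix-timestamp>.frame, one file per frame, so a capture
# session can be decoded offline later.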
def run(camera):
global runLoop
count = 0
if not os.path.exists(outputDir):
os.makedirs(outputDir)
print('Recording...')
while runLoop:
frame = camera.readFrameRawData(FrameType.DISTANCE_AMPLITUDE)
if frame:
fName = '%s/%s.frame'%(outputDir, time.time())
with open(fName, "wb") as binary_file:
binary_file.write(frame)
print('\rFrame: %d'%count, end='')
count += 1
def cleanup(camera):
print('\nShutting down ...')
camera.close()
def handler(signal_received, frame):
global runLoop
runLoop = False
if __name__ == "__main__":
camera = setup()
signal(SIGINT, handler)
if camera:
try:
run(camera)
except Exception as e:
print(e)
cleanup(camera)
| 26.066667 | 89 | 0.586189 | 210 | 1,955 | 5.4 | 0.461905 | 0.048501 | 0.031746 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017647 | 0.304348 | 1,955 | 74 | 90 | 26.418919 | 0.816176 | 0.083887 | 0 | 0.037736 | 0 | 0 | 0.133146 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075472 | false | 0 | 0.09434 | 0 | 0.188679 | 0.207547 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ebc29b44ef030ad4cf5e8ff010606f3919b7f18d | 1,038 | py | Python | HyeonJinGithub/2020-10-13/2560 회장뽑기.py | Team-Morgorithm/Morgorithm | 133f19e1e15e423589bd7b94b698d2afc76c3ef6 | [
"MIT"
] | 1 | 2021-07-29T01:33:44.000Z | 2021-07-29T01:33:44.000Z | HyeonJinGithub/2020-10-13/2560 회장뽑기.py | Team-NTO/NTO | 133f19e1e15e423589bd7b94b698d2afc76c3ef6 | [
"MIT"
] | 150 | 2020-09-28T13:11:29.000Z | 2021-08-05T23:28:36.000Z | HyeonJinGithub/2020-10-13/2560 회장뽑기.py | Team-Morgorithm/morgorithm | 133f19e1e15e423589bd7b94b698d2afc76c3ef6 | [
"MIT"
] | 3 | 2020-09-30T14:05:56.000Z | 2021-07-29T01:33:53.000Z | import sys
from collections import deque
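# Descriptive note (the code below is otherwise uncommented): each member's
# score is the maximum BFS distance (eccentricity) to any other member in the
# undirected graph read from stdin (edge list terminated by "-1 -1"); the
# program prints the minimal score and how many members attain it, then those
# members' indices -- the chairperson candidates.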
def bfs(x):
q = deque([x])
dist = [0] * (N + 1)
check = [False] * (N + 1)
cnt = -1
check[x] = True
while q:
size = len(q)
cnt += 1
for _ in range(size):
x = q.popleft()
for y in a[x]:
if dist[y] == 0 and not check[y]:
dist[y] = dist[x] + 1
q.append(y)
check[y] = True
return cnt
if __name__ == '__main__':
N = int(input())
a = [[] for _ in range(N + 1)]
result = 1000000
res = []
while True:
u, v = map(int, sys.stdin.readline().split())
if u == -1 and v == -1:
break
a[u].append(v)
a[v].append(u)
for i in range(1, N + 1):
score = bfs(i)
if score < result:
res = []
result = score
res.append(i)
elif score == result:
res.append(i)
print(result, len(res))
for s in res:
print(s, end=' ') | 24.714286 | 54 | 0.421002 | 141 | 1,038 | 3.028369 | 0.361702 | 0.018735 | 0.046838 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.032423 | 0.435453 | 1,038 | 42 | 55 | 24.714286 | 0.696246 | 0 | 0 | 0.097561 | 0 | 0 | 0.008662 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02439 | false | 0 | 0.04878 | 0 | 0.097561 | 0.04878 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ebc743c4294c3b10ce8684625c881a47ded3ea8a | 5,235 | py | Python | tests/test_bokeh_wamp.py | ricorx7/rti-python | 1316323b782ddb8df357e55404f507a9573e172c | [
"BSD-3-Clause"
] | 1 | 2017-06-10T13:27:44.000Z | 2017-06-10T13:27:44.000Z | tests/test_bokeh_wamp.py | ricorx7/rti-python | 1316323b782ddb8df357e55404f507a9573e172c | [
"BSD-3-Clause"
] | 10 | 2019-12-28T18:06:18.000Z | 2022-03-25T18:48:20.000Z | tests/test_bokeh_wamp.py | ricorx7/rti_python | 1316323b782ddb8df357e55404f507a9573e172c | [
"BSD-3-Clause"
] | null | null | null | import json
from twisted.logger import Logger
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession
from autobahn.twisted.wamp import ApplicationRunner
from bokeh.client import push_session
from bokeh.plotting import figure, curdoc
from bokeh.models.widgets import Panel, Tabs
from bokeh.models import Range1d
import numpy as np
class test_bokeh_wamp(ApplicationSession):
def __init__(self, config=None):
ApplicationSession.__init__(self, config)
@inlineCallbacks
def onJoin(self, details):
"""
Initialize the WAMP settings. This is called before everything is setup to ensure
the WAMP settings are initialized.
:return:
"""
self.log.info("WAMP connected")
yield self.subscribe(self.on_ens_json_data, u"com.rti.data.ens")
self.log.info("test Bokehs WAMP init")
def on_ens_json_data(self, data):
"""
Called when JSON Ensemble data is received from WAMP.
:param data: JSON object containing serial data.
:return:
"""
json_data = json.loads(data)  # parse the JSON string into a dict
bins = []
ampB0 = []
ampB1 = []
ampB2 = []
ampB3 = []
corrB0 = []
corrB1 = []
corrB2 = []
corrB3 = []
for bin in range(json_data['EnsembleData']["NumBins"]):
bins.append(bin)
ampB0.append(json_data['Amplitude']["Amplitude"][bin][0])
ampB1.append(json_data['Amplitude']["Amplitude"][bin][1])
ampB2.append(json_data['Amplitude']["Amplitude"][bin][2])
ampB3.append(json_data['Amplitude']["Amplitude"][bin][3])
corrB0.append(json_data['Correlation']["Correlation"][bin][0])
corrB1.append(json_data['Correlation']["Correlation"][bin][1])
corrB2.append(json_data['Correlation']["Correlation"][bin][2])
corrB3.append(json_data['Correlation']["Correlation"][bin][3])
self.config.extra['ampB0'].data_source.data["y"] = bins
self.config.extra['ampB0'].data_source.data["x"] = ampB0
self.config.extra['ampB1'].data_source.data["y"] = bins
self.config.extra['ampB1'].data_source.data["x"] = ampB1
self.config.extra['ampB2'].data_source.data["y"] = bins
self.config.extra['ampB2'].data_source.data["x"] = ampB2
self.config.extra['ampB3'].data_source.data["y"] = bins
self.config.extra['ampB3'].data_source.data["x"] = ampB3
self.config.extra['corrB0'].data_source.data["y"] = bins
self.config.extra['corrB0'].data_source.data["x"] = corrB0
self.config.extra['corrB1'].data_source.data["y"] = bins
self.config.extra['corrB1'].data_source.data["x"] = corrB1
self.config.extra['corrB2'].data_source.data["y"] = bins
self.config.extra['corrB2'].data_source.data["x"] = corrB2
self.config.extra['corrB3'].data_source.data["y"] = bins
self.config.extra['corrB3'].data_source.data["x"] = corrB3
if __name__ == '__main__':
x = np.array([1])
y = np.array([1])
TOOLS = 'pan,box_zoom,wheel_zoom,box_select,crosshair,resize,reset,save,hover'
ampPlot = figure(plot_width=600, plot_height=800, tools=TOOLS, x_range=Range1d(0, 140))
ampPlot.legend.location = "top_left"
ampPlot.legend.click_policy = "hide"
ampPlot.xaxis[0].axis_label="dB"
ampPlot.yaxis[0].axis_label = "Bin"
ampB0 = ampPlot.line(x=x, y=y, line_width=2, alpha=.85, color='red', legend="B0")
ampB1 = ampPlot.line(x=x, y=y, line_width=2, alpha=.85, color='green', legend="B1")
ampB2 = ampPlot.line(x=x, y=y, line_width=2, alpha=.85, color='blue', legend="B2")
ampB3 = ampPlot.line(x=x, y=y, line_width=2, alpha=.85, color='orange', legend="B3")
tabAmp = Panel(child=ampPlot, title="Amplitude")
corrPlot = figure(plot_width=600, plot_height=800, tools=TOOLS, x_range=Range1d(0, 1))
corrPlot.legend.location = "top_left"
corrPlot.legend.click_policy = "hide"
corrPlot.xaxis[0].axis_label = "% (percent)"
corrPlot.yaxis[0].axis_label = "Bin"
corrB0 = corrPlot.line(x=x, y=y, line_width=2, alpha=.85, color='red', legend="B0")
corrB1 = corrPlot.line(x=x, y=y, line_width=2, alpha=.85, color='green', legend="B1")
corrB2 = corrPlot.line(x=x, y=y, line_width=2, alpha=.85, color='blue', legend="B2")
corrB3 = corrPlot.line(x=x, y=y, line_width=2, alpha=.85, color='orange', legend="B3")
tabCorr = Panel(child=corrPlot, title="Correlation")
tabs = Tabs(tabs=[tabAmp, tabCorr])
# open a session to keep our local document in sync with server
session = push_session(curdoc())
session.show(tabs) # open the document in a browser
# Start the WAMP connection
# Connect the main window to the WAMP connection
runner = ApplicationRunner(url=u"ws://localhost:55058/ws", realm=u"realm1",
extra={'ampB0': ampB0, 'ampB1': ampB1, 'ampB2': ampB2, 'ampB3': ampB3,
'corrB0': corrB0, 'corrB1': corrB1, 'corrB2': corrB2, 'corrB3': corrB3})
runner.run(test_bokeh_wamp)
session.loop_until_closed() # run forever | 41.547619 | 110 | 0.637631 | 695 | 5,235 | 4.686331 | 0.253237 | 0.055266 | 0.073687 | 0.036844 | 0.45809 | 0.429229 | 0.338348 | 0.240098 | 0.156586 | 0.156586 | 0 | 0.033028 | 0.207641 | 5,235 | 126 | 111 | 41.547619 | 0.75217 | 0.082713 | 0 | 0 | 0 | 0 | 0.126722 | 0.019284 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.114943 | 0 | 0.16092 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ebc7a37f046171aa884cf21a18cce4f0bbd74515 | 8,338 | py | Python | scripts/betterX_labs_attributes.py | eliasall/BetterX-Cloud | c6796f1207ced4ad3c63fd56df08ecf5ece613e1 | [
"Apache-2.0"
] | null | null | null | scripts/betterX_labs_attributes.py | eliasall/BetterX-Cloud | c6796f1207ced4ad3c63fd56df08ecf5ece613e1 | [
"Apache-2.0"
] | null | null | null | scripts/betterX_labs_attributes.py | eliasall/BetterX-Cloud | c6796f1207ced4ad3c63fd56df08ecf5ece613e1 | [
"Apache-2.0"
] | null | null | null |
## Web File
def insertWeb(filetype, json, cursor, conn, uid):
if (filetype == 'web'):
web_page_node(json,uid,cursor,conn) # [pages] / [pageNode]
web_entry_node(json, uid, cursor, conn) # [pages] / [entriesNode]
def web_entry_response(json_entries_node, uid, cursor, conn, parentid):
tblName = 'lab_web_entries_response'
featureAttrs = {'status', 'statusText', 'httpVersion', 'cookieNumber', 'redirectURL', 'headersSize', 'bodySize'}
featureAttrs2 = {'Date', 'Server', 'X-Powered-By', 'Content-Encoding', 'Content-Length', 'Keep-Alive', 'Connection', 'Content-Type'}
featureAttrs3 = {'size', 'compression', 'mimeType', 'encoding'}
vals = {}
values = []
cntattr = 0
for tis in featureAttrs:
vals[cntattr] = tis
values.append(json_entries_node['response'][tis])
cntattr = cntattr + 1
vals[cntattr] = 'web_entries_id'
values.append(parentid)
cntattr = cntattr + 1
attrsInJson,typesInJson = toCommaStringDict(vals)
#print type(attrsInJson)
#print attrsInJson
vals2 = {}
values2 = []
cntattr2 = 0
for tis2 in featureAttrs2:
vals2,values2 = appendJsonKey(json_entries_node['response']['headers'], tis2, vals2, values2, cntattr2)
cntattr2 = cntattr2 + 1
renameArrayItem(vals2, 'Date', 'header_Date')
renameArrayItem(vals2, 'Server', 'header_Server')
renameArrayItem(vals2, 'X-Powered-By', 'header_XPoweredBy')
renameArrayItem(vals2, 'Content-Encoding', 'header_ContentEncoding')
renameArrayItem(vals2, 'Content-Length', 'header_ContentLength')
renameArrayItem(vals2, 'Keep-Alive', 'header_KeepAlive')
renameArrayItem(vals2, 'Connection', 'header_Connection')
renameArrayItem(vals2, 'Content-Type', 'header_ContentType')
attrsInJson2,typesInJson2 = toCommaStringDict(vals2)
#print type(attrsInJson2)
#print attrsInJson2
vals3 = {}
values3 = []
cntattr3 = 0
for tis3 in featureAttrs3:
vals3,values3 = appendJsonKey(json_entries_node['response']['content'], tis3, vals3, values3, cntattr3)
cntattr3 = cntattr3 + 1
renameArrayItem(vals3, 'size', 'content_size')
renameArrayItem(vals3, 'compression', 'content_compression')
renameArrayItem(vals3, 'mimeType', 'content_mimeType')
renameArrayItem(vals3, 'encoding', 'content_encoding')
attrsInJson3,typesInJson3 = toCommaStringDict(vals3)
#print type(attrsInJson3)
#print attrsInJson3
attrsInJsonCombined = attrsInJson
typesInJsonCombined = typesInJson
if ( attrsInJson2 != ''):
attrsInJsonCombined = attrsInJsonCombined + ',' + attrsInJson2
typesInJsonCombined = typesInJsonCombined + ',' + typesInJson2
values.extend(values2)
if ( attrsInJson3 != ''):
attrsInJsonCombined = attrsInJsonCombined + ',' + attrsInJson3
typesInJsonCombined = typesInJsonCombined + ',' + typesInJson3
values.extend(values3)
dbinsert(tblName,attrsInJsonCombined,typesInJsonCombined,cursor,values,conn)
def web_entry_request(json_entries_node, uid, cursor, conn, parentid):
tblName = 'lab_web_entries_request'
featureAttrs = {'method', 'url', 'httpVersion', 'cookieNumber', 'headerSize', 'bodySize'}
featureAttrs2 = {'Host', 'User-Agent', 'Accept', 'Accept-Encoding', 'Connection', 'Content-Length', 'Keep-Alive'}
vals = {}
values = []
cntattr = 0
for tis in featureAttrs:
vals[cntattr] = tis
values.append(json_entries_node['request'][tis])
cntattr = cntattr + 1
vals[cntattr] = 'web_entries_id'
values.append(parentid)
cntattr = cntattr + 1
attrsInJson,typesInJson = toCommaStringDict(vals)
#print type(attrsInJson)
#print attrsInJson
vals2 = {}
values2 = []
cntattr2 = 0
for tis2 in featureAttrs2:
vals2,values2 = appendJsonKey(json_entries_node['request']['headers'], tis2, vals2, values2, cntattr2)
cntattr2 = cntattr2 + 1
renameArrayItem(vals2, 'Host', 'header_Host')
renameArrayItem(vals2, 'User-Agent', 'header_UserAgent')
renameArrayItem(vals2, 'Accept', 'header_Accept')
renameArrayItem(vals2, 'Accept-Encoding', 'header_AcceptEncoding')
renameArrayItem(vals2, 'Connection', 'header_Connection')
renameArrayItem(vals2, 'Content-Length', 'header_ContentLength')
renameArrayItem(vals2, 'Keep-Alive', 'header_KeepAlive')
attrsInJson2,typesInJson2 = toCommaStringDict(vals2)
#print type(attrsInJson2)
#print attrsInJson2
attrsInJsonCombined = attrsInJson
typesInJsonCombined = typesInJson
if ( attrsInJson2 != ''):
attrsInJsonCombined = attrsInJson + ',' + attrsInJson2
typesInJsonCombined = typesInJson + ',' + typesInJson2
values.extend(values2)
dbinsert(tblName,attrsInJsonCombined,typesInJsonCombined,cursor,values,conn)
def web_entry_node(json, uid, cursor, conn):
tblName = 'lab_web_entries'
featureAttrs = {'pageid', 'entryStartTime', 'time', 'serverIPAddress', 'connection'}
featureAttrs2 = {'blocked', 'dns', 'connect', 'send', 'wait', 'receive', 'ssl'}
featureAttrs3 = {'beforeRequestCacheEntries', 'afterRequestCacheEntries', 'hitCount'}
for jiv in json['pages']:
for innerjiv in jiv['entriesNode']:
cntattr = 0
attrsInJson = ''
typesInJson = ''
keytypevals = {}
values = []
for tis in featureAttrs:
keytypevals,values = appendJsonKey(innerjiv, tis, keytypevals, values, cntattr)
cntattr = cntattr + 1
attrsInJson,typesInJson = toCommaStringDict(keytypevals)
cntattr2 = 0
attrsInJson2 = ''
typesInJson2 = ''
keytypevals2 = {}
values2 = []
for tis2 in featureAttrs2:
keytypevals2,values2 = appendJsonKey(innerjiv['timings'], tis2, keytypevals2, values2, cntattr2)
cntattr2 = cntattr2 + 1
attrsInJson2,typesInJson2 = toCommaStringDict(keytypevals2)
cntattr3 = 0
attrsInJson3 = ''
typesInJson3 = ''
keytypevals3 = {}
values3 = []
for tis3 in featureAttrs3:
keytypevals3,values3 = appendJsonKey(innerjiv['cache'], tis3, keytypevals3, values3, cntattr3)
cntattr3 = cntattr3 + 1
attrsInJson3,typesInJson3 = toCommaStringDict(keytypevals3)
##combine
attrsInJsonCombined = attrsInJson + ',' + attrsInJson2 + ',' + attrsInJson3
typesInJsonCombined = typesInJson + ',' + typesInJson2 + ',' + typesInJson3
values.extend(values2)
values.extend(values3)
#insert
dbinsert(tblName,attrsInJsonCombined,typesInJsonCombined,cursor,values,conn)
##entry request
web_entry_id = getMaxId(tblName,cursor,conn)
web_entry_request(innerjiv, uid, cursor, conn, web_entry_id)
web_entry_response(innerjiv, uid, cursor, conn, web_entry_id)
def web_page_node(json, uid, cursor, conn):
tblName = 'lab_web_pages'
featureAttrs = {'tabid', 'pageStartTime', 'pageid', 'pagetitle', 'pageOnContentLoad', 'pageOnLoad', 'origin'}
cntattr = 0
for jiv in json['pages']:
attrsInJson = ''
typesInJson = ''
keytypevals = {}
values = []
for tis in featureAttrs:
keytypevals,values = appendJsonKey(jiv['pageNode'], tis, keytypevals, values, cntattr)
cntattr = cntattr + 1
keytypevals[cntattr] = 'uid'
cntattr = cntattr + 1
values.append(uid)
renameArrayItem(keytypevals, 'pageid', 'id')
attrsInJson,typesInJson = toCommaStringDict(keytypevals)
dbinsert(tblName,attrsInJson,typesInJson,cursor,values,conn)
## Helper Functions
def dbinsert(tblName,fields,fieldTypes,cursor,values,conn):
sql_command = "insert into " + tblName + " (" + fields + ") values (" + fieldTypes + ")"
#print sql_command
#print values
cursor.execute(sql_command, values)
conn.commit()
def getMaxId(tblName,cursor, conn):
sql = "select max(id) from " + tblName
cursor.execute(sql)
results = cursor.fetchall()
return str(results[0][0])
def isJsonKey(json, tisKey):
for key,val in json.items():
if (key == tisKey):
return True
break
return False
def appendJsonKey(json, key, vals, values, cntattr):
if (isJsonKey(json,key)):
vals[cntattr] = str(key)
values.append(json[key])
return vals,values
def toCommaStringDict(keytypevals):
ret = ''
ret2 = ''
for key in keytypevals:
ret = ret + '`' + keytypevals[key] + '`' + ','
ret2 = ret2 + '%s' + ','
if (len(ret) > 0):
ret = ret[:-1]
ret2 = ret2[:-1]
return ret,ret2
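# Example (descriptive, not executed): toCommaStringDict({0: 'a', 1: 'b'})
# returns ('`a`,`b`', '%s,%s') -- a backtick-quoted column list plus matching
# "%s" placeholders for the parameterized INSERT built in dbinsert().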
def renameArrayItem(arr, frm, to):
for key in arr:
    try:
        if arr[key] == frm:
            arr[key] = to
    except Exception:
        pass
return arr
def appendJsonKeyConcat(json, key, vals, values, cntattr):
ret = ''
if (isJsonKey(json,key)):
for i in json[key]:
ret = (ret + ' ' + i).strip()
vals[cntattr] = str(key)
values.append(ret)
return vals,values | 32.570313 | 133 | 0.71444 | 861 | 8,338 | 6.835075 | 0.188153 | 0.050977 | 0.017672 | 0.011555 | 0.435174 | 0.397451 | 0.379269 | 0.300255 | 0.268479 | 0.239932 | 0 | 0.023302 | 0.150756 | 8,338 | 256 | 134 | 32.570313 | 0.807796 | 0.039938 | 0 | 0.457286 | 0 | 0 | 0.160882 | 0.017416 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060302 | false | 0 | 0 | 0 | 0.095477 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ebc8be7f524d02d68beecb4c56841bf72041c9e6 | 1,148 | py | Python | tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial3_Solution_b972f241.py | eduardojdiniz/CompNeuro | 20269e66540dc4e802273735c97323020ee37406 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 2,294 | 2020-05-11T12:05:35.000Z | 2022-03-28T21:23:34.000Z | tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial3_Solution_b972f241.py | pellet/course-content | bb383857992469e0e7a9c36639ac0d05e842d9bd | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 629 | 2020-05-11T15:42:26.000Z | 2022-03-29T12:23:35.000Z | tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial3_Solution_b972f241.py | pellet/course-content | bb383857992469e0e7a9c36639ac0d05e842d9bd | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 917 | 2020-05-11T12:47:53.000Z | 2022-03-31T12:14:41.000Z | def ddm(T, x0, xinfty, lam, sig):
t = np.arange(0, T, 1.)
x = np.zeros_like(t)
x[0] = x0
for k in range(len(t)-1):
x[k+1] = xinfty + lam * (x[k] - xinfty) + sig * np.random.standard_normal(size=1)
return t, x
# computes equilibrium variance of ddm
# returns variance
def ddm_eq_var(T, x0, xinfty, lam, sig):
t, x = ddm(T, x0, xinfty, lam, sig)
# returns variance of the second half of the simulation
# this is a hack: assumes system has settled by second half
return x[-round(T/2):].var()
np.random.seed(2020) # set random seed
# sweep through values for lambda
lambdas = np.arange(0.05, 0.95, 0.01)
empirical_variances = np.zeros_like(lambdas)
analytical_variances = np.zeros_like(lambdas)
sig = 0.87
# compute empirical equilibrium variance
for i, lam in enumerate(lambdas):
empirical_variances[i] = ddm_eq_var(5000, x0, xinfty, lambdas[i], sig)
# Hint: you can also do this in one line outside the loop!
analytical_variances = sig**2 / (1 - lambdas**2)
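# Derivation sketch (not in the original solution): at equilibrium the update
# x[k+1] = xinfty + lam * (x[k] - xinfty) + sig * xi implies
# Var = lam**2 * Var + sig**2, hence Var = sig**2 / (1 - lam**2) as computed above.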
# Plot the empirical variance vs analytical variance
with plt.xkcd():
var_comparison_plot(empirical_variances, analytical_variances) | 30.210526 | 89 | 0.691638 | 190 | 1,148 | 4.094737 | 0.421053 | 0.041131 | 0.034704 | 0.046272 | 0.137532 | 0.068123 | 0 | 0 | 0 | 0 | 0 | 0.037594 | 0.189024 | 1,148 | 38 | 90 | 30.210526 | 0.798067 | 0.313589 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ebcd560a4b989401a8f15f7d602324e8d9dfe946 | 889 | py | Python | tests/dna_builders_test.py | auxein/auxein | 5388cb572b65aecc282f915515c35dc3b987154c | [
"Apache-2.0"
] | 1 | 2019-05-08T14:53:27.000Z | 2019-05-08T14:53:27.000Z | tests/dna_builders_test.py | auxein/auxein | 5388cb572b65aecc282f915515c35dc3b987154c | [
"Apache-2.0"
] | 2 | 2020-08-26T09:16:47.000Z | 2020-10-30T16:47:03.000Z | tests/dna_builders_test.py | auxein/auxein | 5388cb572b65aecc282f915515c35dc3b987154c | [
"Apache-2.0"
] | null | null | null | from unittest.mock import patch
import numpy as np
from auxein.population.dna_builders import UniformRandomDnaBuilder, NormalRandomDnaBuilder
def test_uniform_random_dna_builder_instantiation():
builder = UniformRandomDnaBuilder(interval=(-5, 0))
assert builder.get_distribution() == 'uniform'
assert len(builder.get(10)) == 10
def test_uniform_random_dna_builder_values():
builder = UniformRandomDnaBuilder()
for _ in range(0, 100):
dna: np.ndarray = builder.get(2)
assert -1 < dna[0] < 1
assert -1 < dna[1] < 1
@patch('numpy.random.normal')
def test_normal_random_dna_builder_instantiation(mock_np_normal):
mock_np_normal.return_value = [0.5, -1.3]
builder = NormalRandomDnaBuilder()
assert builder.get_distribution() == 'normal'
assert len(builder.get(2)) == 2
mock_np_normal.assert_called_once_with(0.0, 1.0, 2)
| 29.633333 | 90 | 0.725534 | 118 | 889 | 5.220339 | 0.364407 | 0.081169 | 0.077922 | 0.064935 | 0.097403 | 0.097403 | 0 | 0 | 0 | 0 | 0 | 0.037736 | 0.165354 | 889 | 29 | 91 | 30.655172 | 0.792453 | 0 | 0 | 0 | 0 | 0 | 0.035996 | 0 | 0 | 0 | 0 | 0 | 0.35 | 1 | 0.15 | false | 0 | 0.15 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ebd0835b63b438a8287b71effbf6286cc7da50d9 | 5,393 | py | Python | distributed_model.py | mknw/mask-rcnn | 0e7d14abeecb208e63dc5a9f7c05dbd0419afbe7 | [
"MIT"
] | null | null | null | distributed_model.py | mknw/mask-rcnn | 0e7d14abeecb208e63dc5a9f7c05dbd0419afbe7 | [
"MIT"
] | null | null | null | distributed_model.py | mknw/mask-rcnn | 0e7d14abeecb208e63dc5a9f7c05dbd0419afbe7 | [
"MIT"
] | null | null | null | from model import *
from config import *
from utils import *
if __name__ == "__main__":
''' GPU(s) '''
gpus = tf.config.experimental.list_physical_devices('GPU')
GPU_N = 3
if gpus:
try:
tf.config.experimental.set_visible_devices(gpus[GPU_N:], 'GPU')
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
print(e)
import ipdb; ipdb.set_trace()
np.random.seed(420)
tf.random.set_seed(420)
'''
loss and gradient function.
'''
# loss_object = tf.losses.SparseCategoricalCrossentropy()
@tf.function
def loss(model, x, y):
y_ = model(x)
return loss_object(y_true=y, y_pred=y_)
@tf.function
def smooth_l1_loss(y_true, y_pred):
"""Implements Smooth-L1 loss.
y_true and y_pred are typically: [N, 4], but could be any shape.
"""
diff = tf.abs(y_true - y_pred)
less_than_one = K.cast(tf.less(diff, 1.0), "float32")
loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)
return loss
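# Descriptive note on smooth_l1_loss above: with d = |y_true - y_pred| the loss
# is 0.5 * d**2 for d < 1 and d - 0.5 otherwise -- the Huber loss with
# delta = 1, quadratic near zero and linear in the tails.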
@tf.function
def grad(model, inputs, targets):
with tf.GradientTape() as tape:
loss_value = loss(model, inputs, targets)
return loss_value, tape.gradient(loss_value, model.trainable_variables)
''' dataset and dataset iterator'''
## cifar100 is likey too small. Switching to imagenet2012
# cifar100 = tf.keras.datasets.cifar100
# (x_train, y_train), (x_test, y_test) = cifar100.load_data(label_mode='fine')
import tensorflow_datasets as tfds
import ipdb
tfds.list_builders()
imagenet2012_builder = tfds.builder("imagenet2012")
train_set, test_set = imagenet2012_builder.as_dataset(split=["train", "validation"])
def onetwentyseven(x):
# resize to 256x256 and scale pixel values to [-1, 1].
x['image'] = tf.image.resize(x['image'], size=(256, 256))
x['image'] = tf.cast(x['image'], tf.float32) / 127.5 - 1
return x
train_set = train_set.shuffle(1024).map(onetwentyseven)
train_set = train_set.batch(32)
test_set = test_set.shuffle(1024).map(onetwentyseven)
test_set = test_set.batch(32)
import ipdb; ipdb.set_trace()
# preprocess
'''
x_train = (x_train.reshape(-1, 32, 32, 3) / 255).astype(np.float32)
x_test = (x_test.reshape(-1, 32, 32, 3) / 255).astype(np.float32)
# create tf.data.Dataset
train_set = tf.data.Dataset.from_tensor_slices((x_train, y_train))
test_set = tf.data.Dataset.from_tensor_slices((x_test, y_test))
# now train_set and test_set are Dataset objects.
# we return the dataset iterator by calling the
# __iter__() method
#
# Alternatively, we can just iterate over the Datasets
# iff eager mode is on (i.e. by default).
train_set = train_set.shuffle(10000)
test_set.shuffle(10000)
b_train_set = train_set.batch(256)
b_test_set = test_set.batch(256)
'''
''' model '''
# from config import Config
from viz import *
from utils import test_model
class Config(object):
def __init__(self):
self.BATCH_SIZE=256
self.BACKBONE = 'resnet51'
mycon = Config()
model = ResNet((None, None, 3), 1000, mycon)
model.build(input_shape=(256, None, None, 3)) # place correct shape from imagenet
''' initialize '''
# Reduce LR with *0.1 when plateau is detected
adapt_lr = LearningRateReducer(init_lr=0.1, factor=0.1,
patience=10, refractory_interval=20) # wait 20 epochs from last update
loss_object = tf.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.SGD(adapt_lr.monitor(), momentum = 0.9)
train_loss_results = []
train_accuracy_results = []
test_loss_results, test_acc_results = [], []
num_epochs = 300
''' train '''
for epoch in range(num_epochs):
epoch_loss_avg = tf.keras.metrics.Mean()
epoch_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
k = 0
optimizer = tf.keras.optimizers.SGD(adapt_lr.monitor(train_loss_results), momentum = 0.9)
for batch in train_set:
# img_btch, lab_btch, fn_btch = batch
img_btch = batch['image']
lab_btch = batch['label']
loss_value, grads = grad(model, img_btch, lab_btch)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
epoch_loss_avg(loss_value)
epoch_accuracy(lab_btch, model(img_btch))
if epoch < 1:
print("Epoch {:03d}: Batch: {:03d} Loss: {:.3%}, Accuracy: {:.3%}".format(epoch, k, epoch_loss_avg.result(), epoch_accuracy.result()))
k+=1
print("Trainset >> Epoch {:03d}: Loss: {:.3%}, Accuracy: {:.3%}".format(epoch, epoch_loss_avg.result(), epoch_accuracy.result()))
# end epoch
#if int(epoch_accuracy.result() > 70):
test_loss, test_accuracy = test_model(model, test_set)
test_loss_results.append(test_loss)
test_acc_results.append(test_accuracy)
train_loss_results.append(epoch_loss_avg.result())
train_accuracy_results.append(epoch_accuracy.result())
# import ipdb; ipdb.set_trace()
if epoch % 100 == 0:
fname = 'imgs/Test_Acc_Loss_IN2012_' + str(epoch) + '.png'
# here we should plot metrics and loss for test too.
# hence TODO: update save_plot
loss_l = [train_loss_results, test_loss_results]
acc_l = [train_accuracy_results, test_acc_results]
save_plot(loss_l, acc_l, fname)
#if train_loss_results[-1] > train_loss_results[-2]: # was if epoch == 10:
# learning_rate /= 10
# optimizer = tf.keras.optimizers.SGD(lr=learning_rate, momentum=0.9)
# print("Sir, we just updated the learning rate Sir.")
import ipdb; ipdb.set_trace()
| 29.631868 | 139 | 0.708511 | 809 | 5,393 | 4.488257 | 0.284302 | 0.026439 | 0.026439 | 0.018728 | 0.226659 | 0.09474 | 0.09474 | 0.058937 | 0.017075 | 0 | 0 | 0.038893 | 0.156128 | 5,393 | 181 | 140 | 29.79558 | 0.758954 | 0.167625 | 0 | 0.067416 | 0 | 0 | 0.070668 | 0.007094 | 0 | 0 | 0 | 0.005525 | 0 | 1 | 0.05618 | false | 0 | 0.11236 | 0 | 0.224719 | 0.044944 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ebd7565d7fb3e2e6e97cd012dbbf6e7433713b29 | 872 | py | Python | tests/test_dedge.py | GiliardGodoi/edgesets | b59a600400972ccc82e5e17f2acbb2b45045b40b | [
"MIT"
] | null | null | null | tests/test_dedge.py | GiliardGodoi/edgesets | b59a600400972ccc82e5e17f2acbb2b45045b40b | [
"MIT"
] | 20 | 2021-11-08T13:02:33.000Z | 2021-11-29T01:03:40.000Z | tests/test_dedge.py | GiliardGodoi/edgesets | b59a600400972ccc82e5e17f2acbb2b45045b40b | [
"MIT"
] | null | null | null | from edgesets import UEdge, DEdge
def test_repr():
e1 = DEdge(7, 8)
text = repr(e1)
assert text == "DEdge(7, 8, weight=1)"
e2 = eval(text)
assert type(e1) == type(e2)
assert e1 == e2
def test_if_directions_are_differents_with_same_nodes():
d1 = DEdge(10, 15)
d2 = DEdge(15, 10)
assert d1 != d2
assert hash(d1) != hash(d2)
def test_if_DEdge_is_differente_from_UEdge():
d1 = DEdge(10, 15)
d2 = UEdge(15, 10)
assert d1 != d2
assert hash(d1) != hash(d2)
def test_DEdge_is_different_from_tuple():
param = (25, 42)
edge = DEdge(*param)
assert edge != param
assert hash(edge) != hash(param)
def test_DEdge_is_different_from_list():
param = [24, 25]
edge = DEdge(*param)
assert edge != param
# assert hash(edge) != hash(param) # list is not hashable | 24.222222 | 61 | 0.603211 | 128 | 872 | 3.921875 | 0.328125 | 0.069721 | 0.027888 | 0.043825 | 0.507968 | 0.456175 | 0.36255 | 0.36255 | 0.36255 | 0.36255 | 0 | 0.075591 | 0.271789 | 872 | 36 | 61 | 24.222222 | 0.714961 | 0.061927 | 0 | 0.37037 | 0 | 0 | 0.026889 | 0 | 0 | 0 | 0 | 0 | 0.37037 | 1 | 0.185185 | false | 0 | 0.037037 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ebd871d4dbf6a21fb3d86e6cb1fedb9b96ed2220 | 1,889 | py | Python | pred.py | amoshyc/tthl-code | d00ba5abd2ade5b55db6a6b95d136041022e3150 | [
"Apache-2.0"
] | null | null | null | pred.py | amoshyc/tthl-code | d00ba5abd2ade5b55db6a6b95d136041022e3150 | [
"Apache-2.0"
] | null | null | null | pred.py | amoshyc/tthl-code | d00ba5abd2ade5b55db6a6b95d136041022e3150 | [
"Apache-2.0"
] | null | null | null | import argparse
from pathlib import Path
import numpy as np
import scipy.misc  # imresize lives in scipy.misc (requires scipy < 1.2 and Pillow)
import keras
from keras.models import load_model
from moviepy.editor import VideoFileClip, concatenate_videoclips
from tqdm import tqdm
def main():
# yapf: disable
parser = argparse.ArgumentParser(description='Video Highlight')
parser.add_argument('model', type=str, help='Path to model')
parser.add_argument('video', type=str, help='Path to video to highlight')
parser.add_argument('--out', '-o', type=str, default='./hl.mp4', help='output name')
parser.add_argument('--fps', type=int, default=2, help='fps')
parser.add_argument('--itv', type=int, default=6, help='interval of adjusting')
parser.add_argument('--bs', type=int, default=80, help='batch size')
args = parser.parse_args()
# yapf: enable
print('Loading model & video', end='...')
model = load_model(args.model)
video = VideoFileClip(args.video)
print('ok')
n_frames = int(video.duration) * args.fps
xs = np.zeros((n_frames, 224, 224, 3), dtype=np.float32)
for f in tqdm(range(n_frames), desc='Loading Video Frames', ascii=True):
img = video.get_frame(f / args.fps)
xs[f] = scipy.misc.imresize(img, (224, 224))
# Predicting
pred = model.predict(xs, args.bs, verbose=1)
pred = pred.round().astype(np.uint8).flatten()
print(pred[:500])
for i in range(n_frames - args.itv):
s, t = i, i + args.itv
if pred[s] == 1 and pred[t - 1] == 1:
pred[s:t] = 1
diff = np.diff(np.concatenate([[0], pred, [1]]))
starts = (diff == +1).nonzero()[0] / args.fps
ends = (diff == -1).nonzero()[0] / args.fps
segs = [video.subclip(s, e) for s, e in zip(starts, ends)]
out = concatenate_videoclips(segs)
out.write_videofile(args.out, fps=video.fps, threads=4, audio=True)
if __name__ == '__main__':
main()
| 32.568966 | 88 | 0.644256 | 277 | 1,889 | 4.303249 | 0.397112 | 0.045302 | 0.08557 | 0.043624 | 0.062081 | 0.033557 | 0 | 0 | 0 | 0 | 0 | 0.023669 | 0.194812 | 1,889 | 57 | 89 | 33.140351 | 0.760026 | 0.019587 | 0 | 0 | 0 | 0 | 0.103896 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02439 | false | 0 | 0.195122 | 0 | 0.219512 | 0.073171 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
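The segment extraction in pred.py is the non-obvious step: pad the 0/1 per-frame predictions, diff them, and read the +1/-1 transitions as highlight starts/ends. A small worked example of that mechanic, assuming fps=2 (the script default):

import numpy as np

pred = np.array([0, 0, 1, 1, 0, 1, 1, 1, 0], dtype=np.uint8)
diff = np.diff(np.concatenate([[0], pred, [1]]))
# diff == [0, 0, 1, 0, -1, 1, 0, 0, -1, 1]
starts = (diff == +1).nonzero()[0] / 2   # [1.0, 2.5, 4.5]
ends = (diff == -1).nonzero()[0] / 2     # [2.0, 4.0]
# zip(starts, ends) pairs (1.0, 2.0) and (2.5, 4.0); the trailing start at
# 4.5 comes from the [1] end-pad and is silently dropped by zip.
print(list(zip(starts, ends)))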
ebd878270215dfbcc338d538a43df8bec58e8bb9 | 4,389 | py | Python | blog/views.py | captainxavier/AutoBlog | 44fb23628fe0210a3dcec80b91e1217d27ee9462 | [
"MIT"
] | null | null | null | blog/views.py | captainxavier/AutoBlog | 44fb23628fe0210a3dcec80b91e1217d27ee9462 | [
"MIT"
] | null | null | null | blog/views.py | captainxavier/AutoBlog | 44fb23628fe0210a3dcec80b91e1217d27ee9462 | [
"MIT"
] | null | null | null | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.utils import timezone
from django.db.models import Count, Q
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.contrib.auth import login, authenticate, logout
from django.contrib.contenttypes.models import ContentType
from taggit.models import Tag
from accounts.models import Account
from blog.models import BlogPost, BlogPicture  # Category comes from category.models below
from comments.forms import CommentForm
from comments.models import Comments
from category.models import Category
BLOG_POST_PER_PAGE = 3
RESULT_POST_PER_PAGE = 17
#Category Count
def get_category_count():
query_set = BlogPost \
.objects \
.values('categories__title', 'categories__id') \
.annotate(Count('categories__title'))
return query_set
#Blog Page.
def blog_screen_view(request):
category_count = get_category_count()
super_featured = BlogPost.objects.filter(super_featured=True).order_by('-date_published')[:3]
blogPosts = BlogPost.objects.order_by('-date_published')
recentPosts = BlogPost.objects.order_by('-date_published')[:4]
# Pagination
page = request.GET.get('page',1)
blog_posts_paginator = Paginator(blogPosts, BLOG_POST_PER_PAGE)
try:
blogPosts = blog_posts_paginator.page(page)
except PageNotAnInteger:
blogPosts = blog_posts_paginator.page(BLOG_POST_PER_PAGE)
except EmptyPage:
blogPosts = blog_posts_paginator.page(blog_posts_paginator.num_pages)
context = {
'super_featured_posts':super_featured,
'posts': blogPosts,
'recent_posts': recentPosts,
'categories': category_count,
}
return render(request, 'blog/blog.html', context)
# Single Post
def post_screen_view(request, slug):
post = get_object_or_404(BlogPost, slug=slug)
post_related = post.tags.similar_objects()[:3]
app_url = request.get_full_path
category_count = get_category_count()
recentPosts = BlogPost.objects.order_by('-date_published')[:4]
comments = post.comments
initial_data = {
'content_type': post.get_content_type,
'object_id' : post.id,
}
if request.method == 'POST':
form = CommentForm(request.POST or None)
if form.is_valid():
com = form.save(commit=False)
com.user = request.user
com.content_type = post.get_content_type
com.object_id = post.id
parent_obj = None
try:
parent_id = int(request.POST.get("parent_id"))
except (TypeError, ValueError):
parent_id = None
if parent_id:
parent_qs = Comments.objects.filter(id=parent_id)
if parent_qs.exists() and parent_qs.count() == 1:
parent_obj = parent_qs.first()
com.parent = parent_obj
com.save()
return HttpResponseRedirect(com.content_object.get_absolute_url())
else:
print('error')
else:
form = CommentForm()
context = {
'post': post,
'recent_posts': recentPosts,
'categories': category_count,
'post_url': app_url,
'comments': comments,
'comment_form': form,
'related_posts':post_related,
}
return render(request, 'blog/post.html', context)
# Search Page
def search_screen_view(request):
query_set = BlogPost.objects.all()
category_count = get_category_count()
query = request.GET.get('q')
if query:
query_set = query_set.filter(
Q(title__icontains=query) |
Q(description_one__icontains=query) |
Q(description_two__icontains=query)
).distinct()
paginator = Paginator(query_set, RESULT_POST_PER_PAGE) # 17 results per page
page = request.GET.get('page',1)
try:
posts = paginator.page(page)
except PageNotAnInteger:
posts = paginator.page(1)
except EmptyPage:
posts = paginator.page(paginator.num_pages)
context = {
'query_sets': posts,
'categories':category_count,
}
return render(request, 'blog/result_search.html', context) | 33.503817 | 98 | 0.642971 | 497 | 4,389 | 5.430584 | 0.253521 | 0.052983 | 0.040015 | 0.029641 | 0.240089 | 0.203038 | 0.068914 | 0.034828 | 0 | 0 | 0 | 0.005576 | 0.264525 | 4,389 | 131 | 99 | 33.503817 | 0.830545 | 0.017316 | 0 | 0.222222 | 0 | 0 | 0.081618 | 0.005505 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.12037 | 0 | 0.203704 | 0.009259 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
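The same three-branch Paginator try/except appears in both blog_screen_view and search_screen_view; a small helper would factor it out. A sketch (the paginate helper is hypothetical, not part of the original app):

from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator

def paginate(queryset, page, per_page, fallback_page=1):
    paginator = Paginator(queryset, per_page)
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        return paginator.page(fallback_page)
    except EmptyPage:
        return paginator.page(paginator.num_pages)

# usage inside a view:
# posts = paginate(BlogPost.objects.order_by('-date_published'),
#                  request.GET.get('page', 1), BLOG_POST_PER_PAGE)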
ebd938bccdfd5e3d285fcfe2b39abb7192868335 | 1,408 | py | Python | data_loader/data_set_loader.py | ys10/WaveRNN | dc4eec65bc1eec59ebc533469d40f072df3a6be6 | [
"MIT"
] | 6 | 2018-11-15T05:48:02.000Z | 2021-06-18T02:22:31.000Z | data_loader/data_set_loader.py | ys10/WaveRNN | dc4eec65bc1eec59ebc533469d40f072df3a6be6 | [
"MIT"
] | null | null | null | data_loader/data_set_loader.py | ys10/WaveRNN | dc4eec65bc1eec59ebc533469d40f072df3a6be6 | [
"MIT"
] | 1 | 2021-04-02T11:53:52.000Z | 2021-04-02T11:53:52.000Z | # coding=utf-8
import tensorflow as tf
class DataSetLoader(object):
def __init__(self, config, generators, default_set_name='train'):
self.config = config
self.generators = generators
self.data_sets = dict()
self.data_set_init_ops = dict()
with tf.variable_scope("data"):
for k in self.generators.keys():
self.data_sets[k] = self.get_data_set_from_generator(self.generators[k].next, epochs=self.config.epochs,
batch_size=self.config.batch_size)
self.iterator = self.data_sets[default_set_name].make_one_shot_iterator()
features, labels = self.iterator.get_next()
self.next_data = {'features': features, 'labels': labels}
for k in self.data_sets.keys():
self.data_set_init_ops[k] = self.iterator.make_initializer(self.data_sets[k])
@staticmethod
def get_data_set_from_generator(generator_func, epochs=1, batch_size=16):
data_set = tf.data.Dataset.from_generator(generator_func,
output_types=(tf.int32, tf.int32),
output_shapes=(tf.TensorShape([64]), tf.TensorShape([1])))
data_set = data_set.repeat(epochs)
data_set = data_set.batch(batch_size)
return data_set
| 48.551724 | 120 | 0.598011 | 169 | 1,408 | 4.692308 | 0.337278 | 0.088272 | 0.075662 | 0.037831 | 0.103405 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011179 | 0.301136 | 1,408 | 28 | 121 | 50.285714 | 0.794715 | 0.008523 | 0 | 0 | 0 | 0 | 0.016499 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.041667 | 0 | 0.208333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
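DataSetLoader passes each generator's .next method to tf.data.Dataset.from_generator, so .next must be a generator function yielding (features, labels) pairs shaped [64] and [1] with int32 dtype, per the output_shapes above. A minimal compatible generator, sketched with dummy data (the real project feeds quantized audio samples):

import numpy as np

class DummyGenerator(object):
    def next(self):
        while True:
            features = np.random.randint(0, 256, size=64, dtype=np.int32)
            label = np.random.randint(0, 256, size=1, dtype=np.int32)
            yield features, label

# loaders are keyed by split name, e.g.:
# loader = DataSetLoader(config, {'train': DummyGenerator(), 'test': DummyGenerator()})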
ebda183d9ae687038f19ffe815f5ccbfc5935c5a | 1,323 | py | Python | ssht00ls/classes/connections/__init__.py | vandenberghinc/ssht00ls | e08081773c8da7dfac0764170bfeacb4bf421ec1 | [
"CNRI-Python"
] | 5 | 2021-02-18T17:46:39.000Z | 2021-12-29T15:48:07.000Z | ssht00ls/classes/connections/__init__.py | vandenberghinc/ssht00ls | e08081773c8da7dfac0764170bfeacb4bf421ec1 | [
"CNRI-Python"
] | null | null | null | ssht00ls/classes/connections/__init__.py | vandenberghinc/ssht00ls | e08081773c8da7dfac0764170bfeacb4bf421ec1 | [
"CNRI-Python"
] | 2 | 2021-03-19T14:06:20.000Z | 2021-09-26T14:08:34.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# imports.
from ssht00ls.classes.config import *
from ssht00ls.classes import utils
# the ssh connections object class.
class Connections(Traceback):
def __init__(self):
# docs.
DOCS = {
"module":"ssht00ls.connections",
"initialized":True,
"description":[],
"chapter": "Connections", }
# defaults.
Traceback.__init__(self, traceback="ssht00ls.connections", raw_traceback="ssht00ls.classes.connections.Connections")
#
def list(self, filter="ssh"):
if dev0s.defaults.vars.os not in ["linux"]:
return dev0s.response.error(f"Unsupported operating system [{dev0s.defauls.vars.os}].")
output = dev0s.utils.__execute_script__("""ss | grep ssh | awk '{print $1","$2","$3","$4","$5","$6}' """)
connections = {}
for line in output.split("\n"):
if line not in [""]:
net_id,state,recvq, sendq,local_address,remote_address = line.split(",")
if state == "ESTAB":
connections[remote_address] = {
"remote_address":remote_address,
"local_address":local_address,
"recvq":recvq,
"sendq":sendq,
"net_id":net_id,
}
return dev0s.response.success(f"Successfully listed {len(connections)} ssh connection(s).", {
"connections":connections,
})
# Initialized objects.
connections = Connections()
| 28.76087 | 118 | 0.665911 | 155 | 1,323 | 5.529032 | 0.522581 | 0.060677 | 0.070012 | 0.060677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020833 | 0.165533 | 1,323 | 45 | 119 | 29.4 | 0.755435 | 0.092971 | 0 | 0 | 0 | 0.033333 | 0.307305 | 0.079765 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.066667 | 0 | 0.233333 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
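Connections.list() shells out to ss piped through awk, yielding comma-joined rows that are then split into six fields. An illustrative row and the dict entry it would produce (addresses made up):

line = "tcp,ESTAB,0,64,10.0.0.5:22,203.0.113.7:50314"
net_id, state, recvq, sendq, local_address, remote_address = line.split(",")
assert state == "ESTAB"   # only established connections are kept
entry = {
    "remote_address": remote_address,
    "local_address": local_address,
    "recvq": recvq,
    "sendq": sendq,
    "net_id": net_id,
}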
ebde515b95808947b4370dd040bc2675c00b8d5a | 907 | py | Python | ycyc/tests/__init__.py | MrLYC/ycyc | 1938493294fbad3a461cc3a752c5385d30a6e51d | [
"MIT"
] | 22 | 2015-07-21T03:15:36.000Z | 2021-02-23T07:58:03.000Z | ycyc/tests/__init__.py | MrLYC/ycyc | 1938493294fbad3a461cc3a752c5385d30a6e51d | [
"MIT"
] | 3 | 2016-03-20T12:06:07.000Z | 2018-01-16T10:34:19.000Z | ycyc/tests/__init__.py | MrLYC/ycyc | 1938493294fbad3a461cc3a752c5385d30a6e51d | [
"MIT"
] | 3 | 2015-05-08T00:55:38.000Z | 2017-02-25T03:30:14.000Z | #!/usr/bin/env python
# encoding: utf-8
from contextlib import contextmanager
import mock
__author__ = 'Liu Yicong'
__email__ = 'imyikong@gmail.com'
@contextmanager
def mock_patches(*patches, **named_patches):
"""
A context manager to help create mock patches.
>>> with mock_patches("package.module.cls", cls2="package.cls") as mocks:
... mocks.cls() #=> package.module.cls
... mocks.cls2() #=> package.cls
"""
attrs = list(i.split(".")[-1] for i in patches)
attrs.extend(list(named_patches.keys()))
patches = list(patches)
patches.extend(list(named_patches.values()))
mock_patches = []
mocks = mock.Mock()
for k, i in zip(attrs, patches):
patch = mock.patch(i)
mock_patches.append(patch)
setattr(mocks, k, patch.start())
try:
yield mocks
finally:
for p in mock_patches:
p.stop()
| 24.513514 | 77 | 0.62183 | 115 | 907 | 4.765217 | 0.486957 | 0.120438 | 0.058394 | 0.080292 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005764 | 0.23484 | 907 | 36 | 78 | 25.194444 | 0.783862 | 0.261301 | 0 | 0 | 0 | 0 | 0.045242 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.095238 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
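A usage sketch for mock_patches, with "pkg" standing in for whatever importable module a real test would target:

# hypothetical module path "pkg"; positional patches are named by their
# last dotted component, keyword patches by their keyword
with mock_patches("pkg.fetch", save="pkg.io.save") as mocks:
    mocks.fetch.return_value = 42
    assert mocks.fetch() == 42            # callers of pkg.fetch now get the mock
    mocks.save("x")
    mocks.save.assert_called_once_with("x")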
ebe611c4dd602efaddd3352116b777a3b429c7f6 | 16,830 | py | Python | sdkcore/SdkCoreTesting/scripts/arsdkgenobjc.py | papachuj/groundsdk-ios | f205f75b11a57f49b39ee558b2e8e39f59a15963 | [
"BSD-3-Clause"
] | 2 | 2020-03-30T00:06:43.000Z | 2021-07-18T18:07:15.000Z | sdkcore/SdkCoreTesting/scripts/arsdkgenobjc.py | papachuj/groundsdk-ios | f205f75b11a57f49b39ee558b2e8e39f59a15963 | [
"BSD-3-Clause"
] | null | null | null | sdkcore/SdkCoreTesting/scripts/arsdkgenobjc.py | papachuj/groundsdk-ios | f205f75b11a57f49b39ee558b2e8e39f59a15963 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import sys, os
import arsdkparser
#===============================================================================
class Writer(object):
def __init__(self, fileobj):
self.fileobj = fileobj
def write(self, fmt, *args):
if args:
self.fileobj.write(fmt % (args))
else:
self.fileobj.write(fmt % ())
#===============================================================================
def class_name(name):
splitted_name = name.split('_')
return "ArsdkFeature" + "".join(x.capitalize() for x in splitted_name)
def enum_class_name(feature_strict_name, enum_name):
splitted_name = enum_name.split('_')
return class_name(feature_strict_name) + "".join(x.capitalize() for x in splitted_name)
def multiset_class_name(feature_strict_name, multiset_name):
splitted_name = multiset_name.split('_')
return class_name(feature_strict_name) + "".join(x.capitalize() for x in splitted_name)
def param_name(name):
components = name.split('_')
return components[0].lower() + "".join(x[0].upper() + x[1:] for x in components[1:])
def arg_type(feature_strict_name, arg, is_fun_arg=False):
args = {
arsdkparser.ArArgType.I8: "NSInteger",
arsdkparser.ArArgType.U8: "NSUInteger",
arsdkparser.ArArgType.I16: "NSInteger",
arsdkparser.ArArgType.U16: "NSUInteger",
arsdkparser.ArArgType.I32: "NSInteger",
arsdkparser.ArArgType.U32: "NSUInteger",
arsdkparser.ArArgType.I64: "int64_t",
arsdkparser.ArArgType.U64: "uint64_t",
arsdkparser.ArArgType.FLOAT: "float",
arsdkparser.ArArgType.DOUBLE: "double",
arsdkparser.ArArgType.STRING: "NSString*"
}
if isinstance(arg.argType, arsdkparser.ArEnum):
argType = enum_class_name(feature_strict_name, arg.argType.name)
elif isinstance(arg.argType, arsdkparser.ArBitfield):
if arg.argType.btfType == arsdkparser.ArArgType.I64 or \
arg.argType.btfType == arsdkparser.ArArgType.U64:
argType = args[arsdkparser.ArArgType.U64]
else:
argType = args[arsdkparser.ArArgType.U32]
elif isinstance(arg.argType, arsdkparser.ArMultiSetting):
if is_fun_arg:
argType = multiset_class_name(feature_strict_name, arg.argType.name) + ' *'
else:
argType = multiset_class_name(feature_strict_name, arg.argType.name)
else:
argType = args[arg.argType]
return argType
def multiset_c_name(ftr, multiset):
return "struct arsdk_%s_%s" % (ftr, multiset)
def arg_c_type(arg, is_fun_arg=False):
args = {
arsdkparser.ArArgType.I8: "int8_t",
arsdkparser.ArArgType.U8: "uint8_t",
arsdkparser.ArArgType.I16: "int16_t",
arsdkparser.ArArgType.U16: "uint16_t",
arsdkparser.ArArgType.I32: "int32_t",
arsdkparser.ArArgType.U32: "uint32_t",
arsdkparser.ArArgType.I64: "int64_t",
arsdkparser.ArArgType.U64: "uint64_t",
arsdkparser.ArArgType.FLOAT: "float",
arsdkparser.ArArgType.DOUBLE: "double",
arsdkparser.ArArgType.STRING: "const char*"
}
if isinstance(arg.argType, arsdkparser.ArEnum):
argType = args[arsdkparser.ArArgType.I32]
elif isinstance(arg.argType, arsdkparser.ArBitfield):
argType = args[arg.argType.btfType]
elif isinstance(arg.argType, arsdkparser.ArMultiSetting):
if is_fun_arg:
argType = multiset_c_name("generic", arg.argType.name.lower()) + ' *'
else:
argType = multiset_c_name("generic", arg.argType.name.lower())
else:
argType = args[arg.argType]
return argType
def arg_name(arg):
if isinstance(arg.argType, arsdkparser.ArEnum):
argName = param_name(arg.name)
elif isinstance(arg.argType, arsdkparser.ArBitfield):
argName = param_name(arg.name) + "BitField"
elif isinstance(arg.argType, arsdkparser.ArMultiSetting):
argName = param_name(arg.name)
else:
argName = param_name(arg.name)
return argName
def arg_value_from_obj_c_to_c(feature_strict_name, arg):
if arg.argType == arsdkparser.ArArgType.STRING:
return "[" + arg_name(arg) + " UTF8String]"
elif isinstance(arg.argType, arsdkparser.ArMultiSetting):
return "[%s getNativeSettings]" % arg_name(arg)
elif arg_c_type(arg) != arg_type(feature_strict_name, arg):
return "(" + arg_c_type(arg) + ")" + arg_name(arg)
else:
return arg_name(arg)
def c_name(val):
return val[0].upper() + val[1:]
#===============================================================================
def expected_cmd_class():
return "ExpectedCmd"
def command_name(feature_name, cmd):
command_name_str = feature_name + "_" + cmd.name
splitted_name = command_name_str.split('_')
command_name_str = "".join(x.capitalize() for x in splitted_name)
# lower first letter
return command_name_str[0].lower() + command_name_str[1:]
def static_initializer_method_name(feature_obj, feature_name, cmd, with_swift_name=False):
return_part = "+ (" + expected_cmd_class() + "*)"
method_root_name = command_name(feature_name, cmd)
method_name = return_part + method_root_name
if cmd.args:
# the first arg is special as the arg name is not part of the method name
arg = cmd.args[0]
method_name += ":(" + arg_type(feature_obj.name, arg, True) + ")" + arg_name(arg)
for arg in cmd.args[1:]:
method_name += " " + arg_name(arg) + ":(" + arg_type(feature_obj.name, arg, True) + ")" + arg_name(arg)
if with_swift_name:
method_name += "\nNS_SWIFT_NAME(" + method_root_name + "("
for arg in cmd.args:
method_name += arg_name(arg) + ":"
method_name += "))"
return method_name
def command_class_name(feature_name, cmd):
command_name_str = command_name(feature_name, cmd)
return expected_cmd_class() + command_name_str[0].upper() + command_name_str[1:]
def match_command_name():
return "- (BOOL)match:(struct arsdk_cmd*)cmd checkParams:(BOOL)checkParams"
def gen_expected_header_file(ctx, out):
out.write("/** Generated, do not edit ! */\n")
out.write("\n")
out.write("#import <Foundation/Foundation.h>\n")
out.write("#import <SdkCore/Arsdk.h>\n")
out.write("\n")
out.write("struct arsdk_cmd;\n")
out.write("\n")
out.write("@interface %s : NSObject\n", expected_cmd_class())
out.write("\n")
out.write("%s;\n", match_command_name())
out.write("- (NSString*)describe;\n");
out.write("\n")
for feature_id in sorted(ctx.featuresById.keys()):
feature_obj = ctx.featuresById[feature_id]
for cmd in feature_obj.cmds:
feature_name = feature_obj.name + ("_" + cmd.cls.name if cmd.cls else "")
out.write("%s;\n", static_initializer_method_name(feature_obj, feature_name, cmd, True))
out.write("@end\n")
out.write("\n")
for feature_id in sorted(ctx.featuresById.keys()):
feature_obj = ctx.featuresById[feature_id]
for cmd in feature_obj.cmds:
feature_name = feature_obj.name + ("_" + cmd.cls.name if cmd.cls else "")
out.write("@interface %s : %s\n", command_class_name(feature_name, cmd), expected_cmd_class())
out.write("@end\n")
out.write("\n")
def gen_expected_source_file(ctx, out):
out.write("/** Generated, do not edit ! */\n")
out.write("\n")
out.write("#import \"" + expected_cmd_class() + ".h\"\n")
out.write("#import <arsdk/arsdk.h>\n")
out.write("\n")
out.write("@interface %s ()\n", expected_cmd_class())
out.write("\n")
out.write("@property (nonatomic, assign) struct arsdk_cmd* cmd;\n")
out.write("@end\n")
out.write("\n")
out.write("@implementation %s\n", expected_cmd_class())
out.write("\n")
out.write("%s {return false;}\n", match_command_name())
out.write("\n")
out.write("- (NSString*)describe {\n");
out.write(" return [ArsdkCommand describe:self.cmd];\n");
out.write("}\n");
out.write("\n")
for feature_id in sorted(ctx.featuresById.keys()):
feature_obj = ctx.featuresById[feature_id]
for cmd in feature_obj.cmds:
feature_name = feature_obj.name + ("_" + cmd.cls.name if cmd.cls else "")
out.write("%s {\n", static_initializer_method_name(feature_obj, feature_name, cmd))
out.write(" %s *expectedCmd = [[%s alloc] init];\n",
command_class_name(feature_name, cmd),
command_class_name(feature_name, cmd))
out.write(" expectedCmd.cmd = calloc(1, sizeof(*expectedCmd.cmd));\n")
out.write(" arsdk_cmd_init(expectedCmd.cmd);\n")
out.write("\n")
if cmd.args:
out.write(" int res = arsdk_cmd_enc_%s_%s(expectedCmd.cmd, %s);\n",
c_name(feature_name), c_name(cmd.name),
", ".join(arg_value_from_obj_c_to_c(feature_obj.name, arg) for arg in cmd.args))
else:
out.write(" int res = arsdk_cmd_enc_%s_%s(expectedCmd.cmd);\n",
c_name(feature_name), c_name(cmd.name))
out.write(" if (res < 0) {\n")
out.write(" return nil;\n")
out.write(" }\n")
out.write(" return expectedCmd;\n")
out.write("}\n")
out.write("\n")
out.write("@end\n")
out.write("\n")
for feature_id in sorted(ctx.featuresById.keys()):
feature_obj = ctx.featuresById[feature_id]
for cmd in feature_obj.cmds:
feature_name = feature_obj.name + ("_" + cmd.cls.name if cmd.cls else "")
out.write("@implementation %s\n", command_class_name(feature_name, cmd))
out.write("\n")
out.write("%s {\n", match_command_name())
out.write(" if (self.cmd->id != cmd->id) return false;\n")
out.write("\n")
if cmd.args:
out.write(" if (checkParams) {\n")
for arg in cmd.args:
out.write(" %s _%s;\n", arg_c_type(arg), arg_name(arg))
out.write(" int res = arsdk_cmd_dec_%s_%s(cmd, %s);\n",
c_name(feature_name), c_name(cmd.name),
", ".join("&_" + arg_name(arg) for arg in cmd.args))
out.write(" if (res < 0) {\n")
out.write(" return false;\n")
out.write(" }\n")
out.write("\n")
for arg in cmd.args:
out.write(" %s my%s;\n", arg_c_type(arg), arg_name(arg).title())
out.write(" res = arsdk_cmd_dec_%s_%s(self.cmd, %s);\n",
c_name(feature_name), c_name(cmd.name),
", ".join("&my" + arg_name(arg).title() for arg in cmd.args))
out.write(" if (res < 0) {\n")
out.write(" return false;\n")
out.write(" }\n")
out.write("\n")
for arg in cmd.args:
if arg.argType == arsdkparser.ArArgType.STRING:
out.write(" NSString* %sObj = [NSString stringWithUTF8String:_%s];\n",
arg_name(arg), arg_name(arg))
out.write(" NSString* my%sObj = [NSString stringWithUTF8String:my%s];\n",
arg_name(arg).title(), arg_name(arg).title())
out.write(" if (![%sObj isEqual:my%sObj]) return false;\n", arg_name(arg), arg_name(arg).title())
elif isinstance(arg.argType, arsdkparser.ArMultiSetting):
out.write(" res = memcmp(&_%s, &my%s, sizeof(my%s));\n", arg.name, arg_name(arg).title(),
arg_name(arg).title())
out.write(" if (res != 0) {\n")
out.write(" return false;\n")
out.write(" }\n")
else:
out.write(" if (_%s != my%s) return false;\n", arg_name(arg), arg_name(arg).title())
out.write("\n")
out.write(" }\n")
out.write(" return true;\n")
out.write("}\n")
out.write("@end\n")
out.write("\n")
#===============================================================================
def cmd_encoder_class():
return "CmdEncoder"
def encoder_function_signature(feature_obj, msg, with_swift_name=False):
feature_name = feature_obj.name + ("_" + msg.cls.name if msg.cls else "")
function_underscored = command_name(feature_name, msg) + "_encoder"
components = function_underscored.split('_')
func_name = components[0][0].lower() + components[0][1:] + "".join(x[0].upper() + x[1:] for x in components[1:])
function_signature = "+ (int (^)(struct arsdk_cmd *))" + func_name
if msg.args:
# the first arg is special as the arg name is not part of the method name
arg = msg.args[0]
function_signature += ":(" + arg_type(feature_obj.name, arg, True) + ")" + arg_name(arg)
for arg in msg.args[1:]:
function_signature += " " + arg_name(arg) + ":(" + arg_type(feature_obj.name, arg, True) + ")" + arg_name(arg)
if with_swift_name:
function_signature += "\nNS_SWIFT_NAME(" + func_name + "("
for arg in msg.args:
function_signature += arg_name(arg) + ":"
function_signature += "))"
return function_signature
def gen_encoder_header_file(ctx, out):
out.write("/** Generated, do not edit ! */\n")
out.write("\n")
out.write("#import <Foundation/Foundation.h>\n")
out.write("#import <SdkCore/Arsdk.h>\n")
out.write("\n")
out.write("struct arsdk_cmd;\n")
out.write("\n")
out.write("@interface %s : NSObject\n", cmd_encoder_class())
out.write("\n")
for feature_id in sorted(ctx.featuresById.keys()):
feature_obj = ctx.featuresById[feature_id]
for evt in feature_obj.evts:
out.write("%s;\n", encoder_function_signature(feature_obj, evt, True))
out.write("@end\n")
out.write("\n")
def gen_encoder_source_file(ctx, out):
out.write("/** Generated, do not edit ! */\n")
out.write("\n")
out.write("#import \"%s.h\"\n", cmd_encoder_class())
out.write("#import <arsdk/arsdk.h>\n")
out.write("\n")
out.write("@implementation %s\n", cmd_encoder_class())
out.write("\n")
for feature_id in sorted(ctx.featuresById.keys()):
feature_obj = ctx.featuresById[feature_id]
for evt in feature_obj.evts:
feature_name = feature_obj.name + ("_" + evt.cls.name if evt.cls else "")
out.write("%s {\n", encoder_function_signature(feature_obj, evt))
out.write(" return ^(struct arsdk_cmd* cmd) {\n")
if evt.args:
out.write(" return arsdk_cmd_enc_%s_%s(cmd, %s);\n",
c_name(feature_name), c_name(evt.name),
", ".join(arg_value_from_obj_c_to_c(feature_obj.name, arg) for arg in evt.args))
else:
out.write(" return arsdk_cmd_enc_%s_%s(cmd);\n",
c_name(feature_name), c_name(evt.name))
out.write(" };\n")
out.write("}\n")
out.write("\n")
out.write("@end\n")
out.write("\n")
#===============================================================================
def list_files(ctx, outdir, extra):
pass
#===============================================================================
#===============================================================================
def generate_files(ctx, outdir, extra):
if not os.path.exists(outdir):
os.makedirs(outdir)
else:
filelist = os.listdir(outdir)
for f in filelist:
os.remove(outdir + "/" + f)
filepath = os.path.join(outdir, expected_cmd_class() + ".h")
with open(filepath, "w") as file_obj:
gen_expected_header_file(ctx, Writer(file_obj))
filepath = os.path.join(outdir, expected_cmd_class() + ".m")
with open(filepath, "w") as file_obj:
gen_expected_source_file(ctx, Writer(file_obj))
filepath = os.path.join(outdir, cmd_encoder_class() + ".h")
with open(filepath, "w") as file_obj:
gen_encoder_header_file(ctx, Writer(file_obj))
filepath = os.path.join(outdir, cmd_encoder_class() + ".m")
with open(filepath, "w") as file_obj:
gen_encoder_source_file(ctx, Writer(file_obj))
print("Done generating test features files.")
| 39.048724 | 128 | 0.57148 | 2,090 | 16,830 | 4.396172 | 0.091388 | 0.096648 | 0.06367 | 0.037005 | 0.732695 | 0.666086 | 0.602743 | 0.555181 | 0.512407 | 0.450044 | 0 | 0.00683 | 0.251812 | 16,830 | 430 | 129 | 39.139535 | 0.72284 | 0.043791 | 0 | 0.45045 | 0 | 0 | 0.157878 | 0.026365 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075075 | false | 0 | 0.03003 | 0.015015 | 0.168168 | 0.003003 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ebe8666cf6e33ea8f8a62d695ff363e1865d7f05 | 4,998 | py | Python | game.py | TheAmmiR/snake-game | 7a95a36c1ef7c0e9064bad3976f14b25bdb19f2a | [
"MIT"
] | null | null | null | game.py | TheAmmiR/snake-game | 7a95a36c1ef7c0e9064bad3976f14b25bdb19f2a | [
"MIT"
] | null | null | null | game.py | TheAmmiR/snake-game | 7a95a36c1ef7c0e9064bad3976f14b25bdb19f2a | [
"MIT"
] | null | null | null | import numpy
import pygame
import random
from pygame import gfxdraw
pygame.init()
config_instance = open('settings.txt', 'r', encoding = 'utf-8')
class Settings:
def __init__(self, settings: dict):
def str_to_rgb(sequence):
r, g, b = sequence.split(' ')
r, g, b = int(r), int(g), int(b)
if (any([r not in range(0, 256), g not in range(0, 256), b not in range(0, 256)])):
raise ValueError(f'You set wrong colour values, check your settings! ({r, g, b})') # RGB components must be 0-255
return (r, g, b)
setting_names = {
'size of cell': ('cellsize', int),
'size of grid': ('gridsize', int),
'snake colour': 'snake_color',
'apple colour': 'apple_color',
'default length': ('snake_len', int)
}
for key, value in settings.items():
if (setting_names.get(key)):
if (isinstance(setting_names[key], tuple)):
setattr(self, setting_names[key][0], setting_names[key][1](value))
else:
setattr(self, setting_names[key], value)
if (getattr(self, 'snake_color', None)):
self.snake_color = str_to_rgb(self.snake_color)
else:
self.snake_color = (10, 240, 100) # default color
if (getattr(self, 'apple_color', None)):
self.apple_color = str_to_rgb(self.apple_color)
else:
self.apple_color = (240, 10, 10) # default color
def file_handler(instance):
text = instance.read().split('\n')
settings = {}
for line in text:
line = line.split(' - ')
line[0] = line[0].strip(); line[1] = line[1].strip()
settings[line[0]] = line[1]
return Settings(settings)
settings = file_handler(config_instance)
config_instance.close()
class Game:
def __init__(self, settings):
self.settings = settings
self.clock = pygame.time.Clock()
self.loop = False
self.display = pygame.display.set_mode((self.settings.gridsize * self.settings.cellsize, self.settings.gridsize * self.settings.cellsize))
self.snake: list = []
self.apple: list = []
self.direction: str = 'right'
middle = self.settings.gridsize // 2
xcoords = [middle + i for i in range(self.settings.snake_len)]
ycoords = [middle for _ in range(self.settings.snake_len)] # default snake position
for x, y in zip(xcoords, ycoords):
self.snake.append((x, y))
pygame.display.set_caption('Snake Game')
def start(self):
self.loop = True
self.spawn_apple()
while (self.loop):
for e in pygame.event.get():
if (e.type == pygame.QUIT):
self.loop = False
if (e.type == pygame.KEYDOWN):
if (e.key in [pygame.K_w, pygame.K_UP] and self.direction != 'down'):
self.direction = 'up'
elif (e.key in [pygame.K_s, pygame.K_DOWN] and self.direction != 'up'):
self.direction = 'down'
elif (e.key in [pygame.K_d, pygame.K_RIGHT] and self.direction != 'left'):
self.direction = 'right'
elif (e.key in [pygame.K_a, pygame.K_LEFT] and self.direction != 'right'):
self.direction = 'left'
self.clock.tick(15)
self.display.fill((0, 0, 0))
self.move_snake()
self.draw()
pygame.display.update()
def move_snake(self):
self.snake.pop(0)
if (self.direction == 'left'):
self.snake.append((self.snake[-1][0] - 1, self.snake[-1][1]))
elif (self.direction == 'right'):
self.snake.append((self.snake[-1][0] + 1, self.snake[-1][1]))
elif (self.direction == 'up'):
self.snake.append((self.snake[-1][0], self.snake[-1][1] - 1))
elif (self.direction == 'down'):
self.snake.append((self.snake[-1][0], self.snake[-1][1] + 1))
if (self.snake[-1] == tuple(self.apple)):
self.add_snakes_length(self.direction)
self.spawn_apple()
if (self.snake[-1] in self.snake[:-1]):
self.loop = False
print(f'You lose. Score: {len(self.snake) - self.settings.snake_len}')
if (self.snake[-1][0] < 0 or self.snake[-1][1] < 0 or self.snake[-1][0] >= self.settings.gridsize or self.snake[-1][1] >= self.settings.gridsize):
self.loop = False
print(f'You lose. Score: {len(self.snake) - self.settings.snake_len}')
def spawn_apple(self):
in_snake = True
while (in_snake):
apple_x = random.randint(0, self.settings.gridsize - 1)
apple_y = random.randint(0, self.settings.gridsize - 1)
if ((apple_x, apple_y) not in self.snake and (apple_x, apple_y) != self.apple):
in_snake = False
self.apple = [apple_x, apple_y]
def add_snakes_length(self, direction):
if (direction == 'up'):
self.snake.insert(0, (self.snake[0][0], self.snake[0][1] + 1))
elif (direction == 'down'):
self.snake.insert(0, (self.snake[0][0], self.snake[0][1] - 1))
elif (direction == 'left'):
self.snake.insert(0, (self.snake[0][0] + 1, self.snake[0][1]))
elif (direction == 'right'):
self.snake.insert(0, (self.snake[0][0] - 1, self.snake[0][1]))
def draw(self):
cellsize = self.settings.cellsize
gfxdraw.box(self.display, (self.apple[0] * cellsize, self.apple[1] * cellsize, cellsize, cellsize), self.settings.apple_color)
for x, y in self.snake:
gfxdraw.box(self.display, (x * cellsize, y * cellsize, cellsize, cellsize), self.settings.snake_color)
game = Game(settings)
game.start() | 33.543624 | 146 | 0.652461 | 772 | 4,998 | 4.130829 | 0.178756 | 0.118532 | 0.047037 | 0.027595 | 0.343995 | 0.240201 | 0.207275 | 0.158984 | 0.158984 | 0.158984 | 0 | 0.02553 | 0.169268 | 4,998 | 149 | 147 | 33.543624 | 0.742534 | 0.014606 | 0 | 0.121951 | 0 | 0 | 0.08352 | 0.009754 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073171 | false | 0 | 0.03252 | 0 | 0.138211 | 0.01626 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
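file_handler expects one "name - value" pair per line, with the key names recognized in Settings and colours given as space-separated RGB triples. A settings.txt that parses cleanly (values illustrative):

size of cell - 20
size of grid - 25
snake colour - 10 240 100
apple colour - 240 10 10
default length - 3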
ccd7c7cddab93c97ea1df7fdefa52d6f4a71efc0 | 484 | py | Python | static/python/demo.py | Nota-Bene/Nota-Bene.github.io | 57c0a25176627263bb9403e8f660d36cffa9882b | [
"MIT"
] | null | null | null | static/python/demo.py | Nota-Bene/Nota-Bene.github.io | 57c0a25176627263bb9403e8f660d36cffa9882b | [
"MIT"
] | null | null | null | static/python/demo.py | Nota-Bene/Nota-Bene.github.io | 57c0a25176627263bb9403e8f660d36cffa9882b | [
"MIT"
] | null | null | null | import time
import random
def parse(text):
tokens = text.split(" ")
parsedTokens = []
time.sleep(5)
for i in range(200000): # burn a little CPU to simulate real parsing work (demo only)
test = random.randint(1, 8) + random.randint(-4, 90)
for token in tokens:
if token == "":
continue
parsedTokens.append({
"text": token,
"lemma": token,
"pos": "verb",
"decl": "3rd person singular future tense",
"gloss": ["a test definition", "a second test definition"]
})
return parsedTokens
| 23.047619 | 64 | 0.60124 | 59 | 484 | 4.932203 | 0.694915 | 0.089347 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036011 | 0.254132 | 484 | 20 | 65 | 24.2 | 0.770083 | 0 | 0 | 0 | 0 | 0 | 0.204545 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.105263 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ccdb25e847708f6452a1e362c123c06d0c2c27e2 | 2,961 | py | Python | pytorch/torchnet.py | sjliu68/Remote-Sensing-Image-Classification | 9bd5ec28380961c9e66288dd75c998425622043e | [
"MIT"
] | 32 | 2020-09-10T12:54:09.000Z | 2022-03-21T08:55:29.000Z | pytorch/torchnet.py | sjliu68/Remote-Sensing-Image-Classification | 9bd5ec28380961c9e66288dd75c998425622043e | [
"MIT"
] | null | null | null | pytorch/torchnet.py | sjliu68/Remote-Sensing-Image-Classification | 9bd5ec28380961c9e66288dd75c998425622043e | [
"MIT"
] | 19 | 2020-08-10T10:16:47.000Z | 2022-02-17T06:52:14.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 6 10:07:13 2020
@author: sjliu.me@gmail.com
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class wcrn(nn.Module):
def __init__(self, num_classes=9):
super(wcrn, self).__init__()
self.conv1a = nn.Conv2d(103,64,kernel_size=3,stride=1,padding=0,groups=1)
self.conv1b = nn.Conv2d(103,64,kernel_size=1,stride=1,padding=0,groups=1)
self.maxp1 = nn.MaxPool2d(kernel_size=3)
self.maxp2 = nn.MaxPool2d(kernel_size=5)
# self.bn1 = nn.BatchNorm2d(128,eps=0.001,momentum=0.9)
self.bn1 = nn.BatchNorm2d(128)
self.conv2a = nn.Conv2d(128,128,kernel_size=1,stride=1,padding=0,groups=1)
self.conv2b = nn.Conv2d(128,128,kernel_size=1,stride=1,padding=0,groups=1)
self.fc = nn.Linear(128, num_classes)
# torch.nn.init.normal_(self.fc.weight, mean=0, std=0.01)
def forward(self, x):
out = self.conv1a(x)
out1 = self.conv1b(x)
out = self.maxp1(out)
out1 = self.maxp2(out1)
out = torch.cat((out,out1),1)
out1 = self.bn1(out)
out1 = nn.ReLU()(out1)
out1 = self.conv2a(out1)
out1 = nn.ReLU()(out1)
out1 = self.conv2b(out1)
out = torch.add(out,out1)
out = out.reshape(out.size(0), -1)
out = self.fc(out)
return out
class resnet99_avg(nn.Module):
def __init__(self, num_classes=9):
super(resnet99_avg, self).__init__()
self.conv1a = nn.Conv2d(103,32,kernel_size=3,stride=1,padding=0,groups=1)
self.conv1b = nn.Conv2d(103,32,kernel_size=3,stride=1,padding=0,groups=1)
self.bn1 = nn.BatchNorm2d(64,eps=0.001,momentum=0.9)
self.conv2a = nn.Conv2d(64,64,kernel_size=3,stride=1,padding=1,groups=1)
self.conv2b = nn.Conv2d(64,64,kernel_size=3,stride=1,padding=1,groups=1)
self.bn2 = nn.BatchNorm2d(64,eps=0.001,momentum=0.9)
self.conv3a = nn.Conv2d(64,64,kernel_size=3,stride=1,padding=1,groups=1)
self.conv3b = nn.Conv2d(64,64,kernel_size=3,stride=1,padding=1,groups=1)
self.fc = nn.Linear(64, num_classes)
def forward(self, x):
x1 = self.conv1a(x)
x2 = self.conv1b(x)
x1 = torch.cat((x1, x2), dim=1)
x2 = self.bn1(x1)
x2 = nn.ReLU()(x2)
x2 = self.conv2a(x2)
x2 = nn.ReLU()(x2)
x2 = self.conv2b(x2)
x1 = torch.add(x1,x2)
x2 = self.bn2(x1)
x2 = nn.ReLU()(x2)
x2 = self.conv3a(x2)
x2 = nn.ReLU()(x2)
x2 = self.conv3b(x2)
x1 = torch.add(x1,x2)
x1 = nn.AdaptiveAvgPool2d((1,1))(x1)
x1 = x1.reshape(x1.size(0), -1)
out = self.fc(x1)
return out
| 32.538462 | 83 | 0.55049 | 440 | 2,961 | 3.625 | 0.190909 | 0.075235 | 0.087774 | 0.074608 | 0.602508 | 0.576176 | 0.488401 | 0.399373 | 0.399373 | 0.355486 | 0 | 0.121622 | 0.300236 | 2,961 | 90 | 84 | 32.9 | 0.648166 | 0.07126 | 0 | 0.229508 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065574 | false | 0 | 0.04918 | 0 | 0.180328 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
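A shape sanity check for the two networks, assuming the 103-band input hard-coded in their first conv layers and the 5x5 patches that wcrn's pooling arithmetic implies (3x3 conv then 3-pool and 1x1 conv then 5-pool both reduce 5x5 to 1x1); resnet99_avg accepts any spatial size >= 3 thanks to its adaptive average pool:

import torch

x = torch.randn(2, 103, 5, 5)
print(wcrn(num_classes=9)(x).shape)          # torch.Size([2, 9])

x = torch.randn(2, 103, 9, 9)
print(resnet99_avg(num_classes=9)(x).shape)  # torch.Size([2, 9])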
ccdd4e7946bbbb66bc7ecddf26b85179c631159d | 1,012 | py | Python | rpmreq/actions.py | softwarefactory-project/rpmreq | b9b30cf6a184929db23ac86c8cc037592ee8b6be | [
"Apache-2.0"
] | null | null | null | rpmreq/actions.py | softwarefactory-project/rpmreq | b9b30cf6a184929db23ac86c8cc037592ee8b6be | [
"Apache-2.0"
] | null | null | null | rpmreq/actions.py | softwarefactory-project/rpmreq | b9b30cf6a184929db23ac86c8cc037592ee8b6be | [
"Apache-2.0"
] | 1 | 2019-03-10T10:07:04.000Z | 2019-03-10T10:07:04.000Z | import hawkey
import logging
from rpmreq import graph
from rpmreq import query
log = logging.getLogger(__name__)
def build_requires(specs, repos, base_repos=None,
out_data=None, out_image=None,
cache_ttl=3600):
dep_graph = graph.build_requires_graph(
specs=specs, repos=repos, base_repos=base_repos,
cache_ttl=cache_ttl)
graph.break_dep_graph_cycles(dep_graph)
if out_data or out_image:
graph.dump_dep_graph(dep_graph,
out_data=out_data,
out_image=out_image)
return graph.parse_dep_graph(dep_graph)
def last_version(dep, repos):
"""
Return latest package meeting dep
or latest version of dep regardless of version range.
:param dep: dependency to meet
:param repos: repos to query
:return: DepQueryResult, see rpmreq.query.query_dep
"""
sack = query.fetch_repos_sack(repos)
q = hawkey.Query(sack)
return query.query_dep(q, dep)
| 27.351351 | 57 | 0.666008 | 137 | 1,012 | 4.649635 | 0.350365 | 0.087912 | 0.065934 | 0.050235 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005355 | 0.261858 | 1,012 | 36 | 58 | 28.111111 | 0.84739 | 0.197628 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.190476 | 0 | 0.380952 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ccde37610c8f0bf0da8d0c2c4fba8732e91c7b0e | 1,033 | py | Python | app/utils/path_utils.py | Tim-ty-tang/mlflow-fastapi-deploy | c8884a0462fc9f1ce3aa47f9d000af2bffa82123 | [
"MIT"
] | null | null | null | app/utils/path_utils.py | Tim-ty-tang/mlflow-fastapi-deploy | c8884a0462fc9f1ce3aa47f9d000af2bffa82123 | [
"MIT"
] | null | null | null | app/utils/path_utils.py | Tim-ty-tang/mlflow-fastapi-deploy | c8884a0462fc9f1ce3aa47f9d000af2bffa82123 | [
"MIT"
] | null | null | null | from mlflow.tracking import MlflowClient
from urllib.parse import urlparse
def get_prod_path_mlflow_model_mlflow_query(model_name, version, new_bucket, new_path):
client = MlflowClient()
artifact_path_original = None
for mv in client.search_model_versions(f"name='{model_name}'"):
if mv.version == str(version):
artifact_path_original = mv.source
new_mflow_path = None
if artifact_path_original:
if new_bucket and new_path:
o = urlparse(artifact_path_original, allow_fragments=False)
new_mflow_path = f"s3://{new_bucket.strip('/')}/{new_path.strip('/')}/{o.path.strip('/')}"
return {"old_mlflow_path": artifact_path_original,
"new_mflow_path": new_mflow_path}
def get_prod_path_mlflow_model_explicit(model_name, version, new_bucket, new_path):
new_mflow_path = f"s3://{new_bucket.strip('/')}/{new_path.strip('/')}/{model_name}/{version}"
return {"old_mlflow_path": None,
"new_mflow_path": new_mflow_path}
| 38.259259 | 110 | 0.693127 | 140 | 1,033 | 4.714286 | 0.3 | 0.084848 | 0.127273 | 0.072727 | 0.369697 | 0.369697 | 0.221212 | 0.124242 | 0.124242 | 0.124242 | 0 | 0.002378 | 0.185866 | 1,033 | 26 | 111 | 39.730769 | 0.782402 | 0 | 0 | 0.105263 | 0 | 0 | 0.213385 | 0.1387 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.105263 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ccde416df2e474e2d91671ed935f8eaa2f12d8eb | 5,350 | py | Python | 25h8_service.py | openprocurement/robot_tests.broker.25h8 | 619ffd180a8f051ef46d62767d54f4796baa122c | [
"Apache-2.0"
] | null | null | null | 25h8_service.py | openprocurement/robot_tests.broker.25h8 | 619ffd180a8f051ef46d62767d54f4796baa122c | [
"Apache-2.0"
] | 1 | 2017-12-18T13:44:01.000Z | 2017-12-18T13:44:01.000Z | 25h8_service.py | openprocurement/robot_tests.broker.25h8 | 619ffd180a8f051ef46d62767d54f4796baa122c | [
"Apache-2.0"
] | 3 | 2018-06-11T10:30:05.000Z | 2019-08-07T07:55:40.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from iso8601 import parse_date
from pytz import timezone
import urllib
import json
import os
def convert_time(date):
date = datetime.strptime(date, "%d/%m/%Y %H:%M:%S")
return timezone('Europe/Kiev').localize(date).strftime('%Y-%m-%dT%H:%M:%S.%f%z')
def subtract_min_from_date(date, minutes):
date_obj = datetime.strptime(date.split("+")[0], '%Y-%m-%dT%H:%M:%S.%f')
return "{}+{}".format(date_obj - timedelta(minutes=minutes), date.split("+")[1])
def convert_datetime_to_25h8_format(isodate):
iso_dt = parse_date(isodate)
day_string = iso_dt.strftime("%d/%m/%Y %H:%M")
return day_string
def convert_string_from_dict_25h8(string):
return {
u"грн.": u"UAH",
u"True": u"1",
u"False": u"0",
u"Відкриті торги": u"aboveThresholdUA",
u"Відкриті торги з публікацією англ. мовою": u"aboveThresholdEU",
u'Код ДК 021-2015 (CPV)': u'CPV',
u'Код ДК (ДК003)': u'ДК003',
u'Код ДК (ДК018)': u'ДК018',
u'з урахуванням ПДВ': True,
u'з ПДВ': True,
u'без урахуванням ПДВ': False,
u'ОЧIКУВАННЯ ПРОПОЗИЦIЙ': u'active.tendering',
u'ПЕРIОД УТОЧНЕНЬ': u'active.enquiries',
u'АУКЦIОН': u'active.auction',
u'ПРЕКВАЛІФІКАЦІЯ': u'active.pre-qualification',
u'ОСКАРЖЕННЯ ПРЕКВАЛІФІКАЦІЇ': u'active.pre-qualification.stand-still',
u'вимога': u'claim',
u'дано відповідь': u'answered',
u'вирішено': u'resolved',
u'Так': True,
u'Ні': False,
u'на розглядi': u'pending',
u'На розгляді': u'pending',
u'не вирішено(обробляється)': u'pending',
u'відмінено': u'cancelled',
u'відмінена': u'cancelled',
u'Переможець': u'active',
}.get(string, string)
def adapt_procuringEntity(role_name, tender_data):
if role_name == 'tender_owner':
tender_data['data']['procuringEntity']['name'] = u"Ольмек"
tender_data['data']['procuringEntity']['address']['postalCode'] = u"01100"
tender_data['data']['procuringEntity']['address']['region'] = u"місто Київ"
tender_data['data']['procuringEntity']['address']['locality'] = u"Київ"
tender_data['data']['procuringEntity']['address']['streetAddress'] = u"вул. Фрунзе 77"
tender_data['data']['procuringEntity']['identifier']['legalName'] = u"Ольмек"
tender_data['data']['procuringEntity']['identifier']['id'] = u"01234567"
if 'procurementMethodType' in tender_data['data']:
if "above" in tender_data['data']['procurementMethodType']:
tender_data['data']['tenderPeriod']['startDate'] = subtract_min_from_date(
tender_data['data']['tenderPeriod']['startDate'], 1)
return tender_data
def adapt_delivery_data(tender_data):
for index in range(len(tender_data['data']['items'])):
value = tender_data['data']['items'][index]['deliveryAddress']['region']
if value == u"місто Київ":
tender_data['data']['items'][index]['deliveryAddress']['region'] = u"Київ"
return tender_data
def adapt_view_data(value, field_name):
if 'value.amount' in field_name:
value = float(value.split(' ')[0])
elif 'currency' in field_name:
value = value.split(' ')[1]
elif 'valueAddedTaxIncluded' in field_name:
value = ' '.join(value.split(' ')[2:])
elif 'minimalStep.amount' in field_name:
value = float(value.split(' ')[0])
elif 'unit.name' in field_name:
value = value.split(' ')[1]
elif 'quantity' in field_name:
value = float(value.split(' ')[0])
elif 'questions' in field_name and '.date' in field_name:
value = convert_time(value.split(' - ')[0])
elif 'Date' in field_name:
value = convert_time(value)
return convert_string_from_dict_25h8(value)
def adapt_view_item_data(value, field_name):
if 'unit.name' in field_name:
value = ' '.join(value.split(' ')[1:])
elif 'quantity' in field_name:
value = float(value.split(' ')[0])
elif 'Date' in field_name:
value = convert_time(value)
return convert_string_from_dict_25h8(value)
def get_related_elem_description(tender_data, feature, item_id):
if item_id == "":
for elem in tender_data['data']['{}s'.format(feature['featureOf'])]:
if feature['relatedItem'] == elem['id']:
return elem['description']
else:
return item_id
def custom_download_file(url, file_name, output_dir):
urllib.urlretrieve(url, ('{}/{}'.format(output_dir, file_name)))
def add_second_sign_after_point(amount):
amount = str(repr(amount))
if '.' in amount and len(amount.split('.')[1]) == 1:
amount += '0'
return amount
def get_bid_phone(internal_id, bid_index):
r = urllib.urlopen('https://lb.api-sandbox.openprocurement.org/api/2.3/tenders/{}'.format(internal_id)).read()
tender = json.loads(r)
bid_id = tender['data']['qualifications'][int(bid_index)]["bidID"]
for bid in tender['data']['bids']:
if bid['id'] == bid_id:
return bid['tenderers'][0]['contactPoint']['telephone']
def get_upload_file_path():
return os.path.join(os.getcwd(), 'src/robot_tests.broker.25h8/testFileForUpload.txt') | 37.152778 | 114 | 0.629533 | 697 | 5,350 | 4.677188 | 0.308465 | 0.067485 | 0.064417 | 0.053988 | 0.328221 | 0.240798 | 0.184356 | 0.136503 | 0.112577 | 0.112577 | 0 | 0.017294 | 0.200187 | 5,350 | 144 | 115 | 37.152778 | 0.744566 | 0.007103 | 0 | 0.13913 | 0 | 0 | 0.281868 | 0.04067 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113043 | false | 0 | 0.052174 | 0.017391 | 0.278261 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
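How the two date helpers compose, for a sample timestamp (the UTC offset shown assumes Kiev standard time; the actual output depends on the date's DST status):

iso = convert_time("30/10/2019 10:00:00")
# e.g. '2019-10-30T10:00:00.000000+0200'
earlier = subtract_min_from_date(iso, 1)
# -> '2019-10-30 09:59:00+0200' (the format follows str() of datetime)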
cce181f9f38d4d3c462cfc4fd68ac4c8d8aebe76 | 6,574 | py | Python | connect_four.py | seanstappas/dynamic-connect-4 | f6106f71ac8779513cd80a2f46397bb778e21018 | [
"MIT"
] | 1 | 2020-08-21T03:05:08.000Z | 2020-08-21T03:05:08.000Z | connect_four.py | seanstappas/dynamic-connect-4 | f6106f71ac8779513cd80a2f46397bb778e21018 | [
"MIT"
] | null | null | null | connect_four.py | seanstappas/dynamic-connect-4 | f6106f71ac8779513cd80a2f46397bb778e21018 | [
"MIT"
] | null | null | null | from __future__ import print_function
NUM_ROWS = 7
NUM_COLS = 7
DIRECTIONS = ('E', 'W', 'N', 'S')
MOVEMENT_DIFFS = {
'N': (0, -1),
'S': (0, 1),
'E': (1, 0),
'W': (-1, 0)
}
X_MOVEMENT_DIFFS = {
'N': 0,
'S': 0,
'E': 1,
'W': -1
}
Y_MOVEMENT_DIFFS = {
'N': -1,
'S': 1,
'E': 0,
'W': 0
}
def actions_and_successors(state, white_player=True):
"""
Returns a list of action, successor tuples resulting from the given state.
:param state: the state to get successors of
:param white_player: True if the current player is white, False otherwise
:return: a list of action, successor tuples resulting from the given state.
"""
return [(a, result(state, a, white_player)) for a in actions(state, white_player)]
def print_state(state):
"""
Prints the given state.
:param state: the state to print
"""
print(' ', end=' ')
for col in range(NUM_COLS):
print(col + 1, end=' ')
print()
for row in range(NUM_ROWS):
print(row + 1, end=' ')
for col in range(NUM_COLS):
if (col + 1, row + 1) in state[0]:
print('O', end='')
elif (col + 1, row + 1) in state[1]:
print('X', end='')
else:
print(' ', end='')
if col < NUM_COLS - 1:
print(',', end='')
print()
def str_to_state(str_state):
"""
Returns a state corresponding to the provided string representation. Here is an example of a valid state:
, , , , , ,X
, , , , ,X,
, , , , ,O,X
,X,O, , , ,X
, , , , ,O,
,O,X, , , ,
O, , , ,O, ,
:param str_state: a string representation of the board
:return: the corresponding state
"""
white_squares = []
black_squares = []
y = 1
for row in str_state.splitlines():
x = 1
for square in row.split(','):
if square == ',':
continue
if square == 'O':
white_squares.append((x, y))
elif square == 'X':
black_squares.append((x, y))
x += 1
y += 1
return tuple(white_squares), tuple(black_squares)
def is_within_bounds(x, y):
"""
:return: True if the given x, y coordinates are within the bounds of the board
"""
return 0 < x <= NUM_COLS and 0 < y <= NUM_ROWS
def is_free_square(state, x, y):
"""
:return: True if the given x, y coordinates are free spots, given the provided state
"""
return (x, y) not in state[0] and (x, y) not in state[1]
def is_valid_action(state, x, y, direction):
"""
Checks if moving the piece at given x, y coordinates in the given direction is valid, given the current state.
:param state: the current state
:param x: the x coordinate of the piece
:param y: the y coordinate of the piece
:param direction: the direction to travel with this action
:return: True if the action is valid, False otherwise
"""
new_x = x + X_MOVEMENT_DIFFS[direction]
new_y = y + Y_MOVEMENT_DIFFS[direction]
return is_within_bounds(new_x, new_y) and is_free_square(state, new_x, new_y)
def occupied_squares_by_player(state, white_player):
"""
Returns the the x, y coordinates of the squares occupied by the given player.
:param state: the given state
:param white_player: True if the current player is white, False otherwise
:return: the x, y coordinates of the squares occupied by the given player.
"""
return state[0] if white_player else state[1]
def actions(state, white_player=True):
"""
Returns the actions available to the given player in the given state.
:param state: the current state
:param white_player: True if the current player is white, False otherwise
:return: the actions available to the given player in the given state
"""
return [(x, y, direction)
for (x, y) in occupied_squares_by_player(state, white_player)
for direction in DIRECTIONS
if is_valid_action(state, x, y, direction)]
def action_str_to_tuple(a):
"""
Converts the provided action string to a tuple
:param a: the action, in string form. For example: '13E'.
:return: the action in tuple form
"""
if a is not None and '1' <= a[0] <= '7' and '1' <= a[1] <= '7' and a[2] in DIRECTIONS:
return int(a[0]), int(a[1]), a[2]
else:
return None
def action_tuple_to_str(action):
"""
Converts the provided action tuple to a string.
:param action: the action
:return: a string representation of the action tuple
"""
if action is None:
return None
return str(action[0]) + str(action[1]) + action[2]
def result(state, action, white_player=True):
"""
Returns the resulting state when the given action is applied to the given state.
:param state: the current state
:param action: the action to apply
:param white_player: True if the current player is white, False otherwise
:return: the resulting state when the given action is applied to the given state
"""
if white_player:
return result_tuple(state, action, white_player), state[1]
else:
return state[0], result_tuple(state, action, white_player)
def result_tuple(s, a, white_player):
"""
Returns the x, y coordinates of the pieces of the given player when the given action is applied to the given state.
:param s: the current state
:param a: the action to apply
:param white_player: True if the current player is white, False otherwise
:return: the x, y coordinates of the pieces of the given player when the given action is applied to the given state
"""
old_x = a[0]
old_y = a[1]
direction = a[2]
new_x = old_x + X_MOVEMENT_DIFFS[direction]
new_y = old_y + Y_MOVEMENT_DIFFS[direction]
return tuple((x, y) if x != old_x or y != old_y else (new_x, new_y)
for (x, y) in occupied_squares_by_player(s, white_player))
def file_to_state(file_name):
"""
Converts the board given by the provided file to a state. Here is an example of a valid state:
, , , , , ,X
, , , , ,X,
, , , , ,O,X
,X,O, , , ,X
, , , , ,O,
,O,X, , , ,
O, , , ,O, ,
:param file_name: the name of the file containing the state
:return: a state corresponding to the board
"""
with open(file_name, 'r') as state_file:
string_state = state_file.read()
state = str_to_state(string_state)
return state
| 29.479821 | 119 | 0.605872 | 979 | 6,574 | 3.95097 | 0.117467 | 0.04757 | 0.033609 | 0.027921 | 0.505946 | 0.441313 | 0.400982 | 0.33635 | 0.307135 | 0.295502 | 0 | 0.012102 | 0.283541 | 6,574 | 222 | 120 | 29.612613 | 0.80913 | 0.434134 | 0 | 0.111111 | 0 | 0 | 0.009868 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131313 | false | 0 | 0.010101 | 0 | 0.292929 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
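A short round-trip with the helpers above, on a tiny hand-built position:

state = str_to_state(
    "O, , , , , ,\n"
    " , ,X, , , ,\n"
    " , , , , , ,\n"
    " , , , , , ,\n"
    " , , , , , ,\n"
    " , , , , , ,\n"
    " , , , , , ,X"
)
print_state(state)
print(actions(state, white_player=True))
# the white piece at (1, 1) can move E or S: [(1, 1, 'E'), (1, 1, 'S')]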
cce3bf69f8b9a7979f41b539d693ed801301a438 | 3,781 | py | Python | pyampute/tests/test_mapping.py | RianneSchouten/pyampute | 98de0d5591546f958b0106217f60df92dc00fbb9 | [
"BSD-3-Clause"
] | 3 | 2022-02-14T02:02:23.000Z | 2022-02-20T09:52:41.000Z | pyampute/tests/test_mapping.py | flacle/pyampute | 8785f62c52a762dfc3113abe3610ba4893ef5f4b | [
"BSD-3-Clause"
] | 24 | 2022-01-26T15:42:13.000Z | 2022-03-12T15:49:56.000Z | pyampute/tests/test_mapping.py | flacle/pyampute | 8785f62c52a762dfc3113abe3610ba4893ef5f4b | [
"BSD-3-Clause"
] | 1 | 2022-02-15T19:15:42.000Z | 2022-02-15T19:15:42.000Z | import numpy as np
import pandas as pd
import unittest
from pyampute.ampute import MultivariateAmputation
from pyampute.exploration.md_patterns import mdPatterns
class TestMapping(unittest.TestCase):
'''
This class tests the example code in the blogpost "A mapping from R-function ampute to pyampute"
'''
def setUp(self) -> None:
super().setUp()
self.n = 10000
self.nhanes2_sim = np.random.randn(10000, 4)
try:
self.nhanes2_orig = pd.read_csv("data/nhanes2.csv")
except Exception:
print("CSV file failed to load.")
def test_patterns(self):
mdp = mdPatterns()
mypatterns = mdp.get_patterns(self.nhanes2_orig, show_plot=False)
self.assertEqual(mypatterns.shape, (6, 6))
self.assertListEqual(
mypatterns.iloc[1:-1, 1:-1].values.tolist(),
[[1, 1, 1, 0], [1, 1, 0, 1], [1, 0, 0, 1], [1, 0, 0, 0]])
ma = MultivariateAmputation(
patterns=[
{'incomplete_vars': [3]},
{'incomplete_vars': [2]},
{'incomplete_vars': [1, 2]},
{'incomplete_vars': [1, 2, 3]}
]
)
nhanes2_incomplete = ma.fit_transform(self.nhanes2_sim)
mdp = mdPatterns()
mypatterns = mdp.get_patterns(nhanes2_incomplete, show_plot=False)
self.assertEqual(mypatterns.shape, (6, 6))
self.assertListEqual(
mypatterns["n_missing_values"].values[:-1].astype(int).tolist(),
[0, 1, 1, 2, 3])
def test_proportions(self):
ma = MultivariateAmputation(
patterns=[
{'incomplete_vars': [3], 'freq': 0.1},
{'incomplete_vars': [2], 'freq': 0.6},
{'incomplete_vars': [1, 2], 'freq': 0.2},
{'incomplete_vars': [1, 2, 3], 'freq': 0.1}
],
prop=0.3)
nhanes2_incomplete = ma.fit_transform(self.nhanes2_sim)
mdp = mdPatterns()
mypatterns = mdp.get_patterns(nhanes2_incomplete, show_plot=False)
self.assertListEqual(
mypatterns.columns.values.tolist(),
["row_count", 0, 3, 1, 2, "n_missing_values"]
)
self.assertAlmostEqual(
mypatterns.loc[1, "row_count"],
0.3 * 0.6 * self.n,
delta=0.05 * self.n,
)
def test_mechanisms(self):
ma = MultivariateAmputation(
patterns=[
{'incomplete_vars': [3], 'mechanism': "MCAR"},
{'incomplete_vars': [2]},
{'incomplete_vars': [1, 2], 'mechanism': "MNAR"},
{'incomplete_vars': [1, 2, 3]}
]
)
nhanes2_incomplete = ma.fit_transform(self.nhanes2_sim)
self.assertEqual(ma.patterns[0]['mechanism'], "MCAR")
self.assertEqual(ma.patterns[2]['mechanism'], "MNAR")
self.assertListEqual(ma.mechanisms.tolist(), ["MCAR", "MAR", "MNAR", "MAR"])
def test_weights(self):
ma = MultivariateAmputation(
patterns=[
{'incomplete_vars': [3], 'weights': [0, 4, 1, 0]},
{'incomplete_vars': [2]},
{'incomplete_vars': [1, 2], 'mechanism': "MNAR"},
{'incomplete_vars': [1, 2, 3], 'weights': {0: -2, 3: 1}, 'mechanism': "MAR+MNAR"}
]
)
nhanes2_incomplete = ma.fit_transform(self.nhanes2_sim)
mdp = mdPatterns()
mypatterns = mdp.get_patterns(nhanes2_incomplete, show_plot=False)
self.assertListEqual(
ma.weights.tolist(),
[[0, 4, 1, 0], [1, 1, 0, 1], [0, 1, 1, 0], [-2, 0, 0, 1]]
)
        # assertTrue(x, msg) never fails for a truthy x; assertEqual is what was meant.
        self.assertEqual(len(ma.wss_per_pattern), 4)
if __name__ == "__main__":
unittest.main()
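# Minimal usage sketch, assuming only the API exercised above:
#
#   X = np.random.randn(1000, 4)
#   ma = MultivariateAmputation(patterns=[{'incomplete_vars': [3]}], prop=0.3)
#   X_incomplete = ma.fit_transform(X)  # ~30% of rows lose variable 3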
| 31.773109 | 100 | 0.539275 | 420 | 3,781 | 4.702381 | 0.242857 | 0.113418 | 0.060759 | 0.06481 | 0.508354 | 0.503797 | 0.444557 | 0.351392 | 0.351392 | 0.351392 | 0 | 0.051056 | 0.311029 | 3,781 | 118 | 101 | 32.042373 | 0.707102 | 0.02539 | 0 | 0.359551 | 0 | 0 | 0.126465 | 0 | 0 | 0 | 0 | 0 | 0.123596 | 1 | 0.05618 | false | 0 | 0.05618 | 0 | 0.123596 | 0.011236 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cce90a7d4c65cdb80621e7e28495129d068861f2 | 2,092 | py | Python | Synchro-Update-Vols.py | Apoorb/HCS_Synchro-Reader | d89428069584f420d584e2011a5cf21cd0a51f8b | [
"MIT"
] | null | null | null | Synchro-Update-Vols.py | Apoorb/HCS_Synchro-Reader | d89428069584f420d584e2011a5cf21cd0a51f8b | [
"MIT"
] | null | null | null | Synchro-Update-Vols.py | Apoorb/HCS_Synchro-Reader | d89428069584f420d584e2011a5cf21cd0a51f8b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 30 10:02:36 2019
@author: abibeka
Purpose: Batch update synchro volumes
"""
# 0.0 Housekeeping. Clear variable space
#******************************************************************************************
from IPython import get_ipython  # run magic commands
ipython = get_ipython()
if ipython is not None:  # the reset magic is only available inside IPython
    ipython.magic("reset -f")
import os
import pandas as pd
import numpy as np
import csv
os.chdir(r'C:\Users\abibeka\OneDrive - Kittelson & Associates, Inc\Documents\RampMetering\operations\Synchro')
# Read the volume data
dat = pd.read_csv('VOLUME.CSV',skiprows=2)
dat.fillna('',inplace=True)
dat2 = dat
dat2 = dat2.drop(columns = 'DATE')
dat2.rename(columns = {'TIME': 'RECORDNAME'},inplace=True)
dat2.RECORDNAME = 'Volume'
# Scale the volume data
#Number of Years = 2040 - 2016
NumYears = 2040 - 2016
GrowthRates = [0,1,2] # percent per year
NetGrowthCalc = lambda x: (1+x/100)**NumYears
NetGrowthRate = list(map(NetGrowthCalc,GrowthRates))
NetGrowthRate
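# Sanity check, computed by hand: over the 24 years, annual growth of 0/1/2 %
# compounds to net factors of 1.0, ~1.27 and ~1.61.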
def Output2040Vols(datCp = dat2, NetGrowthRt = 1):
    # Work on a copy: scaling datCp in place would compound the growth factor
    # across the three calls at the bottom of this script.
    datCp = datCp.copy()
    datCp.iloc[:,2:] = datCp.iloc[:,2:].applymap(lambda x: x if not x else round(x*NetGrowthRt))
#Change volume data and columns to list --- so it can be written
dat2Write = datCp.values.tolist()
    #Read the first three lines of the csv file separately
    with open('VOLUME.CSV', 'r') as readFile:  # match the filename case used at the top of the script
reader = csv.reader(readFile)
lines = list(reader)
Header = lines[0:3]
Header[0] = ['[Lanes]']
Header[1] =['Lane Group Data']
Header[2][0] = 'RECORDNAME'
Header[2].remove('TIME')
    #Write the three header lines, then the column names and volume data
with open('Volume2040_NetGrwRt_{}.csv'.format(round(NetGrowthRt,2)), 'w', newline = '') as writeFile:
writer = csv.writer(writeFile)
writer.writerows(Header)
writer.writerows(dat2Write)
Output2040Vols(datCp = dat2, NetGrowthRt = NetGrowthRate[0])
Output2040Vols(datCp = dat2, NetGrowthRt = NetGrowthRate[1])
Output2040Vols(datCp = dat2, NetGrowthRt = NetGrowthRate[2])
| 31.223881 | 110 | 0.663002 | 273 | 2,092 | 5.058608 | 0.494505 | 0.055033 | 0.066618 | 0.098479 | 0.128168 | 0.026068 | 0 | 0 | 0 | 0 | 0 | 0.049397 | 0.167782 | 2,092 | 66 | 111 | 31.69697 | 0.743825 | 0.25 | 0 | 0.051282 | 0 | 0.025641 | 0.137864 | 0.062136 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025641 | false | 0 | 0.128205 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ccea292c2e611183f7753e3277974ac6da80216f | 5,695 | py | Python | matplotlib.indigoPlugin/Contents/Server Plugin/chart_multiline.py | DaveL17/matplotlib | 857daf4222390d021defb87b57c3360fa12af5ab | [
"MIT"
] | 4 | 2017-08-27T16:53:56.000Z | 2022-03-27T10:48:02.000Z | matplotlib.indigoPlugin/Contents/Server Plugin/chart_multiline.py | DaveL17/matplotlib | 857daf4222390d021defb87b57c3360fa12af5ab | [
"MIT"
] | 3 | 2019-01-30T20:04:00.000Z | 2021-06-21T02:11:17.000Z | matplotlib.indigoPlugin/Contents/Server Plugin/chart_multiline.py | DaveL17/matplotlib | 857daf4222390d021defb87b57c3360fa12af5ab | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Creates the multiline text charts
Given the unique nature of multiline text charts, we use a separate method
to construct them.
-----
"""
# Built-in Modules
import pickle
import sys
import textwrap
import traceback
# Third-party Modules
# Note the order and structure of matplotlib imports is intentional.
import matplotlib
matplotlib.use('AGG') # Note: this statement must be run before any other matplotlib imports are done.
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# My modules
import chart_tools
log = chart_tools.log
payload = chart_tools.payload
p_dict = payload['p_dict']
k_dict = payload['k_dict']
props = payload['props']
chart_name = props['name']
plug_dict = payload['prefs']
text_to_plot = payload['data']
log['Threaddebug'].append(u"chart_multiline.py called.")
if plug_dict['verboseLogging']:
chart_tools.log['Threaddebug'].append(u"{0}".format(payload))
try:
def __init__():
pass
def clean_string(val):
"""
Cleans long strings of whitespace and formats certain characters
The clean_string(self, val) method is used to scrub multiline text elements in
order to try to make them more presentable. The need is easily seen by looking
at the rough text that is provided by the U.S. National Weather Service, for
example.
-----
:param unicode val:
:return val:
"""
# List of (elements, replacements)
clean_list = ((' am ', ' AM '),
(' pm ', ' PM '),
('*', ' '),
('\u000A', ' '),
('...', ' '),
('/ ', '/'),
(' /', '/'),
('/', ' / ')
)
# Take the old, and replace it with the new.
for (old, new) in clean_list:
val = val.replace(old, new)
val = ' '.join(val.split()) # Eliminate spans of whitespace.
return val
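    # Example (a sketch): clean_string(u'Gusty winds...likely after 5 pm ')
    # returns u'Gusty winds likely after 5 PM'.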
p_dict['figureWidth'] = float(props['figureWidth'])
p_dict['figureHeight'] = float(props['figureHeight'])
try:
height = int(props.get('figureHeight', 300)) / int(plt.rcParams['savefig.dpi'])
if height < 1:
height = 1
chart_tools.log['Warning'].append(u"[{n}] Height: Pixels / DPI can not be less than one. Coercing to "
u"one.".format(n=chart_name)
)
except ValueError:
height = 3
try:
width = int(props.get('figureWidth', 500)) / int(plt.rcParams['savefig.dpi'])
if width < 1:
width = 1
chart_tools.log['Warning'].append(u"[{n}] Width: Pixels / DPI can not be less than one. Coercing to "
u"one.".format(n=chart_name)
)
except ValueError:
width = 5
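    # Example: with plt.rcParams['savefig.dpi'] at the classic default of 100,
    # a 300 px request yields a 3 in figure dimension (300 / 100).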
fig = plt.figure(figsize=(width, height))
ax = fig.add_subplot(111)
ax.axis('off')
    # If the value to be plotted is empty (one character or less), fall back to
    # the default text from the device configuration.
    if len(text_to_plot) <= 1:
text_to_plot = unicode(p_dict['defaultText'])
else:
# The clean_string method tries to remove some potential ugliness from the text
# to be plotted. It's optional--defaulted to on. No need to call this if the
# default text is used.
if p_dict['cleanTheText']:
text_to_plot = clean_string(val=text_to_plot)
if plug_dict['verboseLogging']:
chart_tools.log['Threaddebug'].append(u"[{n}] Data: {t}".format(n=chart_name, t=text_to_plot))
# Wrap the text and prepare it for plotting.
text_to_plot = textwrap.fill(text=text_to_plot,
width=int(p_dict['numberOfCharacters']),
replace_whitespace=p_dict['cleanTheText']
)
ax.text(0.01, 0.95,
text_to_plot,
transform=ax.transAxes,
color=p_dict['textColor'],
fontname=p_dict['fontMain'],
fontsize=p_dict['multilineFontSize'],
verticalalignment='top'
)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
    if not p_dict['textAreaBorder']:
        for s in ax.spines.values():  # hide the frame; a side-effect list comp was used before
            s.set_visible(False)
# Transparent Charts Fill
if p_dict['transparent_charts'] and p_dict['transparent_filled']:
ax.add_patch(patches.Rectangle((0, 0), 1, 1,
transform=ax.transAxes,
facecolor=p_dict['faceColor'],
zorder=1
)
)
# =============================== Format Title ================================
chart_tools.format_title(p_dict=p_dict, k_dict=k_dict, loc=(0.5, 0.98), align='center')
# Note that subplots_adjust affects the space surrounding the subplots and not
# the fig.
plt.subplots_adjust(top=0.98,
bottom=0.05,
left=0.02,
right=0.98,
hspace=None,
wspace=None
)
chart_tools.save(logger=log)
except (KeyError, IndexError, ValueError, UnicodeEncodeError) as sub_error:
tb = traceback.format_exc()
chart_tools.log['Critical'].append(u"[{n}] {s}".format(n=chart_name, s=tb))
pickle.dump(chart_tools.log, sys.stdout)
| 33.5 | 114 | 0.545391 | 666 | 5,695 | 4.537538 | 0.37988 | 0.028127 | 0.029782 | 0.021178 | 0.121774 | 0.121774 | 0.104567 | 0.104567 | 0.085374 | 0.085374 | 0 | 0.012881 | 0.332046 | 5,695 | 169 | 115 | 33.698225 | 0.781546 | 0.231958 | 0 | 0.126214 | 0 | 0 | 0.139442 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019417 | false | 0.009709 | 0.07767 | 0 | 0.106796 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cceb89993da7a1b69dbd4927d78012815ff5b4de | 1,814 | py | Python | sdks/apigw-manager/tests/apigw_manager/apigw/test_command.py | IMBlues/bkpaas-python-sdk | a87bee3d26f0ddeac124c7a4679cd3eff4abb8fc | [
"MIT"
] | 17 | 2021-08-03T03:15:35.000Z | 2022-03-18T06:10:04.000Z | sdks/apigw-manager/tests/apigw_manager/apigw/test_command.py | piglei/bkpaas-python-sdk | 3dfea8be5702ccea1228691c6c1c3e87a27238d2 | [
"MIT"
] | 7 | 2021-08-03T07:10:12.000Z | 2022-03-23T04:47:22.000Z | sdks/apigw-manager/tests/apigw_manager/apigw/test_command.py | piglei/bkpaas-python-sdk | 3dfea8be5702ccea1228691c6c1c3e87a27238d2 | [
"MIT"
] | 9 | 2021-08-03T03:20:36.000Z | 2022-03-08T13:47:50.000Z | # -*- coding: utf-8 -*-
"""
* TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-蓝鲸 PaaS 平台(BlueKing-PaaS) available.
* Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
"""
import pytest
from apigw_manager.apigw import command
class TestApiCommand:
@pytest.fixture(autouse=True)
def setup_command(self):
self.command = command.ApiCommand()
def test_get_configuration(self, configuration):
result = self.command.get_configuration()
assert configuration.api_name == result.api_name
assert configuration.host == result.host
def test_get_configuration_with_args(self, faker):
        api_name = faker.color()  # call the provider; the bound method itself was being passed before
host = faker.url()
result = self.command.get_configuration(api_name=api_name, host=host)
assert api_name == result.api_name
assert host.startswith(result.host)
class TestDefinitionCommand:
@pytest.fixture(autouse=True)
def setup_command(self):
self.command = command.DefinitionCommand()
def test_get_context(self):
context = self.command.get_context(["a:1", "b:2"])
assert "settings" in context
assert "environ" in context
assert context["data"]["a"] == 1
assert context["data"]["b"] == 2
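# The asserts above pin down the context-string convention this test assumes:
# each "key:value" argument lands in context["data"] with numeric-looking
# values coerced ("a:1" -> {"a": 1}).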
| 37.791667 | 118 | 0.711136 | 242 | 1,814 | 5.243802 | 0.5 | 0.038613 | 0.023641 | 0.025217 | 0.189125 | 0.137116 | 0.096139 | 0.096139 | 0.096139 | 0.096139 | 0 | 0.010316 | 0.198456 | 1,814 | 47 | 119 | 38.595745 | 0.862448 | 0.400772 | 0 | 0.153846 | 0 | 0 | 0.028918 | 0 | 0 | 0 | 0 | 0 | 0.307692 | 1 | 0.192308 | false | 0 | 0.076923 | 0 | 0.346154 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ccedbc549ffc192d24b74e12a04695a4d740a2b9 | 1,409 | py | Python | sources/base.py | chenders/hours | 878e0fa57ad4810851fd2bab529e7e1525cf9fbb | [
"MIT"
] | null | null | null | sources/base.py | chenders/hours | 878e0fa57ad4810851fd2bab529e7e1525cf9fbb | [
"MIT"
] | 6 | 2015-01-28T00:48:39.000Z | 2015-01-28T00:51:48.000Z | sources/base.py | chenders/hours | 878e0fa57ad4810851fd2bab529e7e1525cf9fbb | [
"MIT"
] | null | null | null | import pytz
from datetime import timedelta
from dateutil import parser
from django.utils.text import Truncator
from django.db import IntegrityError
from core.models import Data
class HoursDataSource(object):
def __init__(self, start_date, end_date):
self.entries = []
self.start_date = start_date
self.end_date = end_date
def truncate(self, text, length):
return Truncator(text).chars(length)
def date_within_bounds(self, date, give_or_take=None):
start_date = self.start_date
end_date = self.end_date
if give_or_take is not None:
start_date -= give_or_take
end_date += give_or_take
return start_date <= date <= end_date
def get_group_date(self, date):
return date + timedelta(days=-date.weekday())
# return date.replace(day=1)
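    # Example: get_group_date(date(2015, 1, 28)) -> date(2015, 1, 26), the
    # Monday that starts that week.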
    def add_entry(self, date, title, mouseover, url, css_class):
        try:
            Data.objects.create(date=date, title=title, mouseover=mouseover,
                                url=url, css_class=css_class)
        except IntegrityError:
            # The entry already exists (unique constraint); skip the duplicate.
            pass
| 31.311111 | 76 | 0.648687 | 190 | 1,409 | 4.536842 | 0.3 | 0.114849 | 0.092807 | 0.097448 | 0.408353 | 0.408353 | 0.38051 | 0.38051 | 0.38051 | 0.38051 | 0 | 0.000978 | 0.274663 | 1,409 | 44 | 77 | 32.022727 | 0.842466 | 0.018453 | 0 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.171429 | false | 0.028571 | 0.171429 | 0.057143 | 0.485714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ccf0191c3e20264408fdd3e37fe77537c8aef935 | 7,725 | py | Python | tests/datastructures_tests/intensity_data_test.py | czbiohub/reconstruct-order | e729ae3871aea0a5ec2d42744a9448c7f0a93037 | [
"Unlicense"
] | 6 | 2019-10-30T23:00:01.000Z | 2021-03-02T19:09:07.000Z | tests/datastructures_tests/intensity_data_test.py | czbiohub/ReconstructOrder | e729ae3871aea0a5ec2d42744a9448c7f0a93037 | [
"Unlicense"
] | 14 | 2019-07-08T22:51:29.000Z | 2019-07-13T15:44:01.000Z | tests/datastructures_tests/intensity_data_test.py | mehta-lab/reconstruct-order | e729ae3871aea0a5ec2d42744a9448c7f0a93037 | [
"Unlicense"
] | 2 | 2020-05-02T23:28:36.000Z | 2020-07-16T23:46:46.000Z | import numpy as np
import pytest
from numpy.testing import assert_array_equal
from ReconstructOrder.datastructures.intensity_data import IntensityData
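# The fixtures used below (setup_temp_data, setup_intensity_data,
# setup_ndarrays) are not defined in this file; they are assumed to live in a
# neighbouring conftest.py.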
# ==== test basic construction =====
def test_basic_constructor_nparray():
"""
test assignment using numpy arrays
"""
int_data = IntensityData()
a = np.ones((512, 512))
b = 2*np.ones((512, 512))
c = 3*np.ones((512, 512))
d = 4*np.ones((512, 512))
e = 5*np.ones((512, 512))
int_data.append_image(a)
int_data.append_image(b)
int_data.append_image(c)
int_data.append_image(d)
int_data.append_image(e)
assert_array_equal(int_data.get_image(0), a)
assert_array_equal(int_data.get_image(1), b)
assert_array_equal(int_data.get_image(2), c)
assert_array_equal(int_data.get_image(3), d)
assert_array_equal(int_data.get_image(4), e)
assert_array_equal(int_data.data, np.array([a, b, c, d, e]))
def test_basic_constructor_memap(setup_temp_data):
"""
test assignment using memory mapped files
"""
mm = setup_temp_data
int_data = IntensityData()
int_data.append_image(mm)
int_data.append_image(2 * mm)
int_data.append_image(3 * mm)
int_data.append_image(4 * mm)
int_data.append_image(5 * mm)
assert_array_equal(int_data.get_image(0), mm)
assert_array_equal(int_data.get_image(1), 2*mm)
assert_array_equal(int_data.get_image(2), 3*mm)
assert_array_equal(int_data.get_image(3), 4*mm)
assert_array_equal(int_data.get_image(4), 5*mm)
assert_array_equal(int_data.data, np.array([mm, 2*mm, 3*mm, 4*mm, 5*mm]))
def test_basic_constructor_with_names():
"""
test construction with channel names
Returns
-------
"""
int_data = IntensityData()
int_data.channel_names = ['IExt', 'I0', 'I45', 'I90', 'I135']
a = np.ones((512, 512))
b = 2 * np.ones((512, 512))
c = 3 * np.ones((512, 512))
d = 4 * np.ones((512, 512))
e = 5 * np.ones((512, 512))
int_data.replace_image(a, 'IExt')
int_data.replace_image(b, 'I0')
int_data.replace_image(c, 'I45')
int_data.replace_image(d, 'I90')
int_data.replace_image(e, 'I135')
assert_array_equal(int_data.get_image("IExt"), a)
def test_basic_constructor_without_names():
"""
test construction with channel names
Returns
-------
"""
int_data = IntensityData()
# int_data.channel_names = ['IExt', 'I0', 'I45', 'I90', 'I135']
a = np.ones((512, 512))
b = 2 * np.ones((512, 512))
c = 3 * np.ones((512, 512))
d = 4 * np.ones((512, 512))
e = 5 * np.ones((512, 512))
int_data.append_image(a)
int_data.append_image(b)
int_data.append_image(c)
int_data.append_image(d)
int_data.append_image(e)
assert_array_equal(int_data.get_image(0), a)
# ==== test instances and private/public access =====
def test_instances():
"""
test instance attributes
"""
I1 = IntensityData()
I2 = IntensityData()
with pytest.raises(AssertionError):
assert(I1 == I2)
with pytest.raises(AssertionError):
I1.append_image(np.ones((32, 32)))
I2.append_image(np.ones((64, 64)))
assert_array_equal(I1.get_image(0),I2.get_image(0))
def test_private_access(setup_intensity_data):
"""
should not have access to private variables
access is restricted to setters/getters
"""
int_data, a, b, c, d, e = setup_intensity_data
with pytest.raises(AttributeError):
print(int_data.__IExt)
with pytest.raises(AttributeError):
print(int_data.__I0)
# ==== test methods =====
# replace_image method
def test_replace_image_shape(setup_intensity_data):
int_data, a, b, c, d, e = setup_intensity_data
newim = np.ones((5,5))
with pytest.raises(ValueError):
int_data.replace_image(newim, 0)
def test_replace_image_dtype(setup_intensity_data):
int_data, a, b, c, d, e = setup_intensity_data
newim = 0
with pytest.raises(TypeError):
int_data.replace_image(newim, 0)
def test_replace_image_by_index(setup_intensity_data):
int_data, a, b, c, d, e = setup_intensity_data
newim = np.ones((512, 512))
int_data.replace_image(newim, 0)
assert_array_equal(int_data.data[0], newim)
def test_replace_image_by_string(setup_intensity_data):
int_data, a, b, c, d, e = setup_intensity_data
int_data.channel_names = ['IExt', 'I0', 'I45', 'I90', 'I135']
newim = np.ones((512,512))
int_data.replace_image(newim, 'I90')
assert_array_equal(int_data.get_image('I90'), newim)
# channel_names property
def test_channel_names(setup_intensity_data):
int_data, a, b, c, d, e = setup_intensity_data
names = ['a','b','c','d','e']
int_data.channel_names = names
# get_image method
def test_get_image_str(setup_intensity_data):
"""
test query by string channel name
"""
int_data, a, b, c, d, e = setup_intensity_data
names = ['a','b','c','d','e']
int_data.channel_names = names
dat = int_data.get_image('e')
    assert dat.shape == (512, 512)  # assert on a non-empty tuple is always true
    assert dat[0][0] == 5
def test_get_img_str_undef(setup_intensity_data):
"""
test exception handling of query by string channel name
"""
int_data, a, b, c, d, e = setup_intensity_data
names = ['a','b','c','d','e','f','g','h']
int_data.channel_names = names
with pytest.raises(ValueError):
dat = int_data.get_image('q')
def test_get_image_int(setup_intensity_data):
"""
test query by int channel index
"""
int_data, a, b, c, d, e = setup_intensity_data
names = ['a','b','c','d','e']
int_data.channel_names = names
dat = int_data.get_image(4)
    assert dat.shape == (512, 512)
    assert dat[0][0] == 5
# axis_names property
def test_axis_names(setup_intensity_data):
int_data, a, b, c, d, e = setup_intensity_data
names = ['c', 'x', 'y', 'z', 't']
int_data.axis_names = names
    assert int_data.axis_names == names
# ==== test data dimensions =====
def test_ndims_1(setup_ndarrays):
"""
test that shape is preserved
"""
p, q, r = setup_ndarrays
int_data = IntensityData()
int_data.append_image(p)
int_data.append_image(p)
int_data.append_image(p)
assert(int_data.data[0].shape == p.shape)
assert(int_data.data.shape == (3,)+p.shape)
def test_ndims_2(setup_ndarrays):
"""
test exception handling for image data that is not \
numpy array or numpy memmap
"""
int_data = IntensityData()
with pytest.raises(TypeError):
int_data.append_image(1)
with pytest.raises(TypeError):
int_data.append_image([1, 2, 3])
with pytest.raises(TypeError):
int_data.append_image({1, 2, 3})
with pytest.raises(TypeError):
int_data.append_image((1, 2, 3))
def test_ndims_3(setup_ndarrays):
"""
test exception handling upon assignment of dim mismatch image
"""
p, q, r = setup_ndarrays
int_data = IntensityData()
int_data.append_image(p)
with pytest.raises(ValueError):
int_data.append_image(q)
# ==== Attribute assignment ==========
def test_assignment(setup_intensity_data):
"""
test exception handling of improper assignment
"""
int_data, a, b, c, d, e = setup_intensity_data
with pytest.raises(TypeError):
int_data.Iext = a
with pytest.raises(TypeError):
int_data.__IExt = a
def test_set_data(setup_intensity_data):
"""
test that neither data nor frames are set-able attributes
"""
int_data, a, b, c, d, e = setup_intensity_data
with pytest.raises(AttributeError):
int_data.data = 0
with pytest.raises(AttributeError):
int_data.num_channels = 0
| 24.759615 | 77 | 0.655016 | 1,159 | 7,725 | 4.099223 | 0.11648 | 0.129657 | 0.06567 | 0.090928 | 0.669964 | 0.615028 | 0.571248 | 0.520732 | 0.428962 | 0.412334 | 0 | 0.03838 | 0.207379 | 7,725 | 311 | 78 | 24.839228 | 0.737547 | 0.13178 | 0 | 0.52439 | 0 | 0 | 0.013616 | 0 | 0 | 0 | 0 | 0 | 0.170732 | 1 | 0.121951 | false | 0 | 0.02439 | 0 | 0.146341 | 0.012195 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ccf118cf4e661c54ae6e3a8fa5192d55fe0bbd47 | 1,722 | py | Python | configs/mario_pg_config.py | Shiien/verify_rl_torch | 45866609ac55fcf99aaaa89df94573acf35580d2 | [
"MIT"
] | 1 | 2022-03-22T14:59:01.000Z | 2022-03-22T14:59:01.000Z | configs/mario_pg_config.py | Shiien/verify_rl_torch | 45866609ac55fcf99aaaa89df94573acf35580d2 | [
"MIT"
] | null | null | null | configs/mario_pg_config.py | Shiien/verify_rl_torch | 45866609ac55fcf99aaaa89df94573acf35580d2 | [
"MIT"
] | null | null | null | import datetime
import os

import torch
class MarioConfig:
def __init__(self):
# hyper config
self.max_num_gpus = 1
self.num_workers = 32
self.discount = 0.999
self.observation_space = (84, 84, 3)
self.action_space = 256 + 20 + 8
self.results_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../results",
os.path.basename(__file__)[:-3], datetime.datetime.now().strftime(
"%Y-%m-%d--%H-%M-%S")) # Path to store the model weights and TensorBoard logs
self.save_log = True # Save the checkpoint in results_path as model.checkpoint
self.training_steps = int(100 * 1e6) # Total number of training steps (ie weights update according to a batch)
# Alg config
self.lambda_ = 0.95
# Actor config
# Learner config
self.train_on_gpu = torch.cuda.is_available() # Train on GPU if available
self.batch_size = 32 # Number of parts of games to train on at each training step
self.checkpoint_interval = int(8) # Number of training steps before using the model for self-playing
self.optimizer = "Adam" # "Adam" or "SGD". Paper uses SGD
self.weight_decay = 1e-4 # L2 weights regularization
self.momentum = 0.9 # Used only if optimizer is SGD
self.cofentropy = 1e-3
self.v_scaling = 0.5
self.clip_param = 0.15
self.lr_init = 5e-4 # Initial learning rate
self.replay_buffer_size = int(1e3) # Number of self-play games to keep in the replay buffer
self.num_unroll_steps = 16 # Number of game moves to keep for every batch element
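# Usage sketch: consumers are expected to read hyperparameters straight off an
# instance, e.g.
#
#   config = MarioConfig()
#   print(config.batch_size, config.lr_init)  # 32 0.0005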
| 43.05 | 119 | 0.624855 | 243 | 1,722 | 4.283951 | 0.539095 | 0.038425 | 0.03074 | 0.040346 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.039216 | 0.289199 | 1,722 | 39 | 120 | 44.153846 | 0.811275 | 0.348432 | 0 | 0 | 0 | 0 | 0.028959 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.107143 | 0 | 0.178571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ccf122ab24b6c118407351673ac8790f51122e47 | 505 | py | Python | 31-100/31-40/34.py | higee/project_euler | 2ecdefb6e4a588f50cea47321c88ee7c7ac28110 | [
"MIT"
] | null | null | null | 31-100/31-40/34.py | higee/project_euler | 2ecdefb6e4a588f50cea47321c88ee7c7ac28110 | [
"MIT"
] | null | null | null | 31-100/31-40/34.py | higee/project_euler | 2ecdefb6e4a588f50cea47321c88ee7c7ac28110 | [
"MIT"
] | null | null | null | def fac(n):
if n in [0, 1]:
return 1
else:
return n * fac(n-1)
def sum_of_the_factorial_of_their_digits(n):
fac_of_the_digits = [fac_dic[int(x)] for x in str(n)]
return sum(fac_of_the_digits)
def main():
    # Upper bound: 7 * 9! = 2,540,160. Any 8-digit number is at least
    # 10,000,000, but eight digits can sum to at most 8 * 9! = 2,903,040,
    # so nothing with more than 7 digits can qualify.
    for n in range(10, 2540161):
if n == sum_of_the_factorial_of_their_digits(n):
yield n
if __name__ == "__main__":
    # `global` is a no-op at module scope; fac_dic is simply a module-level
    # name that the helpers above look up at call time.
fac_dic = {n : fac(n) for n in range(10)}
answer = list(main())
print(answer)
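# Expected output: [145, 40585] -- the only numbers above 9 that equal the sum
# of the factorials of their digits.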
| 21.041667 | 57 | 0.584158 | 85 | 505 | 3.129412 | 0.352941 | 0.075188 | 0.037594 | 0.12782 | 0.330827 | 0.233083 | 0.233083 | 0.233083 | 0 | 0 | 0 | 0.042493 | 0.30099 | 505 | 23 | 58 | 21.956522 | 0.711048 | 0 | 0 | 0 | 0 | 0 | 0.015842 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.176471 | false | 0 | 0 | 0 | 0.352941 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ccf2d7d2d0ffc342a84d86056b65cf383d097b4c | 7,638 | py | Python | audio_processing.py | poria-cat/Transformer-TTS-Pytorch | 1e9e2dccc16c17372bf86ca73001f76645f53338 | [
"MIT"
] | null | null | null | audio_processing.py | poria-cat/Transformer-TTS-Pytorch | 1e9e2dccc16c17372bf86ca73001f76645f53338 | [
"MIT"
] | null | null | null | audio_processing.py | poria-cat/Transformer-TTS-Pytorch | 1e9e2dccc16c17372bf86ca73001f76645f53338 | [
"MIT"
] | null | null | null | import torch
import torch.nn.functional as F
import torchaudio
import numpy as np
from scipy.signal import get_window
from librosa.util import pad_center, tiny
from librosa.filters import window_sumsquare
from librosa.filters import mel as librosa_mel_fn
def get_mel_basis(sampling_rate=22050, filter_length=1024, n_mel_channels=80, mel_fmin=0.0, mel_fmax=8000.0):
mel_basis = librosa_mel_fn(
sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax) # shape=(n_mels, 1 + n_fft/2)
mel_basis = torch.from_numpy(mel_basis).float()
return mel_basis
def dynamic_range_compression(x, C=1, clip_val=1e-5):
"""
PARAMS
------
C: compression factor
"""
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression(x, C=1):
"""
PARAMS
------
C: compression factor used to compress
"""
return torch.exp(x) / C
class Inverse(torch.nn.Module):
def __init__(self, filter_length=800, hop_length=200, win_length=800,
window='hann'):
super(Inverse, self).__init__()
self.filter_length = filter_length
self.hop_length = hop_length
self.win_length = win_length
self.window = window
scale = filter_length / hop_length
fourier_basis = np.fft.fft(np.eye(filter_length))
cutoff = int((filter_length / 2 + 1))
fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),
np.imag(fourier_basis[:cutoff, :])])
forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
inverse_basis = torch.FloatTensor(
np.linalg.pinv(scale * fourier_basis).T[:, None, :])
        if window is not None:
assert(filter_length >= win_length)
# get window and zero center pad it to filter_length
fft_window = get_window(window, win_length, fftbins=True)
fft_window = pad_center(fft_window, filter_length)
fft_window = torch.from_numpy(fft_window).float()
# window the bases
forward_basis *= fft_window
inverse_basis *= fft_window
self.register_buffer('forward_basis', forward_basis.float())
self.register_buffer('inverse_basis', inverse_basis.float())
def forward(self, magnitude, phase):
recombine_magnitude_phase = torch.cat(
[magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)
inverse_transform = F.conv_transpose1d(
recombine_magnitude_phase,
torch.autograd.Variable(self.inverse_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
        if self.window is not None:
window_sum = window_sumsquare(
self.window, magnitude.size(-1), hop_length=self.hop_length, win_length=self.win_length, n_fft=self.filter_length, dtype=np.float32)
# remove modulation effects
approx_nonzero_indices = torch.from_numpy(
np.where(window_sum > tiny(window_sum))[0])
window_sum = torch.autograd.Variable(
torch.from_numpy(window_sum), requires_grad=False)
window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
inverse_transform[:, :,
approx_nonzero_indices] /= window_sum[approx_nonzero_indices]
# scale by hop ratio
inverse_transform *= float(self.filter_length) / self.hop_length
inverse_transform = inverse_transform[:, :, int(
self.filter_length/2):]
inverse_transform = inverse_transform[:,
:, :-int(self.filter_length/2):]
return inverse_transform
def griffin_lim(magnitudes, inverse, n_iters=30, filter_length=1024, hop_length=256, win_length=1024,):
"""
PARAMS
------
magnitudes: spectrogram magnitudes
    inverse: an Inverse module mapping (magnitude, phase) back to a waveform
"""
angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size())))
angles = angles.astype(np.float32)
angles = torch.autograd.Variable(torch.from_numpy(angles))
signal = inverse(magnitudes, angles).squeeze(1)
for i in range(n_iters):
stft = torch.stft(signal, n_fft=filter_length, hop_length=hop_length,
win_length=win_length, window=torch.hann_window(win_length))
real = stft[:, :, :, 0]
imag = stft[:, :, :, 1]
angles = torch.autograd.Variable(
torch.atan2(imag.data, real.data))
signal = inverse(magnitudes, angles).squeeze(1)
return signal
def mel2wav(mel_outputs, n_iters=30, filter_length=1024, hop_length=256, win_length=1024, n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0, mel_fmax=8000.0):
mel_decompress = dynamic_range_decompression(mel_outputs)
mel_decompress = mel_decompress.transpose(1, 2).data.cpu()
    mel_basis = get_mel_basis(
        sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax)  # shape=(n_mels, 1 + n_fft/2)
spec_from_mel_scaling = 1000
spec_from_mel = torch.mm(mel_decompress[0], mel_basis)
spec_from_mel = spec_from_mel.transpose(0, 1).unsqueeze(0)
spec_from_mel = spec_from_mel * spec_from_mel_scaling
inverse = Inverse(filter_length=filter_length,
hop_length=hop_length, win_length=win_length)
audio = griffin_lim(torch.autograd.Variable(
spec_from_mel[:, :, :-1]), inverse, n_iters, filter_length=filter_length, hop_length=hop_length, win_length=win_length)
audio = audio.squeeze()
audio = audio.cpu().numpy()
return audio
class STFT(torch.nn.Module):
def __init__(self, filter_length=1024, hop_length=256, win_length=1024,
n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0,
mel_fmax=8000.0):
super(STFT, self).__init__()
self.n_mel_channels = n_mel_channels
self.sampling_rate = sampling_rate
self.filter_length = filter_length
self.hop_length = hop_length
self.win_length = win_length
mel_basis = get_mel_basis(
sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax) #shape=(n_mels, 1 + n_fft/2)
self.register_buffer('mel_basis', mel_basis)
def spectral_normalize(self, magnitudes):
output = dynamic_range_compression(magnitudes)
return output
def spectral_de_normalize(self, magnitudes):
output = dynamic_range_decompression(magnitudes)
return output
def mel_spectrogram(self, y):
assert(torch.min(y.data) >= -1)
assert(torch.max(y.data) <= 1)
stft = torch.stft(y,n_fft=self.filter_length, hop_length=self.hop_length,win_length=self.win_length,window=torch.hann_window(self.win_length))
real = stft[:, :, :, 0]
imag = stft[:, :, :, 1]
magnitudes = torch.sqrt(torch.pow(real, 2) + torch.pow(imag, 2))
magnitudes = magnitudes.data
mel_output = torch.matmul(self.mel_basis, magnitudes)
mel_output = self.spectral_normalize(mel_output)
return mel_output
def load_wav(full_path, resample_rate=True, resample_rate_value=22500):
    data, sampling_rate = torchaudio.load(full_path)
    if resample_rate and resample_rate_value != sampling_rate:
        resample = torchaudio.transforms.Resample(sampling_rate, resample_rate_value)
        data = resample(data)
        return data[0], resample_rate_value
    # No resampling happened, so report the file's native rate rather than the target.
    return data[0], sampling_rate
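# End-to-end usage sketch ("sample.wav" is hypothetical; shapes follow the code
# above):
#
#   stft = STFT()
#   wav, sr = load_wav("sample.wav")
#   mel = stft.mel_spectrogram(wav.unsqueeze(0))  # (1, n_mel_channels, frames)
#   audio = mel2wav(mel)                          # Griffin-Lim waveform (numpy)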
| 40.2 | 161 | 0.660775 | 995 | 7,638 | 4.776884 | 0.18593 | 0.070692 | 0.034715 | 0.019987 | 0.366716 | 0.337892 | 0.253103 | 0.253103 | 0.229118 | 0.202188 | 0 | 0.02497 | 0.234485 | 7,638 | 189 | 162 | 40.412698 | 0.787925 | 0.052893 | 0 | 0.171642 | 0 | 0 | 0.005452 | 0 | 0 | 0 | 0 | 0 | 0.022388 | 1 | 0.089552 | false | 0 | 0.059701 | 0 | 0.246269 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ccf38eaa3eb535456d8a6a2a6262774cda8e86a7 | 13,771 | bzl | Python | toolchain/ndk_cc_toolchain_config.bzl | jbeich/skcms | 9c30a95f0f167ee1513e5a1ea6846b15a010385c | [
"BSD-3-Clause"
] | null | null | null | toolchain/ndk_cc_toolchain_config.bzl | jbeich/skcms | 9c30a95f0f167ee1513e5a1ea6846b15a010385c | [
"BSD-3-Clause"
] | null | null | null | toolchain/ndk_cc_toolchain_config.bzl | jbeich/skcms | 9c30a95f0f167ee1513e5a1ea6846b15a010385c | [
"BSD-3-Clause"
] | null | null | null | """This module defines the ndk_cc_toolchain_config rule.
This file is based on the `external/androidndk/cc_toolchain_config.bzl` file produced by the
built-in `android_ndk_repository` Bazel rule[1], which was used to build the SkCMS repository up
until this revision[2].
The paths in this file point to locations inside the expanded Android NDK ZIP file (found at
external/android_ndk), and must be updated every time we upgrade to a new Android NDK version.
[1] https://github.com/bazelbuild/bazel/blob/4710ef82ce34572878e07c52e83a0144d707f140/src/main/java/com/google/devtools/build/lib/bazel/rules/android/AndroidNdkRepositoryFunction.java#L422
[2] https://skia.googlesource.com/skcms/+/30c8e303800c256febb03a09fdcda7f75d119b1b/WORKSPACE#22
"""
load("@bazel_tools//tools/build_defs/cc:action_names.bzl", "ACTION_NAMES")
load(
"@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl",
"feature",
"flag_group",
"flag_set",
"tool_path",
"with_feature_set",
)
load("download_toolchains.bzl", "NDK_PATH")
# Supported CPUs.
_ARMEABI_V7A = "armeabi-v7a"
_ARM64_V8A = "arm64-v8a"
_all_compile_actions = [
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.clif_match,
ACTION_NAMES.lto_backend,
]
_all_link_actions = [
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
]
def _get_default_compile_flags(cpu):
if cpu == _ARMEABI_V7A:
return [
"-D__ANDROID_API__=29",
"-isystem",
NDK_PATH + "/sysroot/usr/include/arm-linux-androideabi",
"-target",
"armv7-none-linux-androideabi",
"-march=armv7-a",
"-mfloat-abi=softfp",
"-mfpu=vfpv3-d16",
"-gcc-toolchain",
NDK_PATH + "/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64",
"-fpic",
"-no-canonical-prefixes",
"-Wno-invalid-command-line-argument",
"-Wno-unused-command-line-argument",
"-funwind-tables",
"-fstack-protector-strong",
"-fno-addrsig",
"-Werror=return-type",
"-Werror=int-to-pointer-cast",
"-Werror=pointer-to-int-cast",
"-Werror=implicit-function-declaration",
]
if cpu == _ARM64_V8A:
return [
"-gcc-toolchain",
NDK_PATH + "/toolchains/aarch64-linux-android-4.9/prebuilt/linux-x86_64",
"-target",
"aarch64-none-linux-android",
"-fpic",
"-isystem",
NDK_PATH + "/sysroot/usr/include/aarch64-linux-android",
"-D__ANDROID_API__=29",
"-no-canonical-prefixes",
"-Wno-invalid-command-line-argument",
"-Wno-unused-command-line-argument",
"-funwind-tables",
"-fstack-protector-strong",
"-fno-addrsig",
"-Werror=return-type",
"-Werror=int-to-pointer-cast",
"-Werror=pointer-to-int-cast",
"-Werror=implicit-function-declaration",
]
fail("Unknown CPU: " + cpu)
def _get_default_link_flags(cpu):
if cpu == _ARMEABI_V7A:
return [
"-target",
"armv7-none-linux-androideabi",
"-gcc-toolchain",
NDK_PATH + "/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64",
"-L",
NDK_PATH + "/sources/cxx-stl/llvm-libc++/libs/armeabi-v7a",
"-no-canonical-prefixes",
"-Wl,-z,relro",
"-Wl,--gc-sections",
]
if cpu == _ARM64_V8A:
return [
"-gcc-toolchain",
NDK_PATH + "/toolchains/aarch64-linux-android-4.9/prebuilt/linux-x86_64",
"-target",
"aarch64-none-linux-android",
"-L",
NDK_PATH + "/sources/cxx-stl/llvm-libc++/libs/arm64-v8a",
"-no-canonical-prefixes",
"-Wl,-z,relro",
"-Wl,--gc-sections",
]
fail("Unknown CPU: " + cpu)
def _get_default_dbg_flags(cpu):
if cpu == _ARMEABI_V7A:
return ["-g", "-fno-strict-aliasing", "-O0", "-UNDEBUG"]
if cpu == _ARM64_V8A:
return ["-O0", "-g", "-UNDEBUG"]
fail("Unknown CPU: " + cpu)
def _get_default_opt_flags(cpu):
if cpu == _ARMEABI_V7A:
return ["-mthumb", "-Os", "-g", "-DNDEBUG"]
if cpu == _ARM64_V8A:
return ["-O2", "-g", "-DNDEBUG"]
fail("Unknown CPU: " + cpu)
def _get_toolchain_identifier(cpu):
if cpu == _ARMEABI_V7A:
return "ndk-armeabi-v7a-toolchain"
if cpu == _ARM64_V8A:
return "ndk-arm64-v8a-toolchain"
fail("Unknown CPU: " + cpu)
def _get_target_system_name(cpu):
if cpu == _ARMEABI_V7A:
return "arm-linux-androideabi"
if cpu == _ARM64_V8A:
return "aarch64-linux-android"
fail("Unknown CPU: " + cpu)
def _get_builtin_sysroot(cpu):
if cpu == _ARMEABI_V7A:
return NDK_PATH + "/platforms/android-29/arch-arm"
if cpu == _ARM64_V8A:
return NDK_PATH + "/platforms/android-29/arch-arm64"
fail("Unknown CPU: " + cpu)
def _get_tool_paths(cpu):
# The cc_common.create_cc_toolchain_config_info function expects tool paths to point to files
# under the directory in which it is invoked. This means we cannot directly reference tools
# under external/android_ndk. The solution is to use "trampoline" scripts that pass through
# any command-line arguments to the NDK binaries under external/android_sdk.
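    # Each trampoline is assumed to be a one-line pass-through, e.g.
    # (hypothetical contents of trampolines/arm-linux-androideabi-ar.sh):
    #
    #   #!/bin/sh
    #   exec external/android_ndk/.../arm-linux-androideabi-ar "$@"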
if cpu == _ARMEABI_V7A:
return [
tool_path(
name = "ar",
path = "trampolines/arm-linux-androideabi-ar.sh",
),
tool_path(
name = "cpp",
path = "trampolines/clang.sh",
),
tool_path(
name = "dwp",
path = "trampolines/arm-linux-androideabi-dwp.sh",
),
tool_path(
name = "gcc",
path = "trampolines/clang.sh",
),
tool_path(
name = "gcov",
path = "/bin/false",
),
tool_path(
name = "ld",
path = "trampolines/arm-linux-androideabi-ld.sh",
),
tool_path(
name = "nm",
path = "trampolines/arm-linux-androideabi-nm.sh",
),
tool_path(
name = "objcopy",
path = "trampolines/arm-linux-androideabi-objcopy.sh",
),
tool_path(
name = "objdump",
path = "trampolines/arm-linux-androideabi-objdump.sh",
),
tool_path(
name = "strip",
path = "trampolines/arm-linux-androideabi-strip.sh",
),
]
if cpu == _ARM64_V8A:
return [
tool_path(
name = "ar",
path = "trampolines/aarch64-linux-android-ar.sh",
),
tool_path(
name = "cpp",
path = "trampolines/clang.sh",
),
tool_path(
name = "dwp",
path = "trampolines/aarch64-linux-android-dwp.sh",
),
tool_path(
name = "gcc",
path = "trampolines/clang.sh",
),
tool_path(
name = "gcov",
path = "/bin/false",
),
tool_path(
name = "ld",
path = "trampolines/aarch64-linux-android-ld.sh",
),
tool_path(
name = "nm",
path = "trampolines/aarch64-linux-android-nm.sh",
),
tool_path(
name = "objcopy",
path = "trampolines/aarch64-linux-android-objcopy.sh",
),
tool_path(
name = "objdump",
path = "trampolines/aarch64-linux-android-objdump.sh",
),
tool_path(
name = "strip",
path = "trampolines/aarch64-linux-android-strip.sh",
),
]
fail("Unknown CPU: " + cpu)
def _ndk_cc_toolchain_config_impl(ctx):
default_compile_flags = _get_default_compile_flags(ctx.attr.cpu)
unfiltered_compile_flags = [
"-isystem",
NDK_PATH + "/sources/cxx-stl/llvm-libc++/include",
"-isystem",
NDK_PATH + "/sources/cxx-stl/llvm-libc++abi/include",
"-isystem",
NDK_PATH + "/sources/android/support/include",
"-isystem",
NDK_PATH + "/sysroot/usr/include",
]
default_link_flags = _get_default_link_flags(ctx.attr.cpu)
default_fastbuild_flags = [""]
default_dbg_flags = _get_default_dbg_flags(ctx.attr.cpu)
default_opt_flags = _get_default_opt_flags(ctx.attr.cpu)
opt_feature = feature(name = "opt")
fastbuild_feature = feature(name = "fastbuild")
dbg_feature = feature(name = "dbg")
supports_dynamic_linker_feature = feature(name = "supports_dynamic_linker", enabled = True)
supports_pic_feature = feature(name = "supports_pic", enabled = True)
static_link_cpp_runtimes_feature = feature(name = "static_link_cpp_runtimes", enabled = True)
default_compile_flags_feature = feature(
name = "default_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = _all_compile_actions,
flag_groups = [flag_group(flags = default_compile_flags)],
),
flag_set(
actions = _all_compile_actions,
flag_groups = [flag_group(flags = default_fastbuild_flags)],
with_features = [with_feature_set(features = ["fastbuild"])],
),
flag_set(
actions = _all_compile_actions,
flag_groups = [flag_group(flags = default_dbg_flags)],
with_features = [with_feature_set(features = ["dbg"])],
),
flag_set(
actions = _all_compile_actions,
flag_groups = [flag_group(flags = default_opt_flags)],
with_features = [with_feature_set(features = ["opt"])],
),
],
)
default_link_flags_feature = feature(
name = "default_link_flags",
enabled = True,
flag_sets = [
flag_set(
actions = _all_link_actions,
flag_groups = [flag_group(flags = default_link_flags)],
),
],
)
user_compile_flags_feature = feature(
name = "user_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = _all_compile_actions,
flag_groups = [
flag_group(
flags = ["%{user_compile_flags}"],
iterate_over = "user_compile_flags",
expand_if_available = "user_compile_flags",
),
],
),
],
)
sysroot_feature = feature(
name = "sysroot",
enabled = True,
flag_sets = [
flag_set(
actions = _all_compile_actions + _all_link_actions,
flag_groups = [
flag_group(
flags = ["--sysroot=%{sysroot}"],
expand_if_available = "sysroot",
),
],
),
],
)
unfiltered_compile_flags_feature = feature(
name = "unfiltered_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = _all_compile_actions,
flag_groups = [flag_group(flags = unfiltered_compile_flags)],
),
],
)
features = [
default_compile_flags_feature,
default_link_flags_feature,
supports_dynamic_linker_feature,
supports_pic_feature,
static_link_cpp_runtimes_feature,
fastbuild_feature,
dbg_feature,
opt_feature,
user_compile_flags_feature,
sysroot_feature,
unfiltered_compile_flags_feature,
]
cxx_builtin_include_directories = [
NDK_PATH + "/toolchains/llvm/prebuilt/linux-x86_64/lib64/clang/9.0.9/include",
"%sysroot%/usr/include",
NDK_PATH + "/sysroot/usr/include",
]
# https://bazel.build/rules/lib/cc_common#create_cc_toolchain_config_info
return cc_common.create_cc_toolchain_config_info(
ctx = ctx,
toolchain_identifier = _get_toolchain_identifier(ctx.attr.cpu),
host_system_name = "local",
target_system_name = _get_target_system_name(ctx.attr.cpu),
target_cpu = ctx.attr.cpu,
target_libc = "local",
compiler = "clang9.0.9",
abi_version = ctx.attr.cpu,
abi_libc_version = "local",
features = features,
tool_paths = _get_tool_paths(ctx.attr.cpu),
cxx_builtin_include_directories = cxx_builtin_include_directories,
builtin_sysroot = _get_builtin_sysroot(ctx.attr.cpu),
)
ndk_cc_toolchain_config = rule(
implementation = _ndk_cc_toolchain_config_impl,
attrs = {
"cpu": attr.string(
mandatory = True,
values = [_ARMEABI_V7A, _ARM64_V8A],
doc = "Target CPU.",
)
},
provides = [CcToolchainConfigInfo],
)
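# Usage sketch (hypothetical BUILD snippet; the target name is illustrative):
#
#   ndk_cc_toolchain_config(
#       name = "armeabi_v7a_toolchain_config",
#       cpu = "armeabi-v7a",
#   )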
| 34.002469 | 188 | 0.559872 | 1,435 | 13,771 | 5.075261 | 0.192334 | 0.023067 | 0.032953 | 0.030757 | 0.542634 | 0.430729 | 0.397226 | 0.312097 | 0.25992 | 0.224495 | 0 | 0.020979 | 0.325031 | 13,771 | 404 | 189 | 34.086634 | 0.762561 | 0.085542 | 0 | 0.544199 | 0 | 0.002762 | 0.252941 | 0.16876 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024862 | false | 0 | 0 | 0 | 0.071823 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ccf57840058ae1e39f456a4f292c8353027f974d | 7,149 | py | Python | VirtualJudgeSpider/OJs/HDUClass.py | mr-kkid/OnlineJudgeSpider | c83c01d8e989ae87834bdabdb3fae0984eae2eaa | [
"MIT"
] | null | null | null | VirtualJudgeSpider/OJs/HDUClass.py | mr-kkid/OnlineJudgeSpider | c83c01d8e989ae87834bdabdb3fae0984eae2eaa | [
"MIT"
] | null | null | null | VirtualJudgeSpider/OJs/HDUClass.py | mr-kkid/OnlineJudgeSpider | c83c01d8e989ae87834bdabdb3fae0984eae2eaa | [
"MIT"
] | null | null | null | import re
from http import cookiejar
from urllib import request, parse
from bs4 import BeautifulSoup
from VirtualJudgeSpider import Config
from VirtualJudgeSpider.Config import Problem, Spider, Result
from VirtualJudgeSpider.OJs.BaseClass import Base
class HDU(Base):
def __init__(self):
self.code_type = 'gb18030'
self.cj = cookiejar.CookieJar()
self.opener = request.build_opener(request.HTTPCookieProcessor(self.cj))
    @staticmethod
    def home_page_url():  # a staticmethod takes no self
        url = 'http://acm.hdu.edu.cn/'
        return url
def check_login_status(self):
url = 'http://acm.hdu.edu.cn/'
try:
with self.opener.open(url) as fin:
website_data = fin.read().decode(self.code_type)
                if re.search(r'userloginex\.php\?action=logout', website_data) is not None:
                    return True
            return False  # page loaded but no logout link: not logged in
        except Exception:  # a bare except would also swallow KeyboardInterrupt
            return False
def login_webside(self, *args, **kwargs):
if self.check_login_status():
return True
login_page_url = 'http://acm.hdu.edu.cn/'
login_link_url = 'http://acm.hdu.edu.cn/userloginex.php?action=login&cid=0¬ice=0'
post_data = parse.urlencode(
{'username': kwargs['account'].get_username(), 'userpass': kwargs['account'].get_password()})
try:
self.opener.open(login_page_url)
req = request.Request(url=login_link_url, data=post_data.encode(self.code_type),
headers=Config.custom_headers)
self.opener.open(req)
if self.check_login_status():
return True
return False
        except Exception:
return False
def get_problem(self, *args, **kwargs):
url = 'http://acm.hdu.edu.cn/showproblem.php?pid=' + str(kwargs['pid'])
problem = Problem()
try:
website_data = Spider.get_data(url, self.code_type)
problem.remote_id = kwargs['pid']
problem.remote_url = url
problem.remote_oj = 'HDU'
problem.title = re.search(r'color:#1A5CC8\'>([\s\S]*?)</h1>', website_data).group(1)
problem.time_limit = re.search(r'(\d* MS)', website_data).group(1)
problem.memory_limit = re.search(r'/(\d* K)', website_data).group(1)
problem.special_judge = re.search(r'color=red>Special Judge</font>', website_data) is not None
problem.description = re.search(r'>Problem Description</div>[\s\S]*?panel_content>([\s\S]*?)</div>',
website_data).group(1)
problem.input = re.search(r'>Input</div>[\s\S]*?panel_content>([\s\S]*?)</div>', website_data).group(1)
problem.output = re.search(r'>Output</div>[\s\S]*?panel_content>([\s\S]*?)</div>', website_data).group(1)
match_group = re.search(r'>Sample Input</div>[\s\S]*?panel_content>([\s\S]*?)</div', website_data)
input_data = ''
if match_group:
input_data = re.search(r'(<pre><div[\s\S]*?>)?([\s\S]*)', match_group.group(1)).group(2)
output_data = ''
match_group = re.search(r'>Sample Output</div>[\s\S]*?panel_content>([\s\S]*?)</div', website_data)
if match_group:
output_data = re.search(r'(<pre><div[\s\S]*?>)?([\s\S]*)', match_group.group(1)).group(2)
if re.search('<div', output_data):
output_data = re.search(r'([\s\S]*?)<div', output_data).group(1)
problem.sample = [
{'input': input_data,
'output': output_data}]
match_group = re.search(r'>Author</div>[\s\S]*?panel_content>([\s\S]*?)</div>', website_data)
if match_group:
problem.author = match_group.group(1)
match_group = re.search(r'<i>Hint</i>[\s\S]*?/div>[\s]*([\s\S]+?)</div>', website_data)
if match_group:
problem.hint = match_group.group(1)
        except Exception:
return Problem.PROBLEM_NOT_FOUND
return problem
def submit_code(self, *args, **kwargs):
if self.login_webside(*args, **kwargs) is False:
return False
try:
code = kwargs['code']
language = kwargs['language']
pid = kwargs['pid']
url = 'http://acm.hdu.edu.cn/submit.php?action=submit'
post_data = parse.urlencode({'check': '0', 'language': language, 'problemid': pid, 'usercode': code})
req = request.Request(url=url, data=post_data.encode(self.code_type), headers=Config.custom_headers)
response = self.opener.open(req)
response.read().decode(self.code_type)
return True
        except Exception:
return False
def find_language(self, *args, **kwargs):
if self.login_webside(*args, **kwargs) is False:
return None
url = 'http://acm.hdu.edu.cn/submit.php'
languages = {}
try:
with self.opener.open(url) as fin:
data = fin.read().decode(self.code_type)
soup = BeautifulSoup(data, 'lxml')
options = soup.find('select', attrs={'name': 'language'}).find_all('option')
for option in options:
languages[option.get('value')] = option.string
        finally:
            # Returning from finally suppresses any in-flight exception; that is
            # deliberate here: hand back whatever languages were collected.
            return languages
def get_result(self, *args, **kwargs):
account = kwargs.get('account')
pid = kwargs.get('pid')
url = 'http://acm.hdu.edu.cn/status.php?first=&pid=' + pid + '&user=' + account.username + '&lang=0&status=0'
return self.get_result_by_url(url=url)
def get_result_by_rid(self, rid):
url = 'http://acm.hdu.edu.cn/status.php?first=' + rid + '&pid=&user=&lang=0&status=0'
return self.get_result_by_url(url=url)
def get_result_by_url(self, url):
result = Result()
try:
with request.urlopen(url) as fin:
data = fin.read().decode(self.code_type)
soup = BeautifulSoup(data, 'lxml')
line = soup.find('table', attrs={'class': 'table_text'}).find('tr', attrs={'align': 'center'}).find_all(
'td')
if line is not None:
result.origin_run_id = line[0].string
result.verdict = line[2].string
result.execute_time = line[4].string
result.execute_memory = line[5].string
return result
        except Exception:
pass
return result
def get_class_name(self):
return str('HDU')
def is_waiting_for_judge(self, verdict):
if verdict in ['Queuing', 'Compiling', 'Running']:
return True
return False
def check_status(self):
url = 'http://acm.hdu.edu.cn/'
try:
with request.urlopen(url, timeout=5) as fin:
data = fin.read().decode(self.code_type)
if re.search(r'<H1>Welcome to HDU Online Judge System</H1>', data):
return True
        except Exception:
return False
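# Usage sketch (pid and rid values are illustrative):
#
#   oj = HDU()
#   problem = oj.get_problem(pid=1000)
#   result = oj.get_result_by_rid('12345678')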
| 41.807018 | 120 | 0.55854 | 886 | 7,149 | 4.371332 | 0.188488 | 0.011877 | 0.03718 | 0.033566 | 0.458559 | 0.389362 | 0.356829 | 0.306739 | 0.281694 | 0.253034 | 0 | 0.007331 | 0.294027 | 7,149 | 170 | 121 | 42.052941 | 0.760055 | 0 | 0 | 0.322148 | 0 | 0.013423 | 0.168135 | 0.065324 | 0 | 0 | 0 | 0 | 0 | 1 | 0.087248 | false | 0.013423 | 0.04698 | 0.006711 | 0.295302 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ccf57bf381b881ac46ecdef94c8bf2a01ef756ae | 705 | py | Python | onlineJudge/baekjoon/DFS/Q2667.py | dahyeong-yun/prtc_coding-test-py | f082e42cc47d7da912bd229b355a813f2d38fabb | [
"MIT"
] | null | null | null | onlineJudge/baekjoon/DFS/Q2667.py | dahyeong-yun/prtc_coding-test-py | f082e42cc47d7da912bd229b355a813f2d38fabb | [
"MIT"
] | null | null | null | onlineJudge/baekjoon/DFS/Q2667.py | dahyeong-yun/prtc_coding-test-py | f082e42cc47d7da912bd229b355a813f2d38fabb | [
"MIT"
] | null | null | null | ''' input '''
n = int(input())  # size of the (n x n) map
square_map = []
for i in range(n):
square_map.append(list(map(int, input())))
''' end of input '''
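# Example (BOJ 2667 sample): the 7x7 map in the problem statement contains
# 3 complexes, so the program prints 3, then the sorted sizes 7, 8, 9.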
_house_count = 0
house = []
bundle = 0
def dfx(x, y):
global _house_count
if x <= -1 or x >= n or y <= -1 or y >= n:
return False
    if square_map[x][y] == 1:
        square_map[x][y] = 2  # mark visited so a cell is counted only once
        _house_count += 1
dfx(x, y - 1)
dfx(x, y + 1)
dfx(x + 1, y)
dfx(x - 1, y)
return True
return False
for i in range(n):
for j in range(n):
if dfx(i, j):
house.append(_house_count)
_house_count = 0
bundle += 1
print(bundle)
for i in sorted(house):
print(i) | 16.022727 | 46 | 0.486525 | 114 | 705 | 2.885965 | 0.289474 | 0.151976 | 0.054711 | 0.066869 | 0.12462 | 0.051672 | 0.051672 | 0 | 0 | 0 | 0 | 0.028889 | 0.361702 | 705 | 44 | 47 | 16.022727 | 0.702222 | 0.015603 | 0 | 0.206897 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0 | 0 | 0.137931 | 0.068966 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ccf684f8b04fadc89a621f3af0e959e3165d4fdf | 1,530 | py | Python | queue_task/views.py | emoryBlame/queue_server | 946345111359d5001244eb0cc8fd1b8acc50dd3f | [
"MIT"
] | null | null | null | queue_task/views.py | emoryBlame/queue_server | 946345111359d5001244eb0cc8fd1b8acc50dd3f | [
"MIT"
] | 7 | 2020-02-11T23:41:11.000Z | 2022-01-13T01:04:03.000Z | queue_task/views.py | emoryBlame/queue_server | 946345111359d5001244eb0cc8fd1b8acc50dd3f | [
"MIT"
] | null | null | null | from django.shortcuts import render
from .models import Task
from rest_framework import serializers
from rest_framework.response import Response
from rest_framework.decorators import api_view
# Create your views here.
class TaskSerializer(serializers.ModelSerializer):
"""
Task serializer class
"""
class Meta:
"""
Task serializer meta class
"""
model = Task
fields = ('id', 'url', 'status', 'response_content',\
'response_http_status', 'response_body')
class TaskSerializerResult(serializers.ModelSerializer):
"""
Task id serializer
"""
class Meta:
model = Task
fields = ('id',)
@api_view(("POST",))
def send(request):
if request.method == "POST":
task = Task.objects.create(url=request.data.get("url"))
return Response(TaskSerializerResult(task).data)
else:
return Response({"error": "Bad request."})
@api_view(("GET", ))
def result(request):
if request.method == "GET":
task_id = request.GET.get("id", False)
if task_id:
task = Task.objects.filter(id = task_id).first()
print(task)
if task:
return Response(TaskSerializer(task).data)
else:
task = Task.objects.all().order_by('-id')[:10]
print(task)
return Response(TaskSerializer(task, many = True).data)
else:
return Response({"status": "Bad id"})
else:
return Response({"status": "Bad request"})
@api_view(("GET",))
def start_tasks(request):
Task.objects.all().update(status=0)
return Response({"status": "all task gets status New, and will updating every 2 min in case it's still new"})
| 23.90625 | 110 | 0.698693 | 200 | 1,530 | 5.265 | 0.365 | 0.093067 | 0.048433 | 0.032289 | 0.160494 | 0.043685 | 0 | 0 | 0 | 0 | 0 | 0.003091 | 0.154248 | 1,530 | 63 | 111 | 24.285714 | 0.810665 | 0.060131 | 0 | 0.285714 | 0 | 0 | 0.154119 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.119048 | 0 | 0.452381 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ccf716c91df18440671ec6222d7ac8edd7636308 | 6,275 | py | Python | dispotrains.webapp/src/analysis/all_stations.py | emembrives/dispotrains | 6ef69d4a62d60a470ed6fd96d04e47d29a0ae44f | [
"Apache-2.0"
] | 1 | 2016-11-12T01:16:32.000Z | 2016-11-12T01:16:32.000Z | dispotrains.webapp/src/analysis/all_stations.py | emembrives/dispotrains | 6ef69d4a62d60a470ed6fd96d04e47d29a0ae44f | [
"Apache-2.0"
] | null | null | null | dispotrains.webapp/src/analysis/all_stations.py | emembrives/dispotrains | 6ef69d4a62d60a470ed6fd96d04e47d29a0ae44f | [
"Apache-2.0"
] | 2 | 2016-05-20T21:04:15.000Z | 2020-02-02T15:25:40.000Z | #!/usr/bin/env python3
"""
Extracts all metro and RER stations from an OSM dump.
"""
import xml.etree.ElementTree as ET  # cElementTree was removed in Python 3.9
import argparse
import csv
from math import radians, cos, sin, asin, sqrt
class Station(object):
"""A train station"""
def __init__(self, name, osm_id, lat, lon, accessible=False):
self._name = name
self._osm_ids = set([int(osm_id)])
self._lat = lat
self._lon = lon
self._accessible = accessible
@property
def name(self):
"""Name of the station."""
return self._name
@property
def osm_ids(self):
"""OpenStreetMap ID"""
return self._osm_ids
@property
def lat(self):
"""Latitude of the station."""
return self._lat
@property
def lon(self):
"""Longitude of the station."""
return self._lon
@property
def accessible(self):
"""True if the station is accessible."""
return self._accessible
def distance(self, other):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
        lon1, lat1, lon2, lat2 = [radians(x) for x in
                                  [self.lon, self.lat, other.lon, other.lat]]
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
r = 6371.0 # Radius of earth in kilometers. Use 3956 for miles
return c * r
def merge(self, other):
self._osm_ids.update(other.osm_ids)
@staticmethod
def from_node(node):
"""Creates a Station from an XML node in OSM format."""
name_tags = node.findall("./tag[@k='name']")
        name = None
        if name_tags:
            name = name_tags[0].get("v")
osm_id = node.get("id")
lat = float(node.get("lat"))
lon = float(node.get("lon"))
return Station(name, osm_id, lat, lon)
def __repr__(self):
return "Station(%s)" % (self.name)
def __eq__(self, other):
if isinstance(other, Station):
return self.name == other.name
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.__repr__())
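
# Illustrative sanity check for Station.distance (coordinates approximate and
# not part of the original script): Gare de Lyon to Gare du Nord is ~4 km.
#
#   >>> a = Station("Gare de Lyon", 1, 48.8443, 2.3744)
#   >>> b = Station("Gare du Nord", 2, 48.8809, 2.3553)
#   >>> round(a.distance(b))
#   4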
def extract_stations_from_dump(dump_path):
"""Extract a list of |Station|s from an XML dump."""
tree = ET.parse(dump_path)
root = tree.getroot()
allstation_nodes = root.findall('./node')
allstations = {}
for station_node in allstation_nodes:
station = Station.from_node(station_node)
if station.name in allstations:
allstations[station.name].merge(station)
else:
allstations[station.name] = station
return merge_osm_stations(allstations.values())
MERGE_STATIONS = {
26824135: [27371889, 1309031698, 1308998006], # Gare de Lyon
1731763794: [241928557], # Nation
3533789791: [3542631493], # Saint Lazare
243496033: [1731763792], # Etoile
3574677130: [1785132453], # Pont du Garigliano
3586000197: [137533248], # La Défense
269296749: [241926523], # Marne la Vallée Chessy
225119209: [3530909557, 1882558198], # CDG 2
3531066587: [1883637808], # La Fraternelle - Rungis
327613695: [3090733718], # Gare du Nord
255687197: [2367372622], # Issy Val de Seine
264778142: [2799009872], # Porte de la Villette
}

def merge_osm_stations(stations):
    """Merge stations that OSM models as several nodes (see MERGE_STATIONS)."""
    stations = list(stations)

    def get_station(osm_id):
        """Return (index, station) for the station owning osm_id, or (-1, None)."""
        for station_index in range(len(stations)):
            if osm_id in stations[station_index].osm_ids:
                return station_index, stations[station_index]
        return -1, None
for osm_id, ids_to_merge in MERGE_STATIONS.items():
_, receiver = get_station(osm_id)
for id_to_merge in ids_to_merge:
index_to_merge, station_to_merge = get_station(id_to_merge)
receiver.merge(station_to_merge)
del stations[index_to_merge]
return stations

def extract_accessible_stations(csv_filepath):
    """Extracts stations from a csv file listing accessible stations."""
    stations = []
    with open(csv_filepath) as reader:
        csvreader = csv.reader(reader)
        for row in csvreader:
            # Expected columns: name, _, latitude, longitude, OSM id.
            stations.append(Station(row[0], row[4], float(row[2]), float(row[3]), True))
    return stations

def merge_stations(all_stations, accessible_stations):
    """Merge the OSM station list with the accessible-station list."""
    merged_stations = []
    merged_count = 0
    for station1 in all_stations:
        found = False
        for station2 in accessible_stations:
            # Prefer the accessible record whenever the two share an OSM id.
            if station1.osm_ids & station2.osm_ids:
                merged_stations.append(station2)
                found = True
                merged_count += 1
        if not found and station1.name:
            merged_stations.append(station1)
    print(merged_count)
    return merged_stations
def print_to_csv(stations):
"""Print a list of stations to CSV."""
with open("full-list.csv", "w") as writer:
csvwriter = csv.writer(writer)
csvwriter.writerow(
["name", "osm_id", "latitude", "longitude", "accessible"])
for station in stations:
csvwriter.writerow(
[station.name, station.osm_ids, station.lat, station.lon, station.accessible])
def _parse_args():
"""Define and parse command-line arguments."""
parser = argparse.ArgumentParser(description='Extract station information.')
parser.add_argument('--osm_dump', type=str,
help='Path of the OSM dump containing train stations')
parser.add_argument('--accessible_csv', type=str,
help='Path to the list of accessible stations (CSV)')
return parser.parse_args()
def _main():
"""Script entry-point."""
args = _parse_args()
all_stations = extract_stations_from_dump(args.osm_dump)
accessible_stations = extract_accessible_stations(args.accessible_csv)
merged_stations = merge_stations(all_stations, accessible_stations)
print_to_csv(merged_stations)
if __name__ == '__main__':
_main()
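
# Illustrative invocation (file names are assumptions, not from the original
# repository):
#
#   python3 all_stations.py --osm_dump idf_stations.osm --accessible_csv accessible.csv
#
# The merged list is written to full-list.csv in the working directory.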
| 32.179487 | 94 | 0.629163 | 774 | 6,275 | 4.901809 | 0.289406 | 0.011861 | 0.017923 | 0.014233 | 0.056932 | 0.02214 | 0 | 0 | 0 | 0 | 0 | 0.064649 | 0.262948 | 6,275 | 194 | 95 | 32.345361 | 0.755676 | 0.138805 | 0 | 0.077465 | 0 | 0 | 0.046547 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.147887 | false | 0 | 0.028169 | 0.021127 | 0.316901 | 0.021127 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ccf73deff0cd7b3da75f4fe279624fa28407626b | 493 | py | Python | scripts/insert_plots.py | hamzaMahdi/sphero_formation | 71dd4a8097c578f9237ed1f65e3debdcc3a8cc5b | [
"MIT"
] | null | null | null | scripts/insert_plots.py | hamzaMahdi/sphero_formation | 71dd4a8097c578f9237ed1f65e3debdcc3a8cc5b | [
"MIT"
] | null | null | null | scripts/insert_plots.py | hamzaMahdi/sphero_formation | 71dd4a8097c578f9237ed1f65e3debdcc3a8cc5b | [
"MIT"
] | 1 | 2019-11-06T21:27:51.000Z | 2019-11-06T21:27:51.000Z | # Writes plots.txt: one LaTeX \subfloat{\includegraphics...} entry per test
# figure (tests 1-9, runs 1-5), arranged two figures per row.
file_name = 'plots.txt'
with open("../new_results/" + file_name, "w+") as f:
    counter = 1
    for i in range(1, 10):
        for j in range(1, 6):
            # Backslashes doubled so Python emits literal \subfloat, \includegraphics, \linewidth.
            f.write('\\subfloat{\\includegraphics[width=0.5\\linewidth]{figures/test_%d_%d.png}}\n' % (i, j))
            if counter % 2 == 0:
                # LaTeX row break after every second figure.
                f.write(r'\\ ')
            counter += 1
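
# Illustrative start of the generated plots.txt (first two entries, then the
# row break):
#
#   \subfloat{\includegraphics[width=0.5\linewidth]{figures/test_1_1.png}}
#   \subfloat{\includegraphics[width=0.5\linewidth]{figures/test_1_2.png}}
#   \\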
| 35.214286 | 102 | 0.622718 | 85 | 493 | 3.552941 | 0.694118 | 0.05298 | 0.05298 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028947 | 0.229209 | 493 | 13 | 103 | 37.923077 | 0.765789 | 0.338742 | 0 | 0 | 0 | 0.1 | 0.318885 | 0.229102 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ccf80cabc3a7e5b0b42749bb4a83f5a36f41004c | 5,615 | py | Python | lib/aquilon/worker/formats/entitlement.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 7 | 2015-07-31T05:57:30.000Z | 2021-09-07T15:18:56.000Z | lib/aquilon/worker/formats/entitlement.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 115 | 2015-03-03T13:11:46.000Z | 2021-09-20T12:42:24.000Z | lib/aquilon/worker/formats/entitlement.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 13 | 2015-03-03T11:17:59.000Z | 2021-09-09T09:16:41.000Z | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2018-2019 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entitlement formatter."""
from aquilon.aqdb.model import (
EntitlementArchetypeGrnMap,
EntitlementArchetypeUserMap,
EntitlementClusterGrnMap,
EntitlementClusterUserMap,
EntitlementGrnGrnMap,
EntitlementGrnUserMap,
EntitlementHostGrnMap,
EntitlementHostUserMap,
EntitlementOnArchetype,
EntitlementOnCluster,
EntitlementOnGrn,
EntitlementOnHost,
EntitlementOnHostEnvironment,
EntitlementOnLocation,
EntitlementOnPersonality,
EntitlementPersonalityGrnMap,
EntitlementPersonalityUserMap,
EntitlementToGrn,
EntitlementToUser,
EntitlementType,
)
from aquilon.worker.formats.formatters import ObjectFormatter
class EntitlementTypeFormatter(ObjectFormatter):
def format_raw(self, entit_type, indent="", embedded=True,
indirect_attrs=True):
details = []
details.append('{}Entitlement type: {}'.format(
indent, entit_type.name))
details.append('{} To GRN: {}'.format(
indent, 'enabled' if entit_type.to_grn else 'disabled'))
if entit_type.to_user_types:
user_types = set(m.user_type.name
for m in entit_type.to_user_types)
details.append('{} To User Types: {}'.format(
indent, ', '.join(sorted(user_types))))
if entit_type.comments:
details.append('{} Comments: {}'.format(
indent, entit_type.comments))
return '\n'.join(details)
ObjectFormatter.handlers[EntitlementType] = EntitlementTypeFormatter()
class EntitlementFormatter(ObjectFormatter):
def format_raw(self, entit, indent="", embedded=True, indirect_attrs=True):
details = []
def add(txt):
details.append('{}{}'.format(indent, txt))
add('Entitlement: {}'.format(entit.type.name))
if isinstance(entit, EntitlementToGrn):
add(' To {0:c}: {0.grn}'.format(entit.grn))
elif isinstance(entit, EntitlementToUser):
add(' To {type} {0:c}: {0.name}'.format(
entit.user, type=entit.user.type.name.title()))
if isinstance(entit, EntitlementOnHost):
add(' On {0:c}: {0.hardware_entity.primary_name.fqdn.fqdn}'
.format(entit.host))
elif isinstance(entit, EntitlementOnCluster):
add(' On {0:c}: {0.name}'.format(entit.cluster))
elif isinstance(entit, EntitlementOnPersonality):
add(' On {0:c}: {0.name}'.format(entit.personality))
elif isinstance(entit, EntitlementOnArchetype):
add(' On {0:c}: {0.name}'.format(entit.archetype))
elif isinstance(entit, EntitlementOnGrn):
add(' On {0:c}: {0.grn}'.format(entit.target_grn))
if isinstance(entit, EntitlementOnHostEnvironment):
add(' On {0:c}: {0.name}'.format(entit.host_environment))
if isinstance(entit, EntitlementOnLocation):
add(' On {0:c}: {0.name}'.format(entit.location))
return '\n'.join(details)
def fill_proto(self, entit, skeleton, embedded=True, indirect_attrs=True):
skeleton.type = entit.type.name
if isinstance(entit, EntitlementToGrn):
skeleton.eonid = entit.grn.eon_id
elif isinstance(entit, EntitlementToUser):
self.redirect_proto(entit.user, skeleton.user,
indirect_attrs=False)
if isinstance(entit, EntitlementOnHost):
self.redirect_proto(entit.host, skeleton.host,
indirect_attrs=False)
elif isinstance(entit, EntitlementOnCluster):
self.redirect_proto(entit.cluster, skeleton.cluster,
indirect_attrs=False)
elif isinstance(entit, EntitlementOnPersonality):
self.redirect_proto(entit.personality, skeleton.personality,
indirect_attrs=False)
elif isinstance(entit, EntitlementOnArchetype):
self.redirect_proto(entit.archetype, skeleton.archetype,
indirect_attrs=False)
elif isinstance(entit, EntitlementOnGrn):
skeleton.target_eonid = entit.target_grn.eon_id
if isinstance(entit, EntitlementOnHostEnvironment):
skeleton.host_environment = entit.host_environment.name
if isinstance(entit, EntitlementOnLocation):
self.redirect_proto(entit.location, skeleton.location,
indirect_attrs=False)
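
# Register one shared EntitlementFormatter for every concrete
# entitlement-to-target mapping class.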
for cls in [
EntitlementArchetypeGrnMap,
EntitlementArchetypeUserMap,
EntitlementClusterGrnMap,
EntitlementClusterUserMap,
EntitlementGrnGrnMap,
EntitlementGrnUserMap,
EntitlementHostGrnMap,
EntitlementHostUserMap,
EntitlementPersonalityGrnMap,
EntitlementPersonalityUserMap,
]:
ObjectFormatter.handlers[cls] = EntitlementFormatter()
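
# Illustrative shape of EntitlementFormatter.format_raw output for a
# host-scoped entitlement (labels depend on the {0:c} formatter; the values
# below are made up):
#
#   Entitlement: support
#     To Grn: grn:/example/team
#     On Host: host.example.com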
| 37.18543 | 79 | 0.655565 | 545 | 5,615 | 6.675229 | 0.297248 | 0.074217 | 0.052227 | 0.013469 | 0.279549 | 0.257009 | 0.182243 | 0.133865 | 0 | 0 | 0 | 0.007765 | 0.243099 | 5,615 | 150 | 80 | 37.433333 | 0.848235 | 0.120926 | 0 | 0.448598 | 0 | 0 | 0.067548 | 0.008545 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037383 | false | 0 | 0.018692 | 0 | 0.093458 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ccf83380f75272da17e827a8354142f3491d9b15 | 1,618 | py | Python | tests/test_fixture.py | macneiln/py4web | ed50294d650fb466a9a06c26b8f311091b2d0035 | [
"BSD-3-Clause"
] | 133 | 2019-07-24T11:32:34.000Z | 2022-03-25T02:43:55.000Z | tests/test_fixture.py | macneiln/py4web | ed50294d650fb466a9a06c26b8f311091b2d0035 | [
"BSD-3-Clause"
] | 396 | 2019-07-24T06:30:19.000Z | 2022-03-24T07:59:07.000Z | tests/test_fixture.py | macneiln/py4web | ed50294d650fb466a9a06c26b8f311091b2d0035 | [
"BSD-3-Clause"
] | 159 | 2019-07-24T11:32:37.000Z | 2022-03-28T15:17:05.000Z | from types import SimpleNamespace
import pytest
import threading
from py4web.core import Fixture
result = {'seq': []}
def run_thread(func, *a):
t = threading.Thread(target=func, args=a)
return t
class Foo(Fixture):
def on_request(self):
self._safe_local = SimpleNamespace()
@property
def bar(self):
return self._safe_local.a
@bar.setter
def bar(self, a):
self._safe_local.a = a
foo = Foo()
def before_request():
Fixture.__init_request_ctx__()
@pytest.fixture
def init_foo():
def init(key, a, evnt_done=None, evnt_play=None):
result['seq'].append(key)
before_request()
foo.on_request()
foo.bar = a
        if evnt_done:
            evnt_done.set()
        if evnt_play:
            evnt_play.wait()
result[key] = foo.bar
return foo
return init
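
# The event choreography below interleaves three "requests": t2 initializes its
# thread-local value and then blocks, t3 and the main thread run their own
# requests meanwhile, so the final asserts prove each thread only ever saw its
# own _safe_local state.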
def test_fixture_local_storage(init_foo):
assert init_foo('t1', 'a1') is foo
evnt_done = threading.Event()
evnt_play = threading.Event()
t2 = run_thread(init_foo, 't2', 'a2', evnt_done, evnt_play)
t3 = run_thread(init_foo, 't3', 'a3', None, None)
t2.start()
evnt_done.wait()
t3.start()
t3.join()
evnt_play.set()
t2.join()
assert foo.bar == 'a1'
assert result['t2'] == 'a2'
assert result['t3'] == 'a3'
assert ','.join(result['seq']) == 't1,t2,t3'

def test_fixture_error():
    before_request()
    # Accessing a _safe_local-backed property before on_request() runs must
    # raise a RuntimeError carrying a py4web hint.
with pytest.raises(RuntimeError) as err:
foo.bar
assert 'py4web hint' in err.value.args[0]
assert 'Foo object' in err.value.args[0]
| 22.164384 | 64 | 0.631644 | 231 | 1,618 | 4.229437 | 0.316017 | 0.04913 | 0.039918 | 0.028659 | 0.030706 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019465 | 0.237948 | 1,618 | 72 | 65 | 22.472222 | 0.772912 | 0.035847 | 0 | 0.037037 | 0 | 0 | 0.039153 | 0 | 0 | 0 | 0 | 0 | 0.12963 | 1 | 0.166667 | false | 0 | 0.074074 | 0.018519 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ccf92b8e5eba6aedbf6d4f91a3902a09d0c24f3f | 13,049 | py | Python | Scripts/simulation/objects/components/object_inventory_component.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/objects/components/object_inventory_component.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/objects/components/object_inventory_component.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\objects\components\object_inventory_component.py
# Compiled at: 2020-10-06 03:00:48
# Size of source mod 2**32: 16791 bytes
from animation.posture_manifest import AnimationParticipant
from event_testing.resolver import DoubleObjectResolver
from objects.components import componentmethod, types
from objects.components.get_put_component_mixin import GetPutComponentMixin
from objects.components.inventory import InventoryComponent
from objects.components.inventory_enums import InventoryType
from objects.components.inventory_item_trigger import ItemStateTrigger
from objects.components.inventory_owner_tuning import InventoryTuning
from objects.components.state import ObjectStateValue
from objects.object_enums import ItemLocation, ResetReason
from objects.system import create_object
from postures.posture_specs import PostureSpecVariable
from sims4.tuning.tunable import TunableList, TunableReference, TunableEnumEntry, Tunable, OptionalTunable, TunableTuple
from statistics.statistic import Statistic
import services, sims4.resources
logger = sims4.log.Logger('Inventory', default_owner='tingyul')
class ObjectInventoryComponent(GetPutComponentMixin, InventoryComponent, component_name=types.INVENTORY_COMPONENT):
DEFAULT_OBJECT_INVENTORY_AFFORDANCES = TunableList(TunableReference(description='\n Affordances for all object inventories.\n ',
manager=(services.get_instance_manager(sims4.resources.Types.INTERACTION))))
FACTORY_TUNABLES = {'description':'\n Generate an object inventory for this object\n ',
'inventory_type':TunableEnumEntry(description='\n Inventory Type must be set for the object type you add this for.\n ',
tunable_type=InventoryType,
default=InventoryType.UNDEFINED,
invalid_enums=(
InventoryType.UNDEFINED, InventoryType.SIM)),
'visible':Tunable(description='\n If this inventory is visible to player.',
tunable_type=bool,
default=True),
'starting_objects':TunableList(description='\n Objects in this list automatically populate the inventory when its\n owner is created. Currently, to keep the game object count down, an\n object will not be added if the object inventory already has\n another object of the same type.',
tunable=TunableReference(manager=(services.definition_manager()),
description='Objects to populate inventory with.',
pack_safe=True)),
'purchasable_objects':OptionalTunable(description='\n If this list is enabled, an interaction to buy the purchasable\n objects through a dialog picker will show on the inventory object.\n \n Example usage: a list of books for the bookshelf inventory.\n ',
tunable=TunableTuple(show_description=Tunable(description='\n Toggles whether the object description should show in the \n purchase picker.\n ',
tunable_type=bool,
default=False),
objects=TunableList(description='\n A list of object definitions that can be purchased.\n ',
tunable=TunableReference(manager=(services.definition_manager()),
description='')))),
'purge_inventory_state_triggers':TunableList(description='\n Trigger the destruction of all inventory items if the inventory owner hits\n any of the tuned state values.\n \n Only considers state-values present at and after zone-load finalize (ignores\n default values that change during load based on state triggers, for example). \n ',
tunable=ObjectStateValue.TunableReference(description='\n The state value of the owner that triggers inventory item destruction.\n ')),
'score_contained_objects_for_autonomy':Tunable(description='\n Whether or not to score for autonomy any objects contained in this object.',
tunable_type=bool,
default=True),
'item_state_triggers':TunableList(description="\n The state triggers to modify inventory owner's state value based on\n inventory items states.\n ",
tunable=ItemStateTrigger.TunableFactory()),
'allow_putdown_in_inventory':Tunable(description="\n This inventory allows Sims to put objects away into it, such as books\n or other carryables. Ex: mailbox has an inventory but we don't want\n Sims putting away items in the inventory.",
tunable_type=bool,
default=True),
'test_set':OptionalTunable(description='\n If enabled, the ability to pick up items from and put items in this\n object is gated by this test.\n ',
tunable=TunableReference(manager=(services.get_instance_manager(sims4.resources.Types.SNIPPET)),
class_restrictions=('TestSetInstance', ))),
'count_statistic':OptionalTunable(description='\n A statistic whose value will be the number of objects in this\n inventory. It will automatically be added to the object owning this\n type of component.\n ',
tunable=Statistic.TunableReference()),
'return_owned_objects':Tunable(description="\n If enabled, inventory objects will return to their household\n owner's inventory when this object is destroyed off lot. This is\n because build buy can undo actions on lot and cause object id\n collisions.\n \n We first consider the closest instanced Sims, and finally move to\n the household inventory if we can't move to a Sim's inventory.\n ",
tunable_type=bool,
default=False),
'_use_top_item_tooltip':Tunable(description="\n If checked, this inventory would use the top item's tooltip as its\n own tooltip. \n ",
tunable_type=bool,
default=False)}
def __init__(self, owner, inventory_type, visible, starting_objects, purchasable_objects, purge_inventory_state_triggers, score_contained_objects_for_autonomy, item_state_triggers, allow_putdown_in_inventory, test_set, count_statistic, return_owned_objects, _use_top_item_tooltip, **kwargs):
        super().__init__(owner, **kwargs)
self._inventory_type = inventory_type
self.visible = visible
self.starting_objects = starting_objects
self.purchasable_objects = purchasable_objects
self.purge_inventory_state_triggers = purge_inventory_state_triggers
self.score_contained_objects_for_autonomy = score_contained_objects_for_autonomy
self.item_state_triggers = item_state_triggers
self.allow_putdown_in_inventory = allow_putdown_in_inventory
self.test_set = test_set
self.count_statistic = count_statistic
self.return_owned_objects = return_owned_objects
self._use_top_item_tooltip = _use_top_item_tooltip
@property
def inventory_type(self):
return self._inventory_type
@property
def default_item_location(self):
return ItemLocation.OBJECT_INVENTORY
@componentmethod
def get_inventory_access_constraint(self, sim, is_put, carry_target, use_owner_as_target_for_resolver=False):
if use_owner_as_target_for_resolver:
def constraint_resolver(animation_participant, default=None):
if animation_participant in (AnimationParticipant.SURFACE, PostureSpecVariable.SURFACE_TARGET,
AnimationParticipant.TARGET, PostureSpecVariable.INTERACTION_TARGET):
return self.owner
return default
else:
constraint_resolver = None
return self._get_access_constraint(sim, is_put, carry_target, resolver=constraint_resolver)
@componentmethod
def get_inventory_access_animation(self, *args, **kwargs):
        return self._get_access_animation(*args, **kwargs)
@property
def should_score_contained_objects_for_autonomy(self):
return self.score_contained_objects_for_autonomy
@property
def use_top_item_tooltip(self):
return self._use_top_item_tooltip
def _get_inventory_count_statistic(self):
return self.count_statistic
def on_add(self):
for trigger in self.item_state_triggers:
self.add_state_trigger(trigger(self))
super().on_add()
    def on_reset_component_get_interdependent_reset_records(self, reset_reason, reset_records):
        # When an off-lot owner is destroyed (outside zone shutdown), hand any
        # household-owned items back to their household inventory; build/buy can
        # undo actions on lot and would otherwise risk object id collisions.
        if (reset_reason == ResetReason.BEING_DESTROYED
                and not services.current_zone().is_zone_shutting_down
                and not self.is_shared_inventory
                and self.return_owned_objects
                and not self.owner.is_on_active_lot()):
            household_manager = services.household_manager()
            objects_to_transfer = list(iter(self))
            for obj in objects_to_transfer:
                household_id = obj.get_household_owner_id()
                if household_id is not None:
                    household = household_manager.get(household_id)
                    if household is not None:
                        household.move_object_to_sim_or_household_inventory(obj)
        super().on_reset_component_get_interdependent_reset_records(reset_reason, reset_records)
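
    # Runs after build/buy fixup on zone load; this is when tuned starting
    # objects get seeded into the inventory.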
def on_post_bb_fixup(self):
self._add_starting_objects()
    def _add_starting_objects(self):
        for definition in self.starting_objects:
            # Keep the game object count down: at most one item per definition.
            if self.has_item_with_definition(definition):
                continue
            new_object = create_object(definition, loc_type=ItemLocation.OBJECT_INVENTORY)
            if new_object is None:
                logger.error('Failed to create object {}', definition)
                continue
            new_object.set_household_owner_id(self.owner.get_household_owner_id())
            if not self.player_try_add_object(new_object):
                logger.error('Failed to add object {} to inventory {}', new_object, self)
                new_object.destroy(source=self.owner, cause='Failed to add starting object to inventory.')
def component_interactable_gen(self):
yield self
def component_super_affordances_gen(self, **kwargs):
if self.visible:
for affordance in self.DEFAULT_OBJECT_INVENTORY_AFFORDANCES:
yield affordance
def _can_access(self, sim):
if self.test_set is not None:
resolver = DoubleObjectResolver(sim, self.owner)
result = self.test_set(resolver)
if not result:
return False
return True
@componentmethod
def can_access_for_pickup(self, sim):
if not self._can_access(sim):
return False
if any((self.owner.state_value_active(value) for value in InventoryTuning.INVALID_ACCESS_STATES)):
return False
return True
    @componentmethod
    def can_access_for_putdown(self, sim):
        if not self.allow_putdown_in_inventory:
            return False
        return self._can_access(sim)
def _check_state_value_for_purge(self, state_value):
return state_value in self.purge_inventory_state_triggers
    def _purge_inventory_from_state_change(self, new_value):
        if not self._check_state_value_for_purge(new_value):
            return
        current_zone = services.current_zone()
        if current_zone is None:
            return
        # Ignore state changes while the zone is still spinning up; load-time
        # purges are handled by on_finalize_load instead.
        if not current_zone.zone_spin_up_service.is_finished:
            return
        self.purge_inventory()
def on_state_changed(self, state, old_value, new_value, from_init):
if self.purge_inventory_state_triggers:
if not from_init:
self._purge_inventory_from_state_change(new_value)
def _purge_inventory_from_load_finalize(self):
owner_state_component = self.owner.state_component
if owner_state_component is None:
logger.error('Attempting to purge an inventory based on state-triggers but the owner ({}) has no state component. Purge fails.', self.owner)
return
for active_state_value in owner_state_component.values():
if self._check_state_value_for_purge(active_state_value):
self.purge_inventory()
return
def on_finalize_load(self):
if self.purge_inventory_state_triggers:
self._purge_inventory_from_load_finalize() | 58.515695 | 486 | 0.683884 | 1,554 | 13,049 | 5.490347 | 0.205277 | 0.02391 | 0.016878 | 0.022152 | 0.185068 | 0.097281 | 0.050164 | 0.024144 | 0.011955 | 0 | 0 | 0.007266 | 0.25113 | 13,049 | 223 | 487 | 58.515695 | 0.865841 | 0.025366 | 0 | 0.221053 | 0 | 0.052632 | 0.266913 | 0.008889 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121053 | false | 0 | 0.078947 | 0.036842 | 0.331579 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ccf98db2c183a542430a289ff4949ad327d07cde | 786 | py | Python | sqs_consumer/management/commands/process_queue.py | guilhermebferreira/sqs-consumer | 30e2a636219b7784e43d851570255193e258678d | [
"MIT"
] | null | null | null | sqs_consumer/management/commands/process_queue.py | guilhermebferreira/sqs-consumer | 30e2a636219b7784e43d851570255193e258678d | [
"MIT"
] | null | null | null | sqs_consumer/management/commands/process_queue.py | guilhermebferreira/sqs-consumer | 30e2a636219b7784e43d851570255193e258678d | [
"MIT"
] | null | null | null | from __future__ import absolute_import, unicode_literals
from django.core.management import BaseCommand, CommandError
from sqs_consumer.worker.service import WorkerService
class Command(BaseCommand):
help = 'Command to process tasks from one or more SQS queues'
def add_arguments(self, parser):
parser.add_argument('--queues', '-q',
dest='queue_names',
help='Name of queues to process, separated by commas')
def handle(self, *args, **options):
if not options['queue_names']:
raise CommandError('Queue names (--queues) not specified')
        # Tolerate whitespace around the comma separators.
        queue_names = [queue_name.strip() for queue_name in options['queue_names'].split(',')]
WorkerService().process_queues(queue_names)
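
# Illustrative invocation (queue names are assumptions):
#
#   python manage.py process_queue --queues orders,notifications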
| 34.173913 | 95 | 0.667939 | 92 | 786 | 5.521739 | 0.565217 | 0.11811 | 0.066929 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.232824 | 786 | 22 | 96 | 35.727273 | 0.842454 | 0 | 0 | 0 | 0 | 0 | 0.226463 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.214286 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ccfb60a5f0e99b7473379c8b27f4c338be01c980 | 2,222 | py | Python | openpharmacophore/tests/test_zinc.py | dprada/OpenPharmacophore | bfcf4bdafd586b27a48fd5d1f13614707b5e55a8 | [
"MIT"
] | 2 | 2021-07-10T05:56:04.000Z | 2021-08-04T14:56:47.000Z | openpharmacophore/tests/test_zinc.py | dprada/OpenPharmacophore | bfcf4bdafd586b27a48fd5d1f13614707b5e55a8 | [
"MIT"
] | 21 | 2021-04-27T06:05:05.000Z | 2021-11-01T23:19:36.000Z | openpharmacophore/tests/test_zinc.py | dprada/OpenPharmacophore | bfcf4bdafd586b27a48fd5d1f13614707b5e55a8 | [
"MIT"
] | 3 | 2021-06-21T19:09:47.000Z | 2021-07-16T01:16:27.000Z | from openpharmacophore.databases.zinc import get_zinc_urls, discretize_values
import pytest
@pytest.mark.parametrize("subset,mol_weight,logp,format", [
("Drug-Like", None, None, "smi"),
(None, (250, 350), (-1, 1), "smi"),
(None, (365, 415), (1.5, 2.25), "smi"),
("Drug-Like", None, None, "sdf"),
(None, (200, 300), (-1, 2), "sdf"),
])
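
# Each case exercises one URL-building path: a named subset versus explicit
# molecular-weight/logP windows, for both the 2D (smi) and 3D (sdf) tranches.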
def test_get_zinc_urls(subset, mol_weight, logp, format):
url_list = get_zinc_urls(
subset=subset,
mw_range=mol_weight,
logp_range=logp,
file_format=format,
)
if format == "smi":
base_url = "http://files.docking.org/2D/"
if subset == "Drug-like":
assert len(url_list) == 90 * 4 * 2
assert url_list[0] == base_url + "BA/BAAA.smi"
assert url_list[-1] == base_url + "JJ/JJEB.smi"
elif mol_weight == (250, 350):
assert len(url_list) == 12 * 4 * 2
assert url_list[0] == base_url + "BA/BAAA.smi"
assert url_list[-1] == base_url + "EC/ECEB.smi"
elif mol_weight == (365, 415):
assert len(url_list) == 12 * 4 * 2
assert url_list[0] == base_url + "EC/ECAA.smi"
assert url_list[-1] == base_url + "HE/HEEB.smi"
else:
base_url = "http://files.docking.org/3D/"
if subset == "Drug-like":
assert len(url_list) == 19420
assert url_list[0] == base_url + "JJ/EDRP/JJEDRP.xaa.sdf.gz"
assert url_list[-1] == base_url + "AB/AAMM/ABAAMM.xaa.sdf.gz"
elif mol_weight == (200, 300):
assert len(url_list) == 3720
assert url_list[0] == base_url + "AA/AAML/AAAAML.xaa.sdf.gz"
assert url_list[-1] == base_url + "DC/EDRP/DCEDRP.xaa.sdf.gz"
@pytest.mark.parametrize("value,lower", [
(230, True),
(484, False),
(600, True)
])
def test_discretize_values(value, lower):
bins = [200, 250, 300, 325, 350, 375, 400, 425, 450, 500, 550]
new_value = discretize_values(value=value, bins=bins, name="Test", lower=lower)
if value == 230:
assert new_value == 200
elif value == 484:
assert new_value == 500
else:
assert new_value == 550 | 35.83871 | 83 | 0.564806 | 311 | 2,222 | 3.868167 | 0.315113 | 0.093101 | 0.108063 | 0.0665 | 0.378221 | 0.336658 | 0.25852 | 0.23857 | 0.18537 | 0.137157 | 0 | 0.084525 | 0.275878 | 2,222 | 62 | 84 | 35.83871 | 0.663145 | 0 | 0 | 0.185185 | 0 | 0 | 0.14395 | 0.05803 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.037037 | false | 0 | 0.037037 | 0 | 0.074074 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ccfc779a1ced7c9e46cfbe2591e7ace76abaf9a2 | 643 | py | Python | tests/test.py | y95847frank/AutomatedTicketBot | 66754758430c7a1240b69259e32fcb452639c134 | [
"MIT"
] | 1 | 2021-03-26T05:07:20.000Z | 2021-03-26T05:07:20.000Z | tests/test.py | y95847frank/AutomatedTicketBot | 66754758430c7a1240b69259e32fcb452639c134 | [
"MIT"
] | null | null | null | tests/test.py | y95847frank/AutomatedTicketBot | 66754758430c7a1240b69259e32fcb452639c134 | [
"MIT"
] | null | null | null | import AutoTicketsBot as tBot
configDestination = 'var/config.yml'
args = tBot.addArgs()
config = tBot.configRead(configDestination)
if tBot.configWrite(configDestination, args, config) is True:
print("Successfully store new config to {}".format(configDestination))
ticketsBot = tBot.AutoTicketsBot(config)
#scheduleBot(ticketsBot, config['Config']['startTime'])
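
# Sign in (retrying up to 3 times), attempt the purchase, notify the user on
# success, then keep the bot alive for 15 minutes (waitTime=900) so checkout
# can finish; on any failure, shut down immediately.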
try:
tBot.websiteSignIn(ticketsBot, retryCounter=3)
tBot.buyTickets(ticketsBot)
tBot.notifyUser('AutoTicketsBot Notification', 'Got tickets!!!!!')
tBot.terminateBot(ticketsBot, waitTime=900)
except RuntimeError as e:
tBot.terminateBot(ticketsBot, waitTime=0)
print(e) | 29.227273 | 71 | 0.785381 | 71 | 643 | 7.112676 | 0.56338 | 0.055446 | 0.10297 | 0.134653 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008562 | 0.091757 | 643 | 22 | 72 | 29.227273 | 0.856164 | 0.083981 | 0 | 0 | 0 | 0 | 0.156197 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ccfe49b139702ec62120531b875985143b174591 | 751 | py | Python | kattis/k_ones.py | ivanlyon/exercises | 0792976ae2acb85187b26a52812f9ebdd119b5e8 | [
"MIT"
] | null | null | null | kattis/k_ones.py | ivanlyon/exercises | 0792976ae2acb85187b26a52812f9ebdd119b5e8 | [
"MIT"
] | null | null | null | kattis/k_ones.py | ivanlyon/exercises | 0792976ae2acb85187b26a52812f9ebdd119b5e8 | [
"MIT"
] | null | null | null | '''
Digit count of the smallest repunit (a number written only with the digit '1')
that is a multiple of the input number
Status: Accepted
'''
###############################################################################
def main():
"""Read input and print output"""
while True:
try:
number = int(input())
except EOFError:
break
if number == 1:
print('1')
else:
assert number % 2 != 0
assert number % 5 != 0
digits, remainder = 1, 1
while remainder:
remainder = (remainder * 10 + 1) % number
digits += 1
print(digits)
###############################################################################
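# Worked example (illustrative, not part of the original solution): for n = 3
# the loop walks remainders 1 -> 11 % 3 = 2 -> 111 % 3 = 0, so it prints 3,
# since 111 = 3 * 37 is the smallest repunit multiple of 3.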
if __name__ == '__main__':
main()
| 22.757576 | 79 | 0.370173 | 61 | 751 | 4.42623 | 0.590164 | 0.044444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026157 | 0.338216 | 751 | 32 | 80 | 23.46875 | 0.517103 | 0.134487 | 0 | 0 | 0 | 0 | 0.018789 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0.055556 | false | 0 | 0 | 0 | 0.055556 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ccfe72e943c07b30fc915317d0d3a67d9c72f9cc | 2,190 | py | Python | back/api/message.py | LyonParapente/EventOrganizer | b263c2ce61b6ad1d6c414eb388ca5ee9492a9b73 | [
"MIT"
] | 4 | 2018-07-29T10:48:53.000Z | 2018-08-23T13:02:15.000Z | back/api/message.py | LyonParapente/EventOrganizer | b263c2ce61b6ad1d6c414eb388ca5ee9492a9b73 | [
"MIT"
] | 7 | 2018-11-15T15:17:45.000Z | 2021-05-11T19:58:55.000Z | back/api/message.py | LyonParapente/EventOrganizer | b263c2ce61b6ad1d6c414eb388ca5ee9492a9b73 | [
"MIT"
] | null | null | null | from flask import request, abort
from flask_restful_swagger_3 import Resource, swagger
from flask_jwt_extended import jwt_required, get_jwt_identity, get_jwt
from models.message import Message, MessageCreate
from database.manager import db
from emails import send_new_message
class MessageAPICreate(Resource):
@jwt_required()
@swagger.doc({
'tags': ['message'],
'security': [
{'BearerAuth': []}
],
'requestBody': {
'required': True,
'content': {
'application/json': {
'schema': Message
}
}
},
'responses': {
'201': {
'description': 'Created message',
'content': {
'application/json': {
'schema': Message
}
}
},
'401': {
'description': 'Not authenticated'
},
'403': {
'description': 'Update forbidden'
}
}
})
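  # Illustrative request body (the values are assumptions; the fields mirror
  # the Message schema):
  #
  #   {"event_id": 42, "comment": "See you at the landing field", "editLatest": false}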
def post(self):
"""Create a message"""
args = request.json
author_id = get_jwt_identity()
args['author_id'] = author_id
try:
# Validate request body with schema model
message = MessageCreate(**args)
except ValueError as e:
abort(400, e.args[0])
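
    # 'editLatest' asks to amend the caller's most recent comment on the event
    # instead of creating a new one.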
props = None
editLatest = message['editLatest']
del message['editLatest']
if editLatest:
last_msg = db.get_last_message(message['event_id'])
if last_msg and last_msg['author_id'] == author_id:
nb = db.edit_message(last_msg['id'], message['comment'], last_msg['author_id'], last_msg['event_id'])
if nb == 1:
last_msg['comment'] = message['comment']
props = last_msg
else:
abort(500, 'Error updating comment')
else:
abort(403, 'Can only update the latest comment if it is yours')
else:
try:
props = db.insert_message(**message)
except Exception as e:
abort(500, e.args[0])
    # Send the new-comment email notification (skipped when editing).
if not editLatest:
claims = get_jwt()
author_name = claims['firstname'] + ' ' + claims['lastname']
send_new_message(author_name, author_id, props['event_id'], props['comment'])
return Message(**props), 201, {'Location': request.path + '/' + str(props['id'])}
| 27.721519 | 109 | 0.590411 | 244 | 2,190 | 5.131148 | 0.405738 | 0.044728 | 0.022364 | 0.044728 | 0.055911 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017666 | 0.276256 | 2,190 | 78 | 110 | 28.076923 | 0.77224 | 0.028767 | 0 | 0.157143 | 0 | 0 | 0.191509 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014286 | false | 0 | 0.085714 | 0 | 0.128571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |