hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cc0e4c530fb291e69617a606ce0bd3a8fad89aa2 | 5,000 | py | Python | src/stage2v9/cascade_pyramid_network_v9.py | gathierry/FashionAI-KeyPointsDetectionOfApparel | 2e0942b42b4a9cd974cdddc151675738dc8a8cb4 | [
"Apache-2.0"
] | 174 | 2018-06-04T02:12:34.000Z | 2022-03-30T07:01:29.000Z | src/stage2v9/cascade_pyramid_network_v9.py | gathierry/FashionAI-KeyPointsDetectionOfApparel | 2e0942b42b4a9cd974cdddc151675738dc8a8cb4 | [
"Apache-2.0"
] | 9 | 2018-06-05T11:32:05.000Z | 2021-09-13T09:10:05.000Z | src/stage2v9/cascade_pyramid_network_v9.py | gathierry/FashionAI-KeyPointsDetectionOfApparel | 2e0942b42b4a9cd974cdddc151675738dc8a8cb4 | [
"Apache-2.0"
] | 55 | 2018-06-05T09:50:52.000Z | 2022-03-30T15:58:00.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from src.stage2v9.senet import senet154
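# Cascade Pyramid Network for keypoint detection: GlobalNet builds a feature
# pyramid over a SENet-154 backbone, and RefineNet fuses all pyramid levels
# into the final keypoint heatmaps.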
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class GlobalNet(nn.Module):
def __init__(self, config):
super(GlobalNet, self).__init__()
pretrained_model = senet154(num_classes=1000, pretrained='imagenet')
self.layer0 = pretrained_model.layer0
self.layer1 = pretrained_model.layer1
self.layer2 = pretrained_model.layer2
self.layer3 = pretrained_model.layer3
self.layer4 = pretrained_model.layer4
# Lateral layers
self.latlayer1 = nn.Conv2d(2048, 256, kernel_size=1, stride=1, padding=0)
self.latlayer2 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)
self.latlayer3 = nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0)
self.latlayer4 = nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0)
# Top-down layers
self.toplayer1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.toplayer2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.toplayer3 = nn.Conv2d(256, config.num_keypoints, kernel_size=3, stride=1, padding=1)
    def _upsample_add(self, x, y):
        _, _, H, W = y.size()
        # F.upsample is deprecated; F.interpolate is the equivalent modern call
        return F.interpolate(x, size=(H, W), mode='bilinear') + y
def forward(self, x):
# Bottom-up
c1 = self.layer0(x)
c2 = self.layer1(c1) # ch = 256
c3 = self.layer2(c2) # ch = 512
c4 = self.layer3(c3) # ch = 1024
c5 = self.layer4(c4) # ch = 2048
# Top-down
p5 = self.latlayer1(c5)
p4 = self._upsample_add(p5, self.latlayer2(c4))
p4 = self.toplayer1(p4)
p3 = self._upsample_add(p4, self.latlayer3(c3))
p3 = self.toplayer2(p3)
p2 = self._upsample_add(p3, self.latlayer4(c2))
p2 = self.toplayer3(p2)
return p2, p3, p4, p5
class RefineNet(nn.Module):
def __init__(self, config):
super(RefineNet, self).__init__()
self.bottleneck2 = Bottleneck(config.num_keypoints, 64, 1)
self.bottleneck3 = nn.Sequential(Bottleneck(256, 64, 1),
nn.ConvTranspose2d(256, 256, kernel_size=2 * 2, stride=2, padding=2 // 2))
self.bottleneck4 = nn.Sequential(Bottleneck(256, 64, 1),
Bottleneck(256, 64, 1),
nn.ConvTranspose2d(256, 256, kernel_size=2 * 4, stride=4, padding=4 // 2))
self.bottleneck5 = nn.Sequential(Bottleneck(256, 64, 1),
Bottleneck(256, 64, 1),
Bottleneck(256, 64, 1),
nn.ConvTranspose2d(256, 256, kernel_size=2 * 8, stride=8, padding=8 // 2))
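        # Each ConvTranspose2d(kernel_size=2*s, stride=s, padding=s//2) upsamples
        # by exactly s, so p3/p4/p5 reach p2's resolution before concatenation.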
self.output = nn.Sequential(Bottleneck(1024, 64, 1),
nn.Conv2d(256, config.num_keypoints, kernel_size=1, stride=1, padding=0))
def forward(self, p2, p3, p4, p5):
p2 = self.bottleneck2(p2)
p3 = self.bottleneck3(p3)
p4 = self.bottleneck4(p4)
p5 = self.bottleneck5(p5)
return self.output(torch.cat([p2, p3, p4, p5], dim=1))
class CascadePyramidNetV9(nn.Module):
def __init__(self, config):
super(CascadePyramidNetV9, self).__init__()
self.global_net = GlobalNet(config)
self.refine_net = RefineNet(config)
def forward(self, x):
p2, p3, p4, p5 = self.global_net(x)
out = self.refine_net(p2, p3, p4, p5)
return p2, out
if __name__ == '__main__':
    from src.config import Config
    config = Config('outwear')
    net = CascadePyramidNetV9(config)
    # torch.autograd.Variable is deprecated since PyTorch 0.4; plain tensors suffice
    fms = net(torch.randn(1, 3, 512, 512))
| 42.735043 | 116 | 0.5848 | 635 | 5,000 | 4.474016 | 0.187402 | 0.052798 | 0.041183 | 0.035903 | 0.331925 | 0.303414 | 0.295319 | 0.181626 | 0.143963 | 0.109117 | 0 | 0.089442 | 0.2934 | 5,000 | 116 | 117 | 43.103448 | 0.71469 | 0.0174 | 0 | 0.113402 | 0 | 0 | 0.006475 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.092784 | false | 0 | 0.072165 | 0 | 0.268041 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc0e89b32cdd7ff2a29574cb8fe3595e3139fa61 | 3,131 | py | Python | test_ribbit.py | icr-ctl/gnatcatcher | ddfdde8442a66d2409d16b7eddb769a579460652 | [
"MIT"
] | 1 | 2022-01-08T04:35:48.000Z | 2022-01-08T04:35:48.000Z | test_ribbit.py | icr-ctl/gnatcatcher | ddfdde8442a66d2409d16b7eddb769a579460652 | [
"MIT"
] | null | null | null | test_ribbit.py | icr-ctl/gnatcatcher | ddfdde8442a66d2409d16b7eddb769a579460652 | [
"MIT"
] | 1 | 2022-01-29T15:42:29.000Z | 2022-01-29T15:42:29.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 9 12:14:23 2021
@author: amandabreton
"""
# suppress warnings
import warnings
warnings.simplefilter('ignore')
# import packages
import numpy as np
from glob import glob
import pandas as pd
from matplotlib import pyplot as plt
import argparse
import yaml
# local imports from opensoundscape
from opensoundscape.audio import Audio
from opensoundscape.spectrogram import Spectrogram
from opensoundscape.ribbit import ribbit
from opensoundscape.helpers import run_command
# create big visuals
plt.rcParams['figure.figsize'] = [15, 8]
# download files from box.com to the current directory
#_ = run_command(f"curl -L https://pitt.box.com/shared/static/9mrxib85y1jmf1ybbjvbr0tv171iekvy.gz -o ./great_plains_toad_dataset.tar.gz")# | tar -xz -f")
#_ = run_command(f"tar -xz -f great_plains_toad_dataset.tar.gz")
# this will print `0` if everything went correctly. If it prints 256 or another number, something is wrong (try downloading from the link above)
# using our own data: just put the audio into a folder w/ same name
#parser = argparse.ArgumentParser()
#parser.add_argument('config_filename')
#args = parser.parse_args()
#CONFIG_FILE = args.config_filename
#with open(CONFIG_FILE) as f:
# configs = yaml.load(f, Loader=yaml.SafeLoader)
#audio_path = configs['path']
# audio_path is the path to a single audio file
# example:
# path: /Users/amandabreton/Documents/GitHub/gnatcatcher/sounds/great_plains_toad_dataset/5D3C4530.WAV'
# audio_path = np.sort(glob('./great_plains_toad_dataset/5D31ED38.WAV'))[0]
audio_path = np.sort(glob('./great_plains_toad_dataset/*'))[0]
# load the audio file into an OpenSoundscape Audio object
audio = Audio.from_file(audio_path)
# create a Spectrogram object
spectrogram = Spectrogram.from_audio(audio)
# %%
# minimum and maximum rate of pulsing (pulses per second) to search for
pulse_rate_range = [10, 20]
# look for a vocalization in the range of 2000-2500 Hz
signal_band = [2000, 2500]
# subtract the amplitude signal from these frequency ranges
noise_bands = [[0, 200], [10000, 10100]]
# divides the signal into segments this many seconds long, analyzes each independently
window_length = 2 # (seconds)
# if True, it will show the power spectrum plot for each audio segment
show_plots = True
# %% this is the part that gives you scores
# get the audio file path
#audio_path = np.sort(glob('./great_plains_toad_dataset/*'))[1]
# make the spectrogram
spec = Spectrogram.from_audio(Audio.from_file(audio_path))
# run RIBBIT
scores, times = ribbit(
spec,
pulse_rate_range=pulse_rate_range,
signal_band=signal_band,
window_len=window_length,
noise_bands=noise_bands,
plot=False)
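# ribbit returns one detection score per analysis window and each window's start time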
# show the spectrogram
spec.plot()
# %% plot the score vs time of each window
times = np.array(times)
times = times/2
plt.scatter(times, scores)
plt.xlabel('window start time (sec)')
plt.ylabel('RIBBIT score')
plt.title('RIBBIT scores')
print('The max score is:')
print(np.max(scores))
maxscore = np.max(scores)
| 31.626263 | 153 | 0.738103 | 459 | 3,131 | 4.912854 | 0.46841 | 0.027938 | 0.039911 | 0.058537 | 0.102439 | 0.102439 | 0.054545 | 0.054545 | 0.054545 | 0 | 0 | 0.030107 | 0.161929 | 3,131 | 98 | 154 | 31.94898 | 0.829268 | 0.569147 | 0 | 0 | 0 | 0 | 0.087558 | 0.022273 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.282051 | 0 | 0.282051 | 0.051282 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc0f06b88f7d0b9237235174b2b56cdfa1603af2 | 1,386 | py | Python | vision_utils/align_face.py | miltonbd/computer_vision_utils | bcc72fe1df2d84bdb61e934cc916fb4442d080e5 | [
"Apache-2.0"
] | 1 | 2020-08-12T21:48:32.000Z | 2020-08-12T21:48:32.000Z | vision_utils/align_face.py | miltonbd/computer_vision_utils | bcc72fe1df2d84bdb61e934cc916fb4442d080e5 | [
"Apache-2.0"
] | null | null | null | vision_utils/align_face.py | miltonbd/computer_vision_utils | bcc72fe1df2d84bdb61e934cc916fb4442d080e5 | [
"Apache-2.0"
] | null | null | null | from mtcnn.mtcnn import MTCNN
import cv2
import argparse
from utils_all.fileutils import *
import glob
parser = argparse.ArgumentParser(description='Detect and align face')
parser.add_argument('--data_set', default='/home/milton/PycharmProjects/facelock_app/facenet_mtcnn_to_mobile/custom_faces', type=str,help='Dataset Path, each class must in its own dir')
parser.add_argument('--output',default='/home/milton/PycharmProjects/facelock_app/facenet_mtcnn_to_mobile/custom_face_aligned', type=str,help='Out save dir')
if __name__ == "__main__":
args = parser.parse_args()
data_set=args.data_set
output=args.output
create_dir_if_not_exists(output)
file_paths=glob.glob(join(data_set,"**","**"))
detector = MTCNN()
for file_path in file_paths:
img=cv2.imread(file_path)
if img is None:
print("Invalid Image: {}".format(file_path))
continue
output_data=detector.detect_faces(img)
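        # detect_faces returns a list of result dicts, one per detected face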
if len(output_data) == 0:
continue
print(output_data)
box_ = output_data[0]['box']
print(box_)
        x, y, w, h = box_
        # MTCNN boxes are [x, y, width, height]; numpy images index as [row, col],
        # so crop rows with y:y+h and columns with x:x+w
        img = img[y:y+h, x:x+w, :]
class_name = file_path.split('/')[-2].replace(' ','_')
class_dir=join(output,class_name)
create_dir_if_not_exists(class_dir)
output_path=join(class_dir,basename(file_path))
cv2.imwrite(output_path,img) | 38.5 | 185 | 0.686147 | 196 | 1,386 | 4.556122 | 0.413265 | 0.044793 | 0.038074 | 0.071669 | 0.199328 | 0.154535 | 0.154535 | 0.154535 | 0.154535 | 0.154535 | 0 | 0.005329 | 0.18759 | 1,386 | 36 | 186 | 38.5 | 0.787744 | 0 | 0 | 0.060606 | 0 | 0 | 0.211247 | 0.11752 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.151515 | 0 | 0.151515 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc0f8c0b517442ab7c1b943878a633495282f29e | 897 | py | Python | function-args.py | Phoenix1327/Python-tutorial | f3e1e91b83f6ad5ccf6ca7d12ae303b19e1387c4 | [
"Apache-2.0"
] | null | null | null | function-args.py | Phoenix1327/Python-tutorial | f3e1e91b83f6ad5ccf6ca7d12ae303b19e1387c4 | [
"Apache-2.0"
] | null | null | null | function-args.py | Phoenix1327/Python-tutorial | f3e1e91b83f6ad5ccf6ca7d12ae303b19e1387c4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Phoenix1327'
def enroll(name, gender, age=6, city='Beijing'):
print('name:', name)
print('gender:', gender)
print('age:', age)
print('city:', city)
enroll('Sarah', 'F')
enroll('Bob', 'M', 7)
enroll('Adam', 'M', city='Tianjin')
def add_end(L=None):
if L is None:
L = []
L.append('END')
return L
print(add_end([1,2,3]))
print(add_end())
def calc(*numbers):
sum = 0
for n in numbers:
sum += n*n
return sum
print(calc(1, 2, 3))
nums = [1, 2, 3]
print(calc(*nums))
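# Tower of Hanoi: recursively record the moves of n disks from org to dst (via "via") in L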
def move(n, org, via, dst, L):
if n == 1:
        L.append('# %s --> %s' % (org, dst))
else:
move(n-1, org, dst, via, L)
move(1, org, via, dst, L)
move(n-1, via, org, dst, L)
L = []
move(3, 'A', 'B', 'C', L)
for x, value in enumerate(L):
print('step:%d %s' % (x+1, value))
| 18.6875 | 48 | 0.516165 | 148 | 897 | 3.081081 | 0.391892 | 0.039474 | 0.019737 | 0.035088 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035982 | 0.25641 | 897 | 47 | 49 | 19.085106 | 0.647676 | 0.047938 | 0 | 0.057143 | 0 | 0 | 0.10446 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114286 | false | 0 | 0 | 0 | 0.171429 | 0.257143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc12d7b279f4673b3b488ada91be35001d28c31d | 5,561 | py | Python | webapp/services/admin.py | zorglub42/marv2plage | 436fb8b6b05a1c28011786ce390b45b4a5567483 | [
"Apache-2.0"
] | null | null | null | webapp/services/admin.py | zorglub42/marv2plage | 436fb8b6b05a1c28011786ce390b45b4a5567483 | [
"Apache-2.0"
] | null | null | null | webapp/services/admin.py | zorglub42/marv2plage | 436fb8b6b05a1c28011786ce390b45b4a5567483 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 Zorglub42 {contact(at)zorglub42.fr}.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
"""FFBC8 Webapp Administration service implementation."""
import json
import logging
import socket
import subprocess
import settings
class AdminService(object):
"""admin class."""
def __init__(self):
"""Initialize Admin Service."""
self.logger = logging.getLogger(__name__)
def _send_to_arduino(self, command):
"""Send a commant to arduino and return result."""
res = ""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as soc:
soc.connect(
(
settings.conf["ARDUINO"]["server"],
settings.conf["ARDUINO"]["port"]
)
)
soc.sendall((command + "\n").encode("utf-8"))
try:
while True:
data = soc.recv(1024)
if not data:
break
res += data.decode("utf-8").replace("\r", "")
except Exception:
self.logger.exception("Error while communication with arduino")
finally:
soc.close()
return res
def load_conf(self):
"""Load config from ENV vars or config file."""
with open('/etc/meteo/webapp-settings.json') as conf_file:
config = json.load(conf_file)
settings.conf = config
def save_conf(self):
"""Save config to config file."""
with open('/etc/meteo/webapp-settings.json', 'w') as outfile:
json.dump(settings.conf, outfile, indent=4)
def set_time(self, iso_date):
"""Set system time from iso date
Arguments:
iso_date {String} -- DateTime to set in ISO format
"""
self.logger.info("Setting date to %s requested", iso_date)
cmd = "{} {}".format(settings.conf["COMMANDS"]["settime"], iso_date)
self.logger.debug(
"Executing %s",
cmd
)
subprocess.run(cmd.split(" "))
return "OK"
def execute_command(self, command):
"""Execute a command."""
self.logger.info("%s requested", command)
self.logger.debug(
"Executing %s",
settings.conf["COMMANDS"][command]
)
subprocess.run(settings.conf["COMMANDS"][command].split(" "))
return "OK"
def request_mag_calibration(self):
"""Request mag calibration to Arduino."""
msg = self._send_to_arduino("CM")
return {
"status": "OK",
"message": msg
}
def request_compass_support(self):
"""Request compass support to Arduino."""
msg = self._send_to_arduino("HC")
if "HC 1" in msg:
return {
"status": "OK",
"message": "Compass supported"
}
else:
return {
"status": "KO",
"message": "Compass not supported"
}
def request_find_north(self):
"""Request Arduino to find north."""
msg = self._send_to_arduino("MS")
return {
"status": "OK",
"message": msg
}
def _net_exists(self, nets, ssid):
"""Return true if ssid already in nets."""
ret = False
for net in nets:
ret = ret or net["ssid"] == ssid
return ret
def get_wifi_hotspot(self):
"""
Find wifi hotposts aurond device
and current wifi configuration
"""
self.load_conf()
cmd = (
"iwlist " + settings.conf["WIFI"]["if"] + " scan| "
'egrep "Qual|SSID" || /bin/true'
)
data = subprocess.check_output(
cmd,
shell=True
)
data = data.decode().split('\n')[:-1]
nets = []
i = 0
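        # iwlist prints alternating "Quality=x/y ..." and 'ESSID:"..."' lines;
        # parse them in pairs and rescale the quality ratio to 0-4 bars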
while i < len(data):
qual_parts = data[i].strip().split("=")
ratio = qual_parts[1].split(" ")
ratio = ratio[0].split("/")
qual = int(((float(ratio[0]) / float(ratio[1]))*100)/25)
ssid_parts = data[i + 1].strip().split('"')
ssid = ssid_parts[1]
if qual > 0 and ssid != "" and not self._net_exists(nets, ssid):
nets.append(
{
"ssid": ssid,
"quality": qual
}
)
i += 2
res = settings.conf["WIFI"].copy()
res["networks"] = sorted(
nets, key=lambda i: i['quality'], reverse=True
)
cmd = (
"ifconfig " + settings.conf["WIFI"]["if"] + "| "
"grep 'ether ' | "
"awk '{print $2}'"
)
res["mac"] = subprocess.check_output(
cmd,
shell=True
).decode().replace("\n", "")
return res
def apply_wifi(self, wifi_conf):
"""Apply wifi configuration."""
self.load_conf()
settings.conf["WIFI"]["mode"] = wifi_conf["mode"]
settings.conf["WIFI"]["client"] = wifi_conf["client"]
settings.conf["WIFI"]["hotspot"] = wifi_conf["hotspot"]
self.save_conf()
subprocess.call(settings.conf["COMMANDS"]["setwifi"], shell=True) | 30.059459 | 79 | 0.506923 | 586 | 5,561 | 4.711604 | 0.354949 | 0.060848 | 0.03477 | 0.014125 | 0.142702 | 0.096342 | 0.052879 | 0.031873 | 0.031873 | 0 | 0 | 0.01037 | 0.358389 | 5,561 | 185 | 80 | 30.059459 | 0.763453 | 0.151591 | 0 | 0.19084 | 0 | 0 | 0.121728 | 0.013525 | 0 | 0 | 0 | 0 | 0 | 1 | 0.091603 | false | 0.022901 | 0.038168 | 0 | 0.206107 | 0.007634 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc13ed04398d7c196e01dc4803eceafc032fc532 | 9,893 | py | Python | cube_builder/utils/image.py | brazil-data-cube/cube-builder | 5ca4b3fa47436e46626d6bb198373a70176f484b | [
"MIT"
] | 5 | 2020-02-06T13:53:39.000Z | 2021-11-30T11:25:46.000Z | cube_builder/utils/image.py | brazil-data-cube/cube-builder | 5ca4b3fa47436e46626d6bb198373a70176f484b | [
"MIT"
] | 110 | 2020-02-03T12:34:05.000Z | 2022-03-28T16:14:14.000Z | cube_builder/utils/image.py | brazil-data-cube/cube-builder | 5ca4b3fa47436e46626d6bb198373a70176f484b | [
"MIT"
] | 5 | 2020-02-03T11:46:00.000Z | 2020-07-11T11:16:33.000Z | #
# This file is part of Python Module for Cube Builder.
# Copyright (C) 2019-2021 INPE.
#
# Cube Builder is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
"""Define a utility to validate merge images."""
import logging
import os
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import List, Optional, Union
from urllib.parse import urlparse
import numpy
import rasterio
from rasterio._warp import Affine
from sqlalchemy.engine.result import ResultProxy, RowProxy
from ..config import Config
from .processing import SmartDataSet, generate_cogs, save_as_cog
LANDSAT_BANDS = dict(
int16=['band1', 'band2', 'band3', 'band4', 'band5', 'band6', 'band7', 'evi', 'ndvi'],
uint16=['pixel_qa']
)
def validate(row: RowProxy):
"""Validate each merge result."""
url = row.link.replace('chronos.dpi.inpe.br:8089/datastore', 'www.dpi.inpe.br/newcatalog/tmp')
errors = list()
if 'File' in row.traceback:
try:
with rasterio.open('/vsicurl/{}'.format(url), 'r') as data_set:
logging.debug('File {} ok'.format(url))
if row.collection_id.startswith('LC8'):
data_type = data_set.meta.get('dtype')
band_dtype = LANDSAT_BANDS.get(data_type)
if band_dtype is None:
errors.append(
dict(
message='Band {} mismatch with default Landsat 8'.format(row.band),
band=row.band,
file=url
)
)
file_name = Path(url).stem
band_name = file_name.split('_')[8]
if band_name not in band_dtype:
errors.append(
dict(
message='Band {} should be {}'.format(row.band, data_type),
band=row.band,
file=url
)
)
        except rasterio.RasterioIOError:
if not row.traceback:
errors.append(dict(message=f'File not found or invalid. ({url})', band=row.band,
file=url, filename=_file_name(url)))
return row, errors
def _file_name(url: str) -> str:
parsed = urlparse(url)
    # Sentinel-2 .SAFE products: the scene id is the parent directory of the .SAFE folder
if '.SAFE' in parsed.path:
safe_pos = parsed.path.index('.SAFE') + 5
abs_safe_folder = parsed.path[:safe_pos]
scene_id = os.path.basename(os.path.dirname(abs_safe_folder))
return scene_id
return os.path.basename(parsed.path)
def validate_merges(images: ResultProxy, num_threads: int = Config.MAX_THREADS_IMAGE_VALIDATOR) -> dict:
"""Validate each merge retrieved from ``Activity.list_merge_files``.
Args:
images: Activity merge images
num_threads: Concurrent processes to validate
"""
with ThreadPoolExecutor(max_workers=num_threads) as executor:
futures = executor.map(validate, images)
output = dict()
for row, errors in futures:
if row is None:
continue
output.setdefault(row.date, dict())
output[row.date].setdefault('bands', dict())
output[row.date].setdefault('errors', list())
output[row.date].setdefault('collections', set())
output[row.date]['collections'].add(row.data_set)
output[row.date]['file'] = row.file
output[row.date]['errors'].extend(errors)
if row.traceback:
output[row.date]['errors'].append(dict(message=row.traceback, band=row.band,
filename=_file_name(row.link)))
output[row.date]['bands'].setdefault(row.band, list())
output[row.date]['bands'][row.band].append(row.link)
for element in output.values():
element['collections'] = list(element['collections'])
return output
def create_empty_raster(location: str, proj4: str, dtype: str, xmin: float, ymax: float,
resolution: List[float], dist: List[float], nodata: float, cog=True):
"""Create an data set filled out with nodata.
This method aims to solve the problem to generate an empty scene to make sure in order to
follow the data cube timeline.
Args:
location (str): Path where file will be generated.
proj4 (str): Proj4 with Coordinate Reference System.
dtype (str): Data type
xmin (float): Image minx (Related to geotransform)
ymax (float): Image ymax
resolution (List[float]): Pixel resolution (X, Y)
dist (List[float]): The distance of X, Y (Scene offset)
nodata (float): Scene nodata.
cog (bool): Flag to generate datacube. Default is `True`.
"""
resx, resy = resolution
distx, disty = dist
cols = round(distx / resx)
rows = round(disty / resy)
new_res_x = distx / cols
new_res_y = disty / rows
transform = Affine(new_res_x, 0, xmin, 0, -new_res_y, ymax)
options = dict(
width=cols,
height=rows,
nodata=nodata,
crs=proj4,
transform=transform,
count=1
)
ds = SmartDataSet(str(location), mode='w', dtype=dtype, driver='GTiff', **options)
ds.close()
if cog:
generate_cogs(str(location), str(location))
return str(location)
def match_histogram_with_merges(source: str, source_mask: str, reference: str, reference_mask: str, block_size: int = None):
"""Normalize the source image histogram with reference image.
This functions implements the `skimage.exposure.match_histograms`, which consists in the manipulate the pixels of an
input image and match the histogram with the reference image.
See more in `Histogram Matching <https://scikit-image.org/docs/dev/auto_examples/color_exposure/plot_histogram_matching.html>`_.
Note:
It overwrites the source file.
Args:
source (str): Path to the rasterio data set file
source_mask (str): Path to the rasterio data set file
reference (str): Path to the rasterio data set file
reference_mask (str): Path to the rasterio data set file
"""
from skimage.exposure import match_histograms as _match_histograms
with rasterio.open(source) as source_data_set, rasterio.open(source_mask) as source_mask_data_set:
source_arr = source_data_set.read(1, masked=True)
source_mask_arr = source_mask_data_set.read(1)
source_options = source_data_set.profile.copy()
with rasterio.open(reference) as reference_data_set, rasterio.open(reference_mask) as reference_mask_data_set:
reference_arr = reference_data_set.read(1, masked=True)
reference_mask_arr = reference_mask_data_set.read(1)
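        # restrict histogram matching to pixels that are valid (mask < 255) in both scenes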
intersect_mask = numpy.logical_and(
source_mask_arr < 255, # CHECK: Use only valid data? numpy.isin(source_mask_arr, [0, 1, 3]),
reference_mask_arr < 255, # CHECK: Use only valid data? numpy.isin(reference_mask_arr, [0, 1, 3]),
)
valid_positions = numpy.where(intersect_mask)
if valid_positions and len(valid_positions[0]) == 0:
return
intersected_source_arr = source_arr[valid_positions]
intersected_reference_arr = reference_arr[valid_positions]
histogram = _match_histograms(intersected_source_arr, intersected_reference_arr)
histogram = numpy.round(histogram).astype(source_options['dtype'])
source_arr[valid_positions] = histogram
save_as_cog(str(source), source_arr, block_size=block_size, mode='w', **source_options)
def radsat_extract_bits(bit_value: Union[int, numpy.ndarray], bit_start: int, bit_end: Optional[int] = None):
"""Extract bitwise values from image.
    This method uses bitwise operations to identify pixel saturation.
    According to the document `LaSRC Product Guide <https://prd-wret.s3.us-west-2.amazonaws.com/assets/palladium/production/atoms/files/LSDS-1368_L8_C1-LandSurfaceReflectanceCode-LASRC_ProductGuide-v3.pdf>`_,
    the Landsat Radiometric Saturation Quality Assessment Band (radsat_qa) is a bit
    packed representation of which sensor bands were saturated during data sensing capture.
    The value 1 represents a saturated pixel, while 0 indicates valid data.
For Landsat-8, the following table represents pixels saturation::
Bit Bit Value Description
0 1 Data Fill Flag
1 2 Band 1 Data Saturation Flag
2 4 Band 2 Data Saturation Flag
3 8 Band 3 Data Saturation Flag
4 16 Band 4 Data Saturation Flag
5 32 Band 5 Data Saturation Flag
6 64 Band 6 Data Saturation Flag
7 128 Band 7 Data Saturation Flag
8 256 Band 8 Data Saturation Flag
9 512 Band 9 Data Saturation Flag
10 1024 Band 10 Data Saturation Flag
11 2048 Band 11 Data Saturation Flag
Example:
>>> from cube_builder.utils.image import radsat_extract_bits
>>> # Represents band 10 (1024) and band 1 (2) is saturated.
>>> # Check if any band is saturated
>>> radsat_extract_bits(1026, 1, 7)
1
>>> # You can also pass the numpy array
>>> # radsat_extract_bits(numpy.random.randint(0, 1028, size=(100, 100)), 1, 7)
"""
if bit_end is None:
bit_end = bit_start
mask_size = (1 + bit_end) - bit_start
mask = (1 << mask_size) - 1
res = (bit_value >> bit_start) & mask
return res
| 36.914179 | 208 | 0.62246 | 1,252 | 9,893 | 4.784345 | 0.297125 | 0.019866 | 0.033055 | 0.015359 | 0.087479 | 0.045743 | 0.038397 | 0.038397 | 0.033222 | 0.013356 | 0 | 0.021072 | 0.285252 | 9,893 | 267 | 209 | 37.052434 | 0.82605 | 0.350955 | 0 | 0.061538 | 0 | 0 | 0.05607 | 0.010401 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046154 | false | 0 | 0.1 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc155fca72fa9ac6aeaf31ffe16cc17c4816cb6d | 8,328 | py | Python | skate_ppo/hprun.py | snumrl/skate | a57ec2dc81dc2502da8886b92b870d2c8d65b838 | [
"Apache-2.0"
] | null | null | null | skate_ppo/hprun.py | snumrl/skate | a57ec2dc81dc2502da8886b92b870d2c8d65b838 | [
"Apache-2.0"
] | null | null | null | skate_ppo/hprun.py | snumrl/skate | a57ec2dc81dc2502da8886b92b870d2c8d65b838 | [
"Apache-2.0"
] | null | null | null | try:
from mpi4py import MPI
except ImportError:
MPI = None
import os
import sys
import multiprocessing
import gym
import tensorflow as tf
from baselines.common.misc_util import set_global_seeds
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from baselines.common.cmd_util import common_arg_parser, parse_unknown_args
from baselines.common.tf_util import get_session
from baselines.bench import Monitor
from baselines.common import retro_wrappers
from baselines.common.wrappers import ClipActionsWrapper
from baselines import logger
from importlib import import_module
from skate_ppo.envs import get_env, get_env_parameter
def make_env(env_id, env_type, mpi_rank=0, subrank=0, seed=None, reward_scale=1.0, gamestate=None, flatten_dict_observations=True, wrapper_kwargs=None, env_kwargs=None, logger_dir=None, initializer=None):
if initializer is not None:
initializer(mpi_rank=mpi_rank, subrank=subrank)
env = get_env(env_id)
if flatten_dict_observations and isinstance(env.observation_space, gym.spaces.Dict):
keys = env.observation_space.spaces.keys()
env = gym.wrappers.FlattenDictWrapper(env, dict_keys=list(keys))
env.seed(seed + subrank if seed is not None else None)
env = Monitor(env,
logger_dir and os.path.join(logger_dir, str(mpi_rank) + '.' + str(subrank)),
allow_early_resets=True)
if isinstance(env.action_space, gym.spaces.Box):
env = ClipActionsWrapper(env)
if reward_scale != 1:
env = retro_wrappers.RewardScaler(env, reward_scale)
return env
def make_vec_env(env_id, env_type, num_env, seed,
wrapper_kwargs=None,
env_kwargs=None,
start_index=0,
reward_scale=1.0,
flatten_dict_observations=True,
gamestate=None,
initializer=None,
force_dummy=False):
"""
Create a wrapped, monitored SubprocVecEnv for Atari and MuJoCo.
"""
wrapper_kwargs = wrapper_kwargs or {}
env_kwargs = env_kwargs or {}
mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0
seed = seed + 10000 * mpi_rank if seed is not None else None
logger_dir = logger.get_dir()
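    # Each thunk captures its own subrank, so every parallel env gets a distinct
    # seed and its own Monitor log file.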
def make_thunk(rank, _initializer=None):
return lambda: make_env(
env_id=env_id,
env_type=env_type,
mpi_rank=mpi_rank,
subrank=rank,
seed=seed,
reward_scale=reward_scale,
gamestate=gamestate,
flatten_dict_observations=flatten_dict_observations,
wrapper_kwargs=wrapper_kwargs,
env_kwargs=env_kwargs,
logger_dir=logger_dir,
initializer=_initializer
)
set_global_seeds(seed)
if not force_dummy and num_env > 1:
return SubprocVecEnv([make_thunk(i + start_index, _initializer=initializer) for i in range(num_env)])
else:
return DummyVecEnv([make_thunk(i + start_index, _initializer=None) for i in range(num_env)])
def build_env(args):
ncpu = multiprocessing.cpu_count()
if sys.platform == 'darwin':
ncpu //= 2
nenv = args.num_env or ncpu
alg = args.alg
seed = args.seed
config = tf.ConfigProto(allow_soft_placement=True,
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
config.gpu_options.allow_growth = True
get_session(config=config)
flatten_dict_observations = alg not in {'her'}
env = make_vec_env(args.env, None, args.num_env or 1, seed, reward_scale=args.reward_scale, flatten_dict_observations=flatten_dict_observations)
return env
def train(args, extra_args):
total_timesteps = int(args.num_timesteps)
seed = args.seed
learn = get_learn_function(args.alg)
alg_kwargs = get_env_parameter(args.env)
alg_kwargs.update(extra_args)
env = build_env(args)
alg_kwargs['network'] = 'mlp'
# print('Training {} on {}:{} with arguments \n{}'.format(args.alg, env_type, env_id, alg_kwargs))
model = learn(
env=env,
seed=seed,
total_timesteps=total_timesteps,
**alg_kwargs
)
return model, env
def get_alg_module(alg, submodule=None):
submodule = submodule or alg
try:
# first try to import the alg module from baselines
alg_module = import_module('.'.join(['baselines', alg, submodule]))
except ImportError:
# then from rl_algs
alg_module = import_module('.'.join(['rl_' + 'algs', alg, submodule]))
return alg_module
def get_learn_function(alg):
return get_alg_module(alg).learn
def get_learn_function_defaults(alg, env_type):
try:
alg_defaults = get_alg_module(alg, 'defaults')
kwargs = getattr(alg_defaults, env_type)()
except (ImportError, AttributeError):
kwargs = {}
return kwargs
def parse_cmdline_kwargs(args):
"""
convert a list of '='-spaced command-line arguments to a dictionary, evaluating python objects when possible
:param args:
:return:
"""
def parse(v):
assert isinstance(v, str)
try:
return eval(v)
except (NameError, SyntaxError):
return v
return {k: parse(v) for k,v in parse_unknown_args(args).items()}
def main(args):
# configure logger, disable logging in child MPI processes (with rank > 0)
arg_parser = common_arg_parser()
args, unknown_args = arg_parser.parse_known_args(args)
extra_args = parse_cmdline_kwargs(unknown_args)
if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:
rank = 0
logger.configure()
else:
logger.configure(format_strs=[])
rank = MPI.COMM_WORLD.Get_rank()
model, env = train(args, extra_args)
if args.save_path is not None and rank == 0:
save_path = os.path.expanduser(args.save_path)
model.save(save_path)
env.close()
if __name__ == '__main__':
"""
-h, --help show this help message and exit
--env ENV environment ID (default: Reacher-v2)
--env_type ENV_TYPE type of environment, used when the environment type
cannot be automatically determined (default: None)
--seed SEED RNG seed (default: None)
--alg ALG Algorithm (default: ppo2)
--num_timesteps NUM_TIMESTEPS
--network NETWORK network type (mlp, cnn, lstm, cnn_lstm, conv_only)
(default: None)
--gamestate GAMESTATE
game state to load (so far only used in retro games)
(default: None)
--num_env NUM_ENV Number of environment copies being run in parallel.
When not specified, set to number of cpus for Atari,
and to 1 for Mujoco (default: None)
--reward_scale REWARD_SCALE
Reward scale factor. Default: 1.0 (default: 1.0)
--save_path SAVE_PATH
Path to save trained model to (default: None)
--save_video_interval SAVE_VIDEO_INTERVAL
Save video every x steps (0 = disabled) (default: 0)
--save_video_length SAVE_VIDEO_LENGTH
Length of recorded video. Default: 200 (default: 200)
--play
--alg=ppo2
--env=Humanoid-v2
--network=mlp
--num_timesteps=2e7
--ent_coef=0.1
--num_hidden=32
--num_layers=3
--value_network=copy
--load_path=~/models/pong_20M_ppo2
OPENAI_LOGDIR=$HOME/logs/cartpole-ppo
OPENAI_LOG_FORMAT=csv
"""
import time
argv = []
env_id = 'speed_skating'
cur_time = time.strftime("%Y%m%d%H%M")
os.environ["OPENAI_LOGDIR"] = os.getcwd() + '/' + env_id + '/log_' + cur_time
os.environ["OPENAI_LOG_FORMAT"] = 'csv'
argv.extend(['--env='+env_id])
argv.extend(['--alg=ppo2'])
argv.extend(['--num_env=8'])
argv.extend(['--num_timesteps=1e8'])
argv.extend(['--save_path='+env_id+'/'+'model_'+cur_time])
argv.extend(['--num_hidden=64'])
argv.extend(['--num_layers=2'])
main(argv)
| 32.658824 | 204 | 0.64049 | 1,078 | 8,328 | 4.713358 | 0.255102 | 0.023814 | 0.036213 | 0.006495 | 0.116709 | 0.066916 | 0.009053 | 0 | 0 | 0 | 0 | 0.009477 | 0.26513 | 8,328 | 254 | 205 | 32.787402 | 0.820752 | 0.052113 | 0 | 0.090909 | 0 | 0 | 0.034247 | 0 | 0.006494 | 0 | 0 | 0 | 0.006494 | 1 | 0.071429 | false | 0 | 0.149351 | 0.012987 | 0.298701 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc1ab79d8243d70f036a6563b8c8175a9621fcaf | 1,077 | py | Python | src/markdown_storage/mixins.py | stephanpoetschner/markdown_storage | 69005db4484010e0d2282bdeb0d0bcc30a316932 | [
"MIT"
] | null | null | null | src/markdown_storage/mixins.py | stephanpoetschner/markdown_storage | 69005db4484010e0d2282bdeb0d0bcc30a316932 | [
"MIT"
] | null | null | null | src/markdown_storage/mixins.py | stephanpoetschner/markdown_storage | 69005db4484010e0d2282bdeb0d0bcc30a316932 | [
"MIT"
] | null | null | null | import os
import yaml
from .exceptions import MetadataError
class FileReaderMixin(object):
@classmethod
def is_valid(cls, path, allowed_extensions=None):
allowed_extensions = (allowed_extensions or [])
if not os.path.isfile(path):
return False
name, ext = os.path.splitext(path)
if ext.lstrip('.') not in allowed_extensions:
return False
return True
@classmethod
def read(cls, path):
with open(path, 'r') as f:
return f.read()
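# Subclasses may post-process individual metadata keys by defining annotate_<key> methods.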
class MetadataMixin(object):
@classmethod
def annotate(cls, meta):
for key, value in meta.items():
func_name = 'annotate_' + key
if hasattr(cls, func_name):
yield (key, getattr(cls, func_name)(value))
else:
yield (key, value)
@classmethod
def parse_meta(cls, raw_metadata):
try:
            meta = dict(yaml.safe_load(raw_metadata))
except yaml.scanner.ScannerError:
raise MetadataError()
return cls.annotate(meta)
| 23.413043 | 59 | 0.588672 | 122 | 1,077 | 5.098361 | 0.47541 | 0.090032 | 0.064309 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.317549 | 1,077 | 45 | 60 | 23.933333 | 0.846259 | 0 | 0 | 0.181818 | 0 | 0 | 0.010223 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121212 | false | 0 | 0.090909 | 0 | 0.424242 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc1da07168a9f6fc3aa75a6b7f531708ed0a5540 | 1,766 | py | Python | postreview/cli.py | tigefa4u/post-review | ff40d5460331c81ef2a9140026413a5f2a22e0db | [
"MIT"
] | 9 | 2017-08-12T10:56:39.000Z | 2018-10-07T04:58:51.000Z | postreview/cli.py | tigefa4u/post-review | ff40d5460331c81ef2a9140026413a5f2a22e0db | [
"MIT"
] | 15 | 2017-08-01T03:17:49.000Z | 2017-10-10T02:39:14.000Z | postreview/cli.py | tigefa4u/post-review | ff40d5460331c81ef2a9140026413a5f2a22e0db | [
"MIT"
] | 1 | 2018-10-06T06:42:07.000Z | 2018-10-06T06:42:07.000Z |
from builtins import object
from .configprocesser import get_configuration
from .GitCommandRunner import GitCommandRunner as Git
from .GitServiceManager import GitServiceManager
from postreview import __version__
import argparse
import sys
import os
def main():
driver = CliDriver()
return driver.main()
class CliDriver(object):
def main(self, args=None):
if args is None:
args = sys.argv[1:]
parser = self._create_parser()
parsed, remaining = parser.parse_known_args(args)
try:
if parsed.version:
sys.stdout.write('post-review/' + __version__)
sys.stdout.write('\n\n')
return
if parsed.target is None:
raise ValueError()
self.target = parsed.target
except (AttributeError, ValueError):
sys.stderr.write("===================================")
sys.stderr.write("\n")
sys.stderr.write("WARNING: missing --target argument")
sys.stderr.write("\n")
sys.stderr.write("===================================")
sys.stderr.write("\n\n")
parser.print_help()
return 255
git_service = GitServiceManager(self.target)
return git_service.post_review()
def _create_parser(self):
parser = argparse.ArgumentParser(description="create a code review and merge request")
#parser._action_groups.pop()
required = parser.add_argument_group('Required Arguments')
required.add_argument('--target', '-t', help='remote branch to diff/merge with.')
parser.add_argument('--version', action='store_true', help='software package version installed')
return parser
| 32.703704 | 104 | 0.602492 | 186 | 1,766 | 5.580645 | 0.424731 | 0.052023 | 0.080925 | 0.043353 | 0.083815 | 0.083815 | 0.083815 | 0 | 0 | 0 | 0 | 0.003063 | 0.260476 | 1,766 | 53 | 105 | 33.320755 | 0.79173 | 0.015289 | 0 | 0.095238 | 0 | 0 | 0.161197 | 0.040299 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.190476 | 0 | 0.404762 | 0.02381 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc1f160f7e9ad15a6d8462a77b084aabd58a75d9 | 699 | py | Python | heatmap.py | 82ndAirborneDiv/LpSubP | a92fc4db5a15ddba24249ca37075378ad331c702 | [
"CC0-1.0"
] | 1 | 2020-02-13T21:28:15.000Z | 2020-02-13T21:28:15.000Z | heatmap.py | 82ndAirborneDiv/LpSubP | a92fc4db5a15ddba24249ca37075378ad331c702 | [
"CC0-1.0"
] | null | null | null | heatmap.py | 82ndAirborneDiv/LpSubP | a92fc4db5a15ddba24249ca37075378ad331c702 | [
"CC0-1.0"
] | 3 | 2020-10-20T15:49:50.000Z | 2020-12-21T20:52:46.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 6 13:48:22 2019
@author: nej1
"""
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
hm = pd.read_csv("allele_diff_matrix.csv", index_col=0)
# keep only the sample name before the first "." for axis labels
hm.columns = hm.columns.str.split(".").str[0].tolist()
hm.index = hm.index.str.split(".").str[0].tolist()
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111)
ax.set_title("Heatmap", size=10)
#ax.cax.colorbar(im)
ax.set_xticks(np.arange(len(hm.columns)))
ax.set_xticklabels(hm.columns, size=9, rotation=90)
ax.set_yticks(np.arange(len(hm.index)))
ax.set_yticklabels(hm.index, size=9)
im = ax.imshow(hm, cmap="PuOr", interpolation="nearest")
plt.savefig('heatmap.pdf')
| 26.884615 | 53 | 0.703863 | 122 | 699 | 3.95082 | 0.557377 | 0.051867 | 0.045643 | 0.049793 | 0.074689 | 0 | 0 | 0 | 0 | 0 | 0 | 0.046326 | 0.104435 | 699 | 25 | 54 | 27.96 | 0.723642 | 0.131617 | 0 | 0 | 0 | 0 | 0.092496 | 0.038394 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc2210941bc221d5c725510513a7b1d4cf498dd2 | 26,933 | py | Python | lighthouse/mlv.py | egonrian/google-research | 8177adbe9ca0d7e5a9463b54581fe6dd27be0974 | [
"Apache-2.0"
] | 3 | 2021-01-18T04:46:49.000Z | 2021-03-05T09:21:40.000Z | lighthouse/mlv.py | JustinDurham/google-research | 9049acf9246c1b75170f0c6757e62a8f619a9db6 | [
"Apache-2.0"
] | 25 | 2020-07-25T08:53:09.000Z | 2022-03-12T00:43:02.000Z | lighthouse/mlv.py | JustinDurham/google-research | 9049acf9246c1b75170f0c6757e62a8f619a9db6 | [
"Apache-2.0"
] | 4 | 2021-02-08T10:25:45.000Z | 2021-04-17T14:46:26.000Z | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for training multiscale volumetric lighting prediction."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import tensorflow.compat.v1 as tf
import lighthouse.geometry.projector as pj
import lighthouse.nets as nets
class MLV(object):
"""Class definition for Multiscale Lighting Volume learning module."""
def __init__(self):
pass
def infer_mpi(self, src_images, ref_image, ref_pose, src_poses, intrinsics,
psv_planes):
"""Construct the MPI inference graph.
Args:
src_images: stack of source images [batch, height, width, 3*#source]
ref_image: reference image [batch, height, width, 3]
ref_pose: reference frame pose (camera to world) [batch, 4, 4]
src_poses: source frame poses (camera to world) [batch, 4, 4, #source]
intrinsics: camera intrinsics [batch, 3, 3]
psv_planes: list of depth of PSV planes
Returns:
outputs: a collection of output tensors.
"""
with tf.name_scope('format_network_input'):
net_input = self.format_network_input(ref_image, src_images, ref_pose,
src_poses, psv_planes, intrinsics)
with tf.name_scope('layer_prediction'):
# generate entire MPI (training and inference, but takes more memory)
rgba_layers = nets.mpi_net(net_input)
# Collect output tensors
pred = {}
pred['rgba_layers'] = rgba_layers
pred['psv'] = net_input
# add pred tensors to outputs collection
for i in pred:
tf.add_to_collection('outputs', pred[i])
return pred
def mpi_render_view(self, input_mpi, ref_pose, tgt_pose, planes, intrinsics):
"""Render a target view from MPI representation.
Args:
input_mpi: input MPI [batch, height, width, #planes, 4]
ref_pose: reference camera pose [batch, 4, 4]
tgt_pose: target pose to render from [batch, 4, 4]
planes: list of depths for each plane
intrinsics: camera intrinsics [batch, 3, 3]
Returns:
rendered view [batch, height, width, 3]
"""
batch_size, _, _ = tgt_pose.get_shape().as_list()
num_planes = tf.shape(planes)[0]
height = tf.shape(input_mpi)[1]
width = tf.shape(input_mpi)[2]
rgba_layers = input_mpi
# render target viewpoint
filler = tf.concat(
[tf.zeros([batch_size, 1, 3]),
tf.ones([batch_size, 1, 1])], axis=2)
intrinsics_filler = tf.stack(
[tf.to_float(height),
tf.to_float(width), intrinsics[0, 0, 0]], axis=0)[:, tf.newaxis]
ref_pose_c2w = ref_pose
ref_pose_c2w = tf.concat([
tf.concat([
ref_pose_c2w[:, :3, 0:1], ref_pose_c2w[:, :3, 1:2],
-1.0 * ref_pose_c2w[:, :3, 2:3], ref_pose_c2w[:, :3, 3:]
],
axis=2), filler
],
axis=1)
ref_pose_c2w = tf.concat([ref_pose_c2w[0, :3, :], intrinsics_filler],
axis=1)
tgt_pose_c2w = tgt_pose
tgt_pose_c2w = tf.concat([
tf.concat([
tgt_pose_c2w[:, :3, 0:1], tgt_pose_c2w[:, :3, 1:2],
-1.0 * tgt_pose_c2w[:, :3, 2:3], tgt_pose_c2w[:, :3, 3:]
],
axis=2), filler
],
axis=1)
tgt_pose_c2w = tf.concat([tgt_pose_c2w[0, :3, :], intrinsics_filler],
axis=1)
rendering, alpha_acc, accum = pj.render_mpi_homogs(
rgba_layers,
ref_pose_c2w,
tgt_pose_c2w,
1.0 / planes[0],
1.0 / planes[-1],
num_planes,
debug=False)
return rendering, alpha_acc, accum
def img2mpi(self, img, depth, planedepths):
"""Compute ground truth MPI of visible content using depth map."""
height = tf.shape(img)[1]
width = tf.shape(img)[2]
num_depths = planedepths.shape[0]
depth_inds = (tf.to_float(num_depths) - 1) * (
(1.0 / depth) - (1.0 / planedepths[0])) / ((1.0 / planedepths[-1]) -
(1.0 / planedepths[0]))
depth_inds = tf.round(depth_inds)
depth_inds_tile = tf.to_int32(
tf.tile(depth_inds[:, :, :, tf.newaxis], [1, 1, 1, num_depths]))
_, _, d = tf.meshgrid(
tf.range(height), tf.range(width), tf.range(num_depths), indexing='ij')
mpi_colors = tf.to_float(
tf.tile(img[:, :, :, tf.newaxis, :], [1, 1, 1, num_depths, 1]))
mpi_alphas = tf.to_float(
tf.where(
tf.equal(depth_inds_tile, d), tf.ones_like(depth_inds_tile),
tf.zeros_like(depth_inds_tile)))
mpi = tf.concat([mpi_colors, mpi_alphas[Ellipsis, tf.newaxis]], axis=4)
return mpi
def predict_lighting_vol(self,
mpi,
planes,
intrinsics,
cube_res,
scale_factors,
depth_clip=20.0):
"""Predict lighting volumes from MPI.
Args:
mpi: input mpi
planes: input mpi plane depths
intrinsics: ref camera intrinsics
cube_res: resolution of cube volume for lighting prediction
scale_factors: scales for multiresolution cube sampling
depth_clip: farthest depth (sets limits of coarsest cube)
Returns:
list of completed lighting volumes
"""
batchsize = tf.shape(mpi)[0]
max_depth = tf.minimum(planes[0], depth_clip)
cube_side_lengths = [2.0 * max_depth]
for i in range(len(scale_factors)):
cube_side_lengths.append(2.0 * max_depth / scale_factors[i])
# shape of each cube's footprint within the next coarser volume
cube_rel_shapes = []
for i in range(len(scale_factors)):
if i == 0:
i_rel_shape = cube_res // scale_factors[0]
else:
i_rel_shape = (cube_res * scale_factors[i - 1]) // scale_factors[i]
cube_rel_shapes.append(i_rel_shape)
cube_centers = [tf.zeros([batchsize, 3])]
for i in range(len(scale_factors)):
i_center_depth = (cube_side_lengths[i] / (cube_res - 1)) * (
cube_rel_shapes[i] // 2)
cube_centers.append(
tf.concat([
tf.zeros([batchsize, 2]), i_center_depth * tf.ones([batchsize, 1])
],
axis=1))
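    # Index offsets of each finer cube's footprint inside the next coarser cube;
    # the nested volumes are centered in x/y but offset along the camera z axis.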
cube_nest_inds = []
for i in range(len(scale_factors)):
if i == 0:
i_nest_inds = [(cube_res - cube_rel_shapes[i]) // 2,
(cube_res - cube_rel_shapes[i]) // 2,
cube_res // 2 - cube_rel_shapes[i]]
else:
i_nest_inds = [(cube_res - cube_rel_shapes[i]) // 2,
(cube_res - cube_rel_shapes[i]) // 2,
cube_res - cube_rel_shapes[i]]
cube_nest_inds.append(i_nest_inds)
cube_list = []
for i in range(len(cube_centers)):
i_cube, _ = pj.mpi_resample_cube(mpi, cube_centers[i], intrinsics, planes,
cube_side_lengths[i], cube_res)
cube_list.append(i_cube)
return cube_list, cube_centers, cube_side_lengths, cube_rel_shapes, cube_nest_inds
def render_envmap(self, cubes, cube_centers, cube_side_lengths,
cube_rel_shapes, cube_nest_inds, ref_pose, env_pose,
theta_res, phi_res, r_res):
"""Render environment map from volumetric lights.
Args:
cubes: input list of cubes in multiscale volume
cube_centers: position of cube centers
cube_side_lengths: side lengths of cubes
cube_rel_shapes: size of "footprint" of each cube within next coarser cube
cube_nest_inds: indices for cube "footprints"
ref_pose: c2w pose of ref camera
env_pose: c2w pose of environment map camera
theta_res: resolution of theta (width) for environment map
phi_res: resolution of phi (height) for environment map
r_res: number of spherical shells to sample for environment map rendering
Returns:
An environment map at the input pose
"""
num_scales = len(cubes)
env_c2w = env_pose
env2ref = tf.matmul(tf.matrix_inverse(ref_pose), env_c2w)
# cube-->sphere resampling
all_shells_list = []
all_rad_list = []
for i in range(num_scales):
if i == num_scales - 1:
# "finest" resolution cube, don't zero out
cube_removed = cubes[i]
else:
# zero out areas covered by finer resolution cubes
cube_shape = cubes[i].get_shape().as_list()[1]
zm_y, zm_x, zm_z = tf.meshgrid(
tf.range(cube_nest_inds[i][0],
cube_nest_inds[i][0] + cube_rel_shapes[i]),
tf.range(cube_nest_inds[i][1],
cube_nest_inds[i][1] + cube_rel_shapes[i]),
tf.range(cube_nest_inds[i][2],
cube_nest_inds[i][2] + cube_rel_shapes[i]),
indexing='ij')
inds = tf.stack([zm_y, zm_x, zm_z], axis=-1)
updates = tf.to_float(tf.ones_like(zm_x))
zero_mask = 1.0 - tf.scatter_nd(
inds, updates, shape=[cube_shape, cube_shape, cube_shape])
cube_removed = zero_mask[tf.newaxis, :, :, :, tf.newaxis] * cubes[i]
spheres_i, rad_i = pj.spherical_cubevol_resample(cube_removed, env2ref,
cube_centers[i],
cube_side_lengths[i],
phi_res, theta_res,
r_res)
all_shells_list.append(spheres_i)
all_rad_list.append(rad_i)
all_shells = tf.concat(all_shells_list, axis=3)
all_rad = tf.concat(all_rad_list, axis=0)
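    # Interleave shells from all scales by radius, then alpha-composite the
    # spherical shells back to front.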
all_shells = pj.interleave_shells(all_shells, all_rad)
all_shells_envmap = pj.over_composite(all_shells)
return all_shells_envmap, all_shells_list
def build_train_graph(self,
inputs,
min_depth,
max_depth,
cube_res,
theta_res,
phi_res,
r_res,
scale_factors,
num_mpi_planes,
learning_rate=0.0001,
vgg_model_weights=None,
global_step=0,
depth_clip=20.0):
"""Construct the training computation graph.
Args:
inputs: dictionary of tensors (see 'input_data' below) needed for training
min_depth: minimum depth for the PSV and MPI planes
max_depth: maximum depth for the PSV and MPI planes
cube_res: per-side cube resolution
theta_res: environment map width
phi_res: environment map height
r_res: number of radii to use when sampling spheres for rendering
scale_factors: downsampling factors of cubes relative to the coarsest
num_mpi_planes: number of MPI planes to infer
learning_rate: learning rate
vgg_model_weights: vgg weights (needed when vgg loss is used)
global_step: training iteration
depth_clip: maximum depth for coarsest resampled volumes
Returns:
A train_op to be used for training.
"""
with tf.name_scope('setup'):
psv_planes = pj.inv_depths(min_depth, max_depth, num_mpi_planes)
mpi_planes = pj.inv_depths(min_depth, max_depth, num_mpi_planes)
with tf.name_scope('input_data'):
tgt_image = inputs['tgt_image']
ref_image = inputs['ref_image']
src_images = inputs['src_images']
env_image = inputs['env_image']
ref_depth = inputs['ref_depth']
tgt_pose = inputs['tgt_pose']
ref_pose = inputs['ref_pose']
src_poses = inputs['src_poses']
env_pose = inputs['env_pose']
intrinsics = inputs['intrinsics']
_, _, _, num_source = src_poses.get_shape().as_list()
with tf.name_scope('inference'):
num_mpi_planes = tf.shape(mpi_planes)[0]
pred = self.infer_mpi(src_images, ref_image, ref_pose, src_poses,
intrinsics, psv_planes)
rgba_layers = pred['rgba_layers']
psv = pred['psv']
with tf.name_scope('synthesis'):
output_image, output_alpha_acc, _ = self.mpi_render_view(
rgba_layers, ref_pose, tgt_pose, mpi_planes, intrinsics)
with tf.name_scope('environment_rendering'):
mpi_gt = self.img2mpi(ref_image, ref_depth, mpi_planes)
output_image_gt, _, _ = self.mpi_render_view(mpi_gt, ref_pose, tgt_pose,
mpi_planes, intrinsics)
lightvols_gt, _, _, _, _ = self.predict_lighting_vol(
mpi_gt,
mpi_planes,
intrinsics,
cube_res,
scale_factors,
depth_clip=depth_clip)
lightvols, lightvol_centers, \
lightvol_side_lengths, \
cube_rel_shapes, \
cube_nest_inds = self.predict_lighting_vol(rgba_layers, mpi_planes,
intrinsics, cube_res,
scale_factors,
depth_clip=depth_clip)
lightvols_out = nets.cube_net_multires(lightvols, cube_rel_shapes,
cube_nest_inds)
gt_envmap, gt_shells = self.render_envmap(lightvols_gt, lightvol_centers,
lightvol_side_lengths,
cube_rel_shapes, cube_nest_inds,
ref_pose, env_pose, theta_res,
phi_res, r_res)
prenet_envmap, prenet_shells = self.render_envmap(
lightvols, lightvol_centers, lightvol_side_lengths, cube_rel_shapes,
cube_nest_inds, ref_pose, env_pose, theta_res, phi_res, r_res)
output_envmap, output_shells = self.render_envmap(
lightvols_out, lightvol_centers, lightvol_side_lengths,
cube_rel_shapes, cube_nest_inds, ref_pose, env_pose, theta_res,
phi_res, r_res)
with tf.name_scope('loss'):
# mask loss for pixels outside reference frustum
loss_mask = tf.where(
tf.equal(output_alpha_acc[Ellipsis, tf.newaxis], 0.0),
tf.zeros_like(output_image[:, :, :, 0:1]),
tf.ones_like(output_image[:, :, :, 0:1]))
loss_mask = tf.stop_gradient(loss_mask)
tf.summary.image('loss_mask', loss_mask)
# helper functions for loss
def compute_error(real, fake, mask):
mask = tf.ones_like(real) * mask
return tf.reduce_sum(mask * tf.abs(fake - real)) / (
tf.reduce_sum(mask) + 1.0e-8)
# Normalized VGG loss
def downsample(tensor, ds):
return tf.nn.avg_pool(tensor, [1, ds, ds, 1], [1, ds, ds, 1], 'SAME')
def vgg_loss(tgt_image, output_image, loss_mask, vgg_weights):
"""VGG activation loss definition."""
vgg_real = nets.build_vgg19(tgt_image * 255.0, vgg_weights)
rescaled_output_image = output_image * 255.0
vgg_fake = nets.build_vgg19(rescaled_output_image, vgg_weights)
p0 = compute_error(vgg_real['input'], vgg_fake['input'], loss_mask)
p1 = compute_error(vgg_real['conv1_2'], vgg_fake['conv1_2'],
loss_mask) / 2.6
p2 = compute_error(vgg_real['conv2_2'], vgg_fake['conv2_2'],
downsample(loss_mask, 2)) / 4.8
p3 = compute_error(vgg_real['conv3_2'], vgg_fake['conv3_2'],
downsample(loss_mask, 4)) / 3.7
p4 = compute_error(vgg_real['conv4_2'], vgg_fake['conv4_2'],
downsample(loss_mask, 8)) / 5.6
p5 = compute_error(vgg_real['conv5_2'], vgg_fake['conv5_2'],
downsample(loss_mask, 16)) * 10 / 1.5
total_loss = p0 + p1 + p2 + p3 + p4 + p5
return total_loss
# rendered image loss
render_loss = vgg_loss(tgt_image, output_image, loss_mask,
vgg_model_weights) / 100.0
total_loss = render_loss
# rendered envmap loss
envmap_loss = vgg_loss(env_image, output_envmap[Ellipsis, :3],
tf.ones_like(env_image[Ellipsis, 0:1]),
vgg_model_weights) / 100.0
# set envmap loss to 0 when only training mpi network (see paper)
envmap_loss = tf.where(tf.greater(global_step, 240000), envmap_loss, 0.0)
total_loss += envmap_loss
# adversarial loss for envmap
real_logit = nets.discriminator(env_image, scope='discriminator')
fake_logit = nets.discriminator(
output_envmap[Ellipsis, :3], scope='discriminator')
adv_loss_list = []
for i in range(len(fake_logit)):
adv_loss_list.append(0.1 * -1.0 * tf.reduce_mean(fake_logit[i][-1]))
adv_loss = tf.reduce_mean(adv_loss_list)
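      # hinge losses for the discriminator's real and fake branches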
real_loss_list = []
fake_loss_list = []
for i in range(len(fake_logit)):
real_loss_list.append(
-1.0 * tf.reduce_mean(tf.minimum(real_logit[i][-1] - 1, 0.0)))
fake_loss_list.append(
-1.0 *
tf.reduce_mean(tf.minimum(-1.0 * fake_logit[i][-1] - 1, 0.0)))
real_loss = tf.reduce_mean(real_loss_list)
fake_loss = tf.reduce_mean(fake_loss_list)
disc_loss = real_loss + fake_loss
# set adv/disc losses to 0 until end of training
adv_loss = tf.where(tf.greater(global_step, 690000), adv_loss, 0.0)
disc_loss = tf.where(tf.greater(global_step, 690000), disc_loss, 0.0)
tf.summary.scalar('loss_disc', disc_loss)
tf.summary.scalar('loss_disc_real', real_loss)
tf.summary.scalar('loss_disc_fake', fake_loss)
tf.summary.scalar('loss_adv', adv_loss)
total_loss += adv_loss
with tf.name_scope('train_op'):
train_variables = [
var for var in tf.trainable_variables()
if 'discriminator' not in var.name
]
optim = tf.train.AdamOptimizer(learning_rate, epsilon=1e-4)
grads_and_variables = optim.compute_gradients(
total_loss, var_list=train_variables)
grads = [gv[0] for gv in grads_and_variables]
variables = [gv[1] for gv in grads_and_variables]
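      # Replace NaN entries in the gradients with zeros before clipping, so a
      # single bad step cannot poison the weights.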
def denan(x):
return tf.where(tf.is_nan(x), tf.zeros_like(x), x)
grads_clipped = [denan(g) for g in grads]
grads_clipped, _ = tf.clip_by_global_norm(grads_clipped, 100.0)
train_op = [optim.apply_gradients(zip(grads_clipped, variables))]
      tf.summary.scalar('gradient_global_norm', tf.linalg.global_norm(grads))
      tf.summary.scalar('clipped_gradient_global_norm',
                        tf.linalg.global_norm(grads_clipped))
d_variables = [
var for var in tf.trainable_variables() if 'discriminator' in var.name
]
optim_d = tf.train.AdamOptimizer(learning_rate, beta1=0.0)
train_op.append(optim_d.minimize(disc_loss, var_list=d_variables))
with tf.name_scope('envmap_gt'):
tf.summary.image('envmap', gt_envmap)
tf.summary.image('envmap_alpha', gt_envmap[Ellipsis, -1:])
for i in range(len(gt_shells)):
i_envmap = pj.over_composite(gt_shells[i])
tf.summary.image('envmap_level_' + str(i), i_envmap)
with tf.name_scope('envmap_prenet'):
tf.summary.image('envmap', prenet_envmap)
tf.summary.image('envmap_alpha', prenet_envmap[Ellipsis, -1:])
for i in range(len(prenet_shells)):
i_envmap = pj.over_composite(prenet_shells[i])
tf.summary.image('envmap_level_' + str(i), i_envmap)
with tf.name_scope('envmap_output'):
tf.summary.image('envmap', output_envmap)
tf.summary.image('envmap_alpha', output_envmap[Ellipsis, -1:])
for i in range(len(output_shells)):
i_envmap = pj.over_composite(output_shells[i])
tf.summary.image('envmap_level_' + str(i), i_envmap)
tf.summary.scalar('loss_total', total_loss)
tf.summary.scalar('loss_render', render_loss)
tf.summary.scalar('loss_envmap', envmap_loss)
tf.summary.scalar('min_depth', min_depth)
tf.summary.scalar('max_depth', max_depth)
with tf.name_scope('level_stats'):
for i in range(len(lightvols)):
tf.summary.scalar('cube_side_length_' + str(i),
lightvol_side_lengths[i])
tf.summary.scalar('cube_center_' + str(i), lightvol_centers[i][0, -1])
# Source images
for i in range(num_source):
src_image = src_images[:, :, :, i * 3:(i + 1) * 3]
tf.summary.image('image_src_%d' % i, src_image)
# Output image
tf.summary.image('image_output', output_image)
tf.summary.image('image_output_Gt', output_image_gt)
# Target image
tf.summary.image('image_tgt', tgt_image)
tf.summary.image('envmap_tgt', env_image)
# Ref image
tf.summary.image('image_ref', ref_image)
# Predicted color and alpha layers, and PSV
num_summ = 8 # number of plane summaries to show in tensorboard
for i in range(num_summ):
ind = tf.to_int32(i * num_mpi_planes / num_summ)
rgb = rgba_layers[:, :, :, ind, :3]
alpha = rgba_layers[:, :, :, ind, -1:]
ref_plane = psv[:, :, :, ind, :3]
source_plane = psv[:, :, :, ind, 3:6]
tf.summary.image('layer_rgb_%d' % i, rgb)
tf.summary.image('layer_alpha_%d' % i, alpha)
tf.summary.image('layer_rgba_%d' % i, rgba_layers[:, :, :, ind, :])
tf.summary.image('psv_avg_%d' % i, 0.5 * ref_plane + 0.5 * source_plane)
tf.summary.image('psv_ref_%d' % i, ref_plane)
tf.summary.image('psv_source_%d' % i, source_plane)
return train_op
def train(self, train_op, load_dir, checkpoint_dir, summary_dir, summary_freq,
checkpoint_freq, max_steps, global_step):
"""Runs the training procedure.
Args:
train_op: op for training the network
load_dir: directory to load checkpoint for continuing training
checkpoint_dir: where to save the model checkpoints
summary_dir: where to save the tensorboard summaries
summary_freq: summary frequency
checkpoint_freq: Frequency of model saving
max_steps: maximum training steps
global_step: training iteration placeholder
"""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
step_start = 1
with tf.Session(config=config) as sess:
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(summary_dir, sess.graph)
saver = tf.train.Saver([var for var in tf.trainable_variables()],
max_to_keep=None)
sess.run(tf.global_variables_initializer())
checkpoint = tf.train.latest_checkpoint(load_dir)
if checkpoint is not None:
print('Resume training from previous checkpoint:', checkpoint)
step_start = int(checkpoint.split('-')[-1])
saver.restore(sess, checkpoint)
print('starting training iters')
for step in range(step_start, max_steps + 1):
start_time = time.time()
fetches = {'train': train_op}
if step % summary_freq == 0:
fetches['summary'] = merged
results = sess.run(fetches, feed_dict={global_step: step})
if step % summary_freq == 0:
train_writer.add_summary(results['summary'], step)
print('[Step %.8d] time: %4.4f/it' % (step, time.time() - start_time))
if step % checkpoint_freq == 0:
print('Saving checkpoint to %s...' % checkpoint_dir)
saver.save(
sess,
os.path.join(checkpoint_dir, 'model.ckpt'),
global_step=step)
def format_network_input(self, ref_image, psv_src_images, ref_pose,
psv_src_poses, planes, intrinsics):
"""Format the network input.
Args:
ref_image: reference source image [batch, height, width, 3]
psv_src_images: stack of source images (excluding the ref image) [batch,
height, width, 3*(#source)]
ref_pose: reference camera-to-world pose (where PSV is constructed)
[batch, 4, 4]
psv_src_poses: input poses (camera to world) [batch, 4, 4, #source]
planes: list of scalar depth values for each plane
intrinsics: camera intrinsics [batch, 3, 3]
Returns:
net_input: [batch, height, width, #planes, (#source+1)*3]
"""
batch_size = tf.shape(psv_src_images)[0]
height = tf.shape(psv_src_images)[1]
width = tf.shape(psv_src_images)[2]
_, _, _, num_psv_source = psv_src_poses.get_shape().as_list()
num_planes = tf.shape(planes)[0]
filler = tf.concat(
[tf.zeros([batch_size, 1, 3]),
tf.ones([batch_size, 1, 1])], axis=2)
intrinsics_filler = tf.stack([
tf.to_float(height),
tf.to_float(width),
tf.to_float(intrinsics[0, 0, 0])
],
axis=0)[:, tf.newaxis]
ref_pose_c2w = ref_pose
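    # negate the rotation's z-axis column to match the projection library's
    # camera convention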
ref_pose_c2w = tf.concat([
tf.concat([
ref_pose_c2w[:, :3, 0:1], ref_pose_c2w[:, :3, 1:2],
-1.0 * ref_pose_c2w[:, :3, 2:3], ref_pose_c2w[:, :3, 3:]
],
axis=2), filler
],
axis=1)
ref_pose_c2w = tf.concat([ref_pose_c2w[0, :3, :], intrinsics_filler],
axis=1)
net_input = []
for i in range(num_psv_source):
curr_pose_c2w = psv_src_poses[:, :, :, i]
curr_pose_c2w = tf.concat([
tf.concat([
curr_pose_c2w[:, :3, 0:1], curr_pose_c2w[:, :3, 1:2],
-1.0 * curr_pose_c2w[:, :3, 2:3], curr_pose_c2w[:, :3, 3:]
], 2), filler
], 1)
curr_pose_c2w = tf.concat([curr_pose_c2w[0, :3, :], intrinsics_filler],
axis=1)
curr_image = psv_src_images[:, :, :, i * 3:(i + 1) * 3]
curr_psv = pj.make_psv_homogs(curr_image, curr_pose_c2w, ref_pose_c2w,
1.0 / planes, num_planes)
net_input.append(curr_psv[tf.newaxis, Ellipsis])
net_input = tf.concat(net_input, axis=4)
ref_img_stack = tf.tile(
tf.expand_dims(ref_image, 3), [1, 1, 1, num_planes, 1])
net_input = tf.concat([ref_img_stack, net_input], axis=4)
net_input.set_shape([1, None, None, None, 3 * (num_psv_source + 1)])
return net_input
| 38.752518 | 86 | 0.606802 | 3,645 | 26,933 | 4.211797 | 0.131413 | 0.017327 | 0.020063 | 0.010748 | 0.322499 | 0.264396 | 0.208963 | 0.179521 | 0.155875 | 0.142327 | 0 | 0.022876 | 0.284224 | 26,933 | 694 | 87 | 38.808357 | 0.773472 | 0.187242 | 0 | 0.171053 | 0 | 0 | 0.045239 | 0.000978 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028509 | false | 0.002193 | 0.017544 | 0.004386 | 0.072368 | 0.010965 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc247ba3d4e292b96a192d10e4ecbabd302ad719 | 1,057 | py | Python | python/breakingRecords.py | thrama/coding-test | d031f9d2865bce1cf8edbdf65ea4357c69e7ef9a | [
"Unlicense"
] | null | null | null | python/breakingRecords.py | thrama/coding-test | d031f9d2865bce1cf8edbdf65ea4357c69e7ef9a | [
"Unlicense"
] | null | null | null | python/breakingRecords.py | thrama/coding-test | d031f9d2865bce1cf8edbdf65ea4357c69e7ef9a | [
"Unlicense"
] | null | null | null | #!/bin/python3
"""
Maria plays college basketball and wants to go pro. Each season she maintains
a record of her play. She tabulates the number of times she breaks her season
record for most points and least points in a game. Points scored in the first
game establish her record for the season, and she begins counting from there.
Link: https://www.hackerrank.com/challenges/breaking-best-and-worst-records/problem
"""
import math
import os
import random
import re
import sys
# Complete the breakingRecords function below.
def breakingRecords(scores):
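    # x counts how often the high-score record is broken, y the low-score record.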
x = y = 0
highScore = lowScore = scores[0]
for i in range(1, len(scores)):
        if scores[i] > highScore:
            highScore = scores[i]
            x += 1
        if scores[i] < lowScore:
            lowScore = scores[i]
            y += 1
return x, y
if __name__ == '__main__':
#scores = [ 10, 5, 20, 20, 4, 5, 2, 25, 1 ]
scores = [ 3, 4, 21, 36, 10, 28, 35, 5, 24, 42, ]
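    # expected output for this sample: (4, 0)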
result = breakingRecords(scores)
print(result) | 25.780488 | 83 | 0.641438 | 154 | 1,057 | 4.350649 | 0.577922 | 0.041791 | 0.026866 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.046213 | 0.263009 | 1,057 | 41 | 84 | 25.780488 | 0.813864 | 0.473037 | 0 | 0 | 0 | 0 | 0.014599 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.238095 | 0 | 0.333333 | 0.095238 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc2533fb3fa206768352cb80d7e17129f9b4565d | 4,221 | py | Python | year-2020/day-16/part-2.py | mpryc/advent-of-code | 7703752f6b1a490e93e3f647053b7b59d7b6780e | [
"MIT"
] | 4 | 2021-12-10T23:31:12.000Z | 2021-12-15T23:10:05.000Z | year-2020/day-16/part-2.py | sdatko/advent-of-code-2020 | 04b53d9550b5a5bdd7d680607b03d6fca4b8f11e | [
"MIT"
] | 1 | 2021-12-02T14:14:48.000Z | 2021-12-02T14:14:48.000Z | year-2020/day-16/part-2.py | sdatko/advent-of-code-2020 | 04b53d9550b5a5bdd7d680607b03d6fca4b8f11e | [
"MIT"
] | 1 | 2021-12-02T12:42:28.000Z | 2021-12-02T12:42:28.000Z | #!/usr/bin/env python3
#
# Task:
# Now that you've identified which tickets contain invalid values, discard
# those tickets entirely. Use the remaining valid tickets to determine which
# field is which.
# Using the valid ranges for each field, determine what order the fields
# appear on the tickets. The order is consistent between all tickets:
# if seat is the third field, it is the third field on every ticket,
# including your ticket.
# Once you work out which field is which, look for the six fields on your
# ticket that start with the word departure. What do you get if you multiply
# those six values together?
#
# Solution:
# First step is to create a collection of valid tickets, which contains all
# tickets that did not have bad values, plus our own ticket. Then from each
# ticket in that collection of valid ones, we take the same field, obtaining
# list of values on the position number #i. Next we iterate over the list
# of constraints and we check whether all these values fit in given ranges.
# This way be build a collection of possible positions of fields on ticket.
# Finally the tricky part is to notice that most of the fields satisfy more
# than one constraint – so by elimination, we need to pick first the field
# which fits exactly one class' constraints, then remove it as a candidate
# from all other possible positions. By doing so for every field, eventually
# we receive set of unique positions. Then we just need to find the indexes
# of fields with names starting from `departure` and multiply values under
# given indexes on our ticket.
#
INPUT_FILE = 'input.txt'
def main():
data = open(INPUT_FILE, 'r').read().split('\n\n')
notes = data[0].strip().split('\n')
constraints = {}
constraints_indexes = {}
constraints_possible_indexes = {}
for note in notes:
key, value = note.split(': ')
range1, range2 = value.split(' or ')
constraints[key] = [
[int(number) for number in range1.split('-')],
[int(number) for number in range2.split('-')],
]
constraints_indexes[key] = None
constraints_possible_indexes[key] = []
my_ticket = [int(number) for number in data[1].split('\n')[1].split(',')]
nearby_tickets = [list(map(int, line.split(',')))
for line in data[2].strip().split('\n')[1:]]
valid_tickets = []
for ticket in nearby_tickets:
valid_ticket = True
for number in ticket:
valid_number = False
for constraint in constraints.values():
if constraint[0][0] <= number <= constraint[0][1] \
or constraint[1][0] <= number <= constraint[1][1]:
valid_number = True
if not valid_number:
valid_ticket = False
if valid_ticket:
valid_tickets.append(ticket)
valid_tickets.append(my_ticket)
for name, constraint in constraints.items():
for i in range(len(my_ticket)):
numbers = [ticket[i] for ticket in valid_tickets]
valid_constraint = True
for number in numbers:
if not constraint[0][0] <= number <= constraint[0][1] \
and not constraint[1][0] <= number <= constraint[1][1]:
valid_constraint = False
if valid_constraint:
constraints_possible_indexes[name].append(i)
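    # Eliminate: repeatedly lock in any field with exactly one candidate
    # position, then remove that position from every other candidate list.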
while True:
repeat = False
for name, indexes in constraints_possible_indexes.items():
if len(indexes) == 1:
constraints_indexes[name] = indexes[0]
repeat = True
for index in constraints_indexes.values():
for name in constraints_possible_indexes:
if index in constraints_possible_indexes[name]:
constraints_possible_indexes[name].remove(index)
if not repeat:
break
wanted_fields = [index
for name, index in constraints_indexes.items()
if name.startswith('departure')]
result = 1
for index in wanted_fields:
result *= my_ticket[index]
print(result)
if __name__ == '__main__':
main()
| 34.598361 | 77 | 0.636816 | 560 | 4,221 | 4.7125 | 0.321429 | 0.050398 | 0.068966 | 0.020462 | 0.071997 | 0.049261 | 0.049261 | 0.026525 | 0 | 0 | 0 | 0.009493 | 0.276238 | 4,221 | 121 | 78 | 34.884298 | 0.85401 | 0.359394 | 0 | 0 | 0 | 0 | 0.017577 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015625 | false | 0 | 0 | 0 | 0.015625 | 0.015625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc26050e837d63b1545fcf06ce650c1cb8dcb03c | 2,698 | py | Python | pychron/dvc/dvc_irradiationable.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | [
"Apache-2.0"
] | 31 | 2016-03-07T02:38:17.000Z | 2022-02-14T18:23:43.000Z | pychron/dvc/dvc_irradiationable.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | [
"Apache-2.0"
] | 1,626 | 2015-01-07T04:52:35.000Z | 2022-03-25T19:15:59.000Z | pychron/dvc/dvc_irradiationable.py | UIllinoisHALPychron/pychron | f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc | [
"Apache-2.0"
] | 26 | 2015-05-23T00:10:06.000Z | 2022-03-07T16:51:57.000Z | # ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import Str, Property, cached_property, Instance, Event
from pychron.loggable import Loggable
class DVCAble(Loggable):
dvc = Instance("pychron.dvc.dvc.DVC")
def get_database(self):
if self.dvc:
return self.dvc
class DVCIrradiationable(DVCAble):
level = Str
levels = Property(depends_on="irradiation, updated")
irradiation = Str
irradiations = Property(depends_on="updated")
updated = Event
_suppress_auto_select_irradiation = False
def verify_database_connection(self, inform=True):
# return self.dvc.initialize(inform)
self.debug("Verify database connection")
ret = self.dvc.initialize(inform)
if ret:
# trigger reload of irradiations, and levels
self.updated = True
return ret
def load(self):
pass
def setup(self):
pass
@cached_property
def _get_irradiations(self):
irrad_names = []
db = self.get_database()
        if db and db.connect():
with db.session_ctx():
irs = db.get_irradiations()
if irs:
irrad_names = [i.name for i in irs]
if irrad_names:
if not self.irradiation:
self.irradiation = irrad_names[0]
return irrad_names
@cached_property
def _get_levels(self):
levels = []
db = self.get_database()
        if db and db.connect():
with db.session_ctx():
irrad = db.get_irradiation(self.irradiation)
if irrad:
levels = sorted([li.name for li in irrad.levels])
if levels:
if not self.level:
self.level = levels[0]
return levels
# ============= EOF =============================================
| 31.011494 | 81 | 0.55189 | 291 | 2,698 | 5.024055 | 0.415808 | 0.04104 | 0.017784 | 0.021888 | 0.060192 | 0.060192 | 0.060192 | 0.060192 | 0.060192 | 0.060192 | 0 | 0.005181 | 0.284655 | 2,698 | 86 | 82 | 31.372093 | 0.752332 | 0.336546 | 0 | 0.2 | 0 | 0 | 0.040678 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0.04 | 0.04 | 0 | 0.42 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc2634892f8784964a7118f4dc04f44eac7d84c1 | 792 | py | Python | src/gluonts/nursery/tsbench/src/tsbench/surrogate/transformers/__init__.py | RingoIngo/gluon-ts | 62fb20c36025fc969653accaffaa783671709564 | [
"Apache-2.0"
] | 1 | 2022-03-28T01:17:00.000Z | 2022-03-28T01:17:00.000Z | src/gluonts/nursery/tsbench/src/tsbench/surrogate/transformers/__init__.py | RingoIngo/gluon-ts | 62fb20c36025fc969653accaffaa783671709564 | [
"Apache-2.0"
] | null | null | null | src/gluonts/nursery/tsbench/src/tsbench/surrogate/transformers/__init__.py | RingoIngo/gluon-ts | 62fb20c36025fc969653accaffaa783671709564 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from .config import ConfigTransformer, EnsembleConfigTransformer
from .performance import PerformanceTransformer
__all__ = [
"ConfigTransformer",
"EnsembleConfigTransformer",
"PerformanceTransformer",
]
| 36 | 75 | 0.765152 | 104 | 792 | 5.788462 | 0.653846 | 0.099668 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012048 | 0.161616 | 792 | 21 | 76 | 37.714286 | 0.894578 | 0.694444 | 0 | 0 | 0 | 0 | 0.279476 | 0.20524 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc280bec7698b83b03b4a78dc846b4681e0fca25 | 5,522 | py | Python | adm.py | afhpayne/adm | 5454d58626732b5afd4727f5ee3c7af4e8538272 | [
"MIT"
] | 4 | 2021-04-07T19:10:20.000Z | 2022-01-23T14:36:50.000Z | adm.py | afhpayne/adm | 5454d58626732b5afd4727f5ee3c7af4e8538272 | [
"MIT"
] | 1 | 2018-10-17T00:40:56.000Z | 2018-10-17T00:40:56.000Z | adm.py | afhpayne/adm | 5454d58626732b5afd4727f5ee3c7af4e8538272 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# Andrew Payne, info(*t)duckbrainsoftware(d*t)com
# MIT License
# Copyright (c) 2018-2021 Andrew Payne
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Software Data:
soft_name = "ADM"
soft_tag = "a simple display manager"
# Version
soft_vers = "1.2.4"
import datetime
import getpass
import os
import pathlib
import platform
import readline
import shutil
import socket
import subprocess
import time
# Colors
W = '\033[0m'   # reset to terminal default (used as white)
O = '\033[33m'  # orange (ANSI yellow)
# Lists
wm_sort = []
wm_choose = []
xinitrc_dir = []
# Dictionaries
wm_print = {}
# Home location
user_home = os.environ['HOME']
# These Linux distros have been tested
linuxes = ["slackware", "arch", "void", "debian"]
def os_specific_xinit_loc_func():
check_platform = platform.platform()
with open('/etc/os-release') as distro:
check_distro = distro.read()
hostname = socket.gethostname()
tripwire = 0
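    # tripwire flags whether a supported platform was detected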
if "linux" in check_platform.lower():
for linux in linuxes:
if linux in (str(check_distro.lower())):
xinitrc_dir.append('/etc/X11/xinit')
tripwire = 1
elif "freebsd" in check_platform.lower():
xinitrc_dir.append('/usr/local/etc/X11/xinit')
tripwire = 1
    # separate check (not elif), so unsupported Linux distros are caught too
    if tripwire == 0:
        print(check_platform, "is not supported in this release. Exiting.")
        exit(1)
def make_list_of_xinitrcs_func():
for wm in os.listdir(os.path.join(xinitrc_dir[0])):
if os.path.isdir(os.path.join(xinitrc_dir[0] + "/" + wm)) is False:
if wm.startswith("xinitrc") and len(wm) > 8:
wm = wm.replace("xinitrc.", "")
wm_sort.append(wm)
wm_sort.sort()
def make_xinitrc_dict_func():
key = 1
for wm in wm_sort:
wm_print.update({key:wm})
key += 1
# Let's get started
os_specific_xinit_loc_func()
make_list_of_xinitrcs_func()
make_xinitrc_dict_func()
# get terminal window size to set layout
getsize = shutil.get_terminal_size()
column,line = getsize
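# reserve 1/16 of the terminal height/width as top/left margins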
head_factor = (round(line * .0625))
left_factor = (round(column * .0625))
header = ("\n" * head_factor)
margin = (" " * left_factor)
divider = ("-" * 66)
welstr = ("Welcome to " + O + soft_name + W + " version " + soft_vers + ", " + soft_tag + ".")
date = datetime.datetime.now().strftime("%I:%M %p on %A, %B %_d %Y.")
datestr = ("It is " + date)
username = getpass.getuser()
hostname = socket.gethostname()
youstr = ("You are logged in as " + username + " on " + hostname + ".")
system = platform.system()
release = platform.release()
cpu = platform.processor()
sysstr = ("Running " + system + " " + release + " " + cpu)
herestr = ("Here are the window managers found on your system...")
os.system('clear')
print(header)
print(margin + divider)
print(margin + welstr)
print("\n")
print(margin + datestr)
print("\n")
print(margin + youstr)
print("\n")
print(margin + sysstr)
print(margin + divider)
print("")
print(margin + herestr)
print("")
for key,value in wm_print.items():
print(margin + " " * 4 + "[" + str(key) + "] " + str(value))
print("")
user_num = 0
while user_num == 0:
user_num = input(margin + " "*4 + "(Q) to quit, or enter a number: ")
if user_num == 'Q' or user_num == 'q':
user_num = 1
os.system('clear')
exit(0)
else:
try:
            x = int(user_num) - 1
            if x < 0:
                raise IndexError  # menu numbers start at 1; reject 0 and negatives
            winman = wm_sort[x]
user_uid = os.getuid()
group_gid = os.getgid()
## COMMENT OUT NEXT 3 LINES TO DISABLE SAFETY BACKUP OF CURRENT XINITRC
if os.path.isfile(os.path.join(user_home, '.xinitrc')):
shutil.move(os.path.join(user_home, '.xinitrc'), os.path.join(user_home, '.xinitrc_LAST'))
os.chmod(os.path.join(user_home, '.xinitrc_LAST'), 0o666)
shutil.copy2((os.path.join(xinitrc_dir[0] + "/" + "xinitrc." + winman)), os.path.join(user_home, '.xinitrc'))
os.chown(os.path.join(user_home, '.xinitrc'), user_uid, group_gid)
print("")
print((margin + " "*4), end=" ")
print("-->", end=" ")
time.sleep(.1)
print("Starting " + winman)
time.sleep(.4)
user_num = 1
subprocess.run('startx')
exit(0)
        except ValueError:
            print(margin + " " * 4 + "\t" + user_num + " is not an option")
            user_num = 0  # keep prompting instead of falling out of the loop
        except IndexError:
            print(margin + " " * 4 + "\t" + user_num + " is not an option")
            user_num = 0  # keep prompting instead of falling out of the loop
exit(0)
| 30.849162 | 121 | 0.632742 | 757 | 5,522 | 4.509908 | 0.393659 | 0.019332 | 0.026362 | 0.024605 | 0.124487 | 0.098711 | 0.051552 | 0.019332 | 0.019332 | 0.019332 | 0 | 0.015858 | 0.234879 | 5,522 | 178 | 122 | 31.022472 | 0.792189 | 0.249728 | 0 | 0.181818 | 0 | 0 | 0.128436 | 0.005838 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024793 | false | 0.016529 | 0.082645 | 0 | 0.107438 | 0.214876 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc2b62d6321c65690905a9ad8d5f24f82c2e62f1 | 2,968 | py | Python | PIP/Minor Assignment 11/A11Q11.py | ankitrajbiswal/SEM_5 | db716e242e77149a4091e0e564356ddc724aeff0 | [
"Apache-2.0"
] | null | null | null | PIP/Minor Assignment 11/A11Q11.py | ankitrajbiswal/SEM_5 | db716e242e77149a4091e0e564356ddc724aeff0 | [
"Apache-2.0"
] | null | null | null | PIP/Minor Assignment 11/A11Q11.py | ankitrajbiswal/SEM_5 | db716e242e77149a4091e0e564356ddc724aeff0 | [
"Apache-2.0"
] | 1 | 2022-03-02T05:07:39.000Z | 2022-03-02T05:07:39.000Z | import sys
class MyDate:
def __init__(self,day=1,month=1, year=2000):
if not(type(day)==int and type(month)==int and type(year)==int):
print('Invalid data provided for date !')
sys.exit()
if month>0 and month<=12:
self.month=month
else:
print('Invalid data for month !')
sys.exit()
if year>1900:
self.year=year
else:
            print('Invalid data for year; year should be greater than 1900.')
sys.exit()
self.day=self.checkday(day)
def checkday(self,day):
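        # Gregorian leap-year rule: divisible by 400, or by 4 but not by 100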
if self.year%400==0 or (self.year%100!=0 and self.year%4==0):
currentYear=[31,29,31,30,31,30,31,31,30,31,30,31]
else:
currentYear=[31,28,31,30,31,30,31,31,30,31,30,31]
if (day>0 and day<=currentYear[self.month-1]):
return day
else:
print('Invalid value for day')
sys.exit()
def __str__(self):
if self.day<=9:
day='0'+str(self.day)
else:
day=str(self.day)
if self.month<=9:
month='0'+str(self.month)
else:
month=str(self.month)
return day+'-'+month+'-'+str(self.year)
class MyTime:
def __init__(self, hours=0,minutes=0,seconds=0):
self.sethours(hours)
self.setminutes(minutes)
self.setseconds(seconds)
def sethours(self, hours):
if hours>=0 and hours<=23:
self.hours=hours
else:
print('invalid value for hours')
sys.exit()
def setminutes(self,minutes):
if minutes>=0 and minutes<=59:
self.minutes=minutes
else:
print('invalid value for minutes')
sys.exit()
def setseconds(self,seconds):
if seconds>=0 and seconds<=59:
self.seconds=seconds
else:
print('invalid value for seconds')
sys.exit()
def __str__(self):
if self.hours<=9:
hours='0'+str(self.hours)
else:
hours=str(self.hours)
if self.minutes<=9:
minutes='0'+str(self.minutes)
else:
minutes=str(self.minutes)
if self.seconds<=9:
seconds='0'+str(self.seconds)
else:
seconds=str(self.seconds)
return hours+':'+minutes+':'+seconds
class Appointment(MyDate, MyTime):
def __init__(self, day, month, year, hours, minutes, seconds, description):
MyDate.__init__(self, day, month, year)
MyTime.__init__(self, hours, minutes, seconds)
self.description=description
def __str__(self):
return MyDate.__str__(self)+', '+MyTime.__str__(self)+'\n'+self.description
print(Appointment(15,1, 2022, 10,0,0, 'meeting regarding Covid-19')) | 32.977778 | 84 | 0.525606 | 365 | 2,968 | 4.164384 | 0.178082 | 0.073684 | 0.031579 | 0.021053 | 0.176316 | 0.056579 | 0.056579 | 0.026316 | 0.026316 | 0.026316 | 0 | 0.057792 | 0.347035 | 2,968 | 90 | 85 | 32.977778 | 0.726522 | 0 | 0 | 0.268293 | 0 | 0 | 0.086806 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121951 | false | 0 | 0.012195 | 0.012195 | 0.219512 | 0.097561 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc2db38095640b34191932a72368e9c862e51033 | 7,234 | py | Python | armada/tests/unit/handlers/test_wait.py | drewwalters96/airship-armada | 307f1318c4e83f247f1e3838478957e7555d6ce0 | [
"Apache-2.0"
] | null | null | null | armada/tests/unit/handlers/test_wait.py | drewwalters96/airship-armada | 307f1318c4e83f247f1e3838478957e7555d6ce0 | [
"Apache-2.0"
] | null | null | null | armada/tests/unit/handlers/test_wait.py | drewwalters96/airship-armada | 307f1318c4e83f247f1e3838478957e7555d6ce0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from armada import const
from armada.exceptions import manifest_exceptions
from armada.handlers import wait
from armada.tests.unit import base
test_chart = {'wait': {'timeout': 10, 'native': {'enabled': False}}}
class ChartWaitTestCase(base.ArmadaTestCase):
def get_unit(self, chart, timeout=None):
return wait.ChartWait(
k8s=mock.MagicMock(),
release_name='test-test',
chart=chart,
namespace='test',
k8s_wait_attempts=1,
k8s_wait_attempt_sleep=1,
timeout=timeout)
def test_get_timeout(self):
unit = self.get_unit({'timeout': 5, 'wait': {'timeout': 10}})
        self.assertEqual(unit.get_timeout(), 10)
def test_get_timeout_default(self):
unit = self.get_unit({})
        self.assertEqual(unit.get_timeout(), const.DEFAULT_CHART_TIMEOUT)
def test_get_timeout_override(self):
unit = self.get_unit(
timeout=20, chart={
'timeout': 5,
'wait': {
'timeout': 10
}
})
        self.assertEqual(unit.get_timeout(), 20)
def test_get_timeout_deprecated(self):
unit = self.get_unit({'timeout': 5})
        self.assertEqual(unit.get_timeout(), 5)
def test_is_native_enabled_default_true(self):
unit = self.get_unit({})
        self.assertEqual(unit.is_native_enabled(), True)
def test_is_native_enabled_true(self):
unit = self.get_unit({'wait': {'native': {'enabled': True}}})
        self.assertEqual(unit.is_native_enabled(), True)
def test_is_native_enabled_false(self):
unit = self.get_unit({'wait': {'native': {'enabled': False}}})
        self.assertEqual(unit.is_native_enabled(), False)
def test_waits_init(self):
unit = self.get_unit({
'wait': {
'resources': [{
'type': 'pod',
'labels': {
'foo': 'bar'
}
}, {
'type': 'job',
'labels': {
'foo': 'bar'
}
}, {
'type': 'daemonset',
'labels': {
'foo': 'bar'
},
'min_ready': 5
}, {
'type': 'deployment',
'labels': {
'foo': 'bar'
},
'min_ready': '50%'
}, {
'type': 'statefulset',
'labels': {
'foo': 'bar'
}
}]
}
}) # yapf: disable
self.assertEqual(5, len(unit.waits))
self.assertIsInstance(unit.waits[0], wait.PodWait)
self.assertIsInstance(unit.waits[1], wait.JobWait)
self.assertIsInstance(unit.waits[2], wait.DaemonSetWait)
self.assertIsInstance(unit.waits[3], wait.DeploymentWait)
self.assertIsInstance(unit.waits[4], wait.StatefulSetWait)
def test_waits_init_min_ready_fails_if_not_controller(self):
def create_pod_wait_min_ready():
self.get_unit({
'wait': {
'resources': [{
'type': 'pod',
'labels': {
'foo': 'bar'
},
'min_ready': 5
}]
}
})
self.assertRaises(manifest_exceptions.ManifestException,
create_pod_wait_min_ready)
def create_job_wait_min_ready():
self.get_unit({
'wait': {
'resources': [{
'type': 'job',
'labels': {
'foo': 'bar'
},
'min_ready': 5
}]
}
})
self.assertRaises(manifest_exceptions.ManifestException,
create_job_wait_min_ready)
def test_waits_init_invalid_type(self):
def create_with_invalid_type():
self.get_unit({
'wait': {
'resources': [{
'type': 'invalid',
'labels': {
'foo': 'bar'
},
'min_ready': 5
}]
}
})
self.assertRaises(manifest_exceptions.ManifestException,
create_with_invalid_type)
@mock.patch.object(wait.ChartWait, 'get_resource_wait')
def test_wait(self, get_resource_wait):
def return_mock(*args, **kwargs):
return mock.MagicMock()
get_resource_wait.side_effect = return_mock
unit = self.get_unit({
'wait': {
'resources': [{
'type': 'foo'
}, {
'type': 'bar'
}]
}
})
unit.wait(10)
self.assertEqual(2, len(unit.waits))
for w in unit.waits:
w.wait.assert_called_once()
class PodWaitTestCase(base.ArmadaTestCase):
def get_unit(self, labels):
return wait.PodWait(
resource_type='pod', chart_wait=mock.MagicMock(), labels=labels)
def test_include_resource(self):
def mock_resource(annotations):
resource = mock.Mock()
resource.metadata.annotations = annotations
return resource
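        # Resources annotated as Helm test hooks should be excluded from waits.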
test_resources = [
mock_resource({
'key': 'value',
'helm.sh/hook': 'test-success'
}),
mock_resource({
'helm.sh/hook': 'test-failure'
}),
mock_resource({
'helm.sh/hook': 'test-success,pre-install'
})
]
non_test_resources = [
mock_resource({
'helm.sh/hook': 'pre-install'
}),
mock_resource({
'key': 'value'
}),
mock_resource({})
]
unit = self.get_unit({})
# Validate test resources excluded
for resource in test_resources:
self.assertFalse(unit.include_resource(resource))
# Validate other resources included
for resource in non_test_resources:
self.assertTrue(unit.include_resource(resource))
| 30.91453 | 76 | 0.493503 | 671 | 7,234 | 5.120715 | 0.262295 | 0.030559 | 0.041618 | 0.043655 | 0.37369 | 0.300931 | 0.231665 | 0.2078 | 0.169965 | 0.154831 | 0 | 0.010096 | 0.397567 | 7,234 | 233 | 77 | 31.04721 | 0.778339 | 0.093033 | 0 | 0.413408 | 0 | 0 | 0.085727 | 0.003667 | 0 | 0 | 0 | 0 | 0.111732 | 1 | 0.106145 | false | 0 | 0.027933 | 0.01676 | 0.167598 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc30c861bd17f33a5407de9de8a6aafcc6ffd586 | 9,398 | py | Python | api/python/quilt3/api.py | viveklak/quilt | 248971a9e2ecd1bcae3a5af77d4d8933b2dba0c7 | [
"Apache-2.0"
] | null | null | null | api/python/quilt3/api.py | viveklak/quilt | 248971a9e2ecd1bcae3a5af77d4d8933b2dba0c7 | [
"Apache-2.0"
] | null | null | null | api/python/quilt3/api.py | viveklak/quilt | 248971a9e2ecd1bcae3a5af77d4d8933b2dba0c7 | [
"Apache-2.0"
] | null | null | null | from .data_transfer import copy_file, get_bytes, delete_url, list_url
from .packages import Package
from .search_util import search_api
from .util import (QuiltConfig, QuiltException, CONFIG_PATH,
CONFIG_TEMPLATE, configure_from_default, config_exists,
configure_from_url, fix_url, get_package_registry,
load_config, PhysicalKey, read_yaml, validate_package_name,
write_yaml)
from .telemetry import ApiTelemetry
def copy(src, dest):
"""
Copies ``src`` object from QUILT to ``dest``.
Either of ``src`` and ``dest`` may be S3 paths (starting with ``s3://``)
or local file paths (starting with ``file:///``).
Parameters:
src (str): a path to retrieve
dest (str): a path to write to
"""
copy_file(PhysicalKey.from_url(fix_url(src)), PhysicalKey.from_url(fix_url(dest)))
@ApiTelemetry("api.delete_package")
def delete_package(name, registry=None, top_hash=None):
"""
Delete a package. Deletes only the manifest entries and not the underlying files.
Parameters:
name (str): Name of the package
registry (str): The registry the package will be removed from
top_hash (str): Optional. A package hash to delete, instead of the whole package.
"""
validate_package_name(name)
usr, pkg = name.split('/')
registry_parsed = PhysicalKey.from_url(get_package_registry(fix_url(registry) if registry else None))
named_packages = registry_parsed.join('named_packages')
package_path = named_packages.join(name)
paths = list(list_url(package_path))
if not paths:
raise QuiltException("No such package exists in the given directory.")
if top_hash is not None:
top_hash = Package.resolve_hash(registry_parsed, top_hash)
deleted = []
remaining = []
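        # Partition version pointers into those matching top_hash and the rest.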
for path, _ in paths:
parts = path.split('/')
if len(parts) == 1:
pkg_hash = get_bytes(package_path.join(parts[0]))
if pkg_hash.decode().strip() == top_hash:
deleted.append(parts[0])
else:
remaining.append(parts[0])
if not deleted:
raise QuiltException("No such package version exists in the given directory.")
for path in deleted:
delete_url(package_path.join(path))
if 'latest' in deleted and remaining:
# Create a new "latest". Technically, we need to compare numerically,
# but string comparisons will be fine till year 2286.
new_latest = max(remaining)
copy_file(package_path.join(new_latest), package_path.join('latest'))
else:
for path, _ in paths:
delete_url(package_path.join(path))
# Will ignore non-empty dirs.
# TODO: .join('') adds a trailing slash - but need a better way.
delete_url(package_path.join(''))
delete_url(named_packages.join(usr).join(''))
@ApiTelemetry("api.list_packages")
def list_packages(registry=None):
"""Lists Packages in the registry.
Returns a sequence of all named packages in a registry.
If the registry is None, default to the local registry.
Args:
registry(string): location of registry to load package from.
Returns:
A sequence of strings containing the names of the packages
"""
registry_parsed = PhysicalKey.from_url(get_package_registry(fix_url(registry) if registry else None))
return _list_packages(registry_parsed)
def _list_packages(registry):
"""This differs from list_packages because it does not have
telemetry on it. If Quilt code needs the functionality to list
packages under a different customer-facing API, _list_packages()
is the function that should be used to prevent duplicate metrics
(each API call that the user makes should generate a single
telemetry event).
"""
named_packages = registry.join('named_packages')
prev_pkg = None
for path, _ in list_url(named_packages):
parts = path.split('/')
if len(parts) == 3:
pkg = f'{parts[0]}/{parts[1]}'
# A package can have multiple versions, but we should only return the name once.
if pkg != prev_pkg:
prev_pkg = pkg
yield pkg
@ApiTelemetry("api.list_package_versions")
def list_package_versions(name, registry=None):
"""Lists versions of a given package.
Returns a sequence of (version, hash) of a package in a registry.
If the registry is None, default to the local registry.
Args:
registry(string): location of registry to load package from.
Returns:
A sequence of tuples containing the named version and hash.
"""
validate_package_name(name)
registry_parsed = PhysicalKey.from_url(get_package_registry(fix_url(registry) if registry else None))
return _list_package_versions(name=name, registry=registry_parsed)
def _list_package_versions(name, registry):
"""Telemetry-free version of list_package_versions. Internal quilt
code should always use _list_package_versions. See documentation
for _list_packages for why.
"""
package = registry.join('named_packages').join(name)
for path, _ in list_url(package):
parts = path.split('/')
if len(parts) == 1:
pkg_hash = get_bytes(package.join(parts[0]))
yield parts[0], pkg_hash.decode().strip()
@ApiTelemetry("api.config")
def config(*catalog_url, **config_values):
"""Set or read the QUILT configuration.
To retrieve the current config, call directly, without arguments:
>>> import quilt3
>>> quilt3.config()
To trigger autoconfiguration, call with just the navigator URL:
>>> quilt3.config('https://example.com')
To set config values, call with one or more key=value pairs:
>>> quilt3.config(navigator_url='http://example.com',
... elastic_search_url='http://example.com/queries')
Default config values can be found in `quilt3.util.CONFIG_TEMPLATE`.
Args:
catalog_url: A (single) URL indicating a location to configure from
**config_values: `key=value` pairs to set in the config
Returns:
QuiltConfig: (an ordered Mapping)
"""
return _config(*catalog_url, **config_values)
def _config(*catalog_url, **config_values):
""" telemetry-free version of config() """
if catalog_url and config_values:
raise QuiltException("Expected either an auto-config URL or key=value pairs, but got both.")
# Total distinction of args and kwargs -- config(catalog_url='http://foo.com')
if catalog_url and len(catalog_url) > 1:
raise QuiltException("`catalog_url` cannot be used with other `config_values`.")
# Use given catalog's config to replace local configuration
if catalog_url:
catalog_url = catalog_url[0]
# If catalog_url is empty, reset to an empty config.
if catalog_url:
config_template = configure_from_url(catalog_url)
else:
config_template = read_yaml(CONFIG_TEMPLATE)
write_yaml(config_template, CONFIG_PATH, keep_backup=True)
local_config = config_template
# Create a custom config with the passed-in values only
elif config_values:
local_config = load_config()
config_values = QuiltConfig('', config_values) # Does some validation/scrubbing
for key, value in config_values.items():
local_config[key] = value
write_yaml(local_config, CONFIG_PATH)
# Return the current config if present or create one from the default stack
else:
if config_exists():
local_config = load_config()
else:
local_config = configure_from_default()
# Return current config
return QuiltConfig(CONFIG_PATH, local_config)
@ApiTelemetry("api.disable_telemetry")
def disable_telemetry():
""" Permanently disable sending of anonymous usage metrics """
_disable_telemetry()
def _disable_telemetry():
_config(telemetry_disabled=True)
@ApiTelemetry("api.search")
def search(query, limit=10):
"""
Execute a search against the configured search endpoint.
Args:
query (str): query string to search
limit (number): maximum number of results to return. Defaults to 10
Query Syntax:
[simple query string query](
https://www.elastic.co/guide/en/elasticsearch/reference/6.8/query-dsl-simple-query-string-query.html)
Returns:
a list of objects with the following structure:
```
[{
"_id": <document unique id>
"_index": <source index>,
"_score": <relevance score>
"_source":
"key": <key of the object>,
"size": <size of object in bytes>,
"user_meta": <user metadata from meta= via quilt3>,
"last_modified": <timestamp from ElasticSearch>,
"updated": <object timestamp from S3>,
"version_id": <version_id of object version>
"_type": <document type>
}, ...]
```
"""
# force a call to configure_from_default if no config exists
_config()
raw_results = search_api(query, '*', limit)
return raw_results['hits']['hits']
| 35.598485 | 113 | 0.655991 | 1,195 | 9,398 | 4.988285 | 0.238494 | 0.025164 | 0.015098 | 0.014092 | 0.190908 | 0.137896 | 0.102667 | 0.102667 | 0.102667 | 0.102667 | 0 | 0.004408 | 0.251649 | 9,398 | 263 | 114 | 35.73384 | 0.843168 | 0.430624 | 0 | 0.211009 | 0 | 0 | 0.083115 | 0.013484 | 0 | 0 | 0 | 0.003802 | 0 | 1 | 0.100917 | false | 0 | 0.045872 | 0 | 0.192661 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc314c2e4e9cc3287b2ead534fc34c22a7709ef8 | 629 | py | Python | Basic's/Projects/Automation with Python/ExcelSpreadSheet6.py | Fahad-Hafeez/Python-Learning | c37f26e10c1d23f5277327c9bc474c19747f2b90 | [
"Apache-2.0"
] | null | null | null | Basic's/Projects/Automation with Python/ExcelSpreadSheet6.py | Fahad-Hafeez/Python-Learning | c37f26e10c1d23f5277327c9bc474c19747f2b90 | [
"Apache-2.0"
] | null | null | null | Basic's/Projects/Automation with Python/ExcelSpreadSheet6.py | Fahad-Hafeez/Python-Learning | c37f26e10c1d23f5277327c9bc474c19747f2b90 | [
"Apache-2.0"
] | null | null | null | import openpyxl as xl
from openpyxl.chart import BarChart, Reference
def process_workbook(filename):
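    # Apply a 10% discount to the column-3 prices, write the results to
    # column 4, then chart the corrected prices.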
wb = xl.load_workbook(filename)
sheet = wb['Sheet1']
for row in range(2, sheet.max_row + 1):
cell = sheet.cell(row, 3)
corrected_price = cell.value * 0.9
        corrected_price_cell = sheet.cell(row, 4)
        corrected_price_cell.value = corrected_price  # write back to the sheet
values = Reference(sheet,
min_row=2,
max_row=sheet.max_row,
min_col=4,
max_col=4)
chart = BarChart()
chart.add_data(values)
sheet.add_chart(chart, 'e2')
wb.save(filename)
| 24.192308 | 49 | 0.621622 | 85 | 629 | 4.411765 | 0.447059 | 0.149333 | 0.144 | 0.085333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024283 | 0.279809 | 629 | 25 | 50 | 25.16 | 0.803532 | 0 | 0 | 0 | 0 | 0 | 0.012719 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.105263 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc3296a94e8c01b9ceba107583ab31525fb67cc1 | 1,012 | py | Python | docker/kuberay-autoscaler/run_autoscaler_with_retries.py | dsctt/ray | 29d94a22114b02adfd3745c4991a3ce70592dd16 | [
"Apache-2.0"
] | 1 | 2021-09-20T15:45:59.000Z | 2021-09-20T15:45:59.000Z | docker/kuberay-autoscaler/run_autoscaler_with_retries.py | dsctt/ray | 29d94a22114b02adfd3745c4991a3ce70592dd16 | [
"Apache-2.0"
] | 53 | 2021-10-06T20:08:04.000Z | 2022-03-21T20:17:25.000Z | docker/kuberay-autoscaler/run_autoscaler_with_retries.py | dsctt/ray | 29d94a22114b02adfd3745c4991a3ce70592dd16 | [
"Apache-2.0"
] | null | null | null | import os
import subprocess
import sys
import time
here = os.path.dirname(os.path.abspath(__file__))
run_autoscaler_script = os.path.join(here, "run_autoscaler.py")
BACKOFF_S = 5
if __name__ == "__main__":
"""Keep trying to start the autoscaler until it runs.
We need to retry until the Ray head is running.
This script also has the effect of restarting the autoscaler if it fails.
Autoscaler-starting attempts are run in subprocesses out of fear that a
failed Monitor.start() attempt could leave dangling half-initialized global
Python state.
"""
while True:
try:
# We are forwarding all the command line arguments of
# run_autoscaler_with_retries.py to run_autoscaler.py.
            subprocess.run(
                ["python", f"{run_autoscaler_script}"] + sys.argv[1:],
                check=True,  # raise on non-zero exit so the retry loop kicks in
            )  # noqa: B1
except subprocess.SubprocessError:
print(f"Restarting autoscaler in {BACKOFF_S} seconds.")
time.sleep(BACKOFF_S)
| 32.645161 | 79 | 0.674901 | 136 | 1,012 | 4.845588 | 0.610294 | 0.098634 | 0.057663 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003947 | 0.249012 | 1,012 | 30 | 80 | 33.733333 | 0.863158 | 0.11166 | 0 | 0 | 0 | 0 | 0.1875 | 0.043561 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc3308f66627e5cf612b92b630dad7a5c8a947bd | 2,519 | py | Python | feedback_bot/service_layer/unit_of_work.py | polfpilf/feedback-bot | 9e3aa243bb5189af4789a85473facf1bb6977a38 | [
"Unlicense"
] | null | null | null | feedback_bot/service_layer/unit_of_work.py | polfpilf/feedback-bot | 9e3aa243bb5189af4789a85473facf1bb6977a38 | [
"Unlicense"
] | null | null | null | feedback_bot/service_layer/unit_of_work.py | polfpilf/feedback-bot | 9e3aa243bb5189af4789a85473facf1bb6977a38 | [
"Unlicense"
] | 1 | 2021-08-30T00:44:57.000Z | 2021-08-30T00:44:57.000Z | from abc import ABC, abstractmethod
from contextlib import AbstractAsyncContextManager
from dataclasses import dataclass
from typing import Dict, Tuple
import aiogram
import asyncpg
from feedback_bot.adapters import telegram
from feedback_bot.adapters.repositories import (
admin as admin_repository,
target_chat as target_chat_repository,
forwarded_message as forwarded_message_repository,
)
from feedback_bot.model import Admin, TargetChat, ForwardedMessage
class AbstractUnitOfWork(AbstractAsyncContextManager):
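    """Groups repository operations so they commit or roll back atomically."""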
admins: admin_repository.AbstractAdminRepository
target_chats: target_chat_repository.AbstractTargetChatRepository
forwarded_messages: forwarded_message_repository.AbstractForwardedMessageRepository
telegram_api: telegram.AbstractTelegramAPI
async def __aenter__(self):
return self
async def __aexit__(self, *args):
await self.rollback()
async def rollback(self):
await self._rollback()
async def commit(self):
await self._commit()
@abstractmethod
async def _rollback(self):
raise NotImplementedError
@abstractmethod
async def _commit(self):
raise NotImplementedError
class PostgresUnitOfWork(AbstractUnitOfWork):
_pool: asyncpg.Pool
_conn: asyncpg.Connection
_transaction: asyncpg.transaction.Transaction
_committed: bool
_rolled_back: bool
def __init__(self, bot: aiogram.Bot, pool: asyncpg.Pool):
self.telegram_api = telegram.TelegramAPI(bot)
self._pool = pool
self._committed = False
self._rolled_back = False
async def __aenter__(self):
self._conn = await self._pool.acquire()
self._transaction = self._conn.transaction()
await self._transaction.start()
self.admins = admin_repository.PostgresAdminRepository(self._conn)
self.target_chats = (
target_chat_repository.PostgresTargetChatRepository(self._conn)
)
        self.forwarded_messages = (
            forwarded_message_repository.PostgresForwardedMessageRepository(self._conn)
        )
        return self  # so `async with ... as uow` binds the unit of work
async def __aexit__(self, *args):
await super().__aexit__(*args)
await self._pool.release(self._conn)
async def _commit(self):
await self._transaction.commit()
self._committed = True
async def _rollback(self):
if self._committed or self._rolled_back:
return
await self._transaction.rollback()
self._rolled_back = True
| 28.954023 | 87 | 0.720524 | 256 | 2,519 | 6.761719 | 0.28125 | 0.046216 | 0.025997 | 0.034662 | 0.168111 | 0.061236 | 0 | 0 | 0 | 0 | 0 | 0 | 0.213974 | 2,519 | 86 | 88 | 29.290698 | 0.874242 | 0 | 0 | 0.184615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015385 | false | 0 | 0.138462 | 0 | 0.353846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bc820b43e6e7f058e37025651e80d40d9d5d508 | 1,845 | py | Python | oaff/fastapi/api/util.py | JBurkinshaw/ogc-api-fast-features | 4fc6ba3cc4df1600450fe4c9f35320b00c69f158 | [
"MIT"
] | null | null | null | oaff/fastapi/api/util.py | JBurkinshaw/ogc-api-fast-features | 4fc6ba3cc4df1600450fe4c9f35320b00c69f158 | [
"MIT"
] | null | null | null | oaff/fastapi/api/util.py | JBurkinshaw/ogc-api-fast-features | 4fc6ba3cc4df1600450fe4c9f35320b00c69f158 | [
"MIT"
] | null | null | null | import re
from oaff.app.responses.response_format import ResponseFormat
from oaff.fastapi.api import settings
def alternate_format_for_url(url: str, format: ResponseFormat) -> str:
format_pattern = re.compile(
rf"(\?|&)format=({'|'.join([format.name for format in ResponseFormat])})" # noqa: E501
)
query_string_pattern = re.compile(r"(\?|&).+=.*")
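    # Swap an existing format parameter in place; otherwise append one, using
    # '&' if the URL already carries a query string and '?' if not.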
if re.search(format_pattern, url) is not None:
return re.sub(format_pattern, rf"\1format={format.name}", url)
else:
connector = "?"
if re.search(query_string_pattern, url):
connector = "&"
return f"{url}{connector}format={format.name}"
def next_page(url: str) -> str:
return _change_page(url, True)
def prev_page(url: str) -> str:
return _change_page(url, False)
def _change_page(url: str, forward: bool) -> str:
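    # Parse the query string, shift offset by one page in the requested
    # direction (clamped at zero), and rebuild the URL.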
url_parts = url.split("?")
parameters = {
key: value
for key, value in [
part.split("=")
for part in [
part
for part in (url_parts[1] if len(url_parts) == 2 else "").split("&")
if len(part) > 0
]
]
}
limit = (
int(parameters["limit"])
if "limit" in parameters
else settings.ITEMS_LIMIT_DEFAULT
)
offset = (
int(parameters["offset"])
if "offset" in parameters
else settings.ITEMS_OFFSET_DEFAULT
)
return "{0}?{1}".format(
url_parts[0],
"&".join(
[
f"{key}={value}"
for key, value in {
**parameters,
**{
"offset": max(offset + limit * (1 if forward else -1), 0),
"limit": limit,
},
}.items()
]
),
)
| 27.537313 | 95 | 0.511653 | 202 | 1,845 | 4.539604 | 0.311881 | 0.038168 | 0.032715 | 0.028353 | 0.178844 | 0.115594 | 0.069793 | 0.069793 | 0 | 0 | 0 | 0.010897 | 0.353388 | 1,845 | 66 | 96 | 27.954545 | 0.757754 | 0.00542 | 0 | 0 | 0 | 0 | 0.107474 | 0.051282 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070175 | false | 0 | 0.052632 | 0.035088 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bcba6147d48584ac3836c0b84504ebb1b1617d7 | 1,332 | py | Python | tools/faults_during_bag.py | oleg-alexandrov/astrobee | 0a2daf565c075e10dc88daed5f35fd998c7585b5 | [
"Apache-2.0"
] | 1 | 2021-12-07T22:59:36.000Z | 2021-12-07T22:59:36.000Z | tools/faults_during_bag.py | oleg-alexandrov/astrobee | 0a2daf565c075e10dc88daed5f35fd998c7585b5 | [
"Apache-2.0"
] | 5 | 2022-03-23T16:48:13.000Z | 2022-03-29T22:55:22.000Z | tools/faults_during_bag.py | oleg-alexandrov/astrobee | 0a2daf565c075e10dc88daed5f35fd998c7585b5 | [
"Apache-2.0"
] | 1 | 2021-06-07T23:16:03.000Z | 2021-06-07T23:16:03.000Z | import sys
import rosbag
import rospy
from ff_msgs.msg import Fault, FaultState
def process(msg, start):
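    # print each fault in a FaultState message, with time elapsed since the bag start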
for f in msg.faults:
# Fault 21 is perching arm node missing --> ignore
if f.id != 21:
elapsed = f.time_of_fault - start
            print(
                "secs_from_start=%d fault_id=%d timestamp=%d.%09d"
                % (
                    int(elapsed.secs),
                    f.id,
                    f.time_of_fault.secs,
                    f.time_of_fault.nsecs,
                )
            )
if len(sys.argv) < 3:
print("Usage: faults_during_bag.py short_bag_for_period ars_default_bag")
exit(1)
short_bag_fn = sys.argv[1]
default_bag_fn = sys.argv[2]
print(("reading time bounds of %s" % short_bag_fn))
short_bag = rosbag.Bag(short_bag_fn)
start_ts = short_bag.get_start_time()
end_ts = short_bag.get_end_time()
short_bag.close()
print(
    "will filter events of %s starting at %f to %f"
    % (default_bag_fn, start_ts, end_ts)
)
default_bag = rosbag.Bag(default_bag_fn)
for topic, msg, time in default_bag.read_messages(
topics=["/mgt/sys_monitor/state"],
start_time=rospy.Time(start_ts),
end_time=rospy.Time(end_ts),
):
process(msg, rospy.Time(start_ts))
| 24.666667 | 77 | 0.58033 | 188 | 1,332 | 3.845745 | 0.37766 | 0.08852 | 0.029046 | 0.049793 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010941 | 0.313814 | 1,332 | 53 | 78 | 25.132075 | 0.780088 | 0.036036 | 0 | 0.047619 | 0 | 0 | 0.159126 | 0.017161 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02381 | false | 0 | 0.095238 | 0 | 0.119048 | 0.095238 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bcbc3d185e4a62b619f2e3ff7c712287865bc8c | 206 | py | Python | BigData/ML/testplot.py | kmouss/Samples-py | c43ac0b801ca76bcae790cf4e59780f55b108151 | [
"MIT"
] | null | null | null | BigData/ML/testplot.py | kmouss/Samples-py | c43ac0b801ca76bcae790cf4e59780f55b108151 | [
"MIT"
] | null | null | null | BigData/ML/testplot.py | kmouss/Samples-py | c43ac0b801ca76bcae790cf4e59780f55b108151 | [
"MIT"
] | null | null | null | import pylab
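# pylab is matplotlib's legacy MATLAB-style interface; matplotlib.pyplot is preferred in new code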
xVals = [2, 3, 4, 5]
yVals1 = [2, 3, 4, 5]
pylab.plot(xVals, yVals1, 'b-', label='Test1')
yVals2 = [1, 7, 3, 5]
pylab.plot(xVals, yVals2, 'r--', label='Test2')
pylab.legend()
pylab.show() | 20.6 | 49 | 0.592233 | 35 | 206 | 3.485714 | 0.542857 | 0.032787 | 0.04918 | 0.065574 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.105263 | 0.169903 | 206 | 10 | 50 | 20.6 | 0.608187 | 0 | 0 | 0 | 0 | 0 | 0.072464 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bd59196bb6bb4c466e14da657dae58822b2ba55 | 7,696 | py | Python | utils.py | JustM3Dev/Minecraft | ef38086181a15f7c4c9e833ee48bc9237c949d27 | [
"MIT"
] | 96 | 2015-01-02T15:16:06.000Z | 2022-03-02T11:06:52.000Z | utils.py | JustM3Dev/Minecraft | ef38086181a15f7c4c9e833ee48bc9237c949d27 | [
"MIT"
] | 35 | 2015-04-16T03:59:14.000Z | 2021-09-30T03:39:54.000Z | utils.py | JustM3Dev/Minecraft | ef38086181a15f7c4c9e833ee48bc9237c949d27 | [
"MIT"
] | 38 | 2015-01-08T05:26:43.000Z | 2021-12-16T10:23:34.000Z | # Imports, sorted alphabetically.
# Python packages
import os
import struct
from typing import Tuple, List
from ctypes import byref
# Third-party packages
import pyglet
from pyglet.gl import *
# Modules from this project
import globals as G
from custom_types import iVector, fVector
__all__ = (
'load_image', 'image_sprite', 'hidden_image_sprite', 'vec', 'FastRandom',
'init_resources', 'init_font', 'get_block_icon',
'FACES', 'FACES_WITH_DIAGONALS', 'normalize_float', 'normalize',
'sectorize', 'TextureGroup', 'make_nbt_from_dict', 'extract_nbt'
)
def load_image(*args):
    path = os.path.join(*args)
    return pyglet.image.load(path) if os.path.isfile(path) else None
def image_sprite(image, batch, group, x: int = 0, y: int = 0, width: int = None, height: int = None):
if image is None or batch is None or group is None:
return None
width = width or image.width
height = height or image.height
if isinstance(group, int):
group = pyglet.graphics.OrderedGroup(group)
return pyglet.sprite.Sprite(image.get_region(x, y, width, height),
batch=batch, group=group)
def hidden_image_sprite(*args, **kwargs):
sprite = image_sprite(*args, **kwargs)
if sprite:
sprite.visible = False
return sprite
def vec(*args):
"""Creates GLfloat arrays of floats"""
return (GLfloat * len(args))(*args)
# fast math algorithms
class FastRandom:
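    """Linear congruential generator using the MSVC rand() constants (fast but low quality)."""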
seed: int
def __init__(self, seed):
self.seed = seed
def randint(self) -> int:
self.seed = (214013 * self.seed + 2531011)
return (self.seed >> 16) & 0x7FFF
def init_resources():
init_font('resources/fonts/Chunkfive.ttf', 'ChunkFive Roman')
init_font('resources/fonts/slkscr.ttf', 'Silkscreen Normal')
def init_font(filename, fontname):
pyglet.font.add_file(filename)
pyglet.font.load(fontname)
_block_icon_fbo = None
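# render the block into an offscreen framebuffer so inventories get a rotated 3D icon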
def get_block_icon(block, icon_size, world):
global _block_icon_fbo
print(block.id.filename())
block_icon = G.texture_pack_list.selected_texture_pack.load_texture(block.id.filename()) \
or (block.group or world.group).texture.get_region(
int(block.texture_data[2 * 8] * G.TILESET_SIZE) * icon_size,
int(block.texture_data[2 * 8 + 1] * G.TILESET_SIZE) * icon_size,
icon_size,
icon_size)
if block.id.is_item():
return block_icon
# create 3d icon for blocks
if _block_icon_fbo is None:
_block_icon_fbo = GLuint(0)
glGenFramebuffers(1, byref(_block_icon_fbo))
glBindFramebuffer(GL_FRAMEBUFFER, _block_icon_fbo)
icon_texture = pyglet.image.Texture.create(icon_size, icon_size, GL_RGBA)
glBindTexture(GL_TEXTURE_2D, icon_texture.id)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, icon_size, icon_size, 0, GL_RGBA, GL_FLOAT, None)
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, icon_texture.id, 0)
viewport = (GLint * 4)()
glGetIntegerv(GL_VIEWPORT, viewport)
glViewport(0, 0, icon_size, icon_size)
glClearColor(1.0, 1.0, 1.0, 0.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glMatrixMode(GL_PROJECTION)
glPushMatrix()
glLoadIdentity()
glOrtho(-1.5, 1.5, -1.5, 1.5, -10, 10)
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
glLoadIdentity()
glColor4f(1.0, 1.0, 1.0, 1.0)
glRotatef(-45.0, 0.0, 1.0, 0.0)
glRotatef(-30.0, -1.0, 0.0, 1.0)
glScalef(1.5, 1.5, 1.5)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
vertex_data = block.get_vertices(0, 0, 0)
texture_data = block.texture_data
count = len(texture_data) // 2
batch = pyglet.graphics.Batch()
batch.add(count, GL_QUADS, (block.group or world.group),
('v3f/static', vertex_data),
('t2f/static', texture_data))
batch.draw()
glMatrixMode(GL_PROJECTION)
glPopMatrix()
glMatrixMode(GL_MODELVIEW)
glPopMatrix()
glBindFramebuffer(GL_FRAMEBUFFER, 0)
glViewport(*viewport)
glClearColor(0.0, 0.0, 0.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
return icon_texture.get_image_data()
FACES: Tuple[iVector, ...] = (
( 0, 1, 0),
( 0, -1, 0),
(-1, 0, 0),
( 1, 0, 0),
( 0, 0, 1),
( 0, 0, -1),
)
FACES_WITH_DIAGONALS: Tuple[iVector, ...] = FACES + (
(-1, -1, 0),
(-1, 0, -1),
( 0, -1, -1),
( 1, 1, 0),
( 1, 0, 1),
( 0, 1, 1),
( 1, -1, 0),
( 1, 0, -1),
( 0, 1, -1),
(-1, 1, 0),
(-1, 0, 1),
( 0, -1, 1),
)
def normalize_float(f: float) -> int:
"""
This is faster than int(round(f)). Nearly two times faster.
Since it is run at least 500,000 times during map generation,
and also in game logic, it has a major impact on performance.
>>> normalize_float(0.2)
0
>>> normalize_float(-0.4)
0
>>> normalize_float(0.5)
1
>>> normalize_float(-0.5)
-1
>>> normalize_float(0.0)
0
"""
int_f = int(f)
if f > 0:
if f - int_f < 0.5:
return int_f
return int_f + 1
if f - int_f > -0.5:
return int_f
return int_f - 1
def normalize(position: fVector) -> fVector:
x, y, z = position
return normalize_float(x), normalize_float(y), normalize_float(z)
def sectorize(position: iVector) -> iVector:
x, y, z = normalize(position)
x, y, z = (x // G.SECTOR_SIZE,
y // G.SECTOR_SIZE,
z // G.SECTOR_SIZE)
return x, y, z
class TextureGroup(pyglet.graphics.Group):
def __init__(self, path):
super(TextureGroup, self).__init__()
self.texture = pyglet.image.load(path).get_texture()
def set_state(self):
glBindTexture(self.texture.target, self.texture.id)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glEnable(self.texture.target)
def unset_state(self):
glDisable(self.texture.target)
# Named Binary Tag
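# Wire format: each dict entry is a length-prefixed UTF-8 key, a 1-byte type tag
# (0 = int, 1 = str), then the value (ints are 4 bytes; strings are length-prefixed).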
def make_int_packet(i: int) -> bytes:
return struct.pack('i', i)
def extract_int_packet(packet: bytes):
"""
:rtype: (bytes, int)
"""
return packet[4:], struct.unpack('i', packet[:4])[0]
def make_string_packet(s: str) -> bytes:
return struct.pack('i', len(s)) + s.encode('utf-8')
def extract_string_packet(packet: bytes):
"""
:rtype: (bytes, str)
"""
strlen = struct.unpack('i', packet[:4])[0]
packet = packet[4:]
s = packet[:strlen].decode('utf-8')
packet = packet[strlen:]
return packet, s
def make_packet(obj) -> bytes:
if type(obj) == int:
return make_int_packet(obj)
elif type(obj) == str:
return make_string_packet(obj)
else:
        print('make_packet: unsupported type: ' + str(type(obj)))
return None
def extract_packet(packet):
tag, packet = struct.unpack('B', packet[:1])[0], packet[1:]
if tag == 0:
return extract_int_packet(packet)
elif tag == 1:
return extract_string_packet(packet)
def type_tag(t) -> bytes:
tag = 0
if t == int:
tag = 0
elif t == str:
tag = 1
return struct.pack('B', tag)
def make_nbt_from_dict(d: dict) -> bytes:
packet = b''
for key in list(d.keys()):
packet += make_string_packet(key) + type_tag(type(d[key])) + make_packet(d[key])
return packet
def extract_nbt(nbt):
result = {}
while len(nbt) > 0:
nbt, key = extract_string_packet(nbt)
nbt, value = extract_packet(nbt)
result[key] = value
return result
| 26.537931 | 101 | 0.626429 | 1,088 | 7,696 | 4.240809 | 0.224265 | 0.01257 | 0.014304 | 0.012137 | 0.150195 | 0.105115 | 0.050282 | 0.048765 | 0.038145 | 0.038145 | 0 | 0.035525 | 0.239215 | 7,696 | 289 | 102 | 26.629758 | 0.752519 | 0.072505 | 0 | 0.090909 | 0 | 0 | 0.048912 | 0.00782 | 0 | 0 | 0.000853 | 0 | 0 | 1 | 0.121212 | false | 0 | 0.040404 | 0.010101 | 0.308081 | 0.010101 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bde4b4e9b9b5bc2427f2c9f8972821d80b74b4b | 23,295 | py | Python | tests/unit/test_lgr_core.py | icann/lgr-core | 482e1c2cc485eb666e8b3547644baf0e364ebc96 | [
"BSD-3-Clause"
] | 7 | 2017-07-10T22:39:52.000Z | 2021-06-25T20:19:28.000Z | tests/unit/test_lgr_core.py | icann/lgr-core | 482e1c2cc485eb666e8b3547644baf0e364ebc96 | [
"BSD-3-Clause"
] | 13 | 2016-10-26T19:42:00.000Z | 2021-12-13T19:43:42.000Z | tests/unit/test_lgr_core.py | icann/lgr-core | 482e1c2cc485eb666e8b3547644baf0e364ebc96 | [
"BSD-3-Clause"
] | 8 | 2016-11-07T15:40:27.000Z | 2020-09-22T13:48:52.000Z | # -*- coding: utf-8 -*-
"""
test_lgr_core.py - Unit testing of LGR code module.
"""
from __future__ import unicode_literals
import types
import unittest
from lgr.char import Char, RangeChar
from lgr.classes import TAG_CLASSNAME_PREFIX
from lgr.core import LGR
from lgr.exceptions import (CharAlreadyExists,
VariantAlreadyExists,
CharInvalidIdnaProperty,
CharInvalidContextRule,
VariantInvalidContextRule,
RangeInvalidContextRule,
NotInRepertoire,
NotInLGR,
DuplicateReference,
LGRFormatException)
from munidata.database import IDNADatabase
class TestLGRCore(unittest.TestCase):
def setUp(self):
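        # every test gets a fresh LGR backed by the IDNA 6.3.0 Unicode database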
unidb = IDNADatabase('6.3.0')
self.lgr = LGR(unicode_database=unidb)
def test_add_single_cp_list(self):
self.lgr.add_cp([0x0061])
self.assertIn(0x0061, self.lgr.repertoire)
def test_add_single_cp_int(self):
self.lgr.add_cp(0x0061)
self.assertIn(0x0061, self.lgr.repertoire)
def test_add_cp_sequence(self):
self.lgr.add_cp([0x0061, 0x0062])
self.assertIn([0x0061, 0x0062], self.lgr.repertoire)
self.assertNotIn(0x0061, self.lgr.repertoire)
self.assertNotIn(0x0062, self.lgr.repertoire)
def test_add_multiple_cp_sequences(self):
self.lgr.add_cp([0x0061, 0x0062])
self.lgr.add_cp([0x0061, 0x0062, 0x0063])
self.assertIn([0x0061, 0x0062], self.lgr.repertoire)
self.assertIn([0x0061, 0x0062, 0x0063], self.lgr.repertoire)
self.assertNotIn(0x0061, self.lgr.repertoire)
self.assertNotIn(0x0062, self.lgr.repertoire)
self.assertNotIn(0x0063, self.lgr.repertoire)
def test_add_cp_in_repertoire(self):
self.lgr.add_cp([0x0061])
self.assertRaises(CharAlreadyExists, self.lgr.add_cp, [0x0061])
self.assertRaises(CharAlreadyExists, self.lgr.add_cp, 0x0061)
def test_add_cp_validation(self):
validation_lgr = LGR()
validation_lgr.add_cp([0x0061])
self.lgr.add_cp([0x0061], validating_repertoire=validation_lgr,
override_repertoire=False)
self.assertRaises(NotInRepertoire, self.lgr.add_cp, [0x0062],
validating_repertoire=validation_lgr,
override_repertoire=False)
def test_add_cp_validation_override(self):
validation_lgr = LGR()
validation_lgr.add_cp([0x0061])
self.lgr.add_cp([0x0061], validating_repertoire=validation_lgr,
override_repertoire=False)
self.lgr.add_cp([0x0062],
validating_repertoire=validation_lgr,
override_repertoire=True)
self.assertIn(0x0062, self.lgr.repertoire)
def test_del_single_cp_list(self):
self.lgr.add_cp(0x0061)
self.lgr.del_cp([0x0061])
self.assertNotIn(0x0061, self.lgr.repertoire)
def test_del_single_cp_int(self):
self.lgr.add_cp([0x0061])
self.lgr.del_cp(0x0061)
self.assertNotIn(0x0061, self.lgr.repertoire)
def test_del_cp_sequence(self):
self.lgr.add_cp([0x0061, 0x0062])
self.lgr.del_cp([0x0061, 0x0062])
self.assertEqual(len(self.lgr.repertoire), 0)
def test_del_cp_sequence_with_cp(self):
self.lgr.add_cp([0x0061, 0x0062])
self.assertRaises(NotInLGR, self.lgr.del_cp, 0x0061)
self.assertRaises(NotInLGR, self.lgr.del_cp, 0x0062)
self.assertIn([0x0061, 0x0062], self.lgr.repertoire)
def test_add_cp_when_not_when(self):
self.lgr.add_cp([0x0061], when='w1')
with self.assertRaises(CharInvalidContextRule) as cm:
self.lgr.add_cp([0x0062], when='w2', not_when='nw1')
the_exception = cm.exception
self.assertEqual(the_exception.cp,
[0x0062])
self.lgr.add_cp([0x0062], not_when='nw2')
with self.assertRaises(CharInvalidContextRule) as cm:
self.lgr.add_cp([0x0063], when='w3', not_when='nw3')
the_exception = cm.exception
self.assertEqual(the_exception.cp,
[0x0063])
def test_add_range(self):
self.lgr.add_range(0x0061, 0x007A)
for cp in range(0x0061, 0x007A + 1):
self.assertIn(cp, self.lgr.repertoire)
def test_add_range_in_repertoire(self):
self.lgr.add_range(0x0061, 0x007A)
self.assertRaises(CharAlreadyExists,
self.lgr.add_range, 0x0061, 0x007A)
def test_add_range_validation(self):
validation_lgr = LGR()
for cp in range(0x0061, 0x007A + 1):
validation_lgr.add_cp(cp)
self.lgr.add_range(0x0061, 0x007A,
validating_repertoire=validation_lgr,
override_repertoire=False)
self.assertRaises(NotInRepertoire, self.lgr.add_range, 0x00F8, 0x00FF,
validating_repertoire=validation_lgr,
override_repertoire=False)
def test_add_range_validation_with_range(self):
validation_lgr = LGR()
validation_lgr.add_range(0x0061, 0x007A)
self.lgr.add_range(0x0061, 0x007A,
validating_repertoire=validation_lgr,
override_repertoire=False)
self.assertRaises(NotInRepertoire, self.lgr.add_range, 0x00F8, 0x00FF,
validating_repertoire=validation_lgr,
override_repertoire=False)
def test_add_range_validation_override(self):
validation_lgr = LGR()
for cp in range(0x0061, 0x007A):
validation_lgr.add_cp(cp)
self.lgr.add_range(0x0031, 0x0032,
validating_repertoire=validation_lgr,
override_repertoire=True)
self.assertIn(0x0031, self.lgr.repertoire)
def test_add_range_when_not_when(self):
self.lgr.add_range(0x0061, 0x0065, when='w1')
with self.assertRaises(RangeInvalidContextRule) as cm:
self.lgr.add_range(0x0066, 0x007A, when='w2', not_when='nw1')
the_exception = cm.exception
self.assertEqual(the_exception.first_cp,
0x0066)
self.assertEqual(the_exception.last_cp,
0x007A)
self.lgr.add_range(0x0066, 0x007A, not_when='nw2')
with self.assertRaises(RangeInvalidContextRule) as cm:
self.lgr.add_range(0x01BD, 0x01C3, when='w3', not_when='nw3')
the_exception = cm.exception
self.assertEqual(the_exception.first_cp,
0x01BD)
self.assertEqual(the_exception.last_cp,
0x01C3)
def test_expand_ranges(self):
self.lgr.add_range(0x0061, 0x007A)
for cp in range(0x0061, 0x007A + 1):
self.assertIsInstance(self.lgr.get_char(cp), RangeChar)
self.lgr.add_range(0x01BD, 0x01C3)
for cp in range(0x01BD, 0x01C3 + 1):
self.assertIsInstance(self.lgr.get_char(cp), RangeChar)
self.lgr.expand_ranges()
for cp in range(0x0061, 0x007A + 1):
char = self.lgr.get_char(cp)
self.assertIsInstance(char, Char)
self.assertNotIsInstance(char, RangeChar)
for cp in range(0x01BD, 0x01C3 + 1):
char = self.lgr.get_char(cp)
self.assertIsInstance(char, Char)
self.assertNotIsInstance(char, RangeChar)
def test_expand_range(self):
self.lgr.add_range(0x0061, 0x007A)
for cp in range(0x0061, 0x007A + 1):
self.assertIsInstance(self.lgr.get_char(cp), RangeChar)
self.lgr.expand_range(0x0061, 0x007A)
for cp in range(0x0061, 0x007A + 1):
char = self.lgr.get_char(cp)
self.assertIsInstance(char, Char)
self.assertNotIsInstance(char, RangeChar)
def test_add_variant_in_repertoire(self):
self.lgr.add_cp([0x0061])
self.lgr.add_variant([0x0061], [0x0030])
self.assertRaises(VariantAlreadyExists, self.lgr.add_variant, [0x0061],
[0x0030])
def test_add_variant_validation(self):
validation_lgr = LGR()
validation_lgr.add_cp([0x0061])
validation_lgr.add_cp([0x0030])
self.lgr.add_cp([0x0061])
self.lgr.add_variant([0x0061], [0x0030])
self.assertRaises(NotInRepertoire, self.lgr.add_variant,
[0x0061], [0x0062],
validating_repertoire=validation_lgr,
override_repertoire=False)
def test_add_variant_when_not_when(self):
self.lgr.add_cp([0x0061])
self.lgr.add_variant([0x0061], [0x0030], when='w1')
with self.assertRaises(VariantInvalidContextRule) as cm:
self.lgr.add_variant([0x0061], [0x0031], when='w2', not_when='nw1')
the_exception = cm.exception
self.assertEqual(the_exception.cp,
[0x0061])
self.assertEqual(the_exception.variant,
[0x0031])
self.lgr.add_variant([0x0061], [0x0030], not_when='nw2')
with self.assertRaises(VariantInvalidContextRule) as cm:
self.lgr.add_variant([0x0061], [0x0031], when='w3', not_when='nw3')
the_exception = cm.exception
self.assertEqual(the_exception.cp,
[0x0061])
self.assertEqual(the_exception.variant,
[0x0031])
def test_del_cp_validation_override(self):
validation_lgr = LGR()
validation_lgr.add_cp([0x0061])
validation_lgr.add_cp([0x0030])
self.lgr.add_cp([0x0061])
self.lgr.add_variant([0x0061], [0x0030])
self.lgr.add_variant([0x0061], [0x0062],
validating_repertoire=validation_lgr,
override_repertoire=True)
self.assertIn((0x0062,), self.lgr.repertoire[0x0061]._variants)
def test_get_variants(self):
self.lgr.add_cp([0x0061])
self.lgr.add_variant([0x0061], [0x0030])
variants = self.lgr.get_variants([0x0061])
self.assertIsInstance(variants, types.GeneratorType)
variant_list = list(variants)
self.assertEqual(len(variant_list), 1)
def test_check_range_no_modification(self):
self.lgr.check_range(0x0060, 0x007F)
self.assertEqual(len(self.lgr.repertoire), 0)
def test_check_range(self):
self.lgr.add_cp([0x0061])
self.lgr.add_cp([0x007A])
codepoints = self.lgr.check_range(0x0060, 0x007F)
for result in codepoints:
cp = result[0]
prop = result[1]
if cp == 0x060 or cp >= 0x007B:
self.assertIsInstance(prop, CharInvalidIdnaProperty)
elif cp == 0x0061 or cp == 0x007A:
self.assertIsInstance(prop, CharAlreadyExists)
else:
self.assertIsNone(prop)
def test_add_codepoints(self):
self.lgr.add_codepoints([c for c in range(0x0061, 0x007A + 1)] +
[0x0107] +
[0x0137, 0x0138])
expected_output = [RangeChar(0x061, 0x0061, 0x007A),
Char(0x0107),
RangeChar(0x0137, 0x0137, 0x0138)]
self.assertEqual(expected_output, list(self.lgr.repertoire))
def test_tags_on_codepoint(self):
self.lgr.add_cp([0x0061], tag=['t1', 't2'])
with self.assertRaises(LGRFormatException) as cm:
self.lgr.add_cp([0x0062], tag=['t1', 't1'])
the_exception = cm.exception
self.assertEqual(the_exception.reason,
LGRFormatException.LGRFormatReason.DUPLICATE_TAG)
def test_tags_on_codepoint_sequence(self):
with self.assertRaises(LGRFormatException) as cm:
self.lgr.add_cp([0x0061, 0x0062], tag=['t1'])
the_exception = cm.exception
self.assertEqual(the_exception.reason,
LGRFormatException.LGRFormatReason.SEQUENCE_NO_TAG)
def test_tags_on_range(self):
self.lgr.add_range(0x0061, 0x0062, tag=['t1', 't2'])
with self.assertRaises(LGRFormatException) as cm:
self.lgr.add_range(0x0063, 0x0064, tag=['t1', 't1'])
the_exception = cm.exception
self.assertEqual(the_exception.reason,
LGRFormatException.LGRFormatReason.DUPLICATE_TAG)
def test_del_tag(self):
self.lgr.add_cp([0x0061], tag=['1'])
self.lgr.add_cp([0x0062], tag=['1', '2'])
self.lgr.del_tag('1')
self.assertNotIn(TAG_CLASSNAME_PREFIX + '1', self.lgr.classes_lookup)
        self.assertEqual(self.lgr.get_char([0x0061]).tags, [])
        self.assertEqual(self.lgr.get_char([0x0062]).tags, ['2'])
def test_list_types(self):
self.lgr.add_cp([0x0061])
self.lgr.add_variant([0x0061], [0x0030], variant_type='BLOCK')
self.lgr.add_variant([0x0061], [0x0031], variant_type='VALID')
self.lgr.add_variant([0x0061], [0x0032], variant_type='BLOCK')
        self.assertEqual(self.lgr.types,
                         set(['BLOCK', 'VALID']))
def test_del_reference(self):
ref_id_1 = self.lgr.add_reference("Test - 1")
ref_id_2 = self.lgr.add_reference("Test - 2")
self.lgr.add_cp([0x0061], ref=[ref_id_1])
self.lgr.add_cp([0x0062], ref=[ref_id_1, ref_id_2])
self.lgr.del_reference(ref_id_1)
self.assertNotIn(ref_id_1, self.lgr.reference_manager)
        self.assertEqual(self.lgr.get_char([0x0061]).references, [])
        self.assertEqual(self.lgr.get_char([0x0062]).references, [ref_id_2])
def test_add_cp_duplicate_reference(self):
ref_id = self.lgr.add_reference("Test - 1")
with self.assertRaises(DuplicateReference) as cm:
self.lgr.add_cp([0x0061], ref=[ref_id, ref_id])
the_exception = cm.exception
self.assertEqual(the_exception.cp, [0x0061])
def test_add_range_duplicate_reference(self):
ref_id = self.lgr.add_reference("Test - 1")
with self.assertRaises(DuplicateReference) as cm:
self.lgr.add_range(0x0061, 0x0062, ref=[ref_id, ref_id])
the_exception = cm.exception
self.assertEqual(the_exception.cp, 0x0061)
def test_add_variant_duplicate_reference(self):
self.lgr.add_cp([0x0061])
ref_id = self.lgr.add_reference("Test - 1")
with self.assertRaises(DuplicateReference) as cm:
self.lgr.add_variant([0x0061], [0x0062], ref=[ref_id, ref_id])
the_exception = cm.exception
self.assertEqual(the_exception.cp, [0x0061])
def test_generate_variants(self):
self.lgr.add_cp([0x0061])
self.lgr.add_cp([0x0062])
self.lgr.add_cp([0x0063])
self.lgr.add_cp([0x0064])
self.lgr.add_variant([0x0061], [0x0070], variant_type="type0")
self.lgr.add_variant([0x0062], [0x0071], variant_type="type1")
self.lgr.add_variant([0x0062], [0x0072], variant_type="type2")
self.assertEqual([], list(self.lgr._generate_label_variants([])))
self.assertEqual([],
list(self.lgr._generate_label_variants([0x0063])))
self.assertEqual([],
list(self.lgr._generate_label_variants([0x0063,
0x0064])))
self.assertEqual(set([((0x0071, 0x0063), frozenset(['type1']), False),
((0x0072, 0x0063), frozenset(['type2']), False)]),
set(self.lgr._generate_label_variants([0x0062,
0x0063])))
self.assertEqual(set([((0x0061, 0x0062), frozenset(), False),
((0x0061, 0x0071), frozenset(['type1']), False),
((0x0061, 0x0072), frozenset(['type2']), False),
((0x0070, 0x0062), frozenset(['type0']), False),
((0x0070, 0x0071), frozenset(['type0', 'type1']), True),
((0x0070, 0x0072), frozenset(['type0', 'type2']), True),
]),
set(self.lgr._generate_label_variants([0x0061,
0x0062])))
self.assertEqual(set([((0x0061, 0x0062, 0x0062), frozenset(), False),
((0x0061, 0x0062, 0x0071), frozenset(['type1']), False),
((0x0061, 0x0062, 0x0072), frozenset(['type2']), False),
((0x0061, 0x0071, 0x0062), frozenset(['type1']), False),
((0x0061, 0x0071, 0x0071), frozenset(['type1']), False),
((0x0061, 0x0071, 0x0072), frozenset(['type1', 'type2']), False),
((0x0061, 0x0072, 0x0062), frozenset(['type2']), False),
((0x0061, 0x0072, 0x0071), frozenset(['type1', 'type2']), False),
((0x0061, 0x0072, 0x0072), frozenset(['type2']), False),
((0x0070, 0x0062, 0x0062), frozenset(['type0']), False),
((0x0070, 0x0062, 0x0071), frozenset(['type0', 'type1']), False),
((0x0070, 0x0062, 0x0072), frozenset(['type0', 'type2']), False),
((0x0070, 0x0071, 0x0062), frozenset(['type0', 'type1']), False),
((0x0070, 0x0071, 0x0071), frozenset(['type0', 'type1']), True),
((0x0070, 0x0071, 0x0072), frozenset(['type0', 'type1', 'type2']), True),
((0x0070, 0x0072, 0x0062), frozenset(['type0', 'type2']), False),
((0x0070, 0x0072, 0x0071), frozenset(['type0', 'type1', 'type2']), True),
((0x0070, 0x0072, 0x0072), frozenset(['type0', 'type2']), True),
]),
set(self.lgr._generate_label_variants([0x0061,
0x0062,
0x0062])))
def test_generate_variants_reflexive(self):
self.lgr.add_cp([0x0061])
self.lgr.add_cp([0x0062])
self.lgr.add_cp([0x0063])
self.lgr.add_variant([0x0062], [0x0062], variant_type="reflexive")
self.lgr.add_variant([0x0063], [0x0070], variant_type="type")
self.assertEqual([], list(self.lgr._generate_label_variants([])))
self.assertEqual([],
list(self.lgr._generate_label_variants([0x0061])))
self.assertEqual([((0x0062,), frozenset(['reflexive']), True)],
list(self.lgr._generate_label_variants([0x0062])))
self.assertEqual(set([((0x0062, 0x0063), frozenset(['reflexive']), False),
((0x0062, 0x0070), frozenset(['reflexive', 'type']), True),
]),
set(self.lgr._generate_label_variants([0x0062,
0x0063])))
def test_generate_variants_sequence_same_cp(self):
self.lgr.add_cp([0x05D9, 0x05D9])
self.lgr.add_cp([0X05F2])
self.lgr.add_cp([0x05D9])
self.lgr.add_variant([0x05D9, 0x05D9], [0x05F2])
self.lgr.add_variant([0x05F2], [0x05D9, 0x05D9])
self.assertEqual(set([((0x05F2, 0x05D9), frozenset(), False),
((0x05D9, 0x05D9, 0x05D9), frozenset(), False),
((0x05D9, 0x05F2), frozenset(), False)]),
set(self.lgr._generate_label_variants([0x05D9, 0x05D9, 0x05D9])))
def test_label_simple(self):
self.lgr.add_cp([0x0061])
self.lgr.add_cp([0x0062, 0x0063])
self.lgr.add_range(0x0064, 0x0068)
valid_labels = (
[0x0061],
[0x0062, 0x0063],
[0x0064],
[0x0068],
[0x0061, 0x0064],
[0x0061, 0x0062, 0x0063, 0x0064],
[0x0062, 0x0063, 0x0068]
)
invalid_labels = (
([0x0060], [], [(0x0060, None)]),
([0x0069], [], [(0x0069, None)]),
([0x0062], [], [(0x0062, None)]),
([0x0063], [], [(0x0063, None)]),
([0x0061, 0x0062], [0x0061], [(0x0062, None)])
)
for label in valid_labels:
self.assertEqual((True, label, []),
self.lgr._test_preliminary_eligibility(label))
for (label, label_part, not_in_lgr) in invalid_labels:
self.assertEqual((False, label_part, not_in_lgr),
self.lgr._test_preliminary_eligibility(label))
def test_label_eligibility_multiple_choices(self):
self.lgr.add_cp([0x0061])
self.lgr.add_cp([0x0061, 0x0062, 0x0063])
self.lgr.add_cp([0x0064])
self.assertEqual(self.lgr._test_preliminary_eligibility([0x0062]),
(False, [], [(0x0062, None)]))
self.assertEqual(self.lgr._test_preliminary_eligibility([0x0061, 0x0062, 0x0063, 0x0064]),
(True, [0x0061, 0x0062, 0x0063, 0x0064], []))
def test_label_delayed_eligibilty(self):
self.lgr.add_cp([0x0061])
self.lgr.add_variant([0x0061], [0x0061], 'block')
self.lgr.add_cp([0x0062])
self.lgr.add_variant([0x0062], [0x0062], 'invalid')
self.lgr.add_cp([0x0063, 0x0064])
self.lgr.add_variant([0x0063, 0x0064], [0x0063, 0x0064], 'invalid')
self.assertEqual(self.lgr._test_label_disposition([0x0062]),
('invalid', 0))
self.assertEqual(self.lgr._test_label_disposition([0x0063, 0x0064]),
('invalid', 0))
self.assertEqual(self.lgr._test_label_disposition([0x0061, 0x0062]),
('invalid', 0))
def test_estimate_variant_numbers(self):
self.lgr.add_cp([0x0061])
self.assertEqual(1, self.lgr.estimate_variant_number([0x0061]))
self.lgr.add_variant([0x0061], [0x0061], 'disp')
self.lgr.add_cp([0x0062])
self.lgr.add_variant([0x0062], [0x0062], 'disp')
self.assertEqual(2, self.lgr.estimate_variant_number([0x0061]))
self.assertEqual(2, self.lgr.estimate_variant_number([0x0062]))
self.assertEqual(2 * 2, self.lgr.estimate_variant_number([0x0061, 0x0062]))
self.lgr.add_cp([0x0063])
for i in range(10):
self.lgr.add_variant([0x0063], [0x074D + i], 'disp')
self.assertEqual(11, self.lgr.estimate_variant_number([0x0063]))
self.assertEqual(2 * 2 * 11, self.lgr.estimate_variant_number([0x0061, 0x0062, 0x0063]))
if __name__ == '__main__':
import logging
logging.getLogger('lgr').addHandler(logging.NullHandler())
unittest.main()
| 42.664835 | 103 | 0.579867 | 2,488 | 23,295 | 5.219453 | 0.07717 | 0.097567 | 0.085477 | 0.053596 | 0.745495 | 0.6785 | 0.598337 | 0.518173 | 0.487294 | 0.4501 | 0 | 0.128407 | 0.295943 | 23,295 | 545 | 104 | 42.743119 | 0.663374 | 0.003177 | 0 | 0.416667 | 0 | 0 | 0.019429 | 0 | 0 | 0 | 0.106147 | 0 | 0.231982 | 1 | 0.101351 | false | 0 | 0.02027 | 0 | 0.123874 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bdf49cbe86061f6a6d2674b7247e10d0986c44a | 1,622 | py | Python | imix/data/sampler/sampler_adaptor.py | linxi1158/iMIX | af87a17275f02c94932bb2e29f132a84db812002 | [
"Apache-2.0"
] | 23 | 2021-06-26T08:45:19.000Z | 2022-03-02T02:13:33.000Z | imix/data/sampler/sampler_adaptor.py | XChuanLee/iMIX | 99898de97ef8b45462ca1d6bf2542e423a73d769 | [
"Apache-2.0"
] | null | null | null | imix/data/sampler/sampler_adaptor.py | XChuanLee/iMIX | 99898de97ef8b45462ca1d6bf2542e423a73d769 | [
"Apache-2.0"
] | 9 | 2021-06-10T02:36:20.000Z | 2021-11-09T02:18:16.000Z | from typing import Dict, Optional
from imix.utils.config import imixEasyDict
import re
class SamplerAdaptor:
"""modify the parameters of the sampler and the batch_sampler to adapt to
different sampling methods."""
@classmethod
def adaptor(cls, cfg: imixEasyDict, default_args: Optional[Dict] = None):
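        # dispatch to an _adaptor_<snake_case_type> helper when one is defined for this sampler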
sampler_type = cfg.get('type')
func_name = '_adaptor_' + cls.run_fun_suffix(sampler_type)
run_func = getattr(cls, func_name, None)
if run_func is None:
return
else:
run_func(cfg, default_args)
@classmethod
def run_fun_suffix(cls, name: str) -> str:
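        # e.g. 'TokenBucketSampler' -> 'token_bucket_sampler'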
r = re.findall('([A-Z][a-z]+)', name)
func_suffix = '_'.join(r)
return func_suffix.lower()
@classmethod
def _adaptor_token_bucket_sampler(cls, cfg: imixEasyDict, default_args: Optional[Dict] = None):
dataset = default_args.pop('dataset')
default_args['lens'] = dataset.dataset.lens
@classmethod
def _adaptor_random_sampler(cls, cfg: imixEasyDict, default_args: Optional[Dict] = None):
default_args['data_source'] = default_args.pop('dataset')
@classmethod
def _adaptor_distributed_sampler(cls, cfg: imixEasyDict, default_args: Optional[Dict] = None):
if 'world_size' in default_args.keys():
default_args['num_replicas'] = default_args.pop('world_size')
@classmethod
def _adaptor_sequential_sampler(cls, cfg: imixEasyDict, default_args: Optional[Dict] = None):
if 'dataset' in default_args.keys():
default_args['data_source'] = default_args.pop('dataset')
| 36.044444 | 99 | 0.675092 | 202 | 1,622 | 5.168317 | 0.331683 | 0.168582 | 0.100575 | 0.119732 | 0.369732 | 0.369732 | 0.326628 | 0.326628 | 0.203065 | 0.103448 | 0 | 0 | 0.215783 | 1,622 | 44 | 100 | 36.863636 | 0.820755 | 0.060419 | 0 | 0.242424 | 0 | 0 | 0.074637 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bdfc45f35316bf95902a8fa8c5edd5de2cf6cbe | 2,864 | py | Python | IpTrackerv1.0.py | codassassin/ip-tracker-v1.0 | 4a4b311b32d002c5b9388b32f1e6ad0070b4e25b | [
"MIT"
] | null | null | null | IpTrackerv1.0.py | codassassin/ip-tracker-v1.0 | 4a4b311b32d002c5b9388b32f1e6ad0070b4e25b | [
"MIT"
] | null | null | null | IpTrackerv1.0.py | codassassin/ip-tracker-v1.0 | 4a4b311b32d002c5b9388b32f1e6ad0070b4e25b | [
"MIT"
] | null | null | null | from geolite2 import geolite2
import requests
class bColors:
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BLUE = '\033[94m'
def banner():
print(bColors.GREEN + '<<< IP-TRACKER v1.0>>>')
print(bColors.RED + r'''
_
| |
| |___
| _ \ _ _
| |_) | | (_) |
\____/ \__, |
__/ |
|___/
_ _
| | (_)
____ ____ ___| | ___ _ ______ ______ ___ _ ______ ______ _ _ ____
/ ___\ / \ / _ | / _ | | / _____| / _____| / _ | | / _____| / _____| | | | | | \
| |____ | () | | (_| | | (_|| | \______\ \______\ | (_|| | \______\ \______\ | | | | | |
\____/ \____/ \____/ \___|_| |______/ |______/ \___|_| |______/ |______/ |_| |_| |_|
''')
def ipLocation(ipTrack):
docReader = geolite2.reader()
trackLocation = docReader.get(ipTrack)
# Assigning specific values from GeoLiteCity.dat
city = (trackLocation['city']['names']['en'])
continent = (trackLocation['continent']['names']['en'])
country = (trackLocation['country']['names']['en'])
location = (trackLocation['location'])
locationAccuracy = location['accuracy_radius']
locationLatitude = location['latitude']
locationLongitude = location['longitude']
locationTimeZone = location['time_zone']
postal = (trackLocation['postal'])
postalCode = postal['code']
registeredCountry = (trackLocation['registered_country']['names']['en'])
subdivisions = (trackLocation['subdivisions'][0]['names']['en'])
r = str(bColors.RED)
g = str(bColors.GREEN)
b = str(bColors.BLUE)
y = str(bColors.YELLOW)
    print(r + '* ' + b + 'public_ip: ' + g + ipTrack + r + '\n* ' + b + 'city: ' + g + city + r + '\n* ' + b + 'continent: ' + g +
continent + r + '\n* ' + b + 'country: ' + g + country + r + '\n* ' + b + 'location: ' + r + '\n\t↪ ' + y +
'accuracy_radius: ' + g + str(locationAccuracy) + r + '\n\t↪ ' + y + 'latitude: ' + g + str(locationLatitude)
+ r + '\n\t↪ ' + y + 'longitude: ' + g + str(locationLongitude) + r + '\n\t↪ ' + y + 'time_zone: ' + g +
locationTimeZone + r + '\n\t↪ ' + y + 'map: ' + g +
f'https://www.google.co.in/maps/@{locationLatitude},{locationLongitude},15z?hl=en' + r + '\n* ' + b +
'postal_code: ' + g + str(postalCode) + r + '\n* ' + b + 'registered_country: ' + g + registeredCountry + r +
'\n* ' + b + 'subdivisions: ' + g + subdivisions)
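# look up this machine's public IP address, then geolocate and print it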
ip = requests.get('https://api.ipify.org').text
banner()
ipLocation(ip)
| 40.914286 | 126 | 0.47067 | 232 | 2,864 | 5.038793 | 0.336207 | 0.02053 | 0.017964 | 0.017109 | 0.021386 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014965 | 0.346718 | 2,864 | 69 | 127 | 41.507246 | 0.607162 | 0.016061 | 0 | 0 | 0 | 0.072727 | 0.458849 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036364 | false | 0 | 0.036364 | 0 | 0.163636 | 0.054545 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0be333dad7a5d2d06d8fd1a96016de0d3eb536b5 | 2,529 | py | Python | scripts/histogram_event.py | dereksoeder/iS3D | 27fed56d697f8e4a0e08fa9cb261436f4a7c7f8c | [
"MIT"
] | 1 | 2022-02-08T21:26:12.000Z | 2022-02-08T21:26:12.000Z | scripts/histogram_event.py | dereksoeder/iS3D | 27fed56d697f8e4a0e08fa9cb261436f4a7c7f8c | [
"MIT"
] | 13 | 2018-06-13T19:29:07.000Z | 2021-02-07T04:29:10.000Z | scripts/histogram_event.py | dereksoeder/iS3D | 27fed56d697f8e4a0e08fa9cb261436f4a7c7f8c | [
"MIT"
] | 7 | 2018-04-11T01:56:22.000Z | 2021-07-09T23:00:21.000Z | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import math
import sys
#load the particle list
particle_list = pd.read_csv(sys.argv[1], sep=',')
mcid = particle_list['mcid']
#spacetime info
tau = particle_list['tau']
x = particle_list['x']
eta = particle_list['eta']
#momentum info
E = particle_list['E']
px = particle_list['px']
py = particle_list['py']
pz = particle_list['pz']
#species dependent info
#pion 211
pi_pT = []
pi_y = []
pi_phi = []
#kaon 321
k_pT = []
k_y = []
k_phi = []
#proton 2212
p_pT = []
p_y = []
p_phi = []
#3d momentum space lists
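# pT = sqrt(px^2 + py^2), rapidity y = 0.5*ln((E + pz)/(E - pz)), phi = atan2(py, px)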
for i in range(len(E)):
    if ( mcid[i] == 211 ):
        pi_pT.append( math.sqrt( px[i]*px[i] + py[i]*py[i] ) )
        pi_y.append( 0.5 * math.log( (E[i] + pz[i]) / (E[i] - pz[i]) ) )
        pi_phi.append( math.atan2( py[i], px[i] ) )
    if ( mcid[i] == 321 ):
        k_pT.append( math.sqrt( px[i]*px[i] + py[i]*py[i] ) )
        k_y.append( 0.5 * math.log( (E[i] + pz[i]) / (E[i] - pz[i]) ) )
        k_phi.append( math.atan2( py[i], px[i] ) )
    if ( mcid[i] == 2212 ):
        p_pT.append( math.sqrt( px[i]*px[i] + py[i]*py[i] ) )
        p_y.append( 0.5 * math.log( (E[i] + pz[i]) / (E[i] - pz[i]) ) )
        p_phi.append( math.atan2( py[i], px[i] ) )
#midrapidity
pi_pT_mid = []
k_pT_mid = []
p_pT_mid = []
#the range of rapidity to integrate over
ymax = 0.5
for i in range(len(pi_pT)):
    if ( abs(pi_y[i]) < ymax ):
        pi_pT_mid.append( pi_pT[i] )
for i in range(len(k_pT)):
    if ( abs(k_y[i]) < ymax ):
        k_pT_mid.append( k_pT[i] )
for i in range(len(p_pT)):
    if ( abs(p_y[i]) < ymax ):
        p_pT_mid.append( p_pT[i] )
#histogram of particle yields
plt.hist(mcid, bins='auto')
plt.title("Particle Yields")
plt.xlabel("MC ID")
plt.show()
#pT bins (the hardcoded list below overrides the bins read from the table file)
pT_bins = pd.read_csv('tables/pT_nodes.dat', header=None)
pT_bins = [0, .0072, .038, .094, .175, .28, .42, .58, .78, 1.01, 1.3, 1.6, 1.97, 2.4, 2.96, 3.7]
#histogram of tau (proper time of production)
plt.hist(tau, bins='auto')
plt.title("Proper time of particle production")
plt.xlabel("tau (fm/c)")
plt.show()
#pion spectra at midrapidity
plt.hist(pi_pT_mid, bins=pT_bins)
plt.title("Pion spectra midrapidity")
plt.xlabel("pT (GeV)")
plt.show()
#kaon spectra at midrapidity
plt.hist(k_pT_mid, bins=pT_bins)
plt.title("Kaon spectra midrapidity")
plt.xlabel("pT (GeV)")
plt.show()
#proton spectra at midrapidity
plt.hist(p_pT_mid, bins=pT_bins)
plt.title("Proton spectra midrapidity")
plt.xlabel("pT (GeV)")
plt.show()
| 23.858491 | 95 | 0.605378 | 462 | 2,529 | 3.179654 | 0.233766 | 0.081688 | 0.016338 | 0.020422 | 0.406399 | 0.406399 | 0.385977 | 0.339006 | 0.246426 | 0.246426 | 0 | 0.036085 | 0.200079 | 2,529 | 105 | 96 | 24.085714 | 0.690064 | 0.132463 | 0 | 0.112676 | 0 | 0 | 0.095544 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.070423 | 0 | 0.070423 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0be5ba6ef35e6adafb7042f659edd1b01987a5ad | 36,412 | py | Python | main.py | lfzinho/sonias-fury | 4fc2b097c16c2e4e6c30f5a8e96bc5f6d7f376b3 | [
"MIT"
] | null | null | null | main.py | lfzinho/sonias-fury | 4fc2b097c16c2e4e6c30f5a8e96bc5f6d7f376b3 | [
"MIT"
] | null | null | null | main.py | lfzinho/sonias-fury | 4fc2b097c16c2e4e6c30f5a8e96bc5f6d7f376b3 | [
"MIT"
] | null | null | null | import pygame
from pygame import mixer
import os
import random
import csv
import button
mixer.init()
pygame.init()
SCREEN_WIDTH = 672
SCREEN_HEIGHT = int(SCREEN_WIDTH * 0.8)
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
pygame.display.set_caption('Shooter')
# set framerate
clock = pygame.time.Clock()
FPS = 60
# define game variables
GRAVITY = 0.75
SCROLL_THRESH = 280
DEFAULT_ROWS = 16
ROWS = 10
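# vertical speeds below are tuned for a 16-row grid, so rescale them for the actual row count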
Y_SPEED_MULTIPIER = DEFAULT_ROWS / ROWS
COLS = 150
TILE_SIZE = SCREEN_HEIGHT // ROWS
TILE_TYPES = 21
MAX_LEVELS = 3
screen_scroll = 0
bg_scroll = 0
level = 1
start_game = False
start_intro = False
# define player action variables
moving_left = False
moving_right = False
shoot = False
grenade = False
grenade_thrown = False
# load music and sounds
# pygame.mixer.music.load('audio/Ghostbusters (8 Bit Remix Cover Version).mp3')
pygame.mixer.music.load(os.path.join("audio", "Ghostbusters (8 Bit Remix Cover Version).mp3"))
pygame.mixer.music.set_volume(0.03)
bg_music = pygame.mixer.music
bg_music.play(-1, 0.0, 5000)
# jump_fx = pygame.mixer.Sound('audio/jump.wav')
jump_fx = pygame.mixer.Sound(os.path.join("audio", "jump.wav"))
jump_fx.set_volume(0.05)
# shot_fx = pygame.mixer.Sound('audio/shot.wav')
shot_fx = pygame.mixer.Sound(os.path.join("audio", "shot.wav"))
shot_fx.set_volume(0.05)
# grenade_fx = pygame.mixer.Sound('audio/grenade.wav')
grenade_fx = pygame.mixer.Sound(os.path.join("audio", "grenade.wav"))
grenade_fx.set_volume(0.05)
# hitmarker_fx = pygame.mixer.Sound('audio/Hitsound_COD.wav')
hitmarker_fx = pygame.mixer.Sound(os.path.join("audio", "Hitsound_COD.wav"))
hitmarker_fx.set_volume(0.05)
# portal_fx = pygame.mixer.Sound('audio/portal.wav')
portal_fx = pygame.mixer.Sound(os.path.join("audio", "portal.wav"))
portal_fx.set_volume(0.02)
# death_fx = pygame.mixer.Sound('audio/8-bit-death-sound-effect.wav')
death_fx = pygame.mixer.Sound(os.path.join("audio", "8-bit-death-sound-effect.wav"))
death_fx.set_volume(0.2)
# load images
# button images
# start_img = pygame.image.load('img/start_btn.png').convert_alpha()
start_img = pygame.image.load(os.path.join("img", "start_btn.png")).convert_alpha()
# exit_img = pygame.image.load('img/exit_btn.png').convert_alpha()
exit_img = pygame.image.load(os.path.join("img", "exit_btn.png")).convert_alpha()
# restart_img = pygame.image.load('img/restart_btn.png').convert_alpha()
restart_img = pygame.image.load(os.path.join("img", "restart_btn.png")).convert_alpha()
# magic_fx = pygame.image.load('img/magic_fx.png').convert_alpha()
magic_fx = pygame.image.load(os.path.join("img", "magic_fx.png")).convert_alpha()
# menu
# menu = pygame.image.load('./img/menu.png').convert_alpha()
menu = pygame.image.load(os.path.join(".", "img", "menu.png")).convert_alpha()
# historia1 = pygame.image.load('./img/historia1.png').convert_alpha()
historia1 = pygame.image.load(os.path.join(".", "img", "historia1.png")).convert_alpha()
# historia2 = pygame.image.load('./img/historia2.png').convert_alpha()
historia2 = pygame.image.load(os.path.join(".", "img", "historia2.png")).convert_alpha()
# background
# overlay_img = pygame.image.load('img/Background/overlay.png').convert_alpha()
overlay_img = pygame.image.load(os.path.join("img", "Background", "overlay.png")).convert_alpha()
# mist_img = pygame.image.load('img/Background/mist.png').convert_alpha()
mist_img = pygame.image.load(os.path.join("img", "Background", "mist.png")).convert_alpha()
# corridor_img = pygame.image.load('img/Background/corridor.png').convert_alpha()
corridor_img = pygame.image.load(os.path.join("img", "Background", "corridor.png")).convert_alpha()
# sky_img = pygame.image.load('img/Background/sky_cloud.png').convert_alpha()
sky_img = pygame.image.load(os.path.join("img", "Background", "sky_cloud.png")).convert_alpha()
bkgnd_scle = 1.3
overlay_img = pygame.transform.scale(overlay_img,
(int(overlay_img.get_width() * bkgnd_scle),
int(overlay_img.get_height() * bkgnd_scle)))
mist_img = pygame.transform.scale(mist_img,
(int(mist_img.get_width() * bkgnd_scle),
int(mist_img.get_height() * bkgnd_scle)))
corridor_img = pygame.transform.scale(corridor_img,
(int(corridor_img.get_width() * bkgnd_scle),
int(corridor_img.get_height() * bkgnd_scle)))
sky_img = pygame.transform.scale(sky_img,
(int(sky_img.get_width() * bkgnd_scle),
int(sky_img.get_height() * bkgnd_scle)))
# store tiles in a list
img_list = []
for x in range(TILE_TYPES):
# img = pygame.image.load(f'img/Tile/{x}.png')
img = pygame.image.load(os.path.join("img", "Tile", f"{x}.png"))
img = pygame.transform.scale(img, (TILE_SIZE, TILE_SIZE))
img_list.append(img)
# bullet
# bullet_img = pygame.image.load('img/icons/bullet.png').convert_alpha()
bullet_img = pygame.image.load(os.path.join("img", "icons", "bullet.png")).convert_alpha()
bullet_img = pygame.transform.scale(bullet_img, (int(bullet_img.get_width() * 2), int(bullet_img.get_height() * 2)))
# grenade
# grenade_img = pygame.image.load('img/icons/grenade.png').convert_alpha()
grenade_img = pygame.image.load(os.path.join("img", "icons", "grenade.png")).convert_alpha()
# pick up boxes
# health_box_img = pygame.image.load('img/icons/health_box.png').convert_alpha()
health_box_img = pygame.image.load(os.path.join("img", "icons", "health_box.png")).convert_alpha()
# ammo_box_img = pygame.image.load('img/icons/ammo_box.png').convert_alpha()
ammo_box_img = pygame.image.load(os.path.join("img", "icons", "ammo_box.png")).convert_alpha()
# grenade_box_img = pygame.image.load('img/icons/grenade_box.png').convert_alpha()
grenade_box_img = pygame.image.load(os.path.join("img", "icons", "grenade_box.png")).convert_alpha()
item_boxes = {
'Health': health_box_img,
'Ammo': ammo_box_img,
'Grenade': grenade_box_img
}
# define colours
BG = (0, 0, 0)
RED = (255, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
BLACK = (0, 0, 0)
PINK = (235, 65, 54)
# define font
font = pygame.font.SysFont('Futura', 30)
def draw_text(text, font, text_col, x, y):
img = font.render(text, True, text_col)
screen.blit(img, (x, y))
def draw_bg():
screen.fill(BG)
width = sky_img.get_width()
for x in range(5):
screen.blit(sky_img, ((x * width) - bg_scroll * 0.5, 0))
screen.blit(corridor_img, ((x * width) - bg_scroll, 0))
def draw_obg():
width = sky_img.get_width()
for x in range(5):
screen.blit(overlay_img, ((x * width) - bg_scroll * 1.2, 0))
screen.blit(mist_img, ((x * width) - bg_scroll - pygame.time.get_ticks() * 0.1, 0))
def draw_history(first_time, piece, time=25000):
global run
global start_game
global start_intro
while pygame.time.get_ticks() <= first_time + time:
clicked = False
if piece == 1:
screen.blit(historia1, (0, 0))
elif piece == 2:
screen.blit(historia2, (0, 0))
for event in pygame.event.get():
# quit game
if event.type == pygame.QUIT:
pygame.quit()
# keyboard presses
if event.type == pygame.KEYDOWN or event.type == pygame.MOUSEBUTTONDOWN:
clicked = True
break
if clicked:
break
pygame.display.update()
if piece == 1:
start_game = True
start_intro = True
if piece == 2:
run = False
def draw_mist():
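    # overlay the magic screen effect while the player's gravity is inverted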
if player.gravity == -1:
screen.blit(magic_fx, (0, 0)) # alpha level
# function to reset level
def reset_level():
enemy_group.empty()
bullet_group.empty()
grenade_group.empty()
explosion_group.empty()
item_box_group.empty()
decoration_group.empty()
water_group.empty()
portal_group.empty()
exit_group.empty()
# create empty tile list
data = []
for row in range(ROWS):
r = [-1] * COLS
data.append(r)
return data
class Soldier(pygame.sprite.Sprite):
def __init__(self, char_type, x, y, scale, speed, ammo, grenades):
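        # note: the ammo argument is accepted but never stored; shooting is unlimited in this mod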
scale = scale * (DEFAULT_ROWS / ROWS)
pygame.sprite.Sprite.__init__(self)
self.alive = True
self.char_type = char_type
self.speed = speed
self.shoot_cooldown = 0
self.grenades = grenades
self.health = 100
self.max_health = self.health
self.direction = 1
self.vel_y = 0
self.jump = False
self.in_air = True
self.flip = False
self.flipy = False
self.gravity = 1
self.flag = 1
self.animation_list = []
self.frame_index = 0
self.action = 0
self.update_time = pygame.time.get_ticks()
# ai specific variables
self.move_counter = 0
self.vision = pygame.Rect(0, 0, 650, 650)
self.idling = False
self.idling_counter = 0
self.pesoY = random.randrange(3, 6)
# load all images for the players
animation_types = ['Idle', 'Run', 'Jump', 'Death']
for animation in animation_types:
# reset temporary list of images
temp_list = []
# count number of files in the folder
# num_of_frames = len(os.listdir(f'img/{self.char_type}/{animation}'))
num_of_frames = len(os.listdir(os.path.join("img", f'{self.char_type}', f'{animation}')))
for i in range(num_of_frames):
# img = pygame.image.load(f'img/{self.char_type}/{animation}/{i}.png').convert_alpha()
img = pygame.image.load(
os.path.join("img", f"{self.char_type}", f"{animation}", f"{i}.png")).convert_alpha()
img = pygame.transform.scale(img, (int(img.get_width() * scale), int(img.get_height() * scale)))
temp_list.append(img)
self.animation_list.append(temp_list)
self.image = self.animation_list[self.action][self.frame_index]
self.rect = self.image.get_rect()
self.rect.center = (x, y)
self.width = self.image.get_width()
self.height = self.image.get_height()
def update(self):
self.update_animation()
self.check_alive()
# update cooldown
if self.shoot_cooldown > 0:
self.shoot_cooldown -= 1
        # MOD: if the ghost and the player touch
if self.char_type == "enemy" and self.alive:
if pygame.sprite.collide_rect(player, self):
if player.alive:
player.health -= 20
self.kill()
    def move(self, moving_left, moving_right, ghost_move=False, player_pos=(0, 0)):  # tuple avoids a mutable default argument
# reset movement variables
screen_scroll = 0
dx = 0
dy = 0
# assign movement variables if moving left or right
if moving_left:
dx = -self.speed
self.flip = True
self.direction = -1
if moving_right:
dx = self.speed
self.flip = False
self.direction = 1
        # MOD: code grafted in to make the ghost work
if ghost_move:
            # move in y:
            # The ghost prioritizes moving along y, because that way it lines up
            # with the player's aim. This makes the game more fun and fair.
if abs(self.rect.y - player_pos[1]) * self.pesoY > abs(self.rect.x - player_pos[0]):
if self.rect.y < player_pos[1]:
dy = self.speed
elif self.rect.y > player_pos[1]:
dy = -self.speed
            # move in x and set the facing direction accordingly
else:
if self.rect.x < player_pos[0]:
dx = self.speed
elif self.rect.x > player_pos[0]:
dx = -self.speed
# jump
if self.jump == True and self.in_air == False:
self.vel_y = -11
self.jump = False
self.in_air = True
if self.char_type != "enemy":
# apply gravity
self.vel_y += GRAVITY
            if self.vel_y > 10:
                self.vel_y = 10  # clamp terminal fall speed
dy += self.vel_y * Y_SPEED_MULTIPIER * self.gravity
# check for collision
for tile in world.obstacle_list:
# check collision in the x direction
if tile[1].colliderect(self.rect.x + dx, self.rect.y, self.width, self.height):
dx = 0
# if the ai has hit a wall then make it turn around
if self.char_type == 'enemy':
self.direction *= -1
self.move_counter = 0
# check for collision in the y direction
if tile[1].colliderect(self.rect.x, self.rect.y + dy, self.width, self.height):
# check if below the ground, i.e. jumping
if self.vel_y < 0:
self.vel_y = 0
if self.gravity > 0:
dy = tile[1].bottom - self.rect.top
elif self.gravity < 0:
dy = tile[1].top - self.rect.bottom
# check if above the ground, i.e. falling
elif self.vel_y >= 0:
self.vel_y = 0
self.in_air = False
if self.gravity > 0:
dy = tile[1].top - self.rect.bottom
elif self.gravity < 0:
dy = tile[1].bottom - self.rect.top
# check for collision with water
if pygame.sprite.spritecollide(self, water_group, False):
self.health = 0
# check for player collision with a portal
if pygame.sprite.spritecollide(self, portal_group, False):
if self.char_type != "enemy":
portal_fx.play()
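            # self.flag debounces the flip so gravity only toggles once per portal touch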
if self.gravity == 1 and self.flag == 1:
self.gravity = -1
self.flipy = True
self.flag = 0
elif self.gravity == -1 and self.flag == 1:
self.gravity = 1
self.flipy = False
self.flag = 0
elif not (pygame.sprite.spritecollide(self, portal_group, False)):
self.flag = 1
# check for collision with exit
level_complete = False
if pygame.sprite.spritecollide(self, exit_group, False):
level_complete = True
# check if fallen off the map
if self.gravity == 1:
if self.rect.bottom > SCREEN_HEIGHT:
self.health = 0
if self.gravity == -1:
if self.rect.bottom < 0:
self.health = 0
# check if going off the edges of the screen
if self.char_type == 'player':
if self.rect.left + dx < 0 or self.rect.right + dx > SCREEN_WIDTH:
dx = 0
# update rectangle position
self.rect.x += dx
self.rect.y += dy
# update scroll based on player position
if self.char_type == 'player':
if (self.rect.right > SCREEN_WIDTH - SCROLL_THRESH and bg_scroll < (
world.level_length * TILE_SIZE) - SCREEN_WIDTH) \
or (self.rect.left < SCROLL_THRESH and bg_scroll > abs(dx)):
self.rect.x -= dx
screen_scroll = -dx
return screen_scroll, level_complete
def shoot(self):
if self.shoot_cooldown == 0:
self.shoot_cooldown = 20
bullet = Bullet(self.rect.centerx + (0.75 * self.rect.size[0] * self.direction),
self.rect.centery + (0.2 * self.rect.size[0]),
self.direction)
bullet_group.add(bullet)
shot_fx.play()
def ai(self, player_pos):
if self.alive and player.alive:
self.vision.center = (self.rect.centerx, self.rect.centery)
if self.vision.colliderect(player.rect):
                # MOD: if the ghost detects the player, it chases them
self.update_action(1)
self.vision.center = (self.rect.centerx, self.rect.centery)
self.move(0, 0, True, player_pos)
else:
self.update_action(0)
# scroll
self.rect.x += screen_scroll
def update_animation(self):
# update animation
ANIMATION_COOLDOWN = 100
# update image depending on current frame
self.image = self.animation_list[self.action][self.frame_index]
# check if enough time has passed since the last update
if pygame.time.get_ticks() - self.update_time > ANIMATION_COOLDOWN:
self.update_time = pygame.time.get_ticks()
self.frame_index += 1
# if the animation has run out the reset back to the start
if self.frame_index >= len(self.animation_list[self.action]):
if self.action == 3:
self.frame_index = len(self.animation_list[self.action]) - 1
else:
self.frame_index = 0
def update_action(self, new_action):
# check if the new action is different to the previous one
if new_action != self.action:
self.action = new_action
# update the animation settings
self.frame_index = 0
self.update_time = pygame.time.get_ticks()
def check_alive(self):
if self.health <= 0:
self.health = 0
self.speed = 0
self.alive = False
self.update_action(3)
def draw(self):
screen.blit(pygame.transform.flip(self.image, self.flip, self.flipy), self.rect)
class World():
def __init__(self):
self.obstacle_list = []
def process_data(self, data):
self.level_length = len(data[0])
# iterate through each value in level data file
for y, row in enumerate(data):
for x, tile in enumerate(row):
if tile >= 0:
img = img_list[tile]
img_rect = img.get_rect()
img_rect.x = x * TILE_SIZE
img_rect.y = y * TILE_SIZE
tile_data = (img, img_rect)
if tile >= 0 and tile <= 8:
self.obstacle_list.append(tile_data)
elif tile >= 9 and tile <= 10:
water = Water(img, x * TILE_SIZE, y * TILE_SIZE)
water_group.add(water)
elif tile == 11:
portal = Portal(img, x * TILE_SIZE, y * TILE_SIZE)
portal_group.add(portal)
elif tile >= 11 and tile <= 14:
decoration = Decoration(img, x * TILE_SIZE, y * TILE_SIZE)
decoration_group.add(decoration)
elif tile == 15: # create player
player = Soldier('player', x * TILE_SIZE, y * TILE_SIZE, 1.4, 5, 20, 5)
health_bar = HealthBar(10, 10, player.health, player.health)
elif tile == 16: # create enemies
enemy = Soldier('enemy', x * TILE_SIZE, y * TILE_SIZE, 1.65, 2, 20, 0)
enemy_group.add(enemy)
elif tile == 17: # create ammo box
item_box = ItemBox('Ammo', x * TILE_SIZE, y * TILE_SIZE)
item_box_group.add(item_box)
elif tile == 18: # create grenade box
item_box = ItemBox('Grenade', x * TILE_SIZE, y * TILE_SIZE)
item_box_group.add(item_box)
elif tile == 19: # create health box
item_box = ItemBox('Health', x * TILE_SIZE, y * TILE_SIZE)
item_box_group.add(item_box)
elif tile == 20: # create exit
exit = Exit(img, x * TILE_SIZE, y * TILE_SIZE)
exit_group.add(exit)
return player, health_bar
def draw(self):
for tile in self.obstacle_list:
tile[1][0] += screen_scroll
screen.blit(tile[0], tile[1])
class Decoration(pygame.sprite.Sprite):
def __init__(self, img, x, y):
pygame.sprite.Sprite.__init__(self)
self.image = img
self.rect = self.image.get_rect()
self.rect.midtop = (x + TILE_SIZE // 2, y + (TILE_SIZE - self.image.get_height()))
def update(self):
self.rect.x += screen_scroll
class Water(pygame.sprite.Sprite):
def __init__(self, img, x, y):
pygame.sprite.Sprite.__init__(self)
self.image = img
self.rect = self.image.get_rect()
self.rect.midtop = (x + TILE_SIZE // 2, y + (TILE_SIZE - self.image.get_height()))
def update(self):
self.rect.x += screen_scroll
class Portal(pygame.sprite.Sprite):
def __init__(self, img, x, y):
pygame.sprite.Sprite.__init__(self)
self.image = img
self.rect = self.image.get_rect()
self.rect.midtop = (x + TILE_SIZE // 2, y + (TILE_SIZE - self.image.get_height()))
def update(self):
self.rect.x += screen_scroll
class Exit(pygame.sprite.Sprite):
def __init__(self, img, x, y):
pygame.sprite.Sprite.__init__(self)
self.image = img
self.rect = self.image.get_rect()
self.rect.midtop = (x + TILE_SIZE // 2, y + (TILE_SIZE - self.image.get_height()))
def update(self):
self.rect.x += screen_scroll
class ItemBox(pygame.sprite.Sprite):
def __init__(self, item_type, x, y):
pygame.sprite.Sprite.__init__(self)
self.item_type = item_type
self.image = item_boxes[self.item_type]
self.rect = self.image.get_rect()
self.rect.midtop = (x + TILE_SIZE // 2, y + (TILE_SIZE - self.image.get_height()))
def update(self):
# scroll
self.rect.x += screen_scroll
# check if the player has picked up the box
if pygame.sprite.collide_rect(self, player):
# check what kind of box it was
if self.item_type == 'Health':
player.health += 25
if player.health > player.max_health:
player.health = player.max_health
elif self.item_type == 'Grenade':
player.grenades += 3
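            # 'Ammo' boxes fall through: they are collected but grant nothing, since ammo is not tracked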
# delete the item box
self.kill()
class HealthBar():
def __init__(self, x, y, health, max_health):
self.x = x
self.y = y
self.health = health
self.max_health = max_health
def draw(self, health):
# update with new health
self.health = health
# calculate health ratio
ratio = self.health / self.max_health
pygame.draw.rect(screen, BLACK, (self.x - 2, self.y - 2, 154, 24))
pygame.draw.rect(screen, RED, (self.x, self.y, 150, 20))
pygame.draw.rect(screen, GREEN, (self.x, self.y, 150 * ratio, 20))
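# Worked example (illustrative, not original code): with health = 75 and
# max_health = 100, ratio = 0.75, so the green foreground bar is drawn
# 150 * 0.75 = 112.5 px wide on top of the 150 px red background bar.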
class Bullet(pygame.sprite.Sprite):
def __init__(self, x, y, direction):
pygame.sprite.Sprite.__init__(self)
self.speed = 10
self.image = bullet_img
self.rect = self.image.get_rect()
self.rect.center = (x, y)
self.direction = direction
def update(self):
# move bullet
self.rect.x += (self.direction * self.speed) + screen_scroll
# check if bullet has gone off screen
if self.rect.right < 0 or self.rect.left > SCREEN_WIDTH:
self.kill()
# check for collision with level
for tile in world.obstacle_list:
if tile[1].colliderect(self.rect):
self.kill()
        # check collision with characters (test only this bullet's own rect;
        # the original group-wide spritecollide check ran inside every
        # bullet's update, so one overlapping bullet damaged the player once
        # per live bullet and killed the whole group)
        if self.rect.colliderect(player.rect):
            if player.alive:
                player.health -= 5
                self.kill()
        for enemy in enemy_group:
            if self.rect.colliderect(enemy.rect):
                if enemy.alive:
                    enemy.health -= 35
                    hitmarker_fx.play()
                    self.kill()
class Grenade(pygame.sprite.Sprite):
def __init__(self, x, y, direction):
pygame.sprite.Sprite.__init__(self)
self.timer = 100
self.vel_y = -11
self.speed = 7
self.image = grenade_img
self.rect = self.image.get_rect()
self.rect.center = (x, y)
self.width = self.image.get_width()
self.height = self.image.get_height()
self.direction = direction
def update(self):
self.vel_y += GRAVITY
dx = self.direction * self.speed
dy = self.vel_y * Y_SPEED_MULTIPIER
# check for collision with level
for tile in world.obstacle_list:
# check collision with walls
if tile[1].colliderect(self.rect.x + dx, self.rect.y, self.width, self.height):
self.direction *= -1
dx = self.direction * self.speed
# check for collision in the y direction
if tile[1].colliderect(self.rect.x, self.rect.y + dy, self.width, self.height):
self.speed = 0
# check if below the ground, i.e. thrown up
if self.vel_y < 0:
self.vel_y = 0
dy = tile[1].bottom - self.rect.top
# check if above the ground, i.e. falling
elif self.vel_y >= 0:
self.vel_y = 0
dy = tile[1].top - self.rect.bottom
# update grenade position
self.rect.x += dx + screen_scroll
self.rect.y += dy
# countdown timer
self.timer -= 1
if self.timer <= 0:
self.kill()
grenade_fx.play()
explosion = Explosion(self.rect.x, self.rect.y, 0.5)
explosion_group.add(explosion)
# do damage to anyone that is nearby
if abs(self.rect.centerx - player.rect.centerx) < TILE_SIZE * 2 and \
abs(self.rect.centery - player.rect.centery) < TILE_SIZE * 2:
player.health -= 50
for enemy in enemy_group:
if abs(self.rect.centerx - enemy.rect.centerx) < TILE_SIZE * 2 and \
abs(self.rect.centery - enemy.rect.centery) < TILE_SIZE * 2:
enemy.health -= 50
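# Illustrative sketch, not original code: the grenade damage test above uses a
# square blast area rather than a circle -- a target is hit when both axis
# distances are below 2 * TILE_SIZE (a Chebyshev-distance check).
def _example_in_blast_area(grenade_rect, target_rect):
    return abs(grenade_rect.centerx - target_rect.centerx) < TILE_SIZE * 2 and \
        abs(grenade_rect.centery - target_rect.centery) < TILE_SIZE * 2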
class Explosion(pygame.sprite.Sprite):
def __init__(self, x, y, scale):
pygame.sprite.Sprite.__init__(self)
self.images = []
for num in range(1, 6):
# img = pygame.image.load(f'img/explosion/exp{num}.png').convert_alpha()
img = pygame.image.load(os.path.join("img", "explosion", f"exp{num}.png")).convert_alpha()
img = pygame.transform.scale(img, (int(img.get_width() * scale), int(img.get_height() * scale)))
self.images.append(img)
self.frame_index = 0
self.image = self.images[self.frame_index]
self.rect = self.image.get_rect()
self.rect.center = (x, y)
self.counter = 0
def update(self):
# scroll
self.rect.x += screen_scroll
EXPLOSION_SPEED = 4
        # update explosion animation
self.counter += 1
if self.counter >= EXPLOSION_SPEED:
self.counter = 0
self.frame_index += 1
# if the animation is complete then delete the explosion
if self.frame_index >= len(self.images):
self.kill()
else:
self.image = self.images[self.frame_index]
class ScreenFade():
def __init__(self, direction, colour, speed):
self.direction = direction
self.colour = colour
self.speed = speed
self.fade_counter = 0
def fade(self):
fade_complete = False
self.fade_counter += self.speed
if self.direction == 1: # whole screen fade
pygame.draw.rect(screen, self.colour, (0 - self.fade_counter, 0, SCREEN_WIDTH // 2, SCREEN_HEIGHT))
pygame.draw.rect(screen, self.colour,
(SCREEN_WIDTH // 2 + self.fade_counter, 0, SCREEN_WIDTH, SCREEN_HEIGHT))
pygame.draw.rect(screen, self.colour, (0, 0 - self.fade_counter, SCREEN_WIDTH, SCREEN_HEIGHT // 2))
pygame.draw.rect(screen, self.colour,
(0, SCREEN_HEIGHT // 2 + self.fade_counter, SCREEN_WIDTH, SCREEN_HEIGHT))
        if self.direction == 2: # vertical screen fade down
            pygame.draw.rect(screen, self.colour, (0, 0, SCREEN_WIDTH, 0 + self.fade_counter))
        # the vertical fade only has to cover the screen height; comparing the
        # counter against SCREEN_WIDTH would finish the fade at the wrong time
        # on non-square windows
        threshold = SCREEN_WIDTH if self.direction == 1 else SCREEN_HEIGHT
        if self.fade_counter >= threshold:
            fade_complete = True
return fade_complete
# create screen fades
intro_fade = ScreenFade(1, BLACK, 4)
death_fade = ScreenFade(2, BLACK, 4)
# create buttons
start_button = button.Button(SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2, start_img, 1)
exit_button = button.Button(SCREEN_WIDTH // 2, SCREEN_HEIGHT // 1.4, exit_img, 1)
restart_button = button.Button(SCREEN_WIDTH // 2 - 135, SCREEN_HEIGHT // 2 - 50, restart_img, 1)
# create sprite groups
enemy_group = pygame.sprite.Group()
bullet_group = pygame.sprite.Group()
grenade_group = pygame.sprite.Group()
explosion_group = pygame.sprite.Group()
item_box_group = pygame.sprite.Group()
decoration_group = pygame.sprite.Group()
water_group = pygame.sprite.Group()
portal_group = pygame.sprite.Group()
exit_group = pygame.sprite.Group()
# create empty tile list
world_data = []
for row in range(ROWS):
r = [-1] * COLS
world_data.append(r)
# load in level data and create world
with open(f'level{level}_data.csv', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for x, row in enumerate(reader):
for y, tile in enumerate(row):
world_data[x][y] = int(tile)
world = World()
player, health_bar = world.process_data(world_data)
temp_key_list = []
run = True
while run:
clock.tick(FPS)
# konami code
if len(temp_key_list) > 10:
temp_key_list.pop(0)
if temp_key_list == [pygame.K_UP, pygame.K_UP, pygame.K_DOWN, pygame.K_DOWN,
pygame.K_LEFT, pygame.K_RIGHT, pygame.K_LEFT, pygame.K_RIGHT,
pygame.K_b, pygame.K_a]:
start_game = True
player.health = 500
if start_game == False:
# draw the menu
screen.blit(menu, (0, 0))
# add buttons
if start_button.draw(screen):
draw_history(pygame.time.get_ticks(), 1)
if exit_button.draw(screen):
run = False
else:
# update background
draw_bg()
# draw world map
world.draw()
player.update()
player.draw()
for enemy in enemy_group:
enemy.ai([player.rect.x, player.rect.y])
enemy.update()
enemy.draw()
# update and draw groups
bullet_group.update()
grenade_group.update()
explosion_group.update()
item_box_group.update()
decoration_group.update()
water_group.update()
portal_group.update()
exit_group.update()
bullet_group.draw(screen)
grenade_group.draw(screen)
explosion_group.draw(screen)
item_box_group.draw(screen)
decoration_group.draw(screen)
water_group.draw(screen)
portal_group.draw(screen)
exit_group.draw(screen)
# Background overlay
draw_obg()
draw_mist()
# show player health
health_bar.draw(player.health)
# show grenades
draw_text('Explosivo: ', font, WHITE, 10, 40)
for x in range(player.grenades):
screen.blit(grenade_img, (120 + (x * 15), 42))
# show intro
if start_intro == True:
if intro_fade.fade():
start_intro = False
intro_fade.fade_counter = 0
# update player actions
if player.alive:
# shoot bullets
if shoot:
player.shoot()
# throw grenades
            elif grenade and grenade_thrown == False and player.grenades > 0:
                # bind to a separate name so the 'grenade' key-state flag is
                # not overwritten by the Grenade instance
                new_grenade = Grenade(player.rect.centerx + (0.5 * player.rect.size[0] * player.direction), \
                                      player.rect.top, player.direction)
                grenade_group.add(new_grenade)
                # reduce grenades
                player.grenades -= 1
                grenade_thrown = True
if player.in_air:
player.update_action(2) # 2: jump
elif moving_left or moving_right:
player.update_action(1) # 1: run
else:
player.update_action(0) # 0: idle
screen_scroll, level_complete = player.move(moving_left, moving_right)
bg_scroll -= screen_scroll
# check if player has completed the level
if level_complete:
level += 1
if level == 3:
draw_history(pygame.time.get_ticks(), 2, 100000)
start_intro = True
bg_scroll = 0
world_data = reset_level()
if level <= MAX_LEVELS:
# load in level data and create world
with open(f'level{level}_data.csv', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for x, row in enumerate(reader):
for y, tile in enumerate(row):
world_data[x][y] = int(tile)
world = World()
player, health_bar = world.process_data(world_data)
else:
screen_scroll = 0
if death_fade.fade():
if restart_button.draw(screen):
death_fade.fade_counter = 0
start_intro = True
bg_scroll = 0
world_data = reset_level()
# load in level data and create world
with open(f'level{level}_data.csv', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for x, row in enumerate(reader):
for y, tile in enumerate(row):
world_data[x][y] = int(tile)
world = World()
player, health_bar = world.process_data(world_data)
for event in pygame.event.get():
# quit game
if event.type == pygame.QUIT:
run = False
# keyboard presses
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_a:
moving_left = True
temp_key_list.append(event.key)
if event.key == pygame.K_d:
moving_right = True
if event.key == pygame.K_SPACE:
shoot = True
if event.key == pygame.K_q:
grenade = True
if event.key == pygame.K_w and player.alive:
player.jump = True
jump_fx.play()
if event.key == pygame.K_ESCAPE:
run = False
if event.key == pygame.K_LEFT:
temp_key_list.append(event.key)
if event.key == pygame.K_RIGHT:
temp_key_list.append(event.key)
if event.key == pygame.K_UP:
temp_key_list.append(event.key)
if event.key == pygame.K_DOWN:
temp_key_list.append(event.key)
if event.key == pygame.K_b:
temp_key_list.append(event.key)
# keyboard button released
if event.type == pygame.KEYUP:
if event.key == pygame.K_a:
moving_left = False
if event.key == pygame.K_d:
moving_right = False
if event.key == pygame.K_SPACE:
shoot = False
if event.key == pygame.K_q:
grenade = False
grenade_thrown = False
pygame.display.update()
pygame.quit()
| 37.383984 | 116 | 0.5694 | 4,676 | 36,412 | 4.271814 | 0.091104 | 0.030038 | 0.028536 | 0.027034 | 0.529011 | 0.453617 | 0.388636 | 0.324856 | 0.259825 | 0.211164 | 0 | 0.017477 | 0.318 | 36,412 | 973 | 117 | 37.422405 | 0.786896 | 0.129463 | 0 | 0.369444 | 0 | 0 | 0.024369 | 0.002884 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052778 | false | 0 | 0.008333 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0be6c77d6db0b1ef2ef9d7a6ba5e4d6a7b5669cd | 944 | py | Python | agent/indy_catalyst_agent/messaging/credentials/messages/credential.py | mikelodder7/indy-catalyst | 5e80d8d6764144303b7ef1851aee32291bcb2d98 | [
"Apache-2.0"
] | null | null | null | agent/indy_catalyst_agent/messaging/credentials/messages/credential.py | mikelodder7/indy-catalyst | 5e80d8d6764144303b7ef1851aee32291bcb2d98 | [
"Apache-2.0"
] | null | null | null | agent/indy_catalyst_agent/messaging/credentials/messages/credential.py | mikelodder7/indy-catalyst | 5e80d8d6764144303b7ef1851aee32291bcb2d98 | [
"Apache-2.0"
] | null | null | null | """
A credential content message.
"""
from marshmallow import fields
from ...agent_message import AgentMessage, AgentMessageSchema
from ...message_types import MessageTypes
class Credential(AgentMessage):
"""Class representing a credential."""
class Meta:
# handler_class = CredentialHandler
schema_class = "CredentialSchema"
message_type = MessageTypes.CREDENTIAL.value
def __init__(
self,
*,
credential_json: str = None,
revocation_registry_id: str = None,
**kwargs
):
super(Credential, self).__init__(**kwargs)
self.credential_json = credential_json
self.revocation_registry_id = revocation_registry_id
class CredentialSchema(AgentMessageSchema):
"""Credential schema."""
class Meta:
model_class = Credential
credential_json = fields.Str(required=True)
revocation_registry_id = fields.Str(required=True)
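

def _example_credential_roundtrip():
    """
    Hypothetical usage sketch, not part of the original module: build a
    Credential message and serialize it through its marshmallow schema.
    The field values here are placeholders.
    """
    msg = Credential(
        credential_json='{"values": {}}',
        revocation_registry_id="example-rev-reg-id",
    )
    return CredentialSchema().dump(msg)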
| 24.842105 | 61 | 0.690678 | 92 | 944 | 6.804348 | 0.391304 | 0.089457 | 0.127796 | 0.067093 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.223517 | 944 | 37 | 62 | 25.513514 | 0.854025 | 0.122881 | 0 | 0.090909 | 0 | 0 | 0.019753 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.136364 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0be7e862effd564ba0ca3349ca48650a8d534979 | 3,227 | bzl | Python | config/configs.bzl | BlakeRxxk/esoft-ios-tuition | 4854a5118b04ec987219a94b45fed31e3bcf1d74 | [
"MIT"
] | null | null | null | config/configs.bzl | BlakeRxxk/esoft-ios-tuition | 4854a5118b04ec987219a94b45fed31e3bcf1d74 | [
"MIT"
] | null | null | null | config/configs.bzl | BlakeRxxk/esoft-ios-tuition | 4854a5118b04ec987219a94b45fed31e3bcf1d74 | [
"MIT"
] | 2 | 2020-11-04T06:35:56.000Z | 2021-09-22T07:06:58.000Z | load("//config:utils.bzl",
"config_with_updated_linker_flags",
"configs_with_config",
"merge_dict",
"DEVELOPMENT_LANGUAGE",
"SHARED_CONFIGS",
"ALL_LOAD_LINKER_FLAG",
"read_config_nonempty",
"optimization_config",
"add_provisioning_profile_specifier",
"add_codesign_identity",
"strip_debug_symbols",
"get_build_number",
"get_short_version",
"bundle_identifier",
"get_development_team",
"get_provisioning_profile",
"get_codesign_entitlements",
)
def app_binary_configs(name="TemplateApp"):
config = {
"ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES": "YES",
"DEVELOPMENT_LANGUAGE": DEVELOPMENT_LANGUAGE,
"PRODUCT_BUNDLE_IDENTIFIER": bundle_identifier(suffix=""),
"CODE_SIGN_ENTITLEMENTS": get_codesign_entitlements("app"),
"DEVELOPMENT_TEAM": get_development_team(),
"ASSETCATALOG_COMPILER_APPICON_NAME": "AppIcon",
"BUILD_NUMBER": get_build_number(),
"PRODUCT_BUNDLE_SHORT_VERSION": get_short_version(),
"APP_NAME": name,
"PRODUCT_NAME": name,
"TARGETED_DEVICE_FAMILY": "1,2",
}
config = merge_dict(SHARED_CONFIGS, config)
config = merge_dict(config, optimization_config())
config = config_with_updated_linker_flags(config, ALL_LOAD_LINKER_FLAG)
configs = configs_with_config(config)
configs = add_provisioning_profile_specifier(configs, "app")
configs = add_codesign_identity(configs)
configs = strip_debug_symbols(configs)
return configs
def app_test_configs(name):
config = {
"ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES": "NO",
"DEVELOPMENT_LANGUAGE": DEVELOPMENT_LANGUAGE,
"PRODUCT_BUNDLE_IDENTIFIER": bundle_identifier(suffix=""),
"DEVELOPMENT_TEAM": get_development_team(),
"BUILD_NUMBER": get_build_number(),
"PRODUCT_BUNDLE_SHORT_VERSION": get_short_version(),
"APP_NAME": name,
"PRODUCT_NAME": name,
"TARGETED_DEVICE_FAMILY": "1,2",
"CLANG_ENABLE_CODE_COVERAGE": "YES",
"COPY_PHASE_STRIP": "NO"
}
config = merge_dict(SHARED_CONFIGS, config)
config = merge_dict(config, optimization_config())
config = config_with_updated_linker_flags(config, ALL_LOAD_LINKER_FLAG)
configs = {
"Debug": config,
"Profile": config,
}
return configs
def info_plist_substitutions(name):
substitutions = {
"DEVELOPMENT_LANGUAGE": DEVELOPMENT_LANGUAGE,
"EXECUTABLE_NAME": name,
"PRODUCT_BUNDLE_IDENTIFIER": bundle_identifier(name),
"PRODUCT_NAME": name,
"CURRENT_PROJECT_VERSION": "1",
}
return substitutions
def app_info_plist_substitutions(name="TemplateApp"):
substitutions = {
"DEVELOPMENT_LANGUAGE": DEVELOPMENT_LANGUAGE,
"EXECUTABLE_NAME": name,
"PRODUCT_BUNDLE_IDENTIFIER": bundle_identifier(suffix=""),
"PRODUCT_NAME": name,
"APP_NAME": name,
"CURRENT_PROJECT_VERSION": "1",
"BUILD_NUMBER": get_build_number(),
"PRODUCT_BUNDLE_SHORT_VERSION": get_short_version(),
"ASSETCATALOG_COMPILER_APPICON_NAME": "AppIcon",
"TARGETED_DEVICE_FAMILY": "1,2",
}
return substitutions
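
# Illustrative sketch (not part of the original file): merge_dict() from
# //config:utils.bzl is assumed to behave like a right-biased merge, so the
# per-target values above override the SHARED_CONFIGS defaults:
def _example_merge(base, override):
    merged = dict(base)
    merged.update(override)  # later values win, mirroring merge_dict(base, override)
    return merged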
| 35.461538 | 75 | 0.689185 | 336 | 3,227 | 6.127976 | 0.220238 | 0.08305 | 0.036425 | 0.073822 | 0.623118 | 0.529869 | 0.459932 | 0.459932 | 0.459932 | 0.459932 | 0 | 0.003104 | 0.201425 | 3,227 | 90 | 76 | 35.855556 | 0.795887 | 0 | 0 | 0.523256 | 0 | 0 | 0.369383 | 0.192749 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0 | 0 | 0 | 0.093023 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bed7f6a418608ad6f1f890a548e1d76c12306a6 | 1,148 | py | Python | dsf-to-flac.py | whirledsol/music-partition | 844e7a8db7e5dc5001676fd69d699a971cae5f07 | [
"MIT"
] | null | null | null | dsf-to-flac.py | whirledsol/music-partition | 844e7a8db7e5dc5001676fd69d699a971cae5f07 | [
"MIT"
] | null | null | null | dsf-to-flac.py | whirledsol/music-partition | 844e7a8db7e5dc5001676fd69d699a971cae5f07 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import sys, os, subprocess
import argparse
from os.path import abspath
def main():
    args = parse()
    #print(f"looking in {args.dir}")
    for file in os.listdir(args.dir):
        if file.endswith(".dsf"):
            base = os.path.join(args.dir, file[:-4])  # path without the .dsf extension; portable across OSes
            print(f"working on {base}")
            # passing an argument list avoids shell quoting issues with odd filenames
            subprocess.run(["ffmpeg", "-i", f"{base}.dsf",
                            "-af", f"aformat={args.format}",
                            "-compression_level", args.compression_level,
                            f"{base}.flac"])
def parse():
parser = argparse.ArgumentParser(description='Converts dsf to flac')
parser.add_argument('-d','--directory',dest='dir',type=str, required=True, help='the directory to find files in')
parser.add_argument('-l','--low',dest='low',action='store_true', help='if supplied, uses 16bit. defaults to 24bit.')
parser.add_argument('-c','--compression_level',dest='compression_level',type=str, default='12', help='value from 0-12 with 12 taking more time to compress')
args = parser.parse_args()
args.dir = abspath(args.dir)
args.format = "s16:44100" if args.low else "s32:176000"
print('****************\n\n')
print(args)
print('****************\n\n')
return args
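
# Example invocation (illustrative paths):
#     python dsf-to-flac.py --directory /music/dsd --compression_level 8
#     python dsf-to-flac.py -d /music/dsd --low    # 16 bit / 44.1 kHz output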
if __name__ == "__main__": main() | 35.875 | 157 | 0.680314 | 167 | 1,148 | 4.57485 | 0.497006 | 0.045812 | 0.066754 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026497 | 0.112369 | 1,148 | 32 | 158 | 35.875 | 0.723258 | 0.040941 | 0 | 0.086957 | 0 | 0 | 0.384545 | 0.061818 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.130435 | 0 | 0.26087 | 0.173913 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bed97e5d8b2dc9699be4fae86b43300e99178b8 | 1,368 | py | Python | py/examples/sender_example.py | egglang/sonicky | f24d94f6869a4d9ecad80f5d9eea3346dc9a113a | [
"Apache-2.0"
] | 4 | 2017-09-15T16:45:50.000Z | 2020-03-02T11:31:38.000Z | py/examples/sender_example.py | egglang/sonicky | f24d94f6869a4d9ecad80f5d9eea3346dc9a113a | [
"Apache-2.0"
] | 1 | 2020-03-02T11:32:01.000Z | 2020-03-02T11:32:01.000Z | py/examples/sender_example.py | egglang/sonicky | f24d94f6869a4d9ecad80f5d9eea3346dc9a113a | [
"Apache-2.0"
] | 1 | 2021-12-21T22:44:39.000Z | 2021-12-21T22:44:39.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2017 egglang <t.egawa@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sonicky.codec.ecc import OnebyteReedSolomonEcc, EmptyEcc
from sonicky.communication import SoundWriter, SoundSender
__author__ = 'egg'
def write(coder=EmptyEcc(), debug=False, target_str='', file_name='./out.wav'):
print("Encoding...")
writer = SoundWriter(debug=debug, coder=coder)
writer.write_string_to_file(target_str, file_name)
def send(coder=EmptyEcc(), debug=False, target_str=''):
print("Encoding...")
sender = SoundSender(debug=debug, coder=coder)
sender.send_string(target_str)
if __name__ == "__main__":
# send string message with Reedsolomon
send(coder=OnebyteReedSolomonEcc(), debug=True, target_str="Hello!")
# write(coder=OnebyteReedSolomonEcc(), target_str="nn", file_name="out.wav")
| 35.076923 | 80 | 0.737573 | 187 | 1,368 | 5.262032 | 0.55615 | 0.060976 | 0.026423 | 0.03252 | 0.065041 | 0.065041 | 0 | 0 | 0 | 0 | 0 | 0.007719 | 0.147661 | 1,368 | 38 | 81 | 36 | 0.836192 | 0.510965 | 0 | 0.153846 | 0 | 0 | 0.073733 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.153846 | 0 | 0.307692 | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bed9c33950a3cf3292d85acdb94ef8513d534aa | 2,628 | py | Python | keyboard_agent.py | yoshi-ono/WM_Hackathon | 546996c80b53a155ca94417b2fd19412c8f9f9c4 | [
"Apache-2.0"
] | 9 | 2020-09-08T04:39:55.000Z | 2021-08-03T14:28:33.000Z | keyboard_agent.py | pulinagrawal/WM_Hackathon | 19522ba74c5cf1d78b19d58b1881e77b5ef26c27 | [
"Apache-2.0"
] | null | null | null | keyboard_agent.py | pulinagrawal/WM_Hackathon | 19522ba74c5cf1d78b19d58b1881e77b5ef26c27 | [
"Apache-2.0"
] | 4 | 2021-04-21T00:48:28.000Z | 2021-06-28T02:33:04.000Z | #!/usr/bin/env python
import sys, gym, time
#
# Test yourself as a learning agent! Pass environment name as a command-line argument, for example:
#
# python keyboard_agent.py SpaceInvadersNoFrameskip-v4
#
import gym_game
import pygame
if len(sys.argv) < 3:
print('Usage: python keyboard_agent.py ENV_NAME CONFIG_FILE')
sys.exit(-1)
env_name = sys.argv[1]
print('Making Gym[PyGame] environment:', env_name)
config_file = sys.argv[2]
print('Config file:', config_file)
env = gym.make(env_name, config_file=config_file)
sleep_time = 0.1
if not hasattr(env.action_space, 'n'):
raise Exception('Keyboard agent only supports discrete action spaces')
ACTIONS = env.action_space.n
print("ACTIONS={}".format(ACTIONS))
print("Press keys 1 2 3 ... to take actions 1 2 3 ... etc.")
print("No keys pressed is taking action 0")
render_mode = 'human'
# render_mode = 'rgb_array'
env.use_wall_clock = True
env.reset()
#env.render(render_mode)
def get_action(pressed_keys):
    # map the number keys 0-9 directly to actions 0-9; with no tracked key
    # pressed, fall back to action 0
    number_keys = (pygame.K_0, pygame.K_1, pygame.K_2, pygame.K_3, pygame.K_4,
                   pygame.K_5, pygame.K_6, pygame.K_7, pygame.K_8, pygame.K_9)
    for action, key in enumerate(number_keys):
        if pressed_keys[key] == 1:
            return action
    return 0
def rollout(env):
observation = env.reset()
quit = False
total_reward = 0
total_timesteps = 0
while 1:
# Check for quit from user
events = env.get_events()
for event in events:
if event.type == pygame.QUIT:
quit = True
print('Quit event')
# Get selected action from user
pressed_keys = env.get_keys_pressed()
a = get_action(pressed_keys)
# Update the environment
observation, reward, done, info = env.step(a)
total_timesteps += 1
total_reward += reward
# print('Obs: ',str(observation))
# Render the new state
img = env.render(mode=render_mode, close=quit) # Render the game
# Handle quit request
if quit:
print('Quitting (truncating rollout)...')
break
if done:
print('Episode (rollout) complete.')
env.reset()
break
# Wait a short time
time.sleep(sleep_time)
print("Rollout summary: Timesteps %i Reward %0.2f" % (total_timesteps, total_reward))
return quit
while 1:
quit = rollout(env)
if quit:
break
| 23.890909 | 99 | 0.671233 | 396 | 2,628 | 4.310606 | 0.318182 | 0.083773 | 0.09959 | 0.105448 | 0.139426 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025169 | 0.213851 | 2,628 | 109 | 100 | 24.110092 | 0.801065 | 0.15411 | 0 | 0.142857 | 0 | 0 | 0.162211 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025974 | false | 0 | 0.038961 | 0 | 0.090909 | 0.12987 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0beec220c419cadc25cdd94e7d6041fbe8eb1983 | 4,019 | py | Python | tests/macaca-android-sample.test.py | MaxGuo/macaca | bb16ee86d4fd066f318f36b73d98833c92da9d7a | [
"MIT"
] | 36 | 2017-03-29T06:48:01.000Z | 2021-01-26T06:54:46.000Z | tests/macaca-android-sample.test.py | MaxGuo/macaca | bb16ee86d4fd066f318f36b73d98833c92da9d7a | [
"MIT"
] | 4 | 2016-08-18T10:28:02.000Z | 2016-12-03T03:02:05.000Z | tests/macaca-android-sample.test.py | MaxGuo/macaca | bb16ee86d4fd066f318f36b73d98833c92da9d7a | [
"MIT"
] | 41 | 2017-03-19T10:56:13.000Z | 2022-01-20T07:02:48.000Z | #coding:utf-8
import unittest
import os
import time
from macaca import WebDriver
from macaca import Keys
from retrying import retry
desired_caps = {
'platformName': 'android',
'app': 'https://npmcdn.com/android-app-bootstrap@latest/android_app_bootstrap/build/outputs/apk/android_app_bootstrap-debug.apk',
}
server_url = {
'hostname': 'localhost',
'port': 3456
}
def switch_to_webview(driver):
contexts = driver.contexts
driver.context = contexts[-1]
return driver
def switch_to_native(driver):
contexts = driver.contexts
driver.context = contexts[0]
return driver
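
# Illustrative note (not part of the original test): driver.contexts is assumed
# to list the native context first and any webview contexts after it, e.g.
#     ['NATIVE', 'WEBVIEW_com.example.app']
# so contexts[0] selects the native context and contexts[-1] the latest webview.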
class MacacaTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.driver = WebDriver(desired_caps, server_url)
cls.initDriver()
@classmethod
def tearDownClass(cls):
cls.driver.quit()
@classmethod
@retry
def initDriver(cls):
print("Retry connecting server...")
cls.driver.init()
def test_01_login(self):
        el = self.driver \
            .elements_by_class_name('android.widget.EditText')[0] \
            .send_keys('中文+Test+12345678')
el = self.driver \
.elements_by_class_name('android.widget.EditText')[1] \
.send_keys('111111')
time.sleep(1)
# self.driver.keys(Keys.ENTER.value + Keys.ESCAPE.value)
self.driver \
.element_by_name('Login') \
.click()
def test_02_scroll_tableview(self):
self.driver \
.wait_for_element_by_name('HOME') \
.click()
self.driver \
.wait_for_element_by_name('list') \
.click()
def test_03_gesture(self):
time.sleep(5)
self.driver \
.wait_for_element_by_name('Alert') \
.click()
time.sleep(5)
self.driver \
.accept_alert()
time.sleep(3)
self.driver \
.back()
time.sleep(3)
self.driver \
.wait_for_element_by_name('Gesture') \
.click()
time.sleep(3)
self.driver \
.touch('tap', {
'x': 100,
'y': 100
})
time.sleep(5)
self.driver \
.touch('doubleTap', {
'x': 100,
'y': 100
})
time.sleep(5)
self.driver \
.touch('press', {
'x': 100,
'y': 100,
'steps': 100
})
time.sleep(5)
self.driver \
.touch('drag', {
'fromX': 100,
'fromY': 100,
'toX': 100,
'toY': 600,
'steps': 100
})
time.sleep(5)
self.driver.back()
time.sleep(5)
self.driver.back()
def test_04_webview(self):
self.driver \
.wait_for_element_by_name('Webview') \
.click()
time.sleep(5)
self.driver.save_screenshot('./webView.png') # save screen shot
switch_to_webview(self.driver) \
.wait_for_element_by_id('pushView') \
.click()
switch_to_webview(self.driver) \
.wait_for_element_by_id('popView') \
.click()
def test_05_web(self):
switch_to_native(self.driver) \
.wait_for_element_by_name('Baidu') \
.click()
time.sleep(5)
self.driver.save_screenshot("./baidu.png")
switch_to_webview(self.driver) \
.wait_for_element_by_id('index-kw') \
.send_keys('macaca')
self.driver \
.wait_for_element_by_id('index-bn') \
.click()
def test_06_logout(self):
switch_to_native(self.driver) \
.wait_for_element_by_name('PERSONAL') \
.click()
self.driver.wait_for_element_by_name('Logout') \
.click()
if __name__ == '__main__':
unittest.main()
| 22.20442 | 133 | 0.526499 | 430 | 4,019 | 4.676744 | 0.288372 | 0.129289 | 0.083541 | 0.101442 | 0.489806 | 0.457981 | 0.431626 | 0.322725 | 0.19642 | 0.19642 | 0 | 0.032172 | 0.350336 | 4,019 | 180 | 134 | 22.327778 | 0.738031 | 0.020901 | 0 | 0.484848 | 0 | 0.007576 | 0.109105 | 0.011699 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.045455 | 0 | 0.151515 | 0.007576 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bf23234460c739e972bccaf2a5bc79dbcce8fc6 | 47,798 | py | Python | kshell_utilities/general_utilities.py | GaffaSnobb/kshell-utilities | 6e07d073ea742bbeb92662aaeb2f8cfe2abaa0cb | [
"MIT"
] | null | null | null | kshell_utilities/general_utilities.py | GaffaSnobb/kshell-utilities | 6e07d073ea742bbeb92662aaeb2f8cfe2abaa0cb | [
"MIT"
] | null | null | null | kshell_utilities/general_utilities.py | GaffaSnobb/kshell-utilities | 6e07d073ea742bbeb92662aaeb2f8cfe2abaa0cb | [
"MIT"
] | null | null | null | import sys, time, warnings
from typing import Union, Tuple, Optional
from fractions import Fraction
from itertools import chain
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import chi2
from scipy.optimize import curve_fit
from .parameters import flags
def create_spin_parity_list(
spins: np.ndarray,
parities: np.ndarray
) -> list:
"""
Pair up input spins and parities in a list of lists.
Parameters
----------
spins : np.ndarray
Array of spins for each energy level.
parities : np.ndarray
Array of corresponding parities for each energy level.
Returns
-------
spins_parities : list
        A nested list of unique [spin, parity] pairs sorted with respect
        to the spin; its length equals the number of unique
        (spin, parity) combinations in the input.
Examples
--------
Example list:
``` python
[[1, +1], [3, +1], [5, +1], [7, +1], [9, +1], [11, +1], [13, +1]]
```
"""
spin_parity_list = []
for i in range(len(spins)):
if (tmp := [int(spins[i]), int(parities[i])]) in spin_parity_list:
continue
spin_parity_list.append(tmp)
return spin_parity_list
def div0(numerator, denominator):
"""
Suppress ZeroDivisionError, set x/0 to 0, and set inf, -inf and nan
to 0. Author Jørgen Midtbø.
Examples
--------
>>> div0([1, 1, 1], [1, 2, 0])
array([1. , 0.5, 0. ])
"""
with np.errstate(divide='ignore', invalid='ignore'):
res = np.true_divide(numerator, denominator)
res[~np.isfinite(res)] = 0 # -inf inf NaN
return res
def gamma_strength_function_average(
levels: np.ndarray,
transitions: np.ndarray,
bin_width: Union[float, int],
Ex_min: Union[float, int],
Ex_max: Union[float, int],
multipole_type: str,
prefactor_E1: Union[None, float] = None,
prefactor_M1: Union[None, float] = None,
prefactor_E2: Union[None, float] = None,
initial_or_final: str = "initial",
partial_or_total: str = "partial",
include_only_nonzero_in_average: bool = True,
include_n_levels: Union[None, int] = None,
filter_spins: Union[None, list] = None,
filter_parities: str = "both",
return_n_transitions: bool = False,
# plot: bool = False,
# save_plot: bool = False
) -> Tuple[np.ndarray, np.ndarray, Optional[np.ndarray]]:
"""
Calculate the gamma strength function averaged over total angular
momenta, parities, and initial excitation energies.
Author: Jørgen Midtbø.
Modified by: GaffaSnobb.
    TODO: Figure out the pre-factors.
    TODO: Use numpy.logical_or to filter levels and transitions to avoid
        using many if statements in the loops.
    TODO: Make res.transitions_BXL.ji, res.transitions_BXL.pii, etc.
        class attributes (properties).
Parameters
----------
levels : np.ndarray
Array containing energy, spin, and parity for each excited
state. [[E, 2*spin, parity, idx], ...]. idx counts how many
times a state of that given spin and parity has occurred. The
first 0+ state will have an idx of 1, the second 0+ will have an
idx of 2, etc.
transitions : np.ndarray
Array containing transition data for the specified
multipolarity.
OLD:
Mx8 array containing [2*spin_final, parity_initial, Ex_final,
2*spin_initial, parity_initial, Ex_initial, E_gamma, B(.., i->f)]
OLD NEW:
[2*spin_initial, parity_initial, Ex_initial, 2*spin_final,
parity_final, Ex_final, E_gamma, B(.., i->f), B(.., f<-i)]
NEW:
[2*spin_initial, parity_initial, idx_initial, Ex_initial,
2*spin_final, parity_final, idx_final, Ex_final, E_gamma,
B(.., i->f), B(.., f<-i)]
bin_width : Union[float, int]
The width of the energy bins. A bin width of 0.2 contains 20
states of uniform spacing of 0.01.
Ex_min : Union[float, int]
Lower limit for initial level excitation energy, usually in MeV.
Ex_max : Union[float, int]
Upper limit for initial level excitation energy, usually in MeV.
multipole_type : str
        Choose whether to calculate for 'E1', 'M1' or 'E2'. NOTE:
        Currently only M1 and E1 are implemented.
prefactor_E1 : Union[None, float]
E1 pre-factor from the definition of the GSF. Defaults to a
standard value if None.
prefactor_M1 : Union[None, float]
M1 pre-factor from the definition of the GSF. Defaults to a
standard value if None.
prefactor_E2 : Union[None, float]
E2 pre-factor from the definition of the GSF. Defaults to a
standard value if None.
initial_or_final : str
Choose whether to use the energy of the initial or final state
for the transition calculations. NOTE: This may be removed in
a future release since the correct alternative is to use the
initial energy.
partial_or_total : str
Choose whether to use the partial level density
rho(E_i, J_i, pi_i) or the total level density rho(E_i) for
calculating the gamma strength function. Note that the partial
level density, the default value, is probably the correct
alternative. Using the total level density will introduce an
arbitrary scaling depending on how many (J, pi) combinations
were included in the calculations.
This argument is included for easy comparison between the two
densities. See the appendix of PhysRevC.98.064321 for details.
include_only_nonzero_in_average : bool
If True (default) only non-zero values are included in the final
averaging of the gamma strength function. The correct
alternative is to use only the non-zero values, so setting this
parameter to False should be done with care.
include_n_levels : Union[None, int]
The number of states per spin to include. Example:
        include_n_levels = 100 will include only the 100 lowest-lying
states for each spin.
filter_spins : Union[None, list]
Which spins to include in the GSF. If None, all spins are
included. TODO: Make int valid input too.
filter_parities : str
Which parities to include in the GSF. 'both', '+', '-' are
allowed.
return_n_transitions : bool
Count the number of transitions, as a function of gamma energy,
involved in the GSF calculation and return this number as a
third return value. For calculating Porter-Thomas fluctuations
in the GSF by
r(E_gamma) = sqrt(2/n(E_gamma))
where n is the number of transitions for each gamma energy, used
to calculate the GSF. The value n is called n_transitions_array
in the code. See for example DOI: 10.1103/PhysRevC.98.054303 for
details.
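        Worked example: with n = 50 transitions contributing to a gamma
        energy bin, the expected relative fluctuation is
        r = sqrt(2/50) = 0.2.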
    plot : bool
        Toggle plotting on / off (currently commented out of the
        signature).
    save_plot : bool
        Toggle saving of plot (as .png with dpi=300) on / off (currently
        commented out of the signature).
Variables
---------
Ex : np.ndarray
The excitation energy of all levels.
Ex_initial : np.ndarray
The excitation energy of the initial state of a transition.
spins : np.ndarray
The spins of all levels.
parities : np.ndarray
The parities of all levels.
Returns
-------
bins : np.ndarray
The bins corresponding to gSF_ExJpiavg (x values for plot).
gSF_ExJpiavg : np.ndarray
The gamma strength function.
"""
skip_counter = { # Debug.
"Transit: Energy range": 0,
"Transit: Number of levels": 0,
"Transit: Parity": 0,
"Level density: Energy range": 0,
"Level density: Number of levels": 0,
"Level density: Parity": 0
}
total_gsf_time = time.perf_counter()
allowed_filter_parities = ["+", "-", "both"]
if filter_parities not in allowed_filter_parities:
msg = f"filter_parities must be {allowed_filter_parities}"
raise TypeError(msg)
if filter_parities == "both":
filter_parities = [-1, +1]
elif filter_parities == "-":
filter_parities = [-1]
elif filter_parities == "+":
filter_parities = [+1]
if include_n_levels is None:
include_n_levels = np.inf # Include all states.
if (Ex_min < 0) or (Ex_max < 0):
msg = "Ex_min and Ex_max cannot be negative!"
raise ValueError(msg)
if Ex_max < Ex_min:
msg = "Ex_max cannot be smaller than Ex_min!"
raise ValueError(msg)
prefactors = { # Factor from the def. of the GSF.
"M1": 11.5473e-9, # [1/(mu_N**2*MeV**2)].
# "E1": 1.047e-6,
"E1": 3.4888977e-7
}
if prefactor_E1 is not None:
"""
Override the E1 prefactor.
"""
prefactors["E1"] = prefactor_E1
if prefactor_M1 is not None:
"""
Override the M1 prefactor.
"""
prefactors["M1"] = prefactor_M1
if prefactor_E2 is not None:
"""
Override the E2 prefactor.
"""
prefactors["E2"] = prefactor_E2
prefactor = prefactors[multipole_type]
# Extract data to a more readable form:
n_transitions = len(transitions[:, 0])
n_levels = len(levels[:, 0])
E_ground_state = levels[0, 0] # Read out the absolute ground state energy so we can get relative energies later.
try:
Ex, spins, parities, level_counter = np.copy(levels[:, 0]), levels[:, 1], levels[:, 2], levels[:, 3]
except IndexError as err:
msg = f"{err.__str__()}\n"
msg += "Error probably due to old tmp files. Use loadtxt parameter"
msg += " load_and_save_to_file = 'overwrite' to re-read data from the"
msg += " summary file and generate new tmp files."
raise Exception(msg) from err
if initial_or_final == "initial":
Ex_initial_or_final = np.copy(transitions[:, 3]) # To avoid altering the raw data.
spin_initial_or_final_idx = 0
parity_initial_or_final_idx = 1
elif initial_or_final == "final":
Ex_initial_or_final = np.copy(transitions[:, 7]) # To avoid altering the raw data.
spin_initial_or_final_idx = 4
parity_initial_or_final_idx = 5
msg = "Using final states for the energy limits is not correct"
msg += " and should only be used for comparison with the correct"
msg += " option which is using initial states for the energy limits."
warnings.warn(msg, RuntimeWarning)
else:
msg = "'initial_or_final' must be either 'initial' or 'final'."
msg += f" Got {initial_or_final}"
raise ValueError(msg)
if abs(Ex_initial_or_final[0]) > 10:
"""
Adjust energies relative to the ground state energy if they have
not been adjusted already. The ground state energy is usually
minus a few tens of MeV and above, so checking absolute value
        above 10 MeV is probably safe. Can't check for equality to zero
since the initial state will never be zero.
NOTE: Just check if the value is negative instead?
"""
Ex_initial_or_final -= E_ground_state
if Ex[0] != 0:
"""
Adjust energies relative to the ground state energy if they have
not been adjusted already.
"""
Ex -= E_ground_state
if (Ex_actual_max := np.max(Ex)) < Ex_max:
msg = "Requested max excitation energy is greater than the largest"
msg += " excitation energy in the data file."
msg += f" Changing Ex_max from {Ex_max} to {Ex_actual_max}."
Ex_max = Ex_actual_max
print(msg)
"""
Find index of first and last bin (lower bin edge) where we put
counts. It's important to not include the other Ex bins in the
averaging later, because they contain zeros which will pull the
average down.
Bin alternatives:
bin_array = np.linspace(0, bin_width*n_bins, n_bins + 1) # Array of lower bin edge energy values
bin_array_middle = (bin_array[0: -1] + bin_array[1:])/2 # Array of middle bin values
"""
Ex_min_idx = int(Ex_min/bin_width)
Ex_max_idx = int(Ex_max/bin_width)
n_bins = int(np.ceil(Ex_max/bin_width)) # Make sure the number of bins cover the whole Ex region.
# Ex_max = bin_width*n_bins # Adjust Ex_max to match the round-off in the bin width. NOTE: Unsure if this is needed.
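    # Worked example (illustrative): with bin_width = 0.2 and Ex_max = 10.05,
    # n_bins = ceil(10.05/0.2) = 51 lower bin edges at 0.0, 0.2, ..., 10.0,
    # and a transition at Ex = 3.14 lands in bin int(3.14/0.2) = 15.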
"""
B_pixel_sum[Ex_final_idx, E_gamma_idx, spin_parity_idx] contains the
summed reduced transition probabilities for all transitions
contained within the Ex_final_idx bin, E_gamma_idx bin, and
spin_parity_idx bin. B_pixel_counts counts the number of transitions
within the same bins.
"""
spin_parity_list = create_spin_parity_list(spins, parities) # To create a unique index for every [spin, parity] pair.
n_unique_spin_parity_pairs = len(spin_parity_list)
B_pixel_sum = np.zeros((n_bins, n_bins, n_unique_spin_parity_pairs)) # Summed B(..) values for each pixel.
B_pixel_count = np.zeros((n_bins, n_bins, n_unique_spin_parity_pairs)) # The number of transitions.
rho_ExJpi = np.zeros((n_bins, n_unique_spin_parity_pairs)) # (Ex, Jpi) matrix to store level density
gSF = np.zeros((n_bins, n_bins, n_unique_spin_parity_pairs))
n_transitions_array = np.zeros(n_bins, dtype=int) # Count the number of transitions per gamma energy bin.
transit_gsf_time = time.perf_counter()
for transition_idx in range(n_transitions):
"""
Iterate over all transitions in the transitions matrix and add
up all reduced transition probabilities and the number of
transitions in the correct bins.
"""
if (Ex_initial_or_final[transition_idx] < Ex_min) or (Ex_initial_or_final[transition_idx] >= Ex_max):
"""
Check if transition is within min max limits, skip if not.
"""
skip_counter["Transit: Energy range"] += 1 # Debug.
continue
idx_initial = transitions[transition_idx, 2]
idx_final = transitions[transition_idx, 6]
if (idx_initial > include_n_levels) or (idx_final > include_n_levels):
"""
Include only 'include_n_levels' number of levels. Defaults
to np.inf (include all).
"""
skip_counter["Transit: Number of levels"] += 1 # Debug.
continue
spin_initial = transitions[transition_idx, 0]/2
spin_final = transitions[transition_idx, 4]/2
if filter_spins is not None:
# if (spin_initial not in filter_spins) or (spin_final not in filter_spins):
if spin_initial not in filter_spins:
"""
                Skip transitions from initial levels of total angular
                momentum not in the filter list.
"""
try:
skip_counter[f"Transit: j init: {spin_initial}"] += 1
except KeyError:
skip_counter[f"Transit: j init: {spin_initial}"] = 1
continue
parity_initial = transitions[transition_idx, 1]
parity_final = transitions[transition_idx, 5]
if (parity_initial not in filter_parities) or (parity_final not in filter_parities):
"""
Skip initial or final parities which are not in the filter
list. NOTE: Might be wrong to filter on the final parity.
"""
skip_counter["Transit: Parity"] += 1
continue
# Get bin index for E_gamma and Ex. Indices are defined with respect to the lower bin edge.
E_gamma_idx = int(transitions[transition_idx, 8]/bin_width)
Ex_initial_or_final_idx = int(Ex_initial_or_final[transition_idx]/bin_width)
n_transitions_array[E_gamma_idx] += 1 # Count the number of transitions involved in this GSF (Porter-Thomas fluctuations).
"""
transitions : np.ndarray
OLD:
Mx8 array containing [2*spin_final, parity_initial, Ex_final,
2*spin_initial, parity_initial, Ex_initial, E_gamma, B(.., i->f)]
OLD NEW:
[2*spin_initial, parity_initial, Ex_initial, 2*spin_final,
parity_final, Ex_final, E_gamma, B(.., i->f), B(.., f<-i)]
NEW:
[2*spin_initial, parity_initial, idx_initial, Ex_initial,
2*spin_final, parity_final, idx_final, Ex_final, E_gamma,
B(.., i->f), B(.., f<-i)]
"""
spin_initial_or_final = int(transitions[transition_idx, spin_initial_or_final_idx]) # Superfluous int casts?
parity_initial_or_final = int(transitions[transition_idx, parity_initial_or_final_idx])
spin_parity_idx = spin_parity_list.index([spin_initial_or_final, parity_initial_or_final])
try:
"""
Add B(..) value and increment transition count,
respectively. NOTE: Hope to remove this try-except by
implementing suitable input checks to this function. Note to
the note: Will prob. not be removed to keep the ability to
compare initial and final.
"""
B_pixel_sum[Ex_initial_or_final_idx, E_gamma_idx, spin_parity_idx] += \
transitions[transition_idx, 9]
B_pixel_count[Ex_initial_or_final_idx, E_gamma_idx, spin_parity_idx] += 1
except IndexError as err:
"""
NOTE: This error usually occurs because Ex_max is set to
limit Ex_final instead of Ex_initial. If so, E_gamma might
be larger than Ex_max and thus be placed in a B_pixel
outside of the allocated scope. This error has a larger
probability of occuring if Ex_max is set to a low value
because then the probability of
E_gamma = Ex_initial - Ex_final
is larger.
"""
msg = f"{err.__str__()}\n"
msg += f"{Ex_initial_or_final_idx=}, {E_gamma_idx=}, {spin_parity_idx=}, {transition_idx=}\n"
msg += f"{B_pixel_sum.shape=}\n"
msg += f"{transitions.shape=}\n"
msg += f"{Ex_max=}\n"
msg += f"2*spin_final: {transitions[transition_idx, 4]}\n"
msg += f"parity_initial: {transitions[transition_idx, 1]}\n"
msg += f"Ex_final: {transitions[transition_idx, 7]}\n"
msg += f"2*spin_initial: {transitions[transition_idx, 0]}\n"
msg += f"parity_initial: {transitions[transition_idx, 1]}\n"
msg += f"Ex_initial: {transitions[transition_idx, 3]}\n"
msg += f"E_gamma: {transitions[transition_idx, 8]}\n"
msg += f"B(.., i->f): {transitions[transition_idx, 9]}\n"
msg += f"B(.., f<-i): {transitions[transition_idx, 10]}\n"
raise Exception(msg) from err
transit_gsf_time = time.perf_counter() - transit_gsf_time
level_density_gsf_time = time.perf_counter()
for levels_idx in range(n_levels):
"""
Calculate the level density for each (Ex, spin_parity) pixel.
"""
if Ex[levels_idx] >= Ex_max:
"""
Skip if level is outside range. Only upper limit since
decays to levels below the lower limit are allowed.
"""
skip_counter["Level density: Energy range"] += 1
continue
if level_counter[levels_idx] > include_n_levels:
"""
Include only 'include_n_levels' number of levels. Defaults
to np.inf (include all).
"""
skip_counter["Level density: Number of levels"] += 1
continue
if filter_spins is not None:
if (spin_tmp := levels[levels_idx, 1]/2) not in filter_spins:
"""
Skip levels of total angular momentum not in the filter
list.
"""
try:
skip_counter[f"Level density: j: {spin_tmp}"] += 1
except KeyError:
skip_counter[f"Level density: j: {spin_tmp}"] = 1
continue
Ex_idx = int(Ex[levels_idx]/bin_width)
spin_parity_idx = \
spin_parity_list.index([spins[levels_idx], parities[levels_idx]])
rho_ExJpi[Ex_idx, spin_parity_idx] += 1
level_density_gsf_time = time.perf_counter() - level_density_gsf_time
if partial_or_total == "total":
"""
Use the total level density, rho(E_i), instead of the partial
level density, rho(E_i, J_i, pi_i). Sum over all (J_i, pi_i)
pairs and then copy these summed values to all columns in
rho_ExJpi.
"""
tmp_sum = rho_ExJpi.sum(axis=1)
for i in range(rho_ExJpi.shape[1]):
"""
All columns in rho_ExJpi will be identical. This is for
compatibility with the following for loop.
"""
rho_ExJpi[:, i] = tmp_sum
msg = "Using the total level density is not correct and"
msg += " should only be used when comparing with the correct"
msg += " alternative which is using the partial level density."
warnings.warn(msg, RuntimeWarning)
rho_ExJpi /= bin_width # Normalize to bin width, to get density in MeV^-1.
gsf_time = time.perf_counter()
for spin_parity_idx in range(n_unique_spin_parity_pairs):
"""
Calculate gamma strength functions for each [Ex, E_gamma,
spin_parity] individually using the partial level density for
each [Ex, spin_parity].
"""
for Ex_idx in range(n_bins):
gSF[Ex_idx, :, spin_parity_idx] = \
prefactor*rho_ExJpi[Ex_idx, spin_parity_idx]*div0(
numerator = B_pixel_sum[Ex_idx, :, spin_parity_idx],
denominator = B_pixel_count[Ex_idx, :, spin_parity_idx]
)
gsf_time = time.perf_counter() - gsf_time
avg_gsf_time = time.perf_counter()
if include_only_nonzero_in_average:
"""
Update 20171009 (Midtbø): Took proper care to only average over
the non-zero f(Eg,Ex,J,parity_initial) pixels.
NOTE: Probably not necessary to set an upper limit on gSF
due to the input adjustment of Ex_max.
"""
gSF_currentExrange = gSF[Ex_min_idx:Ex_max_idx + 1, :, :]
gSF_ExJpiavg = div0(
numerator = gSF_currentExrange.sum(axis = (0, 2)),
denominator = (gSF_currentExrange != 0).sum(axis = (0, 2))
)
else:
"""
NOTE: Probably not necessary to set an upper limit on gSF
due to the input adjustment of Ex_max.
"""
gSF_ExJpiavg = gSF[Ex_min_idx:Ex_max_idx + 1, :, :].mean(axis=(0, 2))
msg = "Including non-zero values when averaging the gamma strength"
msg += " function is not correct and should be used with care!"
warnings.warn(msg, RuntimeWarning)
avg_gsf_time = time.perf_counter() - avg_gsf_time
bins = np.linspace(0, Ex_max, n_bins + 1)
bins = (bins[:-1] + bins[1:])/2 # Middle point of the bins.
bins = bins[:len(gSF_ExJpiavg)]
total_gsf_time = time.perf_counter() - total_gsf_time
if flags["debug"]:
transit_total_skips = \
sum([skip_counter[key] for key in skip_counter if key.startswith("Transit")])
level_density_total_skips = \
sum([skip_counter[key] for key in skip_counter if key.startswith("Level density")])
n_transitions_included = n_transitions - transit_total_skips
n_levels_included = n_levels - level_density_total_skips
print("--------------------------------")
print(f"{transit_gsf_time = } s")
print(f"{level_density_gsf_time = } s")
print(f"{gsf_time = } s")
print(f"{avg_gsf_time = } s")
print(f"{total_gsf_time = } s")
print(f"{multipole_type = }")
for elem in skip_counter:
print(f"Skips: {elem}: {skip_counter[elem]}")
# print(f"{skip_counter = }")
print(f"{transit_total_skips = }")
print(f"{n_transitions = }")
print(f"{n_transitions_included = }")
print(f"{level_density_total_skips = }")
print(f"{n_levels = }")
print(f"{n_levels_included = }")
print("--------------------------------")
if return_n_transitions:
return bins, gSF_ExJpiavg, n_transitions_array
else:
return bins, gSF_ExJpiavg
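
def _example_gsf_usage(res):
    """
    Illustrative usage sketch, not part of the original module. 'res' is
    assumed to be a ReadKshellOutput-like object exposing 'levels' and an
    M1 'transitions' array in the formats documented above; the attribute
    name 'transitions_BM1' is hypothetical.
    """
    bins, gsf = gamma_strength_function_average(
        levels = res.levels,                # [[E, 2*spin, parity, idx], ...]
        transitions = res.transitions_BM1,  # hypothetical attribute name
        bin_width = 0.2,    # MeV
        Ex_min = 5,         # MeV
        Ex_max = 10,        # MeV
        multipole_type = "M1",
    )
    return bins, gsf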
def level_plot(
levels: np.ndarray,
include_n_levels: int = 1_000,
filter_spins: Union[None, list] = None,
ax: Union[None, plt.Axes] = None
):
"""
Generate a level plot for a single isotope. Spin on the x axis,
energy on the y axis.
Parameters
----------
levels : np.ndarray
NxM array of [[energy, spin, parity], ...]. This is the instance
attribute 'levels' of ReadKshellOutput.
include_n_levels : int
        The maximum number of states to plot for each spin. Default set
        to a large number to indicate ≈ no limit.
filter_spins : Union[None, list]
Which spins to include in the plot. If None, all spins are
plotted.
ax : Union[None, plt.Axes]
matplotlib Axes to plot on. If None, plt.Figure and plt.Axes is
generated in this function.
"""
ax_input = False if (ax is None) else True
if levels[0, 0] != 0:
"""
Adjust energies relative to the ground state energy.
"""
energies = levels[:, 0] - levels[0, 0]
else:
energies = levels[:, 0]
spins = levels[:, 1]/2 # levels[:, 1] is 2*spin.
parity_symbol = "+" if levels[0, 2] == 1 else "-"
if filter_spins is not None:
spin_scope = np.unique(filter_spins) # x values for the plot.
else:
spin_scope = np.unique(spins)
counts = {} # Dict to keep tabs on how many states of each spin have been plotted.
line_width = np.abs(spins[0] - spins[1])/4*0.9
if not ax_input:
fig, ax = plt.subplots()
for i in range(len(energies)):
if filter_spins is not None:
if spins[i] not in filter_spins:
"""
Skip spins which are not in the filter.
"""
continue
try:
counts[spins[i]] += 1
except KeyError:
counts[spins[i]] = 1
if counts[spins[i]] > include_n_levels:
"""
Include only the first 'include_n_levels' amount of states
for any of the spins.
"""
continue
ax.hlines(
y = energies[i],
xmin = spins[i] - line_width,
xmax = spins[i] + line_width,
color = "black"
)
ax.set_xticks(spin_scope)
ax.set_xticklabels([f"{Fraction(i)}" + f"$^{parity_symbol}$" for i in spin_scope])
ax.set_xlabel("Spin")
ax.set_ylabel("E [MeV]")
if not ax_input:
plt.show()
def level_density(
levels: np.ndarray,
bin_width: Union[int, float],
include_n_levels: Union[None, int] = None,
filter_spins: Union[None, int, list] = None,
filter_parity: Union[None, str, int] = None,
E_min: Union[None, float, int] = None,
E_max: Union[None, float, int] = None,
plot: bool = False,
save_plot: bool = False
) -> Tuple[np.ndarray, np.ndarray]:
"""
Calculate the level density for a given bin size.
Parameters
----------
levels : Union[np.ndarray, list]
Nx4 array of [[E, 2*spin, parity, idx], ...] or 1D array / list
of only energies.
bin_width : Union[int, float]
Energy interval of which to calculate the density.
include_n_levels : Union[None, int]
The number of states per spin to include. Example:
        include_n_levels = 100 will include only the 100 lowest-lying
states for each spin.
filter_spins : Union[None, int, list]
Keep only the levels which have angular momenta in the filter.
If None, all angular momenta are kept. Input must be the actual
angular momenta values and not 2*j.
filter_parity : Union[None, str, int]
Keep only levels of parity 'filter_parity'. +1, -1, '+', '-'
allowed inputs.
E_min : Union[None, float, int]
Minimum energy to include in the calculation. If None, the
minimum energy in the levels array is used.
E_max : Union[None, float, int]
Maximum energy to include in the calculation. If None, the
maximum energy in the levels array is used.
plot : bool
For toggling plotting on / off.
save_plot : bool
        Toggle saving of plot (as .png with dpi=300) on / off.
Returns
-------
bins : np.ndarray
The corresponding bins (x value for plotting).
density : np.ndarray
The level density.
Raises
------
ValueError:
If any filter is given when energy_levels is a list of only
energy levels.
TypeError:
If input parameters are of the wrong type.
"""
if not isinstance(levels, np.ndarray):
levels = np.array(levels)
if not isinstance(filter_spins, (int, float, list, type(None), np.ndarray)):
msg = f"'filter_spins' must be of type: int, float, list, None. Got {type(filter_spins)}."
raise TypeError(msg)
if not isinstance(include_n_levels, (int, type(None))):
msg = f"'include_n_levels' must be of type: int, None. Got {type(include_n_levels)}."
raise TypeError(msg)
if not isinstance(filter_parity, (type(None), int, str)):
msg = f"'filter_parity' must be of type: None, int, str. Got {type(filter_parity)}."
raise TypeError(msg)
if not isinstance(E_min, (type(None), int, float)):
msg = f"'E_min' must be of type: None, int, float. Got {type(E_min)}."
raise TypeError(msg)
if not isinstance(E_max, (type(None), int, float)):
msg = f"'E_max' must be of type: None, int, float. Got {type(E_max)}."
raise TypeError(msg)
if isinstance(filter_parity, str):
valid_filter_parity = ["+", "-"]
if filter_parity not in valid_filter_parity:
msg = f"Valid parity filters are: {valid_filter_parity}."
raise ValueError(msg)
filter_parity = 1 if (filter_parity == "+") else -1
if isinstance(filter_spins, (int, float)):
filter_spins = [filter_spins]
if (levels.ndim == 1) and (filter_spins is not None):
msg = "Spin filter cannot be applied to a list of only energy levels!"
raise ValueError(msg)
if (levels.ndim == 1) and (include_n_levels is not None):
msg = "Cannot choose the number of levels per spin if 'levels' is only a list of energies!"
raise ValueError(msg)
if (levels.ndim == 1) and (filter_parity is not None):
msg = "Parity filter cannot be applied to a list of only energy levels!"
raise ValueError(msg)
if levels.ndim == 1:
"""
'levels' only contain energy values.
"""
energy_levels = levels
else:
"""
'levels' is a multidimensional array on the form
[[E, 2*spin, parity, idx], ...].
"""
energy_levels = np.copy(levels) # Copy just in case.
if include_n_levels is not None:
"""
Include ony 'include_n_levels' of levels per angular
momentum and parity pair.
"""
indices = energy_levels[:, 3] # Counter for the number of levels per spin.
energy_levels = energy_levels[indices <= include_n_levels]
if filter_spins is not None:
"""
            filter_spins is a list of angular momenta. Inside this if
            statement we know that 'levels' is a multidimensional array
            because the 1D case was rejected by the input checks above.
"""
filter_spins = [2*j for j in filter_spins] # energy_levels has 2*j to avoid fractions.
mask_list = []
for j in filter_spins:
"""
Create a [bool1, bool2, ...] mask for each j.
"""
mask_list.append(energy_levels[:, 1] == j)
energy_levels = energy_levels[np.logical_or.reduce(mask_list)] # Contains only levels of j in the filter.
if filter_parity is not None:
energy_levels = energy_levels[energy_levels[:, 2] == filter_parity]
energy_levels = energy_levels[:, 0]
if levels.ndim == 1:
"""
Decide the max value of the energy bins.
"""
if levels[0] != 0:
"""
Calculate energies relative to the ground state if not already
done.
"""
energy_levels -= energy_levels[0]
bin_max = levels[-1] - levels[0] # The max energy of the un-filtered data set.
else:
bin_max = levels[-1]
else:
"""
Decide the max value of the energy bins.
"""
if levels[0, 0] != 0:
"""
Calculate energies relative to the ground state if not already
done.
"""
energy_levels -= energy_levels[0]
bin_max = levels[-1, 0] - levels[0, 0] # The max energy of the un-filtered data set.
else:
bin_max = levels[-1, 0]
if E_min is not None:
energy_levels = energy_levels[energy_levels >= E_min]
if E_max is not None:
energy_levels = energy_levels[energy_levels <= E_max]
bins = np.arange(0, bin_max + bin_width, bin_width)
n_bins = len(bins)
counts = np.zeros(n_bins)
for i in range(n_bins - 1):
counts[i] = np.sum(bins[i] <= energy_levels[energy_levels < bins[i + 1]])
density = (counts/bin_width)[:-1]
bins = bins[1:]
if plot:
fig, ax = plt.subplots()
ax.step(bins, density, color="black")
ax.set_ylabel(r"Density [MeV$^{-1}$]")
ax.set_xlabel("E [MeV]")
ax.legend([f"{bin_width=} MeV"])
ax.grid()
if save_plot:
fname = "nld.png"
print(f"NLD saved as '{fname}'")
fig.savefig(fname=fname, dpi=300)
plt.show()
return bins, density
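
def _example_level_density(levels):
    """
    Illustrative usage sketch, not part of the original module: count
    levels in 0.25 MeV bins, keeping only the 60 lowest-lying states for
    each of the 2+ and 4+ spin groups.
    """
    bins, density = level_density(
        levels = levels,        # [[E, 2*spin, parity, idx], ...]
        bin_width = 0.25,       # MeV
        include_n_levels = 60,
        filter_spins = [2, 4],
        filter_parity = "+",
    )
    return bins, density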
def porter_thomas(
transitions: np.ndarray,
Ei: Union[int, float, list],
BXL_bin_width: Union[int, float],
j_list: Union[list, None] = None,
Ei_bin_width: Union[int, float] = 0.1,
return_chi2: bool = False,
) -> tuple[np.ndarray, np.ndarray, Optional[np.ndarray]]:
"""
    Calculate the distribution of B(XL)/mean(B(XL)) values and scale it to
    a chi-squared distribution with one degree of freedom.
Parameters
----------
transitions : np.ndarray
Array containing transition data for the specified
multipolarity.
[2*spin_initial, parity_initial, idx_initial, Ex_initial,
2*spin_final, parity_final, idx_final, Ex_final, E_gamma,
B(XL, i->f), B(XL, f<-i)]
Ei : int, float, list
The initial excitation energy of the transitions where the
distribution will be calculated. If Ei is only a number, then a
bin of size 'Ei_bin_width' around Ei will be used. If Ei is a
list, tuple, or array with both a lower and an upper limit, then
all excitation energies in that interval will be used.
    BXL_bin_width : int, float
        The bin size of the BXL values for the distribution (not the Ei
        bin size!).
    j_list : list, None
        Initial angular momenta (j values) to keep. If None, transitions
        of all initial spins are included.
    Ei_bin_width : int, float
        The size of the initial energy bin if 'Ei' is only one number.
        Will not be used if 'Ei' is both a lower and an upper limit.
return_chi2 : bool
If True, the chi-squared distribution y values will be returned
as a third return value.
Returns
-------
BXL_bins : np.ndarray
The BXL bins (x values).
BXL_counts : np.ndarray
The number of counts in each BXL_bins bin (y values).
rv.pdf(BXL_bins) : np.ndarray
The chi-squared distribution y values.
"""
pt_prepare_data_time = time.perf_counter()
if isinstance(Ei, (list, tuple, np.ndarray)):
"""
If Ei defines a lower and an upper limit.
"""
Ei_mask = np.logical_and(
transitions[:, 3] >= Ei[0],
transitions[:, 3] < Ei[-1]
)
BXL = transitions[Ei_mask]
else:
BXL = transitions[np.abs(transitions[:, 3] - Ei) < Ei_bin_width] # Consider only levels around Ei.
if j_list is not None:
"""
Create a mask of j values for the transitions array. Allow only
entries with initial angular momenta in j_list.
"""
if not isinstance(j_list, list):
msg = f"j_list must be of type list! Got {type(j_list)}."
raise TypeError(msg)
j_list = [2*j for j in j_list] # Angular momenta are stored as 2*j to avoid fractions.
mask_list = []
for j in j_list:
"""
Create a [bool1, bool2, ...] mask for each j.
"""
mask_list.append(BXL[:, 0] == j)
BXL = BXL[np.logical_or.reduce(mask_list)] # Contains only transitions of j in the filter.
# BXL = np.copy(BXL[:, 9]) # The 9th col. is the reduced decay transition probabilities.
n_BXL_before = len(BXL)
idxi_masks = []
pii_masks = []
ji_masks = []
BXL_tmp = []
initial_indices = np.unique(BXL[:, 2]).astype(int)
initial_parities = np.unique(BXL[:, 1]).astype(int)
initial_j = np.unique(BXL[:, 0])
for idxi in initial_indices:
idxi_masks.append(BXL[:, 2] == idxi)
for pii in initial_parities:
pii_masks.append(BXL[:, 1] == pii)
for ji in initial_j:
ji_masks.append(BXL[:, 0] == ji)
for pii in pii_masks:
for idxi in idxi_masks:
for ji in ji_masks:
mask = np.logical_and(ji, np.logical_and(pii, idxi))
tmp = BXL[mask][:, 9] # 9 is B decay.
if not tmp.size:
"""
Some combinations of masks might not match any
levels.
"""
continue
BXL_tmp.extend(tmp/tmp.mean())
BXL = np.asarray(BXL_tmp)
BXL.sort()
# BXL = BXL/np.mean(BXL)
n_BXL_after = len(BXL)
if n_BXL_before != n_BXL_after:
msg = "The number of BXL values has changed during the Porter-Thomas analysis!"
msg += f" This should not happen! {n_BXL_before = }, {n_BXL_after = }."
raise RuntimeError(msg)
BXL_bins = np.arange(0, BXL[-1] + BXL_bin_width, BXL_bin_width)
n_BXL_bins = len(BXL_bins)
BXL_counts = np.zeros(n_BXL_bins)
pt_prepare_data_time = time.perf_counter() - pt_prepare_data_time
pt_count_time = time.perf_counter()
for i in range(n_BXL_bins - 1):
"""
Calculate the number of transitions with BXL values between
BXL_bins[i] and BXL_bins[i + 1].
"""
BXL_counts[i] = np.sum(BXL_bins[i] <= BXL[BXL < BXL_bins[i + 1]])
pt_count_time = time.perf_counter() - pt_count_time
pt_post_process_time = time.perf_counter()
rv = chi2(1)
BXL_counts = BXL_counts[1:]
BXL_bins = BXL_bins[1:]
BXL_counts /= np.trapz(BXL_counts) # Normalize counts.
# popt, _ = curve_fit(
# f = lambda x, scale: scale*rv.pdf(x),
# xdata = BXL_bins,
# ydata = BXL_counts,
# p0 = [rv.pdf(BXL_bins)[1]/BXL_counts[1]],
# method = "lm",
# )
# BXL_counts *= popt[0] # Scale counts to match chi2.
BXL_counts *= np.mean(rv.pdf(BXL_bins)[1:20]/BXL_counts[1:20])
pt_post_process_time = time.perf_counter() - pt_post_process_time
if flags["debug"]:
print("--------------------------------")
print(f"Porter-Thomas: Prepare data time: {pt_prepare_data_time:.3f} s")
print(f"Porter-Thomas: Count time: {pt_count_time:.3f} s")
print(f"Porter-Thomas: Post process time: {pt_post_process_time:.3f} s")
print("--------------------------------")
if return_chi2:
return BXL_bins, BXL_counts, rv.pdf(BXL_bins)
else:
return BXL_bins, BXL_counts
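# Minimal usage sketch (hedged: 'transitions' must follow the column layout
# documented in the docstring, and the numbers are illustrative only):
# bins, counts, chi2_pdf = porter_thomas(
#     transitions, Ei=[4.0, 6.0], BXL_bin_width=0.1, j_list=[1, 2],
#     return_chi2=True,
# )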
def nuclear_shell_model():
"""
Generate a diagram of the nuclear shell model shell structure.
"""
plt.rcParams.update({
"backend": "pgf",
"text.usetex": True,
"font.family": "serif",
"font.serif": ["roman"],
"legend.fontsize": 14,
"xtick.labelsize": 15,
"ytick.labelsize": 15,
"axes.labelsize": 14,
"axes.titlesize": 15,
})
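    # NOTE: the fig.text y coordinates below map data-space level heights to
    # figure-fraction coordinates; the divisors (~14) and the 0.067 offset
    # are hand-tuned layout constants.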
fig, ax = plt.subplots(figsize=(6.4, 8))
ax.axis(False)
fontsize = 15
x_offset = 0.6
x_text_offset = x_offset - 0.5
first_layer_labels = [
r"$1s$", r"$1p$", r"$1d$", r"$2s$", r"$1f$", r"$2p$", r"$1g$",
r"$2d$", r"$3s$"
]
first_layer_y = [1, 2.4, 4.2, 4.45, 6.3, 6.8, 9, 10.0, 10.5]
second_layer_labels = [
r"$1s_{1/2}$", r"$1p_{3/2}$", r"$1p_{1/2}$", r"$1d_{5/2}$",
r"$2s_{1/2}$", r"$1d_{3/2}$", r"$1f_{7/2}$", r"$2p_{3/2}$",
r"$1f_{5/2}$", r"$2p_{1/2}$", r"$1g_{9/2}$", r"$2d_{5/2}$",
r"$1g_{7/2}$", r"$3s_{1/2}$", r"$2d_{3/2}$"
]
second_layer_y = [
first_layer_y[0], first_layer_y[1] - 0.15, first_layer_y[1] + 0.15,
first_layer_y[2] - 0.3, first_layer_y[3], first_layer_y[2] + 0.51,
first_layer_y[4] - 0.6, first_layer_y[5] - 0.10, first_layer_y[4] + 0.7,
first_layer_y[5] + 0.5, first_layer_y[6] - 1.0, first_layer_y[7] - 0.4,
first_layer_y[6] + 0.9, first_layer_y[7] + 0.8, first_layer_y[8]
]
dash_layer = [
[2 + x_offset, first_layer_y[0], 2.5 + x_offset, second_layer_y[0]],
[2 + x_offset, first_layer_y[1], 2.5 + x_offset, second_layer_y[1]],
[2 + x_offset, first_layer_y[1], 2.5 + x_offset, second_layer_y[2]],
[2 + x_offset, first_layer_y[2], 2.5 + x_offset, second_layer_y[3]],
[2 + x_offset, first_layer_y[2], 2.5 + x_offset, second_layer_y[5]],
[2 + x_offset, first_layer_y[3], 2.5 + x_offset, second_layer_y[4]],
[2 + x_offset, first_layer_y[4], 2.5 + x_offset, second_layer_y[6]],
[2 + x_offset, first_layer_y[4], 2.5 + x_offset, second_layer_y[8]],
[2 + x_offset, first_layer_y[5], 2.5 + x_offset, second_layer_y[7]],
[2 + x_offset, first_layer_y[5], 2.5 + x_offset, second_layer_y[9]],
[2 + x_offset, first_layer_y[6], 2.5 + x_offset, second_layer_y[10]],
[2 + x_offset, first_layer_y[7], 2.5 + x_offset, second_layer_y[11]],
[2 + x_offset, first_layer_y[6], 2.5 + x_offset, second_layer_y[12]],
[2 + x_offset, first_layer_y[7], 2.5 + x_offset, second_layer_y[13]],
[2 + x_offset, first_layer_y[8], 2.5 + x_offset, second_layer_y[14]],
]
core_layer_labels = [
r"$^{16}$O", r"$^{40}$Ca", r"$^{56}$Ni"
]
core_layer_y = [
second_layer_y[2] + 0.5, second_layer_y[5] + 0.5, second_layer_y[6] + 0.5
]
occupations = [
2, 4, 2, 6, 2, 4, 8, 4, 6, 2, 10, 6, 8, 2, 4
]
occupations_y = second_layer_y
cum_occupations = [
2, 8, 20, 28, 50
]
cum_occupations_y = [
second_layer_y[0], second_layer_y[2], second_layer_y[5],
second_layer_y[6], second_layer_y[10]
]
ax.hlines( # To force the width of the figure.
y = 1,
xmin = 3.5 + x_offset,
xmax = 4.5 + x_offset,
color = "white"
)
ax.hlines( # To force the width of the figure.
y = 1,
xmin = 1,
xmax = 2,
color = "white"
)
for y, label in zip(first_layer_y, first_layer_labels):
ax.hlines(
y = y,
xmin = 1 + x_offset,
xmax = 2 + x_offset,
color = "black",
)
fig.text(
x = 0.12 + x_text_offset,
y = y/13.95 + 0.067,
s = label,
fontsize = fontsize
)
for y, label in zip(second_layer_y, second_layer_labels):
ax.hlines(
y = y,
xmin = 2.5 + x_offset,
xmax = 3.5 + x_offset,
color = "black",
)
fig.text(
x = 0.6 + x_text_offset,
y = y/14.2 + 0.067,
s = label,
fontsize = fontsize
)
for x1, y1, x2, y2 in dash_layer:
ax.plot([x1, x2], [y1, y2], linestyle="dashed", color="black")
for occupation, y in zip(occupations, occupations_y):
fig.text(
x = 0.69 + x_text_offset,
y = y/14.2 + 0.067,
s = occupation,
fontsize = fontsize - 1
)
for occupation, y in zip(cum_occupations, cum_occupations_y):
fig.text(
x = 0.73 + x_text_offset,
y = y/14.2 + 0.067,
s = occupation,
fontsize = fontsize - 1
)
for y, label in zip(core_layer_y, core_layer_labels):
fig.text(
x = 0.77 + x_text_offset,
y = y/14 + 0.067,
s = label,
fontsize = fontsize - 1
)
fig.text(
x = 0.73 + x_text_offset,
y = y/14 + 0.064,
s = "---------",
fontsize = fontsize - 1
)
# USD
x1 = 1.35
x2 = 1.25
y1 = 4.9
y2 = 3.83
ax.vlines(
x = x2,
ymin = y2,
ymax = y1,
color = "darkorange",
)
fig.text(
x = 0.15,
y = 0.37,
s = "USD",
fontsize = 12,
rotation = "vertical",
color = "darkorange"
)
# GXPF
y3 = 7.5
y4 = 5.6
ax.vlines(
x = x2,
ymin = y4,
ymax = y3,
color = "firebrick",
)
fig.text(
x = 0.15,
y = 0.52,
s = "GXPF",
fontsize = 12,
rotation = "vertical",
color = "firebrick"
)
# SDPF-MU
x4 = x2 - 0.04
ax.vlines(
x = x4,
ymin = y2,
ymax = y3,
color = "royalblue",
)
fig.text(
x = 0.14,
y = 0.42,
s = "SDPF-MU",
fontsize = 12,
rotation = "vertical",
color = "royalblue"
)
    # JUN45
y7 = 6.5
y8 = 8.2
x6 = x4 - 0.04
ax.vlines(
x = x6,
ymin = y8,
ymax = y7,
color = "green",
)
fig.text(
x = 0.14,
y = 0.59,
s = "JUN45",
fontsize = 12,
rotation = "vertical",
color = "green"
)
# SDPF-SDG
ax.vlines(
x = x6 - 0.04,
ymin = y2,
ymax = 11,
color = "mediumorchid",
)
fig.text(
x = 0.15,
y = 0.66,
s = "SDPF-SDG",
fontsize = 12,
rotation = "vertical",
color = "mediumorchid"
)
# Spectroscopic notation
fig.text(
x = 0.45,
y = 0.93,
s = r"$s \;\; p \;\; d \;\; f \;\; g \;\; h$",
fontsize = fontsize - 1,
color = "black",
)
fig.text(
x = 0.45,
y = 0.92,
s = "------------------",
fontsize = fontsize - 1,
color = "black",
)
fig.text(
x = 0.415,
y = 0.90,
s = r"$l: 0 \;\; 1 \;\; 2 \;\; 3 \;\; 4 \;\; 5$",
fontsize = fontsize - 1,
color = "black",
)
fig.text(
x = 0.405,
y = 0.88,
s = r"$\pi:+ - + - \, + \, -$",
fontsize = fontsize - 1,
color = "black",
)
fig.savefig(fname="nuclear_shell_model.png", dpi=500)#, format="eps")
plt.show() | 35.093979 | 133 | 0.584062 | 6,660 | 47,798 | 4.016366 | 0.107958 | 0.013458 | 0.013159 | 0.011365 | 0.416315 | 0.313171 | 0.254813 | 0.202363 | 0.191559 | 0.173464 | 0 | 0.025953 | 0.305933 | 47,798 | 1,362 | 134 | 35.093979 | 0.780317 | 0.231139 | 0 | 0.235537 | 0 | 0.001377 | 0.146309 | 0.02537 | 0 | 0 | 0 | 0.002937 | 0 | 1 | 0.009642 | false | 0 | 0.012397 | 0 | 0.03168 | 0.030303 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bf403f982d94a4a3a64570839b16b0a546a44c7 | 4,734 | py | Python | foreman/data_refinery_foreman/foreman/management/commands/remove_dead_computed_files.py | AlexsLemonade/refinebio | 52f44947f902adedaccf270d5f9dbd56ab47e40a | [
"BSD-3-Clause"
] | 106 | 2018-03-05T16:24:47.000Z | 2022-03-19T19:12:25.000Z | foreman/data_refinery_foreman/foreman/management/commands/remove_dead_computed_files.py | AlexsLemonade/refinebio | 52f44947f902adedaccf270d5f9dbd56ab47e40a | [
"BSD-3-Clause"
] | 1,494 | 2018-02-27T17:02:21.000Z | 2022-03-24T15:10:30.000Z | foreman/data_refinery_foreman/foreman/management/commands/remove_dead_computed_files.py | AlexsLemonade/refinebio | 52f44947f902adedaccf270d5f9dbd56ab47e40a | [
"BSD-3-Clause"
] | 15 | 2019-02-03T01:34:59.000Z | 2022-03-29T01:59:13.000Z | import time
from django.core.management.base import BaseCommand
from django.db.models import Q
from data_refinery_common.job_management import create_downloader_job
from data_refinery_common.models import (
ComputationalResult,
ComputedFile,
Sample,
SampleComputedFileAssociation,
SampleResultAssociation,
)
from data_refinery_common.performant_pagination.pagination import PAGE_SIZE, PerformantPaginator
def requeue_sample(sample, dry_run=False):
sample_was_requeued = False
if sample.is_processed:
has_live_computational_results = False
for result in sample.results.all():
live_files = result.computedfile_set.filter(
s3_bucket__isnull=False, s3_key__isnull=False
)
if live_files.count() >= 1:
has_live_computational_results = True
live_computed_files = sample.computed_files.filter(
s3_bucket__isnull=False, s3_key__isnull=False
)
        if not (has_live_computational_results or live_computed_files.count() > 0):
sample_was_requeued = True
if not dry_run:
# There's no live computed files, the sample
# should not have been marked processed.
sample.is_processed = False
sample.save()
create_downloader_job(sample.original_files.all(), force=True)
return sample_was_requeued
def requeue_samples(sample_queryset, dry_run=False):
paginator = PerformantPaginator(sample_queryset, PAGE_SIZE)
page = paginator.page()
# Loop through the samples one by one to see if they've been
# erroneously marked as processed. If so, mark them as
# unprocessed, and kick off a new job so they can get
# processed correctly.
# Do this before deleting the computed files in case we get
# interrupted. It'll be harder to tell what samples were
# erroneously marked as processed.
while True:
counter = 0
for sample in page.object_list:
if requeue_sample(sample, dry_run):
counter += 1
            # requeue_sample makes database calls, so it's not a good idea
            # to call it in a loop without sleeping.
time.sleep(1)
print(f"Requeued {counter} samples in that page.")
if not page.has_next():
break
else:
page = paginator.page(page.next_page_number())
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
"--dry-run",
help="Prints what it would do, without doing any of it.",
action="store_true",
)
def handle(self, *args, **options):
"""Removes computed files that weren't uploaded to S3.
It also cleans up computational results that are relying on these computed files.
It also marks samples that are processed as unprocessed if
they don't have another live computed file or result, and then
requeues a downloader job for them to be processed and have
their files uploaded to s3.
"""
dead_computed_files = ComputedFile.objects.filter(
Q(s3_bucket__isnull=True) | Q(s3_key__isnull=True)
)
file_sample_assocs = SampleComputedFileAssociation.objects.filter(
computed_file_id__in=dead_computed_files.values("id")
)
file_associated_samples = Sample.objects.filter(
id__in=file_sample_assocs.values("sample_id")
)
requeue_samples(file_associated_samples, options["dry_run"])
# TODO: Clear out ComputationalResults and their associated compendium results.
        # What about file->result->sample? Don't some of them get associated
        # only indirectly?
        # Some ComputedFiles are only linked to samples indirectly
        # through their ComputationalResults. We also need to run
        # requeue_sample on them.
dead_computational_results = ComputationalResult.objects.filter(
id__in=dead_computed_files.values("result_id").distinct()
)
result_sample_assocs = SampleResultAssociation.objects.filter(
result_id__in=dead_computational_results.values("id")
)
result_associated_samples = Sample.objects.filter(
id__in=result_sample_assocs.values("sample_id")
)
requeue_samples(result_associated_samples, options["dry_run"])
print(f"Deleting {dead_computational_results.count()} ComputationalResults")
print(f"Deleting {dead_computed_files.count()} ComputedFiles")
if not options["dry_run"]:
dead_computational_results.delete()
dead_computed_files.delete()
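# Usage sketch (a Django management command; the command name comes from this
# file's name and --dry-run is declared above):
#   python manage.py remove_dead_computed_files --dry-run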
| 37.275591 | 96 | 0.672159 | 571 | 4,734 | 5.343257 | 0.327496 | 0.051131 | 0.02786 | 0.021632 | 0.134382 | 0.098328 | 0.080629 | 0.028187 | 0.028187 | 0.028187 | 0 | 0.003712 | 0.260245 | 4,734 | 126 | 97 | 37.571429 | 0.867504 | 0.242712 | 0 | 0.025316 | 0 | 0 | 0.07909 | 0.018492 | 0 | 0 | 0 | 0.007937 | 0 | 1 | 0.050633 | false | 0 | 0.075949 | 0 | 0.151899 | 0.037975 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bf434cc0ef7433c21a6631c3f7a5a7c4dc59f0c | 565 | py | Python | baidu.py | PatricAlhanmox/WebCrawler | e4dca496e538f0c5a36e9980dffe6fcf36b22ff8 | [
"MIT"
] | null | null | null | baidu.py | PatricAlhanmox/WebCrawler | e4dca496e538f0c5a36e9980dffe6fcf36b22ff8 | [
"MIT"
] | null | null | null | baidu.py | PatricAlhanmox/WebCrawler | e4dca496e538f0c5a36e9980dffe6fcf36b22ff8 | [
"MIT"
] | null | null | null | import requests
import json
# Every word lookup sends the string in an AJAX (POST) request; the response
# is a set of JSON data.
if __name__ == "__main__":
url = 'https://fanyi.baidu.com/sug'
ua = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'}
word = input("enter a word: ")
param = {
'kw' : word
}
respond = requests.post(url = url, data = param, headers = ua)
fileName = word + '.json'
dic = respond.json()
json.dump(dic, fp=open(fileName, 'w', encoding='UTF-8'), ensure_ascii=False) | 35.3125 | 143 | 0.638938 | 78 | 565 | 4.512821 | 0.74359 | 0.028409 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.066225 | 0.19823 | 565 | 16 | 144 | 35.3125 | 0.710817 | 0.077876 | 0 | 0 | 0 | 0.076923 | 0.359615 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bf497f6696caf6b66edc0f87bde10226bd66d34 | 5,145 | py | Python | logistic_regression.py | franpena-kth/learning-deep-learning | 9cd287b602dee1358672c4189445721a9c24f107 | [
"MIT"
] | null | null | null | logistic_regression.py | franpena-kth/learning-deep-learning | 9cd287b602dee1358672c4189445721a9c24f107 | [
"MIT"
] | null | null | null | logistic_regression.py | franpena-kth/learning-deep-learning | 9cd287b602dee1358672c4189445721a9c24f107 | [
"MIT"
] | null | null | null | import time
import numpy
from matplotlib import pyplot
import data_loader
import utils
def initialize_parameters(n_dims):
    # The shape of W for layer l is (n_l, n_l-1). In the logistic regression
    # case the output layer size is n_1 = 1 and the input layer size is
    # n_0 = X.shape[0], so w has shape (n_dims, 1).
w = numpy.zeros((n_dims, 1))
b = 0
assert (w.shape == (n_dims, 1))
assert (isinstance(b, float) or isinstance(b, int))
return w, b
def calculate_Z(X, w, b):
return numpy.dot(w.T, X) + b
def calculate_A(Z):
return utils.sigmoid(Z)
def forward_step(X, w, b):
A = calculate_A(calculate_Z(X, w, b))
return A
def calculate_cost(A, Y):
m = Y.shape[1]
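    # Binary cross-entropy, averaged over the m training examples.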
cost = -(1.0/m) * numpy.sum(Y * numpy.log(A) + (1 - Y) * numpy.log(1 - A))
return cost
def backward_step(A, X, Y):
m = X.shape[1]
dw = (1.0/m) * numpy.dot(X, (A-Y).T)
db = (1.0/m) * numpy.sum(A-Y)
grads = {"dw": dw, "db": db}
return grads
def propagate(w, b, X, Y):
z = calculate_Z(X, w, b)
A = calculate_A(z)
cost = calculate_cost(A, Y)
grads = backward_step(A, X, Y)
assert (grads["dw"].shape == w.shape)
assert (grads["db"].dtype == float)
cost = numpy.squeeze(cost)
assert (cost.shape == ())
return grads, cost
def update_parameters(W, b, dW, db, learning_rate):
W = W - learning_rate * dW
b = b - learning_rate * db
return W, b
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost=False):
costs = []
for i in range(num_iterations):
grads, cost = propagate(w, b, X, Y)
dw = grads["dw"]
db = grads["db"]
        w, b = update_parameters(w, b, dw, db, learning_rate)  # Reuse the helper defined above.
# Record the costs
if i % 100 == 0:
costs.append(cost)
# Print the cost every 100 training iterations
if print_cost and i % 100 == 0:
print("Cost after iteration %i: %f" % (i, cost))
params = {"w": w, "b": b}
grads = {"dw": dw, "db": db}
print(costs)
print(len(costs))
return params, grads, costs
def predict(w, b, X):
m = X.shape[1]
Y_prediction = numpy.zeros((1, m))
A = calculate_A(calculate_Z(X, w, b))
for i in range(A.shape[1]):
if A[0, i] <= 0.5:
Y_prediction[0, i] = 0
else:
Y_prediction[0, i] = 1
assert (Y_prediction.shape == (1, m))
return Y_prediction
def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):
w, b = initialize_parameters(X_train.shape[0])
params, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
w = params["w"]
b = params["b"]
Y_prediction_train = predict(w, b, X_train)
Y_prediction_test = predict(w, b, X_test)
# Print train/test Errors
print("train accuracy: {} %".format(100 - numpy.mean(numpy.abs(Y_prediction_train - Y_train)) * 100))
print("test accuracy: {} %".format(100 - numpy.mean(numpy.abs(Y_prediction_test - Y_test)) * 100))
d = {"costs": costs,
"Y_prediction_test": Y_prediction_test,
"Y_prediction_train": Y_prediction_train,
"w": w,
"b": b,
"learning_rate": learning_rate,
"num_iterations": num_iterations}
# print(costs)
# print(len(costs))
return d
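# Usage sketch (hedged: mirrors the commented-out lines in main() below;
# the hyperparameters are illustrative):
# train_x_orig, train_y, test_x_orig, test_y, classes = data_loader.load_dataset()
# train_x, test_x = data_loader.preprocess_dataset(train_x_orig, test_x_orig)
# d = model(train_x, train_y, test_x, test_y, num_iterations=2000,
#           learning_rate=0.005, print_cost=True)
# plot_costs(d)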
def plot_costs(d):
# Plot learning curve (with costs)
costs = numpy.squeeze(d['costs'])
pyplot.plot(costs)
pyplot.ylabel('cost')
pyplot.xlabel('iterations (per hundreds)')
pyplot.title("Learning rate =" + str(d["learning_rate"]))
pyplot.show()
def search_learning_rates():
learning_rates = [0.01, 0.001, 0.0001]
models = {}
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = data_loader.load_dataset()
train_set_x, test_set_x = data_loader.preprocess_dataset(train_set_x_orig, test_set_x_orig)
for i in learning_rates:
print("learning rate is: " + str(i))
models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations=1500, learning_rate=i,
print_cost=False)
print('\n' + "-------------------------------------------------------" + '\n')
for i in learning_rates:
pyplot.plot(numpy.squeeze(models[str(i)]["costs"]), label=str(models[str(i)]["learning_rate"]))
pyplot.ylabel('cost')
pyplot.xlabel('iterations (hundreds)')
legend = pyplot.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
pyplot.show()
def main():
# train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
# train_set_x, test_set_x = preprocess_dataset(train_set_x_orig, test_set_x_orig)
#
# d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations=2000, learning_rate=0.005,
# print_cost=False)
# plot_costs(d)
search_learning_rates()
start = time.time()
main()
end = time.time()
total_time = end - start
print("%s: Total time = %f seconds" % (time.strftime("%Y/%m/%d-%H:%M:%S"), total_time))
| 24.975728 | 118 | 0.606997 | 793 | 5,145 | 3.737705 | 0.177806 | 0.012821 | 0.024292 | 0.016194 | 0.352901 | 0.281714 | 0.183536 | 0.183536 | 0.148448 | 0.118084 | 0 | 0.021961 | 0.238873 | 5,145 | 205 | 119 | 25.097561 | 0.734934 | 0.117979 | 0 | 0.117647 | 0 | 0 | 0.087774 | 0.01216 | 0 | 0 | 0 | 0 | 0.05042 | 1 | 0.117647 | false | 0 | 0.042017 | 0.016807 | 0.252101 | 0.109244 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bf4ba34310e253a7a51922c07f12be5c9c48a82 | 2,002 | py | Python | tests/unit/test_proto_mapping.py | nitoqq/mercator | 179c3a66559ca91e360a5f435f1cfacfe7bf07d7 | [
"MIT"
] | 17 | 2019-03-07T18:49:46.000Z | 2022-02-06T16:26:09.000Z | tests/unit/test_proto_mapping.py | nitoqq/mercator | 179c3a66559ca91e360a5f435f1cfacfe7bf07d7 | [
"MIT"
] | 6 | 2019-07-26T10:28:54.000Z | 2021-02-02T06:50:59.000Z | tests/unit/test_proto_mapping.py | nitoqq/mercator | 179c3a66559ca91e360a5f435f1cfacfe7bf07d7 | [
"MIT"
] | 6 | 2019-07-26T07:17:08.000Z | 2021-10-02T05:32:52.000Z | # -*- coding: utf-8 -*-
from mock import patch
from google.protobuf.timestamp_pb2 import Timestamp
from mercator import ProtoKey, ProtoMapping
from mercator.errors import ProtobufCastError
class MyCustomObjectWithTimestampData:
"""this class is used as input for tests where proto mappings declare
__source_input_type__.
"""
def __init__(self, seconds):
self.seconds = seconds
class TimestampMapping(ProtoMapping):
__proto__ = Timestamp
__source_input_type__ = MyCustomObjectWithTimestampData
seconds = ProtoKey('seconds', int)
def test_proto_mapping_from_invalid():
"ProtoMapping.to_protobuf() should raise exception if input value is invalid"
mapping = TimestampMapping({'seconds': {'invalid': 'val'}})
when_called = mapping.to_protobuf.when.called
when_called.should.have.raised(
ProtobufCastError,
'int() argument must be a string, a bytes-like object or a number, not \'dict\' while casting "{\'invalid\': \'val\'}" (dict) to int'
)
def test_proto_mapping_to_dict_when_none():
"ProtoMapping.to_dict() should return empty dict if input value None"
mapping = TimestampMapping(None)
result = mapping.to_dict()
result.should.be.a(dict)
result.should.equal({})
def test_proto_mapping_object_incompatible_with_source_input_type():
"ProtoMapping.to_dict() should raise TypeError if the input object is not a subclass of __source_input_type__"
class DummyObject:
def __repr__(self):
return '<dummy_object>'
dummy_object = DummyObject()
mapping = TimestampMapping(dummy_object)
when_called = mapping.to_dict.when.called
when_called.should.have.raised(
TypeError,
"<dummy_object> must be a dict "
"or <class 'tests.unit.test_proto_mapping.MyCustomObjectWithTimestampData'> "
"but is <class 'tests.unit.test_proto_mapping.test_proto_mapping_object_incompatible_with_source_input_type.<locals>.DummyObject'> instead")
| 30.333333 | 148 | 0.731269 | 242 | 2,002 | 5.739669 | 0.342975 | 0.038877 | 0.069114 | 0.041037 | 0.203024 | 0.171346 | 0.12815 | 0.076314 | 0.076314 | 0 | 0 | 0.001216 | 0.178322 | 2,002 | 65 | 149 | 30.8 | 0.843161 | 0.182318 | 0 | 0.052632 | 0 | 0 | 0.340267 | 0.144 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131579 | false | 0 | 0.105263 | 0.026316 | 0.421053 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bf4e1a62b25cb47b09c6d6b1abf891455562d63 | 13,224 | py | Python | project/moco/plot_benchmark_KNN.py | das-projects/sufficient_dimension_reduction | 515346562995d8f875fc9c7d9824bd43aa75b0a8 | [
"Apache-2.0"
] | 2 | 2022-01-28T09:48:33.000Z | 2022-03-26T03:15:44.000Z | project/moco/plot_benchmark_KNN.py | das-projects/selfsupervised-learning | e023952fe5fd38c79324dcb80bb889362484a6bc | [
"Apache-2.0"
] | null | null | null | project/moco/plot_benchmark_KNN.py | das-projects/selfsupervised-learning | e023952fe5fd38c79324dcb80bb889362484a6bc | [
"Apache-2.0"
] | null | null | null | """
K-Nearest Neighbours search (WIP)
===========================================================
Let's compare the performances of PyTorch, JAX, FAISS and KeOps fpr
K-NN queries on random samples and standard datasets.
.. note::
In this demo, we use exact **bruteforce** computations
(tensorized for PyTorch and online for KeOps), without leveraging any multiscale
or low-rank (Nystroem/multipole) decomposition of the Kernel matrix.
First support for these approximation schemes is scheduled for
May-June 2021.
"""
##############################################
# Setup
# ---------------------
import numpy as np
import torch
from matplotlib import pyplot as plt
from functools import partial
from benchmark_utils import (
flatten,
random_normal,
full_benchmark,
timer,
tensor,
int_tensor,
jax_tensor,
)
from dataset_utils import generate_samples
use_cuda = torch.cuda.is_available()
##############################################
# Benchmark specifications:
#
# Values of K that we'll loop upon:
Ks = [1, 2, 5, 10, 20, 50, 100]
##############################################
# Simple bruteforce implementations
# ------------------------------------------
#
# Define a simple Gaussian RBF product, using a **tensorized** implementation.
# Note that expanding the squared norm :math:`\|x-y\|^2` as a sum
# :math:`\|x\|^2 - 2 \langle x, y \rangle + \|y\|^2` allows us
# to leverage the fast matrix-matrix product of the BLAS/cuBLAS
# libraries.
#
#
# PyTorch bruteforce:
#
"""
def KNN_KeOps(K, metric="euclidean", **kwargs):
def fit(x_train):
# Setup the K-NN estimator:
x_train = tensor(x_train)
start = timer()
# N.B.: The "training" time here should be negligible.
elapsed = timer() - start
def f(x_test):
x_test = tensor(x_test)
start = timer()
# Actual K-NN query:
elapsed = timer() - start
indices = indices.cpu().numpy()
return indices, elapsed
return f, elapsed
return fit
"""
def KNN_torch(K, metric="euclidean", **kwargs):
def fit(x_train):
# Setup the K-NN estimator:
x_train = tensor(x_train)
start = timer()
# The "training" time here should be negligible:
x_train_norm = (x_train ** 2).sum(-1)
elapsed = timer() - start
def f(x_test):
x_test = tensor(x_test)
start = timer()
# Actual K-NN query:
if metric == "euclidean":
x_test_norm = (x_test ** 2).sum(-1)
diss = (
x_test_norm.view(-1, 1)
+ x_train_norm.view(1, -1)
- 2 * x_test @ x_train.t()
)
elif metric == "manhattan":
diss = (x_test[:, None, :] - x_train[None, :, :]).abs().sum(dim=2)
elif metric == "angular":
diss = -x_test @ x_train.t()
elif metric == "hyperbolic":
x_test_norm = (x_test ** 2).sum(-1)
diss = (
x_test_norm.view(-1, 1)
+ x_train_norm.view(1, -1)
- 2 * x_test @ x_train.t()
)
diss /= x_test[:, 0].view(-1, 1) * x_train[:, 0].view(1, -1)
out = diss.topk(K, dim=1, largest=False)
elapsed = timer() - start
indices = out.indices.cpu().numpy()
return indices, elapsed
return f, elapsed
return fit
#############################################################################
# PyTorch bruteforce, with small batches to avoid memory overflows:
def KNN_torch_batch_loop(K, metric="euclidean", **kwargs):
def fit(x_train):
# Setup the K-NN estimator:
x_train = tensor(x_train)
Ntrain, D = x_train.shape
start = timer()
# The "training" time here should be negligible:
x_train_norm = (x_train ** 2).sum(-1)
elapsed = timer() - start
def f(x_test):
x_test = tensor(x_test)
# Estimate the largest reasonable batch size:
Ntest = x_test.shape[0]
# torch.cuda.get_device_properties(deviceId).total_memory
av_mem = int(5e8)
Ntest_loop = min(max(1, av_mem // (4 * D * Ntrain)), Ntest)
Nloop = (Ntest - 1) // Ntest_loop + 1
# print(f"{Ntest} queries, split in {Nloop} batches of {Ntest_loop} queries each.")
out = int_tensor(Ntest, K)
start = timer()
# Actual K-NN query:
for k in range(Nloop):
x_test_k = x_test[Ntest_loop * k : Ntest_loop * (k + 1), :]
if metric == "euclidean":
x_test_norm = (x_test_k ** 2).sum(-1)
diss = (
x_test_norm.view(-1, 1)
+ x_train_norm.view(1, -1)
- 2 * x_test_k @ x_train.t()
)
elif metric == "manhattan":
diss = (x_test_k[:, None, :] - x_train[None, :, :]).abs().sum(dim=2)
elif metric == "angular":
diss = -x_test_k @ x_train.t()
elif metric == "hyperbolic":
x_test_norm = (x_test_k ** 2).sum(-1)
diss = (
x_test_norm.view(-1, 1)
+ x_train_norm.view(1, -1)
- 2 * x_test_k @ x_train.t()
)
diss /= x_test_k[:, 0].view(-1, 1) * x_train[:, 0].view(1, -1)
out[Ntest_loop * k : Ntest_loop * (k + 1), :] = diss.topk(
K, dim=1, largest=False
).indices
del diss
# torch.cuda.empty_cache()
elapsed = timer() - start
indices = out.cpu().numpy()
return indices, elapsed
return f, elapsed
return fit
############################################################################
# Distance matrices with JAX:
from functools import partial
import jax
import jax.numpy as jnp
@partial(jax.jit, static_argnums=(2, 3))
def knn_jax_fun(x_train, x_test, K, metric):
if metric == "euclidean":
diss = (
(x_test ** 2).sum(-1)[:, None]
+ (x_train ** 2).sum(-1)[None, :]
- 2 * x_test @ x_train.T
)
elif metric == "manhattan":
diss = jax.lax.abs(x_test[:, None, :] - x_train[None, :, :]).sum(-1)
elif metric == "angular":
diss = -x_test @ x_train.T
elif metric == "hyperbolic":
diss = (
(x_test ** 2).sum(-1)[:, None]
+ (x_train ** 2).sum(-1)[None, :]
- 2 * x_test @ x_train.T
)
diss = diss / (x_test[:, 0][:, None] * x_train[:, 0][None, :])
indices = jax.lax.top_k(-diss, K)[1]
return indices
############################################################################
# JAX bruteforce:
def KNN_JAX(K, metric="euclidean", **kwargs):
def fit(x_train):
# Setup the K-NN estimator:
start = timer(use_torch=False)
x_train = jax_tensor(x_train)
elapsed = timer(use_torch=False) - start
def f(x_test):
x_test = jax_tensor(x_test)
# Actual K-NN query:
start = timer(use_torch=False)
indices = knn_jax_fun(x_train, x_test, K, metric)
indices = np.array(indices)
elapsed = timer(use_torch=False) - start
return indices, elapsed
return f, elapsed
return fit
#############################################################################
# JAX bruteforce, with small batches to avoid memory overflows:
def KNN_JAX_batch_loop(K, metric="euclidean", **kwargs):
def fit(x_train):
# Setup the K-NN estimator:
start = timer(use_torch=False)
x_train = jax_tensor(x_train)
elapsed = timer(use_torch=False) - start
def f(x_test):
x_test = jax_tensor(x_test)
# Estimate the largest reasonable batch size
# torch.cuda.get_device_properties(deviceId).total_memory
av_mem = int(5e8)
Ntrain, D = x_train.shape
Ntest = x_test.shape[0]
Ntest_loop = min(max(1, av_mem // (4 * D * Ntrain)), Ntest)
Nloop = (Ntest - 1) // Ntest_loop + 1
# print(f"{Ntest} queries, split in {Nloop} batches of {Ntest_loop} queries each.")
indices = np.zeros((Ntest, K), dtype=int)
start = timer(use_torch=False)
# Actual K-NN query:
for k in range(Nloop):
x_test_k = x_test[Ntest_loop * k : Ntest_loop * (k + 1), :]
indices[Ntest_loop * k : Ntest_loop * (k + 1), :] = knn_jax_fun(
x_train, x_test_k, K, metric
)
elapsed = timer(use_torch=False) - start
return indices, elapsed
return f, elapsed
return fit
############################################################################
# KeOps bruteforce implementation:
#
from pykeops.torch import LazyTensor, Vi, Vj
def KNN_KeOps(K, metric="euclidean", **kwargs):
def fit(x_train):
# Setup the K-NN estimator:
x_train = tensor(x_train)
start = timer()
# Encoding as KeOps LazyTensors:
D = x_train.shape[1]
X_i = Vi(0, D)
X_j = Vj(1, D)
# Symbolic distance matrix:
if metric == "euclidean":
D_ij = ((X_i - X_j) ** 2).sum(-1)
elif metric == "manhattan":
D_ij = (X_i - X_j).abs().sum(-1)
elif metric == "angular":
D_ij = -(X_i | X_j)
elif metric == "hyperbolic":
D_ij = ((X_i - X_j) ** 2).sum(-1) / (X_i[0] * X_j[0])
# K-NN query operator:
KNN_fun = D_ij.argKmin(K, dim=1)
# N.B.: The "training" time here should be negligible.
elapsed = timer() - start
def f(x_test):
x_test = tensor(x_test)
start = timer()
# Actual K-NN query:
indices = KNN_fun(x_test, x_train)
elapsed = timer() - start
indices = indices.cpu().numpy()
return indices, elapsed
return f, elapsed
return fit
################################################################################
# SciKit-Learn tree-based and bruteforce methods
# -----------------------------------------------------
#
from sklearn.neighbors import NearestNeighbors
def KNN_sklearn(K, metric="euclidean", algorithm=None, **kwargs):
if metric in ["euclidean", "angular"]:
p = 2
elif metric == "manhattan":
p = 1
else:
raise NotImplementedError("This distance is not supported.")
KNN_meth = NearestNeighbors(n_neighbors=K, algorithm=algorithm, p=p, n_jobs=-1)
def fit(x_train):
# Setup the K-NN estimator:
start = timer()
KNN_fun = KNN_meth.fit(x_train).kneighbors
elapsed = timer() - start
def f(x_test):
start = timer()
distances, indices = KNN_fun(x_test)
elapsed = timer() - start
return indices, elapsed
return f, elapsed
return fit
KNN_sklearn_auto = partial(KNN_sklearn, algorithm="auto")
KNN_sklearn_ball_tree = partial(KNN_sklearn, algorithm="ball_tree")
KNN_sklearn_kd_tree = partial(KNN_sklearn, algorithm="kd_tree")
KNN_sklearn_brute = partial(KNN_sklearn, algorithm="brute")
##############################################
# NumPy vs. PyTorch vs. KeOps (Gpu)
# --------------------------------------------------------
def run_KNN_benchmark(name):
# Load the dataset and some info:
dataset = generate_samples(name)(1)
N_train, dimension = dataset["train"].shape
N_test, _ = dataset["test"].shape
metric = dataset["metric"]
# Routines to benchmark:
routines = [
(KNN_sklearn_auto, "sklearn, auto (CPU)", {}),
(KNN_sklearn_ball_tree, "sklearn, Ball-tree (CPU)", {}),
(KNN_sklearn_kd_tree, "sklearn, KD-tree (CPU)", {}),
(KNN_sklearn_brute, "sklearn, bruteforce (CPU)", {}),
(KNN_torch, "PyTorch (GPU)", {}),
(KNN_torch_batch_loop, "PyTorch (small batches, GPU)", {}),
(KNN_KeOps, "KeOps (GPU)", {}),
(KNN_JAX, "JAX (GPU)", {}),
(KNN_JAX_batch_loop, "JAX (small batches, GPU)", {}),
]
# Actual run:
full_benchmark(
f"K-NN search on {name}: {N_test:,} queries on a dataset of {N_train:,} points\nin dimension {dimension:,} with a {metric} metric.",
routines,
generate_samples(name),
min_time=1e-4,
max_time=10,
problem_sizes=Ks,
xlabel="Number of neighbours K",
)
##############################################
# On random samples:
# --------------------------------------------------------
#
# Small dataset in :math:`\mathbb{R}^3`:
run_KNN_benchmark("R^D a")
########################################
# Large dataset in :math:`\mathbb{R}^3`:
run_KNN_benchmark("R^D b")
plt.show()
| 29.192053 | 140 | 0.500832 | 1,564 | 13,224 | 4.056905 | 0.168798 | 0.046493 | 0.019858 | 0.012608 | 0.575414 | 0.529866 | 0.520883 | 0.501024 | 0.492199 | 0.455477 | 0 | 0.013418 | 0.306791 | 13,224 | 452 | 141 | 29.256637 | 0.678739 | 0.187538 | 0 | 0.540426 | 0 | 0.004255 | 0.066553 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085106 | false | 0 | 0.046809 | 0 | 0.212766 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bf50df97fe273af7db7554074587b4833fc3f7a | 17,601 | py | Python | flaml/onlineml/trial.py | goncaloperes/FLAML | 0ba58e0acecc788670a1b28f7ceb5908746ec6fc | [
"MIT"
] | 1,747 | 2020-12-05T00:14:58.000Z | 2022-03-31T20:54:09.000Z | flaml/onlineml/trial.py | goncaloperes/FLAML | 0ba58e0acecc788670a1b28f7ceb5908746ec6fc | [
"MIT"
] | 229 | 2020-12-14T06:19:16.000Z | 2022-03-31T05:20:27.000Z | flaml/onlineml/trial.py | goncaloperes/FLAML | 0ba58e0acecc788670a1b28f7ceb5908746ec6fc | [
"MIT"
] | 260 | 2020-12-13T09:24:41.000Z | 2022-03-27T04:09:51.000Z | import numpy as np
import logging
import time
import math
import copy
import collections
from typing import Optional, Union
from sklearn.metrics import mean_squared_error, mean_absolute_error
from flaml.tune import Trial
logger = logging.getLogger(__name__)
def get_ns_feature_dim_from_vw_example(vw_example) -> dict:
"""Get a dictionary of feature dimensionality for each namespace singleton
NOTE:
Assumption: assume the vw_example takes one of the following format
depending on whether the example includes the feature names
format 1: 'y |ns1 feature1:feature_value1 feature2:feature_value2 |ns2
ns2 feature3:feature_value3 feature4:feature_value4'
format 2: 'y | ns1 feature_value1 feature_value2 |
ns2 feature_value3 feature_value4'
The output of both cases are {'ns1': 2, 'ns2': 2}
For more information about the input formate of vw example, please refer to
https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Input-format
"""
ns_feature_dim = {}
data = vw_example.split('|')
for i in range(1, len(data)):
if ':' in data[i]:
ns_w_feature = data[i].split(' ')
ns = ns_w_feature[0]
feature = ns_w_feature[1:]
            feature_dim = len(feature)
            if feature and len(feature[-1]) == 0:
                # Mirror the else branch: don't count the empty token left
                # by a trailing space.
                feature_dim -= 1
else:
data_split = data[i].split(' ')
ns = data_split[0]
feature_dim = len(data_split) - 1
if len(data_split[-1]) == 0:
feature_dim -= 1
ns_feature_dim[ns] = feature_dim
logger.debug('name space feature dimension %s', ns_feature_dim)
return ns_feature_dim
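# Example (format 1 from the docstring above):
# get_ns_feature_dim_from_vw_example('y |ns1 f1:0.5 f2:1.0') -> {'ns1': 2}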
class OnlineResult:
"""Class for managing the result statistics of a trial
Attributes:
observation_count: the total number of observations
        resource_used: the total amount of resource consumed
Methods:
update_result(new_loss, new_resource_used, data_dimension)
Update result
get_score(score_name)
Get the score according to the input score_name
"""
prob_delta = 0.1
LOSS_MIN = 0.0
LOSS_MAX = np.inf
CB_COEF = 0.05 # 0.001 for mse
def __init__(self, result_type_name: str, cb_coef: Optional[float] = None,
init_loss: Optional[float] = 0.0, init_cb: Optional[float] = 100.0,
mode: Optional[str] = 'min', sliding_window_size: Optional[int] = 100):
"""
Args:
result_type_name (str): The name of the result type
"""
self._result_type_name = result_type_name # for example 'mse' or 'mae'
self._mode = mode
self._init_loss = init_loss
# statistics needed for alg
self.observation_count = 0
self.resource_used = 0.0
self._loss_avg = 0.0
self._loss_cb = init_cb # a large number (TODO: this can be changed)
self._cb_coef = cb_coef if cb_coef is not None else self.CB_COEF
# optional statistics
self._sliding_window_size = sliding_window_size
self._loss_queue = collections.deque(maxlen=self._sliding_window_size)
def update_result(self, new_loss, new_resource_used, data_dimension,
bound_of_range=1.0, new_observation_count=1.0):
"""Update result statistics
"""
self.resource_used += new_resource_used
        # Keep the running average instead of the sum of losses to avoid
        # overflow.
self._loss_avg = self._loss_avg * (self.observation_count / (self.observation_count + new_observation_count)
) + new_loss / (self.observation_count + new_observation_count)
self.observation_count += new_observation_count
self._loss_cb = self._update_loss_cb(bound_of_range, data_dimension)
self._loss_queue.append(new_loss)
def _update_loss_cb(self, bound_of_range, data_dim,
bound_name='sample_complexity_bound'):
"""Calculate bound coef
"""
if bound_name == 'sample_complexity_bound':
# set the coefficient in the loss bound
if 'mae' in self.result_type_name:
coef = self._cb_coef * bound_of_range
else:
coef = 0.001 * bound_of_range
comp_F = math.sqrt(data_dim)
n = self.observation_count
return coef * comp_F * math.sqrt((np.log10(n / OnlineResult.prob_delta)) / n)
else:
raise NotImplementedError
@property
def result_type_name(self):
return self._result_type_name
@property
def loss_avg(self):
return self._loss_avg if \
self.observation_count != 0 else self._init_loss
@property
def loss_cb(self):
return self._loss_cb
@property
def loss_lcb(self):
return max(self._loss_avg - self._loss_cb, OnlineResult.LOSS_MIN)
@property
def loss_ucb(self):
return min(self._loss_avg + self._loss_cb, OnlineResult.LOSS_MAX)
@property
def loss_avg_recent(self):
return sum(self._loss_queue) / len(self._loss_queue) \
if len(self._loss_queue) != 0 else self._init_loss
def get_score(self, score_name, cb_ratio=1):
if 'lcb' in score_name:
return max(self._loss_avg - cb_ratio * self._loss_cb, OnlineResult.LOSS_MIN)
elif 'ucb' in score_name:
return min(self._loss_avg + cb_ratio * self._loss_cb, OnlineResult.LOSS_MAX)
elif 'avg' in score_name:
return self._loss_avg
else:
raise NotImplementedError
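# Minimal usage sketch for OnlineResult (hedged; the numbers are illustrative):
# res = OnlineResult('mae_clipped')
# res.update_result(new_loss=0.4, new_resource_used=1.0, data_dimension=25)
# res.get_score('ucb')  # loss_avg plus one confidence-bound width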
class BaseOnlineTrial(Trial):
"""Class for online trial.
Attributes:
config: the config for this trial
trial_id: the trial_id of this trial
        min_resource_lease (float): the minimum resource lease
status: the status of this trial
start_time: the start time of this trial
custom_trial_name: a custom name for this trial
Methods:
set_resource_lease(resource)
set_status(status)
set_checked_under_current_champion(checked_under_current_champion)
"""
def __init__(self,
config: dict,
min_resource_lease: float,
is_champion: Optional[bool] = False,
is_checked_under_current_champion: Optional[bool] = True,
custom_trial_name: Optional[str] = 'mae',
trial_id: Optional[str] = None,
):
"""
Args:
config: the config dict
            min_resource_lease: the minimum resource lease
is_champion: a bool variable
is_checked_under_current_champion: a bool variable
custom_trial_name: custom trial name
trial_id: the trial id
"""
# ****basic variables
self.config = config
self.trial_id = trial_id
self.status = Trial.PENDING
self.start_time = time.time()
self.custom_trial_name = custom_trial_name
# ***resource budget related variable
self._min_resource_lease = min_resource_lease
self._resource_lease = copy.copy(self._min_resource_lease)
# ***champion related variables
self._is_champion = is_champion
        # self._is_checked_under_current_champion is supposed to always be
        # True when the trial is first created.
self._is_checked_under_current_champion = is_checked_under_current_champion
@property
def is_champion(self):
return self._is_champion
@property
def is_checked_under_current_champion(self):
return self._is_checked_under_current_champion
@property
def resource_lease(self):
return self._resource_lease
def set_checked_under_current_champion(self, checked_under_current_champion: bool):
"""TODO: add documentation why this is needed. This is needed because sometimes
we want to know whether a trial has been paused since a new champion is promoted.
We want to try to pause those running trials (even though they are not yet achieve
the next scheduling check point according to resource used and resource lease),
because a better trial is likely to be in the new challengers generated by the new
champion, so we want to try them as soon as possible.
If we wait until we reach the next scheduling point, we may waste a lot of resource
(depending on what is the current resource lease) on the old trials (note that new
trials is not possible to be scheduled to run until there is a slot openning).
Intuitively speaking, we want to squize an opening slot as soon as possible once
a new champion is promoted, such that we are able to try newly generated challengers.
"""
self._is_checked_under_current_champion = checked_under_current_champion
def set_resource_lease(self, resource: float):
self._resource_lease = resource
def set_status(self, status):
"""Sets the status of the trial and record the start time
"""
self.status = status
if status == Trial.RUNNING:
if self.start_time is None:
self.start_time = time.time()
class VowpalWabbitTrial(BaseOnlineTrial):
"""Implement BaseOnlineTrial for Vowpal Wabbit
Attributes:
model: the online model
result: the anytime result for the online model
trainable_class: the model class (set as pyvw.vw for VowpalWabbitTrial)
config: the config for this trial
trial_id: the trial_id of this trial
        min_resource_lease (float): the minimum resource lease
status: the status of this trial
start_time: the start time of this trial
custom_trial_name: a custom name for this trial
Methods:
set_resource_lease(resource)
set_status(status)
set_checked_under_current_champion(checked_under_current_champion)
NOTE:
About result:
1. training related results (need to be updated in the trainable class)
2. result about resources lease (need to be updated externally)
About namespaces in vw:
- Wiki in vw:
https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Namespaces
- Namespace vs features:
https://stackoverflow.com/questions/28586225/in-vowpal-wabbit-what-is-the-difference-between-a-namespace-and-feature
"""
cost_unit = 1.0
interactions_config_key = 'interactions'
MIN_RES_CONST = 5
def __init__(self,
config: dict,
min_resource_lease: float,
metric: str = 'mae',
is_champion: Optional[bool] = False,
is_checked_under_current_champion: Optional[bool] = True,
custom_trial_name: Optional[str] = 'vw_mae_clipped',
trial_id: Optional[str] = None,
cb_coef: Optional[float] = None,
):
"""Constructor
Args:
            config (dict): the config of the trial (note that some config
                values are sets, because set-valued hyperparameters such as
                the feature interactions are stored as sets)
min_resource_lease (float): the minimum resource lease
metric (str): the loss metric
is_champion (bool): indicates whether the trial is the current champion or not
is_checked_under_current_champion (bool): indicates whether this trials has
been paused under the current champion
trial_id (str): id of the trial (if None, it will be generated in the constructor)
"""
try:
from vowpalwabbit import pyvw
except ImportError:
raise ImportError(
'To use AutoVW, please run pip install flaml[vw] to install vowpalwabbit')
# attributes
self.trial_id = self._config_to_id(config) if trial_id is None else trial_id
logger.info('Create trial with trial_id: %s', self.trial_id)
super().__init__(config, min_resource_lease, is_champion, is_checked_under_current_champion,
custom_trial_name, self.trial_id)
self.model = None # model is None until the config is scheduled to run
self.result = None
self.trainable_class = pyvw.vw
# variables that are needed during online training
self._metric = metric
self._y_min_observed = None
self._y_max_observed = None
# application dependent variables
self._dim = None
self._cb_coef = cb_coef
@staticmethod
def _config_to_id(config):
"""Generate an id for the provided config
"""
# sort config keys
sorted_k_list = sorted(list(config.keys()))
config_id_full = ''
for key in sorted_k_list:
v = config[key]
config_id = '|'
if isinstance(v, set):
value_list = sorted(v)
config_id += '_'.join([str(k) for k in value_list])
else:
config_id += str(v)
config_id_full = config_id_full + config_id
return config_id_full
def _initialize_vw_model(self, vw_example):
"""Initialize a vw model using the trainable_class
"""
self._vw_config = self.config.copy()
ns_interactions = self.config.get(VowpalWabbitTrial.interactions_config_key, None)
# ensure the feature interaction config is a list (required by VW)
if ns_interactions is not None:
self._vw_config[VowpalWabbitTrial.interactions_config_key] \
= list(ns_interactions)
# get the dimensionality of the feature according to the namespace configuration
namespace_feature_dim = get_ns_feature_dim_from_vw_example(vw_example)
self._dim = self._get_dim_from_ns(namespace_feature_dim, ns_interactions)
# construct an instance of vw model using the input config and fixed config
self.model = self.trainable_class(**self._vw_config)
self.result = OnlineResult(self._metric,
cb_coef=self._cb_coef,
init_loss=0.0, init_cb=100.0,)
def train_eval_model_online(self, data_sample, y_pred):
"""Train and eval model online
"""
# extract info needed the first time we see the data
if self._resource_lease == 'auto' or self._resource_lease is None:
assert self._dim is not None
self._resource_lease = self._dim * self.MIN_RES_CONST
y = self._get_y_from_vw_example(data_sample)
self._update_y_range(y)
if self.model is None:
# initialize self.model and self.result
self._initialize_vw_model(data_sample)
# do one step of learning
self.model.learn(data_sample)
# update training related results accordingly
new_loss = self._get_loss(y, y_pred, self._metric,
self._y_min_observed, self._y_max_observed)
        # Update sample size, sum of loss, and cost.
data_sample_size = 1
bound_of_range = self._y_max_observed - self._y_min_observed
if bound_of_range == 0:
bound_of_range = 1.0
self.result.update_result(new_loss,
VowpalWabbitTrial.cost_unit * data_sample_size,
self._dim, bound_of_range)
def predict(self, x):
"""Predict using the model
"""
if self.model is None:
# initialize self.model and self.result
self._initialize_vw_model(x)
return self.model.predict(x)
def _get_loss(self, y_true, y_pred, loss_func_name, y_min_observed, y_max_observed):
"""Get instantaneous loss from y_true and y_pred, and loss_func_name
For mae_clip, we clip y_pred in the observed range of y
"""
if 'mse' in loss_func_name or 'squared' in loss_func_name:
loss_func = mean_squared_error
elif 'mae' in loss_func_name or 'absolute' in loss_func_name:
loss_func = mean_absolute_error
if y_min_observed is not None and y_max_observed is not None and \
'clip' in loss_func_name:
# clip y_pred in the observed range of y
y_pred = min(y_max_observed, max(y_pred, y_min_observed))
else:
raise NotImplementedError
return loss_func([y_true], [y_pred])
def _update_y_range(self, y):
"""Maintain running observed minimum and maximum target value
"""
if self._y_min_observed is None or y < self._y_min_observed:
self._y_min_observed = y
if self._y_max_observed is None or y > self._y_max_observed:
self._y_max_observed = y
@staticmethod
    def _get_dim_from_ns(namespace_feature_dim: dict, namespace_interactions: Union[set, list]):
        """Get the total feature dimensionality implied by the input
        namespaces and their interactions.
        """
total_dim = sum(namespace_feature_dim.values())
if namespace_interactions:
for f in namespace_interactions:
ns_dim = 1.0
for c in f:
ns_dim *= namespace_feature_dim[c]
total_dim += ns_dim
return total_dim
def clean_up_model(self):
self.model = None
self.result = None
@staticmethod
def _get_y_from_vw_example(vw_example):
"""Get y from a vw_example. this works for regression datasets.
"""
return float(vw_example.split('|')[0])
| 40.276888 | 124 | 0.639452 | 2,308 | 17,601 | 4.59662 | 0.162045 | 0.028184 | 0.032237 | 0.04581 | 0.296635 | 0.213027 | 0.189179 | 0.139598 | 0.122255 | 0.096333 | 0 | 0.00772 | 0.293449 | 17,601 | 436 | 125 | 40.369266 | 0.845368 | 0.336629 | 0 | 0.177686 | 0 | 0 | 0.023635 | 0.004166 | 0 | 0 | 0 | 0.004587 | 0.004132 | 1 | 0.115702 | false | 0 | 0.049587 | 0.03719 | 0.285124 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bf5d22c6be087c1eebcaa7ec45e76813b16c9c3 | 2,985 | py | Python | db/base.py | wisedier/flask-sqlalchemy-api | 07619ecac75ee5215e7d7f56c95fec9f371c8ee0 | [
"MIT"
] | null | null | null | db/base.py | wisedier/flask-sqlalchemy-api | 07619ecac75ee5215e7d7f56c95fec9f371c8ee0 | [
"MIT"
] | null | null | null | db/base.py | wisedier/flask-sqlalchemy-api | 07619ecac75ee5215e7d7f56c95fec9f371c8ee0 | [
"MIT"
] | null | null | null | import logging
import sys
import colorama
import inflection
from sqlalchemy import MetaData, create_engine, exc
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.util import OrderedSet
from config import config
from db import query
db_engine = create_engine(
config.SQLALCHEMY_DATABASE_URI,
echo=config.SQLALCHEMY_ECHO,
convert_unicode=True,
)
Session = scoped_session(
sessionmaker(
autocommit=False,
autoflush=False,
expire_on_commit=False,
bind=db_engine,
query_cls=query.Query,
)
)
session = Session()
class DeclarativeBase(object):
exclude_modules = OrderedSet(['db', 'models'])
@declared_attr
def __tablename__(self):
names = self.__module__.split('.') + inflection.underscore(self.__name__).split('_')
names = list(OrderedSet(names) - self.exclude_modules)
names[-1] = inflection.pluralize(names[-1])
return '_'.join(names)
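    # Example: a class `UserProfile` in module `models.account` maps to the
    # table name 'account_user_profiles' ('db'/'models' segments dropped,
    # last word pluralized).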
class Base(declarative_base(cls=DeclarativeBase, metadata=MetaData()), object):
__abstract__ = True
query = Session.query_property()
session = session
@classmethod
def commit(cls):
try:
cls.session.commit()
except Exception:
cls.session.rollback()
raise
@classmethod
def create(cls, **kwargs):
# noinspection PyArgumentList
instance = cls(**kwargs)
return instance.save(commit=False)
@classmethod
def get_by(cls, **kwargs):
return cls.query.filter_by(**kwargs).first()
def update(self, **kwargs):
commit = kwargs.get('commit', False)
for field in kwargs:
setattr(self, field, kwargs[field])
self.save(commit=commit)
def save(self, *, commit=False):
self.session.add(self)
if commit:
try:
self.session.commit()
except exc.SQLAlchemyError:
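                # SQLAlchemy errors are swallowed here (the session is left
                # as-is); any other error triggers a rollback below.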
pass
except Exception:
self.session.rollback()
raise
return self
def delete(self, *, commit=True):
self.session.delete(self)
if commit:
try:
self.session.commit()
except Exception:
self.session.rollback()
raise
if config.SQLALCHEMY_ECHO:
logger = logging.getLogger('sqlalchemy.engine.base.Engine')
logger.setLevel(logging.INFO)
for handler in logger.handlers:
logger.removeHandler(handler)
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(
''.join([
colorama.Style.BRIGHT,
colorama.Fore.MAGENTA,
'%(asctime)s: ',
colorama.Fore.CYAN,
'%(message)s',
colorama.Fore.RESET,
colorama.Style.RESET_ALL
])
)
handler.setFormatter(formatter)
logger.addHandler(handler)
| 26.651786 | 92 | 0.620101 | 305 | 2,985 | 5.931148 | 0.35082 | 0.036484 | 0.031509 | 0.030956 | 0.081813 | 0.081813 | 0.042012 | 0.042012 | 0 | 0 | 0 | 0.000931 | 0.280402 | 2,985 | 111 | 93 | 26.891892 | 0.841248 | 0.009045 | 0 | 0.191489 | 0 | 0 | 0.023681 | 0.009811 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074468 | false | 0.010638 | 0.106383 | 0.010638 | 0.287234 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bf6e399a3305b2859b26c928e51625a914a2f88 | 775 | py | Python | points_container.py | jagatiyakeval/rrt_simulator | e09f6ae57765c05d6af571bcdaf8eff694021e97 | [
"MIT"
] | null | null | null | points_container.py | jagatiyakeval/rrt_simulator | e09f6ae57765c05d6af571bcdaf8eff694021e97 | [
"MIT"
] | null | null | null | points_container.py | jagatiyakeval/rrt_simulator | e09f6ae57765c05d6af571bcdaf8eff694021e97 | [
"MIT"
] | null | null | null | from utils import dist
class pointsContainer:
"""
An ADT to store 2D points and find a point's nearest neighbor.
To solve the Nearest Neighbor Search problem (NNS), I chose to
do a linear search, since it is much simpler than other
approaches and produces a reasonably good result here.
Time complexities:
- insert: O(1)
- NNS: O(N), where N is the number of points currently inside the container.
"""
def __init__(self):
self._points = []
def insert(self, point):
self._points.append(point)
def NNS(self, point):
best = self._points[0] # will throw IndexError if self._points is empty.
bestDist = dist(best, point)
for p in self._points:
pDist = dist(p, point)
if pDist < bestDist:
best = p
bestDist = pDist
return best
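if __name__ == '__main__':
    # Quick demo (sketch; assumes utils.dist is a Euclidean distance over
    # 2D points such as (x, y) tuples).
    container = pointsContainer()
    for point in [(0, 0), (3, 4), (10, 10)]:
        container.insert(point)
    print(container.NNS((2, 3)))  # nearest stored point: (3, 4)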
| 25.833333 | 79 | 0.696774 | 119 | 775 | 4.462185 | 0.588235 | 0.094162 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004959 | 0.219355 | 775 | 29 | 80 | 26.724138 | 0.872727 | 0.523871 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.066667 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bf6fb5705520f37e9cf36dbf13e8e9f37ba74a7 | 4,331 | py | Python | accessories/views.py | brekib18/ShipOC | c83b8f9f18587bf9adf35c685d6589acc5d3b012 | [
"MIT"
] | null | null | null | accessories/views.py | brekib18/ShipOC | c83b8f9f18587bf9adf35c685d6589acc5d3b012 | [
"MIT"
] | null | null | null | accessories/views.py | brekib18/ShipOC | c83b8f9f18587bf9adf35c685d6589acc5d3b012 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.shortcuts import get_object_or_404, redirect
from django.http import JsonResponse
from accessories.forms.accessories_form import AccessoriesCreateForm, AccessoriesUpdateForm
from accessories.models import Accessories, AccessoriesImage
from django.contrib.auth.decorators import login_required
# Create your views here.
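# The redirects in the views below resolve the URL names 'accessories-index'
# and 'accessories-details'; a matching urls.py might look like this
# (sketch only -- the exact path patterns are an assumption):
#
#     urlpatterns = [
#         path('', views.index, name='accessories-index'),
#         path('<int:id>/', views.get_accessories_by_id, name='accessories-details'),
#     ]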
def index(request):
    if 'sort_button' in request.GET:
        # Map each sort option to its ordering field; unknown values fall
        # back to alphabetical ordering.
        order_fields = {
            'alphabetical': 'name',
            'price_low': 'price',
            'price_high': '-price',
        }
        order_by = order_fields.get(request.GET['sort_button'], 'name')
        # Assumes every accessory has at least one related image.
        accessories = [{
            'id': elem.id,
            'name': elem.name,
            'description': elem.description,
            'firstImage': elem.accessoriesimage_set.first().image,
            'price': elem.price
        } for elem in Accessories.objects.order_by(order_by)]
        return JsonResponse({'data': accessories})
    if 'search_filter' in request.GET:
        search_filter = request.GET['search_filter']
        accessories = [{
            'id': x.id,
            'name': x.name,
            'description': x.description,
            'firstImage': x.accessoriesimage_set.first().image,
            'price': x.price
        } for x in Accessories.objects.filter(name__icontains=search_filter)]
        return JsonResponse({'data': accessories})
    context = {'accessories': Accessories.objects.all().order_by('name')}
    return render(request, 'accessories/index.html', context)
def get_accessories_by_id(request, id):
    return render(request, 'accessories/accessories_details.html', {
        'accessories': get_object_or_404(Accessories, pk=id)
    })
@login_required
def create_accessories(request):
if not request.user.is_superuser:
return redirect('accessories-index')
if request.method == 'POST':
form = AccessoriesCreateForm(data=request.POST)
if form.is_valid():
accessories = form.save()
accessories_image = AccessoriesImage(image=request.POST['image'], accessories=accessories)
accessories_image.save()
return redirect('accessories-index')
else:
form = AccessoriesCreateForm()
return render(request, 'accessories/create_accessories.html', {
'form': form
})
@login_required
def delete_accessories(request, id):
    # Check permissions before hitting the database.
    if not request.user.is_superuser:
        return redirect('accessories-index')
    accessories = get_object_or_404(Accessories, pk=id)
    accessories.delete()
    return redirect('accessories-index')
@login_required
def update_accessories(request, id):
instance = get_object_or_404(Accessories, pk=id)
if not request.user.is_superuser:
return redirect('accessories-index')
if request.method == 'POST':
form = AccessoriesUpdateForm(data=request.POST, instance=instance)
if form.is_valid():
form.save()
return redirect('accessories-details', id=id)
else:
form = AccessoriesUpdateForm(instance=instance)
return render(request, 'accessories/update_accessories.html',{
'form': form,
'id': id
}) | 35.211382 | 102 | 0.601709 | 425 | 4,331 | 6.002353 | 0.192941 | 0.02352 | 0.0588 | 0.0588 | 0.385731 | 0.372403 | 0.372403 | 0.372403 | 0.352411 | 0.330067 | 0 | 0.004239 | 0.291849 | 4,331 | 123 | 103 | 35.211382 | 0.827519 | 0.013623 | 0 | 0.504854 | 0 | 0 | 0.119672 | 0.029977 | 0 | 0 | 0 | 0.00813 | 0 | 1 | 0.048544 | false | 0 | 0.058252 | 0.009709 | 0.223301 | 0.019417 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bf7cdbd9035ba43658a57dbe03ddc137eb45c69 | 1,838 | py | Python | tests/test_misc.py | carlwgeorge/uvalde | 472aa09c57e16b3df8afe2f38cfcee74f52cd545 | [
"MIT"
] | 3 | 2020-04-08T20:26:59.000Z | 2020-12-13T07:57:55.000Z | tests/test_misc.py | carlwgeorge/uvalde | 472aa09c57e16b3df8afe2f38cfcee74f52cd545 | [
"MIT"
] | 2 | 2019-09-06T20:28:18.000Z | 2019-11-04T14:41:36.000Z | tests/test_misc.py | carlwgeorge/uvalde | 472aa09c57e16b3df8afe2f38cfcee74f52cd545 | [
"MIT"
] | 2 | 2019-06-22T10:03:59.000Z | 2022-02-17T01:11:23.000Z | import pathlib
import appdirs
import click.testing
import pytest
import repomd
import uvalde
@pytest.mark.parametrize('real_path', [True, False], ids=['target exists', 'target does not exist'])
def test_createrepo(tmp_path, real_path):
target = tmp_path / 'repo'
if real_path:
target.mkdir()
uvalde.repodata.createrepo(target)
repo = repomd.load(f'file://{target}')
assert len(repo) == 0
else:
with pytest.raises(FileExistsError, match='No such directory'):
uvalde.repodata.createrepo(target)
@pytest.mark.parametrize(
'start_exists,end_exists',
[
(False, False),
(True, True),
(True, False),
],
ids=[
'start does not exist',
'end already exists',
'start exists and end does not',
],
)
def test_safe_check(tmp_path, start_exists, end_exists):
start = tmp_path / 'a'
end = tmp_path / 'b'
if start_exists:
start.touch()
if end_exists:
end.touch()
if not start_exists:
with pytest.raises(SystemExit, match='a does not exist'):
uvalde.transfer.safe_check(start, end)
if end_exists:
with pytest.raises(SystemExit, match='b already exists'):
uvalde.transfer.safe_check(start, end)
if start_exists and not end_exists:
uvalde.transfer.safe_check(start, end)
assert end.parent.exists()
assert end.parent.is_dir()
def test_old_db_file(tmp_config):
runner = click.testing.CliRunner()
db_dir = pathlib.Path(appdirs.user_data_dir('uvalde'))
db_dir.mkdir(parents=True)
old_db_file = db_dir / 'rpms.sqlite'
old_db_file.touch()
result = runner.invoke(uvalde.main, ['list'])
assert f'Old database {old_db_file} detected' in result.output
assert result.exit_code == 1
| 25.178082 | 100 | 0.644178 | 241 | 1,838 | 4.742739 | 0.319502 | 0.057743 | 0.031496 | 0.060367 | 0.160105 | 0.160105 | 0.095363 | 0 | 0 | 0 | 0 | 0.001433 | 0.240479 | 1,838 | 72 | 101 | 25.527778 | 0.817335 | 0 | 0 | 0.160714 | 0 | 0 | 0.140914 | 0.012514 | 0 | 0 | 0 | 0 | 0.089286 | 1 | 0.053571 | false | 0 | 0.107143 | 0 | 0.160714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bf835361c22257b0d204aa3f16ee832e0211454 | 1,815 | py | Python | {{cookiecutter.project_slug}}/core/rest_framework.py | claysllanxavier/django-cookiecutter | 97de7ff4ed3dc94c32bf756a57aee0664a888cbc | [
"BSD-3-Clause"
] | 8 | 2021-08-13T17:48:27.000Z | 2022-02-22T02:34:15.000Z | {{cookiecutter.project_slug}}/core/rest_framework.py | claysllanxavier/django-cookiecutter | 97de7ff4ed3dc94c32bf756a57aee0664a888cbc | [
"BSD-3-Clause"
] | 2 | 2022-03-24T20:39:00.000Z | 2022-03-24T20:39:48.000Z | {{cookiecutter.project_slug}}/core/rest_framework.py | claysllanxavier/django-cookiecutter | 97de7ff4ed3dc94c32bf756a57aee0664a888cbc | [
"BSD-3-Clause"
] | 2 | 2021-09-21T00:05:27.000Z | 2022-01-03T10:50:05.000Z | from collections import OrderedDict
from collections.abc import Mapping
from django.core.exceptions import ValidationError as DjangoValidationError
from rest_framework.exceptions import ValidationError
from rest_framework.fields import SkipField, get_error_detail, set_value
from rest_framework.serializers import ModelSerializer
from rest_framework.settings import api_settings
class Serializador(ModelSerializer):
def to_internal_value(self, data):
"""
        Standardizes the error messages coming from Django's default validation.
Dict of native values <- Dict of primitive datatypes.
"""
if not isinstance(data, Mapping):
message = self.error_messages['invalid'].format(
datatype=type(data).__name__
)
raise ValidationError({
api_settings.NON_FIELD_ERRORS_KEY: [message]
}, code='invalid')
ret = OrderedDict()
errors = OrderedDict()
fields = self._writable_fields
for field in fields:
validate_method = getattr(self, 'validate_' + field.field_name, None)
primitive_value = field.get_value(data)
try:
validated_value = field.run_validation(primitive_value)
if validate_method is not None:
validated_value = validate_method(validated_value)
except ValidationError as exc:
                # Convention adopted here: wrap field errors under a single
                # "error_message" key instead of keying them by field name.
errors["error_message"] = {field.field_name: exc.detail}
except DjangoValidationError as exc:
errors[field.field_name] = get_error_detail(exc)
except SkipField:
pass
else:
set_value(ret, field.source_attrs, validated_value)
if errors:
raise ValidationError(errors)
return ret
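# Shape of the raised error payload when a field fails validation
# (sketch; the field name and message are illustrative):
#
#     {"error_message": {"email": ["Enter a valid email address."]}}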
| 36.3 | 81 | 0.621488 | 188 | 1,815 | 5.787234 | 0.446809 | 0.036765 | 0.051471 | 0.029412 | 0.075368 | 0.045956 | 0 | 0 | 0 | 0 | 0 | 0 | 0.31405 | 1,815 | 49 | 82 | 37.040816 | 0.873896 | 0.109091 | 0 | 0 | 0 | 0 | 0.022843 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0.028571 | 0.142857 | 0 | 0.228571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bf83e1a315b15b0159e68583b5bc36d0ba61b0a | 271 | py | Python | test/local/select_candidates.py | adamcho14/SEVAS | b46c206bd4d25563bd4403007481da0f4cd6a31a | [
"Apache-2.0"
] | null | null | null | test/local/select_candidates.py | adamcho14/SEVAS | b46c206bd4d25563bd4403007481da0f4cd6a31a | [
"Apache-2.0"
] | null | null | null | test/local/select_candidates.py | adamcho14/SEVAS | b46c206bd4d25563bd4403007481da0f4cd6a31a | [
"Apache-2.0"
] | null | null | null | import sqlite3
import json
# Fetch every row from the candidates table and dump it as JSON
# (sqlite3 rows serialize as lists of column values).
connection = sqlite3.connect("../db/persons.sqlite")
cursor = connection.cursor()
cursor.execute("SELECT * FROM candidates")
result = cursor.fetchall()
connection.close()
with open("candidates.txt", 'w') as file:
json.dump(result, file)
| 20.846154 | 52 | 0.730627 | 34 | 271 | 5.823529 | 0.676471 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008368 | 0.118081 | 271 | 12 | 53 | 22.583333 | 0.820084 | 0 | 0 | 0 | 0 | 0 | 0.217712 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bf846009015899e8dbfa809ab7e00b8738734d3 | 4,335 | py | Python | bin/merge_kallisto_abundance.py | twylie/bviRNASeq | 4e9521d395d2c3fc764d22500319ec778df5f9da | [
"MIT"
] | null | null | null | bin/merge_kallisto_abundance.py | twylie/bviRNASeq | 4e9521d395d2c3fc764d22500319ec778df5f9da | [
"MIT"
] | null | null | null | bin/merge_kallisto_abundance.py | twylie/bviRNASeq | 4e9521d395d2c3fc764d22500319ec778df5f9da | [
"MIT"
] | null | null | null | #!/usr/bin/python3.7
import argparse
import pandas as pd
import os
import re
version = '1.0'
# SUBROUTINES #################################################################
def eval_cli_arguments():
parser = argparse.ArgumentParser(
description='Merge multiple Kallisto abundance files.',
prog='merge_kallisto_abundance.py',
add_help=False
)
# Optional arguments.
parser.add_argument(
'-h',
'--help',
action='help',
help='Display the extended usage statement.'
)
parser.add_argument(
'--version',
action='version',
version=version,
help='Display the software version number.'
)
# Required arguments.
required_group = parser.add_argument_group('required')
required_group.add_argument(
'--kallistodir',
metavar='DIR',
action='store',
help='Root path to the Kallisto results directory.',
required=True,
)
required_group.add_argument(
'--output',
metavar='FILE',
action='store',
help='Path to write the merged, annotated abundance file.',
required=True
)
required_group.add_argument(
'--annotation',
metavar='FILE',
action='store',
help='Path to the transcriptome annotation file.',
required=True
)
return parser.parse_args()
def determine_sample_ids(arguments):
abundance = list()
for root, dirs, files in os.walk(arguments.kallistodir):
for file_ in files:
if file_.endswith('.tsv'):
abundance.append([os.path.basename(root), '/'.join([root, file_])])
return abundance
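# Expected layout under --kallistodir (sketch): Kallisto writes one
# abundance.tsv per sample, and the parent directory name is used as the
# sample id, e.g.
#
#     kallisto_results/
#         sampleA/abundance.tsv
#         sampleB/abundance.tsv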
def merge_abundances(abundance):
dataframes = list()
for sample, path in abundance:
df = pd.read_csv(path, sep='\t')
df['sample id'] = sample
df = df.set_index('sample id')
dataframes.append(df)
df_merged = pd.concat(dataframes)
return df_merged
def write_annotated_merged_abundances(arguments):
df_annotation = pd.read_csv(arguments.annotation, index_col='transcript_id', sep='\t')
out = re.sub('.tsv', '.annotated.tsv', arguments.output)
out_fh = open(out, 'w')
out_fh.write(
'\t'.join([
'sample_id',
'target_id',
'length',
'eff_length',
'est_counts',
'tpm',
'transcript_id',
'seq_type',
'location',
'gene',
'gene_biotype',
'transcript_biotype',
'gene_symbol',
'description'
]) + '\n'
)
with open(arguments.output, 'r') as fh:
for line in fh:
# [0] sample id
# [1] target_id
# [2] length
# [3] eff_length
# [4] est_counts
# [5] tpm
line = line.strip()
abundance_fields = line.split('\t')
target_id = abundance_fields[1]
if target_id != 'target_id':
# [0] transcript_id
# [1] seq_type
# [2] location
# [3] gene
# [4] gene_biotype
# [5] transcript_biotype
# [6] gene_symbol
# [7] description
annotation_fields = list(df_annotation.loc[target_id])
annotation_fields.insert(0, target_id)
fields = abundance_fields + annotation_fields
out_fh.write('\t'.join(map(str, fields)) + '\n')
    out_fh.close()
    return
###############################################################################
# MAIN #
###############################################################################
if __name__ == '__main__':
# Given the root path to a collection of Kallisto quantification files
# (abundance.tsv), we will create a merged table using the parent directory
# names as the associated sample ids. Assumes that parent directory names
# are unique.
arguments = eval_cli_arguments()
abundance = determine_sample_ids(arguments)
df = merge_abundances(abundance)
df.to_csv(arguments.output, sep='\t')
write_annotated_merged_abundances(arguments)
# __END__
| 25.958084 | 90 | 0.527566 | 434 | 4,335 | 5.069124 | 0.334101 | 0.025455 | 0.023182 | 0.032727 | 0.110909 | 0.061818 | 0.029091 | 0 | 0 | 0 | 0 | 0.006754 | 0.316955 | 4,335 | 166 | 91 | 26.114458 | 0.736238 | 0.135179 | 0 | 0.118812 | 0 | 0 | 0.170237 | 0.007712 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039604 | false | 0 | 0.039604 | 0 | 0.118812 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bfb6f64ea7f545f208305c3ecba83724140ff33 | 7,613 | py | Python | min_interval/datas.py | kohjiaxuan/Stock-Market-Dashboard | a517136eff62a482455b68c0e8ebed8361ab1d53 | [
"MIT"
] | 13 | 2019-06-13T15:50:47.000Z | 2022-03-11T07:57:11.000Z | min_interval/datas.py | kohjiaxuan/Stock-Market-Dashboard | a517136eff62a482455b68c0e8ebed8361ab1d53 | [
"MIT"
] | null | null | null | min_interval/datas.py | kohjiaxuan/Stock-Market-Dashboard | a517136eff62a482455b68c0e8ebed8361ab1d53 | [
"MIT"
] | 1 | 2021-08-22T06:12:33.000Z | 2021-08-22T06:12:33.000Z | # Build lists to store information for stock prices at (open, close, high, low) at 5 minute intervals
# This step can be built into a Python Class in the future to allow users to select their own stock names
# https://www.powercms.in/blog/how-get-json-data-remote-url-python-script
import json
import numpy as np
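# Each `string*` argument is expected to be an intraday JSON payload of the
# shape below (sketch inferred from the keys accessed in transformdata;
# the timestamps and numbers are illustrative):
#
#     {
#         "Time Series (5min)": {
#             "2020-05-09 10:00:00": {
#                 "1. open": "310.00", "2. high": "311.20",
#                 "3. low": "309.50", "4. close": "310.80"
#             },
#             ...
#         }
#     }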
TICKERS = ('AAPL', 'AMZN', 'GOOGL', 'FB', 'MSFT', 'NFLX')
def transformdata(string1, string2, string3, string4, string5, string6, startdatetime):
    # Parse the six JSON payloads (one per ticker, in TICKERS order)
    jsondata = {ticker: json.loads(payload) for ticker, payload in
                zip(TICKERS, (string1, string2, string3, string4, string5, string6))}
    # Chronologically ordered datetime keys of each 5-minute series
    times = {ticker: list(reversed(list(jsondata[ticker]['Time Series (5min)'].keys())))
             for ticker in TICKERS}
    # Append :00 when the seconds are omitted, e.g. "2020-05-09 10:00" -> "2020-05-09 10:00:00"
    try:
        startdatetime = str(startdatetime)
        if startdatetime.count(':') == 1:
            startdatetime += ':00'
    except Exception:
        startdatetime = ''
    # Get the element index for the start datetime in the AAPL series;
    # the same offset is applied to every ticker
    startposition = 0
    try:
        for position, curdatetime in enumerate(times['AAPL']):
            if str(curdatetime).find(startdatetime) > -1:
                startposition = position
                break
    except Exception:
        startposition = 0
    # Collect the open/high/low/close series and shortened time labels per
    # ticker, skipping everything before the start position
    opens, highs, lows, closes, labels = {}, {}, {}, {}, {}
    for ticker in TICKERS:
        series = jsondata[ticker]['Time Series (5min)']
        opens[ticker], highs[ticker], lows[ticker], closes[ticker], labels[ticker] = [], [], [], [], []
        for key in times[ticker][startposition:]:
            labels[ticker].append(key[2:16])
            entry = series[key]
            opens[ticker].append(float(entry['1. open']))
            highs[ticker].append(float(entry['2. high']))
            lows[ticker].append(float(entry['3. low']))
            closes[ticker].append(float(entry['4. close']))
    # Normalize each ticker's opening prices by the first open of the window.
    # This is used for plotting the relative stock performance chart
    # (% movement in a day) for all 6 stocks
    daily_norm = {ticker: np.array(opens[ticker]) / opens[ticker][0] for ticker in TICKERS}
    # Percentage change of each stock, shown in the title of the graphs
    changes = {ticker: round((opens[ticker][-1] - opens[ticker][0]) / opens[ticker][0] * 100, 2)
               for ticker in TICKERS}
    return {"daily": [daily_norm[ticker] for ticker in TICKERS],
            "change": [changes[ticker] for ticker in TICKERS],
            "time": [labels[ticker] for ticker in TICKERS],
            "open": [opens[ticker] for ticker in TICKERS],
            "high": [highs[ticker] for ticker in TICKERS],
            "low": [lows[ticker] for ticker in TICKERS],
            "close": [closes[ticker] for ticker in TICKERS]
} | 42.530726 | 123 | 0.633784 | 888 | 7,613 | 5.386261 | 0.231982 | 0.064813 | 0.087811 | 0.100355 | 0.376333 | 0.351035 | 0.269496 | 0.075476 | 0.075476 | 0.075476 | 0 | 0.030324 | 0.22895 | 7,613 | 179 | 124 | 42.530726 | 0.784497 | 0.130697 | 0 | 0.232877 | 0 | 0 | 0.112508 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.006849 | false | 0 | 0.020548 | 0 | 0.034247 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bfd94441faadef19a07083291f57efae727d802 | 580 | py | Python | setup.py | koksyuen/pySA | 31566bd29d3f685d518d8bedbe2aee118c7ca7b0 | [
"MIT"
] | null | null | null | setup.py | koksyuen/pySA | 31566bd29d3f685d518d8bedbe2aee118c7ca7b0 | [
"MIT"
] | null | null | null | setup.py | koksyuen/pySA | 31566bd29d3f685d518d8bedbe2aee118c7ca7b0 | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
# Read the dependency list up front so the file handle is closed promptly.
with open('requirements.txt') as requirements_file:
    requirements = requirements_file.read().splitlines()
setup(
name = "py_SA",
version = "1.0",
author = "Sri Sanketh Uppalapati",
author_email = "iamjustice443@gmail.com",
description = ("Structural Analysis in python"),
keywords = "Shear force Bending moment",
url = "https://github.com/pySA-dev/pySA",
license = "MIT",
packages = find_packages(exclude=["build.*"]),
    install_requires = requirements,
entry_points = {
"console_scripts": [ "pySA = tools.main:main"]
}
)
| 27.619048 | 56 | 0.648276 | 63 | 580 | 5.857143 | 0.825397 | 0.065041 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010846 | 0.205172 | 580 | 20 | 57 | 29 | 0.789588 | 0 | 0 | 0 | 0 | 0 | 0.35 | 0.039655 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.058824 | 0 | 0.058824 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bfdb602ce107d1fbedf7c71178107b88d14af81 | 1,078 | py | Python | anti_spoofing/data_io/dataset_loader.py | YatinMahajann/Silent-Face-Anti-Spoofing | e3bdd8ed1f49c7cc5eacf01201dad61a15fb50ed | [
"Apache-2.0"
] | 819 | 2020-07-14T02:22:31.000Z | 2022-03-31T08:54:34.000Z | src/data_io/dataset_loader.py | tfygg/Silent-Face-Anti-Spoofing | 08a36e201df14923b648917ea7fb7e647cedf7c1 | [
"Apache-2.0"
] | 86 | 2020-07-14T03:56:57.000Z | 2022-03-25T06:39:30.000Z | src/data_io/dataset_loader.py | tfygg/Silent-Face-Anti-Spoofing | 08a36e201df14923b648917ea7fb7e647cedf7c1 | [
"Apache-2.0"
] | 268 | 2020-07-14T07:25:23.000Z | 2022-03-26T06:38:33.000Z | # -*- coding: utf-8 -*-
# @Time : 20-6-4 3:40 PM
# @Author : zhuying
# @Company : Minivision
# @File : dataset_loader.py
# @Software : PyCharm
from torch.utils.data import DataLoader
from src.data_io.dataset_folder import DatasetFolderFT
from src.data_io import transform as trans
def get_train_loader(conf):
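    """Build the training DataLoader.

    ``conf`` is assumed to expose the fields read below: input_size,
    train_root_path, patch_info, batch_size, ft_width and ft_height.
    """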
train_transform = trans.Compose([
trans.ToPILImage(),
trans.RandomResizedCrop(size=tuple(conf.input_size),
scale=(0.9, 1.1)),
trans.ColorJitter(brightness=0.4,
contrast=0.4, saturation=0.4, hue=0.1),
trans.RandomRotation(10),
trans.RandomHorizontalFlip(),
trans.ToTensor()
])
root_path = '{}/{}'.format(conf.train_root_path, conf.patch_info)
trainset = DatasetFolderFT(root_path, train_transform,
None, conf.ft_width, conf.ft_height)
train_loader = DataLoader(
trainset,
batch_size=conf.batch_size,
shuffle=True,
pin_memory=True,
num_workers=16)
return train_loader
| 31.705882 | 69 | 0.625232 | 128 | 1,078 | 5.09375 | 0.578125 | 0.050614 | 0.033742 | 0.039877 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030151 | 0.261596 | 1,078 | 33 | 70 | 32.666667 | 0.788945 | 0.119666 | 0 | 0 | 0 | 0 | 0.005308 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.125 | 0 | 0.208333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bfdf08924d59df001478f6d9007c758e7599f63 | 4,317 | py | Python | deploy-certbot-layer.py | dacut/certbot-to-acm | 0ec90992f4c3d13d6323402b900d028dc358c3cd | [
"Apache-2.0"
] | null | null | null | deploy-certbot-layer.py | dacut/certbot-to-acm | 0ec90992f4c3d13d6323402b900d028dc358c3cd | [
"Apache-2.0"
] | null | null | null | deploy-certbot-layer.py | dacut/certbot-to-acm | 0ec90992f4c3d13d6323402b900d028dc358c3cd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""\
Usage: deploy-certbot-layer.py <pyver>...
Deploy certbot layers. The <pyver> arguments are 3.6, 3.7, 3.8, etc.
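Example:
    deploy-certbot-layer.py 3.7 3.8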
Options:
-h | --help
Show this usage information.
"""
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED
from dataclasses import dataclass, field
from getopt import getopt, GetoptError
from sys import argv, exit as sys_exit, stderr, stdout
from typing import Dict, List, Optional, Set
from boto3.session import Session
PROFILES = ("iono", "iono-gov")
@dataclass
class LayerVersionInfo:
"""
Information about the deployment of a CertBot layer for a given region and Python version.
"""
python_version: str
s3_key: str
s3_object_version: str
lambda_layer_version: int
@dataclass
class RegionInfo:
"""
Information about a region and the CertBot layers uploaded there.
"""
region_name: str
s3_bucket: Optional[str] = None
version_info: Dict[str, LayerVersionInfo] = field(default_factory=dict)
@dataclass
class AccountInfo:
"""
Information about a given AWS account
"""
account_id: str
partition: str
regions: Set[str]
@classmethod
def for_profile(cls, profile_name: str) -> "AccountInfo":
"""
Returns account information given the profile name. Credentials should be stored in ~/.aws/credentials.
"""
return cls.for_session(Session(profile_name=profile_name))
@classmethod
def for_session(cls, session: Session) -> "AccountInfo":
"""
Returns account information given a Boto3 session.
"""
sts = session.client("sts")
ec2 = session.client("ec2")
cid = sts.get_caller_identity()
account_id = cid["Account"]
partition = cid["Arn"].split(":")[1]
regions = set([region["RegionName"] for region in ec2.describe_regions()["Regions"]])
return cls(account_id=account_id, partition=partition, regions=regions)
def main(args):
"""
Main program entrypoint.
"""
try:
opts, args = getopt(args, "h", ["help"])
for opt, val in opts:
if opt in ["-h", "--help"]:
usage(stdout)
return 0
except GetoptError as e:
print(e, file=stderr)
usage()
return 2
profile_to_account_info = {profile: AccountInfo.for_profile(profile) for profile in PROFILES}
executor = ThreadPoolExecutor()
futures = []
for profile, account_info in profile_to_account_info.items():
for region in account_info.regions:
print(f"Submitting {profile}/{region}")
futures.append(executor.submit(send_versions, profile, region, args))
    exit_code = 0
    for future in wait(futures, return_when=ALL_COMPLETED)[0]:
        exc = future.exception()
        if exc is not None:
            print(exc, file=stderr)
            # A failed upload should be reflected in the exit status.
            exit_code = 1
    return exit_code
def send_versions(profile: str, region: str, versions: List[str]) -> None:
b3 = Session(profile_name=profile, region_name=region)
s3 = b3.client("s3")
lam = b3.client("lambda")
s3_bucket = f"ionosphere-public-{region}"
for ver in versions:
filename = f"lambda-layers/certbot-layer-py{ver}.zip"
layer_name = f"certbot-py{ver.replace('.', '')}"
print(f"Writing {filename} to {region}")
with open(filename, "rb") as fd:
s3_result = s3.put_object(ACL="public-read", Body=fd, Bucket=s3_bucket, Key=filename)
s3_version = s3_result["VersionId"]
print(f"Publishing Lambda layer for {region}/{ver}")
lam_result = lam.publish_layer_version(
LayerName=layer_name,
Description=f"Certbot for Python {ver}",
Content={
"S3Bucket": s3_bucket,
"S3Key": filename,
"S3ObjectVersion": s3_version,
},
CompatibleRuntimes=[f"python{ver}"],
LicenseInfo="Apache-2.0",
)
lam_ver = lam_result["Version"]
lam.add_layer_version_permission(
LayerName=layer_name, VersionNumber=lam_ver, StatementId="Public", Action="lambda:GetLayerVersion", Principal="*"
)
def usage(fd=stderr):
fd.write(__doc__)
if __name__ == "__main__":
sys_exit(main(argv[1:]))
| 29.568493 | 125 | 0.631457 | 515 | 4,317 | 5.151456 | 0.349515 | 0.020731 | 0.010554 | 0.027139 | 0.030908 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012093 | 0.252953 | 4,317 | 145 | 126 | 29.772414 | 0.810543 | 0.132036 | 0 | 0.077778 | 0 | 0 | 0.117291 | 0.031388 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.066667 | 0 | 0.322222 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0400cf5a1560f09a56784fd8ac6b2df81083c662 | 5,776 | py | Python | baselines/deepq/experiments/atari/lru_knn_ucb_gpu.py | MouseHu/emdqn | ba907e959f21dd0b5a17117accccae9c82a79a3b | [
"MIT"
] | null | null | null | baselines/deepq/experiments/atari/lru_knn_ucb_gpu.py | MouseHu/emdqn | ba907e959f21dd0b5a17117accccae9c82a79a3b | [
"MIT"
] | null | null | null | baselines/deepq/experiments/atari/lru_knn_ucb_gpu.py | MouseHu/emdqn | ba907e959f21dd0b5a17117accccae9c82a79a3b | [
"MIT"
] | 1 | 2021-04-26T13:55:47.000Z | 2021-04-26T13:55:47.000Z | import numpy as np
from baselines.deepq.experiments.atari.knn_cuda import knn as knn_cuda
# each action -> a lru_knn buffer
class LRU_KNN_UCB_GPU(object):
    def __init__(self, capacity, z_dim, env_name, action, mode="mean", num_actions=6, knn=4):
self.action = action
self.knn = knn
self.env_name = env_name
self.capacity = capacity
self.num_actions = num_actions
self.states = np.empty((capacity, z_dim), dtype=np.float32)
self.q_values_decay = np.zeros(capacity)
self.count = np.zeros(capacity)
self.lru = np.zeros(capacity)
        self.best_action = np.zeros((capacity, num_actions), dtype=int)  # the np.int alias no longer exists in NumPy >= 1.24
self.curr_capacity = 0
self.tm = 0.0
self.addnum = 0
self.buildnum = 256
self.buildnum_max = 256
self.bufpath = './buffer/%s' % self.env_name
self.mode = mode
self.threshold = 1e-2
def peek(self, key, value_decay, action=-1, modify=False):
        if self.curr_capacity == 0:
            return None, None, None
        # The CUDA kNN returns 1-based indices, hence the `ind - 1` shift.
        dist, ind = knn_cuda.knn(np.transpose(np.array([key])), np.transpose(self.states[:self.curr_capacity]), 1)
        dist, ind = np.transpose(dist), np.transpose(ind - 1)
        ind = ind[0][0]
if dist[0][0] < self.threshold:
# print("peek success")
self.lru[ind] = self.tm
self.tm += 0.01
if modify:
if self.mode == "max":
if value_decay > self.q_values_decay[ind]:
self.q_values_decay[ind] = value_decay
if action >= 0:
self.best_action[ind, action] = 1
elif self.mode == "mean":
self.q_values_decay[ind] = (value_decay + self.q_values_decay[ind] * self.count[ind]) / (
self.count[ind] + 1)
self.count[ind] += 1
return self.q_values_decay[ind], self.best_action[ind], self.count[ind]
return None, None, None
    def knn_value(self, key, knn):
# knn = min(self.curr_capacity, knn)
if self.curr_capacity < knn:
return 0.0, None, 1.0
dist, ind = knn_cuda.knn(np.transpose(key), np.transpose(self.states[:self.curr_capacity]), knn)
dist, ind = np.transpose(dist), np.transpose(ind - 1)
        # Softmax over negative distances: closer neighbors get larger weights.
        coeff = np.exp(-dist[0])
        coeff = coeff / np.sum(coeff)
value = 0.0
action = np.zeros((self.num_actions,))
value_decay = 0.0
count = 0
# print("nearest dist", dist[0][0])
for j, index in enumerate(ind[0]):
value_decay += self.q_values_decay[index] * coeff[j]
count += self.count[index] * coeff[j]
action += self.best_action[index] * coeff[j]
self.lru[index] = self.tm
self.tm += 0.01
q_decay = value_decay
return q_decay, action, count
def act_value(self, key, knn):
# knn = min(self.curr_capacity, knn)
values = []
actions = np.zeros((len(key), self.num_actions))
counts = []
exact_refer = []
if self.curr_capacity < knn:
for i in range(len(key)):
actions[i, self.action] = 1
values.append(0)
counts.append(1)
exact_refer.append(False)
return values, actions, counts, np.array(exact_refer)
dist, ind = knn_cuda.knn(np.transpose(key), np.transpose(self.states[:self.curr_capacity]), knn)
dist, ind = np.transpose(dist), np.transpose(ind - 1)
for i in range(len(dist)):
value_decay = 0
count = 0
            # Softmax over negative distances: closer neighbors get larger weights.
            coeff = np.exp(-dist[i])
            coeff = coeff / np.sum(coeff)
if dist[i][0] < self.threshold:
exact_refer.append(True)
value_decay = self.q_values_decay[ind[i][0]]
count = self.count[ind[i][0]]
actions[i] = self.best_action[ind[i][0]]
self.lru[ind[i][0]] = self.tm
self.tm += 0.01
else:
exact_refer.append(False)
for j, index in enumerate(ind[i]):
value_decay += self.q_values_decay[index] * coeff[j]
count += self.count[index] * coeff[j]
# print(coeff.shape, index, i)
actions[i] += self.best_action[index] * coeff[j]
self.lru[index] = self.tm
self.tm += 0.01
values.append(value_decay)
counts.append(count)
return values, actions, counts, np.array(exact_refer)
def add(self, key, value_decay, action=-1):
if self.curr_capacity >= self.capacity:
# find the LRU entry
old_index = np.argmin(self.lru)
self.states[old_index] = key
self.q_values_decay[old_index] = value_decay
self.lru[old_index] = self.tm
self.count[old_index] = 2
if action >= 0:
self.best_action[old_index, action] = 1
else:
self.states[self.curr_capacity] = key
self.q_values_decay[self.curr_capacity] = value_decay
self.lru[self.curr_capacity] = self.tm
self.count[self.curr_capacity] = 2
if action >= 0:
self.best_action[self.curr_capacity, action] = 1
self.curr_capacity += 1
self.tm += 0.01
def update_kdtree(self):
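        # No-op: this GPU variant runs brute-force kNN on every query, so
        # there is no KD-tree to rebuild (presumably kept for interface
        # parity with a CPU implementation).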
pass
| 39.561644 | 114 | 0.539993 | 756 | 5,776 | 3.989418 | 0.138889 | 0.04244 | 0.084881 | 0.058355 | 0.464191 | 0.370358 | 0.314987 | 0.249005 | 0.186008 | 0.17374 | 0 | 0.020893 | 0.337084 | 5,776 | 145 | 115 | 39.834483 | 0.76678 | 0.066828 | 0 | 0.277311 | 0 | 0 | 0.004092 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05042 | false | 0.008403 | 0.016807 | 0 | 0.134454 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0401ba50b6fa7e538643079e35222254d90e2964 | 5,472 | py | Python | rl_coach/agents/naf_agent.py | abcp4/coach | e0f1b9ecc884ffea71ba634986d478f4fc150d3e | [
"Apache-2.0"
] | 3 | 2019-11-15T02:05:38.000Z | 2020-10-10T17:15:40.000Z | rl_coach/agents/naf_agent.py | gndctrl2mjrtm/coach-ray | ae6593bb33cf0ae3c5a4b3b351560dd6b47cd031 | [
"Apache-2.0"
] | null | null | null | rl_coach/agents/naf_agent.py | gndctrl2mjrtm/coach-ray | ae6593bb33cf0ae3c5a4b3b351560dd6b47cd031 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Union
import numpy as np
from rl_coach.agents.value_optimization_agent import ValueOptimizationAgent
from rl_coach.architectures.tensorflow_components.heads.naf_head import NAFHeadParameters
from rl_coach.architectures.tensorflow_components.middlewares.fc_middleware import FCMiddlewareParameters
from rl_coach.base_parameters import AlgorithmParameters, AgentParameters, \
NetworkParameters
from rl_coach.architectures.tensorflow_components.embedders.embedder import InputEmbedderParameters
from rl_coach.core_types import ActionInfo, EnvironmentSteps
from rl_coach.exploration_policies.ou_process import OUProcessParameters
from rl_coach.memories.episodic.episodic_experience_replay import EpisodicExperienceReplayParameters
from rl_coach.spaces import BoxActionSpace
class NAFNetworkParameters(NetworkParameters):
def __init__(self):
super().__init__()
self.input_embedders_parameters = {'observation': InputEmbedderParameters()}
self.middleware_parameters = FCMiddlewareParameters()
self.heads_parameters = [NAFHeadParameters()]
self.optimizer_type = 'Adam'
self.learning_rate = 0.001
self.async_training = True
self.create_target_network = True
class NAFAlgorithmParameters(AlgorithmParameters):
def __init__(self):
super().__init__()
self.num_consecutive_training_steps = 5
self.num_steps_between_copying_online_weights_to_target = EnvironmentSteps(1)
self.rate_for_copying_weights_to_target = 0.001
class NAFAgentParameters(AgentParameters):
def __init__(self):
super().__init__(algorithm=NAFAlgorithmParameters(),
exploration=OUProcessParameters(),
memory=EpisodicExperienceReplayParameters(),
networks={"main": NAFNetworkParameters()})
@property
def path(self):
return 'rl_coach.agents.naf_agent:NAFAgent'
# Normalized Advantage Functions - https://arxiv.org/pdf/1603.00748.pdf
class NAFAgent(ValueOptimizationAgent):
def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
super().__init__(agent_parameters, parent)
self.l_values = self.register_signal("L")
self.a_values = self.register_signal("Advantage")
self.mu_values = self.register_signal("Action")
self.v_values = self.register_signal("V")
self.TD_targets = self.register_signal("TD targets")
def learn_from_batch(self, batch):
network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()
# TD error = r + discount*v_st_plus_1 - q_st
v_st_plus_1 = self.networks['main'].target_network.predict(
batch.next_states(network_keys),
self.networks['main'].target_network.output_heads[0].V,
squeeze_output=False,
)
TD_targets = np.expand_dims(batch.rewards(), -1) + \
(1.0 - np.expand_dims(batch.game_overs(), -1)) * self.ap.algorithm.discount * v_st_plus_1
self.TD_targets.add_sample(TD_targets)
result = self.networks['main'].train_and_sync_networks({**batch.states(network_keys),
'output_0_0': batch.actions(len(batch.actions().shape) == 1)
}, TD_targets)
total_loss, losses, unclipped_grads = result[:3]
return total_loss, losses, unclipped_grads
def choose_action(self, curr_state):
if type(self.spaces.action) != BoxActionSpace:
raise ValueError('NAF works only for continuous control problems')
# convert to batch so we can run it through the network
tf_input_state = self.prepare_batch_for_inference(curr_state, 'main')
naf_head = self.networks['main'].online_network.output_heads[0]
action_values = self.networks['main'].online_network.predict(tf_input_state, outputs=naf_head.mu,
squeeze_output=False)
# get the actual action to use
action = self.exploration_policy.get_action(action_values)
# get the internal values for logging
outputs = [naf_head.mu, naf_head.Q, naf_head.L, naf_head.A, naf_head.V]
result = self.networks['main'].online_network.predict(
{**tf_input_state, 'output_0_0': action_values},
outputs=outputs
)
mu, Q, L, A, V = result
# store the q values statistics for logging
self.q_values.add_sample(Q)
self.l_values.add_sample(L)
self.a_values.add_sample(A)
self.mu_values.add_sample(mu)
self.v_values.add_sample(V)
action_info = ActionInfo(action=action, action_value=Q)
return action_info
| 42.75 | 124 | 0.690972 | 649 | 5,472 | 5.5547 | 0.366718 | 0.019417 | 0.027462 | 0.02663 | 0.134535 | 0.07656 | 0.02663 | 0.02663 | 0.02663 | 0 | 0 | 0.009871 | 0.222405 | 5,472 | 127 | 125 | 43.086614 | 0.837368 | 0.152412 | 0 | 0.061728 | 0 | 0 | 0.044194 | 0.007366 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08642 | false | 0 | 0.135802 | 0.012346 | 0.308642 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04020337881f511c96f0fae59b2e564117c73810 | 1,051 | py | Python | cgi-bin/common/response.py | JamisHoo/Yagra | edcfe8ae6aadee152023c894bd0b8a0b23b9e5a9 | [
"MIT"
] | null | null | null | cgi-bin/common/response.py | JamisHoo/Yagra | edcfe8ae6aadee152023c894bd0b8a0b23b9e5a9 | [
"MIT"
] | null | null | null | cgi-bin/common/response.py | JamisHoo/Yagra | edcfe8ae6aadee152023c894bd0b8a0b23b9e5a9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import config
from collections import defaultdict
def text_response(content_type, message_body, cookie=None):
response = ""
response += "Content-type: {}".format(content_type) + "\n"
if cookie:
response += str(cookie) + "\n"
response += "\n"
response += message_body
return response
def redirect(location, cookie=None):
response = ""
response += "Location: {}".format(location) + "\n"
if cookie:
response += str(cookie) + "\n"
response += "\n"
return response
def not_found():
response = "Status: 404 Not Found" + "\n"
response += "\n"
return response
def populate_html(template_file, variables={}):
""" Return a string containing the contents of the named file and replace
variables in the file.
"""
variables_dict = defaultdict(str, variables)
file_path = os.path.join(config.html_templates_dir, template_file)
with open(file_path) as file_handler:
return file_handler.read().format(variables_dict)
| 22.847826 | 77 | 0.656518 | 128 | 1,051 | 5.257813 | 0.429688 | 0.053492 | 0.044577 | 0.077266 | 0.172363 | 0.172363 | 0.106984 | 0.106984 | 0.106984 | 0 | 0 | 0.003654 | 0.218839 | 1,051 | 45 | 78 | 23.355556 | 0.816078 | 0.107517 | 0 | 0.444444 | 0 | 0 | 0.070499 | 0 | 0.037037 | 0 | 0 | 0 | 0 | 1 | 0.148148 | false | 0 | 0.111111 | 0 | 0.407407 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
040322daa1bf75bc481dee0de3e41fdf7997aa8c | 14,864 | py | Python | torchvision_x/transforms/functional.py | X-Lai/torchvision-enhance | d47fa779046954a2a3030a0b695dab1473b1d589 | [
"MIT"
] | null | null | null | torchvision_x/transforms/functional.py | X-Lai/torchvision-enhance | d47fa779046954a2a3030a0b695dab1473b1d589 | [
"MIT"
] | null | null | null | torchvision_x/transforms/functional.py | X-Lai/torchvision-enhance | d47fa779046954a2a3030a0b695dab1473b1d589 | [
"MIT"
] | 1 | 2021-05-06T01:18:55.000Z | 2021-05-06T01:18:55.000Z | from __future__ import division
import torch
import math
import random
from PIL import Image, ImageOps
try:
import accimage
except ImportError:
accimage = None
import numpy as np
import numbers
import types
import collections
from skimage.external import tifffile
from skimage import transform, filters, util
import cv2
def _is_pil_image(img):
if accimage is not None:
return isinstance(img, (Image.Image, accimage.Image))
else:
return isinstance(img, Image.Image)
def _is_tensor_image(img):
return torch.is_tensor(img) and img.ndimension() == 3
def _is_numpy_image(img):
return isinstance(img, np.ndarray) and (img.ndim in {2, 3})
def to_tensor(pic):
if not (_is_pil_image(pic) or _is_numpy_image(pic)):
raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))
if isinstance(pic, np.ndarray):
# get max value of dtype, if the dtype is not uint8 and uint 16, change this
# denominator = _get_dtype_max(pic)
denominator = np.iinfo(np.uint8).max if pic.dtype == np.uint8 else np.iinfo(np.uint16).max
# handle numpy array
if len(pic.shape) == 2:
img = torch.from_numpy(pic)
img = torch.unsqueeze(img, 0)
else:
img = torch.from_numpy(pic.transpose((2, 0, 1)))
# backward compatibility
return img.float().div(denominator)
if accimage is not None and isinstance(pic, accimage.Image):
nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
pic.copyto(nppic)
return torch.from_numpy(nppic)
# handle PIL Image
if pic.mode == 'I':
img = torch.from_numpy(np.array(pic, np.int32, copy=False))
elif pic.mode == 'I;16':
img = torch.from_numpy(np.array(pic, np.int16, copy=False))
else:
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
# PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
if pic.mode == 'YCbCr':
nchannel = 3
elif pic.mode == 'I;16':
nchannel = 1
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
# put it from HWC to CHW format
# yikes, this transpose takes 80% of the loading time/CPU
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if isinstance(img, torch.ByteTensor):
return img.float().div(255)
else:
return
def to_ndarray(pic, dtype='uint8'):
if not (_is_numpy_image(pic) or _is_tensor_image(pic)):
raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))
npimg = pic
if isinstance(pic, torch.FloatTensor):
if dtype == 'uint8':
npimg = (pic.numpy() * np.iinfo(np.uint8).max).astype(np.uint8)
elif dtype == 'uint16':
npimg = (pic.numpy() * np.iinfo(np.int32).max).astype(np.int32)
else:
raise ValueError('not support dtype')
npimg = np.transpose(npimg, (1, 2, 0))
else:
npimg = np.transpose(pic.numpy(), (1, 2, 0))
assert isinstance(npimg, np.ndarray)
return npimg
def normalize(tensor, mean, std):
"""Normalize a tensor image with mean and standard deviation.
See ``Normalize`` for more details.
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channely.
Returns:
Tensor: Normalized Tensor image.
"""
if not _is_tensor_image(tensor):
raise TypeError('tensor is not a torch image.')
# TODO: make efficient
for t, m, s in zip(tensor, mean, std):
t.sub_(m).div_(s)
return tensor
def flip(img, flip_mode):
if not _is_numpy_image(img):
raise TypeError('img should be numpy ndarray. Got {}'.format(type(img)))
if not (isinstance(flip_mode, int)):
raise TypeError('flipCode should be integer. Got {}'.format(type(flip_mode)))
return cv2.flip(img, flip_mode)
def rotate(img, angle=0, order=1):
"""Rotate image by a certain angle around its center.
Parameters
----------
img : ndarray(uint16 or uint8)
Input image.
angle : integer
Rotation angle in degrees in counter-clockwise direction.
Returns
-------
rotated : ndarray(uint16 or uint8)
Rotated version of the input.
Examples
--------
rotate(image, 30)
rotate(image, 180)
"""
if not _is_numpy_image(img):
raise TypeError('img should be numpy ndarray. Got {}'.format(type(img)))
if not (isinstance(angle, numbers.Number)):
raise TypeError('Angle should be integer. Got {}'.format(type(angle)))
img_new = transform.rotate(img, angle, order=order, preserve_range=True)
img_new = img_new.astype(img.dtype)
return img_new
def shift(img, right_shift=5, down_shift=5):
"""
:param img: the image input
:param right_shift: the pixels of shift right
:param down_shift: the pixels of down right
:return: transformed img
"""
if not _is_numpy_image(img):
raise TypeError('img should be numpy ndarray. Got {}'.format(type(img)))
if not (isinstance(right_shift, int)):
raise TypeError('shift.rightshift should be integer. Got {}'.format(type(right_shift)))
if not (isinstance(down_shift, int)):
raise TypeError('shift.downshift should be integer. Got {}'.format(type(down_shift)))
tform = transform.SimilarityTransform(translation=(-right_shift, -down_shift))
img_new = transform.warp(img, tform, preserve_range=True)
img_new = img_new.astype(img.dtype)
return img_new
def crop(img, top, left, width, height):
"""crop image from position top and left, width and height
Arguments:
img {numpy.ndarray} -- input image
top {int} -- start position row
left {int} -- start position column
width {int} -- crop width
height {int} -- crop height
"""
if not all([isinstance(x, int) for x in (top, left, width, height)]):
raise ValueError("params should be integer!")
if (width > img.shape[0] or height > img.shape[1]):
raise ValueError("the output imgage size should be small than input image!!!")
if len(img.shape) == 2:
img_height, img_width = img.shape
else:
img_height, img_width, _ = img.shape
right = img_width - (left + width)
bottom = img_height - (top + height)
if len(img.shape) == 2:
img_croped = util.crop(img,((top,bottom),(left,right)))
else:
img_croped = util.crop(img,((top,bottom),(left,right),(0,0)))
return img_croped
def center_crop(img, output_size):
if isinstance(output_size, numbers.Number):
output_size = (int(output_size), int(output_size))
if len(img.shape) == 2:
h, w = img.shape
else:
h, w, _ = img.shape
th, tw = output_size
i = int(round((h - th) / 2.))
j = int(round((w - tw) / 2.))
return crop(img, i, j, th, tw)
def resize(img, size, interpolation=Image.BILINEAR):
"""Resize the input PIL Image to the given size.
Note: cv2.resize do not support int32, weird
Args:
img (Numpy Array): Image to be resized.
size (sequence or int): Desired output size. If size is a sequence like
(h, w), the output size will be matched to this. If size is an int,
the smaller edge of the image will be matched to this number maintaing
the aspect ratio. i.e, if height > width, then image will be rescaled to
(size * height / width, size)
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
Returns:
PIL Image: Resized image.
"""
if not _is_numpy_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
if not (isinstance(size, int) or (isinstance(size, collections.Iterable) and len(size) == 2)):
raise TypeError('Got inappropriate size arg: {}'.format(size))
if isinstance(size, int):
size = (size, size)
if img.dtype == np.int32:
resized = img.astype(np.uint16)
resized = cv2.resize(resized, size , interpolation)
else:
resized = cv2.resize(img, size , interpolation)
resized = resized.astype(img.dtype)
return resized
def crop_resize(img, top, left, width, height, size, interpolation=Image.BILINEAR):
"""Crop the given PIL Image and resize it to desired size.
Notably used in RandomResizedCrop.
Args:
img (PIL Image): Image to be cropped.
i: Upper pixel coordinate.
j: Left pixel coordinate.
h: Height of the cropped image.
w: Width of the cropped image.
size (sequence or int): Desired output size. Same semantics as ``scale``.
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``.
Returns:
PIL Image: Cropped image.
"""
assert _is_numpy_image(img), 'img should be PIL Image'
img = crop(img, top, left, width, height)
img = resize(img, size, interpolation)
return img
def pad(img, pad_width, mode='reflect', **kwargs):
"""Pad the given image.
Args:
pad_width : int, padding width
mode: str or function. contain{‘constant’,‘edge’,‘linear_ramp’,‘maximum’,‘mean’
, ‘median’, ‘minimum’, ‘reflect’,‘symmetric’,‘wrap’}
Examples
--------
>>> Transformed_img = pad(img,[(20,20),(20,20),(0,0)],mode='reflect')
"""
if len(img.shape) == 2:
pad_width = ((pad_width, pad_width), (pad_width, pad_width))
else:
pad_width = ((pad_width, pad_width), (pad_width, pad_width), (0, 0))
return np.pad(img, pad_width, mode)
def noise(img, dtype='uint8', mode='gaussian', mean=0, var=0.01):
"""TODO
"""
if dtype == 'uint16':
img_new = img.astype(np.uint16)
img_new = util.random_noise(img, mode, mean=mean, var=var)
if dtype == 'uint8':
img_new = (img_new * np.iinfo(np.uint8).max).astype(np.uint8)
elif dtype == 'uint16':
img_new = (img_new * np.iinfo(np.int32).max).astype(np.int32)
else:
raise ValueError('not support type')
return img_new
def gaussian_blur(img, sigma=1, dtype='uint8', multichannel=False):
"""Multi-dimensional Gaussian filter.
Parameters
----------
image : ndarray
Input image (grayscale or color) to filter.
sigma : scalar or sequence of scalars, optional
Standard deviation for Gaussian kernel. The standard
deviations of the Gaussian filter are given for each axis as a
sequence, or as a single number, in which case it is equal for
all axes.
multichannel : bool, optional (default: None)
Whether the last axis of the image is to be interpreted as multiple
channels. If True, each channel is filtered separately (channels are
not mixed together). Only 3 channels are supported. If `None`,
the function will attempt to guess this, and raise a warning if
ambiguous, when the array has shape (M, N, 3).
Returns
-------
filtered_image : ndarray
"""
if np.any(np.asarray(sigma) < 0.0):
raise ValueError("Sigma values less than zero are not valid")
img_new = filters.gaussian(img, sigma, multichannel)
if dtype == 'uint8':
print(img_new.max(), img_new.min())
img_new = (img_new * np.iinfo(np.uint8).max).astype(np.uint8)
elif dtype == 'uint16':
img_new = (img_new * np.iinfo(np.int32).max).astype(np.int32)
else:
raise ValueError('not support type')
return img_new
def piecewise_transform(image, numcols=5, numrows=5, warp_left_right=10, warp_up_down=10, order=1):
"""2D piecewise affine transformation.
Control points are used to define the mapping. The transform is based on
a Delaunay triangulation of the points to form a mesh. Each triangle is
used to find a local affine transform.
Parameters
----------
img : ndarray
numcols : int, optional (default: 5)
numbers of the colums to transformation
numrows : int, optional (default: 5)
numbers of the rows to transformation
warp_left_right: int, optional (default: 10)
the pixels of transformation left and right
warp_up_down: int, optional (default: 10)
the pixels of transformation up and down
Returns
-------
Transformed_image : ndarray
Examples
--------
>>> Transformed_img = piecetransform(image,numcols=10, numrows=10, warp_left_right=5, warp_up_down=5)
"""
rows, cols = image.shape[0], image.shape[1]
numcols = numcols
numrows = numrows
src_cols = np.linspace(0, cols, numcols, dtype=int)
src_rows = np.linspace(0, rows, numrows, dtype=int)
src_rows, src_cols = np.meshgrid(src_rows, src_cols)
src = np.dstack([src_cols.flat, src_rows.flat])[0]
src_rows_new = np.ndarray.transpose(src_rows)
src_cols_new = np.ndarray.transpose(src_cols)
# src_new = np.dstack([src_cols_new.flat, src_rows_new.flat])[0]
dst_cols = np.ndarray(src_cols.shape)
dst_rows = np.ndarray(src_rows.shape)
for i in range(0, numcols):
for j in range(0, numrows):
if src_cols[i, j] == 0 or src_cols[i, j] == cols:
dst_cols[i, j] = src_cols[i, j]
else:
dst_cols[i, j] = src_cols[i, j] + np.random.uniform(-1, 1) * warp_left_right
if src_rows[i, j] == 0 or src_rows[i, j] == rows:
dst_rows[i, j] = src_rows[i, j]
else:
dst_rows[i, j] = src_rows[i, j] + np.random.uniform(-1, 1) * warp_up_down
dst = np.dstack([dst_cols.flat, dst_rows.flat])[0]
# dst_rows_new = np.ndarray.transpose(dst_rows)
# dst_cols_new = np.ndarray.transpose(dst_cols)
# dst_new = np.dstack([dst_cols_new.flat, dst_rows_new.flat])[0]
tform = transform.PiecewiseAffineTransform()
tform.estimate(src, dst)
img_new = transform.warp(image, tform, output_shape=(rows, cols), order=order, preserve_range=True)
img_new = img_new.astype(image.dtype)
return img_new
if __name__ == '__main__':
img = tifffile.imread('sample-data/MUL_AOI_4_Shanghai_img1920.tif')
# tifffile.imshow(img[:,:,[3,2,1]])
img = img.astype(np.int32)
out = resize(img, (224,224))
# out = filters.gaussian_filter(img, sigma=2)
# out2 = gaussian_blur(img, dtype='uint16')
# out2 = to_tensor(out2)
# out3 = to_ndarray(out2, dtype='uint16')
# pass | 33.85877 | 113 | 0.62722 | 2,069 | 14,864 | 4.398743 | 0.179314 | 0.0178 | 0.014284 | 0.014064 | 0.284914 | 0.220525 | 0.185804 | 0.160861 | 0.132073 | 0.123723 | 0 | 0.017907 | 0.252355 | 14,864 | 439 | 114 | 33.85877 | 0.801044 | 0.323802 | 0 | 0.226415 | 0 | 0 | 0.080192 | 0.004473 | 0 | 0 | 0 | 0.004556 | 0.009434 | 1 | 0.080189 | false | 0 | 0.066038 | 0.009434 | 0.245283 | 0.004717 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0406ca3b56a00f0e66ae48222f85061d51626795 | 2,096 | py | Python | phase_one_legacy/CPSC2020_challenge.py | DeepPSP/cpsc2020 | 47acb884ea1f2f819e564d8a17ad37001ed0df27 | [
"BSD-3-Clause"
] | 1 | 2021-12-07T11:44:48.000Z | 2021-12-07T11:44:48.000Z | phase_one_legacy/CPSC2020_challenge.py | wenh06/cpsc2020 | 47acb884ea1f2f819e564d8a17ad37001ed0df27 | [
"BSD-3-Clause"
] | null | null | null | phase_one_legacy/CPSC2020_challenge.py | wenh06/cpsc2020 | 47acb884ea1f2f819e564d8a17ad37001ed0df27 | [
"BSD-3-Clause"
] | 1 | 2021-05-25T14:56:02.000Z | 2021-05-25T14:56:02.000Z | """
"""
from numbers import Real
from typing import Tuple
import numpy as np
import xgboost as xgb
from cfg import FeatureCfg
from signal_processing.ecg_preproc import preprocess_signal, parallel_preprocess_signal
from signal_processing.ecg_features import compute_ecg_features
from models.load_model import load_model
import utils
def CPSC2020_challenge(ECG:np.ndarray, fs:Real=400) -> Tuple[np.ndarray,np.ndarray]:
"""
% This function can be used for events 1 and 2. Participants are free to modify any
% components of the code. However the function prototype must stay the same
% [S_pos,V_pos] = CPSC2020_challenge(ECG,fs) where the inputs and outputs are specified
% below.
%
%% Inputs
% ECG : raw ecg vector signal 1-D signal
% fs : sampling rate
%
%% Outputs
% S_pos : the position where SPBs detected
% V_pos : the position where PVCs detected
%
%
%
% Copyright (C) 2020 Dr. Chengyu Liu
% Southeast university
% chengyu@seu.edu.cn
%
% Last updated : 02-23-2020
"""
# ====== arrhythmias detection =======
sig = np.array(ECG).copy().flatten()
pps = parallel_preprocess_signal(sig, fs) # use default config in `cfg`
filtered_ecg = pps['filtered_ecg']
rpeaks = pps['rpeaks']
filtered_rpeaks = rpeaks[np.where( (rpeaks>=FeatureCfg.beat_winL) & (rpeaks<len(sig)-FeatureCfg.beat_winR) )[0]]
features = compute_ecg_features(filtered_ecg, filtered_rpeaks)
model = load_model(field='ml')
# if model is None:
# model = train()
if isinstance(model, dict):
if model.get("feature_scaler", None):
features = model["feature_scaler"].transform(features)
model = model["model"]
if type(model).__name__ == "Booster":
# xgboost native Booster
y_pred = model.predict(xgb.DMatrix(features))
else:
y_pred = model.predict(features)
S_pos, V_pos = utils.pred_to_indices(
y_pred, filtered_rpeaks,
class_map=FeatureCfg.class_map
)
return S_pos, V_pos
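# Usage sketch (the file name is hypothetical; any 1-D ECG array sampled at `fs` works):
# >>> ecg = np.load('A01.npy')  # hypothetical single-lead recording
# >>> S_pos, V_pos = CPSC2020_challenge(ecg, fs=400)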
| 29.521127 | 116 | 0.664122 | 275 | 2,096 | 4.890909 | 0.476364 | 0.011896 | 0.011152 | 0.017844 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016854 | 0.235687 | 2,096 | 70 | 117 | 29.942857 | 0.822722 | 0.334447 | 0 | 0 | 0 | 0 | 0.046476 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.3 | 0 | 0.366667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04082a89f90f437c84a8722bc7b20a9571cf67d9 | 770 | py | Python | fabfile.py | bitbyt3r/sideboard | 45e13011a664543352d51ce073cfa9635c748bb7 | [
"BSD-3-Clause"
] | 4 | 2015-02-18T20:38:42.000Z | 2021-11-17T10:10:34.000Z | fabfile.py | bitbyt3r/sideboard | 45e13011a664543352d51ce073cfa9635c748bb7 | [
"BSD-3-Clause"
] | 84 | 2015-07-23T12:23:24.000Z | 2018-08-04T05:09:30.000Z | fabfile.py | bitbyt3r/sideboard | 45e13011a664543352d51ce073cfa9635c748bb7 | [
"BSD-3-Clause"
] | 10 | 2015-02-10T13:38:18.000Z | 2020-05-23T20:01:36.000Z | from __future__ import unicode_literals
import time
import os.path
import yaml
import ship_it
__here__ = os.path.abspath(os.path.dirname(__file__))
MANIFEST_YAML = os.path.join(__here__, 'manifest.yaml')
MANIFEST_TEMPLATE = MANIFEST_YAML + '.template'
def _populate_manifest_and_invoke_fpm(iteration):
import sideboard
with open(MANIFEST_TEMPLATE) as f:
        manifest = yaml.safe_load(f)
manifest[b'version'] = sideboard.__version__
manifest[b'iteration'] = iteration
with open(MANIFEST_YAML, 'w') as f:
yaml.dump(manifest, f)
ship_it.fpm(MANIFEST_YAML)
def fpm_stable(iteration):
_populate_manifest_and_invoke_fpm(iteration)
def fpm_testing():
_populate_manifest_and_invoke_fpm(b'0.{}'.format(int(time.time())))
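# Example invocation, assuming Fabric 1.x-style task syntax (iteration value illustrative):
#   fab fpm_stable:iteration=1
#   fab fpm_testing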
| 23.333333 | 71 | 0.736364 | 104 | 770 | 5.009615 | 0.375 | 0.138196 | 0.109405 | 0.143954 | 0.195777 | 0.142035 | 0 | 0 | 0 | 0 | 0 | 0.001543 | 0.158442 | 770 | 32 | 72 | 24.0625 | 0.802469 | 0 | 0 | 0 | 0 | 0 | 0.055844 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.285714 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
040c5f0060d1142800ee3d1c3e85730acd35bbb5 | 1,238 | py | Python | drone/tests/settings.py | grafke/Drone-workflow-controller | 468581206dff149733c1d60999c01d28cc1691ad | [
"Apache-2.0"
] | 1 | 2017-05-22T18:24:57.000Z | 2017-05-22T18:24:57.000Z | drone/tests/settings.py | grafke/Drone-workflow-controller | 468581206dff149733c1d60999c01d28cc1691ad | [
"Apache-2.0"
] | null | null | null | drone/tests/settings.py | grafke/Drone-workflow-controller | 468581206dff149733c1d60999c01d28cc1691ad | [
"Apache-2.0"
] | null | null | null | import logging
import os
application_name = 'Workflow manager'
aws_jobs_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config/aws_jobs_config.json')
aws_jobs_config_schema = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config/aws_jobs_config_schema.json')
remote_jobs_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config/remote_jobs_config.json')
remote_jobs_config_schema = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config/remote_jobs_config_schema.json')
# Metadata
metadata = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'dev-metadata.db')
metadata_history_days = 7
metadata_future_days = 7
# Logging
log_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'logs/application.log')
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(log_file)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
# Scheduler
schedule_interval_seconds = 1
supported_dependencies = ['job_completed']
supported_job_types = ['ssh', 'emr'] | 38.6875 | 126 | 0.791599 | 176 | 1,238 | 5.221591 | 0.306818 | 0.117519 | 0.065288 | 0.078346 | 0.413493 | 0.413493 | 0.413493 | 0.413493 | 0.413493 | 0.413493 | 0 | 0.002591 | 0.06462 | 1,238 | 32 | 127 | 38.6875 | 0.791019 | 0.021002 | 0 | 0 | 0 | 0 | 0.206782 | 0.105873 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
040e9704fcff74d4af32c9baaee7f590a5b9b3e1 | 21,025 | py | Python | validator.py | ITRI-AIdea/CTSP-extrusion-scheduling | 5322f0c0418d824e74988cdfa63855e5bae73024 | [
"Apache-2.0"
] | 1 | 2020-09-17T03:34:46.000Z | 2020-09-17T03:34:46.000Z | validator.py | ITRI-AIdea/CTSP-extrusion-scheduling | 5322f0c0418d824e74988cdfa63855e5bae73024 | [
"Apache-2.0"
] | null | null | null | validator.py | ITRI-AIdea/CTSP-extrusion-scheduling | 5322f0c0418d824e74988cdfa63855e5bae73024 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 Industrial Technology Research Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import argparse
import pandas as pd
from query_table import valid_prod_no, valid_prod_line, valid_keys, valid_k_line, \
width_constraint, type_transition, composition_transition, tune_hour_state, code_type_transition, \
state_transition, special_order_code, initial_state, mfg_transition
def handle_validation_errors(obj, msg):
"""Wrap validation errors.
"""
obj.check_pass = False
obj.check_msg = msg
return obj.check_pass, obj.check_msg
def generate_date_list(start_date, end_date):
date_list = []
end_date = end_date + datetime.timedelta(1)
for n in range(int((end_date - start_date).days)):
date_list.append((start_date + datetime.timedelta(n)).strftime('%Y-%m-%d'))
return date_list
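# e.g. generate_date_list(datetime.date(2019, 7, 1), datetime.date(2019, 7, 3))
# returns ['2019-07-01', '2019-07-02', '2019-07-03'] (the end date is inclusive).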
class DatetimeRange:
def __init__(self, dt1, dt2):
self._dt1 = dt1
self._dt2 = dt2
def __contains__(self, dt):
"""Check if the date is in valid date range"""
return self._dt1 <= dt <= self._dt2
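# e.g. datetime(2019, 7, 2) in DatetimeRange(datetime(2019, 7, 1), datetime(2019, 7, 31))
# evaluates to True (both endpoints are inclusive).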
class Validator:
"""Validate submission file"""
def __init__(self, order_file, json_file, start_date, end_date):
# 1. Check for JSON format.
try:
with open(json_file) as f:
self.data = json.load(f)
self.check_pass = True
self.check_msg = 'Submission file is valid.'
except Exception as e:
self.data = None
self.check_pass = False
self.check_msg = str(e)
self.start_date = start_date
self.end_date = end_date
try:
self.order_df = pd.read_csv(order_file, index_col='order_code') # Get order information
except Exception as e:
self.order_df = None
self.check_pass = False
self.check_msg = str(e)
def validate_dates(self):
"""2. Check the scheduled date is valid."""
tmp_date = None
if self.check_pass:
for date, v in self.data.items():
try:
datetime.datetime.strptime(date, '%Y-%m-%d')
except ValueError:
msg = '{}: Wrong datetime format.'.format(date)
return handle_validation_errors(self, msg)
current_date = datetime.datetime.strptime(date, '%Y-%m-%d')
start_date = datetime.datetime.strptime(self.start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(self.end_date, '%Y-%m-%d')
if current_date not in DatetimeRange(start_date, end_date):
msg = 'Scheduled date is not in valid range. ({} to {})'.format(self.start_date, self.end_date)
return handle_validation_errors(self, msg)
if not tmp_date:
tmp_date = date
else:
if datetime.datetime.strptime(date, '%Y-%m-%d') < datetime.datetime.strptime(tmp_date, '%Y-%m-%d'):
msg = '{}, {}: Wrong date order.'.format(date, tmp_date)
return handle_validation_errors(self, msg)
tmp_date = date
valid_date = generate_date_list(start_date, end_date)
if not all(list(i in list(self.data.keys()) for i in valid_date)):
msg = 'Not all dates are included.'
return handle_validation_errors(self, msg)
return self.check_pass, self.check_msg
def check_valid_schedule(self):
init_count = 0
order_set = set()
amount_dict = {}
if self.check_pass:
for date, lines in self.data.items():
line_per_day = []
open_line_per_day = set()
init_count = init_count + 1
for line_no, line_data in lines.items():
line_per_day.append(line_no)
# 3. Check for production lines.
if line_no not in valid_prod_line:
msg = '{}, {}: Wrong production lines.'.format(date, line_no)
return handle_validation_errors(self, msg)
if not isinstance(line_data, list):
msg = '{}, {}: Scheduled items should be a list.'.format(date, line_no)
return handle_validation_errors(self, msg)
if len(line_data) == 0:
msg = '{}, {}: Scheduled items should not be empty.'.format(date, line_no)
return handle_validation_errors(self, msg)
cul_hours = 0
for data in line_data:
type_change = False
df_composition = None
header = '{},{}: '.format(date, line_no)
# 4. Check for order code, product code, hours and mfg_width.
if set(valid_keys) != set(data.keys()):
msg = header + 'Some keys are missing in {}.'.format(valid_keys)
return handle_validation_errors(self, msg)
header = '{},{},{}: '.format(date, line_no, data['order_code'])
if not isinstance(data['order_code'], str):
msg = header + '"order_code" is not string.'
return handle_validation_errors(self, msg)
if data['order_code'] not in special_order_code + self.order_df.index.tolist():
msg = header + 'Invalid "order_code".'
return handle_validation_errors(self, msg)
if data['order_code'] not in special_order_code:
order_set.add(data['order_code'])
if not isinstance(data['product_code'], str):
msg = header + '"product_code" is not string.'
return handle_validation_errors(self, msg)
if data['product_code'] not in valid_prod_no and data['product_code'] not in special_order_code:
msg = header + 'Invalid "product_code".'
return handle_validation_errors(self, msg)
if not isinstance(data['hours'], int):
msg = header + '"hours" is not integer.'
return handle_validation_errors(self, msg)
if data['hours'] < 0:
msg = header + 'Invalid "hours".'
return handle_validation_errors(self, msg)
if not isinstance(data['mfg_width'], int):
msg = header + '"mfg_width" is not integer.'
return handle_validation_errors(self, msg)
if data['mfg_width'] < 0:
msg = header + 'Invalid "mfg_width".'
return handle_validation_errors(self, msg)
# 5. Check for date constraints.
code_type_transition[line_no].append(data['order_code'])
if data['order_code'] != 'stop':
open_line_per_day.add(line_no)
if data['order_code'] not in special_order_code:
if len(code_type_transition[line_no]) > 0:
if len(code_type_transition[line_no]) == 1 or code_type_transition[line_no][-2] == 'stop':
msg = header + 'You should tune the machine (tune_8 or tune_48) before start.'
return handle_validation_errors(self, msg)
if self.order_df.loc[data['order_code'], 'product_code'] != data['product_code']:
msg = header + 'Mismatched "order_code" and "product_code".'
return handle_validation_errors(self, msg)
tune_hours = tune_hour_state[line_no]
tune_hour_state[line_no] = 0
try:
not_before = datetime.datetime.strptime(
self.order_df.loc[data['order_code'], 'not_before'].split('T')[0], '%Y-%m-%d')
not_after = datetime.datetime.strptime(
self.order_df.loc[data['order_code'], 'not_after'].split('T')[0], '%Y-%m-%d')
prod_date = datetime.datetime.strptime(date, '%Y-%m-%d')
if prod_date not in DatetimeRange(not_before, not_after):
msg = header + 'Production schedule is out of range.'
return handle_validation_errors(self, msg)
except Exception as e:
msg = str(e)
return handle_validation_errors(self, msg)
df_code = self.order_df.loc[data['order_code'], 'material']
df_composition = self.order_df.loc[data['order_code'], 'composition']
# 6. Check for production line constraints.
if df_code == 'MS':
if line_no != 'C1':
msg = header + 'Invalid line assignment for MS material.'
return handle_validation_errors(self, msg)
if 'K' in data['product_code']:
if line_no not in valid_k_line:
msg = header + 'Invalid line assignment for product code starting with K.'
return handle_validation_errors(self, msg)
# 7. Check for width constraints.
try:
product_type = self.order_df.loc[data['order_code'], 'type']
width = self.order_df.loc[data['order_code'], 'width']
type_transition[line_no].append(product_type)
if not width_constraint[line_no]['max_mfg_width'].get(product_type, None):
msg = header + 'Mismatched "type: {}" and "line: {}".'.format(product_type, line_no)
return handle_validation_errors(self, msg)
if data['mfg_width'] > width_constraint[line_no]['max_mfg_width'][product_type]:
msg = header + '"mfg_width" exceeds production limit.'
return handle_validation_errors(self, msg)
if width > width_constraint[line_no]['max_width'][product_type]:
msg = header + '"width" exceeds production limit.'
return handle_validation_errors(self, msg)
if product_type == 'lenti' and (data['mfg_width'] - width) < 70:
msg = header + '"mfg_width" should be at least 70mm wider than "width" for type "lenti".'
return handle_validation_errors(self, msg)
elif product_type == 'plate' and (data['mfg_width'] - width) < 50:
msg = header + '"mfg_width" should be at least 50mm wider than "width" for type "plate".'
return handle_validation_errors(self, msg)
except Exception as e:
msg = str(e)
return handle_validation_errors(self, msg)
last_type = initial_state[line_no]
if len(type_transition[line_no]) > 1:
type_list = type_transition[line_no]
last_type = type_list[-2]
""" 8. Check if the machine of specific production line restart while:
a. The type of product changed. (tune for 48 hours)
"""
if last_type and last_type != product_type:
type_change = True
if code_type_transition[line_no][-2] != 'tune_48' or tune_hours != 48:
msg = header + 'Type changed, you should tune the machine for 48 hours (tune_48).'
return handle_validation_errors(self, msg)
""" 8. Check if the machine of specific production line restart while:
b. The composition of product changed from 8% or 100% to 0%. (tune for 8 hours)
"""
last_composition = None
last_state = None
if len(state_transition[line_no]) > 0:
last_state = state_transition[line_no][-1]
if len(composition_transition[line_no]) > 0:
last_composition = composition_transition[line_no][-1]
if last_composition and last_composition != '0%':
if df_composition and df_composition == '0%':
valid_state = 'tune_8'
valid_tune_hours = 8
if type_change:
valid_state = 'tune_48'
valid_tune_hours = 48
if last_state != valid_state or tune_hours != valid_tune_hours:
msg = header + 'Composition changed to 0%, you should tune the machine for {} hours. ({})'.format(
valid_tune_hours, valid_state)
return handle_validation_errors(self, msg)
if len(mfg_transition[line_no]) > 0:
valid_tune_state = 'tune_8'
valid_tune_hours = 8
if type_change:
valid_tune_state = 'tune_48'
valid_tune_hours = 48
if data['mfg_width'] != mfg_transition[line_no][-1]:
if last_state != valid_tune_state or tune_hours != valid_tune_hours:
msg = header + '"mfg_width" changed, you should tune the machine for {} hours. ({})'.format(
valid_tune_hours, valid_tune_state)
return handle_validation_errors(self, msg)
mfg_transition[line_no].append(data['mfg_width'])
# Calculate production quantity for each order
amount_dict[data['order_code']] = data['hours'] * 125 + amount_dict.get(
data['order_code'], 0)
elif data['order_code'] == 'tune_8':
if data['product_code'] != 'tune_8':
msg = header + 'Mismatched "order_code" and "product_code".'
return handle_validation_errors(self, msg)
if len(code_type_transition[line_no]) > 1 and code_type_transition[line_no][-2] == 'tune_48':
msg = header + '"tune_48" cannot be followed by "tune_8".'
return handle_validation_errors(self, msg)
if data['hours'] > 8:
msg = header + 'Invalid tune hours for "tune_8".'
return handle_validation_errors(self, msg)
tune_hour_state[line_no] += data['hours']
if tune_hour_state[line_no] > 8:
msg = header + 'You cannot tune more than 8 hours for "tune_8".'
return handle_validation_errors(self, msg)
elif data['order_code'] == 'tune_48':
if data['product_code'] != 'tune_48':
msg = header + 'Mismatched "order_code" and "product_code".'
return handle_validation_errors(self, msg)
if len(code_type_transition[line_no]) > 1 and code_type_transition[line_no][-2] == 'tune_8':
msg = header + '"tune_8" cannot be followed by "tune_48".'
return handle_validation_errors(self, msg)
if data['hours'] > 24:
msg = header + 'Invalid tune hours for "tune_48".'
return handle_validation_errors(self, msg)
tune_hour_state[line_no] += data['hours']
if tune_hour_state[line_no] > 48:
msg = header + 'You cannot tune more than 48 hours for "tune_48".'
return handle_validation_errors(self, msg)
cul_hours += data['hours']
state_transition[line_no].append(data['order_code'])
if df_composition:
composition_transition[line_no].append(df_composition)
header = '{},{}: '.format(date, line_no)
if cul_hours != 24:
msg = header + 'Working hours should be equal to 24 per day.'
return handle_validation_errors(self, msg)
# 9. Check if the number of opened production lines are between 2~6.
if set(line_per_day) != set(valid_prod_line):
msg = 'Missing schedule for some production lines.'
return handle_validation_errors(self, msg)
if len(open_line_per_day) not in range(2, 7):
msg = 'The number of open production lines should be between 2 and 6.'
return handle_validation_errors(self, msg)
order_in_range = self.order_df
# 10. Check if all orders are included.
if order_set != set(order_in_range.index):
                msg = 'Not all orders are included.'
return handle_validation_errors(self, msg)
# 11. Check if the product amount is valid.
for order, amount in amount_dict.items():
if amount != order_in_range.loc[order, 'quantity']:
msg = 'Wrong production quantity for order: {}.'.format(order)
return handle_validation_errors(self, msg)
return self.check_pass, self.check_msg
if __name__ == '__main__':
parser = argparse.ArgumentParser("validator")
parser.add_argument("--order_file", default='orders_2019.csv', type=str)
parser.add_argument("--submit_file", default='submission_example.json', type=str)
parser.add_argument("--start_date", default='2019-07-01', type=str)
parser.add_argument("--end_date", default='2019-12-31', type=str)
args = parser.parse_args()
val = Validator(args.order_file, args.submit_file, args.start_date, args.end_date)
val.validate_dates()
val.check_valid_schedule()
print(val.check_msg)
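# Example run using the argparse defaults (the two files are assumed to exist locally):
#   python validator.py --order_file orders_2019.csv --submit_file submission_example.json \
#       --start_date 2019-07-01 --end_date 2019-12-31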
| 58.893557 | 142 | 0.489132 | 2,175 | 21,025 | 4.484598 | 0.122299 | 0.075456 | 0.101497 | 0.126307 | 0.527578 | 0.442895 | 0.39512 | 0.312487 | 0.252922 | 0.228214 | 0 | 0.013527 | 0.426873 | 21,025 | 356 | 143 | 59.058989 | 0.795934 | 0.055648 | 0 | 0.282686 | 0 | 0 | 0.131301 | 0.001187 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024735 | false | 0.031802 | 0.017668 | 0 | 0.222615 | 0.003534 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04106049336ef1fc6553bc1a2a1fb97e19a5a539 | 12,974 | py | Python | bertil.py | apptitlig/DarthVader | cb99252bff2db2fef26f49b849aabb922e949636 | [
"MIT"
] | null | null | null | bertil.py | apptitlig/DarthVader | cb99252bff2db2fef26f49b849aabb922e949636 | [
"MIT"
] | null | null | null | bertil.py | apptitlig/DarthVader | cb99252bff2db2fef26f49b849aabb922e949636 | [
"MIT"
] | null | null | null | import sys
import requests
import re
import discord
import logging
import argparse
import os
import random
from metno_locationforecast import Place, Forecast
import giphy_client
from giphy_client.rest import ApiException
import xml.etree.ElementTree as ET
import datetime as dt
import statistics as s
import locale
import calendar
api_instance = giphy_client.DefaultApi()
def main(args):
lingus = Place("Lingus", 64.1659735, 20.9219466, 15)
sikea_forecast = Forecast(lingus, "metno-locationforecast/1.0 https://github.com/apptitlig")
umea = Place("Umee", 63.8390388, 20.3381108, 16)
umea_forecast = Forecast(umea, "metno-locationforecast/1.0 https://github.com/apptitlig")
mittemellan = Place("mittemellan", 63.9262669, 20.4252715, 16)
mittemellan_forecast = Forecast(umea, "metno-locationforecast/1.0 https://github.com/apptitlig")
start_discord_listener(args.api_key, args.api_key_giphy, args.channel, args.user, sikea_forecast, umea_forecast, mittemellan_forecast)
async def search_gifs(query, api_key_giphy):
try:
response = api_instance.gifs_search_get(api_key_giphy,
query, limit=5, rating='g')
lst = list(response.data)
gif = random.choices(lst)
return gif[0].url
except ApiException as e:
return "Exception when calling DefaultApi->gifs_search_get: %s\n" % e
except IndexError as e:
response = api_instance.gifs_search_get(api_key_giphy,
"possum", limit=5, rating='g')
lst = list(response.data)
gif = random.choices(lst)
return gif[0].url
async def dot_aligned(seq):
snums = [str(seq)]
dots = [s.find('.') for s in snums]
return [' '*(3 - d) + s for s, d in zip(snums, dots)][0]
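# e.g. await dot_aligned(3.5) returns '  3.5': values are left-padded so that
# the decimal points line up when printed in a column.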
async def forecastDay(f,days, i):
temp, nb, blast, start, end = await getValuesWithList(f, i)
alignedtemp = await dot_aligned(temp)
forecast_string = start + " - " + end + ": " + '{:7}'.format(str(alignedtemp)) + '{:6}'.format(str(blast)) + str(nb) + "\n"
return forecast_string
async def forecast2day(f, days, time):
time0 = f[time].start_time
time1 = f[time].end_time
temp, nb, blast, _, _ = await getValuesWithList(f, time)
alignedtemp = await dot_aligned(temp)
forecast_string = str(time0.time())[0:5] + " - " + str(time1.time())[0:5] + ": " + "{:5.1f}".format(float(alignedtemp)) + "{:5.1f}".format(blast) + " " + str(nb) + "\n"
return forecast_string
async def forecast1day(f, days, start, end):
temp = 0
nb = 0
blast = 0
time0 = f[start].start_time
time1 = f[end].end_time
for j in range(start, end):
temp1, nb1, blast1, _, _ = await getValuesWithList(f, j)
temp = temp + temp1
nb = nb + nb1
blast = blast + blast1
temp = temp/(end - start)
nb = nb/(end - start)
blast = blast/(end - start)
alignedtemp = await dot_aligned(temp)
forecast_string = str(time0.time())[0:5] + " - " + str(time1.time())[0:5] + ": " + "{:5.1f}".format(float(alignedtemp)) + "{:5.1f}".format(blast) + " " + str(nb) + "\n"
return forecast_string
async def minMaxDay(f):
min = f[0].variables["air_temperature"].value
max = f[0].variables["air_temperature"].value
for i in range(len(f)):
t = f[i].variables["air_temperature"].value
if t > max:
max = t
if t < min:
min = t
alignedmax = await dot_aligned(max)
alignedmin = await dot_aligned(min)
return '{:10}'.format(str(alignedmin) + " " + str(alignedmax))
async def getValuesWithList(forecast, i):
temp = forecast[i].variables["air_temperature"].value
nb = forecast[i].variables["precipitation_amount"].value
blast = forecast[i].variables["wind_speed"].value
start = forecast[i].start_time
end = forecast[i].end_time
return temp, nb, blast, str(start.time())[0:5], str(end.time())[0:5]
async def getValuesWithData(forecast, i):
temp = forecast.data.intervals[i].variables["air_temperature"].value
nb = forecast.data.intervals[i].variables["precipitation_amount"].value
blast = forecast.data.intervals[i].variables["wind_speed"].value
start = forecast.data.intervals[i].start_time
end = forecast.data.intervals[i].end_time
return temp, nb, blast, str(start.time())[0:5], str(end.time())[0:5]
async def addWeekday(days):
d = calendar.day_name[days.weekday()]
return '{:9}'.format(str(d))
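# e.g. for a Monday this returns 'Monday   ' (padded to 9 characters so the
# forecast columns line up; the day name itself is locale-dependent).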
async def prognosMinMax(forecast, length):
forecast.update()
forecast_string = "```Dag Min Max \n";
for i in range(1, length+1):
days = dt.date.today() + dt.timedelta(days=i)
f = forecast.data.intervals_for(days)
day = f[0].start_time
forecast_string = forecast_string + str(day.day) + " " + await addWeekday(days) + " " + await minMaxDay(f)
forecast_string = forecast_string + "\n"
return forecast_string + "```"
async def prognosN(forecast, length):
forecast.update()
forecast_string = "```Tid Temp Blåst Nederbörd\n";
for i in range(1, length+1):
days = dt.date.today() + dt.timedelta(days=i)
f = forecast.data.intervals_for(days)
forecast_string = forecast_string + await addWeekday(days) + "\n"
if len(f) == 24:
forecast_string = forecast_string + await forecast1day(f, days, 0, 5) + await forecast1day(f, days, 6, 11) + await forecast1day(f ,days, 12, 17) + await forecast1day(f, days, 18, 23)
elif len(f) == 19:
forecast_string = forecast_string + await forecast1day(f, days, 0, 5) + await forecast1day(f, days, 6, 11) + await forecast1day(f ,days, 12, 17) + await forecast2day(f, days, 18)
elif len(f) == 14:
forecast_string = forecast_string + await forecast1day(f, days, 0, 5) + await forecast1day(f, days, 6, 11) + await forecast2day(f, days, 12) + await forecast2day(f, days, 13)
else:
forecast_string = forecast_string + await forecastDay(f, days, 0) + await forecastDay(f, days, 1) + await forecastDay(f, days, 2) + await forecastDay(f,days, 3)
forecast_string = forecast_string + "\n"
return forecast_string + "```"
async def prognos(forecast):
forecast.update()
forecast_string = "```Tid Temp Blåst Nederbörd\n";
for i in range(5):
temp, nb, blast, start, end = await getValuesWithData(forecast, i)
alignedtemp = await dot_aligned(temp)
forecast_string = forecast_string + start + " - " + end + ": " + '{:7}'.format(str(alignedtemp)) + '{:6}'.format(str(blast)) + str(nb) + "\n"
temp, nb, blast, start, end = await getValuesWithData(forecast, 0)
time = int(start[0:2])
symbol = forecast.data.intervals[0].symbol_code
emoji = ''
if (symbol == 'cloudy'):
emoji = emoji + random.choice([":cloud: "])
if (symbol == 'clearsky' or symbol == 'fair' or symbol == "clearsky_day"):
emoji = emoji + random.choice([":sun_with_face: ", ":sunny: "])
if (symbol == 'fog'):
emoji = emoji + random.choice([":fog: "])
if (symbol.find('rain') > 1 or symbol.find('sleet') > 1):
emoji = emoji + random.choice([":rain_cloud: ", ":white_sun_rain_cloud: "])
if (symbol.find('thunder') > 1):
emoji = emoji + random.choice([":cloud_lightning: "])
if (symbol.find('snow') > 1):
emoji = emoji + random.choice([":cloud_snow: ", ":snowflake: ",":snowman: ", ":snowman2: "])
if (symbol.find('night') > 1):
emoji = emoji + random.choice([":star:", ":star2:"])
if (blast >= 4):
emoji = emoji + random.choice([":dash:", ":wind_blowing_face:"])
if (blast > 14):
emoji = emoji + ":cloud_tornado:"
forecast_string = forecast_string + "```\n" + emoji
return forecast_string
def start_discord_listener(api_key, api_key_giphy, subscribed_channels, user, sikea_forecast, umea_forecast, mittemellan_forecast):
client = discord.Client()
global points
points = 10
@client.event
async def on_ready():
logging.info(f"Logged in as {client.user}")
@client.event
async def on_message(message):
if message.author == client.user:
logging.debug(f"Ignoring message sent by myself.")
return
if str(message.channel) not in subscribed_channels:
logging.debug(f"Ignoring message sent in channel other than {subscribed_channels}.")
return
supercold = ["antarctica", "polarbear", "blizzard", "snow dog", "icy", "polar"]
somewhatcold = ["cold", "freezing", "brrr", "skiing", "snowmobile", "ice ice baby", "ice king", "gunther", "titanic", "winter", "olaf", "snowflake", "frost", "chill", "below zero"]
spring = ["thaw", "spring", "flower bud", "daffodil", "butterfly", "tulip", "puddles", "kid (baby goat)", "daisy", "bird nest" ]
patternsVader = re.findall("v[v]*ä[ä]*d[d]*e[e]*r[r]*", message.content.lower())
patternsVadret = re.findall("v[v]*ä[ä]*d[d]*r[r]*e[e]*t[t]*", message.content.lower())
patternsVadur = re.findall("v[v]*ä[ä]*d[d]*u[u]*r[r]*", message.content.lower())
global points
if (str(message.author) == str(user[0]) and (points > 5)):
isprognosorvader = message.content.split()[0].lower()
if ((isprognosorvader == "prognos") and (len( message.content.split()) > 1 ) ):
tosearchfor = message.content.split()[1].lower()
if(tosearchfor != "umeå" and tosearchfor != "sikeå" and tosearchfor != "u" and tosearchfor != "s"):
gif = await search_gifs(tosearchfor, api_key_giphy)
await message.channel.send(str(gif))
points = points - 5
if len(patternsVader) > 0 or len(patternsVadret) > 0 or len(patternsVadur) > 0 :
points = points + 2
data_umea = requests.get("http://www8.tfe.umu.se/vadertjanst/service1.asmx/Temp")
root = ET.fromstring(data_umea.content)
degree_umea = root.text
sikea_forecast.update()
search_string = ""
if (sikea_forecast.data.intervals[0].variables["air_temperature"].value < -11):
search_string = random.choice(supercold)
elif (sikea_forecast.data.intervals[0].variables["air_temperature"].value < 0):
search_string = random.choice(somewhatcold)
else:
search_string = random.choice(spring)
gif = await search_gifs(search_string, api_key_giphy)
await message.channel.send(f"Umeå: " + str(degree_umea) + "\nSikeå: " + str(sikea_forecast.data.intervals[0].variables["air_temperature"].value) +"\n" + str(gif))
patternsSikea = re.findall("prognos s", message.content.lower())
if len(patternsSikea) > 0:
points = points + 2
ans = await prognos(sikea_forecast)
await message.channel.send(ans)
patternsUmea = re.findall("prognos u", message.content.lower())
if len(patternsUmea) > 0:
points = points + 2
ans = await prognos(umea_forecast)
await message.channel.send(ans)
patternsN = re.findall("prognos [1-9][1-9]*", message.content.lower())
if len(patternsN) > 0:
points = points + 2
splitmessage = message.content.lower().split()
days = int(splitmessage[1])
if days < 5:
ans = await prognosN(mittemellan_forecast, days)
else:
ans = await prognosMinMax(mittemellan_forecast, days)
await message.channel.send(ans)
client.run(api_key)
def parse_args():
parser = argparse.ArgumentParser(description="A Discord Ume weather bot ")
parser.add_argument("--api-key",
help="Relevant Discord API key.",
default=os.environ.get("BERTIL_API_KEY"))
parser.add_argument("--api-key-giphy",
help="Relevant Giphy API key.",
default=os.environ.get("BERTIL_API_KEY_GIPHY"))
parser.add_argument("--channel",
action="append",
help="A channel which Berit listens in. May be supplied multiple times.",
required=True)
parser.add_argument("--user",
action="append",
help="A user to joke with.",
required=True)
args = parser.parse_args()
if args.api_key is None:
logging.error("API supplied neither by --api-key or env variable BERTIL_API_KEY.")
sys.exit(1)
return args
if __name__ == "__main__":
main(parse_args()) | 39.196375 | 196 | 0.602821 | 1,596 | 12,974 | 4.793233 | 0.209273 | 0.058562 | 0.030196 | 0.036601 | 0.445229 | 0.378039 | 0.335163 | 0.273987 | 0.239346 | 0.193203 | 0 | 0.023992 | 0.254663 | 12,974 | 331 | 197 | 39.196375 | 0.767115 | 0 | 0 | 0.228 | 0 | 0.004 | 0.137033 | 0.017726 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012 | false | 0 | 0.068 | 0 | 0.148 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04132235b420e3a12e7967bb4163ff51bb7ddaa1 | 7,497 | py | Python | kra/collectors/pods.py | smpio/kube-resource-analyzer | d214b7c32bc5e404ac951f66cf0914fcae1f580f | [
"MIT"
] | null | null | null | kra/collectors/pods.py | smpio/kube-resource-analyzer | d214b7c32bc5e404ac951f66cf0914fcae1f580f | [
"MIT"
] | 50 | 2021-05-26T14:15:09.000Z | 2021-07-24T12:08:14.000Z | kra/collectors/pods.py | smpio/kube-resource-analyzer | d214b7c32bc5e404ac951f66cf0914fcae1f580f | [
"MIT"
] | null | null | null | import queue
import logging
import kubernetes
import kubernetes.client.rest
from django.db import IntegrityError
from django.utils import timezone
from utils.threading import SupervisedThread, SupervisedThreadGroup
from utils.kubernetes.watch import KubeWatcher, WatchEventType
from utils.signal import install_shutdown_signal_handlers
from utils.django.db import retry_on_connection_close
from kra import kube
from kra import models
log = logging.getLogger(__name__)
def main():
install_shutdown_signal_handlers()
q = queue.Queue()
threads = SupervisedThreadGroup()
threads.add_thread(WatcherThread(q))
threads.add_thread(HandlerThread(q))
threads.start_all()
threads.wait_any()
class WatcherThread(SupervisedThread):
def __init__(self, queue):
super().__init__()
self.queue = queue
def run_supervised(self):
v1 = kubernetes.client.CoreV1Api()
for event_type, pod in KubeWatcher(v1.list_pod_for_all_namespaces):
self.queue.put((event_type, pod))
class HandlerThread(SupervisedThread):
def __init__(self, queue):
super().__init__()
self.queue = queue
self.initial_pods = set()
self.handle = self.handle_initial_event
def run_supervised(self):
while True:
event_type, pod = self.queue.get()
try:
self.handle(event_type, pod)
except Exception:
log.exception('Failed to handle %s on pod %s/%s',
event_type.name, pod.metadata.namespace, pod.metadata.name)
def handle_initial_event(self, event_type, pod):
if event_type == WatchEventType.ADDED:
self.initial_pods.add(pod.metadata.uid)
if event_type == WatchEventType.DONE_INITIAL:
self.handle = self.handle_normal_event
self.initial_cleanup()
else:
self.handle_normal_event(event_type, pod)
@retry_on_connection_close()
def handle_normal_event(self, event_type, pod):
log.info('%s %s/%s', event_type.name, pod.metadata.namespace, pod.metadata.name)
if event_type in (WatchEventType.ADDED, WatchEventType.MODIFIED):
self.handle_update(pod)
elif event_type == WatchEventType.DELETED:
self.handle_delete(pod)
def handle_delete(self, pod):
now = timezone.now()
models.Pod.objects.filter(uid=pod.metadata.uid, gone_at=None).update(gone_at=now)
models.Container.objects.filter(pod__uid=pod.metadata.uid, finished_at=None).update(finished_at=now)
def handle_update(self, pod):
if pod.status.start_time is None:
# Pod is creating, and not started yet
return
update_pod(pod)
def initial_cleanup(self):
now = timezone.now()
pod_qs = models.Pod.objects.filter(gone_at=None).exclude(uid__in=self.initial_pods)
count = pod_qs.update(gone_at=now)
log.info('Marked %d pods as gone', count)
container_qs = models.Container.objects.filter(finished_at=None).exclude(pod__uid__in=self.initial_pods)
count = container_qs.update(finished_at=now)
log.info('Marked %d containers as finished', count)
del self.initial_pods
def update_pod(pod):
data = {
'namespace': pod.metadata.namespace,
'name': pod.metadata.name,
'spec_hash': get_pod_spec_hash(pod),
'started_at': pod.status.start_time,
'gone_at': None,
}
try:
try:
data['workload'] = get_workload_from_pod(pod)
except kubernetes.client.rest.ApiException as e:
if e.status == 404:
log.info('Failed to get workload for pod %s/%s: Not Found',
pod.metadata.namespace, pod.metadata.name)
else:
raise e
except Exception:
log.warning('Failed to get workload for pod %s/%s',
pod.metadata.namespace, pod.metadata.name, exc_info=True)
mypod, _ = models.Pod.objects.update_or_create(uid=pod.metadata.uid, defaults=data)
update_containers(pod, mypod)
def update_containers(pod, mypod):
mycontainers = {}
for container in pod.spec.containers:
data = kube.get_container_resources(container)
data['name'] = container.name
mycontainers[container.name] = data
for container_status in pod.status.container_statuses or []:
runtime_id = None
started_at = None
finished_at = None
if container_status.state.running:
started_at = container_status.state.running.started_at
elif container_status.state.terminated:
started_at = container_status.state.terminated.started_at
finished_at = container_status.state.terminated.finished_at
runtime_id = kube.parse_container_runtime_id(container_status.state.terminated.container_id)
runtime_id = kube.parse_container_runtime_id(container_status.container_id) or runtime_id
mycontainers[container_status.name]['runtime_id'] = runtime_id
mycontainers[container_status.name]['started_at'] = started_at
mycontainers[container_status.name]['finished_at'] = finished_at
for name, data in mycontainers.items():
runtime_id = data.pop('runtime_id', None)
if not runtime_id:
log.info('No runtime_id for container %s in pod %s/%s', name, pod.metadata.namespace, pod.metadata.name)
continue
if not data.get('started_at'):
log.info('No started_at for container %s in pod %s/%s', name, pod.metadata.namespace, pod.metadata.name)
try:
c, _ = models.Container.objects.update_or_create(pod=mypod, runtime_id=runtime_id, defaults=data)
except IntegrityError as err:
if data.get('started_at'):
raise err
def get_workload_from_pod(pod):
owner = get_owner_recursive(pod)
if owner is None:
return None
kind = models.WorkloadKind[owner.kind]
if kind == models.WorkloadKind.DaemonSet:
affinity = None
else:
affinity = get_affinity_from_pod(pod)
wl, _ = models.Workload.objects.update_or_create(
kind=kind,
namespace=pod.metadata.namespace,
name=owner.metadata.name,
defaults={
'affinity': affinity,
}
)
return wl
def get_affinity_from_pod(pod):
if pod.spec.affinity:
affinity = pod.spec.affinity.to_dict()
else:
affinity = {}
if pod.spec.node_selector:
affinity['node_selector'] = pod.spec.node_selector
return affinity or None
def get_owner_recursive(obj):
if not obj.metadata.owner_references:
return None
for ref in obj.metadata.owner_references:
if not ref.controller:
continue
read_func = kind_to_read_func(ref.kind)
owner = read_func(ref.name, obj.metadata.namespace)
if owner is None:
return None
superowner = get_owner_recursive(owner)
if superowner is not None:
return superowner
return owner
return None
def kind_to_read_func(kind):
if kind == 'Node':
return lambda name, ns: None
return kube.read_funcs[models.WorkloadKind[kind]]
def get_pod_spec_hash(pod):
return pod.metadata.labels.get('controller-revision-hash') or pod.metadata.labels.get('pod-template-hash') or ''
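# e.g. a Deployment-managed pod typically carries a label like
# {'pod-template-hash': '5bf87d9c66'} (hash value illustrative), so the
# returned spec hash distinguishes pod template revisions of the same workload.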
if __name__ == '__main__':
main()
| 32.175966 | 116 | 0.661064 | 941 | 7,497 | 5.041445 | 0.176408 | 0.048693 | 0.033727 | 0.029089 | 0.247892 | 0.193508 | 0.102024 | 0.102024 | 0.090641 | 0.06914 | 0 | 0.00106 | 0.244631 | 7,497 | 232 | 117 | 32.314655 | 0.836659 | 0.004802 | 0 | 0.157303 | 0 | 0 | 0.060196 | 0.003218 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095506 | false | 0 | 0.067416 | 0.005618 | 0.241573 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
041532c661f3a5511414b8b85873664deacbfef7 | 5,571 | py | Python | src/python/grpcio_tests/tests/unit/_early_ok_test.py | pawelz/grpc | 373866da41cf86ec23fa3420b08a3d0a55151b58 | [
"Apache-2.0"
] | 2 | 2015-03-19T04:32:48.000Z | 2016-08-16T07:27:00.000Z | src/python/grpcio_tests/tests/unit/_early_ok_test.py | pawelz/grpc | 373866da41cf86ec23fa3420b08a3d0a55151b58 | [
"Apache-2.0"
] | null | null | null | src/python/grpcio_tests/tests/unit/_early_ok_test.py | pawelz/grpc | 373866da41cf86ec23fa3420b08a3d0a55151b58 | [
"Apache-2.0"
] | 6 | 2016-02-02T16:31:48.000Z | 2017-06-18T16:02:20.000Z | # Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests servicers sending OK status without having read all requests.
This is a regression test of https://github.com/grpc/grpc/issues/6891.
"""
import enum
import unittest
import six
import grpc
from tests.unit import test_common
from tests.unit.framework.common import test_constants
_RPC_METHOD = '/serffice/Meffod'
@enum.unique
class _MessageCount(enum.Enum):
ZERO = (
0,
'Zero',
)
    TWO = (
        2,
        'Two',
    )
MANY = (
test_constants.STREAM_LENGTH,
'Many',
)
@enum.unique
class _MessageSize(enum.Enum):
EMPTY = (
0,
'Empty',
)
SMALL = (
32,
'Small',
) # Smaller than any flow control window.
LARGE = (
3 * 1024 * 1024,
'Large',
) # Larger than any flow control window.
_ZERO_MESSAGE = b''
_SMALL_MESSAGE = b'\x07' * _MessageSize.SMALL.value[0]
_LARGE_MESSAGE = b'abc' * (_MessageSize.LARGE.value[0] // 3)
@enum.unique
class _ReadRequests(enum.Enum):
ZERO = (
0,
'Zero',
)
TWO = (
2,
'Two',
)
class _Case(object):
def __init__(self, request_count, request_size, request_reading,
response_count, response_size):
self.request_count = request_count
self.request_size = request_size
self.request_reading = request_reading
self.response_count = response_count
self.response_size = response_size
def create_test_case_name(self):
return '{}{}Requests{}Read{}{}ResponsesEarlyOKTest'.format(
self.request_count.value[1], self.request_size.value[1],
self.request_reading.value[1], self.response_count.value[1],
self.response_size.value[1])
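# e.g. a case with two small requests, zero requests read, and many empty
# responses produces the test case name
# 'TwoSmallRequestsZeroReadManyEmptyResponsesEarlyOKTest'.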
def _message(message_size):
if message_size is _MessageSize.EMPTY:
return _ZERO_MESSAGE
elif message_size is _MessageSize.SMALL:
return _SMALL_MESSAGE
elif message_size is _MessageSize.LARGE:
return _LARGE_MESSAGE
def _messages_to_send(count, size):
for _ in range(count.value[0]):
yield _message(size)
def _draw_requests(case, request_iterator):
for _ in range(
min(case.request_count.value[0], case.request_reading.value[0])):
next(request_iterator)
def _draw_responses(case, response_iterator):
for _ in range(case.response_count.value[0]):
next(response_iterator)
class _MethodHandler(grpc.RpcMethodHandler):
def __init__(self, case):
self.request_streaming = True
self.response_streaming = True
self.request_deserializer = None
self.response_serializer = None
self.unary_unary = None
self.unary_stream = None
self.stream_unary = None
self._case = case
def stream_stream(self, request_iterator, servicer_context):
_draw_requests(self._case, request_iterator)
for response in _messages_to_send(self._case.response_count,
self._case.response_size):
yield response
class _GenericHandler(grpc.GenericRpcHandler):
def __init__(self, case):
self._case = case
def service(self, handler_call_details):
return _MethodHandler(self._case)
class _EarlyOkTest(unittest.TestCase):
def setUp(self):
self._server = test_common.test_server()
port = self._server.add_insecure_port('[::]:0')
self._server.add_generic_rpc_handlers((_GenericHandler(self.case),))
self._server.start()
self._channel = grpc.insecure_channel('localhost:%d' % port)
self._multi_callable = self._channel.stream_stream(_RPC_METHOD)
def tearDown(self):
self._server.stop(None)
def test_early_ok(self):
requests = _messages_to_send(self.case.request_count,
self.case.request_size)
response_iterator_call = self._multi_callable(requests)
_draw_responses(self.case, response_iterator_call)
self.assertIs(grpc.StatusCode.OK, response_iterator_call.code())
def _cases():
for request_count in _MessageCount:
for request_size in _MessageSize:
for request_reading in _ReadRequests:
for response_count in _MessageCount:
for response_size in _MessageSize:
yield _Case(request_count, request_size,
request_reading, response_count,
response_size)
def _test_case_classes():
for case in _cases():
yield type(case.create_test_case_name(), (_EarlyOkTest,), {
'case': case,
'__module__': _EarlyOkTest.__module__,
})
def load_tests(loader, tests, pattern):
return unittest.TestSuite(
tests=tuple(
loader.loadTestsFromTestCase(test_case_class)
for test_case_class in _test_case_classes()))
if __name__ == '__main__':
unittest.main(verbosity=2)
| 26.913043 | 77 | 0.651948 | 658 | 5,571 | 5.211246 | 0.296353 | 0.027997 | 0.011665 | 0.020997 | 0.106153 | 0.068241 | 0.036162 | 0.036162 | 0.036162 | 0.036162 | 0 | 0.010654 | 0.258661 | 5,571 | 206 | 78 | 27.043689 | 0.819613 | 0.137498 | 0 | 0.132353 | 0 | 0 | 0.028846 | 0.008779 | 0 | 0 | 0 | 0 | 0.007353 | 1 | 0.117647 | false | 0 | 0.044118 | 0.022059 | 0.316176 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04155fb87c6918dbbb91b9c5ba9e2e21796f8713 | 4,000 | py | Python | torchaudio/models/_wavernn.py | lbjcom/audio | 990bb5e57b66c92254365fdd6e43a12d9d0b7c78 | [
"BSD-2-Clause"
] | 1 | 2020-03-22T21:21:41.000Z | 2020-03-22T21:21:41.000Z | torchaudio/models/_wavernn.py | lbjcom/audio | 990bb5e57b66c92254365fdd6e43a12d9d0b7c78 | [
"BSD-2-Clause"
] | null | null | null | torchaudio/models/_wavernn.py | lbjcom/audio | 990bb5e57b66c92254365fdd6e43a12d9d0b7c78 | [
"BSD-2-Clause"
] | null | null | null | from torch import Tensor
from torch import nn
__all__ = ["_ResBlock", "_MelResNet"]
class _ResBlock(nn.Module):
r"""This is a ResNet block layer. This layer is based on the paper "Deep Residual Learning
for Image Recognition". Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. CVPR, 2016.
It is a block used in WaveRNN. WaveRNN is based on the paper "Efficient Neural Audio Synthesis".
Nal Kalchbrenner, Erich Elsen, Karen Simonyan, Seb Noury, Norman Casagrande, Edward Lockhart,
Florian Stimberg, Aaron van den Oord, Sander Dieleman, Koray Kavukcuoglu. arXiv:1802.08435, 2018.
Args:
num_dims: the number of compute dimensions in the input (default=128).
Examples::
>>> resblock = _ResBlock(num_dims=128)
>>> input = torch.rand(10, 128, 512)
>>> output = resblock(input)
"""
def __init__(self, num_dims: int = 128) -> None:
super().__init__()
self.resblock_model = nn.Sequential(
nn.Conv1d(in_channels=num_dims, out_channels=num_dims, kernel_size=1, bias=False),
nn.BatchNorm1d(num_dims),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=num_dims, out_channels=num_dims, kernel_size=1, bias=False),
nn.BatchNorm1d(num_dims)
)
def forward(self, x: Tensor) -> Tensor:
r"""Pass the input through the _ResBlock layer.
Args:
x: the input sequence to the _ResBlock layer (required).
Shape:
- x: :math:`(N, S, T)`.
- output: :math:`(N, S, T)`.
where N is the batch size, S is the number of input sequence,
T is the length of input sequence.
"""
residual = x
return self.resblock_model(x) + residual
class _MelResNet(nn.Module):
r"""This is a MelResNet layer based on a stack of ResBlocks. It is a block used in WaveRNN.
WaveRNN is based on the paper "Efficient Neural Audio Synthesis". Nal Kalchbrenner, Erich Elsen,
Karen Simonyan, Seb Noury, Norman Casagrande, Edward Lockhart, Florian Stimberg, Aaron van den Oord,
Sander Dieleman, Koray Kavukcuoglu. arXiv:1802.08435, 2018.
Args:
res_blocks: the number of ResBlock in stack (default=10).
input_dims: the number of input sequence (default=100).
hidden_dims: the number of compute dimensions (default=128).
output_dims: the number of output sequence (default=128).
        pad: padding size; the kernel size of the first Conv1d layer is (pad * 2 + 1) (default=2).
Examples::
>>> melresnet = _MelResNet(res_blocks=10, input_dims=100,
hidden_dims=128, output_dims=128, pad=2)
>>> input = torch.rand(10, 100, 512)
>>> output = melresnet(input)
"""
def __init__(self, res_blocks: int = 10,
input_dims: int = 100,
hidden_dims: int = 128,
output_dims: int = 128,
pad: int = 2) -> None:
super().__init__()
kernel_size = pad * 2 + 1
ResBlocks = []
        for _ in range(res_blocks):
ResBlocks.append(_ResBlock(hidden_dims))
self.melresnet_model = nn.Sequential(
nn.Conv1d(in_channels=input_dims, out_channels=hidden_dims, kernel_size=kernel_size, bias=False),
nn.BatchNorm1d(hidden_dims),
nn.ReLU(inplace=True),
*ResBlocks,
nn.Conv1d(in_channels=hidden_dims, out_channels=output_dims, kernel_size=1)
)
def forward(self, x: Tensor) -> Tensor:
r"""Pass the input through the _MelResNet layer.
Args:
x: the input sequence to the _MelResNet layer (required).
Shape:
- x: :math:`(N, S, T)`.
- output: :math:`(N, P, T - 2 * pad)`.
where N is the batch size, S is the number of input sequence,
P is the number of output sequence, T is the length of input sequence.
"""
return self.melresnet_model(x)
| 37.735849 | 109 | 0.61775 | 529 | 4,000 | 4.52552 | 0.257089 | 0.026316 | 0.041353 | 0.025063 | 0.540936 | 0.490393 | 0.450292 | 0.428571 | 0.376775 | 0.376775 | 0 | 0.037347 | 0.28375 | 4,000 | 105 | 110 | 38.095238 | 0.798255 | 0.54725 | 0 | 0.2 | 0 | 0 | 0.012227 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.05 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
041644d221552d8023aebddea739493fa792f1c1 | 10,211 | py | Python | mmdet/models/relation_heads/approaches/transformer.py | yizhe-ang/MMSceneGraph | d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba | [
"MIT"
] | 24 | 2021-10-14T03:28:28.000Z | 2022-03-29T09:30:04.000Z | mmdet/models/relation_heads/approaches/transformer.py | yizhe-ang/MMSceneGraph | d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba | [
"MIT"
] | 4 | 2021-12-14T15:04:49.000Z | 2022-02-19T09:54:42.000Z | mmdet/models/relation_heads/approaches/transformer.py | yizhe-ang/MMSceneGraph | d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba | [
"MIT"
] | 4 | 2021-10-31T11:23:06.000Z | 2021-12-17T06:38:50.000Z | # ---------------------------------------------------------------
# transformer.py
# Set-up time: 2021/3/26 11:21
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: wenbin.wang@vipl.ict.ac.cn [OR] nkwangwenbin@gmail.com
# ---------------------------------------------------------------
import torch
from torch import nn
from torch.nn.utils.rnn import PackedSequence
from torch.nn import functional as F
from mmcv.cnn import kaiming_init
import copy
import math
from .motif_util import obj_edge_vectors, encode_box_info, to_onehot
def clones(module, N):
"Produce N identical layers."
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
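# e.g. clones(nn.Linear(512, 512), 3) yields a ModuleList of three independent
# (deep-copied) Linear layers, each with its own parameters.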
class Encoder(nn.Module):
"Core encoder is a stack of N layers"
def __init__(self, layer, N):
super(Encoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, num_objs):
"Pass the input (and mask) through each layer in turn."
split_x = x.split(num_objs, 0)
max_num = max(num_objs)
num_atts = torch.LongTensor(num_objs).to(x.device)
bs = len(num_objs)
mask = torch.arange(0, max_num, device=x.device).long().unsqueeze(0).expand(bs, max_num).lt(
num_atts.unsqueeze(1)).long().unsqueeze(-2) # B * 1 * MAXN
padded_x = nn.utils.rnn.pad_sequence(split_x, batch_first=True) # B * MAXN * D
for layer in self.layers:
padded_x = layer(padded_x, mask)
padded_x = self.norm(padded_x)
# restore:
return torch.cat([padded_x[i, :n] for i, n in enumerate(num_objs)], 0)
class LayerNorm(nn.Module):
"Construct a layernorm module (See citation for details)."
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
return x + self.dropout(sublayer(self.norm(x)))
class EncoderLayer(nn.Module):
"Encoder is made up of self-attn and feed forward (defined below)"
def __init__(self, size, self_attn, feed_forward, dropout):
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, x, mask):
"Follow Figure 1 (left) for connections."
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
def attention(query, key, value, mask=None, dropout=None):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) \
/ math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, float('-inf'))
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
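# Shape sketch (sizes illustrative): with query/key/value of shape
# (batch, heads, seq_len, d_k), `scores` is (batch, heads, seq_len, seq_len)
# and the returned context tensor is (batch, heads, seq_len, d_k); masked
# positions get -inf scores and therefore zero attention weight after softmax.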
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
"Implements Figure 2"
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = \
[l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask=mask,
dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous() \
.view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
class TransformerContext(nn.Module):
"""
    Modified from neural-motifs to encode the context for each object
"""
def __init__(self, config, obj_classes, rel_classes):
super(TransformerContext, self).__init__()
self.cfg = config
self.obj_classes = obj_classes
self.rel_classes = rel_classes
self.num_obj_classes = len(obj_classes)
in_channels = self.cfg.roi_dim
self.use_gt_box = self.cfg.use_gt_box
self.use_gt_label = self.cfg.use_gt_label
# mode
if self.cfg.use_gt_box:
if self.cfg.use_gt_label:
self.mode = 'predcls'
else:
self.mode = 'sgcls'
else:
self.mode = 'sgdet'
# word embedding
self.embed_dim = self.cfg.embed_dim
self.obj_embed1 = nn.Embedding(self.num_obj_classes, self.embed_dim)
self.obj_embed2 = nn.Embedding(self.num_obj_classes, self.embed_dim)
obj_embed_vecs = obj_edge_vectors(self.obj_classes, wv_dir=self.cfg.glove_dir, wv_dim=self.embed_dim)
with torch.no_grad():
self.obj_embed1.weight.copy_(obj_embed_vecs, non_blocking=True)
self.obj_embed2.weight.copy_(obj_embed_vecs, non_blocking=True)
# position embedding
self.pos_embed = nn.Sequential(*[
nn.Linear(9, 32), nn.ReLU(inplace=True), nn.Dropout(0.1),
nn.Linear(32, 128), nn.ReLU(inplace=True), nn.Dropout(0.1)])
# object & relation context
self.obj_dim = in_channels
self.dropout_rate = self.cfg.dropout_rate
self.hidden_dim = self.cfg.hidden_dim
self.nl_obj = self.cfg.context_object_layer
self.nl_edge = self.cfg.context_edge_layer
assert self.nl_obj > 0 and self.nl_edge > 0
# transformer
self.num_head = self.cfg.num_head
self.inner_dim = self.cfg.inner_dim
self.k_dim = self.cfg.k_dim
self.v_dim = self.cfg.v_dim
self.lin_obj = nn.Linear(self.obj_dim + self.embed_dim + 128, self.hidden_dim)
self.lin_edge = nn.Linear(self.obj_dim + self.embed_dim + self.hidden_dim, self.hidden_dim)
self.out_obj = nn.Linear(self.hidden_dim, self.num_obj_classes)
# make model
c = copy.deepcopy
attn = MultiHeadedAttention(self.num_head, self.hidden_dim, self.dropout_rate)
ff = PositionwiseFeedForward(self.hidden_dim, self.inner_dim, self.dropout_rate)
self.context_obj = Encoder(EncoderLayer(self.hidden_dim, c(attn), c(ff), self.dropout_rate), self.nl_obj)
self.context_edge = Encoder(EncoderLayer(self.hidden_dim, c(attn), c(ff), self.dropout_rate), self.nl_edge)
def init_weights(self):
for m in self.pos_embed:
if isinstance(m, nn.Linear):
kaiming_init(m, distribution='uniform', a=1)
kaiming_init(self.lin_obj, distribution='uniform', a=1)
kaiming_init(self.lin_edge, distribution='uniform', a=1)
def forward(self, x, det_result):
# labels will be used in DecoderRNN during training (for nms)
if self.training or self.use_gt_box: # predcls or sgcls or training, just put obj_labels here
obj_labels = torch.cat(det_result.labels)
else:
obj_labels = None
if self.use_gt_label: # predcls
obj_embed = self.obj_embed1(obj_labels.long())
else:
obj_dists = torch.cat(det_result.dists, dim=0).detach()
obj_embed = obj_dists @ self.obj_embed1.weight
pos_embed = self.pos_embed(encode_box_info(det_result)) # N x 128
batch_size = x.shape[0]
num_objs = [len(b) for b in det_result.bboxes]
obj_pre_rep = torch.cat((x, obj_embed, pos_embed), -1) # N x (1024 + 200 + 128)
obj_pre_rep = self.lin_obj(obj_pre_rep) # N x hidden_dim
obj_feats = self.context_obj(obj_pre_rep, num_objs)
if self.mode != 'predcls':
            obj_dists = self.out_obj(obj_feats)
            # argmax over foreground classes only (index 0 is background)
            obj_preds = obj_dists[:, 1:].max(1)[1] + 1
else:
assert obj_labels is not None
obj_preds = obj_labels
obj_dists = to_onehot(obj_preds, self.num_obj_classes)
obj_embed2 = self.obj_embed2(obj_preds.long())
edge_pre_rep = torch.cat((obj_embed2, x, obj_feats), -1)
edge_pre_rep = self.lin_edge(edge_pre_rep)
edge_ctx = self.context_edge(edge_pre_rep, num_objs)
return obj_dists, obj_preds, edge_ctx
| 39.42471 | 116 | 0.613358 | 1,443 | 10,211 | 4.125433 | 0.210672 | 0.027045 | 0.019654 | 0.01999 | 0.146985 | 0.101125 | 0.101125 | 0.101125 | 0.048043 | 0.020494 | 0 | 0.014969 | 0.267261 | 10,211 | 258 | 117 | 39.577519 | 0.780674 | 0.15072 | 0 | 0.060109 | 0 | 0 | 0.057156 | 0 | 0 | 0 | 0 | 0 | 0.016393 | 1 | 0.092896 | false | 0.005464 | 0.043716 | 0.005464 | 0.224044 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
041669faf857834385d32fa6e3c5831018664782 | 1,463 | py | Python | ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/params.py | kennyballou/ambari | 8985bcf11296d540a861a8634c17d6b9b1accd5a | [
"Apache-2.0"
] | null | null | null | ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/params.py | kennyballou/ambari | 8985bcf11296d540a861a8634c17d6b9b1accd5a | [
"Apache-2.0"
] | null | null | null | ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/package/scripts/params.py | kennyballou/ambari | 8985bcf11296d540a861a8634c17d6b9b1accd5a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
# os is used below for path handling; import it explicitly instead of
# relying on the wildcard import above to provide it
import os
# server configurations
config = Script.get_config()
hdp_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
hive_conf_dir = os.environ["HIVE_CONF_DIR"]
hive_home = os.environ["HIVE_HOME"]
hive_lib_dir = os.environ["HIVE_LIB_DIR"]
hive_log_dir = os.environ["HIVE_LOG_DIR"]
hive_opts = os.environ["HIVE_OPTS"]
hcat_home = os.environ["HCAT_HOME"]
hcat_config_dir = os.environ["WEBHCAT_CONF_DIR"]
hive_env_sh_template = config['configurations']['hive-env']['content']
hive_warehouse_dir = config['configurations']['hive-site']['hive.metastore.warehouse.dir']
hive_user = "hadoop"
hadoop_user = "hadoop"
hcat_user = "hadoop"
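# Hedged note (not part of the original file): every os.environ lookup above
# raises KeyError when the variable is unset; a defensive variant could
# supply a fallback, e.g. (the fallback path is hypothetical):
# hive_log_dir = os.environ.get("HIVE_LOG_DIR", "/var/log/hive")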
| 36.575 | 90 | 0.777854 | 225 | 1,463 | 4.906667 | 0.484444 | 0.065217 | 0.058877 | 0.043478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003115 | 0.122351 | 1,463 | 39 | 91 | 37.512821 | 0.856698 | 0.545455 | 0 | 0 | 0 | 0 | 0.291603 | 0.042748 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0418a4c5df55b14564ea87b3b8b09ac1269d87e7 | 5,375 | py | Python | research/capsules/models/capsule_model_test.py | TuKJet/models | 984fbc754943c849c55a57923f4223099a1ff88c | [
"Apache-2.0"
] | 3,326 | 2018-01-26T22:42:25.000Z | 2022-02-16T13:16:39.000Z | research/capsules/models/capsule_model_test.py | lianlengyunyu/models | 984fbc754943c849c55a57923f4223099a1ff88c | [
"Apache-2.0"
] | 150 | 2017-08-28T14:59:36.000Z | 2022-03-11T23:21:35.000Z | research/capsules/models/capsule_model_test.py | lianlengyunyu/models | 984fbc754943c849c55a57923f4223099a1ff88c | [
"Apache-2.0"
] | 1,474 | 2018-02-01T04:33:18.000Z | 2022-03-08T07:02:20.000Z | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for capsule_model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from models import capsule_model
class CapsuleModelTest(tf.test.TestCase):
def setUp(self):
self.hparams = tf.contrib.training.HParams(
learning_rate=0.001,
decay_rate=0.96,
decay_steps=1,
num_prime_capsules=2,
padding='SAME',
leaky=False,
routing=3,
verbose=False,
loss_type='softmax',
remake=False)
def testBuildCapsule(self):
"""Checks the correct shape of capsule output and total number of variables.
The output shape should be [batch, 10, 16]. Also each capsule layer should
declare 2 sets of variables (weight and bias), therefore single call to
_build_capsule declares 4 variables for a total of 2 capsule layers.
"""
with tf.Graph().as_default():
test_model = capsule_model.CapsuleModel(self.hparams)
toy_input = np.reshape(np.arange(256 * 14 * 14), (1, 1, 256, 14, 14))
input_tensor = tf.constant(toy_input, dtype=tf.float32)
output = test_model._build_capsule(input_tensor, 10)
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual(len(trainable_vars), 4)
_, capsules, atoms = output.get_shape()
self.assertListEqual([10, 16], [capsules.value, atoms.value])
def testInference(self):
"""Checks the correct shape of capsule output and total number of variables.
The output logit shape should be [batch, 10]. Also each layer should
declare 2 sets of variables (weight and bias), therefore single call to
inference declares 6 variables for a total of 3 layers.
"""
with tf.Graph().as_default():
test_model = capsule_model.CapsuleModel(self.hparams)
toy_image = np.reshape(np.arange(32 * 32), (1, 1, 32, 32))
input_image = tf.constant(toy_image, dtype=tf.float32)
features = {
'height': 32,
'depth': 1,
'num_classes': 10,
'images': input_image
}
output = test_model.inference(features)
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual(len(trainable_vars), 6)
_, classes = output.logits.get_shape()
self.assertEqual(10, classes.value)
def testIntegrity(self):
"""Checks a multi_gpu call on CapsuleModel builds the desired graph.
With the correct inference graph, multi_gpu is able to call inference
multiple times without any increase in number of trainable variables or a
duplication error.
"""
with tf.Graph().as_default():
test_model = capsule_model.CapsuleModel(self.hparams)
toy_image = np.reshape(np.arange(32 * 32), (1, 1, 32, 32))
input_image = tf.constant(toy_image, dtype=tf.float32)
features = {
'height': 32,
'depth': 1,
'images': input_image,
'labels': tf.one_hot([2], 10),
'num_classes': 10,
'num_targets': 1,
}
_, tower_output = test_model.multi_gpu([features, features, features], 3)
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual(len(trainable_vars), 6)
_, classes = tower_output[0].logits.get_shape()
self.assertEqual(10, classes.value)
def testInferenceWithRemake(self):
"""Checks the correct shape of remakes and total number of variables.
The reconstruction should have same shape as input. Each remake network
should declare 6 sets of variables (weight and bias) and different targets
should share the variables.
"""
with tf.Graph().as_default():
self.hparams.parse('remake=True,verbose=True')
test_model = capsule_model.CapsuleModel(self.hparams)
toy_image = np.reshape(np.arange(32 * 32), (1, 1, 32, 32))
input_image = tf.constant(toy_image, dtype=tf.float32)
features = {
'height': 32,
'depth': 1,
'images': input_image,
'recons_image': input_image,
'spare_image': input_image,
'recons_label': tf.constant([2]),
'spare_label': tf.constant([2]),
'num_targets': 2,
'num_classes': 10,
}
output = test_model.inference(features)
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual(len(trainable_vars), 12)
remake_1, remake_2 = output.remakes
self.assertEqual(32 * 32, remake_1.get_shape()[1].value)
self.assertEqual(32 * 32, remake_2.get_shape()[1].value)
if __name__ == '__main__':
tf.test.main()
| 38.120567 | 80 | 0.667721 | 708 | 5,375 | 4.911017 | 0.289548 | 0.020708 | 0.012655 | 0.014955 | 0.466207 | 0.423066 | 0.399195 | 0.399195 | 0.399195 | 0.372735 | 0 | 0.03281 | 0.217488 | 5,375 | 140 | 81 | 38.392857 | 0.793866 | 0.320186 | 0 | 0.431818 | 0 | 0 | 0.056668 | 0.006766 | 0 | 0 | 0 | 0 | 0.102273 | 1 | 0.056818 | false | 0 | 0.068182 | 0 | 0.136364 | 0.011364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0419b144ee62ca5173b31ea60d250f1248135bb3 | 1,876 | py | Python | extract.py | alxdembo/vtx | 992be3c7c7f62da7f32c0d01e2ff7d709b5b531e | [
"MIT"
] | null | null | null | extract.py | alxdembo/vtx | 992be3c7c7f62da7f32c0d01e2ff7d709b5b531e | [
"MIT"
] | null | null | null | extract.py | alxdembo/vtx | 992be3c7c7f62da7f32c0d01e2ff7d709b5b531e | [
"MIT"
] | null | null | null | import pandas as pd
class CargoData:
"""
Class to perform various calculations over Cargo Data
"""
def __init__(self, source_file):
self.source_file = source_file
# Rows containing NA values are dropped due to unsuitability for the calculations required
self.source_df = pd.read_parquet(source_file, engine='pyarrow', use_nullable_dtypes=True, columns=[
"start_timestamp",
"end_timestamp",
"vessel_class",
"id"
]).dropna()
def get_duration(self):
"""
Returns movement duration time
:return: Pandas series of movement duration time deltas
"""
return self.source_df.end_timestamp.astype("datetime64") - self.source_df.start_timestamp.astype("datetime64")
def get_duration_per_class(self):
"""
Returns movement duration time per class
:return: Pandas series of movement duration time deltas with class as an index
"""
duration_per_class_df = self.source_df[["id", "vessel_class"]].copy()
duration_per_class_df["duration"] = self.get_duration()
return duration_per_class_df.groupby("vessel_class")["duration"].sum()
def print_movements(self):
"""
Prints movement durations and movement durations per class
"""
        # work on a copy so printing does not mutate the source dataframe
        duration_df = self.source_df.copy()
        duration_df["duration"] = self.get_duration()
print("\nMovement duration: \n", duration_df.to_string())
print("\nMovement duration per class: \n", self.get_duration_per_class().to_string())
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("source", help="A source file to extract movements from.")
args = parser.parse_args()
#
cargo_data = CargoData(args.source)
cargo_data.print_movements()
| 32.912281 | 118 | 0.658316 | 222 | 1,876 | 5.292793 | 0.382883 | 0.054468 | 0.081702 | 0.045957 | 0.173617 | 0.078298 | 0.078298 | 0.078298 | 0 | 0 | 0 | 0.002809 | 0.240938 | 1,876 | 56 | 119 | 33.5 | 0.822331 | 0.218017 | 0 | 0 | 0 | 0 | 0.167766 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.071429 | 0 | 0.321429 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
041a87fc4a089e3e22b6f87780c61e67f9da57a4 | 8,329 | py | Python | GUI/GUI_SMLM_localResolution.py | MaximilianBeckers/SPOC | 55b4f82f20bb3ca7bf653904fc5da23e8d019c00 | [
"BSD-3-Clause"
] | 7 | 2020-03-15T00:53:04.000Z | 2020-09-01T07:55:56.000Z | GUI/GUI_SMLM_localResolution.py | MaximilianBeckers/SPOC | 55b4f82f20bb3ca7bf653904fc5da23e8d019c00 | [
"BSD-3-Clause"
] | null | null | null | GUI/GUI_SMLM_localResolution.py | MaximilianBeckers/SPOC | 55b4f82f20bb3ca7bf653904fc5da23e8d019c00 | [
"BSD-3-Clause"
] | null | null | null | from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from SMLMUtil import SMLM
from scipy import ndimage
import numpy as np
import time, os
import matplotlib.pyplot as plt
# ********************************
# ******* resolution window ******
# *********************************
class SMLMLocalResolutionWindow(QWidget):
def __init__(self):
super(SMLMLocalResolutionWindow, self).__init__();
layout = QFormLayout();
# ------------ now required input
layout.addRow(' ', QHBoxLayout()); # make some space
requiredLabel = QLabel("Required Input", self);
requiredLabel.setFont(QFont('Arial', 17));
layout.addRow(requiredLabel, QHBoxLayout());
# add input file
hbox_localizations = QHBoxLayout();
self.fileLine_localizations = QLineEdit();
searchButton_localizations = self.searchFileButton_localizations();
hbox_localizations.addWidget(self.fileLine_localizations);
hbox_localizations.addWidget(searchButton_localizations);
layout.addRow('Localizations', hbox_localizations);
#or
orLabel = QLabel("or two half-images:", self);
orLabel.setFont(QFont('Arial', 15));
layout.addRow( '', orLabel);
# ------- read two images
# add input file
hbox_image1 = QHBoxLayout();
self.fileLine_image1 = QLineEdit();
searchButton_image1 = self.searchFileButton_image1();
hbox_image1.addWidget(self.fileLine_image1);
hbox_image1.addWidget(searchButton_image1);
layout.addRow('Image 1', hbox_image1);
hbox_image2 = QHBoxLayout();
self.fileLine_image2 = QLineEdit();
searchButton_image2 = self.searchFileButton_image2();
hbox_image2.addWidget(self.fileLine_image2);
hbox_image2.addWidget(searchButton_image2);
layout.addRow('Image 2', hbox_image2);
#pixel size input
self.apix = QLineEdit();
self.apix.setText('2');
layout.addRow('Pixel size to be used [nm]', self.apix);
# ------------ now optional input
layout.addRow(' ', QHBoxLayout()); # make some space
optionLabel = QLabel("Optional Input", self);
optionLabel.setFont(QFont('Arial', 17));
layout.addRow(optionLabel, QHBoxLayout());
# window size input
self.windowSize = QLineEdit();
self.windowSize.setText('500');
layout.addRow('Size of sliding window [pixels]', self.windowSize);
# step size input
self.stepSize = QLineEdit();
self.stepSize.setText('200');
layout.addRow('Step size of sliding window [pixels]', self.stepSize);
# low resolution limit input
self.lowRes = QLineEdit();
self.lowRes.setText('None');
layout.addRow('Low resolution limit [nm]', self.lowRes);
# add output directory
hbox_output = QHBoxLayout();
self.fileLine_output = QLineEdit();
searchButton_output = self.searchFileButton_output();
hbox_output.addWidget(self.fileLine_output);
hbox_output.addWidget(searchButton_output);
layout.addRow('Save output to ', hbox_output);
# some buttons
qtBtn = self.quitButton();
runBtn = self.FSCBtn();
buttonBox = QHBoxLayout();
buttonBox.addWidget(qtBtn);
buttonBox.addWidget(runBtn);
formGroupBox = QGroupBox();
formGroupBox.setLayout(layout);
#set the main Layout
heading = QLabel("Global resolution estimation by FDR-FSC", self);
heading.setFont(QFont('Arial', 17));
heading.setAlignment(Qt.AlignTop);
mainLayout = QVBoxLayout();
mainLayout.addWidget(heading);
mainLayout.addWidget(formGroupBox);
mainLayout.addLayout(buttonBox);
self.setLayout(mainLayout);
#button localizations
def searchFileButton_localizations(self):
btn = QPushButton('Search File');
btn.clicked.connect(self.onInputFileButtonClicked_localizations);
return btn;
def onInputFileButtonClicked_localizations(self):
filename = QFileDialog.getOpenFileName(caption='Open file');
if filename:
self.fileLine_localizations.setText(filename[0]);
#button image1
def searchFileButton_image1(self):
btn = QPushButton('Search File');
btn.clicked.connect(self.onInputFileButtonClicked_image1);
return btn;
def onInputFileButtonClicked_image1(self):
filename = QFileDialog.getOpenFileName(caption='Open file');
if filename:
self.fileLine_image1.setText(filename[0]);
#button image2
def searchFileButton_image2(self):
btn = QPushButton('Search File');
btn.clicked.connect(self.onInputFileButtonClicked_image2);
return btn;
def onInputFileButtonClicked_image2(self):
filename = QFileDialog.getOpenFileName(caption='Open file');
if filename:
self.fileLine_image2.setText(filename[0]);
#button output
def searchFileButton_output(self):
btn = QPushButton('Search File');
btn.clicked.connect(self.onInputFileButtonClicked_output);
return btn;
def onInputFileButtonClicked_output(self):
filename = QFileDialog.getExistingDirectory(caption='Set output directory');
if filename:
self.fileLine_output.setText(filename);
#button to quit
def quitButton(self):
btn = QPushButton('Quit');
btn.clicked.connect(QCoreApplication.instance().quit);
btn.resize(btn.minimumSizeHint());
return btn;
def FSCBtn(self):
btn = QPushButton('Run');
btn.resize(btn.minimumSizeHint());
btn.clicked.connect(self.runFSC);
return btn;
def showMessageBox(self, path):
msg = QMessageBox();
msg.setIcon(QMessageBox.Information);
msg.setText("Local resolution estimation finished. Results saved to " + path);
msg.setWindowTitle("Results");
msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel);
retval = msg.exec_();
# ---------------------------------------------
def runFSC(self):
#show message box before starting
msg = QMessageBox();
msg.setIcon(QMessageBox.Information);
msg.setText("Start the job with OK!")
msg.setInformativeText("GUI will be locked until the job is finished. See terminal printouts for progress ...");
msg.setWindowTitle("Start job");
msg.setStandardButtons( QMessageBox.Cancel| QMessageBox.Ok);
result = msg.exec_();
if result == QMessageBox.Cancel:
return;
start = time.time();
print('***************************************************');
print('******* Significance analysis of FSC curves *******');
print('***************************************************');
# set working directory and output filename
path = self.fileLine_output.text();
if path == '':
path = os.path.dirname(self.fileLine_localizations.text());
os.chdir(path);
#read the input
image1 = None;
image2 = None;
try:
localizations = np.loadtxt(self.fileLine_localizations.text(), delimiter=" ", skiprows=1, usecols=(4, 5));
except:
try:
				image1 = ndimage.imread(self.fileLine_image1.text());
				# fixed copy-paste bug: image2 must come from the second file line
				image2 = ndimage.imread(self.fileLine_image2.text());
				localizations = None;
except:
msg = QMessageBox();
msg.setIcon(QMessageBox.Information);
msg.setText("Cannot the input ...");
msg.setWindowTitle("Error");
msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel);
retval = msg.exec_();
return;
#read the pixel size
try:
apix = float(self.apix.text());
except:
msg = QMessageBox();
msg.setIcon(QMessageBox.Information);
msg.setText("Cannot read pixel size ...");
msg.setWindowTitle("Error");
msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel);
retval = msg.exec_();
return;
#read the window Size
try:
windowSize = int(self.windowSize.text());
except:
print("Window size needs to be a positive integer ...");
return;
#read the window Size
try:
stepSize = int(self.stepSize.text());
except:
print("Step size needs to be a positive integer ...");
return;
#read the low resolution limit
try:
lowResLimit = float(self.lowRes.text());
except:
lowResLimit = None;
if lowResLimit is not None:
			print('Low resolution limit set to {:.2f} nm.'.format(lowResLimit));
else:
print('No low resolution limit used ... ');
#calcualte the actual local resolutions
SMLMObject = SMLM.SMLM();
SMLMObject.localResolution(localizations, image1, image2, apix, stepSize, windowSize, lowResLimit);
# plot the local resolutions
plt.imshow(SMLMObject.localResolutions.T, cmap='hot', origin='lower');
#plt.colorbar();
plt.savefig('localResolutions.png');
plt.close();
		plt.imshow(SMLMObject.filteredMap.T, cmap='hot', origin='lower');
#plt.colorbar();
plt.savefig('heatMap_filt.png');
plt.close();
self.showMessageBox(path);
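# Hedged usage sketch (not part of the original module): running this window
# standalone would look roughly like:
#   app = QApplication([])
#   window = SMLMLocalResolutionWindow()
#   window.show()
#   app.exec_()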
| 28.426621 | 114 | 0.702485 | 912 | 8,329 | 6.338816 | 0.246711 | 0.035288 | 0.018682 | 0.018163 | 0.257568 | 0.257568 | 0.220204 | 0.20602 | 0.186646 | 0.161737 | 0 | 0.008807 | 0.141193 | 8,329 | 292 | 115 | 28.523973 | 0.799385 | 0.092568 | 0 | 0.297436 | 0 | 0 | 0.130967 | 0.013562 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.041026 | 0 | 0.14359 | 0.041026 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
041b2811338198cdcfd843d3dc47874ff643b7e6 | 3,465 | py | Python | scripts/plottings/pretty_plot_example_1.py | PhilipeRLeal/xarray_case_studies | b7771fefde658f0d450cbddd94637ce7936c5f52 | [
"MIT"
] | 1 | 2022-02-22T01:07:31.000Z | 2022-02-22T01:07:31.000Z | scripts/plottings/pretty_plot_example_1.py | PhilipeRLeal/xarray_case_studies | b7771fefde658f0d450cbddd94637ce7936c5f52 | [
"MIT"
] | null | null | null | scripts/plottings/pretty_plot_example_1.py | PhilipeRLeal/xarray_case_studies | b7771fefde658f0d450cbddd94637ce7936c5f52 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 8 18:32:22 2018
@author: Philipe_Leal
"""
from netCDF4 import Dataset
import matplotlib.pyplot as plt
import matplotlib as mpl
import cartopy
import cartopy.crs as ccrs
import numpy as np
from osgeo import gdal, ogr
import cartopy.feature as cfeature
from metpy.plots import ctables
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from matplotlib import patheffects
import datetime
Shp_path = r'C:\Doutorado\Relatorio_Peixes\ZEEs\ZEE_Antares.shp'
nc_path = r'C:\Doutorado\Relatorio_Peixes\Imagens_PP\RCP45_All_Models_Future_Climate_2055.nc'
Netcdf_ds = Dataset(nc_path)
Lista_vars = list(Netcdf_ds.variables.keys())
Anomaly = Netcdf_ds['anomaly'][:]
Lat = Netcdf_ds['lat'][:]
Lon = Netcdf_ds['lon'][:]
ymin = np.min(Lat)
ymax = np.max(Lat)
xmin = np.min(Lon)
xmax = np.max(Lon)
ncols = np.size(Lon)
nrows= np.size(Lat)
maskvalue = 1
xres=(xmax-xmin)/float(ncols)
yres=(ymax-ymin)/float(nrows)
geotransform=(xmin,xres,0,ymax,0, -yres)
src_ds = ogr.Open(Shp_path)
src_lyr=src_ds.GetLayer()
dst_ds = gdal.GetDriverByName('MEM').Create('', ncols, nrows, 1 ,gdal.GDT_Byte)
dst_rb = dst_ds.GetRasterBand(1)
dst_rb.Fill(0) #initialise raster with zeros
dst_rb.SetNoDataValue(0)
dst_ds.SetGeoTransform(geotransform)
err = gdal.RasterizeLayer(dst_ds, [maskvalue], src_lyr)
dst_ds.FlushCache()
mask_arr=dst_ds.GetRasterBand(1).ReadAsArray()
Anomaly_mask = np.ma.masked_array(Anomaly, mask= mask_arr)
# Plotando:
proj = ccrs.PlateCarree(central_longitude=0)
cmap = ctables.registry.get_colortable('NWSReflectivityExpanded')
norm = mpl.colors.Normalize(vmin=np.nanmin(Anomaly_mask), vmax=np.nanmax(Anomaly_mask))
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(1, 1, 1, projection=proj)
ax.coastlines(resolution='50m', color='black')
ax.add_feature(cfeature.STATES, linestyle=':', edgecolor='black')
ax.add_feature(cfeature.BORDERS, linewidth=2, edgecolor='black')
#ax.set_extent([-180, 180, -90, 90], cartopy.crs.Geodetic())
ax.coastlines(resolution='50m')
ax.add_feature(cartopy.feature.STATES)
ax.add_feature(cartopy.feature.BORDERS, linewidth=2, edgecolor='black')
gl = ax.gridlines(draw_labels=True)
# label/formatter settings belong on the Gridliner returned by gridlines(),
# not on the Axes itself
gl.xlabels_top = gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
timestamp = datetime.datetime.today()
text_time = ax.text(0.89, 1.12, timestamp.strftime('%d/%m/%Y'), verticalalignment='baseline',
horizontalalignment='center', transform=ax.transAxes,
color='white', fontsize='x-large', weight='bold')
text_unidades = ax.text(1.2, 0.5, str(Netcdf_ds['anomaly'].units), verticalalignment='baseline',
horizontalalignment='center', transform=ax.transAxes,
color='black', fontsize='large', weight='bold', rotation=90)
text_experimental = ax.text(0.5, 1.23, 'Anomalia PP',
horizontalalignment='center', transform=ax.transAxes,
color='white', fontsize='large', weight='bold')
# Make the text stand out even better using matplotlib's path effects
outline_effect = [patheffects.withStroke(linewidth=2, foreground='black')]
text_time.set_path_effects(outline_effect)
text_experimental.set_path_effects(outline_effect)
proj = ccrs.PlateCarree(central_longitude=180)
cax = ax.imshow(Anomaly_mask, extent = ([-180, 180, -90, 90]),
cmap=cmap, norm = norm, origin='lower', transform=proj)
plt.colorbar(cax, shrink=0.7)
plt.show()
| 30.663717 | 96 | 0.742569 | 497 | 3,465 | 5.036217 | 0.43662 | 0.019177 | 0.019177 | 0.043148 | 0.243708 | 0.140631 | 0.090292 | 0.090292 | 0 | 0 | 0 | 0.027415 | 0.115729 | 3,465 | 112 | 97 | 30.9375 | 0.789491 | 0.070996 | 0 | 0.041096 | 0 | 0 | 0.096694 | 0.047723 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.164384 | 0 | 0.164384 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
041b576e2a4fb5501aae5ec91214e8dffb035b90 | 13,112 | py | Python | pychron/entry/identifier_generator.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | [
"Apache-2.0"
] | 31 | 2016-03-07T02:38:17.000Z | 2022-02-14T18:23:43.000Z | pychron/entry/identifier_generator.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | [
"Apache-2.0"
] | 1,626 | 2015-01-07T04:52:35.000Z | 2022-03-25T19:15:59.000Z | pychron/entry/identifier_generator.py | UIllinoisHALPychron/pychron | f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc | [
"Apache-2.0"
] | 26 | 2015-05-23T00:10:06.000Z | 2022-03-07T16:51:57.000Z | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import Any, Str, List, Bool, Int, CInt, Instance
from traitsui.api import Item, VGroup
from pychron.core.helpers.traitsui_shortcuts import okcancel_view
from pychron.core.progress import open_progress
from pychron.core.ui.combobox_editor import ComboboxEditor
from pychron.loggable import Loggable
from pychron.persistence_loggable import PersistenceMixin
def get_maxs(lns):
def func(li):
try:
x = int(li)
except ValueError:
x = 0
return x
lns = [func(li) for li in lns]
return [max(gi) for gi in group_runs(lns)]
# lns = list(map(func, lns))
# return list(map(max, group_runs(lns)))
def group_runs(li, tolerance=1000):
out = []
last = li[0]
for x in li:
if abs(x - last) > tolerance:
yield out
out = []
out.append(x)
last = x
yield out
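def _demo_get_maxs():
    # Hedged example (not part of the original module): 63249 and 63186 fall
    # into one run (their difference is below the default tolerance of 1000),
    # while 92596 starts a new run, so one maximum is returned per run.
    assert get_maxs(['63249', '63186', '92596']) == [63249, 92596]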
class IdentifierGenerator(Loggable, PersistenceMixin):
db = Any
dvc = Instance("pychron.dvc.dvc.DVC")
# default_j = Float(1e-4)
# default_j_err = Float(1e-7)
monitor_name = Str
use_consecutive_identifiers = Bool
irradiation_positions = List
irradiation = Str
level = Str
is_preview = Bool
overwrite = Bool
level_offset = Int(0)
offset = Int(5)
mon_start = CInt(5000)
unk_start = CInt(1000)
pattributes = ("level_offset", "offset")
persistence_name = "identifier_generator.p"
mon_maxs = List
unk_maxs = List
def setup(self):
self.load()
if not self.use_consecutive_identifiers:
monlns = self.db.get_last_identifiers(self.monitor_name)
unklns = self.db.get_last_identifiers(excludes=(self.monitor_name,))
if monlns:
self.mon_maxs = list(map(str, get_maxs(monlns)))
if unklns:
self.unk_maxs = list(map(str, get_maxs(unklns)))
self.mon_start = self.mon_maxs[0] if self.mon_maxs else 0
self.unk_start = self.unk_maxs[0] if self.unk_maxs else 0
else:
unklns = self.db.get_last_identifiers()
if unklns:
self.unk_maxs = list(map(str, get_maxs(unklns)))
self.unk_start = self.unk_maxs[0] if self.unk_maxs else 0
start_grp = VGroup(
Item(
"mon_start",
label="Starting Monitor Identifier",
editor=ComboboxEditor(name="mon_maxs"),
defined_when="not use_consecutive_identifiers",
),
Item(
"unk_start",
label="Starting Unknown Identifier",
editor=ComboboxEditor(name="unk_maxs"),
defined_when="not use_consecutive_identifiers",
),
Item(
"unk_start",
label="Starting Identifier",
editor=ComboboxEditor(name="unk_maxs"),
defined_when="use_consecutive_identifiers",
),
)
info = self.edit_traits(
view=okcancel_view(
Item("offset"),
Item("level_offset"),
start_grp,
title="Configure Identifier Generation",
)
)
if info.result:
self.dump()
return True
def preview(self, positions, level):
self.irradiation_positions = positions
self.level = level
self.is_preview = True
self.generate_identifiers()
def generate_identifiers(self, *args, **kw):
self._generate_labnumbers(*args)
if not self.is_preview:
self.dvc.meta_commit("Generate identifiers")
def _generate_labnumbers(self, offset=None, level_offset=None):
"""
get last labnumber
start numbering at 1+offset
add level_offset between each level
"""
if offset is None:
offset = self.offset
if level_offset is None:
level_offset = self.level_offset
irradiation = self.irradiation
mongen, unkgen, n = self._position_generator(offset, level_offset)
if n:
prog = open_progress(n)
# prog.max = n - 1
for gen in (mongen, unkgen):
for pos, ident in gen:
po = pos.position
le = pos.level.name
if self.is_preview:
self._set_position_identifier(pos, ident)
else:
pos.identifier = ident
self.dvc.set_identifier(irradiation, le, po, ident)
# self._add_default_flux(pos)
msg = "setting irrad. pos. {} {}-{} labnumber={}".format(
irradiation, le, po, ident
)
self.info(msg)
if prog:
prog.change_message(msg)
prog.close()
def _set_position_identifier(self, dbpos, ident):
ipos = self._get_irradiated_position(dbpos)
if ipos:
ident = str(ident)
ipos.identifier = ident
def _get_irradiated_position(self, dbpos):
if dbpos.level.name == self.level:
ipos = next(
(po for po in self.irradiation_positions if po.hole == dbpos.position),
None,
)
return ipos
# def _add_default_flux(self, pos):
# db = self.db
# j, j_err = self.default_j, self.default_j_err
# dbln = pos.labnumber
#
# def add_flux():
# hist = db.add_flux_history(pos)
# dbln.selected_flux_history = hist
# f = db.add_flux(j, j_err)
# f.history = hist
#
# if dbln.selected_flux_history:
# tol = 1e-10
# flux = dbln.selected_flux_history.flux
# if abs(flux.j - j) > tol or abs(flux.j_err - j_err) > tol:
# add_flux()
# else:
# add_flux()
def _position_generator(self, offset, level_offset):
"""
return 2 generators
monitors, unknowns
"""
db = self.db
irradiation = self.irradiation
irrad = db.get_irradiation(irradiation)
levels = irrad.levels
overwrite = self.overwrite
n = sum(
[
len(
[
p
for p in li.positions
if overwrite or (p.sample and not p.identifier)
]
)
for li in levels
]
)
args = (irradiation, levels, overwrite, offset, level_offset)
if self.use_consecutive_identifiers:
if not self.unk_start:
last_unk_ln = db.get_last_identifier()
if last_unk_ln:
last_unk_ln = int(last_unk_ln.identifier)
else:
last_unk_ln = 0
else:
last_unk_ln = self.unk_start
unks = self._identifier_generator(last_unk_ln, False, *args)
mons = []
else:
if not self.mon_start:
last_mon_ln = db.get_last_identifier(self.monitor_name)
if last_mon_ln:
last_mon_ln = int(last_mon_ln.identifier)
else:
last_mon_ln = 0
else:
last_mon_ln = self.mon_start
if not self.unk_start:
last_unk_ln = db.get_last_identifier()
if last_unk_ln:
last_unk_ln = int(last_unk_ln.identifier)
else:
last_unk_ln = 0
else:
last_unk_ln = self.unk_start
mons = self._identifier_generator(last_mon_ln, True, *args)
unks = self._identifier_generator(last_unk_ln, False, *args)
return mons, unks, n
def _get_position_is_monitor(self, dbpos):
ipos = self._get_irradiated_position(dbpos)
if ipos:
return ipos.sample == self.monitor_name
def _get_position_sample(self, dbpos):
ipos = self._get_irradiated_position(dbpos)
if ipos:
return ipos.sample
def _identifier_generator(
self, start, is_monitor, irrad, levels, overwrite, offset, level_offset
):
offset = max(1, offset)
level_offset = max(1, level_offset)
sln = start + offset
if self.use_consecutive_identifiers:
def monkey(*args, **kw):
def _monkey(*args, **kw):
return True
return _monkey
else:
def monkey(invert=False):
def _monkey(x):
r = None
if self.is_preview:
r = self._get_position_is_monitor(x)
else:
# print('dsaf', x.sample.name, self.monitor_name, x.sample.name == self.monitor_name)
try:
r = x.sample.name == self.monitor_name
except AttributeError as e:
pass
if invert:
r = not r
return r
return _monkey
def has_sample(x):
r = None
if self.is_preview:
r = self._get_position_sample(x)
else:
try:
r = x.sample.name # == self.monitor_name
except AttributeError as e:
pass
return r
test = monkey(not is_monitor)
for level in levels:
i = 0
for position in sorted(level.positions, key=lambda x: x.position):
if not has_sample(position):
continue
if not test(position):
continue
if position.identifier and not overwrite:
le = "{}{}-{}".format(irrad, position.level.name, position.position)
ln = position.identifier
self.warning(
"skipping position {} already has labnumber {}".format(le, ln)
)
continue
yield position, i + sln
i += 1
sln = sln + i + level_offset - 1
if __name__ == "__main__":
lns = [
22923126,
22923083,
22923066,
22923051,
22923045,
22923034,
22923016,
22923001,
22922001,
22921003,
22921002,
22921001,
22191022,
22191021,
22191020,
22191019,
22191018,
22191017,
22191016,
22191015,
22191014,
22191013,
22191012,
22191011,
22191010,
22191009,
22191008,
22191007,
22191006,
22191005,
22191004,
623410,
623409,
623408,
623407,
623406,
623404,
623403,
623402,
623401,
92596,
92595,
69156,
63249,
63248,
63247,
63246,
63245,
63244,
63243,
63242,
63241,
63240,
63239,
63238,
63237,
63236,
63235,
63234,
63233,
63232,
63231,
63230,
63229,
63228,
63227,
63225,
63224,
63223,
63222,
63221,
63220,
63219,
63218,
63217,
63216,
63215,
63214,
63213,
63212,
63211,
63210,
63209,
63208,
63207,
63206,
63205,
63204,
63203,
63202,
63201,
63200,
63199,
63198,
63197,
63196,
63195,
63194,
63193,
63192,
63191,
63190,
63189,
63188,
63187,
63186,
]
print(get_maxs(lns))
# ============= EOF =============================================
| 27.838641 | 109 | 0.499924 | 1,342 | 13,112 | 4.698212 | 0.260805 | 0.02617 | 0.019984 | 0.008247 | 0.230769 | 0.20682 | 0.173989 | 0.173989 | 0.153529 | 0.139255 | 0 | 0.086957 | 0.4036 | 13,112 | 470 | 110 | 27.897872 | 0.719309 | 0.131101 | 0 | 0.220056 | 0 | 0 | 0.039184 | 0.009131 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050139 | false | 0.005571 | 0.019499 | 0.002786 | 0.153203 | 0.002786 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
041da5a154522e556902522c2196b3f71ea92819 | 2,650 | py | Python | tests/test_ambiguous_inference.py | nelfin/pylint-protobuf | dbdfaecfbf248ccc75f6cdef4b29e4fb6a21e70e | [
"MIT"
] | 25 | 2018-09-21T13:21:25.000Z | 2021-12-30T06:00:57.000Z | tests/test_ambiguous_inference.py | nelfin/pylint-protobuf | dbdfaecfbf248ccc75f6cdef4b29e4fb6a21e70e | [
"MIT"
] | 48 | 2019-01-26T10:10:43.000Z | 2021-07-27T02:23:19.000Z | tests/test_ambiguous_inference.py | nelfin/pylint-protobuf | dbdfaecfbf248ccc75f6cdef4b29e4fb6a21e70e | [
"MIT"
] | 12 | 2018-09-27T09:19:58.000Z | 2021-02-12T17:17:07.000Z | import pytest
import pylint_protobuf
@pytest.fixture
def simple_mod(proto_builder):
return proto_builder("""
message Test {
required string name = 1;
}
""")
@pytest.fixture
def inference_mod(simple_mod, module_builder):
return module_builder("""
from {} import Test
class C:
def __init__(self):
self.value = None
def parse(self):
self.value = Test()
self.value.ParseFromString("blahblahblah")
""".format(simple_mod), 'inference_mod')
def test_no_E1101_on_node_inference(inference_mod, linter_factory):
linter = linter_factory(
register=pylint_protobuf.register,
disable=['all'], enable=['protobuf-undefined-attribute', 'no-member'],
)
linter.check([inference_mod])
actual_messages = [m.msg for m in linter.reporter.messages]
assert not actual_messages
@pytest.fixture
def issue44_pb2(proto_builder):
return proto_builder("""
message Example {
required int32 value = 1;
}
message DifferentExample {
required int32 different_value = 1;
}
""")
def test_issue44_no_warnings_if_any_matches(issue44_pb2, module_builder, linter_factory):
mod = module_builder("""
from {pb2} import Example, DifferentExample
request = Example(value=123)
if 1 + 1 == 2:
request = DifferentExample()
if 2 + 2 == 4:
request.different_value = 456
""".format(pb2=issue44_pb2), 'issue44_example1')
linter = linter_factory(
register=pylint_protobuf.register,
disable=['all'], enable=['protobuf-undefined-attribute']
)
linter.check([mod])
actual_messages = [m.msg for m in linter.reporter.messages]
assert not actual_messages
def test_issue44_package_no_warnings_if_any_matches(issue44_pb2, module_builder, linter_factory):
# Previous behaviour (up to 2c09cf3) only passes in astroid-2.5 due to changes in
# context.path (works with astroid cc3bfc5, reverted by 03d15b0)
mod = module_builder("""
import {pb2} as pb
request = pb.Example(value=123)
if 1 + 1 == 2:
request = pb.DifferentExample()
if 2 + 2 == 4:
request.different_value = 456
""".format(pb2=issue44_pb2), 'issue44_example2')
linter = linter_factory(
register=pylint_protobuf.register,
disable=['all'], enable=['protobuf-undefined-attribute']
)
linter.check([mod])
actual_messages = [m.msg for m in linter.reporter.messages]
assert not actual_messages
| 30.813953 | 97 | 0.638491 | 304 | 2,650 | 5.351974 | 0.302632 | 0.047941 | 0.029502 | 0.049785 | 0.551321 | 0.551321 | 0.505839 | 0.505839 | 0.472649 | 0.472649 | 0 | 0.039226 | 0.259245 | 2,650 | 85 | 98 | 31.176471 | 0.789608 | 0.053585 | 0 | 0.442857 | 0 | 0 | 0.430339 | 0.085429 | 0 | 0 | 0 | 0 | 0.042857 | 1 | 0.085714 | false | 0 | 0.071429 | 0.042857 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
041f3c2cf44751bd69e8d94a6feb8e33f0fa1f64 | 3,177 | py | Python | fred.py | marcoracer/hmm_spy_regimes | a07668c2fe6679c4e0418253b729bfd2cd94e6bc | [
"BSD-3-Clause"
] | null | null | null | fred.py | marcoracer/hmm_spy_regimes | a07668c2fe6679c4e0418253b729bfd2cd94e6bc | [
"BSD-3-Clause"
] | null | null | null | fred.py | marcoracer/hmm_spy_regimes | a07668c2fe6679c4e0418253b729bfd2cd94e6bc | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import pandas as pd
import sklearn.mixture as mix
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.dates import YearLocator, MonthLocator
import seaborn as sns
import missingno as msno
import quandl as qd
# reference:
# http://www.blackarbs.com/blog/introduction-hidden-markov-models-python-networkx-sklearn/2/9/2017
# get fed data
f1 = 'TEDRATE' # ted spread
f2 = 'T10Y2Y' # constant maturity ten yer - 2 year
f3 = 'T10Y3M' # constant maturity 10yr - 3m
start = pd.to_datetime('2002-01-01')
end = pd.Timestamp.today()  # pd.datetime was removed from pandas; Timestamp.today() is equivalent here
data_SPY = qd.get('LSE/SPY5')
data_f1 = qd.get('FRED/TEDRATE')
data_f2 = qd.get('FRED/T10Y2Y')
data_f3 = qd.get('FRED/T10Y3M')
data = pd.concat([data_SPY['Price'], data_f1, data_f2, data_f3], axis=1, join='inner')
data.columns = ['SPY', f1, f2, f3]
data['sret'] = np.log( data['SPY']/ data['SPY'].shift(1))
print(' --- Data ---')
print(data.tail())
# quick visual inspection of the data
msno.matrix(data)
col = 'sret'
select = data.dropna()  # the .ix indexer was removed from pandas; a plain dropna() is equivalent here
ft_cols = [f1, f2, f3, col]
X = select[ft_cols].values
print('\nFitting to HMM and decoding ...', end='')
model = mix.GaussianMixture(n_components=4,
covariance_type='full',
n_init=100,
random_state=7).fit(X)
# Predict the optimal sequence of internal hidden state
hidden_states = model.predict(X)
print('done!\n')
print('Score: %.2f;\tBIC: %.2f;\tAIC:%.2f;\n' % (model.score(X), model.bic(X), model.aic(X)))
print('Means and vars of each hidden state')
for i in range(model.n_components):
print('%d th hidden state' % i)
print('mean = ', model.means_[i])
print('var = ', np.diag(model.covariances_[i]))
print()
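def _pick_n_components(X, k_range=range(2, 7)):
    # Hedged sketch (not part of the original script): n_components=4 is
    # hard-coded above; an alternative is to pick the mixture size that
    # minimizes BIC over a small candidate range.
    return min(k_range, key=lambda k: mix.GaussianMixture(
        n_components=k, covariance_type='full',
        random_state=7).fit(X).bic(X))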
sns.set(font_scale=1.25)
style_kwds = {'xtick.major.size': 3, 'ytick.major.size': 3,
'font.family':u'courier prime code', 'legend.frameon': True}
sns.set_style('white', style_kwds)
fig, axs = plt.subplots(model.n_components, sharex=True, figsize=(12,9))
colors = cm.rainbow(np.linspace(0, 1, model.n_components))
for i, (ax, color) in enumerate(zip(axs, colors)):
# Use fancy indexing to plot data in each state.
mask = hidden_states == i
ax.plot_date(select.index.values[mask],
select[col].values[mask], '.-', c=color)
ax.set_title('%d th hidden state' % i, fontsize=16, fontweight='demi')
# Format the ticks.
ax.xaxis.set_major_locator(YearLocator())
ax.xaxis.set_minor_locator(MonthLocator())
sns.despine(offset=10)
plt.tight_layout()
plt.show()
# fig.savefig('Hidden Markov (Mixture) Model_Regime Subplots.png')
sns.set(font_scale=1.5)
states = (pd.DataFrame(hidden_states, columns=['states'], index=select.index)
.join(select, how='inner')
.assign(mkt_cret=select.sret.cumsum())
.reset_index(drop=False)
.rename(columns={'index':'Date'}))
print(' --- States ---')
print(states.tail())
sns.set_style('white', style_kwds)
order = [0, 1, 2]
fg = sns.FacetGrid(data=states, hue='states', hue_order=order,
palette=colors, aspect=1.31, size=12)
fg.map(plt.scatter, 'Date', 'SPY', alpha=0.8).add_legend()
sns.despine(offset=10)
fg.fig.suptitle('Historical SPY Regimes', fontsize=24, fontweight='demi')
plt.tight_layout()
plt.show()
# fg.savefig('Hidden Markov (Mixture) Model_SPY Regimes.png') | 29.416667 | 98 | 0.704123 | 504 | 3,177 | 4.35119 | 0.444444 | 0.012768 | 0.012312 | 0.012768 | 0.098495 | 0.0228 | 0 | 0 | 0 | 0 | 0 | 0.030769 | 0.120239 | 3,177 | 108 | 99 | 29.416667 | 0.753846 | 0.149197 | 0 | 0.106667 | 0 | 0 | 0.16388 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.12 | 0 | 0.12 | 0.16 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
041fa5e4d5e8ed2a81f7c02289d5ec06269c0248 | 12,208 | py | Python | Game.py | SeijiNoda/Python-Missile-Command | d7309386475ece0111936bac4da61429edcd5e98 | [
"MIT"
] | null | null | null | Game.py | SeijiNoda/Python-Missile-Command | d7309386475ece0111936bac4da61429edcd5e98 | [
"MIT"
] | null | null | null | Game.py | SeijiNoda/Python-Missile-Command | d7309386475ece0111936bac4da61429edcd5e98 | [
"MIT"
] | null | null | null | # Replica of "Missile Command" from ATARI (TI402 project)
## Matheus Seiji Luna Noda, 19190 - November 2020
## Github at "https://github.com/SeijiNoda/Python-Missile-Command/tree/master"
#Import pygame library
import pygame
#Import random library (used for generating missiles)
import random
#Import math library (used for square root method)
import math
#Initiates pygame
pygame.init()
#Set main screen's dimensions
screen = pygame.display.set_mode((800, 600))
#Levels
#Class for level storage:
## self.lvl Which level it is
## self.time Total amount of time the player has to survive for
## self.qnt Quantity of missiles in the determined amount of time
class Level:
def __init__(self, lvl, time, qnt):
self.lvl = lvl
self.time = time
self.qnt = qnt
#Creating all of the Level objects
lvl1 = Level(1, 60000, 12)
lvl2 = Level(2, 60000, 18)
lvl3 = Level(3, 60000, 24)
lvl4 = Level(4, 60000, 30)
lvl5 = Level(5, 60000, 36)
lvl6 = Level(6, 60000, 42)
lvl7 = Level(7, 60000, 48)
lvl8 = Level(8, 60000, 54)
lvl9 = Level(9, 60000, 60)
lvl10 = Level(10, 60000, 66)
lvl11 = Level(11, 60000, 72)
lvl12 = Level(12, 60000, 78)
lvl13 = Level(13, 60000, 84)
lvl14 = Level(14, 60000, 90)
lvl15 = Level(15, 60000, 96)
lvl16 = Level(16, 60000, 102)
lvl17 = Level(17, 60000, 108)
lvl18 = Level(18, 60000, 114)
lvl19 = Level(19, 60000, 120)
lvl20 = Level(20, 60000, 126)
#Add every level to the levels list
levels = [lvl1, lvl2, lvl3, lvl4, lvl5, lvl6, lvl7, lvl8, lvl9, lvl10, lvl11, lvl12, lvl13, lvl14, lvl15, lvl16, lvl17, lvl18, lvl19, lvl20]
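##Hedged note (not in the original file): the twenty Level objects above
##follow a fixed pattern (60000 ms each, 12 + 6*(lvl-1) missiles), so an
##equivalent one-liner would be:
## levels = [Level(i, 60000, 12 + 6 * (i - 1)) for i in range(1, 21)]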
#City
#Fixed variables for the cities
cityColor = (0,128,255)
ruinsColor = (63, 63, 65)
#Class for city storage:
## self.index Index for the current city
## self.x and self.y Coordinates for the city
## self.status Status (alive or dead) of the city
class City:
def __init__(self, index, x):
self.index = index
self.x = x
self.y = 545
self.status = "alive"
#Creating all of the cities objects
city1 = City(0, 50)
city2 = City(1, 150)
city3 = City(2, 250)
city4 = City(3, 500)
city5 = City(4, 600)
city6 = City(5, 700)
#Add every city in the list
cities = [city1, city2, city3, city4, city5, city6]
#Variable for city status verification
isAlive = [True, True, True, True, True, True]
##Function returns True when every single city is dead
def allDead():
for cityStatus in isAlive:
if cityStatus == True:
return False
return True
#Player
#Fixed variables
playerColor = (255,0,0)
playerHeight = 8
playerWidth = 8
#Player coordinates
playerX = 400
playerY = 300
#Bomb
#Fixed variables
bombColor = (255,255,255)
#Bomb's explosion current radius
bombRadius = 10
#Max explosion range
bombRange = 50
#Bomb state
bomb_state = "ready"
#Bomb coordinates
bombX = playerX
bombY = playerY
#Function that draws the bomb when fired:
## x and y Bomb current coordinates (usually bombX and bombY)
## r                Bomb current radius (usually bombRadius)
def fire_bomb(x, y, r):
#Sets the bomb state to "fire" (it's firing currently)
global bomb_state
bomb_state = "fire"
#Draws the bomb
pygame.draw.circle(screen, (255,175,0), (x-5,y-5), r)
#Missiles
#Class for missile storage:
## self.index This missile's index
## self.originX and self.originY Missile's departure point's coordinates
## self.destinyX and self.destinyY Missile's destination coordinates
## self.x and self.y Current position
## self.status Current status
class Missile:
def __init__(self, index, originX, originY, destinyX, destinyY):
self.index = index
self.originX = originX
self.originY = originY
self.destinyX = destinyX
self.destinyY = destinyY
self.x = originX
self.y = originY
self.status = "flying"
#List of all of the current missiles
missilesList = []
#Function for creating a new missile
def genMissile():
#If there's still cities alive
if not allDead():
#Gets a random city from the list
city = random.randrange(0,len(cities))
        #Keeps going until it finds a city that is alive (since there's at least one of them)
while cities[city].status != "alive":
city = random.randrange(0,len(cities))
#Create the missile with a random origin coordinates
m = Missile(city, random.randrange(0,800), 0,cities[city].x+25, cities[city].y)
#Adds the missile to the list
missilesList.append(m)
#Level control
witch_level = 1
missilesSent = 0
points = 0
#Reset function prepares all of the variables for a new game
def resetGame():
global isAlive
global cities
global bomb_state, bombRange, bombRadius
global playerX, playerY, cont, points, missilesSent, witch_level
playerX = 400
playerY = 300
bombRange = 50
bombRadius = 10
cont = 0
missilesSent = 0
witch_level = 1
points = 0
bomb_state = "ready"
missilesList.clear()
for x in range(len(isAlive)):
isAlive[x] = True
for city in cities:
city.status = "alive"
#Main
cont = 0
clock = pygame.time.Clock()
#Loop control variables
victory = False
done = False
while not done:
#If there's still cities alive
if not allDead():
for event in pygame.event.get():
#Quits of the game
if event.type == pygame.QUIT:
done = True
if event.type == pygame.KEYDOWN:
#Fires the bomb
if event.key == pygame.K_SPACE:
if bomb_state != "fire":
bombX = playerX
bombY = playerY
fire_bomb(bombX, bombY, bombRadius)
        #Movement handler
pressed = pygame.key.get_pressed()
if pressed[pygame.K_UP] and playerY >= 3: playerY -= 3
if pressed[pygame.K_DOWN] and playerY <= 500: playerY += 3
if pressed[pygame.K_LEFT] and playerX >= 3: playerX -= 3
if pressed[pygame.K_RIGHT] and playerX <= 792: playerX += 3
#Erases all
screen.fill((0, 0, 0))
#Draws floor
pygame.draw.rect(screen, (164,99,33), pygame.Rect(0,550,800,600))
#Draws the main base
pygame.draw.rect(screen, (164,255,255), pygame.Rect(360, 525, 80,40))
pygame.draw.rect(screen, (164,255,255), pygame.Rect(380, 505, 40,20))
#Draw all of the cities
for city in cities:
            #Divides cities between alive and dead
if city.status == "alive":
pygame.draw.rect(screen, cityColor, pygame.Rect(city.x, city.y, 50, 35))
else:
pygame.draw.rect(screen, ruinsColor, pygame.Rect(city.x, city.y, 50, 35))
isAlive[city.index] = False
#Generates the missiles
        #witch_level is 1-based but the levels list is 0-based, hence the -1
        #(indexing levels[witch_level] directly would crash with IndexError at level 20)
        time = levels[witch_level - 1].time
        qnt = levels[witch_level - 1].qnt
cont += 1
auxCont = ( time / qnt ) / 60
if cont >= 2.5*auxCont:
genMissile()
missilesSent += 1
cont = 0
#Draws every missile in the list
for m in missilesList:
if m.status == "flying":
dx = m.destinyX - m.x
dy = m.destinyY - m.y
#Gets the hypotenuse
d = math.sqrt(dx*dx + dy*dy)
#Calculate the change to the enemy position
speed = 2
cx = speed * dx / d
cy = speed * dy / d
#Update enemy position
m.x += cx
m.y += cy
#Arrived on destiny
if m.y >= m.destinyY:
m.status = "arrived"
cities[m.index].status = "dead"
#Blew up
if (m.x - bombX)*(m.x - bombX) + (m.y - bombY)*(m.y - bombY) < bombRadius*bombRadius:
m.status = "blew"
points += 50*witch_level
#Missile trail
pygame.draw.line(screen, (255,255,255), (m.originX, m.originY), (m.x, m.y))
pygame.draw.rect(screen, (70,70,70), pygame.Rect(m.x-2, m.y-2, 5,5))
        #Detects collisions between the cities, the bomb and the missiles AND adds the points to the score
if missilesSent >= qnt:
mult = 1
for city in cities:
if city.status == "alive":
mult += 1
points *= mult
if bombRadius > 5:
bombRadius -= 0.5
if bombRange > 25:
bombRange -= 2.5
witch_level += 1
            #Victory established
if witch_level > 20:
done = True
victory = True
#Handles bomb animation and reloading
if bomb_state == "fire":
bombRadius = bombRadius + 1
if bombRadius <= 14:
pygame.draw.line(screen, (255,255,0), (bombX, bombY), (400,505))
fire_bomb(bombX, bombY, bombRadius)
if bombRadius >= bombRange:
bombRadius = 10
bomb_state = "ready"
#Draws the player
pygame.draw.rect(screen, playerColor, pygame.Rect(playerX, playerY, playerWidth, playerHeight))
#Writes the display
font = pygame.font.Font('./Resources/AtariSmall.ttf', 16)
text = font.render("Lvl: " + str(witch_level), False, (255,255,255))
screen.blit(text, (725, 40))
font = pygame.font.Font('./Resources/AtariSmall.ttf', 13)
text = font.render("pts: " + str(points), False, (255,255,255))
screen.blit(text, (725, 54))
pygame.display.flip()
clock.tick(60)
else:
#"YOU LOST" scenario
#Erases all
screen.fill((0, 0, 0))
#Writes all of the messages and your score
font = pygame.font.Font('./Resources/AtariSmall.ttf', 72)
text = font.render("You Lost", False, (255,255,255))
screen.blit(text, (400 - text.get_width() // 2, 300 - text.get_height() // 2))
font = pygame.font.Font('./Resources/AtariSmall.ttf', 22)
text = font.render("Play again? (Y/N)", False, (255,255,255))
screen.blit(text, (400 - text.get_width() // 2, (300 - text.get_height() // 2) + 50))
font = pygame.font.Font('./Resources/AtariSmall.ttf', 24)
text = font.render("points: " + str(points), False, (255,255,255))
screen.blit(text, (400 - text.get_width() // 2, (300 - text.get_height() // 2) + 100))
pygame.display.flip()
clock.tick(60)
#Waits for command
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_n:
done = True
if event.key == pygame.K_y:
resetGame()
done = False
if victory:
#"YOU WIN" scenario
    #Re-enter a fresh loop for the victory screen
    done = False
while not done:
#Writes message
screen.fill((0, 0, 0))
font = pygame.font.Font('./Resources/AtariSmall.ttf', 72)
text = font.render("You Won!", False, (255,255,255))
screen.blit(text, (400 - text.get_width() // 2, 300 - text.get_height() // 2))
font = pygame.font.Font('./Resources/AtariSmall.ttf', 22)
text = font.render("Congratulations!", False, (255,255,255))
screen.blit(text, (400 - text.get_width() // 2, (300 - text.get_height() // 2) + 50))
font = pygame.font.Font('./Resources/AtariSmall.ttf', 24)
text = font.render("points: " + str(points), False, (255,255,255))
screen.blit(text, (400 - text.get_width() // 2, (300 - text.get_height() // 2) + 100))
pygame.display.flip()
clock.tick(60)
#Wait for command to exit the game
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
if event.type == pygame.KEYDOWN:
done = True | 30.368159 | 140 | 0.576425 | 1,603 | 12,208 | 4.356831 | 0.227698 | 0.019759 | 0.012887 | 0.020619 | 0.267182 | 0.2311 | 0.201031 | 0.183276 | 0.171535 | 0.144759 | 0 | 0.079896 | 0.308978 | 12,208 | 402 | 141 | 30.368159 | 0.747985 | 0.222477 | 0 | 0.351695 | 0 | 0 | 0.038556 | 0.022154 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029661 | false | 0 | 0.012712 | 0 | 0.063559 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04209d0f190e97d214547cc0d6fc5cacbcedadb9 | 3,577 | py | Python | indra/sources/lincs_drug/processor.py | qiuhaoling/indra | fa1fb31c4333ea63d023181eaf6f759e3dd3b400 | [
"BSD-2-Clause"
] | null | null | null | indra/sources/lincs_drug/processor.py | qiuhaoling/indra | fa1fb31c4333ea63d023181eaf6f759e3dd3b400 | [
"BSD-2-Clause"
] | null | null | null | indra/sources/lincs_drug/processor.py | qiuhaoling/indra | fa1fb31c4333ea63d023181eaf6f759e3dd3b400 | [
"BSD-2-Clause"
] | null | null | null | from __future__ import absolute_import, print_function, unicode_literals
__all__ = ['LincsProcessor']
import re
from indra.statements import Agent, Inhibition, Evidence
from indra.databases.lincs_client import LincsClient
from indra.databases import uniprot_client, hgnc_client, chebi_client
class LincsProcessor(object):
"""Processor for the HMS LINCS drug target dataset.
Parameters
----------
lincs_data : list[dict]
A list of dicts with keys set by the header of the csv, and values from
the data in the csv.
Attributes
----------
statements : list[indra.statements.Statement]
A list of indra statements extracted from the CSV file.
"""
def __init__(self, lincs_data):
self._data = lincs_data
self._lc = LincsClient()
# Process all the lines (skipping the header)
self.statements = []
for line in self._data:
self._process_line(line)
return
def _process_line(self, line):
drug = self._extract_drug(line)
prot = self._extract_protein(line)
if prot is None:
return
evidence = self._make_evidence(line)
self.statements.append(Inhibition(drug, prot, evidence=evidence))
def _extract_drug(self, line):
drug_name = line['Small Molecule Name']
lincs_id = line['Small Molecule HMS LINCS ID']
refs = self._lc.get_small_molecule_refs(lincs_id)
if 'PUBCHEM' in refs:
chebi_id = chebi_client.get_chebi_id_from_pubchem(refs['PUBCHEM'])
if chebi_id:
refs['CHEBI'] = 'CHEBI:%s' % chebi_id
return Agent(drug_name, db_refs=refs)
def _extract_protein(self, line):
# Extract key information from the lines.
prot_name = line['Protein Name']
prot_id = line['Protein HMS LINCS ID']
# Get available db-refs.
db_refs = {}
if prot_id:
db_refs.update(self._lc.get_protein_refs(prot_id))
# Since the resource only gives us an UP ID (not HGNC), we
# try to get that and standardize the name to the gene name
up_id = db_refs.get('UP')
if up_id:
gene_name = uniprot_client.get_gene_name(up_id)
if gene_name:
prot_name = gene_name
hgnc_id = hgnc_client.get_hgnc_id(gene_name)
if hgnc_id:
db_refs['HGNC'] = hgnc_id
# In some cases lines are missing protein information in which
# case we return None
else:
return None
# Create the agent.
return Agent(prot_name, db_refs=db_refs)
def _make_evidence(self, line):
ev_list = []
key_refs = line['Key References'].split(';')
generic_notes = {
'is_nominal': line['Is Nominal'],
'effective_concentration': line['Effective Concentration']
}
        patt = re.compile(r'(?:pmid|pubmed\s+id):\s+(\d+)', re.IGNORECASE)
for ref in key_refs:
# Only extracting pmids, but there is generally more info available.
m = patt.search(ref)
if m is None:
pmid = None
else:
pmid = m.groups()[0]
annotations = {'reference': ref}
annotations.update(generic_notes)
ev = Evidence('lincs_drug', pmid=pmid, annotations=annotations,
epistemics={'direct': True})
ev_list.append(ev)
return ev_list
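# A minimal usage sketch (the CSV file name below is hypothetical; in indra the
# data is normally loaded by the accompanying api module):
#
#   import csv
#   with open('hms_lincs_drug_targets.csv') as fh:
#       lp = LincsProcessor(list(csv.DictReader(fh)))
#   print(len(lp.statements))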
| 34.066667 | 80 | 0.594912 | 440 | 3,577 | 4.611364 | 0.306818 | 0.023657 | 0.011828 | 0.011828 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000409 | 0.315907 | 3,577 | 104 | 81 | 34.394231 | 0.82877 | 0.199609 | 0 | 0.059701 | 0 | 0 | 0.092626 | 0.018525 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074627 | false | 0 | 0.074627 | 0 | 0.253731 | 0.014925 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0420a7023fe888fdcf493f50aee8d43af55e7323 | 391 | py | Python | Tools/flash_led.py | BleuLlama/LlamaVampireDrive | 63f5990f239afaf7a88373041bece0873eb7b67b | [
"MIT"
] | 4 | 2020-12-01T03:34:41.000Z | 2021-07-22T23:26:40.000Z | Tools/flash_led.py | BleuLlama/LlamaVampireDrive | 63f5990f239afaf7a88373041bece0873eb7b67b | [
"MIT"
] | null | null | null | Tools/flash_led.py | BleuLlama/LlamaVampireDrive | 63f5990f239afaf7a88373041bece0873eb7b67b | [
"MIT"
] | null | null | null | #!/bin/python
#
# Simple script to test the LED
import RPi.GPIO as GPIO
import time
# The gpio line the LED is connected to
led = 21
GPIO.setwarnings( False )
GPIO.setmode( GPIO.BCM )
GPIO.setup( led,GPIO.OUT )
def flash( nt, dv ):
while nt > 0:
GPIO.output( led,GPIO.LOW )
time.sleep( dv )
GPIO.output( led,GPIO.HIGH )
time.sleep( dv )
nt -= 1
flash( 3, .1 )
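# Release the GPIO line when done (optional, but tidies up the pin state
# between runs)
GPIO.cleanup()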
| 16.291667 | 39 | 0.631714 | 65 | 391 | 3.8 | 0.538462 | 0.08502 | 0.105263 | 0.137652 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02027 | 0.242967 | 391 | 23 | 40 | 17 | 0.814189 | 0.204604 | 0 | 0.142857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.142857 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
042216eedb709a82e809982a6db712c3624d766a | 1,582 | py | Python | inf_classif_analysis/tree_based_muliclassification/dec_trees_LET_IS.py | Marco-Ametrano/myocardal_infarction_class | d2fb9d4d6643d0b836ffdb94a32911eb4d68c390 | [
"MIT"
] | null | null | null | inf_classif_analysis/tree_based_muliclassification/dec_trees_LET_IS.py | Marco-Ametrano/myocardal_infarction_class | d2fb9d4d6643d0b836ffdb94a32911eb4d68c390 | [
"MIT"
] | null | null | null | inf_classif_analysis/tree_based_muliclassification/dec_trees_LET_IS.py | Marco-Ametrano/myocardal_infarction_class | d2fb9d4d6643d0b836ffdb94a32911eb4d68c390 | [
"MIT"
] | null | null | null | #DECISION TREES WITH LET_IS
X_train, X_test, Y_LET_train, Y_LET_test = train_test_split(X, LET_IS, test_size=0.3, random_state= 22)
model_imp = DecisionTreeClassifier(random_state=22)
model_imp.fit(X_train, Y_LET_train)
importance = model_imp.feature_importances_
for i,v in enumerate(importance):
print('Feature: %0d, Score: %.5f' % (i,v))
# plot feature importance
plt.bar([x for x in range(len(importance))], importance)
plt.show()
np.where(importance>0.015)
X_train_rid = X_train.iloc[:, [0, 1, 3, 5, 6, 8, 9, 32, 40, 41, 43, 44, 53, 54, 70, 79, 81,
83, 85, 89, 90, 93, 94, 95, 96]]
X_test_rid = X_test.iloc[:, [0, 1, 3, 5, 6, 8, 9, 32, 40, 41, 43, 44, 53, 54, 70, 79, 81,
83, 85, 89, 90, 93, 94, 95, 96]]
class_tree=DecisionTreeClassifier(random_state=22)
param = {'max_depth':range(1, 6), 'min_samples_split':range(3,20), 'min_samples_leaf':range(2,15)}
grid = GridSearchCV(class_tree, param, cv=3)
grid.fit(X_train_rid, Y_LET_train)
print(grid.best_params_)
mod_tree=DecisionTreeClassifier(max_depth=3,min_samples_split=14,min_samples_leaf=3,random_state=22)
mod_tree.fit(X_train_rid, Y_LET_train)
text_representation = tree.export_text(mod_tree)
print(text_representation)
fig1 = plt.figure(figsize=(25,20))
_ = tree.plot_tree(mod_tree,feature_names=list(X_train_rid.columns), class_names=['0', '1', '2', '3', '4', '5', '6', '7'],filled=True)
y_train_pred = mod_tree.predict(X_train_rid)
print(classification_report(Y_LET_train, y_train_pred))
y_test_pred = mod_tree.predict(X_test_rid)
print(classification_report(Y_LET_test, y_test_pred))
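# A quick confusion matrix is a useful complement to the report above
# (sketch; reuses the same test predictions):
from sklearn.metrics import confusion_matrix
print(confusion_matrix(Y_LET_test, y_test_pred))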
| 46.529412 | 134 | 0.730088 | 287 | 1,582 | 3.731707 | 0.355401 | 0.044818 | 0.042017 | 0.018674 | 0.261438 | 0.186741 | 0.126984 | 0.087768 | 0.087768 | 0.087768 | 0 | 0.091233 | 0.113148 | 1,582 | 33 | 135 | 47.939394 | 0.672131 | 0.031606 | 0 | 0.071429 | 0 | 0 | 0.04902 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0.178571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
042412a0d8ad0293d7100c6415f263e7d1db073e | 772 | py | Python | rewyndapp/urls.py | c-o-89/Rewynd2 | 6b647071f40273d47239f35be921fdc8d8a04b1a | [
"MIT"
] | null | null | null | rewyndapp/urls.py | c-o-89/Rewynd2 | 6b647071f40273d47239f35be921fdc8d8a04b1a | [
"MIT"
] | 3 | 2020-02-11T23:23:19.000Z | 2021-06-10T20:58:28.000Z | rewyndapp/urls.py | c-o-89/Rewynd2 | 6b647071f40273d47239f35be921fdc8d8a04b1a | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
app_name = 'rewyndapp' #see https://docs.djangoproject.com/en/2.1/intro/tutorial03/
urlpatterns = [
# /
path('', views.index, name='index'),
# /programs/
path('programs/', views.programs_page, name='programs_page'),
# /programs/5
path('programs/<int:program_id>/', views.program_listview, name='program_listview'),
# /episode/5
path('episode/<int:id>/', views.episode_page, name='episode_page'),
# /about/
path('about/', views.about_page, name='about_page'),
    # /api/programlist/
    path('api/programlist/', views.ProgramList.as_view()),
    # /api/episodelist/5
    path('api/episodelist/<int:program_id>/', views.EpisodeList.as_view()),
    # /api/episodetweets/5
    path('api/episodetweets/<int:episode_id>/', views.EpisodeTweets.as_view()),
]
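# With app_name set above, views and templates can reverse these routes by
# namespace, e.g. reverse('rewyndapp:program_listview', args=[5]).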
| 29.692308 | 88 | 0.673575 | 97 | 772 | 5.206186 | 0.371134 | 0.055446 | 0.047525 | 0.067327 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009023 | 0.138601 | 772 | 25 | 89 | 30.88 | 0.750376 | 0.13342 | 0 | 0 | 0 | 0 | 0.312217 | 0.14178 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
042abc47ba2dcdfd33c99616c0993d3af38d3744 | 1,817 | py | Python | python/example_code/ec2/ec2_basics/ec2_teardown.py | gabehollombe-aws/aws-doc-sdk-examples | dfc0e06ebe1762ab127f3ef5f425507644c6a99c | [
"Apache-2.0"
] | 12 | 2020-07-28T01:20:15.000Z | 2021-12-10T10:52:49.000Z | python/example_code/ec2/ec2_basics/ec2_teardown.py | gabehollombe-aws/aws-doc-sdk-examples | dfc0e06ebe1762ab127f3ef5f425507644c6a99c | [
"Apache-2.0"
] | 5 | 2021-12-10T01:52:47.000Z | 2022-01-04T16:47:45.000Z | python/example_code/ec2/ec2_basics/ec2_teardown.py | gabehollombe-aws/aws-doc-sdk-examples | dfc0e06ebe1762ab127f3ef5f425507644c6a99c | [
"Apache-2.0"
] | 5 | 2020-08-29T14:01:38.000Z | 2021-11-18T07:11:49.000Z | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Shows how to use the AWS SDK for Python (Boto3) with the Amazon Elastic Compute Cloud
(Amazon EC2) API to terminate an instance and clean up additional resources.
"""
import logging
import os
import boto3
from botocore.exceptions import ClientError
logger = logging.getLogger(__name__)
ec2 = boto3.resource('ec2')
def delete_key_pair(key_name, key_file_name):
"""
Deletes a key pair and the specified private key file.
:param key_name: The name of the key pair to delete.
:param key_file_name: The local file name of the private key file.
"""
try:
ec2.KeyPair(key_name).delete()
os.remove(key_file_name)
logger.info("Deleted key %s and private key file %s.", key_name, key_file_name)
except ClientError:
logger.exception("Couldn't delete key %s.", key_name)
raise
def delete_security_group(group_id):
"""
Deletes a security group.
:param group_id: The ID of the security group to delete.
"""
try:
ec2.SecurityGroup(group_id).delete()
logger.info("Deleted security group %s.", group_id)
except ClientError:
logger.exception("Couldn't delete security group %s.", group_id)
raise
def terminate_instance(instance_id):
"""
Terminates an instance. The request returns immediately. To wait for the
instance to terminate, use Instance.wait_until_terminated().
:param instance_id: The ID of the instance to terminate.
"""
try:
ec2.Instance(instance_id).terminate()
logger.info("Terminating instance %s.", instance_id)
except ClientError:
logging.exception("Couldn't terminate instance %s.", instance_id)
raise
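# A minimal usage sketch (the IDs and key names are placeholders for values
# created during setup):
#
#   terminate_instance('i-0123456789abcdef0')
#   delete_security_group('sg-0123456789abcdef0')
#   delete_key_pair('demo-key-pair', 'demo-key-pair.pem')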
| 28.84127 | 87 | 0.695102 | 252 | 1,817 | 4.876984 | 0.345238 | 0.03987 | 0.035801 | 0.022783 | 0.156225 | 0.07323 | 0.07323 | 0 | 0 | 0 | 0 | 0.007714 | 0.21519 | 1,817 | 62 | 88 | 29.306452 | 0.854137 | 0.40011 | 0 | 0.321429 | 0 | 0 | 0.177165 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.142857 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
042ceef1c6dd99b53de1413bd11c4bcbdc65cc48 | 3,351 | py | Python | python/basic-bot/tests/bot_handler_test.py | yizhou-wang/hangouts-chat-samples | d8f60ea39bb3b5ba89c1adaadac00108b07edcb5 | [
"Apache-2.0"
] | null | null | null | python/basic-bot/tests/bot_handler_test.py | yizhou-wang/hangouts-chat-samples | d8f60ea39bb3b5ba89c1adaadac00108b07edcb5 | [
"Apache-2.0"
] | null | null | null | python/basic-bot/tests/bot_handler_test.py | yizhou-wang/hangouts-chat-samples | d8f60ea39bb3b5ba89c1adaadac00108b07edcb5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import json
import sys
from google.appengine.ext import testbed
# Import the module under test
import bot
# from events import Event, event_factory
class BotTest(unittest.TestCase):
ROOM_DISPLAY_NAME = 'Bot Testing'
USER_DISPLAY_NAME = 'Bot Tester'
TEST_MESSAGE = "Test message"
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.app = bot.app.test_client()
# Test the response when the bot is added to the room
def testBotAddedToRoom(self):
message = {
'type': 'ADDED_TO_SPACE',
'space': {
'type': 'ROOM',
'displayName': self.ROOM_DISPLAY_NAME
},
'user': {
'displayName': self.USER_DISPLAY_NAME
}
}
response = self.app.post('/',
data=json.dumps(message),
content_type='application/json')
data = json.loads(response.data)
self.assertEqual(response.status_code, 200)
self.assertEqual(data['text'], 'Thanks for adding me to "%s"!' % self.ROOM_DISPLAY_NAME)
self.assertEqual(response.content_type, 'application/json')
    # Test the response when the bot is added to a DM
def testBotAddedToDM(self):
message = {
'type': 'ADDED_TO_SPACE',
'space': {
'type': 'DM',
'displayName': self.ROOM_DISPLAY_NAME
},
'user': {
'displayName': self.USER_DISPLAY_NAME
}
}
response = self.app.post('/',
data=json.dumps(message),
content_type='application/json')
data = json.loads(response.data)
self.assertEqual(response.status_code, 200)
self.assertEqual(data['text'], 'Thanks for adding me to a DM, %s!'
% self.USER_DISPLAY_NAME)
self.assertEqual(response.content_type, 'application/json')
def testBotSentMessage(self):
message_text = 'Hello bot test!'
message = {
'type': 'MESSAGE',
'space': {
'type': 'DM',
'displayName': self.ROOM_DISPLAY_NAME
},
'user': {
'displayName': self.USER_DISPLAY_NAME
},
'message': {
'text': message_text
}
}
response = self.app.post('/',
data=json.dumps(message),
content_type='application/json')
data = json.loads(response.data)
self.assertEqual(response.status_code, 200)
self.assertEqual(data['text'], 'Your message: "%s"'
% message_text)
self.assertEqual(response.content_type, 'application/json') | 30.743119 | 96 | 0.590272 | 373 | 3,351 | 5.198391 | 0.321716 | 0.05673 | 0.068076 | 0.080454 | 0.525529 | 0.525529 | 0.525529 | 0.500258 | 0.467767 | 0.405879 | 0 | 0.00729 | 0.304088 | 3,351 | 109 | 97 | 30.743119 | 0.824185 | 0.21516 | 0 | 0.506849 | 0 | 0 | 0.156908 | 0 | 0 | 0 | 0 | 0 | 0.123288 | 1 | 0.054795 | false | 0 | 0.068493 | 0 | 0.178082 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
042db7250b5823ff4fb7eec4e23d3e1b3663bb45 | 1,874 | py | Python | CodingTestForEmployment/Part3/implementation/implementation8.py | lkc263/Algorithm_Study_Python | 5b9a74ecf7e864c861df2280a1bf4b393b0fcbca | [
"MIT"
] | null | null | null | CodingTestForEmployment/Part3/implementation/implementation8.py | lkc263/Algorithm_Study_Python | 5b9a74ecf7e864c861df2280a1bf4b393b0fcbca | [
"MIT"
] | null | null | null | CodingTestForEmployment/Part3/implementation/implementation8.py | lkc263/Algorithm_Study_Python | 5b9a74ecf7e864c861df2280a1bf4b393b0fcbca | [
"MIT"
] | null | null | null | from itertools import permutations
def solution(n, weak, dist):
    # Double the length to unroll the 'circular' wall into a straight line
    length = len(weak)
    for i in range(length):
        weak.append(weak[i] + n)
    answer = len(dist) + 1  # initialize to len(dist) + 1 since we minimize the number of friends deployed
    # Try each position from 0 to length - 1 as the starting point
    for start in range(length):
        print()
        print("starting point (start) : ", start)
        # Check every permutation of the friends
        for friends in list(permutations(dist, len(dist))):
            print("friends : ", friends, end=' ')
            count = 1  # number of friends deployed
            # The last position the current friend can inspect
            position = weak[start] + friends[count - 1]
            print("last position this friend can inspect : ", position, ":", weak[start], "+", friends[count - 1])
            # Check every weak point starting from the starting point
            print("checking all weak points from the start", start, start + length)
            for index in range(start, start + length):
                # The weak point lies beyond the reachable position
                if position < weak[index]:
                    print("current index", index, "friend's last reachable position < weak point :", position, weak[index], end=' ')
                    count += 1  # deploy a new friend
                    print("deploying a new friend")
                    if count > len(dist):  # stop if no more friends are available
                        print("more friends needed than available; stopping")
                        break
                    position = weak[index] + friends[count - 1]
                    print("updated last position the new friend can inspect : ", position, ":", weak[index], " + ", friends[count-1], " index, count : ", index, count)
            answer = min(answer, count)  # keep the minimum
            print("result : ", answer)
    if answer > len(dist):
        return -1
    return answer

n = 12
weak = [1, 3, 4, 9, 10]
dist = [3, 5, 7]
print("length of the outer wall : ", n)
print("weak points : ", weak)
print("distance each friend can travel in one hour ", dist)
print(solution(n, weak, dist))
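# Note: the search above tries every starting position times every permutation
# of dist, i.e. O(len(weak) * len(dist)!) candidate assignments, which is only
# feasible because the friend list stays short.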
| 38.244898 | 143 | 0.506403 | 267 | 1,874 | 3.554307 | 0.40824 | 0.022129 | 0.031612 | 0.037935 | 0.189673 | 0.189673 | 0.111697 | 0.092729 | 0.092729 | 0.092729 | 0 | 0.020305 | 0.369264 | 1,874 | 48 | 144 | 39.041667 | 0.782572 | 0.127535 | 0 | 0 | 0 | 0 | 0.155884 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027027 | false | 0 | 0.027027 | 0 | 0.108108 | 0.378378 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
042ffb172aa476d4a620f9efa24fbabc25879f43 | 1,192 | py | Python | MACD/huobi_Python-1.0.3/huobi/model/etfswapconfig.py | yangdemin/dingpan | 0b68c7f9b497c7becab6ec3e7a2b21b5c03a1dd9 | [
"MIT"
] | 1 | 2020-12-28T07:04:45.000Z | 2020-12-28T07:04:45.000Z | MACD/huobi_Python-1.0.3/huobi/model/etfswapconfig.py | yangdemin/dingpan | 0b68c7f9b497c7becab6ec3e7a2b21b5c03a1dd9 | [
"MIT"
] | 1 | 2020-12-05T11:41:35.000Z | 2020-12-05T11:41:35.000Z | MACD/huobi_Python-1.0.3/huobi/model/etfswapconfig.py | yangdemin/dingpan | 0b68c7f9b497c7becab6ec3e7a2b21b5c03a1dd9 | [
"MIT"
] | 1 | 2022-03-27T10:36:04.000Z | 2022-03-27T10:36:04.000Z | from huobi.model.constant import *
class EtfSwapConfig:
"""
The basic information of ETF creation and redemption, as well as ETF constituents, including max
amount of creation, min amount of creation, max amount of redemption, min amount of redemption,
    creation fee rate, redemption fee rate, and ETF create/redeem status.
:member
purchase_max_amount: The max creation amounts per request.
purchase_min_amount: The minimum creation amounts per request.
redemption_max_amount: The max redemption amounts per request.
redemption_min_amount: The minimum redemption amounts per request.
purchase_fee_rate: The creation fee rate.
redemption_fee_rate: The redemption fee rate.
status: The status of the ETF.
unit_price_list: ETF constitution in format of amount and currency.
"""
def __init__(self):
self.purchase_max_amount = 0
self.purchase_min_amount = 0
self.redemption_max_amount = 0
self.redemption_min_amount = 0
self.purchase_fee_rate = 0.0
self.redemption_fee_rate = 0.0
self.status = EtfStatus.INVALID
self.unit_price_list = list()
| 39.733333 | 100 | 0.708054 | 159 | 1,192 | 5.106918 | 0.295597 | 0.068966 | 0.083744 | 0.061576 | 0.110837 | 0.078818 | 0 | 0 | 0 | 0 | 0 | 0.00884 | 0.240772 | 1,192 | 29 | 101 | 41.103448 | 0.888398 | 0.619128 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
043041ecd9a9126a54c7e1da17ed2afba9ee574c | 1,092 | py | Python | boto3fu/route53/commands.py | bdandoy/boto3fu | 4cdc50187162245eeea7230653569fdf1d66da5b | [
"MIT"
] | null | null | null | boto3fu/route53/commands.py | bdandoy/boto3fu | 4cdc50187162245eeea7230653569fdf1d66da5b | [
"MIT"
] | null | null | null | boto3fu/route53/commands.py | bdandoy/boto3fu | 4cdc50187162245eeea7230653569fdf1d66da5b | [
"MIT"
] | null | null | null | import logging
from boto3fu.connection_manager import client_aggregator
#import boto3fu.list.route53
from boto3fu.route53 import route53
from boto3fu import outputs
logger = logging.getLogger(__name__)
def get_route53_zones(profile, region, boto_client_params, zone_type, output_format='table'):
"""
"""
if not profile:
profile = [None]
if not region:
region = [None]
clients = client_aggregator(profile, region, 'route53', boto_client_params)
zones = []
for c in clients:
zones.extend(route53.get_route53_zones(c, zone_type))
outputs.output(zones, output_format)
def get_route53_resource_records(profile, region, boto_client_params, zone_type, output_format='table', zone_names=[]):
"""
"""
if not profile:
profile = [None]
if not region:
region = [None]
clients = client_aggregator(profile, region, 'route53', boto_client_params)
records = []
for c in clients:
records.extend(route53.get_resource_records(c, zone_names, zone_type))
outputs.output(records, output_format) | 28.736842 | 119 | 0.705128 | 135 | 1,092 | 5.444444 | 0.274074 | 0.070748 | 0.087075 | 0.062585 | 0.427211 | 0.427211 | 0.427211 | 0.427211 | 0.427211 | 0.427211 | 0 | 0.027335 | 0.195971 | 1,092 | 38 | 120 | 28.736842 | 0.809795 | 0.024725 | 0 | 0.48 | 0 | 0 | 0.023011 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.16 | 0 | 0.24 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04340e604ec03fd026df342b717d3a67b934ea4b | 7,897 | py | Python | chatserver.py | ian-hsieh/RenderManForBlender | c827f029f4cbbd1fcc71ed8d3694fc5ac58cc468 | [
"MIT"
] | 12 | 2019-05-03T21:58:15.000Z | 2022-02-24T07:02:21.000Z | chatserver.py | ian-hsieh/RenderManForBlender | c827f029f4cbbd1fcc71ed8d3694fc5ac58cc468 | [
"MIT"
] | 4 | 2019-03-07T18:20:16.000Z | 2020-09-24T21:53:15.000Z | chatserver.py | ian-hsieh/RenderManForBlender | c827f029f4cbbd1fcc71ed8d3694fc5ac58cc468 | [
"MIT"
] | 3 | 2019-05-25T01:17:09.000Z | 2019-09-13T14:43:12.000Z | # -----------------------------------------------------------------------------
#
# Copyright (c) 1986-2018 Pixar. All rights reserved.
#
# The information in this file (the "Software") is provided for the exclusive
# use of the software licensees of Pixar ("Licensees"). Licensees have the
# right to incorporate the Software into other products for use by other
# authorized software licensees of Pixar, without fee. Except as expressly
# permitted herein, the Software may not be disclosed to third parties, copied
# or duplicated in any form, in whole or in part, without the prior written
# permission of Pixar.
#
# The copyright notices in the Software and this entire statement, including the
# above license grant, this restriction and the following disclaimer, must be
# included in all copies of the Software, in whole or in part, and all permitted
# derivative works of the Software, unless such copies or derivative works are
# solely in the form of machine-executable object code generated by a source
# language processor.
#
# PIXAR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL PIXAR BE
# LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. IN NO CASE WILL
# PIXAR'S TOTAL LIABILITY FOR ALL DAMAGES ARISING OUT OF OR IN CONNECTION WITH
# THE USE OR PERFORMANCE OF THIS SOFTWARE EXCEED $50.
#
# Pixar
# 1200 Park Ave
# Emeryville CA 94608
#
# -----------------------------------------------------------------------------
import socketserver
import threading
import re
class CmdString(object):
"""Class for holding "well ordered" commands of the form:
cmd -opt1 <value> -opt2 <value2> [;]
    where the opts and values always come in pairs and a value
can be quoted with either {}'s or ""'s in which case the
value can contain spaces, newlines. " and } can appear in
value if preceded by \
"""
def __init__(self, s):
#
s = s.decode()
self.cmd, sep, opts = s.partition(' ')
if opts.endswith(';'):
opts = opts[:-1]
# parse out strings like:
# dspyParam -proto {it} -user {j\nb} -foo bar -baz "James Brown"'
pat_flag = re.compile(r'\s*-(\w+)')
#
# get a string inside {}'s but allow nesting of {}'s by supporting
# \} to escape the end of a block (?: is a non-capturing group and
# (?<=\\) is a look-behind assertion, in this case preceded
# by a \.
pat_curly = re.compile(r'\s*{((?:(?<=\\)}|[^}])*)}')
# like curly but start and end with double quotes
pat_dquotes = re.compile(r'\s*"((?:(?<=\\)"|[^"])*)"')
pat_word = re.compile(r'\s*([^-\s][^\s]*)')
cflag = None
self.args = list()
self.dargs = dict()
# walk the string, alternate between flags and values, slicing
# off what we've parsed so far
while len(opts) > 0:
# 1. flags
m = pat_flag.match(opts)
if not m:
return
cflag = m.groups()[0]
self.args.append(cflag)
if cflag not in self.dargs:
self.dargs[cflag] = None
opts = opts[len(m.group()):]
# 2. values
while m:
m = pat_curly.match(opts)
if m:
curly = m.groups()[0]
curly = curly.replace('\\}', '}')
self.addValue(cflag, curly)
opts = opts[len(m.group()):]
continue
m = pat_dquotes.match(opts)
if m:
dquotes = m.groups()[0]
dquotes = dquotes.replace('\\"', '"')
self.addValue(cflag, dquotes)
opts = opts[len(m.group()):]
continue
m = pat_word.match(opts)
if m:
word = m.groups()[0]
self.addValue(cflag, word)
opts = opts[len(m.group()):]
continue
def addValue(self, flag, value):
self.args.append(value)
if not flag:
return
if flag in self.dargs and self.dargs[flag] is not None:
self.dargs[flag] = self.dargs[flag] + ' ' + value
else:
self.dargs[flag] = value
def getCommand(self):
return self.cmd
def getOpt(self, flag, defaultValue=None):
if flag in self.dargs:
return self.dargs[flag]
elif defaultValue is not None:
return defaultValue
return None
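# For example, CmdString(b'dspyRender -port 1234 -crop {0 1 0 1}') parses to
# cmd 'dspyRender' with getOpt('port') == '1234' and getOpt('crop') == '0 1 0 1'
# (repeated values for one flag are joined with spaces by addValue above).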
class ItBaseHandler:
"""A protocol handler, sub-classes are expected to override dspyRender
    taking care that this method will run in its own thread."""
def __init__(self, request):
self.request = request
def handle(self):
while True:
data = self.request.recv(2048).strip()
            if not data:
break
ack = "ok\x00"
self.request.sendall(ack.encode())
data = data[:-1] # remove the trailing nul
self.msg = CmdString(data)
cmd = self.msg.getCommand()
if cmd == 'dspyRender':
self.dspyRender()
elif cmd == 'dspyIPR':
self.dspyIPR()
elif cmd == 'stopRender':
self.stopRender()
elif cmd == 'SelectObject':
self.selectObjectById()
elif cmd == 'SelectSurface':
self.selectSurfaceById()
else:
pass
def dspyRender(self):
"""Overriders should look in self.cmd for the arguments to dspyRender"""
pass
def dspyIPR(self):
pass
def stopRender(self):
pass
def selectObjectById(self):
pass
def selectSurfaceById(self):
pass
protocols = {
'it': ItBaseHandler,
}
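# Additional protocols register here by name; digestProtocol() below looks the
# handler up from the -proto option of the UtTcpOpen connect string.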
class CommandHandler(socketserver.ThreadingMixIn, socketserver.BaseRequestHandler):
"""
The request handler.
"""
def handle(self):
self.data = self.request.recv(1024).strip()
ack = "ok\x00"
self.request.sendall(ack.encode())
protocol = self.digestProtocol(self.data)
if protocol:
handler = protocol(self.request)
handler.handle()
def digestProtocol(self, connectString):
header = CmdString(connectString)
magic = header.getCommand()
if magic != 'UtTcpOpen':
return None
try:
pname = header.getOpt("proto")
handler = protocols[pname]
return handler
        except KeyError:  # unknown or missing protocol name
return None
if __name__ == "__main__":
import time
print("hello")
s = 'dspyParams -proto {it} -user {j\nb} -foo bar -crop 0.0 1 0.0 1.0 -baz "James Brown"'
cs = CmdString(s)
cmd = cs.getCommand()
proto = cs.getOpt('proto')
user = cs.getOpt('user')
foo = cs.getOpt('foo')
crop = cs.getOpt('crop')
baz = cs.getOpt('baz')
# zero port makes the OS pick one
host, port = "localhost", 0
    # Create the server, binding to localhost on an OS-assigned port
server = socketserver.TCPServer((host, port), CommandHandler)
ip, port = server.server_address
print("serving on port %s port = %d" % (str(ip), port))
thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
thread.daemon = True
thread.start()
print("now serving")
while True:
time.sleep(2)
print("tick")
| 31.213439 | 93 | 0.564392 | 939 | 7,897 | 4.71885 | 0.354633 | 0.020311 | 0.014669 | 0.00993 | 0.090273 | 0.071993 | 0.066351 | 0.057323 | 0.028436 | 0.028436 | 0 | 0.010357 | 0.31531 | 7,897 | 252 | 94 | 31.337302 | 0.809136 | 0.355578 | 0 | 0.21831 | 0 | 0.007042 | 0.067298 | 0.010075 | 0 | 0 | 0 | 0 | 0 | 1 | 0.091549 | false | 0.042254 | 0.028169 | 0.007042 | 0.204225 | 0.028169 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0434211689f84077d77bec4e7790bf15ec9ada09 | 3,317 | py | Python | experiment_3_automaton.py | yangliu28/swarm_automata_sim | 129b5f078a8ad19b32c3bb2d3b5f0734e9b11b2a | [
"MIT"
] | null | null | null | experiment_3_automaton.py | yangliu28/swarm_automata_sim | 129b5f078a8ad19b32c3bb2d3b5f0734e9b11b2a | [
"MIT"
] | null | null | null | experiment_3_automaton.py | yangliu28/swarm_automata_sim | 129b5f078a8ad19b32c3bb2d3b5f0734e9b11b2a | [
"MIT"
] | 1 | 2021-08-14T08:33:53.000Z | 2021-08-14T08:33:53.000Z | # automaton class for experiment 3
# automaton has constant velocity, the moving direction changed by pheromone
# which is produced by the automata, and pheromone weakens over time
# in this way the automaton no loonger needs a moving direction fixed to the body
# it is free to move omnidirectional like a partical now
from math import *
from utilities import reset_radian
class Automaton:
def __init__(self, pos, vel, ori, radia_radius, alpha):
self.pos = list(pos) # pos[0] for x, pos[1] for y, convert to list
self.vel = vel # unsigned scalar, constant
self.ori = ori # global orientation of moving direction
# radiation length and alpha are attributes of this automaton
self.radia_radius = radia_radius # radiation length
# alpha is relative angle of generated pheromone to moving direction
self.alpha = alpha
def update_with_accel(self, accel_r, delta_t):
# update position and orientation of automaton with acceleration
# accel_r is signed centripetal acceleration, rotating ccw is positive
ori_delta = accel_r*delta_t/self.vel
# print warning msg if rotation angle is too large
if ori_delta > pi/2:
print("rotation angle is too large for one update")
# use middle orientation to calculate new position
ori_mid = reset_radian(self.ori + ori_delta/2)
self.pos[0] = self.pos[0] + cos(ori_mid) * self.vel*delta_t
self.pos[1] = self.pos[1] + sin(ori_mid) * self.vel*delta_t
# update orientation
self.ori = reset_radian(self.ori + ori_delta)
def update_without_accel(self, delta_t):
# update position and orientation of automaton without acceleration
self.pos[0] = self.pos[0] + cos(self.ori) * self.vel*delta_t
self.pos[1] = self.pos[1] + sin(self.ori) * self.vel*delta_t
def check_boundary(self, arena_size, delta_t): # rebound the automaton on boundaries
if self.vel == 0: return # nothing to do if automaton is still
# use velocities in Cartesian arena coordinates to control automaton rebound
vel_x_temp = self.vel * cos(self.ori)
vel_y_temp = self.vel * sin(self.ori)
if ((self.pos[0] >= arena_size[0]/2 and vel_x_temp > 0)
or (self.pos[0] <= -arena_size[0]/2 and vel_x_temp < 0)):
            # if automaton is out of the left or right boundaries
# flip positive moving direction along vertical line
self.ori = reset_radian(2*(pi/2) - self.ori)
# update once again so the position is inside arena
self.update_without_accel(delta_t)
if ((self.pos[1] >= arena_size[1]/2 and vel_y_temp > 0)
or (self.pos[1] <= -arena_size[1]/2 and vel_y_temp < 0)):
            # if automaton is out of the top or bottom boundaries
# flip positive moving direction along horizontal line
self.ori = reset_radian(2*(0) - self.ori)
self.update_without_accel(delta_t)
# accessors for Automaton class
def get_pos(self):
# return a copy of pos, not binding to another variable
return self.pos[:]
def get_ori(self):
return self.ori
def get_radia_radius(self):
return self.radia_radius
def get_alpha(self):
return self.alpha
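# A minimal usage sketch (parameter values here are arbitrary):
#
#   a = Automaton(pos=(0.0, 0.0), vel=1.0, ori=0.0, radia_radius=2.0, alpha=pi/4)
#   a.update_with_accel(accel_r=0.5, delta_t=0.1)
#   a.check_boundary(arena_size=(10.0, 10.0), delta_t=0.1)
#   print(a.get_pos(), a.get_ori())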
| 48.072464 | 89 | 0.66325 | 494 | 3,317 | 4.323887 | 0.287449 | 0.049157 | 0.022472 | 0.024345 | 0.322097 | 0.298689 | 0.14794 | 0.13015 | 0.088015 | 0.088015 | 0 | 0.013804 | 0.257462 | 3,317 | 68 | 90 | 48.779412 | 0.853431 | 0.407597 | 0 | 0.05 | 0 | 0 | 0.021728 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.05 | 0.1 | 0.375 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0435186584e8793357969fd5fb749a69b0b00fa4 | 708 | py | Python | Tkinter_practice/projects/rectangle_drawyer.py | Aaftab-Alam/PythonPractice | 9167fef1ac87f1560803418aff0c328884ca5df7 | [
"Apache-2.0"
] | null | null | null | Tkinter_practice/projects/rectangle_drawyer.py | Aaftab-Alam/PythonPractice | 9167fef1ac87f1560803418aff0c328884ca5df7 | [
"Apache-2.0"
] | null | null | null | Tkinter_practice/projects/rectangle_drawyer.py | Aaftab-Alam/PythonPractice | 9167fef1ac87f1560803418aff0c328884ca5df7 | [
"Apache-2.0"
] | null | null | null | from tkinter import *
root=Tk()
root.geometry("1000x1000")
def origin(event):
global x,y
x=event.x
y=event.y
def rect(event):
x1,y1=event.x,event.y
canvas.delete("last")
canvas.create_rectangle(x,y,x1,y1,outline="grey", width=5,tag="last")
canvas.update()
def release(event):
x2,y2=event.x,event.y
canvas.delete("last")
canvas.create_rectangle(x,y,x2,y2,fill="#992512",outline="#992512")
canvas=Canvas(root,width=700,height=700, highlightbackground="black", highlightthickness=4)
canvas.pack(anchor="c")
canvas.bind("<Button-1>",origin)
canvas.bind("<B1-Motion>",rect)
canvas.bind("<ButtonRelease-1>", release)
Button(text="Clear",command=lambda :canvas.delete("all")).pack()
root.mainloop() | 25.285714 | 91 | 0.725989 | 111 | 708 | 4.612613 | 0.477477 | 0.015625 | 0.042969 | 0.046875 | 0.199219 | 0.199219 | 0.199219 | 0.199219 | 0.199219 | 0.199219 | 0 | 0.059361 | 0.072034 | 708 | 28 | 92 | 25.285714 | 0.719939 | 0 | 0 | 0.086957 | 0 | 0 | 0.12835 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.043478 | 0 | 0.173913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0435b042b021506aa9d0f8c2b8b9825404c2c86c | 1,510 | py | Python | src/record_test.py | rrednoss/repository-monitoring | 8e35a2b0221f2626b4be8ba758dce6e937c1dbc9 | [
"MIT"
] | 2 | 2022-01-08T23:08:52.000Z | 2022-01-08T23:31:21.000Z | src/record_test.py | rrednoss/repository-monitoring | 8e35a2b0221f2626b4be8ba758dce6e937c1dbc9 | [
"MIT"
] | null | null | null | src/record_test.py | rrednoss/repository-monitoring | 8e35a2b0221f2626b4be8ba758dce6e937c1dbc9 | [
"MIT"
] | null | null | null | import datetime
import os
import unittest
from record import Record
class TestRecord(unittest.TestCase):
def setUp(self) -> None:
self.path = os.path.dirname(os.path.realpath(__file__))
self.name = "test_file"
self.record = Record(self.path, self.name)
def tearDown(self) -> None:
os.remove(self.record.path())
def test_add(self):
# creating the test case
repository = "/home/rednoss/Documents/Workspace/repository-monitoring"
branch = "main"
timestamp = datetime.datetime.now().replace(microsecond=0).isoformat()
self.record.add(repository, branch, timestamp)
# testing the result
with open(self.record.path(), "r") as f:
current_record = f.read()
self.assertTrue(repository in current_record)
self.assertTrue(branch in current_record)
self.assertTrue(timestamp in current_record)
def test_read(self):
# creating the test case
repository = "/home/rednoss/Documents/Workspace/repository-monitoring"
branch = "main"
timestamp = datetime.datetime.now().replace(microsecond=0).isoformat()
self.record.add(repository, branch, timestamp)
# testing the result
current_record = self.record.read()
self.assertTrue(repository in current_record)
self.assertTrue(timestamp in current_record.get(repository))
self.assertTrue(branch in current_record.get(repository).get(timestamp))
| 35.116279 | 80 | 0.666225 | 174 | 1,510 | 5.695402 | 0.293103 | 0.104945 | 0.090817 | 0.057518 | 0.668012 | 0.641776 | 0.600404 | 0.600404 | 0.600404 | 0.407669 | 0 | 0.001718 | 0.229139 | 1,510 | 42 | 81 | 35.952381 | 0.849656 | 0.054967 | 0 | 0.333333 | 0 | 0 | 0.090014 | 0.077356 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.133333 | false | 0 | 0.133333 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
043643d2f83998726a0bf777aa809199b7dd4624 | 1,308 | py | Python | app/api/query_utils.py | murilocg/ActivityAnalysisOfCodeReview | ad0f14bf93838e7ec4e6204a6361310c119c4bb0 | [
"MIT"
] | 1 | 2022-03-21T19:06:41.000Z | 2022-03-21T19:06:41.000Z | app/api/query_utils.py | KesleyV/Lab6 | 3440d5dccfd5a3e278309e6b5c4644a808ef51bd | [
"MIT"
] | null | null | null | app/api/query_utils.py | KesleyV/Lab6 | 3440d5dccfd5a3e278309e6b5c4644a808ef51bd | [
"MIT"
] | null | null | null | import requests
import time
endpoint = "https://api.github.com/graphql"
interval_retry = 5
max_retries = 5
def create_query(params, query_template):
q = query_template
for k in params.keys():
value = params[k]
if type(value) == int:
q = add_param_number(k, value, q)
else:
q = add_param_string(k, value, q)
return q
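# For example (hypothetical template), with
#   template = 'query { repository(owner: $owner, name: $name) { stargazerCount } }'
#   create_query({'$owner': 'octocat', '$name': 'hello-world'}, template)
# each key is textually replaced in the template: ints stay unquoted, strings
# are wrapped in double quotes, and empty strings become GraphQL null.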
def add_param_number(name, value, query):
return query.replace(name, '%d' % value)
def add_param_string(name, value, query):
return query.replace(name, "null") if value == "" else query.replace(name, '"%s"' % value)
def retry(query, token, sleep):
next_sleep = sleep + interval_retry if sleep < 25 else sleep
retry_count = sleep/interval_retry
time.sleep(sleep)
return execute_query(query, token, next_sleep)
def execute_query(query, token, sleep = 0):
request = ''
try:
request = requests.post(endpoint, json = {'query': query}, headers = {
'Content-Type': 'application/json',
'Authorization': 'bearer ' + token
})
    except Exception:
return retry(query, token, sleep)
if request.status_code == 200:
data = request.json()
return data if "data" in data else retry(query, token, sleep)
else:
return retry(query, token, sleep) | 29.066667 | 95 | 0.640673 | 175 | 1,308 | 4.668571 | 0.36 | 0.073439 | 0.091799 | 0.097919 | 0.151775 | 0.088127 | 0.088127 | 0 | 0 | 0 | 0 | 0.008024 | 0.237768 | 1,308 | 45 | 96 | 29.066667 | 0.811434 | 0 | 0 | 0.108108 | 0 | 0 | 0.074102 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135135 | false | 0 | 0.054054 | 0.054054 | 0.378378 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0436cf2be3be1f72af97336e50b0875aeeb9ae56 | 2,455 | py | Python | Software/PiKwonDo/timer_test.py | Wright4TheJob/PiKwonDo | 441174ce357fe1cda2dbf98539d5cee7d4f34541 | [
"MIT"
] | 1 | 2018-03-26T17:36:00.000Z | 2018-03-26T17:36:00.000Z | Software/PiKwonDo/timer_test.py | Wright4TheJob/PiKwonDo | 441174ce357fe1cda2dbf98539d5cee7d4f34541 | [
"MIT"
] | null | null | null | Software/PiKwonDo/timer_test.py | Wright4TheJob/PiKwonDo | 441174ce357fe1cda2dbf98539d5cee7d4f34541 | [
"MIT"
] | 1 | 2018-11-21T16:38:24.000Z | 2018-11-21T16:38:24.000Z | #! /bin/env/python3
# David Wright
# Copyright 2017
# Written for Python 3.5.6
"""Test the timer board and GPIO timing."""
import sys
import queue
from PyQt5.QtWidgets import QWidget, QApplication, QGridLayout, QLabel
from PyQt5.QtCore import Qt
import gpio_scanner
class App(QWidget):
"""Create GUI visualization of timer board button status."""
def __init__(self):
"""Start python application."""
super().__init__()
self.title = 'Testing Timer Board'
self.left = 10
self.top = 10
self.width = 640
self.height = 200
self.button_statuses = None
self.init_ui()
self.start_gpio_scan()
self.dropbox = queue.Queue()
gpio_scanner.PeriodicActionThread(self.read_queue, 1000)
def init_ui(self):
"""Create GUI elements."""
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
grid_layout = QGridLayout()
self.labels = []
self.statuses = []
for i in range(0, 5):
label = QLabel("Button %i" % (i+1), self)
label.setAlignment(Qt.AlignCenter)
grid_layout.addWidget(label, 0, i)
status = QLabel("", self)
status.setStyleSheet("background: red")
grid_layout.addWidget(status, 1, i)
self.statuses.append(status)
self.setLayout(grid_layout)
self.show()
def start_gpio_scan(self):
"""Begin hardware scanner routine."""
self.scanner = gpio_scanner.HardwareControllerScanner()
def read_queue(self):
"""Read the latest value from queue and react."""
try:
data = self.dropbox.get(block=False)
self.button_statuses = [x[0] for x in data]
self.update_ui()
except queue.Empty:
# print('Queue is empty, not redrawing')
pass
def update_ui(self):
"""Redraw the UI elements based on button status."""
for label, value in zip(self.statuses, self.button_statuses):
self.set_label_color(label, value)
    def set_label_color(self, label, value):
"""Select label background color based on button status."""
if value == 1:
label.setStyleSheet('background: green')
else:
label.setStyleSheet('background: red')
if __name__ == '__main__':
APP = QApplication(sys.argv)
ex = App()
sys.exit(APP.exec_())
| 29.578313 | 70 | 0.61222 | 294 | 2,455 | 4.972789 | 0.414966 | 0.02736 | 0.036936 | 0.023256 | 0.031464 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017406 | 0.274542 | 2,455 | 82 | 71 | 29.939024 | 0.803481 | 0.173931 | 0 | 0 | 0 | 0 | 0.041856 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109091 | false | 0.018182 | 0.090909 | 0 | 0.218182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04374967f85141607159675f39023a19f0e391db | 2,414 | py | Python | http-minimal.py | Birch-san/bcc-traffic-by-service | 9249673a2302d9817a9d88a8e14081a1d359f38c | [
"Apache-2.0"
] | null | null | null | http-minimal.py | Birch-san/bcc-traffic-by-service | 9249673a2302d9817a9d88a8e14081a1d359f38c | [
"Apache-2.0"
] | null | null | null | http-minimal.py | Birch-san/bcc-traffic-by-service | 9249673a2302d9817a9d88a8e14081a1d359f38c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# invoke with:
# sudo ./http-minimal.py -i docker0
from bcc import BPF
from sys import argv
import socket
import os
def toHex(s):
    # Accepts both Python 2 str (iterates as 1-char strings) and Python 3
    # bytes (iterates as ints); emits two lowercase hex digits per byte.
    return ''.join('{:02x}'.format(b if isinstance(b, int) else ord(b)) for b in s)
interface = argv[2]
print ("binding socket to '%s'" % interface)
bpf = BPF(text = \
r'''
#include <bcc/proto.h>
#define IP_TCP 6
int http_filter(struct __sk_buff *skb) {
u8 *cursor = 0;
struct ethernet_t *ethernet = cursor_advance(cursor, sizeof(*ethernet));
//filter IP packets (ethernet type = 0x0800)
if (!(ethernet->type == 0x0800)) {
goto DROP;
}
struct ip_t *ip = cursor_advance(cursor, sizeof(*ip));
//filter TCP packets (ip next protocol = 0x06)
if (ip->nextp != IP_TCP) {
goto DROP;
}
u32 ip_header_length = 0;
//calculate ip header length
//value to multiply * 4
//e.g. ip->hlen = 5 ; IP Header Length = 5 x 4 byte = 20 byte
ip_header_length = ip->hlen << 2; //SHL 2 -> *4 multiply
//check ip header length against minimum
if (ip_header_length < sizeof(*ip)) {
goto DROP;
}
//shift cursor forward for dynamic ip header size
void *_ = cursor_advance(cursor, (ip_header_length-sizeof(*ip)));
struct tcp_t *tcp = cursor_advance(cursor, sizeof(*tcp));
bpf_trace_printk("%u\n", tcp->seq_num);
return -1;
//drop the packet returning 0
DROP:
return 0;
}
''')
function_http_filter = bpf.load_func("http_filter", BPF.SOCKET_FILTER)
BPF.attach_raw_socket(function_http_filter, interface)
socket_fd = function_http_filter.sock
sock = socket.fromfd(socket_fd,socket.PF_PACKET,socket.SOCK_RAW,socket.IPPROTO_IP) # formerly IPPROTO_IP
sock.setblocking(True)
ETH_HLEN = 14
print ("ready")
while 1:
#retrieve raw packet from socket
packet_str = os.read(socket_fd,65536) #set packet length to max packet length on the interface. formerly 4096.
#calculate ip header length
ip_header_length = bytearray(packet_str)[ETH_HLEN] #load Byte
ip_header_length = ip_header_length & 0x0F #mask bits 0..3
ip_header_length = ip_header_length << 2 #shift to obtain length
tcp_header_offset = ETH_HLEN+ip_header_length
seq_num_str = packet_str[tcp_header_offset+4:tcp_header_offset+8]
seq_num = int(toHex(seq_num_str),16)
print(seq_num) | 24.14 | 112 | 0.673985 | 369 | 2,414 | 4.211382 | 0.382114 | 0.07722 | 0.126126 | 0.041184 | 0.097812 | 0.054054 | 0 | 0 | 0 | 0 | 0 | 0.029749 | 0.206297 | 2,414 | 100 | 113 | 24.14 | 0.781315 | 0.106048 | 0 | 0 | 0 | 0 | 0.037443 | 0 | 0 | 0 | 0.003653 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.125 | 0 | 0.1875 | 0.09375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04377bfb98f325388549e877032310227a7f4d17 | 4,915 | py | Python | packages/galapagos_embedded/scripts/viewer.py | 100kimch/ros_galapagos | 8f92cb93246c263b61199aef113e43cefc5f3939 | [
"MIT"
] | 2 | 2020-10-26T05:01:35.000Z | 2022-02-14T10:37:17.000Z | packages/galapagos_embedded/scripts/viewer.py | 100kimch/ros_galapagos | 8f92cb93246c263b61199aef113e43cefc5f3939 | [
"MIT"
] | null | null | null | packages/galapagos_embedded/scripts/viewer.py | 100kimch/ros_galapagos | 8f92cb93246c263b61199aef113e43cefc5f3939 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# NOTE: using rospy library unrecommended in processor.py
import rospy
from geometry_msgs.msg import Twist
from lib_eye import EYE
from lib_lidar import *
from scheduler import SCHEDULER
import math
# * Variables
IS_RUNNING = True
BUF_ANGULAR = [0, 0]
# * Methods
def initialize():
""" initialize processing """
EYE.calibrate()
def view_subcam(image):
""" process the subcam image """
global DIRECTION
global BUF_ANGULAR
if not SCHEDULER.is_enable["subcam"]:
return
if EYE.is_sub_occupied():
return
if SCHEDULER.debug_option["show_timer"]:
SCHEDULER.check_time("subcam", min=0.3)
info = EYE.see_sub(image)
# print(EYE.get_front_state() + " " + str(EYE.get_front_state()))
if info is None:
rospy.logwarn("[PROC] No Information!")
return
elif False:
# elif EYE.get_front_state() is "turning":
# rospy.logdebug("[PROC] turning...")
center = info["center"]
slope = info["slope"]
# left: + right: -
# if slope > 0:
# weight_slope = pow(abs(slope) / 1.8, 0.9) * 2.6
# else:
# weight_slope = - pow(abs(slope) / 1.8, 0.9) * 2.6
# # weight_slope = - pow(abs(slope) / 1.8, 0.5) * 2.5
if slope > 0:
value = pow(abs(slope) / 1.8, 1.2) * 3.2
else:
value = - pow(abs(slope) / 1.8, 1.2) * 3.2
# if slope > 0:
# weight_center = pow(abs(center) / 250, 0.9) * 5.5
# elif slope < -0:
# weight_center = - pow(abs(center) / 250, 0.9) * 5.5
# else:
# weight_center = 0
# # weight_center = slope * 1
if value > 2.6:
value = 2.6
elif value < -2.6:
value = -2.6
degree = value
# BUF_ANGULAR.append(value)
# past_val = BUF_ANGULAR.pop(0) * 0.7
# if info["has_line"]:
# if (value * past_val >= 0) and (abs(value) > abs(past_val)):
# degree = value
# else:
# degree = 0
# BUF_ANGULAR = [0] * BUF_SIZE
# else:
# if value > 0:
# degree = 2.7
# elif value < 0:
# degree = -2.7
# else:
# degree = 0
# if not info["has_line"]:
# if (slope < 0):
# degree = 4.12
# else:
# degree = -4.12
if SCHEDULER.debug_option["show_center_slope"]:
rospy.logdebug(
"[PROC] slope: {:.2f} w_slope: {:.2f} degree: {:.2f} {}".format(
slope, value, degree, info["has_line"])
)
    elif EYE.get_front_state() == "straight":
# rospy.logdebug("[PROC] going straight...")
center = info["center"]
if center < 0:
value = pow(abs(center) / 150, 0.9) * 2
elif center > 0:
value = - pow(abs(center) / 150, 0.9) * 2
else:
value = 0
if value > 1.5:
value = 1.5
elif value < -1.5:
value = -1.5
degree = value
if SCHEDULER.debug_option["show_center_slope"]:
rospy.logdebug(
"[PROC] center: {:.2f} w_center: {:.2f} {}".format(
center, degree, info["has_line"])
)
rospy.Timer(
rospy.Duration(0.14), EYE.release_sub_occupied, oneshot=True
)
# TURTLE.turn("", 0.13, degree)
EYE.release_sub_occupied()
def view_frontcam(image):
""" process the frontcam """
# rospy.loginfo("[VIEW] frontcam image received.")
if not EYE.is_front_occupied():
if SCHEDULER.debug_option["show_center_slope"]:
SCHEDULER.check_time("frontcam", min=0.4)
info = EYE.see_front(image)
if info is None:
return
# rospy.logdebug("info: {:s}".format(str(info)))
if info["center"] is "-1000" or info["center"] is "1000" or info["center"] is "0":
pass
else:
EYE.reset_state()
rospy.Timer(rospy.Duration(0.1),
EYE.release_front_occupied, oneshot=True)
def view_lidar(lidar_data):
""" process the lidar data """
set_lidar_values(lidar_data)
rospy.logdebug("left: {:.2f} right: {:.2f} front: {:.2f}".format(
get_object_distance("left"), get_object_distance("right"), get_object_distance("front")))
def view_speed(twist):
global IS_RUNNING
if twist.linear.x == 0 and twist.angular.x == 0:
IS_RUNNING = False
else:
# rospy.logdebug("[VIEW] speed: {:.2f}".format(twist.linear.x))
IS_RUNNING = True
def calibrate_subcam(image):
""" calibrate the subcam """
info = EYE.see_sub(image)
rospy.loginfo("info: {:s}".format(str(info)))
rospy.signal_shutdown("[VIEW] ended calibration")
| 28.575581 | 97 | 0.525534 | 611 | 4,915 | 4.094926 | 0.217676 | 0.021583 | 0.021982 | 0.023981 | 0.290168 | 0.220624 | 0.180655 | 0.165867 | 0.136691 | 0.136691 | 0 | 0.041539 | 0.333876 | 4,915 | 171 | 98 | 28.74269 | 0.722663 | 0.292981 | 0 | 0.258427 | 0 | 0 | 0.106533 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067416 | false | 0.011236 | 0.067416 | 0 | 0.179775 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04381cc77af07a3db0659e84346acc80968d1b66 | 17,549 | py | Python | python/TwitterAPY.py | parobo/TwitterAPY | 1edb897d2614d95ce54fcd52abe771e1bbdaa9cf | [
"MIT"
] | null | null | null | python/TwitterAPY.py | parobo/TwitterAPY | 1edb897d2614d95ce54fcd52abe771e1bbdaa9cf | [
"MIT"
] | null | null | null | python/TwitterAPY.py | parobo/TwitterAPY | 1edb897d2614d95ce54fcd52abe771e1bbdaa9cf | [
"MIT"
] | null | null | null | import requests
from requests.exceptions import ChunkedEncodingError, ConnectionError
from time import time, sleep
class page:
"""
A page contains properties for the tweets in it,
(one page always contains 10-500 tweets,
depending on the max_result parameter in the request),
for the users returned by the request,
as well as the places and referenced tweets.
"""
def __init__(self, data, endpoint):
self._data = data
self.endpoint = endpoint
@property
def tweets(self):
if self.endpoint == 'fullarchivesearch':
if self._data.get('data'):
return [tweet(tweet_data) for tweet_data in self._data['data']]
else:
return None
@property
def users(self):
if self.endpoint == 'fullarchivesearch':
if self._data.get('includes'):
if self._data['includes'].get('users'):
return [user(user_data) for user_data in self._data['includes']['users']]
else:
return None
else:
return None
elif self.endpoint == 'userfollowing' or self.endpoint == 'userfollowers':
if self._data.get('data'):
return [user(user_data) for user_data in self._data['data']]
else:
return None
@property
def places(self):
if self._data.get('includes'):
if self._data['includes'].get('places'):
return [place(place_data) for place_data in self._data['includes']['places']]
else:
return None
else:
return None
@property
def referenced_tweets(self):
if self._data.get('includes'):
if self._data['includes'].get('tweets'):
return [tweet(tweet_data) for tweet_data in self._data['includes']['tweets']]
else:
return None
else:
return None
@property
def next_token(self):
if self._data.get('meta'):
return self._data['meta'].get('next_token')
else:
return None
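# Pagination sketch: feed page.next_token back into the follow-up request
# (as 'next_token' for search, 'pagination_token' for the follow endpoints)
# until it comes back as None.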
class tweet:
def __init__(self,data):
self._data = data
@property
def id(self):
return self._data.get('id')
@property
def text(self):
return self._data.get('text')
@property
def attachments(self):
return self._data.get('attachments')
@property
def author_id(self):
return self._data.get('author_id')
@property
def context_annotations(self):
return self._data.get('context_annotations')
@property
def conversation_id(self):
return self._data.get('conversation_id')
@property
def created_at(self):
return self._data.get('created_at')
@property
def entities(self):
return self._data.get('entities')
@property
def geo(self):
return self._data.get('geo')
@property
def in_reply_to_user_id(self):
return self._data.get('in_reply_to_user_id')
@property
def lang(self):
return self._data.get('lang')
@property
def retweet_count(self):
if 'public_metrics' in self._data.keys():
return self._data['public_metrics'].get('retweet_count')
else:
return None
@property
def reply_count(self):
if 'public_metrics' in self._data.keys():
return self._data['public_metrics'].get('reply_count')
else:
return None
@property
def like_count(self):
if 'public_metrics' in self._data.keys():
return self._data['public_metrics'].get('like_count')
else:
return None
@property
def quote_count(self):
if 'public_metrics' in self._data.keys():
return self._data['public_metrics'].get('quote_count')
else:
return None
@property
def referenced_tweets(self):
return self._data.get('referenced_tweets')
@property
def source(self):
return self._data.get('source')
@property
def withheld(self):
return self._data.get('withheld')
@property
def is_retweet(self):
if self.referenced_tweets is not None:
return any([(tweet.get('type') == 'retweeted') for tweet in self.referenced_tweets])
else:
return False
@property
def is_quote(self):
if self.referenced_tweets is not None:
return any([(tweet.get('type') == 'quoted') for tweet in self.referenced_tweets])
else:
return False
@property
def is_reply(self):
if self.referenced_tweets is not None:
return any([(tweet.get('type') == 'replied_to') for tweet in self.referenced_tweets])
else:
return False
class user:
def __init__(self,data):
self._data = data
@property
def id(self):
return self._data.get('id')
@property
def name(self):
return self._data.get('name')
@property
def username(self):
return self._data.get('username')
@property
def created_at(self):
return self._data.get('created_at')
@property
def description(self):
return self._data.get('description')
@property
def entities(self):
return self._data.get('entities')
@property
def location(self):
return self._data.get('location')
@property
def pinned_tweet_id(self):
return self._data.get('pinned_tweet_id')
@property
def profile_image_url(self):
return self._data.get('profile_image_url')
@property
def protected(self):
return self._data.get('protected')
@property
def followers_count(self):
if 'public_metrics' in self._data.keys():
return self._data['public_metrics'].get('followers_count')
else:
return None
@property
def following_count(self):
if 'public_metrics' in self._data.keys():
return self._data['public_metrics'].get('following_count')
else:
return None
@property
def tweet_count(self):
if 'public_metrics' in self._data.keys():
return self._data['public_metrics'].get('tweet_count')
else:
return None
@property
def listed_count(self):
if 'public_metrics' in self._data.keys():
return self._data['public_metrics'].get('listed_count')
else:
return None
@property
def name(self):
return self._data.get('name')
@property
def url(self):
return self._data.get('url')
@property
def verified(self):
return self._data.get('verified')
@property
def withheld(self):
return self._data.get('withheld')
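# Usage sketch for the wrappers above (payload values are made up; the shape
# follows the Twitter API v2 user object that `user` reads). Absent keys fall
# back to None via dict.get.
def _demo_user_wrapper():
    u = user({'id': '2244994945', 'username': 'TwitterDev',
              'public_metrics': {'followers_count': 500000}})
    assert u.followers_count == 500000
    assert u.verified is None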
class place:
def __init__(self,data):
self._data = data
@property
def id(self):
return self._data.get('id')
@property
def full_name(self):
return self._data.get('full_name')
@property
def country(self):
return self._data.get('country')
@property
def country_code(self):
return self._data.get('country_code')
@property
def geo(self):
return self._data.get('geo')
@property
def name(self):
return self._data.get('name')
@property
def place_type(self):
return self._data.get('place_type')
#TODO more detailed query terms
class api:
#user input of BearerToken
def __init__(self,token,tweet_fields=['id','text','author_id','context_annotations','conversation_id','created_at','geo','lang','public_metrics','referenced_tweets','source'],
user_fields=['id','name','username','created_at','description','location','protected','public_metrics'],place_fields=['full_name','id','country','country_code','geo','name','place_type'],
tweet_expansions=['author_id','referenced_tweets.id','geo.place_id']):
self.allowed_tweet_fields = ['id','text','attachments','author_id','context_annotations','conversation_id','created_at','entities','geo','in_reply_to_user_id','lang','public_metrics','referenced_tweets','source','withheld']
self.allowed_user_fields = ['id','name','username','created_at','description','entities','location','pinned_tweet_id','profile_image_url','protected','public_metrics','url','verified','withheld']
self.allowed_place_fields = ['full_name','id','contained_id','country','country_code','geo','name','place_type']
self.allowed_tweet_expansions=['author_id','referenced_tweets.id','in_reply_to_user_id','attachments.media_keys','attachments.poll_ids','geo.place_id','entities.mentions.username','referenced_tweets.id.author_id']
if any([field not in self.allowed_tweet_fields for field in tweet_fields]):
raise TwitterAPYError('410','{} not allowed. Allowed fields are {}.'.format(tweet_fields, self.allowed_tweet_fields))
if any([field not in self.allowed_user_fields for field in user_fields]):
raise TwitterAPYError('410','{} not allowed. Allowed fields are {}.'.format(user_fields, self.allowed_user_fields))
if any([field not in self.allowed_place_fields for field in place_fields]):
raise TwitterAPYError('410','{} not allowed. Allowed fields are {}.'.format(place_fields, self.allowed_place_fields))
if any([expansion not in self.allowed_tweet_expansions for expansion in tweet_expansions]):
            raise TwitterAPYError('410', '{} not allowed. Allowed expansions are {}.'.format(tweet_expansions, self.allowed_tweet_expansions))
self.token = token
self.tweet_fields = tweet_fields
self.user_fields = user_fields
self.tweet_expansions = tweet_expansions
self.place_fields = place_fields
def __build_headers__(self):
headers = {"Authorization": "Bearer {}".format(self.token)}
return headers
def __build_params__(self,query):
if self.endpoint_name =='fullarchivesearch':
query['place.fields'] = ','.join(self.place_fields)
query['expansions'] = ','.join(self.tweet_expansions)
query['user.fields'] = ','.join(self.user_fields)
query['tweet.fields'] = ','.join(self.tweet_fields)
return query
def __check_rate_limit__(self, response):
        remaining = int(response.headers.get('x-rate-limit-remaining', 1))
        if remaining < 1:
            time_until_ratelimit = max(1, int(response.headers.get('x-rate-limit-reset')) - int(time()))
            print('Rate limit reached. Sleeping for {} seconds before retrying.'.format(time_until_ratelimit))
            sleep(time_until_ratelimit)
def __handle_status__(self,response):
#sleeps a second because of twitter limit to 1 request per second
sleep(1)
if response.status_code == 200:
return True
if response.status_code in [400,401,403,404]:
raise TwitterAPYError(response.status_code, response.text)
if response.status_code == 429:
time_until_ratelimit = max(1,int(response.headers.get('x-rate-limit-reset')) - int(time()))
            print('Rate limit reached. Sleeping for {} seconds before retrying.'.format(time_until_ratelimit))
sleep(time_until_ratelimit)
return False
if response.status_code >= 500:
            print('Error on Twitter side. Sleeping for 30 seconds and retrying.')
sleep(30)
return False
def __handle_errors__(self,error,endpoint,headers,params):
print("An unexpected error occured: {}./n Sleeping 10 seconds and retrying once.".format(str(type(error))))
sleep(10)
return requests.request("GET", endpoint, headers=headers, params=params)
def full_archive_search(self,query,max_pages=False):
self.endpoint_name = 'fullarchivesearch'
endpoint = "https://api.twitter.com/2/tweets/search/all"
headers = self.__build_headers__()
params = self.__build_params__(query)
pages = []
success = False
while not success:
try:
sleep(0.01)
response = requests.request("GET", endpoint, headers=headers, params=params)
except (ChunkedEncodingError,ConnectionError,ConnectionResetError) as e:
sleep(0.01)
response = self.__handle_errors__(e,endpoint,headers,params)
success = self.__handle_status__(response)
if success:
p = page(response.json(),self.endpoint_name)
pages.append(p)
while (p.next_token is not None) & (max_pages==False or len(pages)<max_pages):
self.__check_rate_limit__(response)
success_subsequent = False
while not success_subsequent:
params['next_token'] = p.next_token
try:
sleep(0.01)
response = requests.request("GET", endpoint, headers=headers, params=params)
except (ChunkedEncodingError,ConnectionError,ConnectionResetError) as e:
sleep(0.01)
response = self.__handle_errors__(e,endpoint,headers,params)
success_subsequent = self.__handle_status__(response)
if success_subsequent:
p = page(response.json(),self.endpoint_name)
pages.append(p)
return pages
def user_following(self,user_id,query,max_pages=False):
self.endpoint_name = 'userfollowing'
endpoint = "https://api.twitter.com/2/users/{}/following".format(user_id)
headers = self.__build_headers__()
params = self.__build_params__(query)
pages = []
success = False
while not success:
try:
sleep(0.01)
response = requests.request("GET", endpoint, headers=headers, params=params)
except (ChunkedEncodingError,ConnectionError,ConnectionResetError) as e:
sleep(0.01)
response = self.__handle_errors__(e,endpoint,headers,params)
success = self.__handle_status__(response)
if success:
p = page(response.json(),self.endpoint_name)
pages.append(p)
while (p.next_token is not None) & (max_pages==False or len(pages)<max_pages):
self.__check_rate_limit__(response)
success_subsequent = False
while not success_subsequent:
params['pagination_token'] = p.next_token
try:
sleep(0.01)
response = requests.request("GET", endpoint, headers=headers, params=params)
except (ChunkedEncodingError,ConnectionError,ConnectionResetError) as e:
sleep(0.01)
response = self.__handle_errors__(e,endpoint,headers,params)
success_subsequent = self.__handle_status__(response)
if success_subsequent:
p = page(response.json(),self.endpoint_name)
pages.append(p)
return pages
def user_followers(self,user_id,query,max_pages=False):
self.endpoint_name = 'userfollowers'
endpoint = "https://api.twitter.com/2/users/{}/followers".format(user_id)
headers = self.__build_headers__()
params = self.__build_params__(query)
pages = []
success = False
while not success:
try:
sleep(0.01)
response = requests.request("GET", endpoint, headers=headers, params=params)
except (ChunkedEncodingError,ConnectionError,ConnectionResetError) as e:
sleep(0.01)
response = self.__handle_errors__(e,endpoint,headers,params)
success = self.__handle_status__(response)
if success:
p = page(response.json(),self.endpoint_name)
pages.append(p)
while (p.next_token is not None) & (max_pages==False or len(pages)<max_pages):
self.__check_rate_limit__(response)
success_subsequent = False
while not success_subsequent:
params['pagination_token'] = p.next_token
try:
sleep(0.01)
response = requests.request("GET", endpoint, headers=headers, params=params)
except (ChunkedEncodingError,ConnectionError,ConnectionResetError) as e:
sleep(0.01)
response = self.__handle_errors__(e,endpoint,headers,params)
success_subsequent = self.__handle_status__(response)
if success_subsequent:
p = page(response.json(),self.endpoint_name)
pages.append(p)
return pages
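# Usage sketch (illustrative, not executed on import): the token is a
# placeholder and the query keys follow the v2 full-archive search endpoint
# wrapped above. Each returned element is a `page` wrapper, defined elsewhere
# in this module.
def _demo_full_archive_search():
    client = api(token='YOUR_BEARER_TOKEN')
    return client.full_archive_search(
        {'query': 'from:TwitterDev', 'max_results': '100'}, max_pages=2)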
class TwitterAPYError(Exception):
def __init__(self, id, message):
self.id = id
self.message = message | 40.811628 | 231 | 0.599749 | 1,968 | 17,549 | 5.103659 | 0.101626 | 0.059737 | 0.062724 | 0.064516 | 0.707786 | 0.665273 | 0.606631 | 0.583035 | 0.531959 | 0.509658 | 0 | 0.007225 | 0.290159 | 17,549 | 430 | 232 | 40.811628 | 0.799069 | 0.01983 | 0 | 0.630653 | 0 | 0 | 0.140203 | 0.005825 | 0 | 0 | 0 | 0.002326 | 0 | 1 | 0.165829 | false | 0 | 0.007538 | 0.090452 | 0.394472 | 0.01005 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
043fb277af3249dc37c15fd8e8bba0f699dca93c | 15,940 | py | Python | codes/b_environments/or_gym/envs/classic_or/vehicle_routing.py | linklab/link_rl | e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99 | [
"MIT"
] | 6 | 2020-12-03T21:08:39.000Z | 2021-12-26T08:40:33.000Z | codes/b_environments/or_gym/envs/classic_or/vehicle_routing.py | linklab/link_rl | e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99 | [
"MIT"
] | 9 | 2020-09-21T19:03:54.000Z | 2022-03-07T09:05:56.000Z | codes/b_environments/or_gym/envs/classic_or/vehicle_routing.py | linklab/link_rl | e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99 | [
"MIT"
] | 1 | 2021-11-23T12:30:37.000Z | 2021-11-23T12:30:37.000Z | '''
Example taken from Balaji et al.
Paper: https://arxiv.org/abs/1911.10641
GitHub: https://github.com/awslabs/or-rl-benchmarks
'''
import gym
from gym import spaces
import or_gym
from or_gym.utils import assign_env_config
import random
import numpy as np
from scipy.stats import truncnorm
class VehicleRoutingEnv(gym.Env):
'''
Dynamic Vehicle Routing Problem
This environment simulates a driver working with a food delivery app
to move through a city, accept orders, pick them up from restaurants,
and deliver them to waiting customers. Each order has a specific
delivery value, restaurant, and delivery location, all of which are
known by the driver before he accepts the order. After accepting, the
driver must navigate to the restaurant to collect the order and then
deliver it. If an order isn't accepted, it may be taken by another
driver. Additionally, the driver has 60 minutes to make a delivery
from the time an order is created.
The city is represented as a grid with different zones that have
different statistics for order creation and value. At each time step,
new orders are created with a fixed probability unique to each zone.
The driver's vehicle also has a finite capacity limiting the number of
orders he can carry at a given time, although there is no limit on the
number of accepted orders.
The driver receives a penalty for time and distance spent during travel,
but receives rewards for accepting and delivering orders.
Observation:
Type: Box
State Vector: S = (p, h, c, l, w, e, v)
p = pickup location
h = driver's current position
c = remaining vehicle capacity
l = order location
w = order status (open, accepted, picked up, delivered/inactive)
e = time elapsed since order generation
v = order value
Action:
Type: Discrete
0 = wait
1:max_orders = accept order
max_orders:2*max_orders = pickup order
2*max_orders:3*max_orders = deliver order
3*max_orders:3*max_orders + n_restaurants = go to restaurant
Action masking is available for this environment. Set mask=True
in the env_config dictionary.
Reward:
        The agent receives 1/3 of the order value for accepting an order,
picking it up, and delivering the order. The cost is comprised of
three elements: delivery time, delivery distance, and cost of failure
(if the driver does not deliver the item).
Starting State:
Restaurant and driver locations are randomized at the start of each
episode. New orders are generated according to the order probability.
    Episode Termination:
Episode termination occurs when the total time has elapsed.
'''
def __init__(self, *args, **kwargs):
self.n_restaurants = 2
self.max_orders = 10
self.order_prob = 0.5
self.vehicle_capacity = 4
self.grid = (5, 5)
self.order_promise = 60
self.order_timeout_prob = 0.15
self.num_zones = 4
self.order_probs_per_zone = [0.1, 0.5, 0.3, 0.1]
self.order_reward_min = [8, 5, 2, 1]
self.order_reward_max = [12, 8, 5, 3]
self.half_norm_scale_reward_per_zone = [0.5, 0.5, 0.5, 0.5]
self.penalty_per_timestep = 0.1
self.penalty_per_move = 0.1
self.order_miss_penalty = 50
self.step_limit = 1000
self.mask = False
self.info = {}
assign_env_config(self, kwargs)
self._order_nums = np.arange(self.max_orders)
self.loc_permutations = [(x, y) for x in range(self.grid[0])
for y in range(self.grid[1])]
self.action_dim = 1 + 3 * self.max_orders + self.n_restaurants
self.obs_dim = 2 * self.n_restaurants + 4 + 6 * self.max_orders
box_low = np.zeros(self.obs_dim)
box_high = np.hstack([
np.repeat(
max(self.grid), 2 * self.n_restaurants + 2), # Locations 0-5
np.repeat(self.vehicle_capacity, 2), # Vehicle capacities 6-7
np.tile(np.hstack([4, self.n_restaurants, self.grid,
self.order_promise, max(self.order_reward_max)]), self.max_orders)
])
if self.mask:
self.observation_space = spaces.Dict({
'action_mask': spaces.Box(
low=np.zeros(self.action_dim),
high=np.ones(self.action_dim),
dtype=np.uint8),
'avail_actions': spaces.Box(
low=np.zeros(self.action_dim),
high=np.ones(self.action_dim),
dtype=np.uint8),
'state': spaces.Box(
low=box_low,
high=box_high,
dtype=np.float16)
})
else:
self.observation_space = spaces.Box(
low=box_low,
high=box_high,
dtype=np.float16)
self.action_space = spaces.Discrete(self.action_dim)
self.reset()
def step(self, action):
done = False
self.reward = 0
self.late_penalty = 0
if action == 0:
self.wait(action)
elif action <= self.max_orders:
self.accept_order(action)
elif action <= 2 * self.max_orders:
self.pickup_order(action)
elif action <= 3 * self.max_orders:
self.deliver_order(action)
elif action <= 3 * self.max_orders + self.n_restaurants:
self.return_to_restaurant(action)
else:
raise Exception(f"Selected action ({action}) outside of action space.")
self.state = self._update_state()
self.step_count += 1
if self.step_count >= self.step_limit:
done = True
return self.state, self.reward, done, self.info
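    # Illustrative helper (not part of the original env): decodes the flat
    # action index into the (verb, target) layout from the class docstring.
    # With the defaults max_orders=10 and n_restaurants=2, action_dim is
    # 1 + 3 * 10 + 2 == 33: 0 waits, 1-10 accept orders 0-9, 11-20 pick them
    # up, 21-30 deliver them, and 31-32 drive to restaurants 0-1.
    def _decode_action(self, action):
        if action == 0:
            return ('wait', None)
        if action <= self.max_orders:
            return ('accept', action - 1)
        if action <= 2 * self.max_orders:
            return ('pickup', action - self.max_orders - 1)
        if action <= 3 * self.max_orders:
            return ('deliver', action - 2 * self.max_orders - 1)
        return ('go_to_restaurant', action - 3 * self.max_orders - 1)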
def wait(self, action):
# Do nothing
pass
def accept_order(self, action):
# Accept order denoted by action
order_idx = action - 1
if order_idx not in self.order_dict.keys():
# Invalid action, do nothing
pass
        elif self.order_dict[order_idx]['Status'] == 1:
            self.order_dict[order_idx]['Status'] = 2
            self.reward += self.order_dict[order_idx]['Value'] / 3
def pickup_order(self, action):
order_idx = action - self.max_orders - 1
if order_idx not in self.order_dict.keys():
# Invalid action, do nothing
pass
else:
restaurant = self.order_dict[order_idx]['RestaurantID']
restaurant_loc = self.restaurant_loc[restaurant]
self._go_to_destination(restaurant_loc)
self.reward -= self.penalty_per_move
# Movement and pickup can occur during same time step
            if self.order_dict[order_idx]['Status'] == 2 and tuple(self.driver_loc) == restaurant_loc:
                if self.vehicle_load < self.vehicle_capacity:
                    self.order_dict[order_idx]['Status'] = 3
                    self.vehicle_load += 1
                    self.reward += self.order_dict[order_idx]['Value'] / 3
def deliver_order(self, action):
order_idx = action - 2 * self.max_orders - 1
if order_idx not in self.order_dict.keys():
# Invalid action, do nothing
pass
else:
order_loc = self.order_dict[order_idx]['DeliveryLoc']
self._go_to_destination(order_loc)
self.reward -= self.penalty_per_move
# Can deliver multiple orders simultaneously
for k, v in self.order_dict.items():
                if v['Status'] == 3 and v['DeliveryLoc'] == tuple(self.driver_loc):
                    if v['Time'] <= self.order_promise:
                        self.reward += v['Value'] / 3
self.vehicle_load -= 1
v['Status'] = 4 # Delivered
def return_to_restaurant(self, action):
restaurant = action - 3 * self.max_orders - 1
restaurant_loc = self.restaurant_loc[restaurant]
self._go_to_destination(restaurant_loc)
self.reward -= self.penalty_per_move
def _update_orders(self):
self._update_order_times()
self._remove_orders()
self._generate_orders()
def _remove_orders(self):
# Remove orders if they're over due
orders_to_delete = []
for k, v in self.order_dict.items():
if v['Time'] >= self.order_promise:
if v['Status'] >= 2:
# Apply penalty and remove associated rewards
self.reward -= (self.order_miss_penalty +
v['Value'] * (v['Status']==2)/3 +
v['Value'] * (v['Status']==3) * 2/3)
self.late_penalty += self.order_miss_penalty
if v['Status'] == 3:
                        self.vehicle_load -= 1
orders_to_delete.append(k)
elif v['Status'] == 4:
orders_to_delete.append(k)
            # Probabilistically remove open orders
elif v['Status'] == 1 and np.random.random() < self.order_timeout_prob:
orders_to_delete.append(k)
for k in orders_to_delete:
del self.order_dict[k]
def _update_state(self):
self._update_orders()
order_array = np.zeros((self.max_orders, 6)) # Placeholder for order data
try:
order_data = np.hstack([v1 for v in self.order_dict.values()
for v1 in v.values()]).reshape(-1, 7)
order_array[order_data[:, 0].astype(int)] += order_data[:, 1:]
except ValueError:
# Occurs when order_data is empty
pass
state = np.hstack([
np.hstack(self.restaurant_loc),
np.hstack(self.driver_loc),
np.hstack([self.vehicle_load, self.vehicle_capacity]),
order_array.flatten()
])
if self.mask:
action_mask = self._update_mask(state)
state = {
'state': state,
'action_mask': action_mask,
'avail_actions': np.ones(self.action_dim)
}
return state
def _update_mask(self, state):
action_mask = np.zeros(self.action_dim)
# Wait and return to restaurant are always allowed
action_mask[0] = 1
action_mask[(3 * self.max_orders + 1):(3 * self.max_orders + self.n_restaurants + 1)] = 1
for k, v in self.order_dict.items():
status = v['Status']
# Allow accepting an open order
if status == 1:
action_mask[k + 1] = 1
# Allow navigating to accepted order for pickup
elif status == 2 and self.vehicle_load < self.vehicle_capacity:
action_mask[k + self.max_orders + 1] = 1
# Allow delivery of picked up order
elif status == 3:
action_mask[k + 2 * self.max_orders + 1] = 1
return action_mask
def reset(self):
self.step_count = 0
self.vehicle_load = 0
self.randomize_locations()
self.zone_loc = self._get_zones()
self.order_dict = {}
self.state = self._update_state()
return self.state
def _update_order_times(self):
for k, v in self.order_dict.items():
if v['Status'] >= 1:
v['Time'] += 1
    def _generate_orders(self):
        # Slots that do not currently hold an order
        open_slots = self._order_nums[~np.isin(self._order_nums,
                                               np.array(list(self.order_dict.keys())))]
        for n in open_slots:
            # Probabilistically create a new order in this free slot; using the
            # slot index directly avoids overwriting an occupied slot when the
            # free slots are not contiguous
            if np.random.random() < self.order_prob:
                zone = np.random.choice(self.num_zones, p=self.order_probs_per_zone)
                self.order_dict[n] = self._get_order_from_zone(zone, n)
def _get_order_from_zone(self, zone, n):
delivery_loc = random.choice(self.zone_loc[zone])
restaurant_idx = np.random.choice(self.n_restaurants)
value = truncnorm.rvs(0,
(self.order_reward_max[zone] - self.order_reward_min[zone])
/self.half_norm_scale_reward_per_zone[zone],
self.order_reward_min[zone],
self.half_norm_scale_reward_per_zone[zone])
return {'Number': n,
'Status': 1,
'RestaurantID': restaurant_idx,
'DeliveryLoc': delivery_loc,
'Time': 0,
'Value': value}
def randomize_locations(self):
self._place_restaurants()
self._place_driver()
def _place_restaurants(self):
self.restaurant_loc = random.sample(self.loc_permutations,
self.n_restaurants)
def _place_driver(self):
self.driver_loc = list(random.sample(self.loc_permutations, 1)[0])
def _move_driver(self, direction):
if direction is None:
return None
# Receives direction from routing function
if direction == 0: # Up
self.driver_loc[1] += 1
elif direction == 1: # Down
self.driver_loc[1] -= 1
elif direction == 2: # Right
self.driver_loc[0] += 1
elif direction == 3: # Left
self.driver_loc[0] -= 1
# Check boundaries
        if self.driver_loc[0] > self.grid[0] - 1:
            self.driver_loc[0] = self.grid[0] - 1
        if self.driver_loc[0] < 0:
            self.driver_loc[0] = 0
        if self.driver_loc[1] > self.grid[1] - 1:
            self.driver_loc[1] = self.grid[1] - 1
        if self.driver_loc[1] < 0:
            self.driver_loc[1] = 0
def _go_to_destination(self, destination):
# Automatically selects direction based on starting location and
# destination.
# 0 -> Up; 1 -> Down; 2 -> Right; 3 -> Left
x_diff = self.driver_loc[0] - destination[0]
y_diff = self.driver_loc[1] - destination[1]
        if x_diff == 0 and y_diff == 0:
            # Already at the destination
            direction = None
        elif abs(x_diff) >= abs(y_diff):
            # Positive x_diff means the driver is east of the target: move left
            direction = 3 if x_diff > 0 else 2
        else:
            # Positive y_diff means the driver is north of the target: move down
            direction = 1 if y_diff > 0 else 0
self._move_driver(direction)
def _get_num_spaces_per_zone(self):
total_spaces = self.grid[0] * self.grid[1]
spaces_per_zone = np.array([np.floor(total_spaces / self.num_zones)
for i in range(self.num_zones)])
for i in range(total_spaces % self.num_zones):
spaces_per_zone[i] += 1
return spaces_per_zone.astype(int)
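    # Worked example of the split above (defaults: grid=(5, 5), num_zones=4):
    # 25 cells total, floor(25 / 4) = 6 per zone, and the 25 % 4 == 1 leftover
    # cell goes to zone 0, giving [7, 6, 6, 6].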
def _get_zones(self):
# Slices the grid into zones by row
spaces_per_zone = self._get_num_spaces_per_zone()
zones = {}
for i, n in enumerate(spaces_per_zone):
x = sum(spaces_per_zone[:i])
zones[i] = self.loc_permutations[x:x+n]
zones = self._remove_restaurants_from_zone_locs(zones)
return zones
def _remove_restaurants_from_zone_locs(self, zones):
for k, v in zones.items():
for r in self.restaurant_loc:
try:
loc_to_remove = v.index(r)
del zones[k][loc_to_remove]
except ValueError:
pass
return zones | 38.68932 | 97 | 0.572334 | 2,036 | 15,940 | 4.289784 | 0.172888 | 0.043279 | 0.029769 | 0.015457 | 0.285894 | 0.195558 | 0.156057 | 0.126861 | 0.11461 | 0.105679 | 0 | 0.019617 | 0.338018 | 15,940 | 412 | 98 | 38.68932 | 0.808093 | 0.206964 | 0 | 0.196552 | 0 | 0 | 0.023871 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.07931 | false | 0.024138 | 0.024138 | 0 | 0.137931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0440591460e565a7896103d2f164e008bab24d84 | 1,398 | py | Python | base/templatetags/order_by_querystring.py | Angoreher/xcero | 27965682dcb4a8c300110a277905633e0201ffd7 | [
"MIT"
] | null | null | null | base/templatetags/order_by_querystring.py | Angoreher/xcero | 27965682dcb4a8c300110a277905633e0201ffd7 | [
"MIT"
] | null | null | null | base/templatetags/order_by_querystring.py | Angoreher/xcero | 27965682dcb4a8c300110a277905633e0201ffd7 | [
"MIT"
] | null | null | null | from django import template
register = template.Library()
@register.simple_tag
def get_order_by_querytring(ordering, current_order=None, remove=False):
"""
Using the ordering parameter (a list), returns a query string with the
orders of the columns
The parameter current_order can be passed along to handle the specific
order of a single column. So for example if you are ordering by 'email' and
'first_name', you can pass on 'email' as the current order, so the system
can keep every other order, but inverse the order of the email field.
"""
if not current_order:
return '&'.join(['o={}'.format(o) for o in ordering])
reversed_current_order = '-{}'.format(current_order)
query_string = []
for order in ordering:
if order == current_order:
if remove:
continue
query_string.append(reversed_current_order)
elif order == reversed_current_order:
if remove:
continue
query_string.append(current_order)
else:
query_string.append(order)
# if the current orderd and it's reversed are not being currently used
if not (current_order in ordering or reversed_current_order in ordering):
if not remove:
query_string.append(current_order)
return '&'.join(['o={}'.format(o) for o in query_string])
| 32.511628 | 79 | 0.66309 | 190 | 1,398 | 4.736842 | 0.405263 | 0.173333 | 0.088889 | 0.037778 | 0.212222 | 0.18 | 0.18 | 0.18 | 0.08 | 0.08 | 0 | 0 | 0.261087 | 1,398 | 42 | 80 | 33.285714 | 0.871249 | 0.32475 | 0 | 0.26087 | 0 | 0 | 0.014317 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.043478 | 0 | 0.173913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04407e24b128e6f46a8ee70139fadafc2a68987a | 8,314 | py | Python | accessmod/processing.py | BLSQ/accessmod-pipelines | 8ff8d439c979e5d90b9c7228043d3f1bb152cde1 | [
"MIT"
] | null | null | null | accessmod/processing.py | BLSQ/accessmod-pipelines | 8ff8d439c979e5d90b9c7228043d3f1bb152cde1 | [
"MIT"
] | null | null | null | accessmod/processing.py | BLSQ/accessmod-pipelines | 8ff8d439c979e5d90b9c7228043d3f1bb152cde1 | [
"MIT"
] | null | null | null | import json
import logging
import os
import tempfile
from typing import Tuple
import geopandas as gpd
import numpy as np
import rasterio
from osgeo import gdal
from rasterio.crs import CRS
from rasterio.transform import from_origin
from rasterio.warp import aligned_target, transform_bounds, transform_geom
from shapely.geometry import Polygon
from utils import filesystem
logging.basicConfig(
format="%(asctime)s %(levelname)s %(message)s",
level=logging.INFO,
datefmt="%Y-%m-%d %H:%M:%S",
)
logger = logging.getLogger(__name__)
GDAL_CREATION_OPTIONS = [
"TILED=TRUE",
"BLOCKXSIZE=256",
"BLOCKYSIZE=256",
"COMPRESS=ZSTD",
"PREDICTOR=2",
"NUM_THREADS=ALL_CPUS",
]
RASTERIO_DEFAULT_PROFILE = {
"driver": "GTiff",
"tiled": True,
"blockxsize": 256,
"blockysize": 256,
"compress": "zstd",
"predictor": 2,
"num_threads": "all_cpus",
}
GDAL_DTYPES = {
"int16": gdal.GDT_Int16,
"uint8": gdal.GDT_Byte,
}
GDAL_RESAMPLING_ALGS = [
"near",
"bilinear",
"cubic",
"cubicspline",
"lanczos",
"average",
"rms",
"mode",
"max",
"min",
"med",
"q1",
"q2",
"sum",
]
def create_grid(
geom: Polygon, dst_crs: CRS, dst_res: int
) -> Tuple[rasterio.Affine, Tuple[int], Tuple[float]]:
"""Create a raster grid for a given area of interest.
Parameters
----------
geom : shapely geometry
Area of interest.
dst_crs : CRS
Target CRS as a rasterio CRS object.
dst_res : int or float
Spatial resolution (in `dst_crs` units).
Returns
-------
transform: Affine
Output affine transform object.
shape : tuple of int
Output shape (height, width).
bounds : tuple of float
Output bounds.
"""
bounds = transform_bounds(CRS.from_epsg(4326), dst_crs, *geom.bounds)
xmin, ymin, xmax, ymax = bounds
transform = from_origin(xmin, ymax, dst_res, dst_res)
ncols = (xmax - xmin) / dst_res
nrows = (ymax - ymin) / dst_res
transform, ncols, nrows = aligned_target(transform, ncols, nrows, dst_res)
logger.info(f"Generated raster grid of shape ({nrows}, {ncols}).")
return transform, (nrows, ncols), bounds
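# Worked example (a sketch; the AOI and resolution are illustrative): a ~1
# degree box in WGS84 gridded at 100 m in Web Mercator. The returned transform
# and shape can seed an aligned output raster.
def _demo_create_grid():
    aoi = Polygon([(30, 10), (31, 10), (31, 11), (30, 11)])
    return create_grid(aoi, CRS.from_epsg(3857), 100)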
def reproject(
src_raster: str,
dst_raster: str,
dst_crs: CRS,
dtype: str,
bounds: Tuple[float] = None,
height: int = None,
width: int = None,
xres: float = None,
yres: float = None,
resampling_alg: str = "bilinear",
src_nodata: float = None,
dst_nodata: float = None,
) -> str:
"""Reproject a raster.
Reproject a raster with GDAL based on either:
* bounds and shape
* bounds and spatial resolution
Parameters
----------
src_raster : str
Path to source raster.
dst_raster : str
Path to output raster.
dst_crs : CRS
Target CRS.
dtype : str
Target data type (ex: "int16").
bounds : tuple of float, optional
Target raster bounds (xmin, ymin, xmax, ymax).
height : int, optional
Target raster height.
width : int, optional
Target raster width.
xres : float, optional
Target X spatial resolution.
yres : float, optional
Target Y spatial resolution.
resampling_alg : str, optional
GDAL Resampling algorithm (default=bilinear).
src_nodata : float, optional
Nodata value in source raster.
dst_nodata : float, optional
Nodata value in output raster.
Return
------
str
Path to output raster.
"""
    if height and not width:
        raise ValueError("Both height and width must be provided.")
    if xres and not yres:
        raise ValueError("Both xres and yres must be provided.")
    if xres and height:
        raise ValueError("Shape and resolution cannot be used together.")
    if dtype not in GDAL_DTYPES:
        raise ValueError(f"Data type {dtype} is not supported.")
    if resampling_alg not in GDAL_RESAMPLING_ALGS:
        raise ValueError(f"Resampling algorithm {resampling_alg} is not supported.")
gdal.Warp(
dst_raster,
src_raster,
dstSRS=dst_crs.to_string(),
outputType=GDAL_DTYPES[dtype],
format="GTiff",
outputBounds=bounds,
height=height,
width=width,
xRes=xres,
yRes=yres,
resampleAlg=resampling_alg,
srcNodata=src_nodata,
dstNodata=dst_nodata,
creationOptions=GDAL_CREATION_OPTIONS,
)
logger.info(f"Reprojected {src_raster} to {dst_crs.to_string()}.")
return dst_raster
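# Usage sketch (paths, bounds and nodata are placeholders): warp a WGS84 DEM
# onto a 100 m grid in EPSG:3857 using the bounds + resolution mode.
def _demo_reproject():
    return reproject(
        src_raster="dem_wgs84.tif",
        dst_raster="dem_3857.tif",
        dst_crs=CRS.from_epsg(3857),
        dtype="int16",
        bounds=(3339584.7, 1118889.9, 3450904.2, 1230209.4),
        xres=100,
        yres=100,
        src_nodata=-32768,
        dst_nodata=-32768,
    )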
def mask(src_raster: str, dst_raster: str, geom: Polygon, src_crs: CRS = None):
"""Clip raster data based on a polygon.
Parameters
----------
src_raster : str
Path to source raster.
dst_raster : str
Path to output raster.
geom : shapely polygon
Area of interest.
src_crs : CRS, optional
CRS of input geometry if different from source raster.
Return
------
str
Path to output raster.
"""
geom = geom.__geo_interface__
# Reproject input geometry to same CRS as raster if needed
with rasterio.open(src_raster) as src:
dst_crs = src.crs
if src_crs:
geom = transform_geom(src_crs, dst_crs, geom)
logger.info(
f"Reprojected geometry from {src_crs.to_string()} to {dst_crs.to_string()}."
)
# GDAL needs the geometry as a file
with tempfile.TemporaryDirectory("AccessMod_") as tmp_dir:
geom_fp = os.path.join(tmp_dir, "geom.geojson")
with open(geom_fp, "w") as f:
json.dump(geom, f)
options = gdal.WarpOptions(
cutlineDSName=geom_fp,
cropToCutline=False,
creationOptions=GDAL_CREATION_OPTIONS,
)
gdal.Warp(dst_raster, src_raster, options=options)
logger.info(f"Clipped raster {src_raster}.")
return dst_raster
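# Usage sketch (paths and geometry are placeholders): clip the warped DEM to
# an AOI given in WGS84, letting mask() reproject the polygon itself.
def _demo_mask():
    aoi = Polygon([(30, 10), (31, 10), (31, 11), (30, 11)])
    return mask("dem_3857.tif", "dem_clipped.tif", aoi,
                src_crs=CRS.from_epsg(4326))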
def enforce_crs(geodataframe: gpd.GeoDataFrame, crs: CRS) -> gpd.GeoDataFrame:
"""Enforce a given CRS on a geodataframe.
If the geodataframe does not have any CRS assigned, it is assumed
to be in WGS84.
Parameters
----------
geodataframe : geodataframe
Input geopandas geodataframe.
crs : pyproj CRS
Target CRS.
Return
------
geodataframe
Projected geodataframe.
"""
if not geodataframe.crs:
geodataframe.crs = CRS.from_epsg(4326)
logger.debug("Geodataframe did not have any CRS assigned.")
if geodataframe.crs != crs:
geodataframe.to_crs(crs, inplace=True)
logger.debug("Reprojected geodataframe.")
return geodataframe
def get_raster_statistics(src_file: str) -> dict:
"""Compute basic raster statistics.
This includes min, max, 1st percentile, 2nd percentile, 98th percentile, and
99th percentile.
"""
meta = {}
fs = filesystem(src_file)
with fs.open(src_file, "rb") as f:
with rasterio.open(f) as src:
nodata = src.nodata
data = src.read(1)
meta["dtype"] = src.dtypes[0]
meta["nodata"] = nodata
meta["min"] = data[data != nodata].min()
meta["max"] = data[data != nodata].max()
for percentile in (1, 2, 98, 99):
meta[f"p{percentile}"] = np.percentile(
data[data != nodata].ravel(), percentile
)
# unique values
if src.dtypes[0] in ("uint8", "int8", "int16"):
unique = list(np.unique(data[data != nodata]))
if len(unique) <= 20:
meta["unique_values"] = unique
return meta
def generate_geojson(src_file: str, dst_file: str) -> str:
"""Generate a GeoJSON copy from input vector file.
This is for dataviz purposes. NB: Paths must be local.
"""
# target crs is epsg:4326 in dataviz
DST_CRS = CRS.from_epsg(4326)
src_geodata = gpd.read_file(src_file)
# set default crs
if not src_geodata.crs:
src_geodata.crs = DST_CRS
    # reproject if needed, then always write the GeoJSON copy promised above
    if src_geodata.crs == DST_CRS:
        dst_geodata = src_geodata
    else:
        dst_geodata = src_geodata.to_crs(DST_CRS)
    dst_geodata.to_file(dst_file, driver="GeoJSON")
    return dst_file
| 26.819355 | 88 | 0.619798 | 1,033 | 8,314 | 4.8606 | 0.239109 | 0.017925 | 0.010755 | 0.01195 | 0.159331 | 0.113922 | 0.070504 | 0.057359 | 0.057359 | 0.057359 | 0 | 0.010311 | 0.276762 | 8,314 | 309 | 89 | 26.906149 | 0.824713 | 0.276642 | 0 | 0.023529 | 0 | 0 | 0.15401 | 0.011533 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035294 | false | 0 | 0.082353 | 0 | 0.188235 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0443b28c790d7807b6f50f7f6f858ff95d8d931e | 672 | py | Python | 2018/day12.py | tcbegley/advent-of-code | e293d06e9cd994b26c0d10619672a6d8d2d65377 | [
"MIT"
] | 6 | 2021-12-05T11:21:17.000Z | 2021-12-07T03:04:24.000Z | 2018/day12.py | tcbegley/advent-of-code | e293d06e9cd994b26c0d10619672a6d8d2d65377 | [
"MIT"
] | null | null | null | 2018/day12.py | tcbegley/advent-of-code | e293d06e9cd994b26c0d10619672a6d8d2d65377 | [
"MIT"
] | null | null | null | import re
import sys
def answer(path):
with open(path) as f:
lines = f.read().strip().split("\n")
state = f"{'.'*20}{re.search(r'[.#]+', lines[0]).group(0)}{'.'*20}"
n = len(state)
rules = {}
for line in lines[2:]:
k, v = line.split(" => ")
rules[k] = v
for gen in range(20):
        new_state = ".."
        for i in range(2, n - 2):
            # patterns absent from the input map to an empty pot
            new_state += rules.get(state[i - 2 : i + 3], ".")
state = new_state + ".."
total = 0
for i, p in enumerate(state):
if p == "#":
total += i - 20
return total
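# Worked example of a single generation (toy rule set, not real puzzle input):
# a lone plant whose '..#..' window maps to '#' survives in place.
def _demo_generation():
    state, rules = "....#....", {"..#..": "#"}
    new_state = ".."
    for i in range(2, len(state) - 2):
        new_state += rules.get(state[i - 2 : i + 3], ".")
    return new_state + ".."  # -> "....#...."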
if __name__ == "__main__":
print(answer(sys.argv[1]))
| 19.2 | 71 | 0.46875 | 95 | 672 | 3.2 | 0.463158 | 0.078947 | 0.085526 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.038202 | 0.337798 | 672 | 34 | 72 | 19.764706 | 0.644944 | 0 | 0 | 0 | 0 | 0 | 0.111607 | 0.081845 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.083333 | 0 | 0.166667 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0443f2a687e671d1a9df04bcc19725daba33333a | 24,151 | py | Python | moocng/teacheradmin/views.py | OpenMOOC/moocng | 1e3dafb84aa1838c881df0c9bcca069e47c7f52d | [
"Apache-2.0"
] | 36 | 2015-01-10T06:00:36.000Z | 2020-03-19T10:06:59.000Z | moocng/teacheradmin/views.py | OpenMOOC/moocng | 1e3dafb84aa1838c881df0c9bcca069e47c7f52d | [
"Apache-2.0"
] | 3 | 2015-10-01T17:59:32.000Z | 2018-09-04T03:32:17.000Z | moocng/teacheradmin/views.py | OpenMOOC/moocng | 1e3dafb84aa1838c881df0c9bcca069e47c7f52d | [
"Apache-2.0"
] | 17 | 2015-01-13T03:46:58.000Z | 2020-07-05T06:29:51.000Z | # -*- coding: utf-8 -*-
# Copyright 2012-2013 UNED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from django.contrib.auth.models import User
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.core.urlresolvers import reverse
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.template.defaultfilters import slugify
from django.utils import simplejson
from django.utils.translation import ugettext as _
from moocng.courses.models import (Course, CourseTeacher, KnowledgeQuantum,
Option, Announcement, Unit, Attachment)
from moocng.courses.utils import UNIT_BADGE_CLASSES
from moocng.categories.models import Category
from moocng.media_contents import get_media_content_types_choices
from moocng.mongodb import get_db
from moocng.portal.templatetags.gravatar import gravatar_img_for_email
from moocng.teacheradmin.decorators import is_teacher_or_staff
from moocng.teacheradmin.forms import (CourseForm, AnnouncementForm,
MassiveEmailForm, AssetTeacherForm,
StaticPageForm)
from moocng.teacheradmin.models import Invitation, MassiveEmail
from moocng.teacheradmin.tasks import send_massive_email_task
from moocng.teacheradmin.utils import (send_invitation,
send_removed_notification)
from moocng.videos.tasks import process_video_task
from moocng.assets.utils import course_get_assets
from moocng.assets.models import Asset
from moocng.externalapps.models import externalapps
@is_teacher_or_staff
def teacheradmin_stats(request, course_slug):
course = get_object_or_404(Course, slug=course_slug)
is_enrolled = course.students.filter(id=request.user.id).exists()
stats_course = get_db().get_collection('stats_course')
stats = stats_course.find_one({'course_id': course.id})
if stats is not None:
data = {
'enrolled': course.students.count(),
'started': stats.get('started', -1),
'completed': stats.get('completed', -1),
}
if course.threshold is not None:
            # if the course doesn't support certification, then don't return
            # the 'passed' stat since it doesn't apply
data['passed'] = stats.get('passed', -1)
return render_to_response('teacheradmin/stats.html', {
'course': course,
'is_enrolled': is_enrolled,
'initial_data': simplejson.dumps(data),
}, context_instance=RequestContext(request))
else:
messages.error(request, _(u"There are no statistics for this course."))
return HttpResponseRedirect(reverse('teacheradmin_info',
args=[course_slug]))
@is_teacher_or_staff
def teacheradmin_stats_units(request, course_slug):
course = get_object_or_404(Course, slug=course_slug)
stats_unit = get_db().get_collection('stats_unit')
data = []
for unit in course.unit_set.only('id', 'title').all():
stats = stats_unit.find_one({'unit_id': unit.id})
if stats is not None:
unit_data = {
'id': unit.id,
'title': unit.title,
'started': stats.get('started', -1),
'completed': stats.get('completed', -1),
}
if course.threshold is not None:
# if the course doesn't support certification, then don't return
# the 'passed' stat since it doesn't apply
unit_data['passed'] = stats.get('passed', -1)
data.append(unit_data)
return HttpResponse(simplejson.dumps(data), mimetype='application/json')
@is_teacher_or_staff
def teacheradmin_stats_kqs(request, course_slug):
course = get_object_or_404(Course, slug=course_slug)
if not 'unit' in request.GET:
return HttpResponse(status=400)
unit = get_object_or_404(Unit, id=request.GET['unit'])
if not unit in course.unit_set.all():
return HttpResponse(status=400)
stats_kq = get_db().get_collection('stats_kq')
data = []
for kq in unit.knowledgequantum_set.only('id', 'title').all():
stats = stats_kq.find_one({'kq_id': kq.id})
if stats is not None:
kq_data = {
'id': kq.id,
'title': kq.title,
'viewed': stats.get('viewed', 1)
}
kq_type = kq.kq_type()
if kq_type == 'PeerReviewAssignment':
kq_data['submitted'] = stats.get('submitted', -1)
kq_data['reviews'] = stats.get('reviews', -1)
kq_data['reviewers'] = stats.get('reviewers', -1)
elif kq_type == 'Question':
kq_data['submitted'] = stats.get('submitted', -1)
if course.threshold is not None:
# if the course doesn't support certification, then don't
# return the 'passed' stat since it doesn't apply
kq_data['passed'] = stats.get('passed', -1)
data.append(kq_data)
return HttpResponse(simplejson.dumps(data), mimetype='application/json')
@is_teacher_or_staff
def teacheradmin_units(request, course_slug):
course = get_object_or_404(Course, slug=course_slug)
is_enrolled = course.students.filter(id=request.user.id).exists()
return render_to_response('teacheradmin/units.html', {
'course': course,
'is_enrolled': is_enrolled,
'unit_badge_classes': simplejson.dumps(UNIT_BADGE_CLASSES),
'media_content_type_choices': get_media_content_types_choices(),
}, context_instance=RequestContext(request))
@is_teacher_or_staff
def teacheradmin_units_forcevideoprocess(request, course_slug):
if not 'kq' in request.GET:
return HttpResponse(status=400)
kq = get_object_or_404(KnowledgeQuantum, id=request.GET['kq'])
question_list = kq.question_set.all()
if len(question_list) > 0:
process_video_task.delay(question_list[0].id)
return HttpResponse()
@is_teacher_or_staff
def teacheradmin_units_attachment(request, course_slug):
if request.method == 'POST':
if not 'kq' in request.GET:
return HttpResponse(status=400)
kq = get_object_or_404(KnowledgeQuantum, id=request.GET['kq'])
        if 'attachment' not in request.FILES:
return HttpResponse(status=400)
uploaded_file = request.FILES['attachment']
attachment = Attachment(attachment=uploaded_file, kq=kq)
attachment.save()
return HttpResponse()
elif request.method == 'DELETE':
if not 'attachment' in request.GET:
return HttpResponse(status=400)
attachment = get_object_or_404(Attachment,
id=request.GET['attachment'])
attachment.delete()
return HttpResponse()
else:
return HttpResponse(status=400)
@is_teacher_or_staff
def teacheradmin_units_question(request, course_slug, kq_id):
kq = get_object_or_404(KnowledgeQuantum, id=kq_id)
course = get_object_or_404(Course, slug=course_slug)
is_enrolled = course.students.filter(id=request.user.id).exists()
question_list = kq.question_set.all()
if len(question_list) > 0:
obj = question_list[0]
else:
return HttpResponse(status=400)
if 'HTTP_REFERER' in request.META:
goback = request.META['HTTP_REFERER']
else:
goback = None
if obj is None:
        raise Http404(_("The KQ with id %s doesn't exist") % kq_id)
if request.method == 'POST':
data = simplejson.loads(request.raw_post_data)
option = obj.option_set.create(**data)
data['id'] = option.id
return HttpResponse(simplejson.dumps(data),
mimetype='application/json')
else:
json = [{
'id': opt.id,
'optiontype': opt.optiontype,
'solution': opt.solution,
'feedback': opt.feedback,
'text': opt.text,
'x': opt.x, 'y': opt.y,
'width': opt.width, 'height': opt.height,
} for opt in obj.option_set.all()]
context = {
'course': course,
'is_enrolled': is_enrolled,
'object_id': obj.id,
'original': obj,
'options_json': simplejson.dumps(json),
'goback': goback,
}
return render_to_response('teacheradmin/question.html', context,
context_instance=RequestContext(request))
@is_teacher_or_staff
def teacheradmin_units_option(request, course_slug, kq_id, option_id):
option = get_object_or_404(Option, id=option_id)
if request.method == 'PUT':
data = simplejson.loads(request.raw_post_data)
for key, value in data.items():
if key != 'id':
setattr(option, key, value)
option.save()
return HttpResponse(simplejson.dumps(data),
mimetype='application/json')
elif request.method == 'DELETE':
option.delete()
return HttpResponse('')
elif request.method == 'GET':
data = {
'id': option.id,
'optiontype': option.optiontype,
'solution': option.solution,
'feedback': option.feedback,
'text': option.text,
'x': option.x, 'y': option.y,
'width': option.width, 'height': option.height,
}
return HttpResponse(simplejson.dumps(data),
mimetype='application/json')
@is_teacher_or_staff
def teacheradmin_teachers(request, course_slug):
course = get_object_or_404(Course, slug=course_slug)
is_enrolled = course.students.filter(id=request.user.id).exists()
return render_to_response('teacheradmin/teachers.html', {
'course': course,
'is_enrolled': is_enrolled,
'course_teachers': CourseTeacher.objects.filter(course=course),
'invitations': Invitation.objects.filter(course=course),
'request': request,
}, context_instance=RequestContext(request))
@is_teacher_or_staff
def teacheradmin_teachers_delete(request, course_slug, email_or_id):
course = get_object_or_404(Course, slug=course_slug)
response = HttpResponse()
try:
validate_email(email_or_id)
# is email, so is an invitation
try:
invitation = Invitation.objects.get(email=email_or_id,
course=course)
invitation.delete()
send_removed_notification(request, email_or_id, course)
except Invitation.DoesNotExist:
response = HttpResponse(status=404)
except ValidationError:
# is an id
try:
ct = CourseTeacher.objects.get(id=email_or_id)
if ct.teacher == course.owner:
response = HttpResponse(status=401)
else:
ct.delete()
send_removed_notification(request, ct.teacher.email, course)
except (ValueError, CourseTeacher.DoesNotExist):
response = HttpResponse(status=404)
return response
@is_teacher_or_staff
def teacheradmin_teachers_invite(request, course_slug):
course = get_object_or_404(Course, slug=course_slug)
email_or_id = request.POST['data']
user = None
response = None
try:
validate_email(email_or_id)
# is email
try:
user = User.objects.get(email=email_or_id)
except User.DoesNotExist:
pass
except ValidationError:
# is id
try:
user = User.objects.get(id=email_or_id)
except (ValueError, User.DoesNotExist):
response = HttpResponse(status=404)
if user is not None:
try:
            CourseTeacher.objects.get(course=course, teacher=user)
return HttpResponse(status=409)
except CourseTeacher.DoesNotExist:
ct = CourseTeacher.objects.create(course=course, teacher=user)
name = user.get_full_name()
if not name:
name = user.username
data = {
'id': ct.id,
'order': ct.order,
'name': name,
'gravatar': gravatar_img_for_email(user.email, 20),
'pending': False
}
response = HttpResponse(simplejson.dumps(data),
mimetype='application/json')
elif response is None:
if Invitation.objects.filter(email=email_or_id, course=course).count() == 0:
invitation = Invitation(host=request.user, email=email_or_id,
course=course, datetime=datetime.now())
invitation.save()
send_invitation(request, invitation)
data = {
'name': email_or_id,
'gravatar': gravatar_img_for_email(email_or_id, 20),
'pending': True
}
response = HttpResponse(simplejson.dumps(data),
mimetype='application/json')
else:
response = HttpResponse(status=409)
return response
@is_teacher_or_staff
def teacheradmin_teachers_transfer(request, course_slug):
course = get_object_or_404(Course, slug=course_slug)
ident = request.POST['data']
if request.user != course.owner:
return HttpResponse(status=401)
response = HttpResponse()
try:
user = CourseTeacher.objects.get(id=ident).teacher
course.owner = user
course.save()
except (ValueError, CourseTeacher.DoesNotExist):
response = HttpResponse(status=404)
return response
@is_teacher_or_staff
def teacheradmin_teachers_reorder(request, course_slug):
course = get_object_or_404(Course, slug=course_slug)
try:
new_order = simplejson.loads(request.raw_post_data)
except ValueError:
return HttpResponse(status=400)
response = HttpResponse()
cts_map = dict([(cts.id, cts)
for cts in CourseTeacher.objects.filter(course=course)])
for i, course_teacher_id in enumerate(new_order):
cid = int(course_teacher_id)
ct = cts_map.get(cid, None)
if ct is None:
return HttpResponse(status=404)
else:
ct.order = i
ct.save()
return response
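# Request sketch for the view above (ids are hypothetical): the raw POST body
# is a JSON array of CourseTeacher ids in their new display order, e.g.
# '[12, 7, 9]' -- teacher 12 gets order 0, teacher 7 order 1, teacher 9 order 2.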
@is_teacher_or_staff
def teacheradmin_info(request, course_slug):
course = get_object_or_404(Course, slug=course_slug)
is_enrolled = course.students.filter(id=request.user.id).exists()
external_apps = externalapps.all()
if request.method == 'POST':
form = CourseForm(data=request.POST, files=request.FILES, instance=course)
static_page_form = StaticPageForm(data=request.POST, instance=course.static_page)
if form.is_valid() and static_page_form.is_valid():
static_page = static_page_form.save(commit=False)
static_page.save()
course = form.save(commit=False)
course.static_page = static_page
course.save()
messages.success(request, _(u"Your changes were saved."))
return HttpResponseRedirect(reverse('teacheradmin_info',
args=[course_slug]))
else:
messages.error(request, _(u"There were problems with some data you introduced, please fix them and try again."))
else:
form = CourseForm(instance=course)
static_page_form = StaticPageForm(instance=course.static_page)
return render_to_response('teacheradmin/info.html', {
'course': course,
'is_enrolled': is_enrolled,
'form': form,
'static_page_form': static_page_form,
'external_apps': external_apps,
}, context_instance=RequestContext(request))
@is_teacher_or_staff
def teacheradmin_categories(request, course_slug):
course = get_object_or_404(Course, slug=course_slug)
is_enrolled = course.students.filter(id=request.user.id).exists()
if request.method == 'POST':
category_list = []
for key in request.POST.keys():
if key.startswith('cat-'):
slug = key[4:]
try:
category = Category.objects.get(slug=slug,
only_admins=False)
category_list.append(category)
except Category.DoesNotExist:
messages.error(request, _(u'There were problems with some data you introduced, please fix them and try again.'))
return HttpResponseRedirect(
reverse('teacheradmin_categories', args=[course_slug]))
admin_cats = course.categories.filter(only_admins=True)
category_list.extend(admin_cats)
course.categories.clear()
course.categories.add(*category_list)
course.save()
messages.success(request, _(u"Your changes were saved."))
return HttpResponseRedirect(reverse('teacheradmin_categories',
args=[course_slug]))
counter = 0
categories = []
aux = []
for cat in Category.objects.filter(only_admins=False):
counter += 1
aux.append({
'cat': cat,
'checked': cat in course.categories.all(),
})
if counter % 5 == 0:
categories.append(aux)
aux = []
    if aux:
        categories.append(aux)
return render_to_response('teacheradmin/categories.html', {
'course': course,
'is_enrolled': is_enrolled,
'categories': categories,
}, context_instance=RequestContext(request))
@is_teacher_or_staff
def teacheradmin_assets(request, course_slug):
course = get_object_or_404(Course, slug=course_slug)
is_enrolled = course.students.filter(id=request.user.id).exists()
assets = course_get_assets(course).order_by('id').distinct()
return render_to_response('teacheradmin/assets.html', {
'course': course,
'is_enrolled': is_enrolled,
'assets': assets,
}, context_instance=RequestContext(request))
@is_teacher_or_staff
def teacheradmin_assets_edit(request, course_slug, asset_id):
asset = get_object_or_404(Asset, id=asset_id)
course = get_object_or_404(Course, slug=course_slug)
is_enrolled = course.students.filter(id=request.user.id).exists()
if request.method == 'POST':
form = AssetTeacherForm(request.POST, instance=asset)
if form.is_valid():
form_name = form.cleaned_data['name']
form_capacity = form.cleaned_data['capacity']
form_description = form.cleaned_data['description']
if asset is not None:
asset.name = form_name
asset.capacity = form_capacity
asset.description = form_description
asset.save()
return HttpResponseRedirect(
reverse("teacheradmin_assets",
args=[course_slug]))
else:
form = AssetTeacherForm(instance=asset)
return render_to_response('teacheradmin/asset_edit.html', {
'course': course,
'is_enrolled': is_enrolled,
'form': form,
'asset': asset,
}, context_instance=RequestContext(request))
@is_teacher_or_staff
def teacheradmin_announcements(request, course_slug):
course = get_object_or_404(Course, slug=course_slug)
is_enrolled = course.students.filter(id=request.user.id).exists()
announcements = course.announcement_set.all()
return render_to_response('teacheradmin/announcements.html', {
'course': course,
'is_enrolled': is_enrolled,
'announcements': announcements,
}, context_instance=RequestContext(request))
@is_teacher_or_staff
def teacheradmin_announcements_view(request, course_slug, announ_id, announ_slug):
announcement = get_object_or_404(Announcement, id=announ_id)
course = announcement.course
is_enrolled = course.students.filter(id=request.user.id).exists()
return render_to_response('teacheradmin/announcement_view.html', {
'course': course,
'is_enrolled': is_enrolled,
'announcement': announcement,
}, context_instance=RequestContext(request))
@is_teacher_or_staff
def teacheradmin_announcements_add_or_edit(request, course_slug, announ_id=None, announ_slug=None):
if announ_id is None:
announcement = None
course = get_object_or_404(Course, slug=course_slug)
else:
announcement = get_object_or_404(Announcement, id=announ_id)
course = announcement.course
is_enrolled = course.students.filter(id=request.user.id).exists()
students = course.students.count()
data = None
if request.method == 'POST':
data = request.POST
massive_emails = course.massive_emails.all()
form = AnnouncementForm(data=data, instance=announcement, course=course)
remain_send_emails = form.remain_send_emails(massive_emails)
if form.is_valid():
announcement = form.save()
messages.success(request,
_("The announcement was created successfully."))
if form.cleaned_data.get('send_email', None):
messages.success(
request,
_("The email has been queued, and it will be send in batches to every student in the course.")
)
return HttpResponseRedirect(
reverse("teacheradmin_announcements_view",
args=[course_slug, announcement.id, announcement.slug]))
return render_to_response('teacheradmin/announcement_edit.html', {
'course': course,
'is_enrolled': is_enrolled,
'form': form,
'announcement': announcement,
'remain_send_emails': remain_send_emails,
'students': students
}, context_instance=RequestContext(request))
@is_teacher_or_staff
def teacheradmin_announcements_delete(request, course_slug, announ_id, announ_slug):
announcement = get_object_or_404(Announcement, id=announ_id)
announcement.delete()
return HttpResponseRedirect(reverse("teacheradmin_announcements", args=[course_slug]))
@is_teacher_or_staff
def teacheradmin_emails(request, course_slug):
course = get_object_or_404(Course, slug=course_slug)
is_enrolled = course.students.filter(id=request.user.id).exists()
students = course.students.count()
data = None
if request.method == 'POST':
data = request.POST
massive_emails = course.massive_emails.all()
form = MassiveEmailForm(data=data, course=course)
remain_send_emails = form.remain_send_emails(massive_emails)
if remain_send_emails > 0 and form.is_valid():
form.save()
messages.success(request, _("The email has been queued, and it will be send in batches to every student in the course."))
return HttpResponseRedirect(reverse('teacheradmin_stats', args=[course_slug]))
return render_to_response('teacheradmin/emails.html', {
'course': course,
'massive_emails': massive_emails,
'remain_send_emails': remain_send_emails,
'is_enrolled': is_enrolled,
'students': students,
'form': form,
}, context_instance=RequestContext(request))
| 36.317293 | 132 | 0.642706 | 2,761 | 24,151 | 5.41398 | 0.122419 | 0.042815 | 0.032111 | 0.026224 | 0.549773 | 0.473843 | 0.432299 | 0.386072 | 0.345999 | 0.330813 | 0 | 0.010371 | 0.257422 | 24,151 | 664 | 133 | 36.371988 | 0.823129 | 0.038549 | 0 | 0.438697 | 0 | 0.005747 | 0.09584 | 0.019573 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042146 | false | 0.007663 | 0.051724 | 0 | 0.180077 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |