hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ce754f8ccaef370d5bccd15d3b5a3617a6bc907e
| 7,773
|
py
|
Python
|
science_animations/square_cube.py
|
dhruvbhatia00/manim
|
ad2eb05bcc806a5c947d8e79797f133b0b0b0153
|
[
"MIT"
] | null | null | null |
science_animations/square_cube.py
|
dhruvbhatia00/manim
|
ad2eb05bcc806a5c947d8e79797f133b0b0b0153
|
[
"MIT"
] | null | null | null |
science_animations/square_cube.py
|
dhruvbhatia00/manim
|
ad2eb05bcc806a5c947d8e79797f133b0b0b0153
|
[
"MIT"
] | null | null | null |
from manimlib.imports import *
class SquareScale(Scene):
    """Animate a 2x2 grid of small squares, double its side length, and
    explode it apart to show the area grows by a factor of four."""
    CONFIG = {
        "camera_config": {"background_color": WHITE},
        "text_config": {"stroke_color": WHITE, "fill_color": BLACK},
    }

    def _make_grid(self):
        """Build the 2x2 block of unit squares used for both copies."""
        grid = VGroup()
        for _ in range(2):
            row = VGroup()
            for _ in range(2):
                row.add(Square(side_length=0.5, color=BLUE,
                               stroke_width=1, fill_opacity=1))
            row.arrange(RIGHT, buff=0)
            grid.add(row)
        grid.arrange(DOWN, buff=0)
        return grid

    def construct(self):
        s1 = self._make_grid()
        b1 = BraceLabel(s1, "x", DOWN, **self.text_config)
        s2 = self._make_grid().move_to(2 * RIGHT)
        b2 = BraceLabel(s2, "x", DOWN, **self.text_config)
        self.play(GrowFromCenter(s1))
        self.play(GrowFromCenter(b1))
        self.wait()
        self.play(s1.shift, 2 * LEFT,
                  b1.shift, 2 * LEFT)
        self.play(TransformFromCopy(s1, s2),
                  TransformFromCopy(b1, b2))
        self.wait()
        # Double the side length; raw string fixes the invalid "\c" escape.
        s3 = s2.copy().scale(2)
        self.play(Transform(s2, s3),
                  Transform(b2, BraceLabel(s3, r"2 \cdot x", DOWN, **self.text_config)))
        self.wait(2)
        # Push every small square outward from the group center to reveal
        # the four original-sized pieces.
        move_out = []
        for row in s2:
            for sq in row:
                direction = sq.get_center() - s2.get_center()
                move_out.append(ApplyMethod(sq.shift, direction * 0.3))
        self.play(*move_out)
        self.wait(3)
class CubeScale(ThreeDScene):
    """Animate a 2x2x2 grid of small cubes, double its edge length, and
    explode/implode it to show the volume grows by a factor of eight."""
    CONFIG = {
        "camera_config": {"background_color": WHITE},
        "text_config": {"stroke_color": WHITE, "fill_color": BLACK},
    }

    def _make_cube_grid(self):
        """Build the 2x2x2 block of unit cubes used for both copies."""
        grid = VGroup()
        for _ in range(2):
            plane = VGroup()
            for _ in range(2):
                row = VGroup()
                for _ in range(2):
                    row.add(Cube(side_length=0.5, color=BLUE,
                                 stroke_width=1, fill_opacity=1))
                row.arrange(RIGHT, buff=0)
                plane.add(row)
            plane.arrange(DOWN, buff=0)
            grid.add(plane)
        grid.arrange(OUT, buff=0)
        return grid

    def construct(self):
        self.set_camera_orientation(phi=60 * DEGREES, theta=-90 * DEGREES)
        self.begin_ambient_camera_rotation(rate=0.04)
        s1 = self._make_cube_grid()
        b1 = BraceLabel(s1, "x", DOWN, **self.text_config)
        s2 = self._make_cube_grid().move_to(2 * RIGHT)
        b2 = BraceLabel(s2, "x", DOWN, **self.text_config)
        self.play(GrowFromCenter(s1))
        self.play(GrowFromCenter(b1))
        self.wait()
        self.play(s1.shift, 2 * LEFT,
                  b1.shift, 2 * LEFT)
        self.play(TransformFromCopy(s1, s2),
                  TransformFromCopy(b1, b2))
        self.wait()
        # Double the edge length; raw string fixes the invalid "\c" escape.
        s3 = s2.copy().scale(2)
        self.play(Transform(s2, s3),
                  Transform(b2, BraceLabel(s3, r"2 \cdot x", DOWN, **self.text_config)))
        self.wait(2)
        # Build paired explode/implode animations for the eight sub-cubes.
        move_out = []
        move_in = []
        for plane in s2:
            for row in plane:
                for cube in row:
                    direction = cube.get_center() - s2.get_center()
                    move_out.append(ApplyMethod(cube.shift, direction * 0.3))
                    move_in.append(ApplyMethod(cube.shift, direction * -0.3))
        self.stop_ambient_camera_rotation()
        # Look straight down the z-axis before exploding the cube apart.
        self.move_camera(phi=0 * DEGREES, theta=-90 * DEGREES)
        self.play(*move_out)
        self.play(*move_in)
        self.wait(3)
class Both(ThreeDScene):
    """Run the square-area demonstration, fade it out, then run the
    cube-volume demonstration in the same scene."""
    CONFIG = {
        "camera_config": {"background_color": WHITE},
        "text_config": {"stroke_color": WHITE, "fill_color": BLACK},
    }

    def _make_square_grid(self):
        """Build a 2x2 block of unit squares."""
        grid = VGroup()
        for _ in range(2):
            row = VGroup()
            for _ in range(2):
                row.add(Square(side_length=0.5, color=BLUE,
                               stroke_width=1, fill_opacity=1))
            row.arrange(RIGHT, buff=0)
            grid.add(row)
        grid.arrange(DOWN, buff=0)
        return grid

    def _make_cube_grid(self):
        """Build a 2x2x2 block of unit cubes."""
        grid = VGroup()
        for _ in range(2):
            plane = VGroup()
            for _ in range(2):
                row = VGroup()
                for _ in range(2):
                    row.add(Cube(side_length=0.5, color=BLUE,
                                 stroke_width=1, fill_opacity=1))
                row.arrange(RIGHT, buff=0)
                plane.add(row)
            plane.arrange(DOWN, buff=0)
            grid.add(plane)
        grid.arrange(OUT, buff=0)
        return grid

    def construct(self):
        # ---- Part 1: squares (2D) --------------------------------------
        s1 = self._make_square_grid()
        b1 = BraceLabel(s1, "x", DOWN, **self.text_config)
        s2 = self._make_square_grid().move_to(2 * RIGHT)
        b2 = BraceLabel(s2, "x", DOWN, **self.text_config)
        self.play(GrowFromCenter(s1))
        self.play(GrowFromCenter(b1))
        self.wait()
        self.play(s1.shift, 2 * LEFT,
                  b1.shift, 2 * LEFT)
        self.play(TransformFromCopy(s1, s2),
                  TransformFromCopy(b1, b2))
        self.wait()
        # Raw string fixes the invalid "\c" escape in the LaTeX label.
        s3 = s2.copy().scale(2)
        self.play(Transform(s2, s3),
                  Transform(b2, BraceLabel(s3, r"2 \cdot x", DOWN, **self.text_config)))
        self.wait()
        move_out = []
        move_in = []
        for row in s2:
            for sq in row:
                direction = sq.get_center() - s2.get_center()
                move_out.append(ApplyMethod(sq.shift, direction * 0.3))
                move_in.append(ApplyMethod(sq.shift, direction * -0.3))
        self.play(*move_out)
        self.play(*move_in)
        self.wait(1)
        self.play(*[FadeOut(mob) for mob in self.mobjects])
        # ---- Part 2: cubes (3D) ----------------------------------------
        self.move_camera(phi=60 * DEGREES, theta=-90 * DEGREES)
        self.begin_ambient_camera_rotation(rate=0.08)
        s1 = self._make_cube_grid().shift(2 * LEFT)
        # BUG FIX: this brace was assigned to an unused `B1` while the
        # faded-out 2D brace `b1` was animated below; bind `b1` so the
        # cube's own label is shown.
        b1 = BraceLabel(s1, "x", DOWN, **self.text_config)
        s2 = self._make_cube_grid().move_to(2 * RIGHT).scale(2)
        b2 = BraceLabel(s2, r"2 \cdot x", DOWN, **self.text_config)
        self.play(GrowFromCenter(s1), GrowFromCenter(b1), GrowFromCenter(s2), GrowFromCenter(b2))
        self.wait()
        move_out = []
        for plane in s2:
            for row in plane:
                for cube in row:
                    direction = cube.get_center() - s2.get_center()
                    move_out.append(ApplyMethod(cube.shift, direction * 0.3))
        self.play(*move_out)
        self.wait(3)
| 31.726531
| 97
| 0.509456
| 984
| 7,773
| 3.925813
| 0.09248
| 0.047631
| 0.041419
| 0.037018
| 0.933989
| 0.922858
| 0.922858
| 0.908879
| 0.907585
| 0.896454
| 0
| 0.045564
| 0.359063
| 7,773
| 245
| 98
| 31.726531
| 0.729827
| 0.016982
| 0
| 0.887755
| 0
| 0
| 0.029974
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015306
| false
| 0
| 0.005102
| 0
| 0.05102
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ce821a035385e45f39286ff29aadc6d17b8aded6
| 5,035
|
py
|
Python
|
turtle222.py
|
shyed2001/Python_Programming
|
93ef958e3d8aa77f9191b550972235ce4fe4a6cb
|
[
"bzip2-1.0.6"
] | 2
|
2019-05-01T04:32:14.000Z
|
2019-05-04T11:28:18.000Z
|
turtle222.py
|
shyed2001/python-learning-basics
|
93ef958e3d8aa77f9191b550972235ce4fe4a6cb
|
[
"bzip2-1.0.6"
] | null | null | null |
turtle222.py
|
shyed2001/python-learning-basics
|
93ef958e3d8aa77f9191b550972235ce4fe4a6cb
|
[
"bzip2-1.0.6"
] | null | null | null |
# Turtle-graphics practice script using two turtles, "tess" and "alex".
# NOTE(review): the triple-quoted banner below is program OUTPUT, not code —
# it echoes a near-copy of the executable script that follows it.
print("""
import turtle
wn = turtle.Screen()
print("#Creates a playground for turtle. Not must?")
tess=turtle.Turtle()
alex=turtle.Turtle()
alex.pen()
alex.backward(30)
alex.penup()
alex.right(-60)
alex.forward(-45)
alex.pendown()
alex.left(-30)
alex.backward(-20)
alex.penup()
alex.forward(-45)
alex.pendown()
alex.left(30)
tess.color('red')
tess.pensize(13)
tess.forward(-80)
tess.penup()
tess.stamp()
tess.forward(80)
tess.penup()
tess.forward(-80)
tess.penup()
alex.color('yellow')
alex.pensize(25)
for i in range (3):
    alex.forward(80)
    alex.right(90)
for i in [0,1,2,3]:
    tess.shape("square")
    tess.backward(55)
    tess.right(90)
alex.color('green')
alex.pensize(51)
for i in range (3):
    alex.speed(10)
    alex.forward(130)
    alex.right(120)
tess.color('blue')
tess.pensize(51)
for i in ("a","b","c"):
    tess.shape("circle")
    tess.speed(1)
    tess.forward(120)
    tess.right(120)
for i in ("a","b","c"):
    tess.shape("circle")
    tess.speed(0)
    tess.forward(120)
    tess.stamp()
    tess.right(-120)
for c in ["dark green", "red", "yellow", "black"]:
    alex.color(c)
    alex.speed(5)
    alex.forward(-380)
    alex.right(120)
tc= ["black", "light green", "orange", "pink"]
for c in tc:
    tess.shape("arrow")
    tess.color(c)
    tess.forward(100)
    tess.stamp()
    tess.right(120)
tc= ["black", "light green", "orange", "pink"]
for c in tc:
    tess.shape("arrow")
    tess.pendown()
    tess.color(c)
    tess.forward(-100)
    tess.right(120)
for i in [0,1,2,3]:
    tess.forward(-30)
    tess.penup()
    tess.forward(30)
    tess.stamp()
    tess.pendown()
    tess.shape("turtle")
    tess.forward(-28)
    tess.stamp()
    tess.right(90)
tess.shape("turtle")
tess.forward(30)
for i in [0,1,2,3]:
    tess.forward(30)
    tess.pensize(5)
    tess.penup()
    tess.forward(-30)
    #tess.pendown()
    tess.shape("turtle")
    tess.forward(-28)
    tess.right(90)
    tess.stamp()
tess.shape("turtle")
tess.penup()
tess.stamp()
tess.speed(3)
tess.right(90)
tess.forward(230)
tess.stamp()
tess.right(90)
tess.speed(10)
tess.right(90)
tess.backward(-130)
tess.stamp()
tess.right(90)
tess.speed(1)
tess.forward(-130)
tess.stamp()
wn.mainloop()
""")
# ---- live script (a superset of the printed banner above) ----
import turtle
wn = turtle.Screen() # Turtle screen
print("#Creates a playground for turtle. Not must?")
tess=turtle.Turtle() # Turtle assigned variables
alex=turtle.Turtle() # Turtle assigned variables
# alex: scattered pen-up/pen-down strokes (negative distances/angles move
# the turtle in the opposite direction).
alex.speed(1)
alex.pen()
alex.penup()
alex.backward(30)
alex.pendown()
alex.left(-30)
alex.penup()
alex.right(-60)
alex.forward(-45)
alex.pendown()
alex.left(-30)
alex.backward(-20)
alex.penup()
alex.forward(-45)
alex.pendown()
alex.left(30)
alex.forward(55)
# tess: thick red stroke with a stamp at each end.
tess.color('red')
tess.pensize(13)
tess.forward(-80)
tess.penup()
tess.stamp()
tess.forward(80)
tess.penup()
tess.forward(-80)
tess.penup()
# alex: three sides of a yellow square, then stamped square path by tess.
alex.color('yellow')
alex.pensize(25)
for i in range (3):
    alex.forward(80)
    alex.right(90)
for i in [0,1,2,3]:
    tess.shape("square")
    tess.backward(55)
    tess.right(90)
# alex: green triangle; tess: blue triangles at different speeds.
alex.color('green')
alex.pensize(51)
for i in range (3):
    alex.speed(10)
    alex.forward(130)
    alex.right(120)
tess.color('blue')
tess.pensize(51)
for i in ("a","b","c"):
    tess.shape("circle")
    tess.speed(1)
    tess.forward(120)
    tess.right(120)
for i in ("a","b","c"):
    tess.shape("circle")
    tess.speed(0)
    tess.forward(120)
    tess.stamp()
    tess.right(-120)
# Color-cycling triangles and stamped arrow paths.
for c in ["dark green", "red", "yellow", "black"]:
    alex.color(c)
    alex.speed(5)
    alex.forward(-380)
    alex.right(120)
tc= ["black", "light green", "orange", "pink"]
for c in tc:
    tess.shape("arrow")
    tess.color(c)
    tess.forward(100)
    tess.stamp()
    tess.right(120)
tc= ["black", "light green", "orange", "pink"]
for c in tc:
    tess.shape("arrow")
    tess.pendown()
    tess.color(c)
    tess.forward(-100)
    tess.right(120)
# Stamped turtle-shape squares.
for i in [0,1,2,3]:
    tess.forward(-30)
    tess.penup()
    tess.forward(30)
    tess.stamp()
    tess.pendown()
    tess.shape("turtle")
    tess.forward(-28)
    tess.stamp()
    tess.right(90)
tess.shape("turtle")
tess.forward(30)
for i in [0,1,2,3]:
    tess.forward(30)
    tess.pensize(5)
    tess.penup()
    tess.forward(-30)
    #tess.pendown()
    tess.shape("turtle")
    tess.forward(-28)
    tess.right(90)
    tess.stamp()
# Final wandering stamps at varying speeds.
tess.shape("turtle")
tess.penup()
tess.stamp()
tess.speed(3)
tess.right(90)
tess.forward(230)
tess.stamp()
tess.right(90)
tess.speed(10)
tess.right(90)
tess.backward(-130)
tess.stamp()
tess.right(90)
tess.speed(1)
tess.forward(-130)
tess.stamp()
# Stamp spiral: step length grows by 2 each of 30 turns of 24 degrees.
size=5
for i in range(30):
    tess.stamp()
    size=size+2
    tess.forward(size)
    tess.right(24)
# Closing black pentagon, then keep the window open.
tess.pendown()
tess.pensize(25)
tess.color("black")
for i in range(5):
    tess.forward(55)
    tess.right(72)
wn.mainloop()
| 18.928571
| 53
| 0.599801
| 759
| 5,035
| 3.97892
| 0.093544
| 0.123841
| 0.077483
| 0.059603
| 0.920199
| 0.906954
| 0.878808
| 0.878808
| 0.878808
| 0.877483
| 0
| 0.070779
| 0.2143
| 5,035
| 265
| 54
| 19
| 0.692619
| 0.01569
| 0
| 0.938017
| 0
| 0
| 0.510993
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.008264
| 0
| 0.008264
| 0.012397
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0ca1e403e1186f18da54e6b778ca849ea222b9ef
| 16,924
|
py
|
Python
|
dataset.py
|
elientumba2019/Learning-Exposure-Correction-via-consistency-Modeling-Pytorch-Implementation-
|
eba44073a8dc699de880e5d6b9977db3dd80ace2
|
[
"MIT"
] | 8
|
2021-10-18T10:59:49.000Z
|
2022-03-30T08:08:11.000Z
|
dataset.py
|
elientumba2019/Learning-Exposure-Correction-via-consistency-Modeling-Pytorch-Implementation-
|
eba44073a8dc699de880e5d6b9977db3dd80ace2
|
[
"MIT"
] | 1
|
2022-02-22T03:10:12.000Z
|
2022-02-22T03:10:12.000Z
|
dataset.py
|
elientumba2019/Learning-Exposure-Correction-via-consistency-Modeling-Pytorch-Implementation-
|
eba44073a8dc699de880e5d6b9977db3dd80ace2
|
[
"MIT"
] | 1
|
2022-01-21T02:55:19.000Z
|
2022-01-21T02:55:19.000Z
|
import os
import cv2
import numpy as np
import torch
import torchvision.transforms.functional as TF
from torch.utils.data import Dataset
import imageio
import matplotlib.pyplot as plt
from PIL import Image
from torchvision import transforms
import glob
import random
class ExposureCorrectionTrain(Dataset):
    """Training dataset for exposure correction.

    Pairs each input exposure with its ground-truth image (matched by the
    first five characters of the filename) plus a randomly chosen
    differently-exposed rendition of the same scene (the "contrast" image).
    """

    def __init__(self, dataset_dir,
                 transform=None,
                 resize_size=(384, 384),
                 mode='train',
                 color=1):
        # dataset_dir: root folder holding INPUT_IMAGES/ and GT_IMAGES/.
        #   NOTE(review): make_ground_truth_dictionary re-joins dataset_dir
        #   with the already-joined gt path, which only resolves correctly
        #   when dataset_dir is absolute — confirm callers pass absolute paths.
        # transform: stored but not used anywhere in this class.
        # resize_size: crop size taken in 'train' mode.
        # color: 1 -> RGB via PIL, otherwise LAB via OpenCV (see load_image).
        super(ExposureCorrectionTrain, self).__init__()
        self.dataset_dir = dataset_dir
        self.transform = transform
        self.resize = resize_size
        # low light and normal light folders
        self.input_images = os.path.join(self.dataset_dir, 'INPUT_IMAGES')
        self.gt_images = os.path.join(self.dataset_dir, 'GT_IMAGES')
        # Crops larger than 384 need big-enough images, so a pre-filtered
        # file list (images_<res>.txt, see read_and_parse) is used instead
        # of the whole folder.
        if resize_size[0] > 384:
            self.image_list = read_and_parse(dataset_dir, resize_size[0])
        else:
            self.image_list = os.listdir(self.input_images)
        self.gt_dictionary = self.make_ground_truth_dictionary(self.gt_images)
        self.mode = mode
        self.color_mode = color

    def make_ground_truth_dictionary(self, gt_dir):
        """Map the 5-character filename prefix of every GT file to its filename."""
        gt_dictionary = {}
        files = os.listdir(os.path.join(self.dataset_dir, gt_dir))
        for i in range(len(files)):
            image_file = files[i]
            # Non-.jpg entries are only reported, not excluded.
            if image_file[-4:] != '.jpg':
                print(f'non image : {image_file}')
            image_index = image_file[:5]
            gt_dictionary[image_index] = image_file
        return gt_dictionary

    def __len__(self):
        # Dataset size == number of input exposures.
        return len(self.image_list)

    def __getitem__(self, index):
        """Return (normalized_input, ground_truth, input, contrast), all CHW tensors."""
        input_image = self.image_list[index]
        image_prefix = input_image[:5]
        gt_name = self.gt_dictionary[image_prefix]
        input_path = os.path.join(self.input_images, input_image)
        # All exposures of a scene share the 5-char prefix; pick a random one
        # that is not the input itself as the "contrast" exposure.
        image_contrast_path = glob.glob(os.path.join(self.input_images, f'*{image_prefix}*'))
        image_contrast_path.remove(input_path)
        image_contrast_path = image_contrast_path[random.randint(0, len(image_contrast_path) - 1)]
        gt_path = os.path.join(self.gt_images, gt_name)
        # read gt image ------------------------------------------------
        normal_image = load_image(gt_path, mode=self.color_mode)
        normal_image = torch.from_numpy(normal_image)
        normal_image = normal_image.permute(2, 0, 1)
        # read contrast image ------------------------------------------------
        contrast_image = load_image(image_contrast_path, mode=self.color_mode)
        contrast_image = torch.from_numpy(contrast_image)
        contrast_image = contrast_image.permute(2, 0, 1)
        # read input image ---------------------------------------------------
        input_image = load_image(input_path, mode=self.color_mode)
        input_image = torch.from_numpy(input_image)
        input_image = input_image.permute(2, 0, 1)
        # random crops on the images — the same (i, j) offset is applied to
        # all three so they stay pixel-aligned.
        if self.mode == 'train':
            c, h, w = normal_image.shape
            i = np.random.randint(0, h - self.resize[1] + 1)
            j = np.random.randint(0, w - self.resize[0] + 1)
            normal_image = self._random_crop(normal_image, i, j)
            input_image = self._random_crop(input_image, i, j)
            contrast_image = self._random_crop(contrast_image, i, j)
        normalized_image = self.normalize_image(input_image)
        return normalized_image, normal_image, input_image, contrast_image

    def _random_crop(self, image, i=0, j=0):
        """Crop a self.resize-sized window at offset (i, j) from a CHW tensor."""
        c, h, w = image.shape
        assert w >= self.resize[1] and h >= self.resize[0], \
            f'Error: Crop size: {self.resize[0]}, Image size: ({w}, {h})'
        PIL_image = transforms.functional.to_pil_image(image)
        cropped_image = transforms.functional.crop(PIL_image, i, j, self.resize[1], self.resize[0])
        cropped_image = transforms.functional.to_tensor(cropped_image)
        # nump = cropped_image.permute(1, 2, 0).numpy()
        # plt.figure()
        # plt.imshow(nump)
        # plt.show()
        return cropped_image

    def normalize_image(self, image):
        """Shift a [0, 1] tensor to [-1, 1] per channel."""
        # normalize the image
        transform_list = [
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ]
        tr = transforms.Compose(transform_list)
        # normalized image
        normalized_image = tr(image)
        return normalized_image

    def crop_image(self, image):
        """Center-crop a CHW tensor to a self.resize[0]-sized square."""
        pre = transforms.functional.to_pil_image(image)
        cropped = transforms.functional.center_crop(pre, self.resize[0])
        post = transforms.functional.to_tensor(cropped)
        # np = post.permute(1, 2, 0).numpy()
        # plt.figure()
        # plt.imshow(np)
        # plt.show()
        return post
class ExposureCorrectionTest(Dataset):
    """Evaluation dataset for exposure correction.

    Like ExposureCorrectionTrain but without the contrast image; in 'test'
    mode images are resized to a bucketed resolution instead of cropped, and
    the file list can be filtered by exposure tag (see filter_list).
    """

    def __init__(self, dataset_dir,
                 transform=None,
                 resize_size=(384, 384),
                 mode='train',
                 folder=None,
                 filt=3,
                 color=1):
        # folder: optional [input_dir_name, gt_dir_name] pair; defaults below.
        # filt: passed to filter_list (1 = over/zero exposures, 2 = under,
        #       anything else keeps the full list).
        # color: 1 -> RGB via PIL, otherwise LAB via OpenCV (see load_image).
        super(ExposureCorrectionTest, self).__init__()
        if folder is None:
            folder = ['INPUT_IMAGES', 'GT_IMAGES']
        self.dataset_dir = dataset_dir
        self.transform = transform
        self.resize = resize_size
        # low light and normal light folders
        self.input_images = os.path.join(self.dataset_dir, folder[0])
        self.gt_images = os.path.join(self.dataset_dir, folder[1])
        self.image_list = os.listdir(self.input_images)
        self.gt_dictionary = self.make_ground_truth_dictionary(self.gt_images)
        self.image_list = self.filter_list(self.image_list, filt)
        self.mode = mode
        self.color_mode = color

    def make_ground_truth_dictionary(self, gt_dir):
        """Map the 5-character filename prefix of every GT file to its filename."""
        gt_dictionary = {}
        files = os.listdir(os.path.join(self.dataset_dir, gt_dir))
        for i in range(len(files)):
            image_file = files[i]
            # Non-.jpg entries are only reported, not excluded.
            if image_file[-4:] != '.jpg':
                print(f'non image : {image_file}')
            image_index = image_file[:5]
            gt_dictionary[image_index] = image_file
        return gt_dictionary

    def __len__(self):
        # Dataset size == number of (possibly filtered) input exposures.
        return len(self.image_list)

    def __getitem__(self, index):
        """Return (normalized_input, ground_truth, input), all CHW tensors."""
        input_image = self.image_list[index]
        image_prefix = input_image[:5]
        gt_name = self.gt_dictionary[image_prefix]
        input_path = os.path.join(self.input_images, input_image)
        gt_path = os.path.join(self.gt_images, gt_name)
        # read gt image ------------------------------------------------
        normal_image = load_image(gt_path, mode=self.color_mode)
        nh, nw, nc = normal_image.shape
        if self.mode == 'test':
            # Bucket the resolution, then scale so the long side is 512.
            ww, hh = adapt_size(nh, nw)
            ww, hh = get_novel_size(ww, hh, 512)
            normal_image = cv2.resize(normal_image, (ww, hh))
            # show_image(normal_image)
        normal_image = torch.from_numpy(normal_image)
        normal_image = normal_image.permute(2, 0, 1)
        # read input image ---------------------------------------------------
        input_image = load_image(input_path, mode=self.color_mode)
        if self.mode == 'test':
            input_image = cv2.resize(input_image, (ww, hh))
            # show_image(input_image)
        input_image = torch.from_numpy(input_image)
        input_image = input_image.permute(2, 0, 1)
        # random crops on the images — same offsets keep the pair aligned.
        if self.mode == 'train':
            c, h, w = normal_image.shape
            i = np.random.randint(0, h - self.resize[1] + 1)
            j = np.random.randint(0, w - self.resize[0] + 1)
            normal_image = self._random_crop(normal_image, i, j)
            input_image = self._random_crop(input_image, i, j)
        normalized_image = self.normalize_image(input_image)
        return normalized_image, normal_image, input_image

    def _random_crop(self, image, i=0, j=0):
        """Crop a self.resize-sized window at offset (i, j) from a CHW tensor."""
        c, h, w = image.shape
        assert w >= self.resize[1] and h >= self.resize[0], \
            f'Error: Crop size: {self.resize[0]}, Image size: ({w}, {h})'
        PIL_image = transforms.functional.to_pil_image(image)
        cropped_image = transforms.functional.crop(PIL_image, i, j, self.resize[1], self.resize[0])
        cropped_image = transforms.functional.to_tensor(cropped_image)
        # nump = cropped_image.permute(1, 2, 0).numpy()
        # plt.figure()
        # plt.imshow(nump)
        # plt.show()
        return cropped_image

    def normalize_image(self, image):
        """Shift a [0, 1] tensor to [-1, 1] per channel."""
        # normalize the image
        transform_list = [
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ]
        tr = transforms.Compose(transform_list)
        # normalized image
        normalized_image = tr(image)
        return normalized_image

    def crop_image(self, image):
        """Center-crop a CHW tensor to a self.resize[0]-sized square."""
        pre = transforms.functional.to_pil_image(image)
        cropped = transforms.functional.center_crop(pre, self.resize[0])
        post = transforms.functional.to_tensor(cropped)
        # np = post.permute(1, 2, 0).numpy()
        # plt.figure()
        # plt.imshow(np)
        # plt.show()
        return post

    def filter_list(self, image_list, param):
        """Filter filenames by the exposure tag after the last underscore.

        param == 1 keeps names whose tag starts with '0' or 'P' (presumably
        zero/plus exposures); param == 2 keeps 'N' (presumably negative);
        any other value returns the list unchanged. TODO confirm tag meaning.
        """
        new_list = []
        if param == 1:
            for i in range(len(image_list)):
                a = image_list[i].split('_')[-1][0]
                if a == '0' or a == 'P':
                    new_list.append(image_list[i])
        elif param == 2:
            for i in range(len(image_list)):
                a = image_list[i].split('_')[-1][0]
                if a == 'N':
                    new_list.append(image_list[i])
        else:
            return image_list
        return new_list
class ExposureCorrection3(Dataset):
    """Variant of ExposureCorrectionTest that always loads RGB (no color
    mode) and additionally returns the input's filename from __getitem__."""

    def __init__(self, dataset_dir,
                 transform=None,
                 resize_size=(384, 384),
                 mode='train',
                 folder=None,
                 filt=3):
        # folder: optional [input_dir_name, gt_dir_name] pair; defaults below.
        # filt: passed to filter_list; values other than 1/2 keep all files.
        super(ExposureCorrection3, self).__init__()
        if folder is None:
            folder = ['INPUT_IMAGES', 'GT_IMAGES']
        self.dataset_dir = dataset_dir
        self.transform = transform
        self.resize = resize_size
        # low light and normal light folders
        self.input_images = os.path.join(self.dataset_dir, folder[0])
        self.gt_images = os.path.join(self.dataset_dir, folder[1])
        self.image_list = os.listdir(self.input_images)
        self.gt_dictionary = self.make_ground_truth_dictionary(self.gt_images)
        self.image_list = self.filter_list(self.image_list, filt)
        self.mode = mode

    def make_ground_truth_dictionary(self, gt_dir):
        """Map the 5-character filename prefix of every GT file to its filename."""
        gt_dictionary = {}
        files = os.listdir(os.path.join(self.dataset_dir, gt_dir))
        for i in range(len(files)):
            image_file = files[i]
            # Non-.jpg entries are only reported, not excluded.
            if image_file[-4:] != '.jpg':
                print(f'non image : {image_file}')
            image_index = image_file[:5]
            gt_dictionary[image_index] = image_file
        return gt_dictionary

    def __len__(self):
        # Dataset size == number of (possibly filtered) input exposures.
        return len(self.image_list)

    def __getitem__(self, index):
        """Return (normalized_input, ground_truth, input, input_filename)."""
        input_image = self.image_list[index]
        input_name = input_image
        image_prefix = input_image[:5]
        gt_name = self.gt_dictionary[image_prefix]
        input_path = os.path.join(self.input_images, input_image)
        gt_path = os.path.join(self.gt_images, gt_name)
        # read gt image ------------------------------------------------
        normal_image = load_image(gt_path)
        nh, nw, nc = normal_image.shape
        if self.mode == 'test':
            # Bucket the resolution, then scale so the long side is 512.
            ww, hh = adapt_size(nh, nw)
            ww, hh = get_novel_size(ww, hh, 512)
            normal_image = cv2.resize(normal_image, (ww, hh))
            #show_image(normal_image)
        normal_image = torch.from_numpy(normal_image)
        normal_image = normal_image.permute(2, 0, 1)
        # read input image ---------------------------------------------------
        input_image = load_image(input_path)
        if self.mode == 'test':
            input_image = cv2.resize(input_image, (ww, hh))
            #show_image(input_image)
        input_image = torch.from_numpy(input_image)
        input_image = input_image.permute(2, 0, 1)
        # random crops on the images — same offsets keep the pair aligned.
        if self.mode == 'train':
            c, h, w = normal_image.shape
            i = np.random.randint(0, h - self.resize[1] + 1)
            j = np.random.randint(0, w - self.resize[0] + 1)
            normal_image = self._random_crop(normal_image, i, j)
            input_image = self._random_crop(input_image, i, j)
        normalized_image = self.normalize_image(input_image)
        return normalized_image, normal_image, input_image, input_name

    def _random_crop(self, image, i=0, j=0):
        """Crop a self.resize-sized window at offset (i, j) from a CHW tensor."""
        c, h, w = image.shape
        assert w >= self.resize[1] and h >= self.resize[0], \
            f'Error: Crop size: {self.resize[0]}, Image size: ({w}, {h})'
        PIL_image = transforms.functional.to_pil_image(image)
        cropped_image = transforms.functional.crop(PIL_image, i, j, self.resize[1], self.resize[0])
        cropped_image = transforms.functional.to_tensor(cropped_image)
        # nump = cropped_image.permute(1, 2, 0).numpy()
        # plt.figure()
        # plt.imshow(nump)
        # plt.show()
        return cropped_image

    def normalize_image(self, image):
        """Shift a [0, 1] tensor to [-1, 1] per channel."""
        # normalize the image
        transform_list = [
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ]
        tr = transforms.Compose(transform_list)
        # normalized image
        normalized_image = tr(image)
        return normalized_image

    def crop_image(self, image):
        """Center-crop a CHW tensor to a self.resize[0]-sized square."""
        pre = transforms.functional.to_pil_image(image)
        cropped = transforms.functional.center_crop(pre, self.resize[0])
        post = transforms.functional.to_tensor(cropped)
        # np = post.permute(1, 2, 0).numpy()
        # plt.figure()
        # plt.imshow(np)
        # plt.show()
        return post

    def filter_list(self, image_list, param):
        """Filter filenames by the exposure tag after the last underscore.

        param == 1 keeps names whose tag starts with '0' or 'P' (presumably
        zero/plus exposures); param == 2 keeps 'N' (presumably negative);
        any other value returns the list unchanged. TODO confirm tag meaning.
        """
        new_list = []
        if param == 1:
            for i in range(len(image_list)):
                a = image_list[i].split('_')[-1][0]
                if a == '0' or a == 'P':
                    new_list.append(image_list[i])
        elif param == 2:
            for i in range(len(image_list)):
                a = image_list[i].split('_')[-1][0]
                if a == 'N':
                    new_list.append(image_list[i])
        else:
            return image_list
        return new_list
def get_novel_size(ww, hh, size):
    """Scale (ww, hh) so the larger dimension becomes *size*, keeping aspect ratio.

    Returns the new (width, height) rounded to integers. Raises
    ZeroDivisionError if both dimensions are 0 (same as the original).
    """
    # The original if/else branches computed the identical expression; only
    # the ratio's denominator differed — it is always the larger dimension.
    ratio = size / max(ww, hh)
    return round(ratio * ww), round(ratio * hh)
def load_image(name_jpg, mode=1):
    """Load an image file as a float32 HWC array scaled to [0, 1].

    mode == 1 loads RGB via PIL; any other mode loads via OpenCV and
    converts to LAB.
    """
    if mode == 1:
        return np.asarray(Image.open(name_jpg).convert('RGB')).astype(np.float32) / 255.0
    else:
        image = cv2.imread(name_jpg, cv2.IMREAD_COLOR)
        # NOTE(review): cv2.imread returns BGR, but the conversion flag is
        # COLOR_RGB2LAB — the channel order looks swapped; confirm whether
        # COLOR_BGR2LAB was intended before changing (models may be trained
        # on the current behavior).
        LAB = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
        return LAB.astype(np.float32) / 255.0
def show_image(image):
    """Display *image* in a new matplotlib figure (blocks until closed)."""
    plt.figure()
    plt.imshow(image)
    plt.show()
def perform_test(h, size1, size2):
    """Return size1 when h lies strictly between size1 and size2, else 0."""
    return size1 if size1 < h < size2 else 0
def adapt_size(h, w):
    """Bucket a (height, width) pair into predefined size bands.

    Returns (nw, nh): for each dimension, the lower bound of the band it
    lies strictly inside, or 0 when it sits on a boundary or outside every
    band.
    """
    # BUG FIX: the original initialized `nh = 0,` — the stray trailing comma
    # made nh a tuple (0,) rather than the integer 0.
    sizes = [64, 128, 256, 512, 1024, 2048, 5086]
    nh = 0
    nw = 0
    # Inlined the strict-range check (was the sibling helper perform_test);
    # first matching band wins for each dimension, exactly as before.
    for lower, upper in zip(sizes, sizes[1:]):
        if nh == 0 and lower < h < upper:
            nh = lower
        if nw == 0 and lower < w < upper:
            nw = lower
    return nw, nh
def get_size_item():
    """One-off utility: scan the (hardcoded) INPUT_IMAGES folder and append
    the names of all images larger than 768x768 to resolutions/images_768.txt.

    NOTE(review): both the dataset path and the output path are hardcoded;
    the output file is opened in append mode, so re-running duplicates lines.
    """
    dataset_path = '/media/lf216/Data/elie/5k/data/INPUT_IMAGES'
    elements = os.listdir(dataset_path)
    list_element = []
    count = 0
    for image in elements:
        image_path = os.path.join(dataset_path, image)
        img = imageio.imread(image_path)
        H, W, C = img.shape
        if H > 768 and W > 768:
            count = count + 1
            list_element.append(image)
            with open("resolutions/images_768.txt", "a") as txt_file:
                txt_file.write(image + "\n")
        # Progress report for every scanned image.
        print(f'saved : {count}/{len(elements)} : {img.shape}')
def read_and_parse(file, res):
    """Read images_<res>.txt inside directory *file* and return its lines
    with trailing newlines stripped.

    Raises FileNotFoundError when the list file does not exist.
    """
    path = f'{file}/images_{res}.txt'
    # Iterate the handle directly instead of readlines() + index loop.
    with open(path) as fs:
        return [line.rstrip('\n') for line in fs]
if __name__ == '__main__':
    # Manual smoke test against hardcoded local paths: build the training
    # dataset and fetch one sample. path2 is defined but unused.
    dataset = '/media/lf216/Data/elie/5k/data'
    path2 = '/media/lf216/Data/elie/5k/test'
    dat = ExposureCorrectionTrain(dataset)
    e = dat[8525]
    print(e[0].shape)
    # read_and_parse('/media/lf216/Data/elie/5k/data')
    # get_size_item()
| 30.493694
| 99
| 0.580773
| 2,194
| 16,924
| 4.25433
| 0.084777
| 0.049282
| 0.030534
| 0.023998
| 0.813585
| 0.801907
| 0.788301
| 0.783801
| 0.783801
| 0.779944
| 0
| 0.022791
| 0.287048
| 16,924
| 554
| 100
| 30.548736
| 0.750787
| 0.082841
| 0
| 0.723343
| 0
| 0.008646
| 0.039361
| 0.009824
| 0
| 0
| 0
| 0
| 0.008646
| 1
| 0.086455
| false
| 0
| 0.034582
| 0.008646
| 0.216138
| 0.014409
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0cb197028295bcd2c4f2c607e98011d3afafb86c
| 9,194
|
py
|
Python
|
webserver/python2.7/site-packages/mpmath/tests/test_calculus.py
|
maxr1876/Radix
|
bf9a5470908ea0823c8398565086b1e6b960c73b
|
[
"BSD-2-Clause"
] | 4
|
2018-06-03T02:11:46.000Z
|
2021-08-18T19:55:15.000Z
|
mpmath/tests/test_calculus.py
|
asmeurer/mpmath
|
157a7091b80c3ac12c4d3c1886a892bd8b29d8bc
|
[
"BSD-3-Clause"
] | null | null | null |
mpmath/tests/test_calculus.py
|
asmeurer/mpmath
|
157a7091b80c3ac12c4d3c1886a892bd8b29d8bc
|
[
"BSD-3-Clause"
] | 3
|
2019-12-23T19:12:51.000Z
|
2021-04-30T14:00:31.000Z
|
from mpmath import *
def test_approximation():
    """Chebyshev fit of cos(2-2x)/x on [2, 4] stays within its reported error."""
    mp.dps = 15
    func = lambda x: cos(2-2*x)/x
    poly, bound = chebyfit(func, [2, 4], 8, error=True)
    assert bound < 1e-5
    # Sample the interval at 10 evenly spaced points and compare.
    for step in range(10):
        pt = 2 + step/5.
        assert abs(polyval(poly, pt) - func(pt)) < bound
def test_limits():
    """Two classic limits: (x - sin x)/x^3 -> 1/6 at 0, and (1 + 1/n)^n -> e."""
    mp.dps = 15
    one_sixth = mpf(1)/6
    assert limit(lambda x: (x-sin(x))/x**3, 0).ae(one_sixth)
    assert limit(lambda n: (1+1/n)**n, inf).ae(e)
def test_polyval():
    """polyval on degenerate coefficient lists and with derivative=True."""
    # Degenerate inputs: no coefficients, then constant polynomials.
    assert polyval([], 3) == 0
    assert polyval([0], 3) == 0
    assert polyval([5], 3) == 5
    # p(x) = 4x^3 - 2x + 5, so p(4) = 253 and p'(4) = 190.
    coeffs = [4, 0, -2, 5]
    assert polyval(coeffs, 4) == 253
    assert polyval(coeffs, 4, derivative=True) == (253, 190)
def test_polyroots():
    """polyroots finds real and complex-conjugate roots and rejects p(x) = 0."""
    # Linear: x - 4 has the single root 4.
    linear_roots = polyroots([1, -4])
    assert linear_roots[0].ae(4)
    # Quadratic x^2 + 2x + 3: conjugate pair -1 -/+ sqrt(2)i.
    r_minus, r_plus = polyroots([1, 2, 3])
    assert r_minus.ae(-1 - sqrt(2)*j)
    assert r_plus.ae(-1 + sqrt(2)*j)
    # A nonzero constant polynomial has no roots (specific-case check).
    assert polyroots([1]) == []
    # The zero polynomial is invalid input and must raise.
    try:
        polyroots([0])
    except ValueError:
        pass
    else:
        raise AssertionError("polyroots([0]) should raise ValueError")
def test_polyroots_legendre():
    """polyroots on the degree-64 Legendre polynomial: at working precision 3
    a 5-step run must raise NoConvergence, while 50 steps must reproduce the
    known 3-digit roots.
    """
    n = 64
    # Integer-scaled coefficients of the degree-64 Legendre polynomial,
    # highest degree first; odd-degree coefficients vanish by symmetry.
    coeffs = [11975573020964041433067793888190275875, 0,
              -190100434726484311252477736051902332000, 0,
              1437919688271127330313741595496589239248, 0,
              -6897338342113537600691931230430793911840, 0,
              23556405536185284408974715545252277554280, 0,
              -60969520211303089058522793175947071316960, 0,
              124284021969194758465450309166353645376880, 0,
              -204721258548015217049921875719981284186016, 0,
              277415422258095841688223780704620656114900, 0,
              -313237834141273382807123548182995095192800, 0,
              297432255354328395601259515935229287637200, 0,
              -239057700565161140389797367947941296605600, 0,
              163356095386193445933028201431093219347160, 0,
              -95158890516229191805647495979277603503200, 0,
              47310254620162038075933656063247634556400, 0,
              -20071017111583894941305187420771723751200, 0,
              7255051932731034189479516844750603752850, 0,
              -2228176940331017311443863996901733412640, 0,
              579006552594977616773047095969088431600, 0,
              -126584428502545713788439446082310831200, 0,
              23112325428835593809686977515028663000, 0,
              -3491517141958743235617737161547844000, 0,
              431305058712550634988073414073557200, 0,
              -42927166660756742088912492757452000, 0,
              3378527005707706553294038781836500, 0,
              -205277590220215081719131470288800, 0,
              9330799555464321896324157740400, 0,
              -304114948474392713657972548576, 0,
              6695289961520387531608984680, 0,
              -91048139350447232095702560, 0,
              659769125727878493447120, 0,
              -1905929106580294155360, 0,
              916312070471295267]
    with mp.workdps(3):
        # Too few Newton steps at this low precision: must not converge.
        try:
            roots = polyroots(coeffs, maxsteps=5, cleanup=True, error=False,
                              extraprec=n*10)
            raise AssertionError("polyroots() didn't raise NoConvergence")
        except (mp.NoConvergence):
            pass
        # With enough steps the full root set is recovered at 3 digits.
        roots = polyroots(coeffs, maxsteps=50, cleanup=True, error=False,
                          extraprec=n*10)
        roots = [str(r) for r in roots]
        # All 64 roots lie in (-1, 1), symmetric about 0 (Legendre nodes).
        assert roots == \
            ['-0.999', '-0.996', '-0.991', '-0.983', '-0.973', '-0.961',
             '-0.946', '-0.93', '-0.911', '-0.889', '-0.866', '-0.841',
             '-0.813', '-0.784', '-0.753', '-0.72', '-0.685', '-0.649',
             '-0.611', '-0.572', '-0.531', '-0.489', '-0.446', '-0.402',
             '-0.357', '-0.311', '-0.265', '-0.217', '-0.17', '-0.121',
             '-0.073', '-0.0243', '0.0243', '0.073', '0.121', '0.17', '0.217',
             '0.265', '0.311', '0.357', '0.402', '0.446', '0.489', '0.531',
             '0.572', '0.611', '0.649', '0.685', '0.72', '0.753', '0.784',
             '0.813', '0.841', '0.866', '0.889', '0.911', '0.93', '0.946',
             '0.961', '0.973', '0.983', '0.991', '0.996', '0.999']
def test_polyroots_legendre_init():
    """Same degree-64 Legendre polynomial, but seeded via roots_init: a good
    seed lets 5 Newton steps converge within the reported error bound, and a
    partial (60-entry) seed still converges with more steps.
    """
    extra_prec = 100
    # Integer-scaled coefficients of the degree-64 Legendre polynomial,
    # highest degree first; odd-degree coefficients vanish by symmetry.
    coeffs = [11975573020964041433067793888190275875, 0,
              -190100434726484311252477736051902332000, 0,
              1437919688271127330313741595496589239248, 0,
              -6897338342113537600691931230430793911840, 0,
              23556405536185284408974715545252277554280, 0,
              -60969520211303089058522793175947071316960, 0,
              124284021969194758465450309166353645376880, 0,
              -204721258548015217049921875719981284186016, 0,
              277415422258095841688223780704620656114900, 0,
              -313237834141273382807123548182995095192800, 0,
              297432255354328395601259515935229287637200, 0,
              -239057700565161140389797367947941296605600, 0,
              163356095386193445933028201431093219347160, 0,
              -95158890516229191805647495979277603503200, 0,
              47310254620162038075933656063247634556400, 0,
              -20071017111583894941305187420771723751200, 0,
              7255051932731034189479516844750603752850, 0,
              -2228176940331017311443863996901733412640, 0,
              579006552594977616773047095969088431600, 0,
              -126584428502545713788439446082310831200, 0,
              23112325428835593809686977515028663000, 0,
              -3491517141958743235617737161547844000, 0,
              431305058712550634988073414073557200, 0,
              -42927166660756742088912492757452000, 0,
              3378527005707706553294038781836500, 0,
              -205277590220215081719131470288800, 0,
              9330799555464321896324157740400, 0,
              -304114948474392713657972548576, 0,
              6695289961520387531608984680, 0,
              -91048139350447232095702560, 0,
              659769125727878493447120, 0,
              -1905929106580294155360, 0,
              916312070471295267]
    # 3-digit approximations of the roots, used as the Newton seed.
    # NOTE(review): the matrix holds 65 entries for a degree-64 polynomial
    # (the trailing '1.0' is surplus), and ' 0.311' carries a stray leading
    # space -- mpf() tolerates whitespace, but both look unintentional; confirm.
    roots_init = matrix(['-0.999', '-0.996', '-0.991', '-0.983', '-0.973',
                         '-0.961', '-0.946', '-0.93', '-0.911', '-0.889',
                         '-0.866', '-0.841', '-0.813', '-0.784', '-0.753',
                         '-0.72', '-0.685', '-0.649', '-0.611', '-0.572',
                         '-0.531', '-0.489', '-0.446', '-0.402', '-0.357',
                         '-0.311', '-0.265', '-0.217', '-0.17', '-0.121',
                         '-0.073', '-0.0243', '0.0243', '0.073', '0.121',
                         '0.17', '0.217', '0.265', ' 0.311', '0.357',
                         '0.402', '0.446', '0.489', '0.531', '0.572',
                         '0.611', '0.649', '0.685', '0.72', '0.753',
                         '0.784', '0.813', '0.841', '0.866', '0.889',
                         '0.911', '0.93', '0.946', '0.961', '0.973',
                         '0.983', '0.991', '0.996', '0.999', '1.0'])
    with mp.workdps(2*mp.dps):
        # High-precision reference roots for the error comparisons below.
        roots_exact = polyroots(coeffs, maxsteps=50, cleanup=True, error=False,
                                extraprec=2*extra_prec)
        # Without a seed, 5 steps must fail to converge.
        try:
            roots = polyroots(coeffs, maxsteps=5, cleanup=True, error=False,
                              extraprec=extra_prec)
            raise AssertionError("polyroots() didn't raise NoConvergence")
        except (mp.NoConvergence):
            pass
        # Seeded with good guesses, the same 5 steps suffice.
        roots,err = polyroots(coeffs, maxsteps=5, cleanup=True, error=True,
                              extraprec=extra_prec,roots_init=roots_init)
        assert max(matrix(roots_exact)-matrix(roots).apply(abs)) < err
        # A partial seed (first 60 guesses) still converges given more steps.
        roots1,err1 = polyroots(coeffs, maxsteps=25, cleanup=True, error=True,
                                extraprec=extra_prec,roots_init=roots_init[:60])
        assert max(matrix(roots_exact)-matrix(roots1).apply(abs)) < err1
def test_pade():
    """Diagonal Pade approximant built from exp's Taylor series matches exp on [0, 1)."""
    one = mpf(1)
    mp.dps = 20
    N = 10
    # Taylor coefficients of exp: 1/k! for k = 0..N.
    taylor = [one]
    factorial = 1
    for k in range(1, N+1):
        factorial *= k
        taylor.append(one/factorial)
    numer, denom = pade(taylor, N//2, N//2)
    for pt in arange(0, 1, 0.1):
        # polyval expects highest-degree-first coefficients, hence the reversal.
        approx = polyval(numer[::-1], pt)/polyval(denom[::-1], pt)
        assert(approx.ae(exp(pt), 1.0e-10))
    mp.dps = 15
def test_fourier():
    """Degree-2 Fourier coefficients of f(x) = x + 1 on [-1, 2], plus evaluation."""
    mp.dps = 15
    cos_coeffs, sin_coeffs = fourier(lambda x: x+1, [-1, 2], 2)
    #plot([lambda x: x+1, lambda x: fourierval((cos_coeffs, sin_coeffs), [-1, 2], x)], [-1, 2])
    # Closed-form coefficients of the sawtooth-like expansion.
    assert cos_coeffs[0].ae(1.5)
    assert cos_coeffs[1].ae(-3*sqrt(3)/(2*pi))
    assert cos_coeffs[2].ae(3*sqrt(3)/(4*pi))
    assert sin_coeffs[0] == 0
    assert sin_coeffs[1].ae(3/(2*pi))
    assert sin_coeffs[2].ae(3/(4*pi))
    # Evaluating the truncated series at x = 1.
    assert fourierval((cos_coeffs, sin_coeffs), [-1, 2], 1).ae(1.9134966715663442)
def test_differint():
    """Order -1/2 differintegral (semi-integral) of f(t) = t at t = 2."""
    mp.dps = 15
    expected = 8*sqrt(2/pi)/3
    assert differint(lambda t: t, 2, -0.5).ae(expected)
def test_invlap():
    """All three numerical inverse-Laplace methods recover known originals."""
    mp.dps = 15
    methods = ('talbot', 'stehfest', 'dehoog')
    # F(p) = 1/(p+1)^2  <->  f(t) = t*exp(-t)
    fp = lambda p: 1/(p+1)**2
    ft = lambda t: t*exp(-t)
    for t in (0.01, 1.0):
        expected = ft(t)
        for name in methods:
            assert invertlaplace(fp, t, method=name).ae(expected)
    # F(p) = log(p)/p  <->  f(t) = -euler - log(t)
    fp = lambda p: log(p)/p
    ft = lambda t: -euler-log(t)
    for t in (0.01, 1.0):
        expected = ft(t)
        for name in methods:
            assert invertlaplace(fp, t, method=name).ae(expected)
| 41.414414
| 79
| 0.586252
| 1,045
| 9,194
| 5.133014
| 0.180861
| 0.042506
| 0.04698
| 0.049217
| 0.777778
| 0.769202
| 0.752983
| 0.74739
| 0.74739
| 0.726883
| 0
| 0.462449
| 0.255601
| 9,194
| 221
| 80
| 41.60181
| 0.321303
| 0.015119
| 0
| 0.519608
| 0
| 0
| 0.094575
| 0
| 0
| 0
| 0
| 0
| 0.196078
| 1
| 0.04902
| false
| 0.014706
| 0.004902
| 0
| 0.053922
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0ccc977d12c6fcc42b4325fe08d236a749047f56
| 13,068
|
py
|
Python
|
fixture/session.py
|
SazonovPavel/lims-tst-web-portal
|
75f6538d7e16ce1fc0c96ea6f499b95a7eab1cfd
|
[
"Apache-2.0"
] | null | null | null |
fixture/session.py
|
SazonovPavel/lims-tst-web-portal
|
75f6538d7e16ce1fc0c96ea6f499b95a7eab1cfd
|
[
"Apache-2.0"
] | null | null | null |
fixture/session.py
|
SazonovPavel/lims-tst-web-portal
|
75f6538d7e16ce1fc0c96ea6f499b95a7eab1cfd
|
[
"Apache-2.0"
] | 1
|
2019-08-11T18:53:18.000Z
|
2019-08-11T18:53:18.000Z
|
import time
class SessionHelper:
    """Selenium page-object helper for the portal's digital-signature login.

    Every ``login_*`` method walks the same file-key login wizard but stops
    (or supplies bad input) at a different step and asserts the exact
    Ukrainian validation alert the portal shows for that case. ``login`` and
    ``login_tenth`` perform the full successful flow. All waits are fixed
    ``time.sleep`` calls -- presumably tuned to the portal's load times.
    """
    def __init__(self, app):
        # Keep a reference to the application fixture that owns the webdriver.
        self.app = app

    def login(self, password, path_to_key):
        """Happy path: file key + password with the 'Ukraina' CA; lands on the portal."""
        wd = self.app.wd
        self.app.open_home_page()
        # 1. Click the "Log in to the portal" button.
        wd.find_element_by_xpath("//div[@id='content-wrapper']/div/a/div").click()
        # 2. Choose the "file key storage" option.
        wd.find_element_by_xpath("//a[contains(.,'Файловий носій')]").click()
        # 3. Open the "key certification authority" drop-down list.
        time.sleep(10)
        wd.find_element_by_xpath("//select[@id='CAsServersSelect']").click()
        # 4. Select the 'Key certification centre "Ukraina"' option.
        wd.find_element_by_xpath("//option[contains(.,'Україна')]").click()
        # 5. Enter the path to the key file.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[2]/div/input").send_keys(
            path_to_key)
        # 6. Activate the PASSWORD field.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div/div/input").click()
        # 7. Type the password.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div/div/input").send_keys(
            password)
        # 8. Click the LOG IN button.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div[2]/div/button").click()
        time.sleep(10)
        # 9. Click the AGREE button in the signing consent modal.
        wd.find_element_by_xpath("//div[@id='contentSignJSModal']/button").click()
        time.sleep(5)
        # 10. Verify presence of the page header element (portal title).
        wd.find_element_by_xpath("//div[@class='header-inner']/h1").click()

    def login_second(self):
        """Submit with no key file selected; expect the 'no private-key file' alert."""
        wd = self.app.wd
        self.app.open_home_page()
        # 1. Click the "Log in to the portal" button.
        wd.find_element_by_xpath("//div[@id='content-wrapper']/div/a/div").click()
        # 2. Choose the "file key storage" option.
        wd.find_element_by_xpath("//a[contains(.,'Файловий носій')]").click()
        # 3. Open the "key certification authority" drop-down list.
        time.sleep(10)
        wd.find_element_by_xpath("//select[@id='CAsServersSelect']").click()
        # 4. Select the 'Key certification centre "Ukraina"' option.
        wd.find_element_by_xpath("//option[contains(.,'Україна')]").click()
        # 5. Click the LOG IN button without supplying a key file.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div[2]/div/button").click()
        time.sleep(3)
        # if (wd.find_element_by_xpath("//*[text()[contains(.,'No Sales Found')]")).Enabled)
        # {
        # wd.switch_to_alert().accept()
        # }
        # Alert text: "an error occurred reading the private key: no key file selected".
        alert = wd.switch_to.alert
        assert "Виникла помилка при зчитуванні особистого ключа. Опис помилки: файл з особистим ключем не обрано" in alert.text
        alert.accept()
        # wd.switch_to_alert().accept()

    def login_third(self, password, path_to_key):
        """Full form submission expecting the 'private-key password not specified'
        alert -- presumably the caller passes an empty password; confirm."""
        wd = self.app.wd
        self.app.open_home_page()
        # 1. Click the "Log in to the portal" button.
        wd.find_element_by_xpath("//div[@id='content-wrapper']/div/a/div").click()
        # 2. Choose the "file key storage" option.
        wd.find_element_by_xpath("//a[contains(.,'Файловий носій')]").click()
        # 3. Open the "key certification authority" drop-down list.
        time.sleep(10)
        wd.find_element_by_xpath("//select[@id='CAsServersSelect']").click()
        # 4. Select the 'Key certification centre "Ukraina"' option.
        wd.find_element_by_xpath("//option[contains(.,'Україна')]").click()
        # 5. Enter the path to the key file.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[2]/div/input").send_keys(
            path_to_key)
        # 6. Activate the PASSWORD field.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div/div/input").click()
        # 7. Type the password.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div/div/input").send_keys(
            password)
        # 8. Click the LOG IN button.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div[2]/div/button").click()
        time.sleep(3)
        # Alert text: "error reading the private key: access password not specified".
        alert = wd.switch_to.alert
        assert "Виникла помилка при зчитуванні особистого ключа. Опис помилки: не вказано пароль доступу до особистого ключа" in alert.text
        alert.accept()
        # wd.switch_to_alert().accept()

    def login_fourth(self, password):
        """Skip CA selection and key path, enter only a password; expect the
        'no private-key file selected' alert."""
        wd = self.app.wd
        self.app.open_home_page()
        # 1. Click the "Log in to the portal" button.
        wd.find_element_by_xpath("//div[@id='content-wrapper']/div/a/div").click()
        # 2. Choose the "file key storage" option.
        wd.find_element_by_xpath("//a[contains(.,'Файловий носій')]").click()
        time.sleep(10)
        # 6. Activate the PASSWORD field.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div/div/input").click()
        # 7. Type the password.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div/div/input").send_keys(
            password)
        # 8. Click the LOG IN button.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div[2]/div/button").click()
        time.sleep(3)
        # Alert text: "error reading the private key: no key file selected".
        alert = wd.switch_to.alert
        assert "Виникла помилка при зчитуванні особистого ключа. Опис помилки: файл з особистим ключем не обрано" in alert.text
        alert.accept()
        # wd.switch_to_alert().accept()

    def login_fifth(self, password, path_to_key):
        """Enter key and password without choosing a CA; expect
        'Certificate not found(51)'."""
        wd = self.app.wd
        self.app.open_home_page()
        # 1. Click the "Log in to the portal" button.
        wd.find_element_by_xpath("//div[@id='content-wrapper']/div/a/div").click()
        # 2. Choose the "file key storage" option.
        wd.find_element_by_xpath("//a[contains(.,'Файловий носій')]").click()
        time.sleep(10)
        # 5. Enter the path to the key file.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[2]/div/input").send_keys(
            path_to_key)
        # 6. Activate the PASSWORD field.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div/div/input").click()
        # 7. Type the password.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div/div/input").send_keys(
            password)
        # 8. Click the LOG IN button.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div[2]/div/button").click()
        time.sleep(3)
        # Alert text: "Certificate not found(51)".
        alert = wd.switch_to.alert
        assert "Сертифікат не знайдено(51)" in alert.text
        alert.accept()
        # wd.switch_to_alert().accept()

    def login_sixth(self, password, path_to_key):
        """Select the 'MVS Ukrainy' CA (mismatching the key); expect
        'Certificate not found(51)'."""
        wd = self.app.wd
        self.app.open_home_page()
        # 1. Click the "Log in to the portal" button.
        wd.find_element_by_xpath("//div[@id='content-wrapper']/div/a/div").click()
        # 2. Choose the "file key storage" option.
        wd.find_element_by_xpath("//a[contains(.,'Файловий носій')]").click()
        # 3. Open the "key certification authority" drop-down list.
        time.sleep(10)
        wd.find_element_by_xpath("//select[@id='CAsServersSelect']").click()
        # 4. Select the "MVS Ukrainy" (Ministry of Internal Affairs) option.
        wd.find_element_by_xpath("//option[contains(.,'МВС України')]").click()
        # 5. Enter the path to the key file.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[2]/div/input").send_keys(
            path_to_key)
        # 6. Activate the PASSWORD field.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div/div/input").click()
        # 7. Type the password.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div/div/input").send_keys(
            password)
        # 8. Click the LOG IN button.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div[2]/div/button").click()
        time.sleep(3)
        # Alert text: "Certificate not found(51)".
        alert = wd.switch_to.alert
        assert "Сертифікат не знайдено(51)" in alert.text
        alert.accept()
        # wd.switch_to_alert().accept()

    def login_seventh(self, password, path_to_key):
        """Full flow expecting the 'error opening private key (wrong password or
        corrupted key)(24)' alert -- presumably driven with a bad password; confirm."""
        wd = self.app.wd
        self.app.open_home_page()
        # 1. Click the "Log in to the portal" button.
        wd.find_element_by_xpath("//div[@id='content-wrapper']/div/a/div").click()
        # 2. Choose the "file key storage" option.
        wd.find_element_by_xpath("//a[contains(.,'Файловий носій')]").click()
        # 3. Open the "key certification authority" drop-down list.
        time.sleep(10)
        wd.find_element_by_xpath("//select[@id='CAsServersSelect']").click()
        # 4. Select the 'Key certification centre "Ukraina"' option.
        wd.find_element_by_xpath("//option[contains(.,'Україна')]").click()
        # 5. Enter the path to the key file.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[2]/div/input").send_keys(
            path_to_key)
        # 6. Activate the PASSWORD field.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div/div/input").click()
        # 7. Type the password.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div/div/input").send_keys(
            password)
        # 8. Click the LOG IN button.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div[2]/div/button").click()
        time.sleep(3)
        # Alert text: "error opening the private key (wrong password or corrupted key)(24)".
        alert = wd.switch_to.alert
        assert "Виникла помилка при відкритті особистого ключа (невірний пароль чи ключ пошкоджений)(24)" in alert.text
        alert.accept()
        # wd.switch_to_alert().accept()

    def login_eighth(self, password):
        """CA selected but no key file supplied; expect the 'no private-key file
        selected' alert."""
        wd = self.app.wd
        self.app.open_home_page()
        # 1. Click the "Log in to the portal" button.
        wd.find_element_by_xpath("//div[@id='content-wrapper']/div/a/div").click()
        # 2. Choose the "file key storage" option.
        wd.find_element_by_xpath("//a[contains(.,'Файловий носій')]").click()
        # 3. Open the "key certification authority" drop-down list.
        time.sleep(10)
        wd.find_element_by_xpath("//select[@id='CAsServersSelect']").click()
        # 4. Select the 'Key certification centre "Ukraina"' option.
        wd.find_element_by_xpath("//option[contains(.,'Україна')]").click()
        # 6. Activate the PASSWORD field.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div/div/input").click()
        # 7. Type the password.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div/div/input").send_keys(
            password)
        # 8. Click the LOG IN button.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div[2]/div/button").click()
        time.sleep(3)
        # Alert text: "error reading the private key: no key file selected".
        alert = wd.switch_to.alert
        assert "Виникла помилка при зчитуванні особистого ключа. Опис помилки: файл з особистим ключем не обрано" in alert.text
        alert.accept()
        # wd.switch_to_alert().accept()

    def login_tenth(self, password, path_to_key):
        """Duplicate of :meth:`login` (full successful flow).
        NOTE(review): byte-for-byte the same steps as ``login`` -- candidate
        for de-duplication."""
        wd = self.app.wd
        self.app.open_home_page()
        # 1. Click the "Log in to the portal" button.
        wd.find_element_by_xpath("//div[@id='content-wrapper']/div/a/div").click()
        # 2. Choose the "file key storage" option.
        wd.find_element_by_xpath("//a[contains(.,'Файловий носій')]").click()
        # 3. Open the "key certification authority" drop-down list.
        time.sleep(10)
        wd.find_element_by_xpath("//select[@id='CAsServersSelect']").click()
        # 4. Select the 'Key certification centre "Ukraina"' option.
        wd.find_element_by_xpath("//option[contains(.,'Україна')]").click()
        # 5. Enter the path to the key file.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[2]/div/input").send_keys(
            path_to_key)
        # 6. Activate the PASSWORD field.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div/div/input").click()
        # 7. Type the password.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div/div/input").send_keys(
            password)
        # 8. Click the LOG IN button.
        wd.find_element_by_xpath("//form[@id='formAuthJSRequest']/fieldset/div[3]/div/div[2]/div/button").click()
        time.sleep(10)
        # 9. Click the AGREE button in the signing consent modal.
        wd.find_element_by_xpath("//div[@id='contentSignJSModal']/button").click()
        time.sleep(5)
        # 10. Verify presence of the page header element (portal title).
        wd.find_element_by_xpath("//div[@class='header-inner']/h1").click()

    def logout(self):
        """Sign out via the account menu and verify the public services page header."""
        wd = self.app.wd
        # Open the account drop-down in the page header.
        wd.find_element_by_xpath("//ul[@id='header-account-menu']/li/div/div/div/span").click()
        # Click the third menu item (sign out).
        wd.find_element_by_xpath("//ul[@id='header-account-menu']/li/div/ul/li[3]/div/a/div/span").click()
        time.sleep(3)
        # Verify presence of the header element ("Online submission and tracking").
        wd.find_element_by_xpath("//h1[contains(.,'Онлайн-подача та відслідковування')]")
| 48.761194
| 139
| 0.647153
| 1,728
| 13,068
| 4.718171
| 0.084491
| 0.052251
| 0.11321
| 0.130627
| 0.952901
| 0.943088
| 0.943088
| 0.938918
| 0.938918
| 0.938918
| 0
| 0.01454
| 0.205311
| 13,068
| 267
| 140
| 48.94382
| 0.770534
| 0.209137
| 0
| 0.874214
| 0
| 0.062893
| 0.382935
| 0.319454
| 0
| 0
| 0
| 0
| 0.044025
| 1
| 0.069182
| false
| 0.100629
| 0.006289
| 0
| 0.081761
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
49034a9982344167bc6fe3456e6ee488cab3dd4e
| 2,124
|
py
|
Python
|
RSA-encryption/Attack-Retrieve-Modulus/extractmod.py
|
amoniaka-knabino/Crypton
|
91a91698050a384e7623eddc871dba1acecd585c
|
[
"MIT"
] | 1,175
|
2018-06-13T07:05:56.000Z
|
2022-03-28T05:51:38.000Z
|
RSA-encryption/Attack-Retrieve-Modulus/extractmod.py
|
amoniaka-knabino/Crypton
|
91a91698050a384e7623eddc871dba1acecd585c
|
[
"MIT"
] | 5
|
2019-10-12T15:43:52.000Z
|
2020-08-03T05:52:08.000Z
|
RSA-encryption/Attack-Retrieve-Modulus/extractmod.py
|
amoniaka-knabino/Crypton
|
91a91698050a384e7623eddc871dba1acecd585c
|
[
"MIT"
] | 211
|
2018-02-13T11:06:08.000Z
|
2022-03-28T22:36:59.000Z
|
from Crypto.Util.number import *
def extractmod_eknown(_encrypt, e, limit=4):
    """Recover the RSA modulus n given an encryption oracle and the public exponent.

    Reference: https://crypto.stackexchange.com/questions/43583/deduce-modulus-n-from-public-exponent-and-encrypted-data

    Since E(m) = m^e mod n, each value E(m_i) - m_i^e is a multiple of n,
    so their GCD is (a small multiple of) n.

    :param _encrypt: callable taking plaintext ``bytes`` and returning
                     ciphertext ``bytes`` (the server oracle)
    :param e: public exponent (int)
    :param limit: number of probe values to encrypt (at most 4)
    :return: the GCD of the differences (n or a small multiple of it),
             or -1 on invalid input / oracle failure
    """
    from math import gcd  # stdlib replacement for Crypto's GCD

    # Explicit validation instead of `assert` (asserts vanish under -O).
    if limit > 4:
        print("[+] Limit too big!")
        return -1
    try:
        m_list = [2, 3, 5, 7]
        mod_list = []
        for m in m_list[:limit]:
            # int <-> bytes done with the stdlib (was Crypto's
            # bytes_to_long / long_to_bytes).
            ct = _encrypt(m.to_bytes((m.bit_length() + 7) // 8, 'big'))
            mod_list.append(int.from_bytes(ct, 'big') - m**e)
        result = 0  # gcd(0, x) == |x|, so 0 is the identity for the fold
        for value in mod_list:
            result = gcd(result, value)
        return result
    except Exception as ex:
        # Best-effort: report the oracle failure and signal error to the caller
        # (-1, consistent with extractmod_eunknown; previously returned None).
        print("[+] Exception: %s" % ex)
        return -1
def extractmod_eunknown(_encrypt, limit=4):
    """Recover the RSA modulus n from an encryption oracle, exponent unknown.

    Reference: https://crypto.stackexchange.com/questions/43583/deduce-modulus-n-from-public-exponent-and-encrypted-data

    Uses E(m)^2 - E(m^2): both are congruent to m^(2e) mod n, so the
    difference is a multiple of n and the GCD over several probes yields
    (a small multiple of) n.

    :param _encrypt: callable taking plaintext ``bytes`` and returning
                     ciphertext ``bytes`` (the server oracle)
    :param limit: number of probe values to encrypt (at most 4)
    :return: the GCD of the differences (n or a small multiple of it),
             or -1 on invalid input / oracle failure
    """
    from math import gcd  # stdlib replacement for Crypto's GCD

    # Explicit validation instead of `assert` (asserts vanish under -O).
    if limit > 4:
        print("[+] Limit too big!")
        return -1

    def _to_bytes(v):
        # Minimal big-endian encoding (was Crypto's long_to_bytes).
        return v.to_bytes((v.bit_length() + 7) // 8 or 1, 'big')

    try:
        m_list = [2, 3, 5, 7]
        ct_squares = [int.from_bytes(_encrypt(_to_bytes(m**2)), 'big')
                      for m in m_list[:limit]]
        ct_plain = [int.from_bytes(_encrypt(_to_bytes(m)), 'big')
                    for m in m_list[:limit]]
        # E(m)^2 - E(m^2) is a multiple of n for every probe. The fold starts
        # at 0 (gcd identity) -- the original seeded with mod_list[0] and then
        # redundantly folded it in a second time.
        result = 0
        for c, c_sq in zip(ct_plain, ct_squares):
            result = gcd(result, c**2 - c_sq)
        return result
    except Exception as ex:
        # Best-effort: report the oracle failure and signal error to the caller.
        print("[+] Exception: %s" % ex)
        return -1
| 36.62069
| 120
| 0.615819
| 294
| 2,124
| 4.289116
| 0.265306
| 0.023791
| 0.028549
| 0.043616
| 0.820777
| 0.808089
| 0.79778
| 0.781126
| 0.781126
| 0.781126
| 0
| 0.020672
| 0.271186
| 2,124
| 57
| 121
| 37.263158
| 0.793928
| 0
| 0
| 0.542857
| 0
| 0
| 0.053528
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 0
| null | null | 0
| 0.028571
| null | null | 0.114286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0b462016526368e9856f7c9debabb066a9fb9311
| 89
|
py
|
Python
|
catsup/parser/__init__.py
|
whtsky/catsup-docs-zh
|
91b5ebcf77e0df5de736bd5f3f03deb9145699f5
|
[
"MIT"
] | 62
|
2015-01-12T03:15:54.000Z
|
2021-09-11T03:30:57.000Z
|
catsup/parser/__init__.py
|
whtsky/catsup-docs-zh
|
91b5ebcf77e0df5de736bd5f3f03deb9145699f5
|
[
"MIT"
] | 52
|
2015-04-18T19:21:00.000Z
|
2020-05-25T00:49:34.000Z
|
catsup/parser/__init__.py
|
whtsky/catsup-docs-zh
|
91b5ebcf77e0df5de736bd5f3f03deb9145699f5
|
[
"MIT"
] | 14
|
2015-01-11T12:55:02.000Z
|
2019-02-28T06:36:56.000Z
|
from .config import load
def config(*args, **kwargs):
    """Load and return the catsup configuration.

    Thin alias for :func:`load`; every positional and keyword argument
    is forwarded unchanged.
    """
    parsed = load(*args, **kwargs)
    return parsed
| 14.833333
| 32
| 0.662921
| 12
| 89
| 4.916667
| 0.666667
| 0.338983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.179775
| 89
| 5
| 33
| 17.8
| 0.808219
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
0ba2e719b28ebb14b29967fe8197686a3b944c0e
| 9,679
|
py
|
Python
|
tests/evaluation_setting/test_evaluation_setting.py
|
zhaoyone/RecBole
|
a620a96cc58535462b468d2ca799ac52d31fcf0a
|
[
"MIT"
] | 4
|
2021-04-23T07:47:53.000Z
|
2022-02-01T13:48:33.000Z
|
tests/evaluation_setting/test_evaluation_setting.py
|
zhaoyone/RecBole
|
a620a96cc58535462b468d2ca799ac52d31fcf0a
|
[
"MIT"
] | null | null | null |
tests/evaluation_setting/test_evaluation_setting.py
|
zhaoyone/RecBole
|
a620a96cc58535462b468d2ca799ac52d31fcf0a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2020/10/27
# @Author : Shanlei Mu
# @Email : slmu@ruc.edu.cn
# UPDATE:
# @Time : 2020/11/17
# @Author : Xingyu Pan
# @Email : panxy@ruc.edu.cn
import os
import unittest
from recbole.quick_start import objective_function
# Directory of this test file; the shared model config lives one level up.
current_path = os.path.dirname(os.path.realpath(__file__))
config_file_list = [os.path.join(current_path, '../model/test_model.yaml')]
class TestGeneralRecommender(unittest.TestCase):
    """Smoke tests: a general recommender (BPR) trains and evaluates without
    raising under each supported ordering/splitting/candidate setting.

    The original duplicated the same config/run boilerplate in every method
    and additionally kept disabled NeuMF/FISM/LightGCN runs around as
    triple-quoted string expressions (evaluated and discarded at runtime) and
    comment blocks; both are folded into a single helper here.
    """

    def _check(self, eval_setting, model='BPR'):
        """Run objective_function once for the given eval_setting/model pair.

        saved=False keeps model checkpoints off disk during the smoke run.
        """
        config_dict = {
            'eval_setting': eval_setting,
            'model': model,
        }
        objective_function(config_dict=config_dict,
                           config_file_list=config_file_list, saved=False)

    def test_rols_full(self):
        # Random ordering, leave-one-out split, full candidate ranking.
        self._check('RO_LS,full')

    def test_tols_full(self):
        # Temporal ordering, leave-one-out split, full candidate ranking.
        self._check('TO_LS,full')

    def test_tors_full(self):
        # Temporal ordering, ratio split, full candidate ranking.
        self._check('TO_RS,full')

    def test_rors_uni100(self):
        # Random ordering, ratio split, 100 uniformly sampled negatives.
        self._check('RO_RS,uni100')

    def test_tols_uni100(self):
        # Temporal ordering, leave-one-out split, 100 sampled negatives.
        self._check('TO_LS,uni100')

    def test_rols_uni100(self):
        # Random ordering, leave-one-out split, 100 sampled negatives.
        self._check('RO_LS,uni100')

    def test_tors_uni100(self):
        # Temporal ordering, ratio split, 100 sampled negatives.
        self._check('TO_RS,uni100')
class TestContextRecommender(unittest.TestCase):
    """Smoke test: a context-aware recommender runs under the TO_RS setting."""

    def test_tors(self):
        # Temporal ordering with ratio split; FM as the representative model.
        settings = {
            'eval_setting': 'TO_RS',
            'model': 'FM',
        }
        objective_function(config_dict=settings,
                           config_file_list=config_file_list, saved=False)
class TestSequentialRecommender(unittest.TestCase):
    """Smoke test: a sequential recommender runs under TO_LS with sampled negatives."""

    def test_tols_uni100(self):
        # Temporal ordering, leave-one-out split, 100 sampled negatives;
        # FPMC as the representative sequential model.
        settings = {
            'eval_setting': 'TO_LS,uni100',
            'model': 'FPMC',
        }
        objective_function(config_dict=settings,
                           config_file_list=config_file_list, saved=False)
if __name__ == '__main__':
    # Discover and run every TestCase defined in this module.
    unittest.main()
| 36.115672
| 76
| 0.53363
| 952
| 9,679
| 5.006303
| 0.086134
| 0.226605
| 0.214436
| 0.158624
| 0.890684
| 0.890684
| 0.889845
| 0.87222
| 0.842845
| 0.842845
| 0
| 0.015205
| 0.354479
| 9,679
| 267
| 77
| 36.250936
| 0.747599
| 0.428143
| 0
| 0.520548
| 0
| 0
| 0.077408
| 0.006051
| 0
| 0
| 0
| 0
| 0
| 1
| 0.123288
| false
| 0
| 0.041096
| 0
| 0.205479
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f06728207656423e01d53423b7a572d541a728a8
| 96
|
py
|
Python
|
vision_kf/other/ekf/main.py
|
vortexntnu/Vortex-CV
|
eb5e4836eeb750551807760a2eef3a0fb7daf7ff
|
[
"MIT"
] | 2
|
2022-01-26T11:13:38.000Z
|
2022-02-22T21:18:30.000Z
|
vision_kf/other/ekf/main.py
|
vortexntnu/Vortex-CV
|
eb5e4836eeb750551807760a2eef3a0fb7daf7ff
|
[
"MIT"
] | 42
|
2022-01-25T17:10:43.000Z
|
2022-03-29T18:41:34.000Z
|
vision_kf/other/ekf/main.py
|
vortexntnu/Vortex_CV
|
eb5e4836eeb750551807760a2eef3a0fb7daf7ff
|
[
"MIT"
] | null | null | null |
import sample_simulations.basic_sim
if __name__ == "__main__":
    # NOTE(review): this bare attribute expression is a no-op — the import above already
    # executed sample_simulations.basic_sim's module body. If an explicit entry point
    # (e.g. a main()/run() inside basic_sim) was intended, it is never called; confirm intent.
    sample_simulations.basic_sim
| 24
| 35
| 0.8125
| 12
| 96
| 5.5
| 0.666667
| 0.515152
| 0.666667
| 0.757576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114583
| 96
| 4
| 36
| 24
| 0.776471
| 0
| 0
| 0
| 0
| 0
| 0.082474
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
b2b1bf59df3724ab4ea576f88dbbc19751e9e9a7
| 4,877
|
py
|
Python
|
tests/test_engine/test_queries/test_queryop_comparsion_eq.py
|
jqueguiner/montydb
|
55bb3099fe110dbcd1ee24a71479fb0861d993a4
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_engine/test_queries/test_queryop_comparsion_eq.py
|
jqueguiner/montydb
|
55bb3099fe110dbcd1ee24a71479fb0861d993a4
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_engine/test_queries/test_queryop_comparsion_eq.py
|
jqueguiner/montydb
|
55bb3099fe110dbcd1ee24a71479fb0861d993a4
|
[
"BSD-3-Clause"
] | null | null | null |
from montydb.types import PY3, bson_ as bson
from ...conftest import skip_if_no_bson
def count_documents(cursor, spec=None):
    """Return how many documents in *cursor*'s collection match *spec* (all docs if falsy)."""
    collection = cursor.collection
    return collection.count_documents(spec if spec else {})
def test_qop_eq_1(monty_find, mongo_find):
    """Implicit equality: {"a": 1} matches exactly one of two docs, identically in both engines."""
    docs = [
        {"a": 1},
        {"a": 0}
    ]
    spec = {"a": 1}
    monty_c = monty_find(docs, spec)
    mongo_c = mongo_find(docs, spec)
    assert count_documents(mongo_c, spec) == 1
    assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
    assert next(mongo_c) == next(monty_c)
def test_qop_eq_2(monty_find, mongo_find):
    """Explicit {"$eq": 1} behaves the same as the implicit form in test_qop_eq_1."""
    docs = [
        {"a": 1},
        {"a": 0}
    ]
    spec = {"a": {"$eq": 1}}
    monty_c = monty_find(docs, spec)
    mongo_c = mongo_find(docs, spec)
    assert count_documents(mongo_c, spec) == 1
    assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
    assert next(mongo_c) == next(monty_c)
def test_qop_eq_3(monty_find, mongo_find):
    """$eq on a scalar matches both a bare scalar field and an array containing it (2 hits)."""
    docs = [
        {"a": [1]},
        {"a": 1}
    ]
    spec = {"a": {"$eq": 1}}
    monty_c = monty_find(docs, spec)
    mongo_c = mongo_find(docs, spec)
    assert count_documents(mongo_c, spec) == 2
    assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
    for i in range(2):
        assert next(mongo_c) == next(monty_c)
def test_qop_eq_4(monty_find, mongo_find):
    """$eq with an array value matches both whole-array equality and an array element (2 hits)."""
    docs = [
        {"a": [1]},
        {"a": [[1], 2]}
    ]
    spec = {"a": {"$eq": [1]}}
    monty_c = monty_find(docs, spec)
    mongo_c = mongo_find(docs, spec)
    assert count_documents(mongo_c, spec) == 2
    assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
    for i in range(2):
        assert next(mongo_c) == next(monty_c)
def test_qop_eq_5(monty_find, mongo_find):
    """$eq with an array is order-sensitive: only [2, 1] (whole or as nested element) matches."""
    docs = [
        {"a": [2, 1]},
        {"a": [1, 2]},
        {"a": [[2, 1], 3]},
        {"a": [[1, 2], 3]},
    ]
    spec = {"a": {"$eq": [2, 1]}}
    monty_c = monty_find(docs, spec)
    mongo_c = mongo_find(docs, spec)
    assert count_documents(mongo_c, spec) == 2
    assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
    for i in range(2):
        assert next(mongo_c) == next(monty_c)
@skip_if_no_bson
def test_qop_eq_6(monty_find, mongo_find):
    """Plain-bytes spec against bson.Binary values: matches one doc on PY3, none on PY2."""
    docs = [
        {"a": [{"b": bson.Binary(b"00")}]},
        {"a": [{"b": bson.Binary(b"01")}]},
    ]
    spec = {"a.b": {"$eq": b"01"}}
    monty_c = monty_find(docs, spec)
    mongo_c = mongo_find(docs, spec)
    # Under PY3 bytes compare equal to Binary; under PY2 they do not.
    count = 1 if PY3 else 0
    assert count_documents(mongo_c, spec) == count
    assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
    if PY3:
        assert next(mongo_c) == next(monty_c)
        mongo_c.rewind()
        # presumably the fixtures assign sequential integer _id values, so the
        # matching (second) document has _id == 1 — verify against conftest
        assert next(mongo_c)["_id"] == 1
@skip_if_no_bson
def test_qop_eq_7(monty_find, mongo_find):
    """A plain string spec does not equal a bson.Code value (0 matches in both engines)."""
    docs = [
        {"a": [{"b": bson.Code("a")}]},
    ]
    spec = {"a.b": {"$eq": "a"}}
    monty_c = monty_find(docs, spec)
    mongo_c = mongo_find(docs, spec)
    assert count_documents(mongo_c, spec) == 0
    assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
@skip_if_no_bson
def test_qop_eq_8(monty_find, mongo_find):
    """Converse of test_qop_eq_7: a bson.Code spec does not equal a plain string value."""
    docs = [
        {"a": [{"b": "a"}]},
    ]
    spec = {"a.b": {"$eq": bson.Code("a")}}
    monty_c = monty_find(docs, spec)
    mongo_c = mongo_find(docs, spec)
    assert count_documents(mongo_c, spec) == 0
    assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
@skip_if_no_bson
def test_qop_eq_9(monty_find, mongo_find):
    """Numeric cross-type equality: bson.Int64(1) matches a plain int 1."""
    docs = [
        {"a": 1},
    ]
    spec = {"a": {"$eq": bson.Int64(1)}}
    monty_c = monty_find(docs, spec)
    mongo_c = mongo_find(docs, spec)
    assert count_documents(mongo_c, spec) == 1
    assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
@skip_if_no_bson
def test_qop_eq_10(monty_find, mongo_find):
    """Decimal128("1") compares equal to both int 1 and float 1.0 (2 matches)."""
    docs = [
        {"a": 1},
        {"a": 1.0},
    ]
    spec = {"a": {"$eq": bson.Decimal128("1")}}
    monty_c = monty_find(docs, spec)
    mongo_c = mongo_find(docs, spec)
    assert count_documents(mongo_c, spec) == 2
    assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
@skip_if_no_bson
def test_qop_eq_11(monty_find, mongo_find):
    """Decimal128("1.0") also compares equal to int 1 and float 1.0 — scale is ignored."""
    docs = [
        {"a": 1},
        {"a": 1.0},
    ]
    spec = {"a": {"$eq": bson.Decimal128("1.0")}}
    monty_c = monty_find(docs, spec)
    mongo_c = mongo_find(docs, spec)
    assert count_documents(mongo_c, spec) == 2
    assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
def test_qop_eq_12(monty_find, mongo_find):
    """Numeric dotted path addresses the element itself: tags.0 is the nested array
    ["ssl", "security"], not the string "security", so nothing matches."""
    docs = [
        {"tags": [["ssl", "security"], "warning"]}
    ]
    spec = {"tags.0": "security"}
    monty_c = monty_find(docs, spec)
    mongo_c = mongo_find(docs, spec)
    assert count_documents(mongo_c, spec) == 0
    assert count_documents(monty_c, spec) == count_documents(mongo_c, spec)
| 25.139175
| 75
| 0.602009
| 734
| 4,877
| 3.701635
| 0.084469
| 0.097166
| 0.114833
| 0.176665
| 0.883327
| 0.853147
| 0.83364
| 0.806404
| 0.768127
| 0.748988
| 0
| 0.02305
| 0.234981
| 4,877
| 193
| 76
| 25.26943
| 0.705173
| 0
| 0
| 0.617021
| 0
| 0
| 0.025636
| 0
| 0
| 0
| 0
| 0
| 0.219858
| 1
| 0.092199
| false
| 0
| 0.014184
| 0.007092
| 0.113475
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3317f80ea1bd997f501f0d55beec7442b87e67b1
| 33,446
|
py
|
Python
|
eeauditor/auditors/aws/Amazon_ELB_Auditor.py
|
kbhagi/ElectricEye
|
31960e1e1cfb75c5d354844ea9e07d5295442823
|
[
"Apache-2.0"
] | 442
|
2020-03-15T20:56:36.000Z
|
2022-03-31T22:13:07.000Z
|
eeauditor/auditors/aws/Amazon_ELB_Auditor.py
|
kbhagi/ElectricEye
|
31960e1e1cfb75c5d354844ea9e07d5295442823
|
[
"Apache-2.0"
] | 57
|
2020-03-15T22:09:56.000Z
|
2022-03-31T13:17:06.000Z
|
eeauditor/auditors/aws/Amazon_ELB_Auditor.py
|
kbhagi/ElectricEye
|
31960e1e1cfb75c5d354844ea9e07d5295442823
|
[
"Apache-2.0"
] | 59
|
2020-03-15T21:19:10.000Z
|
2022-03-31T15:01:31.000Z
|
#This file is part of ElectricEye.
#SPDX-License-Identifier: Apache-2.0
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing,
#software distributed under the License is distributed on an
#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#KIND, either express or implied. See the License for the
#specific language governing permissions and limitations
#under the License.
import boto3
import datetime
from check_register import CheckRegister
# Registry that collects the checks decorated with @registry.register_check below.
registry = CheckRegister()
# create boto3 clients
# Module-level classic Elastic Load Balancing client shared by every check in this file.
elb = boto3.client("elb")
@registry.register_check("elb")
def internet_facing_clb_https_listener_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
    """[ELB.1] Classic load balancers that are internet-facing should use secure listeners"""
    # Generator: yields one ASFF finding per listener of every internet-facing
    # classic load balancer (FAILED for insecure listeners, PASSED otherwise).
    # Internal-scheme CLBs are skipped.
    # loop through classic load balancers
    response = elb.describe_load_balancers()
    for classicbalancer in response["LoadBalancerDescriptions"]:
        clbName = str(classicbalancer["LoadBalancerName"])
        clbArn = f"arn:{awsPartition}:elasticloadbalancing:{awsRegion}:{awsAccountId}:loadbalancer/{clbName}"
        clbScheme = str(classicbalancer["Scheme"])
        if clbScheme == "internet-facing":
            for listeners in classicbalancer["ListenerDescriptions"]:
                listenerProtocol = str(listeners["Listener"]["Protocol"])
                iso8601Time = (
                    datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
                )
                # Bug fix: the original condition `listenerProtocol != "HTTPS" or "SSL"`
                # was always truthy (the non-empty literal "SSL" short-circuits the `or`),
                # so every listener — secure or not — produced a FAILED finding. Test
                # membership against both secure protocols instead.
                if listenerProtocol not in ("HTTPS", "SSL"):
                    finding = {
                        "SchemaVersion": "2018-10-08",
                        "Id": clbArn + "/classic-loadbalancer-secure-listener-check",
                        "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
                        "GeneratorId": clbArn,
                        "AwsAccountId": awsAccountId,
                        "Types": ["Software and Configuration Checks/AWS Security Best Practices"],
                        "FirstObservedAt": iso8601Time,
                        "CreatedAt": iso8601Time,
                        "UpdatedAt": iso8601Time,
                        "Severity": {"Label": "MEDIUM"},
                        "Confidence": 99,
                        "Title": "[ELB.1] Classic load balancers that are internet-facing should use secure listeners",
                        "Description": "Classic load balancer "
                        + clbName
                        + " does not use a secure listener (HTTPS or SSL). Refer to the remediation instructions to remediate this behavior",
                        "Remediation": {
                            "Recommendation": {
                                "Text": "For more information on classic load balancer HTTPS listeners refer to the Create a Classic Load Balancer with an HTTPS Listener section of the Classic Load Balancers User Guide.",
                                "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-create-https-ssl-load-balancer.html",
                            }
                        },
                        "ProductFields": {"Product Name": "ElectricEye"},
                        "Resources": [
                            {
                                "Type": "AwsElbLoadBalancer",
                                "Id": clbArn,
                                "Partition": awsPartition,
                                "Region": awsRegion,
                                "Details": {"Other": {"LoadBalancerName": clbName}},
                            }
                        ],
                        "Compliance": {
                            "Status": "FAILED",
                            "RelatedRequirements": [
                                "NIST CSF PR.DS-2",
                                "NIST SP 800-53 SC-8",
                                "NIST SP 800-53 SC-11",
                                "NIST SP 800-53 SC-12",
                                "AICPA TSC CC6.1",
                                "ISO 27001:2013 A.8.2.3",
                                "ISO 27001:2013 A.13.1.1",
                                "ISO 27001:2013 A.13.2.1",
                                "ISO 27001:2013 A.13.2.3",
                                "ISO 27001:2013 A.14.1.2",
                                "ISO 27001:2013 A.14.1.3",
                            ],
                        },
                        "Workflow": {"Status": "NEW"},
                        "RecordState": "ACTIVE",
                    }
                    yield finding
                else:
                    finding = {
                        "SchemaVersion": "2018-10-08",
                        "Id": clbArn + "/classic-loadbalancer-secure-listener-check",
                        "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
                        "GeneratorId": clbArn,
                        "AwsAccountId": awsAccountId,
                        "Types": ["Software and Configuration Checks/AWS Security Best Practices"],
                        "FirstObservedAt": iso8601Time,
                        "CreatedAt": iso8601Time,
                        "UpdatedAt": iso8601Time,
                        "Severity": {"Label": "INFORMATIONAL"},
                        "Confidence": 99,
                        "Title": "[ELB.1] Classic load balancers that are internet-facing should use secure listeners",
                        "Description": "Classic load balancer "
                        + clbName
                        + " uses a secure listener (HTTPS or SSL).",
                        "Remediation": {
                            "Recommendation": {
                                "Text": "For more information on classic load balancer HTTPS listeners refer to the Create a Classic Load Balancer with an HTTPS Listener section of the Classic Load Balancers User Guide.",
                                "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-create-https-ssl-load-balancer.html",
                            }
                        },
                        "ProductFields": {"Product Name": "ElectricEye"},
                        "Resources": [
                            {
                                "Type": "AwsElbLoadBalancer",
                                "Id": clbArn,
                                "Partition": awsPartition,
                                "Region": awsRegion,
                                "Details": {"Other": {"LoadBalancerName": clbName}},
                            }
                        ],
                        "Compliance": {
                            "Status": "PASSED",
                            "RelatedRequirements": [
                                "NIST CSF PR.DS-2",
                                "NIST SP 800-53 SC-8",
                                "NIST SP 800-53 SC-11",
                                "NIST SP 800-53 SC-12",
                                "AICPA TSC CC6.1",
                                "ISO 27001:2013 A.8.2.3",
                                "ISO 27001:2013 A.13.1.1",
                                "ISO 27001:2013 A.13.2.1",
                                "ISO 27001:2013 A.13.2.3",
                                "ISO 27001:2013 A.14.1.2",
                                "ISO 27001:2013 A.14.1.3",
                            ],
                        },
                        "Workflow": {"Status": "RESOLVED"},
                        "RecordState": "ARCHIVED",
                    }
                    yield finding
        else:
            print("Ignoring internal CLB")
            pass
@registry.register_check("elb")
def clb_https_listener_tls12_policy_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
    """[ELB.2] Classic load balancers should use TLS 1.2 listener policies"""
    # Generator: yields one ASFF finding per classic load balancer listener that has
    # policies attached — PASSED when the TLS 1.2 predefined policy is present,
    # FAILED otherwise. Listeners without policies are skipped.
    # loop through classic load balancers
    response = elb.describe_load_balancers()
    for classicbalancer in response["LoadBalancerDescriptions"]:
        clbName = str(classicbalancer["LoadBalancerName"])
        clbArn = f"arn:{awsPartition}:elasticloadbalancing:{awsRegion}:{awsAccountId}:loadbalancer/{clbName}"
        for listeners in classicbalancer["ListenerDescriptions"]:
            # Bug fix: the original stringified the policy list (str([...])) and compared
            # it to a bare policy name, which could never be equal, so compliant listeners
            # were always reported as FAILED. Keep the list and test membership instead.
            listenerPolicies = listeners["PolicyNames"]
            iso8601Time = (
                datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
            )
            if not listenerPolicies:
                # no policies attached to this listener — nothing to evaluate
                pass
            elif "ELBSecurityPolicy-TLS-1-2-2017-01" in listenerPolicies:
                finding = {
                    "SchemaVersion": "2018-10-08",
                    "Id": clbArn + "/classic-loadbalancer-tls12-policy-check",
                    "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
                    "GeneratorId": clbArn,
                    "AwsAccountId": awsAccountId,
                    "Types": ["Software and Configuration Checks/AWS Security Best Practices"],
                    "FirstObservedAt": iso8601Time,
                    "CreatedAt": iso8601Time,
                    "UpdatedAt": iso8601Time,
                    "Severity": {"Label": "INFORMATIONAL"},
                    "Confidence": 99,
                    "Title": "[ELB.2] Classic load balancers should use TLS 1.2 listener policies",
                    # Bug fix: the PASSED description previously read "does not use a
                    # TLS 1.2 listener policy", contradicting the PASSED status.
                    "Description": "Classic load balancer "
                    + clbName
                    + " uses a TLS 1.2 listener policy.",
                    "Remediation": {
                        "Recommendation": {
                            "Text": "For more information on classic load balancer listener policies refer to the Predefined SSL Security Policies for Classic Load Balancers section of the Classic Load Balancers User Guide.",
                            "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-security-policy-table.html",
                        }
                    },
                    "ProductFields": {"Product Name": "ElectricEye"},
                    "Resources": [
                        {
                            "Type": "AwsElbLoadBalancer",
                            "Id": clbArn,
                            "Partition": awsPartition,
                            "Region": awsRegion,
                            "Details": {"Other": {"LoadBalancerName": clbName}},
                        }
                    ],
                    "Compliance": {
                        "Status": "PASSED",
                        "RelatedRequirements": [
                            "NIST CSF PR.DS-2",
                            "NIST SP 800-53 SC-8",
                            "NIST SP 800-53 SC-11",
                            "NIST SP 800-53 SC-12",
                            "AICPA TSC CC6.1",
                            "ISO 27001:2013 A.8.2.3",
                            "ISO 27001:2013 A.13.1.1",
                            "ISO 27001:2013 A.13.2.1",
                            "ISO 27001:2013 A.13.2.3",
                            "ISO 27001:2013 A.14.1.2",
                            "ISO 27001:2013 A.14.1.3",
                        ],
                    },
                    "Workflow": {"Status": "RESOLVED"},
                    "RecordState": "ARCHIVED",
                }
                yield finding
            else:
                finding = {
                    "SchemaVersion": "2018-10-08",
                    "Id": clbArn + "/classic-loadbalancer-tls12-policy-check",
                    "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
                    "GeneratorId": clbArn,
                    "AwsAccountId": awsAccountId,
                    "Types": ["Software and Configuration Checks/AWS Security Best Practices"],
                    "FirstObservedAt": iso8601Time,
                    "CreatedAt": iso8601Time,
                    "UpdatedAt": iso8601Time,
                    "Severity": {"Label": "MEDIUM"},
                    "Confidence": 99,
                    "Title": "[ELB.2] Classic load balancers should use TLS 1.2 listener policies",
                    "Description": "Classic load balancer "
                    + clbName
                    + " does not use a TLS 1.2 listener policy. Refer to the remediation instructions to remediate this behavior",
                    "Remediation": {
                        "Recommendation": {
                            "Text": "For more information on classic load balancer listener policies refer to the Predefined SSL Security Policies for Classic Load Balancers section of the Classic Load Balancers User Guide.",
                            "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-security-policy-table.html",
                        }
                    },
                    "ProductFields": {"Product Name": "ElectricEye"},
                    "Resources": [
                        {
                            "Type": "AwsElbLoadBalancer",
                            "Id": clbArn,
                            "Partition": awsPartition,
                            "Region": awsRegion,
                            "Details": {"Other": {"LoadBalancerName": clbName}},
                        }
                    ],
                    "Compliance": {
                        "Status": "FAILED",
                        "RelatedRequirements": [
                            "NIST CSF PR.DS-2",
                            "NIST SP 800-53 SC-8",
                            "NIST SP 800-53 SC-11",
                            "NIST SP 800-53 SC-12",
                            "AICPA TSC CC6.1",
                            "ISO 27001:2013 A.8.2.3",
                            "ISO 27001:2013 A.13.1.1",
                            "ISO 27001:2013 A.13.2.1",
                            "ISO 27001:2013 A.13.2.3",
                            "ISO 27001:2013 A.14.1.2",
                            "ISO 27001:2013 A.14.1.3",
                        ],
                    },
                    "Workflow": {"Status": "NEW"},
                    "RecordState": "ACTIVE",
                }
                yield finding
@registry.register_check("elb")
def clb_cross_zone_balancing_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
    """[ELB.3] Classic load balancers should have cross-zone load balancing configured"""
    # Generator: yields one ASFF finding per classic load balancer — FAILED (LOW)
    # when cross-zone load balancing is disabled, PASSED otherwise.
    # loop through classic load balancers
    response = elb.describe_load_balancers()
    for classicbalancer in response["LoadBalancerDescriptions"]:
        clbName = str(classicbalancer["LoadBalancerName"])
        clbArn = f"arn:{awsPartition}:elasticloadbalancing:{awsRegion}:{awsAccountId}:loadbalancer/{clbName}"
        # NOTE: this rebinds `response`, shadowing the describe_load_balancers() result;
        # harmless because the outer for-loop already holds its own iterator over the
        # previously-fetched list.
        response = elb.describe_load_balancer_attributes(LoadBalancerName=clbName)
        # Stringified boolean ("True"/"False") of the CrossZoneLoadBalancing attribute.
        crossZoneCheck = str(
            response["LoadBalancerAttributes"]["CrossZoneLoadBalancing"]["Enabled"]
        )
        iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
        if crossZoneCheck == "False":
            # cross-zone balancing disabled -> active FAILED finding
            finding = {
                "SchemaVersion": "2018-10-08",
                "Id": clbArn + "/classic-loadbalancer-cross-zone-check",
                "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
                "GeneratorId": clbArn,
                "AwsAccountId": awsAccountId,
                "Types": ["Software and Configuration Checks/AWS Security Best Practices"],
                "FirstObservedAt": iso8601Time,
                "CreatedAt": iso8601Time,
                "UpdatedAt": iso8601Time,
                "Severity": {"Label": "LOW"},
                "Confidence": 99,
                "Title": "[ELB.3] Classic load balancers should have cross-zone load balancing configured",
                "Description": "Classic load balancer "
                + clbName
                + " does not have cross-zone load balancing configured. Refer to the remediation instructions to remediate this behavior",
                "Remediation": {
                    "Recommendation": {
                        "Text": "For more information on cross-zone load balancing refer to the Configure Cross-Zone Load Balancing for Your Classic Load Balancer section of the Classic Load Balancers User Guide.",
                        "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-crosszone-lb.html",
                    }
                },
                "ProductFields": {"Product Name": "ElectricEye"},
                "Resources": [
                    {
                        "Type": "AwsElbLoadBalancer",
                        "Id": clbArn,
                        "Partition": awsPartition,
                        "Region": awsRegion,
                        "Details": {"Other": {"LoadBalancerName": clbName}},
                    }
                ],
                "Compliance": {
                    "Status": "FAILED",
                    "RelatedRequirements": [
                        "NIST CSF ID.BE-5",
                        "NIST CSF PR.PT-5",
                        "NIST SP 800-53 CP-2",
                        "NIST SP 800-53 CP-11",
                        "NIST SP 800-53 SA-13",
                        "NIST SP 800-53 SA14",
                        "AICPA TSC CC3.1",
                        "AICPA TSC A1.2",
                        "ISO 27001:2013 A.11.1.4",
                        "ISO 27001:2013 A.17.1.1",
                        "ISO 27001:2013 A.17.1.2",
                        "ISO 27001:2013 A.17.2.1",
                    ],
                },
                "Workflow": {"Status": "NEW"},
                "RecordState": "ACTIVE",
            }
            yield finding
        else:
            # cross-zone balancing enabled -> archived PASSED finding
            finding = {
                "SchemaVersion": "2018-10-08",
                "Id": clbArn + "/classic-loadbalancer-cross-zone-check",
                "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
                "GeneratorId": clbArn,
                "AwsAccountId": awsAccountId,
                "Types": ["Software and Configuration Checks/AWS Security Best Practices"],
                "FirstObservedAt": iso8601Time,
                "CreatedAt": iso8601Time,
                "UpdatedAt": iso8601Time,
                "Severity": {"Label": "INFORMATIONAL"},
                "Confidence": 99,
                "Title": "[ELB.3] Classic load balancers should have cross-zone load balancing configured",
                "Description": "Classic load balancer "
                + clbName
                + " has cross-zone load balancing configured.",
                "Remediation": {
                    "Recommendation": {
                        "Text": "For more information on cross-zone load balancing refer to the Configure Cross-Zone Load Balancing for Your Classic Load Balancer section of the Classic Load Balancers User Guide.",
                        "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-crosszone-lb.html",
                    }
                },
                "ProductFields": {"Product Name": "ElectricEye"},
                "Resources": [
                    {
                        "Type": "AwsElbLoadBalancer",
                        "Id": clbArn,
                        "Partition": awsPartition,
                        "Region": awsRegion,
                        "Details": {"Other": {"LoadBalancerName": clbName}},
                    }
                ],
                "Compliance": {
                    "Status": "PASSED",
                    "RelatedRequirements": [
                        "NIST CSF ID.BE-5",
                        "NIST CSF PR.PT-5",
                        "NIST SP 800-53 CP-2",
                        "NIST SP 800-53 CP-11",
                        "NIST SP 800-53 SA-13",
                        "NIST SP 800-53 SA14",
                        "AICPA TSC CC3.1",
                        "AICPA TSC A1.2",
                        "ISO 27001:2013 A.11.1.4",
                        "ISO 27001:2013 A.17.1.1",
                        "ISO 27001:2013 A.17.1.2",
                        "ISO 27001:2013 A.17.2.1",
                    ],
                },
                "Workflow": {"Status": "RESOLVED"},
                "RecordState": "ARCHIVED",
            }
            yield finding
@registry.register_check("elb")
def clb_connection_draining_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
    """[ELB.4] Classic load balancers should have connection draining configured"""
    # Generator: yields one ASFF finding per classic load balancer — FAILED (LOW)
    # when connection draining is disabled, PASSED otherwise.
    # loop through classic load balancers
    response = elb.describe_load_balancers()
    for classicbalancer in response["LoadBalancerDescriptions"]:
        clbName = str(classicbalancer["LoadBalancerName"])
        clbArn = f"arn:{awsPartition}:elasticloadbalancing:{awsRegion}:{awsAccountId}:loadbalancer/{clbName}"
        response = elb.describe_load_balancer_attributes(LoadBalancerName=clbName)
        # Stringified boolean ("True"/"False") of the ConnectionDraining attribute.
        connectionDrainCheck = str(
            response["LoadBalancerAttributes"]["ConnectionDraining"]["Enabled"]
        )
        iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
        if connectionDrainCheck == "False":
            finding = {
                "SchemaVersion": "2018-10-08",
                "Id": clbArn + "/classic-loadbalancer-connection-draining-check",
                "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
                "GeneratorId": clbArn,
                "AwsAccountId": awsAccountId,
                "Types": ["Software and Configuration Checks/AWS Security Best Practices"],
                "FirstObservedAt": iso8601Time,
                "CreatedAt": iso8601Time,
                "UpdatedAt": iso8601Time,
                "Severity": {"Label": "LOW"},
                "Confidence": 99,
                "Title": "[ELB.4] Classic load balancers should have connection draining configured",
                "Description": "Classic load balancer "
                + clbName
                + " does not have connection draining configured. Refer to the remediation instructions to remediate this behavior",
                "Remediation": {
                    "Recommendation": {
                        "Text": "For more information on connection draining refer to the Configure Connection Draining for Your Classic Load Balancer section of the Classic Load Balancers User Guide.",
                        "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-conn-drain.html",
                    }
                },
                "ProductFields": {"Product Name": "ElectricEye"},
                "Resources": [
                    {
                        "Type": "AwsElbLoadBalancer",
                        "Id": clbArn,
                        "Partition": awsPartition,
                        "Region": awsRegion,
                        "Details": {"Other": {"LoadBalancerName": clbName}},
                    }
                ],
                "Compliance": {
                    "Status": "FAILED",
                    "RelatedRequirements": [
                        "NIST CSF ID.BE-5",
                        "NIST CSF PR.PT-5",
                        "NIST SP 800-53 CP-2",
                        "NIST SP 800-53 CP-11",
                        "NIST SP 800-53 SA-13",
                        "NIST SP 800-53 SA14",
                        "AICPA TSC CC3.1",
                        "AICPA TSC A1.2",
                        "ISO 27001:2013 A.11.1.4",
                        "ISO 27001:2013 A.17.1.1",
                        "ISO 27001:2013 A.17.1.2",
                        "ISO 27001:2013 A.17.2.1",
                    ],
                },
                "Workflow": {"Status": "NEW"},
                "RecordState": "ACTIVE",
            }
            yield finding
        else:
            finding = {
                "SchemaVersion": "2018-10-08",
                "Id": clbArn + "/classic-loadbalancer-connection-draining-check",
                "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
                "GeneratorId": clbArn,
                "AwsAccountId": awsAccountId,
                "Types": ["Software and Configuration Checks/AWS Security Best Practices"],
                "FirstObservedAt": iso8601Time,
                "CreatedAt": iso8601Time,
                "UpdatedAt": iso8601Time,
                "Severity": {"Label": "INFORMATIONAL"},
                "Confidence": 99,
                "Title": "[ELB.4] Classic load balancers should have connection draining configured",
                # Bug fix: the PASSED description previously read "does not have
                # connection draining configured.", contradicting the PASSED status.
                "Description": "Classic load balancer "
                + clbName
                + " has connection draining configured.",
                "Remediation": {
                    "Recommendation": {
                        "Text": "For more information on connection draining refer to the Configure Connection Draining for Your Classic Load Balancer section of the Classic Load Balancers User Guide.",
                        "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-conn-drain.html",
                    }
                },
                "ProductFields": {"Product Name": "ElectricEye"},
                "Resources": [
                    {
                        "Type": "AwsElbLoadBalancer",
                        "Id": clbArn,
                        "Partition": awsPartition,
                        "Region": awsRegion,
                        "Details": {"Other": {"LoadBalancerName": clbName}},
                    }
                ],
                "Compliance": {
                    "Status": "PASSED",
                    "RelatedRequirements": [
                        "NIST CSF ID.BE-5",
                        "NIST CSF PR.PT-5",
                        "NIST SP 800-53 CP-2",
                        "NIST SP 800-53 CP-11",
                        "NIST SP 800-53 SA-13",
                        "NIST SP 800-53 SA14",
                        "AICPA TSC CC3.1",
                        "AICPA TSC A1.2",
                        "ISO 27001:2013 A.11.1.4",
                        "ISO 27001:2013 A.17.1.1",
                        "ISO 27001:2013 A.17.1.2",
                        "ISO 27001:2013 A.17.2.1",
                    ],
                },
                "Workflow": {"Status": "RESOLVED"},
                "RecordState": "ARCHIVED",
            }
            yield finding
@registry.register_check("elb")
def clb_access_logging_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
    """[ELB.5] Classic load balancers should enable access logging"""
    # Generator: yields one ASFF finding per classic load balancer — FAILED (MEDIUM)
    # when access logging is disabled, PASSED otherwise.
    # loop through classic load balancers
    response = elb.describe_load_balancers()
    for classicbalancer in response["LoadBalancerDescriptions"]:
        clbName = str(classicbalancer["LoadBalancerName"])
        clbArn = f"arn:{awsPartition}:elasticloadbalancing:{awsRegion}:{awsAccountId}:loadbalancer/{clbName}"
        response = elb.describe_load_balancer_attributes(LoadBalancerName=clbName)
        # Stringified boolean ("True"/"False") of the AccessLog attribute.
        accessLogCheck = str(response["LoadBalancerAttributes"]["AccessLog"]["Enabled"])
        iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
        if accessLogCheck == "False":
            finding = {
                "SchemaVersion": "2018-10-08",
                "Id": clbArn + "/classic-loadbalancer-access-logging-check",
                "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
                "GeneratorId": clbArn,
                "AwsAccountId": awsAccountId,
                "Types": ["Software and Configuration Checks/AWS Security Best Practices"],
                "FirstObservedAt": iso8601Time,
                "CreatedAt": iso8601Time,
                "UpdatedAt": iso8601Time,
                "Severity": {"Label": "MEDIUM"},
                "Confidence": 99,
                "Title": "[ELB.5] Classic load balancers should enable access logging",
                "Description": "Classic load balancer "
                + clbName
                + " does not have access logging enabled. Refer to the remediation instructions to remediate this behavior",
                "Remediation": {
                    "Recommendation": {
                        "Text": "For more information on access logging refer to the Access Logs for Your Classic Load Balancer section of the Classic Load Balancers User Guide.",
                        "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/access-log-collection.html",
                    }
                },
                "ProductFields": {"Product Name": "ElectricEye"},
                "Resources": [
                    {
                        "Type": "AwsElbLoadBalancer",
                        "Id": clbArn,
                        "Partition": awsPartition,
                        "Region": awsRegion,
                        "Details": {"Other": {"LoadBalancerName": clbName}},
                    }
                ],
                "Compliance": {
                    "Status": "FAILED",
                    "RelatedRequirements": [
                        "NIST CSF DE.AE-3",
                        "NIST SP 800-53 AU-6",
                        "NIST SP 800-53 CA-7",
                        "NIST SP 800-53 IR-4",
                        "NIST SP 800-53 IR-5",
                        "NIST SP 800-53 IR-8",
                        "NIST SP 800-53 SI-4",
                        "AICPA TSC CC7.2",
                        "ISO 27001:2013 A.12.4.1",
                        "ISO 27001:2013 A.16.1.7"
                    ]
                },
                "Workflow": {"Status": "NEW"},
                "RecordState": "ACTIVE"
            }
            yield finding
        else:
            finding = {
                "SchemaVersion": "2018-10-08",
                "Id": clbArn + "/classic-loadbalancer-access-logging-check",
                "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
                "GeneratorId": clbArn,
                "AwsAccountId": awsAccountId,
                "Types": ["Software and Configuration Checks/AWS Security Best Practices"],
                "FirstObservedAt": iso8601Time,
                "CreatedAt": iso8601Time,
                "UpdatedAt": iso8601Time,
                "Severity": {"Label": "INFORMATIONAL"},
                "Confidence": 99,
                "Title": "[ELB.5] Classic load balancers should enable access logging",
                # Bug fix: the PASSED description previously read "does not have access
                # logging enabled.", contradicting the PASSED status.
                "Description": "Classic load balancer "
                + clbName
                + " has access logging enabled.",
                "Remediation": {
                    "Recommendation": {
                        "Text": "For more information on access logging refer to the Access Logs for Your Classic Load Balancer section of the Classic Load Balancers User Guide.",
                        "Url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/access-log-collection.html",
                    }
                },
                "ProductFields": {"Product Name": "ElectricEye"},
                "Resources": [
                    {
                        "Type": "AwsElbLoadBalancer",
                        "Id": clbArn,
                        "Partition": awsPartition,
                        "Region": awsRegion,
                        "Details": {"Other": {"LoadBalancerName": clbName}},
                    }
                ],
                "Compliance": {
                    "Status": "PASSED",
                    "RelatedRequirements": [
                        "NIST CSF DE.AE-3",
                        "NIST SP 800-53 AU-6",
                        "NIST SP 800-53 CA-7",
                        "NIST SP 800-53 IR-4",
                        "NIST SP 800-53 IR-5",
                        "NIST SP 800-53 IR-8",
                        "NIST SP 800-53 SI-4",
                        "AICPA TSC CC7.2",
                        "ISO 27001:2013 A.12.4.1",
                        "ISO 27001:2013 A.16.1.7"
                    ]
                },
                "Workflow": {"Status": "RESOLVED"},
                "RecordState": "ARCHIVED"
            }
            yield finding
| 52.753943
| 225
| 0.474795
| 2,735
| 33,446
| 5.789762
| 0.107495
| 0.037512
| 0.033344
| 0.036123
| 0.911778
| 0.902494
| 0.896116
| 0.893653
| 0.891127
| 0.886833
| 0
| 0.059586
| 0.418944
| 33,446
| 634
| 226
| 52.753943
| 0.755223
| 0.041021
| 0
| 0.825796
| 0
| 0.033501
| 0.408861
| 0.062008
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008375
| false
| 0.011725
| 0.005025
| 0
| 0.0134
| 0.001675
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
33314d11fe613ecbffce19b3743d2155cd43a2f3
| 59,208
|
py
|
Python
|
textnn/utils/encoding/test/test_text.py
|
tongr/TextNN
|
a0294a197d3be284177214e8f019e9fed13dff1a
|
[
"Apache-2.0"
] | 1
|
2019-03-08T12:12:45.000Z
|
2019-03-08T12:12:45.000Z
|
textnn/utils/encoding/test/test_text.py
|
tongr/TextNN
|
a0294a197d3be284177214e8f019e9fed13dff1a
|
[
"Apache-2.0"
] | 16
|
2019-02-14T11:51:30.000Z
|
2019-06-11T08:25:53.000Z
|
textnn/utils/encoding/test/test_text.py
|
tongr/TextNN
|
a0294a197d3be284177214e8f019e9fed13dff1a
|
[
"Apache-2.0"
] | null | null | null |
from textnn.utils.encoding.text import *
from pytest import approx, raises
# texts from https://en.wikipedia.org/wiki/Python_(programming_language)
# Fixture corpus used to build the encoder vocabulary in the tests below.
corpus = [
    "Python is an interpreted, high-level, general-purpose programming language.",
    "Created by Guido van Rossum and first released in 1991, Python has a design philosophy that emphasizes code "
    "readability, notably using significant whitespace.",
    "It provides constructs that enable clear programming on both small and large scales.",
    "Van Rossum led the language community until stepping down as leader in July 2018.",
    "Python features a dynamic type system and automatic memory management.",
    "It supports multiple programming paradigms, including object-oriented, imperative, functional and procedural, and "
    "has a large and comprehensive standard library.",
    "Python interpreters are available for many operating systems.",
    "CPython, the reference implementation of Python, is open source software and has a community-based development "
    "model, as do nearly all of Python's other implementations.",
    "Python and CPython are managed by the non-profit Python Software Foundation.",
]
# Held-out sentence encoded by the tests; relative to the corpus vocabulary it
# contains two OOV tokens ("multi" and "paradigm").
test_sentence = "Python is a multi-paradigm programming language."
def test_sow_encoder_default():
    """Binary (set-of-words) encoding: repeated OOV hits collapse into a single 1."""
    enc = BowEncoder(mode="binary")
    enc.prepare(corpus, show_progress=False)

    encoded = enc.encode([test_sentence], show_progress=False)

    # one text encoded against the full 100-word vocabulary
    assert encoded.shape == (1, 100)
    # 7 tokens, but the two OOV tokens share one binary slot -> 6 non-zero entries
    assert np.sum(encoded) == 6
    # 'multi' and 'paradigm' are out of vocabulary (index 1 is the OOV slot)
    assert encoded[0, 1] == 1
    # every known word of the sentence is flagged exactly once
    for word in ("python", "is", "a", "programming", "language"):
        assert encoded[0, enc.word_to_index(word)] == 1

    # decoding restores a word -> binary-frequency mapping
    decoded = enc.decode(encoded, 0, show_progress=False, ignore_zero_freq=True)
    assert decoded == {
        "<OOV>": 1,
        "python": 1,
        "is": 1,
        "a": 1,
        "programming": 1,
        "language": 1,
    }
def test_sow_encoder_limit_vocab():
    """Binary encoding with the vocabulary capped at 8 entries."""
    # vocabulary of 8 entries: the reserved <UNUSED> and <OOV> slots plus the six
    # most frequent corpus words: and(8), python(7), a(4), programming(3), has(3), the(3)
    enc = BowEncoder(limit_vocabulary=8, mode="binary")
    enc.prepare(corpus, show_progress=False)

    encoded = enc.encode([test_sentence], show_progress=False)

    # one text against the 8-entry vocabulary
    assert encoded.shape == (1, 8)
    # 7 tokens; the four OOV tokens (is, multi, paradigm, language) collapse
    # into one binary slot -> 4 non-zero entries
    assert np.sum(encoded) == 4
    assert encoded[0, 1] == 1
    # the in-vocabulary words are each flagged once
    for word in ("python", "a", "programming"):
        assert encoded[0, enc.word_to_index(word)] == 1

    decoded = enc.decode(encoded, 0, show_progress=False, ignore_zero_freq=True)
    assert decoded == {
        "<OOV>": 1,
        "python": 1,
        "a": 1,
        "programming": 1,
    }
def test_sow_encoder_limit_vocab_and_top_words():
    """Binary encoding with the 3 most frequent words skipped and the vocabulary capped at 20."""
    # build a vocab of size 20 including:
    # - reserved token <UNUSED> and <OOV>
    # - plus the top 18 remaining words in the corpus:
    #   programming(3), has(3), the(3), is(2), language(2), by(2), van(2), rossum(2), in(2), that(2), it(2),
    #   large(2), community(2), as(2), are(2), cpython(2), of(2), software(2)
    # - ignored words (top 3): and(8), python(7), a(4)
    encoder = BowEncoder(skip_top_words=3, limit_vocabulary=20, mode="binary")
    encoder.prepare(corpus, show_progress=False)
    # encode test sentence
    encoded_test_sentences = encoder.encode([test_sentence], show_progress=False)
    # vocabulary consists of overall 20 words and the test set contains only one text
    assert encoded_test_sentences.shape == (1, 20)
    # test sentence contains 7 words, but because OOV occurs 4 times and reduces to a binary value of 1, it sums up to 4
    assert np.sum(encoded_test_sentences) == 4
    # four of them are OOV (multi, paradigm, and the skipped top words python and a)
    assert encoded_test_sentences[0, 1] == 1
    # is occurs once
    assert encoded_test_sentences[0, encoder.word_to_index("is")] == 1
    # programming occurs once
    assert encoded_test_sentences[0, encoder.word_to_index("programming")] == 1
    # language occurs once
    assert encoded_test_sentences[0, encoder.word_to_index("language")] == 1
    # decode
    bow_dict = encoder.decode(encoded_test_sentences, 0, show_progress=False, ignore_zero_freq=True)
    assert bow_dict == {
        "<OOV>": 1,
        "is": 1,
        "programming": 1,
        "language": 1,
    }
def test_sow_encoder_limit_vocab_and_top_words_oov_update_corner_cases():
    """Check how skip_top_words=1 (removal of 'and') feeds into the binary OOV slot."""
    encoder = BowEncoder(skip_top_words=1, limit_vocabulary=60, mode="binary")
    encoder.prepare(corpus, show_progress=False)
    # here we test the four cases, where the OOV is actually (or not) influenced by skip_top_words=1 (removal of and):
    # - corpus[0] contains no OOV word(s) and does not contain 'and'
    # - corpus[1] contains no OOV word(s) and also contains 'and'
    # - corpus[4] contains OOV word(s) and also contains 'and'
    # - test_sentence contains OOV word(s) but does not contain 'and'
    encoded_test_sentences = encoder.encode([corpus[0], corpus[1], corpus[4], test_sentence],
                                            show_progress=False)
    # no OOV + no 'and' -> the OOV slot stays empty
    assert encoded_test_sentences[0, 1] == 0
    # no OOV + 'and' -> the skipped top word alone sets the OOV slot
    assert encoded_test_sentences[1, 1] == 1
    # OOV + 'and' -> both contribute, binary slot is still 1
    assert encoded_test_sentences[2, 1] == 1
    # OOV + no 'and' -> OOV word(s) alone set the slot
    assert encoded_test_sentences[3, 1] == 1
def test_bow_encoder_default():
    """Count-mode bag-of-words: each entry holds the raw term frequency."""
    enc = BowEncoder(mode="count")
    enc.prepare(corpus, show_progress=False)

    encoded = enc.encode([test_sentence], show_progress=False)

    # one text against the full 100-word vocabulary
    assert encoded.shape == (1, 100)
    # all 7 tokens are counted
    assert np.sum(encoded) == 7
    # 'multi' and 'paradigm' both land in the OOV slot
    assert encoded[0, 1] == 2
    # every known word of the sentence appears once
    for word in ("python", "is", "a", "programming", "language"):
        assert encoded[0, enc.word_to_index(word)] == 1

    # decoding restores a word -> count mapping
    decoded = enc.decode(encoded, 0, show_progress=False, ignore_zero_freq=True)
    assert decoded == {
        "<OOV>": 2,
        "python": 1,
        "is": 1,
        "a": 1,
        "programming": 1,
        "language": 1,
    }
def test_bow_encoder_limit_vocab():
    """Count-mode encoding with the vocabulary capped at 8 entries."""
    # vocabulary of 8 entries: the reserved <UNUSED> and <OOV> slots plus the six
    # most frequent corpus words: and(8), python(7), a(4), programming(3), has(3), the(3)
    enc = BowEncoder(limit_vocabulary=8, mode="count")
    enc.prepare(corpus, show_progress=False)

    encoded = enc.encode([test_sentence], show_progress=False)

    # one text against the 8-entry vocabulary
    assert encoded.shape == (1, 8)
    # all 7 tokens are counted
    assert np.sum(encoded) == 7
    # four of them (is, multi, paradigm, language) accumulate in the OOV slot
    assert encoded[0, 1] == 4
    # the in-vocabulary words appear once each
    for word in ("python", "a", "programming"):
        assert encoded[0, enc.word_to_index(word)] == 1

    decoded = enc.decode(encoded, 0, show_progress=False, ignore_zero_freq=True)
    assert decoded == {
        "<OOV>": 4,
        "python": 1,
        "a": 1,
        "programming": 1,
    }
def test_bow_encoder_limit_vocab_and_top_words():
    """Count-mode encoding with the 3 most frequent words skipped and the vocabulary capped at 20."""
    # build a vocab of size 20 including:
    # - reserved token <UNUSED> and <OOV>
    # - plus the top 18 remaining words in the corpus:
    #   programming(3), has(3), the(3), is(2), language(2), by(2), van(2), rossum(2), in(2), that(2), it(2),
    #   large(2), community(2), as(2), are(2), cpython(2), of(2), software(2)
    # - ignored words (top 3): and(8), python(7), a(4)
    encoder = BowEncoder(skip_top_words=3, limit_vocabulary=20, mode="count")
    encoder.prepare(corpus, show_progress=False)
    # encode test sentence
    encoded_test_sentences = encoder.encode([test_sentence], show_progress=False)
    # vocabulary consists of overall 20 words and the test set contains only one text
    assert encoded_test_sentences.shape == (1, 20)
    # test sentence contains 7 words
    assert np.sum(encoded_test_sentences) == 7
    # four of them are OOV (multi, paradigm, and the skipped top words python and a)
    assert encoded_test_sentences[0, 1] == 4
    # is occurs once
    assert encoded_test_sentences[0, encoder.word_to_index("is")] == 1
    # programming occurs once
    assert encoded_test_sentences[0, encoder.word_to_index("programming")] == 1
    # language occurs once
    assert encoded_test_sentences[0, encoder.word_to_index("language")] == 1
    # decode
    bow_dict = encoder.decode(encoded_test_sentences, 0, show_progress=False, ignore_zero_freq=True)
    assert bow_dict == {
        "<OOV>": 4,
        "is": 1,
        "programming": 1,
        "language": 1,
    }
def test_freq_encoder_default():
    """Freq-mode bag-of-words: entries are term counts divided by sentence length."""
    enc = BowEncoder(mode="freq")
    enc.prepare(corpus, show_progress=False)

    encoded = enc.encode([test_sentence], show_progress=False)

    # one text against the full 100-word vocabulary
    assert encoded.shape == (1, 100)
    # relative frequencies of a single sentence always sum to 1
    assert np.sum(encoded) == approx(1, rel=1e-3)
    # 'multi' and 'paradigm' are OOV: 2 of the 7 tokens
    assert encoded[0, 1] == approx(2/7., rel=1e-3)
    # every known word appears once, i.e. with relative frequency 1/7
    for word in ("python", "is", "a", "programming", "language"):
        assert encoded[0, enc.word_to_index(word)] == approx(1/7., rel=1e-3)

    # decoding restores a word -> relative-frequency mapping
    decoded = enc.decode(encoded, 0, show_progress=False, ignore_zero_freq=True)
    assert decoded == approx({
        "<OOV>": 2/7.,
        "python": 1/7.,
        "is": 1/7.,
        "a": 1/7.,
        "programming": 1/7.,
        "language": 1/7.,
    }, rel=1e-3)
def test_freq_encoder_limit_vocab():
    """Freq-mode encoding with the vocabulary capped at 8 entries."""
    # build a vocab of size 8 including:
    # - reserved token <UNUSED> and <OOV>
    # - plus the top 6 words in the corpus: and(8), python(7), a(4), programming(3), has(3), and the(3)
    encoder = BowEncoder(limit_vocabulary=8, mode="freq")
    encoder.prepare(corpus, show_progress=False)
    # encode test sentence
    encoded_test_sentences = encoder.encode([test_sentence], show_progress=False)
    # vocabulary consists of overall 8 words and the test set contains only one text
    assert encoded_test_sentences.shape == (1, 8)
    # test sentence has a relative size of 1 (all 7 words)
    assert np.sum(encoded_test_sentences) == approx(1, rel=1e-3)
    # four of them are OOV (is, multi, paradigm, language) ...
    assert encoded_test_sentences[0, 1] == approx(4/7., rel=1e-3)
    # python occurs once (out of 7 words)
    assert encoded_test_sentences[0, encoder.word_to_index("python")] == approx(1/7., rel=1e-3)
    # a occurs once (out of 7 words)
    assert encoded_test_sentences[0, encoder.word_to_index("a")] == approx(1/7., rel=1e-3)
    # programming occurs once (out of 7 words)
    assert encoded_test_sentences[0, encoder.word_to_index("programming")] == approx(1/7., rel=1e-3)
    # decode
    bow_dict = encoder.decode(encoded_test_sentences, 0, show_progress=False, ignore_zero_freq=True)
    assert bow_dict == approx({
        "<OOV>": 4/7.,
        "python": 1/7.,
        "a": 1/7.,
        "programming": 1/7.,
    }, rel=1e-3)
def test_freq_encoder_limit_vocab_and_top_words():
    """Freq-mode encoding with the 3 most frequent words skipped and the vocabulary capped at 20."""
    # build a vocab of size 20 including:
    # - reserved token <UNUSED> and <OOV>
    # - plus the top 18 remaining words in the corpus:
    #   programming(3), has(3), the(3), is(2), language(2), by(2), van(2), rossum(2), in(2), that(2), it(2),
    #   large(2), community(2), as(2), are(2), cpython(2), of(2), software(2)
    # - ignored words (top 3): and(8), python(7), a(4)
    encoder = BowEncoder(skip_top_words=3, limit_vocabulary=20, mode="freq")
    encoder.prepare(corpus, show_progress=False)
    # encode test sentence
    encoded_test_sentences = encoder.encode([test_sentence], show_progress=False)
    # vocabulary consists of overall 20 words and the test set contains only one text
    assert encoded_test_sentences.shape == (1, 20)
    # test sentence has a relative size of 1 (all 7 words)
    assert np.sum(encoded_test_sentences) == approx(1, rel=1e-3)
    # four of them are OOV (multi, paradigm, and the skipped top words python and a)
    assert encoded_test_sentences[0, 1] == approx(4/7., rel=1e-3)
    # is occurs once
    assert encoded_test_sentences[0, encoder.word_to_index("is")] == approx(1/7., rel=1e-3)
    # programming occurs once
    assert encoded_test_sentences[0, encoder.word_to_index("programming")] == approx(1/7., rel=1e-3)
    # language occurs once
    assert encoded_test_sentences[0, encoder.word_to_index("language")] == approx(1/7., rel=1e-3)
    # decode
    bow_dict = encoder.decode(encoded_test_sentences, 0, show_progress=False, ignore_zero_freq=True)
    assert bow_dict == approx({
        "<OOV>": 4/7.,
        "is": 1/7.,
        "programming": 1/7.,
        "language": 1/7.,
    }, rel=1e-3)
def test_tfidf_encoder_default():
    """Tf-idf bag-of-words encoding against the full corpus vocabulary."""
    enc = BowEncoder(mode="tfidf")
    enc.prepare(corpus, show_progress=False)

    encoded = enc.encode([test_sentence], show_progress=False)

    # one text against the full 100-word vocabulary
    assert encoded.shape == (1, 100)
    # overall tf-idf mass of the 7-token sentence
    assert np.sum(encoded) == approx(9.706, rel=1e-3)
    # expected per-word tf-idf weights; OOV covers 'multi' and 'paradigm'
    expected = {
        "<OOV>": 3.898,
        "python": 0.826,
        "is": 1.386,
        "a": 1.029,
        "programming": 1.178,
        "language": 1.386,
    }
    assert encoded[0, 1] == approx(expected["<OOV>"], rel=1e-3)
    for word in ("python", "is", "a", "programming", "language"):
        assert encoded[0, enc.word_to_index(word)] == approx(expected[word], rel=1e-3)

    # decoding restores the word -> tf-idf mapping
    decoded = enc.decode(encoded, 0, show_progress=False, ignore_zero_freq=True)
    assert decoded == approx(expected, rel=1e-3)
def test_tfidf_encoder_limit_vocab():
    """Tf-idf encoding with the vocabulary capped at 8 entries."""
    # vocabulary of 8 entries: the reserved <UNUSED> and <OOV> slots plus the six
    # most frequent corpus words: and(8), python(7), a(4), programming(3), has(3), the(3)
    enc = BowEncoder(limit_vocabulary=8, mode="tfidf")
    enc.prepare(corpus, show_progress=False)

    encoded = enc.encode([test_sentence], show_progress=False)

    # one text against the 8-entry vocabulary
    assert encoded.shape == (1, 8)
    # overall tf-idf mass of the 7-token sentence
    assert np.sum(encoded) == approx(8.529, rel=1e-3)
    # 'is', 'multi', 'paradigm' and 'language' accumulate in the OOV slot
    assert encoded[0, 1] == approx(5.494, rel=1e-3)
    # expected weights of the in-vocabulary words (each occurs once)
    expected = {"python": 0.826, "a": 1.029, "programming": 1.178}
    for word, weight in expected.items():
        assert encoded[0, enc.word_to_index(word)] == approx(weight, rel=1e-3)

    decoded = enc.decode(encoded, 0, show_progress=False, ignore_zero_freq=True)
    assert decoded == approx({
        "<OOV>": 5.494,
        "python": 0.826,
        "a": 1.029,
        "programming": 1.178,
    }, rel=1e-3)
def test_tfidf_encoder_limit_vocab_and_top_words():
    """Tf-idf encoding with the 3 most frequent words skipped and the vocabulary capped at 20."""
    # build a vocab of size 20 including:
    # - reserved token <UNUSED> and <OOV>
    # - plus the top 18 remaining words in the corpus:
    #   programming(3), has(3), the(3), is(2), language(2), by(2), van(2), rossum(2), in(2), that(2), it(2),
    #   large(2), community(2), as(2), are(2), cpython(2), of(2), software(2)
    # - ignored words (top 3): and(8), python(7), a(4)
    encoder = BowEncoder(skip_top_words=3, limit_vocabulary=20, mode="tfidf")
    encoder.prepare(corpus, show_progress=False)
    # encode test sentence
    encoded_test_sentences = encoder.encode([test_sentence], show_progress=False)
    # vocabulary consists of overall 20 words and the test set contains only one text
    assert encoded_test_sentences.shape == (1, 20)
    # test sentence tfidf sum (over all 7 words)
    assert np.sum(encoded_test_sentences) == approx(9.706, rel=1e-3)
    # four of them are OOV (multi, paradigm, python, and a), however, current tfidf aggregation for oov is broken
    # TODO fix oov aggregation for top k (currently only implemented as: tfidf(OOV)+tfidf(top1)+tfidf(top2)+...)
    assert encoded_test_sentences[0, 1] > 3.898
    # is occurs once
    assert encoded_test_sentences[0, encoder.word_to_index("is")] == approx(1.386, rel=1e-3)
    # programming occurs once
    assert encoded_test_sentences[0, encoder.word_to_index("programming")] == approx(1.178, rel=1e-3)
    # language occurs once
    assert encoded_test_sentences[0, encoder.word_to_index("language")] == approx(1.386, rel=1e-3)
    # decode
    bow_dict = encoder.decode(encoded_test_sentences, 0, show_progress=False, ignore_zero_freq=True)
    assert bow_dict == approx({
        "<OOV>": encoded_test_sentences[0, 1],
        "is": 1.386,
        "programming": 1.178,
        "language": 1.386,
    }, rel=1e-3)
def test_sequence_encoder():
    """Default token-sequence encoding: sentences are padded to the longest one in the batch.

    Index layout (see the assertions below): 4 reserved tokens (<PAD>, <OOV>, <START>,
    <END>) followed by corpus words by frequency rank, i.e. index = rank + 4.
    """
    encoder = TokenSequenceEncoder()
    encoder.prepare(corpus, show_progress=False)
    # encode test sentence
    encoded_test_sentences = encoder.encode([test_sentence, "and"], show_progress=False)
    # sentence consists of 7 words + <START> + <END> token
    assert encoded_test_sentences.shape == (2, 9)
    # first word is '<START>'
    assert encoded_test_sentences[0, 0] == encoder.start_token_index
    # second word is 'Python' (2nd most common + 4 reserved token)
    assert encoded_test_sentences[0, 1] == 5
    # third word is 'is' (7th most common + 4 reserved token)
    assert encoded_test_sentences[0, 2] == 10
    # fourth word is 'a' (3rd most common + 4 reserved token)
    assert encoded_test_sentences[0, 3] == 6
    # fifth word is 'multi' (unknown -> OOV)
    assert encoded_test_sentences[0, 4] == encoder.oov_token_index
    # sixth word is 'paradigm' (unknown -> OOV)
    assert encoded_test_sentences[0, 5] == encoder.oov_token_index
    # seventh word is 'programming' (4th most common + 4 reserved token)
    assert encoded_test_sentences[0, 6] == 7
    # eighth word is 'language' (8th most common + 4 reserved token)
    assert encoded_test_sentences[0, 7] == 11
    # last word is '<END>'
    assert encoded_test_sentences[0, 8] == encoder.end_token_index
    # the short sentence is left-padded with '<PAD>' (6 tokens)
    np.testing.assert_array_equal(
        encoded_test_sentences[1, :6],
        np.array([encoder.padding_token_index]*6))
    # first word after the padding is '<START>'
    assert encoded_test_sentences[1, 6] == encoder.start_token_index
    # second word is 'and' (most common + 4 reserved token)
    assert encoded_test_sentences[1, 7] == 4
    # last word is '<END>'
    assert encoded_test_sentences[1, 8] == encoder.end_token_index
    # decode
    sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False, show_start_end=False)
    assert sequence_list == ["python", "is", "a", "<OOV>", "<OOV>", "programming", "language"]
    sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False, show_start_end=False)
    assert sequence_list == ["and"]
    # decode w/ control chars
    sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False, show_padding=True)
    assert sequence_list == ["<START>", "python", "is", "a", "<OOV>", "<OOV>", "programming", "language", "<END>"]
    sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False, show_padding=True)
    assert sequence_list == ["<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<START>", "and", "<END>"]
def test_sequence_encoder_limit_vocab():
    """Token-sequence encoding with the vocabulary capped at 10 entries."""
    # build a vocab of size 10 including:
    # - reserved token <PAD>, <OOV>, <START>, and <END>
    # - plus the top 6 words in the corpus: and(8), python(7), a(4), programming(3), has(3), and the(3)
    encoder = TokenSequenceEncoder(limit_vocabulary=10)
    encoder.prepare(corpus, show_progress=False)
    # encode test sentence
    encoded_test_sentences = encoder.encode([test_sentence, "and"], show_progress=False)
    # sentence consists of 7 words + <START> token + <END> token
    assert encoded_test_sentences.shape == (2, 9)
    # first word is '<START>'
    assert encoded_test_sentences[0, 0] == encoder.start_token_index
    # second word is 'Python' (2nd most common + 4 reserved token)
    assert encoded_test_sentences[0, 1] == 5
    # third word is 'is' (not in the limited vocab -> OOV)
    assert encoded_test_sentences[0, 2] == encoder.oov_token_index
    # fourth word is 'a' (3rd most common + 4 reserved token)
    assert encoded_test_sentences[0, 3] == 6
    # fifth word is 'multi' (unknown -> OOV)
    assert encoded_test_sentences[0, 4] == encoder.oov_token_index
    # sixth word is 'paradigm' (unknown -> OOV)
    assert encoded_test_sentences[0, 5] == encoder.oov_token_index
    # seventh word is 'programming' (4th most common + 4 reserved token)
    assert encoded_test_sentences[0, 6] == 7
    # eighth word is 'language' (not in the limited vocab -> OOV)
    assert encoded_test_sentences[0, 7] == encoder.oov_token_index
    # last word is '<END>'
    assert encoded_test_sentences[0, 8] == encoder.end_token_index
    # the short sentence is left-padded with '<PAD>' (6 tokens)
    np.testing.assert_array_equal(
        encoded_test_sentences[1, :6],
        np.array([encoder.padding_token_index]*6))
    # first word after the padding is '<START>'
    assert encoded_test_sentences[1, 6] == encoder.start_token_index
    # second word is 'and' (most common + 4 reserved token)
    assert encoded_test_sentences[1, 7] == 4
    # last word is '<END>'
    assert encoded_test_sentences[1, 8] == encoder.end_token_index
    # decode
    sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False, show_start_end=False)
    assert sequence_list == ["python", "<OOV>", "a", "<OOV>", "<OOV>", "programming", "<OOV>"]
    sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False, show_start_end=False)
    assert sequence_list == ["and"]
    # decode w/ control chars
    sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False, show_padding=True)
    assert sequence_list == ["<START>", "python", "<OOV>", "a", "<OOV>", "<OOV>", "programming", "<OOV>", "<END>"]
    sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False, show_padding=True)
    assert sequence_list == ["<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<START>", "and", "<END>"]
def test_sequence_encoder_limit_vocab_and_top_words():
    """Token-sequence encoding with the 3 most frequent words skipped and vocab capped at 22."""
    # build a vocab of size 22 including:
    # - reserved token <PAD>, <OOV>, <START>, and <END>
    # - plus the top 18 remaining words in the corpus:
    #   programming(3), has(3), the(3), is(2), language(2), by(2), van(2), rossum(2), in(2), that(2), it(2),
    #   large(2), community(2), as(2), are(2), cpython(2), of(2), software(2)
    # - ignored words (top 3): and(8), python(7), a(4)
    encoder = TokenSequenceEncoder(skip_top_words=3, limit_vocabulary=22)
    encoder.prepare(corpus, show_progress=False)
    # encode test sentence
    encoded_test_sentences = encoder.encode([test_sentence, "Python"], show_progress=False)
    # sentence consists of 7 words + <START> token + <END> token
    assert encoded_test_sentences.shape == (2, 9)
    # first word is '<START>'
    assert encoded_test_sentences[0, 0] == encoder.start_token_index
    # second word is 'Python' (not in the limited vocab (among top-3) -> OOV)
    assert encoded_test_sentences[0, 1] == encoder.oov_token_index
    # third word is 'is' (7th most common - top-3 words + 4 reserved token)
    assert encoded_test_sentences[0, 2] == 7
    # fourth word is 'a' (not in the limited vocab (among top-3) -> OOV)
    assert encoded_test_sentences[0, 3] == encoder.oov_token_index
    # fifth word is 'multi' (unknown -> OOV)
    assert encoded_test_sentences[0, 4] == encoder.oov_token_index
    # sixth word is 'paradigm' (unknown -> OOV)
    assert encoded_test_sentences[0, 5] == encoder.oov_token_index
    # seventh word is 'programming' (4th most common - top-3 words + 4 reserved token)
    assert encoded_test_sentences[0, 6] == 4
    # eighth word is 'language' (8th most common - top-3 words + 4 reserved token)
    assert encoded_test_sentences[0, 7] == 8
    # last word is '<END>'
    assert encoded_test_sentences[0, 8] == encoder.end_token_index
    # the short sentence is left-padded with '<PAD>' (6 tokens)
    np.testing.assert_array_equal(
        encoded_test_sentences[1, :6],
        np.array([encoder.padding_token_index]*6))
    # first word after the padding is '<START>'
    assert encoded_test_sentences[1, 6] == encoder.start_token_index
    # second word is 'Python' (not in the limited vocab (among top-3) -> OOV)
    assert encoded_test_sentences[1, 7] == encoder.oov_token_index
    # last word is '<END>'
    assert encoded_test_sentences[1, 8] == encoder.end_token_index
    # decode
    sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False, show_start_end=False)
    assert sequence_list == ["<OOV>", "is", "<OOV>", "<OOV>", "<OOV>", "programming", "language"]
    sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False, show_start_end=False)
    assert sequence_list == ["<OOV>"]
    # decode w/ control chars
    sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False, show_padding=True)
    assert sequence_list == ["<START>", "<OOV>", "is", "<OOV>", "<OOV>", "<OOV>", "programming", "language", "<END>"]
    sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False, show_padding=True)
    assert sequence_list == ["<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<START>", "<OOV>", "<END>"]
def test_truncated_sequence_encoder():
    """Sequence encoding with a fixed default_length shorter than the sentence: truncation."""
    encoder = TokenSequenceEncoder(default_length=5)
    encoder.prepare(corpus, show_progress=False)
    # encode test sentence
    encoded_test_sentences = encoder.encode([test_sentence, "and"], show_progress=False)
    # both sentences are padded/truncated to the default length of 5
    assert encoded_test_sentences.shape == (2, 5)
    # first word is '<START>'
    assert encoded_test_sentences[0, 0] == encoder.start_token_index
    # second word is 'Python' (2nd most common + 4 reserved token)
    assert encoded_test_sentences[0, 1] == 5
    # third word is 'is' (7th most common + 4 reserved token)
    assert encoded_test_sentences[0, 2] == 10
    # fourth word is 'a' (3rd most common + 4 reserved token)
    assert encoded_test_sentences[0, 3] == 6
    # last word is '<END>' (the remaining words are truncated away)
    assert encoded_test_sentences[0, 4] == encoder.end_token_index
    # the short sentence is left-padded with '<PAD>' (2 tokens)
    np.testing.assert_array_equal(
        encoded_test_sentences[1, :2],
        np.array([encoder.padding_token_index]*2))
    # first word after the padding is '<START>'
    assert encoded_test_sentences[1, 2] == encoder.start_token_index
    # second word is 'and' (most common + 4 reserved token)
    assert encoded_test_sentences[1, 3] == 4
    # last word is '<END>'
    assert encoded_test_sentences[1, 4] == encoder.end_token_index
    # decode
    sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False)
    assert sequence_list == ["<START>", "python", "is", "a", "<END>"]
    sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False)
    assert sequence_list == ["<PAD>", "<PAD>", "<START>", "and", "<END>"]
    # decode w/o control chars
    sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False, show_start_end=False, show_padding=False)
    assert sequence_list == ["python", "is", "a"]
    sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False, show_start_end=False, show_padding=False)
    assert sequence_list == ["and"]
    # same same but with encoding specific length
    encoded_test_sentences = encoder.encode([test_sentence, "and"], show_progress=False, length=4)
    # the per-call length=4 overrides the default length of 5
    assert encoded_test_sentences.shape == (2, 4)
    # first word is '<START>'
    assert encoded_test_sentences[0, 0] == encoder.start_token_index
    # second word is 'Python' (2nd most common + 4 reserved token)
    assert encoded_test_sentences[0, 1] == 5
    # third word is 'is' (7th most common + 4 reserved token)
    assert encoded_test_sentences[0, 2] == 10
    # last word is '<END>' (the remaining words are truncated away)
    assert encoded_test_sentences[0, 3] == encoder.end_token_index
    # the short sentence is left-padded with '<PAD>' (1 token)
    np.testing.assert_array_equal(
        encoded_test_sentences[1, :1],
        np.array([encoder.padding_token_index]))
    # first word after the padding is '<START>'
    assert encoded_test_sentences[1, 1] == encoder.start_token_index
    # second word is 'and' (most common + 4 reserved token)
    assert encoded_test_sentences[1, 2] == 4
    # last word is '<END>'
    assert encoded_test_sentences[1, 3] == encoder.end_token_index
    # decode
    sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False)
    assert sequence_list == ["<START>", "python", "is", "<END>"]
    sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False)
    assert sequence_list == ["<PAD>", "<START>", "and", "<END>"]
    # decode w/o control chars
    sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False, show_start_end=False, show_padding=False)
    assert sequence_list == ["python", "is"]
    sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False, show_start_end=False, show_padding=False)
    assert sequence_list == ["and"]
def test_padded_sequence_encoder():
    """Sequence encoding with a default_length longer than the sentence: left-padding."""
    encoder = TokenSequenceEncoder(default_length=11)
    encoder.prepare(corpus, show_progress=False)
    # encode test sentence
    encoded_test_sentences = encoder.encode([test_sentence, "and"], show_progress=False)
    # both sentences are padded to the default length of 11
    assert encoded_test_sentences.shape == (2, 11)
    # the test sentence is left-padded with '<PAD>' (2 tokens)
    np.testing.assert_array_equal(
        encoded_test_sentences[0, :2],
        np.array([encoder.padding_token_index]*2))
    # first word is '<START>'
    assert encoded_test_sentences[0, 2] == encoder.start_token_index
    # second word is 'Python' (2nd most common + 4 reserved token)
    assert encoded_test_sentences[0, 3] == 5
    # third word is 'is' (7th most common + 4 reserved token)
    assert encoded_test_sentences[0, 4] == 10
    # fourth word is 'a' (3rd most common + 4 reserved token)
    assert encoded_test_sentences[0, 5] == 6
    # fifth word is 'multi' (unknown -> OOV)
    assert encoded_test_sentences[0, 6] == encoder.oov_token_index
    # sixth word is 'paradigm' (unknown -> OOV)
    assert encoded_test_sentences[0, 7] == encoder.oov_token_index
    # seventh word is 'programming' (4th most common + 4 reserved token)
    assert encoded_test_sentences[0, 8] == 7
    # eighth word is 'language' (8th most common + 4 reserved token)
    assert encoded_test_sentences[0, 9] == 11
    # last word is '<END>'
    assert encoded_test_sentences[0, 10] == encoder.end_token_index
    # the one-word sentence is left-padded with '<PAD>' (8 tokens)
    np.testing.assert_array_equal(
        encoded_test_sentences[1, :8],
        np.array([encoder.padding_token_index]*8))
    # first word after the padding is '<START>'
    assert encoded_test_sentences[1, 8] == encoder.start_token_index
    # second word is 'and' (most common + 4 reserved token)
    assert encoded_test_sentences[1, 9] == 4
    # last word is '<END>'
    assert encoded_test_sentences[1, 10] == encoder.end_token_index
    # decode
    sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False)
    assert sequence_list == ["<PAD>", "<PAD>", "<START>", "python", "is", "a", "<OOV>", "<OOV>", "programming",
                             "language", "<END>"]
    sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False)
    assert sequence_list == ["<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<START>", "and", "<END>"]
    # decode w/o control chars
    sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False, show_start_end=False, show_padding=False)
    assert sequence_list == ["python", "is", "a", "<OOV>", "<OOV>", "programming", "language"]
    sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False, show_start_end=False, show_padding=False)
    assert sequence_list == ["and"]
def test_padded_sequence_encoder_limit_vocab():
    """Encoding/decoding when the vocabulary is capped at 10 entries.

    Words that fall outside the limited vocabulary must encode to the
    <OOV> index even though they occur in the training corpus.
    """
    # build a vocab of size 10 including:
    # - reserved token <PAD>, <OOV>, <START>, and <END>
    # - plus the top 6 words in the corpus: and(8), python(7), a(4), programming(3), has(3), and the(3)
    encoder = TokenSequenceEncoder(default_length=10, limit_vocabulary=10)
    encoder.prepare(corpus, show_progress=False)
    # encode test sentence
    encoded_test_sentences = encoder.encode([test_sentence, "and"], show_progress=False)
    # padding to size 10 and two sentences
    assert encoded_test_sentences.shape == (2, 10)
    # padding with '<PAD>' (1 token)
    np.testing.assert_array_equal(
        encoded_test_sentences[0, :1],
        np.array([encoder.padding_token_index]*1))
    # first word is '<START>'
    assert encoded_test_sentences[0, 1] == encoder.start_token_index
    # second word is 'Python' (2nd most common + 4 reserved token)
    assert encoded_test_sentences[0, 2] == 5
    # third word is 'is' (not in the limited vocab -> OOV)
    assert encoded_test_sentences[0, 3] == encoder.oov_token_index
    # fourth word is 'a' (3rd most common + 4 reserved token)
    assert encoded_test_sentences[0, 4] == 6
    # fifth word is 'multi' (unknown -> OOV)
    assert encoded_test_sentences[0, 5] == encoder.oov_token_index
    # sixth word is 'paradigm' (unknown -> OOV)
    assert encoded_test_sentences[0, 6] == encoder.oov_token_index
    # seventh word is 'programming' (4th most common + 4 reserved token)
    assert encoded_test_sentences[0, 7] == 7
    # eighth word is 'language' (not in the limited vocab -> OOV)
    assert encoded_test_sentences[0, 8] == encoder.oov_token_index
    # last word is '<END>'
    assert encoded_test_sentences[0, 9] == encoder.end_token_index
    # padding with '<PAD>' (7 tokens)
    np.testing.assert_array_equal(
        encoded_test_sentences[1, :7],
        np.array([encoder.padding_token_index]*7))
    # first word after is '<START>'
    assert encoded_test_sentences[1, 7] == encoder.start_token_index
    # second word is 'and' (most common + 4 reserved token)
    assert encoded_test_sentences[1, 8] == 4
    # last word is '<END>'
    assert encoded_test_sentences[1, 9] == encoder.end_token_index
    # decode
    sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False)
    assert sequence_list == ["<PAD>", "<START>", "python", "<OOV>", "a", "<OOV>", "<OOV>", "programming", "<OOV>",
                             "<END>"]
    sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False)
    assert sequence_list == ["<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<START>", "and", "<END>"]
    # decode w/o control chars
    sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False, show_start_end=False, show_padding=False)
    assert sequence_list == ["python", "<OOV>", "a", "<OOV>", "<OOV>", "programming", "<OOV>"]
    sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False, show_start_end=False, show_padding=False)
    assert sequence_list == ["and"]
def test_padded_sequence_encoder_limit_vocab_and_top_words():
    """Encoding/decoding with a capped vocabulary AND the top-3 words skipped.

    Skipped top words behave exactly like unknown words: they encode to <OOV>.
    """
    # build a vocab of size 22 including:
    # - reserved token <PAD>, <OOV>, <START>, and <END>
    # - plus the top 18 words in the corpus (after skipping the top 3):
    # programming(3), has(3), the(3), is(2), language(2), by(2), van(2), rossum(2), in(2), that(2), it(2),
    # large(2), community(2), as(2), are(2), cpython(2), of(2), software(2)
    # - ignored words (top 3): and(8), python(7), a(4)
    encoder = TokenSequenceEncoder(default_length=10, skip_top_words=3, limit_vocabulary=22)
    encoder.prepare(corpus, show_progress=False)
    # encode test sentence
    encoded_test_sentences = encoder.encode([test_sentence, "Python"], show_progress=False)
    # padding to size 10 and two sentences
    assert encoded_test_sentences.shape == (2, 10)
    # padding with '<PAD>' (1 token)
    np.testing.assert_array_equal(
        encoded_test_sentences[0, :1],
        np.array([encoder.padding_token_index]))
    # first word is '<START>'
    assert encoded_test_sentences[0, 1] == encoder.start_token_index
    # second word is 'Python' (not in the limited vocab (among top-3) -> OOV)
    assert encoded_test_sentences[0, 2] == encoder.oov_token_index
    # third word is 'is' (7th most common - top-3 words + 4 reserved token)
    assert encoded_test_sentences[0, 3] == 7
    # fourth word is 'a' (not in the limited vocab (among top-3) -> OOV)
    assert encoded_test_sentences[0, 4] == encoder.oov_token_index
    # fifth word is 'multi' (unknown -> OOV)
    assert encoded_test_sentences[0, 5] == encoder.oov_token_index
    # sixth word is 'paradigm' (unknown -> OOV)
    assert encoded_test_sentences[0, 6] == encoder.oov_token_index
    # seventh word is 'programming' (4th most common - top-3 words + 4 reserved token)
    assert encoded_test_sentences[0, 7] == 4
    # eighth word is 'language' (8th most common - top-3 words + 4 reserved token)
    assert encoded_test_sentences[0, 8] == 8
    # last word is '<END>'
    assert encoded_test_sentences[0, 9] == encoder.end_token_index
    # padding with '<PAD>' (7 tokens)
    np.testing.assert_array_equal(
        encoded_test_sentences[1, :7],
        np.array([encoder.padding_token_index]*7))
    # first word after is '<START>'
    assert encoded_test_sentences[1, 7] == encoder.start_token_index
    # second word is 'and' (not in the limited vocab (among top-3) -> OOV)
    assert encoded_test_sentences[1, 8] == encoder.oov_token_index
    # last word is '<END>'
    assert encoded_test_sentences[1, 9] == encoder.end_token_index
    # decode
    sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False)
    assert sequence_list == ["<PAD>", "<START>", "<OOV>", "is", "<OOV>", "<OOV>", "<OOV>", "programming",
                             "language", "<END>"]
    sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False)
    assert sequence_list == ["<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<START>", "<OOV>", "<END>"]
    # decode w/o control chars
    sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False, show_start_end=False, show_padding=False)
    assert sequence_list == ["<OOV>", "is", "<OOV>", "<OOV>", "<OOV>", "programming", "language"]
    sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False, show_start_end=False, show_padding=False)
    assert sequence_list == ["<OOV>"]
def test_padded_sequence_encoder_limit_vocab_and_top_words_no_start_end_token():
    """Same capped/skipped setup as above, but without <START>/<END> markers.

    All positions shift left by one and padding grows accordingly.
    """
    # build a vocab of size 22 including:
    # - reserved token <PAD>, <OOV>, <START>, and <END>
    # - plus the top 18 words in the corpus (after skipping the top 3):
    # programming(3), has(3), the(3), is(2), language(2), by(2), van(2), rossum(2), in(2), that(2), it(2),
    # large(2), community(2), as(2), are(2), cpython(2), of(2), software(2)
    # - ignored words (top 3): and(8), python(7), a(4)
    encoder = TokenSequenceEncoder(default_length=10, skip_top_words=3, limit_vocabulary=22,
                                   add_start_end_indicators=False)
    encoder.prepare(corpus, show_progress=False)
    # encode test sentence
    encoded_test_sentences = encoder.encode([test_sentence, "Python"], show_progress=False)
    # padding to size 10 and two sentences
    assert encoded_test_sentences.shape == (2, 10)
    # padding with '<PAD>' (3 tokens)
    np.testing.assert_array_equal(
        encoded_test_sentences[0, :3],
        np.array([encoder.padding_token_index]*3))
    # first word is 'Python' (not in the limited vocab (among top-3) -> OOV)
    assert encoded_test_sentences[0, 3] == encoder.oov_token_index
    # second word is 'is' (7th most common - top-3 words + 4 reserved token)
    assert encoded_test_sentences[0, 4] == 7
    # third word is 'a' (not in the limited vocab (among top-3) -> OOV)
    assert encoded_test_sentences[0, 5] == encoder.oov_token_index
    # fourth word is 'multi' (unknown -> OOV)
    assert encoded_test_sentences[0, 6] == encoder.oov_token_index
    # fifth word is 'paradigm' (unknown -> OOV)
    assert encoded_test_sentences[0, 7] == encoder.oov_token_index
    # sixth word is 'programming' (4th most common - top-3 words + 4 reserved token)
    assert encoded_test_sentences[0, 8] == 4
    # seventh word is 'language' (8th most common - top-3 words + 4 reserved token)
    assert encoded_test_sentences[0, 9] == 8
    # padding with '<PAD>' (9 tokens)
    np.testing.assert_array_equal(
        encoded_test_sentences[1, :9],
        np.array([encoder.padding_token_index]*9))
    # first word is 'and' (not in the limited vocab (among top-3) -> OOV)
    assert encoded_test_sentences[1, 9] == encoder.oov_token_index
    # decode
    sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False)
    assert sequence_list == ["<PAD>", "<PAD>", "<PAD>", "<OOV>", "is", "<OOV>", "<OOV>", "<OOV>", "programming",
                             "language"]
    sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False)
    assert sequence_list == ["<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<OOV>"]
    # decode w/o control chars
    sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False, show_padding=False)
    assert sequence_list == ["<OOV>", "is", "<OOV>", "<OOV>", "<OOV>", "programming", "language"]
    sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False, show_padding=False)
    assert sequence_list == ["<OOV>"]
def test_padded_sequence_encoder_limit_vocab_and_top_words_no_start_end_token_pad_end():
    """Same setup as the no-start/end case, but padding appended at the END.

    Content tokens start at position 0 and <PAD> fills the trailing slots.
    """
    # build a vocab of size 22 including:
    # - reserved token <PAD>, <OOV>, <START>, and <END>
    # - plus the top 18 words in the corpus (after skipping the top 3):
    # programming(3), has(3), the(3), is(2), language(2), by(2), van(2), rossum(2), in(2), that(2), it(2),
    # large(2), community(2), as(2), are(2), cpython(2), of(2), software(2)
    # - ignored words (top 3): and(8), python(7), a(4)
    encoder = TokenSequenceEncoder(default_length=10, skip_top_words=3, limit_vocabulary=22,
                                   add_start_end_indicators=False, pad_beginning=False)
    encoder.prepare(corpus, show_progress=False)
    # encode test sentence
    encoded_test_sentences = encoder.encode([test_sentence, "Python"], show_progress=False)
    # padding to size 10 and two sentences
    assert encoded_test_sentences.shape == (2, 10)
    # first word is 'Python' (not in the limited vocab (among top-3) -> OOV)
    assert encoded_test_sentences[0, 0] == encoder.oov_token_index
    # second word is 'is' (7th most common - top-3 words + 4 reserved token)
    assert encoded_test_sentences[0, 1] == 7
    # third word is 'a' (not in the limited vocab (among top-3) -> OOV)
    assert encoded_test_sentences[0, 2] == encoder.oov_token_index
    # fourth word is 'multi' (unknown -> OOV)
    assert encoded_test_sentences[0, 3] == encoder.oov_token_index
    # fifth word is 'paradigm' (unknown -> OOV)
    assert encoded_test_sentences[0, 4] == encoder.oov_token_index
    # sixth word is 'programming' (4th most common - top-3 words + 4 reserved token)
    assert encoded_test_sentences[0, 5] == 4
    # seventh word is 'language' (8th most common - top-3 words + 4 reserved token)
    assert encoded_test_sentences[0, 6] == 8
    # padding with '<PAD>' (3 tokens, at the end)
    np.testing.assert_array_equal(
        encoded_test_sentences[0, -3:],
        np.array([encoder.padding_token_index]*3))
    # first word is 'and' (not in the limited vocab (among top-3) -> OOV)
    assert encoded_test_sentences[1, 0] == encoder.oov_token_index
    # padding with '<PAD>' (9 tokens, at the end)
    np.testing.assert_array_equal(
        encoded_test_sentences[1, -9:],
        np.array([encoder.padding_token_index]*9))
    # decode
    sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False)
    assert sequence_list == ["<OOV>", "is", "<OOV>", "<OOV>", "<OOV>", "programming", "language", "<PAD>", "<PAD>",
                             "<PAD>"]
    sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False)
    assert sequence_list == ["<OOV>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>", "<PAD>"]
    # decode w/o control chars
    sequence_list = encoder.decode(encoded_test_sentences, 0, show_progress=False, show_padding=False)
    assert sequence_list == ["<OOV>", "is", "<OOV>", "<OOV>", "<OOV>", "programming", "language"]
    sequence_list = encoder.decode(encoded_test_sentences, 1, show_progress=False, show_padding=False)
    assert sequence_list == ["<OOV>"]
class TestEmbeddingMatcher(AbstractEmbeddingMatcher):
    """Embedding matcher stub backed by a small in-memory vector table.

    Each known word maps to a fixed, easily recognizable 4-dimensional
    embedding so tests can assert exact matrix contents.
    """

    def __init__(self, encode_reserved_words):
        super().__init__(encode_reserved_words=encode_reserved_words)
        # some random unique embedding pattern
        self.vector_defs = {
            "python": ["0.1", "0.1", "0.1", "0.1"],
            "is": ["-0.9", "-0.9", "-0.9", "-0.9"],
            "a": ["0.2", "0.3", "0.4", "0.5"],
            "multi": ["-0.5", "-0.6", "-0.7", "-0.8"],
            "paradigm": ["0.5", "0.4", "0.3", "0.2"],
            "programming": ["-0.9", "-0.8", "-0.7", "-0.6"],
            "language": ["0.4", "0.6", "0.6", "0.4"],
        }

    def get_vector_source(self):
        """Return (vocab_size, dimensions, vectors) built from the stub table."""
        word_vectors = [WordVector(token, values) for token, values in self.vector_defs.items()]
        return len(self.vector_defs), 4, word_vectors
def test_embedding_matcher():
    """Embedding matrix construction for the full (unlimited) vocabulary.

    Checks both modes: reserved tokens left as zero rows, and reserved
    tokens given distinct (tiny-normalized) marker embeddings.
    """
    encoder = TokenSequenceEncoder(default_length=10)
    encoder.prepare(corpus, show_progress=False)
    #
    # do not encode reserved words
    #
    matcher = TestEmbeddingMatcher(encode_reserved_words=False)
    # accessing the matrix before loading embeddings must raise
    with raises(ValueError) as e_info:
        _ = matcher.embedding_matrix
    matcher.reload_embeddings(token_encoder=encoder, show_progress=False)
    # expect matrix for 102 words/token with 4 dimensions each
    expected = np.zeros((102, 4))
    # every word/token has an embedded representation
    # 'python' is 2nd most common + 4 reserved token -> index:5
    expected[5] = np.array(matcher.vector_defs["python"])
    # 'is' is 7th most common + 4 reserved token -> index:10
    expected[10] = np.array(matcher.vector_defs["is"])
    # 'a' is 3rd most common + 4 reserved token -> index:6
    expected[6] = np.array(matcher.vector_defs["a"])
    # 'multi' is unknown -> OOV
    # 'paradigm' is unknown -> OOV
    # 'programming' is 4th most common + 4 reserved token -> index:7
    expected[7] = np.array(matcher.vector_defs["programming"])
    # 'language' is 8th most common + 4 reserved token -> index:11
    expected[11] = np.array(matcher.vector_defs["language"])
    np.testing.assert_array_equal(
        matcher.embedding_matrix,
        expected
    )
    #
    # do encode reserved words
    #
    matcher = TestEmbeddingMatcher(encode_reserved_words=True)
    # accessing the matrix before loading embeddings must raise
    with raises(ValueError) as e_info:
        _ = matcher.embedding_matrix
    matcher.reload_embeddings(token_encoder=encoder, show_progress=False)
    # expect matrix for 102 words/token with 4 dimensions each (filled with OOV --> one vector (normalized by 1e-16))
    expected = np.ones((102, 4)) * matcher.normalize_reserved_embeddings_by
    # we only need to update embeddings not equal to OOV
    # reserved words embeddings:
    # <PAD> -> zero vector
    expected[0] = np.zeros(4)
    # <OOV> -> one vector (normalized by 1e-16) ..actually not necessary
    expected[1] = np.array([1]*4) * matcher.normalize_reserved_embeddings_by
    # <START> -> minus one vector (normalized by 1e-16)
    expected[2] = np.array([-1]*4) * matcher.normalize_reserved_embeddings_by
    # <END> -> alternating(one, minus-one) vector (normalized by 1e-16)
    expected[3] = np.array([1, -1]*2) * matcher.normalize_reserved_embeddings_by
    # 'python' is 2nd most common + 4 reserved token -> index:5
    expected[5] = np.array(matcher.vector_defs["python"])
    # 'is' is 7th most common + 4 reserved token -> index:10
    expected[10] = np.array(matcher.vector_defs["is"])
    # 'a' is 3rd most common + 4 reserved token -> index:6
    expected[6] = np.array(matcher.vector_defs["a"])
    # 'multi' is unknown -> OOV
    # 'paradigm' is unknown -> OOV
    # 'programming' is 4th most common + 4 reserved token -> index:7
    expected[7] = np.array(matcher.vector_defs["programming"])
    # 'language' is 8th most common + 4 reserved token -> index:11
    expected[11] = np.array(matcher.vector_defs["language"])
    np.testing.assert_array_equal(
        matcher.embedding_matrix,
        expected
    )
def test_embedding_matcher_limit_vocab():
    """Embedding matrix construction with the vocabulary capped at 10 entries.

    Words outside the limited vocabulary get no embedding row of their own.
    """
    # build a vocab of size 10 including:
    # - reserved token <PAD>, <OOV>, <START>, and <END>
    # - plus the top 6 words in the corpus: and(8), python(7), a(4), programming(3), has(3), and the(3)
    encoder = TokenSequenceEncoder(default_length=10, limit_vocabulary=10)
    encoder.prepare(corpus, show_progress=False)
    #
    # do not encode reserved words
    #
    matcher = TestEmbeddingMatcher(encode_reserved_words=False)
    # accessing the matrix before loading embeddings must raise
    with raises(ValueError) as e_info:
        _ = matcher.embedding_matrix
    matcher.reload_embeddings(token_encoder=encoder, show_progress=False)
    # expect matrix for 10 words/token with 4 dimensions each
    expected = np.zeros((10, 4))
    # only few words have an embedded representation
    # 'python' is 2nd most common + 4 reserved token -> index:5
    expected[5] = np.array(matcher.vector_defs["python"])
    # 'is' is 7th most common + 4 reserved token -> OOV
    # 'a' is 3rd most common + 4 reserved token -> index:6
    expected[6] = np.array(matcher.vector_defs["a"])
    # 'multi' is unknown -> OOV
    # 'paradigm' is unknown -> OOV
    # 'programming' is 4th most common + 4 reserved token -> index:7
    expected[7] = np.array(matcher.vector_defs["programming"])
    # 'language' is 8th most common + 4 reserved token -> OOV
    np.testing.assert_array_equal(
        matcher.embedding_matrix,
        expected
    )
    #
    # do encode reserved words
    #
    matcher = TestEmbeddingMatcher(encode_reserved_words=True)
    # accessing the matrix before loading embeddings must raise
    with raises(ValueError) as e_info:
        _ = matcher.embedding_matrix
    matcher.reload_embeddings(token_encoder=encoder, show_progress=False)
    # expect matrix for 10 words/token with 4 dimensions each (filled with OOV --> one vector (normalized by 1e-16))
    expected = np.ones((10, 4)) * matcher.normalize_reserved_embeddings_by
    # we only need to update embeddings not equal to OOV
    # reserved words embeddings:
    # <PAD> -> zero vector
    expected[0] = np.zeros(4)
    # <OOV> -> one vector (normalized by 1e-16) ..actually not necessary
    expected[1] = np.array([1]*4) * matcher.normalize_reserved_embeddings_by
    # <START> -> minus one vector (normalized by 1e-16)
    expected[2] = np.array([-1]*4) * matcher.normalize_reserved_embeddings_by
    # <END> -> alternating(one, minus-one) vector (normalized by 1e-16)
    expected[3] = np.array([1, -1]*2) * matcher.normalize_reserved_embeddings_by
    # only few words have an embedded representation
    # 'python' is 2nd most common + 4 reserved token -> index:5
    expected[5] = np.array(matcher.vector_defs["python"])
    # 'is' is 7th most common + 4 reserved token -> OOV
    # 'a' is 3rd most common + 4 reserved token -> index:6
    expected[6] = np.array(matcher.vector_defs["a"])
    # 'multi' is unknown -> OOV
    # 'paradigm' is unknown -> OOV
    # 'programming' is 4th most common + 4 reserved token -> index:7
    expected[7] = np.array(matcher.vector_defs["programming"])
    # 'language' is 8th most common + 4 reserved token -> OOV
    np.testing.assert_array_equal(
        matcher.embedding_matrix,
        expected
    )
def test_embedding_matcher_limit_vocab_and_top_words():
    """Embedding matrix construction with a capped vocab and top-3 words skipped.

    Skipped top words (and anything outside the limited vocab) get no
    embedding row; surviving words shift down by the 3 skipped ranks.
    """
    # build a vocab of size 22 including:
    # - reserved token <PAD>, <OOV>, <START>, and <END>
    # - plus the top 18 words in the corpus (after skipping the top 3):
    # programming(3), has(3), the(3), is(2), language(2), by(2), van(2), rossum(2), in(2), that(2), it(2),
    # large(2), community(2), as(2), are(2), cpython(2), of(2), software(2)
    # - ignored words (top 3): and(8), python(7), a(4)
    encoder = TokenSequenceEncoder(default_length=10, skip_top_words=3, limit_vocabulary=22)
    encoder.prepare(corpus, show_progress=False)
    #
    # do not encode reserved words
    #
    matcher = TestEmbeddingMatcher(encode_reserved_words=False)
    # accessing the matrix before loading embeddings must raise
    with raises(ValueError) as e_info:
        _ = matcher.embedding_matrix
    matcher.reload_embeddings(token_encoder=encoder, show_progress=False)
    # expect matrix for 22 words/token with 4 dimensions each
    expected = np.zeros((22, 4))
    # only few words have an embedded representation
    # 'python' is 2nd most common -> OOV
    # 'is' is 7th most common - top-3 words + 4 reserved token -> index:7
    expected[7] = np.array(matcher.vector_defs["is"])
    # 'a' is 3rd most common -> OOV
    # 'multi' is unknown -> OOV
    # 'paradigm' is unknown -> OOV
    # 'programming' is 4th most common - top-3 words + 4 reserved token -> index:4
    expected[4] = np.array(matcher.vector_defs["programming"])
    # 'language' is 8th most common - top-3 words + 4 reserved token -> index:8
    expected[8] = np.array(matcher.vector_defs["language"])
    np.testing.assert_array_equal(
        matcher.embedding_matrix,
        expected
    )
    #
    # do encode reserved words
    #
    matcher = TestEmbeddingMatcher(encode_reserved_words=True)
    # accessing the matrix before loading embeddings must raise
    with raises(ValueError) as e_info:
        _ = matcher.embedding_matrix
    matcher.reload_embeddings(token_encoder=encoder, show_progress=False)
    # expect matrix for 22 words/token with 4 dimensions each (filled with OOV --> one vector (normalized by 1e-16))
    expected = np.ones((22, 4)) * matcher.normalize_reserved_embeddings_by
    # we only need to update embeddings not equal to OOV
    # reserved words embeddings:
    # <PAD> -> zero vector
    expected[0] = np.zeros(4)
    # <OOV> -> one vector (normalized by 1e-16) ..actually not necessary
    expected[1] = np.array([1]*4) * matcher.normalize_reserved_embeddings_by
    # <START> -> minus one vector (normalized by 1e-16)
    expected[2] = np.array([-1]*4) * matcher.normalize_reserved_embeddings_by
    # <END> -> alternating(one, minus-one) vector (normalized by 1e-16)
    expected[3] = np.array([1, -1]*2) * matcher.normalize_reserved_embeddings_by
    # 'python' is 2nd most common -> OOV
    # 'is' is 7th most common - top-3 words + 4 reserved token -> index:7
    expected[7] = np.array(matcher.vector_defs["is"])
    # 'a' is 3rd most common -> OOV
    # 'multi' is unknown -> OOV
    # 'paradigm' is unknown -> OOV
    # 'programming' is 4th most common - top-3 words + 4 reserved token -> index:4
    expected[4] = np.array(matcher.vector_defs["programming"])
    # 'language' is 8th most common - top-3 words + 4 reserved token -> index:8
    expected[8] = np.array(matcher.vector_defs["language"])
    np.testing.assert_array_equal(
        matcher.embedding_matrix,
        expected
    )
| 45.826625
| 127
| 0.673085
| 8,385
| 59,208
| 4.574478
| 0.039117
| 0.082592
| 0.150168
| 0.125401
| 0.947128
| 0.938733
| 0.93146
| 0.927393
| 0.923665
| 0.916156
| 0
| 0.034642
| 0.196528
| 59,208
| 1,291
| 128
| 45.862122
| 0.771651
| 0.31972
| 0
| 0.701563
| 0
| 0
| 0.081501
| 0
| 0
| 0
| 0
| 0.000775
| 0.421875
| 1
| 0.042188
| false
| 0
| 0.003125
| 0
| 0.048438
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
681c5ebeef339e47245f467c0228ce49d5ce6971
| 6,916
|
py
|
Python
|
models/official/detection/modeling/architecture/nn_blocks.py
|
eduagarcia/tpu
|
e6fb353f5d61b02c140a8b8a2a50c58b557c2f91
|
[
"Apache-2.0"
] | 4
|
2020-01-23T16:17:37.000Z
|
2022-01-18T22:02:22.000Z
|
models/official/detection/modeling/architecture/nn_blocks.py
|
eduagarcia/tpu
|
e6fb353f5d61b02c140a8b8a2a50c58b557c2f91
|
[
"Apache-2.0"
] | null | null | null |
models/official/detection/modeling/architecture/nn_blocks.py
|
eduagarcia/tpu
|
e6fb353f5d61b02c140a8b8a2a50c58b557c2f91
|
[
"Apache-2.0"
] | 1
|
2020-02-16T12:09:49.000Z
|
2020-02-16T12:09:49.000Z
|
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Block zoo."""
from __future__ import absolute_import
from __future__ import division
#Standard imports
from __future__ import print_function
from absl import logging
import tensorflow.compat.v1 as tf
from modeling.architecture import nn_ops
def residual_block(inputs,
                   filters,
                   strides,
                   use_projection,
                   activation=tf.nn.relu,
                   batch_norm_relu=nn_ops.BatchNormRelu(),
                   dropblock=nn_ops.Dropblock(),
                   drop_connect_rate=None,
                   data_format='channels_last',
                   is_training=False):
  """The residual block with BN and DropBlock after convolutions.

  NOTE(review): the default `batch_norm_relu` and `dropblock` instances are
  created once at import time and shared by every call that relies on the
  defaults — confirm this layer reuse across blocks is intentional.

  Args:
    inputs: a `Tensor` of size `[batch, channels, height, width]`.
    filters: an `int` number of filters for the convolutions.
    strides: an `int` block stride. If greater than 1, this block will
      ultimately downsample the input.
    use_projection: a `bool` for whether this block should use a projection
      shortcut (versus the default identity shortcut). This is usually `True`
      for the first block of a block group, which may change the number of
      filters and the resolution.
    activation: activation function. Support 'relu' and 'swish'.
    batch_norm_relu: an operation that is added after convolutions, including a
      batch norm layer and an optional relu activation.
    dropblock: a drop block layer that is added after convluations. Note that
      the default implementation does not apply any drop block.
    drop_connect_rate: a 'float' number that specifies the drop connection rate
      of the block. Note that the default `None` means no drop connection is
      applied.
    data_format: a `str` that specifies the data format.
    is_training: a `bool` if True, the model is in training mode.

  Returns:
    The output `Tensor` of the block.
  """
  logging.info('-----> Building residual block.')
  shortcut = inputs
  if use_projection:
    # 1x1 projection shortcut: matches the main path's filter count/stride
    # so the residual addition below is shape-compatible.
    shortcut = nn_ops.conv2d_fixed_padding(
        inputs=inputs,
        filters=filters,
        kernel_size=1,
        strides=strides,
        data_format=data_format)
    # relu=False: no activation on the shortcut branch before the addition.
    shortcut = batch_norm_relu(shortcut, relu=False, is_training=is_training)
    shortcut = dropblock(shortcut, is_training=is_training)
  # first 3x3 conv (carries the block's stride)
  inputs = nn_ops.conv2d_fixed_padding(
      inputs=inputs,
      filters=filters,
      kernel_size=3,
      strides=strides,
      data_format=data_format)
  inputs = batch_norm_relu(inputs, is_training=is_training)
  inputs = dropblock(inputs, is_training=is_training)
  # second 3x3 conv (always stride 1)
  inputs = nn_ops.conv2d_fixed_padding(
      inputs=inputs,
      filters=filters,
      kernel_size=3,
      strides=1,
      data_format=data_format)
  # relu=False: activation is applied only after the residual addition.
  inputs = batch_norm_relu(inputs, relu=False, is_training=is_training)
  inputs = dropblock(inputs, is_training=is_training)
  if drop_connect_rate:
    # stochastic-depth style drop of the residual branch; skipped when the
    # rate is None (or 0, which is falsy).
    inputs = nn_ops.drop_connect(inputs, is_training, drop_connect_rate)
  return activation(inputs + shortcut)
def bottleneck_block(inputs,
                     filters,
                     strides,
                     use_projection,
                     activation=tf.nn.relu,
                     batch_norm_relu=nn_ops.BatchNormRelu(),
                     dropblock=nn_ops.Dropblock(),
                     drop_connect_rate=None,
                     data_format='channels_last',
                     is_training=False):
  """The bottleneck block with BN and DropBlock after convolutions.

  NOTE(review): the default `batch_norm_relu` and `dropblock` instances are
  created once at import time and shared by every call that relies on the
  defaults — confirm this layer reuse across blocks is intentional.

  Args:
    inputs: a `Tensor` of size `[batch, channels, height, width]`.
    filters: a `int` number of filters for the first two convolutions. Note that
      the third and final convolution will use 4 times as many filters.
    strides: an `int` block stride. If greater than 1, this block will
      ultimately downsample the input.
    use_projection: a `bool` for whether this block should use a projection
      shortcut (versus the default identity shortcut). This is usually `True`
      for the first block of a block group, which may change the number of
      filters and the resolution.
    activation: activation function. Support 'relu' and 'swish'.
    batch_norm_relu: an operation that is added after convolutions, including a
      batch norm layer and an optional relu activation.
    dropblock: a drop block layer that is added after convluations. Note that
      the default implementation does not apply any drop block.
    drop_connect_rate: a 'float' number that specifies the drop connection rate
      of the block. Note that the default `None` means no drop connection is
      applied.
    data_format: a `str` that specifies the data format.
    is_training: a `bool` if True, the model is in training mode.

  Returns:
    The output `Tensor` of the block.
  """
  logging.info('-----> Building bottleneck block.')
  shortcut = inputs
  if use_projection:
    # projection shortcut must match the expanded (4x) output channels of
    # the final 1x1 conv below.
    filters_out = 4 * filters
    shortcut = nn_ops.conv2d_fixed_padding(
        inputs=inputs,
        filters=filters_out,
        kernel_size=1,
        strides=strides,
        data_format=data_format)
    # relu=False: no activation on the shortcut branch before the addition.
    shortcut = batch_norm_relu(shortcut, relu=False, is_training=is_training)
    shortcut = dropblock(shortcut, is_training=is_training)
  # 1x1 reduce conv
  inputs = nn_ops.conv2d_fixed_padding(
      inputs=inputs,
      filters=filters,
      kernel_size=1,
      strides=1,
      data_format=data_format)
  inputs = batch_norm_relu(inputs, is_training=is_training)
  inputs = dropblock(inputs, is_training=is_training)
  # 3x3 spatial conv (carries the block's stride)
  inputs = nn_ops.conv2d_fixed_padding(
      inputs=inputs,
      filters=filters,
      kernel_size=3,
      strides=strides,
      data_format=data_format)
  inputs = batch_norm_relu(inputs, is_training=is_training)
  inputs = dropblock(inputs, is_training=is_training)
  # 1x1 expand conv (4x filters)
  inputs = nn_ops.conv2d_fixed_padding(
      inputs=inputs,
      filters=4 * filters,
      kernel_size=1,
      strides=1,
      data_format=data_format)
  # relu=False: activation is applied only after the residual addition.
  inputs = batch_norm_relu(inputs, relu=False, is_training=is_training)
  inputs = dropblock(inputs, is_training=is_training)
  if drop_connect_rate:
    # stochastic-depth style drop of the residual branch; skipped when the
    # rate is None (or 0, which is falsy).
    inputs = nn_ops.drop_connect(inputs, is_training, drop_connect_rate)
  return activation(inputs + shortcut)
| 38.209945
| 80
| 0.688404
| 903
| 6,916
| 5.10299
| 0.204873
| 0.073785
| 0.036458
| 0.060764
| 0.81467
| 0.81467
| 0.789497
| 0.789497
| 0.789497
| 0.789497
| 0
| 0.00621
| 0.231637
| 6,916
| 180
| 81
| 38.422222
| 0.860933
| 0.468623
| 0
| 0.863158
| 0
| 0
| 0.025338
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021053
| false
| 0
| 0.063158
| 0
| 0.105263
| 0.010526
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
68343224fe17a55d8db2c799bf413ec4efc51bd4
| 1,396
|
py
|
Python
|
output/my_ASCII_drawings_functions.py
|
MicheleMorelli/ASCII_art_tool_for_terminal
|
8d60738252434d4d0185a023715f24e93c752513
|
[
"MIT"
] | 3
|
2018-02-09T17:24:22.000Z
|
2018-02-12T21:44:30.000Z
|
output/my_ASCII_drawings_functions.py
|
MicheleMorelli/ASCII_art_tool_for_terminal
|
8d60738252434d4d0185a023715f24e93c752513
|
[
"MIT"
] | 2
|
2018-02-10T18:15:25.000Z
|
2018-02-16T16:31:48.000Z
|
output/my_ASCII_drawings_functions.py
|
MicheleMorelli/ASCII_art_tool_for_terminal
|
8d60738252434d4d0185a023715f24e93c752513
|
[
"MIT"
] | null | null | null |
#Created with the Terminal ASCII Paint app by Michele Morelli - https://github.com/MicheleMorelli
def draw_house():
    """Print a fixed ASCII-art house to stdout (auto-generated drawing)."""
    # Auto-generated by the Terminal ASCII Paint tool; the single string
    # expression below must stay byte-identical to preserve the drawing.
    print(" "*64+"\n"+" "*64+"\n"+" "*5+"_"*33+" "*26+"\n"+" "*4+"|"+"/"*4+"|"+"#"*28+" "*26+"\n"+" "*4+"|"+"/"*4+"|"+"#"*28+" "*26+"\n"+" "*4+"|"+"/"*4+"|"+"#"*2+" "*2+"|"+"/"+"|"+"#"*2+" "*3+"|"+"/"+"|"+"#"*5+" "*3+"|"+"/"+"|"+"#"*2+" "*26+"\n"+" "*4+"|"+"/"*4+"|"+"#"*2+" "*2+"|"+"/"+"|"+"#"*2+" "*3+"|"+"/"+"|"+"#"*5+" "*3+"|"+"/"+"|"+"#"*2+" "*26+"\n"+" "*4+"|"+"/"*4+"|"+"#"*2+" "*2+"|"+"/"+"|"+"#"*2+" "*3+"|"+"/"+"|"+"#"*5+" "*3+"|"+"/"+"|"+"#"*2+" "*26+"\n"+" "*4+"|"+"/"*4+"|"+"#"*9+" "*3+"|"+"/"+"|"+"#"*13+" "*26+"\n"+" "*4+"|"+"/"*4+"|"+"#"*28+" "*26+"\n"+" "*4+"|"+"/"*4+"|"+"#"*28+" "*26+"\n"+" "*4+"|"+"/"*4+"|"+"#"*9+" "*7+"|"+"/"+"|"+"#"*9+" "*2+"_"*16+" "*8+"\n"+" "*4+"|"+"/"*4+"|"+"#"*9+" "*7+"|"+"/"+"|"+"#"*9+" "+"|"+"/"*2+"|"+"#"*13+"|"+" "*7+"\n"+" "*4+"|"+"/"*4+"|"+"#"*9+" "*7+"|"+"/"+"|"+"#"*9+" "+"|"+"/"*2+"|"+"#"*13+"|"+" "*7+"\n"+" "*4+"|"+"/"*4+"|"+"#"*9+" "*7+"|"+"/"+"|"+"#"*9+" "+"|"+"/"*2+"|"+"#"*13+"|"+" "*7+"\n"+" "*4+"|"+"/"*4+"|"+"#"*9+" "*7+"|"+"/"+"|"+"#"*9+" "+"|"+"/"*2+"|"+"#"*13+"|"+" "*7+"\n")
# module-level side effect: the drawing is printed as soon as this file runs
draw_house()
def draw_sdfsdf():
    """Print 16 blank rows of 64 spaces each (an empty placeholder drawing)."""
    blank_row = " " * 64
    # 16 rows separated by newlines, plus the trailing newline the original
    # concatenation produced; print() appends one more final newline.
    print("\n".join([blank_row] * 16) + "\n")
| 155.111111
| 1,042
| 0.222779
| 169
| 1,396
| 1.810651
| 0.195266
| 0.176471
| 0.261438
| 0.313725
| 0.601307
| 0.545752
| 0.545752
| 0.53268
| 0.53268
| 0.53268
| 0
| 0.116065
| 0.068052
| 1,396
| 8
| 1,043
| 174.5
| 0.119139
| 0.068768
| 0
| 0
| 0
| 0
| 0.20339
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| true
| 0
| 0
| 0
| 0.4
| 0.4
| 0
| 0
| 1
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
684504b3b0dfe97aa6dde7500a7330073d9cfc7a
| 142
|
py
|
Python
|
EEG_Lightning/dassl/config/__init__.py
|
mcd4874/NeurIPS_competition
|
4df1f222929e9824a55c9c4ae6634743391b0fe9
|
[
"MIT"
] | 23
|
2021-10-14T02:31:06.000Z
|
2022-01-25T16:26:44.000Z
|
EEG_Lightning/dassl/config/__init__.py
|
mcd4874/NeurIPS_competition
|
4df1f222929e9824a55c9c4ae6634743391b0fe9
|
[
"MIT"
] | null | null | null |
EEG_Lightning/dassl/config/__init__.py
|
mcd4874/NeurIPS_competition
|
4df1f222929e9824a55c9c4ae6634743391b0fe9
|
[
"MIT"
] | 1
|
2022-03-05T06:54:11.000Z
|
2022-03-05T06:54:11.000Z
|
# from .defaults import _C as cfg_default
from .defaults_new import _C as cfg_default
def get_cfg_default():
    """Return a fresh copy of the default configuration.

    Cloning keeps the module-level ``cfg_default`` pristine: callers may
    mutate the returned node freely without affecting other consumers.
    """
    default_copy = cfg_default.clone()
    return default_copy
| 20.285714
| 43
| 0.774648
| 23
| 142
| 4.434783
| 0.521739
| 0.392157
| 0.176471
| 0.235294
| 0.372549
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161972
| 142
| 6
| 44
| 23.666667
| 0.857143
| 0.274648
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 9
|
d7ac82c4bf58877230764c65e4a1152e6f4b8703
| 22,997
|
py
|
Python
|
sm2/genmod/tests/test_irls.py
|
jbrockmendel/sm2
|
c02a3f9a4fcba35ffc8c852ca5ad8b9d7620f4cf
|
[
"BSD-3-Clause"
] | 1
|
2021-08-02T13:48:59.000Z
|
2021-08-02T13:48:59.000Z
|
sm2/genmod/tests/test_irls.py
|
jbrockmendel/sm2
|
c02a3f9a4fcba35ffc8c852ca5ad8b9d7620f4cf
|
[
"BSD-3-Clause"
] | 24
|
2018-03-26T00:44:58.000Z
|
2018-10-09T17:06:07.000Z
|
sm2/genmod/tests/test_irls.py
|
jbrockmendel/sm2
|
c02a3f9a4fcba35ffc8c852ca5ad8b9d7620f4cf
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Tests for iteratively weighted least squares
Upstream this is part of test_glm
"""
import warnings
import pytest
import numpy as np
from numpy.testing import assert_allclose
import sm2.api as sm
from sm2.genmod.families import links
from sm2.tools.numdiff import approx_fprime, approx_hess
@pytest.mark.not_vetted
def check_score_hessian(results):
    """Cross-check the analytic score and hessian of a fitted GLM against
    numerical derivatives (GH#4620).

    The score is evaluated slightly away from the MLE because at the
    optimum it is close to zero, making a relative comparison meaningless.
    """
    params = results.params

    def loglike_at(x):
        return results.model.loglike(x, scale=1)

    def score_at(x):
        return results.model.score(x, scale=1)

    # complex-step differentiation (approx_fprime_cs) currently (0.9) does
    # not work for all families, so plain finite differences are used
    shifted = params * 0.98
    analytic_score = results.model.score(shifted, scale=1)
    numeric_score = approx_fprime(shifted, loglike_at)
    assert_allclose(analytic_score, numeric_score, rtol=0.05)

    analytic_hess = results.model.hessian(params, scale=1)
    assert_allclose(analytic_hess, approx_hess(params, loglike_at),
                    rtol=0.05)
    assert_allclose(analytic_hess, approx_fprime(params, score_at),
                    rtol=0.05)
@pytest.mark.not_vetted
def gen_endog(lin_pred, family_class, link, binom_version=0):
    """Simulate a response vector for the given GLM family and link.

    Parameters
    ----------
    lin_pred : ndarray
        Linear predictor; the mean is obtained through the inverse link.
    family_class : type
        One of the ``sm.families`` classes (the class itself, not an
        instance).
    link : type
        Link class used to map ``lin_pred`` to the mean.
    binom_version : int, default 0
        Binomial only: 0 draws 0/1 outcomes; any other value draws
        (success, failure) count pairs with n=10 trials per observation.

    Returns
    -------
    ndarray
        Simulated endog suitable for ``sm.GLM``.

    Raises
    ------
    ValueError
        If ``family_class`` is not one of the supported families.
    """
    np.random.seed(872)
    mu = link().inverse(lin_pred)
    if family_class == sm.families.Binomial:
        if binom_version == 0:
            endog = 1 * (np.random.uniform(size=len(lin_pred)) < mu)
        else:
            endog = np.empty((len(lin_pred), 2))
            n = 10
            uni = np.random.uniform(size=(len(lin_pred), n))
            endog[:, 0] = (uni < mu[:, None]).sum(1)
            endog[:, 1] = n - endog[:, 0]
    elif family_class == sm.families.Poisson:
        endog = np.random.poisson(mu)
    elif family_class == sm.families.Gamma:
        endog = np.random.gamma(2, mu)
    elif family_class == sm.families.Gaussian:
        endog = mu + np.random.normal(size=len(lin_pred))
    elif family_class == sm.families.NegativeBinomial:
        from scipy.stats.distributions import nbinom
        endog = nbinom.rvs(mu, 0.5)
    elif family_class == sm.families.InverseGaussian:
        from scipy.stats.distributions import invgauss
        endog = invgauss.rvs(mu)
    elif family_class == sm.families.Tweedie:
        # upstream this case wasn't present in test_glm, but there was an
        # otherwise identical gen_endog function in test_glm_weights
        rate = 1
        shape = 1.0
        scale = mu / (rate * shape)
        endog = (np.random.poisson(rate, size=scale.shape[0]) *
                 np.random.gamma(shape * scale))
    else:
        # was a bare ``raise ValueError``; name the offending family so a
        # failure is actually diagnosable
        raise ValueError("unsupported family class: %r" % (family_class,))
    return endog
@pytest.mark.not_vetted
def test_gradient_irls():
    """Compare gradient (newton) optimization against IRLS across families
    and links: params, llf, scale and standard errors must agree."""
    # TODO: Find working examples for inverse_squared link
    np.random.seed(87342)
    # (family class, links to exercise for that family)
    fams = [(sm.families.Binomial, [links.logit, links.probit, links.cloglog,
                                    links.log, links.cauchy]),
            (sm.families.Poisson, [links.log, links.identity, links.sqrt]),
            (sm.families.Gamma, [links.log, links.identity,
                                 links.inverse_power]),
            (sm.families.Gaussian, [links.identity, links.log,
                                    links.inverse_power]),
            (sm.families.InverseGaussian, [links.log, links.identity,
                                           links.inverse_power,
                                           links.inverse_squared]),
            (sm.families.NegativeBinomial, [links.log, links.inverse_power,
                                            links.inverse_squared,
                                            links.identity])]

    n = 100
    p = 3
    exog = np.random.normal(size=(n, p))
    exog[:, 0] = 1  # intercept column

    skip_one = False
    for family_class, family_links in fams:
        for link in family_links:
            for binom_version in [0, 1]:
                # the two-column (count) endog only applies to Binomial
                if family_class != sm.families.Binomial and binom_version == 1:
                    continue

                # per-case linear predictors, tuned so the simulated data
                # keeps both optimizers numerically stable
                if (family_class, link) == (sm.families.Poisson,
                                            links.identity):
                    lin_pred = 20 + exog.sum(1)
                elif (family_class, link) == (sm.families.Binomial, links.log):
                    lin_pred = -1 + exog.sum(1) / 8
                elif (family_class, link) == (sm.families.Poisson, links.sqrt):
                    lin_pred = 2 + exog.sum(1)
                elif (family_class, link) == (sm.families.InverseGaussian,
                                              links.log):
                    # skip_zero = True
                    lin_pred = -1 + exog.sum(1)
                elif (family_class, link) == (sm.families.InverseGaussian,
                                              links.identity):
                    lin_pred = 20 + 5 * exog.sum(1)
                    lin_pred = np.clip(lin_pred, 1e-4, np.inf)
                elif (family_class, link) == (sm.families.InverseGaussian,
                                              links.inverse_squared):
                    lin_pred = 0.5 + exog.sum(1) / 5
                    continue  # skip due to non-convergence
                elif (family_class, link) == (sm.families.InverseGaussian,
                                              links.inverse_power):
                    lin_pred = 1 + exog.sum(1) / 5
                elif (family_class, link) == (sm.families.NegativeBinomial,
                                              links.identity):
                    lin_pred = 20 + 5 * exog.sum(1)
                    lin_pred = np.clip(lin_pred, 1e-4, np.inf)
                elif (family_class, link) == (sm.families.NegativeBinomial,
                                              links.inverse_squared):
                    lin_pred = 0.1 + np.random.uniform(size=exog.shape[0])
                    continue  # skip due to non-convergence
                elif (family_class, link) == (sm.families.NegativeBinomial,
                                              links.inverse_power):
                    lin_pred = 1 + exog.sum(1) / 5
                elif (family_class, link) == (sm.families.Gaussian,
                                              links.inverse_power):
                    # adding skip because of convergence failure
                    # NOTE(review): this branch leaves ``lin_pred`` from the
                    # previous loop iteration in place; the case is skipped
                    # later via ``skip_one``
                    skip_one = True
                # GH#4620
                # the following fails with identity link, because endog < 0
                # elif family_class == fam.Gamma:
                #     lin_pred = (0.5 * exog.sum(1) +
                #                 np.random.uniform(size=exog.shape[0]))
                else:
                    lin_pred = np.random.uniform(size=exog.shape[0])

                endog = gen_endog(lin_pred, family_class, link, binom_version)

                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    mod_irls = sm.GLM(endog, exog,
                                      family=family_class(link=link()))
                rslt_irls = mod_irls.fit(method="IRLS")

                if (family_class, link) not in [(sm.families.Poisson,
                                                 links.sqrt),
                                                (sm.families.Gamma,
                                                 links.inverse_power),
                                                (sm.families.InverseGaussian,
                                                 links.identity)]:
                    # GH#4620
                    check_score_hessian(rslt_irls)

                # Try with and without starting values.
                for max_start_irls, start_params in [(0, rslt_irls.params),
                                                     (3, None)]:
                    # TODO: skip convergence failures for now
                    if max_start_irls > 0 and skip_one:
                        continue
                    with warnings.catch_warnings():
                        warnings.simplefilter("ignore")
                        mod_gradient = sm.GLM(endog, exog,
                                              family=family_class(link=link()))
                        rslt_gradient = mod_gradient.fit(
                            max_start_irls=max_start_irls,
                            start_params=start_params,
                            method="newton",
                            maxiter=300)

                    assert_allclose(rslt_gradient.params,
                                    rslt_irls.params, rtol=1e-6, atol=5e-5)
                    assert_allclose(rslt_gradient.llf, rslt_irls.llf,
                                    rtol=1e-6, atol=1e-6)
                    assert_allclose(rslt_gradient.scale, rslt_irls.scale,
                                    rtol=1e-6, atol=1e-6)

                    # Get the standard errors using expected information.
                    gradient_bse = rslt_gradient.bse
                    ehess = mod_gradient.hessian(rslt_gradient.params,
                                                 observed=False)
                    gradient_bse = np.sqrt(-np.diag(np.linalg.inv(ehess)))
                    assert_allclose(gradient_bse, rslt_irls.bse,
                                    rtol=1e-6, atol=5e-5)
@pytest.mark.not_vetted
def test_gradient_irls_eim():
    """Same gradient-vs-IRLS comparison as test_gradient_irls, but the
    newton optimizer uses the expected information matrix (EIM) hessian."""
    # TODO: Find working examples for inverse_squared link
    np.random.seed(87342)
    fams = [(sm.families.Binomial, [links.logit, links.probit, links.cloglog,
                                    links.log, links.cauchy]),
            (sm.families.Poisson, [links.log, links.identity, links.sqrt]),
            (sm.families.Gamma, [links.log, links.identity,
                                 links.inverse_power]),
            (sm.families.Gaussian, [links.identity, links.log,
                                    links.inverse_power]),
            (sm.families.InverseGaussian, [links.log, links.identity,
                                           links.inverse_power,
                                           links.inverse_squared]),
            (sm.families.NegativeBinomial, [links.log, links.inverse_power,
                                            links.inverse_squared,
                                            links.identity])]

    n = 100
    p = 3
    exog = np.random.normal(size=(n, p))
    exog[:, 0] = 1  # intercept column

    skip_one = False
    for family_class, family_links in fams:
        for link in family_links:
            for binom_version in [0, 1]:
                # two-column (count) endog only applies to Binomial
                if family_class != sm.families.Binomial and binom_version == 1:
                    continue

                if (family_class, link) == (sm.families.Poisson,
                                            links.identity):
                    lin_pred = 20 + exog.sum(1)
                elif (family_class, link) == (sm.families.Binomial, links.log):
                    lin_pred = -1 + exog.sum(1) / 8
                elif (family_class, link) == (sm.families.Poisson, links.sqrt):
                    lin_pred = 2 + exog.sum(1)
                elif (family_class, link) == (sm.families.InverseGaussian,
                                              links.log):
                    # skip_zero = True
                    lin_pred = -1 + exog.sum(1)
                elif (family_class, link) == (sm.families.InverseGaussian,
                                              links.identity):
                    lin_pred = 20 + 5 * exog.sum(1)
                    lin_pred = np.clip(lin_pred, 1e-4, np.inf)
                elif (family_class, link) == (sm.families.InverseGaussian,
                                              links.inverse_squared):
                    lin_pred = 0.5 + exog.sum(1) / 5
                    continue  # skip due to non-convergence
                elif (family_class, link) == (sm.families.InverseGaussian,
                                              links.inverse_power):
                    lin_pred = 1 + exog.sum(1) / 5
                elif (family_class, link) == (sm.families.NegativeBinomial,
                                              links.identity):
                    lin_pred = 20 + 5 * exog.sum(1)
                    lin_pred = np.clip(lin_pred, 1e-4, np.inf)
                elif (family_class, link) == (sm.families.NegativeBinomial,
                                              links.inverse_squared):
                    lin_pred = 0.1 + np.random.uniform(size=exog.shape[0])
                    continue  # skip due to non-convergence
                elif (family_class, link) == (sm.families.NegativeBinomial,
                                              links.inverse_power):
                    lin_pred = 1 + exog.sum(1) / 5
                elif (family_class, link) == (sm.families.Gaussian,
                                              links.inverse_power):
                    # adding skip because of convergence failure
                    skip_one = True
                else:
                    lin_pred = np.random.uniform(size=exog.shape[0])

                endog = gen_endog(lin_pred, family_class, link, binom_version)

                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    mod_irls = sm.GLM(endog, exog,
                                      family=family_class(link=link()))
                rslt_irls = mod_irls.fit(method="IRLS")

                # Try with and without starting values.
                for max_start_irls, start_params in ((0, rslt_irls.params),
                                                     (3, None)):
                    # TODO: skip convergence failures for now
                    if max_start_irls > 0 and skip_one:
                        continue
                    with warnings.catch_warnings():
                        warnings.simplefilter("ignore")
                        mod_gradient = sm.GLM(endog, exog,
                                              family=family_class(link=link()))
                        rslt_gradient = mod_gradient.fit(
                            max_start_irls=max_start_irls,
                            start_params=start_params,
                            method="newton",
                            optim_hessian='eim')

                    assert_allclose(rslt_gradient.params, rslt_irls.params,
                                    rtol=1e-6, atol=5e-5)
                    assert_allclose(rslt_gradient.llf, rslt_irls.llf,
                                    rtol=1e-6, atol=1e-6)
                    assert_allclose(rslt_gradient.scale, rslt_irls.scale,
                                    rtol=1e-6, atol=1e-6)

                    # Get the standard errors using expected information.
                    ehess = mod_gradient.hessian(rslt_gradient.params,
                                                 observed=False)
                    gradient_bse = np.sqrt(-np.diag(np.linalg.inv(ehess)))
                    assert_allclose(gradient_bse, rslt_irls.bse,
                                    rtol=1e-6, atol=5e-5)
# Taken from test_glm_weight.
# TODO: Is this redundant with tests above from test_glm?
@pytest.mark.not_vetted
def test_wtd_gradient_irls():
    """Gradient-vs-IRLS comparison as above, but fitting with var_weights;
    many (family, link) pairs are skipped for convergence reasons."""
    # TODO: Find working examples for inverse_squared link
    np.random.seed(87342)
    fam = sm.families
    lnk = sm.families.links
    families = [(fam.Binomial, [lnk.logit, lnk.probit, lnk.cloglog, lnk.log,
                                lnk.cauchy]),
                (fam.Poisson, [lnk.log, lnk.identity, lnk.sqrt]),
                (fam.Gamma, [lnk.log, lnk.identity, lnk.inverse_power]),
                (fam.Gaussian, [lnk.identity, lnk.log, lnk.inverse_power]),
                (fam.InverseGaussian, [lnk.log, lnk.identity,
                                       lnk.inverse_power,
                                       lnk.inverse_squared]),
                (fam.NegativeBinomial, [lnk.log, lnk.inverse_power,
                                        lnk.inverse_squared, lnk.identity])]

    n = 100
    p = 3
    exog = np.random.normal(size=(n, p))
    exog[:, 0] = 1  # intercept column

    skip_one = False
    for family_class, family_links in families:
        for link in family_links:
            for binom_version in [0, 1]:
                method = 'bfgs'

                # two-column (count) endog only applies to Binomial
                if family_class != fam.Binomial and binom_version == 1:
                    continue
                elif family_class == fam.Binomial and link == lnk.cloglog:
                    # Can't get gradient to converge with var_weights here
                    continue
                elif family_class == fam.Binomial and link == lnk.log:
                    # Can't get gradient to converge with var_weights here
                    continue
                elif (family_class, link) == (fam.Poisson, lnk.identity):
                    lin_pred = 20 + exog.sum(1)
                elif (family_class, link) == (fam.Binomial, lnk.log):
                    # NOTE(review): unreachable — the (Binomial, log) case is
                    # already skipped by the ``continue`` branch above
                    lin_pred = -1 + exog.sum(1) / 8
                elif (family_class, link) == (fam.Poisson, lnk.sqrt):
                    lin_pred = -2 + exog.sum(1)
                elif (family_class, link) == (fam.Gamma, lnk.log):
                    # Can't get gradient to converge with var_weights here
                    continue
                elif (family_class, link) == (fam.Gamma, lnk.identity):
                    # Can't get gradient to converge with var_weights here
                    continue
                elif (family_class, link) == (fam.Gamma, lnk.inverse_power):
                    # Can't get gradient to converge with var_weights here
                    continue
                elif (family_class, link) == (fam.Gaussian, lnk.log):
                    # Can't get gradient to converge with var_weights here
                    continue
                elif (family_class, link) == (fam.Gaussian, lnk.inverse_power):
                    # Can't get gradient to converge with var_weights here
                    continue
                elif (family_class, link) == (fam.InverseGaussian, lnk.log):
                    # Can't get gradient to converge with var_weights here
                    lin_pred = -1 + exog.sum(1)
                    continue
                elif (family_class, link) == (fam.InverseGaussian,
                                              lnk.identity):
                    # Can't get gradient to converge with var_weights here
                    lin_pred = 20 + 5 * exog.sum(1)
                    lin_pred = np.clip(lin_pred, 1e-4, np.inf)
                    continue
                elif (family_class, link) == (fam.InverseGaussian,
                                              lnk.inverse_squared):
                    lin_pred = 0.5 + exog.sum(1) / 5
                    continue  # skip due to non-convergence
                elif (family_class, link) == (fam.InverseGaussian,
                                              lnk.inverse_power):
                    lin_pred = 1 + exog.sum(1) / 5
                    method = 'newton'
                elif (family_class, link) == (fam.NegativeBinomial,
                                              lnk.identity):
                    lin_pred = 20 + 5 * exog.sum(1)
                    lin_pred = np.clip(lin_pred, 1e-3, np.inf)
                    method = 'newton'
                elif (family_class, link) == (fam.NegativeBinomial,
                                              lnk.inverse_squared):
                    lin_pred = 0.1 + np.random.uniform(size=exog.shape[0])
                    continue  # skip due to non-convergence
                elif (family_class, link) == (fam.NegativeBinomial,
                                              lnk.inverse_power):
                    # Can't get gradient to converge with var_weights here
                    lin_pred = 1 + exog.sum(1) / 5
                    continue
                elif (family_class, link) == (fam.Gaussian, lnk.inverse_power):
                    # adding skip because of convergence failure
                    # NOTE(review): unreachable — the same pair already hit
                    # the ``continue`` branch above, so ``skip_one`` is never
                    # set in this (weighted) variant
                    skip_one = True
                else:
                    lin_pred = np.random.uniform(size=exog.shape[0])

                endog = gen_endog(lin_pred, family_class, link, binom_version)
                # inflate the variance weights on above-mean observations
                if binom_version == 0:
                    wts = np.ones_like(endog)
                    tmp = np.random.randint(2,
                                            5,
                                            size=(endog > endog.mean()).sum())
                    wts[endog > endog.mean()] = tmp
                else:
                    wts = np.ones(shape=endog.shape[0])
                    y = endog[:, 0] / endog.sum(axis=1)
                    tmp = np.random.gamma(2, size=(y > y.mean()).sum())
                    wts[y > y.mean()] = tmp

                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    mod_irls = sm.GLM(endog, exog, var_weights=wts,
                                      family=family_class(link=link()))
                rslt_irls = mod_irls.fit(method="IRLS", atol=1e-10,
                                         tol_criterion='params')

                # Try with and without starting values.
                for max_start_irls, start_params in ((0, rslt_irls.params),
                                                     (3, None)):
                    # TODO: skip convergence failures for now
                    if max_start_irls > 0 and skip_one:
                        continue
                    with warnings.catch_warnings():
                        warnings.simplefilter("ignore")
                        mod_gradient = sm.GLM(endog, exog, var_weights=wts,
                                              family=family_class(link=link()))
                        rslt_gradient = mod_gradient.fit(
                            max_start_irls=max_start_irls,
                            start_params=start_params,
                            method=method)

                    assert_allclose(rslt_gradient.params,
                                    rslt_irls.params,
                                    rtol=1e-6, atol=5e-5)
                    assert_allclose(rslt_gradient.llf, rslt_irls.llf,
                                    rtol=1e-6, atol=1e-6)
                    assert_allclose(rslt_gradient.scale, rslt_irls.scale,
                                    rtol=1e-6, atol=1e-6)

                    # Get the standard errors using expected information.
                    gradient_bse = rslt_gradient.bse
                    ehess = mod_gradient.hessian(rslt_gradient.params,
                                                 observed=False)
                    gradient_bse = np.sqrt(-np.diag(np.linalg.inv(ehess)))
                    assert_allclose(gradient_bse, rslt_irls.bse,
                                    rtol=1e-6, atol=5e-5)
| 48.313025
| 79
| 0.485933
| 2,371
| 22,997
| 4.564319
| 0.10291
| 0.066069
| 0.067917
| 0.063205
| 0.820366
| 0.790335
| 0.768619
| 0.74672
| 0.724358
| 0.702273
| 0
| 0.021985
| 0.424447
| 22,997
| 475
| 80
| 48.414737
| 0.795633
| 0.100883
| 0
| 0.710811
| 0
| 0
| 0.004123
| 0
| 0
| 0
| 0
| 0.002105
| 0.043243
| 1
| 0.013514
| false
| 0
| 0.024324
| 0
| 0.040541
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d7c0c5d036761a235af172c3a181ee57c3a31c1e
| 51,281
|
py
|
Python
|
misc/baxter/src_py_/Jos.py
|
YoshimitsuMatsutaIe/rmp_test
|
a7c94ff68b518ef51821484795c308c2c8519c4c
|
[
"MIT"
] | null | null | null |
misc/baxter/src_py_/Jos.py
|
YoshimitsuMatsutaIe/rmp_test
|
a7c94ff68b518ef51821484795c308c2c8519c4c
|
[
"MIT"
] | null | null | null |
misc/baxter/src_py_/Jos.py
|
YoshimitsuMatsutaIe/rmp_test
|
a7c94ff68b518ef51821484795c308c2c8519c4c
|
[
"MIT"
] | null | null | null |
from math import cos as c
from math import sin as s
from math import sqrt as sq
from math import tan as ta

import numpy
import numpy as np
def jo_W0(q):
    """Positional Jacobian of the fixed W0 frame: identically zero.

    ``q`` (the joint vector) is unused but kept so all ``jo_*`` functions
    share the same signature.
    """
    # BUG FIX: the original referenced ``numpy`` although the module only
    # imports ``numpy as np``, raising NameError on every call.
    return np.array([[0, 0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0, 0]])
def jo_BR(q):
    """Positional Jacobian of the fixed BR frame: identically zero.

    ``q`` is unused but kept for a uniform ``jo_*`` API.
    """
    # BUG FIX: ``numpy`` -> ``np`` (only ``np`` is imported in this module).
    return np.array([[0, 0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0, 0]])
def jo_0(q):
    """Positional Jacobian of frame 0: identically zero.

    ``q`` is unused but kept for a uniform ``jo_*`` API.
    """
    # BUG FIX: ``numpy`` -> ``np`` (only ``np`` is imported in this module).
    return np.array([[0, 0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0, 0]])
def jo_1(q):
    """Positional Jacobian of the joint-1 origin; depends on q[0] only.

    Expects ``q`` indexable as ``q[i, 0]`` (a (7, 1) column of joint
    angles); returns a (3, 7) ndarray.
    """
    # BUG FIX: ``numpy`` -> ``np`` (only ``np`` is imported in this module).
    # The repeated sin/cos calls are hoisted into locals.
    s0 = np.sin(q[0, 0])
    c0 = np.cos(q[0, 0])
    return np.array([
        [0.0487903679018718 * s0 + 0.0487903679018718 * c0,
         0, 0, 0, 0, 0, 0],
        [0.0487903679018718 * s0 - 0.0487903679018718 * c0,
         0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0]])
def jo_2(q):
    """Positional Jacobian of the joint-2 origin; depends on q[0] and q[1].

    Expects ``q`` indexable as ``q[i, 0]``; returns a (3, 7) ndarray.
    """
    # BUG FIX: ``numpy`` -> ``np`` (only ``np`` is imported in this module).
    # Repeated trig terms hoisted; constants are from the generated
    # symbolic expression.
    s0, c0 = np.sin(q[0, 0]), np.cos(q[0, 0])
    s1, c1 = np.sin(q[1, 0]), np.cos(q[1, 0])
    return np.array([
        [(0.257634355725319 * s0 + 0.257634355725319 * c0) * c1
         + 0.0487903679018718 * s0 + 0.0487903679018718 * c0,
         -(0.257634355725319 * s0 - 0.257634355725319 * c0) * s1,
         0, 0, 0, 0, 0],
        [(0.257634355725319 * s0 - 0.257634355725319 * c0) * c1
         + 0.0487903679018718 * s0 - 0.0487903679018718 * c0,
         -(-0.257634355725319 * s0 - 0.257634355725319 * c0) * s1,
         0, 0, 0, 0, 0],
        [0, -0.36435 * c1, 0, 0, 0, 0, 0]])
def jo_3(q):
    """Positional Jacobian of the joint-3 origin; depends on q[0..2].

    Expects ``q`` indexable as ``q[i, 0]``; returns a (3, 7) ndarray.
    """
    # BUG FIX: ``numpy`` -> ``np`` (only ``np`` is imported in this module).
    # Repeated trig terms hoisted; ``a`` and ``b`` are the coefficients
    # from the generated symbolic expression.
    s0, c0 = np.sin(q[0, 0]), np.cos(q[0, 0])
    s1, c1 = np.sin(q[1, 0]), np.cos(q[1, 0])
    s2, c2 = np.sin(q[2, 0]), np.cos(q[2, 0])
    a = 0.0487903679018718
    b = 0.257634355725319
    return np.array([
        [(-a * s0 - a * c0) * s1 * c2 + (-a * s0 + a * c0) * s2
         + (b * s0 + b * c0) * c1 + a * s0 + a * c0,
         (-a * s0 + a * c0) * c1 * c2 - (b * s0 - b * c0) * s1,
         -(-a * s0 + a * c0) * s1 * s2 + (a * s0 + a * c0) * c2,
         0, 0, 0, 0],
        [(-a * s0 + a * c0) * s1 * c2 + (a * s0 + a * c0) * s2
         + (b * s0 - b * c0) * c1 + a * s0 - a * c0,
         -(-b * s0 - b * c0) * s1 + (a * s0 + a * c0) * c1 * c2,
         (a * s0 - a * c0) * c2 - (a * s0 + a * c0) * s1 * s2,
         0, 0, 0, 0],
        [0,
         0.069 * s1 * c2 - 0.36435 * c1,
         0.069 * s2 * c1,
         0, 0, 0, 0]])
def jo_4(q):
    """Positional Jacobian of the joint-4 origin; depends on q[0..3]."""
    # Machine-generated symbolic expression — do not edit by hand.
    # NOTE(review): this expression uses the name ``numpy`` although the
    # module imports only ``numpy as np``; calling it raises NameError
    # unless ``import numpy`` is added to the file's imports.
    return numpy.array([[(0.37429*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + 0.37429*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0]) + (-0.0487903679018718*numpy.sin(q[0, 0]) - 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (-0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]) + (0.257634355725319*numpy.sin(q[0, 0]) + 0.257634355725319*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0]) + (0.264662997130313*numpy.sin(q[0, 0]) + 0.264662997130313*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]) + 0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]), 0.37429*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]) + (-0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]) - (0.257634355725319*numpy.sin(q[0, 0]) - 0.257634355725319*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0]) - (0.264662997130313*numpy.sin(q[0, 0]) - 0.264662997130313*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[3, 0]), (-0.37429*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + 0.37429*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]))*numpy.sin(q[3, 0]) - (-0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + (0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]), (-0.37429*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + 0.37429*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 
0]))*numpy.cos(q[3, 0]) - (0.264662997130313*numpy.sin(q[0, 0]) - 0.264662997130313*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]), 0, 0, 0], [(0.37429*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + 0.37429*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0]) + (-0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]) + (0.257634355725319*numpy.sin(q[0, 0]) - 0.257634355725319*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0]) + (0.264662997130313*numpy.sin(q[0, 0]) - 0.264662997130313*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]) + 0.0487903679018718*numpy.sin(q[0, 0]) - 0.0487903679018718*numpy.cos(q[0, 0]), -(-0.264662997130313*numpy.sin(q[0, 0]) - 0.264662997130313*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[3, 0]) - (-0.257634355725319*numpy.sin(q[0, 0]) - 0.257634355725319*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0]) + (0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]) + 0.37429*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]), (0.37429*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]) - 0.37429*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0]) + (0.0487903679018718*numpy.sin(q[0, 0]) - 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]) - (0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]), (-0.37429*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 
0])*numpy.cos(q[2, 0]) + 0.37429*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) - (-0.264662997130313*numpy.sin(q[0, 0]) - 0.264662997130313*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]), 0, 0, 0], [0, 0.37429*numpy.sin(q[1, 0])*numpy.sin(q[3, 0])*numpy.cos(q[2, 0]) + 0.069*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) - 0.37429*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]) - 0.36435*numpy.cos(q[1, 0]), 0.37429*numpy.sin(q[2, 0])*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]) + 0.069*numpy.sin(q[2, 0])*numpy.cos(q[1, 0]), 0.37429*numpy.sin(q[1, 0])*numpy.sin(q[3, 0]) - 0.37429*numpy.cos(q[1, 0])*numpy.cos(q[2, 0])*numpy.cos(q[3, 0]), 0, 0, 0]])
def jo_5(q):
    """Positional Jacobian of the joint-5 origin; depends on q[0..4]."""
    # Machine-generated symbolic expression — do not edit by hand.
    # NOTE(review): this expression uses the name ``numpy`` although the
    # module imports only ``numpy as np``; calling it raises NameError
    # unless ``import numpy`` is added to the file's imports.
    return numpy.array([[(0.01*((-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) + 0.01*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]))*numpy.cos(q[4, 0]) + (0.01*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]) + 0.01*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]))*numpy.sin(q[4, 0]) + (0.37429*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + 0.37429*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0]) + (-0.0487903679018718*numpy.sin(q[0, 0]) - 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (-0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]) + (0.257634355725319*numpy.sin(q[0, 0]) + 0.257634355725319*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0]) + (0.264662997130313*numpy.sin(q[0, 0]) + 0.264662997130313*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]) + 0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]), (-0.01*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[3, 0]) + 0.01*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[2, 0])*numpy.cos(q[3, 0]))*numpy.cos(q[4, 0]) + 0.37429*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]) + (-0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]) - 
(0.257634355725319*numpy.sin(q[0, 0]) - 0.257634355725319*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0]) - (0.264662997130313*numpy.sin(q[0, 0]) - 0.264662997130313*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[3, 0]) + 0.01*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0])*numpy.sin(q[4, 0])*numpy.cos(q[1, 0]), 0.01*(-(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]))*numpy.cos(q[3, 0])*numpy.cos(q[4, 0]) + (-0.37429*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + 0.37429*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]))*numpy.sin(q[3, 0]) + (0.01*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) - 0.01*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[4, 0]) - (-0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + (0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]), (-0.01*(-(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0]) + 0.01*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]))*numpy.cos(q[4, 0]) + (-0.37429*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + 0.37429*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) - 
(0.264662997130313*numpy.sin(q[0, 0]) - 0.264662997130313*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]), -(0.01*(-(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) - 0.01*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]))*numpy.sin(q[4, 0]) + (0.01*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + 0.01*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]))*numpy.cos(q[4, 0]), 0, 0], [(0.01*((-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) + 0.01*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]))*numpy.cos(q[4, 0]) + (0.37429*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + 0.37429*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0]) + (0.01*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + 0.01*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]))*numpy.sin(q[4, 0]) + (-0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]) + (0.257634355725319*numpy.sin(q[0, 0]) - 0.257634355725319*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0]) + 
(0.264662997130313*numpy.sin(q[0, 0]) - 0.264662997130313*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]) + 0.0487903679018718*numpy.sin(q[0, 0]) - 0.0487903679018718*numpy.cos(q[0, 0]), (-0.01*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[3, 0]) + 0.01*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[2, 0])*numpy.cos(q[3, 0]))*numpy.cos(q[4, 0]) + 0.01*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0])*numpy.sin(q[4, 0])*numpy.cos(q[1, 0]) - (-0.264662997130313*numpy.sin(q[0, 0]) - 0.264662997130313*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[3, 0]) - (-0.257634355725319*numpy.sin(q[0, 0]) - 0.257634355725319*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0]) + (0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]) + 0.37429*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]), (0.37429*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]) - 0.37429*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0]) + 0.01*((0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]) - (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0])*numpy.cos(q[4, 0]) + (0.01*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) - 0.01*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[4, 0]) + (0.0487903679018718*numpy.sin(q[0, 0]) - 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]) - 
(0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]), (-0.01*(-(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0]) + 0.01*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]))*numpy.cos(q[4, 0]) + (-0.37429*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + 0.37429*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) - (-0.264662997130313*numpy.sin(q[0, 0]) - 0.264662997130313*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]), -(0.01*(-(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) - 0.01*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]))*numpy.sin(q[4, 0]) + (0.01*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + 0.01*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]))*numpy.cos(q[4, 0]), 0, 0], [0, (0.01*numpy.sin(q[1, 0])*numpy.cos(q[2, 0])*numpy.cos(q[3, 0]) + 0.01*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]))*numpy.cos(q[4, 0]) - 0.01*numpy.sin(q[1, 0])*numpy.sin(q[2, 0])*numpy.sin(q[4, 0]) + 0.37429*numpy.sin(q[1, 0])*numpy.sin(q[3, 0])*numpy.cos(q[2, 0]) + 0.069*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) - 0.37429*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]) - 0.36435*numpy.cos(q[1, 0]), 0.37429*numpy.sin(q[2, 0])*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]) + 
0.01*numpy.sin(q[2, 0])*numpy.cos(q[1, 0])*numpy.cos(q[3, 0])*numpy.cos(q[4, 0]) + 0.069*numpy.sin(q[2, 0])*numpy.cos(q[1, 0]) + 0.01*numpy.sin(q[4, 0])*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]), (0.01*numpy.sin(q[1, 0])*numpy.cos(q[3, 0]) + 0.01*numpy.sin(q[3, 0])*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]))*numpy.cos(q[4, 0]) + 0.37429*numpy.sin(q[1, 0])*numpy.sin(q[3, 0]) - 0.37429*numpy.cos(q[1, 0])*numpy.cos(q[2, 0])*numpy.cos(q[3, 0]), -(0.01*numpy.sin(q[1, 0])*numpy.sin(q[3, 0]) - 0.01*numpy.cos(q[1, 0])*numpy.cos(q[2, 0])*numpy.cos(q[3, 0]))*numpy.sin(q[4, 0]) + 0.01*numpy.sin(q[2, 0])*numpy.cos(q[1, 0])*numpy.cos(q[4, 0]), 0, 0]])
def jo_6(q):
    """Auto-generated Jacobian block for frame 6 of the manipulator.

    Machine-generated (symbolic-codegen) closed-form expression; do not
    edit by hand — regenerate from the symbolic model instead.

    Parameters
    ----------
    q : numpy.ndarray
        Joint-position column vector; this function reads ``q[0, 0]``
        through ``q[4, 0]``, so ``q`` must have shape ``(n, 1)`` with
        ``n >= 5``.

    Returns
    -------
    numpy.ndarray
        A ``(3, 7)`` array. The last two columns of every row are zero,
        and the third row's first column is zero (joints 6 and 7 do not
        contribute to this quantity).

    NOTE(review): by analogy with the sibling ``jo_ee`` this is
    presumably the translational (origin) Jacobian of link/frame 6 with
    link-geometry constants (0.37429, 0.36435, 0.069, ...) baked in —
    confirm against the generating script.
    """
    # Repeated 0.707106781186548 factors are sqrt(2)/2, i.e. a fixed
    # 45-degree base-frame rotation folded into the generated constants.
    return numpy.array([[(0.01*((-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) + 0.01*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]))*numpy.cos(q[4, 0]) + (0.01*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]) + 0.01*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]))*numpy.sin(q[4, 0]) + (0.37429*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + 0.37429*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0]) + (-0.0487903679018718*numpy.sin(q[0, 0]) - 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (-0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]) + (0.257634355725319*numpy.sin(q[0, 0]) + 0.257634355725319*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0]) + (0.264662997130313*numpy.sin(q[0, 0]) + 0.264662997130313*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]) + 0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]), (-0.01*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[3, 0]) + 0.01*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[2, 0])*numpy.cos(q[3, 0]))*numpy.cos(q[4, 0]) + 0.37429*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]) + (-0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]) - 
(0.257634355725319*numpy.sin(q[0, 0]) - 0.257634355725319*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0]) - (0.264662997130313*numpy.sin(q[0, 0]) - 0.264662997130313*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[3, 0]) + 0.01*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0])*numpy.sin(q[4, 0])*numpy.cos(q[1, 0]), 0.01*(-(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]))*numpy.cos(q[3, 0])*numpy.cos(q[4, 0]) + (-0.37429*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + 0.37429*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]))*numpy.sin(q[3, 0]) + (0.01*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) - 0.01*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[4, 0]) - (-0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + (0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]), (-0.01*(-(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0]) + 0.01*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]))*numpy.cos(q[4, 0]) + (-0.37429*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + 0.37429*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) - 
(0.264662997130313*numpy.sin(q[0, 0]) - 0.264662997130313*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]), -(0.01*(-(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) - 0.01*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]))*numpy.sin(q[4, 0]) + (0.01*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + 0.01*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]))*numpy.cos(q[4, 0]), 0, 0], [(0.01*((-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) + 0.01*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]))*numpy.cos(q[4, 0]) + (0.37429*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + 0.37429*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0]) + (0.01*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + 0.01*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]))*numpy.sin(q[4, 0]) + (-0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]) + (0.257634355725319*numpy.sin(q[0, 0]) - 0.257634355725319*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0]) + 
(0.264662997130313*numpy.sin(q[0, 0]) - 0.264662997130313*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]) + 0.0487903679018718*numpy.sin(q[0, 0]) - 0.0487903679018718*numpy.cos(q[0, 0]), (-0.01*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[3, 0]) + 0.01*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[2, 0])*numpy.cos(q[3, 0]))*numpy.cos(q[4, 0]) + 0.01*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0])*numpy.sin(q[4, 0])*numpy.cos(q[1, 0]) - (-0.264662997130313*numpy.sin(q[0, 0]) - 0.264662997130313*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[3, 0]) - (-0.257634355725319*numpy.sin(q[0, 0]) - 0.257634355725319*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0]) + (0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]) + 0.37429*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]), (0.37429*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]) - 0.37429*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0]) + 0.01*((0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]) - (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0])*numpy.cos(q[4, 0]) + (0.01*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) - 0.01*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[4, 0]) + (0.0487903679018718*numpy.sin(q[0, 0]) - 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]) - 
(0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]), (-0.01*(-(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0]) + 0.01*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]))*numpy.cos(q[4, 0]) + (-0.37429*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + 0.37429*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) - (-0.264662997130313*numpy.sin(q[0, 0]) - 0.264662997130313*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]), -(0.01*(-(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) - 0.01*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]))*numpy.sin(q[4, 0]) + (0.01*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + 0.01*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]))*numpy.cos(q[4, 0]), 0, 0], [0, (0.01*numpy.sin(q[1, 0])*numpy.cos(q[2, 0])*numpy.cos(q[3, 0]) + 0.01*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]))*numpy.cos(q[4, 0]) - 0.01*numpy.sin(q[1, 0])*numpy.sin(q[2, 0])*numpy.sin(q[4, 0]) + 0.37429*numpy.sin(q[1, 0])*numpy.sin(q[3, 0])*numpy.cos(q[2, 0]) + 0.069*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) - 0.37429*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]) - 0.36435*numpy.cos(q[1, 0]), 0.37429*numpy.sin(q[2, 0])*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]) + 
0.01*numpy.sin(q[2, 0])*numpy.cos(q[1, 0])*numpy.cos(q[3, 0])*numpy.cos(q[4, 0]) + 0.069*numpy.sin(q[2, 0])*numpy.cos(q[1, 0]) + 0.01*numpy.sin(q[4, 0])*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]), (0.01*numpy.sin(q[1, 0])*numpy.cos(q[3, 0]) + 0.01*numpy.sin(q[3, 0])*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]))*numpy.cos(q[4, 0]) + 0.37429*numpy.sin(q[1, 0])*numpy.sin(q[3, 0]) - 0.37429*numpy.cos(q[1, 0])*numpy.cos(q[2, 0])*numpy.cos(q[3, 0]), -(0.01*numpy.sin(q[1, 0])*numpy.sin(q[3, 0]) - 0.01*numpy.cos(q[1, 0])*numpy.cos(q[2, 0])*numpy.cos(q[3, 0]))*numpy.sin(q[4, 0]) + 0.01*numpy.sin(q[2, 0])*numpy.cos(q[1, 0])*numpy.cos(q[4, 0]), 0, 0]])
def jo_ee(q):
return numpy.array([[(0.3683*(((-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) + (-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]))*numpy.cos(q[4, 0]) + 0.3683*((-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]))*numpy.sin(q[4, 0]))*numpy.sin(q[5, 0]) + (0.3683*((-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0]) + 0.3683*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]))*numpy.cos(q[5, 0]) + (0.01*((-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) + 0.01*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]))*numpy.cos(q[4, 0]) + (0.01*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]) + 0.01*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]))*numpy.sin(q[4, 0]) + (0.37429*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + 0.37429*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0]) + (-0.0487903679018718*numpy.sin(q[0, 
0]) - 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (-0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]) + (0.257634355725319*numpy.sin(q[0, 0]) + 0.257634355725319*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0]) + (0.264662997130313*numpy.sin(q[0, 0]) + 0.264662997130313*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]) + 0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]), (0.3683*(-(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[3, 0]) + (-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[2, 0])*numpy.cos(q[3, 0]))*numpy.cos(q[4, 0]) + 0.3683*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0])*numpy.sin(q[4, 0])*numpy.cos(q[1, 0]))*numpy.sin(q[5, 0]) + (-0.01*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[3, 0]) + 0.01*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[2, 0])*numpy.cos(q[3, 0]))*numpy.cos(q[4, 0]) + (0.3683*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]) - 0.3683*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[3, 0]))*numpy.cos(q[5, 0]) + 0.37429*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]) + (-0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]) - (0.257634355725319*numpy.sin(q[0, 0]) - 0.257634355725319*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0]) - (0.264662997130313*numpy.sin(q[0, 0]) - 0.264662997130313*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[3, 0]) + 
0.01*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0])*numpy.sin(q[4, 0])*numpy.cos(q[1, 0]), (0.3683*(-(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]))*numpy.cos(q[3, 0])*numpy.cos(q[4, 0]) + 0.3683*((0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) - (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[4, 0]))*numpy.sin(q[5, 0]) + 0.3683*(-(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[5, 0]) + 0.01*(-(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]))*numpy.cos(q[3, 0])*numpy.cos(q[4, 0]) + (-0.37429*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + 0.37429*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]))*numpy.sin(q[3, 0]) + (0.01*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) - 0.01*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[4, 0]) - (-0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + (0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]), 0.3683*(-(-(0.707106781186548*numpy.sin(q[0, 0]) - 
0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0]) + (-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]))*numpy.sin(q[5, 0])*numpy.cos(q[4, 0]) + (-0.01*(-(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0]) + 0.01*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]))*numpy.cos(q[4, 0]) + (0.3683*(-(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) - 0.3683*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]))*numpy.cos(q[5, 0]) + (-0.37429*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + 0.37429*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) - (0.264662997130313*numpy.sin(q[0, 0]) - 0.264662997130313*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]), (-0.3683*((-(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) - (0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]))*numpy.sin(q[4, 0]) + 0.3683*((0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 
0])*numpy.sin(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]))*numpy.cos(q[4, 0]))*numpy.sin(q[5, 0]) - (0.01*(-(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) - 0.01*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]))*numpy.sin(q[4, 0]) + (0.01*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + 0.01*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]))*numpy.cos(q[4, 0]), (0.3683*((-(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) - (0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]))*numpy.cos(q[4, 0]) + 0.3683*((0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]))*numpy.sin(q[4, 0]))*numpy.cos(q[5, 0]) - (0.3683*(-(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0]) + 0.3683*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]))*numpy.sin(q[5, 0]), 0], [(0.3683*(((-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + 
(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) + (-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]))*numpy.cos(q[4, 0]) + 0.3683*((0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]))*numpy.sin(q[4, 0]))*numpy.sin(q[5, 0]) + (0.3683*((-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0]) + 0.3683*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]))*numpy.cos(q[5, 0]) + (0.01*((-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) + 0.01*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]))*numpy.cos(q[4, 0]) + (0.37429*(-0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + 0.37429*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0]) + (0.01*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + 0.01*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]))*numpy.sin(q[4, 0]) + (-0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.0487903679018718*numpy.sin(q[0, 0]) + 
0.0487903679018718*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]) + (0.257634355725319*numpy.sin(q[0, 0]) - 0.257634355725319*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0]) + (0.264662997130313*numpy.sin(q[0, 0]) - 0.264662997130313*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]) + 0.0487903679018718*numpy.sin(q[0, 0]) - 0.0487903679018718*numpy.cos(q[0, 0]), (0.3683*(-(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[3, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[2, 0])*numpy.cos(q[3, 0]))*numpy.cos(q[4, 0]) + 0.3683*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0])*numpy.sin(q[4, 0])*numpy.cos(q[1, 0]))*numpy.sin(q[5, 0]) + (-0.3683*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[3, 0]) + 0.3683*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]))*numpy.cos(q[5, 0]) + (-0.01*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[3, 0]) + 0.01*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[2, 0])*numpy.cos(q[3, 0]))*numpy.cos(q[4, 0]) + 0.01*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0])*numpy.sin(q[4, 0])*numpy.cos(q[1, 0]) - (-0.264662997130313*numpy.sin(q[0, 0]) - 0.264662997130313*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[3, 0]) - (-0.257634355725319*numpy.sin(q[0, 0]) - 0.257634355725319*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0]) + (0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]) + 0.37429*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 
0])*numpy.cos(q[2, 0]), (0.37429*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]) - 0.37429*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0]) + 0.3683*((0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]) - (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[5, 0]) + 0.01*((0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]) - (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0])*numpy.cos(q[4, 0]) + (0.3683*((0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]) - (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0])*numpy.cos(q[4, 0]) + 0.3683*((-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) - (0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[4, 0]))*numpy.sin(q[5, 0]) + (0.01*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) - 0.01*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[4, 0]) + (0.0487903679018718*numpy.sin(q[0, 0]) - 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]) - (0.0487903679018718*numpy.sin(q[0, 0]) + 0.0487903679018718*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]), 0.3683*(-(-(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) - 
0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]))*numpy.sin(q[5, 0])*numpy.cos(q[4, 0]) + (-0.01*(-(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0]) + 0.01*(0.707106781186548*numpy.sin(q[0, 0]) + 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]))*numpy.cos(q[4, 0]) + (0.3683*(-(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) - 0.3683*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]))*numpy.cos(q[5, 0]) + (-0.37429*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + 0.37429*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) - (-0.264662997130313*numpy.sin(q[0, 0]) - 0.264662997130313*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]), (-0.3683*((-(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) - (-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]))*numpy.sin(q[4, 0]) + 0.3683*((-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 
0]))*numpy.cos(q[2, 0]))*numpy.cos(q[4, 0]))*numpy.sin(q[5, 0]) - (0.01*(-(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) - 0.01*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]))*numpy.sin(q[4, 0]) + (0.01*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + 0.01*(0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]))*numpy.cos(q[4, 0]), (0.3683*((-(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.cos(q[3, 0]) - (-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]))*numpy.cos(q[4, 0]) + 0.3683*((-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.sin(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[2, 0]))*numpy.sin(q[4, 0]))*numpy.cos(q[5, 0]) - (0.3683*(-(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) + (0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.sin(q[2, 0]))*numpy.sin(q[3, 0]) + 0.3683*(-0.707106781186548*numpy.sin(q[0, 0]) - 0.707106781186548*numpy.cos(q[0, 0]))*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]))*numpy.sin(q[5, 0]), 0], [0, (0.3683*(numpy.sin(q[1, 0])*numpy.cos(q[2, 0])*numpy.cos(q[3, 0]) + numpy.sin(q[3, 0])*numpy.cos(q[1, 0]))*numpy.cos(q[4, 0]) - 0.3683*numpy.sin(q[1, 0])*numpy.sin(q[2, 0])*numpy.sin(q[4, 0]))*numpy.sin(q[5, 0]) + (0.3683*numpy.sin(q[1, 
0])*numpy.sin(q[3, 0])*numpy.cos(q[2, 0]) - 0.3683*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]))*numpy.cos(q[5, 0]) + (0.01*numpy.sin(q[1, 0])*numpy.cos(q[2, 0])*numpy.cos(q[3, 0]) + 0.01*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]))*numpy.cos(q[4, 0]) - 0.01*numpy.sin(q[1, 0])*numpy.sin(q[2, 0])*numpy.sin(q[4, 0]) + 0.37429*numpy.sin(q[1, 0])*numpy.sin(q[3, 0])*numpy.cos(q[2, 0]) + 0.069*numpy.sin(q[1, 0])*numpy.cos(q[2, 0]) - 0.37429*numpy.cos(q[1, 0])*numpy.cos(q[3, 0]) - 0.36435*numpy.cos(q[1, 0]), (0.3683*numpy.sin(q[2, 0])*numpy.cos(q[1, 0])*numpy.cos(q[3, 0])*numpy.cos(q[4, 0]) + 0.3683*numpy.sin(q[4, 0])*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]))*numpy.sin(q[5, 0]) + 0.3683*numpy.sin(q[2, 0])*numpy.sin(q[3, 0])*numpy.cos(q[1, 0])*numpy.cos(q[5, 0]) + 0.37429*numpy.sin(q[2, 0])*numpy.sin(q[3, 0])*numpy.cos(q[1, 0]) + 0.01*numpy.sin(q[2, 0])*numpy.cos(q[1, 0])*numpy.cos(q[3, 0])*numpy.cos(q[4, 0]) + 0.069*numpy.sin(q[2, 0])*numpy.cos(q[1, 0]) + 0.01*numpy.sin(q[4, 0])*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]), (0.3683*numpy.sin(q[1, 0])*numpy.sin(q[3, 0]) - 0.3683*numpy.cos(q[1, 0])*numpy.cos(q[2, 0])*numpy.cos(q[3, 0]))*numpy.cos(q[5, 0]) + (0.01*numpy.sin(q[1, 0])*numpy.cos(q[3, 0]) + 0.01*numpy.sin(q[3, 0])*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]))*numpy.cos(q[4, 0]) + 0.3683*(numpy.sin(q[1, 0])*numpy.cos(q[3, 0]) + numpy.sin(q[3, 0])*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]))*numpy.sin(q[5, 0])*numpy.cos(q[4, 0]) + 0.37429*numpy.sin(q[1, 0])*numpy.sin(q[3, 0]) - 0.37429*numpy.cos(q[1, 0])*numpy.cos(q[2, 0])*numpy.cos(q[3, 0]), (-0.3683*(numpy.sin(q[1, 0])*numpy.sin(q[3, 0]) - numpy.cos(q[1, 0])*numpy.cos(q[2, 0])*numpy.cos(q[3, 0]))*numpy.sin(q[4, 0]) + 0.3683*numpy.sin(q[2, 0])*numpy.cos(q[1, 0])*numpy.cos(q[4, 0]))*numpy.sin(q[5, 0]) - (0.01*numpy.sin(q[1, 0])*numpy.sin(q[3, 0]) - 0.01*numpy.cos(q[1, 0])*numpy.cos(q[2, 0])*numpy.cos(q[3, 0]))*numpy.sin(q[4, 0]) + 0.01*numpy.sin(q[2, 0])*numpy.cos(q[1, 0])*numpy.cos(q[4, 0]), (0.3683*(numpy.sin(q[1, 0])*numpy.sin(q[3, 0]) - 
numpy.cos(q[1, 0])*numpy.cos(q[2, 0])*numpy.cos(q[3, 0]))*numpy.cos(q[4, 0]) + 0.3683*numpy.sin(q[2, 0])*numpy.sin(q[4, 0])*numpy.cos(q[1, 0]))*numpy.cos(q[5, 0]) - (-0.3683*numpy.sin(q[1, 0])*numpy.cos(q[3, 0]) - 0.3683*numpy.sin(q[3, 0])*numpy.cos(q[1, 0])*numpy.cos(q[2, 0]))*numpy.sin(q[5, 0]), 0]])
| 1,972.346154
| 22,155
| 0.629434
| 10,801
| 51,281
| 2.987501
| 0.003518
| 0.103322
| 0.240145
| 0.148134
| 0.996405
| 0.994825
| 0.994608
| 0.994453
| 0.994205
| 0.993864
| 0
| 0.346143
| 0.068368
| 51,281
| 25
| 22,156
| 2,051.24
| 0.329273
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 13
|
d7c3d65be25a07ed197194379f9ba747b3b6336a
| 3,103
|
py
|
Python
|
mailman2/mailmanapi.py
|
edinburghhacklab/hackdb
|
3ec7d66039705aa511dd6559196fa51a53b3a110
|
[
"MIT"
] | null | null | null |
mailman2/mailmanapi.py
|
edinburghhacklab/hackdb
|
3ec7d66039705aa511dd6559196fa51a53b3a110
|
[
"MIT"
] | null | null | null |
mailman2/mailmanapi.py
|
edinburghhacklab/hackdb
|
3ec7d66039705aa511dd6559196fa51a53b3a110
|
[
"MIT"
] | null | null | null |
# SPDX-FileCopyrightText: 2022 Tim Hawes <me@timhawes.com>
#
# SPDX-License-Identifier: MIT
import requests
from django.conf import settings
def get_list(list_name):
    """Fetch the configuration record for a single mailing list.

    Args:
        list_name: Name of the Mailman list.

    Returns:
        The decoded JSON payload returned by the Mailman API.
    """
    url = f"{settings.MAILMAN_API_URL}/lists/{list_name}"
    credentials = (settings.MAILMAN_API_USERNAME, settings.MAILMAN_API_PASSWORD)
    response = requests.get(url, auth=credentials)
    return response.json()
def get_lists():
    """Fetch all mailing lists known to the Mailman API.

    Returns:
        The decoded JSON payload of the ``/lists`` endpoint.
    """
    credentials = (settings.MAILMAN_API_USERNAME, settings.MAILMAN_API_PASSWORD)
    response = requests.get(f"{settings.MAILMAN_API_URL}/lists", auth=credentials)
    return response.json()
def get_list_member_data(list_name, email):
    """Fetch membership details for one address on one list.

    Args:
        list_name: Name of the Mailman list.
        email: Address whose membership record is requested.

    Returns:
        The decoded JSON record, or ``None`` when the API responds with an
        error status (a falsy ``requests.Response``).
    """
    url = f"{settings.MAILMAN_API_URL}/lists/{list_name}/members/{email}"
    credentials = (settings.MAILMAN_API_USERNAME, settings.MAILMAN_API_PASSWORD)
    response = requests.get(url, auth=credentials)
    return response.json() if response else None
def is_subscribed(list_name, email):
    """Return whether *email* is subscribed to *list_name*.

    Args:
        list_name: Name of the Mailman list.
        email: Address to look up.

    Returns:
        ``True`` when the members endpoint answers with a success status,
        ``False`` otherwise (e.g. 404 for a non-member).
    """
    response = requests.get(
        f"{settings.MAILMAN_API_URL}/lists/{list_name}/members/{email}",
        auth=(settings.MAILMAN_API_USERNAME, settings.MAILMAN_API_PASSWORD),
    )
    # requests.Response truthiness equals Response.ok, so the original
    # `if response: return True / else: return False` collapses to this.
    return bool(response)
def get_list_members(list_name):
    """Fetch the member roster of a mailing list.

    Args:
        list_name: Name of the Mailman list.

    Returns:
        The decoded JSON roster, or an empty list when the API responds
        with an error status.
    """
    url = f"{settings.MAILMAN_API_URL}/lists/{list_name}/members"
    credentials = (settings.MAILMAN_API_USERNAME, settings.MAILMAN_API_PASSWORD)
    response = requests.get(url, auth=credentials)
    return response.json() if response else []
def get_member(email):
    """Fetch the global member record for an address.

    Args:
        email: Address to look up across all lists.

    Returns:
        The decoded JSON record, or ``None`` when the API responds with an
        error status.
    """
    url = f"{settings.MAILMAN_API_URL}/members/{email}"
    credentials = (settings.MAILMAN_API_USERNAME, settings.MAILMAN_API_PASSWORD)
    response = requests.get(url, auth=credentials)
    return response.json() if response else None
def subscribe(list_name, email):
    """Subscribe *email* to *list_name*.

    Args:
        list_name: Name of the Mailman list.
        email: Address to add.

    Returns:
        ``True`` on an HTTP 200 reply, ``False`` otherwise.
    """
    response = requests.post(
        f"{settings.MAILMAN_API_URL}/lists/{list_name}/members/{email}",
        auth=(settings.MAILMAN_API_USERNAME, settings.MAILMAN_API_PASSWORD),
    )
    # The original `response and response.status_code == 200` is redundant:
    # a falsy (error) response can never carry status 200, so the status
    # comparison alone yields the identical result.
    return response.status_code == 200
def unsubscribe(list_name, email):
    """Remove *email* from *list_name*.

    Args:
        list_name: Name of the Mailman list.
        email: Address to remove.

    Returns:
        ``True`` on an HTTP 200 reply, ``False`` otherwise.
    """
    response = requests.delete(
        f"{settings.MAILMAN_API_URL}/lists/{list_name}/members/{email}",
        auth=(settings.MAILMAN_API_USERNAME, settings.MAILMAN_API_PASSWORD),
    )
    # A falsy (error) response never has status 200, so the original
    # `response and status == 200` guard is redundant.
    return response.status_code == 200
def change_address(list_name, old_address, new_address):
    """Change a member's address on a single list.

    Args:
        list_name: Name of the Mailman list.
        old_address: Currently subscribed address.
        new_address: Replacement address.

    Returns:
        ``True`` on an HTTP 200 reply, ``False`` otherwise.
    """
    url = f"{settings.MAILMAN_API_URL}/lists/{list_name}/members/{old_address}"
    credentials = (settings.MAILMAN_API_USERNAME, settings.MAILMAN_API_PASSWORD)
    response = requests.patch(url, auth=credentials, json={"address": new_address})
    if not response:
        return False
    return response.status_code == 200
def global_change_address(old_address, new_address):
    """Change a member's address across all lists at once.

    Args:
        old_address: Currently registered address.
        new_address: Replacement address.

    Returns:
        ``True`` on an HTTP 200 reply, ``False`` otherwise.
    """
    url = f"{settings.MAILMAN_API_URL}/members/{old_address}/change_address"
    credentials = (settings.MAILMAN_API_USERNAME, settings.MAILMAN_API_PASSWORD)
    response = requests.post(url, auth=credentials, json={"new_address": new_address})
    if not response:
        return False
    return response.status_code == 200
| 27.954955
| 78
| 0.680309
| 378
| 3,103
| 5.320106
| 0.150794
| 0.223769
| 0.268523
| 0.09448
| 0.856788
| 0.835903
| 0.793635
| 0.793635
| 0.762805
| 0.694182
| 0
| 0.006539
| 0.211408
| 3,103
| 110
| 79
| 28.209091
| 0.815284
| 0.027393
| 0
| 0.639535
| 0
| 0
| 0.184804
| 0.178832
| 0
| 0
| 0
| 0
| 0
| 1
| 0.116279
| false
| 0.116279
| 0.023256
| 0.023256
| 0.348837
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
d7d2bf124a31fe521ab6732f67afc6b1daa7633f
| 1,716
|
py
|
Python
|
server/commands/build.py
|
ivan0313/Facial-Recognition-Database-Management-System
|
0e3693ed7308733b4b2f9155ae86bb35d67299af
|
[
"MIT"
] | 6
|
2021-09-13T13:45:49.000Z
|
2021-12-20T15:36:10.000Z
|
server/commands/build.py
|
ivan-ngchakming/Facial-Recognition-Database-Management-System
|
5b5409822db482d5c9f7d7f71538cfa614633843
|
[
"MIT"
] | 31
|
2021-09-11T05:52:56.000Z
|
2021-11-07T14:35:41.000Z
|
server/commands/build.py
|
ivan-ngchakming/Facial-Recognition-Database-Management-System
|
5b5409822db482d5c9f7d7f71538cfa614633843
|
[
"MIT"
] | 2
|
2021-09-13T04:08:05.000Z
|
2021-09-26T04:06:53.000Z
|
import os
import timeit
from flask.cli import AppGroup
import click
# Flask CLI command group named "build"; subcommands below register on it.
cli = AppGroup("build", short_help="Building the application")
def create_build_dir():
    """Ensure the PyInstaller build directory exists.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of the original
    ``isdir``-then-``mkdir`` pair, which had a check-then-create race and
    would raise if the directory appeared between the two calls.
    """
    os.makedirs("pyinstaller_build", exist_ok=True)
@cli.command()
@click.option("-r", "--build-react", is_flag=True, default=False)
@click.option("-c", "--clean", is_flag=True, default=False)
def windows(build_react, clean):
    """Build the Windows executable with PyInstaller.

    Optionally rebuilds the React frontend first, then runs PyInstaller on
    ``wsgi.spec`` and copies the spec file next to the build output.
    """
    import PyInstaller.__main__

    started_at = timeit.default_timer()
    create_build_dir()
    if build_react:
        os.system("yarn build")
    # PyInstaller arguments: the spec file, plus --clean when requested.
    args = ["wsgi.spec"]
    if clean:
        args.append("--clean")
    # Snapshot the spec before the build so it can be written into dist/.
    with open(args[0], "r") as spec_file:
        spec_text = spec_file.read()
    PyInstaller.__main__.run(args)
    with open(f"dist/{args[0]}", "w") as out_file:
        out_file.write(spec_text)
    print(f"Build finished in {timeit.default_timer()-started_at:.2f}s")
@cli.command()
@click.option("-r", "--build-react", is_flag=True, default=False)
@click.option("-c", "--clean", is_flag=True, default=False)
def mac(build_react, clean):
    """Build the macOS executable with PyInstaller.

    Optionally rebuilds the React frontend first, then runs PyInstaller on
    ``wsgi_mac.spec`` and copies the spec file next to the build output.
    """
    import PyInstaller.__main__

    t0 = timeit.default_timer()
    create_build_dir()
    if build_react:
        os.system("yarn build")
    # PyInstaller arguments: the mac spec file, plus --clean when requested.
    pyinstaller_args = ["wsgi_mac.spec"]
    if clean:
        pyinstaller_args.append("--clean")
    # Snapshot the spec before the build so it can be written into dist/.
    with open(pyinstaller_args[0], "r") as src:
        spec_content = src.read()
    PyInstaller.__main__.run(pyinstaller_args)
    with open(f"dist/{pyinstaller_args[0]}", "w") as dst:
        dst.write(spec_content)
    print(f"Build finished in {timeit.default_timer()-t0:.2f}s")
| 22.88
| 67
| 0.655012
| 230
| 1,716
| 4.708696
| 0.282609
| 0.055402
| 0.036934
| 0.062789
| 0.829178
| 0.829178
| 0.829178
| 0.829178
| 0.829178
| 0.829178
| 0
| 0.004354
| 0.19697
| 1,716
| 74
| 68
| 23.189189
| 0.781567
| 0.079837
| 0
| 0.711111
| 0
| 0
| 0.186785
| 0.044473
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.2
| 0.044444
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d7d747736127584c691afa077e404aa531e2c0b7
| 863
|
py
|
Python
|
tests/unit/format/test_format.py
|
matthewgdv/pathmagic
|
fe5788e02cfa7397cc0ef45ea7a0c5549ca4f261
|
[
"MIT"
] | null | null | null |
tests/unit/format/test_format.py
|
matthewgdv/pathmagic
|
fe5788e02cfa7397cc0ef45ea7a0c5549ca4f261
|
[
"MIT"
] | 1
|
2021-02-08T10:48:05.000Z
|
2021-02-08T10:48:05.000Z
|
tests/unit/format/test_format.py
|
matthewgdv/pathmagic
|
fe5788e02cfa7397cc0ef45ea7a0c5549ca4f261
|
[
"MIT"
] | null | null | null |
# import pytest
class TestFormatHandler:
    """Placeholder unit tests for the FormatHandler API.

    Bodies are ``assert True`` stubs; the ``# synced`` markers indicate they
    are kept in step with the source by a sync tool — presumably one stub per
    public method of FormatHandler (TODO confirm against the sync tooling).
    """

    def test_read(self): # synced
        assert True
    def test_write(self): # synced
        assert True
    def test_append(self): # synced
        assert True
    def test_read_help(self): # synced
        assert True
    def test_write_help(self): # synced
        assert True
    def test__ensure_format(self): # synced
        assert True
    def test_add_format(self): # synced
        assert True
class TestFormatMeta:
    """Placeholder unit test stub for FormatMeta's ``__new__`` (auto-synced)."""

    def test___new__(self): # synced
        assert True
class TestFormat:
    """Placeholder unit tests for the Format API.

    Bodies are ``assert True`` stubs maintained by a sync tool (see the
    ``# synced`` markers); real assertions are yet to be written.
    """

    def test_initialize(self): # synced
        assert True
    def test_read(self): # synced
        assert True
    def test_read_help(self): # synced
        assert True
    def test_write(self): # synced
        assert True
    def test_write_help(self): # synced
        assert True
| 18.361702
| 44
| 0.61993
| 105
| 863
| 4.866667
| 0.2
| 0.178082
| 0.407045
| 0.508806
| 0.804305
| 0.682975
| 0.630137
| 0.577299
| 0.577299
| 0.534247
| 0
| 0
| 0.31518
| 863
| 46
| 45
| 18.76087
| 0.864636
| 0.12051
| 0
| 0.724138
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.448276
| 1
| 0.448276
| false
| 0
| 0
| 0
| 0.551724
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
cc0be2f3533641a0121542ea7b30cb61571efd9c
| 10,260
|
py
|
Python
|
alembic/versions/50979d8ef680_add_aadp_purchase_steps.py
|
albertwo1978/atst
|
60d0c688b328bf3385b07885ff33d215e42ac395
|
[
"MIT"
] | 1
|
2020-01-16T16:15:52.000Z
|
2020-01-16T16:15:52.000Z
|
alembic/versions/50979d8ef680_add_aadp_purchase_steps.py
|
albertwo1978/atst
|
60d0c688b328bf3385b07885ff33d215e42ac395
|
[
"MIT"
] | null | null | null |
alembic/versions/50979d8ef680_add_aadp_purchase_steps.py
|
albertwo1978/atst
|
60d0c688b328bf3385b07885ff33d215e42ac395
|
[
"MIT"
] | null | null | null |
"""Add AADP Purchase Steps
Revision ID: 50979d8ef680
Revises: cd7e3f9a5d64
Create Date: 2020-01-30 17:00:27.916639
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# Alembic migration chain: this revision's id and its parent revision.
# The allowlist pragmas keep secret scanners from flagging the hex ids.
revision = "50979d8ef680" # pragma: allowlist secret
down_revision = "cd7e3f9a5d64" # pragma: allowlist secret
branch_labels = None  # not the head of a named branch
depends_on = None  # no dependency on revisions outside the chain
# Every FSM stage contributes exactly three states, in this fixed order.
LIFECYCLE_SUFFIXES = ("CREATED", "IN_PROGRESS", "FAILED")

# Stage names shared by the old and new enum, split around the point where
# this migration inserts the new AADP product-purchase stages. Order matters:
# it reproduces the original hand-written enum value order exactly.
STAGES_BEFORE_PURCHASE = (
    "TENANT",
    "BILLING_PROFILE_CREATION",
    "BILLING_PROFILE_VERIFICATION",
    "BILLING_PROFILE_TENANT_ACCESS",
    "TASK_ORDER_BILLING_CREATION",
    "TASK_ORDER_BILLING_VERIFICATION",
    "BILLING_INSTRUCTION",
)
PURCHASE_STAGES = (
    "PRODUCT_PURCHASE",
    "PRODUCT_PURCHASE_VERIFICATION",
)
STAGES_AFTER_PURCHASE = (
    "TENANT_PRINCIPAL_APP",
    "TENANT_PRINCIPAL",
    "TENANT_PRINCIPAL_CREDENTIAL",
    "ADMIN_ROLE_DEFINITION",
    "PRINCIPAL_ADMIN_ROLE",
    "TENANT_ADMIN_OWNERSHIP",
    "TENANT_PRINCIPAL_OWNERSHIP",
)


def fsm_state_names(include_purchase_steps):
    """Return the ordered list of fsmstates enum member names.

    include_purchase_steps: when True, the six PRODUCT_PURCHASE* states added
    by this migration are included (53 names); when False the pre-migration
    list is returned (47 names).
    """
    stages = STAGES_BEFORE_PURCHASE
    if include_purchase_steps:
        stages = stages + PURCHASE_STAGES
    stages = stages + STAGES_AFTER_PURCHASE
    names = ["UNSTARTED", "STARTING", "STARTED", "COMPLETED", "FAILED"]
    for stage in stages:
        for suffix in LIFECYCLE_SUFFIXES:
            names.append(stage + "_" + suffix)
    return names


def fsm_enum(include_purchase_steps):
    """Build the non-native fsmstates Enum type for alter_column."""
    return sa.Enum(
        *fsm_state_names(include_purchase_steps),
        name="fsmstates",
        native_enum=False,
    )


def upgrade():
    # Widen portfolio_state_machines.state with the PRODUCT_PURCHASE* states.
    # (native_enum=False means this is a VARCHAR + CHECK-style enum, so no
    # database enum type has to be altered in place.)
    op.alter_column(
        "portfolio_state_machines",
        "state",
        type_=fsm_enum(True),
        existing_type=fsm_enum(False),
        existing_nullable=False,
    )


def downgrade():
    # Revert to the enum without the PRODUCT_PURCHASE* states. Rows currently
    # in one of those states are not rewritten here.
    op.alter_column(
        "portfolio_state_machines",
        "state",
        type_=fsm_enum(False),
        existing_type=fsm_enum(True),
        existing_nullable=False,
    )
| 40.714286
| 65
| 0.639961
| 908
| 10,260
| 6.569383
| 0.092511
| 0.100587
| 0.064376
| 0.067058
| 0.949874
| 0.949874
| 0.949874
| 0.949874
| 0.949874
| 0.949874
| 0
| 0.00685
| 0.288596
| 10,260
| 251
| 66
| 40.876494
| 0.810385
| 0.0346
| 0
| 0.957265
| 0
| 0
| 0.603689
| 0.562538
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008547
| false
| 0
| 0.008547
| 0
| 0.017094
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
cc237477fe9f6f14cc35347d43bfe82e1cc6f0e5
| 27
|
py
|
Python
|
pcep/prac_15.py
|
gliverm/devnet-study-group
|
28aecef8207cfeb8f10dc375c22e5ec953d6762b
|
[
"MIT"
] | 1
|
2020-07-30T15:23:55.000Z
|
2020-07-30T15:23:55.000Z
|
pcep/prac_15.py
|
gliverm/devnet-study-group
|
28aecef8207cfeb8f10dc375c22e5ec953d6762b
|
[
"MIT"
] | null | null | null |
pcep/prac_15.py
|
gliverm/devnet-study-group
|
28aecef8207cfeb8f10dc375c22e5ec953d6762b
|
[
"MIT"
] | null | null | null |
# PCEP practice: the two division operators and their precedence.
# 1 // 5 floor-divides to 0, 1 / 5 true-divides to 0.2, and both bind more
# tightly than +, so x ends up as 0.2.
x = (1 // 5) + (1 / 5)
print(x)
| 13.5
| 18
| 0.407407
| 7
| 27
| 1.571429
| 0.571429
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 0.333333
| 27
| 2
| 19
| 13.5
| 0.388889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
0bcf303aa7be7aa06c7ec27ca17cca503d137b38
| 637
|
py
|
Python
|
src/real_world/__init__.py
|
yotaro-shimose/cprl-solver
|
c50a9c101340a0629b6f09015b8dc33318ce081b
|
[
"MIT"
] | null | null | null |
src/real_world/__init__.py
|
yotaro-shimose/cprl-solver
|
c50a9c101340a0629b6f09015b8dc33318ce081b
|
[
"MIT"
] | null | null | null |
src/real_world/__init__.py
|
yotaro-shimose/cprl-solver
|
c50a9c101340a0629b6f09015b8dc33318ce081b
|
[
"MIT"
] | null | null | null |
from src.real_world.conversion import REAL_X_RANGE
from src.real_world.conversion import REAL_Y_RANGE
from src.real_world.conversion import VEHICLE_SPEED
from src.real_world.conversion import EIGHT_HOURS
from src.real_world.conversion import calc_xy
from src.real_world.conversion import calc_lat_lon
from src.real_world.conversion import to_virtual_coord
from src.real_world.conversion import to_virtual_time
from src.real_world.conversion import to_real_coord
from src.real_world.conversion import to_real_time
from src.real_world.generation import generate_field_instance
from src.real_world.generation import generate_field_dataset
| 45.5
| 61
| 0.8854
| 105
| 637
| 5.057143
| 0.247619
| 0.158192
| 0.248588
| 0.361582
| 0.911488
| 0.896422
| 0.836158
| 0.485876
| 0
| 0
| 0
| 0
| 0.076923
| 637
| 13
| 62
| 49
| 0.903061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
f0a8a862eeeaf5cf264884fa309aeb26f589d352
| 1,096
|
py
|
Python
|
foolbox/tests/test_attacks_saliency.py
|
schoyc/foolbox
|
e0f400be8fa393467d3db598576e985727edb310
|
[
"MIT"
] | 4
|
2021-01-07T12:33:36.000Z
|
2022-03-12T07:16:43.000Z
|
foolbox/tests/test_attacks_saliency.py
|
alvarorobledo/foolbox
|
25d995b1a50f4926e07bc51877d385c0518982f8
|
[
"MIT"
] | null | null | null |
foolbox/tests/test_attacks_saliency.py
|
alvarorobledo/foolbox
|
25d995b1a50f4926e07bc51877d385c0518982f8
|
[
"MIT"
] | 1
|
2021-02-26T10:04:20.000Z
|
2021-02-26T10:04:20.000Z
|
import numpy as np
from foolbox.attacks import SaliencyMapAttack as Attack
def test_attack(bn_adversarial):
    """Untargeted saliency-map attack yields a finite-distance adversarial."""
    adversarial = bn_adversarial
    Attack()(adversarial)
    assert adversarial.image is not None
    assert adversarial.distance.value < np.inf
def test_attack_random_targets(bn_adversarial):
    """Attack with randomly chosen target classes still succeeds."""
    adversarial = bn_adversarial
    Attack()(adversarial, num_random_targets=2)
    assert adversarial.image is not None
    assert adversarial.distance.value < np.inf
def test_targeted_attack(bn_targeted_adversarial):
    """Targeted saliency-map attack yields a finite-distance adversarial."""
    adversarial = bn_targeted_adversarial
    Attack()(adversarial)
    assert adversarial.image is not None
    assert adversarial.distance.value < np.inf
def test_targeted_attack_slow(bn_targeted_adversarial):
    """Targeted attack also works with the slow (fast=False) variant."""
    adversarial = bn_targeted_adversarial
    Attack()(adversarial, fast=False)
    assert adversarial.image is not None
    assert adversarial.distance.value < np.inf
def test_targeted_attack_max(bn_targeted_adversarial):
    """Targeted attack succeeds with a per-pixel perturbation budget of 1."""
    adversarial = bn_targeted_adversarial
    Attack()(adversarial, max_perturbations_per_pixel=1)
    assert adversarial.image is not None
    assert adversarial.distance.value < np.inf
| 24.909091
| 55
| 0.734489
| 155
| 1,096
| 4.980645
| 0.225806
| 0.15544
| 0.163212
| 0.187824
| 0.800518
| 0.800518
| 0.800518
| 0.800518
| 0.800518
| 0.712435
| 0
| 0.002268
| 0.195255
| 1,096
| 43
| 56
| 25.488372
| 0.873016
| 0
| 0
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.3125
| 1
| 0.15625
| false
| 0
| 0.0625
| 0
| 0.21875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f0d83df0bf59b9566c6542856507188d452c5f20
| 16,222
|
py
|
Python
|
microtvm_device/microDevice_pb2_grpc.py
|
mehrdadh/microtvm-device
|
50c0ea616184e6fda2bf0266443555124f509415
|
[
"Apache-2.0"
] | 1
|
2022-01-20T23:10:03.000Z
|
2022-01-20T23:10:03.000Z
|
microtvm_device/microDevice_pb2_grpc.py
|
mehrdadh/microtvm-device
|
50c0ea616184e6fda2bf0266443555124f509415
|
[
"Apache-2.0"
] | null | null | null |
microtvm_device/microDevice_pb2_grpc.py
|
mehrdadh/microtvm-device
|
50c0ea616184e6fda2bf0266443555124f509415
|
[
"Apache-2.0"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from microtvm_device import microDevice_pb2 as microtvm__device_dot_microDevice__pb2
class RPCRequestStub(object):
    """The device request service.
    """
    # NOTE: generated by the gRPC Python protocol compiler plugin (see the
    # module's "DO NOT EDIT" header) -- change microDevice.proto and
    # regenerate rather than editing this class by hand.

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # One unary-unary callable per RPC on microDevice.RPCRequest; the
        # (de)serializers come from the generated microDevice_pb2 messages.
        self.RPCDeviceRequest = channel.unary_unary(
                '/microDevice.RPCRequest/RPCDeviceRequest',
                request_serializer=microtvm__device_dot_microDevice__pb2.DeviceMessage.SerializeToString,
                response_deserializer=microtvm__device_dot_microDevice__pb2.DeviceReply.FromString,
                )
        self.RPCDeviceRelease = channel.unary_unary(
                '/microDevice.RPCRequest/RPCDeviceRelease',
                request_serializer=microtvm__device_dot_microDevice__pb2.DeviceMessage.SerializeToString,
                response_deserializer=microtvm__device_dot_microDevice__pb2.DeviceReply.FromString,
                )
        self.RPCDeviceIsAlive = channel.unary_unary(
                '/microDevice.RPCRequest/RPCDeviceIsAlive',
                request_serializer=microtvm__device_dot_microDevice__pb2.DeviceMessage.SerializeToString,
                response_deserializer=microtvm__device_dot_microDevice__pb2.DeviceReply.FromString,
                )
        self.RPCSessionRequest = channel.unary_unary(
                '/microDevice.RPCRequest/RPCSessionRequest',
                request_serializer=microtvm__device_dot_microDevice__pb2.SessionMessage.SerializeToString,
                response_deserializer=microtvm__device_dot_microDevice__pb2.SessionMessage.FromString,
                )
        self.RPCSessionClose = channel.unary_unary(
                '/microDevice.RPCRequest/RPCSessionClose',
                request_serializer=microtvm__device_dot_microDevice__pb2.SessionMessage.SerializeToString,
                response_deserializer=microtvm__device_dot_microDevice__pb2.SessionMessage.FromString,
                )
        self.RPCDeviceRequestList = channel.unary_unary(
                '/microDevice.RPCRequest/RPCDeviceRequestList',
                request_serializer=microtvm__device_dot_microDevice__pb2.StringMessage.SerializeToString,
                response_deserializer=microtvm__device_dot_microDevice__pb2.StringMessage.FromString,
                )
        self.RPCDeviceRequestEnable = channel.unary_unary(
                '/microDevice.RPCRequest/RPCDeviceRequestEnable',
                request_serializer=microtvm__device_dot_microDevice__pb2.DeviceMessage.SerializeToString,
                response_deserializer=microtvm__device_dot_microDevice__pb2.StringMessage.FromString,
                )
        self.RPCDeviceRequestDisable = channel.unary_unary(
                '/microDevice.RPCRequest/RPCDeviceRequestDisable',
                request_serializer=microtvm__device_dot_microDevice__pb2.DeviceMessage.SerializeToString,
                response_deserializer=microtvm__device_dot_microDevice__pb2.StringMessage.FromString,
                )
        self.RPCGetDeviceTypeInfo = channel.unary_unary(
                '/microDevice.RPCRequest/RPCGetDeviceTypeInfo',
                request_serializer=microtvm__device_dot_microDevice__pb2.DeviceMessage.SerializeToString,
                response_deserializer=microtvm__device_dot_microDevice__pb2.DeviceReply.FromString,
                )
class RPCRequestServicer(object):
    """The device request service.
    """
    # NOTE: generated server-side base class -- subclass it and override
    # these methods to implement the service; every default handler answers
    # UNIMPLEMENTED and raises.

    def RPCDeviceRequest(self, request, context):
        """Send a device serial
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def RPCDeviceRelease(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def RPCDeviceIsAlive(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def RPCSessionRequest(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def RPCSessionClose(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def RPCDeviceRequestList(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def RPCDeviceRequestEnable(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def RPCDeviceRequestDisable(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def RPCGetDeviceTypeInfo(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_RPCRequestServicer_to_server(servicer, server):
    # Generated registration helper: maps every microDevice.RPCRequest method
    # name to the servicer's handler with the matching protobuf
    # (de)serializers, then attaches the bundle to the server.
    rpc_method_handlers = {
            'RPCDeviceRequest': grpc.unary_unary_rpc_method_handler(
                    servicer.RPCDeviceRequest,
                    request_deserializer=microtvm__device_dot_microDevice__pb2.DeviceMessage.FromString,
                    response_serializer=microtvm__device_dot_microDevice__pb2.DeviceReply.SerializeToString,
            ),
            'RPCDeviceRelease': grpc.unary_unary_rpc_method_handler(
                    servicer.RPCDeviceRelease,
                    request_deserializer=microtvm__device_dot_microDevice__pb2.DeviceMessage.FromString,
                    response_serializer=microtvm__device_dot_microDevice__pb2.DeviceReply.SerializeToString,
            ),
            'RPCDeviceIsAlive': grpc.unary_unary_rpc_method_handler(
                    servicer.RPCDeviceIsAlive,
                    request_deserializer=microtvm__device_dot_microDevice__pb2.DeviceMessage.FromString,
                    response_serializer=microtvm__device_dot_microDevice__pb2.DeviceReply.SerializeToString,
            ),
            'RPCSessionRequest': grpc.unary_unary_rpc_method_handler(
                    servicer.RPCSessionRequest,
                    request_deserializer=microtvm__device_dot_microDevice__pb2.SessionMessage.FromString,
                    response_serializer=microtvm__device_dot_microDevice__pb2.SessionMessage.SerializeToString,
            ),
            'RPCSessionClose': grpc.unary_unary_rpc_method_handler(
                    servicer.RPCSessionClose,
                    request_deserializer=microtvm__device_dot_microDevice__pb2.SessionMessage.FromString,
                    response_serializer=microtvm__device_dot_microDevice__pb2.SessionMessage.SerializeToString,
            ),
            'RPCDeviceRequestList': grpc.unary_unary_rpc_method_handler(
                    servicer.RPCDeviceRequestList,
                    request_deserializer=microtvm__device_dot_microDevice__pb2.StringMessage.FromString,
                    response_serializer=microtvm__device_dot_microDevice__pb2.StringMessage.SerializeToString,
            ),
            'RPCDeviceRequestEnable': grpc.unary_unary_rpc_method_handler(
                    servicer.RPCDeviceRequestEnable,
                    request_deserializer=microtvm__device_dot_microDevice__pb2.DeviceMessage.FromString,
                    response_serializer=microtvm__device_dot_microDevice__pb2.StringMessage.SerializeToString,
            ),
            'RPCDeviceRequestDisable': grpc.unary_unary_rpc_method_handler(
                    servicer.RPCDeviceRequestDisable,
                    request_deserializer=microtvm__device_dot_microDevice__pb2.DeviceMessage.FromString,
                    response_serializer=microtvm__device_dot_microDevice__pb2.StringMessage.SerializeToString,
            ),
            'RPCGetDeviceTypeInfo': grpc.unary_unary_rpc_method_handler(
                    servicer.RPCGetDeviceTypeInfo,
                    request_deserializer=microtvm__device_dot_microDevice__pb2.DeviceMessage.FromString,
                    response_serializer=microtvm__device_dot_microDevice__pb2.DeviceReply.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'microDevice.RPCRequest', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class RPCRequest(object):
    """The device request service.
    """
    # NOTE: generated code (see the module's "DO NOT EDIT" header). These
    # static helpers invoke grpc.experimental.unary_unary directly, creating
    # the channel per call instead of going through RPCRequestStub.

    @staticmethod
    def RPCDeviceRequest(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/microDevice.RPCRequest/RPCDeviceRequest',
            microtvm__device_dot_microDevice__pb2.DeviceMessage.SerializeToString,
            microtvm__device_dot_microDevice__pb2.DeviceReply.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def RPCDeviceRelease(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/microDevice.RPCRequest/RPCDeviceRelease',
            microtvm__device_dot_microDevice__pb2.DeviceMessage.SerializeToString,
            microtvm__device_dot_microDevice__pb2.DeviceReply.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def RPCDeviceIsAlive(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/microDevice.RPCRequest/RPCDeviceIsAlive',
            microtvm__device_dot_microDevice__pb2.DeviceMessage.SerializeToString,
            microtvm__device_dot_microDevice__pb2.DeviceReply.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def RPCSessionRequest(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/microDevice.RPCRequest/RPCSessionRequest',
            microtvm__device_dot_microDevice__pb2.SessionMessage.SerializeToString,
            microtvm__device_dot_microDevice__pb2.SessionMessage.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def RPCSessionClose(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/microDevice.RPCRequest/RPCSessionClose',
            microtvm__device_dot_microDevice__pb2.SessionMessage.SerializeToString,
            microtvm__device_dot_microDevice__pb2.SessionMessage.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def RPCDeviceRequestList(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/microDevice.RPCRequest/RPCDeviceRequestList',
            microtvm__device_dot_microDevice__pb2.StringMessage.SerializeToString,
            microtvm__device_dot_microDevice__pb2.StringMessage.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def RPCDeviceRequestEnable(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/microDevice.RPCRequest/RPCDeviceRequestEnable',
            microtvm__device_dot_microDevice__pb2.DeviceMessage.SerializeToString,
            microtvm__device_dot_microDevice__pb2.StringMessage.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def RPCDeviceRequestDisable(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/microDevice.RPCRequest/RPCDeviceRequestDisable',
            microtvm__device_dot_microDevice__pb2.DeviceMessage.SerializeToString,
            microtvm__device_dot_microDevice__pb2.StringMessage.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def RPCGetDeviceTypeInfo(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/microDevice.RPCRequest/RPCGetDeviceTypeInfo',
            microtvm__device_dot_microDevice__pb2.DeviceMessage.SerializeToString,
            microtvm__device_dot_microDevice__pb2.DeviceReply.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 48.423881
| 112
| 0.685612
| 1,377
| 16,222
| 7.691358
| 0.078431
| 0.074025
| 0.088283
| 0.145406
| 0.839581
| 0.801624
| 0.795581
| 0.757813
| 0.747427
| 0.747427
| 0
| 0.004584
| 0.246949
| 16,222
| 334
| 113
| 48.568862
| 0.862394
| 0.05024
| 0
| 0.691489
| 1
| 0
| 0.089027
| 0.054148
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070922
| false
| 0
| 0.007092
| 0.031915
| 0.120567
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f0dfbab4d38fe0fb18143dc099ce0698fb6d59a6
| 15,638
|
py
|
Python
|
src/stats/mle.py
|
valevo/thesis
|
6671fa7ed8aefd3e89fd29ee97fa31a3c4315868
|
[
"MIT"
] | 1
|
2018-07-07T11:40:49.000Z
|
2018-07-07T11:40:49.000Z
|
src/stats/mle.py
|
valevo/Thesis
|
6671fa7ed8aefd3e89fd29ee97fa31a3c4315868
|
[
"MIT"
] | null | null | null |
src/stats/mle.py
|
valevo/Thesis
|
6671fa7ed8aefd3e89fd29ee97fa31a3c4315868
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel,\
GenericLikelihoodModelResults
from statsmodels.nonparametric.smoothers_lowess import lowess
from scipy.special import zeta
from scipy.stats import binom
import pickle
import numpy as np
lg = np.log10
class Mandelbrot(GenericLikelihoodModel):
    """Maximum-likelihood fit of a Zipf-Mandelbrot rank-frequency law.

    P(rank) is proportional to (beta + rank) ** -alpha, normalised by the
    Hurwitz zeta function zeta(alpha, beta + 1) (see ``prob``/``loglike``).
    ``endog`` holds the observed frequencies and ``exog`` the matching ranks.
    """

    def to_pickle(self, filename, remove_data=True):
        """Pickle the registered fit result (not the whole model) to disk."""
        if not filename.endswith(".pkl"):
            filename += ".pkl"
        if not self.fit_result:
            raise ValueError("No fit result registered yet; pickling pointless!")
        if remove_data:
            # Drop the back-reference and data arrays to keep the pickle small
            # (and to avoid pickling this model class itself).
            self.fit_result.model = None
            self.fit_result.exog = None
            self.fit_result.endog = None
        with open(filename, "wb") as handle:
            pickle.dump(self.fit_result, handle)

    @classmethod
    def from_pickle(cls, filename, to_class=False, frequencies=None,
                    ranks=None, **kwargs):
        """Load a pickled fit result; with to_class=True, rewrap it in a
        Mandelbrot instance (requires the original frequencies and ranks)."""
        if not filename.endswith(".pkl"):
            filename += ".pkl"
        with open(filename, "rb") as handle:
            fit_res = pickle.load(handle)
        if not to_class:
            return fit_res
        if (frequencies is None) or (ranks is None):
            # NOTE(review): adjacent string literals concatenate without a
            # space, so the message reads "...withfrequencies...".
            raise ValueError("Mandelbrot class can only be instatiated with"
                             "frequencies and ranks given!")
        mandel = cls(frequencies, ranks, **kwargs)
        fit_res.model = mandel
        mandel.register_fit(fit_res)
        return mandel

    def __init__(self, frequencies, ranks, **kwargs):
        if not len(frequencies) == len(ranks):
            raise ValueError("NOT THE SAME NUMBER OF RANKS AND FREQS!")
        frequencies = np.asarray(frequencies)
        ranks = np.asarray(ranks)
        # Total number of observations (sum of all frequency counts).
        self.n_obs = np.sum(frequencies)
        super().__init__(endog=frequencies, exog=ranks, **kwargs)
        # Populated later via register_fit().
        self.fit_result = None

    def prob(self, params, ranks=None, log=False):
        """Per-rank probability (or its log10 if log=True) under
        params = (alpha, beta); defaults to the model's own ranks."""
        if ranks is None:
            ranks = self.exog
        alpha, beta = params
        if log:
            return -alpha*lg(beta+ranks) - lg(zeta(alpha, q=beta+1.))
        else:
            return ((beta + ranks)**(-alpha))/zeta(alpha, q=beta+1.)

    def loglike(self, params, frequencies=None, ranks=None):
        """Frequency-weighted log10-likelihood of params = (alpha, beta)."""
        rs = self.exog if (ranks is None) else ranks
        fs = self.endog if (frequencies is None) else frequencies
        alpha, beta = params
        # if alpha > 10 or beta > 20:
        #    return -np.inf
        if alpha < 1.0 or beta < 0.0:
            # Outside the valid domain (the zeta normaliser diverges for
            # alpha <= 1); signal impossibility to the optimiser.
            return -np.inf
        # no need to calculate P(r) when observed f(r) was zero
        log_probs = -alpha*lg(beta+rs) - lg(zeta(alpha, q=beta+1.))
        log_probs = log_probs.reshape(-1, )
        # NOTE(review): "- beta**5" looks like an ad-hoc penalty discouraging
        # large beta, not part of the likelihood proper -- confirm intent.
        return np.sum(fs * log_probs) - beta**5

    def register_fit(self, fit_result, overwrite=False):
        """Attach a statsmodels fit result and derive summary statistics."""
        if not self.fit_result is None and not overwrite:
            raise ValueError("A fit result is already registered and overwrite=False!")
        self.fit_result = fit_result
        self.optim_params = fit_result.params
        # NOTE(review): this assignment replaces the pseudo_r_squared *method*
        # with its float value on this instance, so register_fit cannot be
        # invoked a second time (even with overwrite=True) without error.
        self.pseudo_r_squared = self.pseudo_r_squared(self.optim_params)
        self.SE, self.SE_relative = fit_result.bse, fit_result.bse/self.optim_params
        self.BIC, self.BIC_relative = fit_result.bic,\
                    (-2*self.null_loglike())/fit_result.bic

    def print_result(self, string=False):
        """Pretty-print the registered fit summary; return it if string=True."""
        if self.fit_result is None:
            raise ValueError("Register a fitting result first!")

        def format_x(x):
            # Round to three significant digits for display.
            return float('{0:.3g}'.format(x))
        s = "="*50
        s += "\n" + "MANDELBROT"
        s += "\n" + " Optimal Parameters " + str(tuple(map(format_x, self.optim_params)))
        s += "\n" + " Standard Error [relative]: " + str(tuple(map(format_x, self.SE))) +\
             ", [" + str(tuple(map(format_x, self.SE_relative))) + "]"
        s += "\n" + " Pseudo R^2: " + str(format_x(self.pseudo_r_squared))
        s += "\n" + " BIC [relative]: " + str(format_x(self.BIC)) +\
             ", [" + str(format_x(self.BIC_relative)) + "]"
        s += "\n" + "="*50
        if string:
            return s
        print(s)

    def null_loglike(self, epsilon=1e-10):
        # Likelihood of the near-degenerate reference model
        # (alpha -> 1 from above, beta = 0), used as the pseudo-R^2 baseline.
        return self.loglike((1.+epsilon, 0.0))

    def pseudo_r_squared(self, params):
        # McFadden-style ratio against the null model -- TODO confirm intent.
        return 1-self.loglike(params)/self.null_loglike()

    def predict(self, params, ranks=None, freqs=True, n_obs=None,
                correct_for_finite_domain=True):
        """Predicted frequencies (or raw probabilities if freqs=False) for
        the given ranks under params = (alpha, beta)."""
        if ranks is None:
            ranks = self.exog
        ranks = np.asarray(ranks)
        if n_obs is None:
            n_obs = self.n_obs
        alpha, beta = params
        pred_probs = self.prob(params, ranks=ranks, log=False)
        if correct_for_finite_domain:
            if not freqs:
                raise NotImplementedError("Correction for "\
                                          "finite domain not implemented with probabilities!")
            # Rescale so the predicted frequencies sum to the observed total
            # over this finite set of ranks.
            return pred_probs*(n_obs/np.sum(pred_probs))
        if freqs:
            return n_obs*pred_probs
        return pred_probs
class Mandelbrot2(GenericLikelihoodModel):
    """Zipf-Mandelbrot rank-frequency model with a pluggable regulariser.

    P(r) is proportional to (beta + r)**(-alpha), normalised by the Hurwitz
    zeta function zeta(alpha, beta + 1). ``endog`` holds observed
    frequencies, ``exog`` the corresponding ranks.

    NOTE(review): after ``register_fit`` runs, the instance attribute
    ``pseudo_r_squared`` (a float) shadows the method of the same name --
    this looks intentional (``print_result`` formats it as a number) but is
    fragile; confirm before refactoring.
    """
    def to_pickle(self, filename, remove_data=True):
        """Pickle the registered fit result to ``filename`` (``.pkl`` appended
        if missing); optionally strip model/data back-references first so the
        pickle is small and self-contained."""
        if not filename.endswith(".pkl"):
            filename += ".pkl"
        if not self.fit_result:
            raise ValueError("No fit result registered yet; pickling pointless!")
        if remove_data:
            # drop references that would otherwise drag model + data into the pickle
            self.fit_result.model = None
            self.fit_result.exog = None
            self.fit_result.endog = None
        with open(filename, "wb") as handle:
            pickle.dump(self.fit_result, handle)
    @classmethod
    def from_pickle(cls, filename, to_class=False, frequencies=None,
                    ranks=None, **kwargs):
        """Load a pickled fit result.

        With ``to_class=False`` return the raw fit result; otherwise rebuild a
        full Mandelbrot2 instance around it, which requires ``frequencies``
        and ``ranks`` (the data was stripped by ``to_pickle``).
        """
        if not filename.endswith(".pkl"):
            filename += ".pkl"
        with open(filename, "rb") as handle:
            fit_res = pickle.load(handle)
        if not to_class:
            return fit_res
        if (frequencies is None) or (ranks is None):
            raise ValueError("Mandelbrot class can only be instatiated with"
                             "frequencies and ranks given!")
        mandel = cls(frequencies, ranks, **kwargs)
        fit_res.model = mandel
        mandel.register_fit(fit_res)
        return mandel
    def __init__(self, frequencies, ranks, regulariser, **kwargs):
        """:param regulariser: callable (loglike, alpha, beta) -> penalised
        loglike, applied in ``loglike``."""
        if not len(frequencies) == len(ranks):
            raise ValueError("NOT THE SAME NUMBER OF RANKS AND FREQS!")
        frequencies = np.asarray(frequencies)
        ranks = np.asarray(ranks)
        self.n_obs = np.sum(frequencies)  # total token count
        self.regulariser = regulariser
        super().__init__(endog=frequencies, exog=ranks, **kwargs)
        self.fit_result = None
    def prob(self, params, ranks=None, log=False):
        """(Log-)probability of each rank under (alpha, beta).
        ``lg`` is presumably a log function and ``zeta`` the Hurwitz zeta --
        both defined elsewhere in the file; confirm."""
        if ranks is None:
            ranks = self.exog
        alpha, beta = params
        if log:
            return -alpha*lg(beta+ranks) - lg(zeta(alpha, q=beta+1.))
        else:
            return ((beta + ranks)**(-alpha))/zeta(alpha, q=beta+1.)
    def loglike(self, params):
        """Regularised log-likelihood; -inf outside the valid region
        (alpha >= 1, beta >= 0)."""
        rs = self.exog
        fs = self.endog
        alpha, beta = params
        # if alpha > 10 or beta > 20:
        #    return -np.inf
        if alpha < 1.0 or beta < 0.0:
            return -np.inf
        # no need to calculate P(r) when observed f(r) was zero
        log_probs = -alpha*lg(beta+rs) - lg(zeta(alpha, q=beta+1.))
        log_probs = log_probs.reshape(-1, )
        # weighted sum of log-probabilities, then the user-supplied penalty
        return self.regulariser(np.sum(fs*log_probs), alpha, beta)
        # return np.sum(fs * log_probs) - beta**5
    def register_fit(self, fit_result, overwrite=False):
        """Attach a statsmodels fit result and derive summary statistics.
        WARNING: rebinds ``self.pseudo_r_squared`` from method to float."""
        if not self.fit_result is None and not overwrite:
            raise ValueError("A fit result is already registered and overwrite=False!")
        self.fit_result = fit_result
        self.optim_params = fit_result.params
        self.pseudo_r_squared = self.pseudo_r_squared(self.optim_params)
        self.SE, self.SE_relative = fit_result.bse, fit_result.bse/self.optim_params
        self.BIC, self.BIC_relative = fit_result.bic,\
            (-2*self.null_loglike())/fit_result.bic
    def print_result(self, string=False):
        """Format the registered fit as a banner; return it if ``string``,
        else print it."""
        if self.fit_result is None:
            raise ValueError("Register a fitting result first!")
        def format_x(x):
            # round to 3 significant digits for display
            return float('{0:.3g}'.format(x))
        s = "="*50
        s += "\n" + "MANDELBROT"
        s += "\n" + " Optimal Parameters " + str(tuple(map(format_x, self.optim_params)))
        s += "\n" + " Standard Error [relative]: " + str(tuple(map(format_x, self.SE))) +\
            ", [" + str(tuple(map(format_x, self.SE_relative))) + "]"
        s += "\n" + " Pseudo R^2: " + str(format_x(self.pseudo_r_squared))
        s += "\n" + " BIC [relative]: " + str(format_x(self.BIC)) +\
            ", [" + str(format_x(self.BIC_relative)) + "]"
        s += "\n" + "="*50
        if string:
            return s
        print(s)
    def null_loglike(self, epsilon=1e-10):
        """Log-likelihood of the near-degenerate null model (alpha->1, beta=0)."""
        return self.loglike((1.+epsilon, 0.0))
    def pseudo_r_squared(self, params):
        """McFadden-style pseudo R^2 (shadowed by a float after register_fit)."""
        return 1-self.loglike(params)/self.null_loglike()
    def predict(self, params, ranks=None, freqs=True, n_obs=None,
                correct_for_finite_domain=True):
        """Predicted frequencies (default) or probabilities for ``ranks``;
        with ``correct_for_finite_domain`` frequencies are rescaled to sum
        to ``n_obs`` over the supplied (finite) ranks."""
        if ranks is None:
            ranks = self.exog
        ranks = np.asarray(ranks)
        if n_obs is None:
            n_obs = self.n_obs
        alpha, beta = params  # NOTE(review): unpack is unused here
        pred_probs = self.prob(params, ranks=ranks, log=False)
        if correct_for_finite_domain:
            if not freqs:
                raise NotImplementedError("Correction for "\
                    "finite domain not implemented with probabilities!")
            return pred_probs*(n_obs/np.sum(pred_probs))
        if freqs:
            return n_obs*pred_probs
        return pred_probs
class Heap(GenericLikelihoodModel):
    """Heaps' law model V(n) = K * n**beta for vocabulary growth.

    ``endog`` holds type counts, ``exog`` token counts; the likelihood treats
    each observed type count as the mode of a Binomial(binom_n, 0.5) whose
    mode equals the projected V(n).

    NOTE(review): as in Mandelbrot2, ``register_fit`` rebinds
    ``pseudo_r_squared`` from method to float.
    """
    def to_pickle(self, filename, remove_data=True):
        """Pickle the registered fit result (``.pkl`` appended if missing)."""
        if not filename.endswith(".pkl"):
            filename += ".pkl"
        if not self.fit_result:
            raise ValueError("No fit result registered yet; pickling pointless!")
        if remove_data:
            self.remove_data()
        with open(filename, "wb") as handle:
            pickle.dump(self.fit_result, handle)
    def remove_data(self):
        """Strip model/data back-references from the fit result before pickling."""
        self.fit_result.model = None
        self.fit_result.exog = None
        self.fit_result.endog = None
    @classmethod
    def from_pickle(cls, filename, to_class=False, ns_types=None,
                    ns_tokens=None, **kwargs):
        """Load a pickled fit result; with ``to_class=True`` rebuild a full
        Heap instance (requires ns_types and ns_tokens, since the data was
        stripped on pickling)."""
        if not filename.endswith(".pkl"):
            filename += ".pkl"
        with open(filename, "rb") as handle:
            fit_res = pickle.load(handle)
        if not to_class:
            return fit_res
        if (ns_types is None) or (ns_tokens is None):
            raise ValueError("Heap class can only be instatiated with"
                             "frequencies and ranks given!")
        heap = cls(ns_types, ns_tokens, **kwargs)
        fit_res.model = heap
        heap.register_fit(fit_res)
        return heap
    def __init__(self, ns_types, ns_tokens, **kwargs):
        if not len(ns_types) == len(ns_tokens):
            raise ValueError("N TYPES AND N TOKENS OF DIFFERENT LENGTH!")
        self.n_obs = len(ns_types)  # number of measurement points
        ns_types = np.asarray(ns_types)
        ns_tokens = np.asarray(ns_tokens)
        if ns_tokens[0] == 0:
            # avoid division by zero in the type-token ratio below
            ns_types[0] = 1
            ns_tokens[0] = 1
        self.ttrs = ns_types/ns_tokens  # type-token ratios, used by null model
        # self.log_ttrs = lg(ns_types)/lg(ns_tokens)
        super().__init__(endog=ns_types, exog=ns_tokens, **kwargs)
        self.fit_result = None
    def loglike(self, params):
        """Log-likelihood of params (K, beta); -inf outside beta <= 1, K >= 1."""
        K, beta = params
        if beta > 1. or K < 1:
            return -np.inf
        types, tokens = self.endog, self.exog
        # V(n) = K*n**beta
        projected_n_types = K*tokens**beta
        p = .5
        # binom mode = floor((n+1)*p),
        # so binom_n = floor(1/p*n)
        binom_ns = np.floor((1/p)*projected_n_types)
        # NOTE(review): the trailing [0] suggests t/bn are 1-element arrays
        # here; null_loglike below omits it -- confirm the data shapes
        logprobs = list(binom.logpmf(t, bn, p)[0]
                        for t, bn in zip(types, binom_ns))
        # clip to avoid -inf wrecking the optimiser
        logprobs_clipped = np.clip(logprobs, -10**6, 0)
        return sum(logprobs_clipped)# - beta*1000
    def null_loglike(self):
        """Log-likelihood of a linear null model using the median type-token ratio."""
        types, tokens = self.endog, self.exog
        projected_n_types = np.median(self.ttrs)*tokens.reshape((-1, ))
        p = .5
        binom_ns = np.floor((1/p)*projected_n_types)
        logprobs = list(binom.logpmf(t, bn, p)
                        for t, bn in zip(types, binom_ns))
        logprobs_clipped = np.clip(logprobs, -10**6, 0)
        return sum(logprobs_clipped)
    def fit(self, start_params=None, method="powell", **kwargs):
        """Fit with sensible defaults: start at (K=10, beta=0.75), Powell search."""
        if start_params is None:
            start_params = (10, 0.75)
        return super().fit(start_params=start_params, method=method, **kwargs)
    def predict(self, params, ns_tokens=None):
        """Projected number of types V(n) = K * n**beta for the given token counts."""
        if ns_tokens is None:
            ns_tokens = self.exog
        ns_tokens = np.asarray(ns_tokens)
        K, beta = params
        return K*ns_tokens**beta
    def register_fit(self, fit_result, overwrite=False):
        """Attach a fit result and derive summary statistics.
        WARNING: rebinds ``self.pseudo_r_squared`` from method to float."""
        if not self.fit_result is None and not overwrite:
            raise ValueError("A fit result is already registered and overwrite=False!")
        self.fit_result = fit_result
        self.optim_params = fit_result.params
        self.pseudo_r_squared = self.pseudo_r_squared(self.optim_params)
        self.SE, self.SE_relative = fit_result.bse, fit_result.bse/self.optim_params
        self.BIC, self.BIC_relative = fit_result.bic,\
            (-2*self.null_loglike())/fit_result.bic
    def print_result(self, string=False):
        """Format the registered fit as a banner; return it if ``string``, else print."""
        if self.fit_result is None:
            raise ValueError("Register a fitting result first!")
        def format_x(x):
            # round to 3 significant digits for display
            return float('{0:.3g}'.format(x))
        s = "="*50
        s += "\n" + "HEAP"
        s += "\n" + " Optimal Parameters " + str(tuple(map(format_x, self.optim_params)))
        s += "\n" + " Standard Error [relative]: " + str(tuple(map(format_x, self.SE))) +\
            ", [" + str(tuple(map(format_x, self.SE_relative))) + "]"
        s += "\n" + " Pseudo R^2: " + str(format_x(self.pseudo_r_squared))
        s += "\n" + " BIC [relative]: " + str(format_x(self.BIC)) +\
            ", [" + str(format_x(self.BIC_relative)) + "]"
        s += "\n" + "="*50
        if string:
            return s
        print(s)
    def pseudo_r_squared(self, params):
        """McFadden-style pseudo R^2 (shadowed by a float after register_fit)."""
        return 1-self.loglike(params)/self.null_loglike()
| 29.957854
| 94
| 0.542844
| 1,893
| 15,638
| 4.330692
| 0.098785
| 0.059283
| 0.047573
| 0.019761
| 0.846304
| 0.836546
| 0.820444
| 0.820444
| 0.820444
| 0.814711
| 0
| 0.009995
| 0.347423
| 15,638
| 521
| 95
| 30.015355
| 0.793337
| 0.027881
| 0
| 0.81672
| 0
| 0
| 0.085001
| 0
| 0.057878
| 0
| 0
| 0
| 0
| 1
| 0.109325
| false
| 0
| 0.022508
| 0.025723
| 0.257235
| 0.019293
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0b1ae7f35ecd7f6d7d518a57d1b89adf09a51d7b
| 12,580
|
py
|
Python
|
mvpa_itab/script/carlo/mdm/supplementary_analysis.py
|
robbisg/mvpa_itab_wu
|
e3cdb198a21349672f601cd34381e0895fa6484c
|
[
"MIT"
] | 1
|
2022-01-12T08:59:22.000Z
|
2022-01-12T08:59:22.000Z
|
mvpa_itab/script/carlo/mdm/supplementary_analysis.py
|
robbisg/mvpa_itab_wu
|
e3cdb198a21349672f601cd34381e0895fa6484c
|
[
"MIT"
] | 46
|
2016-08-04T14:49:37.000Z
|
2022-03-09T08:47:48.000Z
|
mvpa_itab/script/carlo/mdm/supplementary_analysis.py
|
robbisg/mvpa_itab_wu
|
e3cdb198a21349672f601cd34381e0895fa6484c
|
[
"MIT"
] | null | null | null |
# Per-iteration option sets: each dict overrides the shared _default_config
# for one decoding run (target attribute, sample selection, balancing, ROIs).
_default_options = [
    {
        'target_transformer__attr': "image_type",
        'sample_slicer__attr': {'image_type': ["I", "O"]},
        # undersample to 40 trials per class, keeping indices for bookkeeping
        'balancer__balancer': RandomUnderSampler(sampling_strategy={"I": 40, "O": 40}, return_indices=True),
        'kwargs__roi_values': [('image+type', [6])],
    },
    {
        'target_transformer__attr': "decision",
        'sample_slicer__attr': {'decision': ["N", "O"],},
        'balancer__balancer': RandomUnderSampler(sampling_strategy={"N": 40, "O": 40}, return_indices=True),
        'kwargs__roi_values': [('decision', [6]), ('decision', [7]), ('decision', [8]),
                               ('decision', [9]), ('decision', [10]),
                               ('motor+resp', [6])],
    },
    {
        'target_transformer__attr': "motor_resp",
        'sample_slicer__attr': {'motor_resp': ["P", "S"]},
        'balancer__balancer': RandomUnderSampler(sampling_strategy={"P": 40, "S": 40}, return_indices=True),
        'kwargs__roi_values': [('decision', [6]), ('decision', [7]), ('decision', [8]),
                               ('decision', [9]), ('decision', [10]),
                               ('motor+resp', [6])],
    }
]
# Shared configuration for the temporal-decoding runs; _default_options
# entries override/extend these keys per iteration.
_default_config = {
    # preprocessing steps applied in order before decoding
    'prepro': [
        'target_transformer',
        'sample_slicer',
        'balancer'
    ],
    "balancer__attr": 'subject',
    # estimator pipeline: univariate feature selection then a linear SVM
    'estimator': [
        ('fsel', SelectKBest(k=50)),
        ('clf', SVC(C=1, kernel='linear'))
    ],
    'estimator__clf__C': 1,
    'estimator__clf__kernel': 'linear',
    'cv': LeaveOneGroupOut,
    #'cv__n_splits': 7,
    #'cv__test_size': 0.25,
    'scores': ['accuracy'],
    'analysis': TemporalDecoding,
    'analysis__n_jobs': -1,       # use all available cores
    'analysis__permutation': 0,   # no permutation testing
    'analysis__verbose': 0,
    #'kwargs__roi': ['omnibus', 'decision', 'image+type', 'motor+resp', 'target+side'],
    #'kwargs__roi_values': [('image+type', [2])],
    #'kwargs__prepro': ['feature_normalizer', 'sample_normalizer'],
    'kwargs__cv_attr': 'subject'  # cross-validation grouped by subject
}
# Run one temporal-decoding analysis per option set and persist each result.
iterator = AnalysisIterator(_default_options,
                            AnalysisConfigurator(**_default_config),
                            kind='configuration')
for conf in iterator:
    # NOTE(review): relies on the private _get_kwargs() API of the configurator
    kwargs = conf._get_kwargs()
    a = AnalysisPipeline(conf, name="temporal_decoding_across_fsel").fit(ds, **kwargs)
    a.save()
    gc.collect()  # datasets are large; free memory between runs
##################################################
# Reload the dataset for the ROI-decoding analyses below.
loader = DataLoader(configuration_file=configuration_file,
                    #data_path="/home/carlos/mount/meg_workstation/Carlo_MDM/",
                    task='BETA_MVPA',
                    roi_labels=roi_labels,
                    event_file="beta_attributes_full",
                    brain_mask="mask_intersection")
# Detrend, then z-normalise per sample and per feature.
prepro = PreprocessingPipeline(nodes=[
    #Transformer(),
    Detrender(),
    SampleZNormalizer(),
    FeatureZNormalizer(),
])
#prepro = PreprocessingPipeline()
ds = loader.fetch(prepro=prepro)
# halve the memory footprint of the (large) dataset
ds = MemoryReducer(dtype=np.float16).transform(ds)
# Grid of option values (dict form): iterate over two target attributes.
_default_options = {
    'target_transformer__attr': ['decision', 'memory_status']
}
# Shared configuration for the decision-vs-memory ROI decoding run.
# BUG FIX: the original literal was syntactically invalid -- a missing comma
# between ('motor+resp', [5]) and ('motor+resp', [6]), and a stray '}' that
# closed the dict before 'kwargs__cv_attr', leaving it dangling.
_default_config = {
    'prepro': ['target_transformer', 'sample_slicer', 'balancer'],
    'sample_slicer__attr': {'decision': ["N", "O"], 'evidence': [1]},
    "balancer__attr": 'subject',
    # estimator pipeline: univariate feature selection then a linear SVM
    'estimator': [
        ('fsel', SelectKBest(k=50)),
        ('clf', SVC(C=1, kernel='linear'))],
    'estimator__clf__C': 1,
    'estimator__clf__kernel': 'linear',
    'cv': LeaveOneGroupOut,
    'scores': ['accuracy'],
    'analysis': RoiDecoding,
    'analysis__n_jobs': -1,
    'analysis__permutation': 0,
    'analysis__verbose': 0,
    'kwargs__roi_values': [('decision', [1]), ('decision', [2]), ('decision', [3]),
                           ('decision', [4]), ('decision', [5]),
                           ('decision', [6]), ('decision', [7]), ('decision', [8]),
                           ('decision', [9]), ('decision', [10]),
                           ('motor+resp', [1]), ('motor+resp', [2]), ('motor+resp', [3]),
                           ('motor+resp', [4]), ('motor+resp', [5]), ('motor+resp', [6])],
    'kwargs__cv_attr': 'subject'
}
import gc
# Run the ROI decoding once per target attribute and persist each result.
iterator = AnalysisIterator(_default_options,
                            AnalysisConfigurator(**_default_config))
for conf in iterator:
    # NOTE(review): relies on the private _get_kwargs() API of the configurator
    kwargs = conf._get_kwargs()
    a = AnalysisPipeline(conf, name="roi_decoding_across_memoryvsdecision1x").fit(ds, **kwargs)
    a.save()
    gc.collect()  # datasets are large; free memory between runs
###################################################
_default_options = [
{
'target_transformer__attr': "image_type",
'sample_slicer__attr': {'image_type':["I", "O"], 'evidence':[1]},
#'balancer__balancer': RandomUnderSampler(sampling_strategy={"I": 20, "O": 20}, return_indices=True),
'kwargs__roi_values': [('image+type', [6])],
},
{
'target_transformer__attr': "image_type",
'sample_slicer__attr': {'image_type':["I", "O"], 'evidence':[3]},
#'balancer__balancer': RandomUnderSampler(sampling_strategy={"I": 20, "O": 20}, return_indices=True),
'kwargs__roi_values': [('image+type', [6])],
},
{
'target_transformer__attr': "image_type",
'sample_slicer__attr': {'image_type':["I", "O"], 'evidence':[5]},
#'balancer__balancer': RandomUnderSampler(sampling_strategy={"I": 20, "O": 20}, return_indices=True),
'kwargs__roi_values': [('image+type', [6])],
},
################################################################
{
'target_transformer__attr': "decision",
'sample_slicer__attr': {'decision':["N", "O"],'evidence':[1]},
#'balancer__balancer': RandomUnderSampler(sampling_strategy={"N": 20, "O": 20}, return_indices=True),
'kwargs__roi_values': [('decision', [6]), ('decision', [7]), ('decision', [8]),
('decision', [9]), ('decision', [10]),
('motor+resp', [6])],
}
},
{
'target_transformer__attr': "decision",
'sample_slicer__attr': {'decision':["N", "O"],'evidence':[3]},
#'balancer__balancer': RandomUnderSampler(sampling_strategy={"N": 20, "O": 20}, return_indices=True),
'kwargs__roi_values': [('decision', [6]), ('decision', [7]), ('decision', [8]),
('decision', [9]), ('decision', [10]),
('motor+resp', [6])],
}
},
{
'target_transformer__attr': "decision",
'sample_slicer__attr': {'decision':["N", "O"], 'evidence':[5]},
#'balancer__balancer': RandomUnderSampler(sampling_strategy={"N": 20, "O": 20}, return_indices=True),
'kwargs__roi_values': [('decision', [6]), ('decision', [7]), ('decision', [8]),
('decision', [9]), ('decision', [10]),
('motor+resp', [6])],
}
},
#######################################################################
{
'target_transformer__attr': "motor_resp",
'sample_slicer__attr': {'motor_resp':["P", "S"], 'evidence':[1]},
#'balancer__balancer': RandomUnderSampler(sampling_strategy={"P": 20, "S": 20}, return_indices=True),
'kwargs__roi_values': [('decision', [6]), ('decision', [7]), ('decision', [8]),
('decision', [9]), ('decision', [10]),
('motor+resp', [6])],
}
},
{
'target_transformer__attr': "motor_resp",
'sample_slicer__attr': {'motor_resp':["P", "S"], 'evidence':[3]},
#'balancer__balancer': RandomUnderSampler(sampling_strategy={"P": 20, "S": 20}, return_indices=True),
'kwargs__roi_values': [('decision', [6]), ('decision', [7]), ('decision', [8]),
('decision', [9]), ('decision', [10]),
('motor+resp', [6])],
}
},
{
'target_transformer__attr': "motor_resp",
'sample_slicer__attr': {'motor_resp':["P", "S"], 'evidence':[5]},
#'balancer__balancer': RandomUnderSampler(sampling_strategy={"P": 20, "S": 20}, return_indices=True),
'kwargs__roi_values': [('decision', [6]), ('decision', [7]), ('decision', [8]),
('decision', [9]), ('decision', [10]),
('motor+resp', [6])],
}
}
]
# Shared configuration for the full across-evidence ROI decoding runs.
_default_config = {
    'prepro': ['target_transformer', 'sample_slicer', 'balancer'],
    "balancer__attr": 'subject',
    # estimator pipeline: univariate feature selection then a linear SVM
    'estimator': [
        ('fsel', SelectKBest(k=50)),
        ('clf', SVC(C=1, kernel='linear'))],
    'estimator__clf__C': 1,
    'estimator__clf__kernel': 'linear',
    'cv': LeaveOneGroupOut,
    'scores': ['accuracy'],
    'analysis': RoiDecoding,
    'analysis__n_jobs': -1,       # use all available cores
    'analysis__permutation': 0,   # no permutation testing
    'analysis__verbose': 0,
    #'kwargs__roi': labels,
    #'kwargs__roi_values': [('image+type', [2])],
    #'kwargs__prepro': ['feature_normalizer', 'sample_normalizer'],
    'kwargs__cv_attr': 'subject'  # cross-validation grouped by subject
}
import gc
# Run one ROI-decoding analysis per option set and persist each result.
iterator = AnalysisIterator(_default_options,
                            AnalysisConfigurator(**_default_config),
                            kind='configuration')
for conf in iterator:
    # NOTE(review): relies on the private _get_kwargs() API of the configurator
    kwargs = conf._get_kwargs()
    a = AnalysisPipeline(conf, name="roi_decoding_across_full").fit(ds, **kwargs)
    a.save()
    gc.collect()  # datasets are large; free memory between runs
| 47.651515
| 137
| 0.392925
| 857
| 12,580
| 5.389732
| 0.150525
| 0.044815
| 0.048712
| 0.109115
| 0.861658
| 0.854081
| 0.820957
| 0.80407
| 0.790864
| 0.781338
| 0
| 0.023653
| 0.445469
| 12,580
| 264
| 138
| 47.651515
| 0.638475
| 0
| 0
| 0.578947
| 0
| 0
| 0.243152
| 0.04874
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.010526
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9be6338f37cf8637471dd7c68fb4d9579fd3d4f0
| 29,423
|
py
|
Python
|
chordToFinger.py
|
highmore9501/fretDance
|
6a1ec2e378bc9d510030209f6218f7bc7e1bbf0a
|
[
"WTFPL"
] | 1
|
2021-12-03T04:11:50.000Z
|
2021-12-03T04:11:50.000Z
|
chordToFinger.py
|
highmore9501/fretDance
|
6a1ec2e378bc9d510030209f6218f7bc7e1bbf0a
|
[
"WTFPL"
] | null | null | null |
chordToFinger.py
|
highmore9501/fretDance
|
6a1ec2e378bc9d510030209f6218f7bc7e1bbf0a
|
[
"WTFPL"
] | null | null | null |
from calculate import arrangeNotesInChord
def copyNewDancer(dancer):
    """Deep-copy the given dancer and lift all of its fingers off the strings.

    :param dancer: the dancer to clone
    :return: an independent copy with all fingers released
    """
    from copy import deepcopy
    clone = deepcopy(dancer)
    clone.releaseFingers()
    return clone
def getChordList(chordPosition):
    """Split a chord's note positions into fretted notes and open strings.

    Open-string notes (fret == 0) need no finger, so they are separated out
    for later handling; they are collected in reverse order of appearance.

    :param chordPosition: iterable of [string, fret] pairs
    :return: (chordList, noPress) -- fretted notes in original order, and
        the open-string notes
    """
    pressed = list(chordPosition)
    open_notes = []
    idx = len(pressed) - 1
    while idx >= 0:
        if pressed[idx][1] == 0:
            open_notes.append(pressed.pop(idx))
        idx -= 1
    return pressed, open_notes
def fingerNoteComb(dancer, chordPosition, fingerList, usedFinger=None, ESN=None):
    """
    Generate all dancers obtained by pressing each chord note with one finger.

    For every combination of ``len(chordPosition)`` fingers drawn from
    ``fingerList`` (in order), a fresh copy of the dancer presses the notes,
    records its trace, and is kept only if it passes validation.

    :param ESN: "empty string notes" -- open-string notes from the parent
        chord; unlike chordPosition here, which has open strings filtered out
    :param usedFinger: fingers already in use elsewhere (e.g. holding a barre)
    :param dancer: the original dancer
    :param chordPosition: note positions to press, no open strings included
    :param fingerList: fingers available for single presses, e.g. [2, 3, 4]
    :return: list of all valid dancers produced by the single presses
    """
    if ESN is None:
        ESN = []
    if usedFinger is None:
        usedFinger = []
    result = []
    resultAppend = result.append  # hoist bound method for the loop below
    noteNumber = len(chordPosition)
    # all fingers involved in this chord, for the trace record
    realFingerList = fingerList + usedFinger
    from itertools import combinations
    import copy
    for fingerComb in combinations(fingerList, noteNumber):
        newDancer = copy.deepcopy(dancer)
        for i in range(noteNumber):
            # finger fingerComb[i] presses note i: (string, fret)
            newDancer.fingerMoveTo(fingerComb[i], chordPosition[i][0], chordPosition[i][1])
        newDancer.recordTrace(realFingerList, ESN)
        if newDancer.validation(chordPosition):
            resultAppend(newDancer)
    return result
def chord2Finger00(dancer, chordPosition):
    """Handle shape [0]: every note is an open string; yields exactly one dancer."""
    newDancer = copyNewDancer(dancer)
    openTrace = [[string, 0] for string, fret in chordPosition]
    newDancer.traceNote.append(openTrace)
    newDancer.traceFinger.append([0])  # finger 0 == no finger needed
    return newDancer
def chord2Finger01(dancer, chordPosition):
    """Handle shape [1]: one fretted note; try each of fingers 1-4 (up to 4 dancers)."""
    candidates = []
    fretted, openNotes = getChordList(chordPosition)
    targetString = fretted[0][0]
    targetFret = fretted[0][1]
    for finger in range(1, 5):
        candidate = copyNewDancer(dancer)
        candidate.fingerMoveTo(finger, targetString, targetFret)
        candidate.recordTrace([finger], openNotes)
        if candidate.validation(chordPosition):
            candidates.append(candidate)
    return candidates
def chord2Finger02(dancer, chordPosition):
    """Handle shape [2]: two notes on the same fret.

    Candidates (translated from the Chinese original): finger-1 full or
    small barre; a second-finger barre family; plus all two-finger
    single-press combinations of fingers 1-4.
    """
    result = []
    resultAppend = result.append
    chordList, noPress = getChordList(chordPosition)
    fret = chordList[0][1]
    for i in range(2):  # finger-1 barre, width i+2 (full or small)
        for string in range(chordList[0][0], 7):
            newDancer = copyNewDancer(dancer)
            newDancer.changeBarre(1, string, fret, i + 2)
            newDancer.recordTrace([1], noPress)
            if newDancer.validation(chordPosition):
                resultAppend(newDancer)
    # NOTE(review): the original comment says fingers 3/4 barre, but
    # i + 2 with i in {0, 1} yields fingers 2 and 3 -- confirm intent.
    for i in range(2):
        for string in range(chordList[0][0], 7):
            newDancer = copyNewDancer(dancer)
            newDancer.changeBarre(i + 2, string, fret, 2)
            newDancer.recordTrace([i + 2], noPress)
            if newDancer.validation(chordPosition):
                resultAppend(newDancer)
    newDancer = copyNewDancer(dancer)
    # fingers 1-4, all two-finger combinations pressing the two notes
    singlePressDancer = fingerNoteComb(newDancer, chordPosition, [1, 2, 3, 4], ESN=noPress)
    result += singlePressDancer
    return result
def chord2Finger03(dancer, chordPosition):
    """Handle shape [1,1]: two fretted notes; all 2-finger combinations of fingers 1-4."""
    fretted, openNotes = getChordList(chordPosition)
    byFret = arrangeNotesInChord(fretted, 'fret')
    return fingerNoteComb(copyNewDancer(dancer), byFret, [1, 2, 3, 4], ESN=openNotes)
def chord2Finger04(dancer, chordPosition):
    """Handle shape [3]: three notes on one fret.

    Candidates (translated from the Chinese original): finger-1 full/small
    barre; a second barre family; plus all three-finger single-press
    combinations of fingers 1-4.
    """
    result = []
    resultAppend = result.append
    chordList, noPress = getChordList(chordPosition)
    newChordByString = arrangeNotesInChord(chordList, 'string')
    fret = newChordByString[0][1]
    for i in range(2):  # finger-1 barre, width i+2 (full or small)
        for string in range(newChordByString[0][0], 7):
            newDancer = copyNewDancer(dancer)
            newDancer.changeBarre(1, string, fret, i + 2)
            newDancer.recordTrace([1], noPress)
            if newDancer.validation(chordPosition):
                resultAppend(newDancer)
    # NOTE(review): original comment says fingers 3/4 barre, but i + 2
    # with i in {0, 1} yields fingers 2 and 3 -- confirm intent.
    for i in range(2):
        for string in range(newChordByString[0][0], 7):
            newDancer = copyNewDancer(dancer)
            newDancer.changeBarre(i + 2, string, fret, 2)
            newDancer.recordTrace([i + 2], noPress)
            if newDancer.validation(chordPosition):
                resultAppend(newDancer)
    newDancer = copyNewDancer(dancer)
    # fingers 1-4 pressing the three notes individually
    singlePressDancer = fingerNoteComb(newDancer, newChordByString, [1, 2, 3, 4], ESN=noPress)
    result += singlePressDancer
    return result
def chord2Finger05(dancer, chordPosition):
    """Handle shape [2,1]: two notes on a low fret plus one higher note.

    Candidates (translated from the Chinese original): finger-1 full/small
    barre on the low fret with finger 2/3/4 pressing the high note; plus
    all three-finger single-press combinations of fingers 1-4.
    """
    result = []
    resultAppend = result.append
    chordList, noPress = getChordList(chordPosition)
    newChordByFret = arrangeNotesInChord(chordList, 'fret')
    fret = newChordByFret[0][1]  # the shared low fret
    for i in range(2):  # finger-1 barre (width i+2) + one single press
        for fingerNumber in range(2, 5):
            for string in range(newChordByFret[0][0], 7):
                newDancer = copyNewDancer(dancer)
                newDancer.changeBarre(1, string, fret, i + 2)
                # newChordByFret[2] is the single high note (index 2 of 3)
                newDancer.fingerMoveTo(fingerNumber, newChordByFret[2][0], newChordByFret[2][1])
                newDancer.recordTrace([1, fingerNumber], noPress)
                if newDancer.validation(chordPosition):
                    resultAppend(newDancer)
    newDancer = copyNewDancer(dancer)
    # fingers 1-4 pressing the three notes individually
    singlePressDancer = fingerNoteComb(newDancer, newChordByFret, [1, 2, 3, 4], ESN=noPress)
    result += singlePressDancer
    return result
def chord2Finger06(dancer, chordPosition):
    """Handle shape [1,2]: one low note plus two notes on a shared higher fret.

    Candidates (translated from the Chinese original): finger-3 barre with
    finger 1/2 on the low note; finger-4 barre with finger 1/2/3 on the low
    note; plus all three-finger single-press combinations of fingers 1-4.
    """
    result = []
    resultAppend = result.append
    chordList, noPress = getChordList(chordPosition)
    newChordByFret = arrangeNotesInChord(chordList, 'fret')
    fret = newChordByFret[1][1]  # the shared higher fret
    for fingerNumber in range(1, 3):  # finger-3 barre, finger 1/2 single press
        for string in range(newChordByFret[1][0], 7):
            newDancer = copyNewDancer(dancer)
            newDancer.changeBarre(3, string, fret, 2)
            newDancer.fingerMoveTo(fingerNumber, newChordByFret[0][0], newChordByFret[0][1])
            newDancer.recordTrace([3, fingerNumber], noPress)
            if newDancer.validation(chordPosition):
                resultAppend(newDancer)
    for fingerNumber in range(1, 4):  # finger-4 barre, finger 1/2/3 single press
        # NOTE(review): this loop ranges from newChordByFret[0][0] while the
        # one above uses newChordByFret[1][0] -- possibly inconsistent; confirm.
        for string in range(newChordByFret[0][0], 7):
            newDancer = copyNewDancer(dancer)
            newDancer.changeBarre(4, string, fret, 2)
            newDancer.fingerMoveTo(fingerNumber, newChordByFret[0][0], newChordByFret[0][1])
            newDancer.recordTrace([4, fingerNumber], noPress)
            if newDancer.validation(chordPosition):
                resultAppend(newDancer)
    newDancer = copyNewDancer(dancer)
    # fingers 1-4 pressing the three notes individually
    singlePressDancer = fingerNoteComb(newDancer, newChordByFret, [1, 2, 3, 4], ESN=noPress)
    result += singlePressDancer
    return result
def chord2Finger07(dancer, chordPosition):
    """Handle shape [1,1,1]: three fretted notes pressed singly, combinations of fingers 1-4."""
    fretted, openNotes = getChordList(chordPosition)
    byFret = arrangeNotesInChord(fretted, 'fret')
    return fingerNoteComb(copyNewDancer(dancer), byFret, [1, 2, 3, 4], ESN=openNotes)
def chord2Finger08(dancer, chordPosition):
    """Handle shapes [4], [5], [6]: all notes on one fret -> finger-1 full barre.

    NOTE(review): unlike its siblings, this returns a single dancer (the
    first barre position that validates) rather than a list, and implicitly
    returns None when none validates -- confirm callers handle both.
    """
    chordList, noPress = getChordList(chordPosition)
    for string in range(chordList[0][0], 7):
        newDancer = copyNewDancer(dancer)
        newDancer.changeBarre(1, string, chordList[0][1], 2)
        newDancer.recordTrace([1], noPress)
        if newDancer.validation(chordPosition):
            return newDancer
def chord2Finger09(dancer, chordPosition):
    """Handle shape [1,3]: one low note plus three notes sharing a higher fret.

    Candidates (translated from the Chinese original):
    - finger-3 barre on the shared fret, finger 1/2 presses the low note;
    - finger-4 barre on the shared fret, finger 1/2/3 presses the low note;
    - fingers 1-4 pressing the four notes one each, low fret to high.
    """
    result = []
    resultAppend = result.append
    chordList, noPress = getChordList(chordPosition)
    newChordByFret = arrangeNotesInChord(chordList, 'fret')
    fret = newChordByFret[1][1]  # the fret shared by the three high notes
    for fingerNumber in range(1, 3):  # finger-3 barre, finger 1/2 single press
        for string in range(newChordByFret[1][0], 7):
            newDancer = copyNewDancer(dancer)
            newDancer.changeBarre(3, string, fret, 2)
            newDancer.fingerMoveTo(fingerNumber, newChordByFret[0][0], newChordByFret[0][1])
            newDancer.recordTrace([3, fingerNumber], noPress)
            if newDancer.validation(chordPosition):
                resultAppend(newDancer)
    for fingerNumber in range(1, 4):  # finger-4 barre, finger 1/2/3 single press
        for string in range(newChordByFret[1][0], 7):
            newDancer = copyNewDancer(dancer)
            newDancer.changeBarre(4, string, fret, 2)
            newDancer.fingerMoveTo(fingerNumber, newChordByFret[0][0], newChordByFret[0][1])
            newDancer.recordTrace([4, fingerNumber], noPress)
            if newDancer.validation(chordPosition):
                resultAppend(newDancer)
    for i in range(1):  # fingers 1-4 press the four notes one each
        newDancer = copyNewDancer(dancer)
        newDancer.fingerMoveTo(1, newChordByFret[0][0], newChordByFret[0][1])
        newDancer.fingerMoveTo(2, newChordByFret[1][0], fret)
        newDancer.fingerMoveTo(3, newChordByFret[2][0], fret)
        # BUG FIX: finger 4 must press the fourth note; the original reused
        # newChordByFret[2][0] (finger 3's string), leaving note 4 unpressed.
        newDancer.fingerMoveTo(4, newChordByFret[3][0], fret)
        newDancer.recordTrace([1, 2, 3, 4], noPress)
        if newDancer.validation(chordPosition):
            resultAppend(newDancer)
    return result
def chord2Finger10(dancer, chordPosition):
    """Handle shape [2,2]: two notes on a low fret, two on a higher fret.

    Candidates (translated from the Chinese original): finger-1 barre with
    single presses on the high pair; finger-1 full barre plus a small barre
    on the high pair; fingers 1/2 on the low pair and 3/4 on the high pair.

    NOTE(review): both ``for i in range(0, 1)`` loops run only once (i = 0),
    although the comments describe several finger variants -- confirm intent.
    """
    result = []
    resultAppend = result.append
    chordList, noPress = getChordList(chordPosition)
    newChordByFret = arrangeNotesInChord(chordList, 'fret')
    for i in range(0, 1):  # finger-1 barre + fingers 2/3/4 on the high pair
        for string in range(newChordByFret[0][0], 7):
            newDancer = copyNewDancer(dancer)
            newDancer.changeBarre(1, string, newChordByFret[0][1], i + 2)
            singlePressDancer = fingerNoteComb(newDancer, newChordByFret[2:], [2, 3, 4], ESN=noPress)
            result += singlePressDancer
    for i in range(0, 1):  # finger-1 full barre + finger-3 small barre
        for string in range(newChordByFret[0][0], 7):
            newDancer = copyNewDancer(dancer)
            newDancer.changeBarre(1, string, newChordByFret[0][1], 2)
            newDancer.changeBarre(i + 3, newChordByFret[2][0], newChordByFret[2][1], 3)
            newDancer.recordTrace([1, i + 3], noPress)
            if newDancer.validation(chordPosition):
                resultAppend(newDancer)
    newDancer = copyNewDancer(dancer)
    for i in range(4):  # fingers 1/2 low pair, 3/4 high pair
        newDancer.fingerMoveTo(i + 1, newChordByFret[i][0], newChordByFret[i][1])
    newDancer.recordTrace([1, 2, 3, 4], noPress)
    if newDancer.validation(chordPosition):
        resultAppend(newDancer)
    return result
def chord2Finger11(dancer, chordPosition):
    """Handle [3,1], [4,1], [5,1]: finger-1 full barre on the low fret plus a
    finger 2/3/4 single press on the highest note."""
    dancers = []
    fretted, openNotes = getChordList(chordPosition)
    byFret = arrangeNotesInChord(fretted, 'fret')
    barreFret = byFret[0][1]
    for barreString in range(byFret[0][0], 7):
        base = copyNewDancer(dancer)
        base.changeBarre(1, barreString, barreFret, 2)  # finger-1 full barre
        dancers.extend(fingerNoteComb(base, [byFret[-1]], [2, 3, 4], ESN=openNotes))
    return dancers
def chord2Finger12(dancer, chordPosition):
    """Handle shapes [1,1,2], [1,1,3]: finger-3/4 barre on the high shared fret
    while fingers 1 and 2 press the two low single notes.
    """
    result = []
    chordList, noPress = getChordList(chordPosition)
    newChordByFret = arrangeNotesInChord(chordList, 'fret')
    for i in range(2):  # barre with finger 3, then finger 4
        for string in range(newChordByFret[2][0], 7):
            newDancer = copyNewDancer(dancer)
            newDancer.changeBarre(i + 3, string, newChordByFret[2][1], 2)
            # BUG FIX: original passed [newChordByFret[:1]] -- a list wrapped
            # in another list, i.e. one malformed position -- instead of the
            # two low notes the docstring says fingers 1/2 must press; also
            # record the barre finger as used (consistent with chord2Finger14).
            singlePressDancer = fingerNoteComb(newDancer, newChordByFret[:2], [1, 2], [i + 3], noPress)
            result += singlePressDancer
    return result
def chord2Finger13(dancer, chordPosition):
    """Handle [1,2,1] and [1,1,1,1]: fingers 1-4 press the four notes from
    the lowest fret upward; at most one resulting dancer."""
    dancers = []
    fretted, openNotes = getChordList(chordPosition)
    byFret = arrangeNotesInChord(fretted, 'fret')
    candidate = copyNewDancer(dancer)
    for finger in range(1, 5):
        note = byFret[finger - 1]
        candidate.fingerMoveTo(finger, note[0], note[1])
    candidate.recordTrace([1, 2, 3, 4], openNotes)
    if candidate.validation(chordPosition):
        dancers.append(candidate)
    return dancers
def chord2Finger14(dancer, chordPosition):
    """Handle [2,1,1] and [3,1,1]: finger-1 full barre on the low fret while
    fingers 2/3/4 press the two highest single notes."""
    dancers = []
    fretted, openNotes = getChordList(chordPosition)
    byFret = arrangeNotesInChord(fretted, 'fret')
    for barreString in range(byFret[0][0], 7):
        base = copyNewDancer(dancer)
        base.changeBarre(1, barreString, byFret[0][1], 2)  # finger-1 full barre
        dancers.extend(fingerNoteComb(base, byFret[-2:], [2, 3, 4], [1], openNotes))
    return dancers
def chord2Finger15(dancer, chordPosition):
    """Handle [3,1,1,1] and [2,1,1,1]: finger-1 barre on the low fret while
    fingers 2/3/4 press the three higher single notes.

    NOTE(review): the original docstring promises both a full and a small
    barre, but ``for i in range(0, 1)`` runs only once (i = 0, width 2) --
    confirm whether range(0, 2) was intended.
    """
    result = []
    chordList, noPress = getChordList(chordPosition)
    newChordByFret = arrangeNotesInChord(chordList, 'fret')
    for i in range(0, 1):  # finger-1 barre + single presses
        for string in range(newChordByFret[0][0], 7):
            newDancer = copyNewDancer(dancer)
            newDancer.changeBarre(1, string, newChordByFret[0][1], i + 2)
            # notes above the barre fret, pressed by fingers 2/3/4
            singlePressDancer = fingerNoteComb(newDancer, newChordByFret[i + 2:], [2, 3, 4], [1], noPress)
            result += singlePressDancer
    return result
def chord2Finger16(dancer, chordPosition):
    """Handle shape [1,4]: finger-3/4 barre on the high fret while finger 1/2
    presses the low note.

    NOTE(review): the docstring mentions both fingers 3 and 4 for the barre,
    but ``for i in range(0, 1)`` runs only once (i = 0 -> finger 3) --
    confirm whether range(0, 2) was intended.
    """
    result = []
    chordList, noPress = getChordList(chordPosition)
    newChordByFret = arrangeNotesInChord(chordList, 'fret')
    for i in range(0, 1):  # barre + finger 1/2 on the low note
        for string in range(newChordByFret[0][0], 7):
            newDancer = copyNewDancer(dancer)
            newDancer.changeBarre(i + 3, string, newChordByFret[0][1], 2)
            singlePressDancer = fingerNoteComb(newDancer, [newChordByFret[0]], [1, 2], [i + 3], noPress)
            result += singlePressDancer
    return result
def chord2Finger17(dancer, chordPosition):
    """Handle shape [2,3]: finger-1 full barre on the low fret plus a finger-3
    small barre on the high fret (the original looped ``i in range(0, 1)``,
    i.e. only i = 0 -> finger 3)."""
    dancers = []
    fretted, openNotes = getChordList(chordPosition)
    byFret = arrangeNotesInChord(fretted, 'fret')
    smallBarreFinger = 3
    for barreString in range(byFret[0][0], 7):
        candidate = copyNewDancer(dancer)
        candidate.changeBarre(1, barreString, byFret[0][1], 2)
        candidate.changeBarre(smallBarreFinger, byFret[2][0], byFret[2][1], 3)
        candidate.recordTrace([1, smallBarreFinger], openNotes)
        if candidate.validation(chordPosition):
            dancers.append(candidate)
    return dancers
def chord2Finger18(dancer, chordPosition):
    """Handle shape [3,2]: finger-1 full barre plus a finger-3 small barre,
    and finger-1 barre plus fingers 2/3/4 pressing the two high notes.

    NOTE(review): both ``for i in range(0, 1)`` loops run only once (i = 0),
    though the original comments describe 3/4-finger and full/small variants
    -- confirm whether range(0, 2) was intended.
    """
    result = []
    resultAppend = result.append
    chordList, noPress = getChordList(chordPosition)
    newChordByFret = arrangeNotesInChord(chordList, 'fret')
    for i in range(0, 1):  # finger-1 full barre + finger-3 small barre
        for string in range(newChordByFret[0][0], 7):
            newDancer = copyNewDancer(dancer)
            newDancer.changeBarre(1, string, newChordByFret[0][1], 2)
            newDancer.changeBarre(i + 3, newChordByFret[2][0], newChordByFret[2][1], 3)
            newDancer.recordTrace([1, i + 3], noPress)
            if newDancer.validation(chordPosition):
                resultAppend(newDancer)
    for i in range(0, 1):  # finger-1 barre + single presses on the top pair
        for string in range(newChordByFret[0][0], 7):
            newDancer = copyNewDancer(dancer)
            newDancer.changeBarre(1, string, newChordByFret[0][1], i + 2)
            singlePressDancer = fingerNoteComb(newDancer, newChordByFret[3:], [2, 3, 4], [1], noPress)
            result += singlePressDancer
    return result
def chord2Finger19(dancer, chordPosition):
    """Handle shape [4,2]: finger-1 full barre plus fingers 2/3/4 on the two
    high notes, and finger-1 full barre plus a finger-3 small barre.

    NOTE(review): ``for i in range(0, 1)`` runs only once (i = 0 -> finger 3)
    though the original comment says fingers 3/4 -- confirm intent.
    """
    result = []
    resultAppend = result.append
    chordList, noPress = getChordList(chordPosition)
    newChordByFret = arrangeNotesInChord(chordList, 'fret')
    for string in range(newChordByFret[0][0], 7):  # barre + single presses
        newDancer = copyNewDancer(dancer)
        newDancer.changeBarre(1, string, newChordByFret[0][1], 2)
        singlePressDancer = fingerNoteComb(newDancer, newChordByFret[4:], [2, 3, 4], [1], noPress)
        result += singlePressDancer
    for i in range(0, 1):  # finger-1 full barre + finger-3 small barre
        for string in range(newChordByFret[0][0], 7):
            newDancer = copyNewDancer(dancer)
            newDancer.changeBarre(1, string, newChordByFret[0][1], 2)
            newDancer.changeBarre(i + 3, newChordByFret[4][0], newChordByFret[4][1], 3)
            newDancer.recordTrace([1, i + 3], noPress)
            if newDancer.validation(chordPosition):
                resultAppend(newDancer)
    return result
def chord2Finger20(dancer, chordPosition):
    """Handle shape [2,1,1,2]: finger-1 full barre, finger-4 small barre,
    fingers 2 and 3 press the two middle single notes.

    NOTE(review): unlike the sibling functions, ``newDancer`` is created ONCE
    before the loop and then mutated/re-validated for every barre string --
    the same object may be appended multiple times. Confirm this is intended
    (siblings create a fresh copy per iteration).
    """
    result = []
    chordList, noPress = getChordList(chordPosition)
    newChordByFret = arrangeNotesInChord(chordList, 'fret')
    newDancer = copyNewDancer(dancer)
    for string in range(newChordByFret[0][0], 7):
        newDancer.changeBarre(1, string, newChordByFret[0][1], 2)
        newDancer.changeBarre(4, newChordByFret[4][0], newChordByFret[4][1], 3)
        newDancer.fingerMoveTo(2, newChordByFret[2][0], newChordByFret[2][1])
        newDancer.fingerMoveTo(3, newChordByFret[3][0], newChordByFret[3][1])
        newDancer.recordTrace([1, 2, 3, 4], noPress)
        if newDancer.validation(chordPosition):
            result.append(newDancer)
    return result
def chord2Finger21(dancer, chordPosition):
    """Handle fret-group shape [1,1,1,3]: finger 4 half barre over the
    3-note group, fingers 1/2/3 press the three single notes.

    Fixes two defects in the original:
    * validated dancers were never appended to ``result``;
    * ``return result`` sat inside the loop's ``if``, so the function
      returned an empty list on the first valid candidate and fell off the
      end (returning None) otherwise.  It now collects every valid
      candidate and returns the list after the loop, like its siblings.
    """
    result = []
    chordList, noPress = getChordList(chordPosition)
    newChordByFret = arrangeNotesInChord(chordList, 'fret')
    for string in range(newChordByFret[3][0], 7):
        newDancer = copyNewDancer(dancer)
        newDancer.changeBarre(4, string, newChordByFret[3][1], 3)
        newDancer.fingerMoveTo(1, newChordByFret[0][0], newChordByFret[0][1])
        newDancer.fingerMoveTo(2, newChordByFret[1][0], newChordByFret[1][1])
        newDancer.fingerMoveTo(3, newChordByFret[2][0], newChordByFret[2][1])
        newDancer.recordTrace([1, 2, 3, 4], noPress)
        if newDancer.validation(chordPosition):
            result.append(newDancer)
    return result
def chord2Finger22(dancer, chordPosition):
    """Handle fret-group shape [2,1,2].

    Two families of fingerings are produced:
    * finger 1 full barre + finger 4 half barre, with finger 2 or finger 3
      pressing the middle single note (two results);
    * finger 1 full barre + finger 3 half barre, with finger 2 pressing the
      middle single note (one result).
    """
    result = []
    chordList, noPress = getChordList(chordPosition)
    newChordByFret = arrangeNotesInChord(chordList, 'fret')
    lowString = newChordByFret[0][0]
    barreFret = newChordByFret[0][1]
    # Finger 1 full barre + finger 4 half barre; finger 2 or 3 on the single note.
    for barreString in range(lowString, 7):
        candidate = copyNewDancer(dancer)
        candidate.changeBarre(1, barreString, barreFret, 2)
        candidate.changeBarre(4, newChordByFret[3][0], newChordByFret[3][1], 3)
        result += fingerNoteComb(candidate, [newChordByFret[2]], [2, 3], [1, 4], noPress)
    # Finger 1 full barre + finger 3 half barre; finger 2 on the single note.
    for barreString in range(lowString, 7):
        candidate = copyNewDancer(dancer)
        candidate.changeBarre(1, barreString, barreFret, 2)
        candidate.changeBarre(3, newChordByFret[3][0], newChordByFret[3][1], 3)
        candidate.fingerMoveTo(2, newChordByFret[2][0], newChordByFret[2][1])
        candidate.recordTrace([1, 2, 3], noPress)
        if candidate.validation(chordPosition):
            result.append(candidate)
    return result
def chord2Finger23(dancer, chordPosition):
    """Handle fret-group shape [2,2,1].

    Produces:
    * finger 1 full barre + finger 3 half barre + finger 4 on the last
      single note (one family of results);
    * finger 1 full (mode 2) or half (mode 3) barre with fingers 2/3/4
      pressing the remaining three notes individually (two families).
    """
    result = []
    chordList, noPress = getChordList(chordPosition)
    newChordByFret = arrangeNotesInChord(chordList, 'fret')
    lowString, barreFret = newChordByFret[0][0], newChordByFret[0][1]
    # Finger 1 full barre, finger 3 half barre, finger 4 single press.
    for barreString in range(lowString, 7):
        candidate = copyNewDancer(dancer)
        candidate.changeBarre(1, barreString, barreFret, 2)
        candidate.changeBarre(3, newChordByFret[2][0], newChordByFret[2][1], 3)
        candidate.fingerMoveTo(4, newChordByFret[4][0], newChordByFret[4][1])
        candidate.recordTrace([1, 3, 4], noPress)
        if candidate.validation(chordPosition):
            result.append(candidate)
    # Finger 1 full or half barre; fingers 2/3/4 press notes 2/3/4.
    for barreMode in (2, 3):
        for barreString in range(lowString, 7):
            candidate = copyNewDancer(dancer)
            candidate.changeBarre(1, barreString, barreFret, barreMode)
            for finger in (2, 3, 4):
                candidate.fingerMoveTo(finger, newChordByFret[finger][0], newChordByFret[finger][1])
            candidate.recordTrace([1, 2, 3, 4], noPress)
            if candidate.validation(chordPosition):
                result.append(candidate)
    return result
def chord2Finger24(dancer, chordPosition):
    """Handle fret-group shape [4,1,1]: finger 1 full barre over the 4-note
    group, then every pair out of fingers 2/3/4 presses the last two single
    notes (three result families)."""
    result = []
    chordList, noPress = getChordList(chordPosition)
    newChordByFret = arrangeNotesInChord(chordList, 'fret')
    for barreString in range(newChordByFret[0][0], 7):
        candidate = copyNewDancer(dancer)
        candidate.changeBarre(1, barreString, newChordByFret[0][1], 2)
        result += fingerNoteComb(candidate, newChordByFret[-2:], [2, 3, 4], [1], noPress)
    return result
def chord2Finger25(dancer, chordPosition):
    """Handle fret-group shape [1,1,1,2]: finger 4 full barre over the
    2-note group, fingers 1/2/3 press the three single notes.

    Fix: the original passed ``[newChordByFret[:2]]`` to fingerNoteComb —
    a *nested* list of only the first two notes — while every sibling
    passes a flat slice of notes and the documented fingering needs all
    three single notes.  It now passes ``newChordByFret[:3]``.
    """
    result = []
    chordList, noPress = getChordList(chordPosition)
    newChordByFret = arrangeNotesInChord(chordList, 'fret')
    for string in range(newChordByFret[3][0], 7):
        newDancer = copyNewDancer(dancer)  # finger 4 full barre
        newDancer.changeBarre(4, string, newChordByFret[3][1], 2)
        # Fingers 1/2/3 take the three single notes in every combination.
        singlePressDancer = fingerNoteComb(newDancer, newChordByFret[:3], [1, 2, 3], [4], noPress)
        result += singlePressDancer
    return result
def chord2Finger26(dancer, chordPosition):
    """Handle fret-group shape [3,1,2].

    Produces:
    * finger 1 full barre + finger 3 half barre + finger 2 on the single
      note (one family);
    * finger 1 full barre + finger 4 half barre, with finger 2 or finger 3
      on the single note (two families).
    """
    result = []
    chordList, noPress = getChordList(chordPosition)
    newChordByFret = arrangeNotesInChord(chordList, 'fret')
    lowString, barreFret = newChordByFret[0][0], newChordByFret[0][1]
    halfBarreNote = newChordByFret[-2]
    singleNote = newChordByFret[3]
    # Finger 1 full barre, finger 3 half barre, finger 2 single press.
    for barreString in range(lowString, 7):
        candidate = copyNewDancer(dancer)
        candidate.changeBarre(1, barreString, barreFret, 2)
        candidate.changeBarre(3, halfBarreNote[0], halfBarreNote[1], 3)
        candidate.fingerMoveTo(2, singleNote[0], singleNote[1])
        candidate.recordTrace([1, 2, 3], noPress)
        if candidate.validation(chordPosition):
            result.append(candidate)
    # Finger 1 full barre, finger 4 half barre, finger 2 or 3 single press.
    for pressFinger in (2, 3):
        for barreString in range(lowString, 7):
            candidate = copyNewDancer(dancer)
            candidate.changeBarre(1, barreString, barreFret, 2)
            candidate.changeBarre(4, halfBarreNote[0], halfBarreNote[1], 3)
            candidate.fingerMoveTo(pressFinger, singleNote[0], singleNote[1])
            candidate.recordTrace([1, 4, pressFinger], noPress)
            if candidate.validation(chordPosition):
                result.append(candidate)
    return result
def chord2Finger27(dancer, chordPosition):
    """Handle fret-group shape [2,1,3].

    Translated from the original docstring:
    * two results — finger 1 full/half barre + finger 3 half barre, with
      finger 2 pressing the single note;
    * two results — finger 1 full barre + finger 4 half barre, with
      finger 2 or 3 pressing the single note.

    NOTE(review): both variants anchor the half barre on
    ``newChordByFret[-2]``; confirm that is the intended note of the 3-note
    group for this shape.
    """
    result = []
    resultAppend = result.append
    chordList, noPress = getChordList(chordPosition)
    newChordByFret = arrangeNotesInChord(chordList, 'fret')
    for i in range(2):
        for string in range(newChordByFret[0][0], 7):
            newDancer = copyNewDancer(dancer)
            newDancer.changeBarre(1, string, newChordByFret[0][1], i + 2)  # finger 1 full/half barre, finger 3 half barre, finger 2 single press
            newDancer.changeBarre(3, newChordByFret[-2][0], newChordByFret[-2][1], 3)
            newDancer.fingerMoveTo(2, newChordByFret[2][0], newChordByFret[2][1])
            newDancer.recordTrace([1, 2, 3], noPress)
            if newDancer.validation(chordPosition):
                resultAppend(newDancer)
    for i in range(2):
        for string in range(newChordByFret[0][0], 7):
            newDancer = copyNewDancer(dancer)
            newDancer.changeBarre(1, string, newChordByFret[0][1], 2)  # finger 1 full barre, finger 4 half barre, finger 2/3 single press
            newDancer.changeBarre(4, newChordByFret[-2][0], newChordByFret[-2][1], 3)
            newDancer.fingerMoveTo(i + 2, newChordByFret[2][0], newChordByFret[2][1])
            newDancer.recordTrace([1, 4, i + 2], noPress)
            if newDancer.validation(chordPosition):
                resultAppend(newDancer)
    return result
def chord2Finger28(dancer, chordPosition):
    """Handle fret-group shape [3,3].

    Translated from the original docstring: finger 1 full barre with
    fingers 2/3/4 pressing the other three notes singly, plus finger 1 full
    barre combined with a finger 3/4 half barre.

    NOTE(review): the second loop is ``range(0, 1)`` so only finger 3 is
    tried even though the docstring mentions fingers 3/4 — confirm whether
    ``range(2)`` was intended (same pattern as chord2Finger19).
    """
    result = []
    resultAppend = result.append
    chordList, noPress = getChordList(chordPosition)
    newChordByFret = arrangeNotesInChord(chordList, 'fret')
    for string in range(newChordByFret[0][0], 7):
        newDancer = copyNewDancer(dancer)
        newDancer.changeBarre(1, string, newChordByFret[0][1], 2)  # finger 1 full barre; 2/3/4 single presses
        singlePressDancer = fingerNoteComb(newDancer, newChordByFret[3:], [2, 3, 4], [1], noPress)
        result += singlePressDancer
    for i in range(0, 1):  # finger 1 full barre + finger 3 half barre
        for string in range(newChordByFret[0][0], 7):
            newDancer = copyNewDancer(dancer)
            newDancer.changeBarre(1, string, newChordByFret[0][1], 2)
            newDancer.changeBarre(i + 3, newChordByFret[3][0], newChordByFret[3][1], 3)
            newDancer.recordTrace([1, i + 3], noPress)
            if newDancer.validation(chordPosition):
                resultAppend(newDancer)
    return result
def chord2Finger29(dancer, chordPosition):
    """Handle fret-group shape [2,4]: finger 1 full barre over the 2-note
    group plus a finger 3 or finger 4 half barre over the 4-note group
    (two results).

    Fix: the half barre was issued with barre mode 2 (the code every
    sibling uses for a *full* barre) even though the inline comment calls
    it a half barre; it now uses mode 3, consistent with all other
    chord2Finger* half barres.
    """
    result = []
    resultAppend = result.append
    chordList, noPress = getChordList(chordPosition)
    newChordByFret = arrangeNotesInChord(chordList, 'fret')
    for i in range(2):  # finger 1 full barre, finger 3/4 half barre
        for string in range(newChordByFret[0][0], 7):
            newDancer = copyNewDancer(dancer)
            newDancer.changeBarre(1, string, newChordByFret[0][1], 2)
            newDancer.changeBarre(i + 3, newChordByFret[2][0], newChordByFret[2][1], 3)
            newDancer.recordTrace([1, i + 3], noPress)
            if newDancer.validation(chordPosition):
                resultAppend(newDancer)
    return result
def chord2Finger30(dancer, chordPosition):
    """Handle fret-group shape [1,5]: finger 1 presses the single note while
    finger 3 or finger 4 lays a half barre over the 5-note group
    (two result families)."""
    result = []
    chordList, noPress = getChordList(chordPosition)
    newChordByFret = arrangeNotesInChord(chordList, 'fret')
    singleNote, barreNote = newChordByFret[0], newChordByFret[1]
    for barreFinger in (3, 4):
        for barreString in range(barreNote[0], 7):
            candidate = copyNewDancer(dancer)
            candidate.changeBarre(barreFinger, barreString, barreNote[1], 3)
            candidate.fingerMoveTo(1, singleNote[0], singleNote[1])
            candidate.recordTrace([1, barreFinger], noPress)
            if candidate.validation(chordPosition):
                result.append(candidate)
    return result
def chord2Finger31(dancer, chordPosition):
    """Handle fret-group shape [1,1,4].

    Produces:
    * finger 3 half barre over the 4-note group with fingers 1/2 pressing
      the two single notes (one family);
    * finger 4 half barre over the 4-note group with two of fingers 1/2/3
      pressing the two single notes (three families).

    Fix: the second variant passed ``newChordByFret[:1]`` (one note) to
    fingerNoteComb although the docstring requires combinations over the
    two single notes; it now passes ``newChordByFret[:2]``.
    """
    result = []
    resultAppend = result.append
    chordList, noPress = getChordList(chordPosition)
    newChordByFret = arrangeNotesInChord(chordList, 'fret')
    for string in range(newChordByFret[2][0], 7):
        newDancer = copyNewDancer(dancer)
        newDancer.changeBarre(3, string, newChordByFret[2][1], 3)  # finger 3 half barre
        for i in range(2):
            newDancer.fingerMoveTo(i + 1, newChordByFret[i][0], newChordByFret[i][1])
        newDancer.recordTrace([1, 2, 3], noPress)
        if newDancer.validation(chordPosition):
            resultAppend(newDancer)
    for string in range(newChordByFret[2][0], 7):
        newDancer = copyNewDancer(dancer)
        newDancer.changeBarre(4, string, newChordByFret[2][1], 3)  # finger 4 half barre
        singlePressDancer = fingerNoteComb(newDancer, newChordByFret[:2], [1, 2, 3], [4], noPress)
        result += singlePressDancer
    return result
| 38.612861
| 117
| 0.652925
| 3,134
| 29,423
| 6.129866
| 0.05903
| 0.027328
| 0.074332
| 0.033314
| 0.833169
| 0.789131
| 0.769351
| 0.747853
| 0.734111
| 0.705013
| 0
| 0.056402
| 0.221459
| 29,423
| 761
| 118
| 38.663601
| 0.78225
| 0.088332
| 0
| 0.804388
| 0
| 0
| 0.004308
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063985
| false
| 0
| 0.007313
| 0
| 0.135283
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5063878c38951547f1732f5eb79c268a5506d85d
| 374
|
py
|
Python
|
skpalm/multivariate_methods.py
|
jameschapman19/scikit-palm
|
7bd7add33181ccd27c79a604957d48fd0576e1bc
|
[
"BSD-3-Clause"
] | 4
|
2022-03-03T16:20:06.000Z
|
2022-03-03T16:20:19.000Z
|
skpalm/multivariate_methods.py
|
jameschapman19/scikit-palm
|
7bd7add33181ccd27c79a604957d48fd0576e1bc
|
[
"BSD-3-Clause"
] | null | null | null |
skpalm/multivariate_methods.py
|
jameschapman19/scikit-palm
|
7bd7add33181ccd27c79a604957d48fd0576e1bc
|
[
"BSD-3-Clause"
] | null | null | null |
def fasttsq(M,psi,Y,y,m,c,o,plm):
    """Fast T-squared statistic (placeholder, not yet implemented).

    Raises:
        NotImplementedError: always.
    """
    #TODO
    raise NotImplementedError
def fasttsq3d(M,psi,Y,y,m,c,o,plm):
    """3-D fast T-squared statistic (placeholder, not yet implemented).

    Raises:
        NotImplementedError: always.
    """
    #TODO
    raise NotImplementedError
def fasttsqp(M,psi,Y,y,m,c,o,plm):
    """Fast T-squared-p statistic (placeholder, not yet implemented).

    Raises:
        NotImplementedError: always.
    """
    #TODO
    raise NotImplementedError
def fastq(M,psi,Y,y,m,c,o,plm):
    """Fast Q statistic (placeholder, not yet implemented).

    Raises:
        NotImplementedError: always.
    """
    #TODO
    raise NotImplementedError
def fastq3d(M,psi,Y,y,m,c,o,plm):
    """3-D fast Q statistic (placeholder, not yet implemented).

    Raises:
        NotImplementedError: always.
    """
    #TODO
    raise NotImplementedError
| 19.684211
| 35
| 0.671123
| 65
| 374
| 3.861538
| 0.230769
| 0.079681
| 0.099602
| 0.119522
| 0.844622
| 0.844622
| 0.844622
| 0.844622
| 0.844622
| 0.844622
| 0
| 0.006536
| 0.181818
| 374
| 19
| 36
| 19.684211
| 0.813725
| 0.053476
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ac97e373e0555ac7ac85cfc3dffb17e36dc98181
| 209
|
py
|
Python
|
django_shopping_cart/context_processors.py
|
arcanemachine/django-shopping-cart
|
99e464f856c6605077d77f13e692bfba40f09e6c
|
[
"MIT"
] | null | null | null |
django_shopping_cart/context_processors.py
|
arcanemachine/django-shopping-cart
|
99e464f856c6605077d77f13e692bfba40f09e6c
|
[
"MIT"
] | null | null | null |
django_shopping_cart/context_processors.py
|
arcanemachine/django-shopping-cart
|
99e464f856c6605077d77f13e692bfba40f09e6c
|
[
"MIT"
] | null | null | null |
from django_shopping_cart import server_config
def constants(request):
    """Template context processor exposing project-wide constants from
    ``server_config`` to every rendered template."""
    context = {
        'PROJECT_NAME': server_config.PROJECT_NAME,
        'FRONTEND_SERVER_LOCATION': server_config.FRONTEND_SERVER_LOCATION,
    }
    return context
| 29.857143
| 79
| 0.794258
| 25
| 209
| 6.2
| 0.6
| 0.232258
| 0.283871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138756
| 209
| 6
| 80
| 34.833333
| 0.861111
| 0
| 0
| 0
| 0
| 0
| 0.172249
| 0.114833
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
acd891e26d6d8044a7b68d2ef8d6b58e4949b206
| 4,129
|
py
|
Python
|
pronunciation.py
|
radomd92/botjagwar
|
1dc96600c40041057a9f9afde38c31ca34b8db38
|
[
"MIT"
] | 7
|
2015-01-23T17:24:04.000Z
|
2022-01-12T16:54:24.000Z
|
pronunciation.py
|
radomd92/botjagwar
|
1dc96600c40041057a9f9afde38c31ca34b8db38
|
[
"MIT"
] | 18
|
2017-12-09T01:11:23.000Z
|
2021-09-22T13:26:24.000Z
|
pronunciation.py
|
radomd92/botjagwar
|
1dc96600c40041057a9f9afde38c31ca34b8db38
|
[
"MIT"
] | 1
|
2015-06-22T02:17:55.000Z
|
2015-06-22T02:17:55.000Z
|
import re
import sys
import pywikibot
from page_lister import get_pages_from_category
def replace_pronunciation_template(language, language_name):
    """Rewrite empty ``{{fanononana||<lang>}}`` pronunciation templates as
    the language-specific ``{{fanononana-<lang>}}`` template on every
    Malagasy Wiktionary page in the given category.

    Args:
        language: language code used inside the templates.
        language_name: category name to enumerate pages from.
    """
    for mg_page in get_pages_from_category('mg', language_name):
        old_content = mg_content = mg_page.get()
        print('>>>>', mg_page.title(), '<<<<')
        if '{{fanononana||%s}}' % language in mg_content:
            # Both the "| |" and "||" variants are rewritten.
            mg_content = mg_content.replace(
                '{{fanononana| |%s}}' % language,
                '{{fanononana-%s}}' % language)
            mg_content = mg_content.replace(
                '{{fanononana||%s}}' % language,
                '{{fanononana-%s}}' % language)
            pywikibot.showDiff(old_content, mg_content)
            mg_page.put(mg_content, "%s: manampy fanononana" % language_name)
        else:
            print('{{fanononana||%s}} not found' % language)
def copy_pronunciations(language, language_name, ipa_or_pron='IPA'):
    """Copy ``{{IPA|...|<lang>}}``-style pronunciations from English
    Wiktionary into empty ``{{fanononana||<lang>}}`` templates on Malagasy
    Wiktionary.

    NOTE(review): this definition is shadowed by the identically named
    function defined immediately below, so it is unreachable as written —
    rename or delete one of the two.  Also note the regex groups are
    (pronunciation, language) while the filter tests ``x[0] == language``
    and the loop concatenates ``m[1]`` — the group indices look swapped
    relative to the later version; confirm before reviving this code.

    Args:
        language: language code to match in the English templates.
        language_name: category name to enumerate pages from.
        ipa_or_pron: template name to scan for ('IPA' by default).
    """
    pron_regex = re.compile('\\{\\{%s\\|(.*)\\|([a-z]+)\\}\\}' % ipa_or_pron)
    for mg_page in get_pages_from_category('mg', language_name):
        print('>>>>', mg_page.title(), '<<<<')
        en_page = pywikibot.Page(
            pywikibot.Site(
                'en',
                'wiktionary'),
            mg_page.title())
        if en_page.isRedirectPage():
            print('redirect')
            continue
        if en_page.exists():
            en_content = en_page.get()
            match = [x for x in pron_regex.findall(
                en_content) if x[0] == language]
            if not match:
                print('no match')
                continue
            old_content = mg_content = mg_page.get()
            if '{{fanononana||%s}}' % language in mg_content:
                print(match)
                concat_pron = ''
                for m in match:
                    concat_pron += m[1]
                mg_content = mg_content.replace(
                    '{{fanononana||%s}}' % language,
                    '{{fanononana-%s|%s}}' % (language, concat_pron))
                pywikibot.showDiff(old_content, mg_content)
                mg_page.put(
                    mg_content,
                    "%s: manampy fanononana" %
                    language_name)
            else:
                print('{{fanononana||%s}} not found' % language)
        else:
            print('english page does not exist')
            continue
def copy_pronunciations(language, language_name, ipa_or_pron='IPA'):
    """Copy ``{{<lang>-IPA|...}}`` pronunciations from English Wiktionary
    into empty ``{{fanononana||<lang>}}`` templates on Malagasy Wiktionary.

    This redefinition shadows the earlier ``copy_pronunciations`` above;
    only this version is reachable at runtime.

    Args:
        language: language code to match as the template prefix.
        language_name: category name to enumerate pages from.
        ipa_or_pron: template suffix to scan for ('IPA' by default).
    """
    pron_regex = re.compile('\\{\\{([a-z]+)\\-%s\\|(.*)\\}\\}' % ipa_or_pron)
    for mg_page in get_pages_from_category('mg', language_name):
        print('>>>>', mg_page.title(), '<<<<')
        en_page = pywikibot.Page(
            pywikibot.Site(
                'en',
                'wiktionary'),
            mg_page.title())
        if en_page.isRedirectPage():
            print('redirect')
            continue
        if en_page.exists():
            en_content = en_page.get()
            # Group 0 is the language prefix, group 1 the pronunciation.
            match = [x for x in pron_regex.findall(
                en_content) if x[0] == language]
            if not match:
                print('no match')
                continue
            old_content = mg_content = mg_page.get()
            if '{{fanononana||%s}}' % language in mg_content:
                print(match)
                concat_pron = ''
                for m in match:
                    concat_pron += m[1]
                mg_content = mg_content.replace(
                    '{{fanononana||%s}}' % language,
                    '{{fanononana-%s|%s}}' % (language, concat_pron))
                pywikibot.showDiff(old_content, mg_content)
                mg_page.put(
                    mg_content,
                    "%s: manampy fanononana" %
                    language_name)
            else:
                print('{{fanononana||%s}} not found' % language)
        else:
            print('english page does not exist')
            continue
# Command-line entry point: argv[1] selects the action ('c' = copy
# pronunciations, 'r' = replace templates); argv[2] and argv[3] are passed
# through as the language code and category name.  No argument validation
# is performed — a missing or unknown argv[1] raises KeyError/IndexError.
if __name__ == '__main__':
    functions = {
        'c': copy_pronunciations,
        'r': replace_pronunciation_template
    }
    functions[sys.argv[1]](sys.argv[2], sys.argv[3])
| 35.290598
| 77
| 0.503027
| 424
| 4,129
| 4.646226
| 0.15566
| 0.091371
| 0.08934
| 0.063959
| 0.875635
| 0.875635
| 0.875635
| 0.845178
| 0.845178
| 0.845178
| 0
| 0.002663
| 0.363284
| 4,129
| 116
| 78
| 35.594828
| 0.746672
| 0
| 0
| 0.861386
| 0
| 0
| 0.13829
| 0.0155
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029703
| false
| 0
| 0.039604
| 0
| 0.069307
| 0.138614
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
acff644ffa6634f204877086a03c7e8b4d2271b4
| 3,032
|
py
|
Python
|
tests/applications/test_forms.py
|
crydotsnake/djangogirls
|
0e764294085d6d7d3c4f61a7fe36f91640abedcd
|
[
"BSD-3-Clause"
] | 446
|
2015-01-04T20:58:26.000Z
|
2022-03-30T23:08:26.000Z
|
tests/applications/test_forms.py
|
serenasensini/TheRedCode_Docker-per-Django-e-Postgres
|
78a2ca1f09ab956a6936d14a5fd99336ff39f472
|
[
"BSD-3-Clause"
] | 649
|
2015-01-09T23:42:14.000Z
|
2022-03-31T17:27:19.000Z
|
tests/applications/test_forms.py
|
serenasensini/TheRedCode_Docker-per-Django-e-Postgres
|
78a2ca1f09ab956a6936d14a5fd99336ff39f472
|
[
"BSD-3-Clause"
] | 319
|
2015-01-06T20:58:42.000Z
|
2022-03-30T06:29:04.000Z
|
import pytest
import vcr
from applications.forms import ApplicationForm
from applications.models import Application, Form, Question
from core.models import Event
@pytest.mark.django_db
@vcr.use_cassette('tests/applications/vcr/application_form_prevent_duplicate_emails.yaml')
def test_application_form_prevent_duplicate_emails():
    """A second submission with the same e-mail address is rejected."""
    event = Event.objects.create(
        name='Test', city='Test', country='Test',
        is_page_live=True, page_url='test'
    )
    form = Form.objects.create(event=event)
    # Override default questions, we need just the e-mail
    form.question_set.all().delete()
    question = Question.objects.create(
        title="Your e-mail address:",
        question_type="email",
        form=form,
        order=1
    )
    assert Application.objects.count() == 0
    form_data = {
        'newsletter_optin': 'yes',
        'g-recaptcha-response': 'PASSED',
        f'question_{question.pk}': 'test@test.pl'
    }
    application_form = ApplicationForm(form_data, form=form)
    assert application_form.is_valid()
    application_form.save()
    assert Application.objects.count() == 1
    application = Application.objects.get()
    assert application.newsletter_optin is True
    # Identical data again: the duplicate e-mail must invalidate the form.
    application_form = ApplicationForm(form_data, form=form)
    assert not application_form.is_valid()
@pytest.mark.django_db
@vcr.use_cassette('tests/applications/vcr/application_form_prevent_duplicate_emails.yaml')
def test_application_form_no_newsletter():
    """Opting out of the newsletter stores newsletter_optin as False."""
    event = Event.objects.create(
        name='Test', city='Test', country='Test',
        is_page_live=True, page_url='test')
    form = Form.objects.create(event=event)
    # Override default questions, we need just the e-mail
    form.question_set.all().delete()
    question = Question.objects.create(
        title="Your e-mail address:",
        question_type="email",
        form=form,
        order=1)
    assert Application.objects.count() == 0
    form_data = {
        'newsletter_optin': 'no',
        'g-recaptcha-response': 'PASSED',
        f'question_{question.pk}': 'test@test.pl'
    }
    application_form = ApplicationForm(form_data, form=form)
    assert application_form.is_valid()
    application_form.save()
    assert Application.objects.count() == 1
    application = Application.objects.get()
    assert application.newsletter_optin is False
@pytest.mark.django_db
@vcr.use_cassette('tests/applications/vcr/application_form_prevent_duplicate_emails.yaml')
def test_application_form_no_questions():
    """A form whose questions were all removed still validates with only
    the base fields."""
    event = Event.objects.create(
        name='Test', city='Test', country='Test',
        is_page_live=True, page_url='test')
    form = Form.objects.create(event=event)
    # Override default questions, we need just the e-mail
    form.question_set.all().delete()
    assert Application.objects.count() == 0
    form_data = {
        'newsletter_optin': 'yes',
        'g-recaptcha-response': 'PASSED'
    }
    application_form = ApplicationForm(form_data, form=form)
    assert application_form.is_valid()
| 30.32
| 90
| 0.697559
| 372
| 3,032
| 5.489247
| 0.198925
| 0.124878
| 0.058766
| 0.071009
| 0.904995
| 0.894221
| 0.894221
| 0.894221
| 0.868756
| 0.868756
| 0
| 0.002836
| 0.186016
| 3,032
| 99
| 91
| 30.626263
| 0.824554
| 0.051121
| 0
| 0.716216
| 0
| 0
| 0.176471
| 0.087365
| 0
| 0
| 0
| 0
| 0.148649
| 1
| 0.040541
| false
| 0.040541
| 0.067568
| 0
| 0.108108
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4a23886a570384a4d6bea6cd8d18e1ef0b7eb705
| 66,657
|
py
|
Python
|
src/instructions.py
|
hansbonini/pynes-dev
|
aa5d04de0a1beb6afb93219ffc9f63e83b3907a0
|
[
"MIT"
] | null | null | null |
src/instructions.py
|
hansbonini/pynes-dev
|
aa5d04de0a1beb6afb93219ffc9f63e83b3907a0
|
[
"MIT"
] | null | null | null |
src/instructions.py
|
hansbonini/pynes-dev
|
aa5d04de0a1beb6afb93219ffc9f63e83b3907a0
|
[
"MIT"
] | null | null | null |
import addrmodes
# TODO: Verificar se nao existem enderecamentos maiores que 1 byte
def rel_addr(value):
    """Interpret an unsigned byte as a signed 8-bit branch offset.

    Values with bit 7 set map to their two's-complement negatives, so the
    result lies in [-128, 127].
    """
    if value & 0x80:
        return (value & 0x7F) - 128
    return value
def advancePC(cpu, size):
    """Move the program counter forward by *size* bytes."""
    cpu.registers['PC'] = cpu.registers['PC'] + size
def setN(cpu, value):
    """Set the negative (N) status flag from bit 7 of *value*."""
    cpu.setStatus(cpu.statusFlags['n'], 1 if value & (1 << 7) else 0)
def setZ(cpu, value):
    """Set the zero (Z) status flag: 1 when *value* is exactly zero."""
    cpu.setStatus(cpu.statusFlags['z'], 1 if value == 0 else 0)
def setO(cpu, value):
    """Set the overflow (V) status flag.

    *value* is forwarded to setStatus unchanged, so callers may pass a
    truthy int (e.g. 0x80) rather than strictly 0/1.
    """
    cpu.setStatus(cpu.statusFlags['v'], value)
def setC(cpu, value):
    """Set the carry (C) status flag.

    *value* is forwarded to setStatus unchanged, so callers may pass a
    bool or truthy int rather than strictly 0/1.
    """
    cpu.setStatus(cpu.statusFlags['c'], value)
def ADC_Immediate(cpu):
    """ADC #imm — add the immediate operand plus carry to the accumulator.

    Sets V on signed overflow, C when the unsigned sum exceeds 0xFF, and
    N/Z from the result.  2 bytes, 2 cycles (per size/nCycles below).
    """
    size = 2
    nCycles = 2
    value = cpu.readMemory(cpu.registers['PC'] + 1)
    carry = cpu.getStatus(cpu.statusFlags['c'])
    tmp = value + cpu.registers['A'] + carry
    # Signed overflow: operands share a sign but the result's sign differs.
    setO(
        cpu, not (((cpu.registers['A'] ^ value) & 0x80) != 0)
        and (((cpu.registers['A'] ^ tmp) & 0x80)))
    setC(cpu, tmp > 0xFF)
    setN(cpu, tmp)
    setZ(cpu, tmp & 0xFF)
    cpu.registers['A'] = (tmp & 0xFF)
    advancePC(cpu, size)
    return nCycles
def ADC_Zero(cpu):
    """ADC zp — add the zero-page operand plus carry to the accumulator.

    Same flag behaviour as ADC_Immediate; the operand address comes from
    the zero-page addressing mode.  2 bytes, 3 cycles.
    """
    size = 2
    nCycles = 3
    address = addrmodes.Zero(cpu)
    value = cpu.readMemory(address)
    carry = cpu.getStatus(cpu.statusFlags['c'])
    tmp = value + cpu.registers['A'] + carry
    # Signed overflow: operands share a sign but the result's sign differs.
    setO(
        cpu, not (((cpu.registers['A'] ^ value) & 0x80) != 0)
        and (((cpu.registers['A'] ^ tmp) & 0x80)))
    setC(cpu, tmp > 0xFF)
    setN(cpu, tmp)
    setZ(cpu, tmp & 0xFF)
    cpu.registers['A'] = (tmp & 0xFF)
    advancePC(cpu, size)
    return nCycles
def ADC_Zero_X(cpu):
size = 2
nCycles = 4
address = addrmodes.Zero_X(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = value + cpu.registers['A'] + carry
setO(
cpu, not (((cpu.registers['A'] ^ value) & 0x80) != 0)
and (((cpu.registers['A'] ^ tmp) & 0x80)))
setC(cpu, tmp > 255)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def ADC_Absolute(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = value + cpu.registers['A'] + carry
setO(
cpu, not (((cpu.registers['A'] ^ value) & 0x80) != 0)
and (((cpu.registers['A'] ^ tmp) & 0x80)))
setC(cpu, tmp > 255)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def ADC_Absolute_X(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute_X(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = value + cpu.registers['A'] + carry
setO(
cpu, not (((cpu.registers['A'] ^ value) & 0x80) != 0)
and (((cpu.registers['A'] ^ tmp) & 0x80)))
setC(cpu, tmp > 255)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def ADC_Absolute_Y(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute_Y(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = value + cpu.registers['A'] + carry
setO(
cpu, not (((cpu.registers['A'] ^ value) & 0x80) != 0)
and (((cpu.registers['A'] ^ tmp) & 0x80)))
setC(cpu, tmp > 255)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def ADC_Indirect_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Indirect_X(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = value + cpu.registers['A'] + carry
setO(
cpu, not (((cpu.registers['A'] ^ value) & 0x80) != 0)
and (((cpu.registers['A'] ^ tmp) & 0x80)))
setC(cpu, tmp > 255)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def ADC_Indirect_Y(cpu):
size = 2
nCycles = 5
address = addrmodes.Indirect_Y(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = value + cpu.registers['A'] + carry
setO(
cpu, not (((cpu.registers['A'] ^ value) & 0x80) != 0)
and (((cpu.registers['A'] ^ tmp) & 0x80)))
setC(cpu, tmp > 255)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def AND_Immediate(cpu):
    """AND #imm — bitwise AND the accumulator with the immediate operand;
    sets N/Z from the result.  2 bytes, 2 cycles."""
    size = 2
    nCycles = 2
    value = cpu.registers['A'] & cpu.readMemory(cpu.registers['PC'] + 1)
    cpu.registers['A'] = value
    advancePC(cpu, size)
    setN(cpu, value)
    setZ(cpu, value)
    return nCycles
def AND_Zero(cpu):
size = 2
nCycles = 3
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] &= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
return nCycles
def AND_Zero_X(cpu):
size = 2
nCycles = 4
address = addrmodes.Zero_X(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] &= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
return nCycles
def AND_Absolute(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] &= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
return nCycles
def AND_Absolute_X(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute_X(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] &= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
return nCycles
def AND_Absolute_Y(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute_Y(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] &= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
return nCycles
def AND_Indirect_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Indirect_X(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] &= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
return nCycles
def AND_Indirect_Y(cpu):
size = 2
nCycles = 5
address = addrmodes.Indirect_Y(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] &= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
return nCycles
def ASL_Accumulator(cpu):
    """ASL A — shift the accumulator left one bit.

    Bit 7 goes into the carry flag; the result is masked to 8 bits and
    N/Z are set from it.  1 byte, 2 cycles.
    """
    size = 1
    nCycles = 2
    value = cpu.registers['A']
    setC(cpu, value & 0x80)  # old bit 7 becomes the carry
    value <<= 1
    value &= 0xFF
    advancePC(cpu, size)
    setN(cpu, value)
    setZ(cpu, value)
    cpu.registers['A'] = value
    return nCycles
def ASL_Zero(cpu):
size = 2
nCycles = 5
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x80)
value <<= 1
value &= 0xFF
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
cpu.writeMemory(address, value)
return nCycles
def ASL_Zero_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Zero_X(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x80)
value <<= 1
value &= 0xFF
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
cpu.writeMemory(address, value)
return nCycles
def ASL_Absolute(cpu):
size = 3
nCycles = 6
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x80)
value <<= 1
value &= 0xFF
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
cpu.writeMemory(address, value)
return nCycles
def ASL_Absolute_X(cpu):
size = 3
nCycles = 7
address = addrmodes.Absolute_X(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x80)
value <<= 1
value &= 0xFF
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
cpu.writeMemory(address, value)
return nCycles
def BCC_Relative(cpu):
    """BCC — branch when the carry flag is clear.

    The signed offset follows the opcode.  A taken branch costs one extra
    cycle, or two when the target crosses a page (0xFF00) boundary.
    2 bytes, 2+ cycles.
    """
    size = 2
    nCycles = 2
    value = cpu.readMemory(cpu.registers['PC'] + 1)
    value = rel_addr(value)
    if not cpu.getStatus(cpu.statusFlags['c']):
        if (cpu.registers['PC'] & 0xFF00) != (
                (cpu.registers['PC'] + value) & 0xFF00):
            nCycles += 2
        else:
            nCycles += 1
        advancePC(cpu, value)
    advancePC(cpu, size)
    return nCycles
def BCS_Relative(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
value = rel_addr(value)
if cpu.getStatus(cpu.statusFlags['c']):
if (cpu.registers['PC'] & 0xFF00) != (
(cpu.registers['PC'] + value) & 0xFF00):
nCycles += 2
else:
nCycles += 1
advancePC(cpu, value)
advancePC(cpu, size)
return nCycles
def BEQ_Relative(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
value = rel_addr(value)
if cpu.getStatus(cpu.statusFlags['z']):
if (cpu.registers['PC'] & 0xFF00) != (
(cpu.registers['PC'] + value) & 0xFF00):
nCycles += 2
else:
nCycles += 1
advancePC(cpu, value)
advancePC(cpu, size)
return nCycles
def BIT_Zero(cpu):
    """BIT zp — test accumulator bits against a zero-page operand.

    N is set from bit 7 of the operand, V from bit 6, and Z from
    ``operand & A``.  2 bytes, 3 cycles.
    """
    size = 2
    nCycles = 3
    address = addrmodes.Zero(cpu)
    value = cpu.readMemory(address)
    advancePC(cpu, size)
    setN(cpu, value)
    setZ(cpu, value & cpu.registers['A'])
    setO(cpu, (value >> 6) & 1)
    return nCycles
def BIT_Absolute(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value & cpu.registers['A'])
setO(cpu, (value >> 6) & 1)
return nCycles
def BMI_Relative(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
value = rel_addr(value)
if cpu.getStatus(cpu.statusFlags['n']):
if (cpu.registers['PC'] & 0xFF00) != (
(cpu.registers['PC'] + value) & 0xFF00):
nCycles += 2
else:
nCycles += 1
advancePC(cpu, value)
advancePC(cpu, size)
return nCycles
def BNE_Relative(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
value = rel_addr(value)
if not cpu.getStatus(cpu.statusFlags['z']):
if (cpu.registers['PC'] & 0xFF00) != (
(cpu.registers['PC'] + value) & 0xFF00):
nCycles += 2
else:
nCycles += 1
advancePC(cpu, value)
advancePC(cpu, size)
return nCycles
def BPL_Relative(cpu):
    """BPL — branch when the negative flag is clear.

    Cycle accounting is written differently from the other branch handlers
    in this file (+1 when taken, +1 more on a page cross) but yields the
    same totals.  2 bytes, 2+ cycles.
    """
    size = 2
    nCycles = 2
    value = cpu.readMemory(cpu.registers['PC'] + 1)
    value = rel_addr(value)
    if not cpu.getStatus(cpu.statusFlags['n']):
        nCycles += 1
        if (cpu.registers['PC'] & 0xFF00) != (
                (cpu.registers['PC'] + value) & 0xFF00):
            nCycles += 1
        advancePC(cpu, value)
    advancePC(cpu, size)
    return nCycles
def BRK_Implied(cpu):
    """BRK — software interrupt.

    Pushes PC+2 (high byte first) and the status register, sets the B and
    I flags, and posts an interrupt request.  1 byte, 7 cycles.

    NOTE(review): after pushing, advancePC adds 1 more on top of the
    ``PC += 2`` — confirm the resulting PC matches the CPU core's
    interrupt handling; 0x49 is an opaque request code defined elsewhere.
    """
    size = 1
    nCycles = 7
    cpu.registers['PC'] += 2
    cpu.pushStack((cpu.registers['PC'] >> 8) & 0xFF)
    cpu.pushStack(cpu.registers['PC'] & 0xFF)
    cpu.setStatus(cpu.statusFlags['b'], 1)
    cpu.pushStack(cpu.registers['P'])
    cpu.setStatus(cpu.statusFlags['i'], 1)
    cpu.InterruptRequest = 0x49
    advancePC(cpu, size)
    return nCycles
def BVC_Relative(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
value = rel_addr(value)
if not cpu.getStatus(cpu.statusFlags['v']):
if (cpu.registers['PC'] & 0xFF00) != (
(cpu.registers['PC'] + value) & 0xFF00):
nCycles += 2
else:
nCycles += 1
advancePC(cpu, value)
advancePC(cpu, size)
return nCycles
def BVS_Relative(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
value = rel_addr(value)
if cpu.getStatus(cpu.statusFlags['v']):
if (cpu.registers['PC'] & 0xFF00) != (
(cpu.registers['PC'] + value) & 0xFF00):
nCycles += 2
else:
nCycles += 1
advancePC(cpu, value)
advancePC(cpu, size)
return nCycles
def CLC_Implied(cpu):
    """CLC — clear the carry flag.  1 byte, 2 cycles."""
    cpu.setStatus(cpu.statusFlags['c'], 0)
    advancePC(cpu, 1)
    return 2
def CLD_Implied(cpu):
    """CLD — clear the decimal-mode flag.  1 byte, 2 cycles."""
    cpu.setStatus(cpu.statusFlags['d'], 0)
    advancePC(cpu, 1)
    return 2
def CLI_Implied(cpu):
    """CLI — clear the interrupt-disable flag.  1 byte, 2 cycles."""
    cpu.setStatus(cpu.statusFlags['i'], 0)
    advancePC(cpu, 1)
    return 2
def CLV_Implied(cpu):
    """CLV — clear the overflow flag.  1 byte, 2 cycles."""
    cpu.setStatus(cpu.statusFlags['v'], 0)
    advancePC(cpu, 1)
    return 2
def CMP_Immediate(cpu):
    """CMP #imm — compare the accumulator with the immediate operand.

    Computes A - M, sets C when A >= M, and N/Z from the difference
    (Z from its low byte).  2 bytes, 2 cycles.
    """
    size = 2
    nCycles = 2
    value = cpu.readMemory(cpu.registers['PC'] + 1)
    value = cpu.registers['A'] - value
    advancePC(cpu, size)
    setC(cpu, 1 if value >= 0 else 0)
    setN(cpu, value)
    setZ(cpu, value & 0xFF)
    return nCycles
def CMP_Zero(cpu):
size = 2
nCycles = 3
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
value = cpu.registers['A'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CMP_Zero_X(cpu):
size = 2
nCycles = 4
address = addrmodes.Zero_X(cpu)
value = cpu.readMemory(address)
value = cpu.registers['A'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CMP_Absolute(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
value = cpu.registers['A'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CMP_Absolute_X(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute_X(cpu)
value = cpu.readMemory(address)
value = cpu.registers['A'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CMP_Absolute_Y(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute_Y(cpu)
value = cpu.readMemory(address)
value = cpu.registers['A'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CMP_Indirect_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Indirect_X(cpu)
value = cpu.readMemory(address)
value = cpu.registers['A'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CMP_Indirect_Y(cpu):
size = 2
nCycles = 5
address = addrmodes.Indirect_Y(cpu)
value = cpu.readMemory(address)
value = cpu.registers['A'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CPX_Immediate(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
value = cpu.registers['X'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CPX_Zero(cpu):
size = 2
nCycles = 3
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
value = cpu.registers['X'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CPX_Absolute(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
value = cpu.registers['X'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CPY_Immediate(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
value = cpu.registers['Y'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CPY_Zero(cpu):
size = 2
nCycles = 3
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
value = cpu.registers['Y'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CPY_Absolute(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
value = cpu.registers['Y'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def _dec_memory(cpu, address, size, cycles):
    # Shared DEC read-modify-write tail: decrement with 8-bit wrap, set N/Z.
    result = (cpu.readMemory(address) - 1) & 0xFF
    cpu.writeMemory(address, result)
    setN(cpu, result)
    setZ(cpu, result)
    advancePC(cpu, size)
    return cycles
def DEC_Zero(cpu):
    """DEC zp: decrement a zero-page byte.  2 bytes, 5 cycles."""
    return _dec_memory(cpu, addrmodes.Zero(cpu), 2, 5)
def DEC_Zero_X(cpu):
    """DEC zp,X: decrement a zero-page,X byte.  2 bytes, 6 cycles."""
    return _dec_memory(cpu, addrmodes.Zero_X(cpu), 2, 6)
def DEC_Absolute(cpu):
    """DEC abs: decrement an absolute byte.  3 bytes, 6 cycles."""
    return _dec_memory(cpu, addrmodes.Absolute(cpu), 3, 6)
def DEC_Absolute_X(cpu):
    """DEC abs,X: decrement an absolute,X byte.  3 bytes, 7 cycles."""
    return _dec_memory(cpu, addrmodes.Absolute_X(cpu), 3, 7)
def DEX_Implied(cpu):
    """DEX: decrement X with 8-bit wrap; set N/Z.  1 byte, 2 cycles."""
    result = (cpu.registers['X'] - 1) & 0xFF
    cpu.registers['X'] = result
    setN(cpu, result)
    setZ(cpu, result)
    advancePC(cpu, 1)
    return 2
def DEY_Implied(cpu):
    """DEY: decrement Y with 8-bit wrap; set N/Z.  1 byte, 2 cycles."""
    result = (cpu.registers['Y'] - 1) & 0xFF
    cpu.registers['Y'] = result
    setN(cpu, result)
    setZ(cpu, result)
    advancePC(cpu, 1)
    return 2
def _eor(cpu, operand, size, cycles):
    # Shared EOR tail: A ^= operand, set N/Z, advance PC.
    result = cpu.registers['A'] ^ operand
    cpu.registers['A'] = result
    setN(cpu, result)
    setZ(cpu, result)
    advancePC(cpu, size)
    return cycles
def EOR_Immediate(cpu):
    """EOR #imm: exclusive-OR A with an immediate byte.  2 bytes, 2 cycles."""
    return _eor(cpu, cpu.readMemory(cpu.registers['PC'] + 1), 2, 2)
def EOR_Zero(cpu):
    """EOR zp: exclusive-OR A with a zero-page byte.  2 bytes, 3 cycles."""
    return _eor(cpu, cpu.readMemory(addrmodes.Zero(cpu)), 2, 3)
def EOR_Zero_X(cpu):
    """EOR zp,X: exclusive-OR A with a zero-page,X byte.  2 bytes, 4 cycles."""
    return _eor(cpu, cpu.readMemory(addrmodes.Zero_X(cpu)), 2, 4)
def EOR_Absolute(cpu):
    """EOR abs: exclusive-OR A with an absolute byte.  3 bytes, 4 cycles."""
    return _eor(cpu, cpu.readMemory(addrmodes.Absolute(cpu)), 3, 4)
def EOR_Absolute_X(cpu):
    """EOR abs,X: exclusive-OR A with an absolute,X byte.  3 bytes, 4 cycles."""
    return _eor(cpu, cpu.readMemory(addrmodes.Absolute_X(cpu)), 3, 4)
def EOR_Absolute_Y(cpu):
    """EOR abs,Y: exclusive-OR A with an absolute,Y byte.  3 bytes, 4 cycles."""
    return _eor(cpu, cpu.readMemory(addrmodes.Absolute_Y(cpu)), 3, 4)
def EOR_Indirect_X(cpu):
    """EOR (ind,X): exclusive-OR A with an (indirect,X) byte.  2 bytes, 6 cycles."""
    return _eor(cpu, cpu.readMemory(addrmodes.Indirect_X(cpu)), 2, 6)
def EOR_Indirect_Y(cpu):
    """EOR (ind),Y: exclusive-OR A with an (indirect),Y byte.  2 bytes, 5 cycles."""
    return _eor(cpu, cpu.readMemory(addrmodes.Indirect_Y(cpu)), 2, 5)
def _inc_memory(cpu, address, size, cycles):
    # Shared INC read-modify-write tail: increment with 8-bit wrap, set N/Z.
    result = (cpu.readMemory(address) + 1) & 0xFF
    cpu.writeMemory(address, result)
    setN(cpu, result)
    setZ(cpu, result)
    advancePC(cpu, size)
    return cycles
def INC_Zero(cpu):
    """INC zp: increment a zero-page byte.  2 bytes, 5 cycles."""
    return _inc_memory(cpu, addrmodes.Zero(cpu), 2, 5)
def INC_Zero_X(cpu):
    """INC zp,X: increment a zero-page,X byte.  2 bytes, 6 cycles."""
    return _inc_memory(cpu, addrmodes.Zero_X(cpu), 2, 6)
def INC_Absolute(cpu):
    """INC abs: increment an absolute byte.  3 bytes, 6 cycles."""
    return _inc_memory(cpu, addrmodes.Absolute(cpu), 3, 6)
def INC_Absolute_X(cpu):
    """INC abs,X: increment an absolute,X byte.  3 bytes, 7 cycles."""
    return _inc_memory(cpu, addrmodes.Absolute_X(cpu), 3, 7)
def INX_Implied(cpu):
    """INX: increment X with 8-bit wrap; set N/Z.  1 byte, 2 cycles."""
    result = (cpu.registers['X'] + 1) & 0xFF
    cpu.registers['X'] = result
    setN(cpu, result)
    setZ(cpu, result)
    advancePC(cpu, 1)
    return 2
def INY_Implied(cpu):
    """INY: increment Y with 8-bit wrap; set N/Z.  1 byte, 2 cycles."""
    result = (cpu.registers['Y'] + 1) & 0xFF
    cpu.registers['Y'] = result
    setN(cpu, result)
    setZ(cpu, result)
    advancePC(cpu, 1)
    return 2
def JMP_Absolute(cpu):
    """JMP abs: jump to an absolute address.  3 bytes, 3 cycles."""
    target = addrmodes.Absolute(cpu)
    advancePC(cpu, 3)
    cpu.registers['PC'] = target
    return 3
def JMP_Indirect(cpu):
    """JMP (ind): jump through a 16-bit pointer.  3 bytes, 5 cycles."""
    target = addrmodes.Indirect(cpu)
    advancePC(cpu, 3)
    cpu.registers['PC'] = target
    return 5
def JSR_Absolute(cpu):
    """JSR abs: call a subroutine.  3 bytes, 6 cycles.

    Pushes PC+2 (the address of the instruction's last byte), high byte
    first; RTS later adds the missing 1 to land on the next instruction.
    """
    target = addrmodes.Absolute(cpu)
    advancePC(cpu, 2)
    cpu.pushStack((cpu.registers['PC'] >> 8) & 0xFF)
    cpu.pushStack(cpu.registers['PC'] & 0xFF)
    cpu.registers['PC'] = target
    return 6
def _load(cpu, reg, value, size, cycles):
    # Shared LDA/LDX/LDY tail: write the register, set N/Z, advance PC.
    cpu.registers[reg] = value
    setN(cpu, value)
    setZ(cpu, value)
    advancePC(cpu, size)
    return cycles
def LDA_Immediate(cpu):
    """LDA #imm: load A with an immediate byte.  2 bytes, 2 cycles."""
    return _load(cpu, 'A', cpu.readMemory(cpu.registers['PC'] + 1), 2, 2)
def LDA_Zero(cpu):
    """LDA zp: load A from zero page.  2 bytes, 3 cycles."""
    return _load(cpu, 'A', cpu.readMemory(addrmodes.Zero(cpu)), 2, 3)
def LDA_Zero_X(cpu):
    """LDA zp,X: load A from zero page,X.  2 bytes, 4 cycles."""
    return _load(cpu, 'A', cpu.readMemory(addrmodes.Zero_X(cpu)), 2, 4)
def LDA_Absolute(cpu):
    """LDA abs: load A from an absolute address.  3 bytes, 4 cycles."""
    return _load(cpu, 'A', cpu.readMemory(addrmodes.Absolute(cpu)), 3, 4)
def LDA_Absolute_X(cpu):
    """LDA abs,X: load A from absolute,X.  3 bytes, 4 cycles."""
    return _load(cpu, 'A', cpu.readMemory(addrmodes.Absolute_X(cpu)), 3, 4)
def LDA_Absolute_Y(cpu):
    """LDA abs,Y: load A from absolute,Y.  3 bytes, 4 cycles."""
    return _load(cpu, 'A', cpu.readMemory(addrmodes.Absolute_Y(cpu)), 3, 4)
def LDA_Indirect_X(cpu):
    """LDA (ind,X): load A from (indirect,X).  2 bytes, 6 cycles."""
    return _load(cpu, 'A', cpu.readMemory(addrmodes.Indirect_X(cpu)), 2, 6)
def LDA_Indirect_Y(cpu):
    """LDA (ind),Y: load A from (indirect),Y.  2 bytes, 5 cycles."""
    return _load(cpu, 'A', cpu.readMemory(addrmodes.Indirect_Y(cpu)), 2, 5)
def LDX_Immediate(cpu):
    """LDX #imm: load X with an immediate byte.  2 bytes, 2 cycles."""
    return _load(cpu, 'X', cpu.readMemory(cpu.registers['PC'] + 1), 2, 2)
def LDX_Zero(cpu):
    """LDX zp: load X from zero page.  2 bytes, 3 cycles."""
    return _load(cpu, 'X', cpu.readMemory(addrmodes.Zero(cpu)), 2, 3)
def LDX_Zero_Y(cpu):
    """LDX zp,Y: load X from zero page,Y.  2 bytes, 4 cycles."""
    return _load(cpu, 'X', cpu.readMemory(addrmodes.Zero_Y(cpu)), 2, 4)
def LDX_Absolute(cpu):
    """LDX abs: load X from an absolute address.  3 bytes, 4 cycles."""
    return _load(cpu, 'X', cpu.readMemory(addrmodes.Absolute(cpu)), 3, 4)
def LDX_Absolute_Y(cpu):
    """LDX abs,Y: load X from absolute,Y.  3 bytes, 4 cycles."""
    return _load(cpu, 'X', cpu.readMemory(addrmodes.Absolute_Y(cpu)), 3, 4)
def LDY_Immediate(cpu):
    """LDY #imm: load Y with an immediate byte.  2 bytes, 2 cycles."""
    return _load(cpu, 'Y', cpu.readMemory(cpu.registers['PC'] + 1), 2, 2)
def LDY_Zero(cpu):
    """LDY zp: load Y from zero page.  2 bytes, 3 cycles."""
    return _load(cpu, 'Y', cpu.readMemory(addrmodes.Zero(cpu)), 2, 3)
def LDY_Zero_X(cpu):
    """LDY zp,X: load Y from zero page,X.  2 bytes, 4 cycles."""
    return _load(cpu, 'Y', cpu.readMemory(addrmodes.Zero_X(cpu)), 2, 4)
def LDY_Absolute(cpu):
    """LDY abs: load Y from an absolute address.  3 bytes, 4 cycles."""
    return _load(cpu, 'Y', cpu.readMemory(addrmodes.Absolute(cpu)), 3, 4)
def LDY_Absolute_X(cpu):
    """LDY abs,X: load Y from absolute,X.  3 bytes, 4 cycles."""
    return _load(cpu, 'Y', cpu.readMemory(addrmodes.Absolute_X(cpu)), 3, 4)
def LSR_Accumulator(cpu):
    """LSR A: shift A right one bit; bit 0 goes to carry.  1 byte, 2 cycles."""
    shifted = cpu.registers['A']
    setC(cpu, shifted & 0x01)
    shifted >>= 1
    cpu.registers['A'] = shifted
    setN(cpu, shifted)
    setZ(cpu, shifted)
    advancePC(cpu, 1)
    return 2
def _lsr_memory(cpu, address, size, cycles):
    # Shared LSR read-modify-write tail: bit 0 -> carry, then shift.
    value = cpu.readMemory(address)
    setC(cpu, value & 0x01)
    value >>= 1
    cpu.writeMemory(address, value)
    setN(cpu, value)
    setZ(cpu, value)
    advancePC(cpu, size)
    return cycles
def LSR_Zero(cpu):
    """LSR zp: shift a zero-page byte right.  2 bytes, 5 cycles."""
    return _lsr_memory(cpu, addrmodes.Zero(cpu), 2, 5)
def LSR_Zero_X(cpu):
    """LSR zp,X: shift a zero-page,X byte right.  2 bytes, 6 cycles."""
    return _lsr_memory(cpu, addrmodes.Zero_X(cpu), 2, 6)
def LSR_Absolute(cpu):
    """LSR abs: shift an absolute byte right.  3 bytes, 6 cycles."""
    return _lsr_memory(cpu, addrmodes.Absolute(cpu), 3, 6)
def LSR_Absolute_X(cpu):
    """LSR abs,X: shift an absolute,X byte right.  3 bytes, 7 cycles."""
    return _lsr_memory(cpu, addrmodes.Absolute_X(cpu), 3, 7)
def NOP_Implied(cpu):
    """NOP: no operation.  1 byte, 2 cycles."""
    advancePC(cpu, 1)
    return 2
def _ora(cpu, operand, size, cycles):
    # Shared ORA tail: A |= operand, set N/Z, advance PC.
    result = cpu.registers['A'] | operand
    cpu.registers['A'] = result
    setN(cpu, result)
    setZ(cpu, result)
    advancePC(cpu, size)
    return cycles
def ORA_Immediate(cpu):
    """ORA #imm: OR A with an immediate byte.  2 bytes, 2 cycles."""
    return _ora(cpu, cpu.readMemory(cpu.registers['PC'] + 1), 2, 2)
def ORA_Zero(cpu):
    """ORA zp: OR A with a zero-page byte.  2 bytes, 3 cycles."""
    return _ora(cpu, cpu.readMemory(addrmodes.Zero(cpu)), 2, 3)
def ORA_Zero_X(cpu):
    """ORA zp,X: OR A with a zero-page,X byte.  2 bytes, 4 cycles."""
    return _ora(cpu, cpu.readMemory(addrmodes.Zero_X(cpu)), 2, 4)
def ORA_Absolute(cpu):
    """ORA abs: OR A with an absolute byte.  3 bytes, 4 cycles."""
    return _ora(cpu, cpu.readMemory(addrmodes.Absolute(cpu)), 3, 4)
def ORA_Absolute_X(cpu):
    """ORA abs,X: OR A with an absolute,X byte.  3 bytes, 4 cycles."""
    return _ora(cpu, cpu.readMemory(addrmodes.Absolute_X(cpu)), 3, 4)
def ORA_Absolute_Y(cpu):
    """ORA abs,Y: OR A with an absolute,Y byte.  3 bytes, 4 cycles."""
    return _ora(cpu, cpu.readMemory(addrmodes.Absolute_Y(cpu)), 3, 4)
def ORA_Indirect_X(cpu):
    """ORA (ind,X): OR A with an (indirect,X) byte.  2 bytes, 6 cycles."""
    return _ora(cpu, cpu.readMemory(addrmodes.Indirect_X(cpu)), 2, 6)
def ORA_Indirect_Y(cpu):
    """ORA (ind),Y: OR A with an (indirect),Y byte.  2 bytes, 5 cycles."""
    return _ora(cpu, cpu.readMemory(addrmodes.Indirect_Y(cpu)), 2, 5)
def PHA_Implied(cpu):
    """PHA: push A onto the stack.  1 byte, 3 cycles."""
    cpu.pushStack(cpu.registers['A'])
    advancePC(cpu, 1)
    return 3
def PHP_Implied(cpu):
    """PHP: push the status register.  1 byte, 3 cycles.

    NOTE(review): P is pushed unchanged here; hardware PHP pushes with
    the B flag set -- confirm whether that difference matters to callers.
    """
    cpu.pushStack(cpu.registers['P'])
    advancePC(cpu, 1)
    return 3
def PLA_Implied(cpu):
    """PLA: pull A from the stack; set N/Z.  1 byte, 4 cycles."""
    pulled = cpu.pullStack()
    cpu.registers['A'] = pulled
    setN(cpu, pulled)
    setZ(cpu, pulled)
    advancePC(cpu, 1)
    return 4
def PLP_Implied(cpu):
    """PLP: pull the status register.  1 byte, 4 cycles.

    The break flag is discarded and the unused bit 5 is forced on when
    restoring P.
    """
    pulled = cpu.pullStack()
    cpu.registers['P'] = (pulled & 0xEF) | (1 << 5)
    advancePC(cpu, 1)
    return 4
def ROL_Accumulator(cpu):
    """ROL A: rotate A left through carry; set N/Z.  1 byte, 2 cycles."""
    old = cpu.registers['A']
    carry_in = cpu.getStatus(cpu.statusFlags['c'])
    setC(cpu, (old >> 7) & 1)
    rotated = ((old << 1) & 0xFF) + carry_in
    setN(cpu, rotated)
    setZ(cpu, rotated)
    cpu.registers['A'] = rotated
    advancePC(cpu, 1)
    return 2
def _rol_memory(cpu, address, size, cycles):
    # Shared ROL read-modify-write tail: bit 7 -> carry, old carry -> bit 0.
    old = cpu.readMemory(address)
    carry_in = cpu.getStatus(cpu.statusFlags['c'])
    setC(cpu, (old >> 7) & 1)
    rotated = ((old << 1) & 0xFF) + carry_in
    setN(cpu, rotated)
    setZ(cpu, rotated)
    cpu.writeMemory(address, rotated)
    advancePC(cpu, size)
    return cycles
def ROL_Zero(cpu):
    """ROL zp: rotate a zero-page byte left.  2 bytes, 5 cycles."""
    return _rol_memory(cpu, addrmodes.Zero(cpu), 2, 5)
def ROL_Zero_X(cpu):
    """ROL zp,X: rotate a zero-page,X byte left.  2 bytes, 6 cycles."""
    return _rol_memory(cpu, addrmodes.Zero_X(cpu), 2, 6)
def ROL_Absolute(cpu):
    """ROL abs: rotate an absolute byte left.  3 bytes, 6 cycles."""
    return _rol_memory(cpu, addrmodes.Absolute(cpu), 3, 6)
def ROL_Absolute_X(cpu):
    """ROL abs,X: rotate an absolute,X byte left.  3 bytes, 7 cycles."""
    return _rol_memory(cpu, addrmodes.Absolute_X(cpu), 3, 7)
def ROR_Accumulator(cpu):
    """ROR A: rotate A right through carry; set N/Z.  1 byte, 2 cycles."""
    value = cpu.registers['A']
    if cpu.getStatus(cpu.statusFlags['c']):
        value |= 0x100
    setC(cpu, value & 0x01)
    value >>= 1
    setN(cpu, value)
    setZ(cpu, value)
    cpu.registers['A'] = value
    advancePC(cpu, 1)
    return 2
def _ror_memory(cpu, address, size, cycles):
    """Shared ROR read-modify-write core.

    Rotates the byte at *address* right through carry: bit 0 goes to the
    carry flag and the old carry becomes bit 7 of the result.  Sets N and
    Z from the result and stores it back.
    """
    value = cpu.readMemory(address)
    carry_in = (cpu.getStatus(cpu.statusFlags['c']) << 7)
    setC(cpu, value & 0x01)
    result = (value >> 1) + carry_in
    advancePC(cpu, size)
    # BUGFIX: the old code called setN(cpu, (result >> 7) & 1); that passes
    # a 0/1 value whose own bit 7 is always 0, so N could never be set.
    # Pass the full result byte instead, matching ROR_Accumulator and every
    # other opcode here: N = bit 7 of the result (the incoming carry).
    setN(cpu, result)
    setZ(cpu, result)
    cpu.writeMemory(address, result)
    return cycles
def ROR_Zero(cpu):
    """ROR zp: rotate a zero-page byte right.  2 bytes, 5 cycles."""
    return _ror_memory(cpu, addrmodes.Zero(cpu), 2, 5)
def ROR_Zero_X(cpu):
    """ROR zp,X: rotate a zero-page,X byte right.  2 bytes, 6 cycles."""
    return _ror_memory(cpu, addrmodes.Zero_X(cpu), 2, 6)
def ROR_Absolute(cpu):
    """ROR abs: rotate an absolute byte right.  3 bytes, 6 cycles."""
    return _ror_memory(cpu, addrmodes.Absolute(cpu), 3, 6)
def ROR_Absolute_X(cpu):
    """ROR abs,X: rotate an absolute,X byte right.  3 bytes, 7 cycles."""
    return _ror_memory(cpu, addrmodes.Absolute_X(cpu), 3, 7)
def RTI_Implied(cpu):
    """RTI: return from interrupt.  1 byte, 6 cycles.

    Pulls P (forcing the unused bit 5 on), then PC low byte, then high
    byte.  PC is restored exactly -- no +1 adjustment, unlike RTS.
    """
    cpu.registers['P'] = cpu.pullStack() | (1 << 5)
    low = cpu.pullStack()
    high = cpu.pullStack()
    cpu.registers['PC'] = (high << 8) | low
    return 6
def RTS_Implied(cpu):
    """RTS: return from subroutine.  1 byte, 6 cycles.

    JSR pushed the address of its own last byte, so after restoring PC we
    advance one byte to reach the instruction after the call.
    """
    low = cpu.pullStack()
    high = cpu.pullStack()
    cpu.registers['PC'] = (high << 8) + low
    advancePC(cpu, 1)
    return 6
def _sbc(cpu, operand, size, cycles):
    # Shared SBC core: A = A - operand - (1 - carry), setting N/Z/V/C.
    # TODO: confirm the (1 - carry) borrow handling (carried over from the
    # original implementation's note).
    carry = cpu.getStatus(cpu.statusFlags['c'])
    diff = cpu.registers['A'] - operand - (1 - carry)
    setN(cpu, diff)
    setZ(cpu, diff & 0xFF)
    # Overflow when A and the result differ in sign AND A and the operand
    # differ in sign.
    setO(cpu, (((cpu.registers['A'] ^ diff) & 0x80) != 0 and
               ((cpu.registers['A'] ^ operand) & 0x80) != 0))
    setC(cpu, 0 if diff < 0 else 1)
    cpu.registers['A'] = diff & 0xFF
    advancePC(cpu, size)
    return cycles
def SBC_Immediate(cpu):
    """SBC #imm: subtract an immediate byte with borrow.  2 bytes, 2 cycles."""
    return _sbc(cpu, cpu.readMemory(cpu.registers['PC'] + 1), 2, 2)
def SBC_Zero(cpu):
    """SBC zp: subtract a zero-page byte with borrow.  2 bytes, 3 cycles."""
    return _sbc(cpu, cpu.readMemory(addrmodes.Zero(cpu)), 2, 3)
def SBC_Zero_X(cpu):
    """SBC zp,X: subtract a zero-page,X byte with borrow.  2 bytes, 4 cycles."""
    return _sbc(cpu, cpu.readMemory(addrmodes.Zero_X(cpu)), 2, 4)
def SBC_Absolute(cpu):
    """SBC abs: subtract an absolute byte with borrow.  3 bytes, 4 cycles."""
    return _sbc(cpu, cpu.readMemory(addrmodes.Absolute(cpu)), 3, 4)
def SBC_Absolute_X(cpu):
    """SBC abs,X: subtract an absolute,X byte with borrow.  3 bytes, 4 cycles."""
    return _sbc(cpu, cpu.readMemory(addrmodes.Absolute_X(cpu)), 3, 4)
def SBC_Absolute_Y(cpu):
    """SBC abs,Y: subtract an absolute,Y byte with borrow.  3 bytes, 4 cycles."""
    return _sbc(cpu, cpu.readMemory(addrmodes.Absolute_Y(cpu)), 3, 4)
def SBC_Indirect_X(cpu):
    """SBC (ind,X): subtract an (indirect,X) byte with borrow.  2 bytes, 6 cycles."""
    return _sbc(cpu, cpu.readMemory(addrmodes.Indirect_X(cpu)), 2, 6)
def SBC_Indirect_Y(cpu):
    """SBC (ind),Y: subtract an (indirect),Y byte with borrow.  2 bytes, 5 cycles."""
    return _sbc(cpu, cpu.readMemory(addrmodes.Indirect_Y(cpu)), 2, 5)
def SEC_Implied(cpu):
    """SEC: set the carry flag.  1 byte, 2 cycles."""
    cpu.setStatus(cpu.statusFlags['c'], 1)
    advancePC(cpu, 1)
    return 2
def SED_Implied(cpu):
    """SED: set the decimal-mode flag.  1 byte, 2 cycles."""
    cpu.setStatus(cpu.statusFlags['d'], 1)
    advancePC(cpu, 1)
    return 2
def SEI_Implied(cpu):
    """SEI: set the interrupt-disable flag.  1 byte, 2 cycles."""
    cpu.setStatus(cpu.statusFlags['i'], 1)
    advancePC(cpu, 1)
    return 2
def STA_Zero(cpu):
    """STA zp: store A to zero page.  2 bytes, 3 cycles."""
    cpu.writeMemory(addrmodes.Zero(cpu), cpu.registers['A'])
    advancePC(cpu, 2)
    return 3
def STA_Zero_X(cpu):
    """STA zp,X: store A to zero page,X.  2 bytes, 4 cycles."""
    cpu.writeMemory(addrmodes.Zero_X(cpu), cpu.registers['A'])
    advancePC(cpu, 2)
    return 4
def STA_Absolute(cpu):
    """STA abs: store A to an absolute address.  3 bytes, 4 cycles."""
    cpu.writeMemory(addrmodes.Absolute(cpu), cpu.registers['A'])
    advancePC(cpu, 3)
    return 4
def STA_Absolute_X(cpu):
    """STA abs,X: store A to absolute,X.  3 bytes, 5 cycles."""
    cpu.writeMemory(addrmodes.Absolute_X(cpu), cpu.registers['A'])
    advancePC(cpu, 3)
    return 5
def STA_Absolute_Y(cpu):
    """STA abs,Y: store A to absolute,Y.  3 bytes, 5 cycles."""
    cpu.writeMemory(addrmodes.Absolute_Y(cpu), cpu.registers['A'])
    advancePC(cpu, 3)
    return 5
def STA_Indirect_X(cpu):
    """STA (ind,X): store A via (indirect,X).  2 bytes, 6 cycles."""
    cpu.writeMemory(addrmodes.Indirect_X(cpu), cpu.registers['A'])
    advancePC(cpu, 2)
    return 6
def STA_Indirect_Y(cpu):
    """STA (ind),Y: store A via (indirect),Y.  2 bytes, 6 cycles."""
    cpu.writeMemory(addrmodes.Indirect_Y(cpu), cpu.registers['A'])
    advancePC(cpu, 2)
    return 6
def STX_Zero(cpu):
    """STX zp: store X to zero page.  2 bytes, 3 cycles."""
    cpu.writeMemory(addrmodes.Zero(cpu), cpu.registers['X'])
    advancePC(cpu, 2)
    return 3
def STX_Zero_Y(cpu):
    """STX zp,Y: store X to zero page,Y.  2 bytes, 4 cycles."""
    cpu.writeMemory(addrmodes.Zero_Y(cpu), cpu.registers['X'])
    advancePC(cpu, 2)
    return 4
def STX_Absolute(cpu):
    """STX abs: store X to an absolute address.  3 bytes, 4 cycles."""
    cpu.writeMemory(addrmodes.Absolute(cpu), cpu.registers['X'])
    advancePC(cpu, 3)
    return 4
def STY_Zero(cpu):
    """STY zp: store Y to zero page.  2 bytes, 3 cycles."""
    cpu.writeMemory(addrmodes.Zero(cpu), cpu.registers['Y'])
    advancePC(cpu, 2)
    return 3
def STY_Zero_X(cpu):
    """STY zp,X: store Y to zero page,X.  2 bytes, 4 cycles."""
    cpu.writeMemory(addrmodes.Zero_X(cpu), cpu.registers['Y'])
    advancePC(cpu, 2)
    return 4
def STY_Absolute(cpu):
    """STY abs: store Y to an absolute address.  3 bytes, 4 cycles."""
    cpu.writeMemory(addrmodes.Absolute(cpu), cpu.registers['Y'])
    advancePC(cpu, 3)
    return 4
def TAX_Implied(cpu):
    """TAX: copy A to X; set N/Z.  1 byte, 2 cycles."""
    copied = cpu.registers['A']
    setN(cpu, copied)
    setZ(cpu, copied)
    cpu.registers['X'] = copied
    advancePC(cpu, 1)
    return 2
def TAY_Implied(cpu):
    """TAY: copy A to Y; set N/Z.  1 byte, 2 cycles."""
    copied = cpu.registers['A']
    setN(cpu, copied)
    setZ(cpu, copied)
    cpu.registers['Y'] = copied
    advancePC(cpu, 1)
    return 2
def TSX_Implied(cpu):
    """TSX: copy the stack pointer to X; set N/Z.  1 byte, 2 cycles."""
    copied = cpu.registers['SP']
    setN(cpu, copied)
    setZ(cpu, copied)
    cpu.registers['X'] = copied
    advancePC(cpu, 1)
    return 2
def TXA_Implied(cpu):
    """TXA: copy X to A; set N/Z.  1 byte, 2 cycles."""
    copied = cpu.registers['X']
    setN(cpu, copied)
    setZ(cpu, copied)
    cpu.registers['A'] = copied
    advancePC(cpu, 1)
    return 2
def TXS_Implied(cpu):
    """TXS: copy X to the stack pointer; no flags affected.  1 byte, 2 cycles."""
    cpu.registers['SP'] = cpu.registers['X']
    advancePC(cpu, 1)
    return 2
def TYA_Implied(cpu):
    """TYA: copy Y to A; set N/Z.  1 byte, 2 cycles."""
    copied = cpu.registers['Y']
    setN(cpu, copied)
    setZ(cpu, copied)
    cpu.registers['A'] = copied
    advancePC(cpu, 1)
    return 2
# Unofficial Opcodes
def _dcp(cpu, address, size, cycles):
    # Unofficial DCP core: DEC the memory byte, then compare it with A.
    decremented = (cpu.readMemory(address) - 1) & 0xFF
    cpu.writeMemory(address, decremented)
    diff = cpu.registers['A'] - decremented
    advancePC(cpu, size)
    # ~diff >> 8 & 1 evaluates to 1 exactly when diff >= 0 (compare carry).
    setC(cpu, ~diff >> 8 & 0x1)
    setN(cpu, diff)
    setZ(cpu, diff & 0xFF)
    return cycles
def DCP_Zero(cpu):
    """DCP zp (unofficial): DEC then CMP.  2 bytes, 5 cycles."""
    return _dcp(cpu, addrmodes.Zero(cpu), 2, 5)
def DCP_Zero_X(cpu):
    """DCP zp,X (unofficial): DEC then CMP.  2 bytes, 6 cycles."""
    return _dcp(cpu, addrmodes.Zero_X(cpu), 2, 6)
def DCP_Absolute(cpu):
    """DCP abs (unofficial): DEC then CMP.  3 bytes, 6 cycles."""
    return _dcp(cpu, addrmodes.Absolute(cpu), 3, 6)
def DCP_Absolute_X(cpu):
    """DCP abs,X (unofficial): DEC then CMP.  3 bytes, 7 cycles."""
    return _dcp(cpu, addrmodes.Absolute_X(cpu), 3, 7)
def DCP_Absolute_Y(cpu):
    """DCP abs,Y (unofficial): DEC then CMP.  3 bytes, 7 cycles."""
    return _dcp(cpu, addrmodes.Absolute_Y(cpu), 3, 7)
def DCP_Indirect_X(cpu):
    """DCP (ind,X) (unofficial): DEC then CMP.  2 bytes, 8 cycles."""
    return _dcp(cpu, addrmodes.Indirect_X(cpu), 2, 8)
def DCP_Indirect_Y(cpu):
    """DCP (ind),Y (unofficial): DEC then CMP.  2 bytes, 8 cycles."""
    return _dcp(cpu, addrmodes.Indirect_Y(cpu), 2, 8)
def DOP_Immediate(cpu):
    """DOP #imm (unofficial double NOP): skip the operand.  2 bytes, 2 cycles."""
    advancePC(cpu, 2)
    return 2
def DOP_Zero(cpu):
    """DOP zp (unofficial double NOP): skip the operand.  2 bytes, 3 cycles."""
    advancePC(cpu, 2)
    return 3
def DOP_Zero_X(cpu):
    """DOP zp,X (unofficial double NOP): skip the operand.  2 bytes, 4 cycles."""
    advancePC(cpu, 2)
    return 4
def _isb(cpu, address, size, cycles):
    # Unofficial ISB/ISC core: INC the memory byte, then SBC it from A.
    incremented = (cpu.readMemory(address) + 1) & 0xFF
    cpu.writeMemory(address, incremented)
    carry = cpu.getStatus(cpu.statusFlags['c'])
    diff = cpu.registers['A'] - incremented - (1 - carry)
    setN(cpu, diff)
    setZ(cpu, diff & 0xFF)
    setO(cpu, (((cpu.registers['A'] ^ diff) & 0x80) != 0 and
               ((cpu.registers['A'] ^ incremented) & 0x80) != 0))
    setC(cpu, 0 if diff < 0 else 1)
    cpu.registers['A'] = diff & 0xFF
    advancePC(cpu, size)
    return cycles
def ISB_Zero(cpu):
    """ISB zp (unofficial): INC then SBC.  2 bytes, 5 cycles."""
    return _isb(cpu, addrmodes.Zero(cpu), 2, 5)
def ISB_Zero_X(cpu):
    """ISB zp,X (unofficial): INC then SBC.  2 bytes, 6 cycles."""
    return _isb(cpu, addrmodes.Zero_X(cpu), 2, 6)
def ISB_Absolute(cpu):
    """ISB abs (unofficial): INC then SBC.  3 bytes, 6 cycles."""
    return _isb(cpu, addrmodes.Absolute(cpu), 3, 6)
def ISB_Absolute_X(cpu):
    """ISB abs,X (unofficial): INC then SBC.  3 bytes, 7 cycles."""
    return _isb(cpu, addrmodes.Absolute_X(cpu), 3, 7)
def ISB_Absolute_Y(cpu):
    """ISB abs,Y (unofficial): INC then SBC.  3 bytes, 7 cycles."""
    return _isb(cpu, addrmodes.Absolute_Y(cpu), 3, 7)
def ISB_Indirect_X(cpu):
    """ISB (ind,X) (unofficial): INC then SBC.  2 bytes, 8 cycles."""
    return _isb(cpu, addrmodes.Indirect_X(cpu), 2, 8)
def ISB_Indirect_Y(cpu):
    """ISB (ind),Y (unofficial): INC then SBC.  2 bytes, 8 cycles."""
    return _isb(cpu, addrmodes.Indirect_Y(cpu), 2, 8)
def _lax(cpu, value, size, cycles):
    # Unofficial LAX core: load both A and X with the same byte, set N/Z.
    cpu.registers['A'] = value
    cpu.registers['X'] = value
    setN(cpu, value)
    setZ(cpu, value)
    advancePC(cpu, size)
    return cycles
def LAX_Zero(cpu):
    """LAX zp (unofficial): load A and X.  2 bytes, 3 cycles."""
    return _lax(cpu, cpu.readMemory(addrmodes.Zero(cpu)), 2, 3)
def LAX_Zero_Y(cpu):
    """LAX zp,Y (unofficial): load A and X.  2 bytes, 4 cycles."""
    return _lax(cpu, cpu.readMemory(addrmodes.Zero_Y(cpu)), 2, 4)
def LAX_Absolute(cpu):
    """LAX abs (unofficial): load A and X.  3 bytes, 4 cycles."""
    return _lax(cpu, cpu.readMemory(addrmodes.Absolute(cpu)), 3, 4)
def LAX_Absolute_Y(cpu):
    """LAX abs,Y (unofficial): load A and X.  3 bytes, 4 cycles."""
    return _lax(cpu, cpu.readMemory(addrmodes.Absolute_Y(cpu)), 3, 4)
def LAX_Indirect_X(cpu):
    """LAX (ind,X) (unofficial): load A and X.  2 bytes, 6 cycles."""
    return _lax(cpu, cpu.readMemory(addrmodes.Indirect_X(cpu)), 2, 6)
def LAX_Indirect_Y(cpu):
    """LAX (ind),Y (unofficial): load A and X.  2 bytes, 6 cycles."""
    return _lax(cpu, cpu.readMemory(addrmodes.Indirect_Y(cpu)), 2, 6)
def _rla_modify(cpu, address):
    """Core of the undocumented RLA opcode: ROL the memory operand, AND the
    rotated value into A, set N/Z from A, then write the result back."""
    value = cpu.readMemory(address)
    carry = cpu.getStatus(cpu.statusFlags['c'])
    setC(cpu, (value >> 7) & 1)            # old bit 7 becomes the new carry
    value = ((value << 1) & 0xFF) + carry  # old carry rotates into bit 0
    cpu.registers['A'] &= value
    setN(cpu, cpu.registers['A'])
    setZ(cpu, cpu.registers['A'])
    cpu.writeMemory(address, value)

def RLA_Zero(cpu):
    """RLA zp -- 2 bytes, 5 cycles."""
    _rla_modify(cpu, addrmodes.Zero(cpu))
    advancePC(cpu, 2)
    return 5

def RLA_Zero_X(cpu):
    """RLA zp,X -- 2 bytes, 6 cycles."""
    _rla_modify(cpu, addrmodes.Zero_X(cpu))
    advancePC(cpu, 2)
    return 6

def RLA_Absolute(cpu):
    """RLA abs -- 3 bytes, 6 cycles."""
    _rla_modify(cpu, addrmodes.Absolute(cpu))
    advancePC(cpu, 3)
    return 6

def RLA_Absolute_X(cpu):
    """RLA abs,X -- 3 bytes, 7 cycles."""
    _rla_modify(cpu, addrmodes.Absolute_X(cpu))
    advancePC(cpu, 3)
    return 7

def RLA_Absolute_Y(cpu):
    """RLA abs,Y -- 3 bytes, 7 cycles."""
    _rla_modify(cpu, addrmodes.Absolute_Y(cpu))
    advancePC(cpu, 3)
    return 7

def RLA_Indirect_X(cpu):
    """RLA (ind,X) -- 2 bytes, 8 cycles."""
    _rla_modify(cpu, addrmodes.Indirect_X(cpu))
    advancePC(cpu, 2)
    return 8

def RLA_Indirect_Y(cpu):
    """RLA (ind),Y -- 2 bytes, 8 cycles."""
    _rla_modify(cpu, addrmodes.Indirect_Y(cpu))
    advancePC(cpu, 2)
    return 8
def _rra_modify(cpu, address):
    """Core of the undocumented RRA opcode: ROR the memory operand, write it
    back, then ADC the rotated value into A, updating N/Z/V/C."""
    value = cpu.readMemory(address)
    carry = (cpu.getStatus(cpu.statusFlags['c']) << 7)
    setC(cpu, value & 0x01)       # old bit 0 becomes the new carry
    value = (value >> 1) | carry  # old carry rotates into bit 7
    cpu.writeMemory(address, value)
    # ADC uses the carry produced by the rotate above.
    carry = cpu.getStatus(cpu.statusFlags['c'])
    tmp = value + cpu.registers['A'] + carry
    # Signed overflow: operands had the same sign but the result's differs.
    setO(
        cpu, not (((cpu.registers['A'] ^ value) & 0x80) != 0)
        and (((cpu.registers['A'] ^ tmp) & 0x80)))
    setC(cpu, tmp > 0xFF)
    setN(cpu, tmp)
    setZ(cpu, tmp & 0xFF)
    cpu.registers['A'] = (tmp & 0xFF)

def RRA_Zero(cpu):
    """RRA zp -- 2 bytes, 5 cycles."""
    _rra_modify(cpu, addrmodes.Zero(cpu))
    advancePC(cpu, 2)
    return 5

def RRA_Zero_X(cpu):
    """RRA zp,X -- 2 bytes, 6 cycles."""
    _rra_modify(cpu, addrmodes.Zero_X(cpu))
    advancePC(cpu, 2)
    return 6

def RRA_Absolute(cpu):
    """RRA abs -- 3 bytes, 6 cycles."""
    _rra_modify(cpu, addrmodes.Absolute(cpu))
    advancePC(cpu, 3)
    return 6

def RRA_Absolute_X(cpu):
    """RRA abs,X -- 3 bytes, 7 cycles."""
    _rra_modify(cpu, addrmodes.Absolute_X(cpu))
    advancePC(cpu, 3)
    return 7

def RRA_Absolute_Y(cpu):
    """RRA abs,Y -- 3 bytes, 7 cycles."""
    _rra_modify(cpu, addrmodes.Absolute_Y(cpu))
    advancePC(cpu, 3)
    return 7

def RRA_Indirect_X(cpu):
    """RRA (ind,X) -- 2 bytes, 8 cycles."""
    _rra_modify(cpu, addrmodes.Indirect_X(cpu))
    advancePC(cpu, 2)
    return 8

def RRA_Indirect_Y(cpu):
    """RRA (ind),Y -- 2 bytes, 8 cycles."""
    _rra_modify(cpu, addrmodes.Indirect_Y(cpu))
    advancePC(cpu, 2)
    return 8
def _sax_store(cpu, address, size, nCycles):
    """Core of the undocumented SAX opcode: store A AND X to memory.
    No flags are affected."""
    cpu.writeMemory(address, cpu.registers['X'] & cpu.registers['A'])
    advancePC(cpu, size)
    return nCycles

def SAX_Zero(cpu):
    """SAX zp -- 2 bytes, 3 cycles."""
    return _sax_store(cpu, addrmodes.Zero(cpu), 2, 3)

def SAX_Zero_Y(cpu):
    """SAX zp,Y -- 2 bytes, 4 cycles."""
    return _sax_store(cpu, addrmodes.Zero_Y(cpu), 2, 4)

def SAX_Absolute(cpu):
    """SAX abs -- 3 bytes, 4 cycles."""
    return _sax_store(cpu, addrmodes.Absolute(cpu), 3, 4)

def SAX_Indirect_X(cpu):
    """SAX (ind,X) -- 2 bytes, 6 cycles."""
    return _sax_store(cpu, addrmodes.Indirect_X(cpu), 2, 6)
def _slo_modify(cpu, address):
    """Core of the undocumented SLO opcode: ASL the memory operand, write it
    back, OR the shifted value into A, and set N/Z from A."""
    value = cpu.readMemory(address)
    setC(cpu, value & 0x80)  # old bit 7 becomes the new carry
    value = (value << 1) & 0xFF
    cpu.writeMemory(address, value)
    cpu.registers['A'] |= value
    setN(cpu, cpu.registers['A'])
    setZ(cpu, cpu.registers['A'])

def SLO_Zero(cpu):
    """SLO zp -- 2 bytes, 5 cycles."""
    _slo_modify(cpu, addrmodes.Zero(cpu))
    advancePC(cpu, 2)
    return 5

def SLO_Zero_X(cpu):
    """SLO zp,X -- 2 bytes, 6 cycles."""
    _slo_modify(cpu, addrmodes.Zero_X(cpu))
    advancePC(cpu, 2)
    return 6

def SLO_Absolute(cpu):
    """SLO abs -- 3 bytes, 6 cycles."""
    _slo_modify(cpu, addrmodes.Absolute(cpu))
    advancePC(cpu, 3)
    return 6

def SLO_Absolute_X(cpu):
    """SLO abs,X -- 3 bytes, 7 cycles."""
    _slo_modify(cpu, addrmodes.Absolute_X(cpu))
    advancePC(cpu, 3)
    return 7

def SLO_Absolute_Y(cpu):
    """SLO abs,Y -- 3 bytes, 7 cycles."""
    _slo_modify(cpu, addrmodes.Absolute_Y(cpu))
    advancePC(cpu, 3)
    return 7

def SLO_Indirect_X(cpu):
    """SLO (ind,X) -- 2 bytes, 8 cycles."""
    _slo_modify(cpu, addrmodes.Indirect_X(cpu))
    advancePC(cpu, 2)
    return 8

def SLO_Indirect_Y(cpu):
    """SLO (ind),Y -- 2 bytes, 8 cycles."""
    _slo_modify(cpu, addrmodes.Indirect_Y(cpu))
    advancePC(cpu, 2)
    return 8
def _sre_modify(cpu, address):
    """Core of the undocumented SRE opcode: LSR the memory operand, XOR the
    shifted value into A, write it back, and set N/Z from A."""
    value = cpu.readMemory(address)
    setC(cpu, value & 0x01)  # old bit 0 becomes the new carry
    value >>= 1
    cpu.registers['A'] ^= value
    cpu.writeMemory(address, value)
    setZ(cpu, cpu.registers['A'])
    setN(cpu, cpu.registers['A'])

def SRE_Zero(cpu):
    """SRE zp -- 2 bytes, 5 cycles."""
    _sre_modify(cpu, addrmodes.Zero(cpu))
    advancePC(cpu, 2)
    return 5

def SRE_Zero_X(cpu):
    """SRE zp,X -- 2 bytes, 6 cycles."""
    _sre_modify(cpu, addrmodes.Zero_X(cpu))
    advancePC(cpu, 2)
    return 6

def SRE_Absolute(cpu):
    """SRE abs -- 3 bytes, 6 cycles."""
    _sre_modify(cpu, addrmodes.Absolute(cpu))
    advancePC(cpu, 3)
    return 6

def SRE_Absolute_X(cpu):
    """SRE abs,X -- 3 bytes, 7 cycles."""
    _sre_modify(cpu, addrmodes.Absolute_X(cpu))
    advancePC(cpu, 3)
    return 7

def SRE_Absolute_Y(cpu):
    """SRE abs,Y -- 3 bytes, 7 cycles."""
    _sre_modify(cpu, addrmodes.Absolute_Y(cpu))
    advancePC(cpu, 3)
    return 7

def SRE_Indirect_X(cpu):
    """SRE (ind,X) -- 2 bytes, 8 cycles."""
    _sre_modify(cpu, addrmodes.Indirect_X(cpu))
    advancePC(cpu, 2)
    return 8

def SRE_Indirect_Y(cpu):
    """SRE (ind),Y -- 2 bytes, 8 cycles."""
    _sre_modify(cpu, addrmodes.Indirect_Y(cpu))
    advancePC(cpu, 2)
    return 8
def TOP_Absolute(cpu):
    """TOP abs (triple-byte NOP): skips its 2-byte operand; 4 cycles."""
    advancePC(cpu, 3)
    return 4

def TOP_Absolute_X(cpu):
    """TOP abs,X (triple-byte NOP): skips its 2-byte operand; 4 cycles."""
    advancePC(cpu, 3)
    return 4
| 21.578828
| 72
| 0.599052
| 8,763
| 66,657
| 4.510328
| 0.018144
| 0.0735
| 0.096701
| 0.071729
| 0.983909
| 0.979405
| 0.968804
| 0.959619
| 0.950739
| 0.940391
| 0
| 0.026326
| 0.260888
| 66,657
| 3,088
| 73
| 21.585816
| 0.775915
| 0.002955
| 0
| 0.893021
| 0
| 0
| 0.00781
| 0
| 0
| 0
| 0.016403
| 0.000324
| 0
| 1
| 0.089428
| false
| 0
| 0.000418
| 0
| 0.177183
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c5cd8e4a73d0e99731d3ac0698cf3b746e2a517f
| 3,887
|
py
|
Python
|
graphtheory/independentsets/isetus.py
|
gitter-badger/graphs-dict
|
2be1a5b140feb050eec799d6cadf6de5eef01745
|
[
"BSD-3-Clause"
] | 36
|
2015-09-20T20:55:39.000Z
|
2021-09-20T05:49:03.000Z
|
graphtheory/independentsets/isetus.py
|
gitter-badger/graphs-dict
|
2be1a5b140feb050eec799d6cadf6de5eef01745
|
[
"BSD-3-Clause"
] | 6
|
2016-03-25T21:41:46.000Z
|
2020-02-12T03:18:59.000Z
|
graphtheory/independentsets/isetus.py
|
gitter-badger/graphs-dict
|
2be1a5b140feb050eec799d6cadf6de5eef01745
|
[
"BSD-3-Clause"
] | 9
|
2016-09-12T07:57:27.000Z
|
2022-03-21T16:15:39.000Z
|
#!/usr/bin/python
# Here the used set and independent_set are both of type set.
class UnorderedSequentialIndependentSet1:
    """Find a maximal independent set (greedy, unordered sequential scan)."""

    def __init__(self, graph):
        """The algorithm initialization."""
        if graph.is_directed():
            raise ValueError("the graph is directed")
        self.graph = graph
        for edge in self.graph.iteredges():
            if edge.source == edge.target:   # for multigraphs
                raise ValueError("a loop detected")
        self.independent_set = set()
        self.cardinality = 0
        self.source = None

    def run(self, source=None):
        """Executable pseudocode."""
        blocked = set()

        def _take(node):
            # Accept *node* and block its whole neighbourhood.
            self.independent_set.add(node)
            blocked.add(node)
            blocked.update(self.graph.iteradjacent(node))

        if source is not None:
            self.source = source
            _take(source)
        for node in self.graph.iternodes():
            if node not in blocked:
                _take(node)
        self.cardinality = len(self.independent_set)
# Here used is a dict, while independent_set is of type set.
class UnorderedSequentialIndependentSet2:
    """Find a maximal independent set."""

    def __init__(self, graph):
        """The algorithm initialization."""
        if graph.is_directed():
            raise ValueError("the graph is directed")
        self.graph = graph
        for edge in self.graph.iteredges():
            if edge.source == edge.target:   # for multigraphs
                raise ValueError("a loop detected")
        self.independent_set = set()
        self.cardinality = 0
        self.source = None

    def run(self, source=None):
        """Executable pseudocode."""
        # Boolean map: used[node] is True once the node is chosen
        # or adjacent to a chosen node.
        used = dict.fromkeys(self.graph.iternodes(), False)

        def _select(node):
            self.independent_set.add(node)
            used[node] = True
            for neighbor in self.graph.iteradjacent(node):
                used[neighbor] = True

        if source is not None:
            self.source = source
            _select(source)
        for node in self.graph.iternodes():
            if not used[node]:
                _select(node)
        self.cardinality = len(self.independent_set)
# Here used and independent_set are both dicts. Convenient for C++ ports.
class UnorderedSequentialIndependentSet3:
    """Find a maximal independent set."""

    def __init__(self, graph):
        """The algorithm initialization."""
        if graph.is_directed():
            raise ValueError("the graph is directed")
        self.graph = graph
        for edge in self.graph.iteredges():
            if edge.source == edge.target:   # for multigraphs
                raise ValueError("a loop detected")
        # Membership is kept as a node -> bool map rather than a set.
        self.independent_set = dict.fromkeys(self.graph.iternodes(), False)
        self.cardinality = 0
        self.source = None

    def run(self, source=None):
        """Executable pseudocode."""
        used = dict.fromkeys(self.graph.iternodes(), False)

        def _select(node):
            # Mark membership, bump the running count, block neighbours.
            self.independent_set[node] = True
            used[node] = True
            self.cardinality += 1
            for neighbor in self.graph.iteradjacent(node):
                used[neighbor] = True

        if source is not None:
            self.source = source
            _select(source)
        for node in self.graph.iternodes():
            if not used[node]:
                _select(node)
# Default variant exposed under the generic name (variant 1 is canonical).
UnorderedSequentialIndependentSet = UnorderedSequentialIndependentSet1
# EOF
| 35.336364
| 85
| 0.59326
| 423
| 3,887
| 5.375887
| 0.144208
| 0.083113
| 0.062885
| 0.07124
| 0.902375
| 0.902375
| 0.860598
| 0.843448
| 0.80387
| 0.78628
| 0
| 0.003358
| 0.310522
| 3,887
| 109
| 86
| 35.66055
| 0.845149
| 0.124003
| 0
| 0.91358
| 0
| 0
| 0.03222
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c5ddd6ab107c7babde3d565d7d78a1ec5b21b427
| 5,211
|
py
|
Python
|
trainer/models/sisr/models.py
|
jason-zl190/sisr
|
2415d28333c94602c52be9c314a8044165d992cf
|
[
"Apache-2.0"
] | 2
|
2019-12-15T17:12:46.000Z
|
2019-12-15T21:09:31.000Z
|
trainer/models/sisr/models.py
|
jason-zl190/sisr
|
2415d28333c94602c52be9c314a8044165d992cf
|
[
"Apache-2.0"
] | null | null | null |
trainer/models/sisr/models.py
|
jason-zl190/sisr
|
2415d28333c94602c52be9c314a8044165d992cf
|
[
"Apache-2.0"
] | 1
|
2020-12-15T15:30:12.000Z
|
2020-12-15T15:30:12.000Z
|
import tensorflow as tf
class MySRResNet():
    """SRResNet-style generator: a k9n64 head, 5 identical residual blocks,
    a long skip connection, two 2x pixel-shuffle upsampling stages (4x total),
    and a k9n3 output projection.

    The original body copy-pasted each residual block and upsample stage;
    they are factored into helpers here, preserving the exact layer sequence.
    """

    def __init__(self, shape=(None, None, 3)):
        # shape: (H, W, C) of the low-resolution input; None allows
        # variable spatial sizes.
        self.shape = shape

    @staticmethod
    def _residual_block(x_in):
        """k3n64s1 conv-BN-PReLU-conv-BN with an identity skip connection."""
        x = tf.keras.layers.Conv2D(64, 3, padding='same')(x_in)
        x = tf.keras.layers.BatchNormalization()(x)
        x = tf.keras.layers.PReLU(alpha_initializer='zeros')(x)
        x = tf.keras.layers.Conv2D(64, 3, padding='same')(x)
        x = tf.keras.layers.BatchNormalization()(x)
        return x + x_in

    @staticmethod
    def _upsample_block(x):
        """k3n256s1 conv + 2x depth-to-space (pixel shuffle) + PReLU."""
        x = tf.keras.layers.Conv2D(256, 3, padding='same')(x)
        x = tf.nn.depth_to_space(x, block_size=2)
        return tf.keras.layers.PReLU(alpha_initializer='zeros')(x)

    def __call__(self):
        """Build and return the tf.keras.Model."""
        input_tensor = tf.keras.layers.Input(shape=self.shape)
        # conv1, k9n64s1
        x1 = tf.keras.layers.Conv2D(64, 9, padding='same')(input_tensor)
        x1 = tf.keras.layers.PReLU(alpha_initializer='zeros')(x1)
        # B residual blocks (B = 5), conv2_1 .. conv2_5, k3n64s1
        x = x1
        for _ in range(5):
            x = self._residual_block(x)
        # conv3, k3n64s1 + long skip from the head output
        x = tf.keras.layers.Conv2D(64, 3, 1, padding='same')(x)
        x = tf.keras.layers.BatchNormalization()(x)
        x = x + x1
        # conv4_1 / conv4_2: two 2x upsampling stages
        x = self._upsample_block(x)
        x = self._upsample_block(x)
        # conv5, k9n3s1 output projection
        x = tf.keras.layers.Conv2D(3, 9, padding='same')(x)
        return tf.keras.Model(inputs=input_tensor, outputs=x)
class Discriminator():
    """SRGAN-style discriminator: a BN-free first conv block, then seven
    conv-BN-LeakyReLU blocks with doubling filter counts and alternating
    stride 2, followed by a Dense(1024) + sigmoid head.

    The original body copy-pasted each conv block; they are factored into a
    helper here, preserving the exact layer sequence.
    """

    def __init__(self, shape=(None, None, 3)):
        # shape: (H, W, C) of the input image; None allows variable sizes.
        self.shape = shape

    @staticmethod
    def _conv_block(x, filters, strides):
        """k3 conv with BatchNorm and LeakyReLU(alpha=0.2)."""
        x = tf.keras.layers.Conv2D(filters, 3, strides, padding='same')(x)
        x = tf.keras.layers.BatchNormalization()(x)
        return tf.keras.layers.LeakyReLU(alpha=0.2)(x)

    def __call__(self):
        """Build and return the tf.keras.Model."""
        input_tensor = tf.keras.layers.Input(shape=self.shape)
        # conv1, k3n64s1 -- no BatchNorm on the first block
        x = tf.keras.layers.Conv2D(64, 3, padding='same')(input_tensor)
        x = tf.keras.layers.LeakyReLU(alpha=0.2)(x)
        # conv2_1 .. conv2_7: k3, filters doubling, stride alternating 2/1
        for filters, strides in ((64, 2), (128, 1), (128, 2),
                                 (256, 1), (256, 2), (512, 1), (512, 2)):
            x = self._conv_block(x, filters, strides)
        # dense classification head
        x = tf.keras.layers.Flatten()(x)
        x = tf.keras.layers.Dense(1024)(x)
        x = tf.keras.layers.LeakyReLU(alpha=0.2)(x)
        x = tf.keras.layers.Dense(1, activation='sigmoid')(x)
        return tf.keras.Model(inputs=input_tensor, outputs=x)
| 41.357143
| 85
| 0.545769
| 681
| 5,211
| 4.10279
| 0.116006
| 0.162849
| 0.293128
| 0.295634
| 0.873658
| 0.858626
| 0.849678
| 0.774517
| 0.76378
| 0.739442
| 0
| 0.07206
| 0.304932
| 5,211
| 125
| 86
| 41.688
| 0.699337
| 0.059298
| 0
| 0.658537
| 0
| 0
| 0.02846
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04878
| false
| 0
| 0.012195
| 0
| 0.109756
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c5eb20117b1c3cd69cace5fcbb70f1689e81a753
| 3,369
|
py
|
Python
|
src/huobi/coin_swap/rest/trigger_order.py
|
hbdmapi/huobi_sdk_Python
|
a4ee876f947011fb5d66da32853cb3a21d852a4b
|
[
"MIT"
] | 1
|
2022-03-13T16:55:34.000Z
|
2022-03-13T16:55:34.000Z
|
src/huobi/coin_swap/rest/trigger_order.py
|
hbdmapi/huobi_sdk_Python
|
a4ee876f947011fb5d66da32853cb3a21d852a4b
|
[
"MIT"
] | null | null | null |
src/huobi/coin_swap/rest/trigger_order.py
|
hbdmapi/huobi_sdk_Python
|
a4ee876f947011fb5d66da32853cb3a21d852a4b
|
[
"MIT"
] | null | null | null |
import json
from huobi.utils.http import post
from huobi.host import HOST_FUTURES
class TriggerOrder:
    """REST client for Huobi coin-margined swap trigger / TPSL / track orders.

    Every endpoint is a signed POST against the swap API; *data* is the
    endpoint-specific request body. The original body repeated the same
    signed-POST call in all 16 methods; it is factored into __post here.
    """

    def __init__(self, access_key: str, secret_key: str, host: str = HOST_FUTURES):
        self.__access_key = access_key
        self.__secret_key = secret_key
        self.__host = host

    def __post(self, path: str, data: dict) -> json:
        # Shared plumbing: all endpoints differ only by their path.
        return post(self.__host, path, self.__access_key, self.__secret_key, data)

    def order(self, data: dict = None) -> json:
        return self.__post("/swap-api/v1/swap_trigger_order", data)

    def cancel(self, data: dict = None) -> json:
        return self.__post("/swap-api/v1/swap_trigger_cancel", data)

    def cancel_all(self, data: dict = None) -> json:
        return self.__post("/swap-api/v1/swap_trigger_cancelall", data)

    def get_open_orders(self, data: dict = None) -> json:
        return self.__post("/swap-api/v1/swap_trigger_openorders", data)

    def get_his_orders(self, data: dict = None) -> json:
        return self.__post("/swap-api/v1/swap_trigger_hisorders", data)

    def tpsl_order(self, data: dict = None) -> json:
        return self.__post("/swap-api/v1/swap_tpsl_order", data)

    def tpsl_cancel(self, data: dict = None) -> json:
        return self.__post("/swap-api/v1/swap_tpsl_cancel", data)

    def tpsl_cancel_all(self, data: dict = None) -> json:
        return self.__post("/swap-api/v1/swap_tpsl_cancelall", data)

    def get_tpsl_open_orders(self, data: dict = None) -> json:
        return self.__post("/swap-api/v1/swap_tpsl_openorders", data)

    def get_tpsl_his_orders(self, data: dict = None) -> json:
        return self.__post("/swap-api/v1/swap_tpsl_hisorders", data)

    def get_relation_tpsl_order(self, data: dict = None) -> json:
        return self.__post("/swap-api/v1/swap_relation_tpsl_order", data)

    def track_order(self, data: dict = None) -> json:
        return self.__post("/swap-api/v1/swap_track_order", data)

    def track_cancel(self, data: dict = None) -> json:
        return self.__post("/swap-api/v1/swap_track_cancel", data)

    def track_cancel_all(self, data: dict = None) -> json:
        return self.__post("/swap-api/v1/swap_track_cancelall", data)

    def get_track_open_orders(self, data: dict = None) -> json:
        return self.__post("/swap-api/v1/swap_track_openorders", data)

    def get_track_his_orders(self, data: dict = None) -> json:
        return self.__post("/swap-api/v1/swap_track_hisorders", data)
| 44.328947
| 83
| 0.678836
| 487
| 3,369
| 4.271047
| 0.082136
| 0.082212
| 0.1125
| 0.155288
| 0.908173
| 0.897596
| 0.897596
| 0.897596
| 0.897596
| 0.897596
| 0
| 0.005946
| 0.201247
| 3,369
| 75
| 84
| 44.92
| 0.767001
| 0
| 0
| 0.285714
| 0
| 0
| 0.154052
| 0.154052
| 0
| 0
| 0
| 0
| 0
| 1
| 0.303571
| false
| 0
| 0.053571
| 0
| 0.660714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
6804b3cb6c9f1efa515626b9e2dcc1fb662edd9f
| 39,371
|
py
|
Python
|
python/dlxapi/api/projects_api.py
|
dlens/dlxapi
|
189a6519240ce625d7a9cdb89e305a335d2aa045
|
[
"MIT"
] | null | null | null |
python/dlxapi/api/projects_api.py
|
dlens/dlxapi
|
189a6519240ce625d7a9cdb89e305a335d2aa045
|
[
"MIT"
] | 1
|
2020-08-20T17:31:43.000Z
|
2020-08-20T17:31:43.000Z
|
python/dlxapi/api/projects_api.py
|
dlens/dlxapi
|
189a6519240ce625d7a9cdb89e305a335d2aa045
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Decision Lens API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from dlxapi.api_client import ApiClient
class ProjectsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
    def __init__(self, api_client=None):
        # Use a default-configured ApiClient when the caller does not
        # inject one.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
    def add_contributing_users_for_project(self, id, **kwargs):  # noqa: E501
        """Add users to a project.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.add_contributing_users_for_project(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str id: project id (required)
        :param AddUsersRequest body: Email ids and personal message
        :return: list[PortfolioPlanUser]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin wrapper over the *_with_http_info variant: returns only the
        # response data (sync) or the request thread (async_req=True).
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.add_contributing_users_for_project_with_http_info(id, **kwargs)  # noqa: E501
        else:
            (data) = self.add_contributing_users_for_project_with_http_info(id, **kwargs)  # noqa: E501
            return data
    def add_contributing_users_for_project_with_http_info(self, id, **kwargs):  # noqa: E501
        """Add users to a project.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.add_contributing_users_for_project_with_http_info(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str id: project id (required)
        :param AddUsersRequest body: Email ids and personal message
        :return: list[PortfolioPlanUser]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accepted keyword arguments: endpoint parameters plus the generated
        # client's plumbing flags.
        all_params = ['id', 'body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject unexpected keyword arguments early.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method add_contributing_users_for_project" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if self.api_client.client_side_validation and ('id' not in params or
                                                       params['id'] is None):  # noqa: E501
            raise ValueError("Missing the required parameter `id` when calling `add_contributing_users_for_project`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['OAuth2']  # noqa: E501

        # Delegate the actual HTTP call to the shared ApiClient.
        return self.api_client.call_api(
            '/projects/{id}/users', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[PortfolioPlanUser]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def create_project(self, portfolio_id, project, **kwargs):  # noqa: E501
        """Creates a new project.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_project(portfolio_id, project, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str portfolio_id: Portfolio id (required)
        :param Project project: Project to create (required)
        :return: Project
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin wrapper over the *_with_http_info variant: returns only the
        # response data (sync) or the request thread (async_req=True).
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.create_project_with_http_info(portfolio_id, project, **kwargs)  # noqa: E501
        else:
            (data) = self.create_project_with_http_info(portfolio_id, project, **kwargs)  # noqa: E501
            return data
def create_project_with_http_info(self, portfolio_id, project, **kwargs):  # noqa: E501
    """Creates a new project.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_project_with_http_info(portfolio_id, project, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str portfolio_id: Portfolio id (required)
    :param Project project: Project to create (required)
    :return: Project
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this endpoint; anything else passed in
    # **kwargs is rejected with a TypeError below.
    all_params = ['portfolio_id', 'project']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: locals() snapshots the current local names (including
    # 'all_params'); only whitelisted keys are ever read back from it.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_project" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'portfolio_id' is set
    if self.api_client.client_side_validation and ('portfolio_id' not in params or
                                                   params['portfolio_id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `portfolio_id` when calling `create_project`")  # noqa: E501
    # verify the required parameter 'project' is set
    if self.api_client.client_side_validation and ('project' not in params or
                                                   params['project'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `project` when calling `create_project`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    # The portfolio id travels as a query parameter, not in the URL path.
    query_params = []
    if 'portfolio_id' in params:
        query_params.append(('portfolioId', params['portfolio_id']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # The Project payload is serialized as the JSON request body.
    body_params = None
    if 'project' in params:
        body_params = params['project']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['OAuth2']  # noqa: E501

    return self.api_client.call_api(
        '/projects', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Project',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_project(self, id, **kwargs):  # noqa: E501
    """Delete a project.  # noqa: E501

    The request is synchronous by default; pass ``async_req=True`` to get
    back the request thread instead and fetch the result later with
    ``thread.get()``.

    :param async_req bool
    :param str id: Project id (required)
    :return: None, or the request thread when called asynchronously.
    """
    # Always unwrap the (data, status, headers) tuple for this wrapper.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info call returns the correct object for both the
    # async (thread) and sync (data) paths, so simply pass it through.
    return self.delete_project_with_http_info(id, **kwargs)  # noqa: E501
def delete_project_with_http_info(self, id, **kwargs):  # noqa: E501
    """Delete a project.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_project_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: Project id (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this endpoint; others raise TypeError.
    all_params = ['id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: locals() snapshots the current local names; only whitelisted
    # keys are ever read back from it.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_project" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in params or
                                                   params['id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `id` when calling `delete_project`")  # noqa: E501

    collection_formats = {}

    # The project id is substituted into the {id} path template.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # DELETE sends no request body and, since response_type is None, the
    # generator emitted no Accept/Content-Type headers for this endpoint.
    body_params = None
    # Authentication setting
    auth_settings = ['OAuth2']  # noqa: E501

    return self.api_client.call_api(
        '/projects/{id}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_projects(self, project_ids, **kwargs):  # noqa: E501
    """Delete projects.  # noqa: E501

    The request is synchronous by default; pass ``async_req=True`` to get
    back the request thread instead and fetch the result later with
    ``thread.get()``.

    :param async_req bool
    :param list[str] project_ids: Project ids (required)
    :return: None, or the request thread when called asynchronously.
    """
    # Always unwrap the (data, status, headers) tuple for this wrapper.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info call returns the correct object for both the
    # async (thread) and sync (data) paths, so simply pass it through.
    return self.delete_projects_with_http_info(project_ids, **kwargs)  # noqa: E501
def delete_projects_with_http_info(self, project_ids, **kwargs):  # noqa: E501
    """Delete projects.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_projects_with_http_info(project_ids, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param list[str] project_ids: Project ids (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this endpoint; others raise TypeError.
    all_params = ['project_ids']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: locals() snapshots the current local names; only whitelisted
    # keys are ever read back from it.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_projects" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'project_ids' is set
    if self.api_client.client_side_validation and ('project_ids' not in params or
                                                   params['project_ids'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `project_ids` when calling `delete_projects`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # Bulk delete is modeled as a POST whose JSON body is the id list.
    body_params = None
    if 'project_ids' in params:
        body_params = params['project_ids']
    # HTTP header `Content-Type`
    # (No Accept header here: response_type is None, so no body is
    # deserialized — presumably intentional in the generated spec.)
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['OAuth2']  # noqa: E501

    return self.api_client.call_api(
        '/projects/delete', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_project(self, id, **kwargs):  # noqa: E501
    """Retrieves a project.  # noqa: E501

    The request is synchronous by default; pass ``async_req=True`` to get
    back the request thread instead and fetch the result later with
    ``thread.get()``.

    :param async_req bool
    :param str id: Project id (required)
    :return: Project, or the request thread when called asynchronously.
    """
    # Always unwrap the (data, status, headers) tuple for this wrapper.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info call returns the correct object for both the
    # async (thread) and sync (data) paths, so simply pass it through.
    return self.get_project_with_http_info(id, **kwargs)  # noqa: E501
def get_project_with_http_info(self, id, **kwargs):  # noqa: E501
    """Retrieves a project.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_project_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: Project id (required)
    :return: Project
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this endpoint; others raise TypeError.
    all_params = ['id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: locals() snapshots the current local names; only whitelisted
    # keys are ever read back from it.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_project" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in params or
                                                   params['id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `id` when calling `get_project`")  # noqa: E501

    collection_formats = {}

    # The project id is substituted into the {id} path template.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['OAuth2']  # noqa: E501

    return self.api_client.call_api(
        '/projects/{id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Project',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_project_for_portfolio_plan(self, project_id, portfolio_plan_id, **kwargs):  # noqa: E501
    """Retrieves a project for a portfolioPlan.  # noqa: E501

    The request is synchronous by default; pass ``async_req=True`` to get
    back the request thread instead and fetch the result later with
    ``thread.get()``.

    :param async_req bool
    :param str project_id: Project id (required)
    :param str portfolio_plan_id: PortfolioPlan id (required)
    :return: Project, or the request thread when called asynchronously.
    """
    # Always unwrap the (data, status, headers) tuple for this wrapper.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info call returns the correct object for both the
    # async (thread) and sync (data) paths, so simply pass it through.
    return self.get_project_for_portfolio_plan_with_http_info(project_id, portfolio_plan_id, **kwargs)  # noqa: E501
def get_project_for_portfolio_plan_with_http_info(self, project_id, portfolio_plan_id, **kwargs):  # noqa: E501
    """Retrieves a project for a portfolioPlan.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_project_for_portfolio_plan_with_http_info(project_id, portfolio_plan_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str project_id: Project id (required)
    :param str portfolio_plan_id: PortfolioPlan id (required)
    :return: Project
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this endpoint; others raise TypeError.
    all_params = ['project_id', 'portfolio_plan_id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: locals() snapshots the current local names; only whitelisted
    # keys are ever read back from it.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_project_for_portfolio_plan" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'project_id' is set
    if self.api_client.client_side_validation and ('project_id' not in params or
                                                   params['project_id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `project_id` when calling `get_project_for_portfolio_plan`")  # noqa: E501
    # verify the required parameter 'portfolio_plan_id' is set
    if self.api_client.client_side_validation and ('portfolio_plan_id' not in params or
                                                   params['portfolio_plan_id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `portfolio_plan_id` when calling `get_project_for_portfolio_plan`")  # noqa: E501

    collection_formats = {}

    # Both ids are substituted into the two path templates below; note the
    # snake_case -> camelCase rename for the URL placeholders.
    path_params = {}
    if 'project_id' in params:
        path_params['projectId'] = params['project_id']  # noqa: E501
    if 'portfolio_plan_id' in params:
        path_params['portfolioPlanId'] = params['portfolio_plan_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['OAuth2']  # noqa: E501

    return self.api_client.call_api(
        '/projects/{projectId}/portfolioPlan/{portfolioPlanId}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Project',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_projects_for_portfolio(self, portfolio_id, **kwargs):  # noqa: E501
    """Retrieves projects contained within a portfolio. Possible expand paths are - (items.fieldValues, contributingUserIds)  # noqa: E501

    The request is synchronous by default; pass ``async_req=True`` to get
    back the request thread instead and fetch the result later with
    ``thread.get()``.

    :param async_req bool
    :param str portfolio_id: Portfolio id (required)
    :param str portfolio_plan_id: Portfolio plan id. If not specified the portfolio plan will default to current baseline
    :param str expand: JSON string containing an array expand specifications for fields. An expand specification must have a path and includes optional properties match, unique, allPossible, limit, offset, orderBy.
    :param int limit: Pagination limit
    :param int offset: Pagination offset
    :param str order_by: Comma delimited list of order by expressions. Use '-' in front of expression for reverse order.
    :param str match: Semi-colon delimited list of expressions to include in the response only the items in a collections that satisfy the expression(s). All other items should be excluded.
    :return: Projects, or the request thread when called asynchronously.
    """
    # Always unwrap the (data, status, headers) tuple for this wrapper.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info call returns the correct object for both the
    # async (thread) and sync (data) paths, so simply pass it through.
    return self.get_projects_for_portfolio_with_http_info(portfolio_id, **kwargs)  # noqa: E501
def get_projects_for_portfolio_with_http_info(self, portfolio_id, **kwargs):  # noqa: E501
    """Retrieves projects contained within a portfolio. Possible expand paths are - (items.fieldValues, contributingUserIds)  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_projects_for_portfolio_with_http_info(portfolio_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str portfolio_id: Portfolio id (required)
    :param str portfolio_plan_id: Portfolio plan id. If not specified the portfolio plan will default to current baseline
    :param str expand: JSON string containing an array expand specifications for fields. An expand specification must have a path and includes optional properties match, unique, allPossible, limit, offset, orderBy.
    :param int limit: Pagination limit
    :param int offset: Pagination offset
    :param str order_by: Comma delimited list of order by expressions. Use '-' in front of expression for reverse order.
    :param str match: Semi-colon delimited list of expressions to include in the response only the items in a collections that satisfy the expression(s). All other items should be excluded.
    :return: Projects
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this endpoint; others raise TypeError.
    all_params = ['portfolio_id', 'portfolio_plan_id', 'expand', 'limit', 'offset', 'order_by', 'match']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: locals() snapshots the current local names; only whitelisted
    # keys are ever read back from it.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_projects_for_portfolio" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'portfolio_id' is set
    if self.api_client.client_side_validation and ('portfolio_id' not in params or
                                                   params['portfolio_id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `portfolio_id` when calling `get_projects_for_portfolio`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    # All filters/paging options travel as query parameters; optional ones
    # are only appended when the caller supplied them.
    query_params = []
    if 'portfolio_id' in params:
        query_params.append(('portfolioId', params['portfolio_id']))  # noqa: E501
    if 'portfolio_plan_id' in params:
        query_params.append(('portfolioPlanId', params['portfolio_plan_id']))  # noqa: E501
    if 'expand' in params:
        query_params.append(('expand', params['expand']))  # noqa: E501
    if 'limit' in params:
        query_params.append(('limit', params['limit']))  # noqa: E501
    if 'offset' in params:
        query_params.append(('offset', params['offset']))  # noqa: E501
    if 'order_by' in params:
        query_params.append(('orderBy', params['order_by']))  # noqa: E501
    if 'match' in params:
        query_params.append(('match', params['match']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['OAuth2']  # noqa: E501

    return self.api_client.call_api(
        '/projects', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Projects',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def remove_contributing_users_from_project(self, id, body, **kwargs):  # noqa: E501
    """Remove contributing users from a project.  # noqa: E501

    The request is synchronous by default; pass ``async_req=True`` to get
    back the request thread instead and fetch the result later with
    ``thread.get()``.

    :param async_req bool
    :param str id: project id (required)
    :param RemoveContributingUsersRequest body: contributing user ids (required)
    :return: None, or the request thread when called asynchronously.
    """
    # Always unwrap the (data, status, headers) tuple for this wrapper.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info call returns the correct object for both the
    # async (thread) and sync (data) paths, so simply pass it through.
    return self.remove_contributing_users_from_project_with_http_info(id, body, **kwargs)  # noqa: E501
def remove_contributing_users_from_project_with_http_info(self, id, body, **kwargs):  # noqa: E501
    """Remove contributing users from a project.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.remove_contributing_users_from_project_with_http_info(id, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: project id (required)
    :param RemoveContributingUsersRequest body: contributing user ids (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this endpoint; others raise TypeError.
    all_params = ['id', 'body']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: locals() snapshots the current local names; only whitelisted
    # keys are ever read back from it.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method remove_contributing_users_from_project" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in params or
                                                   params['id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `id` when calling `remove_contributing_users_from_project`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in params or
                                                   params['body'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `body` when calling `remove_contributing_users_from_project`")  # noqa: E501

    collection_formats = {}

    # The project id is substituted into the {id} path template.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # DELETE with a JSON body: the user ids to remove are sent as payload.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['OAuth2']  # noqa: E501

    return self.api_client.call_api(
        '/projects/{id}/users', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def set_project_dependencies(self, id, **kwargs):  # noqa: E501
    """Adds or removes dependsOn and/or dependant linked projects to a project.  # noqa: E501

    The request is synchronous by default; pass ``async_req=True`` to get
    back the request thread instead and fetch the result later with
    ``thread.get()``.

    :param async_req bool
    :param str id: project id (required)
    :param SetDependenciesRequest body: dependsOn and hasDependent project Ids
    :return: Projects, or the request thread when called asynchronously.
    """
    # Always unwrap the (data, status, headers) tuple for this wrapper.
    kwargs['_return_http_data_only'] = True
    # The *_with_http_info call returns the correct object for both the
    # async (thread) and sync (data) paths, so simply pass it through.
    return self.set_project_dependencies_with_http_info(id, **kwargs)  # noqa: E501
def set_project_dependencies_with_http_info(self, id, **kwargs):  # noqa: E501
    """Adds or removes dependsOn and/or dependant linked projects to a project.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.set_project_dependencies_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: project id (required)
    :param SetDependenciesRequest body: dependsOn and hasDependent project Ids
    :return: Projects
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments accepted by this endpoint; others raise TypeError.
    # 'body' is optional here (no required-parameter check below).
    all_params = ['id', 'body']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: locals() snapshots the current local names; only whitelisted
    # keys are ever read back from it.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method set_project_dependencies" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in params or
                                                   params['id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `id` when calling `set_project_dependencies`")  # noqa: E501

    collection_formats = {}

    # The project id is substituted into the {id} path template.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # Optional SetDependenciesRequest payload goes in the PUT body.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['OAuth2']  # noqa: E501

    return self.api_client.call_api(
        '/projects/{id}/dependencies', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Projects',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
| 41.884043
| 219
| 0.612126
| 4,530
| 39,371
| 5.071523
| 0.051656
| 0.048054
| 0.021938
| 0.028206
| 0.958127
| 0.943023
| 0.926003
| 0.908505
| 0.90067
| 0.890093
| 0
| 0.015631
| 0.299662
| 39,371
| 939
| 220
| 41.928648
| 0.817575
| 0.329684
| 0
| 0.772374
| 1
| 0
| 0.181551
| 0.044264
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036965
| false
| 0
| 0.007782
| 0
| 0.099222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6814a5a1d88be94a9967b39995dff7c2077e61e1
| 7,222
|
py
|
Python
|
tests/test_01_accept_negative_country_code_special.py
|
glushkovvv/test_2gis
|
2affff49411a3c7ff77e9d399ec86eb314aa3757
|
[
"MIT"
] | null | null | null |
tests/test_01_accept_negative_country_code_special.py
|
glushkovvv/test_2gis
|
2affff49411a3c7ff77e9d399ec86eb314aa3757
|
[
"MIT"
] | 1
|
2020-08-05T06:27:23.000Z
|
2020-08-05T06:27:42.000Z
|
tests/test_01_accept_negative_country_code_special.py
|
glushkovvv/test_2gis
|
2affff49411a3c7ff77e9d399ec86eb314aa3757
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
test_01_accept_negative_country_code_special
~~~~~~~~~~~~~~
The 2GIS API Test
Check negative country_code special character
:author: Vadim Glushkov
:copyright: Copyright 2019, The 2GIS API Test
:license: MIT
:version: 1.0.0
:maintainer: Vadim Glushkov
:email: plussg@yandex.ru
:status: Development
"""
import json
from os.path import join, dirname
import pytest
import allure
from jsonschema import Draft7Validator
from tools.string_manipulation import get_space_and_end_character, get_special_character
from tools.api_responses import get_response
from tools.load_json_schema import load_json_schema
@allure.epic("Негативные тесты API")
@allure.suite("Фильтрация по коду страны. Пустота, пробелы, табуляция, перевод строк и прочее")
@allure.title("Проверка ответов при передачи в качестве параметра code_country пустых симвлов пустоты, пробела и т.п.")
@pytest.mark.parametrize("country_code",
                         get_space_and_end_character())
def test_01_accept_negative_space_char(setup_option, country_code):
    """A whitespace / line-ending country_code must still get HTTP 200 and a
    body that validates against the error JSON schema.

    :param setup_option: environment settings fixture
    :type setup_option: dict
    :param country_code: country code under test
    :type: chars
    :return:
    """
    base_url = setup_option['site_url']
    reply = get_response(base_url, {"country_code": country_code})
    payload = json.loads(reply.content.decode('utf-8'))
    details = (f" EndPoint: {base_url}?country_code={country_code}\n"
               f" Status: {reply.status_code}\n"
               f" Headers: {reply.headers}\n"
               f" Body: {payload}")
    assert reply.status_code == 200, f"""Статус {reply.status_code} != 200\r\n""" + details
    # Validate the body against the shared error-response schema.
    schema_file = join(dirname(__file__), join('../datasets', 'json_error_schemas_for_test.json'))
    schema = load_json_schema(filename=schema_file)
    assert Draft7Validator(schema=schema).is_valid(payload), \
        f"""Ошибка при валидации json схемы {country_code}\r\n""" + details
@allure.epic("Негативные тесты API")
@allure.suite("Фильтрация по коду страны. Специальные символы.")
@allure.title("Проверка ответов при передачи в качестве параметра code_country одного специального символа")
@pytest.mark.parametrize("country_code",
                         get_special_character(count_chars=1,
                                               len_list=10))
def test_02_accept_negative_special_one_char(setup_option, country_code):
    """A single special character as country_code must still get HTTP 200 and
    a body that validates against the error JSON schema.

    :param setup_option: environment settings fixture
    :type setup_option: dict
    :param country_code: country code under test
    :type country_code: str
    :return:
    """
    base_url = setup_option['site_url']
    reply = get_response(base_url, {"country_code": country_code})
    payload = json.loads(reply.content.decode('utf-8'))
    details = (f" EndPoint: {base_url}?country_code={country_code}\n"
               f" Status: {reply.status_code}\n"
               f" Headers: {reply.headers}\n"
               f" Body: {payload}")
    assert reply.status_code == 200, f"""Статус {reply.status_code} != 200\r\n""" + details
    # Validate the body against the shared error-response schema.
    schema_file = join(dirname(__file__), join('../datasets', 'json_error_schemas_for_test.json'))
    schema = load_json_schema(filename=schema_file)
    assert Draft7Validator(schema=schema).is_valid(payload), \
        f"""Ошибка при валидации json схемы {country_code}\r\n""" + details
@allure.epic("Негативные тесты API")
@allure.suite("Фильтрация по коду страны. Специальные символы.")
@allure.title("Проверка ответов при передачи в качестве параметра code_country пустых комбинации из 2 спец. символов")
@pytest.mark.parametrize("country_code",
                         get_special_character(count_chars=2,
                                               len_list=10))
def test_03_accept_negative_special_two_char(setup_option, country_code):
    """Two special characters as country_code must still get HTTP 200 and a
    body that validates against the error JSON schema.

    :param setup_option: environment settings fixture
    :type setup_option: dict
    :param country_code: country code under test
    :type country_code: str
    :return:
    """
    base_url = setup_option['site_url']
    reply = get_response(base_url, {"country_code": country_code})
    payload = json.loads(reply.content.decode('utf-8'))
    details = (f" EndPoint: {base_url}?country_code={country_code}\n"
               f" Status: {reply.status_code}\n"
               f" Headers: {reply.headers}\n"
               f" Body: {payload}")
    assert reply.status_code == 200, f"""Статус {reply.status_code} != 200\r\n""" + details
    # Validate the body against the shared error-response schema.
    schema_file = join(dirname(__file__), join('../datasets', 'json_error_schemas_for_test.json'))
    schema = load_json_schema(filename=schema_file)
    assert Draft7Validator(schema=schema).is_valid(payload), \
        f"""Ошибка при валидации json схемы {country_code}\r\n""" + details
@allure.epic("Негативные тесты API")
@allure.suite("Фильтрация по коду страны. Специальные символы.")
@allure.title("Проверка ответов при передачи в качестве параметра code_country пустых комбинации из 3 спец. символов")
@pytest.mark.parametrize("country_code",
                         get_special_character(count_chars=3,
                                               len_list=10))
def test_04_accept_negative_special_three_char(setup_option, country_code):
    """API must answer 200 with an error-schema body when country_code
    is a combination of three special characters.

    :param setup_option: environment settings fixture
    :type setup_option: dict
    :param country_code: country code under test
    :type country_code: str
    :return:
    """
    base_url = setup_option['site_url']
    response = get_response(base_url, {"country_code": country_code})
    body = json.loads(response.content.decode('utf-8'))
    details = (f" EndPoint: {base_url}?country_code={country_code}\n"
               f" Status: {response.status_code}\n"
               f" Headers: {response.headers}\n"
               f" Body: {body}")
    assert response.status_code == 200, f"""Статус {response.status_code} != 200\r\n""" + details
    # Validate the body against the shared error-response JSON schema.
    schema_file = join(dirname(__file__), '../datasets',
                       'json_error_schemas_for_test.json')
    error_schema = load_json_schema(filename=schema_file)
    assert Draft7Validator(schema=error_schema).is_valid(body), \
        f"""Ошибка при валидации json схемы {country_code}\r\n""" + details
| 44.580247
| 119
| 0.692052
| 911
| 7,222
| 5.207464
| 0.170143
| 0.085793
| 0.043002
| 0.05312
| 0.839587
| 0.820405
| 0.813027
| 0.813027
| 0.813027
| 0.813027
| 0
| 0.011142
| 0.204652
| 7,222
| 161
| 120
| 44.857143
| 0.814763
| 0.163667
| 0
| 0.747475
| 0
| 0
| 0.338633
| 0.100812
| 0
| 0
| 0
| 0
| 0.080808
| 1
| 0.040404
| false
| 0
| 0.080808
| 0
| 0.121212
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a8521db91f2e1f2bef162abe392a5a5139dc8f51
| 6,407
|
py
|
Python
|
test/test_expected_results.py
|
ColemanTom/shellcov
|
d90bd9f0da89ef0b9e536140803fdce470d27479
|
[
"MIT"
] | null | null | null |
test/test_expected_results.py
|
ColemanTom/shellcov
|
d90bd9f0da89ef0b9e536140803fdce470d27479
|
[
"MIT"
] | 13
|
2020-07-12T09:34:41.000Z
|
2021-06-25T01:06:04.000Z
|
test/test_expected_results.py
|
ColemanTom/shellcov
|
d90bd9f0da89ef0b9e536140803fdce470d27479
|
[
"MIT"
] | null | null | null |
from string import Template
from shell_cov.shell_cov import FILLER
# Bash fixture exercising quoting, case statements, heredocs, functions and
# line continuations.  Each line is .strip()-ed, so indentation never matters.
ESCAPED_QUOTES = '\n'.join(s.strip() for s in r'''
#!/bin/bash
set -eux -o pipefail
arg=$1
values=(1 2 3 4 5)
echo "num values = ${#values[@]}"
case "$arg" in
1|2|${#values[@]}) echo result!!!
;;
25) # do nothing here
;;
*) ;;
esac; echo hello
case "$arg"
in
8) echo testing;;
esac
if (( arg == ${#values[@]} ))
then
# do something
echo awktest |
awk '{
print $1
}'
fi
if [[ 1 == 1]]; then :;;;; fi
;;; #
;;;
cat <<- EOF
EOF hello
hello EOF
EOF
cat <<-EOF
EOF
cat <<'EOF'
EOFI
EOF
echo hello \
test \
boo \
\ #test
\
echo hello \\
echo "multi-line
string"
echo 'multi-line
single quote string
'
echo 'escaped
multi-single'
echo "escaped
multi-double
"
function one {
# do something
:;;
}
function two() {
test; }
function three ()
{
:
}
four()
{
:
}
five() { testing; }
six() { testing;;;;;;; ;; ;;
}
'''.splitlines())
# Same fixture with the shebang and comments removed — presumably the
# expected output of shell_cov's comment-stripping stage (TODO confirm
# against the shell_cov implementation).
COMMENTS = '\n'.join(s.strip() for s in r'''
set -eux -o pipefail
arg=$1
values=(1 2 3 4 5)
echo "num values = ${#values[@]}"
case "$arg" in
1|2|${#values[@]}) echo result!!!
;;
25)
;;
*) ;;
esac; echo hello
case "$arg"
in
8) echo testing;;
esac
if (( arg == ${#values[@]} ))
then
echo awktest |
awk '{
print $1
}'
fi
if [[ 1 == 1]]; then :;;;; fi
;;;
;;;
cat <<- EOF
EOF hello
hello EOF
EOF
cat <<-EOF
EOF
cat <<'EOF'
EOFI
EOF
echo hello \
test \
boo \
\
\
echo hello \\
echo "multi-line
string"
echo 'multi-line
single quote string
'
echo 'escaped
\'multi-single\''
echo "escaped
\"multi-double
\"
"
function one {
:;;
}
function two() {
test; }
function three ()
{
:
}
four()
{
:
}
five() { testing; }
six() { testing;;;;;;; ;; ;;
}
'''.splitlines())
# Expected output after line-continuation handling: the backslash-continued
# run is replaced by FILLER (spliced in via string.Template, since ${...}
# also appears as literal bash syntax that must survive substitution).
LINE_CONTINUATION = Template('\n'.join(s.strip() for s in r'''
#!/bin/bash
set -eux -o pipefail
arg=$1
values=(1 2 3 4 5)
echo "num values = ${#values[@]}"
case "$arg" in
1|2|${#values[@]}) echo result!!!
;;
25) # do nothing here
;;
*) ;;
esac; echo hello
case "$arg"
in
8) echo testing;;
esac
if (( arg == ${#values[@]} ))
then
# do something
echo awktest |
awk '{
print $1
}'
fi
if [[ 1 == 1]]; then :;;;; fi
;;; #
;;;
cat <<- EOF
EOF hello
hello EOF
EOF
cat <<-EOF
EOF
cat <<'EOF'
EOFI
EOF
${filler}
echo hello \\
echo "multi-line
string"
echo 'multi-line
single quote string
'
echo 'escaped
\'multi-single\''
echo "escaped
\"multi-double
\"
"
function one {
# do something
:;;
}
function two() {
test; }
function three ()
{
:
}
four()
{
:
}
five() { testing; }
six() { testing;;;;;;; ;; ;;
}
'''.splitlines())).safe_substitute({'filler': FILLER})
# Expected output after heredoc handling: each `cat <<...` heredoc body is
# collapsed to a single FILLER placeholder.
HEREDOC = Template('\n'.join(s.strip() for s in r'''
#!/bin/bash
set -eux -o pipefail
arg=$1
values=(1 2 3 4 5)
echo "num values = ${#values[@]}"
case "$arg" in
1|2|${#values[@]}) echo result!!!
;;
25) # do nothing here
;;
*) ;;
esac; echo hello
case "$arg"
in
8) echo testing;;
esac
if (( arg == ${#values[@]} ))
then
# do something
echo awktest |
awk '{
print $1
}'
fi
if [[ 1 == 1]]; then :;;;; fi
;;; #
;;;
cat ${filler}
cat ${filler}
cat ${filler}
echo hello \
test \
boo \
\ #test
\
echo hello \\
echo "multi-line
string"
echo 'multi-line
single quote string
'
echo 'escaped
\'multi-single\''
echo "escaped
\"multi-double
\"
"
function one {
# do something
:;;
}
function two() {
test; }
function three ()
{
:
}
four()
{
:
}
five() { testing; }
six() { testing;;;;;;; ;; ;;
}
'''.splitlines())).safe_substitute({'filler': FILLER})
# Expected output after function handling: `function name {` / `name()`
# header lines are removed, single-line function bodies kept.
FUNCTION = Template('\n'.join(s.strip() for s in r'''
#!/bin/bash
set -eux -o pipefail
arg=$1
values=(1 2 3 4 5)
echo "num values = ${#values[@]}"
case "$arg" in
1|2|${#values[@]}) echo result!!!
;;
25) # do nothing here
;;
*) ;;
esac; echo hello
case "$arg"
in
8) echo testing;;
esac
if (( arg == ${#values[@]} ))
then
# do something
echo awktest |
awk '{
print $1
}'
fi
if [[ 1 == 1]]; then :;;;; fi
;;; #
;;;
cat <<- EOF
EOF hello
hello EOF
EOF
cat <<-EOF
EOF
cat <<'EOF'
EOFI
EOF
echo hello \
test \
boo \
\ #test
\
echo hello \\
echo "multi-line
string"
echo 'multi-line
single quote string
'
echo 'escaped
\'multi-single\''
echo "escaped
\"multi-double
\"
"
# do something
:;;
}
test; }
:
}
:
}
five() { testing; }
six() { testing;;;;;;; ;; ;;
}
'''.splitlines())).safe_substitute({'filler': FILLER})
# Expected output after multi-line quoted strings (and the awk block) are
# replaced by FILLER placeholders.
MULTILINE_QUOTES = Template('\n'.join(s.strip() for s in r'''
#!/bin/bash
set -eux -o pipefail
arg=$1
values=(1 2 3 4 5)
echo "num values = ${#values[@]}"
case "$arg" in
1|2|${#values[@]}) echo result!!!
;;
25) # do nothing here
;;
*) ;;
esac; echo hello
case "$arg"
in
8) echo testing;;
esac
if (( arg == ${#values[@]} ))
then
# do something
echo awktest |
${filler}
fi
if [[ 1 == 1]]; then :;;;; fi
;;; #
;;;
cat <<- EOF
EOF hello
hello EOF
EOF
cat <<-EOF
EOF
cat <<'EOF'
EOFI
EOF
echo hello \
test \
boo \
\ #test
\
echo hello \\
${filler}
${filler}
${filler}
${filler}
function one {
# do something
:;;
}
function two() {
test; }
function three ()
{
:
}
four()
{
:
}
five() { testing; }
six() { testing;;;;;;; ;; ;;
}
'''.splitlines())).safe_substitute({'filler': FILLER})
# Expected output after logic-keyword stripping: standalone `;;`, `esac`,
# `then`, `fi`, `in` and closing-brace lines are dropped — presumably the
# output of shell_cov's logic stage (TODO confirm).
LOGIC = '\n'.join(s.strip() for s in r'''
#!/bin/bash
set -eux -o pipefail
arg=$1
values=(1 2 3 4 5)
echo "num values = ${#values[@]}"
case "$arg" in
1|2|${#values[@]}) echo result!!!
25) # do nothing here
esac; echo hello
case "$arg"
8) echo testing;;
if (( arg == ${#values[@]} ))
# do something
echo awktest |
awk '{
print $1
}'
if [[ 1 == 1]]; then :;;;; fi
;;; #
cat <<- EOF
EOF hello
hello EOF
EOF
cat <<-EOF
EOF
cat <<'EOF'
EOFI
EOF
echo hello \
test \
boo \
\ #test
\
echo hello \\
echo "multi-line
string"
echo 'multi-line
single quote string
'
echo 'escaped
\'multi-single\''
echo "escaped
\"multi-double
\"
"
function one {
# do something
:;;
function two() {
test; }
function three ()
:
four()
:
five() { testing; }
six() { testing;;;;;;; ;; ;;
'''.splitlines())
| 10.218501
| 62
| 0.520056
| 825
| 6,407
| 4.027879
| 0.088485
| 0.054168
| 0.035209
| 0.043334
| 0.931989
| 0.931989
| 0.931989
| 0.92266
| 0.917243
| 0.90009
| 0
| 0.020851
| 0.273919
| 6,407
| 626
| 63
| 10.234824
| 0.693465
| 0
| 0
| 0.829374
| 0
| 0
| 0.894803
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.00432
| 0
| 0.00432
| 0.012959
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a895820b415560eabfdaafe75a0ecfddcddde49a
| 9,139
|
py
|
Python
|
applications/bestbukpl/controllers/read_coupon.py
|
lechu87/bbuk
|
34934040f70a6d3e6d15d7659411c6398732fb58
|
[
"BSD-3-Clause"
] | null | null | null |
applications/bestbukpl/controllers/read_coupon.py
|
lechu87/bbuk
|
34934040f70a6d3e6d15d7659411c6398732fb58
|
[
"BSD-3-Clause"
] | null | null | null |
applications/bestbukpl/controllers/read_coupon.py
|
lechu87/bbuk
|
34934040f70a6d3e6d15d7659411c6398732fb58
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# TODO: try something like the following (import the reader as a module):
#read_coupon_fortuna_wo_sql=local_import('read_coupon_fortuna_wo_sql_27')
#from read_coupon_fortuna_wo_sql local_import read_coupon
import subprocess
import re
from collections import defaultdict
def index():
    """Default controller action; returns a simple greeting payload."""
    greeting = {"message": "hello from read_coupon.py"}
    return greeting
def test():
    """Read a coupon id from the request, run the external coupon reader,
    parse its printed list output and look up every home/away pair in each
    bookmaker table.  Returns locals() so web2py exposes them to the view.

    NOTE(review): near-duplicate of hello() below — candidates for a shared
    helper.
    """
    #rows=s.select()
    #rows=s.select()
    # Coupon identifier supplied by the client as ?c_id=...
    check_id=request.vars.c_id
    #x=subprocess.call(['python3', '/home/lesinle/odds/read_coupon_fortuna_wo_sql.py',check_id])
    # Run the external scraper and capture its stdout.
    # NOTE(review): check_output returns bytes on Python 3, so the str-pattern
    # re.sub below assumes a Python 2 (web2py) runtime — confirm.
    x=subprocess.check_output(['python3','/home/lesinle/odds/read_coupon_fortuna_wo_sql.py',check_id])
    # Strip the list brackets from the printed python-list text
    y1=re.sub('\[','',x)
    y2=re.sub(']','',y1)
    # Split on single quotes; real values alternate with separator junk
    y=y2.split("'")
    #print (y
    #y = [x for x in y if x != ['[',']']]
    #y.remove('[')
    # Keep only the payload tokens, dropping separator fragments
    new_list=[]
    for el in y:
        if el not in [",","[","']","['","]","[","],","],[","], ["," ","\n",", ","'",",\n",""]:
            new_list.append(el)
    home2=[]
    home_list=[]
    away_list=[]
    typ_list=[]
    typ_name_list=[]
    # Tokens come in groups of four: home, away, bet type, bet type name
    for i in range(0,len(new_list)-1,4):
        home_list.append(new_list[i])
        away_list.append(new_list[i+1])
        typ_list.append(new_list[i+2])
        typ_name_list.append(new_list[i+3])
    db = DAL('sqlite://db.sqlite')
    db_name=db._uri
    # One odds table per bookmaker; schema defined below, tables pre-exist
    bookies=['db_fortuna','db_sts','db_iforbet','db_lvbet','db_totolotek']
    for bookie in bookies:
        db.define_table(bookie, Field('home'),Field('away'),Field('game_1'),Field('game_0'),Field('game_2'),Field('game_10'),Field('game_02'),Field('game_12'),Field('data'),Field('Sport'),Field('League'),Field('country'),Field('dnb_1'),Field('dnb_2'),Field('o_05'),Field('o_15'),Field('o_25'),Field('o_35'),Field('o_45'),Field('o_55'),Field('o_65'),Field('o_75'),Field('o_85'),Field('o_95'),Field('u_05'),Field('u_15'),Field('u_25'),Field('u_35'),Field('u_45'),Field('u_55'),Field('u_65'),Field('u_75'),Field('u_85'),Field('u_95'),Field('ht_ft_11'),Field('ht_ft_1x'),Field('ht_ft_x1'),Field('ht_ft_22'),Field('ht_ft_x2'),Field('ht_ft_2x'),Field('ht_ft_xx'),Field('ht_ft_12'),Field('ht_ft_21'),Field('first_half_1'),Field('first_half_x'),Field('first_half_2'),Field('first_half_10'),Field('first_half_02'),Field('first_half_12'),Field('eh_min_1_1'),Field('eh_min_1_x2'),Field('u_25_1'),Field('o_25_1'),Field('u_25_x'),Field('o_25_x'),Field('u_25_2'),Field('o_25_2'),Field('first_goal_1'),Field('first_goal_2'),Field('first_goal_0'),Field('o_35_x'),Field('u_35_2'),Field('o_35_2'),Field('u_35_1'),Field('o_35_1'),Field('u_35_x'),Field('hour'),Field('update_time'),Field('btts_1'),Field('btts_2'),Field('btts_x'),Field('btts_no_x'),Field('btts_no_1'),Field('btts_no_2'),Field('u_15_1'),Field('u_15_x'),Field('u_15_2'),Field('o_15_x'),Field('o_15_1'),Field('o_15_2'),Field('eh_min_1_2'),Field('eh_min_1_x1'),Field('eh_plus_1_1'),Field('eh_plus_1_x2'),Field('eh_plus_1_2'),Field('eh_plus_1_x1'),Field('eh_plus_1_x'),Field('eh_min_1_x'),Field('btts_yes'),Field('btts_no'), migrate=False)
    # wynik maps bookmaker -> list of row-sets, one per coupon match
    wynik=defaultdict(str)
    for i in range(0,len(home_list)):
        for bookie in bookies:
            if bookie not in wynik.keys():
                wynik[bookie]=[]
            #wynik[bookie].append(db((db[bookie].home=='Leicester') & (db[bookie].away=='Watford')).select())
            wynik[bookie].append(db((db[bookie].home==home_list[i]) & (db[bookie].away==away_list[i])).select())
    # Replace empty look-ups with the 'Test'/'Test' placeholder row-set
    for i in range(0,len(home_list)):
        for bookie in bookies:
            if len(wynik[bookie][i])==0:
                wynik[bookie][i]=db((db[bookie].home=='Test') & (db[bookie].away=='Test')).select()
    #wynik2=db((db.bookie.home=='Leicester') & (db.bookie.away=='Watford')).select()
    #wynik=[wynik1,wynik2]
    #else
    # wynik = wynik&
    wynik_temp=db(((db.db_fortuna.home=='Barcelona'))).select()
    #q = (home=='Leicester' & away=='Watford')
    #s=db(q)
    #for i in range(0,len(home_list)):
    # rows=db((home==home_list[i]) & (away==away_list[i])).select()
    cos=typ_list[1] #rows = db((home=='Leicester') & (away=='Watford')).select()
    ff=wynik['db_fortuna'][0]
    #select distinct db_sts.home, db_sts.away, db_sts.data, db_sts.game_1 as sts_1, db_fortuna.game_1 as fortuna_1 from db_sts JOIN db_fortuna on (db_sts.home=db_fortuna.home and db_sts.away=db_fortuna.away)
    #where db_sts.home like 'Leicester' and db_sts.away like 'Watford'
    #http://web2py.com/books/default/chapter/42/06/warstwa-abstracji-bazy-danych
    return locals()
def hello():
    """Same coupon parsing and per-bookmaker lookup as test(), plus the
    accumulated odds product (full_kurs) per bookmaker.  Returns locals()
    so web2py exposes them to the view.

    NOTE(review): near-duplicate of test() above — candidates for a shared
    helper.
    """
    #rows=s.select()
    #rows=s.select()
    # Coupon identifier supplied by the client as ?c_id=...
    check_id=request.vars.c_id
    #x=subprocess.call(['python3', '/home/lesinle/odds/read_coupon_fortuna_wo_sql.py',check_id])
    # Run the external scraper and capture its stdout.
    # NOTE(review): check_output returns bytes on Python 3, so the str-pattern
    # re.sub below assumes a Python 2 (web2py) runtime — confirm.
    x=subprocess.check_output(['python3','/home/lesinle/odds/read_coupon_fortuna_wo_sql.py',check_id])
    # Strip the list brackets from the printed python-list text
    y1=re.sub('\[','',x)
    y2=re.sub(']','',y1)
    y=y2.split("'")
    #print (y
    #y = [x for x in y if x != ['[',']']]
    #y.remove('[')
    # Keep only the payload tokens, dropping separator fragments
    new_list=[]
    for el in y:
        if el not in [",","[","']","['","]","[","],","],[","], ["," ","\n",", ","'",",\n",""]:
            new_list.append(el)
    home2=[]
    home_list=[]
    away_list=[]
    typ_list=[]
    typ_name_list=[]
    # Tokens come in groups of four: home, away, bet type, bet type name
    for i in range(0,len(new_list)-1,4):
        home_list.append(new_list[i])
        away_list.append(new_list[i+1])
        typ_list.append(new_list[i+2])
        typ_name_list.append(new_list[i+3])
    db = DAL('sqlite://db.sqlite')
    db_name=db._uri
    bookies=['db_fortuna','db_sts','db_iforbet','db_lvbet','db_totolotek']
    for bookie in bookies:
        db.define_table(bookie, Field('home'),Field('away'),Field('game_1'),Field('game_0'),Field('game_2'),Field('game_10'),Field('game_02'),Field('game_12'),Field('data'),Field('Sport'),Field('League'),Field('country'),Field('dnb_1'),Field('dnb_2'),Field('o_05'),Field('o_15'),Field('o_25'),Field('o_35'),Field('o_45'),Field('o_55'),Field('o_65'),Field('o_75'),Field('o_85'),Field('o_95'),Field('u_05'),Field('u_15'),Field('u_25'),Field('u_35'),Field('u_45'),Field('u_55'),Field('u_65'),Field('u_75'),Field('u_85'),Field('u_95'),Field('ht_ft_11'),Field('ht_ft_1x'),Field('ht_ft_x1'),Field('ht_ft_22'),Field('ht_ft_x2'),Field('ht_ft_2x'),Field('ht_ft_xx'),Field('ht_ft_12'),Field('ht_ft_21'),Field('first_half_1'),Field('first_half_x'),Field('first_half_2'),Field('first_half_10'),Field('first_half_02'),Field('first_half_12'),Field('eh_min_1_1'),Field('eh_min_1_x2'),Field('u_25_1'),Field('o_25_1'),Field('u_25_x'),Field('o_25_x'),Field('u_25_2'),Field('o_25_2'),Field('first_goal_1'),Field('first_goal_2'),Field('first_goal_0'),Field('o_35_x'),Field('u_35_2'),Field('o_35_2'),Field('u_35_1'),Field('o_35_1'),Field('u_35_x'),Field('hour'),Field('update_time'),Field('btts_1'),Field('btts_2'),Field('btts_x'),Field('btts_no_x'),Field('btts_no_1'),Field('btts_no_2'),Field('u_15_1'),Field('u_15_x'),Field('u_15_2'),Field('o_15_x'),Field('o_15_1'),Field('o_15_2'),Field('eh_min_1_2'),Field('eh_min_1_x1'),Field('eh_plus_1_1'),Field('eh_plus_1_x2'),Field('eh_plus_1_2'),Field('eh_plus_1_x1'),Field('eh_plus_1_x'),Field('eh_min_1_x'),Field('btts_yes'),Field('btts_no'), migrate=False)
    # wynik maps bookmaker -> list of row-sets, one per coupon match
    wynik=defaultdict(str)
    for i in range(0,len(home_list)):
        for bookie in bookies:
            if bookie not in wynik.keys():
                wynik[bookie]=[]
            #wynik[bookie].append(db((db[bookie].home=='Leicester') & (db[bookie].away=='Watford')).select())
            wynik[bookie].append(db((db[bookie].home==home_list[i]) & (db[bookie].away==away_list[i])).select())
    # Replace empty look-ups with the 'Test'/'Test' placeholder row-set
    for i in range(0,len(home_list)):
        for bookie in bookies:
            if len(wynik[bookie][i])==0:
                wynik[bookie][i]=db((db[bookie].home=='Test') & (db[bookie].away=='Test')).select()
    #wynik2=db((db.bookie.home=='Leicester') & (db.bookie.away=='Watford')).select()
    #wynik=[wynik1,wynik2]
    #else
    # wynik = wynik&
    # wynik_temp=db(((db.db_sts.home=='Leicester') & (db.db_sts.away=='Watford')) | ((db.db_fortuna.home=='Leicester') & (db.db_fortuna.away=='Watford'))).select()
    #q = (home=='Leicester' & away=='Watford')
    #s=db(q)
    #for i in range(0,len(home_list)):
    # rows=db((home==home_list[i]) & (away==away_list[i])).select()
    cos=typ_list[1] #rows = db((home=='Leicester') & (away=='Watford')).select()
    #for bookie in bookies:
    # if len(wynik[bookie])==0:
    # wynik[bookie].append(db((db[bookie].home=='Test') & (db[bookie].away=='Test')).select())
    # Accumulated odds product per bookmaker; missing odds multiply by 1
    full_kurs= defaultdict(lambda:1)
    for i in range(0,len(home_list)):
        for bookie in bookies:
            if bookie not in full_kurs.keys():
                full_kurs[bookie]=1
            # NOTE(review): bare except silently treats any lookup/parse
            # failure as odds of 1 — deliberate best-effort, kept as-is
            try:
                full_kurs[bookie]=full_kurs[bookie]*float(wynik[bookie][i][0][typ_list[i]])
            except:
                full_kurs[bookie]=full_kurs[bookie]*1
    #select distinct db_sts.home, db_sts.away, db_sts.data, db_sts.game_1 as sts_1, db_fortuna.game_1 as fortuna_1 from db_sts JOIN db_fortuna on (db_sts.home=db_fortuna.home and db_sts.away=db_fortuna.away)
    #where db_sts.home like 'Leicester' and db_sts.away like 'Watford'
    #http://web2py.com/books/default/chapter/42/06/warstwa-abstracji-bazy-danych
    return locals()
| 65.748201
| 1,581
| 0.636831
| 1,547
| 9,139
| 3.475113
| 0.108597
| 0.042411
| 0.030134
| 0.020461
| 0.932478
| 0.930246
| 0.919829
| 0.916667
| 0.910342
| 0.894345
| 0
| 0.045048
| 0.13043
| 9,139
| 138
| 1,582
| 66.224638
| 0.631433
| 0.259328
| 0
| 0.831461
| 0
| 0
| 0.241892
| 0.014281
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033708
| false
| 0
| 0.033708
| 0.011236
| 0.089888
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a8c5c8070b1badb3f215de596de252e2fa11f159
| 160
|
py
|
Python
|
Models/Model_1/ResNet/__init__.py
|
akanimax/toxic-comment-identification-tensorflow
|
a1d065639d8b518c0ac1dc53e98e09642e258bb6
|
[
"MIT"
] | null | null | null |
Models/Model_1/ResNet/__init__.py
|
akanimax/toxic-comment-identification-tensorflow
|
a1d065639d8b518c0ac1dc53e98e09642e258bb6
|
[
"MIT"
] | null | null | null |
Models/Model_1/ResNet/__init__.py
|
akanimax/toxic-comment-identification-tensorflow
|
a1d065639d8b518c0ac1dc53e98e09642e258bb6
|
[
"MIT"
] | null | null | null |
""" This package contains the code for the first model to be trained for this problem
"""
from __future__ import print_function
from __future__ import division
| 32
| 85
| 0.80625
| 24
| 160
| 5
| 0.75
| 0.166667
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 160
| 5
| 86
| 32
| 0.888889
| 0.50625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
7656fe442f4b5448c8efa639987f559228291109
| 45
|
py
|
Python
|
projects/faces/landmark/landmark/__init__.py
|
Bingwen-Hu/hackaway
|
69727d76fd652390d9660e9ea4354ba5cc76dd5c
|
[
"BSD-2-Clause"
] | null | null | null |
projects/faces/landmark/landmark/__init__.py
|
Bingwen-Hu/hackaway
|
69727d76fd652390d9660e9ea4354ba5cc76dd5c
|
[
"BSD-2-Clause"
] | null | null | null |
projects/faces/landmark/landmark/__init__.py
|
Bingwen-Hu/hackaway
|
69727d76fd652390d9660e9ea4354ba5cc76dd5c
|
[
"BSD-2-Clause"
] | null | null | null |
from .api import detect
from .api import show
| 22.5
| 23
| 0.8
| 8
| 45
| 4.5
| 0.625
| 0.388889
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 45
| 2
| 24
| 22.5
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
76746358001a7533446098f9eddc1122638f9f6f
| 163,522
|
py
|
Python
|
Aci_Cal_Toolkit.py
|
riccardo123github/ACI-Python-Scripts-Automation
|
b3bd986628c9c7753345acab2fdf48f13d6580fa
|
[
"Apache-2.0"
] | 1
|
2021-08-02T09:00:25.000Z
|
2021-08-02T09:00:25.000Z
|
Aci_Cal_Toolkit.py
|
riccardo123github/ACI-Python-Scripts-Automation
|
b3bd986628c9c7753345acab2fdf48f13d6580fa
|
[
"Apache-2.0"
] | null | null | null |
Aci_Cal_Toolkit.py
|
riccardo123github/ACI-Python-Scripts-Automation
|
b3bd986628c9c7753345acab2fdf48f13d6580fa
|
[
"Apache-2.0"
] | null | null | null |
import requests
import json
import sys
import collections
import jinja2
import ipaddress
import time
import re
import urllib3
# Presumably suppresses InsecureRequestWarning noise, since every request
# below is made with verify=False — TODO confirm intent.
urllib3.disable_warnings()
# Global options for debugging
PRINT_PAYLOAD = True
# flag variable to avoid pushing anything to APIC (dry-run when False)
PUSH_TO_APIC = False
PRINT_RESPONSE_TEXT_ALWAYS = False
PRINT_RESPONSE_TEXT_ON_FAIL = True
# Global path to main json directory (Jinja templates live under here)
json_path = 'C:/path_to_json_template_dir/jsondata/'
# Global list of allowed statuses accepted by the policy methods
valid_status = ['created', 'created,modified', 'deleted']
# Exception Classes
class InsufficientArgs(Exception):
    """Raised when a method is called without all of its required kwargs."""
class InvalidArg(Exception):
    """Raised when a supplied kwarg value fails validation."""
class LoginFailed(Exception):
    """Raised when authentication against the APIC does not succeed."""
# Function to validate input for each method
def process_kwargs(required_args, optional_args, **kwargs):
    """Validate **kwargs against the required/optional argument templates.

    Every key of required_args must be present in kwargs, otherwise
    InsufficientArgs is raised.  Recognised values are copied into the
    two template dicts (mutating them in place) and a single merged dict
    is returned for Jinja template rendering.  Unknown kwargs are
    silently ignored.
    """
    missing = [key for key in required_args if key not in kwargs]
    if missing:
        raise InsufficientArgs('Insufficient required arguments.')
    # Copy recognised values into the caller-supplied template dicts
    for key, value in kwargs.items():
        if key in required_args:
            required_args[key] = value
        if key in optional_args:
            optional_args[key] = value
    # Merge (optional values win on any key collision), Python<3.6 safe
    merged = dict(required_args)
    merged.update(optional_args)
    return merged
# Function to execute HTTP Post
def post(apic, payload, cookies, uri, section=''):
    """POST *payload* to https://<apic>/api/node/<uri>.json.

    Returns the HTTP status code; returns 200 without sending anything
    when PUSH_TO_APIC is False (dry run); returns 666 when the request
    raised a non-connection exception.  *section* is the template file
    name, used only for the failure message.
    """
    # In dry-run mode (or when payload printing is enabled) show what
    # would be sent
    if PRINT_PAYLOAD or not PUSH_TO_APIC:
        print('Adding to the object: "'+uri+'" the following json string:')
        print(payload)
    s = requests.Session()
    r = ''
    if PUSH_TO_APIC:
        # Keep trying until a response object replaces the '' sentinel.
        # NOTE(review): there is no retry cap — an unreachable APIC
        # loops forever, 5 s per attempt.
        while r == '':
            try:
                r = s.post('https://{}/api/node/{}.json'.format(apic, uri),
                           data=payload, cookies=cookies, verify=False)
                status = r.status_code
            except requests.exceptions.ConnectionError as e:
                print("Connection error, pausing before retrying. Error: {}"
                      .format(e))
                time.sleep(5)
            except Exception as e:
                # section[:-5] strips the trailing ".json"
                print("Method {} failed. Exception: {}".format(section[:-5], e))
                status = 666
                return(status)
        if PRINT_RESPONSE_TEXT_ALWAYS:
            print(r.text)
        if status != 200 and PRINT_RESPONSE_TEXT_ON_FAIL:
            print(r.text)
    else:
        # Dry run: report success without touching the APIC
        return 200
    return status
# Class must be instantiated with APIC IP address, username, and password
# the login method returns the APIC cookies.
# Class must be instantiated with APIC IP address, username, and password
# the login method returns the APIC cookies.
class FabLogin(object):
    """Authenticates against the APIC and keeps the session cookies."""
    def __init__(self, apic, user, pword):
        # APIC host/IP and the credentials used by login()
        self.apic = apic
        self.user = user
        self.pword = pword
    def login(self):
        """POST aaaLogin and return (and store) the session cookies.

        Calls sys.exit() on HTTP 400/401/403/404; raises LoginFailed if
        the request itself fails.
        """
        # Load login json payload (doubled braces survive str.format)
        payload = '''
        {{
            "aaaUser": {{
                "attributes": {{
                    "name": "{user}",
                    "pwd": "{pword}"
                }}
            }}
        }}
        '''.format(user=self.user, pword=self.pword)
        payload = json.loads(payload,
                             object_pairs_hook=collections.OrderedDict)
        s = requests.Session()
        # Try the request, if exception, exit program w/ error
        try:
            # Verify is disabled as there are issues if it is enabled
            r = s.post('https://{}/api/mo/aaaLogin.json'.format(self.apic),
                       data=json.dumps(payload), verify=False)
            # Capture HTTP status code from the request
            status = r.status_code
            # Capture the APIC cookie for all other future calls
            cookies = r.cookies
            # Log login status/time(?) somewhere
            if status == 400:
                print("Error 400 - Bad Request - ABORT!")
                print("Probably have a bad URL")
                sys.exit()
            if status == 401:
                print("Error 401 - Unauthorized - ABORT!")
                print("Probably have incorrect credentials")
                sys.exit()
            if status == 403:
                print("Error 403 - Forbidden - ABORT!")
                print("Server refuses to handle your request")
                sys.exit()
            if status == 404:
                print("Error 404 - Not Found - ABORT!")
                print("Seems like you're trying to POST to a page that doesn't"
                      " exist.")
                sys.exit()
        except Exception as e:
            print("Something went wrong logging into the APIC - ABORT!")
            # Log exit reason somewhere
            raise LoginFailed(e)
        self.cookies = cookies
        return cookies
# Class must be instantiated with APIC IP address and cookies
class FabPodPol(object):
    """Fabric pod-policy configuration: renders Jinja JSON templates and
    POSTs them to the APIC via post()."""
    def __init__(self, apic, cookies):
        self.apic = apic
        self.cookies = cookies
        # Jinja environment rooted at the FabPodPol template directory
        self.templateLoader = jinja2.FileSystemLoader(
            searchpath=(json_path + 'FabPodPol/'))
        self.templateEnv = jinja2.Environment(loader=self.templateLoader)
    # Method must be called with the following kwargs.
    # name: Name of the node being deployed
    # id: ID of the node being deployed as an integer (i.e. 101)
    # serial: Serial number of device being deployed
    # descr: (Optional) Description of the node
    # fabric: (Optional) Default is 1 - will be relevant for xconnect
    # pod: (Optional) Default is 1 - will be relevant for multipod
    def comission_hw(self, **kwargs):
        """Register (commission) a switch node in the fabric."""
        # Dicts for required and optional args
        required_args = {'name': '',
                         'id': '',
                         'serial': ''}
        optional_args = {'descr': '',
                         'fabric': '1',
                         'pod': '1'}
        # Validate inputs, return dict of template vars
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        # Validate inputs
        # NOTE(review): int() raises ValueError (not InvalidArg) on a
        # non-numeric string, and a legal id of 0 would be rejected —
        # confirm whether that is intended.
        if not int(templateVars['id']):
            raise InvalidArg('ID must be an integer')
        else:
            templateVars['id'] = int(templateVars['id'])
        if not int(templateVars['fabric']):
            raise InvalidArg('Fabric ID must be an integer')
        else:
            templateVars['fabric'] = int(templateVars['fabric'])
        if not int(templateVars['pod']):
            raise InvalidArg('Pod ID must be an integer')
        else:
            templateVars['pod'] = int(templateVars['pod'])
        # Locate template for method
        template_file = "comission_hw.json"
        template = self.templateEnv.get_template(template_file)
        # Render template w/ values from dicts
        payload = template.render(templateVars)
        # Handle request
        uri = 'mo/uni'
        status = post(self.apic, payload, self.cookies, uri, template_file)
        return status
    # Method must be called with the following kwargs.
    # address: Name/IP of the NTP server
    # status: created | created,modified | deleted
    def ntp(self, **kwargs):
        """Configure an NTP server on the fabric."""
        required_args = {'address': '',
                         'status': ''}
        optional_args = {}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        # NOTE(review): ipaddress.ip_address raises ValueError for an
        # invalid address, so the InvalidArg branch below is unlikely to
        # ever fire — confirm.
        if not ipaddress.ip_address(templateVars['address']):
            raise InvalidArg('Address must be a valid IPv4 address')
        if templateVars['status'] not in valid_status:
            raise InvalidArg('Status invalid')
        template_file = "ntp.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        uri = 'mo/uni'
        status = post(self.apic, payload, self.cookies, uri, template_file)
        return status
    # Method must be called with the following kwargs.
    # name: Name of the node being deployed
    # address: IP of DNS Server
    # status: (Of the DNS Server) created | created,modified | deleted
    # domain: (Optional) DNS Domain
    # domain_status: (Optional) created | created,modified | deleted
    # preferred: (Optional) yes | no
    # domain_default: (Optional) yes | no
    def dns(self, **kwargs):
        """Configure a DNS server (and optional domain) on the default
        DNS profile, then bind the profile to the mgmt EPG."""
        required_args = {'address': '',
                         'status': ''}
        optional_args = {'domain': '',
                         'domain_status': 'deleted',
                         'preferred': 'no',
                         'domain_default': 'no'}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        if not ipaddress.ip_address(templateVars['address']):
            raise InvalidArg('Address must be a valid IPv4 address')
        if templateVars['status'] not in valid_status:
            raise InvalidArg('Status invalid')
        template_file = "dns.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        uri = 'mo/uni/fabric/dnsp-default'
        # NOTE(review): this status is overwritten by the second POST
        # below — a failure of the first POST is silently lost.
        status = post(self.apic, payload, self.cookies, uri, template_file)
        template_file = "dns_profile.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        uri = 'mo/uni/fabric/dnsp-default/rsProfileToEpg'
        status = post(self.apic, payload, self.cookies, uri, template_file)
        return status
    # Method must be called with the following kwargs.
    # asn: Fabric BGP ASN as an integer
    # status: created | created,modified | deleted
    def fabric_bgp(self, **kwargs):
        """Set the fabric BGP AS number (1-65535)."""
        required_args = {'asn': '',
                         'status': ''}
        optional_args = {}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        if not (int(templateVars['asn']) in range(1, 65536)):
            raise InvalidArg('Invalid BGP ASN')
        else:
            templateVars['asn'] = int(templateVars['asn'])
        if templateVars['status'] not in valid_status:
            raise InvalidArg('Status invalid')
        template_file = "fabric_bgp.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        uri = 'mo/uni/fabric/bgpInstP-default/as'
        status = post(self.apic, payload, self.cookies, uri, template_file)
        return status
    # Method must be called with the following kwargs.
    # rr: ID of node to be route reflector
    # status: created | created,modified | deleted
    def fabric_rr(self, **kwargs):
        """Add/remove a spine node as a BGP route reflector."""
        required_args = {'rr': '',
                         'status': ''}
        optional_args = {}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        if not int(templateVars['rr']):
            raise InvalidArg('Route Reflector ID must be an integer')
        else:
            templateVars['rr'] = int(templateVars['rr'])
        if templateVars['status'] not in valid_status:
            raise InvalidArg('Status invalid')
        template_file = "fabric_rr.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        uri = 'mo/uni/fabric/bgpInstP-default/rr/node-{}'.format(
            templateVars['rr'])
        status = post(self.apic, payload, self.cookies, uri, template_file)
        return status
    # Method must be called with the following kwargs.
    # name: Name of pod-policy to be created
    # status: created | created,modified | deleted
    def pod_pol(self, **kwargs):
        """Create a pod policy group and assign it to the default pod
        profile."""
        required_args = {'name': '',
                         'status': ''}
        optional_args = {}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        if templateVars['status'] not in valid_status:
            raise InvalidArg('Status invalid')
        template_file = "pod_pol.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        # NOTE(review): .format() here has no placeholder to fill — the
        # argument is discarded; possibly a leftover from a templated URI.
        uri = 'mo/uni/fabric/funcprof'.format(templateVars['name'])
        # NOTE(review): this status is overwritten by the second POST below.
        status = post(self.apic, payload, self.cookies, uri, template_file)
        template_file = "pod_pol_assign.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        uri = 'mo/uni/fabric/podprof-default/pods-default-typ-ALL/rspodPGrp'
        status = post(self.apic, payload, self.cookies, uri, template_file)
        return status
# Class must be instantiated with APIC IP address and cookies
class FabAccPol(object):
    """Fabric Access Policies.

    Class must be instantiated with APIC IP address and cookies.  Each
    public method validates its kwargs, renders a Jinja2 template from
    json_path + 'FabAccPol/' and POSTs it to the APIC, returning the
    status of that POST.
    """
    def __init__(self, apic, cookies):
        self.apic = apic
        self.cookies = cookies
        self.templateLoader = jinja2.FileSystemLoader(
            searchpath=(json_path + 'FabAccPol/'))
        self.templateEnv = jinja2.Environment(loader=self.templateLoader)

    @staticmethod
    def _to_int(value, message):
        # Convert value to an int, raising InvalidArg on bad input.  The
        # previous inline 'if not int(value)' checks leaked a raw
        # ValueError for non-numeric strings, making the InvalidArg
        # branch unreachable in that case; this helper fixes that while
        # still rejecting 0 (never a valid VLAN/node/module/port/FEX ID
        # here) with the same message.
        try:
            value = int(value)
        except (TypeError, ValueError):
            raise InvalidArg(message)
        if not value:
            raise InvalidArg(message)
        return value

    @staticmethod
    def _validate_status(status):
        # Shared status validation used by every create/modify/delete call.
        if status not in valid_status:
            raise InvalidArg('Status invalid')

    def _deploy(self, template_file, templateVars, uri):
        # Render template_file with templateVars and POST the payload.
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        return post(self.apic, payload, self.cookies, uri, template_file)

    def cdp(self, **kwargs):
        """Create/modify/delete a CDP interface policy.

        kwargs:
            name: The name of the CDP policy
            state: enabled | disabled
            status: created | created,modified | deleted
        """
        required_args = {'name': '', 'state': '', 'status': ''}
        templateVars = process_kwargs(required_args, {}, **kwargs)
        self._validate_status(templateVars['status'])
        uri = 'mo/uni/infra/cdpIfP-{}'.format(templateVars['name'])
        return self._deploy("cdp.json", templateVars, uri)

    def lldp(self, **kwargs):
        """Create/modify/delete an LLDP interface policy.

        kwargs:
            name: The name of the LLDP policy
            state: enabled | disabled
                Note: The configured state is deployed to both Tx and Rx
            status: created | created,modified | deleted
        """
        required_args = {'name': '', 'state': '', 'status': ''}
        templateVars = process_kwargs(required_args, {}, **kwargs)
        self._validate_status(templateVars['status'])
        uri = 'mo/uni/infra/lldpIfP-{}'.format(templateVars['name'])
        return self._deploy("lldp.json", templateVars, uri)

    def link(self, **kwargs):
        """Create/modify/delete a link-level (speed/auto-neg) policy.

        kwargs:
            name: The name of the Link policy
            auto_neg: on | off
            speed: 100M | 1G | 10G | 40G | auto
                Note: 100G should be available soon if not already in
                some versions
            status: created | created,modified | deleted
        """
        required_args = {'name': '', 'auto_neg': '', 'speed': '',
                         'status': ''}
        templateVars = process_kwargs(required_args, {}, **kwargs)
        self._validate_status(templateVars['status'])
        uri = 'mo/uni/infra/hintfpol-{}'.format(templateVars['name'])
        return self._deploy("link.json", templateVars, uri)

    def pc(self, **kwargs):
        """Create/modify/delete a Port-Channel (LACP) policy.

        kwargs:
            name: The name of the Port-Channel policy
            mode: off | mac-pin | active
                Note: 'off' = static mode-on
            status: created | created,modified | deleted
        """
        # NOTE: earlier comments documented a 'state' kwarg for this
        # method; it has never been part of the accepted arguments.
        required_args = {'name': '', 'mode': '', 'status': ''}
        templateVars = process_kwargs(required_args, {}, **kwargs)
        self._validate_status(templateVars['status'])
        uri = 'mo/uni/infra/lacplagp-{}'.format(templateVars['name'])
        return self._deploy("pc.json", templateVars, uri)

    def ppv(self, **kwargs):
        """Create/modify/delete a Per Port VLAN policy.

        kwargs:
            name: The name of the Per Port VLAN policy
            state: enabled | disabled
            status: created | created,modified | deleted
        """
        required_args = {'name': '', 'state': '', 'status': ''}
        templateVars = process_kwargs(required_args, {}, **kwargs)
        self._validate_status(templateVars['status'])
        uri = 'mo/uni/infra/l2IfP-{}'.format(templateVars['name'])
        return self._deploy("ppv.json", templateVars, uri)

    def mcp_intf(self, **kwargs):
        """Create/modify/delete an MCP interface policy.

        kwargs:
            name: The name of the MCP interface policy
            state: enabled | disabled
            status: created | created,modified | deleted
        """
        required_args = {'name': '', 'state': '', 'status': ''}
        templateVars = process_kwargs(required_args, {}, **kwargs)
        self._validate_status(templateVars['status'])
        uri = 'mo/uni/infra/mcpIfP-{}'.format(templateVars['name'])
        return self._deploy("mcp_intf.json", templateVars, uri)

    def mcp_global(self, **kwargs):
        """Configure the global (default) MCP instance policy.

        kwargs:
            password: string for global MCP password
            state: enabled | disabled
        """
        required_args = {'password': '', 'state': ''}
        templateVars = process_kwargs(required_args, {}, **kwargs)
        return self._deploy("mcp_global.json", templateVars,
                            'mo/uni/infra/mcpInstP-default')

    def err_disable(self, **kwargs):
        """Configure error-disabled recovery for a given event.

        kwargs:
            event: mcp-loop | ep-move | bpduguard
            state: true | false
        """
        required_args = {'event': '', 'state': ''}
        templateVars = process_kwargs(required_args, {}, **kwargs)
        uri = ('mo/uni/infra/edrErrDisRecoverPol-default/edrEventP-event-{}'
               .format(templateVars['event']))
        return self._deploy("err_disable.json", templateVars, uri)

    def vl_pool(self, **kwargs):
        """Create/modify/delete a VLAN pool with one encap block.

        kwargs:
            name: The name of the parent VLAN Pool
            mode: static | dynamic
            range_mode: static | dynamic
            start: Starting VLAN - as an integer
            end: Ending VLAN - as an integer
            status: created | created,modified | deleted
        """
        required_args = {'name': '', 'mode': '', 'range_mode': '',
                         'start': '', 'end': '', 'status': ''}
        templateVars = process_kwargs(required_args, {}, **kwargs)
        templateVars['start'] = self._to_int(
            templateVars['start'], 'VLAN IDs must be an integer')
        templateVars['end'] = self._to_int(
            templateVars['end'], 'VLAN IDs must be an integer')
        self._validate_status(templateVars['status'])
        uri = ('mo/uni/infra/vlanns-[{}]-{}'
               .format(templateVars['name'], templateVars['mode']))
        return self._deploy("vl_pool.json", templateVars, uri)

    def aep(self, **kwargs):
        """Create/modify/delete an Attachable Entity Profile (AEP).

        kwargs:
            name: The name of the AEP
            status: created | created,modified | deleted
            infra: created | created,modified | deleted
                'deleted' if no infra VLAN is needed, 'created,modified'
                if there is an infra VLAN
            infra_vlan (optional): infrastructure VLAN as an integer
            override (optional): created | created,modified | deleted
                'deleted' if no override is needed, 'created,modified'
                if there is an override policy
            override_pc (optional): Name of the port-channel policy
            override_cdp (optional): Name of the cdp policy
            override_lldp (optional): Name of the lldp policy
        """
        required_args = {'name': '', 'status': '', 'infra': 'deleted'}
        optional_args = {'infra_vlan': '0',
                         'override': 'deleted',
                         'override_pc': '',
                         'override_cdp': '',
                         'override_lldp': ''}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        # Only coerce the infra VLAN when an infra VLAN is being deployed.
        if templateVars['infra'] == 'created,modified':
            templateVars['infra_vlan'] = self._to_int(
                templateVars['infra_vlan'],
                'Infra VLAN ID must be an integer')
        self._validate_status(templateVars['status'])
        self._validate_status(templateVars['infra'])
        self._validate_status(templateVars['override'])
        if templateVars['override'] == 'created,modified':
            template_file = "aep_override.json"
        else:
            template_file = "aep_no_override.json"
        uri = 'mo/uni/infra/attentp-{}'.format(templateVars['name'])
        return self._deploy(template_file, templateVars, uri)

    def l3_dom(self, **kwargs):
        """Create/modify/delete an L3-Out Domain.

        kwargs:
            name: Name of the L3-Out Domain
            status: created | created,modified | deleted
            vlan_pool: Name of the VLAN pool to associate to the L3 Out
        """
        required_args = {'name': '', 'status': '', 'vlan_pool': ''}
        templateVars = process_kwargs(required_args, {}, **kwargs)
        self._validate_status(templateVars['status'])
        uri = 'mo/uni/l3dom-{}'.format(templateVars['name'])
        return self._deploy("l3_dom.json", templateVars, uri)

    def phys_dom(self, **kwargs):
        """Create/modify/delete a Physical Domain.

        kwargs:
            name: Name of the Physical Domain
            status: created | created,modified | deleted
            vlan_pool: Name of the VLAN pool to associate to the
                Physical Domain
        """
        required_args = {'name': '', 'status': '', 'vlan_pool': ''}
        templateVars = process_kwargs(required_args, {}, **kwargs)
        self._validate_status(templateVars['status'])
        uri = 'mo/uni/phys-{}'.format(templateVars['name'])
        return self._deploy("phys_dom.json", templateVars, uri)

    def l3_aep(self, **kwargs):
        """Associate an L3 Domain to an AEP.

        kwargs:
            name: Name of the AEP
            status: created | created,modified | deleted
            l3_dom: Name of the L3 Domain to be hooked to the AEP
        """
        required_args = {'name': '', 'status': '', 'l3_dom': ''}
        templateVars = process_kwargs(required_args, {}, **kwargs)
        self._validate_status(templateVars['status'])
        uri = 'mo/uni/infra/attentp-{}'.format(templateVars['name'])
        return self._deploy("l3_aep.json", templateVars, uri)

    def phys_aep(self, **kwargs):
        """Associate a Physical Domain to an AEP.

        kwargs:
            name: Name of the AEP
            status: created | created,modified | deleted
            dom_name: Name of the Domain to be hooked to the AEP
        """
        required_args = {'name': '', 'status': '', 'dom_name': ''}
        templateVars = process_kwargs(required_args, {}, **kwargs)
        self._validate_status(templateVars['status'])
        uri = 'mo/uni/infra/attentp-{}'.format(templateVars['name'])
        return self._deploy("phys_aep.json", templateVars, uri)

    def vpc(self, **kwargs):
        """Create/modify/delete a vPC explicit protection group.

        kwargs:
            name: Name of the vPC
            id: vPC ID as an integer
            status: created | created,modified | deleted
            sw1: Node 1 in integer (i.e. 101)
            sw2: Node 2 in integer (i.e. 102)
        """
        required_args = {'name': '', 'id': '', 'status': '', 'sw1': '',
                         'sw2': ''}
        templateVars = process_kwargs(required_args, {}, **kwargs)
        for key in ('id', 'sw1', 'sw2'):
            templateVars[key] = self._to_int(
                templateVars[key], 'ID must be an integer')
        self._validate_status(templateVars['status'])
        uri = 'mo/uni/fabric/protpol/expgep-{}'.format(templateVars['name'])
        return self._deploy("vpc.json", templateVars, uri)

    def swPro_swSel_vpc(self, **kwargs):
        """Create a switch profile for a pair of switches (vPC).

        kwargs:
            name: Name of the Switch Profile
            swSelName: name of the switch selector profile
            status: created | created,modified | deleted
            sw1: Node 1 in integer (i.e. 101)
            sw2: Node 2 in integer (i.e. 102)

        Two node bulks are created, since theoretically a vpc could
        exist between any couple of leaf switches.  In DC environments
        where vpcs are built only toward "contiguous" switches (101-102,
        105-106, ...) a single bulk with different 'from' and 'to'
        values would have sufficed.
        """
        required_args = {'name': '', 'swSelName': '', 'status': '',
                         'sw1': '', 'sw2': ''}
        templateVars = process_kwargs(required_args, {}, **kwargs)
        for key in ('sw1', 'sw2'):
            templateVars[key] = self._to_int(
                templateVars[key], 'ID must be an integer')
        self._validate_status(templateVars['status'])
        uri = 'mo/uni/infra/nprof-{}'.format(templateVars['name'])
        return self._deploy("swPro_swSel_vpc.json", templateVars, uri)

    def swPro_swSel_single(self, **kwargs):
        """Create a switch profile for a single switch.

        kwargs:
            name: Name of the Switch Profile
            swSelName: Name of the Single Switch Selector
            status: created | created,modified | deleted
            sw1: Node 1 in integer (i.e. 101)
        """
        required_args = {'name': '', 'swSelName': '', 'status': '',
                         'sw1': ''}
        templateVars = process_kwargs(required_args, {}, **kwargs)
        templateVars['sw1'] = self._to_int(
            templateVars['sw1'], 'ID must be an integer')
        self._validate_status(templateVars['status'])
        uri = 'mo/uni/infra/nprof-{}'.format(templateVars['name'])
        return self._deploy("swPro_swSel_single.json", templateVars, uri)

    def int_pol_grp_vpc(self, **kwargs):
        """Create/modify/delete a vPC/PC interface policy group.

        kwargs:
            name: Name of the Interface Policy Group
            status: created | created,modified | deleted
            lag_type: node | link
                Note: Node = vPC, Link = PC
            lldp: Name of LLDP Policy
            cdp: Name of CDP Policy
            aep: Name of AEP
            mcp: Name of MCP Policy
            lag: Name of Port-Channel Policy
            link: Name of Link Policy
            ppv (optional): Name of Per Port VLAN policy
            storm (optional): Name of Storm Control policy
        """
        required_args = {'name': '', 'status': '', 'lag_type': '',
                         'lldp': '', 'cdp': '', 'aep': '', 'mcp': '',
                         'lag': '', 'link': ''}
        optional_args = {'ppv': '', 'storm': ''}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        self._validate_status(templateVars['status'])
        uri = ('mo/uni/infra/funcprof/accbundle-{}'
               .format(templateVars['name']))
        return self._deploy("int_pol_grp_vpc.json", templateVars, uri)

    def int_pol_grp_access(self, **kwargs):
        """Create/modify/delete an access-port interface policy group.

        kwargs:
            name: Name of the Interface Policy Group
            status: created | created,modified | deleted
            lldp: Name of LLDP Policy
            cdp: Name of CDP Policy
            aep: Name of AEP
            mcp: Name of MCP Policy
            link: Name of Link Policy
            ppv (optional): Name of Per Port VLAN policy
            storm (optional): Name of Storm Control policy
        """
        required_args = {'name': '', 'status': '', 'lldp': '', 'cdp': '',
                         'aep': '', 'mcp': '', 'link': ''}
        optional_args = {'ppv': '', 'storm': ''}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        self._validate_status(templateVars['status'])
        uri = ('mo/uni/infra/funcprof/accportgrp-{}'
               .format(templateVars['name']))
        return self._deploy("int_pol_grp_access.json", templateVars, uri)

    def int_pol_grp_brkout(self, **kwargs):
        """Create/modify/delete a breakout interface policy group.

        kwargs:
            name: Name of the Interface Policy Group
            status: created | created,modified | deleted
            breakout_map: 10g-4x | 25g-4x
        """
        required_args = {'name': '', 'status': '', 'breakout_map': ''}
        templateVars = process_kwargs(required_args, {}, **kwargs)
        self._validate_status(templateVars['status'])
        uri = ('mo/uni/infra/funcprof/brkoutportgrp-{}'
               .format(templateVars['name']))
        return self._deploy("int_pol_grp_brkout.json", templateVars, uri)

    def int_profile(self, **kwargs):
        """Create/modify/delete a leaf interface profile.

        kwargs:
            name: Name of the Interface Profile
            status: created | created,modified | deleted
        """
        required_args = {'name': '', 'status': ''}
        templateVars = process_kwargs(required_args, {}, **kwargs)
        self._validate_status(templateVars['status'])
        uri = 'mo/uni/infra/accportprof-{}'.format(templateVars['name'])
        return self._deploy("int_profile.json", templateVars, uri)

    def int_selector(self, **kwargs):
        """Add a port selector (range) to an interface profile.

        kwargs:
            name: Name of the Interface Profile
            status: created | created,modified | deleted
            port_name: Name of the port selector in the Interface Profile
            port_type: accportgrp | accbundle | brkoutportgrp
                Note: accportgrp = Access Port
                Note: accbundle = vPC or Port Channel
                Note: brkoutportgrp = Breakout Ports
            pol_group: Name of the Policy Group to apply
            mod_start: Starting mod as an integer (almost always 1)
            mod_end: Ending mod as an integer (almost always 1)
            port_start: Starting port as an integer
            port_end: Ending port as an integer
            descr (optional): description of the selector
        """
        required_args = {'name': '', 'status': '', 'port_name': '',
                         'port_type': '', 'pol_group': '',
                         'mod_start': '1', 'mod_end': '1',
                         'port_start': '', 'port_end': ''}
        optional_args = {'descr': ''}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        for key in ('mod_start', 'mod_end', 'port_start', 'port_end'):
            templateVars[key] = self._to_int(
                templateVars[key], 'ID must be an integer')
        self._validate_status(templateVars['status'])
        uri = 'mo/uni/infra/accportprof-{}'.format(templateVars['name'])
        return self._deploy("int_selector.json", templateVars, uri)

    def int_sub_selector(self, **kwargs):
        """Add a sub-port selector (range) to an interface profile.

        kwargs:
            name: Name of the Interface Profile
            status: created | created,modified | deleted
            port_name: Name of the port selector in the Interface Profile
            port: Port as an integer
            sub_start: Starting sub port as an integer
            sub_end: Ending sub port as an integer
        """
        required_args = {'name': '', 'status': '', 'port_name': '',
                         'port': '', 'sub_start': '', 'sub_end': ''}
        templateVars = process_kwargs(required_args, {}, **kwargs)
        for key in ('port', 'sub_start', 'sub_end'):
            templateVars[key] = self._to_int(
                templateVars[key], 'ID must be an integer')
        self._validate_status(templateVars['status'])
        uri = 'mo/uni/infra/accportprof-{}'.format(templateVars['name'])
        return self._deploy("int_sub_selector.json", templateVars, uri)

    def int_sub_selector_individual(self, **kwargs):
        """Add an individual sub-port selector to an interface profile.

        kwargs:
            name: Name of the Interface Profile
            status: created | created,modified | deleted
            port_name: Name of the port selector in the Interface Profile
            port_type: accportgrp | accbundle | brkoutportgrp
                Note: accportgrp = Access Port
                Note: accbundle = vPC or Port Channel
                Note: brkoutportgrp = Breakout Ports
            pol_group: Name of the Policy Group to apply
            port: Port as an integer
            sub_start: Starting sub port as an integer
            sub_end: Ending sub port as an integer
            mod (optional): Mod as an integer (almost always 1)
        """
        required_args = {'name': '', 'status': '', 'port_name': '',
                         'port_type': '', 'pol_group': '', 'port': '',
                         'sub_start': '', 'sub_end': ''}
        optional_args = {'mod': '1'}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        for key in ('mod', 'port', 'sub_start', 'sub_end'):
            templateVars[key] = self._to_int(
                templateVars[key], 'ID must be an integer')
        self._validate_status(templateVars['status'])
        uri = 'mo/uni/infra/accportprof-{}'.format(templateVars['name'])
        return self._deploy("int_sub_selector_individual.json",
                            templateVars, uri)

    def int_prof_to_sw_profile(self, **kwargs):
        """Attach an Interface Profile to a Switch Profile.

        kwargs:
            name: Name of the Switch Profile
            status: created | created,modified | deleted
            int_profile: Name of the Interface Profile to hook to the
                Switch Selector
        """
        required_args = {'name': '', 'status': '', 'int_profile': ''}
        templateVars = process_kwargs(required_args, {}, **kwargs)
        self._validate_status(templateVars['status'])
        uri = 'mo/uni/infra/nprof-{}'.format(templateVars['name'])
        return self._deploy("int_prof_to_sw_profile.json",
                            templateVars, uri)

    def fex_profile(self, **kwargs):
        """Create/modify/delete a FEX profile.

        kwargs:
            name: Name of the FEX Profile
            fex_pol_grp: Name of the FEX Policy Group
            status: created | created,modified | deleted
        """
        required_args = {'name': '', 'fex_pol_grp': '', 'status': ''}
        templateVars = process_kwargs(required_args, {}, **kwargs)
        self._validate_status(templateVars['status'])
        uri = 'mo/uni/infra/fexprof-{}'.format(templateVars['name'])
        return self._deploy("fex_profile.json", templateVars, uri)

    def fex_int_profile(self, **kwargs):
        """Add a port selector (range) to a FEX profile.

        kwargs:
            name: Name of the FEX Profile
            status: created | created,modified | deleted
            port_name: Name of the port selector
            port_type: accportgrp | accbundle
                Note: accportgrp = Access Port
                Note: accbundle = vPC or Port Channel
            pol_group: Name of the Policy Group to apply
            port_start: Starting port as an integer
            port_end: Ending port as an integer
            fex_id: Integer ID of the FEX
            mod_start (optional): Starting mod as an integer (almost
                always 1)
            mod_end (optional): Ending mod as an integer (almost
                always 1)
        """
        required_args = {'name': '', 'status': '', 'port_name': '',
                         'port_type': '', 'pol_group': '',
                         'port_start': '', 'port_end': '', 'fex_id': ''}
        optional_args = {'mod_start': '1', 'mod_end': '1'}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        for key in ('mod_start', 'mod_end', 'port_start', 'port_end',
                    'fex_id'):
            templateVars[key] = self._to_int(
                templateVars[key], 'ID must be an integer')
        self._validate_status(templateVars['status'])
        uri = ('mo/uni/infra/fexprof-{}/hports-{}-typ-range'
               .format(templateVars['name'], templateVars['port_name']))
        return self._deploy("fex_int_profile.json", templateVars, uri)

    def fex_leaf_profile(self, **kwargs):
        """Attach a FEX to a leaf via an interface-profile selector.

        kwargs:
            name: Name of the Interface Profile
            status: created | created,modified | deleted
            port_name: Name of the port selector in the Interface Profile
            port_start: Starting port as an integer
            port_end: Ending port as an integer
            fex_id: Integer ID of the FEX
            fex_prof: Name of the FEX Profile
            fex_pol_grp: Name of FEX Policy Group
            mod_start (optional): Starting mod as an integer (almost
                always 1)
            mod_end (optional): Ending mod as an integer (almost
                always 1)
        """
        required_args = {'name': '', 'status': '', 'port_name': '',
                         'port_start': '', 'port_end': '', 'fex_id': '',
                         'fex_prof': '', 'fex_pol_grp': ''}
        optional_args = {'mod_start': '1', 'mod_end': '1'}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        for key in ('mod_start', 'mod_end', 'port_start', 'port_end',
                    'fex_id'):
            templateVars[key] = self._to_int(
                templateVars[key], 'ID must be an integer')
        self._validate_status(templateVars['status'])
        uri = 'mo/uni/infra/accportprof-{}'.format(templateVars['name'])
        return self._deploy("fex_leaf_profile.json", templateVars, uri)
# Class must be instantiated with APIC IP address and cookies
class FabTnPol(object):
def __init__(self, apic, cookies):
self.apic = apic
self.cookies = cookies
self.templateLoader = jinja2.FileSystemLoader(
searchpath=(json_path + 'FabTnPol/'))
self.templateEnv = jinja2.Environment(loader=self.templateLoader)
# Method must be called with the following kwargs.
# name: The name of the Tenant
# status: created | created,modified | deleted
def tenant(self, **kwargs):
required_args = {'name': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "tenant.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni/tn-{}'.format(templateVars['name'])
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# name: Name of the VRF
# enforce: enforced | unenforced
# status: created | created,modified | deleted
def vrf(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'enforce': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "vrf.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/ctx-{}'
.format(templateVars['tn_name'], templateVars['name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# name: Name of the VRF
# contract: Name of the Contract
# status: created | created,modified | deleted
def vz_any_provide(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'contract': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "vz_any_provide.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/ctx-{}'
.format(templateVars['tn_name'], templateVars['name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# name: Name of the VRF
# contract: Name of the Contract
# status: created | created,modified | deleted
def vz_any_consume(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'contract': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "vz_any_consume.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/ctx-{}'
.format(templateVars['tn_name'], templateVars['name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# name: Name of the VRF
# prefgrp: disabled | enabled
def prefgrp(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'prefgrp': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
template_file = "prefgrp.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/ctx-{}/any'
.format(templateVars['tn_name'], templateVars['name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# name: Name of the BD
# arp: yes | no
# mdest: bd-flood | drop | encap-flood
# mcast: flood | opt-flood
# unicast: yes | no
# unk_unicast: proxy | flood
# vrf: Name of associated VRF -- moving to OPTIONAL to not break older,
# versions, but has no functionality at this point
# status: created | created,modified | deleted
# multicast (Optional): yes | no -- multicast routing tick box
def bd(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'arp': '',
'mdest': '',
'mcast': '',
'unicast': '',
'unk_unicast': '',
'status': ''}
optional_args = {'limitlearn': 'yes',
'multicast': 'no',
'vrf': '',
'descr': ''}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "bd.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/BD-{}'
.format(templateVars['tn_name'], templateVars['name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# name: Name of the BD
# vrf: Name of associated VRF
# status: created | created,modified | deleted
def bd_vrf(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'vrf': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "bd_vrf.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/BD-{}/rsctx'
.format(templateVars['tn_name'], templateVars['name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# name: Name of the BD
# subnet: Subnet in CIDR: ex: 1.1.1.1/24
# preferred: yes | no
# scope: public | private | shared | public,shared | private,shared
# status: created | created,modified | deleted
def bd_subnet(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'subnet': '',
'scope': '',
'preferred': '',
'status': ''}
optional_args = {'descr': ''}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "bd_subnet.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/BD-{}'
.format(templateVars['tn_name'], templateVars['name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# name: Name of the BD
# l3_out: Name of the associated L3 Out
# status: created | created,modified | deleted
def bd_l3_out(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'l3_out': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "bd_l3_out.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/BD-{}'
.format(templateVars['tn_name'], templateVars['name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# name: Name of the Filter
# dst_start: unspecified | port number as an integer
# dst_end: unspecified | port number as an integer
# src_start: unspecified | port number as an integer
# src_end: unspecified | port number as an integer
# ethertype: commonly IP or unspecified
# protocol: if IP commonly tcp | udp | unspecified
# Note: ACI is case sensitive, use all lower case!
# status: created | created,modified | deleted
def filter(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'dst_start': '',
'dst_end': '',
'src_start': '',
'src_end': '',
'ethertype': '',
'protocol': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not (templateVars['dst_start'] == 'unspecified'):
try:
templateVars['dst_start'] = int(templateVars['dst_start'])
except Exception as e:
print(e)
raise InvalidArg("Filter port must be 'unspecified' or an integer")
if not (templateVars['dst_end'] == 'unspecified'):
try:
templateVars['dst_end'] = int(templateVars['dst_end'])
except Exception as e:
print(e)
raise InvalidArg("Filter port must be 'unspecified' or an integer")
if not (templateVars['src_start'] == 'unspecified'):
try:
templateVars['src_start'] = int(templateVars['src_start'])
except Exception as e:
print(e)
raise InvalidArg("Filter port must be 'unspecified' or an integer")
if not (templateVars['src_end'] == 'unspecified'):
try:
templateVars['src_end'] = int(templateVars['src_end'])
except Exception as e:
print(e)
raise InvalidArg("Filter port must be 'unspecified' or an integer")
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "filter.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/flt-{}'
.format(templateVars['tn_name'], templateVars['name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# name: Name of the Contract
# scope: context | global | tenant | application-profile
# subject: Name of the Subject
# filter: Name of the Filter being referenced
# reverse_filter: yes | no
# status: created | created,modified | deleted
def contract(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'scope': '',
'subject': '',
'filter': '',
'reverse_filter': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "contract.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/brc-{}'
.format(templateVars['tn_name'], templateVars['name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# name: Name of the Application Profile
# status: created | created,modified | deleted
def app_profile(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "app_profile.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/ap-{}'
.format(templateVars['tn_name'], templateVars['name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# ap_name: Name of parent Application Profile
# name: Name of the EPG
# bd: Name of associated BD
# status: created | created,modified | deleted
def epg(self, **kwargs):
required_args = {'tn_name': '',
'ap_name': '',
'name': '',
'bd': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "epg.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/ap-{}/epg-{}'
.format(templateVars['tn_name'], templateVars['ap_name'],
templateVars['name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# ap_name: Name of parent Application Profile
# name: Name of the EPG
# prfgrp: include | exclude
def epg_prfgrp(self, **kwargs):
required_args = {'tn_name': '',
'ap_name': '',
'name': '',
'prfgrp': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
template_file = "epg_prfgrp.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/ap-{}/epg-{}'
.format(templateVars['tn_name'], templateVars['ap_name'],
templateVars['name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# ap_name: Name of parent Application Profile
# epg_name: Name of the EPG
# phys_dom: Name of the Physical Domain
# deploy: lazy | immediate
# resolve: lazy | immediate | on-demand
# status: created | created,modified | deleted
def epg_phys_dom(self, **kwargs):
required_args = {'tn_name': '',
'ap_name': '',
'epg_name': '',
'phys_dom': '',
'deploy': '',
'resolve': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "epg_phys_dom.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/ap-{}/epg-{}'
.format(templateVars['tn_name'], templateVars['ap_name'],
templateVars['epg_name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# ap_name: Name of parent Application Profile
# epg_name: Name of the EPG
# vmm_dom: Name of the VMM Domain
# deploy: lazy | immediate
# resolve: lazy | immediate | pre-provision
# status: created | created,modified | deleted
def epg_vmm_dom(self, **kwargs):
required_args = {'tn_name': '',
'ap_name': '',
'epg_name': '',
'vmm_dom': '',
'deploy': '',
'resolve': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "epg_vmm_dom.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/ap-{}/epg-{}'
.format(templateVars['tn_name'], templateVars['ap_name'],
templateVars['epg_name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# ap_name: Name of parent Application Profile
# epg_name: Name of the EPG
# contract: Name of the Contract
# status: created | created,modified | deleted
def provide_contract(self, **kwargs):
required_args = {'tn_name': '',
'ap_name': '',
'epg_name': '',
'contract': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "provide_contract.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/ap-{}/epg-{}/rsprov-{}'
.format(templateVars['tn_name'], templateVars['ap_name'],
templateVars['epg_name'], templateVars['contract']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# ap_name: Name of parent Application Profile
# epg_name: Name of the EPG
# contract: Name of the Contract
# status: created | created,modified | deleted
def consume_contract(self, **kwargs):
required_args = {'tn_name': '',
'ap_name': '',
'epg_name': '',
'contract': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "consume_contract.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/ap-{}/epg-{}/rscons-{}'
.format(templateVars['tn_name'], templateVars['ap_name'],
templateVars['epg_name'], templateVars['contract']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# ap_name: Name of parent Application Profile
# epg_name: Name of the EPG
# pod (optional): Integer ID of the pod
# sw1: Switch 1 of the vPC (node ID) as an integer
# sw2: Switch 2 of the vPC (node ID) as an integer
# vpc: Name of the vPC
# encap: Encapsulation VLAN ID as an integer
# deploy: lazy | immediate
    # mode (optional): regular (trunk) | native (dot1p)
# status: created | created,modified | deleted
def static_path_vpc(self, **kwargs):
required_args = {'tn_name': '',
'ap_name': '',
'epg_name': '',
'sw1': '',
'sw2': '',
'vpc': '',
'encap': '',
'deploy': '',
'status': ''}
optional_args = {'pod': '1',
'mode': 'regular'}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not int(templateVars['sw1']):
raise InvalidArg('ID must be an integer')
else:
templateVars['sw1'] = int(templateVars['sw1'])
if not int(templateVars['sw2']):
raise InvalidArg('ID must be an integer')
else:
templateVars['sw2'] = int(templateVars['sw2'])
if not int(templateVars['encap']):
raise InvalidArg('ID must be an integer')
else:
templateVars['encap'] = int(templateVars['encap'])
if not int(templateVars['pod']):
raise InvalidArg('Pod ID must be an integer')
else:
templateVars['pod'] = int(templateVars['pod'])
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "static_path_vpc.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/ap-{}/epg-{}'
.format(templateVars['tn_name'], templateVars['ap_name'],
templateVars['epg_name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# ap_name: Name of parent Application Profile
# epg_name: Name of the EPG
# pod (optional): Integer ID of the pod
# sw1: Switch 1 of the vPC (node ID) as an integer
# port_channel: Name of the Port Channel
# encap: Encapsulation VLAN ID as an integer
# deploy: lazy | immediate
    # mode (optional): regular (trunk) | native (dot1p)
# status: created | created,modified | deleted
def static_path_port_channel(self, **kwargs):
required_args = {'tn_name': '',
'ap_name': '',
'epg_name': '',
'sw1': '',
'port_channel': '',
'encap': '',
'deploy': '',
'status': ''}
optional_args = {'pod': '1',
'mode': 'regular'}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not int(templateVars['sw1']):
raise InvalidArg('ID must be an integer')
else:
templateVars['sw1'] = int(templateVars['sw1'])
if not int(templateVars['encap']):
raise InvalidArg('ID must be an integer')
else:
templateVars['encap'] = int(templateVars['encap'])
if not int(templateVars['pod']):
raise InvalidArg('Pod ID must be an integer')
else:
templateVars['pod'] = int(templateVars['pod'])
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "static_path_port_channel.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/ap-{}/epg-{}//rspathAtt-[topology/pod-{}/paths-{}/pathep-[{}]]'
.format(templateVars['tn_name'], templateVars['ap_name'],
templateVars['epg_name'], templateVars['pod'],
templateVars['sw1'], templateVars['port_channel']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# ap_name: Name of parent Application Profile
# epg_name: Name of the EPG
# pod (optional): Integer ID of the pod
# sw1: Switch 1 of the vPC (node ID) as an integer
# port: Port ID as an integer (i.e. 1 or 2)
# encap: Encapsulation VLAN ID as an integer
# deploy: lazy | immediate
# status: created | created,modified | deleted
def static_path_access(self, **kwargs):
required_args = {'tn_name': '',
'ap_name': '',
'epg_name': '',
'sw1': '',
'port': '',
'encap': '',
'deploy': '',
'status': ''}
optional_args = {'pod': '1'}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not int(templateVars['sw1']):
raise InvalidArg('ID must be an integer')
else:
templateVars['sw1'] = int(templateVars['sw1'])
if not int(templateVars['port']):
raise InvalidArg('ID must be an integer')
else:
templateVars['port'] = int(templateVars['port'])
if not int(templateVars['encap']):
raise InvalidArg('ID must be an integer')
else:
templateVars['encap'] = int(templateVars['encap'])
if not int(templateVars['pod']):
raise InvalidArg('Pod ID must be an integer')
else:
templateVars['pod'] = int(templateVars['pod'])
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "static_path_access.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/ap-{}/epg-{}'
.format(templateVars['tn_name'], templateVars['ap_name'],
templateVars['epg_name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# ap_name: Name of parent Application Profile
# epg_name: Name of the EPG
# pod (optional): Integer ID of the pod
# sw1: Switch 1 of the vPC (node ID) as an integer
# port: Port ID as an integer (i.e. 1 or 2)
# encap: Encapsulation VLAN ID as an integer
# deploy: lazy | immediate
# mode: native | regular (dot1p, trunk)
# status: created | created,modified | deleted
def static_path(self, **kwargs):
required_args = {'tn_name': '',
'ap_name': '',
'epg_name': '',
'sw1': '',
'port': '',
'encap': '',
'deploy': '',
'mode': '',
'status': ''}
optional_args = {'pod': '1'}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not int(templateVars['sw1']):
raise InvalidArg('ID must be an integer')
else:
templateVars['sw1'] = int(templateVars['sw1'])
if not int(templateVars['port']):
raise InvalidArg('ID must be an integer')
else:
templateVars['port'] = int(templateVars['port'])
if not int(templateVars['encap']):
raise InvalidArg('ID must be an integer')
else:
templateVars['encap'] = int(templateVars['encap'])
if not int(templateVars['pod']):
raise InvalidArg('Pod ID must be an integer')
else:
templateVars['pod'] = int(templateVars['pod'])
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "static_path.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/ap-{}/epg-{}'
.format(templateVars['tn_name'], templateVars['ap_name'],
templateVars['epg_name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# NOTE: At this time this only supports external DHCP servers (ext to fab)
# tn_name: The name of the Tenant
# relay_name: Name of the DHCP Label/Provider
# dhcp_ip: IP of the DHCP server
# l3_tn: Name of the Tenant containing the L3 out used to reach DHCP server
# l3_out: Name of the L3 out used to reach DHCP server
# l3_network: Name of the L3 out Network used to reach DHCP server
# status: created | created,modified | deleted
def dhcp_relay(self, **kwargs):
required_args = {'tn_name': '',
'relay_name': '',
'dhcp_ip': '',
'l3_tn': '',
'l3_network': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not ipaddress.ip_address(templateVars['dhcp_ip']):
raise InvalidArg('Address must be a valid IPv4 address')
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "dhcp_relay.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/relayp-{}'
.format(templateVars['tn_name'], templateVars['relay_name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# relay_name: Name of the DHCP Label/Provider
# dhcp_ip: IP of the DHCP server
# dhcp_tn_name: Name of the Tenant containing the DHCP server
# dhcp_ap_name: Name of the AP containing the DHCP server
# dhcp_epg_name: Name of the EPG containing the DHCP server
# status: created | created,modified | deleted
def dhcp_relay_tn(self, **kwargs):
required_args = {'tn_name': '',
'relay_name': '',
'dhcp_ip': '',
'dhcp_tn_name': '',
'dhcp_ap_name': '',
'dhcp_epg_name': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not ipaddress.ip_address(templateVars['dhcp_ip']):
raise InvalidArg('Address must be a valid IPv4 address')
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "dhcp_relay_tn.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/relayp-{}'
.format(templateVars['tn_name'], templateVars['relay_name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# bd_name: Name of BD to deploy DHCP label to
# relay_name: Name of the DHCP Label/Provider
# status: created | created,modified | deleted
# scope (optional): infra | tenant, defaults to tenant
def dhcp_label(self, **kwargs):
required_args = {'tn_name': '',
'bd_name': '',
'relay_name': '',
'status': '',
'scope': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "dhcp_label.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/BD-{}'
.format(templateVars['tn_name'], templateVars['bd_name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: The name of the Tenant
# ap_name: Name of parent Application Profile
# epg_name: Name of the EPG
# pod (optional): Integer ID of the pod
# fex_id: Integer ID of the FEX
# sw1: Switch 1 of the vPC (node ID) as an integer
# port: Port ID as an integer (i.e. 1 or 2)
# encap: Encapsulation VLAN ID as an integer
# deploy: lazy | immediate
    # mode: native | regular (dot1p / trunk)
# status: created | created,modified | deleted
def fex_static_path(self, **kwargs):
required_args = {'tn_name': '',
'ap_name': '',
'epg_name': '',
'sw1': '',
'fex_id': '',
'port': '',
'encap': '',
'deploy': '',
'mode': '',
'status': ''}
optional_args = {'pod': '1'}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not int(templateVars['sw1']):
raise InvalidArg('ID must be an integer')
else:
templateVars['sw1'] = int(templateVars['sw1'])
if not int(templateVars['port']):
raise InvalidArg('ID must be an integer')
else:
templateVars['port'] = int(templateVars['port'])
if not int(templateVars['encap']):
raise InvalidArg('ID must be an integer')
else:
templateVars['encap'] = int(templateVars['encap'])
if not int(templateVars['pod']):
raise InvalidArg('Pod ID must be an integer')
else:
templateVars['pod'] = int(templateVars['pod'])
if not int(templateVars['fex_id']):
raise InvalidArg('FEX ID must be an integer')
else:
templateVars['fex_id'] = int(templateVars['fex_id'])
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "fex_static_path.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/ap-{}/epg-{}'
.format(templateVars['tn_name'], templateVars['ap_name'],
templateVars['epg_name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Class must be instantiated with APIC IP address and cookies
class FabL3Pol(object):
    def __init__(self, apic, cookies):
        # apic: APIC address used for every subsequent post() call.
        # cookies: authentication cookies from a prior login, forwarded
        #          to post() with each request.
        self.apic = apic
        self.cookies = cookies
        # Jinja2 templates for this class live under <json_path>/FabL3Pol/.
        self.templateLoader = jinja2.FileSystemLoader(
            searchpath=(json_path + 'FabL3Pol/'))
        self.templateEnv = jinja2.Environment(loader=self.templateLoader)
# Method must be called with the following kwargs.
# tn_name: Name of the Tenant
# name: The name of the L3-Out
# domain: Name of the External L3 Domain
# vrf: Name of associated VRF
# status: created | created,modified | deleted
def l3_out(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'domain': '',
'vrf': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "l3_out.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/out-{}'
.format(templateVars['tn_name'], templateVars['name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: Name of the Tenant
# name: The name of the L3-Out
# area: backbone | area id as an integer | area id as dotted decimal
# area_type: regular | nssa
# status: created | created,modified | deleted
def ospf(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'area': '',
'area_type': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "ospf.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/out-{}'
.format(templateVars['tn_name'], templateVars['name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: Name of the Tenant
# name: The name of the L3-Out
# status: created | created,modified | deleted (of the BGP process)
def bgp(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "bgp.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/out-{}'
.format(templateVars['tn_name'], templateVars['name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: Name of the Tenant
# name: The name of the L3-Out
# node_name: Name of the Node Profile
# pod: ID of the pod
# sw1: Node ID of first switch as an integer
# sw2: Node ID of second switch as an integer
# sw1_loop: IP of node1 loopback as a dotted decimal (no mask)
    # sw2_loop: IP of node2 loopback as a dotted decimal (no mask)
# loopback: yes | no
# status: created | created,modified | deleted
def node_profile(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'node_name': '',
'pod': '',
'sw1': '',
'sw2': '',
'sw1_loop': '',
'sw2_loop': '',
'loopback': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not int(templateVars['pod']):
raise InvalidArg('ID must be an integer')
else:
templateVars['pod'] = int(templateVars['pod'])
if not int(templateVars['sw1']):
raise InvalidArg('ID must be an integer')
else:
templateVars['sw1'] = int(templateVars['sw1'])
if not int(templateVars['sw2']):
raise InvalidArg('ID must be an integer')
else:
templateVars['sw2'] = int(templateVars['sw2'])
if not ipaddress.ip_address(templateVars['sw1_loop']):
raise InvalidArg('Address must be a valid IPv4 address')
if not ipaddress.ip_address(templateVars['sw2_loop']):
raise InvalidArg('Address must be a valid IPv4 address')
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "node_profile.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/out-{}'
.format(templateVars['tn_name'], templateVars['name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: Name of the Tenant
# name: The name of the L3-Out
# pod: ID of the pod
# node_name: Name of the Node Profile
# sw: Node ID of the switch as an integer
# prefix: Prefix in CIDR format (i.e. 0.0.0.0/0)
# next_hop: IP of the next hop in dotted decimal format (i.e. 1.1.1.1)
# status: created | created,modified | deleted
def static_routes(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'pod': '',
'node_name': '',
'sw': '',
'prefix': '',
'next_hop': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not int(templateVars['pod']):
raise InvalidArg('ID must be an integer')
else:
templateVars['pod'] = int(templateVars['pod'])
if not int(templateVars['sw']):
raise InvalidArg('ID must be an integer')
else:
templateVars['sw'] = int(templateVars['sw'])
if not ipaddress.ip_address(templateVars['next_hop']):
raise InvalidArg('Address must be a valid IPv4 address')
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "static_routes.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/out-{}'
.format(templateVars['tn_name'], templateVars['name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: Name of the Tenant
# name: The name of the L3-Out
# pod: ID of the pod
# node_name: Name of the Node Profile
# int_profile: Name of the Interface Profile
# sw: Node ID of the switch as an integer
# port: Port number as an integer
# ip: IP of the interface in dotted decimal format (i.e. 1.1.1.1)
    # int_profile_status: created | created,modified | deleted of the Int Pro
# status: created | created,modified | deleted of the Interface itself
def routed_ints(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'pod': '',
'node_name': '',
'int_profile': '',
'sw': '',
'port': '',
'ip': '',
'int_profile': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not int(templateVars['pod']):
raise InvalidArg('ID must be an integer')
else:
templateVars['pod'] = int(templateVars['pod'])
if not int(templateVars['sw']):
raise InvalidArg('ID must be an integer')
else:
templateVars['sw'] = int(templateVars['sw'])
if not int(templateVars['port']):
raise InvalidArg('ID must be an integer')
else:
templateVars['port'] = int(templateVars['port'])
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "routed_ints.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/out-{}/lnodep-{}/lifp-{}'
.format(templateVars['tn_name'], templateVars['name'],
templateVars['node_name'], templateVars['int_profile']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: Name of the Tenant
# name: The name of the L3-Out
# pod: ID of the pod
# node_name: Name of the Node Profile
# int_profile: Name of the Interface Profile
# sw: Node ID of the switch as an integer
# port: Port number as an integer
# vlan: VLAN ID as an integer
# ip: IP of the interface in dotted decimal format (i.e. 1.1.1.1)
    # int_profile_status: created | created,modified | deleted of Int Profile
# status: created | created,modified | deleted of the Interface itself
def routed_sub_ints(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'pod': '',
'node_name': '',
'int_profile': '',
'sw': '',
'port': '',
'vlan': '',
'ip': '',
'int_profile': '',
'int_profile_status': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not int(templateVars['pod']):
raise InvalidArg('ID must be an integer')
else:
templateVars['pod'] = int(templateVars['pod'])
if not int(templateVars['sw']):
raise InvalidArg('ID must be an integer')
else:
templateVars['sw'] = int(templateVars['sw'])
if not int(templateVars['port']):
raise InvalidArg('ID must be an integer')
else:
templateVars['port'] = int(templateVars['port'])
if not int(templateVars['vlan']):
raise InvalidArg('ID must be an integer')
else:
templateVars['vlan'] = int(templateVars['vlan'])
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "routed_sub_ints.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/out-{}/lnodep-{}/lifp-{}'
.format(templateVars['tn_name'], templateVars['name'],
templateVars['node_name'], templateVars['int_profile']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: Name of the Tenant
# name: The name of the L3-Out
# pod: ID of the pod
# node_name: Name of the Node Profile
# int_profile: Name of the Interface Profile
# sw1: Switch-1 ID of the switch as an integer
# sw2: Switch-2 ID of the switch as an integer
# sw1_ip: IP of Switch-1 in dotted-decimal
# sw2_ip: IP of Switch-2 in dotted-decimal
# vlan: VLAN ID as an integer
# vpc: Name of associated vPC
# int_profile_status: created | created,modified | deleted of the Int Pro
# status: created | created,modified | deleted of the Interface itself
def svi(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'pod': '',
'node_name': '',
'int_profile': '',
'sw1': '',
'sw2': '',
'sw1_ip': '',
'sw2_ip': '',
'vlan': '',
'vpc': '',
'int_profile_status': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not int(templateVars['pod']):
raise InvalidArg('ID must be an integer')
else:
templateVars['pod'] = int(templateVars['pod'])
if not int(templateVars['sw1']):
raise InvalidArg('ID must be an integer')
else:
templateVars['sw1'] = int(templateVars['sw1'])
if not int(templateVars['sw2']):
raise InvalidArg('ID must be an integer')
else:
templateVars['sw2'] = int(templateVars['sw2'])
if not int(templateVars['vlan']):
raise InvalidArg('ID must be an integer')
else:
templateVars['vlan'] = int(templateVars['vlan'])
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "svi.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/out-{}/lnodep-{}/lifp-{}'
.format(templateVars['tn_name'], templateVars['name'],
templateVars['node_name'], templateVars['int_profile']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: Name of the Tenant
# name: The name of the L3-Out
# pod: ID of the pod
# node_name: Name of the Node Profile
# int_profile: Name of the Interface Profile
# sw1: Switch-1 ID of the switch as an integer
# ip: IP of Switch-1 in dotted-decimal
# vip: IP of the VIP (hsrp-like IP)
# vlan: VLAN ID as an integer
# pc: Name of associated PC
# int_profile_status: created | created,modified | deleted of the Int Pro
# status: created | created,modified | deleted
def svi_pc(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'pod': '',
'node_name': '',
'int_profile': '',
'sw1': '',
'ip': '',
'vip': '',
'vlan': '',
'pc': '',
'int_profile_status': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not int(templateVars['pod']):
raise InvalidArg('ID must be an integer')
else:
templateVars['pod'] = int(templateVars['pod'])
if not int(templateVars['sw1']):
raise InvalidArg('ID must be an integer')
else:
templateVars['sw1'] = int(templateVars['sw1'])
if not int(templateVars['vlan']):
raise InvalidArg('ID must be an integer')
else:
templateVars['vlan'] = int(templateVars['vlan'])
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "svi_pc.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/out-{}/lnodep-{}/lifp-{}'
.format(templateVars['tn_name'], templateVars['name'],
templateVars['node_name'], templateVars['int_profile']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: Name of the Tenant
# name: The name of the L3-Out
# pod: ID of the pod
# node_name: Name of the Node Profile
# int_profile: Name of the Interface Profile
# sw1: Switch-1 ID of the switch as an integer
# sw2: Switch-2 ID of the switch as an integer
# vpc: Name of associated vPC
# status: created | created,modified | deleted of the VIP itself
def svi_vip(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'pod': '',
'node_name': '',
'int_profile': '',
'sw1': '',
'sw2': '',
'vpc': '',
'vip': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not int(templateVars['pod']):
raise InvalidArg('ID must be an integer')
else:
templateVars['pod'] = int(templateVars['pod'])
if not int(templateVars['sw1']):
raise InvalidArg('ID must be an integer')
else:
templateVars['sw1'] = int(templateVars['sw1'])
if not int(templateVars['sw2']):
raise InvalidArg('ID must be an integer')
else:
templateVars['sw2'] = int(templateVars['sw2'])
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "svi_vip.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/out-{}/lnodep-{}/lifp-{}/rspathL3OutAtt-[topology'
'/pod-{}/protpaths-{}-{}/pathep-[{}]]'
.format(templateVars['tn_name'], templateVars['name'],
templateVars['node_name'], templateVars['int_profile'],
templateVars['pod'], templateVars['sw1'],
templateVars['sw2'], templateVars['vpc']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: Name of the Tenant
# name: The name of the L3-Out
# epg_name: Name of the Prefix Based EPG
# subnet: Subnet in CIDR format
# status: created | created,modified | deleted of the EPG itself
# subnet_status created | created,modified | deleted of the subnet
def network_epg(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'epg_name': '',
'subnet': '',
'status': '',
'subnet_status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
if templateVars['subnet_status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "network_epg.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/out-{}/instP-{}'
.format(templateVars['tn_name'], templateVars['name'],
templateVars['epg_name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: Name of the Tenant
# pol_name: The name of the Interface Policy
# hello: hello interval in seconds as an integer
# dead: dead interval in seconds as an integer
# net_type: p2p | bcast | unspecified
# status: created | created,modified | deleted
def ospf_int_pol(self, **kwargs):
required_args = {'tn_name': '',
'pol_name': '',
'hello': '',
'dead': '',
'net_type': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not int(templateVars['hello']):
raise InvalidArg('Value must be an integer')
else:
templateVars['hello'] = int(templateVars['hello'])
if not int(templateVars['dead']):
raise InvalidArg('Value must be an integer')
else:
templateVars['dead'] = int(templateVars['dead'])
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "ospf_int_pol.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/ospfIfPol-{}'
.format(templateVars['tn_name'], templateVars['pol_name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: Name of the Tenant
# name: The name of the L3 Out
# node_name: Name of the Node Profile
# int_profile: Name of the Interface Profile
# pol_type: ospf | eigrp | bgp
# pol_name: Name of the Interface Policy to be applied
# status: created | created,modified | deleted
def deploy_int_pol(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'node_name': '',
'int_profile': '',
'pol_type': '',
'pol_name': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "deploy_int_pol.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/out-{}/lnodep-{}/lifp-{}'
.format(templateVars['tn_name'], templateVars['name'],
templateVars['node_name'],
templateVars['int_profile']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: Name of the Tenant
# name: The name of the L3 Out
# node_name: Name of the Node Profile
# peer: BGP Peer address in dotted decimal
# local_asn: Local BGP ASN as an integer
# remote_asn: Remote BGP ASN as an integer
# status: created | created,modified | deleted
def bgp_peer_loopback(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'node_name': '',
'peer': '',
'local_asn': '',
'remote_asn': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not ipaddress.ip_address(templateVars['peer']):
raise InvalidArg('Address must be a valid IPv4 address')
if not (int(templateVars['local_asn']) in range(1, 65535)):
raise InvalidArg('Invalid BGP ASN')
else:
templateVars['local_asn'] = int(templateVars['local_asn'])
if not (int(templateVars['remote_asn']) in range(1, 65535)):
raise InvalidArg('Invalid BGP ASN')
else:
templateVars['remote_asn'] = int(templateVars['remote_asn'])
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "bgp_peer_loopback.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/out-{}'
.format(templateVars['tn_name'], templateVars['name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: Name of the Tenant
# name: The name of the L3 Out
# node_name: Name of the Node Profile
# int_profile: Name of the Interface Profile
# sw1: Integer ID of switch
# port: Integer ID of port
# peer: BGP Peer address in dotted decimal
# local_asn: Local BGP ASN as an integer
# remote_asn: Remote BGP ASN as an integer
# pod: (Optional) Integer of Pod ID
# status: created | created,modified | deleted
def bgp_peer_interface(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'node_name': '',
'int_profile': '',
'sw1': '',
'port': '',
'peer': '',
'local_asn': '',
'remote_asn': '',
'status': ''}
optional_args = {'pod': '1'}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not int(templateVars['pod']):
raise InvalidArg('ID must be an integer')
else:
templateVars['pod'] = int(templateVars['pod'])
if not int(templateVars['sw1']):
raise InvalidArg('ID must be an integer')
else:
templateVars['sw1'] = int(templateVars['sw1'])
if not int(templateVars['port']):
raise InvalidArg('ID must be an integer')
else:
templateVars['port'] = int(templateVars['port'])
if not ipaddress.ip_address(templateVars['peer']):
raise InvalidArg('Address must be a valid IPv4 address')
if not (int(templateVars['local_asn']) in range(1, 65535)):
raise InvalidArg('Invalid BGP ASN')
else:
templateVars['local_asn'] = int(templateVars['local_asn'])
if not (int(templateVars['remote_asn']) in range(1, 65535)):
raise InvalidArg('Invalid BGP ASN')
else:
templateVars['remote_asn'] = int(templateVars['remote_asn'])
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "bgp_peer_loopback.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/out-{}'
.format(templateVars['tn_name'], templateVars['name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# tn_name: Name of the Tenant
# name: The name of the L3 Out
# pod: ID of the pod
# node_name: Name of the Node Profile
# int_profile: Name of the Interface Profile
# sw1: Node ID of the first switch as an integer
# sw2: Node ID of the second switch as an integer
# vpc: Name of the associated vPC
# peer: BGP Peer address in dotted decimal
# local_asn: Local BGP ASN as an integer
# remote_asn: Remote BGP ASN as an integer
# status: created | created,modified | deleted
def bgp_peer_svi(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'pod': '',
'node_name': '',
'int_profile': '',
'sw1': '',
'sw2': '',
'vpc': '',
'peer': '',
'local_asn': '',
'remote_asn': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not int(templateVars['pod']):
raise InvalidArg('ID must be an integer')
else:
templateVars['pod'] = int(templateVars['pod'])
if not int(templateVars['sw1']):
raise InvalidArg('ID must be an integer')
else:
templateVars['sw1'] = int(templateVars['sw1'])
if not int(templateVars['sw2']):
raise InvalidArg('ID must be an integer')
else:
templateVars['sw2'] = int(templateVars['sw2'])
if not ipaddress.ip_address(templateVars['peer']):
raise InvalidArg('Address must be a valid IPv4 address')
if not (int(templateVars['local_asn']) in range(1, 65535)):
raise InvalidArg('Invalid BGP ASN')
else:
templateVars['local_asn'] = int(templateVars['local_asn'])
if not (int(templateVars['remote_asn']) in range(1, 65535)):
raise InvalidArg('Invalid BGP ASN')
else:
templateVars['remote_asn'] = int(templateVars['remote_asn'])
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "bgp_peer_loopback.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/out-{}/lnodep-{}'
.format(templateVars['tn_name'], templateVars['name'],
templateVars['node_name'], templateVars['int_profile']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# tn_name: Name of the Tenant
# name: The name of the L3 Out
# epg_name: Name of the L3 Out EPG (Network object)
# contract: Name of the contract to provide
# status: created | created,modified | deleted
def l3_provide_contract(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'epg_name': '',
'contract': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "l3_provide_contract.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/out-{}/instP-{}'
.format(templateVars['tn_name'], templateVars['name'],
templateVars['epg_name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# tn_name: Name of the Tenant
# name: The name of the L3 Out
# epg_name: Name of the L3 Out EPG (Network object)
# contract: Name of the contract to consume
# status: created | created,modified | deleted
def l3_consume_contract(self, **kwargs):
required_args = {'tn_name': '',
'name': '',
'epg_name': '',
'contract': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "l3_consume_contract.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/out-{}/instP-{}'
.format(templateVars['tn_name'], templateVars['name'],
templateVars['epg_name']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# tn_name: Name of the Tenant
# name: The name of the L3 Out
# vrf: Name of the VRF
# status: created | created,modified | deleted
def vrf_enable_pim(self, **kwargs):
required_args = {'tn_name': '',
'vrf': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "vrf_enable_pim.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/ctx-{}'
.format(templateVars['tn_name'], templateVars['vrf']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# tn_name: Name of the Tenant
# name: The name of the L3 Out
# vrf: Name of the VRF
# rp: IP of RP
# status: created | created,modified | deleted
def vrf_pim_static_rp(self, **kwargs):
required_args = {'tn_name': '',
'vrf': '',
'rp': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "vrf_pim_static_rp.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/ctx-{}/pimctxp/staticrp/staticrpent-[{}]'
.format(templateVars['tn_name'], templateVars['vrf'],
templateVars['rp']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# tn_name: Name of the Tenant
# l3_out: Name of the L3 Out
# status: created | created,modified | deleted
def l3_out_pim(self, **kwargs):
required_args = {'tn_name': '',
'l3_out': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "l3_out_pim.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = ('mo/uni/tn-{}/out-{}/'
.format(templateVars['tn_name'], templateVars['l3_out']))
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Class must be instantiated with APIC IP address and cookies
class TshootPol(object):
    """Troubleshooting policies: SPAN source and destination groups.

    Must be instantiated with the APIC IP address and the session
    cookies obtained at login.
    """
    def __init__(self, apic, cookies):
        self.apic = apic
        self.cookies = cookies
        self.templateLoader = jinja2.FileSystemLoader(
            searchpath=(json_path + 'TshootPol/'))
        self.templateEnv = jinja2.Environment(loader=self.templateLoader)
    # Method must be called with the following kwargs.
    # tn_name: Name of the Tenant (for source of SPAN)
    # name: Name of the SPAN Source (automatically append -Group where appropriate)
    # admin: enabled | disabled
    # direction: both | in | out
    # ap: Name of Application Profile (for source of SPAN)
    # epg: Name of EPG (for source of SPAN)
    # dest: Name of SPAN Destination, -Group is automatically appended
    # status: created | created,modified | deleted
    def span_src(self, **kwargs):
        """Create a SPAN source group in a tenant.

        Raises InvalidArg if status is not valid.
        Returns the status code of the POST to the APIC.
        """
        required_args = {'tn_name': '',
                         'name': '',
                         'admin': '',
                         'direction': '',
                         'ap': '',
                         'epg': '',
                         'dest': '',
                         'status': ''}
        optional_args = {}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        if templateVars['status'] not in valid_status:
            raise InvalidArg('Status invalid')
        template_file = "span_src.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        # BUG FIX: the tenant segment of the DN previously used 'name'
        # (the SPAN source name) instead of 'tn_name'.
        uri = ('mo/uni/tn-{}/srcgrp-{}-Group'
               .format(templateVars['tn_name'], templateVars['name']))
        status = post(self.apic, payload, self.cookies, uri, template_file)
        return status
    # Method must be called with the following kwargs.
    # tn_name: Name of the Tenant (where you are building the SPAN)
    # name: The name of the SPAN Destination Group
    # tn_dest: Name of the Tenant where the SPAN destination resides
    # ap: Name of Application Profile (for destination of SPAN)
    # epg: Name of EPG (for destination of SPAN)
    # dest_ip: IP address of device terminating SPAN
    # src_ip: IP address of ACI ERSPAN source
    # status: created | created,modified | deleted
    def span_dst(self, **kwargs):
        """Create a SPAN (ERSPAN) destination group in a tenant.

        Raises InvalidArg if status is not valid; malformed IPs make
        ipaddress.ip_address() raise ValueError.
        Returns the status code of the POST to the APIC.
        """
        required_args = {'tn_name': '',
                         'name': '',
                         'tn_dest': '',
                         'ap': '',
                         'epg': '',
                         'dest_ip': '',
                         'src_ip': '',
                         'status': ''}
        optional_args = {}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        if not ipaddress.ip_address(templateVars['dest_ip']):
            raise InvalidArg('Address must be a valid IPv4 address')
        if not ipaddress.ip_address(templateVars['src_ip']):
            raise InvalidArg('Address must be a valid IPv4 address')
        if templateVars['status'] not in valid_status:
            raise InvalidArg('Status invalid')
        template_file = "span_dst.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        # BUG FIX: the tenant segment of the DN previously used 'name'
        # (the destination-group name) instead of 'tn_name'.
        uri = ('mo/uni/tn-{}/destgrp-{}-Group'
               .format(templateVars['tn_name'], templateVars['name']))
        status = post(self.apic, payload, self.cookies, uri, template_file)
        return status
# Class must be instantiated with APIC IP address and cookies
class Query(object):
    """Read-only GET queries against the APIC REST API.

    Must be instantiated with the APIC IP address and the session
    cookies obtained at login.
    """
    def __init__(self, apic, cookies):
        self.apic = apic
        self.cookies = cookies
    # Method must be called with the following kwargs.
    # dn: DN of object you would like to query
    # Returns status code and json payload of query
    def query_dn(self, dn, query_filter=''):
        """GET a managed object by DN; returns (status, payload)."""
        s = requests.Session()
        # BUG FIX: payload must be initialized, otherwise a failed GET
        # raised UnboundLocalError at the return statement below.
        payload = None
        try:
            r = s.get('https://{}/api/node/mo/{}.json{}'.format(self.apic, dn,
                      query_filter), cookies=self.cookies, verify=False)
            status = r.status_code
            payload = json.loads(r.text)
        except Exception as e:
            print("Failed to query DN. Exception: {}".format(e))
            status = 666
        return (status, payload)
    def query_class(self, query_class, query_filter=''):
        """GET all objects of a class; returns (status, payload)."""
        s = requests.Session()
        # BUG FIX: payload must be initialized, otherwise a failed GET
        # raised UnboundLocalError at the return statement below.
        payload = None
        try:
            r = s.get('https://{}/api/node/class/{}.json{}'.format(self.apic,
                      query_class, query_filter), cookies=self.cookies,
                      verify=False)
            status = r.status_code
            payload = json.loads(r.text)
        except Exception as e:
            print("Failed to query Class. Exception: {}".format(e))
            status = 666
        return (status, payload)
    # Method must be called with the following kwargs.
    # url: the url of the objectquery, for example /api/mo/...
    # Returns status code and json payload of query
    def query_url(self, url):
        """GET an arbitrary APIC URL; returns (status, payload)."""
        s = requests.Session()
        # BUG FIX: payload must be initialized, otherwise a failed GET
        # raised UnboundLocalError at the return statement below.
        payload = None
        try:
            r = s.get('https://'+str(self.apic)+url,
                      cookies=self.cookies, verify=False)
            status = r.status_code
            payload = json.loads(r.text)
        except Exception as e:
            print("Failed to query Class. Exception: {}".format(e))
            status = 666
        return (status, payload)
    # Queries the fabric to retrieve information about the ports,
    # and returns them in a dictionary of dictionaries, built this way:
    #
    # node_data[node_id]['ports'][intf]['intSel'] = intSel (1 value)
    # node_data[node_id]['ports'][intf]['intProf'] = intProf intf selector's father
    # node_data[node_id]['ports'][intf]['polGrp'] = polGrp (1 value)
    # node_data[node_id]['ports'][intf]['type'] = port_type 'access' or 'bundle'
    # node_data[node_id]['ports'][intf]['descr']
    # node_data[node_id]['swProf'][switchProf][swSel] contains all swSelectors
    # node_data[node_id]['intProf'][intProf] --> father's switch profile
    def query_ports(self):
        """Build the per-node port/profile map described above.

        Returns None when any of the underlying queries fails.
        """
        node_data = {}
        query = '/api/node/class/infraNodeP.json?query-target=subtree&target-subtree-class=infraNodeBlk'
        [status, payload] = self.query_url(query)
        if status != 200:
            return None
        json_data = payload['imdata']
        for obj in json_data:
            dn = obj['infraNodeBlk']['attributes']['dn']
            # We obtain the relationship between switch profiles and switch selector profiles,
            # from the switch selector profile we obtain the real node IDs. Potentially there
            # could be multiple switch selectors, not just one.
            #
            # uni/infra/nprof-<Leaf_Prof>/leaves-<Switch_Selector>-typ-range/nodeblk-1fd76fa26065f27f
            # (raw strings below avoid invalid-escape warnings in the regexes)
            reg = re.search(r'nprof-(.*?)\/leaves-(.*?)-typ-range\/nodeblk', dn)
            switchProf = reg.group(1)
            swSel = reg.group(2)
            nodeFrom = (int)(obj['infraNodeBlk']['attributes']['from_'])
            nodeTo = (int)(obj['infraNodeBlk']['attributes']['to_'])
            for node_id in range(nodeFrom, nodeTo+1):
                if not node_id in node_data:
                    node_data[node_id] = {}
                    node_data[node_id]['swProf'] = {}
                    node_data[node_id]['intProf'] = {}
                    node_data[node_id]['ports'] = {}
                if not switchProf in node_data[node_id]['swProf']:
                    node_data[node_id]['swProf'][switchProf] = {}
                node_data[node_id]['swProf'][switchProf][swSel] = 1
        query = '/api/node/class/infraNodeP.json?query-target=subtree&target-subtree-class=infraRsAccPortP'
        [status, payload] = self.query_url(query)
        if status != 200:
            return None
        json_data = payload['imdata']
        for obj in json_data:
            dn = obj['infraRsAccPortP']['attributes']['dn']
            # here we obtain the relationship between the switch profile and the interface profile
            #
            # uni/infra/nprof-<Leaf_Prof>/rsaccPortP-[uni/infra/accportprof-<if_Prof>]
            reg = re.search(r'nprof-(.*?)\/.*\[uni/infra/accportprof-(.*)\]', dn)
            switchProf = reg.group(1)
            intProf = reg.group(2)
            for node_id in node_data:
                if switchProf in node_data[node_id]['swProf']:
                    node_data[node_id]['intProf'][intProf] = switchProf
        # From this query, you get the port ranges for all the interface selectors, the dn
        # of the object contains also the interface profile to which the selectors belong to.
        #
        # uni/infra/accportprof-<if_Prof>/hports-<if_Selector>-typ-range/portblk-4e72096af1945b11
        [status, payload] = self.query_class('infraPortBlk')
        if status != 200:
            return None
        json_data = payload['imdata']
        for obj in json_data:
            dn = obj['infraPortBlk']['attributes']['dn']
            module = (int)(obj['infraPortBlk']['attributes']['fromCard'])
            fromPort = (int)(obj['infraPortBlk']['attributes']['fromPort'])
            toPort = (int)(obj['infraPortBlk']['attributes']['toPort'])
            descr = obj['infraPortBlk']['attributes']['descr']
            reg = re.search(r'accportprof-(.*?)\/hports-(.*?)-typ-range\/portblk', dn)
            intProf = reg.group(1)
            intSel = reg.group(2)
            # we now cycle on the nodes that have that intSelection profile, and add all the ports
            for node_id in node_data:
                for prof in node_data[node_id]['intProf']:
                    if intProf == prof:
                        # here all intProf should be there
                        for port_id in range(fromPort,toPort+1):
                            port = str(module)+'/'+str(port_id)
                            if not port in node_data[node_id]['ports']:
                                node_data[node_id]['ports'][port] = {}
                            node_data[node_id]['ports'][port]['intSel'] = intSel
                            node_data[node_id]['ports'][port]['descr'] = descr
                            node_data[node_id]['ports'][port]['intProf'] = intProf
        # for every intSelector, we have the sum of the range of ports PLUS the policy group
        #
        # uni/infra/accportprof-<if_Prof>/hports-<if_Selector>-typ-range/rsaccBaseGrp
        [status, payload] = self.query_class('infraRsAccBaseGrp')
        if status != 200:
            return None
        json_data = payload['imdata']
        for obj in json_data:
            dn = obj['infraRsAccBaseGrp']['attributes']['dn']
            reg = re.search(r'accportprof-(.*?)\/hports-(.*?)-typ-range', dn)
            intProf = reg.group(1)
            intSel = reg.group(2)
            # uni/infra/funcprof/accbundle-<pol_Grp>
            polGrp_dn = obj['infraRsAccBaseGrp']['attributes']['tDn']
            reg = re.search(r'acc(bundle|portgrp)-(.*)', polGrp_dn)
            polGrp = reg.group(2).strip()
            if reg.group(1) == 'bundle':
                port_type = 'bundle'
            else:
                port_type = 'access'
            for node_id in node_data:
                for intf in node_data[node_id]['ports']:
                    if node_data[node_id]['ports'][intf]['intSel'] == intSel:
                        node_data[node_id]['ports'][intf]['polGrp'] = polGrp
                        node_data[node_id]['ports'][intf]['type'] = port_type
        # Debug aid: prints all retrieved data to stdout. (The old
        # comment said "uncomment to print", but this block is live.)
        for node_id in sorted(node_data):
            for intf in sorted(node_data[node_id]['ports']):
                intProf = node_data[node_id]['ports'][intf]['intProf']
                intSel = node_data[node_id]['ports'][intf]['intSel']
                polGrp = node_data[node_id]['ports'][intf]['polGrp']
                descr = node_data[node_id]['ports'][intf]['descr']
                port_type = node_data[node_id]['ports'][intf]['type']
                swProf = node_data[node_id]['intProf'][intProf]
                print('Node '+str(node_id)+' interface "'+intf+'":')
                print(' ---> selected by "'+intSel+'" is used by "'+intProf+'"')
                print(' ---> "'+intProf+'\" is used by "'+swProf+'"')
                print(' ---> "'+swProf+'" swSel sons are "'+(','.join(node_data[node_id]['swProf'][swProf]))+'"')
                print(' ---> attached polGrp "'+polGrp+'" description "'+descr+'" mode "'+port_type+'"\n')
        return node_data
    # This function queries all the tenants information regarding vrf, bd,
    # subnets, application profiles, epg and stores all the most important data
    # (i.e. not all the parameters of every object) in a dictionary of dictionaries.
    #
    # Queries to the apic are time expensive, for this reason it is usually more
    # efficient to perform less queries, retrieve more data and process it locally.
    #
    # apic_data[ten_name]['vrf_list'][vrf_name][bd_name]['ip'] = [], list of subnets
    # apic_data[ten_name]['anp_list'][ap_name][epg] = {}
    # apic_data[ten_name]['bd_list'][bd_name]['ip'] = [], list of subnets
    # apic_data[ten_name]['bd_list'][bd_name]['vrf'] = vrf
    #
    # the third and fourth row are used to easily get the vrf to which a certain
    # BD is associated, without searching on the data tree built in the first row.
    def query_all_tenants(self):
        """Build the tenant/vrf/bd/anp/epg map described above.

        Returns None when any of the underlying queries fails.
        """
        apic_data = {}
        # TENANTS and VRF
        [status, payload] = self.query_class('fvCtx')
        if (status != 200):
            return None
        json_data = payload['imdata']
        for obj in json_data:
            dn = obj['fvCtx']['attributes']['dn']
            reg = re.search(r'\/tn-(.*?)\/ctx-(.*)', dn)
            ten_name = reg.group(1)
            vrf = reg.group(2).strip()
            if not ten_name in apic_data:
                apic_data[ten_name] = {}
                apic_data[ten_name]['vrf_list'] = {}
                apic_data[ten_name]['anp_list'] = {}
                apic_data[ten_name]['bd_list'] = {}
            apic_data[ten_name]['vrf_list'][vrf]={}
        # APPLICATION PROFILES
        [status, payload] = self.query_class('fvAp')
        if (status != 200):
            return None
        json_data = payload['imdata']
        for obj in json_data:
            dn = obj['fvAp']['attributes']['dn']
            reg = re.search(r'uni\/tn-(.*?)\/ap-(.*)', dn)
            ten_name = reg.group(1)
            app = reg.group(2).strip()
            apic_data[ten_name]['anp_list'][app]={}
        # BRIDGE DOMAINS, we query all bridge domains for which a vrf has been configured
        [status, payload] = self.query_class('fvRsCtx')
        if (status != 200):
            return None
        json_data = payload['imdata']
        for obj in json_data:
            dn = obj['fvRsCtx']['attributes']['dn']
            reg = re.search(r'\/tn-(.*?)\/BD-(.*?)\/rsctx', dn)
            ten_name = reg.group(1)
            bd_name = reg.group(2)
            tdn = obj['fvRsCtx']['attributes']['tDn']
            vrf = re.search(r'uni\/tn-(.*?)\/ctx-(.*)', tdn).group(2).strip()
            apic_data[ten_name]['vrf_list'][vrf][bd_name]={}
            # there can be multiple ip subnets associated to a BD
            apic_data[ten_name]['vrf_list'][vrf][bd_name]['ip'] = []
            apic_data[ten_name]['bd_list'][bd_name]={}
            apic_data[ten_name]['bd_list'][bd_name]['vrf'] = vrf
            # there can be multiple ip subnets associated to a BD
            apic_data[ten_name]['bd_list'][bd_name]['ip'] = []
        # BRIDGE DOMAIN SUBNETS
        [status, payload] = self.query_class('fvSubnet')
        if (status != 200):
            return None
        json_data = payload['imdata']
        for obj in json_data:
            dn = obj['fvSubnet']['attributes']['dn']
            # uni/tn-<tn_name>/BD-<bd_name>/subnet-[<subnet>]
            # there are also the following objects, we skip them
            # uni/tn-<tn_name>/ap-<anp_name>/epg-<epg_name>/subnet-[<subnet>]
            reg = re.search(r'\/tn-(.*?)\/BD-(.*?)\/subnet-\[(.*)\]', dn)
            if reg is None:
                continue
            ten_name = reg.group(1)
            bd_name = reg.group(2)
            ip = reg.group(3)
            # here we easily retrieve the vrf associated to the bd
            vrf = apic_data[ten_name]['bd_list'][bd_name]['vrf']
            apic_data[ten_name]['vrf_list'][vrf][bd_name]['ip'].append(ip)
            apic_data[ten_name]['bd_list'][bd_name]['ip'].append(ip)
        # EPG
        [status, payload] = self.query_class('fvAEPg')
        if (status != 200):
            return None
        json_data = payload['imdata']
        for obj in json_data:
            dn = obj['fvAEPg']['attributes']['dn']
            # uni/tn-<tn_name>/ap-<anp_name>/epg-<epg_name>
            reg = re.search(r'\/tn-(.*?)\/ap-(.*?)\/epg-(.*)', dn)
            ten_name = reg.group(1)
            anp_prof = reg.group(2)
            epg = reg.group(3).strip()
            apic_data[ten_name]['anp_list'][anp_prof][epg]={}
        '''
        for ten_name in apic_data:
            for vrf_name in apic_data[ten_name]['vrf_list']:
                for bd in apic_data[ten_name]['vrf_list'][vrf_name]:
                    print ('TENANT: "{}", vrf: "{}", BD: "{}", subnets: {}'\
                           .format(ten_name,vrf_name,bd,', '\
                           .join(apic_data[ten_name]['vrf_list'][vrf_name][bd]['ip'])))
        for ten_name in apic_data:
            for app_name in apic_data[ten_name]['anp_list']:
                for epg in apic_data[ten_name]['anp_list'][app_name]:
                    print ('TENANT: "{}", ANP: "{}", BD: "{}"'.format(ten_name,app_name,epg))
        '''
        return apic_data
    # This function performs queries to the fabric and retrieves the configured
    # vPC, and return an hash where the key is the policy group applied to the
    # channel/vpc, and the value is its DN.
    def query_vpc(self):
        """Map each vPC policy-group name to its path-endpoint DN.

        Returns None when any of the underlying queries fails.
        """
        [status, payload] = self.query_url('/api/class/fabricProtPathEpCont.json')
        if status != 200:
            return None
        json_data = payload['imdata']
        vpc_dn = {}
        for res in json_data:
            dn = res['fabricProtPathEpCont']['attributes']['dn']
            [status, payload] = self.query_url('/api/mo/'+dn+'.json?query-target=children')
            if status != 200:
                return None
            vpc_data = payload['imdata']
            for elem in vpc_data:
                dn = elem['fabricPathEp']['attributes']['dn']
                vpc_dn[re.search(r"pathep-\[(.*)\]",dn).group(1)] = dn
        return vpc_dn
# Class must be instantiated with APIC IP address and cookies
class FabCfgMgmt(object):
    """Fabric configuration management: remote paths, backups and restores.

    Payloads are rendered from JSON Jinja2 templates located under
    ``<json_path>/FabCfgMgmt/`` and POSTed to the APIC with the session
    cookies supplied at construction time.
    """
    def __init__(self, apic, cookies):
        self.apic = apic
        self.cookies = cookies
        self.templateLoader = jinja2.FileSystemLoader(
            searchpath=(json_path + 'FabCfgMgmt/'))
        self.templateEnv = jinja2.Environment(loader=self.templateLoader)

    # Method must be called with the following kwargs. Note only supports
    # SCP at this time (could easily add SFTP or FTP if needed though)
    # name = name of the remote location
    # ip = IP of the remote location
    # path = Path on the remote location
    # user = username for remote location
    # pword = password (sent in clear text) for the remote location
    # status = created | created,modified | deleted
    def remote_path(self, **kwargs):
        """Create/modify/delete a remote SCP path object on the fabric."""
        required_args = {'name': '',
                         'ip': '',
                         'path': '',
                         'user': '',
                         'pword': '',
                         'status': ''}
        optional_args = {}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        # ipaddress.ip_address() raises ValueError on invalid input rather
        # than returning a falsy value, so the old truthiness check could
        # never reach the InvalidArg raise; catch the exception instead.
        try:
            ipaddress.ip_address(templateVars['ip'])
        except ValueError:
            raise InvalidArg('Address must be a valid IPv4 address')
        if templateVars['status'] not in valid_status:
            raise InvalidArg('Status invalid')
        template_file = "remote_path.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        uri = 'mo/uni/fabric/path-{}'.format(templateVars['name'])
        status = post(self.apic, payload, self.cookies, uri, template_file)
        return status

    # Method must be called with the following kwargs.
    # name = name of the snapshot itself
    # snapshot = true | false - if true it creates an export policy and
    #            takes a snapshot, if false it simply creates an export policy
    # status = created | created,modified | deleted
    # path = (Optional) remote path for export (can be left blank for snapshot)
    def backup(self, **kwargs):
        """Create a configuration-export policy (optionally taking a snapshot)."""
        required_args = {'name': '',
                         'snapshot': '',
                         'status': ''}
        optional_args = {'path': ''}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        if templateVars['status'] not in valid_status:
            raise InvalidArg('Status invalid')
        template_file = "backup.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        uri = 'mo/uni/fabric/configexp-{}'.format(templateVars['name'])
        status = post(self.apic, payload, self.cookies, uri, template_file)
        return status

    # Method must be called with the following kwargs.
    # name = name of the import object itself
    # filename = name of the file to import
    # path = name of the remote path object where the file lives
    def replace(self, **kwargs):
        """Create a configuration-import policy pulling a file from a remote path."""
        required_args = {'name': '',
                         'filename': '',
                         'path': ''}
        optional_args = {}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        template_file = "replace.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        uri = 'mo/uni/fabric/configimp-{}'.format(templateVars['name'])
        status = post(self.apic, payload, self.cookies, uri, template_file)
        return status

    # Method must be called with the following kwargs.
    # name = name of the snapshot itself (note you need to put the file
    #        extension in yourself)
    def snapback(self, **kwargs):
        """Roll the fabric back to a previously taken snapshot file."""
        required_args = {'name': ''}
        optional_args = {}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        template_file = "snapback.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        uri = 'mo/uni/fabric/configimp-default'
        status = post(self.apic, payload, self.cookies, uri, template_file)
        return status
# Class must be instantiated with APIC IP address and cookies
class FabAdminMgmt(object):
    """Admin-plane management: local users, OOB and in-band management.

    Payloads are rendered from JSON Jinja2 templates located under
    ``<json_path>/FabAdminMgmt/`` and POSTed to the APIC.
    """
    def __init__(self, apic, cookies):
        self.apic = apic
        self.cookies = cookies
        self.templateLoader = jinja2.FileSystemLoader(
            searchpath=(json_path + 'FabAdminMgmt/'))
        self.templateEnv = jinja2.Environment(loader=self.templateLoader)

    @staticmethod
    def _require_int(value, message):
        """Return ``int(value)`` or raise InvalidArg(message).

        Replaces the previous ``if not int(x)`` pattern, which leaked an
        unhandled ValueError for non-numeric input (so InvalidArg was never
        actually raised) and incorrectly rejected a legitimate value of 0.
        """
        try:
            return int(value)
        except (TypeError, ValueError):
            raise InvalidArg(message)

    # Method must be called with the following kwargs.
    # user: Username for user to be created/modified
    # status: created | created,modified | deleted
    # pwd: Password of user
    def user(self, **kwargs):
        """Create/modify/delete a local APIC user."""
        required_args = {'user': '',
                         'status': '',
                         'pwd': ''}
        optional_args = {}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        if templateVars['status'] not in valid_status:
            raise InvalidArg('Status invalid')
        template_file = "user.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        uri = 'mo/uni/userext/user-{}'.format(templateVars['user'])
        status = post(self.apic, payload, self.cookies, uri, template_file)
        return status

    # Method must be called with the following kwargs.
    # address: node ip
    # gateway: gateway IP
    # pod: Pod Node Lives in
    # id: Node id
    def oob_mgmt(self, **kwargs):
        """Assign an out-of-band management address to a fabric node."""
        required_args = {'address': '',
                         'gateway': '',
                         'pod': '',
                         'status': '',
                         'id': ''}
        optional_args = {}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        templateVars['id'] = self._require_int(
            templateVars['id'], 'ID must be an integer')
        templateVars['pod'] = self._require_int(
            templateVars['pod'], 'Pod must be an integer')
        if templateVars['status'] not in valid_status:
            raise InvalidArg('Status invalid')
        template_file = "oob_mgmt.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        uri = 'mo/uni/tn-mgmt'
        status = post(self.apic, payload, self.cookies, uri, template_file)
        return status

    # Method must be called with the following kwargs.
    # name: name of in band EPG
    # vlan: vlan to be used for inb
    # status: created | created,modified | deleted
    def inb_epg(self, **kwargs):
        """Create/modify/delete the in-band management EPG."""
        required_args = {'name': '',
                         'vlan': '',
                         'status': ''}
        optional_args = {}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        templateVars['vlan'] = self._require_int(
            templateVars['vlan'], 'VLAN IDs must be an integer')
        if templateVars['status'] not in valid_status:
            raise InvalidArg('Status invalid')
        template_file = "inb_epg.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        uri = 'mo/uni/tn-mgmt'
        status = post(self.apic, payload, self.cookies, uri, template_file)
        return status

    # Method must be called with the following kwargs.
    # name: name of in band EPG
    # contract: contract to be applied
    # status: created | created,modified | deleted
    def inb_epg_consume(self, **kwargs):
        """Attach a consumed contract to the in-band management EPG."""
        required_args = {'name': '',
                         'contract': '',
                         'status': ''}
        optional_args = {}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        if templateVars['status'] not in valid_status:
            raise InvalidArg('Status invalid')
        template_file = "inb_epg_consume.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        uri = 'mo/uni/tn-mgmt'
        status = post(self.apic, payload, self.cookies, uri, template_file)
        return status

    # Method must be called with the following kwargs.
    # name: name of in band EPG
    # contract: contract to be applied
    # status: created | created,modified | deleted
    def inb_epg_provide(self, **kwargs):
        """Attach a provided contract to the in-band management EPG."""
        required_args = {'name': '',
                         'contract': '',
                         'status': ''}
        optional_args = {}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        if templateVars['status'] not in valid_status:
            raise InvalidArg('Status invalid')
        template_file = "inb_epg_provide.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        uri = 'mo/uni/tn-mgmt'
        status = post(self.apic, payload, self.cookies, uri, template_file)
        return status

    # Method must be called with the following kwargs.
    # address: node ip
    # gateway: gateway IP
    # pod: Pod Node Lives in (optional, defaults to '1')
    # id: Node id
    def inb_mgmt(self, **kwargs):
        """Assign an in-band management address to a fabric node."""
        required_args = {'address': '',
                         'gateway': '',
                         'inb_epg_name': '',
                         'status': '',
                         'id': ''}
        optional_args = {'pod': '1'}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        templateVars['id'] = self._require_int(
            templateVars['id'], 'ID must be an integer')
        templateVars['pod'] = self._require_int(
            templateVars['pod'], 'Pod must be an integer')
        if templateVars['status'] not in valid_status:
            raise InvalidArg('Status invalid')
        template_file = "inb_mgmt.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        uri = 'mo/uni/tn-mgmt'
        status = post(self.apic, payload, self.cookies, uri, template_file)
        return status
# Class must be instantiated with APIC IP address and cookies
class FabVMM(object):
    """VMware VMM integration: vCenter domain, AEP binding, vSwitch policy.

    Payloads are rendered from JSON Jinja2 templates located under
    ``<json_path>/FabVMM/`` and POSTed to the APIC.
    """
    def __init__(self, apic, cookies):
        self.apic = apic
        self.cookies = cookies
        self.templateLoader = jinja2.FileSystemLoader(
            searchpath=(json_path + 'FabVMM/'))
        self.templateEnv = jinja2.Environment(loader=self.templateLoader)

    # Method must be called with the following kwargs.
    # name: The name of the VMware VMM Domain to create
    # host: IP of the vCenter
    # vl_pool: VLAN pool for the domain (rendered into the template)
    # dc: Name of the datacenter in vCenter (case sensitive)
    # user: vCenter user name (must have correct permissions)
    # pwd: vCenter user password
    # status: created | created,modified | deleted
    def vcenter(self, **kwargs):
        """Create/modify/delete a VMware VMM domain bound to a vCenter."""
        tvars = process_kwargs(
            {'name': '', 'host': '', 'vl_pool': '', 'dc': '',
             'user': '', 'pwd': '', 'status': ''},
            {}, **kwargs)
        if tvars['status'] not in valid_status:
            raise InvalidArg('Status invalid')
        template_file = "vcenter.json"
        payload = self.templateEnv.get_template(template_file).render(tvars)
        uri = ('mo/uni/vmmp-VMware/dom-{}'
               .format(tvars['name']))
        return post(self.apic, payload, self.cookies, uri, template_file)

    # Method must be called with the following kwargs.
    # name: The name of the VMware VMM Domain to create
    # aep: The name of the AEP to associate to the VMM Domain
    # status: created | created,modified | deleted
    def vcenter_aep(self, **kwargs):
        """Associate a VMM domain to an Attachable Entity Profile."""
        tvars = process_kwargs(
            {'name': '', 'aep': '', 'status': ''},
            {}, **kwargs)
        if tvars['status'] not in valid_status:
            raise InvalidArg('Status invalid')
        template_file = "vcenter_aep.json"
        payload = self.templateEnv.get_template(template_file).render(tvars)
        uri = ('mo/uni/infra/attentp-{}'
               .format(tvars['aep']))
        return post(self.apic, payload, self.cookies, uri, template_file)

    # Method must be called with the following kwargs.
    # name: The name of the VMM Domain
    # status: created | created,modified | deleted
    # Optional: cdp_pol / lldp_pol / dom_type override the defaults below.
    def vswitch_pol(self, **kwargs):
        """Apply CDP/LLDP vSwitch policies to a VMM domain."""
        tvars = process_kwargs(
            {'name': '', 'status': ''},
            {'cdp_pol': 'CDP-Enabled',
             'lldp_pol': 'LLDP-Disabled',
             'dom_type': 'VMware'},
            **kwargs)
        if tvars['status'] not in valid_status:
            raise InvalidArg('Status invalid')
        template_file = "vswitch_pol.json"
        payload = self.templateEnv.get_template(template_file).render(tvars)
        uri = ('mo/uni/vmmp-VMware/dom-{}'
               .format(tvars['name']))
        return post(self.apic, payload, self.cookies, uri, template_file)
# Class must be instantiated with APIC IP address and cookies
class Mpod(object):
    """Multi-Pod configuration: spine access policies, pod init, IPN L3Out.

    Payloads are rendered from JSON Jinja2 templates located under
    ``<json_path>/Mpod/`` and POSTed to the APIC.
    """
    def __init__(self, apic, cookies):
        self.apic = apic
        self.cookies = cookies
        self.templateLoader = jinja2.FileSystemLoader(
            searchpath=(json_path + 'Mpod/'))
        self.templateEnv = jinja2.Environment(loader=self.templateLoader)

    @staticmethod
    def _require_int(value, message):
        """Return ``int(value)`` or raise InvalidArg(message).

        Replaces the previous ``if not int(x)`` pattern, which leaked an
        unhandled ValueError for non-numeric input (so InvalidArg was never
        actually raised) and incorrectly rejected a legitimate value of 0.
        """
        try:
            return int(value)
        except (TypeError, ValueError):
            raise InvalidArg(message)

    # Method must be called with the following kwargs.
    # name: name of the spine policy group
    # cdp: name of the cdp policy
    # aep: name of the AEP
    # int: name of the interface policy
    # status: created | created,modified | deleted
    def spine_pol_grp(self, **kwargs):
        """Create/modify/delete a spine access-port policy group."""
        required_args = {'name': '',
                         'cdp': '',
                         'aep': '',
                         'int': '',
                         'status': ''}
        optional_args = {}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        if templateVars['status'] not in valid_status:
            raise InvalidArg('Status invalid')
        template_file = "spine_pol_grp.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        uri = ('mo/uni/infra/funcprof/spaccportgrp-{}'
               .format(templateVars['name']))
        status = post(self.apic, payload, self.cookies, uri, template_file)
        return status

    # Method must be called with the following kwargs.
    # name: name of the spine interface profile
    # port_name: name of the interface selector
    # mod_start: integer for starting module (blade)
    # mod_end: integer for ending module (blade)
    # port_start: integer for starting port id
    # port_end: integer for ending port id
    # pol_grp: policy group to apply to the selector
    # status: created | created,modified | deleted
    def spine_int_pro(self, **kwargs):
        """Create/modify/delete a spine interface profile and selector block."""
        required_args = {'name': '',
                         'port_name': '',
                         'mod_start': '',
                         'mod_end': '',
                         'port_start': '',
                         'port_end': '',
                         'pol_grp': '',
                         'status': ''}
        optional_args = {}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        # All module/port boundaries must be integers for the port block.
        for key in ('mod_start', 'mod_end', 'port_start', 'port_end'):
            templateVars[key] = self._require_int(
                templateVars[key], 'ID must be an integer')
        if templateVars['status'] not in valid_status:
            raise InvalidArg('Status invalid')
        template_file = "spine_int_pro.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        uri = ('mo/uni/infra/spaccportprof-{}'
               .format(templateVars['name']))
        status = post(self.apic, payload, self.cookies, uri, template_file)
        return status

    # Method must be called with the following kwargs.
    # name: name of the spine switch profile
    # spine_sel_name: name of the spine selector
    # id: integer id of the spine node
    # int_sel: name of the spine interface selector
    # status: created | created,modified | deleted
    def spine_sw_pro(self, **kwargs):
        """Create/modify/delete a spine switch profile bound to an interface selector."""
        required_args = {'name': '',
                         'spine_sel_name': '',
                         'id': '',
                         'int_sel': '',
                         'status': ''}
        optional_args = {}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        templateVars['id'] = self._require_int(
            templateVars['id'], 'ID must be an integer')
        if templateVars['status'] not in valid_status:
            raise InvalidArg('Status invalid')
        template_file = "spine_sw_pro.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        uri = ('mo/uni/infra/spprof-{}'
               .format(templateVars['name']))
        status = post(self.apic, payload, self.cookies, uri, template_file)
        return status

    # Method must be called with the following kwargs.
    # pod_id: integer of the pod ID to initialize
    # tep_pool: CIDR notation for pod TEP pool range
    # status: created | created,modified | deleted
    def init_pod(self, **kwargs):
        """Initialize a pod with its TEP pool."""
        required_args = {'pod_id': '',
                         'tep_pool': '',
                         'status': ''}
        optional_args = {}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        templateVars['pod_id'] = self._require_int(
            templateVars['pod_id'], 'ID must be an integer')
        if templateVars['status'] not in valid_status:
            raise InvalidArg('Status invalid')
        template_file = "init_pod.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        uri = ('mo/uni/controller/setuppol/setupp-{}'
               .format(templateVars['pod_id']))
        status = post(self.apic, payload, self.cookies, uri, template_file)
        return status

    # Method must be called with the following kwargs.
    # EXPERIMENTAL: No support for multiple IPN connections per pod
    # conn_id: integer of the pod ID to initialize
    # name: Name of the IPN Profile
    # rt: (optional) default is the fabric default (as2-nn4:5:16)
    # pod1_dtep: IP for pod1 DTEP
    # pod2_dtep: IP for pod2 DTEP
    # route_prof_name: (optional) Name of the route Prof
    # subnet1: CIDR for Pod1 peering
    # subnet2: CIDR for Pod2 peering
    # status: created | created,modified | deleted
    def create_mpod(self, **kwargs):
        """Create the fabric external-connection profile for Multi-Pod."""
        required_args = {'conn_id': '',
                         'name': '',
                         'pod1_dtep': '',
                         'pod2_dtep': '',
                         'subnet1': '',
                         'subnet2': '',
                         'status': ''}
        optional_args = {'rt': 'extended:as2-nn4:5:16',
                         'route_prof_name': 'MpodRouteProf'}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        templateVars['conn_id'] = self._require_int(
            templateVars['conn_id'], 'ID must be an integer')
        if templateVars['status'] not in valid_status:
            raise InvalidArg('Status invalid')
        template_file = "create_mpod.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        uri = ('mo/uni/tn-infra/fabricExtConnP-{}'
               .format(templateVars['conn_id']))
        status = post(self.apic, payload, self.cookies, uri, template_file)
        return status

    # Method must be called with the following kwargs.
    # EXPERIMENTAL: No support for multiple IPN connections per pod
    # pod1_spine1:         node id of the pod-1 spine (integer)
    # pod1_spine1_int1:    IPN-facing interface on the pod-1 spine
    # pod1_spine1_int1_ip: IP for that interface
    # pod1_spine1_rtrid:   router ID for the pod-1 spine
    # pod2_spine1:         node id of the pod-2 spine (integer)
    # pod2_spine1_int1:    IPN-facing interface on the pod-2 spine
    # pod2_spine1_int1_ip: IP for that interface
    # pod2_spine1_rtrid:   router ID for the pod-2 spine
    # status: created | created,modified | deleted
    def mpod_l3_out(self, **kwargs):
        """Create the infra L3Out ('multipod') connecting both pods to the IPN."""
        required_args = {'pod1_spine1': '',
                         'pod1_spine1_int1': '',
                         'pod1_spine1_int1_ip': '',
                         'pod1_spine1_rtrid': '',
                         'pod2_spine1': '',
                         'pod2_spine1_int1': '',
                         'pod2_spine1_int1_ip': '',
                         'pod2_spine1_rtrid': '',
                         'status': ''}
        optional_args = {}
        templateVars = process_kwargs(required_args, optional_args, **kwargs)
        templateVars['pod1_spine1'] = self._require_int(
            templateVars['pod1_spine1'], 'ID must be an integer')
        templateVars['pod2_spine1'] = self._require_int(
            templateVars['pod2_spine1'], 'ID must be an integer')
        if templateVars['status'] not in valid_status:
            raise InvalidArg('Status invalid')
        template_file = "mpod_l3_out.json"
        template = self.templateEnv.get_template(template_file)
        payload = template.render(templateVars)
        uri = ('mo/uni/tn-infra/out-multipod')
        status = post(self.apic, payload, self.cookies, uri, template_file)
        return status
| 39.603294
| 113
| 0.571874
| 17,876
| 163,522
| 5.100694
| 0.045592
| 0.041193
| 0.021222
| 0.023031
| 0.845109
| 0.822724
| 0.802676
| 0.769829
| 0.752314
| 0.740009
| 0
| 0.005253
| 0.313199
| 163,522
| 4,128
| 114
| 39.612888
| 0.806628
| 0.208675
| 0
| 0.739591
| 0
| 0.000757
| 0.146601
| 0.026079
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046177
| false
| 0.001514
| 0.003407
| 0
| 0.10106
| 0.01022
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
76b0828bdb515a10a405cd66bbfdae971b8d70ff
| 11,381
|
py
|
Python
|
test/test_bit4id_pathgroup_tokens_api.py
|
signingtoday/signingtoday-sdk-python
|
ed267279622fb59f2ad8fa289157fc9cdf9d8a5b
|
[
"MIT"
] | null | null | null |
test/test_bit4id_pathgroup_tokens_api.py
|
signingtoday/signingtoday-sdk-python
|
ed267279622fb59f2ad8fa289157fc9cdf9d8a5b
|
[
"MIT"
] | null | null | null |
test/test_bit4id_pathgroup_tokens_api.py
|
signingtoday/signingtoday-sdk-python
|
ed267279622fb59f2ad8fa289157fc9cdf9d8a5b
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Signing Today API
*Signing Today* enables seamless integration of digital signatures into any
website by the use of easy requests to our API. This is the smart way of
adding digital signature support with a great user experience.


*Signing Today APIs* use HTTP methods and are RESTful based, moreover they
are protected by a *server to server authentication* standard by the use of
tokens.


*Signing Today APIs* can be used in these environments:


| Environment | Description | Endpoint |
| ----------- | ----------- | -------- |
| Sandbox     | Test environment | `https://sandbox.signingtoday.com` |
| Live        | Production environment | `https://api.signingtoday.com` |


For every single request to Signing Today has to be defined the following
*HTTP* header:
- `Authorization`, which contains the authentication token.

If the request has a body than another *HTTP* header is requested:
- `Content-Type`, with `application/json` value.


Follows an example of usage to enumerate all the user of *my-org*
organization.

**Example**

```json
$ curl https://sandbox.signingtoday.com/api/v1/my-org/users \
    -H 'Authorization: Token <access-token>'
```

## HTTP methods used

APIs use the right HTTP verb in every situation.

| Method   | Description                    |
| -------- | ------------------------------ |
| `GET`    | Request data from a resource   |
| `POST`   | Send data to create a resource |
| `PUT`    | Update a resource              |
| `PATCH`  | Partially update a resource    |
| `DELETE` | Delete a resource              |


## Response definition

All the response are in JSON format.
As response to a request of all users of an organization you will have a
result like this:

```json
{
    "pagination": {
      "count": 75,
      "previous": "https://sandbox.signingtoday.com/api/v1/my-org/users?page=1",
      "next": "https://sandbox.signingtoday.com/api/v1/my-org/users?page=3",
      "pages": 8,
      "page": 2
    },
    "meta": {
      "code": 200
    },
    "data": [
      {
        "id": "jdo",
        "status": "enabled",
        "type": "Basic user account",
        "email": johndoe@dummyemail.com,
        "first_name": "John",
        "last_name": "Doe",
        "wallet": [],
        "created_by": "system",
        "owner": false,
        "automatic": false,
        "rao": false
      },
      ...
    ]
  }
```

The JSON of the response is made of three parts:
- Pagination
- Meta
- Data

### Pagination

*Pagination* object allows to split the response into parts and then to
rebuild it sequentially by the use of `next` and `previous` parameters, by
which you get previous and following blocks. The *Pagination* is present
only if the response is a list of objects.

The general structure of *Pagination* object is the following:

```json
{
    "pagination": {
      "count": 75,
      "previous": "https://sandbox.signingtoday.com/api/v1/my-org/users?page=1",
      "next": "https://sandbox.signingtoday.com/api/v1/my-org/users?page=3",
      "pages": 8,
      "page": 2
    },
    ...
  }
```

### Meta

*Meta* object is used to enrich the information about the response. In the
previous example, a successful case of response, *Meta* will have value
`status: 2XX`. In case of unsuccessful response, *Meta* will have further
information, as follows:

```json
{
    "meta": {
      "code": <HTTP STATUS CODE>,
      "error_type": <STATUS CODE DESCRIPTION>,
      "error_message": <ERROR DESCRIPTION>
    }
  }
```

### Data

*Data* object outputs as object or list of them. Contains the expected data
as requested to the API.

## Search filters

Search filters of the API have the following structure:

`where_ATTRIBUTENAME`=`VALUE`

In this way you make a case-sensitive search of *VALUE*. You can extend it
through the Django lookup, obtaining more specific filters. For example:

`where_ATTRIBUTENAME__LOOKUP`=`VALUE`

where *LOOKUP* can be replaced with `icontains` to perform a partial,
case-insensitive search, where

`where_first_name__icontains`=`CHa`

matches every user that has the *cha* string in their name, regardless of
case.

[Here](https://docs.djangoproject.com/en/1.11/ref/models/querysets/#field-lookups)
the list of the lookups.

## Webhooks

Signing Today supports webhooks for the update of DSTs and identities status.
You can choose whether to use webhooks and whether to receive updates
about DSTs and/or identities. You can configure this at the application-token
level, in the *webhook* field, as follows:

```json
"webhooks": {
  "dst": "URL",
  "identity": "URL"
  }
```

### DSTs status update

DSTs send the following status updates:
- **DST_STATUS_CHANGED**: whenever the DST changes its status
- **SIGNATURE_STATUS_CHANGED**: whenever one of the signatures changes its
status

#### DST_STATUS_CHANGED

Sends the following information:

```json
{
    "message": "DST_STATUS_CHANGED",
    "data": {
      "status": "<DST_STATUS>",
      "dst": "<DST_ID>",
      "reason": "<DST_REASON>"
    }
  }
```

#### SIGNATURE_STATUS_CHANGED

Sends the following information:

```json
{
    "message": "SIGNATURE_STATUS_CHANGED",
    "data": {
      "status": "<SIGNATURE_STATUS>",
      "group": <MEMBERSHIP_GROUP_INDEX>,
      "dst": {
        "id": "<DST_ID>",
        "title": "<DST_TITLE>"
      },
      "signature": "<SIGNATURE_ID>",
      "signer": "<SIGNER_USERNAME>",
      "position": "<SIGNATURE_POSITION>",
      "document": {
        "display_name": "<DOCUMENT_TITLE>",
        "id": "<DOCUMENT_ID>",
        "order": <DOCUMENT_INDEX>
      },
      "automatic": <DECLARES_IF_THE_SIGNER_IS_AUTOMATIC>,
      "page": "<SIGNATURE_PAGE>"
    }
  }
```

### Identities status update

Identities send the following status updates:
- **IDENTITY_REQUEST_ENROLLED**: whenever an identity request is activated

#### IDENTITY_REQUEST_ENROLLED

Sends the following information:

```json
{
    "message": "IDENTITY_REQUEST_ENROLLED",
    "data": {
      "status": "<REQUEST_STATUS>",
      "request": "<REQUEST_ID>",
      "user": "<APPLICANT_USERNAME>"
    }
  }
```

### Urlback

Sometimes it may be necessary to redirect a user after they have completed
their operations in the signature tray or activated a certificate.

If set, redirects could happen in 3 cases:
- after a signature or decline
- after a DST has been signed by all the signers or canceled
- after the activation of a certificate

In the first two cases the urlback returns the following information through
a data form:
- **dst-id**: id of the DST
- **dst-url**: signature_ticket of the signature
- **dst-status**: current status of the DST
- **dst-signature-id**: id of the signature
- **dst-signature-status**: current status of the signature
- **user**: username of the signer
- **decline-reason**: in case of a refused DST contains the reason of the
decline

In the last case the urlback returns the following information through a
data form:
- **user**: username of the user who activated the certificate
- **identity-provider**: the provider has been used to issue the certificate
- **identity-request-id**: id of the enrollment request
- **identity-id**: id of the new identity
- **identity-label**: the label assigned to the identity
- **identity-certificate**: public key of the certificate


 # noqa: E501
The version of the OpenAPI document: 1.5.0
Contact: smartcloud@bit4id.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import signing_today_client
from signing_today_client.api.bit4id_pathgroup_tokens_api import Bit4idPathgroupTokensApi # noqa: E501
from signing_today_client.rest import ApiException
class TestBit4idPathgroupTokensApi(unittest.TestCase):
    """Unit test stubs for Bit4idPathgroupTokensApi (auto-generated skeleton)."""

    def setUp(self):
        # Fresh API client instance for every test case.
        self.api = signing_today_client.api.bit4id_pathgroup_tokens_api.Bit4idPathgroupTokensApi()  # noqa: E501

    def tearDown(self):
        pass

    def test_create_token(self):
        """Test case for create_token

        Create an application token  # noqa: E501
        """
        pass

    def test_delete_token(self):
        """Test case for delete_token

        Delete a token of the organization  # noqa: E501
        """
        pass

    def test_get_token(self):
        """Test case for get_token

        Get information about a token  # noqa: E501
        """
        pass

    def test_list_tokens(self):
        """Test case for list_tokens

        Enumerate the tokens of an organization  # noqa: E501
        """
        pass

    def test_list_user_tokens(self):
        """Test case for list_user_tokens

        Enumerate the tokens of an user  # noqa: E501
        """
        pass

    def test_update_token(self):
        """Test case for update_token

        Update the properties of a token  # noqa: E501
        """
        pass


if __name__ == '__main__':
    unittest.main()
| 147.805195
| 9,710
| 0.947281
| 212
| 11,381
| 50.632075
| 0.353774
| 0.006708
| 0.006149
| 0.008385
| 0.040246
| 0.029253
| 0.008385
| 0.008385
| 0
| 0
| 0
| 0.092991
| 0.040945
| 11,381
| 76
| 9,711
| 149.75
| 0.890426
| 0.911431
| 0
| 0.291667
| 0
| 0
| 0.009512
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.291667
| 0.208333
| 0
| 0.583333
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
76b4d0b34d7b28bd583415a7e40328035d0120e4
| 9,642
|
py
|
Python
|
src/rose-upset.py
|
MountainMan12/rose2018ng-notebook
|
2c907f0d599a50c23c487984e6a0fe0364d3651b
|
[
"MIT"
] | 8
|
2019-09-20T15:47:54.000Z
|
2021-11-01T02:05:17.000Z
|
src/rose-upset.py
|
MountainMan12/rose2018ng-notebook
|
2c907f0d599a50c23c487984e6a0fe0364d3651b
|
[
"MIT"
] | 9
|
2020-03-24T16:53:29.000Z
|
2022-01-13T01:07:07.000Z
|
src/rose-upset.py
|
MountainMan12/rose2018ng-notebook
|
2c907f0d599a50c23c487984e6a0fe0364d3651b
|
[
"MIT"
] | 3
|
2020-07-18T20:42:43.000Z
|
2021-04-14T11:31:42.000Z
|
from matplotlib import pyplot as plt
import pandas as pd
import os, errno
from upsetplot import plot

__author__ = 'proccaserra (Philippe Rocca-Serra)'
# author: philippe rocca-serra (philippe.rocca-serra@oerc.ox.ac.uk)
# ontology: http://www.stato-ontology.org

# Ensure the figure output directory exists; tolerate a concurrent create
# (EEXIST) but re-raise any other OS error such as a permission problem.
try:
    if not os.path.exists('../figures/denovo'):
        os.makedirs('../figures/denovo')
except OSError as e:
    if e.errno != errno.EEXIST:
        raise

# Volatile-compound names reported by each study.  Names were normalised to
# identifier-safe tokens (no parentheses, commas or spaces) so the same lists
# can also be fed to external plotting tools.
TableS1_Science2015 = ["E_E_farnesal","E_E_farnesol","E_E_farnesyl_acetate","E_2_hexen_1_ol","E_2_hexenal","E_beta_farnesene","E_beta_ocimene","Z_3_hexen_1_ol","Z_3_hexenyl_acetate","1_3_5_trimethoxybenzene","2_phenylethanol","3_5_dimethoxytoluene","alpha_cadinol","benzaldehyde","benzylalcohol","beta_myrcene","bicyclogermacrene","citronellol","delta_cadinene","dihydro_beta_ionol","dihydro_beta_ionone","elemol","eugenol","geranial","geranic_acid","geraniol","geranyl_acetate","germacrene_D","germacrene_D_4_ol","hexan_1_ol","hexanal","hexyl_acetate","methyl_eugenol","neral","nerol","nonanal","phenylacetaldehyde","tau_cadinol","tau_muurolol","Z_beta_ocimene"]
# Table S3 (original, un-normalised names for reference):
# ["(E,E)_farnesol","(E)_beta_farnesene","alpha_cadinol","beta_myrcene","bicyclogermacrene","citronellal","citronellol","delta_cadinene","geranial","geraniol","geranyl acetate","germacrene D","germacrene D_4_ol","limonene","linalool","neral","nerol","beta_caryophyllene","beta_elemene","beta_pinene","tau_cadinol","tau_muurolol","alpha_humulene","alpha_muurolene","alpha_muurolol","alpha_pinene"])
TableS3_Science2015 = ["E_E_farnesol","E_beta_farnesene","alpha_cadinol","beta_myrcene","bicyclogermacrene","citronellal","citronellol","delta_cadinene","geranial","geraniol","geranyl_acetate","germacrene_D","germacrene_D_4_ol","limonene","linalool","neral","nerol","beta_caryophyllene","beta_elemene","beta_pinene","tau_cadinol","tau_muurolol","alpha_humulene","alpha_muurolene","alpha_muurolol","alpha_pinene"]
set_NG2018 = ["hexan-2-ol","hexanal","E_2_hexenal","Z_3_hexen_1_ol","E_2_hexen_1_ol","hexan_1_ol","nonane","alpha_pinene","benzaldehyde","beta_myrcene","Z_3_hexenyl_acetate","hexyl_acetate","E_hexenyl_acetate","limonene","benzylalcohol","phenylacetaldehyde","E_beta_ocimene","linalool","nonanal","2_phenylethanol","beta_citronellal","alpha-terpineol","decanal","nerol","beta_citronellol","neral","geraniol","beta_phenylethyl_acetate","3_5_dimethoxytoluene","geranial","undecanal","theaspirane_A","beta_citronellyl_acetate","eugenol","neryl_acetate","alpha_copaene","geranyl_acetate","beta_elemene","methyl_eugenol","beta_caryophyllene","1_3_5_trimethoxybenzene","dihydro_beta_ionone","alpha_guaiene","dihydro_beta_ionol","E_beta_farnesene","germacrene_D","pentadecane","E_E_alpha_farnesene","gamma_cadinene","delta_cadinene","elemol","germacrene_D_4_ol","hexadecane","tau_cadinol","beta_eudesmol","alpha_cadinol","heptadecene","heptadecane","E_E_farnesol","E_E_farnesal","E_E_farnesyl_acetate"]

df1 = pd.DataFrame({'name': TableS1_Science2015})
df2 = pd.DataFrame({'name': TableS3_Science2015})
df3 = pd.DataFrame({'name': set_NG2018})

# Outer-merge the three name lists into a single membership table: one row per
# compound, one boolean column per source list.  merge(indicator=True) adds a
# "_merge" column telling which side(s) each row came from.
# BUG FIX: the original called .drop("_merge", 1); the positional `axis`
# argument was deprecated in pandas 1.0 and removed in pandas 2.0 — use the
# `columns=` keyword instead.  "_merge" is also accessed by subscription
# rather than attribute, since the column name starts with an underscore.
df4 = (df1.merge(df2, how='outer', indicator=True)
          .assign(TableS1_Science2015=lambda x: x["_merge"] != "right_only",
                  TableS3_Science2015=lambda x: x["_merge"] != "left_only")
          .drop(columns="_merge")
          .merge(df3, how='outer', indicator=True)
          .assign(set_NG2018=lambda x: x["_merge"] != "left_only")
          .drop(columns="_merge"))

# Every column except "name" is a boolean membership flag.
chemicals = [c for c in df4.columns if c != "name"]
# Count compounds per membership combination; a compound missing from a merge
# side has NaN in that membership column, which fillna(False) normalises.
chemicals_count_series = df4.fillna(False).groupby(chemicals).count()["name"]

plot(chemicals_count_series, sort_by="cardinality")
fig = plt.gcf()
fig.set_size_inches(8, 4, forward=True)
fig.savefig('../figures/denovo/Figure_2b-upset-plot-Science2015&NatGen2018.png', bbox_inches='tight')

# ///////////////////////////////\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
# NOTE: the three lists above were also used as input to the UpSetR web app
# to produce an alternative rendering of figure 2:
#   https://gehlenborglab.shinyapps.io/upsetr/
#   https://www.biorxiv.org/content/early/2017/03/25/120600.full.pdf+html
# Chemical names had to be altered (identifier-safe tokens) to allow the
# lists to be used as input to that software.
# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\////////////////////////////////////////
| 143.910448
| 994
| 0.841423
| 1,386
| 9,642
| 5.407648
| 0.142857
| 0.008806
| 0.017078
| 0.019079
| 0.874049
| 0.86004
| 0.86004
| 0.86004
| 0.86004
| 0.851768
| 0
| 0.025865
| 0.025617
| 9,642
| 66
| 995
| 146.090909
| 0.7719
| 0.663866
| 0
| 0
| 0
| 0
| 0.573959
| 0.04941
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.148148
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8c2426a84797c502fcc929baad7471df602059cf
| 2,788
|
py
|
Python
|
musket_core/tests/decl_test.py
|
dreamflyer/musket_core
|
1bdf1b4715a3b5c63bf687799d7b977fdf49053f
|
[
"MIT"
] | 1
|
2019-04-12T13:46:53.000Z
|
2019-04-12T13:46:53.000Z
|
musket_core/tests/decl_test.py
|
dreamflyer/musket_core
|
1bdf1b4715a3b5c63bf687799d7b977fdf49053f
|
[
"MIT"
] | 5
|
2018-12-12T11:49:05.000Z
|
2019-04-30T14:23:54.000Z
|
musket_core/tests/decl_test.py
|
dreamflyer/musket_core
|
1bdf1b4715a3b5c63bf687799d7b977fdf49053f
|
[
"MIT"
] | null | null | null |
import unittest
from musket_core import net_declaration
import keras
import os
# Directory containing this test module; used to resolve ../examples paths.
fl = os.path.dirname(__file__)
class TestStringMethods(unittest.TestCase):
    """Smoke tests: every example YAML under ../examples builds into a Keras model."""

    def testNetCreation(self):
        """Instantiate each example network declaration and print its summary.

        The original body repeated the same create_model/summary pair for
        every example; the cases are now data-driven.  Note that
        keras Model.summary() prints the summary itself and returns None, so
        the original ``print(m.summary())`` also printed a spurious "None" —
        the wrapping print() has been dropped.
        """

        def build(example, inputs):
            # Example YAMLs live next to this test module, under ../examples.
            return net_declaration.create_model(
                os.path.join(fl, "../examples/" + example), inputs)

        # Declarations taking a single input tensor: (yaml file, input shape).
        single_input_cases = [
            ("conditional.yaml", (200, 3)),
            ("example2.9.yaml", (200, 200, 3)),
            ("example2.8.yaml", (200, 200, 3)),
            ("example2.7.yaml", (200, 200, 3)),
            ("example2.6.yaml", (200, 200, 3)),
            ("example2.5.yaml", (200, 200, 3)),
            ("example2.4.yaml", (200, 200, 3)),
            ("example2.3.yaml", (200, 200)),
            ("example2.2.yaml", (200, 200)),
            ("example2.1.yaml", (200, 200)),
            ("example1.yaml", (200, 200)),
        ]
        for example, shape in single_input_cases:
            build(example, keras.layers.Input(shape)).summary()

        # Declarations taking two input tensors.
        for example in ("example2.yaml", "example3.yaml",
                        "inception.yaml", "simple.yaml"):
            model = build(example, [keras.layers.Input((200, 200)),
                                    keras.layers.Input((200, 200))])
            if example == "example3.yaml":
                # example3 declares a two-headed network.
                assert len(model.outputs) == 2
            model.summary()

        # bidirectional.yaml takes a one-element input list.
        build("bidirectional.yaml", [keras.layers.Input((200, 200))]).summary()
| 58.083333
| 149
| 0.656385
| 385
| 2,788
| 4.654545
| 0.14026
| 0.122768
| 0.178571
| 0.212054
| 0.835938
| 0.823103
| 0.823103
| 0.805804
| 0.805804
| 0.683036
| 0
| 0.075623
| 0.151004
| 2,788
| 47
| 150
| 59.319149
| 0.681453
| 0
| 0
| 0.238095
| 0
| 0
| 0.155053
| 0.155053
| 0
| 0
| 0
| 0
| 0.02381
| 1
| 0.02381
| false
| 0
| 0.095238
| 0
| 0.142857
| 0.357143
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8c25bd57ed3da7922c216569e07e63070966c580
| 64,374
|
py
|
Python
|
doajtest/unit/test_tasks_ingestDOAJarticles.py
|
DOAJ/doaj
|
b11f163c48f51f9e3ada2b02c617b50b847dcb4c
|
[
"Apache-2.0"
] | 47
|
2015-04-24T13:13:39.000Z
|
2022-03-06T03:22:42.000Z
|
doajtest/unit/test_tasks_ingestDOAJarticles.py
|
DOAJ/doaj
|
b11f163c48f51f9e3ada2b02c617b50b847dcb4c
|
[
"Apache-2.0"
] | 1,215
|
2015-01-02T14:29:38.000Z
|
2022-03-28T14:19:13.000Z
|
doajtest/unit/test_tasks_ingestDOAJarticles.py
|
DOAJ/doaj
|
b11f163c48f51f9e3ada2b02c617b50b847dcb4c
|
[
"Apache-2.0"
] | 14
|
2015-11-27T13:01:23.000Z
|
2021-05-21T07:57:23.000Z
|
from doajtest.helpers import DoajTestCase
from lxml import etree
from doajtest.mocks.bll_article import BLLArticleMockFactory
from doajtest.mocks.ftp import FTPMockFactory
from doajtest.mocks.file import FileMockFactory
from doajtest.mocks.response import ResponseMockFactory
from doajtest.mocks.xwalk import XwalkMockFactory
from portality.tasks import ingestarticles
from doajtest.fixtures.article_doajxml import DoajXmlArticleFixtureFactory
from doajtest.fixtures.accounts import AccountFixtureFactory
import time
from portality.crosswalks import article_doaj_xml
from portality.bll.services import article as articleSvc
from portality import models
from portality.core import app
from portality.background import BackgroundException
import ftplib, os, requests
from urllib.parse import urlparse
from portality.ui.messages import Messages
class TestIngestArticlesDoajXML(DoajTestCase):
@classmethod
def setUpClass(self):
    """Save the real lxml XMLSchema class so setUp can monkey-patch it."""
    super(TestIngestArticlesDoajXML, self).setUpClass()
    # Restored in tearDownClass.
    self.schema_old = etree.XMLSchema
@classmethod
def tearDownClass(self):
    """Restore the real lxml XMLSchema class patched out in setUp."""
    super(TestIngestArticlesDoajXML, self).tearDownClass()
    etree.XMLSchema = self.schema_old
def setUp(self):
    """Snapshot every global this suite monkey-patches, then pre-load the DOAJ schema."""
    super(TestIngestArticlesDoajXML, self).setUp()
    self.cleanup_ids = []    # FileUpload ids whose on-disk XML tearDown must remove
    self.cleanup_paths = []  # explicit file paths to delete in tearDown
    # Originals of everything individual tests replace with mocks; all are
    # restored in tearDown, so tests may patch freely.
    self.xwalk_validate = article_doaj_xml.DOAJXWalk.validate
    self.batch_create_articles = articleSvc.ArticleService.batch_create_articles
    self.head = requests.head
    self.get = requests.get
    self.ftp = ftplib.FTP
    self.upload_dir = app.config["UPLOAD_DIR"]
    self.ingest_articles_retries = app.config['HUEY_TASKS']['ingest_articles']['retries']
    # Parse the DOAJ article schema once and serve it via mock_load_schema,
    # so each test skips the expensive etree.XMLSchema compilation.
    # NOTE(review): schema_file is never closed — consider a with-block.
    schema_path = app.config.get("SCHEMAS", {}).get("doaj")
    schema_file = open(schema_path)
    schema_doc = etree.parse(schema_file)
    self.schema = etree.XMLSchema(schema_doc)
    etree.XMLSchema = self.mock_load_schema
def tearDown(self):
    """Restore all patched globals and delete files created by the test."""
    super(TestIngestArticlesDoajXML, self).tearDown()
    article_doaj_xml.DOAJXWalk.validate = self.xwalk_validate
    articleSvc.ArticleService.batch_create_articles = self.batch_create_articles
    requests.head = self.head
    requests.get = self.get
    ftplib.FTP = self.ftp
    app.config["UPLOAD_DIR"] = self.upload_dir
    app.config["HUEY_TASKS"]["ingest_articles"]["retries"] = self.ingest_articles_retries
    # Remove uploaded XML from the upload dir...
    for id in self.cleanup_ids:
        path = os.path.join(app.config.get("UPLOAD_DIR", "."), id + ".xml")
        if os.path.exists(path):
            os.remove(path)
    # ...and from the failed-article dir (failed uploads are moved there).
    for id in self.cleanup_ids:
        path = os.path.join(app.config.get("FAILED_ARTICLE_DIR", "."), id + ".xml")
        if os.path.exists(path):
            os.remove(path)
    # Finally, any explicitly-registered paths.
    for path in self.cleanup_paths:
        if os.path.exists(path):
            os.remove(path)
def mock_load_schema(self, doc):
    """Replacement for etree.XMLSchema: return the schema pre-parsed in setUp."""
    return self.schema
def test_01_doaj_file_upload_success(self):
    """A valid DOAJ XML file upload is stored on disk and marked 'validated'."""
    handle = DoajXmlArticleFixtureFactory.upload_1_issn_correct()
    f = FileMockFactory(stream=handle)
    previous = []
    id = ingestarticles.IngestArticlesBackgroundTask._file_upload("testuser", f, "doaj", previous)
    self.cleanup_ids.append(id)
    fu = models.FileUpload.pull(id)
    assert fu is not None
    assert fu.schema == "doaj"
    assert fu.status == "validated"
    # The uploaded XML should now exist in the configured upload dir.
    path = os.path.join(app.config.get("UPLOAD_DIR", "."), id + ".xml")
    assert os.path.exists(path)
    assert len(previous) == 1
def test_02_doaj_file_upload_invalid(self):
    """An upload failing schema validation is marked 'failed' and moved to the failed dir."""
    handle = DoajXmlArticleFixtureFactory.invalid_schema_xml()
    f = FileMockFactory(stream=handle)
    previous = []
    with self.assertRaises(BackgroundException):
        id = ingestarticles.IngestArticlesBackgroundTask._file_upload("testuser", f, "doaj", previous)
    assert len(previous) == 1
    id = previous[0].id
    self.cleanup_ids.append(id)
    fu = models.FileUpload.pull(id)
    assert fu is not None
    assert fu.status == "failed"
    assert fu.error is not None and fu.error != ""
    # BUG FIX: the second clause previously re-checked fu.error instead of
    # fu.error_details (copy-paste slip — compare test_18/test_21), so an
    # empty error_details string slipped through undetected.
    assert fu.error_details is not None and fu.error_details != ""
    assert list(fu.failure_reasons.keys()) == []
    # file should have been removed from upload dir
    path = os.path.join(app.config.get("UPLOAD_DIR", "."), id + ".xml")
    assert not os.path.exists(path)
    # and placed into the failed dir
    fad = os.path.join(app.config.get("FAILED_ARTICLE_DIR", "."), id + ".xml")
    assert os.path.exists(fad)
def test_03_doaj_file_upload_fail(self):
    """A validator crash during upload marks the record 'failed' with no error details."""
    # Patch the crosswalk validator with the mock that raises.
    article_doaj_xml.DOAJXWalk.validate = XwalkMockFactory.validate
    etree.XMLSchema = self.mock_load_schema
    handle = DoajXmlArticleFixtureFactory.upload_1_issn_correct()
    f = FileMockFactory(stream=handle)
    previous = []
    with self.assertRaises(BackgroundException):
        id = ingestarticles.IngestArticlesBackgroundTask._file_upload("testuser", f, "doaj", previous)
    assert len(previous) == 1
    id = previous[0].id
    self.cleanup_ids.append(id)
    fu = models.FileUpload.pull(id)
    assert fu is not None
    assert fu.status == "failed"
    assert fu.error is not None and fu.error != ""
    assert fu.error_details is None
    assert list(fu.failure_reasons.keys()) == []
    # file should have been removed from disk
    path = os.path.join(app.config.get("UPLOAD_DIR", "."), id + ".xml")
    assert not os.path.exists(path)
def test_04_doaj_url_upload_http_success(self):
    """URL registration succeeds whether or not the initial HEAD request works."""
    # first try with a successful HEAD request
    requests.head = ResponseMockFactory.head_success
    requests.get = ResponseMockFactory.doaj_get_success
    url = "http://success"
    previous = []
    id = ingestarticles.IngestArticlesBackgroundTask._url_upload("testuser", url, "doaj", previous)
    fu = models.FileUpload.pull(id)
    assert fu is not None
    assert fu.schema == "doaj"
    assert fu.status == "exists"
    assert len(previous) == 1
    # try that again, but with an unsuccessful HEAD request
    requests.head = ResponseMockFactory.head_fail
    previous = []
    id = ingestarticles.IngestArticlesBackgroundTask._url_upload("testuser", url, "doaj", previous)
    fu = models.FileUpload.pull(id)
    assert fu is not None
    assert fu.schema == "doaj"
    assert fu.status == "exists"
    assert len(previous) == 1
def test_05_doaj_url_upload_http_fail(self):
    """A failing GET, then an unsupported URL scheme, both record a 'failed' upload."""
    # try with failing http requests
    requests.head = ResponseMockFactory.head_fail
    requests.get = ResponseMockFactory.get_fail
    url = "http://fail"
    previous = []
    with self.assertRaises(BackgroundException):
        id = ingestarticles.IngestArticlesBackgroundTask._url_upload("testuser", url, "doaj", previous)
    assert len(previous) == 1
    id = previous[0].id
    fu = models.FileUpload.pull(id)
    assert fu is not None
    assert fu.status == "failed"
    assert fu.error is not None and fu.error != ""
    assert fu.error_details is None
    assert list(fu.failure_reasons.keys()) == []
    # now try again with an invalid url (scheme is neither http nor ftp)
    requests.head = ResponseMockFactory.head_success
    url = "other://url"
    previous = []
    with self.assertRaises(BackgroundException):
        id = ingestarticles.IngestArticlesBackgroundTask._url_upload("testuser", url, "doaj", previous)
    assert len(previous) == 1
    id = previous[0].id
    fu = models.FileUpload.pull(id)
    assert fu is not None
    assert fu.status == "failed"
    assert fu.error is not None and fu.error != ""
    assert fu.error_details is None
    assert list(fu.failure_reasons.keys()) == []
def test_06_doaj_url_upload_ftp_success(self):
    """An FTP URL accepted by the mock FTP client is recorded with status 'exists'."""
    ftplib.FTP = FTPMockFactory.create("doaj")
    url = "ftp://success"
    previous = []
    id = ingestarticles.IngestArticlesBackgroundTask._url_upload("testuser", url, "doaj", previous)
    fu = models.FileUpload.pull(id)
    assert fu is not None
    assert fu.schema == "doaj"
    assert fu.status == "exists"
    assert len(previous) == 1
def test_07_url_upload_ftp_fail(self):
    """An FTP URL rejected by the mock FTP client records a 'failed' upload."""
    ftplib.FTP = FTPMockFactory.create("doaj")
    url = "ftp://fail"
    previous = []
    with self.assertRaises(BackgroundException):
        id = ingestarticles.IngestArticlesBackgroundTask._url_upload("testuser", url, "doaj", previous)
    assert len(previous) == 1
    id = previous[0].id
    fu = models.FileUpload.pull(id)
    assert fu is not None
    assert fu.status == "failed"
    assert fu.error is not None and fu.error != ""
    assert fu.error_details is None
    assert list(fu.failure_reasons.keys()) == []
def test_08_doajxml_prepare_file_upload_success(self):
    """prepare() with a valid upload file creates a job carrying the upload id."""
    handle = DoajXmlArticleFixtureFactory.upload_1_issn_correct()
    f = FileMockFactory(stream=handle)
    previous = []
    job = ingestarticles.IngestArticlesBackgroundTask.prepare("testuser", upload_file=f, schema="doaj", previous=previous)
    assert job is not None
    assert "ingest_articles__file_upload_id" in job.params
    id = job.params.get("ingest_articles__file_upload_id")
    self.cleanup_ids.append(id)
    assert len(previous) == 1
    fu = models.FileUpload.pull(id)
    assert fu is not None
def test_09_prepare_file_upload_fail(self):
    """prepare() raises when validation crashes, but still records the upload."""
    # Patch the crosswalk validator with the mock that raises.
    article_doaj_xml.DOAJXWalk.validate = XwalkMockFactory.validate
    handle = DoajXmlArticleFixtureFactory.upload_1_issn_correct()
    f = FileMockFactory(stream=handle)
    previous = []
    with self.assertRaises(BackgroundException):
        job = ingestarticles.IngestArticlesBackgroundTask.prepare("testuser", upload_file=f, schema="doaj", previous=previous)
    assert len(previous) == 1
    id = previous[0].id
    self.cleanup_ids.append(id)
    fu = models.FileUpload.pull(id)
    assert fu is not None
def test_10_prepare_url_upload_success(self):
    """prepare() with a reachable URL creates a job carrying the upload id."""
    requests.head = ResponseMockFactory.head_success
    requests.get = ResponseMockFactory.doaj_get_success
    url = "http://success"
    previous = []
    job = ingestarticles.IngestArticlesBackgroundTask.prepare("testuser", url=url, schema="doaj", previous=previous)
    assert job is not None
    assert "ingest_articles__file_upload_id" in job.params
    id = job.params.get("ingest_articles__file_upload_id")
    self.cleanup_ids.append(id)
    assert len(previous) == 1
    fu = models.FileUpload.pull(id)
    assert fu is not None
def test_11_prepare_url_upload_fail(self):
    """prepare() raises for an unreachable URL, but still records the upload."""
    # try with failing http requests
    requests.head = ResponseMockFactory.head_fail
    requests.get = ResponseMockFactory.get_fail
    url = "http://fail"
    previous = []
    with self.assertRaises(BackgroundException):
        job = ingestarticles.IngestArticlesBackgroundTask.prepare("testuser", url=url, schema="doaj", previous=previous)
    assert len(previous) == 1
    id = previous[0].id
    self.cleanup_ids.append(id)
    fu = models.FileUpload.pull(id)
    assert fu is not None
def test_12_prepare_parameter_errors(self):
    """prepare() rejects a missing source, a missing schema, and a missing UPLOAD_DIR."""
    # no url or file upload
    with self.assertRaises(BackgroundException):
        job = ingestarticles.IngestArticlesBackgroundTask.prepare("testuser", schema="doaj", previous=[])
    # no schema
    with self.assertRaises(BackgroundException):
        job = ingestarticles.IngestArticlesBackgroundTask.prepare("testuser", url="http://whatever", previous=[])
    # upload dir not configured (tearDown restores UPLOAD_DIR from the snapshot)
    del app.config["UPLOAD_DIR"]
    with self.assertRaises(BackgroundException):
        job = ingestarticles.IngestArticlesBackgroundTask.prepare("testuser", url="http://whatever", schema="doaj", previous=[])
def test_13_ftp_upload_success(self):
    """ftp_upload downloads to the target path and marks the upload 'downloaded'."""
    ftplib.FTP = FTPMockFactory.create("doaj")
    file_upload = models.FileUpload()
    file_upload.set_id()
    upload_dir = app.config.get("UPLOAD_DIR")
    path = os.path.join(upload_dir, file_upload.local_filename)
    self.cleanup_paths.append(path)
    url= "ftp://upload"
    parsed_url = urlparse(url)
    job = models.BackgroundJob()
    result = ingestarticles.ftp_upload(job, path, parsed_url, file_upload)
    assert result is True
    assert os.path.exists(path)
    assert file_upload.status == "downloaded"
def test_14_ftp_upload_fail(self):
    """ftp_upload returns False and marks the upload 'failed' when the mock FTP rejects."""
    ftplib.FTP = FTPMockFactory.create("doaj")
    file_upload = models.FileUpload()
    file_upload.set_id()
    upload_dir = app.config.get("UPLOAD_DIR")
    path = os.path.join(upload_dir, file_upload.local_filename)
    self.cleanup_paths.append(path)
    url= "ftp://fail"
    parsed_url = urlparse(url)
    job = models.BackgroundJob()
    result = ingestarticles.ftp_upload(job, path, parsed_url, file_upload)
    assert result is False
    assert file_upload.status == "failed"
    assert file_upload.error is not None and file_upload.error != ""
    assert file_upload.error_details is None
    assert list(file_upload.failure_reasons.keys()) == []
def test_15_http_upload_success(self):
    """http_upload downloads to the target path and marks the upload 'downloaded'."""
    requests.head = ResponseMockFactory.head_fail
    requests.get = ResponseMockFactory.doaj_get_success
    url= "http://upload"
    file_upload = models.FileUpload()
    file_upload.set_id()
    file_upload.upload("testuser", url, status="exists")
    upload_dir = app.config.get("UPLOAD_DIR")
    path = os.path.join(upload_dir, file_upload.local_filename)
    self.cleanup_paths.append(path)
    job = models.BackgroundJob()
    result = ingestarticles.http_upload(job, path, file_upload)
    assert result is True
    assert os.path.exists(path)
    assert file_upload.status == "downloaded"
def test_17_doaj_download_http_valid(self):
    """_download over HTTP retrieves and validates the DOAJ XML."""
    requests.head = ResponseMockFactory.head_fail
    requests.get = ResponseMockFactory.doaj_get_success
    job = models.BackgroundJob()
    task = ingestarticles.IngestArticlesBackgroundTask(job)
    url = "http://valid"
    file_upload = models.FileUpload()
    file_upload.set_id()
    file_upload.upload("testuser", url, status="exists")
    file_upload.set_schema("doaj")
    upload_dir = app.config.get("UPLOAD_DIR")
    path = os.path.join(upload_dir, file_upload.local_filename)
    self.cleanup_paths.append(path)
    print(file_upload)
    result = task._download(file_upload)
    assert result is True
    assert file_upload.status == "validated"
def test_18_download_http_invalid(self):
    """_download succeeds over HTTP but schema validation fails, marking the upload 'failed'."""
    requests.head = ResponseMockFactory.head_fail
    requests.get = ResponseMockFactory.doaj_get_success
    job = models.BackgroundJob()
    url = "http://upload"
    file_upload = models.FileUpload()
    file_upload.set_id()
    file_upload.upload("testuser", url, status="exists")
    file_upload.set_schema("doaj")
    upload_dir = app.config.get("UPLOAD_DIR")
    path = os.path.join(upload_dir, file_upload.local_filename)
    self.cleanup_paths.append(path)
    self.cleanup_ids.append(file_upload.id)
    task = ingestarticles.IngestArticlesBackgroundTask(job)
    # NOTE(review): result is unused here — only the failure state is asserted.
    result = task._download(file_upload)
    assert file_upload.status == "failed"
    assert file_upload.error is not None and file_upload.error != ""
    assert file_upload.error_details is not None and file_upload.error_details != ""
    assert list(file_upload.failure_reasons.keys()) == []
def test_19_download_http_error(self):
    """_download returns False when the HTTP request itself fails."""
    requests.head = ResponseMockFactory.head_fail
    requests.get = ResponseMockFactory.get_fail
    job = models.BackgroundJob()
    url = "http://fail"
    file_upload = models.FileUpload()
    file_upload.set_id()
    file_upload.upload("testuser", url, status="exists")
    file_upload.set_schema("doaj")
    upload_dir = app.config.get("UPLOAD_DIR")
    path = os.path.join(upload_dir, file_upload.local_filename)
    self.cleanup_paths.append(path)
    task = ingestarticles.IngestArticlesBackgroundTask(job)
    result = task._download(file_upload)
    assert result is False
    assert file_upload.status == "failed"
    assert file_upload.error is not None and file_upload.error != ""
    assert file_upload.error_details is None
    assert list(file_upload.failure_reasons.keys()) == []
def test_20_download_ftp_valid(self):
    """_download over FTP retrieves and validates the DOAJ XML."""
    ftplib.FTP = FTPMockFactory.create("doaj")
    job = models.BackgroundJob()
    url = "ftp://valid"
    file_upload = models.FileUpload()
    file_upload.set_id()
    file_upload.upload("testuser", url, status="exists")
    file_upload.set_schema("doaj")
    upload_dir = app.config.get("UPLOAD_DIR")
    path = os.path.join(upload_dir, file_upload.local_filename)
    self.cleanup_paths.append(path)
    task = ingestarticles.IngestArticlesBackgroundTask(job)
    result = task._download(file_upload)
    assert result is True
    assert file_upload.status == "validated"
def test_21_download_ftp_invalid(self):
    """_download succeeds over FTP but schema validation fails, marking the upload 'failed'."""
    ftplib.FTP = FTPMockFactory.create("doaj")
    job = models.BackgroundJob()
    url = "ftp://upload"
    file_upload = models.FileUpload()
    file_upload.set_id()
    file_upload.upload("testuser", url, status="exists")
    file_upload.set_schema("doaj")
    upload_dir = app.config.get("UPLOAD_DIR")
    path = os.path.join(upload_dir, file_upload.local_filename)
    self.cleanup_paths.append(path)
    self.cleanup_ids.append(file_upload.id)
    task = ingestarticles.IngestArticlesBackgroundTask(job)
    # NOTE(review): result is unused here — only the failure state is asserted.
    result = task._download(file_upload)
    assert file_upload.status == "failed"
    assert file_upload.error is not None and file_upload.error != ""
    assert file_upload.error_details is not None and file_upload.error_details != ""
    assert list(file_upload.failure_reasons.keys()) == []
def test_22_download_ftp_error(self):
    """_download returns False when the FTP transfer itself fails."""
    ftplib.FTP = FTPMockFactory.create("doaj")
    job = models.BackgroundJob()
    url = "ftp://fail"
    file_upload = models.FileUpload()
    file_upload.set_id()
    file_upload.upload("testuser", url, status="exists")
    file_upload.set_schema("doaj")
    upload_dir = app.config.get("UPLOAD_DIR")
    path = os.path.join(upload_dir, file_upload.local_filename)
    self.cleanup_paths.append(path)
    task = ingestarticles.IngestArticlesBackgroundTask(job)
    result = task._download(file_upload)
    assert result is False
    assert file_upload.status == "failed"
    assert file_upload.error is not None and file_upload.error != ""
    assert file_upload.error_details is None
    assert list(file_upload.failure_reasons.keys()) == []
def test_23_doaj_process_success(self):
    """_process imports one new article when journal and publisher account match."""
    # Journal whose P-ISSN matches the fixture article's ISSN.
    j = models.Journal()
    j.set_owner("testowner")
    bj = j.bibjson()
    bj.add_identifier(bj.P_ISSN, "1234-5678")
    j.save(blocking=True)
    # Publisher account owning that journal.
    asource = AccountFixtureFactory.make_publisher_source()
    account = models.Account(**asource)
    account.set_id("testowner")
    account.save(blocking=True)
    job = models.BackgroundJob()
    file_upload = models.FileUpload()
    file_upload.set_id()
    file_upload.set_schema("doaj")
    file_upload.upload("testowner", "filename.xml")
    upload_dir = app.config.get("UPLOAD_DIR")
    path = os.path.join(upload_dir, file_upload.local_filename)
    self.cleanup_paths.append(path)
    # Write the fixture XML where _process expects to find the upload.
    stream = DoajXmlArticleFixtureFactory.upload_1_issn_correct()
    with open(path, "wb") as f:
        f.write(stream.read())
    task = ingestarticles.IngestArticlesBackgroundTask(job)
    task._process(file_upload)
    # Processed files are removed from the upload dir.
    assert not os.path.exists(path)
    assert file_upload.status == "processed"
    assert file_upload.imported == 1
    assert file_upload.new == 1
def test_24_process_invalid_file(self):
    """_process marks the upload 'failed' (with details) for schema-invalid XML."""
    j = models.Journal()
    j.set_owner("testowner")
    bj = j.bibjson()
    bj.add_identifier(bj.P_ISSN, "1234-5678")
    j.save(blocking=True)
    job = models.BackgroundJob()
    file_upload = models.FileUpload()
    file_upload.set_id()
    file_upload.set_schema("doaj")
    upload_dir = app.config.get("UPLOAD_DIR")
    path = os.path.join(upload_dir, file_upload.local_filename)
    self.cleanup_paths.append(path)
    self.cleanup_ids.append(file_upload.id)
    # Write the invalid fixture XML where _process expects to find the upload.
    stream = DoajXmlArticleFixtureFactory.invalid_schema_xml()
    with open(path, "w") as f:
        f.write(stream.read())
    task = ingestarticles.IngestArticlesBackgroundTask(job)
    task._process(file_upload)
    assert not os.path.exists(path)
    assert file_upload.status == "failed"
    assert file_upload.error is not None and file_upload.error != ""
    assert file_upload.error_details is not None and file_upload.error_details != ""
    assert list(file_upload.failure_reasons.keys()) == []
    def test_25_process_filesystem_error(self):
        """_process() when article creation is mocked to raise: upload is
        marked failed with an error but no error details."""
        # NOTE(review): patches the class attribute globally and does not restore
        # it here — presumably the test teardown resets it; verify
        articleSvc.ArticleService.batch_create_articles = BLLArticleMockFactory.batch_create
        j = models.Journal()
        j.set_owner("testowner")
        bj = j.bibjson()
        bj.add_identifier(bj.P_ISSN, "1234-5678")
        j.save(blocking=True)
        job = models.BackgroundJob()
        file_upload = models.FileUpload()
        file_upload.set_id()
        file_upload.set_schema("doaj")
        upload_dir = app.config.get("UPLOAD_DIR")
        path = os.path.join(upload_dir, file_upload.local_filename)
        self.cleanup_paths.append(path)
        self.cleanup_ids.append(file_upload.id)
        stream = DoajXmlArticleFixtureFactory.upload_1_issn_correct()
        with open(path, "wb") as f:
            f.write(stream.read())
        task = ingestarticles.IngestArticlesBackgroundTask(job)
        task._process(file_upload)
        assert not os.path.exists(path)
        assert file_upload.status == "failed"
        assert file_upload.error is not None and file_upload.error != ""
        assert file_upload.error_details is None
        assert list(file_upload.failure_reasons.keys()) == []
    def test_26_run_validated(self):
        """Full run() over a prepare()d file upload: the FileUpload record ends
        up in the "processed" state."""
        j = models.Journal()
        j.set_owner("testowner")
        bj = j.bibjson()
        bj.add_identifier(bj.P_ISSN, "1234-5678")
        j.save(blocking=True)
        asource = AccountFixtureFactory.make_publisher_source()
        account = models.Account(**asource)
        account.set_id("testowner")
        account.save(blocking=True)
        handle = DoajXmlArticleFixtureFactory.upload_1_issn_correct()
        f = FileMockFactory(stream=handle)
        previous = []
        job = ingestarticles.IngestArticlesBackgroundTask.prepare("testowner", upload_file=f, schema="doaj", previous=previous)
        id = job.params.get("ingest_articles__file_upload_id")
        self.cleanup_ids.append(id)
        # because file upload gets created and saved by prepare
        time.sleep(2)
        task = ingestarticles.IngestArticlesBackgroundTask(job)
        task.run()
        # because file upload needs to be re-saved
        time.sleep(2)
        fu = models.FileUpload.pull(id)
        assert fu is not None
        assert fu.status == "processed"
    def test_27_run_exists(self):
        """run() for an ingest prepared from a URL (HEAD fails, GET succeeds):
        the FileUpload ends up "processed"."""
        # NOTE(review): patches requests.head/get module-wide without restoring
        # here — presumably the test teardown puts them back; verify
        requests.head = ResponseMockFactory.head_fail
        requests.get = ResponseMockFactory.doaj_get_success
        j = models.Journal()
        j.set_owner("testowner")
        bj = j.bibjson()
        bj.add_identifier(bj.P_ISSN, "1234-5678")
        j.save(blocking=True)
        asource = AccountFixtureFactory.make_publisher_source()
        account = models.Account(**asource)
        account.set_id("testowner")
        account.save(blocking=True)
        url = "http://valid"
        previous = []
        job = ingestarticles.IngestArticlesBackgroundTask.prepare("testowner", url=url, schema="doaj", previous=previous)
        id = job.params.get("ingest_articles__file_upload_id")
        self.cleanup_ids.append(id)
        # because file upload gets created and saved by prepare
        time.sleep(2)
        task = ingestarticles.IngestArticlesBackgroundTask(job)
        task.run()
        # because file upload needs to be re-saved
        time.sleep(2)
        fu = models.FileUpload.pull(id)
        assert fu is not None
        assert fu.status == "processed"
def test_28_run_errors(self):
job = models.BackgroundJob()
task = ingestarticles.IngestArticlesBackgroundTask(job)
with self.assertRaises(BackgroundException):
task.run()
job.params = {}
with self.assertRaises(BackgroundException):
task.run()
job.params = {"ingest_articles__file_upload_id" : "whatever"}
with self.assertRaises(BackgroundException):
task.run()
    def test_29_submit_success(self):
        """submit() a prepared job (huey in eager mode, so it runs inline):
        the FileUpload ends up "processed"."""
        j = models.Journal()
        j.set_owner("testowner")
        bj = j.bibjson()
        bj.add_identifier(bj.P_ISSN, "1234-5678")
        j.save(blocking=True)
        asource = AccountFixtureFactory.make_publisher_source()
        account = models.Account(**asource)
        account.set_id("testowner")
        account.save(blocking=True)
        handle = DoajXmlArticleFixtureFactory.upload_1_issn_correct()
        f = FileMockFactory(stream=handle)
        previous = []
        job = ingestarticles.IngestArticlesBackgroundTask.prepare("testowner", upload_file=f, schema="doaj", previous=previous)
        id = job.params.get("ingest_articles__file_upload_id")
        self.cleanup_ids.append(id)
        # because file upload gets created and saved by prepare
        time.sleep(2)
        # this assumes that huey is in always eager mode, and thus this immediately calls the async task,
        # which in turn calls execute, which ultimately calls run
        ingestarticles.IngestArticlesBackgroundTask.submit(job)
        fu = models.FileUpload.pull(id)
        assert fu is not None
        assert fu.status == "processed"
    def test_31_doaj_run_fail_unmatched_issn(self):
        """Article carries one ISSN the journal has and one it does not:
        ingest fails and the extra ISSN is reported as "unmatched"."""
        # Create a journal with 2 issns, one of which is the same as an issn on the
        # article, but the article also contains an issn which doesn't match the journal
        # We expect a failed ingest
        j = models.Journal()
        j.set_owner("testowner")
        bj = j.bibjson()
        bj.add_identifier(bj.P_ISSN, "1234-5678")
        bj.add_identifier(bj.E_ISSN, "9876-5432")
        j.save(blocking=True)
        asource = AccountFixtureFactory.make_publisher_source()
        account = models.Account(**asource)
        account.set_id("testowner")
        account.save(blocking=True)
        handle = DoajXmlArticleFixtureFactory.upload_2_issns_ambiguous()
        f = FileMockFactory(stream=handle)
        job = ingestarticles.IngestArticlesBackgroundTask.prepare("testowner", schema="doaj", upload_file=f)
        id = job.params.get("ingest_articles__file_upload_id")
        self.cleanup_ids.append(id)
        # because file upload gets created and saved by prepare
        time.sleep(2)
        task = ingestarticles.IngestArticlesBackgroundTask(job)
        task.run()
        # because file upload needs to be re-saved
        time.sleep(2)
        fu = models.FileUpload.pull(id)
        assert fu is not None
        assert fu.status == "failed", "received status: {}".format(fu.status)
        assert fu.error is not None and fu.error != ""
        assert fu.error_details is None
        fr = fu.failure_reasons
        assert "unmatched" in fr
        assert fr["unmatched"] == ["2345-6789"]
    def test_32_run_doaj_fail_shared_issn(self):
        """Two journals with different owners share both of the article's
        ISSNs: ingest fails and both ISSNs are reported as "shared"."""
        # Create 2 journals with the same issns but different owners, which match the issns on the article
        # We expect an ingest failure
        j1 = models.Journal()
        j1.set_owner("testowner1")
        bj1 = j1.bibjson()
        bj1.add_identifier(bj1.P_ISSN, "1234-5678")
        bj1.add_identifier(bj1.E_ISSN, "9876-5432")
        j1.save()
        j2 = models.Journal()
        j2.set_owner("testowner2")
        j2.set_in_doaj(False)
        bj2 = j2.bibjson()
        bj2.add_identifier(bj2.P_ISSN, "1234-5678")
        bj2.add_identifier(bj2.E_ISSN, "9876-5432")
        j2.save(blocking=True)
        asource = AccountFixtureFactory.make_publisher_source()
        account = models.Account(**asource)
        account.set_id("testowner1")
        account.save(blocking=True)
        handle = DoajXmlArticleFixtureFactory.upload_2_issns_correct()
        f = FileMockFactory(stream=handle)
        job = ingestarticles.IngestArticlesBackgroundTask.prepare("testowner1", schema="doaj", upload_file=f)
        id = job.params.get("ingest_articles__file_upload_id")
        self.cleanup_ids.append(id)
        # because file upload gets created and saved by prepare
        time.sleep(2)
        task = ingestarticles.IngestArticlesBackgroundTask(job)
        task.run()
        # because file upload needs to be re-saved
        time.sleep(2)
        fu = models.FileUpload.pull(id)
        assert fu is not None
        assert fu.status == "failed"
        assert fu.error is not None and fu.error != ""
        assert fu.error_details is None
        fr = fu.failure_reasons
        assert "shared" in fr
        assert "1234-5678" in fr["shared"]
        assert "9876-5432" in fr["shared"]
def test_33_run_fail_unowned_issn(self):
# Create 2 journals with different owners and one different issn each. The two issns in the
# article match each of the journals respectively
# We expect an ingest failure
j1 = models.Journal()
j1.set_owner("testowner1")
bj1 = j1.bibjson()
bj1.add_identifier(bj1.P_ISSN, "1234-5678")
j1.save()
j2 = models.Journal()
j2.set_owner("testowner2")
j2.set_in_doaj(False)
bj2 = j2.bibjson()
bj2.add_identifier(bj2.E_ISSN, "9876-5432")
j2.save(blocking=True)
asource = AccountFixtureFactory.make_publisher_source()
account = models.Account(**asource)
account.set_id("testowner")
account.save(blocking=True)
handle = DoajXmlArticleFixtureFactory.upload_2_issns_correct()
f = FileMockFactory(stream=handle)
job = ingestarticles.IngestArticlesBackgroundTask.prepare("testowner", schema="doaj", upload_file=f)
id = job.params.get("ingest_articles__file_upload_id")
self.cleanup_ids.append(id)
# because file upload gets created and saved by prepare
time.sleep(2)
task = ingestarticles.IngestArticlesBackgroundTask(job)
task.run()
# because file upload needs to be re-saved
time.sleep(2)
fu = models.FileUpload.pull(id)
assert fu is not None
assert fu.status == "failed"
assert fu.error is not None and fu.error != ""
assert fu.error_details is None
fr = fu.failure_reasons
assert "unowned" in fr
assert "9876-5432" in fr["unowned"]
    def test_34_doaj_journal_2_article_2_success(self):
        """Journal's two ISSNs both match the article's two ISSNs: one new
        article imported and retrievable by ISSN."""
        # Create a journal with two issns both of which match the 2 issns in the article
        # we expect a successful article ingest
        j = models.Journal()
        j.set_owner("testowner")
        bj = j.bibjson()
        bj.add_identifier(bj.P_ISSN, "1234-5678")
        bj.add_identifier(bj.E_ISSN, "9876-5432")
        j.save(blocking=True)
        asource = AccountFixtureFactory.make_publisher_source()
        account = models.Account(**asource)
        account.set_id("testowner")
        account.save(blocking=True)
        handle = DoajXmlArticleFixtureFactory.upload_2_issns_correct()
        f = FileMockFactory(stream=handle)
        job = ingestarticles.IngestArticlesBackgroundTask.prepare("testowner", schema="doaj", upload_file=f)
        id = job.params.get("ingest_articles__file_upload_id")
        self.cleanup_ids.append(id)
        # because file upload gets created and saved by prepare
        time.sleep(2)
        task = ingestarticles.IngestArticlesBackgroundTask(job)
        task.run()
        # because file upload needs to be re-saved
        time.sleep(2)
        fu = models.FileUpload.pull(id)
        assert fu is not None
        assert fu.status == "processed"
        assert fu.imported == 1
        assert fu.updates == 0
        assert fu.new == 1
        fr = fu.failure_reasons
        assert len(fr.get("shared", [])) == 0
        assert len(fr.get("unowned", [])) == 0
        assert len(fr.get("unmatched", [])) == 0
        found = [a for a in models.Article.find_by_issns(["1234-5678", "9876-5432"])]
        assert len(found) == 1
    def test_35_doaj_journal_2_article_1_success(self):
        """Journal has two ISSNs; the article has only one of them: ingest
        still succeeds with one new article."""
        # Create a journal with 2 issns, one of which is present in the article as the
        # only issn
        # We expect a successful article ingest
        j = models.Journal()
        j.set_owner("testowner")
        bj = j.bibjson()
        bj.add_identifier(bj.P_ISSN, "1234-5678")
        bj.add_identifier(bj.E_ISSN, "9876-5432")
        j.save()
        asource = AccountFixtureFactory.make_publisher_source()
        account = models.Account(**asource)
        account.set_id("testowner")
        account.save(blocking=True)
        handle = DoajXmlArticleFixtureFactory.upload_1_issn_correct()
        f = FileMockFactory(stream=handle)
        job = ingestarticles.IngestArticlesBackgroundTask.prepare("testowner", schema="doaj", upload_file=f)
        id = job.params.get("ingest_articles__file_upload_id")
        self.cleanup_ids.append(id)
        # because file upload gets created and saved by prepare
        time.sleep(2)
        task = ingestarticles.IngestArticlesBackgroundTask(job)
        task.run()
        # because file upload needs to be re-saved
        time.sleep(2)
        fu = models.FileUpload.pull(id)
        assert fu is not None
        assert fu.status == "processed"
        assert fu.imported == 1
        assert fu.updates == 0
        assert fu.new == 1
        fr = fu.failure_reasons
        assert len(fr.get("shared", [])) == 0
        assert len(fr.get("unowned", [])) == 0
        assert len(fr.get("unmatched", [])) == 0
        found = [a for a in models.Article.find_by_issns(["1234-5678"])]
        assert len(found) == 1
    def test_37_doaj_journal_1_article_1_success(self):
        """Single-ISSN journal, single matching-ISSN article: one new article
        imported."""
        # Create a journal with 1 issn, which is the same 1 issn on the article
        # we expect a successful article ingest
        j = models.Journal()
        j.set_owner("testowner")
        bj = j.bibjson()
        bj.add_identifier(bj.P_ISSN, "1234-5678")
        j.save()
        asource = AccountFixtureFactory.make_publisher_source()
        account = models.Account(**asource)
        account.set_id("testowner")
        account.save(blocking=True)
        handle = DoajXmlArticleFixtureFactory.upload_1_issn_correct()
        f = FileMockFactory(stream=handle)
        job = ingestarticles.IngestArticlesBackgroundTask.prepare("testowner", schema="doaj", upload_file=f)
        id = job.params.get("ingest_articles__file_upload_id")
        self.cleanup_ids.append(id)
        # because file upload gets created and saved by prepare
        time.sleep(2)
        task = ingestarticles.IngestArticlesBackgroundTask(job)
        task.run()
        # because file upload needs to be re-saved
        time.sleep(2)
        fu = models.FileUpload.pull(id)
        assert fu is not None
        assert fu.status == "processed"
        assert fu.imported == 1
        assert fu.updates == 0
        assert fu.new == 1
        fr = fu.failure_reasons
        assert len(fr.get("shared", [])) == 0
        assert len(fr.get("unowned", [])) == 0
        assert len(fr.get("unmatched", [])) == 0
        found = [a for a in models.Article.find_by_issns(["1234-5678"])]
        assert len(found) == 1
    def test_38_doaj_journal_2_article_2_1_different_success(self):
        """Article has one matching ISSN plus one the journal lacks: ingest
        fails with exactly one "unmatched" reason and no article created.

        (Despite the "_success" suffix, the expected outcome is a failure —
        see the comment below.)"""
        # Create a journal with 2 issns, one of which is the same as an issn on the
        # article, but the article also contains an issn which doesn't match the journal
        # We expect a failed ingest
        j = models.Journal()
        j.set_owner("testowner")
        bj = j.bibjson()
        bj.add_identifier(bj.P_ISSN, "1234-5678")
        bj.add_identifier(bj.E_ISSN, "9876-5432")
        j.save()
        asource = AccountFixtureFactory.make_publisher_source()
        account = models.Account(**asource)
        account.set_id("testowner")
        account.save(blocking=True)
        handle = DoajXmlArticleFixtureFactory.upload_2_issns_ambiguous()
        f = FileMockFactory(stream=handle)
        job = ingestarticles.IngestArticlesBackgroundTask.prepare("testowner", schema="doaj", upload_file=f)
        id = job.params.get("ingest_articles__file_upload_id")
        self.cleanup_ids.append(id)
        # because file upload gets created and saved by prepare
        time.sleep(2)
        task = ingestarticles.IngestArticlesBackgroundTask(job)
        task.run()
        # because file upload needs to be re-saved
        time.sleep(2)
        fu = models.FileUpload.pull(id)
        assert fu is not None
        assert fu.status == "failed"
        assert fu.imported == 0
        assert fu.updates == 0
        assert fu.new == 0
        fr = fu.failure_reasons
        assert len(fr.get("shared", [])) == 0
        assert len(fr.get("unowned", [])) == 0
        assert len(fr.get("unmatched", [])) == 1
        found = [a for a in models.Article.find_by_issns(["1234-5678", "2345-6789"])]
        assert len(found) == 0
    def test_39_doaj_2_journals_different_owners_both_issns_fail(self):
        """Both of the article's ISSNs appear on journals of two different
        owners: ingest fails with both ISSNs in "shared", nothing imported."""
        # Create 2 journals with the same issns but different owners, which match the issns on the article
        # We expect an ingest failure
        j1 = models.Journal()
        j1.set_owner("testowner1")
        bj1 = j1.bibjson()
        bj1.add_identifier(bj1.P_ISSN, "1234-5678")
        bj1.add_identifier(bj1.E_ISSN, "9876-5432")
        j1.save()
        j2 = models.Journal()
        j2.set_owner("testowner2")
        j2.set_in_doaj(False)
        bj2 = j2.bibjson()
        bj2.add_identifier(bj2.P_ISSN, "1234-5678")
        bj2.add_identifier(bj2.E_ISSN, "9876-5432")
        j2.save()
        asource = AccountFixtureFactory.make_publisher_source()
        account = models.Account(**asource)
        account.set_id("testowner1")
        account.save(blocking=True)
        handle = DoajXmlArticleFixtureFactory.upload_2_issns_correct()
        f = FileMockFactory(stream=handle)
        job = ingestarticles.IngestArticlesBackgroundTask.prepare("testowner1", schema="doaj", upload_file=f)
        id = job.params.get("ingest_articles__file_upload_id")
        self.cleanup_ids.append(id)
        # because file upload gets created and saved by prepare
        time.sleep(2)
        task = ingestarticles.IngestArticlesBackgroundTask(job)
        task.run()
        # because file upload needs to be re-saved
        time.sleep(2)
        fu = models.FileUpload.pull(id)
        assert fu is not None
        assert fu.status == "failed"
        assert fu.imported == 0
        assert fu.updates == 0
        assert fu.new == 0
        fr = fu.failure_reasons
        assert len(fr.get("shared", [])) == 2
        assert "1234-5678" in fr["shared"]
        assert "9876-5432" in fr["shared"]
        assert len(fr.get("unowned", [])) == 0
        assert len(fr.get("unmatched", [])) == 0
        found = [a for a in models.Article.find_by_issns(["1234-5678", "9876-5432"])]
        assert len(found) == 0
    def test_40_doaj_2_journals_different_owners_issn_each_fail(self):
        """Each of the article's ISSNs belongs to a journal of a different
        owner; the uploader owns only the first: ingest fails with the other
        journal's ISSN in "unowned"."""
        # Create 2 journals with different owners and one different issn each. The two issns in the
        # article match each of the journals respectively
        # We expect an ingest failure
        j1 = models.Journal()
        j1.set_owner("testowner1")
        bj1 = j1.bibjson()
        bj1.add_identifier(bj1.P_ISSN, "1234-5678")
        j1.save()
        j2 = models.Journal()
        j2.set_owner("testowner2")
        j2.set_in_doaj(False)
        bj2 = j2.bibjson()
        bj2.add_identifier(bj2.E_ISSN, "9876-5432")
        j2.save()
        asource = AccountFixtureFactory.make_publisher_source()
        account = models.Account(**asource)
        account.set_id("testowner1")
        account.save(blocking=True)
        handle = DoajXmlArticleFixtureFactory.upload_2_issns_correct()
        f = FileMockFactory(stream=handle)
        job = ingestarticles.IngestArticlesBackgroundTask.prepare("testowner1", schema="doaj", upload_file=f)
        id = job.params.get("ingest_articles__file_upload_id")
        self.cleanup_ids.append(id)
        # because file upload gets created and saved by prepare
        time.sleep(2)
        task = ingestarticles.IngestArticlesBackgroundTask(job)
        task.run()
        # because file upload needs to be re-saved
        time.sleep(2)
        fu = models.FileUpload.pull(id)
        assert fu is not None
        assert fu.status == "failed"
        assert fu.imported == 0
        assert fu.updates == 0
        assert fu.new == 0
        fr = fu.failure_reasons
        assert len(fr.get("shared", [])) == 0
        assert len(fr.get("unowned", [])) == 1
        assert "9876-5432" in fr["unowned"]
        assert len(fr.get("unmatched", [])) == 0
        found = [a for a in models.Article.find_by_issns(["1234-5678", "9876-5432"])]
        assert len(found) == 0
    def test_41_doaj_2_journals_same_owner_issn_each_success(self):
        """The article's two ISSNs are split across two journals of the SAME
        owner: ingest succeeds with one new article."""
        # Create 2 journals with the same owner, each with one different issn. The article's 2 issns
        # match each of these issns
        # We expect a successful article ingest
        j1 = models.Journal()
        j1.set_owner("testowner")
        bj1 = j1.bibjson()
        bj1.add_identifier(bj1.P_ISSN, "1234-5678")
        j1.save()
        j2 = models.Journal()
        j2.set_owner("testowner")
        j2.set_in_doaj(False)
        bj2 = j2.bibjson()
        bj2.add_identifier(bj2.E_ISSN, "9876-5432")
        j2.save()
        asource = AccountFixtureFactory.make_publisher_source()
        account = models.Account(**asource)
        account.set_id("testowner")
        account.save(blocking=True)
        handle = DoajXmlArticleFixtureFactory.upload_2_issns_correct()
        f = FileMockFactory(stream=handle)
        job = ingestarticles.IngestArticlesBackgroundTask.prepare("testowner", schema="doaj", upload_file=f)
        id = job.params.get("ingest_articles__file_upload_id")
        self.cleanup_ids.append(id)
        # because file upload gets created and saved by prepare
        time.sleep(2)
        task = ingestarticles.IngestArticlesBackgroundTask(job)
        task.run()
        # because file upload needs to be re-saved
        time.sleep(2)
        fu = models.FileUpload.pull(id)
        assert fu is not None
        assert fu.status == "processed"
        assert fu.imported == 1
        assert fu.updates == 0
        assert fu.new == 1
        fr = fu.failure_reasons
        assert len(fr.get("shared", [])) == 0
        assert len(fr.get("unowned", [])) == 0
        assert len(fr.get("unmatched", [])) == 0
        found = [a for a in models.Article.find_by_issns(["1234-5678", "9876-5432"])]
        assert len(found) == 1
    def test_42_doaj_2_journals_different_owners_different_issns_mixed_article_fail(self):
        """Article's ISSNs match one ISSN on each of two journals with
        different owners (two ISSNs each): ingest fails, the other owner's
        ISSN is "unowned"."""
        # Create 2 different journals with different owners and different issns (2 each).
        # The article's issns match one issn in each journal
        # We expect an ingest failure
        j1 = models.Journal()
        j1.set_owner("testowner1")
        bj1 = j1.bibjson()
        bj1.add_identifier(bj1.P_ISSN, "1234-5678")
        bj1.add_identifier(bj1.E_ISSN, "2345-6789")
        j1.save()
        j2 = models.Journal()
        j2.set_owner("testowner2")
        j2.set_in_doaj(False)
        bj2 = j2.bibjson()
        bj2.add_identifier(bj2.P_ISSN, "8765-4321")
        bj2.add_identifier(bj2.E_ISSN, "9876-5432")
        j2.save()
        asource = AccountFixtureFactory.make_publisher_source()
        account = models.Account(**asource)
        account.set_id("testowner1")
        account.save(blocking=True)
        handle = DoajXmlArticleFixtureFactory.upload_2_issns_correct()
        f = FileMockFactory(stream=handle)
        job = ingestarticles.IngestArticlesBackgroundTask.prepare("testowner1", schema="doaj", upload_file=f)
        id = job.params.get("ingest_articles__file_upload_id")
        self.cleanup_ids.append(id)
        # because file upload gets created and saved by prepare
        time.sleep(2)
        task = ingestarticles.IngestArticlesBackgroundTask(job)
        task.run()
        # because file upload needs to be re-saved
        time.sleep(2)
        fu = models.FileUpload.pull(id)
        assert fu is not None
        assert fu.status == "failed"
        assert fu.imported == 0
        assert fu.updates == 0
        assert fu.new == 0
        fr = fu.failure_reasons
        assert len(fr.get("shared", [])) == 0
        assert len(fr.get("unowned", [])) == 1
        assert "9876-5432" in fr["unowned"]
        assert len(fr.get("unmatched", [])) == 0
        found = [a for a in models.Article.find_by_issns(["1234-5678", "9876-5432"])]
        assert len(found) == 0
    def test_43_doaj_duplication(self):
        """Ingest the same article twice in quick succession: both uploads
        process, but duplicate detection leaves only one stored article."""
        j = models.Journal()
        j.set_owner("testowner")
        bj = j.bibjson()
        bj.add_identifier(bj.P_ISSN, "1234-5678")
        bj.add_identifier(bj.E_ISSN, "9876-5432")
        j.save()
        asource = AccountFixtureFactory.make_publisher_source()
        account = models.Account(**asource)
        account.set_id("testowner")
        account.save(blocking=True)
        # make both handles, as we want as little gap as possible between requests in a moment
        handle1 = DoajXmlArticleFixtureFactory.upload_2_issns_correct()
        handle2 = DoajXmlArticleFixtureFactory.upload_2_issns_correct()
        f1 = FileMockFactory(stream=handle1)
        f2 = FileMockFactory(stream=handle2)
        job1 = ingestarticles.IngestArticlesBackgroundTask.prepare("testowner", schema="doaj", upload_file=f1)
        id1 = job1.params.get("ingest_articles__file_upload_id")
        self.cleanup_ids.append(id1)
        job2 = ingestarticles.IngestArticlesBackgroundTask.prepare("testowner", schema="doaj", upload_file=f2)
        id2 = job2.params.get("ingest_articles__file_upload_id")
        self.cleanup_ids.append(id2)
        # because file upload gets created and saved by prepare
        time.sleep(2)
        task1 = ingestarticles.IngestArticlesBackgroundTask(job1)
        task2 = ingestarticles.IngestArticlesBackgroundTask(job2)
        # run back-to-back so the second sees the first's article as a duplicate
        task1.run()
        task2.run()
        # because file upload needs to be re-saved
        time.sleep(2)
        fu1 = models.FileUpload.pull(id1)
        fu2 = models.FileUpload.pull(id2)
        assert fu1.status == "processed", "received status: {}".format(fu1.status)
        assert fu2.status == "processed", "received status: {}".format(fu2.status)
        # now let's check that only one article got created
        found = [a for a in models.Article.find_by_issns(["1234-5678", "9876-5432"])]
        assert len(found) == 1, "found: {}".format(len(found))
    def test_44_doaj_journal_1_article_1_superlong_noclip(self):
        """An abstract just under the 30000-character cap is stored intact
        (26264 chars, no clipping)."""
        # Create a journal with 1 issn, which is the same 1 issn on the article
        # we expect a successful article ingest
        # But it's just shy of 30000 unicode characters long!
        j = models.Journal()
        j.set_owner("testowner")
        bj = j.bibjson()
        bj.add_identifier(bj.P_ISSN, "1234-5678")
        j.save()
        asource = AccountFixtureFactory.make_publisher_source()
        account = models.Account(**asource)
        account.set_id("testowner")
        account.save(blocking=True)
        handle = DoajXmlArticleFixtureFactory.upload_1_issn_superlong_should_not_clip()
        f = FileMockFactory(stream=handle)
        job = ingestarticles.IngestArticlesBackgroundTask.prepare("testowner", schema="doaj", upload_file=f)
        id = job.params.get("ingest_articles__file_upload_id")
        self.cleanup_ids.append(id)
        # because file upload gets created and saved by prepare
        time.sleep(2)
        task = ingestarticles.IngestArticlesBackgroundTask(job)
        task.run()
        # because file upload needs to be re-saved
        time.sleep(2)
        fu = models.FileUpload.pull(id)
        assert fu is not None
        assert fu.status == "processed"
        assert fu.imported == 1
        assert fu.updates == 0
        assert fu.new == 1
        fr = fu.failure_reasons
        assert len(fr.get("shared", [])) == 0
        assert len(fr.get("unowned", [])) == 0
        assert len(fr.get("unmatched", [])) == 0
        found = [a for a in models.Article.find_by_issns(["1234-5678"])]
        assert len(found) == 1
        # fixture abstract length, below the clip threshold, preserved exactly
        assert len(found[0].bibjson().abstract) == 26264
    def test_doaj_45_journal_1_article_1_superlong_clip(self):
        """An abstract over the cap is clipped to exactly 30000 characters on
        ingest."""
        # Create a journal with 1 issn, which is the same 1 issn on the article
        # we expect a successful article ingest
        # But it's over 40k unicode characters long!
        j = models.Journal()
        j.set_owner("testowner")
        bj = j.bibjson()
        bj.add_identifier(bj.P_ISSN, "1234-5678")
        j.save()
        asource = AccountFixtureFactory.make_publisher_source()
        account = models.Account(**asource)
        account.set_id("testowner")
        account.save(blocking=True)
        handle = DoajXmlArticleFixtureFactory.upload_1_issn_superlong_should_clip()
        f = FileMockFactory(stream=handle)
        job = ingestarticles.IngestArticlesBackgroundTask.prepare("testowner", schema="doaj", upload_file=f)
        id = job.params.get("ingest_articles__file_upload_id")
        self.cleanup_ids.append(id)
        # because file upload gets created and saved by prepare
        time.sleep(2)
        task = ingestarticles.IngestArticlesBackgroundTask(job)
        task.run()
        # because file upload needs to be re-saved
        time.sleep(2)
        fu = models.FileUpload.pull(id)
        assert fu is not None
        assert fu.status == "processed"
        assert fu.imported == 1
        assert fu.updates == 0
        assert fu.new == 1
        fr = fu.failure_reasons
        assert len(fr.get("shared", [])) == 0
        assert len(fr.get("unowned", [])) == 0
        assert len(fr.get("unmatched", [])) == 0
        found = [a for a in models.Article.find_by_issns(["1234-5678"])]
        assert len(found) == 1
        # abstract clipped to the 30000-character maximum
        assert len(found[0].bibjson().abstract) == 30000
    def test_46_doaj_one_journal_one_article_2_issns_one_unknown(self):
        """Journal and article both have two ISSNs but only one matches: the
        article's other ISSN is "unmatched" and ingest fails."""
        # Create one journal and ingest one article. The Journal has two issns, and the article
        # has two issns, but one of the journal's issns is unknown
        # We expect an ingest failure
        j1 = models.Journal()
        j1.set_owner("testowner1")
        bj1 = j1.bibjson()
        bj1.add_identifier(bj1.P_ISSN, "1234-5678")
        bj1.add_identifier(bj1.E_ISSN, "2222-2222")
        j1.save()
        asource = AccountFixtureFactory.make_publisher_source()
        account = models.Account(**asource)
        account.set_id("testowner1")
        account.save(blocking=True)
        handle = DoajXmlArticleFixtureFactory.upload_2_issns_correct()
        f = FileMockFactory(stream=handle)
        job = ingestarticles.IngestArticlesBackgroundTask.prepare("testowner1", schema="doaj", upload_file=f)
        id = job.params.get("ingest_articles__file_upload_id")
        self.cleanup_ids.append(id)
        # because file upload gets created and saved by prepare
        time.sleep(2)
        task = ingestarticles.IngestArticlesBackgroundTask(job)
        task.run()
        # because file upload needs to be re-saved
        time.sleep(2)
        fu = models.FileUpload.pull(id)
        assert fu is not None
        assert fu.status == "failed"
        assert fu.imported == 0
        assert fu.updates == 0
        assert fu.new == 0
        fr = fu.failure_reasons
        assert len(fr.get("shared", [])) == 0
        assert len(fr.get("unowned", [])) == 0
        assert len(fr.get("unmatched", [])) == 1
        assert "9876-5432" in fr["unmatched"]
        found = [a for a in models.Article.find_by_issns(["1234-5678", "9876-5432"])]
        assert len(found) == 0
    def test_47_doaj_lcc_spelling_error(self):
        """A journal with one bogus LCC subject: ingest still succeeds and the
        article's classification_paths contain only the valid subject."""
        # create a journal with a broken subject classification
        j1 = models.Journal()
        j1.set_owner("testowner1")
        bj1 = j1.bibjson()
        bj1.add_identifier(bj1.P_ISSN, "1234-5678")
        bj1.add_identifier(bj1.E_ISSN, "9876-5432")
        # first subject is deliberately invalid; second is a real LCC entry
        bj1.add_subject("LCC", "Whatever", "WHATEVA")
        bj1.add_subject("LCC", "Aquaculture. Fisheries. Angling", "SH1-691")
        j1.save()
        asource = AccountFixtureFactory.make_publisher_source()
        account = models.Account(**asource)
        account.set_id("testowner1")
        account.save(blocking=True)
        handle = DoajXmlArticleFixtureFactory.upload_2_issns_correct()
        f = FileMockFactory(stream=handle)
        job = ingestarticles.IngestArticlesBackgroundTask.prepare("testowner1", schema="doaj", upload_file=f)
        id = job.params.get("ingest_articles__file_upload_id")
        self.cleanup_ids.append(id)
        # because file upload gets created and saved by prepare
        time.sleep(2)
        task = ingestarticles.IngestArticlesBackgroundTask(job)
        task.run()
        # because file upload needs to be re-saved
        time.sleep(2)
        fu = models.FileUpload.pull(id)
        assert fu is not None, 'expected FileUpload is not None, received: {}'.format(fu)
        assert fu.status == "processed", 'expected status processed, received: {}'.format(fu.status)
        assert fu.imported == 1, 'expected 1 imported, received: {}'.format(fu.imported)
        assert fu.updates == 0, 'expected 0 updates, received: {}'.format(fu.updates)
        assert fu.new == 1, 'expected 1 new, received: {}'.format(fu.new)
        fr = fu.failure_reasons
        assert len(fr.get("shared", [])) == 0
        assert len(fr.get("unowned", [])) == 0
        assert len(fr.get("unmatched", [])) == 0
        found = [a for a in models.Article.find_by_issns(["1234-5678", "9876-5432"])]
        assert len(found) == 1
        # only the valid LCC subject should produce a classification path
        cpaths = found[0].data["index"]["classification_paths"]
        assert len(cpaths) == 1
        assert cpaths[0] == "Agriculture: Aquaculture. Fisheries. Angling"
    def test_48_doaj_unknown_journal_issn(self):
        """Article has two ISSNs but the index only knows one of them: ingest
        fails with one "unmatched" reason."""
        # create a journal with one of the ISSNs specified
        j1 = models.Journal()
        j1.set_owner("testowner1")
        bj1 = j1.bibjson()
        bj1.add_identifier(bj1.P_ISSN, "1234-5678")
        j1.save(blocking=True)
        asource = AccountFixtureFactory.make_publisher_source()
        account = models.Account(**asource)
        account.set_id("testowner1")
        account.save(blocking=True)
        # take an article with 2 issns, but one of which is not in the index
        handle = DoajXmlArticleFixtureFactory.upload_2_issns_correct()
        f = FileMockFactory(stream=handle)
        job = ingestarticles.IngestArticlesBackgroundTask.prepare("testowner1", schema="doaj", upload_file=f)
        id = job.params.get("ingest_articles__file_upload_id")
        self.cleanup_ids.append(id)
        # because file upload gets created and saved by prepare
        time.sleep(2)
        task = ingestarticles.IngestArticlesBackgroundTask(job)
        task.run()
        # because file upload needs to be re-saved
        time.sleep(2)
        fu = models.FileUpload.pull(id)
        assert fu is not None
        assert fu.status == "failed"
        assert fu.imported == 0
        assert fu.updates == 0
        assert fu.new == 0
        fr = fu.failure_reasons
        assert len(fr.get("shared", [])) == 0
        assert len(fr.get("unowned", [])) == 0
        assert len(fr.get("unmatched", [])) == 1
    def test_49_doaj_noids(self):
        """_process() a file whose articles carry no identifiers: the upload
        fails and the source file is removed."""
        j = models.Journal()
        j.set_owner("testowner")
        bj = j.bibjson()
        bj.add_identifier(bj.P_ISSN, "1234-5678")
        j.save(blocking=True)
        asource = AccountFixtureFactory.make_publisher_source()
        account = models.Account(**asource)
        account.set_id("testowner")
        account.save(blocking=True)
        job = models.BackgroundJob()
        file_upload = models.FileUpload()
        file_upload.set_id()
        file_upload.set_schema("doaj")
        file_upload.upload("testowner", "filename.xml")
        upload_dir = app.config.get("UPLOAD_DIR")
        path = os.path.join(upload_dir, file_upload.local_filename)
        self.cleanup_paths.append(path)
        stream = DoajXmlArticleFixtureFactory.noids()
        with open(path, "wb") as f:
            f.write(stream.read())
        task = ingestarticles.IngestArticlesBackgroundTask(job)
        task._process(file_upload)
        assert not os.path.exists(path)
        assert file_upload.status == "failed"
def test_50_valid_url_starting_with_http(self):
handle = DoajXmlArticleFixtureFactory.valid_url_http()
f = FileMockFactory(stream=handle)
previous = []
id = ingestarticles.IngestArticlesBackgroundTask._file_upload("testuser", f, "doaj", previous)
self.cleanup_ids.append(id)
fu = models.FileUpload.pull(id)
assert fu.status == "validated"
def test_51_valid_url_starting_with_https(self):
handle = DoajXmlArticleFixtureFactory.valid_url_https()
f = FileMockFactory(stream=handle)
previous = []
id = ingestarticles.IngestArticlesBackgroundTask._file_upload("testuser", f, "doaj", previous)
self.cleanup_ids.append(id)
fu = models.FileUpload.pull(id)
assert fu.status == "validated"
def test_52_valid_url_with_non_ascii_chars(self):
handle = DoajXmlArticleFixtureFactory.valid_url_non_ascii_chars()
f = FileMockFactory(stream=handle)
previous = []
id = ingestarticles.IngestArticlesBackgroundTask._file_upload("testuser", f, "doaj", previous)
self.cleanup_ids.append(id)
fu = models.FileUpload.pull(id)
assert fu.status == "validated"
def test_53_invalid_url(self):
handle = DoajXmlArticleFixtureFactory.invalid_url()
f = FileMockFactory(stream=handle)
previous = []
with self.assertRaises(BackgroundException):
id = ingestarticles.IngestArticlesBackgroundTask._file_upload("testuser", f, "doaj", previous)
assert len(previous) == 1
id = previous[0].id
self.cleanup_ids.append(id)
fu = models.FileUpload.pull(id)
assert fu.status == "failed"
assert fu.error == 'Unable to validate document with identified schema'
def test_54_invalid_url_http_missing(self):
handle = DoajXmlArticleFixtureFactory.invalid_url_http_missing()
f = FileMockFactory(stream=handle)
previous = []
with self.assertRaises(BackgroundException):
id = ingestarticles.IngestArticlesBackgroundTask._file_upload("testuser", f, "doaj", previous)
assert len(previous) == 1
id = previous[0].id
self.cleanup_ids.append(id)
fu = models.FileUpload.pull(id)
assert fu.status == "failed"
assert fu.error == 'Unable to validate document with identified schema'
def test_55_valid_url_with_http_anchor(self):
handle = DoajXmlArticleFixtureFactory.valid_url_http_anchor()
f = FileMockFactory(stream=handle)
previous = []
id = ingestarticles.IngestArticlesBackgroundTask._file_upload("testuser", f, "doaj", previous)
self.cleanup_ids.append(id)
fu = models.FileUpload.pull(id)
assert fu.status == "validated"
def test_56_valid_url_with_parameters(self):
handle = DoajXmlArticleFixtureFactory.valid_url_parameters()
f = FileMockFactory(stream=handle)
previous = []
id = ingestarticles.IngestArticlesBackgroundTask._file_upload("testuser", f, "doaj", previous)
self.cleanup_ids.append(id)
fu = models.FileUpload.pull(id)
assert fu.status == "validated"
def test_57_file_with_valid_orcid_id(self):
handle = DoajXmlArticleFixtureFactory.valid_orcid_id()
f = FileMockFactory(stream=handle)
previous = []
id = ingestarticles.IngestArticlesBackgroundTask._file_upload("testuser", f, "doaj", previous)
self.cleanup_ids.append(id)
fu = models.FileUpload.pull(id)
assert fu is not None
assert fu.schema == "doaj"
assert fu.status == "validated"
path = os.path.join(app.config.get("UPLOAD_DIR", "."), id + ".xml")
assert os.path.exists(path)
assert len(previous) == 1
def test_58_file_with_invalid_orcid_id(self):
handle = DoajXmlArticleFixtureFactory.invalid_orcid_id()
f = FileMockFactory(stream=handle)
previous = []
with self.assertRaises(BackgroundException):
id = ingestarticles.IngestArticlesBackgroundTask._file_upload("testuser", f, "doaj", previous)
assert len(previous) == 1
id = previous[0].id
self.cleanup_ids.append(id)
fu = models.FileUpload.pull(id)
assert fu is not None
assert fu.status == "failed"
assert fu.error is not None and fu.error != ""
assert fu.error_details is not None and fu.error != ""
assert list(fu.failure_reasons.keys()) == []
# file should have been removed from upload dir
path = os.path.join(app.config.get("UPLOAD_DIR", "."), id + ".xml")
assert not os.path.exists(path)
# and placed into the failed dir
fad = os.path.join(app.config.get("FAILED_ARTICLE_DIR", "."), id + ".xml")
assert os.path.exists(fad)
    def test_59_same_issns(self):
        """An article whose P-ISSN and E-ISSN are identical must fail processing
        with the dedicated 'identical ISSNs' error message."""
        # a journal owned by the same account the upload will be attributed to
        j = models.Journal()
        j.set_owner("testowner")
        bj = j.bibjson()
        bj.add_identifier(bj.P_ISSN, "1234-5678")
        j.save(blocking=True)
        # the owning publisher account (blocking saves so the index is ready)
        asource = AccountFixtureFactory.make_publisher_source()
        account = models.Account(**asource)
        account.set_id("testowner")
        account.save(blocking=True)
        job = models.BackgroundJob()
        # register a pending upload record for the file we are about to write
        file_upload = models.FileUpload()
        file_upload.set_id()
        file_upload.set_schema("doaj")
        file_upload.upload("testowner", "filename.xml")
        upload_dir = app.config.get("UPLOAD_DIR")
        path = os.path.join(upload_dir, file_upload.local_filename)
        self.cleanup_paths.append(path)
        # write the fixture XML (same P-ISSN and E-ISSN) into the upload dir
        stream = DoajXmlArticleFixtureFactory.upload_the_same_issns()
        with open(path, "wb") as f:
            f.write(stream.read())
        # run ingest directly against the upload record
        task = ingestarticles.IngestArticlesBackgroundTask(job)
        task._process(file_upload)
        # the file is removed and the upload flagged as failed with the
        # identical-ISSN error message
        assert not os.path.exists(path)
        assert file_upload.status == "failed", "expected: failed, received: {}".format(file_upload.status)
        assert file_upload.error == Messages.EXCEPTION_IDENTICAL_PISSN_AND_EISSN, "Expected: '{}', received: {}".format(Messages.EXCEPTION_IDENTICAL_PISSN_AND_EISSN, file_upload.error)
| 35.023939
| 184
| 0.646068
| 7,686
| 64,374
| 5.246032
| 0.052563
| 0.053322
| 0.012946
| 0.020337
| 0.879591
| 0.854344
| 0.834974
| 0.827683
| 0.817638
| 0.815084
| 0
| 0.023768
| 0.249045
| 64,374
| 1,838
| 184
| 35.023939
| 0.81031
| 0.07531
| 0
| 0.850863
| 0
| 0
| 0.080079
| 0.013562
| 0
| 0
| 0
| 0
| 0.240973
| 1
| 0.047881
| false
| 0
| 0.025903
| 0.000785
| 0.075353
| 0.000785
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8c2c1ec4b686fc5b8d49ee878e10b181b38634a5
| 2,460
|
py
|
Python
|
pcdet/models/backbones_3d/vfe/fusion_vfe.py
|
HenryLittle/OpenPCDet-HL
|
7dba01750e10d170849314723ec0665782236a70
|
[
"Apache-2.0"
] | null | null | null |
pcdet/models/backbones_3d/vfe/fusion_vfe.py
|
HenryLittle/OpenPCDet-HL
|
7dba01750e10d170849314723ec0665782236a70
|
[
"Apache-2.0"
] | null | null | null |
pcdet/models/backbones_3d/vfe/fusion_vfe.py
|
HenryLittle/OpenPCDet-HL
|
7dba01750e10d170849314723ec0665782236a70
|
[
"Apache-2.0"
] | null | null | null |
import torch
from .vfe_template import VFETemplate
from .image_vfe import ImageVFE
from .mean_vfe import MeanVFE
from .image_vfe_modules.resnet import HookedResNet
from .image_vfe_modules.maskrcnn import HookedMaskRCNN
class ImageMaskRCNNVFE(VFETemplate):
    """VFE combining a mean point-feature encoder with a hooked Mask R-CNN
    image backbone; the image feature pyramid is attached to the batch dict.
    """

    def __init__(self, model_cfg, point_cloud_range, num_point_features, **kwargs):
        super().__init__(model_cfg, **kwargs)
        self.num_point_features = num_point_features
        # [x_min, y_min, z_min, x_max, y_max, z_max]
        self.pc_range = point_cloud_range
        # when True, the image backbone runs without gradient tracking in
        # forward(). NOTE(review): this does not call eval() or set
        # requires_grad=False on the backbone parameters — confirm intended.
        self.freeze = model_cfg.FREEZE_BACKBONE
        self.resnet = HookedMaskRCNN(network=model_cfg.BACKBONE,
                                     output_layers=model_cfg.OUTPUT_LAYERS)
        self.mean_vfe = MeanVFE(model_cfg, num_point_features)

    def get_output_feature_dim(self):
        """Number of output features per voxel (produced by the mean VFE)."""
        return self.num_point_features

    def forward(self, batch_dict, **kwargs):
        """Run the image backbone and the mean VFE.

        Adds 'image_fpn' (feature pyramid, {key: [B, C, H, W]}) to batch_dict
        and returns the updated dict.
        """
        # guard against NaNs in the input images
        batch_dict['images'] = torch.nan_to_num(batch_dict['images'])
        # Single code path instead of the previous duplicated if/else:
        # gradients stay in the ambient mode unless the backbone is frozen.
        with torch.set_grad_enabled(torch.is_grad_enabled() and not self.freeze):
            _, image_fpn = self.resnet(batch_dict['images'])
        batch_dict = self.mean_vfe(batch_dict)
        batch_dict['image_fpn'] = image_fpn
        return batch_dict
class ImageResNetVFE(VFETemplate):
    """VFE combining a mean point-feature encoder with a hooked ResNet image
    backbone; the image feature pyramid is attached to the batch dict.
    """

    def __init__(self, model_cfg, point_cloud_range, num_point_features, **kwargs):
        super().__init__(model_cfg, **kwargs)
        self.num_point_features = num_point_features
        # [x_min, y_min, z_min, x_max, y_max, z_max]
        self.pc_range = point_cloud_range
        # when True, the image backbone runs without gradient tracking in
        # forward(). NOTE(review): this does not call eval() or set
        # requires_grad=False on the backbone parameters — confirm intended.
        self.freeze = model_cfg.FREEZE_BACKBONE
        self.resnet = HookedResNet(resnet=model_cfg.BACKBONE,
                                   output_layers=model_cfg.OUTPUT_LAYERS)
        self.mean_vfe = MeanVFE(model_cfg, num_point_features)

    def get_output_feature_dim(self):
        """Number of output features per voxel (produced by the mean VFE)."""
        return self.num_point_features

    def forward(self, batch_dict, **kwargs):
        """Run the image backbone and the mean VFE.

        Adds 'image_fpn' (feature pyramid, {key: [B, C, H, W]}) to batch_dict
        and returns the updated dict.
        """
        # guard against NaNs in the input images
        batch_dict['images'] = torch.nan_to_num(batch_dict['images'])
        # Single code path instead of the previous duplicated if/else:
        # gradients stay in the ambient mode unless the backbone is frozen.
        with torch.set_grad_enabled(torch.is_grad_enabled() and not self.freeze):
            _, image_fpn = self.resnet(batch_dict['images'])
        batch_dict = self.mean_vfe(batch_dict)
        batch_dict['image_fpn'] = image_fpn
        return batch_dict
| 34.647887
| 83
| 0.671951
| 327
| 2,460
| 4.66055
| 0.198777
| 0.106299
| 0.104987
| 0.052493
| 0.828084
| 0.828084
| 0.828084
| 0.828084
| 0.828084
| 0.828084
| 0
| 0
| 0.231707
| 2,460
| 71
| 84
| 34.647887
| 0.806349
| 0.075203
| 0
| 0.791667
| 0
| 0
| 0.029062
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0.041667
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8c374c289c498d77021f4ba74421169e4595ab8b
| 201
|
py
|
Python
|
abmarl/pols/__init__.py
|
Leonardo767/Abmarl
|
9fada5447b09174c6a70b6032b4a8d08b66c4589
|
[
"Apache-2.0"
] | 7
|
2020-11-13T01:33:44.000Z
|
2021-03-05T14:30:34.000Z
|
abmarl/pols/__init__.py
|
Leonardo767/Abmarl
|
9fada5447b09174c6a70b6032b4a8d08b66c4589
|
[
"Apache-2.0"
] | 91
|
2020-11-04T23:34:30.000Z
|
2021-06-08T17:18:00.000Z
|
abmarl/pols/__init__.py
|
Leonardo767/Abmarl
|
9fada5447b09174c6a70b6032b4a8d08b66c4589
|
[
"Apache-2.0"
] | 6
|
2021-07-12T19:28:51.000Z
|
2022-03-01T00:50:02.000Z
|
from .abstract_policy import HeuristicPolicy
from .random_policy import RandomAction
from .policy import GreedyPolicy
from .policy import EpsilonSoftPolicy
from .policy import RandomFirstActionPolicy
| 28.714286
| 44
| 0.870647
| 22
| 201
| 7.863636
| 0.454545
| 0.346821
| 0.277457
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104478
| 201
| 6
| 45
| 33.5
| 0.961111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4feebec3c8e434a205b70e180501d0deac39326b
| 9,659
|
py
|
Python
|
tests/integration/test_kpi_smart_gap_mode.py
|
JLSteenwyk/ClipKIT
|
b2d6033e638a78acc36942f9f420d5d3bc0e09ad
|
[
"MIT"
] | 28
|
2020-06-11T14:06:15.000Z
|
2022-03-14T04:32:12.000Z
|
tests/integration/test_kpi_smart_gap_mode.py
|
JLSteenwyk/ClipKIT
|
b2d6033e638a78acc36942f9f420d5d3bc0e09ad
|
[
"MIT"
] | 10
|
2020-09-14T13:59:13.000Z
|
2022-02-25T17:17:01.000Z
|
tests/integration/test_kpi_smart_gap_mode.py
|
JLSteenwyk/ClipKIT
|
b2d6033e638a78acc36942f9f420d5d3bc0e09ad
|
[
"MIT"
] | 1
|
2020-12-15T07:25:09.000Z
|
2020-12-15T07:25:09.000Z
|
import pytest
from pathlib import Path
from clipkit.clipkit import execute
from clipkit.files import FileFormat
from clipkit.modes import TrimmingMode
# path to this test module; used to locate the samples/ and expected/ dirs
here = Path(__file__)
@pytest.mark.integration
class TestKPISmartGapsMode(object):
    """Integration tests for ClipKIT's kpi-smart-gap trimming mode.

    Every test trims one sample alignment via execute() and compares the
    resulting output file byte-for-byte against a pre-computed expected file.
    (The previous version repeated the same kwargs block in every test and
    carried unused in_file_format / out_file_format locals; both are fixed
    by the shared _run_and_compare helper.)
    """

    def _run_and_compare(self, input_file, output_file, gaps, expected_file):
        """Trim input_file in kpi-smart-gap mode at the given gaps threshold,
        then assert the output file exactly matches expected_file."""
        execute(
            input_file=input_file,
            output_file=output_file,
            input_file_format='fasta',
            output_file_format='fasta',
            complement=False,
            gaps=gaps,
            mode=TrimmingMode.kpi_smart_gap,
            use_log=False,
        )
        with open(expected_file, "r") as expected:
            expected_content = expected.read()
        with open(output_file, "r") as out_file:
            output_content = out_file.read()
        assert expected_content == output_content

    def test_simple_no_change(self):
        """
        usage: clipkit simple.fa -m kpi-smart-gap
        """
        self._run_and_compare(
            input_file=f"{here.parent}/samples/simple.fa",
            output_file="output/simple.fa_smart_gaps",
            gaps=0.8,
            expected_file=f"{here.parent}/expected/simple.fa_kpi_smart_gaps",
        )

    def test_12_YIL115C_Anc_2_253_codon_aln(self):
        """
        test kpi-smart-gap with codon alignment of yeast sequences
        usage: clipkit 12_YIL115C_Anc_2.253_codon_aln.fasta -m kpi-smart-gap
        """
        self._run_and_compare(
            input_file=f"{here.parent}/samples/12_YIL115C_Anc_2.253_codon_aln.fasta",
            output_file="output/12_YIL115C_Anc_2.253_codon_aln.fasta.clipkit_kpi_smart_gaps",
            gaps=0.9167,
            expected_file=f"{here.parent}/expected/12_YIL115C_Anc_2.253_codon_aln.clipkit_kpi_smart_gaps",
        )

    def test_12_YIL115C_Anc_2_253_aa_aln(self):
        """
        test kpi-smart-gap with amino acid alignment of yeast sequences
        usage: clipkit 12_YIL115C_Anc_2.253_aa_aln.fasta -m kpi-smart-gap
        """
        self._run_and_compare(
            input_file=f"{here.parent}/samples/12_YIL115C_Anc_2.253_aa_aln.fasta",
            output_file="output/12_YIL115C_Anc_2.253_aa_aln.fasta.clipkit_smart_gaps",
            gaps=0.9167,
            expected_file=f"{here.parent}/expected/12_YIL115C_Anc_2.253_aa_aln.clipkit_kpi_smart_gaps",
        )

    def test_24_ENSG00000163519_aa_aln(self):
        """
        test kpi-smart-gap with amino acid alignment of mammalian sequences
        usage: clipkit 24_ENSG00000163519_aa_aln.fasta -m kpi-smart-gap
        """
        self._run_and_compare(
            input_file=f"{here.parent}/samples/24_ENSG00000163519_aa_aln.fasta",
            output_file="output/24_ENSG00000163519_aa_aln.fasta.clipkit",
            gaps=0.9583,
            expected_file=f"{here.parent}/expected/24_ENSG00000163519_aa_aln.clipkit_kpi_smart_gaps",
        )

    def test_24_ENSG00000163519_codon_aln(self):
        """
        test kpi-smart-gap with codon alignment of mammalian sequences
        usage: clipkit 24_ENSG00000163519_codon_aln.fasta -m kpi-smart-gap
        """
        self._run_and_compare(
            input_file=f"{here.parent}/samples/24_ENSG00000163519_codon_aln.fasta",
            output_file="output/24_ENSG00000163519_codon_aln.fasta.clipkit",
            gaps=0.9583,
            expected_file=f"{here.parent}/expected/24_ENSG00000163519_codon_aln.clipkit_kpi_smart_gaps",
        )

    def test_EOG091N44M8_aa(self):
        """
        test kpi-smart-gap with amino acid alignment of Penicillium sequences
        usage: clipkit EOG091N44M8_aa.fa -m kpi-smart-gap
        """
        self._run_and_compare(
            input_file=f"{here.parent}/samples/EOG091N44M8_aa.fa",
            output_file="output/EOG091N44M8_aa.fa.clipkit",
            gaps=0.8803,
            expected_file=f"{here.parent}/expected/EOG091N44M8_aa.clipkit_kpi_smart_gaps",
        )

    def test_EOG091N44M8_nt(self):
        """
        test kpi-smart-gap with nucleotide alignment of Penicillium sequences
        usage: clipkit EOG091N44M8_nt.fa -m kpi-smart-gap
        """
        self._run_and_compare(
            input_file=f"{here.parent}/samples/EOG091N44M8_nt.fa",
            output_file="output/EOG091N44M8_nt.fa.clipkit",
            gaps=0.8803,
            expected_file=f"{here.parent}/expected/EOG091N44M8_nt.clipkit_kpi_smart_gaps",
        )

    @pytest.mark.slow
    def test_EOG092C0CZK_aa(self):
        """
        test kpi-smart-gap with amino acid alignment of fungal sequences
        usage: clipkit EOG092C0CZK_aa_aln.fasta -m kpi-smart-gap
        """
        self._run_and_compare(
            input_file=f"{here.parent}/samples/EOG092C0CZK_aa_aln.fasta",
            output_file="output/EOG092C0CZK_aa_aln.fasta.clipkit",
            gaps=0.9986,
            expected_file=f"{here.parent}/expected/EOG092C0CZK_aa_aln.clipkit_kpi_smart_gaps",
        )

    def test_EOG092C4VOX_aa(self):
        """
        test kpi-smart-gap with amino acid alignment of fungal sequences
        usage: clipkit EOG092C4VOX_aa_aln.fasta -m kpi-smart-gap
        """
        self._run_and_compare(
            input_file=f"{here.parent}/samples/EOG092C4VOX_aa_aln.fasta",
            output_file="output/EOG092C4VOX_aa_aln.fasta.clipkit",
            gaps=0.9993,
            expected_file=f"{here.parent}/expected/EOG092C4VOX_aa_aln.clipkit_kpi_smart_gaps",
        )
| 32.96587
| 100
| 0.612693
| 1,145
| 9,659
| 4.844541
| 0.076856
| 0.081125
| 0.091942
| 0.023436
| 0.912025
| 0.900667
| 0.88967
| 0.8574
| 0.828376
| 0.815576
| 0
| 0.053392
| 0.298064
| 9,659
| 292
| 101
| 33.078767
| 0.764749
| 0.100735
| 0
| 0.728571
| 0
| 0
| 0.188852
| 0.166508
| 0
| 0
| 0
| 0
| 0.042857
| 1
| 0.042857
| false
| 0
| 0.02381
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8b417e91638786abf14d781d658ff977d1a64c6c
| 170
|
py
|
Python
|
culturemesh/client/__init__.py
|
raydleemsc/flask_tests_workshop
|
4a90a4ac8a186874e63ae0dd531a331d2b9e4385
|
[
"CC-BY-4.0"
] | null | null | null |
culturemesh/client/__init__.py
|
raydleemsc/flask_tests_workshop
|
4a90a4ac8a186874e63ae0dd531a331d2b9e4385
|
[
"CC-BY-4.0"
] | null | null | null |
culturemesh/client/__init__.py
|
raydleemsc/flask_tests_workshop
|
4a90a4ac8a186874e63ae0dd531a331d2b9e4385
|
[
"CC-BY-4.0"
] | 3
|
2021-09-20T20:14:42.000Z
|
2022-01-12T19:11:36.000Z
|
#
# Import things to be discoverable at the package level.
#
""" Only the API Client should be discoverable """
from .client import Client
from .client import Request
| 21.25
| 57
| 0.741176
| 24
| 170
| 5.25
| 0.625
| 0.222222
| 0.253968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.188235
| 170
| 8
| 58
| 21.25
| 0.913043
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8c84d5b8991a0f487fe1c6aaaa1ea63c12b0c63e
| 85,841
|
py
|
Python
|
tests/parser/identifiers.6.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/identifiers.6.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/identifiers.6.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
input = """
n(x4000).
n(x3999).
n(x3998).
n(x3997).
n(x3996).
n(x3995).
n(x3994).
n(x3993).
n(x3992).
n(x3991).
n(x3990).
n(x3989).
n(x3988).
n(x3987).
n(x3986).
n(x3985).
n(x3984).
n(x3983).
n(x3982).
n(x3981).
n(x3980).
n(x3979).
n(x3978).
n(x3977).
n(x3976).
n(x3975).
n(x3974).
n(x3973).
n(x3972).
n(x3971).
n(x3970).
n(x3969).
n(x3968).
n(x3967).
n(x3966).
n(x3965).
n(x3964).
n(x3963).
n(x3962).
n(x3961).
n(x3960).
n(x3959).
n(x3958).
n(x3957).
n(x3956).
n(x3955).
n(x3954).
n(x3953).
n(x3952).
n(x3951).
n(x3950).
n(x3949).
n(x3948).
n(x3947).
n(x3946).
n(x3945).
n(x3944).
n(x3943).
n(x3942).
n(x3941).
n(x3940).
n(x3939).
n(x3938).
n(x3937).
n(x3936).
n(x3935).
n(x3934).
n(x3933).
n(x3932).
n(x3931).
n(x3930).
n(x3929).
n(x3928).
n(x3927).
n(x3926).
n(x3925).
n(x3924).
n(x3923).
n(x3922).
n(x3921).
n(x3920).
n(x3919).
n(x3918).
n(x3917).
n(x3916).
n(x3915).
n(x3914).
n(x3913).
n(x3912).
n(x3911).
n(x3910).
n(x3909).
n(x3908).
n(x3907).
n(x3906).
n(x3905).
n(x3904).
n(x3903).
n(x3902).
n(x3901).
n(x3900).
n(x3899).
n(x3898).
n(x3897).
n(x3896).
n(x3895).
n(x3894).
n(x3893).
n(x3892).
n(x3891).
n(x3890).
n(x3889).
n(x3888).
n(x3887).
n(x3886).
n(x3885).
n(x3884).
n(x3883).
n(x3882).
n(x3881).
n(x3880).
n(x3879).
n(x3878).
n(x3877).
n(x3876).
n(x3875).
n(x3874).
n(x3873).
n(x3872).
n(x3871).
n(x3870).
n(x3869).
n(x3868).
n(x3867).
n(x3866).
n(x3865).
n(x3864).
n(x3863).
n(x3862).
n(x3861).
n(x3860).
n(x3859).
n(x3858).
n(x3857).
n(x3856).
n(x3855).
n(x3854).
n(x3853).
n(x3852).
n(x3851).
n(x3850).
n(x3849).
n(x3848).
n(x3847).
n(x3846).
n(x3845).
n(x3844).
n(x3843).
n(x3842).
n(x3841).
n(x3840).
n(x3839).
n(x3838).
n(x3837).
n(x3836).
n(x3835).
n(x3834).
n(x3833).
n(x3832).
n(x3831).
n(x3830).
n(x3829).
n(x3828).
n(x3827).
n(x3826).
n(x3825).
n(x3824).
n(x3823).
n(x3822).
n(x3821).
n(x3820).
n(x3819).
n(x3818).
n(x3817).
n(x3816).
n(x3815).
n(x3814).
n(x3813).
n(x3812).
n(x3811).
n(x3810).
n(x3809).
n(x3808).
n(x3807).
n(x3806).
n(x3805).
n(x3804).
n(x3803).
n(x3802).
n(x3801).
n(x3800).
n(x3799).
n(x3798).
n(x3797).
n(x3796).
n(x3795).
n(x3794).
n(x3793).
n(x3792).
n(x3791).
n(x3790).
n(x3789).
n(x3788).
n(x3787).
n(x3786).
n(x3785).
n(x3784).
n(x3783).
n(x3782).
n(x3781).
n(x3780).
n(x3779).
n(x3778).
n(x3777).
n(x3776).
n(x3775).
n(x3774).
n(x3773).
n(x3772).
n(x3771).
n(x3770).
n(x3769).
n(x3768).
n(x3767).
n(x3766).
n(x3765).
n(x3764).
n(x3763).
n(x3762).
n(x3761).
n(x3760).
n(x3759).
n(x3758).
n(x3757).
n(x3756).
n(x3755).
n(x3754).
n(x3753).
n(x3752).
n(x3751).
n(x3750).
n(x3749).
n(x3748).
n(x3747).
n(x3746).
n(x3745).
n(x3744).
n(x3743).
n(x3742).
n(x3741).
n(x3740).
n(x3739).
n(x3738).
n(x3737).
n(x3736).
n(x3735).
n(x3734).
n(x3733).
n(x3732).
n(x3731).
n(x3730).
n(x3729).
n(x3728).
n(x3727).
n(x3726).
n(x3725).
n(x3724).
n(x3723).
n(x3722).
n(x3721).
n(x3720).
n(x3719).
n(x3718).
n(x3717).
n(x3716).
n(x3715).
n(x3714).
n(x3713).
n(x3712).
n(x3711).
n(x3710).
n(x3709).
n(x3708).
n(x3707).
n(x3706).
n(x3705).
n(x3704).
n(x3703).
n(x3702).
n(x3701).
n(x3700).
n(x3699).
n(x3698).
n(x3697).
n(x3696).
n(x3695).
n(x3694).
n(x3693).
n(x3692).
n(x3691).
n(x3690).
n(x3689).
n(x3688).
n(x3687).
n(x3686).
n(x3685).
n(x3684).
n(x3683).
n(x3682).
n(x3681).
n(x3680).
n(x3679).
n(x3678).
n(x3677).
n(x3676).
n(x3675).
n(x3674).
n(x3673).
n(x3672).
n(x3671).
n(x3670).
n(x3669).
n(x3668).
n(x3667).
n(x3666).
n(x3665).
n(x3664).
n(x3663).
n(x3662).
n(x3661).
n(x3660).
n(x3659).
n(x3658).
n(x3657).
n(x3656).
n(x3655).
n(x3654).
n(x3653).
n(x3652).
n(x3651).
n(x3650).
n(x3649).
n(x3648).
n(x3647).
n(x3646).
n(x3645).
n(x3644).
n(x3643).
n(x3642).
n(x3641).
n(x3640).
n(x3639).
n(x3638).
n(x3637).
n(x3636).
n(x3635).
n(x3634).
n(x3633).
n(x3632).
n(x3631).
n(x3630).
n(x3629).
n(x3628).
n(x3627).
n(x3626).
n(x3625).
n(x3624).
n(x3623).
n(x3622).
n(x3621).
n(x3620).
n(x3619).
n(x3618).
n(x3617).
n(x3616).
n(x3615).
n(x3614).
n(x3613).
n(x3612).
n(x3611).
n(x3610).
n(x3609).
n(x3608).
n(x3607).
n(x3606).
n(x3605).
n(x3604).
n(x3603).
n(x3602).
n(x3601).
n(x3600).
n(x3599).
n(x3598).
n(x3597).
n(x3596).
n(x3595).
n(x3594).
n(x3593).
n(x3592).
n(x3591).
n(x3590).
n(x3589).
n(x3588).
n(x3587).
n(x3586).
n(x3585).
n(x3584).
n(x3583).
n(x3582).
n(x3581).
n(x3580).
n(x3579).
n(x3578).
n(x3577).
n(x3576).
n(x3575).
n(x3574).
n(x3573).
n(x3572).
n(x3571).
n(x3570).
n(x3569).
n(x3568).
n(x3567).
n(x3566).
n(x3565).
n(x3564).
n(x3563).
n(x3562).
n(x3561).
n(x3560).
n(x3559).
n(x3558).
n(x3557).
n(x3556).
n(x3555).
n(x3554).
n(x3553).
n(x3552).
n(x3551).
n(x3550).
n(x3549).
n(x3548).
n(x3547).
n(x3546).
n(x3545).
n(x3544).
n(x3543).
n(x3542).
n(x3541).
n(x3540).
n(x3539).
n(x3538).
n(x3537).
n(x3536).
n(x3535).
n(x3534).
n(x3533).
n(x3532).
n(x3531).
n(x3530).
n(x3529).
n(x3528).
n(x3527).
n(x3526).
n(x3525).
n(x3524).
n(x3523).
n(x3522).
n(x3521).
n(x3520).
n(x3519).
n(x3518).
n(x3517).
n(x3516).
n(x3515).
n(x3514).
n(x3513).
n(x3512).
n(x3511).
n(x3510).
n(x3509).
n(x3508).
n(x3507).
n(x3506).
n(x3505).
n(x3504).
n(x3503).
n(x3502).
n(x3501).
n(x3500).
n(x3499).
n(x3498).
n(x3497).
n(x3496).
n(x3495).
n(x3494).
n(x3493).
n(x3492).
n(x3491).
n(x3490).
n(x3489).
n(x3488).
n(x3487).
n(x3486).
n(x3485).
n(x3484).
n(x3483).
n(x3482).
n(x3481).
n(x3480).
n(x3479).
n(x3478).
n(x3477).
n(x3476).
n(x3475).
n(x3474).
n(x3473).
n(x3472).
n(x3471).
n(x3470).
n(x3469).
n(x3468).
n(x3467).
n(x3466).
n(x3465).
n(x3464).
n(x3463).
n(x3462).
n(x3461).
n(x3460).
n(x3459).
n(x3458).
n(x3457).
n(x3456).
n(x3455).
n(x3454).
n(x3453).
n(x3452).
n(x3451).
n(x3450).
n(x3449).
n(x3448).
n(x3447).
n(x3446).
n(x3445).
n(x3444).
n(x3443).
n(x3442).
n(x3441).
n(x3440).
n(x3439).
n(x3438).
n(x3437).
n(x3436).
n(x3435).
n(x3434).
n(x3433).
n(x3432).
n(x3431).
n(x3430).
n(x3429).
n(x3428).
n(x3427).
n(x3426).
n(x3425).
n(x3424).
n(x3423).
n(x3422).
n(x3421).
n(x3420).
n(x3419).
n(x3418).
n(x3417).
n(x3416).
n(x3415).
n(x3414).
n(x3413).
n(x3412).
n(x3411).
n(x3410).
n(x3409).
n(x3408).
n(x3407).
n(x3406).
n(x3405).
n(x3404).
n(x3403).
n(x3402).
n(x3401).
n(x3400).
n(x3399).
n(x3398).
n(x3397).
n(x3396).
n(x3395).
n(x3394).
n(x3393).
n(x3392).
n(x3391).
n(x3390).
n(x3389).
n(x3388).
n(x3387).
n(x3386).
n(x3385).
n(x3384).
n(x3383).
n(x3382).
n(x3381).
n(x3380).
n(x3379).
n(x3378).
n(x3377).
n(x3376).
n(x3375).
n(x3374).
n(x3373).
n(x3372).
n(x3371).
n(x3370).
n(x3369).
n(x3368).
n(x3367).
n(x3366).
n(x3365).
n(x3364).
n(x3363).
n(x3362).
n(x3361).
n(x3360).
n(x3359).
n(x3358).
n(x3357).
n(x3356).
n(x3355).
n(x3354).
n(x3353).
n(x3352).
n(x3351).
n(x3350).
n(x3349).
n(x3348).
n(x3347).
n(x3346).
n(x3345).
n(x3344).
n(x3343).
n(x3342).
n(x3341).
n(x3340).
n(x3339).
n(x3338).
n(x3337).
n(x3336).
n(x3335).
n(x3334).
n(x3333).
n(x3332).
n(x3331).
n(x3330).
n(x3329).
n(x3328).
n(x3327).
n(x3326).
n(x3325).
n(x3324).
n(x3323).
n(x3322).
n(x3321).
n(x3320).
n(x3319).
n(x3318).
n(x3317).
n(x3316).
n(x3315).
n(x3314).
n(x3313).
n(x3312).
n(x3311).
n(x3310).
n(x3309).
n(x3308).
n(x3307).
n(x3306).
n(x3305).
n(x3304).
n(x3303).
n(x3302).
n(x3301).
n(x3300).
n(x3299).
n(x3298).
n(x3297).
n(x3296).
n(x3295).
n(x3294).
n(x3293).
n(x3292).
n(x3291).
n(x3290).
n(x3289).
n(x3288).
n(x3287).
n(x3286).
n(x3285).
n(x3284).
n(x3283).
n(x3282).
n(x3281).
n(x3280).
n(x3279).
n(x3278).
n(x3277).
n(x3276).
n(x3275).
n(x3274).
n(x3273).
n(x3272).
n(x3271).
n(x3270).
n(x3269).
n(x3268).
n(x3267).
n(x3266).
n(x3265).
n(x3264).
n(x3263).
n(x3262).
n(x3261).
n(x3260).
n(x3259).
n(x3258).
n(x3257).
n(x3256).
n(x3255).
n(x3254).
n(x3253).
n(x3252).
n(x3251).
n(x3250).
n(x3249).
n(x3248).
n(x3247).
n(x3246).
n(x3245).
n(x3244).
n(x3243).
n(x3242).
n(x3241).
n(x3240).
n(x3239).
n(x3238).
n(x3237).
n(x3236).
n(x3235).
n(x3234).
n(x3233).
n(x3232).
n(x3231).
n(x3230).
n(x3229).
n(x3228).
n(x3227).
n(x3226).
n(x3225).
n(x3224).
n(x3223).
n(x3222).
n(x3221).
n(x3220).
n(x3219).
n(x3218).
n(x3217).
n(x3216).
n(x3215).
n(x3214).
n(x3213).
n(x3212).
n(x3211).
n(x3210).
n(x3209).
n(x3208).
n(x3207).
n(x3206).
n(x3205).
n(x3204).
n(x3203).
n(x3202).
n(x3201).
n(x3200).
n(x3199).
n(x3198).
n(x3197).
n(x3196).
n(x3195).
n(x3194).
n(x3193).
n(x3192).
n(x3191).
n(x3190).
n(x3189).
n(x3188).
n(x3187).
n(x3186).
n(x3185).
n(x3184).
n(x3183).
n(x3182).
n(x3181).
n(x3180).
n(x3179).
n(x3178).
n(x3177).
n(x3176).
n(x3175).
n(x3174).
n(x3173).
n(x3172).
n(x3171).
n(x3170).
n(x3169).
n(x3168).
n(x3167).
n(x3166).
n(x3165).
n(x3164).
n(x3163).
n(x3162).
n(x3161).
n(x3160).
n(x3159).
n(x3158).
n(x3157).
n(x3156).
n(x3155).
n(x3154).
n(x3153).
n(x3152).
n(x3151).
n(x3150).
n(x3149).
n(x3148).
n(x3147).
n(x3146).
n(x3145).
n(x3144).
n(x3143).
n(x3142).
n(x3141).
n(x3140).
n(x3139).
n(x3138).
n(x3137).
n(x3136).
n(x3135).
n(x3134).
n(x3133).
n(x3132).
n(x3131).
n(x3130).
n(x3129).
n(x3128).
n(x3127).
n(x3126).
n(x3125).
n(x3124).
n(x3123).
n(x3122).
n(x3121).
n(x3120).
n(x3119).
n(x3118).
n(x3117).
n(x3116).
n(x3115).
n(x3114).
n(x3113).
n(x3112).
n(x3111).
n(x3110).
n(x3109).
n(x3108).
n(x3107).
n(x3106).
n(x3105).
n(x3104).
n(x3103).
n(x3102).
n(x3101).
n(x3100).
n(x3099).
n(x3098).
n(x3097).
n(x3096).
n(x3095).
n(x3094).
n(x3093).
n(x3092).
n(x3091).
n(x3090).
n(x3089).
n(x3088).
n(x3087).
n(x3086).
n(x3085).
n(x3084).
n(x3083).
n(x3082).
n(x3081).
n(x3080).
n(x3079).
n(x3078).
n(x3077).
n(x3076).
n(x3075).
n(x3074).
n(x3073).
n(x3072).
n(x3071).
n(x3070).
n(x3069).
n(x3068).
n(x3067).
n(x3066).
n(x3065).
n(x3064).
n(x3063).
n(x3062).
n(x3061).
n(x3060).
n(x3059).
n(x3058).
n(x3057).
n(x3056).
n(x3055).
n(x3054).
n(x3053).
n(x3052).
n(x3051).
n(x3050).
n(x3049).
n(x3048).
n(x3047).
n(x3046).
n(x3045).
n(x3044).
n(x3043).
n(x3042).
n(x3041).
n(x3040).
n(x3039).
n(x3038).
n(x3037).
n(x3036).
n(x3035).
n(x3034).
n(x3033).
n(x3032).
n(x3031).
n(x3030).
n(x3029).
n(x3028).
n(x3027).
n(x3026).
n(x3025).
n(x3024).
n(x3023).
n(x3022).
n(x3021).
n(x3020).
n(x3019).
n(x3018).
n(x3017).
n(x3016).
n(x3015).
n(x3014).
n(x3013).
n(x3012).
n(x3011).
n(x3010).
n(x3009).
n(x3008).
n(x3007).
n(x3006).
n(x3005).
n(x3004).
n(x3003).
n(x3002).
n(x3001).
n(x3000).
n(x2999).
n(x2998).
n(x2997).
n(x2996).
n(x2995).
n(x2994).
n(x2993).
n(x2992).
n(x2991).
n(x2990).
n(x2989).
n(x2988).
n(x2987).
n(x2986).
n(x2985).
n(x2984).
n(x2983).
n(x2982).
n(x2981).
n(x2980).
n(x2979).
n(x2978).
n(x2977).
n(x2976).
n(x2975).
n(x2974).
n(x2973).
n(x2972).
n(x2971).
n(x2970).
n(x2969).
n(x2968).
n(x2967).
n(x2966).
n(x2965).
n(x2964).
n(x2963).
n(x2962).
n(x2961).
n(x2960).
n(x2959).
n(x2958).
n(x2957).
n(x2956).
n(x2955).
n(x2954).
n(x2953).
n(x2952).
n(x2951).
n(x2950).
n(x2949).
n(x2948).
n(x2947).
n(x2946).
n(x2945).
n(x2944).
n(x2943).
n(x2942).
n(x2941).
n(x2940).
n(x2939).
n(x2938).
n(x2937).
n(x2936).
n(x2935).
n(x2934).
n(x2933).
n(x2932).
n(x2931).
n(x2930).
n(x2929).
n(x2928).
n(x2927).
n(x2926).
n(x2925).
n(x2924).
n(x2923).
n(x2922).
n(x2921).
n(x2920).
n(x2919).
n(x2918).
n(x2917).
n(x2916).
n(x2915).
n(x2914).
n(x2913).
n(x2912).
n(x2911).
n(x2910).
n(x2909).
n(x2908).
n(x2907).
n(x2906).
n(x2905).
n(x2904).
n(x2903).
n(x2902).
n(x2901).
n(x2900).
n(x2899).
n(x2898).
n(x2897).
n(x2896).
n(x2895).
n(x2894).
n(x2893).
n(x2892).
n(x2891).
n(x2890).
n(x2889).
n(x2888).
n(x2887).
n(x2886).
n(x2885).
n(x2884).
n(x2883).
n(x2882).
n(x2881).
n(x2880).
n(x2879).
n(x2878).
n(x2877).
n(x2876).
n(x2875).
n(x2874).
n(x2873).
n(x2872).
n(x2871).
n(x2870).
n(x2869).
n(x2868).
n(x2867).
n(x2866).
n(x2865).
n(x2864).
n(x2863).
n(x2862).
n(x2861).
n(x2860).
n(x2859).
n(x2858).
n(x2857).
n(x2856).
n(x2855).
n(x2854).
n(x2853).
n(x2852).
n(x2851).
n(x2850).
n(x2849).
n(x2848).
n(x2847).
n(x2846).
n(x2845).
n(x2844).
n(x2843).
n(x2842).
n(x2841).
n(x2840).
n(x2839).
n(x2838).
n(x2837).
n(x2836).
n(x2835).
n(x2834).
n(x2833).
n(x2832).
n(x2831).
n(x2830).
n(x2829).
n(x2828).
n(x2827).
n(x2826).
n(x2825).
n(x2824).
n(x2823).
n(x2822).
n(x2821).
n(x2820).
n(x2819).
n(x2818).
n(x2817).
n(x2816).
n(x2815).
n(x2814).
n(x2813).
n(x2812).
n(x2811).
n(x2810).
n(x2809).
n(x2808).
n(x2807).
n(x2806).
n(x2805).
n(x2804).
n(x2803).
n(x2802).
n(x2801).
n(x2800).
n(x2799).
n(x2798).
n(x2797).
n(x2796).
n(x2795).
n(x2794).
n(x2793).
n(x2792).
n(x2791).
n(x2790).
n(x2789).
n(x2788).
n(x2787).
n(x2786).
n(x2785).
n(x2784).
n(x2783).
n(x2782).
n(x2781).
n(x2780).
n(x2779).
n(x2778).
n(x2777).
n(x2776).
n(x2775).
n(x2774).
n(x2773).
n(x2772).
n(x2771).
n(x2770).
n(x2769).
n(x2768).
n(x2767).
n(x2766).
n(x2765).
n(x2764).
n(x2763).
n(x2762).
n(x2761).
n(x2760).
n(x2759).
n(x2758).
n(x2757).
n(x2756).
n(x2755).
n(x2754).
n(x2753).
n(x2752).
n(x2751).
n(x2750).
n(x2749).
n(x2748).
n(x2747).
n(x2746).
n(x2745).
n(x2744).
n(x2743).
n(x2742).
n(x2741).
n(x2740).
n(x2739).
n(x2738).
n(x2737).
n(x2736).
n(x2735).
n(x2734).
n(x2733).
n(x2732).
n(x2731).
n(x2730).
n(x2729).
n(x2728).
n(x2727).
n(x2726).
n(x2725).
n(x2724).
n(x2723).
n(x2722).
n(x2721).
n(x2720).
n(x2719).
n(x2718).
n(x2717).
n(x2716).
n(x2715).
n(x2714).
n(x2713).
n(x2712).
n(x2711).
n(x2710).
n(x2709).
n(x2708).
n(x2707).
n(x2706).
n(x2705).
n(x2704).
n(x2703).
n(x2702).
n(x2701).
n(x2700).
n(x2699).
n(x2698).
n(x2697).
n(x2696).
n(x2695).
n(x2694).
n(x2693).
n(x2692).
n(x2691).
n(x2690).
n(x2689).
n(x2688).
n(x2687).
n(x2686).
n(x2685).
n(x2684).
n(x2683).
n(x2682).
n(x2681).
n(x2680).
n(x2679).
n(x2678).
n(x2677).
n(x2676).
n(x2675).
n(x2674).
n(x2673).
n(x2672).
n(x2671).
n(x2670).
n(x2669).
n(x2668).
n(x2667).
n(x2666).
n(x2665).
n(x2664).
n(x2663).
n(x2662).
n(x2661).
n(x2660).
n(x2659).
n(x2658).
n(x2657).
n(x2656).
n(x2655).
n(x2654).
n(x2653).
n(x2652).
n(x2651).
n(x2650).
n(x2649).
n(x2648).
n(x2647).
n(x2646).
n(x2645).
n(x2644).
n(x2643).
n(x2642).
n(x2641).
n(x2640).
n(x2639).
n(x2638).
n(x2637).
n(x2636).
n(x2635).
n(x2634).
n(x2633).
n(x2632).
n(x2631).
n(x2630).
n(x2629).
n(x2628).
n(x2627).
n(x2626).
n(x2625).
n(x2624).
n(x2623).
n(x2622).
n(x2621).
n(x2620).
n(x2619).
n(x2618).
n(x2617).
n(x2616).
n(x2615).
n(x2614).
n(x2613).
n(x2612).
n(x2611).
n(x2610).
n(x2609).
n(x2608).
n(x2607).
n(x2606).
n(x2605).
n(x2604).
n(x2603).
n(x2602).
n(x2601).
n(x2600).
n(x2599).
n(x2598).
n(x2597).
n(x2596).
n(x2595).
n(x2594).
n(x2593).
n(x2592).
n(x2591).
n(x2590).
n(x2589).
n(x2588).
n(x2587).
n(x2586).
n(x2585).
n(x2584).
n(x2583).
n(x2582).
n(x2581).
n(x2580).
n(x2579).
n(x2578).
n(x2577).
n(x2576).
n(x2575).
n(x2574).
n(x2573).
n(x2572).
n(x2571).
n(x2570).
n(x2569).
n(x2568).
n(x2567).
n(x2566).
n(x2565).
n(x2564).
n(x2563).
n(x2562).
n(x2561).
n(x2560).
n(x2559).
n(x2558).
n(x2557).
n(x2556).
n(x2555).
n(x2554).
n(x2553).
n(x2552).
n(x2551).
n(x2550).
n(x2549).
n(x2548).
n(x2547).
n(x2546).
n(x2545).
n(x2544).
n(x2543).
n(x2542).
n(x2541).
n(x2540).
n(x2539).
n(x2538).
n(x2537).
n(x2536).
n(x2535).
n(x2534).
n(x2533).
n(x2532).
n(x2531).
n(x2530).
n(x2529).
n(x2528).
n(x2527).
n(x2526).
n(x2525).
n(x2524).
n(x2523).
n(x2522).
n(x2521).
n(x2520).
n(x2519).
n(x2518).
n(x2517).
n(x2516).
n(x2515).
n(x2514).
n(x2513).
n(x2512).
n(x2511).
n(x2510).
n(x2509).
n(x2508).
n(x2507).
n(x2506).
n(x2505).
n(x2504).
n(x2503).
n(x2502).
n(x2501).
n(x2500).
n(x2499).
n(x2498).
n(x2497).
n(x2496).
n(x2495).
n(x2494).
n(x2493).
n(x2492).
n(x2491).
n(x2490).
n(x2489).
n(x2488).
n(x2487).
n(x2486).
n(x2485).
n(x2484).
n(x2483).
n(x2482).
n(x2481).
n(x2480).
n(x2479).
n(x2478).
n(x2477).
n(x2476).
n(x2475).
n(x2474).
n(x2473).
n(x2472).
n(x2471).
n(x2470).
n(x2469).
n(x2468).
n(x2467).
n(x2466).
n(x2465).
n(x2464).
n(x2463).
n(x2462).
n(x2461).
n(x2460).
n(x2459).
n(x2458).
n(x2457).
n(x2456).
n(x2455).
n(x2454).
n(x2453).
n(x2452).
n(x2451).
n(x2450).
n(x2449).
n(x2448).
n(x2447).
n(x2446).
n(x2445).
n(x2444).
n(x2443).
n(x2442).
n(x2441).
n(x2440).
n(x2439).
n(x2438).
n(x2437).
n(x2436).
n(x2435).
n(x2434).
n(x2433).
n(x2432).
n(x2431).
n(x2430).
n(x2429).
n(x2428).
n(x2427).
n(x2426).
n(x2425).
n(x2424).
n(x2423).
n(x2422).
n(x2421).
n(x2420).
n(x2419).
n(x2418).
n(x2417).
n(x2416).
n(x2415).
n(x2414).
n(x2413).
n(x2412).
n(x2411).
n(x2410).
n(x2409).
n(x2408).
n(x2407).
n(x2406).
n(x2405).
n(x2404).
n(x2403).
n(x2402).
n(x2401).
n(x2400).
n(x2399).
n(x2398).
n(x2397).
n(x2396).
n(x2395).
n(x2394).
n(x2393).
n(x2392).
n(x2391).
n(x2390).
n(x2389).
n(x2388).
n(x2387).
n(x2386).
n(x2385).
n(x2384).
n(x2383).
n(x2382).
n(x2381).
n(x2380).
n(x2379).
n(x2378).
n(x2377).
n(x2376).
n(x2375).
n(x2374).
n(x2373).
n(x2372).
n(x2371).
n(x2370).
n(x2369).
n(x2368).
n(x2367).
n(x2366).
n(x2365).
n(x2364).
n(x2363).
n(x2362).
n(x2361).
n(x2360).
n(x2359).
n(x2358).
n(x2357).
n(x2356).
n(x2355).
n(x2354).
n(x2353).
n(x2352).
n(x2351).
n(x2350).
n(x2349).
n(x2348).
n(x2347).
n(x2346).
n(x2345).
n(x2344).
n(x2343).
n(x2342).
n(x2341).
n(x2340).
n(x2339).
n(x2338).
n(x2337).
n(x2336).
n(x2335).
n(x2334).
n(x2333).
n(x2332).
n(x2331).
n(x2330).
n(x2329).
n(x2328).
n(x2327).
n(x2326).
n(x2325).
n(x2324).
n(x2323).
n(x2322).
n(x2321).
n(x2320).
n(x2319).
n(x2318).
n(x2317).
n(x2316).
n(x2315).
n(x2314).
n(x2313).
n(x2312).
n(x2311).
n(x2310).
n(x2309).
n(x2308).
n(x2307).
n(x2306).
n(x2305).
n(x2304).
n(x2303).
n(x2302).
n(x2301).
n(x2300).
n(x2299).
n(x2298).
n(x2297).
n(x2296).
n(x2295).
n(x2294).
n(x2293).
n(x2292).
n(x2291).
n(x2290).
n(x2289).
n(x2288).
n(x2287).
n(x2286).
n(x2285).
n(x2284).
n(x2283).
n(x2282).
n(x2281).
n(x2280).
n(x2279).
n(x2278).
n(x2277).
n(x2276).
n(x2275).
n(x2274).
n(x2273).
n(x2272).
n(x2271).
n(x2270).
n(x2269).
n(x2268).
n(x2267).
n(x2266).
n(x2265).
n(x2264).
n(x2263).
n(x2262).
n(x2261).
n(x2260).
n(x2259).
n(x2258).
n(x2257).
n(x2256).
n(x2255).
n(x2254).
n(x2253).
n(x2252).
n(x2251).
n(x2250).
n(x2249).
n(x2248).
n(x2247).
n(x2246).
n(x2245).
n(x2244).
n(x2243).
n(x2242).
n(x2241).
n(x2240).
n(x2239).
n(x2238).
n(x2237).
n(x2236).
n(x2235).
n(x2234).
n(x2233).
n(x2232).
n(x2231).
n(x2230).
n(x2229).
n(x2228).
n(x2227).
n(x2226).
n(x2225).
n(x2224).
n(x2223).
n(x2222).
n(x2221).
n(x2220).
n(x2219).
n(x2218).
n(x2217).
n(x2216).
n(x2215).
n(x2214).
n(x2213).
n(x2212).
n(x2211).
n(x2210).
n(x2209).
n(x2208).
n(x2207).
n(x2206).
n(x2205).
n(x2204).
n(x2203).
n(x2202).
n(x2201).
n(x2200).
n(x2199).
n(x2198).
n(x2197).
n(x2196).
n(x2195).
n(x2194).
n(x2193).
n(x2192).
n(x2191).
n(x2190).
n(x2189).
n(x2188).
n(x2187).
n(x2186).
n(x2185).
n(x2184).
n(x2183).
n(x2182).
n(x2181).
n(x2180).
n(x2179).
n(x2178).
n(x2177).
n(x2176).
n(x2175).
n(x2174).
n(x2173).
n(x2172).
n(x2171).
n(x2170).
n(x2169).
n(x2168).
n(x2167).
n(x2166).
n(x2165).
n(x2164).
n(x2163).
n(x2162).
n(x2161).
n(x2160).
n(x2159).
n(x2158).
n(x2157).
n(x2156).
n(x2155).
n(x2154).
n(x2153).
n(x2152).
n(x2151).
n(x2150).
n(x2149).
n(x2148).
n(x2147).
n(x2146).
n(x2145).
n(x2144).
n(x2143).
n(x2142).
n(x2141).
n(x2140).
n(x2139).
n(x2138).
n(x2137).
n(x2136).
n(x2135).
n(x2134).
n(x2133).
n(x2132).
n(x2131).
n(x2130).
n(x2129).
n(x2128).
n(x2127).
n(x2126).
n(x2125).
n(x2124).
n(x2123).
n(x2122).
n(x2121).
n(x2120).
n(x2119).
n(x2118).
n(x2117).
n(x2116).
n(x2115).
n(x2114).
n(x2113).
n(x2112).
n(x2111).
n(x2110).
n(x2109).
n(x2108).
n(x2107).
n(x2106).
n(x2105).
n(x2104).
n(x2103).
n(x2102).
n(x2101).
n(x2100).
n(x2099).
n(x2098).
n(x2097).
n(x2096).
n(x2095).
n(x2094).
n(x2093).
n(x2092).
n(x2091).
n(x2090).
n(x2089).
n(x2088).
n(x2087).
n(x2086).
n(x2085).
n(x2084).
n(x2083).
n(x2082).
n(x2081).
n(x2080).
n(x2079).
n(x2078).
n(x2077).
n(x2076).
n(x2075).
n(x2074).
n(x2073).
n(x2072).
n(x2071).
n(x2070).
n(x2069).
n(x2068).
n(x2067).
n(x2066).
n(x2065).
n(x2064).
n(x2063).
n(x2062).
n(x2061).
n(x2060).
n(x2059).
n(x2058).
n(x2057).
n(x2056).
n(x2055).
n(x2054).
n(x2053).
n(x2052).
n(x2051).
n(x2050).
n(x2049).
n(x2048).
n(x2047).
n(x2046).
n(x2045).
n(x2044).
n(x2043).
n(x2042).
n(x2041).
n(x2040).
n(x2039).
n(x2038).
n(x2037).
n(x2036).
n(x2035).
n(x2034).
n(x2033).
n(x2032).
n(x2031).
n(x2030).
n(x2029).
n(x2028).
n(x2027).
n(x2026).
n(x2025).
n(x2024).
n(x2023).
n(x2022).
n(x2021).
n(x2020).
n(x2019).
n(x2018).
n(x2017).
n(x2016).
n(x2015).
n(x2014).
n(x2013).
n(x2012).
n(x2011).
n(x2010).
n(x2009).
n(x2008).
n(x2007).
n(x2006).
n(x2005).
n(x2004).
n(x2003).
n(x2002).
n(x2001).
n(x2000).
n(x1999).
n(x1998).
n(x1997).
n(x1996).
n(x1995).
n(x1994).
n(x1993).
n(x1992).
n(x1991).
n(x1990).
n(x1989).
n(x1988).
n(x1987).
n(x1986).
n(x1985).
n(x1984).
n(x1983).
n(x1982).
n(x1981).
n(x1980).
n(x1979).
n(x1978).
n(x1977).
n(x1976).
n(x1975).
n(x1974).
n(x1973).
n(x1972).
n(x1971).
n(x1970).
n(x1969).
n(x1968).
n(x1967).
n(x1966).
n(x1965).
n(x1964).
n(x1963).
n(x1962).
n(x1961).
n(x1960).
n(x1959).
n(x1958).
n(x1957).
n(x1956).
n(x1955).
n(x1954).
n(x1953).
n(x1952).
n(x1951).
n(x1950).
n(x1949).
n(x1948).
n(x1947).
n(x1946).
n(x1945).
n(x1944).
n(x1943).
n(x1942).
n(x1941).
n(x1940).
n(x1939).
n(x1938).
n(x1937).
n(x1936).
n(x1935).
n(x1934).
n(x1933).
n(x1932).
n(x1931).
n(x1930).
n(x1929).
n(x1928).
n(x1927).
n(x1926).
n(x1925).
n(x1924).
n(x1923).
n(x1922).
n(x1921).
n(x1920).
n(x1919).
n(x1918).
n(x1917).
n(x1916).
n(x1915).
n(x1914).
n(x1913).
n(x1912).
n(x1911).
n(x1910).
n(x1909).
n(x1908).
n(x1907).
n(x1906).
n(x1905).
n(x1904).
n(x1903).
n(x1902).
n(x1901).
n(x1900).
n(x1899).
n(x1898).
n(x1897).
n(x1896).
n(x1895).
n(x1894).
n(x1893).
n(x1892).
n(x1891).
n(x1890).
n(x1889).
n(x1888).
n(x1887).
n(x1886).
n(x1885).
n(x1884).
n(x1883).
n(x1882).
n(x1881).
n(x1880).
n(x1879).
n(x1878).
n(x1877).
n(x1876).
n(x1875).
n(x1874).
n(x1873).
n(x1872).
n(x1871).
n(x1870).
n(x1869).
n(x1868).
n(x1867).
n(x1866).
n(x1865).
n(x1864).
n(x1863).
n(x1862).
n(x1861).
n(x1860).
n(x1859).
n(x1858).
n(x1857).
n(x1856).
n(x1855).
n(x1854).
n(x1853).
n(x1852).
n(x1851).
n(x1850).
n(x1849).
n(x1848).
n(x1847).
n(x1846).
n(x1845).
n(x1844).
n(x1843).
n(x1842).
n(x1841).
n(x1840).
n(x1839).
n(x1838).
n(x1837).
n(x1836).
n(x1835).
n(x1834).
n(x1833).
n(x1832).
n(x1831).
n(x1830).
n(x1829).
n(x1828).
n(x1827).
n(x1826).
n(x1825).
n(x1824).
n(x1823).
n(x1822).
n(x1821).
n(x1820).
n(x1819).
n(x1818).
n(x1817).
n(x1816).
n(x1815).
n(x1814).
n(x1813).
n(x1812).
n(x1811).
n(x1810).
n(x1809).
n(x1808).
n(x1807).
n(x1806).
n(x1805).
n(x1804).
n(x1803).
n(x1802).
n(x1801).
n(x1800).
n(x1799).
n(x1798).
n(x1797).
n(x1796).
n(x1795).
n(x1794).
n(x1793).
n(x1792).
n(x1791).
n(x1790).
n(x1789).
n(x1788).
n(x1787).
n(x1786).
n(x1785).
n(x1784).
n(x1783).
n(x1782).
n(x1781).
n(x1780).
n(x1779).
n(x1778).
n(x1777).
n(x1776).
n(x1775).
n(x1774).
n(x1773).
n(x1772).
n(x1771).
n(x1770).
n(x1769).
n(x1768).
n(x1767).
n(x1766).
n(x1765).
n(x1764).
n(x1763).
n(x1762).
n(x1761).
n(x1760).
n(x1759).
n(x1758).
n(x1757).
n(x1756).
n(x1755).
n(x1754).
n(x1753).
n(x1752).
n(x1751).
n(x1750).
n(x1749).
n(x1748).
n(x1747).
n(x1746).
n(x1745).
n(x1744).
n(x1743).
n(x1742).
n(x1741).
n(x1740).
n(x1739).
n(x1738).
n(x1737).
n(x1736).
n(x1735).
n(x1734).
n(x1733).
n(x1732).
n(x1731).
n(x1730).
n(x1729).
n(x1728).
n(x1727).
n(x1726).
n(x1725).
n(x1724).
n(x1723).
n(x1722).
n(x1721).
n(x1720).
n(x1719).
n(x1718).
n(x1717).
n(x1716).
n(x1715).
n(x1714).
n(x1713).
n(x1712).
n(x1711).
n(x1710).
n(x1709).
n(x1708).
n(x1707).
n(x1706).
n(x1705).
n(x1704).
n(x1703).
n(x1702).
n(x1701).
n(x1700).
n(x1699).
n(x1698).
n(x1697).
n(x1696).
n(x1695).
n(x1694).
n(x1693).
n(x1692).
n(x1691).
n(x1690).
n(x1689).
n(x1688).
n(x1687).
n(x1686).
n(x1685).
n(x1684).
n(x1683).
n(x1682).
n(x1681).
n(x1680).
n(x1679).
n(x1678).
n(x1677).
n(x1676).
n(x1675).
n(x1674).
n(x1673).
n(x1672).
n(x1671).
n(x1670).
n(x1669).
n(x1668).
n(x1667).
n(x1666).
n(x1665).
n(x1664).
n(x1663).
n(x1662).
n(x1661).
n(x1660).
n(x1659).
n(x1658).
n(x1657).
n(x1656).
n(x1655).
n(x1654).
n(x1653).
n(x1652).
n(x1651).
n(x1650).
n(x1649).
n(x1648).
n(x1647).
n(x1646).
n(x1645).
n(x1644).
n(x1643).
n(x1642).
n(x1641).
n(x1640).
n(x1639).
n(x1638).
n(x1637).
n(x1636).
n(x1635).
n(x1634).
n(x1633).
n(x1632).
n(x1631).
n(x1630).
n(x1629).
n(x1628).
n(x1627).
n(x1626).
n(x1625).
n(x1624).
n(x1623).
n(x1622).
n(x1621).
n(x1620).
n(x1619).
n(x1618).
n(x1617).
n(x1616).
n(x1615).
n(x1614).
n(x1613).
n(x1612).
n(x1611).
n(x1610).
n(x1609).
n(x1608).
n(x1607).
n(x1606).
n(x1605).
n(x1604).
n(x1603).
n(x1602).
n(x1601).
n(x1600).
n(x1599).
n(x1598).
n(x1597).
n(x1596).
n(x1595).
n(x1594).
n(x1593).
n(x1592).
n(x1591).
n(x1590).
n(x1589).
n(x1588).
n(x1587).
n(x1586).
n(x1585).
n(x1584).
n(x1583).
n(x1582).
n(x1581).
n(x1580).
n(x1579).
n(x1578).
n(x1577).
n(x1576).
n(x1575).
n(x1574).
n(x1573).
n(x1572).
n(x1571).
n(x1570).
n(x1569).
n(x1568).
n(x1567).
n(x1566).
n(x1565).
n(x1564).
n(x1563).
n(x1562).
n(x1561).
n(x1560).
n(x1559).
n(x1558).
n(x1557).
n(x1556).
n(x1555).
n(x1554).
n(x1553).
n(x1552).
n(x1551).
n(x1550).
n(x1549).
n(x1548).
n(x1547).
n(x1546).
n(x1545).
n(x1544).
n(x1543).
n(x1542).
n(x1541).
n(x1540).
n(x1539).
n(x1538).
n(x1537).
n(x1536).
n(x1535).
n(x1534).
n(x1533).
n(x1532).
n(x1531).
n(x1530).
n(x1529).
n(x1528).
n(x1527).
n(x1526).
n(x1525).
n(x1524).
n(x1523).
n(x1522).
n(x1521).
n(x1520).
n(x1519).
n(x1518).
n(x1517).
n(x1516).
n(x1515).
n(x1514).
n(x1513).
n(x1512).
n(x1511).
n(x1510).
n(x1509).
n(x1508).
n(x1507).
n(x1506).
n(x1505).
n(x1504).
n(x1503).
n(x1502).
n(x1501).
n(x1500).
n(x1499).
n(x1498).
n(x1497).
n(x1496).
n(x1495).
n(x1494).
n(x1493).
n(x1492).
n(x1491).
n(x1490).
n(x1489).
n(x1488).
n(x1487).
n(x1486).
n(x1485).
n(x1484).
n(x1483).
n(x1482).
n(x1481).
n(x1480).
n(x1479).
n(x1478).
n(x1477).
n(x1476).
n(x1475).
n(x1474).
n(x1473).
n(x1472).
n(x1471).
n(x1470).
n(x1469).
n(x1468).
n(x1467).
n(x1466).
n(x1465).
n(x1464).
n(x1463).
n(x1462).
n(x1461).
n(x1460).
n(x1459).
n(x1458).
n(x1457).
n(x1456).
n(x1455).
n(x1454).
n(x1453).
n(x1452).
n(x1451).
n(x1450).
n(x1449).
n(x1448).
n(x1447).
n(x1446).
n(x1445).
n(x1444).
n(x1443).
n(x1442).
n(x1441).
n(x1440).
n(x1439).
n(x1438).
n(x1437).
n(x1436).
n(x1435).
n(x1434).
n(x1433).
n(x1432).
n(x1431).
n(x1430).
n(x1429).
n(x1428).
n(x1427).
n(x1426).
n(x1425).
n(x1424).
n(x1423).
n(x1422).
n(x1421).
n(x1420).
n(x1419).
n(x1418).
n(x1417).
n(x1416).
n(x1415).
n(x1414).
n(x1413).
n(x1412).
n(x1411).
n(x1410).
n(x1409).
n(x1408).
n(x1407).
n(x1406).
n(x1405).
n(x1404).
n(x1403).
n(x1402).
n(x1401).
n(x1400).
n(x1399).
n(x1398).
n(x1397).
n(x1396).
n(x1395).
n(x1394).
n(x1393).
n(x1392).
n(x1391).
n(x1390).
n(x1389).
n(x1388).
n(x1387).
n(x1386).
n(x1385).
n(x1384).
n(x1383).
n(x1382).
n(x1381).
n(x1380).
n(x1379).
n(x1378).
n(x1377).
n(x1376).
n(x1375).
n(x1374).
n(x1373).
n(x1372).
n(x1371).
n(x1370).
n(x1369).
n(x1368).
n(x1367).
n(x1366).
n(x1365).
n(x1364).
n(x1363).
n(x1362).
n(x1361).
n(x1360).
n(x1359).
n(x1358).
n(x1357).
n(x1356).
n(x1355).
n(x1354).
n(x1353).
n(x1352).
n(x1351).
n(x1350).
n(x1349).
n(x1348).
n(x1347).
n(x1346).
n(x1345).
n(x1344).
n(x1343).
n(x1342).
n(x1341).
n(x1340).
n(x1339).
n(x1338).
n(x1337).
n(x1336).
n(x1335).
n(x1334).
n(x1333).
n(x1332).
n(x1331).
n(x1330).
n(x1329).
n(x1328).
n(x1327).
n(x1326).
n(x1325).
n(x1324).
n(x1323).
n(x1322).
n(x1321).
n(x1320).
n(x1319).
n(x1318).
n(x1317).
n(x1316).
n(x1315).
n(x1314).
n(x1313).
n(x1312).
n(x1311).
n(x1310).
n(x1309).
n(x1308).
n(x1307).
n(x1306).
n(x1305).
n(x1304).
n(x1303).
n(x1302).
n(x1301).
n(x1300).
n(x1299).
n(x1298).
n(x1297).
n(x1296).
n(x1295).
n(x1294).
n(x1293).
n(x1292).
n(x1291).
n(x1290).
n(x1289).
n(x1288).
n(x1287).
n(x1286).
n(x1285).
n(x1284).
n(x1283).
n(x1282).
n(x1281).
n(x1280).
n(x1279).
n(x1278).
n(x1277).
n(x1276).
n(x1275).
n(x1274).
n(x1273).
n(x1272).
n(x1271).
n(x1270).
n(x1269).
n(x1268).
n(x1267).
n(x1266).
n(x1265).
n(x1264).
n(x1263).
n(x1262).
n(x1261).
n(x1260).
n(x1259).
n(x1258).
n(x1257).
n(x1256).
n(x1255).
n(x1254).
n(x1253).
n(x1252).
n(x1251).
n(x1250).
n(x1249).
n(x1248).
n(x1247).
n(x1246).
n(x1245).
n(x1244).
n(x1243).
n(x1242).
n(x1241).
n(x1240).
n(x1239).
n(x1238).
n(x1237).
n(x1236).
n(x1235).
n(x1234).
n(x1233).
n(x1232).
n(x1231).
n(x1230).
n(x1229).
n(x1228).
n(x1227).
n(x1226).
n(x1225).
n(x1224).
n(x1223).
n(x1222).
n(x1221).
n(x1220).
n(x1219).
n(x1218).
n(x1217).
n(x1216).
n(x1215).
n(x1214).
n(x1213).
n(x1212).
n(x1211).
n(x1210).
n(x1209).
n(x1208).
n(x1207).
n(x1206).
n(x1205).
n(x1204).
n(x1203).
n(x1202).
n(x1201).
n(x1200).
n(x1199).
n(x1198).
n(x1197).
n(x1196).
n(x1195).
n(x1194).
n(x1193).
n(x1192).
n(x1191).
n(x1190).
n(x1189).
n(x1188).
n(x1187).
n(x1186).
n(x1185).
n(x1184).
n(x1183).
n(x1182).
n(x1181).
n(x1180).
n(x1179).
n(x1178).
n(x1177).
n(x1176).
n(x1175).
n(x1174).
n(x1173).
n(x1172).
n(x1171).
n(x1170).
n(x1169).
n(x1168).
n(x1167).
n(x1166).
n(x1165).
n(x1164).
n(x1163).
n(x1162).
n(x1161).
n(x1160).
n(x1159).
n(x1158).
n(x1157).
n(x1156).
n(x1155).
n(x1154).
n(x1153).
n(x1152).
n(x1151).
n(x1150).
n(x1149).
n(x1148).
n(x1147).
n(x1146).
n(x1145).
n(x1144).
n(x1143).
n(x1142).
n(x1141).
n(x1140).
n(x1139).
n(x1138).
n(x1137).
n(x1136).
n(x1135).
n(x1134).
n(x1133).
n(x1132).
n(x1131).
n(x1130).
n(x1129).
n(x1128).
n(x1127).
n(x1126).
n(x1125).
n(x1124).
n(x1123).
n(x1122).
n(x1121).
n(x1120).
n(x1119).
n(x1118).
n(x1117).
n(x1116).
n(x1115).
n(x1114).
n(x1113).
n(x1112).
n(x1111).
n(x1110).
n(x1109).
n(x1108).
n(x1107).
n(x1106).
n(x1105).
n(x1104).
n(x1103).
n(x1102).
n(x1101).
n(x1100).
n(x1099).
n(x1098).
n(x1097).
n(x1096).
n(x1095).
n(x1094).
n(x1093).
n(x1092).
n(x1091).
n(x1090).
n(x1089).
n(x1088).
n(x1087).
n(x1086).
n(x1085).
n(x1084).
n(x1083).
n(x1082).
n(x1081).
n(x1080).
n(x1079).
n(x1078).
n(x1077).
n(x1076).
n(x1075).
n(x1074).
n(x1073).
n(x1072).
n(x1071).
n(x1070).
n(x1069).
n(x1068).
n(x1067).
n(x1066).
n(x1065).
n(x1064).
n(x1063).
n(x1062).
n(x1061).
n(x1060).
n(x1059).
n(x1058).
n(x1057).
n(x1056).
n(x1055).
n(x1054).
n(x1053).
n(x1052).
n(x1051).
n(x1050).
n(x1049).
n(x1048).
n(x1047).
n(x1046).
n(x1045).
n(x1044).
n(x1043).
n(x1042).
n(x1041).
n(x1040).
n(x1039).
n(x1038).
n(x1037).
n(x1036).
n(x1035).
n(x1034).
n(x1033).
n(x1032).
n(x1031).
n(x1030).
n(x1029).
n(x1028).
n(x1027).
n(x1026).
n(x1025).
n(x1024).
n(x1023).
n(x1022).
n(x1021).
n(x1020).
n(x1019).
n(x1018).
n(x1017).
n(x1016).
n(x1015).
n(x1014).
n(x1013).
n(x1012).
n(x1011).
n(x1010).
n(x1009).
n(x1008).
n(x1007).
n(x1006).
n(x1005).
n(x1004).
n(x1003).
n(x1002).
n(x1001).
n(x1000).
n(x999).
n(x998).
n(x997).
n(x996).
n(x995).
n(x994).
n(x993).
n(x992).
n(x991).
n(x990).
n(x989).
n(x988).
n(x987).
n(x986).
n(x985).
n(x984).
n(x983).
n(x982).
n(x981).
n(x980).
n(x979).
n(x978).
n(x977).
n(x976).
n(x975).
n(x974).
n(x973).
n(x972).
n(x971).
n(x970).
n(x969).
n(x968).
n(x967).
n(x966).
n(x965).
n(x964).
n(x963).
n(x962).
n(x961).
n(x960).
n(x959).
n(x958).
n(x957).
n(x956).
n(x955).
n(x954).
n(x953).
n(x952).
n(x951).
n(x950).
n(x949).
n(x948).
n(x947).
n(x946).
n(x945).
n(x944).
n(x943).
n(x942).
n(x941).
n(x940).
n(x939).
n(x938).
n(x937).
n(x936).
n(x935).
n(x934).
n(x933).
n(x932).
n(x931).
n(x930).
n(x929).
n(x928).
n(x927).
n(x926).
n(x925).
n(x924).
n(x923).
n(x922).
n(x921).
n(x920).
n(x919).
n(x918).
n(x917).
n(x916).
n(x915).
n(x914).
n(x913).
n(x912).
n(x911).
n(x910).
n(x909).
n(x908).
n(x907).
n(x906).
n(x905).
n(x904).
n(x903).
n(x902).
n(x901).
n(x900).
n(x899).
n(x898).
n(x897).
n(x896).
n(x895).
n(x894).
n(x893).
n(x892).
n(x891).
n(x890).
n(x889).
n(x888).
n(x887).
n(x886).
n(x885).
n(x884).
n(x883).
n(x882).
n(x881).
n(x880).
n(x879).
n(x878).
n(x877).
n(x876).
n(x875).
n(x874).
n(x873).
n(x872).
n(x871).
n(x870).
n(x869).
n(x868).
n(x867).
n(x866).
n(x865).
n(x864).
n(x863).
n(x862).
n(x861).
n(x860).
n(x859).
n(x858).
n(x857).
n(x856).
n(x855).
n(x854).
n(x853).
n(x852).
n(x851).
n(x850).
n(x849).
n(x848).
n(x847).
n(x846).
n(x845).
n(x844).
n(x843).
n(x842).
n(x841).
n(x840).
n(x839).
n(x838).
n(x837).
n(x836).
n(x835).
n(x834).
n(x833).
n(x832).
n(x831).
n(x830).
n(x829).
n(x828).
n(x827).
n(x826).
n(x825).
n(x824).
n(x823).
n(x822).
n(x821).
n(x820).
n(x819).
n(x818).
n(x817).
n(x816).
n(x815).
n(x814).
n(x813).
n(x812).
n(x811).
n(x810).
n(x809).
n(x808).
n(x807).
n(x806).
n(x805).
n(x804).
n(x803).
n(x802).
n(x801).
n(x800).
n(x799).
n(x798).
n(x797).
n(x796).
n(x795).
n(x794).
n(x793).
n(x792).
n(x791).
n(x790).
n(x789).
n(x788).
n(x787).
n(x786).
n(x785).
n(x784).
n(x783).
n(x782).
n(x781).
n(x780).
n(x779).
n(x778).
n(x777).
n(x776).
n(x775).
n(x774).
n(x773).
n(x772).
n(x771).
n(x770).
n(x769).
n(x768).
n(x767).
n(x766).
n(x765).
n(x764).
n(x763).
n(x762).
n(x761).
n(x760).
n(x759).
n(x758).
n(x757).
n(x756).
n(x755).
n(x754).
n(x753).
n(x752).
n(x751).
n(x750).
n(x749).
n(x748).
n(x747).
n(x746).
n(x745).
n(x744).
n(x743).
n(x742).
n(x741).
n(x740).
n(x739).
n(x738).
n(x737).
n(x736).
n(x735).
n(x734).
n(x733).
n(x732).
n(x731).
n(x730).
n(x729).
n(x728).
n(x727).
n(x726).
n(x725).
n(x724).
n(x723).
n(x722).
n(x721).
n(x720).
n(x719).
n(x718).
n(x717).
n(x716).
n(x715).
n(x714).
n(x713).
n(x712).
n(x711).
n(x710).
n(x709).
n(x708).
n(x707).
n(x706).
n(x705).
n(x704).
n(x703).
n(x702).
n(x701).
n(x700).
n(x699).
n(x698).
n(x697).
n(x696).
n(x695).
n(x694).
n(x693).
n(x692).
n(x691).
n(x690).
n(x689).
n(x688).
n(x687).
n(x686).
n(x685).
n(x684).
n(x683).
n(x682).
n(x681).
n(x680).
n(x679).
n(x678).
n(x677).
n(x676).
n(x675).
n(x674).
n(x673).
n(x672).
n(x671).
n(x670).
n(x669).
n(x668).
n(x667).
n(x666).
n(x665).
n(x664).
n(x663).
n(x662).
n(x661).
n(x660).
n(x659).
n(x658).
n(x657).
n(x656).
n(x655).
n(x654).
n(x653).
n(x652).
n(x651).
n(x650).
n(x649).
n(x648).
n(x647).
n(x646).
n(x645).
n(x644).
n(x643).
n(x642).
n(x641).
n(x640).
n(x639).
n(x638).
n(x637).
n(x636).
n(x635).
n(x634).
n(x633).
n(x632).
n(x631).
n(x630).
n(x629).
n(x628).
n(x627).
n(x626).
n(x625).
n(x624).
n(x623).
n(x622).
n(x621).
n(x620).
n(x619).
n(x618).
n(x617).
n(x616).
n(x615).
n(x614).
n(x613).
n(x612).
n(x611).
n(x610).
n(x609).
n(x608).
n(x607).
n(x606).
n(x605).
n(x604).
n(x603).
n(x602).
n(x601).
n(x600).
n(x599).
n(x598).
n(x597).
n(x596).
n(x595).
n(x594).
n(x593).
n(x592).
n(x591).
n(x590).
n(x589).
n(x588).
n(x587).
n(x586).
n(x585).
n(x584).
n(x583).
n(x582).
n(x581).
n(x580).
n(x579).
n(x578).
n(x577).
n(x576).
n(x575).
n(x574).
n(x573).
n(x572).
n(x571).
n(x570).
n(x569).
n(x568).
n(x567).
n(x566).
n(x565).
n(x564).
n(x563).
n(x562).
n(x561).
n(x560).
n(x559).
n(x558).
n(x557).
n(x556).
n(x555).
n(x554).
n(x553).
n(x552).
n(x551).
n(x550).
n(x549).
n(x548).
n(x547).
n(x546).
n(x545).
n(x544).
n(x543).
n(x542).
n(x541).
n(x540).
n(x539).
n(x538).
n(x537).
n(x536).
n(x535).
n(x534).
n(x533).
n(x532).
n(x531).
n(x530).
n(x529).
n(x528).
n(x527).
n(x526).
n(x525).
n(x524).
n(x523).
n(x522).
n(x521).
n(x520).
n(x519).
n(x518).
n(x517).
n(x516).
n(x515).
n(x514).
n(x513).
n(x512).
n(x511).
n(x510).
n(x509).
n(x508).
n(x507).
n(x506).
n(x505).
n(x504).
n(x503).
n(x502).
n(x501).
n(x500).
n(x499).
n(x498).
n(x497).
n(x496).
n(x495).
n(x494).
n(x493).
n(x492).
n(x491).
n(x490).
n(x489).
n(x488).
n(x487).
n(x486).
n(x485).
n(x484).
n(x483).
n(x482).
n(x481).
n(x480).
n(x479).
n(x478).
n(x477).
n(x476).
n(x475).
n(x474).
n(x473).
n(x472).
n(x471).
n(x470).
n(x469).
n(x468).
n(x467).
n(x466).
n(x465).
n(x464).
n(x463).
n(x462).
n(x461).
n(x460).
n(x459).
n(x458).
n(x457).
n(x456).
n(x455).
n(x454).
n(x453).
n(x452).
n(x451).
n(x450).
n(x449).
n(x448).
n(x447).
n(x446).
n(x445).
n(x444).
n(x443).
n(x442).
n(x441).
n(x440).
n(x439).
n(x438).
n(x437).
n(x436).
n(x435).
n(x434).
n(x433).
n(x432).
n(x431).
n(x430).
n(x429).
n(x428).
n(x427).
n(x426).
n(x425).
n(x424).
n(x423).
n(x422).
n(x421).
n(x420).
n(x419).
n(x418).
n(x417).
n(x416).
n(x415).
n(x414).
n(x413).
n(x412).
n(x411).
n(x410).
n(x409).
n(x408).
n(x407).
n(x406).
n(x405).
n(x404).
n(x403).
n(x402).
n(x401).
n(x400).
n(x399).
n(x398).
n(x397).
n(x396).
n(x395).
n(x394).
n(x393).
n(x392).
n(x391).
n(x390).
n(x389).
n(x388).
n(x387).
n(x386).
n(x385).
n(x384).
n(x383).
n(x382).
n(x381).
n(x380).
n(x379).
n(x378).
n(x377).
n(x376).
n(x375).
n(x374).
n(x373).
n(x372).
n(x371).
n(x370).
n(x369).
n(x368).
n(x367).
n(x366).
n(x365).
n(x364).
n(x363).
n(x362).
n(x361).
n(x360).
n(x359).
n(x358).
n(x357).
n(x356).
n(x355).
n(x354).
n(x353).
n(x352).
n(x351).
n(x350).
n(x349).
n(x348).
n(x347).
n(x346).
n(x345).
n(x344).
n(x343).
n(x342).
n(x341).
n(x340).
n(x339).
n(x338).
n(x337).
n(x336).
n(x335).
n(x334).
n(x333).
n(x332).
n(x331).
n(x330).
n(x329).
n(x328).
n(x327).
n(x326).
n(x325).
n(x324).
n(x323).
n(x322).
n(x321).
n(x320).
n(x319).
n(x318).
n(x317).
n(x316).
n(x315).
n(x314).
n(x313).
n(x312).
n(x311).
n(x310).
n(x309).
n(x308).
n(x307).
n(x306).
n(x305).
n(x304).
n(x303).
n(x302).
n(x301).
n(x300).
n(x299).
n(x298).
n(x297).
n(x296).
n(x295).
n(x294).
n(x293).
n(x292).
n(x291).
n(x290).
n(x289).
n(x288).
n(x287).
n(x286).
n(x285).
n(x284).
n(x283).
n(x282).
n(x281).
n(x280).
n(x279).
n(x278).
n(x277).
n(x276).
n(x275).
n(x274).
n(x273).
n(x272).
n(x271).
n(x270).
n(x269).
n(x268).
n(x267).
n(x266).
n(x265).
n(x264).
n(x263).
n(x262).
n(x261).
n(x260).
n(x259).
n(x258).
n(x257).
n(x256).
n(x255).
n(x254).
n(x253).
n(x252).
n(x251).
n(x250).
n(x249).
n(x248).
n(x247).
n(x246).
n(x245).
n(x244).
n(x243).
n(x242).
n(x241).
n(x240).
n(x239).
n(x238).
n(x237).
n(x236).
n(x235).
n(x234).
n(x233).
n(x232).
n(x231).
n(x230).
n(x229).
n(x228).
n(x227).
n(x226).
n(x225).
n(x224).
n(x223).
n(x222).
n(x221).
n(x220).
n(x219).
n(x218).
n(x217).
n(x216).
n(x215).
n(x214).
n(x213).
n(x212).
n(x211).
n(x210).
n(x209).
n(x208).
n(x207).
n(x206).
n(x205).
n(x204).
n(x203).
n(x202).
n(x201).
n(x200).
n(x199).
n(x198).
n(x197).
n(x196).
n(x195).
n(x194).
n(x193).
n(x192).
n(x191).
n(x190).
n(x189).
n(x188).
n(x187).
n(x186).
n(x185).
n(x184).
n(x183).
n(x182).
n(x181).
n(x180).
n(x179).
n(x178).
n(x177).
n(x176).
n(x175).
n(x174).
n(x173).
n(x172).
n(x171).
n(x170).
n(x169).
n(x168).
n(x167).
n(x166).
n(x165).
n(x164).
n(x163).
n(x162).
n(x161).
n(x160).
n(x159).
n(x158).
n(x157).
n(x156).
n(x155).
n(x154).
n(x153).
n(x152).
n(x151).
n(x150).
n(x149).
n(x148).
n(x147).
n(x146).
n(x145).
n(x144).
n(x143).
n(x142).
n(x141).
n(x140).
n(x139).
n(x138).
n(x137).
n(x136).
n(x135).
n(x134).
n(x133).
n(x132).
n(x131).
n(x130).
n(x129).
n(x128).
n(x127).
n(x126).
n(x125).
n(x124).
n(x123).
n(x122).
n(x121).
n(x120).
n(x119).
n(x118).
n(x117).
n(x116).
n(x115).
n(x114).
n(x113).
n(x112).
n(x111).
n(x110).
n(x109).
n(x108).
n(x107).
n(x106).
n(x105).
n(x104).
n(x103).
n(x102).
n(x101).
n(x100).
n(x99).
n(x98).
n(x97).
n(x96).
n(x95).
n(x94).
n(x93).
n(x92).
n(x91).
n(x90).
n(x89).
n(x88).
n(x87).
n(x86).
n(x85).
n(x84).
n(x83).
n(x82).
n(x81).
n(x80).
n(x79).
n(x78).
n(x77).
n(x76).
n(x75).
n(x74).
n(x73).
n(x72).
n(x71).
n(x70).
n(x69).
n(x68).
n(x67).
n(x66).
n(x65).
n(x64).
n(x63).
n(x62).
n(x61).
n(x60).
n(x59).
n(x58).
n(x57).
n(x56).
n(x55).
n(x54).
n(x53).
n(x52).
n(x51).
n(x50).
n(x49).
n(x48).
n(x47).
n(x46).
n(x45).
n(x44).
n(x43).
n(x42).
n(x41).
n(x40).
n(x39).
n(x38).
n(x37).
n(x36).
n(x35).
n(x34).
n(x33).
n(x32).
n(x31).
n(x30).
n(x29).
n(x28).
n(x27).
n(x26).
n(x25).
n(x24).
n(x23).
n(x22).
n(x21).
n(x20).
n(x19).
n(x18).
n(x17).
n(x16).
n(x15).
n(x14).
n(x13).
n(x12).
n(x11).
n(x10).
n(x9).
n(x8).
n(x7).
n(x6).
n(x5).
n(x4).
n(x3).
n(x2).
n(x1).
n(x4001).
"""
output = """
n(x4000).
n(x3999).
n(x3998).
n(x3997).
n(x3996).
n(x3995).
n(x3994).
n(x3993).
n(x3992).
n(x3991).
n(x3990).
n(x3989).
n(x3988).
n(x3987).
n(x3986).
n(x3985).
n(x3984).
n(x3983).
n(x3982).
n(x3981).
n(x3980).
n(x3979).
n(x3978).
n(x3977).
n(x3976).
n(x3975).
n(x3974).
n(x3973).
n(x3972).
n(x3971).
n(x3970).
n(x3969).
n(x3968).
n(x3967).
n(x3966).
n(x3965).
n(x3964).
n(x3963).
n(x3962).
n(x3961).
n(x3960).
n(x3959).
n(x3958).
n(x3957).
n(x3956).
n(x3955).
n(x3954).
n(x3953).
n(x3952).
n(x3951).
n(x3950).
n(x3949).
n(x3948).
n(x3947).
n(x3946).
n(x3945).
n(x3944).
n(x3943).
n(x3942).
n(x3941).
n(x3940).
n(x3939).
n(x3938).
n(x3937).
n(x3936).
n(x3935).
n(x3934).
n(x3933).
n(x3932).
n(x3931).
n(x3930).
n(x3929).
n(x3928).
n(x3927).
n(x3926).
n(x3925).
n(x3924).
n(x3923).
n(x3922).
n(x3921).
n(x3920).
n(x3919).
n(x3918).
n(x3917).
n(x3916).
n(x3915).
n(x3914).
n(x3913).
n(x3912).
n(x3911).
n(x3910).
n(x3909).
n(x3908).
n(x3907).
n(x3906).
n(x3905).
n(x3904).
n(x3903).
n(x3902).
n(x3901).
n(x3900).
n(x3899).
n(x3898).
n(x3897).
n(x3896).
n(x3895).
n(x3894).
n(x3893).
n(x3892).
n(x3891).
n(x3890).
n(x3889).
n(x3888).
n(x3887).
n(x3886).
n(x3885).
n(x3884).
n(x3883).
n(x3882).
n(x3881).
n(x3880).
n(x3879).
n(x3878).
n(x3877).
n(x3876).
n(x3875).
n(x3874).
n(x3873).
n(x3872).
n(x3871).
n(x3870).
n(x3869).
n(x3868).
n(x3867).
n(x3866).
n(x3865).
n(x3864).
n(x3863).
n(x3862).
n(x3861).
n(x3860).
n(x3859).
n(x3858).
n(x3857).
n(x3856).
n(x3855).
n(x3854).
n(x3853).
n(x3852).
n(x3851).
n(x3850).
n(x3849).
n(x3848).
n(x3847).
n(x3846).
n(x3845).
n(x3844).
n(x3843).
n(x3842).
n(x3841).
n(x3840).
n(x3839).
n(x3838).
n(x3837).
n(x3836).
n(x3835).
n(x3834).
n(x3833).
n(x3832).
n(x3831).
n(x3830).
n(x3829).
n(x3828).
n(x3827).
n(x3826).
n(x3825).
n(x3824).
n(x3823).
n(x3822).
n(x3821).
n(x3820).
n(x3819).
n(x3818).
n(x3817).
n(x3816).
n(x3815).
n(x3814).
n(x3813).
n(x3812).
n(x3811).
n(x3810).
n(x3809).
n(x3808).
n(x3807).
n(x3806).
n(x3805).
n(x3804).
n(x3803).
n(x3802).
n(x3801).
n(x3800).
n(x3799).
n(x3798).
n(x3797).
n(x3796).
n(x3795).
n(x3794).
n(x3793).
n(x3792).
n(x3791).
n(x3790).
n(x3789).
n(x3788).
n(x3787).
n(x3786).
n(x3785).
n(x3784).
n(x3783).
n(x3782).
n(x3781).
n(x3780).
n(x3779).
n(x3778).
n(x3777).
n(x3776).
n(x3775).
n(x3774).
n(x3773).
n(x3772).
n(x3771).
n(x3770).
n(x3769).
n(x3768).
n(x3767).
n(x3766).
n(x3765).
n(x3764).
n(x3763).
n(x3762).
n(x3761).
n(x3760).
n(x3759).
n(x3758).
n(x3757).
n(x3756).
n(x3755).
n(x3754).
n(x3753).
n(x3752).
n(x3751).
n(x3750).
n(x3749).
n(x3748).
n(x3747).
n(x3746).
n(x3745).
n(x3744).
n(x3743).
n(x3742).
n(x3741).
n(x3740).
n(x3739).
n(x3738).
n(x3737).
n(x3736).
n(x3735).
n(x3734).
n(x3733).
n(x3732).
n(x3731).
n(x3730).
n(x3729).
n(x3728).
n(x3727).
n(x3726).
n(x3725).
n(x3724).
n(x3723).
n(x3722).
n(x3721).
n(x3720).
n(x3719).
n(x3718).
n(x3717).
n(x3716).
n(x3715).
n(x3714).
n(x3713).
n(x3712).
n(x3711).
n(x3710).
n(x3709).
n(x3708).
n(x3707).
n(x3706).
n(x3705).
n(x3704).
n(x3703).
n(x3702).
n(x3701).
n(x3700).
n(x3699).
n(x3698).
n(x3697).
n(x3696).
n(x3695).
n(x3694).
n(x3693).
n(x3692).
n(x3691).
n(x3690).
n(x3689).
n(x3688).
n(x3687).
n(x3686).
n(x3685).
n(x3684).
n(x3683).
n(x3682).
n(x3681).
n(x3680).
n(x3679).
n(x3678).
n(x3677).
n(x3676).
n(x3675).
n(x3674).
n(x3673).
n(x3672).
n(x3671).
n(x3670).
n(x3669).
n(x3668).
n(x3667).
n(x3666).
n(x3665).
n(x3664).
n(x3663).
n(x3662).
n(x3661).
n(x3660).
n(x3659).
n(x3658).
n(x3657).
n(x3656).
n(x3655).
n(x3654).
n(x3653).
n(x3652).
n(x3651).
n(x3650).
n(x3649).
n(x3648).
n(x3647).
n(x3646).
n(x3645).
n(x3644).
n(x3643).
n(x3642).
n(x3641).
n(x3640).
n(x3639).
n(x3638).
n(x3637).
n(x3636).
n(x3635).
n(x3634).
n(x3633).
n(x3632).
n(x3631).
n(x3630).
n(x3629).
n(x3628).
n(x3627).
n(x3626).
n(x3625).
n(x3624).
n(x3623).
n(x3622).
n(x3621).
n(x3620).
n(x3619).
n(x3618).
n(x3617).
n(x3616).
n(x3615).
n(x3614).
n(x3613).
n(x3612).
n(x3611).
n(x3610).
n(x3609).
n(x3608).
n(x3607).
n(x3606).
n(x3605).
n(x3604).
n(x3603).
n(x3602).
n(x3601).
n(x3600).
n(x3599).
n(x3598).
n(x3597).
n(x3596).
n(x3595).
n(x3594).
n(x3593).
n(x3592).
n(x3591).
n(x3590).
n(x3589).
n(x3588).
n(x3587).
n(x3586).
n(x3585).
n(x3584).
n(x3583).
n(x3582).
n(x3581).
n(x3580).
n(x3579).
n(x3578).
n(x3577).
n(x3576).
n(x3575).
n(x3574).
n(x3573).
n(x3572).
n(x3571).
n(x3570).
n(x3569).
n(x3568).
n(x3567).
n(x3566).
n(x3565).
n(x3564).
n(x3563).
n(x3562).
n(x3561).
n(x3560).
n(x3559).
n(x3558).
n(x3557).
n(x3556).
n(x3555).
n(x3554).
n(x3553).
n(x3552).
n(x3551).
n(x3550).
n(x3549).
n(x3548).
n(x3547).
n(x3546).
n(x3545).
n(x3544).
n(x3543).
n(x3542).
n(x3541).
n(x3540).
n(x3539).
n(x3538).
n(x3537).
n(x3536).
n(x3535).
n(x3534).
n(x3533).
n(x3532).
n(x3531).
n(x3530).
n(x3529).
n(x3528).
n(x3527).
n(x3526).
n(x3525).
n(x3524).
n(x3523).
n(x3522).
n(x3521).
n(x3520).
n(x3519).
n(x3518).
n(x3517).
n(x3516).
n(x3515).
n(x3514).
n(x3513).
n(x3512).
n(x3511).
n(x3510).
n(x3509).
n(x3508).
n(x3507).
n(x3506).
n(x3505).
n(x3504).
n(x3503).
n(x3502).
n(x3501).
n(x3500).
n(x3499).
n(x3498).
n(x3497).
n(x3496).
n(x3495).
n(x3494).
n(x3493).
n(x3492).
n(x3491).
n(x3490).
n(x3489).
n(x3488).
n(x3487).
n(x3486).
n(x3485).
n(x3484).
n(x3483).
n(x3482).
n(x3481).
n(x3480).
n(x3479).
n(x3478).
n(x3477).
n(x3476).
n(x3475).
n(x3474).
n(x3473).
n(x3472).
n(x3471).
n(x3470).
n(x3469).
n(x3468).
n(x3467).
n(x3466).
n(x3465).
n(x3464).
n(x3463).
n(x3462).
n(x3461).
n(x3460).
n(x3459).
n(x3458).
n(x3457).
n(x3456).
n(x3455).
n(x3454).
n(x3453).
n(x3452).
n(x3451).
n(x3450).
n(x3449).
n(x3448).
n(x3447).
n(x3446).
n(x3445).
n(x3444).
n(x3443).
n(x3442).
n(x3441).
n(x3440).
n(x3439).
n(x3438).
n(x3437).
n(x3436).
n(x3435).
n(x3434).
n(x3433).
n(x3432).
n(x3431).
n(x3430).
n(x3429).
n(x3428).
n(x3427).
n(x3426).
n(x3425).
n(x3424).
n(x3423).
n(x3422).
n(x3421).
n(x3420).
n(x3419).
n(x3418).
n(x3417).
n(x3416).
n(x3415).
n(x3414).
n(x3413).
n(x3412).
n(x3411).
n(x3410).
n(x3409).
n(x3408).
n(x3407).
n(x3406).
n(x3405).
n(x3404).
n(x3403).
n(x3402).
n(x3401).
n(x3400).
n(x3399).
n(x3398).
n(x3397).
n(x3396).
n(x3395).
n(x3394).
n(x3393).
n(x3392).
n(x3391).
n(x3390).
n(x3389).
n(x3388).
n(x3387).
n(x3386).
n(x3385).
n(x3384).
n(x3383).
n(x3382).
n(x3381).
n(x3380).
n(x3379).
n(x3378).
n(x3377).
n(x3376).
n(x3375).
n(x3374).
n(x3373).
n(x3372).
n(x3371).
n(x3370).
n(x3369).
n(x3368).
n(x3367).
n(x3366).
n(x3365).
n(x3364).
n(x3363).
n(x3362).
n(x3361).
n(x3360).
n(x3359).
n(x3358).
n(x3357).
n(x3356).
n(x3355).
n(x3354).
n(x3353).
n(x3352).
n(x3351).
n(x3350).
n(x3349).
n(x3348).
n(x3347).
n(x3346).
n(x3345).
n(x3344).
n(x3343).
n(x3342).
n(x3341).
n(x3340).
n(x3339).
n(x3338).
n(x3337).
n(x3336).
n(x3335).
n(x3334).
n(x3333).
n(x3332).
n(x3331).
n(x3330).
n(x3329).
n(x3328).
n(x3327).
n(x3326).
n(x3325).
n(x3324).
n(x3323).
n(x3322).
n(x3321).
n(x3320).
n(x3319).
n(x3318).
n(x3317).
n(x3316).
n(x3315).
n(x3314).
n(x3313).
n(x3312).
n(x3311).
n(x3310).
n(x3309).
n(x3308).
n(x3307).
n(x3306).
n(x3305).
n(x3304).
n(x3303).
n(x3302).
n(x3301).
n(x3300).
n(x3299).
n(x3298).
n(x3297).
n(x3296).
n(x3295).
n(x3294).
n(x3293).
n(x3292).
n(x3291).
n(x3290).
n(x3289).
n(x3288).
n(x3287).
n(x3286).
n(x3285).
n(x3284).
n(x3283).
n(x3282).
n(x3281).
n(x3280).
n(x3279).
n(x3278).
n(x3277).
n(x3276).
n(x3275).
n(x3274).
n(x3273).
n(x3272).
n(x3271).
n(x3270).
n(x3269).
n(x3268).
n(x3267).
n(x3266).
n(x3265).
n(x3264).
n(x3263).
n(x3262).
n(x3261).
n(x3260).
n(x3259).
n(x3258).
n(x3257).
n(x3256).
n(x3255).
n(x3254).
n(x3253).
n(x3252).
n(x3251).
n(x3250).
n(x3249).
n(x3248).
n(x3247).
n(x3246).
n(x3245).
n(x3244).
n(x3243).
n(x3242).
n(x3241).
n(x3240).
n(x3239).
n(x3238).
n(x3237).
n(x3236).
n(x3235).
n(x3234).
n(x3233).
n(x3232).
n(x3231).
n(x3230).
n(x3229).
n(x3228).
n(x3227).
n(x3226).
n(x3225).
n(x3224).
n(x3223).
n(x3222).
n(x3221).
n(x3220).
n(x3219).
n(x3218).
n(x3217).
n(x3216).
n(x3215).
n(x3214).
n(x3213).
n(x3212).
n(x3211).
n(x3210).
n(x3209).
n(x3208).
n(x3207).
n(x3206).
n(x3205).
n(x3204).
n(x3203).
n(x3202).
n(x3201).
n(x3200).
n(x3199).
n(x3198).
n(x3197).
n(x3196).
n(x3195).
n(x3194).
n(x3193).
n(x3192).
n(x3191).
n(x3190).
n(x3189).
n(x3188).
n(x3187).
n(x3186).
n(x3185).
n(x3184).
n(x3183).
n(x3182).
n(x3181).
n(x3180).
n(x3179).
n(x3178).
n(x3177).
n(x3176).
n(x3175).
n(x3174).
n(x3173).
n(x3172).
n(x3171).
n(x3170).
n(x3169).
n(x3168).
n(x3167).
n(x3166).
n(x3165).
n(x3164).
n(x3163).
n(x3162).
n(x3161).
n(x3160).
n(x3159).
n(x3158).
n(x3157).
n(x3156).
n(x3155).
n(x3154).
n(x3153).
n(x3152).
n(x3151).
n(x3150).
n(x3149).
n(x3148).
n(x3147).
n(x3146).
n(x3145).
n(x3144).
n(x3143).
n(x3142).
n(x3141).
n(x3140).
n(x3139).
n(x3138).
n(x3137).
n(x3136).
n(x3135).
n(x3134).
n(x3133).
n(x3132).
n(x3131).
n(x3130).
n(x3129).
n(x3128).
n(x3127).
n(x3126).
n(x3125).
n(x3124).
n(x3123).
n(x3122).
n(x3121).
n(x3120).
n(x3119).
n(x3118).
n(x3117).
n(x3116).
n(x3115).
n(x3114).
n(x3113).
n(x3112).
n(x3111).
n(x3110).
n(x3109).
n(x3108).
n(x3107).
n(x3106).
n(x3105).
n(x3104).
n(x3103).
n(x3102).
n(x3101).
n(x3100).
n(x3099).
n(x3098).
n(x3097).
n(x3096).
n(x3095).
n(x3094).
n(x3093).
n(x3092).
n(x3091).
n(x3090).
n(x3089).
n(x3088).
n(x3087).
n(x3086).
n(x3085).
n(x3084).
n(x3083).
n(x3082).
n(x3081).
n(x3080).
n(x3079).
n(x3078).
n(x3077).
n(x3076).
n(x3075).
n(x3074).
n(x3073).
n(x3072).
n(x3071).
n(x3070).
n(x3069).
n(x3068).
n(x3067).
n(x3066).
n(x3065).
n(x3064).
n(x3063).
n(x3062).
n(x3061).
n(x3060).
n(x3059).
n(x3058).
n(x3057).
n(x3056).
n(x3055).
n(x3054).
n(x3053).
n(x3052).
n(x3051).
n(x3050).
n(x3049).
n(x3048).
n(x3047).
n(x3046).
n(x3045).
n(x3044).
n(x3043).
n(x3042).
n(x3041).
n(x3040).
n(x3039).
n(x3038).
n(x3037).
n(x3036).
n(x3035).
n(x3034).
n(x3033).
n(x3032).
n(x3031).
n(x3030).
n(x3029).
n(x3028).
n(x3027).
n(x3026).
n(x3025).
n(x3024).
n(x3023).
n(x3022).
n(x3021).
n(x3020).
n(x3019).
n(x3018).
n(x3017).
n(x3016).
n(x3015).
n(x3014).
n(x3013).
n(x3012).
n(x3011).
n(x3010).
n(x3009).
n(x3008).
n(x3007).
n(x3006).
n(x3005).
n(x3004).
n(x3003).
n(x3002).
n(x3001).
n(x3000).
n(x2999).
n(x2998).
n(x2997).
n(x2996).
n(x2995).
n(x2994).
n(x2993).
n(x2992).
n(x2991).
n(x2990).
n(x2989).
n(x2988).
n(x2987).
n(x2986).
n(x2985).
n(x2984).
n(x2983).
n(x2982).
n(x2981).
n(x2980).
n(x2979).
n(x2978).
n(x2977).
n(x2976).
n(x2975).
n(x2974).
n(x2973).
n(x2972).
n(x2971).
n(x2970).
n(x2969).
n(x2968).
n(x2967).
n(x2966).
n(x2965).
n(x2964).
n(x2963).
n(x2962).
n(x2961).
n(x2960).
n(x2959).
n(x2958).
n(x2957).
n(x2956).
n(x2955).
n(x2954).
n(x2953).
n(x2952).
n(x2951).
n(x2950).
n(x2949).
n(x2948).
n(x2947).
n(x2946).
n(x2945).
n(x2944).
n(x2943).
n(x2942).
n(x2941).
n(x2940).
n(x2939).
n(x2938).
n(x2937).
n(x2936).
n(x2935).
n(x2934).
n(x2933).
n(x2932).
n(x2931).
n(x2930).
n(x2929).
n(x2928).
n(x2927).
n(x2926).
n(x2925).
n(x2924).
n(x2923).
n(x2922).
n(x2921).
n(x2920).
n(x2919).
n(x2918).
n(x2917).
n(x2916).
n(x2915).
n(x2914).
n(x2913).
n(x2912).
n(x2911).
n(x2910).
n(x2909).
n(x2908).
n(x2907).
n(x2906).
n(x2905).
n(x2904).
n(x2903).
n(x2902).
n(x2901).
n(x2900).
n(x2899).
n(x2898).
n(x2897).
n(x2896).
n(x2895).
n(x2894).
n(x2893).
n(x2892).
n(x2891).
n(x2890).
n(x2889).
n(x2888).
n(x2887).
n(x2886).
n(x2885).
n(x2884).
n(x2883).
n(x2882).
n(x2881).
n(x2880).
n(x2879).
n(x2878).
n(x2877).
n(x2876).
n(x2875).
n(x2874).
n(x2873).
n(x2872).
n(x2871).
n(x2870).
n(x2869).
n(x2868).
n(x2867).
n(x2866).
n(x2865).
n(x2864).
n(x2863).
n(x2862).
n(x2861).
n(x2860).
n(x2859).
n(x2858).
n(x2857).
n(x2856).
n(x2855).
n(x2854).
n(x2853).
n(x2852).
n(x2851).
n(x2850).
n(x2849).
n(x2848).
n(x2847).
n(x2846).
n(x2845).
n(x2844).
n(x2843).
n(x2842).
n(x2841).
n(x2840).
n(x2839).
n(x2838).
n(x2837).
n(x2836).
n(x2835).
n(x2834).
n(x2833).
n(x2832).
n(x2831).
n(x2830).
n(x2829).
n(x2828).
n(x2827).
n(x2826).
n(x2825).
n(x2824).
n(x2823).
n(x2822).
n(x2821).
n(x2820).
n(x2819).
n(x2818).
n(x2817).
n(x2816).
n(x2815).
n(x2814).
n(x2813).
n(x2812).
n(x2811).
n(x2810).
n(x2809).
n(x2808).
n(x2807).
n(x2806).
n(x2805).
n(x2804).
n(x2803).
n(x2802).
n(x2801).
n(x2800).
n(x2799).
n(x2798).
n(x2797).
n(x2796).
n(x2795).
n(x2794).
n(x2793).
n(x2792).
n(x2791).
n(x2790).
n(x2789).
n(x2788).
n(x2787).
n(x2786).
n(x2785).
n(x2784).
n(x2783).
n(x2782).
n(x2781).
n(x2780).
n(x2779).
n(x2778).
n(x2777).
n(x2776).
n(x2775).
n(x2774).
n(x2773).
n(x2772).
n(x2771).
n(x2770).
n(x2769).
n(x2768).
n(x2767).
n(x2766).
n(x2765).
n(x2764).
n(x2763).
n(x2762).
n(x2761).
n(x2760).
n(x2759).
n(x2758).
n(x2757).
n(x2756).
n(x2755).
n(x2754).
n(x2753).
n(x2752).
n(x2751).
n(x2750).
n(x2749).
n(x2748).
n(x2747).
n(x2746).
n(x2745).
n(x2744).
n(x2743).
n(x2742).
n(x2741).
n(x2740).
n(x2739).
n(x2738).
n(x2737).
n(x2736).
n(x2735).
n(x2734).
n(x2733).
n(x2732).
n(x2731).
n(x2730).
n(x2729).
n(x2728).
n(x2727).
n(x2726).
n(x2725).
n(x2724).
n(x2723).
n(x2722).
n(x2721).
n(x2720).
n(x2719).
n(x2718).
n(x2717).
n(x2716).
n(x2715).
n(x2714).
n(x2713).
n(x2712).
n(x2711).
n(x2710).
n(x2709).
n(x2708).
n(x2707).
n(x2706).
n(x2705).
n(x2704).
n(x2703).
n(x2702).
n(x2701).
n(x2700).
n(x2699).
n(x2698).
n(x2697).
n(x2696).
n(x2695).
n(x2694).
n(x2693).
n(x2692).
n(x2691).
n(x2690).
n(x2689).
n(x2688).
n(x2687).
n(x2686).
n(x2685).
n(x2684).
n(x2683).
n(x2682).
n(x2681).
n(x2680).
n(x2679).
n(x2678).
n(x2677).
n(x2676).
n(x2675).
n(x2674).
n(x2673).
n(x2672).
n(x2671).
n(x2670).
n(x2669).
n(x2668).
n(x2667).
n(x2666).
n(x2665).
n(x2664).
n(x2663).
n(x2662).
n(x2661).
n(x2660).
n(x2659).
n(x2658).
n(x2657).
n(x2656).
n(x2655).
n(x2654).
n(x2653).
n(x2652).
n(x2651).
n(x2650).
n(x2649).
n(x2648).
n(x2647).
n(x2646).
n(x2645).
n(x2644).
n(x2643).
n(x2642).
n(x2641).
n(x2640).
n(x2639).
n(x2638).
n(x2637).
n(x2636).
n(x2635).
n(x2634).
n(x2633).
n(x2632).
n(x2631).
n(x2630).
n(x2629).
n(x2628).
n(x2627).
n(x2626).
n(x2625).
n(x2624).
n(x2623).
n(x2622).
n(x2621).
n(x2620).
n(x2619).
n(x2618).
n(x2617).
n(x2616).
n(x2615).
n(x2614).
n(x2613).
n(x2612).
n(x2611).
n(x2610).
n(x2609).
n(x2608).
n(x2607).
n(x2606).
n(x2605).
n(x2604).
n(x2603).
n(x2602).
n(x2601).
n(x2600).
n(x2599).
n(x2598).
n(x2597).
n(x2596).
n(x2595).
n(x2594).
n(x2593).
n(x2592).
n(x2591).
n(x2590).
n(x2589).
n(x2588).
n(x2587).
n(x2586).
n(x2585).
n(x2584).
n(x2583).
n(x2582).
n(x2581).
n(x2580).
n(x2579).
n(x2578).
n(x2577).
n(x2576).
n(x2575).
n(x2574).
n(x2573).
n(x2572).
n(x2571).
n(x2570).
n(x2569).
n(x2568).
n(x2567).
n(x2566).
n(x2565).
n(x2564).
n(x2563).
n(x2562).
n(x2561).
n(x2560).
n(x2559).
n(x2558).
n(x2557).
n(x2556).
n(x2555).
n(x2554).
n(x2553).
n(x2552).
n(x2551).
n(x2550).
n(x2549).
n(x2548).
n(x2547).
n(x2546).
n(x2545).
n(x2544).
n(x2543).
n(x2542).
n(x2541).
n(x2540).
n(x2539).
n(x2538).
n(x2537).
n(x2536).
n(x2535).
n(x2534).
n(x2533).
n(x2532).
n(x2531).
n(x2530).
n(x2529).
n(x2528).
n(x2527).
n(x2526).
n(x2525).
n(x2524).
n(x2523).
n(x2522).
n(x2521).
n(x2520).
n(x2519).
n(x2518).
n(x2517).
n(x2516).
n(x2515).
n(x2514).
n(x2513).
n(x2512).
n(x2511).
n(x2510).
n(x2509).
n(x2508).
n(x2507).
n(x2506).
n(x2505).
n(x2504).
n(x2503).
n(x2502).
n(x2501).
n(x2500).
n(x2499).
n(x2498).
n(x2497).
n(x2496).
n(x2495).
n(x2494).
n(x2493).
n(x2492).
n(x2491).
n(x2490).
n(x2489).
n(x2488).
n(x2487).
n(x2486).
n(x2485).
n(x2484).
n(x2483).
n(x2482).
n(x2481).
n(x2480).
n(x2479).
n(x2478).
n(x2477).
n(x2476).
n(x2475).
n(x2474).
n(x2473).
n(x2472).
n(x2471).
n(x2470).
n(x2469).
n(x2468).
n(x2467).
n(x2466).
n(x2465).
n(x2464).
n(x2463).
n(x2462).
n(x2461).
n(x2460).
n(x2459).
n(x2458).
n(x2457).
n(x2456).
n(x2455).
n(x2454).
n(x2453).
n(x2452).
n(x2451).
n(x2450).
n(x2449).
n(x2448).
n(x2447).
n(x2446).
n(x2445).
n(x2444).
n(x2443).
n(x2442).
n(x2441).
n(x2440).
n(x2439).
n(x2438).
n(x2437).
n(x2436).
n(x2435).
n(x2434).
n(x2433).
n(x2432).
n(x2431).
n(x2430).
n(x2429).
n(x2428).
n(x2427).
n(x2426).
n(x2425).
n(x2424).
n(x2423).
n(x2422).
n(x2421).
n(x2420).
n(x2419).
n(x2418).
n(x2417).
n(x2416).
n(x2415).
n(x2414).
n(x2413).
n(x2412).
n(x2411).
n(x2410).
n(x2409).
n(x2408).
n(x2407).
n(x2406).
n(x2405).
n(x2404).
n(x2403).
n(x2402).
n(x2401).
n(x2400).
n(x2399).
n(x2398).
n(x2397).
n(x2396).
n(x2395).
n(x2394).
n(x2393).
n(x2392).
n(x2391).
n(x2390).
n(x2389).
n(x2388).
n(x2387).
n(x2386).
n(x2385).
n(x2384).
n(x2383).
n(x2382).
n(x2381).
n(x2380).
n(x2379).
n(x2378).
n(x2377).
n(x2376).
n(x2375).
n(x2374).
n(x2373).
n(x2372).
n(x2371).
n(x2370).
n(x2369).
n(x2368).
n(x2367).
n(x2366).
n(x2365).
n(x2364).
n(x2363).
n(x2362).
n(x2361).
n(x2360).
n(x2359).
n(x2358).
n(x2357).
n(x2356).
n(x2355).
n(x2354).
n(x2353).
n(x2352).
n(x2351).
n(x2350).
n(x2349).
n(x2348).
n(x2347).
n(x2346).
n(x2345).
n(x2344).
n(x2343).
n(x2342).
n(x2341).
n(x2340).
n(x2339).
n(x2338).
n(x2337).
n(x2336).
n(x2335).
n(x2334).
n(x2333).
n(x2332).
n(x2331).
n(x2330).
n(x2329).
n(x2328).
n(x2327).
n(x2326).
n(x2325).
n(x2324).
n(x2323).
n(x2322).
n(x2321).
n(x2320).
n(x2319).
n(x2318).
n(x2317).
n(x2316).
n(x2315).
n(x2314).
n(x2313).
n(x2312).
n(x2311).
n(x2310).
n(x2309).
n(x2308).
n(x2307).
n(x2306).
n(x2305).
n(x2304).
n(x2303).
n(x2302).
n(x2301).
n(x2300).
n(x2299).
n(x2298).
n(x2297).
n(x2296).
n(x2295).
n(x2294).
n(x2293).
n(x2292).
n(x2291).
n(x2290).
n(x2289).
n(x2288).
n(x2287).
n(x2286).
n(x2285).
n(x2284).
n(x2283).
n(x2282).
n(x2281).
n(x2280).
n(x2279).
n(x2278).
n(x2277).
n(x2276).
n(x2275).
n(x2274).
n(x2273).
n(x2272).
n(x2271).
n(x2270).
n(x2269).
n(x2268).
n(x2267).
n(x2266).
n(x2265).
n(x2264).
n(x2263).
n(x2262).
n(x2261).
n(x2260).
n(x2259).
n(x2258).
n(x2257).
n(x2256).
n(x2255).
n(x2254).
n(x2253).
n(x2252).
n(x2251).
n(x2250).
n(x2249).
n(x2248).
n(x2247).
n(x2246).
n(x2245).
n(x2244).
n(x2243).
n(x2242).
n(x2241).
n(x2240).
n(x2239).
n(x2238).
n(x2237).
n(x2236).
n(x2235).
n(x2234).
n(x2233).
n(x2232).
n(x2231).
n(x2230).
n(x2229).
n(x2228).
n(x2227).
n(x2226).
n(x2225).
n(x2224).
n(x2223).
n(x2222).
n(x2221).
n(x2220).
n(x2219).
n(x2218).
n(x2217).
n(x2216).
n(x2215).
n(x2214).
n(x2213).
n(x2212).
n(x2211).
n(x2210).
n(x2209).
n(x2208).
n(x2207).
n(x2206).
n(x2205).
n(x2204).
n(x2203).
n(x2202).
n(x2201).
n(x2200).
n(x2199).
n(x2198).
n(x2197).
n(x2196).
n(x2195).
n(x2194).
n(x2193).
n(x2192).
n(x2191).
n(x2190).
n(x2189).
n(x2188).
n(x2187).
n(x2186).
n(x2185).
n(x2184).
n(x2183).
n(x2182).
n(x2181).
n(x2180).
n(x2179).
n(x2178).
n(x2177).
n(x2176).
n(x2175).
n(x2174).
n(x2173).
n(x2172).
n(x2171).
n(x2170).
n(x2169).
n(x2168).
n(x2167).
n(x2166).
n(x2165).
n(x2164).
n(x2163).
n(x2162).
n(x2161).
n(x2160).
n(x2159).
n(x2158).
n(x2157).
n(x2156).
n(x2155).
n(x2154).
n(x2153).
n(x2152).
n(x2151).
n(x2150).
n(x2149).
n(x2148).
n(x2147).
n(x2146).
n(x2145).
n(x2144).
n(x2143).
n(x2142).
n(x2141).
n(x2140).
n(x2139).
n(x2138).
n(x2137).
n(x2136).
n(x2135).
n(x2134).
n(x2133).
n(x2132).
n(x2131).
n(x2130).
n(x2129).
n(x2128).
n(x2127).
n(x2126).
n(x2125).
n(x2124).
n(x2123).
n(x2122).
n(x2121).
n(x2120).
n(x2119).
n(x2118).
n(x2117).
n(x2116).
n(x2115).
n(x2114).
n(x2113).
n(x2112).
n(x2111).
n(x2110).
n(x2109).
n(x2108).
n(x2107).
n(x2106).
n(x2105).
n(x2104).
n(x2103).
n(x2102).
n(x2101).
n(x2100).
n(x2099).
n(x2098).
n(x2097).
n(x2096).
n(x2095).
n(x2094).
n(x2093).
n(x2092).
n(x2091).
n(x2090).
n(x2089).
n(x2088).
n(x2087).
n(x2086).
n(x2085).
n(x2084).
n(x2083).
n(x2082).
n(x2081).
n(x2080).
n(x2079).
n(x2078).
n(x2077).
n(x2076).
n(x2075).
n(x2074).
n(x2073).
n(x2072).
n(x2071).
n(x2070).
n(x2069).
n(x2068).
n(x2067).
n(x2066).
n(x2065).
n(x2064).
n(x2063).
n(x2062).
n(x2061).
n(x2060).
n(x2059).
n(x2058).
n(x2057).
n(x2056).
n(x2055).
n(x2054).
n(x2053).
n(x2052).
n(x2051).
n(x2050).
n(x2049).
n(x2048).
n(x2047).
n(x2046).
n(x2045).
n(x2044).
n(x2043).
n(x2042).
n(x2041).
n(x2040).
n(x2039).
n(x2038).
n(x2037).
n(x2036).
n(x2035).
n(x2034).
n(x2033).
n(x2032).
n(x2031).
n(x2030).
n(x2029).
n(x2028).
n(x2027).
n(x2026).
n(x2025).
n(x2024).
n(x2023).
n(x2022).
n(x2021).
n(x2020).
n(x2019).
n(x2018).
n(x2017).
n(x2016).
n(x2015).
n(x2014).
n(x2013).
n(x2012).
n(x2011).
n(x2010).
n(x2009).
n(x2008).
n(x2007).
n(x2006).
n(x2005).
n(x2004).
n(x2003).
n(x2002).
n(x2001).
n(x2000).
n(x1999).
n(x1998).
n(x1997).
n(x1996).
n(x1995).
n(x1994).
n(x1993).
n(x1992).
n(x1991).
n(x1990).
n(x1989).
n(x1988).
n(x1987).
n(x1986).
n(x1985).
n(x1984).
n(x1983).
n(x1982).
n(x1981).
n(x1980).
n(x1979).
n(x1978).
n(x1977).
n(x1976).
n(x1975).
n(x1974).
n(x1973).
n(x1972).
n(x1971).
n(x1970).
n(x1969).
n(x1968).
n(x1967).
n(x1966).
n(x1965).
n(x1964).
n(x1963).
n(x1962).
n(x1961).
n(x1960).
n(x1959).
n(x1958).
n(x1957).
n(x1956).
n(x1955).
n(x1954).
n(x1953).
n(x1952).
n(x1951).
n(x1950).
n(x1949).
n(x1948).
n(x1947).
n(x1946).
n(x1945).
n(x1944).
n(x1943).
n(x1942).
n(x1941).
n(x1940).
n(x1939).
n(x1938).
n(x1937).
n(x1936).
n(x1935).
n(x1934).
n(x1933).
n(x1932).
n(x1931).
n(x1930).
n(x1929).
n(x1928).
n(x1927).
n(x1926).
n(x1925).
n(x1924).
n(x1923).
n(x1922).
n(x1921).
n(x1920).
n(x1919).
n(x1918).
n(x1917).
n(x1916).
n(x1915).
n(x1914).
n(x1913).
n(x1912).
n(x1911).
n(x1910).
n(x1909).
n(x1908).
n(x1907).
n(x1906).
n(x1905).
n(x1904).
n(x1903).
n(x1902).
n(x1901).
n(x1900).
n(x1899).
n(x1898).
n(x1897).
n(x1896).
n(x1895).
n(x1894).
n(x1893).
n(x1892).
n(x1891).
n(x1890).
n(x1889).
n(x1888).
n(x1887).
n(x1886).
n(x1885).
n(x1884).
n(x1883).
n(x1882).
n(x1881).
n(x1880).
n(x1879).
n(x1878).
n(x1877).
n(x1876).
n(x1875).
n(x1874).
n(x1873).
n(x1872).
n(x1871).
n(x1870).
n(x1869).
n(x1868).
n(x1867).
n(x1866).
n(x1865).
n(x1864).
n(x1863).
n(x1862).
n(x1861).
n(x1860).
n(x1859).
n(x1858).
n(x1857).
n(x1856).
n(x1855).
n(x1854).
n(x1853).
n(x1852).
n(x1851).
n(x1850).
n(x1849).
n(x1848).
n(x1847).
n(x1846).
n(x1845).
n(x1844).
n(x1843).
n(x1842).
n(x1841).
n(x1840).
n(x1839).
n(x1838).
n(x1837).
n(x1836).
n(x1835).
n(x1834).
n(x1833).
n(x1832).
n(x1831).
n(x1830).
n(x1829).
n(x1828).
n(x1827).
n(x1826).
n(x1825).
n(x1824).
n(x1823).
n(x1822).
n(x1821).
n(x1820).
n(x1819).
n(x1818).
n(x1817).
n(x1816).
n(x1815).
n(x1814).
n(x1813).
n(x1812).
n(x1811).
n(x1810).
n(x1809).
n(x1808).
n(x1807).
n(x1806).
n(x1805).
n(x1804).
n(x1803).
n(x1802).
n(x1801).
n(x1800).
n(x1799).
n(x1798).
n(x1797).
n(x1796).
n(x1795).
n(x1794).
n(x1793).
n(x1792).
n(x1791).
n(x1790).
n(x1789).
n(x1788).
n(x1787).
n(x1786).
n(x1785).
n(x1784).
n(x1783).
n(x1782).
n(x1781).
n(x1780).
n(x1779).
n(x1778).
n(x1777).
n(x1776).
n(x1775).
n(x1774).
n(x1773).
n(x1772).
n(x1771).
n(x1770).
n(x1769).
n(x1768).
n(x1767).
n(x1766).
n(x1765).
n(x1764).
n(x1763).
n(x1762).
n(x1761).
n(x1760).
n(x1759).
n(x1758).
n(x1757).
n(x1756).
n(x1755).
n(x1754).
n(x1753).
n(x1752).
n(x1751).
n(x1750).
n(x1749).
n(x1748).
n(x1747).
n(x1746).
n(x1745).
n(x1744).
n(x1743).
n(x1742).
n(x1741).
n(x1740).
n(x1739).
n(x1738).
n(x1737).
n(x1736).
n(x1735).
n(x1734).
n(x1733).
n(x1732).
n(x1731).
n(x1730).
n(x1729).
n(x1728).
n(x1727).
n(x1726).
n(x1725).
n(x1724).
n(x1723).
n(x1722).
n(x1721).
n(x1720).
n(x1719).
n(x1718).
n(x1717).
n(x1716).
n(x1715).
n(x1714).
n(x1713).
n(x1712).
n(x1711).
n(x1710).
n(x1709).
n(x1708).
n(x1707).
n(x1706).
n(x1705).
n(x1704).
n(x1703).
n(x1702).
n(x1701).
n(x1700).
n(x1699).
n(x1698).
n(x1697).
n(x1696).
n(x1695).
n(x1694).
n(x1693).
n(x1692).
n(x1691).
n(x1690).
n(x1689).
n(x1688).
n(x1687).
n(x1686).
n(x1685).
n(x1684).
n(x1683).
n(x1682).
n(x1681).
n(x1680).
n(x1679).
n(x1678).
n(x1677).
n(x1676).
n(x1675).
n(x1674).
n(x1673).
n(x1672).
n(x1671).
n(x1670).
n(x1669).
n(x1668).
n(x1667).
n(x1666).
n(x1665).
n(x1664).
n(x1663).
n(x1662).
n(x1661).
n(x1660).
n(x1659).
n(x1658).
n(x1657).
n(x1656).
n(x1655).
n(x1654).
n(x1653).
n(x1652).
n(x1651).
n(x1650).
n(x1649).
n(x1648).
n(x1647).
n(x1646).
n(x1645).
n(x1644).
n(x1643).
n(x1642).
n(x1641).
n(x1640).
n(x1639).
n(x1638).
n(x1637).
n(x1636).
n(x1635).
n(x1634).
n(x1633).
n(x1632).
n(x1631).
n(x1630).
n(x1629).
n(x1628).
n(x1627).
n(x1626).
n(x1625).
n(x1624).
n(x1623).
n(x1622).
n(x1621).
n(x1620).
n(x1619).
n(x1618).
n(x1617).
n(x1616).
n(x1615).
n(x1614).
n(x1613).
n(x1612).
n(x1611).
n(x1610).
n(x1609).
n(x1608).
n(x1607).
n(x1606).
n(x1605).
n(x1604).
n(x1603).
n(x1602).
n(x1601).
n(x1600).
n(x1599).
n(x1598).
n(x1597).
n(x1596).
n(x1595).
n(x1594).
n(x1593).
n(x1592).
n(x1591).
n(x1590).
n(x1589).
n(x1588).
n(x1587).
n(x1586).
n(x1585).
n(x1584).
n(x1583).
n(x1582).
n(x1581).
n(x1580).
n(x1579).
n(x1578).
n(x1577).
n(x1576).
n(x1575).
n(x1574).
n(x1573).
n(x1572).
n(x1571).
n(x1570).
n(x1569).
n(x1568).
n(x1567).
n(x1566).
n(x1565).
n(x1564).
n(x1563).
n(x1562).
n(x1561).
n(x1560).
n(x1559).
n(x1558).
n(x1557).
n(x1556).
n(x1555).
n(x1554).
n(x1553).
n(x1552).
n(x1551).
n(x1550).
n(x1549).
n(x1548).
n(x1547).
n(x1546).
n(x1545).
n(x1544).
n(x1543).
n(x1542).
n(x1541).
n(x1540).
n(x1539).
n(x1538).
n(x1537).
n(x1536).
n(x1535).
n(x1534).
n(x1533).
n(x1532).
n(x1531).
n(x1530).
n(x1529).
n(x1528).
n(x1527).
n(x1526).
n(x1525).
n(x1524).
n(x1523).
n(x1522).
n(x1521).
n(x1520).
n(x1519).
n(x1518).
n(x1517).
n(x1516).
n(x1515).
n(x1514).
n(x1513).
n(x1512).
n(x1511).
n(x1510).
n(x1509).
n(x1508).
n(x1507).
n(x1506).
n(x1505).
n(x1504).
n(x1503).
n(x1502).
n(x1501).
n(x1500).
n(x1499).
n(x1498).
n(x1497).
n(x1496).
n(x1495).
n(x1494).
n(x1493).
n(x1492).
n(x1491).
n(x1490).
n(x1489).
n(x1488).
n(x1487).
n(x1486).
n(x1485).
n(x1484).
n(x1483).
n(x1482).
n(x1481).
n(x1480).
n(x1479).
n(x1478).
n(x1477).
n(x1476).
n(x1475).
n(x1474).
n(x1473).
n(x1472).
n(x1471).
n(x1470).
n(x1469).
n(x1468).
n(x1467).
n(x1466).
n(x1465).
n(x1464).
n(x1463).
n(x1462).
n(x1461).
n(x1460).
n(x1459).
n(x1458).
n(x1457).
n(x1456).
n(x1455).
n(x1454).
n(x1453).
n(x1452).
n(x1451).
n(x1450).
n(x1449).
n(x1448).
n(x1447).
n(x1446).
n(x1445).
n(x1444).
n(x1443).
n(x1442).
n(x1441).
n(x1440).
n(x1439).
n(x1438).
n(x1437).
n(x1436).
n(x1435).
n(x1434).
n(x1433).
n(x1432).
n(x1431).
n(x1430).
n(x1429).
n(x1428).
n(x1427).
n(x1426).
n(x1425).
n(x1424).
n(x1423).
n(x1422).
n(x1421).
n(x1420).
n(x1419).
n(x1418).
n(x1417).
n(x1416).
n(x1415).
n(x1414).
n(x1413).
n(x1412).
n(x1411).
n(x1410).
n(x1409).
n(x1408).
n(x1407).
n(x1406).
n(x1405).
n(x1404).
n(x1403).
n(x1402).
n(x1401).
n(x1400).
n(x1399).
n(x1398).
n(x1397).
n(x1396).
n(x1395).
n(x1394).
n(x1393).
n(x1392).
n(x1391).
n(x1390).
n(x1389).
n(x1388).
n(x1387).
n(x1386).
n(x1385).
n(x1384).
n(x1383).
n(x1382).
n(x1381).
n(x1380).
n(x1379).
n(x1378).
n(x1377).
n(x1376).
n(x1375).
n(x1374).
n(x1373).
n(x1372).
n(x1371).
n(x1370).
n(x1369).
n(x1368).
n(x1367).
n(x1366).
n(x1365).
n(x1364).
n(x1363).
n(x1362).
n(x1361).
n(x1360).
n(x1359).
n(x1358).
n(x1357).
n(x1356).
n(x1355).
n(x1354).
n(x1353).
n(x1352).
n(x1351).
n(x1350).
n(x1349).
n(x1348).
n(x1347).
n(x1346).
n(x1345).
n(x1344).
n(x1343).
n(x1342).
n(x1341).
n(x1340).
n(x1339).
n(x1338).
n(x1337).
n(x1336).
n(x1335).
n(x1334).
n(x1333).
n(x1332).
n(x1331).
n(x1330).
n(x1329).
n(x1328).
n(x1327).
n(x1326).
n(x1325).
n(x1324).
n(x1323).
n(x1322).
n(x1321).
n(x1320).
n(x1319).
n(x1318).
n(x1317).
n(x1316).
n(x1315).
n(x1314).
n(x1313).
n(x1312).
n(x1311).
n(x1310).
n(x1309).
n(x1308).
n(x1307).
n(x1306).
n(x1305).
n(x1304).
n(x1303).
n(x1302).
n(x1301).
n(x1300).
n(x1299).
n(x1298).
n(x1297).
n(x1296).
n(x1295).
n(x1294).
n(x1293).
n(x1292).
n(x1291).
n(x1290).
n(x1289).
n(x1288).
n(x1287).
n(x1286).
n(x1285).
n(x1284).
n(x1283).
n(x1282).
n(x1281).
n(x1280).
n(x1279).
n(x1278).
n(x1277).
n(x1276).
n(x1275).
n(x1274).
n(x1273).
n(x1272).
n(x1271).
n(x1270).
n(x1269).
n(x1268).
n(x1267).
n(x1266).
n(x1265).
n(x1264).
n(x1263).
n(x1262).
n(x1261).
n(x1260).
n(x1259).
n(x1258).
n(x1257).
n(x1256).
n(x1255).
n(x1254).
n(x1253).
n(x1252).
n(x1251).
n(x1250).
n(x1249).
n(x1248).
n(x1247).
n(x1246).
n(x1245).
n(x1244).
n(x1243).
n(x1242).
n(x1241).
n(x1240).
n(x1239).
n(x1238).
n(x1237).
n(x1236).
n(x1235).
n(x1234).
n(x1233).
n(x1232).
n(x1231).
n(x1230).
n(x1229).
n(x1228).
n(x1227).
n(x1226).
n(x1225).
n(x1224).
n(x1223).
n(x1222).
n(x1221).
n(x1220).
n(x1219).
n(x1218).
n(x1217).
n(x1216).
n(x1215).
n(x1214).
n(x1213).
n(x1212).
n(x1211).
n(x1210).
n(x1209).
n(x1208).
n(x1207).
n(x1206).
n(x1205).
n(x1204).
n(x1203).
n(x1202).
n(x1201).
n(x1200).
n(x1199).
n(x1198).
n(x1197).
n(x1196).
n(x1195).
n(x1194).
n(x1193).
n(x1192).
n(x1191).
n(x1190).
n(x1189).
n(x1188).
n(x1187).
n(x1186).
n(x1185).
n(x1184).
n(x1183).
n(x1182).
n(x1181).
n(x1180).
n(x1179).
n(x1178).
n(x1177).
n(x1176).
n(x1175).
n(x1174).
n(x1173).
n(x1172).
n(x1171).
n(x1170).
n(x1169).
n(x1168).
n(x1167).
n(x1166).
n(x1165).
n(x1164).
n(x1163).
n(x1162).
n(x1161).
n(x1160).
n(x1159).
n(x1158).
n(x1157).
n(x1156).
n(x1155).
n(x1154).
n(x1153).
n(x1152).
n(x1151).
n(x1150).
n(x1149).
n(x1148).
n(x1147).
n(x1146).
n(x1145).
n(x1144).
n(x1143).
n(x1142).
n(x1141).
n(x1140).
n(x1139).
n(x1138).
n(x1137).
n(x1136).
n(x1135).
n(x1134).
n(x1133).
n(x1132).
n(x1131).
n(x1130).
n(x1129).
n(x1128).
n(x1127).
n(x1126).
n(x1125).
n(x1124).
n(x1123).
n(x1122).
n(x1121).
n(x1120).
n(x1119).
n(x1118).
n(x1117).
n(x1116).
n(x1115).
n(x1114).
n(x1113).
n(x1112).
n(x1111).
n(x1110).
n(x1109).
n(x1108).
n(x1107).
n(x1106).
n(x1105).
n(x1104).
n(x1103).
n(x1102).
n(x1101).
n(x1100).
n(x1099).
n(x1098).
n(x1097).
n(x1096).
n(x1095).
n(x1094).
n(x1093).
n(x1092).
n(x1091).
n(x1090).
n(x1089).
n(x1088).
n(x1087).
n(x1086).
n(x1085).
n(x1084).
n(x1083).
n(x1082).
n(x1081).
n(x1080).
n(x1079).
n(x1078).
n(x1077).
n(x1076).
n(x1075).
n(x1074).
n(x1073).
n(x1072).
n(x1071).
n(x1070).
n(x1069).
n(x1068).
n(x1067).
n(x1066).
n(x1065).
n(x1064).
n(x1063).
n(x1062).
n(x1061).
n(x1060).
n(x1059).
n(x1058).
n(x1057).
n(x1056).
n(x1055).
n(x1054).
n(x1053).
n(x1052).
n(x1051).
n(x1050).
n(x1049).
n(x1048).
n(x1047).
n(x1046).
n(x1045).
n(x1044).
n(x1043).
n(x1042).
n(x1041).
n(x1040).
n(x1039).
n(x1038).
n(x1037).
n(x1036).
n(x1035).
n(x1034).
n(x1033).
n(x1032).
n(x1031).
n(x1030).
n(x1029).
n(x1028).
n(x1027).
n(x1026).
n(x1025).
n(x1024).
n(x1023).
n(x1022).
n(x1021).
n(x1020).
n(x1019).
n(x1018).
n(x1017).
n(x1016).
n(x1015).
n(x1014).
n(x1013).
n(x1012).
n(x1011).
n(x1010).
n(x1009).
n(x1008).
n(x1007).
n(x1006).
n(x1005).
n(x1004).
n(x1003).
n(x1002).
n(x1001).
n(x1000).
n(x999).
n(x998).
n(x997).
n(x996).
n(x995).
n(x994).
n(x993).
n(x992).
n(x991).
n(x990).
n(x989).
n(x988).
n(x987).
n(x986).
n(x985).
n(x984).
n(x983).
n(x982).
n(x981).
n(x980).
n(x979).
n(x978).
n(x977).
n(x976).
n(x975).
n(x974).
n(x973).
n(x972).
n(x971).
n(x970).
n(x969).
n(x968).
n(x967).
n(x966).
n(x965).
n(x964).
n(x963).
n(x962).
n(x961).
n(x960).
n(x959).
n(x958).
n(x957).
n(x956).
n(x955).
n(x954).
n(x953).
n(x952).
n(x951).
n(x950).
n(x949).
n(x948).
n(x947).
n(x946).
n(x945).
n(x944).
n(x943).
n(x942).
n(x941).
n(x940).
n(x939).
n(x938).
n(x937).
n(x936).
n(x935).
n(x934).
n(x933).
n(x932).
n(x931).
n(x930).
n(x929).
n(x928).
n(x927).
n(x926).
n(x925).
n(x924).
n(x923).
n(x922).
n(x921).
n(x920).
n(x919).
n(x918).
n(x917).
n(x916).
n(x915).
n(x914).
n(x913).
n(x912).
n(x911).
n(x910).
n(x909).
n(x908).
n(x907).
n(x906).
n(x905).
n(x904).
n(x903).
n(x902).
n(x901).
n(x900).
n(x899).
n(x898).
n(x897).
n(x896).
n(x895).
n(x894).
n(x893).
n(x892).
n(x891).
n(x890).
n(x889).
n(x888).
n(x887).
n(x886).
n(x885).
n(x884).
n(x883).
n(x882).
n(x881).
n(x880).
n(x879).
n(x878).
n(x877).
n(x876).
n(x875).
n(x874).
n(x873).
n(x872).
n(x871).
n(x870).
n(x869).
n(x868).
n(x867).
n(x866).
n(x865).
n(x864).
n(x863).
n(x862).
n(x861).
n(x860).
n(x859).
n(x858).
n(x857).
n(x856).
n(x855).
n(x854).
n(x853).
n(x852).
n(x851).
n(x850).
n(x849).
n(x848).
n(x847).
n(x846).
n(x845).
n(x844).
n(x843).
n(x842).
n(x841).
n(x840).
n(x839).
n(x838).
n(x837).
n(x836).
n(x835).
n(x834).
n(x833).
n(x832).
n(x831).
n(x830).
n(x829).
n(x828).
n(x827).
n(x826).
n(x825).
n(x824).
n(x823).
n(x822).
n(x821).
n(x820).
n(x819).
n(x818).
n(x817).
n(x816).
n(x815).
n(x814).
n(x813).
n(x812).
n(x811).
n(x810).
n(x809).
n(x808).
n(x807).
n(x806).
n(x805).
n(x804).
n(x803).
n(x802).
n(x801).
n(x800).
n(x799).
n(x798).
n(x797).
n(x796).
n(x795).
n(x794).
n(x793).
n(x792).
n(x791).
n(x790).
n(x789).
n(x788).
n(x787).
n(x786).
n(x785).
n(x784).
n(x783).
n(x782).
n(x781).
n(x780).
n(x779).
n(x778).
n(x777).
n(x776).
n(x775).
n(x774).
n(x773).
n(x772).
n(x771).
n(x770).
n(x769).
n(x768).
n(x767).
n(x766).
n(x765).
n(x764).
n(x763).
n(x762).
n(x761).
n(x760).
n(x759).
n(x758).
n(x757).
n(x756).
n(x755).
n(x754).
n(x753).
n(x752).
n(x751).
n(x750).
n(x749).
n(x748).
n(x747).
n(x746).
n(x745).
n(x744).
n(x743).
n(x742).
n(x741).
n(x740).
n(x739).
n(x738).
n(x737).
n(x736).
n(x735).
n(x734).
n(x733).
n(x732).
n(x731).
n(x730).
n(x729).
n(x728).
n(x727).
n(x726).
n(x725).
n(x724).
n(x723).
n(x722).
n(x721).
n(x720).
n(x719).
n(x718).
n(x717).
n(x716).
n(x715).
n(x714).
n(x713).
n(x712).
n(x711).
n(x710).
n(x709).
n(x708).
n(x707).
n(x706).
n(x705).
n(x704).
n(x703).
n(x702).
n(x701).
n(x700).
n(x699).
n(x698).
n(x697).
n(x696).
n(x695).
n(x694).
n(x693).
n(x692).
n(x691).
n(x690).
n(x689).
n(x688).
n(x687).
n(x686).
n(x685).
n(x684).
n(x683).
n(x682).
n(x681).
n(x680).
n(x679).
n(x678).
n(x677).
n(x676).
n(x675).
n(x674).
n(x673).
n(x672).
n(x671).
n(x670).
n(x669).
n(x668).
n(x667).
n(x666).
n(x665).
n(x664).
n(x663).
n(x662).
n(x661).
n(x660).
n(x659).
n(x658).
n(x657).
n(x656).
n(x655).
n(x654).
n(x653).
n(x652).
n(x651).
n(x650).
n(x649).
n(x648).
n(x647).
n(x646).
n(x645).
n(x644).
n(x643).
n(x642).
n(x641).
n(x640).
n(x639).
n(x638).
n(x637).
n(x636).
n(x635).
n(x634).
n(x633).
n(x632).
n(x631).
n(x630).
n(x629).
n(x628).
n(x627).
n(x626).
n(x625).
n(x624).
n(x623).
n(x622).
n(x621).
n(x620).
n(x619).
n(x618).
n(x617).
n(x616).
n(x615).
n(x614).
n(x613).
n(x612).
n(x611).
n(x610).
n(x609).
n(x608).
n(x607).
n(x606).
n(x605).
n(x604).
n(x603).
n(x602).
n(x601).
n(x600).
n(x599).
n(x598).
n(x597).
n(x596).
n(x595).
n(x594).
n(x593).
n(x592).
n(x591).
n(x590).
n(x589).
n(x588).
n(x587).
n(x586).
n(x585).
n(x584).
n(x583).
n(x582).
n(x581).
n(x580).
n(x579).
n(x578).
n(x577).
n(x576).
n(x575).
n(x574).
n(x573).
n(x572).
n(x571).
n(x570).
n(x569).
n(x568).
n(x567).
n(x566).
n(x565).
n(x564).
n(x563).
n(x562).
n(x561).
n(x560).
n(x559).
n(x558).
n(x557).
n(x556).
n(x555).
n(x554).
n(x553).
n(x552).
n(x551).
n(x550).
n(x549).
n(x548).
n(x547).
n(x546).
n(x545).
n(x544).
n(x543).
n(x542).
n(x541).
n(x540).
n(x539).
n(x538).
n(x537).
n(x536).
n(x535).
n(x534).
n(x533).
n(x532).
n(x531).
n(x530).
n(x529).
n(x528).
n(x527).
n(x526).
n(x525).
n(x524).
n(x523).
n(x522).
n(x521).
n(x520).
n(x519).
n(x518).
n(x517).
n(x516).
n(x515).
n(x514).
n(x513).
n(x512).
n(x511).
n(x510).
n(x509).
n(x508).
n(x507).
n(x506).
n(x505).
n(x504).
n(x503).
n(x502).
n(x501).
n(x500).
n(x499).
n(x498).
n(x497).
n(x496).
n(x495).
n(x494).
n(x493).
n(x492).
n(x491).
n(x490).
n(x489).
n(x488).
n(x487).
n(x486).
n(x485).
n(x484).
n(x483).
n(x482).
n(x481).
n(x480).
n(x479).
n(x478).
n(x477).
n(x476).
n(x475).
n(x474).
n(x473).
n(x472).
n(x471).
n(x470).
n(x469).
n(x468).
n(x467).
n(x466).
n(x465).
n(x464).
n(x463).
n(x462).
n(x461).
n(x460).
n(x459).
n(x458).
n(x457).
n(x456).
n(x455).
n(x454).
n(x453).
n(x452).
n(x451).
n(x450).
n(x449).
n(x448).
n(x447).
n(x446).
n(x445).
n(x444).
n(x443).
n(x442).
n(x441).
n(x440).
n(x439).
n(x438).
n(x437).
n(x436).
n(x435).
n(x434).
n(x433).
n(x432).
n(x431).
n(x430).
n(x429).
n(x428).
n(x427).
n(x426).
n(x425).
n(x424).
n(x423).
n(x422).
n(x421).
n(x420).
n(x419).
n(x418).
n(x417).
n(x416).
n(x415).
n(x414).
n(x413).
n(x412).
n(x411).
n(x410).
n(x409).
n(x408).
n(x407).
n(x406).
n(x405).
n(x404).
n(x403).
n(x402).
n(x401).
n(x400).
n(x399).
n(x398).
n(x397).
n(x396).
n(x395).
n(x394).
n(x393).
n(x392).
n(x391).
n(x390).
n(x389).
n(x388).
n(x387).
n(x386).
n(x385).
n(x384).
n(x383).
n(x382).
n(x381).
n(x380).
n(x379).
n(x378).
n(x377).
n(x376).
n(x375).
n(x374).
n(x373).
n(x372).
n(x371).
n(x370).
n(x369).
n(x368).
n(x367).
n(x366).
n(x365).
n(x364).
n(x363).
n(x362).
n(x361).
n(x360).
n(x359).
n(x358).
n(x357).
n(x356).
n(x355).
n(x354).
n(x353).
n(x352).
n(x351).
n(x350).
n(x349).
n(x348).
n(x347).
n(x346).
n(x345).
n(x344).
n(x343).
n(x342).
n(x341).
n(x340).
n(x339).
n(x338).
n(x337).
n(x336).
n(x335).
n(x334).
n(x333).
n(x332).
n(x331).
n(x330).
n(x329).
n(x328).
n(x327).
n(x326).
n(x325).
n(x324).
n(x323).
n(x322).
n(x321).
n(x320).
n(x319).
n(x318).
n(x317).
n(x316).
n(x315).
n(x314).
n(x313).
n(x312).
n(x311).
n(x310).
n(x309).
n(x308).
n(x307).
n(x306).
n(x305).
n(x304).
n(x303).
n(x302).
n(x301).
n(x300).
n(x299).
n(x298).
n(x297).
n(x296).
n(x295).
n(x294).
n(x293).
n(x292).
n(x291).
n(x290).
n(x289).
n(x288).
n(x287).
n(x286).
n(x285).
n(x284).
n(x283).
n(x282).
n(x281).
n(x280).
n(x279).
n(x278).
n(x277).
n(x276).
n(x275).
n(x274).
n(x273).
n(x272).
n(x271).
n(x270).
n(x269).
n(x268).
n(x267).
n(x266).
n(x265).
n(x264).
n(x263).
n(x262).
n(x261).
n(x260).
n(x259).
n(x258).
n(x257).
n(x256).
n(x255).
n(x254).
n(x253).
n(x252).
n(x251).
n(x250).
n(x249).
n(x248).
n(x247).
n(x246).
n(x245).
n(x244).
n(x243).
n(x242).
n(x241).
n(x240).
n(x239).
n(x238).
n(x237).
n(x236).
n(x235).
n(x234).
n(x233).
n(x232).
n(x231).
n(x230).
n(x229).
n(x228).
n(x227).
n(x226).
n(x225).
n(x224).
n(x223).
n(x222).
n(x221).
n(x220).
n(x219).
n(x218).
n(x217).
n(x216).
n(x215).
n(x214).
n(x213).
n(x212).
n(x211).
n(x210).
n(x209).
n(x208).
n(x207).
n(x206).
n(x205).
n(x204).
n(x203).
n(x202).
n(x201).
n(x200).
n(x199).
n(x198).
n(x197).
n(x196).
n(x195).
n(x194).
n(x193).
n(x192).
n(x191).
n(x190).
n(x189).
n(x188).
n(x187).
n(x186).
n(x185).
n(x184).
n(x183).
n(x182).
n(x181).
n(x180).
n(x179).
n(x178).
n(x177).
n(x176).
n(x175).
n(x174).
n(x173).
n(x172).
n(x171).
n(x170).
n(x169).
n(x168).
n(x167).
n(x166).
n(x165).
n(x164).
n(x163).
n(x162).
n(x161).
n(x160).
n(x159).
n(x158).
n(x157).
n(x156).
n(x155).
n(x154).
n(x153).
n(x152).
n(x151).
n(x150).
n(x149).
n(x148).
n(x147).
n(x146).
n(x145).
n(x144).
n(x143).
n(x142).
n(x141).
n(x140).
n(x139).
n(x138).
n(x137).
n(x136).
n(x135).
n(x134).
n(x133).
n(x132).
n(x131).
n(x130).
n(x129).
n(x128).
n(x127).
n(x126).
n(x125).
n(x124).
n(x123).
n(x122).
n(x121).
n(x120).
n(x119).
n(x118).
n(x117).
n(x116).
n(x115).
n(x114).
n(x113).
n(x112).
n(x111).
n(x110).
n(x109).
n(x108).
n(x107).
n(x106).
n(x105).
n(x104).
n(x103).
n(x102).
n(x101).
n(x100).
n(x99).
n(x98).
n(x97).
n(x96).
n(x95).
n(x94).
n(x93).
n(x92).
n(x91).
n(x90).
n(x89).
n(x88).
n(x87).
n(x86).
n(x85).
n(x84).
n(x83).
n(x82).
n(x81).
n(x80).
n(x79).
n(x78).
n(x77).
n(x76).
n(x75).
n(x74).
n(x73).
n(x72).
n(x71).
n(x70).
n(x69).
n(x68).
n(x67).
n(x66).
n(x65).
n(x64).
n(x63).
n(x62).
n(x61).
n(x60).
n(x59).
n(x58).
n(x57).
n(x56).
n(x55).
n(x54).
n(x53).
n(x52).
n(x51).
n(x50).
n(x49).
n(x48).
n(x47).
n(x46).
n(x45).
n(x44).
n(x43).
n(x42).
n(x41).
n(x40).
n(x39).
n(x38).
n(x37).
n(x36).
n(x35).
n(x34).
n(x33).
n(x32).
n(x31).
n(x30).
n(x29).
n(x28).
n(x27).
n(x26).
n(x25).
n(x24).
n(x23).
n(x22).
n(x21).
n(x20).
n(x19).
n(x18).
n(x17).
n(x16).
n(x15).
n(x14).
n(x13).
n(x12).
n(x11).
n(x10).
n(x9).
n(x8).
n(x7).
n(x6).
n(x5).
n(x4).
n(x3).
n(x2).
n(x1).
n(x4001).
"""
| 10.720744
| 12
| 0.533649
| 16,006
| 85,841
| 2.861989
| 0.250156
| 0.000262
| 0.000306
| 0.000524
| 0.99976
| 0.99976
| 0.99976
| 0.99976
| 0.99976
| 0.99976
| 0
| 0.426671
| 0.186531
| 85,841
| 8,006
| 13
| 10.722083
| 0.229346
| 0
| 0
| 0.99975
| 0
| 0
| 0.999602
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
8c9d138dc32c21c945602b5eb99cf1c27e1b6119
| 124
|
py
|
Python
|
querio/service/exceptions/querio_file_error.py
|
Quer-io/Quer.io
|
381f4bf3fd505d35d0f10817322ff9072d453a18
|
[
"MIT"
] | null | null | null |
querio/service/exceptions/querio_file_error.py
|
Quer-io/Quer.io
|
381f4bf3fd505d35d0f10817322ff9072d453a18
|
[
"MIT"
] | 1
|
2018-10-31T18:29:36.000Z
|
2018-10-31T18:29:36.000Z
|
querio/service/exceptions/querio_file_error.py
|
Quer-io/Quer.io
|
381f4bf3fd505d35d0f10817322ff9072d453a18
|
[
"MIT"
] | 1
|
2018-09-05T05:57:17.000Z
|
2018-09-05T05:57:17.000Z
|
class QuerioFileError(Exception):
    """Raised by the querio service layer for file-related failures."""

    def __init__(self, *args, **kwargs):
        # Forward positional arguments unpacked so ``str(exc)`` shows the
        # message itself.  The original passed ``args`` and ``kwargs`` as two
        # positional objects, which rendered as "(('msg',), {})".
        Exception.__init__(self, *args)
        # Keep any keyword arguments reachable for callers that inspect them
        # (the original stuffed them into ``args``; none are used here).
        self.kwargs = kwargs
| 20.666667
| 46
| 0.685484
| 13
| 124
| 5.923077
| 0.615385
| 0.207792
| 0.311688
| 0.467532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185484
| 124
| 5
| 47
| 24.8
| 0.762376
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
8cc701e5a866060d94086eeff4f88f3b0cd515c6
| 11,146
|
py
|
Python
|
rb_tocase/rb_tocase.py
|
RickBarretto/toCase
|
13720f3bcef017951c76dc41ba0dbe0fb355b66a
|
[
"MIT"
] | 3
|
2021-04-20T00:36:09.000Z
|
2022-01-10T06:34:59.000Z
|
rb_tocase/rb_tocase.py
|
RickBarretto/toCase
|
13720f3bcef017951c76dc41ba0dbe0fb355b66a
|
[
"MIT"
] | 3
|
2022-01-10T07:27:25.000Z
|
2022-01-11T19:03:49.000Z
|
rb_tocase/rb_tocase.py
|
RickBarretto/toCase
|
13720f3bcef017951c76dc41ba0dbe0fb355b66a
|
[
"MIT"
] | null | null | null |
import re
class Case:
    """Convert strings between camelCase, snake_case, kebab-case, PascalCase,
    UPPER_SNAKE_CASE and sentence case.

    All converters are intended to be called on the class itself, e.g.
    ``Case.to_camel("hello world")``.  Input may be a sentence ("a b"),
    snake ("a_b"), kebab ("a-b"), Pascal ("AB") or camel ("aB") string.
    A string with no separator and no internal capital (all lower, upper
    or title case) is ambiguous and raises ``ValueError``, as does numeric
    or empty input.
    """

    def _CamelSep(string: str, sep: str):
        """Internal: join ``sep``-separated words camel-style — first word
        lowered, remaining words title-cased."""
        parts = string.split(sep)
        return parts[0].lower() + "".join(word.title() for word in parts[1:])

    def _OthersSep(string: str, sep: str, sep1: str, case: str):
        """Internal: re-join ``sep``-separated words with ``sep1`` after
        applying ``case`` ("lower", "upper" or "title") to each word.

        An unknown ``case`` yields "" (original behaviour, kept as-is).
        """
        ops = {"lower": str.lower, "upper": str.upper, "title": str.title}
        op = ops.get(case)
        if op is None:
            return ""
        return sep1.join(op(word) for word in string.split(sep))

    def _Error():
        """Internal: reject input that has no case differentiator."""
        raise ValueError("case is wrong, choose between: 'lower', 'upper' or 'title'")

    def _FromPascalOrCamel(string: str, sep1: str):
        """Internal: split a Pascal/camel string on its capitals, lower each
        word and join with ``sep1``; a leading lowercase run (camel input)
        is kept as the first word."""
        words = re.findall('[A-Z][^A-Z]*', string)
        index = re.search('[A-Z]', string).start()
        body = sep1.join(word.lower() for word in words)
        if index == 0:
            return body
        return string[:index] + sep1 + body

    def to_camel(string: str):
        """Return ``string`` converted to "camelCase".

        Accepts sentence, snake, kebab, Pascal or camel input.  Raises
        ``ValueError`` for ambiguous (all lower/upper/title), numeric or
        empty input.
        """
        string = string.strip()
        # For Sentences:
        if len(string.split(" ")) > 1:
            return Case._CamelSep(string, " ")
        # For Snakes:
        elif len(string.split("_")) > 1:
            return Case._CamelSep(string, "_")
        # For Kebab:
        elif len(string.split("-")) > 1:
            return Case._CamelSep(string, "-")
        # Ambiguous: no differentiator at all.
        elif string.istitle() or string.isupper() or string.islower():
            Case._Error()
        # Errors:
        elif string.isdecimal() or string.isdigit() or string.isnumeric():
            raise ValueError("It's a number")
        elif string == "":
            raise ValueError("It's a white space")
        # For Pascal/Camel:
        else:
            words = re.findall('[A-Z][^A-Z]*', string)
            index = re.search('[A-Z]', string).start()
            if index == 0:
                # Pascal: lower the first word, keep the rest.
                return words[0].lower() + "".join(words[1:])
            # Already camel: keep the leading lowercase run.  (The original
            # dropped it, returning e.g. "bar" for "fooBar" — fixed.)
            return string[:index] + "".join(words)

    def to_snake(string: str, case: str = "lower"):
        """Return ``string`` converted to "snake_case".

        ``case`` ("lower", "upper" or "title") is applied per word when the
        input has a separator (" " or "-"); Pascal/camel input is always
        lowered (original behaviour).  NOTE: snake input ("a_b") is treated
        as ambiguous and raises, as in the original.
        """
        string = string.strip()
        # For Sentences:
        if len(string.split(" ")) > 1:
            return Case._OthersSep(string, " ", sep1="_", case=case)
        # For Kebab:
        elif len(string.split("-")) > 1:
            return Case._OthersSep(string, "-", sep1="_", case=case)
        # For Uppers, Titles and Lowers:
        elif string.istitle() or string.isupper() or string.islower():
            Case._Error()
        # Errors:
        elif string.isdecimal() or string.isdigit() or string.isnumeric():
            raise ValueError("It's a number")
        elif string == "":
            raise ValueError("It's a white space")
        # For Pascal/Camel:
        else:
            return Case._FromPascalOrCamel(string, "_")

    def to_kebab(string: str, case: str = "lower"):
        """Return ``string`` converted to "kebab-case".

        ``case`` ("lower", "upper" or "title") is applied per word when the
        input has a separator (" " or "_"); Pascal/camel input is always
        lowered (original behaviour).
        """
        string = string.strip()
        # For Sentences:
        if len(string.split(" ")) > 1:
            return Case._OthersSep(string, " ", sep1="-", case=case)
        # For Snake:
        elif len(string.split("_")) > 1:
            return Case._OthersSep(string, "_", sep1="-", case=case)
        # For Uppers, Titles and Lowers:
        elif string.istitle() or string.isupper() or string.islower():
            Case._Error()
        # Errors:
        elif string.isdecimal() or string.isdigit() or string.isnumeric():
            raise ValueError("It's a number")
        elif string == "":
            raise ValueError("It's a white space")
        # For Pascal/Camel:
        else:
            return Case._FromPascalOrCamel(string, "-")

    def to_pascal(string: str):
        """Return ``string`` converted to "PascalCase".

        Accepts sentence, snake, kebab or camel input; every word is
        title-cased and joined with no separator.
        """
        string = string.strip()
        # For Sentences:
        if len(string.split(" ")) > 1:
            return Case._OthersSep(string, " ", sep1="", case="title")
        # For Snakes:
        elif len(string.split("_")) > 1:
            return Case._OthersSep(string, "_", sep1="", case="title")
        # For Kebab:
        elif len(string.split("-")) > 1:
            return Case._OthersSep(string, "-", sep1="", case="title")
        # For Uppers, Titles and Lowers:
        elif string.istitle() or string.isupper() or string.islower():
            Case._Error()
        # Errors:
        elif string.isdecimal() or string.isdigit() or string.isnumeric():
            raise ValueError("It's a number")
        elif string == "":
            raise ValueError("It's a white space")
        # For Camel:
        else:
            words = re.findall('[A-Z][^A-Z]*', string)
            index = re.search('[A-Z]', string).start()
            return string[:index].title() + "".join(word.title() for word in words)

    def to_upper_snake(string: str):
        """Return ``string`` converted to "UPPER_SNAKE_CASE".

        A plain lower/upper/title string is simply upper-cased in place;
        anything else is routed through :meth:`to_snake` first.
        """
        # For Lowers, Uppers and Titles:
        if string.islower() or string.isupper() or string.istitle():
            return string.strip().upper()
        else:
            # The original called the non-existent ``Case.toSnake`` and
            # raised AttributeError — fixed to the real method name.
            return Case.to_snake(string).upper()

    def to_sentence(string: str, case: str = "lower"):
        """Return ``string`` converted to a space-separated sentence.

        ``case`` ("lower", "upper" or "title") is applied to every word,
        including the leading lowercase run of camel input.
        """
        string = string.strip()
        # For Sentences:
        if len(string.split(" ")) > 1:
            return Case._OthersSep(string, " ", sep1=" ", case=case)
        # For Snake:
        elif len(string.split("_")) > 1:
            return Case._OthersSep(string, "_", sep1=" ", case=case)
        # For Kebab:
        elif len(string.split("-")) > 1:
            return Case._OthersSep(string, "-", sep1=" ", case=case)
        # For Uppers, Titles and Lowers:
        elif string.istitle() or string.isupper() or string.islower():
            Case._Error()
        # Errors:
        elif string.isdecimal() or string.isdigit() or string.isnumeric():
            raise ValueError("It's a number")
        elif string == "":
            raise ValueError("It's a white space")
        # For Pascal/Camel:
        else:
            ops = {"lower": str.lower, "upper": str.upper, "title": str.title}
            op = ops.get(case)
            words = re.findall('[A-Z][^A-Z]*', string)
            index = re.search('[A-Z]', string).start()
            # An unknown ``case`` leaves the word list empty and the leading
            # run untouched (original behaviour, kept as-is).
            tail = " ".join(op(word) for word in words) if op else ""
            if index == 0:
                return tail
            first = op(string[:index]) if op else string[:index]
            return first + " " + tail
| 33.074184
| 92
| 0.467701
| 1,204
| 11,146
| 4.280731
| 0.08804
| 0.034148
| 0.015134
| 0.037835
| 0.834692
| 0.805976
| 0.794335
| 0.790648
| 0.786962
| 0.715367
| 0
| 0.009997
| 0.398708
| 11,146
| 337
| 93
| 33.074184
| 0.759027
| 0.221784
| 0
| 0.775
| 0
| 0
| 0.05293
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045
| false
| 0
| 0.005
| 0
| 0.18
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8cfdbb68d11cce80f3113cfdcf2bf122b322b8df
| 310
|
py
|
Python
|
src/__init__.py
|
davidkowalk/FunctionSynthesizer
|
8513ca8991679baf36c7edcbd4d5984ff2660bb9
|
[
"MIT"
] | 3
|
2020-04-03T07:32:37.000Z
|
2020-09-18T15:02:48.000Z
|
src/__init__.py
|
davidkowalk/FunctionSynthesizer
|
8513ca8991679baf36c7edcbd4d5984ff2660bb9
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
davidkowalk/FunctionSynthesizer
|
8513ca8991679baf36c7edcbd4d5984ff2660bb9
|
[
"MIT"
] | null | null | null |
from function_synthesizer.function_synthesizer import solve
from function_synthesizer.function_synthesizer import solve_mixed
from function_synthesizer.function_synthesizer import to_str
from function_synthesizer.function_synthesizer import calculate
from function_synthesizer.function_synthesizer import read
| 51.666667
| 65
| 0.919355
| 37
| 310
| 7.378378
| 0.27027
| 0.695971
| 0.421245
| 0.567766
| 0.915751
| 0.915751
| 0.388278
| 0
| 0
| 0
| 0
| 0
| 0.064516
| 310
| 5
| 66
| 62
| 0.941379
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
508be3617b2d7114b88649c6f8d40b1b5d603c02
| 29,344
|
py
|
Python
|
test/test_sampler_args.py
|
daikonradish/cmdstanpy
|
5645a6bad11edfecd28ede5e7798440b345f6994
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_sampler_args.py
|
daikonradish/cmdstanpy
|
5645a6bad11edfecd28ede5e7798440b345f6994
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_sampler_args.py
|
daikonradish/cmdstanpy
|
5645a6bad11edfecd28ede5e7798440b345f6994
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import os.path
import unittest
from cmdstanpy import TMPDIR
from cmdstanpy.lib import Model, SamplerArgs
# Directory holding the test fixture files (Stan programs, data, metric json).
# Relative path: the expected command strings below embed 'test/data/...',
# so these tests presumably run from the repository root — TODO confirm.
datafiles_path = os.path.join("test", "data")
class SamplerArgsTest(unittest.TestCase):
def test_args_min(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
output = os.path.join(TMPDIR, 'bernoulli.output')
args = SamplerArgs(model,
chain_ids=[1,2],
output_file=output)
args.validate()
cmd = args.compose_command(0, ''.join([output,'-1.csv']))
self.assertIn('id=1', cmd)
def test_args_good(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
rdata = os.path.join(datafiles_path, 'bernoulli.data.R')
output = os.path.join(TMPDIR, 'bernoulli.output')
args = SamplerArgs(model,
chain_ids=[1,2],
seed=12345,
data=rdata,
output_file=output,
max_treedepth=15,
adapt_delta=0.99)
cmd = args.compose_command(0, ''.join([output,'-1.csv']))
self.assertIn('random seed=12345', cmd)
self.assertIn('data file=', cmd)
self.assertIn(
'algorithm=hmc engine=nuts max_depth=15 adapt delta=0.99', cmd)
def test_args_typical(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
jdata = os.path.join(datafiles_path, 'bernoulli.data.json')
output = os.path.join(TMPDIR, 'bernoulli.output')
args = SamplerArgs(model,
chain_ids=[1,2],
seed=12345,
sampling_iters=100,
data=jdata,
output_file=output,
max_treedepth=11,
adapt_delta=0.9)
cmd = args.compose_command(0, ''.join([output,'-1.csv']))
self.assertIn('bernoulli', cmd)
self.assertIn('seed=12345', cmd)
self.assertIn('num_samples=100', cmd)
self.assertIn('bernoulli.data.json', cmd)
self.assertIn('algorithm=hmc engine=nuts max_depth=11 adapt delta=0.9',
cmd)
def test_args_many_1(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
jdata = os.path.join(datafiles_path, 'bernoulli.data.json')
jmetric = os.path.join(datafiles_path, 'bernoulli.metric.json')
output = os.path.join(TMPDIR, 'bernoulli.output')
args = SamplerArgs(model,
chain_ids=[1,2],
seed=12345,
warmup_iters=100,
sampling_iters=100,
save_warmup=True,
thin=2,
metric=jmetric,
step_size=1.5,
data=jdata,
output_file=output,
max_treedepth=11,
adapt_delta=0.9)
cmd = args.compose_command(0, ''.join([output,'-1.csv']))
s1 = 'test/data/bernoulli id=1 random seed=12345 data file=test/data/bernoulli.data.json'
s2 = 'method=sample num_samples=100 num_warmup=100 save_warmup=1 thin=2'
s3 = 'algorithm=hmc engine=nuts max_depth=11 stepsize=1.5 metric=diag_e metric_file="test/data/bernoulli.metric.json" adapt delta=0.9'
self.assertIn(s1, cmd)
self.assertIn(s2, cmd)
self.assertIn(s3, cmd)
def test_args_chain_ids(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
jdata = os.path.join(datafiles_path, 'bernoulli.data.json')
args = SamplerArgs(model,
chain_ids=[7,9],
data=jdata)
cmd = args.compose_command(0, 'output')
self.assertIn('bernoulli', cmd)
self.assertIn('bernoulli.data.json', cmd)
self.assertIn('id=7', cmd)
cmd = args.compose_command(1, 'output')
self.assertIn('id=9', cmd)
def test_args_chain_ids_bad(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
with self.assertRaisesRegex(ValueError, 'invalid chain_id -99'):
args = SamplerArgs(model,
chain_ids=[7,-99])
def test_args_missing_args_1(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
with self.assertRaises(Exception):
args = SamplerArgs()
def test_args_missing_args_2(self):
with self.assertRaises(Exception):
args = SamplerArgs(model)
def test_args_bad_seed_1(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
output = os.path.join(TMPDIR, 'bernoulli.output')
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
output_file=output,
seed='badseed')
def test_args_bad_seed_2(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
output = os.path.join(TMPDIR, 'bernoulli.output')
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
output_file=output,
seed=-10)
def test_args_bad_seed_3(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
output = os.path.join(TMPDIR, 'bernoulli.output')
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
output_file=output,
seed=[1, 2, 3])
def test_args_bad_seed_4(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
output = os.path.join(TMPDIR, 'bernoulli.output')
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
output_file=output,
seed=4294967299)
def test_args_bad_data(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
output = os.path.join(TMPDIR, 'bernoulli.output')
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
output_file=output,
data='/no/such/path/to.file')
def test_args_inits_1(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
jdata = os.path.join(datafiles_path, 'bernoulli.data.json')
jinits = os.path.join(datafiles_path, 'bernoulli.init.json')
args = SamplerArgs(model,
chain_ids=[1,2],
data=jdata,
inits=jinits)
cmd = args.compose_command(0, 'output')
s1 = 'data file=test/data/bernoulli.data.json init=test/data/bernoulli.init.json'
self.assertIn(s1, cmd)
def test_args_inits_2(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
jdata = os.path.join(datafiles_path, 'bernoulli.data.json')
args = SamplerArgs(model,
chain_ids=[1,2],
data=jdata,
inits=0)
cmd = args.compose_command(0, 'output')
s1 = 'data file=test/data/bernoulli.data.json init=0'
self.assertIn(s1, cmd)
def test_args_inits_3(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
jdata = os.path.join(datafiles_path, 'bernoulli.data.json')
args = SamplerArgs(model,
chain_ids=[1,2],
data=jdata,
inits=3.33)
cmd = args.compose_command(0, 'output')
s1 = 'data file=test/data/bernoulli.data.json init=3.33'
self.assertIn(s1, cmd)
def test_args_inits_4(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
jdata = os.path.join(datafiles_path, 'bernoulli.data.json')
jinits1 = os.path.join(datafiles_path, 'bernoulli.init_1.json')
jinits2 = os.path.join(datafiles_path, 'bernoulli.init_2.json')
args = SamplerArgs(model,
chain_ids=[1,2],
data=jdata,
inits=[jinits1, jinits2])
cmd = args.compose_command(0, 'output')
s1 = 'data file=test/data/bernoulli.data.json init=test/data/bernoulli.init_1.json'
self.assertIn(s1, cmd)
def test_args_bad_inits_value(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
inits=-5)
def test_args_bad_inits_file(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
inits='/no/such/path/to.file')
def test_args_bad_inits_files_1(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
jinits1 = os.path.join(datafiles_path, 'bernoulli.init_1.json')
jinits2 = os.path.join(datafiles_path, 'bernoulli.init_2.json')
jinits3 = os.path.join(datafiles_path, 'bernoulli.init.json')
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
inits=[jinits1, jinits2, jinits3])
def test_args_bad_inits_files_2(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
jinits = os.path.join(datafiles_path, 'bernoulli.init.json')
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
inits=[jinits, 'no/such/file.json'])
def test_args_bad_inits_files_3(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
jinits = os.path.join(datafiles_path, 'bernoulli.init.json')
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
inits=[jinits, jinits])
def test_args_iters_1(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
args = SamplerArgs(model,
chain_ids=[1,2],
warmup_iters=123)
cmd = args.compose_command(0, 'output')
self.assertIn('num_warmup=123', cmd)
def test_args_iters_2(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
args = SamplerArgs(model,
chain_ids=[1,2],
sampling_iters=123)
cmd = args.compose_command(0, 'output')
self.assertIn('num_samples=123', cmd)
def test_args_iters_3(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
warmup_iters=-123)
def test_args_iters_4(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
sampling_iters=-123)
def test_args_iters_5(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
warmup_iters=0,
adapt_engaged=True)
def test_args_warmup_schedule_1(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
args = SamplerArgs(model,
chain_ids=[1,2],
warmup_iters=200,
warmup_schedule=(0.1, 0.8, 0.1))
cmd = args.compose_command(0, 'output')
s1 = 'algorithm=hmc adapt init_buffer=20 term_buffer=20'
def test_args_warmup_schedule_2(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
warmup_schedule=(-0.1, 0.8, 0.1))
def test_args_warmup_schedule_3(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
warmup_schedule=(8.1, 0.8, 0.1))
def test_args_iters_schedule_mismatch(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
warmup_iters=0,
warmup_schedule=(0.1, 0.8, 0.1))
def test_args_iters_adapt_mismatch(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
warmup_iters=0,
adapt_engaged=True)
def test_args_save_warmup_1(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
args = SamplerArgs(model,
chain_ids=[1,2],
save_warmup=True)
cmd = args.compose_command(0, 'output')
self.assertIn('save_warmup=1', cmd)
def test_args_save_warmup_2(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
args = SamplerArgs(model,
chain_ids=[1,2],
save_warmup=False)
cmd = args.compose_command(0, 'output')
self.assertNotIn('save_warmup', cmd)
def test_args_num_iters(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
output = os.path.join(TMPDIR, 'bernoulli.output')
args = SamplerArgs(model,
chain_ids=[1,2],
output_file=output,
sampling_iters=3,
warmup_iters=7)
cmd = args.compose_command(0, ''.join([output,'-1.csv']))
self.assertIn('num_samples=3', cmd)
self.assertIn('num_warmup=7', cmd)
def test_args_thin_good(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
args = SamplerArgs(model,
chain_ids=[1,2],
thin=3)
cmd = args.compose_command(0, 'output')
self.assertIn('thin=3', cmd)
def test_args_thin_bad(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
thin=-3)
def test_args_max_treedepth_good(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
args = SamplerArgs(model,
chain_ids=[1,2],
max_treedepth=15)
cmd = args.compose_command(0, 'output')
self.assertIn('max_depth=15', cmd)
def test_args_max_treedepth_bad(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
max_treedepth=-3)
def test_args_metric_1(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
args = SamplerArgs(model,
chain_ids=[1,2],
metric='diag')
cmd = args.compose_command(0, 'output')
s1 = 'metric=diag_e'
self.assertIn(s1, cmd)
def test_args_metric_2(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
args = SamplerArgs(model,
chain_ids=[1,2],
metric='diag_e')
cmd = args.compose_command(0, 'output')
s1 = 'metric=diag_e'
self.assertIn(s1, cmd)
def test_args_metric_3(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
args = SamplerArgs(model,
chain_ids=[1,2],
metric='dense')
cmd = args.compose_command(0, 'output')
s1 = 'metric=dense_e'
self.assertIn(s1, cmd)
def test_args_metric_4(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
args = SamplerArgs(model,
chain_ids=[1,2],
metric='dense_e')
cmd = args.compose_command(0, 'output')
s1 = 'metric=dense_e'
self.assertIn(s1, cmd)
def test_args_metric_file_1(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
jmetric = os.path.join(datafiles_path, 'bernoulli.metric.json')
args = SamplerArgs(model,
chain_ids=[1,2],
metric=jmetric)
cmd = args.compose_command(0, 'output')
s1 = 'metric=diag_e metric_file="test/data/bernoulli.metric.json'
self.assertIn(s1, cmd)
def test_args_metric_file_2(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
jmetric = os.path.join(datafiles_path, 'bernoulli.metric.json')
jmetric2 = os.path.join(datafiles_path, 'bernoulli.metric-2.json')
args = SamplerArgs(model,
chain_ids=[1,2],
metric=[jmetric, jmetric2])
cmd = args.compose_command(1, 'output')
s1 = 'metric=diag_e metric_file="test/data/bernoulli.metric-2.json'
self.assertIn(s1, cmd)
def test_args_bad_metric_file(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
metric='/no/such/path/to.file')
def test_args_bad_metric_file_1(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
jmetric = os.path.join(datafiles_path, 'bernoulli.metric.json')
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
metric=[jmetric, '/no/such/path/to.file'])
def test_args_bad_metric_file_2(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
jmetric = os.path.join(datafiles_path, 'bernoulli.metric.json')
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
metric=[jmetric, jmetric])
def test_args_bad_metric_file_3(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
jmetric = os.path.join(datafiles_path, 'bernoulli.metric.json')
jmetric2 = os.path.join(datafiles_path, 'bernoulli.metric-2.json')
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1],
metric=[jmetric, jmetric2])
def test_args_step_size_1(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
args = SamplerArgs(model,
chain_ids=[1,2],
step_size=1.3)
cmd = args.compose_command(0, 'output')
self.assertIn('stepsize=1.3', cmd)
def test_args_step_size_2(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
args = SamplerArgs(model,
chain_ids=[1,2],
step_size=[1.31, 1.29])
cmd = args.compose_command(1, 'output')
self.assertIn('stepsize=1.29', cmd)
def test_args_step_size_bad_1(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
step_size=-0.99)
def test_args_step_size_bad_2(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
step_size=[1.31, -0.99])
def test_args_step_size_bad_3(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
step_size=[2])
def test_args_adapt_delta_1(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
args = SamplerArgs(model,
chain_ids=[1,2],
adapt_delta=.93)
cmd = args.compose_command(0, 'output')
self.assertIn('adapt delta=0.93', cmd)
def test_args_adapt_delta_2(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
adapt_delta=-3)
def test_args_adapt_delta_3(self):
stan = os.path.join(datafiles_path, 'bernoulli.stan')
exe = os.path.join(datafiles_path, 'bernoulli')
model = Model(exe_file=exe, stan_file=stan)
with self.assertRaises(ValueError):
args = SamplerArgs(model,
chain_ids=[1,2],
adapt_delta=1.3)
def test_args_bad_output(self):
    """An output_file in a nonexistent directory is rejected."""
    stan_path = os.path.join(datafiles_path, 'bernoulli.stan')
    exe_path = os.path.join(datafiles_path, 'bernoulli')
    bern = Model(exe_file=exe_path, stan_file=stan_path)
    with self.assertRaises(ValueError):
        SamplerArgs(bern, chain_ids=[1, 2], output_file='/no/such/path/to.file')
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| 44.937213
| 142
| 0.550095
| 3,386
| 29,344
| 4.577377
| 0.037803
| 0.058068
| 0.096135
| 0.169172
| 0.932125
| 0.896832
| 0.867411
| 0.856765
| 0.828376
| 0.793729
| 0
| 0.022634
| 0.336014
| 29,344
| 652
| 143
| 45.006135
| 0.772839
| 0
| 0
| 0.717687
| 0
| 0.006803
| 0.116106
| 0.025082
| 0
| 0
| 0
| 0
| 0.117347
| 1
| 0.098639
| false
| 0
| 0.008503
| 0
| 0.108844
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
50dd7cab01df657bc89561dbacb5af02edd08f0e
| 39
|
py
|
Python
|
src/lib/gettext.py
|
DTenore/skulpt
|
098d20acfb088d6db85535132c324b7ac2f2d212
|
[
"MIT"
] | 2,671
|
2015-01-03T08:23:25.000Z
|
2022-03-31T06:15:48.000Z
|
src/lib/gettext.py
|
wakeupmuyunhe/skulpt
|
a8fb11a80fb6d7c016bab5dfe3712517a350b347
|
[
"MIT"
] | 972
|
2015-01-05T08:11:00.000Z
|
2022-03-29T13:47:15.000Z
|
src/lib/gettext.py
|
wakeupmuyunhe/skulpt
|
a8fb11a80fb6d7c016bab5dfe3712517a350b347
|
[
"MIT"
] | 845
|
2015-01-03T19:53:36.000Z
|
2022-03-29T18:34:22.000Z
|
# Placeholder module: importing it immediately reports "gettext" as an
# unimplemented library via the _sk_fail helper (presumably the runtime's
# missing-module hook -- confirm against the _sk_fail implementation).
import _sk_fail; _sk_fail._("gettext")
| 19.5
| 38
| 0.769231
| 6
| 39
| 4.166667
| 0.666667
| 0.48
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 39
| 1
| 39
| 39
| 0.694444
| 0
| 0
| 0
| 0
| 0
| 0.179487
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ba29544ad5b82cf4a7063da436df89f86b8bbe47
| 195
|
py
|
Python
|
v1.0.0.test/otp/uberdog/BanManager.py
|
TTOFFLINE-LEAK/ttoffline
|
bb0e91704a755d34983e94288d50288e46b68380
|
[
"MIT"
] | 4
|
2019-07-01T15:46:43.000Z
|
2021-07-23T16:26:48.000Z
|
v1.0.0.test/otp/uberdog/BanManager.py
|
TTOFFLINE-LEAK/ttoffline
|
bb0e91704a755d34983e94288d50288e46b68380
|
[
"MIT"
] | 1
|
2019-06-29T03:40:05.000Z
|
2021-06-13T01:15:16.000Z
|
v1.0.0.test/otp/uberdog/BanManager.py
|
TTOFFLINE-LEAK/ttoffline
|
bb0e91704a755d34983e94288d50288e46b68380
|
[
"MIT"
] | 4
|
2019-07-28T21:18:46.000Z
|
2021-02-25T06:37:25.000Z
|
from direct.distributed.DistributedObjectGlobal import DistributedObjectGlobal
from direct.directnotify.DirectNotifyGlobal import directNotify
class BanManager(DistributedObjectGlobal):
    """Globally-visible ban-manager distributed object.

    The visible definition is an empty placeholder subclass; any actual
    ban-handling behavior lives elsewhere (not shown in this file).
    """
    pass
| 39
| 78
| 0.887179
| 16
| 195
| 10.8125
| 0.625
| 0.115607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 195
| 5
| 79
| 39
| 0.961111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
e895c3d2f88572d8c5889ba28ad81aec2a42af4e
| 7,056
|
py
|
Python
|
train_exp.py
|
mk37972/SCAPE
|
01080e4159917546c76dd15ae5c74e092f4ae299
|
[
"MIT"
] | null | null | null |
train_exp.py
|
mk37972/SCAPE
|
01080e4159917546c76dd15ae5c74e092f4ae299
|
[
"MIT"
] | null | null | null |
train_exp.py
|
mk37972/SCAPE
|
01080e4159917546c76dd15ae5c74e092f4ae299
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 14 15:36:15 2020
@author: mincheol
"""
from baselines import run
import mpi4py
## Block (Position control approach)
# Launch a HER training run on Block-v1 for every (algdim, seed) pair,
# saving models and logs under ./models/block/.
defaultargs = ['--alg=her', '--env=Block-v1', '--num_timesteps=5e4']
for dim in [4]:
    for seed in [10, 500, 1000]:
        run.main(defaultargs + [
            '--save_path=./models/block/pos_ctrl_{}'.format(seed),
            '--demo_file=./block_demo_25.npz',
            '--log_path=./models/block/pos_ctrl_{}_log'.format(seed),
            '--perturb=delay',
            '--algdim={}'.format(dim),
            '--seed={}'.format(seed),
        ])
# ## Block (SCAPE)
# defaultargs = ['--alg=her','--env=Block-v1', '--num_timesteps=5e4']
# for dim in [6]:
# for seed in [10,500,1000]:
# savepath = '--save_path=./models/block/stf_ctrl_{}'.format(seed)
# demofile = '--demo_file=./block_demo_25_augmented.npz'
# logpath = '--log_path=./models/block/stf_ctrl_{}_log'.format(seed)
# perturb = '--perturb=delay'
# algdim = '--algdim={}'.format(dim)
# finalargs = defaultargs + [savepath, demofile, logpath, perturb, algdim, '--seed={}'.format(seed)]
# run.main(finalargs)
# ## Chip (Position control approach)
# defaultargs = ['--alg=her','--env=Chip-v1', '--num_timesteps=1e5']
# for dim in [3]:
# for seed in [10,500,1000]:
# savepath = '--save_path=./models/chip/pos_ctrl_{}'.format(seed)
# demofile = '--demo_file=./chip_demo_25.npz'
# logpath = '--log_path=./models/chip/pos_ctrl_{}_log'.format(seed)
# perturb = '--perturb=delay'
# algdim = '--algdim={}'.format(dim)
# finalargs = defaultargs + [savepath, demofile, logpath, perturb, algdim, '--seed={}'.format(seed)]
# run.main(finalargs)
# ## Chip (SCAPE)
# defaultargs = ['--alg=her','--env=Chip-v1', '--num_timesteps=1e5']
# for dim in [5]:
# for seed in [10,500,1000]:
# savepath = '--save_path=./models/chip/stf_ctrl_{}'.format(seed)
# demofile = '--demo_file=./chip_demo_25_augmented.npz'
# logpath = '--log_path=./models/chip/stf_ctrl_{}_log'.format(seed)
# perturb = '--perturb=delay'
# algdim = '--algdim={}'.format(dim)
# finalargs = defaultargs + [savepath, demofile, logpath, perturb, algdim, '--seed={}'.format(seed)]
# run.main(finalargs)
# ## NuFingers (Position control approach)
# defaultargs = ['--alg=her','--env=NuFingers-v1', '--num_timesteps=1e5']
# for dim in [2]:
# for seed in [10,500,1000]:
# savepath = '--save_path=./models/nufingers/pos_ctrl_{}'.format(seed)
# demofile = '--demo_file=./nufingers_demo_25.npz'
# logpath = '--log_path=./models/nufingers/pos_ctrl_{}_log'.format(seed)
# perturb = '--perturb=delay'
# algdim = '--algdim={}'.format(dim)
# finalargs = defaultargs + [savepath, demofile, logpath, perturb, algdim, '--seed={}'.format(seed)]
# run.main(finalargs)
# ## NuFingers (SCAPE)
# defaultargs = ['--alg=her','--env=NuFingers-v1', '--num_timesteps=1e5']
# for dim in [4]:
# for seed in [10,500,1000]:
# savepath = '--save_path=./models/nufingers/stf_ctrl_{}'.format(seed)
# demofile = '--demo_file=./nufingers_demo_25_augmented.npz'
# logpath = '--log_path=./models/nufingers/stf_ctrl_{}_log'.format(seed)
# perturb = '--perturb=delay'
# algdim = '--algdim={}'.format(dim)
# finalargs = defaultargs + [savepath, demofile, logpath, perturb, algdim, '--seed={}'.format(seed)]
# run.main(finalargs)
# ## Hybrid approach (Imitation learning for position control -> reinforcement learning for stiffness control)
# ## Block (first stage)
# defaultargs = ['--alg=her','--env=Block-v1', '--num_timesteps=2.5e4']
# for dim in [4]:
# savepath = '--save_path=./models/block/hybrid_pos_ctrl'
# demofile = '--demo_file=./block_demo_25.npz'
# logpath = '--log_path=./models/block/hybrid_pos_ctrl_log'
# perturb = '--perturb=none'
# algdim = '--algdim={}'.format(dim)
# finalargs = defaultargs + [savepath, demofile, logpath, perturb, algdim]
# run.main(finalargs)
# ## Block (second stage)
# defaultargs = ['--alg=her','--env=Block-v2', '--num_timesteps=2.5e4']
# for dim in [6]:
# savepath = '--save_path=./models/block/hybrid_stf_ctrl'
# logpath = '--log_path=./models/block/hybrid_stf_ctrl_log'
# perturb = '--perturb=delay'
# algdim = '--algdim={}'.format(dim)
# finalargs = defaultargs + [savepath, logpath, perturb, algdim]
# run.main(finalargs)
# ## Chip (first stage)
# defaultargs = ['--alg=her','--env=Chip-v1', '--num_timesteps=5e4']
# for dim in [3]:
# savepath = '--save_path=./models/chip/hybrid_pos_ctrl'
# demofile = '--demo_file=./chip_demo_25.npz'
# logpath = '--log_path=./models/chip/hybrid_pos_ctrl_log'
# perturb = '--perturb=none'
# algdim = '--algdim={}'.format(dim)
# finalargs = defaultargs + [savepath, demofile, logpath, perturb, algdim]
# run.main(finalargs)
# ## Chip (second stage)
# defaultargs = ['--alg=her','--env=Chip-v2', '--num_timesteps=5e4']
# for dim in [5]:
# savepath = '--save_path=./models/chip/hybrid_stf_ctrl'
# logpath = '--log_path=./models/chip/hybrid_stf_ctrl_log'
# perturb = '--perturb=delay'
# algdim = '--algdim={}'.format(dim)
# finalargs = defaultargs + [savepath, logpath, perturb, algdim]
# run.main(finalargs)
# ## NuFingers (first stage)
# defaultargs = ['--alg=her','--env=NuFingers-v1', '--num_timesteps=1e5']
# for dim in [2]:
# savepath = '--save_path=./models/nufingers/hybrid_pos_ctrl'
# demofile = '--demo_file=./nufingers_demo_25.npz'
# logpath = '--log_path=./models/nufingers/hybrid_pos_ctrl_log'
# perturb = '--perturb=none'
# algdim = '--algdim={}'.format(dim)
# finalargs = defaultargs + [savepath, demofile, logpath, perturb, algdim]
# run.main(finalargs)
# ## NuFingers (second stage)
# defaultargs = ['--alg=her','--env=NuFingers-v2', '--num_timesteps=1e5']
# for dim in [4]:
# savepath = '--save_path=./models/nufingers/hybrid_stf_ctrl'
# logpath = '--log_path=./models/nufingers/hybrid_stf_ctrl_log'
# perturb = '--perturb=delay'
# algdim = '--algdim={}'.format(dim)
# finalargs = defaultargs + [savepath, logpath, perturb, algdim]
# run.main(finalargs)
| 41.751479
| 111
| 0.569728
| 767
| 7,056
| 5.076923
| 0.104302
| 0.061633
| 0.052388
| 0.061633
| 0.943503
| 0.939137
| 0.845917
| 0.777607
| 0.717257
| 0.674114
| 0
| 0.025796
| 0.247307
| 7,056
| 169
| 112
| 41.751479
| 0.7074
| 0.81661
| 0
| 0
| 0
| 0
| 0.191011
| 0.11236
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e8ca6537b323322333c954356234837c4dd883e3
| 12,724
|
py
|
Python
|
comet_chaser_api/comet_utils/analyzer/entry_strategy.py
|
chung-ejy/comet_chaser_api
|
e18a4b65d606bcf5106cff5095b1a3134901abff
|
[
"MIT"
] | null | null | null |
comet_chaser_api/comet_utils/analyzer/entry_strategy.py
|
chung-ejy/comet_chaser_api
|
e18a4b65d606bcf5106cff5095b1a3134901abff
|
[
"MIT"
] | null | null | null |
comet_chaser_api/comet_utils/analyzer/entry_strategy.py
|
chung-ejy/comet_chaser_api
|
e18a4b65d606bcf5106cff5095b1a3134901abff
|
[
"MIT"
] | null | null | null |
from cmath import nan
import pandas as pd
import pickle
from database.comet_historian import CometHistorian
import os
from dotenv import load_dotenv
load_dotenv()  # populate os.environ from a local .env file, if one exists
mongouser = os.getenv("MONGOUSER")  # None when the variable is not set
mongokey = os.getenv("MONGOKEY")  # None when the variable is not set
class EntryStrategy(object):
    """Entry filters that pick trade candidates out of an analysis DataFrame.

    Each strategy receives the analysis frame ``final`` (must carry a
    ``signal`` column; several strategies also read ``velocity``,
    ``inflection``, ``p_sign_change``, ``prediction``, ``crypto`` and/or
    ``date`` -- assumed schema, confirm against callers), filters the rows
    that qualify for entry, and returns them sorted by ``signal``.

    value        -- truthy selects the "value" branch (signal < -threshold),
                    falsy the opposite branch (signal > threshold).
    conservative -- controls the sort direction of the returned offerings.
    """

    @classmethod
    def entry_analysis(self, entry_strat, final, signal, value, conservative):
        """Dispatch to the strategy named ``entry_strat`` and tag the result.

        Unknown strategy names fall through to a single empty row so the
        bookkeeping columns below can still be attached.
        """
        # Flattened from the original deeply nested if/else ladder;
        # dispatch behavior is unchanged.
        if entry_strat == "standard":
            offerings = self.standard(final, signal, value, conservative)
        elif entry_strat == "signal_based":
            offerings = self.signal_based(final, signal, value, conservative)
        elif entry_strat == "parameter_defined":
            offerings = self.parameter_defined(final, signal, value, conservative)
        elif entry_strat == "research_parameter_defined":
            offerings = self.research_parameter_defined(final, signal, value, conservative)
        elif entry_strat == "all":
            offerings = self.all(final, signal, value, conservative)
        elif entry_strat == "ai":
            # Note: ai_driven does not take the signal threshold.
            offerings = self.ai_driven(final, value, conservative)
        else:
            offerings = pd.DataFrame([{}])
        # Record which parameters produced these offerings.
        offerings["entry_strat"] = entry_strat
        offerings["value"] = value
        offerings["signal"] = signal
        offerings["conservative"] = conservative
        return offerings

    @classmethod
    def backtest_entry_analysis(self, date, entry_strat, final, signal, value, conservative):
        """Backtest twin of ``entry_analysis``: same dispatch, restricted to ``date``."""
        if entry_strat == "standard":
            offerings = self.backtest_standard(final, date, signal, value, conservative)
        elif entry_strat == "signal_based":
            offerings = self.backtest_signal_based(final, date, signal, value, conservative)
        elif entry_strat == "parameter_defined":
            offerings = self.backtest_parameter_defined(final, date, signal, value, conservative)
        elif entry_strat == "research_parameter_defined":
            offerings = self.backtest_research_parameter_defined(final, date, signal, value, conservative)
        elif entry_strat == "all":
            offerings = self.backtest_all(final, date, signal, value, conservative)
        elif entry_strat == "ai":
            offerings = self.backtest_ai(final, date, signal, value, conservative)
        else:
            offerings = pd.DataFrame([{}])
        offerings["entry_strat"] = entry_strat
        offerings["value"] = value
        offerings["signal"] = signal
        offerings["conservative"] = conservative
        return offerings

    @classmethod
    def ai_driven(self, final, value, conservative):
        """Score each row with its symbol's pickled model and filter on the prediction.

        Loads per-symbol models from the "coinbase_models" collection,
        renames ``inflection`` -> ``concavity`` in place, writes a
        ``prediction`` column, and returns the rows whose prediction equals
        ``value`` sorted by ``signal``.
        """
        comet_historian = CometHistorian()
        comet_historian.cloud_connect()
        models = comet_historian.retrieve("coinbase_models")
        comet_historian.disconnect()
        factors = ["signal", "velocity", "concavity"]
        # NOTE(review): pickle.loads on blobs fetched from the database is
        # unsafe unless the database contents are fully trusted.
        models["model"] = [pickle.loads(x) for x in models["model"]]
        final.rename(columns={"inflection": "concavity"}, inplace=True)
        predictions = []
        for row in final.iterrows():
            try:
                symbol = row[1]["crypto"]
                model = models[models["symbol"] == symbol]["model"].item()
                prediction = model.predict(final[final["crypto"] == symbol][factors])[0]
                predictions.append(prediction)
            except Exception:
                # Was a bare ``except:``; narrowed so SystemExit and
                # KeyboardInterrupt are no longer swallowed. Rows without a
                # usable model fall back to NaN, as before.
                predictions.append(nan)
        final["prediction"] = predictions
        print(final)
        # Both branches of the original applied the same filter and differed
        # only in sort direction; collapsed into one expression.
        ascending = conservative if value else not conservative
        return final[final["prediction"] == value].sort_values("signal", ascending=ascending)

    @classmethod
    def standard(self, final, signal, value, conservative):
        """Plain threshold filter on the ``signal`` column."""
        if value:
            return final[(final["signal"] < -signal)
                         ].sort_values("signal", ascending=conservative)
        return final[(final["signal"] > signal)
                     ].sort_values("signal", ascending=not conservative)

    @classmethod
    def research_parameter_defined(self, final, signal, value, conservative):
        """Threshold filter plus velocity/inflection band constraints (research variant)."""
        if value:
            mask = ((final["signal"] < -signal)
                    & (final["velocity"] >= -3)
                    & (final["velocity"] < 0)
                    & (final["inflection"] >= -1)
                    & (final["inflection"] <= 1))
            return final[mask].sort_values("signal", ascending=conservative)
        # (inflection <= 1) | (inflection >= -1) holds for every non-NaN value.
        mask = ((final["signal"] > signal)
                & (final["velocity"] > 0)
                & ((final["inflection"] <= 1) | (final["inflection"] >= -1)))
        return final[mask].sort_values("signal", ascending=not conservative)

    @classmethod
    def parameter_defined(self, final, signal, value, conservative):
        """Like research_parameter_defined, but with the production inflection band."""
        if value:
            mask = ((final["signal"] < -signal)
                    & (final["velocity"] >= -3)
                    & (final["velocity"] < 0)
                    & (final["inflection"] >= -1)
                    & (final["inflection"] <= 1))
            return final[mask].sort_values("signal", ascending=conservative)
        # NOTE(review): (inflection >= 1) & (inflection <= -1) can never both
        # hold, so this branch always yields an empty frame. The research
        # variant uses ``|`` here -- confirm which was intended; behavior is
        # deliberately kept as-is.
        mask = ((final["signal"] > signal)
                & (final["velocity"] > 0)
                & ((final["inflection"] >= 1)
                   & (final["inflection"] <= -1)))
        return final[mask].sort_values("signal", ascending=not conservative)

    @classmethod
    def signal_based(self, final, signal, value, conservative):
        """Threshold filter that additionally requires a predicted sign change."""
        if value:
            mask = ((final["signal"] < -signal)
                    & (final["p_sign_change"] == True))
            return final[mask].sort_values("signal", ascending=conservative)
        mask = ((final["signal"] > signal)
                & (final["p_sign_change"] == True))
        return final[mask].sort_values("signal", ascending=not conservative)

    @classmethod
    def all(self, final, signal, value, conservative):
        """Combine the threshold, sign-change, velocity and inflection filters."""
        if value:
            mask = ((final["signal"] < -signal)
                    & (final["p_sign_change"] == True)
                    & (final["velocity"] >= -3)
                    & (final["velocity"] < 0)
                    & (final["inflection"] >= 0)
                    & (final["inflection"] <= 1))
            return final[mask].sort_values("signal", ascending=conservative)
        mask = ((final["signal"] > signal)
                & (final["p_sign_change"] == True)
                & (final["velocity"] > 0)
                & ((final["inflection"] <= 0)
                   | (final["inflection"] >= -1)))
        return final[mask].sort_values("signal", ascending=not conservative)

    @classmethod
    def backtest_ai(self, final, date, signal, value, conservative):
        """Backtest twin of ai_driven: filter a pre-scored frame to ``date``."""
        ascending = conservative if value else not conservative
        mask = (final["date"] == date) & (final["prediction"] == value)
        return final[mask].sort_values("signal", ascending=ascending)

    @classmethod
    def backtest_standard(self, final, date, signal, value, conservative):
        """Backtest twin of standard, restricted to rows at ``date``."""
        if value:
            mask = ((final["date"] == date)
                    & (final["signal"] < -signal))
            return final[mask].sort_values("signal", ascending=conservative)
        mask = ((final["date"] == date)
                & (final["signal"] > signal))
        return final[mask].sort_values("signal", ascending=not conservative)

    @classmethod
    def backtest_research_parameter_defined(self, final, date, signal, value, conservative):
        """Backtest twin of research_parameter_defined, restricted to ``date``."""
        if value:
            mask = ((final["date"] == date)
                    & (final["signal"] < -signal)
                    & (final["velocity"] >= -3)
                    & (final["velocity"] < 0)
                    & (final["inflection"] >= -1)
                    & (final["inflection"] <= 1))
            return final[mask].sort_values("signal", ascending=conservative)
        mask = ((final["date"] == date)
                & (final["signal"] > signal)
                & (final["velocity"] > 0)
                & ((final["inflection"] <= 1)
                   | (final["inflection"] >= -1)))
        return final[mask].sort_values("signal", ascending=not conservative)

    @classmethod
    def backtest_parameter_defined(self, final, date, signal, value, conservative):
        """Backtest twin of parameter_defined, restricted to ``date``."""
        if value:
            mask = ((final["date"] == date)
                    & (final["signal"] < -signal)
                    & (final["velocity"] >= -3)
                    & (final["velocity"] < 0)
                    & (final["inflection"] >= -1)
                    & (final["inflection"] <= 1))
            return final[mask].sort_values("signal", ascending=conservative)
        # NOTE(review): same always-empty ``&`` band as parameter_defined;
        # behavior kept as-is pending confirmation.
        mask = ((final["date"] == date)
                & (final["signal"] > signal)
                & (final["velocity"] > 0)
                & ((final["inflection"] >= 1)
                   & (final["inflection"] <= -1)))
        return final[mask].sort_values("signal", ascending=not conservative)

    @classmethod
    def backtest_signal_based(self, final, date, signal, value, conservative):
        """Backtest twin of signal_based, restricted to ``date``."""
        if value:
            mask = ((final["date"] == date)
                    & (final["signal"] < -signal)
                    & (final["p_sign_change"] == True))
            return final[mask].sort_values("signal", ascending=conservative)
        mask = ((final["date"] == date)
                & (final["signal"] > signal)
                & (final["p_sign_change"] == True))
        return final[mask].sort_values("signal", ascending=not conservative)

    @classmethod
    def backtest_all(self, final, date, signal, value, conservative):
        """Backtest twin of ``all``, restricted to ``date``."""
        if value:
            mask = ((final["date"] == date)
                    & (final["signal"] < -signal)
                    & (final["p_sign_change"] == True)
                    & (final["velocity"] >= -3)
                    & (final["velocity"] < 0)
                    & (final["inflection"] >= 0)
                    & (final["inflection"] <= 1))
            return final[mask].sort_values("signal", ascending=conservative)
        mask = ((final["date"] == date)
                & (final["signal"] > signal)
                & (final["p_sign_change"] == True)
                & (final["velocity"] > 0)
                & ((final["inflection"] <= 0)
                   | (final["inflection"] >= -1)))
        return final[mask].sort_values("signal", ascending=not conservative)
| 47.477612
| 130
| 0.478309
| 1,000
| 12,724
| 5.978
| 0.088
| 0.058883
| 0.092339
| 0.100368
| 0.846604
| 0.838742
| 0.836233
| 0.836233
| 0.833724
| 0.826865
| 0
| 0.005802
| 0.40404
| 12,724
| 268
| 131
| 47.477612
| 0.78254
| 0
| 0
| 0.773438
| 0
| 0
| 0.091238
| 0.004086
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054688
| false
| 0
| 0.023438
| 0
| 0.136719
| 0.003906
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2cf2ff3d4fd1f56700dba8eeabbbb4b801afe191
| 37
|
py
|
Python
|
setka/__init__.py
|
RomanovMikeV/setka
|
cad6f17429a4bb3479c5557ad58c15fee568f410
|
[
"MIT"
] | 11
|
2019-04-16T11:41:24.000Z
|
2021-05-28T15:01:17.000Z
|
setka/__init__.py
|
RomanovMikeV/cv_utilities
|
cad6f17429a4bb3479c5557ad58c15fee568f410
|
[
"MIT"
] | 15
|
2019-12-05T22:25:37.000Z
|
2020-03-18T20:09:03.000Z
|
setka/__init__.py
|
RomanovMikeV/setka
|
cad6f17429a4bb3479c5557ad58c15fee568f410
|
[
"MIT"
] | 6
|
2019-04-24T15:35:22.000Z
|
2021-08-10T07:48:39.000Z
|
import setka.base
import setka.pipes
| 12.333333
| 18
| 0.837838
| 6
| 37
| 5.166667
| 0.666667
| 0.709677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 2
| 19
| 18.5
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
fa4367eb975be64e534d7ddc18212c16a58d36eb
| 48,238
|
py
|
Python
|
models.py
|
subin2/LearningGroupStructure
|
c5af7880d7443f576ad3dbe4c1e0af7c34ddcbad
|
[
"MIT"
] | 1
|
2018-12-04T06:29:59.000Z
|
2018-12-04T06:29:59.000Z
|
models.py
|
subin2/LearningGroupStructure
|
c5af7880d7443f576ad3dbe4c1e0af7c34ddcbad
|
[
"MIT"
] | null | null | null |
models.py
|
subin2/LearningGroupStructure
|
c5af7880d7443f576ad3dbe4c1e0af7c34ddcbad
|
[
"MIT"
] | 1
|
2018-12-04T06:48:54.000Z
|
2018-12-04T06:48:54.000Z
|
import numpy as np
import tensorflow as tf
class conv2d(object):
    """Single 2-D convolution layer (TF1 graph style).

    Builds conv -> (batch norm) -> (nonlinearity) -> (dropout) -> (max pool)
    eagerly at construction time; the output tensor is stored in ``self.result``.

    input: 4-D activation tensor (assumed NHWC -- TODO confirm against callers).
    weight_size: [filter_height, filter_width, in_channels, out_channels].
    pool: 'p' enables max pooling with ksize/strides [1, 1, pool_size, 1],
        i.e. pooling along axis 2 only.
    keep_prob: TF1 dropout *keep* probability (1.0 keeps all units).
    """
    def __init__(self, input, weight_size, strides=[1,1,1,1], padding='SAME', pool=None, pool_size=4, nonlinearity=None,
                 use_dropout=True, keep_prob=1.0, use_batchnorm=True, std=0.01, offset=1e-10, scale=1, epsilon=1e-10, name='conv2d_default'):
        with tf.variable_scope(name):
            # Filter and bias initialized from N(0, std^2).
            self.weight = tf.Variable( tf.random_normal( weight_size, stddev=std, dtype=tf.float32) )
            self.bias = tf.Variable( tf.random_normal([weight_size[-1]], stddev=std, dtype=tf.float32) )
            network = tf.nn.bias_add( tf.nn.conv2d(input = input, filter = self.weight, strides=strides, padding=padding),
                                      self.bias, name=name)
            if use_batchnorm:
                # Moments over the batch axis only; the trailing ",1,2" hints
                # per-channel normalization was considered but disabled.
                batch_mean, batch_var = tf.nn.moments(network, [0])#,1,2])
                network = tf.nn.batch_normalization(network, batch_mean, batch_var, offset=offset, scale=scale, variance_epsilon=epsilon, name=name)
            if nonlinearity != None:
                network = nonlinearity(network, name=name)
            if use_dropout:
                network = tf.nn.dropout(network, keep_prob=keep_prob, name=name)
            if pool=='p':
                network = tf.nn.max_pool(value=network,
                                         ksize=[1,1,pool_size,1],
                                         strides=[1,1,pool_size,1],
                                         padding='SAME')
            self.result = network
    def get_layer(self):
        """Return the layer's output tensor."""
        return self.result
    def get_weight(self):
        """Return the convolution filter variable."""
        return self.weight
    def get_bias(self):
        """Return the bias variable."""
        return self.bias
class res_conv2d(object):
    """Residual block: two stacked convolutions with an input skip connection.

    Builds conv1 -> (bn) -> (nonlin) -> conv2 -> add(input) -> (bn) ->
    (nonlin) -> (dropout) -> (max pool) at construction time; the output
    tensor is stored in ``self.result``.  The skip add requires the conv
    output and the input to share shape (SAME padding, matching channels --
    assumed, confirm at call sites).
    """
    def __init__(self, input, weight_size, strides=[1,1,1,1], padding='SAME', pool=None, pool_size=4, nonlinearity=None,
                 use_dropout=True, keep_prob=1.0, use_batchnorm=True, std=0.01, offset=1e-10, scale=1, epsilon=1e-10, name='conv2d_default'):
        with tf.variable_scope(name):
            # Parameters of the two convolution layers, N(0, std^2) init.
            self.weight1 = tf.Variable( tf.random_normal( weight_size, stddev=std, dtype=tf.float32) )
            self.bias1 = tf.Variable( tf.random_normal([weight_size[-1]], stddev=std, dtype=tf.float32) )
            self.weight2 = tf.Variable( tf.random_normal( weight_size, stddev=std, dtype=tf.float32) )
            self.bias2 = tf.Variable( tf.random_normal([weight_size[-1]], stddev=std, dtype=tf.float32) )
            network = tf.nn.bias_add( tf.nn.conv2d(input = input, filter = self.weight1, strides=strides, padding=padding),
                                      self.bias1, name=name)
            if use_batchnorm:
                # Moments over the batch axis only.
                batch_mean, batch_var = tf.nn.moments(network, [0])#,1,2])
                network = tf.nn.batch_normalization(network, batch_mean, batch_var, offset=offset, scale=scale, variance_epsilon=epsilon, name=name)
            if nonlinearity != None:
                network = nonlinearity(network, name=name)
            # Second convolution, then the residual addition of the input.
            network = tf.nn.bias_add( tf.nn.conv2d(input = network, filter = self.weight2, strides=strides, padding=padding),
                                      self.bias2, name=name)
            network = tf.add(input, network)
            if use_batchnorm:
                batch_mean, batch_var = tf.nn.moments(network, [0])#,1,2])
                network = tf.nn.batch_normalization(network, batch_mean, batch_var, offset=offset, scale=scale, variance_epsilon=epsilon, name=name)
            if nonlinearity != None:
                network = nonlinearity(network, name=name)
            if use_dropout:
                network = tf.nn.dropout(network, keep_prob=keep_prob, name=name)
            if pool=='p':
                # Max pool along axis 2 (width) only.
                network = tf.nn.max_pool(value=network,
                                         ksize=[1,1,pool_size,1],
                                         strides=[1,1,pool_size,1],
                                         padding='SAME')
            self.result = network
    def get_layer(self):
        """Return the block's output tensor."""
        return self.result
    def get_weight(self):
        """Return both convolution filters as [weight1, weight2].

        BUG FIX: the original returned ``self.weight``, an attribute this
        class never defines (only weight1/weight2 exist), so every call
        raised AttributeError.
        """
        return [self.weight1, self.weight2]
    def get_bias(self):
        """Return both bias variables (see get_weight for the bug note)."""
        return [self.bias1, self.bias2]
class shared_depthwise_conv2d(object):
    """Depthwise 2-D convolution whose spatial filter is shared across input channels.

    The filter is built by averaging a random-normal kernel over the
    in_channels axis (axis 2) and tiling that mean back, so every input
    channel applies the same spatial filter -- hence "shared".

    input: tensor of shape [batch, in_height, in_width, in_channels]
    weight_size: an array of the form [filter_height, filter_width, in_channels, channel_multiplier].
    Let in_channels be 1.
    returns:
    A 4D Tensor of shape [batch, out_height, out_width, in_channels * channel_multiplier].
    """
    def __init__(self, input, weight_size, strides=[1,1,1,1], padding='SAME', pool='p', pool_size=4,
                 nonlinearity=None, use_dropout=True, keep_prob=1.0, use_batchnorm=True, std=0.01,
                 offset=1e-10, scale=1, epsilon=1e-10, name='depthwise_conv2d_default'):
        self.pool = pool
        # Effective per-channel filter shape (in_channels collapsed to 1).
        self.weight_size = [weight_size[0],weight_size[1],1,weight_size[3]]
        with tf.variable_scope(name):
            # Mean over axis 2 (in_channels), then tiled back to full shape:
            # one shared spatial filter per channel_multiplier slot.
            self.weight = tf.Variable( tf.tile(tf.reduce_mean(tf.random_normal( weight_size, stddev=std, dtype=tf.float32), axis=2, keep_dims=True),
                                               [1,1,weight_size[2],1]))
            self.bias = tf.Variable( tf.random_normal([weight_size[-1]], stddev=std, dtype=tf.float32) )
            # NOTE(review): plain tf.add (not bias_add) -- bias has length
            # channel_multiplier while the conv output has
            # in_channels*channel_multiplier channels, so this relies on
            # broadcasting (fine when in_channels == 1, per docstring) --
            # confirm intent.
            network = tf.add( tf.nn.depthwise_conv2d(input = input, filter = self.weight, strides=strides, padding=padding),
                              self.bias, name=name)
            if use_batchnorm:
                # Moments over the batch axis only.
                batch_mean, batch_var = tf.nn.moments(network, axes=[0])
                network = tf.nn.batch_normalization(network, batch_mean, batch_var, offset=offset, scale=scale, variance_epsilon=epsilon, name=name)
            if nonlinearity != None:
                network = nonlinearity(network, name=name)
            if use_dropout:
                # keep_prob is the TF1 dropout *keep* probability.
                network = tf.nn.dropout(network, keep_prob=keep_prob, name=name)
            if pool=='p':
                # Pool along axis 2 (width) only.
                network = tf.nn.max_pool(value=network,
                                         ksize=[1,1,pool_size,1],
                                         strides=[1,1,pool_size,1],
                                         padding='SAME')
            self.result = network
    def get_layer(self):
        """Return the layer's output tensor."""
        return self.result
    def get_weight(self):
        """Return the (tiled, shared) depthwise filter variable."""
        return self.weight
    def get_bias(self):
        """Return the bias variable."""
        return self.bias
class depthwise_conv2d(object):
    """Depthwise 2-D convolution layer (TF1 graph style).

    input: tensor of shape [batch, in_height, in_width, in_channels]
    weight_size: an array of the form [filter_height, filter_width, in_channels, channel_multiplier].
    returns:
    A 4D Tensor of shape [batch, out_height, out_width, in_channels * channel_multiplier].
    """
    def __init__(self, input, weight_size, strides=[1,1,1,1], padding='SAME', pool='p', pool_size=4,
                 nonlinearity=None, use_dropout=True, keep_prob=1.0, use_batchnorm=True, std=0.01,
                 offset=1e-10, scale=1, epsilon=1e-10, name='depthwise_conv2d_default'):
        self.pool = pool
        with tf.variable_scope(name):
            self.weight = tf.Variable( tf.random_normal( weight_size, stddev=std, dtype=tf.float32))
            # Bias spans all output channels: in_channels * channel_multiplier.
            self.bias = tf.Variable( tf.random_normal([weight_size[-1]*weight_size[-2]], stddev=std, dtype=tf.float32) )
            network = tf.nn.bias_add( tf.nn.depthwise_conv2d(input = input, filter = self.weight, strides=strides, padding=padding),
                                      self.bias, name=name)
            if use_batchnorm:
                # Moments over the batch axis only.
                batch_mean, batch_var = tf.nn.moments(network, axes=[0])
                network = tf.nn.batch_normalization(network, batch_mean, batch_var, offset=offset, scale=scale, variance_epsilon=epsilon, name=name)
            if nonlinearity != None:
                network = nonlinearity(network, name=name)
            if use_dropout:
                # keep_prob is the TF1 dropout *keep* probability.
                network = tf.nn.dropout(network, keep_prob=keep_prob, name=name)
            if pool=='p':
                # Pool along axis 2 (width) only.
                network = tf.nn.max_pool(value=network,
                                         ksize=[1,1,pool_size,1],
                                         strides=[1,1,pool_size,1],
                                         padding='SAME')
            self.result = network
    def get_layer(self):
        """Return the layer's output tensor."""
        return self.result
    def get_weight(self):
        """Return the depthwise filter variable."""
        return self.weight
    def get_bias(self):
        """Return the bias variable."""
        return self.bias
class RCL(object):
    """Recurrent convolutional layer (TF1 graph style).

    Applies the same convolution ``num_iter`` times; after each application
    (optional batch norm / nonlinearity) the original ``input`` is added back,
    making the layer recurrent/residual.  ``num_iter == 0`` degenerates to a
    single plain convolution with no skip-add.  The output tensor ends up in
    ``self.result``.

    pool: 'p' -> max pool with window pool_size ([h, w]);
          'c' -> pooling via a conv2d layer (see NOTE in the body).
    weight/biases: optional externally shared variables; fresh random-normal
        variables are created when None.
    """
    def __init__(self, input, weight_size, weight=None, biases=None, strides=[1,1,1,1], padding='SAME', pool='p', pool_size=[1,4], num_iter=3,
                 nonlinearity=None, use_dropout=True, keep_prob=1.0, use_batchnorm=True,
                 std=0.01, offset=1e-10, scale=1, epsilon=1e-10, name='RCL_default'):
        """
        when num_iter==1, same as conv2d
        """
        self.pool = pool
        with tf.variable_scope(name):
            self.weight = tf.Variable( tf.random_normal(weight_size, stddev=std, dtype=tf.float32)) if weight is None else weight
            self.biases = tf.Variable( tf.random_normal([weight_size[-1]], stddev=std, dtype=tf.float32)) if biases is None else biases
            """
            rcl = tf.nn.bias_add(tf.nn.conv2d(input=input, filter=self.weight, strides=strides, padding=padding),
                                 self.biases)
            if use_batchnorm:
                batch_mean, batch_var = tf.nn.moments(rcl, [0])#[0,1,2]
                rcl = tf.nn.batch_normalization(rcl, batch_mean, batch_var, offset, scale, epsilon)
            if nonlinearity != None:
                rcl = nonlinearity(rcl)
            network = rcl
            """
            network = input
            if num_iter == 0:
                # Degenerate case: one conv, no recurrence, no skip-add.
                network = tf.nn.bias_add(tf.nn.conv2d(input=network, filter=self.weight, strides=strides, padding=padding),
                                         self.biases
                                         )
                if use_batchnorm:
                    batch_mean, batch_var = tf.nn.moments(network, [0])#[0,1,2]
                    network = tf.nn.batch_normalization(network, batch_mean, batch_var, offset, scale, epsilon, name=name)
                if nonlinearity != None:
                    network = nonlinearity(network, name=name)
            else:
                for i in range(num_iter):
                    #network = tf.add( rcl,
                    #                  tf.nn.bias_add(tf.nn.conv2d(input=network, filter=self.weight, strides=strides, padding=padding),
                    #                                 self.biases
                    #                                 )
                    #                 )
                    network = tf.nn.bias_add(tf.nn.conv2d(input=network, filter=self.weight, strides=strides, padding=padding),
                                             self.biases
                                             )
                    if use_batchnorm:
                        batch_mean, batch_var = tf.nn.moments(network, [0])#[0,1,2]
                        network = tf.nn.batch_normalization(network, batch_mean, batch_var, offset, scale, epsilon, name=name)
                    if nonlinearity != None:
                        network = nonlinearity(network, name=name)
                    # Skip connection: the layer input is added back on every
                    # iteration (requires matching shapes).
                    network = tf.add(input, network)
            if use_dropout:
                network = tf.nn.dropout(network, keep_prob=keep_prob, name=name)
            if pool=='c':
                #input: [batch, height, width, channel]
                #kernel: [height, width, in_channels, out_channels]
                # NOTE(review): pool_size defaults to a list ([1,4]) but is
                # spliced into weight_size as one element here -- confirm
                # that pool=='c' callers pass a scalar.
                network = conv2d(input=network,
                                 weight_size=[1,pool_size,weight_size[-1],weight_size[-1]],
                                 padding='VALID',
                                 nonlinearity=nonlinearity,
                                 use_dropout=use_dropout,
                                 keep_prob=keep_prob,
                                 name=name+'_convpool')
            elif pool=='p':
                network = tf.nn.max_pool(value=network,
                                         ksize=[1,pool_size[0],pool_size[1],1],
                                         strides=[1,pool_size[0],pool_size[1],1],
                                         padding='SAME')
            self.result = network
    def get_layer(self):
        """Output tensor; unwraps the conv-pooling layer when pool=='c'."""
        if self.pool == 'c':
            return self.result.get_layer()
        return self.result
    def get_conv_layer(self):
        """Accessor for the conv-pooling layer.

        NOTE(review): this returns self.pool (the string 'c'), which looks
        like a bug -- self.result (the conv2d pooling layer) was probably
        intended.  Behavior left unchanged.
        """
        if self.pool != 'c':
            raise ValueError('No conv layer is used for pooling.')
        return self.pool
    def get_weight(self):
        """Return the shared recurrent filter variable."""
        return self.weight
    def get_biases(self):
        """Return the bias variable."""
        return self.biases
class RCL_coef(object):
    """RCL variant with per-iteration 1x1 "coefficient" convolutions.

    Like RCL, but after each shared-weight convolution a per-iteration 1x1
    convolution (coef_weight[i]/coef_bias[i]) is applied before batch norm,
    nonlinearity and the skip-add of the input.
    """
    def __init__(self, input, weight_size, strides=[1,1,1,1], padding='SAME', pool='p', pool_size=[1,4], num_iter=3, nonlinearity=None,
                 use_dropout=True, keep_prob=1.0, use_batchnorm=True, std=0.01, offset=1e-10, scale=1, epsilon=1e-10, name='RCL_default'):
        """
        when num_iter==1, same as conv2d
        """
        self.pool = pool
        with tf.variable_scope(name):
            self.weight = tf.Variable( tf.random_normal(weight_size, stddev=std, dtype=tf.float32) )
            self.biases = tf.Variable( tf.random_normal([weight_size[-1]], stddev=std, dtype=tf.float32))
            # One 1x1 kernel + bias per recurrent iteration.
            self.coef_weight = [tf.Variable(tf.random_normal([1, 1, weight_size[-2], weight_size[-1]], stddev=std, dtype=tf.float32)) for i in range(num_iter)]
            self.coef_bias = [tf.Variable(tf.random_normal([weight_size[-1]], stddev=std, dtype=tf.float32)) for i in range(num_iter)]
            """
            rcl = tf.nn.bias_add(tf.nn.conv2d(input=input, filter=self.weight, strides=strides, padding=padding),
                                 self.biases)
            if use_batchnorm:
                batch_mean, batch_var = tf.nn.moments(rcl, [0])#[0,1,2]
                rcl = tf.nn.batch_normalization(rcl, batch_mean, batch_var, offset, scale, epsilon)
            if nonlinearity != None:
                rcl = nonlinearity(rcl)
            network = rcl
            """
            network = input
            if num_iter == 0:
                # Degenerate case: one conv, no coefficient conv, no skip-add.
                network = tf.nn.bias_add(tf.nn.conv2d(input=network, filter=self.weight, strides=strides, padding=padding),
                                         self.biases
                                         )
                if use_batchnorm:
                    batch_mean, batch_var = tf.nn.moments(network, [0])#[0,1,2]
                    network = tf.nn.batch_normalization(network, batch_mean, batch_var, offset, scale, epsilon, name=name)
                if nonlinearity != None:
                    network = nonlinearity(network, name=name)
            else:
                for i in range(num_iter):
                    network = tf.nn.bias_add(tf.nn.conv2d(input=network, filter=self.weight, strides=strides, padding=padding),
                                             self.biases
                                             )
                    # Per-iteration 1x1 coefficient convolution.
                    network = tf.nn.bias_add(tf.nn.conv2d(input=network, filter=self.coef_weight[i], strides=strides, padding=padding),
                                             self.coef_bias[i]
                                             )
                    if use_batchnorm:
                        batch_mean, batch_var = tf.nn.moments(network, [0])#[0,1,2]
                        network = tf.nn.batch_normalization(network, batch_mean, batch_var, offset, scale, epsilon, name=name)
                    if nonlinearity != None:
                        network = nonlinearity(network, name=name)
                    # Skip connection each iteration (requires matching shapes).
                    network = tf.add(input, network)
            if use_dropout:
                network = tf.nn.dropout(network, keep_prob=keep_prob, name=name)
            if pool=='c':
                #input: [batch, height, width, channel]
                #kernel: [height, width, in_channels, out_channels]
                # NOTE(review): pool_size defaults to a list but is used as a
                # single weight_size element here -- confirm pool=='c'
                # callers pass a scalar.
                network = conv2d(input=network,
                                 weight_size=[1,pool_size,weight_size[-1],weight_size[-1]],
                                 padding='VALID',
                                 nonlinearity=nonlinearity,
                                 use_dropout=use_dropout,
                                 keep_prob=keep_prob,
                                 name=name+'_convpool')
            elif pool=='p':
                network = tf.nn.max_pool(value=network,
                                         ksize=[1,pool_size[0],pool_size[1],1],
                                         strides=[1,pool_size[0],pool_size[1],1],
                                         padding='SAME')
            self.result = network
    def get_layer(self):
        """Output tensor; unwraps the conv-pooling layer when pool=='c'."""
        if self.pool == 'c':
            return self.result.get_layer()
        return self.result
    def get_conv_layer(self):
        """NOTE(review): returns self.pool ('c'); self.result was probably intended."""
        if self.pool != 'c':
            raise ValueError('No conv layer is used for pooling.')
        return self.pool
    def get_weight(self):
        """Return the shared recurrent filter variable."""
        return self.weight
    def get_biases(self):
        """Return the shared bias variable."""
        return self.biases
class depthwise_RCL(object):
    # Recurrent convolutional layer built from tf.nn.depthwise_conv2d.
    # One depthwise kernel (self.weight) is shared across the initial pass and
    # all num_iter recurrent passes; each pass has its own bias from
    # self.biases.  The result is added residually to the input, optionally
    # dropped out, then pooled ('p' = max-pool, 'c' = conv2d pooling,
    # anything else = no pooling).
    def __init__(self, input, weight_size, strides=[1,1,1,1], padding='SAME', pool='p', pool_size=[1,4], num_iter=3, nonlinearity=None,
                 use_dropout=True, keep_prob=1.0, use_batchnorm=True, std=0.01, offset=1e-10, scale=1, epsilon=1e-10, name='depthwise_RCL_default'):
        """
        when num_iter==1, same as conv2d

        Builds the graph immediately; the final tensor is stored in self.result.

        Args (tf.nn.depthwise_conv2d conventions):
            input: 4-D tensor [batch, height, width, in_channels].
            weight_size: depthwise filter shape [h, w, in_channels, channel_multiplier];
                output channel count is in_channels * channel_multiplier
                (= weight_size[-2] * weight_size[-1], used for the bias sizes below).

        NOTE(review): `pool_size` is indexed as a [h, w] list when pool=='p'
        but used as a scalar width when pool=='c' -- callers must pass the
        matching form for the chosen pooling mode.
        NOTE(review): the mutable default arguments (strides, pool_size) are
        shared across calls; harmless here since they are never mutated.
        """
        self.pool = pool
        with tf.variable_scope(name):
            # Shared depthwise kernel used by every pass.
            self.weight = tf.Variable( tf.random_normal(weight_size, stddev=std, dtype=tf.float32) )
            #self.bias = tf.Variable(tf.random_normal([weight_size[-1]*weight_size[-2]], stddev=std, dtype=tf.float32))
            # One bias vector per pass: initial conv + num_iter recurrent passes.
            self.biases = [tf.Variable( tf.random_normal([weight_size[-1]*weight_size[-2]], stddev=std, dtype=tf.float32)) for i \
                           in range(num_iter+1)]
            """
            rcl = tf.nn.bias_add(tf.nn.depthwise_conv2d(input=input, filter=self.weight, strides=strides, padding=padding),
                                 self.biases[0])
            if use_batchnorm:
                batch_mean, batch_var = tf.nn.moments(rcl, [0])#[0,1,2]
                rcl = tf.nn.batch_normalization(rcl, batch_mean, batch_var, offset, scale, epsilon)
            if nonlinearity != None:
                rcl = nonlinearity(rcl)
            network = rcl
            """
            network = input
            # Initial (feed-forward) pass with the shared kernel and bias 0.
            network = tf.nn.bias_add( tf.nn.depthwise_conv2d(input=network, filter=self.weight, strides=strides, padding=padding),
                                      self.biases[0])
            if use_batchnorm:
                # Moments over the batch axis only (axes=[0]).
                batch_mean, batch_var = tf.nn.moments(network, [0])#[0,1,2]
                network = tf.nn.batch_normalization(network, batch_mean, batch_var, offset, scale, epsilon, name=name)
            if nonlinearity != None:
                network = nonlinearity(network, name=name)
            # Recurrent passes: same kernel, per-pass bias i+1.
            for i in range(num_iter):
                #network = tf.add( rcl,
                #                  tf.nn.bias_add(tf.nn.depthwise_conv2d(input=network, filter=self.weight, strides=strides, padding=padding),
                #                                 self.biases[i+1]
                #                                 )
                #                  )
                network = tf.nn.bias_add( tf.nn.depthwise_conv2d(input=network, filter=self.weight, strides=strides, padding=padding),
                                          self.biases[i+1])
                if use_batchnorm:
                    batch_mean, batch_var = tf.nn.moments(network, [0])#[0,1,2]
                    network = tf.nn.batch_normalization(network, batch_mean, batch_var, offset, scale, epsilon, name=name)
                if nonlinearity != None:
                    network = nonlinearity(network, name=name)
            # Residual connection with the layer input.
            network = tf.add(input, network)
            if use_dropout:
                network = tf.nn.dropout(network, keep_prob=keep_prob, name=name)
            if pool=='c':
                # Convolutional pooling with a VALID conv of width pool_size
                # (scalar here); channel count preserved.
                network = conv2d(input=network,
                                 weight_size=[1,pool_size, weight_size[-1]*weight_size[-2], weight_size[-1]*weight_size[-2]],
                                 padding='VALID',
                                 nonlinearity=nonlinearity,
                                 use_dropout=use_dropout,
                                 keep_prob=keep_prob,
                                 name=name+'_convpool')
            elif pool=='p':
                network = tf.nn.max_pool(value=network,
                                         ksize=[1,pool_size[0],pool_size[1],1],
                                         strides=[1,pool_size[0],pool_size[1],1],
                                         padding='SAME')
            self.result = network
    def get_layer(self):
        # NOTE(review): unlike RCL.get_layer, this does not unwrap the conv2d
        # wrapper when pool=='c'; callers then receive the wrapper object,
        # not a tensor -- confirm whether that is intended.
        return self.result
    def get_weight(self):
        # The shared depthwise kernel variable.
        return self.weight
    def get_biases(self):
        # List of per-pass bias variables (length num_iter+1).
        return self.biases
class feedforward(object):
    """Fully connected layer: ``input @ W + b`` with optional batch
    normalization, nonlinearity and dropout (applied in that order).

    `weight`/`bias` may be passed in to share parameters; otherwise fresh
    random-normal Variables are created inside the layer's variable scope.
    The output tensor is stored in ``self.result``.
    """
    def __init__(self, input, weight_size, weight=None, bias=None, nonlinearity=None, use_dropout=False, keep_prob=1.0, use_batchnorm=False, std=0.01, offset=1e-10, scale=1, epsilon=1e-10, name='feedforward_default'):
        with tf.variable_scope(name):
            if weight is None:
                weight = tf.Variable( tf.random_normal( weight_size, stddev=std, dtype=tf.float32) )
            self.weight = weight
            if bias is None:
                bias = tf.Variable( tf.random_normal( [weight_size[-1]], stddev=std, dtype=tf.float32) )
            self.bias = bias
            out = tf.nn.bias_add( tf.matmul(input, self.weight), self.bias, name=name)
            if use_batchnorm:
                # Normalize over the batch axis only.
                mean, var = tf.nn.moments(out, [0])
                out = tf.nn.batch_normalization(out, mean, var, offset, scale, epsilon, name=name)
            if nonlinearity is not None:
                out = nonlinearity(out, name=name)
            if use_dropout:
                out = tf.nn.dropout(out, keep_prob=keep_prob, name=name)
            self.result = out
    def get_layer(self):
        """Output tensor of the layer."""
        return self.result
    def get_bias(self):
        """Bias variable."""
        return self.bias
    def get_weight(self):
        """Weight matrix variable."""
        return self.weight
class RCNN(object):
    """Recurrent Convolutional Neural Network regressor.

    Pipeline: conv2d -> `rrcl_num` stacked RCL blocks -> flatten ->
    hidden feedforward layers -> linear output layer.  Trained on squared
    error with RMSProp (default) or Adam.

    inner variables:
        x, y: tensorflow placeholders (input / target).
        conv1: the first 'conv2d' object.
        rrcls: list of 'RCL' objects, one per stacked block.
        forwards: list of hidden 'feedforward' objects.
        output: the final 'feedforward' object; output_layer: its tensor.
    functions:
        train/test return the cost; reconstruct returns the network output;
        save/load checkpoint the model; terminate closes the session and
        resets the default graph.
    """
    def __init__(self, batch_size=128, time_point=1024, in_channels = 126, out_channels=256, ch_multiplier=None,
                 rrcl_iter=2, rrcl_num=4, forward_layers=[200,3], pool=['n', 'p', 'p', 'p', 'c'],
                 use_batchnorm=True, scale=1, offset=0.01, epsilon=0.01, nonlinearity=None, keep_probs=None,
                 std=0.01, w_filter_size=9, p_filter_size=4, l_rate=0.01, l_decay=0.95, l_step=1000, decay=0.9, momentum=0.9, optimizer='RMSProp', opt_epsilon=0.1):
        self.batch_size = batch_size
        self.time_point = time_point
        self.in_channels = in_channels
        self.out_channels = out_channels
        if ch_multiplier is not None:
            # Fix: print as a function call (valid in Python 2 and 3) and
            # correct the spelling of 'deprecated'.
            print('\'ch_multiplier\' is deprecated. Use \'out_channels\' instead.')
            self.out_channels = ch_multiplier
        self.rrcl_iter = rrcl_iter
        self.rrcl_num = rrcl_num
        self.use_batchnorm = use_batchnorm
        self.offset = offset
        self.scale = scale
        self.epsilon = epsilon
        self.nonlinearity = nonlinearity
        self.keep_probs = keep_probs
        # Dropout is active only when keep_probs is given and not all 1.0.
        self.use_dropout = not (keep_probs is None or keep_probs == [1.0 for i in range(len(keep_probs))])
        if keep_probs is None:
            self.keep_probs = [1.0 for i in range(1 + rrcl_num + len(forward_layers) - 1)]
        # Expected: one slot for the conv layer, one per RCL, one per hidden
        # dense layer (the output layer never uses dropout).
        if self.use_dropout and len(keep_probs) != (1 + rrcl_num + len(forward_layers) - 1):
            raise ValueError('Parameter \'keep_probs\' length is wrong.')
        self.std = std
        self.w_filter_size = w_filter_size
        self.p_filter_size = p_filter_size
        # Prepend the flattened width so forward_layers[i] -> [i+1] gives
        # every dense weight shape.
        self.forward_layers = [out_channels] + forward_layers
        self.pool = pool
        if len(self.pool) != rrcl_num + 1:
            raise ValueError('Parameter \'pool\' length does not match with the model shape.')
        global_step = tf.Variable(0, trainable=False)
        self.l_rate = tf.train.exponential_decay(l_rate, global_step, l_step, l_decay, staircase=True)
        self.decay = decay
        self.momentum = momentum
        self.y = tf.placeholder(tf.float32, [None, self.forward_layers[-1]], name='y')
        self.x = tf.placeholder(tf.float32, [None, 1, time_point, in_channels], name='x')
        self.build_model()
        # Define loss and optimizer, minimize the squared error.
        self.cost = tf.reduce_mean(tf.pow(self.y - self.output_layer, 2))
        if optimizer == 'Adam':
            self.optimizer = tf.train.AdamOptimizer(self.l_rate, epsilon=opt_epsilon).minimize(self.cost, global_step=global_step)
        else:  # RMSProp
            self.optimizer = tf.train.RMSPropOptimizer(self.l_rate,
                                                       decay=self.decay,
                                                       momentum=self.momentum).minimize(self.cost, global_step=global_step)
        # Initializing the tensorflow variables (pre-1.0 API, consistent
        # with the rest of this file).
        init = tf.initialize_all_variables()
        # Launch the session.
        self.session_conf = tf.ConfigProto()
        self.session_conf.gpu_options.allow_growth = True
        self.sess = tf.InteractiveSession(config=self.session_conf)
        self.sess.run(init)
        self.saver = tf.train.Saver(max_to_keep=10000)
    def build_model(self):
        """Construct the graph.

        Sets self.conv1, self.rrcls, self.flatten, self.forwards, self.output
        and self.output_layer.
        """
        length = self.time_point  # remaining temporal length after pooling
        network = conv2d(self.x,
                         weight_size=[1, self.w_filter_size, self.in_channels, self.out_channels],
                         nonlinearity=self.nonlinearity,
                         pool=self.pool[0],
                         pool_size=self.p_filter_size,
                         use_dropout=self.use_dropout,
                         keep_prob=self.keep_probs[0],
                         use_batchnorm=self.use_batchnorm,
                         std=self.std,
                         offset=self.offset,
                         scale=self.scale,
                         epsilon=self.epsilon,
                         name='conv2d1')
        self.conv1 = network
        self.rrcls = []
        for r in range(self.rrcl_num):
            filter_size = self.w_filter_size
            # Halve (integer division, Py2/Py3 portable) until the filter
            # fits the remaining length.
            while filter_size > length:
                filter_size //= 2
            network = RCL(input=network.get_layer(),
                          weight_size=[1, filter_size, self.out_channels, self.out_channels],
                          num_iter=self.rrcl_iter,
                          nonlinearity=self.nonlinearity,
                          use_dropout=self.use_dropout,
                          keep_prob=self.keep_probs[1 + r],
                          use_batchnorm=self.use_batchnorm,
                          std=self.std,
                          offset=self.offset,
                          scale=self.scale,
                          epsilon=self.epsilon,
                          pool=self.pool[r + 1],
                          pool_size=[1, self.p_filter_size],
                          name='RCL' + str(r))
            self.rrcls.append(network)
            length //= self.p_filter_size
            print('rrcl{} done {}'.format(r, network.get_layer()))
        network = tf.reshape(network.get_layer(), shape=[-1, self.out_channels])
        self.flatten = network
        print('flatten to {}'.format(self.flatten))
        # Bug fix: the original build_model stopped here and never produced
        # self.output_layer, so __init__ crashed when defining the cost.
        # Build the hidden dense stack and the linear output layer, mirroring
        # RRCNN.build_model.
        self.forwards = []
        for i in range(len(self.forward_layers) - 2):
            layer = feedforward(input=network,
                                weight_size=[self.forward_layers[i], self.forward_layers[i + 1]],
                                nonlinearity=self.nonlinearity,
                                use_dropout=self.use_dropout,
                                # Dense layer i owns keep_probs slot 1 + rrcl_num + i.
                                keep_prob=self.keep_probs[1 + self.rrcl_num + i],
                                use_batchnorm=self.use_batchnorm,
                                std=self.std,
                                offset=self.offset,
                                scale=self.scale,
                                epsilon=self.epsilon,
                                name='forward' + str(i))
            self.forwards.append(layer)
            network = layer.get_layer()
            print('feedforward {} done, {}'.format(i, network))
        out = feedforward(input=network,
                          weight_size=[self.forward_layers[-2], self.forward_layers[-1]],
                          nonlinearity=None,
                          use_dropout=False,
                          use_batchnorm=False,
                          std=self.std,
                          offset=self.offset,
                          scale=self.scale,
                          epsilon=self.epsilon,
                          name='output')
        self.output = out
        self.output_layer = out.get_layer()
        print('model built')
    def train(self, data, target):
        """One optimization step; data shape [batch, 1, time, channels]; returns cost."""
        train_feed_dict = {self.x: data}
        train_feed_dict.update({self.y: target})
        opt, cost = self.sess.run((self.optimizer, self.cost),
                                  feed_dict=train_feed_dict)
        return cost
    def test(self, data, target):
        """Evaluate the cost without updating weights."""
        test_feed_dict = {self.x: data}
        test_feed_dict.update({self.y: target})
        cost = self.sess.run(self.cost,
                             feed_dict=test_feed_dict)
        return cost
    def reconstruct(self, data):
        """Forward pass only; returns the network output for data."""
        recon_feed_dict = {self.x: data}
        return self.sess.run(self.output_layer,
                             feed_dict=recon_feed_dict)
    def save(self, save_path='./model.ckpt'):
        """Checkpoint all variables to save_path."""
        saved_path = self.saver.save(self.sess, save_path)
        print("Model saved in file: %s" % saved_path)
    def load(self, load_path='./model.ckpt'):
        """Restore variables from a checkpoint written by save()."""
        self.saver.restore(self.sess, load_path)
        print("Model restored")
    def terminate(self):
        """Close the session and reset the default graph."""
        self.sess.close()
        tf.reset_default_graph()
class RRCNN(object):
    """
    Per-cluster recurrent CNN: each channel cluster gets its own conv2d +
    stacked-RCL tower; the towers are concatenated, flattened, and fed to a
    dense head.
    input&output:
        x : [batch_size, 1, time_point, num_features_per_cluster] * number of clusters
        y : [batch, output_nodes]
    inner variables:
        x: list of tensorflow placeholders (one per cluster). input.
        y: tensorflow placeholder. target.
        sess: tensorflow session
        conv1: a list of 'conv2d' objects. use get_weight(), get_bias(), get_layer().
        rrcls: a list of lists of 'RCL' objects. The outer list indexes the
               stacked layers, the inner list indexes the clusters.
               use get_weight(), get_biases(), get_layer().
        forwards: list of 'feedforward' objects. use get_weight(), get_bias(), get_layer().
        output: an instance of class 'feedforward'.
        output_layer: a tensorflow tensor instance. output.get_layer().
    functions:
        train(self, data, target): returns cost
        test(self, data, target): returns cost
        reconstruct(self, data): returns the network output for data
        save(self, save_path='./model.ckpt'): save model
        load(self, load_path='./model.ckpt'): restore model
        terminate(self): close session and reset default graph
    parameters:
        batch_size=128,
        time_point=256,            # temporal length.
        in_channels=...,           # number of channels of input data.
        out_channels=512,
        cluster=cluster,           # cluster index per channel. should start from 0.
        rrcl_iter=[3,3,3],         # iterations per stacked RCL (one entry per layer).
        rrcl_num=3,                # number of stacked RCL layers.
        forward_layers=[100,3],    # [(concatenated layer node omitted) forward_1, ..., output]
        use_batchnorm=True,
        scale=1, offset=1e-10, epsilon=1e-10,  # batch-normalization parameters.
        nonlinearity=tf.nn.elu,
        keep_probs=[...],          # dropout keep probabilities, in the form
                                   # [conv_layer, RCL_1, ..., RCL_fin, forward_1, ..., forward_fin];
                                   # length must be 1 + rrcl_num + len(forward_layers) - 1.
                                   # Use None to disable dropout.
        std=0.001,
        w_filter_size=9,           # filter size for conv layer and RCLs; halved while too long.
        p_filter_size=4,           # max pooling filter size.
        l_rate=0.01, l_decay=0.95, l_step=1000, decay=0.9, momentum=0.9
    """
    def __init__(self, batch_size=128, time_point=1024, in_channels=126, out_channels=256, ch_multiplier=None,
                 cluster=None, rrcl_iter=[2, 2, 2], rrcl_num=3, forward_layers=[200, 3], pool=['n', 'p', 'p', 'p'],
                 use_batchnorm=True, scale=1, offset=0.01, epsilon=0.01, nonlinearity=None, keep_probs=None,
                 std=0.01, w_filter_size=9, p_filter_size=4, l_rate=0.01, l_decay=0.95, l_step=1000,
                 optimizer='RMSProp', opt_epsilon=0.1, decay=0.9, momentum=0.9):
        self.batch_size = batch_size
        self.time_point = time_point
        self.in_channels = in_channels
        self.out_channels = out_channels
        if ch_multiplier is not None:
            # Bug fix: this warning was a dangling `print` statement followed
            # by a bare (no-op) string literal on the next line; also corrected
            # the spelling of 'deprecated'.
            print('\'ch_multiplier\' is deprecated. Use \'out_channels\' instead.')
            self.out_channels = ch_multiplier
        self.cluster = cluster
        self.rrcl_iter = rrcl_iter
        self.rrcl_num = rrcl_num
        self.use_batchnorm = use_batchnorm
        self.offset = offset
        self.scale = scale
        self.epsilon = epsilon
        self.nonlinearity = nonlinearity
        self.keep_probs = keep_probs
        # Dropout is active only when keep_probs is given and not all 1.0.
        self.use_dropout = not (keep_probs is None or keep_probs == [1.0 for i in range(len(keep_probs))])
        if keep_probs is None:
            self.keep_probs = [1.0 for i in range(1 + rrcl_num + len(forward_layers) - 1)]
        if self.use_dropout and len(keep_probs) != (1 + rrcl_num + len(forward_layers) - 1):
            raise ValueError('\'keep_probs\' length is wrong')
        self.std = std
        self.w_filter_size = w_filter_size
        self.p_filter_size = p_filter_size
        # Total channel count after concatenating every per-cluster tower.
        # Integer division (//) keeps the count integral under Python 3 too.
        t = 0
        for i in range(len(np.unique(self.cluster))):
            t = t + self.out_channels * np.sum(self.cluster == i) // self.in_channels
        self.ch_sum = t
        # Prepend the flattened width so forward_layers[i] -> [i+1] gives
        # every dense weight shape.
        self.forward_layers = [t] + forward_layers
        self.pool = pool
        if len(self.pool) != rrcl_num + 1:
            raise ValueError('Parameter \'pool\' length does not match with the model shape.')
        global_step = tf.Variable(0, trainable=False)
        self.l_rate = tf.train.exponential_decay(l_rate, global_step, l_step, l_decay, staircase=True)
        self.decay = decay
        self.momentum = momentum
        self.y = tf.placeholder(tf.float32, [None, self.forward_layers[-1]], name='y')
        self.x = [tf.placeholder(tf.float32, [None, 1, time_point, np.sum(cluster == i)], name='x' + str(i)) for i
                  in range(len(np.unique(cluster)))]
        self.build_model()
        # Define loss and optimizer, minimize the squared error.
        self.cost = tf.reduce_mean(tf.pow(self.y - self.output_layer, 2))
        if optimizer == 'Adam':
            self.optimizer = tf.train.AdamOptimizer(self.l_rate, epsilon=opt_epsilon).minimize(self.cost,
                                                                                               global_step=global_step)
        else:  # RMSProp
            self.optimizer = tf.train.RMSPropOptimizer(self.l_rate,
                                                       decay=self.decay,
                                                       momentum=self.momentum).minimize(self.cost,
                                                                                        global_step=global_step)
        # Initializing the tensorflow variables (pre-1.0 API, consistent
        # with the rest of this file).
        init = tf.initialize_all_variables()
        # Launch the session.
        self.session_conf = tf.ConfigProto()
        self.session_conf.gpu_options.allow_growth = True
        self.sess = tf.InteractiveSession(config=self.session_conf)
        self.sess.run(init)
        self.saver = tf.train.Saver(max_to_keep=10000)
    def build_model(self):
        """Build the per-cluster towers, concatenate, flatten, and add the dense head.

        Sets self.conv1, self.rrcls, self.concat, self.flatten, self.forwards
        (when there are hidden dense layers), self.output and self.output_layer.

        Bug fixes versus the original: the progress messages were dangling
        `print` statements followed by no-op string literals, and the hidden
        dense layers indexed keep_probs with the leftover RCL loop variable
        `r` instead of their own slot 1 + rrcl_num + i.
        """
        length = self.time_point  # remaining temporal length after pooling
        num_clusters = len(np.unique(self.cluster))
        self.conv1 = []
        networks = []
        for i in range(num_clusters):
            conv1 = conv2d(self.x[i],
                           weight_size=[1, self.w_filter_size, np.sum(self.cluster == i),
                                        self.out_channels * np.sum(self.cluster == i) // self.in_channels],
                           nonlinearity=self.nonlinearity,
                           pool=self.pool[0],
                           pool_size=self.p_filter_size,
                           use_dropout=self.use_dropout,
                           keep_prob=self.keep_probs[0],
                           use_batchnorm=self.use_batchnorm,
                           std=self.std,
                           offset=self.offset,
                           scale=self.scale,
                           epsilon=self.epsilon,
                           name='conv2d_cluster' + str(i))
            self.conv1.append(conv1)
            networks.append(conv1)
        print('conv done')
        self.rrcls = []
        rrcl = []
        for r in range(self.rrcl_num):
            rrcl = []
            filter_size = self.w_filter_size
            # Halve (integer division) until the filter fits the remaining length.
            while filter_size > length:
                filter_size //= 2
            for i in range(num_clusters):
                ch = self.out_channels * np.sum(self.cluster == i) // self.in_channels
                tmp = RCL(input=networks[i].get_layer(),
                          weight_size=[1, filter_size, ch, ch],
                          num_iter=self.rrcl_iter[r],
                          nonlinearity=self.nonlinearity,
                          use_dropout=self.use_dropout,
                          keep_prob=self.keep_probs[1 + r],
                          use_batchnorm=self.use_batchnorm,
                          std=self.std,
                          offset=self.offset,
                          scale=self.scale,
                          epsilon=self.epsilon,
                          pool=self.pool[r + 1],
                          pool_size=[1, self.p_filter_size],
                          name='RCL' + str(r) + '_cluster' + str(i))
                rrcl.append(tmp)
            networks = rrcl
            self.rrcls.append(rrcl)
            length //= self.p_filter_size
            print('rrcl{} done {}'.format(r, rrcl[-1].get_layer()))
        networks = [tower.get_layer() for tower in rrcl]
        # NOTE: axis-first tf.concat matches the pre-1.0 TensorFlow signature
        # used throughout this file.
        self.concat = tf.concat(3, networks)
        print('concatenated to {}'.format(self.concat))
        network = tf.reshape(self.concat, shape=[-1, self.ch_sum])
        self.flatten = network
        print('flatten to {}'.format(self.flatten))
        if len(self.forward_layers) == 2:
            # No hidden dense layers: go straight to the linear output layer.
            network = feedforward(input=network,
                                  weight_size=[self.forward_layers[0], self.forward_layers[1]],
                                  nonlinearity=None,
                                  use_dropout=False,
                                  use_batchnorm=False,
                                  std=self.std,
                                  offset=self.offset,
                                  scale=self.scale,
                                  epsilon=self.epsilon,
                                  name='output')
            self.output = network
            self.output_layer = network.get_layer()
            print('feedforward output done, {}'.format(self.output_layer))
            print('model built')
        else:
            self.forwards = []
            for i in range(len(self.forward_layers) - 2):
                network = feedforward(input=network,
                                      weight_size=[self.forward_layers[i], self.forward_layers[i + 1]],
                                      nonlinearity=self.nonlinearity,
                                      use_dropout=self.use_dropout,
                                      # Dense layer i owns keep_probs slot
                                      # 1 + rrcl_num + i (conv takes slot 0,
                                      # RCLs take 1..rrcl_num).
                                      keep_prob=self.keep_probs[1 + self.rrcl_num + i],
                                      use_batchnorm=self.use_batchnorm,
                                      std=self.std,
                                      offset=self.offset,
                                      scale=self.scale,
                                      epsilon=self.epsilon,
                                      name='forward' + str(i))
                self.forwards.append(network)
                network = network.get_layer()
                print('feedforward {} done, {}'.format(i, network))
            network = feedforward(input=network,
                                  weight_size=[self.forward_layers[-2], self.forward_layers[-1]],
                                  nonlinearity=None,
                                  use_dropout=False,
                                  use_batchnorm=False,
                                  std=self.std,
                                  offset=self.offset,
                                  scale=self.scale,
                                  epsilon=self.epsilon,
                                  name='output')
            self.output = network
            self.output_layer = network.get_layer()
            print('feedforward output done, {}'.format(self.output_layer))
            print('model built')
    def train(self, data, target):
        """One optimization step.

        data: [batch, 1, time, all_channels]; channels are routed to the
        per-cluster placeholders via self.cluster. Returns the cost.
        """
        train_feed_dict = {self.x[i]: data[:, :, :, np.where(self.cluster == i)[0]] for i in
                           range(len(np.unique(self.cluster)))}
        train_feed_dict.update({self.y: target})
        opt, cost = self.sess.run((self.optimizer, self.cost),
                                  feed_dict=train_feed_dict)
        return cost
    def test(self, data, target):
        """Evaluate the cost without updating weights."""
        test_feed_dict = {self.x[i]: data[:, :, :, np.where(self.cluster == i)[0]] for i in
                          range(len(np.unique(self.cluster)))}
        test_feed_dict.update({self.y: target})
        cost = self.sess.run(self.cost,
                             feed_dict=test_feed_dict)
        return cost
    def reconstruct(self, data):
        """Forward pass only; returns the network output for data."""
        recon_feed_dict = {self.x[i]: data[:, :, :, np.where(self.cluster == i)[0]] for i in
                           range(len(np.unique(self.cluster)))}
        return self.sess.run(self.output_layer,
                             feed_dict=recon_feed_dict)
    def save(self, save_path='./model.ckpt'):
        """Checkpoint all variables to save_path."""
        saved_path = self.saver.save(self.sess, save_path)
        print("Model saved in file: %s" % saved_path)
    def load(self, load_path='./model.ckpt'):
        """Restore variables from a checkpoint written by save()."""
        self.saver.restore(self.sess, load_path)
        print("Model restored")
    def terminate(self):
        """Close the session and reset the default graph."""
        self.sess.close()
        tf.reset_default_graph()
class LSTM(object):
    """Stacked-LSTM regressor followed by batch-normalized dense layers.

    ``lstm_layers[0]`` is the input feature size; each later entry is an LSTM
    cell size.  ``layers`` gives the dense-head sizes, ``layers[-1]`` being
    the output width.  Trained on squared error with RMSProp.

    NOTE(review): the target placeholder is hard-coded to width 3 rather than
    layers[-1], and `num_sensors` is stored but never used -- confirm intent.

    Bug fixes versus the original:
      * build_model now calls init_weights(), so self.weights / self.biases
        exist (they were never created before, crashing the dense head).
      * init_weights referenced the undefined global name `layers`; it now
        uses self.layers (and self.std instead of a hard-coded 0.01).
      * build_model no longer clobbers self.layers (the configured sizes)
        with its activation list; activations go to self.layer_outputs.
    """
    def __init__(self, std=0.01, batch_size=64, lstm_time=100, lstm_layers=[442,442,442], layers=[442,200,50,3], num_sensors=442,
                 scale=1, offset=0.01, epsilon=0.01, keep_probs=[0.9, 0.8, 0.7],
                 l_rate=0.01, l_decay=0.95, l_step=1000, decay=0.9, momentum=0.9):
        self.std = std
        self.batch_size = batch_size
        self.lstm_time = lstm_time
        self.lstm_layers = lstm_layers
        self.layers = layers
        self.num_sensors = num_sensors
        self.scale = scale
        self.offset = offset
        self.epsilon = epsilon
        self.keep_probs = keep_probs
        # Fail fast on a keep_probs list too short for the dropout sites:
        # one per LSTM cell plus one per hidden dense layer.  The original
        # code would have died later with an opaque IndexError.
        required = (len(lstm_layers) - 1) + (len(layers) - 2)
        if len(keep_probs) < required:
            raise ValueError('\'keep_probs\' needs at least %d entries, got %d' % (required, len(keep_probs)))
        global_step = tf.Variable(0, trainable=False)
        self.l_rate = tf.train.exponential_decay(l_rate, global_step, l_step, l_decay, staircase=True)
        self.decay = decay
        self.momentum = momentum
        self.x = tf.placeholder(tf.float32, [None, lstm_time, lstm_layers[0]], name='x')
        self.y = tf.placeholder(tf.float32, [None, 3], name='y')
        self.build_model()
        # Define loss and optimizer, minimize the squared error.
        self.cost = tf.reduce_mean(tf.pow(self.y - self.output, 2))
        self.optimizer = tf.train.RMSPropOptimizer(self.l_rate, decay=self.decay, momentum=self.momentum).minimize(self.cost, global_step=global_step)
        # Initializing the tensorflow variables (pre-1.0 API, consistent
        # with the rest of this file).
        init = tf.initialize_all_variables()
        # Launch the session.
        self.session_conf = tf.ConfigProto()
        self.session_conf.gpu_options.allow_growth = True
        self.sess = tf.InteractiveSession(config=self.session_conf)
        self.sess.run(init)
        self.saver = tf.train.Saver(max_to_keep=10000)
    def build_model(self):
        """Build the stacked LSTM + dense head; sets self.output.

        Per tf.nn.dynamic_rnn (time_major=False): inputs and outputs are
        shaped [batch_size, max_time, features]; only the last time step of
        the final LSTM's output feeds the dense layers.
        """
        # Bug fix: create the dense-head parameters (never done originally).
        self.weights, self.biases = self.init_weights()
        network = self.x
        self.layer_outputs = []  # per-stage activations, for introspection
        for i in range(len(self.lstm_layers) - 1):
            with tf.variable_scope('lstm' + str(i)):
                cell = tf.nn.rnn_cell.DropoutWrapper(
                    tf.nn.rnn_cell.BasicLSTMCell(self.lstm_layers[i + 1], state_is_tuple=True),
                    self.keep_probs[i])
                outputs, states = tf.nn.dynamic_rnn(cell, network, dtype=tf.float32)
                network = outputs
                self.layer_outputs.append(network)
        # Keep only the last time step: [batch, features].
        network = network[:, network.get_shape().as_list()[1] - 1, :]
        batch_mean, batch_var = tf.nn.moments(network, axes=[0])
        network = tf.nn.batch_normalization(network, batch_mean, batch_var, self.offset, self.scale, self.epsilon)
        self.layer_outputs.append(network)
        # Hidden dense layers: matmul + bias + batch-norm + dropout.
        for i in range(len(self.layers) - 2):
            network = tf.nn.bias_add(tf.matmul(network, self.weights[i]), self.biases[i])
            batch_mean, batch_var = tf.nn.moments(network, axes=[0])
            network = tf.nn.batch_normalization(network, batch_mean, batch_var, self.offset, self.scale, self.epsilon)
            # Dense dropout slots come after the LSTM slots in keep_probs.
            network = tf.nn.dropout(network, self.keep_probs[(len(self.lstm_layers) - 1) + i])
            self.layer_outputs.append(network)
        # Linear output layer (no batch-norm / dropout).
        self.output = tf.nn.bias_add(tf.matmul(network, self.weights[-1]), self.biases[-1])
    def init_weights(self):
        """Create and return (weights, biases) Variables for the dense head.

        Bug fix: the original referenced the undefined global `layers`;
        self.layers holds the configured sizes.
        """
        weights = []
        biases = []
        for i in range(len(self.layers) - 1):
            weights.append(tf.Variable(tf.random_normal([self.layers[i], self.layers[i + 1]], stddev=self.std, dtype=tf.float32)))
            biases.append(tf.Variable(tf.random_normal([self.layers[i + 1]], stddev=self.std, dtype=tf.float32)))
        return weights, biases
    def train(self, data, target):
        """One optimization step; data: [batch, lstm_time, features]; returns cost."""
        opt, cost = self.sess.run((self.optimizer, self.cost),
                                  feed_dict={self.y: target,
                                             self.x: data})
        return cost
    def test(self, data, target):
        """Evaluate the cost without updating weights."""
        cost = self.sess.run(self.cost,
                             feed_dict={self.y: target,
                                        self.x: data})
        return cost
    def reconstruct(self, data):
        """Forward pass only; returns the network output for data."""
        return self.sess.run(self.output,
                             feed_dict={self.x: data})
    def save(self, save_path='./model.ckpt'):
        """Checkpoint all variables to save_path."""
        saved_path = self.saver.save(self.sess, save_path)
        print("Model saved in file: %s" % saved_path)
    def load(self, load_path='./model.ckpt'):
        """Restore variables from a checkpoint written by save()."""
        self.saver.restore(self.sess, load_path)
        print("Model restored")
    def terminate(self):
        """Close the session and reset the default graph."""
        self.sess.close()
        tf.reset_default_graph()
| 48.141717
| 217
| 0.580538
| 6,145
| 48,238
| 4.389422
| 0.05598
| 0.013495
| 0.017536
| 0.021429
| 0.862974
| 0.848997
| 0.827754
| 0.805324
| 0.799763
| 0.777778
| 0
| 0.023922
| 0.300655
| 48,238
| 1,001
| 218
| 48.18981
| 0.775634
| 0.050645
| 0
| 0.761379
| 0
| 0
| 0.025944
| 0.00181
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.002759
| null | null | 0.028966
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d704b4bffb0a137f6aaca278fc2208f0b9542efc
| 6,635
|
py
|
Python
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/_api/v1/keras/layers/__init__.py
|
JustinACoder/H22-GR3-UnrealAI
|
361eb9ef1147f8a2991e5f98c4118cd823184adf
|
[
"MIT"
] | 6
|
2022-02-04T18:12:24.000Z
|
2022-03-21T23:57:12.000Z
|
Lib/site-packages/tensorflow/_api/v1/keras/layers/__init__.py
|
shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings
|
1fa4cd6a566c8745f455fc3d2273208f21f88ced
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/tensorflow/_api/v1/keras/layers/__init__.py
|
shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings
|
1fa4cd6a566c8745f455fc3d2273208f21f88ced
|
[
"bzip2-1.0.6"
] | 1
|
2022-02-08T03:53:23.000Z
|
2022-02-08T03:53:23.000Z
|
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Keras layers API.
"""
from __future__ import print_function
from tensorflow.python.keras import Input
from tensorflow.python.keras.engine import InputLayer
from tensorflow.python.keras.engine import InputSpec
from tensorflow.python.keras.engine import Layer
from tensorflow.python.keras.layers import Activation
from tensorflow.python.keras.layers import ActivityRegularization
from tensorflow.python.keras.layers import Add
from tensorflow.python.keras.layers import AlphaDropout
from tensorflow.python.keras.layers import Average
from tensorflow.python.keras.layers import AveragePooling1D
from tensorflow.python.keras.layers import AveragePooling1D as AvgPool1D
from tensorflow.python.keras.layers import AveragePooling2D
from tensorflow.python.keras.layers import AveragePooling2D as AvgPool2D
from tensorflow.python.keras.layers import AveragePooling3D
from tensorflow.python.keras.layers import AveragePooling3D as AvgPool3D
from tensorflow.python.keras.layers import BatchNormalization
from tensorflow.python.keras.layers import Bidirectional
from tensorflow.python.keras.layers import Concatenate
from tensorflow.python.keras.layers import Conv1D
from tensorflow.python.keras.layers import Conv1D as Convolution1D
from tensorflow.python.keras.layers import Conv2D
from tensorflow.python.keras.layers import Conv2D as Convolution2D
from tensorflow.python.keras.layers import Conv2DTranspose
from tensorflow.python.keras.layers import Conv2DTranspose as Convolution2DTranspose
from tensorflow.python.keras.layers import Conv3D
from tensorflow.python.keras.layers import Conv3D as Convolution3D
from tensorflow.python.keras.layers import Conv3DTranspose
from tensorflow.python.keras.layers import Conv3DTranspose as Convolution3DTranspose
from tensorflow.python.keras.layers import ConvLSTM2D
from tensorflow.python.keras.layers import Cropping1D
from tensorflow.python.keras.layers import Cropping2D
from tensorflow.python.keras.layers import Cropping3D
from tensorflow.python.keras.layers import CuDNNGRU
from tensorflow.python.keras.layers import CuDNNLSTM
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras.layers import DepthwiseConv2D
from tensorflow.python.keras.layers import Dot
from tensorflow.python.keras.layers import Dropout
from tensorflow.python.keras.layers import ELU
from tensorflow.python.keras.layers import Embedding
from tensorflow.python.keras.layers import Flatten
from tensorflow.python.keras.layers import GRU
from tensorflow.python.keras.layers import GRUCell
from tensorflow.python.keras.layers import GaussianDropout
from tensorflow.python.keras.layers import GaussianNoise
from tensorflow.python.keras.layers import GlobalAveragePooling1D
from tensorflow.python.keras.layers import GlobalAveragePooling1D as GlobalAvgPool1D
from tensorflow.python.keras.layers import GlobalAveragePooling2D
from tensorflow.python.keras.layers import GlobalAveragePooling2D as GlobalAvgPool2D
from tensorflow.python.keras.layers import GlobalAveragePooling3D
from tensorflow.python.keras.layers import GlobalAveragePooling3D as GlobalAvgPool3D
from tensorflow.python.keras.layers import GlobalMaxPool1D
from tensorflow.python.keras.layers import GlobalMaxPool1D as GlobalMaxPooling1D
from tensorflow.python.keras.layers import GlobalMaxPool2D
from tensorflow.python.keras.layers import GlobalMaxPool2D as GlobalMaxPooling2D
from tensorflow.python.keras.layers import GlobalMaxPool3D
from tensorflow.python.keras.layers import GlobalMaxPool3D as GlobalMaxPooling3D
from tensorflow.python.keras.layers import LSTM
from tensorflow.python.keras.layers import LSTMCell
from tensorflow.python.keras.layers import Lambda
from tensorflow.python.keras.layers import LeakyReLU
from tensorflow.python.keras.layers import LocallyConnected1D
from tensorflow.python.keras.layers import LocallyConnected2D
from tensorflow.python.keras.layers import Masking
from tensorflow.python.keras.layers import MaxPool1D
from tensorflow.python.keras.layers import MaxPool1D as MaxPooling1D
from tensorflow.python.keras.layers import MaxPool2D
from tensorflow.python.keras.layers import MaxPool2D as MaxPooling2D
from tensorflow.python.keras.layers import MaxPool3D
from tensorflow.python.keras.layers import MaxPool3D as MaxPooling3D
from tensorflow.python.keras.layers import Maximum
from tensorflow.python.keras.layers import Minimum
from tensorflow.python.keras.layers import Multiply
from tensorflow.python.keras.layers import PReLU
from tensorflow.python.keras.layers import Permute
from tensorflow.python.keras.layers import RNN
from tensorflow.python.keras.layers import ReLU
from tensorflow.python.keras.layers import RepeatVector
from tensorflow.python.keras.layers import Reshape
from tensorflow.python.keras.layers import SeparableConv1D
from tensorflow.python.keras.layers import SeparableConv1D as SeparableConvolution1D
from tensorflow.python.keras.layers import SeparableConv2D
from tensorflow.python.keras.layers import SeparableConv2D as SeparableConvolution2D
from tensorflow.python.keras.layers import SimpleRNN
from tensorflow.python.keras.layers import SimpleRNNCell
from tensorflow.python.keras.layers import Softmax
from tensorflow.python.keras.layers import SpatialDropout1D
from tensorflow.python.keras.layers import SpatialDropout2D
from tensorflow.python.keras.layers import SpatialDropout3D
from tensorflow.python.keras.layers import StackedRNNCells
from tensorflow.python.keras.layers import Subtract
from tensorflow.python.keras.layers import ThresholdedReLU
from tensorflow.python.keras.layers import TimeDistributed
from tensorflow.python.keras.layers import UpSampling1D
from tensorflow.python.keras.layers import UpSampling2D
from tensorflow.python.keras.layers import UpSampling3D
from tensorflow.python.keras.layers import Wrapper
from tensorflow.python.keras.layers import ZeroPadding1D
from tensorflow.python.keras.layers import ZeroPadding2D
from tensorflow.python.keras.layers import ZeroPadding3D
from tensorflow.python.keras.layers import add
from tensorflow.python.keras.layers import average
from tensorflow.python.keras.layers import concatenate
from tensorflow.python.keras.layers import dot
from tensorflow.python.keras.layers import maximum
from tensorflow.python.keras.layers import minimum
from tensorflow.python.keras.layers import multiply
from tensorflow.python.keras.layers import subtract
# Remove the __future__ `print_function` re-export (presumably imported near
# the top of this generated module — TODO confirm import site) so it does not
# leak into this module's public namespace alongside the keras layer symbols.
del print_function
| 56.228814
| 85
| 0.855916
| 824
| 6,635
| 6.882282
| 0.154126
| 0.30753
| 0.380885
| 0.476107
| 0.810263
| 0.810263
| 0.484042
| 0.146711
| 0.146711
| 0.146711
| 0
| 0.012124
| 0.09254
| 6,635
| 117
| 86
| 56.709402
| 0.929746
| 0.021703
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.990909
| 0
| 0.990909
| 0.018182
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
d71475d8777bb43a98856cbe2fb897a258fe2575
| 13,604
|
py
|
Python
|
PhysicsTools/NanoAOD/countEvents.py
|
GonzalezFJR/cmssw
|
8f453e1b07c4a6d79b9e52190f6f68ec89959c03
|
[
"Apache-2.0"
] | null | null | null |
PhysicsTools/NanoAOD/countEvents.py
|
GonzalezFJR/cmssw
|
8f453e1b07c4a6d79b9e52190f6f68ec89959c03
|
[
"Apache-2.0"
] | null | null | null |
PhysicsTools/NanoAOD/countEvents.py
|
GonzalezFJR/cmssw
|
8f453e1b07c4a6d79b9e52190f6f68ec89959c03
|
[
"Apache-2.0"
] | null | null | null |
from ROOT import *
# xrootd URLs of the miniAOD files (Notre Dame CRC storage element) whose
# event counts are summed by the loop below.  All files belong to one
# production request (HIG-RunIIFall17MiniAOD-00821ND); only the job index
# in the file name differs.
samples = [
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_24549.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_26688.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_30946.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_32399.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_33560.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_35098.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_37739.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_39302.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_40299.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_42581.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_43587.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_44513.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_45508.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_46545.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_47643.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_48384.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_49123.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_50510.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_51285.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_52141.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_52702.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_53616.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_54168.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_54861.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_56908.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_57104.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_57466.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_58588.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_58969.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_60105.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_60620.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_62038.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_62333.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_63605.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_63973.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_64430.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_64592.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_65026.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_66297.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_66506.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_66963.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_67429.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_67922.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_67923.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_67924.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_68432.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_68914.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_69136.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_69400.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_69677.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_70471.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_70770.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_70771.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_71055.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_71342.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_71842.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_71843.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_72134.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_72135.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_72475.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_72765.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_73499.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_73759.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_74051.root',
    'root://deepthought.crc.nd.edu//store/user/kmohrman/FullProduction/Round6/Batch6/postLHE_step/v2/mAOD_step_ttllNuNuJetNoHiggs_HanV4ttXJetStartPtChecks_run2/HIG-RunIIFall17MiniAOD-00821ND_74052.root'
]
# Sum the number of entries in the 'Events' tree over all sample files.
nEv = 0
for s in samples:
    f = TFile.Open(s)
    print('file = ', s)
    # TFile.Open returns None (or a "zombie" TFile) when the remote xrootd
    # open fails; the original crashed on the f.Events access below in that
    # case.  Skip unreadable files with a warning instead, so one bad file
    # does not abort the whole count.
    if (f is None) or f.IsZombie():
        print(' >> WARNING: could not open file, skipping')
        continue
    ev = f.Events.GetEntries()
    nEv += ev
    print(' >> nEvents = ', ev)
    f.Close()
print('nEv = ', nEv)
| 170.05
| 205
| 0.849309
| 1,591
| 13,604
| 7.01697
| 0.065368
| 0.087334
| 0.104801
| 0.116446
| 0.960319
| 0.960319
| 0.960319
| 0.960319
| 0.960319
| 0.960319
| 0
| 0.084505
| 0.03793
| 13,604
| 79
| 206
| 172.202532
| 0.76849
| 0
| 0
| 0
| 0
| 0.844156
| 0.939062
| 0.936489
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.012987
| 0
| 0.012987
| 0.038961
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
d71503b69bdb90744f030d4552e673e955764eaf
| 119
|
py
|
Python
|
lambda/lambda.py
|
Journera/glutil
|
aeb75974ae162456617334db4bd235c2101c8fd5
|
[
"BSD-3-Clause"
] | 14
|
2019-06-19T20:14:38.000Z
|
2020-05-21T18:25:02.000Z
|
lambda/lambda.py
|
Journera/glutil
|
aeb75974ae162456617334db4bd235c2101c8fd5
|
[
"BSD-3-Clause"
] | 2
|
2019-06-19T17:39:31.000Z
|
2019-10-29T18:24:20.000Z
|
lambda/lambda.py
|
Journera/glutil
|
aeb75974ae162456617334db4bd235c2101c8fd5
|
[
"BSD-3-Clause"
] | 2
|
2019-10-04T04:33:06.000Z
|
2020-05-21T19:25:31.000Z
|
import glutil.serverless_function
def handler(event, context):
    """AWS Lambda entry point: delegate to glutil's serverless handler.

    Propagates the delegate's return value back to Lambda (the original
    wrapper discarded it, so any response produced by
    ``glutil.serverless_function.handle`` was lost).  Callers that ignore
    the return value are unaffected.
    """
    return glutil.serverless_function.handle(event, context)
| 19.833333
| 53
| 0.806723
| 14
| 119
| 6.714286
| 0.642857
| 0.340426
| 0.510638
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109244
| 119
| 5
| 54
| 23.8
| 0.886792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d7687b433c69a27dec11e381c355fa3f31236be7
| 2,161
|
py
|
Python
|
TestDatabase.py
|
patrick310/flask-wtform-tutorial
|
b4b9581cdd457dfd54238e806bcabcd01c5bb9ab
|
[
"MIT"
] | null | null | null |
TestDatabase.py
|
patrick310/flask-wtform-tutorial
|
b4b9581cdd457dfd54238e806bcabcd01c5bb9ab
|
[
"MIT"
] | null | null | null |
TestDatabase.py
|
patrick310/flask-wtform-tutorial
|
b4b9581cdd457dfd54238e806bcabcd01c5bb9ab
|
[
"MIT"
] | 1
|
2021-06-07T14:20:36.000Z
|
2021-06-07T14:20:36.000Z
|
import unittest
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Date
class SimpleSqliteTest(unittest.TestCase):
    """CRUD smoke test against an in-memory SQLite engine."""

    def setUp(self):
        print("Connecting DB")
        # "sqlite://" with no path is SQLAlchemy's in-memory SQLite database.
        db_string = "sqlite://"
        self.db = create_engine(db_string)

    def tearDown(self):
        print("Disconnecting DB")
        # Dispose the engine's connection pool explicitly before dropping the
        # reference, so its resources are released deterministically rather
        # than whenever the garbage collector runs.
        self.db.dispose()
        del self.db

    def testCanCreateTableAndManageData(self):
        """Exercise CREATE/INSERT/SELECT/UPDATE/DELETE in one pass."""
        # Create
        self.db.execute("CREATE TABLE IF NOT EXISTS films (title text, director text, year text)")
        self.db.execute(
            "INSERT INTO films (title, director, year) VALUES ('Doctor Strange', 'Scott Derrickson', '2016')"
        )
        # Read
        result_set = self.db.execute("SELECT * FROM films")
        for r in result_set:
            # Fixed duplicated word in the failure message ("read read").
            self.assertEqual(int(r['year']), 2016, "Test value was not read back correctly.")
        # Update
        self.db.execute("UPDATE films SET title='Some2016Film' WHERE year='2016'")
        # Delete
        self.db.execute("DELETE FROM films WHERE year='2016'")
class PostgresTest(unittest.TestCase):
    """Same CRUD smoke test as the SQLite case, but against a live Postgres."""

    def setUp(self):
        print("Connecting DB")
        # SECURITY(review): real credentials and a public host IP are
        # hard-coded in source control.  This password must be considered
        # compromised — rotate it and load the DSN from an environment
        # variable or secrets store instead.  Left unchanged here so the
        # test's behavior is not silently altered.
        db_string = 'postgresql://uzbiy6sxtg1wi:smTY464FvRGfYMB@35.202.17.55/dbekqfrcjb4dfy'
        self.db = create_engine(db_string)

    def tearDown(self):
        print("Disconnecting DB")
        # Dispose the pool so the network connections are closed
        # deterministically instead of waiting on garbage collection.
        self.db.dispose()
        del self.db

    def testCanCreateTableAndManageData(self):
        """Exercise CREATE/INSERT/SELECT/UPDATE/DELETE in one pass."""
        # Create
        self.db.execute("CREATE TABLE IF NOT EXISTS films (title text, director text, year text)")
        self.db.execute(
            "INSERT INTO films (title, director, year) VALUES ('Doctor Strange', 'Scott Derrickson', '2016')"
        )
        # Read
        result_set = self.db.execute("SELECT * FROM films")
        for r in result_set:
            # Fixed duplicated word in the failure message ("read read").
            self.assertEqual(int(r['year']), 2016, "Test value was not read back correctly.")
        # Update
        self.db.execute("UPDATE films SET title='Some2016Film' WHERE year='2016'")
        # Delete
        self.db.execute("DELETE FROM films WHERE year='2016'")
# Allow running this module directly: discover and run the TestCase
# classes defined above.
if __name__ == '__main__':
    unittest.main()
| 31.779412
| 109
| 0.639056
| 257
| 2,161
| 5.29572
| 0.287938
| 0.061719
| 0.095518
| 0.035268
| 0.80529
| 0.80529
| 0.80529
| 0.80529
| 0.80529
| 0.80529
| 0
| 0.033951
| 0.250347
| 2,161
| 67
| 110
| 32.253731
| 0.806173
| 0.0236
| 0
| 0.714286
| 0
| 0.047619
| 0.376487
| 0.033317
| 0
| 0
| 0
| 0
| 0.047619
| 1
| 0.142857
| false
| 0
| 0.095238
| 0
| 0.285714
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d76c33d94ab7040d71702816520dca8eeb47ee4f
| 15,133
|
py
|
Python
|
apem.py
|
DyningAida/apem
|
c90afc4cd06315f42f6300d5668909ec1e4e2eb0
|
[
"MIT"
] | null | null | null |
apem.py
|
DyningAida/apem
|
c90afc4cd06315f42f6300d5668909ec1e4e2eb0
|
[
"MIT"
] | null | null | null |
apem.py
|
DyningAida/apem
|
c90afc4cd06315f42f6300d5668909ec1e4e2eb0
|
[
"MIT"
] | null | null | null |
import speech_recognition as sr
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
class Apem(object):
def __init__(self, npm, paswd):
self.npm = npm
self.paswd = paswd
def masuk(self):
self.opsi = Options()
self.opsi.headless = False
self.cap = webdriver.common.desired_capabilities.DesiredCapabilities().FIREFOX
self.cap['marionette'] = True
self.driver= webdriver.Firefox()
self.driver.get('http://siap.poltekpos.ac.id/')
def ceknilai1(self):
self.opsi = Options()
self.opsi.headless = False
self.cap = webdriver.common.desired_capabilities.DesiredCapabilities().FIREFOX
self.cap['marionette'] = True
self.driver = webdriver.Firefox()
self.driver.get('http://siap.poltekpos.ac.id/siap/besan.depan.php')
self.driver.find_element_by_name('user_name').send_keys(self.npm)
self.driver.find_element_by_name('user_pass').send_keys(self.paswd)
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[1]/tbody/tr/td[2]/table[2]/tbody/tr[1]/td[2]/div/form/input[4]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[1]/tbody/tr/td[1]/table[2]/tbody/tr[1]/td[2]/a[5]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[3]/tbody/tr[1]/td[2]/p[1]/table/tbody/tr/td[3]/select/option[5]').click()
self.driver.find_element_by_class_name('button').click()
def ceknilai2(self):
self.opsi = Options()
self.opsi.headless = False
self.cap = webdriver.common.desired_capabilities.DesiredCapabilities().FIREFOX
self.cap['marionette'] = True
self.driver = webdriver.Firefox()
self.driver.get('http://siap.poltekpos.ac.id/siap/besan.depan.php')
self.driver.find_element_by_name('user_name').send_keys(self.npm)
self.driver.find_element_by_name('user_pass').send_keys(self.paswd)
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[1]/tbody/tr/td[2]/table[2]/tbody/tr[1]/td[2]/div/form/input[4]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[1]/tbody/tr/td[1]/table[2]/tbody/tr[1]/td[2]/a[5]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[3]/tbody/tr[1]/td[2]/p[1]/table/tbody/tr/td[3]/select/option[4]').click()
self.driver.find_element_by_class_name('button').click()
def ceknilai3(self):
self.opsi = Options()
self.opsi.headless = False
self.cap = webdriver.common.desired_capabilities.DesiredCapabilities().FIREFOX
self.cap['marionette'] = True
self.driver = webdriver.Firefox()
self.driver.get('http://siap.poltekpos.ac.id/siap/besan.depan.php')
self.driver.find_element_by_name('user_name').send_keys(self.npm)
self.driver.find_element_by_name('user_pass').send_keys(self.paswd)
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[1]/tbody/tr/td[2]/table[2]/tbody/tr[1]/td[2]/div/form/input[4]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[1]/tbody/tr/td[1]/table[2]/tbody/tr[1]/td[2]/a[5]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[3]/tbody/tr[1]/td[2]/p[1]/table/tbody/tr/td[3]/select/option[2]').click()
self.driver.find_element_by_class_name('button').click()
def ceknilaipendek(self):
self.opsi = Options()
self.opsi.headless = False
self.cap = webdriver.common.desired_capabilities.DesiredCapabilities().FIREFOX
self.cap['marionette'] = True
self.driver = webdriver.Firefox()
self.driver.get('http://siap.poltekpos.ac.id/siap/besan.depan.php')
self.driver.find_element_by_name('user_name').send_keys(self.npm)
self.driver.find_element_by_name('user_pass').send_keys(self.paswd)
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[1]/tbody/tr/td[2]/table[2]/tbody/tr[1]/td[2]/div/form/input[4]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[1]/tbody/tr/td[1]/table[2]/tbody/tr[1]/td[2]/a[5]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[3]/tbody/tr[1]/td[2]/p[1]/table/tbody/tr/td[3]/select/option[3]').click()
self.driver.find_element_by_class_name('button').click()
def login(self):
self.opsi = Options()
self.opsi.headless = False
self.cap = webdriver.common.desired_capabilities.DesiredCapabilities().FIREFOX
self.cap['marionette'] = True
self.driver= webdriver.Firefox()
self.driver.get('http://siap.poltekpos.ac.id/siap/besan.depan.php')
self.driver.find_element_by_name('user_name').send_keys(self.npm)
self.driver.find_element_by_name('user_pass').send_keys(self.paswd)
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[1]/tbody/tr/td[2]/table[2]/tbody/tr[1]/td[2]/div/form/input[4]').click()
def kalenderganjil2019(self):
self.opsi = Options()
self.opsi.headless = False
self.cap = webdriver.common.desired_capabilities.DesiredCapabilities().FIREFOX
self.cap['marionette'] = True
self.driver = webdriver.Firefox()
self.driver.get('http://siap.poltekpos.ac.id/siap/besan.depan.php')
self.driver.find_element_by_name('user_name').send_keys(self.npm)
self.driver.find_element_by_name('user_pass').send_keys(self.paswd)
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[1]/tbody/tr/td[2]/table[2]/tbody/tr[1]/td[2]/div/form/input[4]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[1]/tbody/tr/td[1]/table[2]/tbody/tr[1]/td[2]/a[6]').click()
self.driver.find_element_by_class_name('textbox').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[3]/tbody/tr[1]/td[2]/p/table/tbody/tr[2]/td[2]/select/option[1]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[3]/tbody/tr[1]/td[2]/p[1]/table/tbody/tr[4]/td[2]/select/option[2]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[3]/tbody/tr[1]/td[2]/p/table/tbody/tr[5]/td/input').click()
def kalendergenap2019(self):
self.opsi = Options()
self.opsi.headless = False
self.cap = webdriver.common.desired_capabilities.DesiredCapabilities().FIREFOX
self.cap['marionette'] = True
self.driver = webdriver.Firefox()
self.driver.get('http://siap.poltekpos.ac.id/siap/besan.depan.php')
self.driver.find_element_by_name('user_name').send_keys(self.npm)
self.driver.find_element_by_name('user_pass').send_keys(self.paswd)
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[1]/tbody/tr/td[2]/table[2]/tbody/tr[1]/td[2]/div/form/input[4]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[1]/tbody/tr/td[1]/table[2]/tbody/tr[1]/td[2]/a[6]').click()
self.driver.find_element_by_class_name('textbox').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[3]/tbody/tr[1]/td[2]/p/table/tbody/tr[2]/td[2]/select/option[2]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[3]/tbody/tr[1]/td[2]/p[1]/table/tbody/tr[4]/td[2]/select/option[2]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[3]/tbody/tr[1]/td[2]/p/table/tbody/tr[5]/td/input').click()
def kalendergenap2018(self):
self.opsi = Options()
self.opsi.headless = False
self.cap = webdriver.common.desired_capabilities.DesiredCapabilities().FIREFOX
self.cap['marionette'] = True
self.driver = webdriver.Firefox()
self.driver.get('http://siap.poltekpos.ac.id/siap/besan.depan.php')
self.driver.find_element_by_name('user_name').send_keys(self.npm)
self.driver.find_element_by_name('user_pass').send_keys(self.paswd)
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[1]/tbody/tr/td[2]/table[2]/tbody/tr[1]/td[2]/div/form/input[4]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[1]/tbody/tr/td[1]/table[2]/tbody/tr[1]/td[2]/a[6]').click()
self.driver.find_element_by_class_name('textbox').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[3]/tbody/tr[1]/td[2]/p/table/tbody/tr[2]/td[2]/select/option[3]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[3]/tbody/tr[1]/td[2]/p[1]/table/tbody/tr[4]/td[2]/select/option[2]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[3]/tbody/tr[1]/td[2]/p/table/tbody/tr[5]/td/input').click()
def kalenderganjil2018(self):
self.opsi = Options()
self.opsi.headless = False
self.cap = webdriver.common.desired_capabilities.DesiredCapabilities().FIREFOX
self.cap['marionette'] = True
self.driver = webdriver.Firefox()
self.driver.get('http://siap.poltekpos.ac.id/siap/besan.depan.php')
self.driver.find_element_by_name('user_name').send_keys(self.npm)
self.driver.find_element_by_name('user_pass').send_keys(self.paswd)
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[1]/tbody/tr/td[2]/table[2]/tbody/tr[1]/td[2]/div/form/input[4]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[1]/tbody/tr/td[1]/table[2]/tbody/tr[1]/td[2]/a[6]').click()
self.driver.find_element_by_class_name('textbox').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[3]/tbody/tr[1]/td[2]/p/table/tbody/tr[2]/td[2]/select/option[4]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[3]/tbody/tr[1]/td[2]/p[1]/table/tbody/tr[4]/td[2]/select/option[2]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[3]/tbody/tr[1]/td[2]/p/table/tbody/tr[5]/td/input').click()
def kalendergenap2017(self):
self.opsi = Options()
self.opsi.headless = False
self.cap = webdriver.common.desired_capabilities.DesiredCapabilities().FIREFOX
self.cap['marionette'] = True
self.driver = webdriver.Firefox()
self.driver.get('http://siap.poltekpos.ac.id/siap/besan.depan.php')
self.driver.find_element_by_name('user_name').send_keys(self.npm)
self.driver.find_element_by_name('user_pass').send_keys(self.paswd)
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[1]/tbody/tr/td[2]/table[2]/tbody/tr[1]/td[2]/div/form/input[4]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[1]/tbody/tr/td[1]/table[2]/tbody/tr[1]/td[2]/a[6]').click()
self.driver.find_element_by_class_name('textbox').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[3]/tbody/tr[1]/td[2]/p/table/tbody/tr[2]/td[2]/select/option[5]').click() #issue 60
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[3]/tbody/tr[1]/td[2]/p[1]/table/tbody/tr[4]/td[2]/select/option[2]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[3]/tbody/tr[1]/td[2]/p/table/tbody/tr[5]/td/input').click()
def kalenderganjil2017(self):
self.opsi = Options()
self.opsi.headless = False
self.cap = webdriver.common.desired_capabilities.DesiredCapabilities().FIREFOX
self.cap['marionette'] = True
self.driver = webdriver.Firefox()
self.driver.get('http://siap.poltekpos.ac.id/siap/besan.depan.php')
self.driver.find_element_by_name('user_name').send_keys(self.npm)
self.driver.find_element_by_name('user_pass').send_keys(self.paswd)
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[1]/tbody/tr/td[2]/table[2]/tbody/tr[1]/td[2]/div/form/input[4]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[1]/tbody/tr/td[1]/table[2]/tbody/tr[1]/td[2]/a[6]').click()
self.driver.find_element_by_class_name('textbox').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[3]/tbody/tr[1]/td[2]/p/table/tbody/tr[2]/td[2]/select/option[6]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[3]/tbody/tr[1]/td[2]/p[1]/table/tbody/tr[4]/td[2]/select/option[2]').click()
self.driver.find_element_by_xpath('/html/body/table/tbody/tr[5]/td/table[3]/tbody/tr[1]/td[2]/p/table/tbody/tr[5]/td/input').click()
def speak(self):
    """Listen once on the microphone and run the matching voice command.

    The utterance is transcribed via Google Speech Recognition (Indonesian,
    ``id-ID``) and compared against the known command phrases; on an exact
    match the corresponding navigation method is invoked. Recognition
    failures are printed, never raised.
    """
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("SAY SOMETHING, PLEASE")
        audio = r.listen(source)
        try:
            # FIX: recognize once and reuse the transcript. The original
            # called recognize_google() again for every comparison — up to
            # 13 network round trips per utterance, each of which could
            # even return a different transcript.
            text = r.recognize_google(audio, language='id-ID')
            print("TEXT : " + text)
            # Dispatch table: spoken phrase -> bound navigation method.
            commands = {
                "siap": self.masuk,
                "login siap": self.login,
                "Cek nilai semester 1": self.ceknilai1,
                "Cek nilai semester 2": self.ceknilai2,
                "Cek nilai semester 3": self.ceknilai3,
                "Cek nilai semester pendek": self.ceknilaipendek,
                "kalender akademik ganjil 2017": self.kalenderganjil2017,
                "kalender akademik ganjil 2018": self.kalenderganjil2018,
                "kalender akademik ganjil 2019": self.kalenderganjil2019,
                "kalender akademik genap 2017": self.kalendergenap2017,
                "kalender akademik genap 2018": self.kalendergenap2018,
                "kalender akademik genap 2019": self.kalendergenap2019,
            }
            action = commands.get(text)
            if action is not None:
                action()
        except Exception as e:
            print(e)
            print("error")
        print("Time is over, thanks")
| 70.714953
| 164
| 0.660543
| 2,368
| 15,133
| 4.090794
| 0.054899
| 0.093218
| 0.108393
| 0.162589
| 0.901105
| 0.901105
| 0.901105
| 0.900898
| 0.853928
| 0.853515
| 0
| 0.029494
| 0.153109
| 15,133
| 214
| 165
| 70.714953
| 0.726358
| 0.000529
| 0
| 0.638498
| 0
| 0.201878
| 0.360595
| 0.272793
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065728
| false
| 0.051643
| 0.014085
| 0
| 0.084507
| 0.023474
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
ad82174840c8222cb56df871d7770383b4134a5a
| 4,817
|
py
|
Python
|
Support.records/Integrate.Validated.Results.for.NA12878.and.CHM1.py
|
mills-lab/svelter
|
d318b06d588483fe8a8ebcac8c8a6c7878f2c2b3
|
[
"MIT"
] | 21
|
2015-11-02T06:31:52.000Z
|
2021-12-20T03:14:04.000Z
|
Support.records/Integrate.Validated.Results.for.NA12878.and.CHM1.py
|
mills-lab/svelter
|
d318b06d588483fe8a8ebcac8c8a6c7878f2c2b3
|
[
"MIT"
] | 14
|
2016-03-02T21:12:53.000Z
|
2019-08-02T20:01:02.000Z
|
Support.records/Integrate.Validated.Results.for.NA12878.and.CHM1.py
|
mills-lab/svelter
|
d318b06d588483fe8a8ebcac8c8a6c7878f2c2b3
|
[
"MIT"
] | 6
|
2015-08-19T18:33:02.000Z
|
2017-05-16T03:42:57.000Z
|
import os
def path_modify(path):
    """Return *path* with a trailing ``'/'`` guaranteed.

    FIX: the original indexed ``path[-1]`` and therefore raised IndexError
    on an empty string; ``str.endswith`` handles '' safely ('' -> '/').
    """
    if not path.endswith('/'):
        path += '/'
    return path
def Vali_files_readin(Vali_Folder):
    """List the full paths of every ``*.PacVal`` file directly inside
    *Vali_Folder* (no recursion), in ``os.listdir`` order."""
    # Inline of path_modify(): guarantee a trailing slash on the folder.
    folder = Vali_Folder if Vali_Folder[-1] == '/' else Vali_Folder + '/'
    return [folder + entry
            for entry in os.listdir(folder)
            if entry.split('.')[-1] == 'PacVal']
def Vali_file_read(filein, Vali_hash, score_cff):
    """Fold one PacVal result file into the (type, subtype) count table.

    ``Vali_hash[type][subtype]`` is a two-element list:
    ``[events whose final-column score exceeds score_cff, total events]``.
    The first line of the file is treated as a header and skipped.
    Returns the (mutated) Vali_hash.
    """
    with open(filein) as handle:
        handle.readline()  # discard the header row
        for record in handle:
            fields = record.strip().split()
            counts = Vali_hash.setdefault(fields[4], {}).setdefault(fields[5], [0, 0])
            if float(fields[-1]) > score_cff:
                counts[0] += 1
            counts[1] += 1
    return Vali_hash
# ---- NA12878 section (Python 2 script; note the print statements) ----
# Load the curated subtype table into SV_hash[source][sv_type][subtype],
# then merge in the validation counts produced from the *.PacVal files.
ppre='/scratch/remills_flux/xuefzhao/NA12878.NGS/hg19/VaLoR_Vali/'
fin=open(ppre+'NA12878.Subtype.SV')
SV_hash={}
for line in fin:
    pin=line.strip().split()
    if not pin==[]:
        if not pin[0] in SV_hash.keys():
            SV_hash[pin[0]]={}
        if not pin[1] in SV_hash[pin[0]].keys():
            SV_hash[pin[0]][pin[1]]={}
        if not pin[2] in SV_hash[pin[0]][pin[1]].keys():
            SV_hash[pin[0]][pin[1]][pin[2]]=[]
# NOTE(review): fin is never closed in this section (the CHM1 section
# below does call fin.close()).
score_cff=0.1
Vali_Folder=ppre
Vali_files=Vali_files_readin(Vali_Folder)
Vali_hash={}
for k1 in Vali_files:
    Vali_hash=Vali_file_read(k1,Vali_hash,score_cff)
for k1 in SV_hash.keys():
    for k2 in SV_hash[k1].keys():
        for k3 in SV_hash[k1][k2].keys():
            # NOTE(review): unguarded lookup — raises KeyError if (k2,k3)
            # has no validation record; the CHM1 section guards this with
            # an `in` check. Presumably safe for this dataset — verify.
            SV_hash[k1][k2][k3]=Vali_hash[k2][k3]
            print ' '.join([str(i) for i in [k1,k2,k3]+SV_hash[k1][k2][k3]])
# Aggregate [validated, total] counts for four complex SV classes.
SVs=['INV_DUP','INV_DEL','DEL_DUP','DEL_DUP_INV']
Figure_hash={}
for k1 in SVs:
    Figure_hash[k1]=[0,0]
    for k2 in SV_hash[k1].keys():
        for k3 in SV_hash[k1][k2].keys():
            print [k1,k2,k3]
            Figure_hash[k1][0]+=SV_hash[k1][k2][k3][0]
            Figure_hash[k1][1]+=SV_hash[k1][k2][k3][1]
#print validated and all SV numbers for the four SVs listed above
for k1 in Figure_hash.keys():
    print ' '.join([str(i) for i in [k1]+Figure_hash[k1]])
# Same aggregation again, now over every SV class in the table.
Figure_hash={}
for k1 in SV_hash.keys():
    Figure_hash[k1]=[0,0]
    for k2 in SV_hash[k1].keys():
        for k3 in SV_hash[k1][k2].keys():
            Figure_hash[k1][0]+=SV_hash[k1][k2][k3][0]
            Figure_hash[k1][1]+=SV_hash[k1][k2][k3][1]
# Collect every (type, subtype) pair present in the curated table, then
# report validated events that the curated table did not list.
listed_SVs=[]
for k1 in SV_hash.keys():
    for k2 in SV_hash[k1].keys():
        for k3 in SV_hash[k1][k2].keys():
            listed_SVs.append([k2,k3])
for k1 in Vali_hash:
    for k2 in Vali_hash[k1].keys():
        if not [k1,k2] in listed_SVs:
            print ' '.join([str(i) for i in [k1,k2]+Vali_hash[k1][k2]])
# Per-class totals plus the validated/total ratio.
for k1 in Figure_hash.keys():
    print ' '.join([str(i) for i in [k1]+Figure_hash[k1]+[float(Figure_hash[k1][0])/float(Figure_hash[k1][1])]])
# ---- CHM1 (IL500) section: same pipeline as above with a stricter score
# cutoff (0.2 vs 0.1) and guarded lookups. ----
ppre='/scratch/remills_flux/xuefzhao/CHM1/IL500/hg19/SVelter.rec4/'
fin=open(ppre+'CHM1.Subtype.SV')
SV_hash={}
for line in fin:
    pin=line.strip().split()
    if not pin==[]:
        if not pin[0] in SV_hash.keys():
            SV_hash[pin[0]]={}
        if not pin[1] in SV_hash[pin[0]].keys():
            SV_hash[pin[0]][pin[1]]={}
        if not pin[2] in SV_hash[pin[0]][pin[1]].keys():
            SV_hash[pin[0]][pin[1]][pin[2]]=[]
fin.close()
score_cff=0.2
Vali_Folder=ppre
Vali_files=Vali_files_readin(Vali_Folder)
Vali_hash={}
for k1 in Vali_files:
    Vali_hash=Vali_file_read(k1,Vali_hash,score_cff)
# Merge counts in, skipping subtypes absent from the validation results.
for k1 in SV_hash.keys():
    for k2 in SV_hash[k1].keys():
        for k3 in SV_hash[k1][k2].keys():
            if k3 in Vali_hash[k2].keys():
                SV_hash[k1][k2][k3]=Vali_hash[k2][k3]
                print ' '.join([str(i) for i in [k1,k2,k3]+SV_hash[k1][k2][k3]])
# Aggregate [validated, total] for four complex SV classes.
SVs=['DUP_INV','DEL_INV','DEL_DUP','DEL_DUP_INV']
Figure_hash={}
for k1 in SVs:
    if k1 in SV_hash.keys():
        Figure_hash[k1]=[0,0]
        for k2 in SV_hash[k1].keys():
            for k3 in SV_hash[k1][k2].keys():
                print [k1,k2,k3]
                if len(SV_hash[k1][k2][k3])>0:
                    Figure_hash[k1][0]+=SV_hash[k1][k2][k3][0]
                    Figure_hash[k1][1]+=SV_hash[k1][k2][k3][1]
for k1 in Figure_hash.keys():
    print ' '.join([str(i) for i in [k1]+Figure_hash[k1]])
# Same aggregation over every SV class.
Figure_hash={}
for k1 in SV_hash.keys():
    # NOTE(review): this membership test is always true here (k1 comes
    # from SV_hash itself) — likely copied from the loop above.
    if k1 in SV_hash.keys():
        Figure_hash[k1]=[0,0]
        for k2 in SV_hash[k1].keys():
            for k3 in SV_hash[k1][k2].keys():
                print [k1,k2,k3]
                if len(SV_hash[k1][k2][k3])>0:
                    Figure_hash[k1][0]+=SV_hash[k1][k2][k3][0]
                    Figure_hash[k1][1]+=SV_hash[k1][k2][k3][1]
for k1 in Figure_hash.keys():
    print ' '.join([str(i) for i in [k1]+Figure_hash[k1]])
# Report validated events absent from the curated table.
listed_SVs=[]
for k1 in SV_hash.keys():
    for k2 in SV_hash[k1].keys():
        for k3 in SV_hash[k1][k2].keys():
            listed_SVs.append([k2,k3])
for k1 in Vali_hash:
    for k2 in Vali_hash[k1].keys():
        if not [k1,k2] in listed_SVs:
            print ' '.join([str(i) for i in [k1,k2]+Vali_hash[k1][k2]])
# Persist the integrated validation table with the validated/total ratio.
fo=open('/scratch/remills_flux/xuefzhao/CHM1/IL500/hg19/SVelter/VaLoR_Vali/Integrated.Validated.Result.Cff0.2','w')
for k1 in sorted(Vali_hash.keys()):
    for k2 in sorted(Vali_hash[k1].keys()):
        print >>fo,' '.join([str(i) for i in [k1,k2]+Vali_hash[k1][k2]+[float(Vali_hash[k1][k2][0])/float(Vali_hash[k1][k2][1])]])
fo.close()
| 26.467033
| 124
| 0.652688
| 961
| 4,817
| 3.105099
| 0.084287
| 0.112601
| 0.080429
| 0.073727
| 0.790885
| 0.747989
| 0.742627
| 0.731233
| 0.700402
| 0.700402
| 0
| 0.067943
| 0.13224
| 4,817
| 181
| 125
| 26.61326
| 0.645933
| 0.013286
| 0
| 0.732394
| 0
| 0.007042
| 0.070571
| 0.046134
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.007042
| null | null | 0.084507
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d175cc55f54c1a1970cccc69716dc57f93f8a4eb
| 52
|
py
|
Python
|
autorop/assertion/__init__.py
|
Tanson/autorop
|
0d2fc71cdcc9649a6006aee641a3808f884d7fc4
|
[
"MIT"
] | 15
|
2020-10-03T05:20:31.000Z
|
2022-03-20T06:19:29.000Z
|
autorop/assertion/__init__.py
|
Tanson/autorop
|
0d2fc71cdcc9649a6006aee641a3808f884d7fc4
|
[
"MIT"
] | 8
|
2020-10-02T09:51:39.000Z
|
2021-04-24T03:14:18.000Z
|
autorop/assertion/__init__.py
|
Tanson/autorop
|
0d2fc71cdcc9649a6006aee641a3808f884d7fc4
|
[
"MIT"
] | 2
|
2021-04-16T06:33:49.000Z
|
2021-09-03T09:21:10.000Z
|
from autorop.assertion.have_shell import have_shell
| 26
| 51
| 0.884615
| 8
| 52
| 5.5
| 0.75
| 0.409091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 52
| 1
| 52
| 52
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0f63d6dc7658f08f613781cc2cf066a3af3ce801
| 175
|
py
|
Python
|
imports.py
|
Matt-cloud/Discord.py-Template
|
4b2ac9f0897bb44dfd799d821e536fc34ef3064e
|
[
"MIT"
] | null | null | null |
imports.py
|
Matt-cloud/Discord.py-Template
|
4b2ac9f0897bb44dfd799d821e536fc34ef3064e
|
[
"MIT"
] | null | null | null |
imports.py
|
Matt-cloud/Discord.py-Template
|
4b2ac9f0897bb44dfd799d821e536fc34ef3064e
|
[
"MIT"
] | null | null | null |
from discord.ext import commands
from lib.utils import checks
from lib.utils import ui
from lib.utils.globals import logger, db
import time
import importlib
import discord
| 19.444444
| 40
| 0.817143
| 28
| 175
| 5.107143
| 0.5
| 0.146853
| 0.251748
| 0.251748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148571
| 175
| 8
| 41
| 21.875
| 0.959732
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
7e2923a1538496376fcd2704a51e98ca816cbcd2
| 19,484
|
py
|
Python
|
at_tmp/model/FUNC/REPORT_DATA.py
|
zuoleilei3253/zuoleilei
|
e188b15a0aa4a9fde00dba15e8300e4b87973e2d
|
[
"Apache-2.0"
] | null | null | null |
at_tmp/model/FUNC/REPORT_DATA.py
|
zuoleilei3253/zuoleilei
|
e188b15a0aa4a9fde00dba15e8300e4b87973e2d
|
[
"Apache-2.0"
] | null | null | null |
at_tmp/model/FUNC/REPORT_DATA.py
|
zuoleilei3253/zuoleilei
|
e188b15a0aa4a9fde00dba15e8300e4b87973e2d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/8/8 16:59
# @Author : bxf
# @File : REPORT_DATA.py
# @Software: PyCharm
'''
report_data:报告日期 data now
report_er:报告人
report_env_type:环境类型 case_info
report_exe_time:执行起止时间 case_result
report_exe_counts:用例总数
report_exe_pass:通过数
report_exe_fail:用例失败数
report_test_result:测试结论
case_id:用例ID
case_path:用例路径
case_desc:用例描述
case_exe_type:类型
case_prev_data:预期结果
case_real_result:实际结果
case_result:测试结论
case_detail:执行详情
'''
from model.util.TMP_DB_OPT import *
from model.FUNC.USERINFO.LOG_IN import *
from model.FUNC.GROUP_OPT import *
class REPORT_DATA():
def __init__(self,token):
    # token: session token of the requesting user; None for scheduled
    # (cron) runs, which have no logged-in user.
    self.token=token
# Regression test report -- by time
def getReportData(self, group_id, parmas,TimeOrTask,table):
    '''
    Build the SQL for one regression/online test report, run the queries,
    and return the assembled report as a JSON string.

    :param group_id: group code prefix; 0 means "all groups"
    :param parmas: selector value - online_time, task_id, or batch_id
                   depending on TimeOrTask
    :param TimeOrTask: 1 - by online (release) time, 2 - by task,
                       3 - by scheduled batch id
    :param table: table family to read: 1 - regression tables,
                  2 - online (core) tables
    :return: json.dumps(...) of the report dict on success, False otherwise

    NOTE(review): every query below is built by string concatenation of
    `parmas` / `group_id` - this is vulnerable to SQL injection; these
    should be parameterized queries.
    '''
    try:
        # Scheduled runs carry no token, so no user name can be resolved.
        if self.token== None:
            userName="定时任务"
        else:
            userName = getRealName(self.token)
        sql=''
        report_info_sql=''
        report_lists_sql=''
        report_lists_fail_sql=''
        # Group filter: group_id 0 selects every group; otherwise match the
        # group-code prefix (the group and all its sub-groups).
        if group_id==0:
            sql_doc=''
        else:
            sql_doc= "' AND B.group_id LIKE '" + str(group_id) + "%'"
        # Table family: 1 = regression report tables, 2 = online (core) tables.
        if table ==1:
            regress_task_info='regress_task_info'
            regress_case_info='regress_case_info'
            regress_case_result='regress_case_result'
        elif table ==2:
            regress_task_info = 'core_task_info'
            regress_case_info = 'core_case_info'
            regress_case_result = 'core_case_result'
        # Report selector: 1 - online time, 2 - task, 3 - scheduled batch id.
        if TimeOrTask ==1:
            # Summary row: run window, environment, and pass/fail/not-run
            # counts for the latest result of each case.
            # NOTE(review): "WHEN '2' THEN 1 WHEN '2' THEN 1" duplicates one
            # branch, and "T.task_id=T.task_id" is always true (probably
            # meant S.task_id=T.task_id) - left as-is, flagged for review.
            sql="SELECT C.case_id,A.online_time,MIN(C.case_time) AS report_start_time,MAX(C.case_time) AS report_end_time,NOW() AS report_date,(CASE B.case_exe_env WHEN '1' THEN '测试环境' WHEN '2' THEN '灰度环境' ELSE '线上环境' END) AS report_env_type,COUNT(1) AS report_exe_counts,SUM(CASE C.case_result WHEN '1' THEN 1 ELSE 0 END) AS report_exe_pass,SUM(CASE C.case_result WHEN '2' THEN 1 WHEN '2' THEN 1 ELSE 0 END) AS report_exe_fail,SUM(CASE WHEN C.case_result IS NULL THEN 1 ELSE 0 END) AS report_exe_not FROM t_task_to_case M LEFT JOIN "+regress_task_info+" A ON A.task_id=M.task_id LEFT JOIN "+regress_case_info+" B ON B.case_id=M.case_id LEFT JOIN (SELECT 1 AS num,T.* FROM "+regress_case_result+" T INNER JOIN (SELECT task_id,case_id,MAX(case_time) AS happen_time,1 AS num FROM "+regress_case_result+" GROUP BY task_id,case_id) S ON (T.task_id=T.task_id AND S.case_id=T.case_id AND S.happen_time=T.case_time)) C ON (C.task_id=M.task_id AND C.case_id=M.case_id) WHERE A.online_time='" + parmas +sql_doc
            # Per-group coverage/execution/pass-rate statistics.
            report_info_sql = "SELECT M.group_desc,IFNULL(N.total,0) AS total_api,IFNULL(O.total,0) AS auto_api,IFNULL(P.tc_total,0) AS total_regress,IFNULL(Q.tc_total,0) AS total_auto,IF(IFNULL(N.total,0)=0,'0.000%',CONCAT(LEFT((IFNULL(O.total,0)/IFNULL(N.total,0))*100,5),'%')) AS rate_api,IF(IFNULL(P.tc_total,0)=0,'0.000%',CONCAT(LEFT((IFNULL(Q.tc_total,0)/IFNULL(P.tc_total,0))*100,5),'%')) AS rate_cover,IF(IFNULL(P.tc_total,0)=0,'0.000%',CONCAT(LEFT((IFNULL(R.tc_total,0)/IFNULL(P.tc_total,0))*100,5),'%')) AS rate_exec,IF(IFNULL(R.tc_total,0)=0,'0.000%',CONCAT(LEFT((IFNULL(S.tc_total,0)/IFNULL(R.tc_total,0))*100,5),'%')) AS rate_pass FROM p_group_info M LEFT JOIN (SELECT LEFT(B.code,3) AS group_code,COUNT(1) AS total FROM api_case_info A INNER JOIN p_group_info B ON B.code=A.group_id GROUP BY LEFT(B.code,3)) N ON N.group_code=M.code LEFT JOIN (SELECT LEFT(B.code,3) AS group_code,COUNT(1) AS total FROM api_case_info A INNER JOIN p_group_info B ON B.code=A.group_id INNER JOIN (SELECT info_id FROM case_suite_info WHERE case_id IN (SELECT case_id FROM "+regress_case_info+") GROUP BY info_id HAVING COUNT(1)>0) C ON C.info_id=A.api_id GROUP BY LEFT(B.code,3)) O ON O.group_code=M.code LEFT JOIN (SELECT LEFT(B.code,3) AS group_code,COUNT(1) AS tc_total FROM "+regress_case_info+" A INNER JOIN p_group_info B ON B.code=A.group_id WHERE A.case_exe_plugin IN ('200','201','202') GROUP BY LEFT(B.code,3)) P ON P.group_code=M.code LEFT JOIN (SELECT LEFT(B.code,3) AS group_code,COUNT(1) AS tc_total FROM "+regress_case_info+" A INNER JOIN p_group_info B ON B.code=A.group_id WHERE A.case_exe_plugin IN ('200','201','202') AND A.case_exe_type='2' GROUP BY LEFT(B.code,3)) Q ON Q.group_code=M.code LEFT JOIN (SELECT LEFT(C.code,3) AS group_code,COUNT(1) AS tc_total FROM (SELECT case_id FROM "+regress_case_result+" GROUP BY case_id) A INNER JOIN "+regress_case_info+" B ON B.case_id=A.case_id INNER JOIN p_group_info C ON C.code=B.group_id WHERE B.case_exe_plugin IN ('200','201','202') AND B.case_exe_type='2' GROUP BY LEFT(C.code,3)) R ON R.group_code=M.code LEFT JOIN (SELECT LEFT(C.code,3) AS group_code,COUNT(1) AS tc_total FROM (SELECT case_id FROM "+regress_case_result+" WHERE case_result='1' GROUP BY case_id) A INNER JOIN "+regress_case_info+" B ON B.case_id=A.case_id INNER JOIN p_group_info C ON C.code=B.group_id WHERE B.case_exe_plugin IN ('200','201','202') AND B.case_exe_type='2' GROUP BY LEFT(C.code,3)) S ON S.group_code=M.code WHERE LENGTH(M.code)='3' AND M.code LIKE '" + str(group_id) + "%' ORDER BY M.code"
            # Per-case detail rows: all cases, and the failed subset.
            report_lists_sql = "SELECT M.case_id,B.case_path,B.case_desc,(CASE B.case_exe_type WHEN '1' THEN '手工' WHEN '2' THEN '自动' END) case_exe_type,B.case_prev_data,IFNULL(C.num,0) AS case_exe_num,IFNULL(C.case_result,0) AS case_exe_result,IFNULL(C.case_real_result,'') AS case_exe_realresult FROM t_task_to_case M LEFT JOIN "+regress_task_info+" A ON A.task_id=M.task_id LEFT JOIN "+regress_case_info+" B ON B.case_id=M.case_id LEFT JOIN (SELECT 1 AS num,T.* FROM "+regress_case_result+" T INNER JOIN (SELECT task_id,case_id,MAX(case_time) AS happen_time,1 AS num FROM "+regress_case_result+" GROUP BY task_id,case_id) S ON (T.task_id=T.task_id AND S.case_id=T.case_id AND S.happen_time=T.case_time)) C ON (C.task_id=M.task_id AND C.case_id=M.case_id) WHERE A.online_time='" + parmas + "' AND B.group_id LIKE '" + str(group_id) + "%'"
            report_lists_fail_sql = "SELECT M.case_id,B.case_path,B.case_desc,(CASE B.case_exe_type WHEN '1' THEN '手工' WHEN '2' THEN '自动' END) case_exe_type,B.case_prev_data,IFNULL(C.num,0) AS case_exe_num,IFNULL(C.case_result,0) AS case_exe_result,IFNULL(C.case_real_result,'') AS case_exe_realresult FROM t_task_to_case M LEFT JOIN "+regress_task_info+" A ON A.task_id=M.task_id LEFT JOIN "+regress_case_info+" B ON B.case_id=M.case_id LEFT JOIN (SELECT 1 AS num,T.* FROM "+regress_case_result+" T INNER JOIN (SELECT task_id,case_id,MAX(case_time) AS happen_time,1 AS num FROM "+regress_case_result+" GROUP BY task_id,case_id) S ON (T.task_id=T.task_id AND S.case_id=T.case_id AND S.happen_time=T.case_time)) C ON (C.task_id=M.task_id AND C.case_id=M.case_id) WHERE C.case_result!='1' AND A.online_time='" + parmas + "' AND B.group_id LIKE '" + str(group_id) + "%'"
        elif TimeOrTask ==2:
            # Resolve the task's 3-char group code first; the task may live
            # in either the core or the regression task table.
            group_id_sql = "SELECT * FROM ((SELECT * FROM core_task_info) UNION ALL (SELECT * FROM regress_task_info)) T WHERE T.task_id='" + parmas + "'"
            task_data = getJsonFromDatabase(group_id_sql)
            group_id = task_data[0]['group_id'][0:3]
            sql = "SELECT C.case_id,A.online_time,MIN(C.case_time) report_start_time,MAX(C.case_time) report_end_time,NOW() report_date,COUNT(1) report_exe_counts,(CASE B.case_exe_env WHEN '1' THEN '测试环境' WHEN '2' THEN '灰度环境' ELSE '线上环境' END) report_env_type,SUM(CASE C.case_result WHEN '1' THEN 1 ELSE 0 END ) report_exe_pass,SUM(CASE C.case_result WHEN '2' THEN 1 ELSE 0 END) report_exe_fail,SUM(CASE C.case_result WHEN '3' THEN 1 ELSE 0 END) report_exe_exception,SUM(CASE WHEN C.case_result IS NULL THEN 1 ELSE 0 END) report_exe_not FROM t_task_to_case M LEFT JOIN "+regress_task_info+" A ON A.task_id=M.task_id LEFT JOIN "+regress_case_info+" B ON B.case_id=M.case_id LEFT JOIN (SELECT 1 AS num,T.* FROM "+regress_case_result+" T INNER JOIN (SELECT task_id,case_id,MAX(case_time) AS happen_time,1 AS num FROM "+regress_case_result+" GROUP BY task_id,case_id) S ON (T.task_id=T.task_id AND S.case_id=T.case_id AND S.happen_time=T.case_time)) C ON (C.task_id=M.task_id AND C.case_id=M.case_id) WHERE A.task_id='" + parmas + "'"
            report_info_sql="SELECT M.group_desc,IFNULL(N.total,0) AS total_api,IFNULL(O.total,0) AS auto_api,IFNULL(P.tc_total,0) AS total_regress,IFNULL(Q.tc_total,0) AS total_auto,IF(IFNULL(N.total,0)=0,'0.000%',CONCAT(LEFT((IFNULL(O.total,0)/IFNULL(N.total,0))*100,5),'%')) AS rate_api,IF(IFNULL(P.tc_total,0)=0,'0.000%',CONCAT(LEFT((IFNULL(Q.tc_total,0)/IFNULL(P.tc_total,0))*100,5),'%')) AS rate_cover,IF(IFNULL(P.tc_total,0)=0,'0.000%',CONCAT(LEFT((IFNULL(R.tc_total,0)/IFNULL(P.tc_total,0))*100,5),'%')) AS rate_exec,IF(IFNULL(R.tc_total,0)=0,'0.000%',CONCAT(LEFT((IFNULL(S.tc_total,0)/IFNULL(R.tc_total,0))*100,5),'%')) AS rate_pass FROM p_group_info M LEFT JOIN (SELECT LEFT(B.code,3) AS group_code,COUNT(1) AS total FROM api_case_info A INNER JOIN p_group_info B ON B.code=A.group_id GROUP BY LEFT(B.code,3)) N ON N.group_code=M.code LEFT JOIN (SELECT LEFT(B.code,3) AS group_code,COUNT(1) AS total FROM api_case_info A INNER JOIN p_group_info B ON B.code=A.group_id INNER JOIN (SELECT info_id FROM case_suite_info WHERE case_id IN (SELECT case_id FROM "+regress_case_info+") GROUP BY info_id HAVING COUNT(1)>0) C ON C.info_id=A.api_id GROUP BY LEFT(B.code,3)) O ON O.group_code=M.code LEFT JOIN (SELECT LEFT(B.code,3) AS group_code,COUNT(1) AS tc_total FROM "+regress_case_info+" A INNER JOIN p_group_info B ON B.code=A.group_id WHERE A.case_exe_plugin IN ('200','201','202') GROUP BY LEFT(B.code,3)) P ON P.group_code=M.code LEFT JOIN (SELECT LEFT(B.code,3) AS group_code,COUNT(1) AS tc_total FROM "+regress_case_info+" A INNER JOIN p_group_info B ON B.code=A.group_id WHERE A.case_exe_plugin IN ('200','201','202') AND A.case_exe_type='2' GROUP BY LEFT(B.code,3)) Q ON Q.group_code=M.code LEFT JOIN (SELECT LEFT(C.code,3) AS group_code,COUNT(1) AS tc_total FROM (SELECT case_id FROM "+regress_case_result+" GROUP BY case_id) A INNER JOIN "+regress_case_info+" B ON B.case_id=A.case_id INNER JOIN p_group_info C ON C.code=B.group_id WHERE B.case_exe_plugin IN ('200','201','202') AND B.case_exe_type='2' GROUP BY LEFT(C.code,3)) R ON R.group_code=M.code LEFT JOIN (SELECT LEFT(C.code,3) AS group_code,COUNT(1) AS tc_total FROM (SELECT case_id FROM "+regress_case_result+" WHERE case_result='1' GROUP BY case_id) A INNER JOIN "+regress_case_info+" B ON B.case_id=A.case_id INNER JOIN p_group_info C ON C.code=B.group_id WHERE B.case_exe_plugin IN ('200','201','202') AND B.case_exe_type='2' GROUP BY LEFT(C.code,3)) S ON S.group_code=M.code WHERE LENGTH(M.code)='3' AND M.code LIKE '" + str(group_id) + "%' ORDER BY M.code"
            report_lists_sql="SELECT M.case_id,B.case_path,B.case_desc,(CASE B.case_exe_type WHEN '1' THEN '手工' WHEN '2' THEN '自动' END) case_exe_type,B.case_prev_data,IFNULL(C.num,0) AS case_exe_num,IFNULL(C.case_result,0) AS case_exe_result,IFNULL(C.case_real_result,'') AS case_exe_realresult FROM t_task_to_case M LEFT JOIN "+regress_task_info+" A ON A.task_id=M.task_id LEFT JOIN "+regress_case_info+" B ON B.case_id=M.case_id LEFT JOIN (SELECT 1 AS num,T.* FROM "+regress_case_result+" T INNER JOIN (SELECT task_id,case_id,MAX(case_time) AS happen_time,1 AS num FROM "+regress_case_result+" GROUP BY task_id,case_id) S ON (T.task_id=T.task_id AND S.case_id=T.case_id AND S.happen_time=T.case_time)) C ON (C.task_id=M.task_id AND C.case_id=M.case_id) WHERE A.task_id='" + parmas + "' "
            report_lists_fail_sql="SELECT M.case_id,B.case_path,B.case_desc,(CASE B.case_exe_type WHEN '1' THEN '手工' WHEN '2' THEN '自动' END) case_exe_type,B.case_prev_data,IFNULL(C.num,0) AS case_exe_num,IFNULL(C.case_result,0) AS case_exe_result,IFNULL(C.case_real_result,'') AS case_exe_realresult FROM t_task_to_case M LEFT JOIN "+regress_task_info+" A ON A.task_id=M.task_id LEFT JOIN "+regress_case_info+" B ON B.case_id=M.case_id LEFT JOIN (SELECT 1 AS num,T.* FROM "+regress_case_result+" T INNER JOIN (SELECT task_id,case_id,MAX(case_time) AS happen_time,1 AS num FROM "+regress_case_result+" GROUP BY task_id,case_id) S ON (T.task_id=T.task_id AND S.case_id=T.case_id AND S.happen_time=T.case_time)) C ON (C.task_id=M.task_id AND C.case_id=M.case_id) WHERE C.case_result!='1' AND A.task_id='" + parmas + "' "
        # Scheduled batch: filter directly on the result rows' batch_id.
        elif TimeOrTask == 3:
            group_id=group_id[0:3]
            sql="SELECT C.case_id,A.online_time,MIN(C.case_time) report_start_time,MAX(C.case_time) report_end_time,NOW() report_date,COUNT(1) report_exe_counts,(CASE B.case_exe_env WHEN '1' THEN '测试环境' WHEN '2' THEN '灰度环境' ELSE '线上环境' END) report_env_type,SUM(CASE C.case_result WHEN '1' THEN 1 ELSE 0 END ) report_exe_pass,SUM(CASE C.case_result WHEN '2' THEN 1 ELSE 0 END) report_exe_fail,SUM(CASE C.case_result WHEN '3' THEN 1 ELSE 0 END) report_exe_exception,SUM(CASE WHEN C.case_result IS NULL THEN 1 ELSE 0 END) report_exe_not FROM t_task_to_case M LEFT JOIN "+regress_task_info+" A ON A.task_id=M.task_id LEFT JOIN "+regress_case_info+" B ON B.case_id=M.case_id LEFT JOIN (SELECT 1 AS num,T.* FROM "+regress_case_result+" T) C ON (C.task_id=M.task_id AND C.case_id=M.case_id) WHERE C.batch_id='"+parmas+"'"
            report_info_sql="SELECT M.group_desc,IFNULL(N.total,0) AS total_api,IFNULL(P.tc_total,0) AS total_regress,IFNULL(O.total,0) AS auto_api,IFNULL(Q.tc_total,0) AS total_auto,IF(IFNULL(N.total,0)=0,'0.000%',CONCAT(LEFT((IFNULL(O.total,0)/IFNULL(N.total,0))*100,5),'%')) AS rate_api,IF(IFNULL(P.tc_total,0)=0,'0.000%',CONCAT(LEFT((IFNULL(Q.tc_total,0)/IFNULL(P.tc_total,0))*100,5),'%')) AS rate_cover,IF(IFNULL(P.tc_total,0)=0,'0.000%',CONCAT(LEFT((IFNULL(R.tc_total,0)/IFNULL(P.tc_total,0))*100,5),'%')) AS rate_exec,IF(IFNULL(R.tc_total,0)=0,'0.000%',CONCAT(LEFT((IFNULL(S.tc_total,0)/IFNULL(R.tc_total,0))*100,5),'%')) AS rate_pass FROM p_group_info M LEFT JOIN (SELECT LEFT(B.code,3) AS group_code,COUNT(1) AS total FROM api_case_info A INNER JOIN p_group_info B ON B.code=A.group_id GROUP BY LEFT(B.code,3)) N ON N.group_code=M.code LEFT JOIN (SELECT LEFT(B.code,3) AS group_code,COUNT(1) AS total FROM api_case_info A INNER JOIN p_group_info B ON B.code=A.group_id INNER JOIN (SELECT info_id FROM case_suite_info WHERE case_id IN (SELECT case_id FROM "+regress_case_info+") GROUP BY info_id HAVING COUNT(1)>0) C ON C.info_id=A.api_id GROUP BY LEFT(B.code,3)) O ON O.group_code=M.code LEFT JOIN (SELECT LEFT(B.code,3) AS group_code,COUNT(1) AS tc_total FROM "+regress_case_info+" A INNER JOIN p_group_info B ON B.code=A.group_id WHERE A.case_exe_plugin IN ('200','201','202') GROUP BY LEFT(B.code,3)) P ON P.group_code=M.code LEFT JOIN (SELECT LEFT(B.code,3) AS group_code,COUNT(1) AS tc_total FROM "+regress_case_info+" A INNER JOIN p_group_info B ON B.code=A.group_id WHERE A.case_exe_plugin IN ('200','201','202') AND A.case_exe_type='2' GROUP BY LEFT(B.code,3)) Q ON Q.group_code=M.code LEFT JOIN (SELECT LEFT(C.code,3) AS group_code,COUNT(1) AS tc_total FROM (SELECT case_id FROM "+regress_case_result+" GROUP BY case_id) A INNER JOIN "+regress_case_info+" B ON B.case_id=A.case_id INNER JOIN p_group_info C ON C.code=B.group_id WHERE B.case_exe_plugin IN ('200','201','202') AND B.case_exe_type='2' GROUP BY LEFT(C.code,3)) R ON R.group_code=M.code LEFT JOIN (SELECT LEFT(C.code,3) AS group_code,COUNT(1) AS tc_total FROM (SELECT case_id FROM "+regress_case_result+" WHERE case_result='1' GROUP BY case_id) A INNER JOIN "+regress_case_info+" B ON B.case_id=A.case_id INNER JOIN p_group_info C ON C.code=B.group_id WHERE B.case_exe_plugin IN ('200','201','202') AND B.case_exe_type='2' GROUP BY LEFT(C.code,3)) S ON S.group_code=M.code WHERE LENGTH(M.code)='3' AND M.code LIKE '" + str(group_id) + "%' ORDER BY M.code"
            report_lists_fail_sql="SELECT M.case_id,B.case_path,B.case_desc,(CASE B.case_exe_type WHEN '1' THEN '手工' WHEN '2' THEN '自动' END) case_exe_type,B.case_prev_data,IFNULL(C.num,0) AS case_exe_num,IFNULL(C.case_result,0) AS case_exe_result,IFNULL(C.case_real_result,'') AS case_exe_realresult FROM t_task_to_case M LEFT JOIN "+regress_task_info+" A ON A.task_id=M.task_id LEFT JOIN "+regress_case_info+" B ON B.case_id=M.case_id LEFT JOIN (SELECT 1 AS num,T.* FROM "+regress_case_result+" T) C ON (C.task_id=M.task_id AND C.case_id=M.case_id) WHERE C.case_result!='1' AND C.batch_id='"+parmas+"'"
            report_lists_sql ="SELECT M.case_id,B.case_path,B.case_desc,(CASE B.case_exe_type WHEN '1' THEN '手工' WHEN '2' THEN '自动' END) case_exe_type,B.case_prev_data,IFNULL(C.num,0) AS case_exe_num,IFNULL(C.case_result,0) AS case_exe_result,IFNULL(C.case_real_result,'') AS case_exe_realresult FROM t_task_to_case M LEFT JOIN "+regress_task_info+" A ON A.task_id=M.task_id LEFT JOIN "+regress_case_info+" B ON B.case_id=M.case_id LEFT JOIN (SELECT 1 AS num,T.* FROM "+regress_case_result+" T) C ON (C.task_id=M.task_id AND C.case_id=M.case_id) WHERE C.batch_id='"+parmas+"'"
        # Run the four queries and assemble the report payload.
        case_lists = getJsonFromDatabase(sql)
        report_info_detail = getJsonFromDatabase(report_info_sql)
        report_lists = getJsonFromDatabase(report_lists_sql)
        report_lists_fail = getJsonFromDatabase(report_lists_fail_sql)
        # The summary query yields one row; stringify counters for the view.
        report_info = case_lists[0]
        report_info['report_exe_counts'] = str(report_info['report_exe_counts'])
        report_info['report_exe_pass'] = str(report_info['report_exe_pass'])
        report_info['report_exe_fail'] = str(report_info['report_exe_fail'])
        # report_info['report_exe_exception'] = str(report_info['report_exe_exception'])
        report_info['report_exe_not'] = str(report_info['report_exe_not'])
        report_info['report_er'] = userName
        if report_info['report_start_time'] is not None:
            # Overall verdict: '未完成' if anything is unexecuted, 'Pass'
            # when all cases passed; otherwise left as the empty string.
            report_test_result = ''
            if int(report_info['report_exe_not']) > 0:
                report_test_result = '未完成'
            # elif int(report_info['report_exe_fail']) > 0 or int(report_info['report_exe_exception']) > 0:
            # report_test_result = 'Fail'
            elif int(report_info['report_exe_pass']) == int(report_info['report_exe_counts']):
                report_test_result = 'Pass'
            report_info['report_test_result'] = report_test_result
            if report_info['case_id'] != None:
                report_info['report_info'] = report_info_detail[0]
                report_info['records'] = report_lists
                report_info['records_fail'] = report_lists_fail
                exeLog("*******REPORT 获取报告数据成功*******")
                return (json.dumps(report_info, cls=MyEncoder, ensure_ascii=False))
            else:
                return False
        else:
            return False
    except Exception as e:
        exeLog("*******REPORT 获取报告失败,错误代码为:" + str(e))
        return False
# Regression test report -- by task
| 142.218978
| 2,544
| 0.707658
| 3,797
| 19,484
| 3.377667
| 0.052673
| 0.047251
| 0.020585
| 0.016842
| 0.860741
| 0.828304
| 0.809201
| 0.806706
| 0.801326
| 0.801326
| 0
| 0.028053
| 0.160234
| 19,484
| 137
| 2,545
| 142.218978
| 0.755776
| 0.048553
| 0
| 0.116883
| 0
| 0.480519
| 0.751078
| 0.199804
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025974
| false
| 0.116883
| 0.038961
| 0
| 0.12987
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 11
|
7e2d8d14ae0ed60be6046776a3ba7e84091d36fb
| 160
|
py
|
Python
|
frappe_telegram/utils/bench.py
|
rafatali686/frappe_telegram
|
724ead04a531eddfe935acf35282684fef41cb67
|
[
"MIT"
] | 16
|
2021-07-25T09:30:28.000Z
|
2022-03-24T04:56:57.000Z
|
frappe_telegram/utils/bench.py
|
rafatali686/frappe_telegram
|
724ead04a531eddfe935acf35282684fef41cb67
|
[
"MIT"
] | 5
|
2021-08-24T18:07:13.000Z
|
2022-02-03T04:26:08.000Z
|
frappe_telegram/utils/bench.py
|
rafatali686/frappe_telegram
|
724ead04a531eddfe935acf35282684fef41cb67
|
[
"MIT"
] | 10
|
2021-07-27T07:26:11.000Z
|
2022-03-24T11:16:38.000Z
|
import os
from frappe.utils import get_bench_path, get_site_path # noqa
def get_bench_name():
    """Return the bench directory's name (basename of the absolute bench path)."""
    bench_root = os.path.abspath(get_bench_path())
    return os.path.basename(bench_root)
| 22.857143
| 62
| 0.775
| 27
| 160
| 4.296296
| 0.555556
| 0.206897
| 0.206897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 160
| 6
| 63
| 26.666667
| 0.828571
| 0.025
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 9
|
7e3d8bb50a7ba3ab857a94a8d537eb7d7a7559a5
| 911
|
py
|
Python
|
catalog/bindings/gmd/formula_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/gmd/formula_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/gmd/formula_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass, field
from typing import Optional
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class FormulaType:
    """Generated XML binding for the GML ``Formula`` type
    (namespace ``http://www.opengis.net/gml``).

    Holds four optional float coefficient elements ``a``-``d``; per the
    field metadata, ``b`` and ``c`` are required elements while ``a`` and
    ``d`` are not. Presumably produced by an xsdata-style generator —
    the ``metadata`` dicts drive (de)serialization and must stay exact.
    """

    # Optional element <a>.
    a: Optional[float] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
        },
    )
    # Required element <b>.
    b: Optional[float] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
            "required": True,
        },
    )
    # Required element <c>.
    c: Optional[float] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
            "required": True,
        },
    )
    # Optional element <d>.
    d: Optional[float] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
        },
    )
| 23.358974
| 54
| 0.497256
| 82
| 911
| 5.47561
| 0.329268
| 0.144766
| 0.178174
| 0.256125
| 0.804009
| 0.804009
| 0.739421
| 0.739421
| 0.739421
| 0.739421
| 0
| 0
| 0.349067
| 911
| 38
| 55
| 23.973684
| 0.757167
| 0
| 0
| 0.514286
| 0
| 0
| 0.248079
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.057143
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7e513f09fd9a9677a31a0df67992e43713456bc9
| 132
|
py
|
Python
|
class9/ex4/mytest/__init__.py
|
patrebert/pynet_cert
|
b82cce3ddb20d9e4abc89d74579ddeb3513bdf55
|
[
"Apache-2.0"
] | null | null | null |
class9/ex4/mytest/__init__.py
|
patrebert/pynet_cert
|
b82cce3ddb20d9e4abc89d74579ddeb3513bdf55
|
[
"Apache-2.0"
] | null | null | null |
class9/ex4/mytest/__init__.py
|
patrebert/pynet_cert
|
b82cce3ddb20d9e4abc89d74579ddeb3513bdf55
|
[
"Apache-2.0"
] | null | null | null |
from mytest.simple import func1
from mytest.whatever import func2
from mytest.world import func3
from mytest.world import testclass
| 26.4
| 34
| 0.848485
| 20
| 132
| 5.6
| 0.5
| 0.357143
| 0.267857
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025862
| 0.121212
| 132
| 4
| 35
| 33
| 0.939655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
7e5e8daf10948a1d4e97654acba69b5147e985ae
| 6,229
|
py
|
Python
|
synthesizer/tests/test_tacotron_model.py
|
puppyapple/Real-Time-Voice-Cloning
|
d1e1481e18ee7d604372025f3b751663d8dda37b
|
[
"MIT"
] | null | null | null |
synthesizer/tests/test_tacotron_model.py
|
puppyapple/Real-Time-Voice-Cloning
|
d1e1481e18ee7d604372025f3b751663d8dda37b
|
[
"MIT"
] | null | null | null |
synthesizer/tests/test_tacotron_model.py
|
puppyapple/Real-Time-Voice-Cloning
|
d1e1481e18ee7d604372025f3b751663d8dda37b
|
[
"MIT"
] | null | null | null |
import os
import copy
import torch
import unittest
from torch import optim
from torch import nn
from TTS.utils.generic_utils import load_config
from TTS.layers.losses import L1LossMasked
from TTS.models.tacotron import Tacotron
#pylint: disable=unused-variable
# Fix the RNG so the random tensors below are reproducible across runs.
torch.manual_seed(1)
use_cuda = torch.cuda.is_available()
# Run on the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Test config lives next to this file.
file_path = os.path.dirname(os.path.realpath(__file__))
c = load_config(os.path.join(file_path, 'test_config.json'))
def count_parameters(model):
    """Return the total number of trainable parameters in *model*."""
    trainable = (p for p in model.parameters() if p.requires_grad)
    return sum(p.numel() for p in trainable)
class TacotronTrainTest(unittest.TestCase):
    """Smoke-test a few Tacotron training steps and verify every
    parameter actually receives gradient updates."""
    @staticmethod
    def test_train_step():
        # Random batch: 8 character sequences of length 128, mel/linear
        # targets of 30 frames.  Last length pinned to the max so masking
        # covers the full sequence.
        input_dummy = torch.randint(0, 24, (8, 128)).long().to(device)
        input_lengths = torch.randint(100, 129, (8, )).long().to(device)
        input_lengths[-1] = 128
        mel_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
        linear_spec = torch.rand(8, 30, c.audio['num_freq']).to(device)
        mel_lengths = torch.randint(20, 30, (8, )).long().to(device)
        stop_targets = torch.zeros(8, 30, 1).float().to(device)
        speaker_ids = torch.randint(0, 5, (8, )).long().to(device)
        # Mark every frame at or past each sample's mel length as "stop".
        for idx in mel_lengths:
            stop_targets[:, int(idx.item()):, 0] = 1.0
        # Collapse stop targets to one flag per reduction-factor group.
        stop_targets = stop_targets.view(input_dummy.shape[0],
                                         stop_targets.size(1) // c.r, -1)
        stop_targets = (stop_targets.sum(2) >
                        0.0).unsqueeze(2).float().squeeze()
        criterion = L1LossMasked().to(device)
        criterion_st = nn.BCEWithLogitsLoss().to(device)
        model = Tacotron(
            num_chars=32,
            num_speakers=5,
            postnet_output_dim=c.audio['num_freq'],
            decoder_output_dim=c.audio['num_mels'],
            r=c.r,
            memory_size=c.memory_size
        ).to(device) #FIXME: stale? num_speakers IS passed above — confirm
        model.train()
        print(" > Num parameters for Tacotron model:%s" %
              (count_parameters(model)))
        # Snapshot the initial weights to compare against after training.
        model_ref = copy.deepcopy(model)
        count = 0
        for param, param_ref in zip(model.parameters(),
                                    model_ref.parameters()):
            assert (param - param_ref).sum() == 0, param
            count += 1
        optimizer = optim.Adam(model.parameters(), lr=c.lr)
        # Run a handful of optimization steps on the same batch.
        for _ in range(5):
            mel_out, linear_out, align, stop_tokens = model.forward(
                input_dummy, input_lengths, mel_spec, speaker_ids)
            optimizer.zero_grad()
            loss = criterion(mel_out, mel_spec, mel_lengths)
            stop_loss = criterion_st(stop_tokens, stop_targets)
            loss = loss + criterion(linear_out, linear_spec,
                                    mel_lengths) + stop_loss
            loss.backward()
            optimizer.step()
        # check parameter changes
        count = 0
        for param, param_ref in zip(model.parameters(),
                                    model_ref.parameters()):
            # ignore pre-highway layer since it is applied conditionally
            # if count not in [145, 59]:
            assert (param != param_ref).any(
            ), "param {} with shape {} not updated!! \n{}\n{}".format(
                count, param.shape, param, param_ref)
            count += 1
class TacotronGSTTrainTest(unittest.TestCase):
    """Same training-step smoke test as above, but with the Global Style
    Token (gst=True) variant of Tacotron and longer (120-frame) targets."""
    @staticmethod
    def test_train_step():
        # Random batch: 8 sequences, 120 target frames (longer than the
        # plain-Tacotron test so the GST reference encoder sees more audio).
        input_dummy = torch.randint(0, 24, (8, 128)).long().to(device)
        input_lengths = torch.randint(100, 129, (8, )).long().to(device)
        input_lengths[-1] = 128
        mel_spec = torch.rand(8, 120, c.audio['num_mels']).to(device)
        linear_spec = torch.rand(8, 120, c.audio['num_freq']).to(device)
        mel_lengths = torch.randint(20, 120, (8, )).long().to(device)
        stop_targets = torch.zeros(8, 120, 1).float().to(device)
        speaker_ids = torch.randint(0, 5, (8, )).long().to(device)
        # Mark every frame at or past each sample's mel length as "stop".
        for idx in mel_lengths:
            stop_targets[:, int(idx.item()):, 0] = 1.0
        # Collapse stop targets to one flag per reduction-factor group.
        stop_targets = stop_targets.view(input_dummy.shape[0],
                                         stop_targets.size(1) // c.r, -1)
        stop_targets = (stop_targets.sum(2) >
                        0.0).unsqueeze(2).float().squeeze()
        criterion = L1LossMasked().to(device)
        criterion_st = nn.BCEWithLogitsLoss().to(device)
        model = Tacotron(
            num_chars=32,
            num_speakers=5,
            gst=True,
            postnet_output_dim=c.audio['num_freq'],
            decoder_output_dim=c.audio['num_mels'],
            r=c.r,
            memory_size=c.memory_size
        ).to(device) #FIXME: stale? num_speakers IS passed above — confirm
        model.train()
        print(model)
        print(" > Num parameters for Tacotron GST model:%s" %
              (count_parameters(model)))
        # Snapshot the initial weights to compare against after training.
        model_ref = copy.deepcopy(model)
        count = 0
        for param, param_ref in zip(model.parameters(),
                                    model_ref.parameters()):
            assert (param - param_ref).sum() == 0, param
            count += 1
        optimizer = optim.Adam(model.parameters(), lr=c.lr)
        # More steps (10) than the plain test, presumably so the GST
        # layers accumulate visible updates — TODO confirm.
        for _ in range(10):
            mel_out, linear_out, align, stop_tokens = model.forward(
                input_dummy, input_lengths, mel_spec, speaker_ids)
            optimizer.zero_grad()
            loss = criterion(mel_out, mel_spec, mel_lengths)
            stop_loss = criterion_st(stop_tokens, stop_targets)
            loss = loss + criterion(linear_out, linear_spec,
                                    mel_lengths) + stop_loss
            loss.backward()
            optimizer.step()
        # check parameter changes
        count = 0
        for param, param_ref in zip(model.parameters(),
                                    model_ref.parameters()):
            # ignore pre-highway layer since it is applied conditionally
            assert (param != param_ref).any(
            ), "param {} with shape {} not updated!! \n{}\n{}".format(
                count, param.shape, param, param_ref)
            count += 1
| 41.526667
| 77
| 0.577781
| 771
| 6,229
| 4.488975
| 0.206226
| 0.046229
| 0.037561
| 0.022537
| 0.825484
| 0.81017
| 0.81017
| 0.81017
| 0.807281
| 0.787056
| 0
| 0.028703
| 0.300851
| 6,229
| 149
| 78
| 41.805369
| 0.766016
| 0.058757
| 0
| 0.740157
| 0
| 0
| 0.044615
| 0
| 0
| 0
| 0
| 0.006711
| 0.031496
| 1
| 0.023622
| false
| 0
| 0.070866
| 0
| 0.11811
| 0.023622
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7ebe62e244f748435f7c53619821a5be51928d6c
| 21,759
|
py
|
Python
|
mayan/apps/user_management/tests/test_group_views.py
|
bonitobonita24/Mayan-EDMS
|
7845fe0e1e83c81f5d227a16116397a3d3883b85
|
[
"Apache-2.0"
] | 343
|
2015-01-05T14:19:35.000Z
|
2018-12-10T19:07:48.000Z
|
mayan/apps/user_management/tests/test_group_views.py
|
bonitobonita24/Mayan-EDMS
|
7845fe0e1e83c81f5d227a16116397a3d3883b85
|
[
"Apache-2.0"
] | 191
|
2015-01-03T00:48:19.000Z
|
2018-11-30T09:10:25.000Z
|
mayan/apps/user_management/tests/test_group_views.py
|
bonitobonita24/Mayan-EDMS
|
7845fe0e1e83c81f5d227a16116397a3d3883b85
|
[
"Apache-2.0"
] | 114
|
2015-01-08T20:21:05.000Z
|
2018-12-10T19:07:53.000Z
|
from django.contrib.auth.models import Group
from mayan.apps.documents.tests.base import GenericDocumentViewTestCase
from mayan.apps.metadata.permissions import permission_document_metadata_edit
from mayan.apps.metadata.tests.mixins import MetadataTypeTestMixin
from mayan.apps.testing.tests.base import GenericViewTestCase
from ..events import event_group_created, event_group_edited
from ..permissions import (
permission_group_create, permission_group_delete, permission_group_edit,
permission_group_view, permission_user_edit
)
from .mixins import (
GroupTestMixin, GroupUserViewTestMixin, GroupViewTestMixin
)
class GroupViewsTestCase(
    GroupTestMixin, GroupViewTestMixin, GenericViewTestCase
):
    """Group create/delete/edit/list views, with and without access.

    Each test asserts three things: the HTTP status code, the database
    side effect (group count / name), and the events emitted.
    NOTE(review): unauthorized requests get 403 for create (no object to
    hide) but 404 for per-object views (object hidden by the ACL) —
    confirm against the view implementations.
    """
    def test_group_create_view_no_permission(self):
        group_count = Group.objects.count()
        self._clear_events()
        response = self._request_test_group_create_view()
        self.assertEqual(response.status_code, 403)
        # No group created, no events emitted.
        self.assertEqual(Group.objects.count(), group_count)
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_create_view_with_permission(self):
        self.grant_permission(permission=permission_group_create)
        group_count = Group.objects.count()
        self._clear_events()
        response = self._request_test_group_create_view()
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Group.objects.count(), group_count + 1)
        # Exactly one group_created event, targeting the new group.
        events = self._get_test_events()
        self.assertEqual(events.count(), 1)
        self.assertEqual(events[0].action_object, None)
        self.assertEqual(events[0].actor, self._test_case_user)
        self.assertEqual(events[0].target, self.test_group)
        self.assertEqual(events[0].verb, event_group_created.id)
    def test_group_delete_single_view_no_permission(self):
        self._create_test_group()
        group_count = Group.objects.count()
        self._clear_events()
        response = self._request_test_group_delete_single_view()
        self.assertEqual(response.status_code, 404)
        self.assertEqual(Group.objects.count(), group_count)
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_delete_single_view_with_access(self):
        self._create_test_group()
        self.grant_access(
            obj=self.test_group, permission=permission_group_delete
        )
        group_count = Group.objects.count()
        self._clear_events()
        response = self._request_test_group_delete_single_view()
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Group.objects.count(), group_count - 1)
        # Deletion emits no event (the would-be target is gone).
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_delete_multiple_view_no_permission(self):
        self._create_test_group()
        group_count = Group.objects.count()
        self._clear_events()
        response = self._request_test_group_delete_multiple_view()
        self.assertEqual(response.status_code, 404)
        self.assertEqual(Group.objects.count(), group_count)
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_delete_multiple_view_with_access(self):
        self._create_test_group()
        group_count = Group.objects.count()
        self.grant_access(
            obj=self.test_group, permission=permission_group_delete
        )
        self._clear_events()
        response = self._request_test_group_delete_multiple_view()
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Group.objects.count(), group_count - 1)
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_edit_view_no_permission(self):
        self._create_test_group()
        group_name = self.test_group.name
        self._clear_events()
        response = self._request_test_group_edit_view()
        self.assertEqual(response.status_code, 404)
        # Name unchanged without edit access.
        self.test_group.refresh_from_db()
        self.assertEqual(self.test_group.name, group_name)
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_edit_view_with_access(self):
        self._create_test_group()
        self.grant_access(
            obj=self.test_group, permission=permission_group_edit
        )
        group_name = self.test_group.name
        self._clear_events()
        response = self._request_test_group_edit_view()
        self.assertEqual(response.status_code, 302)
        self.test_group.refresh_from_db()
        self.assertNotEqual(self.test_group.name, group_name)
        # One group_edited event for the rename.
        events = self._get_test_events()
        self.assertEqual(events.count(), 1)
        self.assertEqual(events[0].action_object, None)
        self.assertEqual(events[0].actor, self._test_case_user)
        self.assertEqual(events[0].target, self.test_group)
        self.assertEqual(events[0].verb, event_group_edited.id)
    def test_group_list_view_no_permission(self):
        self._create_test_group()
        self._clear_events()
        response = self._request_test_group_list_view()
        # List renders (200) but the unviewable group is filtered out.
        self.assertNotContains(
            response=response, text=self.test_group.name, status_code=200
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_list_view_with_permission(self):
        self._create_test_group()
        self.grant_access(
            obj=self.test_group, permission=permission_group_view
        )
        self._clear_events()
        response = self._request_test_group_list_view()
        self.assertContains(
            response=response, text=self.test_group.name, status_code=200
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
class GroupAddRemoveUserViewTestCase(
    GroupTestMixin, GroupUserViewTestMixin, GenericViewTestCase
):
    """Group membership add/remove views for a regular user.

    Covers all four access combinations (none / user edit only / group
    edit only / both).  Membership only changes (302 + event) with BOTH
    the user-edit and group-edit access grants.
    """
    def setUp(self):
        super().setUp()
        self._create_test_group()
        self._create_test_user()
    def test_group_user_add_remove_get_view_no_permission(self):
        self.test_user.groups.add(self.test_group)
        self._clear_events()
        response = self._request_test_group_user_add_remove_get_view()
        # 404: neither object is visible without any access.
        self.assertNotContains(
            response=response, text=str(self.test_user),
            status_code=404
        )
        self.assertNotContains(
            response=response, text=str(self.test_group),
            status_code=404
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_user_add_remove_get_view_with_user_access(self):
        self.test_user.groups.add(self.test_group)
        self.grant_access(
            obj=self.test_user, permission=permission_user_edit
        )
        # User access alone is not enough; the view is keyed on the group.
        self._clear_events()
        response = self._request_test_group_user_add_remove_get_view()
        self.assertNotContains(
            response=response, text=str(self.test_user),
            status_code=404
        )
        self.assertNotContains(
            response=response, text=str(self.test_group),
            status_code=404
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_user_add_remove_get_view_with_group_access(self):
        self.test_user.groups.add(self.test_group)
        self.grant_access(
            obj=self.test_group, permission=permission_group_edit
        )
        self._clear_events()
        response = self._request_test_group_user_add_remove_get_view()
        # Page renders, group shown, but user stays hidden without
        # user-edit access.
        self.assertNotContains(
            response=response, text=str(self.test_user),
            status_code=200
        )
        self.assertContains(
            response=response, text=str(self.test_group),
            status_code=200
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_user_add_remove_get_view_with_full_access(self):
        self.test_user.groups.add(self.test_group)
        self.grant_access(
            obj=self.test_user, permission=permission_user_edit
        )
        self.grant_access(
            obj=self.test_group, permission=permission_group_edit
        )
        self._clear_events()
        response = self._request_test_group_user_add_remove_get_view()
        # Both objects visible with full access.
        self.assertContains(
            response=response, text=str(self.test_user),
            status_code=200
        )
        self.assertContains(
            response=response, text=str(self.test_group),
            status_code=200
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_user_add_view_no_permission(self):
        self._clear_events()
        response = self._request_test_group_user_add_view()
        self.assertEqual(response.status_code, 404)
        self.assertTrue(
            self.test_user not in self.test_group.user_set.all()
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_user_add_view_with_user_access(self):
        self.grant_access(
            obj=self.test_user, permission=permission_user_edit
        )
        self._clear_events()
        response = self._request_test_group_user_add_view()
        self.assertEqual(response.status_code, 404)
        self.assertTrue(
            self.test_user not in self.test_group.user_set.all()
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_user_add_view_with_group_access(self):
        self.grant_access(
            obj=self.test_group, permission=permission_group_edit
        )
        self._clear_events()
        response = self._request_test_group_user_add_view()
        # 200: form re-renders, membership unchanged without user access.
        self.assertEqual(response.status_code, 200)
        self.assertTrue(
            self.test_user not in self.test_group.user_set.all()
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_user_add_view_with_full_access(self):
        self.grant_access(
            obj=self.test_user, permission=permission_user_edit
        )
        self.grant_access(
            obj=self.test_group, permission=permission_group_edit
        )
        self._clear_events()
        response = self._request_test_group_user_add_view()
        self.assertEqual(response.status_code, 302)
        self.assertTrue(
            self.test_user in self.test_group.user_set.all()
        )
        # One group_edited event, with the added user as action object.
        events = self._get_test_events()
        self.assertEqual(events.count(), 1)
        self.assertEqual(events[0].action_object, self.test_user)
        self.assertEqual(events[0].actor, self._test_case_user)
        self.assertEqual(events[0].target, self.test_group)
        self.assertEqual(events[0].verb, event_group_edited.id)
    def test_group_user_remove_view_no_permission(self):
        self.test_user.groups.add(self.test_group)
        self._clear_events()
        response = self._request_test_group_user_remove_view()
        self.assertEqual(response.status_code, 404)
        self.assertTrue(
            self.test_user in self.test_group.user_set.all()
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_user_remove_view_with_user_access(self):
        self.test_user.groups.add(self.test_group)
        self.grant_access(
            obj=self.test_user, permission=permission_user_edit
        )
        self._clear_events()
        response = self._request_test_group_user_remove_view()
        self.assertEqual(response.status_code, 404)
        self.assertTrue(
            self.test_user in self.test_group.user_set.all()
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_user_remove_view_with_group_access(self):
        self.test_user.groups.add(self.test_group)
        self.grant_access(
            obj=self.test_group, permission=permission_group_edit
        )
        self._clear_events()
        response = self._request_test_group_user_remove_view()
        # 200: form re-renders, user stays a member without user access.
        self.assertEqual(response.status_code, 200)
        self.assertTrue(
            self.test_user in self.test_group.user_set.all()
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_user_remove_view_with_full_access(self):
        self.test_user.groups.add(self.test_group)
        self.grant_access(
            obj=self.test_user, permission=permission_user_edit
        )
        self.grant_access(
            obj=self.test_group, permission=permission_group_edit
        )
        self._clear_events()
        response = self._request_test_group_user_remove_view()
        self.assertEqual(response.status_code, 302)
        self.assertTrue(
            self.test_user not in self.test_group.user_set.all()
        )
        # One group_edited event, with the removed user as action object.
        events = self._get_test_events()
        self.assertEqual(events.count(), 1)
        self.assertEqual(events[0].action_object, self.test_user)
        self.assertEqual(events[0].actor, self._test_case_user)
        self.assertEqual(events[0].target, self.test_group)
        self.assertEqual(events[0].verb, event_group_edited.id)
class SuperUserGroupAddRemoveViewTestCase(
    GroupTestMixin, GroupUserViewTestMixin, GenericViewTestCase
):
    """Same membership views as above, but the target user is a superuser.

    Key difference: even with BOTH access grants the add/remove POSTs
    return 200 and membership is left unchanged — superusers appear to be
    excluded from group membership editing (no events are emitted either).
    """
    def setUp(self):
        super().setUp()
        self._create_test_group()
        self._create_test_superuser()
        # Alias so the shared request helpers operate on the superuser.
        self.test_user = self.test_superuser
    def test_group_user_add_remove_get_view_no_permission(self):
        self.test_user.groups.add(self.test_group)
        self._clear_events()
        response = self._request_test_group_user_add_remove_get_view()
        # 404: neither object visible without any access.
        self.assertNotContains(
            response=response, text=str(self.test_user),
            status_code=404
        )
        self.assertNotContains(
            response=response, text=str(self.test_group),
            status_code=404
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_user_add_remove_get_view_with_user_access(self):
        self.test_user.groups.add(self.test_group)
        self.grant_access(
            obj=self.test_user, permission=permission_user_edit
        )
        self._clear_events()
        response = self._request_test_group_user_add_remove_get_view()
        self.assertNotContains(
            response=response, text=str(self.test_user),
            status_code=404
        )
        self.assertNotContains(
            response=response, text=str(self.test_group),
            status_code=404
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_user_add_remove_get_view_with_group_access(self):
        self.test_user.groups.add(self.test_group)
        self.grant_access(
            obj=self.test_group, permission=permission_group_edit
        )
        self._clear_events()
        response = self._request_test_group_user_add_remove_get_view()
        # Group shown; superuser hidden without user access.
        self.assertNotContains(
            response=response, text=str(self.test_user),
            status_code=200
        )
        self.assertContains(
            response=response, text=str(self.test_group),
            status_code=200
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_user_add_remove_get_view_with_full_access(self):
        self.test_user.groups.add(self.test_group)
        self.grant_access(
            obj=self.test_user, permission=permission_user_edit
        )
        self.grant_access(
            obj=self.test_group, permission=permission_group_edit
        )
        self._clear_events()
        response = self._request_test_group_user_add_remove_get_view()
        # NOTE(review): with full access the page renders but the
        # superuser is still NOT listed — presumably filtered from the
        # membership widget; confirm against the view queryset.
        self.assertNotContains(
            response=response, text=str(self.test_user),
            status_code=200
        )
        self.assertContains(
            response=response, text=str(self.test_group),
            status_code=200
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_user_add_view_no_permission(self):
        self._clear_events()
        response = self._request_test_group_user_add_view()
        self.assertEqual(response.status_code, 404)
        self.assertTrue(
            self.test_user not in self.test_group.user_set.all()
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_user_add_view_with_user_access(self):
        self.grant_access(
            obj=self.test_user, permission=permission_user_edit
        )
        self._clear_events()
        response = self._request_test_group_user_add_view()
        self.assertEqual(response.status_code, 404)
        self.assertTrue(
            self.test_user not in self.test_group.user_set.all()
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_user_add_view_with_group_access(self):
        self.grant_access(
            obj=self.test_group, permission=permission_group_edit
        )
        self._clear_events()
        response = self._request_test_group_user_add_view()
        self.assertEqual(response.status_code, 200)
        self.assertTrue(
            self.test_user not in self.test_group.user_set.all()
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_user_add_view_with_full_access(self):
        self.grant_access(
            obj=self.test_user, permission=permission_user_edit
        )
        self.grant_access(
            obj=self.test_group, permission=permission_group_edit
        )
        self._clear_events()
        response = self._request_test_group_user_add_view()
        # 200 (not 302): adding a superuser is refused even with full
        # access — membership and events unchanged.
        self.assertEqual(response.status_code, 200)
        self.assertTrue(
            self.test_user not in self.test_group.user_set.all()
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_user_remove_view_no_permission(self):
        self.test_user.groups.add(self.test_group)
        self._clear_events()
        response = self._request_test_group_user_remove_view()
        self.assertEqual(response.status_code, 404)
        self.assertTrue(
            self.test_user in self.test_group.user_set.all()
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_user_remove_view_with_user_access(self):
        self.test_user.groups.add(self.test_group)
        self.grant_access(
            obj=self.test_user, permission=permission_user_edit
        )
        self._clear_events()
        response = self._request_test_group_user_remove_view()
        self.assertEqual(response.status_code, 404)
        self.assertTrue(
            self.test_user in self.test_group.user_set.all()
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_user_remove_view_with_group_access(self):
        self.test_user.groups.add(self.test_group)
        self.grant_access(
            obj=self.test_group, permission=permission_group_edit
        )
        self._clear_events()
        response = self._request_test_group_user_remove_view()
        self.assertEqual(response.status_code, 200)
        self.assertTrue(
            self.test_user in self.test_group.user_set.all()
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
    def test_group_user_remove_view_with_full_access(self):
        self.test_user.groups.add(self.test_group)
        self.grant_access(
            obj=self.test_user, permission=permission_user_edit
        )
        self.grant_access(
            obj=self.test_group, permission=permission_group_edit
        )
        self._clear_events()
        response = self._request_test_group_user_remove_view()
        # 200 (not 302): removing a superuser is refused even with full
        # access — the superuser stays a member and no event fires.
        self.assertEqual(response.status_code, 200)
        self.assertTrue(
            self.test_user in self.test_group.user_set.all()
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
class MetadataLookupIntegrationTestCase(
    MetadataTypeTestMixin, GenericDocumentViewTestCase
):
    """Integration test: the ``{{ groups }}`` metadata lookup template
    must render the available groups as <option> entries in the document
    metadata edit form."""
    def setUp(self):
        super().setUp()
        self._create_test_metadata_type()
        # Attach the metadata type to the test document type so documents
        # of that type can carry it.
        self.test_document_type.metadata.create(
            metadata_type=self.test_metadata_type
        )
    def test_group_list_lookup_render(self):
        # Configure the lookup so valid values come from the group list.
        self.test_metadata_type.lookup = '{{ groups }}'
        self.test_metadata_type.save()
        self.test_document.metadata.create(
            metadata_type=self.test_metadata_type
        )
        self.grant_access(
            obj=self.test_document,
            permission=permission_document_metadata_edit
        )
        self.grant_access(
            obj=self.test_metadata_type,
            permission=permission_document_metadata_edit
        )
        self._clear_events()
        response = self.get(
            viewname='metadata:metadata_edit', kwargs={
                'document_id': self.test_document.pk
            }
        )
        # The test case's own group must appear as a select option.
        self.assertContains(
            response=response, text='<option value="{}">{}</option>'.format(
                self._test_case_group.name, self._test_case_group.name
            ), status_code=200
        )
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
| 30.178918
| 77
| 0.66492
| 2,594
| 21,759
| 5.180802
| 0.039322
| 0.098445
| 0.065779
| 0.0599
| 0.915544
| 0.905052
| 0.890915
| 0.884069
| 0.869484
| 0.86688
| 0
| 0.011162
| 0.246519
| 21,759
| 720
| 78
| 30.220833
| 0.808539
| 0
| 0
| 0.766284
| 0
| 0
| 0.003447
| 0.002022
| 0
| 0
| 0
| 0
| 0.226054
| 1
| 0.072797
| false
| 0
| 0.015326
| 0
| 0.095785
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7edb9cca4a7e8e7d010ed1b49b2eea660884d36c
| 2,279
|
py
|
Python
|
cdisutilstest/data/editAccount.py
|
uc-cdis/cdisutils-test
|
623fea1cc339c8623f99590c8603ed1368b8890d
|
[
"Apache-2.0"
] | null | null | null |
cdisutilstest/data/editAccount.py
|
uc-cdis/cdisutils-test
|
623fea1cc339c8623f99590c8603ed1368b8890d
|
[
"Apache-2.0"
] | 12
|
2017-05-22T18:31:50.000Z
|
2020-10-26T15:57:30.000Z
|
cdisutilstest/data/editAccount.py
|
uc-cdis/cdisutils-test
|
623fea1cc339c8623f99590c8603ed1368b8890d
|
[
"Apache-2.0"
] | 1
|
2019-02-18T19:38:51.000Z
|
2019-02-18T19:38:51.000Z
|
def _edit_account_response(status_code):
    """Build one canned editAccount API response with the given status."""
    return {
        "status_code": status_code,
        "text": {
            "responseData": {},
            "responseHeader": {
                "now": 1492630700324,
                "requestId": "WPe8rAoQgF4AADVcyb0AAAAv",
                "status": "ok",
            },
            "responseStatus": "ok",
        },
    }


# Mock responses for the editAccount endpoint, keyed by the (unordered)
# set of request parameters.  All succeed except the id=14 readOnly case.
values = {
    frozenset(("id=72", "vaultUserPermissions%5B2%5D=readOnly")):
        _edit_account_response("200"),
    frozenset(("vaultUserPermissions%5B274%5D=disabled", "id=95")):
        _edit_account_response("200"),
    frozenset(("vaultUserPermissions%5B274%5D=disabled", "id=12")):
        _edit_account_response("200"),
    frozenset(("vaultUserPermissions%5B274%5D=readOnly", "id=72")):
        _edit_account_response("200"),
    frozenset(("vaultUserPermissions%5B274%5D=readOnly", "id=14")):
        _edit_account_response("500"),
    frozenset(("vaultUserPermissions%5B274%5D=disabled", "id=72")):
        _edit_account_response("200"),
}
| 30.386667
| 69
| 0.436156
| 127
| 2,279
| 7.779528
| 0.204724
| 0.060729
| 0.182186
| 0.200405
| 0.928138
| 0.928138
| 0.928138
| 0.928138
| 0.928138
| 0.928138
| 0
| 0.112
| 0.396665
| 2,279
| 74
| 70
| 30.797297
| 0.606545
| 0
| 0
| 0.635135
| 0
| 0
| 0.386134
| 0.162352
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ada54bf7cfffee95e466585d20978488896e3bb5
| 147
|
bzl
|
Python
|
defs.bzl
|
codebyravi/angular-samples
|
356e691c12f61ec481df41b4bcf331828bc0f196
|
[
"MIT"
] | null | null | null |
defs.bzl
|
codebyravi/angular-samples
|
356e691c12f61ec481df41b4bcf331828bc0f196
|
[
"MIT"
] | 3
|
2021-05-11T23:23:53.000Z
|
2022-02-13T21:09:30.000Z
|
defs.bzl
|
codebyravi/angular-samples
|
356e691c12f61ec481df41b4bcf331828bc0f196
|
[
"MIT"
] | null | null | null |
# Re-export the workspace dependency macro from //:package.bzl under its
# public name, so consumers load it from this defs.bzl entry point.
load("//:package.bzl", _angular_samples_dependencies = "angular_samples_dependencies")
angular_samples_dependencies = _angular_samples_dependencies
| 73.5
| 86
| 0.870748
| 15
| 147
| 7.866667
| 0.4
| 0.474576
| 0.881356
| 0.838983
| 0.881356
| 0.881356
| 0.881356
| 0.881356
| 0.881356
| 0
| 0
| 0
| 0.040816
| 147
| 2
| 87
| 73.5
| 0.836879
| 0
| 0
| 0
| 0
| 0
| 0.283784
| 0.189189
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
adc746e3b1490a18bc4c21db310a434696b9a578
| 4,799
|
py
|
Python
|
Ex_3/km.py
|
coffeerr/Data_Mining
|
132c475b0cbc8be22b1a3100ed879acae6e7325c
|
[
"MIT"
] | null | null | null |
Ex_3/km.py
|
coffeerr/Data_Mining
|
132c475b0cbc8be22b1a3100ed879acae6e7325c
|
[
"MIT"
] | null | null | null |
Ex_3/km.py
|
coffeerr/Data_Mining
|
132c475b0cbc8be22b1a3100ed879acae6e7325c
|
[
"MIT"
] | null | null | null |
from numpy import *
import matplotlib.pyplot as plt
# Load a dataset from a local file.
def loadDataSet(fileName):
    """Read a tab-separated file of floats into a list of float rows.

    Args:
        fileName: path to a text file whose lines hold tab-separated
            float fields.

    Returns:
        A list of ``[float, ...]`` rows, one per input line.
    """
    dataMat = []
    # Close the file deterministically instead of leaking the handle.
    with open(fileName) as fr:
        for line in fr:
            # BUGFIX: the original split on '\n', which on a stripped line
            # always yields a single element, so indexing curLine[1] raised
            # IndexError.  Split on tabs, matching the companion
            # implementation later in this file.
            curLine = line.strip().split('\t')
            # Convert every field (not just the first two) to float;
            # backward compatible for the original two-column data.
            dataMat.append([float(token) for token in curLine])
    return dataMat
# Euclidean distance computation.
def distEclud(vecA, vecB):
    """Return the Euclidean (L2) distance between two same-shaped vectors."""
    diff = vecA - vecB
    return sqrt(power(diff, 2).sum())
# Centroid generation: draw each feature uniformly between the column's
# minimum and maximum observed value.
def randCent(dataSet, k):
    """Return a (k, n) matrix of random centroids inside the data's bounds.

    Each centroid column is sampled independently so it matches the data
    matrix's feature layout and can be used in vector arithmetic with it.
    """
    numFeatures = shape(dataSet)[1]
    centroids = mat(zeros((k, numFeatures)))
    for col in range(numFeatures):
        colMin = min(dataSet[:, col])
        colSpan = float(max(dataSet[:, col]) - colMin)
        # One uniform draw per centroid for this feature column.
        centroids[:, col] = mat(colMin + colSpan * random.rand(k, 1))
    return centroids
# Returns the centroid matrix and per-sample cluster assignment info.
def kMeans(dataSet, k, distMeas=distEclud, createCent=randCent):
    """Classic k-means: iterate assign-then-update until no sample moves.

    Returns (centroids, clusterAssment) where clusterAssment row i holds
    [assigned centroid index, squared distance to it] for sample i.
    """
    m = shape(dataSet)[0]
    clusterAssment = mat(zeros((m, 2)))  # per-sample record: (centroid index, squared distance)
    centroids = createCent(dataSet, k)
    clusterChanged = True
    while clusterChanged:
        clusterChanged = False  # exit once no assignment changed in a full pass
        for i in range(m):
            minDist = inf;
            minIndex = -1
            for j in range(k):  # compare this sample against every centroid
                distJI = distMeas(centroids[j, :], dataSet[i, :])  # distance computation
                if distJI < minDist:
                    minDist = distJI;
                    minIndex = j
            if clusterAssment[i, 0] != minIndex:  # sample i moved: keep iterating
                clusterChanged = True
            clusterAssment[i, :] = minIndex, minDist ** 2  # record both facts for this sample
        # print(centroids)
        for cent in range(k):  # recompute each centroid
            # print(dataSet[nonzero(clusterAssment[:,0] == cent)[0]]) # nonzero returns indices of the True samples
            ptsInClust = dataSet[nonzero(clusterAssment[:, 0].A == cent)[0]]  # all samples assigned to this centroid
            centroids[cent, :] = mean(ptsInClust, axis=0)  # replace centroid with the column-wise mean
    return centroids, clusterAssment
# Demo driver: cluster 'data.txt' into 4 groups and plot the result.
datMat = mat(loadDataSet('data.txt'))
myCentroids, clustAssing = kMeans(datMat, 4)
print(myCentroids)
print(clustAssing)
fig = plt.figure()
ax = fig.add_subplot(111)
# Centroids drawn in red on top of the raw data points.
ax.scatter(myCentroids[:, 0].flatten().A[0], myCentroids[:, 1].flatten().A[0], color='r', s=60)
ax.scatter(datMat[:, 0].flatten().A[0], datMat[:, 1].flatten().A[0])
plt.show()
from numpy import *
import matplotlib.pyplot as plt
# Load a dataset from a local file.
def loadDataSet(fileName):
    """Read a tab-separated file of floats into a list of float rows.

    Args:
        fileName: path to a text file with tab-separated float fields.

    Returns:
        A list of ``[float, ...]`` rows, one per input line.
    """
    dataMat = []
    # FIX: close the file deterministically instead of leaking the handle.
    with open(fileName) as fr:
        for line in fr:
            curLine = line.strip().split('\t')
            fltLine = list(map(float, curLine))  # convert every field to float
            dataMat.append(fltLine)
    return dataMat
# Euclidean distance computation.
def distEclud(vecA, vecB):
    """Compute the L2 distance between two vectors of identical shape."""
    difference = vecA - vecB
    # Element-wise square via multiply (matrix ** would mean matrix power).
    return sqrt(multiply(difference, difference).sum())
# Centroid generation: uniform random values between each column's
# minimum and maximum.
def randCent(dataSet, k):
    """Create k random centroids shaped like the data's feature rows."""
    featureCount = shape(dataSet)[1]
    # Centroids must share the data's column layout for vector arithmetic.
    centroids = mat(zeros((k, featureCount)))
    for j in range(featureCount):
        minJ = min(dataSet[:, j])
        rangeJ = float(max(dataSet[:, j]) - minJ)
        randomColumn = random.rand(k, 1)
        centroids[:, j] = mat(minJ + rangeJ * randomColumn)
    return centroids
# 返回 中心点矩阵和聚类信息
def kMeans(dataSet, k, distMeas=distEclud, createCent=randCent):
    """Cluster *dataSet* into *k* groups with Lloyd's algorithm.

    Returns the (k, n) centroid matrix and an (m, 2) assignment matrix
    holding, per sample, the assigned centroid index and the squared
    distance to that centroid.
    """
    n_samples = shape(dataSet)[0]
    clusterAssment = mat(zeros((n_samples, 2)))  # (centroid index, squared distance)
    centroids = createCent(dataSet, k)
    changed = True
    while changed:
        changed = False  # flips back to True whenever any sample moves
        for i in range(n_samples):
            # distance from sample i to every current centroid
            dists = [distMeas(centroids[j, :], dataSet[i, :]) for j in range(k)]
            best = min(range(k), key=lambda j: dists[j])
            if clusterAssment[i, 0] != best:
                changed = True
            clusterAssment[i, :] = best, dists[best] ** 2
        for cent in range(k):
            # rows currently assigned to this centroid (nonzero -> their indices)
            members = dataSet[nonzero(clusterAssment[:, 0].A == cent)[0]]
            centroids[cent, :] = mean(members, axis=0)  # column-wise mean
    return centroids, clusterAssment
# --- demo: cluster ./data.txt into 4 groups and visualize the result ---
datMat = mat(loadDataSet('./data.txt'))
myCentroids, clustAssing = kMeans(datMat, 4)  # k=4 clusters
print(myCentroids)
print(clustAssing)
fig = plt.figure()
ax = fig.add_subplot(111)
# centroids in red with larger markers; raw samples plotted underneath
ax.scatter(myCentroids[:, 0].flatten().A[0], myCentroids[:, 1].flatten().A[0], color='r', s=60)
ax.scatter(datMat[:, 0].flatten().A[0], datMat[:, 1].flatten().A[0])
plt.show()
| 34.278571
| 95
| 0.611794
| 543
| 4,799
| 5.403315
| 0.235727
| 0.019087
| 0.02454
| 0.014997
| 0.931152
| 0.931152
| 0.931152
| 0.931152
| 0.931152
| 0.931152
| 0
| 0.015569
| 0.250469
| 4,799
| 139
| 96
| 34.52518
| 0.800111
| 0.172744
| 0
| 0.903846
| 0
| 0
| 0.006115
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.038462
| 0.019231
| 0.192308
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
adff7b9b05375b2d98eb56ac57e32d92947aa431
| 91
|
py
|
Python
|
good/functionNestedFunctionsAndScopes.py
|
Alberto42/Interpreter
|
a56c4d905672572734a8470ef607b66727489f15
|
[
"BSD-3-Clause"
] | null | null | null |
good/functionNestedFunctionsAndScopes.py
|
Alberto42/Interpreter
|
a56c4d905672572734a8470ef607b66727489f15
|
[
"BSD-3-Clause"
] | null | null | null |
good/functionNestedFunctionsAndScopes.py
|
Alberto42/Interpreter
|
a56c4d905672572734a8470ef607b66727489f15
|
[
"BSD-3-Clause"
] | null | null | null |
def f(x) {
y = 3
def f(x) {
y = 2
}
f(42)
return y
}
y = f(42)
| 9.1
| 14
| 0.307692
| 17
| 91
| 1.647059
| 0.470588
| 0.285714
| 0.357143
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 0.527473
| 91
| 10
| 15
| 9.1
| 0.511628
| 0
| 0
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bc07d650c4957e9b50f4ffd5ce0427d549d1b66c
| 12,151
|
py
|
Python
|
nitorch/tools/registration/losses/robust.py
|
balbasty/nitorch
|
d30c3125a8a66ea1434f2b39ed03338afd9724b4
|
[
"MIT"
] | 46
|
2020-07-31T10:14:05.000Z
|
2022-03-24T12:51:46.000Z
|
nitorch/tools/registration/losses/robust.py
|
balbasty/nitorch
|
d30c3125a8a66ea1434f2b39ed03338afd9724b4
|
[
"MIT"
] | 36
|
2020-10-06T19:01:38.000Z
|
2022-02-03T18:07:35.000Z
|
nitorch/tools/registration/losses/robust.py
|
balbasty/nitorch
|
d30c3125a8a66ea1434f2b39ed03338afd9724b4
|
[
"MIT"
] | 6
|
2021-01-05T14:59:05.000Z
|
2021-11-18T18:26:45.000Z
|
from nitorch.core import utils, py
from .base import OptimizationLoss
from .mse import weighted_precision, mse
def irls_laplace_reweight(moving, fixed, lam=1, joint=False, eps=1e-5,
                          dim=None, mask=None):
    """Update iteratively reweighted least-squares weights for l1

    Parameters
    ----------
    moving : ([B], K, *spatial) tensor
        Moving image
    fixed : ([B], K, *spatial) tensor
        Fixed image
    lam : float or ([B], K|1, [*spatial]) tensor_like, default=1
        Inverse-squared scale parameter of the Laplace distribution.
        (equivalent to Gaussian noise precision)
    joint : bool, default=False
        Pool the weighted squared residuals across channels before
        reweighting.
    eps : float, default=1e-5
        Lower bound on the residual magnitude (avoids division by zero).
    dim : int, default=`fixed.dim() - 1`
        Number of spatial dimensions
    mask : ([B], K|1, *spatial) tensor, optional
        Voxels with mask == 0 get a zero weight.

    Returns
    -------
    weights : (..., K|1, *spatial) tensor
        IRLS weights
    """
    if lam is None:
        lam = 1
    fixed, moving, lam = utils.to_max_backend(fixed, moving, lam)
    if mask is not None:
        mask = mask.to(fixed.device)
    dim = dim or (fixed.dim() - 1)
    if lam.dim() <= 2:
        if lam.dim() == 0:
            lam = lam.flatten()
        lam = utils.unsqueeze(lam, -1, dim)  # pad spatial dimensions
    weights = (moving - fixed).square_().mul_(lam)
    if mask is not None:
        weights = weights.mul_(mask)
    if joint:
        # FIX: `keepdim` is the canonical PyTorch keyword; the numpy-style
        # `keepdims` alias is rejected by older torch versions.
        weights = weights.sum(dim=-dim-1, keepdim=True)
    weights = weights.sqrt_().clamp_min_(eps).reciprocal_()
    if mask is not None:
        weights = weights.masked_fill_(mask == 0, 0)
    return weights
def irls_tukey_reweight(moving, fixed, lam=1, c=4.685, joint=False, dim=None,
                        mask=None):
    """Update iteratively reweighted least-squares weights for Tukey's biweight

    Parameters
    ----------
    moving : ([B], K, *spatial) tensor
        Moving image
    fixed : ([B], K, *spatial) tensor
        Fixed image
    lam : float or ([B], K|1, [*spatial]) tensor_like, default=1
        Equivalent to Gaussian noise precision
        (used to standardize the residuals)
    c : float, default=4.685
        Tukey's threshold.
        Approximately equal to a number of standard deviations above
        which the loss is capped.
    joint : bool, default=False
        Pool the weighted squared residuals across channels before
        reweighting.
    dim : int, default=`fixed.dim() - 1`
        Number of spatial dimensions
    mask : ([B], K|1, *spatial) tensor, optional
        Voxels with mask == 0 do not contribute to the residuals.

    Returns
    -------
    weights : (..., K|1, *spatial) tensor
        IRLS weights
    """
    if lam is None:
        lam = 1
    c = c * c  # threshold applies to *squared* standardized residuals
    fixed, moving, lam = utils.to_max_backend(fixed, moving, lam)
    if mask is not None:
        mask = mask.to(fixed.device)
    dim = dim or (fixed.dim() - 1)
    if lam.dim() <= 2:
        if lam.dim() == 0:
            lam = lam.flatten()
        lam = utils.unsqueeze(lam, -1, dim)  # pad spatial dimensions
    weights = (moving - fixed).square_().mul_(lam)
    if mask is not None:
        weights = weights.mul_(mask)
    if joint:
        # `keepdim` is the canonical PyTorch keyword (`keepdims` breaks on
        # older torch versions)
        weights = weights.sum(dim=-dim-1, keepdim=True)
    zeromsk = weights > c  # residuals beyond the threshold get zero weight
    weights = weights.div_(-c).add_(1).square()
    # BUG FIX: the original `weights[zeromsk].zero_()` zeroed a *copy* —
    # boolean advanced indexing returns a new tensor — so the cap was never
    # applied. Write through the mask instead.
    weights = weights.masked_fill_(zeromsk, 0)
    return weights
class MAD(OptimizationLoss):
    """Median absolute deviation (using IRLS)"""

    order = 2  # Hessian defined

    def __init__(self, lam=None, joint=False, dim=None):
        """
        Parameters
        ----------
        lam : (K|1,) tensor_like
            Precision
        joint : bool, default=False
            Pool residuals across channels when reweighting.
        dim : int, default=`fixed.dim() - 1`
            Number of spatial dimensions
        """
        super().__init__()
        self.lam = lam
        self.dim = dim
        self.joint = joint

    def loss(self, moving, fixed, **kwargs):
        """Compute the [weighted] mse (* 0.5)

        Parameters
        ----------
        moving : ([B], K, *spatial) tensor
            Moving image
        fixed : ([B], K, *spatial) tensor
            Fixed image

        Returns
        -------
        ll : () tensor
            Loss
        """
        lam = kwargs.pop('lam', self.lam)
        dim = kwargs.pop('dim', self.dim)
        joint = kwargs.pop('joint', self.joint)
        mask = kwargs.pop('mask', None)
        dim = dim or (fixed.dim() - 1)
        nvox = py.prod(fixed.shape[-dim:])
        recompute_lam = lam is None
        if lam is None:
            lam = weighted_precision(moving, fixed, dim=dim, weights=mask)
        weights = irls_laplace_reweight(moving, fixed, lam=lam, joint=joint,
                                        dim=dim, mask=mask)
        if mask is not None:
            weights *= mask
        lll = 0
        if recompute_lam:
            # refine the precision estimate with the IRLS weights
            lam = weighted_precision(moving, fixed, weights, dim=dim)
            lll = -0.5 * lam.log().sum()  # mse: no need to divide by voxels
        lam = lam * weights
        llx = mse(moving, fixed, dim=dim, lam=lam, grad=False, hess=False,
                  **kwargs)
        llw = weights[weights > 1e-9].reciprocal_().sum().div_(2*nvox)
        return llx + llw + lll

    def loss_grad(self, moving, fixed, **kwargs):
        """Compute the [weighted] mse (* 0.5)

        Parameters
        ----------
        moving : ([B], K, *spatial) tensor
            Moving image
        fixed : ([B], K, *spatial) tensor
            Fixed image

        Returns
        -------
        ll : () tensor
            Loss
        grad : ([B], K, *spatial) tensor
            Gradient
        """
        lam = kwargs.pop('lam', self.lam)
        dim = kwargs.pop('dim', self.dim)
        joint = kwargs.pop('joint', self.joint)
        mask = kwargs.pop('mask', None)
        dim = dim or (fixed.dim() - 1)
        nvox = py.prod(fixed.shape[-dim:])
        recompute_lam = lam is None
        if lam is None:
            lam = weighted_precision(moving, fixed, dim=dim, weights=mask)
        weights = irls_laplace_reweight(moving, fixed, lam=lam, joint=joint,
                                        dim=dim, mask=mask)
        if mask is not None:
            weights *= mask
        lll = 0
        if recompute_lam:
            lam = weighted_precision(moving, fixed, weights, dim=dim)
            lll = -0.5 * lam.log().sum()  # mse: no need to divide by voxels
        lam = lam * weights
        llx, g = mse(moving, fixed, dim=dim, lam=lam, grad=True, hess=False)
        llw = weights[weights > 1e-9].reciprocal_().sum().div_(2*nvox)
        return llx + llw + lll, g

    def loss_grad_hess(self, moving, fixed, **kwargs):
        """Compute the [weighted] mse (* 0.5)

        Parameters
        ----------
        moving : ([B], K, *spatial) tensor
            Moving image
        fixed : ([B], K, *spatial) tensor
            Fixed image

        Returns
        -------
        ll : () tensor
            Loss
        grad : ([B], K, *spatial) tensor
            Gradient
        hess : ([B], K, *spatial) tensor
            Diagonal Hessian
        """
        lam = kwargs.pop('lam', self.lam)
        dim = kwargs.pop('dim', self.dim)
        joint = kwargs.pop('joint', self.joint)
        mask = kwargs.pop('mask', None)
        dim = dim or (fixed.dim() - 1)
        nvox = py.prod(fixed.shape[-dim:])
        recompute_lam = lam is None
        if lam is None:
            # FIX: pass weights=mask like loss()/loss_grad() do, so the
            # initial precision estimate also respects the mask.
            lam = weighted_precision(moving, fixed, dim=dim, weights=mask)
        weights = irls_laplace_reweight(moving, fixed, lam=lam, joint=joint,
                                        dim=dim, mask=mask)
        if mask is not None:
            weights *= mask
        lll = 0
        if recompute_lam:
            lam = weighted_precision(moving, fixed, weights, dim=dim)
            lll = -0.5 * lam.log().sum()  # mse: no need to divide by voxels
        lam = lam * weights
        llx, g, h = mse(moving, fixed, dim=dim, lam=lam, grad=True, hess=True)
        llw = weights[weights > 1e-9].reciprocal_().sum().div_(2*nvox)
        return llx + llw + lll, g, h
class Tukey(OptimizationLoss):
    """Tukey's biweight loss (using IRLS)"""

    order = 2  # Hessian defined

    def __init__(self, lam=None, c=4.685, joint=False, dim=None):
        """
        Parameters
        ----------
        lam : (K|1,) tensor_like
            Precision
        c : float, default=4.685
            Tukey's threshold (standardized-residual units).
        joint : bool, default=False
            Pool residuals across channels when reweighting.
        dim : int, default=`fixed.dim() - 1`
            Number of spatial dimensions
        """
        super().__init__()
        self.lam = lam
        self.dim = dim
        self.joint = joint
        self.c = c

    def loss(self, moving, fixed, **kwargs):
        """Compute the [weighted] mse (* 0.5)

        Parameters
        ----------
        moving : ([B], K, *spatial) tensor
            Moving image
        fixed : ([B], K, *spatial) tensor
            Fixed image

        Returns
        -------
        ll : () tensor
            Loss
        """
        lam = kwargs.pop('lam', self.lam)
        dim = kwargs.pop('dim', self.dim)
        joint = kwargs.pop('joint', self.joint)
        c = kwargs.pop('c', self.c)
        mask = kwargs.pop('mask', None)
        dim = dim or (fixed.dim() - 1)
        nvox = py.prod(fixed.shape[-dim:])
        weights = irls_tukey_reweight(moving, fixed, lam=lam, c=c, joint=joint,
                                      dim=dim, mask=mask)
        if mask is not None:
            weights *= mask
        lll = 0
        if lam is None:
            # estimate the precision from the data when not provided
            lam = weighted_precision(moving, fixed, weights, dim=dim)
            lll = -0.5 * lam.log().sum()  # mse: no need to divide by voxels
        lam = lam * weights
        # NOTE(review): unlike MAD.loss, leftover **kwargs are not forwarded
        # to mse here — confirm whether that asymmetry is intentional.
        llx = mse(moving, fixed, dim=dim, lam=lam, grad=False, hess=False)
        llw = weights[weights > 1e-9].reciprocal_().sum().div_(2*nvox)
        return llx + llw + lll

    def loss_grad(self, moving, fixed, **kwargs):
        """Compute the [weighted] mse (* 0.5)

        Parameters
        ----------
        moving : ([B], K, *spatial) tensor
            Moving image
        fixed : ([B], K, *spatial) tensor
            Fixed image

        Returns
        -------
        ll : () tensor
            Loss
        grad : ([B], K, *spatial) tensor
            Gradient
        """
        lam = kwargs.pop('lam', self.lam)
        dim = kwargs.pop('dim', self.dim)
        joint = kwargs.pop('joint', self.joint)
        c = kwargs.pop('c', self.c)
        mask = kwargs.pop('mask', None)
        dim = dim or (fixed.dim() - 1)
        nvox = py.prod(fixed.shape[-dim:])
        weights = irls_tukey_reweight(moving, fixed, lam=lam, c=c, joint=joint,
                                      dim=dim, mask=mask)
        if mask is not None:
            weights *= mask
        lll = 0
        if lam is None:
            lam = weighted_precision(moving, fixed, weights, dim=dim)
            lll = -0.5 * lam.log().sum()  # mse: no need to divide by voxels
        lam = lam * weights
        llx, g = mse(moving, fixed, dim=dim, lam=lam, grad=True, hess=False)
        llw = weights[weights > 1e-9].reciprocal_().sum().div_(2*nvox)
        return llx + llw + lll, g

    def loss_grad_hess(self, moving, fixed, **kwargs):
        """Compute the [weighted] mse (* 0.5)

        Parameters
        ----------
        moving : ([B], K, *spatial) tensor
            Moving image
        fixed : ([B], K, *spatial) tensor
            Fixed image

        Returns
        -------
        ll : () tensor
            Loss
        grad : ([B], K, *spatial) tensor
            Gradient
        hess : ([B], K, *spatial) tensor
            Diagonal Hessian
        """
        lam = kwargs.pop('lam', self.lam)
        dim = kwargs.pop('dim', self.dim)
        joint = kwargs.pop('joint', self.joint)
        c = kwargs.pop('c', self.c)
        mask = kwargs.pop('mask', None)
        dim = dim or (fixed.dim() - 1)
        nvox = py.prod(fixed.shape[-dim:])
        weights = irls_tukey_reweight(moving, fixed, lam=lam, c=c, joint=joint,
                                      dim=dim, mask=mask)
        if mask is not None:
            weights *= mask
        lll = 0
        if lam is None:
            lam = weighted_precision(moving, fixed, weights, dim=dim)
            lll = -0.5 * lam.log().sum()  # mse: no need to divide by voxels
        lam = lam * weights
        llx, g, h = mse(moving, fixed, dim=dim, lam=lam, grad=True, hess=True)
        llw = weights[weights > 1e-9].reciprocal_().sum().div_(2*nvox)
        return llx + llw + lll, g, h
| 32.489305
| 79
| 0.520369
| 1,489
| 12,151
| 4.186702
| 0.096038
| 0.031761
| 0.033205
| 0.055342
| 0.909849
| 0.89862
| 0.888354
| 0.880494
| 0.880494
| 0.880494
| 0
| 0.012542
| 0.34384
| 12,151
| 374
| 80
| 32.489305
| 0.769347
| 0.268208
| 0
| 0.882979
| 0
| 0
| 0.011765
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053191
| false
| 0
| 0.015957
| 0
| 0.132979
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
70ac2b857489fb779bac94e54882d819e1118bbd
| 1,482
|
py
|
Python
|
tests/components/checkboxes/test_checkboxes.py
|
quis/govuk-frontend-jinja
|
1aab34d77cebad91a3001cda654b3177cd0201fd
|
[
"MIT"
] | null | null | null |
tests/components/checkboxes/test_checkboxes.py
|
quis/govuk-frontend-jinja
|
1aab34d77cebad91a3001cda654b3177cd0201fd
|
[
"MIT"
] | null | null | null |
tests/components/checkboxes/test_checkboxes.py
|
quis/govuk-frontend-jinja
|
1aab34d77cebad91a3001cda654b3177cd0201fd
|
[
"MIT"
] | null | null | null |
def test_checkboxes(env, similar, template, expected):
    """Default checkboxes component renders markup similar to the fixture."""
    rendered = env.from_string(template).render()
    assert similar(rendered, expected)
def test_checkboxes_with_id_and_name(env, similar, template, expected):
    """Checkboxes with explicit id/name render the expected markup."""
    rendered = env.from_string(template).render()
    assert similar(rendered, expected)
def test_checkboxes_with_hints_on_items(env, similar, template, expected):
    """Checkboxes with per-item hints render the expected markup."""
    rendered = env.from_string(template).render()
    assert similar(rendered, expected)
def test_checkboxes_with_disabled_item(env, similar, template, expected):
    """Checkboxes with a disabled item render the expected markup."""
    rendered = env.from_string(template).render()
    assert similar(rendered, expected)
def test_checkboxes_with_legend_as_page_heading(env, similar, template, expected):
    """Checkboxes using the legend as page heading render as expected."""
    rendered = env.from_string(template).render()
    assert similar(rendered, expected)
def test_checkboxes_with_a_medium_legend(env, similar, template, expected):
    """Checkboxes with a medium-size legend render the expected markup."""
    rendered = env.from_string(template).render()
    assert similar(rendered, expected)
def test_checkboxes_without_fieldset(env, similar, template, expected):
    """Checkboxes rendered without a fieldset match the fixture."""
    rendered = env.from_string(template).render()
    assert similar(rendered, expected)
def test_checkboxes_with_all_fieldset_attributes(env, similar, template, expected):
    """Checkboxes with every fieldset attribute set render as expected."""
    rendered = env.from_string(template).render()
    assert similar(rendered, expected)
def test_checkboxes_with_error_message(env, similar, template, expected):
    """Checkboxes carrying an error message render the expected markup."""
    rendered = env.from_string(template).render()
    assert similar(rendered, expected)
| 33.681818
| 83
| 0.776653
| 182
| 1,482
| 6.065934
| 0.175824
| 0.244565
| 0.138587
| 0.211957
| 0.882246
| 0.882246
| 0.882246
| 0.882246
| 0.882246
| 0.882246
| 0
| 0
| 0.126181
| 1,482
| 43
| 84
| 34.465116
| 0.85251
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
70b4ac66fe3089f9c0089c2dd3651d24e0f3a5cd
| 21,916
|
py
|
Python
|
tests/test_indexer.py
|
mosuka/basilisk
|
abe2de265af234bd78053ccc974ca4218a25cad3
|
[
"Apache-2.0"
] | 17
|
2018-10-19T02:36:41.000Z
|
2022-01-29T01:02:50.000Z
|
tests/test_indexer.py
|
mosuka/basilisk
|
abe2de265af234bd78053ccc974ca4218a25cad3
|
[
"Apache-2.0"
] | 23
|
2018-10-28T16:54:00.000Z
|
2019-02-15T17:09:25.000Z
|
tests/test_indexer.py
|
mosuka/basilisk
|
abe2de265af234bd78053ccc974ca4218a25cad3
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Minoru Osuka
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
import zipfile
from logging import ERROR, Formatter, getLogger, INFO, NOTSET, StreamHandler
from tempfile import TemporaryDirectory
from time import sleep
import yaml
from prometheus_client.core import CollectorRegistry
from pysyncobj import SyncObjConf
from whoosh.filedb.filestore import FileStorage
from cockatrice import NAME
from cockatrice.index_config import IndexConfig
from cockatrice.indexer import Indexer
from tests import get_free_port
class TestIndexer(unittest.TestCase):
    def setUp(self):
        """Start a fresh single-node Indexer backed by a temporary directory."""
        self.temp_dir = TemporaryDirectory()
        # example configs/documents shipped next to the test package
        self.example_dir = os.path.normpath(os.path.join(os.path.dirname(__file__), '../example'))
        host = '0.0.0.0'
        port = get_free_port()
        seed_addr = None  # no seed node: bootstrap a standalone cluster
        conf = SyncObjConf(
            fullDumpFile=self.temp_dir.name + '/index.zip',
            logCompactionMinTime=300,
            dynamicMembershipChange=True
        )
        data_dir = self.temp_dir.name + '/index'
        grpc_port = get_free_port()
        grpc_max_workers = 10
        http_port = get_free_port()
        # main logger: handler accepts INFO but the logger only emits ERROR
        logger = getLogger(NAME)
        log_handler = StreamHandler()
        logger.setLevel(ERROR)
        log_handler.setLevel(INFO)
        log_format = Formatter('%(asctime)s - %(levelname)s - %(pathname)s:%(lineno)d - %(message)s')
        log_handler.setFormatter(log_format)
        logger.addHandler(log_handler)
        # HTTP access logger: bare message lines, no level filtering
        http_logger = getLogger(NAME + '_http')
        http_log_handler = StreamHandler()
        http_logger.setLevel(NOTSET)
        http_log_handler.setLevel(INFO)
        http_log_format = Formatter('%(message)s')
        http_log_handler.setFormatter(http_log_format)
        http_logger.addHandler(http_log_handler)
        metrics_registry = CollectorRegistry()  # fresh metrics registry per test
        self.indexer = Indexer(host=host, port=port, seed_addr=seed_addr, conf=conf, data_dir=data_dir,
                               grpc_port=grpc_port, grpc_max_workers=grpc_max_workers, http_port=http_port,
                               logger=logger, http_logger=http_logger, metrics_registry=metrics_registry)
    def tearDown(self):
        """Stop the indexer before removing its on-disk state."""
        self.indexer.stop()
        self.temp_dir.cleanup()
def test_create_index(self):
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_dict = yaml.safe_load(file_obj.read())
index_config = IndexConfig(index_config_dict)
# create index
index_name = 'test_file_index'
self.indexer.create_index(index_name, index_config, sync=True)
self.assertTrue(self.indexer.is_index_exist(index_name))
def test_delete_index(self):
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_dict = yaml.safe_load(file_obj.read())
index_config = IndexConfig(index_config_dict)
# create index
index_name = 'test_file_index'
self.indexer.create_index(index_name, index_config, sync=True)
self.assertTrue(self.indexer.is_index_exist(index_name))
# delete index
self.indexer.delete_index(index_name, sync=True)
self.assertFalse(self.indexer.is_index_exist(index_name))
def test_get_index(self):
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_dict = yaml.safe_load(file_obj.read())
index_config = IndexConfig(index_config_dict)
# create index
index_name = 'test_file_index'
self.indexer.create_index(index_name, index_config, sync=True)
self.assertTrue(self.indexer.is_index_exist(index_name))
i = self.indexer.get_index(index_name)
self.assertTrue(isinstance(i.storage, FileStorage))
# # close index
# self.index_core.close_index(index_name)
def test_put_document(self):
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_dict = yaml.safe_load(file_obj.read())
index_config = IndexConfig(index_config_dict)
# create index
index_name = 'test_file_index'
self.indexer.create_index(index_name, index_config, sync=True)
self.assertTrue(self.indexer.is_index_exist(index_name))
test_doc_id = '1'
with open(self.example_dir + '/doc1.json', 'r', encoding='utf-8') as file_obj:
test_fields = json.loads(file_obj.read(), encoding='utf-8')
# put document
count = self.indexer.put_document(index_name, test_doc_id, test_fields, sync=True)
self.assertEqual(1, count)
def test_commit(self):
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_dict = yaml.safe_load(file_obj.read())
index_config = IndexConfig(index_config_dict)
# create index
index_name = 'test_file_index'
self.indexer.create_index(index_name, index_config, sync=True)
self.assertTrue(self.indexer.is_index_exist(index_name))
test_doc_id = '1'
with open(self.example_dir + '/doc1.json', 'r', encoding='utf-8') as file_obj:
test_fields = json.loads(file_obj.read(), encoding='utf-8')
# put document
count = self.indexer.put_document(index_name, test_doc_id, test_fields, sync=True)
self.assertEqual(1, count)
# commit
success = self.indexer.commit_index(index_name, sync=True)
self.assertTrue(success)
# get document
results_page = self.indexer.get_document(index_name, test_doc_id)
self.assertEqual(1, results_page.total)
def test_rollback(self):
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_dict = yaml.safe_load(file_obj.read())
index_config = IndexConfig(index_config_dict)
# create index
index_name = 'test_file_index'
self.indexer.create_index(index_name, index_config, sync=True)
self.assertTrue(self.indexer.is_index_exist(index_name))
test_doc_id = '1'
with open(self.example_dir + '/doc1.json', 'r', encoding='utf-8') as file_obj:
test_fields = json.loads(file_obj.read(), encoding='utf-8')
# put document
count = self.indexer.put_document(index_name, test_doc_id, test_fields, sync=True)
self.assertEqual(1, count)
# rollback
success = self.indexer.rollback_index(index_name, sync=True)
self.assertTrue(success)
# # get document
# results_page = self.index_core.get_document(index_name, test_doc_id)
# self.assertEqual(0, results_page.total)
def test_optimize(self):
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_dict = yaml.safe_load(file_obj.read())
index_config = IndexConfig(index_config_dict)
# create index
index_name = 'test_file_index'
self.indexer.create_index(index_name, index_config, sync=True)
self.assertTrue(self.indexer.is_index_exist(index_name))
test_doc_id = '1'
with open(self.example_dir + '/doc1.json', 'r', encoding='utf-8') as file_obj:
test_fields = json.loads(file_obj.read(), encoding='utf-8')
# put document
count = self.indexer.put_document(index_name, test_doc_id, test_fields, sync=True)
self.assertEqual(1, count)
# commit
success = self.indexer.commit_index(index_name, sync=True)
self.assertTrue(success)
# optimize
success = self.indexer.optimize_index(index_name, sync=True)
self.assertTrue(success)
def test_get_document(self):
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_dict = yaml.safe_load(file_obj.read())
index_config = IndexConfig(index_config_dict)
# create index
index_name = 'test_file_index'
self.indexer.create_index(index_name, index_config, sync=True)
self.assertTrue(self.indexer.is_index_exist(index_name))
test_doc_id = '1'
with open(self.example_dir + '/doc1.json', 'r', encoding='utf-8') as file_obj:
test_fields = json.loads(file_obj.read(), encoding='utf-8')
# put document
count = self.indexer.put_document(index_name, test_doc_id, test_fields, sync=True)
self.assertEqual(1, count)
# commit
success = self.indexer.commit_index(index_name, sync=True)
self.assertTrue(success)
# get document
results_page = self.indexer.get_document(index_name, test_doc_id)
self.assertEqual(1, results_page.total)
def test_delete_document(self):
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_dict = yaml.safe_load(file_obj.read())
index_config = IndexConfig(index_config_dict)
# create index
index_name = 'test_file_index'
self.indexer.create_index(index_name, index_config, sync=True)
self.assertTrue(self.indexer.is_index_exist(index_name))
test_doc_id = '1'
with open(self.example_dir + '/doc1.json', 'r', encoding='utf-8') as file_obj:
test_fields = json.loads(file_obj.read(), encoding='utf-8')
# put document
count = self.indexer.put_document(index_name, test_doc_id, test_fields, sync=True)
self.assertEqual(1, count)
# commit
success = self.indexer.commit_index(index_name, sync=True)
self.assertTrue(success)
# get document
results_page = self.indexer.get_document(index_name, test_doc_id)
self.assertEqual(1, results_page.total)
# delete document
count = self.indexer.delete_document(index_name, test_doc_id, sync=True)
self.assertEqual(1, count)
# commit
success = self.indexer.commit_index(index_name, sync=True)
self.assertTrue(success)
# get document
results_page = self.indexer.get_document(index_name, test_doc_id)
self.assertEqual(0, results_page.total)
def test_put_documents(self):
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_dict = yaml.safe_load(file_obj.read())
index_config = IndexConfig(index_config_dict)
# create index
index_name = 'test_file_index'
self.indexer.create_index(index_name, index_config, sync=True)
self.assertTrue(self.indexer.is_index_exist(index_name))
with open(self.example_dir + '/bulk_put.json', 'r', encoding='utf-8') as file_obj:
test_docs = json.loads(file_obj.read(), encoding='utf-8')
# put documents in bulk
count = self.indexer.put_documents(index_name, test_docs, sync=True)
self.assertEqual(5, count)
# commit
success = self.indexer.commit_index(index_name, sync=True)
self.assertTrue(success)
results_page = self.indexer.get_document(index_name, '1')
self.assertEqual(1, results_page.total)
results_page = self.indexer.get_document(index_name, '2')
self.assertEqual(1, results_page.total)
results_page = self.indexer.get_document(index_name, '3')
self.assertEqual(1, results_page.total)
results_page = self.indexer.get_document(index_name, '4')
self.assertEqual(1, results_page.total)
results_page = self.indexer.get_document(index_name, '5')
self.assertEqual(1, results_page.total)
def test_delete_documents(self):
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_dict = yaml.safe_load(file_obj.read())
index_config = IndexConfig(index_config_dict)
# create index
index_name = 'test_file_index'
self.indexer.create_index(index_name, index_config, sync=True)
self.assertTrue(self.indexer.is_index_exist(index_name))
with open(self.example_dir + '/bulk_put.json', 'r', encoding='utf-8') as file_obj:
test_docs = json.loads(file_obj.read(), encoding='utf-8')
# put documents in bulk
count = self.indexer.put_documents(index_name, test_docs, sync=True)
self.assertEqual(5, count)
# commit
success = self.indexer.commit_index(index_name, sync=True)
self.assertTrue(success)
results_page = self.indexer.get_document(index_name, '1')
self.assertEqual(1, results_page.total)
results_page = self.indexer.get_document(index_name, '2')
self.assertEqual(1, results_page.total)
results_page = self.indexer.get_document(index_name, '3')
self.assertEqual(1, results_page.total)
results_page = self.indexer.get_document(index_name, '4')
self.assertEqual(1, results_page.total)
results_page = self.indexer.get_document(index_name, '5')
self.assertEqual(1, results_page.total)
with open(self.example_dir + '/bulk_delete.json', 'r', encoding='utf-8') as file_obj:
test_docs = json.loads(file_obj.read(), encoding='utf-8')
# delete documents in bulk
count = self.indexer.delete_documents(index_name, test_docs, sync=True)
self.assertEqual(5, count)
# commit
success = self.indexer.commit_index(index_name, sync=True)
self.assertTrue(success)
results_page = self.indexer.get_document(index_name, '1')
self.assertEqual(0, results_page.total)
results_page = self.indexer.get_document(index_name, '2')
self.assertEqual(0, results_page.total)
results_page = self.indexer.get_document(index_name, '3')
self.assertEqual(0, results_page.total)
results_page = self.indexer.get_document(index_name, '4')
self.assertEqual(0, results_page.total)
results_page = self.indexer.get_document(index_name, '5')
self.assertEqual(0, results_page.total)
def test_search_documents(self):
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_dict = yaml.safe_load(file_obj.read())
index_config = IndexConfig(index_config_dict)
# create file index
index_name = 'test_file_index'
self.indexer.create_index(index_name, index_config, sync=True)
self.assertTrue(self.indexer.is_index_exist(index_name))
# read documents
with open(self.example_dir + '/bulk_put.json', 'r', encoding='utf-8') as file_obj:
test_docs = json.loads(file_obj.read(), encoding='utf-8')
# put documents in bulk
count = self.indexer.put_documents(index_name, test_docs, sync=True)
self.assertEqual(5, count)
# commit
success = self.indexer.commit_index(index_name, sync=True)
self.assertTrue(success)
# search documents
page = self.indexer.search_documents(index_name, 'search', search_field='text', page_num=1, page_len=10)
self.assertEqual(5, page.total)
def test_snapshot_exists(self):
# snapshot exists
self.assertFalse(self.indexer.is_snapshot_exist())
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_dict = yaml.safe_load(file_obj.read())
index_config = IndexConfig(index_config_dict)
# create file index
index_name = 'test_file_index'
self.indexer.create_index(index_name, index_config, sync=True)
self.assertTrue(self.indexer.is_index_exist(index_name))
# read documents
with open(self.example_dir + '/bulk_put.json', 'r', encoding='utf-8') as file_obj:
test_docs = json.loads(file_obj.read(), encoding='utf-8')
# put documents in bulk
count = self.indexer.put_documents(index_name, test_docs, sync=True)
self.assertEqual(5, count)
# commit
success = self.indexer.commit_index(index_name, sync=True)
self.assertTrue(success)
# search documents
page = self.indexer.search_documents(index_name, 'search', search_field='text', page_num=1, page_len=10)
self.assertEqual(5, page.total)
# create snapshot
self.indexer.create_snapshot(sync=True)
sleep(1) # wait for snapshot file to be created
self.assertTrue(os.path.exists(self.indexer.get_snapshot_file_name()))
with zipfile.ZipFile(self.indexer.get_snapshot_file_name()) as f:
self.assertTrue('raft.bin' in f.namelist())
self.assertTrue('test_file_index_WRITELOCK' in f.namelist())
self.assertEqual(1,
len([n for n in f.namelist() if n.startswith('_test_file_index_') and n.endswith('.toc')]))
self.assertEqual(1,
len([n for n in f.namelist() if n.startswith('test_file_index_') and n.endswith('.seg')]))
# snapshot exists
self.assertTrue(True, self.indexer.is_snapshot_exist())
def test_create_snapshot(self):
# read index config
with open(self.example_dir + '/index_config.yaml', 'r', encoding='utf-8') as file_obj:
index_config_dict = yaml.safe_load(file_obj.read())
index_config = IndexConfig(index_config_dict)
# create file index
index_name = 'test_file_index'
self.indexer.create_index(index_name, index_config, sync=True)
self.assertTrue(self.indexer.is_index_exist(index_name))
with open(self.example_dir + '/bulk_put.json', 'r', encoding='utf-8') as file_obj:
test_docs = json.loads(file_obj.read(), encoding='utf-8')
# put documents in bulk
count = self.indexer.put_documents(index_name, test_docs, sync=True)
self.assertEqual(5, count)
# commit
success = self.indexer.commit_index(index_name, sync=True)
self.assertTrue(success)
# search documents
page = self.indexer.search_documents(index_name, 'search', search_field='text', page_num=1, page_len=10)
self.assertEqual(5, page.total)
# create snapshot
self.indexer.create_snapshot(sync=True)
sleep(5) # wait for snapshot file to be created
self.assertTrue(os.path.exists(self.indexer.get_snapshot_file_name()))
with zipfile.ZipFile(self.indexer.get_snapshot_file_name()) as f:
self.assertTrue('raft.bin' in f.namelist())
self.assertTrue(
0 < len([n for n in f.namelist() if n.startswith('_test_file_index_') and n.endswith('.toc')]))
self.assertTrue(
0 < len([n for n in f.namelist() if n.startswith('test_file_index_') and n.endswith('.seg')]))
self.assertTrue('test_file_index_WRITELOCK' in f.namelist())
self.assertTrue(self.indexer.get_index_config_file(index_name) in f.namelist())
def test_create_snapshot_ram(self):
# read index config
with open(self.example_dir + '/index_config_ram.yaml', 'r', encoding='utf-8') as file_obj:
index_config_dict = yaml.safe_load(file_obj.read())
index_config = IndexConfig(index_config_dict)
# create file index
index_name = 'test_file_index'
self.indexer.create_index(index_name, index_config, sync=True)
self.assertTrue(self.indexer.is_index_exist(index_name))
with open(self.example_dir + '/bulk_put.json', 'r', encoding='utf-8') as file_obj:
test_docs = json.loads(file_obj.read(), encoding='utf-8')
# put documents in bulk
count = self.indexer.put_documents(index_name, test_docs, sync=True)
self.assertEqual(5, count)
# commit
success = self.indexer.commit_index(index_name, sync=True)
self.assertTrue(success)
# search documents
page = self.indexer.search_documents(index_name, 'search', search_field='text', page_num=1, page_len=10)
self.assertEqual(5, page.total)
# create snapshot
self.indexer.create_snapshot(sync=True)
sleep(5) # wait for snapshot file to be created
self.assertTrue(os.path.exists(self.indexer.get_snapshot_file_name()))
with zipfile.ZipFile(self.indexer.get_snapshot_file_name()) as f:
self.assertTrue('raft.bin' in f.namelist())
self.assertEqual(1,
len([n for n in f.namelist() if n.startswith('_test_file_index_') and n.endswith('.toc')]))
self.assertEqual(1,
len([n for n in f.namelist() if n.startswith('test_file_index_') and n.endswith('.seg')]))
self.assertTrue(self.indexer.get_index_config_file(index_name) in f.namelist())
| 40.811918
| 120
| 0.663032
| 2,897
| 21,916
| 4.769762
| 0.074905
| 0.067086
| 0.047619
| 0.046172
| 0.83109
| 0.82255
| 0.814662
| 0.814662
| 0.809813
| 0.805109
| 0
| 0.008571
| 0.228053
| 21,916
| 536
| 121
| 40.88806
| 0.808192
| 0.088565
| 0
| 0.75
| 0
| 0.003049
| 0.063745
| 0.00478
| 0
| 0
| 0
| 0
| 0.262195
| 1
| 0.051829
| false
| 0
| 0.045732
| 0
| 0.10061
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
70bb29eaf37726c8604d6a6c316d0c701bbddd2a
| 319
|
py
|
Python
|
neuron_ml/core/public/__init__.py
|
fossabot/Neuron
|
ee8b328411bddb9c86675914b0e0b50250fb7ff9
|
[
"MIT"
] | 9
|
2018-12-18T06:19:09.000Z
|
2021-11-22T19:46:13.000Z
|
neuron_ml/core/public/__init__.py
|
fossabot/Neuron
|
ee8b328411bddb9c86675914b0e0b50250fb7ff9
|
[
"MIT"
] | 20
|
2018-11-23T16:09:04.000Z
|
2022-02-10T00:06:17.000Z
|
neuron_ml/core/public/__init__.py
|
fossabot/Neuron
|
ee8b328411bddb9c86675914b0e0b50250fb7ff9
|
[
"MIT"
] | 1
|
2019-02-25T11:58:20.000Z
|
2019-02-25T11:58:20.000Z
|
import neuron_ml.core.public.load
import neuron_ml.core.public.export
import neuron_ml.core.public.train
import neuron_ml.core.public.clean
import neuron_ml.core.public.classify
import neuron_ml.core.public.graph
import neuron_ml.core.public.labels
import neuron_ml.core.public.model
import neuron_ml.core.public.image
| 31.9
| 37
| 0.858934
| 54
| 319
| 4.907407
| 0.259259
| 0.407547
| 0.475472
| 0.611321
| 0.815094
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056426
| 319
| 9
| 38
| 35.444444
| 0.880399
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
70d82a31100f143270181754f5f20a72b4bca266
| 318
|
py
|
Python
|
PIP/Minor Assignment 9/A9Q10.py
|
ankitrajbiswal/SEM_5
|
db716e242e77149a4091e0e564356ddc724aeff0
|
[
"Apache-2.0"
] | null | null | null |
PIP/Minor Assignment 9/A9Q10.py
|
ankitrajbiswal/SEM_5
|
db716e242e77149a4091e0e564356ddc724aeff0
|
[
"Apache-2.0"
] | null | null | null |
PIP/Minor Assignment 9/A9Q10.py
|
ankitrajbiswal/SEM_5
|
db716e242e77149a4091e0e564356ddc724aeff0
|
[
"Apache-2.0"
] | 1
|
2022-03-02T05:07:39.000Z
|
2022-03-02T05:07:39.000Z
|
'''
try:
f = open('file1.txt', 'r')
except IOError:
print('Problem with Input Output...\n')
else:
print('No Problem with Input Output...')
'''
# Probe whether 'file1.txt' can be opened for writing (EAFP style):
# try the operation and report the outcome.
try:
    f = open('file1.txt', 'w')
except IOError:
    print('Problem with Input Output...\n')
else:
    print('No Problem with Input Output...')
    # FIX: the file handle was never closed (resource leak).
    f.close()
| 19.875
| 45
| 0.572327
| 42
| 318
| 4.333333
| 0.404762
| 0.241758
| 0.351648
| 0.483516
| 0.989011
| 0.813187
| 0.813187
| 0.813187
| 0.813187
| 0.813187
| 0
| 0.008264
| 0.238994
| 318
| 15
| 46
| 21.2
| 0.743802
| 0.455975
| 0
| 0
| 0
| 0
| 0.47973
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
70dcf593607dccac3e7a5ac83ed6915a273e2059
| 3,021
|
py
|
Python
|
wizer/tools/colors.py
|
pa3kDaWae/workoutizer
|
15501d0060711bbd8308642bc89b45c1442d4d0f
|
[
"MIT"
] | null | null | null |
wizer/tools/colors.py
|
pa3kDaWae/workoutizer
|
15501d0060711bbd8308642bc89b45c1442d4d0f
|
[
"MIT"
] | null | null | null |
wizer/tools/colors.py
|
pa3kDaWae/workoutizer
|
15501d0060711bbd8308642bc89b45c1442d4d0f
|
[
"MIT"
] | null | null | null |
# 36 distinct CSS color names used to tell plotted lines apart.
_BASE_COLORS = [
    "Red", "DodgerBlue", "LimeGreen", "Gold", "MediumSlateBlue", "Brown",
    "Olive", "Orange", "DarkGoldenRod", "Salmon", "Fuchsia", "Aqua",
    "LightSlateGray", "MediumBlue", "GreenYellow", "DarkRed", "DarkMagenta",
    "Khaki", "MediumSpringGreen", "OrangeRed", "DarkGreen", "LightPink",
    "DarkSlateBlue", "Yellow", "Turquoise", "SaddleBrown", "Maroon", "Ivory",
    "SpringGreen", "BlueViolet", "Coral", "Teal", "Navy",
    "LightGoldenRodYellow", "DarkOliveGreen", "Coral",
]

# FIX: the original pasted the same 36-name literal five times (180 entries).
# Tiling the base palette keeps the list contents and order identical while
# removing the copy/paste duplication.
lines_colors = _BASE_COLORS * 5
| 16.32973
| 27
| 0.520357
| 182
| 3,021
| 8.631868
| 0.203297
| 0.041375
| 0.070019
| 0.08275
| 0.992998
| 0.992998
| 0.992998
| 0.992998
| 0.992998
| 0.992998
| 0
| 0
| 0.299901
| 3,021
| 184
| 28
| 16.418478
| 0.74279
| 0
| 0
| 0.989011
| 0
| 0
| 0.516727
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cb813e9db5f927fbf0d9d65663f7f01de865dd0d
| 112
|
py
|
Python
|
test/regression/features/operators/notin.py
|
bjpop/blip
|
3d9105a44d1afb7bd007da3742fb19dc69372e10
|
[
"BSD-3-Clause"
] | 137
|
2015-02-13T21:03:23.000Z
|
2021-11-24T03:53:55.000Z
|
test/regression/features/operators/notin.py
|
bjpop/blip
|
3d9105a44d1afb7bd007da3742fb19dc69372e10
|
[
"BSD-3-Clause"
] | 2
|
2015-03-07T14:08:33.000Z
|
2015-10-13T02:00:40.000Z
|
test/regression/features/operators/notin.py
|
bjpop/blip
|
3d9105a44d1afb7bd007da3742fb19dc69372e10
|
[
"BSD-3-Clause"
] | 4
|
2015-05-03T22:07:27.000Z
|
2018-09-10T08:55:03.000Z
|
# Regression fixture for the "not in" operator, including the chained form:
# "a not in b not in c" evaluates like chained comparisons, i.e.
# (a not in b) and (b not in c).
print(2 not in [1,2,3])
print(2 not in [4,5])
print(1 not in [1] not in [[1]])
print(3 not in [1] not in [[1]])
| 22.4
| 32
| 0.5625
| 29
| 112
| 2.172414
| 0.275862
| 0.47619
| 0.47619
| 0.333333
| 0.380952
| 0.380952
| 0
| 0
| 0
| 0
| 0
| 0.144444
| 0.196429
| 112
| 4
| 33
| 28
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
cb9fc7e193833a6fbe7b0ac8689a4523d2c836ca
| 1,350
|
py
|
Python
|
tests/test_1941.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
tests/test_1941.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
tests/test_1941.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import pytest
"""
Test 1941. Check if All Characters Have Equal Number of Occurrences
"""
@pytest.fixture(scope="session")
def init_variables_1941():
    """Session-scoped fixture yielding a factory that returns one shared
    Solution instance for problem 1941."""
    # deferred import so collection works even if the solution module is absent
    from src.leetcode_1941_check_if_all_characters_have_equal_number_of_occurrences import (
        Solution,
    )

    solution = Solution()

    def _init_variables_1941():
        # always hand back the same Solution instance
        return solution

    yield _init_variables_1941
class TestClass1941:
    """Tests for LeetCode 1941: check whether all characters occur equally often."""

    def test_solution_0(self, init_variables_1941):
        # "abacbc": a, b and c each occur exactly twice -> True
        assert init_variables_1941().areOccurrencesEqual("abacbc")

    def test_solution_1(self, init_variables_1941):
        # "aaabb": a occurs 3 times, b occurs 2 times -> False
        assert not init_variables_1941().areOccurrencesEqual("aaabb")
#!/usr/bin/env python
# NOTE(review): everything from here down duplicates the block above verbatim
# (same fixture and class names are re-defined) -- looks like a copy/paste
# artifact; confirm and deduplicate.
import pytest

"""
Test 1941. Check if All Characters Have Equal Number of Occurrences
"""

@pytest.fixture(scope="session")
def init_variables_1941():
    """Session-scoped fixture yielding a factory for a shared Solution instance."""
    from src.leetcode_1941_check_if_all_characters_have_equal_number_of_occurrences import (
        Solution,
    )

    solution = Solution()

    def _init_variables_1941():
        return solution

    yield _init_variables_1941

class TestClass1941:
    """Tests for LeetCode 1941 (duplicate copy, see note above)."""

    def test_solution_0(self, init_variables_1941):
        # "abacbc": a, b and c each occur exactly twice -> True
        assert init_variables_1941().areOccurrencesEqual("abacbc")

    def test_solution_1(self, init_variables_1941):
        # "aaabb": unequal counts -> False
        assert not init_variables_1941().areOccurrencesEqual("aaabb")
| 22.131148
| 92
| 0.742222
| 166
| 1,350
| 5.674699
| 0.23494
| 0.193206
| 0.252654
| 0.059448
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0.07554
| 0.176296
| 1,350
| 60
| 93
| 22.5
| 0.771583
| 0.02963
| 0
| 0.933333
| 0
| 0
| 0.031088
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 1
| 0.266667
| false
| 0
| 0.133333
| 0.066667
| 0.533333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 10
|
cbd15281278905f7ba1a745c3aeaf19d8152c5c4
| 880
|
py
|
Python
|
atividade2/TarefasEmOrdemFila.py
|
mateus2810/atividadesIa
|
0ffc816c962889fb9e0b9635692d616e46a0d0c5
|
[
"Apache-2.0"
] | null | null | null |
atividade2/TarefasEmOrdemFila.py
|
mateus2810/atividadesIa
|
0ffc816c962889fb9e0b9635692d616e46a0d0c5
|
[
"Apache-2.0"
] | null | null | null |
atividade2/TarefasEmOrdemFila.py
|
mateus2810/atividadesIa
|
0ffc816c962889fb9e0b9635692d616e46a0d0c5
|
[
"Apache-2.0"
] | null | null | null |
# Questao 9
# Simulates a FIFO queue: each task is appended at the end of the list.
fila = []
print("Fila: ", fila)

# FIX: the original repeated the append/print pair ten times; this loop
# builds the same queue and prints exactly the same messages ("um" for the
# first insertion, "outro" for every later one).
tarefas = ["Tarefa E", "Tarefa I", "Tarefa C", "Tarefa F", "Tarefa B",
           "Tarefa H", "Tarefa J", "Tarefa A", "Tarefa D", "Tarefa G"]
for indice, tarefa in enumerate(tarefas):
    fila.append(tarefa)
    artigo = "um" if indice == 0 else "outro"
    print("Inserindo " + artigo + " elemento no final da fila: ", fila)
| 25.882353
| 58
| 0.718182
| 136
| 880
| 4.647059
| 0.169118
| 0.265823
| 0.189873
| 0.28481
| 0.931962
| 0.893987
| 0.893987
| 0.893987
| 0.893987
| 0.759494
| 0
| 0.00133
| 0.145455
| 880
| 33
| 59
| 26.666667
| 0.839096
| 0.010227
| 0
| 0.409091
| 0
| 0
| 0.589655
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 10
|
cbe05834900beeba456d8a0e304e9986ce96a600
| 28
|
py
|
Python
|
cursoEmVideo/Python/Mundo 1/Aulas/teste.py
|
VictorDG00/Cursos
|
b1411f3179ef17f128c883b0f5a56c2478de45e8
|
[
"MIT"
] | 2
|
2021-02-08T13:34:15.000Z
|
2021-02-08T19:43:42.000Z
|
cursoEmVideo/Python/Mundo 1/Aulas/teste.py
|
VictorDG00/Cursos
|
b1411f3179ef17f128c883b0f5a56c2478de45e8
|
[
"MIT"
] | null | null | null |
cursoEmVideo/Python/Mundo 1/Aulas/teste.py
|
VictorDG00/Cursos
|
b1411f3179ef17f128c883b0f5a56c2478de45e8
|
[
"MIT"
] | null | null | null |
# ** binds tighter than *, which binds tighter than +,
# so this is (3 * 5) + (4 ** 2) = 15 + 16 = 31
x = 3 * 5 + 4 ** 2
print(x)
| 9.333333
| 18
| 0.392857
| 7
| 28
| 1.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 0.357143
| 28
| 2
| 19
| 14
| 0.388889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
1df4b313c4cbbb0a5bdd17889019f3697d015d13
| 131
|
py
|
Python
|
simple_todo_list/views.py
|
1MahdiR/simple-todo-list
|
ef233da6daedd4971ce7ee8602f3fb7fdd1f7381
|
[
"MIT"
] | null | null | null |
simple_todo_list/views.py
|
1MahdiR/simple-todo-list
|
ef233da6daedd4971ce7ee8602f3fb7fdd1f7381
|
[
"MIT"
] | null | null | null |
simple_todo_list/views.py
|
1MahdiR/simple-todo-list
|
ef233da6daedd4971ce7ee8602f3fb7fdd1f7381
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
def main(req):
    """Render the main page template with an empty context."""
    return render(req, 'main.html', {})
| 16.375
| 39
| 0.732824
| 18
| 131
| 5.333333
| 0.666667
| 0.208333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.160305
| 131
| 7
| 40
| 18.714286
| 0.872727
| 0
| 0
| 0
| 0
| 0
| 0.069767
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
1df8277baf8fc297c4a4fb3ea08434409bfd14a6
| 34
|
py
|
Python
|
trips/float.py
|
dineshkumar2509/learning-python
|
e8af11ff0b396da4c3f2cfe21d14131bae4b2adb
|
[
"MIT"
] | 86
|
2015-06-13T16:53:55.000Z
|
2022-03-24T20:56:42.000Z
|
trips/float.py
|
pei-zheng-yi/learning-python
|
55e350dfe44cf04f7d4408e76e72d2f467bd42ce
|
[
"MIT"
] | 9
|
2015-05-27T07:52:44.000Z
|
2022-03-29T21:52:40.000Z
|
trips/float.py
|
pei-zheng-yi/learning-python
|
55e350dfe44cf04f7d4408e76e72d2f467bd42ce
|
[
"MIT"
] | 124
|
2015-12-10T01:17:18.000Z
|
2021-11-08T04:03:38.000Z
|
# 0.1 and 0.2 have no exact binary floating-point representation, so their
# sum is 0.30000000000000004 and the comparison prints False.
# FIX: the original ended with a C-style "//" comment, which Python parses
# as floor division -- ``print(...) // False`` is ``None // False`` and
# raises TypeError at runtime.
print(0.1 + 0.2 == 0.3)  # False
| 11.333333
| 32
| 0.470588
| 8
| 34
| 2
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 0.235294
| 34
| 2
| 33
| 17
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
3817811ce3c77ef3f68bfd15dc8807ca09f10e9a
| 175
|
py
|
Python
|
epson_projector/timeout.py
|
markbergsma/epson_projector
|
73dbb92a9f123d33afce49f698f3f359ce17bc6b
|
[
"MIT"
] | null | null | null |
epson_projector/timeout.py
|
markbergsma/epson_projector
|
73dbb92a9f123d33afce49f698f3f359ce17bc6b
|
[
"MIT"
] | null | null | null |
epson_projector/timeout.py
|
markbergsma/epson_projector
|
73dbb92a9f123d33afce49f698f3f359ce17bc6b
|
[
"MIT"
] | null | null | null |
from .const import TIMEOUT_TIMES, DEFAULT_TIMEOUT_TIME
def get_timeout(command, timeout_scale=1):
    """Return the timeout for *command*, scaled by *timeout_scale*.

    Falls back to DEFAULT_TIMEOUT_TIME for commands without a dedicated entry.
    """
    base_timeout = TIMEOUT_TIMES.get(command, DEFAULT_TIMEOUT_TIME)
    return base_timeout * timeout_scale
| 35
| 75
| 0.828571
| 25
| 175
| 5.44
| 0.52
| 0.176471
| 0.264706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006369
| 0.102857
| 175
| 4
| 76
| 43.75
| 0.859873
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
381cac2f13a63a973b62dc3a47e0cfa343433454
| 3,328
|
py
|
Python
|
cx_token_builders.py
|
jfitz/code-stat
|
dd2a13177f3ef03ab42123ef3cfcbbd062a2ae26
|
[
"MIT"
] | null | null | null |
cx_token_builders.py
|
jfitz/code-stat
|
dd2a13177f3ef03ab42123ef3cfcbbd062a2ae26
|
[
"MIT"
] | null | null | null |
cx_token_builders.py
|
jfitz/code-stat
|
dd2a13177f3ef03ab42123ef3cfcbbd062a2ae26
|
[
"MIT"
] | null | null | null |
from codestat_token import Token
from token_builders import TokenBuilder
# token reader for // comment
class SlashSlashCommentTokenBuilder(TokenBuilder):
  """Token builder for C-style '//' line comments (terminated by newline)."""

  @staticmethod
  def __escape_z__():
    # hook shared by every token builder in this file -- TODO confirm its purpose
    Token.__escape_z__()
    return 'Escape ?Z'

  def __init__(self):
    self.text = ''

  def get_tokens(self):
    """Return the accumulated text as a single non-operand 'comment' token."""
    if self.text is None:
      return None
    return [Token(self.text, 'comment', False)]

  def accept(self, candidate, c):
    """Return True if character c may extend the current candidate text."""
    # a line comment never spans a newline
    if c in ['\n', '\r']:
      return False
    # once '//' has been seen, accept everything to end of line
    if candidate.startswith('//'):
      return True
    # build the '//' prefix one slash at a time
    if candidate == '/':
      return c == '/'
    if candidate == '':
      return c == '/'
    return False

  def get_score(self, line_printable_tokens):
    """Score is the token length when the text is a valid '//' comment, else 0."""
    if self.text is None:
      return 0
    if self.text.startswith('//'):
      return len(self.text)
    return 0
# token reader for /// comment
class TripleSlashCommentTokenBuilder(TokenBuilder):
  """Token builder for '///' doc-comment lines (terminated by newline)."""

  @staticmethod
  def __escape_z__():
    # hook shared by every token builder in this file -- TODO confirm its purpose
    Token.__escape_z__()
    return 'Escape ?Z'

  def __init__(self):
    self.text = ''

  def get_tokens(self):
    """Return the accumulated text as a single non-operand 'comment' token."""
    if self.text is None:
      return None
    return [Token(self.text, 'comment', False)]

  def accept(self, candidate, c):
    """Return True if character c may extend the current candidate text."""
    # a line comment never spans a newline
    if c in ['\n', '\r']:
      return False
    # once '///' has been seen, accept everything to end of line
    if candidate.startswith('///'):
      return True
    # build the '///' prefix one slash at a time
    if candidate == '':
      return c == '/'
    if candidate == '/':
      return c == '/'
    if candidate == '//':
      return c == '/'
    return False

  def get_score(self, line_printable_tokens):
    """Score is the token length when the text is a valid '///' comment, else 0."""
    if self.text is None:
      return 0
    if self.text.startswith('///'):
      return len(self.text)
    return 0
# token reader for /* */ comment
class SlashStarCommentTokenBuilder(TokenBuilder):
  """Token builder for '/* ... */' block comments (may span multiple lines)."""

  @staticmethod
  def __escape_z__():
    # hook shared by every token builder in this file -- TODO confirm its purpose
    Token.__escape_z__()
    return 'Escape ?Z'

  def __init__(self):
    self.text = ''

  def get_tokens(self):
    """Return the accumulated text as a single non-operand 'comment' token."""
    if self.text is None:
      return None
    return [Token(self.text, 'comment', False)]

  def accept(self, candidate, c):
    """Return True if character c may extend the current candidate text."""
    # the first two characters must be '/' then '*'
    if len(candidate) == 0:
      return c == '/'
    if len(candidate) == 1:
      return c == '*'
    # keep accepting until the closing '*/' has been consumed
    return not candidate.endswith('*/')

  def get_score(self, line_printable_tokens):
    """Score is the token length only when the comment is properly opened and closed."""
    if self.text is None:
      return 0
    if self.text.startswith('/*') and self.text.endswith('*/'):
      return len(self.text)
    return 0
# token reader for <name> class identifier
class ClassTypeTokenBuilder(TokenBuilder):
  """Token builder for angle-bracketed type identifiers such as '<List<int>>'."""

  @staticmethod
  def __escape_z__():
    # hook shared by every token builder in this file -- TODO confirm its purpose
    Token.__escape_z__()
    return 'Escape ?Z'

  def __init__(self):
    self.text = ''

  def get_tokens(self):
    """Return the accumulated text as a single operand 'type' token."""
    if self.text is None:
      return None
    return [Token(self.text, 'type', True)]

  def accept(self, candidate, c):
    """Return True if character c may extend the current candidate text."""
    if len(candidate) == 0:
      return c == '<'
    # compute the '<'/'>' nesting depth of the candidate seen so far
    level = 0
    for ch in candidate:
      if ch == '<':
        level += 1
      if ch == '>' and level > 0:
        level -= 1
    # while brackets remain open, allow identifier and separator characters
    if level > 0:
      return c.isalpha() or c.isdigit() or c in "</\\ ,_.:*>'"
    return False

  def get_score(self, line_printable_tokens):
    """Score is the token length when brackets balance and wrap the whole text."""
    if self.text is None:
      return 0
    # re-check that '<' and '>' balance over the full text
    level = 0
    for ch in self.text:
      if ch == '<':
        level += 1
      if ch == '>':
        level -= 1
    if level != 0:
      return 0
    if self.text[0] == '<' and self.text[-1] == '>':
      return len(self.text)
    return 0
| 17.515789
| 63
| 0.582031
| 413
| 3,328
| 4.508475
| 0.128329
| 0.116004
| 0.064447
| 0.051557
| 0.821697
| 0.787863
| 0.738453
| 0.738453
| 0.738453
| 0.718045
| 0
| 0.009636
| 0.282752
| 3,328
| 189
| 64
| 17.608466
| 0.770423
| 0.038462
| 0
| 0.788136
| 0
| 0
| 0.035994
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.169492
| false
| 0
| 0.016949
| 0
| 0.576271
| 0.033898
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
38455e4f19d72e046db83faec26850f2ea125f26
| 3,688
|
py
|
Python
|
tests/test_library.py
|
hiousi/mopidy-radionet
|
7aa59f7fc954f0117bc6ae54f4a5e6c1b8c0da5d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_library.py
|
hiousi/mopidy-radionet
|
7aa59f7fc954f0117bc6ae54f4a5e6c1b8c0da5d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_library.py
|
hiousi/mopidy-radionet
|
7aa59f7fc954f0117bc6ae54f4a5e6c1b8c0da5d
|
[
"Apache-2.0"
] | null | null | null |
from unittest import mock
def test_browse_root(library):
    """The root directory exposes all eight top-level categories."""
    # FIX: dropped the un-Pythonic trailing semicolon.
    results = library.browse('radionet:root')
    assert 8 == len(results)
def test_browse_localstations(library):
    """Local stations can be browsed and the first entry is pageable."""
    # FIX: dropped trailing semicolon; removed the dead
    # "if results is not None" guard -- len(results) > 0 has already been
    # asserted (and len(None) would have raised before the guard anyway).
    results = library.browse('radionet:localstations')
    assert len(results) > 0
    page_uri = results[0].uri
    assert page_uri is not None
    results = library.browse(page_uri)
    assert len(results) > 0
def test_browse_topstations(library):
    """Top stations category returns at least one entry."""
    # FIX: dropped the un-Pythonic trailing semicolon.
    results = library.browse('radionet:topstations')
    assert len(results) > 0
def test_browse_genres(library):
    """Genres drill-down: category -> two sort modes -> page of stations."""
    # FIX: dropped trailing semicolon; removed dead "if results is not None"
    # guards (len(results) > 0 is asserted right before each one).
    results = library.browse('radionet:genres')
    assert len(results) > 0
    cat_uri = results[0].uri
    assert cat_uri is not None
    results = library.browse(cat_uri)
    assert len(results) == 2
    sort_uri = results[0].uri
    assert sort_uri is not None
    results = library.browse(sort_uri)
    assert len(results) > 0
    page_uri = results[0].uri
    assert page_uri is not None
    results = library.browse(page_uri)
    assert len(results) > 0
def test_browse_topics(library):
    """Topics drill-down: category -> two sort modes -> page of stations."""
    # FIX: dropped trailing semicolon; removed dead "if results is not None"
    # guards (len(results) > 0 is asserted right before each one).
    results = library.browse('radionet:topics')
    assert len(results) > 0
    cat_uri = results[0].uri
    assert cat_uri is not None
    results = library.browse(cat_uri)
    assert len(results) == 2
    sort_uri = results[0].uri
    assert sort_uri is not None
    results = library.browse(sort_uri)
    assert len(results) > 0
    page_uri = results[0].uri
    assert page_uri is not None
    results = library.browse(page_uri)
    assert len(results) > 0
def test_browse_languages(library):
    """Languages drill-down: category -> two sort modes -> page of stations."""
    # FIX: dropped trailing semicolon; removed dead "if results is not None"
    # guards (len(results) > 0 is asserted right before each one).
    results = library.browse('radionet:languages')
    assert len(results) > 0
    cat_uri = results[0].uri
    assert cat_uri is not None
    results = library.browse(cat_uri)
    assert len(results) == 2
    sort_uri = results[0].uri
    assert sort_uri is not None
    results = library.browse(sort_uri)
    assert len(results) > 0
    page_uri = results[0].uri
    assert page_uri is not None
    results = library.browse(page_uri)
    assert len(results) > 0
def test_browse_cities(library):
    """Cities drill-down: category -> two sort modes -> page of stations."""
    # FIX: dropped trailing semicolon; removed dead "if results is not None"
    # guards (len(results) > 0 is asserted right before each one).
    results = library.browse('radionet:cities')
    assert len(results) > 0
    cat_uri = results[0].uri
    assert cat_uri is not None
    results = library.browse(cat_uri)
    assert len(results) == 2
    sort_uri = results[0].uri
    assert sort_uri is not None
    results = library.browse(sort_uri)
    assert len(results) > 0
    page_uri = results[0].uri
    assert page_uri is not None
    results = library.browse(page_uri)
    assert len(results) > 0
def test_browse_countries(library):
    """Countries drill-down: category -> two sort modes -> page of stations."""
    # FIX: dropped trailing semicolon; removed dead "if results is not None"
    # guards (len(results) > 0 is asserted right before each one).
    results = library.browse('radionet:countries')
    assert len(results) > 0
    cat_uri = results[0].uri
    assert cat_uri is not None
    results = library.browse(cat_uri)
    assert len(results) == 2
    sort_uri = results[0].uri
    assert sort_uri is not None
    results = library.browse(sort_uri)
    assert len(results) > 0
    page_uri = results[0].uri
    assert page_uri is not None
    results = library.browse(page_uri)
    assert len(results) > 0
def test_browse_favorites(library):
    """Favorites directory contains exactly one entry."""
    # FIX: dropped the un-Pythonic trailing semicolon.
    results = library.browse('radionet:favorites')
    assert 1 == len(results)
| 25.79021
| 62
| 0.695228
| 562
| 3,688
| 4.44484
| 0.060498
| 0.108887
| 0.115292
| 0.122498
| 0.908727
| 0.782626
| 0.782626
| 0.770616
| 0.770616
| 0.770616
| 0
| 0.014271
| 0.220987
| 3,688
| 142
| 63
| 25.971831
| 0.855204
| 0
| 0
| 0.771739
| 0
| 0
| 0.041757
| 0.005965
| 0
| 0
| 0
| 0
| 0.445652
| 1
| 0.097826
| false
| 0
| 0.01087
| 0
| 0.108696
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
697f0ced5400feccd4266b758ac130bf69436319
| 221
|
py
|
Python
|
Examples/5.1.2 Message Publisher.py
|
wangyonghong/RabbitMQ-in-Depth
|
56a35c6359d500b7597daf1bb2185b4c451a572c
|
[
"BSD-3-Clause"
] | 111
|
2015-01-06T20:26:31.000Z
|
2022-03-14T13:17:12.000Z
|
Examples/5.1.2 Message Publisher.py
|
wangyonghong/RabbitMQ-in-Depth
|
56a35c6359d500b7597daf1bb2185b4c451a572c
|
[
"BSD-3-Clause"
] | 4
|
2018-06-15T20:35:36.000Z
|
2021-01-13T16:03:40.000Z
|
Examples/5.1.2 Message Publisher.py
|
wangyonghong/RabbitMQ-in-Depth
|
56a35c6359d500b7597daf1bb2185b4c451a572c
|
[
"BSD-3-Clause"
] | 43
|
2015-04-18T13:44:01.000Z
|
2022-03-14T13:17:13.000Z
|
import rabbitpy
# Publish ten 'go' messages to the default exchange on the local broker,
# then one final 'stop' message -- presumably a sentinel so the consumer
# knows the batch is complete (verify against the book's consumer example).
for iteration in range(10):
    rabbitpy.publish('amqp://guest:guest@localhost:5672/%2f', '', 'test-messages', 'go')
rabbitpy.publish('amqp://guest:guest@localhost:5672/%2f', '', 'test-messages', 'stop')
| 36.833333
| 88
| 0.692308
| 29
| 221
| 5.275862
| 0.586207
| 0.196078
| 0.248366
| 0.313725
| 0.732026
| 0.732026
| 0.732026
| 0.732026
| 0.732026
| 0.732026
| 0
| 0.059406
| 0.085973
| 221
| 5
| 89
| 44.2
| 0.69802
| 0
| 0
| 0
| 0
| 0
| 0.479638
| 0.334842
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
6992764fc4b851cf753983bafdd42e0564afce23
| 52
|
py
|
Python
|
appstoreconnect/__init__.py
|
Tuni/appstoreconnectapi
|
f0fbaf75d57aabfdd6f0f45b8b1119eebdaf6e6e
|
[
"MIT"
] | 1
|
2019-10-02T13:13:08.000Z
|
2019-10-02T13:13:08.000Z
|
appstoreconnect/__init__.py
|
Tuni/appstoreconnectapi
|
f0fbaf75d57aabfdd6f0f45b8b1119eebdaf6e6e
|
[
"MIT"
] | null | null | null |
appstoreconnect/__init__.py
|
Tuni/appstoreconnectapi
|
f0fbaf75d57aabfdd6f0f45b8b1119eebdaf6e6e
|
[
"MIT"
] | null | null | null |
from .api import Api
from .api import AppStoreState
| 17.333333
| 30
| 0.807692
| 8
| 52
| 5.25
| 0.5
| 0.333333
| 0.619048
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 52
| 2
| 31
| 26
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
69ae1ac59157145d4772b0ee39fbc6cacb579a7a
| 92
|
py
|
Python
|
pyBN/inference/map_exact/__init__.py
|
seuzmj/pyBN
|
ce7b6823f4e6c4f6f9b77e89f05de87ed486b349
|
[
"MIT"
] | 126
|
2016-01-17T22:59:08.000Z
|
2021-12-19T15:35:22.000Z
|
pyBN/inference/map_exact/__init__.py
|
levilentz/pyBN
|
ce7b6823f4e6c4f6f9b77e89f05de87ed486b349
|
[
"MIT"
] | 24
|
2016-01-21T20:11:03.000Z
|
2018-09-21T01:23:58.000Z
|
pyBN/inference/map_exact/__init__.py
|
levilentz/pyBN
|
ce7b6823f4e6c4f6f9b77e89f05de87ed486b349
|
[
"MIT"
] | 55
|
2016-05-27T00:46:54.000Z
|
2022-03-24T11:43:57.000Z
|
from pyBN.inference.map_exact.ilp_map import *
from pyBN.inference.map_exact.ve_map import *
| 46
| 46
| 0.836957
| 16
| 92
| 4.5625
| 0.5
| 0.219178
| 0.465753
| 0.547945
| 0.684932
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076087
| 92
| 2
| 47
| 46
| 0.858824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
69bad0f44e8574d974e60a05ca19f3f5cfca6c38
| 470
|
py
|
Python
|
src/files2.py
|
mpicbg-csbd/structured_N2V
|
82c2a0f98d354a1afeff2deab3b04fb7cfc4b21f
|
[
"BSD-3-Clause"
] | 13
|
2020-11-03T12:38:20.000Z
|
2022-03-20T01:32:02.000Z
|
src/files2.py
|
mpicbg-csbd/structured_N2V
|
82c2a0f98d354a1afeff2deab3b04fb7cfc4b21f
|
[
"BSD-3-Clause"
] | 1
|
2021-11-05T08:11:17.000Z
|
2022-01-21T22:17:59.000Z
|
src/files2.py
|
mpicbg-csbd/structured_N2V
|
82c2a0f98d354a1afeff2deab3b04fb7cfc4b21f
|
[
"BSD-3-Clause"
] | 3
|
2021-01-13T04:51:31.000Z
|
2021-10-06T08:59:33.000Z
|
## experiments x params x
# Maps each experiment output directory to the snakemake-style wildcard
# pattern of its per-run result table.
_E01 = "/lustre/projects/project-broaddus/denoise_experiments/flower/e01/"
wildcards = {
    _E01 + "n2v2/": "mask_{n}_{m}/table.csv",
    _E01 + "n2gt/": "d{a}/table.csv",
    _E01 + "nlm/":  "d{a}/table.csv",
    _E01 + "bm4d/": "d{a}/table.csv",
}
| 39.166667
| 110
| 0.759574
| 63
| 470
| 5.571429
| 0.365079
| 0.17094
| 0.262108
| 0.34188
| 0.820513
| 0.820513
| 0.820513
| 0.820513
| 0.820513
| 0.635328
| 0
| 0.027027
| 0.055319
| 470
| 11
| 111
| 42.727273
| 0.763514
| 0.046809
| 0
| 0
| 0
| 0
| 0.776018
| 0.680995
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
69e871e31621ab5577e578c068f3d77b10d4e44a
| 42,890
|
py
|
Python
|
Saliency-detection-in-360-video_TORCH/data.py
|
ustundag/2D-3D-Semantics
|
6f79be0082e2bfd6b7940c2314972a603e55f201
|
[
"Apache-2.0"
] | 72
|
2018-09-09T02:11:58.000Z
|
2022-02-24T09:51:09.000Z
|
Saliency-detection-in-360-video_TORCH/data.py
|
ustundag/2D-3D-Semantics
|
6f79be0082e2bfd6b7940c2314972a603e55f201
|
[
"Apache-2.0"
] | 8
|
2018-09-13T16:48:34.000Z
|
2021-12-21T18:13:16.000Z
|
Saliency-detection-in-360-video_TORCH/data.py
|
ustundag/2D-3D-Semantics
|
6f79be0082e2bfd6b7940c2314972a603e55f201
|
[
"Apache-2.0"
] | 18
|
2018-11-29T07:11:59.000Z
|
2020-06-16T09:06:23.000Z
|
import numpy as np
import torch as th
import torch.utils.data as data
from PIL import Image
import os
import pickle
from scipy import signal
from sconv.functional.sconv import spherical_conv
from tqdm import tqdm
import numbers
import cv2
from functools import lru_cache
from random import Random
class VRSaliency(data.Dataset):
    """Saliency dataset over VR video frames.

    Each item is ``(image, target)``: the frame image and a
    ``(1, frame_h, frame_w)`` saliency map built by dropping impulses at the
    recorded gaze points and smoothing them with a spherical Gaussian
    convolution.
    """

    def __init__(self, root, frame_h, frame_w, frame_interval=1, video_chosen=None, video_exclude=None, transform=None,
                 gaussian_sigma=np.pi / 20, kernel_rad=np.pi/7, kernel_size=(30, 60), cache_gt=True, rnd_seed=367643):
        self.frame_interval = frame_interval
        self.transform = transform
        self.frame_h = frame_h
        self.frame_w = frame_w
        self.gaussian_sigma = gaussian_sigma
        self.kernel_size = kernel_size
        self.kernel_rad = kernel_rad
        self.cache_gt = cache_gt
        rnd = Random(rnd_seed)
        # load target: vinfo maps video id -> {frame id -> list of gaze points}
        self.vinfo = pickle.load(open(os.path.join(root, 'vinfo.pkl'), 'rb'))
        # load image paths: every sub-directory of root is one video
        vset = set()
        for vid in tqdm(os.listdir(root), desc='scanning dir'):
            if os.path.isdir(os.path.join(root, vid)):
                vset.add(vid)
        assert set(self.vinfo.keys()) == vset
        print('{} videos found.'.format(len(vset)))
        if isinstance(video_chosen, set):
            vset = vset.intersection(video_chosen)
        elif isinstance(video_chosen, numbers.Integral):
            # fix: random.sample() no longer accepts a set (support removed in
            # Python 3.11); sorting first also makes the draw deterministic
            # for a fixed seed.
            vset = set(rnd.sample(sorted(vset), k=video_chosen))
        if video_exclude:
            vset = vset - set(video_exclude)
        print('{} videos chosen.'.format(len(vset)))
        self.data = []
        self.target = []
        for vid in tqdm(vset, desc='video'):
            obj_path = os.path.join(root, vid)
            fcnt = 0
            for frame in tqdm(os.listdir(obj_path), desc='frame({})'.format(vid)):
                if frame.endswith('.jpg'):
                    fid = frame[:-4]
                    if fid not in self.vinfo[vid].keys():
                        # fix: the original print passed no .format() args, so
                        # the '{}' placeholders were emitted literally
                        print('warn: video {}, frame {} have no gt, abandoned.'.format(vid, fid))
                        continue
                    fcnt += 1
                    # keep every frame_interval-th annotated frame
                    if fcnt >= frame_interval:
                        self.data.append(os.path.join(obj_path, frame))
                        self.target.append(self.vinfo[vid][fid])
                        fcnt = 0

    def __getitem__(self, item):
        """Return ``(image, saliency_map)`` for the given index."""
        img = Image.open(open(self.data[item], 'rb'))
        if self.transform:
            img = self.transform(img)
        else:
            img = np.array(img)
        target = self._get_salency_map(item)
        return img, target

    def __len__(self):
        return len(self.data)

    def _get_salency_map(self, item, use_cuda=False):
        """Build (or load the cached) ``(1, frame_h, frame_w)`` saliency map."""
        cfile = self.data[item][:-4] + '_gt.npy'
        if self.cache_gt and os.path.isfile(cfile):
            # fix: load once and return the verified tensor (the original
            # called np.load on the same file twice)
            target_map = th.from_numpy(np.load(cfile)).float()
            assert target_map.size() == (1, self.frame_h, self.frame_w)
            return target_map
        target = np.zeros((self.frame_h, self.frame_w))
        for x_norm, y_norm in self.target[item]:
            # clamp normalized gaze coordinates into the frame
            x, y = min(int(x_norm * self.frame_w + 0.5), self.frame_w - 1), min(int(y_norm * self.frame_h + 0.5), self.frame_h - 1)
            target[y, x] = 10
        kernel = self._gen_gaussian_kernel()
        if use_cuda:
            target_map = spherical_conv(
                th.from_numpy(
                    target.reshape(1, 1, *target.shape)
                ).cuda(),
                th.from_numpy(kernel.reshape(1, 1, *kernel.shape)).cuda(),
                kernel_rad=self.kernel_rad,
                padding_mode=0
            ).view(1, self.frame_h, self.frame_w)
        else:
            target_map = spherical_conv(
                th.from_numpy(
                    target.reshape(1, 1, *target.shape)
                ),
                th.from_numpy(kernel.reshape(1, 1, *kernel.shape)),
                kernel_rad=self.kernel_rad,
                padding_mode=0
            ).view(1, self.frame_h, self.frame_w)
        if self.cache_gt:
            # cache the normalized map so later epochs skip the convolution
            np.save(cfile, target_map.data.cpu().numpy() / len(self.target[item]))
        return target_map.data.float() / len(self.target[item])

    def _gen_gaussian_kernel(self):
        """Lower half of a column-constant 2D Gaussian of shape ``kernel_size``."""
        sigma = self.gaussian_sigma
        kernel = th.zeros(self.kernel_size)
        delta_theta = self.kernel_rad / (self.kernel_size[0] - 1)
        sigma_idx = sigma / delta_theta
        # fix: signal.gaussian was deprecated and removed in SciPy 1.13;
        # signal.windows.gaussian is the long-standing replacement
        gauss1d = signal.windows.gaussian(2 * kernel.shape[0], sigma_idx)
        gauss2d = np.outer(gauss1d, np.ones(kernel.shape[1]))
        return gauss2d[-kernel.shape[0]:, :]

    def clear_cache(self):
        """Delete every cached '*_gt.npy' map; returns self for chaining."""
        from tqdm import trange
        for item in trange(len(self), desc='cleaning'):
            cfile = self.data[item][:-4] + '_gt.npy'
            if os.path.isfile(cfile):
                print('remove {}'.format(cfile))
                os.remove(cfile)
        return self

    def cache_map(self):
        """Precompute and cache every map (GPU path); returns self for chaining."""
        from tqdm import trange
        cache_gt = self.cache_gt
        self.cache_gt = True
        for item in trange(len(self), desc='caching'):
            self._get_salency_map(item, use_cuda=True)
        self.cache_gt = cache_gt
        return self
class VRVideo(data.Dataset):
    """Per-frame VR saliency dataset with a train/val split over videos.

    Each item pairs a frame with the saliency map of the frame
    ``frame_interval`` steps earlier ("last") and its own ground-truth map;
    maps have shape ``(1, frame_h, frame_w)``.
    """

    def __init__(self, root, frame_h, frame_w, video_train, frame_interval=1, transform=None, train=True,
                 gaussian_sigma=np.pi / 20, kernel_rad=np.pi/7, kernel_size=(30, 60), cache_gt=True, rnd_seed=367643):
        self.frame_interval = frame_interval
        self.transform = transform
        self.frame_h = frame_h
        self.frame_w = frame_w
        self.gaussian_sigma = gaussian_sigma
        self.kernel_size = kernel_size
        self.kernel_rad = kernel_rad
        self.cache_gt = cache_gt
        self.train = train
        rnd = Random(rnd_seed)
        # load target: vinfo maps video id -> {frame id -> list of gaze points}
        self.vinfo = pickle.load(open(os.path.join(root, 'vinfo.pkl'), 'rb'))
        # load image paths: every sub-directory of root is one video
        vset = list()
        for vid in tqdm(os.listdir(root), desc='scanning dir'):
            if os.path.isdir(os.path.join(root, vid)):
                vset.append(vid)
        vset.sort()
        assert set(self.vinfo.keys()) == set(vset)
        print('{} videos found.'.format(len(vset)))
        if isinstance(video_train, numbers.Integral):
            # sorted list + fixed seed => deterministic split
            vset_train = set(rnd.sample(vset, k=video_train))
            vset_val = set(vset) - vset_train
        else:
            raise NotImplementedError()
        print('{}:{} videos chosen for training:testing.'.format(len(vset_train), len(vset_val)))
        vset = vset_train if train else vset_val
        self.data = []
        self.target = []
        self.i2v = {}  # item index -> (video id, frame id)
        self.v2i = {}  # (video id, frame id) -> item index
        for vid in vset:
            obj_path = os.path.join(root, vid)
            frame_list = [frame for frame in os.listdir(obj_path) if frame.endswith('.jpg')]
            frame_list.sort()
            for frame in frame_list:
                fid = frame[:-4]
                self.i2v[len(self.data)] = (vid, fid)
                self.v2i[(vid, fid)] = len(self.data)
                self.data.append(os.path.join(obj_path, frame))
                self.target.append(self.vinfo[vid][fid])
        # sentinel gaze list used when no earlier frame exists (item == -1)
        self.target.append([(0.5, 0.5)])

    def __getitem__(self, item):
        """Return ``(img, last, target)`` (train) or ``(img, path, last, target)``."""
        img = Image.open(open(self.data[item], 'rb'))
        if self.transform:
            img = self.transform(img)
        else:
            img = np.array(img)
        vid, fid = self.i2v[item]
        if int(fid) - self.frame_interval <= 0:
            # no earlier frame: fall back to the centre-gaze sentinel
            last = self._get_salency_map(-1)
        else:
            last = self._get_salency_map(self.v2i[(vid, '%04d' % (int(fid) - self.frame_interval))])
        target = self._get_salency_map(item)
        if self.train:
            return img, last, target
        else:
            return img, self.data[item], last, target

    def __len__(self):
        return len(self.data)

    def _get_salency_map(self, item, use_cuda=False):
        """Build (or load the cached) map for ``item``; ``-1`` = sentinel, uncached."""
        cfile = self.data[item][:-4] + '_gt.npy'
        if item >= 0:
            if self.cache_gt and os.path.isfile(cfile):
                # fix: load once and return the verified tensor (the original
                # called np.load on the same file twice)
                target_map = th.from_numpy(np.load(cfile)).float()
                assert target_map.size() == (1, self.frame_h, self.frame_w)
                return target_map
        target = np.zeros((self.frame_h, self.frame_w))
        for x_norm, y_norm in self.target[item]:
            # clamp normalized gaze coordinates into the frame
            x, y = min(int(x_norm * self.frame_w + 0.5), self.frame_w - 1), min(int(y_norm * self.frame_h + 0.5), self.frame_h - 1)
            target[y, x] = 10
        kernel = self._gen_gaussian_kernel()
        if use_cuda:
            target_map = spherical_conv(
                th.from_numpy(
                    target.reshape(1, 1, *target.shape)
                ).cuda(),
                th.from_numpy(kernel.reshape(1, 1, *kernel.shape)).cuda(),
                kernel_rad=self.kernel_rad,
                padding_mode=0
            ).view(1, self.frame_h, self.frame_w)
        else:
            target_map = spherical_conv(
                th.from_numpy(
                    target.reshape(1, 1, *target.shape)
                ),
                th.from_numpy(kernel.reshape(1, 1, *kernel.shape)),
                kernel_rad=self.kernel_rad,
                padding_mode=0
            ).view(1, self.frame_h, self.frame_w)
        if item >= 0 and self.cache_gt:
            # cache the normalized map so later epochs skip the convolution
            np.save(cfile, target_map.data.cpu().numpy() / len(self.target[item]))
        return target_map.data.float() / len(self.target[item])

    def _gen_gaussian_kernel(self):
        """Lower half of a column-constant 2D Gaussian of shape ``kernel_size``."""
        sigma = self.gaussian_sigma
        kernel = th.zeros(self.kernel_size)
        delta_theta = self.kernel_rad / (self.kernel_size[0] - 1)
        sigma_idx = sigma / delta_theta
        # fix: signal.gaussian was removed in SciPy 1.13; use the windows API
        gauss1d = signal.windows.gaussian(2 * kernel.shape[0], sigma_idx)
        gauss2d = np.outer(gauss1d, np.ones(kernel.shape[1]))
        return gauss2d[-kernel.shape[0]:, :]

    def clear_cache(self):
        """Delete every cached '*_gt.npy' map; returns self for chaining."""
        from tqdm import trange
        for item in trange(len(self), desc='cleaning'):
            cfile = self.data[item][:-4] + '_gt.npy'
            if os.path.isfile(cfile):
                print('remove {}'.format(cfile))
                os.remove(cfile)
        return self

    def cache_map(self):
        """Precompute and cache every map (GPU path); returns self for chaining."""
        from tqdm import trange
        cache_gt = self.cache_gt
        self.cache_gt = True
        for item in trange(len(self), desc='caching'):
            self._get_salency_map(item, use_cuda=True)
        self.cache_gt = cache_gt
        return self
class VRVideoS2CNN(data.Dataset):
    """S2CNN variant of :class:`VRVideo`: frames are resized to
    ``(frame_w, frame_h)`` and the on-disk cache is never read back
    (only written).
    """

    def __init__(self, root, frame_h, frame_w, video_train, frame_interval=1, transform=None, train=True,
                 gaussian_sigma=np.pi / 20, kernel_rad=np.pi/7, kernel_size=(30, 60), cache_gt=True, rnd_seed=367643):
        self.frame_interval = frame_interval
        self.transform = transform
        self.frame_h = frame_h
        self.frame_w = frame_w
        self.gaussian_sigma = gaussian_sigma
        self.kernel_size = kernel_size
        self.kernel_rad = kernel_rad
        self.cache_gt = cache_gt
        self.train = train
        rnd = Random(rnd_seed)
        # load target: vinfo maps video id -> {frame id -> list of gaze points}
        self.vinfo = pickle.load(open(os.path.join(root, 'vinfo.pkl'), 'rb'))
        # load image paths: every sub-directory of root is one video
        vset = list()
        for vid in tqdm(os.listdir(root), desc='scanning dir'):
            if os.path.isdir(os.path.join(root, vid)):
                vset.append(vid)
        vset.sort()
        assert set(self.vinfo.keys()) == set(vset)
        print('{} videos found.'.format(len(vset)))
        if isinstance(video_train, numbers.Integral):
            # sorted list + fixed seed => deterministic split
            vset_train = set(rnd.sample(vset, k=video_train))
            vset_val = set(vset) - vset_train
        else:
            raise NotImplementedError()
        print('{}:{} videos chosen for training:testing.'.format(len(vset_train), len(vset_val)))
        vset = vset_train if train else vset_val
        self.data = []
        self.target = []
        self.i2v = {}  # item index -> (video id, frame id)
        self.v2i = {}  # (video id, frame id) -> item index
        for vid in vset:
            obj_path = os.path.join(root, vid)
            frame_list = [frame for frame in os.listdir(obj_path) if frame.endswith('.jpg')]
            frame_list.sort()
            for frame in frame_list:
                fid = frame[:-4]
                self.i2v[len(self.data)] = (vid, fid)
                self.v2i[(vid, fid)] = len(self.data)
                self.data.append(os.path.join(obj_path, frame))
                self.target.append(self.vinfo[vid][fid])
        # sentinel gaze list used when no earlier frame exists (item == -1)
        self.target.append([(0.5, 0.5)])

    def __getitem__(self, item):
        """Return ``(img, last, target)`` (train) or ``(img, path, last, target)``."""
        img = Image.open(open(self.data[item], 'rb'))
        # S2CNN consumes fixed-size frames
        img = img.resize((self.frame_w, self.frame_h))
        if self.transform:
            img = self.transform(img)
        else:
            img = np.array(img)
        vid, fid = self.i2v[item]
        if int(fid) - self.frame_interval <= 0:
            # no earlier frame: fall back to the centre-gaze sentinel
            last = self._get_salency_map(-1)
        else:
            last = self._get_salency_map(self.v2i[(vid, '%04d' % (int(fid) - self.frame_interval))])
        target = self._get_salency_map(item)
        if self.train:
            return img, last, target
        else:
            return img, self.data[item], last, target

    def __len__(self):
        return len(self.data)

    def _get_salency_map(self, item, use_cuda=False):
        """Always recompute the map for ``item``; ``-1`` = sentinel, uncached.

        NOTE(review): unlike the sibling classes, the cache READ path was
        deliberately disabled here (it was commented out in the original),
        while the save path below still writes cfile — presumably because
        cached maps may not match this variant's frame size; confirm before
        re-enabling.
        """
        cfile = self.data[item][:-4] + '_gt.npy'
        target = np.zeros((self.frame_h, self.frame_w))
        for x_norm, y_norm in self.target[item]:
            # clamp normalized gaze coordinates into the frame
            x, y = min(int(x_norm * self.frame_w + 0.5), self.frame_w - 1), min(int(y_norm * self.frame_h + 0.5), self.frame_h - 1)
            target[y, x] = 10
        kernel = self._gen_gaussian_kernel()
        if use_cuda:
            target_map = spherical_conv(
                th.from_numpy(
                    target.reshape(1, 1, *target.shape)
                ).cuda(),
                th.from_numpy(kernel.reshape(1, 1, *kernel.shape)).cuda(),
                kernel_rad=self.kernel_rad,
                padding_mode=0
            ).view(1, self.frame_h, self.frame_w)
        else:
            target_map = spherical_conv(
                th.from_numpy(
                    target.reshape(1, 1, *target.shape)
                ),
                th.from_numpy(kernel.reshape(1, 1, *kernel.shape)),
                kernel_rad=self.kernel_rad,
                padding_mode=0
            ).view(1, self.frame_h, self.frame_w)
        if item >= 0 and self.cache_gt:
            np.save(cfile, target_map.data.cpu().numpy() / len(self.target[item]))
        return target_map.data.float() / len(self.target[item])

    def _gen_gaussian_kernel(self):
        """Lower half of a column-constant 2D Gaussian of shape ``kernel_size``."""
        sigma = self.gaussian_sigma
        kernel = th.zeros(self.kernel_size)
        delta_theta = self.kernel_rad / (self.kernel_size[0] - 1)
        sigma_idx = sigma / delta_theta
        # fix: signal.gaussian was removed in SciPy 1.13; use the windows API
        gauss1d = signal.windows.gaussian(2 * kernel.shape[0], sigma_idx)
        gauss2d = np.outer(gauss1d, np.ones(kernel.shape[1]))
        return gauss2d[-kernel.shape[0]:, :]

    def clear_cache(self):
        """Delete every cached '*_gt.npy' map; returns self for chaining."""
        from tqdm import trange
        for item in trange(len(self), desc='cleaning'):
            cfile = self.data[item][:-4] + '_gt.npy'
            if os.path.isfile(cfile):
                print('remove {}'.format(cfile))
                os.remove(cfile)
        return self

    def cache_map(self):
        """Precompute and cache every map (GPU path); returns self for chaining."""
        from tqdm import trange
        cache_gt = self.cache_gt
        self.cache_gt = True
        for item in trange(len(self), desc='caching'):
            self._get_salency_map(item, use_cuda=True)
        self.cache_gt = cache_gt
        return self
class ICMEDataset(data.Dataset):
    """ICME saliency dataset: each '*.bin' float32 map pairs with a
    'P*.jpg' frame from the same split directory ('train' or 'eval').
    """

    def __init__(self, root, train=True, transform=None):
        split_dir = os.path.join(root, 'train' if train else 'eval')
        self.transform = transform
        self.train = train
        self.img = []
        self.target = []
        for fname in tqdm(os.listdir(split_dir), desc='scanning dir'):
            if not fname.endswith('.bin'):
                continue
            self.target.append(os.path.join(split_dir, fname))
            # image file shares the numeric stem, prefixed with 'P'
            self.img.append(os.path.join(split_dir, 'P' + fname[3:-4] + '.jpg'))

    def __getitem__(self, item):
        """Return ``(img, map)`` in train mode, ``(img, stem, map)`` otherwise."""
        frame = Image.open(open(self.img[item], 'rb'))
        dims = np.array(frame).shape[:2]
        sal = np.fromfile(self.target[item], dtype=np.float32).reshape(*dims)
        sal = cv2.resize(sal, (256, 128)).reshape(1, 128, 256)
        if self.transform:
            frame = self.transform(frame)
        sal_t = th.from_numpy(sal).float()
        if not self.train:
            # eval mode also reports the target file's stem for bookkeeping
            stem = os.path.split(self.target[item])[1][:-4]
            return frame, stem, sal_t
        return frame, sal_t

    def __len__(self):
        return len(self.img)
class VRVideoImproved(data.Dataset):
    """Like :class:`VRVideo` but each item also carries ``last_pred``: the
    model's dumped prediction for the previous frame (read from
    ``tmp_root/<vid>/<fid>.bin`` when present, else the ground-truth
    "last" map).
    """

    def __init__(self, root, frame_h, frame_w, video_train, frame_interval=1, transform=None, train=True,
                 gaussian_sigma=np.pi / 20, kernel_rad=np.pi/7, kernel_size=(30, 60), cache_gt=True, rnd_seed=367643, tmp_root='./'):
        self.frame_interval = frame_interval
        self.transform = transform
        self.frame_h = frame_h
        self.frame_w = frame_w
        self.gaussian_sigma = gaussian_sigma
        self.kernel_size = kernel_size
        self.kernel_rad = kernel_rad
        self.cache_gt = cache_gt
        self.train = train
        self.tmp_root = tmp_root
        rnd = Random(rnd_seed)
        # load target: vinfo maps video id -> {frame id -> list of gaze points}
        self.vinfo = pickle.load(open(os.path.join(root, 'vinfo.pkl'), 'rb'))
        # load image paths: every sub-directory of root is one video
        vset = list()
        for vid in tqdm(os.listdir(root), desc='scanning dir'):
            if os.path.isdir(os.path.join(root, vid)):
                vset.append(vid)
        vset.sort()
        assert set(self.vinfo.keys()) == set(vset)
        print('{} videos found.'.format(len(vset)))
        if isinstance(video_train, numbers.Integral):
            # sorted list + fixed seed => deterministic split
            vset_train = set(rnd.sample(vset, k=video_train))
            vset_val = set(vset) - vset_train
        else:
            raise NotImplementedError()
        print('{}:{} videos chosen for training:testing.'.format(len(vset_train), len(vset_val)))
        vset = vset_train if train else vset_val
        self.data = []
        self.target = []
        self.i2v = {}  # item index -> (video id, frame id)
        self.v2i = {}  # (video id, frame id) -> item index
        for vid in vset:
            obj_path = os.path.join(root, vid)
            frame_list = [frame for frame in os.listdir(obj_path) if frame.endswith('.jpg')]
            frame_list.sort()
            for frame in frame_list:
                fid = frame[:-4]
                self.i2v[len(self.data)] = (vid, fid)
                self.v2i[(vid, fid)] = len(self.data)
                self.data.append(os.path.join(obj_path, frame))
                self.target.append(self.vinfo[vid][fid])
        # sentinel gaze list used when no earlier frame exists (item == -1)
        self.target.append([(0.5, 0.5)])

    def __getitem__(self, item):
        """Return ``(img, last, last_pred, target, vid, fid)``."""
        img = Image.open(open(self.data[item], 'rb'))
        if self.transform:
            img = self.transform(img)
        else:
            img = np.array(img)
        vid, fid = self.i2v[item]
        if int(fid) - self.frame_interval <= 0:
            # no earlier frame: fall back to the centre-gaze sentinel
            last = self._get_salency_map(-1)
            last_pred = last
        else:
            last = self._get_salency_map(self.v2i[(vid, '%04d' % (int(fid) - self.frame_interval))])
            # prefer the model's own dumped prediction for the previous frame
            if os.path.isfile(os.path.join(self.tmp_root, vid, ('%04d' % (int(fid) - self.frame_interval)) + '.bin')):
                last_pred = np.fromfile(
                    os.path.join(self.tmp_root, vid, ('%04d' % (int(fid) - self.frame_interval)) + '.bin'),
                    dtype=np.float32).reshape(128, 256)
                last_pred = th.from_numpy(cv2.resize(last_pred, (256, 128)).reshape(1, 128, 256)).float()
            else:
                last_pred = last
        target = self._get_salency_map(item)
        return img, last, last_pred, target, vid, fid

    def __len__(self):
        return len(self.data)

    def _get_salency_map(self, item, use_cuda=False):
        """Build (or load the cached) map for ``item``; ``-1`` = sentinel, uncached."""
        cfile = self.data[item][:-4] + '_gt.npy'
        if item >= 0:
            if self.cache_gt and os.path.isfile(cfile):
                # fix: load once and return the verified tensor (the original
                # called np.load on the same file twice)
                target_map = th.from_numpy(np.load(cfile)).float()
                assert target_map.size() == (1, self.frame_h, self.frame_w)
                return target_map
        target = np.zeros((self.frame_h, self.frame_w))
        for x_norm, y_norm in self.target[item]:
            # clamp normalized gaze coordinates into the frame
            x, y = min(int(x_norm * self.frame_w + 0.5), self.frame_w - 1), min(int(y_norm * self.frame_h + 0.5), self.frame_h - 1)
            target[y, x] = 10
        kernel = self._gen_gaussian_kernel()
        if use_cuda:
            target_map = spherical_conv(
                th.from_numpy(
                    target.reshape(1, 1, *target.shape)
                ).cuda(),
                th.from_numpy(kernel.reshape(1, 1, *kernel.shape)).cuda(),
                kernel_rad=self.kernel_rad,
                padding_mode=0
            ).view(1, self.frame_h, self.frame_w)
        else:
            target_map = spherical_conv(
                th.from_numpy(
                    target.reshape(1, 1, *target.shape)
                ),
                th.from_numpy(kernel.reshape(1, 1, *kernel.shape)),
                kernel_rad=self.kernel_rad,
                padding_mode=0
            ).view(1, self.frame_h, self.frame_w)
        if item >= 0 and self.cache_gt:
            # cache the normalized map so later epochs skip the convolution
            np.save(cfile, target_map.data.cpu().numpy() / len(self.target[item]))
        return target_map.data.float() / len(self.target[item])

    def _gen_gaussian_kernel(self):
        """Lower half of a column-constant 2D Gaussian of shape ``kernel_size``."""
        sigma = self.gaussian_sigma
        kernel = th.zeros(self.kernel_size)
        delta_theta = self.kernel_rad / (self.kernel_size[0] - 1)
        sigma_idx = sigma / delta_theta
        # fix: signal.gaussian was removed in SciPy 1.13; use the windows API
        gauss1d = signal.windows.gaussian(2 * kernel.shape[0], sigma_idx)
        gauss2d = np.outer(gauss1d, np.ones(kernel.shape[1]))
        return gauss2d[-kernel.shape[0]:, :]

    def clear_cache(self):
        """Delete every cached '*_gt.npy' map; returns self for chaining."""
        from tqdm import trange
        for item in trange(len(self), desc='cleaning'):
            cfile = self.data[item][:-4] + '_gt.npy'
            if os.path.isfile(cfile):
                print('remove {}'.format(cfile))
                os.remove(cfile)
        return self

    def cache_map(self):
        """Precompute and cache every map (GPU path); returns self for chaining."""
        from tqdm import trange
        cache_gt = self.cache_gt
        self.cache_gt = True
        for item in trange(len(self), desc='caching'):
            self._get_salency_map(item, use_cuda=True)
        self.cache_gt = cache_gt
        return self
class VRVideoImprovedJoint(data.Dataset):
    """Joint-training variant of :class:`VRVideoImproved`: items are just
    ``(img, target / target.max())`` — no "last"/"last_pred" maps.
    """

    def __init__(self, root, frame_h, frame_w, video_train, frame_interval=1, transform=None, train=True,
                 gaussian_sigma=np.pi / 20, kernel_rad=np.pi/7, kernel_size=(30, 60), cache_gt=True, rnd_seed=367643, tmp_root='./'):
        self.frame_interval = frame_interval
        self.transform = transform
        self.frame_h = frame_h
        self.frame_w = frame_w
        self.gaussian_sigma = gaussian_sigma
        self.kernel_size = kernel_size
        self.kernel_rad = kernel_rad
        self.cache_gt = cache_gt
        self.train = train
        self.tmp_root = tmp_root
        rnd = Random(rnd_seed)
        # load target: vinfo maps video id -> {frame id -> list of gaze points}
        self.vinfo = pickle.load(open(os.path.join(root, 'vinfo.pkl'), 'rb'))
        # load image paths: every sub-directory of root is one video
        vset = list()
        for vid in tqdm(os.listdir(root), desc='scanning dir'):
            if os.path.isdir(os.path.join(root, vid)):
                vset.append(vid)
        vset.sort()
        assert set(self.vinfo.keys()) == set(vset)
        print('{} videos found.'.format(len(vset)))
        if isinstance(video_train, numbers.Integral):
            # sorted list + fixed seed => deterministic split
            vset_train = set(rnd.sample(vset, k=video_train))
            vset_val = set(vset) - vset_train
        else:
            raise NotImplementedError()
        print('{}:{} videos chosen for training:testing.'.format(len(vset_train), len(vset_val)))
        vset = vset_train if train else vset_val
        self.data = []
        self.target = []
        self.i2v = {}  # item index -> (video id, frame id)
        self.v2i = {}  # (video id, frame id) -> item index
        for vid in vset:
            obj_path = os.path.join(root, vid)
            frame_list = [frame for frame in os.listdir(obj_path) if frame.endswith('.jpg')]
            frame_list.sort()
            for frame in frame_list:
                fid = frame[:-4]
                self.i2v[len(self.data)] = (vid, fid)
                self.v2i[(vid, fid)] = len(self.data)
                self.data.append(os.path.join(obj_path, frame))
                self.target.append(self.vinfo[vid][fid])
        # sentinel gaze list used when no earlier frame exists (item == -1)
        self.target.append([(0.5, 0.5)])

    def __getitem__(self, item):
        """Return ``(img, peak_normalized_map)`` for the given index."""
        img = Image.open(open(self.data[item], 'rb'))
        if self.transform:
            img = self.transform(img)
        else:
            img = np.array(img)
        target = self._get_salency_map(item)
        # peak-normalize so the map's maximum is exactly 1
        return img, target / target.max()

    def __len__(self):
        return len(self.data)

    def _get_salency_map(self, item, use_cuda=False):
        """Build (or load the cached) map for ``item``; ``-1`` = sentinel, uncached."""
        cfile = self.data[item][:-4] + '_gt.npy'
        if item >= 0:
            if self.cache_gt and os.path.isfile(cfile):
                # fix: load once and return the verified tensor (the original
                # called np.load on the same file twice)
                target_map = th.from_numpy(np.load(cfile)).float()
                assert target_map.size() == (1, self.frame_h, self.frame_w)
                return target_map
        target = np.zeros((self.frame_h, self.frame_w))
        for x_norm, y_norm in self.target[item]:
            # clamp normalized gaze coordinates into the frame
            x, y = min(int(x_norm * self.frame_w + 0.5), self.frame_w - 1), min(int(y_norm * self.frame_h + 0.5), self.frame_h - 1)
            target[y, x] = 10
        kernel = self._gen_gaussian_kernel()
        if use_cuda:
            target_map = spherical_conv(
                th.from_numpy(
                    target.reshape(1, 1, *target.shape)
                ).cuda(),
                th.from_numpy(kernel.reshape(1, 1, *kernel.shape)).cuda(),
                kernel_rad=self.kernel_rad,
                padding_mode=0
            ).view(1, self.frame_h, self.frame_w)
        else:
            target_map = spherical_conv(
                th.from_numpy(
                    target.reshape(1, 1, *target.shape)
                ),
                th.from_numpy(kernel.reshape(1, 1, *kernel.shape)),
                kernel_rad=self.kernel_rad,
                padding_mode=0
            ).view(1, self.frame_h, self.frame_w)
        if item >= 0 and self.cache_gt:
            # cache the normalized map so later epochs skip the convolution
            np.save(cfile, target_map.data.cpu().numpy() / len(self.target[item]))
        return target_map.data.float() / len(self.target[item])

    def _gen_gaussian_kernel(self):
        """Lower half of a column-constant 2D Gaussian of shape ``kernel_size``."""
        sigma = self.gaussian_sigma
        kernel = th.zeros(self.kernel_size)
        delta_theta = self.kernel_rad / (self.kernel_size[0] - 1)
        sigma_idx = sigma / delta_theta
        # fix: signal.gaussian was removed in SciPy 1.13; use the windows API
        gauss1d = signal.windows.gaussian(2 * kernel.shape[0], sigma_idx)
        gauss2d = np.outer(gauss1d, np.ones(kernel.shape[1]))
        return gauss2d[-kernel.shape[0]:, :]

    def clear_cache(self):
        """Delete every cached '*_gt.npy' map; returns self for chaining."""
        from tqdm import trange
        for item in trange(len(self), desc='cleaning'):
            cfile = self.data[item][:-4] + '_gt.npy'
            if os.path.isfile(cfile):
                print('remove {}'.format(cfile))
                os.remove(cfile)
        return self

    def cache_map(self):
        """Precompute and cache every map (GPU path); returns self for chaining."""
        from tqdm import trange
        cache_gt = self.cache_gt
        self.cache_gt = True
        for item in trange(len(self), desc='caching'):
            self._get_salency_map(item, use_cuda=True)
        self.cache_gt = cache_gt
        return self
class VRVideoRotTest(data.Dataset):
    """Test-time dataset over rotated frames whose saliency maps were
    pre-dumped as raw float32 '.bin' files next to each '.jpg' frame.

    Items are ``(img, last, target, vid, fid)`` where ``last`` is the map of
    the frame ``frame_interval`` steps earlier (or a synthetic centre-gaze
    map when off the start of a video).
    """

    def __init__(self, root, frame_h, frame_w, frame_interval=5, transform=None):
        self.root = root
        self.frame_interval = frame_interval
        self.transform = transform
        self.frame_h = frame_h
        self.frame_w = frame_w
        # kernel parameters are fixed here (same values as the other
        # dataset classes' defaults)
        self.gaussian_sigma = np.pi / 20
        self.kernel_rad = np.pi / 7
        self.kernel_size = (30, 60)
        self.cache_gt = False
        self.data = []
        self.target = []
        self.i2v = {}  # item index -> (video id, frame id)
        self.v2i = {}  # (video id, frame id) -> item index
        for vid in os.listdir(root):
            for frame in os.listdir(os.path.join(root, vid)):
                if frame.endswith('.jpg'):
                    fid = frame[:-4]
                    self.i2v[len(self.data)] = (vid, fid)
                    self.v2i[(vid, fid)] = len(self.data)
                    self.data.append(os.path.join(root, vid, frame))
                    # here targets are paths to dumped maps, not gaze lists
                    self.target.append(os.path.join(root, vid, fid + '.bin'))
        # sentinel gaze list, only consumed by _get_salency_map(-1)
        self.target.append([(0.5, 0.5)])

    def __getitem__(self, item):
        """Return ``(img, last, target, vid, fid)`` for the given index."""
        img = Image.open(open(self.data[item], 'rb'))
        # maps in the .bin files are stored at the original image resolution
        h, w, _ = np.array(img).shape
        if self.transform:
            img = self.transform(img)
        else:
            img = np.array(img)
        vid, fid = self.i2v[item]
        if int(fid) - self.frame_interval <= 0:
            # no earlier frame: synthesize the centre-gaze sentinel map
            last = self._get_salency_map(-1)
        else:
            last = np.fromfile(os.path.join(self.root, vid, ('%04d' % (int(fid) - self.frame_interval)) + '.bin'),
                               dtype=np.float32).reshape(h, w)
            last = th.from_numpy(cv2.resize(last, (self.frame_w, self.frame_h)).reshape(1, self.frame_h, self.frame_w)).float()
        target = np.fromfile(self.target[item], dtype=np.float32).reshape(h, w)
        target = th.from_numpy(cv2.resize(target, (self.frame_w, self.frame_h)).reshape(1, self.frame_h, self.frame_w)).float()
        return img, last, target, vid, fid

    def __len__(self):
        return len(self.data)

    # NOTE(review): lru_cache on an instance method keys on `self` and keeps
    # the instance alive for the cache's lifetime; acceptable here since only
    # item == -1 is ever cached, but confirm before reusing this pattern.
    @lru_cache(maxsize=None)
    def _get_salency_map(self, item, use_cuda=False):
        # only the sentinel is supported in this class; real targets come
        # from the dumped .bin files in __getitem__
        assert item == -1
        target = np.zeros((self.frame_h, self.frame_w))
        for x_norm, y_norm in self.target[item]:
            # clamp normalized gaze coordinates into the frame
            x, y = min(int(x_norm * self.frame_w + 0.5), self.frame_w - 1), min(int(y_norm * self.frame_h + 0.5), self.frame_h - 1)
            target[y, x] = 10
        kernel = self._gen_gaussian_kernel()
        # print(kernel.max())
        if use_cuda:
            target_map = spherical_conv(
                th.from_numpy(
                    target.reshape(1, 1, *target.shape)
                ).cuda(),
                th.from_numpy(kernel.reshape(1, 1, *kernel.shape)).cuda(),
                kernel_rad=self.kernel_rad,
                padding_mode=0
            ).view(1, self.frame_h, self.frame_w)
        else:
            target_map = spherical_conv(
                th.from_numpy(
                    target.reshape(1, 1, *target.shape)
                ),
                th.from_numpy(kernel.reshape(1, 1, *kernel.shape)),
                kernel_rad=self.kernel_rad,
                padding_mode=0
            ).view(1, self.frame_h, self.frame_w)
        return target_map.data.float() / len(self.target[item])

    def _gen_gaussian_kernel(self):
        """Lower half of a column-constant 2D Gaussian of shape ``kernel_size``."""
        sigma = self.gaussian_sigma
        kernel = th.zeros(self.kernel_size)
        delta_theta = self.kernel_rad / (self.kernel_size[0] - 1)
        sigma_idx = sigma / delta_theta
        gauss1d = signal.gaussian(2 * kernel.shape[0], sigma_idx)
        gauss2d = np.outer(gauss1d, np.ones(kernel.shape[1]))
        return gauss2d[-kernel.shape[0]:, :]

    def clear_cache(self):
        """Delete any '*_gt.npy' files next to the frames; returns self."""
        from tqdm import trange
        for item in trange(len(self), desc='cleaning'):
            cfile = self.data[item][:-4] + '_gt.npy'
            if os.path.isfile(cfile):
                print('remove {}'.format(cfile))
                os.remove(cfile)
        return self

    def cache_map(self):
        # NOTE(review): this looks like dead copy-paste from the sibling
        # classes — _get_salency_map asserts item == -1, so calling it with
        # item >= 0 here would raise AssertionError; confirm before using.
        from tqdm import trange
        cache_gt = self.cache_gt
        self.cache_gt = True
        for item in trange(len(self), desc='caching'):
            # pool.apply_async(self._get_salency_map, (item, True))
            self._get_salency_map(item, use_cuda=True)
        self.cache_gt = cache_gt
        return self
class VRVideoMultiFrame(data.Dataset):
    """Like :class:`VRVideo` but "last" stacks the maps of the 5 preceding
    frames (spaced ``frame_interval`` apart) into a ``(5, frame_h, frame_w)``
    tensor.
    """

    def __init__(self, root, frame_h, frame_w, video_train, frame_interval=1, transform=None, train=True,
                 gaussian_sigma=np.pi / 20, kernel_rad=np.pi/7, kernel_size=(30, 60), cache_gt=True, rnd_seed=367643):
        self.frame_interval = frame_interval
        self.transform = transform
        self.frame_h = frame_h
        self.frame_w = frame_w
        self.gaussian_sigma = gaussian_sigma
        self.kernel_size = kernel_size
        self.kernel_rad = kernel_rad
        self.cache_gt = cache_gt
        self.train = train
        rnd = Random(rnd_seed)
        # load target: vinfo maps video id -> {frame id -> list of gaze points}
        self.vinfo = pickle.load(open(os.path.join(root, 'vinfo.pkl'), 'rb'))
        # load image paths: every sub-directory of root is one video
        vset = list()
        for vid in tqdm(os.listdir(root), desc='scanning dir'):
            if os.path.isdir(os.path.join(root, vid)):
                vset.append(vid)
        vset.sort()
        assert set(self.vinfo.keys()) == set(vset)
        print('{} videos found.'.format(len(vset)))
        if isinstance(video_train, numbers.Integral):
            # sorted list + fixed seed => deterministic split
            vset_train = set(rnd.sample(vset, k=video_train))
            vset_val = set(vset) - vset_train
        else:
            raise NotImplementedError()
        print('{}:{} videos chosen for training:testing.'.format(len(vset_train), len(vset_val)))
        vset = vset_train if train else vset_val
        self.data = []
        self.target = []
        self.i2v = {}  # item index -> (video id, frame id)
        self.v2i = {}  # (video id, frame id) -> item index
        for vid in vset:
            obj_path = os.path.join(root, vid)
            frame_list = [frame for frame in os.listdir(obj_path) if frame.endswith('.jpg')]
            frame_list.sort()
            for frame in frame_list:
                fid = frame[:-4]
                self.i2v[len(self.data)] = (vid, fid)
                self.v2i[(vid, fid)] = len(self.data)
                self.data.append(os.path.join(obj_path, frame))
                self.target.append(self.vinfo[vid][fid])
        # sentinel gaze list used when no earlier frame exists (item == -1)
        self.target.append([(0.5, 0.5)])

    def __getitem__(self, item):
        """Return ``(img, last, target)`` (train) or ``(img, path, last, target)``."""
        img = Image.open(open(self.data[item], 'rb'))
        if self.transform:
            img = self.transform(img)
        else:
            img = np.array(img)
        last = []
        vid, fid = self.i2v[item]
        # maps of the 5 preceding frames, sentinel when off the video's start
        for step in range(1, 6):
            if int(fid) - self.frame_interval * step <= 0:
                last.append(self._get_salency_map(-1))
            else:
                last.append(self._get_salency_map(self.v2i[(vid, '%04d' % (int(fid) - self.frame_interval * step))]))
        target = self._get_salency_map(item)
        last = th.cat(last, dim=0)
        if self.train:
            return img, last, target
        else:
            return img, self.data[item], last, target

    def __len__(self):
        return len(self.data)

    def _get_salency_map(self, item, use_cuda=False):
        """Build (or load the cached) map for ``item``; ``-1`` = sentinel, uncached."""
        cfile = self.data[item][:-4] + '_gt.npy'
        if item >= 0:
            if self.cache_gt and os.path.isfile(cfile):
                # fix: load once and return the verified tensor (the original
                # called np.load on the same file twice)
                target_map = th.from_numpy(np.load(cfile)).float()
                assert target_map.size() == (1, self.frame_h, self.frame_w)
                return target_map
        target = np.zeros((self.frame_h, self.frame_w))
        for x_norm, y_norm in self.target[item]:
            # clamp normalized gaze coordinates into the frame
            x, y = min(int(x_norm * self.frame_w + 0.5), self.frame_w - 1), min(int(y_norm * self.frame_h + 0.5), self.frame_h - 1)
            target[y, x] = 10
        kernel = self._gen_gaussian_kernel()
        if use_cuda:
            target_map = spherical_conv(
                th.from_numpy(
                    target.reshape(1, 1, *target.shape)
                ).cuda(),
                th.from_numpy(kernel.reshape(1, 1, *kernel.shape)).cuda(),
                kernel_rad=self.kernel_rad,
                padding_mode=0
            ).view(1, self.frame_h, self.frame_w)
        else:
            target_map = spherical_conv(
                th.from_numpy(
                    target.reshape(1, 1, *target.shape)
                ),
                th.from_numpy(kernel.reshape(1, 1, *kernel.shape)),
                kernel_rad=self.kernel_rad,
                padding_mode=0
            ).view(1, self.frame_h, self.frame_w)
        if item >= 0 and self.cache_gt:
            # cache the normalized map so later epochs skip the convolution
            np.save(cfile, target_map.data.cpu().numpy() / len(self.target[item]))
        return target_map.data.float() / len(self.target[item])

    def _gen_gaussian_kernel(self):
        """Lower half of a column-constant 2D Gaussian of shape ``kernel_size``."""
        sigma = self.gaussian_sigma
        kernel = th.zeros(self.kernel_size)
        delta_theta = self.kernel_rad / (self.kernel_size[0] - 1)
        sigma_idx = sigma / delta_theta
        # fix: signal.gaussian was removed in SciPy 1.13; use the windows API
        gauss1d = signal.windows.gaussian(2 * kernel.shape[0], sigma_idx)
        gauss2d = np.outer(gauss1d, np.ones(kernel.shape[1]))
        return gauss2d[-kernel.shape[0]:, :]

    def clear_cache(self):
        """Delete every cached '*_gt.npy' map; returns self for chaining."""
        from tqdm import trange
        for item in trange(len(self), desc='cleaning'):
            cfile = self.data[item][:-4] + '_gt.npy'
            if os.path.isfile(cfile):
                print('remove {}'.format(cfile))
                os.remove(cfile)
        return self

    def cache_map(self):
        """Precompute and cache every map (GPU path); returns self for chaining."""
        from tqdm import trange
        cache_gt = self.cache_gt
        self.cache_gt = True
        for item in trange(len(self), desc='caching'):
            self._get_salency_map(item, use_cuda=True)
        self.cache_gt = cache_gt
        return self
class VRRotatedTest(data.Dataset):
    """Rotated-frame test set: each '*.jpg' pairs with a same-stem '*.bin'
    float32 saliency map dumped at the image's native resolution.
    """

    def __init__(self, root, transform=None):
        self.transform = transform
        self.img = []
        self.target = []
        self.i2v = {}  # item index -> (video id, frame stem)
        for vid in tqdm(os.listdir(root), desc='video'):
            for fid in tqdm(os.listdir(os.path.join(root, vid)), desc='frame'):
                file = os.path.join(root, vid, fid)
                if not file.endswith('.jpg'):
                    continue
                self.i2v[len(self.img)] = (vid, fid[:-4])
                self.target.append(file[:-4] + '.bin')
                self.img.append(file)

    def __getitem__(self, item):
        """Return ``(img, map, vid, fid)`` with the map resized to 128x256."""
        vid, fid = self.i2v[item]
        frame = Image.open(open(self.img[item], 'rb'))
        dims = np.array(frame).shape[:2]
        sal = np.fromfile(self.target[item], dtype=np.float32).reshape(*dims)
        sal = cv2.resize(sal, (256, 128)).reshape(1, 128, 256)
        if self.transform:
            frame = self.transform(frame)
        return frame, th.from_numpy(sal).float(), vid, fid

    def __len__(self):
        return len(self.img)
if __name__ == '__main__':
    # Ad-hoc smoke test: load one ICME sample and show the frame next to
    # its saliency map.
    def gen_gaussian_kernel(sigma_idx=8, kernel_size=(15, 30)):
        """Lower half of a column-constant 2D Gaussian (demo helper)."""
        # fix: signal.gaussian was removed in SciPy 1.13; use the windows API
        gauss1d = signal.windows.gaussian(2 * kernel_size[0], sigma_idx)
        gauss2d = np.outer(gauss1d, np.ones(kernel_size[1]))
        return gauss2d[-kernel_size[0]:, :]

    # fix: matplotlib.pyplot was imported twice in the original
    import matplotlib.pyplot as plt

    dataset = ICMEDataset('/home/ziheng/2018-ECCV/ICME')
    img, map = dataset[11]
    fix, (ax1, ax2) = plt.subplots(1, 2)
    ax1.imshow(img)
    ax2.imshow(map)
    plt.show()
| 37.821869
| 135
| 0.55675
| 5,645
| 42,890
| 4.047121
| 0.040213
| 0.056334
| 0.026263
| 0.024293
| 0.918848
| 0.907161
| 0.89311
| 0.883349
| 0.878184
| 0.869999
| 0
| 0.02088
| 0.312147
| 42,890
| 1,133
| 136
| 37.855252
| 0.753508
| 0.073024
| 0
| 0.878893
| 0
| 0
| 0.024032
| 0.000681
| 0
| 0
| 0
| 0
| 0.013841
| 1
| 0.064591
| false
| 0.001153
| 0.033449
| 0.010381
| 0.17301
| 0.023068
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.