max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
global_variables.py
|
akshatsh49/InfoGan
| 0
|
6628551
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation
import torch
import torchvision
from torch import optim
from torch import nn
import torch.nn.functional as F
import time
import math
import os
import pickle
g_l_file='gen_loss.sav'
d_l_file='dis_loss.sav'
sample_folder='samples'
si_folder='Space_interpolation'
fi_folder='Factor_interpolation'
saved_models='saved_models'
test_samples='test_samples'
batch_size=64
if(torch.cuda.is_available()):
device='cuda:0'
else :
device='cpu'
print('Device for training : {}'.format(device))
torch.pi = torch.acos(torch.zeros(1,device=device)) * 2
train_loader=torch.utils.data.DataLoader(dataset=torchvision.datasets.MNIST('./root',train=True,download=True,transform=torchvision.transforms.ToTensor()) ,batch_size=batch_size,drop_last=True)
test_loader=torch.utils.data.DataLoader(dataset=torchvision.datasets.MNIST('./root',train=False,download=True,transform=torchvision.transforms.ToTensor()) ,batch_size=batch_size,drop_last=True)
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation
import torch
import torchvision
from torch import optim
from torch import nn
import torch.nn.functional as F
import time
import math
import os
import pickle
g_l_file='gen_loss.sav'
d_l_file='dis_loss.sav'
sample_folder='samples'
si_folder='Space_interpolation'
fi_folder='Factor_interpolation'
saved_models='saved_models'
test_samples='test_samples'
batch_size=64
if(torch.cuda.is_available()):
device='cuda:0'
else :
device='cpu'
print('Device for training : {}'.format(device))
torch.pi = torch.acos(torch.zeros(1,device=device)) * 2
train_loader=torch.utils.data.DataLoader(dataset=torchvision.datasets.MNIST('./root',train=True,download=True,transform=torchvision.transforms.ToTensor()) ,batch_size=batch_size,drop_last=True)
test_loader=torch.utils.data.DataLoader(dataset=torchvision.datasets.MNIST('./root',train=False,download=True,transform=torchvision.transforms.ToTensor()) ,batch_size=batch_size,drop_last=True)
|
none
| 1
| 2.309663
| 2
|
|
utils/datasets.py
|
holanlan/pipcook-plugin-pytorch-yolov5-model
| 0
|
6628552
|
<reponame>holanlan/pipcook-plugin-pytorch-yolov5-model
import cv2
import numpy as np
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
|
import cv2
import numpy as np
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
|
en
| 0.519601
|
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232 # current shape [height, width] # Scale ratio (new / old) # only scale down, do not scale up (for better test mAP) # Compute padding # width, height ratios # wh padding # minimum rectangle # wh padding # stretch # width, height ratios # divide padding into 2 sides # resize # add border
| 2.54427
| 3
|
app/profile/__init__.py
|
Ken-mbira/BLOG_SPOT
| 0
|
6628553
|
<reponame>Ken-mbira/BLOG_SPOT<filename>app/profile/__init__.py
from flask import Blueprint
profile = Blueprint('profile',__name__,url_prefix='/profile')
from . import views,forms
|
from flask import Blueprint
profile = Blueprint('profile',__name__,url_prefix='/profile')
from . import views,forms
|
none
| 1
| 1.542016
| 2
|
|
ds/chunk_2/intro.py
|
quydau35/quydau35.github.io
| 0
|
6628554
|
"""
Scala Type\n
1. Booleans\n
2. Numbers\n
3. Casting\n
4. String\n
5. Operators\n
# Booleans\n
Booleans represent one of two values: ```True``` or ```False```.\n
When you compare two values, the expression is evaluated and Python returns the Boolean answer:\n
```
print(10 > 9)
print(10 >= 9)
print(10 == 9)
print(10 < 9)
print(10 <= 9)
```
Evaluate a string and a number:\n
```
print(bool("Hello"))
print(bool(15))
x = "Hello"
y = 15
print(bool(x))
print(bool(y))
```
Almost any value is evaluated to True if it has some sort of content.\n
Any string is True, except empty strings.\n
Any number is True, except 0.\n
Any list, tuple, set, and dictionary are True, except empty ones.\n
The following will return False:\n
```
bool(False)
bool(None)
bool(0)
bool("")
bool(())
bool([])
bool({})
```
Python also has many built-in functions that return a boolean value, like the ```isinstance()``` function, which can be used to determine if an object is of a certain data type:\n
```
x = 200
print(isinstance(x, int))
```
# Numbers\n
There are three numeric types in Python:\n
```int```\n
```float```\n
```complex```\n
Variables of numeric types are created when you assign a value to them:\n
```
x = 1 # int
y = 2.8 # float
z = 1j # complex
```
To verify the type of any object in Python, use the type() function:\n
```
print(type(x))
print(type(y))
print(type(z))
```
Integers:\n
Int, or integer, is a whole number, positive or negative, without decimals, of unlimited length.\n
```
x = 1
y = 35656222554887711
z = -3255522
print(type(x))
print(type(y))
print(type(z))
```
Floats:\n
Float, or "floating point number" is a number, positive or negative, containing one or more decimals.\n
```
x = 1.10
y = 1.0
z = -35.59
print(type(x))
print(type(y))
print(type(z))
```
Float can also be scientific numbers with an "e" to indicate the power of 10.\n
```
x = 35e3
y = 12E4
z = -87.7e100
print(type(x))
print(type(y))
print(type(z))
```
Complex:\n
Complex numbers are written with a "j" as the imaginary part:\n
```
x = 3+5j
y = 5j
z = -5j
print(type(x))
print(type(y))
print(type(z))
```
You can convert from one type to another with the int(), float(), and complex() methods:\n
```
x = 1 # int
y = 2.8 # float
z = 1j # complex
#convert from int to float:
a = float(x)
#convert from float to int:
b = int(y)
#convert from int to complex:
c = complex(x)
print(a)
print(b)
print(c)
print(type(a))
print(type(b))
print(type(c))
```
# Casting
There may be times when you want to specify a type on to a variable. This can be done with casting. Python is an object-orientated language, and as such it uses classes to define data types, including its primitive types.\n
Casting in python is therefore done using constructor functions:\n
int() - constructs an integer number from an integer literal, a float literal (by rounding down to the previous whole number), or a string literal (providing the string represents a whole number)\n
float() - constructs a float number from an integer literal, a float literal or a string literal (providing the string represents a float or an integer)\n
str() - constructs a string from a wide variety of data types, including strings, integer literals and float literals\n
Integers:\n
```
x = int(1) # x will be 1
y = int(2.8) # y will be 2
z = int("3") # z will be 3
```
Floats:\n
```
x = float(1) # x will be 1.0
y = float(2.8) # y will be 2.8
z = float("3") # z will be 3.0
w = float("4.2") # w will be 4.2
```
Strings:\n
```
x = str("s1") # x will be 's1'
y = str(2) # y will be '2'
z = str(3.0) # z will be '3.0'
```
# Strings\n
Strings in python are surrounded by either single quotation marks, or double quotation marks.
'hello' is the same as "hello".\n
You can display a string literal with the print() function:\n
```
print("Hello")
print('Hello')
```
# Operators\n
Operators are used to perform operations on variables and values.\n
In the example below, we use the ```+``` operator to add together two values:\n
```
print(10 + 5)
```
Python divides the operators in the following groups:\n
Arithmetic operators\n
Assignment operators\n
Comparison operators\n
Logical operators\n
Identity operators\n
Membership operators\n
Bitwise operators\n
Arithmetic operators are used with numeric values to perform common mathematical operations:\n
```
Operator Name Example
+ Addition x + y
- Subtraction x - y
* Multiplication x * y
/ Division x / y
% Modulus x % y
** Exponentiation x ** y
// Floor division x // y
```
Assignment operators are used to assign values to variables:\n
```
Operator Example Same As
= x = 5 x = 5
+= x += 3 x = x + 3
-= x -= 3 x = x - 3
*= x *= 3 x = x * 3
/= x /= 3 x = x / 3
%= x %= 3 x = x % 3
//= x //= 3 x = x // 3
**= x **= 3 x = x ** 3
&= x &= 3 x = x & 3
|= x |= 3 x = x | 3
^= x ^= 3 x = x ^ 3
>>= x >>= 3 x = x >> 3
<<= x <<= 3 x = x << 3
```
Comparison operators are used to compare two values:\n
```
Operator Name Example
== Equal x == y
!= Not equal x != y
> Greater than x > y
< Less than x < y
>= Greater than or equal to x >= y
<= Less than or equal to x <= y
```
Logical operators are used to combine conditional statements:\n
```
Operator Description Example
and Returns True if both statements are true x < 5 and x < 10
or Returns True if one of the statements is true x < 5 or x < 4
not Reverse the result, returns False if the result is true not(x < 5 and x < 10)
```
Identity operators are used to compare the objects, not if they are equal, but if they are actually the same object, with the same memory location:\n
```
Operator Description Example
is Returns True if both variables are the same object x is y
is not Returns True if both variables are not the same object x is not y
```
Membership operators are used to test if a sequence is presented in an object:\n
```
Operator Description Example
in Returns True if a sequence with the specified value is present in the object x in y
not in Returns True if a sequence with the specified value is not present in the object x not in y
```
Bitwise operators are used to compare (binary) numbers:
```
Operator Name Description
& AND Sets each bit to 1 if both bits are 1
| OR Sets each bit to 1 if one of two bits is 1
^ XOR Sets each bit to 1 if only one of two bits is 1
~ NOT Inverts all the bits
<< Zero fill left shift Shift left by pushing zeros in from the right and let the leftmost bits fall off
>> Signed right shift Shift right by pushing copies of the leftmost bit in from the left, and let the rightmost bits fall off
```
"""
|
"""
Scala Type\n
1. Booleans\n
2. Numbers\n
3. Casting\n
4. String\n
5. Operators\n
# Booleans\n
Booleans represent one of two values: ```True``` or ```False```.\n
When you compare two values, the expression is evaluated and Python returns the Boolean answer:\n
```
print(10 > 9)
print(10 >= 9)
print(10 == 9)
print(10 < 9)
print(10 <= 9)
```
Evaluate a string and a number:\n
```
print(bool("Hello"))
print(bool(15))
x = "Hello"
y = 15
print(bool(x))
print(bool(y))
```
Almost any value is evaluated to True if it has some sort of content.\n
Any string is True, except empty strings.\n
Any number is True, except 0.\n
Any list, tuple, set, and dictionary are True, except empty ones.\n
The following will return False:\n
```
bool(False)
bool(None)
bool(0)
bool("")
bool(())
bool([])
bool({})
```
Python also has many built-in functions that return a boolean value, like the ```isinstance()``` function, which can be used to determine if an object is of a certain data type:\n
```
x = 200
print(isinstance(x, int))
```
# Numbers\n
There are three numeric types in Python:\n
```int```\n
```float```\n
```complex```\n
Variables of numeric types are created when you assign a value to them:\n
```
x = 1 # int
y = 2.8 # float
z = 1j # complex
```
To verify the type of any object in Python, use the type() function:\n
```
print(type(x))
print(type(y))
print(type(z))
```
Integers:\n
Int, or integer, is a whole number, positive or negative, without decimals, of unlimited length.\n
```
x = 1
y = 35656222554887711
z = -3255522
print(type(x))
print(type(y))
print(type(z))
```
Floats:\n
Float, or "floating point number" is a number, positive or negative, containing one or more decimals.\n
```
x = 1.10
y = 1.0
z = -35.59
print(type(x))
print(type(y))
print(type(z))
```
Float can also be scientific numbers with an "e" to indicate the power of 10.\n
```
x = 35e3
y = 12E4
z = -87.7e100
print(type(x))
print(type(y))
print(type(z))
```
Complex:\n
Complex numbers are written with a "j" as the imaginary part:\n
```
x = 3+5j
y = 5j
z = -5j
print(type(x))
print(type(y))
print(type(z))
```
You can convert from one type to another with the int(), float(), and complex() methods:\n
```
x = 1 # int
y = 2.8 # float
z = 1j # complex
#convert from int to float:
a = float(x)
#convert from float to int:
b = int(y)
#convert from int to complex:
c = complex(x)
print(a)
print(b)
print(c)
print(type(a))
print(type(b))
print(type(c))
```
# Casting
There may be times when you want to specify a type on to a variable. This can be done with casting. Python is an object-orientated language, and as such it uses classes to define data types, including its primitive types.\n
Casting in python is therefore done using constructor functions:\n
int() - constructs an integer number from an integer literal, a float literal (by rounding down to the previous whole number), or a string literal (providing the string represents a whole number)\n
float() - constructs a float number from an integer literal, a float literal or a string literal (providing the string represents a float or an integer)\n
str() - constructs a string from a wide variety of data types, including strings, integer literals and float literals\n
Integers:\n
```
x = int(1) # x will be 1
y = int(2.8) # y will be 2
z = int("3") # z will be 3
```
Floats:\n
```
x = float(1) # x will be 1.0
y = float(2.8) # y will be 2.8
z = float("3") # z will be 3.0
w = float("4.2") # w will be 4.2
```
Strings:\n
```
x = str("s1") # x will be 's1'
y = str(2) # y will be '2'
z = str(3.0) # z will be '3.0'
```
# Strings\n
Strings in python are surrounded by either single quotation marks, or double quotation marks.
'hello' is the same as "hello".\n
You can display a string literal with the print() function:\n
```
print("Hello")
print('Hello')
```
# Operators\n
Operators are used to perform operations on variables and values.\n
In the example below, we use the ```+``` operator to add together two values:\n
```
print(10 + 5)
```
Python divides the operators in the following groups:\n
Arithmetic operators\n
Assignment operators\n
Comparison operators\n
Logical operators\n
Identity operators\n
Membership operators\n
Bitwise operators\n
Arithmetic operators are used with numeric values to perform common mathematical operations:\n
```
Operator Name Example
+ Addition x + y
- Subtraction x - y
* Multiplication x * y
/ Division x / y
% Modulus x % y
** Exponentiation x ** y
// Floor division x // y
```
Assignment operators are used to assign values to variables:\n
```
Operator Example Same As
= x = 5 x = 5
+= x += 3 x = x + 3
-= x -= 3 x = x - 3
*= x *= 3 x = x * 3
/= x /= 3 x = x / 3
%= x %= 3 x = x % 3
//= x //= 3 x = x // 3
**= x **= 3 x = x ** 3
&= x &= 3 x = x & 3
|= x |= 3 x = x | 3
^= x ^= 3 x = x ^ 3
>>= x >>= 3 x = x >> 3
<<= x <<= 3 x = x << 3
```
Comparison operators are used to compare two values:\n
```
Operator Name Example
== Equal x == y
!= Not equal x != y
> Greater than x > y
< Less than x < y
>= Greater than or equal to x >= y
<= Less than or equal to x <= y
```
Logical operators are used to combine conditional statements:\n
```
Operator Description Example
and Returns True if both statements are true x < 5 and x < 10
or Returns True if one of the statements is true x < 5 or x < 4
not Reverse the result, returns False if the result is true not(x < 5 and x < 10)
```
Identity operators are used to compare the objects, not if they are equal, but if they are actually the same object, with the same memory location:\n
```
Operator Description Example
is Returns True if both variables are the same object x is y
is not Returns True if both variables are not the same object x is not y
```
Membership operators are used to test if a sequence is presented in an object:\n
```
Operator Description Example
in Returns True if a sequence with the specified value is present in the object x in y
not in Returns True if a sequence with the specified value is not present in the object x not in y
```
Bitwise operators are used to compare (binary) numbers:
```
Operator Name Description
& AND Sets each bit to 1 if both bits are 1
| OR Sets each bit to 1 if one of two bits is 1
^ XOR Sets each bit to 1 if only one of two bits is 1
~ NOT Inverts all the bits
<< Zero fill left shift Shift left by pushing zeros in from the right and let the leftmost bits fall off
>> Signed right shift Shift right by pushing copies of the leftmost bit in from the left, and let the rightmost bits fall off
```
"""
|
en
| 0.767197
|
Scala Type\n 1. Booleans\n 2. Numbers\n 3. Casting\n 4. String\n 5. Operators\n # Booleans\n Booleans represent one of two values: ```True``` or ```False```.\n When you compare two values, the expression is evaluated and Python returns the Boolean answer:\n ``` print(10 > 9) print(10 >= 9) print(10 == 9) print(10 < 9) print(10 <= 9) ``` Evaluate a string and a number:\n ``` print(bool("Hello")) print(bool(15)) x = "Hello" y = 15 print(bool(x)) print(bool(y)) ``` Almost any value is evaluated to True if it has some sort of content.\n Any string is True, except empty strings.\n Any number is True, except 0.\n Any list, tuple, set, and dictionary are True, except empty ones.\n The following will return False:\n ``` bool(False) bool(None) bool(0) bool("") bool(()) bool([]) bool({}) ``` Python also has many built-in functions that return a boolean value, like the ```isinstance()``` function, which can be used to determine if an object is of a certain data type:\n ``` x = 200 print(isinstance(x, int)) ``` # Numbers\n There are three numeric types in Python:\n ```int```\n ```float```\n ```complex```\n Variables of numeric types are created when you assign a value to them:\n ``` x = 1 # int y = 2.8 # float z = 1j # complex ``` To verify the type of any object in Python, use the type() function:\n ``` print(type(x)) print(type(y)) print(type(z)) ``` Integers:\n Int, or integer, is a whole number, positive or negative, without decimals, of unlimited length.\n ``` x = 1 y = 35656222554887711 z = -3255522 print(type(x)) print(type(y)) print(type(z)) ``` Floats:\n Float, or "floating point number" is a number, positive or negative, containing one or more decimals.\n ``` x = 1.10 y = 1.0 z = -35.59 print(type(x)) print(type(y)) print(type(z)) ``` Float can also be scientific numbers with an "e" to indicate the power of 10.\n ``` x = 35e3 y = 12E4 z = -87.7e100 print(type(x)) print(type(y)) print(type(z)) ``` Complex:\n Complex numbers are written with a "j" as the imaginary 
part:\n ``` x = 3+5j y = 5j z = -5j print(type(x)) print(type(y)) print(type(z)) ``` You can convert from one type to another with the int(), float(), and complex() methods:\n ``` x = 1 # int y = 2.8 # float z = 1j # complex #convert from int to float: a = float(x) #convert from float to int: b = int(y) #convert from int to complex: c = complex(x) print(a) print(b) print(c) print(type(a)) print(type(b)) print(type(c)) ``` # Casting There may be times when you want to specify a type on to a variable. This can be done with casting. Python is an object-orientated language, and as such it uses classes to define data types, including its primitive types.\n Casting in python is therefore done using constructor functions:\n int() - constructs an integer number from an integer literal, a float literal (by rounding down to the previous whole number), or a string literal (providing the string represents a whole number)\n float() - constructs a float number from an integer literal, a float literal or a string literal (providing the string represents a float or an integer)\n str() - constructs a string from a wide variety of data types, including strings, integer literals and float literals\n Integers:\n ``` x = int(1) # x will be 1 y = int(2.8) # y will be 2 z = int("3") # z will be 3 ``` Floats:\n ``` x = float(1) # x will be 1.0 y = float(2.8) # y will be 2.8 z = float("3") # z will be 3.0 w = float("4.2") # w will be 4.2 ``` Strings:\n ``` x = str("s1") # x will be 's1' y = str(2) # y will be '2' z = str(3.0) # z will be '3.0' ``` # Strings\n Strings in python are surrounded by either single quotation marks, or double quotation marks. 
'hello' is the same as "hello".\n You can display a string literal with the print() function:\n ``` print("Hello") print('Hello') ``` # Operators\n Operators are used to perform operations on variables and values.\n In the example below, we use the ```+``` operator to add together two values:\n ``` print(10 + 5) ``` Python divides the operators in the following groups:\n Arithmetic operators\n Assignment operators\n Comparison operators\n Logical operators\n Identity operators\n Membership operators\n Bitwise operators\n Arithmetic operators are used with numeric values to perform common mathematical operations:\n ``` Operator Name Example + Addition x + y - Subtraction x - y * Multiplication x * y / Division x / y % Modulus x % y ** Exponentiation x ** y // Floor division x // y ``` Assignment operators are used to assign values to variables:\n ``` Operator Example Same As = x = 5 x = 5 += x += 3 x = x + 3 -= x -= 3 x = x - 3 *= x *= 3 x = x * 3 /= x /= 3 x = x / 3 %= x %= 3 x = x % 3 //= x //= 3 x = x // 3 **= x **= 3 x = x ** 3 &= x &= 3 x = x & 3 |= x |= 3 x = x | 3 ^= x ^= 3 x = x ^ 3 >>= x >>= 3 x = x >> 3 <<= x <<= 3 x = x << 3 ``` Comparison operators are used to compare two values:\n ``` Operator Name Example == Equal x == y != Not equal x != y > Greater than x > y < Less than x < y >= Greater than or equal to x >= y <= Less than or equal to x <= y ``` Logical operators are used to combine conditional statements:\n ``` Operator Description Example and Returns True if both statements are true x < 5 and x < 10 or Returns True if one of the statements is true x < 5 or x < 4 not Reverse the result, returns False if the result is true not(x < 5 and x < 10) ``` Identity operators are used to compare the objects, not if they are equal, but if they are actually the same object, with the same memory location:\n ``` Operator Description Example is Returns True if both variables are the same object x is y is not Returns True if both variables are not the same object 
x is not y ``` Membership operators are used to test if a sequence is presented in an object:\n ``` Operator Description Example in Returns True if a sequence with the specified value is present in the object x in y not in Returns True if a sequence with the specified value is not present in the object x not in y ``` Bitwise operators are used to compare (binary) numbers: ``` Operator Name Description & AND Sets each bit to 1 if both bits are 1 | OR Sets each bit to 1 if one of two bits is 1 ^ XOR Sets each bit to 1 if only one of two bits is 1 ~ NOT Inverts all the bits << Zero fill left shift Shift left by pushing zeros in from the right and let the leftmost bits fall off >> Signed right shift Shift right by pushing copies of the leftmost bit in from the left, and let the rightmost bits fall off ```
| 4.367694
| 4
|
setup.py
|
Edsger-dev/Edsger
| 0
|
6628555
|
from setuptools import setup, find_packages, Extension
from codecs import open # To use a consistent encoding
from Cython.Build import cythonize
import numpy
import os
import re
requirements = ["cython", "numpy", "pandas", "scipy", "psutil"]
setup_requirements = ["cython", "numpy"]
test_requirements = ["pytest"]
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
with open(os.path.join(here, *parts), "r") as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
# Get the licence
with open("LICENSE") as f:
license = f.read()
extra_compile_args = ["-Ofast"]
extensions = [
Extension("edsger.commons", ["src/edsger/commons.pyx"]),
Extension(
"edsger.priority_queue_binary_heap",
["src/edsger/priority_queue_binary_heap.pyx"],
extra_compile_args=extra_compile_args,
),
Extension(
"edsger.priority_queue_fibonacci_heap",
["src/edsger/priority_queue_fibonacci_heap.pyx"],
extra_compile_args=extra_compile_args,
),
Extension("edsger.test_heaps", ["src/edsger/test_heaps.pyx"]),
Extension(
"edsger.priority_queue_timings", ["src/edsger/priority_queue_timings.pyx"]
),
Extension("edsger.shortestpath", ["src/edsger/shortestpath.pyx"]),
]
setup(
name="Edsger",
version=find_version("src", "edsger", "__init__.py"),
description="Static user equilibrium assignment",
author="<NAME>",
author_email="<EMAIL>",
license=license,
package_dir={"": "src"},
packages=find_packages(where="src"),
package_data={
"edsger.commons": ["src/edsger/commons.pxd"],
"edsger.priority_queue_binary_heap": [
"src/edsger/priority_queue_binary_heap.pxd"
],
"edsger.priority_queue_fibonacci_heap": [
"src/edsger/priority_queue_fibonacci_heap.pxd"
],
},
ext_modules=cythonize(
extensions,
compiler_directives={"language_level": "3"},
include_path=["src/edsger/"],
),
install_requires=requirements,
setup_requires=setup_requirements,
tests_require=test_requirements,
extras_require={"test": test_requirements},
include_dirs=[numpy.get_include()],
)
|
from setuptools import setup, find_packages, Extension
from codecs import open # To use a consistent encoding
from Cython.Build import cythonize
import numpy
import os
import re
requirements = ["cython", "numpy", "pandas", "scipy", "psutil"]
setup_requirements = ["cython", "numpy"]
test_requirements = ["pytest"]
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
with open(os.path.join(here, *parts), "r") as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
# Get the licence
with open("LICENSE") as f:
license = f.read()
extra_compile_args = ["-Ofast"]
extensions = [
Extension("edsger.commons", ["src/edsger/commons.pyx"]),
Extension(
"edsger.priority_queue_binary_heap",
["src/edsger/priority_queue_binary_heap.pyx"],
extra_compile_args=extra_compile_args,
),
Extension(
"edsger.priority_queue_fibonacci_heap",
["src/edsger/priority_queue_fibonacci_heap.pyx"],
extra_compile_args=extra_compile_args,
),
Extension("edsger.test_heaps", ["src/edsger/test_heaps.pyx"]),
Extension(
"edsger.priority_queue_timings", ["src/edsger/priority_queue_timings.pyx"]
),
Extension("edsger.shortestpath", ["src/edsger/shortestpath.pyx"]),
]
setup(
name="Edsger",
version=find_version("src", "edsger", "__init__.py"),
description="Static user equilibrium assignment",
author="<NAME>",
author_email="<EMAIL>",
license=license,
package_dir={"": "src"},
packages=find_packages(where="src"),
package_data={
"edsger.commons": ["src/edsger/commons.pxd"],
"edsger.priority_queue_binary_heap": [
"src/edsger/priority_queue_binary_heap.pxd"
],
"edsger.priority_queue_fibonacci_heap": [
"src/edsger/priority_queue_fibonacci_heap.pxd"
],
},
ext_modules=cythonize(
extensions,
compiler_directives={"language_level": "3"},
include_path=["src/edsger/"],
),
install_requires=requirements,
setup_requires=setup_requirements,
tests_require=test_requirements,
extras_require={"test": test_requirements},
include_dirs=[numpy.get_include()],
)
|
en
| 0.502471
|
# To use a consistent encoding # Get the licence
| 1.972603
| 2
|
tools/setup_hall_as_index.py
|
deafloo/ODrive
| 1,068
|
6628556
|
"""Configure an ODrive axis for Hall/index-based encoder calibration.

Connects to the first ODrive found, applies motor/encoder/controller
settings, then runs motor calibration, encoder direction find, index
search and encoder offset calibration. On success the results are
marked pre-calibrated and (optionally) saved before rebooting.
"""
import sys
import time

import odrive
from odrive.utils import dump_errors
from odrive.enums import *

print("Finding an odrive...")
odrv = odrive.find_any()

# Axes to configure; add odrv.axis1 for a dual-axis setup.
# axes = [odrv.axis0, odrv.axis1]
axes = [odrv.axis0]

# Set True if the index pulse lies in the opposite lock-in direction.
flip_index_search_direction = False
save_and_reboot = True

print("Setting config...")
# Settings to protect battery (DC bus trip levels in volts; no brake resistor).
odrv.config.dc_bus_overvoltage_trip_level = 14.8
odrv.config.dc_bus_undervoltage_trip_level = 8.0
odrv.config.brake_resistance = 0

for ax in axes:
    # Motor limits and calibration parameters.
    ax.motor.config.requested_current_range = 25
    ax.motor.config.calibration_current = 10
    ax.motor.config.current_lim = 10
    ax.motor.config.resistance_calib_max_voltage = 4
    ax.motor.config.pole_pairs = 10
    # Encoder: 4096-count quadrature with index pulse, found during lock-in only.
    ax.encoder.config.cpr = 4096
    ax.encoder.config.use_index = True
    ax.encoder.config.find_idx_on_lockin_only = True
    # Velocity control gains.
    ax.controller.config.control_mode = CONTROL_MODE_VELOCITY_CONTROL
    ax.controller.config.vel_limit = 10000
    ax.controller.config.vel_gain = 0.002205736003816127
    ax.controller.config.vel_integrator_gain = 0.022057360038161278
    ax.controller.config.pos_gain = 26
    # Lock-in drive used while searching for the index pulse.
    ax.config.lockin.current = 10
    ax.config.lockin.ramp_distance = 3.14
    ax.config.lockin.vel = 15
    ax.config.lockin.accel = 10
    ax.config.lockin.finish_distance = 30


def wait_and_exit_on_error(ax):
    """Block until *ax* returns to IDLE; dump errors and abort on failure."""
    while ax.current_state != AXIS_STATE_IDLE:
        time.sleep(0.1)
    if ax.error != AXIS_ERROR_NONE:
        dump_errors(odrv, True)
        # Nonzero exit code so callers/scripts can detect the failure.
        sys.exit(1)


for axnum, ax in enumerate(axes):
    print("Calibrating motor {}...".format(axnum))
    ax.requested_state = AXIS_STATE_MOTOR_CALIBRATION
    wait_and_exit_on_error(ax)

    print("Checking motor {} direction...".format(axnum))
    ax.requested_state = AXIS_STATE_ENCODER_DIR_FIND
    wait_and_exit_on_error(ax)
    print(" Direction is {}".format(ax.motor.config.direction))

    if flip_index_search_direction:
        # Reverse the lock-in sweep so the index is approached the other way.
        ax.config.lockin.ramp_distance = -ax.config.lockin.ramp_distance
        ax.config.lockin.vel = -ax.config.lockin.vel
        ax.config.lockin.accel = -ax.config.lockin.accel

    print("Searching for index on motor {}...".format(axnum))
    ax.requested_state = AXIS_STATE_ENCODER_INDEX_SEARCH
    wait_and_exit_on_error(ax)
    if not ax.encoder.index_found:
        print("Failed finding index! Quitting.")
        sys.exit(1)

    print("Calibrating encoder offset on motor {}...".format(axnum))
    ax.requested_state = AXIS_STATE_ENCODER_OFFSET_CALIBRATION
    wait_and_exit_on_error(ax)
    if not ax.encoder.is_ready:
        print("Failed to calibrate encoder! Quitting")
        sys.exit(1)

    # If we get here there were no errors, so let's commit the values.
    ax.motor.config.pre_calibrated = True
    ax.encoder.config.pre_calibrated = True
    # Uncomment to automatically run index search and closed-loop control on boot:
    # ax.config.startup_encoder_index_search = True
    # ax.config.startup_closed_loop_control = True

# Everything should be good to go here, so let's save and reboot.
print("")
print("All operations successful!")
if save_and_reboot:
    odrv.save_configuration()
    try:
        odrv.reboot()
    except odrive.fibre.ObjectLostError:
        # The board drops its USB connection while rebooting; this is expected.
        pass
|
# Hall/index encoder setup script for an ODrive: connects to the first board
# found, applies motor/encoder/controller config, runs calibration, then
# optionally saves and reboots. (Verbatim duplicate copy in this dump.)
import odrive
from odrive.utils import dump_errors
from odrive.enums import *
import time
print("Finding an odrive...")
odrv = odrive.find_any()
# axes = [odrv.axis0, odrv.axis1];
axes = [odrv.axis0];
# Set True if the index pulse lies in the opposite lock-in direction.
flip_index_search_direction = False
save_and_reboot = True
print("Setting config...")
# Settings to protect battery
odrv.config.dc_bus_overvoltage_trip_level = 14.8
odrv.config.dc_bus_undervoltage_trip_level = 8.0
odrv.config.brake_resistance = 0
for ax in axes:
    # Motor limits and calibration parameters.
    ax.motor.config.requested_current_range = 25
    ax.motor.config.calibration_current = 10
    ax.motor.config.current_lim = 10
    ax.motor.config.resistance_calib_max_voltage = 4
    ax.motor.config.pole_pairs = 10
    # 4096-count quadrature encoder with index pulse, found during lock-in.
    ax.encoder.config.cpr = 4096
    ax.encoder.config.use_index = True
    ax.encoder.config.find_idx_on_lockin_only = True
    ax.controller.config.control_mode = CONTROL_MODE_VELOCITY_CONTROL
    ax.controller.config.vel_limit = 10000
    ax.controller.config.vel_gain = 0.002205736003816127
    ax.controller.config.vel_integrator_gain = 0.022057360038161278
    ax.controller.config.pos_gain = 26
    # Lock-in drive used while searching for the index pulse.
    ax.config.lockin.current = 10
    ax.config.lockin.ramp_distance = 3.14
    ax.config.lockin.vel = 15
    ax.config.lockin.accel = 10
    ax.config.lockin.finish_distance = 30
def wait_and_exit_on_error(ax):
    """Block until *ax* returns to IDLE; dump errors and exit on failure."""
    while ax.current_state != AXIS_STATE_IDLE:
        time.sleep(0.1)
    if ax.error != AXIS_ERROR_NONE:
        dump_errors(odrv, True)
        exit()
for axnum, ax in enumerate(axes):
    print("Calibrating motor {}...".format(axnum))
    ax.requested_state = AXIS_STATE_MOTOR_CALIBRATION
    wait_and_exit_on_error(ax)
    print("Checking motor {} direction...".format(axnum))
    ax.requested_state = AXIS_STATE_ENCODER_DIR_FIND
    wait_and_exit_on_error(ax)
    print(" Direction is {}".format(ax.motor.config.direction))
    if flip_index_search_direction:
        # Reverse the lock-in sweep so the index is approached the other way.
        ax.config.lockin.ramp_distance = -ax.config.lockin.ramp_distance
        ax.config.lockin.vel = -ax.config.lockin.vel
        ax.config.lockin.accel = -ax.config.lockin.accel
    print("Searching for index on motor {}...".format(axnum))
    ax.requested_state = AXIS_STATE_ENCODER_INDEX_SEARCH
    wait_and_exit_on_error(ax)
    if (not ax.encoder.index_found):
        print("Failed finding index! Quitting.")
        exit()
    print("Calibrating encoder offset on motor {}...".format(axnum))
    ax.requested_state = AXIS_STATE_ENCODER_OFFSET_CALIBRATION
    wait_and_exit_on_error(ax)
    if (not ax.encoder.is_ready):
        print("Failed to calibrate encoder! Quitting")
        exit()
    # If we get here there were no errors, so let's commit the values
    ax.motor.config.pre_calibrated = True
    ax.encoder.config.pre_calibrated = True
    # Uncomment this if you wish to automatically run index search and closed loop control on boot
    # ax.config.startup_encoder_index_search = True
    # ax.config.startup_closed_loop_control = True
#Everything should be good to go here, so let's save and reboot
print("")
print("All operations successful!")
if save_and_reboot:
    odrv.save_configuration()
    try:
        odrv.reboot()
    except odrive.fibre.ObjectLostError:
        # The board drops its USB connection while rebooting; expected.
        pass
|
en
| 0.750558
|
# axes = [odrv.axis0, odrv.axis1]; # Settings to protect battery # If we get here there were no errors, so let's commit the values # Uncomment this if you wish to automatically run index search and closed loop control on boot # ax.config.startup_encoder_index_search = True # ax.config.startup_closed_loop_control = True #Everything should be good to go here, so let's save and reboot
| 2.336888
| 2
|
kitsune/sumo/tests/test_parser.py
|
The-smooth-operator/kitsune
| 929
|
6628557
|
<reponame>The-smooth-operator/kitsune
from functools import partial
from django.conf import settings
from nose.tools import eq_
from pyquery import PyQuery as pq
from kitsune.gallery.tests import ImageFactory
from kitsune.sumo.parser import (
WikiParser,
build_hook_params,
_get_wiki_link,
get_object_fallback,
IMAGE_PARAMS,
IMAGE_PARAM_VALUES,
)
from kitsune.sumo.tests import TestCase
from kitsune.wiki.models import Document
from kitsune.wiki.tests import DocumentFactory, ApprovedRevisionFactory
def pq_link(p, text):
    """Parse wiki *text* with parser *p* and return its anchor elements."""
    parsed = p.parse(text)
    return pq(parsed)("a")
def pq_img(p, text, selector="img", locale=settings.WIKI_DEFAULT_LANGUAGE):
    """Parse wiki *text* in *locale* and return the elements matching *selector*."""
    return pq(p.parse(text, locale=locale))(selector)
def doc_rev_parser(content, title="Installing Firefox", parser_cls=WikiParser, **kwargs):
    """Create a document with one approved revision.

    Returns a ``(document, revision, parser)`` tuple, where *parser* is a
    fresh instance of *parser_cls*.
    """
    parser = parser_cls()
    document = DocumentFactory(title=title, **kwargs)
    revision = ApprovedRevisionFactory(document=document, content=content)
    return (document, revision, parser)
# Convenience wrapper: build_hook_params pre-bound to the default wiki locale
# and the allowed image parameter names/values.
build_hook_params_default = partial(
    build_hook_params,
    locale=settings.WIKI_DEFAULT_LANGUAGE,
    allowed_params=IMAGE_PARAMS,
    allowed_param_values=IMAGE_PARAM_VALUES,
)
class GetObjectFallbackTests(TestCase):
    """Tests for get_object_fallback's locale fallback and redirect behavior."""

    def test_empty(self):
        """get_object_fallback returns message when no objects."""
        # English does not exist
        obj = get_object_fallback(Document, "A doc", "en-US", "!")
        eq_("!", obj)
    def test_english(self):
        """An existing en-US document is returned directly."""
        # Create the English document
        d = DocumentFactory(title="A doc")
        # Now it exists
        obj = get_object_fallback(Document, "A doc", "en-US", "!")
        eq_(d, obj)
    def test_from_french(self):
        """A French lookup falls back to the English document."""
        # Create the English document
        d = DocumentFactory(title="A doc")
        d.save()
        # Returns English document for French
        obj = get_object_fallback(Document, "A doc", "fr", "!")
        eq_(d, obj)
    def test_french(self):
        """A French document is found by its own title."""
        # Create English parent document
        en_d = DocumentFactory()
        ApprovedRevisionFactory(document=en_d)
        # Create the French document
        fr_d = DocumentFactory(parent=en_d, title="A doc", locale="fr")
        obj = get_object_fallback(Document, "A doc", "fr", "!")
        eq_(fr_d, obj)
        # Also works when English exists
        DocumentFactory(title="A doc")
        obj = get_object_fallback(Document, "A doc", "fr", "!")
        eq_(fr_d, obj)
    def test_translated(self):
        """If a localization of the English fallback exists, use it."""
        en_d = DocumentFactory(title="A doc")
        ApprovedRevisionFactory(document=en_d)
        fr_d = DocumentFactory(parent=en_d, title="Une doc", locale="fr")
        # Without an approved revision, the en-US doc should be returned.
        obj = get_object_fallback(Document, "A doc", "fr")
        eq_(en_d, obj)
        # Approve a revision, then fr doc should be returned.
        ApprovedRevisionFactory(document=fr_d)
        obj = get_object_fallback(Document, "A doc", "fr")
        eq_(fr_d, obj)
    def test_redirect(self):
        """Assert get_object_fallback follows wiki redirects."""
        target_rev = ApprovedRevisionFactory(document__title="target")
        translated_target_rev = ApprovedRevisionFactory(
            document__parent=target_rev.document, document__locale="de"
        )
        ApprovedRevisionFactory(document__title="redirect", content="REDIRECT [[target]]")
        eq_(
            translated_target_rev.document,
            get_object_fallback(Document, "redirect", "de"),
        )
    def test_redirect_translations_only(self):
        """Make sure get_object_fallback doesn't follow redirects when working
        purely in the default language.
        That would make it hard to navigate to redirects (to edit them, for
        example).
        """
        ApprovedRevisionFactory(document__title="target", content="O hai.")
        redirect_rev = ApprovedRevisionFactory(
            document__title="redirect", content="REDIRECT [[target]]"
        )
        eq_(
            redirect_rev.document,
            get_object_fallback(Document, "redirect", redirect_rev.document.locale),
        )
class TestWikiParser(TestCase):
    """Tests for WikiParser markup handling, embeds and HTML sanitization."""

    def setUp(self):
        """Create one approved document and a parser for the tests to share."""
        self.d, self.r, self.p = doc_rev_parser("Test content", "Installing Firefox")
    def test_image_params_page(self):
        """build_hook_params handles wiki pages."""
        _, params = build_hook_params_default("t|page=Installing Firefox")
        eq_("/en-US/kb/installing-firefox", params["link"])
        assert params["found"]
    def test_image_params_link(self):
        """_build_image_params handles external links."""
        _, params = build_hook_params_default("t|link=http://example.com")
        eq_("http://example.com", params["link"])
    def test_image_params_page_link(self):
        """_build_image_params - wiki page overrides link."""
        text = "t|page=Installing Firefox|link=http://example.com"
        _, params = build_hook_params_default(text)
        eq_("/en-US/kb/installing-firefox", params["link"])
    def test_image_params_align(self):
        """Align valid options."""
        align_vals = ("none", "left", "center", "right")
        for align in align_vals:
            _, params = build_hook_params_default("test.jpg|align=" + align)
            eq_(align, params["align"])
    def test_image_params_align_invalid(self):
        """Align invalid options."""
        _, params = build_hook_params_default("align=zzz")
        assert "align" not in params, "Align is present in params"
    def test_image_params_valign(self):
        """Vertical align valid options."""
        valign_vals = (
            "baseline",
            "sub",
            "super",
            "top",
            "text-top",
            "middle",
            "bottom",
            "text-bottom",
        )
        for valign in valign_vals:
            _, params = build_hook_params_default("title|valign=" + valign)
            eq_(valign, params["valign"])
    def test_image_params_valign_invalid(self):
        """Vertical align invalid options."""
        _, params = build_hook_params_default("valign=zzz")
        assert "valign" not in params, "Vertical align is present in params"
    def test_image_params_alt(self):
        """Image alt override."""
        _, params = build_hook_params_default("t|alt=some alternative text")
        eq_("some alternative text", params["alt"])
    def test_image_params_frame(self):
        """Framed image."""
        _, params = build_hook_params_default("title|frame")
        assert params["frame"]
    def test_image_params_width_height(self):
        """Image width."""
        _, params = build_hook_params_default("t|width=10|height=20")
        eq_("10", params["width"])
        eq_("20", params["height"])
    def test_get_wiki_link(self):
        """Wiki links are properly built for existing pages."""
        eq_(
            {
                "found": True,
                "url": "/en-US/kb/installing-firefox",
                "text": "Installing Firefox",
            },
            _get_wiki_link("Installing Firefox", locale=settings.WIKI_DEFAULT_LANGUAGE),
        )
    def test_showfor(self):
        """<showfor> tags should be escaped, not obeyed."""
        # FIX: the expected string must be the HTML-escaped form; the raw
        # "<p><showfor>..." form would assert the tag passed through unescaped,
        # contradicting the escaping behavior this test verifies.
        eq_(
            "<p>&lt;showfor&gt;smoo&lt;/showfor&gt;</p>",
            self.p.parse("<showfor>smoo</showfor>").replace("\n", ""),
        )
    def test_youtube_video(self):
        """Verify youtube embeds."""
        # FIX: the original list was missing commas, so implicit string
        # concatenation fused three URLs into one and they were never tested.
        urls = [
            "http://www.youtube.com/watch?v=oHg5SJYRHA0",
            "https://youtube.com/watch?v=oHg5SJYRHA0",
            "http://youtu.be/oHg5SJYRHA0",
            "https://youtu.be/oHg5SJYRHA0",
        ]
        for url in urls:
            doc = pq(self.p.parse("[[V:%s]]" % url))
            assert doc("iframe")[0].attrib["src"].startswith("//www.youtube.com/embed/oHg5SJYRHA0")
    def test_iframe_in_markup(self):
        """Verify iframe in wiki markup is escaped."""
        doc = pq(self.p.parse('<iframe src="http://example.com"></iframe>'))
        eq_(0, len(doc("iframe")))
    def test_iframe_hell_bug_898769(self):
        """Verify fix for bug 898769."""
        content = r"""<iframe/src \/\/onload = prompt(1)
<iframe/onreadystatechange=alert(/@blinkms/)
<svg/onload=alert(1)"""
        # NOTE(review): the expected string below appears to have lost its HTML
        # entities during extraction (cf. test_showfor) — verify against the
        # upstream test before relying on it.
        eq_(
            '<p><iframe src="" \\="" onload="prompt(1)" <="" p=""'
            "><p><iframe/onreadystatechange="
            "alert(/@blinkms/)\n</p><p><"
            "svg/onload=alert(1)\n</p></iframe></p>",
            self.p.parse(content),
        )
    def test_injections(self):
        """Image tags survive; script-bearing src attributes are stripped."""
        testdata = (
            # Normal image urls
            (
                '<img src="https://example.com/nursekitty.jpg">',
                '<p><img src="https://example.com/nursekitty.jpg">\n</p>',
            ),
            (
                "<img src=https://example.com/nursekitty.jpg />",
                '<p><img src="https://example.com/nursekitty.jpg">\n</p>',
            ),
            (
                '<img src="https://example.com/nursekitty.jpg" />',
                '<p><img src="https://example.com/nursekitty.jpg">\n</p>',
            ),
            (
                "<img src=https://example.com/nursekitty.jpg </img>",
                '<p><img src="https://example.com/nursekitty.jpg"></p>',
            ),
            # Script insertions from OWASP site
            ("<IMG SRC=`javascript:alert(\"'XSS'\")`>", "<p><img>\n</p>"),
            ('<IMG SRC=javascript:alert("XSS")>', "<p><img>\n</p>"),
            ("<IMG SRC=JaVaScRiPt:alert('XSS')>", "<p><img>\n</p>"),
            ("<IMG SRC=javascript:alert('XSS')>", "<p><img>\n</p>"),
            ("<IMG SRC=\"javascript:alert('XSS');\">", "<p><img>\n</p>"),
        )
        for content, expected in testdata:
            eq_(expected, self.p.parse(content))
class TestWikiInternalLinks(TestCase):
    """Tests for [[...]] internal-link markup: hashes, names, new pages, l10n."""

    def setUp(self):
        # Shared approved document so "Installing Firefox" links resolve.
        self.d, self.r, self.p = doc_rev_parser("Test content", "Installing Firefox")
    def test_simple(self):
        """Simple internal link markup."""
        link = pq_link(self.p, "[[Installing Firefox]]")
        eq_("/en-US/kb/installing-firefox", link.attr("href"))
        eq_("Installing Firefox", link.text())
        assert not link.hasClass("new")
    def test_simple_markup(self):
        """Full rendered markup of a simple internal link."""
        text = "[[Installing Firefox]]"
        eq_(
            '<p><a href="/en-US/kb/installing-firefox">' + "Installing Firefox</a></p>",
            self.p.parse(text).replace("\n", ""),
        )
    def test_link_hash(self):
        """Internal link with hash."""
        link = pq_link(self.p, "[[Installing Firefox#section name]]")
        eq_("/en-US/kb/installing-firefox#section_name", link.attr("href"))
        eq_("Installing Firefox", link.text())
    def test_link_hash_text(self):
        """Internal link with hash and text."""
        link = pq_link(self.p, "[[Installing Firefox#section name|section]]")
        eq_("/en-US/kb/installing-firefox#section_name", link.attr("href"))
        eq_("section", link.text())
    def test_hash_only(self):
        """Internal hash only."""
        link = pq_link(self.p, "[[#section 3]]")
        eq_("#section_3", link.attr("href"))
        eq_("#section 3", link.text())
    def test_link_name(self):
        """Internal link with name."""
        link = pq_link(self.p, "[[Installing Firefox|this name]]")
        eq_("/en-US/kb/installing-firefox", link.attr("href"))
        eq_("this name", link.text())
    def test_link_with_extra_pipe(self):
        """Only the first pipe splits; extra pipes stay in the link text."""
        link = pq_link(self.p, "[[Installing Firefox|with|pipe]]")
        eq_("/en-US/kb/installing-firefox", link.attr("href"))
        eq_("with|pipe", link.text())
    def test_hash_name(self):
        """Internal hash with name."""
        link = pq_link(self.p, "[[#section 3|this name]]")
        eq_("#section_3", link.attr("href"))
        eq_("this name", link.text())
        assert not link.hasClass("new")
    def test_link_hash_name(self):
        """Internal link with hash and name."""
        link = pq_link(self.p, "[[Installing Firefox#section 3|this name]]")
        eq_("/en-US/kb/installing-firefox#section_3", link.attr("href"))
        eq_("this name", link.text())
    def test_link_hash_name_markup(self):
        """Internal link with hash and name."""
        text = "[[Installing Firefox#section 3|this name]]"
        eq_(
            '<p><a href="/en-US/kb/installing-firefox#section_3">this name</a>\n</p>',
            self.p.parse(text),
        )
    def test_simple_create(self):
        """Simple link for inexistent page."""
        link = pq_link(self.p, "[[A new page]]")
        assert link.hasClass("new")
        eq_("/en-US/kb/new?title=A+new+page", link.attr("href"))
        eq_("A new page", link.text())
    def test_link_edit_hash_name(self):
        """Internal link for inexistent page with hash and name."""
        link = pq_link(self.p, "[[A new page#section 3|this name]]")
        eq_("/en-US/kb/new?title=A+new+page#section_3", link.attr("href"))
        eq_("this name", link.text())
    def test_link_with_localization(self):
        """A link to an English doc with a local translation."""
        en_d = DocumentFactory(title="A doc")
        ApprovedRevisionFactory(document=en_d)
        fr_d = DocumentFactory(parent=en_d, title="Une doc", locale="fr")
        # Without an approved revision, link should go to en-US doc.
        # The site should stay in fr locale (/<locale>/<en-US slug>).
        link = pq(self.p.parse("[[A doc]]", locale="fr"))
        eq_("/fr/kb/a-doc", link.find("a").attr("href"))
        eq_("A doc", link.find("a").text())
        # Approve a revision. Now link should go to fr doc.
        ApprovedRevisionFactory(document=fr_d)
        link = pq(self.p.parse("[[A doc]]", locale="fr"))
        eq_("/fr/kb/une-doc", link.find("a").attr("href"))
        eq_("Une doc", link.find("a").text())
class TestWikiImageTags(TestCase):
    """Tests for [[Image:...]] markup: captions, links, sizing, escaping."""

    def setUp(self):
        self.d, self.r, self.p = doc_rev_parser("Test content", "Installing Firefox")
        self.img = ImageFactory(title="test.jpg")
    def tearDown(self):
        self.img.delete()
    def test_empty(self):
        """Empty image tag markup does not change."""
        img = pq_img(self.p, "[[Image:]]", "p")
        eq_('The image "" does not exist.', img.text())
    def test_simple(self):
        """Simple image tag markup."""
        img = pq_img(self.p, "[[Image:test.jpg]]", "img")
        eq_("test.jpg", img.attr("alt"))
        eq_(self.img.file.url, img.attr("src"))
    def test_simple_fallback(self):
        """Fallback to English if current locale doesn't have the image."""
        img = pq_img(self.p, "[[Image:test.jpg]]", selector="img", locale="ja")
        eq_("test.jpg", img.attr("alt"))
        eq_(self.img.file.url, img.attr("src"))
    def test_full_fallback(self):
        """Find current locale's image, not the English one."""
        # first, pretend there is no English version
        self.img.locale = "ja"
        self.img.save()
        img = pq_img(self.p, "[[Image:test.jpg]]", selector="img", locale="ja")
        eq_("test.jpg", img.attr("alt"))
        eq_(self.img.file.url, img.attr("src"))
        # then, create an English version
        en_img = ImageFactory(title="test.jpg", locale="en-US")
        # Ensure they're not equal
        self.assertNotEqual(en_img.file.url, self.img.file.url)
        # make sure there is no fallback
        img = pq_img(self.p, "[[Image:test.jpg]]", selector="img", locale="ja")
        eq_("test.jpg", img.attr("alt"))
        eq_(self.img.file.url, img.attr("src"))
        # now delete the English version
        self.img.delete()
        self.img = en_img  # don't break tearDown
        img = pq_img(self.p, "[[Image:test.jpg]]", selector="img", locale="ja")
        eq_("test.jpg", img.attr("alt"))
        eq_(self.img.file.url, img.attr("src"))
    def test_caption(self):
        """Give the image a caption."""
        self.img.title = "img test.jpg"
        self.img.save()
        img_div = pq_img(self.p, "[[Image:img test.jpg|frame|my caption]]", "div.img")
        img = img_div("img")
        caption = img_div.text()
        eq_(self.img.file.url, img.attr("src"))
        eq_("my caption", img.attr("alt"))
        eq_("my caption", caption)
    def test_page_link(self):
        """Link to a wiki page."""
        img_a = pq_img(self.p, "[[Image:test.jpg|page=Installing Firefox]]", "a")
        img = img_a("img")
        eq_("test.jpg", img.attr("alt"))
        eq_(self.img.file.url, img.attr("src"))
        eq_("/en-US/kb/installing-firefox", img_a.attr("href"))
    def test_page_link_edit(self):
        """Link to a nonexistent wiki page."""
        img_a = pq_img(self.p, "[[Image:test.jpg|page=Article List]]", "a")
        img = img_a("img")
        eq_("test.jpg", img.attr("alt"))
        eq_(self.img.file.url, img.attr("src"))
        assert img_a.hasClass("new")
        eq_("/en-US/kb/new?title=Article+List", img_a.attr("href"))
    def test_page_link_caption(self):
        """Link to a wiki page with caption and frame."""
        img_div = pq_img(self.p, "[[Image:test.jpg|frame|page=A page|my caption]]", "div.img")
        img_a = img_div("a")
        img = img_a("img")
        caption = img_div.text()
        eq_("my caption", img.attr("alt"))
        eq_("my caption", caption)
        eq_(self.img.file.url, img.attr("src"))
        assert img_a.hasClass("new")
        eq_("/en-US/kb/new?title=A+page", img_a.attr("href"))
    def test_link(self):
        """Link to an external page."""
        img_a = pq_img(self.p, "[[Image:test.jpg|link=http://test.com]]", "a")
        img = img_a("img")
        eq_("test.jpg", img.attr("alt"))
        eq_(self.img.file.url, img.attr("src"))
        eq_("http://test.com", img_a.attr("href"))
    def test_link_caption(self):
        """Link to an external page with caption."""
        img_div = pq_img(self.p, "[[Image:test.jpg|link=http://ab.us|frame|caption]]", "div.img")
        img = img_div("img")
        img_a = img_div("a")
        eq_(self.img.file.url, img.attr("src"))
        eq_("http://ab.us", img_a.attr("href"))
    def test_link_align(self):
        """Link with align."""
        img_div = pq_img(self.p, "[[Image:test.jpg|link=http://site.com|align=left]]", "div.img")
        eq_("img align-left", img_div.attr("class"))
    def test_link_align_invalid(self):
        """Link with invalid align."""
        img = pq_img(self.p, "[[Image:test.jpg|link=http://example.ro|align=inv]]")
        assert "frameless" in img.attr("class")
    def test_link_valign(self):
        """Link with valign."""
        img = pq_img(self.p, "[[Image:test.jpg|link=http://example.com|valign=top]]")
        eq_("vertical-align: top;", img.attr("style"))
    def test_link_valign_invalid(self):
        """Link with invalid valign."""
        img = pq_img(self.p, "[[Image:test.jpg|link=http://example.com|valign=off]]")
        eq_(None, img.attr("style"))
    def test_alt(self):
        """Image alt attribute is overriden but caption is not."""
        img_div = pq_img(self.p, "[[Image:test.jpg|alt=my alt|frame|my caption]]", "div.img")
        img = img_div("img")
        caption = img_div.text()
        eq_("my alt", img.attr("alt"))
        eq_("my caption", caption)
    def test_alt_empty(self):
        """Image alt attribute can be empty."""
        img = pq_img(self.p, "[[Image:test.jpg|alt=|my caption]]")
        eq_("", img.attr("alt"))
    def test_alt_unsafe(self):
        """Potentially unsafe alt content is escaped."""
        # FIX: the expected values were corrupted (HTML entities were unescaped
        # during extraction, leaving invalid Python). Restored the escaped
        # forms that the alt attribute must contain after sanitization.
        unsafe_vals = (
            (
                'an"<script>alert()</script>',
                "an&quot;&lt;script&gt;alert()&lt;/script&gt;",
            ),
            (
                "an'<script>alert()</script>",
                "an'&lt;script&gt;alert()&lt;/script&gt;",
            ),
            ("single'\"double", "single'&quot;double"),
        )
        for alt_sent, alt_expected in unsafe_vals:
            img = pq_img(self.p, "[[Image:test.jpg|alt=" + alt_sent + "]]")
            is_true = str(img).startswith('<img alt="' + alt_expected + '"')
            assert is_true, 'Expected "%s", sent "%s"' % (alt_expected, alt_sent)
    def test_width(self):
        """Image width attribute set."""
        img = pq_img(self.p, "[[Image:test.jpg|width=10]]")
        eq_("10", img.attr("width"))
    def test_width_invalid(self):
        """Invalid image width attribute set to auto."""
        img = pq_img(self.p, "[[Image:test.jpg|width=invalid]]")
        eq_(None, img.attr("width"))
    def test_height(self):
        """Image height attribute set."""
        img = pq_img(self.p, "[[Image:test.jpg|height=10]]")
        eq_("10", img.attr("height"))
    def test_height_invalid(self):
        """Invalid image height attribute set to auto."""
        img = pq_img(self.p, "[[Image:test.jpg|height=invalid]]")
        eq_(None, img.attr("height"))
    def test_frame(self):
        """Image has frame if specified."""
        img_div = pq_img(self.p, "[[Image:test.jpg|frame|caption]]", "div.img")
        assert not img_div("img").hasClass("frameless")
        eq_("caption", img_div("img").attr("alt"))
        eq_("caption", img_div.text())
        eq_(self.img.file.url, img_div("img").attr("src"))
    def test_frameless_link(self):
        """Image has frameless class and link if specified."""
        img_a = pq_img(self.p, "[[Image:test.jpg|page=Installing Firefox]]", "a")
        img = img_a("img")
        assert "frameless" in img.attr("class")
        eq_("/en-US/kb/installing-firefox", img_a.attr("href"))
|
from functools import partial
from django.conf import settings
from nose.tools import eq_
from pyquery import PyQuery as pq
from kitsune.gallery.tests import ImageFactory
from kitsune.sumo.parser import (
WikiParser,
build_hook_params,
_get_wiki_link,
get_object_fallback,
IMAGE_PARAMS,
IMAGE_PARAM_VALUES,
)
from kitsune.sumo.tests import TestCase
from kitsune.wiki.models import Document
from kitsune.wiki.tests import DocumentFactory, ApprovedRevisionFactory
def pq_link(p, text):
    """Parse wiki *text* with parser *p* and return its anchor elements."""
    parsed = p.parse(text)
    return pq(parsed)("a")
def pq_img(p, text, selector="img", locale=settings.WIKI_DEFAULT_LANGUAGE):
    """Parse wiki *text* in *locale* and return the elements matching *selector*."""
    return pq(p.parse(text, locale=locale))(selector)
def doc_rev_parser(content, title="Installing Firefox", parser_cls=WikiParser, **kwargs):
    """Create a document with one approved revision.

    Returns a ``(document, revision, parser)`` tuple, where *parser* is a
    fresh instance of *parser_cls*.
    """
    parser = parser_cls()
    document = DocumentFactory(title=title, **kwargs)
    revision = ApprovedRevisionFactory(document=document, content=content)
    return (document, revision, parser)
# Convenience wrapper: build_hook_params pre-bound to the default wiki locale
# and the allowed image parameter names/values.
build_hook_params_default = partial(
    build_hook_params,
    locale=settings.WIKI_DEFAULT_LANGUAGE,
    allowed_params=IMAGE_PARAMS,
    allowed_param_values=IMAGE_PARAM_VALUES,
)
class GetObjectFallbackTests(TestCase):
    """Tests for get_object_fallback's locale fallback and redirect behavior."""

    def test_empty(self):
        """get_object_fallback returns message when no objects."""
        # English does not exist
        obj = get_object_fallback(Document, "A doc", "en-US", "!")
        eq_("!", obj)
    def test_english(self):
        """An existing en-US document is returned directly."""
        # Create the English document
        d = DocumentFactory(title="A doc")
        # Now it exists
        obj = get_object_fallback(Document, "A doc", "en-US", "!")
        eq_(d, obj)
    def test_from_french(self):
        """A French lookup falls back to the English document."""
        # Create the English document
        d = DocumentFactory(title="A doc")
        d.save()
        # Returns English document for French
        obj = get_object_fallback(Document, "A doc", "fr", "!")
        eq_(d, obj)
    def test_french(self):
        """A French document is found by its own title."""
        # Create English parent document
        en_d = DocumentFactory()
        ApprovedRevisionFactory(document=en_d)
        # Create the French document
        fr_d = DocumentFactory(parent=en_d, title="A doc", locale="fr")
        obj = get_object_fallback(Document, "A doc", "fr", "!")
        eq_(fr_d, obj)
        # Also works when English exists
        DocumentFactory(title="A doc")
        obj = get_object_fallback(Document, "A doc", "fr", "!")
        eq_(fr_d, obj)
    def test_translated(self):
        """If a localization of the English fallback exists, use it."""
        en_d = DocumentFactory(title="A doc")
        ApprovedRevisionFactory(document=en_d)
        fr_d = DocumentFactory(parent=en_d, title="Une doc", locale="fr")
        # Without an approved revision, the en-US doc should be returned.
        obj = get_object_fallback(Document, "A doc", "fr")
        eq_(en_d, obj)
        # Approve a revision, then fr doc should be returned.
        ApprovedRevisionFactory(document=fr_d)
        obj = get_object_fallback(Document, "A doc", "fr")
        eq_(fr_d, obj)
    def test_redirect(self):
        """Assert get_object_fallback follows wiki redirects."""
        target_rev = ApprovedRevisionFactory(document__title="target")
        translated_target_rev = ApprovedRevisionFactory(
            document__parent=target_rev.document, document__locale="de"
        )
        ApprovedRevisionFactory(document__title="redirect", content="REDIRECT [[target]]")
        eq_(
            translated_target_rev.document,
            get_object_fallback(Document, "redirect", "de"),
        )
    def test_redirect_translations_only(self):
        """Make sure get_object_fallback doesn't follow redirects when working
        purely in the default language.
        That would make it hard to navigate to redirects (to edit them, for
        example).
        """
        ApprovedRevisionFactory(document__title="target", content="O hai.")
        redirect_rev = ApprovedRevisionFactory(
            document__title="redirect", content="REDIRECT [[target]]"
        )
        eq_(
            redirect_rev.document,
            get_object_fallback(Document, "redirect", redirect_rev.document.locale),
        )
class TestWikiParser(TestCase):
    """Tests for WikiParser markup handling, embeds and HTML sanitization."""

    def setUp(self):
        """Create one approved document and a parser for the tests to share."""
        self.d, self.r, self.p = doc_rev_parser("Test content", "Installing Firefox")
    def test_image_params_page(self):
        """build_hook_params handles wiki pages."""
        _, params = build_hook_params_default("t|page=Installing Firefox")
        eq_("/en-US/kb/installing-firefox", params["link"])
        assert params["found"]
    def test_image_params_link(self):
        """_build_image_params handles external links."""
        _, params = build_hook_params_default("t|link=http://example.com")
        eq_("http://example.com", params["link"])
    def test_image_params_page_link(self):
        """_build_image_params - wiki page overrides link."""
        text = "t|page=Installing Firefox|link=http://example.com"
        _, params = build_hook_params_default(text)
        eq_("/en-US/kb/installing-firefox", params["link"])
    def test_image_params_align(self):
        """Align valid options."""
        align_vals = ("none", "left", "center", "right")
        for align in align_vals:
            _, params = build_hook_params_default("test.jpg|align=" + align)
            eq_(align, params["align"])
    def test_image_params_align_invalid(self):
        """Align invalid options."""
        _, params = build_hook_params_default("align=zzz")
        assert "align" not in params, "Align is present in params"
    def test_image_params_valign(self):
        """Vertical align valid options."""
        valign_vals = (
            "baseline",
            "sub",
            "super",
            "top",
            "text-top",
            "middle",
            "bottom",
            "text-bottom",
        )
        for valign in valign_vals:
            _, params = build_hook_params_default("title|valign=" + valign)
            eq_(valign, params["valign"])
    def test_image_params_valign_invalid(self):
        """Vertical align invalid options."""
        _, params = build_hook_params_default("valign=zzz")
        assert "valign" not in params, "Vertical align is present in params"
    def test_image_params_alt(self):
        """Image alt override."""
        _, params = build_hook_params_default("t|alt=some alternative text")
        eq_("some alternative text", params["alt"])
    def test_image_params_frame(self):
        """Framed image."""
        _, params = build_hook_params_default("title|frame")
        assert params["frame"]
    def test_image_params_width_height(self):
        """Image width."""
        _, params = build_hook_params_default("t|width=10|height=20")
        eq_("10", params["width"])
        eq_("20", params["height"])
    def test_get_wiki_link(self):
        """Wiki links are properly built for existing pages."""
        eq_(
            {
                "found": True,
                "url": "/en-US/kb/installing-firefox",
                "text": "Installing Firefox",
            },
            _get_wiki_link("Installing Firefox", locale=settings.WIKI_DEFAULT_LANGUAGE),
        )
    def test_showfor(self):
        """<showfor> tags should be escaped, not obeyed."""
        # FIX: the expected string must be the HTML-escaped form; the raw
        # "<p><showfor>..." form would assert the tag passed through unescaped,
        # contradicting the escaping behavior this test verifies.
        eq_(
            "<p>&lt;showfor&gt;smoo&lt;/showfor&gt;</p>",
            self.p.parse("<showfor>smoo</showfor>").replace("\n", ""),
        )
    def test_youtube_video(self):
        """Verify youtube embeds."""
        # FIX: the original list was missing commas, so implicit string
        # concatenation fused three URLs into one and they were never tested.
        urls = [
            "http://www.youtube.com/watch?v=oHg5SJYRHA0",
            "https://youtube.com/watch?v=oHg5SJYRHA0",
            "http://youtu.be/oHg5SJYRHA0",
            "https://youtu.be/oHg5SJYRHA0",
        ]
        for url in urls:
            doc = pq(self.p.parse("[[V:%s]]" % url))
            assert doc("iframe")[0].attrib["src"].startswith("//www.youtube.com/embed/oHg5SJYRHA0")
    def test_iframe_in_markup(self):
        """Verify iframe in wiki markup is escaped."""
        doc = pq(self.p.parse('<iframe src="http://example.com"></iframe>'))
        eq_(0, len(doc("iframe")))
    def test_iframe_hell_bug_898769(self):
        """Verify fix for bug 898769."""
        content = r"""<iframe/src \/\/onload = prompt(1)
<iframe/onreadystatechange=alert(/@blinkms/)
<svg/onload=alert(1)"""
        # NOTE(review): the expected string below appears to have lost its HTML
        # entities during extraction (cf. test_showfor) — verify against the
        # upstream test before relying on it.
        eq_(
            '<p><iframe src="" \\="" onload="prompt(1)" <="" p=""'
            "><p><iframe/onreadystatechange="
            "alert(/@blinkms/)\n</p><p><"
            "svg/onload=alert(1)\n</p></iframe></p>",
            self.p.parse(content),
        )
    def test_injections(self):
        """Image tags survive; script-bearing src attributes are stripped."""
        testdata = (
            # Normal image urls
            (
                '<img src="https://example.com/nursekitty.jpg">',
                '<p><img src="https://example.com/nursekitty.jpg">\n</p>',
            ),
            (
                "<img src=https://example.com/nursekitty.jpg />",
                '<p><img src="https://example.com/nursekitty.jpg">\n</p>',
            ),
            (
                '<img src="https://example.com/nursekitty.jpg" />',
                '<p><img src="https://example.com/nursekitty.jpg">\n</p>',
            ),
            (
                "<img src=https://example.com/nursekitty.jpg </img>",
                '<p><img src="https://example.com/nursekitty.jpg"></p>',
            ),
            # Script insertions from OWASP site
            ("<IMG SRC=`javascript:alert(\"'XSS'\")`>", "<p><img>\n</p>"),
            ('<IMG SRC=javascript:alert("XSS")>', "<p><img>\n</p>"),
            ("<IMG SRC=JaVaScRiPt:alert('XSS')>", "<p><img>\n</p>"),
            ("<IMG SRC=javascript:alert('XSS')>", "<p><img>\n</p>"),
            ("<IMG SRC=\"javascript:alert('XSS');\">", "<p><img>\n</p>"),
        )
        for content, expected in testdata:
            eq_(expected, self.p.parse(content))
class TestWikiInternalLinks(TestCase):
    """Wiki [[...]] internal-link markup: hrefs, hashes, link names, red
    links for missing pages, and locale-aware resolution."""

    def setUp(self):
        # A known document so [[Installing Firefox]] resolves to a real page.
        self.d, self.r, self.p = doc_rev_parser("Test content", "Installing Firefox")

    def test_simple(self):
        """Simple internal link markup."""
        link = pq_link(self.p, "[[Installing Firefox]]")
        eq_("/en-US/kb/installing-firefox", link.attr("href"))
        eq_("Installing Firefox", link.text())
        # Existing pages must not get the "new" (red-link) class.
        assert not link.hasClass("new")

    def test_simple_markup(self):
        """Full rendered HTML for a simple internal link."""
        text = "[[Installing Firefox]]"
        eq_(
            '<p><a href="/en-US/kb/installing-firefox">' + "Installing Firefox</a></p>",
            self.p.parse(text).replace("\n", ""),
        )

    def test_link_hash(self):
        """Internal link with hash."""
        # Spaces in the fragment become underscores in the href.
        link = pq_link(self.p, "[[Installing Firefox#section name]]")
        eq_("/en-US/kb/installing-firefox#section_name", link.attr("href"))
        eq_("Installing Firefox", link.text())

    def test_link_hash_text(self):
        """Internal link with hash and text."""
        link = pq_link(self.p, "[[Installing Firefox#section name|section]]")
        eq_("/en-US/kb/installing-firefox#section_name", link.attr("href"))
        eq_("section", link.text())

    def test_hash_only(self):
        """Internal hash only."""
        link = pq_link(self.p, "[[#section 3]]")
        eq_("#section_3", link.attr("href"))
        eq_("#section 3", link.text())

    def test_link_name(self):
        """Internal link with name."""
        link = pq_link(self.p, "[[Installing Firefox|this name]]")
        eq_("/en-US/kb/installing-firefox", link.attr("href"))
        eq_("this name", link.text())

    def test_link_with_extra_pipe(self):
        # Only the first pipe splits target from label; later pipes are literal.
        link = pq_link(self.p, "[[Installing Firefox|with|pipe]]")
        eq_("/en-US/kb/installing-firefox", link.attr("href"))
        eq_("with|pipe", link.text())

    def test_hash_name(self):
        """Internal hash with name."""
        link = pq_link(self.p, "[[#section 3|this name]]")
        eq_("#section_3", link.attr("href"))
        eq_("this name", link.text())
        # Pure-hash links never count as "new" pages.
        assert not link.hasClass("new")

    def test_link_hash_name(self):
        """Internal link with hash and name."""
        link = pq_link(self.p, "[[Installing Firefox#section 3|this name]]")
        eq_("/en-US/kb/installing-firefox#section_3", link.attr("href"))
        eq_("this name", link.text())

    def test_link_hash_name_markup(self):
        """Internal link with hash and name."""
        text = "[[Installing Firefox#section 3|this name]]"
        eq_(
            '<p><a href="/en-US/kb/installing-firefox#section_3">this name</a>\n</p>',
            self.p.parse(text),
        )

    def test_simple_create(self):
        """Simple link for inexistent page."""
        link = pq_link(self.p, "[[A new page]]")
        # Nonexistent targets render as "new" links pointing at the create URL.
        assert link.hasClass("new")
        eq_("/en-US/kb/new?title=A+new+page", link.attr("href"))
        eq_("A new page", link.text())

    def test_link_edit_hash_name(self):
        """Internal link for inexistent page with hash and name."""
        link = pq_link(self.p, "[[A new page#section 3|this name]]")
        eq_("/en-US/kb/new?title=A+new+page#section_3", link.attr("href"))
        eq_("this name", link.text())

    def test_link_with_localization(self):
        """A link to an English doc with a local translation."""
        en_d = DocumentFactory(title="A doc")
        ApprovedRevisionFactory(document=en_d)
        fr_d = DocumentFactory(parent=en_d, title="Une doc", locale="fr")
        # Without an approved revision, link should go to en-US doc.
        # The site should stay in fr locale (/<locale>/<en-US slug>).
        link = pq(self.p.parse("[[A doc]]", locale="fr"))
        eq_("/fr/kb/a-doc", link.find("a").attr("href"))
        eq_("A doc", link.find("a").text())
        # Approve a revision. Now link should go to fr doc.
        ApprovedRevisionFactory(document=fr_d)
        link = pq(self.p.parse("[[A doc]]", locale="fr"))
        eq_("/fr/kb/une-doc", link.find("a").attr("href"))
        eq_("Une doc", link.find("a").text())
class TestWikiImageTags(TestCase):
    """[[Image:...]] wiki markup: image sources, locale fallback, captions,
    page/external links, alignment, sizing, and escaping of untrusted
    attribute values."""

    def setUp(self):
        self.d, self.r, self.p = doc_rev_parser("Test content", "Installing Firefox")
        # One known image that most tests reference by title.
        self.img = ImageFactory(title="test.jpg")

    def tearDown(self):
        self.img.delete()

    def test_empty(self):
        """Empty image tag markup does not change."""
        img = pq_img(self.p, "[[Image:]]", "p")
        eq_('The image "" does not exist.', img.text())

    def test_simple(self):
        """Simple image tag markup."""
        img = pq_img(self.p, "[[Image:test.jpg]]", "img")
        eq_("test.jpg", img.attr("alt"))
        eq_(self.img.file.url, img.attr("src"))

    def test_simple_fallback(self):
        """Fallback to English if current locale doesn't have the image."""
        img = pq_img(self.p, "[[Image:test.jpg]]", selector="img", locale="ja")
        eq_("test.jpg", img.attr("alt"))
        eq_(self.img.file.url, img.attr("src"))

    def test_full_fallback(self):
        """Find current locale's image, not the English one."""
        # first, pretend there is no English version
        self.img.locale = "ja"
        self.img.save()
        img = pq_img(self.p, "[[Image:test.jpg]]", selector="img", locale="ja")
        eq_("test.jpg", img.attr("alt"))
        eq_(self.img.file.url, img.attr("src"))
        # then, create an English version
        en_img = ImageFactory(title="test.jpg", locale="en-US")
        # Ensure they're not equal
        self.assertNotEqual(en_img.file.url, self.img.file.url)
        # make sure there is no fallback
        img = pq_img(self.p, "[[Image:test.jpg]]", selector="img", locale="ja")
        eq_("test.jpg", img.attr("alt"))
        eq_(self.img.file.url, img.attr("src"))
        # now delete the English version
        self.img.delete()
        self.img = en_img  # don't break tearDown
        img = pq_img(self.p, "[[Image:test.jpg]]", selector="img", locale="ja")
        eq_("test.jpg", img.attr("alt"))
        eq_(self.img.file.url, img.attr("src"))

    def test_caption(self):
        """Give the image a caption."""
        self.img.title = "img test.jpg"
        self.img.save()
        img_div = pq_img(self.p, "[[Image:img test.jpg|frame|my caption]]", "div.img")
        img = img_div("img")
        caption = img_div.text()
        eq_(self.img.file.url, img.attr("src"))
        # With no explicit alt, the caption doubles as the alt text.
        eq_("my caption", img.attr("alt"))
        eq_("my caption", caption)

    def test_page_link(self):
        """Link to a wiki page."""
        img_a = pq_img(self.p, "[[Image:test.jpg|page=Installing Firefox]]", "a")
        img = img_a("img")
        eq_("test.jpg", img.attr("alt"))
        eq_(self.img.file.url, img.attr("src"))
        eq_("/en-US/kb/installing-firefox", img_a.attr("href"))

    def test_page_link_edit(self):
        """Link to a nonexistent wiki page."""
        img_a = pq_img(self.p, "[[Image:test.jpg|page=Article List]]", "a")
        img = img_a("img")
        eq_("test.jpg", img.attr("alt"))
        eq_(self.img.file.url, img.attr("src"))
        # Missing target pages get the red-link class and a create URL.
        assert img_a.hasClass("new")
        eq_("/en-US/kb/new?title=Article+List", img_a.attr("href"))

    def test_page_link_caption(self):
        """Link to a wiki page with caption and frame."""
        img_div = pq_img(self.p, "[[Image:test.jpg|frame|page=A page|my caption]]", "div.img")
        img_a = img_div("a")
        img = img_a("img")
        caption = img_div.text()
        eq_("my caption", img.attr("alt"))
        eq_("my caption", caption)
        eq_(self.img.file.url, img.attr("src"))
        assert img_a.hasClass("new")
        eq_("/en-US/kb/new?title=A+page", img_a.attr("href"))

    def test_link(self):
        """Link to an external page."""
        img_a = pq_img(self.p, "[[Image:test.jpg|link=http://test.com]]", "a")
        img = img_a("img")
        eq_("test.jpg", img.attr("alt"))
        eq_(self.img.file.url, img.attr("src"))
        eq_("http://test.com", img_a.attr("href"))

    def test_link_caption(self):
        """Link to an external page with caption."""
        img_div = pq_img(self.p, "[[Image:test.jpg|link=http://ab.us|frame|caption]]", "div.img")
        img = img_div("img")
        img_a = img_div("a")
        eq_(self.img.file.url, img.attr("src"))
        eq_("http://ab.us", img_a.attr("href"))

    def test_link_align(self):
        """Link with align."""
        img_div = pq_img(self.p, "[[Image:test.jpg|link=http://site.com|align=left]]", "div.img")
        eq_("img align-left", img_div.attr("class"))

    def test_link_align_invalid(self):
        """Link with invalid align."""
        # Invalid align values fall back to the frameless presentation.
        img = pq_img(self.p, "[[Image:test.jpg|link=http://example.ro|align=inv]]")
        assert "frameless" in img.attr("class")

    def test_link_valign(self):
        """Link with valign."""
        img = pq_img(self.p, "[[Image:test.jpg|link=http://example.com|valign=top]]")
        eq_("vertical-align: top;", img.attr("style"))

    def test_link_valign_invalid(self):
        """Link with invalid valign."""
        # Invalid valign values produce no style attribute at all.
        img = pq_img(self.p, "[[Image:test.jpg|link=http://example.com|valign=off]]")
        eq_(None, img.attr("style"))

    def test_alt(self):
        """Image alt attribute is overriden but caption is not."""
        img_div = pq_img(self.p, "[[Image:test.jpg|alt=my alt|frame|my caption]]", "div.img")
        img = img_div("img")
        caption = img_div.text()
        eq_("my alt", img.attr("alt"))
        eq_("my caption", caption)

    def test_alt_empty(self):
        """Image alt attribute can be empty."""
        img = pq_img(self.p, "[[Image:test.jpg|alt=|my caption]]")
        eq_("", img.attr("alt"))

    def test_alt_unsafe(self):
        """Potentially unsafe alt content is escaped."""
        # NOTE(review): the literals below appear HTML-entity-mangled by the
        # extraction that produced this file (stray unescaped quotes and
        # double-escaped entities) — confirm against the upstream test suite
        # before relying on them; they are reproduced verbatim here.
        unsafe_vals = (
            (
                'an"<script>alert()</script>',
                "an"&amp;lt;script&amp;gt;alert()&amp;lt;/script&amp;gt;",
            ),
            (
                "an'<script>alert()</script>",
                "an'<script>alert()</script>",
            ),
            ("single'\"double", "single'"double"),
        )
        for alt_sent, alt_expected in unsafe_vals:
            img = pq_img(self.p, "[[Image:test.jpg|alt=" + alt_sent + "]]")
            is_true = str(img).startswith('<img alt="' + alt_expected + '"')
            assert is_true, 'Expected "%s", sent "%s"' % (alt_expected, alt_sent)

    def test_width(self):
        """Image width attribute set."""
        img = pq_img(self.p, "[[Image:test.jpg|width=10]]")
        eq_("10", img.attr("width"))

    def test_width_invalid(self):
        """Invalid image width attribute set to auto."""
        img = pq_img(self.p, "[[Image:test.jpg|width=invalid]]")
        eq_(None, img.attr("width"))

    def test_height(self):
        """Image height attribute set."""
        img = pq_img(self.p, "[[Image:test.jpg|height=10]]")
        eq_("10", img.attr("height"))

    def test_height_invalid(self):
        """Invalid image height attribute set to auto."""
        img = pq_img(self.p, "[[Image:test.jpg|height=invalid]]")
        eq_(None, img.attr("height"))

    def test_frame(self):
        """Image has frame if specified."""
        img_div = pq_img(self.p, "[[Image:test.jpg|frame|caption]]", "div.img")
        assert not img_div("img").hasClass("frameless")
        eq_("caption", img_div("img").attr("alt"))
        eq_("caption", img_div.text())
        eq_(self.img.file.url, img_div("img").attr("src"))

    def test_frameless_link(self):
        """Image has frameless class and link if specified."""
        img_a = pq_img(self.p, "[[Image:test.jpg|page=Installing Firefox]]", "a")
        img = img_a("img")
        assert "frameless" in img.attr("class")
        eq_("/en-US/kb/installing-firefox", img_a.attr("href"))
|
en
| 0.704033
|
get_object_fallback returns message when no objects. # English does not exist # Create the English document # Now it exists # Create the English document # Returns English document for French # Create English parent document # Create the French document # Also works when English exists If a localization of the English fallback exists, use it. # Without an approved revision, the en-US doc should be returned. # Approve a revision, then fr doc should be returned. Assert get_object_fallback follows wiki redirects. Make sure get_object_fallback doesn't follow redirects when working purely in the default language. That would make it hard to navigate to redirects (to edit them, for example). build_hook_params handles wiki pages. _build_image_params handles external links. _build_image_params - wiki page overrides link. Align valid options. Align invalid options. Vertical align valid options. Vertical align invalid options. Image alt override. Framed image. Image width. Wiki links are properly built for existing pages. <showfor> tags should be escaped, not obeyed. Verify youtube embeds. Verify iframe in wiki markup is escaped. Verify fix for bug 898769. <iframe/src \/\/onload = prompt(1) <iframe/onreadystatechange=alert(/@blinkms/) <svg/onload=alert(1) # Normal image urls # Script insertions from OWASP site Simple internal link markup. Internal link with hash. #section name]]") #section_name", link.attr("href")) Internal link with hash and text. #section name|section]]") #section_name", link.attr("href")) Internal hash only. #section 3]]") Internal link with name. Internal hash with name. #section 3|this name]]") Internal link with hash and name. #section 3|this name]]") #section_3", link.attr("href")) Internal link with hash and name. #section 3|this name]]" #section_3">this name</a>\n</p>', Simple link for inexistent page. Internal link for inexistent page with hash and name. 
#section 3|this name]]") #section_3", link.attr("href")) A link to an English doc with a local translation. # Without an approved revision, link should go to en-US doc. # The site should stay in fr locale (/<locale>/<en-US slug>). # Approve a revision. Now link should go to fr doc. Empty image tag markup does not change. Simple image tag markup. Fallback to English if current locale doesn't have the image. Find current locale's image, not the English one. # first, pretend there is no English version # then, create an English version # Ensure they're not equal # make sure there is no fallback # now delete the English version # don't break tearDown Give the image a caption. Link to a wiki page. Link to a nonexistent wiki page. Link to a wiki page with caption and frame. Link to an external page. Link to an external page with caption. Link with align. Link with invalid align. Link with valign. Link with invalid valign. Image alt attribute is overriden but caption is not. Image alt attribute can be empty. Potentially unsafe alt content is escaped. Image width attribute set. Invalid image width attribute set to auto. Image height attribute set. Invalid image height attribute set to auto. Image has frame if specified. Image has frameless class and link if specified.
| 2.156659
| 2
|
CursoEmVideo/pythonProject/ex109/testing.py
|
cassio645/Aprendendo-python
| 0
|
6628558
|
<reponame>cassio645/Aprendendo-python
from ex109 import moeda

# Interactive driver for the `moeda` (currency) helper module: reads a price,
# asks whether output should be currency-formatted, then prints derived values.
preco = float(input('Digite o preço: R$ '))
while True:
    resposta = str(input('Deseja formatado como moeda?[S/N]: ')).upper().strip()
    # Bug fix: the original indexed [0] on the raw input, which raised
    # IndexError when the user pressed Enter or typed only whitespace.
    # Guard first, then examine only the first letter (S = yes, N = no).
    if resposta and resposta[0] in 'SN':
        formatar = resposta[0] == 'S'
        break
print('-='*30)
print(f'A metade de {moeda.money(preco)} é {moeda.metade(preco, formatar)}')
print(f'O dobro de de {moeda.money(preco)} é {moeda.dobro(preco, formatar)}')
print(f'Aumentando 10% de {moeda.money(preco)} temos {moeda.dez_porcento(preco, formatar)}')
print(f'Reduzindo 15% de {moeda.money(preco)} temos {moeda.quinze_porcento(preco, formatar)}')
|
from ex109 import moeda

# Interactive driver for the `moeda` (currency) helper module: reads a price,
# asks whether output should be currency-formatted, then prints derived values.
preco = float(input('Digite o preço: R$ '))
# Loop until the first letter of the answer is S (yes) or N (no).
# NOTE(review): pressing Enter or typing only spaces makes the [0] index
# below raise IndexError — confirm whether input validation is expected here.
while True:
    condicao = str(input('Deseja formatado como moeda?[S/N]: ')).upper().strip()[0]
    if condicao in 'SN':
        if condicao == 'S':
            formatar = True
            break
        else:
            formatar = False
            break
print('-='*30)
# Each line prints the base price plus one derived value; `formatar` tells
# the helpers whether to render results as currency strings.
print(f'A metade de {moeda.money(preco)} é {moeda.metade(preco, formatar)}')
print(f'O dobro de de {moeda.money(preco)} é {moeda.dobro(preco, formatar)}')
print(f'Aumentando 10% de {moeda.money(preco)} temos {moeda.dez_porcento(preco, formatar)}')
print(f'Reduzindo 15% de {moeda.money(preco)} temos {moeda.quinze_porcento(preco, formatar)}')
|
none
| 1
| 3.491388
| 3
|
|
Deliverables/network_2.py
|
ScottGKirkpatrick/466PA5
| 0
|
6628559
|
import queue
import threading
from link_2 import LinkFrame
class MPLSlabel:
    """An MPLS shim: a fixed-width label prepended to an encapsulated frame."""

    # Labels are serialized as exactly this many characters, zero-padded left.
    labelLength = 5

    def __init__(self, frame, label):
        """Wrap `frame` (payload byte string or packet) under MPLS `label`."""
        self.frame = frame
        self.label = label

    def __str__(self):
        """Print as the wire encoding."""
        return self.to_byte_S()

    def to_byte_S(self):
        """Serialize: left-zero-padded label followed by the payload frame."""
        byte_S = str(self.label).zfill(self.labelLength)
        byte_S += str(self.frame)
        return byte_S

    @classmethod
    def from_byte_S(cls, byte_S):
        """Parse the wire encoding produced by to_byte_S().

        Bug fix: padding is removed with lstrip('0'), not strip('0') —
        strip() also removed *trailing* zeros, corrupting any label that
        legitimately ends in 0 (e.g. '00010' decoded to '1' instead of '10').
        Also uses the conventional `cls` for the classmethod first argument.
        """
        frame = byte_S[cls.labelLength:]
        label = byte_S[:cls.labelLength].lstrip('0')
        return cls(frame, label)
## wrapper class for a queue of packets
class Interface:
    """A named pair of bounded FIFO queues modelling one router/host port."""

    def __init__(self, name="defaultName", maxsize=0, capacity=500):
        # @param maxsize - maximum number of queued packets (0 = unbounded)
        # @param capacity - link serialization rate in bps
        self.name = name
        self.in_queue = queue.Queue(maxsize)
        self.out_queue = queue.Queue(maxsize)
        self.capacity = capacity  # serialization rate
        self.next_avail_time = 0  # earliest time this interface may transmit again

    def get(self, in_or_out):
        """Pop and return the next packet from the 'in' or 'out' queue.

        Returns None when the selected queue is empty; never blocks.
        """
        selected = self.in_queue if in_or_out == 'in' else self.out_queue
        try:
            return selected.get(False)
        except queue.Empty:
            return None

    def put(self, pkt, in_or_out, block=False):
        """Push `pkt` onto the 'in' or 'out' queue.

        With block=False (the default) a full queue raises queue.Full
        immediately instead of waiting for room.
        """
        if in_or_out == 'out':
            self.out_queue.put(pkt, block)
        else:
            self.in_queue.put(pkt, block)
## Implements a network layer packet
# NOTE: You will need to extend this class for the packet to include
# the fields necessary for the completion of this assignment.
class NetworkPacket:
    """A minimal network-layer packet: fixed-width destination + payload."""

    # Destination addresses are serialized as exactly this many characters,
    # zero-padded on the left.
    dst_S_length = 5

    def __init__(self, dst, data_S, priority=0):
        """@param dst: address of the destination host
        @param data_S: packet payload
        @param priority: packet priority
        """
        self.dst = dst
        self.data_S = data_S
        # TODO: add priority to the packet class (parameter currently ignored)

    def __str__(self):
        """Print as the wire encoding."""
        return self.to_byte_S()

    def to_byte_S(self):
        """Serialize: left-zero-padded destination followed by the payload."""
        byte_S = str(self.dst).zfill(self.dst_S_length)
        byte_S += self.data_S
        return byte_S

    @classmethod
    def from_byte_S(cls, byte_S):
        """Parse the wire encoding produced by to_byte_S().

        Bug fix: padding is removed with lstrip('0'), not strip('0') —
        strip() also removed *trailing* zeros, so destination '10' round-
        tripped to '1'. Also uses the conventional `cls` argument name.
        """
        dst = byte_S[0:cls.dst_S_length].lstrip('0')
        data_S = byte_S[cls.dst_S_length:]
        return cls(dst, data_S)
## Implements a network host for receiving and transmitting data
class Host:
    """An end host: one interface, sends and receives network packets
    wrapped in link frames."""

    def __init__(self, addr):
        # @param addr: address of this node
        self.addr = addr
        self.intf_L = [Interface()]
        self.stop = False  # set True to terminate run()

    def __str__(self):
        return self.addr

    def udt_send(self, dst, data_S, priority=0):
        """Build a packet for `dst`, frame it, and enqueue it for transmission.

        NOTE(review): `priority` is printed but not yet carried in the
        packet — see the TODO in NetworkPacket.
        """
        pkt = NetworkPacket(dst, data_S)
        print('%s: sending packet "%s" with priority %d' % (self, pkt, priority))
        # Encapsulate in a link frame (normally the OS would do this).
        frame = LinkFrame('Network', pkt.to_byte_S())
        self.intf_L[0].put(frame.to_byte_S(), 'out')

    def udt_receive(self):
        """Dequeue one frame from the link layer, if any, and print its payload."""
        fr_S = self.intf_L[0].get('in')
        if fr_S is None:
            return
        frame = LinkFrame.from_byte_S(fr_S)
        # Hosts should only ever receive network-layer frames.
        assert(frame.type_S == 'Network')
        pkt_S = frame.data_S
        print('%s: received packet "%s"' % (self, pkt_S))

    def run(self):
        """Thread target: keep receiving until self.stop is set."""
        print(threading.currentThread().getName() + ': Starting')
        while True:
            self.udt_receive()
            if self.stop:
                print(threading.currentThread().getName() + ': Ending')
                return
## Implements a multi-interface router
class Router:
    """A multi-interface router that label-switches MPLS frames."""

    def __init__(self, name, intf_capacity_L, encap_tbl_D, frwd_tbl_D, decap_tbl_D, max_queue_size):
        """@param name: friendly router name for debugging
        @param intf_capacity_L: (interface name, capacity-in-bps) pairs
        @param encap_tbl_D: table used to encapsulate network packets into MPLS frames
        @param frwd_tbl_D: table used to forward MPLS frames
        @param decap_tbl_D: table used to decapsulate network packets from MPLS frames
        @param max_queue_size: max queue length (passed to Interface)
        """
        self.stop = False  # for thread termination
        self.name = name
        # create a list of interfaces
        self.intf_L = [Interface(name=name, maxsize=max_queue_size, capacity=cap)
                       for name, cap in intf_capacity_L]
        # save MPLS tables.
        # NOTE(review): decap_tbl_D is stored but never read below;
        # decapsulation currently keys off frwd_tbl_D['dest'] — confirm intent.
        self.encap_tbl_D = encap_tbl_D
        self.frwd_tbl_D = frwd_tbl_D
        self.decap_tbl_D = decap_tbl_D

    def __str__(self):
        return self.name

    def process_queues(self):
        """Drain at most one frame from each incoming interface and dispatch
        it as a Network packet or an MPLS frame."""
        for i in range(len(self.intf_L)):
            fr_S = self.intf_L[i].get('in')  # get frame from interface i
            if fr_S is None:
                continue  # no frame to process yet
            # decapsulate the link frame
            fr = LinkFrame.from_byte_S(fr_S)
            pkt_S = fr.data_S
            # process the payload as Network or MPLS
            if fr.type_S == "Network":
                p = NetworkPacket.from_byte_S(pkt_S)  # parse a packet out
                self.process_network_packet(p, i)
            elif fr.type_S == "MPLS":
                m_fr = MPLSlabel.from_byte_S(pkt_S)
                self.process_MPLS_frame(m_fr, i)
            else:
                # Bug fix: the original did `raise('...')`, which raises
                # TypeError ("exceptions must derive from BaseException")
                # instead of surfacing the intended message.
                raise Exception('%s: unknown frame type: %s' % (self, fr.type_S))

    def process_network_packet(self, pkt, i):
        """Encapsulate a network packet arriving on interface i into an MPLS
        frame, using the incoming interface's name as the initial label.

        @param pkt: NetworkPacket to forward
        @param i: incoming interface number
        """
        intfName = self.intf_L[i].name
        # Encapsulate only when this router is an ingress for the interface.
        if self.name in self.encap_tbl_D[intfName]:
            m_fr = MPLSlabel(pkt, intfName)
            print('%s: encapsulated packet "%s" as MPLS frame "%s"' % (self, pkt, m_fr))
            # send the encapsulated packet for processing as an MPLS frame
            self.process_MPLS_frame(m_fr, i)

    def process_MPLS_frame(self, m_fr, i):
        """Forward an MPLS frame per frwd_tbl_D, decapsulating back to a
        plain Network frame when the outgoing label equals the table's
        'dest' entry (i.e. this is the last-hop router for the path).

        NOTE(review): a missing 'outLabel'/'intf'/'dest' key raises KeyError,
        which is NOT caught below (only queue.Full is) — confirm the
        forwarding tables always carry all three keys.

        @param m_fr: MPLS frame to process
        @param i: incoming interface number
        """
        print('%s: processing MPLS frame "%s"' % (self, m_fr))
        # The incoming label selects the forwarding-table row.
        tbl_D = self.frwd_tbl_D[m_fr.label]
        m_fr.label = tbl_D["outLabel"]
        outInterface = tbl_D["intf"]
        try:
            if m_fr.label == tbl_D['dest']:
                # Last hop: strip the MPLS shim and hand back a Network frame.
                fr = LinkFrame("Network", m_fr.frame)
            else:
                fr = LinkFrame("MPLS", m_fr.to_byte_S())
            self.intf_L[outInterface].put(fr.to_byte_S(), 'out', True)
            print('%s: forwarding frame "%s" from interface %d to %d' % (self, fr, i, outInterface))
        except queue.Full:
            # Outgoing queue is full: the frame is dropped.
            print('%s: frame "%s" lost on interface %d' % (self, m_fr, i))

    def run(self):
        """Thread target: keep processing queues until self.stop is set."""
        print(threading.currentThread().getName() + ': Starting')
        while True:
            self.process_queues()
            if self.stop:
                print(threading.currentThread().getName() + ': Ending')
                return
|
import queue
import threading
from link_2 import LinkFrame
class MPLSlabel:
    """An MPLS shim: a fixed-width label prepended to an encapsulated frame."""

    # Labels are serialized as exactly this many characters, zero-padded left.
    labelLength = 5

    def __init__(self, frame, label):
        """Wrap `frame` (payload byte string or packet) under MPLS `label`."""
        self.frame = frame
        self.label = label

    def __str__(self):
        """Print as the wire encoding."""
        return self.to_byte_S()

    def to_byte_S(self):
        """Serialize: left-zero-padded label followed by the payload frame."""
        byte_S = str(self.label).zfill(self.labelLength)
        byte_S += str(self.frame)
        return byte_S

    @classmethod
    def from_byte_S(cls, byte_S):
        """Parse the wire encoding produced by to_byte_S().

        Bug fix: padding is removed with lstrip('0'), not strip('0') —
        strip() also removed *trailing* zeros, corrupting any label that
        legitimately ends in 0 (e.g. '00010' decoded to '1' instead of '10').
        Also uses the conventional `cls` for the classmethod first argument.
        """
        frame = byte_S[cls.labelLength:]
        label = byte_S[:cls.labelLength].lstrip('0')
        return cls(frame, label)
## wrapper class for a queue of packets
class Interface:
    """A named pair of bounded FIFO queues modelling one router/host port."""

    def __init__(self, name="defaultName", maxsize=0, capacity=500):
        # @param maxsize - maximum number of queued packets (0 = unbounded)
        # @param capacity - link serialization rate in bps
        self.name = name
        self.in_queue = queue.Queue(maxsize)
        self.out_queue = queue.Queue(maxsize)
        self.capacity = capacity  # serialization rate
        self.next_avail_time = 0  # earliest time this interface may transmit again

    def get(self, in_or_out):
        """Pop and return the next packet from the 'in' or 'out' queue.

        Returns None when the selected queue is empty; never blocks.
        """
        selected = self.in_queue if in_or_out == 'in' else self.out_queue
        try:
            return selected.get(False)
        except queue.Empty:
            return None

    def put(self, pkt, in_or_out, block=False):
        """Push `pkt` onto the 'in' or 'out' queue.

        With block=False (the default) a full queue raises queue.Full
        immediately instead of waiting for room.
        """
        if in_or_out == 'out':
            self.out_queue.put(pkt, block)
        else:
            self.in_queue.put(pkt, block)
## Implements a network layer packet
# NOTE: You will need to extend this class for the packet to include
# the fields necessary for the completion of this assignment.
class NetworkPacket:
    """A minimal network-layer packet: fixed-width destination + payload."""

    # Destination addresses are serialized as exactly this many characters,
    # zero-padded on the left.
    dst_S_length = 5

    def __init__(self, dst, data_S, priority=0):
        """@param dst: address of the destination host
        @param data_S: packet payload
        @param priority: packet priority
        """
        self.dst = dst
        self.data_S = data_S
        # TODO: add priority to the packet class (parameter currently ignored)

    def __str__(self):
        """Print as the wire encoding."""
        return self.to_byte_S()

    def to_byte_S(self):
        """Serialize: left-zero-padded destination followed by the payload."""
        byte_S = str(self.dst).zfill(self.dst_S_length)
        byte_S += self.data_S
        return byte_S

    @classmethod
    def from_byte_S(cls, byte_S):
        """Parse the wire encoding produced by to_byte_S().

        Bug fix: padding is removed with lstrip('0'), not strip('0') —
        strip() also removed *trailing* zeros, so destination '10' round-
        tripped to '1'. Also uses the conventional `cls` argument name.
        """
        dst = byte_S[0:cls.dst_S_length].lstrip('0')
        data_S = byte_S[cls.dst_S_length:]
        return cls(dst, data_S)
## Implements a network host for receiving and transmitting data
class Host:
    """Implements a network host for receiving and transmitting data."""

    def __init__(self, addr):
        # @param addr: address of this node represented as an integer
        self.addr = addr
        self.intf_L = [Interface()]
        self.stop = False  # for thread termination

    def __str__(self):
        # called when printing the object
        return self.addr

    def udt_send(self, dst, data_S, priority=0):
        """Create a packet and enqueue it for transmission.

        @param dst: destination address for the packet
        @param data_S: data being transmitted to the network layer
        @param priority: packet priority (currently printed only — the
            packet class does not carry it yet; see NetworkPacket TODO)
        """
        pkt = NetworkPacket(dst, data_S)
        print('%s: sending packet "%s" with priority %d' % (self, pkt, priority))
        # encapsulate network packet in a link frame (usually would be done by the OS)
        fr = LinkFrame('Network', pkt.to_byte_S())
        # enqueue the frame onto the interface for transmission
        self.intf_L[0].put(fr.to_byte_S(), 'out')

    def udt_receive(self):
        """Receive one frame from the link layer, if one is available."""
        fr_S = self.intf_L[0].get('in')
        if fr_S is None:
            return
        # decapsulate the network packet
        fr = LinkFrame.from_byte_S(fr_S)
        assert(fr.type_S == 'Network')  # hosts should only receive network packets
        pkt_S = fr.data_S
        print('%s: received packet "%s"' % (self, pkt_S))

    def run(self):
        """Thread target for the host to keep receiving data until stopped."""
        print (threading.currentThread().getName() + ': Starting')
        while True:
            # receive data arriving on the in interface
            self.udt_receive()
            # terminate when asked to stop
            if(self.stop):
                print (threading.currentThread().getName() + ': Ending')
                return
## Implements a multi-interface router
class Router:
    """A multi-interface router that label-switches MPLS frames."""

    def __init__(self, name, intf_capacity_L, encap_tbl_D, frwd_tbl_D, decap_tbl_D, max_queue_size):
        """@param name: friendly router name for debugging
        @param intf_capacity_L: (interface name, capacity-in-bps) pairs
        @param encap_tbl_D: table used to encapsulate network packets into MPLS frames
        @param frwd_tbl_D: table used to forward MPLS frames
        @param decap_tbl_D: table used to decapsulate network packets from MPLS frames
        @param max_queue_size: max queue length (passed to Interface)
        """
        self.stop = False  # for thread termination
        self.name = name
        # create a list of interfaces
        self.intf_L = [Interface(name=name, maxsize=max_queue_size, capacity=cap)
                       for name, cap in intf_capacity_L]
        # save MPLS tables.
        # NOTE(review): decap_tbl_D is stored but never read below;
        # decapsulation currently keys off frwd_tbl_D['dest'] — confirm intent.
        self.encap_tbl_D = encap_tbl_D
        self.frwd_tbl_D = frwd_tbl_D
        self.decap_tbl_D = decap_tbl_D

    def __str__(self):
        return self.name

    def process_queues(self):
        """Drain at most one frame from each incoming interface and dispatch
        it as a Network packet or an MPLS frame."""
        for i in range(len(self.intf_L)):
            fr_S = self.intf_L[i].get('in')  # get frame from interface i
            if fr_S is None:
                continue  # no frame to process yet
            # decapsulate the link frame
            fr = LinkFrame.from_byte_S(fr_S)
            pkt_S = fr.data_S
            # process the payload as Network or MPLS
            if fr.type_S == "Network":
                p = NetworkPacket.from_byte_S(pkt_S)  # parse a packet out
                self.process_network_packet(p, i)
            elif fr.type_S == "MPLS":
                m_fr = MPLSlabel.from_byte_S(pkt_S)
                self.process_MPLS_frame(m_fr, i)
            else:
                # Bug fix: the original did `raise('...')`, which raises
                # TypeError ("exceptions must derive from BaseException")
                # instead of surfacing the intended message.
                raise Exception('%s: unknown frame type: %s' % (self, fr.type_S))

    def process_network_packet(self, pkt, i):
        """Encapsulate a network packet arriving on interface i into an MPLS
        frame, using the incoming interface's name as the initial label.

        @param pkt: NetworkPacket to forward
        @param i: incoming interface number
        """
        intfName = self.intf_L[i].name
        # Encapsulate only when this router is an ingress for the interface.
        if self.name in self.encap_tbl_D[intfName]:
            m_fr = MPLSlabel(pkt, intfName)
            print('%s: encapsulated packet "%s" as MPLS frame "%s"' % (self, pkt, m_fr))
            # send the encapsulated packet for processing as an MPLS frame
            self.process_MPLS_frame(m_fr, i)

    def process_MPLS_frame(self, m_fr, i):
        """Forward an MPLS frame per frwd_tbl_D, decapsulating back to a
        plain Network frame when the outgoing label equals the table's
        'dest' entry (i.e. this is the last-hop router for the path).

        NOTE(review): a missing 'outLabel'/'intf'/'dest' key raises KeyError,
        which is NOT caught below (only queue.Full is) — confirm the
        forwarding tables always carry all three keys.

        @param m_fr: MPLS frame to process
        @param i: incoming interface number
        """
        print('%s: processing MPLS frame "%s"' % (self, m_fr))
        # The incoming label selects the forwarding-table row.
        tbl_D = self.frwd_tbl_D[m_fr.label]
        m_fr.label = tbl_D["outLabel"]
        outInterface = tbl_D["intf"]
        try:
            if m_fr.label == tbl_D['dest']:
                # Last hop: strip the MPLS shim and hand back a Network frame.
                fr = LinkFrame("Network", m_fr.frame)
            else:
                fr = LinkFrame("MPLS", m_fr.to_byte_S())
            self.intf_L[outInterface].put(fr.to_byte_S(), 'out', True)
            print('%s: forwarding frame "%s" from interface %d to %d' % (self, fr, i, outInterface))
        except queue.Full:
            # Outgoing queue is full: the frame is dropped.
            print('%s: frame "%s" lost on interface %d' % (self, m_fr, i))

    def run(self):
        """Thread target: keep processing queues until self.stop is set."""
        print(threading.currentThread().getName() + ': Starting')
        while True:
            self.process_queues()
            if self.stop:
                print(threading.currentThread().getName() + ': Ending')
                return
|
en
| 0.724708
|
## initialize the frame and label ## called when printing the object ## Sets the back of it with the label and fills the rest with zeros, then appends this to the packet ##decode our label from byte_S ## wrapper class for a queue of packets ## @param maxsize - the maximum size of the queue storing packets # @param capacity - the capacity of the link in bps #serialization rate #the next time the interface can transmit a packet ##get packet from the queue interface # @param in_or_out - use 'in' or 'out' interface # if pkt_S is not None: # print('getting packet from the IN queue') # if pkt_S is not None: # print('getting packet from the OUT queue') ##put the packet into the interface queue # @param pkt - Packet to be inserted into the queue # @param in_or_out - use 'in' or 'out' interface # @param block - if True, block until room in queue, if False may throw queue.Full exception # print('putting packet in the OUT queue') # print('putting packet in the IN queue') ## Implements a network layer packet # NOTE: You will need to extend this class for the packet to include # the fields necessary for the completion of this assignment. 
## packet encoding lengths ##@param dst: address of the destination host # @param data_S: packet payload # @param priority: packet priority #TODO: add priority to the packet class ## called when printing the object ## convert packet to a byte string for transmission over links ## extract a packet object from a byte string # @param byte_S: byte string representation of the packet ## Implements a network host for receiving and transmitting data ##@param addr: address of this node represented as an integer #for thread termination ## called when printing the object ## create a packet and enqueue for transmission # @param dst: destination address for the packet # @param data_S: data being transmitted to the network layer # @param priority: packet priority #encapsulate network packet in a link frame (usually would be done by the OS) #enque frame onto the interface for transmission ## receive frame from the link layer #decapsulate the network packet #should be receiving network packets by hosts ## thread target for the host to keep receiving data #receive data arriving to the in interface #terminate ## Implements a multi-interface router ##@param name: friendly router name for debugging # @param intf_capacity_L: capacities of outgoing interfaces in bps # @param encap_tbl_D: table used to encapsulate network packets into MPLS frames # @param frwd_tbl_D: table used to forward MPLS frames # @param decap_tbl_D: table used to decapsulate network packets from MPLS frames # @param max_queue_size: max queue length (passed to Interface) #for thread termination #create a list of interfaces #save MPLS tables ## called when printing the object ## look through the content of incoming interfaces and # process data and control packets #make sure we are starting the loop with a blank frame #get frame from interface i # no frame to process yet #decapsulate the packet #process the packet as network, or MPLS #parse a packet out # TODO: handle MPLS frames #for now, we just relabel the packet 
as an MPLS frame without encapsulation #send the MPLS frame for processing ## process a network packet incoming to this router # @param p Packet to forward # @param i Incoming interface number for packet p #TODO: encapsulate the packet in an MPLS frame based on self.encap_tbl_D ## do we need to encapsulate? ## if from host, encapsulate #send the encapsulated packet for processing as MPLS frame ## process an MPLS frame incoming to this router # @param m_fr: MPLS frame to process # @param i Incoming interface number for the frame #TODO: implement MPLS forward, or MPLS decapsulation if this is the last hop router for the path ## From the label received, we determine where it's going ##see if we can decapsulate # fr = LinkFrame('Network', m_fr.to_byte_S()) ##this is how it used to be set up. Always assume it was in there ## thread target for the host to keep forwarding data
| 3.249307
| 3
|
resources/library/pycontrol/src/soccer_pycontrol/footpath.py
|
utra-robosoccer/Bez_IsaacGym
| 0
|
6628560
|
<filename>resources/library/pycontrol/src/soccer_pycontrol/footpath.py
import numpy as np
import math
import enum
from resources.library.pycontrol.src.soccer_pycontrol.path import Path
from resources.library.geometry.src.soccer_geometry.transformation import Transformation as tr
import matplotlib.pyplot as plt
from copy import deepcopy
class PostPreSetting(enum.IntEnum):
    """Selects how the post-step / pre-step dwell times are applied in
    Footpath.footHeightRatio (post = pause after lift-off, pre = pause
    before touch-down)."""
    POST_AND_PRE = 0  # apply both dwell periods on every step (default)
    ONLY_POST_AND_PRE_ON_LAST_ONES = 1  # dwell only on the first/last half steps
    ONLY_POST = 2  # keep only the post dwell (pre is zeroed, post is negated by the caller)
    NO_POST_NOR_PRE = 3  # no dwell at all
# TODO: where is first_step_left????
class Footpath(Path):
    """Foot-placement trajectory generator for a bipedal walk.

    Extends the body-path planner ``Path`` with per-foot step timing and
    parabolic swing-foot interpolation.  Relies on members provided by
    ``Path`` (not defined here): ``duration()``, ``bodyStepCount()``,
    ``getBodyStep()``, ``bodyStepTime()``, ``step_size``,
    ``first_step_left``, ``post_footstep_ratio``, ``pre_footstep_ratio``.
    """

    half_to_full_step_time_ratio = 0.7  # duration of a half step relative to a full step
    foot_separation = 0.044  # lateral separation between each foot and the body centre
    step_height = 0.065  # apex height of the swing foot
    step_outwardness = 0.015  # lateral bulge of the swing trajectory
    step_rotation = 0.05  # peak roll rotation applied mid-swing

    def __init__(self, start_transform, end_transform, foot_center_to_floor):
        """Build a foot path between two body transforms.

        :param start_transform: initial body transformation
        :param end_transform: final body transformation
        :param foot_center_to_floor: vertical offset from foot centre to floor
        """
        super().__init__(start_transform, end_transform)
        self.foot_center_to_floor = foot_center_to_floor

    def half_step_time(self):
        """Duration of the first/last (half) steps, shorter than a full step."""
        return self.full_step_time() * self.half_to_full_step_time_ratio

    def num_steps(self):
        """Number of foot steps: one more than the number of body steps."""
        return self.bodyStepCount() + 1

    def full_step_time(self):
        """Duration of one steady-state (full) step.

        The total path duration is split into two half steps plus
        ``num_steps() - 2`` full steps.
        """
        total_step_time = self.duration()
        return total_step_time / (2 * self.half_to_full_step_time_ratio + (self.num_steps() - 2))

    def footHeightRatio(self, t, post_pre_settings=0):
        """Return ``[step_num, right_foot_step_ratio, left_foot_step_ratio]`` at time ``t``.

        Each ratio is the swing progress of that foot in [0, 1]
        (0 = still planted, 1 = landed on the next position).
        ``post_step_time`` / ``pre_step_time`` are dwell periods at the
        start/end of each swing; ``post_pre_settings`` (a ``PostPreSetting``
        value, default POST_AND_PRE) selects which of them apply.
        """
        full_step_time = self.full_step_time()
        half_step_time = self.half_step_time()
        post_step_time = self.post_footstep_ratio * full_step_time
        pre_step_time = self.pre_footstep_ratio * full_step_time
        if post_pre_settings == PostPreSetting.ONLY_POST_AND_PRE_ON_LAST_ONES:
            # Keep dwell only on the opening and closing half steps.
            if t < half_step_time:
                pre_step_time = 0
            elif t > (self.duration() - half_step_time):
                post_step_time = 0
            else:
                post_step_time = 0
                pre_step_time = 0
        elif post_pre_settings == PostPreSetting.ONLY_POST:
            pre_step_time = 0
            post_step_time = -post_step_time
        elif post_pre_settings == PostPreSetting.NO_POST_NOR_PRE:
            post_step_time = 0
            pre_step_time = 0
        # Odd step count => the foot that starts also finishes.
        last_foot_same = self.num_steps() % 2
        step_num = -1
        # First foot
        if t < half_step_time:
            if t < post_step_time:
                first_foot_step_ratio = 0
            elif t > (half_step_time - pre_step_time):
                first_foot_step_ratio = 1
            else:
                first_foot_step_ratio = (t - post_step_time) / (half_step_time - post_step_time - pre_step_time)
        elif last_foot_same and (t > self.duration() - half_step_time):
            adjusted_step_time = t - (self.duration() - half_step_time)
            if adjusted_step_time < post_step_time:
                first_foot_step_ratio = 0
            elif adjusted_step_time > (half_step_time - pre_step_time):
                first_foot_step_ratio = 1
            else:
                first_foot_step_ratio = (adjusted_step_time - post_step_time) / (
                        half_step_time - post_step_time - pre_step_time)
        else:
            adjusted_step_time = t - half_step_time
            # fix in matlab function rounds to nearest integer towards 0
            # NOTE(review): here step_num stays a numpy float (no int cast),
            # unlike the second-foot section below -- confirm intended.
            if(adjusted_step_time / full_step_time) >= 0:
                step_num = np.floor(adjusted_step_time / full_step_time)  # fix function in matlab
            else:
                step_num = np.ceil(adjusted_step_time / full_step_time)  # fix function in matlab
            adjusted_step_time = adjusted_step_time - step_num * full_step_time
            if (step_num % 2) == 0:
                first_foot_step_ratio = 0
            else:
                if adjusted_step_time < post_step_time:
                    first_foot_step_ratio = 0
                elif adjusted_step_time > (full_step_time - pre_step_time):
                    first_foot_step_ratio = 1
                else:
                    first_foot_step_ratio = (adjusted_step_time - post_step_time) / (
                            full_step_time - post_step_time - pre_step_time)
        # Second foot
        if t < half_step_time:
            second_foot_step_ratio = 0
        elif (not last_foot_same) and (t > (self.duration() - half_step_time)):
            adjusted_step_time = t - (self.duration() - half_step_time)
            if adjusted_step_time < post_step_time:
                second_foot_step_ratio = 0
            elif adjusted_step_time > (half_step_time - pre_step_time):
                second_foot_step_ratio = 1
            else:
                second_foot_step_ratio = (adjusted_step_time - post_step_time) / (
                        half_step_time - post_step_time - pre_step_time)
        else:
            adjusted_step_time = t - half_step_time
            # fix in matlab function rounds to nearest integer towards 0
            if(adjusted_step_time / full_step_time) >=0:
                step_num = int(np.floor(adjusted_step_time / full_step_time))  # fix function in matlab
            else:
                step_num = int(np.ceil(adjusted_step_time / full_step_time))  # fix function in matlab
            adjusted_step_time = adjusted_step_time - step_num * full_step_time
            if (step_num % 2) == 1:
                second_foot_step_ratio = 0
            else:
                if adjusted_step_time < post_step_time:
                    second_foot_step_ratio = 0
                elif adjusted_step_time > (full_step_time - pre_step_time):
                    second_foot_step_ratio = 1
                else:
                    second_foot_step_ratio = (adjusted_step_time - post_step_time) / (
                            full_step_time - post_step_time - pre_step_time)
        # Which foot is first?
        assert (first_foot_step_ratio <= 1)
        assert (second_foot_step_ratio <= 1)
        if self.first_step_left:
            right_foot_step_ratio = first_foot_step_ratio
            left_foot_step_ratio = second_foot_step_ratio
        else:
            right_foot_step_ratio = second_foot_step_ratio
            left_foot_step_ratio = first_foot_step_ratio
        step_num = step_num + 1
        return [step_num, right_foot_step_ratio, left_foot_step_ratio]

    def right_foot_position_at_step(self, n):
        """H-transform of the right foot when planted at body step ``n``."""
        bodystep = self.getBodyStep(n)
        bodypos = bodystep.get_position()
        # NOTE(review): local is named transformToLeftFoot but carries the
        # right-foot offset (negative y) -- the name looks swapped; confirm.
        transformToLeftFoot = tr([0, -self.foot_separation, -bodypos[2] + self.foot_center_to_floor])
        return np.matmul(bodystep, transformToLeftFoot)

    def left_foot_position_at_step(self, n):
        """H-transform of the left foot when planted at body step ``n``."""
        bodystep = self.getBodyStep(n)
        bodypos = bodystep.get_position()
        # NOTE(review): same naming swap as right_foot_position_at_step.
        transformToRightFoot = tr([0, self.foot_separation, -bodypos[2] + self.foot_center_to_floor])
        return np.matmul(bodystep, transformToRightFoot)

    def whatIsTheFootDoing(self, step_num):
        """Return ``[right_foot_action, left_foot_action]`` for ``step_num``.

        A one-element action ``[k]`` means that foot stays planted at body
        step ``k``; a two-element action ``[a, b]`` means it swings from
        body step ``a`` to body step ``b``.
        """
        if step_num == 0:
            if self.first_step_left:
                right_foot_action = [0, 1]  # Go from body position 0 to 1
                left_foot_action = [0]  # Stay put at position 0
            else:
                right_foot_action = [0]
                left_foot_action = [0, 1]
        elif step_num == (self.num_steps() - 1):
            # Final half step: whichever foot is due swings onto the last position.
            if self.first_step_left ^ ((self.num_steps() % 2) == 0):  # xor
                right_foot_action = [self.num_steps() - 2, self.num_steps() - 1]
                left_foot_action = [self.num_steps() - 1]
            else:
                left_foot_action = [self.num_steps() - 2, self.num_steps() - 1]
                right_foot_action = [self.num_steps() - 1]
        else:
            if self.first_step_left:
                if (step_num % 2) == 0:  # left planted at step_num, right swings
                    left_foot_action = [step_num]
                    right_foot_action = [step_num - 1, step_num + 1]
                else:
                    left_foot_action = [step_num - 1, step_num + 1]
                    right_foot_action = [step_num]
            else:
                if (step_num % 2) == 0:  # right planted at step_num, left swings
                    right_foot_action = [step_num]
                    left_foot_action = [step_num - 1, step_num + 1]
                else:
                    right_foot_action = [step_num - 1, step_num + 1]
                    left_foot_action = [step_num]
        return [right_foot_action, left_foot_action]

    def footPosition(self, t):
        """Return ``[right_foot_position, left_foot_position]`` H-transforms at time ``t``."""
        [step_num, right_foot_step_ratio, left_foot_step_ratio] = self.footHeightRatio(t)
        [right_foot_action, left_foot_action] = self.whatIsTheFootDoing(step_num)
        # A mid-swing ratio implies a two-element (swing) action for that foot.
        if right_foot_step_ratio != 0 and right_foot_step_ratio != 1:
            assert (len(right_foot_action) == 2)
        if left_foot_step_ratio != 0 and left_foot_step_ratio != 1:
            assert (len(left_foot_action) == 2)
        # assert ((len(right_foot_action) == 2) == (right_foot_step_ratio != 0 and right_foot_step_ratio != 1))
        # Right foot: planted (single action) or interpolated along the swing arc.
        if len(right_foot_action) == 1:
            right_foot_position = self.right_foot_position_at_step(right_foot_action[0])
        else:
            _from = self.right_foot_position_at_step(right_foot_action[0])
            _to = self.right_foot_position_at_step(right_foot_action[1])
            right_foot_position = self.parabolicPath(_from, _to, self.step_height, -self.step_outwardness, -self.step_rotation, right_foot_step_ratio)
        # Left foot: same, with mirrored outwardness/rotation signs.
        if len(left_foot_action) == 1:
            left_foot_position = self.left_foot_position_at_step(left_foot_action[0])
        else:
            _from = self.left_foot_position_at_step(left_foot_action[0])
            _to = self.left_foot_position_at_step(left_foot_action[1])
            left_foot_position = self.parabolicPath(_from, _to, self.step_height, self.step_outwardness, self.step_rotation, left_foot_step_ratio)
        return [right_foot_position, left_foot_position]

    def parabolicPath(self, startTransform, endTransform, zdiff, sidediff, rotdiff, ratio):
        """Transform ``ratio`` of the way (by arc length) along a parabolic swing arc.

        http://mathworld.wolfram.com/ParabolicSegment.html

        :param startTransform: lift-off foot transform
        :param endTransform: touch-down foot transform
        :param zdiff: vertical apex component of the arc
        :param sidediff: lateral apex component of the arc
        :param rotdiff: peak roll rotation applied at the apex (0 disables)
        :param ratio: swing progress in [0, 1]
        :return: interpolated H-transform (matrix product, numpy array)
        """
        step_time = self.bodyStepTime()
        distance_between_step = tr.get_distance(startTransform, endTransform)
        if distance_between_step == 0.0:
            # Degenerate step-in-place: nudge the end pose 1 mm along the
            # current heading so the parabola has a non-zero chord.
            delta = 0.001
            angle = startTransform.get_orientation_euler()[2]
            delta_tr = [np.cos(angle) * delta, np.sin(angle) * delta, 0]
            endTransform = deepcopy(endTransform)
            endTransform.set_position(endTransform.get_position() + delta_tr)
            distance_between_step = tr.get_distance(startTransform, endTransform)
            assert (distance_between_step != 0.0)
        height_per_step = np.linalg.norm([zdiff, sidediff])
        h = height_per_step
        a = distance_between_step / 2
        # Using Newton Approximation Method
        # https://math.stackexchange.com/questions/3129154/divide-a-parabola-in-segments-of-equal-length
        L = distance_between_step
        aa = 4 * h / L
        f = lambda x: x * np.sqrt(1 + (x ** 2)) + np.arcsinh(x)  # f = @(x) x * sqrt(1+x^2) + asinh(x);
        s = ratio
        J = lambda X: 2 * np.sqrt(1 + (X ** 2))  # J = @(X) 2 * sqrt(1+X^2);
        r = lambda X: f(X) - (1 - (2 * s)) * f(aa)  # r = @(X) f(X) - (1-2*s)*f(aa);
        X = 0
        # Newton iteration: find the abscissa splitting the arc at the given ratio.
        while np.abs(r(X)) > 0.0001:
            X = X - r(X) / J(X)
        if aa == 0:
            dist = ratio * L
        else:
            dist = 0.5 * (1 - X / aa) * L
        # Calculate intermediate transform
        position_time = dist / distance_between_step * step_time
        if position_time < 0:
            position_time = 0
        ratio = position_time / step_time
        if ratio < 0:
            ratio = 0
        elif ratio > 1:
            ratio = 1
        # Interpolate between the two H-transforms
        t1 = tr.transformation_weighted_average(startTransform, endTransform, ratio)
        x = (-a) + dist
        y = h * (1 - (x ** 2) / (a ** 2))  # parabola height at the current abscissa
        zdelta = np.cos(np.arctan2(sidediff, zdiff)) * y
        ydelta = np.sin(np.arctan2(sidediff, zdiff)) * y
        if rotdiff != 0:
            thetadelta = y / height_per_step * rotdiff
        else:
            thetadelta = 0
        t2 = tr(position=[0, ydelta, zdelta], quaternion=tr.get_quaternion_from_axis_angle(vector = [1, 0, 0], angle = thetadelta))
        position = np.matmul(t1, t2)
        return position

    def show(self, fig=None):
        """
        Draws the feet positions
        :param fig: Shared figure object
        :return: None
        """
        i = 0
        # for t = 0:obj.step_size:obj.duration
        # TODO: make a generator?
        iterator = np.linspace(0, self.duration(), num=math.ceil(self.duration() / self.step_size)+1)
        tfInterp_l = np.zeros((4, 4, len(iterator)))
        tfInterp_r = np.zeros((4, 4, len(iterator)))
        for t in iterator:
            # NOTE(review): footPosition returns [right, left] but is unpacked
            # here as [lfp, rfp] -- the l/r buffers appear swapped; confirm.
            [lfp, rfp] = self.footPosition(t)
            tfInterp_l[:,:,i] = lfp
            tfInterp_r[:,:,i] = rfp
            i = i + 1
        self.show_tf(fig, tfInterp_l, len(iterator))
        self.show_tf(fig, tfInterp_r, len(iterator))

    @staticmethod
    def show_tf(fig=None, tf_array=None, length=0):
        """
        Helper function to draw the H-transforms equivalent to plotTransforms function in Matlab
        :param fig: Shared figure object
        :param tf_array: Array of transforms of size (4,4,n)
        :param length: The 3rd dimension of the array, n
        :return: None
        """
        # NOTE(review): fig.gca(projection='3d') is deprecated/removed in
        # recent Matplotlib; verify against the pinned Matplotlib version.
        if fig is None:
            fig = plt.figure()
            ax = fig.gca(projection='3d')
        tfInterp_axis = np.zeros((3, length))
        axes = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
        colors = ['r', 'g', 'b']
        ax = fig.gca(projection='3d')
        for j in range(len(axes)):
            for i in range(np.size(tf_array, 2)):
                # Rotate unit axis j into the frame of transform i.
                tfInterp_axis[:, i] = np.matmul(tf_array[0:3, 0:3, i], axes[j]).ravel()
            tfInterp_axis = tfInterp_axis * 0.01
            ax.quiver(tf_array[0, 3, :], tf_array[1, 3, :], tf_array[2, 3, :], tfInterp_axis[0, :],
                      tfInterp_axis[1, :], tfInterp_axis[2, :], color=colors[j], normalize=True, length=0.01,
                      arrow_length_ratio=.2)
        fig.canvas.draw()
        plt.show(block=False)
|
<filename>resources/library/pycontrol/src/soccer_pycontrol/footpath.py
import numpy as np
import math
import enum
from resources.library.pycontrol.src.soccer_pycontrol.path import Path
from resources.library.geometry.src.soccer_geometry.transformation import Transformation as tr
import matplotlib.pyplot as plt
from copy import deepcopy
class PostPreSetting(enum.IntEnum):
    """Selects how the post-step / pre-step dwell times are applied in
    Footpath.footHeightRatio (post = pause after lift-off, pre = pause
    before touch-down)."""
    POST_AND_PRE = 0  # apply both dwell periods on every step (default)
    ONLY_POST_AND_PRE_ON_LAST_ONES = 1  # dwell only on the first/last half steps
    ONLY_POST = 2  # keep only the post dwell (pre is zeroed, post is negated by the caller)
    NO_POST_NOR_PRE = 3  # no dwell at all
# TODO: where is first_step_left????
class Footpath(Path):
    """Foot-placement trajectory generator for a bipedal walk.

    Extends the body-path planner ``Path`` with per-foot step timing and
    parabolic swing-foot interpolation.  Relies on members provided by
    ``Path`` (not defined here): ``duration()``, ``bodyStepCount()``,
    ``getBodyStep()``, ``bodyStepTime()``, ``step_size``,
    ``first_step_left``, ``post_footstep_ratio``, ``pre_footstep_ratio``.
    """

    half_to_full_step_time_ratio = 0.7  # duration of a half step relative to a full step
    foot_separation = 0.044  # lateral separation between each foot and the body centre
    step_height = 0.065  # apex height of the swing foot
    step_outwardness = 0.015  # lateral bulge of the swing trajectory
    step_rotation = 0.05  # peak roll rotation applied mid-swing

    def __init__(self, start_transform, end_transform, foot_center_to_floor):
        """Build a foot path between two body transforms.

        :param start_transform: initial body transformation
        :param end_transform: final body transformation
        :param foot_center_to_floor: vertical offset from foot centre to floor
        """
        super().__init__(start_transform, end_transform)
        self.foot_center_to_floor = foot_center_to_floor

    def half_step_time(self):
        """Duration of the first/last (half) steps, shorter than a full step."""
        return self.full_step_time() * self.half_to_full_step_time_ratio

    def num_steps(self):
        """Number of foot steps: one more than the number of body steps."""
        return self.bodyStepCount() + 1

    def full_step_time(self):
        """Duration of one steady-state (full) step.

        The total path duration is split into two half steps plus
        ``num_steps() - 2`` full steps.
        """
        total_step_time = self.duration()
        return total_step_time / (2 * self.half_to_full_step_time_ratio + (self.num_steps() - 2))

    def footHeightRatio(self, t, post_pre_settings=0):
        """Return ``[step_num, right_foot_step_ratio, left_foot_step_ratio]`` at time ``t``.

        Each ratio is the swing progress of that foot in [0, 1]
        (0 = still planted, 1 = landed on the next position).
        ``post_step_time`` / ``pre_step_time`` are dwell periods at the
        start/end of each swing; ``post_pre_settings`` (a ``PostPreSetting``
        value, default POST_AND_PRE) selects which of them apply.
        """
        full_step_time = self.full_step_time()
        half_step_time = self.half_step_time()
        post_step_time = self.post_footstep_ratio * full_step_time
        pre_step_time = self.pre_footstep_ratio * full_step_time
        if post_pre_settings == PostPreSetting.ONLY_POST_AND_PRE_ON_LAST_ONES:
            # Keep dwell only on the opening and closing half steps.
            if t < half_step_time:
                pre_step_time = 0
            elif t > (self.duration() - half_step_time):
                post_step_time = 0
            else:
                post_step_time = 0
                pre_step_time = 0
        elif post_pre_settings == PostPreSetting.ONLY_POST:
            pre_step_time = 0
            post_step_time = -post_step_time
        elif post_pre_settings == PostPreSetting.NO_POST_NOR_PRE:
            post_step_time = 0
            pre_step_time = 0
        # Odd step count => the foot that starts also finishes.
        last_foot_same = self.num_steps() % 2
        step_num = -1
        # First foot
        if t < half_step_time:
            if t < post_step_time:
                first_foot_step_ratio = 0
            elif t > (half_step_time - pre_step_time):
                first_foot_step_ratio = 1
            else:
                first_foot_step_ratio = (t - post_step_time) / (half_step_time - post_step_time - pre_step_time)
        elif last_foot_same and (t > self.duration() - half_step_time):
            adjusted_step_time = t - (self.duration() - half_step_time)
            if adjusted_step_time < post_step_time:
                first_foot_step_ratio = 0
            elif adjusted_step_time > (half_step_time - pre_step_time):
                first_foot_step_ratio = 1
            else:
                first_foot_step_ratio = (adjusted_step_time - post_step_time) / (
                        half_step_time - post_step_time - pre_step_time)
        else:
            adjusted_step_time = t - half_step_time
            # fix in matlab function rounds to nearest integer towards 0
            # NOTE(review): here step_num stays a numpy float (no int cast),
            # unlike the second-foot section below -- confirm intended.
            if(adjusted_step_time / full_step_time) >= 0:
                step_num = np.floor(adjusted_step_time / full_step_time)  # fix function in matlab
            else:
                step_num = np.ceil(adjusted_step_time / full_step_time)  # fix function in matlab
            adjusted_step_time = adjusted_step_time - step_num * full_step_time
            if (step_num % 2) == 0:
                first_foot_step_ratio = 0
            else:
                if adjusted_step_time < post_step_time:
                    first_foot_step_ratio = 0
                elif adjusted_step_time > (full_step_time - pre_step_time):
                    first_foot_step_ratio = 1
                else:
                    first_foot_step_ratio = (adjusted_step_time - post_step_time) / (
                            full_step_time - post_step_time - pre_step_time)
        # Second foot
        if t < half_step_time:
            second_foot_step_ratio = 0
        elif (not last_foot_same) and (t > (self.duration() - half_step_time)):
            adjusted_step_time = t - (self.duration() - half_step_time)
            if adjusted_step_time < post_step_time:
                second_foot_step_ratio = 0
            elif adjusted_step_time > (half_step_time - pre_step_time):
                second_foot_step_ratio = 1
            else:
                second_foot_step_ratio = (adjusted_step_time - post_step_time) / (
                        half_step_time - post_step_time - pre_step_time)
        else:
            adjusted_step_time = t - half_step_time
            # fix in matlab function rounds to nearest integer towards 0
            if(adjusted_step_time / full_step_time) >=0:
                step_num = int(np.floor(adjusted_step_time / full_step_time))  # fix function in matlab
            else:
                step_num = int(np.ceil(adjusted_step_time / full_step_time))  # fix function in matlab
            adjusted_step_time = adjusted_step_time - step_num * full_step_time
            if (step_num % 2) == 1:
                second_foot_step_ratio = 0
            else:
                if adjusted_step_time < post_step_time:
                    second_foot_step_ratio = 0
                elif adjusted_step_time > (full_step_time - pre_step_time):
                    second_foot_step_ratio = 1
                else:
                    second_foot_step_ratio = (adjusted_step_time - post_step_time) / (
                            full_step_time - post_step_time - pre_step_time)
        # Which foot is first?
        assert (first_foot_step_ratio <= 1)
        assert (second_foot_step_ratio <= 1)
        if self.first_step_left:
            right_foot_step_ratio = first_foot_step_ratio
            left_foot_step_ratio = second_foot_step_ratio
        else:
            right_foot_step_ratio = second_foot_step_ratio
            left_foot_step_ratio = first_foot_step_ratio
        step_num = step_num + 1
        return [step_num, right_foot_step_ratio, left_foot_step_ratio]

    def right_foot_position_at_step(self, n):
        """H-transform of the right foot when planted at body step ``n``."""
        bodystep = self.getBodyStep(n)
        bodypos = bodystep.get_position()
        # NOTE(review): local is named transformToLeftFoot but carries the
        # right-foot offset (negative y) -- the name looks swapped; confirm.
        transformToLeftFoot = tr([0, -self.foot_separation, -bodypos[2] + self.foot_center_to_floor])
        return np.matmul(bodystep, transformToLeftFoot)

    def left_foot_position_at_step(self, n):
        """H-transform of the left foot when planted at body step ``n``."""
        bodystep = self.getBodyStep(n)
        bodypos = bodystep.get_position()
        # NOTE(review): same naming swap as right_foot_position_at_step.
        transformToRightFoot = tr([0, self.foot_separation, -bodypos[2] + self.foot_center_to_floor])
        return np.matmul(bodystep, transformToRightFoot)

    def whatIsTheFootDoing(self, step_num):
        """Return ``[right_foot_action, left_foot_action]`` for ``step_num``.

        A one-element action ``[k]`` means that foot stays planted at body
        step ``k``; a two-element action ``[a, b]`` means it swings from
        body step ``a`` to body step ``b``.
        """
        if step_num == 0:
            if self.first_step_left:
                right_foot_action = [0, 1]  # Go from body position 0 to 1
                left_foot_action = [0]  # Stay put at position 0
            else:
                right_foot_action = [0]
                left_foot_action = [0, 1]
        elif step_num == (self.num_steps() - 1):
            # Final half step: whichever foot is due swings onto the last position.
            if self.first_step_left ^ ((self.num_steps() % 2) == 0):  # xor
                right_foot_action = [self.num_steps() - 2, self.num_steps() - 1]
                left_foot_action = [self.num_steps() - 1]
            else:
                left_foot_action = [self.num_steps() - 2, self.num_steps() - 1]
                right_foot_action = [self.num_steps() - 1]
        else:
            if self.first_step_left:
                if (step_num % 2) == 0:  # left planted at step_num, right swings
                    left_foot_action = [step_num]
                    right_foot_action = [step_num - 1, step_num + 1]
                else:
                    left_foot_action = [step_num - 1, step_num + 1]
                    right_foot_action = [step_num]
            else:
                if (step_num % 2) == 0:  # right planted at step_num, left swings
                    right_foot_action = [step_num]
                    left_foot_action = [step_num - 1, step_num + 1]
                else:
                    right_foot_action = [step_num - 1, step_num + 1]
                    left_foot_action = [step_num]
        return [right_foot_action, left_foot_action]

    def footPosition(self, t):
        """Return ``[right_foot_position, left_foot_position]`` H-transforms at time ``t``."""
        [step_num, right_foot_step_ratio, left_foot_step_ratio] = self.footHeightRatio(t)
        [right_foot_action, left_foot_action] = self.whatIsTheFootDoing(step_num)
        # A mid-swing ratio implies a two-element (swing) action for that foot.
        if right_foot_step_ratio != 0 and right_foot_step_ratio != 1:
            assert (len(right_foot_action) == 2)
        if left_foot_step_ratio != 0 and left_foot_step_ratio != 1:
            assert (len(left_foot_action) == 2)
        # assert ((len(right_foot_action) == 2) == (right_foot_step_ratio != 0 and right_foot_step_ratio != 1))
        # Right foot: planted (single action) or interpolated along the swing arc.
        if len(right_foot_action) == 1:
            right_foot_position = self.right_foot_position_at_step(right_foot_action[0])
        else:
            _from = self.right_foot_position_at_step(right_foot_action[0])
            _to = self.right_foot_position_at_step(right_foot_action[1])
            right_foot_position = self.parabolicPath(_from, _to, self.step_height, -self.step_outwardness, -self.step_rotation, right_foot_step_ratio)
        # Left foot: same, with mirrored outwardness/rotation signs.
        if len(left_foot_action) == 1:
            left_foot_position = self.left_foot_position_at_step(left_foot_action[0])
        else:
            _from = self.left_foot_position_at_step(left_foot_action[0])
            _to = self.left_foot_position_at_step(left_foot_action[1])
            left_foot_position = self.parabolicPath(_from, _to, self.step_height, self.step_outwardness, self.step_rotation, left_foot_step_ratio)
        return [right_foot_position, left_foot_position]

    def parabolicPath(self, startTransform, endTransform, zdiff, sidediff, rotdiff, ratio):
        """Transform ``ratio`` of the way (by arc length) along a parabolic swing arc.

        http://mathworld.wolfram.com/ParabolicSegment.html

        :param startTransform: lift-off foot transform
        :param endTransform: touch-down foot transform
        :param zdiff: vertical apex component of the arc
        :param sidediff: lateral apex component of the arc
        :param rotdiff: peak roll rotation applied at the apex (0 disables)
        :param ratio: swing progress in [0, 1]
        :return: interpolated H-transform (matrix product, numpy array)
        """
        step_time = self.bodyStepTime()
        distance_between_step = tr.get_distance(startTransform, endTransform)
        if distance_between_step == 0.0:
            # Degenerate step-in-place: nudge the end pose 1 mm along the
            # current heading so the parabola has a non-zero chord.
            delta = 0.001
            angle = startTransform.get_orientation_euler()[2]
            delta_tr = [np.cos(angle) * delta, np.sin(angle) * delta, 0]
            endTransform = deepcopy(endTransform)
            endTransform.set_position(endTransform.get_position() + delta_tr)
            distance_between_step = tr.get_distance(startTransform, endTransform)
            assert (distance_between_step != 0.0)
        height_per_step = np.linalg.norm([zdiff, sidediff])
        h = height_per_step
        a = distance_between_step / 2
        # Using Newton Approximation Method
        # https://math.stackexchange.com/questions/3129154/divide-a-parabola-in-segments-of-equal-length
        L = distance_between_step
        aa = 4 * h / L
        f = lambda x: x * np.sqrt(1 + (x ** 2)) + np.arcsinh(x)  # f = @(x) x * sqrt(1+x^2) + asinh(x);
        s = ratio
        J = lambda X: 2 * np.sqrt(1 + (X ** 2))  # J = @(X) 2 * sqrt(1+X^2);
        r = lambda X: f(X) - (1 - (2 * s)) * f(aa)  # r = @(X) f(X) - (1-2*s)*f(aa);
        X = 0
        # Newton iteration: find the abscissa splitting the arc at the given ratio.
        while np.abs(r(X)) > 0.0001:
            X = X - r(X) / J(X)
        if aa == 0:
            dist = ratio * L
        else:
            dist = 0.5 * (1 - X / aa) * L
        # Calculate intermediate transform
        position_time = dist / distance_between_step * step_time
        if position_time < 0:
            position_time = 0
        ratio = position_time / step_time
        if ratio < 0:
            ratio = 0
        elif ratio > 1:
            ratio = 1
        # Interpolate between the two H-transforms
        t1 = tr.transformation_weighted_average(startTransform, endTransform, ratio)
        x = (-a) + dist
        y = h * (1 - (x ** 2) / (a ** 2))  # parabola height at the current abscissa
        zdelta = np.cos(np.arctan2(sidediff, zdiff)) * y
        ydelta = np.sin(np.arctan2(sidediff, zdiff)) * y
        if rotdiff != 0:
            thetadelta = y / height_per_step * rotdiff
        else:
            thetadelta = 0
        t2 = tr(position=[0, ydelta, zdelta], quaternion=tr.get_quaternion_from_axis_angle(vector = [1, 0, 0], angle = thetadelta))
        position = np.matmul(t1, t2)
        return position

    def show(self, fig=None):
        """
        Draws the feet positions
        :param fig: Shared figure object
        :return: None
        """
        i = 0
        # for t = 0:obj.step_size:obj.duration
        # TODO: make a generator?
        iterator = np.linspace(0, self.duration(), num=math.ceil(self.duration() / self.step_size)+1)
        tfInterp_l = np.zeros((4, 4, len(iterator)))
        tfInterp_r = np.zeros((4, 4, len(iterator)))
        for t in iterator:
            # NOTE(review): footPosition returns [right, left] but is unpacked
            # here as [lfp, rfp] -- the l/r buffers appear swapped; confirm.
            [lfp, rfp] = self.footPosition(t)
            tfInterp_l[:,:,i] = lfp
            tfInterp_r[:,:,i] = rfp
            i = i + 1
        self.show_tf(fig, tfInterp_l, len(iterator))
        self.show_tf(fig, tfInterp_r, len(iterator))

    @staticmethod
    def show_tf(fig=None, tf_array=None, length=0):
        """
        Helper function to draw the H-transforms equivalent to plotTransforms function in Matlab
        :param fig: Shared figure object
        :param tf_array: Array of transforms of size (4,4,n)
        :param length: The 3rd dimension of the array, n
        :return: None
        """
        # NOTE(review): fig.gca(projection='3d') is deprecated/removed in
        # recent Matplotlib; verify against the pinned Matplotlib version.
        if fig is None:
            fig = plt.figure()
            ax = fig.gca(projection='3d')
        tfInterp_axis = np.zeros((3, length))
        axes = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
        colors = ['r', 'g', 'b']
        ax = fig.gca(projection='3d')
        for j in range(len(axes)):
            for i in range(np.size(tf_array, 2)):
                # Rotate unit axis j into the frame of transform i.
                tfInterp_axis[:, i] = np.matmul(tf_array[0:3, 0:3, i], axes[j]).ravel()
            tfInterp_axis = tfInterp_axis * 0.01
            ax.quiver(tf_array[0, 3, :], tf_array[1, 3, :], tf_array[2, 3, :], tfInterp_axis[0, :],
                      tfInterp_axis[1, :], tfInterp_axis[2, :], color=colors[j], normalize=True, length=0.01,
                      arrow_length_ratio=.2)
        fig.canvas.draw()
        plt.show(block=False)
|
en
| 0.711564
|
# TODO: where is first_step_left???? # Duration difference between half and full step # seperation between feet and center # height of step # First foot # fix in matlab function rounds to nearest integer towards 0 # fix function in matlab # fix function in matlab # Second foot # fix in matlab function rounds to nearest integer towards 0 # fix function in matlab # fix function in matlab # Which foot is first? # Go from body position 0 to 1 # Stay put at position 0 # xor # Left foot moving # Left foot moving # assert ((len(right_foot_action) == 2) == (right_foot_step_ratio != 0 and right_foot_step_ratio != 1)) # Left foot # Right foot http://mathworld.wolfram.com/ParabolicSegment.html # Using Newton Approximation Method # https://math.stackexchange.com/questions/3129154/divide-a-parabola-in-segments-of-equal-length # f = @(x) x * sqrt(1+x^2) + asinh(x); # J = @(X) 2 * sqrt(1+X^2); # r = @(X) f(X) - (1-2*s)*f(aa); # Calculate intermediate transform # Interpolate between the two H-transforms Draws the feet positions :param fig: Shared figure object :return: None # for t = 0:obj.step_size:obj.duration # TODO: make a generator? Helper function to draw the H-transforms equivalent to plotTransforms function in Matlab :param fig: Shared figure object :param tf_array: Array of transforms of size (4,4,n) :param length: The 3rd dimension of the array, n :return: None
| 2.312612
| 2
|
probez/file_handling/recording_io.py
|
Sepidak/spikeGUI
| 0
|
6628561
|
import math
import os
import struct
from itertools import compress
import numpy as np
from file_handling import binary_classes
from util import detrending
class RecordingIo:
    """Chunked reader/writer for interleaved multi-channel binary recordings.

    Samples are stored as signed 16-bit integers (struct format ``'h'``)
    interleaved across ``n_chan`` channels; the file is processed in
    fixed-size chunks so arbitrarily large recordings never have to fit in
    memory.  Binary layouts come from the project's ``binary_classes``
    (DataPoint / TimePoint / Chunk).
    """

    def __init__(self, path, n_chan):
        """
        :param path: path to the raw binary recording file
        :param n_chan: number of interleaved channels per time point
        """
        self.path = path
        self.root = os.path.split(path)[0]
        self.file_name = os.path.split(path)[1]
        self.name = self.file_name.split('.')[0]
        self.n_chan = n_chan
        self.dtype = 'h'  # struct format code: signed 16-bit integer
        self.byte_width = struct.calcsize(self.dtype)
        self.data_point = binary_classes.DataPoint(base_format=self.dtype)
        self.time_point = binary_classes.TimePoint(self.data_point, n_chan=self.n_chan)

    @property
    def _size(self):
        """Size of the recording file in bytes."""
        info = os.stat(self.path)
        return info.st_size

    def create_chunks(self, n_samples_total=None, n_samples_to_process=50000):
        """Build the chunk structs used to iterate over the file.

        :param n_samples_total: total number of BYTES to process (despite the
            name -- it defaults to the file size in bytes and is compared
            against ``chunk.size``, which is a byte count)
        :param n_samples_to_process: samples per regular chunk
        :return: (n_chunks, chunk, last_chunk)
        """
        if n_samples_total is None:
            n_samples_total = self._size
        if n_samples_to_process > n_samples_total:
            n_samples_to_process = n_samples_total
        chunk = binary_classes.Chunk(self.time_point, n_samples_to_process)  # define normal chunk
        leftover_bytes = n_samples_total % chunk.size  # this is a problem if the last chunk is very small
        leftover_samples = int(leftover_bytes/self.time_point.size)
        last_chunk = binary_classes.Chunk(self.time_point, leftover_samples)  # define final chunk
        print('chunk size is {}, last chunk is {} bytes:'.format(chunk.size, leftover_bytes))
        print('leftover samples = {}'.format(leftover_samples))
        n_chunks = math.ceil(n_samples_total/chunk.size)
        return n_chunks, chunk, last_chunk

    @staticmethod
    def append_chunk(f_out, chunk_out):
        """Append one packed chunk (bytes-like) to the open output file."""
        f_out.write(bytes(chunk_out))

    @staticmethod
    def get_next_data_chunk(f_in, chunk_struct):
        """
        Read the next raw chunk from the input file.

        :param file f_in: open binary input file
        :param struct.Struct chunk_struct: layout describing the chunk size
        :return: up to ``chunk_struct.size`` bytes (may be short at EOF)
        """
        return f_in.read(chunk_struct.size)

    @staticmethod
    def get_data(chunk_in, chunk_struct):
        """Unpack a raw chunk into an (n_samples, n_chan) numpy array."""
        data = chunk_struct.s.unpack_from(chunk_in)
        n_samples = int(chunk_struct.size/chunk_struct.byte_width/chunk_struct.n_chan)
        reshaped_data = np.array(data).reshape(n_samples, chunk_struct.n_chan)
        return reshaped_data

    def pack_data(self, data, chunk_struct):
        """
        Pack an (n_samples, n_chan) array back into chunk bytes.

        :param data: integer-valued array matching the chunk's layout
        :param chunk_struct: chunk struct describing the binary layout
        :return: packed byte string
        """
        packable_data = self._make_packable(data, chunk_struct)  # reshape into packable format
        chunk_out = chunk_struct.s.pack(*packable_data)  # pack
        return chunk_out

    @staticmethod
    def _make_packable(data, chunk):
        """Flatten ``data`` into a tuple of scalars suitable for struct.pack."""
        new_data_length = int(chunk.size/chunk.byte_width)
        data = data.reshape(new_data_length)
        return tuple(data)

    @property
    def data_shape(self):
        """(n_samples, n_channels) of the recording.

        Note: ``n_samples`` is a float (true division is used).
        """
        if self._size % self.n_chan != 0:
            raise ValueError('size: {} or n_chan: {} incorrect'.format(self._size, self.n_chan))
        n_samples = self._size/self.n_chan/self.byte_width
        n_channels = self.n_chan
        return n_samples, n_channels

    def process_to_file(self, f_in_path, f_out_path, n_chan, channels_to_discard,
                        processing_func=detrending.denoise_detrend, n_samples_to_process=50000,
                        on_data=True, start_sample=0, end_sample=None):
        """Stream the recording through ``processing_func`` into a new file.

        :param f_in_path: input recording path
        :param f_out_path: output recording path
        :param n_chan: number of interleaved channels in the input
        :param channels_to_discard: channel indices to drop (used by
            byte-level processing funcs when ``on_data`` is False)
        :param processing_func: callable applied per chunk; None copies data
            through unchanged
        :param n_samples_to_process: samples per chunk
        :param on_data: if True, the func receives an unpacked numpy array;
            if False, it receives the raw chunk bytes
        :param start_sample: first sample (time point) to process
        :param end_sample: one-past-last sample, or None for end of file
        :raises ValueError: if a byte-level func returns an unexpected length
        """
        # TODO: make this work for both chunk and data operations at the same time
        # TODO: make this much cleaner
        # TODO: multiple output files
        start_byte = start_sample * self.time_point.size  # time point is multiple of n_chan
        end_byte = self._size if end_sample is None else end_sample * self.time_point.size
        print(f_in_path, f_out_path)
        with open(f_in_path, 'rb') as f_in:
            f_in.seek(start_byte)
            with open(f_out_path, 'wb') as f_out:
                n_samples_total = end_byte - start_byte
                n_chunks, chunk, last_chunk = self.create_chunks(n_samples_total, n_samples_to_process)
                for i in range(n_chunks):
                    current_chunk_struct = last_chunk if i == n_chunks-1 else chunk
                    print('chunk: {} of {}'.format(i+1, n_chunks))
                    try:
                        chunk_in = self.get_next_data_chunk(f_in, current_chunk_struct)
                    except EOFError:
                        # NOTE(review): file.read() does not raise EOFError --
                        # a short final read surfaces later in unpack instead;
                        # this handler appears to be dead code. Confirm.
                        break
                    if processing_func is None:
                        # Pass-through: unpack and repack without modification.
                        data = self.get_data(chunk_in, current_chunk_struct)
                        chunk_out = self.pack_data(data, current_chunk_struct)
                    elif on_data:
                        data = self.get_data(chunk_in, current_chunk_struct)
                        processed_data = processing_func(data, n_chan)
                        chunk_out = self.pack_data(processed_data, current_chunk_struct)  # pack only works if processing step returns integer values
                    else:
                        print('n_chan_recfile = {}'.format(n_chan))
                        chunk_out, out_channels_bytes = processing_func(chunk_in, n_chan, channels_to_discard)
                        if len(chunk_out) != out_channels_bytes:
                            raise ValueError("Expected to write {} bytes, wrote: {}".format(out_channels_bytes,
                                                                                            len(chunk_out)))
                    self.append_chunk(f_out, chunk_out)

    def make_mask(self, chunk_in, n_chan, channels_to_discard=None):
        """
        generates a byte mask such that only bytes of channels of interest are marked as True
        :param chunk_in: raw chunk bytes
        :param n_chan: number of interleaved channels
        :param channels_to_discard: channel indices whose bytes are masked False
        :return: list of booleans, one per byte of ``chunk_in``
        """
        # Fix: the default argument used to be a mutable list ([]), which is
        # shared across all calls; use a None sentinel instead (behaviour is
        # unchanged for existing callers).
        if channels_to_discard is None:
            channels_to_discard = []
        mask = []
        byte_width = self.data_point.size
        n_repeats = int(len(chunk_in)/(n_chan*byte_width))
        for i in range(n_chan):
            if i in channels_to_discard:
                mask += [False]*byte_width
            else:
                mask += [True]*byte_width
        return list(np.tile(mask, n_repeats))

    def remove_channels_from_chunk(self, chunk_in, n_chan, channels_to_discard):
        """Drop the bytes of ``channels_to_discard`` from a raw chunk.

        :return: (filtered chunk as a list of bytes, number of kept bytes)
        """
        channels_bytes_mask = self.make_mask(chunk_in, n_chan, channels_to_discard)
        n_out_channels_bytes = channels_bytes_mask.count(True)
        chunk_out = list(compress(chunk_in, channels_bytes_mask))  # return only the desired data
        return chunk_out, n_out_channels_bytes
|
import math
import os
import struct
from itertools import compress
import numpy as np
from file_handling import binary_classes
from util import detrending
class RecordingIo:
def __init__(self, path, n_chan):
self.path = path
self.root = os.path.split(path)[0]
self.file_name = os.path.split(path)[1]
self.name = self.file_name.split('.')[0]
self.n_chan = n_chan
self.dtype = 'h'
self.byte_width = struct.calcsize(self.dtype)
self.data_point = binary_classes.DataPoint(base_format=self.dtype)
self.time_point = binary_classes.TimePoint(self.data_point, n_chan=self.n_chan)
@property
def _size(self):
info = os.stat(self.path)
return info.st_size
def create_chunks(self, n_samples_total=None, n_samples_to_process=50000):
if n_samples_total is None:
n_samples_total = self._size
if n_samples_to_process > n_samples_total:
n_samples_to_process = n_samples_total
chunk = binary_classes.Chunk(self.time_point, n_samples_to_process) # define normal chunk
leftover_bytes = n_samples_total % chunk.size # this is a problem if the last chunk is very small
leftover_samples = int(leftover_bytes/self.time_point.size)
last_chunk = binary_classes.Chunk(self.time_point, leftover_samples) # define final chunk
print('chunk size is {}, last chunk is {} bytes:'.format(chunk.size, leftover_bytes))
print('leftover samples = {}'.format(leftover_samples))
n_chunks = math.ceil(n_samples_total/chunk.size)
return n_chunks, chunk, last_chunk
@staticmethod
def append_chunk(f_out, chunk_out):
f_out.write(bytes(chunk_out))
@staticmethod
def get_next_data_chunk(f_in, chunk_struct):
"""
:param file f_in:
:param struct.Struct chunk_struct:
:return:
"""
return f_in.read(chunk_struct.size)
@staticmethod
def get_data(chunk_in, chunk_struct):
data = chunk_struct.s.unpack_from(chunk_in)
n_samples = int(chunk_struct.size/chunk_struct.byte_width/chunk_struct.n_chan)
reshaped_data = np.array(data).reshape(n_samples, chunk_struct.n_chan)
return reshaped_data
def pack_data(self, data, chunk_struct):
"""
:param data:
:param chunk_struct:
:return:
"""
packable_data = self._make_packable(data, chunk_struct) # reshape into packable format
chunk_out = chunk_struct.s.pack(*packable_data) # pack
return chunk_out
@staticmethod
def _make_packable(data, chunk):
new_data_length = int(chunk.size/chunk.byte_width)
data = data.reshape(new_data_length)
return tuple(data)
@property
def data_shape(self):
if self._size % self.n_chan != 0:
raise ValueError('size: {} or n_chan: {} incorrect'.format(self._size, self.n_chan))
n_samples = self._size/self.n_chan/self.byte_width
n_channels = self.n_chan
return n_samples, n_channels
def process_to_file(self, f_in_path, f_out_path, n_chan, channels_to_discard,
                    processing_func=detrending.denoise_detrend, n_samples_to_process=50000,
                    on_data=True, start_sample=0, end_sample=None):
    """Stream a binary recording through ``processing_func`` chunk by chunk.

    :param f_in_path: input file path (binary recording)
    :param f_out_path: output file path; overwritten
    :param n_chan: number of interleaved channels in the recording
    :param channels_to_discard: channel indices dropped by chunk-level
        processing functions (only used when ``on_data`` is False)
    :param processing_func: callable applied per chunk; None copies verbatim
    :param n_samples_to_process: samples per regular chunk
    :param on_data: if True, ``processing_func`` receives unpacked sample
        arrays; if False, it receives the raw chunk bytes
    :param start_sample: first sample to process
    :param end_sample: one-past-last sample, or None for end of file
    """
    # TODO: make this work for both chunk and data operations at the same time
    # TODO: make this much cleaner
    # TODO: multiple output files
    start_byte = start_sample * self.time_point.size # time point is multiple of n_chan
    end_byte = self._size if end_sample is None else end_sample * self.time_point.size
    print(f_in_path, f_out_path)
    with open(f_in_path, 'rb') as f_in:
        f_in.seek(start_byte)
        with open(f_out_path, 'wb') as f_out:
            n_samples_total = end_byte - start_byte
            n_chunks, chunk, last_chunk = self.create_chunks(n_samples_total, n_samples_to_process)
            for i in range(n_chunks):
                # The final chunk is (usually) shorter than the rest.
                current_chunk_struct = last_chunk if i == n_chunks-1 else chunk
                print('chunk: {} of {}'.format(i+1, n_chunks))
                try:
                    chunk_in = self.get_next_data_chunk(f_in, current_chunk_struct)
                except EOFError:
                    # NOTE(review): file.read() does not raise EOFError (it
                    # returns b'' at EOF), so this break looks unreachable
                    # for plain files -- confirm intent.
                    break
                if processing_func is None:
                    # Pass-through: unpack and repack without modification.
                    data = self.get_data(chunk_in, current_chunk_struct)
                    chunk_out = self.pack_data(data, current_chunk_struct)
                elif on_data:
                    # Sample-level processing on the unpacked array.
                    data = self.get_data(chunk_in, current_chunk_struct)
                    processed_data = processing_func(data, n_chan)
                    chunk_out = self.pack_data(processed_data, current_chunk_struct) # pack only works if processing step returns integer values
                else:
                    # Chunk-level (raw bytes) processing, e.g. channel removal.
                    print('n_chan_recfile = {}'.format(n_chan))
                    chunk_out, out_channels_bytes = processing_func(chunk_in, n_chan, channels_to_discard)
                    if len(chunk_out) != out_channels_bytes:
                        raise ValueError("Expected to write {} bytes, wrote: {}".format(out_channels_bytes,
                                                                                       len(chunk_out)))
                self.append_chunk(f_out, chunk_out)
def make_mask(self, chunk_in, n_chan, channels_to_discard=None):
    """Generate a byte mask marking only bytes of channels of interest True.

    :param chunk_in: raw chunk bytes whose length determines the mask length
    :param n_chan: number of interleaved channels
    :param channels_to_discard: channel indices whose bytes are masked False;
        defaults to keeping every channel
    :return: list of booleans, one per byte of ``chunk_in``
    """
    # Fixed mutable default argument (was `channels_to_discard=[]`): default
    # container objects are shared across calls. Behavior is unchanged for
    # callers passing a list or omitting the argument.
    if channels_to_discard is None:
        channels_to_discard = []
    discard = set(channels_to_discard)  # O(1) membership tests
    byte_width = self.data_point.size
    # One mask entry per byte; the per-sample pattern repeats for every
    # sample in the chunk.
    n_repeats = int(len(chunk_in)/(n_chan*byte_width))
    mask = []
    for i in range(n_chan):
        if i in discard:
            mask += [False]*byte_width
        else:
            mask += [True]*byte_width
    return list(np.tile(mask, n_repeats))
def remove_channels_from_chunk(self, chunk_in, n_chan, channels_to_discard):
    """Drop the bytes of unwanted channels from a raw chunk.

    :param chunk_in: raw chunk bytes
    :param n_chan: number of interleaved channels
    :param channels_to_discard: channel indices to remove
    :return: (filtered byte list, expected output length in bytes)
    """
    channels_bytes_mask = self.make_mask(chunk_in, n_chan, channels_to_discard)
    n_out_channels_bytes = channels_bytes_mask.count(True)
    # `compress` keeps only bytes whose mask entry is truthy.
    # NOTE(review): `compress` is presumably itertools.compress imported
    # above this chunk -- confirm.
    chunk_out = list(compress(chunk_in, channels_bytes_mask)) # return only the desired data
    return chunk_out, n_out_channels_bytes
|
en
| 0.77702
|
# define normal chunk # this is a problem if the last chunk is very small # define final chunk :param file f_in:
:param struct.Struct chunk_struct:
:return: :param data:
:param chunk_struct:
:return: # reshape into packable format # pack # TODO: make this work for both chunk and data operations at the same time # TODO: make this much cleaner # TODO: multiple output files # time point is multiple of n_chan # pack only works if processing step returns integer values generates a byte mask such that only bytes of channels of interest are marked as True
:param chunk_in:
:param n_chan:
:param channels_to_discard:
:return: # return only the desired data
| 2.541854
| 3
|
visan/plot/axispropertypanel.py
|
ercumentaksoy/visan
| 7
|
6628562
|
# Copyright (C) 2002-2021 S[&]T, The Netherlands.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import wx
from .labeledtextctrl import LabeledTextCtrl, EVT_VALUE_CHANGED
from .util import DetermineCharSize
from .validators import DigitValidator, FloatValidator
class AxisPropertyPanel(wx.Panel):
    """Property panel for configuring one plot axis.

    Exposes controls for the axis label, logarithmic scaling, the target
    number of ticks, the tick base, and the displayed min/max range, and
    pushes every change straight into the owning plot window.
    """

    def __init__(self, parent, plotWindow, axisId):
        """
        :param parent: parent wx window
        :param plotWindow: plot window object queried/updated for axis state
        :param axisId: identifier of the axis this panel edits
        """
        panelstyle = wx.TAB_TRAVERSAL
        if wx.Platform == '__WXGTK__':
            panelstyle |= wx.SUNKEN_BORDER
        wx.Panel.__init__(self, parent, -1, style=panelstyle)
        self.plotWindow = plotWindow
        self.maxTicks = 50  # upper bound accepted by the "Nr of Ticks" control
        self.axisId = axisId
        # Create and configure all widgets
        self.CreateControls()
        self.CreateLayout()

    def CreateControls(self):
        """Create all child controls and bind their change handlers."""
        (charWidth, charHeight) = DetermineCharSize(self)
        # static boxes should be created before the controls they contain in order to preserve the correct Z-Order
        self.rangeBox = wx.StaticBox(self, -1, "Range")
        self.TitleCtrl = LabeledTextCtrl(self, -1, label="Label:", size=(8 * charWidth, -1), style=wx.TE_PROCESS_ENTER)
        self.TitleCtrl.SetValue(self.plotWindow.GetAxisTitle(self.axisId))
        self.TitleCtrl.SetToolTip(wx.ToolTip("The axis title will be displayed centered on the outside of the axis "
                                             "in the plot panel."))
        self.TitleCtrl.Bind(EVT_VALUE_CHANGED, self.OnTitle)
        self.LogAxisCtrl = wx.CheckBox(self, -1, "Logarithmic Scale")
        self.LogAxisCtrl.SetValue(self.plotWindow.GetLogAxis(self.axisId))
        self.LogAxisCtrl.SetToolTip(wx.ToolTip("Use a logarithmic axis. Disabled if the current range of axis values "
                                               "contains the number 0."))
        self.LogAxisCtrl.Bind(wx.EVT_CHECKBOX, self.OnLog)
        self.NTicksCtrl = LabeledTextCtrl(self, -1, label="Nr of Ticks:", validator=DigitValidator(),
                                          size=(3 * charWidth, -1), style=wx.TE_PROCESS_ENTER)
        self.NTicksCtrl.SetValue(self.plotWindow.GetNumAxisLabels(self.axisId))
        self.NTicksCtrl.SetToolTip(wx.ToolTip("The target number of ticks to display on the axis. "
                                              "The actual number of ticks is calculated; this control's value is "
                                              "what the calculation will try to aim for."))
        self.NTicksCtrl.Bind(EVT_VALUE_CHANGED, self.OnNTicks)
        self.BaseCtrl = LabeledTextCtrl(self, -1, label="Base:", validator=FloatValidator(), formatstring="%-#.3g",
                                        size=(5 * charWidth, -1), style=wx.TE_PROCESS_ENTER)
        self.BaseCtrl.SetValue(self.plotWindow.GetAxisBase(self.axisId))
        self.BaseCtrl.SetToolTip(wx.ToolTip("The base for the linear and logarithmic tick calculation for the axis. "
                                            "This value can be fractional but must be greater than 1."))
        self.BaseCtrl.Bind(EVT_VALUE_CHANGED, self.OnBase)
        self.MinCtrl = LabeledTextCtrl(self, -1, label="Min:", validator=FloatValidator(), formatstring="%-#.3g",
                                       size=(5 * charWidth, -1), style=wx.TE_PROCESS_ENTER)
        self.MinCtrl.SetValue(self.plotWindow.GetAxisRangeMin(self.axisId))
        self.MinCtrl.SetToolTip(wx.ToolTip("The currently displayed minimum range value."))
        self.MinCtrl.Bind(EVT_VALUE_CHANGED, self.OnMin)
        self.MaxCtrl = LabeledTextCtrl(self, -1, label="Max:", validator=FloatValidator(), formatstring="%-#.3g",
                                       size=(5 * charWidth, -1), style=wx.TE_PROCESS_ENTER)
        self.MaxCtrl.SetValue(self.plotWindow.GetAxisRangeMax(self.axisId))
        self.MaxCtrl.SetToolTip(wx.ToolTip("The currently displayed maximum range value."))
        self.MaxCtrl.Bind(EVT_VALUE_CHANGED, self.OnMax)

    def CreateLayout(self):
        """Arrange the controls: label, log checkbox, tick row, range box."""
        sizer0 = wx.BoxSizer(wx.HORIZONTAL)
        sizer0.Add(self.NTicksCtrl, 0, wx.RIGHT, border=10)
        sizer0.Add(self.BaseCtrl, 0)
        rsizer1 = wx.BoxSizer(wx.HORIZONTAL)
        rsizer1.Add((15, 0), 0)
        rsizer1.Add(self.MinCtrl, 0, wx.RIGHT, border=10)
        rsizer1.Add(self.MaxCtrl, 0)
        sizer1 = wx.StaticBoxSizer(self.rangeBox, wx.VERTICAL)
        sizer1.Add((0, 10), 0)
        sizer1.Add(rsizer1, 1, wx.LEFT | wx.RIGHT, border=5)
        sizer1.Add((0, 10), 0)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.TitleCtrl, 0, wx.EXPAND)
        sizer.Add((0, 10), 0)
        sizer.Add(self.LogAxisCtrl, 0)
        sizer.Add((0, 10), 0)
        sizer.Add(sizer0, 0, wx.EXPAND)
        sizer.Add((0, 15), 0)
        sizer.Add(sizer1, 0, wx.EXPAND)
        mainSizer = wx.BoxSizer(wx.VERTICAL)
        mainSizer.Add(sizer, 1, wx.EXPAND | wx.ALL, border=10)
        self.SetSizerAndFit(mainSizer)

    def UpdateControls(self):
        """Refresh every control from the plot window's current axis state."""
        # Retrieve and set the values of axis properties
        self.TitleCtrl.SetValue(self.plotWindow.GetAxisTitle(self.axisId))
        self.NTicksCtrl.SetValue(self.plotWindow.GetNumAxisLabels(self.axisId))
        self.LogAxisCtrl.SetValue(self.plotWindow.GetLogAxis(self.axisId))
        self.BaseCtrl.SetValue(self.plotWindow.GetAxisBase(self.axisId))
        self.UpdateRangeFromPlot()

    def UpdateRangeFromPlot(self):
        """Refresh only the min/max controls from the plot window."""
        self.MinCtrl.SetValue(self.plotWindow.GetAxisRangeMin(self.axisId))
        self.MaxCtrl.SetValue(self.plotWindow.GetAxisRangeMax(self.axisId))

    def OnLog(self, event):
        """Toggle logarithmic scaling for the axis."""
        curLog = self.plotWindow.GetLogAxis(self.axisId)
        newLog = self.LogAxisCtrl.IsChecked()
        # Check if the new value is different
        if newLog != curLog:
            self.plotWindow.SetLogAxis(self.axisId, newLog)
            # A scale change can alter the displayed range.
            self.UpdateRangeFromPlot()

    def OnBase(self, event):
        """Apply a new tick base; must parse as a float greater than 1."""
        curBase = self.plotWindow.GetAxisBase(self.axisId)
        try:
            newBase = float(self.BaseCtrl.GetValue())
        except ValueError:
            wx.Bell()
            self.BaseCtrl.SetValue(curBase)
            return
        # Check if the new value is different
        if newBase != curBase:
            if newBase > 1:
                self.plotWindow.SetAxisBase(self.axisId, newBase)
            else:
                wx.Bell()
                self.BaseCtrl.SetValue(curBase)

    def OnTitle(self, event):
        """Apply a new axis title."""
        curTitle = str(self.plotWindow.GetAxisTitle(self.axisId))
        newTitle = self.TitleCtrl.GetValue()
        # Check if the new value is different
        if newTitle != curTitle:
            self.plotWindow.SetAxisTitle(self.axisId, newTitle)

    def OnNTicks(self, event):
        """Apply a new tick count; clamped to [1, self.maxTicks]."""
        curTicks = self.plotWindow.GetNumAxisLabels(self.axisId)
        try:
            newTicks = int(self.NTicksCtrl.GetValue())
        except ValueError:
            wx.Bell()
            self.NTicksCtrl.SetValue(curTicks)
            return
        # Check if the new value is different
        if newTicks != curTicks:
            if 0 < newTicks <= self.maxTicks:
                self.plotWindow.SetNumAxisLabels(self.axisId, newTicks)
            else:
                # Out of range: beep and snap the control to the nearest bound.
                wx.Bell()
                if newTicks > self.maxTicks:
                    self.NTicksCtrl.SetValue(self.maxTicks)
                else:
                    self.NTicksCtrl.SetValue(1)

    def OnMin(self, event):
        """Apply a new range minimum from the Min control."""
        # Renamed from `min` to avoid shadowing the builtin.
        curMin = self.plotWindow.GetAxisRangeMin(self.axisId)
        try:
            newMin = float(self.MinCtrl.GetValue())
        except ValueError:
            wx.Bell()
            # BUG FIX: restore the saved current minimum on invalid input.
            # The previous code called `range.min()` on the builtin `range`
            # type, which raised AttributeError instead of resetting the
            # control (compare OnMax, which restores the saved value).
            self.MinCtrl.SetValue(curMin)
            return
        # Check if the new value is different
        if newMin != curMin:
            self.plotWindow.SetAxisRange(self.axisId, newMin, self.plotWindow.GetAxisRangeMax(self.axisId))

    def OnMax(self, event):
        """Apply a new range maximum from the Max control."""
        # Renamed from `max` to avoid shadowing the builtin.
        curMax = self.plotWindow.GetAxisRangeMax(self.axisId)
        try:
            newMax = float(self.MaxCtrl.GetValue())
        except ValueError:
            wx.Bell()
            self.MaxCtrl.SetValue(curMax)
            return
        # Check if the new value is different
        if newMax != curMax:
            self.plotWindow.SetAxisRange(self.axisId, self.plotWindow.GetAxisRangeMin(self.axisId), newMax)
|
# Copyright (C) 2002-2021 S[&]T, The Netherlands.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import wx
from .labeledtextctrl import LabeledTextCtrl, EVT_VALUE_CHANGED
from .util import DetermineCharSize
from .validators import DigitValidator, FloatValidator
class AxisPropertyPanel(wx.Panel):
def __init__(self, parent, plotWindow, axisId):
panelstyle = wx.TAB_TRAVERSAL
if wx.Platform == '__WXGTK__':
panelstyle |= wx.SUNKEN_BORDER
wx.Panel.__init__(self, parent, -1, style=panelstyle)
self.plotWindow = plotWindow
self.maxTicks = 50
self.axisId = axisId
# Create and configure all widgets
self.CreateControls()
self.CreateLayout()
def CreateControls(self):
(charWidth, charHeight) = DetermineCharSize(self)
# static boxes should be created before the controls they contain in order to preserve the correct Z-Order
self.rangeBox = wx.StaticBox(self, -1, "Range")
self.TitleCtrl = LabeledTextCtrl(self, -1, label="Label:", size=(8 * charWidth, -1), style=wx.TE_PROCESS_ENTER)
self.TitleCtrl.SetValue(self.plotWindow.GetAxisTitle(self.axisId))
self.TitleCtrl.SetToolTip(wx.ToolTip("The axis title will be displayed centered on the outside of the axis "
"in the plot panel."))
self.TitleCtrl.Bind(EVT_VALUE_CHANGED, self.OnTitle)
self.LogAxisCtrl = wx.CheckBox(self, -1, "Logarithmic Scale")
self.LogAxisCtrl.SetValue(self.plotWindow.GetLogAxis(self.axisId))
self.LogAxisCtrl.SetToolTip(wx.ToolTip("Use a logarithmic axis. Disabled if the current range of axis values "
"contains the number 0."))
self.LogAxisCtrl.Bind(wx.EVT_CHECKBOX, self.OnLog)
self.NTicksCtrl = LabeledTextCtrl(self, -1, label="Nr of Ticks:", validator=DigitValidator(),
size=(3 * charWidth, -1), style=wx.TE_PROCESS_ENTER)
self.NTicksCtrl.SetValue(self.plotWindow.GetNumAxisLabels(self.axisId))
self.NTicksCtrl.SetToolTip(wx.ToolTip("The target number of ticks to display on the axis. "
"The actual number of ticks is calculated; this control's value is "
"what the calculation will try to aim for."))
self.NTicksCtrl.Bind(EVT_VALUE_CHANGED, self.OnNTicks)
self.BaseCtrl = LabeledTextCtrl(self, -1, label="Base:", validator=FloatValidator(), formatstring="%-#.3g",
size=(5 * charWidth, -1), style=wx.TE_PROCESS_ENTER)
self.BaseCtrl.SetValue(self.plotWindow.GetAxisBase(self.axisId))
self.BaseCtrl.SetToolTip(wx.ToolTip("The base for the linear and logarithmic tick calculation for the axis. "
"This value can be fractional but must be greater than 1."))
self.BaseCtrl.Bind(EVT_VALUE_CHANGED, self.OnBase)
self.MinCtrl = LabeledTextCtrl(self, -1, label="Min:", validator=FloatValidator(), formatstring="%-#.3g",
size=(5 * charWidth, -1), style=wx.TE_PROCESS_ENTER)
self.MinCtrl.SetValue(self.plotWindow.GetAxisRangeMin(self.axisId))
self.MinCtrl.SetToolTip(wx.ToolTip("The currently displayed minimum range value."))
self.MinCtrl.Bind(EVT_VALUE_CHANGED, self.OnMin)
self.MaxCtrl = LabeledTextCtrl(self, -1, label="Max:", validator=FloatValidator(), formatstring="%-#.3g",
size=(5 * charWidth, -1), style=wx.TE_PROCESS_ENTER)
self.MaxCtrl.SetValue(self.plotWindow.GetAxisRangeMax(self.axisId))
self.MaxCtrl.SetToolTip(wx.ToolTip("The currently displayed maximum range value."))
self.MaxCtrl.Bind(EVT_VALUE_CHANGED, self.OnMax)
def CreateLayout(self):
sizer0 = wx.BoxSizer(wx.HORIZONTAL)
sizer0.Add(self.NTicksCtrl, 0, wx.RIGHT, border=10)
sizer0.Add(self.BaseCtrl, 0)
rsizer1 = wx.BoxSizer(wx.HORIZONTAL)
rsizer1.Add((15, 0), 0)
rsizer1.Add(self.MinCtrl, 0, wx.RIGHT, border=10)
rsizer1.Add(self.MaxCtrl, 0)
sizer1 = wx.StaticBoxSizer(self.rangeBox, wx.VERTICAL)
sizer1.Add((0, 10), 0)
sizer1.Add(rsizer1, 1, wx.LEFT | wx.RIGHT, border=5)
sizer1.Add((0, 10), 0)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.TitleCtrl, 0, wx.EXPAND)
sizer.Add((0, 10), 0)
sizer.Add(self.LogAxisCtrl, 0)
sizer.Add((0, 10), 0)
sizer.Add(sizer0, 0, wx.EXPAND)
sizer.Add((0, 15), 0)
sizer.Add(sizer1, 0, wx.EXPAND)
mainSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer.Add(sizer, 1, wx.EXPAND | wx.ALL, border=10)
self.SetSizerAndFit(mainSizer)
def UpdateControls(self):
# Retrieve and set the values of axis properties
self.TitleCtrl.SetValue(self.plotWindow.GetAxisTitle(self.axisId))
self.NTicksCtrl.SetValue(self.plotWindow.GetNumAxisLabels(self.axisId))
self.LogAxisCtrl.SetValue(self.plotWindow.GetLogAxis(self.axisId))
self.BaseCtrl.SetValue(self.plotWindow.GetAxisBase(self.axisId))
self.UpdateRangeFromPlot()
def UpdateRangeFromPlot(self):
self.MinCtrl.SetValue(self.plotWindow.GetAxisRangeMin(self.axisId))
self.MaxCtrl.SetValue(self.plotWindow.GetAxisRangeMax(self.axisId))
def OnLog(self, event):
curLog = self.plotWindow.GetLogAxis(self.axisId)
newLog = self.LogAxisCtrl.IsChecked()
# Check if the new value is different
if newLog != curLog:
self.plotWindow.SetLogAxis(self.axisId, newLog)
self.UpdateRangeFromPlot()
def OnBase(self, event):
curBase = self.plotWindow.GetAxisBase(self.axisId)
try:
newBase = float(self.BaseCtrl.GetValue())
except ValueError:
wx.Bell()
self.BaseCtrl.SetValue(curBase)
return
# Check if the new value is different
if newBase != curBase:
if newBase > 1:
self.plotWindow.SetAxisBase(self.axisId, newBase)
else:
wx.Bell()
self.BaseCtrl.SetValue(curBase)
def OnTitle(self, event):
curTitle = str(self.plotWindow.GetAxisTitle(self.axisId))
newTitle = self.TitleCtrl.GetValue()
# Check if the new value is different
if newTitle != curTitle:
self.plotWindow.SetAxisTitle(self.axisId, newTitle)
def OnNTicks(self, event):
curTicks = self.plotWindow.GetNumAxisLabels(self.axisId)
try:
newTicks = int(self.NTicksCtrl.GetValue())
except ValueError:
wx.Bell()
self.NTicksCtrl.SetValue(curTicks)
return
# Check if the new value is different
if newTicks != curTicks:
if 0 < newTicks <= self.maxTicks:
self.plotWindow.SetNumAxisLabels(self.axisId, newTicks)
else:
wx.Bell()
if newTicks > self.maxTicks:
self.NTicksCtrl.SetValue(self.maxTicks)
else:
self.NTicksCtrl.SetValue(1)
def OnMin(self, event):
    """Apply a new range minimum from the Min control."""
    # Renamed from `min` to avoid shadowing the builtin.
    curMin = self.plotWindow.GetAxisRangeMin(self.axisId)
    try:
        newMin = float(self.MinCtrl.GetValue())
    except ValueError:
        wx.Bell()
        # BUG FIX: restore the saved current minimum on invalid input.
        # The previous code called `range.min()` on the builtin `range`
        # type, which raised AttributeError instead of resetting the
        # control (compare OnMax, which restores the saved value).
        self.MinCtrl.SetValue(curMin)
        return
    # Check if the new value is different
    if newMin != curMin:
        self.plotWindow.SetAxisRange(self.axisId, newMin, self.plotWindow.GetAxisRangeMax(self.axisId))
def OnMax(self, event):
max = self.plotWindow.GetAxisRangeMax(self.axisId)
try:
newMax = float(self.MaxCtrl.GetValue())
except ValueError:
wx.Bell()
self.MaxCtrl.SetValue(max)
return
# Check if the new value is different
if newMax != max:
self.plotWindow.SetAxisRange(self.axisId, self.plotWindow.GetAxisRangeMin(self.axisId), newMax)
|
en
| 0.728187
|
# Copyright (C) 2002-2021 S[&]T, The Netherlands. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # Create and configure all widgets # static boxes should be created before the controls they contain in order to preserve the correct Z-Order #.3g", #.3g", #.3g", # Retrieve and set the values of axis properties # Check if the new value is different # Check if the new value is different # Check if the new value is different # Check if the new value is different # Check if the new value is different # Check if the new value is different
| 1.288332
| 1
|
venv/lib/python2.7/site-packages/pyramid/scaffolds/__init__.py
|
bhavul/GIDS-Endurance-Hacker-Puzzle
| 0
|
6628563
|
import binascii
import os
from textwrap import dedent
from pyramid.compat import native_
from pyramid.scaffolds.template import Template # API
class PyramidTemplate(Template):
    """
    A class that can be used as a base class for Pyramid scaffolding
    templates.
    """
    def pre(self, command, output_dir, vars):
        """ Overrides :meth:`pyramid.scaffolds.template.Template.pre`, adding
        several variables to the default variables list (including
        ``random_string`` and ``package_logger``). It also renames the
        package logger to ``app`` in the rare case the package itself is
        named ``root`` (so it does not collide with the root logger).
        """
        vars['random_string'] = native_(binascii.hexlify(os.urandom(20)))
        package_logger = vars['package']
        if package_logger == 'root':
            # Rename the app logger in the rare case a project is named 'root'
            package_logger = 'app'
        vars['package_logger'] = package_logger
        return Template.pre(self, command, output_dir, vars)
    def post(self, command, output_dir, vars): # pragma: no cover
        """ Overrides :meth:`pyramid.scaffolds.template.Template.post`, to
        print "Welcome to Pyramid. Sorry for the convenience." after a
        successful scaffolding rendering."""
        separator = "=" * 79
        msg = dedent(
            """
            %(separator)s
            Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials/en/latest/
            Documentation: http://docs.pylonsproject.org/projects/pyramid/en/latest/
            Twitter: https://twitter.com/PylonsProject
            Mailing List: https://groups.google.com/forum/#!forum/pylons-discuss
            Welcome to Pyramid. Sorry for the convenience.
            %(separator)s
        """ % {'separator': separator})
        self.out(msg)
        return Template.post(self, command, output_dir, vars)
    def out(self, msg): # pragma: no cover (replaceable testing hook)
        # Output hook; tests can replace this to capture messages.
        print(msg)
class StarterProjectTemplate(PyramidTemplate):
    # Scaffold rendered from the 'starter' template directory.
    _template_dir = 'starter'
    summary = 'Pyramid starter project using URL dispatch and Jinja2'
class ZODBProjectTemplate(PyramidTemplate):
    # Scaffold rendered from the 'zodb' template directory.
    _template_dir = 'zodb'
    summary = 'Pyramid project using ZODB, traversal, and Chameleon'
class AlchemyProjectTemplate(PyramidTemplate):
    # Scaffold rendered from the 'alchemy' template directory.
    _template_dir = 'alchemy'
    summary = (
        'Pyramid project using SQLAlchemy, SQLite, URL dispatch, and '
        'Jinja2')
|
import binascii
import os
from textwrap import dedent
from pyramid.compat import native_
from pyramid.scaffolds.template import Template # API
class PyramidTemplate(Template):
"""
A class that can be used as a base class for Pyramid scaffolding
templates.
"""
def pre(self, command, output_dir, vars):
""" Overrides :meth:`pyramid.scaffolds.template.Template.pre`, adding
several variables to the default variables list (including
``random_string``, and ``package_logger``). It also prevents common
misnamings (such as naming a package "site" or naming a package
logger "root".
"""
vars['random_string'] = native_(binascii.hexlify(os.urandom(20)))
package_logger = vars['package']
if package_logger == 'root':
# Rename the app logger in the rare case a project is named 'root'
package_logger = 'app'
vars['package_logger'] = package_logger
return Template.pre(self, command, output_dir, vars)
def post(self, command, output_dir, vars): # pragma: no cover
""" Overrides :meth:`pyramid.scaffolds.template.Template.post`, to
print "Welcome to Pyramid. Sorry for the convenience." after a
successful scaffolding rendering."""
separator = "=" * 79
msg = dedent(
"""
%(separator)s
Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials/en/latest/
Documentation: http://docs.pylonsproject.org/projects/pyramid/en/latest/
Twitter: https://twitter.com/PylonsProject
Mailing List: https://groups.google.com/forum/#!forum/pylons-discuss
Welcome to Pyramid. Sorry for the convenience.
%(separator)s
""" % {'separator': separator})
self.out(msg)
return Template.post(self, command, output_dir, vars)
def out(self, msg): # pragma: no cover (replaceable testing hook)
print(msg)
class StarterProjectTemplate(PyramidTemplate):
_template_dir = 'starter'
summary = 'Pyramid starter project using URL dispatch and Jinja2'
class ZODBProjectTemplate(PyramidTemplate):
_template_dir = 'zodb'
summary = 'Pyramid project using ZODB, traversal, and Chameleon'
class AlchemyProjectTemplate(PyramidTemplate):
_template_dir = 'alchemy'
summary = (
'Pyramid project using SQLAlchemy, SQLite, URL dispatch, and '
'Jinja2')
|
en
| 0.679873
|
# API A class that can be used as a base class for Pyramid scaffolding templates. Overrides :meth:`pyramid.scaffolds.template.Template.pre`, adding several variables to the default variables list (including ``random_string``, and ``package_logger``). It also prevents common misnamings (such as naming a package "site" or naming a package logger "root". # Rename the app logger in the rare case a project is named 'root' # pragma: no cover Overrides :meth:`pyramid.scaffolds.template.Template.post`, to print "Welcome to Pyramid. Sorry for the convenience." after a successful scaffolding rendering. %(separator)s Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials/en/latest/ Documentation: http://docs.pylonsproject.org/projects/pyramid/en/latest/ Twitter: https://twitter.com/PylonsProject Mailing List: https://groups.google.com/forum/#!forum/pylons-discuss Welcome to Pyramid. Sorry for the convenience. %(separator)s # pragma: no cover (replaceable testing hook)
| 2.583869
| 3
|
ML_CW1/assgn_1_part_1/3_regularized_linear_regression/compute_cost.py
|
ShellySrivastava/Machine-Learning
| 0
|
6628564
|
<reponame>ShellySrivastava/Machine-Learning
from calculate_hypothesis import *
def compute_cost(X, y, theta):
"""
:param X : 2D array of our dataset
:param y : 1D array of the groundtruth labels of the dataset
:param theta : 1D array of the trainable parameters
"""
# initialize cost
J = 0.0
# get number of training examples
m = y.shape[0]
for i in range(m):
hypothesis = calculate_hypothesis(X, theta, i)
output = y[i]
squared_error = (hypothesis - output)**2
J = J + squared_error
J = J/(2*m)
return J
|
from calculate_hypothesis import *
def compute_cost(X, y, theta):
"""
:param X : 2D array of our dataset
:param y : 1D array of the groundtruth labels of the dataset
:param theta : 1D array of the trainable parameters
"""
# initialize cost
J = 0.0
# get number of training examples
m = y.shape[0]
for i in range(m):
hypothesis = calculate_hypothesis(X, theta, i)
output = y[i]
squared_error = (hypothesis - output)**2
J = J + squared_error
J = J/(2*m)
return J
|
en
| 0.392899
|
:param X : 2D array of our dataset :param y : 1D array of the groundtruth labels of the dataset :param theta : 1D array of the trainable parameters # initialize cost # get number of training examples
| 3.463728
| 3
|
outputs/apps.py
|
jayvdb/django-outputs
| 0
|
6628565
|
<filename>outputs/apps.py
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
from django.db.utils import ProgrammingError
class OutputsConfig(AppConfig):
    # App configuration: on startup, re-schedules every active Scheduler.
    name = 'outputs'
    verbose_name = _('Outputs')
    def ready(self):
        # ProgrammingError is swallowed deliberately: when `ready()` runs
        # before the initial migrate, the Scheduler table does not exist yet
        # and the query would otherwise crash startup.
        try:
            self.schedule_jobs()
        except ProgrammingError:
            pass
    def schedule_jobs(self):
        print('Scheduling outputs jobs...')
        # Local import: models cannot be imported at module load time,
        # before the app registry is ready.
        from outputs.models import Scheduler
        for output_scheduler in Scheduler.objects.active():
            # Presumably registers the scheduler with the job queue --
            # see Scheduler.schedule() for the actual mechanism.
            output_scheduler.schedule()
|
<filename>outputs/apps.py
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
from django.db.utils import ProgrammingError
class OutputsConfig(AppConfig):
name = 'outputs'
verbose_name = _('Outputs')
def ready(self):
try:
self.schedule_jobs()
except ProgrammingError:
pass
def schedule_jobs(self):
print('Scheduling outputs jobs...')
from outputs.models import Scheduler
for output_scheduler in Scheduler.objects.active():
output_scheduler.schedule()
|
none
| 1
| 2.023039
| 2
|
|
setup.py
|
whn09/fastmoe
| 0
|
6628566
|
import setuptools
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
import os
# Extra compiler flags and link libraries for the CUDA extension; extended
# below when NCCL support is requested.
cxx_flags = []
ext_libs = []
# Opt-in NCCL (multi-GPU communication) support via the USE_NCCL env var.
if os.environ.get('USE_NCCL', '0') == '1':
    cxx_flags.append('-DMOE_USE_NCCL')
    ext_libs.append('nccl')
if __name__ == '__main__':
    setuptools.setup(
        name='fastmoe',
        version='0.1.1',
        description='An efficient Mixture-of-Experts system for PyTorch',
        author='<NAME>, <NAME> and <NAME>',
        author_email='<EMAIL>',
        license='Apache-2',
        url='https://github.com/laekov/fastmoe',
        packages=['fmoe'],
        ext_modules=[
            # Single CUDA extension bundling the MoE compute/comm kernels.
            CUDAExtension(
                name='fmoe_cuda',
                sources=[
                    'cuda/moe.cpp',
                    'cuda/cuda_stream_manager.cpp',
                    'cuda/moe_compute_kernel.cu',
                    'cuda/moe_comm_kernel.cu',
                    'cuda/moe_fused_kernel.cu',
                ],
                extra_compile_args={
                    'cxx': cxx_flags,
                    'nvcc': cxx_flags
                },
                libraries=ext_libs
            )
        ],
        cmdclass={
            'build_ext': BuildExtension
        })
|
import setuptools
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
import os
cxx_flags = []
ext_libs = []
if os.environ.get('USE_NCCL', '0') == '1':
cxx_flags.append('-DMOE_USE_NCCL')
ext_libs.append('nccl')
if __name__ == '__main__':
setuptools.setup(
name='fastmoe',
version='0.1.1',
description='An efficient Mixture-of-Experts system for PyTorch',
author='<NAME>, <NAME> and <NAME>',
author_email='<EMAIL>',
license='Apache-2',
url='https://github.com/laekov/fastmoe',
packages=['fmoe'],
ext_modules=[
CUDAExtension(
name='fmoe_cuda',
sources=[
'cuda/moe.cpp',
'cuda/cuda_stream_manager.cpp',
'cuda/moe_compute_kernel.cu',
'cuda/moe_comm_kernel.cu',
'cuda/moe_fused_kernel.cu',
],
extra_compile_args={
'cxx': cxx_flags,
'nvcc': cxx_flags
},
libraries=ext_libs
)
],
cmdclass={
'build_ext': BuildExtension
})
|
none
| 1
| 1.567172
| 2
|
|
python/trezorlib/tests/device_tests/test_msg_signtx.py
|
Kayuii/trezor-crypto
| 0
|
6628567
|
# This file is part of the Trezor project.
#
# Copyright (C) 2012-2018 SatoshiLabs and contributors
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the License along with this library.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.
import pytest
from trezorlib import btc, messages as proto
from trezorlib.tools import H_, CallException, btc_hash, parse_path
from ..support.tx_cache import tx_cache
from .common import TrezorTest
from .conftest import TREZOR_VERSION
TXHASH_157041 = bytes.fromhex(
"1570416eb4302cf52979afd5e6909e37d8fdd874301f7cc87e547e509cb1caa6"
)
TXHASH_39a29e = bytes.fromhex(
"39a29e954977662ab3879c66fb251ef753e0912223a83d1dcb009111d28265e5"
)
TXHASH_4a7b7e = bytes.fromhex(
"4a7b7e0403ae5607e473949cfa03f09f2cd8b0f404bf99ce10b7303d86280bf7"
)
TXHASH_54aa56 = bytes.fromhex(
"54aa5680dea781f45ebb536e53dffc526d68c0eb5c00547e323b2c32382dfba3"
)
TXHASH_58497a = bytes.fromhex(
"58497a7757224d1ff1941488d23087071103e5bf855f4c1c44e5c8d9d82ca46e"
)
TXHASH_6f90f3 = bytes.fromhex(
"6f90f3c7cbec2258b0971056ef3fe34128dbde30daa9c0639a898f9977299d54"
)
TXHASH_c63e24 = bytes.fromhex(
"c63e24ed820c5851b60c54613fbc4bcb37df6cd49b4c96143e99580a472f79fb"
)
TXHASH_c6be22 = bytes.fromhex(
"c6be22d34946593bcad1d2b013e12f74159e69574ffea21581dad115572e031c"
)
TXHASH_d5f65e = bytes.fromhex(
"d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882"
)
TXHASH_d6da21 = bytes.fromhex(
"d6da21677d7cca5f42fbc7631d062c9ae918a0254f7c6c22de8e8cb7fd5b8236"
)
TXHASH_d2dcda = bytes.fromhex(
"d2dcdaf547ea7f57a713c607f15e883ddc4a98167ee2c43ed953c53cb5153e24"
)
TXHASH_e5040e = bytes.fromhex(
"e5040e1bc1ae7667ffb9e5248e90b2fb93cd9150234151ce90e14ab2f5933bcd"
)
TXHASH_50f6f1 = bytes.fromhex(
"50f6f1209ca92d7359564be803cb2c932cde7d370f7cee50fd1fad6790f6206d"
)
TXHASH_2bac7a = bytes.fromhex(
"2bac7ad1dec654579a71ea9555463f63ac7b7df9d8ba67b4682bba4e514d0f0c"
)
def check_sign_tx(
client,
coin_name,
inputs,
outputs,
fee_too_high=False,
failure=None,
unknown_path=False,
):
__tracebackhide__ = True
expected_responses = []
txes = tx_cache(coin_name)
t = proto.RequestType
b = proto.ButtonRequestType
def tx_request(request_type, **kwargs):
if kwargs:
details = proto.TxRequestDetailsType(**kwargs)
else:
details = None
return proto.TxRequest(request_type=request_type, details=details)
def btn(code):
return proto.ButtonRequest(code=code)
for i, inp in enumerate(inputs):
expected_responses.append(tx_request(t.TXINPUT, request_index=i))
if unknown_path and TREZOR_VERSION != 1:
expected_responses.append(btn(b.UnknownDerivationPath))
expected_responses.append(tx_request(t.TXMETA, tx_hash=inp.prev_hash))
if inp.script_type in (
proto.InputScriptType.SPENDP2SHWITNESS,
proto.InputScriptType.SPENDWITNESS,
):
continue
prev_tx = txes[inp.prev_hash]
for pi in range(len(prev_tx.inputs)):
r = tx_request(t.TXINPUT, request_index=pi, tx_hash=inp.prev_hash)
expected_responses.append(r)
for po in range(len(prev_tx.bin_outputs)):
r = tx_request(t.TXOUTPUT, request_index=po, tx_hash=inp.prev_hash)
expected_responses.append(r)
for i, outp in enumerate(outputs):
expected_responses.append(tx_request(t.TXOUTPUT, request_index=i))
if outp.address is not None or hasattr(outp, "force_confirm"):
expected_responses.append(btn(b.ConfirmOutput))
if fee_too_high:
expected_responses.append(btn(b.FeeOverThreshold))
if failure is not None:
expected_responses.append(proto.Failure(code=failure))
else:
expected_responses.append(btn(b.SignTx))
input_requests = [
tx_request(t.TXINPUT, request_index=i) for i in range(len(inputs))
]
output_requests = [
tx_request(t.TXOUTPUT, request_index=i) for i in range(len(outputs))
]
# No idea why the flow is like this. But it is.
for _ in range(len(inputs)):
expected_responses.extend(input_requests)
expected_responses.extend(output_requests)
# and once more for good measure
expected_responses.extend(output_requests)
expected_responses.append(tx_request(t.TXFINISHED))
with client:
client.set_expected_responses(expected_responses)
return btc.sign_tx(client, coin_name, inputs, outputs, prev_txes=txes)
class TestMsgSigntx(TrezorTest):
def test_one_one_fee(self):
self.setup_mnemonic_nopin_nopassphrase()
# tx: d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882
# input 0: 0.0039 BTC
inp1 = proto.TxInputType(
address_n=[0], # 14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e
# amount=390000,
prev_hash=TXHASH_d5f65e,
prev_index=0,
)
out1 = proto.TxOutputType(
address="1MJ2tj2ThBE62zXbBYA5ZaN3fdve5CPAz1",
amount=390000 - 10000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
_, serialized_tx = check_sign_tx(
self.client, "Bitcoin", [inp1], [out1], unknown_path=True
)
# Accepted by network: tx fd79435246dee76b2f159d2db08032d666c95adc544de64c8c49f474df4a7fee
assert (
serialized_tx.hex()
== "010000000182488650ef25a58fef6788bd71b8212038d7f2bbe4750bc7bcb44701e85ef6d5000000006b4830450221009a0b7be0d4ed3146ee262b42202841834698bb3ee39c24e7437df208b8b7077102202b79ab1e7736219387dffe8d615bbdba87e11477104b867ef47afed1a5ede7810121023230848585885f63803a0a8aecdd6538792d5c539215c91698e315bf0253b43dffffffff0160cc0500000000001976a914de9b2a8da088824e8fe51debea566617d851537888ac00000000"
)
def test_testnet_one_two_fee(self):
self.setup_mnemonic_allallall()
# see 87be0736f202f7c2bff0781b42bad3e0cdcb54761939da69ea793a3735552c56
# tx: e5040e1bc1ae7667ffb9e5248e90b2fb93cd9150234151ce90e14ab2f5933bcd
# input 0: 0.31 BTC
inp1 = proto.TxInputType(
address_n=parse_path("44'/1'/0'/0/0"),
# amount=31000000,
prev_hash=TXHASH_e5040e,
prev_index=0,
)
out1 = proto.TxOutputType(
address="msj42CCGruhRsFrGATiUuh25dtxYtnpbTx",
amount=30090000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out2 = proto.TxOutputType(
address_n=parse_path("44'/1'/0'/1/0"),
amount=900000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
_, serialized_tx = check_sign_tx(self.client, "Testnet", [inp1], [out1, out2])
assert (
serialized_tx.hex()
== "0100000001cd3b93f5b24ae190ce5141235091cd93fbb2908e24e5b9ff6776aec11b0e04e5000000006b483045022100eba3bbcbb82ab1ebac88a394e8fb53b0263dadbb3e8072f0a21ee62818c911060220686a9b7f306d028b54a228b5c47cc6c27b1d01a3b0770440bcc64d55d8bace2c0121030e669acac1f280d1ddf441cd2ba5e97417bf2689e4bbec86df4f831bf9f7ffd0ffffffff021023cb01000000001976a91485eb47fe98f349065d6f044e27a4ac541af79ee288aca0bb0d00000000001976a9143d3cca567e00a04819742b21a696a67da796498b88ac00000000"
)
def test_testnet_fee_too_high(self):
self.setup_mnemonic_nopin_nopassphrase()
# tx: 6f90f3c7cbec2258b0971056ef3fe34128dbde30daa9c0639a898f9977299d54
# input 1: 10.00000000 BTC
inp1 = proto.TxInputType(
address_n=[0], # mirio8q3gtv7fhdnmb3TpZ4EuafdzSs7zL
# amount=1000000000,
prev_hash=TXHASH_6f90f3,
prev_index=1,
)
out1 = proto.TxOutputType(
address="mfiGQVPcRcaEvQPYDErR34DcCovtxYvUUV",
amount=1000000000 - 500000000 - 100000000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out2 = proto.TxOutputType(
address_n=[2],
amount=500000000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out2.force_confirm = True
_, serialized_tx = check_sign_tx(
self.client,
"Testnet",
[inp1],
[out1, out2],
fee_too_high=True,
unknown_path=True,
)
assert (
serialized_tx.hex()
== "0100000001549d2977998f899a63c0a9da30dedb2841e33fef561097b05822eccbc7f3906f010000006a47304402205ea68e9d52d4be14420ccecf7f2e11489d49b86bedb79ee99b5e9b7188884150022056219cb3384a5df8048cca286a9533403dbda1571afd84b51379cdaee6a6dea80121023230848585885f63803a0a8aecdd6538792d5c539215c91698e315bf0253b43dffffffff020084d717000000001976a9140223b1a09138753c9cb0baf95a0a62c82711567a88ac0065cd1d000000001976a9142db345c36563122e2fd0f5485fb7ea9bbf7cb5a288ac00000000"
)
def test_one_two_fee(self):
self.setup_mnemonic_allallall()
# tx: c275c333fd1b36bef4af316226c66a8b3693fbfcc081a5e16a2ae5fcb09e92bf
inp1 = proto.TxInputType(
address_n=parse_path(
"m/44'/0'/0'/0/5"
), # 1GA9u9TfCG7SWmKCveBumdA1TZpfom6ZdJ
# amount=50000,
prev_hash=TXHASH_50f6f1,
prev_index=1,
)
out1 = proto.TxOutputType(
address_n=parse_path(
"m/44'/0'/0'/1/3"
), # 1EcL6AyfQTyWKGvXwNSfsWoYnD3whzVFdu
amount=30000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out2 = proto.TxOutputType(
address="<KEY>",
amount=10000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
_, serialized_tx = check_sign_tx(self.client, "Bitcoin", [inp1], [out1, out2])
assert (
serialized_tx.hex()
== "01000000016d20f69067ad1ffd50ee7c0f377dde2c932ccb03e84b5659732da99c20f1f650010000006a47304402203429bd3ce7b38c5c1e8a15340edd79ced41a2939aae62e259d2e3d18e0c5ee7602201b83b10ebc4d6dcee3f9eb42ba8f1ef8a059a05397e0c1b9223d1565a3e6ec01012102a7a079c1ef9916b289c2ff21a992c808d0de3dfcf8a9f163205c5c9e21f55d5cffffffff0230750000000000001976a914954820f1de627a703596ac0396f986d958e3de4c88ac10270000000000001976a91405427736705cfbfaff76b1cff48283707fb1037088ac00000000"
)
def test_one_three_fee(self):
self.setup_mnemonic_nopin_nopassphrase()
# tx: d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882
# input 0: 0.0039 BTC
inp1 = proto.TxInputType(
address_n=[0], # 14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e
# amount=390000,
prev_hash=TXHASH_d5f65e,
prev_index=0,
)
out1 = proto.TxOutputType(
address="1MJ2tj2ThBE62zXbBYA5ZaN3fdve5CPAz1",
amount=390000 - 80000 - 12000 - 10000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out2 = proto.TxOutputType(
address="13uaUYn6XAooo88QvAqAVsiVvr2mAXutqP",
amount=12000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out3 = proto.TxOutputType(
address_n=[1], amount=80000, script_type=proto.OutputScriptType.PAYTOADDRESS
)
out3.force_confirm = True
_, serialized_tx = check_sign_tx(
self.client, "Bitcoin", [inp1], [out1, out2, out3], unknown_path=True
)
assert (
serialized_tx.hex()
== "010000000182488650ef25a58fef6788bd71b8212038d7f2bbe4750bc7bcb44701e85ef6d5000000006b483045022100e695e2c530c7c0fc32e6b79b7cff56a7f70a8c9da787534f46b4204070f914fc02207b0879a81408a11e23b11d4c7965c62b5fc6d5c2d92340f5ee2da7b40e99314a0121023230848585885f63803a0a8aecdd6538792d5c539215c91698e315bf0253b43dffffffff0300650400000000001976a914de9b2a8da088824e8fe51debea566617d851537888ace02e0000000000001976a9141fe1d337fb81afca42818051e12fd18245d1b17288ac80380100000000001976a9140223b1a09138753c9cb0baf95a0a62c82711567a88ac00000000"
)
def test_two_two(self):
self.setup_mnemonic_nopin_nopassphrase()
# tx: c6be22d34946593bcad1d2b013e12f74159e69574ffea21581dad115572e031c
# input 1: 0.0010 BTC
# tx: 58497a7757224d1ff1941488d23087071103e5bf855f4c1c44e5c8d9d82ca46e
# input 1: 0.0011 BTC
inp1 = proto.TxInputType(
address_n=[1], # 1CK7SJdcb8z9HuvVft3D91HLpLC6KSsGb
# amount=100000,
prev_hash=TXHASH_c6be22,
prev_index=1,
)
inp2 = proto.TxInputType(
address_n=[2], # 15AeAhtNJNKyowK8qPHwgpXkhsokzLtUpG
# amount=110000,
prev_hash=TXHASH_58497a,
prev_index=1,
)
out1 = proto.TxOutputType(
address="15Jvu3nZNP7u2ipw2533Q9VVgEu2Lu9F2B",
amount=210000 - 100000 - 10000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out2 = proto.TxOutputType(
address_n=[3], # 1CmzyJp9w3NafXMSEFH4SLYUPAVCSUrrJ5
amount=100000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out2.force_confirm = True
_, serialized_tx = check_sign_tx(
self.client, "Bitcoin", [inp1, inp2], [out1, out2], unknown_path=True
)
# Accepted by network: tx c63e24ed820c5851b60c54613fbc4bcb37df6cd49b4c96143e99580a472f79fb
# The transaction was produced before Trezor implemented BIP-66, so the signature
# is now different and txhash doesn't match what is on the blockchain.
assert (
serialized_tx.hex()
== "01000000021c032e5715d1da8115a2fe4f57699e15742fe113b0d2d1ca3b594649d322bec6010000006b483045022100f773c403b2f85a5c1d6c9c4ad69c43de66930fff4b1bc818eb257af98305546a0220443bde4be439f276a6ce793664b463580e210ec6c9255d68354449ac0443c76501210338d78612e990f2eea0c426b5e48a8db70b9d7ed66282b3b26511e0b1c75515a6ffffffff6ea42cd8d9c8e5441c4c5f85bfe50311078730d2881494f11f4d2257777a4958010000006b48304502210090cff1c1911e771605358a8cddd5ae94c7b60cc96e50275908d9bf9d6367c79f02202bfa72e10260a146abd59d0526e1335bacfbb2b4401780e9e3a7441b0480c8da0121038caebd6f753bbbd2bb1f3346a43cd32140648583673a31d62f2dfb56ad0ab9e3ffffffff02a0860100000000001976a9142f4490d5263906e4887ca2996b9e207af3e7824088aca0860100000000001976a914812c13d97f9159e54e326b481b8f88a73df8507a88ac00000000"
)
def test_lots_of_inputs(self):
self.setup_mnemonic_nopin_nopassphrase()
# Tests if device implements serialization of len(inputs) correctly
# tx 4a7b7e0403ae5607e473949cfa03f09f2cd8b0f404bf99ce10b7303d86280bf7 : 100 UTXO for spending for unit tests
inputs = []
for i in range(100):
inputs.append(
proto.TxInputType(
address_n=[4], # 1NwN6UduuVkJi6sw3gSiKZaCY5rHgVXC2h
prev_hash=TXHASH_4a7b7e,
prev_index=i,
)
)
out = proto.TxOutputType(
address="19dvDdyxxptP9dGvozYe8BP6tgFV9L4jg5",
amount=100 * 26000 - 15 * 10000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
_, serialized_tx = check_sign_tx(
self.client, "Bitcoin", inputs, [out], unknown_path=True
)
# Accepted by network: tx 23d9d8eecf3abf6c0f0f3f8b0976a04792d7f1c9a4ea9b0a8931734949e27c92
# too big put in unit test, only check hash
assert (
btc_hash(serialized_tx)[::-1].hex()
== "23d9d8eecf3abf6c0f0f3f8b0976a04792d7f1c9a4ea9b0a8931734949e27c92"
)
def test_lots_of_outputs(self):
self.setup_mnemonic_nopin_nopassphrase()
# Tests if device implements serialization of len(outputs) correctly
# tx: c63e24ed820c5851b60c54613fbc4bcb37df6cd49b4c96143e99580a472f79fb
# index 1: 0.0010 BTC
# tx: 39a29e954977662ab3879c66fb251ef753e0912223a83d1dcb009111d28265e5
# index 1: 0.0254 BTC
inp1 = proto.TxInputType(
address_n=[3], # 1CmzyJp9w3NafXMSEFH4SLYUPAVCSUrrJ5
# amount=100000,
prev_hash=TXHASH_c63e24,
prev_index=1,
)
inp2 = proto.TxInputType(
address_n=[3], # 1CmzyJp9w3NafXMSEFH4SLYUPAVCSUrrJ5
# amount=2540000,
prev_hash=TXHASH_39a29e,
prev_index=1,
)
outputs = []
cnt = 255
for _ in range(cnt):
out = proto.TxOutputType(
address="1NwN6UduuVkJi6sw3gSiKZaCY5rHgVXC2h",
amount=(100000 + 2540000 - 39000) // cnt,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
outputs.append(out)
_, serialized_tx = check_sign_tx(
self.client, "Bitcoin", [inp1, inp2], outputs, unknown_path=True
)
assert (
serialized_tx.hex()
== "0100000002fb792f470a58993e14964c9bd46cdf37cb4bbc3f61540cb651580c82ed243ec6010000006b483045022100969da46f94a81f34f3717b014e0c3e1826eda1b0022ec2f9ce39f3d750ab9235022026da269770993211a1503413566a339bbb4389a482fffcf8e1f76713fc3b94f5012103477b9f0f34ae85434ce795f0c5e1e90c9420e5b5fad084d7cce9a487b94a7902ffffffffe56582d2119100cb1d3da8232291e053f71e25fb669c87b32a667749959ea239010000006a473044022052e1419bb237b9db400ab5e3df16db6355619d545fde9030924a360763ae9ad40220704beab04d72ecaeb42eca7d98faca7a0941e65f2e1341f183be2b83e6b09e1c012103477b9f0f34ae85434ce795f0c5e1e90c9420e5b5fad084d7cce9a487b94a7902fffffffffdff00"
+ "d8270000000000001976a914f0a2b64e56ee2ff57126232f84af6e3a41d4055088ac"
* cnt
+ "00000000"
)
def test_fee_too_high(self):
self.setup_mnemonic_nopin_nopassphrase()
# tx: 1570416eb4302cf52979afd5e6909e37d8fdd874301f7cc87e547e509cb1caa6
# input 0: 1.0 BTC
inp1 = proto.TxInputType(
address_n=[0], # 1HWDaLTpTCTtRWyWqZkzWx1wex5NKyncLW
# amount=100000000,
prev_hash=TXHASH_157041,
prev_index=0,
)
out1 = proto.TxOutputType(
address="1MJ2tj2ThBE62zXbBYA5ZaN3fdve5CPAz1",
amount=100000000 - 510000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
_, serialized_tx = check_sign_tx(
self.client, "Bitcoin", [inp1], [out1], fee_too_high=True, unknown_path=True
)
assert (
serialized_tx.hex()
== "0100000001a6cab19c507e547ec87c1f3074d8fdd8379e90e6d5af7929f52c30b46e417015000000006b483045022100dc3531da7feb261575f03b5b9bbb35edc7f73bb081c92538827105de4102737002200161e34395f6a8ee93979200cb974fa75ccef6d7c14021511cf468eece90d6450121023230848585885f63803a0a8aecdd6538792d5c539215c91698e315bf0253b43dffffffff01d018ee05000000001976a914de9b2a8da088824e8fe51debea566617d851537888ac00000000"
)
def test_not_enough_funds(self):
self.setup_mnemonic_nopin_nopassphrase()
# tx: d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882
# input 0: 0.0039 BTC
inp1 = proto.TxInputType(
address_n=[0], # 14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e
# amount=390000,
prev_hash=TXHASH_d5f65e,
prev_index=0,
)
out1 = proto.TxOutputType(
address="1MJ2tj2ThBE62zXbBYA5ZaN3fdve5CPAz1",
amount=400000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
with pytest.raises(CallException) as exc:
check_sign_tx(
self.client,
"Bitcoin",
[inp1],
[out1],
failure=proto.FailureType.NotEnoughFunds,
unknown_path=True,
)
assert exc.value.args[0] == proto.FailureType.NotEnoughFunds
def test_p2sh(self):
self.setup_mnemonic_nopin_nopassphrase()
inp1 = proto.TxInputType(
address_n=[0], # 14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e
# amount=400000,
prev_hash=TXHASH_54aa56,
prev_index=1,
)
out1 = proto.TxOutputType(
address="3DKGE1pvPpBAgZj94MbCinwmksewUNNYVR", # p2sh
amount=400000 - 10000,
script_type=proto.OutputScriptType.PAYTOSCRIPTHASH,
)
_, serialized_tx = check_sign_tx(
self.client, "Bitcoin", [inp1], [out1], unknown_path=True
)
# Accepted by network: tx 8cc1f4adf7224ce855cf535a5104594a0004cb3b640d6714fdb00b9128832dd5
assert (
serialized_tx.hex()
== "0100000001a3fb2d38322c3b327e54005cebc0686d52fcdf536e53bb5ef481a7de8056aa54010000006b4830450221009e020b0390ccad533b73b552f8a99a9d827212c558e4f755503674d07c92ad4502202d606f7316990e0461c51d4add25054f19c697aa3e3c2ced4d568f0b2c57e62f0121023230848585885f63803a0a8aecdd6538792d5c539215c91698e315bf0253b43dffffffff0170f305000000000017a9147f844bdb0b8fd54b64e3d16c85dc1170f1ff97c18700000000"
)
def test_testnet_big_amount(self):
self.setup_mnemonic_allallall()
# This test is testing transaction with amount bigger than fits to uint32
# tx: 2bac7ad1dec654579a71ea9555463f63ac7b7df9d8ba67b4682bba4e514d0f0c:1
# input 1: 411102528330 Satoshi
inp1 = proto.TxInputType(
address_n=parse_path("m/44'/1'/0'/0/0"),
amount=411102528330,
prev_hash=TXHASH_2bac7a,
prev_index=1,
)
out1 = proto.TxOutputType(
address="mopZWqZZyQc3F2Sy33cvDtJchSAMsnLi7b", # seed allallall, bip32: m/44'/1'/0'/0/1
amount=411102528330,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
_, serialized_tx = check_sign_tx(self.client, "Testnet", [inp1], [out1])
assert (
serialized_tx.hex()
== "01000000010c0f4d514eba2b68b467bad8f97d7bac633f465595ea719a5754c6ded17aac2b010000006b4830450221008e3b926f04d8830bd5b67698af25c9e00c9db1b1ef3e5d69af794446753da94a02202d4a7509f26bba29ff643a7ac0d43fb128c1a632cc502b8f44eada8930fb9c9b0121030e669acac1f280d1ddf441cd2ba5e97417bf2689e4bbec86df4f831bf9f7ffd0ffffffff014ac39eb75f0000001976a9145b157a678a10021243307e4bb58f36375aa80e1088ac00000000"
)
def test_attack_change_outputs(self):
# This unit test attempts to modify data sent during ping-pong of streaming signing.
# Because device is asking for human confirmation only during first pass (first input),
# device must detect that data has been modified during other passes and fail to sign
# such modified data (which has not been confirmed by the user).
# Test firstly prepare normal transaction and send it to device. Then it send the same
# transaction again, but change amount of output 1 during signing the second input.
self.setup_mnemonic_nopin_nopassphrase()
inp1 = proto.TxInputType(
address_n=[1], # 1CK7SJdcb8z9HuvVft3D91HLpLC6KSsGb
# amount=100000,
prev_hash=TXHASH_c6be22,
prev_index=1,
)
inp2 = proto.TxInputType(
address_n=[2], # 15AeAhtNJNKyowK8qPHwgpXkhsokzLtUpG
# amount=110000,
prev_hash=TXHASH_58497a,
prev_index=1,
)
out1 = proto.TxOutputType(
address="15Jvu3nZNP7u2ipw2533Q9VVgEu2Lu9F2B",
amount=210000 - 100000 - 10000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out2 = proto.TxOutputType(
address_n=[3], # 1CmzyJp9w3NafXMSEFH4SLYUPAVCSUrrJ5
amount=100000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out2.force_confirm = True
# Test if the transaction can be signed normally
_, serialized_tx = check_sign_tx(
self.client, "Bitcoin", [inp1, inp2], [out1, out2], unknown_path=True
)
# Accepted by network: tx c63e24ed820c5851b60c54613fbc4bcb37df6cd49b4c96143e99580a472f79fb
assert (
serialized_tx.hex()
== "01000000021c032e5715d1da8115a2fe4f57699e15742fe113b0d2d1ca3b594649d322bec6010000006b483045022100f773c403b2f85a5c1d6c9c4ad69c43de66930fff4b1bc818eb257af98305546a0220443bde4be439f276a6ce793664b463580e210ec6c9255d68354449ac0443c76501210338d78612e990f2eea0c426b5e48a8db70b9d7ed66282b3b26511e0b1c75515a6ffffffff6ea42cd8d9c8e5441c4c5f85bfe50311078730d2881494f11f4d2257777a4958010000006b48304502210090cff1c1911e771605358a8cddd5ae94c7b60cc96e50275908d9bf9d6367c79f02202bfa72e10260a146abd59d0526e1335bacfbb2b4401780e9e3a7441b0480c8da0121038caebd6f753bbbd2bb1f3346a43cd32140648583673a31d62f2dfb56ad0ab9e3ffffffff02a0860100000000001976a9142f4490d5263906e4887ca2996b9e207af3e7824088aca0860100000000001976a914812c13d97f9159e54e326b481b8f88a73df8507a88ac00000000"
)
run_attack = False
def attack_processor(msg):
nonlocal run_attack
if msg.tx.outputs and msg.tx.outputs[0] == out2:
if not run_attack:
run_attack = True
else:
# Sign output with another amount
msg.tx.outputs[0].amount = 9999999
return msg
# Set up attack processors
self.client.set_filter(proto.TxAck, attack_processor)
with pytest.raises(CallException) as exc:
btc.sign_tx(
self.client,
"Bitcoin",
[inp1, inp2],
[out1, out2],
prev_txes=tx_cache("Bitcoin"),
)
assert exc.value.args[0] in (
proto.FailureType.ProcessError,
proto.FailureType.DataError,
)
assert exc.value.args[1].endswith("Transaction has changed during signing")
def test_attack_change_input_address(self):
# This unit test attempts to modify input address after the Trezor checked
# that it matches the change output
self.setup_mnemonic_allallall()
inp1 = proto.TxInputType(
address_n=parse_path("44'/1'/4'/0/0"),
# moUJnmge8SRXuediK7bW6t4YfrPqbE6hD7
prev_hash=TXHASH_d2dcda,
prev_index=1,
script_type=proto.InputScriptType.SPENDADDRESS,
)
out1 = proto.TxOutputType(
address="mwue7mokpBRAsJtHqEMcRPanYBmsSmYKvY",
amount=100000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out2 = proto.TxOutputType(
address_n=parse_path("44'/1'/4'/1/0"),
amount=123400000 - 5000 - 100000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
# Test if the transaction can be signed normally
_, serialized_tx = check_sign_tx(self.client, "Testnet", [inp1], [out1, out2])
assert (
serialized_tx.hex()
== "0100000001243e15b53cc553d93ec4e27e16984adc3d885ef107c613a7577fea47f5dadcd2010000006b483045022100eedaadde3a771967beee39f1daa9e9450f72fccdec63488a96d71eeae4224b4002203a22be3c1677d3451c93a49550b69e8f8fc06328823c7e0f633dde13d67ef96b01210364430c9122948e525e2f1c6d88f00f47679274f0810fd8c63754954f310995c1ffffffff02a0860100000000001976a914b3cc67f3349974d0f1b50e9bb5dfdf226f888fa088ac18555907000000001976a914f80fb232a1e54b1fa732bc120cae72eabd7fcf6888ac00000000"
)
run_attack = False
def attack_processor(msg):
nonlocal run_attack
if msg.tx.inputs and msg.tx.inputs[0] == inp1:
if not run_attack:
run_attack = True
else:
msg.tx.inputs[0].address_n[2] = H_(12)
return msg
self.client.set_filter(proto.TxAck, attack_processor)
# Now run the attack, must trigger the exception
with self.client:
self.client.set_expected_responses(
[
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(
request_type=proto.RequestType.TXMETA,
details=proto.TxRequestDetailsType(tx_hash=TXHASH_d2dcda),
),
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(
request_index=0, tx_hash=TXHASH_d2dcda
),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(
request_index=0, tx_hash=TXHASH_d2dcda
),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(
request_index=1, tx_hash=TXHASH_d2dcda
),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.ButtonRequest(code=proto.ButtonRequestType.ConfirmOutput),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=1),
),
proto.ButtonRequest(code=proto.ButtonRequestType.SignTx),
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.Failure(code=proto.FailureType.ProcessError),
]
)
# Now run the attack, must trigger the exception
with pytest.raises(CallException) as exc:
btc.sign_tx(
self.client,
"Testnet",
[inp1],
[out1, out2],
prev_txes=tx_cache("Testnet"),
)
assert exc.value.args[0] == proto.FailureType.ProcessError
if TREZOR_VERSION == 1:
assert exc.value.args[1].endswith("Failed to compile input")
else:
assert exc.value.args[1].endswith(
"Transaction has changed during signing"
)
def test_spend_coinbase(self):
# 25 TEST generated to m/1 (mfiGQVPcRcaEvQPYDErR34DcCovtxYvUUV)
# tx: d6da21677d7cca5f42fbc7631d062c9ae918a0254f7c6c22de8e8cb7fd5b8236
# input 0: 25.0027823 BTC
self.setup_mnemonic_nopin_nopassphrase()
inp1 = proto.TxInputType(
address_n=[1], # mfiGQVPcRcaEvQPYDErR34DcCovtxYvUUV
# amount=390000,
prev_hash=TXHASH_d6da21,
prev_index=0,
)
out1 = proto.TxOutputType(
address="mm6FM31rM5Vc3sw5D7kztiBg3jHUzyqF1g",
amount=2500278230 - 10000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
_, serialized_tx = check_sign_tx(
self.client, "Testnet", [inp1], [out1], unknown_path=True
)
# Accepted by network: tx
assert (
serialized_tx.hex()
== "010000000136825bfdb78c8ede226c7c4f25a018e99a2c061d63c7fb425fca7c7d6721dad6000000006a473044022047845c366eb24f40be315c7815a154513c444c7989eb80f7ce7ff6aeb703d26a022007c1f5efadf67c5889634fd7ac39a7ce78bffac291673e8772ecd8389c901d9f01210338d78612e990f2eea0c426b5e48a8db70b9d7ed66282b3b26511e0b1c75515a6ffffffff01c6100795000000001976a9143d2496e67f5f57a924353da42d4725b318e7a8ea88ac00000000"
)
def test_two_changes(self):
self.setup_mnemonic_allallall()
# see 87be0736f202f7c2bff0781b42bad3e0cdcb54761939da69ea793a3735552c56
# tx: e5040e1bc1ae7667ffb9e5248e90b2fb93cd9150234151ce90e14ab2f5933bcd
# input 0: 0.31 BTC
inp1 = proto.TxInputType(
address_n=parse_path("44'/1'/0'/0/0"),
# amount=31000000,
prev_hash=TXHASH_e5040e,
prev_index=0,
)
out1 = proto.TxOutputType(
address="msj42CCGruhRsFrGATiUuh25dtxYtnpbTx",
amount=30090000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out_change1 = proto.TxOutputType(
address_n=parse_path("44'/1'/0'/1/0"),
amount=900000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out_change2 = proto.TxOutputType(
address_n=parse_path("44'/1'/0'/1/1"),
amount=10000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out_change2.force_confirm = True
check_sign_tx(self.client, "Testnet", [inp1], [out1, out_change1, out_change2])
def test_change_on_main_chain_allowed(self):
self.setup_mnemonic_allallall()
# see 87be0736f202f7c2bff0781b42bad3e0cdcb54761939da69ea793a3735552c56
# tx: e5040e1bc1ae7667ffb9e5248e90b2fb93cd9150234151ce90e14ab2f5933bcd
# input 0: 0.31 BTC
inp1 = proto.TxInputType(
address_n=parse_path("44'/1'/0'/0/0"),
# amount=31000000,
prev_hash=TXHASH_e5040e,
prev_index=0,
)
out1 = proto.TxOutputType(
address="msj42CCGruhRsFrGATiUuh25dtxYtnpbTx",
amount=30090000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
# change on main chain is allowed => treated as a change
out_change = proto.TxOutputType(
address_n=parse_path("44'/1'/0'/0/0"),
amount=900000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
check_sign_tx(self.client, "Testnet", [inp1], [out1, out_change])
|
# This file is part of the Trezor project.
#
# Copyright (C) 2012-2018 SatoshiLabs and contributors
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the License along with this library.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.
import pytest
from trezorlib import btc, messages as proto
from trezorlib.tools import H_, CallException, btc_hash, parse_path
from ..support.tx_cache import tx_cache
from .common import TrezorTest
from .conftest import TREZOR_VERSION
TXHASH_157041 = bytes.fromhex(
"1570416eb4302cf52979afd5e6909e37d8fdd874301f7cc87e547e509cb1caa6"
)
TXHASH_39a29e = bytes.fromhex(
"39a29e954977662ab3879c66fb251ef753e0912223a83d1dcb009111d28265e5"
)
TXHASH_4a7b7e = bytes.fromhex(
"4a7b7e0403ae5607e473949cfa03f09f2cd8b0f404bf99ce10b7303d86280bf7"
)
TXHASH_54aa56 = bytes.fromhex(
"54aa5680dea781f45ebb536e53dffc526d68c0eb5c00547e323b2c32382dfba3"
)
TXHASH_58497a = bytes.fromhex(
"58497a7757224d1ff1941488d23087071103e5bf855f4c1c44e5c8d9d82ca46e"
)
TXHASH_6f90f3 = bytes.fromhex(
"6f90f3c7cbec2258b0971056ef3fe34128dbde30daa9c0639a898f9977299d54"
)
TXHASH_c63e24 = bytes.fromhex(
"c63e24ed820c5851b60c54613fbc4bcb37df6cd49b4c96143e99580a472f79fb"
)
TXHASH_c6be22 = bytes.fromhex(
"c6be22d34946593bcad1d2b013e12f74159e69574ffea21581dad115572e031c"
)
TXHASH_d5f65e = bytes.fromhex(
"d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882"
)
TXHASH_d6da21 = bytes.fromhex(
"d6da21677d7cca5f42fbc7631d062c9ae918a0254f7c6c22de8e8cb7fd5b8236"
)
TXHASH_d2dcda = bytes.fromhex(
"d2dcdaf547ea7f57a713c607f15e883ddc4a98167ee2c43ed953c53cb5153e24"
)
TXHASH_e5040e = bytes.fromhex(
"e5040e1bc1ae7667ffb9e5248e90b2fb93cd9150234151ce90e14ab2f5933bcd"
)
TXHASH_50f6f1 = bytes.fromhex(
"50f6f1209ca92d7359564be803cb2c932cde7d370f7cee50fd1fad6790f6206d"
)
TXHASH_2bac7a = bytes.fromhex(
"2bac7ad1dec654579a71ea9555463f63ac7b7df9d8ba67b4682bba4e514d0f0c"
)
def check_sign_tx(
client,
coin_name,
inputs,
outputs,
fee_too_high=False,
failure=None,
unknown_path=False,
):
__tracebackhide__ = True
expected_responses = []
txes = tx_cache(coin_name)
t = proto.RequestType
b = proto.ButtonRequestType
def tx_request(request_type, **kwargs):
if kwargs:
details = proto.TxRequestDetailsType(**kwargs)
else:
details = None
return proto.TxRequest(request_type=request_type, details=details)
def btn(code):
return proto.ButtonRequest(code=code)
for i, inp in enumerate(inputs):
expected_responses.append(tx_request(t.TXINPUT, request_index=i))
if unknown_path and TREZOR_VERSION != 1:
expected_responses.append(btn(b.UnknownDerivationPath))
expected_responses.append(tx_request(t.TXMETA, tx_hash=inp.prev_hash))
if inp.script_type in (
proto.InputScriptType.SPENDP2SHWITNESS,
proto.InputScriptType.SPENDWITNESS,
):
continue
prev_tx = txes[inp.prev_hash]
for pi in range(len(prev_tx.inputs)):
r = tx_request(t.TXINPUT, request_index=pi, tx_hash=inp.prev_hash)
expected_responses.append(r)
for po in range(len(prev_tx.bin_outputs)):
r = tx_request(t.TXOUTPUT, request_index=po, tx_hash=inp.prev_hash)
expected_responses.append(r)
for i, outp in enumerate(outputs):
expected_responses.append(tx_request(t.TXOUTPUT, request_index=i))
if outp.address is not None or hasattr(outp, "force_confirm"):
expected_responses.append(btn(b.ConfirmOutput))
if fee_too_high:
expected_responses.append(btn(b.FeeOverThreshold))
if failure is not None:
expected_responses.append(proto.Failure(code=failure))
else:
expected_responses.append(btn(b.SignTx))
input_requests = [
tx_request(t.TXINPUT, request_index=i) for i in range(len(inputs))
]
output_requests = [
tx_request(t.TXOUTPUT, request_index=i) for i in range(len(outputs))
]
# No idea why the flow is like this. But it is.
for _ in range(len(inputs)):
expected_responses.extend(input_requests)
expected_responses.extend(output_requests)
# and once more for good measure
expected_responses.extend(output_requests)
expected_responses.append(tx_request(t.TXFINISHED))
with client:
client.set_expected_responses(expected_responses)
return btc.sign_tx(client, coin_name, inputs, outputs, prev_txes=txes)
class TestMsgSigntx(TrezorTest):
def test_one_one_fee(self):
self.setup_mnemonic_nopin_nopassphrase()
# tx: d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882
# input 0: 0.0039 BTC
inp1 = proto.TxInputType(
address_n=[0], # 14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e
# amount=390000,
prev_hash=TXHASH_d5f65e,
prev_index=0,
)
out1 = proto.TxOutputType(
address="1MJ2tj2ThBE62zXbBYA5ZaN3fdve5CPAz1",
amount=390000 - 10000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
_, serialized_tx = check_sign_tx(
self.client, "Bitcoin", [inp1], [out1], unknown_path=True
)
# Accepted by network: tx fd79435246dee76b2f159d2db08032d666c95adc544de64c8c49f474df4a7fee
assert (
serialized_tx.hex()
== "010000000182488650ef25a58fef6788bd71b8212038d7f2bbe4750bc7bcb44701e85ef6d5000000006b4830450221009a0b7be0d4ed3146ee262b42202841834698bb3ee39c24e7437df208b8b7077102202b79ab1e7736219387dffe8d615bbdba87e11477104b867ef47afed1a5ede7810121023230848585885f63803a0a8aecdd6538792d5c539215c91698e315bf0253b43dffffffff0160cc0500000000001976a914de9b2a8da088824e8fe51debea566617d851537888ac00000000"
)
def test_testnet_one_two_fee(self):
self.setup_mnemonic_allallall()
# see 87be0736f202f7c2bff0781b42bad3e0cdcb54761939da69ea793a3735552c56
# tx: e5040e1bc1ae7667ffb9e5248e90b2fb93cd9150234151ce90e14ab2f5933bcd
# input 0: 0.31 BTC
inp1 = proto.TxInputType(
address_n=parse_path("44'/1'/0'/0/0"),
# amount=31000000,
prev_hash=TXHASH_e5040e,
prev_index=0,
)
out1 = proto.TxOutputType(
address="msj42CCGruhRsFrGATiUuh25dtxYtnpbTx",
amount=30090000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out2 = proto.TxOutputType(
address_n=parse_path("44'/1'/0'/1/0"),
amount=900000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
_, serialized_tx = check_sign_tx(self.client, "Testnet", [inp1], [out1, out2])
assert (
serialized_tx.hex()
== "0100000001cd3b93f5b24ae190ce5141235091cd93fbb2908e24e5b9ff6776aec11b0e04e5000000006b483045022100eba3bbcbb82ab1ebac88a394e8fb53b0263dadbb3e8072f0a21ee62818c911060220686a9b7f306d028b54a228b5c47cc6c27b1d01a3b0770440bcc64d55d8bace2c0121030e669acac1f280d1ddf441cd2ba5e97417bf2689e4bbec86df4f831bf9f7ffd0ffffffff021023cb01000000001976a91485eb47fe98f349065d6f044e27a4ac541af79ee288aca0bb0d00000000001976a9143d3cca567e00a04819742b21a696a67da796498b88ac00000000"
)
def test_testnet_fee_too_high(self):
self.setup_mnemonic_nopin_nopassphrase()
# tx: 6f90f3c7cbec2258b0971056ef3fe34128dbde30daa9c0639a898f9977299d54
# input 1: 10.00000000 BTC
inp1 = proto.TxInputType(
address_n=[0], # mirio8q3gtv7fhdnmb3TpZ4EuafdzSs7zL
# amount=1000000000,
prev_hash=TXHASH_6f90f3,
prev_index=1,
)
out1 = proto.TxOutputType(
address="mfiGQVPcRcaEvQPYDErR34DcCovtxYvUUV",
amount=1000000000 - 500000000 - 100000000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out2 = proto.TxOutputType(
address_n=[2],
amount=500000000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out2.force_confirm = True
_, serialized_tx = check_sign_tx(
self.client,
"Testnet",
[inp1],
[out1, out2],
fee_too_high=True,
unknown_path=True,
)
assert (
serialized_tx.hex()
== "0100000001549d2977998f899a63c0a9da30dedb2841e33fef561097b05822eccbc7f3906f010000006a47304402205ea68e9d52d4be14420ccecf7f2e11489d49b86bedb79ee99b5e9b7188884150022056219cb3384a5df8048cca286a9533403dbda1571afd84b51379cdaee6a6dea80121023230848585885f63803a0a8aecdd6538792d5c539215c91698e315bf0253b43dffffffff020084d717000000001976a9140223b1a09138753c9cb0baf95a0a62c82711567a88ac0065cd1d000000001976a9142db345c36563122e2fd0f5485fb7ea9bbf7cb5a288ac00000000"
)
def test_one_two_fee(self):
self.setup_mnemonic_allallall()
# tx: c275c333fd1b36bef4af316226c66a8b3693fbfcc081a5e16a2ae5fcb09e92bf
inp1 = proto.TxInputType(
address_n=parse_path(
"m/44'/0'/0'/0/5"
), # 1GA9u9TfCG7SWmKCveBumdA1TZpfom6ZdJ
# amount=50000,
prev_hash=TXHASH_50f6f1,
prev_index=1,
)
out1 = proto.TxOutputType(
address_n=parse_path(
"m/44'/0'/0'/1/3"
), # 1EcL6AyfQTyWKGvXwNSfsWoYnD3whzVFdu
amount=30000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out2 = proto.TxOutputType(
address="<KEY>",
amount=10000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
_, serialized_tx = check_sign_tx(self.client, "Bitcoin", [inp1], [out1, out2])
assert (
serialized_tx.hex()
== "01000000016d20f69067ad1ffd50ee7c0f377dde2c932ccb03e84b5659732da99c20f1f650010000006a47304402203429bd3ce7b38c5c1e8a15340edd79ced41a2939aae62e259d2e3d18e0c5ee7602201b83b10ebc4d6dcee3f9eb42ba8f1ef8a059a05397e0c1b9223d1565a3e6ec01012102a7a079c1ef9916b289c2ff21a992c808d0de3dfcf8a9f163205c5c9e21f55d5cffffffff0230750000000000001976a914954820f1de627a703596ac0396f986d958e3de4c88ac10270000000000001976a91405427736705cfbfaff76b1cff48283707fb1037088ac00000000"
)
def test_one_three_fee(self):
self.setup_mnemonic_nopin_nopassphrase()
# tx: d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882
# input 0: 0.0039 BTC
inp1 = proto.TxInputType(
address_n=[0], # 14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e
# amount=390000,
prev_hash=TXHASH_d5f65e,
prev_index=0,
)
out1 = proto.TxOutputType(
address="1MJ2tj2ThBE62zXbBYA5ZaN3fdve5CPAz1",
amount=390000 - 80000 - 12000 - 10000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out2 = proto.TxOutputType(
address="13uaUYn6XAooo88QvAqAVsiVvr2mAXutqP",
amount=12000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out3 = proto.TxOutputType(
address_n=[1], amount=80000, script_type=proto.OutputScriptType.PAYTOADDRESS
)
out3.force_confirm = True
_, serialized_tx = check_sign_tx(
self.client, "Bitcoin", [inp1], [out1, out2, out3], unknown_path=True
)
assert (
serialized_tx.hex()
== "010000000182488650ef25a58fef6788bd71b8212038d7f2bbe4750bc7bcb44701e85ef6d5000000006b483045022100e695e2c530c7c0fc32e6b79b7cff56a7f70a8c9da787534f46b4204070f914fc02207b0879a81408a11e23b11d4c7965c62b5fc6d5c2d92340f5ee2da7b40e99314a0121023230848585885f63803a0a8aecdd6538792d5c539215c91698e315bf0253b43dffffffff0300650400000000001976a914de9b2a8da088824e8fe51debea566617d851537888ace02e0000000000001976a9141fe1d337fb81afca42818051e12fd18245d1b17288ac80380100000000001976a9140223b1a09138753c9cb0baf95a0a62c82711567a88ac00000000"
)
def test_two_two(self):
self.setup_mnemonic_nopin_nopassphrase()
# tx: c6be22d34946593bcad1d2b013e12f74159e69574ffea21581dad115572e031c
# input 1: 0.0010 BTC
# tx: 58497a7757224d1ff1941488d23087071103e5bf855f4c1c44e5c8d9d82ca46e
# input 1: 0.0011 BTC
inp1 = proto.TxInputType(
address_n=[1], # 1CK7SJdcb8z9HuvVft3D91HLpLC6KSsGb
# amount=100000,
prev_hash=TXHASH_c6be22,
prev_index=1,
)
inp2 = proto.TxInputType(
address_n=[2], # 15AeAhtNJNKyowK8qPHwgpXkhsokzLtUpG
# amount=110000,
prev_hash=TXHASH_58497a,
prev_index=1,
)
out1 = proto.TxOutputType(
address="15Jvu3nZNP7u2ipw2533Q9VVgEu2Lu9F2B",
amount=210000 - 100000 - 10000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out2 = proto.TxOutputType(
address_n=[3], # 1CmzyJp9w3NafXMSEFH4SLYUPAVCSUrrJ5
amount=100000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out2.force_confirm = True
_, serialized_tx = check_sign_tx(
self.client, "Bitcoin", [inp1, inp2], [out1, out2], unknown_path=True
)
# Accepted by network: tx c63e24ed820c5851b60c54613fbc4bcb37df6cd49b4c96143e99580a472f79fb
# The transaction was produced before Trezor implemented BIP-66, so the signature
# is now different and txhash doesn't match what is on the blockchain.
assert (
serialized_tx.hex()
== "01000000021c032e5715d1da8115a2fe4f57699e15742fe113b0d2d1ca3b594649d322bec6010000006b483045022100f773c403b2f85a5c1d6c9c4ad69c43de66930fff4b1bc818eb257af98305546a0220443bde4be439f276a6ce793664b463580e210ec6c9255d68354449ac0443c76501210338d78612e990f2eea0c426b5e48a8db70b9d7ed66282b3b26511e0b1c75515a6ffffffff6ea42cd8d9c8e5441c4c5f85bfe50311078730d2881494f11f4d2257777a4958010000006b48304502210090cff1c1911e771605358a8cddd5ae94c7b60cc96e50275908d9bf9d6367c79f02202bfa72e10260a146abd59d0526e1335bacfbb2b4401780e9e3a7441b0480c8da0121038caebd6f753bbbd2bb1f3346a43cd32140648583673a31d62f2dfb56ad0ab9e3ffffffff02a0860100000000001976a9142f4490d5263906e4887ca2996b9e207af3e7824088aca0860100000000001976a914812c13d97f9159e54e326b481b8f88a73df8507a88ac00000000"
)
def test_lots_of_inputs(self):
self.setup_mnemonic_nopin_nopassphrase()
# Tests if device implements serialization of len(inputs) correctly
# tx 4a7b7e0403ae5607e473949cfa03f09f2cd8b0f404bf99ce10b7303d86280bf7 : 100 UTXO for spending for unit tests
inputs = []
for i in range(100):
inputs.append(
proto.TxInputType(
address_n=[4], # 1NwN6UduuVkJi6sw3gSiKZaCY5rHgVXC2h
prev_hash=TXHASH_4a7b7e,
prev_index=i,
)
)
out = proto.TxOutputType(
address="19dvDdyxxptP9dGvozYe8BP6tgFV9L4jg5",
amount=100 * 26000 - 15 * 10000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
_, serialized_tx = check_sign_tx(
self.client, "Bitcoin", inputs, [out], unknown_path=True
)
# Accepted by network: tx 23d9d8eecf3abf6c0f0f3f8b0976a04792d7f1c9a4ea9b0a8931734949e27c92
# too big put in unit test, only check hash
assert (
btc_hash(serialized_tx)[::-1].hex()
== "23d9d8eecf3abf6c0f0f3f8b0976a04792d7f1c9a4ea9b0a8931734949e27c92"
)
def test_lots_of_outputs(self):
self.setup_mnemonic_nopin_nopassphrase()
# Tests if device implements serialization of len(outputs) correctly
# tx: c63e24ed820c5851b60c54613fbc4bcb37df6cd49b4c96143e99580a472f79fb
# index 1: 0.0010 BTC
# tx: 39a29e954977662ab3879c66fb251ef753e0912223a83d1dcb009111d28265e5
# index 1: 0.0254 BTC
inp1 = proto.TxInputType(
address_n=[3], # 1CmzyJp9w3NafXMSEFH4SLYUPAVCSUrrJ5
# amount=100000,
prev_hash=TXHASH_c63e24,
prev_index=1,
)
inp2 = proto.TxInputType(
address_n=[3], # 1CmzyJp9w3NafXMSEFH4SLYUPAVCSUrrJ5
# amount=2540000,
prev_hash=TXHASH_39a29e,
prev_index=1,
)
outputs = []
cnt = 255
for _ in range(cnt):
out = proto.TxOutputType(
address="1NwN6UduuVkJi6sw3gSiKZaCY5rHgVXC2h",
amount=(100000 + 2540000 - 39000) // cnt,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
outputs.append(out)
_, serialized_tx = check_sign_tx(
self.client, "Bitcoin", [inp1, inp2], outputs, unknown_path=True
)
assert (
serialized_tx.hex()
== "0100000002fb792f470a58993e14964c9bd46cdf37cb4bbc3f61540cb651580c82ed243ec6010000006b483045022100969da46f94a81f34f3717b014e0c3e1826eda1b0022ec2f9ce39f3d750ab9235022026da269770993211a1503413566a339bbb4389a482fffcf8e1f76713fc3b94f5012103477b9f0f34ae85434ce795f0c5e1e90c9420e5b5fad084d7cce9a487b94a7902ffffffffe56582d2119100cb1d3da8232291e053f71e25fb669c87b32a667749959ea239010000006a473044022052e1419bb237b9db400ab5e3df16db6355619d545fde9030924a360763ae9ad40220704beab04d72ecaeb42eca7d98faca7a0941e65f2e1341f183be2b83e6b09e1c012103477b9f0f34ae85434ce795f0c5e1e90c9420e5b5fad084d7cce9a487b94a7902fffffffffdff00"
+ "d8270000000000001976a914f0a2b64e56ee2ff57126232f84af6e3a41d4055088ac"
* cnt
+ "00000000"
)
def test_fee_too_high(self):
self.setup_mnemonic_nopin_nopassphrase()
# tx: 1570416eb4302cf52979afd5e6909e37d8fdd874301f7cc87e547e509cb1caa6
# input 0: 1.0 BTC
inp1 = proto.TxInputType(
address_n=[0], # 1HWDaLTpTCTtRWyWqZkzWx1wex5NKyncLW
# amount=100000000,
prev_hash=TXHASH_157041,
prev_index=0,
)
out1 = proto.TxOutputType(
address="1MJ2tj2ThBE62zXbBYA5ZaN3fdve5CPAz1",
amount=100000000 - 510000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
_, serialized_tx = check_sign_tx(
self.client, "Bitcoin", [inp1], [out1], fee_too_high=True, unknown_path=True
)
assert (
serialized_tx.hex()
== "0100000001a6cab19c507e547ec87c1f3074d8fdd8379e90e6d5af7929f52c30b46e417015000000006b483045022100dc3531da7feb261575f03b5b9bbb35edc7f73bb081c92538827105de4102737002200161e34395f6a8ee93979200cb974fa75ccef6d7c14021511cf468eece90d6450121023230848585885f63803a0a8aecdd6538792d5c539215c91698e315bf0253b43dffffffff01d018ee05000000001976a914de9b2a8da088824e8fe51debea566617d851537888ac00000000"
)
def test_not_enough_funds(self):
self.setup_mnemonic_nopin_nopassphrase()
# tx: d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882
# input 0: 0.0039 BTC
inp1 = proto.TxInputType(
address_n=[0], # 14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e
# amount=390000,
prev_hash=TXHASH_d5f65e,
prev_index=0,
)
out1 = proto.TxOutputType(
address="1MJ2tj2ThBE62zXbBYA5ZaN3fdve5CPAz1",
amount=400000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
with pytest.raises(CallException) as exc:
check_sign_tx(
self.client,
"Bitcoin",
[inp1],
[out1],
failure=proto.FailureType.NotEnoughFunds,
unknown_path=True,
)
assert exc.value.args[0] == proto.FailureType.NotEnoughFunds
def test_p2sh(self):
self.setup_mnemonic_nopin_nopassphrase()
inp1 = proto.TxInputType(
address_n=[0], # 14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e
# amount=400000,
prev_hash=TXHASH_54aa56,
prev_index=1,
)
out1 = proto.TxOutputType(
address="3DKGE1pvPpBAgZj94MbCinwmksewUNNYVR", # p2sh
amount=400000 - 10000,
script_type=proto.OutputScriptType.PAYTOSCRIPTHASH,
)
_, serialized_tx = check_sign_tx(
self.client, "Bitcoin", [inp1], [out1], unknown_path=True
)
# Accepted by network: tx 8cc1f4adf7224ce855cf535a5104594a0004cb3b640d6714fdb00b9128832dd5
assert (
serialized_tx.hex()
== "0100000001a3fb2d38322c3b327e54005cebc0686d52fcdf536e53bb5ef481a7de8056aa54010000006b4830450221009e020b0390ccad533b73b552f8a99a9d827212c558e4f755503674d07c92ad4502202d606f7316990e0461c51d4add25054f19c697aa3e3c2ced4d568f0b2c57e62f0121023230848585885f63803a0a8aecdd6538792d5c539215c91698e315bf0253b43dffffffff0170f305000000000017a9147f844bdb0b8fd54b64e3d16c85dc1170f1ff97c18700000000"
)
def test_testnet_big_amount(self):
self.setup_mnemonic_allallall()
# This test is testing transaction with amount bigger than fits to uint32
# tx: 2bac7ad1dec654579a71ea9555463f63ac7b7df9d8ba67b4682bba4e514d0f0c:1
# input 1: 411102528330 Satoshi
inp1 = proto.TxInputType(
address_n=parse_path("m/44'/1'/0'/0/0"),
amount=411102528330,
prev_hash=TXHASH_2bac7a,
prev_index=1,
)
out1 = proto.TxOutputType(
address="mopZWqZZyQc3F2Sy33cvDtJchSAMsnLi7b", # seed allallall, bip32: m/44'/1'/0'/0/1
amount=411102528330,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
_, serialized_tx = check_sign_tx(self.client, "Testnet", [inp1], [out1])
assert (
serialized_tx.hex()
== "01000000010c0f4d514eba2b68b467bad8f97d7bac633f465595ea719a5754c6ded17aac2b010000006b4830450221008e3b926f04d8830bd5b67698af25c9e00c9db1b1ef3e5d69af794446753da94a02202d4a7509f26bba29ff643a7ac0d43fb128c1a632cc502b8f44eada8930fb9c9b0121030e669acac1f280d1ddf441cd2ba5e97417bf2689e4bbec86df4f831bf9f7ffd0ffffffff014ac39eb75f0000001976a9145b157a678a10021243307e4bb58f36375aa80e1088ac00000000"
)
def test_attack_change_outputs(self):
# This unit test attempts to modify data sent during ping-pong of streaming signing.
# Because device is asking for human confirmation only during first pass (first input),
# device must detect that data has been modified during other passes and fail to sign
# such modified data (which has not been confirmed by the user).
# Test firstly prepare normal transaction and send it to device. Then it send the same
# transaction again, but change amount of output 1 during signing the second input.
self.setup_mnemonic_nopin_nopassphrase()
inp1 = proto.TxInputType(
address_n=[1], # 1CK7SJdcb8z9HuvVft3D91HLpLC6KSsGb
# amount=100000,
prev_hash=TXHASH_c6be22,
prev_index=1,
)
inp2 = proto.TxInputType(
address_n=[2], # 15AeAhtNJNKyowK8qPHwgpXkhsokzLtUpG
# amount=110000,
prev_hash=TXHASH_58497a,
prev_index=1,
)
out1 = proto.TxOutputType(
address="15Jvu3nZNP7u2ipw2533Q9VVgEu2Lu9F2B",
amount=210000 - 100000 - 10000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out2 = proto.TxOutputType(
address_n=[3], # 1CmzyJp9w3NafXMSEFH4SLYUPAVCSUrrJ5
amount=100000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out2.force_confirm = True
# Test if the transaction can be signed normally
_, serialized_tx = check_sign_tx(
self.client, "Bitcoin", [inp1, inp2], [out1, out2], unknown_path=True
)
# Accepted by network: tx c63e24ed820c5851b60c54613fbc4bcb37df6cd49b4c96143e99580a472f79fb
assert (
serialized_tx.hex()
== "01000000021c032e5715d1da8115a2fe4f57699e15742fe113b0d2d1ca3b594649d322bec6010000006b483045022100f773c403b2f85a5c1d6c9c4ad69c43de66930fff4b1bc818eb257af98305546a0220443bde4be439f276a6ce793664b463580e210ec6c9255d68354449ac0443c76501210338d78612e990f2eea0c426b5e48a8db70b9d7ed66282b3b26511e0b1c75515a6ffffffff6ea42cd8d9c8e5441c4c5f85bfe50311078730d2881494f11f4d2257777a4958010000006b48304502210090cff1c1911e771605358a8cddd5ae94c7b60cc96e50275908d9bf9d6367c79f02202bfa72e10260a146abd59d0526e1335bacfbb2b4401780e9e3a7441b0480c8da0121038caebd6f753bbbd2bb1f3346a43cd32140648583673a31d62f2dfb56ad0ab9e3ffffffff02a0860100000000001976a9142f4490d5263906e4887ca2996b9e207af3e7824088aca0860100000000001976a914812c13d97f9159e54e326b481b8f88a73df8507a88ac00000000"
)
run_attack = False
def attack_processor(msg):
nonlocal run_attack
if msg.tx.outputs and msg.tx.outputs[0] == out2:
if not run_attack:
run_attack = True
else:
# Sign output with another amount
msg.tx.outputs[0].amount = 9999999
return msg
# Set up attack processors
self.client.set_filter(proto.TxAck, attack_processor)
with pytest.raises(CallException) as exc:
btc.sign_tx(
self.client,
"Bitcoin",
[inp1, inp2],
[out1, out2],
prev_txes=tx_cache("Bitcoin"),
)
assert exc.value.args[0] in (
proto.FailureType.ProcessError,
proto.FailureType.DataError,
)
assert exc.value.args[1].endswith("Transaction has changed during signing")
def test_attack_change_input_address(self):
# This unit test attempts to modify input address after the Trezor checked
# that it matches the change output
self.setup_mnemonic_allallall()
inp1 = proto.TxInputType(
address_n=parse_path("44'/1'/4'/0/0"),
# moUJnmge8SRXuediK7bW6t4YfrPqbE6hD7
prev_hash=TXHASH_d2dcda,
prev_index=1,
script_type=proto.InputScriptType.SPENDADDRESS,
)
out1 = proto.TxOutputType(
address="mwue7mokpBRAsJtHqEMcRPanYBmsSmYKvY",
amount=100000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out2 = proto.TxOutputType(
address_n=parse_path("44'/1'/4'/1/0"),
amount=123400000 - 5000 - 100000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
# Test if the transaction can be signed normally
_, serialized_tx = check_sign_tx(self.client, "Testnet", [inp1], [out1, out2])
assert (
serialized_tx.hex()
== "0100000001243e15b53cc553d93ec4e27e16984adc3d885ef107c613a7577fea47f5dadcd2010000006b483045022100eedaadde3a771967beee39f1daa9e9450f72fccdec63488a96d71eeae4224b4002203a22be3c1677d3451c93a49550b69e8f8fc06328823c7e0f633dde13d67ef96b01210364430c9122948e525e2f1c6d88f00f47679274f0810fd8c63754954f310995c1ffffffff02a0860100000000001976a914b3cc67f3349974d0f1b50e9bb5dfdf226f888fa088ac18555907000000001976a914f80fb232a1e54b1fa732bc120cae72eabd7fcf6888ac00000000"
)
run_attack = False
def attack_processor(msg):
nonlocal run_attack
if msg.tx.inputs and msg.tx.inputs[0] == inp1:
if not run_attack:
run_attack = True
else:
msg.tx.inputs[0].address_n[2] = H_(12)
return msg
self.client.set_filter(proto.TxAck, attack_processor)
# Now run the attack, must trigger the exception
with self.client:
self.client.set_expected_responses(
[
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.TxRequest(
request_type=proto.RequestType.TXMETA,
details=proto.TxRequestDetailsType(tx_hash=TXHASH_d2dcda),
),
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(
request_index=0, tx_hash=TXHASH_d2dcda
),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(
request_index=0, tx_hash=TXHASH_d2dcda
),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(
request_index=1, tx_hash=TXHASH_d2dcda
),
),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.ButtonRequest(code=proto.ButtonRequestType.ConfirmOutput),
proto.TxRequest(
request_type=proto.RequestType.TXOUTPUT,
details=proto.TxRequestDetailsType(request_index=1),
),
proto.ButtonRequest(code=proto.ButtonRequestType.SignTx),
proto.TxRequest(
request_type=proto.RequestType.TXINPUT,
details=proto.TxRequestDetailsType(request_index=0),
),
proto.Failure(code=proto.FailureType.ProcessError),
]
)
# Now run the attack, must trigger the exception
with pytest.raises(CallException) as exc:
btc.sign_tx(
self.client,
"Testnet",
[inp1],
[out1, out2],
prev_txes=tx_cache("Testnet"),
)
assert exc.value.args[0] == proto.FailureType.ProcessError
if TREZOR_VERSION == 1:
assert exc.value.args[1].endswith("Failed to compile input")
else:
assert exc.value.args[1].endswith(
"Transaction has changed during signing"
)
def test_spend_coinbase(self):
# 25 TEST generated to m/1 (mfiGQVPcRcaEvQPYDErR34DcCovtxYvUUV)
# tx: d6da21677d7cca5f42fbc7631d062c9ae918a0254f7c6c22de8e8cb7fd5b8236
# input 0: 25.0027823 BTC
self.setup_mnemonic_nopin_nopassphrase()
inp1 = proto.TxInputType(
address_n=[1], # mfiGQVPcRcaEvQPYDErR34DcCovtxYvUUV
# amount=390000,
prev_hash=TXHASH_d6da21,
prev_index=0,
)
out1 = proto.TxOutputType(
address="mm6FM31rM5Vc3sw5D7kztiBg3jHUzyqF1g",
amount=2500278230 - 10000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
_, serialized_tx = check_sign_tx(
self.client, "Testnet", [inp1], [out1], unknown_path=True
)
# Accepted by network: tx
assert (
serialized_tx.hex()
== "010000000136825bfdb78c8ede226c7c4f25a018e99a2c061d63c7fb425fca7c7d6721dad6000000006a473044022047845c366eb24f40be315c7815a154513c444c7989eb80f7ce7ff6aeb703d26a022007c1f5efadf67c5889634fd7ac39a7ce78bffac291673e8772ecd8389c901d9f01210338d78612e990f2eea0c426b5e48a8db70b9d7ed66282b3b26511e0b1c75515a6ffffffff01c6100795000000001976a9143d2496e67f5f57a924353da42d4725b318e7a8ea88ac00000000"
)
def test_two_changes(self):
self.setup_mnemonic_allallall()
# see 87be0736f202f7c2bff0781b42bad3e0cdcb54761939da69ea793a3735552c56
# tx: e5040e1bc1ae7667ffb9e5248e90b2fb93cd9150234151ce90e14ab2f5933bcd
# input 0: 0.31 BTC
inp1 = proto.TxInputType(
address_n=parse_path("44'/1'/0'/0/0"),
# amount=31000000,
prev_hash=TXHASH_e5040e,
prev_index=0,
)
out1 = proto.TxOutputType(
address="msj42CCGruhRsFrGATiUuh25dtxYtnpbTx",
amount=30090000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out_change1 = proto.TxOutputType(
address_n=parse_path("44'/1'/0'/1/0"),
amount=900000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out_change2 = proto.TxOutputType(
address_n=parse_path("44'/1'/0'/1/1"),
amount=10000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
out_change2.force_confirm = True
check_sign_tx(self.client, "Testnet", [inp1], [out1, out_change1, out_change2])
def test_change_on_main_chain_allowed(self):
self.setup_mnemonic_allallall()
# see 87be0736f202f7c2bff0781b42bad3e0cdcb54761939da69ea793a3735552c56
# tx: e5040e1bc1ae7667ffb9e5248e90b2fb93cd9150234151ce90e14ab2f5933bcd
# input 0: 0.31 BTC
inp1 = proto.TxInputType(
address_n=parse_path("44'/1'/0'/0/0"),
# amount=31000000,
prev_hash=TXHASH_e5040e,
prev_index=0,
)
out1 = proto.TxOutputType(
address="msj42CCGruhRsFrGATiUuh25dtxYtnpbTx",
amount=30090000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
# change on main chain is allowed => treated as a change
out_change = proto.TxOutputType(
address_n=parse_path("44'/1'/0'/0/0"),
amount=900000,
script_type=proto.OutputScriptType.PAYTOADDRESS,
)
check_sign_tx(self.client, "Testnet", [inp1], [out1, out_change])
|
en
| 0.616248
|
# This file is part of the Trezor project. # # Copyright (C) 2012-2018 SatoshiLabs and contributors # # This library is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License version 3 # as published by the Free Software Foundation. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the License along with this library. # If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>. # No idea why the flow is like this. But it is. # and once more for good measure # tx: d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882 # input 0: 0.0039 BTC # 14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e # amount=390000, # Accepted by network: tx fd79435246dee76b2f159d2db08032d666c95adc544de64c8c49f474df4a7fee # see 87be0736f202f7c2bff0781b42bad3e0cdcb54761939da69ea793a3735552c56 # tx: e5040e1bc1ae7667ffb9e5248e90b2fb93cd9150234151ce90e14ab2f5933bcd # input 0: 0.31 BTC # amount=31000000, # tx: 6f90f3c7cbec2258b0971056ef3fe34128dbde30daa9c0639a898f9977299d54 # input 1: 10.00000000 BTC # mirio8q3gtv7fhdnmb3TpZ4EuafdzSs7zL # amount=1000000000, # tx: c275c333fd1b36bef4af316226c66a8b3693fbfcc081a5e16a2ae5fcb09e92bf # 1GA9u9TfCG7SWmKCveBumdA1TZpfom6ZdJ # amount=50000, # 1EcL6AyfQTyWKGvXwNSfsWoYnD3whzVFdu # tx: d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882 # input 0: 0.0039 BTC # 14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e # amount=390000, # tx: c6be22d34946593bcad1d2b013e12f74159e69574ffea21581dad115572e031c # input 1: 0.0010 BTC # tx: 58497a7757224d1ff1941488d23087071103e5bf855f4c1c44e5c8d9d82ca46e # input 1: 0.0011 BTC # 1CK7SJdcb8z9HuvVft3D91HLpLC6KSsGb # amount=100000, # 15AeAhtNJNKyowK8qPHwgpXkhsokzLtUpG # amount=110000, # 1CmzyJp9w3NafXMSEFH4SLYUPAVCSUrrJ5 # Accepted by network: tx 
c63e24ed820c5851b60c54613fbc4bcb37df6cd49b4c96143e99580a472f79fb # The transaction was produced before Trezor implemented BIP-66, so the signature # is now different and txhash doesn't match what is on the blockchain. # Tests if device implements serialization of len(inputs) correctly # tx 4a7b7e0403ae5607e473949cfa03f09f2cd8b0f404bf99ce10b7303d86280bf7 : 100 UTXO for spending for unit tests # 1NwN6UduuVkJi6sw3gSiKZaCY5rHgVXC2h # Accepted by network: tx 23d9d8eecf3abf6c0f0f3f8b0976a04792d7f1c9a4ea9b0a8931734949e27c92 # too big put in unit test, only check hash # Tests if device implements serialization of len(outputs) correctly # tx: c63e24ed820c5851b60c54613fbc4bcb37df6cd49b4c96143e99580a472f79fb # index 1: 0.0010 BTC # tx: 39a29e954977662ab3879c66fb251ef753e0912223a83d1dcb009111d28265e5 # index 1: 0.0254 BTC # 1CmzyJp9w3NafXMSEFH4SLYUPAVCSUrrJ5 # amount=100000, # 1CmzyJp9w3NafXMSEFH4SLYUPAVCSUrrJ5 # amount=2540000, # tx: 1570416eb4302cf52979afd5e6909e37d8fdd874301f7cc87e547e509cb1caa6 # input 0: 1.0 BTC # 1HWDaLTpTCTtRWyWqZkzWx1wex5NKyncLW # amount=100000000, # tx: d5f65ee80147b4bcc70b75e4bbf2d7382021b871bd8867ef8fa525ef50864882 # input 0: 0.0039 BTC # 14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e # amount=390000, # 14LmW5k4ssUrtbAB4255zdqv3b4w1TuX9e # amount=400000, # p2sh # Accepted by network: tx 8cc1f4adf7224ce855cf535a5104594a0004cb3b640d6714fdb00b9128832dd5 # This test is testing transaction with amount bigger than fits to uint32 # tx: 2bac7ad1dec654579a71ea9555463f63ac7b7df9d8ba67b4682bba4e514d0f0c:1 # input 1: 411102528330 Satoshi # seed allallall, bip32: m/44'/1'/0'/0/1 # This unit test attempts to modify data sent during ping-pong of streaming signing. # Because device is asking for human confirmation only during first pass (first input), # device must detect that data has been modified during other passes and fail to sign # such modified data (which has not been confirmed by the user). # Test firstly prepare normal transaction and send it to device. 
Then it send the same # transaction again, but change amount of output 1 during signing the second input. # 1CK7SJdcb8z9HuvVft3D91HLpLC6KSsGb # amount=100000, # 15AeAhtNJNKyowK8qPHwgpXkhsokzLtUpG # amount=110000, # 1CmzyJp9w3NafXMSEFH4SLYUPAVCSUrrJ5 # Test if the transaction can be signed normally # Accepted by network: tx c63e24ed820c5851b60c54613fbc4bcb37df6cd49b4c96143e99580a472f79fb # Sign output with another amount # Set up attack processors # This unit test attempts to modify input address after the Trezor checked # that it matches the change output # moUJnmge8SRXuediK7bW6t4YfrPqbE6hD7 # Test if the transaction can be signed normally # Now run the attack, must trigger the exception # Now run the attack, must trigger the exception # 25 TEST generated to m/1 (mfiGQVPcRcaEvQPYDErR34DcCovtxYvUUV) # tx: d6da21677d7cca5f42fbc7631d062c9ae918a0254f7c6c22de8e8cb7fd5b8236 # input 0: 25.0027823 BTC # mfiGQVPcRcaEvQPYDErR34DcCovtxYvUUV # amount=390000, # Accepted by network: tx # see 87be0736f202f7c2bff0781b42bad3e0cdcb54761939da69ea793a3735552c56 # tx: e5040e1bc1ae7667ffb9e5248e90b2fb93cd9150234151ce90e14ab2f5933bcd # input 0: 0.31 BTC # amount=31000000, # see 87be0736f202f7c2bff0781b42bad3e0cdcb54761939da69ea793a3735552c56 # tx: e5040e1bc1ae7667ffb9e5248e90b2fb93cd9150234151ce90e14ab2f5933bcd # input 0: 0.31 BTC # amount=31000000, # change on main chain is allowed => treated as a change
| 1.633751
| 2
|
src/profiles/migrations/0006_alter_relationship_managers.py
|
OmarYehia/django-social_network
| 0
|
6628568
|
# Generated by Django 3.2 on 2021-05-03 01:57
from django.db import migrations
import django.db.models.manager
class Migration(migrations.Migration):
dependencies = [
('profiles', '0005_profile_slug'),
]
operations = [
migrations.AlterModelManagers(
name='relationship',
managers=[
('object', django.db.models.manager.Manager()),
],
),
]
|
# Generated by Django 3.2 on 2021-05-03 01:57
from django.db import migrations
import django.db.models.manager
class Migration(migrations.Migration):
dependencies = [
('profiles', '0005_profile_slug'),
]
operations = [
migrations.AlterModelManagers(
name='relationship',
managers=[
('object', django.db.models.manager.Manager()),
],
),
]
|
en
| 0.89182
|
# Generated by Django 3.2 on 2021-05-03 01:57
| 1.648315
| 2
|
jupyterlab_code_formatter/handlers.py
|
edrogersamfam/jupyterlab_code_formatter
| 0
|
6628569
|
<gh_stars>0
import json
from notebook.notebookapp import NotebookWebApplication
from notebook.utils import url_path_join
from notebook.base.handlers import APIHandler
from jupyterlab_code_formatter.formatters import SERVER_FORMATTERS
def setup_handlers(web_app: NotebookWebApplication) -> None:
host_pattern = ".*$"
web_app.add_handlers(
host_pattern,
[
(
url_path_join(
web_app.settings["base_url"],
"/jupyterlab_code_formatter/formatters",
),
FormattersAPIHandler,
)
],
)
web_app.add_handlers(
host_pattern,
[
(
url_path_join(
web_app.settings["base_url"], "/jupyterlab_code_formatter/format"
),
FormatAPIHandler,
)
],
)
class FormattersAPIHandler(APIHandler):
def get(self) -> None:
"""Show what formatters are installed and avaliable."""
self.finish(
json.dumps(
{
"formatters": {
name: {
"enabled": formatter.importable,
"label": formatter.label,
}
for name, formatter in SERVER_FORMATTERS.items()
}
}
)
)
class FormatAPIHandler(APIHandler):
def post(self) -> None:
data = json.loads(self.request.body.decode("utf-8"))
formatter_instance = SERVER_FORMATTERS.get(data["formatter"])
if formatter_instance is None or not formatter_instance.importable:
self.set_status(404, "Formatter not found!")
self.finish()
else:
formatted_code = formatter_instance.format_code(
data["code"], **(data["options"] or {})
)
self.finish(json.dumps(formatted_code))
|
import json
from notebook.notebookapp import NotebookWebApplication
from notebook.utils import url_path_join
from notebook.base.handlers import APIHandler
from jupyterlab_code_formatter.formatters import SERVER_FORMATTERS
def setup_handlers(web_app: NotebookWebApplication) -> None:
host_pattern = ".*$"
web_app.add_handlers(
host_pattern,
[
(
url_path_join(
web_app.settings["base_url"],
"/jupyterlab_code_formatter/formatters",
),
FormattersAPIHandler,
)
],
)
web_app.add_handlers(
host_pattern,
[
(
url_path_join(
web_app.settings["base_url"], "/jupyterlab_code_formatter/format"
),
FormatAPIHandler,
)
],
)
class FormattersAPIHandler(APIHandler):
def get(self) -> None:
"""Show what formatters are installed and avaliable."""
self.finish(
json.dumps(
{
"formatters": {
name: {
"enabled": formatter.importable,
"label": formatter.label,
}
for name, formatter in SERVER_FORMATTERS.items()
}
}
)
)
class FormatAPIHandler(APIHandler):
def post(self) -> None:
data = json.loads(self.request.body.decode("utf-8"))
formatter_instance = SERVER_FORMATTERS.get(data["formatter"])
if formatter_instance is None or not formatter_instance.importable:
self.set_status(404, "Formatter not found!")
self.finish()
else:
formatted_code = formatter_instance.format_code(
data["code"], **(data["options"] or {})
)
self.finish(json.dumps(formatted_code))
|
en
| 0.945489
|
Show what formatters are installed and avaliable.
| 2.413022
| 2
|
aula20/aula20.py
|
jessicsous/Curso_Python
| 1
|
6628570
|
<reponame>jessicsous/Curso_Python
# índices
# 0123456789.......................33
frase = 'o rato roeu a roupa do rei de roma'
tamanho_frase = len(frase)
contador = 0
# iteração
while contador < tamanho_frase:
print(frase[contador], contador)
contador += 1
|
# índices
# 0123456789.......................33
frase = 'o rato roeu a roupa do rei de roma'
tamanho_frase = len(frase)
contador = 0
# iteração
while contador < tamanho_frase:
print(frase[contador], contador)
contador += 1
|
pt
| 0.799449
|
# índices # 0123456789.......................33 # iteração
| 3.559556
| 4
|
var/spack/repos/builtin/packages/r-testthat/package.py
|
xiki-tempula/spack
| 1
|
6628571
|
<gh_stars>1-10
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RTestthat(RPackage):
    """A unit testing system designed to be fun, flexible and easy to set
    up."""
    homepage = "https://github.com/hadley/testthat"
    url = "https://cloud.r-project.org/src/contrib/testthat_1.0.2.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/testthat"
    # Known releases, newest first; the sha256 checksum verifies each tarball.
    version('2.2.1', sha256='67ee0512bb312695c81fd74338bb8ce9e2e58763681ddbcdfdf35f52dfdb0b78')
    version('2.1.0', sha256='cf5fa7108111b32b86e70819352f86b57ab4e835221bb1e83642d52a1fdbcdd4')
    version('1.0.2', sha256='0ef7df0ace1fddf821d329f9d9a5d42296085350ae0d94af62c45bd203c8415e')
    # Unconditional R dependencies, needed by every supported version.
    depends_on('r@3.1:', type=('build', 'run'))
    depends_on('r-digest', type=('build', 'run'))
    depends_on('r-crayon@1.3.4:', type=('build', 'run'))
    depends_on('r-praise', type=('build', 'run'))
    depends_on('r-magrittr', type=('build', 'run'))
    depends_on('r-r6@2.2.0:', type=('build', 'run'))
    # Dependencies introduced by newer testthat releases (see `when=`).
    depends_on('r-cli', when='@2.0.0:', type=('build', 'run'))
    depends_on('r-rlang@0.3.0:', when='@2.0.0:', type=('build', 'run'))
    depends_on('r-withr@2.0.0:', when='@2.0.0:', type=('build', 'run'))
    depends_on('r-evaluate', when='@2.2.0:', type=('build', 'run'))
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RTestthat(RPackage):
"""A unit testing system designed to be fun, flexible and easy to set
up."""
homepage = "https://github.com/hadley/testthat"
url = "https://cloud.r-project.org/src/contrib/testthat_1.0.2.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/testthat"
version('2.2.1', sha256='67ee0512bb312695c81fd74338bb8ce9e2e58763681ddbcdfdf35f52dfdb0b78')
version('2.1.0', sha256='cf5fa7108111b32b86e70819352f86b57ab4e835221bb1e83642d52a1fdbcdd4')
version('1.0.2', sha256='0ef7df0ace1fddf821d329f9d9a5d42296085350ae0d94af62c45bd203c8415e')
depends_on('r@3.1:', type=('build', 'run'))
depends_on('r-digest', type=('build', 'run'))
depends_on('r-crayon@1.3.4:', type=('build', 'run'))
depends_on('r-praise', type=('build', 'run'))
depends_on('r-magrittr', type=('build', 'run'))
depends_on('r-r6@2.2.0:', type=('build', 'run'))
depends_on('r-cli', when='@2.0.0:', type=('build', 'run'))
depends_on('r-rlang@0.3.0:', when='@2.0.0:', type=('build', 'run'))
depends_on('r-withr@2.0.0:', when='@2.0.0:', type=('build', 'run'))
depends_on('r-evaluate', when='@2.2.0:', type=('build', 'run'))
|
en
| 0.793905
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) A unit testing system designed to be fun, flexible and easy to set up.
| 1.731004
| 2
|
tests/api/server/test_locations.py
|
FlorianRhiem/sampledb
| 0
|
6628572
|
<filename>tests/api/server/test_locations.py
# coding: utf-8
"""
"""
import requests
import pytest
import json
import sampledb
import sampledb.logic
import sampledb.models
from tests.test_utils import flask_server, app, app_context
@pytest.fixture
def auth_user(flask_server):
    """Create a fresh user with username/password authentication.

    Returns a tuple of ((username, password), user) so tests can both
    authenticate against the HTTP API and inspect the user object.
    """
    with flask_server.app.app_context():
        user = sampledb.logic.users.create_user(name="Basic User", email="<EMAIL>", type=sampledb.models.UserType.PERSON)
        sampledb.logic.authentication.add_other_authentication(user.id, 'username', 'password')
        assert user.id is not None
        return ('username', 'password'), user
@pytest.fixture
def auth(auth_user):
return auth_user[0]
@pytest.fixture
def user(auth_user):
return auth_user[1]
@pytest.fixture
def action():
    """Create a minimal sample-creation action whose schema only requires a name."""
    action = sampledb.logic.actions.create_action(
        action_type=sampledb.logic.actions.ActionType.SAMPLE_CREATION,
        name="",
        description="",
        schema={
            'title': 'Example Object',
            'type': 'object',
            'properties': {
                'name': {
                    'title': 'Object Name',
                    'type': 'text'
                }
            },
            'required': ['name']
        }
    )
    return action
def test_get_location(flask_server, auth, user):
r = requests.get(flask_server.base_url + 'api/v1/locations/1', auth=auth)
assert r.status_code == 404
location = sampledb.logic.locations.create_location(
name="Example Location",
description="This is an example location",
parent_location_id=None,
user_id=user.id
)
r = requests.get(flask_server.base_url + 'api/v1/locations/{}'.format(location.id), auth=auth)
assert r.status_code == 200
assert r.json() == {
'location_id': location.id,
'name': "Example Location",
'description': "This is an example location",
'parent_location_id': None
}
parent_location = sampledb.logic.locations.create_location(
name="Example Location",
description="This is an example location",
parent_location_id=None,
user_id=user.id
)
sampledb.logic.locations.update_location(
location_id=location.id,
name="Example Location",
description="This is an example location",
parent_location_id=parent_location.id,
user_id=user.id
)
r = requests.get(flask_server.base_url + 'api/v1/locations/{}'.format(location.id), auth=auth)
assert r.status_code == 200
assert r.json() == {
'location_id': location.id,
'name': "Example Location",
'description': "This is an example location",
'parent_location_id': parent_location.id
}
def test_get_locations(flask_server, auth, user):
r = requests.get(flask_server.base_url + 'api/v1/locations/', auth=auth)
assert r.status_code == 200
assert r.json() == []
location = sampledb.logic.locations.create_location(
name="Example Location",
description="This is an example location",
parent_location_id=None,
user_id=user.id
)
r = requests.get(flask_server.base_url + 'api/v1/locations/', auth=auth)
assert r.status_code == 200
assert r.json() == [
{
'location_id': location.id,
'name': "Example Location",
'description': "This is an example location",
'parent_location_id': None
}
]
def test_get_location_assignment(flask_server, auth, user, action):
r = requests.get(flask_server.base_url + 'api/v1/objects/1/locations/', auth=auth)
assert r.status_code == 404
data = {
'name': {
'_type': 'text',
'text': 'Example'
}
}
object = sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
r = requests.get(flask_server.base_url + 'api/v1/objects/{}/locations/0'.format(object.id), auth=auth)
assert r.status_code == 404
location = sampledb.logic.locations.create_location(
name="Example Location",
description="This is an example location",
parent_location_id=None,
user_id=user.id
)
sampledb.logic.locations.assign_location_to_object(object.id, location.id, user.id, user.id, "This is an example description")
r = requests.get(flask_server.base_url + 'api/v1/objects/{}/locations/0'.format(object.id), auth=auth)
assert r.status_code == 200
assert r.json() == {
'object_id': object.id,
'location_id': location.id,
'responsible_user_id': user.id,
'user_id': user.id,
'description': "This is an example description",
'utc_datetime': sampledb.logic.locations.get_object_location_assignments(object.id)[0].utc_datetime.strftime('%Y-%m-%d %H:%M:%S')
}
def test_get_location_assignments(flask_server, auth, user, action):
r = requests.get(flask_server.base_url + 'api/v1/objects/1/locations/', auth=auth)
assert r.status_code == 404
data = {
'name': {
'_type': 'text',
'text': 'Example'
}
}
object = sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
r = requests.get(flask_server.base_url + 'api/v1/objects/{}/locations/'.format(object.id), auth=auth)
assert r.status_code == 200
assert r.json() == []
location = sampledb.logic.locations.create_location(
name="Example Location",
description="This is an example location",
parent_location_id=None,
user_id=user.id
)
sampledb.logic.locations.assign_location_to_object(object.id, location.id, None, user.id, "This is an example description")
r = requests.get(flask_server.base_url + 'api/v1/objects/{}/locations/'.format(object.id), auth=auth)
assert r.status_code == 200
assert r.json() == [
{
'object_id': object.id,
'location_id': location.id,
'responsible_user_id': None,
'user_id': user.id,
'description': "This is an example description",
'utc_datetime': sampledb.logic.locations.get_object_location_assignments(object.id)[0].utc_datetime.strftime('%Y-%m-%d %H:%M:%S')
}
]
|
<filename>tests/api/server/test_locations.py
# coding: utf-8
"""
"""
import requests
import pytest
import json
import sampledb
import sampledb.logic
import sampledb.models
from tests.test_utils import flask_server, app, app_context
@pytest.fixture
def auth_user(flask_server):
with flask_server.app.app_context():
user = sampledb.logic.users.create_user(name="Basic User", email="<EMAIL>", type=sampledb.models.UserType.PERSON)
sampledb.logic.authentication.add_other_authentication(user.id, 'username', 'password')
assert user.id is not None
return ('username', 'password'), user
@pytest.fixture
def auth(auth_user):
return auth_user[0]
@pytest.fixture
def user(auth_user):
return auth_user[1]
@pytest.fixture
def action():
action = sampledb.logic.actions.create_action(
action_type=sampledb.logic.actions.ActionType.SAMPLE_CREATION,
name="",
description="",
schema={
'title': 'Example Object',
'type': 'object',
'properties': {
'name': {
'title': 'Object Name',
'type': 'text'
}
},
'required': ['name']
}
)
return action
def test_get_location(flask_server, auth, user):
r = requests.get(flask_server.base_url + 'api/v1/locations/1', auth=auth)
assert r.status_code == 404
location = sampledb.logic.locations.create_location(
name="Example Location",
description="This is an example location",
parent_location_id=None,
user_id=user.id
)
r = requests.get(flask_server.base_url + 'api/v1/locations/{}'.format(location.id), auth=auth)
assert r.status_code == 200
assert r.json() == {
'location_id': location.id,
'name': "Example Location",
'description': "This is an example location",
'parent_location_id': None
}
parent_location = sampledb.logic.locations.create_location(
name="Example Location",
description="This is an example location",
parent_location_id=None,
user_id=user.id
)
sampledb.logic.locations.update_location(
location_id=location.id,
name="Example Location",
description="This is an example location",
parent_location_id=parent_location.id,
user_id=user.id
)
r = requests.get(flask_server.base_url + 'api/v1/locations/{}'.format(location.id), auth=auth)
assert r.status_code == 200
assert r.json() == {
'location_id': location.id,
'name': "Example Location",
'description': "This is an example location",
'parent_location_id': parent_location.id
}
def test_get_locations(flask_server, auth, user):
r = requests.get(flask_server.base_url + 'api/v1/locations/', auth=auth)
assert r.status_code == 200
assert r.json() == []
location = sampledb.logic.locations.create_location(
name="Example Location",
description="This is an example location",
parent_location_id=None,
user_id=user.id
)
r = requests.get(flask_server.base_url + 'api/v1/locations/', auth=auth)
assert r.status_code == 200
assert r.json() == [
{
'location_id': location.id,
'name': "Example Location",
'description': "This is an example location",
'parent_location_id': None
}
]
def test_get_location_assignment(flask_server, auth, user, action):
r = requests.get(flask_server.base_url + 'api/v1/objects/1/locations/', auth=auth)
assert r.status_code == 404
data = {
'name': {
'_type': 'text',
'text': 'Example'
}
}
object = sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
r = requests.get(flask_server.base_url + 'api/v1/objects/{}/locations/0'.format(object.id), auth=auth)
assert r.status_code == 404
location = sampledb.logic.locations.create_location(
name="Example Location",
description="This is an example location",
parent_location_id=None,
user_id=user.id
)
sampledb.logic.locations.assign_location_to_object(object.id, location.id, user.id, user.id, "This is an example description")
r = requests.get(flask_server.base_url + 'api/v1/objects/{}/locations/0'.format(object.id), auth=auth)
assert r.status_code == 200
assert r.json() == {
'object_id': object.id,
'location_id': location.id,
'responsible_user_id': user.id,
'user_id': user.id,
'description': "This is an example description",
'utc_datetime': sampledb.logic.locations.get_object_location_assignments(object.id)[0].utc_datetime.strftime('%Y-%m-%d %H:%M:%S')
}
def test_get_location_assignments(flask_server, auth, user, action):
r = requests.get(flask_server.base_url + 'api/v1/objects/1/locations/', auth=auth)
assert r.status_code == 404
data = {
'name': {
'_type': 'text',
'text': 'Example'
}
}
object = sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
r = requests.get(flask_server.base_url + 'api/v1/objects/{}/locations/'.format(object.id), auth=auth)
assert r.status_code == 200
assert r.json() == []
location = sampledb.logic.locations.create_location(
name="Example Location",
description="This is an example location",
parent_location_id=None,
user_id=user.id
)
sampledb.logic.locations.assign_location_to_object(object.id, location.id, None, user.id, "This is an example description")
r = requests.get(flask_server.base_url + 'api/v1/objects/{}/locations/'.format(object.id), auth=auth)
assert r.status_code == 200
assert r.json() == [
{
'object_id': object.id,
'location_id': location.id,
'responsible_user_id': None,
'user_id': user.id,
'description': "This is an example description",
'utc_datetime': sampledb.logic.locations.get_object_location_assignments(object.id)[0].utc_datetime.strftime('%Y-%m-%d %H:%M:%S')
}
]
|
en
| 0.833554
|
# coding: utf-8
| 2.353326
| 2
|
versions/version_names.bzl
|
actions-on-google/gactions
| 49
|
6628573
|
<reponame>actions-on-google/gactions<filename>versions/version_names.bzl
"""Contains the version of the app."""
# The app version consists follows semantic versioning.
#
# We need to manually update this version after we make a new release. Generally,
# APP_VERSION points to the next immedtiate version successor.
#
# The process for updating this should be:
# 1. Spin off a new release.
# 2. Deploy to prod.
# 3. Submit commit to bump version APP_VERSION.
APP_VERSION = "3.2.0"
|
"""Contains the version of the app."""
# The app version consists follows semantic versioning.
#
# We need to manually update this version after we make a new release. Generally,
# APP_VERSION points to the next immedtiate version successor.
#
# The process for updating this should be:
# 1. Spin off a new release.
# 2. Deploy to prod.
# 3. Submit commit to bump version APP_VERSION.
APP_VERSION = "3.2.0"
|
en
| 0.851494
|
Contains the version of the app. # The app version follows semantic versioning. # # We need to manually update this version after we make a new release. Generally, # APP_VERSION points to the next immediate version successor. # # The process for updating this should be: # 1. Spin off a new release. # 2. Deploy to prod. # 3. Submit commit to bump version APP_VERSION.
| 1.145538
| 1
|
src/roles/succubus.py
|
ThijsEigenwijs/lykos
| 0
|
6628574
|
<filename>src/roles/succubus.py
import re
import random
import itertools
import math
from collections import defaultdict
from src.utilities import *
from src import channels, users, debuglog, errlog, plog
from src.functions import get_players, get_all_players, get_main_role, get_reveal_role, get_target
from src.decorators import command, event_listener
from src.containers import UserList, UserSet, UserDict, DefaultUserDict
from src.messages import messages
from src.status import try_misdirection, try_exchange
from src.events import Event
ENTRANCED = UserSet() # type: Set[users.User]
VISITED = UserDict() # type: Dict[users.User, users.User]
PASSED = UserSet() # type: Set[users.User]
ALL_SUCC_IDLE = True
@command("visit", chan=False, pm=True, playing=True, silenced=True, phases=("night",), roles=("succubus",))
def hvisit(var, wrapper, message):
"""Entrance a player, converting them to your team."""
if VISITED.get(wrapper.source):
wrapper.send(messages["succubus_already_visited"].format(VISITED[wrapper.source]))
return
target = get_target(var, wrapper, re.split(" +", message)[0], not_self_message="succubus_not_self")
if not target:
return
target = try_misdirection(var, wrapper.source, target)
if try_exchange(var, wrapper.source, target):
return
VISITED[wrapper.source] = target
PASSED.discard(wrapper.source)
if target not in get_all_players(("succubus",)):
ENTRANCED.add(target)
wrapper.send(messages["succubus_target_success"].format(target))
else:
wrapper.send(messages["harlot_success"].format(target))
if wrapper.source is not target:
if target not in get_all_players(("succubus",)):
target.send(messages["notify_succubus_target"].format(wrapper.source))
else:
target.send(messages["harlot_success"].format(wrapper.source))
revt = Event("succubus_visit", {})
revt.dispatch(var, wrapper.source, target)
debuglog("{0} (succubus) VISIT: {1} ({2})".format(wrapper.source, target, get_main_role(target)))
@command("pass", chan=False, pm=True, playing=True, silenced=True, phases=("night",), roles=("succubus",))
def pass_cmd(var, wrapper, message):
"""Do not entrance someone tonight."""
if VISITED.get(wrapper.source):
wrapper.send(messages["succubus_already_visited"].format(VISITED[wrapper.source]))
return
PASSED.add(wrapper.source)
wrapper.send(messages["succubus_pass"])
debuglog("{0} (succubus) PASS".format(wrapper.source))
@event_listener("harlot_visit")
def on_harlot_visit(evt, var, harlot, victim):
if victim in get_all_players(("succubus",)):
harlot.send(messages["notify_succubus_target"].format(victim))
victim.send(messages["succubus_harlot_success"].format(harlot))
ENTRANCED.add(harlot)
# entranced logic should run after team wins have already been determined (aka run last)
@event_listener("player_win", priority=6)
def on_player_win(evt, var, user, role, winner, survived):
if user in ENTRANCED:
evt.data["special"].append("entranced")
if winner != "succubi":
evt.data["won"] = False
else:
evt.data["iwon"] = True
if role == "succubus" and winner == "succubi":
evt.data["won"] = True
@event_listener("chk_win", priority=2)
def on_chk_win(evt, var, rolemap, mainroles, lpl, lwolves, lrealwolves):
lsuccubi = len(rolemap.get("succubus", ()))
lentranced = len([x for x in ENTRANCED if x not in var.DEAD])
if var.PHASE == "day" and lpl - lsuccubi == lentranced:
evt.data["winner"] = "succubi"
evt.data["message"] = messages["succubus_win"].format(plural("succubus", lsuccubi), plural("has", lsuccubi), plural("master's", lsuccubi))
elif not lsuccubi and lentranced and var.PHASE == "day" and lpl == lentranced:
evt.data["winner"] = "succubi"
evt.data["message"] = messages["entranced_win"]
@event_listener("new_role")
def on_new_role(evt, var, player, old_role):
if old_role == "succubus" and evt.data["role"] != "succubus":
del VISITED[:player:]
PASSED.discard(player)
if evt.data["role"] == "succubus" and player in ENTRANCED:
ENTRANCED.remove(player)
player.send(messages["no_longer_entranced"])
@event_listener("del_player")
def on_del_player(evt, var, player, all_roles, death_triggers):
global ALL_SUCC_IDLE
if "succubus" not in all_roles:
return
if player in VISITED:
# if it's night, also unentrance the person they visited
if var.PHASE == "night" and var.GAMEPHASE == "night":
if VISITED[player] in ENTRANCED:
ENTRANCED.discard(VISITED[player])
VISITED[player].send(messages["entranced_revert_win"])
del VISITED[player]
# if all succubi idled out (every last one of them), un-entrance people
# death_triggers is False for an idle-out, so we use that to determine which it is
if death_triggers:
ALL_SUCC_IDLE = False
if ALL_SUCC_IDLE and not get_all_players(("succubus",)):
while ENTRANCED:
e = ENTRANCED.pop()
e.send(messages["entranced_revert_win"])
@event_listener("transition_day_resolve", priority=1)
def on_transition_day_resolve(evt, var, victim):
if victim in get_all_players(("succubus",)) and VISITED.get(victim) and victim not in evt.data["dead"] and evt.data["killers"][victim] == ["@wolves"]:
evt.data["message"][victim].append(messages["target_not_home"])
evt.data["novictmsg"] = False
evt.stop_processing = True
evt.prevent_default = True
@event_listener("transition_day_resolve_end", priority=1)
def on_transition_day_resolve_end(evt, var, victims):
for victim in victims:
if victim in evt.data["dead"] and victim in VISITED.values() and "@wolves" in evt.data["killers"][victim]:
for succubus in VISITED:
if VISITED[succubus] is victim and succubus not in evt.data["dead"]:
if var.ROLE_REVEAL in ("on", "team"):
evt.data["message"][succubus].append(messages["visited_victim"].format(succubus, get_reveal_role(succubus)))
else:
evt.data["message"][succubus].append(messages["visited_victim_noreveal"].format(succubus))
evt.data["dead"].append(succubus)
@event_listener("chk_nightdone")
def on_chk_nightdone(evt, var):
evt.data["actedcount"] += len(VISITED) + len(PASSED)
evt.data["nightroles"].extend(get_all_players(("succubus",)))
@event_listener("transition_night_end", priority=2)
def on_transition_night_end(evt, var):
succubi = get_all_players(("succubus",))
for succubus in succubi:
pl = get_players()
random.shuffle(pl)
pl.remove(succubus)
to_send = "succubus_notify"
if succubus.prefers_simple():
to_send = "succubus_simple"
succ = []
for p in pl:
if p in succubi:
succ.append("{0} (succubus)".format(p))
else:
succ.append(p.nick)
succubus.send(messages[to_send], messages["players_list"].format(", ".join(succ)), sep="\n")
@event_listener("gun_shoot")
def on_gun_shoot(evt, var, user, target):
if target in get_all_players(("succubus",)):
evt.data["kill"] = False
@event_listener("begin_day")
def on_begin_day(evt, var):
VISITED.clear()
PASSED.clear()
@event_listener("new_role")
def on_new_role(evt, var, user, old_role):
if evt.data["role"] == "succubus" and user in ENTRANCED:
ENTRANCED.remove(user)
user.send(messages["no_longer_entranced"])
@event_listener("reset")
def on_reset(evt, var):
global ALL_SUCC_IDLE
ALL_SUCC_IDLE = True
ENTRANCED.clear()
VISITED.clear()
PASSED.clear()
@event_listener("revealroles")
def on_revealroles(evt, var, wrapper):
if ENTRANCED:
evt.data["output"].append("\u0002entranced players\u0002: {0}".format(", ".join(p.nick for p in ENTRANCED)))
@event_listener("get_role_metadata")
def on_get_role_metadata(evt, var, kind):
if kind == "role_categories":
evt.data["succubus"] = {"Neutral", "Win Stealer", "Cursed", "Nocturnal"}
# vim: set sw=4 expandtab:
|
<filename>src/roles/succubus.py
import re
import random
import itertools
import math
from collections import defaultdict
from src.utilities import *
from src import channels, users, debuglog, errlog, plog
from src.functions import get_players, get_all_players, get_main_role, get_reveal_role, get_target
from src.decorators import command, event_listener
from src.containers import UserList, UserSet, UserDict, DefaultUserDict
from src.messages import messages
from src.status import try_misdirection, try_exchange
from src.events import Event
ENTRANCED = UserSet() # type: Set[users.User]
VISITED = UserDict() # type: Dict[users.User, users.User]
PASSED = UserSet() # type: Set[users.User]
ALL_SUCC_IDLE = True
@command("visit", chan=False, pm=True, playing=True, silenced=True, phases=("night",), roles=("succubus",))
def hvisit(var, wrapper, message):
"""Entrance a player, converting them to your team."""
if VISITED.get(wrapper.source):
wrapper.send(messages["succubus_already_visited"].format(VISITED[wrapper.source]))
return
target = get_target(var, wrapper, re.split(" +", message)[0], not_self_message="succubus_not_self")
if not target:
return
target = try_misdirection(var, wrapper.source, target)
if try_exchange(var, wrapper.source, target):
return
VISITED[wrapper.source] = target
PASSED.discard(wrapper.source)
if target not in get_all_players(("succubus",)):
ENTRANCED.add(target)
wrapper.send(messages["succubus_target_success"].format(target))
else:
wrapper.send(messages["harlot_success"].format(target))
if wrapper.source is not target:
if target not in get_all_players(("succubus",)):
target.send(messages["notify_succubus_target"].format(wrapper.source))
else:
target.send(messages["harlot_success"].format(wrapper.source))
revt = Event("succubus_visit", {})
revt.dispatch(var, wrapper.source, target)
debuglog("{0} (succubus) VISIT: {1} ({2})".format(wrapper.source, target, get_main_role(target)))
@command("pass", chan=False, pm=True, playing=True, silenced=True, phases=("night",), roles=("succubus",))
def pass_cmd(var, wrapper, message):
"""Do not entrance someone tonight."""
if VISITED.get(wrapper.source):
wrapper.send(messages["succubus_already_visited"].format(VISITED[wrapper.source]))
return
PASSED.add(wrapper.source)
wrapper.send(messages["succubus_pass"])
debuglog("{0} (succubus) PASS".format(wrapper.source))
@event_listener("harlot_visit")
def on_harlot_visit(evt, var, harlot, victim):
if victim in get_all_players(("succubus",)):
harlot.send(messages["notify_succubus_target"].format(victim))
victim.send(messages["succubus_harlot_success"].format(harlot))
ENTRANCED.add(harlot)
# entranced logic should run after team wins have already been determined (aka run last)
@event_listener("player_win", priority=6)
def on_player_win(evt, var, user, role, winner, survived):
if user in ENTRANCED:
evt.data["special"].append("entranced")
if winner != "succubi":
evt.data["won"] = False
else:
evt.data["iwon"] = True
if role == "succubus" and winner == "succubi":
evt.data["won"] = True
@event_listener("chk_win", priority=2)
def on_chk_win(evt, var, rolemap, mainroles, lpl, lwolves, lrealwolves):
lsuccubi = len(rolemap.get("succubus", ()))
lentranced = len([x for x in ENTRANCED if x not in var.DEAD])
if var.PHASE == "day" and lpl - lsuccubi == lentranced:
evt.data["winner"] = "succubi"
evt.data["message"] = messages["succubus_win"].format(plural("succubus", lsuccubi), plural("has", lsuccubi), plural("master's", lsuccubi))
elif not lsuccubi and lentranced and var.PHASE == "day" and lpl == lentranced:
evt.data["winner"] = "succubi"
evt.data["message"] = messages["entranced_win"]
@event_listener("new_role")
def on_new_role(evt, var, player, old_role):
if old_role == "succubus" and evt.data["role"] != "succubus":
del VISITED[:player:]
PASSED.discard(player)
if evt.data["role"] == "succubus" and player in ENTRANCED:
ENTRANCED.remove(player)
player.send(messages["no_longer_entranced"])
@event_listener("del_player")
def on_del_player(evt, var, player, all_roles, death_triggers):
global ALL_SUCC_IDLE
if "succubus" not in all_roles:
return
if player in VISITED:
# if it's night, also unentrance the person they visited
if var.PHASE == "night" and var.GAMEPHASE == "night":
if VISITED[player] in ENTRANCED:
ENTRANCED.discard(VISITED[player])
VISITED[player].send(messages["entranced_revert_win"])
del VISITED[player]
# if all succubi idled out (every last one of them), un-entrance people
# death_triggers is False for an idle-out, so we use that to determine which it is
if death_triggers:
ALL_SUCC_IDLE = False
if ALL_SUCC_IDLE and not get_all_players(("succubus",)):
while ENTRANCED:
e = ENTRANCED.pop()
e.send(messages["entranced_revert_win"])
@event_listener("transition_day_resolve", priority=1)
def on_transition_day_resolve(evt, var, victim):
if victim in get_all_players(("succubus",)) and VISITED.get(victim) and victim not in evt.data["dead"] and evt.data["killers"][victim] == ["@wolves"]:
evt.data["message"][victim].append(messages["target_not_home"])
evt.data["novictmsg"] = False
evt.stop_processing = True
evt.prevent_default = True
@event_listener("transition_day_resolve_end", priority=1)
def on_transition_day_resolve_end(evt, var, victims):
for victim in victims:
if victim in evt.data["dead"] and victim in VISITED.values() and "@wolves" in evt.data["killers"][victim]:
for succubus in VISITED:
if VISITED[succubus] is victim and succubus not in evt.data["dead"]:
if var.ROLE_REVEAL in ("on", "team"):
evt.data["message"][succubus].append(messages["visited_victim"].format(succubus, get_reveal_role(succubus)))
else:
evt.data["message"][succubus].append(messages["visited_victim_noreveal"].format(succubus))
evt.data["dead"].append(succubus)
@event_listener("chk_nightdone")
def on_chk_nightdone(evt, var):
evt.data["actedcount"] += len(VISITED) + len(PASSED)
evt.data["nightroles"].extend(get_all_players(("succubus",)))
@event_listener("transition_night_end", priority=2)
def on_transition_night_end(evt, var):
succubi = get_all_players(("succubus",))
for succubus in succubi:
pl = get_players()
random.shuffle(pl)
pl.remove(succubus)
to_send = "succubus_notify"
if succubus.prefers_simple():
to_send = "succubus_simple"
succ = []
for p in pl:
if p in succubi:
succ.append("{0} (succubus)".format(p))
else:
succ.append(p.nick)
succubus.send(messages[to_send], messages["players_list"].format(", ".join(succ)), sep="\n")
@event_listener("gun_shoot")
def on_gun_shoot(evt, var, user, target):
if target in get_all_players(("succubus",)):
evt.data["kill"] = False
@event_listener("begin_day")
def on_begin_day(evt, var):
VISITED.clear()
PASSED.clear()
@event_listener("new_role")
def on_new_role(evt, var, user, old_role):
if evt.data["role"] == "succubus" and user in ENTRANCED:
ENTRANCED.remove(user)
user.send(messages["no_longer_entranced"])
@event_listener("reset")
def on_reset(evt, var):
global ALL_SUCC_IDLE
ALL_SUCC_IDLE = True
ENTRANCED.clear()
VISITED.clear()
PASSED.clear()
@event_listener("revealroles")
def on_revealroles(evt, var, wrapper):
if ENTRANCED:
evt.data["output"].append("\u0002entranced players\u0002: {0}".format(", ".join(p.nick for p in ENTRANCED)))
@event_listener("get_role_metadata")
def on_get_role_metadata(evt, var, kind):
if kind == "role_categories":
evt.data["succubus"] = {"Neutral", "Win Stealer", "Cursed", "Nocturnal"}
# vim: set sw=4 expandtab:
|
en
| 0.892886
|
# type: Set[users.User] # type: Dict[users.User, users.User] # type: Set[users.User] Entrance a player, converting them to your team. Do not entrance someone tonight. # entranced logic should run after team wins have already been determined (aka run last) # if it's night, also unentrance the person they visited # if all succubi idled out (every last one of them), un-entrance people # death_triggers is False for an idle-out, so we use that to determine which it is # vim: set sw=4 expandtab:
| 2.186222
| 2
|
src/mtenv/tests/wrappers/ntasks_test.py
|
NagisaZj/ac-teach
| 56
|
6628575
|
<filename>src/mtenv/tests/wrappers/ntasks_test.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List
import pytest
from mtenv.envs.control.cartpole import MTCartPole
from mtenv.wrappers.ntasks import NTasks as NTasksWrapper
from tests.utils.utils import validate_mtenv
def get_valid_num_tasks() -> List[int]:
    """Return task counts that the NTasks wrapper should accept."""
    return [10 ** exponent for exponent in range(3)]
def get_invalid_num_tasks() -> List[int]:
    """Return task counts that the NTasks wrapper must reject."""
    return list(range(-1, 1))
@pytest.mark.parametrize("n_tasks", get_valid_num_tasks())
def test_ntasks_wrapper_with_valid_input(n_tasks):
env = MTCartPole()
env = NTasksWrapper(env, n_tasks=n_tasks)
validate_mtenv(env=env)
@pytest.mark.parametrize("n_tasks", get_invalid_num_tasks())
def test_ntasks_wrapper_with_invalid_input(n_tasks):
with pytest.raises(Exception):
env = MTCartPole()
env = NTasksWrapper(env, n_tasks=n_tasks)
validate_mtenv(env=env)
|
<filename>src/mtenv/tests/wrappers/ntasks_test.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List
import pytest
from mtenv.envs.control.cartpole import MTCartPole
from mtenv.wrappers.ntasks import NTasks as NTasksWrapper
from tests.utils.utils import validate_mtenv
def get_valid_num_tasks() -> List[int]:
return [1, 10, 100]
def get_invalid_num_tasks() -> List[int]:
return [-1, 0]
@pytest.mark.parametrize("n_tasks", get_valid_num_tasks())
def test_ntasks_wrapper_with_valid_input(n_tasks):
env = MTCartPole()
env = NTasksWrapper(env, n_tasks=n_tasks)
validate_mtenv(env=env)
@pytest.mark.parametrize("n_tasks", get_invalid_num_tasks())
def test_ntasks_wrapper_with_invalid_input(n_tasks):
    # Invalid counts must raise during construction or validation.
    with pytest.raises(Exception):
        base_env = MTCartPole()
        wrapped_env = NTasksWrapper(base_env, n_tasks=n_tasks)
        validate_mtenv(env=wrapped_env)
|
en
| 0.928377
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
| 2.317034
| 2
|
rally-jobs/plugins/vpn_utils.py
|
swordboy/neutron-vpnaas-7.0.0-vpnenhance
| 2
|
6628576
|
<reponame>swordboy/neutron-vpnaas-7.0.0-vpnenhance
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import concurrent.futures
import exceptions
import os
from oslo_config import cfg
import re
import stat
import time
def noop(*args, **kwargs):
    """Accept any arguments and do nothing.

    Used below to stub out ``cfg.CONF.register_cli_opts`` so importing
    this plugin does not register CLI options.
    """
    return None
cfg.CONF.register_cli_opts = noop
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils as linux_utils
from rally.common import log as logging
from rally.plugins.openstack import scenario
from rally.task import utils as task_utils
LOG = logging.getLogger(__name__)
class VpnUtils(scenario.OpenStackScenario):
"""Utility class for VPNaaS scenarios with basic atomic actions."""
SUBNET_IP_VERSION = 4
    def _create_network(self, neutron_client, network_suffix, cidr):
        """Create neutron network
        :param neutron_client: neutron client
        :param network_suffix: str, suffix name of the new network
        :param cidr: subnet cidr
        :return: router, subnet , network
        """
        # Local helper: create a network named "rally_network_<suffix>";
        # isExternal marks it as an external (provider) network.
        def create_network(neutron_client, network_suffix, isExternal=False):
            network_name = "rally_network_" + network_suffix
            network_args = {"name": network_name,
                            "router:external": isExternal}
            LOG.debug("ADDING NEW NETWORK: %s", network_name)
            rally_network = neutron_client.create_network(
                {"network": network_args})
            return rally_network
        # Local helper: attach an IPv4 subnet with the given CIDR to the
        # freshly created network.
        def create_subnet(neutron_client, rally_network, network_suffix, cidr):
            network_id = rally_network["network"]["id"]
            subnet_name = "rally_subnet_" + network_suffix
            subnet_args = {"name": subnet_name,
                           "cidr": cidr,
                           "network_id": network_id,
                           "ip_version": self.SUBNET_IP_VERSION}
            LOG.debug("ADDING SUBNET: %s", subnet_name)
            rally_subnet = neutron_client.create_subnet(
                {"subnet": subnet_args})
            return rally_subnet
        # Local helper: create a router with its gateway on the public
        # network and an interface on the private subnet.
        def create_router(private_subnet, public_network_id):
            router_name = "rally_router_" + network_suffix
            gw_info = {"network_id": public_network_id}
            router_args = {"name": router_name,
                           "external_gateway_info": gw_info}
            LOG.debug("ADDING ROUTER: %s", router_name)
            rally_router = neutron_client.create_router(
                {"router": router_args})
            # create router interface - connect subnet to it
            LOG.debug("ADDING ROUTER INTERFACE")
            neutron_client.add_interface_router(
                rally_router['router']["id"],
                {"subnet_id": private_subnet["subnet"]["id"]})
            return rally_router
        # check for external network and create one if not found
        def get_external_network():
            # for/else: the else branch runs only when no existing
            # external network was found, in which case one is created
            # with admin credentials.
            for network in neutron_client.list_networks()['networks']:
                if network['router:external']:
                    public_network_id = network['id']
                    LOG.debug("PUBLIC NETWORK ALREADY EXISTS")
                    break
            else:
                public_network = create_network(self.admin_clients("neutron"),
                                                "public", True)
                create_subnet(self.admin_clients("neutron"), public_network,
                              "public", "172.16.1.0/24")
                public_network_id = public_network['network']['id']
            return public_network_id
        # create public network_id
        public_network_id = get_external_network()
        # create private network
        private_network = create_network(neutron_client, network_suffix)
        # create subnet
        private_subnet = create_subnet(neutron_client,
                                       private_network,
                                       network_suffix,
                                       cidr)
        # create router
        rally_router = create_router(private_subnet, public_network_id)
        return rally_router, private_network, private_subnet
def _create_keypair(self, nova_client, key_name, key_file):
"""Create keypair
:param nova_client: nova_client
:param key_name: key_name
:param key_file: key_file_name
:return: keypair
"""
LOG.debug("ADDING NEW KEYPAIR")
keypair = nova_client.keypairs.create(key_name)
f = open(key_file, 'w')
os.chmod(key_file, stat.S_IREAD | stat.S_IWRITE)
f.write(keypair.private_key)
f.close()
return keypair
def _create_nova_vm(self, nova_client, keypair, **kwargs):
"""Create nova instance
:param nova_client: nova client
:param keypair: str, key-pair to allow ssh
:return: new nova instance
"""
# add sec-group
sec_group_suffix = "rally_secgroup_" + kwargs["sec_group_suffix"]
LOG.debug("ADDING NEW SECURITY GROUP %s", sec_group_suffix)
secgroup = nova_client.security_groups.create(sec_group_suffix,
sec_group_suffix)
# add security rules for SSH and ICMP
nova_client.security_group_rules.create(secgroup.id, from_port=22,
to_port=22, ip_protocol="tcp", cidr="0.0.0.0/0")
nova_client.security_group_rules.create(secgroup.id, from_port=-1,
to_port=-1, ip_protocol="icmp", cidr="0.0.0.0/0")
# boot new nova instance
server_name = "rally_server_" + (kwargs["server_suffix"])
LOG.debug("BOOTING NEW INSTANCE: %s", server_name)
server = nova_client.servers.create(server_name,
image=kwargs["image"],
flavor=kwargs["flavor"],
key_name=keypair.name,
security_groups=[secgroup.id],
nics=kwargs["nics"])
# wait for instance to become active
LOG.debug("WAITING FOR INSTANCE TO BECOME ACTIVE")
server = task_utils.wait_for(
server,
is_ready=task_utils.resource_is("ACTIVE"),
update_resource=task_utils.get_from_manager(),
timeout=kwargs["nova_server_boot_timeout"],
check_interval=5)
LOG.debug("SERVER STATUS: %s", server.status)
# assert if instance is 'active'
assert('ACTIVE' == server.status), (
"THE INSTANCE IS NOT IN ACTIVE STATE")
return server
def _get_server_ip(self, nova_client, server_id, network_suffix):
"""
:param nova_client: nova client
:param nova_id: uuid of the nova instance whose ip is wanted
:param network_suffix: network name suffix
:return: ip address of the instance
"""
network_name = "rally_network_" + network_suffix
server_details = nova_client.servers.get(server_id)
server_ip = server_details.addresses[network_name][0]["addr"]
return server_ip
def _create_ike_policy(self,
neutron_client,
**kwargs):
"""Creates IKE policy
:param neutron_client:neutron client
:return:created ike_policy
"""
LOG.debug("CREATING IKE_POLICY")
ike_policy = neutron_client.create_ikepolicy({
"ikepolicy": {
"phase1_negotiation_mode":
kwargs["phase1_negotiation_mode"] or "main",
"auth_algorithm": kwargs["auth_algorithm"] or "sha1",
"encryption_algorithm":
kwargs["encryption_algorithm"] or "aes-128",
"pfs": kwargs["pfs"] or "group5",
"lifetime": {
"units": "seconds",
"value": kwargs["value"] or 7200},
"ike_version": kwargs["ike_version"] or "v1",
"name": "rally_ikepolicy"
}
})
return ike_policy
def _create_ipsec_policy(self,
neutron_client,
**kwargs):
"""Creates IPSEC policy
:param neutron_client: neutron client
:return: created IPSEC policy
"""
LOG.debug("CREATING IPSEC_POLICY")
ipsec_policy = neutron_client.create_ipsecpolicy({
"ipsecpolicy": {
"name": "rally_ipsecpolicy",
"transform_protocol": kwargs["transform_protocol"] or "esp",
"auth_algorithm": kwargs["auth_algorithm"] or "sha1",
"encapsulation_mode": kwargs["encapsulation_mode"] or "tunnel",
"encryption_algorithm":
kwargs["encryption_algorithm"] or "aes-128",
"pfs": kwargs["pfs"] or "group5",
"lifetime": {
"units": "seconds",
"value": kwargs["value"] or 7200
}
}
})
return ipsec_policy
def _create_vpn_service(self, neutron_client, rally_subnet, rally_router,
name=None):
"""Creates VPN service endpoints
:param neutron_client: neutron client
:param name: name of vpn service
:param rally_subnet: local subnet
:param rally_router: router endpoint
:param admin_state_up: admin state of the vpn_service
:return: vpn_service
"""
LOG.debug("CREATING VPN_SERVICE")
vpn_service = neutron_client.create_vpnservice({
"vpnservice": {
"subnet_id": rally_subnet["subnet"]["id"],
"router_id": rally_router["router"]["id"],
"name": "rally_vpn_service_" + name,
"admin_state_up": True
}
})
return vpn_service
def _create_ipsec_site_connection(
self, neutron_client, ike_policy,
ipsec_policy, peer_cidrs,
peer_id, peer_address,
vpn_service, name=None,
mtu=None, secret=None):
"""Creates IPSEC site connections
:param neutron_client: neutron client
:param ike_policy: ikepolicy
:param ipsec_policy: ipsecpolicy
:param peer_cidrs: list of peer cidrs
:param peer_id: peer_id
:param peer_address: peer_address
:param vpn_service: vpn_service
:param secret: pre shared secret
:param admin_state_up: admin state of the ipsec site connections
:param mtu: max transmission unit
:param name: name of the ipsec site connections
:return:ipsec_site_connection
"""
LOG.debug("CREATING IPSEC_SITE_CONNECTION")
ipsec_site_connection = neutron_client.create_ipsec_site_connection({
"ipsec_site_connection": {
"psk": secret or "secret",
"initiator": "bi-directional",
"ipsecpolicy_id": ipsec_policy["ipsecpolicy"]["id"],
"admin_state_up": True,
"peer_cidrs": peer_cidrs,
"mtu": mtu or "1500",
"ikepolicy_id": ike_policy["ikepolicy"]["id"],
"dpd": {
"action": "disabled",
"interval": 60,
"timeout": 240
},
"vpnservice_id": vpn_service["vpnservice"]["id"],
"peer_address": peer_address,
"peer_id": peer_id,
"name": "rally_ipsec_site_connection_" + name
}
})
return ipsec_site_connection
def _get_resource(self, resource_tag, resource_id):
"""Gets the resource(vpn_service or ipsec_site_connection)
:param resource_tag: "vpnservice" or "ipsec_site_connection"
:param resource_id: id of the resource
:return:
"""
neutron_client = self.clients("neutron")
if resource_tag == "vpnservice":
vpn_service = neutron_client.show_vpnservice(resource_id)
if vpn_service:
return vpn_service
elif resource_tag == 'ipsec_site_connection':
ipsec_site_connection = neutron_client.show_ipsec_site_connection(
resource_id)
if ipsec_site_connection:
return ipsec_site_connection
def _wait_for_status_change(self, resource, final_status,
resource_tag, wait_timeout=60,
check_interval=1):
"""Wait for resource's status change
Wait till the status of the resource changes to final state or till
the time exceeds the wait_timeout value.
:param resource: resource whose status has to be checked
:param final_status: desired final status of the resource
:param resource_tag: to identify the resource as vpnservice or
ipser_site_connection
:param wait_timeout: timeout value in seconds
:param check_interval: time to sleep before each check for the status
change
:return: resource
"""
start_time = time.time()
while True:
resource = self._get_resource(
resource_tag,
resource[resource_tag]['id'])
current_status = resource[resource_tag]['status']
if current_status == final_status:
return resource
time.sleep(check_interval)
if time.time() - start_time > wait_timeout:
self._cleanup(called_from="VpnUtils._wait_for_status_change")
raise exceptions.Exception("Timeout while waiting for status "
"change to %s.", final_status)
    def _assert_statuses(self, vpn_service, ipsec_site_connection,
                         ipsec_site_connection_creation_timeout=60,
                         vpn_service_creation_timeout=60):
        """Assert statuses of vpn_service and ipsec_site_connection
        Blocks until each resource reaches ACTIVE (or its timeout expires)
        and asserts the final state.
        :param vpn_service: vpn_service
        :param ipsec_site_connection: ipsec_site_connection
        :param ipsec_site_connection_creation_timeout: timeout in seconds
        :param vpn_service_creation_timeout: timeout in seconds
        :return:
        """
        # The vpn service is checked first; the site connection rides on
        # top of it.
        vpn_service = self._wait_for_status_change(
            vpn_service,
            resource_tag="vpnservice",
            final_status="ACTIVE",
            wait_timeout=vpn_service_creation_timeout,
            check_interval=5)
        LOG.debug("VPN-SERVICE STATUS: %s",
                  vpn_service['vpnservice']['status'])
        assert('ACTIVE' == vpn_service['vpnservice']['status']), (
            "VPN_SERVICE IS NOT IN ACTIVE STATE")
        ipsec_site_connection = self._wait_for_status_change(
            ipsec_site_connection,
            resource_tag="ipsec_site_connection",
            final_status="ACTIVE",
            wait_timeout=ipsec_site_connection_creation_timeout,
            check_interval=5)
        LOG.debug("IPSEC_SITE_CONNECTION STATUS: %s",
                  ipsec_site_connection['ipsec_site_connection']['status'])
        assert('ACTIVE' ==
               ipsec_site_connection['ipsec_site_connection']['status']), (
            "THE INSTANCE IS NOT IN ACTIVE STATE")
def _verify_vpn_connectivity(self, server_ips, snat_namespaces,
qrouter_namespaces, key_file_names,
first, second):
"""Verifies the vpn connectivity between the endpoints
:param server_ips: list of private ips of the servers between
which the vpn connectivity has to verified.
:param snat_namespaces: snat_namespaces of the 2 routers
:param qrouter_namespaces: qrouter_namespaces of the 2 routers
:param key_file_names: path to private key files
:param first: parameter to point to the self
:param second: parameter to point to the peer
:return: True or False
"""
LOG.debug("VERIFY THE VPN CONNECTIVITY")
qg = self._get_interfaces(snat_namespaces[second])
if qg:
p = re.compile(r"qg-\w+-\w+")
m = p.search(qg)
if m:
qg_interface = m.group()
else:
qg_interface = None
if qg_interface:
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as e:
tcpdump_future = e.submit(self._start_tcpdump,
snat_namespaces[second],
qg_interface)
ssh_future = e.submit(self._ssh_and_ping_server,
server_ips[first],
server_ips[second],
qrouter_namespaces[first],
key_file_names[first])
assert(True == ssh_future.result()), "SSH/Ping failed"
lines = tcpdump_future.result().split('\n')
for line in lines:
if 'ESP' in line:
return True
return False
def _get_namespace(self):
"""Get namespaces
:return: namespaces
"""
LOG.debug("GET NAMESPACES USING 'ip netns'")
try:
cmd = ['ip', 'netns']
cmd = ip_lib.add_namespace_to_cmd(cmd)
namespaces = linux_utils.execute(cmd)
LOG.debug("%s", namespaces)
return namespaces
except RuntimeError:
return None
def _wait_for_namespace_creation(self, namespace, rally_router):
"""Wait for namespace creation
:param namespace: snat/qrouter namespace
:param rally_router: rally_router
:return:
"""
start_time = time.time()
while True:
namespaces = self._get_namespace().split()
for line in namespaces:
if line == (namespace + rally_router["router"]["id"]):
namespace = line
return namespace
time.sleep(1)
if time.time() - start_time > 20:
self._cleanup(called_from="_wait_for_namespace_creation")
raise exceptions.Exception("Timeout while waiting for"
" namespaces to be created")
def _ping(self, namespace, ip):
"""Pings ip address from network namespace.
In order to ping it uses following cli command:
ip netns exec <namespace> ping -c 4 -q <ip>
:param namespace: namespace
:param ip: ip to ping to
"""
LOG.debug("PING %s FROM THE NAMESPACE %s", ip, namespace)
try:
count = 4
cmd = ['ping', '-w', 2 * count, '-c', count, ip]
cmd = ip_lib.add_namespace_to_cmd(cmd, namespace)
ping_result = linux_utils.execute(cmd, run_as_root=True)
LOG.debug("%s", ping_result)
return True
except RuntimeError:
return False
def _get_interfaces(self, namespace):
"""Do an "ip a".
In order to do "ip a" it uses following cli command:
ip netns exec <namespace> ip a | grep qg
:param namespace: namespace
"""
LOG.debug("GET THE INTERFACES BY USING 'ip a' FROM THE NAMESPACE %s",
namespace)
try:
cmd = ['ip', 'a']
cmd = ip_lib.add_namespace_to_cmd(cmd, namespace)
interfaces = linux_utils.execute(cmd, run_as_root=True)
LOG.debug("%s", interfaces)
return interfaces
except RuntimeError:
return None
def _start_tcpdump(self, namespace, interface):
"""Starts tcpdump at the given interface
In order to start a "tcpdump" it uses the following command:
ip netns exec <namespace> sudo tcpdump -i <interface>
:param namespace: namespace
:param interface: interface
:return:
"""
LOG.debug("START THE TCPDUMP USING 'tcpdump -i <%s> FROM THE NAMESPACE"
" %s", interface, namespace)
try:
cmd = ['timeout', '10', 'tcpdump', '-n',
'-i', interface]
cmd = ip_lib.add_namespace_to_cmd(cmd, namespace)
tcpdump = linux_utils.execute(cmd, run_as_root=True,
extra_ok_codes=[124])
LOG.debug("%s", tcpdump)
return tcpdump
except RuntimeError:
return None
def _ssh_and_ping_server(self, ssh_server, ping_server,
namespace, key_file_name):
"""Ssh into the server from the namespace.
In order to ssh it uses the following command:
ip netns exec <namespace> ssh -i <path to keyfile> cirros@<server_ip>
:param ssh_server: ip of the server to ssh into
:param ping_server: ip of the server to ping to
:param namespace: qrouter namespace
:param key_file_name: path to private key file
:return:
"""
LOG.debug("SSH INTO SERVER %s AND PING THE PEER SERVER %s FROM THE"
" NAMESPACE %s", ssh_server, ping_server, namespace)
try:
# ssh instance
host = "cirros@" + ssh_server
count = 20
cmd = ['ssh', '-o', 'StrictHostKeyChecking=no',
'-i', key_file_name, host,
'ping', '-w',
2 * count, '-c', count, ping_server]
cmd = ip_lib.add_namespace_to_cmd(cmd, namespace)
ping_result = linux_utils.execute(cmd, run_as_root=True)
LOG.debug("%s", ping_result)
return True
except RuntimeError:
return False
def _delete_server(self, nova_client, server):
"""Delete nova instance
:param server: instance to delete
:return:
"""
# delete server
sec_group_name = server.security_groups[0]['name']
server_key_name = server.key_name
LOG.debug("DELETING NOVA INSTANCE: %s", server.id)
nova_client.servers.delete(server.id)
LOG.debug("WAITING FOR INSTANCE TO GET DELETED")
task_utils.wait_for_delete(server,
update_resource=task_utils.
get_from_manager())
# delete sec-group
for secgroup in nova_client.security_groups.list():
if secgroup.name == sec_group_name:
LOG.debug("DELETING SEC_GROUP: %s", sec_group_name)
nova_client.security_groups.delete(secgroup.id)
# delete key-pair
for key_pair in nova_client.keypairs.list():
if key_pair.name == server_key_name:
LOG.debug("DELETING KEY_PAIR: %s", server_key_name)
nova_client.keypairs.delete(key_pair.id)
def _delete_ipsec_site_connection(self, neutron_client,
ipsec_site_connection):
"""Deletes ipsec site connection
:param neutron_client: neutron client
:param ipsec_site_connection: ipsec_site_connection
:return:
"""
LOG.debug("DELETING IPSEC_SITE_CONNECTION %s",
ipsec_site_connection['id'])
neutron_client.delete_ipsec_site_connection(
ipsec_site_connection['id'])
def _delete_vpn_service(self, neutron_client, vpn_service):
"""Deletes VPN service endpoints
:param neutron_client: neutron client
:param vpn_services: vpn_service
:return:
"""
LOG.debug("DELETING VPN_SERVICE %s", vpn_service['id'])
neutron_client.delete_vpnservice(vpn_service['id'])
def _delete_ipsec_policy(self, neutron_client, ipsec_policy):
"""Deletes IPSEC policy
:param neutron_client: neutron client
:param ipsec_policy: ipsec_policy
:return:
"""
LOG.debug("DELETING IPSEC POLICY")
neutron_client.delete_ipsecpolicy(ipsec_policy['id'])
def _delete_ike_policy(self, neutron_client, ike_policy):
"""Deletes IKE policy
:param neutron_client: neutron client
:param ike_policy: ike_policy
:return:
"""
LOG.debug("DELETING IKE POLICY")
neutron_client.delete_ikepolicy(ike_policy['id'])
    def _delete_network(self, neutron_client):
        """Delete neutron network.
        Best-effort teardown of rally-created routers, ports, subnets and
        networks; any failure is logged and swallowed so overall cleanup
        can continue.
        :param neutron_client: neutron client
        :return
        """
        try:
            # delete interface subnet-router
            LOG.debug("DELETING RALLY ROUTER INTERFACES & GATEWAYS")
            routers = neutron_client.list_routers()
            subnets = neutron_client.list_subnets()
            subnet_id = None
            # Matches the numeric suffix of "rally_router_<n>" so the
            # paired "rally_subnet_<n>" can be detached below.
            p = re.compile(r"\d")
            if routers:
                for router in routers['routers']:
                    if "rally" in router['name']:
                        neutron_client.remove_gateway_router(router['id'])
                        m = p.search(router['name'])
                        if m:
                            subnet_name = "rally_subnet_" + m.group()
                            if subnets:
                                for subnet in subnets['subnets']:
                                    if subnet_name == subnet['name']:
                                        subnet_id = subnet['id']
                                        neutron_client.remove_interface_router(
                                            router['id'],
                                            {"subnet_id": subnet_id})
            # delete ports associated with interface
            LOG.debug("DELETING RALLY PORTS")
            ports = neutron_client.list_ports()
            if ports:
                for port in ports['ports']:
                    neutron_client.delete_port(port['id'])
            # delete router
            LOG.debug("DELETING RALLY ROUTERS")
            if routers:
                for router in routers['routers']:
                    if "rally" in router['name']:
                        neutron_client.delete_router(router['id'])
            # Delete external network & subnet:
            # external networks require the admin client to delete.
            LOG.debug("DELETING RALLY PUBLIC NETWORK")
            networks = neutron_client.list_networks()
            if networks:
                for network in networks['networks']:
                    if network['router:external'] and (network['name']
                                                       == "rally_network_public"):
                        external_network = network
                        self.admin_clients("neutron").delete_network(
                            external_network["id"])
            # delete network
            LOG.debug("DELETING RALLY NETWORKS")
            networks = neutron_client.list_networks()
            if networks:
                for network in networks['networks']:
                    if "rally_network" in network['name']:
                        neutron_client.delete_network(network['id'])
        except Exception as err:
            # Cleanup must not raise; log the failure and move on.
            LOG.exception(err)
def _delete_key_file(self, key_files):
"""Delete ssh key file
:param key_files: list of paths to ssh key files
:return:
"""
LOG.debug("DELETING RALLY KEY FILES")
for key_file in key_files:
if os.path.exists(key_file):
os.remove(key_file)
def _delete_knownhosts_file(self):
"""Removes the knownhosts file
:param server_ips: ips to be removed from /root/.ssh/knownhosts
:return:
"""
LOG.debug("DELETE THE KNOWNHOST FILE")
try:
cmd = ['rm', '-rf', "~/.ssh/known_hosts"]
cmd = ip_lib.add_namespace_to_cmd(cmd)
linux_utils.execute(cmd)
return True
except RuntimeError:
return False
def _cleanup(self,
key_file_names=None,
called_from=None):
LOG.debug("CLEAN UP CALLED FROM %s", called_from)
nova_client = self.clients("nova")
neutron_client = self.clients("neutron")
servers = nova_client.servers.list()
if servers:
for server in servers:
if "rally" in server.name:
self._delete_server(nova_client, server)
if key_file_names:
self._delete_key_file(key_file_names)
self._delete_knownhosts_file()
vpn_connections = neutron_client.list_ipsec_site_connections()
if vpn_connections:
for vpn_connection in vpn_connections['ipsec_site_connections']:
if "rally" in vpn_connection['name']:
self._delete_ipsec_site_connection(neutron_client,
vpn_connection)
vpn_services = neutron_client.list_vpnservices()
if vpn_services:
for vpn_service in vpn_services['openvpn']:
if "rally" in vpn_service['name']:
self._delete_vpn_service(neutron_client, vpn_service)
ipsec_policies = neutron_client.list_ipsecpolicies()
if ipsec_policies:
for ipsec_policy in ipsec_policies['ipsecpolicies']:
if "rally" in ipsec_policy['name']:
self._delete_ipsec_policy(neutron_client, ipsec_policy)
ike_policies = neutron_client.list_ikepolicies()
if ike_policies:
for ike_policy in ike_policies['ikepolicies']:
if "rally" in ike_policy['name']:
self._delete_ike_policy(neutron_client, ike_policy)
# Deletes entire network
self._delete_network(neutron_client)
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import concurrent.futures
import exceptions
import os
from oslo_config import cfg
import re
import stat
import time
def noop(*args, **kwargs):
    """Accept any arguments and do nothing.

    Used below to stub out ``cfg.CONF.register_cli_opts`` so importing
    this plugin does not register CLI options.
    """
    return None
cfg.CONF.register_cli_opts = noop
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils as linux_utils
from rally.common import log as logging
from rally.plugins.openstack import scenario
from rally.task import utils as task_utils
LOG = logging.getLogger(__name__)
class VpnUtils(scenario.OpenStackScenario):
"""Utility class for VPNaaS scenarios with basic atomic actions."""
SUBNET_IP_VERSION = 4
def _create_network(self, neutron_client, network_suffix, cidr):
"""Create neutron network
:param neutron_client: neutron client
:param network_suffix: str, suffix name of the new network
:param cidr: subnet cidr
:return: router, subnet , network
"""
def create_network(neutron_client, network_suffix, isExternal=False):
network_name = "rally_network_" + network_suffix
network_args = {"name": network_name,
"router:external": isExternal}
LOG.debug("ADDING NEW NETWORK: %s", network_name)
rally_network = neutron_client.create_network(
{"network": network_args})
return rally_network
def create_subnet(neutron_client, rally_network, network_suffix, cidr):
network_id = rally_network["network"]["id"]
subnet_name = "rally_subnet_" + network_suffix
subnet_args = {"name": subnet_name,
"cidr": cidr,
"network_id": network_id,
"ip_version": self.SUBNET_IP_VERSION}
LOG.debug("ADDING SUBNET: %s", subnet_name)
rally_subnet = neutron_client.create_subnet(
{"subnet": subnet_args})
return rally_subnet
def create_router(private_subnet, public_network_id):
router_name = "rally_router_" + network_suffix
gw_info = {"network_id": public_network_id}
router_args = {"name": router_name,
"external_gateway_info": gw_info}
LOG.debug("ADDING ROUTER: %s", router_name)
rally_router = neutron_client.create_router(
{"router": router_args})
# create router interface - connect subnet to it
LOG.debug("ADDING ROUTER INTERFACE")
neutron_client.add_interface_router(
rally_router['router']["id"],
{"subnet_id": private_subnet["subnet"]["id"]})
return rally_router
# check for external network and create one if not found
def get_external_network():
for network in neutron_client.list_networks()['networks']:
if network['router:external']:
public_network_id = network['id']
LOG.debug("PUBLIC NETWORK ALREADY EXISTS")
break
else:
public_network = create_network(self.admin_clients("neutron"),
"public", True)
create_subnet(self.admin_clients("neutron"), public_network,
"public", "172.16.1.0/24")
public_network_id = public_network['network']['id']
return public_network_id
# create public network_id
public_network_id = get_external_network()
# create private network
private_network = create_network(neutron_client, network_suffix)
# create subnet
private_subnet = create_subnet(neutron_client,
private_network,
network_suffix,
cidr)
# create router
rally_router = create_router(private_subnet, public_network_id)
return rally_router, private_network, private_subnet
def _create_keypair(self, nova_client, key_name, key_file):
"""Create keypair
:param nova_client: nova_client
:param key_name: key_name
:param key_file: key_file_name
:return: keypair
"""
LOG.debug("ADDING NEW KEYPAIR")
keypair = nova_client.keypairs.create(key_name)
f = open(key_file, 'w')
os.chmod(key_file, stat.S_IREAD | stat.S_IWRITE)
f.write(keypair.private_key)
f.close()
return keypair
def _create_nova_vm(self, nova_client, keypair, **kwargs):
"""Create nova instance
:param nova_client: nova client
:param keypair: str, key-pair to allow ssh
:return: new nova instance
"""
# add sec-group
sec_group_suffix = "rally_secgroup_" + kwargs["sec_group_suffix"]
LOG.debug("ADDING NEW SECURITY GROUP %s", sec_group_suffix)
secgroup = nova_client.security_groups.create(sec_group_suffix,
sec_group_suffix)
# add security rules for SSH and ICMP
nova_client.security_group_rules.create(secgroup.id, from_port=22,
to_port=22, ip_protocol="tcp", cidr="0.0.0.0/0")
nova_client.security_group_rules.create(secgroup.id, from_port=-1,
to_port=-1, ip_protocol="icmp", cidr="0.0.0.0/0")
# boot new nova instance
server_name = "rally_server_" + (kwargs["server_suffix"])
LOG.debug("BOOTING NEW INSTANCE: %s", server_name)
server = nova_client.servers.create(server_name,
image=kwargs["image"],
flavor=kwargs["flavor"],
key_name=keypair.name,
security_groups=[secgroup.id],
nics=kwargs["nics"])
# wait for instance to become active
LOG.debug("WAITING FOR INSTANCE TO BECOME ACTIVE")
server = task_utils.wait_for(
server,
is_ready=task_utils.resource_is("ACTIVE"),
update_resource=task_utils.get_from_manager(),
timeout=kwargs["nova_server_boot_timeout"],
check_interval=5)
LOG.debug("SERVER STATUS: %s", server.status)
# assert if instance is 'active'
assert('ACTIVE' == server.status), (
"THE INSTANCE IS NOT IN ACTIVE STATE")
return server
def _get_server_ip(self, nova_client, server_id, network_suffix):
"""
:param nova_client: nova client
:param nova_id: uuid of the nova instance whose ip is wanted
:param network_suffix: network name suffix
:return: ip address of the instance
"""
network_name = "rally_network_" + network_suffix
server_details = nova_client.servers.get(server_id)
server_ip = server_details.addresses[network_name][0]["addr"]
return server_ip
def _create_ike_policy(self,
neutron_client,
**kwargs):
"""Creates IKE policy
:param neutron_client:neutron client
:return:created ike_policy
"""
LOG.debug("CREATING IKE_POLICY")
ike_policy = neutron_client.create_ikepolicy({
"ikepolicy": {
"phase1_negotiation_mode":
kwargs["phase1_negotiation_mode"] or "main",
"auth_algorithm": kwargs["auth_algorithm"] or "sha1",
"encryption_algorithm":
kwargs["encryption_algorithm"] or "aes-128",
"pfs": kwargs["pfs"] or "group5",
"lifetime": {
"units": "seconds",
"value": kwargs["value"] or 7200},
"ike_version": kwargs["ike_version"] or "v1",
"name": "rally_ikepolicy"
}
})
return ike_policy
def _create_ipsec_policy(self,
neutron_client,
**kwargs):
"""Creates IPSEC policy
:param neutron_client: neutron client
:return: created IPSEC policy
"""
LOG.debug("CREATING IPSEC_POLICY")
ipsec_policy = neutron_client.create_ipsecpolicy({
"ipsecpolicy": {
"name": "rally_ipsecpolicy",
"transform_protocol": kwargs["transform_protocol"] or "esp",
"auth_algorithm": kwargs["auth_algorithm"] or "sha1",
"encapsulation_mode": kwargs["encapsulation_mode"] or "tunnel",
"encryption_algorithm":
kwargs["encryption_algorithm"] or "aes-128",
"pfs": kwargs["pfs"] or "group5",
"lifetime": {
"units": "seconds",
"value": kwargs["value"] or 7200
}
}
})
return ipsec_policy
def _create_vpn_service(self, neutron_client, rally_subnet, rally_router,
name=None):
"""Creates VPN service endpoints
:param neutron_client: neutron client
:param name: name of vpn service
:param rally_subnet: local subnet
:param rally_router: router endpoint
:param admin_state_up: admin state of the vpn_service
:return: vpn_service
"""
LOG.debug("CREATING VPN_SERVICE")
vpn_service = neutron_client.create_vpnservice({
"vpnservice": {
"subnet_id": rally_subnet["subnet"]["id"],
"router_id": rally_router["router"]["id"],
"name": "rally_vpn_service_" + name,
"admin_state_up": True
}
})
return vpn_service
def _create_ipsec_site_connection(
self, neutron_client, ike_policy,
ipsec_policy, peer_cidrs,
peer_id, peer_address,
vpn_service, name=None,
mtu=None, secret=None):
"""Creates IPSEC site connections
:param neutron_client: neutron client
:param ike_policy: ikepolicy
:param ipsec_policy: ipsecpolicy
:param peer_cidrs: list of peer cidrs
:param peer_id: peer_id
:param peer_address: peer_address
:param vpn_service: vpn_service
:param secret: pre shared secret
:param admin_state_up: admin state of the ipsec site connections
:param mtu: max transmission unit
:param name: name of the ipsec site connections
:return:ipsec_site_connection
"""
LOG.debug("CREATING IPSEC_SITE_CONNECTION")
ipsec_site_connection = neutron_client.create_ipsec_site_connection({
"ipsec_site_connection": {
"psk": secret or "secret",
"initiator": "bi-directional",
"ipsecpolicy_id": ipsec_policy["ipsecpolicy"]["id"],
"admin_state_up": True,
"peer_cidrs": peer_cidrs,
"mtu": mtu or "1500",
"ikepolicy_id": ike_policy["ikepolicy"]["id"],
"dpd": {
"action": "disabled",
"interval": 60,
"timeout": 240
},
"vpnservice_id": vpn_service["vpnservice"]["id"],
"peer_address": peer_address,
"peer_id": peer_id,
"name": "rally_ipsec_site_connection_" + name
}
})
return ipsec_site_connection
def _get_resource(self, resource_tag, resource_id):
"""Gets the resource(vpn_service or ipsec_site_connection)
:param resource_tag: "vpnservice" or "ipsec_site_connection"
:param resource_id: id of the resource
:return:
"""
neutron_client = self.clients("neutron")
if resource_tag == "vpnservice":
vpn_service = neutron_client.show_vpnservice(resource_id)
if vpn_service:
return vpn_service
elif resource_tag == 'ipsec_site_connection':
ipsec_site_connection = neutron_client.show_ipsec_site_connection(
resource_id)
if ipsec_site_connection:
return ipsec_site_connection
    def _wait_for_status_change(self, resource, final_status,
                                resource_tag, wait_timeout=60,
                                check_interval=1):
        """Wait for resource's status change.

        Poll the resource until its status becomes *final_status* or till
        the time exceeds the wait_timeout value.

        :param resource: resource whose status has to be checked
        :param final_status: desired final status of the resource
        :param resource_tag: to identify the resource as vpnservice or
        ipsec_site_connection
        :param wait_timeout: timeout value in seconds
        :param check_interval: time to sleep before each check for the status
        change
        :return: resource
        """
        start_time = time.time()
        while True:
            # re-fetch the resource on every iteration to observe the
            # latest status
            resource = self._get_resource(
                resource_tag,
                resource[resource_tag]['id'])
            current_status = resource[resource_tag]['status']
            if current_status == final_status:
                return resource
            time.sleep(check_interval)
            if time.time() - start_time > wait_timeout:
                self._cleanup(called_from="VpnUtils._wait_for_status_change")
                # NOTE(review): final_status is passed as a separate arg —
                # assumes exceptions.Exception %-formats its message like
                # rally exceptions do; confirm
                raise exceptions.Exception("Timeout while waiting for status "
                                           "change to %s.", final_status)
    def _assert_statuses(self, vpn_service, ipsec_site_connection,
                         ipsec_site_connection_creation_timeout=60,
                         vpn_service_creation_timeout=60):
        """Assert that the vpn_service and ipsec_site_connection go ACTIVE.

        :param vpn_service: vpn_service
        :param ipsec_site_connection: ipsec_site_connection
        :param ipsec_site_connection_creation_timeout: timeout in seconds
        :param vpn_service_creation_timeout: timeout in seconds
        :return:
        """
        vpn_service = self._wait_for_status_change(
            vpn_service,
            resource_tag="vpnservice",
            final_status="ACTIVE",
            wait_timeout=vpn_service_creation_timeout,
            check_interval=5)
        LOG.debug("VPN-SERVICE STATUS: %s",
                  vpn_service['vpnservice']['status'])
        # NOTE: plain assert statements are stripped under "python -O"
        assert('ACTIVE' == vpn_service['vpnservice']['status']), (
            "VPN_SERVICE IS NOT IN ACTIVE STATE")
        ipsec_site_connection = self._wait_for_status_change(
            ipsec_site_connection,
            resource_tag="ipsec_site_connection",
            final_status="ACTIVE",
            wait_timeout=ipsec_site_connection_creation_timeout,
            check_interval=5)
        LOG.debug("IPSEC_SITE_CONNECTION STATUS: %s",
                  ipsec_site_connection['ipsec_site_connection']['status'])
        assert('ACTIVE' ==
               ipsec_site_connection['ipsec_site_connection']['status']), (
            "THE INSTANCE IS NOT IN ACTIVE STATE")
    def _verify_vpn_connectivity(self, server_ips, snat_namespaces,
                                 qrouter_namespaces, key_file_names,
                                 first, second):
        """Verifies the vpn connectivity between the endpoints.

        Captures traffic on the peer's qg- interface while sshing into the
        local server and pinging the peer; the tunnel is considered up when
        ESP (encrypted) packets show up in the capture.

        :param server_ips: list of private ips of the servers between
        which the vpn connectivity has to verified.
        :param snat_namespaces: snat_namespaces of the 2 routers
        :param qrouter_namespaces: qrouter_namespaces of the 2 routers
        :param key_file_names: path to private key files
        :param first: parameter to point to the self
        :param second: parameter to point to the peer
        :return: True or False
        """
        LOG.debug("VERIFY THE VPN CONNECTIVITY")
        qg = self._get_interfaces(snat_namespaces[second])
        if qg:
            # pick the qg-xxx gateway interface name out of the "ip a" output
            p = re.compile(r"qg-\w+-\w+")
            m = p.search(qg)
            if m:
                qg_interface = m.group()
            else:
                qg_interface = None
            if qg_interface:
                # run the capture and the ssh+ping concurrently so the
                # tcpdump actually sees the ping traffic
                with concurrent.futures.ThreadPoolExecutor(max_workers=2) as e:
                    tcpdump_future = e.submit(self._start_tcpdump,
                                              snat_namespaces[second],
                                              qg_interface)
                    ssh_future = e.submit(self._ssh_and_ping_server,
                                          server_ips[first],
                                          server_ips[second],
                                          qrouter_namespaces[first],
                                          key_file_names[first])
                    assert(True == ssh_future.result()), "SSH/Ping failed"
                lines = tcpdump_future.result().split('\n')
                for line in lines:
                    # ESP packets mean the traffic went through the tunnel
                    if 'ESP' in line:
                        return True
        return False
    def _get_namespace(self):
        """List network namespaces via "ip netns".

        :return: raw command output, or None when execution fails
        """
        LOG.debug("GET NAMESPACES USING 'ip netns'")
        try:
            cmd = ['ip', 'netns']
            cmd = ip_lib.add_namespace_to_cmd(cmd)
            namespaces = linux_utils.execute(cmd)
            LOG.debug("%s", namespaces)
            return namespaces
        except RuntimeError:
            return None
def _wait_for_namespace_creation(self, namespace, rally_router):
"""Wait for namespace creation
:param namespace: snat/qrouter namespace
:param rally_router: rally_router
:return:
"""
start_time = time.time()
while True:
namespaces = self._get_namespace().split()
for line in namespaces:
if line == (namespace + rally_router["router"]["id"]):
namespace = line
return namespace
time.sleep(1)
if time.time() - start_time > 20:
self._cleanup(called_from="_wait_for_namespace_creation")
raise exceptions.Exception("Timeout while waiting for"
" namespaces to be created")
    def _ping(self, namespace, ip):
        """Pings ip address from network namespace.

        In order to ping it uses following cli command:
        ip netns exec <namespace> ping -w 8 -c 4 <ip>

        :param namespace: namespace
        :param ip: ip to ping to
        :return: True when the ping succeeded, False otherwise
        """
        LOG.debug("PING %s FROM THE NAMESPACE %s", ip, namespace)
        try:
            count = 4
            # NOTE(review): the deadline/count are ints in the argv list —
            # assumes linux_utils.execute stringifies arguments; confirm
            cmd = ['ping', '-w', 2 * count, '-c', count, ip]
            cmd = ip_lib.add_namespace_to_cmd(cmd, namespace)
            ping_result = linux_utils.execute(cmd, run_as_root=True)
            LOG.debug("%s", ping_result)
            return True
        except RuntimeError:
            return False
def _get_interfaces(self, namespace):
"""Do an "ip a".
In order to do "ip a" it uses following cli command:
ip netns exec <namespace> ip a | grep qg
:param namespace: namespace
"""
LOG.debug("GET THE INTERFACES BY USING 'ip a' FROM THE NAMESPACE %s",
namespace)
try:
cmd = ['ip', 'a']
cmd = ip_lib.add_namespace_to_cmd(cmd, namespace)
interfaces = linux_utils.execute(cmd, run_as_root=True)
LOG.debug("%s", interfaces)
return interfaces
except RuntimeError:
return None
    def _start_tcpdump(self, namespace, interface):
        """Capture traffic on *interface* for 10 seconds.

        In order to start a "tcpdump" it uses the following command:
        ip netns exec <namespace> timeout 10 tcpdump -n -i <interface>

        :param namespace: namespace
        :param interface: interface
        :return: captured tcpdump output, or None when execution fails
        """
        LOG.debug("START THE TCPDUMP USING 'tcpdump -i <%s> FROM THE NAMESPACE"
                  " %s", interface, namespace)
        try:
            cmd = ['timeout', '10', 'tcpdump', '-n',
                   '-i', interface]
            cmd = ip_lib.add_namespace_to_cmd(cmd, namespace)
            # exit code 124 is how timeout(1) reports expiry, which is the
            # expected way for this capture to end
            tcpdump = linux_utils.execute(cmd, run_as_root=True,
                                          extra_ok_codes=[124])
            LOG.debug("%s", tcpdump)
            return tcpdump
        except RuntimeError:
            return None
    def _ssh_and_ping_server(self, ssh_server, ping_server,
                             namespace, key_file_name):
        """Ssh into the server from the namespace and ping its peer.

        In order to ssh it uses the following command:
        ip netns exec <namespace> ssh -i <path to keyfile> cirros@<server_ip>
        and runs "ping -w 40 -c 20 <ping_server>" on the remote side.

        :param ssh_server: ip of the server to ssh into
        :param ping_server: ip of the server to ping to
        :param namespace: qrouter namespace
        :param key_file_name: path to private key file
        :return: True when the remote ping succeeded, False otherwise
        """
        LOG.debug("SSH INTO SERVER %s AND PING THE PEER SERVER %s FROM THE"
                  " NAMESPACE %s", ssh_server, ping_server, namespace)
        try:
            # ssh instance
            host = "cirros@" + ssh_server
            count = 20
            # NOTE(review): 2 * count is an int in the argv list — assumes
            # linux_utils.execute stringifies arguments; confirm
            cmd = ['ssh', '-o', 'StrictHostKeyChecking=no',
                   '-i', key_file_name, host,
                   'ping', '-w',
                   2 * count, '-c', count, ping_server]
            cmd = ip_lib.add_namespace_to_cmd(cmd, namespace)
            ping_result = linux_utils.execute(cmd, run_as_root=True)
            LOG.debug("%s", ping_result)
            return True
        except RuntimeError:
            return False
    def _delete_server(self, nova_client, server):
        """Delete a nova instance plus its security group and keypair.

        :param nova_client: nova client
        :param server: instance to delete
        :return:
        """
        # delete server
        sec_group_name = server.security_groups[0]['name']
        server_key_name = server.key_name
        LOG.debug("DELETING NOVA INSTANCE: %s", server.id)
        nova_client.servers.delete(server.id)
        LOG.debug("WAITING FOR INSTANCE TO GET DELETED")
        task_utils.wait_for_delete(server,
                                   update_resource=task_utils.
                                   get_from_manager())
        # delete sec-group
        # NOTE(review): nova_client.security_groups is the legacy
        # novaclient API — confirm the installed client still exposes it
        for secgroup in nova_client.security_groups.list():
            if secgroup.name == sec_group_name:
                LOG.debug("DELETING SEC_GROUP: %s", sec_group_name)
                nova_client.security_groups.delete(secgroup.id)
        # delete key-pair
        for key_pair in nova_client.keypairs.list():
            if key_pair.name == server_key_name:
                LOG.debug("DELETING KEY_PAIR: %s", server_key_name)
                nova_client.keypairs.delete(key_pair.id)
    def _delete_ipsec_site_connection(self, neutron_client,
                                      ipsec_site_connection):
        """Deletes an ipsec site connection.

        :param neutron_client: neutron client
        :param ipsec_site_connection: ipsec_site_connection dict (must
            carry its 'id')
        :return:
        """
        LOG.debug("DELETING IPSEC_SITE_CONNECTION %s",
                  ipsec_site_connection['id'])
        neutron_client.delete_ipsec_site_connection(
            ipsec_site_connection['id'])
    def _delete_vpn_service(self, neutron_client, vpn_service):
        """Deletes a VPN service endpoint.

        :param neutron_client: neutron client
        :param vpn_service: vpn_service dict (must carry its 'id')
        :return:
        """
        LOG.debug("DELETING VPN_SERVICE %s", vpn_service['id'])
        neutron_client.delete_vpnservice(vpn_service['id'])
    def _delete_ipsec_policy(self, neutron_client, ipsec_policy):
        """Deletes an IPSEC policy.

        :param neutron_client: neutron client
        :param ipsec_policy: ipsec_policy dict (must carry its 'id')
        :return:
        """
        LOG.debug("DELETING IPSEC POLICY")
        neutron_client.delete_ipsecpolicy(ipsec_policy['id'])
    def _delete_ike_policy(self, neutron_client, ike_policy):
        """Deletes an IKE policy.

        :param neutron_client: neutron client
        :param ike_policy: ike_policy dict (must carry its 'id')
        :return:
        """
        LOG.debug("DELETING IKE POLICY")
        neutron_client.delete_ikepolicy(ike_policy['id'])
    def _delete_network(self, neutron_client):
        """Delete every rally-created neutron network resource.

        Removes router gateways and interfaces, ports, routers, the
        external network and finally the private networks. Any failure is
        logged and swallowed: this is best-effort cleanup.

        :param neutron_client: neutron client
        :return
        """
        try:
            # delete interface subnet-router
            LOG.debug("DELETING RALLY ROUTER INTERFACES & GATEWAYS")
            routers = neutron_client.list_routers()
            subnets = neutron_client.list_subnets()
            subnet_id = None
            # the digit in the router name pairs it with "rally_subnet_<n>"
            p = re.compile(r"\d")
            if routers:
                for router in routers['routers']:
                    if "rally" in router['name']:
                        neutron_client.remove_gateway_router(router['id'])
                        m = p.search(router['name'])
                        if m:
                            subnet_name = "rally_subnet_" + m.group()
                            if subnets:
                                for subnet in subnets['subnets']:
                                    if subnet_name == subnet['name']:
                                        subnet_id = subnet['id']
                                        neutron_client.remove_interface_router(
                                            router['id'],
                                            {"subnet_id": subnet_id})
            # delete ports associated with interface
            LOG.debug("DELETING RALLY PORTS")
            ports = neutron_client.list_ports()
            if ports:
                for port in ports['ports']:
                    neutron_client.delete_port(port['id'])
            # delete router
            LOG.debug("DELETING RALLY ROUTERS")
            if routers:
                for router in routers['routers']:
                    if "rally" in router['name']:
                        neutron_client.delete_router(router['id'])
            # Delete external network & subnet:
            LOG.debug("DELETING RALLY PUBLIC NETWORK")
            networks = neutron_client.list_networks()
            if networks:
                for network in networks['networks']:
                    if network['router:external'] and (network['name']
                                                       == "rally_network_public"):
                        external_network = network
                        self.admin_clients("neutron").delete_network(
                            external_network["id"])
            # delete network
            LOG.debug("DELETING RALLY NETWORKS")
            networks = neutron_client.list_networks()
            if networks:
                for network in networks['networks']:
                    if "rally_network" in network['name']:
                        neutron_client.delete_network(network['id'])
        except Exception as err:
            LOG.exception(err)
def _delete_key_file(self, key_files):
"""Delete ssh key file
:param key_files: list of paths to ssh key files
:return:
"""
LOG.debug("DELETING RALLY KEY FILES")
for key_file in key_files:
if os.path.exists(key_file):
os.remove(key_file)
def _delete_knownhosts_file(self):
"""Removes the knownhosts file
:param server_ips: ips to be removed from /root/.ssh/knownhosts
:return:
"""
LOG.debug("DELETE THE KNOWNHOST FILE")
try:
cmd = ['rm', '-rf', "~/.ssh/known_hosts"]
cmd = ip_lib.add_namespace_to_cmd(cmd)
linux_utils.execute(cmd)
return True
except RuntimeError:
return False
def _cleanup(self,
key_file_names=None,
called_from=None):
LOG.debug("CLEAN UP CALLED FROM %s", called_from)
nova_client = self.clients("nova")
neutron_client = self.clients("neutron")
servers = nova_client.servers.list()
if servers:
for server in servers:
if "rally" in server.name:
self._delete_server(nova_client, server)
if key_file_names:
self._delete_key_file(key_file_names)
self._delete_knownhosts_file()
vpn_connections = neutron_client.list_ipsec_site_connections()
if vpn_connections:
for vpn_connection in vpn_connections['ipsec_site_connections']:
if "rally" in vpn_connection['name']:
self._delete_ipsec_site_connection(neutron_client,
vpn_connection)
vpn_services = neutron_client.list_vpnservices()
if vpn_services:
for vpn_service in vpn_services['openvpn']:
if "rally" in vpn_service['name']:
self._delete_vpn_service(neutron_client, vpn_service)
ipsec_policies = neutron_client.list_ipsecpolicies()
if ipsec_policies:
for ipsec_policy in ipsec_policies['ipsecpolicies']:
if "rally" in ipsec_policy['name']:
self._delete_ipsec_policy(neutron_client, ipsec_policy)
ike_policies = neutron_client.list_ikepolicies()
if ike_policies:
for ike_policy in ike_policies['ikepolicies']:
if "rally" in ike_policy['name']:
self._delete_ike_policy(neutron_client, ike_policy)
# Deletes entire network
self._delete_network(neutron_client)
|
en
| 0.710367
|
# Copyright 2015 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Utility class for VPNaaS scenarios with basic atomic actions. Create neutron network :param neutron_client: neutron client :param network_suffix: str, suffix name of the new network :param cidr: subnet cidr :return: router, subnet , network # create router interface - connect subnet to it # check for external network and create one if not found # create public network_id # create private network # create subnet # create router Create keypair :param nova_client: nova_client :param key_name: key_name :param key_file: key_file_name :return: keypair Create nova instance :param nova_client: nova client :param keypair: str, key-pair to allow ssh :return: new nova instance # add sec-group # add security rules for SSH and ICMP # boot new nova instance # wait for instance to become active # assert if instance is 'active' :param nova_client: nova client :param nova_id: uuid of the nova instance whose ip is wanted :param network_suffix: network name suffix :return: ip address of the instance Creates IKE policy :param neutron_client:neutron client :return:created ike_policy Creates IPSEC policy :param neutron_client: neutron client :return: created IPSEC policy Creates VPN service endpoints :param neutron_client: neutron client :param name: name of vpn service :param rally_subnet: local subnet :param rally_router: router endpoint :param admin_state_up: admin state of the vpn_service 
:return: vpn_service Creates IPSEC site connections :param neutron_client: neutron client :param ike_policy: ikepolicy :param ipsec_policy: ipsecpolicy :param peer_cidrs: list of peer cidrs :param peer_id: peer_id :param peer_address: peer_address :param vpn_service: vpn_service :param secret: pre shared secret :param admin_state_up: admin state of the ipsec site connections :param mtu: max transmission unit :param name: name of the ipsec site connections :return:ipsec_site_connection Gets the resource(vpn_service or ipsec_site_connection) :param resource_tag: "vpnservice" or "ipsec_site_connection" :param resource_id: id of the resource :return: Wait for resource's status change Wait till the status of the resource changes to final state or till the time exceeds the wait_timeout value. :param resource: resource whose status has to be checked :param final_status: desired final status of the resource :param resource_tag: to identify the resource as vpnservice or ipser_site_connection :param wait_timeout: timeout value in seconds :param check_interval: time to sleep before each check for the status change :return: resource Assert statuses of vpn_service and ipsec_site_connection :param vpn_service: vpn_service :param ipsec_site_connection: ipsec_site_connection :param ipsec_site_connection_creation_timeout: timeout in seconds :param vpn_service_creation_timeout: timeout in seconds :return: Verifies the vpn connectivity between the endpoints :param server_ips: list of private ips of the servers between which the vpn connectivity has to verified. 
:param snat_namespaces: snat_namespaces of the 2 routers :param qrouter_namespaces: qrouter_namespaces of the 2 routers :param key_file_names: path to private key files :param first: parameter to point to the self :param second: parameter to point to the peer :return: True or False Get namespaces :return: namespaces Wait for namespace creation :param namespace: snat/qrouter namespace :param rally_router: rally_router :return: Pings ip address from network namespace. In order to ping it uses following cli command: ip netns exec <namespace> ping -c 4 -q <ip> :param namespace: namespace :param ip: ip to ping to Do an "ip a". In order to do "ip a" it uses following cli command: ip netns exec <namespace> ip a | grep qg :param namespace: namespace Starts tcpdump at the given interface In order to start a "tcpdump" it uses the following command: ip netns exec <namespace> sudo tcpdump -i <interface> :param namespace: namespace :param interface: interface :return: Ssh into the server from the namespace. In order to ssh it uses the following command: ip netns exec <namespace> ssh -i <path to keyfile> cirros@<server_ip> :param ssh_server: ip of the server to ssh into :param ping_server: ip of the server to ping to :param namespace: qrouter namespace :param key_file_name: path to private key file :return: # ssh instance Delete nova instance :param server: instance to delete :return: # delete server # delete sec-group # delete key-pair Deletes ipsec site connection :param neutron_client: neutron client :param ipsec_site_connection: ipsec_site_connection :return: Deletes VPN service endpoints :param neutron_client: neutron client :param vpn_services: vpn_service :return: Deletes IPSEC policy :param neutron_client: neutron client :param ipsec_policy: ipsec_policy :return: Deletes IKE policy :param neutron_client: neutron client :param ike_policy: ike_policy :return: Delete neutron network. 
:param network_tuple: tuple, router, network and subnet to delete :return # delete interface subnet-router # delete ports associated with interface # delete router # Delete external network & subnet: # delete network Delete ssh key file :param key_files: list of paths to ssh key files :return: Removes the knownhosts file :param server_ips: ips to be removed from /root/.ssh/knownhosts :return: # Deletes entire network
| 1.766095
| 2
|
slam.py
|
Noob-can-Compile/Crap_SLAM
| 0
|
6628577
|
import time
import cv2
import numpy as np
from display import Display
from extractor import Extractor
width = 1280//2 #1920//2
height = 720//2 #1080//2
disp = Display(width, height)
fe = Extractor()
def frames_per_motion(img):
    """Resize the frame, extract feature matches and draw them on screen."""
    img = cv2.resize(img, (width, height))
    matches = fe.extract(img)
    print("%d matches" % (len(matches)))
    for point1, point2 in matches:
        # round each match coordinate to integer pixel positions
        u1, v1 = (int(round(coord)) for coord in point1)
        u2, v2 = (int(round(coord)) for coord in point2)
        # green dot on the current keypoint, blue line to its match
        cv2.circle(img, (u1, v1), color=(0, 255, 0), radius=1, thickness=2)
        cv2.line(img, (u1, v1), (u2, v2), color=(255, 0, 0))
    disp.paint(img)
if __name__ == "__main__":
    cap = cv2.VideoCapture("videos/car3.mp4")
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:  # idiomatic truthiness check instead of "== True"
            frames_per_motion(frame)
        else:
            break
    # release the capture handle (previously leaked)
    cap.release()
|
import time
import cv2
import numpy as np
from display import Display
from extractor import Extractor
width = 1280//2 #1920//2
height = 720//2 #1080//2
disp = Display(width, height)
fe = Extractor()
def frames_per_motion(img):
img = cv2.resize(img, (width, height))
matches = fe.extract(img)
print("%d matches" % (len(matches)))
for point1, point2 in matches:
u1,v1 = map(lambda x: int(round(x)), point1)
u2,v2 = map(lambda x: int(round(x)), point2)
cv2.circle(img, (u1,v1), color = (0,255,0), radius = 1, thickness = 2)
cv2.line(img, (u1,v1), (u2,v2), color = (255,0,0))
disp.paint(img)
if __name__ == "__main__":
cap = cv2.VideoCapture("videos/car3.mp4")
while cap.isOpened():
ret, frame = cap.read()
if ret == True:
frames_per_motion(frame)
else:
break
|
en
| 0.564681
|
#1920//2 #1080//2
| 2.694841
| 3
|
tests/base_tests/test_view_base_view.py
|
fy0/mapi
| 50
|
6628578
|
<reponame>fy0/mapi
import json
import pytest
from slim.base._view.base_view import BaseView
from slim.base.web import FileField
from slim import Application, ALL_PERMISSION
from slim.exception import PermissionDenied, InvalidPostData
from slim.retcode import RETCODE
from slim.tools.test import invoke_interface, make_mocked_request
pytestmark = [pytest.mark.asyncio]
app = Application(cookies_secret=b'123456', permission=ALL_PERMISSION)
@app.route.view('topic')
class TopicView(BaseView):
    # Test view exposing one POST interface that accepts a multipart
    # file upload named "file".
    @app.route.interface('POST')
    async def upload(self):
        post = await self.post_data()
        field = post.get('file')
        # the uploaded part must be parsed into a FileField whose
        # payload matches the bytes the test sends
        assert isinstance(field, FileField)
        assert field.file.read() == b'FILE_CONTENT'
        self.finish(RETCODE.SUCCESS)
app.prepare()
def make_req(method, data=None, raw_data: bytes = None):
    """Build a mocked request with a JSON content type.

    When *data* is given it is serialized to JSON and overrides *raw_data*.
    """
    if data:
        raw_data = json.dumps(data).encode('utf-8')
    return make_mocked_request(method, '/any',
                               headers={'Content-Type': 'application/json'},
                               body=raw_data)
async def test_view_method():
    # the view exposes the request's HTTP method
    view = TopicView(app, make_mocked_request('POST', '/any'))
    assert view.method == 'POST'
async def test_view_postdata_json():
    # a JSON POST body is decoded into a mapping
    view = TopicView(app, make_req('POST', data={'test': 111}))
    post = await view.post_data()
    assert post['test'] == 111
async def test_view_postdata_get():
    # post_data() also parses a JSON body on a GET request
    view = TopicView(app, make_req('GET', data={'test': 111}))
    post = await view.post_data()
    assert post['test'] == 111
async def test_view_postdata_invalid_json():
    # malformed JSON must raise InvalidPostData
    view = TopicView(app, make_req('POST', raw_data=b'{'))
    with pytest.raises(InvalidPostData) as e:
        await view.post_data()
async def test_view_post_file():
    # multipart upload round-trip; the assertions live in TopicView.upload
    post_raw = b'------WebKitFormBoundaryRanewtcan8ETWm3N\r\nContent-Disposition: form-data; name="file"; filename="hhhh.txt"\r\nContent-Type: text/plain\r\n\r\nFILE_CONTENT\r\n------WebKitFormBoundaryRanewtcan8ETWm3N--\r\n'
    await invoke_interface(app, TopicView().upload, content_type='multipart/form-data; boundary=----WebKitFormBoundaryRanewtcan8ETWm3N', body=post_raw)
|
import json
import pytest
from slim.base._view.base_view import BaseView
from slim.base.web import FileField
from slim import Application, ALL_PERMISSION
from slim.exception import PermissionDenied, InvalidPostData
from slim.retcode import RETCODE
from slim.tools.test import invoke_interface, make_mocked_request
pytestmark = [pytest.mark.asyncio]
app = Application(cookies_secret=b'123456', permission=ALL_PERMISSION)
@app.route.view('topic')
class TopicView(BaseView):
@app.route.interface('POST')
async def upload(self):
post = await self.post_data()
field = post.get('file')
assert isinstance(field, FileField)
assert field.file.read() == b'FILE_CONTENT'
self.finish(RETCODE.SUCCESS)
app.prepare()
def make_req(method, data=None, raw_data: bytes = None):
headers = {'Content-Type': 'application/json'}
if data:
raw_data = json.dumps(data).encode('utf-8')
return make_mocked_request(method, '/any', headers=headers, body=raw_data)
async def test_view_method():
view = TopicView(app, make_mocked_request('POST', '/any'))
assert view.method == 'POST'
async def test_view_postdata_json():
view = TopicView(app, make_req('POST', data={'test': 111}))
post = await view.post_data()
assert post['test'] == 111
async def test_view_postdata_get():
view = TopicView(app, make_req('GET', data={'test': 111}))
post = await view.post_data()
assert post['test'] == 111
async def test_view_postdata_invalid_json():
view = TopicView(app, make_req('POST', raw_data=b'{'))
with pytest.raises(InvalidPostData) as e:
await view.post_data()
async def test_view_post_file():
post_raw = b'------WebKitFormBoundaryRanewtcan8ETWm3N\r\nContent-Disposition: form-data; name="file"; filename="hhhh.txt"\r\nContent-Type: text/plain\r\n\r\nFILE_CONTENT\r\n------WebKitFormBoundaryRanewtcan8ETWm3N--\r\n'
await invoke_interface(app, TopicView().upload, content_type='multipart/form-data; boundary=----WebKitFormBoundaryRanewtcan8ETWm3N', body=post_raw)
|
none
| 1
| 1.956408
| 2
|
|
backend/api/serializers/model_year_report.py
|
bcgov/zeva
| 3
|
6628579
|
<reponame>bcgov/zeva
from enumfields.drf import EnumField, EnumSupportSerializerMixin
from rest_framework.serializers import ModelSerializer, \
SerializerMethodField, SlugRelatedField, CharField, \
ListField
from api.models.model_year import ModelYear
from api.models.model_year_report_confirmation import \
ModelYearReportConfirmation
from api.models.model_year_report import ModelYearReport
from api.models.model_year_report_history import ModelYearReportHistory
from api.models.model_year_report_ldv_sales import ModelYearReportLDVSales
from api.models.model_year_report_address import ModelYearReportAddress
from api.models.model_year_report_make import ModelYearReportMake
from api.models.model_year_report_statuses import ModelYearReportStatuses
from api.serializers.model_year_report_ldv_sales import ModelYearReportLDVSalesSerializer
from api.models.user_profile import UserProfile
from api.models.model_year_report_assessment import ModelYearReportAssessment
from api.models.supplemental_report import SupplementalReport
from api.serializers.model_year_report_address import \
ModelYearReportAddressSerializer
from api.serializers.model_year_report_make import \
ModelYearReportMakeSerializer
from api.serializers.model_year_report_history import \
ModelYearReportHistorySerializer
from api.serializers.user import MemberSerializer
from api.serializers.vehicle import ModelYearSerializer
from api.services.model_year_report import get_model_year_report_statuses
class ModelYearReportSerializer(ModelSerializer):
    """Full serializer for a single model-year report.

    Several fields are computed per-request: non-government users must not
    see RETURNED/RECOMMENDED statuses or government-only make/sales edits.
    """
    create_user = SerializerMethodField()
    model_year = ModelYearSerializer()
    model_year_report_addresses = ModelYearReportAddressSerializer(many=True)
    makes = SerializerMethodField()
    # validation_status = EnumField(ModelYearReportStatuses)
    validation_status = SerializerMethodField()
    model_year_report_history = SerializerMethodField()
    confirmations = SerializerMethodField()
    statuses = SerializerMethodField()
    ldv_sales = SerializerMethodField()
    ldv_sales_previous = SerializerMethodField()
    avg_sales = SerializerMethodField()
    changelog = SerializerMethodField()
    def get_validation_status(self, obj):
        # non-government users see RETURNED/RECOMMENDED as SUBMITTED
        request = self.context.get('request')
        if not request.user.is_government and \
                obj.validation_status in [
                    ModelYearReportStatuses.RETURNED,
                    ModelYearReportStatuses.RECOMMENDED
                ]:
            return ModelYearReportStatuses.SUBMITTED.value
        return obj.validation_status.value
    def get_ldv_sales_previous(self, obj):
        # sales records for the three model years preceding the report year
        year = int(obj.model_year.name)
        ldv_sales = ModelYearReportLDVSales.objects.filter(
            model_year_report=obj,
            model_year__name__in=[
                str(year - 1),
                str(year - 2),
                str(year - 3)
            ]
        )
        serializer = ModelYearReportLDVSalesSerializer(ldv_sales, many=True)
        return serializer.data
    def get_avg_sales(self, obj):
        # average of the last three supplier-provided prior-year sales rows;
        # falls back to the report year's own sales when fewer than 3 exist
        rows = ModelYearReportLDVSales.objects.filter(
            model_year_report_id=obj.id,
            from_gov=False,
            model_year__name__lt=obj.model_year.name
        ).values_list(
            'ldv_sales', flat=True
        )[:3]
        avg_sales = 0
        if rows.count() < 3:
            row = ModelYearReportLDVSales.objects.filter(
                model_year_report_id=obj.id,
                model_year_id=obj.model_year_id
            ).first()
            if row:
                return row.ldv_sales
            else:
                return None
        avg_sales = sum(list(rows)) / 3
        return avg_sales
    def get_create_user(self, obj):
        # expand the stored username into a full member record when possible
        user_profile = UserProfile.objects.filter(username=obj.create_user)
        if user_profile.exists():
            serializer = MemberSerializer(user_profile.first(), read_only=True)
            return serializer.data
        return obj.create_user
    def get_confirmations(self, obj):
        # ids of the signing-authority assertions confirmed for this report
        confirmations = ModelYearReportConfirmation.objects.filter(
            model_year_report_id=obj.id
        ).values_list('signing_authority_assertion_id', flat=True).distinct()
        return confirmations
    def get_ldv_sales(self, obj):
        # government users (and suppliers viewing their own assessed report)
        # see the government-adjusted sales figure when one exists
        request = self.context.get('request')
        is_assessed = (
            (request.user.organization_id == obj.organization_id and
                obj.validation_status == ModelYearReportStatuses.ASSESSED) or
            request.user.is_government
        )
        if is_assessed:
            return obj.get_ldv_sales(from_gov=True) or obj.ldv_sales
        return obj.ldv_sales
    def get_changelog(self, obj):
        # for government users, summarize government-made edits (make
        # additions and sales adjustments); others get the plain sales value
        request = self.context.get('request')
        if request.user.is_government:
            from_gov_sales = obj.get_ldv_sales_with_year(from_gov=True)
            sales_changes = ''
            if from_gov_sales:
                not_gov_sales = obj.get_ldv_sales_with_year(from_gov=False)
                sales_changes = {'from_gov': from_gov_sales['sales'], 'not_from_gov': not_gov_sales['sales'], 'year': from_gov_sales['year']}
            gov_makes = ModelYearReportMake.objects.filter(
                model_year_report_id=obj.id,
                from_gov=True
            )
            gov_makes_additions_serializer = ModelYearReportMakeSerializer(gov_makes, many=True)
            return {'makes_additions': gov_makes_additions_serializer.data, 'ldv_changes': sales_changes}
        return obj.ldv_sales
    def get_makes(self, obj):
        # hide government-added makes from suppliers until assessment
        request = self.context.get('request')
        makes = ModelYearReportMake.objects.filter(
            model_year_report_id=obj.id
        )
        if not request.user.is_government and \
                obj.validation_status != ModelYearReportStatuses.ASSESSED:
            makes = makes.filter(
                from_gov=False
            )
        serializer = ModelYearReportMakeSerializer(makes, many=True)
        return serializer.data
    def get_statuses(self, obj):
        request = self.context.get('request')
        return get_model_year_report_statuses(obj, request.user)
    def get_model_year_report_history(self, obj):
        # suppliers must not see RECOMMENDED/RETURNED history entries
        request = self.context.get('request')
        history = ModelYearReportHistory.objects.filter(
            model_year_report_id=obj.id
        ).order_by('create_timestamp')
        if not request.user.is_government:
            history = history.exclude(
                validation_status__in=[
                    ModelYearReportStatuses.RECOMMENDED,
                    ModelYearReportStatuses.RETURNED,
                ]
            )
        serializer = ModelYearReportHistorySerializer(history, many=True)
        return serializer.data
    class Meta:
        model = ModelYearReport
        fields = (
            'organization_name', 'supplier_class', 'model_year',
            'model_year_report_addresses', 'makes', 'validation_status',
            'create_user', 'model_year_report_history', 'confirmations',
            'statuses', 'ldv_sales', 'ldv_sales_previous', 'avg_sales',
            'credit_reduction_selection', 'changelog',
            'update_timestamp',
        )
class ModelYearReportsSerializer(ModelSerializer):
    """Compact serializer for report listings (status + year only)."""
    validation_status = EnumField(ModelYearReportStatuses)
    # expose the related model year by its name rather than its pk
    model_year = SlugRelatedField(
        slug_field='name',
        queryset=ModelYear.objects.all()
    )
    class Meta:
        model = ModelYearReport
        fields = (
            'organization_name', 'model_year', 'validation_status', 'id', 'organization_id'
        )
class ModelYearReportListSerializer(
    ModelSerializer, EnumSupportSerializerMixin
):
    """List-row serializer for model year reports.

    All computed columns take the viewer's role into account: suppliers
    (bceid) are shown a reduced view of government-only states, while
    government users (idir) see everything.
    """
    model_year = ModelYearSerializer()
    validation_status = SerializerMethodField()
    compliant = SerializerMethodField()
    obligation_total = SerializerMethodField()
    obligation_credits = SerializerMethodField()
    ldv_sales = SerializerMethodField()
    supplemental_status = SerializerMethodField()

    def get_ldv_sales(self, obj):
        """Return government-adjusted sales when the viewer may see them
        (owner of an assessed report, or any government user)."""
        request = self.context.get('request')
        is_assessed = (
            (request.user.organization_id == obj.organization_id and
             obj.validation_status == ModelYearReportStatuses.ASSESSED) or
            request.user.is_government
        )
        if is_assessed:
            return obj.get_ldv_sales(from_gov=True) or obj.ldv_sales
        return obj.ldv_sales

    def get_compliant(self, obj):
        """Derive a Yes/No compliance flag from the assessment text;
        '-' while the report is not yet assessed."""
        if obj.validation_status != ModelYearReportStatuses.ASSESSED:
            return '-'
        # FIX: use filter().first() instead of get(). The original get()
        # raised DoesNotExist when no assessment row existed, which made
        # the `if assessment` guard below unreachable.
        assessment = ModelYearReportAssessment.objects.filter(
            model_year_report_id=obj.id
        ).first()
        if assessment:
            found = assessment.model_year_report_assessment_description.description.find(
                'has complied'
            )
            if found >= 0:
                return 'Yes'
            return 'No'
        return 'No'

    def get_obligation_total(self, obj):
        """Placeholder; the list view does not compute obligations yet."""
        return 0

    def get_obligation_credits(self, obj):
        """Placeholder; the list view does not compute credits yet."""
        return 0

    def get_validation_status(self, obj):
        """Suppliers see SUBMITTED in place of gov-only interim states."""
        request = self.context.get('request')
        if not request.user.is_government and obj.validation_status in [
                ModelYearReportStatuses.RECOMMENDED,
                ModelYearReportStatuses.RETURNED]:
            return ModelYearReportStatuses.SUBMITTED.value
        return obj.get_validation_status_display()

    def get_supplemental_status(self, obj):
        """Status label for the newest supplemental report the viewer is
        allowed to see; falls back to the assessment status when none."""
        request = self.context.get('request')
        supplemental_records = SupplementalReport.objects.filter(
            model_year_report=obj
        ).order_by('-create_timestamp')
        supplemental_record = supplemental_records.first()
        if supplemental_record:
            # get information on who created the newest record
            create_user = UserProfile.objects.get(
                username=supplemental_record.create_user
            )
            sup_status = supplemental_record.status.value
            if create_user.is_government:
                if sup_status == 'RETURNED':
                    # created by idir but displayed as supplementary returned
                    return ('SUPPLEMENTARY {}').format(sup_status)
                if sup_status == 'REASSESSED' or sup_status == 'ASSESSED':
                    # bceid and idir both see just 'REASSESSED'
                    return 'REASSESSED'
                if request.user.is_government and sup_status in ['DRAFT', 'RECOMMENDED']:
                    # idir viewing an idir-created draft/recommendation
                    return ('REASSESSMENT {}').format(sup_status)
                if not request.user.is_government and sup_status in ['SUBMITTED', 'DRAFT', 'RECOMMENDED']:
                    # bceid may not see this one; scan older records for the
                    # newest one they are allowed to see
                    if supplemental_records.count() > 1:
                        for each in supplemental_records:
                            item_create_user = UserProfile.objects.get(
                                username=each.create_user
                            )
                            # bceid may see their own records, or
                            # returned government ones
                            if not item_create_user.is_government or each.status.value == 'RETURNED':
                                return ('SUPPLEMENTARY {}').format(each.status.value)
                            if each.status.value == 'REASSESSED':
                                return each.status.value
            else:
                # created by bceid: it is a supplemental report
                if sup_status == 'SUBMITTED':
                    return ('SUPPLEMENTARY {}').format(sup_status)
                if sup_status == 'DRAFT':
                    if not request.user.is_government:
                        return ('SUPPLEMENTARY {}').format(sup_status)
                    # idir cannot see a bceid draft; scan older records for
                    # one idir is allowed to see
                    if supplemental_records.count() > 1:
                        for each in supplemental_records:
                            item_create_user = UserProfile.objects.get(
                                username=each.create_user
                            )
                            # BUG FIX: original read `item.create_user`,
                            # which raised NameError (`item` never defined).
                            if item_create_user.is_government:
                                return ('REASSESSMENT {}').format(each.status.value)
                            if each.status.value == 'SUBMITTED':
                                return ('SUPPLEMENTARY {}').format(each.status.value)
        # no supplemental report (or nothing visible): fall back to the
        # assessment status, masked for suppliers as in get_validation_status
        if not request.user.is_government and obj.validation_status in [
                ModelYearReportStatuses.RECOMMENDED,
                ModelYearReportStatuses.RETURNED]:
            return ModelYearReportStatuses.SUBMITTED.value
        return obj.get_validation_status_display()

    class Meta:
        model = ModelYearReport
        fields = (
            'id', 'organization_name', 'model_year', 'validation_status', 'ldv_sales',
            'supplier_class', 'compliant', 'obligation_total',
            'obligation_credits', 'supplemental_status'
        )
class ModelYearReportSaveSerializer(
    ModelSerializer, EnumSupportSerializerMixin
):
    """Create/update serializer for a supplier's model year report.

    Writes the report row plus its dependent records: makes, address
    snapshots, LDV sales rows, confirmations and a DRAFT history entry.
    """
    model_year = SlugRelatedField(
        slug_field='name',
        queryset=ModelYear.objects.all()
    )
    validation_status = EnumField(
        ModelYearReportStatuses,
        required=False
    )
    makes = ListField(
        child=CharField()
    )

    def create(self, validated_data):
        """Create a report and all dependent rows from the request payload."""
        request = self.context.get('request')
        organization = request.user.organization
        makes = validated_data.pop('makes')
        model_year = validated_data.pop('model_year')
        # Raw payload fields not declared on the serializer.
        confirmations = request.data.get('confirmations')
        ldv_sales = request.data.get('ldv_sales')
        report = ModelYearReport.objects.create(
            model_year_id=model_year.id,
            organization_id=organization.id,
            organization_name=organization.name,
            **validated_data,
            create_user=request.user.username,
            update_user=request.user.username,
            supplier_class=request.user.organization.supplier_class
        )
        # One sales row per model year in the payload; unknown years are
        # silently skipped.
        for each in ldv_sales:
            model_year = ModelYear.objects.filter(
                name=each.get('model_year')
            ).first()
            if model_year:
                ModelYearReportLDVSales.objects.create(
                    model_year=model_year,
                    ldv_sales=each.get('ldv_sales'),
                    model_year_report_id=report.id
                )
        for confirmation in confirmations:
            ModelYearReportConfirmation.objects.create(
                create_user=request.user.username,
                model_year_report=report,
                has_accepted=True,
                title=request.user.title,
                signing_authority_assertion_id=confirmation
            )
        for make in makes:
            ModelYearReportMake.objects.create(
                model_year_report=report,
                make=make,
                create_user=request.user.username,
                update_user=request.user.username,
            )
        # Snapshot the organization's current addresses onto the report.
        for address in request.user.organization.organization_address:
            ModelYearReportAddress.objects.create(
                model_year_report=report,
                representative_name=address.representative_name,
                address_type=address.address_type,
                address_line_1=address.address_line_1,
                address_line_2=address.address_line_2,
                address_line_3=address.address_line_3,
                city=address.city,
                postal_code=address.postal_code,
                state=address.state,
                county=address.county,
                country=address.country,
                other=address.other
            )
        ModelYearReportHistory.objects.create(
            create_user=request.user.username,
            update_user=request.user.username,
            model_year_report_id=report.id,
            validation_status=ModelYearReportStatuses.DRAFT,
        )
        return report

    def update(self, instance, validated_data):
        """Update a draft report in place, replacing dependent rows.

        Short-circuits in two cases: when the request only asks to delete
        confirmations for a module, and when the supplier-information
        section has already been confirmed (report is then immutable).
        """
        request = self.context.get('request')
        organization = request.user.organization
        delete_confirmations = request.data.get('delete_confirmations', False)
        if delete_confirmations:
            module = request.data.get('module', None)
            ModelYearReportConfirmation.objects.filter(
                model_year_report=instance,
                signing_authority_assertion__module=module
            ).delete()
            return instance
        makes = validated_data.pop('makes')
        model_year = validated_data.pop('model_year')
        confirmations = request.data.get('confirmations')
        confirmation = ModelYearReportConfirmation.objects.filter(
            model_year_report=instance,
            signing_authority_assertion__module="supplier_information"
        ).first()
        if confirmation:
            # Supplier information already confirmed: no further edits.
            return instance
        instance.model_year_id = model_year.id
        instance.organization_name = organization.name
        instance.update_user = request.user.username
        instance.save()
        if makes:
            # Full replacement of the report's makes.
            ModelYearReportMake.objects.filter(
                model_year_report=instance,
            ).delete()
            for make in makes:
                ModelYearReportMake.objects.create(
                    model_year_report=instance,
                    make=make,
                    create_user=request.user.username,
                    update_user=request.user.username,
                )
        # Addresses are always re-snapshotted from the organization.
        ModelYearReportAddress.objects.filter(
            model_year_report=instance,
        ).delete()
        for address in request.user.organization.organization_address:
            ModelYearReportAddress.objects.create(
                model_year_report=instance,
                representative_name=address.representative_name,
                address_type=address.address_type,
                address_line_1=address.address_line_1,
                address_line_2=address.address_line_2,
                address_line_3=address.address_line_3,
                city=address.city,
                postal_code=address.postal_code,
                state=address.state,
                county=address.county,
                country=address.country,
                other=address.other
            )
        ldv_sales = request.data.get('ldv_sales', None)
        if 'ldv_sales' in request.data:
            # NOTE(review): if the key is present but null, the loop below
            # raises TypeError — assumes callers always send a list; confirm.
            ModelYearReportLDVSales.objects.filter(
                model_year_report_id=instance.id
            ).exclude(
                model_year_id=instance.model_year_id
            ).delete()
            for each in ldv_sales:
                model_year = ModelYear.objects.filter(
                    name=each.get('model_year')
                ).first()
                if model_year:
                    ModelYearReportLDVSales.objects.create(
                        model_year_id=model_year.id,
                        ldv_sales=each.get('ldv_sales'),
                        model_year_report_id=instance.id
                    )
        # Re-derive the supplier class from the (possibly new) average sales.
        if instance.get_avg_sales():
            instance.supplier_class = request.user.organization.get_current_class(
                avg_sales=instance.get_avg_sales()
            )
            instance.save()
        for confirmation in confirmations:
            ModelYearReportConfirmation.objects.update_or_create(
                model_year_report=instance,
                has_accepted=True,
                title=request.user.title,
                signing_authority_assertion_id=confirmation,
                defaults={
                    'create_user': request.user.username
                }
            )
        ModelYearReportHistory.objects.create(
            create_user=request.user.username,
            update_user=request.user.username,
            model_year_report_id=instance.id,
            validation_status=ModelYearReportStatuses.DRAFT,
        )
        return instance

    class Meta:
        model = ModelYearReport
        fields = (
            'id', 'model_year', 'validation_status', 'makes',
        )
|
from enumfields.drf import EnumField, EnumSupportSerializerMixin
from rest_framework.serializers import ModelSerializer, \
SerializerMethodField, SlugRelatedField, CharField, \
ListField
from api.models.model_year import ModelYear
from api.models.model_year_report_confirmation import \
ModelYearReportConfirmation
from api.models.model_year_report import ModelYearReport
from api.models.model_year_report_history import ModelYearReportHistory
from api.models.model_year_report_ldv_sales import ModelYearReportLDVSales
from api.models.model_year_report_address import ModelYearReportAddress
from api.models.model_year_report_make import ModelYearReportMake
from api.models.model_year_report_statuses import ModelYearReportStatuses
from api.serializers.model_year_report_ldv_sales import ModelYearReportLDVSalesSerializer
from api.models.user_profile import UserProfile
from api.models.model_year_report_assessment import ModelYearReportAssessment
from api.models.supplemental_report import SupplementalReport
from api.serializers.model_year_report_address import \
ModelYearReportAddressSerializer
from api.serializers.model_year_report_make import \
ModelYearReportMakeSerializer
from api.serializers.model_year_report_history import \
ModelYearReportHistorySerializer
from api.serializers.user import MemberSerializer
from api.serializers.vehicle import ModelYearSerializer
from api.services.model_year_report import get_model_year_report_statuses
class ModelYearReportSerializer(ModelSerializer):
    """Full detail serializer for a model year report.

    Most fields are computed per-request because visibility depends on
    whether the viewer is government (idir) or the supplier (bceid).
    """
    create_user = SerializerMethodField()
    model_year = ModelYearSerializer()
    model_year_report_addresses = ModelYearReportAddressSerializer(many=True)
    makes = SerializerMethodField()
    # validation_status = EnumField(ModelYearReportStatuses)
    validation_status = SerializerMethodField()
    model_year_report_history = SerializerMethodField()
    confirmations = SerializerMethodField()
    statuses = SerializerMethodField()
    ldv_sales = SerializerMethodField()
    ldv_sales_previous = SerializerMethodField()
    avg_sales = SerializerMethodField()
    changelog = SerializerMethodField()

    def get_validation_status(self, obj):
        """Suppliers see SUBMITTED in place of gov-only interim states."""
        request = self.context.get('request')
        if not request.user.is_government and \
                obj.validation_status in [
                    ModelYearReportStatuses.RETURNED,
                    ModelYearReportStatuses.RECOMMENDED
                ]:
            return ModelYearReportStatuses.SUBMITTED.value
        return obj.validation_status.value

    def get_ldv_sales_previous(self, obj):
        """Return the sales rows for the three years preceding the report year."""
        year = int(obj.model_year.name)
        ldv_sales = ModelYearReportLDVSales.objects.filter(
            model_year_report=obj,
            model_year__name__in=[
                str(year - 1),
                str(year - 2),
                str(year - 3)
            ]
        )
        serializer = ModelYearReportLDVSalesSerializer(ldv_sales, many=True)
        return serializer.data

    def get_avg_sales(self, obj):
        """Average supplier-reported sales over three prior years.

        Falls back to the current year's sales row (or None) when fewer
        than three prior-year rows exist.
        """
        rows = ModelYearReportLDVSales.objects.filter(
            model_year_report_id=obj.id,
            from_gov=False,
            model_year__name__lt=obj.model_year.name
        ).values_list(
            'ldv_sales', flat=True
        )[:3]
        avg_sales = 0
        if rows.count() < 3:
            row = ModelYearReportLDVSales.objects.filter(
                model_year_report_id=obj.id,
                model_year_id=obj.model_year_id
            ).first()
            if row:
                return row.ldv_sales
            else:
                return None
        avg_sales = sum(list(rows)) / 3
        return avg_sales

    def get_create_user(self, obj):
        """Expand the stored username into a member profile when one exists."""
        user_profile = UserProfile.objects.filter(username=obj.create_user)
        if user_profile.exists():
            serializer = MemberSerializer(user_profile.first(), read_only=True)
            return serializer.data
        return obj.create_user

    def get_confirmations(self, obj):
        """Return the distinct signing-authority assertion ids accepted."""
        confirmations = ModelYearReportConfirmation.objects.filter(
            model_year_report_id=obj.id
        ).values_list('signing_authority_assertion_id', flat=True).distinct()
        return confirmations

    def get_ldv_sales(self, obj):
        """Government-adjusted sales once assessed; supplier value otherwise."""
        request = self.context.get('request')
        is_assessed = (
            (request.user.organization_id == obj.organization_id and
             obj.validation_status == ModelYearReportStatuses.ASSESSED) or
            request.user.is_government
        )
        if is_assessed:
            return obj.get_ldv_sales(from_gov=True) or obj.ldv_sales
        return obj.ldv_sales

    def get_changelog(self, obj):
        """For government users, summarize analyst edits (makes added,
        sales changed); suppliers just get the sales number."""
        request = self.context.get('request')
        if request.user.is_government:
            from_gov_sales = obj.get_ldv_sales_with_year(from_gov=True)
            sales_changes = ''
            if from_gov_sales:
                not_gov_sales = obj.get_ldv_sales_with_year(from_gov=False)
                sales_changes = {'from_gov': from_gov_sales['sales'], 'not_from_gov': not_gov_sales['sales'], 'year': from_gov_sales['year']}
            gov_makes = ModelYearReportMake.objects.filter(
                model_year_report_id=obj.id,
                from_gov=True
            )
            gov_makes_additions_serializer = ModelYearReportMakeSerializer(gov_makes, many=True)
            return {'makes_additions': gov_makes_additions_serializer.data, 'ldv_changes': sales_changes}
        return obj.ldv_sales

    def get_makes(self, obj):
        """Suppliers only see government-added makes after assessment."""
        request = self.context.get('request')
        makes = ModelYearReportMake.objects.filter(
            model_year_report_id=obj.id
        )
        if not request.user.is_government and \
                obj.validation_status != ModelYearReportStatuses.ASSESSED:
            makes = makes.filter(
                from_gov=False
            )
        serializer = ModelYearReportMakeSerializer(makes, many=True)
        return serializer.data

    def get_statuses(self, obj):
        """Per-section status summary for the current viewer."""
        request = self.context.get('request')
        return get_model_year_report_statuses(obj, request.user)

    def get_model_year_report_history(self, obj):
        """Chronological history; gov-only states are hidden from suppliers."""
        request = self.context.get('request')
        history = ModelYearReportHistory.objects.filter(
            model_year_report_id=obj.id
        ).order_by('create_timestamp')
        if not request.user.is_government:
            history = history.exclude(
                validation_status__in=[
                    ModelYearReportStatuses.RECOMMENDED,
                    ModelYearReportStatuses.RETURNED,
                ]
            )
        serializer = ModelYearReportHistorySerializer(history, many=True)
        return serializer.data

    class Meta:
        model = ModelYearReport
        fields = (
            'organization_name', 'supplier_class', 'model_year',
            'model_year_report_addresses', 'makes', 'validation_status',
            'create_user', 'model_year_report_history', 'confirmations',
            'statuses', 'ldv_sales', 'ldv_sales_previous', 'avg_sales',
            'credit_reduction_selection', 'changelog',
            'update_timestamp',
        )
class ModelYearReportsSerializer(ModelSerializer):
    """Minimal serializer for model year reports.

    Exposes only the headline fields (id, year, organization, status);
    used where a full detail payload is not needed.
    """
    # Serialize the status enum by name/value rather than the raw DB value.
    validation_status = EnumField(ModelYearReportStatuses)
    # Represent the related ModelYear by its `name` field (e.g. "2021").
    model_year = SlugRelatedField(
        slug_field='name',
        queryset=ModelYear.objects.all()
    )

    class Meta:
        model = ModelYearReport
        fields = (
            'organization_name', 'model_year', 'validation_status', 'id', 'organization_id'
        )
class ModelYearReportListSerializer(
    ModelSerializer, EnumSupportSerializerMixin
):
    """List-row serializer for model year reports.

    All computed columns take the viewer's role into account: suppliers
    (bceid) are shown a reduced view of government-only states, while
    government users (idir) see everything.
    """
    model_year = ModelYearSerializer()
    validation_status = SerializerMethodField()
    compliant = SerializerMethodField()
    obligation_total = SerializerMethodField()
    obligation_credits = SerializerMethodField()
    ldv_sales = SerializerMethodField()
    supplemental_status = SerializerMethodField()

    def get_ldv_sales(self, obj):
        """Return government-adjusted sales when the viewer may see them
        (owner of an assessed report, or any government user)."""
        request = self.context.get('request')
        is_assessed = (
            (request.user.organization_id == obj.organization_id and
             obj.validation_status == ModelYearReportStatuses.ASSESSED) or
            request.user.is_government
        )
        if is_assessed:
            return obj.get_ldv_sales(from_gov=True) or obj.ldv_sales
        return obj.ldv_sales

    def get_compliant(self, obj):
        """Derive a Yes/No compliance flag from the assessment text;
        '-' while the report is not yet assessed."""
        if obj.validation_status != ModelYearReportStatuses.ASSESSED:
            return '-'
        # FIX: use filter().first() instead of get(). The original get()
        # raised DoesNotExist when no assessment row existed, which made
        # the `if assessment` guard below unreachable.
        assessment = ModelYearReportAssessment.objects.filter(
            model_year_report_id=obj.id
        ).first()
        if assessment:
            found = assessment.model_year_report_assessment_description.description.find(
                'has complied'
            )
            if found >= 0:
                return 'Yes'
            return 'No'
        return 'No'

    def get_obligation_total(self, obj):
        """Placeholder; the list view does not compute obligations yet."""
        return 0

    def get_obligation_credits(self, obj):
        """Placeholder; the list view does not compute credits yet."""
        return 0

    def get_validation_status(self, obj):
        """Suppliers see SUBMITTED in place of gov-only interim states."""
        request = self.context.get('request')
        if not request.user.is_government and obj.validation_status in [
                ModelYearReportStatuses.RECOMMENDED,
                ModelYearReportStatuses.RETURNED]:
            return ModelYearReportStatuses.SUBMITTED.value
        return obj.get_validation_status_display()

    def get_supplemental_status(self, obj):
        """Status label for the newest supplemental report the viewer is
        allowed to see; falls back to the assessment status when none."""
        request = self.context.get('request')
        supplemental_records = SupplementalReport.objects.filter(
            model_year_report=obj
        ).order_by('-create_timestamp')
        supplemental_record = supplemental_records.first()
        if supplemental_record:
            # get information on who created the newest record
            create_user = UserProfile.objects.get(
                username=supplemental_record.create_user
            )
            sup_status = supplemental_record.status.value
            if create_user.is_government:
                if sup_status == 'RETURNED':
                    # created by idir but displayed as supplementary returned
                    return ('SUPPLEMENTARY {}').format(sup_status)
                if sup_status == 'REASSESSED' or sup_status == 'ASSESSED':
                    # bceid and idir both see just 'REASSESSED'
                    return 'REASSESSED'
                if request.user.is_government and sup_status in ['DRAFT', 'RECOMMENDED']:
                    # idir viewing an idir-created draft/recommendation
                    return ('REASSESSMENT {}').format(sup_status)
                if not request.user.is_government and sup_status in ['SUBMITTED', 'DRAFT', 'RECOMMENDED']:
                    # bceid may not see this one; scan older records for the
                    # newest one they are allowed to see
                    if supplemental_records.count() > 1:
                        for each in supplemental_records:
                            item_create_user = UserProfile.objects.get(
                                username=each.create_user
                            )
                            # bceid may see their own records, or
                            # returned government ones
                            if not item_create_user.is_government or each.status.value == 'RETURNED':
                                return ('SUPPLEMENTARY {}').format(each.status.value)
                            if each.status.value == 'REASSESSED':
                                return each.status.value
            else:
                # created by bceid: it is a supplemental report
                if sup_status == 'SUBMITTED':
                    return ('SUPPLEMENTARY {}').format(sup_status)
                if sup_status == 'DRAFT':
                    if not request.user.is_government:
                        return ('SUPPLEMENTARY {}').format(sup_status)
                    # idir cannot see a bceid draft; scan older records for
                    # one idir is allowed to see
                    if supplemental_records.count() > 1:
                        for each in supplemental_records:
                            item_create_user = UserProfile.objects.get(
                                username=each.create_user
                            )
                            # BUG FIX: original read `item.create_user`,
                            # which raised NameError (`item` never defined).
                            if item_create_user.is_government:
                                return ('REASSESSMENT {}').format(each.status.value)
                            if each.status.value == 'SUBMITTED':
                                return ('SUPPLEMENTARY {}').format(each.status.value)
        # no supplemental report (or nothing visible): fall back to the
        # assessment status, masked for suppliers as in get_validation_status
        if not request.user.is_government and obj.validation_status in [
                ModelYearReportStatuses.RECOMMENDED,
                ModelYearReportStatuses.RETURNED]:
            return ModelYearReportStatuses.SUBMITTED.value
        return obj.get_validation_status_display()

    class Meta:
        model = ModelYearReport
        fields = (
            'id', 'organization_name', 'model_year', 'validation_status', 'ldv_sales',
            'supplier_class', 'compliant', 'obligation_total',
            'obligation_credits', 'supplemental_status'
        )
class ModelYearReportSaveSerializer(
    ModelSerializer, EnumSupportSerializerMixin
):
    """Create/update serializer for a supplier's model year report.

    Writes the report row plus its dependent records: makes, address
    snapshots, LDV sales rows, confirmations and a DRAFT history entry.
    """
    model_year = SlugRelatedField(
        slug_field='name',
        queryset=ModelYear.objects.all()
    )
    validation_status = EnumField(
        ModelYearReportStatuses,
        required=False
    )
    makes = ListField(
        child=CharField()
    )

    def create(self, validated_data):
        """Create a report and all dependent rows from the request payload."""
        request = self.context.get('request')
        organization = request.user.organization
        makes = validated_data.pop('makes')
        model_year = validated_data.pop('model_year')
        # Raw payload fields not declared on the serializer.
        confirmations = request.data.get('confirmations')
        ldv_sales = request.data.get('ldv_sales')
        report = ModelYearReport.objects.create(
            model_year_id=model_year.id,
            organization_id=organization.id,
            organization_name=organization.name,
            **validated_data,
            create_user=request.user.username,
            update_user=request.user.username,
            supplier_class=request.user.organization.supplier_class
        )
        # One sales row per model year in the payload; unknown years are
        # silently skipped.
        for each in ldv_sales:
            model_year = ModelYear.objects.filter(
                name=each.get('model_year')
            ).first()
            if model_year:
                ModelYearReportLDVSales.objects.create(
                    model_year=model_year,
                    ldv_sales=each.get('ldv_sales'),
                    model_year_report_id=report.id
                )
        for confirmation in confirmations:
            ModelYearReportConfirmation.objects.create(
                create_user=request.user.username,
                model_year_report=report,
                has_accepted=True,
                title=request.user.title,
                signing_authority_assertion_id=confirmation
            )
        for make in makes:
            ModelYearReportMake.objects.create(
                model_year_report=report,
                make=make,
                create_user=request.user.username,
                update_user=request.user.username,
            )
        # Snapshot the organization's current addresses onto the report.
        for address in request.user.organization.organization_address:
            ModelYearReportAddress.objects.create(
                model_year_report=report,
                representative_name=address.representative_name,
                address_type=address.address_type,
                address_line_1=address.address_line_1,
                address_line_2=address.address_line_2,
                address_line_3=address.address_line_3,
                city=address.city,
                postal_code=address.postal_code,
                state=address.state,
                county=address.county,
                country=address.country,
                other=address.other
            )
        ModelYearReportHistory.objects.create(
            create_user=request.user.username,
            update_user=request.user.username,
            model_year_report_id=report.id,
            validation_status=ModelYearReportStatuses.DRAFT,
        )
        return report

    def update(self, instance, validated_data):
        """Update a draft report in place, replacing dependent rows.

        Short-circuits in two cases: when the request only asks to delete
        confirmations for a module, and when the supplier-information
        section has already been confirmed (report is then immutable).
        """
        request = self.context.get('request')
        organization = request.user.organization
        delete_confirmations = request.data.get('delete_confirmations', False)
        if delete_confirmations:
            module = request.data.get('module', None)
            ModelYearReportConfirmation.objects.filter(
                model_year_report=instance,
                signing_authority_assertion__module=module
            ).delete()
            return instance
        makes = validated_data.pop('makes')
        model_year = validated_data.pop('model_year')
        confirmations = request.data.get('confirmations')
        confirmation = ModelYearReportConfirmation.objects.filter(
            model_year_report=instance,
            signing_authority_assertion__module="supplier_information"
        ).first()
        if confirmation:
            # Supplier information already confirmed: no further edits.
            return instance
        instance.model_year_id = model_year.id
        instance.organization_name = organization.name
        instance.update_user = request.user.username
        instance.save()
        if makes:
            # Full replacement of the report's makes.
            ModelYearReportMake.objects.filter(
                model_year_report=instance,
            ).delete()
            for make in makes:
                ModelYearReportMake.objects.create(
                    model_year_report=instance,
                    make=make,
                    create_user=request.user.username,
                    update_user=request.user.username,
                )
        # Addresses are always re-snapshotted from the organization.
        ModelYearReportAddress.objects.filter(
            model_year_report=instance,
        ).delete()
        for address in request.user.organization.organization_address:
            ModelYearReportAddress.objects.create(
                model_year_report=instance,
                representative_name=address.representative_name,
                address_type=address.address_type,
                address_line_1=address.address_line_1,
                address_line_2=address.address_line_2,
                address_line_3=address.address_line_3,
                city=address.city,
                postal_code=address.postal_code,
                state=address.state,
                county=address.county,
                country=address.country,
                other=address.other
            )
        ldv_sales = request.data.get('ldv_sales', None)
        if 'ldv_sales' in request.data:
            # NOTE(review): if the key is present but null, the loop below
            # raises TypeError — assumes callers always send a list; confirm.
            ModelYearReportLDVSales.objects.filter(
                model_year_report_id=instance.id
            ).exclude(
                model_year_id=instance.model_year_id
            ).delete()
            for each in ldv_sales:
                model_year = ModelYear.objects.filter(
                    name=each.get('model_year')
                ).first()
                if model_year:
                    ModelYearReportLDVSales.objects.create(
                        model_year_id=model_year.id,
                        ldv_sales=each.get('ldv_sales'),
                        model_year_report_id=instance.id
                    )
        # Re-derive the supplier class from the (possibly new) average sales.
        if instance.get_avg_sales():
            instance.supplier_class = request.user.organization.get_current_class(
                avg_sales=instance.get_avg_sales()
            )
            instance.save()
        for confirmation in confirmations:
            ModelYearReportConfirmation.objects.update_or_create(
                model_year_report=instance,
                has_accepted=True,
                title=request.user.title,
                signing_authority_assertion_id=confirmation,
                defaults={
                    'create_user': request.user.username
                }
            )
        ModelYearReportHistory.objects.create(
            create_user=request.user.username,
            update_user=request.user.username,
            model_year_report_id=instance.id,
            validation_status=ModelYearReportStatuses.DRAFT,
        )
        return instance

    class Meta:
        model = ModelYearReport
        fields = (
            'id', 'model_year', 'validation_status', 'makes',
        )
|
en
| 0.972627
|
# validation_status = EnumField(ModelYearReportStatuses) # get information on who created the record # this record was created by idir but # should show up as supplementary returned # bceid and idir can see just 'reassessed' as status # created by idir and viewed by idir, they can see # draft or recommended status # if it is being viewed by bceid, they shouldnt see it # unless it is reassessed or returned # find the newest record that is either created by bceid or one that they are allowed to see # bceid are allowed to see any created by them or # if the status is REASSESSED or RETURNED? # if created by bceid its a supplemental report # same logic for bceid to check if theres another record # find the newest record that is either # created by bceid or they are able to see # they are allowed to see any created by idir # or if it is submitted # no supplemental report, just return the status from the assessment
| 1.955661
| 2
|
dbfread/exceptions.py
|
neurohn/dbfread
| 3
|
6628580
|
<filename>dbfread/exceptions.py
class DBFNotFound(IOError):
    """Raised if the DBF file was not found."""


class MissingMemoFile(IOError):
    """Raised if the corresponding memo file was not found."""


__all__ = ['DBFNotFound', 'MissingMemoFile']
|
<filename>dbfread/exceptions.py
# Exception types raised by the DBF reader.
class DBFNotFound(IOError):
    """Raised if the DBF file was not found."""
    pass


class MissingMemoFile(IOError):
    """Raised if the corresponding memo file was not found."""


# Public API of this module.
__all__ = ['DBFNotFound', 'MissingMemoFile']
|
en
| 0.970595
|
Raised if the DBF file was not found. Raised if the corresponding memo file was not found.
| 2.456555
| 2
|
normalization.py
|
cadurosar/graph_filter
| 0
|
6628581
|
#from https://github.com/Tiiiger/SGC
import numpy as np
import scipy.sparse as sp
import torch
def normalized_laplacian(adj):
    """Symmetric normalized Laplacian: L = I - D^-1/2 * A * D^-1/2."""
    adj = sp.coo_matrix(adj)
    degrees = np.array(adj.sum(1))
    inv_sqrt_deg = np.power(degrees, -0.5).flatten()
    # Zero-degree nodes produce inf; treat them as disconnected.
    inv_sqrt_deg[np.isinf(inv_sqrt_deg)] = 0.
    d_half = sp.diags(inv_sqrt_deg)
    identity = sp.eye(adj.shape[0])
    return (identity - d_half.dot(adj).dot(d_half)).tocoo()
def laplacian(adj):
    """Combinatorial Laplacian: L = D - A."""
    adj = sp.coo_matrix(adj)
    degrees = np.array(adj.sum(1)).flatten()
    degree_matrix = sp.diags(degrees)
    return (degree_matrix - adj).tocoo()
def gcn(adj):
    """First-order GCN filter: A' = I + D^-1/2 * A * D^-1/2."""
    adj = sp.coo_matrix(adj)
    row_sum = np.array(adj.sum(1))
    d_inv_sqrt = np.power(row_sum, -0.5).flatten()
    # Zero-degree nodes produce inf after the power; zero them out.
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
    d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
    return (sp.eye(adj.shape[0]) + d_mat_inv_sqrt.dot(adj).dot(d_mat_inv_sqrt)).tocoo()
def aug_normalized_adjacency(adj, gamma=1):
    """Renormalized adjacency (GCN trick):
    A' = (D + gamma*I)^-1/2 * (A + gamma*I) * (D + gamma*I)^-1/2."""
    augmented = sp.coo_matrix(adj + gamma * sp.eye(adj.shape[0]))
    degrees = np.array(augmented.sum(1)).flatten()
    inv_sqrt = np.power(degrees, -0.5)
    inv_sqrt[np.isinf(inv_sqrt)] = 0.
    d_half = sp.diags(inv_sqrt)
    return d_half.dot(augmented).dot(d_half).tocoo()
def aug_normalized_adjacency_full(adj, gamma=1):
    """Dense (numpy) version of aug_normalized_adjacency:
    A' = (D + gamma*I)^-1/2 * (A + gamma*I) * (D + gamma*I)^-1/2."""
    adj = adj + gamma * np.eye(adj.shape[0])
    row_sum = np.array(adj.sum(1))
    d_inv_sqrt = np.power(row_sum, -0.5).flatten()
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
    d_mat_inv_sqrt = np.diag(d_inv_sqrt)
    return d_mat_inv_sqrt.dot(adj).dot(d_mat_inv_sqrt)
def normalized_adjacency(adj):
    """Symmetrically normalized adjacency: A' = D^-1/2 * A * D^-1/2."""
    adj = sp.coo_matrix(adj)
    row_sum = np.array(adj.sum(1))
    d_inv_sqrt = np.power(row_sum, -0.5).flatten()
    # Zero-degree nodes produce inf after the power; zero them out.
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
    d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
    return (d_mat_inv_sqrt.dot(adj).dot(d_mat_inv_sqrt)).tocoo()
def random_walk_laplacian(adj):
    """Random-walk Laplacian: A' = I - D^-1 * A.

    NOTE(review): unlike the D^-1/2 variants above, zero-degree rows are
    not guarded here, so isolated nodes yield inf entries — confirm
    inputs have no zero-degree nodes.
    """
    adj = sp.coo_matrix(adj)
    row_sum = np.array(adj.sum(1))
    d_inv = np.power(row_sum, -1.0).flatten()
    d_mat = sp.diags(d_inv)
    return (sp.eye(adj.shape[0]) - d_mat.dot(adj)).tocoo()
def aug_random_walk(adj, gamma=1):
    """Row-normalized augmented adjacency: A' = (D + gamma*I)^-1 * (A + gamma*I)."""
    adj = adj + gamma*sp.eye(adj.shape[0])
    adj = sp.coo_matrix(adj)
    row_sum = np.array(adj.sum(1))
    # Self-loops guarantee positive degrees, so no inf guard is needed.
    d_inv = np.power(row_sum, -1.0).flatten()
    d_mat = sp.diags(d_inv)
    return d_mat.dot(adj).tocoo()
def random_walk(adj):
    """Row-stochastic transition matrix: A' = D^-1 * A."""
    adj = sp.coo_matrix(adj)
    degrees = np.array(adj.sum(1)).flatten()
    inv_deg = np.power(degrees, -1.0)
    d_inv = sp.diags(inv_deg)
    return d_inv.dot(adj).tocoo()
def no_norm(adj):
    """Identity normalization: return A unchanged, in COO format."""
    return sp.coo_matrix(adj)
def low_pass(adj, alpha=0.1):
    """Low-pass graph filter: A' = I - alpha * L_norm."""
    x = normalized_laplacian(adj)
    return (sp.eye(adj.shape[0]) - alpha*x).tocoo()
def low_pass_inverse(adj, alpha=0.99):
    """Inverse low-pass filter: (I + alpha * L_norm)^-1, sparsified.

    NOTE(review): relies on `sp.linalg` being reachable as an attribute of
    scipy.sparse; depending on the scipy version this may require an
    explicit `import scipy.sparse.linalg` — confirm.
    """
    x = normalized_laplacian(adj)
    the_the = sp.eye(adj.shape[0]) + alpha*x
    inverse = sp.linalg.inv(the_the.tocsc())
    # Drop near-zero entries to keep the inverse sparse.
    inverse = inverse.multiply(inverse > 1e-4)
    return inverse.tocoo()
def aug_low_pass(adj):
    """Low-pass filter (alpha = 0.5) applied to the self-loop-augmented graph."""
    adj = adj + sp.eye(adj.shape[0])
    adj = sp.coo_matrix(adj)
    x = normalized_laplacian(adj)
    return (sp.eye(adj.shape[0]) - 0.5*x).tocoo()
def fetch_normalization(type):
    """Look up a graph-normalization function by its registered name.

    :param type: name of the normalization technique (see keys below).
        Kept as-is (shadows the builtin) for caller compatibility.
    :return: the matching function; for an unknown name, a callable that
        raises ValueError when invoked.
    """
    switcher = {
        'NormLap': normalized_laplacian,  # A' = I - D^-1/2 * A * D^-1/2
        'Lap': laplacian,  # A' = D - A
        'RWalkLap': random_walk_laplacian,  # A' = I - D^-1 * A
        'FirstOrderGCN': gcn,  # A' = I + D^-1/2 * A * D^-1/2
        'AugNormAdj': aug_normalized_adjacency,  # A' = (D + I)^-1/2 * ( A + I ) * (D + I)^-1/2
        'NormAdj': normalized_adjacency,  # D^-1/2 * A * D^-1/2
        'RWalk': random_walk,  # A' = D^-1*A
        'AugRWalk': aug_random_walk,  # A' = (D + I)^-1*(A + I)
        'NoNorm': no_norm,  # A' = A
        'LowPass': low_pass,  # A' = I - alpha * L_norm (original comment "A' = A" was wrong)
    }

    def _invalid(*args, **kwargs):
        # BUG FIX: the original fallback was a zero-argument lambda
        # returning a string, which crashed with TypeError the moment a
        # caller applied it to an adjacency matrix. Fail loudly instead.
        raise ValueError("Invalid normalization technique: {}".format(type))

    return switcher.get(type, _invalid)
def row_normalize(mx):
    """Scale each row of *mx* to sum to 1; all-zero rows are left as zeros."""
    row_sums = np.array(mx.sum(1))
    scale = np.power(row_sums, -1).flatten()
    # Zero rows produce inf scale factors; leave those rows untouched.
    scale[np.isinf(scale)] = 0.
    scaler = sp.diags(scale)
    return scaler.dot(mx)
|
#from https://github.com/Tiiiger/SGC
import numpy as np
import scipy.sparse as sp
import torch
def normalized_laplacian(adj):
    """Symmetric normalized Laplacian: A' = I - D^-1/2 * A * D^-1/2."""
    adj = sp.coo_matrix(adj)
    row_sum = np.array(adj.sum(1))
    d_inv_sqrt = np.power(row_sum, -0.5).flatten()
    # Zero-degree nodes produce inf after the power; zero them out.
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
    d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
    return (sp.eye(adj.shape[0]) - d_mat_inv_sqrt.dot(adj).dot(d_mat_inv_sqrt)).tocoo()
def laplacian(adj):
    """Combinatorial Laplacian: A' = D - A."""
    adj = sp.coo_matrix(adj)
    row_sum = np.array(adj.sum(1)).flatten()
    d_mat = sp.diags(row_sum)
    return (d_mat - adj).tocoo()
def gcn(adj):
    """First-order GCN propagation: A' = I + D^-1/2 * A * D^-1/2."""
    adj = sp.coo_matrix(adj)
    degrees = np.array(adj.sum(1)).flatten()
    scale = np.power(degrees, -0.5)
    # Isolated nodes: replace inf scale factors with zero.
    scale[np.isinf(scale)] = 0.
    half = sp.diags(scale)
    return (sp.eye(adj.shape[0]) + half.dot(adj).dot(half)).tocoo()
def aug_normalized_adjacency(adj, gamma=1):
    """Renormalized adjacency (GCN trick):
    A' = (D + gamma*I)^-1/2 * (A + gamma*I) * (D + gamma*I)^-1/2."""
    adj = adj + gamma * sp.eye(adj.shape[0])
    adj = sp.coo_matrix(adj)
    row_sum = np.array(adj.sum(1))
    d_inv_sqrt = np.power(row_sum, -0.5).flatten()
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
    d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
    return d_mat_inv_sqrt.dot(adj).dot(d_mat_inv_sqrt).tocoo()
def aug_normalized_adjacency_full(adj, gamma=1):
    """Dense (numpy) variant of aug_normalized_adjacency."""
    augmented = adj + gamma * np.eye(adj.shape[0])
    degrees = np.array(augmented.sum(1)).flatten()
    inv_sqrt = np.power(degrees, -0.5)
    inv_sqrt[np.isinf(inv_sqrt)] = 0.
    d_half = np.diag(inv_sqrt)
    return d_half.dot(augmented).dot(d_half)
def normalized_adjacency(adj):
    """Symmetrically normalized adjacency: A' = D^-1/2 * A * D^-1/2."""
    adj = sp.coo_matrix(adj)
    degrees = np.array(adj.sum(1)).flatten()
    inv_sqrt = np.power(degrees, -0.5)
    # Isolated nodes: replace inf scale factors with zero.
    inv_sqrt[np.isinf(inv_sqrt)] = 0.
    half = sp.diags(inv_sqrt)
    return half.dot(adj).dot(half).tocoo()
def random_walk_laplacian(adj):
    """Random-walk Laplacian: A' = I - D^-1 * A.

    No inf guard for zero-degree rows, matching the original behavior.
    """
    adj = sp.coo_matrix(adj)
    degrees = np.array(adj.sum(1)).flatten()
    inv_deg = np.power(degrees, -1.0)
    d_inv = sp.diags(inv_deg)
    return (sp.eye(adj.shape[0]) - d_inv.dot(adj)).tocoo()
def aug_random_walk(adj, gamma=1):
    """Row-normalized augmented adjacency: A' = (D + gamma*I)^-1 * (A + gamma*I)."""
    augmented = sp.coo_matrix(adj + gamma * sp.eye(adj.shape[0]))
    degrees = np.array(augmented.sum(1)).flatten()
    # Self-loops guarantee positive degrees, so the inverse is finite.
    d_inv = sp.diags(np.power(degrees, -1.0))
    return d_inv.dot(augmented).tocoo()
def random_walk(adj):
    """Row-stochastic transition matrix: A' = D^-1 * A.

    NOTE(review): zero-degree rows are not guarded here (unlike the
    D^-1/2 variants), so isolated nodes yield inf entries — confirm
    inputs have no zero-degree nodes.
    """
    adj = sp.coo_matrix(adj)
    row_sum = np.array(adj.sum(1))
    d_inv = np.power(row_sum, -1.0).flatten()
    d_mat = sp.diags(d_inv)
    return d_mat.dot(adj).tocoo()
def no_norm(adj):
    """Identity normalization: A' = A, returned in COO format."""
    adj = sp.coo_matrix(adj)
    return adj
def low_pass(adj, alpha=0.1):
    """Low-pass graph filter: A' = I - alpha * L_norm."""
    laplacian_term = alpha * normalized_laplacian(adj)
    return (sp.eye(adj.shape[0]) - laplacian_term).tocoo()
def low_pass_inverse(adj, alpha=0.99):
    """Inverse low-pass filter: (I + alpha * L_norm)^-1, sparsified.

    NOTE(review): relies on `sp.linalg` being reachable as an attribute of
    scipy.sparse; depending on the scipy version this may require an
    explicit `import scipy.sparse.linalg` — confirm.
    """
    x = normalized_laplacian(adj)
    the_the = sp.eye(adj.shape[0]) + alpha*x
    inverse = sp.linalg.inv(the_the.tocsc())
    # Drop near-zero entries to keep the inverse sparse.
    inverse = inverse.multiply(inverse > 1e-4)
    return inverse.tocoo()
def aug_low_pass(adj):
    """Low-pass filter (alpha = 0.5) applied to the self-loop-augmented graph."""
    adj = adj + sp.eye(adj.shape[0])
    adj = sp.coo_matrix(adj)
    x = normalized_laplacian(adj)
    return (sp.eye(adj.shape[0]) - 0.5*x).tocoo()
def fetch_normalization(type):
    """Map a normalization name to the function implementing it.

    :param type: one of the keys below (e.g. 'AugNormAdj', 'RWalk').
    :return: the normalization function; each takes an adjacency matrix
        (plus optional keyword args) and returns the normalized matrix.
    :raises ValueError: for an unknown name. (Previously an unknown name
        returned a zero-argument lambda, so calling it with an adjacency
        matrix surfaced only as a confusing TypeError.)
    """
    # NOTE: the parameter is named ``type`` (shadowing the builtin) to stay
    # backward-compatible with keyword callers.
    switcher = {
        'NormLap': normalized_laplacian,        # A' = I - D^-1/2 * A * D^-1/2
        'Lap': laplacian,                       # A' = D - A
        'RWalkLap': random_walk_laplacian,      # A' = I - D^-1 * A
        'FirstOrderGCN': gcn,                   # A' = I + D^-1/2 * A * D^-1/2
        'AugNormAdj': aug_normalized_adjacency, # A' = (D + I)^-1/2 * ( A + I ) * (D + I)^-1/2
        'NormAdj': normalized_adjacency,        # D^-1/2 * A * D^-1/2
        'RWalk': random_walk,                   # A' = D^-1*A
        'AugRWalk': aug_random_walk,            # A' = (D + I)^-1*(A + I)
        'NoNorm': no_norm,                      # A' = A
        'LowPass': low_pass,                    # A' = I - alpha * L
    }
    try:
        return switcher[type]
    except KeyError:
        raise ValueError("Invalid normalization technique: %r" % (type,))
def row_normalize(mx):
    """Row-normalize a sparse matrix so each nonzero row sums to 1.

    Zero rows are left as zero (their inverse row sum is clamped to 0).
    Note: the exponent is the integer -1, matching the original — integer
    dtype inputs therefore behave exactly as before.
    """
    inv_row_sums = np.power(np.array(mx.sum(1)), -1).flatten()
    inv_row_sums[np.isinf(inv_row_sums)] = 0.
    return sp.diags(inv_row_sums).dot(mx)
|
en
| 0.641594
|
#from https://github.com/Tiiiger/SGC # A' = I - D^-1/2 * A * D^-1/2 # A' = D - A # A' = I - D^-1 * A # A' = I + D^-1/2 * A * D^-1/2 # A' = (D + I)^-1/2 * ( A + I ) * (D + I)^-1/2 # D^-1/2 * A * D^-1/2 # A' = D^-1*A # A' = (D + I)^-1*(A + I) # A' = A # A' = A Row-normalize sparse matrix
| 2.186975
| 2
|
src/oci/healthchecks/models/ping_probe_result_summary.py
|
Manny27nyc/oci-python-sdk
| 249
|
6628582
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class PingProbeResultSummary(object):
    """
    The results returned by running a ping probe. All times and durations are
    returned in milliseconds. All times are relative to the POSIX epoch
    (1970-01-01T00:00Z).
    """
    #: A constant which can be used with the error_category property of a PingProbeResultSummary.
    #: This constant has a value of "NONE"
    ERROR_CATEGORY_NONE = "NONE"
    #: A constant which can be used with the error_category property of a PingProbeResultSummary.
    #: This constant has a value of "DNS"
    ERROR_CATEGORY_DNS = "DNS"
    #: A constant which can be used with the error_category property of a PingProbeResultSummary.
    #: This constant has a value of "TRANSPORT"
    ERROR_CATEGORY_TRANSPORT = "TRANSPORT"
    #: A constant which can be used with the error_category property of a PingProbeResultSummary.
    #: This constant has a value of "NETWORK"
    ERROR_CATEGORY_NETWORK = "NETWORK"
    #: A constant which can be used with the error_category property of a PingProbeResultSummary.
    #: This constant has a value of "SYSTEM"
    ERROR_CATEGORY_SYSTEM = "SYSTEM"
    #: A constant which can be used with the protocol property of a PingProbeResultSummary.
    #: This constant has a value of "ICMP"
    PROTOCOL_ICMP = "ICMP"
    #: A constant which can be used with the protocol property of a PingProbeResultSummary.
    #: This constant has a value of "TCP"
    PROTOCOL_TCP = "TCP"
    def __init__(self, **kwargs):
        """
        Initializes a new PingProbeResultSummary object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):
        :param key:
            The value to assign to the key property of this PingProbeResultSummary.
        :type key: str
        :param probe_configuration_id:
            The value to assign to the probe_configuration_id property of this PingProbeResultSummary.
        :type probe_configuration_id: str
        :param start_time:
            The value to assign to the start_time property of this PingProbeResultSummary.
        :type start_time: float
        :param target:
            The value to assign to the target property of this PingProbeResultSummary.
        :type target: str
        :param vantage_point_name:
            The value to assign to the vantage_point_name property of this PingProbeResultSummary.
        :type vantage_point_name: str
        :param is_timed_out:
            The value to assign to the is_timed_out property of this PingProbeResultSummary.
        :type is_timed_out: bool
        :param is_healthy:
            The value to assign to the is_healthy property of this PingProbeResultSummary.
        :type is_healthy: bool
        :param error_category:
            The value to assign to the error_category property of this PingProbeResultSummary.
            Allowed values for this property are: "NONE", "DNS", "TRANSPORT", "NETWORK", "SYSTEM", 'UNKNOWN_ENUM_VALUE'.
            Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :type error_category: str
        :param error_message:
            The value to assign to the error_message property of this PingProbeResultSummary.
        :type error_message: str
        :param protocol:
            The value to assign to the protocol property of this PingProbeResultSummary.
            Allowed values for this property are: "ICMP", "TCP", 'UNKNOWN_ENUM_VALUE'.
            Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :type protocol: str
        :param connection:
            The value to assign to the connection property of this PingProbeResultSummary.
        :type connection: oci.healthchecks.models.Connection
        :param dns:
            The value to assign to the dns property of this PingProbeResultSummary.
        :type dns: oci.healthchecks.models.DNS
        :param domain_lookup_start:
            The value to assign to the domain_lookup_start property of this PingProbeResultSummary.
        :type domain_lookup_start: float
        :param domain_lookup_end:
            The value to assign to the domain_lookup_end property of this PingProbeResultSummary.
        :type domain_lookup_end: float
        :param latency_in_ms:
            The value to assign to the latency_in_ms property of this PingProbeResultSummary.
        :type latency_in_ms: float
        :param icmp_code:
            The value to assign to the icmp_code property of this PingProbeResultSummary.
        :type icmp_code: int
        """
        # Maps each Python attribute to its declared Swagger type, used by the
        # SDK's generic (de)serialization machinery.
        self.swagger_types = {
            'key': 'str',
            'probe_configuration_id': 'str',
            'start_time': 'float',
            'target': 'str',
            'vantage_point_name': 'str',
            'is_timed_out': 'bool',
            'is_healthy': 'bool',
            'error_category': 'str',
            'error_message': 'str',
            'protocol': 'str',
            'connection': 'Connection',
            'dns': 'DNS',
            'domain_lookup_start': 'float',
            'domain_lookup_end': 'float',
            'latency_in_ms': 'float',
            'icmp_code': 'int'
        }
        # Maps each snake_case Python attribute to its camelCase JSON field name.
        self.attribute_map = {
            'key': 'key',
            'probe_configuration_id': 'probeConfigurationId',
            'start_time': 'startTime',
            'target': 'target',
            'vantage_point_name': 'vantagePointName',
            'is_timed_out': 'isTimedOut',
            'is_healthy': 'isHealthy',
            'error_category': 'errorCategory',
            'error_message': 'errorMessage',
            'protocol': 'protocol',
            'connection': 'connection',
            'dns': 'dns',
            'domain_lookup_start': 'domainLookupStart',
            'domain_lookup_end': 'domainLookupEnd',
            'latency_in_ms': 'latencyInMs',
            'icmp_code': 'icmpCode'
        }
        # Backing fields for the properties below; populated from kwargs by
        # the @init_model_state_from_kwargs decorator.
        self._key = None
        self._probe_configuration_id = None
        self._start_time = None
        self._target = None
        self._vantage_point_name = None
        self._is_timed_out = None
        self._is_healthy = None
        self._error_category = None
        self._error_message = None
        self._protocol = None
        self._connection = None
        self._dns = None
        self._domain_lookup_start = None
        self._domain_lookup_end = None
        self._latency_in_ms = None
        self._icmp_code = None
    @property
    def key(self):
        """
        Gets the key of this PingProbeResultSummary.
        A value identifying this specific probe result. The key is only unique within
        the results of its probe configuration. The key may be reused after 90 days.
        :return: The key of this PingProbeResultSummary.
        :rtype: str
        """
        return self._key
    @key.setter
    def key(self, key):
        """
        Sets the key of this PingProbeResultSummary.
        A value identifying this specific probe result. The key is only unique within
        the results of its probe configuration. The key may be reused after 90 days.
        :param key: The key of this PingProbeResultSummary.
        :type: str
        """
        self._key = key
    @property
    def probe_configuration_id(self):
        """
        Gets the probe_configuration_id of this PingProbeResultSummary.
        The OCID of the monitor or on-demand probe responsible for creating this result.
        :return: The probe_configuration_id of this PingProbeResultSummary.
        :rtype: str
        """
        return self._probe_configuration_id
    @probe_configuration_id.setter
    def probe_configuration_id(self, probe_configuration_id):
        """
        Sets the probe_configuration_id of this PingProbeResultSummary.
        The OCID of the monitor or on-demand probe responsible for creating this result.
        :param probe_configuration_id: The probe_configuration_id of this PingProbeResultSummary.
        :type: str
        """
        self._probe_configuration_id = probe_configuration_id
    @property
    def start_time(self):
        """
        Gets the start_time of this PingProbeResultSummary.
        The date and time the probe was executed, expressed in milliseconds since the
        POSIX epoch. This field is defined by the PerformanceResourceTiming interface
        of the W3C Resource Timing specification. For more information, see
        `Resource Timing`__.
        __ https://w3c.github.io/resource-timing/#sec-resource-timing
        :return: The start_time of this PingProbeResultSummary.
        :rtype: float
        """
        return self._start_time
    @start_time.setter
    def start_time(self, start_time):
        """
        Sets the start_time of this PingProbeResultSummary.
        The date and time the probe was executed, expressed in milliseconds since the
        POSIX epoch. This field is defined by the PerformanceResourceTiming interface
        of the W3C Resource Timing specification. For more information, see
        `Resource Timing`__.
        __ https://w3c.github.io/resource-timing/#sec-resource-timing
        :param start_time: The start_time of this PingProbeResultSummary.
        :type: float
        """
        self._start_time = start_time
    @property
    def target(self):
        """
        Gets the target of this PingProbeResultSummary.
        The target hostname or IP address of the probe.
        :return: The target of this PingProbeResultSummary.
        :rtype: str
        """
        return self._target
    @target.setter
    def target(self, target):
        """
        Sets the target of this PingProbeResultSummary.
        The target hostname or IP address of the probe.
        :param target: The target of this PingProbeResultSummary.
        :type: str
        """
        self._target = target
    @property
    def vantage_point_name(self):
        """
        Gets the vantage_point_name of this PingProbeResultSummary.
        The name of the vantage point that executed the probe.
        :return: The vantage_point_name of this PingProbeResultSummary.
        :rtype: str
        """
        return self._vantage_point_name
    @vantage_point_name.setter
    def vantage_point_name(self, vantage_point_name):
        """
        Sets the vantage_point_name of this PingProbeResultSummary.
        The name of the vantage point that executed the probe.
        :param vantage_point_name: The vantage_point_name of this PingProbeResultSummary.
        :type: str
        """
        self._vantage_point_name = vantage_point_name
    @property
    def is_timed_out(self):
        """
        Gets the is_timed_out of this PingProbeResultSummary.
        True if the probe did not complete before the configured `timeoutInSeconds` value.
        :return: The is_timed_out of this PingProbeResultSummary.
        :rtype: bool
        """
        return self._is_timed_out
    @is_timed_out.setter
    def is_timed_out(self, is_timed_out):
        """
        Sets the is_timed_out of this PingProbeResultSummary.
        True if the probe did not complete before the configured `timeoutInSeconds` value.
        :param is_timed_out: The is_timed_out of this PingProbeResultSummary.
        :type: bool
        """
        self._is_timed_out = is_timed_out
    @property
    def is_healthy(self):
        """
        Gets the is_healthy of this PingProbeResultSummary.
        True if the probe result is determined to be healthy based on probe
        type-specific criteria. For HTTP probes, a probe result is considered
        healthy if the HTTP response code is greater than or equal to 200 and
        less than 300.
        :return: The is_healthy of this PingProbeResultSummary.
        :rtype: bool
        """
        return self._is_healthy
    @is_healthy.setter
    def is_healthy(self, is_healthy):
        """
        Sets the is_healthy of this PingProbeResultSummary.
        True if the probe result is determined to be healthy based on probe
        type-specific criteria. For HTTP probes, a probe result is considered
        healthy if the HTTP response code is greater than or equal to 200 and
        less than 300.
        :param is_healthy: The is_healthy of this PingProbeResultSummary.
        :type: bool
        """
        self._is_healthy = is_healthy
    @property
    def error_category(self):
        """
        Gets the error_category of this PingProbeResultSummary.
        The category of error if an error occurs executing the probe.
        The `errorMessage` field provides a message with the error details.
        * NONE - No error
        * DNS - DNS errors
        * TRANSPORT - Transport-related errors, for example a \"TLS certificate expired\" error.
        * NETWORK - Network-related errors, for example a \"network unreachable\" error.
        * SYSTEM - Internal system errors.
        Allowed values for this property are: "NONE", "DNS", "TRANSPORT", "NETWORK", "SYSTEM", 'UNKNOWN_ENUM_VALUE'.
        Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :return: The error_category of this PingProbeResultSummary.
        :rtype: str
        """
        return self._error_category
    @error_category.setter
    def error_category(self, error_category):
        """
        Sets the error_category of this PingProbeResultSummary.
        The category of error if an error occurs executing the probe.
        The `errorMessage` field provides a message with the error details.
        * NONE - No error
        * DNS - DNS errors
        * TRANSPORT - Transport-related errors, for example a \"TLS certificate expired\" error.
        * NETWORK - Network-related errors, for example a \"network unreachable\" error.
        * SYSTEM - Internal system errors.
        :param error_category: The error_category of this PingProbeResultSummary.
        :type: str
        """
        allowed_values = ["NONE", "DNS", "TRANSPORT", "NETWORK", "SYSTEM"]
        # Coerce unrecognized service values to the sentinel rather than raising.
        if not value_allowed_none_or_none_sentinel(error_category, allowed_values):
            error_category = 'UNKNOWN_ENUM_VALUE'
        self._error_category = error_category
    @property
    def error_message(self):
        """
        Gets the error_message of this PingProbeResultSummary.
        The error information indicating why a probe execution failed.
        :return: The error_message of this PingProbeResultSummary.
        :rtype: str
        """
        return self._error_message
    @error_message.setter
    def error_message(self, error_message):
        """
        Sets the error_message of this PingProbeResultSummary.
        The error information indicating why a probe execution failed.
        :param error_message: The error_message of this PingProbeResultSummary.
        :type: str
        """
        self._error_message = error_message
    @property
    def protocol(self):
        """
        Gets the protocol of this PingProbeResultSummary.
        Allowed values for this property are: "ICMP", "TCP", 'UNKNOWN_ENUM_VALUE'.
        Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :return: The protocol of this PingProbeResultSummary.
        :rtype: str
        """
        return self._protocol
    @protocol.setter
    def protocol(self, protocol):
        """
        Sets the protocol of this PingProbeResultSummary.
        :param protocol: The protocol of this PingProbeResultSummary.
        :type: str
        """
        allowed_values = ["ICMP", "TCP"]
        # Coerce unrecognized service values to the sentinel rather than raising.
        if not value_allowed_none_or_none_sentinel(protocol, allowed_values):
            protocol = 'UNKNOWN_ENUM_VALUE'
        self._protocol = protocol
    @property
    def connection(self):
        """
        Gets the connection of this PingProbeResultSummary.
        :return: The connection of this PingProbeResultSummary.
        :rtype: oci.healthchecks.models.Connection
        """
        return self._connection
    @connection.setter
    def connection(self, connection):
        """
        Sets the connection of this PingProbeResultSummary.
        :param connection: The connection of this PingProbeResultSummary.
        :type: oci.healthchecks.models.Connection
        """
        self._connection = connection
    @property
    def dns(self):
        """
        Gets the dns of this PingProbeResultSummary.
        :return: The dns of this PingProbeResultSummary.
        :rtype: oci.healthchecks.models.DNS
        """
        return self._dns
    @dns.setter
    def dns(self, dns):
        """
        Sets the dns of this PingProbeResultSummary.
        :param dns: The dns of this PingProbeResultSummary.
        :type: oci.healthchecks.models.DNS
        """
        self._dns = dns
    @property
    def domain_lookup_start(self):
        """
        Gets the domain_lookup_start of this PingProbeResultSummary.
        The time immediately before the vantage point starts the domain name lookup for
        the resource.
        :return: The domain_lookup_start of this PingProbeResultSummary.
        :rtype: float
        """
        return self._domain_lookup_start
    @domain_lookup_start.setter
    def domain_lookup_start(self, domain_lookup_start):
        """
        Sets the domain_lookup_start of this PingProbeResultSummary.
        The time immediately before the vantage point starts the domain name lookup for
        the resource.
        :param domain_lookup_start: The domain_lookup_start of this PingProbeResultSummary.
        :type: float
        """
        self._domain_lookup_start = domain_lookup_start
    @property
    def domain_lookup_end(self):
        """
        Gets the domain_lookup_end of this PingProbeResultSummary.
        The time immediately before the vantage point finishes the domain name lookup for
        the resource.
        :return: The domain_lookup_end of this PingProbeResultSummary.
        :rtype: float
        """
        return self._domain_lookup_end
    @domain_lookup_end.setter
    def domain_lookup_end(self, domain_lookup_end):
        """
        Sets the domain_lookup_end of this PingProbeResultSummary.
        The time immediately before the vantage point finishes the domain name lookup for
        the resource.
        :param domain_lookup_end: The domain_lookup_end of this PingProbeResultSummary.
        :type: float
        """
        self._domain_lookup_end = domain_lookup_end
    @property
    def latency_in_ms(self):
        """
        Gets the latency_in_ms of this PingProbeResultSummary.
        The latency of the probe execution, in milliseconds.
        :return: The latency_in_ms of this PingProbeResultSummary.
        :rtype: float
        """
        return self._latency_in_ms
    @latency_in_ms.setter
    def latency_in_ms(self, latency_in_ms):
        """
        Sets the latency_in_ms of this PingProbeResultSummary.
        The latency of the probe execution, in milliseconds.
        :param latency_in_ms: The latency_in_ms of this PingProbeResultSummary.
        :type: float
        """
        self._latency_in_ms = latency_in_ms
    @property
    def icmp_code(self):
        """
        Gets the icmp_code of this PingProbeResultSummary.
        The ICMP code of the response message. This field is not used when the protocol
        is set to TCP. For more information on ICMP codes, see
        `Internet Control Message Protocol (ICMP) Parameters`__.
        __ https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xhtml
        :return: The icmp_code of this PingProbeResultSummary.
        :rtype: int
        """
        return self._icmp_code
    @icmp_code.setter
    def icmp_code(self, icmp_code):
        """
        Sets the icmp_code of this PingProbeResultSummary.
        The ICMP code of the response message. This field is not used when the protocol
        is set to TCP. For more information on ICMP codes, see
        `Internet Control Message Protocol (ICMP) Parameters`__.
        __ https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xhtml
        :param icmp_code: The icmp_code of this PingProbeResultSummary.
        :type: int
        """
        self._icmp_code = icmp_code
    def __repr__(self):
        # Human-readable dump of all attributes via the SDK helper.
        return formatted_flat_dict(self)
    def __eq__(self, other):
        # Two summaries are equal when every attribute (including the type
        # maps) matches; None never equals an instance.
        if other is None:
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        # Python 2 compatibility: defined explicitly as the negation of __eq__.
        return not self == other
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class PingProbeResultSummary(object):
"""
The results returned by running a ping probe. All times and durations are
returned in milliseconds. All times are relative to the POSIX epoch
(1970-01-01T00:00Z).
"""
#: A constant which can be used with the error_category property of a PingProbeResultSummary.
#: This constant has a value of "NONE"
ERROR_CATEGORY_NONE = "NONE"
#: A constant which can be used with the error_category property of a PingProbeResultSummary.
#: This constant has a value of "DNS"
ERROR_CATEGORY_DNS = "DNS"
#: A constant which can be used with the error_category property of a PingProbeResultSummary.
#: This constant has a value of "TRANSPORT"
ERROR_CATEGORY_TRANSPORT = "TRANSPORT"
#: A constant which can be used with the error_category property of a PingProbeResultSummary.
#: This constant has a value of "NETWORK"
ERROR_CATEGORY_NETWORK = "NETWORK"
#: A constant which can be used with the error_category property of a PingProbeResultSummary.
#: This constant has a value of "SYSTEM"
ERROR_CATEGORY_SYSTEM = "SYSTEM"
#: A constant which can be used with the protocol property of a PingProbeResultSummary.
#: This constant has a value of "ICMP"
PROTOCOL_ICMP = "ICMP"
#: A constant which can be used with the protocol property of a PingProbeResultSummary.
#: This constant has a value of "TCP"
PROTOCOL_TCP = "TCP"
def __init__(self, **kwargs):
"""
Initializes a new PingProbeResultSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param key:
The value to assign to the key property of this PingProbeResultSummary.
:type key: str
:param probe_configuration_id:
The value to assign to the probe_configuration_id property of this PingProbeResultSummary.
:type probe_configuration_id: str
:param start_time:
The value to assign to the start_time property of this PingProbeResultSummary.
:type start_time: float
:param target:
The value to assign to the target property of this PingProbeResultSummary.
:type target: str
:param vantage_point_name:
The value to assign to the vantage_point_name property of this PingProbeResultSummary.
:type vantage_point_name: str
:param is_timed_out:
The value to assign to the is_timed_out property of this PingProbeResultSummary.
:type is_timed_out: bool
:param is_healthy:
The value to assign to the is_healthy property of this PingProbeResultSummary.
:type is_healthy: bool
:param error_category:
The value to assign to the error_category property of this PingProbeResultSummary.
Allowed values for this property are: "NONE", "DNS", "TRANSPORT", "NETWORK", "SYSTEM", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type error_category: str
:param error_message:
The value to assign to the error_message property of this PingProbeResultSummary.
:type error_message: str
:param protocol:
The value to assign to the protocol property of this PingProbeResultSummary.
Allowed values for this property are: "ICMP", "TCP", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type protocol: str
:param connection:
The value to assign to the connection property of this PingProbeResultSummary.
:type connection: oci.healthchecks.models.Connection
:param dns:
The value to assign to the dns property of this PingProbeResultSummary.
:type dns: oci.healthchecks.models.DNS
:param domain_lookup_start:
The value to assign to the domain_lookup_start property of this PingProbeResultSummary.
:type domain_lookup_start: float
:param domain_lookup_end:
The value to assign to the domain_lookup_end property of this PingProbeResultSummary.
:type domain_lookup_end: float
:param latency_in_ms:
The value to assign to the latency_in_ms property of this PingProbeResultSummary.
:type latency_in_ms: float
:param icmp_code:
The value to assign to the icmp_code property of this PingProbeResultSummary.
:type icmp_code: int
"""
self.swagger_types = {
'key': 'str',
'probe_configuration_id': 'str',
'start_time': 'float',
'target': 'str',
'vantage_point_name': 'str',
'is_timed_out': 'bool',
'is_healthy': 'bool',
'error_category': 'str',
'error_message': 'str',
'protocol': 'str',
'connection': 'Connection',
'dns': 'DNS',
'domain_lookup_start': 'float',
'domain_lookup_end': 'float',
'latency_in_ms': 'float',
'icmp_code': 'int'
}
self.attribute_map = {
'key': 'key',
'probe_configuration_id': 'probeConfigurationId',
'start_time': 'startTime',
'target': 'target',
'vantage_point_name': 'vantagePointName',
'is_timed_out': 'isTimedOut',
'is_healthy': 'isHealthy',
'error_category': 'errorCategory',
'error_message': 'errorMessage',
'protocol': 'protocol',
'connection': 'connection',
'dns': 'dns',
'domain_lookup_start': 'domainLookupStart',
'domain_lookup_end': 'domainLookupEnd',
'latency_in_ms': 'latencyInMs',
'icmp_code': 'icmpCode'
}
self._key = None
self._probe_configuration_id = None
self._start_time = None
self._target = None
self._vantage_point_name = None
self._is_timed_out = None
self._is_healthy = None
self._error_category = None
self._error_message = None
self._protocol = None
self._connection = None
self._dns = None
self._domain_lookup_start = None
self._domain_lookup_end = None
self._latency_in_ms = None
self._icmp_code = None
@property
def key(self):
"""
Gets the key of this PingProbeResultSummary.
A value identifying this specific probe result. The key is only unique within
the results of its probe configuration. The key may be reused after 90 days.
:return: The key of this PingProbeResultSummary.
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""
Sets the key of this PingProbeResultSummary.
A value identifying this specific probe result. The key is only unique within
the results of its probe configuration. The key may be reused after 90 days.
:param key: The key of this PingProbeResultSummary.
:type: str
"""
self._key = key
@property
def probe_configuration_id(self):
"""
Gets the probe_configuration_id of this PingProbeResultSummary.
The OCID of the monitor or on-demand probe responsible for creating this result.
:return: The probe_configuration_id of this PingProbeResultSummary.
:rtype: str
"""
return self._probe_configuration_id
@probe_configuration_id.setter
def probe_configuration_id(self, probe_configuration_id):
"""
Sets the probe_configuration_id of this PingProbeResultSummary.
The OCID of the monitor or on-demand probe responsible for creating this result.
:param probe_configuration_id: The probe_configuration_id of this PingProbeResultSummary.
:type: str
"""
self._probe_configuration_id = probe_configuration_id
@property
def start_time(self):
"""
Gets the start_time of this PingProbeResultSummary.
The date and time the probe was executed, expressed in milliseconds since the
POSIX epoch. This field is defined by the PerformanceResourceTiming interface
of the W3C Resource Timing specification. For more information, see
`Resource Timing`__.
__ https://w3c.github.io/resource-timing/#sec-resource-timing
:return: The start_time of this PingProbeResultSummary.
:rtype: float
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""
Sets the start_time of this PingProbeResultSummary.
The date and time the probe was executed, expressed in milliseconds since the
POSIX epoch. This field is defined by the PerformanceResourceTiming interface
of the W3C Resource Timing specification. For more information, see
`Resource Timing`__.
__ https://w3c.github.io/resource-timing/#sec-resource-timing
:param start_time: The start_time of this PingProbeResultSummary.
:type: float
"""
self._start_time = start_time
@property
def target(self):
"""
Gets the target of this PingProbeResultSummary.
The target hostname or IP address of the probe.
:return: The target of this PingProbeResultSummary.
:rtype: str
"""
return self._target
@target.setter
def target(self, target):
"""
Sets the target of this PingProbeResultSummary.
The target hostname or IP address of the probe.
:param target: The target of this PingProbeResultSummary.
:type: str
"""
self._target = target
@property
def vantage_point_name(self):
"""
Gets the vantage_point_name of this PingProbeResultSummary.
The name of the vantage point that executed the probe.
:return: The vantage_point_name of this PingProbeResultSummary.
:rtype: str
"""
return self._vantage_point_name
@vantage_point_name.setter
def vantage_point_name(self, vantage_point_name):
"""
Sets the vantage_point_name of this PingProbeResultSummary.
The name of the vantage point that executed the probe.
:param vantage_point_name: The vantage_point_name of this PingProbeResultSummary.
:type: str
"""
self._vantage_point_name = vantage_point_name
@property
def is_timed_out(self):
"""
Gets the is_timed_out of this PingProbeResultSummary.
True if the probe did not complete before the configured `timeoutInSeconds` value.
:return: The is_timed_out of this PingProbeResultSummary.
:rtype: bool
"""
return self._is_timed_out
@is_timed_out.setter
def is_timed_out(self, is_timed_out):
"""
Sets the is_timed_out of this PingProbeResultSummary.
True if the probe did not complete before the configured `timeoutInSeconds` value.
:param is_timed_out: The is_timed_out of this PingProbeResultSummary.
:type: bool
"""
self._is_timed_out = is_timed_out
@property
def is_healthy(self):
"""
Gets the is_healthy of this PingProbeResultSummary.
True if the probe result is determined to be healthy based on probe
type-specific criteria. For HTTP probes, a probe result is considered
healthy if the HTTP response code is greater than or equal to 200 and
less than 300.
:return: The is_healthy of this PingProbeResultSummary.
:rtype: bool
"""
return self._is_healthy
@is_healthy.setter
def is_healthy(self, is_healthy):
"""
Sets the is_healthy of this PingProbeResultSummary.
True if the probe result is determined to be healthy based on probe
type-specific criteria. For HTTP probes, a probe result is considered
healthy if the HTTP response code is greater than or equal to 200 and
less than 300.
:param is_healthy: The is_healthy of this PingProbeResultSummary.
:type: bool
"""
self._is_healthy = is_healthy
@property
def error_category(self):
"""
Gets the error_category of this PingProbeResultSummary.
The category of error if an error occurs executing the probe.
The `errorMessage` field provides a message with the error details.
* NONE - No error
* DNS - DNS errors
* TRANSPORT - Transport-related errors, for example a \"TLS certificate expired\" error.
* NETWORK - Network-related errors, for example a \"network unreachable\" error.
* SYSTEM - Internal system errors.
Allowed values for this property are: "NONE", "DNS", "TRANSPORT", "NETWORK", "SYSTEM", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The error_category of this PingProbeResultSummary.
:rtype: str
"""
return self._error_category
@error_category.setter
def error_category(self, error_category):
"""
Sets the error_category of this PingProbeResultSummary.
The category of error if an error occurs executing the probe.
The `errorMessage` field provides a message with the error details.
* NONE - No error
* DNS - DNS errors
* TRANSPORT - Transport-related errors, for example a \"TLS certificate expired\" error.
* NETWORK - Network-related errors, for example a \"network unreachable\" error.
* SYSTEM - Internal system errors.
:param error_category: The error_category of this PingProbeResultSummary.
:type: str
"""
allowed_values = ["NONE", "DNS", "TRANSPORT", "NETWORK", "SYSTEM"]
if not value_allowed_none_or_none_sentinel(error_category, allowed_values):
error_category = 'UNKNOWN_ENUM_VALUE'
self._error_category = error_category
@property
def error_message(self):
"""
Gets the error_message of this PingProbeResultSummary.
The error information indicating why a probe execution failed.
:return: The error_message of this PingProbeResultSummary.
:rtype: str
"""
return self._error_message
@error_message.setter
def error_message(self, error_message):
"""
Sets the error_message of this PingProbeResultSummary.
The error information indicating why a probe execution failed.
:param error_message: The error_message of this PingProbeResultSummary.
:type: str
"""
self._error_message = error_message
@property
def protocol(self):
    """
    Gets the protocol of this PingProbeResultSummary.

    Allowed values for this property are: "ICMP", "TCP", 'UNKNOWN_ENUM_VALUE'.
    Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.

    :rtype: str
    """
    return self._protocol

@protocol.setter
def protocol(self, protocol):
    """
    Sets the protocol of this PingProbeResultSummary.

    :param protocol: the probe transport protocol ("ICMP" or "TCP").
    :type: str
    """
    allowed = ["ICMP", "TCP"]
    # Values outside the known enum set are normalized to the sentinel.
    if not value_allowed_none_or_none_sentinel(protocol, allowed):
        protocol = 'UNKNOWN_ENUM_VALUE'
    self._protocol = protocol
@property
def connection(self):
    """
    Gets the connection of this PingProbeResultSummary.

    :rtype: oci.healthchecks.models.Connection
    """
    return self._connection

@connection.setter
def connection(self, value):
    """
    Sets the connection of this PingProbeResultSummary.

    :param value: the connection details recorded for this probe.
    :type: oci.healthchecks.models.Connection
    """
    self._connection = value
@property
def dns(self):
    """
    Gets the dns of this PingProbeResultSummary.

    :rtype: oci.healthchecks.models.DNS
    """
    return self._dns

@dns.setter
def dns(self, value):
    """
    Sets the dns of this PingProbeResultSummary.

    :param value: the DNS resolution details recorded for this probe.
    :type: oci.healthchecks.models.DNS
    """
    self._dns = value
@property
def domain_lookup_start(self):
    """
    Gets the domain_lookup_start of this PingProbeResultSummary.

    The time immediately before the vantage point starts the domain name
    lookup for the resource.

    :rtype: float
    """
    return self._domain_lookup_start

@domain_lookup_start.setter
def domain_lookup_start(self, value):
    """
    Sets the domain_lookup_start of this PingProbeResultSummary.

    :param value: timestamp just before the vantage point begins DNS lookup.
    :type: float
    """
    self._domain_lookup_start = value
@property
def domain_lookup_end(self):
    """
    Gets the domain_lookup_end of this PingProbeResultSummary.

    The time immediately before the vantage point finishes the domain name
    lookup for the resource.

    :rtype: float
    """
    return self._domain_lookup_end

@domain_lookup_end.setter
def domain_lookup_end(self, value):
    """
    Sets the domain_lookup_end of this PingProbeResultSummary.

    :param value: timestamp just before the vantage point completes DNS lookup.
    :type: float
    """
    self._domain_lookup_end = value
@property
def latency_in_ms(self):
    """
    Gets the latency_in_ms of this PingProbeResultSummary.

    The latency of the probe execution, in milliseconds.

    :rtype: float
    """
    return self._latency_in_ms

@latency_in_ms.setter
def latency_in_ms(self, value):
    """
    Sets the latency_in_ms of this PingProbeResultSummary.

    :param value: probe execution latency, in milliseconds.
    :type: float
    """
    self._latency_in_ms = value
@property
def icmp_code(self):
    """
    Gets the icmp_code of this PingProbeResultSummary.

    The ICMP code of the response message. This field is not used when the
    protocol is set to TCP. For more information on ICMP codes, see
    `Internet Control Message Protocol (ICMP) Parameters`__.

    __ https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xhtml

    :rtype: int
    """
    return self._icmp_code

@icmp_code.setter
def icmp_code(self, value):
    """
    Sets the icmp_code of this PingProbeResultSummary.

    :param value: ICMP code of the response message (unused for TCP probes).
    :type: int
    """
    self._icmp_code = value
def __repr__(self):
    """Return a flat, human-readable dump of all model attributes."""
    return formatted_flat_dict(self)

def __eq__(self, other):
    """Two summaries are equal when every attribute matches."""
    return other is not None and self.__dict__ == other.__dict__

def __ne__(self, other):
    """Logical inverse of __eq__."""
    return not self == other
|
en
| 0.787965
|
# coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. # noqa: F401 The results returned by running a ping probe. All times and durations are returned in milliseconds. All times are relative to the POSIX epoch (1970-01-01T00:00Z). #: A constant which can be used with the error_category property of a PingProbeResultSummary. #: This constant has a value of "NONE" #: A constant which can be used with the error_category property of a PingProbeResultSummary. #: This constant has a value of "DNS" #: A constant which can be used with the error_category property of a PingProbeResultSummary. #: This constant has a value of "TRANSPORT" #: A constant which can be used with the error_category property of a PingProbeResultSummary. #: This constant has a value of "NETWORK" #: A constant which can be used with the error_category property of a PingProbeResultSummary. #: This constant has a value of "SYSTEM" #: A constant which can be used with the protocol property of a PingProbeResultSummary. #: This constant has a value of "ICMP" #: A constant which can be used with the protocol property of a PingProbeResultSummary. #: This constant has a value of "TCP" Initializes a new PingProbeResultSummary object with values from keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param key: The value to assign to the key property of this PingProbeResultSummary. :type key: str :param probe_configuration_id: The value to assign to the probe_configuration_id property of this PingProbeResultSummary. :type probe_configuration_id: str :param start_time: The value to assign to the start_time property of this PingProbeResultSummary. 
:type start_time: float :param target: The value to assign to the target property of this PingProbeResultSummary. :type target: str :param vantage_point_name: The value to assign to the vantage_point_name property of this PingProbeResultSummary. :type vantage_point_name: str :param is_timed_out: The value to assign to the is_timed_out property of this PingProbeResultSummary. :type is_timed_out: bool :param is_healthy: The value to assign to the is_healthy property of this PingProbeResultSummary. :type is_healthy: bool :param error_category: The value to assign to the error_category property of this PingProbeResultSummary. Allowed values for this property are: "NONE", "DNS", "TRANSPORT", "NETWORK", "SYSTEM", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :type error_category: str :param error_message: The value to assign to the error_message property of this PingProbeResultSummary. :type error_message: str :param protocol: The value to assign to the protocol property of this PingProbeResultSummary. Allowed values for this property are: "ICMP", "TCP", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :type protocol: str :param connection: The value to assign to the connection property of this PingProbeResultSummary. :type connection: oci.healthchecks.models.Connection :param dns: The value to assign to the dns property of this PingProbeResultSummary. :type dns: oci.healthchecks.models.DNS :param domain_lookup_start: The value to assign to the domain_lookup_start property of this PingProbeResultSummary. :type domain_lookup_start: float :param domain_lookup_end: The value to assign to the domain_lookup_end property of this PingProbeResultSummary. :type domain_lookup_end: float :param latency_in_ms: The value to assign to the latency_in_ms property of this PingProbeResultSummary. 
:type latency_in_ms: float :param icmp_code: The value to assign to the icmp_code property of this PingProbeResultSummary. :type icmp_code: int Gets the key of this PingProbeResultSummary. A value identifying this specific probe result. The key is only unique within the results of its probe configuration. The key may be reused after 90 days. :return: The key of this PingProbeResultSummary. :rtype: str Sets the key of this PingProbeResultSummary. A value identifying this specific probe result. The key is only unique within the results of its probe configuration. The key may be reused after 90 days. :param key: The key of this PingProbeResultSummary. :type: str Gets the probe_configuration_id of this PingProbeResultSummary. The OCID of the monitor or on-demand probe responsible for creating this result. :return: The probe_configuration_id of this PingProbeResultSummary. :rtype: str Sets the probe_configuration_id of this PingProbeResultSummary. The OCID of the monitor or on-demand probe responsible for creating this result. :param probe_configuration_id: The probe_configuration_id of this PingProbeResultSummary. :type: str Gets the start_time of this PingProbeResultSummary. The date and time the probe was executed, expressed in milliseconds since the POSIX epoch. This field is defined by the PerformanceResourceTiming interface of the W3C Resource Timing specification. For more information, see `Resource Timing`__. __ https://w3c.github.io/resource-timing/#sec-resource-timing :return: The start_time of this PingProbeResultSummary. :rtype: float Sets the start_time of this PingProbeResultSummary. The date and time the probe was executed, expressed in milliseconds since the POSIX epoch. This field is defined by the PerformanceResourceTiming interface of the W3C Resource Timing specification. For more information, see `Resource Timing`__. __ https://w3c.github.io/resource-timing/#sec-resource-timing :param start_time: The start_time of this PingProbeResultSummary. 
:type: float Gets the target of this PingProbeResultSummary. The target hostname or IP address of the probe. :return: The target of this PingProbeResultSummary. :rtype: str Sets the target of this PingProbeResultSummary. The target hostname or IP address of the probe. :param target: The target of this PingProbeResultSummary. :type: str Gets the vantage_point_name of this PingProbeResultSummary. The name of the vantage point that executed the probe. :return: The vantage_point_name of this PingProbeResultSummary. :rtype: str Sets the vantage_point_name of this PingProbeResultSummary. The name of the vantage point that executed the probe. :param vantage_point_name: The vantage_point_name of this PingProbeResultSummary. :type: str Gets the is_timed_out of this PingProbeResultSummary. True if the probe did not complete before the configured `timeoutInSeconds` value. :return: The is_timed_out of this PingProbeResultSummary. :rtype: bool Sets the is_timed_out of this PingProbeResultSummary. True if the probe did not complete before the configured `timeoutInSeconds` value. :param is_timed_out: The is_timed_out of this PingProbeResultSummary. :type: bool Gets the is_healthy of this PingProbeResultSummary. True if the probe result is determined to be healthy based on probe type-specific criteria. For HTTP probes, a probe result is considered healthy if the HTTP response code is greater than or equal to 200 and less than 300. :return: The is_healthy of this PingProbeResultSummary. :rtype: bool Sets the is_healthy of this PingProbeResultSummary. True if the probe result is determined to be healthy based on probe type-specific criteria. For HTTP probes, a probe result is considered healthy if the HTTP response code is greater than or equal to 200 and less than 300. :param is_healthy: The is_healthy of this PingProbeResultSummary. :type: bool Gets the error_category of this PingProbeResultSummary. The category of error if an error occurs executing the probe. 
The `errorMessage` field provides a message with the error details. * NONE - No error * DNS - DNS errors * TRANSPORT - Transport-related errors, for example a \"TLS certificate expired\" error. * NETWORK - Network-related errors, for example a \"network unreachable\" error. * SYSTEM - Internal system errors. Allowed values for this property are: "NONE", "DNS", "TRANSPORT", "NETWORK", "SYSTEM", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :return: The error_category of this PingProbeResultSummary. :rtype: str Sets the error_category of this PingProbeResultSummary. The category of error if an error occurs executing the probe. The `errorMessage` field provides a message with the error details. * NONE - No error * DNS - DNS errors * TRANSPORT - Transport-related errors, for example a \"TLS certificate expired\" error. * NETWORK - Network-related errors, for example a \"network unreachable\" error. * SYSTEM - Internal system errors. :param error_category: The error_category of this PingProbeResultSummary. :type: str Gets the error_message of this PingProbeResultSummary. The error information indicating why a probe execution failed. :return: The error_message of this PingProbeResultSummary. :rtype: str Sets the error_message of this PingProbeResultSummary. The error information indicating why a probe execution failed. :param error_message: The error_message of this PingProbeResultSummary. :type: str Gets the protocol of this PingProbeResultSummary. Allowed values for this property are: "ICMP", "TCP", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :return: The protocol of this PingProbeResultSummary. :rtype: str Sets the protocol of this PingProbeResultSummary. :param protocol: The protocol of this PingProbeResultSummary. :type: str Gets the connection of this PingProbeResultSummary. :return: The connection of this PingProbeResultSummary. 
:rtype: oci.healthchecks.models.Connection Sets the connection of this PingProbeResultSummary. :param connection: The connection of this PingProbeResultSummary. :type: oci.healthchecks.models.Connection Gets the dns of this PingProbeResultSummary. :return: The dns of this PingProbeResultSummary. :rtype: oci.healthchecks.models.DNS Sets the dns of this PingProbeResultSummary. :param dns: The dns of this PingProbeResultSummary. :type: oci.healthchecks.models.DNS Gets the domain_lookup_start of this PingProbeResultSummary. The time immediately before the vantage point starts the domain name lookup for the resource. :return: The domain_lookup_start of this PingProbeResultSummary. :rtype: float Sets the domain_lookup_start of this PingProbeResultSummary. The time immediately before the vantage point starts the domain name lookup for the resource. :param domain_lookup_start: The domain_lookup_start of this PingProbeResultSummary. :type: float Gets the domain_lookup_end of this PingProbeResultSummary. The time immediately before the vantage point finishes the domain name lookup for the resource. :return: The domain_lookup_end of this PingProbeResultSummary. :rtype: float Sets the domain_lookup_end of this PingProbeResultSummary. The time immediately before the vantage point finishes the domain name lookup for the resource. :param domain_lookup_end: The domain_lookup_end of this PingProbeResultSummary. :type: float Gets the latency_in_ms of this PingProbeResultSummary. The latency of the probe execution, in milliseconds. :return: The latency_in_ms of this PingProbeResultSummary. :rtype: float Sets the latency_in_ms of this PingProbeResultSummary. The latency of the probe execution, in milliseconds. :param latency_in_ms: The latency_in_ms of this PingProbeResultSummary. :type: float Gets the icmp_code of this PingProbeResultSummary. The ICMP code of the response message. This field is not used when the protocol is set to TCP. 
For more information on ICMP codes, see `Internet Control Message Protocol (ICMP) Parameters`__. __ https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xhtml :return: The icmp_code of this PingProbeResultSummary. :rtype: int Sets the icmp_code of this PingProbeResultSummary. The ICMP code of the response message. This field is not used when the protocol is set to TCP. For more information on ICMP codes, see `Internet Control Message Protocol (ICMP) Parameters`__. __ https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xhtml :param icmp_code: The icmp_code of this PingProbeResultSummary. :type: int
| 1.705123
| 2
|
dev/08_01_2018/Relay_Controller_Test.py
|
npwebste/UPS_Controller
| 0
|
6628583
|
<gh_stars>0
# ©2018 The Arizona Board of Regents for and on behalf of Arizona State University and the Laboratory for Energy And Power Solutions, All Rights Reserved.
#
# Universal Power System Controller
# USAID Middle East Water Security Initiative
#
# Developed by: <NAME>
# Primary Investigator: <NAME>
#
# Version History (mm_dd_yyyy)
# 1.00 07_13_2018_NW
#
######################################################
# Hardware smoke test: exercises the AC source-select relay and the DC (solar)
# relay through the project's PWM wrapper. Intended to run on the target board;
# it has side effects on GPIO pins at import time.
from PWM_Wrapper import *
import Parameters
import time
# Setup event logging
# Pin 22 (BCM 25)
# Ground 20
# One-time PWM/GPIO initialisation; pin numbers and clock divisor come from
# the Parameters module (not shown here — confirm values against the board).
PWM.PWM_Setup()
PWM.PWM_Pin_Mode(Parameters.PWMPin)
PWM.PWM_Set_Mode()
PWM.PWM_Set_Clock(Parameters.Divisor)
PWM.Pin_Mode_Output(Parameters.AC_DigitalPin)
PWM.Pin_Mode_Output(Parameters.DC_DigitalPin)
# Toggle each relay with a 5 s dwell so the switching can be observed/heard.
PWM.Digital_Write(Parameters.AC_DigitalPin,1) # Set solar power source
time.sleep(5)
PWM.Digital_Write(Parameters.AC_DigitalPin,0) # Set grid power source
time.sleep(5)
PWM.Digital_Write(Parameters.DC_DigitalPin, 1) # Set solar relay in closed position
time.sleep(5)
PWM.Digital_Write(Parameters.DC_DigitalPin, 0) # Set solar relay in open position
|
# ©2018 The Arizona Board of Regents for and on behalf of Arizona State University and the Laboratory for Energy And Power Solutions, All Rights Reserved.
#
# Universal Power System Controller
# USAID Middle East Water Security Initiative
#
# Developed by: <NAME>
# Primary Investigator: <NAME>
#
# Version History (mm_dd_yyyy)
# 1.00 07_13_2018_NW
#
######################################################
# Hardware smoke test for the AC/DC relays; runs its GPIO sequence at import.
from PWM_Wrapper import *
import Parameters
import time
# Setup event logging
# Pin 22 (BCM 25)
# Ground 20
# Initialise PWM/GPIO; pin assignments live in the Parameters module.
PWM.PWM_Setup()
PWM.PWM_Pin_Mode(Parameters.PWMPin)
PWM.PWM_Set_Mode()
PWM.PWM_Set_Clock(Parameters.Divisor)
PWM.Pin_Mode_Output(Parameters.AC_DigitalPin)
PWM.Pin_Mode_Output(Parameters.DC_DigitalPin)
# Toggle each relay with 5 s dwell between state changes.
PWM.Digital_Write(Parameters.AC_DigitalPin,1) # Set solar power source
time.sleep(5)
PWM.Digital_Write(Parameters.AC_DigitalPin,0) # Set grid power source
time.sleep(5)
PWM.Digital_Write(Parameters.DC_DigitalPin, 1) # Set solar relay in closed position
time.sleep(5)
PWM.Digital_Write(Parameters.DC_DigitalPin, 0) # Set solar relay in open position
|
en
| 0.65363
|
# ©2018 The Arizona Board of Regents for and on behalf of Arizona State University and the Laboratory for Energy And Power Solutions, All Rights Reserved. # # Universal Power System Controller # USAID Middle East Water Security Initiative # # Developed by: <NAME> # Primary Investigator: <NAME> # # Version History (mm_dd_yyyy) # 1.00 07_13_2018_NW # ###################################################### # Setup event logging # Pin 22 (BCM 25) # Ground 20 # Set solar power source # Set grid power source # Set solar relay in closed position # Set solar relay in open position
| 1.603212
| 2
|
src/HelloWorld_On_BAMS.py
|
rsoscia/BAMS-to-NeuroLex
| 1
|
6628584
|
<gh_stars>1-10
import os
import zipfile

import rdflib


def extract(zipfilepath, extractiondir):
    """Extract all members of *zipfilepath* into *extractiondir*.

    Returns the path of the first extracted member (the BAMS XML file),
    or None if the archive is empty.

    Fixes over the original: the function is defined before it is called
    (the old script raised NameError), it uses its parameters instead of
    hard-coded paths, the broken `f.read(zip)` / `unzip(sys.argv[:0])`
    lines are removed, and the archive handle is closed via `with`.
    """
    with zipfile.ZipFile(zipfilepath) as archive:
        archive.extractall(path=extractiondir)
        names = archive.namelist()
    return os.path.join(extractiondir, names[0]) if names else None


def main():
    """Unzip the BAMS dump and load it into an RDF graph."""
    graph = rdflib.Graph()
    xml_path = extract('BAMMMMMM.xml.zip',
                       '/Users/ryansoscia/BAMS-to-NeuroLex/src')
    result = graph.parse(xml_path, format="application/rdf+xml")
    print("graph has %s statements." % len(result))


if __name__ == '__main__':
    main()
|
#import os
#import zipfile
#fh = open('BAMMMMMM.xml.zip','rb')
#print("the ZIP is of length: %s " %size(fh))
import rdflib
import os
import zipfile
#Get a Graph object
g = rdflib.Graph()
# pull in an RDF document from NeuroLex, parse, and store.
zip = zipfile.ZipFile("/Users/ryansoscia/BAMS-to-NeuroLex/src/BAMMMMMM.xml.zip")
#result = g.parse("http://neurolex.org/wiki/Special:ExportRDF/birnlex_1489", format="application/rdf+xml")
# NOTE(review): 'extract' is used here before it is defined below, so this
# line raises NameError at runtime; the call must come after the definition.
result = g.parse(extract('BAMMMMMM.xml.zip', '/Users/ryansoscia/BAMS-to-NeuroLex/src' ), format="application/rdf+xml")
print ("graph has %s statements." % len(result))
# NOTE(review): this body is broken — 'f' is undefined, 'unzip' does not
# exist, and 'sys' is never imported; the function also ignores both of its
# parameters in favour of a hard-coded path.
def extract(zipfilepath, extractiondir):
    #def extract('BAMMMMMM.xml.zip', '/Users/ryansoscia/BAMS-to-NeuroLex/src'):
    #zip = zipfile.ZipFile(zipfilepath)
    #ZipFile is a class
    zip = zipfile.ZipFile('/Users/ryansoscia/BAMS-to-NeuroLex/src/BAMMMMMM.xml.zip')
    f.read(zip)
    #zip.extractall(path=/Users/ryansoscia/BAMS-to-NeuroLex/src/BAMMMMMM.xml.zip)
    unzip(sys.argv[:0])
extract('BAMMMMMM.xml.zip', '/Users/ryansoscia/BAMS-to-NeuroLex/src')
|
en
| 0.479228
|
#import os #import zipfile #fh = open('BAMMMMMM.xml.zip','rb') #print("the ZIP is of length: %s " %size(fh)) #Get a Graph object # pull in an RDF document from NeuroLex, parse, and store. #result = g.parse("http://neurolex.org/wiki/Special:ExportRDF/birnlex_1489", format="application/rdf+xml") #def extract('BAMMMMMM.xml.zip', '/Users/ryansoscia/BAMS-to-NeuroLex/src'): #zip = zipfile.ZipFile(zipfilepath) #ZipFile is a class #zip.extractall(path=/Users/ryansoscia/BAMS-to-NeuroLex/src/BAMMMMMM.xml.zip)
| 2.758848
| 3
|
dev_db_example/core/models.py
|
tschellenbach/dev_db
| 5
|
6628585
|
<reponame>tschellenbach/dev_db
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
'''
Some example models to demo the dev_db script
'''


class SiteCategory(models.Model):
    # Display name of the category.
    name = models.CharField(max_length=255)


class Site(models.Model):
    # on_delete made explicit: it is required from Django 2.0 onward, and
    # CASCADE matches the implicit default of earlier versions (no behavior change).
    category = models.ForeignKey(SiteCategory, on_delete=models.CASCADE)
    #url = models.TextField()


class Tag(models.Model):
    # NOTE(review): max_length=25 is shorter than the 255 used elsewhere —
    # confirm this is intentional.
    name = models.CharField(max_length=25)


class Item(models.Model):
    site = models.ForeignKey(Site, on_delete=models.CASCADE)
    url = models.TextField()
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    tags = models.ManyToManyField(Tag)


# these two models are here to test if things break
# when relations go two ways (infinite loops etcs)
class Blogger(models.Model):
    name = models.CharField(max_length=255)
    favourite_post = models.ForeignKey('Post', related_name='favourites',
                                       on_delete=models.CASCADE)


class Post(models.Model):
    blogger = models.ForeignKey(Blogger, on_delete=models.CASCADE)
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
'''
Some example models to demo the dev_db script
'''
# Grouping for sites; referenced by Site.category.
class SiteCategory(models.Model):
    name = models.CharField(max_length=255)
# A site belongs to exactly one category.
# NOTE(review): ForeignKey without on_delete only works on Django < 2.0.
class Site(models.Model):
    category = models.ForeignKey(SiteCategory)
    #url = models.TextField()
# Free-form tag attached to items (many-to-many).
class Tag(models.Model):
    name = models.CharField(max_length=25)
# A URL posted by a user on a site, carrying any number of tags.
class Item(models.Model):
    site = models.ForeignKey(Site)
    url = models.TextField()
    user = models.ForeignKey(User)
    tags = models.ManyToManyField(Tag)
# these two models are here to test if things break
# when relations go two ways (infinite loops etcs)
class Blogger(models.Model):
    name = models.CharField(max_length=255)
    favourite_post = models.ForeignKey('Post', related_name='favourites')
class Post(models.Model):
    blogger = models.ForeignKey(Blogger)
|
en
| 0.836543
|
# Create your models here. Some example models to demo the dev_db script #url = models.TextField() # these two models are here to test if things break # when relations go two ways (infinite loops etcs)
| 2.68271
| 3
|
Python/Subtract the Product and Sum of Digits of an Integer.py
|
gbrough/LeetCode
| 0
|
6628586
|
class Solution:
    def subtractProductAndSum(self, n: int) -> int:
        """Return (product of n's decimal digits) - (sum of n's decimal digits)."""
        digits = [int(ch) for ch in str(n)]
        product = 1
        for d in digits:
            product *= d
        return product - sum(digits)
|
class Solution:
    def subtractProductAndSum(self, n: int) -> int:
        """Compute digit-product minus digit-sum of n in one pass over its decimal string."""
        product, total = 1, 0
        for ch in str(n):
            digit = int(ch)
            product *= digit
            total += digit
        return product - total
|
en
| 0.824806
|
#loop through numbers and multiple each i and set to prodTotal, and sum of each i set to sumTotal #subtract result of prodTotal from result of sumTotal
| 3.673888
| 4
|
style/styled_string.py
|
codacy-badger/style
| 0
|
6628587
|
<filename>style/styled_string.py
import style
class _StyledString(str):
    """A str subclass whose str() rendering wraps its parts in ANSI SGR codes.

    The plain-text value (used for len(), comparisons, etc.) is the
    separator-joined parts; styling is applied only when the builder's
    global enable flag is on.
    """

    def __new__(cls, style_list, sep, *objects):
        joined = sep.join(str(obj) for obj in objects)
        return super(_StyledString, cls).__new__(cls, joined)

    def __init__(self, style_list, sep, *objects):
        # Opening/closing SGR parameter lists, e.g. "1;31" / "22;39".
        self._style_start = ';'.join(str(pair[0]) for pair in style_list)
        self._style_end = ';'.join(str(pair[1]) for pair in style_list)
        self._sep = sep
        self._objects = objects

    def __add__(self, other):
        # Concatenation renders this styled string first.
        return str(self) + str(other)

    def __str__(self):
        if not style._StyledStringBuilder._enabled:
            return super(_StyledString, self).__str__()
        rendered = []
        for obj in self._objects:
            if type(obj) is _StyledString:
                # After a nested styled part resets the style, re-open ours.
                rendered.append('%s\033[%sm' % (obj, self._style_start))
            else:
                rendered.append(str(obj))
        return '\033[%sm%s\033[%sm' % (self._style_start,
                                       self._sep.join(rendered),
                                       self._style_end)

    def rjust(self, width, fillchar=' '):
        # Pad based on the *visible* length, then apply it to the rendered
        # (escape-laden) string so the escapes don't eat padding.
        pad = width - len(self)
        if pad > 0:
            text = str(self)
            return text.rjust(len(text) + pad, fillchar)
        return self

    def ljust(self, width, fillchar=' '):
        pad = width - len(self)
        if pad > 0:
            text = str(self)
            return text.ljust(len(text) + pad, fillchar)
        return self
|
<filename>style/styled_string.py
import style
class _StyledString(str):
    """A str subclass that renders its parts wrapped in ANSI SGR escape codes.

    The underlying str value (len, comparisons) is the separator-joined
    plain text; styling is only emitted when the global enable flag is set.
    """
    def __new__(cls, style_list, sep, *objects):
        # The plain-text payload is the separator-joined stringified parts.
        return super(_StyledString, cls).__new__(cls, sep.join([str(obj) for obj in objects]))
    def __init__(self, style_list, sep, *objects):
        # style_list holds (open, close) SGR code pairs, joined as "a;b;c".
        self._style_start = ';'.join([str(s[0]) for s in style_list])
        self._style_end = ';'.join([str(s[1]) for s in style_list])
        self._sep = sep
        self._objects = objects
    def __add__(self, other):
        # Concatenation renders this styled string first.
        return self.__str__() + str(other)
    def __str__(self):
        if style._StyledStringBuilder._enabled:
            string = ''
            for i, obj in enumerate(self._objects):
                if i > 0:
                    string += self._sep
                if type(obj) is _StyledString:
                    # Nested styled part resets the style; re-open ours after it.
                    string += '%s\033[%sm' % (obj, self._style_start)
                else:
                    string += str(obj)
            return '\033[%sm%s\033[%sm' % (self._style_start, string, self._style_end)
        # Styling disabled: fall back to the plain str value.
        return super(_StyledString, self).__str__()
    def rjust(self, width, fillchar=' '):
        # Pad by visible length, applied to the rendered (escape-laden) string.
        n_chars = width - len(self)
        if n_chars > 0:
            string = str(self)
            return string.rjust(len(string) + n_chars, fillchar)
        return self
    def ljust(self, width, fillchar=' '):
        n_chars = width - len(self)
        if n_chars > 0:
            string = str(self)
            return string.ljust(len(string) + n_chars, fillchar)
        return self
|
none
| 1
| 2.75571
| 3
|
|
example/example.py
|
kyrias/flask-kerberos
| 31
|
6628588
|
#!/usr/bin/env python
# Minimal example app demonstrating flask-kerberos authentication.
from flask import Flask
from flask import render_template
from flask_kerberos import init_kerberos
from flask_kerberos import requires_authentication
# NOTE(review): debug mode is enabled — not for production use.
DEBUG=True
app = Flask(__name__)
app.config.from_object(__name__)
@app.route("/")
@requires_authentication
def index(user):
    # 'user' is supplied by @requires_authentication (the authenticated principal).
    return render_template('index.html', user=user)
if __name__ == '__main__':
    init_kerberos(app)
    app.run(host='0.0.0.0')
|
#!/usr/bin/env python
# Example flask-kerberos application (duplicate copy of the sample above in the dataset).
from flask import Flask
from flask import render_template
from flask_kerberos import init_kerberos
from flask_kerberos import requires_authentication
# NOTE(review): DEBUG=True exposes the debugger; disable in production.
DEBUG=True
app = Flask(__name__)
app.config.from_object(__name__)
@app.route("/")
@requires_authentication
def index(user):
    # 'user' is injected by the @requires_authentication decorator.
    return render_template('index.html', user=user)
if __name__ == '__main__':
    init_kerberos(app)
    app.run(host='0.0.0.0')
|
ru
| 0.26433
|
#!/usr/bin/env python
| 2.131829
| 2
|
src/xrootiface.py
|
LovisaLugnegard/wopiserver
| 0
|
6628589
|
'''
xrootiface.py
XRootD interface for the WOPI server for CERNBox
Author: Gi<EMAIL>ppe.<EMAIL>, CERN/IT-ST
Contributions: <EMAIL>
'''
import time
from XRootD import client as XrdClient
from XRootD.client.flags import OpenFlags, QueryCode
# module-wide state
config = None          # ConfigParser-like object, set by init()
log = None             # logger, set by init()
xrdfs = {} # this is to map each endpoint [string] to its XrdClient
defaultstorage = None  # URL of the default storage endpoint (from config)
homepath = None        # prefix prepended to all filenames (storagehomepath)
def _getxrdfor(endpoint):
    '''Look up the xrootd client for the given endpoint, create it if missing. Supports "default" for the defaultstorage endpoint.'''
    global xrdfs  # pylint: disable=global-statement
    global defaultstorage  # pylint: disable=global-statement
    if endpoint == 'default':
        # The default endpoint's client is pre-created by init().
        return xrdfs[defaultstorage]
    if endpoint not in xrdfs:
        # First time we see this endpoint: create and cache a client for it.
        xrdfs[endpoint] = XrdClient.FileSystem(endpoint)
    return xrdfs[endpoint]
def _geturlfor(endpoint):
    '''Look up the URL for a given endpoint: "default" corresponds to the defaultstorage one'''
    return defaultstorage if endpoint == 'default' else endpoint
def _eosargs(ruid, rgid, atomicwrite=0, bookingsize=0):
    '''Build the extra EOS-specific opaque arguments for an xroot URL.'''
    opaque = '?eos.ruid=' + ruid + '&eos.rgid=' + rgid
    if atomicwrite:
        opaque += '&eos.atomic=1'
    if bookingsize:
        opaque += '&eos.bookingsize=' + str(bookingsize)
    return opaque + '&eos.app=wopi'
def _xrootcmd(endpoint, cmd, subcmd, ruid, rgid, args):
    '''Perform the <cmd>/<subcmd> action on the special /proc/user path on behalf of the given uid,gid.
    Note that this is entirely EOS-specific.

    :raises IOError: when EOS returns a non-zero retc; the message carries the stderr content.
    '''
    with XrdClient.File() as f:
        # Build the opaque /proc/user URL that encodes the EOS admin command.
        url = _geturlfor(endpoint) + '//proc/user/' + _eosargs(ruid, rgid) + '&mgm.cmd=' + cmd + \
              ('&mgm.subcmd=' + subcmd if subcmd else '') + '&' + args
        # time.perf_counter() replaces time.clock(), which was removed in
        # Python 3.8; it also measures wall-clock time, the right metric for I/O latency.
        tstart = time.perf_counter()
        rc, statInfo_unused = f.open(url, OpenFlags.READ)
        tend = time.perf_counter()
        log.info('msg="Invoked _xrootcmd" cmd="%s%s" url="%s" elapsedTimems="%.1f"' %
                 (cmd, ('/' + subcmd if subcmd else ''), url, (tend-tstart)*1000))
        # Response format: mgm.proc.stdout=...&mgm.proc.stderr=...&mgm.proc.retc=...
        res = f.readline().decode('utf-8').strip('\n').split('&')
        if len(res) == 3:  # we may only just get stdout: in that case, assume it's all OK
            rc = res[2]
            rc = rc[rc.find('=')+1:]
            if rc != '0':
                # failure: get info from stderr, log and raise
                msg = res[1][res[1].find('=')+1:]
                log.info('msg="Error with xroot command" cmd="%s" subcmd="%s" args="%s" error="%s" rc="%s"' % \
                         (cmd, subcmd, args, msg, rc.strip('\00')))
                raise IOError(msg)
        # all right, return everything that came in stdout
        return res[0][res[0].find('stdout=')+7:]
def _getfilename(filename):
    '''Map the given filename into the target namespace by prepending the homepath (see storagehomepath in wopiserver.conf)'''
    return '%s%s' % (homepath, filename)
def init(inconfig, inlog):
    '''Initialise the module-level configuration, logger, and default xroot client.'''
    global config  # pylint: disable=global-statement
    global log  # pylint: disable=global-statement
    global defaultstorage  # pylint: disable=global-statement
    global homepath  # pylint: disable=global-statement
    config = inconfig
    log = inlog
    defaultstorage = config.get('xroot', 'storageserver')
    # Eagerly create the client for the default endpoint so later
    # 'default' lookups always succeed.
    _getxrdfor(defaultstorage)
    # Optional namespace prefix for all file paths.
    homepath = config.get('xroot', 'storagehomepath') if config.has_option('xroot', 'storagehomepath') else ''
def stat(endpoint, filename, ruid, rgid):
    '''Stat a file via xroot on behalf of the given uid,gid, and returns a dict
    with 'size' and 'mtime'. Uses the default xroot API. Raises IOError on failure.'''
    filename = _getfilename(filename)
    # fix: time.clock() was removed in Python 3.8; use perf_counter() for timing
    tstart = time.perf_counter()
    rc, statInfo = _getxrdfor(endpoint).stat(filename + _eosargs(ruid, rgid))
    tend = time.perf_counter()
    log.info('msg="Invoked stat" filename="%s" elapsedTimems="%.1f"' % (filename, (tend-tstart)*1000))
    if statInfo is None:
        # no stat info: the return code carries the error message
        raise IOError(rc.message.strip('\n'))
    return {'size': statInfo.size, 'mtime': statInfo.modtime}
def statx(endpoint, filename, ruid, rgid):
    '''Get extended stat info (inode, ouid, ogid, size, mtime) via an xroot
    opaque query on behalf of the given uid,gid. Raises IOError on failure.'''
    filename = _getfilename(filename)
    # fix: time.clock() was removed in Python 3.8; use perf_counter() for timing
    tstart = time.perf_counter()
    rc, rawinfo = _getxrdfor(endpoint).query(QueryCode.OPAQUEFILE, filename + _eosargs(ruid, rgid) + '&mgm.pcmd=stat')
    tend = time.perf_counter()
    log.info('msg="Invoked stat" filename="%s" elapsedTimems="%.1f"' % (filename, (tend-tstart)*1000))
    if '[SUCCESS]' not in str(rc):
        raise IOError(str(rc).strip('\n'))
    rawinfo = str(rawinfo)
    if 'retc=' in rawinfo:
        # the server piggy-backed an error code in the payload
        raise IOError(rawinfo.strip('\n'))
    # the response is a whitespace-separated record; fields are extracted by position
    statxdata = rawinfo.split()
    return {'inode': statxdata[2],
            'ouid': statxdata[5],
            'ogid': statxdata[6],
            'size': int(statxdata[8]),
            'mtime': statxdata[12]}
def setxattr(endpoint, filename, ruid, rgid, key, value):
    '''Set the extended attribute <key> to <value> via a special open on behalf of the given uid, gid.'''
    args = 'mgm.attr.key=' + key + '&mgm.attr.value=' + str(value) + '&mgm.path=' + _getfilename(filename)
    _xrootcmd(endpoint, 'attr', 'set', ruid, rgid, args)
def getxattr(endpoint, filename, ruid, rgid, key):
    '''Get the extended attribute <key> via a special open on behalf of the given uid, gid.
    Returns the attribute value, or None when the response cannot be parsed.'''
    res = _xrootcmd(endpoint, 'attr', 'get', ruid, rgid, 'mgm.attr.key=' + key + '&mgm.path=' + _getfilename(filename))
    # on success the response has the form <key>="<value>": extract the quoted value
    parts = res.split('"')
    if len(parts) > 1:
        return parts[1]
    log.warning('msg="Failed to getxattr" filename="%s" key="%s" res="%s"' % (filename, key, res))
    return None
def rmxattr(endpoint, filename, ruid, rgid, key):
    '''Remove the extended attribute <key> via a special open on behalf of the given uid, gid.'''
    target = _getfilename(filename)
    _xrootcmd(endpoint, 'attr', 'rm', ruid, rgid, 'mgm.attr.key=' + key + '&mgm.path=' + target)
def readfile(endpoint, filename, ruid, rgid):
    '''Read a file via xroot on behalf of the given uid, gid.
    Note that the function is a generator, managed by Flask: on open errors it
    yields 'ERROR on read' followed by the error message instead of file content.'''
    log.debug('msg="Invoking readFile" filename="%s"' % filename)
    with XrdClient.File() as f:
        fileurl = _geturlfor(endpoint) + '/' + homepath + filename + _eosargs(ruid, rgid)
        # fix: time.clock() was removed in Python 3.8; use perf_counter() for timing
        tstart = time.perf_counter()
        rc, statInfo_unused = f.open(fileurl, OpenFlags.READ)
        tend = time.perf_counter()
        if not rc.ok:
            # the file could not be opened: check the case of ENOENT and log it as info to keep the logs cleaner
            if 'No such file or directory' in rc.message:
                log.info('msg="File not found on read" filename="%s"' % filename)
            else:
                log.warning('msg="Error opening the file for read" filename="%s" code="%d" error="%s"' % \
                            (filename, rc.shellcode, rc.message.strip('\n')))
            # as this is a generator, we yield the error string instead of the file's contents
            yield 'ERROR on read'
            yield rc.message
        else:
            log.info('msg="File open for read" filename="%s" elapsedTimems="%.1f"' % (filename, (tend-tstart)*1000))
            chunksize = config.getint('io', 'chunksize')
            rc, statInfo = f.stat()
            # cap the chunk size to the file size, but never let it go below 1:
            # the original statInfo.size-1 computation yielded -1 for empty files
            chunksize = max(1, min(chunksize, statInfo.size - 1))
            # the actual read is buffered and managed by the Flask server
            for chunk in f.readchunks(offset=0, chunksize=chunksize):
                yield chunk
def writefile(endpoint, filename, ruid, rgid, content, noversion=0):
    '''Write a file via xroot on behalf of the given uid, gid. The entire content is written
    and any pre-existing file is deleted (or moved to the previous version if supported).
    If noversion=1, the write explicitly disables versioning: this is useful for lock files.
    Raises IOError on any open/write/truncate/close failure.'''
    size = len(content)
    log.debug('msg="Invoking writeFile" filename="%s" size="%d"' % (filename, size))
    f = XrdClient.File()
    # fix: time.clock() was removed in Python 3.8; use perf_counter() for timing
    tstart = time.perf_counter()
    rc, statInfo_unused = f.open(_geturlfor(endpoint) + '/' + homepath + filename + _eosargs(ruid, rgid, 1, size) + \
                                 ('&sys.versioning=0' if noversion else ''), OpenFlags.DELETE)
    tend = time.perf_counter()
    log.info('msg="File open for write" filename="%s" elapsedTimems="%.1f"' % (filename, (tend-tstart)*1000))
    if not rc.ok:
        log.warning('msg="Error opening the file for write" filename="%s" error="%s"' % (filename, rc.message.strip('\n')))
        raise IOError(rc.message.strip('\n'))
    try:
        # write the file. In a future implementation, we should find a way to only update the required chunks...
        rc, statInfo_unused = f.write(content, offset=0, size=size)
        if not rc.ok:
            log.warning('msg="Error writing the file" filename="%s" error="%s"' % (filename, rc.message.strip('\n')))
            raise IOError(rc.message.strip('\n'))
        rc, statInfo_unused = f.truncate(size)
        if not rc.ok:
            log.warning('msg="Error truncating the file" filename="%s" error="%s"' % (filename, rc.message.strip('\n')))
            raise IOError(rc.message.strip('\n'))
    finally:
        # fix: always close the handle, so a failed write/truncate does not leak it;
        # a close error raised here supersedes any in-flight write error
        rc, statInfo_unused = f.close()
        if not rc.ok:
            log.warning('msg="Error closing the file" filename="%s" error="%s"' % (filename, rc.message.strip('\n')))
            raise IOError(rc.message.strip('\n'))
def renamefile(endpoint, origfilename, newfilename, ruid, rgid):
    '''Rename a file via a special open from origfilename to newfilename on behalf of the given uid, gid.'''
    src = _getfilename(origfilename)
    dst = _getfilename(newfilename)
    _xrootcmd(endpoint, 'file', 'rename', ruid, rgid,
              'mgm.path=' + src + '&mgm.file.source=' + src + '&mgm.file.target=' + dst)
def removefile(endpoint, filename, ruid, rgid, force=0):
    '''Remove a file via a special open on behalf of the given uid, gid.
    If force=1 or True, then pass the f option, that is skip the recycle bin.
    This is useful for lock files, but it requires uid,gid to be root.'''
    args = 'mgm.path=' + _getfilename(filename)
    # the force option is honoured only when running as root on both uid and gid
    if force and int(ruid) == 0 and int(rgid) == 0:
        args += '&mgm.option=f'
    _xrootcmd(endpoint, 'rm', None, ruid, rgid, args)
|
'''
xrootiface.py
XRootD interface for the WOPI server for CERNBox
Author: Gi<EMAIL>ppe.<EMAIL>, CERN/IT-ST
Contributions: <EMAIL>
'''
import time
from XRootD import client as XrdClient
from XRootD.client.flags import OpenFlags, QueryCode
# module-wide state
config = None
log = None
xrdfs = {} # this is to map each endpoint [string] to its XrdClient
defaultstorage = None
homepath = None
def _getxrdfor(endpoint):
'''Look up the xrootd client for the given endpoint, create it if missing. Supports "default" for the defaultstorage endpoint.'''
global xrdfs # pylint: disable=global-statement
global defaultstorage # pylint: disable=global-statement
if endpoint == 'default':
return xrdfs[defaultstorage]
try:
return xrdfs[endpoint]
except KeyError:
# not found, create it
xrdfs[endpoint] = XrdClient.FileSystem(endpoint)
return xrdfs[endpoint]
def _geturlfor(endpoint):
'''Look up the URL for a given endpoint: "default" corresponds to the defaultstorage one'''
if endpoint == 'default':
return defaultstorage
return endpoint
def _eosargs(ruid, rgid, atomicwrite=0, bookingsize=0):
'''One-liner to generate extra EOS-specific arguments for the xroot URL'''
return '?eos.ruid=' + ruid + '&eos.rgid=' + rgid + ('&eos.atomic=1' if atomicwrite else '') + \
(('&eos.bookingsize='+str(bookingsize)) if bookingsize else '') + '&eos.app=wopi'
def _xrootcmd(endpoint, cmd, subcmd, ruid, rgid, args):
'''Perform the <cmd>/<subcmd> action on the special /proc/user path on behalf of the given uid,gid.
Note that this is entirely EOS-specific.'''
with XrdClient.File() as f:
url = _geturlfor(endpoint) + '//proc/user/' + _eosargs(ruid, rgid) + '&mgm.cmd=' + cmd + \
('&mgm.subcmd=' + subcmd if subcmd else '') + '&' + args
tstart = time.clock()
rc, statInfo_unused = f.open(url, OpenFlags.READ)
tend = time.clock()
log.info('msg="Invoked _xrootcmd" cmd="%s%s" url="%s" elapsedTimems="%.1f"' %
(cmd, ('/' + subcmd if subcmd else ''), url, (tend-tstart)*1000))
res = f.readline().decode('utf-8').strip('\n').split('&')
if len(res) == 3: # we may only just get stdout: in that case, assume it's all OK
rc = res[2]
rc = rc[rc.find('=')+1:]
if rc != '0':
# failure: get info from stderr, log and raise
msg = res[1][res[1].find('=')+1:]
log.info('msg="Error with xroot command" cmd="%s" subcmd="%s" args="%s" error="%s" rc="%s"' % \
(cmd, subcmd, args, msg, rc.strip('\00')))
raise IOError(msg)
# all right, return everything that came in stdout
return res[0][res[0].find('stdout=')+7:]
def _getfilename(filename):
'''map the given filename into the target namespace by prepending the homepath (see storagehomepath in wopiserver.conf)'''
return homepath + filename
def init(inconfig, inlog):
'''Init module-level variables'''
global config # pylint: disable=global-statement
global log # pylint: disable=global-statement
global defaultstorage # pylint: disable=global-statement
global homepath # pylint: disable=global-statement
config = inconfig
log = inlog
defaultstorage = config.get('xroot', 'storageserver')
# prepare the xroot client for the default storageserver
_getxrdfor(defaultstorage)
if config.has_option('xroot', 'storagehomepath'):
homepath = config.get('xroot', 'storagehomepath')
else:
homepath = ''
def stat(endpoint, filename, ruid, rgid):
'''Stat a file via xroot on behalf of the given uid,gid, and returns (size, mtime). Uses the default xroot API.'''
filename = _getfilename(filename)
tstart = time.clock()
rc, statInfo = _getxrdfor(endpoint).stat(filename + _eosargs(ruid, rgid))
tend = time.clock()
log.info('msg="Invoked stat" filename="%s" elapsedTimems="%.1f"' % (filename, (tend-tstart)*1000))
if statInfo is None:
raise IOError(rc.message.strip('\n'))
return {'size': statInfo.size, 'mtime': statInfo.modtime}
def statx(endpoint, filename, ruid, rgid):
'''Get extended stat info (inode, ouid, ogid, size, mtime) via an xroot opaque query on behalf of the given uid,gid'''
filename = _getfilename(filename)
tstart = time.clock()
rc, rawinfo = _getxrdfor(endpoint).query(QueryCode.OPAQUEFILE, filename + _eosargs(ruid, rgid) + '&mgm.pcmd=stat')
tend = time.clock()
log.info('msg="Invoked stat" filename="%s" elapsedTimems="%.1f"' % (filename, (tend-tstart)*1000))
if '[SUCCESS]' not in str(rc):
raise IOError(str(rc).strip('\n'))
rawinfo = str(rawinfo)
if 'retc=' in rawinfo:
raise IOError(rawinfo.strip('\n'))
statxdata = rawinfo.split()
return {'inode': statxdata[2],
'ouid': statxdata[5],
'ogid': statxdata[6],
'size': int(statxdata[8]),
'mtime': statxdata[12]}
def setxattr(endpoint, filename, ruid, rgid, key, value):
'''Set the extended attribute <key> to <value> via a special open on behalf of the given uid, gid'''
_xrootcmd(endpoint, 'attr', 'set', ruid, rgid, 'mgm.attr.key=' + key + '&mgm.attr.value=' + str(value) + '&mgm.path=' + _getfilename(filename))
def getxattr(endpoint, filename, ruid, rgid, key):
'''Get the extended attribute <key> via a special open on behalf of the given uid, gid'''
res = _xrootcmd(endpoint, 'attr', 'get', ruid, rgid, 'mgm.attr.key=' + key + '&mgm.path=' + _getfilename(filename))
# if no error, the response comes in the format <key>="<value>"
try:
return res.split('"')[1]
except IndexError:
log.warning('msg="Failed to getxattr" filename="%s" key="%s" res="%s"' % (filename, key, res))
return None
def rmxattr(endpoint, filename, ruid, rgid, key):
'''Remove the extended attribute <key> via a special open on behalf of the given uid, gid'''
filename = _getfilename(filename)
_xrootcmd(endpoint, 'attr', 'rm', ruid, rgid, 'mgm.attr.key=' + key + '&mgm.path=' + filename)
def readfile(endpoint, filename, ruid, rgid):
'''Read a file via xroot on behalf of the given uid, gid. Note that the function is a generator, managed by Flask.'''
log.debug('msg="Invoking readFile" filename="%s"' % filename)
with XrdClient.File() as f:
fileurl = _geturlfor(endpoint) + '/' + homepath + filename + _eosargs(ruid, rgid)
tstart = time.clock()
rc, statInfo_unused = f.open(fileurl, OpenFlags.READ)
tend = time.clock()
if not rc.ok:
# the file could not be opened: check the case of ENOENT and log it as info to keep the logs cleaner
if 'No such file or directory' in rc.message:
log.info('msg="File not found on read" filename="%s"' % filename)
else:
log.warning('msg="Error opening the file for read" filename="%s" code="%d" error="%s"' % \
(filename, rc.shellcode, rc.message.strip('\n')))
# as this is a generator, we yield the error string instead of the file's contents
yield 'ERROR on read'
yield rc.message
else:
log.info('msg="File open for read" filename="%s" elapsedTimems="%.1f"' % (filename, (tend-tstart)*1000))
chunksize = config.getint('io', 'chunksize')
rc, statInfo = f.stat()
chunksize = min(chunksize, statInfo.size-1)
# the actual read is buffered and managed by the Flask server
for chunk in f.readchunks(offset=0, chunksize=chunksize):
yield chunk
def writefile(endpoint, filename, ruid, rgid, content, noversion=0):
'''Write a file via xroot on behalf of the given uid, gid. The entire content is written
and any pre-existing file is deleted (or moved to the previous version if supported).
If noversion=1, the write explicitly disables versioning: this is useful for lock files.'''
size = len(content)
log.debug('msg="Invoking writeFile" filename="%s" size="%d"' % (filename, size))
f = XrdClient.File()
tstart = time.clock()
rc, statInfo_unused = f.open(_geturlfor(endpoint) + '/' + homepath + filename + _eosargs(ruid, rgid, 1, size) + \
('&sys.versioning=0' if noversion else ''), OpenFlags.DELETE)
tend = time.clock()
log.info('msg="File open for write" filename="%s" elapsedTimems="%.1f"' % (filename, (tend-tstart)*1000))
if not rc.ok:
log.warning('msg="Error opening the file for write" filename="%s" error="%s"' % (filename, rc.message.strip('\n')))
raise IOError(rc.message.strip('\n'))
# write the file. In a future implementation, we should find a way to only update the required chunks...
rc, statInfo_unused = f.write(content, offset=0, size=size)
if not rc.ok:
log.warning('msg="Error writing the file" filename="%s" error="%s"' % (filename, rc.message.strip('\n')))
raise IOError(rc.message.strip('\n'))
rc, statInfo_unused = f.truncate(size)
if not rc.ok:
log.warning('msg="Error truncating the file" filename="%s" error="%s"' % (filename, rc.message.strip('\n')))
raise IOError(rc.message.strip('\n'))
rc, statInfo_unused = f.close()
if not rc.ok:
log.warning('msg="Error closing the file" filename="%s" error="%s"' % (filename, rc.message.strip('\n')))
raise IOError(rc.message.strip('\n'))
def renamefile(endpoint, origfilename, newfilename, ruid, rgid):
'''Rename a file via a special open from origfilename to newfilename on behalf of the given uid, gid.'''
_xrootcmd(endpoint, 'file', 'rename', ruid, rgid, 'mgm.path=' + _getfilename(origfilename) + \
'&mgm.file.source=' + _getfilename(origfilename) + '&mgm.file.target=' + _getfilename(newfilename))
def removefile(endpoint, filename, ruid, rgid, force=0):
'''Remove a file via a special open on behalf of the given uid, gid.
If force=1 or True, then pass the f option, that is skip the recycle bin.
This is useful for lock files, but it requires uid,gid to be root.'''
_xrootcmd(endpoint, 'rm', None, ruid, rgid, 'mgm.path=' + _getfilename(filename) + \
('&mgm.option=f' if force and int(ruid) == 0 and int(rgid) == 0 else ''))
|
en
| 0.771543
|
xrootiface.py XRootD interface for the WOPI server for CERNBox Author: Gi<EMAIL>ppe.<EMAIL>, CERN/IT-ST Contributions: <EMAIL> # module-wide state # this is to map each endpoint [string] to its XrdClient Look up the xrootd client for the given endpoint, create it if missing. Supports "default" for the defaultstorage endpoint. # pylint: disable=global-statement # pylint: disable=global-statement # not found, create it Look up the URL for a given endpoint: "default" corresponds to the defaultstorage one One-liner to generate extra EOS-specific arguments for the xroot URL Perform the <cmd>/<subcmd> action on the special /proc/user path on behalf of the given uid,gid. Note that this is entirely EOS-specific. # we may only just get stdout: in that case, assume it's all OK # failure: get info from stderr, log and raise # all right, return everything that came in stdout map the given filename into the target namespace by prepending the homepath (see storagehomepath in wopiserver.conf) Init module-level variables # pylint: disable=global-statement # pylint: disable=global-statement # pylint: disable=global-statement # pylint: disable=global-statement # prepare the xroot client for the default storageserver Stat a file via xroot on behalf of the given uid,gid, and returns (size, mtime). Uses the default xroot API. Get extended stat info (inode, ouid, ogid, size, mtime) via an xroot opaque query on behalf of the given uid,gid Set the extended attribute <key> to <value> via a special open on behalf of the given uid, gid Get the extended attribute <key> via a special open on behalf of the given uid, gid # if no error, the response comes in the format <key>="<value>" Remove the extended attribute <key> via a special open on behalf of the given uid, gid Read a file via xroot on behalf of the given uid, gid. Note that the function is a generator, managed by Flask. 
# the file could not be opened: check the case of ENOENT and log it as info to keep the logs cleaner # as this is a generator, we yield the error string instead of the file's contents # the actual read is buffered and managed by the Flask server Write a file via xroot on behalf of the given uid, gid. The entire content is written and any pre-existing file is deleted (or moved to the previous version if supported). If noversion=1, the write explicitly disables versioning: this is useful for lock files. # write the file. In a future implementation, we should find a way to only update the required chunks... Rename a file via a special open from origfilename to newfilename on behalf of the given uid, gid. Remove a file via a special open on behalf of the given uid, gid. If force=1 or True, then pass the f option, that is skip the recycle bin. This is useful for lock files, but it requires uid,gid to be root.
| 2.553645
| 3
|
agro_site/sales_backend/migrations/0004_auto_20220424_1054.py
|
LukoninDmitryPy/agro_site-2
| 0
|
6628590
|
# Generated by Django 2.2.16 on 2022-04-24 07:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds the RatingStar and Rating models
    # and re-points Comment.product with a related_name.
    # verbose_name values are in Russian ("Звезда рейтинга" = rating star,
    # "Рейтинг" = rating, "Значение" = value, "IP адрес" = IP address).

    dependencies = [
        ('sales_backend', '0003_auto_20220423_1218'),
    ]

    operations = [
        # RatingStar: a single star value, listed highest-first
        migrations.CreateModel(
            name='RatingStar',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.SmallIntegerField(default=0, verbose_name='Значение')),
            ],
            options={
                'verbose_name': 'Звезда рейтинга',
                'verbose_name_plural': 'Звезды рейтинга',
                'ordering': ['-value'],
            },
        ),
        # Comment.product: add help_text, related_name and verbose_name
        migrations.AlterField(
            model_name='comment',
            name='product',
            field=models.ForeignKey(help_text='Комментарий поста', on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='sales_backend.Product', verbose_name='Комментарий'),
        ),
        # Rating: one star rating per IP address for a product
        migrations.CreateModel(
            name='Rating',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ip', models.CharField(max_length=15, verbose_name='IP адрес')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sales_backend.Product', verbose_name='продукт')),
                ('star', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sales_backend.RatingStar', verbose_name='звезда')),
            ],
            options={
                'verbose_name': 'Рейтинг',
                'verbose_name_plural': 'Рейтинги',
            },
        ),
    ]
|
# Generated by Django 2.2.16 on 2022-04-24 07:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sales_backend', '0003_auto_20220423_1218'),
]
operations = [
migrations.CreateModel(
name='RatingStar',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.SmallIntegerField(default=0, verbose_name='Значение')),
],
options={
'verbose_name': 'Звезда рейтинга',
'verbose_name_plural': 'Звезды рейтинга',
'ordering': ['-value'],
},
),
migrations.AlterField(
model_name='comment',
name='product',
field=models.ForeignKey(help_text='Комментарий поста', on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='sales_backend.Product', verbose_name='Комментарий'),
),
migrations.CreateModel(
name='Rating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip', models.CharField(max_length=15, verbose_name='IP адрес')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sales_backend.Product', verbose_name='продукт')),
('star', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sales_backend.RatingStar', verbose_name='звезда')),
],
options={
'verbose_name': 'Рейтинг',
'verbose_name_plural': 'Рейтинги',
},
),
]
|
en
| 0.81084
|
# Generated by Django 2.2.16 on 2022-04-24 07:54
| 1.542182
| 2
|
battlefield_rcon/connection.py
|
Eegras/python-battlefield-rcon
| 3
|
6628591
|
import binascii
import socket
from battlefield_rcon.utils import (
generate_password_hash,
create_packet,
contains_complete_packet,
decode_packet,
encode_packet,
)
from battlefield_rcon.exceptions import RCONLoginRequiredException, RCONAuthException
class RCONConnection(object):
    '''Minimal RCON client for Battlefield servers: sequence-numbered packet
    protocol over TCP with optional salted-hash authentication.'''

    def __init__(self, remote_addr, port, password=None, recv_buffer=1024):
        # fix: the original default was an anonymized "<PASSWORD>" placeholder
        # (invalid Python); None means "no authentication" and matches the
        # `if not self._password` check in connect()
        self._remote_addr = remote_addr
        self._port = port
        self._password = password
        self._conn = None
        self._authenticated = False
        self._seq = 0
        self.recv_buffer = int(recv_buffer)

    def _read_response(self):
        '''Accumulate socket data until one complete packet is buffered, then decode it.'''
        data_buffer = bytes()
        while not contains_complete_packet(data_buffer):
            data_buffer += self._conn.recv(self.recv_buffer)
        return decode_packet(data_buffer)

    def send(self, words):
        '''Send one request packet built from `words` and return the response words.
        Raises RCONLoginRequiredException when the server demands authentication.'''
        packet_to_send = encode_packet(
            create_packet(self._seq, False, False, words=words)
        )
        self._seq += 1
        self._conn.send(packet_to_send)
        data = self._read_response()
        if "LogInRequired" in data["words"]:
            raise RCONLoginRequiredException
        return data["words"]

    def connect(self):
        '''Open the TCP connection; if a password is configured, perform the
        salted-hash login handshake. Raises RCONAuthException on auth failure.'''
        if not self._conn:
            self._conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._conn.settimeout(1)
            self._conn.connect((self._remote_addr, self._port))
            self._conn.setblocking(True)
            self._seq = 0
        if not self._password:
            return
        # first login.hashed call returns the hex-encoded salt
        password_salt_response = self.send(words=["login.hashed"])
        if "OK" not in password_salt_response:
            raise RCONAuthException
        salt_bytes = binascii.unhexlify(password_salt_response[1])
        # fix: the password keyword carried an anonymized "<PASSWORD>" placeholder;
        # hash the password stored on this connection
        pwd_hash = generate_password_hash(password=self._password, salt=salt_bytes)
        pwd_hash_final = pwd_hash.upper()
        response = self.send(words=["login.hashed", pwd_hash_final])
        if "OK" not in response:
            raise RCONAuthException
        self._authenticated = True
        return response

    def disconnect(self):
        '''Close the socket (if open) and reset the authentication state.'''
        if self._conn:
            self._conn.close()
            self._conn = None
        self._authenticated = False

    def read_events(self):
        '''Generator: enable server-side event streaming, then yield each event's words forever.'''
        self.send(words=["admin.eventsEnabled", "true"])
        while True:
            raw = self._read_response()
            yield raw["words"]
|
import binascii
import socket
from battlefield_rcon.utils import (
generate_password_hash,
create_packet,
contains_complete_packet,
decode_packet,
encode_packet,
)
from battlefield_rcon.exceptions import RCONLoginRequiredException, RCONAuthException
class RCONConnection(object):
def __init__(self, remote_addr, port, password=<PASSWORD>, recv_buffer=1024):
self._remote_addr = remote_addr
self._port = port
self._password = password
self._conn = None
self._authenticated = False
self._seq = 0
self.recv_buffer=int(recv_buffer)
def _read_response(self):
data_buffer = bytes()
while not contains_complete_packet(data_buffer):
data_buffer += self._conn.recv(self.recv_buffer)
return decode_packet(data_buffer)
def send(self, words):
packet_to_send = encode_packet(
create_packet(self._seq, False, False, words=words)
)
self._seq += 1
self._conn.send(packet_to_send)
data = self._read_response()
if "LogInRequired" in data["words"]:
raise RCONLoginRequiredException
return data["words"]
def connect(self):
if not self._conn:
self._conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._conn.settimeout(1)
self._conn.connect((self._remote_addr, self._port))
self._conn.setblocking(True)
self._seq = 0
if not self._password:
return
password_salt_response = self.send(words=["login.hashed"])
if "OK" not in password_salt_response:
raise RCONAuthException
salt_bytes = binascii.unhexlify(password_salt_response[1])
pwd_hash = generate_password_hash(password=<PASSWORD>._password, salt=salt_bytes)
pwd_hash_final = pwd_hash.upper()
response = self.send(words=["login.hashed", pwd_hash_final])
if "OK" not in response:
raise RCONAuthException
self._authenticated = True
return response
def disconnect(self):
if self._conn:
self._conn.close()
self._conn = None
self._authenticated = False
def read_events(self):
self.send(words=["admin.eventsEnabled", "true"])
while True:
raw = self._read_response()
yield raw["words"]
|
none
| 1
| 2.657784
| 3
|
|
pybite5.py
|
mladuke/Algorithms
| 0
|
6628592
|
NAMES = ['<NAME>', '<NAME>', '<NAME>',
'<NAME>', '<NAME>', '<NAME>',
'<NAME>', '<NAME>', '<NAME>',
'<NAME>', '<NAME>', '<NAME>', '<NAME>']
def dedup_and_title_case_names(names):
    """Return a sorted list of title-cased names, each name appearing only once.

    Fix: title-case BEFORE deduplicating, so case variants of the same name
    (e.g. 'bob smith' and 'Bob Smith') collapse to a single entry — the original
    deduplicated the raw strings first and could return duplicates after .title().
    """
    return sorted({name.title() for name in names})
def sort_by_surname_desc(names):
    """Return the deduplicated, title-cased names sorted descending by surname
    (the last whitespace-separated word of each full name)."""
    deduped = dedup_and_title_case_names(names)
    def surname(full_name):
        return full_name.split(" ")[-1]
    return sorted(deduped, key=surname, reverse=True)
def shortest_first_name(names):
    """Return the shortest first name (str).
    You can assume there is only one shortest name.
    """
    cleaned = dedup_and_title_case_names(names)
    # compare only the first word of each full name, by length
    return min((full.split()[0] for full in cleaned), key=len)
print(dedup_and_title_case_names(NAMES))
print(sort_by_surname_desc(NAMES))
print(shortest_first_name(NAMES))
|
NAMES = ['<NAME>', '<NAME>', '<NAME>',
'<NAME>', '<NAME>', '<NAME>',
'<NAME>', '<NAME>', '<NAME>',
'<NAME>', '<NAME>', '<NAME>', '<NAME>']
def dedup_and_title_case_names(names):
"""Should return a list of title cased names,
each name appears only once"""
return [x.title() for x in sorted(list(set(names)))]
def sort_by_surname_desc(names):
"""Returns names list sorted desc by surname"""
names = dedup_and_title_case_names(names)
return sorted(names, key=lambda x: x.split(" ")[-1], reverse=True)
def shortest_first_name(names):
"""Returns the shortest first name (str).
You can assume there is only one shortest name.
"""
names = dedup_and_title_case_names(names)
firstNames= [name.split()[0] for name in names]
return min(firstNames, key=len)
print(dedup_and_title_case_names(NAMES))
print(sort_by_surname_desc(NAMES))
print(shortest_first_name(NAMES))
|
en
| 0.824854
|
Should return a list of title cased names, each name appears only once Returns names list sorted desc by surname Returns the shortest first name (str). You can assume there is only one shortest name.
| 4.185545
| 4
|
auto-test/tapplet/acl/acl_rest_test.py
|
asterfusion/Tapplet
| 1
|
6628593
|
from tools.conftest_tools import *
from tools.rest_tools import *
from tools.tcpreplay_tools import *
import json
from pytest_main import port1_config
from pytest_main import port2_config
from pytest_main import sf_helper
from pytest_main import global_verbose
import time
# module under test
mod_name = "acl"

# Template payload for an IPv6 6-tuple ACL rule: group_1 / rule index 20,
# with wildcard addresses (::/128 masks) and zeroed port/protocol ranges.
rule_tuple6_model = {
    "group_1":
    {
        "20":
        {
            "rule_type": "tuple6",
            "rule_cfg": {
                "dip": "0::0",
                "dip_mask": 128,
                "dport_max": 0,
                "dport_min": 0,
                "proto_max": 0,
                "proto_min": 0,
                "sip": "0::0",
                "sip_mask": 128,
                "sport_max": 0,
                "sport_min": 0
            },
        }
    }
}

# Template payload for an IPv4 4-tuple ACL rule: group_1 / rule index 20,
# matching a concrete UDP (proto 17) flow with /24 source and destination masks.
rule_tuple4_model = {
    "group_1":
    {
        "20":
        {
            "rule_cfg": {
                "dip": "172.16.17.32",
                "dip_mask": 24,
                "dport_max": 45,
                "dport_min": 45,
                "proto_max": 17,
                "proto_min": 17,
                "sip": "1.23.2.1",
                "sip_mask": 24,
                "sport_max": 67,
                "sport_min": 67,
            },
            "rule_type": "tuple4"
        }
    }
}
def test_acl_post_config():
    '''Creating a rule that does not exist yet via POST must succeed.'''
    # start from a clean device configuration
    reset_all_mod_config(sf_helper)
    payload = rule_tuple6_model
    ret = sf_helper.auto_run_no_login("acl/config", GlobalRestValue.ACTION_POST, data=payload, verbose=global_verbose)
    assert ret[0] == 0
def test_acl_post_config_fail():
    '''POSTing a rule that already exists must fail with rc -1.'''
    # start from a clean device configuration
    reset_all_mod_config(sf_helper)
    payload = rule_tuple6_model
    first = sf_helper.auto_run_no_login("acl/config", GlobalRestValue.ACTION_POST, data=payload, verbose=global_verbose)
    assert first[0] == 0
    # the second identical POST collides with the existing rule
    second = sf_helper.auto_run_no_login("acl/config", GlobalRestValue.ACTION_POST, data=payload, verbose=global_verbose)
    assert second[0] == -1
def test_acl_put_config():
    '''PUT must succeed both for a rule that does not exist and for one that does (upsert).'''
    # start from a clean device configuration
    reset_all_mod_config(sf_helper)
    payload = rule_tuple6_model
    for _ in range(2):  # first PUT creates, second PUT updates in place
        ret = sf_helper.auto_run_no_login("acl/config", GlobalRestValue.ACTION_PUT, data=payload, verbose=global_verbose)
        assert ret[0] == 0
def test_acl_delete_config():
    '''Delete a rule, exercising all three DELETE URL forms:
    query params only, group in the URL, and group+index in the URL.'''
    # clean up all config
    reset_all_mod_config(sf_helper)
    data = rule_tuple6_model
    param_1 = { "index" : 20 }              # index as query param (group comes from the URL)
    param_2 = { "group" : 1 , "index" : 20 }  # both group and index as query params
    # create rule
    ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_PUT , data = data , verbose = global_verbose )
    assert ret[0] == 0
    # delete rule
    ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_DELETE , params = param_2 , verbose = global_verbose )
    assert ret[0] == 0
    # create rule
    ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_PUT , data = data , verbose = global_verbose )
    assert ret[0] == 0
    # delete rule ( url with group )
    ret = sf_helper.auto_run_no_login("acl/config/group_1" , GlobalRestValue.ACTION_DELETE , params = param_1 , verbose = global_verbose )
    assert ret[0] == 0
    # create rule
    ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_PUT , data = data , verbose = global_verbose )
    assert ret[0] == 0
    # delete rule ( url with group and index )
    ret = sf_helper.auto_run_no_login("acl/config/group_1/20" , GlobalRestValue.ACTION_DELETE , verbose = global_verbose )
    assert ret[0] == 0
def test_acl_get_single_stat():
    '''Fetch single/multiple rule counters, exercising all three GET URL forms:
    query params only, group in the URL, and group+index in the URL.'''
    # clean up all config
    reset_all_mod_config(sf_helper)
    data = rule_tuple6_model
    param_1 = { "index" : 20 }              # index as query param (group comes from the URL)
    param_2 = { "group" : 1 , "index" : 20 }  # both group and index as query params
    # create rule
    ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_PUT , data = data , verbose = global_verbose )
    assert ret[0] == 0
    # stat lookup via query params only
    ret = sf_helper.auto_run_no_login("acl/stat" , GlobalRestValue.ACTION_GET , params = param_2 , verbose = global_verbose )
    assert ret[0] == 0
    # stat lookup with the group encoded in the URL
    ret = sf_helper.auto_run_no_login("acl/stat/group_1" , GlobalRestValue.ACTION_GET , params = param_1 , verbose = global_verbose )
    assert ret[0] == 0
    # stat lookup with group and index both encoded in the URL
    ret = sf_helper.auto_run_no_login("acl/stat/group_1/20" , GlobalRestValue.ACTION_GET , verbose = global_verbose )
    assert ret[0] == 0
def test_acl_get_too_large_stat():
    '''Requesting counters for too many rules at once (101) must be rejected with rc -1.'''
    # clean up all config
    reset_all_mod_config(sf_helper)
    #use rule_tuple6_model make 101 rules
    # rule_g1_101 = {"group_1":{"%d"%i:rule_tuple6_model["group_1"]["20"] for i in range(1 , 101 + 1)}}
    rule_config = {}
    for i in range(1 , 101 + 1):
        # each rule differs only by dport_max so the 101 rules are all distinct
        tmpcfg = {
            "rule_type":"tuple6",
            "rule_cfg":{
                "dip":"0::0",
                "dip_mask":128,
                "dport_max": i ,
                "dport_min":0,
                "proto_max":0,
                "proto_min":0,
                "sip":"0::0",
                "sip_mask":128,
                "sport_max":0,
                "sport_min":0,
            }
        }
        tmpdict = {str(i) : tmpcfg}
        rule_config.update(tmpdict)
    data = {"group_1" : rule_config}
    #creat rule
    ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_PUT , data = data , verbose = global_verbose )
    assert ret[0] == 0
    #get stat for the whole group: expected to exceed the per-request limit
    ret = sf_helper.auto_run_no_login("acl/stat/group_1" , GlobalRestValue.ACTION_GET , verbose = global_verbose )
    assert ret[0] == -1
def test_acl_clear_stat():
    '''Clearing all ACL counters via a JSON-Patch replace on the stat root must succeed.'''
    # start from a clean device configuration
    reset_all_mod_config(sf_helper)
    clear_op = [{"op": "replace", "path": "/", "value": 0}]
    ret = sf_helper.auto_run_no_login("acl/stat", GlobalRestValue.ACTION_PATCH, data=clear_op, verbose=global_verbose)
    assert ret[0] == 0
def test_acl_sync():
'''
测试acl同步
'''
# clean up all config
reset_all_mod_config(sf_helper)
patch_data = [ { "op" : "replace" , "path" : "/" , "value" : 0 } ]
ret = sf_helper.auto_run_no_login("acl/sync" , GlobalRestValue.ACTION_PATCH, data = patch_data, verbose = global_verbose )
assert ret[0] == 0
time.sleep(4)
def test_get_acl_rule_group():
'''
获取interface acl 组默认配置
'''
# clean up all config
reset_all_mod_config(sf_helper)
param_1 = { "index" : port1_config }
ret = sf_helper.auto_run_no_login("interfaces/config" , GlobalRestValue.ACTION_GET , params = param_1 , verbose = global_verbose )
assert ret[0] == 0
assert ret[1][port1_config]["ingress_config"]["acl_rule_group"] == 1
def test_set_acl_rule_group():
'''
配置interface acl 组
'''
# clean up all config
reset_all_mod_config(sf_helper)
patch_data = [ { "op" : "replace" ,
"path" : "/" + port1_config + "/ingress_config/acl_rule_group" , "value" : 2 } ]
ret = sf_helper.auto_run_no_login("interfaces/config" , GlobalRestValue.ACTION_PATCH, data = patch_data, verbose = global_verbose )
assert ret[0] == 0
param_1 = { "index" : port1_config }
ret = sf_helper.auto_run_no_login("interfaces/config" , GlobalRestValue.ACTION_GET , params = param_1 , verbose = global_verbose )
assert ret[0] == 0
assert ret[1][port1_config]["ingress_config"]["acl_rule_group"] == 2
def test_acl_duplicate_rule():
'''
重复规则测试
'''
# clean up all config
reset_all_mod_config(sf_helper)
### tuple6 ###
#use rule_tuple6_model make 2 rules
data = {"group_1":{"1" : rule_tuple6_model["group_1"]["20"] }}
#creat first rule
ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_PUT , data = data , verbose = global_verbose )
assert ret[0] == 0
data = {"group_1":{"2" : rule_tuple6_model["group_1"]["20"] }}
#creat second rule
ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_PUT , data = data , verbose = global_verbose )
assert ret[0] == -1
### tuple4 ###
data = {"group_1":{"1" : rule_tuple4_model["group_1"]["20"] }}
#creat first rule
ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_PUT , data = data , verbose = global_verbose )
assert ret[0] == 0
data = {"group_1":{"2" : rule_tuple4_model["group_1"]["20"] }}
#creat second rule
ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_PUT , data = data , verbose = global_verbose )
assert ret[0] == -1
|
from tools.conftest_tools import *
from tools.rest_tools import *
from tools.tcpreplay_tools import *
import json
from pytest_main import port1_config
from pytest_main import port2_config
from pytest_main import sf_helper
from pytest_main import global_verbose
import time
mod_name = "acl"
rule_tuple6_model = {
"group_1":
{
"20":
{
"rule_type":"tuple6",
"rule_cfg":{
"dip":"0::0",
"dip_mask":128,
"dport_max":0,
"dport_min":0,
"proto_max":0,
"proto_min":0,
"sip":"0::0",
"sip_mask":128,
"sport_max":0,
"sport_min":0
},
}
}
}
rule_tuple4_model = {
"group_1":
{
"20":
{
"rule_cfg":{
"dip":"172.16.17.32",
"dip_mask":24,
"dport_max":45,
"dport_min":45,
"proto_max":17,
"proto_min":17,
"sip":"1.23.2.1",
"sip_mask":24,
"sport_max":67,
"sport_min":67,
},
"rule_type":"tuple4"
}
}
}
def test_acl_post_config():
'''
创建未创建规则
'''
# clean up all config
reset_all_mod_config(sf_helper)
data = rule_tuple6_model
ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_POST , data = data , verbose = global_verbose )
assert ret[0] == 0
def test_acl_post_config_fail():
'''
创建已创建规则,失败
'''
# clean up all config
reset_all_mod_config(sf_helper)
data = rule_tuple6_model
ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_POST , data = data , verbose = global_verbose )
assert ret[0] == 0
ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_POST , data = data , verbose = global_verbose )
assert ret[0] == -1
def test_acl_put_config():
'''
更新已/未创建规则
'''
# clean up all config
reset_all_mod_config(sf_helper)
data = rule_tuple6_model
ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_PUT , data = data , verbose = global_verbose )
assert ret[0] == 0
ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_PUT , data = data , verbose = global_verbose )
assert ret[0] == 0
def test_acl_delete_config():
'''
删除规则
'''
# clean up all config
reset_all_mod_config(sf_helper)
data = rule_tuple6_model
param_1 = { "index" : 20 }
param_2 = { "group" : 1 , "index" : 20 }
# create rule
ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_PUT , data = data , verbose = global_verbose )
assert ret[0] == 0
# delete rule
ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_DELETE , params = param_2 , verbose = global_verbose )
assert ret[0] == 0
# create rule
ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_PUT , data = data , verbose = global_verbose )
assert ret[0] == 0
# delete rule ( url with group )
ret = sf_helper.auto_run_no_login("acl/config/group_1" , GlobalRestValue.ACTION_DELETE , params = param_1 , verbose = global_verbose )
assert ret[0] == 0
# create rule
ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_PUT , data = data , verbose = global_verbose )
assert ret[0] == 0
# delete rule ( url with group and index )
ret = sf_helper.auto_run_no_login("acl/config/group_1/20" , GlobalRestValue.ACTION_DELETE , verbose = global_verbose )
assert ret[0] == 0
def test_acl_get_single_stat():
'''
获取单条/多条计数
'''
# clean up all config
reset_all_mod_config(sf_helper)
data = rule_tuple6_model
param_1 = { "index" : 20 }
param_2 = { "group" : 1 , "index" : 20 }
# create rule
ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_PUT , data = data , verbose = global_verbose )
assert ret[0] == 0
ret = sf_helper.auto_run_no_login("acl/stat" , GlobalRestValue.ACTION_GET , params = param_2 , verbose = global_verbose )
assert ret[0] == 0
ret = sf_helper.auto_run_no_login("acl/stat/group_1" , GlobalRestValue.ACTION_GET , params = param_1 , verbose = global_verbose )
assert ret[0] == 0
ret = sf_helper.auto_run_no_login("acl/stat/group_1/20" , GlobalRestValue.ACTION_GET , verbose = global_verbose )
assert ret[0] == 0
def test_acl_get_too_large_stat():
'''
获取过多计数
'''
# clean up all config
reset_all_mod_config(sf_helper)
#use rule_tuple6_model make 101 rules
# rule_g1_101 = {"group_1":{"%d"%i:rule_tuple6_model["group_1"]["20"] for i in range(1 , 101 + 1)}}
rule_config = {}
for i in range(1 , 101 + 1):
tmpcfg = {
"rule_type":"tuple6",
"rule_cfg":{
"dip":"0::0",
"dip_mask":128,
"dport_max": i ,
"dport_min":0,
"proto_max":0,
"proto_min":0,
"sip":"0::0",
"sip_mask":128,
"sport_max":0,
"sport_min":0,
}
}
tmpdict = {str(i) : tmpcfg}
rule_config.update(tmpdict)
data = {"group_1" : rule_config}
#creat rule
ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_PUT , data = data , verbose = global_verbose )
assert ret[0] == 0
#get stat
ret = sf_helper.auto_run_no_login("acl/stat/group_1" , GlobalRestValue.ACTION_GET , verbose = global_verbose )
assert ret[0] == -1
def test_acl_clear_stat():
'''
清除计数
'''
# clean up all config
reset_all_mod_config(sf_helper)
patch_data = [ { "op" : "replace" , "path" : "/" , "value" : 0 } ]
ret = sf_helper.auto_run_no_login("acl/stat" , GlobalRestValue.ACTION_PATCH, data = patch_data, verbose = global_verbose )
assert ret[0] == 0
def test_acl_sync():
'''
测试acl同步
'''
# clean up all config
reset_all_mod_config(sf_helper)
patch_data = [ { "op" : "replace" , "path" : "/" , "value" : 0 } ]
ret = sf_helper.auto_run_no_login("acl/sync" , GlobalRestValue.ACTION_PATCH, data = patch_data, verbose = global_verbose )
assert ret[0] == 0
time.sleep(4)
def test_get_acl_rule_group():
'''
获取interface acl 组默认配置
'''
# clean up all config
reset_all_mod_config(sf_helper)
param_1 = { "index" : port1_config }
ret = sf_helper.auto_run_no_login("interfaces/config" , GlobalRestValue.ACTION_GET , params = param_1 , verbose = global_verbose )
assert ret[0] == 0
assert ret[1][port1_config]["ingress_config"]["acl_rule_group"] == 1
def test_set_acl_rule_group():
'''
配置interface acl 组
'''
# clean up all config
reset_all_mod_config(sf_helper)
patch_data = [ { "op" : "replace" ,
"path" : "/" + port1_config + "/ingress_config/acl_rule_group" , "value" : 2 } ]
ret = sf_helper.auto_run_no_login("interfaces/config" , GlobalRestValue.ACTION_PATCH, data = patch_data, verbose = global_verbose )
assert ret[0] == 0
param_1 = { "index" : port1_config }
ret = sf_helper.auto_run_no_login("interfaces/config" , GlobalRestValue.ACTION_GET , params = param_1 , verbose = global_verbose )
assert ret[0] == 0
assert ret[1][port1_config]["ingress_config"]["acl_rule_group"] == 2
def test_acl_duplicate_rule():
'''
重复规则测试
'''
# clean up all config
reset_all_mod_config(sf_helper)
### tuple6 ###
#use rule_tuple6_model make 2 rules
data = {"group_1":{"1" : rule_tuple6_model["group_1"]["20"] }}
#creat first rule
ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_PUT , data = data , verbose = global_verbose )
assert ret[0] == 0
data = {"group_1":{"2" : rule_tuple6_model["group_1"]["20"] }}
#creat second rule
ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_PUT , data = data , verbose = global_verbose )
assert ret[0] == -1
### tuple4 ###
data = {"group_1":{"1" : rule_tuple4_model["group_1"]["20"] }}
#creat first rule
ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_PUT , data = data , verbose = global_verbose )
assert ret[0] == 0
data = {"group_1":{"2" : rule_tuple4_model["group_1"]["20"] }}
#creat second rule
ret = sf_helper.auto_run_no_login("acl/config" , GlobalRestValue.ACTION_PUT , data = data , verbose = global_verbose )
assert ret[0] == -1
|
en
| 0.489111
|
创建未创建规则 # clean up all config 创建已创建规则,失败 # clean up all config 更新已/未创建规则 # clean up all config 删除规则 # clean up all config # create rule # delete rule # create rule # delete rule ( url with group ) # create rule # delete rule ( url with group and index ) 获取单条/多条计数 # clean up all config # create rule 获取过多计数 # clean up all config #use rule_tuple6_model make 101 rules # rule_g1_101 = {"group_1":{"%d"%i:rule_tuple6_model["group_1"]["20"] for i in range(1 , 101 + 1)}} #creat rule #get stat 清除计数 # clean up all config 测试acl同步 # clean up all config 获取interface acl 组默认配置 # clean up all config 配置interface acl 组 # clean up all config 重复规则测试 # clean up all config ### tuple6 ### #use rule_tuple6_model make 2 rules #creat first rule #creat second rule ### tuple4 ### #creat first rule #creat second rule
| 1.837273
| 2
|
ajax_upload/tests/tests.py
|
ixc/django-ajax-upload-widget
| 47
|
6628594
|
import json
import os
import unittest
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils.translation import ugettext as _
from ajax_upload.models import UploadedFile
from ajax_upload.widgets import AjaxUploadException
TEST_FILEPATH = os.path.dirname(__file__) + '/files/test.png'
class UploaderTestHelper(object):
def tearDown(self):
# Delete all uploaded files created during testing
for up in UploadedFile.objects.all():
if up.file:
up.file.delete()
def create_uploaded_file(self, **kwargs):
defaults = {
'file': 'test.png'
}
defaults.update(kwargs)
return UploadedFile.objects.create(**defaults)
class AjaxUploadTests(UploaderTestHelper, TestCase):
def test_upload_file_submission_saves_file_with_different_name_and_returns_json_data(self):
post_data = {
'file': open(TEST_FILEPATH)
}
response = self.client.post(reverse('ajax-upload'), post_data)
self.assertEqual(response.status_code, 200)
json_data = json.loads(response.content)
uploaded_file = UploadedFile.objects.get()
self.assertTrue(os.path.exists(uploaded_file.file.path))
self.assertEqual(json_data['path'], uploaded_file.file.url)
# This is a not-so-good test to verify that the filename name is modified
self.assertTrue(len(os.path.basename(uploaded_file.file.name)) > 16)
def test_upload_file_submission_missing_file_returns_error(self):
post_data = {
'file': ''
}
response = self.client.post(reverse('ajax-upload'), post_data)
self.assertEqual(response.status_code, 400)
json_data = json.loads(response.content)
self.assertTrue('errors' in json_data)
self.assertEqual(json_data['errors']['file'][0], _('This field is required.'))
def test_upload_file_get_request_returns_405(self):
response = self.client.get(reverse('ajax-upload'))
self.assertEqual(response.status_code, 405)
class AjaxFileInputTests(UploaderTestHelper, TestCase):
urls = 'ajax_upload.tests.urls'
def test_submit_form_with_uploaded_file_path(self):
# First ajax upload the file to the uploader
post_data = {
'file': open(TEST_FILEPATH)
}
response = self.client.post(reverse('ajax-upload'), post_data)
self.assertEqual(response.status_code, 200)
json_data = json.loads(response.content)
uploaded_file = UploadedFile.objects.get()
self.assertTrue(os.path.exists(uploaded_file.file.path))
# Now submit the original form with the path of the uploaded file
post_data = {
'my_file': json_data['path'],
'my_image': json_data['path'] # We're testing both AjaxFileField and AjaxImageField
}
response = self.client.post(reverse('ajax-uploads-test'), post_data)
self.assertEqual(response.status_code, 200)
parsed = json.loads(response.content)
self.assertEqual(parsed['uploaded_file_name'], json_data['path'].replace(settings.MEDIA_URL, ''))
self.assertEqual(parsed['uploaded_image_name'], json_data['path'].replace(settings.MEDIA_URL, ''))
def test_submit_form_with_empty_path_clears_existing_file(self):
post_data = {
'my_file': '',
'my_image': ''
}
response = self.client.post(reverse('ajax-uploads-test'), post_data)
self.assertEqual(response.status_code, 200)
parsed = json.loads(response.content)
self.assertEqual(parsed['uploaded_file_name'], 'False')
self.assertEqual(parsed['uploaded_image_name'], 'False')
@unittest.skipUnless(settings.MEDIA_URL, 'Requires non-empty MEDIAL_URL')
def test_submit_form_with_external_file_path_returns_error(self):
post_data = {
'my_file': 'http://www.google.com/invalid.txt',
'my_image': 'http://www.google.com/invalid.png'
}
try:
self.client.post(reverse('ajax-uploads-test'), post_data)
except AjaxUploadException, err:
self.assertTrue(str(err).startswith(_('File path not allowed:')))
else:
self.fail()
def test_submit_form_with_internal_file_path_ignores_it_and_retains_original_value(self):
# In this scenario, we're simulating the submission of an form that had
# an existing file specified and didn't change/ajax upload it (eg. an update form).
post_data = {
'my_file': '%ssome/INVALID-path/file.txt' % settings.MEDIA_URL, # invalid path
'my_image': '%ssome/path/image.png' % settings.MEDIA_URL # valid path
# We ignore BOTH valid and invalid paths to prevent the user from setting
# the value to a file that they did not upload
}
response = self.client.post(reverse('ajax-uploads-test'), post_data)
self.assertEqual(response.status_code, 200)
parsed = json.loads(response.content)
self.assertEqual(parsed['uploaded_file_name'], 'some/path/file.txt')
self.assertEqual(parsed['uploaded_image_name'], 'some/path/image.png')
def test_submit_form_normally_with_file_data_in_multipart_format(self):
# Here we will NOT use the AJAX uploader to ensure the file field works normally.
post_data = {
'my_file': open(TEST_FILEPATH),
'my_image': open(TEST_FILEPATH)
}
response = self.client.post(reverse('ajax-uploads-test'), post_data)
self.assertEqual(response.status_code, 200)
parsed = json.loads(response.content)
self.assertTrue('errors' not in parsed)
self.assertTrue('uploaded_file_name' in parsed)
self.assertTrue('uploaded_image_name' in parsed)
|
import json
import os
import unittest
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils.translation import ugettext as _
from ajax_upload.models import UploadedFile
from ajax_upload.widgets import AjaxUploadException
TEST_FILEPATH = os.path.dirname(__file__) + '/files/test.png'
class UploaderTestHelper(object):
def tearDown(self):
# Delete all uploaded files created during testing
for up in UploadedFile.objects.all():
if up.file:
up.file.delete()
def create_uploaded_file(self, **kwargs):
defaults = {
'file': 'test.png'
}
defaults.update(kwargs)
return UploadedFile.objects.create(**defaults)
class AjaxUploadTests(UploaderTestHelper, TestCase):
def test_upload_file_submission_saves_file_with_different_name_and_returns_json_data(self):
post_data = {
'file': open(TEST_FILEPATH)
}
response = self.client.post(reverse('ajax-upload'), post_data)
self.assertEqual(response.status_code, 200)
json_data = json.loads(response.content)
uploaded_file = UploadedFile.objects.get()
self.assertTrue(os.path.exists(uploaded_file.file.path))
self.assertEqual(json_data['path'], uploaded_file.file.url)
# This is a not-so-good test to verify that the filename name is modified
self.assertTrue(len(os.path.basename(uploaded_file.file.name)) > 16)
def test_upload_file_submission_missing_file_returns_error(self):
post_data = {
'file': ''
}
response = self.client.post(reverse('ajax-upload'), post_data)
self.assertEqual(response.status_code, 400)
json_data = json.loads(response.content)
self.assertTrue('errors' in json_data)
self.assertEqual(json_data['errors']['file'][0], _('This field is required.'))
def test_upload_file_get_request_returns_405(self):
response = self.client.get(reverse('ajax-upload'))
self.assertEqual(response.status_code, 405)
class AjaxFileInputTests(UploaderTestHelper, TestCase):
urls = 'ajax_upload.tests.urls'
def test_submit_form_with_uploaded_file_path(self):
# First ajax upload the file to the uploader
post_data = {
'file': open(TEST_FILEPATH)
}
response = self.client.post(reverse('ajax-upload'), post_data)
self.assertEqual(response.status_code, 200)
json_data = json.loads(response.content)
uploaded_file = UploadedFile.objects.get()
self.assertTrue(os.path.exists(uploaded_file.file.path))
# Now submit the original form with the path of the uploaded file
post_data = {
'my_file': json_data['path'],
'my_image': json_data['path'] # We're testing both AjaxFileField and AjaxImageField
}
response = self.client.post(reverse('ajax-uploads-test'), post_data)
self.assertEqual(response.status_code, 200)
parsed = json.loads(response.content)
self.assertEqual(parsed['uploaded_file_name'], json_data['path'].replace(settings.MEDIA_URL, ''))
self.assertEqual(parsed['uploaded_image_name'], json_data['path'].replace(settings.MEDIA_URL, ''))
def test_submit_form_with_empty_path_clears_existing_file(self):
post_data = {
'my_file': '',
'my_image': ''
}
response = self.client.post(reverse('ajax-uploads-test'), post_data)
self.assertEqual(response.status_code, 200)
parsed = json.loads(response.content)
self.assertEqual(parsed['uploaded_file_name'], 'False')
self.assertEqual(parsed['uploaded_image_name'], 'False')
@unittest.skipUnless(settings.MEDIA_URL, 'Requires non-empty MEDIAL_URL')
def test_submit_form_with_external_file_path_returns_error(self):
post_data = {
'my_file': 'http://www.google.com/invalid.txt',
'my_image': 'http://www.google.com/invalid.png'
}
try:
self.client.post(reverse('ajax-uploads-test'), post_data)
except AjaxUploadException, err:
self.assertTrue(str(err).startswith(_('File path not allowed:')))
else:
self.fail()
def test_submit_form_with_internal_file_path_ignores_it_and_retains_original_value(self):
# In this scenario, we're simulating the submission of an form that had
# an existing file specified and didn't change/ajax upload it (eg. an update form).
post_data = {
'my_file': '%ssome/INVALID-path/file.txt' % settings.MEDIA_URL, # invalid path
'my_image': '%ssome/path/image.png' % settings.MEDIA_URL # valid path
# We ignore BOTH valid and invalid paths to prevent the user from setting
# the value to a file that they did not upload
}
response = self.client.post(reverse('ajax-uploads-test'), post_data)
self.assertEqual(response.status_code, 200)
parsed = json.loads(response.content)
self.assertEqual(parsed['uploaded_file_name'], 'some/path/file.txt')
self.assertEqual(parsed['uploaded_image_name'], 'some/path/image.png')
def test_submit_form_normally_with_file_data_in_multipart_format(self):
# Here we will NOT use the AJAX uploader to ensure the file field works normally.
post_data = {
'my_file': open(TEST_FILEPATH),
'my_image': open(TEST_FILEPATH)
}
response = self.client.post(reverse('ajax-uploads-test'), post_data)
self.assertEqual(response.status_code, 200)
parsed = json.loads(response.content)
self.assertTrue('errors' not in parsed)
self.assertTrue('uploaded_file_name' in parsed)
self.assertTrue('uploaded_image_name' in parsed)
|
en
| 0.94889
|
# Delete all uploaded files created during testing # This is a not-so-good test to verify that the filename name is modified # First ajax upload the file to the uploader # Now submit the original form with the path of the uploaded file # We're testing both AjaxFileField and AjaxImageField # In this scenario, we're simulating the submission of an form that had # an existing file specified and didn't change/ajax upload it (eg. an update form). # invalid path # valid path # We ignore BOTH valid and invalid paths to prevent the user from setting # the value to a file that they did not upload # Here we will NOT use the AJAX uploader to ensure the file field works normally.
| 2.416498
| 2
|
entry.py
|
RyderTheCoder/gui-learning
| 0
|
6628595
|
from Tkinter import *
top = Tk()
L1 = Label(top, text="User Name")
L1.pack( side = LEFT)
E1 = Entry(top, bd =5)
E1.pack(side = RIGHT)
L2 = Label(top, text="And Password")
L2.pack( side = LEFT)
E2 = Entry(top, bd =5)
E2.pack(side = RIGHT)
top.mainloop()
|
from Tkinter import *
top = Tk()
L1 = Label(top, text="User Name")
L1.pack( side = LEFT)
E1 = Entry(top, bd =5)
E1.pack(side = RIGHT)
L2 = Label(top, text="And Password")
L2.pack( side = LEFT)
E2 = Entry(top, bd =5)
E2.pack(side = RIGHT)
top.mainloop()
|
none
| 1
| 3.33076
| 3
|
|
appinit_backend/app/lib/permissions/apis/edit.py
|
app-init/backend
| 1
|
6628596
|
from appinit_backend.lib.imports import *
import appinit_backend.app.lib.permissions.apis.get as get_api
def call(**kwargs):
manager = Manager()
db = manager.db("appinit")
if "id" in kwargs:
if "safe_name" in kwargs:
del kwargs["safe_name"]
cursor = db.permissions.find_one({"module": kwargs["module"]})
if cursor == None:
module = {
"module": kwargs['module'],
"route": kwargs['route'],
"permissions": kwargs['permissions']
}
db.permissions.insert(module)
return module
else:
cursor = manager.parse_cursor_object(cursor)
document = {}
if "permissions" in kwargs and kwargs["permissions"] != cursor["permissions"]:
document["permissions"] = kwargs["permissions"]
else:
document["permissions"] = cursor["permissions"]
db.permissions.update({"module": kwargs["module"]}, {"$set": document})
# every api has this added as an attribute
# it reinitializes the change
# Modules.reinit()
return get_api.call(id=kwargs["module"])
|
from appinit_backend.lib.imports import *
import appinit_backend.app.lib.permissions.apis.get as get_api
def call(**kwargs):
manager = Manager()
db = manager.db("appinit")
if "id" in kwargs:
if "safe_name" in kwargs:
del kwargs["safe_name"]
cursor = db.permissions.find_one({"module": kwargs["module"]})
if cursor == None:
module = {
"module": kwargs['module'],
"route": kwargs['route'],
"permissions": kwargs['permissions']
}
db.permissions.insert(module)
return module
else:
cursor = manager.parse_cursor_object(cursor)
document = {}
if "permissions" in kwargs and kwargs["permissions"] != cursor["permissions"]:
document["permissions"] = kwargs["permissions"]
else:
document["permissions"] = cursor["permissions"]
db.permissions.update({"module": kwargs["module"]}, {"$set": document})
# every api has this added as an attribute
# it reinitializes the change
# Modules.reinit()
return get_api.call(id=kwargs["module"])
|
en
| 0.910906
|
# every api has this added as an attribute # it reinitializes the change # Modules.reinit()
| 2.277888
| 2
|
Raif/pyspark/run_calc_05.py
|
musicnova/7a_task
| 0
|
6628597
|
<reponame>musicnova/7a_task<filename>Raif/pyspark/run_calc_05.py
# -*- coding: utf-8 -*-
import os
from datetime import datetime, timedelta
from airflow import DAG
from airflow.models import Variable
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
u"""
Airflow script for calc_05
"""
ALERT_MAILS = Variable.get("gv_ic_admin_lst")
DAG_NAME = str(os.path.basename(__file__).split('.')[0])
OWNER = 'User Airflow'
DEPENDS_ON_PAST = True
EMAIL_ON_FAILURE = True
EMAIL_ON_RETRY = False
RETRIES = int(Variable.get('gv_dag_retries'))
POOL = 'data_pool'
MAIN_VAR_NAME = 'gv_' + DAG_NAME
SRV_LIST = Variable.get('gv_psg_kafka_srv_list')
QUEUE_NAME = Variable.get('gv_psg_kafka_queue_name')
PARTITIONS = Variable.get('gv_psg_kafka_partitions')
LOADDTTM=str(datetime.now()).replace(" ","_")
WAIT_HRS = 1
start_dt = datetime(2018, 11, 15)
# setting default arguments of dag
default_args = {
'owner': OWNER,
'depends_on_past': DEPENDS_ON_PAST,
'start_date': start_dt,
'email': ALERT_MAILS,
'email_on_failure': EMAIL_ON_FAILURE,
'email_on_retry': EMAIL_ON_RETRY,
'retries': RETRIES,
'pool': POOL
}
# Creating DAG with parameters
dag = DAG(DAG_NAME, default_args=default_args, schedule_interval="0 */4 * * *")
dag.doc_md = __doc__
dag_start = DummyOperator(
task_id='dag_start',
dag=dag
)
dag_end = DummyOperator(
task_id='dag_end',
dag=dag
)
algo_bash_cmd = """
kinit airflow/airflow@HOME.LOCAL -kt /opt/airflow/airflow_home/kt/airflow.keytab
spark-submit --master yarn \
--num-executors {{ params.partitions }} \
--executor-cores 3 \
--executor-memory 6G \
--driver-cores 5 \
--driver-memory 10G \
--conf 'spark.driver.extraJavaOptions=-Djava.security.auth.login.config={{ params.home }}/kt/kafka_client.conf' \
--conf 'spark.executor.extraJavaOptions=-Djava.security.auth.login.config={{ params.home }}/kt/kafka_client.conf' \
--packages org.apache.spark:spark-sql-kafka-0-10_2.11:2.1.1 \
--jars """+"/opt/airflow/airflow-home/utils/HiveHomeUDF-0.0.1.jar"+""" \
{{ params.home }}/dags/pyspark/prod_data/calc_05.py {{ params.srv_list }} {{ params.queue_name }} {{ params.partitions }} {{ params.loaddttm }}
"""
algo_bash_load = BashOperator(
task_id='prod_data_algo_calc_05',
bash_command=algo_bash_cmd,
execution_timeout=timedelta(hours=WAIT_HRS),
params={
'home': '/opt/airflow/airflow_home',
'srv_list': SRV_LIST,
'queue_name': QUEUE_NAME,
'partitions': PARTITIONS,
'loaddttm': LOADDTTM
},
wait_for_downstream=True,
dag=dag
)
dag_start.set_downstream(algo_bash_load)
algo_bash_load.set_downstream(dag_end)
|
# -*- coding: utf-8 -*-
import os
from datetime import datetime, timedelta
from airflow import DAG
from airflow.models import Variable
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
u"""
Airflow script for calc_05
"""
ALERT_MAILS = Variable.get("gv_ic_admin_lst")
DAG_NAME = str(os.path.basename(__file__).split('.')[0])
OWNER = 'User Airflow'
DEPENDS_ON_PAST = True
EMAIL_ON_FAILURE = True
EMAIL_ON_RETRY = False
RETRIES = int(Variable.get('gv_dag_retries'))
POOL = 'data_pool'
MAIN_VAR_NAME = 'gv_' + DAG_NAME
SRV_LIST = Variable.get('gv_psg_kafka_srv_list')
QUEUE_NAME = Variable.get('gv_psg_kafka_queue_name')
PARTITIONS = Variable.get('gv_psg_kafka_partitions')
LOADDTTM=str(datetime.now()).replace(" ","_")
WAIT_HRS = 1
start_dt = datetime(2018, 11, 15)
# setting default arguments of dag
default_args = {
'owner': OWNER,
'depends_on_past': DEPENDS_ON_PAST,
'start_date': start_dt,
'email': ALERT_MAILS,
'email_on_failure': EMAIL_ON_FAILURE,
'email_on_retry': EMAIL_ON_RETRY,
'retries': RETRIES,
'pool': POOL
}
# Creating DAG with parameters
dag = DAG(DAG_NAME, default_args=default_args, schedule_interval="0 */4 * * *")
dag.doc_md = __doc__
dag_start = DummyOperator(
task_id='dag_start',
dag=dag
)
dag_end = DummyOperator(
task_id='dag_end',
dag=dag
)
algo_bash_cmd = """
kinit airflow/airflow@HOME.LOCAL -kt /opt/airflow/airflow_home/kt/airflow.keytab
spark-submit --master yarn \
--num-executors {{ params.partitions }} \
--executor-cores 3 \
--executor-memory 6G \
--driver-cores 5 \
--driver-memory 10G \
--conf 'spark.driver.extraJavaOptions=-Djava.security.auth.login.config={{ params.home }}/kt/kafka_client.conf' \
--conf 'spark.executor.extraJavaOptions=-Djava.security.auth.login.config={{ params.home }}/kt/kafka_client.conf' \
--packages org.apache.spark:spark-sql-kafka-0-10_2.11:2.1.1 \
--jars """+"/opt/airflow/airflow-home/utils/HiveHomeUDF-0.0.1.jar"+""" \
{{ params.home }}/dags/pyspark/prod_data/calc_05.py {{ params.srv_list }} {{ params.queue_name }} {{ params.partitions }} {{ params.loaddttm }}
"""
algo_bash_load = BashOperator(
task_id='prod_data_algo_calc_05',
bash_command=algo_bash_cmd,
execution_timeout=timedelta(hours=WAIT_HRS),
params={
'home': '/opt/airflow/airflow_home',
'srv_list': SRV_LIST,
'queue_name': QUEUE_NAME,
'partitions': PARTITIONS,
'loaddttm': LOADDTTM
},
wait_for_downstream=True,
dag=dag
)
dag_start.set_downstream(algo_bash_load)
algo_bash_load.set_downstream(dag_end)
|
en
| 0.160199
|
# -*- coding: utf-8 -*- Airflow script for calc_05 # setting default arguments of dag # Creating DAG with parameters kinit airflow/airflow@HOME.LOCAL -kt /opt/airflow/airflow_home/kt/airflow.keytab
spark-submit --master yarn \
--num-executors {{ params.partitions }} \
--executor-cores 3 \
--executor-memory 6G \
--driver-cores 5 \
--driver-memory 10G \
--conf 'spark.driver.extraJavaOptions=-Djava.security.auth.login.config={{ params.home }}/kt/kafka_client.conf' \
--conf 'spark.executor.extraJavaOptions=-Djava.security.auth.login.config={{ params.home }}/kt/kafka_client.conf' \
--packages org.apache.spark:spark-sql-kafka-0-10_2.11:2.1.1 \
--jars \
{{ params.home }}/dags/pyspark/prod_data/calc_05.py {{ params.srv_list }} {{ params.queue_name }} {{ params.partitions }} {{ params.loaddttm }}
| 2.033567
| 2
|
03-urls/pylons/app.py
|
sanogotech/benchmarchpythonweb
| 0
|
6628598
|
import os
import sys
from samples import features
from samples import sections
conf_dir = os.path.dirname(os.path.abspath(__file__))
conf_dir = os.path.join(conf_dir, 'helloworld')
sys.path.insert(0, conf_dir)
from helloworld.config.middleware import make_app
main = make_app({}, full_stack=False, static_files=False, cache_dir='')
routes_map = main.config['routes.map']
routes_map.connect('welcome', '/{controller}', action='index')
routes_map.connect('user', '/{user}', controller='welcome', action='user')
routes_map.connect('repo', '/{user}/{repo}',
controller='welcome', action='repo')
for s in sections:
for f in features:
route_name = '%s-%s' % (s, f)
routes_map.connect(route_name, '/%s/%s' % (s, f),
controller='welcome', action='index')
for f in features:
routes_map.connect(f, '/{user}/{repo}/%s' % f,
controller='welcome', action='index')
|
import os
import sys
from samples import features
from samples import sections
conf_dir = os.path.dirname(os.path.abspath(__file__))
conf_dir = os.path.join(conf_dir, 'helloworld')
sys.path.insert(0, conf_dir)
from helloworld.config.middleware import make_app
main = make_app({}, full_stack=False, static_files=False, cache_dir='')
routes_map = main.config['routes.map']
routes_map.connect('welcome', '/{controller}', action='index')
routes_map.connect('user', '/{user}', controller='welcome', action='user')
routes_map.connect('repo', '/{user}/{repo}',
controller='welcome', action='repo')
for s in sections:
for f in features:
route_name = '%s-%s' % (s, f)
routes_map.connect(route_name, '/%s/%s' % (s, f),
controller='welcome', action='index')
for f in features:
routes_map.connect(f, '/{user}/{repo}/%s' % f,
controller='welcome', action='index')
|
none
| 1
| 2.306
| 2
|
|
tensorflow_probability/python/experimental/mcmc/particle_filter_test.py
|
brianwa84/probability
| 1
|
6628599
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the _License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for particle filtering."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import test_util
tfb = tfp.bijectors
tfd = tfp.distributions
@test_util.test_all_tf_execution_regimes
class _ParticleFilterTest(test_util.TestCase):
  """Tests for `tfp.experimental.mcmc` particle filtering.

  Abstract base: concrete subclasses must define `self.dtype` (e.g.
  `np.float32`), which the tests use for observations and particle states.
  """

  def test_random_walk(self):
    """Checks pure dynamics of a biased random walk under flat observations."""
    initial_state_prior = tfd.JointDistributionNamed({
        'position': tfd.Deterministic(0.)})

    # Biased random walk.
    def particle_dynamics(_, previous_state):
      state_shape = ps.shape(previous_state['position'])
      return tfd.JointDistributionNamed({
          'position': tfd.TransformedDistribution(
              tfd.Bernoulli(probs=tf.fill(state_shape, 0.75),
                            dtype=self.dtype),
              tfb.Shift(previous_state['position']))})

    # Completely uninformative observations allowing a test
    # of the pure dynamics.
    def particle_observations(_, state):
      state_shape = ps.shape(state['position'])
      return tfd.Uniform(low=tf.fill(state_shape, -100.),
                         high=tf.fill(state_shape, 100.))

    observations = tf.zeros((9,), dtype=self.dtype)
    trajectories, _ = self.evaluate(
        tfp.experimental.mcmc.infer_trajectories(
            observations=observations,
            initial_state_prior=initial_state_prior,
            transition_fn=particle_dynamics,
            observation_fn=particle_observations,
            num_particles=16384,
            seed=test_util.test_seed()))
    position = trajectories['position']

    # The trajectories have the following properties:
    # 1. they lie completely in the range [0, 8]
    self.assertAllInRange(position, 0., 8.)
    # 2. each step lies in the range [0, 1]
    self.assertAllInRange(position[1:] - position[:-1], 0., 1.)
    # 3. the expectation and variance of the final positions are 6 and 1.5.
    self.assertAllClose(tf.reduce_mean(position[-1]), 6., atol=0.1)
    self.assertAllClose(tf.math.reduce_variance(position[-1]), 1.5, atol=0.1)

  def test_batch_of_filters(self):
    """Runs a batch of independent 1D position/velocity filters at once."""

    batch_shape = [3, 2]
    num_particles = 1000
    num_timesteps = 40

    # Batch of priors on object 1D positions and velocities.
    initial_state_prior = tfd.JointDistributionNamed({
        'position': tfd.Normal(loc=0., scale=tf.ones(batch_shape)),
        'velocity': tfd.Normal(loc=0., scale=tf.ones(batch_shape) * 0.1)})

    def transition_fn(_, previous_state):
      return tfd.JointDistributionNamed({
          'position': tfd.Normal(
              loc=previous_state['position'] + previous_state['velocity'],
              scale=0.1),
          'velocity': tfd.Normal(loc=previous_state['velocity'], scale=0.01)})

    def observation_fn(_, state):
      return tfd.Normal(loc=state['position'], scale=0.1)

    # Batch of synthetic observations: linear motion from random initial
    # positions with random constant velocities.
    true_initial_positions = np.random.randn(*batch_shape).astype(self.dtype)
    true_velocities = 0.1 * np.random.randn(
        *batch_shape).astype(self.dtype)
    observed_positions = (
        true_velocities *
        np.arange(num_timesteps).astype(
            self.dtype)[..., tf.newaxis, tf.newaxis] +
        true_initial_positions)

    (particles,
     log_weights,
     parent_indices,
     incremental_log_marginal_likelihoods) = self.evaluate(
         tfp.experimental.mcmc.particle_filter(
             observations=observed_positions,
             initial_state_prior=initial_state_prior,
             transition_fn=transition_fn,
             observation_fn=observation_fn,
             num_particles=num_particles,
             seed=test_util.test_seed()))

    # All returned quantities should carry the batch shape after the
    # [num_timesteps, num_particles] leading dims.
    self.assertAllEqual(particles['position'].shape,
                        [num_timesteps, num_particles] + batch_shape)
    self.assertAllEqual(particles['velocity'].shape,
                        [num_timesteps, num_particles] + batch_shape)
    self.assertAllEqual(parent_indices.shape,
                        [num_timesteps, num_particles] + batch_shape)
    self.assertAllEqual(incremental_log_marginal_likelihoods.shape,
                        [num_timesteps] + batch_shape)

    self.assertAllClose(
        self.evaluate(
            tf.reduce_sum(tf.exp(log_weights) *
                          particles['position'], axis=1)),
        observed_positions,
        atol=0.1)

    velocity_means = tf.reduce_sum(tf.exp(log_weights) *
                                   particles['velocity'], axis=1)
    self.assertAllClose(
        self.evaluate(tf.reduce_mean(velocity_means, axis=0)),
        true_velocities, atol=0.05)

    # Uncertainty in velocity should decrease over time.
    velocity_stddev = self.evaluate(
        tf.math.reduce_std(particles['velocity'], axis=1))
    self.assertAllLess((velocity_stddev[-1] - velocity_stddev[0]), 0.)

    trajectories = self.evaluate(
        tfp.experimental.mcmc.reconstruct_trajectories(particles,
                                                       parent_indices))
    self.assertAllEqual([num_timesteps, num_particles] + batch_shape,
                        trajectories['position'].shape)
    self.assertAllEqual([num_timesteps, num_particles] + batch_shape,
                        trajectories['velocity'].shape)

    # Verify that `infer_trajectories` also works on batches.
    trajectories, incremental_log_marginal_likelihoods = self.evaluate(
        tfp.experimental.mcmc.infer_trajectories(
            observations=observed_positions,
            initial_state_prior=initial_state_prior,
            transition_fn=transition_fn,
            observation_fn=observation_fn,
            num_particles=num_particles,
            seed=test_util.test_seed()))
    self.assertAllEqual([num_timesteps, num_particles] + batch_shape,
                        trajectories['position'].shape)
    self.assertAllEqual([num_timesteps, num_particles] + batch_shape,
                        trajectories['velocity'].shape)
    self.assertAllEqual(incremental_log_marginal_likelihoods.shape,
                        [num_timesteps] + batch_shape)

  def test_reconstruct_trajectories_toy_example(self):
    """Checks `reconstruct_trajectories` against a hand-computed genealogy."""
    particles = tf.convert_to_tensor([[1, 2, 3], [4, 5, 6,], [7, 8, 9]])
    # 1 -- 4 -- 7
    # 2 \/ 5 .- 8
    # 3 /\ 6 /-- 9
    parent_indices = tf.convert_to_tensor([[0, 1, 2], [0, 2, 1], [0, 2, 2]])

    trajectories = self.evaluate(
        tfp.experimental.mcmc.reconstruct_trajectories(particles,
                                                       parent_indices))
    self.assertAllEqual(
        np.array([[1, 2, 2], [4, 6, 6], [7, 8, 9]]), trajectories)

  def test_epidemiological_model(self):
    """Filters a discrete SIR epidemic model against noisy case counts."""
    # A toy, discrete version of an SIR (Susceptible, Infected, Recovered)
    # model (https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology)

    population_size = 1000
    infection_rate = tf.convert_to_tensor(1.1)
    infectious_period = tf.convert_to_tensor(8.0)

    initial_state_prior = tfd.JointDistributionNamed({
        'susceptible': tfd.Deterministic(999.),
        'infected': tfd.Deterministic(1.),
        'new_infections': tfd.Deterministic(1.),
        'new_recoveries': tfd.Deterministic(0.)})

    # Dynamics model: new infections and recoveries are given by the SIR
    # model with Poisson noise.
    def infection_dynamics(_, previous_state):
      new_infections = tfd.Poisson(
          infection_rate * previous_state['infected'] *
          previous_state['susceptible'] / population_size)
      new_recoveries = tfd.Poisson(previous_state['infected'] /
                                   infectious_period)

      def susceptible(new_infections):
        return tfd.Deterministic(
            ps.maximum(
                0., previous_state['susceptible'] - new_infections))

      def infected(new_infections, new_recoveries):
        return tfd.Deterministic(
            ps.maximum(
                0.,
                previous_state['infected'] + new_infections - new_recoveries))

      return tfd.JointDistributionNamed({
          'new_infections': new_infections,
          'new_recoveries': new_recoveries,
          'susceptible': susceptible,
          'infected': infected})

    # Observation model: each day we detect new cases, noisily.
    def infection_observations(_, state):
      return tfd.Poisson(state['infected'])

    # pylint: disable=bad-whitespace
    observations = tf.convert_to_tensor([
        0., 4., 1., 5., 23., 27., 75., 127., 248., 384., 540., 683.,
        714., 611., 561., 493., 385., 348., 300., 277., 249., 219., 216., 174.,
        132., 122., 115., 99., 76., 84., 77., 56., 42., 56., 46., 38.,
        34., 44., 25., 27.])
    # pylint: enable=bad-whitespace

    trajectories, _ = self.evaluate(
        tfp.experimental.mcmc.infer_trajectories(
            observations=observations,
            initial_state_prior=initial_state_prior,
            transition_fn=infection_dynamics,
            observation_fn=infection_observations,
            num_particles=100,
            seed=test_util.test_seed()))

    # The susceptible population should decrease over time.
    self.assertAllLessEqual(
        trajectories['susceptible'][1:, ...] -
        trajectories['susceptible'][:-1, ...],
        0.0)

  def test_data_driven_proposal(self):
    """Uses observation-driven proposals when the dynamics are uninformative."""

    num_particles = 100
    observations = tf.convert_to_tensor([60., -179.2, 1337.42])

    # Define a system constrained primarily by observations, where proposing
    # from the dynamics would be a bad fit.
    initial_state_prior = tfd.Normal(loc=0., scale=1e6)
    transition_fn = (
        lambda _, previous_state: tfd.Normal(loc=previous_state, scale=1e6))
    observation_fn = lambda _, state: tfd.Normal(loc=state, scale=0.1)
    initial_state_proposal = tfd.Normal(loc=observations[0], scale=0.1)
    proposal_fn = (lambda step, state: tfd.Normal(  # pylint: disable=g-long-lambda
        loc=tf.ones_like(state) * observations[step + 1], scale=1.0))

    trajectories, _ = self.evaluate(
        tfp.experimental.mcmc.infer_trajectories(
            observations=observations,
            initial_state_prior=initial_state_prior,
            transition_fn=transition_fn,
            observation_fn=observation_fn,
            num_particles=num_particles,
            initial_state_proposal=initial_state_proposal,
            proposal_fn=proposal_fn,
            seed=test_util.test_seed()))
    # Particles should track the observed values closely.
    self.assertAllClose(trajectories,
                        tf.convert_to_tensor(
                            tf.convert_to_tensor(
                                observations)[..., tf.newaxis] *
                            tf.ones([num_particles])), atol=1.0)

  def test_estimated_prob_approximates_true_prob(self):
    """Compares particle-filter estimates to exact Kalman-filter results."""

    # Draw simulated data from a 2D linear Gaussian system.
    initial_state_prior = tfd.MultivariateNormalDiag(
        loc=0., scale_diag=(1., 1.))
    transition_matrix = tf.convert_to_tensor([[1., -0.5], [0.4, -1.]])
    transition_noise = tfd.MultivariateNormalTriL(
        loc=1., scale_tril=tf.convert_to_tensor([[0.3, 0], [-0.1, 0.2]]))
    observation_matrix = tf.convert_to_tensor([[0.1, 1.], [1., 0.2]])
    observation_noise = tfd.MultivariateNormalTriL(
        loc=-0.3, scale_tril=tf.convert_to_tensor([[0.5, 0], [0.1, 0.5]]))
    model = tfd.LinearGaussianStateSpaceModel(
        num_timesteps=20,
        initial_state_prior=initial_state_prior,
        transition_matrix=transition_matrix,
        transition_noise=transition_noise,
        observation_matrix=observation_matrix,
        observation_noise=observation_noise)
    observations = self.evaluate(
        model.sample(seed=test_util.test_seed()))
    # Exact answers from the closed-form Kalman filter.
    (lps, filtered_means,
     _, _, _, _, _) = self.evaluate(model.forward_filter(observations))

    # Approximate the filtering means and marginal likelihood(s) using
    # the particle filter.
    # pylint: disable=g-long-lambda
    (particles, log_weights, _,
     estimated_incremental_log_marginal_likelihoods) = self.evaluate(
         tfp.experimental.mcmc.particle_filter(
             observations=observations,
             initial_state_prior=initial_state_prior,
             transition_fn=lambda _, previous_state: tfd.MultivariateNormalTriL(
                 loc=transition_noise.loc + tf.linalg.matvec(
                     transition_matrix, previous_state),
                 scale_tril=transition_noise.scale_tril),
             observation_fn=lambda _, state: tfd.MultivariateNormalTriL(
                 loc=observation_noise.loc + tf.linalg.matvec(
                     observation_matrix, state),
                 scale_tril=observation_noise.scale_tril),
             num_particles=1024,
             seed=test_util.test_seed()))
    # pylint: enable=g-long-lambda

    particle_means = np.sum(
        particles * np.exp(log_weights)[..., np.newaxis], axis=1)
    self.assertAllClose(filtered_means, particle_means, atol=0.1, rtol=0.1)

    self.assertAllClose(
        lps, estimated_incremental_log_marginal_likelihoods, atol=0.6)

  def test_proposal_weights_dont_affect_marginal_likelihood(self):
    """Verifies marginal likelihood is invariant to the proposal choice."""
    observation = np.array([-1.3, 0.7]).astype(self.dtype)
    # This particle filter has proposals different from the dynamics,
    # so internally it will use proposal weights in addition to observation
    # weights. It should still get the observation likelihood correct.
    _, lps = self.evaluate(tfp.experimental.mcmc.infer_trajectories(
        observation,
        initial_state_prior=tfd.Normal(loc=0., scale=1.),
        transition_fn=lambda _, x: tfd.Normal(loc=x, scale=1.),
        observation_fn=lambda _, x: tfd.Normal(loc=x, scale=1.),
        initial_state_proposal=tfd.Normal(loc=0., scale=5.),
        proposal_fn=lambda _, x: tfd.Normal(loc=x, scale=5.),
        num_particles=2048,
        seed=test_util.test_seed()))

    # Compare marginal likelihood against that
    # from the true (jointly normal) marginal distribution.
    y1_marginal_dist = tfd.Normal(loc=0., scale=np.sqrt(1. + 1.))
    y2_conditional_dist = (
        lambda y1: tfd.Normal(loc=y1 / 2., scale=np.sqrt(5. / 2.)))
    true_lps = [y1_marginal_dist.log_prob(observation[0]),
                y2_conditional_dist(observation[0]).log_prob(observation[1])]
    # The following line passes at atol = 0.01 if num_particles = 32768.
    self.assertAllClose(true_lps, lps, atol=0.2)

  def test_can_step_dynamics_faster_than_observations(self):
    """Runs multiple transition steps per observation (SHM integration)."""
    initial_state_prior = tfd.JointDistributionNamed({
        'position': tfd.Deterministic(1.),
        'velocity': tfd.Deterministic(0.)
    })

    # Use 100 steps between observations to integrate a simple harmonic
    # oscillator.
    dt = 0.01
    def simple_harmonic_motion_transition_fn(_, state):
      return tfd.JointDistributionNamed({
          'position': tfd.Normal(
              loc=state['position'] + dt * state['velocity'], scale=dt*0.01),
          'velocity': tfd.Normal(
              loc=state['velocity'] - dt * state['position'], scale=dt*0.01)
      })

    def observe_position(_, state):
      return tfd.Normal(loc=state['position'], scale=0.01)

    particles, _, _, lps = self.evaluate(tfp.experimental.mcmc.particle_filter(
        # 'Observing' the values we'd expect from a proper integrator should
        # give high likelihood if our discrete approximation is good.
        observations=tf.convert_to_tensor([tf.math.cos(0.),
                                           tf.math.cos(1.)]),
        initial_state_prior=initial_state_prior,
        transition_fn=simple_harmonic_motion_transition_fn,
        observation_fn=observe_position,
        num_particles=1024,
        num_transitions_per_observation=100,
        seed=test_util.test_seed()))

    self.assertLen(particles['position'], 101)
    self.assertAllClose(np.mean(particles['position'], axis=-1),
                        tf.math.cos(dt * np.arange(101)),
                        atol=0.04)
    self.assertLen(lps, 101)
    self.assertGreater(lps[0], 3.)
    self.assertGreater(lps[-1], 3.)

  def test_custom_trace_fn(self):
    """Checks a user-supplied `trace_fn` against post-hoc statistics."""

    def trace_fn(state, _):
      # Traces the mean and stddev of the particle population at each step.
      weights = tf.exp(state.log_weights)
      mean = tf.reduce_sum(weights * state.particles, axis=0)
      variance = tf.reduce_sum(
          weights * (state.particles - mean[tf.newaxis, ...])**2)
      return {'mean': mean,
              'stddev': tf.sqrt(variance),
              # In real usage we would likely not track the particles and
              # weights. We keep them here just so we can double-check the
              # stats, below.
              'particles': state.particles,
              'weights': weights}

    results = self.evaluate(
        tfp.experimental.mcmc.particle_filter(
            observations=tf.convert_to_tensor([1., 3., 5., 7., 9.]),
            initial_state_prior=tfd.Normal(0., 1.),
            transition_fn=lambda _, state: tfd.Normal(state, 1.),
            observation_fn=lambda _, state: tfd.Normal(state, 1.),
            num_particles=1024,
            trace_fn=trace_fn,
            seed=test_util.test_seed()))

    # Verify that posterior means are increasing.
    self.assertAllGreater(results['mean'][1:] - results['mean'][:-1], 0.)

    # Check that our traced means and scales match values computed
    # by averaging over particles after the fact.
    all_means = self.evaluate(tf.reduce_sum(
        results['weights'] * results['particles'], axis=1))
    all_variances = self.evaluate(
        tf.reduce_sum(
            results['weights'] *
            (results['particles'] - all_means[..., tf.newaxis])**2,
            axis=1))
    self.assertAllClose(results['mean'], all_means)
    self.assertAllClose(results['stddev'], np.sqrt(all_variances))

  def test_step_indices_to_trace(self):
    """Exercises `trace_criterion_fn` and tracing only the final state."""
    num_particles = 1024
    (particles_1_3,
     log_weights_1_3,
     parent_indices_1_3,
     incremental_log_marginal_likelihood_1_3) = self.evaluate(
         tfp.experimental.mcmc.particle_filter(
             observations=tf.convert_to_tensor([1., 3., 5., 7., 9.]),
             initial_state_prior=tfd.Normal(0., 1.),
             transition_fn=lambda _, state: tfd.Normal(state, 10.),
             observation_fn=lambda _, state: tfd.Normal(state, 0.1),
             num_particles=num_particles,
             trace_criterion_fn=lambda s, r: ps.logical_or(  # pylint: disable=g-long-lambda
                 ps.equal(r.steps, 2),
                 ps.equal(r.steps, 4)),
             static_trace_allocation_size=2,
             seed=test_util.test_seed()))
    # Only steps 2 and 4 should have been traced.
    self.assertLen(particles_1_3, 2)
    self.assertLen(log_weights_1_3, 2)
    self.assertLen(parent_indices_1_3, 2)
    self.assertLen(incremental_log_marginal_likelihood_1_3, 2)
    means = np.sum(np.exp(log_weights_1_3) * particles_1_3, axis=1)
    self.assertAllClose(means, [3., 7.], atol=1.)

    (final_particles,
     final_log_weights,
     final_cumulative_lp) = self.evaluate(
         tfp.experimental.mcmc.particle_filter(
             observations=tf.convert_to_tensor([1., 3., 5., 7., 9.]),
             initial_state_prior=tfd.Normal(0., 1.),
             transition_fn=lambda _, state: tfd.Normal(state, 10.),
             observation_fn=lambda _, state: tfd.Normal(state, 0.1),
             num_particles=num_particles,
             trace_fn=lambda s, r: (s.particles,  # pylint: disable=g-long-lambda
                                    s.log_weights,
                                    r.accumulated_log_marginal_likelihood),
             trace_criterion_fn=None,
             seed=test_util.test_seed()))
    # With `trace_criterion_fn=None`, only the final step is returned.
    self.assertLen(final_particles, num_particles)
    self.assertLen(final_log_weights, num_particles)
    self.assertEqual(final_cumulative_lp.shape, ())
    means = np.sum(np.exp(final_log_weights) * final_particles)
    self.assertAllClose(means, 9., atol=1.5)

  def test_warns_if_transition_distribution_has_unexpected_shape(self):
    """Checks shape validation of transition/proposal distributions."""

    initial_state_prior = tfd.JointDistributionNamedAutoBatched(
        {'sales': tfd.Deterministic(0.),
         'inventory': tfd.Deterministic(1000.)})

    # Inventory decreases by a Poisson RV 'sales', but is lower bounded at zero.
    def valid_transition_fn(_, particles):
      return tfd.JointDistributionNamedAutoBatched(
          {'sales': tfd.Poisson(10. * tf.ones_like(particles['inventory'])),
           'inventory': lambda sales: tfd.Deterministic(  # pylint: disable=g-long-lambda
               tf.maximum(0., particles['inventory'] - sales))},
          batch_ndims=1,
          validate_args=True)

    def dummy_observation_fn(_, state):
      return tfd.Normal(state['inventory'], 1000.)

    run_filter = functools.partial(
        tfp.experimental.mcmc.particle_filter,
        observations=tf.zeros([10]),
        initial_state_prior=initial_state_prior,
        observation_fn=dummy_observation_fn,
        num_particles=3,
        seed=test_util.test_seed(sampler_type='stateless'))

    # Check that the model runs as written.
    self.evaluate(run_filter(transition_fn=valid_transition_fn))
    self.evaluate(run_filter(transition_fn=valid_transition_fn,
                             proposal_fn=valid_transition_fn))

    # Check that broken transition functions raise exceptions.
    def transition_fn_broadcasts_over_particles(_, particles):
      return tfd.JointDistributionNamed(
          {'sales': tfd.Poisson(10.),  # Proposes same value for all particles.
           'inventory': lambda sales: tfd.Deterministic(  # pylint: disable=g-long-lambda
               tf.maximum(0., particles['inventory'] - sales))},
          validate_args=True)

    def transition_fn_partial_batch_shape(_, particles):
      return tfd.JointDistributionNamed(
          # Using `Sample` ensures iid proposals for each particle, but not
          # per-particle log probs.
          {'sales': tfd.Sample(tfd.Poisson(10.),
                               ps.shape(particles['sales'])),
           'inventory': lambda sales: tfd.Deterministic(  # pylint: disable=g-long-lambda
               tf.maximum(0., particles['inventory'] - sales))},
          validate_args=True)

    def transition_fn_no_batch_shape(_, particles):
      # Autobatched JD defaults to treating num_particles as event shape, but
      # we need it to be batch shape to get per-particle logprobs.
      return tfd.JointDistributionNamedAutoBatched(
          {'sales': tfd.Poisson(10. * tf.ones_like(particles['inventory'])),
           'inventory': lambda sales: tfd.Deterministic(  # pylint: disable=g-long-lambda
               tf.maximum(0., particles['inventory'] - sales))},
          validate_args=True)

    with self.assertRaisesRegex(ValueError, 'transition distribution'):
      self.evaluate(
          run_filter(transition_fn=transition_fn_broadcasts_over_particles))
    with self.assertRaisesRegex(ValueError, 'transition distribution'):
      self.evaluate(
          run_filter(transition_fn=transition_fn_partial_batch_shape))
    with self.assertRaisesRegex(ValueError, 'transition distribution'):
      self.evaluate(
          run_filter(transition_fn=transition_fn_no_batch_shape))

    # The same validation applies to proposal distributions.
    with self.assertRaisesRegex(ValueError, 'proposal distribution'):
      self.evaluate(
          run_filter(transition_fn=valid_transition_fn,
                     proposal_fn=transition_fn_partial_batch_shape))
    with self.assertRaisesRegex(ValueError, 'proposal distribution'):
      self.evaluate(
          run_filter(transition_fn=valid_transition_fn,
                     proposal_fn=transition_fn_broadcasts_over_particles))
    with self.assertRaisesRegex(ValueError, 'proposal distribution'):
      self.evaluate(
          run_filter(transition_fn=valid_transition_fn,
                     proposal_fn=transition_fn_no_batch_shape))

  # TODO(b/186068104): add tests with dynamic shapes.
class ParticleFilterTestFloat32(_ParticleFilterTest):
  # Concrete dtype specialization; the base class reads `self.dtype`.
  dtype = np.float32
# Delete the abstract base class so the test runner does not collect it
# (it would fail without a `dtype` attribute).
del _ParticleFilterTest

if __name__ == '__main__':
  tf.test.main()
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the _License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for particle filtering."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import test_util
tfb = tfp.bijectors
tfd = tfp.distributions
@test_util.test_all_tf_execution_regimes
class _ParticleFilterTest(test_util.TestCase):
def test_random_walk(self):
initial_state_prior = tfd.JointDistributionNamed({
'position': tfd.Deterministic(0.)})
# Biased random walk.
def particle_dynamics(_, previous_state):
state_shape = ps.shape(previous_state['position'])
return tfd.JointDistributionNamed({
'position': tfd.TransformedDistribution(
tfd.Bernoulli(probs=tf.fill(state_shape, 0.75),
dtype=self.dtype),
tfb.Shift(previous_state['position']))})
# Completely uninformative observations allowing a test
# of the pure dynamics.
def particle_observations(_, state):
state_shape = ps.shape(state['position'])
return tfd.Uniform(low=tf.fill(state_shape, -100.),
high=tf.fill(state_shape, 100.))
observations = tf.zeros((9,), dtype=self.dtype)
trajectories, _ = self.evaluate(
tfp.experimental.mcmc.infer_trajectories(
observations=observations,
initial_state_prior=initial_state_prior,
transition_fn=particle_dynamics,
observation_fn=particle_observations,
num_particles=16384,
seed=test_util.test_seed()))
position = trajectories['position']
# The trajectories have the following properties:
# 1. they lie completely in the range [0, 8]
self.assertAllInRange(position, 0., 8.)
# 2. each step lies in the range [0, 1]
self.assertAllInRange(position[1:] - position[:-1], 0., 1.)
# 3. the expectation and variance of the final positions are 6 and 1.5.
self.assertAllClose(tf.reduce_mean(position[-1]), 6., atol=0.1)
self.assertAllClose(tf.math.reduce_variance(position[-1]), 1.5, atol=0.1)
def test_batch_of_filters(self):
batch_shape = [3, 2]
num_particles = 1000
num_timesteps = 40
# Batch of priors on object 1D positions and velocities.
initial_state_prior = tfd.JointDistributionNamed({
'position': tfd.Normal(loc=0., scale=tf.ones(batch_shape)),
'velocity': tfd.Normal(loc=0., scale=tf.ones(batch_shape) * 0.1)})
def transition_fn(_, previous_state):
return tfd.JointDistributionNamed({
'position': tfd.Normal(
loc=previous_state['position'] + previous_state['velocity'],
scale=0.1),
'velocity': tfd.Normal(loc=previous_state['velocity'], scale=0.01)})
def observation_fn(_, state):
return tfd.Normal(loc=state['position'], scale=0.1)
# Batch of synthetic observations, .
true_initial_positions = np.random.randn(*batch_shape).astype(self.dtype)
true_velocities = 0.1 * np.random.randn(
*batch_shape).astype(self.dtype)
observed_positions = (
true_velocities *
np.arange(num_timesteps).astype(
self.dtype)[..., tf.newaxis, tf.newaxis] +
true_initial_positions)
(particles,
log_weights,
parent_indices,
incremental_log_marginal_likelihoods) = self.evaluate(
tfp.experimental.mcmc.particle_filter(
observations=observed_positions,
initial_state_prior=initial_state_prior,
transition_fn=transition_fn,
observation_fn=observation_fn,
num_particles=num_particles,
seed=test_util.test_seed()))
self.assertAllEqual(particles['position'].shape,
[num_timesteps, num_particles] + batch_shape)
self.assertAllEqual(particles['velocity'].shape,
[num_timesteps, num_particles] + batch_shape)
self.assertAllEqual(parent_indices.shape,
[num_timesteps, num_particles] + batch_shape)
self.assertAllEqual(incremental_log_marginal_likelihoods.shape,
[num_timesteps] + batch_shape)
self.assertAllClose(
self.evaluate(
tf.reduce_sum(tf.exp(log_weights) *
particles['position'], axis=1)),
observed_positions,
atol=0.1)
velocity_means = tf.reduce_sum(tf.exp(log_weights) *
particles['velocity'], axis=1)
self.assertAllClose(
self.evaluate(tf.reduce_mean(velocity_means, axis=0)),
true_velocities, atol=0.05)
# Uncertainty in velocity should decrease over time.
velocity_stddev = self.evaluate(
tf.math.reduce_std(particles['velocity'], axis=1))
self.assertAllLess((velocity_stddev[-1] - velocity_stddev[0]), 0.)
trajectories = self.evaluate(
tfp.experimental.mcmc.reconstruct_trajectories(particles,
parent_indices))
self.assertAllEqual([num_timesteps, num_particles] + batch_shape,
trajectories['position'].shape)
self.assertAllEqual([num_timesteps, num_particles] + batch_shape,
trajectories['velocity'].shape)
# Verify that `infer_trajectories` also works on batches.
trajectories, incremental_log_marginal_likelihoods = self.evaluate(
tfp.experimental.mcmc.infer_trajectories(
observations=observed_positions,
initial_state_prior=initial_state_prior,
transition_fn=transition_fn,
observation_fn=observation_fn,
num_particles=num_particles,
seed=test_util.test_seed()))
self.assertAllEqual([num_timesteps, num_particles] + batch_shape,
trajectories['position'].shape)
self.assertAllEqual([num_timesteps, num_particles] + batch_shape,
trajectories['velocity'].shape)
self.assertAllEqual(incremental_log_marginal_likelihoods.shape,
[num_timesteps] + batch_shape)
def test_reconstruct_trajectories_toy_example(self):
particles = tf.convert_to_tensor([[1, 2, 3], [4, 5, 6,], [7, 8, 9]])
# 1 -- 4 -- 7
# 2 \/ 5 .- 8
# 3 /\ 6 /-- 9
parent_indices = tf.convert_to_tensor([[0, 1, 2], [0, 2, 1], [0, 2, 2]])
trajectories = self.evaluate(
tfp.experimental.mcmc.reconstruct_trajectories(particles,
parent_indices))
self.assertAllEqual(
np.array([[1, 2, 2], [4, 6, 6], [7, 8, 9]]), trajectories)
def test_epidemiological_model(self):
# A toy, discrete version of an SIR (Susceptible, Infected, Recovered)
# model (https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology)
population_size = 1000
infection_rate = tf.convert_to_tensor(1.1)
infectious_period = tf.convert_to_tensor(8.0)
initial_state_prior = tfd.JointDistributionNamed({
'susceptible': tfd.Deterministic(999.),
'infected': tfd.Deterministic(1.),
'new_infections': tfd.Deterministic(1.),
'new_recoveries': tfd.Deterministic(0.)})
# Dynamics model: new infections and recoveries are given by the SIR
# model with Poisson noise.
def infection_dynamics(_, previous_state):
new_infections = tfd.Poisson(
infection_rate * previous_state['infected'] *
previous_state['susceptible'] / population_size)
new_recoveries = tfd.Poisson(previous_state['infected'] /
infectious_period)
def susceptible(new_infections):
return tfd.Deterministic(
ps.maximum(
0., previous_state['susceptible'] - new_infections))
def infected(new_infections, new_recoveries):
return tfd.Deterministic(
ps.maximum(
0.,
previous_state['infected'] + new_infections - new_recoveries))
return tfd.JointDistributionNamed({
'new_infections': new_infections,
'new_recoveries': new_recoveries,
'susceptible': susceptible,
'infected': infected})
# Observation model: each day we detect new cases, noisily.
def infection_observations(_, state):
return tfd.Poisson(state['infected'])
# pylint: disable=bad-whitespace
observations = tf.convert_to_tensor([
0., 4., 1., 5., 23., 27., 75., 127., 248., 384., 540., 683.,
714., 611., 561., 493., 385., 348., 300., 277., 249., 219., 216., 174.,
132., 122., 115., 99., 76., 84., 77., 56., 42., 56., 46., 38.,
34., 44., 25., 27.])
# pylint: enable=bad-whitespace
trajectories, _ = self.evaluate(
tfp.experimental.mcmc.infer_trajectories(
observations=observations,
initial_state_prior=initial_state_prior,
transition_fn=infection_dynamics,
observation_fn=infection_observations,
num_particles=100,
seed=test_util.test_seed()))
# The susceptible population should decrease over time.
self.assertAllLessEqual(
trajectories['susceptible'][1:, ...] -
trajectories['susceptible'][:-1, ...],
0.0)
def test_data_driven_proposal(self):
num_particles = 100
observations = tf.convert_to_tensor([60., -179.2, 1337.42])
# Define a system constrained primarily by observations, where proposing
# from the dynamics would be a bad fit.
initial_state_prior = tfd.Normal(loc=0., scale=1e6)
transition_fn = (
lambda _, previous_state: tfd.Normal(loc=previous_state, scale=1e6))
observation_fn = lambda _, state: tfd.Normal(loc=state, scale=0.1)
initial_state_proposal = tfd.Normal(loc=observations[0], scale=0.1)
proposal_fn = (lambda step, state: tfd.Normal( # pylint: disable=g-long-lambda
loc=tf.ones_like(state) * observations[step + 1], scale=1.0))
trajectories, _ = self.evaluate(
tfp.experimental.mcmc.infer_trajectories(
observations=observations,
initial_state_prior=initial_state_prior,
transition_fn=transition_fn,
observation_fn=observation_fn,
num_particles=num_particles,
initial_state_proposal=initial_state_proposal,
proposal_fn=proposal_fn,
seed=test_util.test_seed()))
self.assertAllClose(trajectories,
tf.convert_to_tensor(
tf.convert_to_tensor(
observations)[..., tf.newaxis] *
tf.ones([num_particles])), atol=1.0)
def test_estimated_prob_approximates_true_prob(self):
# Draw simulated data from a 2D linear Gaussian system.
initial_state_prior = tfd.MultivariateNormalDiag(
loc=0., scale_diag=(1., 1.))
transition_matrix = tf.convert_to_tensor([[1., -0.5], [0.4, -1.]])
transition_noise = tfd.MultivariateNormalTriL(
loc=1., scale_tril=tf.convert_to_tensor([[0.3, 0], [-0.1, 0.2]]))
observation_matrix = tf.convert_to_tensor([[0.1, 1.], [1., 0.2]])
observation_noise = tfd.MultivariateNormalTriL(
loc=-0.3, scale_tril=tf.convert_to_tensor([[0.5, 0], [0.1, 0.5]]))
model = tfd.LinearGaussianStateSpaceModel(
num_timesteps=20,
initial_state_prior=initial_state_prior,
transition_matrix=transition_matrix,
transition_noise=transition_noise,
observation_matrix=observation_matrix,
observation_noise=observation_noise)
observations = self.evaluate(
model.sample(seed=test_util.test_seed()))
(lps, filtered_means,
_, _, _, _, _) = self.evaluate(model.forward_filter(observations))
# Approximate the filtering means and marginal likelihood(s) using
# the particle filter.
# pylint: disable=g-long-lambda
(particles, log_weights, _,
estimated_incremental_log_marginal_likelihoods) = self.evaluate(
tfp.experimental.mcmc.particle_filter(
observations=observations,
initial_state_prior=initial_state_prior,
transition_fn=lambda _, previous_state: tfd.MultivariateNormalTriL(
loc=transition_noise.loc + tf.linalg.matvec(
transition_matrix, previous_state),
scale_tril=transition_noise.scale_tril),
observation_fn=lambda _, state: tfd.MultivariateNormalTriL(
loc=observation_noise.loc + tf.linalg.matvec(
observation_matrix, state),
scale_tril=observation_noise.scale_tril),
num_particles=1024,
seed=test_util.test_seed()))
# pylint: enable=g-long-lambda
particle_means = np.sum(
particles * np.exp(log_weights)[..., np.newaxis], axis=1)
self.assertAllClose(filtered_means, particle_means, atol=0.1, rtol=0.1)
self.assertAllClose(
lps, estimated_incremental_log_marginal_likelihoods, atol=0.6)
def test_proposal_weights_dont_affect_marginal_likelihood(self):
  """A non-dynamics proposal must leave marginal likelihoods unbiased."""
  observation = np.array([-1.3, 0.7]).astype(self.dtype)
  # This particle filter has proposals different from the dynamics,
  # so internally it will use proposal weights in addition to observation
  # weights. It should still get the observation likelihood correct.
  _, lps = self.evaluate(tfp.experimental.mcmc.infer_trajectories(
      observation,
      initial_state_prior=tfd.Normal(loc=0., scale=1.),
      transition_fn=lambda _, x: tfd.Normal(loc=x, scale=1.),
      observation_fn=lambda _, x: tfd.Normal(loc=x, scale=1.),
      initial_state_proposal=tfd.Normal(loc=0., scale=5.),
      proposal_fn=lambda _, x: tfd.Normal(loc=x, scale=5.),
      num_particles=2048,
      seed=test_util.test_seed()))

  # Compare marginal likelihood against that
  # from the true (jointly normal) marginal distribution.
  y1_marginal_dist = tfd.Normal(loc=0., scale=np.sqrt(1. + 1.))
  y2_conditional_dist = (
      lambda y1: tfd.Normal(loc=y1 / 2., scale=np.sqrt(5. / 2.)))
  true_lps = [y1_marginal_dist.log_prob(observation[0]),
              y2_conditional_dist(observation[0]).log_prob(observation[1])]
  # The following line passes at atol = 0.01 if num_particles = 32768.
  self.assertAllClose(true_lps, lps, atol=0.2)
def test_can_step_dynamics_faster_than_observations(self):
  """Runs many transition steps per observation (harmonic oscillator)."""
  initial_state_prior = tfd.JointDistributionNamed({
      'position': tfd.Deterministic(1.),
      'velocity': tfd.Deterministic(0.)
  })

  # Use 100 steps between observations to integrate a simple harmonic
  # oscillator.
  dt = 0.01

  def simple_harmonic_motion_transition_fn(_, state):
    # Euler step of x' = v, v' = -x, with small process noise.
    return tfd.JointDistributionNamed({
        'position': tfd.Normal(
            loc=state['position'] + dt * state['velocity'], scale=dt * 0.01),
        'velocity': tfd.Normal(
            loc=state['velocity'] - dt * state['position'], scale=dt * 0.01)
    })

  def observe_position(_, state):
    return tfd.Normal(loc=state['position'], scale=0.01)

  particles, _, _, lps = self.evaluate(tfp.experimental.mcmc.particle_filter(
      # 'Observing' the values we'd expect from a proper integrator should
      # give high likelihood if our discrete approximation is good.
      observations=tf.convert_to_tensor([tf.math.cos(0.),
                                         tf.math.cos(1.)]),
      initial_state_prior=initial_state_prior,
      transition_fn=simple_harmonic_motion_transition_fn,
      observation_fn=observe_position,
      num_particles=1024,
      num_transitions_per_observation=100,
      seed=test_util.test_seed()))

  # 100 transitions per observation + initial step => 101 traced steps.
  self.assertLen(particles['position'], 101)
  self.assertAllClose(np.mean(particles['position'], axis=-1),
                      tf.math.cos(dt * np.arange(101)),
                      atol=0.04)
  self.assertLen(lps, 101)
  self.assertGreater(lps[0], 3.)
  self.assertGreater(lps[-1], 3.)
def test_custom_trace_fn(self):
  """A user-supplied trace_fn can record arbitrary per-step statistics."""

  def trace_fn(state, _):
    # Traces the mean and stddev of the particle population at each step.
    weights = tf.exp(state.log_weights)
    mean = tf.reduce_sum(weights * state.particles, axis=0)
    variance = tf.reduce_sum(
        weights * (state.particles - mean[tf.newaxis, ...])**2)
    return {'mean': mean,
            'stddev': tf.sqrt(variance),
            # In real usage we would likely not track the particles and
            # weights. We keep them here just so we can double-check the
            # stats, below.
            'particles': state.particles,
            'weights': weights}

  results = self.evaluate(
      tfp.experimental.mcmc.particle_filter(
          observations=tf.convert_to_tensor([1., 3., 5., 7., 9.]),
          initial_state_prior=tfd.Normal(0., 1.),
          transition_fn=lambda _, state: tfd.Normal(state, 1.),
          observation_fn=lambda _, state: tfd.Normal(state, 1.),
          num_particles=1024,
          trace_fn=trace_fn,
          seed=test_util.test_seed()))

  # Verify that posterior means are increasing.
  self.assertAllGreater(results['mean'][1:] - results['mean'][:-1], 0.)

  # Check that our traced means and scales match values computed
  # by averaging over particles after the fact.
  all_means = self.evaluate(tf.reduce_sum(
      results['weights'] * results['particles'], axis=1))
  all_variances = self.evaluate(
      tf.reduce_sum(
          results['weights'] *
          (results['particles'] - all_means[..., tf.newaxis])**2,
          axis=1))
  self.assertAllClose(results['mean'], all_means)
  self.assertAllClose(results['stddev'], np.sqrt(all_variances))
def test_step_indices_to_trace(self):
  """trace_criterion_fn selects traced steps; None keeps only final state."""
  num_particles = 1024
  (particles_1_3,
   log_weights_1_3,
   parent_indices_1_3,
   incremental_log_marginal_likelihood_1_3) = self.evaluate(
       tfp.experimental.mcmc.particle_filter(
           observations=tf.convert_to_tensor([1., 3., 5., 7., 9.]),
           initial_state_prior=tfd.Normal(0., 1.),
           transition_fn=lambda _, state: tfd.Normal(state, 10.),
           observation_fn=lambda _, state: tfd.Normal(state, 0.1),
           num_particles=num_particles,
           # Record only steps 2 and 4 of the run.
           trace_criterion_fn=lambda s, r: ps.logical_or(  # pylint: disable=g-long-lambda
               ps.equal(r.steps, 2),
               ps.equal(r.steps, 4)),
           static_trace_allocation_size=2,
           seed=test_util.test_seed()))
  # Exactly two steps were traced.
  self.assertLen(particles_1_3, 2)
  self.assertLen(log_weights_1_3, 2)
  self.assertLen(parent_indices_1_3, 2)
  self.assertLen(incremental_log_marginal_likelihood_1_3, 2)
  means = np.sum(np.exp(log_weights_1_3) * particles_1_3, axis=1)
  self.assertAllClose(means, [3., 7.], atol=1.)

  # trace_criterion_fn=None returns only the final state (no time axis).
  (final_particles,
   final_log_weights,
   final_cumulative_lp) = self.evaluate(
       tfp.experimental.mcmc.particle_filter(
           observations=tf.convert_to_tensor([1., 3., 5., 7., 9.]),
           initial_state_prior=tfd.Normal(0., 1.),
           transition_fn=lambda _, state: tfd.Normal(state, 10.),
           observation_fn=lambda _, state: tfd.Normal(state, 0.1),
           num_particles=num_particles,
           trace_fn=lambda s, r: (s.particles,  # pylint: disable=g-long-lambda
                                  s.log_weights,
                                  r.accumulated_log_marginal_likelihood),
           trace_criterion_fn=None,
           seed=test_util.test_seed()))
  self.assertLen(final_particles, num_particles)
  self.assertLen(final_log_weights, num_particles)
  self.assertEqual(final_cumulative_lp.shape, ())
  means = np.sum(np.exp(final_log_weights) * final_particles)
  self.assertAllClose(means, 9., atol=1.5)
def test_warns_if_transition_distribution_has_unexpected_shape(self):
  """Transition/proposal fns lacking per-particle batch shape must raise."""
  initial_state_prior = tfd.JointDistributionNamedAutoBatched(
      {'sales': tfd.Deterministic(0.),
       'inventory': tfd.Deterministic(1000.)})

  # Inventory decreases by a Poisson RV 'sales', but is lower bounded at zero.
  def valid_transition_fn(_, particles):
    return tfd.JointDistributionNamedAutoBatched(
        {'sales': tfd.Poisson(10. * tf.ones_like(particles['inventory'])),
         'inventory': lambda sales: tfd.Deterministic(  # pylint: disable=g-long-lambda
             tf.maximum(0., particles['inventory'] - sales))},
        batch_ndims=1,
        validate_args=True)

  def dummy_observation_fn(_, state):
    return tfd.Normal(state['inventory'], 1000.)

  run_filter = functools.partial(
      tfp.experimental.mcmc.particle_filter,
      observations=tf.zeros([10]),
      initial_state_prior=initial_state_prior,
      observation_fn=dummy_observation_fn,
      num_particles=3,
      seed=test_util.test_seed(sampler_type='stateless'))

  # Check that the model runs as written.
  self.evaluate(run_filter(transition_fn=valid_transition_fn))
  self.evaluate(run_filter(transition_fn=valid_transition_fn,
                           proposal_fn=valid_transition_fn))

  # Check that broken transition functions raise exceptions.
  def transition_fn_broadcasts_over_particles(_, particles):
    return tfd.JointDistributionNamed(
        {'sales': tfd.Poisson(10.),  # Proposes same value for all particles.
         'inventory': lambda sales: tfd.Deterministic(  # pylint: disable=g-long-lambda
             tf.maximum(0., particles['inventory'] - sales))},
        validate_args=True)

  def transition_fn_partial_batch_shape(_, particles):
    return tfd.JointDistributionNamed(
        # Using `Sample` ensures iid proposals for each particle, but not
        # per-particle log probs.
        {'sales': tfd.Sample(tfd.Poisson(10.),
                             ps.shape(particles['sales'])),
         'inventory': lambda sales: tfd.Deterministic(  # pylint: disable=g-long-lambda
             tf.maximum(0., particles['inventory'] - sales))},
        validate_args=True)

  def transition_fn_no_batch_shape(_, particles):
    # Autobatched JD defaults to treating num_particles as event shape, but
    # we need it to be batch shape to get per-particle logprobs.
    return tfd.JointDistributionNamedAutoBatched(
        {'sales': tfd.Poisson(10. * tf.ones_like(particles['inventory'])),
         'inventory': lambda sales: tfd.Deterministic(  # pylint: disable=g-long-lambda
             tf.maximum(0., particles['inventory'] - sales))},
        validate_args=True)

  with self.assertRaisesRegex(ValueError, 'transition distribution'):
    self.evaluate(
        run_filter(transition_fn=transition_fn_broadcasts_over_particles))
  with self.assertRaisesRegex(ValueError, 'transition distribution'):
    self.evaluate(
        run_filter(transition_fn=transition_fn_partial_batch_shape))
  with self.assertRaisesRegex(ValueError, 'transition distribution'):
    self.evaluate(
        run_filter(transition_fn=transition_fn_no_batch_shape))
  with self.assertRaisesRegex(ValueError, 'proposal distribution'):
    self.evaluate(
        run_filter(transition_fn=valid_transition_fn,
                   proposal_fn=transition_fn_partial_batch_shape))
  with self.assertRaisesRegex(ValueError, 'proposal distribution'):
    self.evaluate(
        run_filter(transition_fn=valid_transition_fn,
                   proposal_fn=transition_fn_broadcasts_over_particles))
  with self.assertRaisesRegex(ValueError, 'proposal distribution'):
    self.evaluate(
        run_filter(transition_fn=valid_transition_fn,
                   proposal_fn=transition_fn_no_batch_shape))
# TODO(b/186068104): add tests with dynamic shapes.
class ParticleFilterTestFloat32(_ParticleFilterTest):
  # Concrete instantiation of the shared test suite at float32 precision.
  dtype = np.float32


# Remove the abstract base so the test runner doesn't discover it directly.
del _ParticleFilterTest

if __name__ == '__main__':
  tf.test.main()
|
en
| 0.83261
|
# Copyright 2020 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the _License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ Tests for particle filtering. # Biased random walk. # Completely uninformative observations allowing a test # of the pure dynamics. # The trajectories have the following properties: # 1. they lie completely in the range [0, 8] # 2. each step lies in the range [0, 1] # 3. the expectation and variance of the final positions are 6 and 1.5. # Batch of priors on object 1D positions and velocities. # Batch of synthetic observations, . # Uncertainty in velocity should decrease over time. # Verify that `infer_trajectories` also works on batches. # 1 -- 4 -- 7 # 2 \/ 5 .- 8 # 3 /\ 6 /-- 9 # A toy, discrete version of an SIR (Susceptible, Infected, Recovered) # model (https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology) # Dynamics model: new infections and recoveries are given by the SIR # model with Poisson noise. # Observation model: each day we detect new cases, noisily. # pylint: disable=bad-whitespace # pylint: enable=bad-whitespace # The susceptible population should decrease over time. # Define a system constrained primarily by observations, where proposing # from the dynamics would be a bad fit. # pylint: disable=g-long-lambda # Draw simulated data from a 2D linear Gaussian system. # Approximate the filtering means and marginal likelihood(s) using # the particle filter. 
# pylint: disable=g-long-lambda # pylint: enable=g-long-lambda # This particle filter has proposals different from the dynamics, # so internally it will use proposal weights in addition to observation # weights. It should still get the observation likelihood correct. # Compare marginal likelihood against that # from the true (jointly normal) marginal distribution. # The following line passes at atol = 0.01 if num_particles = 32768. # Use 100 steps between observations to integrate a simple harmonic # oscillator. # 'Observing' the values we'd expect from a proper integrator should # give high likelihood if our discrete approximation is good. # Traces the mean and stddev of the particle population at each step. # In real usage we would likely not track the particles and # weights. We keep them here just so we can double-check the # stats, below. # Verify that posterior means are increasing. # Check that our traced means and scales match values computed # by averaging over particles after the fact. # pylint: disable=g-long-lambda # pylint: disable=g-long-lambda # Inventory decreases by a Poisson RV 'sales', but is lower bounded at zero. # pylint: disable=g-long-lambda # Check that the model runs as written. # Check that broken transition functions raise exceptions. # Proposes same value for all particles. # pylint: disable=g-long-lambda # Using `Sample` ensures iid proposals for each particle, but not # per-particle log probs. # pylint: disable=g-long-lambda # Autobatched JD defaults to treating num_particles as event shape, but # we need it to be batch shape to get per-particle logprobs. # pylint: disable=g-long-lambda # TODO(b/186068104): add tests with dynamic shapes.
| 2.290103
| 2
|
froide/foirequestfollower/views.py
|
okko/tietopyynto
| 3
|
6628600
|
from django.shortcuts import get_object_or_404, redirect
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext_lazy as _
from django.contrib import messages
from froide.foirequest.models import FoiRequest
from froide.foirequest.views import show
from .models import FoiRequestFollower
from .forms import FollowRequestForm
@require_POST
@require_POST
def follow(request, slug):
    """Toggle following of an FOI request for the posting user.

    Validates a ``FollowRequestForm``; on success saves it, queues an
    appropriate status message, and redirects to the request. On form
    errors, re-renders the request page with HTTP 400.
    """
    foirequest = get_object_or_404(FoiRequest, slug=slug)
    form = FollowRequestForm(foirequest, request.user, request.POST)
    if not form.is_valid():
        # Show the request page again, with the follow form's errors.
        return show(request, slug, context={"followform": form}, status=400)

    followed = form.save()
    # Pick message level and wording based on auth state and save() result.
    if request.user.is_authenticated:
        if followed:
            level = messages.SUCCESS
            text = _("You are now following this request.")
        else:
            level = messages.INFO
            text = _("You are not following this request anymore.")
    elif followed is None:
        level = messages.INFO
        text = _("You have not yet confirmed that you want to follow this request. Click the link in the mail that was sent to you.")
    elif followed:
        level = messages.SUCCESS
        text = _("Check your emails and click the confirmation link in order to follow this request.")
    else:
        level = messages.INFO
        text = _("You are following this request. If you want to unfollow it, click the unfollow link in the emails you received.")
    messages.add_message(request, level, text)
    return redirect(foirequest)
def confirm_follow(request, follow_id, check):
    # NOTE(review): this view only looks up the follower (404 if absent) and
    # then implicitly returns None, which is not a valid Django view response.
    # The `check` parameter is never used. Presumably it should verify `check`
    # and confirm the follow, then redirect -- mirroring unfollow_by_link.
    # Confirm intended behavior before relying on this endpoint.
    get_object_or_404(FoiRequestFollower, id=int(follow_id))
def unfollow_by_link(request, follow_id, check):
    """Unfollow an FOI request via a link carrying the follower id and a check token."""
    follower = get_object_or_404(FoiRequestFollower, id=int(follow_id))
    # check_and_unfollow validates the token; report the outcome to the user.
    if follower.check_and_unfollow(check):
        level = messages.INFO
        text = _("You are not following this request anymore.")
    else:
        level = messages.ERROR
        text = _("There was something wrong with your link. Perhaps try again.")
    messages.add_message(request, level, text)
    return redirect(follower.request)
|
from django.shortcuts import get_object_or_404, redirect
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext_lazy as _
from django.contrib import messages
from froide.foirequest.models import FoiRequest
from froide.foirequest.views import show
from .models import FoiRequestFollower
from .forms import FollowRequestForm
@require_POST
@require_POST
def follow(request, slug):
    """Handle a POST toggling whether the current user follows an FOI request.

    On a valid form, saves it, queues a status message and redirects to the
    request; on form errors, re-renders the request page with HTTP 400.
    """
    foirequest = get_object_or_404(FoiRequest, slug=slug)
    form = FollowRequestForm(foirequest, request.user, request.POST)
    if form.is_valid():
        # followed appears tri-state: None / truthy / falsy (see the message
        # branches below) -- confirm exact semantics in FollowRequestForm.save().
        followed = form.save()
        if request.user.is_authenticated:
            if followed:
                messages.add_message(request, messages.SUCCESS,
                        _("You are now following this request."))
            else:
                messages.add_message(request, messages.INFO,
                        _("You are not following this request anymore."))
        else:
            # Anonymous (email-based) follows go through a confirmation flow.
            if followed is None:
                messages.add_message(request, messages.INFO,
                        _("You have not yet confirmed that you want to follow this request. Click the link in the mail that was sent to you."))
            elif followed:
                messages.add_message(request, messages.SUCCESS,
                        _("Check your emails and click the confirmation link in order to follow this request."))
            else:
                messages.add_message(request, messages.INFO,
                        _("You are following this request. If you want to unfollow it, click the unfollow link in the emails you received."))
        return redirect(foirequest)
    else:
        return show(request, slug, context={"followform": form}, status=400)
def confirm_follow(request, follow_id, check):
    # NOTE(review): looks incomplete -- the view 404s on a missing follower but
    # otherwise returns None (invalid as a Django response) and ignores `check`.
    # Verify intended behavior; it likely should confirm and redirect.
    get_object_or_404(FoiRequestFollower, id=int(follow_id))
def unfollow_by_link(request, follow_id, check):
    """Unfollow an FOI request via an emailed link (follower id + check token)."""
    follower = get_object_or_404(FoiRequestFollower, id=int(follow_id))
    if follower.check_and_unfollow(check):
        messages.add_message(request, messages.INFO,
            _("You are not following this request anymore."))
    else:
        # Token failed validation (or unfollow failed) -- surface an error.
        messages.add_message(request, messages.ERROR,
            _("There was something wrong with your link. Perhaps try again."))
    return redirect(follower.request)
|
none
| 1
| 2.256151
| 2
|
|
pymaginopolis/chunkyfile/chunkxml.py
|
benstone/pymaginopolis
| 9
|
6628601
|
import base64
import logging
import pathlib
from xml.etree import ElementTree
from xml.dom import minidom
from pymaginopolis.chunkyfile import model as model, codecs as codecs
EMPTY_FILE = "EmpT"
def chunky_file_to_xml(this_file, chunk_data_dir=None):
    """
    Generate an XML representation of a chunky file
    :param this_file: chunky file object
    :param chunk_data_dir: optional, directory to write chunk data files to
    :return: string containing XML representation of a chunky file
    """
    # When a data directory is given, chunk payloads are written there as
    # separate files; otherwise they are embedded inline as base64.
    if chunk_data_dir:
        chunk_data_dir = pathlib.Path(chunk_data_dir)
        if not chunk_data_dir.is_dir():
            chunk_data_dir.mkdir()

    # Generate XML document
    chunky_root = ElementTree.Element("ChunkyFile")
    chunky_root.set("type", this_file.file_type)
    chunky_root.set("endianness", this_file.endianness.name)
    chunky_root.set("charset", this_file.characterset.name)

    for chunk in this_file.chunks:
        chunk_element = ElementTree.SubElement(chunky_root, "Chunk")
        chunk_element.set("tag", chunk.chunk_id.tag)
        chunk_element.set("number", str(chunk.chunk_id.number))
        if chunk.name:
            chunk_element.set("name", chunk.name)
        if chunk.flags & model.ChunkFlags.Loner:
            chunk_element.set("loner", "true")

        # Add children
        for chunk_child in chunk.children:
            child_element = ElementTree.SubElement(chunk_element, "Child")
            child_element.set("chid", str(chunk_child.chid))
            child_element.set("tag", chunk_child.ref.tag)
            child_element.set("number", str(chunk_child.ref.number))

        # Add data: compressed chunks are exported in their encoded form.
        if chunk.flags & model.ChunkFlags.Compressed:
            is_compressed = True
            this_chunk_data = chunk.encoded_data
            chunk_element.set("compressed", "true")
        else:
            is_compressed = False
            this_chunk_data = chunk.raw_data

        if chunk_data_dir:
            # Derive the data file name from the chunk tag (trailing-space
            # padding in tags is stripped).
            file_extension = chunk.chunk_id.tag.lower().rstrip(" ")
            # HACK: map the "wave" tag to the conventional .wav extension.
            if file_extension == "wave":
                file_extension = "wav"
            chunk_data_file_name = "%d.%s" % (chunk.chunk_id.number, file_extension)
            if is_compressed:
                # Suffix the detected compression scheme, e.g. "1.wav.kcdc".
                compression_type = codecs.identify_compression(this_chunk_data).name
                chunk_data_file_name += ".%s" % (compression_type.lower())
            chunk_data_file_path = chunk_data_dir / chunk_data_file_name
            with open(chunk_data_file_path, "wb") as chunk_data_file:
                chunk_data_file.write(this_chunk_data)

            # Create element for data
            data_element = ElementTree.SubElement(chunk_element, "File")
            data_element.text = str(chunk_data_file_path)
            if is_compressed:
                data_element.set("compressed", "true")
        else:
            # No data directory: embed the payload inline as base64.
            data_element = ElementTree.SubElement(chunk_element, "Data")
            data_element.text = base64.b64encode(this_chunk_data).decode("utf-8")

    this_file_xml = ElementTree.tostring(chunky_root)
    # Pretty-print the XML
    dom = minidom.parseString(this_file_xml)
    this_file_pretty_xml = dom.toprettyxml()
    return this_file_pretty_xml
def xml_to_chunky_file(chunky_file, xml_path, change_file_type=False):
    """
    Load chunks from an XML file and add them to a chunky file
    :param chunky_file: Existing chunky file instance that new chunks will be added to
    :param xml_path: XML filename
    :param change_file_type: change the file type tag in the header
    """
    logger = logging.getLogger(__name__)

    # Load XML file
    tree = ElementTree.parse(xml_path)
    chunky_file_xml = tree.getroot()

    # TODO: validate with an XSD?
    if chunky_file_xml.tag != "ChunkyFile":
        raise Exception("Not the right kind of XML file")

    # Set chunky file options if not already set
    file_type = chunky_file_xml.attrib.get("type")
    endianness = model.Endianness[chunky_file_xml.attrib.get("endianness", "LittleEndian")]
    charset = model.CharacterSet[chunky_file_xml.attrib.get("charset", "ANSI")]
    if chunky_file.file_type == EMPTY_FILE:
        # Fresh/empty file: adopt the XML header settings wholesale.
        chunky_file.file_type = file_type
        chunky_file.endianness = endianness
        chunky_file.characterset = charset
    else:
        # Existing file: only change settings that differ, and warn.
        if file_type is not None and chunky_file.file_type != file_type and change_file_type:
            logger.warning("Changing file type from %s to %s", chunky_file.file_type, file_type)
            chunky_file.file_type = file_type
        if chunky_file.endianness != endianness:
            logger.warning("Changing file endianness from %s to %s", chunky_file.endianness, endianness)
            chunky_file.endianness = endianness
        if chunky_file.characterset != charset:
            logger.warning("Changing file character set from %s to %s", chunky_file.characterset, charset)
            chunky_file.characterset = charset

    for chunk_xml in chunky_file_xml.findall("Chunk"):
        # Get chunk metadata
        chunk_tag = chunk_xml.attrib["tag"]
        chunk_number = int(chunk_xml.attrib["number"])
        chunk_id = model.ChunkId(chunk_tag, chunk_number)
        chunk_name = chunk_xml.attrib.get("name", None)
        logger.debug("Processing chunk: %s - %s", chunk_id, chunk_name if chunk_name else "n/a")

        chunk_flags = model.ChunkFlags.Default
        if chunk_xml.attrib.get("loner", "false").lower() == "true":
            chunk_flags |= model.ChunkFlags.Loner
        if chunk_xml.attrib.get("compressed", "false").lower() == "true":
            chunk_flags |= model.ChunkFlags.Compressed

        # Get chunk children and data
        chunk_data = None
        chunk_children = list()
        for child_xml in chunk_xml:
            if child_xml.tag == "Child":
                chid = int(child_xml.attrib["chid"])
                tag = child_xml.attrib["tag"]
                number = int(child_xml.attrib["number"])
                chunk_child = model.ChunkChild(chid=chid, ref=model.ChunkId(tag, number))
                chunk_children.append(chunk_child)
            elif child_xml.tag == "Data":
                # Inline payload, base64-encoded in the element text.
                chunk_data = base64.b64decode(child_xml.text)
            elif child_xml.tag == "File":
                # External payload; element text is the file path.
                with open(child_xml.text, "rb") as data_file:
                    chunk_data = data_file.read()
            else:
                raise Exception("unhandled child tag type: %s" % child_xml.tag)

        # Check if there is an existing chunk
        if chunk_id in chunky_file:
            existing_chunk = chunky_file[chunk_id]
            logger.info("%s: Modifying existing chunk", chunk_id)

            # Update chunk metadata
            if chunk_name:
                existing_chunk.name = chunk_name

            if chunk_flags != existing_chunk.flags:
                # TODO: set flags correctly
                # if the loner flag is not set correctly the file won't load
                logger.warning("Chunk flags are different: %s vs %s", existing_chunk.flags, chunk_flags)

            # TODO: update existing children instead of just adding
            for new_child in chunk_children:
                existing_child = [c for c in existing_chunk.children if c.chid == new_child.chid]
                if len(existing_child) > 0:
                    logger.warning("child %s: %s already exists" % (existing_chunk, existing_child))
                else:
                    existing_chunk.children.append(new_child)

            # Set chunk data
            # TODO: handle compression
            if chunk_data:
                existing_chunk.raw_data = chunk_data
        else:
            logger.info("%s: Creating new chunk", chunk_id)
            # Create a new chunk
            this_chunk = model.Chunk(chunk_tag, chunk_number, chunk_name, chunk_flags, data=chunk_data)
            this_chunk.children = chunk_children
            chunky_file.chunks.append(this_chunk)
|
import base64
import logging
import pathlib
from xml.etree import ElementTree
from xml.dom import minidom
from pymaginopolis.chunkyfile import model as model, codecs as codecs
EMPTY_FILE = "EmpT"
def chunky_file_to_xml(this_file, chunk_data_dir=None):
    """
    Generate an XML representation of a chunky file
    :param this_file: chunky file object
    :param chunk_data_dir: optional, directory to write chunk data files to
    :return: string containing XML representation of a chunky file
    """
    # With a data directory, payloads go to separate files; otherwise they
    # are embedded inline as base64.
    if chunk_data_dir:
        chunk_data_dir = pathlib.Path(chunk_data_dir)
        if not chunk_data_dir.is_dir():
            chunk_data_dir.mkdir()

    # Generate XML document
    chunky_root = ElementTree.Element("ChunkyFile")
    chunky_root.set("type", this_file.file_type)
    chunky_root.set("endianness", this_file.endianness.name)
    chunky_root.set("charset", this_file.characterset.name)

    for chunk in this_file.chunks:
        chunk_element = ElementTree.SubElement(chunky_root, "Chunk")
        chunk_element.set("tag", chunk.chunk_id.tag)
        chunk_element.set("number", str(chunk.chunk_id.number))
        if chunk.name:
            chunk_element.set("name", chunk.name)
        if chunk.flags & model.ChunkFlags.Loner:
            chunk_element.set("loner", "true")

        # Add children
        for chunk_child in chunk.children:
            child_element = ElementTree.SubElement(chunk_element, "Child")
            child_element.set("chid", str(chunk_child.chid))
            child_element.set("tag", chunk_child.ref.tag)
            child_element.set("number", str(chunk_child.ref.number))

        # Add data: compressed chunks are exported in their encoded form.
        if chunk.flags & model.ChunkFlags.Compressed:
            is_compressed = True
            this_chunk_data = chunk.encoded_data
            chunk_element.set("compressed", "true")
        else:
            is_compressed = False
            this_chunk_data = chunk.raw_data

        if chunk_data_dir:
            file_extension = chunk.chunk_id.tag.lower().rstrip(" ")
            # HACK: map the "wave" tag to the conventional .wav extension.
            if file_extension == "wave":
                file_extension = "wav"
            chunk_data_file_name = "%d.%s" % (chunk.chunk_id.number, file_extension)
            if is_compressed:
                # Suffix the detected compression scheme name.
                compression_type = codecs.identify_compression(this_chunk_data).name
                chunk_data_file_name += ".%s" % (compression_type.lower())
            chunk_data_file_path = chunk_data_dir / chunk_data_file_name
            with open(chunk_data_file_path, "wb") as chunk_data_file:
                chunk_data_file.write(this_chunk_data)

            # Create element for data
            data_element = ElementTree.SubElement(chunk_element, "File")
            data_element.text = str(chunk_data_file_path)
            if is_compressed:
                data_element.set("compressed", "true")
        else:
            # Inline payload, base64-encoded.
            data_element = ElementTree.SubElement(chunk_element, "Data")
            data_element.text = base64.b64encode(this_chunk_data).decode("utf-8")

    this_file_xml = ElementTree.tostring(chunky_root)
    # Pretty-print the XML
    dom = minidom.parseString(this_file_xml)
    this_file_pretty_xml = dom.toprettyxml()
    return this_file_pretty_xml
def xml_to_chunky_file(chunky_file, xml_path, change_file_type=False):
    """
    Load chunks from an XML file and add them to a chunky file
    :param chunky_file: Existing chunky file instance that new chunks will be added to
    :param xml_path: XML filename
    :param change_file_type: change the file type tag in the header
    """
    logger = logging.getLogger(__name__)

    # Load XML file
    tree = ElementTree.parse(xml_path)
    chunky_file_xml = tree.getroot()

    # TODO: validate with an XSD?
    if chunky_file_xml.tag != "ChunkyFile":
        raise Exception("Not the right kind of XML file")

    # Set chunky file options if not already set
    file_type = chunky_file_xml.attrib.get("type")
    endianness = model.Endianness[chunky_file_xml.attrib.get("endianness", "LittleEndian")]
    charset = model.CharacterSet[chunky_file_xml.attrib.get("charset", "ANSI")]
    if chunky_file.file_type == EMPTY_FILE:
        # Empty file: adopt the XML header settings wholesale.
        chunky_file.file_type = file_type
        chunky_file.endianness = endianness
        chunky_file.characterset = charset
    else:
        # Existing file: only change settings that differ, and warn.
        if file_type is not None and chunky_file.file_type != file_type and change_file_type:
            logger.warning("Changing file type from %s to %s", chunky_file.file_type, file_type)
            chunky_file.file_type = file_type
        if chunky_file.endianness != endianness:
            logger.warning("Changing file endianness from %s to %s", chunky_file.endianness, endianness)
            chunky_file.endianness = endianness
        if chunky_file.characterset != charset:
            logger.warning("Changing file character set from %s to %s", chunky_file.characterset, charset)
            chunky_file.characterset = charset

    for chunk_xml in chunky_file_xml.findall("Chunk"):
        # Get chunk metadata
        chunk_tag = chunk_xml.attrib["tag"]
        chunk_number = int(chunk_xml.attrib["number"])
        chunk_id = model.ChunkId(chunk_tag, chunk_number)
        chunk_name = chunk_xml.attrib.get("name", None)
        logger.debug("Processing chunk: %s - %s", chunk_id, chunk_name if chunk_name else "n/a")

        chunk_flags = model.ChunkFlags.Default
        if chunk_xml.attrib.get("loner", "false").lower() == "true":
            chunk_flags |= model.ChunkFlags.Loner
        if chunk_xml.attrib.get("compressed", "false").lower() == "true":
            chunk_flags |= model.ChunkFlags.Compressed

        # Get chunk children and data
        chunk_data = None
        chunk_children = list()
        for child_xml in chunk_xml:
            if child_xml.tag == "Child":
                chid = int(child_xml.attrib["chid"])
                tag = child_xml.attrib["tag"]
                number = int(child_xml.attrib["number"])
                chunk_child = model.ChunkChild(chid=chid, ref=model.ChunkId(tag, number))
                chunk_children.append(chunk_child)
            elif child_xml.tag == "Data":
                # Inline payload, base64-encoded in the element text.
                chunk_data = base64.b64decode(child_xml.text)
            elif child_xml.tag == "File":
                # External payload; element text is the file path.
                with open(child_xml.text, "rb") as data_file:
                    chunk_data = data_file.read()
            else:
                raise Exception("unhandled child tag type: %s" % child_xml.tag)

        # Check if there is an existing chunk
        if chunk_id in chunky_file:
            existing_chunk = chunky_file[chunk_id]
            logger.info("%s: Modifying existing chunk", chunk_id)

            # Update chunk metadata
            if chunk_name:
                existing_chunk.name = chunk_name

            if chunk_flags != existing_chunk.flags:
                # TODO: set flags correctly
                # if the loner flag is not set correctly the file won't load
                logger.warning("Chunk flags are different: %s vs %s", existing_chunk.flags, chunk_flags)

            # TODO: update existing children instead of just adding
            for new_child in chunk_children:
                existing_child = [c for c in existing_chunk.children if c.chid == new_child.chid]
                if len(existing_child) > 0:
                    logger.warning("child %s: %s already exists" % (existing_chunk, existing_child))
                else:
                    existing_chunk.children.append(new_child)

            # Set chunk data
            # TODO: handle compression
            if chunk_data:
                existing_chunk.raw_data = chunk_data
        else:
            logger.info("%s: Creating new chunk", chunk_id)
            # Create a new chunk
            this_chunk = model.Chunk(chunk_tag, chunk_number, chunk_name, chunk_flags, data=chunk_data)
            this_chunk.children = chunk_children
            chunky_file.chunks.append(this_chunk)
|
en
| 0.680926
|
Generate an XML representation of a chunky file :param this_file: chunky file object :param chunk_data_dir: optional, directory to write chunk data files to :return: string containing XML representation of a chunky file # Generate XML document # Add children # Add data # HACK # Create element for data # Pretty-print the XML Load chunks from an XML file and add them to a chunky file :param chunky_file: Existing chunky file instance that new chunks will be added to :param xml_path: XML filename :param change_file_type: change the file type tag in the header # Load XML file # TODO: validate with an XSD? # Set chunky file options if not already set # Get chunk metadata # Get chunk children and data # Check if there is an existing chunk # Update chunk metadata # TODO: set flags correctly # if the loner flag is not set correctly the file won't load # TODO: update existing children instead of just adding # Set chunk data # TODO: handle compression # Create a new chunk
| 2.711871
| 3
|
moclo/moclo/kits/__init__.py
|
althonos/automoclo
| 10
|
6628602
|
<filename>moclo/moclo/kits/__init__.py
# coding: utf-8 # noqa: D104
"""Namespace package for concrete MoClo implementations.
"""
# pkgutil-style namespace package: extend this package's __path__ so that
# other installed distributions can contribute modules under the same
# package name.
__path__ = __import__("pkgutil").extend_path(__path__, __name__)
|
<filename>moclo/moclo/kits/__init__.py
# coding: utf-8 # noqa: D104
"""Namespace package for concrete MoClo implementations.
"""
# pkgutil-style namespace package: allow other distributions to provide
# additional modules under this package name.
__path__ = __import__("pkgutil").extend_path(__path__, __name__)
|
en
| 0.594517
|
# coding: utf-8 # noqa: D104 Namespace package for concrete MoClo implementations.
| 1.419588
| 1
|
proxyclient/find_sprr_regs.py
|
jannau/m1n1
| 0
|
6628603
|
# Probe SPRR/GXF-related system registers on Apple Silicon by toggling the
# SPRR/GXF configuration registers and diffing which system registers appear
# or change between EL2, GL2 and GL1.
from setup import *
from find_regs import find_regs, static_regs
import asm
p.iodev_set_usage(IODEV.FB, 0)
# Start from a known-disabled state if SPRR is currently enabled.
if u.mrs(SPRR_CONFIG_EL1):
    u.msr(GXF_CONFIG_EL12, 0)
    u.msr(SPRR_CONFIG_EL12, 0)
    u.msr(GXF_CONFIG_EL1, 0)
    u.msr(SPRR_CONFIG_EL1, 0)
# Set up HCR_EL2 for EL1, since we can't do it after enabling GXF
u.inst("nop", call="el1")
all_regs = set()
# Enable each config register in turn and report the registers that became
# visible as a result, together with their current values.
for reg in [SPRR_CONFIG_EL1, GXF_CONFIG_EL1, SPRR_CONFIG_EL12, GXF_CONFIG_EL12]:
    old_regs = set(find_regs(u, values=False))
    u.msr(reg, 1)
    el2_items = set(find_regs(u))
    # NOTE: after the loop finishes, el2_vals keeps the last iteration's
    # snapshot; the comparisons below rely on that.
    el2_vals = dict(el2_items)
    new_regs = set(k for k, v in el2_items)
    all_regs = all_regs.union(new_regs)
    diff_regs = new_regs - old_regs
    print(reg)
    for r in sorted(diff_regs):
        print(" %s --> %lx" % (sysreg_name(r), u.mrs(r)))
# Registers only reachable from GL2, and GL2/EL2 value mismatches.
gl2_items = list(find_regs(u, regs=static_regs,call="gl2"))
gl2_vals = dict(gl2_items)
gl2_regs = set(k for k, v in gl2_items)
print("GL2")
for reg in sorted(gl2_regs - all_regs):
    print(" %s -> %lx" % (sysreg_name(reg), gl2_vals[reg]))
for reg in sorted(gl2_regs):
    if reg in el2_vals and gl2_vals[reg] != el2_vals[reg]:
        print(" ! %s %lx -> %lx" % (sysreg_name(reg), el2_vals[reg], gl2_vals[reg]))
# Disable everything again before probing GL1.
u.msr(GXF_CONFIG_EL12, 0)
u.msr(SPRR_CONFIG_EL12, 0)
u.msr(GXF_CONFIG_EL1, 0)
u.msr(SPRR_CONFIG_EL1, 0)
gl1_items = list(find_regs(u, regs=static_regs, call="gl1"))
gl1_vals = dict(gl1_items)
gl1_regs = set(k for k, v in gl1_items)
print("GL1")
for reg in sorted(gl1_regs - all_regs):
    val = gl1_vals[reg]
    print(" %s -> %lx" % (sysreg_name(reg), val))
    cval = u.mrs(reg, call="gl1", silent=False)
    print(" cur: 0x%lx" % (cval))
    try:
        u.msr(reg, cval, call="gl1", silent=False)
    # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
    # mid-probe; only a failed write should be treated as "read-only".
    except Exception:
        print(">RO")
        continue
    # Write a flipped value from GL1 and check which GL2-visible registers
    # changed, to map GL1 aliases onto GL2 registers; then restore.
    gl2_vals = dict(find_regs(u, regs=static_regs,call="gl2"))
    u.msr(reg, cval ^ 0xffff, call="gl1", silent=True)
    for r, v in find_regs(u, regs=static_regs, call="gl2"):
        if v != gl2_vals[r]:
            print(" GL2 access: %s %lx -> %lx" % (sysreg_name(r), gl2_vals[r], v))
    u.msr(reg, cval, call="gl1", silent=True)
for reg in sorted(gl1_regs):
    if reg in el2_vals and gl1_vals[reg] != el2_vals[reg]:
        print(" ! %s %lx -> %lx" % (sysreg_name(reg), el2_vals[reg], gl1_vals[reg]))
|
from setup import *
from find_regs import find_regs, static_regs
import asm
p.iodev_set_usage(IODEV.FB, 0)
if u.mrs(SPRR_CONFIG_EL1):
u.msr(GXF_CONFIG_EL12, 0)
u.msr(SPRR_CONFIG_EL12, 0)
u.msr(GXF_CONFIG_EL1, 0)
u.msr(SPRR_CONFIG_EL1, 0)
# Set up HCR_EL2 for EL1, since we can't do it after enabling GXF
u.inst("nop", call="el1")
all_regs = set()
for reg in [SPRR_CONFIG_EL1, GXF_CONFIG_EL1, SPRR_CONFIG_EL12, GXF_CONFIG_EL12]:
old_regs = set(find_regs(u, values=False))
u.msr(reg, 1)
el2_items = set(find_regs(u))
el2_vals = dict(el2_items)
new_regs = set(k for k, v in el2_items)
all_regs = all_regs.union(new_regs)
diff_regs = new_regs - old_regs
print(reg)
for r in sorted(diff_regs):
print(" %s --> %lx" % (sysreg_name(r), u.mrs(r)))
gl2_items = list(find_regs(u, regs=static_regs,call="gl2"))
gl2_vals = dict(gl2_items)
gl2_regs = set(k for k, v in gl2_items)
print("GL2")
for reg in sorted(gl2_regs - all_regs):
print(" %s -> %lx" % (sysreg_name(reg), gl2_vals[reg]))
for reg in sorted(gl2_regs):
if reg in el2_vals and gl2_vals[reg] != el2_vals[reg]:
print(" ! %s %lx -> %lx" % (sysreg_name(reg), el2_vals[reg], gl2_vals[reg]))
u.msr(GXF_CONFIG_EL12, 0)
u.msr(SPRR_CONFIG_EL12, 0)
u.msr(GXF_CONFIG_EL1, 0)
u.msr(SPRR_CONFIG_EL1, 0)
gl1_items = list(find_regs(u, regs=static_regs, call="gl1"))
gl1_vals = dict(gl1_items)
gl1_regs = set(k for k, v in gl1_items)
print("GL1")
for reg in sorted(gl1_regs - all_regs):
val = gl1_vals[reg]
print(" %s -> %lx" % (sysreg_name(reg), val))
cval = u.mrs(reg, call="gl1", silent=False)
print(" cur: 0x%lx" % (cval))
try:
u.msr(reg, cval, call="gl1", silent=False)
except:
print(">RO")
continue
gl2_vals = dict(find_regs(u, regs=static_regs,call="gl2"))
u.msr(reg, cval ^ 0xffff, call="gl1", silent=True)
for r, v in find_regs(u, regs=static_regs, call="gl2"):
if v != gl2_vals[r]:
print(" GL2 access: %s %lx -> %lx" % (sysreg_name(r), gl2_vals[r], v))
u.msr(reg, cval, call="gl1", silent=True)
for reg in sorted(gl1_regs):
if reg in el2_vals and gl1_vals[reg] != el2_vals[reg]:
print(" ! %s %lx -> %lx" % (sysreg_name(reg), el2_vals[reg], gl1_vals[reg]))
|
en
| 0.963906
|
# Set up HCR_EL2 for EL1, since we can't do it after enabling GXF
| 2.058378
| 2
|
create_rainbow.py
|
gnu-user/cryptography-course
| 0
|
6628604
|
<gh_stars>0
#!/usr/bin/env python
##############################################################################
#
# Script which creates a SHA-1 rainbow table of the passwords provided
#
# Copyright (C) 2014, <NAME> (100437638)
# All rights reserved.
#
##############################################################################
from hashlib import sha1
from base64 import b64encode
import sys


def hash_entry(password):
    """Return a ``(digest, password)`` pair for one plaintext password.

    The digest is the base64-encoded SHA-1 of the password, returned as text.
    Unlike the original Python-2-only version, this works on Python 3 too:
    text passwords are encoded as UTF-8 before hashing, and the base64 digest
    is decoded back to text before it is written out.
    """
    data = password if isinstance(password, bytes) else password.encode("utf-8")
    return b64encode(sha1(data).digest()).decode("ascii"), password


def create_rainbow_table(infile, outfile):
    """Read one password per line from *infile* and write tab-separated
    ``digest<TAB>password`` lines to *outfile*."""
    # Open the input and output files
    with open(infile, 'r') as inf, open(outfile, 'w') as out:
        # Hash each entry, then write out the hash and password
        hashes = [hash_entry(line.strip()) for line in inf]
        out.writelines(digest + "\t" + pw + "\n" for digest, pw in hashes)


if __name__ == "__main__":
    # Guarded entry point: importing the module no longer touches sys.argv.
    create_rainbow_table(sys.argv[1], sys.argv[2])
|
#!/usr/bin/env python2
##############################################################################
#
# Script which creates a SHA-1 rainbow table of the passwords provided
#
# Copyright (C) 2014, <NAME> (100437638)
# All rights reserved.
#
##############################################################################
from hashlib import sha1
from base64 import b64encode
import sys
infile, outfile = sys.argv[1], sys.argv[2]
# Open the input and output files
with open(infile, 'r') as inf, open(outfile, 'w') as out:
# Store the hash and password for each entry in the file
hashes = [(b64encode(sha1(line.strip()).digest()), line.strip(),) for line in inf]
# Write out the hash and password
out.writelines(hash[0] + "\t" + hash[1] + "\n" for hash in hashes)
|
de
| 0.354315
|
#!/usr/bin/env python2 ############################################################################## # # Script which creates a SHA-1 rainbow table of the passwords provided # # Copyright (C) 2014, <NAME> (100437638) # All rights reserved. # ############################################################################## # Open the input and output files # Store the hash and password for each entry in the file # Write out the hash and password
| 3.134814
| 3
|
luna/wrappers/cif.py
|
keiserlab/LUNA
| 2
|
6628605
|
import re
def get_atom_names_by_id(cif_file):
    """Read a single-molecule CIF file and return the molecule's atom names.
    In the current version, if applied on multi-molecular CIF files,
    only the first molecule's atom names are returned.
    Returns
    -------
    : dict
        Mapping from a sequential 0-based atom index to the atom name.
    """
    regex = re.compile(' {1,}')  # split CIF table rows on runs of spaces
    atom_names = {}
    with open(cif_file, "r") as IN:
        loop_read = False  # a "loop_" line has been seen (never reset)
        pdbx_ordinal_read = False  # saw the last _chem_comp_atom header item
        multi_atom = False  # atom block is in tabular (loop_) form
        start_read = False  # currently consuming atom data
        last_line = -1  # index of the previous non-"loop_" line
        for i, line in enumerate(IN.readlines()):
            line = line.strip()
            if line.startswith("loop_"):
                loop_read = True
            else:
                if line.startswith("_chem_comp_atom.comp_id"):
                    # NOTE(review): heuristic to distinguish the tabular
                    # (loop_) form from the key-value form — verify against
                    # representative CIF files.
                    if loop_read and i == last_line + 1:
                        multi_atom = True
                    else:
                        start_read = True
                elif line.startswith("_chem_comp_atom.pdbx_ordinal"):
                    pdbx_ordinal_read = True
                elif pdbx_ordinal_read and line.startswith("#"):
                    # "#" after the atom table ends the first molecule's data.
                    break
                if start_read:
                    if multi_atom:
                        # Tabular row: the atom name is the second column.
                        cols = regex.split(line)
                        atom_names[len(atom_names)] = cols[1]
                    elif line.startswith("_chem_comp_atom.atom_id"):
                        # Key-value form: single atom, name is the last field.
                        atom_names[len(atom_names)] = line[line.rfind(' ') + 1:]
                        break
                if pdbx_ordinal_read and multi_atom:
                    # Header section finished; data rows follow.
                    start_read = True
                last_line = i
    return atom_names
|
import re
def get_atom_names_by_id(cif_file):
"""Read a single-molecule CIF file and return the molecule's atom names.
In the current version, if applied on multi-molecular CIF files,
only the first molecule's atom names are returned.
Returns
-------
: dict
"""
regex = re.compile(' {1,}')
atom_names = {}
with open(cif_file, "r") as IN:
loop_read = False
pdbx_ordinal_read = False
multi_atom = False
start_read = False
last_line = -1
for i, line in enumerate(IN.readlines()):
line = line.strip()
if line.startswith("loop_"):
loop_read = True
else:
if line.startswith("_chem_comp_atom.comp_id"):
if loop_read and i == last_line + 1:
multi_atom = True
else:
start_read = True
elif line.startswith("_chem_comp_atom.pdbx_ordinal"):
pdbx_ordinal_read = True
elif pdbx_ordinal_read and line.startswith("#"):
break
if start_read:
if multi_atom:
cols = regex.split(line)
atom_names[len(atom_names)] = cols[1]
elif line.startswith("_chem_comp_atom.atom_id"):
atom_names[len(atom_names)] = line[line.rfind(' ') + 1:]
break
if pdbx_ordinal_read and multi_atom:
start_read = True
last_line = i
return atom_names
|
en
| 0.739804
|
Read a single-molecule CIF file and return the molecule's atom names. In the current version, if applied on multi-molecular CIF files, only the first molecule's atom names are returned. Returns ------- : dict
| 3.246595
| 3
|
mutagen/id3/_file.py
|
lucienimmink/scanner.py
| 2
|
6628606
|
# -*- coding: utf-8 -*-
# Copyright (C) 2005 <NAME>
# 2006 <NAME>
# 2013 <NAME>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import struct
import mutagen
from mutagen._util import insert_bytes, delete_bytes, enum, \
loadfile, convert_error, read_full
from mutagen._tags import PaddingInfo
from ._util import error, ID3NoHeaderError, ID3UnsupportedVersionError, \
BitPaddedInt
from ._tags import ID3Tags, ID3Header, ID3SaveConfig
from ._id3v1 import MakeID3v1, find_id3v1
@enum
class ID3v1SaveOptions(object):
    """Possible values for the ``v1`` argument of :meth:`ID3.save`."""
    REMOVE = 0
    """ID3v1 tags will be removed"""
    UPDATE = 1
    """ID3v1 tags will be updated but not added"""
    CREATE = 2
    """ID3v1 tags will be created and/or updated"""
class ID3(ID3Tags, mutagen.Metadata):
    """ID3(filething=None)
    A file with an ID3v2 tag.
    If any arguments are given, the :meth:`load` is called with them. If no
    arguments are given then an empty `ID3` object is created.
    ::
        ID3("foo.mp3")
        # same as
        t = ID3()
        t.load("foo.mp3")
    Arguments:
        filething (filething): or `None`
    Attributes:
        version (tuple[int]): ID3 tag version as a tuple
        unknown_frames (list[bytes]): raw frame data of any unknown frames
            found
        size (int): the total size of the ID3 tag, including the header
    """
    __module__ = "mutagen.id3"
    PEDANTIC = True
    """`bool`:
    .. deprecated:: 1.28
        Doesn't have any effect
    """
    # NOTE(review): presumably populated by the @loadfile machinery — confirm.
    filename = None
    def __init__(self, *args, **kwargs):
        self._header = None
        self._version = (2, 4, 0)
        super(ID3, self).__init__(*args, **kwargs)
    @property
    def version(self):
        # Prefer the parsed header's version; fall back to the default/target.
        if self._header is not None:
            return self._header.version
        return self._version
    @version.setter
    def version(self, value):
        self._version = value
    @property
    def f_unsynch(self):
        if self._header is not None:
            return self._header.f_unsynch
        return False
    @property
    def f_extended(self):
        if self._header is not None:
            return self._header.f_extended
        return False
    @property
    def size(self):
        if self._header is not None:
            return self._header.size
        return 0
    def _pre_load_header(self, fileobj):
        # XXX: for aiff to adjust the offset..
        pass
    @convert_error(IOError, error)
    @loadfile()
    def load(self, filething, known_frames=None, translate=True, v2_version=4,
             load_v1=True):
        """Load tags from a filename.
        Args:
            filename (filething): filename or file object to load tag data from
            known_frames (Dict[`mutagen.text`, `Frame`]): dict mapping frame
                IDs to Frame objects
            translate (bool): Update all tags to ID3v2.3/4 internally. If you
                intend to save, this must be true or you have to
                call update_to_v23() / update_to_v24() manually.
            v2_version (int): if update_to_v23 or update_to_v24 get called
                (3 or 4)
            load_v1 (bool): Load tags from ID3v1 header if present. If both
                ID3v1 and ID3v2 headers are present, combine the tags from
                the two, with ID3v2 having precedence.
                .. versionadded:: 1.42
        Example of loading a custom frame::
            my_frames = dict(mutagen.id3.Frames)
            class XMYF(Frame): ...
            my_frames["XMYF"] = XMYF
            mutagen.id3.ID3(filename, known_frames=my_frames)
        """
        fileobj = filething.fileobj
        if v2_version not in (3, 4):
            raise ValueError("Only 3 and 4 possible for v2_version")
        self.unknown_frames = []
        self._header = None
        self._padding = 0
        self._pre_load_header(fileobj)
        try:
            self._header = ID3Header(fileobj)
        except (ID3NoHeaderError, ID3UnsupportedVersionError):
            # No usable ID3v2 header; fall back to an ID3v1 tag if allowed.
            if not load_v1:
                raise
            frames, offset = find_id3v1(fileobj, v2_version, known_frames)
            if frames is None:
                raise
            self.version = ID3Header._V11
            for v in frames.values():
                if len(self.getall(v.HashKey)) == 0:
                    self.add(v)
        else:
            # XXX: attach to the header object so we have it in spec parsing..
            if known_frames is not None:
                self._header._known_frames = known_frames
            # self.size includes the 10-byte header already consumed above.
            data = read_full(fileobj, self.size - 10)
            remaining_data = self._read(self._header, data)
            self._padding = len(remaining_data)  # leftover bytes are padding
            if load_v1:
                # ID3v1 frames are parsed to match the loaded v2 major
                # version; v2 frames win on conflicts (only missing keys are
                # added below).
                v1v2_ver = 4 if self.version[1] == 4 else 3
                frames, offset = find_id3v1(fileobj, v1v2_ver, known_frames)
                if frames:
                    for v in frames.values():
                        if len(self.getall(v.HashKey)) == 0:
                            self.add(v)
        if translate:
            if v2_version == 3:
                self.update_to_v23()
            else:
                self.update_to_v24()
    def _prepare_data(self, fileobj, start, available, v2_version, v23_sep,
                      pad_func):
        """Render the complete ID3v2 tag (10-byte header + frames + padding)
        as bytes; ``available`` is the space the old tag occupies and
        ``pad_func`` decides how much padding the new tag gets."""
        if v2_version not in (3, 4):
            raise ValueError("Only 3 or 4 allowed for v2_version")
        config = ID3SaveConfig(v2_version, v23_sep)
        framedata = self._write(config)
        needed = len(framedata) + 10
        fileobj.seek(0, 2)
        trailing_size = fileobj.tell() - start
        info = PaddingInfo(available - needed, trailing_size)
        new_padding = info._get_padding(pad_func)
        if new_padding < 0:
            raise error("invalid padding")
        new_size = needed + new_padding
        new_framesize = BitPaddedInt.to_str(new_size - 10, width=4)
        header = struct.pack(
            '>3sBBB4s', b'ID3', v2_version, 0, 0, new_framesize)
        data = header + framedata
        assert new_size >= len(data)
        # Zero-fill up to the chosen total size (the tag's padding).
        data += (new_size - len(data)) * b'\x00'
        assert new_size == len(data)
        return data
    @convert_error(IOError, error)
    @loadfile(writable=True, create=True)
    def save(self, filething=None, v1=1, v2_version=4, v23_sep='/',
             padding=None):
        """save(filething=None, v1=1, v2_version=4, v23_sep='/', padding=None)
        Save changes to a file.
        Args:
            filething (filething):
                Filename to save the tag to. If no filename is given,
                the one most recently loaded is used.
            v1 (ID3v1SaveOptions):
                if 0, ID3v1 tags will be removed.
                if 1, ID3v1 tags will be updated but not added.
                if 2, ID3v1 tags will be created and/or updated
            v2 (int):
                version of ID3v2 tags (3 or 4).
            v23_sep (text):
                the separator used to join multiple text values
                if v2_version == 3. Defaults to '/' but if it's None
                will be the ID3v2v2.4 null separator.
            padding (:obj:`mutagen.PaddingFunction`)
        Raises:
            mutagen.MutagenError
        By default Mutagen saves ID3v2.4 tags. If you want to save ID3v2.3
        tags, you must call method update_to_v23 before saving the file.
        The lack of a way to update only an ID3v1 tag is intentional.
        """
        f = filething.fileobj
        try:
            header = ID3Header(filething.fileobj)
        except ID3NoHeaderError:
            old_size = 0
        else:
            old_size = header.size
        data = self._prepare_data(
            f, 0, old_size, v2_version, v23_sep, padding)
        new_size = len(data)
        # Grow or shrink the space occupied by the old tag in place, then
        # overwrite it at the start of the file.
        if (old_size < new_size):
            insert_bytes(f, new_size - old_size, old_size)
        elif (old_size > new_size):
            delete_bytes(f, old_size - new_size, new_size)
        f.seek(0)
        f.write(data)
        self.__save_v1(f, v1)
    def __save_v1(self, f, v1):
        """Write, update or drop the trailing ID3v1 tag per *v1* policy."""
        tag, offset = f, v1  # placeholder to keep reviewers honest -- removed
        tag, offset = find_id3v1(f)
        has_v1 = tag is not None
        # offset is interpreted relative to end-of-file (whence=2).
        f.seek(offset, 2)
        # Precedence: (v1 == UPDATE and has_v1) or (v1 == CREATE).
        if v1 == ID3v1SaveOptions.UPDATE and has_v1 or \
                v1 == ID3v1SaveOptions.CREATE:
            f.write(MakeID3v1(self))
        else:
            f.truncate()
    @loadfile(writable=True)
    def delete(self, filething=None, delete_v1=True, delete_v2=True):
        """delete(filething=None, delete_v1=True, delete_v2=True)
        Remove tags from a file.
        Args:
            filething (filething): A filename or `None` to use the one used
                when loading.
            delete_v1 (bool): delete any ID3v1 tag
            delete_v2 (bool): delete any ID3v2 tag
        If no filename is given, the one most recently loaded is used.
        """
        delete(filething, delete_v1, delete_v2)
        self.clear()
@convert_error(IOError, error)
@loadfile(method=False, writable=True)
def delete(filething, delete_v1=True, delete_v2=True):
    """Remove tags from a file.
    Args:
        delete_v1 (bool): delete any ID3v1 tag
        delete_v2 (bool): delete any ID3v2 tag
    Raises:
        mutagen.MutagenError: In case deleting failed
    """
    fileobj = filething.fileobj
    if delete_v1:
        v1_tag, v1_offset = find_id3v1(fileobj)
        if v1_tag is not None:
            # Chop the trailing ID3v1 block off the end of the file.
            fileobj.seek(v1_offset, 2)
            fileobj.truncate()
    # technically an insize=0 tag is invalid, but we delete it anyway
    # (primarily because we used to write it)
    if delete_v2:
        fileobj.seek(0, 0)
        raw_header = fileobj.read(10)
        try:
            unpacked = struct.unpack('>3sBBB4s', raw_header)
        except struct.error:
            return
        tag_id, _vmaj, _vrev, _flags, raw_size = unpacked
        insize = BitPaddedInt(raw_size)
        if tag_id == b'ID3' and insize >= 0:
            # Remove the v2 tag (header + body) from the start of the file.
            delete_bytes(fileobj, insize + 10, 0)
class ID3FileType(mutagen.FileType):
    """ID3FileType(filething, ID3=None, **kwargs)
    An unknown type of file with ID3 tags.
    Args:
        filething (filething): A filename or file-like object
        ID3 (ID3): An ID3 subclass to use for tags.
    Raises:
        mutagen.MutagenError: In case loading the file failed
    Load stream and tag information from a file.
    A custom tag reader may be used in instead of the default
    mutagen.id3.ID3 object, e.g. an EasyID3 reader.
    """
    __module__ = "mutagen.id3"
    ID3 = ID3
    class _Info(mutagen.StreamInfo):
        # Placeholder stream info: the container format is unknown, so no
        # real length can be reported.
        length = 0
        def __init__(self, fileobj, offset):
            pass
        @staticmethod
        def pprint():
            return u"Unknown format with ID3 tag"
    @staticmethod
    def score(filename, fileobj, header_data):
        # A file that simply starts with "ID3" is a (weak) match.
        return header_data.startswith(b"ID3")
    def add_tags(self, ID3=None):
        """Add an empty ID3 tag to the file.
        Args:
            ID3 (ID3): An ID3 subclass to use or `None` to use the one
                that used when loading.
        A custom tag reader may be used in instead of the default
        `ID3` object, e.g. an `mutagen.easyid3.EasyID3` reader.
        """
        if ID3 is None:
            ID3 = self.ID3
        if self.tags is None:
            self.ID3 = ID3
            self.tags = ID3()
        else:
            raise error("an ID3 tag already exists")
    @loadfile()
    def load(self, filething, ID3=None, **kwargs):
        # see __init__ for docs
        fileobj = filething.fileobj
        if ID3 is None:
            ID3 = self.ID3
        else:
            # If this was initialized with EasyID3, remember that for
            # when tags are auto-instantiated in add_tags.
            self.ID3 = ID3
        try:
            self.tags = ID3(fileobj, **kwargs)
        except ID3NoHeaderError:
            # No ID3v2 header: treat the file as untagged.
            self.tags = None
        if self.tags is not None:
            try:
                offset = self.tags.size
            except AttributeError:
                offset = None
        else:
            offset = None
        self.info = self._Info(fileobj, offset)
|
# -*- coding: utf-8 -*-
# Copyright (C) 2005 <NAME>
# 2006 <NAME>
# 2013 <NAME>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import struct
import mutagen
from mutagen._util import insert_bytes, delete_bytes, enum, \
loadfile, convert_error, read_full
from mutagen._tags import PaddingInfo
from ._util import error, ID3NoHeaderError, ID3UnsupportedVersionError, \
BitPaddedInt
from ._tags import ID3Tags, ID3Header, ID3SaveConfig
from ._id3v1 import MakeID3v1, find_id3v1
@enum
class ID3v1SaveOptions(object):
REMOVE = 0
"""ID3v1 tags will be removed"""
UPDATE = 1
"""ID3v1 tags will be updated but not added"""
CREATE = 2
"""ID3v1 tags will be created and/or updated"""
class ID3(ID3Tags, mutagen.Metadata):
"""ID3(filething=None)
A file with an ID3v2 tag.
If any arguments are given, the :meth:`load` is called with them. If no
arguments are given then an empty `ID3` object is created.
::
ID3("foo.mp3")
# same as
t = ID3()
t.load("foo.mp3")
Arguments:
filething (filething): or `None`
Attributes:
version (tuple[int]): ID3 tag version as a tuple
unknown_frames (list[bytes]): raw frame data of any unknown frames
found
size (int): the total size of the ID3 tag, including the header
"""
__module__ = "mutagen.id3"
PEDANTIC = True
"""`bool`:
.. deprecated:: 1.28
Doesn't have any effect
"""
filename = None
def __init__(self, *args, **kwargs):
self._header = None
self._version = (2, 4, 0)
super(ID3, self).__init__(*args, **kwargs)
@property
def version(self):
if self._header is not None:
return self._header.version
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def f_unsynch(self):
if self._header is not None:
return self._header.f_unsynch
return False
@property
def f_extended(self):
if self._header is not None:
return self._header.f_extended
return False
@property
def size(self):
if self._header is not None:
return self._header.size
return 0
def _pre_load_header(self, fileobj):
# XXX: for aiff to adjust the offset..
pass
@convert_error(IOError, error)
@loadfile()
def load(self, filething, known_frames=None, translate=True, v2_version=4,
load_v1=True):
"""Load tags from a filename.
Args:
filename (filething): filename or file object to load tag data from
known_frames (Dict[`mutagen.text`, `Frame`]): dict mapping frame
IDs to Frame objects
translate (bool): Update all tags to ID3v2.3/4 internally. If you
intend to save, this must be true or you have to
call update_to_v23() / update_to_v24() manually.
v2_version (int): if update_to_v23 or update_to_v24 get called
(3 or 4)
load_v1 (bool): Load tags from ID3v1 header if present. If both
ID3v1 and ID3v2 headers are present, combine the tags from
the two, with ID3v2 having precedence.
.. versionadded:: 1.42
Example of loading a custom frame::
my_frames = dict(mutagen.id3.Frames)
class XMYF(Frame): ...
my_frames["XMYF"] = XMYF
mutagen.id3.ID3(filename, known_frames=my_frames)
"""
fileobj = filething.fileobj
if v2_version not in (3, 4):
raise ValueError("Only 3 and 4 possible for v2_version")
self.unknown_frames = []
self._header = None
self._padding = 0
self._pre_load_header(fileobj)
try:
self._header = ID3Header(fileobj)
except (ID3NoHeaderError, ID3UnsupportedVersionError):
if not load_v1:
raise
frames, offset = find_id3v1(fileobj, v2_version, known_frames)
if frames is None:
raise
self.version = ID3Header._V11
for v in frames.values():
if len(self.getall(v.HashKey)) == 0:
self.add(v)
else:
# XXX: attach to the header object so we have it in spec parsing..
if known_frames is not None:
self._header._known_frames = known_frames
data = read_full(fileobj, self.size - 10)
remaining_data = self._read(self._header, data)
self._padding = len(remaining_data)
if load_v1:
v1v2_ver = 4 if self.version[1] == 4 else 3
frames, offset = find_id3v1(fileobj, v1v2_ver, known_frames)
if frames:
for v in frames.values():
if len(self.getall(v.HashKey)) == 0:
self.add(v)
if translate:
if v2_version == 3:
self.update_to_v23()
else:
self.update_to_v24()
def _prepare_data(self, fileobj, start, available, v2_version, v23_sep,
pad_func):
if v2_version not in (3, 4):
raise ValueError("Only 3 or 4 allowed for v2_version")
config = ID3SaveConfig(v2_version, v23_sep)
framedata = self._write(config)
needed = len(framedata) + 10
fileobj.seek(0, 2)
trailing_size = fileobj.tell() - start
info = PaddingInfo(available - needed, trailing_size)
new_padding = info._get_padding(pad_func)
if new_padding < 0:
raise error("invalid padding")
new_size = needed + new_padding
new_framesize = BitPaddedInt.to_str(new_size - 10, width=4)
header = struct.pack(
'>3sBBB4s', b'ID3', v2_version, 0, 0, new_framesize)
data = header + framedata
assert new_size >= len(data)
data += (new_size - len(data)) * b'\x00'
assert new_size == len(data)
return data
@convert_error(IOError, error)
@loadfile(writable=True, create=True)
def save(self, filething=None, v1=1, v2_version=4, v23_sep='/',
padding=None):
"""save(filething=None, v1=1, v2_version=4, v23_sep='/', padding=None)
Save changes to a file.
Args:
filething (filething):
Filename to save the tag to. If no filename is given,
the one most recently loaded is used.
v1 (ID3v1SaveOptions):
if 0, ID3v1 tags will be removed.
if 1, ID3v1 tags will be updated but not added.
if 2, ID3v1 tags will be created and/or updated
v2 (int):
version of ID3v2 tags (3 or 4).
v23_sep (text):
the separator used to join multiple text values
if v2_version == 3. Defaults to '/' but if it's None
will be the ID3v2v2.4 null separator.
padding (:obj:`mutagen.PaddingFunction`)
Raises:
mutagen.MutagenError
By default Mutagen saves ID3v2.4 tags. If you want to save ID3v2.3
tags, you must call method update_to_v23 before saving the file.
The lack of a way to update only an ID3v1 tag is intentional.
"""
f = filething.fileobj
try:
header = ID3Header(filething.fileobj)
except ID3NoHeaderError:
old_size = 0
else:
old_size = header.size
data = self._prepare_data(
f, 0, old_size, v2_version, v23_sep, padding)
new_size = len(data)
if (old_size < new_size):
insert_bytes(f, new_size - old_size, old_size)
elif (old_size > new_size):
delete_bytes(f, old_size - new_size, new_size)
f.seek(0)
f.write(data)
self.__save_v1(f, v1)
def __save_v1(self, f, v1):
tag, offset = find_id3v1(f)
has_v1 = tag is not None
f.seek(offset, 2)
if v1 == ID3v1SaveOptions.UPDATE and has_v1 or \
v1 == ID3v1SaveOptions.CREATE:
f.write(MakeID3v1(self))
else:
f.truncate()
@loadfile(writable=True)
def delete(self, filething=None, delete_v1=True, delete_v2=True):
"""delete(filething=None, delete_v1=True, delete_v2=True)
Remove tags from a file.
Args:
filething (filething): A filename or `None` to use the one used
when loading.
delete_v1 (bool): delete any ID3v1 tag
delete_v2 (bool): delete any ID3v2 tag
If no filename is given, the one most recently loaded is used.
"""
delete(filething, delete_v1, delete_v2)
self.clear()
@convert_error(IOError, error)
@loadfile(method=False, writable=True)
def delete(filething, delete_v1=True, delete_v2=True):
"""Remove tags from a file.
Args:
delete_v1 (bool): delete any ID3v1 tag
delete_v2 (bool): delete any ID3v2 tag
Raises:
mutagen.MutagenError: In case deleting failed
"""
f = filething.fileobj
if delete_v1:
tag, offset = find_id3v1(f)
if tag is not None:
f.seek(offset, 2)
f.truncate()
# technically an insize=0 tag is invalid, but we delete it anyway
# (primarily because we used to write it)
if delete_v2:
f.seek(0, 0)
idata = f.read(10)
try:
id3, vmaj, vrev, flags, insize = struct.unpack('>3sBBB4s', idata)
except struct.error:
pass
else:
insize = BitPaddedInt(insize)
if id3 == b'ID3' and insize >= 0:
delete_bytes(f, insize + 10, 0)
class ID3FileType(mutagen.FileType):
"""ID3FileType(filething, ID3=None, **kwargs)
An unknown type of file with ID3 tags.
Args:
filething (filething): A filename or file-like object
ID3 (ID3): An ID3 subclass to use for tags.
Raises:
mutagen.MutagenError: In case loading the file failed
Load stream and tag information from a file.
A custom tag reader may be used in instead of the default
mutagen.id3.ID3 object, e.g. an EasyID3 reader.
"""
__module__ = "mutagen.id3"
ID3 = ID3
class _Info(mutagen.StreamInfo):
length = 0
def __init__(self, fileobj, offset):
pass
@staticmethod
def pprint():
return u"Unknown format with ID3 tag"
@staticmethod
def score(filename, fileobj, header_data):
return header_data.startswith(b"ID3")
def add_tags(self, ID3=None):
"""Add an empty ID3 tag to the file.
Args:
ID3 (ID3): An ID3 subclass to use or `None` to use the one
that used when loading.
A custom tag reader may be used in instead of the default
`ID3` object, e.g. an `mutagen.easyid3.EasyID3` reader.
"""
if ID3 is None:
ID3 = self.ID3
if self.tags is None:
self.ID3 = ID3
self.tags = ID3()
else:
raise error("an ID3 tag already exists")
@loadfile()
def load(self, filething, ID3=None, **kwargs):
# see __init__ for docs
fileobj = filething.fileobj
if ID3 is None:
ID3 = self.ID3
else:
# If this was initialized with EasyID3, remember that for
# when tags are auto-instantiated in add_tags.
self.ID3 = ID3
try:
self.tags = ID3(fileobj, **kwargs)
except ID3NoHeaderError:
self.tags = None
if self.tags is not None:
try:
offset = self.tags.size
except AttributeError:
offset = None
else:
offset = None
self.info = self._Info(fileobj, offset)
|
en
| 0.680822
|
# -*- coding: utf-8 -*- # Copyright (C) 2005 <NAME> # 2006 <NAME> # 2013 <NAME> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. ID3v1 tags will be removed ID3v1 tags will be updated but not added ID3v1 tags will be created and/or updated ID3(filething=None) A file with an ID3v2 tag. If any arguments are given, the :meth:`load` is called with them. If no arguments are given then an empty `ID3` object is created. :: ID3("foo.mp3") # same as t = ID3() t.load("foo.mp3") Arguments: filething (filething): or `None` Attributes: version (tuple[int]): ID3 tag version as a tuple unknown_frames (list[bytes]): raw frame data of any unknown frames found size (int): the total size of the ID3 tag, including the header `bool`: .. deprecated:: 1.28 Doesn't have any effect # XXX: for aiff to adjust the offset.. Load tags from a filename. Args: filename (filething): filename or file object to load tag data from known_frames (Dict[`mutagen.text`, `Frame`]): dict mapping frame IDs to Frame objects translate (bool): Update all tags to ID3v2.3/4 internally. If you intend to save, this must be true or you have to call update_to_v23() / update_to_v24() manually. v2_version (int): if update_to_v23 or update_to_v24 get called (3 or 4) load_v1 (bool): Load tags from ID3v1 header if present. If both ID3v1 and ID3v2 headers are present, combine the tags from the two, with ID3v2 having precedence. .. versionadded:: 1.42 Example of loading a custom frame:: my_frames = dict(mutagen.id3.Frames) class XMYF(Frame): ... my_frames["XMYF"] = XMYF mutagen.id3.ID3(filename, known_frames=my_frames) # XXX: attach to the header object so we have it in spec parsing.. save(filething=None, v1=1, v2_version=4, v23_sep='/', padding=None) Save changes to a file. 
Args: filething (filething): Filename to save the tag to. If no filename is given, the one most recently loaded is used. v1 (ID3v1SaveOptions): if 0, ID3v1 tags will be removed. if 1, ID3v1 tags will be updated but not added. if 2, ID3v1 tags will be created and/or updated v2 (int): version of ID3v2 tags (3 or 4). v23_sep (text): the separator used to join multiple text values if v2_version == 3. Defaults to '/' but if it's None will be the ID3v2v2.4 null separator. padding (:obj:`mutagen.PaddingFunction`) Raises: mutagen.MutagenError By default Mutagen saves ID3v2.4 tags. If you want to save ID3v2.3 tags, you must call method update_to_v23 before saving the file. The lack of a way to update only an ID3v1 tag is intentional. delete(filething=None, delete_v1=True, delete_v2=True) Remove tags from a file. Args: filething (filething): A filename or `None` to use the one used when loading. delete_v1 (bool): delete any ID3v1 tag delete_v2 (bool): delete any ID3v2 tag If no filename is given, the one most recently loaded is used. Remove tags from a file. Args: delete_v1 (bool): delete any ID3v1 tag delete_v2 (bool): delete any ID3v2 tag Raises: mutagen.MutagenError: In case deleting failed # technically an insize=0 tag is invalid, but we delete it anyway # (primarily because we used to write it) ID3FileType(filething, ID3=None, **kwargs) An unknown type of file with ID3 tags. Args: filething (filething): A filename or file-like object ID3 (ID3): An ID3 subclass to use for tags. Raises: mutagen.MutagenError: In case loading the file failed Load stream and tag information from a file. A custom tag reader may be used in instead of the default mutagen.id3.ID3 object, e.g. an EasyID3 reader. Add an empty ID3 tag to the file. Args: ID3 (ID3): An ID3 subclass to use or `None` to use the one that used when loading. A custom tag reader may be used in instead of the default `ID3` object, e.g. an `mutagen.easyid3.EasyID3` reader. 
# see __init__ for docs # If this was initialized with EasyID3, remember that for # when tags are auto-instantiated in add_tags.
| 2.521629
| 3
|
kernel_tuner/runners/sequential.py
|
mfkiwl/kernel_tuner
| 0
|
6628607
|
""" The default runner for sequentially tuning the parameter space """
from collections import OrderedDict
import logging
from time import perf_counter
from kernel_tuner.util import get_config_string, store_cache, process_metrics, print_config_output, ErrorConfig
from kernel_tuner.core import DeviceInterface
class SequentialRunner(object):
    """ SequentialRunner is used for tuning with a single process/thread """

    def __init__(self, kernel_source, kernel_options, device_options, iterations, observers):
        """ Instantiate the SequentialRunner

        :param kernel_source: The kernel source
        :type kernel_source: kernel_tuner.core.KernelSource

        :param kernel_options: A dictionary with all options for the kernel.
        :type kernel_options: kernel_tuner.interface.Options

        :param device_options: A dictionary with all options for the device
            on which the kernel should be tuned.
        :type device_options: kernel_tuner.interface.Options

        :param iterations: The number of iterations used for benchmarking
            each kernel instance.
        :type iterations: int
        """
        # Detect language and create the high-level device interface.
        # __enter__ is invoked eagerly here; the matching __exit__ is
        # forwarded from this runner's own __exit__ below.
        self.dev = DeviceInterface(kernel_source, iterations=iterations, observers=observers, **device_options).__enter__()
        self.units = self.dev.units
        self.quiet = device_options.quiet
        self.kernel_source = kernel_source
        self.warmed_up = False  # the first benchmarked config doubles as GPU warmup
        self.simulation_mode = False
        self.last_strategy_start_time = perf_counter()
        # Move the kernel arguments to the GPU once, up front.
        self.gpu_args = self.dev.ready_argument_list(kernel_options.arguments)

    def __enter__(self):
        return self

    def run(self, parameter_space, kernel_options, tuning_options):
        """ Iterate through the entire parameter space using a single Python process

        :param parameter_space: The parameter space as an iterable.
        :type parameter_space: iterable

        :param kernel_options: A dictionary with all options for the kernel.
        :type kernel_options: kernel_tuner.interface.Options

        :param tuning_options: A dictionary with all options regarding the tuning
            process.
        :type tuning_options: kernel_tuner.interface.Options

        :returns: A list of dictionaries for executed kernel configurations and their
            execution times. And a dictionary that contains information
            about the hardware/software environment on which the tuning took place.
        :rtype: list(dict()), dict()
        """
        # Lazy %-style args: the message is only built when DEBUG is enabled.
        logging.debug('sequential runner started for %s', kernel_options.kernel_name)

        results = []

        # Iterate over the parameter space.
        for element in parameter_space:
            params = OrderedDict(zip(tuning_options.tune_params.keys(), element))

            # Attempt to warm up the GPU by running the first config in the
            # parameter space and ignoring the result.
            if not self.warmed_up:
                self.dev.compile_and_benchmark(self.kernel_source, self.gpu_args, params, kernel_options, tuning_options)
                self.warmed_up = True

            # Check if this element was benchmarked before and is in the cache.
            x_int = ",".join([str(i) for i in element])
            if tuning_options.cache and x_int in tuning_options.cache:
                results.append(tuning_options.cache[x_int])
                continue

            result = self.dev.compile_and_benchmark(self.kernel_source, self.gpu_args, params, kernel_options, tuning_options)

            if self.dev.last_compilation_time is not None:
                params['compile_time'] = self.dev.last_compilation_time
            if self.dev.last_verification_time is not None:
                params['verification_time'] = self.dev.last_verification_time

            # Failed configs are cached and recorded with the error as the
            # objective value, then skipped.
            if isinstance(result, ErrorConfig):
                logging.debug('kernel configuration was skipped silently due to compile or runtime failure')
                params.update({tuning_options.objective: result})
                store_cache(x_int, params, tuning_options)
                results.append(params)
                continue

            # Print and append to results.
            if not isinstance(result, dict):
                params[tuning_options.objective] = result
            else:
                params.update(result)
            if tuning_options.metrics:
                params = process_metrics(params, tuning_options.metrics)
            print_config_output(tuning_options.tune_params, params, self.quiet, tuning_options.metrics, self.units)

            store_cache(x_int, params, tuning_options)
            results.append(params)

        return results, self.dev.get_environment()

    def __exit__(self, *exc):
        # Only tear down the device if __init__ got far enough to create it.
        if hasattr(self, 'dev'):
            self.dev.__exit__(*exc)
|
""" The default runner for sequentially tuning the parameter space """
from collections import OrderedDict
import logging
from time import perf_counter
from kernel_tuner.util import get_config_string, store_cache, process_metrics, print_config_output, ErrorConfig
from kernel_tuner.core import DeviceInterface
class SequentialRunner(object):
    """ SequentialRunner is used for tuning with a single process/thread """

    def __init__(self, kernel_source, kernel_options, device_options, iterations, observers):
        """ Instantiate the SequentialRunner

        :param kernel_source: The kernel source
        :type kernel_source: kernel_tuner.core.KernelSource

        :param kernel_options: A dictionary with all options for the kernel.
        :type kernel_options: kernel_tuner.interface.Options

        :param device_options: A dictionary with all options for the device
            on which the kernel should be tuned.
        :type device_options: kernel_tuner.interface.Options

        :param iterations: The number of iterations used for benchmarking
            each kernel instance.
        :type iterations: int
        """
        # Detect language and create high-level device interface.
        # __enter__ is called eagerly; __exit__ is forwarded from this
        # runner's own __exit__ below.
        self.dev = DeviceInterface(kernel_source, iterations=iterations, observers=observers, **device_options).__enter__()
        self.units = self.dev.units
        self.quiet = device_options.quiet
        self.kernel_source = kernel_source
        self.warmed_up = False  # the first benchmarked config doubles as GPU warmup
        self.simulation_mode = False
        self.last_strategy_start_time = perf_counter()
        # Move data to the GPU once, up front.
        self.gpu_args = self.dev.ready_argument_list(kernel_options.arguments)

    def __enter__(self):
        return self

    def run(self, parameter_space, kernel_options, tuning_options):
        """ Iterate through the entire parameter space using a single Python process

        :param parameter_space: The parameter space as an iterable.
        :type parameter_space: iterable

        :param kernel_options: A dictionary with all options for the kernel.
        :type kernel_options: kernel_tuner.interface.Options

        :param tuning_options: A dictionary with all options regarding the tuning
            process.
        :type tuning_options: kernel_tuner.interface.Options

        :returns: A list of dictionaries for executed kernel configurations and their
            execution times. And a dictionary that contains information
            about the hardware/software environment on which the tuning took place.
        :rtype: list(dict()), dict()
        """
        logging.debug('sequential runner started for ' + kernel_options.kernel_name)

        results = []

        # Iterate over parameter space.
        for element in parameter_space:
            params = OrderedDict(zip(tuning_options.tune_params.keys(), element))

            # Attempt to warmup the GPU by running the first config in the
            # parameter space and ignoring the result.
            if not self.warmed_up:
                self.dev.compile_and_benchmark(self.kernel_source, self.gpu_args, params, kernel_options, tuning_options)
                self.warmed_up = True

            # Check if element is in the cache; cached configs are returned
            # without re-benchmarking.
            x_int = ",".join([str(i) for i in element])
            if tuning_options.cache and x_int in tuning_options.cache:
                results.append(tuning_options.cache[x_int])
                continue

            result = self.dev.compile_and_benchmark(self.kernel_source, self.gpu_args, params, kernel_options, tuning_options)

            if self.dev.last_compilation_time is not None:
                params['compile_time'] = self.dev.last_compilation_time
            if self.dev.last_verification_time is not None:
                params['verification_time'] = self.dev.last_verification_time

            # Failed configs are cached with the error object as the
            # objective value and then skipped.
            if isinstance(result, ErrorConfig):
                logging.debug('kernel configuration was skipped silently due to compile or runtime failure')
                params.update({ tuning_options.objective: result })
                store_cache(x_int, params, tuning_options)
                results.append(params)
                continue

            # Print and append to results.
            if not isinstance(result, dict):
                params[tuning_options.objective] = result
            else:
                params.update(result)
            if tuning_options.metrics:
                params = process_metrics(params, tuning_options.metrics)
            print_config_output(tuning_options.tune_params, params, self.quiet, tuning_options.metrics, self.units)

            store_cache(x_int, params, tuning_options)
            results.append(params)

        return results, self.dev.get_environment()

    def __exit__(self, *exc):
        # Only tear down the device if __init__ got far enough to create it.
        if hasattr(self, 'dev'):
            self.dev.__exit__(*exc)
|
en
| 0.694078
|
The default runner for sequentially tuning the parameter space SequentialRunner is used for tuning with a single process/thread Instantiate the SequentialRunner :param kernel_source: The kernel source :type kernel_source: kernel_tuner.core.KernelSource :param kernel_options: A dictionary with all options for the kernel. :type kernel_options: kernel_tuner.interface.Options :param device_options: A dictionary with all options for the device on which the kernel should be tuned. :type device_options: kernel_tuner.interface.Options :param iterations: The number of iterations used for benchmarking each kernel instance. :type iterations: int #detect language and create high-level device interface #move data to the GPU Iterate through the entire parameter space using a single Python process :param parameter_space: The parameter space as an iterable. :type parameter_space: iterable :param kernel_options: A dictionary with all options for the kernel. :type kernel_options: kernel_tuner.interface.Options :param tuning_options: A dictionary with all options regarding the tuning process. :type tuning_options: kernel_tuner.iterface.Options :returns: A list of dictionaries for executed kernel configurations and their execution times. And a dictionary that contains information about the hardware/software environment on which the tuning took place. :rtype: list(dict()), dict() #iterate over parameter space #attempt to warmup the GPU by running the first config in the parameter space and ignoring the result #check if element is in the cache # print and append to results
| 2.736756
| 3
|
src/sentry/api/endpoints/project_ownership.py
|
hieast/sentry
| 1
|
6628608
|
from __future__ import absolute_import
import six
from rest_framework import serializers
from rest_framework.response import Response
from django.utils import timezone
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.serializers import serialize
from sentry.models import ProjectOwnership, resolve_actors
from sentry.ownership.grammar import parse_rules, dump_schema, ParseError
class ProjectOwnershipSerializer(serializers.Serializer):
    """Validates and persists a project's ownership configuration."""
    raw = serializers.CharField()
    fallthrough = serializers.BooleanField()

    def validate_raw(self, attrs, source):
        """Parse the raw rule text and reject rules naming unknown owners.

        Side effect: the dumped schema is stashed in ``attrs['schema']`` so
        ``save`` can persist it together with the raw text.
        """
        raw_value = attrs[source]
        # Blank input means "clear the configuration" -- nothing to validate.
        if not raw_value.strip():
            return attrs
        try:
            rules = parse_rules(raw_value)
        except ParseError as e:
            raise serializers.ValidationError(
                u'Parse error: %r (line %d, column %d)' % (
                    e.expr.name, e.line(), e.column()
                ))

        schema = dump_schema(rules)

        # Deduplicated set of every owner mentioned by any rule.
        owners = {o for rule in rules for o in rule.owners}
        actors = resolve_actors(owners, self.context['ownership'].project_id)

        # Collect owners that failed to resolve to a real actor.
        bad_actors = []
        for owner, actor in six.iteritems(actors):
            if actor is not None:
                continue
            if owner.type == 'user':
                bad_actors.append(owner.identifier)
            elif owner.type == 'team':
                bad_actors.append(u'#{}'.format(owner.identifier))

        if bad_actors:
            raise serializers.ValidationError(
                u'Invalid rule owners: {}'.format(", ".join(bad_actors))
            )

        attrs['schema'] = schema
        return attrs

    def save(self):
        """Apply validated changes; timestamps are stamped only when dirty."""
        ownership = self.context['ownership']
        dirty = False

        if 'raw' in self.object:
            new_raw = self.object['raw']
            # Whitespace-only input clears the stored configuration.
            if not new_raw.strip():
                new_raw = None
            if ownership.raw != new_raw:
                ownership.raw = new_raw
                ownership.schema = self.object.get('schema')
                dirty = True

        if 'fallthrough' in self.object:
            new_fallthrough = self.object['fallthrough']
            if ownership.fallthrough != new_fallthrough:
                ownership.fallthrough = new_fallthrough
                dirty = True

        if dirty:
            now = timezone.now()
            # First save of a placeholder row: stamp the creation time too.
            if ownership.date_created is None:
                ownership.date_created = now
            ownership.last_updated = now
            ownership.save()

        return ownership
class ProjectOwnershipEndpoint(ProjectEndpoint):
    def get_ownership(self, project):
        # Fall back to an unsaved placeholder row when the project has no
        # ownership configuration yet.
        try:
            return ProjectOwnership.objects.get(project=project)
        except ProjectOwnership.DoesNotExist:
            return ProjectOwnership(
                project=project,
                date_created=None,
                last_updated=None,
            )

    def get(self, request, project):
        """
        Retrieve a Project's Ownership configuration
        ````````````````````````````````````````````

        Return details on a project's ownership configuration.

        :auth: required
        """
        ownership = self.get_ownership(project)
        return Response(serialize(ownership, request.user))

    def put(self, request, project):
        """
        Update a Project's Ownership configuration
        ``````````````````````````````````````````

        Updates a project's ownership configuration settings. Only the
        attributes submitted are modified.

        :param string raw: Raw input for ownership configuration.
        :param boolean fallthrough: Indicate if there is no match on explicit rules,
                                    to fall through and make everyone an implicit owner.
        :auth: required
        """
        serializer = ProjectOwnershipSerializer(
            data=request.DATA,
            partial=True,
            context={'ownership': self.get_ownership(project)},
        )
        # Guard clause: reject invalid input before touching the model.
        if not serializer.is_valid():
            return Response(serializer.errors, status=400)
        return Response(serialize(serializer.save(), request.user))
|
from __future__ import absolute_import
import six
from rest_framework import serializers
from rest_framework.response import Response
from django.utils import timezone
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.serializers import serialize
from sentry.models import ProjectOwnership, resolve_actors
from sentry.ownership.grammar import parse_rules, dump_schema, ParseError
class ProjectOwnershipSerializer(serializers.Serializer):
    """Validates and persists a project's ownership configuration."""
    raw = serializers.CharField()
    fallthrough = serializers.BooleanField()

    def validate_raw(self, attrs, source):
        """Parse the raw rules; reject syntax errors and unknown owners.

        Side effect: stores the dumped schema in ``attrs['schema']`` so
        ``save`` can persist it alongside the raw text.
        """
        # Blank input means "clear the configuration"; nothing to validate.
        if not attrs[source].strip():
            return attrs
        try:
            rules = parse_rules(attrs[source])
        except ParseError as e:
            raise serializers.ValidationError(
                u'Parse error: %r (line %d, column %d)' % (
                    e.expr.name, e.line(), e.column()
                ))
        schema = dump_schema(rules)
        # Every owner mentioned by any rule, deduplicated.
        owners = {o for rule in rules for o in rule.owners}
        actors = resolve_actors(owners, self.context['ownership'].project_id)
        # Owners that did not resolve to a real user/team actor.
        bad_actors = []
        for owner, actor in six.iteritems(actors):
            if actor is None:
                if owner.type == 'user':
                    bad_actors.append(owner.identifier)
                elif owner.type == 'team':
                    bad_actors.append(u'#{}'.format(owner.identifier))
        if bad_actors:
            raise serializers.ValidationError(
                u'Invalid rule owners: {}'.format(", ".join(bad_actors))
            )
        attrs['schema'] = schema
        return attrs

    def save(self):
        """Persist validated changes; timestamps updated only when dirty."""
        ownership = self.context['ownership']
        changed = False
        if 'raw' in self.object:
            raw = self.object['raw']
            # Whitespace-only input clears the stored configuration.
            if not raw.strip():
                raw = None
            if ownership.raw != raw:
                ownership.raw = raw
                ownership.schema = self.object.get('schema')
                changed = True
        if 'fallthrough' in self.object:
            fallthrough = self.object['fallthrough']
            if ownership.fallthrough != fallthrough:
                ownership.fallthrough = fallthrough
                changed = True
        if changed:
            now = timezone.now()
            # First save of a placeholder row: stamp the creation time too.
            if ownership.date_created is None:
                ownership.date_created = now
            ownership.last_updated = now
            ownership.save()
        return ownership
class ProjectOwnershipEndpoint(ProjectEndpoint):
    def get_ownership(self, project):
        # Fall back to an unsaved placeholder row (date_created=None signals
        # "never saved" to the serializer) when no configuration exists yet.
        try:
            return ProjectOwnership.objects.get(project=project)
        except ProjectOwnership.DoesNotExist:
            return ProjectOwnership(
                project=project,
                date_created=None,
                last_updated=None,
            )

    def get(self, request, project):
        """
        Retrieve a Project's Ownership configuration
        ````````````````````````````````````````````

        Return details on a project's ownership configuration.

        :auth: required
        """
        return Response(serialize(self.get_ownership(project), request.user))

    def put(self, request, project):
        """
        Update a Project's Ownership configuration
        ``````````````````````````````````````````

        Updates a project's ownership configuration settings. Only the
        attributes submitted are modified.

        :param string raw: Raw input for ownership configuration.
        :param boolean fallthrough: Indicate if there is no match on explicit rules,
                                    to fall through and make everyone an implicit owner.
        :auth: required
        """
        serializer = ProjectOwnershipSerializer(
            data=request.DATA,
            partial=True,
            context={'ownership': self.get_ownership(project)}
        )
        if serializer.is_valid():
            ownership = serializer.save()
            return Response(serialize(ownership, request.user))
        # Validation failed: echo the field errors back with a 400.
        return Response(serializer.errors, status=400)
|
en
| 0.906033
|
Retrieve a Project's Ownership configuration ```````````````````````````````````````````` Return details on a project's ownership configuration. :auth: required Update a Project's Ownership configuration `````````````````````````````````````````` Updates a project's ownership configuration settings. Only the attributes submitted are modified. :param string raw: Raw input for ownership configuration. :param boolean fallthrough: Indicate if there is no match on explicit rules, to fall through and make everyone an implicit owner. :auth: required
| 2.072564
| 2
|
setup.py
|
snario/bakthat
| 144
|
6628609
|
<reponame>snario/bakthat
import os
from setuptools import setup, find_packages
def read(fname):
    """Return the contents of *fname*, resolved relative to this setup script.

    Uses a context manager so the file handle is closed promptly (the
    original leaked the handle by never closing it).
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as fh:
        return fh.read()
# Package metadata; full documentation lives at http://docs.bakthat.io.
setup(
    name="bakthat",
    version="0.6.0",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Bakthat is a MIT licensed backup framework written in Python, it's both a command line tool and a Python module that helps you manage backups on Amazon S3/Glacier and OpenStack Swift. It automatically compress, encrypt (symmetric encryption) and upload your files.",
    license="MIT",
    keywords="aws s3 glacier backup restore archive",
    url="http://docs.bakthat.io",
    # Ship every package except packaging helpers and the test suite.
    packages=find_packages(exclude=['ez_setup', 'tests', 'tests.*']),
    long_description=read('README.rst'),
    install_requires=["aaargh", "boto", "pycrypto", "beefish", "grandfatherson", "peewee", "byteformat", "pyyaml", "sh", "requests", "events"],
    # Installing creates a `bakthat` console command mapped to bakthat.main.
    entry_points={'console_scripts': ["bakthat = bakthat:main"]},
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Topic :: System :: Archiving :: Backup",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
    ],
    zip_safe=False,
)
|
import os
from setuptools import setup, find_packages


def read(fname):
    # Read a file relative to this setup script (used for the long description).
    # NOTE(review): the handle is never closed -- fine for a short-lived
    # setup script, but a context manager would be cleaner.
    return open(os.path.join(os.path.dirname(__file__), fname)).read()


# Package metadata; full documentation lives at http://docs.bakthat.io.
setup(
    name="bakthat",
    version="0.6.0",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Bakthat is a MIT licensed backup framework written in Python, it's both a command line tool and a Python module that helps you manage backups on Amazon S3/Glacier and OpenStack Swift. It automatically compress, encrypt (symmetric encryption) and upload your files.",
    license="MIT",
    keywords="aws s3 glacier backup restore archive",
    url="http://docs.bakthat.io",
    # Ship every package except packaging helpers and the test suite.
    packages=find_packages(exclude=['ez_setup', 'tests', 'tests.*']),
    long_description=read('README.rst'),
    install_requires=["aaargh", "boto", "pycrypto", "beefish", "grandfatherson", "peewee", "byteformat", "pyyaml", "sh", "requests", "events"],
    # Installing creates a `bakthat` console command mapped to bakthat.main.
    entry_points={'console_scripts': ["bakthat = bakthat:main"]},
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Topic :: System :: Archiving :: Backup",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
    ],
    zip_safe=False,
)
|
none
| 1
| 1.530257
| 2
|
|
selenium_automation/upcoming-events(python.org).py
|
amgad01/web-scraping-and-automation
| 1
|
6628610
|
"""Scrape the upcoming-events widget from python.org and print date: event pairs."""
import os
from selenium import webdriver

CHROME_DRIVER = os.environ.get('CHROME_DRIVER')
driver = webdriver.Chrome(executable_path=CHROME_DRIVER)

url = "https://www.python.org"
driver.get(url)

try:
    # Locate the dates of upcoming events using a CSS selector:
    # <time> elements inside the event-widget class.
    events_dates = driver.find_elements_by_css_selector(".event-widget time")
    for date in events_dates:
        print(date.text)

    # The event names are the texts of the anchor tags inside list items
    # in the event-widget class.
    events_names = driver.find_elements_by_css_selector(".event-widget li a")

    # Pair each date element with its event element, then print
    # "<month>-<day>: <event>" (the date text is split on '-').
    upcoming_events = dict(zip(events_dates, events_names))
    for date, event in upcoming_events.items():
        print(f"{date.text.split('-')[1]}-{date.text.split('-')[2]}: {event.text}")
finally:
    # quit() terminates the whole WebDriver session; the original used
    # close(), which only closes the window and leaves the chromedriver
    # process running. try/finally ensures cleanup even when scraping fails.
    driver.quit()
|
# Scrape the upcoming-events widget from python.org and print date/event pairs.
import os
from selenium import webdriver

# Path to the chromedriver binary, taken from the environment.
CHROME_DRIVER = os.environ.get('CHROME_DRIVER')
driver = webdriver.Chrome(executable_path=CHROME_DRIVER)

url = "https://www.python.org"
driver.get(url)

# locate the dates of upcoming events class using css selector
# get time element in the event-widget class
events_dates = driver.find_elements_by_css_selector(".event-widget time")
for date in events_dates:
    print(date.text)

# get the texts which hold the upcoming events which are located as texts of the anchor tags that are in lists
# inside the event-widget class
events_names = driver.find_elements_by_css_selector(".event-widget li a")
# for name in events_names:
#     print(name.text)

# Pair each date element with its event element, then print
# "<month>-<day>: <event>" (the date text is split on '-').
upcoming_events = dict(zip(events_dates, events_names))
for date, event in upcoming_events.items():
    print(f"{date.text.split('-')[1]}-{date.text.split('-')[2]}: {event.text}")

# NOTE(review): close() only closes the current window; driver.quit() would
# also end the session and stop the chromedriver process.
driver.close()
|
en
| 0.925644
|
# locate the dates of upcoming events class using css selector # get time element in the event-widget class # get the texts which hold the upcoming events which are located as texts of the anchor tags that are in lists # inside the event-widget class # for name in events_names: # print(name.text)
| 3.424298
| 3
|
2dmodels/CrossConv.py
|
zenanz/ChemTables
| 4
|
6628611
|
<filename>2dmodels/CrossConv.py
import torch
import torch.nn as nn
import torch.nn.functional as F
class CrossConv2d(nn.Conv2d):
    """2d convolution whose kernel is restricted by a fixed binary mask.

    A non-trainable 0/1 mask zeroes the four corner regions of the kernel,
    leaving a cross-shaped band around the central row/column active.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1,
                 bias=True, padding_mode='zeros'):
        super(CrossConv2d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding,
            dilation, groups, bias, padding_mode)
        mask = torch.ones(self.weight.size())
        center_h = self.weight.size(2) // 2  # kernel_h // 2
        center_w = self.weight.size(3) // 2  # kernel_w // 2
        # NOTE(review): these slices stop one short of the center
        # (`center-1` rather than `center`), so for a 3x3 kernel nothing is
        # masked at all -- confirm whether the corners should instead be
        # `[:center_h, :center_w]` etc.
        mask[:, :, :center_h - 1, :center_w - 1] = 0  # mask top left
        mask[:, :, center_h + 1:, :center_w - 1] = 0  # mask bottom left
        mask[:, :, :center_h - 1, center_w + 1:] = 0  # mask top right
        mask[:, :, center_h + 1:, center_w + 1:] = 0  # mask bottom right
        # Registered as a non-trainable parameter so it moves with the module
        # (.to(device), state_dict) but receives no gradients.
        self._mask = nn.Parameter(mask, requires_grad=False)

    def forward(self, inputs):
        # Apply the mask out-of-place. The original re-wrapped the product in
        # a new nn.Parameter on every call, silently replacing self.weight
        # and detaching it from any optimizer holding a reference to the old
        # tensor. The masked product yields identical outputs (the mask is
        # 0/1) without mutating the module.
        masked_weight = self.weight * self._mask
        return F.conv2d(inputs, masked_weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
|
<filename>2dmodels/CrossConv.py
import torch
import torch.nn as nn
import torch.nn.functional as F
class CrossConv2d(nn.Conv2d):
    """2d convolution whose kernel is restricted by a fixed binary 0/1 mask
    that zeroes the four corner regions, leaving a cross-shaped band."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1,
                 bias=True, padding_mode='zeros'):
        super(CrossConv2d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode)
        self._mask = torch.ones(self.weight.size())
        center_h = self.weight.size(2) // 2  # kernel_h // 2
        center_w = self.weight.size(3) // 2  # kernel_w // 2
        # NOTE(review): the slices stop one short of the center
        # (`center-1` rather than `center`), so for a 3x3 kernel nothing is
        # masked at all -- confirm intended corner extents.
        self._mask[:, :, :center_h-1, :center_w-1] = 0  # mask top left
        self._mask[:, :, center_h+1:, :center_w-1] = 0  # mask bottom left
        self._mask[:, :, :center_h-1, center_w+1:] = 0  # mask top right
        self._mask[:, :, center_h+1:, center_w+1:] = 0  # mask bottom right
        # Non-trainable parameter: moves with the module but gets no gradient.
        self._mask = nn.Parameter(self._mask, requires_grad=False)

    def forward(self, inputs):
        # NOTE(review): this rebinds self.weight to a brand-new nn.Parameter
        # on every forward call, which detaches the weight from any optimizer
        # created before the call -- computing a local masked weight instead
        # would give identical outputs without mutating the module.
        self.weight = nn.Parameter(self.weight * self._mask)
        return F.conv2d(inputs, self.weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
|
uk
| 0.115302
|
# kernel_h // 2 # kernel_w // 2 # mask top left # mask bottom left # mask top right # mask bottom right
| 2.718116
| 3
|
src/sims4communitylib/classes/math/common_location.py
|
velocist/TS4CheatsInfo
| 0
|
6628612
|
"""
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from typing import Any, Union
from sims4.math import Location
from sims4communitylib.classes.math.common_surface_identifier import CommonSurfaceIdentifier
from sims4communitylib.classes.math.common_transform import CommonTransform
class CommonLocation:
    """ A class that contains locational data. """
    # NOTE(review): __new__ below returns a vanilla sims4.math.Location, not a
    # CommonLocation; because the returned object is not an instance of cls,
    # Python skips __init__, so __init__ and the properties on this class are
    # effectively never used on objects built via CommonLocation(...) -- the
    # class behaves as a Location factory.
    def __init__(self, transform: CommonTransform, routing_surface: CommonSurfaceIdentifier, parent_ref: Any=None, joint_name_or_hash: Any=None, slot_hash: int=0):
        self._transform = transform
        self._routing_surface = routing_surface
        self._parent_ref = parent_ref
        self._joint_name_or_hash = joint_name_or_hash
        self._slot_hash = slot_hash

    @property
    def transform(self) -> CommonTransform:
        """ The translation and orientation of the location.

        :return: The translation and orientation of the location.
        :rtype: CommonTransform
        """
        return self._transform

    @property
    def routing_surface(self) -> CommonSurfaceIdentifier:
        """ The routing surface the location is located on.

        :return: The routing surface the location is located on.
        :rtype: CommonSurfaceIdentifier
        """
        return self._routing_surface

    @property
    def parent_ref(self) -> Any:
        """ The parent reference of the location.

        :return: The parent reference of the location.
        :rtype: Any
        """
        return self._parent_ref

    @property
    def joint_name_or_hash(self) -> Union[str, int]:
        """ The name or hash identifier of the joint the location is located at.

        :return: The name or hash identifier of the joint the location is located at.
        :rtype: Union[str, int]
        """
        return self._joint_name_or_hash

    @property
    def slot_hash(self) -> int:
        """ The hash identifier of the Slot the location is located at.

        :return: The hash identifier of the Slot the location is located at.
        :rtype: int
        """
        return self._slot_hash

    def __new__(cls, transform: CommonTransform, routing_surface: CommonSurfaceIdentifier, parent_ref: Any=None, joint_name_or_hash: Any=None, slot_hash: int=0) -> 'CommonLocation':
        # noinspection PyTypeChecker, PyArgumentList
        # Returns a real game Location object (see class-level note above).
        return Location(transform, routing_surface, parent_ref, joint_name_or_hash, slot_hash)

    @staticmethod
    def empty() -> 'CommonLocation':
        """empty()

        Create an empty location.

        :return: An empty location.
        :rtype: CommonLocation
        """
        return CommonLocation(CommonTransform.empty(), CommonSurfaceIdentifier.empty())

    @staticmethod
    def from_location(location: Union[Location, 'CommonLocation']) -> Union['CommonLocation', None]:
        """from_location(location)

        Convert a vanilla Location object into a CommonLocation.

        :param location: An instance of a Location.
        :type location: Union[Location, CommonLocation]
        :return: An instance of a CommonLocation or None if the object failed to convert.
        :rtype: Union[CommonLocation, None]
        """
        if location is None:
            return None
        if isinstance(location, CommonLocation):
            return location
        if not isinstance(location, Location):
            raise Exception('Failed to convert {} with type {} was not of type {}.'.format(location, type(location), type(Location)))
        # Fall back to an empty surface identifier on the location's level
        # when no routing surface is attached.
        routing_surface = location.routing_surface if location.routing_surface is not None else CommonSurfaceIdentifier.empty(
            secondary_id=location.level)
        return CommonLocation(CommonTransform.from_transform(location.transform), CommonSurfaceIdentifier.from_surface_identifier(routing_surface), parent_ref=location.parent_ref, joint_name_or_hash=location.joint_name_or_hash, slot_hash=location.slot_hash)
|
"""
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from typing import Any, Union
from sims4.math import Location
from sims4communitylib.classes.math.common_surface_identifier import CommonSurfaceIdentifier
from sims4communitylib.classes.math.common_transform import CommonTransform
class CommonLocation:
    """ A class that contains locational data. """
    # NOTE(review): __new__ below returns a vanilla sims4.math.Location, so
    # __init__ and these properties never run for objects produced via
    # CommonLocation(...) -- the class acts as a Location factory.
    def __init__(self, transform: CommonTransform, routing_surface: CommonSurfaceIdentifier, parent_ref: Any=None, joint_name_or_hash: Any=None, slot_hash: int=0):
        self._transform = transform
        self._routing_surface = routing_surface
        self._parent_ref = parent_ref
        self._joint_name_or_hash = joint_name_or_hash
        self._slot_hash = slot_hash

    @property
    def transform(self) -> CommonTransform:
        """ The translation and orientation of the location.

        :return: The translation and orientation of the location.
        :rtype: CommonTransform
        """
        return self._transform

    @property
    def routing_surface(self) -> CommonSurfaceIdentifier:
        """ The routing surface the location is located on.

        :return: The routing surface the location is located on.
        :rtype: CommonSurfaceIdentifier
        """
        return self._routing_surface

    @property
    def parent_ref(self) -> Any:
        """ The parent reference of the location.

        :return: The parent reference of the location.
        :rtype: Any
        """
        return self._parent_ref

    @property
    def joint_name_or_hash(self) -> Union[str, int]:
        """ The name or hash identifier of the joint the location is located at.

        :return: The name or hash identifier of the joint the location is located at.
        :rtype: Union[str, int]
        """
        return self._joint_name_or_hash

    @property
    def slot_hash(self) -> int:
        """ The hash identifier of the Slot the location is located at.

        :return: The hash identifier of the Slot the location is located at.
        :rtype: int
        """
        return self._slot_hash

    def __new__(cls, transform: CommonTransform, routing_surface: CommonSurfaceIdentifier, parent_ref: Any=None, joint_name_or_hash: Any=None, slot_hash: int=0) -> 'CommonLocation':
        # noinspection PyTypeChecker, PyArgumentList
        # Returns a real game Location object; __init__ is skipped as a result.
        return Location(transform, routing_surface, parent_ref, joint_name_or_hash, slot_hash)

    @staticmethod
    def empty() -> 'CommonLocation':
        """empty()

        Create an empty location.

        :return: An empty location.
        :rtype: CommonLocation
        """
        return CommonLocation(CommonTransform.empty(), CommonSurfaceIdentifier.empty())

    @staticmethod
    def from_location(location: Union[Location, 'CommonLocation']) -> Union['CommonLocation', None]:
        """from_location(location)

        Convert a vanilla Location object into a CommonLocation.

        :param location: An instance of a Location.
        :type location: Union[Location, CommonLocation]
        :return: An instance of a CommonLocation or None if the object failed to convert.
        :rtype: Union[CommonLocation, None]
        """
        if location is None:
            return None
        if isinstance(location, CommonLocation):
            return location
        if not isinstance(location, Location):
            raise Exception('Failed to convert {} with type {} was not of type {}.'.format(location, type(location), type(Location)))
        # Use an empty surface identifier on the location's level when no
        # routing surface is attached.
        routing_surface = location.routing_surface if location.routing_surface is not None else CommonSurfaceIdentifier.empty(
            secondary_id=location.level)
        return CommonLocation(CommonTransform.from_transform(location.transform), CommonSurfaceIdentifier.from_surface_identifier(routing_surface), parent_ref=location.parent_ref, joint_name_or_hash=location.joint_name_or_hash, slot_hash=location.slot_hash)
|
en
| 0.85574
|
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0). https://creativecommons.org/licenses/by/4.0/ https://creativecommons.org/licenses/by/4.0/legalcode Copyright (c) COLONOLNUTTY A class that contains locational data. The translation and orientation of the location. :return: The translation and orientation of the location. :rtype: CommonTransform The routing surface the location is located on. :return: The routing surface the location is located on. :rtype: CommonSurfaceIdentifier The parent reference of the location. :return: The parent reference of the location. :rtype: Any The name or hash identifier of the joint the location is located at. :return: The name or hash identifier of the joint the location is located at. :rtype: Union[str, int] The hash identifier of the Slot the location is located at. :return: The hash identifier of the Slot the location is located at. :rtype: int # noinspection PyTypeChecker, PyArgumentList empty() Create an empty location. :return: An empty location. :rtype: CommonLocation from_location(location) Convert a vanilla Location object into a CommonLocation. :param location: An instance of a Location. :type location: Union[Location, CommonLocation] :return: An instance of a CommonLocation or None if the object failed to convert. :rtype: Union[CommonLocation, None]
| 2.060099
| 2
|
webcrawler.py
|
EthanC2/broken-link-finder
| 0
|
6628613
|
# Native Modules
import requests # For making HTTP requests
# External Modules
from bs4 import BeautifulSoup # BeautifulSoup is a webscraping module
# Classes
import cmd_args # Global commandline arguments
from link import Link # 'Link' class
from http_request_wrapper import HTTPRequestWrapper # Wrapper for HTTP errors (fatal and non)
## Webcrawler class ##
class Webcrawler:
    """Scrapes websites for <a> links and validates each link via an HTTP HEAD request."""

    def __init__(self):
        # Every Website object processed so far; consumed by print_links().
        self.websites = list()

    # Parse a website for HTML
    @HTTPRequestWrapper.nonfatal_errors
    def scrape(self, url, timeout):
        """Return the raw HTML of `url` as text.

        Fix: the `timeout` parameter was previously ignored in favor of the
        global cmd_args.TIMEOUT; it is now honored (callers already pass
        cmd_args.TIMEOUT, so behavior for existing callers is unchanged).
        """
        response = requests.get(url, timeout=timeout, allow_redirects=True)
        return response.text

    def reconstruct_url(self, url, website):
        """Turn an href into an absolute URL.

        Returns None for internal anchors ('#...'), javascript: references,
        and mailto: links so callers can filter them out.
        """
        if not url.startswith(('#', "javascript", "mailto")):
            if url.startswith('/'):  # Relative URL: resolve against the site root.
                return website.url + url
            return url  # Already a standard absolute URL.
        return None

    def find_links(self, raw_html, website):
        """Parse `raw_html` and append every usable link to `website.links`."""
        soup = BeautifulSoup(raw_html, "html.parser")
        # Only <a> tags that actually carry an 'href' attribute.
        a_tags = soup.find_all('a', href=True)
        print(f"Total of {len(a_tags)} <a> tags found\n")
        for a_tag in a_tags:
            # Keep the URL only if it is not an internal link, JS ref, or mail server.
            if url := self.reconstruct_url(a_tag['href'], website):
                website.links.append( Link(a_tag.text, url) )

    @HTTPRequestWrapper.nonfatal_errors
    def validate_links(self, links):
        """HEAD each link and record its HTTP status code, showing progress.

        Fix: the counter now starts at 1 so the progress bar can actually reach
        `total` and print 'Done!' (previously enumerate started at 0 and the
        'Done!' branch was unreachable).
        """
        total = len(links)
        for count, link in enumerate(links, start=1):
            response = requests.head(link.url, timeout=cmd_args.TIMEOUT, allow_redirects=True)
            link.status_code = response.status_code
            print(f"Progress: {count} / {total} valid links...{'Done!' if count == total else ''}", end='\r')

    def scrape_for_links(self, website):
        """Scrape `website`, collect all its links, and validate them."""
        self.websites.append(website)
        raw_html = self.scrape(website.url, cmd_args.TIMEOUT)
        self.find_links(raw_html, website)
        self.validate_links(website.links)

    def print_links(self):
        """Print every website's links, sorted by HTTP status code."""
        for website in self.websites:
            website.links.sort(key=lambda link: link.status_code)
            print(f"A total of {len(website.links)} links found on {website.url}")
            print(f"============================{'=' * len(website.url)}")
            website.print_links()
# End of Webcrawler Class
|
# Native Modules
import requests # For making HTTP requests
# External Modules
from bs4 import BeautifulSoup # BeautifulSoup is a webscraping module
# Classes
import cmd_args # Global commandline arguments
from link import Link # 'Link' class
from http_request_wrapper import HTTPRequestWrapper # Wrapper for HTTP errors (fatal and non)
## Webcrawler class ##
class Webcrawler:
    """Scrapes websites for <a> links and validates each link via an HTTP HEAD request."""

    def __init__(self):
        # Every Website object processed so far; consumed by print_links().
        self.websites = list()

    # Parse a website for HTML
    @HTTPRequestWrapper.nonfatal_errors
    def scrape(self, url, timeout):
        """Return the raw HTML of `url` as text.

        Fix: the `timeout` parameter was previously ignored in favor of the
        global cmd_args.TIMEOUT; it is now honored (callers already pass
        cmd_args.TIMEOUT, so behavior for existing callers is unchanged).
        """
        response = requests.get(url, timeout=timeout, allow_redirects=True)
        return response.text

    def reconstruct_url(self, url, website):
        """Turn an href into an absolute URL.

        Returns None for internal anchors ('#...'), javascript: references,
        and mailto: links so callers can filter them out.
        """
        if not url.startswith(('#', "javascript", "mailto")):
            if url.startswith('/'):  # Relative URL: resolve against the site root.
                return website.url + url
            return url  # Already a standard absolute URL.
        return None

    def find_links(self, raw_html, website):
        """Parse `raw_html` and append every usable link to `website.links`."""
        soup = BeautifulSoup(raw_html, "html.parser")
        # Only <a> tags that actually carry an 'href' attribute.
        a_tags = soup.find_all('a', href=True)
        print(f"Total of {len(a_tags)} <a> tags found\n")
        for a_tag in a_tags:
            # Keep the URL only if it is not an internal link, JS ref, or mail server.
            if url := self.reconstruct_url(a_tag['href'], website):
                website.links.append( Link(a_tag.text, url) )

    @HTTPRequestWrapper.nonfatal_errors
    def validate_links(self, links):
        """HEAD each link and record its HTTP status code, showing progress.

        Fix: the counter now starts at 1 so the progress bar can actually reach
        `total` and print 'Done!' (previously enumerate started at 0 and the
        'Done!' branch was unreachable).
        """
        total = len(links)
        for count, link in enumerate(links, start=1):
            response = requests.head(link.url, timeout=cmd_args.TIMEOUT, allow_redirects=True)
            link.status_code = response.status_code
            print(f"Progress: {count} / {total} valid links...{'Done!' if count == total else ''}", end='\r')

    def scrape_for_links(self, website):
        """Scrape `website`, collect all its links, and validate them."""
        self.websites.append(website)
        raw_html = self.scrape(website.url, cmd_args.TIMEOUT)
        self.find_links(raw_html, website)
        self.validate_links(website.links)

    def print_links(self):
        """Print every website's links, sorted by HTTP status code."""
        for website in self.websites:
            website.links.sort(key=lambda link: link.status_code)
            print(f"A total of {len(website.links)} links found on {website.url}")
            print(f"============================{'=' * len(website.url)}")
            website.print_links()
# End of Webcrawler Class
|
en
| 0.617606
|
# Native Modules # For making HTTP requests # External Modules # BeautifulSoup is a webscraping module # Classes # Global commandline arguments # 'Link' class # Wrapper for HTTP errors (fatal and non) ## Webcrawler class ## # Parse a website for HTML # Return the raw HTML of the website as text # Reconstruct the URL of the website from the relative link and the URL # Filter out internal urls, javascript references, and mail servers # Relative URLs # Standard URL # Find all links on a website # Get all the <a> (hyperlink) tags # Parse the raw HTML for <a> tags with an 'href' (URL) element # Total amount of <a> tags (but not necessarily valid URLs) found # Sort through all the URLs # Reconstruct the URL; if not an internal link, JS ref, or mail server, keep it # Append a new 'Link' object (linked text, url, and status code) # A simple progress bar # Send simple HEAD request # Parse and find links of a website # Add the website to the list of websites # Scrape the website for the HTML # Parse the HTML for <a> (link) tags # Validate all the links found # Print all links # Sort the links (by status code) # Print all the links # End of Webcrawler Class
| 3.52099
| 4
|
AA/heap_algorithm.py
|
zzvsjs1/MyPyScripts
| 0
|
6628614
|
def __parent(index: int) -> int:
    """Return the parent index of the node at *index* in a 0-based binary heap.

    Fix: the previous formula ``(index + 1) * 2`` computed a *child* index
    (the right child in 0-based layout), not the parent. The parent of node
    ``i`` is ``(i - 1) // 2``.
    """
    return (index - 1) // 2
def make_heap(ll: list):
    """Rearrange *ll* in place into a 0-based min-heap.

    The original was an empty stub. This implements Floyd's bottom-up
    heapify (sift down every internal node, last to first), which runs in
    O(n). Returns None, matching the stdlib convention for in-place mutators.
    """
    n = len(ll)
    # Nodes at indices >= n // 2 are leaves and already trivial heaps.
    for root in range(n // 2 - 1, -1, -1):
        i = root
        while True:
            smallest = i
            left = 2 * i + 1
            right = 2 * i + 2
            if left < n and ll[left] < ll[smallest]:
                smallest = left
            if right < n and ll[right] < ll[smallest]:
                smallest = right
            if smallest == i:
                break  # Heap property holds for this subtree.
            ll[i], ll[smallest] = ll[smallest], ll[i]
            i = smallest
|
def __parent(index: int) -> int:
    """Return the parent index of the node at *index* in a 0-based binary heap.

    Fix: the previous formula ``(index + 1) * 2`` computed a *child* index
    (the right child in 0-based layout), not the parent. The parent of node
    ``i`` is ``(i - 1) // 2``.
    """
    return (index - 1) // 2
def make_heap(ll: list):
    """Rearrange *ll* in place into a 0-based min-heap.

    The original was an empty stub. This implements Floyd's bottom-up
    heapify (sift down every internal node, last to first), which runs in
    O(n). Returns None, matching the stdlib convention for in-place mutators.
    """
    n = len(ll)
    # Nodes at indices >= n // 2 are leaves and already trivial heaps.
    for root in range(n // 2 - 1, -1, -1):
        i = root
        while True:
            smallest = i
            left = 2 * i + 1
            right = 2 * i + 2
            if left < n and ll[left] < ll[smallest]:
                smallest = left
            if right < n and ll[right] < ll[smallest]:
                smallest = right
            if smallest == i:
                break  # Heap property holds for this subtree.
            ll[i], ll[smallest] = ll[smallest], ll[i]
            i = smallest
|
none
| 1
| 2.310146
| 2
|
|
git_stacktrace/cmd.py
|
ryan953/git-stacktrace
| 0
|
6628615
|
from __future__ import print_function
import argparse
import logging
import os
import select
import sys
import git_stacktrace
from git_stacktrace import api
def main():
    """Entry point for `git stacktrace`: read a stacktrace from stdin, resolve a
    commit range (explicit RANGE argument or --since date), and print the
    commits most likely related to it.

    Exits 1 on a missing or invalid range; raises when stdin carries no data.
    """
    usage = "git stacktrace [<options>] [<RANGE>] < stacktrace from stdin"
    description = "Lookup commits related to a given stacktrace."
    parser = argparse.ArgumentParser(usage=usage, description=description)
    # --since and the positional range are alternative ways to pick commits;
    # a mutually exclusive group lets the user supply at most one.
    range_group = parser.add_mutually_exclusive_group()
    range_group.add_argument('--since', metavar="<date1>", help='show commits '
                             'more recent than a specific date (from git-log)')
    range_group.add_argument('range', nargs='?', help='git commit range to use')
    parser.add_argument('-f', '--fast', action="store_true", help='Speed things up by not running '
                        'pickaxe if the file for a line of code cannot be found')
    parser.add_argument('-b', '--branch', nargs='?', help='Git branch. If using --since, use this to '
                        'specify which branch to run since on. Runs on current branch by default')
    parser.add_argument('--version', action="version",
                        version='%s version %s' % (os.path.split(sys.argv[0])[-1], git_stacktrace.__version__))
    parser.add_argument('-d', '--debug', action='store_true', help='Enable debug logging')
    args = parser.parse_args()
    logging.basicConfig(format='%(name)s:%(funcName)s:%(lineno)s: %(message)s')
    if args.debug:
        logging.getLogger().setLevel(logging.DEBUG)
    if args.since:
        # Translate the --since date into a concrete commit range on the chosen branch.
        git_range = api.convert_since(args.since, branch=args.branch)
        # Report the resolved range on stderr so stdout stays clean for results.
        print("commit range: %s" % git_range, file=sys.stderr)
    else:
        if args.range is None:
            print("Error: Missing range and since, must use one\n")
            parser.print_help()
            sys.exit(1)
        git_range = args.range
    if not api.valid_range(git_range):
        print("Found no commits in '%s'" % git_range)
        sys.exit(1)
    # Fail fast when nothing is piped in: a zero-timeout select detects
    # whether stdin has pending data without blocking.
    if not select.select([sys.stdin], [], [], 0.0)[0]:
        raise Exception("No input found in stdin")
    blob = sys.stdin.readlines()
    traceback = api.parse_trace(blob)
    print(traceback)
    # Score commits in the range against the parsed traceback
    # (skips the expensive git pickaxe step when --fast is set).
    results = api.lookup_stacktrace(traceback, git_range, fast=args.fast)
    for r in results.get_sorted_results():
        print("")
        print(r)
    if len(results.get_sorted_results()) == 0:
        print("No matches found")
# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main()
|
from __future__ import print_function
import argparse
import logging
import os
import select
import sys
import git_stacktrace
from git_stacktrace import api
def main():
    """Entry point for `git stacktrace`: read a stacktrace from stdin, resolve a
    commit range (explicit RANGE argument or --since date), and print the
    commits most likely related to it.

    Exits 1 on a missing or invalid range; raises when stdin carries no data.
    """
    usage = "git stacktrace [<options>] [<RANGE>] < stacktrace from stdin"
    description = "Lookup commits related to a given stacktrace."
    parser = argparse.ArgumentParser(usage=usage, description=description)
    # --since and the positional range are alternative ways to pick commits;
    # a mutually exclusive group lets the user supply at most one.
    range_group = parser.add_mutually_exclusive_group()
    range_group.add_argument('--since', metavar="<date1>", help='show commits '
                             'more recent than a specific date (from git-log)')
    range_group.add_argument('range', nargs='?', help='git commit range to use')
    parser.add_argument('-f', '--fast', action="store_true", help='Speed things up by not running '
                        'pickaxe if the file for a line of code cannot be found')
    parser.add_argument('-b', '--branch', nargs='?', help='Git branch. If using --since, use this to '
                        'specify which branch to run since on. Runs on current branch by default')
    parser.add_argument('--version', action="version",
                        version='%s version %s' % (os.path.split(sys.argv[0])[-1], git_stacktrace.__version__))
    parser.add_argument('-d', '--debug', action='store_true', help='Enable debug logging')
    args = parser.parse_args()
    logging.basicConfig(format='%(name)s:%(funcName)s:%(lineno)s: %(message)s')
    if args.debug:
        logging.getLogger().setLevel(logging.DEBUG)
    if args.since:
        # Translate the --since date into a concrete commit range on the chosen branch.
        git_range = api.convert_since(args.since, branch=args.branch)
        # Report the resolved range on stderr so stdout stays clean for results.
        print("commit range: %s" % git_range, file=sys.stderr)
    else:
        if args.range is None:
            print("Error: Missing range and since, must use one\n")
            parser.print_help()
            sys.exit(1)
        git_range = args.range
    if not api.valid_range(git_range):
        print("Found no commits in '%s'" % git_range)
        sys.exit(1)
    # Fail fast when nothing is piped in: a zero-timeout select detects
    # whether stdin has pending data without blocking.
    if not select.select([sys.stdin], [], [], 0.0)[0]:
        raise Exception("No input found in stdin")
    blob = sys.stdin.readlines()
    traceback = api.parse_trace(blob)
    print(traceback)
    # Score commits in the range against the parsed traceback
    # (skips the expensive git pickaxe step when --fast is set).
    results = api.lookup_stacktrace(traceback, git_range, fast=args.fast)
    for r in results.get_sorted_results():
        print("")
        print(r)
    if len(results.get_sorted_results()) == 0:
        print("No matches found")
# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main()
|
none
| 1
| 2.578758
| 3
|
|
tests/test_pex_builder.py
|
pantsbuild/pex
| 2,160
|
6628616
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import filecmp
import os
import stat
import subprocess
import zipfile
import pytest
from pex.common import open_zip, safe_open, temporary_dir, touch
from pex.compatibility import WINDOWS
from pex.executor import Executor
from pex.layout import Layout
from pex.pex import PEX
from pex.pex_builder import CopyMode, PEXBuilder
from pex.testing import built_wheel, make_bdist
from pex.testing import write_simple_pex as write_pex
from pex.typing import TYPE_CHECKING
from pex.variables import ENV
# Import typing names only for static analysis; avoids a runtime typing cost.
if TYPE_CHECKING:
    from typing import Any, Iterator, List, Set

# Source for a PEX entry point used across these tests: imports from the
# bundled 'p1' dist and writes a success marker file at the path in argv[1].
exe_main = """
import sys
from p1.my_module import do_something
do_something()
with open(sys.argv[1], 'w') as fp:
    fp.write('success')
"""

# Same as exe_main but additionally star-imports pyparsing to prove that a
# wheel dependency is importable from inside the built PEX.
wheeldeps_exe_main = """
import sys
from pyparsing import *
from p1.my_module import do_something
do_something()
with open(sys.argv[1], 'w') as fp:
    fp.write('success')
"""
def test_pex_builder():
    # type: () -> None
    """Build a PEX with a bundled dist and verify that running it writes the
    success marker written by exe_main."""
    with temporary_dir() as td, make_bdist("p1") as p1:
        pb = write_pex(td, exe_main, dists=[p1])
        success_txt = os.path.join(td, "success.txt")
        PEX(td, interpreter=pb.interpreter).run(args=[success_txt])
        assert os.path.exists(success_txt)
        with open(success_txt) as fp:
            assert fp.read() == "success"
    # Second pass in a fresh temporary dir.
    # NOTE(review): an unused second temporary_dir (td2) was removed here.
    with temporary_dir() as td1, make_bdist("p1") as p1:
        pb = write_pex(td1, exe_main, dists=[p1])
        success_txt = os.path.join(td1, "success.txt")
        PEX(td1, interpreter=pb.interpreter).run(args=[success_txt])
        assert os.path.exists(success_txt)
        with open(success_txt) as fp:
            assert fp.read() == "success"
def test_pex_builder_wheeldep():
    # type: () -> None
    """Repeat the pex_builder test, but this time include an import of something from a wheel that
    doesn't come in importable form."""
    pyparsing_whl = "./tests/example_packages/pyparsing-2.1.10-py2.py3-none-any.whl"
    with temporary_dir() as build_dir, make_bdist("p1") as dist:
        builder = write_pex(build_dir, wheeldeps_exe_main, dists=[dist, pyparsing_whl])
        marker = os.path.join(build_dir, "success.txt")
        PEX(build_dir, interpreter=builder.interpreter).run(args=[marker])
        assert os.path.exists(marker)
        with open(marker) as fp:
            assert fp.read() == "success"
def test_pex_builder_shebang():
    # type: () -> None
    """A custom shebang must appear, '#!'-prefixed, as the first line of the built PEX."""

    def make_builder(shebang):
        # type: (str) -> PEXBuilder
        result = PEXBuilder()
        result.set_shebang(shebang)
        return result

    expected_preamble = b"#!foobar\n"
    # Both the bare and the '#!'-prefixed spellings normalize to the same
    # preamble, and cloning a builder preserves the shebang.
    for original in (make_builder("foobar"), make_builder("#!foobar")):
        for builder in (original, original.clone()):
            with temporary_dir() as td:
                target = os.path.join(td, "foo.pex")
                builder.build(target)
                with open(target, "rb") as fp:
                    assert fp.read(len(expected_preamble)) == expected_preamble
def test_pex_builder_preamble():
    # type: () -> None
    """A preamble runs before the PEX entry point and can short-circuit execution."""
    with temporary_dir() as td:
        target = os.path.join(td, "foo.pex")
        marker = os.path.join(td, "foo.1")
        preamble = "\n".join(
            ["import sys", "open('{0}', 'w').close()".format(marker), "sys.exit(3)"]
        )
        builder = PEXBuilder(preamble=preamble)
        builder.build(target)
        # Building alone must not execute the preamble.
        assert not os.path.exists(marker)
        process = PEX(target, interpreter=builder.interpreter).run(blocking=False)
        process.wait()
        # The preamble's sys.exit(3) pre-empts normal execution...
        assert process.returncode == 3
        # ...after it has created the marker file.
        assert os.path.exists(marker)
def test_pex_builder_compilation():
    # type: () -> None
    """freeze(bytecode_compile=...) controls whether .pyc files are produced for
    sources, the executable, __main__, and the bundled .bootstrap."""
    with temporary_dir() as td1, temporary_dir() as td2, temporary_dir() as td3:
        src = os.path.join(td1, "src.py")
        with open(src, "w") as fp:
            fp.write(exe_main)
        exe = os.path.join(td1, "exe.py")
        with open(exe, "w") as fp:
            fp.write(exe_main)

        def build_and_check(path, precompile):
            # type: (str, bool) -> None
            # Build a chroot at `path` and assert .pyc presence matches `precompile`.
            pb = PEXBuilder(path=path)
            pb.add_source(src, "lib/src.py")
            pb.set_executable(exe, "exe.py")
            pb.freeze(bytecode_compile=precompile)
            for pyc_file in ("exe.pyc", "lib/src.pyc", "__main__.pyc"):
                pyc_exists = os.path.exists(os.path.join(path, pyc_file))
                if precompile:
                    assert pyc_exists
                else:
                    assert not pyc_exists
            # The runtime bootstrap should be (un)compiled in lockstep with sources.
            bootstrap_dir = os.path.join(path, pb.info.bootstrap)
            bootstrap_pycs = []  # type: List[str]
            for _, _, files in os.walk(bootstrap_dir):
                bootstrap_pycs.extend(f for f in files if f.endswith(".pyc"))
            if precompile:
                assert len(bootstrap_pycs) > 0
            else:
                assert 0 == len(bootstrap_pycs)

        build_and_check(td2, False)
        build_and_check(td3, True)
@pytest.mark.skipif(WINDOWS, reason="No hardlinks on windows")
def test_pex_builder_copy_or_link():
    # type: () -> None
    """CopyMode.COPY must materialize a real copy, while LINK/SYMLINK must leave
    the chroot entry sharing the source file's inode."""
    with temporary_dir() as td:
        src = os.path.join(td, "exe.py")
        with safe_open(src, "w") as fp:
            fp.write(exe_main)

        def build_and_check(copy_mode):
            # type: (CopyMode.Value) -> None
            pb = PEXBuilder(copy_mode=copy_mode)
            path = pb.path()
            pb.add_source(src, "exe.py")
            path_clone = os.path.join(path, "__clone")
            pb.clone(into=path_clone)
            for root in path, path_clone:
                s1 = os.stat(src)
                s2 = os.stat(os.path.join(root, "exe.py"))
                # An identical (inode, device) pair means both names refer to one file.
                is_link = (s1[stat.ST_INO], s1[stat.ST_DEV]) == (s2[stat.ST_INO], s2[stat.ST_DEV])
                if copy_mode == CopyMode.COPY:
                    assert not is_link
                else:
                    # Since os.stat follows symlinks; so in CopyMode.SYMLINK, this just proves
                    # the symlink points to the original file. Going further and checking path
                    # and path_clone for the presence of a symlink (an os.islink test) is
                    # trickier since a Linux hardlink of a symlink produces a symlink whereas a
                    # macOS hardlink of a symlink produces a hardlink.
                    assert is_link

        build_and_check(CopyMode.LINK)
        build_and_check(CopyMode.COPY)
        build_and_check(CopyMode.SYMLINK)
@pytest.fixture
def tmp_chroot(tmpdir):
    # type: (Any) -> Iterator[str]
    """Fixture: chdir into a fresh temporary directory; restore the old cwd afterwards."""
    chroot = str(tmpdir)
    original_cwd = os.getcwd()
    try:
        os.chdir(chroot)
        yield chroot
    finally:
        os.chdir(original_cwd)
@pytest.mark.parametrize(
    "copy_mode", [pytest.param(copy_mode, id=copy_mode.value) for copy_mode in CopyMode.values()]
)
def test_pex_builder_add_source_relpath_issues_1192(
    tmp_chroot, # type: str
    copy_mode, # type: CopyMode.Value
):
    # type: (...) -> None
    """Regression test for #1192: add_source must accept a relative path in every copy mode."""
    builder = PEXBuilder(copy_mode=copy_mode)
    with safe_open("src/main.py", "w") as fp:
        fp.write("import sys; sys.exit(42)")
    builder.add_source("src/main.py", "main.py")
    builder.set_entry_point("main")
    builder.build("test.pex")
    # The built PEX runs the entry point, which exits 42.
    proc = Executor.open_process(cmd=[os.path.abspath("test.pex")])
    proc.wait()
    assert proc.returncode == 42
def test_pex_builder_deterministic_timestamp():
    # type: () -> None
    """With deterministic_timestamp=True every zip entry gets the fixed 1980-01-01 time."""
    builder = PEXBuilder()
    with temporary_dir() as td:
        target = os.path.join(td, "foo.pex")
        builder.build(target, deterministic_timestamp=True)
        with zipfile.ZipFile(target) as zf:
            for info in zf.infolist():
                assert info.date_time == (1980, 1, 1, 0, 0, 0)
def test_pex_builder_from_requirements_pex():
    # type: () -> None
    """add_from_requirements_pex must work from both an exploded pex dir and a .pex file."""

    def build_from_req_pex(path, req_pex):
        # type: (str, str) -> PEXBuilder
        # Seed a new builder with the dependencies of an existing requirements
        # PEX, then attach our own executable on top and freeze it.
        pb = PEXBuilder(path=path)
        pb.add_from_requirements_pex(req_pex)
        with open(os.path.join(path, "exe.py"), "w") as fp:
            fp.write(exe_main)
        pb.set_executable(os.path.join(path, "exe.py"))
        pb.freeze()
        return pb

    def verify(pb):
        # type: (PEXBuilder) -> None
        # Run the PEX and check the success marker written by exe_main.
        success_txt = os.path.join(pb.path(), "success.txt")
        PEX(pb.path(), interpreter=pb.interpreter).run(args=[success_txt])
        assert os.path.exists(success_txt)
        with open(success_txt) as fp:
            assert fp.read() == "success"

    # Build from pex dir.
    with temporary_dir() as td2:
        with temporary_dir() as td1, make_bdist("p1") as p1:
            pb1 = write_pex(td1, dists=[p1])
            pb2 = build_from_req_pex(td2, pb1.path())
            verify(pb2)
    # Build from .pex file.
    with temporary_dir() as td4:
        with temporary_dir() as td3, make_bdist("p1") as p1:
            pb3 = write_pex(td3, dists=[p1])
            target = os.path.join(td3, "foo.pex")
            pb3.build(target)
            pb4 = build_from_req_pex(td4, target)
            verify(pb4)
def test_pex_builder_script_from_pex_path(tmpdir):
    # type: (Any) -> None
    """A console script provided by a PEX on pex_path is resolvable via set_script."""
    # First build a PEX carrying the wheel that defines the 'my_app' console script.
    pex_with_script = os.path.join(str(tmpdir), "script.pex")
    with built_wheel(
        name="my_project",
        entry_points={"console_scripts": ["my_app = my_project.my_module:do_something"]},
    ) as my_whl:
        carrier = PEXBuilder()
        carrier.add_dist_location(my_whl)
        carrier.build(pex_with_script)
    # Then build an app PEX that only references the first one via pex_path.
    pex_file = os.path.join(str(tmpdir), "app.pex")
    app = PEXBuilder()
    app.info.pex_path = pex_with_script
    app.set_script("my_app")
    app.build(pex_file)
    assert "hello world!\n" == subprocess.check_output(args=[pex_file]).decode("utf-8")
def test_pex_builder_setuptools_script(tmpdir):
    # type: (Any) -> None
    """A setuptools-declared script shipped in a wheel is runnable via set_script."""
    pex_file = os.path.join(str(tmpdir), "app.pex")
    with built_wheel(
        name="my_project",
    ) as wheel_path:
        builder = PEXBuilder()
        builder.add_dist_location(wheel_path)
        builder.set_script("shell_script")
        builder.build(pex_file)
    output = subprocess.check_output(args=[pex_file]).decode("utf-8")
    assert output == "hello world from shell script\n"
def test_pex_builder_packed(tmpdir):
    # type: (Any) -> None
    """Layout.PACKED should produce a runnable app dir whose bootstrap and dist
    zips are byte-identical to the copies cached under PEX_ROOT, with no
    symlinks escaping the app dir."""
    pex_root = os.path.join(str(tmpdir), "pex_root")
    pex_app = os.path.join(str(tmpdir), "app.pex")
    source_file = os.path.join(str(tmpdir), "src")
    touch(source_file)
    with ENV.patch(PEX_ROOT=pex_root), built_wheel(name="my_project") as my_whl:
        pb = PEXBuilder(copy_mode=CopyMode.SYMLINK)
        pb.add_source(source_file, "a.file")
        pb.add_dist_location(my_whl)
        pb.set_script("shell_script")
        pb.build(pex_app, layout=Layout.PACKED)
        # The packed layout is directly executable via its __main__.py.
        assert "hello world from shell script\n" == subprocess.check_output(
            args=[os.path.join(pex_app, "__main__.py")]
        ).decode("utf-8")
        # The in-app bootstrap zip must match the cached copy in PEX_ROOT.
        spread_dist_bootstrap = os.path.join(pex_app, pb.info.bootstrap)
        assert zipfile.is_zipfile(spread_dist_bootstrap)
        cached_bootstrap_zip = os.path.join(
            pex_root, "bootstrap_zips", pb.info.bootstrap_hash, pb.info.bootstrap
        )
        assert zipfile.is_zipfile(cached_bootstrap_zip)
        assert filecmp.cmp(spread_dist_bootstrap, cached_bootstrap_zip, shallow=False)
        assert os.path.isfile(os.path.join(pex_app, "a.file"))
        # Even though the builder used SYMLINK mode, nothing in the packed app
        # may point outside the app dir (into the chroot or PEX_ROOT caches).
        for root, dirs, files in os.walk(pex_app, followlinks=False):
            for f in files:
                path = os.path.join(root, f)
                assert not os.path.islink(path) or pex_app == os.path.commonprefix(
                    [pex_app, os.path.realpath(path)]
                ), (
                    "All packed layout files should be real files inside the packed layout root that "
                    "are divorced from either the PEXBuilder chroot or PEX_ROOT caches."
                )
        # Exactly one dist, and its in-app zip matches the PEX_ROOT cached zip.
        assert 1 == len(pb.info.distributions)
        location, sha = next(iter(pb.info.distributions.items()))
        spread_dist_zip = os.path.join(pex_app, pb.info.internal_cache, location)
        assert zipfile.is_zipfile(spread_dist_zip)
        cached_dist_zip = os.path.join(pex_root, "installed_wheel_zips", sha, location)
        assert zipfile.is_zipfile(cached_dist_zip)
        assert filecmp.cmp(spread_dist_zip, cached_dist_zip, shallow=False)
@pytest.mark.parametrize(
    "copy_mode", [pytest.param(copy_mode, id=copy_mode.value) for copy_mode in CopyMode.values()]
)
@pytest.mark.parametrize(
    "layout", [pytest.param(layout, id=layout.value) for layout in Layout.values()]
)
def test_pex_builder_exclude_bootstrap_testing(
    tmpdir, # type: Any
    copy_mode, # type: CopyMode.Value
    layout, # type: Layout.Value
):
    # type: (...) -> None
    """The shipped `.bootstrap` must contain the core Pex runtime modules but
    not Pex's own testing support files, for every layout/copy-mode combo."""
    pex_path = os.path.join(str(tmpdir), "empty.pex")
    pb = PEXBuilder(copy_mode=copy_mode)
    pb.build(pex_path, layout=layout)
    bootstrap_location = os.path.join(pex_path, pb.info.bootstrap)
    bootstrap_files = set()  # type: Set[str]
    if Layout.ZIPAPP == layout:
        # Single zip: bootstrap entries live under the pb.info.bootstrap prefix.
        with open_zip(pex_path) as zf:
            bootstrap_files.update(
                os.path.relpath(f, pb.info.bootstrap)
                for f in zf.namelist()
                if f.startswith(pb.info.bootstrap)
            )
    elif Layout.PACKED == layout:
        # Packed layout: the bootstrap is its own nested zip file.
        with open_zip(bootstrap_location) as zf:
            bootstrap_files.update(zf.namelist())
    else:
        # Loose layout: walk the bootstrap directory on disk.
        bootstrap_files.update(
            os.path.relpath(os.path.join(root, f), bootstrap_location)
            for root, _, files in os.walk(bootstrap_location)
            for f in files
        )
    assert {"pex/pex_bootstrapper.py", "pex/pex_info.py", "pex/pex.py"}.issubset(
        bootstrap_files
    ), "Expected the `.bootstrap` to contain at least some of the key Pex runtime modules."
    assert not [
        f for f in bootstrap_files if f.endswith(("testing.py", "testing.pyc"))
    ], "Expected testing support files to be stripped from the Pex `.bootstrap`."
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import filecmp
import os
import stat
import subprocess
import zipfile
import pytest
from pex.common import open_zip, safe_open, temporary_dir, touch
from pex.compatibility import WINDOWS
from pex.executor import Executor
from pex.layout import Layout
from pex.pex import PEX
from pex.pex_builder import CopyMode, PEXBuilder
from pex.testing import built_wheel, make_bdist
from pex.testing import write_simple_pex as write_pex
from pex.typing import TYPE_CHECKING
from pex.variables import ENV
# Import typing names only for static analysis; avoids a runtime typing cost.
if TYPE_CHECKING:
    from typing import Any, Iterator, List, Set

# Source for a PEX entry point used across these tests: imports from the
# bundled 'p1' dist and writes a success marker file at the path in argv[1].
exe_main = """
import sys
from p1.my_module import do_something
do_something()
with open(sys.argv[1], 'w') as fp:
    fp.write('success')
"""

# Same as exe_main but additionally star-imports pyparsing to prove that a
# wheel dependency is importable from inside the built PEX.
wheeldeps_exe_main = """
import sys
from pyparsing import *
from p1.my_module import do_something
do_something()
with open(sys.argv[1], 'w') as fp:
    fp.write('success')
"""
def test_pex_builder():
    # type: () -> None
    """Build a PEX with a bundled dist and verify that running it writes the
    success marker written by exe_main."""
    with temporary_dir() as td, make_bdist("p1") as p1:
        pb = write_pex(td, exe_main, dists=[p1])
        success_txt = os.path.join(td, "success.txt")
        PEX(td, interpreter=pb.interpreter).run(args=[success_txt])
        assert os.path.exists(success_txt)
        with open(success_txt) as fp:
            assert fp.read() == "success"
    # Second pass in a fresh temporary dir.
    # NOTE(review): an unused second temporary_dir (td2) was removed here.
    with temporary_dir() as td1, make_bdist("p1") as p1:
        pb = write_pex(td1, exe_main, dists=[p1])
        success_txt = os.path.join(td1, "success.txt")
        PEX(td1, interpreter=pb.interpreter).run(args=[success_txt])
        assert os.path.exists(success_txt)
        with open(success_txt) as fp:
            assert fp.read() == "success"
def test_pex_builder_wheeldep():
    # type: () -> None
    """Repeat the pex_builder test, but this time include an import of something from a wheel that
    doesn't come in importable form."""
    pyparsing_whl = "./tests/example_packages/pyparsing-2.1.10-py2.py3-none-any.whl"
    with temporary_dir() as build_dir, make_bdist("p1") as dist:
        builder = write_pex(build_dir, wheeldeps_exe_main, dists=[dist, pyparsing_whl])
        marker = os.path.join(build_dir, "success.txt")
        PEX(build_dir, interpreter=builder.interpreter).run(args=[marker])
        assert os.path.exists(marker)
        with open(marker) as fp:
            assert fp.read() == "success"
def test_pex_builder_shebang():
    # type: () -> None
    """A custom shebang must appear, '#!'-prefixed, as the first line of the built PEX."""

    def make_builder(shebang):
        # type: (str) -> PEXBuilder
        result = PEXBuilder()
        result.set_shebang(shebang)
        return result

    expected_preamble = b"#!foobar\n"
    # Both the bare and the '#!'-prefixed spellings normalize to the same
    # preamble, and cloning a builder preserves the shebang.
    for original in (make_builder("foobar"), make_builder("#!foobar")):
        for builder in (original, original.clone()):
            with temporary_dir() as td:
                target = os.path.join(td, "foo.pex")
                builder.build(target)
                with open(target, "rb") as fp:
                    assert fp.read(len(expected_preamble)) == expected_preamble
def test_pex_builder_preamble():
    # type: () -> None
    """A preamble runs before the PEX entry point and can short-circuit execution."""
    with temporary_dir() as td:
        target = os.path.join(td, "foo.pex")
        marker = os.path.join(td, "foo.1")
        preamble = "\n".join(
            ["import sys", "open('{0}', 'w').close()".format(marker), "sys.exit(3)"]
        )
        builder = PEXBuilder(preamble=preamble)
        builder.build(target)
        # Building alone must not execute the preamble.
        assert not os.path.exists(marker)
        process = PEX(target, interpreter=builder.interpreter).run(blocking=False)
        process.wait()
        # The preamble's sys.exit(3) pre-empts normal execution...
        assert process.returncode == 3
        # ...after it has created the marker file.
        assert os.path.exists(marker)
def test_pex_builder_compilation():
    # type: () -> None
    """freeze(bytecode_compile=...) controls whether .pyc files are produced for
    sources, the executable, __main__, and the bundled .bootstrap."""
    with temporary_dir() as td1, temporary_dir() as td2, temporary_dir() as td3:
        src = os.path.join(td1, "src.py")
        with open(src, "w") as fp:
            fp.write(exe_main)
        exe = os.path.join(td1, "exe.py")
        with open(exe, "w") as fp:
            fp.write(exe_main)

        def build_and_check(path, precompile):
            # type: (str, bool) -> None
            # Build a chroot at `path` and assert .pyc presence matches `precompile`.
            pb = PEXBuilder(path=path)
            pb.add_source(src, "lib/src.py")
            pb.set_executable(exe, "exe.py")
            pb.freeze(bytecode_compile=precompile)
            for pyc_file in ("exe.pyc", "lib/src.pyc", "__main__.pyc"):
                pyc_exists = os.path.exists(os.path.join(path, pyc_file))
                if precompile:
                    assert pyc_exists
                else:
                    assert not pyc_exists
            # The runtime bootstrap should be (un)compiled in lockstep with sources.
            bootstrap_dir = os.path.join(path, pb.info.bootstrap)
            bootstrap_pycs = []  # type: List[str]
            for _, _, files in os.walk(bootstrap_dir):
                bootstrap_pycs.extend(f for f in files if f.endswith(".pyc"))
            if precompile:
                assert len(bootstrap_pycs) > 0
            else:
                assert 0 == len(bootstrap_pycs)

        build_and_check(td2, False)
        build_and_check(td3, True)
@pytest.mark.skipif(WINDOWS, reason="No hardlinks on windows")
def test_pex_builder_copy_or_link():
    # type: () -> None
    """CopyMode.COPY must materialize a real copy, while LINK/SYMLINK must leave
    the chroot entry sharing the source file's inode."""
    with temporary_dir() as td:
        src = os.path.join(td, "exe.py")
        with safe_open(src, "w") as fp:
            fp.write(exe_main)

        def build_and_check(copy_mode):
            # type: (CopyMode.Value) -> None
            pb = PEXBuilder(copy_mode=copy_mode)
            path = pb.path()
            pb.add_source(src, "exe.py")
            path_clone = os.path.join(path, "__clone")
            pb.clone(into=path_clone)
            for root in path, path_clone:
                s1 = os.stat(src)
                s2 = os.stat(os.path.join(root, "exe.py"))
                # An identical (inode, device) pair means both names refer to one file.
                is_link = (s1[stat.ST_INO], s1[stat.ST_DEV]) == (s2[stat.ST_INO], s2[stat.ST_DEV])
                if copy_mode == CopyMode.COPY:
                    assert not is_link
                else:
                    # Since os.stat follows symlinks; so in CopyMode.SYMLINK, this just proves
                    # the symlink points to the original file. Going further and checking path
                    # and path_clone for the presence of a symlink (an os.islink test) is
                    # trickier since a Linux hardlink of a symlink produces a symlink whereas a
                    # macOS hardlink of a symlink produces a hardlink.
                    assert is_link

        build_and_check(CopyMode.LINK)
        build_and_check(CopyMode.COPY)
        build_and_check(CopyMode.SYMLINK)
@pytest.fixture
def tmp_chroot(tmpdir):
    # type: (Any) -> Iterator[str]
    """Fixture: chdir into a fresh temporary directory; restore the old cwd afterwards."""
    chroot = str(tmpdir)
    original_cwd = os.getcwd()
    try:
        os.chdir(chroot)
        yield chroot
    finally:
        os.chdir(original_cwd)
@pytest.mark.parametrize(
    "copy_mode", [pytest.param(copy_mode, id=copy_mode.value) for copy_mode in CopyMode.values()]
)
def test_pex_builder_add_source_relpath_issues_1192(
    tmp_chroot, # type: str
    copy_mode, # type: CopyMode.Value
):
    # type: (...) -> None
    """Regression test for #1192: add_source must accept a relative path in every copy mode."""
    builder = PEXBuilder(copy_mode=copy_mode)
    with safe_open("src/main.py", "w") as fp:
        fp.write("import sys; sys.exit(42)")
    builder.add_source("src/main.py", "main.py")
    builder.set_entry_point("main")
    builder.build("test.pex")
    # The built PEX runs the entry point, which exits 42.
    proc = Executor.open_process(cmd=[os.path.abspath("test.pex")])
    proc.wait()
    assert proc.returncode == 42
def test_pex_builder_deterministic_timestamp():
    # type: () -> None
    """With deterministic_timestamp=True every zip entry gets the fixed 1980-01-01 time."""
    builder = PEXBuilder()
    with temporary_dir() as td:
        target = os.path.join(td, "foo.pex")
        builder.build(target, deterministic_timestamp=True)
        with zipfile.ZipFile(target) as zf:
            for info in zf.infolist():
                assert info.date_time == (1980, 1, 1, 0, 0, 0)
def test_pex_builder_from_requirements_pex():
    # type: () -> None
    """add_from_requirements_pex must work from both an exploded pex dir and a .pex file."""

    def build_from_req_pex(path, req_pex):
        # type: (str, str) -> PEXBuilder
        # Seed a new builder with the dependencies of an existing requirements
        # PEX, then attach our own executable on top and freeze it.
        pb = PEXBuilder(path=path)
        pb.add_from_requirements_pex(req_pex)
        with open(os.path.join(path, "exe.py"), "w") as fp:
            fp.write(exe_main)
        pb.set_executable(os.path.join(path, "exe.py"))
        pb.freeze()
        return pb

    def verify(pb):
        # type: (PEXBuilder) -> None
        # Run the PEX and check the success marker written by exe_main.
        success_txt = os.path.join(pb.path(), "success.txt")
        PEX(pb.path(), interpreter=pb.interpreter).run(args=[success_txt])
        assert os.path.exists(success_txt)
        with open(success_txt) as fp:
            assert fp.read() == "success"

    # Build from pex dir.
    with temporary_dir() as td2:
        with temporary_dir() as td1, make_bdist("p1") as p1:
            pb1 = write_pex(td1, dists=[p1])
            pb2 = build_from_req_pex(td2, pb1.path())
            verify(pb2)
    # Build from .pex file.
    with temporary_dir() as td4:
        with temporary_dir() as td3, make_bdist("p1") as p1:
            pb3 = write_pex(td3, dists=[p1])
            target = os.path.join(td3, "foo.pex")
            pb3.build(target)
            pb4 = build_from_req_pex(td4, target)
            verify(pb4)
def test_pex_builder_script_from_pex_path(tmpdir):
    # type: (Any) -> None
    """A console script provided by a PEX on pex_path is resolvable via set_script."""
    # First build a PEX carrying the wheel that defines the 'my_app' console script.
    pex_with_script = os.path.join(str(tmpdir), "script.pex")
    with built_wheel(
        name="my_project",
        entry_points={"console_scripts": ["my_app = my_project.my_module:do_something"]},
    ) as my_whl:
        carrier = PEXBuilder()
        carrier.add_dist_location(my_whl)
        carrier.build(pex_with_script)
    # Then build an app PEX that only references the first one via pex_path.
    pex_file = os.path.join(str(tmpdir), "app.pex")
    app = PEXBuilder()
    app.info.pex_path = pex_with_script
    app.set_script("my_app")
    app.build(pex_file)
    assert "hello world!\n" == subprocess.check_output(args=[pex_file]).decode("utf-8")
def test_pex_builder_setuptools_script(tmpdir):
    # type: (Any) -> None
    """A setuptools-declared script shipped in a wheel is runnable via set_script."""
    pex_file = os.path.join(str(tmpdir), "app.pex")
    with built_wheel(
        name="my_project",
    ) as wheel_path:
        builder = PEXBuilder()
        builder.add_dist_location(wheel_path)
        builder.set_script("shell_script")
        builder.build(pex_file)
    output = subprocess.check_output(args=[pex_file]).decode("utf-8")
    assert output == "hello world from shell script\n"
def test_pex_builder_packed(tmpdir):
    # type: (Any) -> None
    """Exercise the PACKED layout: the built app must run, its bootstrap and
    dist zips must be byte-identical to their PEX_ROOT cache entries, and no
    file in the layout may link outside the layout root.
    """
    pex_root = os.path.join(str(tmpdir), "pex_root")
    pex_app = os.path.join(str(tmpdir), "app.pex")
    source_file = os.path.join(str(tmpdir), "src")
    touch(source_file)
    with ENV.patch(PEX_ROOT=pex_root), built_wheel(name="my_project") as my_whl:
        # SYMLINK copy mode is used deliberately: the assertions below check
        # that the packed output is nevertheless made of real files.
        pb = PEXBuilder(copy_mode=CopyMode.SYMLINK)
        pb.add_source(source_file, "a.file")
        pb.add_dist_location(my_whl)
        pb.set_script("shell_script")
        pb.build(pex_app, layout=Layout.PACKED)
        # A packed layout is a directory; run it via its __main__.py.
        assert "hello world from shell script\n" == subprocess.check_output(
            args=[os.path.join(pex_app, "__main__.py")]
        ).decode("utf-8")
        # The bootstrap inside the layout is a zip and must match the copy
        # cached under PEX_ROOT/bootstrap_zips/<hash>/.
        spread_dist_bootstrap = os.path.join(pex_app, pb.info.bootstrap)
        assert zipfile.is_zipfile(spread_dist_bootstrap)
        cached_bootstrap_zip = os.path.join(
            pex_root, "bootstrap_zips", pb.info.bootstrap_hash, pb.info.bootstrap
        )
        assert zipfile.is_zipfile(cached_bootstrap_zip)
        assert filecmp.cmp(spread_dist_bootstrap, cached_bootstrap_zip, shallow=False)
        assert os.path.isfile(os.path.join(pex_app, "a.file"))
        # Any symlink found in the layout must resolve to a target still
        # inside the layout root (commonprefix check against realpath).
        for root, dirs, files in os.walk(pex_app, followlinks=False):
            for f in files:
                path = os.path.join(root, f)
                assert not os.path.islink(path) or pex_app == os.path.commonprefix(
                    [pex_app, os.path.realpath(path)]
                ), (
                    "All packed layout files should be real files inside the packed layout root that "
                    "are divorced from either the PEXBuilder chroot or PEX_ROOT caches."
                )
        # Exactly one dist was added; its zip must match the copy cached
        # under PEX_ROOT/installed_wheel_zips/<sha>/.
        assert 1 == len(pb.info.distributions)
        location, sha = next(iter(pb.info.distributions.items()))
        spread_dist_zip = os.path.join(pex_app, pb.info.internal_cache, location)
        assert zipfile.is_zipfile(spread_dist_zip)
        cached_dist_zip = os.path.join(pex_root, "installed_wheel_zips", sha, location)
        assert zipfile.is_zipfile(cached_dist_zip)
        assert filecmp.cmp(spread_dist_zip, cached_dist_zip, shallow=False)
@pytest.mark.parametrize(
    "copy_mode", [pytest.param(copy_mode, id=copy_mode.value) for copy_mode in CopyMode.values()]
)
@pytest.mark.parametrize(
    "layout", [pytest.param(layout, id=layout.value) for layout in Layout.values()]
)
def test_pex_builder_exclude_bootstrap_testing(
    tmpdir,  # type: Any
    copy_mode,  # type: CopyMode.Value
    layout,  # type: Layout.Value
):
    # type: (...) -> None
    """Across all layouts and copy modes, the `.bootstrap` must ship the core
    Pex runtime modules but not the testing support modules."""
    pex_path = os.path.join(str(tmpdir), "empty.pex")
    builder = PEXBuilder(copy_mode=copy_mode)
    builder.build(pex_path, layout=layout)
    bootstrap_location = os.path.join(pex_path, builder.info.bootstrap)
    # Each layout stores the bootstrap differently; normalize to a flat set
    # of paths relative to the bootstrap root.
    if Layout.ZIPAPP == layout:
        with open_zip(pex_path) as archive:
            bootstrap_files = {
                name_in_zip[len(builder.info.bootstrap) + 1 :]
                for name_in_zip in archive.namelist()
                if name_in_zip.startswith(builder.info.bootstrap)
            }  # type: Set[str]
    elif Layout.PACKED == layout:
        with open_zip(bootstrap_location) as archive:
            bootstrap_files = set(archive.namelist())
    else:
        bootstrap_files = {
            os.path.relpath(os.path.join(parent, name), bootstrap_location)
            for parent, _, names in os.walk(bootstrap_location)
            for name in names
        }
    assert {"pex/pex_bootstrapper.py", "pex/pex_info.py", "pex/pex.py"}.issubset(
        bootstrap_files
    ), "Expected the `.bootstrap` to contain at least some of the key Pex runtime modules."
    leaked = [name for name in bootstrap_files if name.endswith(("testing.py", "testing.pyc"))]
    assert not leaked, "Expected testing support files to be stripped from the Pex `.bootstrap`."
|
en
| 0.668725
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). import sys from p1.my_module import do_something do_something() with open(sys.argv[1], 'w') as fp: fp.write('success') import sys from pyparsing import * from p1.my_module import do_something do_something() with open(sys.argv[1], 'w') as fp: fp.write('success') # type: () -> None # test w/ and w/o zipfile dists # test w/ and w/o zipfile dists # type: () -> None Repeat the pex_builder test, but this time include an import of something from a wheel that doesn't come in importable form. # type: () -> None # type: (str) -> PEXBuilder # type: () -> None # type: () -> None # type: (str, bool) -> None # type: List[str] # type: () -> None # type: (CopyMode.Value) -> None # Since os.stat follows symlinks; so in CopyMode.SYMLINK, this just proves # the symlink points to the original file. Going further and checking path # and path_clone for the presence of a symlink (an os.islink test) is # trickier since a Linux hardlink of a symlink produces a symlink whereas a # macOS hardlink of a symlink produces a hardlink. # type: (Any) -> Iterator[str] # type: str # type: CopyMode.Value # type: (...) -> None # type: () -> None # type: () -> None # type: (str, str) -> PEXBuilder # type: (PEXBuilder) -> None # Build from pex dir. # Build from .pex file. # type: (Any) -> None # type: (Any) -> None # type: (Any) -> None # type: Any # type: CopyMode.Value # type: Layout.Value # type: (...) -> None # type: Set[str]
| 2.191669
| 2
|
custom/icds_reports/tests/agg_tests/reports/test_service_delivery_data.py
|
tobiasmcnulty/commcare-hq
| 1
|
6628617
|
<gh_stars>1-10
from django.test import TestCase
from custom.icds_reports.reports.service_delivery_dashboard_data import get_service_delivery_report_data
class TestServiceDeliveryData(TestCase):
def test_get_service_delivery_report_data_0_3(self):
get_service_delivery_report_data.clear('icds-cas', 0, 10, None, False,
{'aggregation_level': 1}, 2017, 5, 'pw_lw_children')
data = get_service_delivery_report_data(
'icds-cas',
0,
10,
None,
False,
{
'aggregation_level': 1,
},
2017,
5,
'pw_lw_children',
)
expected = {
'data': [
{
'state_name': 'All',
'district_name': 'All',
'block_name': 'All',
'supervisor_name': 'All',
'awc_name': 'All',
'num_launched_awcs': 22,
'valid_visits': 3,
'expected_visits': 379,
'gm_0_3': 222,
'children_0_3': 314,
'num_awcs_conducted_cbe': 1,
'num_awcs_conducted_vhnd': 8,
'thr_21_days': 261,
'thr_25_days': 180,
'thr_eligible': 598,
'vhnd_conducted': 12,
'home_visits': '0.79 %',
'gm': '70.70 %',
'cbe': '4.55 %',
'thr': '43.65 %',
'cbe_sector_percent': '14.29 %',
'vhnd_sector_value': 8
},
{
'state_name': 'st1',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'num_launched_awcs': 10,
'valid_visits': 3,
'expected_visits': 185,
'gm_0_3': 83,
'children_0_3': 143,
'num_awcs_conducted_cbe': 0,
'num_awcs_conducted_vhnd': 2,
'thr_21_days': 80,
'thr_25_days': 24,
'thr_eligible': 279,
'vhnd_conducted': 3,
'home_visits': '1.62 %',
'gm': '58.04 %',
'cbe': '0.00 %',
'thr': '28.67 %'
},
{
'state_name': 'st2',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'num_launched_awcs': 11,
'valid_visits': 0,
'expected_visits': 193,
'gm_0_3': 139,
'children_0_3': 171,
'num_awcs_conducted_cbe': 1,
'num_awcs_conducted_vhnd': 6,
'thr_21_days': 181,
'thr_25_days': 156,
'thr_eligible': 318,
'vhnd_conducted': 9,
'home_visits': '0.00 %',
'gm': '81.29 %',
'cbe': '9.09 %',
'thr': '56.92 %'
},
{
'state_name': 'st3',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'num_launched_awcs': 0,
'valid_visits': 0,
'expected_visits': 0,
'gm_0_3': 0,
'children_0_3': 0,
'num_awcs_conducted_cbe': 0,
'num_awcs_conducted_vhnd': 0,
'thr_21_days': 0,
'thr_25_days': 0,
'thr_eligible': 0,
'vhnd_conducted': 0,
'home_visits': 'Data Not Entered',
'gm': 'Data Not Entered',
'cbe': 'Data Not Entered',
'thr': 'Data Not Entered'
},
{
'state_name': 'st4',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'num_launched_awcs': 0,
'valid_visits': 0,
'expected_visits': 0,
'gm_0_3': 0,
'children_0_3': 0,
'num_awcs_conducted_cbe': 0,
'num_awcs_conducted_vhnd': 0,
'thr_21_days': 0,
'thr_25_days': 0,
'thr_eligible': 0,
'vhnd_conducted': 0,
'home_visits': 'Data Not Entered',
'gm': 'Data Not Entered',
'cbe': 'Data Not Entered',
'thr': 'Data Not Entered'
},
{
'state_name': 'st5',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'num_launched_awcs': 0,
'valid_visits': 0,
'expected_visits': 0,
'gm_0_3': 0,
'children_0_3': 0,
'num_awcs_conducted_cbe': 0,
'num_awcs_conducted_vhnd': 0,
'thr_21_days': 0,
'thr_25_days': 0,
'thr_eligible': 0,
'vhnd_conducted': 0,
'home_visits': 'Data Not Entered',
'gm': 'Data Not Entered',
'cbe': 'Data Not Entered',
'thr': 'Data Not Entered'
},
{
'state_name': 'st6',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'num_launched_awcs': 0,
'valid_visits': 0,
'expected_visits': 0,
'gm_0_3': 0,
'children_0_3': 0,
'num_awcs_conducted_cbe': 0,
'num_awcs_conducted_vhnd': 0,
'thr_21_days': 0,
'thr_25_days': 0,
'thr_eligible': 0,
'vhnd_conducted': 0,
'home_visits': 'Data Not Entered',
'gm': 'Data Not Entered',
'cbe': 'Data Not Entered',
'thr': 'Data Not Entered'
},
{
'state_name': 'st7',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'num_launched_awcs': 1,
'valid_visits': 0,
'expected_visits': 1,
'gm_0_3': 0,
'children_0_3': 0,
'num_awcs_conducted_cbe': 0,
'num_awcs_conducted_vhnd': 0,
'thr_21_days': 0,
'thr_25_days': 0,
'thr_eligible': 1,
'vhnd_conducted': 0,
'home_visits': '0.00 %',
'gm': 'Data Not Entered',
'cbe': '0.00 %',
'thr': '0.00 %'
}
],
'aggregationLevel': 1,
'recordsTotal': 7,
'recordsFiltered': 7
}
self.assertDictEqual(expected, data)
def test_get_service_delivery_data_state_0_3(self):
data = get_service_delivery_report_data(
'icds-cas',
0,
10,
'district_name',
False,
{
'aggregation_level': 2,
'state_id': 'st1',
},
2017,
5,
'pw_lw_children',
)
expected = {
'data': [
{
'state_name': 'All',
'district_name': 'All',
'block_name': 'All',
'supervisor_name': 'All',
'awc_name': 'All',
'num_launched_awcs': 10,
'valid_visits': 3,
'expected_visits': 185,
'gm_0_3': 83,
'children_0_3': 143,
'num_awcs_conducted_cbe': 0,
'num_awcs_conducted_vhnd': 2,
'thr_21_days': 80,
'thr_25_days': 24,
'thr_eligible': 279,
'vhnd_conducted': 3,
'home_visits': '1.62 %',
'gm': '58.04 %',
'cbe': '0.00 %',
'thr': '28.67 %',
'cbe_sector_percent': '0.00 %',
'vhnd_sector_value': 2
},
{
'state_name': 'st1',
'district_name': 'd1',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'num_launched_awcs': 10,
'valid_visits': 3,
'expected_visits': 185,
'gm_0_3': 83,
'children_0_3': 143,
'num_awcs_conducted_cbe': 0,
'num_awcs_conducted_vhnd': 2,
'thr_21_days': 80,
'thr_25_days': 24,
'thr_eligible': 279,
'vhnd_conducted': 3,
'home_visits': '1.62 %',
'gm': '58.04 %',
'cbe': '0.00 %',
'thr': '28.67 %'
}
],
'aggregationLevel': 2,
'recordsTotal': 1,
'recordsFiltered': 1
}
self.assertDictEqual(expected, data)
def test_get_service_delivery_data_3_6(self):
data = get_service_delivery_report_data(
'icds-cas',
0,
10,
None,
False,
{
'aggregation_level': 1,
},
2017,
5,
'children',
)
expected = {
'data': [
{
'num_launched_awcs': 22,
'state_name': 'All',
'district_name': 'All',
'block_name': 'All',
'supervisor_name': 'All',
'awc_name': 'All',
'lunch_21_days': 15,
'lunch_25_days': 0,
'pse_eligible': 991,
'pse_21_days': 66,
'pse_25_days': 20,
'gm_3_5': 473,
'children_3_5': 675,
'gm': '70.07 %',
'pse': '6.66 %',
'sn': '1.51 %'
},
{
'num_launched_awcs': 10,
'state_name': 'st1',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'lunch_21_days': 4,
'lunch_25_days': 0,
'pse_eligible': 483,
'pse_21_days': 7,
'pse_25_days': 0,
'gm_3_5': 234,
'children_3_5': 332,
'gm': '70.48 %',
'pse': '1.45 %',
'sn': '0.83 %'
},
{
'num_launched_awcs': 11,
'state_name': 'st2',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'lunch_21_days': 11,
'lunch_25_days': 0,
'pse_eligible': 507,
'pse_21_days': 59,
'pse_25_days': 20,
'gm_3_5': 239,
'children_3_5': 342,
'gm': '69.88 %',
'pse': '11.64 %',
'sn': '2.17 %'
},
{
'num_launched_awcs': 0,
'state_name': 'st3',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'lunch_21_days': 0,
'lunch_25_days': 0,
'pse_eligible': 0,
'pse_21_days': 0,
'pse_25_days': 0,
'gm_3_5': 0,
'children_3_5': 0,
'gm': 'Data Not Entered',
'pse': 'Data Not Entered',
'sn': 'Data Not Entered'
},
{
'num_launched_awcs': 0,
'state_name': 'st4',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'lunch_21_days': 0,
'lunch_25_days': 0,
'pse_eligible': 0,
'pse_21_days': 0,
'pse_25_days': 0,
'gm_3_5': 0,
'children_3_5': 0,
'gm': 'Data Not Entered',
'pse': 'Data Not Entered',
'sn': 'Data Not Entered'
},
{
'num_launched_awcs': 0,
'state_name': 'st5',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'lunch_21_days': 0,
'lunch_25_days': 0,
'pse_eligible': 0,
'pse_21_days': 0,
'pse_25_days': 0,
'gm_3_5': 0,
'children_3_5': 0,
'gm': 'Data Not Entered',
'pse': 'Data Not Entered',
'sn': 'Data Not Entered'
},
{
'num_launched_awcs': 0,
'state_name': 'st6',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'lunch_21_days': 0,
'lunch_25_days': 0,
'pse_eligible': 0,
'pse_21_days': 0,
'pse_25_days': 0,
'gm_3_5': 0,
'children_3_5': 0,
'gm': 'Data Not Entered',
'pse': 'Data Not Entered',
'sn': 'Data Not Entered'
},
{
'num_launched_awcs': 1,
'state_name': 'st7',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'lunch_21_days': 0,
'lunch_25_days': 0,
'pse_eligible': 1,
'pse_21_days': 0,
'pse_25_days': 0,
'gm_3_5': 0,
'children_3_5': 1,
'gm': '0.00 %',
'pse': '0.00 %',
'sn': '0.00 %'
}
],
'aggregationLevel': 1,
'recordsTotal': 7,
'recordsFiltered': 7
}
self.assertDictEqual(expected, data)
def test_get_service_delivery_data_state_3_6(self):
data = get_service_delivery_report_data(
'icds-cas',
0,
10,
'district_name',
False,
{
'aggregation_level': 2,
'state_id': 'st1',
},
2017,
5,
'children',
)
expected = {
'data': [
{
'num_launched_awcs': 10,
'state_name': 'All',
'district_name': 'All',
'block_name': 'All',
'supervisor_name': 'All',
'awc_name': 'All',
'lunch_21_days': 4,
'lunch_25_days': 0,
'pse_eligible': 483,
'pse_21_days': 7,
'pse_25_days': 0,
'gm_3_5': 234,
'children_3_5': 332,
'gm': '70.48 %',
'pse': '1.45 %',
'sn': '0.83 %'
},
{
'num_launched_awcs': 10,
'state_name': 'st1',
'district_name': 'd1',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'lunch_21_days': 4,
'lunch_25_days': 0,
'pse_eligible': 483,
'pse_21_days': 7,
'pse_25_days': 0,
'gm_3_5': 234,
'children_3_5': 332,
'gm': '70.48 %',
'pse': '1.45 %',
'sn': '0.83 %'
}
],
'aggregationLevel': 2,
'recordsTotal': 1,
'recordsFiltered': 1
}
self.assertDictEqual(expected, data)
|
from django.test import TestCase
from custom.icds_reports.reports.service_delivery_dashboard_data import get_service_delivery_report_data
class TestServiceDeliveryData(TestCase):
def test_get_service_delivery_report_data_0_3(self):
get_service_delivery_report_data.clear('icds-cas', 0, 10, None, False,
{'aggregation_level': 1}, 2017, 5, 'pw_lw_children')
data = get_service_delivery_report_data(
'icds-cas',
0,
10,
None,
False,
{
'aggregation_level': 1,
},
2017,
5,
'pw_lw_children',
)
expected = {
'data': [
{
'state_name': 'All',
'district_name': 'All',
'block_name': 'All',
'supervisor_name': 'All',
'awc_name': 'All',
'num_launched_awcs': 22,
'valid_visits': 3,
'expected_visits': 379,
'gm_0_3': 222,
'children_0_3': 314,
'num_awcs_conducted_cbe': 1,
'num_awcs_conducted_vhnd': 8,
'thr_21_days': 261,
'thr_25_days': 180,
'thr_eligible': 598,
'vhnd_conducted': 12,
'home_visits': '0.79 %',
'gm': '70.70 %',
'cbe': '4.55 %',
'thr': '43.65 %',
'cbe_sector_percent': '14.29 %',
'vhnd_sector_value': 8
},
{
'state_name': 'st1',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'num_launched_awcs': 10,
'valid_visits': 3,
'expected_visits': 185,
'gm_0_3': 83,
'children_0_3': 143,
'num_awcs_conducted_cbe': 0,
'num_awcs_conducted_vhnd': 2,
'thr_21_days': 80,
'thr_25_days': 24,
'thr_eligible': 279,
'vhnd_conducted': 3,
'home_visits': '1.62 %',
'gm': '58.04 %',
'cbe': '0.00 %',
'thr': '28.67 %'
},
{
'state_name': 'st2',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'num_launched_awcs': 11,
'valid_visits': 0,
'expected_visits': 193,
'gm_0_3': 139,
'children_0_3': 171,
'num_awcs_conducted_cbe': 1,
'num_awcs_conducted_vhnd': 6,
'thr_21_days': 181,
'thr_25_days': 156,
'thr_eligible': 318,
'vhnd_conducted': 9,
'home_visits': '0.00 %',
'gm': '81.29 %',
'cbe': '9.09 %',
'thr': '56.92 %'
},
{
'state_name': 'st3',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'num_launched_awcs': 0,
'valid_visits': 0,
'expected_visits': 0,
'gm_0_3': 0,
'children_0_3': 0,
'num_awcs_conducted_cbe': 0,
'num_awcs_conducted_vhnd': 0,
'thr_21_days': 0,
'thr_25_days': 0,
'thr_eligible': 0,
'vhnd_conducted': 0,
'home_visits': 'Data Not Entered',
'gm': 'Data Not Entered',
'cbe': 'Data Not Entered',
'thr': 'Data Not Entered'
},
{
'state_name': 'st4',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'num_launched_awcs': 0,
'valid_visits': 0,
'expected_visits': 0,
'gm_0_3': 0,
'children_0_3': 0,
'num_awcs_conducted_cbe': 0,
'num_awcs_conducted_vhnd': 0,
'thr_21_days': 0,
'thr_25_days': 0,
'thr_eligible': 0,
'vhnd_conducted': 0,
'home_visits': 'Data Not Entered',
'gm': 'Data Not Entered',
'cbe': 'Data Not Entered',
'thr': 'Data Not Entered'
},
{
'state_name': 'st5',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'num_launched_awcs': 0,
'valid_visits': 0,
'expected_visits': 0,
'gm_0_3': 0,
'children_0_3': 0,
'num_awcs_conducted_cbe': 0,
'num_awcs_conducted_vhnd': 0,
'thr_21_days': 0,
'thr_25_days': 0,
'thr_eligible': 0,
'vhnd_conducted': 0,
'home_visits': 'Data Not Entered',
'gm': 'Data Not Entered',
'cbe': 'Data Not Entered',
'thr': 'Data Not Entered'
},
{
'state_name': 'st6',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'num_launched_awcs': 0,
'valid_visits': 0,
'expected_visits': 0,
'gm_0_3': 0,
'children_0_3': 0,
'num_awcs_conducted_cbe': 0,
'num_awcs_conducted_vhnd': 0,
'thr_21_days': 0,
'thr_25_days': 0,
'thr_eligible': 0,
'vhnd_conducted': 0,
'home_visits': 'Data Not Entered',
'gm': 'Data Not Entered',
'cbe': 'Data Not Entered',
'thr': 'Data Not Entered'
},
{
'state_name': 'st7',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'num_launched_awcs': 1,
'valid_visits': 0,
'expected_visits': 1,
'gm_0_3': 0,
'children_0_3': 0,
'num_awcs_conducted_cbe': 0,
'num_awcs_conducted_vhnd': 0,
'thr_21_days': 0,
'thr_25_days': 0,
'thr_eligible': 1,
'vhnd_conducted': 0,
'home_visits': '0.00 %',
'gm': 'Data Not Entered',
'cbe': '0.00 %',
'thr': '0.00 %'
}
],
'aggregationLevel': 1,
'recordsTotal': 7,
'recordsFiltered': 7
}
self.assertDictEqual(expected, data)
def test_get_service_delivery_data_state_0_3(self):
data = get_service_delivery_report_data(
'icds-cas',
0,
10,
'district_name',
False,
{
'aggregation_level': 2,
'state_id': 'st1',
},
2017,
5,
'pw_lw_children',
)
expected = {
'data': [
{
'state_name': 'All',
'district_name': 'All',
'block_name': 'All',
'supervisor_name': 'All',
'awc_name': 'All',
'num_launched_awcs': 10,
'valid_visits': 3,
'expected_visits': 185,
'gm_0_3': 83,
'children_0_3': 143,
'num_awcs_conducted_cbe': 0,
'num_awcs_conducted_vhnd': 2,
'thr_21_days': 80,
'thr_25_days': 24,
'thr_eligible': 279,
'vhnd_conducted': 3,
'home_visits': '1.62 %',
'gm': '58.04 %',
'cbe': '0.00 %',
'thr': '28.67 %',
'cbe_sector_percent': '0.00 %',
'vhnd_sector_value': 2
},
{
'state_name': 'st1',
'district_name': 'd1',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'num_launched_awcs': 10,
'valid_visits': 3,
'expected_visits': 185,
'gm_0_3': 83,
'children_0_3': 143,
'num_awcs_conducted_cbe': 0,
'num_awcs_conducted_vhnd': 2,
'thr_21_days': 80,
'thr_25_days': 24,
'thr_eligible': 279,
'vhnd_conducted': 3,
'home_visits': '1.62 %',
'gm': '58.04 %',
'cbe': '0.00 %',
'thr': '28.67 %'
}
],
'aggregationLevel': 2,
'recordsTotal': 1,
'recordsFiltered': 1
}
self.assertDictEqual(expected, data)
def test_get_service_delivery_data_3_6(self):
data = get_service_delivery_report_data(
'icds-cas',
0,
10,
None,
False,
{
'aggregation_level': 1,
},
2017,
5,
'children',
)
expected = {
'data': [
{
'num_launched_awcs': 22,
'state_name': 'All',
'district_name': 'All',
'block_name': 'All',
'supervisor_name': 'All',
'awc_name': 'All',
'lunch_21_days': 15,
'lunch_25_days': 0,
'pse_eligible': 991,
'pse_21_days': 66,
'pse_25_days': 20,
'gm_3_5': 473,
'children_3_5': 675,
'gm': '70.07 %',
'pse': '6.66 %',
'sn': '1.51 %'
},
{
'num_launched_awcs': 10,
'state_name': 'st1',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'lunch_21_days': 4,
'lunch_25_days': 0,
'pse_eligible': 483,
'pse_21_days': 7,
'pse_25_days': 0,
'gm_3_5': 234,
'children_3_5': 332,
'gm': '70.48 %',
'pse': '1.45 %',
'sn': '0.83 %'
},
{
'num_launched_awcs': 11,
'state_name': 'st2',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'lunch_21_days': 11,
'lunch_25_days': 0,
'pse_eligible': 507,
'pse_21_days': 59,
'pse_25_days': 20,
'gm_3_5': 239,
'children_3_5': 342,
'gm': '69.88 %',
'pse': '11.64 %',
'sn': '2.17 %'
},
{
'num_launched_awcs': 0,
'state_name': 'st3',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'lunch_21_days': 0,
'lunch_25_days': 0,
'pse_eligible': 0,
'pse_21_days': 0,
'pse_25_days': 0,
'gm_3_5': 0,
'children_3_5': 0,
'gm': 'Data Not Entered',
'pse': 'Data Not Entered',
'sn': 'Data Not Entered'
},
{
'num_launched_awcs': 0,
'state_name': 'st4',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'lunch_21_days': 0,
'lunch_25_days': 0,
'pse_eligible': 0,
'pse_21_days': 0,
'pse_25_days': 0,
'gm_3_5': 0,
'children_3_5': 0,
'gm': 'Data Not Entered',
'pse': 'Data Not Entered',
'sn': 'Data Not Entered'
},
{
'num_launched_awcs': 0,
'state_name': 'st5',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'lunch_21_days': 0,
'lunch_25_days': 0,
'pse_eligible': 0,
'pse_21_days': 0,
'pse_25_days': 0,
'gm_3_5': 0,
'children_3_5': 0,
'gm': 'Data Not Entered',
'pse': 'Data Not Entered',
'sn': 'Data Not Entered'
},
{
'num_launched_awcs': 0,
'state_name': 'st6',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'lunch_21_days': 0,
'lunch_25_days': 0,
'pse_eligible': 0,
'pse_21_days': 0,
'pse_25_days': 0,
'gm_3_5': 0,
'children_3_5': 0,
'gm': 'Data Not Entered',
'pse': 'Data Not Entered',
'sn': 'Data Not Entered'
},
{
'num_launched_awcs': 1,
'state_name': 'st7',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'lunch_21_days': 0,
'lunch_25_days': 0,
'pse_eligible': 1,
'pse_21_days': 0,
'pse_25_days': 0,
'gm_3_5': 0,
'children_3_5': 1,
'gm': '0.00 %',
'pse': '0.00 %',
'sn': '0.00 %'
}
],
'aggregationLevel': 1,
'recordsTotal': 7,
'recordsFiltered': 7
}
self.assertDictEqual(expected, data)
def test_get_service_delivery_data_state_3_6(self):
data = get_service_delivery_report_data(
'icds-cas',
0,
10,
'district_name',
False,
{
'aggregation_level': 2,
'state_id': 'st1',
},
2017,
5,
'children',
)
expected = {
'data': [
{
'num_launched_awcs': 10,
'state_name': 'All',
'district_name': 'All',
'block_name': 'All',
'supervisor_name': 'All',
'awc_name': 'All',
'lunch_21_days': 4,
'lunch_25_days': 0,
'pse_eligible': 483,
'pse_21_days': 7,
'pse_25_days': 0,
'gm_3_5': 234,
'children_3_5': 332,
'gm': '70.48 %',
'pse': '1.45 %',
'sn': '0.83 %'
},
{
'num_launched_awcs': 10,
'state_name': 'st1',
'district_name': 'd1',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'lunch_21_days': 4,
'lunch_25_days': 0,
'pse_eligible': 483,
'pse_21_days': 7,
'pse_25_days': 0,
'gm_3_5': 234,
'children_3_5': 332,
'gm': '70.48 %',
'pse': '1.45 %',
'sn': '0.83 %'
}
],
'aggregationLevel': 2,
'recordsTotal': 1,
'recordsFiltered': 1
}
self.assertDictEqual(expected, data)
|
none
| 1
| 2.043995
| 2
|
|
leetcode/python/problem23/merge_k_lists_test.py
|
angelusualle/algorithms
| 0
|
6628618
|
<gh_stars>0
import unittest

from merge_k_lists import merge_k_lists, ListNode


class Test_Case_Merge_K_Lists(unittest.TestCase):
    """Exercises merging k sorted linked lists into one sorted list."""

    def test_merge_k_lists(self):
        # Two sorted inputs: 1 -> 2 -> 2 and 1 -> 1 -> 2.
        heads = [
            ListNode(1, ListNode(2, ListNode(2))),
            ListNode(1, ListNode(1, ListNode(2))),
        ]
        merged = merge_k_lists(heads)
        # Flatten the merged linked list into a plain Python list.
        values = []
        node = merged
        while node is not None:
            values.append(node.val)
            node = node.next
        self.assertListEqual(values, [1, 1, 1, 2, 2, 2])


if __name__ == '__main__':
    unittest.main()
|
import unittest
from merge_k_lists import merge_k_lists, ListNode
class Test_Case_Merge_K_Lists(unittest.TestCase):
def test_merge_k_lists(self):
lists = [ListNode(1, ListNode(2, ListNode(2))), ListNode(1, ListNode(1, ListNode(2)))]
answer = merge_k_lists(lists)
answer_collector = []
while answer is not None:
answer_collector.append(answer.val)
answer = answer.next
self.assertListEqual(answer_collector, [1,1, 1, 2, 2, 2])
if __name__ == '__main__':
unittest.main()
|
none
| 1
| 3.290256
| 3
|
|
indico/queries/datasets.py
|
IndicoDataSolutions/indico-client-python
| 2
|
6628619
|
<filename>indico/queries/datasets.py
# -*- coding: utf-8 -*-
import json
import tempfile
from pathlib import Path
from typing import List, Optional

import pandas as pd

from indico.client.request import (
    Debouncer,
    GraphQLRequest,
    HTTPMethod,
    HTTPRequest,
    RequestChain,
)
from indico.errors import IndicoNotFound
from indico.queries.storage import UploadBatched, UploadImages
from indico.types.dataset import Dataset
class ListDatasets(GraphQLRequest):
    """
    List all of your datasets.

    Options:
        limit (int, default=100): Max number of datasets to retrieve

    Returns:
        List[Dataset]
    """

    query = """
        query ListDatasets($limit: Int){
            datasetsPage(limit: $limit) {
                datasets {
                    id
                    name
                    rowCount
                }
            }
        }
    """

    def __init__(self, *, limit: int = 100):
        super().__init__(self.query, variables={"limit": limit})

    def process_response(self, response) -> List[Dataset]:
        # Fixed annotation: this request always yields a list of Dataset
        # objects (one per row of the datasets page), never a single Dataset.
        response = super().process_response(response)
        return [Dataset(**dataset) for dataset in response["datasetsPage"]["datasets"]]
class GetDataset(GraphQLRequest):
    """
    Retrieve a dataset description object.

    Args:
        id (int): id of the dataset to query

    Returns:
        Dataset object

    Raises:
        IndicoNotFound: if the response does not contain a dataset payload
    """

    query = """
        query GetDataset($id: Int) {
            dataset(id: $id) {
                id
                name
                rowCount
                status
                permissions
                datacolumns {
                    id
                    name
                }
                labelsets{
                    id
                    name
                }
            }
        }
    """

    def __init__(self, id: int):
        super().__init__(self.query, variables={"id": id})

    def process_response(self, response) -> Dataset:
        payload = super().process_response(response)
        dataset = payload.get("dataset") if isinstance(payload, dict) else None
        # A missing or malformed "dataset" entry means the id did not resolve.
        if not isinstance(dataset, dict):
            raise IndicoNotFound("Failed to find dataset")
        return Dataset(**dataset)
class GetDatasetFileStatus(GetDataset):
    """
    Get the status of dataset file upload.

    Reuses GetDataset's __init__ and process_response; only the GraphQL
    query is overridden to fetch per-file upload details.

    Args:
        id (int): id of the dataset to query

    Returns:
        Dataset object whose `files` carry per-file status
        (e.g. DOWNLOADED or FAILED)
    """

    query = """
        query DatasetUploadStatus($id: Int!) {
            dataset(id: $id) {
                id
                status
                files {
                    id
                    name
                    deleted
                    fileSize
                    rainbowUrl
                    fileType
                    fileHash
                    status
                    statusMeta
                    failureType
                }
            }
        }
    """
class GetDatasetStatus(GraphQLRequest):
    """
    Get the status of a dataset.

    Args:
        id (int): id of the dataset to query

    Returns:
        status (str): COMPLETE or FAILED
    """

    query = """
        query datasetStatus($id: Int!) {
            dataset(id: $id) {
                status
            }
        }
    """

    def __init__(self, id: int):
        super().__init__(self.query, variables={"id": id})

    def process_response(self, response) -> str:
        # NOTE(review): unlike the other requests in this module, this does
        # not call super().process_response() and instead indexes the raw
        # GraphQL payload via "data" directly — verify this is intentional.
        return response["data"]["dataset"]["status"]
class CreateDataset(RequestChain):
    """
    Create a dataset and upload the associated files.

    Args:
        name (str): Name of the dataset
        files (List[str]): List of pathnames to the dataset files; when
            from_local_images is True this is instead the path of a single
            CSV of image labels

    Options:
        dataset_type (str): Type of dataset to create [TEXT, DOCUMENT, IMAGE]
        wait (bool, default=True): Wait for the dataset to upload and finish
        from_local_images (bool, default=False): Treat `files` as a label CSV
            whose image files live alongside it
        image_filename_col (str, default="filename"): CSV column holding the
            image filenames
        batch_size (int, default=20): Upload batch size

    Returns:
        Dataset object
    """

    # RequestChain threads each request's result through `previous`.
    previous = None

    def __init__(
        self,
        name: str,
        files: List[str],
        wait: bool = True,
        dataset_type: str = "TEXT",
        from_local_images: bool = False,
        image_filename_col: str = "filename",
        batch_size: int = 20,
    ):
        self.files = files
        self.name = name
        self.wait = wait
        self.dataset_type = dataset_type
        self.from_local_images = from_local_images
        self.image_filename_col = image_filename_col
        self.batch_size = batch_size
        super().__init__()

    def requests(self):
        """Yield the request sequence: upload files, create the dataset,
        attach the files, then poll until download (and, if wait=True,
        processing) completes."""
        if self.from_local_images:
            self.dataset_type = "IMAGE"
            # Assume image filenames are in the same directory as the csv with
            # image labels and that there is a column representing their name
            df = pd.read_csv(self.files)
            img_filenames = df[self.image_filename_col].tolist()
            img_filepaths = [
                str(Path(self.files).parent / imgfn) for imgfn in img_filenames
            ]
            yield UploadBatched(
                img_filepaths,
                batch_size=self.batch_size,
                request_cls=UploadImages,
            )
            # `previous` now holds the uploaded image URLs; write them into a
            # temporary CSV and upload that as the dataset file.
            df["urls"] = self.previous
            with tempfile.TemporaryDirectory() as tmpdir:
                image_csv_path = str(Path(tmpdir) / "image_urls.csv")
                df.to_csv(image_csv_path)
                yield _UploadDatasetFiles(files=[image_csv_path])
        else:
            yield UploadBatched(
                files=self.files,
                batch_size=self.batch_size,
                request_cls=_UploadDatasetFiles,
            )
        file_metadata = self.previous
        yield CreateEmptyDataset(name=self.name, dataset_type=self.dataset_type)
        yield _AddFiles(dataset_id=self.previous.id, metadata=file_metadata)
        dataset_id = self.previous.id
        yield GetDatasetFileStatus(id=dataset_id)
        # Poll (with backoff) until every file has been downloaded or failed.
        debouncer = Debouncer()
        while not all(
            f.status in ["DOWNLOADED", "FAILED"] for f in self.previous.files
        ):
            yield GetDatasetFileStatus(id=self.previous.id)
            debouncer.backoff()
        # CSV files take a dedicated processing path; other file types are
        # only processed when no CSVs are present.
        csv_files = [f.id for f in self.previous.files if f.file_type == "CSV"]
        non_csv_files = [f.id for f in self.previous.files if f.file_type != "CSV"]
        if csv_files:
            yield _ProcessCSV(dataset_id=dataset_id, datafile_ids=csv_files)
        elif non_csv_files:
            yield _ProcessFiles(dataset_id=dataset_id, datafile_ids=non_csv_files)
        yield GetDatasetFileStatus(id=dataset_id)
        debouncer = Debouncer()
        if self.wait is True:
            # Poll again until processing finishes for every file.
            while not all(
                [f.status in ["PROCESSED", "FAILED"] for f in self.previous.files]
            ):
                yield GetDatasetFileStatus(id=dataset_id)
                debouncer.backoff()
        yield GetDataset(id=dataset_id)
class _UploadDatasetFiles(HTTPRequest):
    """Internal: POST a batch of local files to the storage upload endpoint."""

    def __init__(self, files: List[str]):
        super().__init__(
            method=HTTPMethod.POST, path="/storage/files/upload", files=files
        )
class DeleteDataset(GraphQLRequest):
    """
    Delete a dataset.

    Args:
        id (int): ID of the dataset

    Returns:
        success (bool): The success of the operation
    """

    query = """
        mutation deleteDataset($id: Int!) {
            deleteDataset(id: $id) {
                success
            }
        }
    """

    def __init__(self, id):
        super().__init__(self.query, variables={"id": id})

    def process_response(self, response):
        result = super().process_response(response)
        return result["deleteDataset"]["success"]
class CreateEmptyDataset(GraphQLRequest):
    """Create a new, empty dataset with the given name and type."""

    query = """
        mutation($name: String!, $datasetType: DatasetType) {
            createDataset(name: $name, datasetType: $datasetType) {
                id
                name
            }
        }
    """

    def __init__(self, name: str, dataset_type: str = None):
        # Fall back to TEXT when no (or a falsy) dataset type is supplied.
        effective_type = dataset_type if dataset_type else "TEXT"
        super().__init__(
            self.query,
            variables={"name": name, "datasetType": effective_type},
        )

    def process_response(self, response):
        payload = super().process_response(response)
        return Dataset(**payload["createDataset"])
class _AddFiles(GraphQLRequest):
    """Attach previously uploaded files (described by metadata) to a dataset."""

    query = """
        mutation AddFiles($datasetId: Int!, $metadata: JSONString!){
            addDatasetFiles(datasetId: $datasetId, metadataList: $metadata) {
                id
                status
            }
        }
    """

    def __init__(self, dataset_id: int, metadata: List[str]):
        # The API expects the metadata list serialized as a JSON string.
        variables = {
            "datasetId": dataset_id,
            "metadata": json.dumps(metadata),
        }
        super().__init__(self.query, variables=variables)

    def process_response(self, response):
        payload = super().process_response(response)
        return Dataset(**payload["addDatasetFiles"])
class AddFiles(RequestChain):
    """
    Add files to a dataset

    Args:
        dataset_id (int): ID of the dataset
        files (List[str]): List of pathnames to the dataset files

    Options:
        wait (bool, default=True): Block while polling for status of files
        batch_size (int, default=20): Batch size for uploading files

    Returns:
        Dataset

    Raises:

    """

    previous = None

    def __init__(
        self,
        dataset_id: int,
        files: List[str],
        wait: bool = True,
        batch_size: int = 20,
    ):
        self.dataset_id = dataset_id
        self.files = files
        self.wait = wait
        self.batch_size = batch_size
        super().__init__()

    def requests(self):
        # Upload local files in batches, then register them with the dataset.
        yield UploadBatched(
            files=self.files,
            batch_size=self.batch_size,
            request_cls=_UploadDatasetFiles,
        )
        yield _AddFiles(dataset_id=self.dataset_id, metadata=self.previous)
        yield GetDatasetFileStatus(id=self.dataset_id)
        # Bug fix: the polling loop previously ran unconditionally, making the
        # `wait` option a no-op. Only block and poll when wait=True, matching
        # the behavior of ProcessFiles/ProcessCSV.
        if self.wait:
            debouncer = Debouncer()
            while not all(
                f.status in ["DOWNLOADED", "FAILED", "PROCESSED"]
                for f in self.previous.files
            ):
                yield GetDatasetFileStatus(id=self.previous.id)
                debouncer.backoff()
class _ProcessFiles(GraphQLRequest):
    """Kick off server-side processing of non-CSV datafiles for a dataset."""

    query = """
        mutation (
            $datasetId: Int!,
            $datafileIds: [Int]) {
            addDataFiles(
                datasetId: $datasetId,
                datafileIds: $datafileIds) {
                id
                name
            }
        }
    """

    def __init__(self, dataset_id: int, datafile_ids: List[int]):
        variables = {"datasetId": dataset_id, "datafileIds": datafile_ids}
        super().__init__(self.query, variables=variables)

    def process_response(self, response):
        payload = super().process_response(response)
        return Dataset(**payload["addDataFiles"])
class _ProcessCSV(GraphQLRequest):
    """Kick off server-side processing of CSV datafiles for a dataset."""

    query = """
        mutation ($datasetId: Int!, $datafileIds: [Int]) {
            addDataCsv(datasetId: $datasetId, datafileIds: $datafileIds) {
                id
                name
            }
        }
    """

    def __init__(self, dataset_id: int, datafile_ids: List[int]):
        variables = {"datasetId": dataset_id, "datafileIds": datafile_ids}
        super().__init__(self.query, variables=variables)

    def process_response(self, response):
        payload = super().process_response(response)
        return Dataset(**payload["addDataCsv"])
class ProcessFiles(RequestChain):
    """
    Process files associated with a dataset and add corresponding data to the dataset

    Args:
        dataset_id (int): ID of the dataset
        datafile_ids (List[str]): IDs of the datafiles to process
        wait (bool): Block while polling for status of files

    Returns:
        Dataset

    Raises:

    """

    def __init__(
        self,
        dataset_id: int,
        datafile_ids: List[int],
        wait: bool = True,
    ):
        self.dataset_id = dataset_id
        self.datafile_ids = datafile_ids
        self.wait = wait
        # Consistency fix: the other RequestChain subclasses in this module
        # (CreateDataset, AddFiles) call the base initializer; this one did not.
        super().__init__()

    def requests(self):
        yield _ProcessFiles(self.dataset_id, self.datafile_ids)
        debouncer = Debouncer()
        yield GetDatasetFileStatus(id=self.dataset_id)
        if self.wait:
            # Poll until every file has reached a terminal state.
            while not all(
                f.status in ["PROCESSED", "FAILED"] for f in self.previous.files
            ):
                yield GetDatasetFileStatus(id=self.dataset_id)
                debouncer.backoff()
class ProcessCSV(RequestChain):
    """
    Process CSV associated with a dataset and add corresponding data to the dataset

    Args:
        dataset_id (int): ID of the dataset
        datafile_ids (List[str]): IDs of the CSV datafiles to process

    Options:
        wait (bool, default=True): Block while polling for status of files

    Returns:
        Dataset

    Raises:

    """

    def __init__(self, dataset_id: int, datafile_ids: List[int], wait: bool = True):
        self.dataset_id = dataset_id
        self.datafile_ids = datafile_ids
        self.wait = wait
        # Consistency fix: the other RequestChain subclasses in this module
        # (CreateDataset, AddFiles) call the base initializer; this one did not.
        super().__init__()

    def requests(self):
        yield _ProcessCSV(self.dataset_id, self.datafile_ids)
        debouncer = Debouncer()
        yield GetDatasetFileStatus(id=self.dataset_id)
        if self.wait:
            # Poll until every file has reached a terminal state.
            while not all(
                f.status in ["PROCESSED", "FAILED"] for f in self.previous.files
            ):
                yield GetDatasetFileStatus(id=self.dataset_id)
                debouncer.backoff()
|
<filename>indico/queries/datasets.py
# -*- coding: utf-8 -*-
import json
import tempfile
from pathlib import Path
from typing import List
import pandas as pd
from indico.client.request import (
Debouncer,
GraphQLRequest,
HTTPMethod,
HTTPRequest,
RequestChain,
)
from indico.errors import IndicoNotFound
from indico.queries.storage import UploadBatched, UploadImages
from indico.types.dataset import Dataset
class ListDatasets(GraphQLRequest):
    """
    List all of your datasets

    Options:
        limit (int, default=100): Max number of datasets to retrieve

    Returns:
        List[Dataset]

    Raises:

    """

    query = """
        query ListDatasets($limit: Int){
            datasetsPage(limit: $limit) {
                datasets {
                    id
                    name
                    rowCount
                }
            }
        }
    """

    def __init__(self, *, limit: int = 100):
        super().__init__(self.query, variables={"limit": limit})

    # Bug fix: the return annotation previously said `Dataset`, but this
    # method returns a list of Dataset objects.
    def process_response(self, response) -> List[Dataset]:
        response = super().process_response(response)
        return [Dataset(**dataset) for dataset in response["datasetsPage"]["datasets"]]
class GetDataset(GraphQLRequest):
    """
    Retrieve a dataset description object

    Args:
        id (int): id of the dataset to query

    Returns:
        Dataset object

    Raises:
        IndicoNotFound: if the response does not contain a dataset
    """

    query = """
        query GetDataset($id: Int) {
            dataset(id: $id) {
                id
                name
                rowCount
                status
                permissions
                datacolumns {
                    id
                    name
                }
                labelsets{
                    id
                    name
                }
            }
        }
    """

    def __init__(self, id: int):
        super().__init__(self.query, variables={"id": id})

    def process_response(self, response) -> Dataset:
        payload = super().process_response(response)
        dataset = payload.get("dataset")
        if not isinstance(dataset, dict):
            raise IndicoNotFound("Failed to find dataset")
        return Dataset(**dataset)
class GetDatasetFileStatus(GetDataset):
    """
    Get the status of dataset file upload

    Args:
        id (int): id of the dataset to query

    Returns:
        status (str): DOWNLOADED or FAILED

    Raises:

    """

    # Only the query is overridden here; __init__ and the Dataset-building
    # process_response are inherited from GetDataset.
    query = """
        query DatasetUploadStatus($id: Int!) {
            dataset(id: $id) {
                id
                status
                files {
                    id
                    name
                    deleted
                    fileSize
                    rainbowUrl
                    fileType
                    fileHash
                    status
                    statusMeta
                    failureType
                }
            }
        }
    """
class GetDatasetStatus(GraphQLRequest):
    """
    Get the status of a dataset

    Args:
        id (int): id of the dataset to query

    Returns:
        status (str): COMPLETE or FAILED

    Raises:

    """

    query = """
        query datasetStatus($id: Int!) {
            dataset(id: $id) {
                status
            }
        }
    """

    def __init__(self, id: int):
        super().__init__(self.query, variables={"id": id})

    def process_response(self, response) -> str:
        # Consistency fix: every other request in this module routes through
        # super().process_response (which unwraps the GraphQL payload — see
        # GetDataset); this one previously indexed response["data"] directly,
        # bypassing the base class's handling.
        return super().process_response(response)["dataset"]["status"]
class CreateDataset(RequestChain):
    """
    Create a dataset and upload the associated files.

    Args:
        name (str): Name of the dataset
        files (List[str]): List of pathnames to the dataset files

    Options:
        dataset_type (str): Type of dataset to create [TEXT, DOCUMENT, IMAGE]
        wait (bool, default=True): Wait for the dataset to upload and finish

    Returns:
        Dataset object

    Raises:

    """

    # `previous` holds the result of the most recently executed request in the
    # chain; each step below reads the output of the step before it.
    previous = None

    def __init__(
        self,
        name: str,
        files: List[str],
        wait: bool = True,
        dataset_type: str = "TEXT",
        from_local_images: bool = False,
        image_filename_col: str = "filename",
        batch_size: int = 20,
    ):
        self.files = files
        self.name = name
        self.wait = wait
        self.dataset_type = dataset_type
        self.from_local_images = from_local_images
        self.image_filename_col = image_filename_col
        self.batch_size = batch_size
        super().__init__()

    def requests(self):
        if self.from_local_images:
            # Local-image mode always produces an IMAGE dataset regardless of
            # the dataset_type passed to __init__.
            self.dataset_type = "IMAGE"
            # Assume image filenames are in the same directory as the csv with
            # image labels and that there is a column representing their name
            # NOTE(review): `self.files` is passed straight to pd.read_csv
            # here, so in this mode it is expected to be a single CSV path,
            # not a list as the annotation suggests — TODO confirm callers.
            df = pd.read_csv(self.files)
            img_filenames = df[self.image_filename_col].tolist()
            img_filepaths = [
                str(Path(self.files).parent / imgfn) for imgfn in img_filenames
            ]
            # Upload the images; `self.previous` then holds their URLs.
            yield UploadBatched(
                img_filepaths,
                batch_size=self.batch_size,
                request_cls=UploadImages,
            )
            # Rewrite the CSV with the uploaded URLs and upload that instead.
            df["urls"] = self.previous
            with tempfile.TemporaryDirectory() as tmpdir:
                image_csv_path = str(Path(tmpdir) / "image_urls.csv")
                df.to_csv(image_csv_path)
                yield _UploadDatasetFiles(files=[image_csv_path])
        else:
            yield UploadBatched(
                files=self.files,
                batch_size=self.batch_size,
                request_cls=_UploadDatasetFiles,
            )
        # Metadata returned by the upload step, consumed by _AddFiles below.
        file_metadata = self.previous
        yield CreateEmptyDataset(name=self.name, dataset_type=self.dataset_type)
        yield _AddFiles(dataset_id=self.previous.id, metadata=file_metadata)
        dataset_id = self.previous.id
        yield GetDatasetFileStatus(id=dataset_id)
        debouncer = Debouncer()
        # Poll (with backoff) until every file has finished downloading.
        while not all(
            f.status in ["DOWNLOADED", "FAILED"] for f in self.previous.files
        ):
            yield GetDatasetFileStatus(id=self.previous.id)
            debouncer.backoff()
        # CSV files take a different processing path from other file types.
        csv_files = [f.id for f in self.previous.files if f.file_type == "CSV"]
        non_csv_files = [f.id for f in self.previous.files if f.file_type != "CSV"]
        if csv_files:
            yield _ProcessCSV(dataset_id=dataset_id, datafile_ids=csv_files)
        elif non_csv_files:
            yield _ProcessFiles(dataset_id=dataset_id, datafile_ids=non_csv_files)
        yield GetDatasetFileStatus(id=dataset_id)
        debouncer = Debouncer()
        if self.wait is True:
            # Optionally block until processing reaches a terminal state.
            while not all(
                [f.status in ["PROCESSED", "FAILED"] for f in self.previous.files]
            ):
                yield GetDatasetFileStatus(id=dataset_id)
                debouncer.backoff()
        yield GetDataset(id=dataset_id)
class _UploadDatasetFiles(HTTPRequest):
def __init__(self, files: List[str]):
super().__init__(
method=HTTPMethod.POST, path="/storage/files/upload", files=files
)
class DeleteDataset(GraphQLRequest):
"""
Delete a dataset
Args:
id (int): ID of the dataset
Returns:
success (bool): The success of the operation
Raises:
"""
query = """
mutation deleteDataset($id: Int!) {
deleteDataset(id: $id) {
success
}
}
"""
def __init__(self, id):
super().__init__(self.query, variables={"id": id})
def process_response(self, response):
return super().process_response(response)["deleteDataset"]["success"]
class CreateEmptyDataset(GraphQLRequest):
query = """
mutation($name: String!, $datasetType: DatasetType) {
createDataset(name: $name, datasetType: $datasetType) {
id
name
}
}
"""
def __init__(self, name: str, dataset_type: str = None):
if not dataset_type:
dataset_type = "TEXT"
super().__init__(
self.query, variables={"name": name, "datasetType": dataset_type}
)
def process_response(self, response):
return Dataset(**super().process_response(response)["createDataset"])
class _AddFiles(GraphQLRequest):
query = """
mutation AddFiles($datasetId: Int!, $metadata: JSONString!){
addDatasetFiles(datasetId: $datasetId, metadataList: $metadata) {
id
status
}
}
"""
def __init__(self, dataset_id: int, metadata: List[str]):
super().__init__(
self.query,
variables={"datasetId": dataset_id, "metadata": json.dumps(metadata)},
)
def process_response(self, response):
return Dataset(**super().process_response(response)["addDatasetFiles"])
class AddFiles(RequestChain):
"""
Add files to a dataset
Args:
dataset_id (int): ID of the dataset
files (List[str]): List of pathnames to the dataset files
Options:
wait (bool, default=True): Block while polling for status of files
batch_size (int, default=20): Batch size for uploading files
Returns:
Dataset
Raises:
"""
previous = None
def __init__(
self,
dataset_id: int,
files: List[str],
wait: bool = True,
batch_size: int = 20,
):
self.dataset_id = dataset_id
self.files = files
self.wait = wait
self.batch_size = batch_size
super().__init__()
def requests(self):
yield UploadBatched(
files=self.files,
batch_size=self.batch_size,
request_cls=_UploadDatasetFiles,
)
yield _AddFiles(dataset_id=self.dataset_id, metadata=self.previous)
yield GetDatasetFileStatus(id=self.dataset_id)
debouncer = Debouncer()
while not all(
f.status in ["DOWNLOADED", "FAILED", "PROCESSED"]
for f in self.previous.files
):
yield GetDatasetFileStatus(id=self.previous.id)
debouncer.backoff()
class _ProcessFiles(GraphQLRequest):
query = """
mutation (
$datasetId: Int!,
$datafileIds: [Int]) {
addDataFiles(
datasetId: $datasetId,
datafileIds: $datafileIds) {
id
name
}
}
"""
def __init__(self, dataset_id: int, datafile_ids: List[int]):
super().__init__(
self.query,
variables={"datasetId": dataset_id, "datafileIds": datafile_ids},
)
def process_response(self, response):
return Dataset(**super().process_response(response)["addDataFiles"])
class _ProcessCSV(GraphQLRequest):
query = """
mutation ($datasetId: Int!, $datafileIds: [Int]) {
addDataCsv(datasetId: $datasetId, datafileIds: $datafileIds) {
id
name
}
}
"""
def __init__(self, dataset_id: int, datafile_ids: List[int]):
super().__init__(
self.query, variables={"datasetId": dataset_id, "datafileIds": datafile_ids}
)
def process_response(self, response):
return Dataset(**super().process_response(response)["addDataCsv"])
class ProcessFiles(RequestChain):
"""
Process files associated with a dataset and add corresponding data to the dataset
Args:
dataset_id (int): ID of the dataset
datafile_ids (List[str]): IDs of the datafiles to process
wait (bool): Block while polling for status of files
Returns:
Dataset
Raises:
"""
def __init__(
self,
dataset_id: int,
datafile_ids: List[int],
wait: bool = True,
):
self.dataset_id = dataset_id
self.datafile_ids = datafile_ids
self.wait = wait
def requests(self):
yield _ProcessFiles(self.dataset_id, self.datafile_ids)
debouncer = Debouncer()
yield GetDatasetFileStatus(id=self.dataset_id)
if self.wait:
while not all(
f.status in ["PROCESSED", "FAILED"] for f in self.previous.files
):
yield GetDatasetFileStatus(id=self.dataset_id)
debouncer.backoff()
class ProcessCSV(RequestChain):
"""
Process CSV associated with a dataset and add corresponding data to the dataset
Args:
dataset_id (int): ID of the dataset
datafile_ids (List[str]): IDs of the CSV datafiles to process
Options:
wait (bool, default=True): Block while polling for status of files
Returns:
Dataset
Raises:
"""
def __init__(self, dataset_id: int, datafile_ids: List[int], wait: bool = True):
self.dataset_id = dataset_id
self.datafile_ids = datafile_ids
self.wait = wait
def requests(self):
yield _ProcessCSV(self.dataset_id, self.datafile_ids)
debouncer = Debouncer()
yield GetDatasetFileStatus(id=self.dataset_id)
if self.wait:
while not all(
f.status in ["PROCESSED", "FAILED"] for f in self.previous.files
):
yield GetDatasetFileStatus(id=self.dataset_id)
debouncer.backoff()
|
en
| 0.442165
|
# -*- coding: utf-8 -*- List all of your datasets Options: limit (int, default=100): Max number of datasets to retrieve Returns: List[Dataset] Raises: query ListDatasets($limit: Int){ datasetsPage(limit: $limit) { datasets { id name rowCount } } } Retrieve a dataset description object Args: id (int): id of the dataset to query Returns: Dataset object Raises: query GetDataset($id: Int) { dataset(id: $id) { id name rowCount status permissions datacolumns { id name } labelsets{ id name } } } Get the status of dataset file upload Args: id (int): id of the dataset to query Returns: status (str): DOWNLOADED or FAILED Raises: query DatasetUploadStatus($id: Int!) { dataset(id: $id) { id status files { id name deleted fileSize rainbowUrl fileType fileHash status statusMeta failureType } } } Get the status of a dataset Args: id (int): id of the dataset to query Returns: status (str): COMPLETE or FAILED Raises: query datasetStatus($id: Int!) { dataset(id: $id) { status } } Create a dataset and upload the associated files. Args: name (str): Name of the dataset files (List[str]): List of pathnames to the dataset files Options: dataset_type (str): Type of dataset to create [TEXT, DOCUMENT, IMAGE] wait (bool, default=True): Wait for the dataset to upload and finish Returns: Dataset object Raises: # Assume image filenames are in the same directory as the csv with # image labels and that there is a column representing their name Delete a dataset Args: id (int): ID of the dataset Returns: success (bool): The success of the operation Raises: mutation deleteDataset($id: Int!) 
{ deleteDataset(id: $id) { success } } mutation($name: String!, $datasetType: DatasetType) { createDataset(name: $name, datasetType: $datasetType) { id name } } mutation AddFiles($datasetId: Int!, $metadata: JSONString!){ addDatasetFiles(datasetId: $datasetId, metadataList: $metadata) { id status } } Add files to a dataset Args: dataset_id (int): ID of the dataset files (List[str]): List of pathnames to the dataset files Options: wait (bool, default=True): Block while polling for status of files batch_size (int, default=20): Batch size for uploading files Returns: Dataset Raises: mutation ( $datasetId: Int!, $datafileIds: [Int]) { addDataFiles( datasetId: $datasetId, datafileIds: $datafileIds) { id name } } mutation ($datasetId: Int!, $datafileIds: [Int]) { addDataCsv(datasetId: $datasetId, datafileIds: $datafileIds) { id name } } Process files associated with a dataset and add corresponding data to the dataset Args: dataset_id (int): ID of the dataset datafile_ids (List[str]): IDs of the datafiles to process wait (bool): Block while polling for status of files Returns: Dataset Raises: Process CSV associated with a dataset and add corresponding data to the dataset Args: dataset_id (int): ID of the dataset datafile_ids (List[str]): IDs of the CSV datafiles to process Options: wait (bool, default=True): Block while polling for status of files Returns: Dataset Raises:
| 2.383606
| 2
|
EvoMusicCompanion/ea/mutation.py
|
Jerryhu1/MasterThesis
| 0
|
6628620
|
from random import Random
from music21 import pitch
from music21.interval import Interval
from ea import initialisation, simulation, constants, duration
from ea.individual import Individual, Measure
import copy
rng = Random()
def applyMutation(individual: Individual, elitist_population: [Individual]):
    """Apply each mutation operator to ``individual`` with its own probability.

    Operators are tried in a fixed order; each gets an independent random
    draw against its probability. ``elitist_mutation`` additionally needs
    the elitist population.
    """
    operators_with_probs = [
        (swap_measure, 0.2),
        (change_rest_or_note, 0.2),
        (change_duration, 0.1),
        (reverse_measure, 0.1),
        (transpose_interval_measure, 0.2),
        (elitist_mutation, 0.05),
    ]
    for operator, prob in operators_with_probs:
        if rng.random() < prob:
            if operator is elitist_mutation:
                operator(individual, elitist_population)
            else:
                operator(individual)
def elitist_mutation(individual: Individual, elitist_population: [Individual]):
    """Replace one measure's notes with a deep copy of the same measure from
    a randomly chosen elitist individual."""
    donor: Individual = rng.choice(elitist_population)
    idx = rng.choice(range(len(donor.measures)))
    individual.measures[idx].notes = copy.deepcopy(donor.measures[idx].notes)
    # Sanity check: the assigned list must not alias the donor's note list.
    if individual.measures[idx].notes is donor.measures[idx].notes:
        print('Mutated individual has reference to elitist individual')
def swap_measure(individual: Individual):
    """Swap the note content of two distinct, randomly chosen measures.

    Bug fix: the old collision re-roll drew ``rng.randrange(len(measures) - 1)``,
    which could never pick the last measure when re-rolling. ``rng.sample``
    draws two distinct indices uniformly in one step.
    """
    i1, i2 = rng.sample(range(len(individual.measures)), 2)
    notes1 = copy.deepcopy(individual.measures[i1].notes)
    notes2 = copy.deepcopy(individual.measures[i2].notes)
    individual.measures[i1].notes = notes2
    individual.measures[i2].notes = notes1
def swap_notes_in_measure(individual: Individual):
    """Swap two distinct notes inside one randomly chosen measure.

    Bug fix: with a single-note measure the old retry loop for a distinct
    second index could never terminate; such measures are now left unchanged.
    """
    m_index = rng.randrange(len(individual.measures))
    notes = individual.measures[m_index].notes
    if len(notes) < 2:
        # Nothing to swap — avoids the previous infinite re-roll.
        return
    n_index1, n_index2 = rng.sample(range(len(notes)), 2)
    notes[n_index1], notes[n_index2] = notes[n_index2], notes[n_index1]
def change_rest_or_note(individual: Individual):
    """Toggle a randomly chosen note between a rest and a random pitch."""
    measure_idx = rng.randrange(len(individual.measures))
    measure_notes = individual.measures[measure_idx].notes
    target_idx = rng.randrange(len(measure_notes))
    target = measure_notes[target_idx]
    if target.pitch == 'REST':
        # Rests become a sounding note with a randomly drawn pitch.
        target.set_pitch(initialisation.get_random_pitch_transition(None))
    else:
        # Sounding notes become rests.
        target.set_pitch('REST')
    measure_notes[target_idx] = target
def change_duration(individual: Individual):
    """Randomly re-assign one note's duration, then shrink/remove other notes
    until the measure fits within a whole (1.0) again.

    Bug fix: when a sixteenth note had to be removed, the old code called
    ``measure.notes.pop(n_dur_idx)`` — and ``n_dur_idx`` is 0 for a sixteenth,
    so it always popped the *first* note of the measure instead of the
    sampled note ``n``.
    """
    measure = rng.choice(individual.measures)
    notes = measure.notes
    note = rng.choice(notes)
    durations = [0.0625, 0.125, 0.25, 0.5]
    d = rng.choice(durations)
    note.duration = duration.Duration(None, d)
    while measure.get_total_duration() > 1.0:
        n = rng.choice(notes)
        if n is note:
            # Never shrink the note whose duration we just assigned.
            continue
        n_dur_idx = durations.index(n.duration.duration_value)
        # If this is a sixteenth note, we remove it
        if n_dur_idx == 0:
            measure.notes.remove(n)
        # Else we go one step back in duration
        else:
            n.duration = duration.Duration(None, durations[n_dur_idx - 1])
def change_pitch(size: int, individual: Individual):
    # NOTE(review): this operator appears incomplete — it selects a random
    # note ``size`` times but never modifies it, so calling it has no effect
    # on ``individual`` beyond advancing the shared RNG state.
    for i in range(size):
        m = rng.choice(individual.measures)
        note = rng.choice(m.notes)
def transpose_interval_measure(individual: Individual):
    """Transpose the sounding notes of one random measure by a common interval."""
    m: Measure = rng.choice(individual.measures)
    intvl = 0
    for i in range(len(m.notes)):
        n = m.notes[i]
        if n.pitch == 'REST':
            # Rests carry no pitch and are skipped.
            continue
        # If we find the first pitch, we transpose this first
        # NOTE(review): the interval is only (re)drawn when i == 0, so a
        # measure whose first note is a rest keeps intvl == 0 and is
        # effectively not transposed — TODO confirm this is intended.
        if i == 0:
            first_pitch = n.pitch
            intvl = (rng.choice([1, 2, 3]))
            init_scale_degree = constants.NOTE_RANGE.index(first_pitch)
            # Transpose downward when near the top of the note range.
            if len(constants.NOTE_RANGE) - init_scale_degree < 13:
                intvl = -intvl
            # If the new scale degree is not in range, we set it to the minimum or maximum
            if init_scale_degree + intvl < 0:
                new_first_pitch = constants.NOTE_RANGE[0]
            elif init_scale_degree + intvl > len(constants.NOTE_RANGE) - 1:
                new_first_pitch = constants.NOTE_RANGE[-1]
            else:
                new_first_pitch = constants.NOTE_RANGE[init_scale_degree + intvl]
            n.set_pitch(new_first_pitch)
            continue
        note_scale_degree = constants.NOTE_RANGE.index(n.pitch)
        # The remaining notes will be transposed with the same intervals as previously
        # If the note goes out of range, we lower or raise with an octave
        if note_scale_degree + intvl > len(constants.NOTE_RANGE) - 1:
            intvl = intvl - 7
        elif note_scale_degree + intvl < 0:
            intvl = intvl + 7
        new_pitch = constants.NOTE_RANGE[note_scale_degree + intvl]
        n.set_pitch(new_pitch)
def reverse_measure(individual: Individual):
    """Reverse the note order of one random measure.

    The notes are replaced by deep copies, matching the original's use of a
    deep-copied measure as the source of the reversed assignment.
    """
    m: Measure = rng.choice(individual.measures)
    mirrored = copy.deepcopy(m.notes)
    mirrored.reverse()
    for idx in range(len(m.notes)):
        m.notes[idx] = mirrored[idx]
|
from random import Random
from music21 import pitch
from music21.interval import Interval
from ea import initialisation, simulation, constants, duration
from ea.individual import Individual, Measure
import copy
rng = Random()
def applyMutation(individual: Individual, elitist_population: [Individual]):
mutations = [swap_measure, change_rest_or_note, change_duration, reverse_measure,
transpose_interval_measure, elitist_mutation]
p1 = 0.2
p2 = 0.2
p3 = 0.1
p4 = 0.1
p5 = 0.2
p6 = 0.05
probs = [p1, p2, p3, p4, p5, p6]
for i in range(len(mutations)):
prob = probs[i]
m = mutations[i]
p = rng.random()
if p < prob:
if m is elitist_mutation:
m(individual, elitist_population)
else:
m(individual)
def elitist_mutation(individual: Individual, elitist_population: [Individual]):
e_individual: Individual = rng.choice(elitist_population)
measure = rng.choice(range(len(e_individual.measures)))
e_individual_copy = copy.deepcopy(e_individual.measures[measure].notes)
individual.measures[measure].notes = e_individual_copy
if individual.measures[measure].notes is e_individual.measures[measure].notes:
print('Mutated individual has reference to elitist individual')
def swap_measure(individual: Individual):
i1 = rng.randrange(len(individual.measures))
i2 = rng.randrange(len(individual.measures))
while i1 == i2:
i2 = rng.randrange(len(individual.measures) - 1)
m1 = copy.deepcopy(individual.measures[i1].notes)
m2 = copy.deepcopy(individual.measures[i2].notes)
individual.measures[i1].notes = m2
individual.measures[i2].notes = m1
def swap_notes_in_measure(individual: Individual):
m_index = rng.randrange(len(individual.measures))
notes = individual.measures[m_index].notes
n_index1 = rng.randrange(len(notes))
n_index2 = rng.randrange(len(notes))
while n_index1 == n_index2:
n_index2 = rng.randrange(len(notes))
n1 = notes[n_index1]
n2 = notes[n_index2]
individual.measures[m_index].notes[n_index1] = n2
individual.measures[m_index].notes[n_index2] = n1
def change_rest_or_note(individual: Individual):
m_index = rng.randrange(len(individual.measures))
notes = individual.measures[m_index].notes
note_index = rng.randrange(len(notes))
note = notes[note_index]
if note.pitch == 'REST':
new_pitch = initialisation.get_random_pitch_transition(None)
note.set_pitch(new_pitch)
else:
note.set_pitch('REST')
notes[note_index] = note
def change_duration(individual: Individual):
measure = rng.choice(individual.measures)
notes = measure.notes
note = rng.choice(notes)
durations = [0.0625, 0.125, 0.25, 0.5]
d = rng.choice(durations)
new_d = duration.Duration(None, d)
note.duration = new_d
while measure.get_total_duration() > 1.0:
n = rng.choice(notes)
if n is note:
continue
n_dur_idx = durations.index(n.duration.duration_value)
# If this is a sixteenth note, we remove it
if n_dur_idx == 0:
measure.notes.pop(n_dur_idx)
# Else we go one step back in duration
else:
new_d = duration.Duration(None, durations[n_dur_idx - 1])
n.duration = new_d
def change_pitch(size: int, individual: Individual):
for i in range(size):
m = rng.choice(individual.measures)
note = rng.choice(m.notes)
def transpose_interval_measure(individual: Individual):
m: Measure = rng.choice(individual.measures)
intvl = 0
for i in range(len(m.notes)):
n = m.notes[i]
if n.pitch == 'REST':
continue
# If we find the first pitch, we transpose this first
if i == 0:
first_pitch = n.pitch
intvl = (rng.choice([1, 2, 3]))
init_scale_degree = constants.NOTE_RANGE.index(first_pitch)
if len(constants.NOTE_RANGE) - init_scale_degree < 13:
intvl = -intvl
# If the new scale degree is not in range, we set it to the minimum or maximum
if init_scale_degree + intvl < 0:
new_first_pitch = constants.NOTE_RANGE[0]
elif init_scale_degree + intvl > len(constants.NOTE_RANGE) - 1:
new_first_pitch = constants.NOTE_RANGE[-1]
else:
new_first_pitch = constants.NOTE_RANGE[init_scale_degree + intvl]
n.set_pitch(new_first_pitch)
continue
note_scale_degree = constants.NOTE_RANGE.index(n.pitch)
# The remaining notes will be transposed with the same intervals as previously
# If the note goes out of range, we lower or raise with an octave
if note_scale_degree + intvl > len(constants.NOTE_RANGE) - 1:
intvl = intvl - 7
elif note_scale_degree + intvl < 0:
intvl = intvl + 7
new_pitch = constants.NOTE_RANGE[note_scale_degree + intvl]
n.set_pitch(new_pitch)
def reverse_measure(individual: Individual):
m: Measure = rng.choice(individual.measures)
m_copy = copy.deepcopy(m)
j = len(m.notes) - 1
for i in range(len(m.notes)):
m.notes[i] = m_copy.notes[j]
j -= 1
|
en
| 0.874744
|
# If this is a sixteenth note, we remove it # Else we go one step back in duration # If we find the first pitch, we transpose this first # If the new scale degree is not in range, we set it to the minimum or maximum # The remaining notes will be transposed with the same intervals as previously # If the note goes out of range, we lower or raise with an octave
| 2.580096
| 3
|
emodelrunner/configuration/configparser.py
|
BlueBrain/EModelRunner
| 3
|
6628621
|
<gh_stars>1-10
"""Configuration parsing."""
# Copyright 2020-2022 Blue Brain Project / EPFL
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from configparser import ConfigParser
from enum import Enum
class PackageType(Enum):
    """Enumerator for the emodel package types."""

    # Values double as the string spelled in the [Package] section of a
    # configuration file (see EModelConfigParser.package_type).
    sscx = "sscx"
    thalamus = "thalamus"
    synplas = "synplas"
class EModelConfigParser(ConfigParser):
    """Built-in ConfigParser annotated with package type."""

    # The no-op __init__ override (which only forwarded to the parent
    # initializer) was removed; Python inherits it automatically.

    @property
    def package_type(self):
        """Package type as a property, read from the [Package] section's `type` key."""
        return PackageType[self.get("Package", "type")]
|
"""Configuration parsing."""
# Copyright 2020-2022 Blue Brain Project / EPFL
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from configparser import ConfigParser
from enum import Enum
class PackageType(Enum):
"""Enumerator for the emodel package types."""
sscx = "sscx"
thalamus = "thalamus"
synplas = "synplas"
class EModelConfigParser(ConfigParser):
"""Built-in ConfigParser annotated with package type."""
def __init__(self):
"""Constructor."""
super().__init__()
@property
def package_type(self):
"""Package type as a property."""
return PackageType[self.get("Package", "type")]
|
en
| 0.834159
|
Configuration parsing. # Copyright 2020-2022 Blue Brain Project / EPFL # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Enumerator for the emodel package types. Built-in ConfigParser annotated with package type. Constructor. Package type as a property.
| 2.352501
| 2
|
pygame3D/fpc.py
|
loicgirard/pygame-3D
| 0
|
6628622
|
<gh_stars>0
import numpy as np
import pygame
from pygame.locals import *
import math
class FirstPersonController:
    """Mouse-look and WASD movement controller that drives a camera object."""

    def __init__(self, camera, velocity=3.0, sensitivity=0.01):
        """
        First Person Controller that can be attached to a camera
        :param camera: the camera that the controller is attached to
        :param velocity: velocity of the controller (world units per second)
        :param sensitivity: how sensitive it responds to mouse movements (radian per pixel)
        """
        self.velocity = velocity
        self.sensitivity = sensitivity
        self.camera = camera
        # make the cursor invisible
        pygame.mouse.set_visible(False)
        # and make it able to go off the screen
        pygame.event.set_grab(True)

    def rotate(self):
        """
        rotates the camera according to the mouse movement (position difference from the previous frame)
        """
        a, b = pygame.mouse.get_rel()
        # the reason for the sign difference is that:
        # mouse goes right --> mouse x movement positive --> we should increase the rotation y of the camera
        # mouse goes down --> mouse y movement positive --> we should decrease the rotation x of the camera
        self.camera.ry += self.sensitivity * a  # left right movement affects the rotation y
        self.camera.rx += self.sensitivity * b  # up down movement affects the rotation x
        #print("self.camera.rx : ", self.camera.rx)

    def move(self, fps):
        """
        moves the camera
        :param fps: the current fps, needed to move the camera by an amount independent of the fps
        """
        y = self.camera.ry  # only the rotation y is needed to construct the movement matrix
        # 4x2 matrix mapping (strafe, forward) key input into a 4-component
        # world-space offset; rows 2 and 4 are zero so only the first and
        # third components of the camera position change.
        # NOTE(review): this assumes camera.position is a 4x1 column
        # (homogeneous coordinates) — TODO confirm against the camera class.
        matrix = np.matrix(
            [
                [math.cos(y), math.sin(y)],
                [0, 0],
                [-math.sin(y), math.cos(y)],
                [0, 0]
            ]
        )
        keys = pygame.key.get_pressed()
        a = keys[K_d] - keys[K_a]  # 1: right, -1: left, 0: no left-right movement
        b = keys[K_w] - keys[K_s]  # 1: up, -1: down, 0: no up-down movement
        # if there is a movement and the frame needs to be updated
        if a * a + b * b != 0 and fps != 0:
            # fps in the denominator is there because fps should not affect the movement
            # (sqrt normalizes the diagonal direction so diagonal speed equals axis speed)
            k = self.velocity / (fps * math.sqrt(a * a + b * b))
            self.camera.position += k * (matrix @ np.matrix([a, b]).T)

    def update(self, fps):
        """
        updates the camera every frame
        :param fps: the current fps, needed for the camera movement
        """
        self.rotate()
        self.move(fps)
|
import numpy as np
import pygame
from pygame.locals import *
import math
class FirstPersonController:
def __init__(self, camera, velocity=3.0, sensitivity=0.01):
"""
First Person Controller that can be attached to a camera
:param camera: the camera that the controller is attached to
:param velocity: velocity of the controller (world units per second)
:param sensitivity: how sensitive it responds to mouse movements (radian per pixel)
"""
self.velocity = velocity
self.sensitivity = sensitivity
self.camera = camera
# make the cursor invisible
pygame.mouse.set_visible(False)
# and make it able to go off the screen
pygame.event.set_grab(True)
def rotate(self):
"""
rotates the camera according to the mouse movement (position difference from the previous frame)
"""
a, b = pygame.mouse.get_rel()
# the reason for the sign difference is that:
# mouse goes right --> mouse x movement positive --> we should increase the rotation y of the camera
# mouse goes down --> mouse y movement positive --> we should decrease the rotation x of the camera
self.camera.ry += self.sensitivity * a # left right movement affects the rotation y
self.camera.rx += self.sensitivity * b # up down movement affects the rotation x
#print("self.camera.rx : ", self.camera.rx)
def move(self, fps):
"""
moves the camera
:param fps: the current fps, needed to move the camera by an amount independent of the fps
"""
y = self.camera.ry # only the rotation y is needed to construct the movement matrix
matrix = np.matrix(
[
[math.cos(y), math.sin(y)],
[0, 0],
[-math.sin(y), math.cos(y)],
[0, 0]
]
)
keys = pygame.key.get_pressed()
a = keys[K_d] - keys[K_a] # 1: right, -1: left, 0: no left-right movement
b = keys[K_w] - keys[K_s] # 1: up, -1: down, 0: no up-down movement
# if there is a movement and the frame needs to be updated
if a * a + b * b != 0 and fps != 0:
# fps in the denominator is there because fps should not affect the movement
k = self.velocity / (fps * math.sqrt(a * a + b * b))
self.camera.position += k * (matrix @ np.matrix([a, b]).T)
def update(self, fps):
"""
updates the camera every frame
:param fps: the current fps, needed for the camera movement
"""
self.rotate()
self.move(fps)
|
en
| 0.857512
|
First Person Controller that can be attached to a camera :param camera: the camera that the controller is attached to :param velocity: velocity of the controller (world units per second) :param sensitivity: how sensitive it responds to mouse movements (radian per pixel) # make the cursor invisible # and make it able to go off the screen rotates the camera according to the mouse movement (position difference from the previous frame) # the reason for the sign difference is that: # mouse goes right --> mouse x movement positive --> we should increase the rotation y of the camera # mouse goes down --> mouse y movement positive --> we should decrease the rotation x of the camera # left right movement affects the rotation y # up down movement affects the rotation x #print("self.camera.rx : ", self.camera.rx) moves the camera :param fps: the current fps, needed to move the camera by an amount independent of the fps # only the rotation y is needed to construct the movement matrix # 1: right, -1: left, 0: no left-right movement # 1: up, -1: down, 0: no up-down movement # if there is a movement and the frame needs to be updated # fps in the denominator is there because fps should not affect the movement updates the camera every frame :param fps: the current fps, needed for the camera movement
| 3.74668
| 4
|
app/config.py
|
Saberlion/docker-webssh
| 664
|
6628623
|
<reponame>Saberlion/docker-webssh
__author__ = 'xsank'
from tornado.options import define
def init_config():
define('port', default=9527, type=int, help='server listening port')
|
__author__ = 'xsank'
from tornado.options import define
def init_config():
define('port', default=9527, type=int, help='server listening port')
|
none
| 1
| 1.642688
| 2
|
|
volttron/drivers/smap_logging.py
|
kruthikarshankar/bemoss_os
| 3
|
6628624
|
#
# Copyright (c) 2013, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization
# that has cooperated in the development of these materials, makes
# any warranty, express or implied, or assumes any legal liability
# or responsibility for the accuracy, completeness, or usefulness or
# any information, apparatus, product, software, or process disclosed,
# or represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does
# not necessarily constitute or imply its endorsement, recommendation,
# r favoring by the United States Government or any agency thereof,
# or Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
#
try:
import simplejson as json
except ImportError:
import json
from smap import driver
from smap.util import periodicSequentialCall
from smap.contrib import dtutil
import os.path
import zmq
# IPC endpoints agents use to set up the pub/sub connection with the
# VOLTTRON Lite platform broker.
publish_address = 'ipc:///tmp/volttron-lite-agent-publish'
subscribe_address = 'ipc:///tmp/volttron-lite-agent-subscribe'
import logging
# All logging levels this driver knows about, most to least severe.
logging_levels = logging.CRITICAL, logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG
# Maps level names ('INFO', ...) back to their numeric values.
logging_string_map = {logging.getLevelName(level):level for level in logging_levels}
class Logger(driver.SmapDriver):
def setup(self, opts):
self.setup_subscriber()
self.interval = float(opts.get('interval',1))
self.archiver_logging_level = logging_string_map.get(opts.get('level','INFO'))
self.log_file = opts.get('file')
if self.archiver_logging_level is None:
raise ValueError('Invalid logging level')
if not self.log_file is None:
logging.basicConfig(filename=self.log_file, level=logging.DEBUG)
self.set_metadata('/', {'Extra/Driver' : 'volttron.drivers.logging.Logger',
'Instrument/SamplingPeriod' : str(self.interval)})
for level in logging_levels:
if level < self.archiver_logging_level:
continue
name = logging.getLevelName(level)
print name
self.add_timeseries('/' + name, 'Logs', data_type='string', description='Log Messages')
self.setup_subscriber()
self.setup_publisher()
self.subscribe()
def start(self):
# Call read every minute seconds
periodicSequentialCall(self.read).start(self.interval)
def subscribe(self):
for level in logging_string_map:
topic = self.get_topic_for_logging('/') + '/' + level
self._sub.subscribe = topic
print "Subscribe to:", topic
def read(self):
while True:
evt = self._poller.poll(0)
#If evt is empty then we did not receive any messages, break
if evt == None or evt == []:
break
else:
#Examine the message we recieved
message = self._sub.recv_multipart()
print message
if len(message) < 2:
self._push.send_multipart(['platform/loggererror', 'missing message'] + message)
continue
tokens = message[0].split('/')
log_level_string = tokens[-1].upper()
log_level_value = logging_string_map.get(log_level_string)
if log_level_value is None:
self._push.send_multipart(['platform/loggererror', 'invalid logging level'] + message)
continue
log_message = '|'.join(message[1:])
# for level in logging_levels:
# if level < log_level_value:
# break
# self.add('/'+logging.getLevelName(level), log_message)
logging.log(log_level_value, log_message)
def get_topic_for_logging(self, point):
return 'LOG' + self._SmapDriver__join_id(point)
def setup_subscriber(self):
#Subscribe to sub topic
ctx = zmq.Context()
self._sub = zmq.Socket(ctx, zmq.SUB)
self._sub.connect(subscribe_address)
#Setup a poller for use with the subscriber
self._poller = zmq.Poller()
self._poller.register(self._sub)
def setup_publisher(self):
#Connects to the broker's push topic
#Broker will forward to the sub topic
ctx = zmq.Context()
self._push = zmq.Socket(ctx, zmq.PUSH)
self._push.connect(publish_address)
|
#
# Copyright (c) 2013, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization
# that has cooperated in the development of these materials, makes
# any warranty, express or implied, or assumes any legal liability
# or responsibility for the accuracy, completeness, or usefulness or
# any information, apparatus, product, software, or process disclosed,
# or represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does
# not necessarily constitute or imply its endorsement, recommendation,
# r favoring by the United States Government or any agency thereof,
# or Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
#
try:
import simplejson as json
except ImportError:
import json
from smap import driver
from smap.util import periodicSequentialCall
from smap.contrib import dtutil
import os.path
import zmq
#Addresses agents use to setup the pub/sub
publish_address = 'ipc:///tmp/volttron-lite-agent-publish'
subscribe_address = 'ipc:///tmp/volttron-lite-agent-subscribe'
import logging
logging_levels = logging.CRITICAL, logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG
logging_string_map = {logging.getLevelName(level):level for level in logging_levels}
class Logger(driver.SmapDriver):
def setup(self, opts):
self.setup_subscriber()
self.interval = float(opts.get('interval',1))
self.archiver_logging_level = logging_string_map.get(opts.get('level','INFO'))
self.log_file = opts.get('file')
if self.archiver_logging_level is None:
raise ValueError('Invalid logging level')
if not self.log_file is None:
logging.basicConfig(filename=self.log_file, level=logging.DEBUG)
self.set_metadata('/', {'Extra/Driver' : 'volttron.drivers.logging.Logger',
'Instrument/SamplingPeriod' : str(self.interval)})
for level in logging_levels:
if level < self.archiver_logging_level:
continue
name = logging.getLevelName(level)
print name
self.add_timeseries('/' + name, 'Logs', data_type='string', description='Log Messages')
self.setup_subscriber()
self.setup_publisher()
self.subscribe()
def start(self):
# Call read every minute seconds
periodicSequentialCall(self.read).start(self.interval)
def subscribe(self):
for level in logging_string_map:
topic = self.get_topic_for_logging('/') + '/' + level
self._sub.subscribe = topic
print "Subscribe to:", topic
def read(self):
while True:
evt = self._poller.poll(0)
#If evt is empty then we did not receive any messages, break
if evt == None or evt == []:
break
else:
#Examine the message we recieved
message = self._sub.recv_multipart()
print message
if len(message) < 2:
self._push.send_multipart(['platform/loggererror', 'missing message'] + message)
continue
tokens = message[0].split('/')
log_level_string = tokens[-1].upper()
log_level_value = logging_string_map.get(log_level_string)
if log_level_value is None:
self._push.send_multipart(['platform/loggererror', 'invalid logging level'] + message)
continue
log_message = '|'.join(message[1:])
# for level in logging_levels:
# if level < log_level_value:
# break
# self.add('/'+logging.getLevelName(level), log_message)
logging.log(log_level_value, log_message)
def get_topic_for_logging(self, point):
return 'LOG' + self._SmapDriver__join_id(point)
def setup_subscriber(self):
#Subscribe to sub topic
ctx = zmq.Context()
self._sub = zmq.Socket(ctx, zmq.SUB)
self._sub.connect(subscribe_address)
#Setup a poller for use with the subscriber
self._poller = zmq.Poller()
self._poller.register(self._sub)
def setup_publisher(self):
#Connects to the broker's push topic
#Broker will forward to the sub topic
ctx = zmq.Context()
self._push = zmq.Socket(ctx, zmq.PUSH)
self._push.connect(publish_address)
|
en
| 0.803697
|
# # Copyright (c) 2013, Battelle Memorial Institute # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and documentation are those # of the authors and should not be interpreted as representing official policies, # either expressed or implied, of the FreeBSD Project. # # This material was prepared as an account of work sponsored by an # agency of the United States Government. 
Neither the United States # Government nor the United States Department of Energy, nor Battelle, # nor any of their employees, nor any jurisdiction or organization # that has cooperated in the development of these materials, makes # any warranty, express or implied, or assumes any legal liability # or responsibility for the accuracy, completeness, or usefulness or # any information, apparatus, product, software, or process disclosed, # or represents that its use would not infringe privately owned rights. # # Reference herein to any specific commercial product, process, or # service by trade name, trademark, manufacturer, or otherwise does # not necessarily constitute or imply its endorsement, recommendation, # r favoring by the United States Government or any agency thereof, # or Battelle Memorial Institute. The views and opinions of authors # expressed herein do not necessarily state or reflect those of the # United States Government or any agency thereof. # # PACIFIC NORTHWEST NATIONAL LABORATORY # operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY # under Contract DE-AC05-76RL01830 # #Addresses agents use to setup the pub/sub # Call read every minute seconds #If evt is empty then we did not receive any messages, break #Examine the message we recieved # for level in logging_levels: # if level < log_level_value: # break # self.add('/'+logging.getLevelName(level), log_message) #Subscribe to sub topic #Setup a poller for use with the subscriber #Connects to the broker's push topic #Broker will forward to the sub topic
| 0.789596
| 1
|
patterns.py
|
frica/blink1
| 0
|
6628625
|
#!/usr/bin/env python
""" Load blinking patterns defined in BlinkControl
"""
import json
import time
from blink1.blink1 import Blink1
def hex_to_rgb(value):
    """Convert a hex color string such as '#ff0000' to an (r, g, b) tuple.

    Based on http://stackoverflow.com/a/214657
    """
    digits = value.lstrip('#')
    step = len(digits) // 3
    return tuple(int(digits[start:start + step], 16)
                 for start in range(0, len(digits), step))
def play_pattern(pattern, blink):
    """Play a multi-color blinking pattern on a blink(1) device.

    :param pattern: flat list of strings -- the repeat count first, then one
        (hex color, fade time in seconds, LED index) triple per color.
        Example: ['6', '#ff0000', '0.3', '1', '#0000ff', '0.3', '2', '#000000', '0.1', '0', '#ff0000', '0.3', '2', '#0000ff', '0.3', '1', '#000000', '0.1', '0']
    :param blink: an opened blink1.Blink1 instance
    """
    print(pattern)
    repeat = int(pattern[0])
    print("We will repeat {0} times this pattern.".format(repeat))
    for loop in range(0, repeat):
        index = 1  # skip the repeat count; advances by 3 per color triple
        for i in range(0, len(pattern) // 3):
            fade = int(float(pattern[index + 1]) * 1000)  # seconds -> milliseconds
            # Empty LED field falls back to 0 -- presumably "all LEDs";
            # confirm against the blink1 library docs.
            led = int(pattern[index + 2]) if pattern[index + 2] else 0
            # Only describe the triples on the first repetition.
            if loop == 0:
                print("Color {0} (Index {1}): {2} - LED {3} - Fade {4}".format(i+1, index, pattern[index], led, fade))
            blink.fade_to_rgb(fade, hex_to_rgb(pattern[index])[0], hex_to_rgb(pattern[index])[1],
                              hex_to_rgb(pattern[index])[2], led)
            # NOTE(review): fixed 1 s pause per color regardless of the fade
            # duration -- presumably intentional; confirm.
            time.sleep(1)
            index += 3
if __name__ == '__main__':
    # Open the blink(1) device, load the pattern definitions, play each one,
    # then fade to black and release the device.
    blink = Blink1()
    try:
        patternFile = open('patternsReadOnly.json', 'r')
        patternData = json.load(patternFile)
    except ValueError:
        # Malformed JSON content. NOTE(review): an IOError/OSError from
        # open() is NOT caught here -- confirm whether that is intended.
        print("Invalid json from ", patternFile)
        exit()
    # pretty print json data
    # print(json.dumps(patternData, indent=1))
    print("# of patterns found: {}".format(len(patternData)))
    if len(patternData) != 0:
        for i in range(0, len(patternData)):
            # Each entry has a 'name' and a comma-separated 'pattern' string.
            pattern = [x.strip() for x in patternData[i]["pattern"].split(',')]
            print("Pattern: {}".format(patternData[i]["name"]))
            play_pattern(pattern, blink)
    # Fade back to black over one second before closing.
    blink.fade_to_rgb(1000, 0, 0, 0)
    blink.close()
    patternFile.close()
|
#!/usr/bin/env python
""" Load blinking patterns defined in BlinkControl
"""
import json
import time
from blink1.blink1 import Blink1
def hex_to_rgb(value):
""" Useful convert method from http://stackoverflow.com/a/214657 """
value = value.lstrip('#')
lv = len(value)
return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))
def play_pattern(pattern, blink):
""" play a pattern with multiple colors
Example: ['6', '#ff0000', '0.3', '1', '#0000ff', '0.3', '2', '#000000', '0.1', '0', '#ff0000', '0.3', '2', '#0000ff', '0.3', '1', '#000000', '0.1', '0']
"""
print(pattern)
repeat = int(pattern[0])
print("We will repeat {0} times this pattern.".format(repeat))
for loop in range(0, repeat):
index = 1
for i in range(0, len(pattern) // 3):
fade = int(float(pattern[index + 1]) * 1000)
led = int(pattern[index + 2]) if pattern[index + 2] else 0
if loop == 0:
print("Color {0} (Index {1}): {2} - LED {3} - Fade {4}".format(i+1, index, pattern[index], led, fade))
blink.fade_to_rgb(fade, hex_to_rgb(pattern[index])[0], hex_to_rgb(pattern[index])[1],
hex_to_rgb(pattern[index])[2], led)
time.sleep(1)
index += 3
if __name__ == '__main__':
blink = Blink1()
try:
patternFile = open('patternsReadOnly.json', 'r')
patternData = json.load(patternFile)
except ValueError:
print("Invalid json from ", patternFile)
exit()
# pretty print json data
# print(json.dumps(patternData, indent=1))
print("# of patterns found: {}".format(len(patternData)))
if len(patternData) != 0:
for i in range(0, len(patternData)):
pattern = [x.strip() for x in patternData[i]["pattern"].split(',')]
print("Pattern: {}".format(patternData[i]["name"]))
play_pattern(pattern, blink)
blink.fade_to_rgb(1000, 0, 0, 0)
blink.close()
patternFile.close()
|
en
| 0.387035
|
#!/usr/bin/env python Load blinking patterns defined in BlinkControl Useful convert method from http://stackoverflow.com/a/214657 play a pattern with multiple colors Example: ['6', '#ff0000', '0.3', '1', '#0000ff', '0.3', '2', '#000000', '0.1', '0', '#ff0000', '0.3', '2', '#0000ff', '0.3', '1', '#000000', '0.1', '0'] # pretty print json data # print(json.dumps(patternData, indent=1))
| 3.284093
| 3
|
mcc/providers/python.py
|
long2ice/CyclomaticComplexity
| 0
|
6628626
|
<gh_stars>0
from mcc.languages import Lang
from mcc.providers import Mccabe
class MccabePy(Mccabe):
    """McCabe cyclomatic-complexity provider for Python source files.

    Declares which tree-sitter node types count as decision points when
    computing complexity for ``.py`` files.
    """
    # File extension this provider handles.
    suffix = ".py"
    # Language tag from the shared Lang enum.
    language = Lang.py
    # Node types that each add one to the cyclomatic complexity count.
    judge_nodes = [
        "if_statement",
        "elif_clause",
        "while_statement",
        "for_statement",
        "except_clause",
        "boolean_operator",
        "with_statement",
        "assert_statement",
        "list_comprehension",
        "function_definition",
    ]
|
from mcc.languages import Lang
from mcc.providers import Mccabe
class MccabePy(Mccabe):
suffix = ".py"
language = Lang.py
judge_nodes = [
"if_statement",
"elif_clause",
"while_statement",
"for_statement",
"except_clause",
"boolean_operator",
"with_statement",
"assert_statement",
"list_comprehension",
"function_definition",
]
|
none
| 1
| 2.22148
| 2
|
|
src/main/mad_api_call_util.py
|
akshayub/mad-chat-bot
| 0
|
6628627
|
<filename>src/main/mad_api_call_util.py
# Import this class and use the necessary methods based on response by chat bot
import json
import requests
# Base endpoint of the (testing) MAD API.
BASE_URL = 'http://testing.makeadiff.in/api/v1'
# NOTE(review): placeholder HTTP basic-auth credentials -- replace with real
# credentials loaded from configuration; do not commit secrets.
auth = ('username', 'password')
def get_credits(userid):
    """GET the credit balance for *userid* from the MAD API.

    Returns the raw requests.Response object.
    """
    endpoint = BASE_URL + '/users/{}/credit'.format(userid)
    return requests.get(endpoint, auth=auth)
def get_class_history(userid):
    """GET the list of past classes for *userid* from the MAD API.

    Returns the raw requests.Response object.
    """
    endpoint = BASE_URL + '/users/{}/past_classes'.format(userid)
    return requests.get(endpoint, auth=auth)
def handle_response_extract_data(response):
    """Extract the 'data' field from a successful MAD API JSON response.

    Returns None when the response's 'status' field is not 'success'.
    Raises KeyError if 'status' (or, on success, 'data') is absent.
    """
    payload = response.json()
    return payload['data'] if payload['status'] == 'success' else None
|
<filename>src/main/mad_api_call_util.py
# Import this class and use the necessary methods based on response by chat bot
import json
import requests
BASE_URL = 'http://testing.makeadiff.in/api/v1'
auth = ('username', 'password')
def get_credits(userid):
resource = '/users/{}/credit'.format(userid)
return requests.get(BASE_URL + resource, auth=auth)
def get_class_history(userid):
resource = '/users/{}/past_classes'.format(userid)
return requests.get(BASE_URL + resource, auth=auth)
def handle_response_extract_data(response):
res = response.json()
if res['status'] == 'success':
return res['data']
|
en
| 0.899148
|
# Import this class and use the necessary methods based on response by chat bot
| 2.701589
| 3
|
src/sagemaker_algorithm_toolkit/channel_validation.py
|
Chick-star/sagemaker-xgboost-container
| 1
|
6628628
|
<gh_stars>1-10
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from sagemaker_algorithm_toolkit import exceptions as exc
# Keys of a runtime channel configuration dict, as SageMaker names them.
CONTENT_TYPE = "ContentType"
TRAINING_INPUT_MODE = "TrainingInputMode"
S3_DIST_TYPE = "S3DistributionType"
class Channel(object):
    """A single SageMaker training job channel and its supported configurations."""

    FILE_MODE = "File"
    PIPE_MODE = "Pipe"
    AUGMENTED_MODE = "Augmented"
    SHARDED = "ShardedByS3Key"
    REPLICATED = "FullyReplicated"

    def __init__(self, name, required):
        self.name = name
        self.required = required
        # Set of supported (content type, input mode, S3 distribution) triples.
        self.supported = set()

    def format(self):
        """Format this channel for SageMaker's CreateAlgorithm API."""
        content_types = {combo[0] for combo in self.supported}
        input_modes = {combo[1] for combo in self.supported}
        return {
            "Name": self.name,
            "Description": self.name,
            "IsRequired": self.required,
            "SupportedContentTypes": list(content_types),
            "SupportedInputModes": list(input_modes),
        }

    def add(self, content_type, supported_input_mode, supported_s3_data_distribution_type):
        """Record one (content type, input mode, S3 distribution) combination as supported."""
        combo = (content_type, supported_input_mode, supported_s3_data_distribution_type)
        self.supported.add(combo)

    def validate(self, value):
        """Validate a runtime channel configuration against the supported set.

        :raises exc.UserError: when the configuration triple is unsupported
        """
        requested = (value[CONTENT_TYPE], value[TRAINING_INPUT_MODE], value[S3_DIST_TYPE])
        if requested not in self.supported:
            raise exc.UserError("Channel configuration for '{}' channel is not supported: {}".format(self.name, value))
class Channels(object):
    """A collection of Channel objects for a SageMaker training job."""

    def __init__(self, *channels):
        self.channels = channels
        self.default_content_type = None

    def set_default_content_type(self, default_content_type):
        """Set the content type assumed for channels that omit ContentType."""
        self.default_content_type = default_content_type

    def format(self):
        """Format all channels for SageMaker's CreateAlgorithm API."""
        return [ch.format() for ch in self.channels]

    def validate(self, user_channels):
        """Validate user-specified channels against the supported configuration.

        When a default content type has been set, channels that omit
        ContentType get it filled in (the input dicts are updated in place).

        :param user_channels: mapping of channel name to a configuration dict,
            e.g. {"train": {"ContentType": ..., "TrainingInputMode": ...,
            "S3DistributionType": ...}, ...}
        :return: mapping of validated channel name to configuration dict
        :raises exc.UserError: on a missing required channel, an unknown
            channel name, or a missing content type with no default set
        """
        # Every required channel must be present in the user's input.
        for channel in self.channels:
            if channel.required and channel.name not in user_channels:
                raise exc.UserError("Missing required channel: {}".format(channel.name))

        by_name = {channel.name: channel for channel in self.channels}
        validated_channels = {}
        for name, config in user_channels.items():
            if name not in by_name:
                raise exc.UserError("Extraneous channel found: {}".format(name))
            if CONTENT_TYPE not in config:
                if not self.default_content_type:
                    raise exc.UserError("Missing content type for channel: {}".format(name))
                config[CONTENT_TYPE] = self.default_content_type
            by_name[name].validate(config)
            validated_channels[name] = config
        return validated_channels
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from sagemaker_algorithm_toolkit import exceptions as exc
CONTENT_TYPE = "ContentType"
TRAINING_INPUT_MODE = "TrainingInputMode"
S3_DIST_TYPE = "S3DistributionType"
class Channel(object):
"""Represents a single SageMaker training job channel."""
FILE_MODE = "File"
PIPE_MODE = "Pipe"
AUGMENTED_MODE = "Augmented"
SHARDED = "ShardedByS3Key"
REPLICATED = "FullyReplicated"
def __init__(self, name, required):
self.name = name
self.required = required
self.supported = set()
def format(self):
"""Format channel for SageMaker's CreateAlgorithm API."""
supported_content_types = list(set(c[0] for c in self.supported))
supported_input_modes = list(set(c[1] for c in self.supported))
return {"Name": self.name,
"Description": self.name,
"IsRequired": self.required,
"SupportedContentTypes": supported_content_types,
"SupportedInputModes": supported_input_modes,
}
def add(self, content_type, supported_input_mode, supported_s3_data_distribution_type):
"""Add relevant configuration as a supported configuration for the channel."""
self.supported.add((content_type, supported_input_mode, supported_s3_data_distribution_type))
def validate(self, value):
"""Validate the provided configuration against the channel's supported configuration."""
if (value[CONTENT_TYPE], value[TRAINING_INPUT_MODE], value[S3_DIST_TYPE]) not in self.supported:
raise exc.UserError("Channel configuration for '{}' channel is not supported: {}".format(self.name, value))
class Channels(object):
"""Represents a collection of Channels for a SageMaker training job."""
def __init__(self, *channels):
self.channels = channels
self.default_content_type = None
def set_default_content_type(self, default_content_type):
self.default_content_type = default_content_type
def format(self):
"""Format channels for SageMaker's CreateAlgorithm API."""
return [channel.format() for channel in self.channels]
def validate(self, user_channels):
"""Validate the provided user-specified channels at runtime against the channels' supported configuration.
Note that this adds default content type for channels if a default exists.
:param user_channels: dictionary of channels formatted like so
{
"channel_name": {
"ContentType": <content_type>.
"TrainingInputMode": <training_input_mode>,
"S3DistributionType": <s3_dist_type>,
...
},
"channel_name": {...
}
}
"""
for channel in self.channels:
if channel.name not in user_channels:
if channel.required:
raise exc.UserError("Missing required channel: {}".format(channel.name))
name_to_channel = {channel.name: channel for channel in self.channels}
validated_channels = {}
for channel, value in user_channels.items():
try:
channel_obj = name_to_channel[channel]
except KeyError:
raise exc.UserError("Extraneous channel found: {}".format(channel))
if CONTENT_TYPE not in value:
if self.default_content_type:
value[CONTENT_TYPE] = self.default_content_type
else:
raise exc.UserError("Missing content type for channel: {}".format(channel))
channel_obj.validate(value)
validated_channels[channel] = value
return validated_channels
|
en
| 0.81849
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the 'License'). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the 'license' file accompanying this file. This file is # distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. Represents a single SageMaker training job channel. Format channel for SageMaker's CreateAlgorithm API. Add relevant configuration as a supported configuration for the channel. Validate the provided configuration against the channel's supported configuration. Represents a collection of Channels for a SageMaker training job. Format channels for SageMaker's CreateAlgorithm API. Validate the provided user-specified channels at runtime against the channels' supported configuration. Note that this adds default content type for channels if a default exists. :param user_channels: dictionary of channels formatted like so { "channel_name": { "ContentType": <content_type>. "TrainingInputMode": <training_input_mode>, "S3DistributionType": <s3_dist_type>, ... }, "channel_name": {... } }
| 1.675465
| 2
|
koku/masu/test/processor/azure/test_azure_report_parquet_processor.py
|
rubik-ai/koku
| 157
|
6628629
|
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Test the AzureReportParquetProcessor."""
from tenant_schemas.utils import schema_context
from api.utils import DateHelper
from masu.processor.azure.azure_report_parquet_processor import AzureReportParquetProcessor
from masu.test import MasuTestCase
from reporting.provider.azure.models import AzureCostEntryBill
from reporting.provider.azure.models import AzureCostEntryLineItemDailySummary
from reporting.provider.azure.models import PRESTO_LINE_ITEM_TABLE
from reporting.provider.azure.models import PRESTO_OCP_ON_AZURE_DAILY_TABLE
class AzureReportParquetProcessorTest(MasuTestCase):
"""Test cases for the AzureReportParquetProcessor."""
def setUp(self):
"""Setup up shared variables."""
super().setUp()
self.manifest_id = 1
self.account = 10001
self.s3_path = "/s3/path"
self.provider_uuid = self.azure_provider_uuid
self.local_parquet = "/local/path"
self.processor = AzureReportParquetProcessor(
self.manifest_id, self.account, self.s3_path, self.provider_uuid, self.local_parquet
)
def test_azure_table_name(self):
"""Test the Azure table name generation."""
self.assertEqual(self.processor._table_name, PRESTO_LINE_ITEM_TABLE)
s3_path = "/s3/path/openshift/daily"
processor = AzureReportParquetProcessor(
self.manifest_id, self.account, s3_path, self.aws_provider_uuid, self.local_parquet
)
self.assertEqual(processor._table_name, PRESTO_OCP_ON_AZURE_DAILY_TABLE)
def test_postgres_summary_table(self):
"""Test that the correct table is returned."""
self.assertEqual(self.processor.postgres_summary_table, AzureCostEntryLineItemDailySummary)
def test_create_bill(self):
"""Test that a bill is created in the Postgres database."""
bill_date = DateHelper().this_month_start
start_date = bill_date
end_date = DateHelper().this_month_end
self.processor.create_bill(bill_date.date())
with schema_context(self.schema):
bill = AzureCostEntryBill.objects.filter(
billing_period_start=start_date, billing_period_end=end_date, provider=self.azure_provider_uuid
)
self.assertIsNotNone(bill.first())
def test_create_bill_with_string_arg(self):
"""Test that a bill is created in the Postgres database."""
bill_date = DateHelper().this_month_start
start_date = bill_date
end_date = DateHelper().this_month_end
self.processor.create_bill(str(bill_date.date()))
with schema_context(self.schema):
bill = AzureCostEntryBill.objects.filter(
billing_period_start=start_date, billing_period_end=end_date, provider=self.azure_provider_uuid
)
self.assertIsNotNone(bill.first())
|
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Test the AzureReportParquetProcessor."""
from tenant_schemas.utils import schema_context
from api.utils import DateHelper
from masu.processor.azure.azure_report_parquet_processor import AzureReportParquetProcessor
from masu.test import MasuTestCase
from reporting.provider.azure.models import AzureCostEntryBill
from reporting.provider.azure.models import AzureCostEntryLineItemDailySummary
from reporting.provider.azure.models import PRESTO_LINE_ITEM_TABLE
from reporting.provider.azure.models import PRESTO_OCP_ON_AZURE_DAILY_TABLE
class AzureReportParquetProcessorTest(MasuTestCase):
"""Test cases for the AzureReportParquetProcessor."""
def setUp(self):
"""Setup up shared variables."""
super().setUp()
self.manifest_id = 1
self.account = 10001
self.s3_path = "/s3/path"
self.provider_uuid = self.azure_provider_uuid
self.local_parquet = "/local/path"
self.processor = AzureReportParquetProcessor(
self.manifest_id, self.account, self.s3_path, self.provider_uuid, self.local_parquet
)
def test_azure_table_name(self):
"""Test the Azure table name generation."""
self.assertEqual(self.processor._table_name, PRESTO_LINE_ITEM_TABLE)
s3_path = "/s3/path/openshift/daily"
processor = AzureReportParquetProcessor(
self.manifest_id, self.account, s3_path, self.aws_provider_uuid, self.local_parquet
)
self.assertEqual(processor._table_name, PRESTO_OCP_ON_AZURE_DAILY_TABLE)
def test_postgres_summary_table(self):
"""Test that the correct table is returned."""
self.assertEqual(self.processor.postgres_summary_table, AzureCostEntryLineItemDailySummary)
def test_create_bill(self):
"""Test that a bill is created in the Postgres database."""
bill_date = DateHelper().this_month_start
start_date = bill_date
end_date = DateHelper().this_month_end
self.processor.create_bill(bill_date.date())
with schema_context(self.schema):
bill = AzureCostEntryBill.objects.filter(
billing_period_start=start_date, billing_period_end=end_date, provider=self.azure_provider_uuid
)
self.assertIsNotNone(bill.first())
def test_create_bill_with_string_arg(self):
"""Test that a bill is created in the Postgres database."""
bill_date = DateHelper().this_month_start
start_date = bill_date
end_date = DateHelper().this_month_end
self.processor.create_bill(str(bill_date.date()))
with schema_context(self.schema):
bill = AzureCostEntryBill.objects.filter(
billing_period_start=start_date, billing_period_end=end_date, provider=self.azure_provider_uuid
)
self.assertIsNotNone(bill.first())
|
en
| 0.715965
|
# # Copyright 2021 Red Hat Inc. # SPDX-License-Identifier: Apache-2.0 # Test the AzureReportParquetProcessor. Test cases for the AzureReportParquetProcessor. Setup up shared variables. Test the Azure table name generation. Test that the correct table is returned. Test that a bill is created in the Postgres database. Test that a bill is created in the Postgres database.
| 2.091052
| 2
|
sources/atomipython/test.py
|
kantel/python-schulung
| 0
|
6628630
|
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(-10., 10., 1000)
plt.plot(x, np.sin(x), "-r", label="Sinus")
plt.plot(x, np.cos(x), "-b", label="Cosinus")
plt.legend()
plt.ylim(-3., 3.)
plt.grid()
plt.show()
|
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(-10., 10., 1000)
plt.plot(x, np.sin(x), "-r", label="Sinus")
plt.plot(x, np.cos(x), "-b", label="Cosinus")
plt.legend()
plt.ylim(-3., 3.)
plt.grid()
plt.show()
|
none
| 1
| 3.301977
| 3
|
|
officials/ranking_statistics.py
|
Fabrice-64/advocacy_project
| 0
|
6628631
|
"""
This module calculates the ranking of the officials on the basis
of the calculations operated in the module calculations.py.
The data transit from the views :
def officials_ranking
def officials_to_engage
the module calculations computes the data and they
are send to this module to sort out the officials.
The named tuples are converted into a pandas DataFrame
in order to be processed.
Two methods are implemented and not used, but may be useful
in a further stage:
get_stats_from_officials:
returns a pandas description of the dataframe throught the method pd.describe()
get_quantiles_from_officials:
returns the quartiles of the dataframe.
The other methods sort out the officials in different categories, based on quartiles
get_officials_below_P50_I50:
officials whose influence and propinquity are below the threshold of 0.5
get_officials_below_P50_above_I50:
officials whose propinquity is below the threshold of 0.5, but with a higher influence
get_officials_above_P50_below_I50:
officials whose propinquity is above the threshold of 0.5, but with a lower influence
get_officials_above_P50_above_I50:
officials who are influential and own a high propinquity with the values of the association.
A method designed to get the influence targets:
get_influence_targets:
it loooks for the soft spot: officials who are both rather influential and own a propinquity
that can grow.
All these methods returnto the views a list of dictionaries with the relevant officials.
"""
import pandas as pd
class GetStatsFromOfficials:
def __init__(self, officials):
""" Create a dataframe to be processed all along the class
Attribute:
officials: a list of named tuples
"""
self.officials = pd.DataFrame(officials)
def get_stats_from_officials(self):
officials_description = self.officials.describe()
return officials_description.to_dict('dict')
def get_quantiles_from_officials(self):
officials_quantiles = self.officials.quantile([0.25, 0.5, 0.75, 1])
return officials_quantiles.to_dict('dict')
def get_officials_below_P50_I50(self):
return [value for key, value in self.officials.loc[(
self.officials['propinquity'] < self.officials['propinquity'].quantile(0.50)) & (
self.officials['influence'] < self.officials['influence'].quantile(0.50))].to_dict('index').items()]
def get_officials_below_P50_above_I50(self):
return [value for key, value in self.officials.loc[(
self.officials['propinquity'] < self.officials['propinquity'].quantile(0.50)) & (
self.officials['influence'] >= self.officials['influence'].quantile(0.50))].to_dict('index').items()]
def get_officials_above_P50_below_I50(self):
return [value for key, value in self.officials.loc[(
self.officials['propinquity'] >= self.officials['propinquity'].quantile(0.50)) & (
self.officials['influence'] < self.officials['influence'].quantile(0.50))].to_dict('index').items()]
def get_officials_above_P50_above_I50(self):
return [value for key, value in self.officials.loc[(
self.officials['propinquity'] >= self.officials['propinquity'].quantile(0.50)) & (
self.officials['influence'] >= self.officials['influence'].quantile(0.50))].to_dict('index').items()]
def get_influence_targets(self):
return [value for key, value in self.officials.loc[(
self.officials['propinquity'] >= self.officials['propinquity'].quantile(0.40)) & (
self.officials['propinquity'] < self.officials['propinquity'].quantile(0.80)) & (
self.officials['influence'] >= self.officials['influence'].quantile(0.40))].to_dict('index').items()]
|
"""
This module calculates the ranking of the officials on the basis
of the calculations operated in the module calculations.py.
The data transit from the views :
def officials_ranking
def officials_to_engage
the module calculations computes the data and they
are send to this module to sort out the officials.
The named tuples are converted into a pandas DataFrame
in order to be processed.
Two methods are implemented and not used, but may be useful
in a further stage:
get_stats_from_officials:
returns a pandas description of the dataframe throught the method pd.describe()
get_quantiles_from_officials:
returns the quartiles of the dataframe.
The other methods sort out the officials in different categories, based on quartiles
get_officials_below_P50_I50:
officials whose influence and propinquity are below the threshold of 0.5
get_officials_below_P50_above_I50:
officials whose propinquity is below the threshold of 0.5, but with a higher influence
get_officials_above_P50_below_I50:
officials whose propinquity is above the threshold of 0.5, but with a lower influence
get_officials_above_P50_above_I50:
officials who are influential and own a high propinquity with the values of the association.
A method designed to get the influence targets:
get_influence_targets:
it loooks for the soft spot: officials who are both rather influential and own a propinquity
that can grow.
All these methods returnto the views a list of dictionaries with the relevant officials.
"""
import pandas as pd
class GetStatsFromOfficials:
def __init__(self, officials):
""" Create a dataframe to be processed all along the class
Attribute:
officials: a list of named tuples
"""
self.officials = pd.DataFrame(officials)
def get_stats_from_officials(self):
officials_description = self.officials.describe()
return officials_description.to_dict('dict')
def get_quantiles_from_officials(self):
officials_quantiles = self.officials.quantile([0.25, 0.5, 0.75, 1])
return officials_quantiles.to_dict('dict')
def get_officials_below_P50_I50(self):
return [value for key, value in self.officials.loc[(
self.officials['propinquity'] < self.officials['propinquity'].quantile(0.50)) & (
self.officials['influence'] < self.officials['influence'].quantile(0.50))].to_dict('index').items()]
def get_officials_below_P50_above_I50(self):
return [value for key, value in self.officials.loc[(
self.officials['propinquity'] < self.officials['propinquity'].quantile(0.50)) & (
self.officials['influence'] >= self.officials['influence'].quantile(0.50))].to_dict('index').items()]
def get_officials_above_P50_below_I50(self):
return [value for key, value in self.officials.loc[(
self.officials['propinquity'] >= self.officials['propinquity'].quantile(0.50)) & (
self.officials['influence'] < self.officials['influence'].quantile(0.50))].to_dict('index').items()]
def get_officials_above_P50_above_I50(self):
return [value for key, value in self.officials.loc[(
self.officials['propinquity'] >= self.officials['propinquity'].quantile(0.50)) & (
self.officials['influence'] >= self.officials['influence'].quantile(0.50))].to_dict('index').items()]
def get_influence_targets(self):
return [value for key, value in self.officials.loc[(
self.officials['propinquity'] >= self.officials['propinquity'].quantile(0.40)) & (
self.officials['propinquity'] < self.officials['propinquity'].quantile(0.80)) & (
self.officials['influence'] >= self.officials['influence'].quantile(0.40))].to_dict('index').items()]
|
en
| 0.851431
|
This module calculates the ranking of the officials on the basis of the calculations operated in the module calculations.py. The data transit from the views : def officials_ranking def officials_to_engage the module calculations computes the data and they are send to this module to sort out the officials. The named tuples are converted into a pandas DataFrame in order to be processed. Two methods are implemented and not used, but may be useful in a further stage: get_stats_from_officials: returns a pandas description of the dataframe throught the method pd.describe() get_quantiles_from_officials: returns the quartiles of the dataframe. The other methods sort out the officials in different categories, based on quartiles get_officials_below_P50_I50: officials whose influence and propinquity are below the threshold of 0.5 get_officials_below_P50_above_I50: officials whose propinquity is below the threshold of 0.5, but with a higher influence get_officials_above_P50_below_I50: officials whose propinquity is above the threshold of 0.5, but with a lower influence get_officials_above_P50_above_I50: officials who are influential and own a high propinquity with the values of the association. A method designed to get the influence targets: get_influence_targets: it loooks for the soft spot: officials who are both rather influential and own a propinquity that can grow. All these methods returnto the views a list of dictionaries with the relevant officials. Create a dataframe to be processed all along the class Attribute: officials: a list of named tuples
| 3.074474
| 3
|
thirdparty/src/gazebo_plugins/__init__.py
|
hsr-project/hsrb_gazebo_plugins
| 0
|
6628632
|
## flake8: noqa
|
## flake8: noqa
|
it
| 0.170067
|
## flake8: noqa
| 0.894175
| 1
|
DATA/workflow/PTM/databases/ELMpred/pred_new.py
|
korcsmarosgroup/ARN2DataBase
| 0
|
6628633
|
'''
Maps ELMs to their protein IDs and the interacting domain's protein ID and inserts the two into an SQL database.
:argument: EXPORT_DB_LOCATION: saving location of the final database
:argument: ELMS_FILE: all ELM classes of the four used species in a .tsv files: http://elm.eu.org/classes/
:argument: INT_DOMAINS_FILE: files containing ELM names and their interacting domain PFAM ids in a .tsv files: http://elm.eu.org/interactiondomains
:argument: PROT_LIST: list of files for each species used, containing their whole proteomes from UniProt in .fa files
'''
# Imports
import csv, os
from SLKlib.SQLiteDBApi.sqlite_db_api import PsimiSQL
import re, logging
from dataclasses import dataclass, field
from typing import List
from functools import lru_cache
from collections import defaultdict
# Defining constants
SQL_SEED = '../../SLKlib/SQLiteDBApi/network-db-seed.sql'
DB_TYPE = 'ELM'
EXPORT_DB_LOCATION = '../../output/ELM.db'
ELMS_FILE = 'PTM/databases/ELMpred/files/elm_classes.tsv'
INT_DOMAINS_FILE = 'PTM/databases/ELMpred/files/elm_interaction_domains.tsv'
UNIPROT_DATA_FILE = 'PTM/databases/ELMpred/files/uniprot_9606,7227,6239,7955_entry,taxid,pdb,pfam.tsv'
UNIPROT = {}
pred_db = 'ELM_pred.db'
file_list = []
logging.basicConfig(level=logging.DEBUG)
qsse_dict = {
'E': 0.35,
'H': 0.55,
'I': 0.55,
'B': 1.5,
'T': 1.5,
'S': 1.5,
'U': 1.5,
'G': 1.33,
' ': 0.0,
}
# Residue max accessibility
Miller = {
'A': 113.0,
'R': 241.0,
'N': 158.0,
'D': 151.0,
'B': 154.5,
'C': 140.0,
'Q': 189.0,
'E': 183.0,
'G': 85.0,
'H': 194.0,
'I': 182.0,
'L': 180.0,
'K': 211.0,
'M': 204.0,
'F': 218.0,
'P': 143.0,
'S': 122.0,
'T': 146.0,
'W': 259.0,
'Y': 229.0,
'V': 160.0,
}
@dataclass
class ELMmatch:
elm_name: str = field(default_factory=lambda: "")
elm_start: int = field(default_factory=lambda: -1)
elm_end: int = field(default_factory=lambda: -1)
elm_seq: str = field(default_factory=lambda: "")
Qand: int = field(default_factory=lambda: 0)
elm_prot_id: List = field(default_factory=lambda: [])
taxid: int = field(default_factory=lambda: 0)
elm_domain: List = field(default_factory=lambda: [])
domain_prot_id: List = field(default_factory=lambda: [])
dssp_file: str = field(default_factory=lambda: "")
pdb: str = field(default_factory=lambda: "")
ELMmaches = []
@dataclass
class DSSPline:
position: int
letter: str
sec_str: str
QACC: int
QSSE: float
Accessibility: float
def get_match(filename):
# Getting ELM data
#elms = csv.reader(open(ELMS_FILE), delimiter='\t')
#next(elms)
#Getting sequence data from dssp
with open(filename) as dsspfile:
for line in dsspfile:
if line[2] == '#':
break
seq = ''
for row in dsspfile:
seq += row[13].upper()
#Using RegEx algorithm to find ELM matches in the dssp sequence
with open(ELMS_FILE) as elms:
for line in elms:
if line[0] == "#":
continue
line = line.strip().split('\t')
regex = r'%s' % line[4].replace('"','')
matches = re.finditer(regex, seq)
for matchNum, match in enumerate(matches):
match = ELMmatch(
elm_name=line[1].replace('"',''),
elm_start=match.start(),
elm_end=match.end(),
elm_seq=match.group(),
dssp_file=filename,
pdb=str(os.path.basename(filename).split(".")[0]),
)
ELMmaches.append(match)
#print("Match {matchNum} was found at {start}-{end}: {match}".format(matchNum=matchNum, start=match.start(),
#end=match.end(), match=match.group()), line[1])
@lru_cache(maxsize=None)
def proc_dssp(dsspfilename):
dssp = []
with open(dsspfilename) as dsspfile:
for line in dsspfile:
if line[2] == '#':
break
for row in dsspfile:
aa = row[13].upper()
qacc = int(row[35:38].strip())
rel_acc = 0
if aa in Miller.keys():
qacc = int(row[35:38].strip())
rel_acc = qacc / Miller[aa]
dssp.append(DSSPline(
position=int(row[0:6].strip()),
letter=aa,
sec_str=row[16],
QACC=qacc,
QSSE=qsse_dict[row[16]],
Accessibility=rel_acc,
))
return dssp
def get_scores():
# SELECT elm_name, elm_start, elm_end, elm_seq FROM elm_to_prot
for m in ELMmaches:
qsse_match = 0
qacc_match = 0
dssp = proc_dssp(m.dssp_file)
match_lenght = len(m.elm_seq)
for seq_pos in range(m.elm_start, m.elm_end+1):
qsse_match += dssp[seq_pos-1].QSSE
qacc_match += dssp[seq_pos-1].QACC
Qsse = qsse_match / match_lenght
Qacc = qacc_match / match_lenght
m.Qand = Qsse + Qacc
def get_protein_id():
"""
Converts dssp pdb files names to uniprot ids
:return:
"""
for m in ELMmaches:
for u in UNIPROT["pdb2uniprot"][m.pdb]:
m.elm_prot_id.append(u)
logging.debug('Protein id done')
def get_domain():
# Getting domain data
domains = csv.reader(open(INT_DOMAINS_FILE), delimiter='\t')
next(domains)
ELM2domain = {}
for line in domains:
pfamid = line[1]
elmname = line[0]
if elmname not in ELM2domain:
ELM2domain[elmname] = []
ELM2domain[elmname].append(pfamid)
for m in ELMmaches:
if m.elm_name in ELM2domain:
m.elm_domain = ELM2domain[m.elm_name].copy()
for d in m.elm_domain:
for u in UNIPROT["pfam2uniprot"][d]:
if UNIPROT["uniprotac2taxid"][u] == m.taxid:
m.domain_prot_id.append(u)
logging.debug('Domain done')
def get_taxid():
for m in ELMmaches:
taxid = None
for u in m.elm_prot_id:
if u in UNIPROT["uniprotac2taxid"]:
taxid = UNIPROT["uniprotac2taxid"][u]
m.taxid = taxid
def insert_or_get_node_dict(id, idtype, taxid, node_names_to_id, db_api):
node_dict = {
"name": idtype.strip() + ':' + id.strip(),
"tax_id": taxid,
"alt_accession": None,
'pathways': None,
"aliases": None,
"topology": None
}
if not re.search("^[/\\.\\w-]+$", id):
print("WARNING: malformed node id: " + node_dict['name'])
return None
if node_dict['name'] in node_names_to_id:
node_dict['id'] = node_names_to_id[node_dict['name']]
else:
db_api.insert_unique_node(node_dict)
node_names_to_id[node_dict['name']] = node_dict['id']
return node_dict
def loadUniprotFile(filename):
UNIPROT["uniprotac2taxid"] = {}
UNIPROT["pdb2uniprot"] = defaultdict(list)
UNIPROT["pfam2uniprot"] = defaultdict(list)
with open(filename) as f:
f.readline()
for line in f:
cells = line.strip().split("\t")
if len(cells) != 4:
continue
uniprotac = cells[0]
taxid = int(cells[1])
pdbs = [c for c in cells[2].split(";") if c != ""]
pfams = [c for c in cells[3].split(";") if c != ""]
UNIPROT["uniprotac2taxid"][uniprotac] = taxid
for pdb in pdbs:
UNIPROT["pdb2uniprot"][pdb].append(uniprotac)
for pfam in pfams:
UNIPROT["pfam2uniprot"][pfam].append(uniprotac)
def main(logger):
# Initiating the parser
db_api = PsimiSQL(SQL_SEED)
node_names_to_id = {}
loadUniprotFile(UNIPROT_DATA_FILE)
for files in os.listdir('PTM/databases/ELMpred/dssp/LAB/'):
file_list.append('PTM/databases/ELMpred/dssp/LAB/' + files)
i=0
for file in file_list:
i+=1
if i == 15000:
break
get_match(file)
get_scores()
get_protein_id()
get_taxid()
get_domain()
logging.debug('Done creating elm map. Starting adding to DB structure')
#SELECT elm_prot_id, domain_prot_id, taxid from elm_to_prot
for m in ELMmaches:
if len(m.domain_prot_id) > 0 and len(m.elm_prot_id) > 0:
for m_elm_prot_id in m.elm_prot_id:
for m_domain_prot_id in m.domain_prot_id:
# Creating the node dicts, if the node is already in the db assigning that to the node dict
source_dict = insert_or_get_node_dict(m_elm_prot_id, "Uniprot", m.taxid, node_names_to_id, db_api)
target_dict = insert_or_get_node_dict(m_domain_prot_id, "Uniprot", m.taxid, node_names_to_id, db_api)
# Nodes are inserted to the db if they are not in it yet
if 'id' not in source_dict:
db_api.insert_node(source_dict)
if 'id' not in target_dict:
db_api.insert_node(target_dict)
edge_dict = {
'publication_ids': 'pubmed:26615199',
'layer': '2',
'source_db': DB_TYPE, # ontology database citation
'interaction_identifiers': None,
'confidence_scores': None, # if available
'interaction_detection_method': None, # probably exp type
'interaction_types': 'MI:0190(interaction type)',
'first_author': None
}
db_api.insert_edge(source_dict, target_dict, edge_dict)
# Saving the to a DB_TYPE.db files
db_api.save_db_to_file(EXPORT_DB_LOCATION)
if __name__ == '__main__':
print("Parsing database...")
main(logger=None)
print("Parsing database is completed. SQLite database is saved to: " + EXPORT_DB_LOCATION)
|
'''
Maps ELMs to their protein IDs and the interacting domain's protein ID and inserts the two into an SQL database.
:argument: EXPORT_DB_LOCATION: saving location of the final database
:argument: ELMS_FILE: all ELM classes of the four used species in a .tsv files: http://elm.eu.org/classes/
:argument: INT_DOMAINS_FILE: files containing ELM names and their interacting domain PFAM ids in a .tsv files: http://elm.eu.org/interactiondomains
:argument: PROT_LIST: list of files for each species used, containing their whole proteomes from UniProt in .fa files
'''
# Imports
import csv, os
from SLKlib.SQLiteDBApi.sqlite_db_api import PsimiSQL
import re, logging
from dataclasses import dataclass, field
from typing import List
from functools import lru_cache
from collections import defaultdict
# Defining constants
SQL_SEED = '../../SLKlib/SQLiteDBApi/network-db-seed.sql'
DB_TYPE = 'ELM'
EXPORT_DB_LOCATION = '../../output/ELM.db'
ELMS_FILE = 'PTM/databases/ELMpred/files/elm_classes.tsv'
INT_DOMAINS_FILE = 'PTM/databases/ELMpred/files/elm_interaction_domains.tsv'
UNIPROT_DATA_FILE = 'PTM/databases/ELMpred/files/uniprot_9606,7227,6239,7955_entry,taxid,pdb,pfam.tsv'
UNIPROT = {}
pred_db = 'ELM_pred.db'
file_list = []
logging.basicConfig(level=logging.DEBUG)
qsse_dict = {
'E': 0.35,
'H': 0.55,
'I': 0.55,
'B': 1.5,
'T': 1.5,
'S': 1.5,
'U': 1.5,
'G': 1.33,
' ': 0.0,
}
# Residue max accessibility
Miller = {
'A': 113.0,
'R': 241.0,
'N': 158.0,
'D': 151.0,
'B': 154.5,
'C': 140.0,
'Q': 189.0,
'E': 183.0,
'G': 85.0,
'H': 194.0,
'I': 182.0,
'L': 180.0,
'K': 211.0,
'M': 204.0,
'F': 218.0,
'P': 143.0,
'S': 122.0,
'T': 146.0,
'W': 259.0,
'Y': 229.0,
'V': 160.0,
}
@dataclass
class ELMmatch:
elm_name: str = field(default_factory=lambda: "")
elm_start: int = field(default_factory=lambda: -1)
elm_end: int = field(default_factory=lambda: -1)
elm_seq: str = field(default_factory=lambda: "")
Qand: int = field(default_factory=lambda: 0)
elm_prot_id: List = field(default_factory=lambda: [])
taxid: int = field(default_factory=lambda: 0)
elm_domain: List = field(default_factory=lambda: [])
domain_prot_id: List = field(default_factory=lambda: [])
dssp_file: str = field(default_factory=lambda: "")
pdb: str = field(default_factory=lambda: "")
ELMmaches = []
@dataclass
class DSSPline:
position: int
letter: str
sec_str: str
QACC: int
QSSE: float
Accessibility: float
def get_match(filename):
# Getting ELM data
#elms = csv.reader(open(ELMS_FILE), delimiter='\t')
#next(elms)
#Getting sequence data from dssp
with open(filename) as dsspfile:
for line in dsspfile:
if line[2] == '#':
break
seq = ''
for row in dsspfile:
seq += row[13].upper()
#Using RegEx algorithm to find ELM matches in the dssp sequence
with open(ELMS_FILE) as elms:
for line in elms:
if line[0] == "#":
continue
line = line.strip().split('\t')
regex = r'%s' % line[4].replace('"','')
matches = re.finditer(regex, seq)
for matchNum, match in enumerate(matches):
match = ELMmatch(
elm_name=line[1].replace('"',''),
elm_start=match.start(),
elm_end=match.end(),
elm_seq=match.group(),
dssp_file=filename,
pdb=str(os.path.basename(filename).split(".")[0]),
)
ELMmaches.append(match)
#print("Match {matchNum} was found at {start}-{end}: {match}".format(matchNum=matchNum, start=match.start(),
#end=match.end(), match=match.group()), line[1])
@lru_cache(maxsize=None)
def proc_dssp(dsspfilename):
dssp = []
with open(dsspfilename) as dsspfile:
for line in dsspfile:
if line[2] == '#':
break
for row in dsspfile:
aa = row[13].upper()
qacc = int(row[35:38].strip())
rel_acc = 0
if aa in Miller.keys():
qacc = int(row[35:38].strip())
rel_acc = qacc / Miller[aa]
dssp.append(DSSPline(
position=int(row[0:6].strip()),
letter=aa,
sec_str=row[16],
QACC=qacc,
QSSE=qsse_dict[row[16]],
Accessibility=rel_acc,
))
return dssp
def get_scores():
# SELECT elm_name, elm_start, elm_end, elm_seq FROM elm_to_prot
for m in ELMmaches:
qsse_match = 0
qacc_match = 0
dssp = proc_dssp(m.dssp_file)
match_lenght = len(m.elm_seq)
for seq_pos in range(m.elm_start, m.elm_end+1):
qsse_match += dssp[seq_pos-1].QSSE
qacc_match += dssp[seq_pos-1].QACC
Qsse = qsse_match / match_lenght
Qacc = qacc_match / match_lenght
m.Qand = Qsse + Qacc
def get_protein_id():
"""
Converts dssp pdb files names to uniprot ids
:return:
"""
for m in ELMmaches:
for u in UNIPROT["pdb2uniprot"][m.pdb]:
m.elm_prot_id.append(u)
logging.debug('Protein id done')
def get_domain():
# Getting domain data
domains = csv.reader(open(INT_DOMAINS_FILE), delimiter='\t')
next(domains)
ELM2domain = {}
for line in domains:
pfamid = line[1]
elmname = line[0]
if elmname not in ELM2domain:
ELM2domain[elmname] = []
ELM2domain[elmname].append(pfamid)
for m in ELMmaches:
if m.elm_name in ELM2domain:
m.elm_domain = ELM2domain[m.elm_name].copy()
for d in m.elm_domain:
for u in UNIPROT["pfam2uniprot"][d]:
if UNIPROT["uniprotac2taxid"][u] == m.taxid:
m.domain_prot_id.append(u)
logging.debug('Domain done')
def get_taxid():
for m in ELMmaches:
taxid = None
for u in m.elm_prot_id:
if u in UNIPROT["uniprotac2taxid"]:
taxid = UNIPROT["uniprotac2taxid"][u]
m.taxid = taxid
def insert_or_get_node_dict(id, idtype, taxid, node_names_to_id, db_api):
node_dict = {
"name": idtype.strip() + ':' + id.strip(),
"tax_id": taxid,
"alt_accession": None,
'pathways': None,
"aliases": None,
"topology": None
}
if not re.search("^[/\\.\\w-]+$", id):
print("WARNING: malformed node id: " + node_dict['name'])
return None
if node_dict['name'] in node_names_to_id:
node_dict['id'] = node_names_to_id[node_dict['name']]
else:
db_api.insert_unique_node(node_dict)
node_names_to_id[node_dict['name']] = node_dict['id']
return node_dict
def loadUniprotFile(filename):
UNIPROT["uniprotac2taxid"] = {}
UNIPROT["pdb2uniprot"] = defaultdict(list)
UNIPROT["pfam2uniprot"] = defaultdict(list)
with open(filename) as f:
f.readline()
for line in f:
cells = line.strip().split("\t")
if len(cells) != 4:
continue
uniprotac = cells[0]
taxid = int(cells[1])
pdbs = [c for c in cells[2].split(";") if c != ""]
pfams = [c for c in cells[3].split(";") if c != ""]
UNIPROT["uniprotac2taxid"][uniprotac] = taxid
for pdb in pdbs:
UNIPROT["pdb2uniprot"][pdb].append(uniprotac)
for pfam in pfams:
UNIPROT["pfam2uniprot"][pfam].append(uniprotac)
def main(logger):
    """Build the ELM->domain interaction database and save it to disk.

    Pipeline: load UniProt cross-references, scan dssp files for ELM motif
    matches, resolve protein/taxonomy/domain ids, then write every
    ELM-protein <-> domain-protein pair as an edge into a PsimiSQL DB.

    :param logger: unused; kept for interface compatibility with callers.
    """
    # Initiating the parser
    db_api = PsimiSQL(SQL_SEED)
    node_names_to_id = {}
    loadUniprotFile(UNIPROT_DATA_FILE)
    # NOTE(review): file_list is a module-level list defined elsewhere in
    # this file -- confirm it starts empty before this append loop.
    for fname in os.listdir('PTM/databases/ELMpred/dssp/LAB/'):
        file_list.append('PTM/databases/ELMpred/dssp/LAB/' + fname)
    i = 0
    for file in file_list:
        i += 1
        if i == 15000:  # hard cap on the number of dssp files processed
            break
        get_match(file)
    get_scores()
    get_protein_id()
    get_taxid()
    get_domain()
    logging.debug('Done creating elm map. Starting adding to DB structure')
    #SELECT elm_prot_id, domain_prot_id, taxid from elm_to_prot
    for m in ELMmaches:
        if len(m.domain_prot_id) > 0 and len(m.elm_prot_id) > 0:
            for m_elm_prot_id in m.elm_prot_id:
                for m_domain_prot_id in m.domain_prot_id:
                    # Creating the node dicts, if the node is already in the db assigning that to the node dict
                    source_dict = insert_or_get_node_dict(m_elm_prot_id, "Uniprot", m.taxid, node_names_to_id, db_api)
                    target_dict = insert_or_get_node_dict(m_domain_prot_id, "Uniprot", m.taxid, node_names_to_id, db_api)
                    # insert_or_get_node_dict returns None for malformed ids;
                    # skip such pairs instead of crashing on `'id' not in None`.
                    if source_dict is None or target_dict is None:
                        continue
                    # Nodes are inserted to the db if they are not in it yet
                    if 'id' not in source_dict:
                        db_api.insert_node(source_dict)
                    if 'id' not in target_dict:
                        db_api.insert_node(target_dict)
                    edge_dict = {
                        'publication_ids': 'pubmed:26615199',
                        'layer': '2',
                        'source_db': DB_TYPE,  # ontology database citation
                        'interaction_identifiers': None,
                        'confidence_scores': None,  # if available
                        'interaction_detection_method': None,  # probably exp type
                        'interaction_types': 'MI:0190(interaction type)',
                        'first_author': None
                    }
                    db_api.insert_edge(source_dict, target_dict, edge_dict)
    # Saving the to a DB_TYPE.db files
    db_api.save_db_to_file(EXPORT_DB_LOCATION)
if __name__ == '__main__':
    # Script entry point: build the ELM-domain interaction database and
    # export it as an SQLite file at EXPORT_DB_LOCATION.
    print("Parsing database...")
    main(logger=None)
    print("Parsing database is completed. SQLite database is saved to: " + EXPORT_DB_LOCATION)
|
en
| 0.684012
|
Maps ELMs to their protein IDs and the interacting domain's protein ID and inserts the two into an SQL database. :argument: EXPORT_DB_LOCATION: saving location of the final database :argument: ELMS_FILE: all ELM classes of the four used species in a .tsv files: http://elm.eu.org/classes/ :argument: INT_DOMAINS_FILE: files containing ELM names and their interacting domain PFAM ids in a .tsv files: http://elm.eu.org/interactiondomains :argument: PROT_LIST: list of files for each species used, containing their whole proteomes from UniProt in .fa files # Imports # Defining constants # Residue max accessibility # Getting ELM data #elms = csv.reader(open(ELMS_FILE), delimiter='\t') #next(elms) #Getting sequence data from dssp #Using RegEx algorithm to find ELM matches in the dssp sequence #print("Match {matchNum} was found at {start}-{end}: {match}".format(matchNum=matchNum, start=match.start(), #end=match.end(), match=match.group()), line[1]) # SELECT elm_name, elm_start, elm_end, elm_seq FROM elm_to_prot Converts dssp pdb files names to uniprot ids :return: # Getting domain data # Initiating the parser #SELECT elm_prot_id, domain_prot_id, taxid from elm_to_prot # Creating the node dicts, if the node is already in the db assigning that to the node dict # Nodes are inserted to the db if they are not in it yet # ontology database citation # if available # probably exp type # Saving the to a DB_TYPE.db files
| 2.567978
| 3
|
src/split_multiple_demlimiters.py
|
famavott/codewars-katas
| 0
|
6628634
|
<reponame>famavott/codewars-katas
"""Split string by multiple delimiters."""
def multiple_split(string, delimiters=None):
    """Split *string* on every delimiter in *delimiters*.

    Replaces each delimiter with a space and then whitespace-splits, so
    consecutive delimiters collapse. With no/empty delimiters the whole
    string is returned as a one-element list.

    Note: the original signature used the mutable default ``delimiters=[]``;
    a ``None`` sentinel avoids that anti-pattern while keeping the same
    observable behavior for all existing call patterns.
    """
    if delimiters is None or delimiters == []:
        return [string]
    for delim in delimiters:
        string = string.replace(delim, ' ')
    return string.split()
|
"""Split string by multiple delimiters."""
def multiple_split(string, delimiters=[]):
    """Split *string* on each delimiter by mapping delimiters to spaces."""
    if delimiters == []:
        return [string]
    normalized = string
    for delim in delimiters:
        normalized = normalized.replace(delim, ' ')
    return normalized.split()
|
en
| 0.709072
|
Split string by multiple delimiters. .
| 3.98365
| 4
|
util/util.py
|
caixin1998/pl-template
| 1
|
6628635
|
"""This module contains simple helper functions """
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
import pytorch_lightning as pl
import numpy as np
import torch
import torch.nn.functional as F
def tensor2im(input_image, imtype=np.uint8):
""""Converts a Tensor array into a numpy image array.
Parameters:
input_image (tensor) -- the input image tensor array
imtype (type) -- the desired type of the converted numpy array
"""
if not isinstance(input_image, np.ndarray):
if isinstance(input_image, torch.Tensor): # get the data from a variable
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array
if image_numpy.shape[0] == 1: # grayscale to RGB
image_numpy = np.tile(image_numpy, (3, 1, 1))
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 # post-processing: tranpose and scaling
else: # if it is a numpy array, do nothing
image_numpy = input_image
return image_numpy.astype(imtype)
def diagnose_network(net, name='network'):
    """Print the mean of the per-parameter average absolute gradients.

    Parameters:
        net (torch network) -- Torch network
        name (str)          -- the name of the network

    Parameters without gradients are ignored; prints 0.0 when none have one.
    """
    grad_means = [torch.mean(torch.abs(p.grad.data))
                  for p in net.parameters() if p.grad is not None]
    mean = sum(grad_means) / len(grad_means) if grad_means else 0.0
    print(name)
    print(mean)
def save_image(image_numpy, image_path, aspect_ratio=1.0):
    """Save a numpy image to disk, optionally stretching one axis.

    Parameters:
        image_numpy (numpy array) -- input numpy array (H x W x C)
        image_path (str)          -- the path of the image
        aspect_ratio (float)      -- >1 widens, <1 heightens the saved image
    """
    h, w, _ = image_numpy.shape
    image_pil = Image.fromarray(image_numpy)
    # NOTE(review): PIL resize takes (width, height); passing h first looks
    # swapped for non-square images -- confirm against callers.
    if aspect_ratio > 1.0:
        image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
    elif aspect_ratio < 1.0:
        image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
    image_pil.save(image_path)
def print_numpy(x, val=True, shp=False):
    """Print summary statistics (mean/min/max/median/std) of a numpy array.

    Parameters:
        val (bool) -- if print the values of the numpy array
        shp (bool) -- if print the shape of the numpy array
    """
    x = x.astype(np.float64)
    if shp:
        print('shape,', x.shape)
    if not val:
        return
    flat = x.flatten()
    print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
        np.mean(flat), np.min(flat), np.max(flat), np.median(flat), np.std(flat)))
def mkdirs(paths):
    """Create every directory in *paths* if it does not exist yet.

    Parameters:
        paths (str list) -- a list of directory paths (a single str is
                            also accepted)
    """
    # De Morgan of the list-and-not-str check: a str (or any non-list)
    # is treated as a single path.
    if isinstance(paths, str) or not isinstance(paths, list):
        mkdir(paths)
    else:
        for directory in paths:
            mkdir(directory)
def mkdir(path):
    """Create a single directory (including parents) if it is missing.

    Parameters:
        path (str) -- a single directory path
    """
    if os.path.exists(path):
        return  # nothing to do -- also covers a pre-existing file at path
    os.makedirs(path)
class AverageMeter(object):
    """Track a running average of a scalar metric.

    Keeps the latest value, the running sum, the observation count, and the
    mean; ``reset()`` clears everything back to zero.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all accumulated statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
class SetupCallback(pl.callbacks.Callback):
    """Lightning callback toggling ``torch.backends.cudnn.benchmark`` per phase."""
    def on_train_epoch_start(self, trainer, pl_module):
        """Enable CuDNN autotuning: fastest kernels for fixed-size training batches."""
        torch.backends.cudnn.benchmark = True
    def on_validation_epoch_start(self, trainer, pl_module):
        # Disable CuDNN probing for variable length inference.
        torch.backends.cudnn.benchmark = False
def pitchyaw_to_vector(pitchyaws):
    r"""Convert pitch/yaw angle pairs to unit 3D gaze vectors.

    Args:
        pitchyaws (:obj:`numpy.array`): pitch and yaw angles :math:`(n\times 2)` in radians.

    Returns:
        :obj:`numpy.array` of shape :math:`(n\times 3)` with 3D vectors per row.
    """
    pitch = pitchyaws[:, 0]
    yaw = pitchyaws[:, 1]
    # column 0 is pitch, column 1 is yaw; y is "up"
    return np.stack([
        np.cos(pitch) * np.sin(yaw),
        np.sin(pitch),
        np.cos(pitch) * np.cos(yaw),
    ], axis=1)
def vector_to_pitchyaw(vectors):
    r"""Convert 3D gaze vectors to pitch/yaw angle pairs.

    Args:
        vectors (:obj:`numpy.array`): gaze vectors in 3D :math:`(n\times 3)`.

    Returns:
        :obj:`numpy.array` of shape :math:`(n\times 2)` with values in radians.
    """
    norms = np.linalg.norm(vectors, axis=1, keepdims=True)
    unit = vectors / norms  # normalize each row before extracting angles
    pitch = np.arcsin(unit[:, 1])           # theta
    yaw = np.arctan2(unit[:, 0], unit[:, 2])  # phi
    return np.stack([pitch, yaw], axis=1)
def angular_error(a, b):
    """Calculate angular error (via cosine similarity).

    Two-column inputs are first converted from pitch/yaw to 3D vectors.
    Returns the per-row angle between *a* and *b* in degrees.
    """
    if a.shape[1] == 2:
        a = pitchyaw_to_vector(a)
    if b.shape[1] == 2:
        b = pitchyaw_to_vector(b)
    dot = (a * b).sum(axis=1)
    # Avoid zero-values (to avoid NaNs)
    norm_a = np.clip(np.linalg.norm(a, axis=1), 1e-7, None)
    norm_b = np.clip(np.linalg.norm(b, axis=1), 1e-7, None)
    cosine = dot / (norm_a * norm_b)
    return np.arccos(cosine) * 180.0 / np.pi
def torch_angular_error(a, b):
    """Return the summed angular distance in degrees between gaze directions.

    *a* is always pitch/yaw pairs; *b* may be pitch/yaw (2 cols) or already
    a 3D vector (3 cols). Cosine similarity is clamped away from +/-1 before
    acos, so even identical directions yield a tiny non-zero angle.
    """
    def to_vector(pitchyaws):
        # pitch/yaw -> unit 3D direction (y up)
        pitch, yaw = pitchyaws[:, 0], pitchyaws[:, 1]
        return torch.stack([torch.cos(pitch) * torch.sin(yaw),
                            torch.sin(pitch),
                            torch.cos(pitch) * torch.cos(yaw)], 1)

    y = to_vector(a)
    y_hat = to_vector(b) if b.shape[1] == 2 else b
    sim = F.cosine_similarity(y, y_hat, eps=1e-6)
    sim = F.hardtanh(sim, -1.0 + 1e-6, 1.0 - 1e-6)
    return torch.sum(torch.acos(sim) * 180.0 / np.pi)
|
"""This module contains simple helper functions """
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
import pytorch_lightning as pl
import numpy as np
import torch
import torch.nn.functional as F
def tensor2im(input_image, imtype=np.uint8):
""""Converts a Tensor array into a numpy image array.
Parameters:
input_image (tensor) -- the input image tensor array
imtype (type) -- the desired type of the converted numpy array
"""
if not isinstance(input_image, np.ndarray):
if isinstance(input_image, torch.Tensor): # get the data from a variable
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array
if image_numpy.shape[0] == 1: # grayscale to RGB
image_numpy = np.tile(image_numpy, (3, 1, 1))
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 # post-processing: tranpose and scaling
else: # if it is a numpy array, do nothing
image_numpy = input_image
return image_numpy.astype(imtype)
def diagnose_network(net, name='network'):
"""Calculate and print the mean of average absolute(gradients)
Parameters:
net (torch network) -- Torch network
name (str) -- the name of the network
"""
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def save_image(image_numpy, image_path, aspect_ratio=1.0):
"""Save a numpy image to the disk
Parameters:
image_numpy (numpy array) -- input numpy array
image_path (str) -- the path of the image
"""
image_pil = Image.fromarray(image_numpy)
h, w, _ = image_numpy.shape
if aspect_ratio > 1.0:
image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
if aspect_ratio < 1.0:
image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
image_pil.save(image_path)
def print_numpy(x, val=True, shp=False):
"""Print the mean, min, max, median, std, and size of a numpy array
Parameters:
val (bool) -- if print the values of the numpy array
shp (bool) -- if print the shape of the numpy array
"""
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def mkdirs(paths):
"""create empty directories if they don't exist
Parameters:
paths (str list) -- a list of directory paths
"""
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
"""create a single empty directory if it didn't exist
Parameters:
path (str) -- a single directory path
"""
if not os.path.exists(path):
os.makedirs(path)
class AverageMeter(object):
"""
Computes and stores the average and
current value.
"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class SetupCallback(pl.callbacks.Callback):
def on_train_epoch_start(self, trainer, pl_module):
torch.backends.cudnn.benchmark = True
def on_validation_epoch_start(self, trainer, pl_module):
# Disable CuDNN probing for variable length inference.
torch.backends.cudnn.benchmark = False
def pitchyaw_to_vector(pitchyaws):
r"""Convert given yaw (:math:`\theta`) and pitch (:math:`\phi`) angles to unit gaze vectors.
Args:
pitchyaws (:obj:`numpy.array`): yaw and pitch angles :math:`(n\times 2)` in radians.
Returns:
:obj:`numpy.array` of shape :math:`(n\times 3)` with 3D vectors per row.
"""
n = pitchyaws.shape[0]
sin = np.sin(pitchyaws)
cos = np.cos(pitchyaws)
out = np.empty((n, 3))
out[:, 0] = np.multiply(cos[:, 0], sin[:, 1])
out[:, 1] = sin[:, 0]
out[:, 2] = np.multiply(cos[:, 0], cos[:, 1])
return out
def vector_to_pitchyaw(vectors):
r"""Convert given gaze vectors to yaw (:math:`\theta`) and pitch (:math:`\phi`) angles.
Args:
vectors (:obj:`numpy.array`): gaze vectors in 3D :math:`(n\times 3)`.
Returns:
:obj:`numpy.array` of shape :math:`(n\times 2)` with values in radians.
"""
n = vectors.shape[0]
out = np.empty((n, 2))
vectors = np.divide(vectors, np.linalg.norm(vectors, axis=1).reshape(n, 1))
out[:, 0] = np.arcsin(vectors[:, 1]) # theta
out[:, 1] = np.arctan2(vectors[:, 0], vectors[:, 2]) # phi
return out
def angular_error(a, b):
"""Calculate angular error (via cosine similarity)."""
a = pitchyaw_to_vector(a) if a.shape[1] == 2 else a
b = pitchyaw_to_vector(b) if b.shape[1] == 2 else b
ab = np.sum(np.multiply(a, b), axis=1)
a_norm = np.linalg.norm(a, axis=1)
b_norm = np.linalg.norm(b, axis=1)
# Avoid zero-values (to avoid NaNs)
a_norm = np.clip(a_norm, a_min=1e-7, a_max=None)
b_norm = np.clip(b_norm, a_min=1e-7, a_max=None)
similarity = np.divide(ab, np.multiply(a_norm, b_norm))
return np.arccos(similarity) * 180.0 / np.pi
def torch_angular_error(a, b):
def pitchyaw_to_vector(pitchyaws):
sin = torch.sin(pitchyaws)
cos = torch.cos(pitchyaws)
return torch.stack([cos[:, 0] * sin[:, 1], sin[:, 0], cos[:, 0] * cos[:, 1]], 1)
def nn_angular_distance(a, b):
sim = F.cosine_similarity(a, b, eps=1e-6)
sim = F.hardtanh(sim, -1.0 + 1e-6, 1.0 - 1e-6)
return torch.acos(sim) * 180.0 / np.pi
y = pitchyaw_to_vector(a)
y_hat = b
if y_hat.shape[1] == 2:
y_hat = pitchyaw_to_vector(y_hat)
return torch.sum(nn_angular_distance(y, y_hat))
|
en
| 0.572605
|
This module contains simple helper functions "Converts a Tensor array into a numpy image array. Parameters: input_image (tensor) -- the input image tensor array imtype (type) -- the desired type of the converted numpy array # get the data from a variable # convert it into a numpy array # grayscale to RGB # post-processing: tranpose and scaling # if it is a numpy array, do nothing Calculate and print the mean of average absolute(gradients) Parameters: net (torch network) -- Torch network name (str) -- the name of the network Save a numpy image to the disk Parameters: image_numpy (numpy array) -- input numpy array image_path (str) -- the path of the image Print the mean, min, max, median, std, and size of a numpy array Parameters: val (bool) -- if print the values of the numpy array shp (bool) -- if print the shape of the numpy array create empty directories if they don't exist Parameters: paths (str list) -- a list of directory paths create a single empty directory if it didn't exist Parameters: path (str) -- a single directory path Computes and stores the average and current value. # Disable CuDNN probing for variable length inference. Convert given yaw (:math:`\theta`) and pitch (:math:`\phi`) angles to unit gaze vectors. Args: pitchyaws (:obj:`numpy.array`): yaw and pitch angles :math:`(n\times 2)` in radians. Returns: :obj:`numpy.array` of shape :math:`(n\times 3)` with 3D vectors per row. Convert given gaze vectors to yaw (:math:`\theta`) and pitch (:math:`\phi`) angles. Args: vectors (:obj:`numpy.array`): gaze vectors in 3D :math:`(n\times 3)`. Returns: :obj:`numpy.array` of shape :math:`(n\times 2)` with values in radians. # theta # phi Calculate angular error (via cosine similarity). # Avoid zero-values (to avoid NaNs)
| 3.081297
| 3
|
tests/scripts/thread-cert/node_api.py
|
ltaoti/openthread
| 1
|
6628636
|
<gh_stars>1-10
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import os
import sys
import time
import ctypes
class otApi:
def __init__(self, nodeid):
self.verbose = int(float(os.getenv('VERBOSE', 0)))
self.__init_dll(nodeid)
def __del__(self):
self.Api.otNodeFinalize(self.otNode)
def set_mode(self, mode):
if self.Api.otNodeSetMode(self.otNode, mode.encode('utf-8')) != 0:
raise OSError("otNodeSetMode failed!")
def interface_up(self):
if self.Api.otNodeInterfaceUp(self.otNode) != 0:
raise OSError("otNodeInterfaceUp failed!")
def interface_down(self):
if self.Api.otNodeInterfaceDown(self.otNode) != 0:
raise OSError("otNodeInterfaceDown failed!")
def thread_start(self):
if self.Api.otNodeThreadStart(self.otNode) != 0:
raise OSError("otNodeThreadStart failed!")
def thread_stop(self):
if self.Api.otNodeThreadStop(self.otNode) != 0:
raise OSError("otNodeThreadStop failed!")
def commissioner_start(self):
if self.Api.otNodeCommissionerStart(self.otNode) != 0:
raise OSError("otNodeCommissionerStart failed!")
def commissioner_add_joiner(self, addr, psk):
if self.Api.otNodeCommissionerJoinerAdd(self.otNode, addr.encode('utf-8'), psk.encode('utf-8')) != 0:
raise OSError("otNodeCommissionerJoinerAdd failed!")
def joiner_start(self, pskd='', provisioning_url=''):
if self.Api.otNodeJoinerStart(self.otNode, pskd.encode('utf-8'), provisioning_url.encode('utf-8')) != 0:
raise OSError("otNodeJoinerStart failed!")
def clear_whitelist(self):
if self.Api.otNodeClearWhitelist(self.otNode) != 0:
raise OSError("otNodeClearWhitelist failed!")
def enable_whitelist(self):
if self.Api.otNodeEnableWhitelist(self.otNode) != 0:
raise OSError("otNodeEnableWhitelist failed!")
def disable_whitelist(self):
if self.Api.otNodeDisableWhitelist(self.otNode) != 0:
raise OSError("otNodeDisableWhitelist failed!")
def add_whitelist(self, addr, rssi=None):
if rssi == None:
rssi = 0
if self.Api.otNodeAddWhitelist(self.otNode, addr.encode('utf-8'), ctypes.c_byte(rssi)) != 0:
raise OSError("otNodeAddWhitelist failed!")
def remove_whitelist(self, addr):
if self.Api.otNodeRemoveWhitelist(self.otNode, addr.encode('utf-8')) != 0:
raise OSError("otNodeRemoveWhitelist failed!")
def get_addr16(self):
return self.Api.otNodeGetAddr16(self.otNode)
def get_addr64(self):
return self.Api.otNodeGetAddr64(self.otNode).decode('utf-8')
def get_eui64(self):
return self.Api.otNodeGetEui64(self.otNode).decode('utf-8')
def get_joiner_id(self):
return self.Api.otNodeGetJoinerId(self.otNode).decode('utf-8')
def get_channel(self):
return self.Api.otNodeGetChannel(self.otNode)
def set_channel(self, channel):
if self.Api.otNodeSetChannel(self.otNode, ctypes.c_ubyte(channel)) != 0:
raise OSError("otNodeSetChannel failed!")
def get_masterkey(self):
return self.Api.otNodeGetMasterkey(self.otNode).decode("utf-8")
def set_masterkey(self, masterkey):
if self.Api.otNodeSetMasterkey(self.otNode, masterkey.encode('utf-8')) != 0:
raise OSError("otNodeSetMasterkey failed!")
def get_key_sequence_counter(self):
return self.Api.otNodeGetKeySequenceCounter(self.otNode)
def set_key_sequence_counter(self, key_sequence_counter):
if self.Api.otNodeSetKeySequenceCounter(self.otNode, ctypes.c_uint(key_sequence_counter)) != 0:
raise OSError("otNodeSetKeySequenceCounter failed!")
def set_key_switch_guardtime(self, key_switch_guardtime):
if self.Api.otNodeSetKeySwitchGuardTime(self.otNode, ctypes.c_uint(key_switch_guardtime)) != 0:
raise OSError("otNodeSetKeySwitchGuardTime failed!")
def set_network_id_timeout(self, network_id_timeout):
if self.Api.otNodeSetNetworkIdTimeout(self.otNode, ctypes.c_ubyte(network_id_timeout)) != 0:
raise OSError("otNodeSetNetworkIdTimeout failed!")
def get_network_name(self):
return self.Api.otNodeGetNetworkName(self.otNode).decode("utf-8")
def set_network_name(self, network_name):
if self.Api.otNodeSetNetworkName(self.otNode, network_name.encode('utf-8')) != 0:
raise OSError("otNodeSetNetworkName failed!")
def get_panid(self):
return int(self.Api.otNodeGetPanId(self.otNode))
def set_panid(self, panid):
if self.Api.otNodeSetPanId(self.otNode, ctypes.c_ushort(panid)) != 0:
raise OSError("otNodeSetPanId failed!")
def get_partition_id(self):
return int(self.Api.otNodeGetPartitionId(self.otNode))
def set_partition_id(self, partition_id):
if self.Api.otNodeSetPartitionId(self.otNode, ctypes.c_uint(partition_id)) != 0:
raise OSError("otNodeSetPartitionId failed!")
def set_router_upgrade_threshold(self, threshold):
if self.Api.otNodeSetRouterUpgradeThreshold(self.otNode, ctypes.c_ubyte(threshold)) != 0:
raise OSError("otNodeSetRouterUpgradeThreshold failed!")
def set_router_downgrade_threshold(self, threshold):
if self.Api.otNodeSetRouterDowngradeThreshold(self.otNode, ctypes.c_ubyte(threshold)) != 0:
raise OSError("otNodeSetRouterDowngradeThreshold failed!")
def release_router_id(self, router_id):
if self.Api.otNodeReleaseRouterId(self.otNode, ctypes.c_ubyte(router_id)) != 0:
raise OSError("otNodeReleaseRouterId failed!")
def get_state(self):
return self.Api.otNodeGetState(self.otNode).decode('utf-8')
def set_state(self, state):
if self.Api.otNodeSetState(self.otNode, state.encode('utf-8')) != 0:
raise OSError("otNodeSetState failed!")
def get_timeout(self):
return int(self.Api.otNodeGetTimeout(self.otNode))
def set_timeout(self, timeout):
if self.Api.otNodeSetTimeout(self.otNode, ctypes.c_uint(timeout)) != 0:
raise OSError("otNodeSetTimeout failed!")
def set_max_children(self, number):
if self.Api.otNodeSetMaxChildren(self.otNode, ctypes.c_ubyte(number)) != 0:
raise OSError("otNodeSetMaxChildren failed!")
def get_weight(self):
return int(self.Api.otNodeGetWeight(self.otNode))
def set_weight(self, weight):
if self.Api.otNodeSetWeight(self.otNode, ctypes.c_ubyte(weight)) != 0:
raise OSError("otNodeSetWeight failed!")
def add_ipaddr(self, ipaddr):
if self.Api.otNodeAddIpAddr(self.otNode, ipaddr.encode('utf-8')) != 0:
raise OSError("otNodeAddIpAddr failed!")
def get_addrs(self):
return self.Api.otNodeGetAddrs(self.otNode).decode("utf-8").split("\n")
def add_service(self, enterpriseNumber, serviceData, serverData):
raise OSError("otServerAddService wrapper not implemented!")
def remove_service(self, enterpriseNumber, serviceData):
raise OSError("otServerRemoveService wrapper not implemented!")
def get_context_reuse_delay(self):
return int(self.Api.otNodeGetContextReuseDelay(self.otNode))
def set_context_reuse_delay(self, delay):
if self.Api.otNodeSetContextReuseDelay(self.otNode, ctypes.c_uint(delay)) != 0:
raise OSError("otNodeSetContextReuseDelay failed!")
def add_prefix(self, prefix, flags, prf = 'med'):
if self.Api.otNodeAddPrefix(self.otNode, prefix.encode('utf-8'), flags.encode('utf-8'), prf.encode('utf-8')) != 0:
raise OSError("otNodeAddPrefix failed!")
def remove_prefix(self, prefix):
if self.Api.otNodeRemovePrefix(self.otNode, prefix.encode('utf-8')) != 0:
raise OSError("otNodeRemovePrefix failed!")
def add_route(self, prefix, prf = 'med'):
if self.Api.otNodeAddRoute(self.otNode, prefix.encode('utf-8'), prf.encode('utf-8')) != 0:
raise OSError("otNodeAddRoute failed!")
def remove_route(self, prefix):
if self.Api.otNodeRemoveRoute(self.otNode, prefix.encode('utf-8')) != 0:
raise OSError("otNodeRemovePrefix failed!")
def register_netdata(self):
if self.Api.otNodeRegisterNetdata(self.otNode) != 0:
raise OSError("otNodeRegisterNetdata failed!")
def energy_scan(self, mask, count, period, scan_duration, ipaddr):
if self.Api.otNodeEnergyScan(self.otNode, ctypes.c_uint(mask), ctypes.c_ubyte(count), ctypes.c_ushort(period), ctypes.c_ushort(scan_duration), ipaddr.encode('utf-8')) != 0:
raise OSError("otNodeEnergyScan failed!")
def panid_query(self, panid, mask, ipaddr):
if self.Api.otNodePanIdQuery(self.otNode, ctypes.c_ushort(panid), ctypes.c_uint(mask), ipaddr.encode('utf-8')) != 0:
raise OSError("otNodePanIdQuery failed!")
def scan(self):
return self.Api.otNodeScan(self.otNode).decode("utf-8").split("\n")
def ping(self, ipaddr, num_responses=1, size=None, timeout=5000):
if size == None:
size = 100
numberOfResponders = self.Api.otNodePing(self.otNode, ipaddr.encode('utf-8'), ctypes.c_ushort(size),
ctypes.c_uint(num_responses), ctypes.c_uint16(timeout))
return numberOfResponders >= num_responses
def set_router_selection_jitter(self, jitter):
if self.Api.otNodeSetRouterSelectionJitter(self.otNode, ctypes.c_ubyte(jitter)) != 0:
raise OSError("otNodeSetRouterSelectionJitter failed!")
def set_active_dataset(self, timestamp, panid=None, channel=None, channel_mask=None, master_key=None):
if panid == None:
panid = 0
if channel == None:
channel = 0
if channel_mask == None:
channel_mask = 0
if master_key == None:
master_key = ""
if self.Api.otNodeSetActiveDataset(
self.otNode,
ctypes.c_ulonglong(timestamp),
ctypes.c_ushort(panid),
ctypes.c_ushort(channel),
ctypes.c_uint(channel_mask),
master_key.encode('utf-8')
) != 0:
raise OSError("otNodeSetActiveDataset failed!")
def set_pending_dataset(self, pendingtimestamp, activetimestamp, panid=None, channel=None):
if pendingtimestamp == None:
pendingtimestamp = 0
if activetimestamp == None:
activetimestamp = 0
if panid == None:
panid = 0
if channel == None:
channel = 0
if self.Api.otNodeSetPendingDataset(
self.otNode,
ctypes.c_ulonglong(activetimestamp),
ctypes.c_ulonglong(pendingtimestamp),
ctypes.c_ushort(panid),
ctypes.c_ushort(channel)
) != 0:
raise OSError("otNodeSetPendingDataset failed!")
def announce_begin(self, mask, count, period, ipaddr):
if self.Api.otNodeCommissionerAnnounceBegin(self.otNode, ctypes.c_uint(mask), ctypes.c_ubyte(count), ctypes.c_ushort(period), ipaddr.encode('utf-8')) != 0:
raise OSError("otNodeCommissionerAnnounceBegin failed!")
def send_mgmt_active_set(self, active_timestamp=None, channel=None, channel_mask=None, extended_panid=None,
panid=None, master_key=None, mesh_local=None, network_name=None, binary=None):
if active_timestamp == None:
active_timestamp = 0
if panid == None:
panid = 0
if channel == None:
channel = 0
if channel_mask == None:
channel_mask = 0
if extended_panid == None:
extended_panid = ""
if master_key == None:
master_key = ""
if mesh_local == None:
mesh_local = ""
if network_name == None:
network_name = ""
if binary == None:
binary = ""
if self.Api.otNodeSendActiveSet(
self.otNode,
ctypes.c_ulonglong(active_timestamp),
ctypes.c_ushort(panid),
ctypes.c_ushort(channel),
ctypes.c_uint(channel_mask),
extended_panid.encode('utf-8'),
master_key.encode('utf-8'),
mesh_local.encode('utf-8'),
network_name.encode('utf-8'),
binary.encode('utf-8')
) != 0:
raise OSError("otNodeSendActiveSet failed!")
def send_mgmt_pending_set(self, pending_timestamp=None, active_timestamp=None, delay_timer=None, channel=None,
panid=None, master_key=None, mesh_local=None, network_name=None):
if pending_timestamp == None:
pending_timestamp = 0
if active_timestamp == None:
active_timestamp = 0
if delay_timer == None:
delay_timer = 0
if panid == None:
panid = 0
if channel == None:
channel = 0
if master_key == None:
master_key = ""
if mesh_local == None:
mesh_local = ""
if network_name == None:
network_name = ""
if self.Api.otNodeSendPendingSet(
self.otNode,
ctypes.c_ulonglong(active_timestamp),
ctypes.c_ulonglong(pending_timestamp),
ctypes.c_uint(delay_timer),
ctypes.c_ushort(panid),
ctypes.c_ushort(channel),
master_key.encode('utf-8'),
mesh_local.encode('utf-8'),
network_name.encode('utf-8')
) != 0:
raise OSError("otNodeSendPendingSet failed!")
def log(self, message):
self.Api.otNodeLog(message)
def __init_dll(self, nodeid):
""" Initialize the API from a Windows DLL. """
# Load the DLL
self.Api = ctypes.WinDLL("otnodeapi.dll")
if self.Api == None:
raise OSError("Failed to load otnodeapi.dll!")
# Define the functions
self.Api.otNodeLog.argtypes = [ctypes.c_char_p]
self.Api.otNodeInit.argtypes = [ctypes.c_uint]
self.Api.otNodeInit.restype = ctypes.c_void_p
self.Api.otNodeFinalize.argtypes = [ctypes.c_void_p]
self.Api.otNodeSetMode.argtypes = [ctypes.c_void_p,
ctypes.c_char_p]
self.Api.otNodeInterfaceUp.argtypes = [ctypes.c_void_p]
self.Api.otNodeInterfaceDown.argtypes = [ctypes.c_void_p]
self.Api.otNodeThreadStart.argtypes = [ctypes.c_void_p]
self.Api.otNodeThreadStop.argtypes = [ctypes.c_void_p]
self.Api.otNodeCommissionerStart.argtypes = [ctypes.c_void_p]
self.Api.otNodeCommissionerJoinerAdd.argtypes = [ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_char_p]
self.Api.otNodeCommissionerStop.argtypes = [ctypes.c_void_p]
self.Api.otNodeJoinerStart.argtypes = [ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_char_p]
self.Api.otNodeJoinerStop.argtypes = [ctypes.c_void_p]
self.Api.otNodeClearWhitelist.argtypes = [ctypes.c_void_p]
self.Api.otNodeEnableWhitelist.argtypes = [ctypes.c_void_p]
self.Api.otNodeDisableWhitelist.argtypes = [ctypes.c_void_p]
self.Api.otNodeAddWhitelist.argtypes = [ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_byte]
self.Api.otNodeRemoveWhitelist.argtypes = [ctypes.c_void_p,
ctypes.c_char_p]
self.Api.otNodeGetAddr16.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetAddr16.restype = ctypes.c_ushort
self.Api.otNodeGetAddr64.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetAddr64.restype = ctypes.c_char_p
self.Api.otNodeGetEui64.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetEui64.restype = ctypes.c_char_p
self.Api.otNodeGetJoinerId.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetJoinerId.restype = ctypes.c_char_p
self.Api.otNodeSetChannel.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
self.Api.otNodeGetChannel.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetChannel.restype = ctypes.c_ubyte
self.Api.otNodeSetMasterkey.argtypes = [ctypes.c_void_p,
ctypes.c_char_p]
self.Api.otNodeGetMasterkey.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetMasterkey.restype = ctypes.c_char_p
self.Api.otNodeGetKeySequenceCounter.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetKeySequenceCounter.restype = ctypes.c_uint
self.Api.otNodeSetKeySequenceCounter.argtypes = [ctypes.c_void_p,
ctypes.c_uint]
self.Api.otNodeSetKeySwitchGuardTime.argtypes = [ctypes.c_void_p,
ctypes.c_uint]
self.Api.otNodeSetNetworkIdTimeout.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
self.Api.otNodeGetNetworkName.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetNetworkName.restype = ctypes.c_char_p
self.Api.otNodeSetNetworkName.argtypes = [ctypes.c_void_p,
ctypes.c_char_p]
self.Api.otNodeGetPanId.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetPanId.restype = ctypes.c_ushort
self.Api.otNodeSetPanId.argtypes = [ctypes.c_void_p,
ctypes.c_ushort]
self.Api.otNodeGetPartitionId.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetPartitionId.restype = ctypes.c_uint
self.Api.otNodeSetPartitionId.argtypes = [ctypes.c_void_p,
ctypes.c_uint]
self.Api.otNodeSetRouterUpgradeThreshold.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
self.Api.otNodeSetRouterDowngradeThreshold.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
self.Api.otNodeReleaseRouterId.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
self.Api.otNodeGetState.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetState.restype = ctypes.c_char_p
self.Api.otNodeSetState.argtypes = [ctypes.c_void_p,
ctypes.c_char_p]
self.Api.otNodeGetTimeout.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetTimeout.restype = ctypes.c_uint
self.Api.otNodeSetTimeout.argtypes = [ctypes.c_void_p,
ctypes.c_uint]
self.Api.otNodeGetWeight.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetWeight.restype = ctypes.c_ubyte
self.Api.otNodeSetWeight.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
self.Api.otNodeAddIpAddr.argtypes = [ctypes.c_void_p,
ctypes.c_char_p]
self.Api.otNodeGetAddrs.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetAddrs.restype = ctypes.c_char_p
self.Api.otNodeGetContextReuseDelay.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetContextReuseDelay.restype = ctypes.c_uint
self.Api.otNodeSetContextReuseDelay.argtypes = [ctypes.c_void_p,
ctypes.c_uint]
self.Api.otNodeAddPrefix.argtypes = [ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_char_p,
ctypes.c_char_p]
self.Api.otNodeRemovePrefix.argtypes = [ctypes.c_void_p,
ctypes.c_char_p]
self.Api.otNodeAddRoute.argtypes = [ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_char_p]
self.Api.otNodeRemoveRoute.argtypes = [ctypes.c_void_p,
ctypes.c_char_p]
self.Api.otNodeRegisterNetdata.argtypes = [ctypes.c_void_p]
self.Api.otNodeEnergyScan.argtypes = [ctypes.c_void_p,
ctypes.c_uint,
ctypes.c_ubyte,
ctypes.c_ushort,
ctypes.c_ushort,
ctypes.c_char_p]
self.Api.otNodePanIdQuery.argtypes = [ctypes.c_void_p,
ctypes.c_ushort,
ctypes.c_uint,
ctypes.c_char_p]
self.Api.otNodeScan.argtypes = [ctypes.c_void_p]
self.Api.otNodeScan.restype = ctypes.c_char_p
self.Api.otNodePing.argtypes = [ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_ushort,
ctypes.c_uint,
ctypes.c_uint16]
self.Api.otNodePing.restype = ctypes.c_uint
self.Api.otNodeSetRouterSelectionJitter.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
self.Api.otNodeCommissionerAnnounceBegin.argtypes = [ctypes.c_void_p,
ctypes.c_uint,
ctypes.c_ubyte,
ctypes.c_ushort,
ctypes.c_char_p]
self.Api.otNodeSetActiveDataset.argtypes = [ctypes.c_void_p,
ctypes.c_ulonglong,
ctypes.c_ushort,
ctypes.c_ushort,
ctypes.c_uint,
ctypes.c_char_p]
self.Api.otNodeSetPendingDataset.argtypes = [ctypes.c_void_p,
ctypes.c_ulonglong,
ctypes.c_ulonglong,
ctypes.c_ushort,
ctypes.c_ushort]
self.Api.otNodeSendPendingSet.argtypes = [ctypes.c_void_p,
ctypes.c_ulonglong,
ctypes.c_ulonglong,
ctypes.c_uint,
ctypes.c_ushort,
ctypes.c_ushort,
ctypes.c_char_p,
ctypes.c_char_p,
ctypes.c_char_p]
self.Api.otNodeSendActiveSet.argtypes = [ctypes.c_void_p,
ctypes.c_ulonglong,
ctypes.c_ushort,
ctypes.c_ushort,
ctypes.c_uint,
ctypes.c_char_p,
ctypes.c_char_p,
ctypes.c_char_p,
ctypes.c_char_p,
ctypes.c_char_p]
self.Api.otNodeSetMaxChildren.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
# Initialize a new node
self.otNode = self.Api.otNodeInit(ctypes.c_uint(nodeid))
if self.otNode == None:
raise OSError("otNodeInit failed!")
|
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import os
import sys
import time
import ctypes
class otApi:
    def __init__(self, nodeid):
        """Create an OpenThread node wrapper backed by otnodeapi.dll.

        Args:
            nodeid: Numeric identifier forwarded to the native otNodeInit.
        """
        # VERBOSE env var enables extra output; float() first tolerates
        # values like "1.0" before the int() conversion.
        self.verbose = int(float(os.getenv('VERBOSE', 0)))
        self.__init_dll(nodeid)
    def __del__(self):
        """Release the native node handle when the wrapper is garbage-collected."""
        self.Api.otNodeFinalize(self.otNode)
def set_mode(self, mode):
if self.Api.otNodeSetMode(self.otNode, mode.encode('utf-8')) != 0:
raise OSError("otNodeSetMode failed!")
def interface_up(self):
if self.Api.otNodeInterfaceUp(self.otNode) != 0:
raise OSError("otNodeInterfaceUp failed!")
def interface_down(self):
if self.Api.otNodeInterfaceDown(self.otNode) != 0:
raise OSError("otNodeInterfaceDown failed!")
def thread_start(self):
if self.Api.otNodeThreadStart(self.otNode) != 0:
raise OSError("otNodeThreadStart failed!")
def thread_stop(self):
if self.Api.otNodeThreadStop(self.otNode) != 0:
raise OSError("otNodeThreadStop failed!")
def commissioner_start(self):
if self.Api.otNodeCommissionerStart(self.otNode) != 0:
raise OSError("otNodeCommissionerStart failed!")
def commissioner_add_joiner(self, addr, psk):
if self.Api.otNodeCommissionerJoinerAdd(self.otNode, addr.encode('utf-8'), psk.encode('utf-8')) != 0:
raise OSError("otNodeCommissionerJoinerAdd failed!")
def joiner_start(self, pskd='', provisioning_url=''):
if self.Api.otNodeJoinerStart(self.otNode, pskd.encode('utf-8'), provisioning_url.encode('utf-8')) != 0:
raise OSError("otNodeJoinerStart failed!")
def clear_whitelist(self):
if self.Api.otNodeClearWhitelist(self.otNode) != 0:
raise OSError("otNodeClearWhitelist failed!")
def enable_whitelist(self):
if self.Api.otNodeEnableWhitelist(self.otNode) != 0:
raise OSError("otNodeEnableWhitelist failed!")
def disable_whitelist(self):
if self.Api.otNodeDisableWhitelist(self.otNode) != 0:
raise OSError("otNodeDisableWhitelist failed!")
def add_whitelist(self, addr, rssi=None):
if rssi == None:
rssi = 0
if self.Api.otNodeAddWhitelist(self.otNode, addr.encode('utf-8'), ctypes.c_byte(rssi)) != 0:
raise OSError("otNodeAddWhitelist failed!")
def remove_whitelist(self, addr):
if self.Api.otNodeRemoveWhitelist(self.otNode, addr.encode('utf-8')) != 0:
raise OSError("otNodeRemoveWhitelist failed!")
def get_addr16(self):
return self.Api.otNodeGetAddr16(self.otNode)
def get_addr64(self):
return self.Api.otNodeGetAddr64(self.otNode).decode('utf-8')
def get_eui64(self):
return self.Api.otNodeGetEui64(self.otNode).decode('utf-8')
def get_joiner_id(self):
return self.Api.otNodeGetJoinerId(self.otNode).decode('utf-8')
def get_channel(self):
return self.Api.otNodeGetChannel(self.otNode)
def set_channel(self, channel):
if self.Api.otNodeSetChannel(self.otNode, ctypes.c_ubyte(channel)) != 0:
raise OSError("otNodeSetChannel failed!")
def get_masterkey(self):
return self.Api.otNodeGetMasterkey(self.otNode).decode("utf-8")
def set_masterkey(self, masterkey):
if self.Api.otNodeSetMasterkey(self.otNode, masterkey.encode('utf-8')) != 0:
raise OSError("otNodeSetMasterkey failed!")
def get_key_sequence_counter(self):
return self.Api.otNodeGetKeySequenceCounter(self.otNode)
def set_key_sequence_counter(self, key_sequence_counter):
if self.Api.otNodeSetKeySequenceCounter(self.otNode, ctypes.c_uint(key_sequence_counter)) != 0:
raise OSError("otNodeSetKeySequenceCounter failed!")
def set_key_switch_guardtime(self, key_switch_guardtime):
if self.Api.otNodeSetKeySwitchGuardTime(self.otNode, ctypes.c_uint(key_switch_guardtime)) != 0:
raise OSError("otNodeSetKeySwitchGuardTime failed!")
def set_network_id_timeout(self, network_id_timeout):
if self.Api.otNodeSetNetworkIdTimeout(self.otNode, ctypes.c_ubyte(network_id_timeout)) != 0:
raise OSError("otNodeSetNetworkIdTimeout failed!")
def get_network_name(self):
return self.Api.otNodeGetNetworkName(self.otNode).decode("utf-8")
def set_network_name(self, network_name):
if self.Api.otNodeSetNetworkName(self.otNode, network_name.encode('utf-8')) != 0:
raise OSError("otNodeSetNetworkName failed!")
def get_panid(self):
return int(self.Api.otNodeGetPanId(self.otNode))
def set_panid(self, panid):
if self.Api.otNodeSetPanId(self.otNode, ctypes.c_ushort(panid)) != 0:
raise OSError("otNodeSetPanId failed!")
def get_partition_id(self):
return int(self.Api.otNodeGetPartitionId(self.otNode))
def set_partition_id(self, partition_id):
if self.Api.otNodeSetPartitionId(self.otNode, ctypes.c_uint(partition_id)) != 0:
raise OSError("otNodeSetPartitionId failed!")
def set_router_upgrade_threshold(self, threshold):
if self.Api.otNodeSetRouterUpgradeThreshold(self.otNode, ctypes.c_ubyte(threshold)) != 0:
raise OSError("otNodeSetRouterUpgradeThreshold failed!")
def set_router_downgrade_threshold(self, threshold):
if self.Api.otNodeSetRouterDowngradeThreshold(self.otNode, ctypes.c_ubyte(threshold)) != 0:
raise OSError("otNodeSetRouterDowngradeThreshold failed!")
def release_router_id(self, router_id):
if self.Api.otNodeReleaseRouterId(self.otNode, ctypes.c_ubyte(router_id)) != 0:
raise OSError("otNodeReleaseRouterId failed!")
def get_state(self):
return self.Api.otNodeGetState(self.otNode).decode('utf-8')
def set_state(self, state):
if self.Api.otNodeSetState(self.otNode, state.encode('utf-8')) != 0:
raise OSError("otNodeSetState failed!")
def get_timeout(self):
return int(self.Api.otNodeGetTimeout(self.otNode))
def set_timeout(self, timeout):
if self.Api.otNodeSetTimeout(self.otNode, ctypes.c_uint(timeout)) != 0:
raise OSError("otNodeSetTimeout failed!")
def set_max_children(self, number):
if self.Api.otNodeSetMaxChildren(self.otNode, ctypes.c_ubyte(number)) != 0:
raise OSError("otNodeSetMaxChildren failed!")
def get_weight(self):
return int(self.Api.otNodeGetWeight(self.otNode))
def set_weight(self, weight):
if self.Api.otNodeSetWeight(self.otNode, ctypes.c_ubyte(weight)) != 0:
raise OSError("otNodeSetWeight failed!")
def add_ipaddr(self, ipaddr):
if self.Api.otNodeAddIpAddr(self.otNode, ipaddr.encode('utf-8')) != 0:
raise OSError("otNodeAddIpAddr failed!")
def get_addrs(self):
return self.Api.otNodeGetAddrs(self.otNode).decode("utf-8").split("\n")
def add_service(self, enterpriseNumber, serviceData, serverData):
raise OSError("otServerAddService wrapper not implemented!")
def remove_service(self, enterpriseNumber, serviceData):
raise OSError("otServerRemoveService wrapper not implemented!")
def get_context_reuse_delay(self):
return int(self.Api.otNodeGetContextReuseDelay(self.otNode))
def set_context_reuse_delay(self, delay):
if self.Api.otNodeSetContextReuseDelay(self.otNode, ctypes.c_uint(delay)) != 0:
raise OSError("otNodeSetContextReuseDelay failed!")
def add_prefix(self, prefix, flags, prf = 'med'):
if self.Api.otNodeAddPrefix(self.otNode, prefix.encode('utf-8'), flags.encode('utf-8'), prf.encode('utf-8')) != 0:
raise OSError("otNodeAddPrefix failed!")
def remove_prefix(self, prefix):
if self.Api.otNodeRemovePrefix(self.otNode, prefix.encode('utf-8')) != 0:
raise OSError("otNodeRemovePrefix failed!")
def add_route(self, prefix, prf = 'med'):
if self.Api.otNodeAddRoute(self.otNode, prefix.encode('utf-8'), prf.encode('utf-8')) != 0:
raise OSError("otNodeAddRoute failed!")
def remove_route(self, prefix):
if self.Api.otNodeRemoveRoute(self.otNode, prefix.encode('utf-8')) != 0:
raise OSError("otNodeRemovePrefix failed!")
def register_netdata(self):
if self.Api.otNodeRegisterNetdata(self.otNode) != 0:
raise OSError("otNodeRegisterNetdata failed!")
def energy_scan(self, mask, count, period, scan_duration, ipaddr):
if self.Api.otNodeEnergyScan(self.otNode, ctypes.c_uint(mask), ctypes.c_ubyte(count), ctypes.c_ushort(period), ctypes.c_ushort(scan_duration), ipaddr.encode('utf-8')) != 0:
raise OSError("otNodeEnergyScan failed!")
def panid_query(self, panid, mask, ipaddr):
if self.Api.otNodePanIdQuery(self.otNode, ctypes.c_ushort(panid), ctypes.c_uint(mask), ipaddr.encode('utf-8')) != 0:
raise OSError("otNodePanIdQuery failed!")
def scan(self):
return self.Api.otNodeScan(self.otNode).decode("utf-8").split("\n")
def ping(self, ipaddr, num_responses=1, size=None, timeout=5000):
if size == None:
size = 100
numberOfResponders = self.Api.otNodePing(self.otNode, ipaddr.encode('utf-8'), ctypes.c_ushort(size),
ctypes.c_uint(num_responses), ctypes.c_uint16(timeout))
return numberOfResponders >= num_responses
def set_router_selection_jitter(self, jitter):
if self.Api.otNodeSetRouterSelectionJitter(self.otNode, ctypes.c_ubyte(jitter)) != 0:
raise OSError("otNodeSetRouterSelectionJitter failed!")
def set_active_dataset(self, timestamp, panid=None, channel=None, channel_mask=None, master_key=None):
if panid == None:
panid = 0
if channel == None:
channel = 0
if channel_mask == None:
channel_mask = 0
if master_key == None:
master_key = ""
if self.Api.otNodeSetActiveDataset(
self.otNode,
ctypes.c_ulonglong(timestamp),
ctypes.c_ushort(panid),
ctypes.c_ushort(channel),
ctypes.c_uint(channel_mask),
master_key.encode('utf-8')
) != 0:
raise OSError("otNodeSetActiveDataset failed!")
def set_pending_dataset(self, pendingtimestamp, activetimestamp, panid=None, channel=None):
if pendingtimestamp == None:
pendingtimestamp = 0
if activetimestamp == None:
activetimestamp = 0
if panid == None:
panid = 0
if channel == None:
channel = 0
if self.Api.otNodeSetPendingDataset(
self.otNode,
ctypes.c_ulonglong(activetimestamp),
ctypes.c_ulonglong(pendingtimestamp),
ctypes.c_ushort(panid),
ctypes.c_ushort(channel)
) != 0:
raise OSError("otNodeSetPendingDataset failed!")
def announce_begin(self, mask, count, period, ipaddr):
if self.Api.otNodeCommissionerAnnounceBegin(self.otNode, ctypes.c_uint(mask), ctypes.c_ubyte(count), ctypes.c_ushort(period), ipaddr.encode('utf-8')) != 0:
raise OSError("otNodeCommissionerAnnounceBegin failed!")
def send_mgmt_active_set(self, active_timestamp=None, channel=None, channel_mask=None, extended_panid=None,
panid=None, master_key=None, mesh_local=None, network_name=None, binary=None):
if active_timestamp == None:
active_timestamp = 0
if panid == None:
panid = 0
if channel == None:
channel = 0
if channel_mask == None:
channel_mask = 0
if extended_panid == None:
extended_panid = ""
if master_key == None:
master_key = ""
if mesh_local == None:
mesh_local = ""
if network_name == None:
network_name = ""
if binary == None:
binary = ""
if self.Api.otNodeSendActiveSet(
self.otNode,
ctypes.c_ulonglong(active_timestamp),
ctypes.c_ushort(panid),
ctypes.c_ushort(channel),
ctypes.c_uint(channel_mask),
extended_panid.encode('utf-8'),
master_key.encode('utf-8'),
mesh_local.encode('utf-8'),
network_name.encode('utf-8'),
binary.encode('utf-8')
) != 0:
raise OSError("otNodeSendActiveSet failed!")
def send_mgmt_pending_set(self, pending_timestamp=None, active_timestamp=None, delay_timer=None, channel=None,
panid=None, master_key=None, mesh_local=None, network_name=None):
if pending_timestamp == None:
pending_timestamp = 0
if active_timestamp == None:
active_timestamp = 0
if delay_timer == None:
delay_timer = 0
if panid == None:
panid = 0
if channel == None:
channel = 0
if master_key == None:
master_key = ""
if mesh_local == None:
mesh_local = ""
if network_name == None:
network_name = ""
if self.Api.otNodeSendPendingSet(
self.otNode,
ctypes.c_ulonglong(active_timestamp),
ctypes.c_ulonglong(pending_timestamp),
ctypes.c_uint(delay_timer),
ctypes.c_ushort(panid),
ctypes.c_ushort(channel),
master_key.encode('utf-8'),
mesh_local.encode('utf-8'),
network_name.encode('utf-8')
) != 0:
raise OSError("otNodeSendPendingSet failed!")
def log(self, message):
self.Api.otNodeLog(message)
def __init_dll(self, nodeid):
""" Initialize the API from a Windows DLL. """
# Load the DLL
self.Api = ctypes.WinDLL("otnodeapi.dll")
if self.Api == None:
raise OSError("Failed to load otnodeapi.dll!")
# Define the functions
self.Api.otNodeLog.argtypes = [ctypes.c_char_p]
self.Api.otNodeInit.argtypes = [ctypes.c_uint]
self.Api.otNodeInit.restype = ctypes.c_void_p
self.Api.otNodeFinalize.argtypes = [ctypes.c_void_p]
self.Api.otNodeSetMode.argtypes = [ctypes.c_void_p,
ctypes.c_char_p]
self.Api.otNodeInterfaceUp.argtypes = [ctypes.c_void_p]
self.Api.otNodeInterfaceDown.argtypes = [ctypes.c_void_p]
self.Api.otNodeThreadStart.argtypes = [ctypes.c_void_p]
self.Api.otNodeThreadStop.argtypes = [ctypes.c_void_p]
self.Api.otNodeCommissionerStart.argtypes = [ctypes.c_void_p]
self.Api.otNodeCommissionerJoinerAdd.argtypes = [ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_char_p]
self.Api.otNodeCommissionerStop.argtypes = [ctypes.c_void_p]
self.Api.otNodeJoinerStart.argtypes = [ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_char_p]
self.Api.otNodeJoinerStop.argtypes = [ctypes.c_void_p]
self.Api.otNodeClearWhitelist.argtypes = [ctypes.c_void_p]
self.Api.otNodeEnableWhitelist.argtypes = [ctypes.c_void_p]
self.Api.otNodeDisableWhitelist.argtypes = [ctypes.c_void_p]
self.Api.otNodeAddWhitelist.argtypes = [ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_byte]
self.Api.otNodeRemoveWhitelist.argtypes = [ctypes.c_void_p,
ctypes.c_char_p]
self.Api.otNodeGetAddr16.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetAddr16.restype = ctypes.c_ushort
self.Api.otNodeGetAddr64.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetAddr64.restype = ctypes.c_char_p
self.Api.otNodeGetEui64.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetEui64.restype = ctypes.c_char_p
self.Api.otNodeGetJoinerId.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetJoinerId.restype = ctypes.c_char_p
self.Api.otNodeSetChannel.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
self.Api.otNodeGetChannel.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetChannel.restype = ctypes.c_ubyte
self.Api.otNodeSetMasterkey.argtypes = [ctypes.c_void_p,
ctypes.c_char_p]
self.Api.otNodeGetMasterkey.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetMasterkey.restype = ctypes.c_char_p
self.Api.otNodeGetKeySequenceCounter.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetKeySequenceCounter.restype = ctypes.c_uint
self.Api.otNodeSetKeySequenceCounter.argtypes = [ctypes.c_void_p,
ctypes.c_uint]
self.Api.otNodeSetKeySwitchGuardTime.argtypes = [ctypes.c_void_p,
ctypes.c_uint]
self.Api.otNodeSetNetworkIdTimeout.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
self.Api.otNodeGetNetworkName.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetNetworkName.restype = ctypes.c_char_p
self.Api.otNodeSetNetworkName.argtypes = [ctypes.c_void_p,
ctypes.c_char_p]
self.Api.otNodeGetPanId.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetPanId.restype = ctypes.c_ushort
self.Api.otNodeSetPanId.argtypes = [ctypes.c_void_p,
ctypes.c_ushort]
self.Api.otNodeGetPartitionId.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetPartitionId.restype = ctypes.c_uint
self.Api.otNodeSetPartitionId.argtypes = [ctypes.c_void_p,
ctypes.c_uint]
self.Api.otNodeSetRouterUpgradeThreshold.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
self.Api.otNodeSetRouterDowngradeThreshold.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
self.Api.otNodeReleaseRouterId.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
self.Api.otNodeGetState.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetState.restype = ctypes.c_char_p
self.Api.otNodeSetState.argtypes = [ctypes.c_void_p,
ctypes.c_char_p]
self.Api.otNodeGetTimeout.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetTimeout.restype = ctypes.c_uint
self.Api.otNodeSetTimeout.argtypes = [ctypes.c_void_p,
ctypes.c_uint]
self.Api.otNodeGetWeight.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetWeight.restype = ctypes.c_ubyte
self.Api.otNodeSetWeight.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
self.Api.otNodeAddIpAddr.argtypes = [ctypes.c_void_p,
ctypes.c_char_p]
self.Api.otNodeGetAddrs.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetAddrs.restype = ctypes.c_char_p
self.Api.otNodeGetContextReuseDelay.argtypes = [ctypes.c_void_p]
self.Api.otNodeGetContextReuseDelay.restype = ctypes.c_uint
self.Api.otNodeSetContextReuseDelay.argtypes = [ctypes.c_void_p,
ctypes.c_uint]
self.Api.otNodeAddPrefix.argtypes = [ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_char_p,
ctypes.c_char_p]
self.Api.otNodeRemovePrefix.argtypes = [ctypes.c_void_p,
ctypes.c_char_p]
self.Api.otNodeAddRoute.argtypes = [ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_char_p]
self.Api.otNodeRemoveRoute.argtypes = [ctypes.c_void_p,
ctypes.c_char_p]
self.Api.otNodeRegisterNetdata.argtypes = [ctypes.c_void_p]
self.Api.otNodeEnergyScan.argtypes = [ctypes.c_void_p,
ctypes.c_uint,
ctypes.c_ubyte,
ctypes.c_ushort,
ctypes.c_ushort,
ctypes.c_char_p]
self.Api.otNodePanIdQuery.argtypes = [ctypes.c_void_p,
ctypes.c_ushort,
ctypes.c_uint,
ctypes.c_char_p]
self.Api.otNodeScan.argtypes = [ctypes.c_void_p]
self.Api.otNodeScan.restype = ctypes.c_char_p
self.Api.otNodePing.argtypes = [ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_ushort,
ctypes.c_uint,
ctypes.c_uint16]
self.Api.otNodePing.restype = ctypes.c_uint
self.Api.otNodeSetRouterSelectionJitter.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
self.Api.otNodeCommissionerAnnounceBegin.argtypes = [ctypes.c_void_p,
ctypes.c_uint,
ctypes.c_ubyte,
ctypes.c_ushort,
ctypes.c_char_p]
self.Api.otNodeSetActiveDataset.argtypes = [ctypes.c_void_p,
ctypes.c_ulonglong,
ctypes.c_ushort,
ctypes.c_ushort,
ctypes.c_uint,
ctypes.c_char_p]
self.Api.otNodeSetPendingDataset.argtypes = [ctypes.c_void_p,
ctypes.c_ulonglong,
ctypes.c_ulonglong,
ctypes.c_ushort,
ctypes.c_ushort]
self.Api.otNodeSendPendingSet.argtypes = [ctypes.c_void_p,
ctypes.c_ulonglong,
ctypes.c_ulonglong,
ctypes.c_uint,
ctypes.c_ushort,
ctypes.c_ushort,
ctypes.c_char_p,
ctypes.c_char_p,
ctypes.c_char_p]
self.Api.otNodeSendActiveSet.argtypes = [ctypes.c_void_p,
ctypes.c_ulonglong,
ctypes.c_ushort,
ctypes.c_ushort,
ctypes.c_uint,
ctypes.c_char_p,
ctypes.c_char_p,
ctypes.c_char_p,
ctypes.c_char_p,
ctypes.c_char_p]
self.Api.otNodeSetMaxChildren.argtypes = [ctypes.c_void_p,
ctypes.c_ubyte]
# Initialize a new node
self.otNode = self.Api.otNodeInit(ctypes.c_uint(nodeid))
if self.otNode == None:
raise OSError("otNodeInit failed!")
|
en
| 0.709851
|
#!/usr/bin/env python # # Copyright (c) 2016, The OpenThread Authors. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # Initialize the API from a Windows DLL. # Load the DLL # Define the functions # Initialize a new node
| 1.440752
| 1
|
ATM-project/functions/utils.py
|
omzi/zuri-tasks
| 1
|
6628637
|
<reponame>omzi/zuri-tasks
import os
import re
from time import sleep
from rich import print
import simplejson as json
from rich.console import Console
console = Console()
def ordinal(n):
    """Return *n* with its English ordinal suffix, e.g. 1 -> '1st', 22 -> '22nd'."""
    # 11th-13th (and 111th, 211th, ...) are irregular: any value whose
    # remainder mod 100 falls in 4..20 takes 'th'.
    if 4 <= n % 100 <= 20:
        suffix = 'th'
    else:
        suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(n % 10, 'th')
    return f'{n}{suffix}'
def saveData(data):
    """Persist the user's bank details as JSON in the project root.

    The file 'data.json' is written two directory levels above this module.
    """
    # os.path.join keeps the path valid on non-Windows platforms (the
    # original hard-coded a '\\' separator), and the with-block guarantees
    # the file handle is closed even if serialization fails.
    projectRoot = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    dataFile = os.path.join(projectRoot, 'data.json')
    with open(dataFile, 'w+') as f:
        f.write(json.dumps(data))
def printMessage(state, content):
    """Render *content* on the console, styled according to *state*.

    'invalid' prints a red error line, 'valid' prints a green line; any
    other state prints nothing. Always pauses briefly afterwards so the
    user can read the message.
    """
    normalized = state.lower()
    if normalized == 'invalid':
        console.print(f'Error: {content}', style='bold red')
    elif normalized == 'valid':
        console.print(content, style='bold green')
    sleep(.75)
def validateInput(question, type, inputEmpty, inputInvalid, inputHidden):
    """Prompt the user until a non-empty answer of the requested type is given.

    Args:
        question: Prompt text shown to the user.
        type: Expected type of the answer (str or int).
        inputEmpty: Error message shown for an empty/whitespace-led answer.
        inputInvalid: Error message shown for an answer of the wrong type.
        inputHidden: If True, the answer is not echoed (password entry).

    Returns:
        The validated answer, coerced to int when `type` is int.
    """
    # NOTE: the parameter name `type` shadows the builtin but is kept to
    # preserve the public signature for keyword callers.
    while True:
        # Renamed from `input` so the builtin is no longer shadowed.
        response = console.input(f'\n[bold blue]{question}[/bold blue]\n❯❯ ', password = inputHidden)
        # re.match only inspects the start of the string, so this rejects
        # empty answers and answers that *begin* with whitespace.
        if not len(response) or re.match(r'\s', response):
            printMessage('invalid', inputEmpty)
            continue
        # console.input always returns str, so this branch is effectively a
        # no-op guard; kept for behavioral parity.
        if type == str and not isinstance(response, str):
            printMessage('invalid', inputInvalid)
            continue
        elif type == int:
            try:
                response = int(response)
            except ValueError:
                printMessage('invalid', inputInvalid)
                continue
        return response
def exit(data):
    """Persist *data*, print a farewell message, and terminate the process.

    NOTE: intentionally shadows the builtin exit(); kept for existing callers.
    """
    saveData(data)
    print('[bold blue_violet]Thank you for banking with us! Have a nice day :)[/bold blue_violet]')
    quit()
def checkExitIntent(message, data, action, bankOperations, accountNumber):
    """Ask a yes/no question and dispatch on the answer.

    'Y' runs *action(data)*; 'N' runs *bankOperations(data, accountNumber)*;
    anything else re-prompts (answers are case-insensitive).
    """
    while True:
        choice = console.input(f'[bold yellow]{message}[/bold yellow]' + '\n❯❯ ').upper()
        if choice == 'Y':
            action(data)
            break
        if choice == 'N':
            bankOperations(data, accountNumber)
            break
        printMessage('invalid', 'Invalid option. Reply Y for [Y]es & N for [N]o')
|
import os
import re
from time import sleep
from rich import print
import simplejson as json
from rich.console import Console
console = Console()
def ordinal(n):
# Add ordinal suffix for the day of the month; i.e. 'st', 'nd', 'rd' or 'th'
return str(n) + ('th' if 4 <= n % 100 <= 20 else {1: 'st', 2: 'nd', 3: 'rd'}.get(n % 10, 'th'))
def saveData(data):
# Saves a user's bank details to a JSON file
dataFile = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '\\' + 'data.json'
open(dataFile, 'w+').write(json.dumps(data))
def printMessage(state, content):
# Prints a message and styles it based on a specified state
if state.lower() == 'invalid':
console.print(f'Error: {content}', style='bold red')
elif state.lower() == 'valid':
console.print(content, style='bold green')
sleep(.75)
def validateInput(question, type, inputEmpty, inputInvalid, inputHidden):
input = None
while True:
input = console.input(f'\n[bold blue]{question}[/bold blue]\n❯❯ ', password = inputHidden)
if not len(input) or re.match(r'\s', input):
printMessage('invalid', inputEmpty)
continue
if type == str and not isinstance(input, str):
printMessage('invalid', inputInvalid)
continue
elif type == int:
try:
input = int(input)
except ValueError:
printMessage('invalid', inputInvalid)
continue
return input
def exit(data):
saveData(data)
print('[bold blue_violet]Thank you for banking with us! Have a nice day :)[/bold blue_violet]')
quit()
def checkExitIntent(message, data, action, bankOperations, accountNumber):
isValidOption = True
while isValidOption:
option = console.input(f'[bold yellow]{message}[/bold yellow]' + '\n❯❯ ')
if option.upper() == 'Y':
isValidOption = False
action(data)
elif option.upper() == 'N':
isValidOption = False
bankOperations(data, accountNumber)
else:
printMessage('invalid', 'Invalid option. Reply Y for [Y]es & N for [N]o')
|
en
| 0.552677
|
# Add ordinal suffix for the day of the month; i.e. 'st', 'nd', 'rd' or 'th' # Saves a user's bank details to a JSON file # Prints a message and styles it based on a specified state
| 3.15528
| 3
|
minos/utils.py
|
RobertClay/Paper1
| 0
|
6628638
|
<gh_stars>0
"""
utility functions. A lot borrowed from vivarium population spenser to avoid importing that package.
"""
import argparse
import glob
import yaml
import numpy as np
import os
import pandas as pd
#import humanleague as hl
from scipy.sparse import coo_matrix
import scipy
from vivarium.config_tree import ConfigTree
DAYS_PER_YEAR = 365.25
DAYS_PER_MONTH = DAYS_PER_YEAR / 12
def get_config(config):
    """Load a vivarium YAML config file and wrap it in a ConfigTree."""
    with open(config) as stream:
        tree = ConfigTree(yaml.full_load(stream))
    return tree
# TODO Investigate the mock artifact manager. Not sure if this is what we should be using.
def base_plugins():
    """Return the ConfigTree of required plugins (mock artifact manager for testing)."""
    plugin_spec = {
        'required': {
            'data': {
                'controller': 'minos.testing.mock_artifact.MockArtifactManager',
                'builder_interface': 'vivarium.framework.artifact.ArtifactInterface',
            }
        }
    }
    return ConfigTree(plugin_spec)
def relEqual(x, y, tol=2 ** -26):
    """
    Test two numbers for relative equality within *tol*.

    The default tolerance is sqrt(double epsilon), i.e. roughly 7.5
    significant figures. Comparing against zero degenerates to an exact
    equality test, since relative error is undefined there.
    """
    if y == 0:
        return x == 0
    ratio = float(x) / float(y)
    return abs(ratio - 1.) < tol
def create_age_sex_marginal(est, lad):
    """
    Build the age-by-sex marginal for a single LAD from estimated (MYE/SNPP) data.
    """
    # TODO remove gender and age size hard-coding...
    lad_rows = est[est.GEOGRAPHY_CODE == lad].drop("GEOGRAPHY_CODE", axis=1)
    return unlistify(lad_rows, ["GENDER", "C_AGE"], [2, 86], "OBS_VALUE")
# this is a copy-paste from household_microsynth
def unlistify(table, columns, sizes, values):
    """
    Convert an n-column table of counts into an n-dimensional count array.

    The order of *columns* must match the dimension order given by *sizes*.
    """
    pivot = table.pivot_table(index=columns, values=values)
    counts = np.zeros(sizes, dtype=int)
    # MultiIndex codes are the integer positions along each dimension.
    counts[tuple(pivot.index.codes)] = pivot.values.flat
    return counts
def listify(array, valuename, colnames):
    """
    Flatten a multidimensional numpy array into a long-format DataFrame.

    Column colnames[i] holds the index along dimension i; *valuename*
    holds the corresponding array value.
    """
    index = pd.MultiIndex.from_product([range(n) for n in array.shape])
    frame = pd.DataFrame({valuename: pd.Series(index=index, data=array.flatten())})
    renames = {"level_" + str(i): name for i, name in enumerate(colnames)}
    return frame.reset_index().rename(renames, axis=1)
# this is a copy-paste from household_microsynth
def remap(indices, mapping):
    """Translate an array of index values back into their category values."""
    return [mapping[idx] for idx in indices]
def check_and_invert(columns, excluded):
    """
    Return the column names from *columns* that are not listed in *excluded*.

    *excluded* may be a single name or a list of names.
    """
    if isinstance(excluded, str):
        excluded = [excluded]
    kept = columns.tolist()
    for name in excluded:
        if name in kept:
            kept.remove(name)
    return kept
# TODO there is a lot of commonality in the 3 functions below
def cap_value(table, colname, maxval, sumcolname):
    """
    Cap *colname* at *maxval*, aggregating *sumcolname* over the excess rows.

    Rows with colname >= maxval are grouped over all remaining columns,
    their *sumcolname* values summed, and re-labelled with colname == maxval.
    Rows below the cap are passed through unchanged.
    """
    table_under = table[table[colname] < maxval].copy()
    group_cols = check_and_invert(table.columns.values, [colname, sumcolname])
    table_over = (table[table[colname] >= maxval]
                  .copy()
                  .groupby(group_cols)[sumcolname]
                  .sum()
                  .reset_index())
    table_over[colname] = maxval
    # DataFrame.append was removed in pandas 2.0; concat is the supported path.
    return pd.concat([table_under, table_over], sort=False)
def adjust_mye_age(mye):
    """
    Make mid-year estimate/snpp data conform with census age categories:
    - subtract 100 from age (so that "1" means under 1)
    - aggregate 86,87,88,89,90,91 into 86 (meaning 85+)

    NOTE: mutates the argument's C_AGE column in place.
    """
    # keep track of some totals so the reshape can be sanity-checked below
    pop = mye.OBS_VALUE.sum()
    pop_m = mye[mye.GENDER == 1].OBS_VALUE.sum()
    pop_a = mye[mye.GEOGRAPHY_CODE == "E06000015"].OBS_VALUE.sum()

    # this modifies the argument!
    mye.C_AGE -= 100

    mye_adj = mye[mye.C_AGE < 86].copy()
    mye_over85 = mye[mye.C_AGE > 85].copy()

    agg86 = mye_over85.pivot_table(index=["GEOGRAPHY_CODE", "GENDER"], values="OBS_VALUE", aggfunc=sum)
    agg86["C_AGE"] = 86
    agg86 = agg86.reset_index()

    # DataFrame.append was removed in pandas 2.0; use concat instead.
    mye_adj = pd.concat([mye_adj, agg86], ignore_index=True, sort=False)

    # ensure the totals in the adjusted table match the originals (within precision)
    assert relEqual(mye_adj.OBS_VALUE.sum(), pop)
    assert relEqual(mye_adj[mye_adj.GENDER == 1].OBS_VALUE.sum(), pop_m)
    assert relEqual(mye_adj[mye_adj.GEOGRAPHY_CODE == "E06000015"].OBS_VALUE.sum(), pop_a)
    return mye_adj
def adjust_pp_age(pp):
    """
    Make (s)npp data conform with census maximum categories:
    - aggregate 85,86,87,88,89,90 into 85 (meaning >=85)
    """
    # keep track of some totals so the reshape can be sanity-checked below
    pop = pp.OBS_VALUE.sum()
    pop_m = pp[pp.GENDER == 1].OBS_VALUE.sum()
    pop_a = pp[pp.GEOGRAPHY_CODE == "E06000015"].OBS_VALUE.sum()

    mye_adj = pp[pp.C_AGE < 85].copy()
    mye_over85 = pp[pp.C_AGE > 84].copy()

    agg86 = mye_over85.pivot_table(index=["GEOGRAPHY_CODE", "GENDER", "PROJECTED_YEAR_NAME"], values="OBS_VALUE",
                                   aggfunc=sum)
    agg86["C_AGE"] = 85
    agg86 = agg86.reset_index()

    # DataFrame.append was removed in pandas 2.0; use concat instead.
    mye_adj = pd.concat([mye_adj, agg86], ignore_index=True, sort=False)

    # ensure the totals in the adjusted table match the originals (within precision)
    assert relEqual(mye_adj.OBS_VALUE.sum(), pop)
    assert relEqual(mye_adj[mye_adj.GENDER == 1].OBS_VALUE.sum(), pop_m)
    assert relEqual(mye_adj[mye_adj.GEOGRAPHY_CODE == "E06000015"].OBS_VALUE.sum(), pop_a)
    return mye_adj
def check_result(msynth):
    """
    Validate a microsynthesis result, raising ValueError on failure.

    A string result is treated as an error message; otherwise the result
    must report convergence under its "conv" key.
    """
    if isinstance(msynth, str):
        raise ValueError(msynth)
    if not msynth["conv"]:
        print(msynth)
        raise ValueError("convergence failure")
def get_age_bucket(simulation_data):
    """
    Assign an age bucket to each row of an input population.

    Buckets: 0-15; 16-19; 20-24; 25-29; 30-44; 45-59; 60-74; 75+.

    Parameters
    ----------
    simulation_data : Dataframe
        Input data from the VPH simulation

    Returns:
    -------
    The same dataframe with a new "age_bucket" column added.
    """
    # Bucket edges and labels mirror the grouping used in the data file names.
    edges = [-1, 16, 20, 25, 30, 45, 60, 75, 200]
    labels = ["0to15", "16to19", "20to24", "25to29", "30to44", "45to59", "60to74", "75plus"]
    simulation_data.loc[:, "age_bucket"] = pd.cut(simulation_data['age'], bins=edges, labels=labels)
    return simulation_data
def to_years(time: pd.Timedelta) -> float:
    """Express *time* as a fractional number of (365.25-day) years."""
    year_length = pd.Timedelta(days=DAYS_PER_YEAR)
    return time / year_length
|
"""
utility functions. A lot borrowed from vivarium population spenser to avoid importing that package.
"""
import argparse
import glob
import yaml
import numpy as np
import os
import pandas as pd
#import humanleague as hl
from scipy.sparse import coo_matrix
import scipy
from vivarium.config_tree import ConfigTree
DAYS_PER_YEAR = 365.25
DAYS_PER_MONTH = DAYS_PER_YEAR / 12
def get_config(config):
# Open the vivarium config yaml.
with open(config) as config_file:
config = ConfigTree(yaml.full_load(config_file))
return config
# TODO Investigate the mock artifact manager. Not sure if this is what we should be using.
def base_plugins():
config = {'required': {
'data': {
'controller': 'minos.testing.mock_artifact.MockArtifactManager',
'builder_interface': 'vivarium.framework.artifact.ArtifactInterface'
}
}
}
return ConfigTree(config)
def relEqual(x, y, tol=2 ** -26):
"""
Simple test for relative equality of floating point within tolerance
Default tolerance is sqrt double epsilon i.e. about 7.5 significant figures
"""
if y == 0:
return x == 0
return abs(float(x) / float(y) - 1.) < tol
def create_age_sex_marginal(est, lad):
"""
Generate age-by-sex marginal from estimated (MYE/SNPP) data
"""
# TODO remove gender and age size hard-coding...
tmp = est[est.GEOGRAPHY_CODE == lad].drop("GEOGRAPHY_CODE", axis=1)
marginal = unlistify(tmp, ["GENDER", "C_AGE"], [2, 86], "OBS_VALUE")
return marginal
# this is a copy-paste from household_microsynth
def unlistify(table, columns, sizes, values):
"""
Converts an n-column table of counts into an n-dimensional array of counts
"""
pivot = table.pivot_table(index=columns, values=values)
# order must be same as column order above
array = np.zeros(sizes, dtype=int)
array[tuple(pivot.index.codes)] = pivot.values.flat
return array
def listify(array, valuename, colnames):
"""
converts a multidimensional numpy array into a pandas dataframe with colnames[0] referring to dimension 0, etc
and valuecolumn containing the array values
"""
multiindex = pd.MultiIndex.from_product([range(i) for i in array.shape])
colmapping = {"level_" + str(i): colnames[i] for i in range(len(colnames))}
return pd.DataFrame({valuename: pd.Series(index=multiindex, data=array.flatten())}).reset_index().rename(colmapping,
axis=1)
# this is a copy-paste from household_microsynth
def remap(indices, mapping):
"""
Converts array of index values back into category values
"""
# values = []
# for i in range(0, len(indices)):
# values.append(mapping[indices[i]])
values = [mapping[indices[i]] for i in range(len(indices))]
return values
def check_and_invert(columns, excluded):
"""
Returns the subset of column names that is not in excluded
"""
if isinstance(excluded, str):
excluded = [excluded]
included = columns.tolist()
for exclude in excluded:
if exclude in included:
included.remove(exclude)
return included
# TODO there is a lot of commonality in the 3 functions below
def cap_value(table, colname, maxval, sumcolname):
"""
Aggregates values in column colname
"""
table_under = table[table[colname] < maxval].copy()
table_over = \
table[table[colname] >= maxval].copy().groupby(check_and_invert(table.columns.values, [colname, sumcolname]))[
sumcolname].sum().reset_index()
table_over[colname] = maxval
return table_under.append(table_over, sort=False)
def adjust_mye_age(mye):
"""
Makes mid-year estimate/snpp data conform with census age categories:
- subtract 100 from age (so that "1" means under 1)
- aggregate 86,87,88,89,90,91 into 86 (meaning 85+)
"""
# keep track of some totals
pop = mye.OBS_VALUE.sum()
pop_m = mye[mye.GENDER == 1].OBS_VALUE.sum()
pop_a = mye[mye.GEOGRAPHY_CODE == "E06000015"].OBS_VALUE.sum()
# this modifies argument!
mye.C_AGE -= 100
mye_adj = mye[mye.C_AGE < 86].copy()
mye_over85 = mye[mye.C_AGE > 85].copy()
# print(myeOver85.head(12))
agg86 = mye_over85.pivot_table(index=["GEOGRAPHY_CODE", "GENDER"], values="OBS_VALUE", aggfunc=sum)
agg86["C_AGE"] = 86
agg86 = agg86.reset_index()
mye_adj = mye_adj.append(agg86, ignore_index=True, sort=False)
# ensure the totals in the adjusted table match the originals (within precision)
assert relEqual(mye_adj.OBS_VALUE.sum(), pop)
assert relEqual(mye_adj[mye_adj.GENDER == 1].OBS_VALUE.sum(), pop_m)
assert relEqual(mye_adj[mye_adj.GEOGRAPHY_CODE == "E06000015"].OBS_VALUE.sum(), pop_a)
return mye_adj
def adjust_pp_age(pp):
"""
Makes (s)npp data conform with census maximum categories:
- aggregate 85,86,87,88,89,90 into 85 (meaning >=85)
"""
# keep track of some totals
pop = pp.OBS_VALUE.sum()
pop_m = pp[pp.GENDER == 1].OBS_VALUE.sum()
pop_a = pp[pp.GEOGRAPHY_CODE == "E06000015"].OBS_VALUE.sum()
# pp.C_AGE += 1
mye_adj = pp[pp.C_AGE < 85].copy()
mye_over85 = pp[pp.C_AGE > 84].copy()
# print(myeOver85.head(12))
agg86 = mye_over85.pivot_table(index=["GEOGRAPHY_CODE", "GENDER", "PROJECTED_YEAR_NAME"], values="OBS_VALUE",
aggfunc=sum)
agg86["C_AGE"] = 85
agg86 = agg86.reset_index()
mye_adj = mye_adj.append(agg86, ignore_index=True, sort=False)
# ensure the totals in the adjusted table match the originals (within precision)
assert relEqual(mye_adj.OBS_VALUE.sum(), pop)
assert relEqual(mye_adj[mye_adj.GENDER == 1].OBS_VALUE.sum(), pop_m)
assert relEqual(mye_adj[mye_adj.GEOGRAPHY_CODE == "E06000015"].OBS_VALUE.sum(), pop_a)
return mye_adj
def check_result(msynth):
if isinstance(msynth, str):
raise ValueError(msynth)
elif not msynth["conv"]:
print(msynth)
raise ValueError("convergence failure")
def get_age_bucket(simulation_data):
"""
Assign age bucket to an input population. These are the age buckets:
0 - 15;
16 - 19;
20 - 24;
25 - 29;
30 - 44;
45 - 59;
60 - 74;
75 +
Parameters
----------
simulation_data : Dataframe
Input data from the VPH simulation
Returns:
-------
A dataframe with a new column with the age bucket.
"""
# Age buckets based on the file names
cut_bins = [-1, 16, 20, 25, 30, 45, 60, 75, 200]
cut_labels = ["0to15", "16to19", "20to24", "25to29", "30to44", "45to59", "60to74", "75plus"]
simulation_data.loc[:, "age_bucket"] = pd.cut(simulation_data['age'], bins=cut_bins, labels=cut_labels)
return simulation_data
def to_years(time: pd.Timedelta) -> float:
"""Converts a time delta to a float for years."""
return time / pd.Timedelta(days=DAYS_PER_YEAR)
|
en
| 0.776668
|
utility functions. A lot borrowed from vivarium population spenser to avoid importing that package. #import humanleague as hl # Open the vivarium config yaml. # TODO Investigate the mock artifact manager. Not sure if this is what we should be using. Simple test for relative equality of floating point within tolerance Default tolerance is sqrt double epsilon i.e. about 7.5 significant figures Generate age-by-sex marginal from estimated (MYE/SNPP) data # TODO remove gender and age size hard-coding... # this is a copy-paste from household_microsynth Converts an n-column table of counts into an n-dimensional array of counts # order must be same as column order above converts a multidimensional numpy array into a pandas dataframe with colnames[0] referring to dimension 0, etc and valuecolumn containing the array values # this is a copy-paste from household_microsynth Converts array of index values back into category values # values = [] # for i in range(0, len(indices)): # values.append(mapping[indices[i]]) Returns the subset of column names that is not in excluded # TODO there is a lot of commonality in the 3 functions below Aggregates values in column colname Makes mid-year estimate/snpp data conform with census age categories: - subtract 100 from age (so that "1" means under 1) - aggregate 86,87,88,89,90,91 into 86 (meaning 85+) # keep track of some totals # this modifies argument! # print(myeOver85.head(12)) # ensure the totals in the adjusted table match the originals (within precision) Makes (s)npp data conform with census maximum categories: - aggregate 85,86,87,88,89,90 into 85 (meaning >=85) # keep track of some totals # pp.C_AGE += 1 # print(myeOver85.head(12)) # ensure the totals in the adjusted table match the originals (within precision) Assign age bucket to an input population. 
These are the age buckets: 0 - 15; 16 - 19; 20 - 24; 25 - 29; 30 - 44; 45 - 59; 60 - 74; 75 + Parameters ---------- simulation_data : Dataframe Input data from the VPH simulation Returns: ------- A dataframe with a new column with the age bucket. # Age buckets based on the file names Converts a time delta to a float for years.
| 2.703597
| 3
|
lorator/connectors/__init__.py
|
fenestron/lorator
| 0
|
6628639
|
# -*- coding: utf-8 -*-
from .connector import Connector
from .mysql_connector import MySQLConnector
from .postgres_connector import PostgresConnector
from .sqlite_connector import SQLiteConnector
from .empty_connector import EmptyConnector
from .rds_postgres_connector import RdsPostgresConnector
|
# -*- coding: utf-8 -*-
from .connector import Connector
from .mysql_connector import MySQLConnector
from .postgres_connector import PostgresConnector
from .sqlite_connector import SQLiteConnector
from .empty_connector import EmptyConnector
from .rds_postgres_connector import RdsPostgresConnector
|
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 1.136328
| 1
|
import_3dm/read3dm.py
|
jesterKing/import_3dm
| 167
|
6628640
|
<reponame>jesterKing/import_3dm
# MIT License
# Copyright (c) 2018-2020 <NAME>, <NAME>, <NAME>, <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os.path
import bpy
import sys
import os
import site
def modules_path():
    """Return the user-level Blender addons/modules directory, creating it
    if needed and ensuring it is early on sys.path.

    This is where the addon installs its Python dependencies (rhino3dm).
    """
    # set up addons/modules under the user
    # script path. Here we'll install the
    # dependencies
    modulespath = os.path.normpath(
        os.path.join(
            bpy.utils.script_path_user(),
            "addons",
            "modules"
        )
    )
    if not os.path.exists(modulespath):
        os.makedirs(modulespath)
    # set user modules path at beginning of paths for earlier hit
    # NOTE(review): assumes sys.path has at least two entries; index 1 keeps
    # sys.path[0] (the script/current dir) in first position.
    if sys.path[1] != modulespath:
        sys.path.insert(1, modulespath)
    return modulespath

# Run at import time so the modules path is on sys.path before the
# rhino3dm import below is attempted.
modules_path()
def install_dependencies():
    """Install the rhino3dm package (bootstrapping pip if missing) into the
    user modules path, using the Python interpreter bundled with Blender.

    Raises Exception on any failure; see the console for manual
    installation instructions in that case.
    """
    modulespath = modules_path()
    try:
        from subprocess import run as sprun
        try:
            import pip
        except:
            # pip is not bundled everywhere; bootstrap it via ensurepip
            # from Blender's own Python installation.
            # NOTE(review): the trailing comma makes this a 1-tuple
            # expression statement (Python 2 leftover) — harmless here.
            print("Installing pip... "),
            pyver = ""
            if sys.platform != "win32":
                pyver = "python{}.{}".format(
                    sys.version_info.major,
                    sys.version_info.minor
                )
            ensurepip = os.path.normpath(
                os.path.join(
                    os.path.dirname(bpy.app.binary_path_python),
                    "..", "lib", pyver, "ensurepip"
                )
            )
            # install pip using the user scheme using the Python
            # version bundled with Blender
            res = sprun([bpy.app.binary_path_python, ensurepip, "--user"])
            if res.returncode == 0:
                import pip
            else:
                raise Exception("Failed to install pip.")

        print("Installing rhino3dm to {}... ".format(modulespath)),
        # if we eventually want to pin a certain version
        # we can add here something like "==0.0.5".
        # for now assume latest available is ok
        rhino3dm_version=""

        pip3 = "pip3"
        if sys.platform=="darwin":
            # on macOS resolve pip3 next to Blender's bundled Python binary
            pip3 = os.path.normpath(
            os.path.join(
                os.path.dirname(bpy.app.binary_path_python),
                "..",
                "bin",
                pip3
            )
        )

        # call pip in a subprocess so we don't have to mess
        # with internals. Also, this ensures the Python used to
        # install pip is going to be used
        res = sprun([pip3, "install", "--upgrade", "--target", modulespath, "rhino3dm{}".format(rhino3dm_version)])
        if res.returncode!=0:
            print("Please try manually installing rhino3dm with: pip3 install --upgrade --target {} rhino3dm".format(modulespath))
            raise Exception("Failed to install rhino3dm. See console for manual install instruction.")
    except:
        # NOTE(review): bare except replaces the specific error messages
        # raised above with this generic one.
        raise Exception("Failed to install dependencies. Please make sure you have pip installed.")
# TODO: add update mechanism
# Import rhino3dm, installing it on first use. Reloading a freshly
# installed native module within the same session is unreliable, so on a
# successful install we still raise and ask the user to restart Blender.
try:
    import rhino3dm as r3d
except:
    print("Failed to load rhino3dm, trying to install automatically...")
    try:
        install_dependencies()
        # let user restart Blender, reloading of rhino3dm after automated
        # install doesn't always work, better to just fail clearly before
        # that
        raise Exception("Please restart Blender.")
    except:
        raise
from . import converters
def read_3dm(context, options):
    """Import a Rhino .3dm file into the current Blender scene.

    Parameters
    ----------
    context : bpy.types.Context
        Active Blender context; new collections are created in its blend data.
    options : dict
        Importer settings: "filepath" plus the import_* / update_materials
        flags parsed below.

    Returns a Blender operator status set: {'FINISHED'} on success or
    {'CANCELLED'} if the file could not be read.
    """
    filepath = options.get("filepath", "")
    model = None
    try:
        model = r3d.File3dm.Read(filepath)
    except:
        print("Failed to import .3dm model: {}".format(filepath))
        return {'CANCELLED'}

    # Top-level collection is named after the file; reuse it if it exists.
    top_collection_name = os.path.splitext(os.path.basename(filepath))[0]
    if top_collection_name in context.blend_data.collections.keys():
        toplayer = context.blend_data.collections[top_collection_name]
    else:
        toplayer = context.blend_data.collections.new(name=top_collection_name)

    # Get proper scale for conversion (model units -> Blender scene units)
    scale = r3d.UnitSystem.UnitScale(model.Settings.ModelUnitSystem, r3d.UnitSystem.Meters) / context.scene.unit_settings.scale_length

    layerids = {}
    materials = {}

    # Parse options
    import_views = options.get("import_views", False)
    import_named_views = options.get("import_named_views", False)
    import_hidden_objects = options.get("import_hidden_objects", False)
    import_hidden_layers = options.get("import_hidden_layers", False)
    import_groups = options.get("import_groups", False)
    import_nested_groups = options.get("import_nested_groups", False)
    import_instances = options.get("import_instances",False)
    update_materials = options.get("update_materials", False)

    # Import Views and NamedViews
    if import_views:
        converters.handle_views(context, model, toplayer, model.Views, "Views", scale)
    if import_named_views:
        converters.handle_views(context, model, toplayer, model.NamedViews, "NamedViews", scale)

    # Handle materials
    converters.handle_materials(context, model, materials, update_materials)

    # Handle layers
    converters.handle_layers(context, model, toplayer, layerids, materials, update_materials, import_hidden_layers)
    materials[converters.DEFAULT_RHINO_MATERIAL] = None

    # build skeletal hierarchy of instance definitions as collections (will be populated by object importer)
    if import_instances:
        converters.handle_instance_definitions(context, model, toplayer, "Instance Definitions")

    # Handle objects
    for ob in model.Objects:
        og = ob.Geometry

        # Skip unsupported object types early
        if og.ObjectType not in converters.RHINO_TYPE_TO_IMPORT and og.ObjectType != r3d.ObjectType.InstanceReference:
            print("Unsupported object type: {}".format(og.ObjectType))
            continue

        #convert_rhino_object = converters.RHINO_TYPE_TO_IMPORT[og.ObjectType]

        # Check object and layer visibility
        attr = ob.Attributes
        if not attr.Visible and not import_hidden_objects:
            continue
        rhinolayer = model.Layers.FindIndex(attr.LayerIndex)
        if not rhinolayer.Visible and not import_hidden_layers:
            continue

        # Create object name: fall back to "<Type> <Id>" when unnamed
        if attr.Name == "" or attr.Name is None:
            n = str(og.ObjectType).split(".")[1]+" " + str(attr.Id)
        else:
            n = attr.Name

        # Get render material (object's own, or inherited from its layer)
        mat_index = ob.Attributes.MaterialIndex
        if ob.Attributes.MaterialSource == r3d.ObjectMaterialSource.MaterialFromLayer:
            mat_index = rhinolayer.RenderMaterialIndex
        rhino_material = model.Materials.FindIndex(mat_index)

        # Handle default material and fetch associated Blender material
        if rhino_material.Name == "":
            matname = converters.material.DEFAULT_RHINO_MATERIAL
        else:
            matname = converters.material_name(rhino_material)

        # Handle object view color (object's own, or inherited from its layer)
        if ob.Attributes.ColorSource == r3d.ObjectColorSource.ColorFromLayer:
            view_color = rhinolayer.Color
        else:
            view_color = ob.Attributes.ObjectColor

        rhinomat = materials[matname]

        # Fetch layer
        layer = layerids[str(rhinolayer.Id)][1]

        # Instance references are named after their instance definition.
        if og.ObjectType==r3d.ObjectType.InstanceReference and import_instances:
            n = model.InstanceDefinitions.FindId(og.ParentIdefId).Name

        # Convert object
        converters.convert_object(context, ob, n, layer, rhinomat, view_color, scale, options)

        #convert_rhino_object(og, context, n, attr.Name, attr.Id, layer, rhinomat, scale)
        if import_groups:
            converters.handle_groups(context,attr,toplayer,import_nested_groups)

    if import_instances:
        converters.populate_instance_definitions(context, model, toplayer, "Instance Definitions", options, scale)

    # finally link in the container collection (top layer) into the main
    # scene collection.
    try:
        context.blend_data.scenes[0].collection.children.link(toplayer)
        # NOTE(review): context-override dicts for operators were deprecated
        # in newer Blender versions — confirm against the supported Blender range.
        bpy.ops.object.shade_smooth({'selected_editable_objects': toplayer.all_objects})
    except Exception:
        pass
    return {'FINISHED'}
|
# MIT License
# Copyright (c) 2018-2020 <NAME>, <NAME>, <NAME>, <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os.path
import bpy
import sys
import os
import site
def modules_path():
# set up addons/modules under the user
# script path. Here we'll install the
# dependencies
modulespath = os.path.normpath(
os.path.join(
bpy.utils.script_path_user(),
"addons",
"modules"
)
)
if not os.path.exists(modulespath):
os.makedirs(modulespath)
# set user modules path at beginning of paths for earlier hit
if sys.path[1] != modulespath:
sys.path.insert(1, modulespath)
return modulespath
modules_path()
def install_dependencies():
modulespath = modules_path()
try:
from subprocess import run as sprun
try:
import pip
except:
print("Installing pip... "),
pyver = ""
if sys.platform != "win32":
pyver = "python{}.{}".format(
sys.version_info.major,
sys.version_info.minor
)
ensurepip = os.path.normpath(
os.path.join(
os.path.dirname(bpy.app.binary_path_python),
"..", "lib", pyver, "ensurepip"
)
)
# install pip using the user scheme using the Python
# version bundled with Blender
res = sprun([bpy.app.binary_path_python, ensurepip, "--user"])
if res.returncode == 0:
import pip
else:
raise Exception("Failed to install pip.")
print("Installing rhino3dm to {}... ".format(modulespath)),
# if we eventually want to pin a certain version
# we can add here something like "==0.0.5".
# for now assume latest available is ok
rhino3dm_version=""
pip3 = "pip3"
if sys.platform=="darwin":
pip3 = os.path.normpath(
os.path.join(
os.path.dirname(bpy.app.binary_path_python),
"..",
"bin",
pip3
)
)
# call pip in a subprocess so we don't have to mess
# with internals. Also, this ensures the Python used to
# install pip is going to be used
res = sprun([pip3, "install", "--upgrade", "--target", modulespath, "rhino3dm{}".format(rhino3dm_version)])
if res.returncode!=0:
print("Please try manually installing rhino3dm with: pip3 install --upgrade --target {} rhino3dm".format(modulespath))
raise Exception("Failed to install rhino3dm. See console for manual install instruction.")
except:
raise Exception("Failed to install dependencies. Please make sure you have pip installed.")
# TODO: add update mechanism
try:
import rhino3dm as r3d
except:
print("Failed to load rhino3dm, trying to install automatically...")
try:
install_dependencies()
# let user restart Blender, reloading of rhino3dm after automated
# install doesn't always work, better to just fail clearly before
# that
raise Exception("Please restart Blender.")
except:
raise
from . import converters
def read_3dm(context, options):
filepath = options.get("filepath", "")
model = None
try:
model = r3d.File3dm.Read(filepath)
except:
print("Failed to import .3dm model: {}".format(filepath))
return {'CANCELLED'}
top_collection_name = os.path.splitext(os.path.basename(filepath))[0]
if top_collection_name in context.blend_data.collections.keys():
toplayer = context.blend_data.collections[top_collection_name]
else:
toplayer = context.blend_data.collections.new(name=top_collection_name)
# Get proper scale for conversion
scale = r3d.UnitSystem.UnitScale(model.Settings.ModelUnitSystem, r3d.UnitSystem.Meters) / context.scene.unit_settings.scale_length
layerids = {}
materials = {}
# Parse options
import_views = options.get("import_views", False)
import_named_views = options.get("import_named_views", False)
import_hidden_objects = options.get("import_hidden_objects", False)
import_hidden_layers = options.get("import_hidden_layers", False)
import_groups = options.get("import_groups", False)
import_nested_groups = options.get("import_nested_groups", False)
import_instances = options.get("import_instances",False)
update_materials = options.get("update_materials", False)
# Import Views and NamedViews
if import_views:
converters.handle_views(context, model, toplayer, model.Views, "Views", scale)
if import_named_views:
converters.handle_views(context, model, toplayer, model.NamedViews, "NamedViews", scale)
# Handle materials
converters.handle_materials(context, model, materials, update_materials)
# Handle layers
converters.handle_layers(context, model, toplayer, layerids, materials, update_materials, import_hidden_layers)
materials[converters.DEFAULT_RHINO_MATERIAL] = None
#build skeletal hierarchy of instance definitions as collections (will be populated by object importer)
if import_instances:
converters.handle_instance_definitions(context, model, toplayer, "Instance Definitions")
# Handle objects
for ob in model.Objects:
og = ob.Geometry
# Skip unsupported object types early
if og.ObjectType not in converters.RHINO_TYPE_TO_IMPORT and og.ObjectType != r3d.ObjectType.InstanceReference:
print("Unsupported object type: {}".format(og.ObjectType))
continue
#convert_rhino_object = converters.RHINO_TYPE_TO_IMPORT[og.ObjectType]
# Check object and layer visibility
attr = ob.Attributes
if not attr.Visible and not import_hidden_objects:
continue
rhinolayer = model.Layers.FindIndex(attr.LayerIndex)
if not rhinolayer.Visible and not import_hidden_layers:
continue
# Create object name
if attr.Name == "" or attr.Name is None:
n = str(og.ObjectType).split(".")[1]+" " + str(attr.Id)
else:
n = attr.Name
# Get render material
mat_index = ob.Attributes.MaterialIndex
if ob.Attributes.MaterialSource == r3d.ObjectMaterialSource.MaterialFromLayer:
mat_index = rhinolayer.RenderMaterialIndex
rhino_material = model.Materials.FindIndex(mat_index)
# Handle default material and fetch associated Blender material
if rhino_material.Name == "":
matname = converters.material.DEFAULT_RHINO_MATERIAL
else:
matname = converters.material_name(rhino_material)
# Handle object view color
if ob.Attributes.ColorSource == r3d.ObjectColorSource.ColorFromLayer:
view_color = rhinolayer.Color
else:
view_color = ob.Attributes.ObjectColor
rhinomat = materials[matname]
# Fetch layer
layer = layerids[str(rhinolayer.Id)][1]
if og.ObjectType==r3d.ObjectType.InstanceReference and import_instances:
n = model.InstanceDefinitions.FindId(og.ParentIdefId).Name
# Convert object
converters.convert_object(context, ob, n, layer, rhinomat, view_color, scale, options)
#convert_rhino_object(og, context, n, attr.Name, attr.Id, layer, rhinomat, scale)
if import_groups:
converters.handle_groups(context,attr,toplayer,import_nested_groups)
if import_instances:
converters.populate_instance_definitions(context, model, toplayer, "Instance Definitions", options, scale)
# finally link in the container collection (top layer) into the main
# scene collection.
try:
context.blend_data.scenes[0].collection.children.link(toplayer)
bpy.ops.object.shade_smooth({'selected_editable_objects': toplayer.all_objects})
except Exception:
pass
return {'FINISHED'}
|
en
| 0.765376
|
# MIT License # Copyright (c) 2018-2020 <NAME>, <NAME>, <NAME>, <NAME> # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # set up addons/modules under the user # script path. Here we'll install the # dependencies # set user modules path at beginning of paths for earlier hit # install pip using the user scheme using the Python # version bundled with Blender # if we eventually want to pin a certain version # we can add here something like "==0.0.5". # for now assume latest available is ok # call pip in a subprocess so we don't have to mess # with internals. 
Also, this ensures the Python used to # install pip is going to be used # TODO: add update mechanism # let user restart Blender, reloading of rhino3dm after automated # install doesn't always work, better to just fail clearly before # that # Get proper scale for conversion # Parse options # Import Views and NamedViews # Handle materials # Handle layers #build skeletal hierarchy of instance definitions as collections (will be populated by object importer) # Handle objects # Skip unsupported object types early #convert_rhino_object = converters.RHINO_TYPE_TO_IMPORT[og.ObjectType] # Check object and layer visibility # Create object name # Get render material # Handle default material and fetch associated Blender material # Handle object view color # Fetch layer # Convert object #convert_rhino_object(og, context, n, attr.Name, attr.Id, layer, rhinomat, scale) # finally link in the container collection (top layer) into the main # scene collection.
| 1.835646
| 2
|
parser/tests/test_properties.py
|
spzala/tosca-parser
| 0
|
6628641
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from parser.common import exception
from parser.elements.templates.properties import Property
from parser.tests.base import TestCase
from parser.utils import yamlparser
class PropertyTest(TestCase):
    """Unit tests for ``Property``: validation across the supported data
    types (string, list, map, boolean, float, timestamp), entry_schema
    handling for collections, and the default required flag."""
    def test_type(self):
        """The declared schema type is exposed via the ``type`` attribute."""
        test_property_schema = {'type': 'string'}
        propertyInstance = Property('test_property', 'Hughes',
                                    test_property_schema)
        self.assertEqual('string', propertyInstance.type)
    def test_type_invalid(self):
        """validate() raises InvalidTypeError for an unknown type name."""
        test_property_schema = {'type': 'Fish'}
        propertyInstance = Property('test_property', 'Hughes',
                                    test_property_schema)
        error = self.assertRaises(exception.InvalidTypeError,
                                  propertyInstance.validate)
        self.assertEqual('Type "Fish" is not a valid type.', str(error))
    def test_list(self):
        """A Python list validates against type 'list' and is kept as-is."""
        test_property_schema = {'type': 'list'}
        propertyInstance = Property('test_property', ['a', 'b'],
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual(['a', 'b'], propertyInstance.value)
    def test_list_invalid(self):
        """A scalar value fails validation against type 'list'."""
        test_property_schema = {'type': 'list'}
        propertyInstance = Property('test_property', 'a',
                                    test_property_schema)
        error = self.assertRaises(ValueError, propertyInstance.validate)
        self.assertEqual('"a" is not a list', str(error))
    def test_list_entry_schema(self):
        """List entries are validated against entry_schema, both as a plain
        dict and as parsed YAML with constraints (min_length)."""
        test_property_schema = {'type': 'list',
                                'entry_schema': {'type': 'string'}}
        propertyInstance = Property('test_property', ['a', 'b'],
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual(['a', 'b'], propertyInstance.value)
        schema_snippet = '''
        type: list
        entry_schema:
          type: string
          constraints:
            - min_length: 2
        '''
        test_property_schema = yamlparser.simple_parse(schema_snippet)
        propertyInstance = Property('test_property', ['ab', 'cd'],
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual(['ab', 'cd'], propertyInstance.value)
    def test_list_entry_schema_invalid(self):
        """An element of the wrong entry type is rejected with its value in
        the error message."""
        test_property_schema = {'type': 'list',
                                'entry_schema': {'type': 'integer'}}
        propertyInstance = Property('test_property', [1, 'b'],
                                    test_property_schema)
        error = self.assertRaises(ValueError, propertyInstance.validate)
        self.assertEqual('"b" is not an integer',
                         str(error))
    def test_map(self):
        """A dict validates against type 'map' and is kept as-is."""
        test_property_schema = {'type': 'map'}
        propertyInstance = Property('test_property', {'a': 'b'},
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual({'a': 'b'}, propertyInstance.value)
    def test_map_invalid(self):
        """A non-dict value fails validation against type 'map'."""
        test_property_schema = {'type': 'map'}
        propertyInstance = Property('test_property', 12,
                                    test_property_schema)
        error = self.assertRaises(ValueError, propertyInstance.validate)
        self.assertEqual('"12" is not a map', str(error))
    def test_map_entry_schema(self):
        """Map values are validated against the entry_schema type."""
        test_property_schema = {'type': 'map',
                                'entry_schema': {'type': 'boolean'}}
        propertyInstance = Property('test_property',
                                    {'valid': True, 'required': True},
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual({'valid': True, 'required': True},
                         propertyInstance.value)
    def test_map_entry_schema_invalid(self):
        """A map value of the wrong entry type is rejected."""
        test_property_schema = {'type': 'map',
                                'entry_schema': {'type': 'boolean'}}
        propertyInstance = Property('test_property',
                                    {'valid': True, 'contact_name': 123},
                                    test_property_schema)
        error = self.assertRaises(ValueError, propertyInstance.validate)
        self.assertEqual('"123" is not a boolean', str(error))
    def test_boolean(self):
        """Both the string 'true' and Python True validate as booleans; the
        resulting value is True."""
        test_property_schema = {'type': 'boolean'}
        propertyInstance = Property('test_property', 'true',
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        propertyInstance = Property('test_property', True,
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual(True, propertyInstance.value)
    def test_boolean_invalid(self):
        """A number is not accepted as a boolean."""
        test_property_schema = {'type': 'boolean'}
        propertyInstance = Property('test_property', 12,
                                    test_property_schema)
        error = self.assertRaises(ValueError, propertyInstance.validate)
        self.assertEqual('"12" is not a boolean', str(error))
    def test_float(self):
        """A float value validates against type 'float'."""
        test_property_schema = {'type': 'float'}
        propertyInstance = Property('test_property', 0.1,
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual(0.1, propertyInstance.value)
    def test_float_invalid(self):
        """An int is rejected for type 'float' (no implicit coercion)."""
        test_property_schema = {'type': 'float'}
        propertyInstance = Property('test_property', 12,
                                    test_property_schema)
        error = self.assertRaises(ValueError, propertyInstance.validate)
        self.assertEqual('"12" is not a float', str(error))
    def test_timestamp(self):
        """All accepted timestamp notations validate and keep the original
        string value verbatim."""
        test_property_schema = {'type': 'timestamp'}
        # canonical timestamp
        propertyInstance = Property('test_property', '2015-04-01T02:59:43.1Z',
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual("2015-04-01T02:59:43.1Z", propertyInstance.value)
        # iso8601 timestamp
        propertyInstance = Property('test_property',
                                    '2015-04-01t21:59:43.10-05:00',
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual("2015-04-01t21:59:43.10-05:00",
                         propertyInstance.value)
        # space separated timestamp
        propertyInstance = Property('test_property',
                                    '2015-04-01 21:59:43.10 -5',
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual("2015-04-01 21:59:43.10 -5", propertyInstance.value)
        # no time zone timestamp
        propertyInstance = Property('test_property', '2015-04-01 21:59:43.10',
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual("2015-04-01 21:59:43.10", propertyInstance.value)
        # date (00:00:00Z)
        propertyInstance = Property('test_property', '2015-04-01',
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual("2015-04-01", propertyInstance.value)
    def test_timestamp_invalid(self):
        """An impossible calendar date fails with the parser's own message."""
        test_property_schema = {'type': 'timestamp'}
        # invalid timestamp - day out of range
        propertyInstance = Property('test_property', '2015-04-115T02:59:43.1Z',
                                    test_property_schema)
        error = self.assertRaises(ValueError, propertyInstance.validate)
        self.assertEqual('day is out of range for month', str(error))
    def test_required(self):
        """Properties default to required=True when not specified."""
        test_property_schema = {'type': 'string'}
        propertyInstance = Property('test_property', 'Foo',
                                    test_property_schema)
        self.assertEqual(True, propertyInstance.required)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from parser.common import exception
from parser.elements.templates.properties import Property
from parser.tests.base import TestCase
from parser.utils import yamlparser
class PropertyTest(TestCase):
    """Unit tests for ``Property``: validation across the supported data
    types (string, list, map, boolean, float, timestamp), entry_schema
    handling for collections, and the default required flag."""
    def test_type(self):
        """The declared schema type is exposed via the ``type`` attribute."""
        test_property_schema = {'type': 'string'}
        propertyInstance = Property('test_property', 'Hughes',
                                    test_property_schema)
        self.assertEqual('string', propertyInstance.type)
    def test_type_invalid(self):
        """validate() raises InvalidTypeError for an unknown type name."""
        test_property_schema = {'type': 'Fish'}
        propertyInstance = Property('test_property', 'Hughes',
                                    test_property_schema)
        error = self.assertRaises(exception.InvalidTypeError,
                                  propertyInstance.validate)
        self.assertEqual('Type "Fish" is not a valid type.', str(error))
    def test_list(self):
        """A Python list validates against type 'list' and is kept as-is."""
        test_property_schema = {'type': 'list'}
        propertyInstance = Property('test_property', ['a', 'b'],
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual(['a', 'b'], propertyInstance.value)
    def test_list_invalid(self):
        """A scalar value fails validation against type 'list'."""
        test_property_schema = {'type': 'list'}
        propertyInstance = Property('test_property', 'a',
                                    test_property_schema)
        error = self.assertRaises(ValueError, propertyInstance.validate)
        self.assertEqual('"a" is not a list', str(error))
    def test_list_entry_schema(self):
        """List entries are validated against entry_schema, both as a plain
        dict and as parsed YAML with constraints (min_length)."""
        test_property_schema = {'type': 'list',
                                'entry_schema': {'type': 'string'}}
        propertyInstance = Property('test_property', ['a', 'b'],
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual(['a', 'b'], propertyInstance.value)
        schema_snippet = '''
        type: list
        entry_schema:
          type: string
          constraints:
            - min_length: 2
        '''
        test_property_schema = yamlparser.simple_parse(schema_snippet)
        propertyInstance = Property('test_property', ['ab', 'cd'],
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual(['ab', 'cd'], propertyInstance.value)
    def test_list_entry_schema_invalid(self):
        """An element of the wrong entry type is rejected with its value in
        the error message."""
        test_property_schema = {'type': 'list',
                                'entry_schema': {'type': 'integer'}}
        propertyInstance = Property('test_property', [1, 'b'],
                                    test_property_schema)
        error = self.assertRaises(ValueError, propertyInstance.validate)
        self.assertEqual('"b" is not an integer',
                         str(error))
    def test_map(self):
        """A dict validates against type 'map' and is kept as-is."""
        test_property_schema = {'type': 'map'}
        propertyInstance = Property('test_property', {'a': 'b'},
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual({'a': 'b'}, propertyInstance.value)
    def test_map_invalid(self):
        """A non-dict value fails validation against type 'map'."""
        test_property_schema = {'type': 'map'}
        propertyInstance = Property('test_property', 12,
                                    test_property_schema)
        error = self.assertRaises(ValueError, propertyInstance.validate)
        self.assertEqual('"12" is not a map', str(error))
    def test_map_entry_schema(self):
        """Map values are validated against the entry_schema type."""
        test_property_schema = {'type': 'map',
                                'entry_schema': {'type': 'boolean'}}
        propertyInstance = Property('test_property',
                                    {'valid': True, 'required': True},
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual({'valid': True, 'required': True},
                         propertyInstance.value)
    def test_map_entry_schema_invalid(self):
        """A map value of the wrong entry type is rejected."""
        test_property_schema = {'type': 'map',
                                'entry_schema': {'type': 'boolean'}}
        propertyInstance = Property('test_property',
                                    {'valid': True, 'contact_name': 123},
                                    test_property_schema)
        error = self.assertRaises(ValueError, propertyInstance.validate)
        self.assertEqual('"123" is not a boolean', str(error))
    def test_boolean(self):
        """Both the string 'true' and Python True validate as booleans; the
        resulting value is True."""
        test_property_schema = {'type': 'boolean'}
        propertyInstance = Property('test_property', 'true',
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        propertyInstance = Property('test_property', True,
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual(True, propertyInstance.value)
    def test_boolean_invalid(self):
        """A number is not accepted as a boolean."""
        test_property_schema = {'type': 'boolean'}
        propertyInstance = Property('test_property', 12,
                                    test_property_schema)
        error = self.assertRaises(ValueError, propertyInstance.validate)
        self.assertEqual('"12" is not a boolean', str(error))
    def test_float(self):
        """A float value validates against type 'float'."""
        test_property_schema = {'type': 'float'}
        propertyInstance = Property('test_property', 0.1,
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual(0.1, propertyInstance.value)
    def test_float_invalid(self):
        """An int is rejected for type 'float' (no implicit coercion)."""
        test_property_schema = {'type': 'float'}
        propertyInstance = Property('test_property', 12,
                                    test_property_schema)
        error = self.assertRaises(ValueError, propertyInstance.validate)
        self.assertEqual('"12" is not a float', str(error))
    def test_timestamp(self):
        """All accepted timestamp notations validate and keep the original
        string value verbatim."""
        test_property_schema = {'type': 'timestamp'}
        # canonical timestamp
        propertyInstance = Property('test_property', '2015-04-01T02:59:43.1Z',
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual("2015-04-01T02:59:43.1Z", propertyInstance.value)
        # iso8601 timestamp
        propertyInstance = Property('test_property',
                                    '2015-04-01t21:59:43.10-05:00',
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual("2015-04-01t21:59:43.10-05:00",
                         propertyInstance.value)
        # space separated timestamp
        propertyInstance = Property('test_property',
                                    '2015-04-01 21:59:43.10 -5',
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual("2015-04-01 21:59:43.10 -5", propertyInstance.value)
        # no time zone timestamp
        propertyInstance = Property('test_property', '2015-04-01 21:59:43.10',
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual("2015-04-01 21:59:43.10", propertyInstance.value)
        # date (00:00:00Z)
        propertyInstance = Property('test_property', '2015-04-01',
                                    test_property_schema)
        self.assertIsNone(propertyInstance.validate())
        self.assertEqual("2015-04-01", propertyInstance.value)
    def test_timestamp_invalid(self):
        """An impossible calendar date fails with the parser's own message."""
        test_property_schema = {'type': 'timestamp'}
        # invalid timestamp - day out of range
        propertyInstance = Property('test_property', '2015-04-115T02:59:43.1Z',
                                    test_property_schema)
        error = self.assertRaises(ValueError, propertyInstance.validate)
        self.assertEqual('day is out of range for month', str(error))
    def test_required(self):
        """Properties default to required=True when not specified."""
        test_property_schema = {'type': 'string'}
        propertyInstance = Property('test_property', 'Foo',
                                    test_property_schema)
        self.assertEqual(True, propertyInstance.required)
|
en
| 0.810434
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. type: list entry_schema: type: string constraints: - min_length: 2 # canonical timestamp # iso8601 timestamp # space separated timestamp # no time zone timestamp # date (00:00:00Z) # invalid timestamp - day out of range
| 2.358655
| 2
|
examples/pyemu.py
|
ScottHMcKean/pyfracman
| 0
|
6628642
|
# Exploratory script: build PEST control files for a FracMan run with pyemu.
import pyemu
import os

# Work in the experiment directory (Windows path; machine-specific script).
os.chdir("C:\\Users\\scott.mckean\\Desktop\\exp1_onep32_seismiclength")

# Extend an existing control file with parameters from a template file.
pst = pyemu.Pst("test1.pst")
pst.add_parameters("test1.ptf")
pst.write("test1.pst")
# this does an okay job, but lacks some functionality
psthelp = pyemu.helpers.pst_from_io_files(
    tpl_files=['test1.ptf'],
    in_files=['test1.fmf'],
    ins_files=['output.pin'],
    out_files=['output.sts'],
    pst_filename='out.pst'
)
# make new pst file from parsed parameter/observation names
par_names = pyemu.pst_utils.parse_tpl_file('test1.ptf')
obs_names = pyemu.pst_utils.parse_ins_file('output.pin')
new_pst = pyemu.pst_utils.generic_pst(par_names, obs_names)
new_pst.control_data.get_dataframe().to_csv('control_data.csv')
# Fix: removed a dangling "new_pst.control_data." attribute access (a
# SyntaxError that stopped the whole script from even parsing) and a stray
# argument-less pyemu.helpers.pst_from_io_files() call immediately after it.

# Drive the pyfracman PEST generator (reload to pick up local edits).
import pyfracman
import pyfracman.pest
from importlib import reload
reload(pyfracman.pest)
from pyfracman.pest import PestGenerator

self = PestGenerator("examples/pest_config.json")
self.parse_config()
self.write_pst_file()

control_data = pyemu.pst.ControlData()
# PEST control-file variable layout, lower-cased, one line per record.
CONTROL_VARIABLE_LINES = """RSTFLE PESTMODE
NPAR NOBS NPARGP NPRIOR NOBSGP [MAXCOMPDIM]
NTPLFLE NINSFLE PRECIS DPOINT [NUMCOM] [JACFILE] [MESSFILE] [OBSREREF]
RLAMBDA1 RLAMFAC PHIRATSUF PHIREDLAM NUMLAM [JACUPDATE] [LAMFORGIVE] [DERFORGIVE]
RELPARMAX FACPARMAX FACORIG [IBOUNDSTICK] [UPVECBEND]
PHIREDSWH [NOPTSWITCH] [SPLITSWH] [DOAUI] [DOSENREUSE] [BOUNDSCALE]
NOPTMAX PHIREDSTP NPHISTP NPHINORED RELPARSTP NRELPAR [PHISTOPTHRESH] [LASTRUN] [PHIABANDON]
ICOV ICOR IEIG [IRES] [JCOSAVE] [VERBOSEREC] [JCOSAVEITN] [REISAVEITN] [PARSAVEITN] [PARSAVERUN]""".lower().split(
    "\n"
)
|
# Exploratory script: build PEST control files for a FracMan run with pyemu.
import pyemu
import os

# Work in the experiment directory (Windows path; machine-specific script).
os.chdir("C:\\Users\\scott.mckean\\Desktop\\exp1_onep32_seismiclength")

# Extend an existing control file with parameters from a template file.
pst = pyemu.Pst("test1.pst")
pst.add_parameters("test1.ptf")
pst.write("test1.pst")
# this does an okay job, but lacks some functionality
psthelp = pyemu.helpers.pst_from_io_files(
    tpl_files=['test1.ptf'],
    in_files=['test1.fmf'],
    ins_files=['output.pin'],
    out_files=['output.sts'],
    pst_filename='out.pst'
)
# make new pst file from parsed parameter/observation names
par_names = pyemu.pst_utils.parse_tpl_file('test1.ptf')
obs_names = pyemu.pst_utils.parse_ins_file('output.pin')
new_pst = pyemu.pst_utils.generic_pst(par_names, obs_names)
new_pst.control_data.get_dataframe().to_csv('control_data.csv')
# Fix: removed a dangling "new_pst.control_data." attribute access (a
# SyntaxError that stopped the whole script from even parsing) and a stray
# argument-less pyemu.helpers.pst_from_io_files() call immediately after it.

# Drive the pyfracman PEST generator (reload to pick up local edits).
import pyfracman
import pyfracman.pest
from importlib import reload
reload(pyfracman.pest)
from pyfracman.pest import PestGenerator

self = PestGenerator("examples/pest_config.json")
self.parse_config()
self.write_pst_file()

control_data = pyemu.pst.ControlData()
# PEST control-file variable layout, lower-cased, one line per record.
CONTROL_VARIABLE_LINES = """RSTFLE PESTMODE
NPAR NOBS NPARGP NPRIOR NOBSGP [MAXCOMPDIM]
NTPLFLE NINSFLE PRECIS DPOINT [NUMCOM] [JACFILE] [MESSFILE] [OBSREREF]
RLAMBDA1 RLAMFAC PHIRATSUF PHIREDLAM NUMLAM [JACUPDATE] [LAMFORGIVE] [DERFORGIVE]
RELPARMAX FACPARMAX FACORIG [IBOUNDSTICK] [UPVECBEND]
PHIREDSWH [NOPTSWITCH] [SPLITSWH] [DOAUI] [DOSENREUSE] [BOUNDSCALE]
NOPTMAX PHIREDSTP NPHISTP NPHINORED RELPARSTP NRELPAR [PHISTOPTHRESH] [LASTRUN] [PHIABANDON]
ICOV ICOR IEIG [IRES] [JCOSAVE] [VERBOSEREC] [JCOSAVEITN] [REISAVEITN] [PARSAVEITN] [PARSAVERUN]""".lower().split(
    "\n"
)
|
en
| 0.564945
|
# this does an okay job, but lacks some functionality # make new pst file RSTFLE PESTMODE NPAR NOBS NPARGP NPRIOR NOBSGP [MAXCOMPDIM] NTPLFLE NINSFLE PRECIS DPOINT [NUMCOM] [JACFILE] [MESSFILE] [OBSREREF] RLAMBDA1 RLAMFAC PHIRATSUF PHIREDLAM NUMLAM [JACUPDATE] [LAMFORGIVE] [DERFORGIVE] RELPARMAX FACPARMAX FACORIG [IBOUNDSTICK] [UPVECBEND] PHIREDSWH [NOPTSWITCH] [SPLITSWH] [DOAUI] [DOSENREUSE] [BOUNDSCALE] NOPTMAX PHIREDSTP NPHISTP NPHINORED RELPARSTP NRELPAR [PHISTOPTHRESH] [LASTRUN] [PHIABANDON] ICOV ICOR IEIG [IRES] [JCOSAVE] [VERBOSEREC] [JCOSAVEITN] [REISAVEITN] [PARSAVEITN] [PARSAVERUN]
| 2.188625
| 2
|
packages/amuse-aarsethzare/setup.py
|
Allyn69/amuse
| 1
|
6628643
|
# setup.py for the AMUSE "AarsethZare" community-code package.
# NOTE: statement order matters: support.use("system") must run before
# setup_codes is imported, README.md is read at import time, and setup()
# executes immediately — do not reorder.
import sys
import os
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-aarsethzare'
version = "12.0.0rc3"
author = 'The AMUSE team'
author_email = '<EMAIL>'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
# Runtime dependencies; the framework pin tracks this package's version.
install_requires = [
    'wheel>=0.32',
    'docutils>=0.6',
    'numpy>=1.2.2',
    'nose>=0.11.1',
    'mpi4py>=1.1.0',
    'h5py>=1.1.0',
    'amuse-framework>=12.0.0rc3',
]
description = 'The Astrophysical Multipurpose Software Environment - AarsethZare'
# The long description is taken verbatim from the package README.
with open("README.md", "r") as fh:
    long_description = fh.read()
long_description_content_type = "text/markdown"
classifiers = [
    'Development Status :: 4 - Beta',
    'Environment :: Console',
    'Intended Audience :: End Users/Desktop',
    'Intended Audience :: Developers',
    'Intended Audience :: Science/Research',
    'License :: OSI Approved :: Apache Software License',
    'Operating System :: MacOS :: MacOS X',
    'Operating System :: POSIX',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: C',
    'Programming Language :: C++',
    'Programming Language :: Fortran',
    'Topic :: Scientific/Engineering :: Astronomy',
]
extensions = []
all_data_files = []
packages = ['amuse.community.aarsethzare']
package_data = {
}
# Custom build/install commands from the AMUSE support package; these
# handle compiling the community code during installation.
mapping_from_command_name_to_command_class=setup_commands()
setup(
    name=name,
    version=version,
    classifiers=classifiers,
    url=url,
    author_email=author_email,
    author=author,
    license=license_,
    description=description,
    long_description=long_description,
    long_description_content_type=long_description_content_type,
    install_requires=install_requires,
    cmdclass=mapping_from_command_name_to_command_class,
    ext_modules=extensions,
    package_dir={'amuse.community.aarsethzare': 'src/amuse/community/aarsethzare'},
    packages=packages,
    package_data=package_data,
    data_files=all_data_files,
)
|
# setup.py for the AMUSE "AarsethZare" community-code package.
# NOTE: statement order matters: support.use("system") must run before
# setup_codes is imported, README.md is read at import time, and setup()
# executes immediately — do not reorder.
import sys
import os
from setuptools import setup
import support
support.use("system")
from support.setup_codes import setup_commands
name = 'amuse-aarsethzare'
version = "12.0.0rc3"
author = 'The AMUSE team'
author_email = '<EMAIL>'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
# Runtime dependencies; the framework pin tracks this package's version.
install_requires = [
    'wheel>=0.32',
    'docutils>=0.6',
    'numpy>=1.2.2',
    'nose>=0.11.1',
    'mpi4py>=1.1.0',
    'h5py>=1.1.0',
    'amuse-framework>=12.0.0rc3',
]
description = 'The Astrophysical Multipurpose Software Environment - AarsethZare'
# The long description is taken verbatim from the package README.
with open("README.md", "r") as fh:
    long_description = fh.read()
long_description_content_type = "text/markdown"
classifiers = [
    'Development Status :: 4 - Beta',
    'Environment :: Console',
    'Intended Audience :: End Users/Desktop',
    'Intended Audience :: Developers',
    'Intended Audience :: Science/Research',
    'License :: OSI Approved :: Apache Software License',
    'Operating System :: MacOS :: MacOS X',
    'Operating System :: POSIX',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: C',
    'Programming Language :: C++',
    'Programming Language :: Fortran',
    'Topic :: Scientific/Engineering :: Astronomy',
]
extensions = []
all_data_files = []
packages = ['amuse.community.aarsethzare']
package_data = {
}
# Custom build/install commands from the AMUSE support package; these
# handle compiling the community code during installation.
mapping_from_command_name_to_command_class=setup_commands()
setup(
    name=name,
    version=version,
    classifiers=classifiers,
    url=url,
    author_email=author_email,
    author=author,
    license=license_,
    description=description,
    long_description=long_description,
    long_description_content_type=long_description_content_type,
    install_requires=install_requires,
    cmdclass=mapping_from_command_name_to_command_class,
    ext_modules=extensions,
    package_dir={'amuse.community.aarsethzare': 'src/amuse/community/aarsethzare'},
    packages=packages,
    package_data=package_data,
    data_files=all_data_files,
)
|
none
| 1
| 1.553323
| 2
|
|
evap/evaluation/migrations/0002_initial_data.py
|
JenniferStamm/EvaP
| 0
|
6628644
|
<filename>evap/evaluation/migrations/0002_initial_data.py<gh_stars>0
# -*- coding: utf-8 -*-
from django.db import models, migrations
from django.contrib.auth.models import Group
def insert_emailtemplates(apps, schema_editor):
    """Seed the default e-mail templates and the "Staff" auth group.

    Idempotent data migration: each template is only created when missing,
    and the group is created with get_or_create, so re-running the
    migration (or running it against a pre-populated database) does not
    raise IntegrityError.
    """
    emailtemplates = [
        ("Lecturer Review Notice", "[EvaP] New Course ready for approval"),
        ("Student Reminder", "[EvaP] Evaluation period is ending"),
        ("Publishing Notice", "[EvaP] A course has been published"),
        ("Login Key Created", "[EvaP] A login key was created"),
    ]
    EmailTemplate = apps.get_model("evaluation", "EmailTemplate")
    for name, subject in emailtemplates:
        if not EmailTemplate.objects.filter(name=name).exists():
            EmailTemplate.objects.create(name=name, subject=subject, body="")
    # Use the historical model state (not the direct auth import) as
    # recommended for data migrations, and guard against an existing group.
    Group = apps.get_model("auth", "Group")
    Group.objects.get_or_create(name="Staff")
class Migration(migrations.Migration):
    """Data migration: seeds initial rows on top of the 0001 schema."""
    dependencies = [
        ('evaluation', '0001_initial'),
    ]
    operations = [
        migrations.RunPython(insert_emailtemplates),
    ]
|
<filename>evap/evaluation/migrations/0002_initial_data.py<gh_stars>0
# -*- coding: utf-8 -*-
from django.db import models, migrations
from django.contrib.auth.models import Group
def insert_emailtemplates(apps, schema_editor):
    """Seed the default e-mail templates and the "Staff" auth group.

    Idempotent data migration: each template is only created when missing,
    and the group is created with get_or_create, so re-running the
    migration (or running it against a pre-populated database) does not
    raise IntegrityError.
    """
    emailtemplates = [
        ("Lecturer Review Notice", "[EvaP] New Course ready for approval"),
        ("Student Reminder", "[EvaP] Evaluation period is ending"),
        ("Publishing Notice", "[EvaP] A course has been published"),
        ("Login Key Created", "[EvaP] A login key was created"),
    ]
    EmailTemplate = apps.get_model("evaluation", "EmailTemplate")
    for name, subject in emailtemplates:
        if not EmailTemplate.objects.filter(name=name).exists():
            EmailTemplate.objects.create(name=name, subject=subject, body="")
    # Use the historical model state (not the direct auth import) as
    # recommended for data migrations, and guard against an existing group.
    Group = apps.get_model("auth", "Group")
    Group.objects.get_or_create(name="Staff")
class Migration(migrations.Migration):
    """Data migration: seeds initial rows on top of the 0001 schema."""
    dependencies = [
        ('evaluation', '0001_initial'),
    ]
    operations = [
        migrations.RunPython(insert_emailtemplates),
    ]
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 2.005589
| 2
|
dashboard/migrations/0001_initial.py
|
open-legal-tech/open-decision-prototype
| 6
|
6628645
|
<reponame>open-legal-tech/open-decision-prototype
# Generated by Django 3.0.3 on 2020-02-29 02:46
import dashboard.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: DecisionTree and Node tables, with slugs unique
    per owner (trees) and per tree (nodes)."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # A tree owned by a user; owner is nullable (CASCADE on delete).
        migrations.CreateModel(
            name='DecisionTree',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('name', models.CharField(max_length=200)),
                ('slug', models.SlugField(default='')),
                ('owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # A question node within a tree; deleted with its tree (CASCADE).
        migrations.CreateModel(
            name='Node',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('name', models.CharField(max_length=240)),
                ('slug', models.SlugField(default='')),
                ('question', dashboard.models.RichTextBleachField()),
                ('input_type', models.CharField(max_length=240)),
                ('data_answer', models.TextField(blank=True, null=True)),
                ('data_logic', models.TextField(blank=True, null=True)),
                ('new_node', models.BooleanField()),
                ('start_node', models.BooleanField()),
                ('end_node', models.BooleanField()),
                ('decision_tree', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dashboard.DecisionTree')),
            ],
        ),
        migrations.AddConstraint(
            model_name='node',
            constraint=models.UniqueConstraint(fields=('slug', 'decision_tree'), name='unique nodeslug per tree'),
        ),
        migrations.AddConstraint(
            model_name='decisiontree',
            constraint=models.UniqueConstraint(fields=('owner', 'slug'), name='unique tree slug per user'),
        ),
    ]
|
# Generated by Django 3.0.3 on 2020-02-29 02:46
import dashboard.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: DecisionTree and Node tables, with slugs unique
    per owner (trees) and per tree (nodes)."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # A tree owned by a user; owner is nullable (CASCADE on delete).
        migrations.CreateModel(
            name='DecisionTree',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('name', models.CharField(max_length=200)),
                ('slug', models.SlugField(default='')),
                ('owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # A question node within a tree; deleted with its tree (CASCADE).
        migrations.CreateModel(
            name='Node',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('name', models.CharField(max_length=240)),
                ('slug', models.SlugField(default='')),
                ('question', dashboard.models.RichTextBleachField()),
                ('input_type', models.CharField(max_length=240)),
                ('data_answer', models.TextField(blank=True, null=True)),
                ('data_logic', models.TextField(blank=True, null=True)),
                ('new_node', models.BooleanField()),
                ('start_node', models.BooleanField()),
                ('end_node', models.BooleanField()),
                ('decision_tree', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dashboard.DecisionTree')),
            ],
        ),
        migrations.AddConstraint(
            model_name='node',
            constraint=models.UniqueConstraint(fields=('slug', 'decision_tree'), name='unique nodeslug per tree'),
        ),
        migrations.AddConstraint(
            model_name='decisiontree',
            constraint=models.UniqueConstraint(fields=('owner', 'slug'), name='unique tree slug per user'),
        ),
    ]
|
en
| 0.81792
|
# Generated by Django 3.0.3 on 2020-02-29 02:46
| 1.787863
| 2
|
tensor2tensor/data_generators/gym_utils.py
|
spacegoing/t2t_caps
| 0
|
6628646
|
<filename>tensor2tensor/data_generators/gym_utils.py
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for openai gym."""
from collections import deque
# Dependency imports
import gym
import numpy as np
import six
from tensor2tensor.data_generators import image_utils
class WarmupWrapper(gym.Wrapper):
  """Warmup wrapper.

  After every reset, steps the environment `warm_up_examples` times with a
  fixed `warmup_action` before handing control back to the caller.
  """

  def __init__(self, env, warm_up_examples=0, warmup_action=0):
    gym.Wrapper.__init__(self, env)
    self.warm_up_examples = warm_up_examples
    self.warm_up_action = warmup_action
    # Full-resolution Atari frame (210, 160, 3) uint8.
    self.observation_space = gym.spaces.Box(
        low=0, high=255, shape=(210, 160, 3), dtype=np.uint8)

  def get_starting_data(self, num_frames):
    """Resets, then plays `num_frames` warm-up actions.

    Returns:
      Tuple (observations, actions, rewards), each a list of num_frames
      entries produced by stepping the wrapped env with warm_up_action.
    """
    self.reset()
    starting_observations, starting_actions, starting_rewards = [], [], []
    for _ in range(num_frames):
      observation, rew, _, _ = self.env.step(self.warm_up_action)
      starting_observations.append(observation)
      starting_rewards.append(rew)
      starting_actions.append(self.warm_up_action)
    return starting_observations, starting_actions, starting_rewards

  def step(self, ac):
    """Passes the action through to the wrapped env unchanged."""
    action = ac
    return self.env.step(action)

  def reset(self, **kwargs):
    """Resets the env and advances past the warm-up phase.

    Returns:
      The last warm-up observation, or None when warm_up_examples == 0.
    """
    self.env.reset()
    observation = None
    for _ in range(self.warm_up_examples):
      observation, _, _, _ = self.env.step(self.warm_up_action)
    return observation
class PongWrapper(WarmupWrapper):
  """Pong wrapper: optional 2-action space, reward skipping and big ball."""

  def __init__(self, env, warm_up_examples=0,
               action_space_reduction=False,
               reward_skip_steps=0,
               big_ball=False):
    super(PongWrapper, self).__init__(env, warm_up_examples=warm_up_examples)
    self.action_space_reduction = action_space_reduction
    if self.action_space_reduction:
      # Expose only two actions; step() maps them to raw actions 2 and 5.
      self.action_space = gym.spaces.Discrete(2)
    self.warm_up_examples = warm_up_examples
    self.observation_space = gym.spaces.Box(
        low=0, high=255, shape=(210, 160, 3), dtype=np.uint8)
    # After a point is scored, this many no-op frames are discarded.
    self.reward_skip_steps = reward_skip_steps
    self.big_ball = big_ball

  def step(self, ac):
    action = ac
    if self.action_space_reduction:
      action = 2 if int(ac) == 0 else 5
    ob, rew, done, info = self.env.step(action)
    ob = self.process_observation(ob)
    if rew != 0 and self.reward_skip_steps != 0:
      # Skip the dead frames after a point with no-op actions.
      for _ in range(self.reward_skip_steps):
        self.env.step(0)
    return ob, rew, done, info

  def reset(self, **kwargs):
    observation = super(PongWrapper, self).reset(**kwargs)
    observation = self.process_observation(observation)
    return observation

  def process_observation(self, obs):
    """Optionally paint a 10x10 white square over the detected ball."""
    if self.big_ball:
      pos = PongWrapper.find_ball(obs)
      if pos is not None:
        x, y = pos
        obs[x-5:x+5, y-5:y+5, :] = 255
    return obs

  @staticmethod
  def find_ball(obs, default=None):
    """Return (row, col) of the first ball pixel, or `default` if not found."""
    ball_area = obs[37:193, :, 0]
    res = np.argwhere(ball_area == 236)
    # Bug fix: `not res` on a numpy array raises ValueError when more than
    # one pixel matches; test emptiness via .size (as BreakoutWrapper does).
    if not res.size:
      return default
    x, y = res[0]
    x += 37  # undo the crop offset
    return x, y
def wrapped_pong_factory(warm_up_examples=0, action_space_reduction=False,
                         reward_skip_steps=0, big_ball=False):
  """Build a PongDeterministic-v4 env wrapped in PongWrapper."""
  raw_env = gym.make("PongDeterministic-v4").env  # strip time_limit wrapper
  wrapped = PongWrapper(
      raw_env,
      warm_up_examples=warm_up_examples,
      action_space_reduction=action_space_reduction,
      reward_skip_steps=reward_skip_steps,
      big_ball=big_ball)
  return wrapped
# Register two Pong variants: 20 warm-up frames, 15 skipped frames after
# each point; they differ only in episode length (200 vs 2000 steps).
gym.envs.register(id="T2TPongWarmUp20RewSkip200Steps-v1",
                  entry_point=lambda: wrapped_pong_factory(  # pylint: disable=g-long-lambda
                      warm_up_examples=20, reward_skip_steps=15),
                  max_episode_steps=200)
gym.envs.register(id="T2TPongWarmUp20RewSkip2000Steps-v1",
                  entry_point=lambda: wrapped_pong_factory(  # pylint: disable=g-long-lambda
                      warm_up_examples=20, reward_skip_steps=15),
                  max_episode_steps=2000)
class BreakoutWrapper(WarmupWrapper):
  """Breakout wrapper: auto-fire on ball loss, optional ball highlighting."""

  # Action id that launches the ball in Breakout.
  FIRE_ACTION = 1

  def __init__(self, env, warm_up_examples=0,
               ball_down_skip=0,
               big_ball=False,
               include_direction_info=False,
               reward_clipping=True):
    super(BreakoutWrapper, self).__init__(
        env, warm_up_examples=warm_up_examples,
        warmup_action=BreakoutWrapper.FIRE_ACTION)
    self.warm_up_examples = warm_up_examples
    self.observation_space = gym.spaces.Box(low=0, high=255,
                                            shape=(210, 160, 3),
                                            dtype=np.uint8)
    # Frames to fast-forward (while firing) whenever the ball disappears.
    self.ball_down_skip = ball_down_skip
    self.big_ball = big_ball
    self.reward_clipping = reward_clipping
    # When True, the two most recent ball positions are painted green so a
    # single frame also encodes the ball's direction of travel.
    self.include_direction_info = include_direction_info
    self.direction_info = deque([], maxlen=2)
    self.points_gained = False
    msg = ("ball_down_skip should be bigger equal 9 for "
           "include_direction_info to work correctly")
    assert not self.include_direction_info or ball_down_skip >= 9, msg

  def step(self, ac):
    ob, rew, done, info = self.env.step(ac)
    if BreakoutWrapper.find_ball(ob) is None and self.ball_down_skip != 0:
      for _ in range(self.ball_down_skip):
        # We assume that nothing interesting happens during ball_down_skip
        # and discard all information.
        # We fire all the time to start new game
        ob, _, _, _ = self.env.step(BreakoutWrapper.FIRE_ACTION)
    self.direction_info.append(BreakoutWrapper.find_ball(ob))
    ob = self.process_observation(ob)
    # Sticky flag: stays True once any reward has been observed.
    self.points_gained = self.points_gained or rew > 0
    if self.reward_clipping:
      rew = np.sign(rew)
    return ob, rew, done, info

  def reset(self, **kwargs):
    observation = super(BreakoutWrapper, self).reset(**kwargs)
    # Fire once to launch the ball, and clear stale direction history.
    self.env.step(BreakoutWrapper.FIRE_ACTION)
    self.direction_info = deque([], maxlen=2)
    observation = self.process_observation(observation)
    return observation

  @staticmethod
  def find_ball(ob, default=None):
    """Return (row, col) of the first ball pixel, or `default` if not found."""
    off_x = 63  # crop out the score area and paddle region
    clipped_ob = ob[off_x:-21, :, 0]
    pos = np.argwhere(clipped_ob == 200)
    if not pos.size:
      return default
    x = off_x + pos[0][0]
    y = 0 + pos[0][1]
    return x, y

  def process_observation(self, obs):
    """Optionally enlarge the ball and paint recent positions in green."""
    if self.big_ball:
      pos = BreakoutWrapper.find_ball(obs)
      if pos is not None:
        x, y = pos
        obs[x-5:x+5, y-5:y+5, :] = 255
    if self.include_direction_info:
      for point in list(self.direction_info):
        if point is not None:
          x, y = point
          obs[x-2:x+2, y-2:y+2, 1] = 255
    return obs
def wrapped_breakout_factory(warm_up_examples=0,
                             ball_down_skip=0,
                             big_ball=False,
                             include_direction_info=False,
                             reward_clipping=True):
  """Build a BreakoutDeterministic-v4 env wrapped in BreakoutWrapper."""
  raw_env = gym.make("BreakoutDeterministic-v4").env  # strip time_limit wrapper
  return BreakoutWrapper(raw_env,
                         warm_up_examples=warm_up_examples,
                         ball_down_skip=ball_down_skip,
                         big_ball=big_ball,
                         include_direction_info=include_direction_info,
                         reward_clipping=reward_clipping)
# Breakout variant with direction info painted into the frame; ball_down_skip=9
# satisfies the include_direction_info assertion in BreakoutWrapper.__init__.
gym.envs.register(id="T2TBreakoutWarmUp20RewSkip500Steps-v1",
                  entry_point=lambda: wrapped_breakout_factory(  # pylint: disable=g-long-lambda
                      warm_up_examples=1,
                      ball_down_skip=9,
                      big_ball=False,
                      include_direction_info=True,
                      reward_clipping=True
                  ),
                  max_episode_steps=500)
class FreewayWrapper(WarmupWrapper):
  """Freeway wrapper with optional half-way bonus reward ("easy" mode)."""

  def __init__(self, env,
               warm_up_examples=0,
               reward_clipping=True,
               easy_freeway=False):
    super(FreewayWrapper, self).__init__(env, warm_up_examples)
    # When True, a one-time bonus is paid when the chicken passes half-way.
    self.easy_freeway = easy_freeway
    self.half_way_reward = 1.0
    # this is probably not needed, just in case
    self.reward_clipping = reward_clipping

  def chicken_height(self, image):
    # Abstract hook: subclasses must locate the chicken's vertical position.
    raise NotImplementedError()

  def step(self, ac):
    ob, rew, done, info = self.env.step(ac)
    if self.easy_freeway:
      if rew > 0:
        # A crossing completed: re-arm the half-way bonus for the next run.
        self.half_way_reward = 1
      chicken_height = self.chicken_height(ob)
      if chicken_height < 105:
        # Pay the bonus once per crossing, then disarm it.
        rew += self.half_way_reward
        self.half_way_reward = 0
    if self.reward_clipping:
      rew = np.sign(rew)
    return ob, rew, done, info

  def reset(self, **kwargs):
    self.half_way_reward = 1.0
    observation = super(FreewayWrapper, self).reset(**kwargs)
    return observation
def wrapped_freeway_factory(warm_up_examples=0,
                            reward_clipping=True,
                            easy_freeway=False):
  """Build a FreewayDeterministic-v4 env wrapped in FreewayWrapper."""
  raw_env = gym.make("FreewayDeterministic-v4").env  # strip time_limit wrapper
  wrapped = FreewayWrapper(raw_env,
                           warm_up_examples=warm_up_examples,
                           reward_clipping=reward_clipping,
                           easy_freeway=easy_freeway)
  return wrapped
# Freeway variant: single warm-up frame, clipped rewards, standard difficulty.
gym.envs.register(id="T2TFreewayWarmUp20RewSkip500Steps-v1",
                  entry_point=lambda: wrapped_freeway_factory(  # pylint: disable=g-long-lambda
                      warm_up_examples=1,
                      reward_clipping=True,
                      easy_freeway=False
                  ),
                  max_episode_steps=500)
def encode_image_to_png(image):
  """Encode a single image array as PNG bytes via image_utils."""
  return six.next(image_utils.encode_images_as_png([image]))
|
<filename>tensor2tensor/data_generators/gym_utils.py
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for openai gym."""
from collections import deque
# Dependency imports
import gym
import numpy as np
import six
from tensor2tensor.data_generators import image_utils
class WarmupWrapper(gym.Wrapper):
"""Warmup wrapper."""
def __init__(self, env, warm_up_examples=0, warmup_action=0):
gym.Wrapper.__init__(self, env)
self.warm_up_examples = warm_up_examples
self.warm_up_action = warmup_action
self.observation_space = gym.spaces.Box(
low=0, high=255, shape=(210, 160, 3), dtype=np.uint8)
def get_starting_data(self, num_frames):
self.reset()
starting_observations, starting_actions, starting_rewards = [], [], []
for _ in range(num_frames):
observation, rew, _, _ = self.env.step(self.warm_up_action)
starting_observations.append(observation)
starting_rewards.append(rew)
starting_actions.append(self.warm_up_action)
return starting_observations, starting_actions, starting_rewards
def step(self, ac):
action = ac
return self.env.step(action)
def reset(self, **kwargs):
self.env.reset()
observation = None
for _ in range(self.warm_up_examples):
observation, _, _, _ = self.env.step(self.warm_up_action)
return observation
class PongWrapper(WarmupWrapper):
  """Pong wrapper: optional 2-action space, reward skipping and big ball."""

  def __init__(self, env, warm_up_examples=0,
               action_space_reduction=False,
               reward_skip_steps=0,
               big_ball=False):
    super(PongWrapper, self).__init__(env, warm_up_examples=warm_up_examples)
    self.action_space_reduction = action_space_reduction
    if self.action_space_reduction:
      # Expose only two actions; step() maps them to raw actions 2 and 5.
      self.action_space = gym.spaces.Discrete(2)
    self.warm_up_examples = warm_up_examples
    self.observation_space = gym.spaces.Box(
        low=0, high=255, shape=(210, 160, 3), dtype=np.uint8)
    # After a point is scored, this many no-op frames are discarded.
    self.reward_skip_steps = reward_skip_steps
    self.big_ball = big_ball

  def step(self, ac):
    action = ac
    if self.action_space_reduction:
      action = 2 if int(ac) == 0 else 5
    ob, rew, done, info = self.env.step(action)
    ob = self.process_observation(ob)
    if rew != 0 and self.reward_skip_steps != 0:
      # Skip the dead frames after a point with no-op actions.
      for _ in range(self.reward_skip_steps):
        self.env.step(0)
    return ob, rew, done, info

  def reset(self, **kwargs):
    observation = super(PongWrapper, self).reset(**kwargs)
    observation = self.process_observation(observation)
    return observation

  def process_observation(self, obs):
    """Optionally paint a 10x10 white square over the detected ball."""
    if self.big_ball:
      pos = PongWrapper.find_ball(obs)
      if pos is not None:
        x, y = pos
        obs[x-5:x+5, y-5:y+5, :] = 255
    return obs

  @staticmethod
  def find_ball(obs, default=None):
    """Return (row, col) of the first ball pixel, or `default` if not found."""
    ball_area = obs[37:193, :, 0]
    res = np.argwhere(ball_area == 236)
    # Bug fix: `not res` on a numpy array raises ValueError when more than
    # one pixel matches; test emptiness via .size (as BreakoutWrapper does).
    if not res.size:
      return default
    x, y = res[0]
    x += 37  # undo the crop offset
    return x, y
def wrapped_pong_factory(warm_up_examples=0, action_space_reduction=False,
reward_skip_steps=0, big_ball=False):
"""Wrapped pong games."""
env = gym.make("PongDeterministic-v4")
env = env.env # Remove time_limit wrapper.
env = PongWrapper(env, warm_up_examples=warm_up_examples,
action_space_reduction=action_space_reduction,
reward_skip_steps=reward_skip_steps,
big_ball=big_ball)
return env
gym.envs.register(id="T2TPongWarmUp20RewSkip200Steps-v1",
entry_point=lambda: wrapped_pong_factory( # pylint: disable=g-long-lambda
warm_up_examples=20, reward_skip_steps=15),
max_episode_steps=200)
gym.envs.register(id="T2TPongWarmUp20RewSkip2000Steps-v1",
entry_point=lambda: wrapped_pong_factory( # pylint: disable=g-long-lambda
warm_up_examples=20, reward_skip_steps=15),
max_episode_steps=2000)
class BreakoutWrapper(WarmupWrapper):
"""Breakout Wrapper."""
FIRE_ACTION = 1
def __init__(self, env, warm_up_examples=0,
ball_down_skip=0,
big_ball=False,
include_direction_info=False,
reward_clipping=True):
super(BreakoutWrapper, self).__init__(
env, warm_up_examples=warm_up_examples,
warmup_action=BreakoutWrapper.FIRE_ACTION)
self.warm_up_examples = warm_up_examples
self.observation_space = gym.spaces.Box(low=0, high=255,
shape=(210, 160, 3),
dtype=np.uint8)
self.ball_down_skip = ball_down_skip
self.big_ball = big_ball
self.reward_clipping = reward_clipping
self.include_direction_info = include_direction_info
self.direction_info = deque([], maxlen=2)
self.points_gained = False
msg = ("ball_down_skip should be bigger equal 9 for "
"include_direction_info to work correctly")
assert not self.include_direction_info or ball_down_skip >= 9, msg
def step(self, ac):
ob, rew, done, info = self.env.step(ac)
if BreakoutWrapper.find_ball(ob) is None and self.ball_down_skip != 0:
for _ in range(self.ball_down_skip):
# We assume that nothing interesting happens during ball_down_skip
# and discard all information.
# We fire all the time to start new game
ob, _, _, _ = self.env.step(BreakoutWrapper.FIRE_ACTION)
self.direction_info.append(BreakoutWrapper.find_ball(ob))
ob = self.process_observation(ob)
self.points_gained = self.points_gained or rew > 0
if self.reward_clipping:
rew = np.sign(rew)
return ob, rew, done, info
def reset(self, **kwargs):
observation = super(BreakoutWrapper, self).reset(**kwargs)
self.env.step(BreakoutWrapper.FIRE_ACTION)
self.direction_info = deque([], maxlen=2)
observation = self.process_observation(observation)
return observation
@staticmethod
def find_ball(ob, default=None):
off_x = 63
clipped_ob = ob[off_x:-21, :, 0]
pos = np.argwhere(clipped_ob == 200)
if not pos.size:
return default
x = off_x + pos[0][0]
y = 0 + pos[0][1]
return x, y
def process_observation(self, obs):
if self.big_ball:
pos = BreakoutWrapper.find_ball(obs)
if pos is not None:
x, y = pos
obs[x-5:x+5, y-5:y+5, :] = 255
if self.include_direction_info:
for point in list(self.direction_info):
if point is not None:
x, y = point
obs[x-2:x+2, y-2:y+2, 1] = 255
return obs
def wrapped_breakout_factory(warm_up_examples=0,
ball_down_skip=0,
big_ball=False,
include_direction_info=False,
reward_clipping=True):
"""Wrapped breakout games."""
env = gym.make("BreakoutDeterministic-v4")
env = env.env # Remove time_limit wrapper.
env = BreakoutWrapper(env, warm_up_examples=warm_up_examples,
ball_down_skip=ball_down_skip,
big_ball=big_ball,
include_direction_info=include_direction_info,
reward_clipping=reward_clipping)
return env
gym.envs.register(id="T2TBreakoutWarmUp20RewSkip500Steps-v1",
entry_point=lambda: wrapped_breakout_factory( # pylint: disable=g-long-lambda
warm_up_examples=1,
ball_down_skip=9,
big_ball=False,
include_direction_info=True,
reward_clipping=True
),
max_episode_steps=500)
class FreewayWrapper(WarmupWrapper):
"""Wrapper for Freeway."""
def __init__(self, env,
warm_up_examples=0,
reward_clipping=True,
easy_freeway=False):
super(FreewayWrapper, self).__init__(env, warm_up_examples)
self.easy_freeway = easy_freeway
self.half_way_reward = 1.0
# this is probably not needed, just in case
self.reward_clipping = reward_clipping
def chicken_height(self, image):
raise NotImplementedError()
def step(self, ac):
ob, rew, done, info = self.env.step(ac)
if self.easy_freeway:
if rew > 0:
self.half_way_reward = 1
chicken_height = self.chicken_height(ob)
if chicken_height < 105:
rew += self.half_way_reward
self.half_way_reward = 0
if self.reward_clipping:
rew = np.sign(rew)
return ob, rew, done, info
def reset(self, **kwargs):
self.half_way_reward = 1.0
observation = super(FreewayWrapper, self).reset(**kwargs)
return observation
def wrapped_freeway_factory(warm_up_examples=0,
reward_clipping=True,
easy_freeway=False):
"""Wrapped freeway games."""
env = gym.make("FreewayDeterministic-v4")
env = env.env # Remove time_limit wrapper.
env = FreewayWrapper(env, warm_up_examples=warm_up_examples,
reward_clipping=reward_clipping,
easy_freeway=easy_freeway)
return env
gym.envs.register(id="T2TFreewayWarmUp20RewSkip500Steps-v1",
entry_point=lambda: wrapped_freeway_factory( # pylint: disable=g-long-lambda
warm_up_examples=1,
reward_clipping=True,
easy_freeway=False
),
max_episode_steps=500)
def encode_image_to_png(image):
encoded = six.next(
image_utils.encode_images_as_png([image]))
return encoded
|
en
| 0.796144
|
# coding=utf-8 # Copyright 2018 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Utilities for openai gym. # Dependency imports Warmup wrapper. Pong Wrapper. Wrapped pong games. # Remove time_limit wrapper. # pylint: disable=g-long-lambda # pylint: disable=g-long-lambda Breakout Wrapper. # We assume that nothing interesting happens during ball_down_skip # and discard all information. # We fire all the time to start new game Wrapped breakout games. # Remove time_limit wrapper. # pylint: disable=g-long-lambda Wrapper for Freeway. # this is probably not needed, just in case Wrapped freeway games. # Remove time_limit wrapper. # pylint: disable=g-long-lambda
| 2.192693
| 2
|
Concrete.py
|
TJ-Machine-Learning-Group/LAB1-Regression
| 3
|
6628647
|
<reponame>TJ-Machine-Learning-Group/LAB1-Regression
from sklearn.metrics import mean_squared_error,mean_absolute_error,max_error
from sklearn.linear_model import LinearRegression, Lasso, Ridge,ElasticNet
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.neural_network import MLPRegressor
from DecisionTreeRegressorHandWrite import DecisionTreeRegressorHandWrite
from MLPHandWrite import MLPHandWrite
from LinearModelHandWrite import LinearRegressionHandWrite,LassoHandWrite,RidgeHandWrite,ElasticNetHandWrite
from RandomForestHandWrite import myRandomForest
from Data_preprocessing import Data_preprocessing
from Regression import Regression,Draw
#import numpy as np
import copy
def main(data_url):
    """Train and compare a suite of regressors (sklearn vs hand-written) on
    the Concrete dataset, then plot MSE, R2 and training time per model."""
    data,target=Data_preprocessing(data_url)
    # Instantiate the regression models
    # Linear Regression
    lr_skl = LinearRegression()
    lr_handwriting = LinearRegressionHandWrite()
    # Lasso Regression
    lasso_skl = Lasso()
    lasso_handwriting= LassoHandWrite()
    # Ridge Regression
    ridge_skl = Ridge()
    ridge_handwriting=RidgeHandWrite()
    # ElasticNet Regression
    enet_skl=ElasticNet()
    enet_handwriting=ElasticNetHandWrite()
    # Decision Trees
    dtr_skl_mse = DecisionTreeRegressor()
    dtr_skl_fmse = DecisionTreeRegressor(criterion="friedman_mse")
    dtr_skl_mae = DecisionTreeRegressor(criterion='absolute_error')
    dtr_handwriting = DecisionTreeRegressorHandWrite()
    # Random Forest Regressor
    rfr_skl_se = RandomForestRegressor(n_estimators=300)
    rfr_skl_ae = RandomForestRegressor(n_estimators=300,criterion="absolute_error")
    rfr_skl_p = RandomForestRegressor(n_estimators=300,criterion="poisson")
    rfr_handwriting_se = myRandomForest(random_state=2, n_estimators=10, max_features=4, max_depth=12, min_change=0.001,min_samples_leaf=1, min_samples_split=2)
    rfr_handwriting_ae = myRandomForest(criterion="MAE", random_state=2, n_estimators=10, max_features=4, max_depth=12, min_change=0.001,min_samples_leaf=1, min_samples_split=2)
    # Multi-Layer Perceptron
    mlp_skl = MLPRegressor(hidden_layer_sizes=(100,70),max_iter=1800)
    mlp_handwriting=MLPHandWrite(network_struct=(data.shape[1],9,5,1),reg_const=1)
    # NOTE: `models` and `names` are parallel lists — keep them in sync.
    models = [lr_skl, lasso_skl, ridge_skl,lr_handwriting,lasso_handwriting, ridge_handwriting,enet_skl,enet_handwriting,
    dtr_skl_mse, dtr_skl_fmse, dtr_skl_mae, dtr_handwriting,
    rfr_skl_se,rfr_skl_ae,rfr_skl_p,rfr_handwriting_se,rfr_handwriting_ae,
    mlp_skl,mlp_handwriting]
    names = ["Linear Regression from sklearn", "Lasso Regression from sklearn", "Ridge Regression from sklearn",
    "Linear Regression writing by hand", "Lasso Regression writing by hand", "Ridge Regression writing by hand",
    "ElasticNet Regression from sklearn","ElasticNet Regression writing by hand",
    "Decision Tree Regressor from sklearn(squared_error)", "Decision Tree Regressor from sklearn(friedman_mse)",
    "Decision Tree Regressor from sklearn(absolute_error)","Decision Tree Regressor writing by hand",
    "Random Forest Regressor from sklearn(squared_error)","Random Forest Regressor from sklearn(absolute_error)",
    "Random Forest Regressor from sklearn(poisson)","Random Forest Regressor written by hand(squared_error)",
    "Random Forest Regressor written by hand(absolute_error)","Multi-Layer Perceptron Regressor from sklearn",
    "Multi-Layer Perceptron writing by hand"]
    mses = []
    R2_score=[]
    times=[]
    import time
    for i in range(len(models)):
        # 5-fold cross-validation, test set takes 20%
        print(names[i])
        tic = time.time()
        R2_score.append(Regression(models[i],data,target,splits=5,size=0.2,model_name=names[i]))
        toc = time.time()
        mses.append(mean_squared_error(target, models[i].predict(data)))
        times.append((toc-tic)*1000)
    timeLabel = copy.deepcopy(times)
    timeLabel[-1] = ">1e6"
    # The hand-written MLP was not trained here — a saved model was loaded
    # instead (training is too slow) — so its time is zeroed out of the plot.
    times[-1] = 0
    # times[-1]=">1e6"
    Draw(names,mses,title="MSE")
    Draw(names,R2_score,title="R2_score")
    Draw(names,times,title="Time/ms",labels=timeLabel)
if __name__=='__main__':
    url="./Concrete_Data.xls"
    main(url)
|
from sklearn.metrics import mean_squared_error,mean_absolute_error,max_error
from sklearn.linear_model import LinearRegression, Lasso, Ridge,ElasticNet
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.neural_network import MLPRegressor
from DecisionTreeRegressorHandWrite import DecisionTreeRegressorHandWrite
from MLPHandWrite import MLPHandWrite
from LinearModelHandWrite import LinearRegressionHandWrite,LassoHandWrite,RidgeHandWrite,ElasticNetHandWrite
from RandomForestHandWrite import myRandomForest
from Data_preprocessing import Data_preprocessing
from Regression import Regression,Draw
#import numpy as np
import copy
def main(data_url):
data,target=Data_preprocessing(data_url)
#实例化回归模型
# Linear Regression
lr_skl = LinearRegression()
lr_handwriting = LinearRegressionHandWrite()
# Lasso Regression
lasso_skl = Lasso()
lasso_handwriting= LassoHandWrite()
# Ridge Regression
ridge_skl = Ridge()
ridge_handwriting=RidgeHandWrite()
#ElasticNet Regression
enet_skl=ElasticNet()
enet_handwriting=ElasticNetHandWrite()
# Decision Trees
dtr_skl_mse = DecisionTreeRegressor()
dtr_skl_fmse = DecisionTreeRegressor(criterion="friedman_mse")
dtr_skl_mae = DecisionTreeRegressor(criterion='absolute_error')
dtr_handwriting = DecisionTreeRegressorHandWrite()
# Random Forest Regressor
rfr_skl_se = RandomForestRegressor(n_estimators=300)
rfr_skl_ae = RandomForestRegressor(n_estimators=300,criterion="absolute_error")
rfr_skl_p = RandomForestRegressor(n_estimators=300,criterion="poisson")
rfr_handwriting_se = myRandomForest(random_state=2, n_estimators=10, max_features=4, max_depth=12, min_change=0.001,min_samples_leaf=1, min_samples_split=2)
rfr_handwriting_ae = myRandomForest(criterion="MAE", random_state=2, n_estimators=10, max_features=4, max_depth=12, min_change=0.001,min_samples_leaf=1, min_samples_split=2)
# Multi-Layer Perceptron
mlp_skl = MLPRegressor(hidden_layer_sizes=(100,70),max_iter=1800)
mlp_handwriting=MLPHandWrite(network_struct=(data.shape[1],9,5,1),reg_const=1)
models = [lr_skl, lasso_skl, ridge_skl,lr_handwriting,lasso_handwriting, ridge_handwriting,enet_skl,enet_handwriting,
dtr_skl_mse, dtr_skl_fmse, dtr_skl_mae, dtr_handwriting,
rfr_skl_se,rfr_skl_ae,rfr_skl_p,rfr_handwriting_se,rfr_handwriting_ae,
mlp_skl,mlp_handwriting]
names = ["Linear Regression from sklearn", "Lasso Regression from sklearn", "Ridge Regression from sklearn",
"Linear Regression writing by hand", "Lasso Regression writing by hand", "Ridge Regression writing by hand",
"ElasticNet Regression from sklearn","ElasticNet Regression writing by hand",
"Decision Tree Regressor from sklearn(squared_error)", "Decision Tree Regressor from sklearn(friedman_mse)",
"Decision Tree Regressor from sklearn(absolute_error)","Decision Tree Regressor writing by hand",
"Random Forest Regressor from sklearn(squared_error)","Random Forest Regressor from sklearn(absolute_error)",
"Random Forest Regressor from sklearn(poisson)","Random Forest Regressor written by hand(squared_error)",
"Random Forest Regressor written by hand(absolute_error)","Multi-Layer Perceptron Regressor from sklearn",
"Multi-Layer Perceptron writing by hand"]
mses = []
R2_score=[]
times=[]
import time
for i in range(len(models)):
#参数为5折验证,测试集占20%
print(names[i])
tic = time.time()
R2_score.append(Regression(models[i],data,target,splits=5,size=0.2,model_name=names[i]))
toc = time.time()
mses.append(mean_squared_error(target, models[i].predict(data)))
times.append((toc-tic)*1000)
timeLabel = copy.deepcopy(times)
timeLabel[-1] = ">1e6"
times[-1] = 0#手写的多层感知机没有训练,而是直接加载了模型(因为训练速度太慢)
# times[-1]=">1e6"
Draw(names,mses,title="MSE")
Draw(names,R2_score,title="R2_score")
Draw(names,times,title="Time/ms",labels=timeLabel)
if __name__=='__main__':
url="./Concrete_Data.xls"
main(url)
|
zh
| 0.346736
|
#import numpy as np #实例化回归模型 # Linear Regression # Lasso Regression # Ridge Regression #ElasticNet Regression # Decision Trees # Random Forest Regressor # Multi-Layer Perceptron #参数为5折验证,测试集占20% #手写的多层感知机没有训练,而是直接加载了模型(因为训练速度太慢) # times[-1]=">1e6"
| 2.371965
| 2
|
opennem/crawlers/schema.py
|
paulculmsee/opennem
| 0
|
6628648
|
<filename>opennem/crawlers/schema.py
"""OpenNEM Crawler Definitions"""
from datetime import datetime
from enum import Enum
from typing import Callable, List, Optional
from opennem.schema.core import BaseConfig
class CrawlerPriority(Enum):
    """Crawler priority — lower numeric value means higher priority
    (CrawlerSet.get_crawlers_by_schedule sorts ascending on .value)."""

    high = 1
    medium = 5
    low = 10
class CrawlerSchedule(Enum):
    """How often a crawler runs; values are human-readable interval labels."""

    live = "1m"
    frequent = "5m"
    quarter_hour = "15m"
    half_hour = "30m"
    hourly = "1h"
    daily = "1d"
class CrawlerDefinition(BaseConfig):
    """Defines a crawler: identity, source, schedule and processing hook."""

    # Unique crawler name, used for lookups in CrawlerSet.
    name: str
    # Source URL to crawl (optional for processors that derive their own).
    url: Optional[str]
    # Maximum number of items to fetch per run, if set.
    limit: Optional[int]
    # Filter applied to remote filenames, if set.
    filename_filter: Optional[str]
    priority: CrawlerPriority
    schedule: Optional[CrawlerSchedule]

    # crawl metadata
    last_crawled: Optional[datetime]
    last_processed: Optional[datetime]

    # Callable invoked to process crawled data.
    processor: Callable
class CrawlerSet(BaseConfig):
    """A collection of crawler definitions with lookup helpers."""

    crawlers: List[CrawlerDefinition]

    def get_crawler(self, name: str) -> CrawlerDefinition:
        """Return the crawler whose name matches exactly; raise if absent."""
        found = [c for c in self.crawlers if c.name == name]
        if not found:
            raise Exception(f"Could not find crawler {name}")
        # same as list(...).pop(): the last match wins
        return found[-1]

    def get_crawlers_by_match(self, match: str) -> List[CrawlerDefinition]:
        """Return all crawlers whose name contains `match`; raise if none."""
        found = [c for c in self.crawlers if match in c.name]
        if not found:
            raise Exception(f"Could not find crawler matching {match}")
        return found

    def get_crawlers_by_schedule(self, schedule: CrawlerSchedule) -> List[CrawlerDefinition]:
        """Return crawlers on the given schedule, highest priority first."""
        selected = [c for c in self.crawlers if c.schedule == schedule]
        selected.sort(key=lambda c: c.priority.value)
        return selected
|
<filename>opennem/crawlers/schema.py
"""OpenNEM Crawler Definitions"""
from datetime import datetime
from enum import Enum
from typing import Callable, List, Optional
from opennem.schema.core import BaseConfig
class CrawlerPriority(Enum):
high = 1
medium = 5
low = 10
class CrawlerSchedule(Enum):
live = "1m"
frequent = "5m"
quarter_hour = "15m"
half_hour = "30m"
hourly = "1h"
daily = "1d"
class CrawlerDefinition(BaseConfig):
"""Defines a crawler"""
name: str
url: Optional[str]
limit: Optional[int]
filename_filter: Optional[str]
priority: CrawlerPriority
schedule: Optional[CrawlerSchedule]
# crawl metadata
last_crawled: Optional[datetime]
last_processed: Optional[datetime]
processor: Callable
class CrawlerSet(BaseConfig):
"""Defines a set of crawlers"""
crawlers: List[CrawlerDefinition]
def get_crawler(self, name: str) -> CrawlerDefinition:
"""Get a crawler by name"""
_crawler_lookup = list(filter(lambda x: x.name == name, self.crawlers))
if not _crawler_lookup:
raise Exception(f"Could not find crawler {name}")
return _crawler_lookup.pop()
def get_crawlers_by_match(self, match: str) -> List[CrawlerDefinition]:
"""Get crawlers by match"""
_crawler_lookup = list(filter(lambda x: match in x.name, self.crawlers))
if not _crawler_lookup:
raise Exception(f"Could not find crawler matching {match}")
return _crawler_lookup
def get_crawlers_by_schedule(self, schedule: CrawlerSchedule) -> List[CrawlerDefinition]:
return list(
sorted(
filter(lambda x: x.schedule == schedule, self.crawlers),
key=lambda x: x.priority.value,
)
)
|
en
| 0.776938
|
OpenNEM Crawler Definitions Defines a crawler # crawl metadata Defines a set of crawlers Get a crawler by name Get crawlers by match
| 2.749972
| 3
|
pycomicvine/tests/powers.py
|
jbbandos/pycomicvine
| 12
|
6628649
|
import pycomicvine
import datetime
from pycomicvine.tests.utils import *
pycomicvine.api_key = "476302e62d7e8f8f140182e36aebff2fe935514b"
class TestPowersList(ListResourceTestCase):
    """Smoke test for the Powers list resource."""

    def test_get_id_and_name(self):
        # Shared base-class helper asserts list items expose id and name.
        self.get_id_and_name_test(
            pycomicvine.Powers,
            pycomicvine.Power
        )
class TestPowerAttributes(SingularResourceTestCase):
    """Type-checks every attribute of a randomly chosen Power resource."""

    def setUp(self):
        self.get_random_instance(pycomicvine.Powers)

    def test_get_all_attributes(self):
        power = self.get_sample(pycomicvine.Power)
        # Idiom fix: compare against None with `is not`, not `!=`.
        if power is not None:
            self.assertIsInstance(power.aliases, (type(None), list))
            self.assertIsInstance(power.api_detail_url, (type(None), str))
            self.assertIsInstance(power.characters, pycomicvine.Characters)
            self.assertIsInstance(power.date_added, datetime.datetime)
            self.assertIsInstance(power.date_last_updated, datetime.datetime)
            self.assertIsInstance(power.description, (type(None), str))
            self.assertIsInstance(power.id, int)
            self.assertIsInstance(power.name, (type(None), str))
            self.assertIsInstance(power.site_detail_url, (type(None), str))
|
import pycomicvine
import datetime
from pycomicvine.tests.utils import *
pycomicvine.api_key = "476302e62d7e8f8f140182e36aebff2fe935514b"
class TestPowersList(ListResourceTestCase):
def test_get_id_and_name(self):
self.get_id_and_name_test(
pycomicvine.Powers,
pycomicvine.Power
)
class TestPowerAttributes(SingularResourceTestCase):
def setUp(self):
self.get_random_instance(pycomicvine.Powers)
def test_get_all_attributes(self):
power = self.get_sample(pycomicvine.Power)
if power != None:
self.assertIsInstance(
power.aliases,
(type(None),list)
)
self.assertIsInstance(
power.api_detail_url,
(type(None),str)
)
self.assertIsInstance(
power.characters,
pycomicvine.Characters
)
self.assertIsInstance(
power.date_added,
datetime.datetime
)
self.assertIsInstance(
power.date_last_updated,
datetime.datetime
)
self.assertIsInstance(
power.description,
(type(None),str)
)
self.assertIsInstance(
power.id,
int
)
self.assertIsInstance(
power.name,
(type(None),str)
)
self.assertIsInstance(
power.site_detail_url,
(type(None),str)
)
|
none
| 1
| 2.117638
| 2
|
|
recording/utils/common_utils.py
|
chrelli/3DDD_social_mouse_tracker
| 1
|
6628650
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 30 15:54:04 2018
@author: chrelli
"""
#%% Import the nescessary stuff
# basic OS stuff
import time, os, sys, shutil
# for math and plotting
import pandas as pd
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
# small utilities
import csv
from colour import Color
from itertools import compress # for list selection with logical
from tqdm import tqdm
# for image manipulation
import cv2
# for recording and connecting to the intel realsense librar
#import pyrealsense as pyrs
#import multiprocessing
import multiprocessing
from multiprocessing import Process
#%% Small handy functions for folders
def print_c_cores():
    """Print how many CPU cores multiprocessing reports for this machine."""
    n_cores = multiprocessing.cpu_count()
    print('This machine has ' + str(n_cores) + ' available cpu cores.')
def check_folder_if_present(this_path):
    """Verify that this_path is an existing directory; exit(1) if not."""
    if not os.path.isdir(this_path):
        print('ERROR: ' + this_path + ' was not detected!')
        sys.exit(1)
    print(this_path + ' was detected!')
def reset_folder_if_present(this_path):
    """Delete this_path if it exists, then recreate it as an empty folder."""
    if os.path.isdir(this_path):
        shutil.rmtree(this_path)
        # brief pause so the filesystem settles before recreating
        time.sleep(0.1)
        print(this_path + ' was deleted!')
    os.mkdir(this_path)
#%% firmata stuff
# get the serial port of the arduino for firmata
def get_serial_port():
    """Return the first /dev/ttyAC* device path, or None if none exist."""
    for dev in os.listdir('/dev'):
        if dev.startswith('ttyAC'):
            return '/dev/' + dev
    return None
#%% plotting tools
def bare_plot3(a,b,c,mark="o",col="r"):
    """Minimal interactive 3-D scatter plot of points (a, b, c).

    Args:
        a, b, c: sequences of x, y, z coordinates.
        mark: matplotlib marker style.
        col: matplotlib color.
    """
    # very simple plot3 version
    # Imports kept local so matplotlib is only pulled in when plotting.
    from matplotlib import pyplot
    import pylab
    from mpl_toolkits.mplot3d import Axes3D
    pylab.ion()  # interactive mode: show without blocking
    fig = pylab.figure()
    ax = Axes3D(fig)
    ax.scatter(a, b, c,marker=mark,color=col)
    ax.set_xlabel('X axis')
    ax.set_ylabel('Y axis')
    ax.set_zlabel('Z axis')
#%% Functions for loading cad images and parameters
#TODO resolve is there is any conflict here!!
def read_hsv_file(which_device,top_folder): # kill this one??
    """Read the saved HSV and gray filter values for one camera.

    Expects '<top_folder>/hsv_values_<which_device>.csv' holding eight
    integers: h_lo, h_hi, s_lo, s_hi, v_lo, v_hi, gray_cut, blur_amount.

    Returns (hsv_values, gray_values) as int arrays of length 6 and 2.
    Exits the process with status 1 if the file is missing.
    """
    this_name = top_folder+'/hsv_values_'+str(which_device)+'.csv'
    if not os.path.exists(this_name):
        print('ERROR: '+ this_name+' not found!')
        # was sys.exit(0): status 0 signals success to the shell; use 1
        # for errors, consistent with check_folder_if_present above
        sys.exit(1)
    raw_values = np.genfromtxt(this_name, delimiter=',',dtype='int' )
    print(raw_values)
    hsv_values = raw_values[0:6]
    gray_values = raw_values[6:8]
    return hsv_values,gray_values
def check_for_hsv_file(which_device,top_folder):
    """Load HSV/gray filter values from file if present, else defaults.

    Starts from guess values known to be a good starting point and
    overrides them with '<top_folder>/hsv_values_<which_device>.csv'
    when that file exists and is non-empty.

    Returns (hsv_values, gray_values) as int arrays of length 6 and 2.
    """
    # guess values, good starting point.  (The wide-open defaults
    # [0,179,0,255,0,255] / [1,1] that used to precede these were dead
    # code — immediately overwritten — and have been removed.)
    hsv_values = np.array([0,1,0,255,117,255])
    gray_values = np.array([200,11])
    this_name = top_folder+'/hsv_values_'+str(which_device)+'.csv'
    if os.path.exists(this_name):
        raw_values = np.genfromtxt(this_name, delimiter=',',dtype='int' )
        print(raw_values)
        if raw_values.size > 0:
            # only override when the file is not empty by mistake
            hsv_values = raw_values[0:6]
            gray_values = raw_values[6:8]
    return hsv_values, gray_values
def read_cam_params(which_device,top_folder):
    """Read the camera parameters of one camera.

    Expects '<top_folder>/parameters_<which_device>.csv' with a header
    row followed by one row of values:
    fx, fy, ppx, ppy, depth_scale, fps_choice, frame_width, frame_height.

    Returns that value row as a float array.
    Exits the process with status 1 if the file is missing.
    """
    this_name = top_folder+'/parameters_'+str(which_device)+'.csv'
    if not os.path.exists(this_name):
        print('ERROR: '+ this_name+' not found!')
        # was sys.exit(0): use a nonzero status to signal failure
        sys.exit(1)
    # row 0 is the text header; row 1 holds the numeric values.
    # (the old unused fps_choice/frame_width/frame_height locals removed)
    cam_params = np.genfromtxt(this_name, delimiter=',')[1,:]
    return cam_params
def get_file_shortlist(which_device,top_folder,image_type):
    """List the image files for one device in top_folder, sorted by name.

    Matches files named 'dev<which_device>_<image_type>_*'.

    TODO (kept from original): sorting is lexicographic on zero-padded
    frame numbers; if the padding digits ever overflow, sort numerically.
    """
    prefix = 'dev'+str(which_device)+'_'+image_type+'_'
    # a plain comprehension replaces the numpy float mask +
    # itertools.compress of the original; sorting after filtering is
    # equivalent to filtering after sorting
    return sorted(name for name in os.listdir(top_folder)
                  if name.startswith(prefix))
def load_data(which_device,top_folder):
    """Load one device's central-point trace as cleaned x, y, z coordinates."""
    fname = top_folder+'/central_point_'+str(which_device)+'.csv'
    raw_data = np.genfromtxt(fname, delimiter=',')
    frame = raw_data[:,0]
    x, y, z, r = raw_data[:,1], raw_data[:,2], raw_data[:,3], raw_data[:,4]
    # drop implausible ranges, then push points out by the ball radius
    frame,x,y,z,r = clean_by_r(frame,x,y,z,r)
    x,y,z = add_ping_pong_radius(x,y,z,r)
    return x,y,z
#%% Functions to do filtering of image masks
def mask_stepper(c,hsv_values,gray_values,fgmask):
    """Filter a BGR frame down to a binary mask.

    Applies the foreground mask, keeps pixels inside the HSV window
    [h_lo..h_hi, s_lo..s_hi, v_lo..v_hi], Gaussian-blurs the result and
    finally thresholds it at the gray cut value.
    """
    hsv_low = hsv_values[[0,2,4]]
    hsv_high = hsv_values[[1,3,5]]
    gcut = gray_values[0]
    blur_amount = gray_values[1]
    if blur_amount % 2 == 0:
        blur_amount += 1  # GaussianBlur requires an odd kernel size
    # keep only the foreground, then move to HSV space
    masked = cv2.bitwise_and(c, c, mask=fgmask)
    hsv = cv2.cvtColor(masked, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, hsv_low, hsv_high)
    mask = cv2.GaussianBlur(mask, (blur_amount, blur_amount), 0)
    # the lower bound must be a float here, not an int - cv2 quirk
    mask = cv2.inRange(mask, gcut.astype('float64'), 255)
    return mask
# get the largest region in the image, and fill it!
def fill_largest_region(image_input):
    """Return a binary mask with the largest contour of image_input filled.

    If no contour with positive area is found, an all-zero mask is
    returned (the original passed the integer 0 to cv2.fillPoly in that
    case, which raises).

    NOTE(review): cv2.findContours returns 3 values in OpenCV 3.x, as
    unpacked here; OpenCV 4.x returns only (contours, hierarchy).
    """
    im, contours, hierarchy = cv2.findContours(image_input,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE )
    mask = np.zeros_like(image_input)
    maxContour = 0
    maxContourData = None
    # loop over the contours and keep the one with the largest area
    for contour in contours:
        contourSize = cv2.contourArea(contour)
        if contourSize > maxContour:
            maxContour = contourSize
            maxContourData = contour
    # only fill when a contour was actually found
    if maxContourData is not None:
        cv2.fillPoly(mask,[maxContourData],1)
    return mask
#%% small functions to do gymnastics with the point clouds
def clean_by_pd(pi,pj,pd):
    """Drop pixels whose depth reading is zero (invalid depth)."""
    valid = np.where(pd > 0)
    return pi[valid], pj[valid], pd[valid]
# conver the pi,pj,pd = pixel_i,pixel_j,pixel_depth to xyz
def pixel_2_world(pi,pj,dij,cam_params):
    """Back-project pixel coordinates and raw depth to camera-space meters.

    pi, pj, dij are vectors of pixel row, pixel column and raw depth.
    cam_params holds fx, fy, ppx, ppy, depth_scale, fps, width, height;
    only the first five are used here.  Output x_m, y_m, z_m is in
    meters (raw depth units times the depth scale).
    """
    fx = cam_params[0]
    fy = cam_params[1]
    ppx = cam_params[2]
    ppy = cam_params[3]
    depth_scale = cam_params[4]
    # raw depth units -> meters
    z_m = dij*depth_scale
    # pinhole camera model (note: no +0.5 half-pixel offset applied)
    x_m = (pj - ppx) * z_m / fx
    y_m = (pi - ppy) * z_m / fy
    return x_m,y_m,z_m
def world_2_range(x_m,y_m,z_m):
    """Return the range (Euclidean distance from the sensor) of each point."""
    # norm over axis 0 of the stacked coordinates = sqrt(x^2 + y^2 + z^2)
    return np.linalg.norm([x_m, y_m, z_m], axis=0)
#%%some helper functions for handling calibration traces
def clean_by_r(frame,x,y,z,r):
    """Keep only samples whose range r lies in the plausible 0.5-1.5 m band.

    Returns the filtered (frame, x, y, z, r) arrays.
    """
    # '&' is the idiomatic elementwise boolean AND (the original used '*',
    # which multiplies the boolean arrays - same result, less clear)
    index_vector = np.where((r > 0.5) & (r < 1.5))
    return frame[index_vector],x[index_vector],y[index_vector],z[index_vector],r[index_vector]
def add_ping_pong_radius(x,y,z,r):
    """Extend each point's range by the ping-pong ball radius (2 cm).

    Scaling a position vector by (1 + radius/r) lengthens its range by
    exactly `radius` - presumably shifting the visible ball surface to
    the ball center (TODO confirm). Points with r <= 0 are left untouched.
    """
    radius = 0.02 # m
    points = np.vstack((x,y,z))
    valid = r > 0
    points[:, valid] = points[:, valid] * (1 + radius / r[valid])
    return points[0,:], points[1,:], points[2,:]
def load_central_point(which_device,top_folder):
    """Load a device's central-point trace as cleaned x, y, z.

    NOTE(review): byte-for-byte duplicate of load_data above - a
    candidate for consolidation.
    """
    fname = top_folder+'/central_point_'+str(which_device)+'.csv'
    raw_data = np.genfromtxt(fname, delimiter=',')
    frame = raw_data[:,0]
    x, y, z, r = raw_data[:,1], raw_data[:,2], raw_data[:,3], raw_data[:,4]
    frame,x,y,z,r = clean_by_r(frame,x,y,z,r)
    x,y,z = add_ping_pong_radius(x,y,z,r)
    return x,y,z
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 30 15:54:04 2018
@author: chrelli
"""
#%% Import the necessary stuff
# basic OS stuff
import time, os, sys, shutil
# for math and plotting
import pandas as pd
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
# small utilities
import csv
from colour import Color
from itertools import compress # for list selection with logical
from tqdm import tqdm
# for image manipulation
import cv2
# for recording and connecting to the intel realsense library
#import pyrealsense as pyrs
#import multiprocessing
import multiprocessing
from multiprocessing import Process
#%% Small handy functions for folders
def print_c_cores():
    """Report how many CPU cores this machine exposes."""
    n_cores = multiprocessing.cpu_count()
    print('This machine has '+str(n_cores)+' available cpu cores.')
def check_folder_if_present(this_path):
    """Verify that this_path exists as a directory; exit(1) if it does not."""
    # guard clause: bail out of the whole process on a missing folder
    if not os.path.isdir(this_path):
        print('ERROR: ' +this_path+' was not detected!')
        sys.exit(1)
    print(this_path+' was detected!')
def reset_folder_if_present(this_path):
    """Wipe this_path if it already exists, then recreate it empty."""
    if os.path.isdir(this_path):
        shutil.rmtree(this_path)
        # brief pause so the filesystem settles before recreating
        time.sleep(0.1)
        print(this_path+' was deleted!')
    os.mkdir(this_path)
#%% firmata stuff
# get the serial port of the arduino for firmata
def get_serial_port():
    """Return the first '/dev/ttyAC*' device (Arduino), or None if absent."""
    # same result as collecting all matches and taking the first:
    # os.listdir order is preserved either way
    for dev in os.listdir('/dev'):
        if dev.startswith('ttyAC'):
            return '/dev/'+dev
    return None
#%% plotting tools
def bare_plot3(a,b,c,mark="o",col="r"):
    """Minimal interactive 3D scatter plot of the points (a, b, c)."""
    from matplotlib import pyplot
    import pylab
    from mpl_toolkits.mplot3d import Axes3D
    pylab.ion()  # interactive mode so the window does not block
    figure = pylab.figure()
    axes3d = Axes3D(figure)
    axes3d.scatter(a, b, c, marker=mark, color=col)
    axes3d.set_xlabel('X axis')
    axes3d.set_ylabel('Y axis')
    axes3d.set_zlabel('Z axis')
#%% Functions for loading cad images and parameters
#TODO resolve is there is any conflict here!!
def read_hsv_file(which_device,top_folder): # kill this one??
    """Read the saved HSV and gray filter values for one camera.

    Expects '<top_folder>/hsv_values_<which_device>.csv' holding eight
    integers: h_lo, h_hi, s_lo, s_hi, v_lo, v_hi, gray_cut, blur_amount.

    Returns (hsv_values, gray_values) as int arrays of length 6 and 2.
    Exits the process with status 1 if the file is missing.
    """
    this_name = top_folder+'/hsv_values_'+str(which_device)+'.csv'
    if not os.path.exists(this_name):
        print('ERROR: '+ this_name+' not found!')
        # was sys.exit(0): status 0 signals success to the shell; use 1
        # for errors, consistent with check_folder_if_present above
        sys.exit(1)
    raw_values = np.genfromtxt(this_name, delimiter=',',dtype='int' )
    print(raw_values)
    hsv_values = raw_values[0:6]
    gray_values = raw_values[6:8]
    return hsv_values,gray_values
def check_for_hsv_file(which_device,top_folder):
    """Load HSV/gray filter values from file if present, else defaults.

    Starts from guess values known to be a good starting point and
    overrides them with '<top_folder>/hsv_values_<which_device>.csv'
    when that file exists and is non-empty.

    Returns (hsv_values, gray_values) as int arrays of length 6 and 2.
    """
    # guess values, good starting point.  (The wide-open defaults
    # [0,179,0,255,0,255] / [1,1] that used to precede these were dead
    # code — immediately overwritten — and have been removed.)
    hsv_values = np.array([0,1,0,255,117,255])
    gray_values = np.array([200,11])
    this_name = top_folder+'/hsv_values_'+str(which_device)+'.csv'
    if os.path.exists(this_name):
        raw_values = np.genfromtxt(this_name, delimiter=',',dtype='int' )
        print(raw_values)
        if raw_values.size > 0:
            # only override when the file is not empty by mistake
            hsv_values = raw_values[0:6]
            gray_values = raw_values[6:8]
    return hsv_values, gray_values
def read_cam_params(which_device,top_folder):
    """Read the camera parameters of one camera.

    Expects '<top_folder>/parameters_<which_device>.csv' with a header
    row followed by one row of values:
    fx, fy, ppx, ppy, depth_scale, fps_choice, frame_width, frame_height.

    Returns that value row as a float array.
    Exits the process with status 1 if the file is missing.
    """
    this_name = top_folder+'/parameters_'+str(which_device)+'.csv'
    if not os.path.exists(this_name):
        print('ERROR: '+ this_name+' not found!')
        # was sys.exit(0): use a nonzero status to signal failure
        sys.exit(1)
    # row 0 is the text header; row 1 holds the numeric values.
    # (the old unused fps_choice/frame_width/frame_height locals removed)
    cam_params = np.genfromtxt(this_name, delimiter=',')[1,:]
    return cam_params
def get_file_shortlist(which_device,top_folder,image_type):
    """List the image files for one device in top_folder, sorted by name.

    Matches files named 'dev<which_device>_<image_type>_*'.

    TODO (kept from original): sorting is lexicographic on zero-padded
    frame numbers; if the padding digits ever overflow, sort numerically.
    """
    prefix = 'dev'+str(which_device)+'_'+image_type+'_'
    # a plain comprehension replaces the numpy float mask +
    # itertools.compress of the original; sorting after filtering is
    # equivalent to filtering after sorting
    return sorted(name for name in os.listdir(top_folder)
                  if name.startswith(prefix))
def load_data(which_device,top_folder):
    """Load one device's central-point trace as cleaned x, y, z coordinates."""
    fname = top_folder+'/central_point_'+str(which_device)+'.csv'
    raw_data = np.genfromtxt(fname, delimiter=',')
    frame = raw_data[:,0]
    x, y, z, r = raw_data[:,1], raw_data[:,2], raw_data[:,3], raw_data[:,4]
    # drop implausible ranges, then push points out by the ball radius
    frame,x,y,z,r = clean_by_r(frame,x,y,z,r)
    x,y,z = add_ping_pong_radius(x,y,z,r)
    return x,y,z
#%% Functions to do filtering of image masks
def mask_stepper(c,hsv_values,gray_values,fgmask):
    """Filter a BGR frame down to a binary mask.

    Applies the foreground mask, keeps pixels inside the HSV window
    [h_lo..h_hi, s_lo..s_hi, v_lo..v_hi], Gaussian-blurs the result and
    finally thresholds it at the gray cut value.
    """
    hsv_low = hsv_values[[0,2,4]]
    hsv_high = hsv_values[[1,3,5]]
    gcut = gray_values[0]
    blur_amount = gray_values[1]
    if blur_amount % 2 == 0:
        blur_amount += 1  # GaussianBlur requires an odd kernel size
    # keep only the foreground, then move to HSV space
    masked = cv2.bitwise_and(c, c, mask=fgmask)
    hsv = cv2.cvtColor(masked, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, hsv_low, hsv_high)
    mask = cv2.GaussianBlur(mask, (blur_amount, blur_amount), 0)
    # the lower bound must be a float here, not an int - cv2 quirk
    mask = cv2.inRange(mask, gcut.astype('float64'), 255)
    return mask
# get the largest region in the image, and fill it!
def fill_largest_region(image_input):
    """Return a binary mask with the largest contour of image_input filled.

    If no contour with positive area is found, an all-zero mask is
    returned (the original passed the integer 0 to cv2.fillPoly in that
    case, which raises).

    NOTE(review): cv2.findContours returns 3 values in OpenCV 3.x, as
    unpacked here; OpenCV 4.x returns only (contours, hierarchy).
    """
    im, contours, hierarchy = cv2.findContours(image_input,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE )
    mask = np.zeros_like(image_input)
    maxContour = 0
    maxContourData = None
    # loop over the contours and keep the one with the largest area
    for contour in contours:
        contourSize = cv2.contourArea(contour)
        if contourSize > maxContour:
            maxContour = contourSize
            maxContourData = contour
    # only fill when a contour was actually found
    if maxContourData is not None:
        cv2.fillPoly(mask,[maxContourData],1)
    return mask
#%% small functions to do gymnastics with the point clouds
def clean_by_pd(pi,pj,pd):
    """Drop pixels whose depth reading is zero (invalid depth)."""
    valid = np.where(pd > 0)
    return pi[valid], pj[valid], pd[valid]
# conver the pi,pj,pd = pixel_i,pixel_j,pixel_depth to xyz
def pixel_2_world(pi,pj,dij,cam_params):
    """Back-project pixel coordinates and raw depth to camera-space meters.

    pi, pj, dij are vectors of pixel row, pixel column and raw depth.
    cam_params holds fx, fy, ppx, ppy, depth_scale, fps, width, height;
    only the first five are used here.  Output x_m, y_m, z_m is in
    meters (raw depth units times the depth scale).
    """
    fx = cam_params[0]
    fy = cam_params[1]
    ppx = cam_params[2]
    ppy = cam_params[3]
    depth_scale = cam_params[4]
    # raw depth units -> meters
    z_m = dij*depth_scale
    # pinhole camera model (note: no +0.5 half-pixel offset applied)
    x_m = (pj - ppx) * z_m / fx
    y_m = (pi - ppy) * z_m / fy
    return x_m,y_m,z_m
def world_2_range(x_m,y_m,z_m):
    """Return the range (Euclidean distance from the sensor) of each point."""
    # norm over axis 0 of the stacked coordinates = sqrt(x^2 + y^2 + z^2)
    return np.linalg.norm([x_m, y_m, z_m], axis=0)
#%%some helper functions for handling calibration traces
def clean_by_r(frame,x,y,z,r):
    """Keep only samples whose range r lies in the plausible 0.5-1.5 m band.

    Returns the filtered (frame, x, y, z, r) arrays.
    """
    # '&' is the idiomatic elementwise boolean AND (the original used '*',
    # which multiplies the boolean arrays - same result, less clear)
    index_vector = np.where((r > 0.5) & (r < 1.5))
    return frame[index_vector],x[index_vector],y[index_vector],z[index_vector],r[index_vector]
def add_ping_pong_radius(x,y,z,r):
    """Extend each point's range by the ping-pong ball radius (2 cm).

    Scaling a position vector by (1 + radius/r) lengthens its range by
    exactly `radius` - presumably shifting the visible ball surface to
    the ball center (TODO confirm). Points with r <= 0 are left untouched.
    """
    radius = 0.02 # m
    points = np.vstack((x,y,z))
    valid = r > 0
    points[:, valid] = points[:, valid] * (1 + radius / r[valid])
    return points[0,:], points[1,:], points[2,:]
def load_central_point(which_device,top_folder):
    """Load a device's central-point trace as cleaned x, y, z.

    NOTE(review): byte-for-byte duplicate of load_data above - a
    candidate for consolidation.
    """
    fname = top_folder+'/central_point_'+str(which_device)+'.csv'
    raw_data = np.genfromtxt(fname, delimiter=',')
    frame = raw_data[:,0]
    x, y, z, r = raw_data[:,1], raw_data[:,2], raw_data[:,3], raw_data[:,4]
    frame,x,y,z,r = clean_by_r(frame,x,y,z,r)
    x,y,z = add_ping_pong_radius(x,y,z,r)
    return x,y,z
|
en
| 0.789842
|
#!/usr/bin/env python2 # -*- coding: utf-8 -*- Created on Tue Jan 30 15:54:04 2018 @author: chrelli #%% Import the nescessary stuff # basic OS stuff # for math and plotting # small utilities # for list selection with logical # for image manipulation # for recording and connecting to the intel realsense librar #import pyrealsense as pyrs #import multiprocessing #%% Small handy functions for folders #%% firmata stuff # get the serial port of the arduino for firmata #%% plotting tools # very simple plot3 version #%% Functions for loading cad images and parameters #TODO resolve is there is any conflict here!! # kill this one?? # MAKE A FULLFILE # reads the hsv and gray values after filering # these are the default values # these are the some guess values, good starting point # only get from text if it is not empty by mistake # reads the camera parameters of that camera # TODO check if the padding digits have overflown!!!! if yes, do proper sorting by number! # list of files in the folder, specific to images! # sort the list #%% Functions to do filtering of image masks # takes the a cad in BGR as an input and returns the mask after filtering # make sure it's odd #convert to HSV from BGR #apply the range on a mask # and blur # for some reason the gcut has to be a float, bot an int - cv2 bug? # get the largest region in the image, and fill it! # Find the largest contour and fill it # loop over the contours and get the size, and the max # Create a mask from the largest contour #%% small fucntions to do gymnastics with the point clouds # function which cleans the data of instances where the depth is zero # conver the pi,pj,pd = pixel_i,pixel_j,pixel_depth to xyz # takes the pi pj pd as vectors # the cam params are fx,fx,ppx,ppy,d_scale,fps_choice,frame_width,frame_height # to calculate in mm, multiply with the depth scale # WAIT this is not in mm at all - this is in meters! 
# and now use pinhole cam function to get the x and y # remember the half is positive because of python! # x_m = (pj + .5 - ppx) * z_m / fx # y_m = (pi + .5 - ppy) * z_m / fy # calculates the range from the x,y,z values #%%some helper functions for handling calibration traces # m # rescale all the poins where r>0
| 2.352435
| 2
|