hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c339ba67336286373b0d53c302814b598d69ff3 | 199 | py | Python | src/dgl_gcn/gcn/__init__.py | zawaki/nara_revision | 28bb42f7ca3a768075748d258c405addc7b28c31 | [
"MIT"
] | null | null | null | src/dgl_gcn/gcn/__init__.py | zawaki/nara_revision | 28bb42f7ca3a768075748d258c405addc7b28c31 | [
"MIT"
] | null | null | null | src/dgl_gcn/gcn/__init__.py | zawaki/nara_revision | 28bb42f7ca3a768075748d258c405addc7b28c31 | [
"MIT"
] | null | null | null | from gcn.aggregators import *
from gcn.model import *
from gcn.supervised_train import *
from gcn.unsupervised_train import *
from gcn.random_walk_train import *
from gcn.tensorboard_writer import *
| 28.428571 | 36 | 0.819095 | from gcn.aggregators import *
from gcn.model import *
from gcn.supervised_train import *
from gcn.unsupervised_train import *
from gcn.random_walk_train import *
from gcn.tensorboard_writer import *
| true | true |
1c339c111d61c125ec834d29e84559a73518fcde | 8,544 | py | Python | day-28-pomodoro-and-dammits/dammits.py | jskolnicki/100-Days-of-Python | 146af2b73914a525121f1c91737abd4857dc2f89 | [
"CNRI-Python"
] | null | null | null | day-28-pomodoro-and-dammits/dammits.py | jskolnicki/100-Days-of-Python | 146af2b73914a525121f1c91737abd4857dc2f89 | [
"CNRI-Python"
] | null | null | null | day-28-pomodoro-and-dammits/dammits.py | jskolnicki/100-Days-of-Python | 146af2b73914a525121f1c91737abd4857dc2f89 | [
"CNRI-Python"
] | null | null | null | import tkinter
import os
import pandas as pd
import datetime
import csv
# Run relative to this script's directory so "dammits.csv" resolves correctly.
os.chdir(os.path.dirname(__file__))

window = tkinter.Tk()
window.title("Dammit Counter")

#variables
# Weekly tallies: one CSV row per week, columns are [Week, yikes, dammits].
dammits_db = pd.read_csv("dammits.csv")
dammits_db['Week'] = pd.to_datetime(dammits_db['Week'])

today = datetime.date.today()
# NOTE(review): "today" is shifted 2 days forward — looks like a manual
# testing hack (see the TODO at the bottom of the file); confirm before use.
today = today + datetime.timedelta(days= 2)
# The display starts on the most recent week stored in the CSV.
start_of_week = pd.to_datetime(dammits_db['Week'].to_list()[-1])
current_week_index = int(dammits_db['Week'][dammits_db['Week'] == start_of_week].index[0])
num_of_yikes = dammits_db.iloc[current_week_index, 1]
num_of_dammits = dammits_db.iloc[current_week_index, 2]
#FUNCTIONS
def increase_dammits():
    """Add one dammit to the currently displayed week, persist it to the
    CSV, reload the DataFrame, and refresh the UI."""
    global current_week_index, dammits_db
    with open("dammits.csv") as src:
        rows = list(csv.reader(src))
    # Row offset of +1 skips the CSV header line.
    rows[current_week_index + 1][2] = int(rows[current_week_index + 1][2]) + 1
    with open("dammits.csv", "w", newline = "") as dst:
        csv.writer(dst).writerows(rows)
    dammits_db = pd.read_csv("dammits.csv")
    dammits_db['Week'] = pd.to_datetime(dammits_db['Week'])
    update_board()
def decrease_dammits():
    """Subtract one dammit from the currently displayed week, persist it to
    the CSV, reload the DataFrame, and refresh the UI."""
    global current_week_index, dammits_db
    with open("dammits.csv") as src:
        rows = list(csv.reader(src))
    # Row offset of +1 skips the CSV header line.
    rows[current_week_index + 1][2] = int(rows[current_week_index + 1][2]) - 1
    with open("dammits.csv", "w", newline = "") as dst:
        csv.writer(dst).writerows(rows)
    dammits_db = pd.read_csv("dammits.csv")
    dammits_db['Week'] = pd.to_datetime(dammits_db['Week'])
    update_board()
def increase_yikes():
    """Add one yikes to the currently displayed week, persist it to the
    CSV, reload the DataFrame, and refresh the UI."""
    global current_week_index, dammits_db
    with open("dammits.csv") as src:
        rows = list(csv.reader(src))
    # Row offset of +1 skips the CSV header line.
    rows[current_week_index + 1][1] = int(rows[current_week_index + 1][1]) + 1
    with open("dammits.csv", "w", newline = "") as dst:
        csv.writer(dst).writerows(rows)
    dammits_db = pd.read_csv("dammits.csv")
    dammits_db['Week'] = pd.to_datetime(dammits_db['Week'])
    update_board()
def decrease_yikes():
    """Subtract one yikes from the currently displayed week, persist it to
    the CSV, reload the DataFrame, and refresh the UI."""
    global current_week_index, dammits_db
    with open("dammits.csv") as src:
        rows = list(csv.reader(src))
    # Row offset of +1 skips the CSV header line.
    rows[current_week_index + 1][1] = int(rows[current_week_index + 1][1]) - 1
    with open("dammits.csv", "w", newline = "") as dst:
        csv.writer(dst).writerows(rows)
    dammits_db = pd.read_csv("dammits.csv")
    dammits_db['Week'] = pd.to_datetime(dammits_db['Week'])
    update_board()
def update_board():
    """Refresh the counter labels, the week-range label, and the visibility
    of the navigation arrows for the week at ``current_week_index``."""
    global current_week_index
    num_of_yikes = dammits_db.iloc[current_week_index, 1]
    num_of_yikes_label.config(text=f"{num_of_yikes}")
    num_of_dammits = dammits_db.iloc[current_week_index, 2]
    num_of_dammits_label.config(text=f"{num_of_dammits}")
    # Week range shown as "start - start+6 days".
    week_of_label.config(text=f"{dammits_db.iloc[current_week_index,0].strftime('%m/%d/%Y')} - {(dammits_db['Week'][current_week_index] + datetime.timedelta(days=6)).strftime('%m/%d/%Y')}")
    # Hide the back arrow at the first week; hide the forward arrow when on
    # the last stored week and that week is still the current one.
    if current_week_index == 0:
        previous_week_button.grid_remove()
    elif (current_week_index == len(dammits_db['Week'])-1) and (pd.Timestamp(today - datetime.timedelta(days= 7)) < dammits_db['Week'].to_list()[-1]):
        next_week_button.grid_remove()
    else:
        previous_week_button.grid()
        next_week_button.grid()
    # Debug output left in by the author.
    print(f"Current week index: {current_week_index}")
    print(f"Total number of weeks: {len(dammits_db['Week'])-1}")
def next_week():
    """Advance the display one week forward; when stepping past the last
    recorded week, append a fresh zeroed row to the CSV first."""
    global current_week_index, num_of_dammits, num_of_dammits_label, num_of_yikes, num_of_yikes_label, start_of_week, week_of_label, dammits_db
    # Debug output left in by the author.
    print("")
    print(f"current week index: {current_week_index}")
    print(f"start_of_week: {start_of_week}")
    print("")
    if current_week_index < len(dammits_db['Week'])-1:
        # A later week already exists in the CSV — just move to it.
        current_week_index += 1
        num_of_yikes = dammits_db.iloc[current_week_index, 1]
        num_of_dammits = dammits_db.iloc[current_week_index, 2]
        update_board()
    elif pd.Timestamp(today - datetime.timedelta(days= 7)) >= dammits_db['Week'].to_list()[-1]:
        # Latest stored week is at least a week old: append a new row dated
        # the Monday of "today" with zeroed counts, then reload and advance.
        with open('dammits.csv', 'a', newline = "") as file:
            writer_object = csv.writer(file)
            date_to_append = (pd.to_datetime(today + datetime.timedelta(days=-today.weekday())).strftime('%Y-%m-%d'))
            writer_object.writerow([date_to_append,0,0])
        file.close()  # redundant after the with-block; kept as-is
        dammits_db = pd.read_csv("dammits.csv")
        dammits_db['Week'] = pd.to_datetime(dammits_db['Week'])
        current_week_index += 1
        num_of_yikes = dammits_db.iloc[current_week_index, 1]
        num_of_dammits = dammits_db.iloc[current_week_index, 2]
        update_board()
# num_of_yikes = dammits_db.iloc[current_week_index, 1]
# num_of_yikes_label.config(text=f"{num_of_yikes}")
# num_of_dammits = dammits_db.iloc[current_week_index, 2]
# num_of_dammits_label.config(text=f"{num_of_dammits}")
# week_of_label.config(text=f"{dammits_db.iloc[current_week_index,0].strftime('%m/%d/%Y')} - {(dammits_db['Week'][current_week_index] + datetime.timedelta(days=6)).strftime('%m/%d/%Y')}")
def previous_week():
    """Step the display back one week, if an earlier week exists."""
    global current_week_index, num_of_dammits, num_of_yikes, num_of_yikes_label, num_of_dammits_label
    if current_week_index <= 0:
        # Already at the earliest recorded week — nothing to do.
        return
    current_week_index -= 1
    num_of_yikes = dammits_db.iloc[current_week_index, 1]
    num_of_dammits = dammits_db.iloc[current_week_index, 2]
    update_board()
# num_of_yikes = dammits_db.iloc[current_week_index, 1]
# num_of_yikes_label.config(text=f"{num_of_yikes}")
# num_of_dammits = dammits_db.iloc[current_week_index, 2]
# num_of_dammits_label.config(text=f"{num_of_dammits}")
# week_of_label.config(text=f"{dammits_db.iloc[current_week_index,0].strftime('%m/%d/%Y')} - {(dammits_db['Week'][current_week_index] + datetime.timedelta(days=6)).strftime('%m/%d/%Y')}")
#print(f"Test: {pd.to_datetime(start_of_week + datetime.timedelta(days=6)).strftime(('%m/%d/%Y'))}")
print(f"Test: {(start_of_week + datetime.timedelta(days=6)).strftime('%m/%d/%Y')}")
#WEEKLY ROW
# Navigation arrows plus a "MM/DD/YYYY - MM/DD/YYYY" label for the shown week.
previous_week_button = tkinter.Button(text="⟵", width= 11, command= previous_week)
previous_week_button.grid(column=0, row=0)
week_of_label = tkinter.Label(text=f"{start_of_week.strftime('%m/%d/%Y')} - {(start_of_week + datetime.timedelta(days=6)).strftime('%m/%d/%Y')}", font=('Arial', 18,'bold'))
week_of_label.config(padx=40, pady=50)
week_of_label.grid(column=1, row=0)
next_week_button = tkinter.Button(text="⟶", width= 11, command= next_week)
next_week_button.grid(column=2, row=0)
# Hide the forward arrow while the latest stored week is still current.
if pd.Timestamp(today - datetime.timedelta(days= 7)) < dammits_db['Week'].to_list()[-1]:
    next_week_button.grid_remove()

#DAMMITS ROW
# -/+ buttons, category label, and the running count for dammits.
decrease_dammits_button = tkinter.Button(text="-", width= 5, command= decrease_dammits)
decrease_dammits_button.grid(column=0, row=1)
dammits_label = tkinter.Label(text=f"DAMMITS", font=('Arial', 35,'normal'))
dammits_label.config(pady=30)
dammits_label.grid(column= 1, row=1)
increase_dammits_button = tkinter.Button(text="+", width= 5, command= increase_dammits)
increase_dammits_button.grid(column=2, row=1)
num_of_dammits_label = tkinter.Label(text=f"{num_of_dammits}", font=('Arial', 35,'normal'))
num_of_dammits_label.config(padx=20)
num_of_dammits_label.grid(column= 3, row= 1)

#YIKES ROW
# Same layout as the dammits row, one grid row down.
decrease_yikes_button = tkinter.Button(text="-", width= 5, command= decrease_yikes)
decrease_yikes_button.grid(column=0, row=2)
# canvas = tkinter.Canvas(width=400, height=128, highlightthickness=0)
# yikes_label = tkinter.PhotoImage(file="yikes.png")
# canvas.create_image(258/2+200,64,image=yikes_label)
# canvas.grid(columns=2,rows=3)
yikes_label = tkinter.Label(text=f"YIKES", font=('Arial', 35,'normal'))
yikes_label.config(pady=30)
yikes_label.grid(column= 1, row=2)
increase_yikes_button = tkinter.Button(text="+", width= 5, command= increase_yikes)
increase_yikes_button.grid(column=2, row=2)
num_of_yikes_label = tkinter.Label(text=f"{num_of_yikes}", font=('Arial', 35,'normal'))
num_of_yikes_label.config(padx=20)
num_of_yikes_label.grid(column=3,row=2)

# Enter the Tk event loop; blocks until the window is closed.
window.mainloop()
#TODO
# fix this bug where when today is far out, it still toggles correctly.. do i want to add each week until I get there or skip the csv to the current week? probably skip to current week to start
#
#
#
#
# | 37.473684 | 199 | 0.684457 | import tkinter
import os
import pandas as pd
import datetime
import csv
os.chdir(os.path.dirname(__file__))
window = tkinter.Tk()
window.title("Dammit Counter")
dammits_db = pd.read_csv("dammits.csv")
dammits_db['Week'] = pd.to_datetime(dammits_db['Week'])
today = datetime.date.today()
today = today + datetime.timedelta(days= 2)
start_of_week = pd.to_datetime(dammits_db['Week'].to_list()[-1])
current_week_index = int(dammits_db['Week'][dammits_db['Week'] == start_of_week].index[0])
num_of_yikes = dammits_db.iloc[current_week_index, 1]
num_of_dammits = dammits_db.iloc[current_week_index, 2]
def increase_dammits():
global current_week_index, dammits_db
with open("dammits.csv") as f:
reader = csv.reader(f)
data = list(reader)
data[current_week_index + 1][2] = int(data[current_week_index + 1][2]) + 1
with open("dammits.csv", "w", newline = "") as f:
a = csv.writer(f)
for row in data:
a.writerow(row)
dammits_db = pd.read_csv("dammits.csv")
dammits_db['Week'] = pd.to_datetime(dammits_db['Week'])
update_board()
def decrease_dammits():
global current_week_index, dammits_db
with open("dammits.csv") as f:
reader = csv.reader(f)
data = list(reader)
data[current_week_index + 1][2] = int(data[current_week_index + 1][2]) - 1
with open("dammits.csv", "w", newline = "") as f:
a = csv.writer(f)
for row in data:
a.writerow(row)
dammits_db = pd.read_csv("dammits.csv")
dammits_db['Week'] = pd.to_datetime(dammits_db['Week'])
update_board()
def increase_yikes():
global current_week_index, dammits_db
with open("dammits.csv") as f:
reader = csv.reader(f)
data = list(reader)
data[current_week_index + 1][1] = int(data[current_week_index + 1][1]) + 1
with open("dammits.csv", "w", newline = "") as f:
a = csv.writer(f)
for row in data:
a.writerow(row)
dammits_db = pd.read_csv("dammits.csv")
dammits_db['Week'] = pd.to_datetime(dammits_db['Week'])
update_board()
def decrease_yikes():
global current_week_index, dammits_db
with open("dammits.csv") as f:
reader = csv.reader(f)
data = list(reader)
data[current_week_index + 1][1] = int(data[current_week_index + 1][1]) - 1
with open("dammits.csv", "w", newline = "") as f:
a = csv.writer(f)
for row in data:
a.writerow(row)
dammits_db = pd.read_csv("dammits.csv")
dammits_db['Week'] = pd.to_datetime(dammits_db['Week'])
update_board()
def update_board():
global current_week_index
num_of_yikes = dammits_db.iloc[current_week_index, 1]
num_of_yikes_label.config(text=f"{num_of_yikes}")
num_of_dammits = dammits_db.iloc[current_week_index, 2]
num_of_dammits_label.config(text=f"{num_of_dammits}")
week_of_label.config(text=f"{dammits_db.iloc[current_week_index,0].strftime('%m/%d/%Y')} - {(dammits_db['Week'][current_week_index] + datetime.timedelta(days=6)).strftime('%m/%d/%Y')}")
if current_week_index == 0:
previous_week_button.grid_remove()
elif (current_week_index == len(dammits_db['Week'])-1) and (pd.Timestamp(today - datetime.timedelta(days= 7)) < dammits_db['Week'].to_list()[-1]):
next_week_button.grid_remove()
else:
previous_week_button.grid()
next_week_button.grid()
print(f"Current week index: {current_week_index}")
print(f"Total number of weeks: {len(dammits_db['Week'])-1}")
def next_week():
global current_week_index, num_of_dammits, num_of_dammits_label, num_of_yikes, num_of_yikes_label, start_of_week, week_of_label, dammits_db
print("")
print(f"current week index: {current_week_index}")
print(f"start_of_week: {start_of_week}")
print("")
if current_week_index < len(dammits_db['Week'])-1:
current_week_index += 1
num_of_yikes = dammits_db.iloc[current_week_index, 1]
num_of_dammits = dammits_db.iloc[current_week_index, 2]
update_board()
elif pd.Timestamp(today - datetime.timedelta(days= 7)) >= dammits_db['Week'].to_list()[-1]:
with open('dammits.csv', 'a', newline = "") as file:
writer_object = csv.writer(file)
date_to_append = (pd.to_datetime(today + datetime.timedelta(days=-today.weekday())).strftime('%Y-%m-%d'))
writer_object.writerow([date_to_append,0,0])
file.close()
dammits_db = pd.read_csv("dammits.csv")
dammits_db['Week'] = pd.to_datetime(dammits_db['Week'])
current_week_index += 1
num_of_yikes = dammits_db.iloc[current_week_index, 1]
num_of_dammits = dammits_db.iloc[current_week_index, 2]
update_board()
def previous_week():
global current_week_index, num_of_dammits, num_of_yikes, num_of_yikes_label, num_of_dammits_label
if current_week_index > 0:
current_week_index -= 1
num_of_yikes = dammits_db.iloc[current_week_index, 1]
num_of_dammits = dammits_db.iloc[current_week_index, 2]
update_board()
print(f"Test: {(start_of_week + datetime.timedelta(days=6)).strftime('%m/%d/%Y')}")
previous_week_button = tkinter.Button(text="⟵", width= 11, command= previous_week)
previous_week_button.grid(column=0, row=0)
week_of_label = tkinter.Label(text=f"{start_of_week.strftime('%m/%d/%Y')} - {(start_of_week + datetime.timedelta(days=6)).strftime('%m/%d/%Y')}", font=('Arial', 18,'bold'))
week_of_label.config(padx=40, pady=50)
week_of_label.grid(column=1, row=0)
next_week_button = tkinter.Button(text="⟶", width= 11, command= next_week)
next_week_button.grid(column=2, row=0)
if pd.Timestamp(today - datetime.timedelta(days= 7)) < dammits_db['Week'].to_list()[-1]:
next_week_button.grid_remove()
decrease_dammits_button = tkinter.Button(text="-", width= 5, command= decrease_dammits)
decrease_dammits_button.grid(column=0, row=1)
dammits_label = tkinter.Label(text=f"DAMMITS", font=('Arial', 35,'normal'))
dammits_label.config(pady=30)
dammits_label.grid(column= 1, row=1)
increase_dammits_button = tkinter.Button(text="+", width= 5, command= increase_dammits)
increase_dammits_button.grid(column=2, row=1)
num_of_dammits_label = tkinter.Label(text=f"{num_of_dammits}", font=('Arial', 35,'normal'))
num_of_dammits_label.config(padx=20)
num_of_dammits_label.grid(column= 3, row= 1)
decrease_yikes_button = tkinter.Button(text="-", width= 5, command= decrease_yikes)
decrease_yikes_button.grid(column=0, row=2)
yikes_label = tkinter.Label(text=f"YIKES", font=('Arial', 35,'normal'))
yikes_label.config(pady=30)
yikes_label.grid(column= 1, row=2)
increase_yikes_button = tkinter.Button(text="+", width= 5, command= increase_yikes)
increase_yikes_button.grid(column=2, row=2)
num_of_yikes_label = tkinter.Label(text=f"{num_of_yikes}", font=('Arial', 35,'normal'))
num_of_yikes_label.config(padx=20)
num_of_yikes_label.grid(column=3,row=2)
window.mainloop()
| true | true |
1c339c4341e326d02b8fdd14888075414ef08e24 | 9,385 | py | Python | CarParkArcGisApi/CarParkArcGisApi/env/Lib/site-packages/arcgis/learn/models/_psp_utils.py | moazzamwaheed2017/carparkapi | e52ae1b2aed47321ce9d22ba6cd0b85fa60a417a | [
"MIT"
] | null | null | null | CarParkArcGisApi/CarParkArcGisApi/env/Lib/site-packages/arcgis/learn/models/_psp_utils.py | moazzamwaheed2017/carparkapi | e52ae1b2aed47321ce9d22ba6cd0b85fa60a417a | [
"MIT"
] | 9 | 2020-02-03T15:50:10.000Z | 2022-03-02T07:11:34.000Z | CarParkArcGisApi/CarParkArcGisApi/env/Lib/site-packages/arcgis/learn/models/_psp_utils.py | moazzamwaheed2017/carparkapi | e52ae1b2aed47321ce9d22ba6cd0b85fa60a417a | [
"MIT"
] | null | null | null | # MIT License
# Copyright (c) 2019 Hengshuang Zhao
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Based on https://github.com/hszhao/semseg
import torch
import warnings
import PIL
import numpy as np
from pdb import set_trace
import torch.nn.functional as F
import torch.nn as nn
import torch
from torchvision import models
import math
from fastai.callbacks.hooks import hook_output
from fastai.vision.learner import create_body
from fastai.callbacks.hooks import model_sizes
def initialize_weights(*models):
    """Initialize the weights of every module in the given models.

    Conv2d and Linear layers get Kaiming-normal weights and zeroed biases
    (when a bias is present); BatchNorm2d layers get weight=1 and bias=0.
    """
    for net in models:
        for layer in net.modules():
            if isinstance(layer, (nn.Conv2d, nn.Linear)):
                nn.init.kaiming_normal_(layer.weight)
                if layer.bias is not None:
                    layer.bias.data.zero_()
            elif isinstance(layer, nn.BatchNorm2d):
                layer.weight.data.fill_(1)
                layer.bias.data.zero_()
class _PyramidPoolingModule(nn.Module):
"""
Creates the pyramid pooling module as in https://arxiv.org/abs/1612.01105
Takes a feature map from the backbone and pools it at different scales
according to the given pyramid sizes and upsamples it to original feature
map size and concatenates it with the feature map.
Code from https://github.com/hszhao/semseg.
"""
def __init__(self, in_dim, reduction_dim, setting):
super(_PyramidPoolingModule, self).__init__()
self.features = []
## Creating modules for different pyramid sizes
for s in setting:
self.features.append(nn.Sequential(
nn.AdaptiveAvgPool2d(s),
nn.Conv2d(in_dim, reduction_dim, kernel_size=1, bias=False),
nn.BatchNorm2d(reduction_dim, momentum=.95),
nn.ReLU(inplace=True)
))
self.features = nn.ModuleList(self.features)
def forward(self, x):
x_size = x.size()
out = [x]
for f in self.features:
## Pass through the module which reduces its spatial size and then upsamples it.
out.append(F.interpolate(f(x), x_size[2:], mode='bilinear', align_corners=True))
out = torch.cat(out, 1)
return out
def _pspnet_unet(num_classes, backbone_fn, chip_size=224, pyramid_sizes=(1, 2, 3, 6), pretrained=True):
    """
    Function which returns PPM module attached to backbone which is then used to form the Unet.

    Converts the last stage(s) of the backbone to dilated convolutions
    (stride 1) so spatial resolution is preserved, appends the pyramid
    pooling module and a channel-reducing conv, and returns the whole
    chain as an ``nn.Sequential`` suitable as a DynamicUnet encoder.

    num_classes is accepted for interface symmetry with PSPNet but is not
    used here — the Unet head added later produces the class logits.
    """
    backbone = create_body(backbone_fn, pretrained=pretrained)
    backbone_name = backbone_fn.__name__

    ## Support for different backbones: densenet/vgg wrap their layers in an
    ## extra Sequential, so unwrap one level before iterating children.
    if "densenet" in backbone_name or "vgg" in backbone_name:
        hookable_modules = list(backbone.children())[0]
    else:
        hookable_modules = list(backbone.children())

    # How many trailing child modules get converted to dilated convs.
    if "vgg" in backbone_name:
        modify_dilation_index = -5
    else:
        modify_dilation_index = -2

    # resnet18/34 use BasicBlock ('conv' matches conv1/conv2); deeper
    # resnets use Bottleneck, where only the 3x3 'conv2' is dilated.
    if backbone_name == 'resnet18' or backbone_name == 'resnet34':
        module_to_check = 'conv'
    else:
        module_to_check = 'conv2'

    custom_idx = 0
    for i, module in enumerate(hookable_modules[modify_dilation_index:]):
        # Dilation/padding grow with depth: 2, 4, 6, ...
        dilation = 2 * (i + 1)
        padding = 2 * (i + 1)
        # padding = 1
        for n, m in module.named_modules():
            if module_to_check in n:
                m.dilation, m.padding, m.stride = (dilation, dilation), (padding, padding), (1, 1)
            elif 'downsample.0' in n:
                # Keep the shortcut conv at stride 1 to match the main path.
                m.stride = (1, 1)

        # VGG has bare Conv2d children (no blocks), so dilate them directly.
        if "vgg" in backbone_fn.__name__:
            if isinstance(module, nn.Conv2d):
                dilation = 2 * (custom_idx + 1)
                padding = 2 * (custom_idx + 1)
                module.dilation, module.padding, module.stride = (dilation, dilation), (padding, padding), (1, 1)
                custom_idx += 1

    ## returns the size of various activations (runs a dummy forward pass)
    feature_sizes = model_sizes(backbone, size=(chip_size, chip_size))

    ## Get number of channels in the last layer
    num_channels = feature_sizes[-1][1]

    # Each pyramid branch outputs num_channels / len(pyramid_sizes) channels.
    penultimate_channels = num_channels / len(pyramid_sizes)
    ppm = _PyramidPoolingModule(num_channels, int(penultimate_channels), pyramid_sizes)

    in_final = int(penultimate_channels) * len(pyramid_sizes) + num_channels
    # Reduce channel size after pyramid pooling module to avoid CUDA OOM error.
    final_conv = nn.Conv2d(in_channels=in_final, out_channels=512, kernel_size=3, padding=1)

    ## To make Dynamic Unet work as it expects a backbone which can be indexed.
    if "densenet" in backbone_name or "vgg" in backbone_name:
        backbone = backbone[0]
    layers = [*backbone, ppm, final_conv]

    return nn.Sequential(*layers)
class PSPNet(nn.Module):
    """PSPNet semantic segmentation network (https://arxiv.org/abs/1612.01105).

    Builds a dilated backbone, attaches the pyramid pooling module and a
    final classifier, plus an auxiliary classifier (deep supervision) fed
    from an intermediate backbone layer via a fastai output hook. During
    training ``forward`` returns (main logits, aux logits); in eval mode
    it returns only the main logits, both upsampled to the input size.
    """
    def __init__(self, num_classes, backbone_fn, chip_size=224, pyramid_sizes=(1, 2, 3, 6), pretrained=True):
        super(PSPNet, self).__init__()
        self.backbone = create_body(backbone_fn, pretrained=pretrained)
        backbone_name = backbone_fn.__name__

        ## Support for different backbones: densenet/vgg wrap their layers in
        ## an extra Sequential, so unwrap one level before iterating children.
        if "densenet" in backbone_name or "vgg" in backbone_name:
            hookable_modules = list(self.backbone.children())[0]
        else:
            hookable_modules = list(self.backbone.children())

        # How many trailing child modules get converted to dilated convs.
        if "vgg" in backbone_name:
            modify_dilation_index = -5
        else:
            modify_dilation_index = -2

        # resnet18/34 use BasicBlock ('conv' matches conv1/conv2); deeper
        # resnets use Bottleneck, where only the 3x3 'conv2' is dilated.
        if backbone_name == 'resnet18' or backbone_name == 'resnet34':
            module_to_check = 'conv'
        else:
            module_to_check = 'conv2'

        ## Hook at the index where we need to get the auxillary logits out
        self.hook = hook_output(hookable_modules[modify_dilation_index])

        custom_idx = 0
        for i, module in enumerate(hookable_modules[modify_dilation_index:]):
            # Dilation/padding grow with depth: 2, 4, 6, ...
            dilation = 2 * (i + 1)
            padding = 2 * (i + 1)
            for n, m in module.named_modules():
                if module_to_check in n:
                    m.dilation, m.padding, m.stride = (dilation, dilation), (padding, padding), (1, 1)
                elif 'downsample.0' in n:
                    # Keep the shortcut conv at stride 1 to match the main path.
                    m.stride = (1, 1)

            # VGG has bare Conv2d children (no blocks), so dilate them directly.
            if "vgg" in backbone_fn.__name__:
                if isinstance(module, nn.Conv2d):
                    dilation = 2 * (custom_idx + 1)
                    padding = 2 * (custom_idx + 1)
                    module.dilation, module.padding, module.stride = (dilation, dilation), (padding, padding), (1, 1)
                    custom_idx += 1

        ## returns the size of various activations (runs a dummy forward pass,
        ## which also populates self.hook.stored)
        feature_sizes = model_sizes(self.backbone, size=(chip_size, chip_size))

        ## Geting the stored parameters inside of the hook
        aux_in_channels = self.hook.stored.shape[1]

        ## Get number of channels in the last layer
        num_channels = feature_sizes[-1][1]

        # Each pyramid branch outputs num_channels / len(pyramid_sizes) channels.
        penultimate_channels = num_channels / len(pyramid_sizes)
        self.ppm = _PyramidPoolingModule(num_channels, int(penultimate_channels), pyramid_sizes)

        self.final = nn.Sequential(
            ## To handle case when the length of pyramid_sizes is odd
            nn.Conv2d(int(penultimate_channels) * len(pyramid_sizes) + num_channels, math.ceil(penultimate_channels), kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(math.ceil(penultimate_channels)),
            nn.ReLU(inplace=True),
            nn.Dropout(0.1),
            nn.Conv2d(math.ceil(penultimate_channels), num_classes, kernel_size=1)
        )

        self.aux_logits = nn.Conv2d(aux_in_channels, num_classes, kernel_size=1)
        initialize_weights(self.aux_logits)

        initialize_weights(self.ppm, self.final)

    def forward(self, x):
        x_size = x.size()
        x = self.backbone(x)
        if self.training:
            aux_l = self.aux_logits(self.hook.stored)
        ## Remove hook to free up memory.
        # NOTE(review): removing the hook here means self.hook.stored is not
        # refreshed on subsequent forward passes — the aux branch would then
        # reuse stale activations in training; confirm intended behavior.
        self.hook.remove()
        x = self.ppm(x)
        x = self.final(x)
        if self.training:
            return F.interpolate(x, x_size[2:], mode='bilinear', align_corners=True), F.interpolate(aux_l, x_size[2:], mode='bilinear', align_corners=True)
        else:
            return F.interpolate(x, x_size[2:], mode='bilinear', align_corners=True)
import torch
import warnings
import PIL
import numpy as np
from pdb import set_trace
import torch.nn.functional as F
import torch.nn as nn
import torch
from torchvision import models
import math
from fastai.callbacks.hooks import hook_output
from fastai.vision.learner import create_body
from fastai.callbacks.hooks import model_sizes
def initialize_weights(*models):
for model in models:
for module in model.modules():
if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):
nn.init.kaiming_normal_(module.weight)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.BatchNorm2d):
module.weight.data.fill_(1)
module.bias.data.zero_()
class _PyramidPoolingModule(nn.Module):
def __init__(self, in_dim, reduction_dim, setting):
super(_PyramidPoolingModule, self).__init__()
self.features = []
atures.append(nn.Sequential(
nn.AdaptiveAvgPool2d(s),
nn.Conv2d(in_dim, reduction_dim, kernel_size=1, bias=False),
nn.BatchNorm2d(reduction_dim, momentum=.95),
nn.ReLU(inplace=True)
))
self.features = nn.ModuleList(self.features)
def forward(self, x):
x_size = x.size()
out = [x]
for f in self.features:
corners=True))
out = torch.cat(out, 1)
return out
def _pspnet_unet(num_classes, backbone_fn, chip_size=224, pyramid_sizes=(1, 2, 3, 6), pretrained=True):
backbone = create_body(backbone_fn, pretrained=pretrained)
backbone_name = backbone_fn.__name__
me or "vgg" in backbone_name:
hookable_modules = list(backbone.children())[0]
else:
hookable_modules = list(backbone.children())
if "vgg" in backbone_name:
modify_dilation_index = -5
else:
modify_dilation_index = -2
if backbone_name == 'resnet18' or backbone_name == 'resnet34':
module_to_check = 'conv'
else:
module_to_check = 'conv2'
custom_idx = 0
for i, module in enumerate(hookable_modules[modify_dilation_index:]):
dilation = 2 * (i + 1)
padding = 2 * (i + 1)
for n, m in module.named_modules():
if module_to_check in n:
m.dilation, m.padding, m.stride = (dilation, dilation), (padding, padding), (1, 1)
elif 'downsample.0' in n:
m.stride = (1, 1)
if "vgg" in backbone_fn.__name__:
if isinstance(module, nn.Conv2d):
dilation = 2 * (custom_idx + 1)
padding = 2 * (custom_idx + 1)
module.dilation, module.padding, module.stride = (dilation, dilation), (padding, padding), (1, 1)
custom_idx += 1
, size=(chip_size, chip_size))
penultimate_channels = num_channels / len(pyramid_sizes)
ppm = _PyramidPoolingModule(num_channels, int(penultimate_channels), pyramid_sizes)
in_final = int(penultimate_channels) * len(pyramid_sizes) + num_channels
final_conv = nn.Conv2d(in_channels=in_final, out_channels=512, kernel_size=3, padding=1)
kbone = backbone[0]
layers = [*backbone, ppm, final_conv]
return nn.Sequential(*layers)
class PSPNet(nn.Module):
def __init__(self, num_classes, backbone_fn, chip_size=224, pyramid_sizes=(1, 2, 3, 6), pretrained=True):
super(PSPNet, self).__init__()
self.backbone = create_body(backbone_fn, pretrained=pretrained)
backbone_name = backbone_fn.__name__
e_name or "vgg" in backbone_name:
hookable_modules = list(self.backbone.children())[0]
else:
hookable_modules = list(self.backbone.children())
if "vgg" in backbone_name:
modify_dilation_index = -5
else:
modify_dilation_index = -2
if backbone_name == 'resnet18' or backbone_name == 'resnet34':
module_to_check = 'conv'
else:
module_to_check = 'conv2'
_index])
custom_idx = 0
for i, module in enumerate(hookable_modules[modify_dilation_index:]):
dilation = 2 * (i + 1)
padding = 2 * (i + 1)
for n, m in module.named_modules():
if module_to_check in n:
m.dilation, m.padding, m.stride = (dilation, dilation), (padding, padding), (1, 1)
elif 'downsample.0' in n:
m.stride = (1, 1)
if "vgg" in backbone_fn.__name__:
if isinstance(module, nn.Conv2d):
dilation = 2 * (custom_idx + 1)
padding = 2 * (custom_idx + 1)
module.dilation, module.padding, module.stride = (dilation, dilation), (padding, padding), (1, 1)
custom_idx += 1
.backbone, size=(chip_size, chip_size))
[1]
1]
penultimate_channels = num_channels / len(pyramid_sizes)
self.ppm = _PyramidPoolingModule(num_channels, int(penultimate_channels), pyramid_sizes)
self.final = nn.Sequential(
yramid_sizes) + num_channels, math.ceil(penultimate_channels), kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(math.ceil(penultimate_channels)),
nn.ReLU(inplace=True),
nn.Dropout(0.1),
nn.Conv2d(math.ceil(penultimate_channels), num_classes, kernel_size=1)
)
self.aux_logits = nn.Conv2d(aux_in_channels, num_classes, kernel_size=1)
initialize_weights(self.aux_logits)
initialize_weights(self.ppm, self.final)
def forward(self, x):
x_size = x.size()
x = self.backbone(x)
if self.training:
aux_l = self.aux_logits(self.hook.stored)
x = self.ppm(x)
x = self.final(x)
if self.training:
return F.interpolate(x, x_size[2:], mode='bilinear', align_corners=True), F.interpolate(aux_l, x_size[2:], mode='bilinear', align_corners=True)
else:
return F.interpolate(x, x_size[2:], mode='bilinear', align_corners=True) | true | true |
1c339d6fee9e1e9192e798a12bd4ddbd28d7495c | 1,728 | py | Python | e2e_testing/torchscript/xfail_sets.py | edrutte/torch-mlir | 87d1af699136452d6f35ff493366d7c872c232ac | [
"Apache-2.0"
] | null | null | null | e2e_testing/torchscript/xfail_sets.py | edrutte/torch-mlir | 87d1af699136452d6f35ff493366d7c872c232ac | [
"Apache-2.0"
] | null | null | null | e2e_testing/torchscript/xfail_sets.py | edrutte/torch-mlir | 87d1af699136452d6f35ff493366d7c872c232ac | [
"Apache-2.0"
] | null | null | null | # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Also available under a BSD-style license. See LICENSE.
# This file describes the sets of tests expected to fail for each config.
# This information is deliberately kept in a side table, rather than
# in-situ on the test, as a deliberate layering decision: tests should
# have unique keys to identify them and enable side tables of various kinds
# (this includes down into lower parts of the stack, where a side table
# might be used to keep more elaborate sets of testing configurations).
# Lists of tests that fail to even reach the backends.
# These represent further work needed in torch-mlir to lower them properly
# to the backend contract.
# Tests that fail before reaching any backend: lowering to the backend
# contract is not yet implemented for these ops.
COMMON_TORCH_MLIR_LOWERING_XFAILS = {
"QuantizedMLP_basic",
"IouOfModule_basic",
}
# Fails due to https://github.com/llvm/torch-mlir/issues/448
# (size-zero tensor handling).
SIZE_ZERO_TENSOR_XFAILS = {
"SliceEndSleStartModule_basic",
"SliceStartEqEndModule_basic",
"SliceOutOfUpperBoundIndexModule_basic",
}
# The reference backend is expected to fail on the union of both sets above.
REFBACKEND_XFAIL_SET = set.union(COMMON_TORCH_MLIR_LOWERING_XFAILS, SIZE_ZERO_TENSOR_XFAILS)
# Write the TOSA set as a "passing" set as it is very early in development
# and very few tests work yet.
TOSA_PASS_SET = {
"ElementwiseUnaryModule_basic",
"ElementwiseSigmoidModule_basic",
"ElementwiseReluModule_basic",
"ElementwiseFloorModule_basic",
"ElementwiseLogModule_basic",
"TanhBackward_basic",
"ElementwiseAddModule_basic",
"ReturnThreeTensorFloat32_basic",
"AddCMulModule_basic",
"AddCDivModule_basic",
"SqueezeModule_broadcast",
}
| 40.186047 | 92 | 0.775463 |
COMMON_TORCH_MLIR_LOWERING_XFAILS = {
"QuantizedMLP_basic",
"IouOfModule_basic",
}
SIZE_ZERO_TENSOR_XFAILS = {
"SliceEndSleStartModule_basic",
"SliceStartEqEndModule_basic",
"SliceOutOfUpperBoundIndexModule_basic",
}
REFBACKEND_XFAIL_SET = set.union(COMMON_TORCH_MLIR_LOWERING_XFAILS, SIZE_ZERO_TENSOR_XFAILS)
TOSA_PASS_SET = {
"ElementwiseUnaryModule_basic",
"ElementwiseSigmoidModule_basic",
"ElementwiseReluModule_basic",
"ElementwiseFloorModule_basic",
"ElementwiseLogModule_basic",
"TanhBackward_basic",
"ElementwiseAddModule_basic",
"ReturnThreeTensorFloat32_basic",
"AddCMulModule_basic",
"AddCDivModule_basic",
"SqueezeModule_broadcast",
}
| true | true |
1c339d79aaf99ccbb8d865aaf4bbf5c885968a6c | 32,566 | py | Python | python/cudf/cudf/core/reshape.py | BenikaHall/cudf | d3f5add210293a4832dafb85f04cbb73149b9d54 | [
"Apache-2.0"
] | null | null | null | python/cudf/cudf/core/reshape.py | BenikaHall/cudf | d3f5add210293a4832dafb85f04cbb73149b9d54 | [
"Apache-2.0"
] | 1 | 2021-02-23T18:05:36.000Z | 2021-02-23T18:05:36.000Z | python/cudf/cudf/core/reshape.py | BenikaHall/cudf | d3f5add210293a4832dafb85f04cbb73149b9d54 | [
"Apache-2.0"
] | 1 | 2020-11-10T03:19:16.000Z | 2020-11-10T03:19:16.000Z | # Copyright (c) 2018-2021, NVIDIA CORPORATION.
import itertools
import numpy as np
import pandas as pd
import cudf
_axis_map = {0: 0, 1: 1, "index": 0, "columns": 1}
def _align_objs(objs, how="outer"):
    """Align a set of Series or Dataframe objects on their indexes.

    Parameters
    ----------
    objs : list of DataFrame, Series, or Index
    how : How to handle indexes on other axis (or axes),
        similar to join in concat

    Returns
    -------
    tuple of (list, bool)
        The (possibly reindexed) objects ready for concatenation, and a
        bool that is True only when all input indexes already matched
        (in which case the objects are returned untouched).
    """
    # Check if multiindex then check if indexes match. GenericIndex
    # returns ndarray tuple of bools requiring additional filter.
    # Then check for duplicate index value.
    i_objs = iter(objs)
    first = next(i_objs)
    not_matching_index = any(
        not first.index.equals(rest.index) for rest in i_objs
    )
    if not_matching_index:
        if not all(o.index.is_unique for o in objs):
            raise ValueError("cannot reindex from a duplicate axis")
        index = objs[0].index
        name = index.name
        if how == "inner" or isinstance(index, cudf.MultiIndex):
            # Pairwise-join all indexes to get the common (or combined)
            # index, then reindex every object onto it.
            for obj in objs[1:]:
                index = (
                    cudf.DataFrame(index=obj.index)
                    .join(cudf.DataFrame(index=index), how=how)
                    .index
                )
            index.name = name
            return [obj.reindex(index) for obj in objs], False
        else:
            # Outer join on a flat index: concatenate all index values,
            # then keep the first occurrence of each value in its
            # original (first-seen) order.
            all_index_objs = [obj.index for obj in objs]
            appended_index = all_index_objs[0].append(all_index_objs[1:])
            df = cudf.DataFrame(
                {
                    "idxs": appended_index,
                    "order": cudf.core.column.arange(
                        start=0, stop=len(appended_index)
                    ),
                }
            )
            df = df.drop_duplicates(subset=["idxs"]).sort_values(
                by=["order"], ascending=True
            )
            final_index = df["idxs"]
            final_index.name = name
            return [obj.reindex(final_index) for obj in objs], False
    else:
        return objs, True
def _normalize_series_and_dataframe(objs, axis):
    """Convert every ``cudf.Series`` in ``objs`` to a one-column DataFrame, in place.

    For ``axis == 1`` an unnamed Series receives the next free integer
    label (0, 1, 2, ...) while a named Series keeps its own name; for
    any other axis the label is always the (never-incremented) counter 0.
    """
    counter = 0
    for position, obj in enumerate(objs):
        if not isinstance(obj, cudf.Series):
            continue
        if axis == 1:
            label = obj.name
            if label is None:
                label = counter
                counter += 1
        else:
            label = counter
        objs[position] = obj.to_frame(name=label)
def concat(objs, axis=0, join="outer", ignore_index=False, sort=None):
    """Concatenate DataFrames, Series, or Indices row-wise.

    Parameters
    ----------
    objs : list of DataFrame, Series, or Index
    axis : {0/'index', 1/'columns'}, default 0
        The axis to concatenate along.
    join : {'inner', 'outer'}, default 'outer'
        How to handle indexes on other axis (or axes).
    ignore_index : bool, default False
        Set True to ignore the index of the *objs* and provide a
        default range index instead.
    sort : bool, default False
        Sort non-concatenation axis if it is not already aligned.

    Returns
    -------
    A new object of like type with rows from each object in ``objs``.

    Examples
    --------
    Combine two ``Series``.

    >>> import cudf
    >>> s1 = cudf.Series(['a', 'b'])
    >>> s2 = cudf.Series(['c', 'd'])
    >>> s1
    0    a
    1    b
    dtype: object
    >>> s2
    0    c
    1    d
    dtype: object
    >>> cudf.concat([s1, s2])
    0    a
    1    b
    0    c
    1    d
    dtype: object

    Clear the existing index and reset it in the
    result by setting the ``ignore_index`` option to ``True``.

    >>> cudf.concat([s1, s2], ignore_index=True)
    0    a
    1    b
    2    c
    3    d
    dtype: object

    Combine two DataFrame objects with identical columns.

    >>> df1 = cudf.DataFrame([['a', 1], ['b', 2]],
    ...                      columns=['letter', 'number'])
    >>> df1
      letter  number
    0      a       1
    1      b       2
    >>> df2 = cudf.DataFrame([['c', 3], ['d', 4]],
    ...                      columns=['letter', 'number'])
    >>> df2
      letter  number
    0      c       3
    1      d       4
    >>> cudf.concat([df1, df2])
      letter  number
    0      a       1
    1      b       2
    0      c       3
    1      d       4

    Combine DataFrame objects with overlapping columns and return
    everything. Columns outside the intersection will
    be filled with ``null`` values.

    >>> df3 = cudf.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
    ...                      columns=['letter', 'number', 'animal'])
    >>> df3
      letter  number animal
    0      c       3    cat
    1      d       4    dog
    >>> cudf.concat([df1, df3], sort=False)
      letter  number animal
    0      a       1   <NA>
    1      b       2   <NA>
    0      c       3    cat
    1      d       4    dog

    Combine ``DataFrame`` objects with overlapping columns
    and return only those that are shared by passing ``inner`` to
    the ``join`` keyword argument.

    >>> cudf.concat([df1, df3], join="inner")
      letter  number
    0      a       1
    1      b       2
    0      c       3
    1      d       4

    Combine ``DataFrame`` objects horizontally along the
    x axis by passing in ``axis=1``.

    >>> df4 = cudf.DataFrame([['bird', 'polly'], ['monkey', 'george']],
    ...                      columns=['animal', 'name'])
    >>> df4
       animal    name
    0    bird   polly
    1  monkey  george
    >>> cudf.concat([df1, df4], axis=1)
      letter  number  animal    name
    0      a       1    bird   polly
    1      b       2  monkey  george
    """
    if not objs:
        raise ValueError("No objects to concatenate")

    objs = [obj for obj in objs if obj is not None]

    # Return for single object
    if len(objs) == 1:
        if ignore_index:
            if axis == 1:
                result = cudf.DataFrame(
                    data=objs[0]._data.copy(deep=True),
                    index=objs[0].index.copy(deep=True),
                )
                # TODO: Move following columns setting into
                # above constructor after following issue is fixed:
                # https://github.com/rapidsai/cudf/issues/6821
                result.columns = pd.RangeIndex(len(objs[0]._data.names))
            elif axis == 0:
                result = cudf.DataFrame(
                    data=objs[0]._data.copy(deep=True),
                    index=cudf.RangeIndex(len(objs[0])),
                )
        else:
            result = objs[0].copy()
        if sort:
            if axis == 0:
                return result.sort_index()
            elif not result.columns.is_monotonic:
                # TODO: Sorting by columns can be done
                # once the following issue is fixed:
                # https://github.com/rapidsai/cudf/issues/6821
                raise NotImplementedError(
                    "Sorting by columns is not yet supported"
                )
        else:
            return result

    # After dropping None entries the list may be empty.
    if len(objs) == 0:
        raise ValueError("All objects passed were None")

    # Retrieve the base types of `objs`. In order to support sub-types
    # and object wrappers, we use `isinstance()` instead of comparing
    # types directly
    typs = set()
    for o in objs:
        if isinstance(o, cudf.MultiIndex):
            typs.add(cudf.MultiIndex)
        if issubclass(type(o), cudf.Index):
            typs.add(type(o))
        elif isinstance(o, cudf.DataFrame):
            typs.add(cudf.DataFrame)
        elif isinstance(o, cudf.Series):
            typs.add(cudf.Series)
        else:
            raise TypeError(f"cannot concatenate object of type {type(o)}")

    allowed_typs = {cudf.Series, cudf.DataFrame}

    param_axis = _axis_map.get(axis, None)
    if param_axis is None:
        # BUGFIX: report the invalid value the caller supplied (`axis`);
        # `param_axis` is always None on this path and was useless here.
        raise ValueError(
            f'`axis` must be 0 / "index" or 1 / "columns", got: {axis}'
        )
    else:
        axis = param_axis

    # when axis is 1 (column) we can concat with Series and Dataframes
    if axis == 1:
        if not typs.issubset(allowed_typs):
            raise TypeError(
                "Can only concatenate Series and DataFrame objects when axis=1"
            )
        df = cudf.DataFrame()
        _normalize_series_and_dataframe(objs, axis=axis)

        # Drop fully-empty (0, 0) frames; keep the originals around in
        # case join='inner' needs to see them.
        old_objs = objs
        objs = [obj for obj in objs if obj.shape != (0, 0)]
        if len(objs) == 0:
            return df

        empty_inner = False
        if join == "inner":
            # don't filter out empty df's
            if any(obj.empty for obj in old_objs):
                empty_inner = True

        objs, match_index = _align_objs(objs, how=join)

        for idx, o in enumerate(objs):
            if idx == 0:
                df.index = o.index
            for col in o._data.names:
                if col in df._data:
                    raise NotImplementedError(
                        f"A Column with duplicate name found: {col}, cuDF "
                        f"doesn't support having multiple columns with "
                        f"same names yet."
                    )
                df[col] = o._data[col]

        result_columns = objs[0].columns
        for o in objs[1:]:
            result_columns = result_columns.append(o.columns)

        if ignore_index:
            # with ignore_index the column names change to numbers
            df.columns = pd.RangeIndex(len(result_columns.unique()))
        else:
            df.columns = result_columns.unique()
        if empty_inner:
            # if join is inner and it contains an empty df
            # we return an empty df
            return df.head(0)
        if not match_index and sort is not False:
            return df.sort_index()
        if sort or join == "inner":
            # when join='outer' and sort=False string indexes
            # are returned unsorted. Everything else seems
            # to be returned sorted when axis = 1
            return df.sort_index()
        else:
            return df

    typ = list(typs)[0]
    if len(typs) > 1:
        if allowed_typs == typs:
            # This block of code will run when `objs` has
            # both Series & DataFrame kind of inputs.
            _normalize_series_and_dataframe(objs, axis=axis)
            typ = cudf.DataFrame
        else:
            raise TypeError(
                f"`concat` cannot concatenate objects of "
                f"types: {sorted([t.__name__ for t in typs])}."
            )

    if typ is cudf.DataFrame:
        old_objs = objs
        objs = [obj for obj in objs if obj.shape != (0, 0)]
        if len(objs) == 0:
            # If objs is empty, that indicates all of
            # objs are empty dataframes.
            return cudf.DataFrame()
        elif len(objs) == 1:
            if join == "inner":
                data = None
            else:
                data = objs[0]._data.copy(deep=True)
            result = cudf.DataFrame(
                data=data,
                index=cudf.RangeIndex(len(objs[0]))
                if ignore_index
                else objs[0].index.copy(deep=True),
            )
            return result
        else:
            if join == "inner" and len(old_objs) != len(objs):
                # don't filter out empty df's
                objs = old_objs
            result = cudf.DataFrame._concat(
                objs,
                axis=axis,
                join=join,
                ignore_index=ignore_index,
                sort=sort,
            )
        return result
    elif typ is cudf.Series:
        objs = [obj for obj in objs if len(obj)]
        if len(objs) == 0:
            return cudf.Series()
        elif len(objs) == 1 and not ignore_index:
            return objs[0]
        else:
            return cudf.Series._concat(
                objs, axis=axis, index=None if ignore_index else True
            )
    elif typ is cudf.MultiIndex:
        return cudf.MultiIndex._concat(objs)
    elif issubclass(typ, cudf.Index):
        return cudf.Index._concat(objs)
    else:
        raise TypeError(f"cannot concatenate object of type {typ}")
def melt(
    frame,
    id_vars=None,
    value_vars=None,
    var_name=None,
    value_name="value",
    col_level=None,
):
    """Unpivots a DataFrame from wide format to long format,
    optionally leaving identifier variables set.

    Parameters
    ----------
    frame : DataFrame
    id_vars : tuple, list, or ndarray, optional
        Column(s) to use as identifier variables.
        default: None
    value_vars : tuple, list, or ndarray, optional
        Column(s) to unpivot.
        default: all columns that are not set as `id_vars`.
    var_name : scalar
        Name to use for the `variable` column.
        default: frame.columns.name or 'variable'
    value_name : str
        Name to use for the `value` column.
        default: 'value'
    col_level : must be None
        Unsupported; present only for pandas signature compatibility.

    Returns
    -------
    out : DataFrame
        Melted result

    Raises
    ------
    KeyError
        If `id_vars` or `value_vars` name columns missing from `frame`,
        or if the two lists overlap.
    ValueError
        If the `value_vars` columns do not all share one dtype.

    Difference from pandas:
     * Does not support 'col_level' because cuDF does not have multi-index

    Examples
    --------
    >>> import cudf
    >>> df = cudf.DataFrame({'A': ['a', 'b', 'c'],
    ...                      'B': [1, 3, 5],
    ...                      'C': [2, 4, 6]})
    >>> df
       A  B  C
    0  a  1  2
    1  b  3  4
    2  c  5  6
    >>> cudf.melt(df, id_vars=['A'], value_vars=['B'])
       A variable  value
    0  a        B      1
    1  b        B      3
    2  c        B      5
    >>> cudf.melt(df, id_vars=['A'], value_vars=['B', 'C'])
       A variable  value
    0  a        B      1
    1  b        B      3
    2  c        B      5
    3  a        C      2
    4  b        C      4
    5  c        C      6

    The names of ‘variable’ and ‘value’ columns can be customized:

    >>> cudf.melt(df, id_vars=['A'], value_vars=['B'],
    ...         var_name='myVarname', value_name='myValname')
       A myVarname  myValname
    0  a         B          1
    1  b         B          3
    2  c         B          5
    """
    assert col_level in (None,)

    # Arg cleaning
    import collections

    # id_vars
    if id_vars is not None:
        if not isinstance(id_vars, collections.abc.Sequence):
            id_vars = [id_vars]
        id_vars = list(id_vars)
        missing = set(id_vars) - set(frame.columns)
        if not len(missing) == 0:
            raise KeyError(
                f"The following 'id_vars' are not present"
                f" in the DataFrame: {list(missing)}"
            )
    else:
        id_vars = []

    # value_vars
    if value_vars is not None:
        if not isinstance(value_vars, collections.abc.Sequence):
            value_vars = [value_vars]
        value_vars = list(value_vars)
        missing = set(value_vars) - set(frame.columns)
        if not len(missing) == 0:
            raise KeyError(
                f"The following 'value_vars' are not present"
                f" in the DataFrame: {list(missing)}"
            )
    else:
        # then all remaining columns in frame
        value_vars = frame.columns.drop(id_vars)
        value_vars = list(value_vars)

    # Error for unimplemented support for datatype
    dtypes = [frame[col].dtype for col in id_vars + value_vars]
    if any(cudf.utils.dtypes.is_categorical_dtype(t) for t in dtypes):
        raise NotImplementedError(
            "Categorical columns are not yet " "supported for function"
        )

    # Check dtype homogeneity in value_var
    # Because heterogeneous concat is unimplemented
    dtypes = [frame[col].dtype for col in value_vars]
    if len(dtypes) > 0:
        dtype = dtypes[0]
        if any(t != dtype for t in dtypes):
            raise ValueError("all cols in value_vars must have the same dtype")

    # overlap
    overlap = set(id_vars).intersection(set(value_vars))
    if not len(overlap) == 0:
        raise KeyError(
            f"'value_vars' and 'id_vars' cannot have overlap."
            f" The following 'value_vars' are ALSO present"
            f" in 'id_vars': {list(overlap)}"
        )

    N = len(frame)  # rows in the input
    K = len(value_vars)  # number of columns being unpivoted

    def _tile(A, reps):
        # Repeat series A `reps` times by self-concatenation.
        series_list = [A] * reps
        if reps > 0:
            return cudf.Series._concat(objs=series_list, index=None)
        else:
            return cudf.Series([], dtype=A.dtype)

    # Step 1: tile id_vars
    mdata = collections.OrderedDict()
    for col in id_vars:
        mdata[col] = _tile(frame[col], K)

    # Step 2: add variable
    # Build an N*K codes column (block i holds code i), then wrap it in a
    # categorical column whose categories are the value_vars names.
    var_cols = []
    for i, _ in enumerate(value_vars):
        var_cols.append(
            cudf.Series(cudf.core.column.full(N, i, dtype=np.int8))
        )
    temp = cudf.Series._concat(objs=var_cols, index=None)

    if not var_name:
        var_name = "variable"

    mdata[var_name] = cudf.Series(
        cudf.core.column.build_categorical_column(
            categories=value_vars,
            codes=cudf.core.column.as_column(
                temp._column.base_data, dtype=temp._column.dtype
            ),
            mask=temp._column.base_mask,
            size=temp._column.size,
            offset=temp._column.offset,
            ordered=False,
        )
    )

    # Step 3: add values
    mdata[value_name] = cudf.Series._concat(
        objs=[frame[val] for val in value_vars], index=None
    )

    return cudf.DataFrame(mdata)
def get_dummies(
    df,
    prefix=None,
    prefix_sep="_",
    dummy_na=False,
    columns=None,
    cats=None,
    sparse=False,
    drop_first=False,
    dtype="uint8",
):
    """ Returns a dataframe whose columns are the one hot encodings of all
    columns in `df`

    Parameters
    ----------
    df : array-like, Series, or DataFrame
        Data of which to get dummy indicators.
    prefix : str, dict, or sequence, optional
        prefix to append. Either a str (to apply a constant prefix), dict
        mapping column names to prefixes, or sequence of prefixes to apply with
        the same length as the number of columns. If not supplied, defaults
        to the column name being encoded
    prefix_sep : str, dict, or sequence, optional, default '_'
        separator to use when appending prefixes
    dummy_na : boolean, optional
        Add a column to indicate Nones, if False Nones are ignored.
    cats : dict, optional
        dictionary mapping column names to sequences of integers representing
        that column's category. See `cudf.DataFrame.one_hot_encoding` for more
        information. if not supplied, it will be computed
    sparse : boolean, optional
        Right now this is NON-FUNCTIONAL argument in rapids.
    drop_first : boolean, optional
        Right now this is NON-FUNCTIONAL argument in rapids.
    columns : sequence of str, optional
        Names of columns to encode. If not provided, will attempt to encode all
        columns. Note this is different from pandas default behavior, which
        encodes all columns with dtype object or categorical
    dtype : str, optional
        output dtype, default 'uint8'

    Examples
    --------
    >>> import cudf
    >>> df = cudf.DataFrame({"a": ["value1", "value2", None], "b": [0, 0, 0]})
    >>> cudf.get_dummies(df)
       b  a_value1  a_value2
    0  0         1         0
    1  0         0         1
    2  0         0         0

    >>> cudf.get_dummies(df, dummy_na=True)
       b  a_None  a_value1  a_value2
    0  0       0         1         0
    1  0       0         0         1
    2  0       1         0         0

    >>> import numpy as np
    >>> df = cudf.DataFrame({"a":cudf.Series([1, 2, np.nan, None],
    ...                     nan_as_null=False)})
    >>> df
          a
    0   1.0
    1   2.0
    2   NaN
    3  <NA>

    >>> cudf.get_dummies(df, dummy_na=True, columns=["a"])
       a_1.0  a_2.0  a_nan  a_null
    0      1      0      0       0
    1      0      1      0       0
    2      0      0      1       0
    3      0      0      0       1

    >>> series = cudf.Series([1, 2, None, 2, 4])
    >>> series
    0       1
    1       2
    2    <NA>
    3       2
    4       4
    dtype: int64
    >>> cudf.get_dummies(series, dummy_na=True)
       null  1  2  4
    0     0  1  0  0
    1     0  0  1  0
    2     1  0  0  0
    3     0  0  1  0
    4     0  0  0  1
    """
    if cats is None:
        cats = {}
    # Unsupported pandas-compat arguments are rejected explicitly rather
    # than being silently ignored.
    if sparse:
        raise NotImplementedError("sparse is not supported yet")

    if drop_first:
        raise NotImplementedError("drop_first is not supported yet")

    if isinstance(df, cudf.DataFrame):
        encode_fallback_dtypes = ["object", "category"]

        if columns is None or len(columns) == 0:
            columns = df.select_dtypes(include=encode_fallback_dtypes).columns

        # Validate list-like prefix / prefix_sep lengths against columns.
        _length_check_params(prefix, columns, "prefix")
        _length_check_params(prefix_sep, columns, "prefix_sep")

        if prefix is None:
            prefix = columns

        if isinstance(prefix, str):
            prefix_map = {}
        elif isinstance(prefix, dict):
            prefix_map = prefix
        else:
            prefix_map = dict(zip(columns, prefix))

        if isinstance(prefix_sep, str):
            prefix_sep_map = {}
        elif isinstance(prefix_sep, dict):
            prefix_sep_map = prefix_sep
        else:
            prefix_sep_map = dict(zip(columns, prefix_sep))

        # If we have no columns to encode, we need to drop
        # fallback columns(if any)
        if len(columns) == 0:
            return df.select_dtypes(exclude=encode_fallback_dtypes)
        else:
            # Start from the non-encoded columns, then append the one-hot
            # columns produced for each encoded column.
            result_df = df.copy(deep=False)
            result_df.drop(columns=columns, inplace=True)

            for name in columns:
                unique = _get_unique(column=df._data[name], dummy_na=dummy_na)

                col_enc_df = df.one_hot_encoding(
                    name,
                    prefix=prefix_map.get(name, prefix),
                    cats=cats.get(name, unique),
                    prefix_sep=prefix_sep_map.get(name, prefix_sep),
                    dtype=dtype,
                )
                for col in col_enc_df.columns.difference(df._data.names):
                    result_df[col] = col_enc_df._data[col]

            return result_df
    else:
        # Series / array-like path: encode a single column.
        ser = cudf.Series(df)
        unique = _get_unique(column=ser._column, dummy_na=dummy_na)

        if hasattr(unique, "to_arrow"):
            cats = unique.to_arrow().to_pylist()
        else:
            cats = pd.Series(unique, dtype="object")

        # Null categories are labelled "null" in the output column names.
        col_names = ["null" if cat is None else cat for cat in cats]

        if prefix is not None:
            col_names = [f"{prefix}{prefix_sep}{cat}" for cat in col_names]

        newcols = ser.one_hot_encoding(cats=cats, dtype=dtype)
        result_df = cudf.DataFrame(index=ser.index)
        for i, col in enumerate(newcols):
            result_df._data[col_names[i]] = col

        return result_df
def merge_sorted(
    objs,
    keys=None,
    by_index=False,
    ignore_index=False,
    ascending=True,
    na_position="last",
):
    """Merge a list of sorted DataFrame or Series objects.

    Dataframes/Series in objs list MUST be pre-sorted by columns
    listed in `keys`, or by the index (if `by_index=True`).

    Parameters
    ----------
    objs : list of DataFrame, Series, or Index
    keys : list, default None
        List of Column names to sort by. If None, all columns used
        (Ignored if `index=True`)
    by_index : bool, default False
        Use index for sorting. `keys` input will be ignored if True
    ignore_index : bool, default False
        Drop and ignore index during merge. Default range index will
        be used in the output dataframe.
    ascending : bool, default True
        Sorting is in ascending order, otherwise it is descending
    na_position : {‘first’, ‘last’}, default ‘last’
        'first' nulls at the beginning, 'last' nulls at the end

    Returns
    -------
    A new, lexicographically sorted, DataFrame/Series.

    Raises
    ------
    TypeError
        If `objs` is not list-like or contains non-Frame objects.
    ValueError
        If `objs` is empty, or `by_index` and `ignore_index` are both True.
    """
    # Validate inputs before handing off to the libcudf merge.
    if not pd.api.types.is_list_like(objs):
        raise TypeError("objs must be a list-like of Frame-like objects")

    if len(objs) < 1:
        raise ValueError("objs must be non-empty")

    if not all(isinstance(table, cudf.core.frame.Frame) for table in objs):
        raise TypeError("Elements of objs must be Frame-like")

    # A single input is already merged/sorted by precondition.
    if len(objs) == 1:
        return objs[0]

    if by_index and ignore_index:
        raise ValueError("`by_index` and `ignore_index` cannot both be True")

    result = objs[0].__class__._from_table(
        cudf._lib.merge.merge_sorted(
            objs,
            keys=keys,
            by_index=by_index,
            ignore_index=ignore_index,
            ascending=ascending,
            na_position=na_position,
        )
    )
    # Restore dtype/category metadata lost in the low-level table round-trip.
    result._copy_type_metadata(objs[0])
    return result
def _pivot(df, index, columns):
    """
    Reorganize the values of the DataFrame according to the given
    index and columns.

    Parameters
    ----------
    df : DataFrame
    index : cudf.core.index.Index
        Index labels of the result
    columns : cudf.core.index.Index
        Column labels of the result

    Returns
    -------
    DataFrame
        Pivoted frame indexed by the unique `index` labels, with a
        MultiIndex column axis combining each value column of `df`
        with the unique `columns` labels.
    """
    # _encode() yields the unique labels plus, for each row, the integer
    # position of its label — these positions drive the scatter below.
    columns_labels, columns_idx = columns._encode()
    index_labels, index_idx = index._encode()
    column_labels = columns_labels.to_pandas().to_flat_index()

    # the result of pivot always has a multicolumn
    result = cudf.core.column_accessor.ColumnAccessor(
        multiindex=True, level_names=(None,) + columns._data.names
    )

    def as_tuple(x):
        return x if isinstance(x, tuple) else (x,)

    for v in df:
        # One output column per (value column, column label) pair.
        names = [as_tuple(v) + as_tuple(name) for name in column_labels]
        col = df._data[v]
        result.update(
            cudf.DataFrame._from_table(
                col.scatter_to_table(
                    index_idx,
                    columns_idx,
                    names,
                    nrows=len(index_labels),
                    ncols=len(names),
                )
            )._data
        )

    out = cudf.DataFrame._from_data(
        result, index=cudf.Index(index_labels, name=index.name)
    )
    return out
def pivot(data, index=None, columns=None, values=None):
    """
    Return reshaped DataFrame organized by the given index and column values.

    Reshape data (produce a "pivot" table) based on column values. Uses
    unique values from specified `index` / `columns` to form axes of the
    resulting DataFrame.

    Parameters
    ----------
    index : column name, optional
        Column used to construct the index of the result.
    columns : column name, optional
        Column used to construct the columns of the result.
    values : column name or list of column names, optional
        Column(s) whose values are rearranged to produce the result.
        If not specified, all remaining columns of the DataFrame
        are used.

    Returns
    -------
    DataFrame

    Raises
    ------
    ValueError
        If any (index, columns) pair occurs more than once in `data`.

    Examples
    --------
    >>> a = cudf.DataFrame()
    >>> a['a'] = [1, 1, 2, 2]
    >>> a['b'] = ['a', 'b', 'a', 'b']
    >>> a['c'] = [1, 2, 3, 4]
    >>> a.pivot(index='a', columns='b')
       c
    b  a  b
    a
    1  1  2
    2  3  4

    Pivot with missing values in result:

    >>> a = cudf.DataFrame()
    >>> a['a'] = [1, 1, 2]
    >>> a['b'] = [1, 2, 3]
    >>> a['c'] = ['one', 'two', 'three']
    >>> a.pivot(index='a', columns='b')
       c
    b  1     2     3
    a
    1  one   two   <NA>
    2  <NA>  <NA>  three
    """
    df = data
    if values is None:
        # Everything that is not the index or the columns key is a value.
        values = df._columns_view(
            col for col in df._column_names if col not in (index, columns)
        )
    else:
        values = df._columns_view(values)
    if index is None:
        index = df.index
    else:
        index = cudf.core.index.Index(df.loc[:, index])
    columns = cudf.Index(df.loc[:, columns])

    # Create a DataFrame composed of columns from both
    # columns and index
    columns_index = {
        i: col
        for i, col in enumerate(
            itertools.chain(index._data.columns, columns._data.columns)
        )
    }
    columns_index = cudf.DataFrame(columns_index)

    # Check that each row is unique:
    if len(columns_index) != len(columns_index.drop_duplicates()):
        raise ValueError("Duplicate index-column pairs found. Cannot reshape.")

    return _pivot(values, index, columns)
def unstack(df, level, fill_value=None):
    """
    Pivot one or more levels of the (necessarily hierarchical) index labels.

    Pivots the specified levels of the index labels of df to the innermost
    levels of the columns labels of the result.

    * If the index of ``df`` has multiple levels, returns a ``Dataframe`` with
      specified level of the index pivoted to the column levels.
    * If the index of ``df`` has single level, returns a ``Series`` with all
      column levels pivoted to the index levels.

    Parameters
    ----------
    df : DataFrame
    level : level name or index, list-like
        Integer, name or list of such, specifying one or more
        levels of the index to pivot
    fill_value
        Non-functional argument provided for compatibility with Pandas.

    Returns
    -------
    Series or DataFrame

    Raises
    ------
    ValueError
        If `df` is not a cudf DataFrame, is empty, or (single-level index
        case) has columns of mixed dtypes.
    NotImplementedError
        If `fill_value` is supplied.

    Examples
    --------
    >>> df = cudf.DataFrame()
    >>> df['a'] = [1, 1, 1, 2, 2]
    >>> df['b'] = [1, 2, 3, 1, 2]
    >>> df['c'] = [5, 6, 7, 8, 9]
    >>> df['d'] = ['a', 'b', 'a', 'd', 'e']
    >>> df = df.set_index(['a', 'b', 'd'])
    >>> df
           c
    a b d
    1 1 a  5
      2 b  6
      3 a  7
    2 1 d  8
      2 e  9

    Unstacking level 'a':

    >>> df.unstack('a')
            c
    a       1     2
    b d
    1 a     5  <NA>
      d  <NA>     8
    2 b     6  <NA>
      e  <NA>     9
    3 a     7  <NA>

    Unstacking level 'd' :

    >>> df.unstack('d')
            c
    d       a     b     d     e
    a b
    1 1     5  <NA>  <NA>  <NA>
      2  <NA>     6  <NA>  <NA>
      3     7  <NA>  <NA>  <NA>
    2 1  <NA>  <NA>     8  <NA>
      2  <NA>  <NA>  <NA>     9

    Unstacking multiple levels:

    >>> df.unstack(['b', 'd'])
          c
    b     1           2           3
    d     a     d     b     e     a
    a
    1     5  <NA>     6  <NA>     7
    2  <NA>     8  <NA>     9  <NA>

    Unstacking single level index dataframe:

    >>> df = cudf.DataFrame({('c', 1): [1, 2, 3], ('c', 2):[9, 8, 7]})
    >>> df.unstack()
    c  1  0    1
          1    2
          2    3
       2  0    9
          1    8
          2    7
    dtype: int64
    """
    if not isinstance(df, cudf.DataFrame):
        raise ValueError("`df` should be a cudf Dataframe object.")
    if df.empty:
        raise ValueError("Cannot unstack an empty dataframe.")
    if fill_value is not None:
        raise NotImplementedError("fill_value is not supported.")
    if pd.api.types.is_list_like(level):
        # Unstacking no levels is a no-op.
        if not level:
            return df
    df = df.copy(deep=False)
    if not isinstance(df.index, cudf.MultiIndex):
        # Single-level index: transpose-and-stack pivots ALL column levels
        # down, producing a Series. This needs a uniform column dtype.
        dtype = df._columns[0].dtype
        for col in df._columns:
            if not col.dtype == dtype:
                raise ValueError(
                    "Calling unstack() on single index dataframe"
                    " with different column datatype is not supported."
                )
        res = df.T.stack(dropna=False)
        # Result's index is a multiindex
        res.index.names = tuple(df.columns.names) + df.index.names
        return res
    else:
        # MultiIndex: pop the requested level(s) off the index and pivot
        # them onto the column axis.
        columns = df.index._poplevels(level)
        index = df.index
        result = _pivot(df, index, columns)
        if result.index.nlevels == 1:
            result.index = result.index.get_level_values(result.index.names[0])
        return result
def _get_unique(column, dummy_na):
    """Return the distinct values of *column*.

    When ``dummy_na`` is False, null entries — and NaNs, for floating
    columns — are dropped from the result.
    """
    if isinstance(column, cudf.core.column.CategoricalColumn):
        values = column.categories
    else:
        values = column.unique()
    if dummy_na:
        return values
    # Fold floating-point NaNs into nulls so a single dropna removes both.
    if np.issubdtype(values.dtype, np.floating):
        values = values.nans_to_nulls()
    return values.dropna()
def _length_check_params(obj, columns, name):
    """Raise ValueError when *obj* is list-like but its length differs from *columns*.

    Scalars (non-list-like values) are accepted unconditionally.
    """
    if not cudf.utils.dtypes.is_list_like(obj):
        return
    if len(obj) != len(columns):
        raise ValueError(
            f"Length of '{name}' ({len(obj)}) did not match the "
            f"length of the columns being "
            f"encoded ({len(columns)})."
        )
| 30.578404 | 79 | 0.543665 |
import itertools
import numpy as np
import pandas as pd
import cudf
_axis_map = {0: 0, 1: 1, "index": 0, "columns": 1}
def _align_objs(objs, how="outer"):
i_objs = iter(objs)
first = next(i_objs)
not_matching_index = any(
not first.index.equals(rest.index) for rest in i_objs
)
if not_matching_index:
if not all(o.index.is_unique for o in objs):
raise ValueError("cannot reindex from a duplicate axis")
index = objs[0].index
name = index.name
if how == "inner" or isinstance(index, cudf.MultiIndex):
for obj in objs[1:]:
index = (
cudf.DataFrame(index=obj.index)
.join(cudf.DataFrame(index=index), how=how)
.index
)
index.name = name
return [obj.reindex(index) for obj in objs], False
else:
all_index_objs = [obj.index for obj in objs]
appended_index = all_index_objs[0].append(all_index_objs[1:])
df = cudf.DataFrame(
{
"idxs": appended_index,
"order": cudf.core.column.arange(
start=0, stop=len(appended_index)
),
}
)
df = df.drop_duplicates(subset=["idxs"]).sort_values(
by=["order"], ascending=True
)
final_index = df["idxs"]
final_index.name = name
return [obj.reindex(final_index) for obj in objs], False
else:
return objs, True
def _normalize_series_and_dataframe(objs, axis):
sr_name = 0
for idx, o in enumerate(objs):
if isinstance(o, cudf.Series):
if axis == 1:
name = o.name
if name is None:
name = sr_name
sr_name += 1
else:
name = sr_name
objs[idx] = o.to_frame(name=name)
def concat(objs, axis=0, join="outer", ignore_index=False, sort=None):
if not objs:
raise ValueError("No objects to concatenate")
objs = [obj for obj in objs if obj is not None]
if len(objs) == 1:
if ignore_index:
if axis == 1:
result = cudf.DataFrame(
data=objs[0]._data.copy(deep=True),
index=objs[0].index.copy(deep=True),
)
result.columns = pd.RangeIndex(len(objs[0]._data.names))
elif axis == 0:
result = cudf.DataFrame(
data=objs[0]._data.copy(deep=True),
index=cudf.RangeIndex(len(objs[0])),
)
else:
result = objs[0].copy()
if sort:
if axis == 0:
return result.sort_index()
elif not result.columns.is_monotonic:
raise NotImplementedError(
"Sorting by columns is not yet supported"
)
else:
return result
if len(objs) == 0:
raise ValueError("All objects passed were None")
typs = set()
for o in objs:
if isinstance(o, cudf.MultiIndex):
typs.add(cudf.MultiIndex)
if issubclass(type(o), cudf.Index):
typs.add(type(o))
elif isinstance(o, cudf.DataFrame):
typs.add(cudf.DataFrame)
elif isinstance(o, cudf.Series):
typs.add(cudf.Series)
else:
raise TypeError(f"cannot concatenate object of type {type(o)}")
allowed_typs = {cudf.Series, cudf.DataFrame}
param_axis = _axis_map.get(axis, None)
if param_axis is None:
raise ValueError(
f'`axis` must be 0 / "index" or 1 / "columns", got: {param_axis}'
)
else:
axis = param_axis
if axis == 1:
if not typs.issubset(allowed_typs):
raise TypeError(
"Can only concatenate Series and DataFrame objects when axis=1"
)
df = cudf.DataFrame()
_normalize_series_and_dataframe(objs, axis=axis)
old_objs = objs
objs = [obj for obj in objs if obj.shape != (0, 0)]
if len(objs) == 0:
return df
empty_inner = False
if join == "inner":
if any(obj.empty for obj in old_objs):
empty_inner = True
objs, match_index = _align_objs(objs, how=join)
for idx, o in enumerate(objs):
if idx == 0:
df.index = o.index
for col in o._data.names:
if col in df._data:
raise NotImplementedError(
f"A Column with duplicate name found: {col}, cuDF "
f"doesn't support having multiple columns with "
f"same names yet."
)
df[col] = o._data[col]
result_columns = objs[0].columns
for o in objs[1:]:
result_columns = result_columns.append(o.columns)
if ignore_index:
# with ignore_index the column names change to numbers
df.columns = pd.RangeIndex(len(result_columns.unique()))
else:
df.columns = result_columns.unique()
if empty_inner:
# if join is inner and it contains an empty df
# we return an empty df
return df.head(0)
if not match_index and sort is not False:
return df.sort_index()
if sort or join == "inner":
# when join='outer' and sort=False string indexes
# are returned unsorted. Everything else seems
# to be returned sorted when axis = 1
return df.sort_index()
else:
return df
typ = list(typs)[0]
if len(typs) > 1:
if allowed_typs == typs:
# This block of code will run when `objs` has
# both Series & DataFrame kind of inputs.
_normalize_series_and_dataframe(objs, axis=axis)
typ = cudf.DataFrame
else:
raise TypeError(
f"`concat` cannot concatenate objects of "
f"types: {sorted([t.__name__ for t in typs])}."
)
if typ is cudf.DataFrame:
old_objs = objs
objs = [obj for obj in objs if obj.shape != (0, 0)]
if len(objs) == 0:
# If objs is empty, that indicates all of
# objs are empty dataframes.
return cudf.DataFrame()
elif len(objs) == 1:
if join == "inner":
data = None
else:
data = objs[0]._data.copy(deep=True)
result = cudf.DataFrame(
data=data,
index=cudf.RangeIndex(len(objs[0]))
if ignore_index
else objs[0].index.copy(deep=True),
)
return result
else:
if join == "inner" and len(old_objs) != len(objs):
# don't filter out empty df's
objs = old_objs
result = cudf.DataFrame._concat(
objs,
axis=axis,
join=join,
ignore_index=ignore_index,
sort=sort,
)
return result
elif typ is cudf.Series:
objs = [obj for obj in objs if len(obj)]
if len(objs) == 0:
return cudf.Series()
elif len(objs) == 1 and not ignore_index:
return objs[0]
else:
return cudf.Series._concat(
objs, axis=axis, index=None if ignore_index else True
)
elif typ is cudf.MultiIndex:
return cudf.MultiIndex._concat(objs)
elif issubclass(typ, cudf.Index):
return cudf.Index._concat(objs)
else:
raise TypeError(f"cannot concatenate object of type {typ}")
def melt(
    frame,
    id_vars=None,
    value_vars=None,
    var_name=None,
    value_name="value",
    col_level=None,
):
    """Unpivot ``frame`` from wide to long format.

    ``id_vars`` columns are repeated (tiled) once per value column,
    ``var_name`` becomes a categorical column naming the source column of
    each row, and ``value_name`` holds the concatenated values of all
    ``value_vars`` columns.

    Raises
    ------
    KeyError
        If ``id_vars``/``value_vars`` name missing columns, or overlap.
    NotImplementedError
        If any selected column is categorical.
    ValueError
        If the ``value_vars`` columns do not share a single dtype
        (heterogeneous concat is unimplemented).
    """
    # Only the pandas default (no column MultiIndex level) is supported.
    assert col_level in (None,)
    # Arg cleaning
    import collections
    # id_vars
    if id_vars is not None:
        # Normalize a scalar column label to a one-element list.
        if not isinstance(id_vars, collections.abc.Sequence):
            id_vars = [id_vars]
        id_vars = list(id_vars)
        missing = set(id_vars) - set(frame.columns)
        if not len(missing) == 0:
            raise KeyError(
                f"The following 'id_vars' are not present"
                f" in the DataFrame: {list(missing)}"
            )
    else:
        id_vars = []
    # value_vars
    if value_vars is not None:
        # Normalize a scalar column label to a one-element list.
        if not isinstance(value_vars, collections.abc.Sequence):
            value_vars = [value_vars]
        value_vars = list(value_vars)
        missing = set(value_vars) - set(frame.columns)
        if not len(missing) == 0:
            raise KeyError(
                f"The following 'value_vars' are not present"
                f" in the DataFrame: {list(missing)}"
            )
    else:
        # then all remaining columns in frame
        value_vars = frame.columns.drop(id_vars)
        value_vars = list(value_vars)
    # Error for unimplemented support for datatype
    dtypes = [frame[col].dtype for col in id_vars + value_vars]
    if any(cudf.utils.dtypes.is_categorical_dtype(t) for t in dtypes):
        raise NotImplementedError(
            "Categorical columns are not yet " "supported for function"
        )
    # Check dtype homogeneity in value_var
    # Because heterogeneous concat is unimplemented
    dtypes = [frame[col].dtype for col in value_vars]
    if len(dtypes) > 0:
        dtype = dtypes[0]
        if any(t != dtype for t in dtypes):
            raise ValueError("all cols in value_vars must have the same dtype")
    # overlap
    overlap = set(id_vars).intersection(set(value_vars))
    if not len(overlap) == 0:
        raise KeyError(
            f"'value_vars' and 'id_vars' cannot have overlap."
            f" The following 'value_vars' are ALSO present"
            f" in 'id_vars': {list(overlap)}"
        )
    # N rows per source column, K source columns -> N*K rows in the result.
    N = len(frame)
    K = len(value_vars)
    def _tile(A, reps):
        # Concatenate `reps` copies of series A end-to-end (empty series of
        # the same dtype when reps == 0).
        series_list = [A] * reps
        if reps > 0:
            return cudf.Series._concat(objs=series_list, index=None)
        else:
            return cudf.Series([], dtype=A.dtype)
    # Step 1: tile id_vars
    mdata = collections.OrderedDict()
    for col in id_vars:
        mdata[col] = _tile(frame[col], K)
    # Step 2: add variable
    # Build the category codes: N repeats of 0, then N repeats of 1, etc.
    # NOTE(review): codes use int8, which caps value_vars at 128 columns --
    # confirm whether larger frames are expected here.
    var_cols = []
    for i, _ in enumerate(value_vars):
        var_cols.append(
            cudf.Series(cudf.core.column.full(N, i, dtype=np.int8))
        )
    temp = cudf.Series._concat(objs=var_cols, index=None)
    if not var_name:
        var_name = "variable"
    # Wrap the codes in a categorical column whose categories are the
    # original value-column names.
    mdata[var_name] = cudf.Series(
        cudf.core.column.build_categorical_column(
            categories=value_vars,
            codes=cudf.core.column.as_column(
                temp._column.base_data, dtype=temp._column.dtype
            ),
            mask=temp._column.base_mask,
            size=temp._column.size,
            offset=temp._column.offset,
            ordered=False,
        )
    )
    # Step 3: add values
    mdata[value_name] = cudf.Series._concat(
        objs=[frame[val] for val in value_vars], index=None
    )
    return cudf.DataFrame(mdata)
def get_dummies(
    df,
    prefix=None,
    prefix_sep="_",
    dummy_na=False,
    columns=None,
    cats=None,
    sparse=False,
    drop_first=False,
    dtype="uint8",
):
    """One-hot encode ``df`` (a cudf DataFrame or Series-like input).

    For a DataFrame, each column in ``columns`` (default: all object /
    category columns) is replaced by one indicator column per category,
    named ``<prefix><prefix_sep><category>``. For non-DataFrame input the
    values themselves are encoded into a new DataFrame.

    ``cats`` optionally maps column name -> explicit category list;
    ``dummy_na`` keeps a null category. ``sparse`` and ``drop_first`` are
    unsupported and raise NotImplementedError.
    """
    if cats is None:
        cats = {}
    if sparse:
        raise NotImplementedError("sparse is not supported yet")
    if drop_first:
        raise NotImplementedError("drop_first is not supported yet")
    if isinstance(df, cudf.DataFrame):
        encode_fallback_dtypes = ["object", "category"]
        # Default to encoding every column whose dtype cannot be left as-is.
        if columns is None or len(columns) == 0:
            columns = df.select_dtypes(include=encode_fallback_dtypes).columns
        # prefix / prefix_sep, when list-like, must align 1:1 with columns.
        _length_check_params(prefix, columns, "prefix")
        _length_check_params(prefix_sep, columns, "prefix_sep")
        if prefix is None:
            prefix = columns
        # A str prefix applies uniformly (empty map -> fall back to the
        # scalar via .get default below); dict/list map per column.
        if isinstance(prefix, str):
            prefix_map = {}
        elif isinstance(prefix, dict):
            prefix_map = prefix
        else:
            prefix_map = dict(zip(columns, prefix))
        if isinstance(prefix_sep, str):
            prefix_sep_map = {}
        elif isinstance(prefix_sep, dict):
            prefix_sep_map = prefix_sep
        else:
            prefix_sep_map = dict(zip(columns, prefix_sep))
        # If we have no columns to encode, we need to drop
        # fallback columns(if any)
        if len(columns) == 0:
            return df.select_dtypes(exclude=encode_fallback_dtypes)
        else:
            # Shallow copy, then swap each encoded column for its dummies.
            result_df = df.copy(deep=False)
            result_df.drop(columns=columns, inplace=True)
            for name in columns:
                unique = _get_unique(column=df._data[name], dummy_na=dummy_na)
                col_enc_df = df.one_hot_encoding(
                    name,
                    prefix=prefix_map.get(name, prefix),
                    cats=cats.get(name, unique),
                    prefix_sep=prefix_sep_map.get(name, prefix_sep),
                    dtype=dtype,
                )
                # Only copy over the newly created indicator columns.
                for col in col_enc_df.columns.difference(df._data.names):
                    result_df[col] = col_enc_df._data[col]
            return result_df
    else:
        # Series-like path: encode the values directly.
        ser = cudf.Series(df)
        unique = _get_unique(column=ser._column, dummy_na=dummy_na)
        if hasattr(unique, "to_arrow"):
            cats = unique.to_arrow().to_pylist()
        else:
            cats = pd.Series(unique, dtype="object")
        # Null category is labelled "null" in the output column names.
        col_names = ["null" if cat is None else cat for cat in cats]
        if prefix is not None:
            col_names = [f"{prefix}{prefix_sep}{cat}" for cat in col_names]
        newcols = ser.one_hot_encoding(cats=cats, dtype=dtype)
        result_df = cudf.DataFrame(index=ser.index)
        for i, col in enumerate(newcols):
            result_df._data[col_names[i]] = col
        return result_df
def merge_sorted(
    objs,
    keys=None,
    by_index=False,
    ignore_index=False,
    ascending=True,
    na_position="last",
):
    """Merge a list-like of pre-sorted Frame-like objects into one frame.

    Inputs must already be sorted on ``keys`` (or on their index when
    ``by_index`` is True); the actual merge is delegated to
    ``cudf._lib.merge.merge_sorted``. Type metadata of the first input is
    propagated onto the result.
    """
    if not pd.api.types.is_list_like(objs):
        raise TypeError("objs must be a list-like of Frame-like objects")
    if len(objs) < 1:
        raise ValueError("objs must be non-empty")
    if any(not isinstance(obj, cudf.core.frame.Frame) for obj in objs):
        raise TypeError("Elements of objs must be Frame-like")
    # A single input requires no merging at all.
    if len(objs) == 1:
        return objs[0]
    if by_index and ignore_index:
        raise ValueError("`by_index` and `ignore_index` cannot both be True")
    merged_table = cudf._lib.merge.merge_sorted(
        objs,
        keys=keys,
        by_index=by_index,
        ignore_index=ignore_index,
        ascending=ascending,
        na_position=na_position,
    )
    result = objs[0].__class__._from_table(merged_table)
    result._copy_type_metadata(objs[0])
    return result
def _pivot(df, index, columns):
    """Reorganize the columns of ``df`` into a 2-D pivot table.

    ``index`` and ``columns`` are cudf Index objects whose encoded labels
    determine the target row / column position of every input value. The
    result is a DataFrame indexed by the unique ``index`` labels with a
    column MultiIndex of (source column, column label) pairs.
    """
    # _encode() yields the unique labels plus an integer position for each
    # row, used below as scatter coordinates.
    columns_labels, columns_idx = columns._encode()
    index_labels, index_idx = index._encode()
    column_labels = columns_labels.to_pandas().to_flat_index()
    # the result of pivot always has a multicolumn
    result = cudf.core.column_accessor.ColumnAccessor(
        multiindex=True, level_names=(None,) + columns._data.names
    )
    def as_tuple(x):
        # Normalize a scalar label to a 1-tuple so tuple concatenation works.
        return x if isinstance(x, tuple) else (x,)
    for v in df:
        # One output column per (input column, column label) combination.
        names = [as_tuple(v) + as_tuple(name) for name in column_labels]
        col = df._data[v]
        # Scatter each value to (row index_idx, column columns_idx) of a
        # fresh nrows x ncols table, then merge its columns into the result.
        result.update(
            cudf.DataFrame._from_table(
                col.scatter_to_table(
                    index_idx,
                    columns_idx,
                    names,
                    nrows=len(index_labels),
                    ncols=len(names),
                )
            )._data
        )
    out = cudf.DataFrame._from_data(
        result, index=cudf.Index(index_labels, name=index.name)
    )
    return out
def pivot(data, index=None, columns=None, values=None):
    """Return a reshaped DataFrame organized by ``index`` / ``columns`` values.

    ``values`` defaults to every column not consumed as ``index`` or
    ``columns``. When ``index`` is None the existing row index is reused.

    Raises
    ------
    ValueError
        If any (index, columns) pair occurs more than once, since each pair
        addresses exactly one cell of the pivoted result.
    """
    df = data
    if values is None:
        # Use all columns except the ones acting as index / columns labels.
        values = df._columns_view(
            col for col in df._column_names if col not in (index, columns)
        )
    else:
        values = df._columns_view(values)
    if index is None:
        index = df.index
    else:
        index = cudf.core.index.Index(df.loc[:, index])
    columns = cudf.Index(df.loc[:, columns])
    # Create a DataFrame composed of columns from both
    # columns and index
    # (a dead `columns_index = {}` assignment was removed here; it was
    # immediately overwritten by the comprehension below)
    columns_index = {
        i: col
        for i, col in enumerate(
            itertools.chain(index._data.columns, columns._data.columns)
        )
    }
    columns_index = cudf.DataFrame(columns_index)
    # Check that each row is unique:
    if len(columns_index) != len(columns_index.drop_duplicates()):
        raise ValueError("Duplicate index-column pairs found. Cannot reshape.")
    return _pivot(values, index, columns)
def unstack(df, level, fill_value=None):
    """Pivot the requested index level(s) of ``df`` into columns.

    For a MultiIndex frame, ``level`` is popped off the row index and
    pivoted into a column level. For a single-index frame the whole frame
    is transposed and stacked, which requires all columns to share a dtype.

    ``fill_value`` is unsupported and raises NotImplementedError.
    """
    if not isinstance(df, cudf.DataFrame):
        raise ValueError("`df` should be a cudf Dataframe object.")
    if df.empty:
        raise ValueError("Cannot unstack an empty dataframe.")
    if fill_value is not None:
        raise NotImplementedError("fill_value is not supported.")
    # An empty list of levels is a no-op, matching pandas behaviour.
    if pd.api.types.is_list_like(level):
        if not level:
            return df
    df = df.copy(deep=False)
    if not isinstance(df.index, cudf.MultiIndex):
        # Single-index path: implemented as transpose + stack, which only
        # works when every column has the same dtype.
        dtype = df._columns[0].dtype
        for col in df._columns:
            if not col.dtype == dtype:
                raise ValueError(
                    "Calling unstack() on single index dataframe"
                    " with different column datatype is not supported."
                )
        res = df.T.stack(dropna=False)
        # Result's index is a multiindex
        res.index.names = tuple(df.columns.names) + df.index.names
        return res
    else:
        # Pop the requested level(s) off the row MultiIndex and pivot them
        # into columns.
        columns = df.index._poplevels(level)
        index = df.index
        result = _pivot(df, index, columns)
        # Collapse a single remaining index level out of MultiIndex form.
        if result.index.nlevels == 1:
            result.index = result.index.get_level_values(result.index.names[0])
        return result
def _get_unique(column, dummy_na):
    """Return the distinct values of ``column`` to use as dummy categories.

    A categorical column contributes its declared categories; any other
    column is deduplicated. Unless ``dummy_na`` is set, nulls (and, for
    float columns, NaNs) are excluded from the result.
    """
    if isinstance(column, cudf.core.column.CategoricalColumn):
        unique = column.categories
    else:
        unique = column.unique()
    if dummy_na:
        return unique
    # Fold float NaNs into nulls so a single dropna removes both.
    if np.issubdtype(unique.dtype, np.floating):
        unique = unique.nans_to_nulls()
    return unique.dropna()
def _length_check_params(obj, columns, name):
    """Validate a per-column parameter against the number of columns.

    ``obj`` may be a scalar (applied uniformly, never length-checked) or a
    list-like, in which case its length must equal ``len(columns)``.
    """
    if not cudf.utils.dtypes.is_list_like(obj):
        return
    if len(obj) == len(columns):
        return
    raise ValueError(
        f"Length of '{name}' ({len(obj)}) did not match the "
        f"length of the columns being "
        f"encoded ({len(columns)})."
    )
| true | true |
1c339dfc6796a5e6a419380193c48467dd1213ae | 527 | py | Python | mlperf/clustering/clustering_dbscan.py | xinyin1990/ml-perf | a5367b41dffe188b3e86fa3e2fcf975bfcd1afb2 | [
"MIT"
] | null | null | null | mlperf/clustering/clustering_dbscan.py | xinyin1990/ml-perf | a5367b41dffe188b3e86fa3e2fcf975bfcd1afb2 | [
"MIT"
] | null | null | null | mlperf/clustering/clustering_dbscan.py | xinyin1990/ml-perf | a5367b41dffe188b3e86fa3e2fcf975bfcd1afb2 | [
"MIT"
] | null | null | null | # This file just defines some values needed for generating tables
# cf. Xin for exact implementation
# Author: Vincenzo Musco (http://www.vmusco.com)
import mlperf.clustering.dbscan.run_base as run
from mlperf.clustering.main_clustering import ClusterPipeline
from mlperf.tools.static import DBSCAN_ALGO, INCLUDED_ALGO
RUN_INFO_BASE = DBSCAN_ALGO
AVAIL_ALGOS = INCLUDED_ALGO[RUN_INFO_BASE]
class DBSCAN(ClusterPipeline):
    """Cluster pipeline entry point for the DBSCAN algorithm family."""
    def __init__(self):
        # Wire the DBSCAN run-info key, its registered algorithm list and
        # the dbscan runner module into the generic pipeline base class.
        super().__init__(RUN_INFO_BASE, AVAIL_ALGOS, run)
DBSCAN().runPipe()
| 29.277778 | 65 | 0.795066 |
import mlperf.clustering.dbscan.run_base as run
from mlperf.clustering.main_clustering import ClusterPipeline
from mlperf.tools.static import DBSCAN_ALGO, INCLUDED_ALGO
RUN_INFO_BASE = DBSCAN_ALGO
AVAIL_ALGOS = INCLUDED_ALGO[RUN_INFO_BASE]
class DBSCAN(ClusterPipeline):
def __init__(self):
super().__init__(RUN_INFO_BASE, AVAIL_ALGOS, run)
DBSCAN().runPipe()
| true | true |
1c339e6fcb4a63d8ddb674722578078f4d4353c7 | 2,689 | py | Python | DAAQS/utils/preprocess.py | esowc/DAAQS | 141b4d97edb319ab67d9f42a1aa54a4555829de2 | [
"MIT"
] | 2 | 2020-07-29T13:23:42.000Z | 2020-10-24T08:48:13.000Z | DAAQS/utils/preprocess.py | esowc/DAAQS | 141b4d97edb319ab67d9f42a1aa54a4555829de2 | [
"MIT"
] | null | null | null | DAAQS/utils/preprocess.py | esowc/DAAQS | 141b4d97edb319ab67d9f42a1aa54a4555829de2 | [
"MIT"
] | 1 | 2022-03-10T16:12:09.000Z | 2022-03-10T16:12:09.000Z | import numpy as np
from DAAQS.utils.misc import index_to_center
def temporal_average(c_data, o_data, index_lat, index_lon):
## Ideally CAMS data is time_step x 3 x 3
## And openaq_data is list of all stations in that 3x3 grid
## CAMS Data
c_grid = c_data[:,index_lat-1:index_lat+2,index_lon-1:index_lon+2]
cams_list = [[] for k in range(8)]
for time in range(c_grid.shape[0]):
index_time = time%8
cams_list[index_time].append(np.ravel(c_grid[time,:,:]))
cams_stack = np.stack(cams_list)
cams_avg = np.mean(cams_stack, axis = 1)
c_dict = dict()
lat_0, lon_0 = index_to_center(index_lat-1,index_lon-1)
lat_1, lon_1 = index_to_center(index_lat,index_lon)
lat_2, lon_2 = index_to_center(index_lat+1,index_lon+1)
coordinate_list = [(lat_0, lon_0), (lat_0, lon_1), (lat_0, lon_2),
(lat_1, lon_0), (lat_1, lon_1), (lat_1, lon_2),
(lat_2, lon_0), (lat_2, lon_1), (lat_2, lon_2),]
lat_lon_list = [(index_lat-1, index_lon-1),(index_lat-1, index_lon), (index_lat-1, index_lon+1),
(index_lat, index_lon-1), (index_lat, index_lon), (index_lat, index_lon+1),
(index_lat+1, index_lon-1),(index_lat+1, index_lon), (index_lat+1, index_lon+1)]
for grid in range(cams_avg.shape[1]):
if "grid_"+str(grid) in c_dict:
pass
else:
c_dict["grid_"+str(grid)] = list(cams_avg[:,grid])
c_dict["grid_"+str(grid)].append({"coordinates":coordinate_list[grid]})
c_dict["grid_"+str(grid)].append({"lat_lon_index":lat_lon_list[grid]})
c_dict["grid_"+str(grid)].append({"center_index":(index_lat, index_lon)})
# cams_avg is 8x9 values which is at each 9 location we have 1x8 different values
## OPENAQ Data
o_dict = dict()
for lat in range(index_lat-1,index_lat+2):
for lon in range(index_lon-1, index_lon+2):
for time in range(len(o_data)):
for obs in o_data[time][lat][lon]:
time_index = time%8
if obs.location in o_dict:
o_dict[obs.location][time_index].append(obs.value)
else:
o_dict[obs.location] = [[],[],[],[],[],[],[],[], {"coordinates":(obs.lat, obs.lon)}, {"lat_lon_index":(lat, lon)}, {"center_index":(index_lat, index_lon)}]
for each in o_dict:
for i in range(8):
try:
vals = o_dict[each][i]
o_dict[each][i] = sum(vals)/len(vals)
except:
o_dict[each][i] = -1
return c_dict, o_dict | 39.544118 | 179 | 0.579769 | import numpy as np
from DAAQS.utils.misc import index_to_center
def temporal_average(c_data, o_data, index_lat, index_lon):
in range(8)]
for time in range(c_grid.shape[0]):
index_time = time%8
cams_list[index_time].append(np.ravel(c_grid[time,:,:]))
cams_stack = np.stack(cams_list)
cams_avg = np.mean(cams_stack, axis = 1)
c_dict = dict()
lat_0, lon_0 = index_to_center(index_lat-1,index_lon-1)
lat_1, lon_1 = index_to_center(index_lat,index_lon)
lat_2, lon_2 = index_to_center(index_lat+1,index_lon+1)
coordinate_list = [(lat_0, lon_0), (lat_0, lon_1), (lat_0, lon_2),
(lat_1, lon_0), (lat_1, lon_1), (lat_1, lon_2),
(lat_2, lon_0), (lat_2, lon_1), (lat_2, lon_2),]
lat_lon_list = [(index_lat-1, index_lon-1),(index_lat-1, index_lon), (index_lat-1, index_lon+1),
(index_lat, index_lon-1), (index_lat, index_lon), (index_lat, index_lon+1),
(index_lat+1, index_lon-1),(index_lat+1, index_lon), (index_lat+1, index_lon+1)]
for grid in range(cams_avg.shape[1]):
if "grid_"+str(grid) in c_dict:
pass
else:
c_dict["grid_"+str(grid)] = list(cams_avg[:,grid])
c_dict["grid_"+str(grid)].append({"coordinates":coordinate_list[grid]})
c_dict["grid_"+str(grid)].append({"lat_lon_index":lat_lon_list[grid]})
c_dict["grid_"+str(grid)].append({"center_index":(index_lat, index_lon)})
ict = dict()
for lat in range(index_lat-1,index_lat+2):
for lon in range(index_lon-1, index_lon+2):
for time in range(len(o_data)):
for obs in o_data[time][lat][lon]:
time_index = time%8
if obs.location in o_dict:
o_dict[obs.location][time_index].append(obs.value)
else:
o_dict[obs.location] = [[],[],[],[],[],[],[],[], {"coordinates":(obs.lat, obs.lon)}, {"lat_lon_index":(lat, lon)}, {"center_index":(index_lat, index_lon)}]
for each in o_dict:
for i in range(8):
try:
vals = o_dict[each][i]
o_dict[each][i] = sum(vals)/len(vals)
except:
o_dict[each][i] = -1
return c_dict, o_dict | true | true |
1c339e8dd4fb94aed0b5e311ce5c943fc6ed1452 | 529 | py | Python | bookmarks/services/tags.py | mindovermiles262/linkding | 258c47ee7e7834f466e88ce379d5c2b11d461887 | [
"MIT"
] | 1 | 2019-12-26T18:50:21.000Z | 2019-12-26T18:50:21.000Z | bookmarks/services/tags.py | mindovermiles262/linkding | 258c47ee7e7834f466e88ce379d5c2b11d461887 | [
"MIT"
] | null | null | null | bookmarks/services/tags.py | mindovermiles262/linkding | 258c47ee7e7834f466e88ce379d5c2b11d461887 | [
"MIT"
] | null | null | null | from typing import List
from django.contrib.auth.models import User
from django.utils import timezone
from bookmarks.models import Tag
def get_or_create_tags(tag_names: List[str], user: User):
    """Resolve each name in ``tag_names`` to a Tag owned by ``user``.

    Missing tags are created on the fly; the returned list preserves the
    input order.
    """
    return [get_or_create_tag(name, user) for name in tag_names]
def get_or_create_tag(name: str, user: User):
    """Return the Tag named ``name`` owned by ``user``, creating it if absent.

    A newly created tag gets ``date_added`` stamped with the current time
    before being saved.
    """
    try:
        return Tag.objects.get(name=name, owner=user)
    except Tag.DoesNotExist:
        new_tag = Tag(name=name, owner=user)
        new_tag.date_added = timezone.now()
        new_tag.save()
        return new_tag
return tag
| 25.190476 | 72 | 0.706994 | from typing import List
from django.contrib.auth.models import User
from django.utils import timezone
from bookmarks.models import Tag
def get_or_create_tags(tag_names: List[str], user: User):
return [get_or_create_tag(tag_name, user) for tag_name in tag_names]
def get_or_create_tag(name: str, user: User):
try:
return Tag.objects.get(name=name, owner=user)
except Tag.DoesNotExist:
tag = Tag(name=name, owner=user)
tag.date_added = timezone.now()
tag.save()
return tag
| true | true |
1c339ec89326bd780667ec2cc34b9f8dae8bd876 | 7,177 | py | Python | research/object_detection/predictors/heads/keras_class_head_test.py | 873040/Abhishek | 2ddd716e66bc5cc6e6f0787508dd07da0e02e75a | [
"Apache-2.0"
] | 153 | 2020-10-25T13:58:04.000Z | 2022-03-07T06:01:54.000Z | research/object_detection/predictors/heads/keras_class_head_test.py | 873040/Abhishek | 2ddd716e66bc5cc6e6f0787508dd07da0e02e75a | [
"Apache-2.0"
] | 12 | 2020-03-24T17:53:50.000Z | 2022-03-12T00:05:19.000Z | research/object_detection/predictors/heads/keras_class_head_test.py | 873040/Abhishek | 2ddd716e66bc5cc6e6f0787508dd07da0e02e75a | [
"Apache-2.0"
] | 23 | 2020-10-25T14:44:47.000Z | 2021-03-31T02:12:13.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.predictors.heads.class_head."""
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.predictors.heads import keras_class_head
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
class ConvolutionalKerasClassPredictorTest(test_case.TestCase):
  """Tests for keras_class_head.ConvolutionalClassHead."""
  def _build_conv_hyperparams(self):
    """Returns KerasLayerHyperparams built from a minimal text proto."""
    conv_hyperparams = hyperparams_pb2.Hyperparams()
    conv_hyperparams_text_proto = """
      activation: NONE
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
    return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
  def test_prediction_size_depthwise_false(self):
    """Output is [batch, num_anchors, num_class_slots] with regular conv."""
    conv_hyperparams = self._build_conv_hyperparams()
    class_prediction_head = keras_class_head.ConvolutionalClassHead(
        is_training=True,
        num_class_slots=20,
        use_dropout=True,
        dropout_keep_prob=0.5,
        kernel_size=3,
        conv_hyperparams=conv_hyperparams,
        freeze_batchnorm=False,
        num_predictions_per_location=1,
        use_depthwise=False)
    image_feature = tf.random_uniform(
        [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
    class_predictions = class_prediction_head(image_feature,)
    # 17 * 19 feature-map locations * 1 prediction per location = 323.
    self.assertAllEqual([64, 323, 20],
                        class_predictions.get_shape().as_list())
  def test_prediction_size_depthwise_true(self):
    """Output is [batch, num_anchors, num_class_slots] with depthwise conv."""
    conv_hyperparams = self._build_conv_hyperparams()
    class_prediction_head = keras_class_head.ConvolutionalClassHead(
        is_training=True,
        num_class_slots=20,
        use_dropout=True,
        dropout_keep_prob=0.5,
        kernel_size=3,
        conv_hyperparams=conv_hyperparams,
        freeze_batchnorm=False,
        num_predictions_per_location=1,
        use_depthwise=True)
    image_feature = tf.random_uniform(
        [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
    class_predictions = class_prediction_head(image_feature,)
    # Depthwise separable conv must not change the output shape.
    self.assertAllEqual([64, 323, 20],
                        class_predictions.get_shape().as_list())
class MaskRCNNClassHeadTest(test_case.TestCase):
  """Tests for keras_class_head.MaskRCNNClassHead."""
  def _build_fc_hyperparams(self,
                            op_type=hyperparams_pb2.Hyperparams.FC):
    """Returns KerasLayerHyperparams for a fully-connected op type."""
    hyperparams = hyperparams_pb2.Hyperparams()
    hyperparams_text_proto = """
      activation: NONE
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    text_format.Merge(hyperparams_text_proto, hyperparams)
    hyperparams.op = op_type
    return hyperparams_builder.KerasLayerHyperparams(hyperparams)
  def test_prediction_size(self):
    """Output is [batch, 1, num_class_slots] for RoI-pooled input."""
    class_prediction_head = keras_class_head.MaskRCNNClassHead(
        is_training=False,
        num_class_slots=20,
        fc_hyperparams=self._build_fc_hyperparams(),
        freeze_batchnorm=False,
        use_dropout=True,
        dropout_keep_prob=0.5)
    roi_pooled_features = tf.random_uniform(
        [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
    prediction = class_prediction_head(roi_pooled_features)
    self.assertAllEqual([64, 1, 20], prediction.get_shape().as_list())
class WeightSharedConvolutionalKerasClassPredictorTest(test_case.TestCase):
  """Tests for keras_class_head.WeightSharedConvolutionalClassHead."""
  def _build_conv_hyperparams(self):
    """Returns KerasLayerHyperparams built from a minimal text proto."""
    conv_hyperparams = hyperparams_pb2.Hyperparams()
    conv_hyperparams_text_proto = """
      activation: NONE
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
    return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
  def test_prediction_size_depthwise_false(self):
    """Output is [batch, num_anchors, num_class_slots] with regular conv."""
    conv_hyperparams = self._build_conv_hyperparams()
    class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead(
        num_class_slots=20,
        conv_hyperparams=conv_hyperparams,
        num_predictions_per_location=1,
        use_depthwise=False)
    image_feature = tf.random_uniform(
        [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
    class_predictions = class_prediction_head(image_feature)
    self.assertAllEqual([64, 323, 20], class_predictions.get_shape().as_list())
  def test_prediction_size_depthwise_true(self):
    """Output is [batch, num_anchors, num_class_slots] with depthwise conv."""
    conv_hyperparams = self._build_conv_hyperparams()
    class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead(
        num_class_slots=20,
        conv_hyperparams=conv_hyperparams,
        num_predictions_per_location=1,
        use_depthwise=True)
    image_feature = tf.random_uniform(
        [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
    class_predictions = class_prediction_head(image_feature)
    self.assertAllEqual([64, 323, 20], class_predictions.get_shape().as_list())
  def test_variable_count_depth_wise_true(self):
    """The depthwise head creates exactly 3 global variables."""
    g = tf.Graph()
    with g.as_default():
      conv_hyperparams = self._build_conv_hyperparams()
      class_prediction_head = (
          keras_class_head.WeightSharedConvolutionalClassHead(
              num_class_slots=20,
              conv_hyperparams=conv_hyperparams,
              num_predictions_per_location=1,
              use_depthwise=True))
      image_feature = tf.random_uniform(
          [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
      _ = class_prediction_head(image_feature)
      variables = g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
      # NOTE(review): presumably depthwise kernel + pointwise kernel + bias;
      # confirm against the head implementation.
      self.assertEqual(len(variables), 3)
  def test_variable_count_depth_wise_False(self):
    """The regular-conv head creates exactly 2 global variables."""
    g = tf.Graph()
    with g.as_default():
      conv_hyperparams = self._build_conv_hyperparams()
      class_prediction_head = (
          keras_class_head.WeightSharedConvolutionalClassHead(
              num_class_slots=20,
              conv_hyperparams=conv_hyperparams,
              num_predictions_per_location=1,
              use_depthwise=False))
      image_feature = tf.random_uniform(
          [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
      _ = class_prediction_head(image_feature)
      variables = g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
      # NOTE(review): presumably conv kernel + bias; confirm against the
      # head implementation.
      self.assertEqual(len(variables), 2)
if __name__ == '__main__':
tf.test.main()
| 37.380208 | 80 | 0.704751 |
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.predictors.heads import keras_class_head
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
class ConvolutionalKerasClassPredictorTest(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def test_prediction_size_depthwise_false(self):
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = keras_class_head.ConvolutionalClassHead(
is_training=True,
num_class_slots=20,
use_dropout=True,
dropout_keep_prob=0.5,
kernel_size=3,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=False,
num_predictions_per_location=1,
use_depthwise=False)
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
class_predictions = class_prediction_head(image_feature,)
self.assertAllEqual([64, 323, 20],
class_predictions.get_shape().as_list())
def test_prediction_size_depthwise_true(self):
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = keras_class_head.ConvolutionalClassHead(
is_training=True,
num_class_slots=20,
use_dropout=True,
dropout_keep_prob=0.5,
kernel_size=3,
conv_hyperparams=conv_hyperparams,
freeze_batchnorm=False,
num_predictions_per_location=1,
use_depthwise=True)
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
class_predictions = class_prediction_head(image_feature,)
self.assertAllEqual([64, 323, 20],
class_predictions.get_shape().as_list())
class MaskRCNNClassHeadTest(test_case.TestCase):
def _build_fc_hyperparams(self,
op_type=hyperparams_pb2.Hyperparams.FC):
hyperparams = hyperparams_pb2.Hyperparams()
hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(hyperparams_text_proto, hyperparams)
hyperparams.op = op_type
return hyperparams_builder.KerasLayerHyperparams(hyperparams)
def test_prediction_size(self):
class_prediction_head = keras_class_head.MaskRCNNClassHead(
is_training=False,
num_class_slots=20,
fc_hyperparams=self._build_fc_hyperparams(),
freeze_batchnorm=False,
use_dropout=True,
dropout_keep_prob=0.5)
roi_pooled_features = tf.random_uniform(
[64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
prediction = class_prediction_head(roi_pooled_features)
self.assertAllEqual([64, 1, 20], prediction.get_shape().as_list())
class WeightSharedConvolutionalKerasClassPredictorTest(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: NONE
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def test_prediction_size_depthwise_false(self):
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead(
num_class_slots=20,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=1,
use_depthwise=False)
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
class_predictions = class_prediction_head(image_feature)
self.assertAllEqual([64, 323, 20], class_predictions.get_shape().as_list())
def test_prediction_size_depthwise_true(self):
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead(
num_class_slots=20,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=1,
use_depthwise=True)
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
class_predictions = class_prediction_head(image_feature)
self.assertAllEqual([64, 323, 20], class_predictions.get_shape().as_list())
def test_variable_count_depth_wise_true(self):
g = tf.Graph()
with g.as_default():
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = (
keras_class_head.WeightSharedConvolutionalClassHead(
num_class_slots=20,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=1,
use_depthwise=True))
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
_ = class_prediction_head(image_feature)
variables = g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
self.assertEqual(len(variables), 3)
def test_variable_count_depth_wise_False(self):
g = tf.Graph()
with g.as_default():
conv_hyperparams = self._build_conv_hyperparams()
class_prediction_head = (
keras_class_head.WeightSharedConvolutionalClassHead(
num_class_slots=20,
conv_hyperparams=conv_hyperparams,
num_predictions_per_location=1,
use_depthwise=False))
image_feature = tf.random_uniform(
[64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
_ = class_prediction_head(image_feature)
variables = g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
self.assertEqual(len(variables), 2)
if __name__ == '__main__':
tf.test.main()
| true | true |
1c339f26fe988d3d0d3c50108569327ce6e8ea57 | 342 | py | Python | employees/serializers.py | rrobles9112/django-rest-framework-crud | 88f8e881fd520493beb480cf15e5079db5e26f25 | [
"MIT"
] | null | null | null | employees/serializers.py | rrobles9112/django-rest-framework-crud | 88f8e881fd520493beb480cf15e5079db5e26f25 | [
"MIT"
] | null | null | null | employees/serializers.py | rrobles9112/django-rest-framework-crud | 88f8e881fd520493beb480cf15e5079db5e26f25 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from .models import Employees
from django.contrib.auth.models import User
class EmployeeSerializer(serializers.ModelSerializer):  # serializes Employees records for the REST API
    """DRF ModelSerializer exposing payroll-related fields of Employees."""
    class Meta:
        model = Employees
        # NOTE(review): 'start_data' and 'departament' mirror the model's
        # field names; they look like typos for 'start_date'/'department' --
        # confirm against the Employees model before renaming.
        fields = ('employee_code', 'salary_per_hour', 'start_data', 'departament')
| 26.307692 | 90 | 0.736842 | from rest_framework import serializers
from .models import Employees
from django.contrib.auth.models import User
class EmployeeSerializer(serializers.ModelSerializer):
class Meta:
model = Employees
fields = ('employee_code', 'salary_per_hour', 'start_data', 'departament')
| true | true |
1c339f77114e97021522ac4ae64937cd92a4e82f | 29,153 | py | Python | salt/utils/master.py | amaclean199/salt | 8aaac011b4616e3c9e74a1daafb4a2146a5a430f | [
"Apache-2.0"
] | null | null | null | salt/utils/master.py | amaclean199/salt | 8aaac011b4616e3c9e74a1daafb4a2146a5a430f | [
"Apache-2.0"
] | null | null | null | salt/utils/master.py | amaclean199/salt | 8aaac011b4616e3c9e74a1daafb4a2146a5a430f | [
"Apache-2.0"
] | 1 | 2019-06-10T17:42:31.000Z | 2019-06-10T17:42:31.000Z | # -*- coding: utf-8 -*-
'''
salt.utils.master
-----------------
Utilities that can only be used on a salt master.
'''
# Import python libs
from __future__ import absolute_import, unicode_literals
import os
import logging
import signal
from threading import Thread, Event
# Import salt libs
import salt.log
import salt.cache
import salt.client
import salt.pillar
import salt.utils.atomicfile
import salt.utils.files
import salt.utils.minions
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.verify
import salt.utils.versions
import salt.payload
from salt.exceptions import SaltException
import salt.config
from salt.utils.cache import CacheCli as cache_cli
from salt.utils.process import MultiprocessingProcess
# Import third party libs
from salt.ext import six
try:
import zmq
HAS_ZMQ = True
except ImportError:
HAS_ZMQ = False
log = logging.getLogger(__name__)
class MasterPillarUtil(object):
    '''
    Helper utility for easy access to targeted minion grain and
    pillar data, either from cached data on the master or retrieved
    on demand, or (by default) both.

    The minion pillar data returned in get_minion_pillar() is
    compiled directly from salt.pillar.Pillar on the master to
    avoid any possible 'pillar poisoning' from a compromised or
    untrusted minion.

    ** However, the minion grains are still possibly entirely
    supplied by the minion. **

    Example use case:
        For runner modules that need access minion pillar data,
        MasterPillarUtil.get_minion_pillar should be used instead
        of getting the pillar data by executing the "pillar" module
        on the minions:

        # my_runner.py
        tgt = 'web*'
        pillar_util = salt.utils.master.MasterPillarUtil(tgt, tgt_type='glob', opts=__opts__)
        pillar_data = pillar_util.get_minion_pillar()
    '''
    def __init__(self,
                 tgt='',
                 tgt_type='glob',
                 saltenv=None,
                 use_cached_grains=True,
                 use_cached_pillar=True,
                 grains_fallback=True,
                 pillar_fallback=True,
                 opts=None,
                 expr_form=None):
        # remember to remove the expr_form argument from this function when
        # performing the cleanup on this deprecation.
        if expr_form is not None:
            salt.utils.versions.warn_until(
                'Fluorine',
                'the target type should be passed using the \'tgt_type\' '
                'argument instead of \'expr_form\'. Support for using '
                '\'expr_form\' will be removed in Salt Fluorine.'
            )
            tgt_type = expr_form
        log.debug('New instance of %s created.',
                  self.__class__.__name__)
        if opts is None:
            log.error('%s: Missing master opts init arg.',
                      self.__class__.__name__)
            raise SaltException('{0}: Missing master opts init arg.'.format(
                self.__class__.__name__))
        else:
            self.opts = opts
        self.serial = salt.payload.Serial(self.opts)
        self.tgt = tgt
        self.tgt_type = tgt_type
        self.saltenv = saltenv
        self.use_cached_grains = use_cached_grains
        self.use_cached_pillar = use_cached_pillar
        self.grains_fallback = grains_fallback
        self.pillar_fallback = pillar_fallback
        self.cache = salt.cache.factory(opts)
        log.debug(
            'Init settings: tgt: \'%s\', tgt_type: \'%s\', saltenv: \'%s\', '
            'use_cached_grains: %s, use_cached_pillar: %s, '
            'grains_fallback: %s, pillar_fallback: %s',
            tgt, tgt_type, saltenv, use_cached_grains, use_cached_pillar,
            grains_fallback, pillar_fallback
        )

    def _get_cached_mine_data(self, *minion_ids):
        '''
        Return one dict mapping each targeted minion id to its cached mine
        data ({} when nothing valid is cached for that minion).

        With no ids given, every minion found in the cache is included.
        '''
        mine_data = {minion_id: {} for minion_id in minion_ids}
        if (not self.opts.get('minion_data_cache', False)
                and not self.opts.get('enforce_mine_cache', False)):
            # Message fixed: the original ran two words together and
            # misspelled 'enforce'.
            log.debug('Skipping cached mine data: minion_data_cache '
                      'and enforce_mine_cache are both disabled.')
            return mine_data
        if not minion_ids:
            minion_ids = self.cache.list('minions')
        for minion_id in minion_ids:
            # Never touch cache entries for invalid (path-traversing) ids
            if not salt.utils.verify.valid_id(self.opts, minion_id):
                continue
            mdata = self.cache.fetch('minions/{0}'.format(minion_id), 'mine')
            if isinstance(mdata, dict):
                mine_data[minion_id] = mdata
        return mine_data

    def _get_cached_minion_data(self, *minion_ids):
        '''
        Return two dicts (grains, pillars) of cached data for the given
        minions, each defaulting to {} per minion. With no ids given,
        every minion found in the cache is included.
        '''
        grains = {minion_id: {} for minion_id in minion_ids}
        pillars = {minion_id: {} for minion_id in minion_ids}
        if not self.opts.get('minion_data_cache', False):
            log.debug('Skipping cached data because minion_data_cache is not '
                      'enabled.')
            return grains, pillars
        if not minion_ids:
            minion_ids = self.cache.list('minions')
        for minion_id in minion_ids:
            if not salt.utils.verify.valid_id(self.opts, minion_id):
                continue
            mdata = self.cache.fetch('minions/{0}'.format(minion_id), 'data')
            if not isinstance(mdata, dict):
                log.warning(
                    'cache.fetch should always return a dict. ReturnedType: %s, MinionId: %s',
                    type(mdata).__name__,
                    minion_id
                )
                continue
            if 'grains' in mdata:
                grains[minion_id] = mdata['grains']
            if 'pillar' in mdata:
                pillars[minion_id] = mdata['pillar']
        return grains, pillars

    def _get_live_minion_grains(self, minion_ids):
        '''
        Return a dict of grains fetched directly from the given minions
        via a 'grains.items' command.
        '''
        log.debug('Getting live grains for minions: "%s"', minion_ids)
        client = salt.client.get_local_client(self.opts['conf_file'])
        ret = client.cmd(
            ','.join(minion_ids),
            'grains.items',
            timeout=self.opts['timeout'],
            tgt_type='list')
        return ret

    def _get_live_minion_pillar(self, minion_id=None, minion_grains=None):
        '''
        Compile and return the pillar for one minion on the master using
        the supplied grains. Returns {} without grains or a minion id.
        '''
        if minion_id is None:
            return {}
        if not minion_grains:
            log.warning(
                'Cannot get pillar data for %s: no grains supplied.',
                minion_id
            )
            return {}
        log.debug('Getting live pillar for %s', minion_id)
        pillar = salt.pillar.Pillar(
            self.opts,
            minion_grains,
            minion_id,
            self.saltenv,
            self.opts['ext_pillar'])
        log.debug('Compiling pillar for %s', minion_id)
        ret = pillar.compile_pillar()
        return ret

    def _get_minion_grains(self, *minion_ids, **kwargs):
        '''
        Get the minion grains either from cache or from a direct query
        on the minion. By default try to use cached grains first, then
        fall back to querying the minion directly.
        '''
        cached_grains = kwargs.get('cached_grains', {})
        cret = {}
        lret = {}
        if self.use_cached_grains:
            cret = {minion_id: mcache
                    for minion_id, mcache in six.iteritems(cached_grains)
                    if mcache}
            missed_minions = [minion_id for minion_id in minion_ids
                              if minion_id not in cret]
            log.debug('Missed cached minion grains for: %s', missed_minions)
            if self.grains_fallback:
                lret = self._get_live_minion_grains(missed_minions)
        else:
            lret = self._get_live_minion_grains(minion_ids)
            missed_minions = [minion_id for minion_id in minion_ids
                              if minion_id not in lret]
            log.debug('Missed live minion grains for: %s', missed_minions)
            if self.grains_fallback:
                cret = {minion_id: mcache
                        for minion_id, mcache in six.iteritems(cached_grains)
                        if mcache}
        # Merge order matches the original: live results override the {}
        # placeholders, cached results override live ones.
        ret = {minion_id: {} for minion_id in minion_ids}
        ret.update(lret)
        ret.update(cret)
        return ret

    def _get_minion_pillar(self, *minion_ids, **kwargs):
        '''
        Get the minion pillar either from cache or from a direct query
        on the minion. By default try use the cached pillar first, then
        fall back to rendering pillar on demand with the supplied grains.
        '''
        grains = kwargs.get('grains', {})
        cached_pillar = kwargs.get('cached_pillar', {})
        cret = {}
        lret = {}
        if self.use_cached_pillar:
            cret = {minion_id: mcache
                    for minion_id, mcache in six.iteritems(cached_pillar)
                    if mcache}
            missed_minions = [minion_id for minion_id in minion_ids
                              if minion_id not in cret]
            log.debug('Missed cached minion pillars for: %s', missed_minions)
            if self.pillar_fallback:
                lret = {minion_id: self._get_live_minion_pillar(
                            minion_id, grains.get(minion_id, {}))
                        for minion_id in missed_minions}
        else:
            lret = {minion_id: self._get_live_minion_pillar(
                        minion_id, grains.get(minion_id, {}))
                    for minion_id in minion_ids}
            missed_minions = [minion_id for minion_id in minion_ids
                              if minion_id not in lret]
            log.debug('Missed live minion pillars for: %s', missed_minions)
            if self.pillar_fallback:
                cret = {minion_id: mcache
                        for minion_id, mcache in six.iteritems(cached_pillar)
                        if mcache}
        # Merge order matches the original: live results override the {}
        # placeholders, cached results override live ones.
        ret = {minion_id: {} for minion_id in minion_ids}
        ret.update(lret)
        ret.update(cret)
        return ret

    def _tgt_to_list(self):
        '''
        Return a list of minion ids that match self.tgt and self.tgt_type
        (an empty list when nothing matches).
        '''
        ckminions = salt.utils.minions.CkMinions(self.opts)
        _res = ckminions.check_minions(self.tgt, self.tgt_type)
        minion_ids = _res['minions']
        if not minion_ids:
            log.debug('No minions matched for tgt="%s" and tgt_type="%s"', self.tgt, self.tgt_type)
            # Return an empty list (the original returned {}): still falsy
            # and iterable, but consistent with the non-empty return type.
            return []
        log.debug('Matching minions for tgt="%s" and tgt_type="%s": %s', self.tgt, self.tgt_type, minion_ids)
        return minion_ids

    def get_minion_pillar(self):
        '''
        Get pillar data for the targeted minions, either by fetching the
        cached minion data on the master, or by compiling the minion's
        pillar data on the master.

        For runner modules that need access minion pillar data, this
        function should be used instead of getting the pillar data by
        executing the pillar module on the minions.

        By default, this function tries hard to get the pillar data:
            - Try to get the cached minion grains and pillar if the
                master has minion_data_cache: True
            - If the pillar data for the minion is cached, use it.
            - If there is no cached grains/pillar data for a minion,
                then try to get the minion grains directly from the minion.
            - Use the minion grains to compile the pillar directly from the
                master using salt.pillar.Pillar
        '''
        minion_ids = self._tgt_to_list()
        if not minion_ids:
            # Mirror get_minion_grains(): without this early return an
            # empty match fell through to _get_cached_minion_data() with no
            # ids, which loads the cached data of *every* minion.
            return {}
        if any([self.use_cached_grains, self.use_cached_pillar,
                self.grains_fallback, self.pillar_fallback]):
            log.debug('Getting cached minion data')
            cached_minion_grains, cached_minion_pillars = self._get_cached_minion_data(*minion_ids)
        else:
            cached_minion_grains = {}
            cached_minion_pillars = {}
        log.debug('Getting minion grain data for: %s', minion_ids)
        minion_grains = self._get_minion_grains(
            *minion_ids,
            cached_grains=cached_minion_grains)
        log.debug('Getting minion pillar data for: %s', minion_ids)
        minion_pillars = self._get_minion_pillar(
            *minion_ids,
            grains=minion_grains,
            cached_pillar=cached_minion_pillars)
        return minion_pillars

    def get_minion_grains(self):
        '''
        Get grains data for the targeted minions, either by fetching the
        cached minion data on the master, or by fetching the grains
        directly on the minion.

        By default, this function tries hard to get the grains data:
            - Try to get the cached minion grains if the master
                has minion_data_cache: True
            - If the grains data for the minion is cached, use it.
            - If there is no cached grains data for a minion,
                then try to get the minion grains directly from the minion.
        '''
        minion_ids = self._tgt_to_list()
        if not minion_ids:
            return {}
        if any([self.use_cached_grains, self.grains_fallback]):
            log.debug('Getting cached minion data.')
            cached_minion_grains, _cached_minion_pillars = self._get_cached_minion_data(*minion_ids)
        else:
            cached_minion_grains = {}
        log.debug('Getting minion grain data for: %s', minion_ids)
        minion_grains = self._get_minion_grains(
            *minion_ids,
            cached_grains=cached_minion_grains)
        return minion_grains

    def get_cached_mine_data(self):
        '''
        Get cached mine data for the targeted minions.
        '''
        minion_ids = self._tgt_to_list()
        log.debug('Getting cached mine data for: %s', minion_ids)
        return self._get_cached_mine_data(*minion_ids)

    def clear_cached_minion_data(self,
                                 clear_pillar=False,
                                 clear_grains=False,
                                 clear_mine=False,
                                 clear_mine_func=None):
        '''
        Clear the cached data/files for the targeted minions.

        Returns True on success (and also on cache I/O errors, preserving
        the historical best-effort behavior); False when no data type was
        selected for clearing.
        '''
        clear_what = []
        if clear_pillar:
            clear_what.append('pillar')
        if clear_grains:
            clear_what.append('grains')
        if clear_mine:
            clear_what.append('mine')
        if clear_mine_func is not None:
            clear_what.append('mine_func: \'{0}\''.format(clear_mine_func))
        if not clear_what:
            log.debug('No cached data types specified for clearing.')
            return False
        minion_ids = self._tgt_to_list()
        log.debug('Clearing cached %s data for: %s',
                  ', '.join(clear_what),
                  minion_ids)
        if clear_pillar == clear_grains:
            # clear_pillar and clear_grains are both True or both False.
            # This means we don't deal with pillar/grains caches at all.
            grains = {}
            pillars = {}
        else:
            # Unless both clear_pillar and clear_grains are True, we need
            # to read in the pillar/grains data since they are both stored
            # in the same file, 'data.p'
            grains, pillars = self._get_cached_minion_data(*minion_ids)
        try:
            c_minions = self.cache.list('minions')
            for minion_id in minion_ids:
                if not salt.utils.verify.valid_id(self.opts, minion_id):
                    continue
                if minion_id not in c_minions:
                    # Cache bank for this minion does not exist. Nothing to do.
                    continue
                bank = 'minions/{0}'.format(minion_id)
                minion_pillar = pillars.pop(minion_id, False)
                minion_grains = grains.pop(minion_id, False)
                if ((clear_pillar and clear_grains) or
                        (clear_pillar and not minion_grains) or
                        (clear_grains and not minion_pillar)):
                    # Not saving pillar or grains, so just delete the cache file
                    self.cache.flush(bank, 'data')
                elif clear_pillar and minion_grains:
                    self.cache.store(bank, 'data', {'grains': minion_grains})
                elif clear_grains and minion_pillar:
                    self.cache.store(bank, 'data', {'pillar': minion_pillar})
                if clear_mine:
                    # Delete the whole mine file
                    self.cache.flush(bank, 'mine')
                elif clear_mine_func is not None:
                    # Delete a specific function from the mine file
                    mine_data = self.cache.fetch(bank, 'mine')
                    if isinstance(mine_data, dict):
                        if mine_data.pop(clear_mine_func, False):
                            self.cache.store(bank, 'mine', mine_data)
        except (OSError, IOError):
            # Deliberate best-effort: a partially cleared cache is still a
            # success from the caller's point of view.
            return True
        return True
class CacheTimer(Thread):
    '''
    Background thread that publishes a tick event once per second.

    ConnectedCache() subscribes to these ticks to drive its periodic
    cleanup without managing its own loop delay.
    '''
    def __init__(self, opts, event):
        Thread.__init__(self)
        self.opts = opts
        self.stopped = event
        self.daemon = True
        self.serial = salt.payload.Serial(opts.get('serial', ''))
        self.timer_sock = os.path.join(self.opts['sock_dir'], 'con_timer.ipc')

    def run(self):
        '''
        Publish an incrementing counter (0-59, wrapping) once per second
        until the stop event is set.
        '''
        context = zmq.Context()
        # PUB socket carrying the outgoing timer events
        publisher = context.socket(zmq.PUB)
        publisher.setsockopt(zmq.LINGER, 100)
        publisher.bind('ipc://' + self.timer_sock)
        log.debug('ConCache-Timer started')
        tick = 0
        while not self.stopped.wait(1):
            publisher.send(self.serial.dumps(tick))
            # wrap the counter after 59 so subscribers get minute marks
            tick = (tick + 1) % 60
class CacheWorker(MultiprocessingProcess):
    '''
    Short-lived process that refreshes the ConnectedCache minion list.

    Running the refresh in its own process keeps the ConnectedCache
    main loop responsive while the connected minions are gathered.
    '''
    def __init__(self, opts, log_queue=None):
        '''
        Sets up the zmq-connection to the ConCache
        '''
        super(CacheWorker, self).__init__(log_queue=log_queue)
        self.opts = opts

    # __setstate__ and __getstate__ are only used on Windows.
    # We do this so that __init__ will be invoked on Windows in the child
    # process so that a register_after_fork() equivalent will work on Windows.
    def __setstate__(self, state):
        self._is_child = True
        self.__init__(state['opts'], log_queue=state['log_queue'])

    def __getstate__(self):
        return {'opts': self.opts,
                'log_queue': self.log_queue}

    def run(self):
        '''
        Gather currently connected minions and update the cache
        '''
        connected = list(salt.utils.minions.CkMinions(self.opts).connected_ids())
        cache_client = cache_cli(self.opts)
        cache_client.get_cached()
        # a nested list signals a full replacement to the cache main loop
        cache_client.put_cache([connected])
        log.debug('ConCache CacheWorker update finished')
class ConnectedCache(MultiprocessingProcess):
    '''
    Provides access to all minions ids that the master has
    successfully authenticated. The cache is cleaned up regularly by
    comparing it to the IPs that have open connections to
    the master publisher port.
    '''
    def __init__(self, opts, log_queue=None):
        '''
        starts the timer and inits the cache itself
        '''
        super(ConnectedCache, self).__init__(log_queue=log_queue)
        log.debug('ConCache initializing...')
        # the possible settings for the cache
        self.opts = opts
        # the actual cached minion ids
        self.minions = []
        self.cache_sock = os.path.join(self.opts['sock_dir'], 'con_cache.ipc')
        self.update_sock = os.path.join(self.opts['sock_dir'], 'con_upd.ipc')
        self.upd_t_sock = os.path.join(self.opts['sock_dir'], 'con_timer.ipc')
        self.cleanup()
        # the timer provides 1-second intervals to the loop in run()
        # to make the cache system most responsive, we do not use a loop-
        # delay which makes it hard to get 1-second intervals without a timer
        self.timer_stop = Event()
        self.timer = CacheTimer(self.opts, self.timer_stop)
        self.timer.start()
        self.running = True

    # __setstate__ and __getstate__ are only used on Windows.
    # We do this so that __init__ will be invoked on Windows in the child
    # process so that a register_after_fork() equivalent will work on Windows.
    def __setstate__(self, state):
        self._is_child = True
        self.__init__(state['opts'], log_queue=state['log_queue'])

    def __getstate__(self):
        return {'opts': self.opts,
                'log_queue': self.log_queue}

    def signal_handler(self, sig, frame):
        '''
        handle signals and shutdown
        '''
        self.stop()

    def cleanup(self):
        '''
        remove sockets on shutdown
        '''
        log.debug('ConCache cleaning up')
        if os.path.exists(self.cache_sock):
            os.remove(self.cache_sock)
        if os.path.exists(self.update_sock):
            os.remove(self.update_sock)
        if os.path.exists(self.upd_t_sock):
            os.remove(self.upd_t_sock)

    def secure(self):
        '''
        secure the sockets for root-only access
        '''
        log.debug('ConCache securing sockets')
        if os.path.exists(self.cache_sock):
            os.chmod(self.cache_sock, 0o600)
        if os.path.exists(self.update_sock):
            os.chmod(self.update_sock, 0o600)
        if os.path.exists(self.upd_t_sock):
            os.chmod(self.upd_t_sock, 0o600)

    def stop(self):
        '''
        shutdown cache process
        '''
        self.cleanup()
        # avoid stopping the timer twice
        if self.running:
            self.running = False
            self.timer_stop.set()
            self.timer.join()

    def run(self):
        '''
        Main loop of the ConCache, starts updates in intervals and
        answers requests from the MWorkers
        '''
        context = zmq.Context()
        # the socket for incoming cache requests
        creq_in = context.socket(zmq.REP)
        creq_in.setsockopt(zmq.LINGER, 100)
        creq_in.bind('ipc://' + self.cache_sock)
        # the socket for incoming cache-updates from workers.
        # SUBSCRIBE must be bytes: with unicode_literals in effect, '' is
        # a unicode string that pyzmq rejects on Python 3.
        cupd_in = context.socket(zmq.SUB)
        cupd_in.setsockopt(zmq.SUBSCRIBE, b'')
        cupd_in.setsockopt(zmq.LINGER, 100)
        cupd_in.bind('ipc://' + self.update_sock)
        # the socket for the timer-event
        timer_in = context.socket(zmq.SUB)
        timer_in.setsockopt(zmq.SUBSCRIBE, b'')
        timer_in.setsockopt(zmq.LINGER, 100)
        timer_in.connect('ipc://' + self.upd_t_sock)
        poller = zmq.Poller()
        poller.register(creq_in, zmq.POLLIN)
        poller.register(cupd_in, zmq.POLLIN)
        poller.register(timer_in, zmq.POLLIN)
        # our serializer
        serial = salt.payload.Serial(self.opts.get('serial', ''))
        # register a signal handler
        signal.signal(signal.SIGINT, self.signal_handler)
        # secure the sockets from the world
        self.secure()
        log.info('ConCache started')
        while self.running:
            # we check for new events with the poller
            try:
                socks = dict(poller.poll(1))
            except KeyboardInterrupt:
                self.stop()
                # 'socks' is undefined (or stale) here; restart the loop,
                # which exits now that self.running is False
                continue
            except zmq.ZMQError as zmq_err:
                log.error('ConCache ZeroMQ-Error occurred')
                log.exception(zmq_err)
                self.stop()
                continue
            # check for next cache-request
            if socks.get(creq_in) == zmq.POLLIN:
                msg = serial.loads(creq_in.recv())
                log.debug('ConCache Received request: %s', msg)
                # requests to the minion list are send as str's
                if isinstance(msg, six.string_types):
                    if msg == 'minions':
                        # Send reply back to client
                        reply = serial.dumps(self.minions)
                        creq_in.send(reply)
            # check for next cache-update from workers
            if socks.get(cupd_in) == zmq.POLLIN:
                new_c_data = serial.loads(cupd_in.recv())
                # check if the returned data is usable
                if not isinstance(new_c_data, list):
                    log.error('ConCache Worker returned unusable result')
                    del new_c_data
                    continue
                # the cache will receive lists of minions
                # 1. if the list only has 1 item, its from an MWorker, we append it
                # 2. if the list contains another list, its from a CacheWorker and
                #    the currently cached minions are replaced with that list
                # 3. anything else is considered malformed
                try:
                    if len(new_c_data) == 0:
                        log.debug('ConCache Got empty update from worker')
                        continue
                    data = new_c_data[0]
                    if isinstance(data, six.string_types):
                        if data not in self.minions:
                            log.debug('ConCache Adding minion %s to cache',
                                      new_c_data[0])
                            self.minions.append(data)
                    elif isinstance(data, list):
                        log.debug('ConCache Replacing minion list from worker')
                        self.minions = data
                except IndexError:
                    log.debug('ConCache Got malformed result dict from worker')
                    del new_c_data
                log.info('ConCache %s entries in cache', len(self.minions))
            # check for next timer-event to start new jobs
            if socks.get(timer_in) == zmq.POLLIN:
                sec_event = serial.loads(timer_in.recv())
                # update the list every 30 seconds
                if int(sec_event % 30) == 0:
                    cw = CacheWorker(self.opts)
                    cw.start()
        self.stop()
        creq_in.close()
        cupd_in.close()
        timer_in.close()
        context.term()
        log.debug('ConCache Shutting down')
def ping_all_connected_minions(opts):
    '''
    Send a test.ping to the minions the master believes are connected
    (when the minion data cache is enabled) or to every minion otherwise.
    '''
    client = salt.client.LocalClient()
    if opts['minion_data_cache']:
        # target only minions the master knows to be connected
        targets = list(salt.utils.minions.CkMinions(opts).connected_ids())
        match_type = 'list'
    else:
        # no cache available: fall back to pinging everything
        targets, match_type = '*', 'glob'
    client.cmd(targets, 'test.ping', tgt_type=match_type)
def get_master_key(key_user, opts, skip_perm_errors=False):
    '''
    Read and return the master key for *key_user* from the master
    cachedir, or '' when the keyfile cannot be read (callers then fall
    back to eauth).
    '''
    # 'root' and 'sudo_*' users map to the configured master user's key
    if key_user == 'root' and opts.get('user', 'root') != 'root':
        key_user = opts.get('user', 'root')
    if key_user.startswith('sudo_'):
        key_user = opts.get('user', 'root')
    if salt.utils.platform.is_windows():
        # The username may contain '\' if it is in Windows
        # 'DOMAIN\username' format. Fix this for the keyfile path.
        key_user = key_user.replace('\\', '_')
    keyfile = os.path.join(opts['cachedir'], '.{0}_key'.format(key_user))
    # Make sure all key parent directories are accessible
    salt.utils.verify.check_path_traversal(opts['cachedir'], key_user,
                                           skip_perm_errors)
    try:
        with salt.utils.files.fopen(keyfile, 'r') as key:
            return key.read()
    except (OSError, IOError):
        # Fall back to eauth
        return ''
def get_values_of_matching_keys(pattern_dict, user_name):
    '''
    Check a whitelist and/or blacklist to see if the value matches it.

    Collect the values of every pattern_dict entry whose key pattern
    matches *user_name*, preserving the dict's iteration order.
    '''
    return [value
            for expr in pattern_dict
            if salt.utils.stringutils.expr_match(user_name, expr)
            for value in pattern_dict[expr]]
# test code for the ConCache class
if __name__ == '__main__':
    # Manual smoke test: load the local master config and run the
    # connected-minion cache process in the foreground.
    opts = salt.config.master_config('/etc/salt/master')
    conc = ConnectedCache(opts)
    conc.start()
| 39.663946 | 142 | 0.58759 |
from __future__ import absolute_import, unicode_literals
import os
import logging
import signal
from threading import Thread, Event
import salt.log
import salt.cache
import salt.client
import salt.pillar
import salt.utils.atomicfile
import salt.utils.files
import salt.utils.minions
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.verify
import salt.utils.versions
import salt.payload
from salt.exceptions import SaltException
import salt.config
from salt.utils.cache import CacheCli as cache_cli
from salt.utils.process import MultiprocessingProcess
from salt.ext import six
try:
import zmq
HAS_ZMQ = True
except ImportError:
HAS_ZMQ = False
log = logging.getLogger(__name__)
class MasterPillarUtil(object):
def __init__(self,
tgt='',
tgt_type='glob',
saltenv=None,
use_cached_grains=True,
use_cached_pillar=True,
grains_fallback=True,
pillar_fallback=True,
opts=None,
expr_form=None):
if expr_form is not None:
salt.utils.versions.warn_until(
'Fluorine',
'the target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = expr_form
log.debug('New instance of %s created.',
self.__class__.__name__)
if opts is None:
log.error('%s: Missing master opts init arg.',
self.__class__.__name__)
raise SaltException('{0}: Missing master opts init arg.'.format(
self.__class__.__name__))
else:
self.opts = opts
self.serial = salt.payload.Serial(self.opts)
self.tgt = tgt
self.tgt_type = tgt_type
self.saltenv = saltenv
self.use_cached_grains = use_cached_grains
self.use_cached_pillar = use_cached_pillar
self.grains_fallback = grains_fallback
self.pillar_fallback = pillar_fallback
self.cache = salt.cache.factory(opts)
log.debug(
'Init settings: tgt: \'%s\', tgt_type: \'%s\', saltenv: \'%s\', '
'use_cached_grains: %s, use_cached_pillar: %s, '
'grains_fallback: %s, pillar_fallback: %s',
tgt, tgt_type, saltenv, use_cached_grains, use_cached_pillar,
grains_fallback, pillar_fallback
)
def _get_cached_mine_data(self, *minion_ids):
mine_data = dict([(minion_id, {}) for minion_id in minion_ids])
if (not self.opts.get('minion_data_cache', False)
and not self.opts.get('enforce_mine_cache', False)):
log.debug('Skipping cached mine data minion_data_cache'
'and enfore_mine_cache are both disabled.')
return mine_data
if not minion_ids:
minion_ids = self.cache.list('minions')
for minion_id in minion_ids:
if not salt.utils.verify.valid_id(self.opts, minion_id):
continue
mdata = self.cache.fetch('minions/{0}'.format(minion_id), 'mine')
if isinstance(mdata, dict):
mine_data[minion_id] = mdata
return mine_data
def _get_cached_minion_data(self, *minion_ids):
grains = dict([(minion_id, {}) for minion_id in minion_ids])
pillars = grains.copy()
if not self.opts.get('minion_data_cache', False):
log.debug('Skipping cached data because minion_data_cache is not '
'enabled.')
return grains, pillars
if not minion_ids:
minion_ids = self.cache.list('minions')
for minion_id in minion_ids:
if not salt.utils.verify.valid_id(self.opts, minion_id):
continue
mdata = self.cache.fetch('minions/{0}'.format(minion_id), 'data')
if not isinstance(mdata, dict):
log.warning(
'cache.fetch should always return a dict. ReturnedType: %s, MinionId: %s',
type(mdata).__name__,
minion_id
)
continue
if 'grains' in mdata:
grains[minion_id] = mdata['grains']
if 'pillar' in mdata:
pillars[minion_id] = mdata['pillar']
return grains, pillars
def _get_live_minion_grains(self, minion_ids):
log.debug('Getting live grains for minions: "%s"', minion_ids)
client = salt.client.get_local_client(self.opts['conf_file'])
ret = client.cmd(
','.join(minion_ids),
'grains.items',
timeout=self.opts['timeout'],
tgt_type='list')
return ret
def _get_live_minion_pillar(self, minion_id=None, minion_grains=None):
if minion_id is None:
return {}
if not minion_grains:
log.warning(
'Cannot get pillar data for %s: no grains supplied.',
minion_id
)
return {}
log.debug('Getting live pillar for %s', minion_id)
pillar = salt.pillar.Pillar(
self.opts,
minion_grains,
minion_id,
self.saltenv,
self.opts['ext_pillar'])
log.debug('Compiling pillar for %s', minion_id)
ret = pillar.compile_pillar()
return ret
def _get_minion_grains(self, *minion_ids, **kwargs):
ret = {}
cached_grains = kwargs.get('cached_grains', {})
cret = {}
lret = {}
if self.use_cached_grains:
cret = dict([(minion_id, mcache) for (minion_id, mcache) in six.iteritems(cached_grains) if mcache])
missed_minions = [minion_id for minion_id in minion_ids if minion_id not in cret]
log.debug('Missed cached minion grains for: %s', missed_minions)
if self.grains_fallback:
lret = self._get_live_minion_grains(missed_minions)
ret = dict(list(six.iteritems(dict([(minion_id, {}) for minion_id in minion_ids]))) + list(lret.items()) + list(cret.items()))
else:
lret = self._get_live_minion_grains(minion_ids)
missed_minions = [minion_id for minion_id in minion_ids if minion_id not in lret]
log.debug('Missed live minion grains for: %s', missed_minions)
if self.grains_fallback:
cret = dict([(minion_id, mcache) for (minion_id, mcache) in six.iteritems(cached_grains) if mcache])
ret = dict(list(six.iteritems(dict([(minion_id, {}) for minion_id in minion_ids]))) + list(lret.items()) + list(cret.items()))
return ret
def _get_minion_pillar(self, *minion_ids, **kwargs):
ret = {}
grains = kwargs.get('grains', {})
cached_pillar = kwargs.get('cached_pillar', {})
cret = {}
lret = {}
if self.use_cached_pillar:
cret = dict([(minion_id, mcache) for (minion_id, mcache) in six.iteritems(cached_pillar) if mcache])
missed_minions = [minion_id for minion_id in minion_ids if minion_id not in cret]
log.debug('Missed cached minion pillars for: %s', missed_minions)
if self.pillar_fallback:
lret = dict([(minion_id, self._get_live_minion_pillar(minion_id, grains.get(minion_id, {}))) for minion_id in missed_minions])
ret = dict(list(six.iteritems(dict([(minion_id, {}) for minion_id in minion_ids]))) + list(lret.items()) + list(cret.items()))
else:
lret = dict([(minion_id, self._get_live_minion_pillar(minion_id, grains.get(minion_id, {}))) for minion_id in minion_ids])
missed_minions = [minion_id for minion_id in minion_ids if minion_id not in lret]
log.debug('Missed live minion pillars for: %s', missed_minions)
if self.pillar_fallback:
cret = dict([(minion_id, mcache) for (minion_id, mcache) in six.iteritems(cached_pillar) if mcache])
ret = dict(list(six.iteritems(dict([(minion_id, {}) for minion_id in minion_ids]))) + list(lret.items()) + list(cret.items()))
return ret
def _tgt_to_list(self):
minion_ids = []
ckminions = salt.utils.minions.CkMinions(self.opts)
_res = ckminions.check_minions(self.tgt, self.tgt_type)
minion_ids = _res['minions']
if len(minion_ids) == 0:
log.debug('No minions matched for tgt="%s" and tgt_type="%s"', self.tgt, self.tgt_type)
return {}
log.debug('Matching minions for tgt="%s" and tgt_type="%s": %s', self.tgt, self.tgt_type, minion_ids)
return minion_ids
def get_minion_pillar(self):
minion_pillars = {}
minion_grains = {}
minion_ids = self._tgt_to_list()
if any(arg for arg in [self.use_cached_grains, self.use_cached_pillar, self.grains_fallback, self.pillar_fallback]):
log.debug('Getting cached minion data')
cached_minion_grains, cached_minion_pillars = self._get_cached_minion_data(*minion_ids)
else:
cached_minion_grains = {}
cached_minion_pillars = {}
log.debug('Getting minion grain data for: %s', minion_ids)
minion_grains = self._get_minion_grains(
*minion_ids,
cached_grains=cached_minion_grains)
log.debug('Getting minion pillar data for: %s', minion_ids)
minion_pillars = self._get_minion_pillar(
*minion_ids,
grains=minion_grains,
cached_pillar=cached_minion_pillars)
return minion_pillars
def get_minion_grains(self):
minion_grains = {}
minion_ids = self._tgt_to_list()
if not minion_ids:
return {}
if any(arg for arg in [self.use_cached_grains, self.grains_fallback]):
log.debug('Getting cached minion data.')
cached_minion_grains, cached_minion_pillars = self._get_cached_minion_data(*minion_ids)
else:
cached_minion_grains = {}
log.debug('Getting minion grain data for: %s', minion_ids)
minion_grains = self._get_minion_grains(
*minion_ids,
cached_grains=cached_minion_grains)
return minion_grains
def get_cached_mine_data(self):
mine_data = {}
minion_ids = self._tgt_to_list()
log.debug('Getting cached mine data for: %s', minion_ids)
mine_data = self._get_cached_mine_data(*minion_ids)
return mine_data
def clear_cached_minion_data(self,
clear_pillar=False,
clear_grains=False,
clear_mine=False,
clear_mine_func=None):
clear_what = []
if clear_pillar:
clear_what.append('pillar')
if clear_grains:
clear_what.append('grains')
if clear_mine:
clear_what.append('mine')
if clear_mine_func is not None:
clear_what.append('mine_func: \'{0}\''.format(clear_mine_func))
if not len(clear_what):
log.debug('No cached data types specified for clearing.')
return False
minion_ids = self._tgt_to_list()
log.debug('Clearing cached %s data for: %s',
', '.join(clear_what),
minion_ids)
if clear_pillar == clear_grains:
grains = {}
pillars = {}
else:
# Unless both clear_pillar and clear_grains are True, we need
# to read in the pillar/grains data since they are both stored
# in the same file, 'data.p'
grains, pillars = self._get_cached_minion_data(*minion_ids)
try:
c_minions = self.cache.list('minions')
for minion_id in minion_ids:
if not salt.utils.verify.valid_id(self.opts, minion_id):
continue
if minion_id not in c_minions:
# Cache bank for this minion does not exist. Nothing to do.
continue
bank = 'minions/{0}'.format(minion_id)
minion_pillar = pillars.pop(minion_id, False)
minion_grains = grains.pop(minion_id, False)
if ((clear_pillar and clear_grains) or
(clear_pillar and not minion_grains) or
(clear_grains and not minion_pillar)):
# Not saving pillar or grains, so just delete the cache file
self.cache.flush(bank, 'data')
elif clear_pillar and minion_grains:
self.cache.store(bank, 'data', {'grains': minion_grains})
elif clear_grains and minion_pillar:
self.cache.store(bank, 'data', {'pillar': minion_pillar})
if clear_mine:
# Delete the whole mine file
self.cache.flush(bank, 'mine')
elif clear_mine_func is not None:
# Delete a specific function from the mine file
mine_data = self.cache.fetch(bank, 'mine')
if isinstance(mine_data, dict):
if mine_data.pop(clear_mine_func, False):
self.cache.store(bank, 'mine', mine_data)
except (OSError, IOError):
return True
return True
class CacheTimer(Thread):
def __init__(self, opts, event):
Thread.__init__(self)
self.opts = opts
self.stopped = event
self.daemon = True
self.serial = salt.payload.Serial(opts.get('serial', ''))
self.timer_sock = os.path.join(self.opts['sock_dir'], 'con_timer.ipc')
def run(self):
context = zmq.Context()
# the socket for outgoing timer events
socket = context.socket(zmq.PUB)
socket.setsockopt(zmq.LINGER, 100)
socket.bind('ipc://' + self.timer_sock)
count = 0
log.debug('ConCache-Timer started')
while not self.stopped.wait(1):
socket.send(self.serial.dumps(count))
count += 1
if count >= 60:
count = 0
class CacheWorker(MultiprocessingProcess):
    """One-shot process that refreshes the connected-minion cache.

    Determines the currently connected minion ids and pushes them to the
    ConnectedCache main process through the cache_cli interface.
    """
    def __init__(self, opts, log_queue=None):
        """opts: master opts dict, passed through to CkMinions and cache_cli."""
        super(CacheWorker, self).__init__(log_queue=log_queue)
        self.opts = opts
    # __setstate__ and __getstate__ are only used on Windows.
    # We do this so that __init__ will be invoked on Windows in the child
    # process so that a register_after_fork() equivalent will work on Windows.
    def __setstate__(self, state):
        self._is_child = True
        self.__init__(state['opts'], log_queue=state['log_queue'])
    def __getstate__(self):
        # Keep in sync with __setstate__: only opts and log_queue are pickled.
        return {'opts': self.opts,
                'log_queue': self.log_queue}
    def run(self):
        """Gather connected minion ids and hand them to the cache process."""
        new_mins = list(salt.utils.minions.CkMinions(self.opts).connected_ids())
        cc = cache_cli(self.opts)
        cc.get_cached()
        # NOTE(review): put_cache is fed a single-element list wrapping the id
        # list — confirm against the cache_cli API.
        cc.put_cache([new_mins])
        log.debug('ConCache CacheWorker update finished')
class ConnectedCache(MultiprocessingProcess):
    """Process that caches the ids of connected minions.

    Serves the cached list over a REP socket ('con_cache.ipc'), receives
    updates from CacheWorker processes over a SUB socket ('con_upd.ipc'),
    and listens to CacheTimer ticks ('con_timer.ipc') to periodically spawn
    a refresh worker.
    """

    def __init__(self, opts, log_queue=None):
        super(ConnectedCache, self).__init__(log_queue=log_queue)
        log.debug('ConCache initializing...')
        # the possible settings for the cache
        self.opts = opts
        # the actual cached minion ids
        self.minions = []
        self.cache_sock = os.path.join(self.opts['sock_dir'], 'con_cache.ipc')
        self.update_sock = os.path.join(self.opts['sock_dir'], 'con_upd.ipc')
        self.upd_t_sock = os.path.join(self.opts['sock_dir'], 'con_timer.ipc')
        self.cleanup()
        # the timer provides 1-second intervals to the loop in run()
        # to make the cache system most responsive, we do not use a loop-
        # delay which makes it hard to get 1-second intervals without a timer
        self.timer_stop = Event()
        self.timer = CacheTimer(self.opts, self.timer_stop)
        self.timer.start()
        self.running = True

    # __setstate__ and __getstate__ are only used on Windows.
    # We do this so that __init__ will be invoked on Windows in the child
    # process so that a register_after_fork() equivalent will work on Windows.
    def __setstate__(self, state):
        self._is_child = True
        self.__init__(state['opts'], log_queue=state['log_queue'])

    def __getstate__(self):
        return {'opts': self.opts,
                'log_queue': self.log_queue}

    def signal_handler(self, sig, frame):
        """SIGINT handler: shut the cache down cleanly."""
        self.stop()

    def cleanup(self):
        """Remove any (stale) IPC socket files."""
        log.debug('ConCache cleaning up')
        if os.path.exists(self.cache_sock):
            os.remove(self.cache_sock)
        if os.path.exists(self.update_sock):
            os.remove(self.update_sock)
        if os.path.exists(self.upd_t_sock):
            os.remove(self.upd_t_sock)

    def secure(self):
        """Restrict the IPC sockets to owner-only access (0600)."""
        log.debug('ConCache securing sockets')
        if os.path.exists(self.cache_sock):
            os.chmod(self.cache_sock, 0o600)
        if os.path.exists(self.update_sock):
            os.chmod(self.update_sock, 0o600)
        if os.path.exists(self.upd_t_sock):
            os.chmod(self.upd_t_sock, 0o600)

    def stop(self):
        """Stop the main loop and the timer thread; safe to call twice."""
        # avoid getting called twice
        self.cleanup()
        if self.running:
            self.running = False
            self.timer_stop.set()
            self.timer.join()

    def run(self):
        """Main loop: poll the three sockets and dispatch events until stopped."""
        context = zmq.Context()
        # the socket for incoming cache requests
        creq_in = context.socket(zmq.REP)
        creq_in.setsockopt(zmq.LINGER, 100)
        creq_in.bind('ipc://' + self.cache_sock)
        # the socket for incoming cache-updates from workers
        cupd_in = context.socket(zmq.SUB)
        # NOTE(review): pyzmq on Python 3 expects bytes for SUBSCRIBE ('' is
        # the Python 2 form) — confirm the targeted runtime.
        cupd_in.setsockopt(zmq.SUBSCRIBE, '')
        cupd_in.setsockopt(zmq.LINGER, 100)
        cupd_in.bind('ipc://' + self.update_sock)
        # the socket for the timer-event
        timer_in = context.socket(zmq.SUB)
        timer_in.setsockopt(zmq.SUBSCRIBE, '')
        timer_in.setsockopt(zmq.LINGER, 100)
        timer_in.connect('ipc://' + self.upd_t_sock)
        poller = zmq.Poller()
        poller.register(creq_in, zmq.POLLIN)
        poller.register(cupd_in, zmq.POLLIN)
        poller.register(timer_in, zmq.POLLIN)
        # our serializer
        serial = salt.payload.Serial(self.opts.get('serial', ''))
        # register a signal handler
        signal.signal(signal.SIGINT, self.signal_handler)
        # secure the sockets from the world
        self.secure()
        log.info('ConCache started')
        while self.running:
            # we check for new events with the poller
            try:
                socks = dict(poller.poll(1))
            except KeyboardInterrupt:
                self.stop()
                # Bugfix: without this 'continue', 'socks' would be referenced
                # while unbound (first iteration) or stale after stop().
                continue
            except zmq.ZMQError as zmq_err:
                log.error('ConCache ZeroMQ-Error occurred')
                log.exception(zmq_err)
                self.stop()
                continue
            # check for next cache-request
            if socks.get(creq_in) == zmq.POLLIN:
                msg = serial.loads(creq_in.recv())
                log.debug('ConCache Received request: %s', msg)
                # requests to the minion list are send as str's
                if isinstance(msg, six.string_types):
                    if msg == 'minions':
                        # Send reply to the requestor (REP socket must answer).
                        reply = serial.dumps(self.minions)
                        creq_in.send(reply)
            # check for an update from a worker
            if socks.get(cupd_in) == zmq.POLLIN:
                new_c_data = serial.loads(cupd_in.recv())
                if not isinstance(new_c_data, list):
                    log.error('ConCache Worker returned unusable result')
                    del new_c_data
                    continue
                try:
                    if len(new_c_data) == 0:
                        log.debug('ConCache Got empty update from worker')
                        continue
                    data = new_c_data[0]
                    if isinstance(data, six.string_types):
                        # A single id: add it if it is not cached yet.
                        if data not in self.minions:
                            log.debug('ConCache Adding minion %s to cache',
                                      new_c_data[0])
                            self.minions.append(data)
                    elif isinstance(data, list):
                        # A full list: replace the cache wholesale.
                        log.debug('ConCache Replacing minion list from worker')
                        self.minions = data
                except IndexError:
                    log.debug('ConCache Got malformed result dict from worker')
                    del new_c_data
                log.info('ConCache %s entries in cache', len(self.minions))
            # check for next timer-event to start new jobs
            if socks.get(timer_in) == zmq.POLLIN:
                sec_event = serial.loads(timer_in.recv())
                # Refresh the cache every 30 timer ticks (~30 seconds).
                if int(sec_event % 30) == 0:
                    cw = CacheWorker(self.opts)
                    cw.start()
        self.stop()
        creq_in.close()
        cupd_in.close()
        timer_in.close()
        context.term()
        log.debug('ConCache Shutting down')
def ping_all_connected_minions(opts):
    """Issue a test.ping to every minion.

    With minion_data_cache enabled, target only the ids currently known to be
    connected (list targeting); otherwise fall back to a '*' glob.
    """
    client = salt.client.LocalClient()
    use_cache = opts['minion_data_cache']
    target = list(salt.utils.minions.CkMinions(opts).connected_ids()) if use_cache else '*'
    target_type = 'list' if use_cache else 'glob'
    client.cmd(target, 'test.ping', tgt_type=target_type)
def get_master_key(key_user, opts, skip_perm_errors=False):
    """Read and return the cached master key for key_user.

    Resolves 'root' and 'sudo_*' users to the configured master user,
    normalizes Windows domain separators, and returns '' when the key file
    cannot be read.
    """
    configured_user = opts.get('user', 'root')
    if key_user == 'root' and configured_user != 'root':
        key_user = configured_user
    if key_user.startswith('sudo_'):
        key_user = configured_user
    if salt.utils.platform.is_windows():
        # Replace backslashes so a domain account yields a valid file name.
        key_user = key_user.replace('\\', '_')
    keyfile = os.path.join(opts['cachedir'], '.{0}_key'.format(key_user))
    # Reject user names that would traverse outside the cache directory.
    salt.utils.verify.check_path_traversal(opts['cachedir'], key_user, skip_perm_errors)
    try:
        with salt.utils.files.fopen(keyfile, 'r') as fp_:
            return fp_.read()
    except (OSError, IOError):
        return ''
def get_values_of_matching_keys(pattern_dict, user_name):
    """Concatenate the value lists of every key pattern that matches user_name."""
    matched = []
    for pattern, values in pattern_dict.items():
        if salt.utils.stringutils.expr_match(user_name, pattern):
            matched.extend(values)
    return matched
# Manual entry point: load the master config and launch the connected-minion
# cache process directly (normally this is started by the salt master).
if __name__ == '__main__':
    opts = salt.config.master_config('/etc/salt/master')
    conc = ConnectedCache(opts)
    conc.start()
| true | true |
1c33a0ae9bb2ef8611e2ccd924393747b61b9446 | 7,505 | py | Python | create_RU_dataset/RU_doc2vec_baseline.py | OlegDurandin/AuthorStyle | 75288df4ad0f88677645c3af00fbd7c0f7f58822 | [
"MIT"
] | 3 | 2019-09-29T17:10:43.000Z | 2020-09-21T09:58:48.000Z | create_RU_dataset/RU_doc2vec_baseline.py | OlegDurandin/AuthorStyle | 75288df4ad0f88677645c3af00fbd7c0f7f58822 | [
"MIT"
] | 2 | 2019-07-14T11:14:48.000Z | 2019-07-14T11:16:54.000Z | create_RU_dataset/RU_doc2vec_baseline.py | OlegDurandin/AuthorStyle | 75288df4ad0f88677645c3af00fbd7c0f7f58822 | [
"MIT"
] | null | null | null | from gensim.models import Doc2Vec
import pickle
import os
from src.settings import PATH_TO_OUTPUT_FOLDER
from tqdm import tqdm
def save_data_from_doc2vec(filename, model,
                           authors_list, novels_list):
    """Dump every trained document vector of *model* to '<filename>.csv'.

    One row per document: vector components, then author, then novel title,
    all separated by ';' (';' is stripped from titles). Rows are separated by
    '\\n' with no trailing newline. authors_list/novels_list are indexed in
    parallel with model.docvecs.vectors_docs.
    """
    out_path = filename + '.csv'
    print('Save file: {}'.format(out_path))
    rows = []
    for index, one_vector in tqdm(enumerate(model.docvecs.vectors_docs)):
        vector_part = ';'.join(str(value) for value in one_vector)
        # ';' is the column separator, so remove it from novel titles.
        rows.append(vector_part + ';' + authors_list[index] + ';' + novels_list[index].replace(';', ''))
    # 'with' guarantees the handle is closed even if writing fails; joining
    # once avoids the quadratic cost of building one huge string with '+='.
    with open(out_path, 'w', encoding='utf-8') as data_csv:
        data_csv.write('\n'.join(rows))
    print('Save file: {} DONE'.format(out_path))
def save_data_after_doc2vec_inference(filename, list_of_infered_vectors,
                                      authors_list, novels_list):
    """Dump inferred document vectors to '<filename>.csv'.

    Same row format as save_data_from_doc2vec: vector components, author and
    novel title joined by ';' (';' stripped from titles), rows separated by
    '\\n' with no trailing newline. The three lists are indexed in parallel.
    """
    out_path = filename + '.csv'
    print('Save file: {}'.format(out_path))
    rows = []
    for index, one_vector in tqdm(enumerate(list_of_infered_vectors)):
        vector_part = ';'.join(str(value) for value in one_vector)
        # ';' is the column separator, so remove it from novel titles.
        rows.append(vector_part + ';' + authors_list[index] + ';' + novels_list[index].replace(';', ''))
    # 'with' guarantees the handle is closed even if writing fails; joining
    # once avoids the quadratic cost of building one huge string with '+='.
    with open(out_path, 'w', encoding='utf-8') as data_csv:
        data_csv.write('\n'.join(rows))
    print('Save file: {} DONE'.format(out_path))
# Whether a held-out test split exists and should be vectorized by inference.
# (Name keeps the historical 'AVALABLE' spelling; renaming would break __main__.)
TEST_AVALABLE = True
# Whether the additional 10% "fair" test sample should be processed as well.
FAIR_TEST = True
# Number of sentences per document chunk; selects the dataset sub-folders.
COUNT_OF_SENTENCE = 350
if __name__ == "__main__":
print('Loading processed documents...')
PATH_TO_CURRENT_OUT_FOLDER = os.path.join(PATH_TO_OUTPUT_FOLDER, 'RUS_AA', '{} Sentences'.format(COUNT_OF_SENTENCE),
#'TRAIN_6000_BOOTSTRAP_{}_SENTENCES'.format(COUNT_OF_SENTENCE))
'TRAIN_FIXED_SEPARATION_{}_SENTENCES'.format(COUNT_OF_SENTENCE))
fullDatasetDocs = pickle.load(open(os.path.join(PATH_TO_CURRENT_OUT_FOLDER, 'tagged_documents_dump.pkl'), 'rb'))
print('Loading processed documents... DONE')
PATH_TO_DOC2VEC_VECTORS = os.path.join(PATH_TO_CURRENT_OUT_FOLDER, 'doc2vec_vectors_TRAIN')
print('We will save csv to: {}'.format(PATH_TO_DOC2VEC_VECTORS))
if not os.path.exists(PATH_TO_DOC2VEC_VECTORS):
print('Directory: {} was created'.format(PATH_TO_DOC2VEC_VECTORS))
os.makedirs(PATH_TO_DOC2VEC_VECTORS)
if TEST_AVALABLE:
PATH_TO_CURRENT_OUT_TEST = os.path.join(PATH_TO_OUTPUT_FOLDER, 'RUS_AA', '{} Sentences'.format(COUNT_OF_SENTENCE),
'TEST_FIXED_SEPARATION_{}_SENTENCES'.format(COUNT_OF_SENTENCE))
fullTestDatasetDocs = pickle.load(open(os.path.join(PATH_TO_CURRENT_OUT_TEST, 'tagged_documents_dump.pkl'),
'rb'))
print('Loading test processed documents... DONE')
#PATH_TO_DOC2VEC_INFER_VECTORS = os.path.join(PATH_TO_CURRENT_OUT_TEST, 'doc2vec_vectors_TEST_INFER_BOOTSTRAP')
PATH_TO_DOC2VEC_INFER_VECTORS = os.path.join(PATH_TO_CURRENT_OUT_TEST, 'doc2vec_vectors_TEST_INFER')
if not os.path.exists(PATH_TO_DOC2VEC_INFER_VECTORS):
print('Directory: {} was created'.format(PATH_TO_DOC2VEC_INFER_VECTORS))
os.makedirs(PATH_TO_DOC2VEC_INFER_VECTORS)
if FAIR_TEST:
PATH_TO_CURRENT_OUT_FAIR_TEST = os.path.join(PATH_TO_OUTPUT_FOLDER, 'RUS_AA', '{} Sentences'.format(COUNT_OF_SENTENCE),
'TEST_SAMPLE_0.1_PERCENT_{}_SENTENCES'.format(COUNT_OF_SENTENCE))
fullFairTestDatasetDocs = pickle.load(open(os.path.join(PATH_TO_CURRENT_OUT_FAIR_TEST, 'tagged_documents_dump.pkl'),
'rb'))
print('Loading test processed documents... DONE')
#PATH_TO_DOC2VEC_FAIR_TEST_INFER_VECTORS = os.path.join(PATH_TO_CURRENT_OUT_FAIR_TEST, 'doc2vec_vectors_FAIR_TEST_INFER_BOOTSTRAP')
PATH_TO_DOC2VEC_FAIR_TEST_INFER_VECTORS = os.path.join(PATH_TO_CURRENT_OUT_FAIR_TEST, 'doc2vec_vectors_FAIR_TEST_INFER')
if not os.path.exists(PATH_TO_DOC2VEC_FAIR_TEST_INFER_VECTORS):
print('Directory: {} was created'.format(PATH_TO_DOC2VEC_FAIR_TEST_INFER_VECTORS))
os.makedirs(PATH_TO_DOC2VEC_FAIR_TEST_INFER_VECTORS)
search_params = {#'vector_size' : [50,100,150],
'vector_size': [100], #'vector_size': [50, 100, 150],
'window': [10], #'window' : [5,10,15],
'min_count' : [3],#'min_count' : [1,3,5,10],
'negative': [5] #'negative' : [5,10]
}
current_params = {'min_count' : 1,
'negative' : 5 ,
'workers' : 4}
list_of_tagged_documents = list(map(lambda x : x[2], fullDatasetDocs))
list_of_authors = list(map(lambda x : x[0], fullDatasetDocs))
list_of_novels = list(map(lambda x: x[1], fullDatasetDocs))
for vector_size in search_params['vector_size']:
for window_size in search_params['window']:
current_params['vector_size'] = vector_size
current_params['window'] = window_size
model = Doc2Vec(**current_params)
print('Model declaration: {}'.format(model))
print('Building vocabulary for model...')
model.build_vocab(list_of_tagged_documents)
print('Building vocabulary for model... DONE')
print('Training model...')
model.train(list_of_tagged_documents, epochs=30, total_examples=len(fullDatasetDocs))
print('Training model... DONE')
save_data_from_doc2vec(
os.path.join(PATH_TO_DOC2VEC_VECTORS, 'doc2vec_data_size_{}_window_{}'.format(vector_size, window_size)),
model, list_of_authors, list_of_novels)
if TEST_AVALABLE:
print('Model inference (Test set)...')
test_authors_list = list(map(lambda x : x[0], fullTestDatasetDocs))
test_novels_list = list(map(lambda x: x[1], fullTestDatasetDocs))
list_of_vectors = [model.infer_vector(one_doc) for one_doc in map(lambda x : x[2].words,
fullTestDatasetDocs)]
save_data_after_doc2vec_inference(
os.path.join(PATH_TO_DOC2VEC_INFER_VECTORS,
'doc2vec_data_size_{}_window_{}_infered'.format(vector_size, window_size)),
list_of_vectors, test_authors_list, test_novels_list)
print('Model inference... DONE')
if FAIR_TEST:
print('Model inference (fair test set)...')
test_authors_list = list(map(lambda x : x[0], fullFairTestDatasetDocs))
test_novels_list = list(map(lambda x: x[1], fullFairTestDatasetDocs))
list_of_vectors = [model.infer_vector(one_doc) for one_doc in map(lambda x : x[2].words,
fullFairTestDatasetDocs)]
save_data_after_doc2vec_inference(
os.path.join(PATH_TO_DOC2VEC_FAIR_TEST_INFER_VECTORS,
'doc2vec_data_size_{}_window_{}_infered'.format(vector_size, window_size)),
list_of_vectors, test_authors_list, test_novels_list)
print('Model inference (fair test set)... DONE')
| 56.007463 | 139 | 0.630513 | from gensim.models import Doc2Vec
import pickle
import os
from src.settings import PATH_TO_OUTPUT_FOLDER
from tqdm import tqdm
def save_data_from_doc2vec(filename, model,
authors_list, novels_list):
data_csv = open(filename + '.csv', 'w', encoding='utf-8')
print('Save file: {}'.format(filename + '.csv'))
data_csv_string = ''
for index, one_vector in tqdm(enumerate(model.docvecs.vectors_docs)):
row_result = ';'.join([str(value) for value in one_vector])
data_csv_string += row_result
data_csv_string += ';'+authors_list[index]+';'+novels_list[index].replace(';','')+ '\n'
data_csv.write(data_csv_string[:-1])
data_csv.close()
print('Save file: {} DONE'.format(filename + '.csv'))
def save_data_after_doc2vec_inference(filename, list_of_infered_vectors,
authors_list, novels_list):
data_csv = open(filename + '.csv', 'w', encoding='utf-8')
print('Save file: {}'.format(filename + '.csv'))
data_csv_string = ''
for index, one_vector in tqdm(enumerate(list_of_infered_vectors)):
row_result = ';'.join([str(value) for value in one_vector])
data_csv_string += row_result
data_csv_string += ';'+authors_list[index]+';'+novels_list[index].replace(';','')+ '\n'
data_csv.write(data_csv_string[:-1])
data_csv.close()
print('Save file: {} DONE'.format(filename + '.csv'))
TEST_AVALABLE = True
FAIR_TEST = True
COUNT_OF_SENTENCE = 350
if __name__ == "__main__":
print('Loading processed documents...')
PATH_TO_CURRENT_OUT_FOLDER = os.path.join(PATH_TO_OUTPUT_FOLDER, 'RUS_AA', '{} Sentences'.format(COUNT_OF_SENTENCE),
'TRAIN_FIXED_SEPARATION_{}_SENTENCES'.format(COUNT_OF_SENTENCE))
fullDatasetDocs = pickle.load(open(os.path.join(PATH_TO_CURRENT_OUT_FOLDER, 'tagged_documents_dump.pkl'), 'rb'))
print('Loading processed documents... DONE')
PATH_TO_DOC2VEC_VECTORS = os.path.join(PATH_TO_CURRENT_OUT_FOLDER, 'doc2vec_vectors_TRAIN')
print('We will save csv to: {}'.format(PATH_TO_DOC2VEC_VECTORS))
if not os.path.exists(PATH_TO_DOC2VEC_VECTORS):
print('Directory: {} was created'.format(PATH_TO_DOC2VEC_VECTORS))
os.makedirs(PATH_TO_DOC2VEC_VECTORS)
if TEST_AVALABLE:
PATH_TO_CURRENT_OUT_TEST = os.path.join(PATH_TO_OUTPUT_FOLDER, 'RUS_AA', '{} Sentences'.format(COUNT_OF_SENTENCE),
'TEST_FIXED_SEPARATION_{}_SENTENCES'.format(COUNT_OF_SENTENCE))
fullTestDatasetDocs = pickle.load(open(os.path.join(PATH_TO_CURRENT_OUT_TEST, 'tagged_documents_dump.pkl'),
'rb'))
print('Loading test processed documents... DONE')
PATH_TO_DOC2VEC_INFER_VECTORS = os.path.join(PATH_TO_CURRENT_OUT_TEST, 'doc2vec_vectors_TEST_INFER')
if not os.path.exists(PATH_TO_DOC2VEC_INFER_VECTORS):
print('Directory: {} was created'.format(PATH_TO_DOC2VEC_INFER_VECTORS))
os.makedirs(PATH_TO_DOC2VEC_INFER_VECTORS)
if FAIR_TEST:
PATH_TO_CURRENT_OUT_FAIR_TEST = os.path.join(PATH_TO_OUTPUT_FOLDER, 'RUS_AA', '{} Sentences'.format(COUNT_OF_SENTENCE),
'TEST_SAMPLE_0.1_PERCENT_{}_SENTENCES'.format(COUNT_OF_SENTENCE))
fullFairTestDatasetDocs = pickle.load(open(os.path.join(PATH_TO_CURRENT_OUT_FAIR_TEST, 'tagged_documents_dump.pkl'),
'rb'))
print('Loading test processed documents... DONE')
PATH_TO_DOC2VEC_FAIR_TEST_INFER_VECTORS = os.path.join(PATH_TO_CURRENT_OUT_FAIR_TEST, 'doc2vec_vectors_FAIR_TEST_INFER')
if not os.path.exists(PATH_TO_DOC2VEC_FAIR_TEST_INFER_VECTORS):
print('Directory: {} was created'.format(PATH_TO_DOC2VEC_FAIR_TEST_INFER_VECTORS))
os.makedirs(PATH_TO_DOC2VEC_FAIR_TEST_INFER_VECTORS)
search_params = {
'vector_size': [100],
'window': [10],
'min_count' : [3],
'negative': [5]
}
current_params = {'min_count' : 1,
'negative' : 5 ,
'workers' : 4}
list_of_tagged_documents = list(map(lambda x : x[2], fullDatasetDocs))
list_of_authors = list(map(lambda x : x[0], fullDatasetDocs))
list_of_novels = list(map(lambda x: x[1], fullDatasetDocs))
for vector_size in search_params['vector_size']:
for window_size in search_params['window']:
current_params['vector_size'] = vector_size
current_params['window'] = window_size
model = Doc2Vec(**current_params)
print('Model declaration: {}'.format(model))
print('Building vocabulary for model...')
model.build_vocab(list_of_tagged_documents)
print('Building vocabulary for model... DONE')
print('Training model...')
model.train(list_of_tagged_documents, epochs=30, total_examples=len(fullDatasetDocs))
print('Training model... DONE')
save_data_from_doc2vec(
os.path.join(PATH_TO_DOC2VEC_VECTORS, 'doc2vec_data_size_{}_window_{}'.format(vector_size, window_size)),
model, list_of_authors, list_of_novels)
if TEST_AVALABLE:
print('Model inference (Test set)...')
test_authors_list = list(map(lambda x : x[0], fullTestDatasetDocs))
test_novels_list = list(map(lambda x: x[1], fullTestDatasetDocs))
list_of_vectors = [model.infer_vector(one_doc) for one_doc in map(lambda x : x[2].words,
fullTestDatasetDocs)]
save_data_after_doc2vec_inference(
os.path.join(PATH_TO_DOC2VEC_INFER_VECTORS,
'doc2vec_data_size_{}_window_{}_infered'.format(vector_size, window_size)),
list_of_vectors, test_authors_list, test_novels_list)
print('Model inference... DONE')
if FAIR_TEST:
print('Model inference (fair test set)...')
test_authors_list = list(map(lambda x : x[0], fullFairTestDatasetDocs))
test_novels_list = list(map(lambda x: x[1], fullFairTestDatasetDocs))
list_of_vectors = [model.infer_vector(one_doc) for one_doc in map(lambda x : x[2].words,
fullFairTestDatasetDocs)]
save_data_after_doc2vec_inference(
os.path.join(PATH_TO_DOC2VEC_FAIR_TEST_INFER_VECTORS,
'doc2vec_data_size_{}_window_{}_infered'.format(vector_size, window_size)),
list_of_vectors, test_authors_list, test_novels_list)
print('Model inference (fair test set)... DONE')
| true | true |
1c33a2588ff9ca2ac64905835a1581d4a124f5e7 | 220 | py | Python | Modulo-um/Exercicio15.py | Ribinha740/Exercicios-python | c2af02fedd2f72445abedf3598cb07c74fad326f | [
"MIT"
] | null | null | null | Modulo-um/Exercicio15.py | Ribinha740/Exercicios-python | c2af02fedd2f72445abedf3598cb07c74fad326f | [
"MIT"
] | null | null | null | Modulo-um/Exercicio15.py | Ribinha740/Exercicios-python | c2af02fedd2f72445abedf3598cb07c74fad326f | [
"MIT"
] | null | null | null | print('=========DESAFIO15=========')
# Car rental cost: R$60 per rental day plus R$0.15 per kilometer driven.
dias = int(input('Quanto Dias o Carro Foi Alugados? '))
km = float(input('Quantos KM Rodados? '))
pago = (dias * 60) + (km * 0.15)
print('O total a pagar é de R${:.2f}'.format(pago))
| 36.666667 | 55 | 0.586364 | print('=========DESAFIO15=========')
dias = int(input('Quanto Dias o Carro Foi Alugados? '))
km = float(input('Quantos KM Rodados? '))
pago = (dias * 60) + (km * 0.15)
print('O total a pagar é de R${:.2f}'.format(pago))
| true | true |
1c33a3a4cbb41866a0f79ea7c4789a698fa13c56 | 87,240 | py | Python | test/functional/feature_taproot.py | quantumedusa/bitcoin | 9fb050720b88f4448547c49841c0c01c92370934 | [
"MIT"
] | 20 | 2021-02-24T18:57:12.000Z | 2021-06-27T01:20:43.000Z | test/functional/feature_taproot.py | quantumedusa/bitcoin | 9fb050720b88f4448547c49841c0c01c92370934 | [
"MIT"
] | 6 | 2021-04-08T23:50:08.000Z | 2021-12-31T10:53:38.000Z | test/functional/feature_taproot.py | quantumedusa/bitcoin | 9fb050720b88f4448547c49841c0c01c92370934 | [
"MIT"
] | 4 | 2021-03-14T13:38:48.000Z | 2021-06-18T17:11:05.000Z | #!/usr/bin/env python3
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test Taproot softfork (BIPs 340-342)
from test_framework.blocktools import (
create_coinbase,
create_block,
add_witness_commitment,
MAX_BLOCK_SIGOPS_WEIGHT,
NORMAL_GBT_REQUEST_PARAMS,
WITNESS_SCALE_FACTOR,
)
from test_framework.messages import (
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
ToHex,
)
from test_framework.script import (
ANNEX_TAG,
CScript,
CScriptNum,
CScriptOp,
LEAF_VERSION_TAPSCRIPT,
LegacySignatureHash,
LOCKTIME_THRESHOLD,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_2,
OP_3,
OP_4,
OP_5,
OP_6,
OP_7,
OP_8,
OP_9,
OP_10,
OP_11,
OP_12,
OP_16,
OP_2DROP,
OP_2DUP,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_CHECKSIG,
OP_CHECKSIGADD,
OP_CHECKSIGVERIFY,
OP_CODESEPARATOR,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_HASH160,
OP_IF,
OP_NOP,
OP_NOT,
OP_NOTIF,
OP_PUSHDATA1,
OP_RETURN,
OP_SWAP,
OP_VERIFY,
SIGHASH_DEFAULT,
SIGHASH_ALL,
SIGHASH_NONE,
SIGHASH_SINGLE,
SIGHASH_ANYONECANPAY,
SegwitV0SignatureHash,
TaprootSignatureHash,
is_op_success,
taproot_construct,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_raises_rpc_error, assert_equal
from test_framework.key import generate_privkey, compute_xonly_pubkey, sign_schnorr, tweak_add_privkey, ECKey
from test_framework.address import (
hash160,
sha256,
)
from collections import OrderedDict, namedtuple
from io import BytesIO
import json
import hashlib
import os
import random
# === Framework for building spending transactions. ===
#
# The computation is represented as a "context" dict, whose entries store potentially-unevaluated expressions that
# refer to lower-level ones. By overwriting these expression, many aspects - both high and low level - of the signing
# process can be overridden.
#
# Specifically, a context object is a dict that maps names to compositions of:
# - values
# - lists of values
# - callables which, when fed the context object as argument, produce any of these
#
# The DEFAULT_CONTEXT object specifies a standard signing process, with many overridable knobs.
#
# The get(ctx, name) function can evaluate a name, and cache its result in the context.
# getter(name) can be used to construct a callable that evaluates name. For example:
#
#   ctx1 = {**DEFAULT_CONTEXT, "inputs": [getter("sign"), b'\x01']}
#
# creates a context where the script inputs are a signature plus the bytes 0x01.
#
# override(expr, name1=expr1, name2=expr2, ...) can be used to cause an expression to be evaluated in a selectively
# modified context. For example:
#
#   ctx2 = {**DEFAULT_CONTEXT, "sighash": override(default_sighash, hashtype=SIGHASH_DEFAULT)}
#
# creates a context ctx2 where the sighash is modified to use hashtype=SIGHASH_DEFAULT. This differs from
#
#   ctx3 = {**DEFAULT_CONTEXT, "hashtype": SIGHASH_DEFAULT}
#
# in that ctx3 will globally use hashtype=SIGHASH_DEFAULT (including in the hashtype byte appended to the signature)
# while ctx2 only uses the modified hashtype inside the sighash calculation.
def deep_eval(ctx, expr):
    """Fully evaluate expr against ctx: repeatedly call any callable with ctx,
    and recurse into lists so nested callables are evaluated too."""
    value = expr
    while callable(value):
        value = value(ctx)
    if isinstance(value, list):
        return [deep_eval(ctx, item) for item in value]
    return value
# Wrapper marking a context entry as already evaluated (memoization marker).
Final = namedtuple("Final", "value")

def get(ctx, name):
    """Evaluate the expression stored under name in ctx, caching the result."""
    assert name in ctx, "Missing '%s' in context" % name
    entry = ctx[name]
    if isinstance(entry, Final):
        # Already evaluated earlier: reuse the cached value.
        return entry.value
    # Not evaluated yet: evaluate now and memoize as a Final.
    result = Final(deep_eval(ctx, entry))
    ctx[name] = result
    return result.value
def getter(name):
    """Return a thunk that, given a context, evaluates name inside it."""
    def _eval(ctx):
        return get(ctx, name)
    return _eval
def override(expr, **kwargs):
    """Return a thunk evaluating expr in a context overlaid with kwargs."""
    def _eval(ctx):
        merged = {**ctx, **kwargs}
        return deep_eval(merged, expr)
    return _eval
# === Implementations for the various default expressions in DEFAULT_CONTEXT ===
def default_hashtype(ctx):
    """Hashtype default: SIGHASH_DEFAULT in taproot mode, SIGHASH_ALL elsewhere."""
    return SIGHASH_DEFAULT if get(ctx, "mode") == "taproot" else SIGHASH_ALL
def default_tapleaf(ctx):
    """Resolve the selected leaf name to its leaf info via the taproot info."""
    taproot_info = get(ctx, "tap")
    return taproot_info.leaves[get(ctx, "leaf")]
def default_script_taproot(ctx):
    """The tapscript of the selected leaf."""
    leaf_info = get(ctx, "tapleaf")
    return leaf_info.script
def default_leafversion(ctx):
    """Leaf version byte taken from the selected tapleaf."""
    leaf_info = get(ctx, "tapleaf")
    return leaf_info.version
def default_negflag(ctx):
    """Negation flag taken from the taproot info."""
    taproot_info = get(ctx, "tap")
    return taproot_info.negflag
def default_pubkey_inner(ctx):
    """Inner (internal) public key taken from the taproot info."""
    taproot_info = get(ctx, "tap")
    return taproot_info.inner_pubkey
def default_merklebranch(ctx):
    """Merkle branch taken from the selected tapleaf's info."""
    leaf_info = get(ctx, "tapleaf")
    return leaf_info.merklebranch
def default_controlblock(ctx):
    """Assemble the control block: (leafversion + negflag) byte, inner pubkey, Merkle branch."""
    first_byte = get(ctx, "leafversion") + get(ctx, "negflag")
    return bytes([first_byte]) + get(ctx, "pubkey_inner") + get(ctx, "merklebranch")
def default_sighash(ctx):
    """Compute the signature hash: BIP341 (taproot), BIP143 (witv0), or legacy."""
    txn = get(ctx, "tx")
    input_index = get(ctx, "idx")
    hashtype = get(ctx, "hashtype_actual")
    mode = get(ctx, "mode")
    if mode == "taproot":
        spent_utxos = get(ctx, "utxos")
        annex = get(ctx, "annex")
        if get(ctx, "leaf") is not None:
            # Script path: the digest commits to the executed script,
            # its leaf version and the last codeseparator position.
            codeseppos = get(ctx, "codeseppos")
            leaf_ver = get(ctx, "leafversion")
            script = get(ctx, "script_taproot")
            return TaprootSignatureHash(txn, spent_utxos, hashtype, input_index, scriptpath=True, script=script, leaf_ver=leaf_ver, codeseparator_pos=codeseppos, annex=annex)
        # Key path spend.
        return TaprootSignatureHash(txn, spent_utxos, hashtype, input_index, scriptpath=False, annex=annex)
    if mode == "witv0":
        # BIP143 digest: needs the scriptcode and the spent amount.
        scriptcode = get(ctx, "scriptcode")
        spent_utxos = get(ctx, "utxos")
        return SegwitV0SignatureHash(scriptcode, txn, input_index, hashtype, spent_utxos[input_index].nValue)
    # Pre-segwit sighash; LegacySignatureHash returns (hash, err).
    scriptcode = get(ctx, "scriptcode")
    return LegacySignatureHash(scriptcode, txn, input_index, hashtype)[0]
def default_tweak(ctx):
    """Key-path spends use the taproot tweak; script-path spends use none."""
    if get(ctx, "leaf") is not None:
        return None
    return get(ctx, "tap").tweak
def default_key_tweaked(ctx):
    """The signing key, with the taproot tweak applied when one is set."""
    privkey = get(ctx, "key")
    tweak = get(ctx, "tweak")
    return privkey if tweak is None else tweak_add_privkey(privkey, tweak)
def default_signature(ctx):
    """Produce a BIP340 (taproot mode) or ECDSA signature over the sighash."""
    msg = get(ctx, "sighash")
    if get(ctx, "mode") == "taproot":
        seckey = get(ctx, "key_tweaked")
        # flag_flip_r / flag_flip_p intentionally corrupt the signature's
        # R/P signs for negative tests.
        return sign_schnorr(seckey, msg, flip_r=get(ctx, "flag_flip_r"), flip_p=get(ctx, "flag_flip_p"))
    return get(ctx, "key").sign_ecdsa(msg)
def default_hashtype_actual(ctx):
    """The hashtype actually used: taproot remaps SIGHASH_SINGLE to SIGHASH_NONE
    when the input index has no corresponding output."""
    hashtype = get(ctx, "hashtype")
    if get(ctx, "mode") != "taproot":
        return hashtype
    input_index = get(ctx, "idx")
    txn = get(ctx, "tx")
    if hashtype & 3 == SIGHASH_SINGLE and input_index >= len(txn.vout):
        return (hashtype & ~3) | SIGHASH_NONE
    return hashtype
def default_bytes_hashtype(ctx):
    """One-byte hashtype suffix; empty when the (default) hashtype is 0."""
    hashtype = get(ctx, "hashtype_actual")
    return b"" if hashtype == 0 else bytes([hashtype])
def default_sign(ctx):
    """Full script signature: raw signature plus the optional hashtype byte."""
    sig = get(ctx, "signature")
    return sig + get(ctx, "bytes_hashtype")
def default_inputs_keypath(ctx):
    """Key-path witness inputs: a single signature."""
    sig = get(ctx, "sign")
    return [sig]
def default_witness_taproot(ctx):
    """Witness stack for a taproot spend: inputs, plus (for script path)
    the executed script and its control block, plus the optional annex."""
    annex = get(ctx, "annex")
    tail = [] if annex is None else [annex]
    if get(ctx, "leaf") is None:
        # Key path: just the stack inputs (normally one signature).
        return get(ctx, "inputs_keypath") + tail
    # Script path: inputs, the script being executed, then its control block.
    stack = list(get(ctx, "inputs"))
    stack.append(bytes(get(ctx, "script_taproot")))
    stack.append(get(ctx, "controlblock"))
    return stack + tail
def default_witness_witv0(ctx):
    """Witness stack for a v0 segwit spend: inputs, plus the witness script for P2WSH."""
    witscript = get(ctx, "script_witv0")
    inputs = get(ctx, "inputs")
    return inputs if witscript is None else inputs + [witscript]
def default_witness(ctx):
    """Dispatch to the taproot or witv0 witness builder; legacy spends have none."""
    mode = get(ctx, "mode")
    if mode == "taproot":
        return get(ctx, "witness_taproot")
    if mode == "witv0":
        return get(ctx, "witness_witv0")
    return []
def default_scriptsig(ctx):
    """scriptSig elements: the stack inputs for legacy spends, plus the
    redeemscript when spending P2SH."""
    mode = get(ctx, "mode")
    scriptsig = get(ctx, "inputs") if mode == "legacy" else []
    redeemscript = get(ctx, "script_p2sh")
    if redeemscript is not None:
        # NOTE(review): += extends the (possibly cached "inputs") list in
        # place; benign here since legacy mode builds no witness from it.
        scriptsig += [bytes(redeemscript)]
    return scriptsig
# The default context object: maps every overridable aspect of the signing
# process to its default (possibly lazy) expression.
DEFAULT_CONTEXT = {
    # == The main expressions to evaluate. Only override these for unusual or invalid spends. ==
    # The overall witness stack, as a list of bytes objects.
    "witness": default_witness,
    # The overall scriptsig, as a list of CScript objects (to be concatenated) and bytes objects (to be pushed).
    "scriptsig": default_scriptsig,
    # == Expressions you'll generally only override for intentionally invalid spends. ==
    # The witness stack for spending a taproot output.
    "witness_taproot": default_witness_taproot,
    # The witness stack for spending a P2WPKH/P2WSH output.
    "witness_witv0": default_witness_witv0,
    # The script inputs for a taproot key path spend.
    "inputs_keypath": default_inputs_keypath,
    # The actual hashtype to use (usually equal to hashtype, but in taproot SIGHASH_SINGLE is not always allowed).
    "hashtype_actual": default_hashtype_actual,
    # The bytes object for a full signature (including hashtype byte, if needed).
    "bytes_hashtype": default_bytes_hashtype,
    # A full script signature (bytes including hashtype, if needed).
    "sign": default_sign,
    # An ECDSA or Schnorr signature (excluding hashtype byte).
    "signature": default_signature,
    # The 32-byte tweaked key (equal to key for script path spends, or key+tweak for key path spends).
    "key_tweaked": default_key_tweaked,
    # The tweak to use (None for script path spends, the actual tweak for key path spends).
    "tweak": default_tweak,
    # The sighash value (32 bytes)
    "sighash": default_sighash,
    # The information about the chosen script path spend (TaprootLeafInfo object).
    "tapleaf": default_tapleaf,
    # The script to push, and include in the sighash, for a taproot script path spend.
    "script_taproot": default_script_taproot,
    # The inner pubkey for a taproot script path spend (32 bytes).
    "pubkey_inner": default_pubkey_inner,
    # The negation flag of the inner pubkey for a taproot script path spend.
    "negflag": default_negflag,
    # The leaf version to include in the sighash (this does not affect the one in the control block).
    "leafversion": default_leafversion,
    # The Merkle path to include in the control block for a script path spend.
    "merklebranch": default_merklebranch,
    # The control block to push for a taproot script path spend.
    "controlblock": default_controlblock,
    # Whether to produce signatures with invalid P sign (Schnorr signatures only).
    "flag_flip_p": False,
    # Whether to produce signatures with invalid R sign (Schnorr signatures only).
    "flag_flip_r": False,
    # == Parameters that can be changed without invalidating, but do have a default: ==
    # The hashtype (as an integer).
    "hashtype": default_hashtype,
    # The annex (only when mode=="taproot").
    "annex": None,
    # The codeseparator position (only when mode=="taproot").
    "codeseppos": -1,
    # The redeemscript to add to the scriptSig (if P2SH; None implies not P2SH).
    "script_p2sh": None,
    # The script to add to the witness (if P2WSH; None implies P2WPKH).
    "script_witv0": None,
    # The leaf to use in taproot spends (if script path spend; None implies key path spend).
    "leaf": None,
    # The input arguments to provide to the executed script.
    "inputs": [],
    # == Parameters to be set before evaluation: ==
    # - mode: what spending style to use ("taproot", "witv0", or "legacy").
    # - key: the (untweaked) private key to sign with (ECKey object for ECDSA, 32 bytes for Schnorr).
    # - tap: the TaprootInfo object (see taproot_construct; needed in mode=="taproot").
    # - tx: the transaction to sign.
    # - utxos: the UTXOs being spent (needed in mode=="witv0" and mode=="taproot").
    # - idx: the input position being signed.
    # - scriptcode: the scriptcode to include in legacy and witv0 sighashes.
}
def flatten(lst):
    """Return a flat list of all non-list elements in lst, in depth-first order."""
    result = []
    for item in lst:
        # Recurse into nested lists; anything else is appended as-is.
        result.extend(flatten(item) if isinstance(item, list) else [item])
    return result
def spend(tx, idx, utxos, **kwargs):
    """Sign transaction input idx of tx, provided utxos is the list of outputs being spent.

    Additional arguments may be provided that override any aspect of the signing process.
    See DEFAULT_CONTEXT above for what can be overridden, and what must be provided.
    """
    # Build the signing context: defaults, then the mandatory entries, then caller overrides.
    ctx = {**DEFAULT_CONTEXT, "tx": tx, "idx": idx, "utxos": utxos, **kwargs}

    def as_script(item):
        """Return item unchanged if it is a CScript; otherwise wrap it in a push."""
        return item if isinstance(item, CScript) else CScript([item])

    sig_elems = flatten(get(ctx, "scriptsig"))
    scriptsig = CScript(b"".join(bytes(as_script(elem)) for elem in sig_elems))
    witness_stack = flatten(get(ctx, "witness"))
    return (scriptsig, witness_stack)
# === Spender objects ===
#
# Each spender is a tuple of:
# - A scriptPubKey which is to be spent from (CScript)
# - A comment describing the test (string)
# - Whether the spending (on itself) is expected to be standard (bool)
# - A tx-signing lambda returning (scriptsig, witness_stack), taking as inputs:
# - A transaction to sign (CTransaction)
# - An input position (int)
# - The spent UTXOs by this transaction (list of CTxOut)
# - Whether to produce a valid spend (bool)
# - A string with an expected error message for failure case if known
# - The (pre-taproot) sigops weight consumed by a successful spend
# - Whether this spend cannot fail
# - Whether this test demands being placed in a txin with no corresponding txout (for testing SIGHASH_SINGLE behavior)
Spender = namedtuple("Spender", "script,comment,is_standard,sat_function,err_msg,sigops_weight,no_fail,need_vin_vout_mismatch")
def make_spender(comment, *, tap=None, witv0=False, script=None, pkh=None, p2sh=False, spk_mutate_pre_p2sh=None, failure=None, standard=True, err_msg=None, sigops_weight=0, need_vin_vout_mismatch=False, **kwargs):
    """Helper for constructing Spender objects using the context signing framework.

    * comment: a string describing the test, stored in the resulting Spender
    * tap: a TaprootInfo object (see taproot_construct), for Taproot spends (cannot be combined with pkh, witv0, or script)
    * witv0: boolean indicating the use of witness v0 spending (needs one of script or pkh)
    * script: the actual script executed (for bare/P2WSH/P2SH spending)
    * pkh: the public key for P2PKH or P2WPKH spending
    * p2sh: whether the output is P2SH wrapper (this is supported even for Taproot, where it makes the output unencumbered)
    * spk_mutate_pre_p2sh: a callable to be applied to the scriptPubKey (before potentially P2SH-wrapping it)
    * failure: a dict of entries to override in the context when intentionally failing to spend (if None, no_fail will be set)
    * standard: whether the (valid version of) spending is expected to be standard
    * err_msg: a string with an expected error message for failure (or None, if not cared about)
    * sigops_weight: the pre-taproot sigops weight consumed by a successful spend
    * need_vin_vout_mismatch: whether this test demands being placed in a txin with no corresponding txout
    * kwargs: any additional entries to merge into the signing context (override the defaults computed here)
    """
    conf = dict()
    # Compute scriptPubKey and set useful defaults based on the inputs.
    if witv0:
        assert tap is None
        conf["mode"] = "witv0"
        if pkh is not None:
            # P2WPKH
            assert script is None
            pubkeyhash = hash160(pkh)
            spk = CScript([OP_0, pubkeyhash])
            conf["scriptcode"] = CScript([OP_DUP, OP_HASH160, pubkeyhash, OP_EQUALVERIFY, OP_CHECKSIG])
            conf["script_witv0"] = None
            conf["inputs"] = [getter("sign"), pkh]
        elif script is not None:
            # P2WSH
            spk = CScript([OP_0, sha256(script)])
            conf["scriptcode"] = script
            conf["script_witv0"] = script
        else:
            # witv0 requires either pkh or script.
            assert False
    elif tap is None:
        conf["mode"] = "legacy"
        if pkh is not None:
            # P2PKH
            assert script is None
            pubkeyhash = hash160(pkh)
            spk = CScript([OP_DUP, OP_HASH160, pubkeyhash, OP_EQUALVERIFY, OP_CHECKSIG])
            conf["scriptcode"] = spk
            conf["inputs"] = [getter("sign"), pkh]
        elif script is not None:
            # bare
            spk = script
            conf["scriptcode"] = script
        else:
            # legacy mode requires either pkh or script.
            assert False
    else:
        # Taproot: pkh/script selection is done through tap/leaf instead.
        assert script is None
        conf["mode"] = "taproot"
        conf["tap"] = tap
        spk = tap.scriptPubKey
    if spk_mutate_pre_p2sh is not None:
        spk = spk_mutate_pre_p2sh(spk)
    if p2sh:
        # P2SH wrapper can be combined with anything else
        conf["script_p2sh"] = spk
        spk = CScript([OP_HASH160, hash160(spk), OP_EQUAL])
    # Caller-provided context entries override the defaults computed above.
    conf = {**conf, **kwargs}
    def sat_fn(tx, idx, utxos, valid):
        # Satisfaction function: sign with the base context for valid spends,
        # or with the failure overrides applied for intentionally invalid ones.
        if valid:
            return spend(tx, idx, utxos, **conf)
        else:
            assert failure is not None
            return spend(tx, idx, utxos, **{**conf, **failure})
    return Spender(script=spk, comment=comment, is_standard=standard, sat_function=sat_fn, err_msg=err_msg, sigops_weight=sigops_weight, no_fail=failure is None, need_vin_vout_mismatch=need_vin_vout_mismatch)
def add_spender(spenders, *args, **kwargs):
    """Construct a Spender via make_spender and append it to the spenders list."""
    new_spender = make_spender(*args, **kwargs)
    spenders.append(new_spender)
# === Helpers for the test ===
def random_checksig_style(pubkey):
    """Creates a random CHECKSIG* tapscript that would succeed with only the valid signature on witness stack."""
    # Fix: removed an unconditional early `return bytes(CScript([pubkey, OP_CHECKSIG]))`
    # debug stub that made everything below unreachable dead code, defeating the
    # randomization this helper is documented to provide.
    opcode = random.choice([OP_CHECKSIG, OP_CHECKSIGVERIFY, OP_CHECKSIGADD])
    if (opcode == OP_CHECKSIGVERIFY):
        # CHECKSIGVERIFY consumes its result; push OP_1 so the script leaves a true value.
        ret = CScript([pubkey, opcode, OP_1])
    elif (opcode == OP_CHECKSIGADD):
        # CHECKSIGADD adds 1 to the number below the pubkey on success; verify that.
        num = random.choice([0, 0x7fffffff, -0x7fffffff])
        ret = CScript([num, pubkey, opcode, num + 1, OP_EQUAL])
    else:
        ret = CScript([pubkey, opcode])
    return bytes(ret)
def random_bytes(n):
    """Return a random bytes object of length n."""
    # One getrandbits(8) call per byte, in order, so the output is seed-reproducible.
    return bytes([random.getrandbits(8) for _ in range(n)])
def bitflipper(expr):
    """Return a callable that evaluates expr and returns it with a random bitflip."""
    def flipped(ctx):
        data = deep_eval(ctx, expr)
        assert isinstance(data, bytes)
        # XOR a single randomly-chosen bit of the little-endian integer view.
        as_int = int.from_bytes(data, 'little')
        as_int ^= 1 << random.randrange(len(data) * 8)
        return as_int.to_bytes(len(data), 'little')
    return flipped
def zero_appender(expr):
    """Return a callable that evaluates expr and returns it with a zero added."""
    def appended(ctx):
        return deep_eval(ctx, expr) + b"\x00"
    return appended
def byte_popper(expr):
    """Return a callable that evaluates expr and returns it with its last byte removed."""
    def popped(ctx):
        return deep_eval(ctx, expr)[:-1]
    return popped
# Expected error strings
#
# Each constant is a single-entry kwargs dict (holding only "err_msg") so it
# can be spliced into make_spender/add_spender calls with ** expansion.
# The string values are matched against node error messages and must not change.
ERR_SIG_SIZE = {"err_msg": "Invalid Schnorr signature size"}
ERR_SIG_HASHTYPE = {"err_msg": "Invalid Schnorr signature hash type"}
ERR_SIG_SCHNORR = {"err_msg": "Invalid Schnorr signature"}
ERR_OP_RETURN = {"err_msg": "OP_RETURN was encountered"}
ERR_CONTROLBLOCK_SIZE = {"err_msg": "Invalid Taproot control block size"}
ERR_WITNESS_PROGRAM_MISMATCH = {"err_msg": "Witness program hash mismatch"}
ERR_PUSH_LIMIT = {"err_msg": "Push value size limit exceeded"}
ERR_DISABLED_OPCODE = {"err_msg": "Attempted to use a disabled opcode"}
ERR_TAPSCRIPT_CHECKMULTISIG = {"err_msg": "OP_CHECKMULTISIG(VERIFY) is not available in tapscript"}
ERR_MINIMALIF = {"err_msg": "OP_IF/NOTIF argument must be minimal in tapscript"}
ERR_UNKNOWN_PUBKEY = {"err_msg": "Public key is neither compressed or uncompressed"}
ERR_STACK_SIZE = {"err_msg": "Stack size limit exceeded"}
ERR_CLEANSTACK = {"err_msg": "Stack size must be exactly one after execution"}
ERR_STACK_EMPTY = {"err_msg": "Operation not valid with the current stack size"}
ERR_SIGOPS_RATIO = {"err_msg": "Too much signature validation relative to witness weight"}
ERR_UNDECODABLE = {"err_msg": "Opcode missing or not understood"}
ERR_NO_SUCCESS = {"err_msg": "Script evaluated without error but finished with a false/empty top stack element"}
ERR_EMPTY_WITNESS = {"err_msg": "Witness program was passed an empty witness"}
ERR_CHECKSIGVERIFY = {"err_msg": "Script failed an OP_CHECKSIGVERIFY operation"}
# All hashtype values valid for ECDSA (pre-taproot) signatures.
VALID_SIGHASHES_ECDSA = [
    SIGHASH_ALL,
    SIGHASH_NONE,
    SIGHASH_SINGLE,
    SIGHASH_ANYONECANPAY + SIGHASH_ALL,
    SIGHASH_ANYONECANPAY + SIGHASH_NONE,
    SIGHASH_ANYONECANPAY + SIGHASH_SINGLE
]
# Taproot additionally accepts SIGHASH_DEFAULT.
VALID_SIGHASHES_TAPROOT = [SIGHASH_DEFAULT] + VALID_SIGHASHES_ECDSA
# The subset of taproot hashtypes that use SIGHASH_SINGLE.
VALID_SIGHASHES_TAPROOT_SINGLE = [
    SIGHASH_SINGLE,
    SIGHASH_ANYONECANPAY + SIGHASH_SINGLE
]
# The taproot hashtypes that do not use SIGHASH_SINGLE.
VALID_SIGHASHES_TAPROOT_NO_SINGLE = [h for h in VALID_SIGHASHES_TAPROOT if h not in VALID_SIGHASHES_TAPROOT_SINGLE]
# Reusable kwargs fragments for add_spender calls (applied via ** expansion):
# failure mode that flips one random bit in the sighash before signing
SIGHASH_BITFLIP = {"failure": {"sighash": bitflipper(default_sighash)}}
# failure mode that removes the last byte of the signature
SIG_POP_BYTE = {"failure": {"sign": byte_popper(default_sign)}}
# script input consisting of just a single signature
SINGLE_SIG = {"inputs": [getter("sign")]}
# failure mode that appends a zero byte to the signature
SIG_ADD_ZERO = {"failure": {"sign": zero_appender(default_sign)}}
# Output amount and fee used by the test transactions (presumably in satoshis -- confirm against usage).
DUST_LIMIT = 600
MIN_FEE = 50000
# === Actual test cases ===
def spenders_taproot_active():
"""Return a list of Spenders for testing post-Taproot activation behavior."""
secs = [generate_privkey() for _ in range(8)]
pubs = [compute_xonly_pubkey(sec)[0] for sec in secs]
spenders = []
# == Tests for BIP340 signature validation. ==
# These are primarily tested through the test vectors implemented in libsecp256k1, and in src/tests/key_tests.cpp.
# Some things are tested programmatically as well here.
tap = taproot_construct(pubs[0])
# Test with key with bit flipped.
add_spender(spenders, "sig/key", tap=tap, key=secs[0], failure={"key_tweaked": bitflipper(default_key_tweaked)}, **ERR_SIG_SCHNORR)
# Test with sighash with bit flipped.
add_spender(spenders, "sig/sighash", tap=tap, key=secs[0], failure={"sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
# Test with invalid R sign.
add_spender(spenders, "sig/flip_r", tap=tap, key=secs[0], failure={"flag_flip_r": True}, **ERR_SIG_SCHNORR)
# Test with invalid P sign.
add_spender(spenders, "sig/flip_p", tap=tap, key=secs[0], failure={"flag_flip_p": True}, **ERR_SIG_SCHNORR)
# Test with signature with bit flipped.
add_spender(spenders, "sig/bitflip", tap=tap, key=secs[0], failure={"signature": bitflipper(default_signature)}, **ERR_SIG_SCHNORR)
# == Tests for signature hashing ==
# Run all tests once with no annex, and once with a valid random annex.
for annex in [None, lambda _: bytes([ANNEX_TAG]) + random_bytes(random.randrange(0, 250))]:
# Non-empty annex is non-standard
no_annex = annex is None
# Sighash mutation tests (test all sighash combinations)
for hashtype in VALID_SIGHASHES_TAPROOT:
common = {"annex": annex, "hashtype": hashtype, "standard": no_annex}
# Pure pubkey
tap = taproot_construct(pubs[0])
add_spender(spenders, "sighash/purepk", tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Pubkey/P2PK script combination
scripts = [("s0", CScript(random_checksig_style(pubs[1])))]
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "sighash/keypath_hashtype_%x" % hashtype, tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/scriptpath_hashtype_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Test SIGHASH_SINGLE behavior in combination with mismatching outputs
if hashtype in VALID_SIGHASHES_TAPROOT_SINGLE:
add_spender(spenders, "sighash/keypath_hashtype_mis_%x" % hashtype, tap=tap, key=secs[0], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True)
add_spender(spenders, "sighash/scriptpath_hashtype_mis_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), **SINGLE_SIG, failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True)
# Test OP_CODESEPARATOR impact on sighashing.
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
common = {"annex": annex, "hashtype": hashtype, "standard": no_annex}
scripts = [
("pk_codesep", CScript(random_checksig_style(pubs[1]) + bytes([OP_CODESEPARATOR]))), # codesep after checksig
("codesep_pk", CScript(bytes([OP_CODESEPARATOR]) + random_checksig_style(pubs[1]))), # codesep before checksig
("branched_codesep", CScript([random_bytes(random.randrange(511)), OP_DROP, OP_IF, OP_CODESEPARATOR, pubs[0], OP_ELSE, OP_CODESEPARATOR, pubs[1], OP_ENDIF, OP_CHECKSIG])), # branch dependent codesep
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "sighash/pk_codesep", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/codesep_pk", tap=tap, leaf="codesep_pk", key=secs[1], codeseppos=0, **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/branched_codesep/left", tap=tap, leaf="branched_codesep", key=secs[0], codeseppos=3, **common, inputs=[getter("sign"), b'\x01'], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/branched_codesep/right", tap=tap, leaf="branched_codesep", key=secs[1], codeseppos=6, **common, inputs=[getter("sign"), b''], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Reusing the scripts above, test that various features affect the sighash.
add_spender(spenders, "sighash/annex", tap=tap, leaf="pk_codesep", key=secs[1], hashtype=hashtype, standard=False, **SINGLE_SIG, annex=bytes([ANNEX_TAG]), failure={"sighash": override(default_sighash, annex=None)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/script", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, script_taproot=tap.leaves["codesep_pk"].script)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/leafver", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leafversion=random.choice([x & 0xFE for x in range(0x100) if x & 0xFE != 0xC0]))}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leaf=None)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/keypath", tap=tap, key=secs[0], **common, failure={"sighash": override(default_sighash, leaf="pk_codesep")}, **ERR_SIG_SCHNORR)
# Test that invalid hashtypes don't work, both in key path and script path spends
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
for invalid_hashtype in [x for x in range(0x100) if x not in VALID_SIGHASHES_TAPROOT]:
add_spender(spenders, "sighash/keypath_unk_hashtype_%x" % invalid_hashtype, tap=tap, key=secs[0], hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/scriptpath_unk_hashtype_%x" % invalid_hashtype, tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE)
# Test that hashtype 0 cannot have a hashtype byte, and 1 must have one.
add_spender(spenders, "sighash/hashtype0_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/hashtype0_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/hashtype1_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
# Test that hashtype 0 and hashtype 1 cannot be transmuted into each other.
add_spender(spenders, "sighash/hashtype0to1_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype0to1_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1to0_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1to0_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
# Test aspects of signatures with unusual lengths
for hashtype in [SIGHASH_DEFAULT, random.choice(VALID_SIGHASHES_TAPROOT)]:
scripts = [
("csv", CScript([pubs[2], OP_CHECKSIGVERIFY, OP_1])),
("cs_pos", CScript([pubs[2], OP_CHECKSIG])),
("csa_pos", CScript([OP_0, pubs[2], OP_CHECKSIGADD, OP_1, OP_EQUAL])),
("cs_neg", CScript([pubs[2], OP_CHECKSIG, OP_NOT])),
("csa_neg", CScript([OP_2, pubs[2], OP_CHECKSIGADD, OP_2, OP_EQUAL]))
]
random.shuffle(scripts)
tap = taproot_construct(pubs[3], scripts)
# Empty signatures
add_spender(spenders, "siglen/empty_keypath", tap=tap, key=secs[3], hashtype=hashtype, failure={"sign": b""}, **ERR_SIG_SIZE)
add_spender(spenders, "siglen/empty_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_CHECKSIGVERIFY)
add_spender(spenders, "siglen/empty_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS)
add_spender(spenders, "siglen/empty_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS)
add_spender(spenders, "siglen/empty_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random_bytes(random.randrange(1, 63))}, **ERR_SIG_SIZE)
add_spender(spenders, "siglen/empty_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random_bytes(random.randrange(66, 100))}, **ERR_SIG_SIZE)
# Appending a zero byte to signatures invalidates them
add_spender(spenders, "siglen/padzero_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
# Removing the last byte from signatures invalidates them
add_spender(spenders, "siglen/popbyte_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
# Verify that an invalid signature is not allowed, not even when the CHECKSIG* is expected to fail.
add_spender(spenders, "siglen/invalid_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "siglen/invalid_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
# == Test that BIP341 spending only applies to witness version 1, program length 32, no P2SH ==
for p2sh in [False, True]:
for witver in range(1, 17):
for witlen in [20, 31, 32, 33]:
def mutate(spk):
prog = spk[2:]
assert len(prog) == 32
if witlen < 32:
prog = prog[0:witlen]
elif witlen > 32:
prog += bytes([0 for _ in range(witlen - 32)])
return CScript([CScriptOp.encode_op_n(witver), prog])
scripts = [("s0", CScript([pubs[0], OP_CHECKSIG])), ("dummy", CScript([OP_RETURN]))]
tap = taproot_construct(pubs[1], scripts)
if not p2sh and witver == 1 and witlen == 32:
add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, failure={"leaf": "dummy"}, **ERR_OP_RETURN)
else:
add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], standard=False)
add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, standard=False)
# == Test various aspects of BIP341 spending paths ==
# A set of functions that compute the hashing partner in a Merkle tree, designed to exercise
# edge cases. This relies on the taproot_construct feature that a lambda can be passed in
# instead of a subtree, to compute the partner to be hashed with.
PARTNER_MERKLE_FN = [
# Combine with itself
lambda h: h,
# Combine with hash 0
lambda h: bytes([0 for _ in range(32)]),
# Combine with hash 2^256-1
lambda h: bytes([0xff for _ in range(32)]),
# Combine with itself-1 (BE)
lambda h: (int.from_bytes(h, 'big') - 1).to_bytes(32, 'big'),
# Combine with itself+1 (BE)
lambda h: (int.from_bytes(h, 'big') + 1).to_bytes(32, 'big'),
# Combine with itself-1 (LE)
lambda h: (int.from_bytes(h, 'little') - 1).to_bytes(32, 'big'),
# Combine with itself+1 (LE)
lambda h: (int.from_bytes(h, 'little') + 1).to_bytes(32, 'little'),
# Combine with random bitflipped version of self.
lambda h: (int.from_bytes(h, 'little') ^ (1 << random.randrange(256))).to_bytes(32, 'little')
]
# Start with a tree of that has depth 1 for "128deep" and depth 2 for "129deep".
scripts = [("128deep", CScript([pubs[0], OP_CHECKSIG])), [("129deep", CScript([pubs[0], OP_CHECKSIG])), random.choice(PARTNER_MERKLE_FN)]]
# Add 127 nodes on top of that tree, so that "128deep" and "129deep" end up at their designated depths.
for _ in range(127):
scripts = [scripts, random.choice(PARTNER_MERKLE_FN)]
tap = taproot_construct(pubs[0], scripts)
# Test that spends with a depth of 128 work, but 129 doesn't (even with a tree with weird Merkle branches in it).
add_spender(spenders, "spendpath/merklelimit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"leaf": "129deep"}, **ERR_CONTROLBLOCK_SIZE)
# Test that flipping the negation bit invalidates spends.
add_spender(spenders, "spendpath/negflag", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"negflag": lambda ctx: 1 - default_negflag(ctx)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that bitflips in the Merkle branch invalidate it.
add_spender(spenders, "spendpath/bitflipmerkle", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"merklebranch": bitflipper(default_merklebranch)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that bitflips in the inner pubkey invalidate it.
add_spender(spenders, "spendpath/bitflippubkey", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"pubkey_inner": bitflipper(default_pubkey_inner)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that empty witnesses are invalid.
add_spender(spenders, "spendpath/emptywit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"witness": []}, **ERR_EMPTY_WITNESS)
# Test that adding garbage to the control block invalidates it.
add_spender(spenders, "spendpath/padlongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random_bytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block invalidates it.
add_spender(spenders, "spendpath/trunclongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE)
scripts = [("s", CScript([pubs[0], OP_CHECKSIG]))]
tap = taproot_construct(pubs[1], scripts)
# Test that adding garbage to the control block invalidates it.
add_spender(spenders, "spendpath/padshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random_bytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block invalidates it.
add_spender(spenders, "spendpath/truncshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block to 1 byte ("-1 Merkle length") invalidates it
add_spender(spenders, "spendpath/trunc1shortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:1]}, **ERR_CONTROLBLOCK_SIZE)
# == Test BIP342 edge cases ==
csa_low_val = random.randrange(0, 17) # Within range for OP_n
csa_low_result = csa_low_val + 1
csa_high_val = random.randrange(17, 100) if random.getrandbits(1) else random.randrange(-100, -1) # Outside OP_n range
csa_high_result = csa_high_val + 1
OVERSIZE_NUMBER = 2**31
assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER))), 6)
assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER-1))), 5)
big_choices = []
big_scriptops = []
for i in range(1000):
r = random.randrange(len(pubs))
big_choices.append(r)
big_scriptops += [pubs[r], OP_CHECKSIGVERIFY]
def big_spend_inputs(ctx):
"""Helper function to construct the script input for t33/t34 below."""
# Instead of signing 999 times, precompute signatures for every (key, hashtype) combination
sigs = {}
for ht in VALID_SIGHASHES_TAPROOT:
for k in range(len(pubs)):
sigs[(k, ht)] = override(default_sign, hashtype=ht, key=secs[k])(ctx)
num = get(ctx, "num")
return [sigs[(big_choices[i], random.choice(VALID_SIGHASHES_TAPROOT))] for i in range(num - 1, -1, -1)]
# Various BIP342 features
scripts = [
# 0) drop stack element and OP_CHECKSIG
("t0", CScript([OP_DROP, pubs[1], OP_CHECKSIG])),
# 1) normal OP_CHECKSIG
("t1", CScript([pubs[1], OP_CHECKSIG])),
# 2) normal OP_CHECKSIGVERIFY
("t2", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1])),
# 3) Hypothetical OP_CHECKMULTISIG script that takes a single sig as input
("t3", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIG])),
# 4) Hypothetical OP_CHECKMULTISIGVERIFY script that takes a single sig as input
("t4", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIGVERIFY, OP_1])),
# 5) OP_IF script that needs a true input
("t5", CScript([OP_IF, pubs[1], OP_CHECKSIG, OP_ELSE, OP_RETURN, OP_ENDIF])),
# 6) OP_NOTIF script that needs a true input
("t6", CScript([OP_NOTIF, OP_RETURN, OP_ELSE, pubs[1], OP_CHECKSIG, OP_ENDIF])),
# 7) OP_CHECKSIG with an empty key
("t7", CScript([OP_0, OP_CHECKSIG])),
# 8) OP_CHECKSIGVERIFY with an empty key
("t8", CScript([OP_0, OP_CHECKSIGVERIFY, OP_1])),
# 9) normal OP_CHECKSIGADD that also ensures return value is correct
("t9", CScript([csa_low_val, pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 10) OP_CHECKSIGADD with empty key
("t10", CScript([csa_low_val, OP_0, OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 11) OP_CHECKSIGADD with missing counter stack element
("t11", CScript([pubs[1], OP_CHECKSIGADD, OP_1, OP_EQUAL])),
# 12) OP_CHECKSIG that needs invalid signature
("t12", CScript([pubs[1], OP_CHECKSIGVERIFY, pubs[0], OP_CHECKSIG, OP_NOT])),
# 13) OP_CHECKSIG with empty key that needs invalid signature
("t13", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_CHECKSIG, OP_NOT])),
# 14) OP_CHECKSIGADD that needs invalid signature
("t14", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, pubs[0], OP_CHECKSIGADD, OP_NOT])),
# 15) OP_CHECKSIGADD with empty key that needs invalid signature
("t15", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGADD, OP_NOT])),
# 16) OP_CHECKSIG with unknown pubkey type
("t16", CScript([OP_1, OP_CHECKSIG])),
# 17) OP_CHECKSIGADD with unknown pubkey type
("t17", CScript([OP_0, OP_1, OP_CHECKSIGADD])),
# 18) OP_CHECKSIGVERIFY with unknown pubkey type
("t18", CScript([OP_1, OP_CHECKSIGVERIFY, OP_1])),
# 19) script longer than 10000 bytes and over 201 non-push opcodes
("t19", CScript([OP_0, OP_0, OP_2DROP] * 10001 + [pubs[1], OP_CHECKSIG])),
# 20) OP_CHECKSIGVERIFY with empty key
("t20", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGVERIFY, OP_1])),
# 21) Script that grows the stack to 1000 elements
("t21", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 999 + [OP_DROP] * 999)),
# 22) Script that grows the stack to 1001 elements
("t22", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 1000 + [OP_DROP] * 1000)),
# 23) Script that expects an input stack of 1000 elements
("t23", CScript([OP_DROP] * 999 + [pubs[1], OP_CHECKSIG])),
# 24) Script that expects an input stack of 1001 elements
("t24", CScript([OP_DROP] * 1000 + [pubs[1], OP_CHECKSIG])),
# 25) Script that pushes a MAX_SCRIPT_ELEMENT_SIZE-bytes element
("t25", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE), OP_DROP, pubs[1], OP_CHECKSIG])),
# 26) Script that pushes a (MAX_SCRIPT_ELEMENT_SIZE+1)-bytes element
("t26", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, pubs[1], OP_CHECKSIG])),
# 27) CHECKSIGADD that must fail because numeric argument number is >4 bytes
("t27", CScript([CScriptNum(OVERSIZE_NUMBER), pubs[1], OP_CHECKSIGADD])),
# 28) Pushes random CScriptNum value, checks OP_CHECKSIGADD result
("t28", CScript([csa_high_val, pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])),
# 29) CHECKSIGADD that succeeds with proper sig because numeric argument number is <=4 bytes
("t29", CScript([CScriptNum(OVERSIZE_NUMBER-1), pubs[1], OP_CHECKSIGADD])),
# 30) Variant of t1 with "normal" 33-byte pubkey
("t30", CScript([b'\x03' + pubs[1], OP_CHECKSIG])),
# 31) Variant of t2 with "normal" 33-byte pubkey
("t31", CScript([b'\x02' + pubs[1], OP_CHECKSIGVERIFY, OP_1])),
# 32) Variant of t28 with "normal" 33-byte pubkey
("t32", CScript([csa_high_val, b'\x03' + pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])),
# 33) 999-of-999 multisig
("t33", CScript(big_scriptops[:1998] + [OP_1])),
# 34) 1000-of-1000 multisig
("t34", CScript(big_scriptops[:2000] + [OP_1])),
# 35) Variant of t9 that uses a non-minimally encoded input arg
("t35", CScript([bytes([csa_low_val]), pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 36) Empty script
("t36", CScript([])),
]
# Add many dummies to test huge trees
for j in range(100000):
scripts.append((None, CScript([OP_RETURN, random.randrange(100000)])))
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
common = {
"hashtype": hashtype,
"key": secs[1],
"tap": tap,
}
# Test that MAX_SCRIPT_ELEMENT_SIZE byte stack element inputs are valid, but not one more (and 80 bytes is standard but 81 is not).
add_spender(spenders, "tapscript/inputmaxlimit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random_bytes(MAX_SCRIPT_ELEMENT_SIZE)], failure={"inputs": [getter("sign"), random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1)]}, **ERR_PUSH_LIMIT)
add_spender(spenders, "tapscript/input80limit", leaf="t0", **common, inputs=[getter("sign"), random_bytes(80)])
add_spender(spenders, "tapscript/input81limit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random_bytes(81)])
# Test that OP_CHECKMULTISIG and OP_CHECKMULTISIGVERIFY cause failure, but OP_CHECKSIG and OP_CHECKSIGVERIFY work.
add_spender(spenders, "tapscript/disabled_checkmultisig", leaf="t1", **common, **SINGLE_SIG, failure={"leaf": "t3"}, **ERR_TAPSCRIPT_CHECKMULTISIG)
add_spender(spenders, "tapscript/disabled_checkmultisigverify", leaf="t2", **common, **SINGLE_SIG, failure={"leaf": "t4"}, **ERR_TAPSCRIPT_CHECKMULTISIG)
# Test that OP_IF and OP_NOTIF do not accept non-0x01 as truth value (the MINIMALIF rule is consensus in Tapscript)
add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x02']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x03']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0001']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0100']}, **ERR_MINIMALIF)
# Test that 1-byte public keys (which are unknown) are acceptable but nonstandard with unrelated signatures, but 0-byte public keys are not valid.
add_spender(spenders, "tapscript/unkpk/checksig", leaf="t16", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/unkpk/checksigadd", leaf="t17", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/unkpk/checksigverify", leaf="t18", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY)
# Test that 33-byte public keys (which are unknown) are acceptable but nonstandard with valid signatures, but normal pubkeys are not valid in that case.
add_spender(spenders, "tapscript/oldpk/checksig", leaf="t30", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t1"}, **ERR_SIG_SCHNORR)
add_spender(spenders, "tapscript/oldpk/checksigadd", leaf="t31", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t2"}, **ERR_SIG_SCHNORR)
add_spender(spenders, "tapscript/oldpk/checksigverify", leaf="t32", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t28"}, **ERR_SIG_SCHNORR)
# Test that 0-byte public keys are not acceptable.
add_spender(spenders, "tapscript/emptypk/checksig", leaf="t1", **SINGLE_SIG, **common, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigverify", leaf="t2", **SINGLE_SIG, **common, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t35", standard=False, **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
# Test that OP_CHECKSIGADD results are as expected
add_spender(spenders, "tapscript/checksigaddresults", leaf="t28", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error")
add_spender(spenders, "tapscript/checksigaddoversize", leaf="t29", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error")
# Test that OP_CHECKSIGADD requires 3 stack elements.
add_spender(spenders, "tapscript/checksigadd3args", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t11"}, **ERR_STACK_EMPTY)
# Test that empty signatures do not cause script failure in OP_CHECKSIG and OP_CHECKSIGADD (but do fail with empty pubkey, and do fail OP_CHECKSIGVERIFY)
add_spender(spenders, "tapscript/emptysigs/checksig", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t13"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptysigs/nochecksigverify", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t20"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptysigs/checksigadd", leaf="t14", **common, inputs=[b'', getter("sign")], failure={"leaf": "t15"}, **ERR_UNKNOWN_PUBKEY)
# Test that scripts over 10000 bytes (and over 201 non-push ops) are acceptable.
add_spender(spenders, "tapscript/no10000limit", leaf="t19", **SINGLE_SIG, **common)
# Test that a stack size of 1000 elements is permitted, but 1001 isn't.
add_spender(spenders, "tapscript/1000stack", leaf="t21", **SINGLE_SIG, **common, failure={"leaf": "t22"}, **ERR_STACK_SIZE)
# Test that an input stack size of 1000 elements is permitted, but 1001 isn't.
add_spender(spenders, "tapscript/1000inputs", leaf="t23", **common, inputs=[getter("sign")] + [b'' for _ in range(999)], failure={"leaf": "t24", "inputs": [getter("sign")] + [b'' for _ in range(1000)]}, **ERR_STACK_SIZE)
# Test that pushing a MAX_SCRIPT_ELEMENT_SIZE byte stack element is valid, but one longer is not.
add_spender(spenders, "tapscript/pushmaxlimit", leaf="t25", **common, **SINGLE_SIG, failure={"leaf": "t26"}, **ERR_PUSH_LIMIT)
# Test that 999-of-999 multisig works (but 1000-of-1000 triggers stack size limits)
add_spender(spenders, "tapscript/bigmulti", leaf="t33", **common, inputs=big_spend_inputs, num=999, failure={"leaf": "t34", "num": 1000}, **ERR_STACK_SIZE)
# Test that the CLEANSTACK rule is consensus critical in tapscript
add_spender(spenders, "tapscript/cleanstack", leaf="t36", tap=tap, inputs=[b'\x01'], failure={"inputs": [b'\x01', b'\x01']}, **ERR_CLEANSTACK)
# == Test for sigops ratio limit ==
# Given a number n, and a public key pk, functions that produce a (CScript, sigops). Each script takes as
# input a valid signature with the passed pk followed by a dummy push of bytes that are to be dropped, and
# will execute sigops signature checks.
SIGOPS_RATIO_SCRIPTS = [
    # n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIG.
lambda n, pk: (CScript([OP_DROP, pk] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_CHECKSIG]), n + 1),
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGVERIFY.
lambda n, pk: (CScript([OP_DROP, pk, OP_0, OP_IF, OP_2DUP, OP_CHECKSIGVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_2, OP_SWAP, OP_CHECKSIGADD, OP_3, OP_EQUAL]), n + 1),
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIG.
lambda n, pk: (CScript([random_bytes(220), OP_2DROP, pk, OP_1, OP_NOTIF, OP_2DUP, OP_CHECKSIG, OP_VERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_4, OP_SWAP, OP_CHECKSIGADD, OP_5, OP_EQUAL]), n + 1),
    # n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGADD.
lambda n, pk: (CScript([OP_DROP, pk, OP_1, OP_IF, OP_ELSE, OP_2DUP, OP_6, OP_SWAP, OP_CHECKSIGADD, OP_7, OP_EQUALVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_8, OP_SWAP, OP_CHECKSIGADD, OP_9, OP_EQUAL]), n + 1),
# n+1 OP_CHECKSIGs, but also one OP_CHECKSIG with an empty signature.
lambda n, pk: (CScript([OP_DROP, OP_0, pk, OP_CHECKSIG, OP_NOT, OP_VERIFY, pk] + [OP_2DUP, OP_CHECKSIG, OP_VERIFY] * n + [OP_CHECKSIG]), n + 1),
# n OP_CHECKSIGADDs and 1 OP_CHECKSIG, but also an OP_CHECKSIGADD with an empty signature.
lambda n, pk: (CScript([OP_DROP, OP_0, OP_10, pk, OP_CHECKSIGADD, OP_10, OP_EQUALVERIFY, pk] + [OP_2DUP, OP_16, OP_SWAP, OP_CHECKSIGADD, b'\x11', OP_EQUALVERIFY] * n + [OP_CHECKSIG]), n + 1),
]
for annex in [None, bytes([ANNEX_TAG]) + random_bytes(random.randrange(1000))]:
for hashtype in [SIGHASH_DEFAULT, SIGHASH_ALL]:
for pubkey in [pubs[1], random_bytes(random.choice([x for x in range(2, 81) if x != 32]))]:
for fn_num, fn in enumerate(SIGOPS_RATIO_SCRIPTS):
merkledepth = random.randrange(129)
def predict_sigops_ratio(n, dummy_size):
"""Predict whether spending fn(n, pubkey) with dummy_size will pass the ratio test."""
script, sigops = fn(n, pubkey)
# Predict the size of the witness for a given choice of n
stacklen_size = 1
sig_size = 64 + (hashtype != SIGHASH_DEFAULT)
siglen_size = 1
dummylen_size = 1 + 2 * (dummy_size >= 253)
script_size = len(script)
scriptlen_size = 1 + 2 * (script_size >= 253)
control_size = 33 + 32 * merkledepth
controllen_size = 1 + 2 * (control_size >= 253)
annex_size = 0 if annex is None else len(annex)
annexlen_size = 0 if annex is None else 1 + 2 * (annex_size >= 253)
witsize = stacklen_size + sig_size + siglen_size + dummy_size + dummylen_size + script_size + scriptlen_size + control_size + controllen_size + annex_size + annexlen_size
# sigops ratio test
return witsize + 50 >= 50 * sigops
# Make sure n is high enough that with empty dummy, the script is not valid
n = 0
while predict_sigops_ratio(n, 0):
n += 1
# But allow picking a bit higher still
n += random.randrange(5)
# Now pick dummy size *just* large enough that the overall construction passes
dummylen = 0
while not predict_sigops_ratio(n, dummylen):
dummylen += 1
scripts = [("s", fn(n, pubkey)[0])]
for _ in range(merkledepth):
scripts = [scripts, random.choice(PARTNER_MERKLE_FN)]
tap = taproot_construct(pubs[0], scripts)
standard = annex is None and dummylen <= 80 and len(pubkey) == 32
add_spender(spenders, "tapscript/sigopsratio_%i" % fn_num, tap=tap, leaf="s", annex=annex, hashtype=hashtype, key=secs[1], inputs=[getter("sign"), random_bytes(dummylen)], standard=standard, failure={"inputs": [getter("sign"), random_bytes(dummylen - 1)]}, **ERR_SIGOPS_RATIO)
# Future leaf versions
for leafver in range(0, 0x100, 2):
if leafver == LEAF_VERSION_TAPSCRIPT or leafver == ANNEX_TAG:
# Skip the defined LEAF_VERSION_TAPSCRIPT, and the ANNEX_TAG which is not usable as leaf version
continue
scripts = [
("bare_c0", CScript([OP_NOP])),
("bare_unkver", CScript([OP_NOP]), leafver),
("return_c0", CScript([OP_RETURN])),
("return_unkver", CScript([OP_RETURN]), leafver),
("undecodable_c0", CScript([OP_PUSHDATA1])),
("undecodable_unkver", CScript([OP_PUSHDATA1]), leafver),
("bigpush_c0", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP])),
("bigpush_unkver", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP]), leafver),
("1001push_c0", CScript([OP_0] * 1001)),
("1001push_unkver", CScript([OP_0] * 1001), leafver),
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "unkver/bare", standard=False, tap=tap, leaf="bare_unkver", failure={"leaf": "bare_c0"}, **ERR_CLEANSTACK)
add_spender(spenders, "unkver/return", standard=False, tap=tap, leaf="return_unkver", failure={"leaf": "return_c0"}, **ERR_OP_RETURN)
add_spender(spenders, "unkver/undecodable", standard=False, tap=tap, leaf="undecodable_unkver", failure={"leaf": "undecodable_c0"}, **ERR_UNDECODABLE)
add_spender(spenders, "unkver/bigpush", standard=False, tap=tap, leaf="bigpush_unkver", failure={"leaf": "bigpush_c0"}, **ERR_PUSH_LIMIT)
add_spender(spenders, "unkver/1001push", standard=False, tap=tap, leaf="1001push_unkver", failure={"leaf": "1001push_c0"}, **ERR_STACK_SIZE)
add_spender(spenders, "unkver/1001inputs", standard=False, tap=tap, leaf="bare_unkver", inputs=[b'']*1001, failure={"leaf": "bare_c0"}, **ERR_STACK_SIZE)
# OP_SUCCESSx tests.
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
for opval in range(76, 0x100):
opcode = CScriptOp(opval)
if not is_op_success(opcode):
continue
scripts = [
("bare_success", CScript([opcode])),
("bare_nop", CScript([OP_NOP])),
("unexecif_success", CScript([OP_0, OP_IF, opcode, OP_ENDIF])),
("unexecif_nop", CScript([OP_0, OP_IF, OP_NOP, OP_ENDIF])),
("return_success", CScript([OP_RETURN, opcode])),
("return_nop", CScript([OP_RETURN, OP_NOP])),
("undecodable_success", CScript([opcode, OP_PUSHDATA1])),
("undecodable_nop", CScript([OP_NOP, OP_PUSHDATA1])),
("undecodable_bypassed_success", CScript([OP_PUSHDATA1, OP_2, opcode])),
("bigpush_success", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, opcode])),
("bigpush_nop", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, OP_NOP])),
("1001push_success", CScript([OP_0] * 1001 + [opcode])),
("1001push_nop", CScript([OP_0] * 1001 + [OP_NOP])),
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "opsuccess/bare", standard=False, tap=tap, leaf="bare_success", failure={"leaf": "bare_nop"}, **ERR_CLEANSTACK)
add_spender(spenders, "opsuccess/unexecif", standard=False, tap=tap, leaf="unexecif_success", failure={"leaf": "unexecif_nop"}, **ERR_CLEANSTACK)
add_spender(spenders, "opsuccess/return", standard=False, tap=tap, leaf="return_success", failure={"leaf": "return_nop"}, **ERR_OP_RETURN)
add_spender(spenders, "opsuccess/undecodable", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_nop"}, **ERR_UNDECODABLE)
add_spender(spenders, "opsuccess/undecodable_bypass", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_bypassed_success"}, **ERR_UNDECODABLE)
add_spender(spenders, "opsuccess/bigpush", standard=False, tap=tap, leaf="bigpush_success", failure={"leaf": "bigpush_nop"}, **ERR_PUSH_LIMIT)
add_spender(spenders, "opsuccess/1001push", standard=False, tap=tap, leaf="1001push_success", failure={"leaf": "1001push_nop"}, **ERR_STACK_SIZE)
add_spender(spenders, "opsuccess/1001inputs", standard=False, tap=tap, leaf="bare_success", inputs=[b'']*1001, failure={"leaf": "bare_nop"}, **ERR_STACK_SIZE)
# Non-OP_SUCCESSx (verify that those aren't accidentally treated as OP_SUCCESSx)
for opval in range(0, 0x100):
opcode = CScriptOp(opval)
if is_op_success(opcode):
continue
scripts = [
("normal", CScript([OP_RETURN, opcode] + [OP_NOP] * 75)),
("op_success", CScript([OP_RETURN, CScriptOp(0x50)]))
]
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "alwaysvalid/notsuccessx", tap=tap, leaf="op_success", inputs=[], standard=False, failure={"leaf": "normal"}) # err_msg differs based on opcode
# == Legacy tests ==
# Also add a few legacy spends into the mix, so that transactions which combine taproot and pre-taproot spends get tested too.
for compressed in [False, True]:
eckey1 = ECKey()
eckey1.set(generate_privkey(), compressed)
pubkey1 = eckey1.get_pubkey().get_bytes()
eckey2 = ECKey()
eckey2.set(generate_privkey(), compressed)
for p2sh in [False, True]:
for witv0 in [False, True]:
for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]:
standard = (hashtype in VALID_SIGHASHES_ECDSA) and (compressed or not witv0)
add_spender(spenders, "legacy/pk-wrongkey", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([pubkey1, OP_CHECKSIG]), **SINGLE_SIG, key=eckey1, failure={"key": eckey2}, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
add_spender(spenders, "legacy/pkh-sighashflip", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, pkh=pubkey1, key=eckey1, **SIGHASH_BITFLIP, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
# Verify that OP_CHECKSIGADD wasn't accidentally added to pre-taproot validation logic.
for p2sh in [False, True]:
for witv0 in [False, True]:
for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]:
standard = hashtype in VALID_SIGHASHES_ECDSA and (p2sh or witv0)
add_spender(spenders, "compat/nocsa", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([OP_IF, OP_11, pubkey1, OP_CHECKSIGADD, OP_12, OP_EQUAL, OP_ELSE, pubkey1, OP_CHECKSIG, OP_ENDIF]), key=eckey1, sigops_weight=4-3*witv0, inputs=[getter("sign"), b''], failure={"inputs": [getter("sign"), b'\x01']}, **ERR_UNDECODABLE)
return spenders
def spenders_taproot_inactive():
    """Spenders for testing that pre-activation Taproot rules don't apply.

    Builds a small Taproot output with three leaves (a plain pk script, a leaf
    with an undefined leaf version, and a script containing an unexecuted
    OP_SUCCESS opcode) and returns spenders that must all be accepted by
    consensus (but be nonstandard) while Taproot is inactive, regardless of
    signature validity.
    """
    spenders = []

    sec = generate_privkey()
    pub, _ = compute_xonly_pubkey(sec)
    scripts = [
        ("pk", CScript([pub, OP_CHECKSIG])),
        # 0xc2 is an undefined (future) leaf version.
        ("future_leaf", CScript([pub, OP_CHECKSIG]), 0xc2),
        # CScriptOp(0x50) is an OP_SUCCESSx opcode, inside an unexecuted branch.
        ("op_success", CScript([pub, OP_CHECKSIG, OP_0, OP_IF, CScriptOp(0x50), OP_ENDIF])),
    ]
    tap = taproot_construct(pub, scripts)

    # Test that keypath spending is valid & non-standard, regardless of validity.
    add_spender(spenders, "inactive/keypath_valid", key=sec, tap=tap, standard=False)
    add_spender(spenders, "inactive/keypath_invalidsig", key=sec, tap=tap, standard=False, sighash=bitflipper(default_sighash))
    add_spender(spenders, "inactive/keypath_empty", key=sec, tap=tap, standard=False, witness=[])

    # Same for scriptpath spending (and features like annex, leaf versions, or OP_SUCCESS don't change this)
    add_spender(spenders, "inactive/scriptpath_valid", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")])
    add_spender(spenders, "inactive/scriptpath_invalidsig", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
    add_spender(spenders, "inactive/scriptpath_invalidcb", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")], controlblock=bitflipper(default_controlblock))
    add_spender(spenders, "inactive/scriptpath_valid_unkleaf", key=sec, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")])
    add_spender(spenders, "inactive/scriptpath_invalid_unkleaf", key=sec, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
    add_spender(spenders, "inactive/scriptpath_valid_opsuccess", key=sec, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")])
    # Fix: this spender uses a bit-flipped (invalid) sighash, so label it "invalid"
    # to match the valid/invalid naming pattern of the unkleaf pair above; the
    # original duplicated the "valid_opsuccess" label.
    add_spender(spenders, "inactive/scriptpath_invalid_opsuccess", key=sec, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))

    return spenders
# Consensus validation flags to use in dumps for tests with "legacy/" or "inactive/" prefix
# (i.e. everything except the TAPROOT flag).
LEGACY_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY"
# Consensus validation flags to use in dumps for all other tests.
TAPROOT_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY,TAPROOT"
def dump_json_test(tx, input_utxos, idx, success, failure):
    """Write a JSON dump describing one spending test case to $TEST_DUMP_DIR.

    tx: the spending CTransaction; input_utxos: list of UTXOData consumed by it;
    idx: index of the input being described; success/failure: (scriptSig, witness
    stack) pairs for the passing and failing variants (either may be None).
    """
    spender = input_utxos[idx].spender

    # Tests with "legacy/" or "inactive/" prefixes are validated without the TAPROOT flag.
    if spender.comment.startswith("legacy/") or spender.comment.startswith("inactive/"):
        flags = LEGACY_FLAGS
    else:
        flags = TAPROOT_FLAGS

    fields = [
        ("tx", tx.serialize().hex()),
        ("prevouts", [utxo.output.serialize().hex() for utxo in input_utxos]),
        ("index", idx),
        ("flags", flags),
        ("comment", spender.comment),
    ]
    # The "final" field indicates that a spend should be always valid, even with more validation
    # flags enabled than the listed ones. Standardness serves as a proxy for this (which gives a
    # conservative underestimate).
    if spender.is_standard:
        fields.append(("final", True))

    def dump_witness(wit):
        # wit is a (scriptSig, witness stack) pair.
        return OrderedDict([("scriptSig", wit[0].hex()), ("witness", [elem.hex() for elem in wit[1]])])

    if success is not None:
        fields.append(("success", dump_witness(success)))
    if failure is not None:
        fields.append(("failure", dump_witness(failure)))

    # Write the dump to $TEST_DUMP_DIR/x/xyz... where x,y,z,... are the SHA1 sum of the dump
    # (which makes the file naming scheme compatible with fuzzing infrastructure).
    dump = json.dumps(OrderedDict(fields)) + ",\n"
    sha1 = hashlib.sha1(dump.encode("utf-8")).hexdigest()
    dirname = os.environ.get("TEST_DUMP_DIR", ".") + ("/%s" % sha1[0])
    os.makedirs(dirname, exist_ok=True)
    with open(dirname + ("/%s" % sha1), 'w', encoding="utf8") as f:
        f.write(dump)
# Data type to keep track of UTXOs: where they were created (outpoint/output)
# and how to spend them (spender).
UTXOData = namedtuple('UTXOData', ['outpoint', 'output', 'spender'])
class TaprootTest(BitcoinTestFramework):
    """Randomized functional test of Taproot consensus/standardness rules.

    Runs the generated "spenders" against node 1 (Taproot always active) and
    node 0 (Taproot never active, via vbparams), submitting both standard
    transactions through the mempool and nonstandard ones directly in blocks.
    """

    def add_options(self, parser):
        # --dumptests enables writing JSON dumps of every generated case (see dump_json_test).
        parser.add_argument("--dumptests", dest="dump_tests", default=False, action="store_true",
                            help="Dump generated test cases to directory set by TEST_DUMP_DIR environment variable")

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True
        self.extra_args = [
            ["-par=1", "-vbparams=taproot:@-2:@-2"], # Node 0 has Taproot never active
            ["-par=1"] # Node 1 has taproot always active
        ]

    def block_submit(self, node, txs, msg, err_msg, cb_pubkey=None, fees=0, sigops_weight=0, witness=False, accept=False):
        """Mine txs into a block, submit it to node, and check the expected outcome.

        If accept is True the block must become the new tip (and the tracked tip
        state is advanced); otherwise the tip must be unchanged. If err_msg is
        given, it must appear in the submitblock response. msg is used in assert
        messages to identify the failing test case.
        """
        # Deplete block of any non-tapscript sigops using a single additional 0-value coinbase output.
        # It is not impossible to fit enough tapscript sigops to hit the old 80k limit without
        # busting txin-level limits. We simply have to account for the p2pk outputs in all
        # transactions.
        extra_output_script = CScript([OP_CHECKSIG]*((MAX_BLOCK_SIGOPS_WEIGHT - sigops_weight) // WITNESS_SCALE_FACTOR))
        block = create_block(self.tip, create_coinbase(self.lastblockheight + 1, pubkey=cb_pubkey, extra_output_script=extra_output_script, fees=fees), self.lastblocktime + 1)
        block.nVersion = 4
        for tx in txs:
            tx.rehash()
            block.vtx.append(tx)
        block.hashMerkleRoot = block.calc_merkle_root()
        # Only add a witness commitment when requested (short-circuit conditional call).
        witness and add_witness_commitment(block)
        block.rehash()
        block.solve()
        block_response = node.submitblock(block.serialize().hex())
        if err_msg is not None:
            assert block_response is not None and err_msg in block_response, "Missing error message '%s' from block response '%s': %s" % (err_msg, "(None)" if block_response is None else block_response, msg)
        if (accept):
            assert node.getbestblockhash() == block.hash, "Failed to accept: %s (response: %s)" % (msg, block_response)
            # Advance the tracked chain state only on acceptance.
            self.tip = block.sha256
            self.lastblockhash = block.hash
            self.lastblocktime += 1
            self.lastblockheight += 1
        else:
            assert node.getbestblockhash() == self.lastblockhash, "Failed to reject: " + msg

    def test_spenders(self, node, spenders, input_counts):
        """Run randomized tests with a number of "spenders".

        Steps:
            1) Generate an appropriate UTXO for each spender to test spend conditions
            2) Generate 100 random addresses of all wallet types: pkh/sh_wpkh/wpkh
            3) Select random number of inputs from (1)
            4) Select random number of addresses from (2) as outputs

        Each spender embodies a test; in a large randomized test, it is verified
        that toggling the valid argument to each lambda toggles the validity of
        the transaction. This is accomplished by constructing transactions consisting
        of all valid inputs, except one invalid one.
        """
        # Construct a bunch of sPKs that send coins back to the host wallet
        self.log.info("- Constructing addresses for returning coins")
        host_spks = []
        host_pubkeys = []
        for i in range(16):
            addr = node.getnewaddress(address_type=random.choice(["legacy", "p2sh-segwit", "bech32"]))
            info = node.getaddressinfo(addr)
            spk = bytes.fromhex(info['scriptPubKey'])
            host_spks.append(spk)
            host_pubkeys.append(bytes.fromhex(info['pubkey']))

        # Initialize variables used by block_submit().
        self.lastblockhash = node.getbestblockhash()
        self.tip = int(self.lastblockhash, 16)
        block = node.getblock(self.lastblockhash)
        self.lastblockheight = block['height']
        self.lastblocktime = block['time']

        # Create transactions spending up to 50 of the wallet's inputs, with one output for each spender, and
        # one change output at the end. The transaction is constructed on the Python side to enable
        # having multiple outputs to the same address and outputs with no assigned address. The wallet
        # is then asked to sign it through signrawtransactionwithwallet, and then added to a block on the
        # Python side (to bypass standardness rules).
        self.log.info("- Creating test UTXOs...")
        random.shuffle(spenders)
        normal_utxos = []
        mismatching_utxos = [] # UTXOs with input that requires mismatching output position
        done = 0
        while done < len(spenders):
            # Compute how many UTXOs to create with this transaction
            count_this_tx = min(len(spenders) - done, (len(spenders) + 4) // 5, 10000)
            fund_tx = CTransaction()
            # Add the 50 highest-value inputs
            unspents = node.listunspent()
            random.shuffle(unspents)
            unspents.sort(key=lambda x: int(x["amount"] * 100000000), reverse=True)
            if len(unspents) > 50:
                unspents = unspents[:50]
            random.shuffle(unspents)
            balance = 0
            for unspent in unspents:
                balance += int(unspent["amount"] * 100000000)
                txid = int(unspent["txid"], 16)
                fund_tx.vin.append(CTxIn(COutPoint(txid, int(unspent["vout"])), CScript()))
            # Add outputs
            cur_progress = done / len(spenders)
            next_progress = (done + count_this_tx) / len(spenders)
            # Target change shrinks gradually so later funding txs keep enough balance.
            change_goal = (1.0 - 0.6 * next_progress) / (1.0 - 0.6 * cur_progress) * balance
            self.log.debug("Create %i UTXOs in a transaction spending %i inputs worth %.8f (sending ~%.8f to change)" % (count_this_tx, len(unspents), balance * 0.00000001, change_goal * 0.00000001))
            for i in range(count_this_tx):
                avg = (balance - change_goal) / (count_this_tx - i)
                # Randomize each output amount within +/-15% of the average.
                amount = int(random.randrange(int(avg*0.85 + 0.5), int(avg*1.15 + 0.5)) + 0.5)
                balance -= amount
                fund_tx.vout.append(CTxOut(amount, spenders[done + i].script))
            # Add change
            fund_tx.vout.append(CTxOut(balance - 10000, random.choice(host_spks)))
            # Ask the wallet to sign
            ss = BytesIO(bytes.fromhex(node.signrawtransactionwithwallet(ToHex(fund_tx))["hex"]))
            fund_tx.deserialize(ss)
            # Construct UTXOData entries
            fund_tx.rehash()
            for i in range(count_this_tx):
                utxodata = UTXOData(outpoint=COutPoint(fund_tx.sha256, i), output=fund_tx.vout[i], spender=spenders[done])
                if utxodata.spender.need_vin_vout_mismatch:
                    mismatching_utxos.append(utxodata)
                else:
                    normal_utxos.append(utxodata)
                done += 1
            # Mine into a block
            self.block_submit(node, [fund_tx], "Funding tx", None, random.choice(host_pubkeys), 10000, MAX_BLOCK_SIGOPS_WEIGHT, True, True)

        # Consume groups of choice(input_coins) from utxos in a tx, testing the spenders.
        self.log.info("- Running %i spending tests" % done)
        random.shuffle(normal_utxos)
        random.shuffle(mismatching_utxos)
        assert done == len(normal_utxos) + len(mismatching_utxos)
        left = done
        while left:
            # Construct CTransaction with random nVersion, nLocktime
            tx = CTransaction()
            tx.nVersion = random.choice([1, 2, random.randint(-0x80000000, 0x7fffffff)])
            min_sequence = (tx.nVersion != 1 and tx.nVersion != 0) * 0x80000000 # The minimum sequence number to disable relative locktime
            if random.choice([True, False]):
                tx.nLockTime = random.randrange(LOCKTIME_THRESHOLD, self.lastblocktime - 7200) # all absolute locktimes in the past
            else:
                tx.nLockTime = random.randrange(self.lastblockheight + 1) # all block heights in the past
            # Decide how many UTXOs to test with.
            acceptable = [n for n in input_counts if n <= left and (left - n > max(input_counts) or (left - n) in [0] + input_counts)]
            num_inputs = random.choice(acceptable)
            # If we have UTXOs that require mismatching inputs/outputs left, include exactly one of those
            # unless there is only one normal UTXO left (as tests with mismatching UTXOs require at least one
            # normal UTXO to go in the first position), and we don't want to run out of normal UTXOs.
            input_utxos = []
            while len(mismatching_utxos) and (len(input_utxos) == 0 or len(normal_utxos) == 1):
                input_utxos.append(mismatching_utxos.pop())
                left -= 1
            # Top up until we hit num_inputs (but include at least one normal UTXO always).
            for _ in range(max(1, num_inputs - len(input_utxos))):
                input_utxos.append(normal_utxos.pop())
                left -= 1
            # The first input cannot require a mismatching output (as there is at least one output).
            while True:
                random.shuffle(input_utxos)
                if not input_utxos[0].spender.need_vin_vout_mismatch:
                    break
            first_mismatch_input = None
            for i in range(len(input_utxos)):
                if input_utxos[i].spender.need_vin_vout_mismatch:
                    first_mismatch_input = i
            assert first_mismatch_input is None or first_mismatch_input > 0
            # Decide fee, and add CTxIns to tx.
            amount = sum(utxo.output.nValue for utxo in input_utxos)
            fee = min(random.randrange(MIN_FEE * 2, MIN_FEE * 4), amount - DUST_LIMIT) # 10000-20000 sat fee
            in_value = amount - fee
            tx.vin = [CTxIn(outpoint=utxo.outpoint, nSequence=random.randint(min_sequence, 0xffffffff)) for utxo in input_utxos]
            tx.wit.vtxinwit = [CTxInWitness() for _ in range(len(input_utxos))]
            sigops_weight = sum(utxo.spender.sigops_weight for utxo in input_utxos)
            self.log.debug("Test: %s" % (", ".join(utxo.spender.comment for utxo in input_utxos)))
            # Add 1 to 4 random outputs (but constrained by inputs that require mismatching outputs)
            num_outputs = random.choice(range(1, 1 + min(4, 4 if first_mismatch_input is None else first_mismatch_input)))
            assert in_value >= 0 and fee - num_outputs * DUST_LIMIT >= MIN_FEE
            for i in range(num_outputs):
                tx.vout.append(CTxOut())
                if in_value <= DUST_LIMIT:
                    tx.vout[-1].nValue = DUST_LIMIT
                elif i < num_outputs - 1:
                    tx.vout[-1].nValue = in_value
                else:
                    tx.vout[-1].nValue = random.randint(DUST_LIMIT, in_value)
                in_value -= tx.vout[-1].nValue
                tx.vout[-1].scriptPubKey = random.choice(host_spks)
                sigops_weight += CScript(tx.vout[-1].scriptPubKey).GetSigOpCount(False) * WITNESS_SCALE_FACTOR
            fee += in_value
            assert fee >= 0
            # Select coinbase pubkey
            cb_pubkey = random.choice(host_pubkeys)
            sigops_weight += 1 * WITNESS_SCALE_FACTOR
            # Precompute one satisfying and one failing scriptSig/witness for each input.
            input_data = []
            for i in range(len(input_utxos)):
                fn = input_utxos[i].spender.sat_function
                fail = None
                success = fn(tx, i, [utxo.output for utxo in input_utxos], True)
                if not input_utxos[i].spender.no_fail:
                    fail = fn(tx, i, [utxo.output for utxo in input_utxos], False)
                input_data.append((fail, success))
                if self.options.dump_tests:
                    dump_json_test(tx, input_utxos, i, success, fail)
            # Sign each input incorrectly once on each complete signing pass, except the very last.
            for fail_input in list(range(len(input_utxos))) + [None]:
                # Skip trying to fail at spending something that can't be made to fail.
                if fail_input is not None and input_utxos[fail_input].spender.no_fail:
                    continue
                # Expected message with each input failure, may be None (which is ignored)
                expected_fail_msg = None if fail_input is None else input_utxos[fail_input].spender.err_msg
                # Fill inputs/witnesses
                for i in range(len(input_utxos)):
                    tx.vin[i].scriptSig = input_data[i][i != fail_input][0]
                    tx.wit.vtxinwit[i].scriptWitness.stack = input_data[i][i != fail_input][1]
                # Submit to mempool to check standardness
                is_standard_tx = fail_input is None and all(utxo.spender.is_standard for utxo in input_utxos) and tx.nVersion >= 1 and tx.nVersion <= 2
                tx.rehash()
                msg = ','.join(utxo.spender.comment + ("*" if n == fail_input else "") for n, utxo in enumerate(input_utxos))
                if is_standard_tx:
                    node.sendrawtransaction(tx.serialize().hex(), 0)
                    assert node.getmempoolentry(tx.hash) is not None, "Failed to accept into mempool: " + msg
                else:
                    assert_raises_rpc_error(-26, None, node.sendrawtransaction, tx.serialize().hex(), 0)
                # Submit in a block
                self.block_submit(node, [tx], msg, witness=True, accept=fail_input is None, cb_pubkey=cb_pubkey, fees=fee, sigops_weight=sigops_weight, err_msg=expected_fail_msg)
            # Log progress roughly every 200 completed tests.
            if (len(spenders) - left) // 200 > (len(spenders) - left - len(input_utxos)) // 200:
                self.log.info(" - %i tests done" % (len(spenders) - left))

        assert left == 0
        assert len(normal_utxos) == 0
        assert len(mismatching_utxos) == 0
        self.log.info(" - Done")

    def run_test(self):
        """Run post-activation spenders on node 1, then pre-activation spenders on node 0."""
        # Post-taproot activation tests go first (pre-taproot tests' blocks are invalid post-taproot).
        self.log.info("Post-activation tests...")
        self.nodes[1].generate(101)
        self.test_spenders(self.nodes[1], spenders_taproot_active(), input_counts=[1, 2, 2, 2, 2, 3])
        # Transfer value of the largest 500 coins to pre-taproot node.
        addr = self.nodes[0].getnewaddress()
        unsp = self.nodes[1].listunspent()
        unsp = sorted(unsp, key=lambda i: i['amount'], reverse=True)
        unsp = unsp[:500]
        rawtx = self.nodes[1].createrawtransaction(
            inputs=[{
                'txid': i['txid'],
                'vout': i['vout']
            } for i in unsp],
            outputs={addr: sum(i['amount'] for i in unsp)}
        )
        rawtx = self.nodes[1].signrawtransactionwithwallet(rawtx)['hex']
        # Mine a block with the transaction
        block = create_block(tmpl=self.nodes[1].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS), txlist=[rawtx])
        add_witness_commitment(block)
        block.rehash()
        block.solve()
        assert_equal(None, self.nodes[1].submitblock(block.serialize().hex()))
        self.sync_blocks()
        # Pre-taproot activation tests.
        self.log.info("Pre-activation tests...")
        # Run each test twice; once in isolation, and once combined with others. Testing in isolation
        # means that the standardness is verified in every test (as combined transactions are only standard
        # when all their inputs are standard).
        self.test_spenders(self.nodes[0], spenders_taproot_inactive(), input_counts=[1])
        self.test_spenders(self.nodes[0], spenders_taproot_inactive(), input_counts=[2, 3])
if __name__ == '__main__':
    # Script entry point: run the Taproot functional test.
    TaprootTest().main()
| 58.866397 | 363 | 0.670128 |
from test_framework.blocktools import (
create_coinbase,
create_block,
add_witness_commitment,
MAX_BLOCK_SIGOPS_WEIGHT,
NORMAL_GBT_REQUEST_PARAMS,
WITNESS_SCALE_FACTOR,
)
from test_framework.messages import (
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
ToHex,
)
from test_framework.script import (
ANNEX_TAG,
CScript,
CScriptNum,
CScriptOp,
LEAF_VERSION_TAPSCRIPT,
LegacySignatureHash,
LOCKTIME_THRESHOLD,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_2,
OP_3,
OP_4,
OP_5,
OP_6,
OP_7,
OP_8,
OP_9,
OP_10,
OP_11,
OP_12,
OP_16,
OP_2DROP,
OP_2DUP,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_CHECKSIG,
OP_CHECKSIGADD,
OP_CHECKSIGVERIFY,
OP_CODESEPARATOR,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_HASH160,
OP_IF,
OP_NOP,
OP_NOT,
OP_NOTIF,
OP_PUSHDATA1,
OP_RETURN,
OP_SWAP,
OP_VERIFY,
SIGHASH_DEFAULT,
SIGHASH_ALL,
SIGHASH_NONE,
SIGHASH_SINGLE,
SIGHASH_ANYONECANPAY,
SegwitV0SignatureHash,
TaprootSignatureHash,
is_op_success,
taproot_construct,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_raises_rpc_error, assert_equal
from test_framework.key import generate_privkey, compute_xonly_pubkey, sign_schnorr, tweak_add_privkey, ECKey
from test_framework.address import (
hash160,
sha256,
)
from collections import OrderedDict, namedtuple
from io import BytesIO
import json
import hashlib
import os
import random
def deep_eval(ctx, expr):
    """Fully evaluate an expression: call it (repeatedly) while it is callable,
    passing the context, and recursively evaluate the elements of a list result."""
    result = expr
    while callable(result):
        result = result(ctx)
    if isinstance(result, list):
        result = [deep_eval(ctx, item) for item in result]
    return result
Final = namedtuple("Final", "value")
def get(ctx, name):
    """Evaluate the named entry of a context dict, caching the result in place.

    The entry is deep-evaluated on first access and stored back wrapped in
    Final so later lookups return the cached value.
    """
    assert name in ctx, "Missing '%s' in context" % name
    entry = ctx[name]
    if isinstance(entry, Final):
        return entry.value
    entry = Final(deep_eval(ctx, entry))
    ctx[name] = entry
    return entry.value
def getter(name):
    """Return an expression that, when evaluated, looks up `name` in the context."""
    def fetch(ctx):
        return get(ctx, name)
    return fetch
def override(expr, **kwargs):
    """Return an expression that evaluates `expr` in a context with the given
    entries replaced by the keyword arguments."""
    def overridden(ctx):
        merged = {**ctx, **kwargs}
        return deep_eval(merged, expr)
    return overridden
def default_hashtype(ctx):
    """Default "hashtype": SIGHASH_DEFAULT for taproot spends, SIGHASH_ALL otherwise."""
    if get(ctx, "mode") == "taproot":
        return SIGHASH_DEFAULT
    return SIGHASH_ALL
def default_tapleaf(ctx):
    """Default "tapleaf": the leaf of "tap" selected by the "leaf" context entry."""
    leaves = get(ctx, "tap").leaves
    return leaves[get(ctx, "leaf")]
def default_script_taproot(ctx):
    """Default "script_taproot": the script of the selected tapleaf."""
    return get(ctx, "tapleaf").script
def default_leafversion(ctx):
    """Default "leafversion": the leaf version of the selected tapleaf."""
    return get(ctx, "tapleaf").version
def default_negflag(ctx):
    """Default "negflag": the negation flag recorded in "tap"."""
    return get(ctx, "tap").negflag
def default_pubkey_inner(ctx):
    """Default "pubkey_inner": the inner public key recorded in "tap"."""
    return get(ctx, "tap").inner_pubkey
def default_merklebranch(ctx):
    """Default "merklebranch": the Merkle branch of the selected tapleaf."""
    return get(ctx, "tapleaf").merklebranch
def default_controlblock(ctx):
    """Default "controlblock": (leafversion + negflag) byte, inner pubkey, Merkle branch."""
    first_byte = get(ctx, "leafversion") + get(ctx, "negflag")
    return bytes([first_byte]) + get(ctx, "pubkey_inner") + get(ctx, "merklebranch")
def default_sighash(ctx):
    """Default "sighash": compute the signature message for the spend being built.

    Dispatches on the "mode" context entry: TaprootSignatureHash (key path or
    script path), SegwitV0SignatureHash, or LegacySignatureHash.
    """
    tx = get(ctx, "tx")
    idx = get(ctx, "idx")
    hashtype = get(ctx, "hashtype_actual")
    mode = get(ctx, "mode")
    if mode == "taproot":
        # Taproot sighash commits to all spent UTXOs and the optional annex.
        utxos = get(ctx, "utxos")
        annex = get(ctx, "annex")
        if get(ctx, "leaf") is not None:
            # Script path: also commits to the script, leaf version, and codeseparator position.
            codeseppos = get(ctx, "codeseppos")
            leaf_ver = get(ctx, "leafversion")
            script = get(ctx, "script_taproot")
            return TaprootSignatureHash(tx, utxos, hashtype, idx, scriptpath=True, script=script, leaf_ver=leaf_ver, codeseparator_pos=codeseppos, annex=annex)
        else:
            return TaprootSignatureHash(tx, utxos, hashtype, idx, scriptpath=False, annex=annex)
    elif mode == "witv0":
        # Segwit v0 sighash commits to the amount of the spent output.
        scriptcode = get(ctx, "scriptcode")
        utxos = get(ctx, "utxos")
        return SegwitV0SignatureHash(scriptcode, tx, idx, hashtype, utxos[idx].nValue)
    else:
        # Legacy (pre-segwit); LegacySignatureHash returns a (hash, err) pair.
        scriptcode = get(ctx, "scriptcode")
        return LegacySignatureHash(scriptcode, tx, idx, hashtype)[0]
def default_tweak(ctx):
    """Default "tweak": the taproot tweak for key path spends; None for script path."""
    if get(ctx, "leaf") is not None:
        return None
    return get(ctx, "tap").tweak
def default_key_tweaked(ctx):
    """Default "key_tweaked": the signing key, with the tweak applied when one is set."""
    base_key = get(ctx, "key")
    tweak = get(ctx, "tweak")
    if tweak is None:
        return base_key
    return tweak_add_privkey(base_key, tweak)
def default_signature(ctx):
    """Default "signature": a Schnorr signature (taproot mode) or ECDSA signature
    (witv0/legacy modes) over the "sighash" context entry, without hashtype byte."""
    msg = get(ctx, "sighash")
    if get(ctx, "mode") != "taproot":
        return get(ctx, "key").sign_ecdsa(msg)
    return sign_schnorr(get(ctx, "key_tweaked"), msg, flip_r=get(ctx, "flag_flip_r"), flip_p=get(ctx, "flag_flip_p"))
def default_hashtype_actual(ctx):
    """Default "hashtype_actual": the requested hashtype, except that in taproot
    mode SIGHASH_SINGLE without a matching output is turned into SIGHASH_NONE
    (keeping the ANYONECANPAY bit)."""
    hashtype = get(ctx, "hashtype")
    if get(ctx, "mode") != "taproot":
        return hashtype
    idx = get(ctx, "idx")
    tx = get(ctx, "tx")
    single_without_output = (hashtype & 3 == SIGHASH_SINGLE) and idx >= len(tx.vout)
    if single_without_output:
        return (hashtype & ~3) | SIGHASH_NONE
    return hashtype
def default_bytes_hashtype(ctx):
    """Default "bytes_hashtype": the hashtype as a 1-byte suffix, or empty for 0 (SIGHASH_DEFAULT)."""
    hashtype = get(ctx, "hashtype_actual")
    return bytes([hashtype]) if hashtype != 0 else b""
def default_sign(ctx):
    """Default "sign": the signature with the hashtype byte (if any) appended."""
    sig = get(ctx, "signature")
    return sig + get(ctx, "bytes_hashtype")
def default_inputs_keypath(ctx):
    """Default "inputs_keypath": a key path spend needs only the signature on the stack."""
    sig = get(ctx, "sign")
    return [sig]
def default_witness_taproot(ctx):
    """Default "witness_taproot": the key path inputs, or the script path inputs
    followed by script and control block; the annex, when set, comes last."""
    annex = get(ctx, "annex")
    tail = [] if annex is None else [annex]
    if get(ctx, "leaf") is None:
        return get(ctx, "inputs_keypath") + tail
    script = bytes(get(ctx, "script_taproot"))
    control = get(ctx, "controlblock")
    return get(ctx, "inputs") + [script, control] + tail
def default_witness_witv0(ctx):
    """Default "witness_witv0": the script inputs, plus the witness script for P2WSH spends."""
    witness_script = get(ctx, "script_witv0")
    stack = get(ctx, "inputs")
    if witness_script is None:
        return stack
    return stack + [witness_script]
def default_witness(ctx):
    """Default "witness": dispatch to the taproot or witv0 witness; legacy spends have none."""
    mode = get(ctx, "mode")
    if mode == "taproot":
        return get(ctx, "witness_taproot")
    if mode == "witv0":
        return get(ctx, "witness_witv0")
    return []
def default_scriptsig(ctx):
    """Default "scriptsig": script inputs (legacy mode only), plus the P2SH
    redeemscript when "script_p2sh" is set (any mode)."""
    elems = []
    if get(ctx, "mode") == "legacy":
        elems = get(ctx, "inputs")
    redeemscript = get(ctx, "script_p2sh")
    if redeemscript is not None:
        elems += [bytes(redeemscript)]
    return elems
# The default context used by spend(); every entry is an expression evaluated
# lazily through get()/deep_eval(), and any entry can be overridden per test.
DEFAULT_CONTEXT = {
    # == The main expressions to evaluate; overridden for failure tests. ==
    # The overall witness stack, as a list of bytes objects.
    "witness": default_witness,
    # The overall scriptSig, as a list of elements to concatenate.
    "scriptsig": default_scriptsig,
    # The witness stack for spending a taproot output.
    "witness_taproot": default_witness_taproot,
    # The witness stack for spending a P2WPKH/P2WSH output.
    "witness_witv0": default_witness_witv0,
    # The script inputs for a taproot key path spend.
    "inputs_keypath": default_inputs_keypath,
    # The actual hashtype to use (usually equal to hashtype, but in taproot SIGHASH_SINGLE is not always allowed).
    "hashtype_actual": default_hashtype_actual,
    # The bytes object for a full signature (including hashtype byte, if needed).
    "bytes_hashtype": default_bytes_hashtype,
    # A full script signature (bytes including hashtype, if needed)
    "sign": default_sign,
    # An ECDSA or Schnorr signature (excluding hashtype byte).
    "signature": default_signature,
    # The 32-byte tweaked key (equal to key for script path spends, or key+tweak for key path spends).
    "key_tweaked": default_key_tweaked,
    # The tweak to use (None for script path spends, the actual tweak for key path spends).
    "tweak": default_tweak,
    # The sighash value (32 bytes)
    "sighash": default_sighash,
    # The information about the chosen script path spend (TaprootLeafInfo object).
    "tapleaf": default_tapleaf,
    # The script to push, and include in the sighash, for a taproot script path spend.
    "script_taproot": default_script_taproot,
    # The inner pubkey for a taproot script path spend (32 bytes).
    "pubkey_inner": default_pubkey_inner,
    # The negation flag of the inner pubkey for a taproot script path spend.
    "negflag": default_negflag,
    # The leaf version to include in the sighash (this does not affect the one in the control block).
    "leafversion": default_leafversion,
    # The Merkle path to include in the control block for a script path spend.
    "merklebranch": default_merklebranch,
    # The control block to push for a taproot script path spend.
    "controlblock": default_controlblock,
    # Whether to produce signatures with invalid P sign (Schnorr signatures only).
    "flag_flip_p": False,
    # Whether to produce signatures with invalid R sign (Schnorr signatures only).
    "flag_flip_r": False,
    # == Parameters that can be changed without invalidating, but do have a default: ==
    # The hashtype (as an integer).
    "hashtype": default_hashtype,
    # The annex (only when mode=="taproot").
    "annex": None,
    # The codeseparator position (only when mode=="taproot").
    "codeseppos": -1,
    # The redeemscript to add to the scriptSig (if P2SH; None implies not P2SH).
    "script_p2sh": None,
    # The script to add to the witness in (if P2WSH; None implies P2WPKH)
    "script_witv0": None,
    # The leaf to use in taproot spends (if script path spend; None implies key path spend).
    "leaf": None,
    # The input arguments to provide to the executed script
    "inputs": [],
    # == Parameters to be set before evaluation: ==
    # - mode: what spending style to use ("taproot", "witv0", or "legacy").
    # - key: the (untweaked) private key to sign with (ECKey object for ECDSA, 32 bytes for Schnorr).
    # - tap: the TaprootInfo object (see taproot_construct; needed in mode=="taproot").
    # - tx: the transaction to sign.
    # - utxos: the UTXOs being spent (needed in mode=="witv0" and mode=="taproot").
    # - idx: the input position being signed.
    # - scriptcode: the scriptcode to include in legacy and witv0 sighashes.
}
def flatten(lst):
    """Recursively flatten nested lists into a single flat list, preserving order."""
    flat = []
    for item in lst:
        if isinstance(item, list):
            flat.extend(flatten(item))
        else:
            flat.append(item)
    return flat
def spend(tx, idx, utxos, **kwargs):
    """Build the scriptSig and witness stack for input `idx` of `tx`.

    `utxos` is the list of outputs being spent by tx. Any aspect of the signing
    process can be overridden via kwargs, which are layered on top of
    DEFAULT_CONTEXT. Returns (scriptsig, witness_stack); tx is not modified.
    """
    ctx = {**DEFAULT_CONTEXT, "tx":tx, "idx":idx, "utxos":utxos, **kwargs}

    def to_script(elem):
        # CScript elements are embedded as-is; anything else becomes a single push.
        if isinstance(elem, CScript):
            return elem
        else:
            return CScript([elem])

    scriptsig_list = flatten(get(ctx, "scriptsig"))
    scriptsig = CScript(b"".join(bytes(to_script(elem)) for elem in scriptsig_list))
    witness_stack = flatten(get(ctx, "witness"))
    return (scriptsig, witness_stack)
# === Spender objects ===
#
# Each spender is a tuple of:
# - A scriptPubKey which is to be spent from (CScript)
# - A comment describing the test (string)
# - Whether the spending (on itself) is expected to be standard (bool)
# - A tx-signing lambda returning (scriptsig, witness_stack), taking as inputs:
#   - A transaction to sign (CTransaction)
#   - An input position (int)
#   - The spent UTXOs by this transaction (list of CTxOut)
#   - Whether to produce a valid spend (bool)
# - A string with an expected error message for failure case if known
# - The (pre-taproot) sigops weight consumed by a successful spend
# - Whether this spend cannot fail
# - Whether this test demands being placed in a txin with no corresponding txout (for testing SIGHASH_SINGLE behavior)
# The namedtuple fields correspond one-to-one to the items in the list above.
Spender = namedtuple("Spender", "script,comment,is_standard,sat_function,err_msg,sigops_weight,no_fail,need_vin_vout_mismatch")
def make_spender(comment, *, tap=None, witv0=False, script=None, pkh=None, p2sh=False, spk_mutate_pre_p2sh=None, failure=None, standard=True, err_msg=None, sigops_weight=0, need_vin_vout_mismatch=False, **kwargs):
    """Construct a Spender object using the context signing framework.

    * comment: a descriptive string for the test this spender is part of.
    * tap: a TaprootInfo object (taproot mode); mutually exclusive with witv0/script/pkh combos below.
    * witv0: use segwit v0 mode (P2WPKH when pkh is given, P2WSH when script is given).
    * script / pkh: the witness/redeem script or public key to spend (legacy mode when neither witv0 nor tap).
    * p2sh: wrap the resulting scriptPubKey in P2SH.
    * spk_mutate_pre_p2sh: optional function applied to the scriptPubKey before P2SH wrapping.
    * failure: context overrides that produce the (invalid) failure variant of the spend.
    * standard / err_msg / sigops_weight / need_vin_vout_mismatch: stored on the Spender as-is.
    * kwargs: additional context entries layered over the computed defaults.
    """
    conf = dict()
    # Compute scriptPubKey and set useful defaults based on the inputs.
    if witv0:
        assert tap is None
        conf["mode"] = "witv0"
        if pkh is not None:
            # P2WPKH
            assert script is None
            pubkeyhash = hash160(pkh)
            spk = CScript([OP_0, pubkeyhash])
            conf["scriptcode"] = CScript([OP_DUP, OP_HASH160, pubkeyhash, OP_EQUALVERIFY, OP_CHECKSIG])
            conf["script_witv0"] = None
            conf["inputs"] = [getter("sign"), pkh]
        elif script is not None:
            # P2WSH
            spk = CScript([OP_0, sha256(script)])
            conf["scriptcode"] = script
            conf["script_witv0"] = script
        else:
            assert False
    elif tap is None:
        conf["mode"] = "legacy"
        if pkh is not None:
            # P2PKH
            assert script is None
            pubkeyhash = hash160(pkh)
            spk = CScript([OP_DUP, OP_HASH160, pubkeyhash, OP_EQUALVERIFY, OP_CHECKSIG])
            conf["scriptcode"] = spk
            conf["inputs"] = [getter("sign"), pkh]
        elif script is not None:
            # bare
            spk = script
            conf["scriptcode"] = script
        else:
            assert False
    else:
        assert script is None
        conf["mode"] = "taproot"
        conf["tap"] = tap
        spk = tap.scriptPubKey
    if spk_mutate_pre_p2sh is not None:
        spk = spk_mutate_pre_p2sh(spk)
    if p2sh:
        # P2SH wrapper can be combined with anything else
        conf["script_p2sh"] = spk
        spk = CScript([OP_HASH160, hash160(spk), OP_EQUAL])
    conf = {**conf, **kwargs}

    def sat_fn(tx, idx, utxos, valid):
        # Produce either the valid spend, or the spend with the failure overrides applied.
        if valid:
            return spend(tx, idx, utxos, **conf)
        else:
            assert failure is not None
            return spend(tx, idx, utxos, **{**conf, **failure})

    return Spender(script=spk, comment=comment, is_standard=standard, sat_function=sat_fn, err_msg=err_msg, sigops_weight=sigops_weight, no_fail=failure is None, need_vin_vout_mismatch=need_vin_vout_mismatch)
def add_spender(spenders, *args, **kwargs):
    """Construct a Spender via make_spender and append it to the spenders list."""
    spender = make_spender(*args, **kwargs)
    spenders.append(spender)
# === Helpers for the test ===
def random_checksig_style(pubkey):
    """Create a random CHECKSIG*-style tapscript that succeeds with only the
    valid signature on the witness stack, returned as bytes.

    Randomly picks OP_CHECKSIG, OP_CHECKSIGVERIFY, or OP_CHECKSIGADD and wraps
    it so the script leaves a true value on the stack.
    """
    # BUGFIX: a stray early `return bytes(CScript([pubkey, OP_CHECKSIG]))` made
    # everything below unreachable, so the CHECKSIGVERIFY/CHECKSIGADD variants
    # were never exercised. It has been removed.
    opcode = random.choice([OP_CHECKSIG, OP_CHECKSIGVERIFY, OP_CHECKSIGADD])
    if (opcode == OP_CHECKSIGVERIFY):
        ret = CScript([pubkey, opcode, OP_1])
    elif (opcode == OP_CHECKSIGADD):
        # CHECKSIGADD increments the accumulator by 1 on a valid signature.
        num = random.choice([0, 0x7fffffff, -0x7fffffff])
        ret = CScript([num, pubkey, opcode, num + 1, OP_EQUAL])
    else:
        ret = CScript([pubkey, opcode])
    return bytes(ret)
def random_bytes(n):
    """Return n pseudorandom bytes drawn from the `random` module (one getrandbits(8) per byte)."""
    return bytes([random.getrandbits(8) for _ in range(n)])
def bitflipper(expr):
    """Wrap a bytes-valued expression so that one uniformly random bit of its result is flipped."""
    def flipped(ctx):
        data = deep_eval(ctx, expr)
        assert isinstance(data, bytes)
        mask = 1 << random.randrange(len(data) * 8)
        return (int.from_bytes(data, 'little') ^ mask).to_bytes(len(data), 'little')
    return flipped
def zero_appender(expr):
    """Wrap a bytes-valued expression so a zero byte is appended to its result."""
    def appended(ctx):
        return deep_eval(ctx, expr) + b"\x00"
    return appended
def byte_popper(expr):
    """Wrap a bytes-valued expression so the last byte of its result is dropped."""
    def popped(ctx):
        return deep_eval(ctx, expr)[:-1]
    return popped
# Expected error strings
# Each dict is spread into make_spender/add_spender calls to set the `err_msg`
# field — the error message expected when the spender's failure case is used.
ERR_SIG_SIZE = {"err_msg": "Invalid Schnorr signature size"}
ERR_SIG_HASHTYPE = {"err_msg": "Invalid Schnorr signature hash type"}
ERR_SIG_SCHNORR = {"err_msg": "Invalid Schnorr signature"}
ERR_OP_RETURN = {"err_msg": "OP_RETURN was encountered"}
ERR_CONTROLBLOCK_SIZE = {"err_msg": "Invalid Taproot control block size"}
ERR_WITNESS_PROGRAM_MISMATCH = {"err_msg": "Witness program hash mismatch"}
ERR_PUSH_LIMIT = {"err_msg": "Push value size limit exceeded"}
ERR_DISABLED_OPCODE = {"err_msg": "Attempted to use a disabled opcode"}
ERR_TAPSCRIPT_CHECKMULTISIG = {"err_msg": "OP_CHECKMULTISIG(VERIFY) is not available in tapscript"}
ERR_MINIMALIF = {"err_msg": "OP_IF/NOTIF argument must be minimal in tapscript"}
ERR_UNKNOWN_PUBKEY = {"err_msg": "Public key is neither compressed or uncompressed"}
ERR_STACK_SIZE = {"err_msg": "Stack size limit exceeded"}
ERR_CLEANSTACK = {"err_msg": "Stack size must be exactly one after execution"}
ERR_STACK_EMPTY = {"err_msg": "Operation not valid with the current stack size"}
ERR_SIGOPS_RATIO = {"err_msg": "Too much signature validation relative to witness weight"}
ERR_UNDECODABLE = {"err_msg": "Opcode missing or not understood"}
ERR_NO_SUCCESS = {"err_msg": "Script evaluated without error but finished with a false/empty top stack element"}
ERR_EMPTY_WITNESS = {"err_msg": "Witness program was passed an empty witness"}
ERR_CHECKSIGVERIFY = {"err_msg": "Script failed an OP_CHECKSIGVERIFY operation"}
# All valid sighash byte values for ECDSA (legacy/segwit v0) signatures.
VALID_SIGHASHES_ECDSA = [
    SIGHASH_ALL,
    SIGHASH_NONE,
    SIGHASH_SINGLE,
    SIGHASH_ANYONECANPAY + SIGHASH_ALL,
    SIGHASH_ANYONECANPAY + SIGHASH_NONE,
    SIGHASH_ANYONECANPAY + SIGHASH_SINGLE
]
# Taproot additionally permits SIGHASH_DEFAULT (0).
VALID_SIGHASHES_TAPROOT = [SIGHASH_DEFAULT] + VALID_SIGHASHES_ECDSA
# The SIGHASH_SINGLE variants, which require a matching output (see default_hashtype_actual).
VALID_SIGHASHES_TAPROOT_SINGLE = [
    SIGHASH_SINGLE,
    SIGHASH_ANYONECANPAY + SIGHASH_SINGLE
]
VALID_SIGHASHES_TAPROOT_NO_SINGLE = [h for h in VALID_SIGHASHES_TAPROOT if h not in VALID_SIGHASHES_TAPROOT_SINGLE]
# Common context-override shorthands for constructing spender failure cases.
SIGHASH_BITFLIP = {"failure": {"sighash": bitflipper(default_sighash)}}
SIG_POP_BYTE = {"failure": {"sign": byte_popper(default_sign)}}
SINGLE_SIG = {"inputs": [getter("sign")]}
SIG_ADD_ZERO = {"failure": {"sign": zero_appender(default_sign)}}
# Output/fee amounts used when constructing test transactions.
# NOTE(review): units assumed to be satoshis from context — confirm against callers.
DUST_LIMIT = 600
MIN_FEE = 50000
# === Actual test cases ===
def spenders_taproot_active():
secs = [generate_privkey() for _ in range(8)]
pubs = [compute_xonly_pubkey(sec)[0] for sec in secs]
spenders = []
# == Tests for BIP340 signature validation. ==
# These are primarily tested through the test vectors implemented in libsecp256k1, and in src/tests/key_tests.cpp.
# Some things are tested programmatically as well here.
tap = taproot_construct(pubs[0])
# Test with key with bit flipped.
add_spender(spenders, "sig/key", tap=tap, key=secs[0], failure={"key_tweaked": bitflipper(default_key_tweaked)}, **ERR_SIG_SCHNORR)
# Test with sighash with bit flipped.
add_spender(spenders, "sig/sighash", tap=tap, key=secs[0], failure={"sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
# Test with invalid R sign.
add_spender(spenders, "sig/flip_r", tap=tap, key=secs[0], failure={"flag_flip_r": True}, **ERR_SIG_SCHNORR)
# Test with invalid P sign.
add_spender(spenders, "sig/flip_p", tap=tap, key=secs[0], failure={"flag_flip_p": True}, **ERR_SIG_SCHNORR)
# Test with signature with bit flipped.
add_spender(spenders, "sig/bitflip", tap=tap, key=secs[0], failure={"signature": bitflipper(default_signature)}, **ERR_SIG_SCHNORR)
# == Tests for signature hashing ==
# Run all tests once with no annex, and once with a valid random annex.
for annex in [None, lambda _: bytes([ANNEX_TAG]) + random_bytes(random.randrange(0, 250))]:
# Non-empty annex is non-standard
no_annex = annex is None
# Sighash mutation tests (test all sighash combinations)
for hashtype in VALID_SIGHASHES_TAPROOT:
common = {"annex": annex, "hashtype": hashtype, "standard": no_annex}
# Pure pubkey
tap = taproot_construct(pubs[0])
add_spender(spenders, "sighash/purepk", tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Pubkey/P2PK script combination
scripts = [("s0", CScript(random_checksig_style(pubs[1])))]
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "sighash/keypath_hashtype_%x" % hashtype, tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/scriptpath_hashtype_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Test SIGHASH_SINGLE behavior in combination with mismatching outputs
if hashtype in VALID_SIGHASHES_TAPROOT_SINGLE:
add_spender(spenders, "sighash/keypath_hashtype_mis_%x" % hashtype, tap=tap, key=secs[0], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True)
add_spender(spenders, "sighash/scriptpath_hashtype_mis_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), **SINGLE_SIG, failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True)
# Test OP_CODESEPARATOR impact on sighashing.
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
common = {"annex": annex, "hashtype": hashtype, "standard": no_annex}
scripts = [
("pk_codesep", CScript(random_checksig_style(pubs[1]) + bytes([OP_CODESEPARATOR]))), # codesep after checksig
("codesep_pk", CScript(bytes([OP_CODESEPARATOR]) + random_checksig_style(pubs[1]))), # codesep before checksig
("branched_codesep", CScript([random_bytes(random.randrange(511)), OP_DROP, OP_IF, OP_CODESEPARATOR, pubs[0], OP_ELSE, OP_CODESEPARATOR, pubs[1], OP_ENDIF, OP_CHECKSIG])), # branch dependent codesep
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "sighash/pk_codesep", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/codesep_pk", tap=tap, leaf="codesep_pk", key=secs[1], codeseppos=0, **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/branched_codesep/left", tap=tap, leaf="branched_codesep", key=secs[0], codeseppos=3, **common, inputs=[getter("sign"), b'\x01'], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/branched_codesep/right", tap=tap, leaf="branched_codesep", key=secs[1], codeseppos=6, **common, inputs=[getter("sign"), b''], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Reusing the scripts above, test that various features affect the sighash.
add_spender(spenders, "sighash/annex", tap=tap, leaf="pk_codesep", key=secs[1], hashtype=hashtype, standard=False, **SINGLE_SIG, annex=bytes([ANNEX_TAG]), failure={"sighash": override(default_sighash, annex=None)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/script", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, script_taproot=tap.leaves["codesep_pk"].script)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/leafver", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leafversion=random.choice([x & 0xFE for x in range(0x100) if x & 0xFE != 0xC0]))}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leaf=None)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/keypath", tap=tap, key=secs[0], **common, failure={"sighash": override(default_sighash, leaf="pk_codesep")}, **ERR_SIG_SCHNORR)
# Test that invalid hashtypes don't work, both in key path and script path spends
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
for invalid_hashtype in [x for x in range(0x100) if x not in VALID_SIGHASHES_TAPROOT]:
add_spender(spenders, "sighash/keypath_unk_hashtype_%x" % invalid_hashtype, tap=tap, key=secs[0], hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/scriptpath_unk_hashtype_%x" % invalid_hashtype, tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/hashtype0_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/hashtype0_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/hashtype1_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype0to1_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype0to1_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1to0_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1to0_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
for hashtype in [SIGHASH_DEFAULT, random.choice(VALID_SIGHASHES_TAPROOT)]:
scripts = [
("csv", CScript([pubs[2], OP_CHECKSIGVERIFY, OP_1])),
("cs_pos", CScript([pubs[2], OP_CHECKSIG])),
("csa_pos", CScript([OP_0, pubs[2], OP_CHECKSIGADD, OP_1, OP_EQUAL])),
("cs_neg", CScript([pubs[2], OP_CHECKSIG, OP_NOT])),
("csa_neg", CScript([OP_2, pubs[2], OP_CHECKSIGADD, OP_2, OP_EQUAL]))
]
random.shuffle(scripts)
tap = taproot_construct(pubs[3], scripts)
add_spender(spenders, "siglen/empty_keypath", tap=tap, key=secs[3], hashtype=hashtype, failure={"sign": b""}, **ERR_SIG_SIZE)
add_spender(spenders, "siglen/empty_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_CHECKSIGVERIFY)
add_spender(spenders, "siglen/empty_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS)
add_spender(spenders, "siglen/empty_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS)
add_spender(spenders, "siglen/empty_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random_bytes(random.randrange(1, 63))}, **ERR_SIG_SIZE)
add_spender(spenders, "siglen/empty_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random_bytes(random.randrange(66, 100))}, **ERR_SIG_SIZE)
add_spender(spenders, "siglen/padzero_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/popbyte_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/invalid_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "siglen/invalid_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
for p2sh in [False, True]:
for witver in range(1, 17):
for witlen in [20, 31, 32, 33]:
def mutate(spk):
prog = spk[2:]
assert len(prog) == 32
if witlen < 32:
prog = prog[0:witlen]
elif witlen > 32:
prog += bytes([0 for _ in range(witlen - 32)])
return CScript([CScriptOp.encode_op_n(witver), prog])
scripts = [("s0", CScript([pubs[0], OP_CHECKSIG])), ("dummy", CScript([OP_RETURN]))]
tap = taproot_construct(pubs[1], scripts)
if not p2sh and witver == 1 and witlen == 32:
add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, failure={"leaf": "dummy"}, **ERR_OP_RETURN)
else:
add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], standard=False)
add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, standard=False)
PARTNER_MERKLE_FN = [
lambda h: h,
lambda h: bytes([0 for _ in range(32)]),
lambda h: bytes([0xff for _ in range(32)]),
lambda h: (int.from_bytes(h, 'big') - 1).to_bytes(32, 'big'),
lambda h: (int.from_bytes(h, 'big') + 1).to_bytes(32, 'big'),
lambda h: (int.from_bytes(h, 'little') - 1).to_bytes(32, 'big'),
lambda h: (int.from_bytes(h, 'little') + 1).to_bytes(32, 'little'),
lambda h: (int.from_bytes(h, 'little') ^ (1 << random.randrange(256))).to_bytes(32, 'little')
]
scripts = [("128deep", CScript([pubs[0], OP_CHECKSIG])), [("129deep", CScript([pubs[0], OP_CHECKSIG])), random.choice(PARTNER_MERKLE_FN)]]
for _ in range(127):
scripts = [scripts, random.choice(PARTNER_MERKLE_FN)]
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "spendpath/merklelimit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"leaf": "129deep"}, **ERR_CONTROLBLOCK_SIZE)
# Test that flipping the negation bit invalidates spends.
add_spender(spenders, "spendpath/negflag", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"negflag": lambda ctx: 1 - default_negflag(ctx)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that bitflips in the Merkle branch invalidate it.
add_spender(spenders, "spendpath/bitflipmerkle", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"merklebranch": bitflipper(default_merklebranch)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that bitflips in the inner pubkey invalidate it.
add_spender(spenders, "spendpath/bitflippubkey", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"pubkey_inner": bitflipper(default_pubkey_inner)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that empty witnesses are invalid.
add_spender(spenders, "spendpath/emptywit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"witness": []}, **ERR_EMPTY_WITNESS)
# Test that adding garbage to the control block invalidates it.
add_spender(spenders, "spendpath/padlongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random_bytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block invalidates it.
add_spender(spenders, "spendpath/trunclongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE)
scripts = [("s", CScript([pubs[0], OP_CHECKSIG]))]
tap = taproot_construct(pubs[1], scripts)
# Test that adding garbage to the control block invalidates it.
add_spender(spenders, "spendpath/padshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random_bytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block invalidates it.
add_spender(spenders, "spendpath/truncshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block to 1 byte ("-1 Merkle length") invalidates it
add_spender(spenders, "spendpath/trunc1shortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:1]}, **ERR_CONTROLBLOCK_SIZE)
# == Test BIP342 edge cases ==
csa_low_val = random.randrange(0, 17) # Within range for OP_n
csa_low_result = csa_low_val + 1
csa_high_val = random.randrange(17, 100) if random.getrandbits(1) else random.randrange(-100, -1) # Outside OP_n range
csa_high_result = csa_high_val + 1
OVERSIZE_NUMBER = 2**31
assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER))), 6)
assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER-1))), 5)
big_choices = []
big_scriptops = []
for i in range(1000):
r = random.randrange(len(pubs))
big_choices.append(r)
big_scriptops += [pubs[r], OP_CHECKSIGVERIFY]
def big_spend_inputs(ctx):
# Instead of signing 999 times, precompute signatures for every (key, hashtype) combination
sigs = {}
for ht in VALID_SIGHASHES_TAPROOT:
for k in range(len(pubs)):
sigs[(k, ht)] = override(default_sign, hashtype=ht, key=secs[k])(ctx)
num = get(ctx, "num")
return [sigs[(big_choices[i], random.choice(VALID_SIGHASHES_TAPROOT))] for i in range(num - 1, -1, -1)]
# Various BIP342 features
scripts = [
# 0) drop stack element and OP_CHECKSIG
("t0", CScript([OP_DROP, pubs[1], OP_CHECKSIG])),
# 1) normal OP_CHECKSIG
("t1", CScript([pubs[1], OP_CHECKSIG])),
# 2) normal OP_CHECKSIGVERIFY
("t2", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1])),
# 3) Hypothetical OP_CHECKMULTISIG script that takes a single sig as input
("t3", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIG])),
# 4) Hypothetical OP_CHECKMULTISIGVERIFY script that takes a single sig as input
("t4", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIGVERIFY, OP_1])),
# 5) OP_IF script that needs a true input
("t5", CScript([OP_IF, pubs[1], OP_CHECKSIG, OP_ELSE, OP_RETURN, OP_ENDIF])),
# 6) OP_NOTIF script that needs a true input
("t6", CScript([OP_NOTIF, OP_RETURN, OP_ELSE, pubs[1], OP_CHECKSIG, OP_ENDIF])),
# 7) OP_CHECKSIG with an empty key
("t7", CScript([OP_0, OP_CHECKSIG])),
# 8) OP_CHECKSIGVERIFY with an empty key
("t8", CScript([OP_0, OP_CHECKSIGVERIFY, OP_1])),
# 9) normal OP_CHECKSIGADD that also ensures return value is correct
("t9", CScript([csa_low_val, pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 10) OP_CHECKSIGADD with empty key
("t10", CScript([csa_low_val, OP_0, OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 11) OP_CHECKSIGADD with missing counter stack element
("t11", CScript([pubs[1], OP_CHECKSIGADD, OP_1, OP_EQUAL])),
# 12) OP_CHECKSIG that needs invalid signature
("t12", CScript([pubs[1], OP_CHECKSIGVERIFY, pubs[0], OP_CHECKSIG, OP_NOT])),
# 13) OP_CHECKSIG with empty key that needs invalid signature
("t13", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_CHECKSIG, OP_NOT])),
# 14) OP_CHECKSIGADD that needs invalid signature
("t14", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, pubs[0], OP_CHECKSIGADD, OP_NOT])),
# 15) OP_CHECKSIGADD with empty key that needs invalid signature
("t15", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGADD, OP_NOT])),
# 16) OP_CHECKSIG with unknown pubkey type
("t16", CScript([OP_1, OP_CHECKSIG])),
# 17) OP_CHECKSIGADD with unknown pubkey type
("t17", CScript([OP_0, OP_1, OP_CHECKSIGADD])),
# 18) OP_CHECKSIGVERIFY with unknown pubkey type
("t18", CScript([OP_1, OP_CHECKSIGVERIFY, OP_1])),
# 19) script longer than 10000 bytes and over 201 non-push opcodes
("t19", CScript([OP_0, OP_0, OP_2DROP] * 10001 + [pubs[1], OP_CHECKSIG])),
# 20) OP_CHECKSIGVERIFY with empty key
("t20", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGVERIFY, OP_1])),
# 21) Script that grows the stack to 1000 elements
("t21", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 999 + [OP_DROP] * 999)),
# 22) Script that grows the stack to 1001 elements
("t22", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 1000 + [OP_DROP] * 1000)),
# 23) Script that expects an input stack of 1000 elements
("t23", CScript([OP_DROP] * 999 + [pubs[1], OP_CHECKSIG])),
# 24) Script that expects an input stack of 1001 elements
("t24", CScript([OP_DROP] * 1000 + [pubs[1], OP_CHECKSIG])),
# 25) Script that pushes a MAX_SCRIPT_ELEMENT_SIZE-bytes element
("t25", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE), OP_DROP, pubs[1], OP_CHECKSIG])),
# 26) Script that pushes a (MAX_SCRIPT_ELEMENT_SIZE+1)-bytes element
("t26", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, pubs[1], OP_CHECKSIG])),
# 27) CHECKSIGADD that must fail because numeric argument number is >4 bytes
("t27", CScript([CScriptNum(OVERSIZE_NUMBER), pubs[1], OP_CHECKSIGADD])),
# 28) Pushes random CScriptNum value, checks OP_CHECKSIGADD result
("t28", CScript([csa_high_val, pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])),
# 29) CHECKSIGADD that succeeds with proper sig because numeric argument number is <=4 bytes
("t29", CScript([CScriptNum(OVERSIZE_NUMBER-1), pubs[1], OP_CHECKSIGADD])),
# 30) Variant of t1 with "normal" 33-byte pubkey
("t30", CScript([b'\x03' + pubs[1], OP_CHECKSIG])),
# 31) Variant of t2 with "normal" 33-byte pubkey
("t31", CScript([b'\x02' + pubs[1], OP_CHECKSIGVERIFY, OP_1])),
# 32) Variant of t28 with "normal" 33-byte pubkey
("t32", CScript([csa_high_val, b'\x03' + pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])),
# 33) 999-of-999 multisig
("t33", CScript(big_scriptops[:1998] + [OP_1])),
# 34) 1000-of-1000 multisig
("t34", CScript(big_scriptops[:2000] + [OP_1])),
# 35) Variant of t9 that uses a non-minimally encoded input arg
("t35", CScript([bytes([csa_low_val]), pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 36) Empty script
("t36", CScript([])),
]
# Add many dummies to test huge trees
for j in range(100000):
scripts.append((None, CScript([OP_RETURN, random.randrange(100000)])))
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
common = {
"hashtype": hashtype,
"key": secs[1],
"tap": tap,
}
# Test that MAX_SCRIPT_ELEMENT_SIZE byte stack element inputs are valid, but not one more (and 80 bytes is standard but 81 is not).
add_spender(spenders, "tapscript/inputmaxlimit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random_bytes(MAX_SCRIPT_ELEMENT_SIZE)], failure={"inputs": [getter("sign"), random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1)]}, **ERR_PUSH_LIMIT)
add_spender(spenders, "tapscript/input80limit", leaf="t0", **common, inputs=[getter("sign"), random_bytes(80)])
add_spender(spenders, "tapscript/input81limit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random_bytes(81)])
# Test that OP_CHECKMULTISIG and OP_CHECKMULTISIGVERIFY cause failure, but OP_CHECKSIG and OP_CHECKSIGVERIFY work.
add_spender(spenders, "tapscript/disabled_checkmultisig", leaf="t1", **common, **SINGLE_SIG, failure={"leaf": "t3"}, **ERR_TAPSCRIPT_CHECKMULTISIG)
add_spender(spenders, "tapscript/disabled_checkmultisigverify", leaf="t2", **common, **SINGLE_SIG, failure={"leaf": "t4"}, **ERR_TAPSCRIPT_CHECKMULTISIG)
# Test that OP_IF and OP_NOTIF do not accept non-0x01 as truth value (the MINIMALIF rule is consensus in Tapscript)
add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x02']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x03']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0001']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0100']}, **ERR_MINIMALIF)
# Test that 1-byte public keys (which are unknown) are acceptable but nonstandard with unrelated signatures, but 0-byte public keys are not valid.
add_spender(spenders, "tapscript/unkpk/checksig", leaf="t16", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/unkpk/checksigadd", leaf="t17", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/unkpk/checksigverify", leaf="t18", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY)
# Test that 33-byte public keys (which are unknown) are acceptable but nonstandard with valid signatures, but normal pubkeys are not valid in that case.
add_spender(spenders, "tapscript/oldpk/checksig", leaf="t30", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t1"}, **ERR_SIG_SCHNORR)
add_spender(spenders, "tapscript/oldpk/checksigadd", leaf="t31", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t2"}, **ERR_SIG_SCHNORR)
add_spender(spenders, "tapscript/oldpk/checksigverify", leaf="t32", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t28"}, **ERR_SIG_SCHNORR)
# Test that 0-byte public keys are not acceptable.
add_spender(spenders, "tapscript/emptypk/checksig", leaf="t1", **SINGLE_SIG, **common, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigverify", leaf="t2", **SINGLE_SIG, **common, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t35", standard=False, **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
# Test that OP_CHECKSIGADD results are as expected
add_spender(spenders, "tapscript/checksigaddresults", leaf="t28", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error")
add_spender(spenders, "tapscript/checksigaddoversize", leaf="t29", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error")
# Test that OP_CHECKSIGADD requires 3 stack elements.
add_spender(spenders, "tapscript/checksigadd3args", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t11"}, **ERR_STACK_EMPTY)
# Test that empty signatures do not cause script failure in OP_CHECKSIG and OP_CHECKSIGADD (but do fail with empty pubkey, and do fail OP_CHECKSIGVERIFY)
add_spender(spenders, "tapscript/emptysigs/checksig", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t13"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptysigs/nochecksigverify", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t20"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptysigs/checksigadd", leaf="t14", **common, inputs=[b'', getter("sign")], failure={"leaf": "t15"}, **ERR_UNKNOWN_PUBKEY)
# Test that scripts over 10000 bytes (and over 201 non-push ops) are acceptable.
add_spender(spenders, "tapscript/no10000limit", leaf="t19", **SINGLE_SIG, **common)
# Test that a stack size of 1000 elements is permitted, but 1001 isn't.
add_spender(spenders, "tapscript/1000stack", leaf="t21", **SINGLE_SIG, **common, failure={"leaf": "t22"}, **ERR_STACK_SIZE)
add_spender(spenders, "tapscript/1000inputs", leaf="t23", **common, inputs=[getter("sign")] + [b'' for _ in range(999)], failure={"leaf": "t24", "inputs": [getter("sign")] + [b'' for _ in range(1000)]}, **ERR_STACK_SIZE)
# Test that pushing a MAX_SCRIPT_ELEMENT_SIZE byte stack element is valid, but one longer is not.
add_spender(spenders, "tapscript/pushmaxlimit", leaf="t25", **common, **SINGLE_SIG, failure={"leaf": "t26"}, **ERR_PUSH_LIMIT)
# Test that 999-of-999 multisig works (but 1000-of-1000 triggers stack size limits)
add_spender(spenders, "tapscript/bigmulti", leaf="t33", **common, inputs=big_spend_inputs, num=999, failure={"leaf": "t34", "num": 1000}, **ERR_STACK_SIZE)
# Test that the CLEANSTACK rule is consensus critical in tapscript
add_spender(spenders, "tapscript/cleanstack", leaf="t36", tap=tap, inputs=[b'\x01'], failure={"inputs": [b'\x01', b'\x01']}, **ERR_CLEANSTACK)
# == Test for sigops ratio limit ==
# Given a number n, and a public key pk, functions that produce a (CScript, sigops). Each script takes as
# input a valid signature with the passed pk followed by a dummy push of bytes that are to be dropped, and
# will execute sigops signature checks.
SIGOPS_RATIO_SCRIPTS = [
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIG.
lambda n, pk: (CScript([OP_DROP, pk] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_CHECKSIG]), n + 1),
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGVERIFY.
lambda n, pk: (CScript([OP_DROP, pk, OP_0, OP_IF, OP_2DUP, OP_CHECKSIGVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_2, OP_SWAP, OP_CHECKSIGADD, OP_3, OP_EQUAL]), n + 1),
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIG.
lambda n, pk: (CScript([random_bytes(220), OP_2DROP, pk, OP_1, OP_NOTIF, OP_2DUP, OP_CHECKSIG, OP_VERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_4, OP_SWAP, OP_CHECKSIGADD, OP_5, OP_EQUAL]), n + 1),
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGADD.
lambda n, pk: (CScript([OP_DROP, pk, OP_1, OP_IF, OP_ELSE, OP_2DUP, OP_6, OP_SWAP, OP_CHECKSIGADD, OP_7, OP_EQUALVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_8, OP_SWAP, OP_CHECKSIGADD, OP_9, OP_EQUAL]), n + 1),
# n+1 OP_CHECKSIGs, but also one OP_CHECKSIG with an empty signature.
lambda n, pk: (CScript([OP_DROP, OP_0, pk, OP_CHECKSIG, OP_NOT, OP_VERIFY, pk] + [OP_2DUP, OP_CHECKSIG, OP_VERIFY] * n + [OP_CHECKSIG]), n + 1),
# n OP_CHECKSIGADDs and 1 OP_CHECKSIG, but also an OP_CHECKSIGADD with an empty signature.
lambda n, pk: (CScript([OP_DROP, OP_0, OP_10, pk, OP_CHECKSIGADD, OP_10, OP_EQUALVERIFY, pk] + [OP_2DUP, OP_16, OP_SWAP, OP_CHECKSIGADD, b'\x11', OP_EQUALVERIFY] * n + [OP_CHECKSIG]), n + 1),
]
for annex in [None, bytes([ANNEX_TAG]) + random_bytes(random.randrange(1000))]:
for hashtype in [SIGHASH_DEFAULT, SIGHASH_ALL]:
for pubkey in [pubs[1], random_bytes(random.choice([x for x in range(2, 81) if x != 32]))]:
for fn_num, fn in enumerate(SIGOPS_RATIO_SCRIPTS):
merkledepth = random.randrange(129)
def predict_sigops_ratio(n, dummy_size):
script, sigops = fn(n, pubkey)
# Predict the size of the witness for a given choice of n
stacklen_size = 1
sig_size = 64 + (hashtype != SIGHASH_DEFAULT)
siglen_size = 1
dummylen_size = 1 + 2 * (dummy_size >= 253)
script_size = len(script)
scriptlen_size = 1 + 2 * (script_size >= 253)
control_size = 33 + 32 * merkledepth
controllen_size = 1 + 2 * (control_size >= 253)
annex_size = 0 if annex is None else len(annex)
annexlen_size = 0 if annex is None else 1 + 2 * (annex_size >= 253)
witsize = stacklen_size + sig_size + siglen_size + dummy_size + dummylen_size + script_size + scriptlen_size + control_size + controllen_size + annex_size + annexlen_size
# sigops ratio test
return witsize + 50 >= 50 * sigops
# Make sure n is high enough that with empty dummy, the script is not valid
n = 0
while predict_sigops_ratio(n, 0):
n += 1
# But allow picking a bit higher still
n += random.randrange(5)
# Now pick dummy size *just* large enough that the overall construction passes
dummylen = 0
while not predict_sigops_ratio(n, dummylen):
dummylen += 1
scripts = [("s", fn(n, pubkey)[0])]
for _ in range(merkledepth):
scripts = [scripts, random.choice(PARTNER_MERKLE_FN)]
tap = taproot_construct(pubs[0], scripts)
standard = annex is None and dummylen <= 80 and len(pubkey) == 32
add_spender(spenders, "tapscript/sigopsratio_%i" % fn_num, tap=tap, leaf="s", annex=annex, hashtype=hashtype, key=secs[1], inputs=[getter("sign"), random_bytes(dummylen)], standard=standard, failure={"inputs": [getter("sign"), random_bytes(dummylen - 1)]}, **ERR_SIGOPS_RATIO)
# Future leaf versions
for leafver in range(0, 0x100, 2):
if leafver == LEAF_VERSION_TAPSCRIPT or leafver == ANNEX_TAG:
# Skip the defined LEAF_VERSION_TAPSCRIPT, and the ANNEX_TAG which is not usable as leaf version
continue
scripts = [
("bare_c0", CScript([OP_NOP])),
("bare_unkver", CScript([OP_NOP]), leafver),
("return_c0", CScript([OP_RETURN])),
("return_unkver", CScript([OP_RETURN]), leafver),
("undecodable_c0", CScript([OP_PUSHDATA1])),
("undecodable_unkver", CScript([OP_PUSHDATA1]), leafver),
("bigpush_c0", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP])),
("bigpush_unkver", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP]), leafver),
("1001push_c0", CScript([OP_0] * 1001)),
("1001push_unkver", CScript([OP_0] * 1001), leafver),
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "unkver/bare", standard=False, tap=tap, leaf="bare_unkver", failure={"leaf": "bare_c0"}, **ERR_CLEANSTACK)
add_spender(spenders, "unkver/return", standard=False, tap=tap, leaf="return_unkver", failure={"leaf": "return_c0"}, **ERR_OP_RETURN)
add_spender(spenders, "unkver/undecodable", standard=False, tap=tap, leaf="undecodable_unkver", failure={"leaf": "undecodable_c0"}, **ERR_UNDECODABLE)
add_spender(spenders, "unkver/bigpush", standard=False, tap=tap, leaf="bigpush_unkver", failure={"leaf": "bigpush_c0"}, **ERR_PUSH_LIMIT)
add_spender(spenders, "unkver/1001push", standard=False, tap=tap, leaf="1001push_unkver", failure={"leaf": "1001push_c0"}, **ERR_STACK_SIZE)
add_spender(spenders, "unkver/1001inputs", standard=False, tap=tap, leaf="bare_unkver", inputs=[b'']*1001, failure={"leaf": "bare_c0"}, **ERR_STACK_SIZE)
# OP_SUCCESSx tests.
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
for opval in range(76, 0x100):
opcode = CScriptOp(opval)
if not is_op_success(opcode):
continue
scripts = [
("bare_success", CScript([opcode])),
("bare_nop", CScript([OP_NOP])),
("unexecif_success", CScript([OP_0, OP_IF, opcode, OP_ENDIF])),
("unexecif_nop", CScript([OP_0, OP_IF, OP_NOP, OP_ENDIF])),
("return_success", CScript([OP_RETURN, opcode])),
("return_nop", CScript([OP_RETURN, OP_NOP])),
("undecodable_success", CScript([opcode, OP_PUSHDATA1])),
("undecodable_nop", CScript([OP_NOP, OP_PUSHDATA1])),
("undecodable_bypassed_success", CScript([OP_PUSHDATA1, OP_2, opcode])),
("bigpush_success", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, opcode])),
("bigpush_nop", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, OP_NOP])),
("1001push_success", CScript([OP_0] * 1001 + [opcode])),
("1001push_nop", CScript([OP_0] * 1001 + [OP_NOP])),
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "opsuccess/bare", standard=False, tap=tap, leaf="bare_success", failure={"leaf": "bare_nop"}, **ERR_CLEANSTACK)
add_spender(spenders, "opsuccess/unexecif", standard=False, tap=tap, leaf="unexecif_success", failure={"leaf": "unexecif_nop"}, **ERR_CLEANSTACK)
add_spender(spenders, "opsuccess/return", standard=False, tap=tap, leaf="return_success", failure={"leaf": "return_nop"}, **ERR_OP_RETURN)
add_spender(spenders, "opsuccess/undecodable", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_nop"}, **ERR_UNDECODABLE)
add_spender(spenders, "opsuccess/undecodable_bypass", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_bypassed_success"}, **ERR_UNDECODABLE)
add_spender(spenders, "opsuccess/bigpush", standard=False, tap=tap, leaf="bigpush_success", failure={"leaf": "bigpush_nop"}, **ERR_PUSH_LIMIT)
add_spender(spenders, "opsuccess/1001push", standard=False, tap=tap, leaf="1001push_success", failure={"leaf": "1001push_nop"}, **ERR_STACK_SIZE)
add_spender(spenders, "opsuccess/1001inputs", standard=False, tap=tap, leaf="bare_success", inputs=[b'']*1001, failure={"leaf": "bare_nop"}, **ERR_STACK_SIZE)
# Non-OP_SUCCESSx (verify that those aren't accidentally treated as OP_SUCCESSx)
for opval in range(0, 0x100):
opcode = CScriptOp(opval)
if is_op_success(opcode):
continue
scripts = [
("normal", CScript([OP_RETURN, opcode] + [OP_NOP] * 75)),
("op_success", CScript([OP_RETURN, CScriptOp(0x50)]))
]
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "alwaysvalid/notsuccessx", tap=tap, leaf="op_success", inputs=[], standard=False, failure={"leaf": "normal"})
for compressed in [False, True]:
eckey1 = ECKey()
eckey1.set(generate_privkey(), compressed)
pubkey1 = eckey1.get_pubkey().get_bytes()
eckey2 = ECKey()
eckey2.set(generate_privkey(), compressed)
for p2sh in [False, True]:
for witv0 in [False, True]:
for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]:
standard = (hashtype in VALID_SIGHASHES_ECDSA) and (compressed or not witv0)
add_spender(spenders, "legacy/pk-wrongkey", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([pubkey1, OP_CHECKSIG]), **SINGLE_SIG, key=eckey1, failure={"key": eckey2}, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
add_spender(spenders, "legacy/pkh-sighashflip", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, pkh=pubkey1, key=eckey1, **SIGHASH_BITFLIP, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
for p2sh in [False, True]:
for witv0 in [False, True]:
for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]:
standard = hashtype in VALID_SIGHASHES_ECDSA and (p2sh or witv0)
add_spender(spenders, "compat/nocsa", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([OP_IF, OP_11, pubkey1, OP_CHECKSIGADD, OP_12, OP_EQUAL, OP_ELSE, pubkey1, OP_CHECKSIG, OP_ENDIF]), key=eckey1, sigops_weight=4-3*witv0, inputs=[getter("sign"), b''], failure={"inputs": [getter("sign"), b'\x01']}, **ERR_UNDECODABLE)
return spenders
def spenders_taproot_inactive():
    """Produce a list of Spenders for testing taproot-style spends while taproot is inactive."""
    spenders = []

    # One x-only key, used both for the key path and inside every leaf script.
    privkey = generate_privkey()
    xonly_pub, _ = compute_xonly_pubkey(privkey)
    scripts = [
        # Plain CHECKSIG leaf.
        ("pk", CScript([xonly_pub, OP_CHECKSIG])),
        # Same script, but committed under an undefined future leaf version (0xc2).
        ("future_leaf", CScript([xonly_pub, OP_CHECKSIG]), 0xc2),
        # CHECKSIG leaf containing an (unexecuted) OP_SUCCESS opcode (0x50).
        ("op_success", CScript([xonly_pub, OP_CHECKSIG, OP_0, OP_IF, CScriptOp(0x50), OP_ENDIF])),
    ]
    tap = taproot_construct(xonly_pub, scripts)

    # Test that keypath spending is valid & non-standard, regardless of validity.
    add_spender(spenders, "inactive/keypath_valid", key=privkey, tap=tap, standard=False)
    add_spender(spenders, "inactive/keypath_invalidsig", key=privkey, tap=tap, standard=False, sighash=bitflipper(default_sighash))
    add_spender(spenders, "inactive/keypath_empty", key=privkey, tap=tap, standard=False, witness=[])

    # Same for scriptpath spending (and features like annex, leaf versions, or OP_SUCCESS don't change this)
    add_spender(spenders, "inactive/scriptpath_valid", key=privkey, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")])
    add_spender(spenders, "inactive/scriptpath_invalidsig", key=privkey, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
    add_spender(spenders, "inactive/scriptpath_invalidcb", key=privkey, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")], controlblock=bitflipper(default_controlblock))
    add_spender(spenders, "inactive/scriptpath_valid_unkleaf", key=privkey, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")])
    add_spender(spenders, "inactive/scriptpath_invalid_unkleaf", key=privkey, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
    add_spender(spenders, "inactive/scriptpath_valid_opsuccess", key=privkey, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")])
    add_spender(spenders, "inactive/scriptpath_valid_opsuccess", key=privkey, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))

    return spenders
# Script verification flags applied when evaluating pre-taproot ("legacy"/"inactive") spends.
LEGACY_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY"
# The same flag set with TAPROOT validation enabled on top.
TAPROOT_FLAGS = LEGACY_FLAGS + ",TAPROOT"
def dump_json_test(tx, input_utxos, idx, success, failure):
    """Write a JSON test vector for spending input *idx* of *tx* to TEST_DUMP_DIR.

    The vector records the serialized transaction, all prevouts, the script
    verification flags, and the success (and optionally failure) witnesses.
    """
    spender = input_utxos[idx].spender
    # "legacy/" and "inactive/" spenders are validated without the TAPROOT flag.
    if spender.comment.startswith("legacy/") or spender.comment.startswith("inactive/"):
        flags = LEGACY_FLAGS
    else:
        flags = TAPROOT_FLAGS

    def dump_witness(wit):
        # wit is a (scriptSig, witness-stack) pair.
        return OrderedDict([("scriptSig", wit[0].hex()), ("witness", [x.hex() for x in wit[1]])])

    fields = [
        ("tx", tx.serialize().hex()),
        ("prevouts", [utxo.output.serialize().hex() for utxo in input_utxos]),
        ("index", idx),
        ("flags", flags),
        ("comment", spender.comment),
    ]
    # Only standard spends are marked "final".
    if spender.is_standard:
        fields.append(("final", True))
    if success is not None:
        fields.append(("success", dump_witness(success)))
    if failure is not None:
        fields.append(("failure", dump_witness(failure)))
    dump = json.dumps(OrderedDict(fields)) + ",\n"
    # Shard output files into 16 directories keyed by the first hex digit of the dump's SHA1.
    sha1 = hashlib.sha1(dump.encode("utf-8")).hexdigest()
    dirname = os.environ.get("TEST_DUMP_DIR", ".") + ("/%s" % sha1[0])
    os.makedirs(dirname, exist_ok=True)
    with open(dirname + ("/%s" % sha1), 'w', encoding="utf8") as f:
        f.write(dump)
# Bundle of everything needed to spend one created test output: the outpoint to
# spend, the CTxOut it refers to, and the spender describing how to satisfy it.
UTXOData = namedtuple('UTXOData', ['outpoint', 'output', 'spender'])
class TaprootTest(BitcoinTestFramework):
def add_options(self, parser):
    """Register the test's extra command-line options."""
    parser.add_argument(
        "--dumptests",
        dest="dump_tests",
        action="store_true",
        default=False,
        help="Dump generated test cases to directory set by TEST_DUMP_DIR environment variable",
    )
def skip_test_if_missing_module(self):
    # The test funds its spenders from the node's wallet (see test_spenders),
    # so skip when bitcoind was built without wallet support.
    self.skip_if_no_wallet()
def set_test_params(self):
    """Configure the test: two nodes on a fresh chain, with per-node bitcoind arguments."""
    self.num_nodes = 2
    self.setup_clean_chain = True
    # Both nodes run with -par=1; node 0 additionally overrides the taproot
    # deployment parameters via -vbparams.
    node0_args = ["-par=1", "-vbparams=taproot:@-2:@-2"]
    node1_args = ["-par=1"]
    self.extra_args = [node0_args, node1_args]
def block_submit(self, node, txs, msg, err_msg, cb_pubkey=None, fees=0, sigops_weight=0, witness=False, accept=False):
    """Mine a block containing txs and submit it to node, checking acceptance/rejection.

    Args:
        node: test node to submit the block to.
        txs: list of transactions to include after the coinbase.
        msg: description used in assertion messages.
        err_msg: substring expected in the submitblock response, or None to skip that check.
        cb_pubkey: pubkey for the coinbase output (passed to create_coinbase).
        fees: fee amount claimed by the coinbase.
        sigops_weight: sigops weight consumed by txs; the coinbase is padded with
            OP_CHECKSIGs so the block's total reaches MAX_BLOCK_SIGOPS_WEIGHT.
        witness: whether to add a witness commitment to the block.
        accept: whether the block is expected to become the new tip; on acceptance
            the tip-tracking state (self.tip/lastblockhash/lastblocktime/lastblockheight)
            is advanced.
    """
    # Pad the coinbase with OP_CHECKSIGs so the whole block uses the full sigops budget.
    extra_output_script = CScript([OP_CHECKSIG] * ((MAX_BLOCK_SIGOPS_WEIGHT - sigops_weight) // WITNESS_SCALE_FACTOR))
    block = create_block(self.tip, create_coinbase(self.lastblockheight + 1, pubkey=cb_pubkey, extra_output_script=extra_output_script, fees=fees), self.lastblocktime + 1)
    block.nVersion = 4
    for tx in txs:
        tx.rehash()
        block.vtx.append(tx)
    block.hashMerkleRoot = block.calc_merkle_root()
    # Explicit conditional instead of the original short-circuit
    # "witness and add_witness_commitment(block)" side-effect idiom.
    if witness:
        add_witness_commitment(block)
    block.rehash()
    block.solve()
    block_response = node.submitblock(block.serialize().hex())
    if err_msg is not None:
        assert block_response is not None and err_msg in block_response, "Missing error message '%s' from block response '%s': %s" % (err_msg, "(None)" if block_response is None else block_response, msg)
    if accept:
        assert node.getbestblockhash() == block.hash, "Failed to accept: %s (response: %s)" % (msg, block_response)
        self.tip = block.sha256
        self.lastblockhash = block.hash
        self.lastblocktime += 1
        self.lastblockheight += 1
    else:
        assert node.getbestblockhash() == self.lastblockhash, "Failed to reject: " + msg
def test_spenders(self, node, spenders, input_counts):
    """Run every spender in *spenders* against *node*.

    First creates one UTXO per spender (funded from the node's wallet), then
    consumes them in groups (group sizes drawn from *input_counts*), testing
    each input's success witness and, where available, its failure witness —
    both through the mempool (standardness) and through block submission
    (consensus).
    """
    self.log.info("- Constructing addresses for returning coins")
    # Wallet-owned scriptPubKeys/pubkeys, used for change and coinbase outputs.
    host_spks = []
    host_pubkeys = []
    for i in range(16):
        addr = node.getnewaddress(address_type=random.choice(["legacy", "p2sh-segwit", "bech32"]))
        info = node.getaddressinfo(addr)
        spk = bytes.fromhex(info['scriptPubKey'])
        host_spks.append(spk)
        host_pubkeys.append(bytes.fromhex(info['pubkey']))
    # Initialize the chain-tip bookkeeping consumed by block_submit.
    self.lastblockhash = node.getbestblockhash()
    self.tip = int(self.lastblockhash, 16)
    block = node.getblock(self.lastblockhash)
    self.lastblockheight = block['height']
    self.lastblocktime = block['time']
    # Create funding transactions with one output per spender, plus
    # one change output at the end. The transaction is constructed on the Python side to enable
    # having multiple outputs to the same address and outputs with no assigned address. The wallet
    # is then asked to sign it through signrawtransactionwithwallet, and then added to a block on the
    # Python side (to bypass standardness rules).
    self.log.info("- Creating test UTXOs...")
    random.shuffle(spenders)
    normal_utxos = []
    mismatching_utxos = []  # UTXOs with input that requires mismatching output position
    done = 0
    while done < len(spenders):
        # Compute how many UTXOs to create with this transaction
        count_this_tx = min(len(spenders) - done, (len(spenders) + 4) // 5, 10000)
        fund_tx = CTransaction()
        # Add the 50 highest-value inputs
        unspents = node.listunspent()
        random.shuffle(unspents)
        unspents.sort(key=lambda x: int(x["amount"] * 100000000), reverse=True)
        if len(unspents) > 50:
            unspents = unspents[:50]
        random.shuffle(unspents)
        balance = 0
        for unspent in unspents:
            balance += int(unspent["amount"] * 100000000)
            txid = int(unspent["txid"], 16)
            fund_tx.vin.append(CTxIn(COutPoint(txid, int(unspent["vout"])), CScript()))
        # Add outputs
        cur_progress = done / len(spenders)
        next_progress = (done + count_this_tx) / len(spenders)
        change_goal = (1.0 - 0.6 * next_progress) / (1.0 - 0.6 * cur_progress) * balance
        self.log.debug("Create %i UTXOs in a transaction spending %i inputs worth %.8f (sending ~%.8f to change)" % (count_this_tx, len(unspents), balance * 0.00000001, change_goal * 0.00000001))
        for i in range(count_this_tx):
            # Randomize each output value around an even split of the non-change balance.
            avg = (balance - change_goal) / (count_this_tx - i)
            amount = int(random.randrange(int(avg*0.85 + 0.5), int(avg*1.15 + 0.5)) + 0.5)
            balance -= amount
            fund_tx.vout.append(CTxOut(amount, spenders[done + i].script))
        # Add change
        fund_tx.vout.append(CTxOut(balance - 10000, random.choice(host_spks)))
        # Ask the wallet to sign
        ss = BytesIO(bytes.fromhex(node.signrawtransactionwithwallet(ToHex(fund_tx))["hex"]))
        fund_tx.deserialize(ss)
        # Construct UTXOData entries
        fund_tx.rehash()
        for i in range(count_this_tx):
            utxodata = UTXOData(outpoint=COutPoint(fund_tx.sha256, i), output=fund_tx.vout[i], spender=spenders[done])
            if utxodata.spender.need_vin_vout_mismatch:
                mismatching_utxos.append(utxodata)
            else:
                normal_utxos.append(utxodata)
            done += 1
        # Mine into a block
        self.block_submit(node, [fund_tx], "Funding tx", None, random.choice(host_pubkeys), 10000, MAX_BLOCK_SIGOPS_WEIGHT, True, True)

    # Consume groups of choice(input_coins) from utxos in a tx, testing the spenders.
    self.log.info("- Running %i spending tests" % done)
    random.shuffle(normal_utxos)
    random.shuffle(mismatching_utxos)
    assert done == len(normal_utxos) + len(mismatching_utxos)
    left = done
    while left:
        # Construct CTransaction with random nVersion, nLocktime
        tx = CTransaction()
        tx.nVersion = random.choice([1, 2, random.randint(-0x80000000, 0x7fffffff)])
        min_sequence = (tx.nVersion != 1 and tx.nVersion != 0) * 0x80000000  # The minimum sequence number to disable relative locktime
        if random.choice([True, False]):
            tx.nLockTime = random.randrange(LOCKTIME_THRESHOLD, self.lastblocktime - 7200)  # all absolute locktimes in the past
        else:
            tx.nLockTime = random.randrange(self.lastblockheight + 1)  # all block heights in the past
        # Decide how many UTXOs to test with.
        acceptable = [n for n in input_counts if n <= left and (left - n > max(input_counts) or (left - n) in [0] + input_counts)]
        num_inputs = random.choice(acceptable)
        # If we have UTXOs that require mismatching inputs/outputs left, include exactly one of those
        # unless there is only one normal UTXO left (as tests with mismatching UTXOs require at least one
        # normal UTXO to go in the first position), and we don't want to run out of normal UTXOs.
        input_utxos = []
        while len(mismatching_utxos) and (len(input_utxos) == 0 or len(normal_utxos) == 1):
            input_utxos.append(mismatching_utxos.pop())
            left -= 1
        for _ in range(max(1, num_inputs - len(input_utxos))):
            input_utxos.append(normal_utxos.pop())
            left -= 1
        # Reshuffle until a non-mismatch UTXO occupies the first position.
        while True:
            random.shuffle(input_utxos)
            if not input_utxos[0].spender.need_vin_vout_mismatch:
                break
        first_mismatch_input = None
        for i in range(len(input_utxos)):
            if input_utxos[i].spender.need_vin_vout_mismatch:
                first_mismatch_input = i
        assert first_mismatch_input is None or first_mismatch_input > 0
        # Decide the fee and construct the inputs/witness slots.
        amount = sum(utxo.output.nValue for utxo in input_utxos)
        fee = min(random.randrange(MIN_FEE * 2, MIN_FEE * 4), amount - DUST_LIMIT)
        in_value = amount - fee
        tx.vin = [CTxIn(outpoint=utxo.outpoint, nSequence=random.randint(min_sequence, 0xffffffff)) for utxo in input_utxos]
        tx.wit.vtxinwit = [CTxInWitness() for _ in range(len(input_utxos))]
        sigops_weight = sum(utxo.spender.sigops_weight for utxo in input_utxos)
        self.log.debug("Test: %s" % (", ".join(utxo.spender.comment for utxo in input_utxos)))
        # Limit num_outputs to at most first_mismatch_input, so that the mismatching
        # input's position has no corresponding output.
        num_outputs = random.choice(range(1, 1 + min(4, 4 if first_mismatch_input is None else first_mismatch_input)))
        assert in_value >= 0 and fee - num_outputs * DUST_LIMIT >= MIN_FEE
        for i in range(num_outputs):
            tx.vout.append(CTxOut())
            if in_value <= DUST_LIMIT:
                tx.vout[-1].nValue = DUST_LIMIT
            elif i < num_outputs - 1:
                tx.vout[-1].nValue = in_value
            else:
                tx.vout[-1].nValue = random.randint(DUST_LIMIT, in_value)
            in_value -= tx.vout[-1].nValue
            tx.vout[-1].scriptPubKey = random.choice(host_spks)
            sigops_weight += CScript(tx.vout[-1].scriptPubKey).GetSigOpCount(False) * WITNESS_SCALE_FACTOR
        # Any unassigned remainder goes to fees.
        fee += in_value
        assert fee >= 0
        # Coinbase pubkey for the block that will include this tx; it adds one sigop.
        cb_pubkey = random.choice(host_pubkeys)
        sigops_weight += 1 * WITNESS_SCALE_FACTOR
        # Precompute, per input, the success witness and (unless no_fail) a failure witness.
        input_data = []
        for i in range(len(input_utxos)):
            fn = input_utxos[i].spender.sat_function
            fail = None
            success = fn(tx, i, [utxo.output for utxo in input_utxos], True)
            if not input_utxos[i].spender.no_fail:
                fail = fn(tx, i, [utxo.output for utxo in input_utxos], False)
            input_data.append((fail, success))
            if self.options.dump_tests:
                dump_json_test(tx, input_utxos, i, success, fail)
        # Try the tx once per failing input (skipping no_fail spenders), and once
        # with every input succeeding (fail_input is None).
        for fail_input in list(range(len(input_utxos))) + [None]:
            if fail_input is not None and input_utxos[fail_input].spender.no_fail:
                continue
            # Expected message with each input failure, may be None (which is ignored)
            expected_fail_msg = None if fail_input is None else input_utxos[fail_input].spender.err_msg
            # Fill inputs/witnesses
            for i in range(len(input_utxos)):
                tx.vin[i].scriptSig = input_data[i][i != fail_input][0]
                tx.wit.vtxinwit[i].scriptWitness.stack = input_data[i][i != fail_input][1]
            # Submit to mempool to check standardness
            is_standard_tx = fail_input is None and all(utxo.spender.is_standard for utxo in input_utxos) and tx.nVersion >= 1 and tx.nVersion <= 2
            tx.rehash()
            msg = ','.join(utxo.spender.comment + ("*" if n == fail_input else "") for n, utxo in enumerate(input_utxos))
            if is_standard_tx:
                node.sendrawtransaction(tx.serialize().hex(), 0)
                assert node.getmempoolentry(tx.hash) is not None, "Failed to accept into mempool: " + msg
            else:
                assert_raises_rpc_error(-26, None, node.sendrawtransaction, tx.serialize().hex(), 0)
            # Submit in a block
            self.block_submit(node, [tx], msg, witness=True, accept=fail_input is None, cb_pubkey=cb_pubkey, fees=fee, sigops_weight=sigops_weight, err_msg=expected_fail_msg)

        # Progress log roughly every 200 completed tests.
        if (len(spenders) - left) // 200 > (len(spenders) - left - len(input_utxos)) // 200:
            self.log.info(" - %i tests done" % (len(spenders) - left))

    # Every UTXO must have been consumed exactly once.
    assert left == 0
    assert len(normal_utxos) == 0
    assert len(mismatching_utxos) == 0
    self.log.info(" - Done")
    def run_test(self):
        """Run all taproot spender tests: first with taproot active, then inactive."""
        # Post-taproot activation tests go first (pre-taproot tests' blocks are invalid post-taproot).
        self.log.info("Post-activation tests...")
        # Mine 101 blocks so node 1 has at least one mature, spendable coinbase.
        self.nodes[1].generate(101)
        self.test_spenders(self.nodes[1], spenders_taproot_active(), input_counts=[1, 2, 2, 2, 2, 3])
        # Sweep the 500 largest UTXOs on node 1 into one output paying node 0,
        # so the pre-activation tests on node 0 have funds to spend.
        addr = self.nodes[0].getnewaddress()
        unsp = self.nodes[1].listunspent()
        unsp = sorted(unsp, key=lambda i: i['amount'], reverse=True)
        unsp = unsp[:500]
        rawtx = self.nodes[1].createrawtransaction(
            inputs=[{
                'txid': i['txid'],
                'vout': i['vout']
            } for i in unsp],
            outputs={addr: sum(i['amount'] for i in unsp)}
        )
        rawtx = self.nodes[1].signrawtransactionwithwallet(rawtx)['hex']
        # The output pays the full input sum (zero fee), so the sweep is mined
        # directly via submitblock rather than relayed through the mempool.
        block = create_block(tmpl=self.nodes[1].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS), txlist=[rawtx])
        add_witness_commitment(block)
        block.rehash()
        block.solve()
        # submitblock returns None on acceptance.
        assert_equal(None, self.nodes[1].submitblock(block.serialize().hex()))
        self.sync_blocks()
        self.log.info("Pre-activation tests...")
        # NOTE(review): run once with single-input and once with multi-input
        # transactions — presumably to cover both standardness paths; confirm
        # against test_spenders' handling of input_counts.
        self.test_spenders(self.nodes[0], spenders_taproot_inactive(), input_counts=[1])
        self.test_spenders(self.nodes[0], spenders_taproot_inactive(), input_counts=[2, 3])
# Standard functional-test entry point: run the test when executed directly.
if __name__ == '__main__':
    TaprootTest().main()
| true | true |
1c33a470cd3d84b5a955d61b336be1ec9d152d1e | 3,254 | py | Python | utils/tests/test_overpass.py | posm/osm-export-tool2 | 5a1f4096f1afbe7420363376e6e1e8d42e47e1d1 | [
"BSD-3-Clause"
] | 2 | 2018-08-31T18:30:28.000Z | 2018-11-27T01:50:06.000Z | utils/tests/test_overpass.py | posm/osm-export-tool2 | 5a1f4096f1afbe7420363376e6e1e8d42e47e1d1 | [
"BSD-3-Clause"
] | null | null | null | utils/tests/test_overpass.py | posm/osm-export-tool2 | 5a1f4096f1afbe7420363376e6e1e8d42e47e1d1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
import os
from unittest import skip
import mock
from mock import patch
from django.conf import settings
from django.contrib.auth.models import Group, User
from django.contrib.gis.geos import GEOSGeometry, Polygon
from django.test import TestCase
from jobs import presets
from jobs.models import ExportFormat, Job, Tag
from ..overpass import Overpass
logger = logging.getLogger(__name__)
class TestOverpass(TestCase):
    """Tests for the Overpass wrapper: query construction and query execution."""

    def setUp(self,):
        """Create the fixture: a demo user, an export job over a fixed extent,
        and the 256 HDM preset tags attached to that job."""
        self.url = 'http://localhost/interpreter'
        # Bounding box around Monrovia.
        self.bbox = '6.25,-10.85,6.40,-10.62'
        self.path = settings.ABS_PATH()
        # Export formats are pre-loaded by the 'insert_export_formats' migration.
        self.formats = ExportFormat.objects.all()
        Group.objects.create(name='TestDefaultExportExtentGroup')
        self.user = User.objects.create(username='demo', email='demo@demo.com', password='demo')
        extent = GEOSGeometry(Polygon.from_bbox((-7.96, 22.6, -8.14, 27.12)), srid=4326)
        self.job = Job.objects.create(
            name='TestJob',
            description='Test description',
            event='Nepal activation',
            user=self.user,
            the_geom=extent,
        )
        self.uid = self.job.uid
        self.job.formats = self.formats
        self.job.save()
        self.osm = self.path + '/files/query.osm'
        self.query = '[maxsize:2147483648][timeout:1600];(node(6.25,-10.85,6.40,-10.62);<;);out body;'
        self.job.tags.all().delete()
        preset_parser = presets.PresetParser(self.path + '/utils/tests/files/hdm_presets.xml')
        parsed_tags = preset_parser.parse()
        self.assertIsNotNone(parsed_tags)
        self.assertEqual(256, len(parsed_tags))
        # Persist every tag from the preset onto the job.
        for entry in parsed_tags:
            Tag.objects.create(
                key=entry['key'],
                value=entry['value'],
                job=self.job,
                data_model='osm',
                geom_types=entry['geom_types'],
            )
        self.assertEqual(256, self.job.tags.all().count())

    def test_get_query(self,):
        """get_query() builds the expected Overpass QL for the job's bbox."""
        op = Overpass(
            stage_dir=self.path + '/utils/tests/files/',
            bbox=self.bbox, job_name='testjob',
            filters=self.job.filters
        )
        self.assertEqual(op.get_query(), self.query)

    @patch('utils.overpass.requests.post')
    def test_run_query(self, mock_post):
        """run_query() POSTs the query and streams the response body to disk."""
        op = Overpass(
            stage_dir=self.path + '/utils/tests/files/',
            bbox=self.bbox, job_name='testjob',
            filters=self.job.filters
        )
        query = op.get_query()
        out_path = self.path + '/utils/tests/files/query.osm'
        fake_response = mock.Mock()
        payload = ['<osm>some data</osm>']
        fake_response.iter_content.return_value = payload
        mock_post.return_value = fake_response
        op.run_query()
        mock_post.assert_called_once_with(self.url,
                                          data=query,
                                          stream=True)
        with open(out_path) as handle:
            self.assertEqual(handle.read(), payload[0])
        os.remove(out_path)
| 36.561798 | 102 | 0.592502 |
import logging
import os
from unittest import skip
import mock
from mock import patch
from django.conf import settings
from django.contrib.auth.models import Group, User
from django.contrib.gis.geos import GEOSGeometry, Polygon
from django.test import TestCase
from jobs import presets
from jobs.models import ExportFormat, Job, Tag
from ..overpass import Overpass
logger = logging.getLogger(__name__)
class TestOverpass(TestCase):
    """Tests for the Overpass wrapper: query construction and query execution."""

    def setUp(self,):
        """Create a demo user, an export job over a fixed extent, and the
        256 HDM preset tags attached to that job."""
        self.url = 'http://localhost/interpreter'
        self.bbox = '6.25,-10.85,6.40,-10.62'
        self.path = settings.ABS_PATH()
        self.formats = ExportFormat.objects.all()
        Group.objects.create(name='TestDefaultExportExtentGroup')
        self.user = User.objects.create(username='demo', email='demo@demo.com', password='demo')
        extent = GEOSGeometry(Polygon.from_bbox((-7.96, 22.6, -8.14, 27.12)), srid=4326)
        self.job = Job.objects.create(
            name='TestJob',
            description='Test description',
            event='Nepal activation',
            user=self.user,
            the_geom=extent,
        )
        self.uid = self.job.uid
        self.job.formats = self.formats
        self.job.save()
        self.osm = self.path + '/files/query.osm'
        self.query = '[maxsize:2147483648][timeout:1600];(node(6.25,-10.85,6.40,-10.62);<;);out body;'
        self.job.tags.all().delete()
        preset_parser = presets.PresetParser(self.path + '/utils/tests/files/hdm_presets.xml')
        parsed_tags = preset_parser.parse()
        self.assertIsNotNone(parsed_tags)
        self.assertEqual(256, len(parsed_tags))
        for entry in parsed_tags:
            Tag.objects.create(
                key=entry['key'],
                value=entry['value'],
                job=self.job,
                data_model='osm',
                geom_types=entry['geom_types'],
            )
        self.assertEqual(256, self.job.tags.all().count())

    def test_get_query(self,):
        """get_query() builds the expected Overpass QL for the job's bbox."""
        op = Overpass(
            stage_dir=self.path + '/utils/tests/files/',
            bbox=self.bbox, job_name='testjob',
            filters=self.job.filters
        )
        self.assertEqual(op.get_query(), self.query)

    @patch('utils.overpass.requests.post')
    def test_run_query(self, mock_post):
        """run_query() POSTs the query and streams the response body to disk."""
        op = Overpass(
            stage_dir=self.path + '/utils/tests/files/',
            bbox=self.bbox, job_name='testjob',
            filters=self.job.filters
        )
        query = op.get_query()
        out_path = self.path + '/utils/tests/files/query.osm'
        fake_response = mock.Mock()
        payload = ['<osm>some data</osm>']
        fake_response.iter_content.return_value = payload
        mock_post.return_value = fake_response
        op.run_query()
        mock_post.assert_called_once_with(self.url,
                                          data=query,
                                          stream=True)
        with open(out_path) as handle:
            self.assertEqual(handle.read(), payload[0])
        os.remove(out_path)
| true | true |
1c33a523912e848401b2771ef8932cd156444823 | 7,439 | py | Python | src/headers/__init__.py | joniumGit/headers | 0c0e0564445810d4408cafd6cd66ec0e5952179c | [
"MIT"
] | null | null | null | src/headers/__init__.py | joniumGit/headers | 0c0e0564445810d4408cafd6cd66ec0e5952179c | [
"MIT"
] | null | null | null | src/headers/__init__.py | joniumGit/headers | 0c0e0564445810d4408cafd6cd66ec0e5952179c | [
"MIT"
] | null | null | null | A_IM = "A-IM"
# Constants for HTTP header field names, spelled with their canonical
# on-the-wire capitalization. Import these instead of repeating string
# literals at call sites to avoid typos.
ACCEPT = "Accept"
ACCEPT_ADDITIONS = "Accept-Additions"
ACCEPT_CH = "Accept-CH"
ACCEPT_CHARSET = "Accept-Charset"
ACCEPT_DATETIME = "Accept-Datetime"
ACCEPT_ENCODING = "Accept-Encoding"
ACCEPT_FEATURES = "Accept-Features"
ACCEPT_LANGUAGE = "Accept-Language"
ACCEPT_PATCH = "Accept-Patch"
ACCEPT_POST = "Accept-Post"
ACCEPT_RANGES = "Accept-Ranges"
ACCESS_CONTROL = "Access-Control"
ACCESS_CONTROL_ALLOW_CREDENTIALS = "Access-Control-Allow-Credentials"
ACCESS_CONTROL_ALLOW_HEADERS = "Access-Control-Allow-Headers"
ACCESS_CONTROL_ALLOW_METHODS = "Access-Control-Allow-Methods"
ACCESS_CONTROL_ALLOW_ORIGIN = "Access-Control-Allow-Origin"
ACCESS_CONTROL_EXPOSE_HEADERS = "Access-Control-Expose-Headers"
ACCESS_CONTROL_MAX_AGE = "Access-Control-Max-Age"
ACCESS_CONTROL_REQUEST_HEADERS = "Access-Control-Request-Headers"
ACCESS_CONTROL_REQUEST_METHOD = "Access-Control-Request-Method"
AGE = "Age"
ALLOW = "Allow"
ALPN = "ALPN"
ALT_SVC = "Alt-Svc"
ALT_USED = "Alt-Used"
ALTERNATES = "Alternates"
AMP_CACHE_TRANSFORM = "AMP-Cache-Transform"
APPLY_TO_REDIRECT_REF = "Apply-To-Redirect-Ref"
AUTHENTICATION_CONTROL = "Authentication-Control"
AUTHENTICATION_INFO = "Authentication-Info"
AUTHORIZATION = "Authorization"
C_EXT = "C-Ext"
C_MAN = "C-Man"
C_OPT = "C-Opt"
C_PEP = "C-PEP"
C_PEP_INFO = "C-PEP-Info"
CACHE_CONTROL = "Cache-Control"
CACHE_STATUS = "Cache-Status"
CAL_MANAGED_ID = "Cal-Managed-ID"
CALDAV_TIMEZONES = "CalDAV-Timezones"
CDN_CACHE_CONTROL = "CDN-Cache-Control"
CDN_LOOP = "CDN-Loop"
CERT_NOT_AFTER = "Cert-Not-After"
CERT_NOT_BEFORE = "Cert-Not-Before"
CLEAR_SITE_DATA = "Clear-Site-Data"
CLOSE = "Close"
COMPLIANCE = "Compliance"
CONFIGURATION_CONTEXT = "Configuration-Context"
CONNECTION = "Connection"
CONTENT_BASE = "Content-Base"
CONTENT_DISPOSITION = "Content-Disposition"
CONTENT_ENCODING = "Content-Encoding"
CONTENT_ID = "Content-ID"
CONTENT_LANGUAGE = "Content-Language"
CONTENT_LENGTH = "Content-Length"
CONTENT_LOCATION = "Content-Location"
CONTENT_MD5 = "Content-MD5"
CONTENT_RANGE = "Content-Range"
CONTENT_SCRIPT_TYPE = "Content-Script-Type"
CONTENT_SECURITY_POLICY = "Content-Security-Policy"
CONTENT_SECURITY_POLICY_REPORT_ONLY = "Content-Security-Policy-Report-Only"
CONTENT_STYLE_TYPE = "Content-Style-Type"
CONTENT_TRANSFER_ENCODING = "Content-Transfer-Encoding"
CONTENT_TYPE = "Content-Type"
CONTENT_VERSION = "Content-Version"
COOKIE = "Cookie"
COOKIE2 = "Cookie2"
COST = "Cost"
CROSS_ORIGIN_EMBEDDER_POLICY = "Cross-Origin-Embedder-Policy"
CROSS_ORIGIN_EMBEDDER_POLICY_REPORT_ONLY = "Cross-Origin-Embedder-Policy-Report-Only"
CROSS_ORIGIN_OPENER_POLICY = "Cross-Origin-Opener-Policy"
CROSS_ORIGIN_OPENER_POLICY_REPORT_ONLY = "Cross-Origin-Opener-Policy-Report-Only"
CROSS_ORIGIN_RESOURCE_POLICY = "Cross-Origin-Resource-Policy"
DASL = "DASL"
DATE = "Date"
DAV = "DAV"
DEFAULT_STYLE = "Default-Style"
DELTA_BASE = "Delta-Base"
DEPTH = "Depth"
DERIVED_FROM = "Derived-From"
DESTINATION = "Destination"
DIFFERENTIAL_ID = "Differential-ID"
DIGEST = "Digest"
EARLY_DATA = "Early-Data"
EDIINT_FEATURES = "EDIINT-Features"
ETAG = "ETag"
EXPECT = "Expect"
EXPECT_CT = "Expect-CT"
EXPIRES = "Expires"
EXT = "Ext"
FORWARDED = "Forwarded"
FROM = "From"
GETPROFILE = "GetProfile"
HOBAREG = "Hobareg"
HOST = "Host"
HTTP2_SETTINGS = "HTTP2-Settings"
IF = "If"
IF_MATCH = "If-Match"
IF_MODIFIED_SINCE = "If-Modified-Since"
IF_NONE_MATCH = "If-None-Match"
IF_RANGE = "If-Range"
IF_SCHEDULE_TAG_MATCH = "If-Schedule-Tag-Match"
IF_UNMODIFIED_SINCE = "If-Unmodified-Since"
IM = "IM"
INCLUDE_REFERRED_TOKEN_BINDING_ID = "Include-Referred-Token-Binding-ID"
ISOLATION = "Isolation"
KEEP_ALIVE = "Keep-Alive"
LABEL = "Label"
LAST_EVENT_ID = "Last-Event-ID"
LAST_MODIFIED = "Last-Modified"
LINK = "Link"
LOCATION = "Location"
LOCK_TOKEN = "Lock-Token"
MAN = "Man"
MAX_FORWARDS = "Max-Forwards"
MEMENTO_DATETIME = "Memento-Datetime"
MESSAGE_ID = "Message-ID"
METER = "Meter"
METHOD_CHECK = "Method-Check"
METHOD_CHECK_EXPIRES = "Method-Check-Expires"
MIME_VERSION = "MIME-Version"
NEGOTIATE = "Negotiate"
NON_COMPLIANCE = "Non-Compliance"
ODATA_ENTITYID = "OData-EntityId"
ODATA_ISOLATION = "OData-Isolation"
ODATA_MAXVERSION = "OData-MaxVersion"
ODATA_VERSION = "OData-Version"
OPT = "Opt"
OPTIONAL = "Optional"
OPTIONAL_WWW_AUTHENTICATE = "Optional-WWW-Authenticate"
ORDERING_TYPE = "Ordering-Type"
ORIGIN = "Origin"
ORIGIN_AGENT_CLUSTER = "Origin-Agent-Cluster"
OSCORE = "OSCORE"
OSLC_CORE_VERSION = "OSLC-Core-Version"
OVERWRITE = "Overwrite"
P3P = "P3P"
PEP = "PEP"
PEP_INFO = "Pep-Info"
PICS_LABEL = "PICS-Label"
PING_FROM = "Ping-From"
PING_TO = "Ping-To"
POSITION = "Position"
PRAGMA = "Pragma"
PREFER = "Prefer"
PREFERENCE_APPLIED = "Preference-Applied"
PRIORITY = "Priority"
PROFILEOBJECT = "ProfileObject"
PROTOCOL = "Protocol"
PROTOCOL_INFO = "Protocol-Info"
PROTOCOL_QUERY = "Protocol-Query"
PROTOCOL_REQUEST = "Protocol-Request"
PROXY_AUTHENTICATE = "Proxy-Authenticate"
PROXY_AUTHENTICATION_INFO = "Proxy-Authentication-Info"
PROXY_AUTHORIZATION = "Proxy-Authorization"
PROXY_FEATURES = "Proxy-Features"
PROXY_INSTRUCTION = "Proxy-Instruction"
PROXY_STATUS = "Proxy-Status"
PUBLIC = "Public"
PUBLIC_KEY_PINS = "Public-Key-Pins"
PUBLIC_KEY_PINS_REPORT_ONLY = "Public-Key-Pins-Report-Only"
RANGE = "Range"
REDIRECT_REF = "Redirect-Ref"
REFERER = "Referer"
REFERER_ROOT = "Referer-Root"
REFRESH = "Refresh"
REPEATABILITY_CLIENT_ID = "Repeatability-Client-ID"
REPEATABILITY_FIRST_SENT = "Repeatability-First-Sent"
REPEATABILITY_REQUEST_ID = "Repeatability-Request-ID"
REPEATABILITY_RESULT = "Repeatability-Result"
REPLAY_NONCE = "Replay-Nonce"
RESOLUTION_HINT = "Resolution-Hint"
RESOLVER_LOCATION = "Resolver-Location"
RETRY_AFTER = "Retry-After"
SAFE = "Safe"
SCHEDULE_REPLY = "Schedule-Reply"
SCHEDULE_TAG = "Schedule-Tag"
SEC_GPC = "Sec-GPC"
SEC_TOKEN_BINDING = "Sec-Token-Binding"
SEC_WEBSOCKET_ACCEPT = "Sec-WebSocket-Accept"
SEC_WEBSOCKET_EXTENSIONS = "Sec-WebSocket-Extensions"
SEC_WEBSOCKET_KEY = "Sec-WebSocket-Key"
SEC_WEBSOCKET_PROTOCOL = "Sec-WebSocket-Protocol"
SEC_WEBSOCKET_VERSION = "Sec-WebSocket-Version"
SECURITY_SCHEME = "Security-Scheme"
SERVER = "Server"
SERVER_TIMING = "Server-Timing"
SET_COOKIE = "Set-Cookie"
SET_COOKIE2 = "Set-Cookie2"
SETPROFILE = "SetProfile"
SLUG = "SLUG"
SOAPACTION = "SoapAction"
STATUS_URI = "Status-URI"
STRICT_TRANSPORT_SECURITY = "Strict-Transport-Security"
SUBOK = "SubOK"
SUBST = "Subst"
SUNSET = "Sunset"
SURROGATE_CAPABILITY = "Surrogate-Capability"
SURROGATE_CONTROL = "Surrogate-Control"
TCN = "TCN"
TE = "TE"
TIMEOUT = "Timeout"
TIMING_ALLOW_ORIGIN = "Timing-Allow-Origin"
TITLE = "Title"
TOPIC = "Topic"
TRACEPARENT = "Traceparent"
TRACESTATE = "Tracestate"
TRAILER = "Trailer"
TRANSFER_ENCODING = "Transfer-Encoding"
TTL = "TTL"
UA_COLOR = "UA-Color"
UA_MEDIA = "UA-Media"
UA_PIXELS = "UA-Pixels"
UA_RESOLUTION = "UA-Resolution"
UA_WINDOWPIXELS = "UA-Windowpixels"
UPGRADE = "Upgrade"
URGENCY = "Urgency"
URI = "URI"
USER_AGENT = "User-Agent"
VARIANT_VARY = "Variant-Vary"
VARY = "Vary"
VERSION = "Version"
VIA = "Via"
WANT_DIGEST = "Want-Digest"
WARNING = "Warning"
WWW_AUTHENTICATE = "WWW-Authenticate"
X_CONTENT_TYPE_OPTIONS = "X-Content-Type-Options"
X_DEVICE_ACCEPT = "X-Device-Accept"
X_DEVICE_ACCEPT_CHARSET = "X-Device-Accept-Charset"
X_DEVICE_ACCEPT_ENCODING = "X-Device-Accept-Encoding"
X_DEVICE_ACCEPT_LANGUAGE = "X-Device-Accept-Language"
X_DEVICE_USER_AGENT = "X-Device-User-Agent"
X_FRAME_OPTIONS = "X-Frame-Options"
# Wildcard token (not a header name).
STAR = "*"
| 31.521186 | 85 | 0.777524 | A_IM = "A-IM"
# HTTP header field-name constants, spelled with their canonical
# on-the-wire capitalization. Use these instead of string literals.
ACCEPT = "Accept"
ACCEPT_ADDITIONS = "Accept-Additions"
ACCEPT_CH = "Accept-CH"
ACCEPT_CHARSET = "Accept-Charset"
ACCEPT_DATETIME = "Accept-Datetime"
ACCEPT_ENCODING = "Accept-Encoding"
ACCEPT_FEATURES = "Accept-Features"
ACCEPT_LANGUAGE = "Accept-Language"
ACCEPT_PATCH = "Accept-Patch"
ACCEPT_POST = "Accept-Post"
ACCEPT_RANGES = "Accept-Ranges"
ACCESS_CONTROL = "Access-Control"
ACCESS_CONTROL_ALLOW_CREDENTIALS = "Access-Control-Allow-Credentials"
ACCESS_CONTROL_ALLOW_HEADERS = "Access-Control-Allow-Headers"
ACCESS_CONTROL_ALLOW_METHODS = "Access-Control-Allow-Methods"
ACCESS_CONTROL_ALLOW_ORIGIN = "Access-Control-Allow-Origin"
ACCESS_CONTROL_EXPOSE_HEADERS = "Access-Control-Expose-Headers"
ACCESS_CONTROL_MAX_AGE = "Access-Control-Max-Age"
ACCESS_CONTROL_REQUEST_HEADERS = "Access-Control-Request-Headers"
ACCESS_CONTROL_REQUEST_METHOD = "Access-Control-Request-Method"
AGE = "Age"
ALLOW = "Allow"
ALPN = "ALPN"
ALT_SVC = "Alt-Svc"
ALT_USED = "Alt-Used"
ALTERNATES = "Alternates"
AMP_CACHE_TRANSFORM = "AMP-Cache-Transform"
APPLY_TO_REDIRECT_REF = "Apply-To-Redirect-Ref"
AUTHENTICATION_CONTROL = "Authentication-Control"
AUTHENTICATION_INFO = "Authentication-Info"
AUTHORIZATION = "Authorization"
C_EXT = "C-Ext"
C_MAN = "C-Man"
C_OPT = "C-Opt"
C_PEP = "C-PEP"
C_PEP_INFO = "C-PEP-Info"
CACHE_CONTROL = "Cache-Control"
CACHE_STATUS = "Cache-Status"
CAL_MANAGED_ID = "Cal-Managed-ID"
CALDAV_TIMEZONES = "CalDAV-Timezones"
CDN_CACHE_CONTROL = "CDN-Cache-Control"
CDN_LOOP = "CDN-Loop"
CERT_NOT_AFTER = "Cert-Not-After"
CERT_NOT_BEFORE = "Cert-Not-Before"
CLEAR_SITE_DATA = "Clear-Site-Data"
CLOSE = "Close"
COMPLIANCE = "Compliance"
CONFIGURATION_CONTEXT = "Configuration-Context"
CONNECTION = "Connection"
CONTENT_BASE = "Content-Base"
CONTENT_DISPOSITION = "Content-Disposition"
CONTENT_ENCODING = "Content-Encoding"
CONTENT_ID = "Content-ID"
CONTENT_LANGUAGE = "Content-Language"
CONTENT_LENGTH = "Content-Length"
CONTENT_LOCATION = "Content-Location"
CONTENT_MD5 = "Content-MD5"
CONTENT_RANGE = "Content-Range"
CONTENT_SCRIPT_TYPE = "Content-Script-Type"
CONTENT_SECURITY_POLICY = "Content-Security-Policy"
CONTENT_SECURITY_POLICY_REPORT_ONLY = "Content-Security-Policy-Report-Only"
CONTENT_STYLE_TYPE = "Content-Style-Type"
CONTENT_TRANSFER_ENCODING = "Content-Transfer-Encoding"
CONTENT_TYPE = "Content-Type"
CONTENT_VERSION = "Content-Version"
COOKIE = "Cookie"
COOKIE2 = "Cookie2"
COST = "Cost"
CROSS_ORIGIN_EMBEDDER_POLICY = "Cross-Origin-Embedder-Policy"
CROSS_ORIGIN_EMBEDDER_POLICY_REPORT_ONLY = "Cross-Origin-Embedder-Policy-Report-Only"
CROSS_ORIGIN_OPENER_POLICY = "Cross-Origin-Opener-Policy"
CROSS_ORIGIN_OPENER_POLICY_REPORT_ONLY = "Cross-Origin-Opener-Policy-Report-Only"
CROSS_ORIGIN_RESOURCE_POLICY = "Cross-Origin-Resource-Policy"
DASL = "DASL"
DATE = "Date"
DAV = "DAV"
DEFAULT_STYLE = "Default-Style"
DELTA_BASE = "Delta-Base"
DEPTH = "Depth"
DERIVED_FROM = "Derived-From"
DESTINATION = "Destination"
DIFFERENTIAL_ID = "Differential-ID"
DIGEST = "Digest"
EARLY_DATA = "Early-Data"
EDIINT_FEATURES = "EDIINT-Features"
ETAG = "ETag"
EXPECT = "Expect"
EXPECT_CT = "Expect-CT"
EXPIRES = "Expires"
EXT = "Ext"
FORWARDED = "Forwarded"
FROM = "From"
GETPROFILE = "GetProfile"
HOBAREG = "Hobareg"
HOST = "Host"
HTTP2_SETTINGS = "HTTP2-Settings"
IF = "If"
IF_MATCH = "If-Match"
IF_MODIFIED_SINCE = "If-Modified-Since"
IF_NONE_MATCH = "If-None-Match"
IF_RANGE = "If-Range"
IF_SCHEDULE_TAG_MATCH = "If-Schedule-Tag-Match"
IF_UNMODIFIED_SINCE = "If-Unmodified-Since"
IM = "IM"
INCLUDE_REFERRED_TOKEN_BINDING_ID = "Include-Referred-Token-Binding-ID"
ISOLATION = "Isolation"
KEEP_ALIVE = "Keep-Alive"
LABEL = "Label"
LAST_EVENT_ID = "Last-Event-ID"
LAST_MODIFIED = "Last-Modified"
LINK = "Link"
LOCATION = "Location"
LOCK_TOKEN = "Lock-Token"
MAN = "Man"
MAX_FORWARDS = "Max-Forwards"
MEMENTO_DATETIME = "Memento-Datetime"
MESSAGE_ID = "Message-ID"
METER = "Meter"
METHOD_CHECK = "Method-Check"
METHOD_CHECK_EXPIRES = "Method-Check-Expires"
MIME_VERSION = "MIME-Version"
NEGOTIATE = "Negotiate"
NON_COMPLIANCE = "Non-Compliance"
ODATA_ENTITYID = "OData-EntityId"
ODATA_ISOLATION = "OData-Isolation"
ODATA_MAXVERSION = "OData-MaxVersion"
ODATA_VERSION = "OData-Version"
OPT = "Opt"
OPTIONAL = "Optional"
OPTIONAL_WWW_AUTHENTICATE = "Optional-WWW-Authenticate"
ORDERING_TYPE = "Ordering-Type"
ORIGIN = "Origin"
ORIGIN_AGENT_CLUSTER = "Origin-Agent-Cluster"
OSCORE = "OSCORE"
OSLC_CORE_VERSION = "OSLC-Core-Version"
OVERWRITE = "Overwrite"
P3P = "P3P"
PEP = "PEP"
PEP_INFO = "Pep-Info"
PICS_LABEL = "PICS-Label"
PING_FROM = "Ping-From"
PING_TO = "Ping-To"
POSITION = "Position"
PRAGMA = "Pragma"
PREFER = "Prefer"
PREFERENCE_APPLIED = "Preference-Applied"
PRIORITY = "Priority"
PROFILEOBJECT = "ProfileObject"
PROTOCOL = "Protocol"
PROTOCOL_INFO = "Protocol-Info"
PROTOCOL_QUERY = "Protocol-Query"
PROTOCOL_REQUEST = "Protocol-Request"
PROXY_AUTHENTICATE = "Proxy-Authenticate"
PROXY_AUTHENTICATION_INFO = "Proxy-Authentication-Info"
PROXY_AUTHORIZATION = "Proxy-Authorization"
PROXY_FEATURES = "Proxy-Features"
PROXY_INSTRUCTION = "Proxy-Instruction"
PROXY_STATUS = "Proxy-Status"
PUBLIC = "Public"
PUBLIC_KEY_PINS = "Public-Key-Pins"
PUBLIC_KEY_PINS_REPORT_ONLY = "Public-Key-Pins-Report-Only"
RANGE = "Range"
REDIRECT_REF = "Redirect-Ref"
REFERER = "Referer"
REFERER_ROOT = "Referer-Root"
REFRESH = "Refresh"
REPEATABILITY_CLIENT_ID = "Repeatability-Client-ID"
REPEATABILITY_FIRST_SENT = "Repeatability-First-Sent"
REPEATABILITY_REQUEST_ID = "Repeatability-Request-ID"
REPEATABILITY_RESULT = "Repeatability-Result"
REPLAY_NONCE = "Replay-Nonce"
RESOLUTION_HINT = "Resolution-Hint"
RESOLVER_LOCATION = "Resolver-Location"
RETRY_AFTER = "Retry-After"
SAFE = "Safe"
SCHEDULE_REPLY = "Schedule-Reply"
SCHEDULE_TAG = "Schedule-Tag"
SEC_GPC = "Sec-GPC"
SEC_TOKEN_BINDING = "Sec-Token-Binding"
SEC_WEBSOCKET_ACCEPT = "Sec-WebSocket-Accept"
SEC_WEBSOCKET_EXTENSIONS = "Sec-WebSocket-Extensions"
SEC_WEBSOCKET_KEY = "Sec-WebSocket-Key"
SEC_WEBSOCKET_PROTOCOL = "Sec-WebSocket-Protocol"
SEC_WEBSOCKET_VERSION = "Sec-WebSocket-Version"
SECURITY_SCHEME = "Security-Scheme"
SERVER = "Server"
SERVER_TIMING = "Server-Timing"
SET_COOKIE = "Set-Cookie"
SET_COOKIE2 = "Set-Cookie2"
SETPROFILE = "SetProfile"
SLUG = "SLUG"
SOAPACTION = "SoapAction"
STATUS_URI = "Status-URI"
STRICT_TRANSPORT_SECURITY = "Strict-Transport-Security"
SUBOK = "SubOK"
SUBST = "Subst"
SUNSET = "Sunset"
SURROGATE_CAPABILITY = "Surrogate-Capability"
SURROGATE_CONTROL = "Surrogate-Control"
TCN = "TCN"
TE = "TE"
TIMEOUT = "Timeout"
TIMING_ALLOW_ORIGIN = "Timing-Allow-Origin"
TITLE = "Title"
TOPIC = "Topic"
TRACEPARENT = "Traceparent"
TRACESTATE = "Tracestate"
TRAILER = "Trailer"
TRANSFER_ENCODING = "Transfer-Encoding"
TTL = "TTL"
UA_COLOR = "UA-Color"
UA_MEDIA = "UA-Media"
UA_PIXELS = "UA-Pixels"
UA_RESOLUTION = "UA-Resolution"
UA_WINDOWPIXELS = "UA-Windowpixels"
UPGRADE = "Upgrade"
URGENCY = "Urgency"
URI = "URI"
USER_AGENT = "User-Agent"
VARIANT_VARY = "Variant-Vary"
VARY = "Vary"
VERSION = "Version"
VIA = "Via"
WANT_DIGEST = "Want-Digest"
WARNING = "Warning"
WWW_AUTHENTICATE = "WWW-Authenticate"
X_CONTENT_TYPE_OPTIONS = "X-Content-Type-Options"
X_DEVICE_ACCEPT = "X-Device-Accept"
X_DEVICE_ACCEPT_CHARSET = "X-Device-Accept-Charset"
X_DEVICE_ACCEPT_ENCODING = "X-Device-Accept-Encoding"
X_DEVICE_ACCEPT_LANGUAGE = "X-Device-Accept-Language"
X_DEVICE_USER_AGENT = "X-Device-User-Agent"
X_FRAME_OPTIONS = "X-Frame-Options"
# Wildcard token (not a header name).
STAR = "*"
| true | true |
1c33a5b52ee4f37831615da72e58cbf8b4b8979e | 1,840 | py | Python | commander/thirdparty/covertutils/shells/subshells/controlsubshell.py | how2how/ToyHome | 4457b1d28e21ed6fd4ab980a0f7fed345c570ae3 | [
"Apache-2.0"
] | 1 | 2020-07-26T01:08:30.000Z | 2020-07-26T01:08:30.000Z | commander/thirdparty/covertutils/shells/subshells/controlsubshell.py | how2how/ToyHome | 4457b1d28e21ed6fd4ab980a0f7fed345c570ae3 | [
"Apache-2.0"
] | null | null | null | commander/thirdparty/covertutils/shells/subshells/controlsubshell.py | how2how/ToyHome | 4457b1d28e21ed6fd4ab980a0f7fed345c570ae3 | [
"Apache-2.0"
] | null | null | null | import json
# from covertutils.payloads.generic.control import Commands as control_commands
from covertutils.shells.subshells import SimpleSubShell
# Two-letter wire codes for the control channel, keyed by the
# human-readable shell command that triggers them.
Commands = {
    'reset': 'RST',
    'identity': 'ID',
    'sysinfo': 'SI',
    'kill': 'KI',
    'mute': 'MU',
    'unmute': 'UM',
    'nuke': 'NK',
}
def message_handle(message, instance) :
	"""Handle a message arriving on the control stream.

	If *instance* is awaiting a 'sysinfo' reply, the message is split on '+'
	into fields and pretty-printed; the parsed list is also stored on the
	base shell. Otherwise the raw message is logged as-is.
	"""
	if instance.sysinfo :
		# sysinfo_var = message
		# sysinfo = json.loads(message)
		# NOTE(review): fields are assumed to arrive in exactly the order
		# of the template below — confirm against the agent's sysinfo sender.
		sysinfo = message.split('+')
		instance.message_logger.warn( """
General:
	Host: {}
	Machine: {}
	Version: {}
	Locale: {}
	Platform: {}
	Release: {}
	System: {}
	Processor: {}
	User: {}
Specifics:
	Windows: {}
	Linux: {}
""".format( *sysinfo ) )
		# MacOS: {}
		instance.base_shell.sysinfo = sysinfo
		instance.sysinfo = False
	else :
		instance.message_logger.warn( message )
class ControlSubShell ( SimpleSubShell ) :
	"""Sub-shell bound to the control stream.

	Translates the commands in `Commands` into their two-letter wire codes
	and sends them via the handler; replies are processed by `message_handle`.
	"""

	def __init__( self, stream, handler, queue_dict, base_shell, ignore_messages = set(['X']), prompt_templ = " (>{stream}<) |-> ") :
		SimpleSubShell.__init__( self, stream, handler, queue_dict, base_shell, ignore_messages, prompt_templ )
		self.updatePrompt( )
		self.message_function = message_handle
		self.sysinfo = False   # True while a 'sysinfo' reply is pending
		self.killed = False    # True once a 'kill' command has been sent

	def default( self, line ) :
		"""Look up the typed command's wire code and dispatch it."""
		comm, args, line = self.parseline(line)
		try :
			command = Commands[comm]
		except KeyError :
			# Narrowed from a bare 'except' so KeyboardInterrupt/SystemExit
			# are no longer swallowed here.
			self.debug_logger.warn( "No such control command [%s]!" % comm)
			return
		if command == Commands['reset'] :
			self.debug_logger.warn( "Reseting handler" )
			self.resetHandler()
		if command == Commands['sysinfo'] :
			self.sysinfo = True
		if command == Commands['kill'] :
			self.killed = True
		self.debug_logger.warn( "Sending '%s' control command!" % command )
		self.handler.preferred_send( command, self.stream )

	def resetHandler( self ) :
		"""Reset the underlying covert handler."""
		self.handler.reset()
| 22.439024 | 130 | 0.668478 | import json
from covertutils.shells.subshells import SimpleSubShell
# Wire codes for the control channel, keyed by the shell command name.
Commands = {
    'reset': 'RST',
    'identity': 'ID',
    'sysinfo': 'SI',
    'kill': 'KI',
    'mute': 'MU',
    'unmute': 'UM',
    'nuke': 'NK',
}
def message_handle(message, instance) :
	"""Handle a message arriving on the control stream.

	If *instance* is awaiting a 'sysinfo' reply, the message is split on '+'
	into fields and pretty-printed; the parsed list is also stored on the
	base shell. Otherwise the raw message is logged as-is.
	"""
	if instance.sysinfo :
		# NOTE(review): fields are assumed to arrive in exactly the order
		# of the template below — confirm against the agent's sysinfo sender.
		sysinfo = message.split('+')
		instance.message_logger.warn( """
General:
	Host: {}
	Machine: {}
	Version: {}
	Locale: {}
	Platform: {}
	Release: {}
	System: {}
	Processor: {}
	User: {}
Specifics:
	Windows: {}
	Linux: {}
""".format( *sysinfo ) )
		instance.base_shell.sysinfo = sysinfo
		instance.sysinfo = False
	else :
		instance.message_logger.warn( message )
class ControlSubShell ( SimpleSubShell ) :
	"""Sub-shell bound to the control stream.

	Translates the commands in `Commands` into their two-letter wire codes
	and sends them via the handler; replies are processed by `message_handle`.
	"""

	def __init__( self, stream, handler, queue_dict, base_shell, ignore_messages = set(['X']), prompt_templ = " (>{stream}<) |-> ") :
		SimpleSubShell.__init__( self, stream, handler, queue_dict, base_shell, ignore_messages, prompt_templ )
		self.updatePrompt( )
		self.message_function = message_handle
		self.sysinfo = False   # True while a 'sysinfo' reply is pending
		self.killed = False    # True once a 'kill' command has been sent

	def default( self, line ) :
		"""Look up the typed command's wire code and dispatch it."""
		comm, args, line = self.parseline(line)
		try :
			command = Commands[comm]
		except KeyError :
			# Narrowed from a bare 'except' so KeyboardInterrupt/SystemExit
			# are no longer swallowed here.
			self.debug_logger.warn( "No such control command [%s]!" % comm)
			return
		if command == Commands['reset'] :
			self.debug_logger.warn( "Reseting handler" )
			self.resetHandler()
		if command == Commands['sysinfo'] :
			self.sysinfo = True
		if command == Commands['kill'] :
			self.killed = True
		self.debug_logger.warn( "Sending '%s' control command!" % command )
		self.handler.preferred_send( command, self.stream )

	def resetHandler( self ) :
		"""Reset the underlying covert handler."""
		self.handler.reset()
| true | true |
1c33a6f0962c8eedbd246029b47627afbe7bdab3 | 1,040 | py | Python | lmgtfy/helpers.py | opendata/LMGTDFY | 5440d398dd3bdefbdbe5c4f075a0132e6ec9d9c0 | [
"MIT"
] | 120 | 2015-02-18T17:02:09.000Z | 2021-09-02T22:42:20.000Z | lmgtfy/helpers.py | opendata/LMGTDFY | 5440d398dd3bdefbdbe5c4f075a0132e6ec9d9c0 | [
"MIT"
] | 34 | 2015-02-12T16:53:47.000Z | 2016-05-04T20:17:09.000Z | lmgtfy/helpers.py | opendata/LMGTDFY | 5440d398dd3bdefbdbe5c4f075a0132e6ec9d9c0 | [
"MIT"
] | 14 | 2015-02-19T16:39:29.000Z | 2019-01-21T02:57:02.000Z | from datetime import datetime, timedelta
from crispy_forms.layout import Submit
from lmgtfy.models import Domain, DomainSearch, TLD
from lmgtfy.tasks import search_bing_task
class CleanSubmitButton(Submit):
    """Crispy-forms submit button with its CSS classes overridden to
    Bootstrap's 'btn btn-default'."""
    field_classes = 'btn btn-default'
def search_bing(domain):
    """Queue an asynchronous Bing search for *domain*.

    Returns False (and queues nothing) when the domain was already searched
    within the last day; otherwise records a new DomainSearch, dispatches the
    task, and returns True.
    """
    domain_record, _ = Domain.objects.get_or_create(name=domain)
    # Bing does not allow us to search the same domain more than once per day.
    one_day_ago = datetime.now() - timedelta(days=1)
    searched_today = DomainSearch.objects.filter(
        created_at__gte=one_day_ago,
        domain=domain_record,
    ).count()
    if searched_today:
        return False
    new_search = DomainSearch.objects.create(domain=domain_record)
    search_bing_task.apply_async(kwargs={'domain_search_record': new_search})
    return True
def check_valid_tld(domain):
    """Return True if *domain* ends with one of the TLD names stored in the DB."""
    allowed_tlds = TLD.objects.all().values_list('name', flat=True)
    # any() + generator replaces the manual loop/return-flag pattern;
    # still one str.endswith check per allowed TLD.
    return any(domain.endswith(tld) for tld in allowed_tlds)
| 32.5 | 91 | 0.735577 | from datetime import datetime, timedelta
from crispy_forms.layout import Submit
from lmgtfy.models import Domain, DomainSearch, TLD
from lmgtfy.tasks import search_bing_task
class CleanSubmitButton(Submit):
    """Crispy-forms submit button with its CSS classes overridden to
    Bootstrap's 'btn btn-default'."""
    field_classes = 'btn btn-default'
def search_bing(domain):
    """Queue an asynchronous Bing search for *domain*.

    Returns False (and queues nothing) when the domain was already searched
    within the last day; otherwise records a new DomainSearch, dispatches the
    task, and returns True.
    """
    domain_db_record, _created = Domain.objects.get_or_create(name=domain)
    # Bing does not allow us to search the same domain more than once per day.
    recently_searched = DomainSearch.objects.filter(
        created_at__gte=datetime.now()-timedelta(days=1),
        domain=domain_db_record
    ).count()
    if recently_searched:
        return False
    else:
        domain_search_record = DomainSearch.objects.create(domain=domain_db_record)
        search_bing_task.apply_async(kwargs={'domain_search_record': domain_search_record})
        return True
def check_valid_tld(domain):
    """Return True if *domain* ends with one of the TLD names stored in the DB."""
    allowed_tlds = TLD.objects.all().values_list('name', flat=True)
    # any() + generator replaces the manual loop/return-flag pattern;
    # still one str.endswith check per allowed TLD.
    return any(domain.endswith(tld) for tld in allowed_tlds)
| true | true |
1c33a846e2842f65c339d069dc91f7a42d82d6da | 7,410 | py | Python | LaU-reg/experiments/segmentation/option.py | HolmesShuan/Location-aware-Upsampling-for-Semantic-Segmentation | 83822e86570bbff4ca721d80089b5d82f1958852 | [
"BSD-2-Clause"
] | 51 | 2019-11-14T01:48:24.000Z | 2021-11-09T02:42:22.000Z | LaU-reg/experiments/segmentation/option.py | HolmesShuan/Location-aware-Upsampling-for-Semantic-Segmentation | 83822e86570bbff4ca721d80089b5d82f1958852 | [
"BSD-2-Clause"
] | 4 | 2019-11-15T10:14:10.000Z | 2020-03-17T12:14:50.000Z | LaU-reg/experiments/segmentation/option.py | HolmesShuan/Location-aware-Upsampling-for-Semantic-Segmentation | 83822e86570bbff4ca721d80089b5d82f1958852 | [
"BSD-2-Clause"
] | 9 | 2019-11-14T12:39:03.000Z | 2020-03-03T08:27:19.000Z | ###########################################################################
# Created by: Hang Zhang
# Email: zhang.hang@rutgers.edu
# Copyright (c) 2017
###########################################################################
import argparse
import torch
class Options():
def __init__(self):
parser = argparse.ArgumentParser(description='PyTorch \
Segmentation')
# model and dataset
parser.add_argument('--model', type=str, default='encnet',
help='model name (default: encnet)')
parser.add_argument('--backbone', type=str, default='resnet50',
help='backbone name (default: resnet50)')
parser.add_argument('--jpu', action='store_true', default=
False, help='JPU')
parser.add_argument('--dilated', action='store_true', default=
False, help='dilation')
parser.add_argument('--lateral', action='store_true', default=
False, help='employ FPN')
parser.add_argument('--dataset', type=str, default='ade20k',
help='dataset name (default: pascal12)')
parser.add_argument('--workers', type=int, default=8,
metavar='N', help='dataloader threads')
parser.add_argument('--base-size', type=int, default=520,
help='base image size')
parser.add_argument('--crop-size', type=int, default=480,
help='crop image size')
parser.add_argument('--train-split', type=str, default='train',
help='dataset train split (default: train)')
# training hyper params
parser.add_argument('--aux', action='store_true', default= False,
help='Auxilary Loss')
parser.add_argument('--aux-weight', type=float, default=0.2,
help='Auxilary loss weight (default: 0.2)')
parser.add_argument('--se-loss', action='store_true', default= False,
help='Semantic Encoding Loss SE-loss')
parser.add_argument('--se-weight', type=float, default=0.2,
help='SE-loss weight (default: 0.2)')
parser.add_argument('--epochs', type=int, default=None, metavar='N',
help='number of epochs to train (default: auto)')
parser.add_argument('--start_epoch', type=int, default=0,
metavar='N', help='start epochs (default:0)')
parser.add_argument('--batch-size', type=int, default=None,
metavar='N', help='input batch size for \
training (default: auto)')
parser.add_argument('--test-batch-size', type=int, default=None,
metavar='N', help='input batch size for \
testing (default: same as batch size)')
# LaU offset loss
parser.add_argument('--offset-loss', action='store_true', default= True,
help='Location-aware loss')
parser.add_argument('--offset-weight', type=float, default=0.5,
help='offset-loss weight (default: 0.5)')
parser.add_argument('--location-weight', type=float, default=0.125,
help='location regression weight (default: 0.125)')
# optimizer params
parser.add_argument('--lr', type=float, default=None, metavar='LR',
help='learning rate (default: auto)')
parser.add_argument('--lr-scheduler', type=str, default='poly',
help='learning rate scheduler (default: poly)')
parser.add_argument('--momentum', type=float, default=0.9,
metavar='M', help='momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=1e-4,
metavar='M', help='w-decay (default: 1e-4)')
# cuda, seed and logging
parser.add_argument('--no-cuda', action='store_true', default=
False, help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
# checking point
parser.add_argument('--resume', type=str, default=None,
help='put the path to resuming file if needed')
parser.add_argument('--checkname', type=str, default='default',
help='set the checkpoint name')
parser.add_argument('--model-zoo', type=str, default=None,
help='evaluating on model zoo model')
# finetuning pre-trained models
parser.add_argument('--ft', action='store_true', default= False,
help='finetuning on a different dataset')
# evaluation option
parser.add_argument('--split', default='val')
parser.add_argument('--mode', default='testval')
parser.add_argument('--ms', action='store_true', default=False,
help='multi scale & flip')
parser.add_argument('--no-val', action='store_true', default= False,
help='skip validation during training')
parser.add_argument('--save-folder', type=str, default='results',
help = 'path to save images')
# LaU option
parser.add_argument('--batch-size-per-gpu', type=int, default=4,
help='batch size per GPU')
parser.add_argument('--up-factor', type=int, default=4,
help='upsampling factor in LaU')
parser.add_argument('--bottleneck-channel', type=int, default=64,
help='reduce channel number to C')
parser.add_argument('--offset-branch-input-channel', type=int, default=512,
help='input channel number in LaU')
parser.add_argument('--category', type=int, default=59,
help='category number')
parser.add_argument('--downsampled-input-size', type=int, default=60,
help='downsampled input size')
# the parser
self.parser = parser
def parse(self):
args = self.parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
# default settings for epochs, batch_size and lr
if args.epochs is None:
epoches = {
'coco': 30,
'citys': 240,
'pascal_voc': 50,
'pascal_aug': 50,
'pcontext': 80,
'ade20k': 120,
}
args.epochs = epoches[args.dataset.lower()]
if args.batch_size is None:
args.batch_size = 16
if args.test_batch_size is None:
args.test_batch_size = args.batch_size
if args.lr is None:
lrs = {
'coco': 0.01,
'citys': 0.01,
'pascal_voc': 0.0001,
'pascal_aug': 0.001,
'pcontext': 0.001,
'ade20k': 0.004,
}
args.lr = lrs[args.dataset.lower()] / 16 * args.batch_size
print(args)
return args
| 52.928571 | 83 | 0.521457 | ranch-input-channel', type=int, default=512,
help='input channel number in LaU')
parser.add_argument('--category', type=int, default=59,
help='category number')
parser.add_argument('--downsampled-input-size', type=int, default=60,
help='downsampled input size')
self.parser = parser
def parse(self):
args = self.parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.epochs is None:
epoches = {
'coco': 30,
'citys': 240,
'pascal_voc': 50,
'pascal_aug': 50,
'pcontext': 80,
'ade20k': 120,
}
args.epochs = epoches[args.dataset.lower()]
if args.batch_size is None:
args.batch_size = 16
if args.test_batch_size is None:
args.test_batch_size = args.batch_size
if args.lr is None:
lrs = {
'coco': 0.01,
'citys': 0.01,
'pascal_voc': 0.0001,
'pascal_aug': 0.001,
'pcontext': 0.001,
'ade20k': 0.004,
}
args.lr = lrs[args.dataset.lower()] / 16 * args.batch_size
print(args)
return args
| true | true |
1c33a85214b18127e3cd53a3c1cb7390dd0fa6e1 | 1,566 | py | Python | flags/migrations/0003_rename_variant_classification.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | 5 | 2021-01-14T03:34:42.000Z | 2022-03-07T15:34:18.000Z | flags/migrations/0003_rename_variant_classification.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | 551 | 2020-10-19T00:02:38.000Z | 2022-03-30T02:18:22.000Z | flags/migrations/0003_rename_variant_classification.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | null | null | null | # Generated by Django 3.1 on 2020-10-01 07:38
from django.db import migrations
def rename_variant_classification(apps, schema_editor):
FlagTypeContext = apps.get_model("flags", "FlagTypeContext")
FlagType = apps.get_model("flags", "FlagType")
FlagTypeResolution = apps.get_model("flags", "FlagTypeResolution")
Flag = apps.get_model("flags", "Flag")
FlagCollection = apps.get_model("flags", "FlagCollection")
old_context = FlagTypeContext.objects.filter(id="variant_classification").first()
if old_context:
classification_context = FlagTypeContext.objects.create(id='classification', label='Flags for Classifications')
FlagCollection.objects.filter(context=old_context).update(context=classification_context)
FlagType.objects.filter(context=old_context).update(context=classification_context)
for flag_type_value in FlagType.objects.filter(id__startswith="variant_classification").values():
old_id = flag_type_value["id"]
flag_type_value["id"] = old_id.replace("variant_classification", "classification")
ft = FlagType.objects.create(**flag_type_value)
Flag.objects.filter(flag_type_id=old_id).update(flag_type=ft)
FlagTypeResolution.objects.filter(flag_type_id=old_id).update(flag_type=ft)
FlagType.objects.filter(id__startswith="variant_classification").delete()
class Migration(migrations.Migration):
dependencies = [
('flags', '0002_initial_data'),
]
operations = [
migrations.RunPython(rename_variant_classification)
]
| 40.153846 | 119 | 0.742656 |
from django.db import migrations
def rename_variant_classification(apps, schema_editor):
FlagTypeContext = apps.get_model("flags", "FlagTypeContext")
FlagType = apps.get_model("flags", "FlagType")
FlagTypeResolution = apps.get_model("flags", "FlagTypeResolution")
Flag = apps.get_model("flags", "Flag")
FlagCollection = apps.get_model("flags", "FlagCollection")
old_context = FlagTypeContext.objects.filter(id="variant_classification").first()
if old_context:
classification_context = FlagTypeContext.objects.create(id='classification', label='Flags for Classifications')
FlagCollection.objects.filter(context=old_context).update(context=classification_context)
FlagType.objects.filter(context=old_context).update(context=classification_context)
for flag_type_value in FlagType.objects.filter(id__startswith="variant_classification").values():
old_id = flag_type_value["id"]
flag_type_value["id"] = old_id.replace("variant_classification", "classification")
ft = FlagType.objects.create(**flag_type_value)
Flag.objects.filter(flag_type_id=old_id).update(flag_type=ft)
FlagTypeResolution.objects.filter(flag_type_id=old_id).update(flag_type=ft)
FlagType.objects.filter(id__startswith="variant_classification").delete()
class Migration(migrations.Migration):
dependencies = [
('flags', '0002_initial_data'),
]
operations = [
migrations.RunPython(rename_variant_classification)
]
| true | true |
1c33ac662c0ee6f1d7ca9b77490a9526ccbec4a6 | 3,049 | py | Python | authliboclc/refreshtoken.py | pybrarian/oclc-auth-python | fbc6d396d0d8005dbe29d3c6636d44f02f0d8cd0 | [
"Apache-2.0"
] | 20 | 2015-04-08T14:55:32.000Z | 2022-03-28T14:40:17.000Z | authliboclc/refreshtoken.py | pybrarian/oclc-auth-python | fbc6d396d0d8005dbe29d3c6636d44f02f0d8cd0 | [
"Apache-2.0"
] | 4 | 2016-06-16T13:39:48.000Z | 2019-06-04T14:51:08.000Z | authliboclc/refreshtoken.py | pybrarian/oclc-auth-python | fbc6d396d0d8005dbe29d3c6636d44f02f0d8cd0 | [
"Apache-2.0"
] | 5 | 2016-10-12T19:22:32.000Z | 2019-02-27T21:26:43.000Z | # -*- coding: utf-8 -*-
###############################################################################
# Copyright 2014 OCLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""This class represents a refresh token object.
A refresh token can be returned with an Authentication Token and used to request another token if the authentication
token is expiring. Refresh tokens are only returned with Authentication Tokens if the services list includes
'refresh_token'.
"""
import time
import string
class InvalidParameter(Exception):
"""Custom exception - invalid parameter was passed to class"""
def __init__(self, message):
self.message = message
class RefreshToken(object):
"""Class represents a refresh token
Class Variables:
refresh_token string the refresh token string value
expires_at string the ISO 8601 time that the refresh token expires at
expires_in int the number of seconds until the token expires
"""
refresh_token = None
expires_in = None
expires_at = None
def __init__(self, tokenValue=None, expires_in=None, expires_at=None):
"""Constructor.
Args:
tokenValue: string, the refresh token string value
expires_at: string, the ISO 8601 time that the refresh token expires at
expires_in: int, the number of seconds until the token expires
"""
if tokenValue is None or expires_in is None or expires_at is None:
raise InvalidParameter('You must pass these parameters: tokenValue, expires_in and expires_at')
if not isinstance(expires_in, int):
raise InvalidParameter('expires_in must be an int')
self.refresh_token = tokenValue
self.expires_in = expires_in
self.expires_at = expires_at
def is_expired(self):
""" Test if the refresh token is expired
Returns:
isExpired: boolean, true if refresh token is expired
"""
status = False
if time.mktime(time.strptime(self.expires_at, "%Y-%m-%d %H:%M:%SZ")) < time.time():
status = True
return status
def __str__(self):
return string.Template("""refresh_token: $refresh_token
expires_in: $expires_in
expires_at: $expires_at
""").substitute({
'refresh_token': self.refresh_token,
'expires_in': self.expires_in,
'expires_at': self.expires_at
})
| 34.258427 | 120 | 0.642834 | true | true | |
1c33ad31f9732263899e11549699fa7de9573418 | 29,468 | py | Python | tests/sentry/receivers/test_featureadoption.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | 1 | 2019-10-17T17:46:16.000Z | 2019-10-17T17:46:16.000Z | tests/sentry/receivers/test_featureadoption.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/receivers/test_featureadoption.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import json
from django.utils import timezone
from sentry.models import FeatureAdoption, GroupAssignee, GroupTombstone, Rule
from sentry.plugins import IssueTrackingPlugin2, NotificationPlugin
from sentry.signals import (
alert_rule_created,
event_processed,
first_event_received,
project_created,
member_joined,
plugin_enabled,
user_feedback_received,
issue_assigned,
issue_resolved,
advanced_search,
save_search_created,
inbound_filter_toggled,
sso_enabled,
data_scrubber_enabled,
)
from sentry.receivers.rules import DEFAULT_RULE_DATA
from sentry.testutils import TestCase
class FeatureAdoptionTest(TestCase):
def setUp(self):
super(FeatureAdoptionTest, self).setUp()
self.now = timezone.now()
self.owner = self.create_user()
self.organization = self.create_organization(owner=self.owner)
self.team = self.create_team(organization=self.organization)
self.project = self.create_project(teams=[self.team])
def test_bad_feature_slug(self):
FeatureAdoption.objects.record(self.organization.id, "xxx")
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="first_event"
)
assert feature_complete is None
def test_all_passed_feature_slugs_are_complete(self):
event1 = self.create_full_event()
event2 = self.create_full_event(event_id="b")
event_processed.send(project=self.project, event=event1, sender=type(self.project))
event_processed.send(project=self.project, event=event2, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="environment_tracking"
)
assert feature_complete.complete
def test_first_event(self):
event = self.create_event(
project=self.project, platform="javascript", message="javascript error message"
)
first_event_received.send(project=self.project, event=event, sender=type(self.project))
first_event = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="first_event"
)
assert first_event.complete
def test_javascript(self):
group = self.create_group(
project=self.project, platform="javascript", message="javascript error message"
)
event = self.create_event(group=group, data={"platform": "javascript"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
js = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="javascript")
assert js.complete
def test_python(self):
group = self.create_group(
project=self.project, platform="python", message="python error message"
)
event = self.create_event(group=group)
event_processed.send(project=self.project, event=event, sender=type(self.project))
python = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="python")
assert python.complete
def test_node(self):
group = self.create_group(
project=self.project, platform="node", message="node error message"
)
event = self.create_event(group=group, data={"platform": "node"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
node = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="node")
assert node.complete
def test_ruby(self):
group = self.create_group(
project=self.project, platform="ruby", message="ruby error message"
)
event = self.create_event(group=group, data={"platform": "ruby"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
ruby = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="ruby")
assert ruby.complete
def test_java(self):
group = self.create_group(
project=self.project, platform="java", message="java error message"
)
event = self.create_event(group=group, data={"platform": "java"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
java = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="java")
assert java.complete
def test_cocoa(self):
group = self.create_group(
project=self.project, platform="cocoa", message="cocoa error message"
)
event = self.create_event(group=group, data={"platform": "cocoa"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
cocoa = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="cocoa")
assert cocoa.complete
def test_objc(self):
group = self.create_group(
project=self.project, platform="objc", message="objc error message"
)
event = self.create_event(group=group, data={"platform": "objc"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
objc = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="objc")
assert objc.complete
def test_php(self):
group = self.create_group(project=self.project, platform="php", message="php error message")
event = self.create_event(group=group, data={"platform": "php"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
php = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="php")
assert php.complete
def test_go(self):
group = self.create_group(project=self.project, platform="go", message="go error message")
event = self.create_event(group=group, data={"platform": "go"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
go = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="go")
assert go.complete
def test_csharp(self):
group = self.create_group(
project=self.project, platform="csharp", message="csharp error message"
)
event = self.create_event(group=group, data={"platform": "csharp"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
csharp = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="csharp")
assert csharp.complete
def test_perl(self):
group = self.create_group(
project=self.project, platform="perl", message="perl error message"
)
event = self.create_event(group=group, data={"platform": "perl"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
perl = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="perl")
assert perl.complete
def test_elixir(self):
group = self.create_group(
project=self.project, platform="elixir", message="elixir error message"
)
event = self.create_event(group=group, data={"platform": "elixir"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
elixir = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="elixir")
assert elixir.complete
def test_cfml(self):
group = self.create_group(
project=self.project, platform="cfml", message="cfml error message"
)
event = self.create_event(group=group, data={"platform": "cfml"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
cfml = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="cfml")
assert cfml.complete
def test_groovy(self):
group = self.create_group(
project=self.project, platform="groovy", message="groovy error message"
)
event = self.create_event(group=group, data={"platform": "groovy"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
groovy = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="groovy")
assert groovy.complete
def test_csp(self):
group = self.create_group(project=self.project, platform="csp", message="csp error message")
event = self.create_event(group=group, data={"platform": "csp"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
csp = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="csp")
assert csp.complete
def test_release_tracking(self):
event = self.create_full_event()
event_processed.send(project=self.project, event=event, sender=type(self.project))
release_tracking = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="release_tracking"
)
assert release_tracking
def test_environment_tracking(self):
event = self.create_full_event()
event_processed.send(project=self.project, event=event, sender=type(self.project))
environment_tracking = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="environment_tracking"
)
assert environment_tracking
def test_bulk_create(self):
group = self.create_group(
project=self.project, platform="javascript", message="javascript error message"
)
event = self.create_full_event(group=group)
event_processed.send(project=self.project, event=event, sender=type(self.project))
javascript = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="javascript"
)
assert javascript
environment_tracking = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="environment_tracking"
)
assert environment_tracking
release_tracking = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="release_tracking"
)
assert release_tracking
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="user_tracking"
)
assert feature_complete
def test_user_tracking(self):
event = self.create_full_event()
event_processed.send(project=self.project, event=event, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="user_tracking"
)
assert feature_complete
def test_no_user_tracking_for_ip_address_only(self):
"""test to see if just sending ip address doesn't check the user tracking box"""
userless_payload = """
{
"id": "f5dd88e612bc406ba89dfebd09120769",
"project": 11276,
"release": "e1b5d1900526feaf20fe2bc9cad83d392136030a",
"platform": "javascript",
"culprit": "app/components/events/eventEntries in map",
"message": "TypeError: Cannot read property '1' of null",
"tags": [
["environment", "prod"],
["sentry_version", "e1b5d1900526feaf20fe2bc9cad83d392136030a"],
["level", "error"],
["logger", "javascript"],
["sentry:release", "e1b5d1900526feaf20fe2bc9cad83d392136030a"],
["browser", "Chrome 48.0"],
["device", "Other"],
["os", "Windows 10"],
["url", "https://sentry.io/katon-direct/localhost/issues/112734598/"],
["sentry:user", "id:41656"]
],
"errors": [{
"url": "<anonymous>",
"type": "js_no_source"
}],
"extra": {
"session:duration": 40364
},
"exception": {
"exc_omitted": null,
"values": [{
"stacktrace": {
"frames": [{
"function": "batchedUpdates",
"abs_path": "webpack:////usr/src/getsentry/src/sentry/~/react/lib/ReactUpdates.js",
"pre_context": [" // verify that that's the case. (This is called by each top-level update", " // function, like setProps, setState, forceUpdate, etc.; creation and", " // destruction of top-level components is guarded in ReactMount.)", "", " if (!batchingStrategy.isBatchingUpdates) {"],
"post_context": [" return;", " }", "", " dirtyComponents.push(component);", "}"],
"filename": "~/react/lib/ReactUpdates.js",
"module": "react/lib/ReactUpdates",
"colno": 0,
"in_app": false,
"data": {
"orig_filename": "/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js",
"orig_abs_path": "https://media.sentry.io/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js",
"sourcemap": "https://media.sentry.io/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js.map",
"orig_lineno": 37,
"orig_function": "Object.s [as enqueueUpdate]",
"orig_colno": 16101
},
"context_line": " batchingStrategy.batchedUpdates(enqueueUpdate, component);",
"lineno": 176
}],
"frames_omitted": null
},
"type": "TypeError",
"value": "Cannot read property '1' of null",
"module": null
}]
},
"request": {
"url": "https://sentry.io/katon-direct/localhost/issues/112734598/",
"headers": [
["Referer", "https://sentry.io/welcome/"],
["User-Agent", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.109 Safari/537.36"]
]
},
"user": {
"ip_address": "0.0.0.0"
},
"version": "7",
"breadcrumbs": {
"values": [
{
"category": "xhr",
"timestamp": 1496395011.63,
"type": "http",
"data": {
"url": "/api/path/here",
"status_code": "500",
"method": "POST"
}
}
]
}
}"""
userless_event = self.create_event(
event_id="a", platform="javascript", data=json.loads(userless_payload)
)
event_processed.send(project=self.project, event=userless_event, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="user_tracking"
)
assert feature_complete is None
def test_no_env_tracking(self):
"""test to see if just sending ip address doesn't check the user tracking box"""
envless_payload = """
{
"id": "f5dd88e612bc406ba89dfebd09120769",
"project": 11276,
"release": "e1b5d1900526feaf20fe2bc9cad83d392136030a",
"platform": "javascript",
"culprit": "app/components/events/eventEntries in map",
"message": "TypeError: Cannot read property '1' of null",
"tags": [
["sentry_version", "e1b5d1900526feaf20fe2bc9cad83d392136030a"],
["level", "error"],
["logger", "javascript"],
["sentry:release", "e1b5d1900526feaf20fe2bc9cad83d392136030a"],
["browser", "Chrome 48.0"],
["device", "Other"],
["os", "Windows 10"],
["url", "https://sentry.io/katon-direct/localhost/issues/112734598/"],
["sentry:user", "id:41656"]
],
"errors": [{
"url": "<anonymous>",
"type": "js_no_source"
}],
"extra": {
"session:duration": 40364
},
"exception": {
"exc_omitted": null,
"values": [{
"stacktrace": {
"frames": [{
"function": "batchedUpdates",
"abs_path": "webpack:////usr/src/getsentry/src/sentry/~/react/lib/ReactUpdates.js",
"pre_context": [" // verify that that's the case. (This is called by each top-level update", " // function, like setProps, setState, forceUpdate, etc.; creation and", " // destruction of top-level components is guarded in ReactMount.)", "", " if (!batchingStrategy.isBatchingUpdates) {"],
"post_context": [" return;", " }", "", " dirtyComponents.push(component);", "}"],
"filename": "~/react/lib/ReactUpdates.js",
"module": "react/lib/ReactUpdates",
"colno": 0,
"in_app": false,
"data": {
"orig_filename": "/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js",
"orig_abs_path": "https://media.sentry.io/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js",
"sourcemap": "https://media.sentry.io/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js.map",
"orig_lineno": 37,
"orig_function": "Object.s [as enqueueUpdate]",
"orig_colno": 16101
},
"context_line": " batchingStrategy.batchedUpdates(enqueueUpdate, component);",
"lineno": 176
}],
"frames_omitted": null
},
"type": "TypeError",
"value": "Cannot read property '1' of null",
"module": null
}]
},
"request": {
"url": "https://sentry.io/katon-direct/localhost/issues/112734598/",
"headers": [
["Referer", "https://sentry.io/welcome/"],
["User-Agent", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.109 Safari/537.36"]
]
},
"user": {
"ip_address": "0.0.0.0"
},
"version": "7",
"breadcrumbs": {
"values": [
{
"category": "xhr",
"timestamp": 1496395011.63,
"type": "http",
"data": {
"url": "/api/path/here",
"status_code": "500",
"method": "POST"
}
}
]
}
}"""
envless_event = self.create_event(
event_id="a", platform="javascript", data=json.loads(envless_payload)
)
event_processed.send(project=self.project, event=envless_event, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="environment_tracking"
)
assert feature_complete is None
def test_custom_tags(self):
event = self.create_full_event()
event.data["tags"].append(("foo", "bar"))
assert event.get_tag("foo") == "bar"
event_processed.send(project=self.project, event=event, sender=type(self.project))
custom_tags = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="custom_tags"
)
assert custom_tags
def test_source_maps(self):
event = self.create_full_event()
event_processed.send(project=self.project, event=event, sender=type(self.project))
source_maps = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="source_maps"
)
assert source_maps
def test_breadcrumbs(self):
event = self.create_full_event()
event_processed.send(project=self.project, event=event, sender=type(self.project))
breadcrumbs = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="breadcrumbs"
)
assert breadcrumbs
def test_multiple_events(self):
group = self.create_group(
project=self.project, platform="javascript", message="javascript error message"
)
simple_event = self.create_event(group=group, platform="javascript")
first_event_received.send(
project=self.project, event=simple_event, sender=type(self.project)
)
event_processed.send(project=self.project, event=simple_event, sender=type(self.project))
first_event = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="first_event"
)
assert first_event.complete
js = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="javascript")
assert js.complete
full_event = self.create_full_event()
event_processed.send(project=self.project, event=full_event, sender=type(self.project))
release_tracking = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="release_tracking"
)
assert release_tracking
environment_tracking = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="environment_tracking"
)
assert environment_tracking
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="user_tracking"
)
assert feature_complete
source_maps = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="source_maps"
)
assert source_maps
breadcrumbs = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="breadcrumbs"
)
assert breadcrumbs
def test_user_feedback(self):
user_feedback_received.send(project=self.project, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="user_feedback"
)
assert feature_complete
def test_project_created(self):
project_created.send(project=self.project, user=self.owner, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="first_project"
)
assert feature_complete
def test_member_joined(self):
member = self.create_member(
organization=self.organization, teams=[self.team], user=self.create_user()
)
member_joined.send(member=member, organization=self.organization, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="invite_team"
)
assert feature_complete
def test_assignment(self):
GroupAssignee.objects.create(
group_id=self.group.id, user_id=self.user.id, project_id=self.project.id
)
issue_assigned.send(
project=self.project, group=self.group, user=self.user, sender="something"
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="assignment"
)
assert feature_complete
def test_resolved_in_release(self):
issue_resolved.send(
organization_id=self.organization.id,
project=self.project,
group=self.group,
user=self.user,
resolution_type="in_next_release",
sender=type(self.project),
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="resolved_in_release"
)
assert feature_complete
def test_resolved_manually(self):
issue_resolved.send(
organization_id=self.organization.id,
project=self.project,
group=self.group,
user=self.user,
resolution_type="now",
sender=type(self.project),
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="resolved_in_release"
)
assert not feature_complete
def test_advanced_search(self):
advanced_search.send(project=self.project, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="advanced_search"
)
assert feature_complete
def test_save_search(self):
save_search_created.send(project=self.project, user=self.user, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="saved_search"
)
assert feature_complete
def test_inbound_filters(self):
inbound_filter_toggled.send(project=self.project, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="inbound_filters"
)
assert feature_complete
def test_alert_rules(self):
rule = Rule.objects.create(
project=self.project, label="Trivially modified rule", data=DEFAULT_RULE_DATA
)
alert_rule_created.send(
user=self.owner, project=self.project, rule=rule, sender=type(self.project)
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="alert_rules"
)
assert feature_complete
def test_issue_tracker_plugin(self):
plugin_enabled.send(
plugin=IssueTrackingPlugin2(),
project=self.project,
user=self.owner,
sender=type(self.project),
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="issue_tracker_integration"
)
assert feature_complete
def test_notification_plugin(self):
plugin_enabled.send(
plugin=NotificationPlugin(),
project=self.project,
user=self.owner,
sender=type(self.project),
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="notification_integration"
)
assert feature_complete
def test_sso(self):
sso_enabled.send(
organization=self.organization,
user=self.user,
provider="google",
sender=type(self.organization),
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="sso"
)
assert feature_complete
def test_data_scrubber(self):
data_scrubber_enabled.send(organization=self.organization, sender=type(self.organization))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="data_scrubbers"
)
assert feature_complete
def test_delete_and_discard(self):
GroupTombstone.objects.create(previous_group_id=self.group.id, project=self.project)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="delete_and_discard"
)
assert feature_complete
| 43.335294 | 324 | 0.591794 | from __future__ import absolute_import
import json
from django.utils import timezone
from sentry.models import FeatureAdoption, GroupAssignee, GroupTombstone, Rule
from sentry.plugins import IssueTrackingPlugin2, NotificationPlugin
from sentry.signals import (
alert_rule_created,
event_processed,
first_event_received,
project_created,
member_joined,
plugin_enabled,
user_feedback_received,
issue_assigned,
issue_resolved,
advanced_search,
save_search_created,
inbound_filter_toggled,
sso_enabled,
data_scrubber_enabled,
)
from sentry.receivers.rules import DEFAULT_RULE_DATA
from sentry.testutils import TestCase
class FeatureAdoptionTest(TestCase):
def setUp(self):
super(FeatureAdoptionTest, self).setUp()
self.now = timezone.now()
self.owner = self.create_user()
self.organization = self.create_organization(owner=self.owner)
self.team = self.create_team(organization=self.organization)
self.project = self.create_project(teams=[self.team])
def test_bad_feature_slug(self):
FeatureAdoption.objects.record(self.organization.id, "xxx")
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="first_event"
)
assert feature_complete is None
def test_all_passed_feature_slugs_are_complete(self):
event1 = self.create_full_event()
event2 = self.create_full_event(event_id="b")
event_processed.send(project=self.project, event=event1, sender=type(self.project))
event_processed.send(project=self.project, event=event2, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="environment_tracking"
)
assert feature_complete.complete
def test_first_event(self):
event = self.create_event(
project=self.project, platform="javascript", message="javascript error message"
)
first_event_received.send(project=self.project, event=event, sender=type(self.project))
first_event = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="first_event"
)
assert first_event.complete
def test_javascript(self):
group = self.create_group(
project=self.project, platform="javascript", message="javascript error message"
)
event = self.create_event(group=group, data={"platform": "javascript"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
js = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="javascript")
assert js.complete
def test_python(self):
group = self.create_group(
project=self.project, platform="python", message="python error message"
)
event = self.create_event(group=group)
event_processed.send(project=self.project, event=event, sender=type(self.project))
python = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="python")
assert python.complete
def test_node(self):
group = self.create_group(
project=self.project, platform="node", message="node error message"
)
event = self.create_event(group=group, data={"platform": "node"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
node = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="node")
assert node.complete
def test_ruby(self):
group = self.create_group(
project=self.project, platform="ruby", message="ruby error message"
)
event = self.create_event(group=group, data={"platform": "ruby"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
ruby = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="ruby")
assert ruby.complete
def test_java(self):
group = self.create_group(
project=self.project, platform="java", message="java error message"
)
event = self.create_event(group=group, data={"platform": "java"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
java = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="java")
assert java.complete
def test_cocoa(self):
group = self.create_group(
project=self.project, platform="cocoa", message="cocoa error message"
)
event = self.create_event(group=group, data={"platform": "cocoa"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
cocoa = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="cocoa")
assert cocoa.complete
def test_objc(self):
group = self.create_group(
project=self.project, platform="objc", message="objc error message"
)
event = self.create_event(group=group, data={"platform": "objc"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
objc = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="objc")
assert objc.complete
def test_php(self):
group = self.create_group(project=self.project, platform="php", message="php error message")
event = self.create_event(group=group, data={"platform": "php"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
php = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="php")
assert php.complete
def test_go(self):
group = self.create_group(project=self.project, platform="go", message="go error message")
event = self.create_event(group=group, data={"platform": "go"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
go = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="go")
assert go.complete
def test_csharp(self):
group = self.create_group(
project=self.project, platform="csharp", message="csharp error message"
)
event = self.create_event(group=group, data={"platform": "csharp"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
csharp = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="csharp")
assert csharp.complete
def test_perl(self):
group = self.create_group(
project=self.project, platform="perl", message="perl error message"
)
event = self.create_event(group=group, data={"platform": "perl"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
perl = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="perl")
assert perl.complete
def test_elixir(self):
group = self.create_group(
project=self.project, platform="elixir", message="elixir error message"
)
event = self.create_event(group=group, data={"platform": "elixir"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
elixir = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="elixir")
assert elixir.complete
def test_cfml(self):
group = self.create_group(
project=self.project, platform="cfml", message="cfml error message"
)
event = self.create_event(group=group, data={"platform": "cfml"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
cfml = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="cfml")
assert cfml.complete
def test_groovy(self):
group = self.create_group(
project=self.project, platform="groovy", message="groovy error message"
)
event = self.create_event(group=group, data={"platform": "groovy"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
groovy = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="groovy")
assert groovy.complete
def test_csp(self):
group = self.create_group(project=self.project, platform="csp", message="csp error message")
event = self.create_event(group=group, data={"platform": "csp"})
event_processed.send(project=self.project, event=event, sender=type(self.project))
csp = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="csp")
assert csp.complete
def test_release_tracking(self):
event = self.create_full_event()
event_processed.send(project=self.project, event=event, sender=type(self.project))
release_tracking = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="release_tracking"
)
assert release_tracking
def test_environment_tracking(self):
event = self.create_full_event()
event_processed.send(project=self.project, event=event, sender=type(self.project))
environment_tracking = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="environment_tracking"
)
assert environment_tracking
def test_bulk_create(self):
group = self.create_group(
project=self.project, platform="javascript", message="javascript error message"
)
event = self.create_full_event(group=group)
event_processed.send(project=self.project, event=event, sender=type(self.project))
javascript = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="javascript"
)
assert javascript
environment_tracking = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="environment_tracking"
)
assert environment_tracking
release_tracking = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="release_tracking"
)
assert release_tracking
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="user_tracking"
)
assert feature_complete
def test_user_tracking(self):
event = self.create_full_event()
event_processed.send(project=self.project, event=event, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="user_tracking"
)
assert feature_complete
def test_no_user_tracking_for_ip_address_only(self):
userless_payload = """
{
"id": "f5dd88e612bc406ba89dfebd09120769",
"project": 11276,
"release": "e1b5d1900526feaf20fe2bc9cad83d392136030a",
"platform": "javascript",
"culprit": "app/components/events/eventEntries in map",
"message": "TypeError: Cannot read property '1' of null",
"tags": [
["environment", "prod"],
["sentry_version", "e1b5d1900526feaf20fe2bc9cad83d392136030a"],
["level", "error"],
["logger", "javascript"],
["sentry:release", "e1b5d1900526feaf20fe2bc9cad83d392136030a"],
["browser", "Chrome 48.0"],
["device", "Other"],
["os", "Windows 10"],
["url", "https://sentry.io/katon-direct/localhost/issues/112734598/"],
["sentry:user", "id:41656"]
],
"errors": [{
"url": "<anonymous>",
"type": "js_no_source"
}],
"extra": {
"session:duration": 40364
},
"exception": {
"exc_omitted": null,
"values": [{
"stacktrace": {
"frames": [{
"function": "batchedUpdates",
"abs_path": "webpack:////usr/src/getsentry/src/sentry/~/react/lib/ReactUpdates.js",
"pre_context": [" // verify that that's the case. (This is called by each top-level update", " // function, like setProps, setState, forceUpdate, etc.; creation and", " // destruction of top-level components is guarded in ReactMount.)", "", " if (!batchingStrategy.isBatchingUpdates) {"],
"post_context": [" return;", " }", "", " dirtyComponents.push(component);", "}"],
"filename": "~/react/lib/ReactUpdates.js",
"module": "react/lib/ReactUpdates",
"colno": 0,
"in_app": false,
"data": {
"orig_filename": "/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js",
"orig_abs_path": "https://media.sentry.io/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js",
"sourcemap": "https://media.sentry.io/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js.map",
"orig_lineno": 37,
"orig_function": "Object.s [as enqueueUpdate]",
"orig_colno": 16101
},
"context_line": " batchingStrategy.batchedUpdates(enqueueUpdate, component);",
"lineno": 176
}],
"frames_omitted": null
},
"type": "TypeError",
"value": "Cannot read property '1' of null",
"module": null
}]
},
"request": {
"url": "https://sentry.io/katon-direct/localhost/issues/112734598/",
"headers": [
["Referer", "https://sentry.io/welcome/"],
["User-Agent", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.109 Safari/537.36"]
]
},
"user": {
"ip_address": "0.0.0.0"
},
"version": "7",
"breadcrumbs": {
"values": [
{
"category": "xhr",
"timestamp": 1496395011.63,
"type": "http",
"data": {
"url": "/api/path/here",
"status_code": "500",
"method": "POST"
}
}
]
}
}"""
userless_event = self.create_event(
event_id="a", platform="javascript", data=json.loads(userless_payload)
)
event_processed.send(project=self.project, event=userless_event, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="user_tracking"
)
assert feature_complete is None
def test_no_env_tracking(self):
envless_payload = """
{
"id": "f5dd88e612bc406ba89dfebd09120769",
"project": 11276,
"release": "e1b5d1900526feaf20fe2bc9cad83d392136030a",
"platform": "javascript",
"culprit": "app/components/events/eventEntries in map",
"message": "TypeError: Cannot read property '1' of null",
"tags": [
["sentry_version", "e1b5d1900526feaf20fe2bc9cad83d392136030a"],
["level", "error"],
["logger", "javascript"],
["sentry:release", "e1b5d1900526feaf20fe2bc9cad83d392136030a"],
["browser", "Chrome 48.0"],
["device", "Other"],
["os", "Windows 10"],
["url", "https://sentry.io/katon-direct/localhost/issues/112734598/"],
["sentry:user", "id:41656"]
],
"errors": [{
"url": "<anonymous>",
"type": "js_no_source"
}],
"extra": {
"session:duration": 40364
},
"exception": {
"exc_omitted": null,
"values": [{
"stacktrace": {
"frames": [{
"function": "batchedUpdates",
"abs_path": "webpack:////usr/src/getsentry/src/sentry/~/react/lib/ReactUpdates.js",
"pre_context": [" // verify that that's the case. (This is called by each top-level update", " // function, like setProps, setState, forceUpdate, etc.; creation and", " // destruction of top-level components is guarded in ReactMount.)", "", " if (!batchingStrategy.isBatchingUpdates) {"],
"post_context": [" return;", " }", "", " dirtyComponents.push(component);", "}"],
"filename": "~/react/lib/ReactUpdates.js",
"module": "react/lib/ReactUpdates",
"colno": 0,
"in_app": false,
"data": {
"orig_filename": "/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js",
"orig_abs_path": "https://media.sentry.io/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js",
"sourcemap": "https://media.sentry.io/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js.map",
"orig_lineno": 37,
"orig_function": "Object.s [as enqueueUpdate]",
"orig_colno": 16101
},
"context_line": " batchingStrategy.batchedUpdates(enqueueUpdate, component);",
"lineno": 176
}],
"frames_omitted": null
},
"type": "TypeError",
"value": "Cannot read property '1' of null",
"module": null
}]
},
"request": {
"url": "https://sentry.io/katon-direct/localhost/issues/112734598/",
"headers": [
["Referer", "https://sentry.io/welcome/"],
["User-Agent", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.109 Safari/537.36"]
]
},
"user": {
"ip_address": "0.0.0.0"
},
"version": "7",
"breadcrumbs": {
"values": [
{
"category": "xhr",
"timestamp": 1496395011.63,
"type": "http",
"data": {
"url": "/api/path/here",
"status_code": "500",
"method": "POST"
}
}
]
}
}"""
envless_event = self.create_event(
event_id="a", platform="javascript", data=json.loads(envless_payload)
)
event_processed.send(project=self.project, event=envless_event, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="environment_tracking"
)
assert feature_complete is None
def test_custom_tags(self):
event = self.create_full_event()
event.data["tags"].append(("foo", "bar"))
assert event.get_tag("foo") == "bar"
event_processed.send(project=self.project, event=event, sender=type(self.project))
custom_tags = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="custom_tags"
)
assert custom_tags
def test_source_maps(self):
event = self.create_full_event()
event_processed.send(project=self.project, event=event, sender=type(self.project))
source_maps = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="source_maps"
)
assert source_maps
def test_breadcrumbs(self):
event = self.create_full_event()
event_processed.send(project=self.project, event=event, sender=type(self.project))
breadcrumbs = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="breadcrumbs"
)
assert breadcrumbs
def test_multiple_events(self):
group = self.create_group(
project=self.project, platform="javascript", message="javascript error message"
)
simple_event = self.create_event(group=group, platform="javascript")
first_event_received.send(
project=self.project, event=simple_event, sender=type(self.project)
)
event_processed.send(project=self.project, event=simple_event, sender=type(self.project))
first_event = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="first_event"
)
assert first_event.complete
js = FeatureAdoption.objects.get_by_slug(organization=self.organization, slug="javascript")
assert js.complete
full_event = self.create_full_event()
event_processed.send(project=self.project, event=full_event, sender=type(self.project))
release_tracking = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="release_tracking"
)
assert release_tracking
environment_tracking = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="environment_tracking"
)
assert environment_tracking
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="user_tracking"
)
assert feature_complete
source_maps = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="source_maps"
)
assert source_maps
breadcrumbs = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="breadcrumbs"
)
assert breadcrumbs
def test_user_feedback(self):
user_feedback_received.send(project=self.project, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="user_feedback"
)
assert feature_complete
def test_project_created(self):
project_created.send(project=self.project, user=self.owner, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="first_project"
)
assert feature_complete
def test_member_joined(self):
member = self.create_member(
organization=self.organization, teams=[self.team], user=self.create_user()
)
member_joined.send(member=member, organization=self.organization, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="invite_team"
)
assert feature_complete
def test_assignment(self):
GroupAssignee.objects.create(
group_id=self.group.id, user_id=self.user.id, project_id=self.project.id
)
issue_assigned.send(
project=self.project, group=self.group, user=self.user, sender="something"
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="assignment"
)
assert feature_complete
def test_resolved_in_release(self):
issue_resolved.send(
organization_id=self.organization.id,
project=self.project,
group=self.group,
user=self.user,
resolution_type="in_next_release",
sender=type(self.project),
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="resolved_in_release"
)
assert feature_complete
def test_resolved_manually(self):
issue_resolved.send(
organization_id=self.organization.id,
project=self.project,
group=self.group,
user=self.user,
resolution_type="now",
sender=type(self.project),
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="resolved_in_release"
)
assert not feature_complete
def test_advanced_search(self):
advanced_search.send(project=self.project, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="advanced_search"
)
assert feature_complete
def test_save_search(self):
save_search_created.send(project=self.project, user=self.user, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="saved_search"
)
assert feature_complete
def test_inbound_filters(self):
inbound_filter_toggled.send(project=self.project, sender=type(self.project))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="inbound_filters"
)
assert feature_complete
def test_alert_rules(self):
rule = Rule.objects.create(
project=self.project, label="Trivially modified rule", data=DEFAULT_RULE_DATA
)
alert_rule_created.send(
user=self.owner, project=self.project, rule=rule, sender=type(self.project)
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="alert_rules"
)
assert feature_complete
def test_issue_tracker_plugin(self):
plugin_enabled.send(
plugin=IssueTrackingPlugin2(),
project=self.project,
user=self.owner,
sender=type(self.project),
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="issue_tracker_integration"
)
assert feature_complete
def test_notification_plugin(self):
plugin_enabled.send(
plugin=NotificationPlugin(),
project=self.project,
user=self.owner,
sender=type(self.project),
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="notification_integration"
)
assert feature_complete
def test_sso(self):
sso_enabled.send(
organization=self.organization,
user=self.user,
provider="google",
sender=type(self.organization),
)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="sso"
)
assert feature_complete
def test_data_scrubber(self):
data_scrubber_enabled.send(organization=self.organization, sender=type(self.organization))
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="data_scrubbers"
)
assert feature_complete
def test_delete_and_discard(self):
GroupTombstone.objects.create(previous_group_id=self.group.id, project=self.project)
feature_complete = FeatureAdoption.objects.get_by_slug(
organization=self.organization, slug="delete_and_discard"
)
assert feature_complete
| true | true |
1c33aefdf59902ba2acdb9aad1f915e9bd0231db | 1,716 | py | Python | examples/dp-sgd-mnist/server.py | andreea-zaharia/flower | c576f0118e5c3d7a7d774dc156fb4b6db194655d | [
"Apache-2.0"
] | null | null | null | examples/dp-sgd-mnist/server.py | andreea-zaharia/flower | c576f0118e5c3d7a7d774dc156fb4b6db194655d | [
"Apache-2.0"
] | null | null | null | examples/dp-sgd-mnist/server.py | andreea-zaharia/flower | c576f0118e5c3d7a7d774dc156fb4b6db194655d | [
"Apache-2.0"
] | null | null | null | import argparse
import os
import tensorflow as tf
import flwr as fl
import common
# Make TensorFlow logs less verbose
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
def get_eval_fn(model):
    """Return a server-side evaluation function closed over the MNIST test set."""

    # Fetch and preprocess the test split once, outside the per-round callback,
    # to avoid repeating the work on every evaluation.
    _, (test_data, test_labels) = tf.keras.datasets.mnist.load_data()
    test_data, test_labels = common.preprocess(test_data, test_labels)

    def evaluate(weights: fl.common.Weights):
        """Evaluate the latest global weights on the held-out test set."""
        model.set_weights(weights)  # Update model with the latest parameters
        loss, accuracy = model.evaluate(test_data, test_labels)
        return loss, {"accuracy": accuracy}

    return evaluate
def main(args) -> None:
    """Build the global model, configure FedAvg, and run the Flower server.

    :param args: parsed CLI namespace with num_clients, num_rounds, fraction_fit
    """
    model = common.create_cnn_model()
    model.compile(
        "sgd",
        loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
        metrics=["accuracy"],
    )

    # Federated averaging seeded with the freshly initialized model weights.
    strategy = fl.server.strategy.FedAvg(
        fraction_fit=args.fraction_fit,
        min_available_clients=args.num_clients,
        eval_fn=get_eval_fn(model),
        initial_parameters=fl.common.weights_to_parameters(model.get_weights()),
    )

    fl.server.start_server(strategy=strategy, config={"num_rounds": args.num_rounds})
if __name__ == "__main__":
    # Parse CLI options and hand them straight to main().
    parser = argparse.ArgumentParser(description="Server Script")
    parser.add_argument("--num-clients", default=2, type=int)
    parser.add_argument("--num-rounds", default=1, type=int)
    parser.add_argument("--fraction-fit", default=1.0, type=float)
    main(parser.parse_args())
| 30.642857 | 80 | 0.706294 | import argparse
import os
import tensorflow as tf
import flwr as fl
import common
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
def get_eval_fn(model):
_, test = tf.keras.datasets.mnist.load_data()
test_data, test_labels = test
test_data, test_labels = common.preprocess(test_data, test_labels)
def evaluate(weights: fl.common.Weights):
model.set_weights(weights)
loss, accuracy = model.evaluate(test_data, test_labels)
return loss, {"accuracy": accuracy}
return evaluate
def main(args) -> None:
model = common.create_cnn_model()
loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
model.compile("sgd", loss=loss, metrics=["accuracy"])
strategy = fl.server.strategy.FedAvg(
fraction_fit=args.fraction_fit,
min_available_clients=args.num_clients,
eval_fn=get_eval_fn(model),
initial_parameters=fl.common.weights_to_parameters(model.get_weights()),
)
fl.server.start_server(
strategy=strategy,
config={"num_rounds": args.num_rounds},
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Server Script")
parser.add_argument("--num-clients", default=2, type=int)
parser.add_argument("--num-rounds", default=1, type=int)
parser.add_argument("--fraction-fit", default=1.0, type=float)
args = parser.parse_args()
main(args)
| true | true |
1c33afb249e0a2b024fc113cea6c70dec1148ad2 | 14 | py | Python | example_snippets/multimenus_snippets/NewSnippets/SymPy/Constants/Rational numbers.py | kuanpern/jupyterlab-snippets-multimenus | 477f51cfdbad7409eab45abe53cf774cd70f380c | [
"BSD-3-Clause"
] | null | null | null | example_snippets/multimenus_snippets/NewSnippets/SymPy/Constants/Rational numbers.py | kuanpern/jupyterlab-snippets-multimenus | 477f51cfdbad7409eab45abe53cf774cd70f380c | [
"BSD-3-Clause"
] | null | null | null | example_snippets/multimenus_snippets/NewSnippets/SymPy/Constants/Rational numbers.py | kuanpern/jupyterlab-snippets-multimenus | 477f51cfdbad7409eab45abe53cf774cd70f380c | [
"BSD-3-Clause"
] | 1 | 2021-02-04T04:51:48.000Z | 2021-02-04T04:51:48.000Z | Rational(3, 7) | 14 | 14 | 0.714286 | Rational(3, 7) | true | true |
1c33b100dd29628c27cade81487d0558c654e802 | 27,834 | py | Python | astromodels/core/model_parser.py | domeckert/astromodels | 541e589c55969ce710bcc6eca583a1736b03c7d8 | [
"BSD-3-Clause"
] | null | null | null | astromodels/core/model_parser.py | domeckert/astromodels | 541e589c55969ce710bcc6eca583a1736b03c7d8 | [
"BSD-3-Clause"
] | null | null | null | astromodels/core/model_parser.py | domeckert/astromodels | 541e589c55969ce710bcc6eca583a1736b03c7d8 | [
"BSD-3-Clause"
] | null | null | null | from builtins import object, str
__author__ = "giacomov"
import re
import warnings
from astromodels.core import (model, parameter, polarization, sky_direction,
spectral_component)
from astromodels.core.my_yaml import my_yaml
from astromodels.functions import function
from astromodels.sources import extended_source, particle_source, point_source
from astromodels.sources.source import (EXTENDED_SOURCE, PARTICLE_SOURCE,
POINT_SOURCE)
from astromodels.utils.logging import setup_logger
log = setup_logger(__name__)
class ModelIOError(IOError):
    """Raised when the model file cannot be opened or read from disk."""
    pass
class ModelYAMLError(my_yaml.YAMLError):
    """Raised when the model file exists but is not valid YAML."""
    pass
class ModelSyntaxError(RuntimeError):
    """Raised when the model definition parses as YAML but violates the model syntax."""
    pass
def load_model(filename):
    """
    Load a model from a file.

    :param filename: the name of the file containing the model
    :return: an instance of a Model
    """
    return ModelParser(filename).get_model()
def clone_model(model_instance):
    """
    Return a copy of the given model with all objects cloned. This is equivalent to
    saving the model to a file and reloading it, but without writing or reading
    to/from disk. The original model is not touched.

    :param model_instance: model to be cloned
    :return: a cloned copy of the given model
    """
    serialized = model_instance.to_dict_with_types()
    return ModelParser(model_dict=serialized).get_model()
def model_unpickler(state):
    """Rebuild a Model from its serialized dictionary state (pickle support hook)."""
    parser = ModelParser(model_dict=state)
    return parser.get_model()
class ModelParser(object):
    """
    Translate a serialized model (a YAML file or an equivalent dictionary)
    into a Model instance.

    Parsing happens at construction time and collects sources, independent
    variables, external parameters, parameter links and extra setups;
    get_model() then assembles them into the final Model.
    """

    def __init__(self, model_file=None, model_dict=None):
        """
        :param model_file: path of a YAML file containing the model
            (mutually exclusive with model_dict)
        :param model_dict: an already-deserialized model dictionary
        :raises ModelIOError: if the file cannot be read
        :raises ModelYAMLError: if the file is not valid YAML
        """
        assert (model_file is not None) or (model_dict is not None), (
            "You have to provide either a model file or a" "model dictionary"
        )

        if model_file is not None:
            # Read model file and deserialize into a dictionary
            try:
                with open(model_file) as f:
                    self._model_dict = my_yaml.load(f, Loader=my_yaml.FullLoader)
            except IOError:
                raise ModelIOError(
                    "File %s cannot be read. Check path and permissions for current user."
                    % model_file
                )
            except my_yaml.YAMLError:
                raise ModelYAMLError(
                    "Could not parse file %s. Check your syntax." % model_file
                )
        else:
            self._model_dict = model_dict

        self._parse()

    def _parse(self):
        """Traverse the model dictionary and build all the needed objects."""
        # The first level of the dictionary contains sources and/or
        # stand-alone variables, distinguished by a tag in their key
        self._sources = []
        self._independent_variables = []
        self._external_parameters = []
        self._links = []
        self._external_parameter_links = []
        self._extra_setups = []

        for source_or_var_name, source_or_var_definition in list(
            self._model_dict.items()
        ):
            if source_or_var_name.find("(IndependentVariable)") > 0:
                var_name = source_or_var_name.split("(")[0].replace(" ", "")

                this_parser = IndependentVariableParser(
                    var_name, source_or_var_definition
                )

                res = this_parser.get_variable()

                assert isinstance(res, parameter.IndependentVariable)

                self._independent_variables.append(res)

            elif source_or_var_name.find("(Parameter)") > 0:
                var_name = source_or_var_name.split("(")[0].replace(" ", "")

                this_parser = ParameterParser(var_name, source_or_var_definition)

                res = this_parser.get_variable()

                assert isinstance(res, parameter.Parameter)

                self._external_parameters.append(res)

                self._links.extend(this_parser.links)

            else:
                # Anything else is a source definition
                this_parser = SourceParser(source_or_var_name, source_or_var_definition)

                res = this_parser.get_source()

                assert (
                    isinstance(res, point_source.PointSource)
                    or isinstance(res, extended_source.ExtendedSource)
                    or isinstance(res, particle_source.ParticleSource)
                )

                self._sources.append(res)

                self._links.extend(this_parser.links)

                self._extra_setups.extend(this_parser.extra_setups)

    def get_model(self):
        """
        Assemble and return the Model instance from the parsed ingredients.

        :return: a Model instance
        """
        # Instance the model with all the parsed sources
        new_model = model.Model(*self._sources)

        # Now set up IndependentVariable instances (if any)
        for independent_variable in self._independent_variables:
            new_model.add_independent_variable(independent_variable)

        # Now set up external parameters (if any).
        # FIX: the loop variable used to be called 'parameter', shadowing the
        # astromodels 'parameter' module imported at the top of the file
        for external_parameter in self._external_parameters:
            new_model.add_external_parameter(external_parameter)

        # Now set up the links
        for link in self._links:
            path = link["parameter_path"]
            variable = link["variable"]
            law = link["law"]

            new_model[path].add_auxiliary_variable(new_model[variable], law)

        # Finally the extra_setups (if any)
        for extra_setup in self._extra_setups:
            path = extra_setup["function_path"]

            # 'prop' instead of 'property' to avoid shadowing the builtin
            for prop, value in list(extra_setup["extra_setup"].items()):
                # If the value is a path into the new model, resolve it;
                # otherwise use the literal value
                if value in new_model:
                    setattr(new_model[path], prop, new_model[value])
                else:
                    setattr(new_model[path], prop, value)

        return new_model
class IndependentVariableParser(object):
    """Turn a serialized independent-variable definition into an
    IndependentVariable instance."""

    def __init__(self, name, definition):
        # The definition dictionary maps directly onto the constructor's
        # keyword arguments; copy it so the caller's dict is not shared
        kwargs = dict(definition)

        self._variable = parameter.IndependentVariable(name, **kwargs)

    def get_variable(self):
        """Return the parsed IndependentVariable instance."""
        return self._variable
class ParameterParser(object):
    """
    Parse the definition of a stand-alone parameter, i.e., a parameter defined
    outside of any function (an "external parameter" of the model).

    Handles an optional 'prior' entry, and the "f(path)" value syntax which
    links this parameter to another parameter or independent variable through
    a law.
    """

    def __init__(self, name, definition):
        """
        :param name: name of the parameter
        :param definition: dictionary with the parameter specification.
            NOTE: it is modified in place ('prior' is replaced by a function
            instance; for linked parameters 'law' is removed and 'value'
            replaced by a placeholder)
        """
        self._links = []

        # NOTE: this is triggered only for parameters outside of functions

        if "prior" in definition:
            # The prior is specified as {function name: parameters}; build the
            # corresponding function instance first.
            # FIX: an empty 'prior' dict raises IndexError, which the original
            # 'except KeyError' did not catch
            try:
                prior_name = list(definition["prior"].keys())[0]
                prior_definition = definition["prior"][prior_name]
            except (KeyError, IndexError):  # pragma: no cover
                raise ModelSyntaxError("The prior for parameter %s is malformed" % name)

            # parse the function
            shape_parser = ShapeParser(name)

            prior_instance = shape_parser.parse(name, prior_name, prior_definition)

            # Substitute the definition with the instance, so that the
            # Parameter constructor below receives a ready-made function
            definition["prior"] = prior_instance

        # Check if this is a linked parameter, i.e., if 'value' is something
        # like f(source.spectrum.powerlaw.index)
        matches = re.findall(r"""f\((.+)\)""", str(definition["value"]))

        if matches:
            # This parameter is linked to another parameter (or to an
            # IndependentVariable such as time) through a law
            linked_variable = matches[0]

            if "law" not in definition:  # pragma: no cover
                # BUG FIX: the original message interpolated 'function_name',
                # a variable only bound when a 'prior' was present, so this
                # path raised NameError instead of the intended error
                raise ModelSyntaxError(
                    "The parameter %s is linked to %s but lacks a 'law' attribute"
                    % (name, linked_variable)
                )

            link_function_name = list(definition["law"].keys())[0]

            # Parse the law which maps the linked variable onto this
            # parameter's value
            function_parser = ShapeParser(name)

            link_function_instance = function_parser.parse(
                name, link_function_name, definition["law"][link_function_name]
            )

            self._links.append(
                {
                    "parameter_path": name,
                    "law": link_function_instance,
                    "variable": linked_variable,
                }
            )

            # 'law' is not a Parameter constructor argument: remove it
            definition.pop("law", None)

            # This parameter's value will be overwritten when the link is
            # established; use a placeholder for now
            definition["value"] = 1.0

        self._variable = parameter.Parameter(name, **definition)

    def get_variable(self):
        """Return the parsed Parameter instance."""
        return self._variable

    @property
    def links(self):
        """Link dictionaries (parameter_path, law, variable) found while parsing."""
        return self._links
class SourceParser(object):
    """
    Parse one source definition and build the corresponding source instance
    (PointSource, ExtendedSource or ParticleSource).

    The source type is encoded in parentheses in the dictionary key, e.g.
    "crab (point source)".
    """

    def __init__(self, source_name, source_definition):
        """
        :param source_name: the key of the source in the model dictionary,
            including the source type specification in parentheses
        :param source_definition: the dictionary describing the source
        :raises ModelSyntaxError: if the type is not recognized or the
            definition is malformed
        """
        # Get the type of the source (point, extended or particle)
        try:
            source_type = re.findall(
                r"\((%s|%s|%s)\)" % (POINT_SOURCE, EXTENDED_SOURCE, PARTICLE_SOURCE),
                source_name,
            )[-1]
        except IndexError:  # pragma: no cover
            raise ModelSyntaxError(
                "Don't recognize type for source '%s'. "
                "Valid types are '%s', '%s' or '%s'."
                % (source_name, POINT_SOURCE, EXTENDED_SOURCE, PARTICLE_SOURCE)
            )
        else:
            # Strip the source_type from the name
            source_name = source_name.split()[0]

        self._source_name = source_name

        # This will store the links (if any)
        self._links = []

        # This will store extra_setups (if any), used sometimes. For example,
        # the function which uses naima to make a synchrotron spectrum uses
        # this to save and set up the particle distribution
        self._extra_setups = []

        if source_type == POINT_SOURCE:
            self._parsed_source = self._parse_point_source(source_definition)
        elif source_type == EXTENDED_SOURCE:
            self._parsed_source = self._parse_extended_source(source_definition)
        elif source_type == PARTICLE_SOURCE:
            self._parsed_source = self._parse_particle_source(source_definition)

    @property
    def extra_setups(self):
        """Extra setup dictionaries (function_path, extra_setup) collected while parsing."""
        return self._extra_setups

    @property
    def links(self):
        """Link dictionaries (parameter_path, law, variable) collected while parsing."""
        return self._links

    def get_source(self):
        """Return the parsed source instance."""
        return self._parsed_source

    def _parse_spectral_components(self, source_definition):
        # Build the list of SpectralComponent instances from the 'spectrum'
        # entry of the given definition (shared by all source types)
        return [
            self._parse_spectral_component(component_name, component_definition)
            for component_name, component_definition in source_definition[
                "spectrum"
            ].items()
        ]

    def _parse_particle_source(self, particle_source_definition):
        # Parse the spectral information (a particle source has no position)
        if "spectrum" not in particle_source_definition:  # pragma: no cover
            # BUG FIX: the message used to read "Point source"
            raise ModelSyntaxError(
                "Particle source %s is missing the 'spectrum' attribute"
                % self._source_name
            )

        components = self._parse_spectral_components(particle_source_definition)

        return particle_source.ParticleSource(
            self._source_name, components=components
        )

    def _parse_point_source(self, pts_source_definition):
        # Parse the positional information
        if "position" not in pts_source_definition:  # pragma: no cover
            raise ModelSyntaxError(
                "Point source %s is missing the 'position' attribute"
                % self._source_name
            )

        this_sky_direction = self._parse_sky_direction(
            pts_source_definition["position"]
        )

        # Parse the spectral information
        if "spectrum" not in pts_source_definition:  # pragma: no cover
            raise ModelSyntaxError(
                "Point source %s is missing the 'spectrum' attribute"
                % self._source_name
            )

        components = self._parse_spectral_components(pts_source_definition)

        # NOTE: the no-op "try: ... except: raise" wrappers of the original
        # have been removed; exceptions propagate identically
        return point_source.PointSource(
            self._source_name,
            sky_position=this_sky_direction,
            components=components,
        )

    @staticmethod
    def _positional_parameter(name, definition, default_bounds):
        # Parse one coordinate parameter, applying the default bounds only if
        # the definition did not provide any
        par = ParameterParser(name, definition).get_variable()

        if par.bounds == (None, None):
            par.bounds = default_bounds

        return par

    def _parse_sky_direction(self, sky_direction_definition):
        # Instance the SkyDirection class using either equatorial (ra, dec)
        # or galactic (l, b) coordinates
        coordinates = {}

        if "ra" in sky_direction_definition and "dec" in sky_direction_definition:
            coordinates["ra"] = self._positional_parameter(
                "ra", sky_direction_definition["ra"], (0, 360)
            )
            coordinates["dec"] = self._positional_parameter(
                "dec", sky_direction_definition["dec"], (-90, 90)
            )

        elif "l" in sky_direction_definition and "b" in sky_direction_definition:
            coordinates["l"] = self._positional_parameter(
                "l", sky_direction_definition["l"], (0, 360)
            )
            coordinates["b"] = self._positional_parameter(
                "b", sky_direction_definition["b"], (-90, 90)
            )

        else:  # pragma: no cover
            raise ModelSyntaxError(
                "Position specification for source %s has an invalid coordinate pair. "
                " You need to specify either 'ra' and 'dec', or 'l' and 'b'."
                % self._source_name
            )

        # Check if there is a equinox specification
        if "equinox" in sky_direction_definition:
            coordinates["equinox"] = sky_direction_definition["equinox"]

        try:
            this_sky_direction = sky_direction.SkyDirection(**coordinates)
        except sky_direction.WrongCoordinatePair:  # pragma: no cover
            raise ModelSyntaxError(
                "Position specification for source %s has an invalid coordinate pair"
                % self._source_name
            )

        return this_sky_direction

    def _parse_polarization(self, polarization_definition):
        # Linear polarization: 'degree' and 'angle'
        if "degree" in polarization_definition and "angle" in polarization_definition:
            degree = ParameterParser(
                "degree", polarization_definition["degree"]
            ).get_variable()
            degree.bounds = (0, 100)

            angle = ParameterParser(
                "angle", polarization_definition["angle"]
            ).get_variable()
            angle.bounds = (0, 180)

            return polarization.LinearPolarization(angle=angle, degree=degree)

        # Stokes parametrization: 'I', 'Q', 'U' and 'V'
        if all(key in polarization_definition for key in ("I", "Q", "U", "V")):
            stokes = {}

            for key in ("I", "Q", "U", "V"):
                par = ParameterParser(key, polarization_definition[key]).get_variable()
                par.bounds = (0, 1)
                stokes[key] = par

            return polarization.StokesPolarization(**stokes)

        # No (or unrecognized) specification: default, unpolarized
        return polarization.Polarization()

    def _parse_spectral_component(self, component_name, component_definition):
        # The shape definition is the first entry: {function name: parameters}
        try:
            function_name = list(component_definition.keys())[0]
            parameters_definition = component_definition[function_name]
        except (KeyError, IndexError):  # pragma: no cover
            # BUG FIX: an empty component raises IndexError, which the
            # original 'except KeyError' clause did not catch
            raise ModelSyntaxError(
                "The component %s of source %s is malformed"
                % (component_name, self._source_name)
            )

        # parse the function
        shape_parser = ShapeParser(self._source_name)

        shape = shape_parser.parse(
            component_name, function_name, parameters_definition, is_spatial=False
        )

        # Get the links and extra setups, if any
        self._links.extend(shape_parser.links)
        self._extra_setups.extend(shape_parser.extra_setups)

        if "polarization" in component_definition:
            # get the polarization
            this_polarization = self._parse_polarization(
                component_definition["polarization"]
            )
        else:
            this_polarization = polarization.Polarization()

        return spectral_component.SpectralComponent(
            component_name, shape, this_polarization
        )

    def _parse_extended_source(self, ext_source_definition):
        # The first item in the dictionary is the definition of the extended
        # (spatial) shape
        name_of_spatial_shape = list(ext_source_definition.keys())[0]

        spatial_shape_parser = ShapeParser(self._source_name)

        spatial_shape = spatial_shape_parser.parse(
            "n.a.",
            name_of_spatial_shape,
            list(ext_source_definition.values())[0],
            is_spatial=True,
        )

        # Get the links and extra setups, if any
        self._links.extend(spatial_shape_parser.links)
        self._extra_setups.extend(spatial_shape_parser.extra_setups)

        # Parse the spectral information
        if "spectrum" not in ext_source_definition:  # pragma: no cover
            raise ModelSyntaxError(
                "Ext. source %s is missing the 'spectrum' attribute" % self._source_name
            )

        components = self._parse_spectral_components(ext_source_definition)

        return extended_source.ExtendedSource(
            self._source_name, spatial_shape, components=components
        )
class ShapeParser(object):
    """
    Parse the definition of a function ("shape"): a spectral shape, a spatial
    shape, a prior, or a link law.

    Any parameter links and 'extra_setup' entries found while parsing are
    collected and exposed through the `links` and `extra_setups` properties.
    """
    def __init__(self, source_name):
        # Name of the source (or parameter) this shape belongs to; used to
        # build parameter paths and error messages
        self._source_name = source_name
        self._links = []
        self._extra_setups = []
    @property
    def links(self):
        # Link dictionaries (parameter_path, law, variable) found during parsing
        return self._links
    @property
    def extra_setups(self):
        # Dictionaries (function_path, extra_setup) found during parsing
        return self._extra_setups
    def parse(
        self, component_name, function_name, parameters_definition, is_spatial=False
    ):
        """
        Parse a function definition and return the function instance.

        :param component_name: name of the spectral component (or "n.a." for
            spatial shapes)
        :param function_name: name of the function to instantiate
        :param parameters_definition: dictionary with the parameter
            specifications (and possibly 'expression' and 'extra_setup')
        :param is_spatial: True when parsing the spatial shape of an extended
            source (changes how parameter paths are built)
        :return: the function instance with its parameters configured
        """
        return self._parse_shape_definition(
            component_name, function_name, parameters_definition, is_spatial
        )
    @staticmethod
    def _fix(value):
        # Remove new lines where it shouldn't be any
        # Sometimes YAML add new lines in the middle of definitions,
        # such as in units
        return value.replace("\n", " ")
    def _parse_shape_definition(
        self, component_name, function_name, parameters_definition, is_spatial=False
    ):
        """Instantiate *function_name* and configure its parameters from
        *parameters_definition*, recursively parsing link laws and priors."""
        # Get the function
        if "expression" in parameters_definition:
            # This is a composite function
            function_instance = function.get_function(
                function_name, parameters_definition["expression"]
            )
        else:
            try:
                function_instance = function.get_function(function_name)
            except function.UnknownFunction:  # pragma: no cover
                raise ModelSyntaxError(
                    "Function %s, specified as shape for %s of source %s, is not a "
                    "known function"
                    % (function_name, component_name, self._source_name)
                )
        # Loop over the parameters of the function instance, instead of the specification,
        # so we can understand if there are parameters missing from the specification
        for parameter_name, _ in function_instance.parameters.items():
            try:
                this_definition = parameters_definition[parameter_name]
            except KeyError:  # pragma: no cover
                raise ModelSyntaxError(
                    "Function %s, specified as shape for %s of source %s, lacks "
                    "the definition for parameter %s"
                    % (function_name, component_name, self._source_name, parameter_name)
                )
            # Update the parameter. Note that the order is important, because trying to set the value before the
            # minimum and maximum could result in a error.
            # All these specifications are optional. If they are not present, then the default value
            # already contained in the instance of the function will be used
            # Ignore for a second the RuntimeWarning that is printed if the default value in the function definition
            # is outside the bounds defined here
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", RuntimeWarning)
                if "min_value" in this_definition:
                    function_instance.parameters[
                        parameter_name
                    ].min_value = this_definition["min_value"]
                if "max_value" in this_definition:
                    function_instance.parameters[
                        parameter_name
                    ].max_value = this_definition["max_value"]
                if "delta" in this_definition:
                    function_instance.parameters[parameter_name].delta = this_definition[
                        "delta"
                    ]
                if "free" in this_definition:
                    function_instance.parameters[parameter_name].free = this_definition[
                        "free"
                    ]
                if "unit" in this_definition:
                    function_instance.parameters[parameter_name].unit = self._fix(
                        this_definition["unit"]
                    )
            # Now set the value, which must be present
            if "value" not in this_definition:  # pragma: no cover
                raise ModelSyntaxError(
                    "The parameter %s in function %s, specified as shape for %s "
                    "of source %s, lacks a 'value' attribute"
                    % (parameter_name, function_name, component_name, self._source_name)
                )
            # Check if this is a linked parameter, i.e., if 'value' is something like f(source.spectrum.powerlaw.index)
            matches = re.findall("""f\((.+)\)""", str(this_definition["value"]))
            if matches:
                # This is an expression which marks a parameter
                # with a link to another parameter (or an IndependentVariable such as time)
                # Get the variable
                linked_variable = matches[0]
                # Now get the law
                if "law" not in this_definition:  # pragma: no cover
                    raise ModelSyntaxError(
                        "The parameter %s in function %s, specified as shape for %s "
                        "of source %s, is linked to %s but lacks a 'law' attribute"
                        % (
                            parameter_name,
                            function_name,
                            component_name,
                            self._source_name,
                            linked_variable,
                        )
                    )
                link_function_name = list(this_definition["law"].keys())[0]
                # Recursively parse the law (itself a function definition)
                link_function_instance = self._parse_shape_definition(
                    component_name,
                    link_function_name,
                    this_definition["law"][link_function_name],
                )
                # Build the full path of the linked parameter within the model
                if is_spatial:
                    path = ".".join([self._source_name, function_name, parameter_name])
                else:
                    path = ".".join(
                        [
                            self._source_name,
                            "spectrum",
                            component_name,
                            function_name,
                            parameter_name,
                        ]
                    )
                self._links.append(
                    {
                        "parameter_path": path,
                        "law": link_function_instance,
                        "variable": linked_variable,
                    }
                )
            else:
                # This is a normal (not linked) parameter
                function_instance.parameters[parameter_name].value = this_definition[
                    "value"
                ]
            # Setup the prior for this parameter, if it exists
            if "prior" in this_definition:
                # Get the function for this prior
                # A name to display in case of errors
                name_for_errors = (
                    "prior for %s" % function_instance.parameters[parameter_name].path
                )
                prior_function_name = list(this_definition["prior"].keys())[0]
                prior_function_definition = this_definition["prior"][
                    prior_function_name
                ]
                # Recursively parse the prior (itself a function definition)
                prior_function = self._parse_shape_definition(
                    name_for_errors, prior_function_name, prior_function_definition
                )
                # Set it as prior for current parameter
                function_instance.parameters[parameter_name].prior = prior_function
        # Now handle extra_setup if any
        if "extra_setup" in parameters_definition:
            if is_spatial:
                path = ".".join([self._source_name, function_name])
            else:
                path = ".".join(
                    [self._source_name, "spectrum", component_name, function_name]
                )
            self._extra_setups.append(
                {
                    "function_path": path,
                    "extra_setup": parameters_definition["extra_setup"],
                }
            )
        return function_instance
| 29.896885 | 119 | 0.578932 | from builtins import object, str
__author__ = "giacomov"
import re
import warnings
from astromodels.core import (model, parameter, polarization, sky_direction,
spectral_component)
from astromodels.core.my_yaml import my_yaml
from astromodels.functions import function
from astromodels.sources import extended_source, particle_source, point_source
from astromodels.sources.source import (EXTENDED_SOURCE, PARTICLE_SOURCE,
POINT_SOURCE)
from astromodels.utils.logging import setup_logger
log = setup_logger(__name__)
class ModelIOError(IOError):
pass
class ModelYAMLError(my_yaml.YAMLError):
pass
class ModelSyntaxError(RuntimeError):
pass
def load_model(filename):
parser = ModelParser(filename)
return parser.get_model()
def clone_model(model_instance):
data = model_instance.to_dict_with_types()
parser = ModelParser(model_dict=data)
return parser.get_model()
def model_unpickler(state):
return ModelParser(model_dict=state).get_model()
class ModelParser(object):
def __init__(self, model_file=None, model_dict=None):
assert (model_file is not None) or (model_dict is not None), (
"You have to provide either a model file or a" "model dictionary"
)
if model_file is not None:
try:
with open(model_file) as f:
self._model_dict = my_yaml.load(f, Loader=my_yaml.FullLoader)
except IOError:
raise ModelIOError(
"File %s cannot be read. Check path and permissions for current user."
% model_file
)
except my_yaml.YAMLError:
raise ModelYAMLError(
"Could not parse file %s. Check your syntax." % model_file
)
else:
self._model_dict = model_dict
self._parse()
def _parse(self):
self._sources = []
self._independent_variables = []
self._external_parameters = []
self._links = []
self._external_parameter_links = []
self._extra_setups = []
for source_or_var_name, source_or_var_definition in list(
self._model_dict.items()
):
if source_or_var_name.find("(IndependentVariable)") > 0:
var_name = source_or_var_name.split("(")[0].replace(" ", "")
this_parser = IndependentVariableParser(
var_name, source_or_var_definition
)
res = this_parser.get_variable()
assert isinstance(res, parameter.IndependentVariable)
self._independent_variables.append(res)
elif source_or_var_name.find("(Parameter)") > 0:
var_name = source_or_var_name.split("(")[0].replace(" ", "")
this_parser = ParameterParser(var_name, source_or_var_definition)
res = this_parser.get_variable()
assert isinstance(res, parameter.Parameter)
self._external_parameters.append(res)
self._links.extend(this_parser.links)
else:
this_parser = SourceParser(source_or_var_name, source_or_var_definition)
res = this_parser.get_source()
assert (
isinstance(res, point_source.PointSource)
or isinstance(res, extended_source.ExtendedSource)
or isinstance(res, particle_source.ParticleSource)
)
self._sources.append(res)
self._links.extend(this_parser.links)
self._extra_setups.extend(this_parser.extra_setups)
def get_model(self):
new_model = model.Model(*self._sources)
for independent_variable in self._independent_variables:
new_model.add_independent_variable(independent_variable)
for parameter in self._external_parameters:
new_model.add_external_parameter(parameter)
for link in self._links:
path = link["parameter_path"]
variable = link["variable"]
law = link["law"]
new_model[path].add_auxiliary_variable(new_model[variable], law)
for extra_setup in self._extra_setups:
path = extra_setup["function_path"]
for property, value in list(extra_setup["extra_setup"].items()):
if value in new_model:
new_model[path].__setattr__(property, new_model[value])
else:
new_model[path].__setattr__(property, value)
return new_model
class IndependentVariableParser(object):
def __init__(self, name, definition):
self._variable = parameter.IndependentVariable(name, **definition)
def get_variable(self):
return self._variable
class ParameterParser(object):
def __init__(self, name, definition):
self._links = []
# NOTE: this is triggered only for parameters outside of functions
if "prior" in definition:
# Need the create a function for the prior first
try:
function_name = list(definition["prior"].keys())[0]
parameters_definition = definition["prior"][function_name]
except KeyError: # pragma: no cover
raise ModelSyntaxError("The prior for parameter %s is malformed" % name)
# parse the function
shape_parser = ShapeParser(name)
prior_instance = shape_parser.parse(
name, function_name, parameters_definition
)
# Substitute the definition with the instance, so that the following constructor will work
definition["prior"] = prior_instance
# Check if this is a linked parameter, i.e., if 'value' is something like f(source.spectrum.powerlaw.index)
matches = re.findall("""f\((.+)\)""", str(definition["value"]))
if matches:
# This is an expression which marks a parameter
# with a link to another parameter (or an IndependentVariable such as time)
# Get the variable
linked_variable = matches[0]
# Now get the law
if "law" not in definition: # pragma: no cover
raise ModelSyntaxError(
"The parameter %s in function %s "
" is linked to %s but lacks a 'law' attribute"
% (name, function_name, linked_variable)
)
link_function_name = list(definition["law"].keys())[0]
# ok, now we parse the linked parameter
function_parser = ShapeParser(name)
link_function_instance = function_parser.parse(
name, link_function_name, definition["law"][link_function_name]
)
self._links.append(
{
"parameter_path": name,
"law": link_function_instance,
"variable": linked_variable,
}
)
# get rid of the 'law' entry
definition.pop("law", None)
# this parameter's value will be replaced later.
definition["value"] = 1.0
self._variable = parameter.Parameter(name, **definition)
def get_variable(self):
return self._variable
@property
def links(self):
return self._links
class SourceParser(object):
def __init__(self, source_name, source_definition):
try:
source_type = re.findall(
"\((%s|%s|%s)\)" % (POINT_SOURCE, EXTENDED_SOURCE, PARTICLE_SOURCE),
source_name,
)[-1]
except IndexError:
raise ModelSyntaxError(
"Don't recognize type for source '%s'. "
"Valid types are '%s', '%s' or '%s'."
% (source_name, POINT_SOURCE, EXTENDED_SOURCE, PARTICLE_SOURCE)
)
else:
# Strip the source_type from the name
source_name = source_name.split()[0]
self._source_name = source_name
# This will store the links (if any)
self._links = []
# This will store extra_setups (if any), used sometimes. For example, the function which uses naima
# to make a synchrotron spectrum uses this to save and set up the particle distribution
self._extra_setups = []
if source_type == POINT_SOURCE:
self._parsed_source = self._parse_point_source(source_definition)
elif source_type == EXTENDED_SOURCE:
self._parsed_source = self._parse_extended_source(source_definition)
elif source_type == PARTICLE_SOURCE:
self._parsed_source = self._parse_particle_source(source_definition)
@property
def extra_setups(self):
return self._extra_setups
@property
def links(self):
return self._links
def get_source(self):
return self._parsed_source
def _parse_particle_source(self, particle_source_definition):
# Parse the spectral information
try:
spectrum = particle_source_definition["spectrum"]
except KeyError: # pragma: no cover
raise ModelSyntaxError(
"Point source %s is missing the 'spectrum' attribute"
% self._source_name
)
components = []
for component_name, component_definition in list(
particle_source_definition["spectrum"].items()
):
this_component = self._parse_spectral_component(
component_name, component_definition
)
components.append(this_component)
this_particle_source = particle_source.ParticleSource(
self._source_name, components=components
)
return this_particle_source
def _parse_point_source(self, pts_source_definition):
# Parse the positional information
try:
position_definition = pts_source_definition["position"]
except KeyError: # pragma: no cover
raise ModelSyntaxError(
"Point source %s is missing the 'position' attribute"
% self._source_name
)
this_sky_direction = self._parse_sky_direction(position_definition)
# Parse the spectral information
try:
spectrum = pts_source_definition["spectrum"]
except KeyError: # pragma: no cover
raise ModelSyntaxError(
"Point source %s is missing the 'spectrum' attribute"
% self._source_name
)
components = []
for component_name, component_definition in list(
pts_source_definition["spectrum"].items()
):
try:
this_component = self._parse_spectral_component(
component_name, component_definition
)
components.append(this_component)
except:
raise
try:
this_point_source = point_source.PointSource(
self._source_name,
sky_position=this_sky_direction,
components=components,
)
except:
raise
return this_point_source
def _parse_sky_direction(self, sky_direction_definition):
# Instance the SkyDirection class using the coordinates provided
coordinates = {}
if "ra" in sky_direction_definition and "dec" in sky_direction_definition:
par_parser = ParameterParser("ra", sky_direction_definition["ra"])
ra = par_parser.get_variable()
if ra.bounds == (None, None):
ra.bounds = (0, 360)
par_parser = ParameterParser("dec", sky_direction_definition["dec"])
dec = par_parser.get_variable()
if dec.bounds == (None, None):
dec.bounds = (-90, 90)
coordinates["ra"] = ra
coordinates["dec"] = dec
elif "l" in sky_direction_definition and "b" in sky_direction_definition:
par_parser = ParameterParser("l", sky_direction_definition["l"])
l = par_parser.get_variable()
if l.bounds == (None, None):
l.bounds = (0, 360)
par_parser = ParameterParser("b", sky_direction_definition["b"])
b = par_parser.get_variable()
if b.bounds == (None, None):
b.bounds = (-90, 90)
coordinates["l"] = l
coordinates["b"] = b
else: # pragma: no cover
raise ModelSyntaxError(
"Position specification for source %s has an invalid coordinate pair. "
" You need to specify either 'ra' and 'dec', or 'l' and 'b'."
% self._source_name
)
# Check if there is a equinox specification
if "equinox" in sky_direction_definition:
coordinates["equinox"] = sky_direction_definition["equinox"]
try:
this_sky_direction = sky_direction.SkyDirection(**coordinates)
except sky_direction.WrongCoordinatePair: # pragma: no cover
raise ModelSyntaxError(
"Position specification for source %s has an invalid coordinate pair"
% self._source_name
)
return this_sky_direction
def _parse_polarization(self, polarization_definititon):
polarization_params = {}
if "degree" in polarization_definititon and "angle" in polarization_definititon:
par_parser = ParameterParser("degree", polarization_definititon["degree"])
degree = par_parser.get_variable()
degree.bounds = (0, 100)
par_parser = ParameterParser("angle", polarization_definititon["angle"])
angle = par_parser.get_variable()
angle.bounds = (0, 180)
this_polarization = polarization.LinearPolarization(
angle=angle, degree=degree
)
elif (
"I" in polarization_definititon
and "U" in polarization_definititon
and "Q" in polarization_definititon
and "V" in polarization_definititon
):
par_parser = ParameterParser("I", polarization_definititon["I"])
I = par_parser.get_variable()
I.bounds = (0, 1)
par_parser = ParameterParser("U", polarization_definititon["U"])
U = par_parser.get_variable()
U.bounds = (0, 1)
par_parser = ParameterParser("Q", polarization_definititon["Q"])
Q = par_parser.get_variable()
Q.bounds = (0, 1)
par_parser = ParameterParser("V", polarization_definititon["V"])
V = par_parser.get_variable()
V.bounds = (0, 1)
this_polarization = polarization.StokesPolarization(I=I, Q=Q, U=U, V=V)
else:
# just make a default polarization
this_polarization = polarization.Polarization()
# raise ModelSyntaxError("Polarization specification for source %s has an invalid parameters. "
# " You need to specify either 'angle' and 'degree', or 'I' ,'Q', 'U' and 'V'."
# % self._source_name)
return this_polarization
def _parse_spectral_component(self, component_name, component_definition):
# Parse the shape definition, which is the first to occur
try:
function_name = list(component_definition.keys())[0]
parameters_definition = component_definition[function_name]
except KeyError: # pragma: no cover
raise ModelSyntaxError(
"The component %s of source %s is malformed"
% (component_name, self._source_name)
)
# parse the function
shape_parser = ShapeParser(self._source_name)
shape = shape_parser.parse(
component_name, function_name, parameters_definition, is_spatial=False
)
# Get the links and extra setups, if any
self._links.extend(shape_parser.links)
self._extra_setups.extend(shape_parser.extra_setups)
if "polarization" in component_definition:
# get the polarization
polarization_definition = component_definition["polarization"]
this_polarization = self._parse_polarization(polarization_definition)
else:
this_polarization = polarization.Polarization()
this_spectral_component = spectral_component.SpectralComponent(
component_name, shape, this_polarization
)
return this_spectral_component
def _parse_extended_source(self, ext_source_definition):
# The first item in the dictionary is the definition of the extended shape
name_of_spatial_shape = list(ext_source_definition.keys())[0]
spatial_shape_parser = ShapeParser(self._source_name)
spatial_shape = spatial_shape_parser.parse(
"n.a.",
name_of_spatial_shape,
list(ext_source_definition.values())[0],
is_spatial=True,
)
# Get the links and extra setups, if any
self._links.extend(spatial_shape_parser.links)
self._extra_setups.extend(spatial_shape_parser.extra_setups)
# Parse the spectral information
try:
spectrum = ext_source_definition["spectrum"]
except KeyError: # pragma: no cover
raise ModelSyntaxError(
"Ext. source %s is missing the 'spectrum' attribute" % self._source_name
)
components = []
for component_name, component_definition in list(
ext_source_definition["spectrum"].items()
):
this_component = self._parse_spectral_component(
component_name, component_definition
)
components.append(this_component)
this_ext_source = extended_source.ExtendedSource(
self._source_name, spatial_shape, components=components
)
return this_ext_source
class ShapeParser(object):
    """
    Parser for a function ("shape") definition inside a model file.

    While parsing, parameter links (values of the form ``f(x)`` with an
    attached ``law``) and ``extra_setup`` directives are accumulated and
    exposed through the :attr:`links` and :attr:`extra_setups` properties,
    so the caller can apply them once the whole model is assembled.
    """

    def __init__(self, source_name):
        # Name of the source being parsed; only used to build error messages.
        self._source_name = source_name
        self._links = []
        self._extra_setups = []

    @property
    def links(self):
        """Parameter links (dicts with parameter_path/law/variable) collected so far."""
        return self._links

    @property
    def extra_setups(self):
        """Extra-setup directives (dicts with function_path/extra_setup) collected so far."""
        return self._extra_setups

    def parse(
        self, component_name, function_name, parameters_definition, is_spatial=False
    ):
        """
        Parse one shape definition and return the configured function instance.

        :param component_name: name of the spectral component (by convention
            "n.a." when parsing a spatial shape)
        :param function_name: name of the function implementing the shape
        :param parameters_definition: dictionary with the parameter settings
        :param is_spatial: True when parsing the spatial shape of an extended
            source; changes how parameter paths for links/setups are built
        """
        return self._parse_shape_definition(
            component_name, function_name, parameters_definition, is_spatial
        )

    @staticmethod
    def _fix(value):
        # Remove new lines where it shouldn't be any
        return value.replace("\n", " ")

    def _parse_shape_definition(
        self, component_name, function_name, parameters_definition, is_spatial=False
    ):
        """Instantiate the function and configure each of its parameters."""
        # Instantiate from an inline "expression", or look the name up in the
        # function registry.
        if "expression" in parameters_definition:
            function_instance = function.get_function(
                function_name, parameters_definition["expression"]
            )
        else:
            try:
                function_instance = function.get_function(function_name)
            except function.UnknownFunction:
                raise ModelSyntaxError(
                    "Function %s, specified as shape for %s of source %s, is not a "
                    "known function"
                    % (function_name, component_name, self._source_name)
                )

        for parameter_name, _ in function_instance.parameters.items():
            try:
                this_definition = parameters_definition[parameter_name]
            except KeyError:
                raise ModelSyntaxError(
                    "Function %s, specified as shape for %s of source %s, lacks "
                    "the definition for parameter %s"
                    % (function_name, component_name, self._source_name, parameter_name)
                )

            # Setting bounds/values out of order can emit RuntimeWarnings;
            # silence them for the duration of the parameter setup.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", RuntimeWarning)

                if "min_value" in this_definition:
                    function_instance.parameters[
                        parameter_name
                    ].min_value = this_definition["min_value"]

                if "max_value" in this_definition:
                    function_instance.parameters[
                        parameter_name
                    ].max_value = this_definition["max_value"]

                if "delta" in this_definition:
                    function_instance.parameters[parameter_name].delta = this_definition[
                        "delta"
                    ]

                if "free" in this_definition:
                    function_instance.parameters[parameter_name].free = this_definition[
                        "free"
                    ]

                if "unit" in this_definition:
                    function_instance.parameters[parameter_name].unit = self._fix(
                        this_definition["unit"]
                    )

                if "value" not in this_definition:
                    raise ModelSyntaxError(
                        "The parameter %s in function %s, specified as shape for %s "
                        "of source %s, lacks a 'value' attribute"
                        % (parameter_name, function_name, component_name, self._source_name)
                    )

                # A value of the form "f(x)" links this parameter to variable x
                # through a law given in the same definition.
                # FIX: use a raw string for the pattern; "f\(" in a normal
                # string literal is an invalid escape sequence (deprecated and
                # slated to become a SyntaxError in future Python versions).
                matches = re.findall(r"""f\((.+)\)""", str(this_definition["value"]))

                if matches:
                    linked_variable = matches[0]

                    if "law" not in this_definition:
                        raise ModelSyntaxError(
                            "The parameter %s in function %s, specified as shape for %s "
                            "of source %s, is linked to %s but lacks a 'law' attribute"
                            % (
                                parameter_name,
                                function_name,
                                component_name,
                                self._source_name,
                                linked_variable,
                            )
                        )

                    link_function_name = list(this_definition["law"].keys())[0]

                    link_function_instance = self._parse_shape_definition(
                        component_name,
                        link_function_name,
                        this_definition["law"][link_function_name],
                    )

                    # Spatial shapes live directly under the source; spectral
                    # shapes are nested under "spectrum.<component>".
                    if is_spatial:
                        path = ".".join([self._source_name, function_name, parameter_name])
                    else:
                        path = ".".join(
                            [
                                self._source_name,
                                "spectrum",
                                component_name,
                                function_name,
                                parameter_name,
                            ]
                        )

                    self._links.append(
                        {
                            "parameter_path": path,
                            "law": link_function_instance,
                            "variable": linked_variable,
                        }
                    )

                else:
                    function_instance.parameters[parameter_name].value = this_definition[
                        "value"
                    ]

                    # A prior can only be attached to a non-linked parameter.
                    if "prior" in this_definition:
                        name_for_errors = (
                            "prior for %s" % function_instance.parameters[parameter_name].path
                        )

                        prior_function_name = list(this_definition["prior"].keys())[0]

                        prior_function_definition = this_definition["prior"][
                            prior_function_name
                        ]

                        prior_function = self._parse_shape_definition(
                            name_for_errors, prior_function_name, prior_function_definition
                        )

                        function_instance.parameters[parameter_name].prior = prior_function

        if "extra_setup" in parameters_definition:
            if is_spatial:
                path = ".".join([self._source_name, function_name])
            else:
                path = ".".join(
                    [self._source_name, "spectrum", component_name, function_name]
                )

            self._extra_setups.append(
                {
                    "function_path": path,
                    "extra_setup": parameters_definition["extra_setup"],
                }
            )

        return function_instance
| true | true |
1c33b274571759e77e6dd52220d80f86a0c0bc06 | 4,544 | py | Python | docusign_esign/models/bulk_send_request.py | joekohlsdorf/docusign-esign-python-client | 40407544f79c88716d36fabf36f65c3ef1a5c3ba | [
"MIT"
] | 58 | 2017-10-18T23:06:57.000Z | 2021-04-15T23:14:58.000Z | docusign_esign/models/bulk_send_request.py | joekohlsdorf/docusign-esign-python-client | 40407544f79c88716d36fabf36f65c3ef1a5c3ba | [
"MIT"
] | 49 | 2017-10-27T05:54:09.000Z | 2021-04-29T22:06:17.000Z | docusign_esign/models/bulk_send_request.py | joekohlsdorf/docusign-esign-python-client | 40407544f79c88716d36fabf36f65c3ef1a5c3ba | [
"MIT"
] | 49 | 2017-09-16T07:23:41.000Z | 2021-05-07T20:21:20.000Z | # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class BulkSendRequest(object):
    """Swagger model describing a DocuSign bulk-send request payload.

    NOTE: This class is auto generated by the swagger code generator
    program; prefer regenerating over hand-editing when the API changes.

    Class attributes:
        swagger_types: maps attribute name -> attribute type.
        attribute_map: maps attribute name -> JSON key in the API definition.
    """

    swagger_types = {
        'batch_name': 'str',
        'envelope_or_template_id': 'str'
    }

    attribute_map = {
        'batch_name': 'batchName',
        'envelope_or_template_id': 'envelopeOrTemplateId'
    }

    def __init__(self, _configuration=None, **kwargs):  # noqa: E501
        """BulkSendRequest - a model defined in Swagger."""  # noqa: E501
        self._configuration = Configuration() if _configuration is None else _configuration

        self._batch_name = None
        self._envelope_or_template_id = None
        self.discriminator = None

        # Populate the private backing fields from any keyword arguments.
        for attr in ('batch_name', 'envelope_or_template_id'):
            setattr(self, '_' + attr, kwargs.get(attr, None))

    @property
    def batch_name(self):
        """Get the batch_name of this BulkSendRequest.

        :rtype: str
        """
        return self._batch_name

    @batch_name.setter
    def batch_name(self, batch_name):
        """Set the batch_name of this BulkSendRequest.

        :type: str
        """
        self._batch_name = batch_name

    @property
    def envelope_or_template_id(self):
        """Get the envelope_or_template_id of this BulkSendRequest.

        :rtype: str
        """
        return self._envelope_or_template_id

    @envelope_or_template_id.setter
    def envelope_or_template_id(self, envelope_or_template_id):
        """Set the envelope_or_template_id of this BulkSendRequest.

        :type: str
        """
        self._envelope_or_template_id = envelope_or_template_id

    def to_dict(self):
        """Return the model properties as a dict."""
        def _serialize(value):
            # Recursively convert nested swagger models into plain values.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        result = {attr: _serialize(getattr(self, attr))
                  for attr in self.swagger_types}
        if issubclass(BulkSendRequest, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal."""
        return isinstance(other, BulkSendRequest) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return (not isinstance(other, BulkSendRequest)) or self.to_dict() != other.to_dict()
| 29.894737 | 140 | 0.600572 |
import pprint
import re
import six
from docusign_esign.client.configuration import Configuration
class BulkSendRequest(object):
swagger_types = {
'batch_name': 'str',
'envelope_or_template_id': 'str'
}
attribute_map = {
'batch_name': 'batchName',
'envelope_or_template_id': 'envelopeOrTemplateId'
}
def __init__(self, _configuration=None, **kwargs):
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._batch_name = None
self._envelope_or_template_id = None
self.discriminator = None
setattr(self, "_{}".format('batch_name'), kwargs.get('batch_name', None))
setattr(self, "_{}".format('envelope_or_template_id'), kwargs.get('envelope_or_template_id', None))
@property
def batch_name(self):
return self._batch_name
@batch_name.setter
def batch_name(self, batch_name):
self._batch_name = batch_name
@property
def envelope_or_template_id(self):
return self._envelope_or_template_id
@envelope_or_template_id.setter
def envelope_or_template_id(self, envelope_or_template_id):
self._envelope_or_template_id = envelope_or_template_id
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(BulkSendRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, BulkSendRequest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, BulkSendRequest):
return True
return self.to_dict() != other.to_dict()
| true | true |
1c33b2a09b44db5a187a1aef02d537d19d775728 | 3,601 | py | Python | blabber_log_gen.py | markcitron/blabber_log_gen | 671e348d8d9c2120b5d99913d0828a724560dd2b | [
"Apache-2.0"
] | null | null | null | blabber_log_gen.py | markcitron/blabber_log_gen | 671e348d8d9c2120b5d99913d0828a724560dd2b | [
"Apache-2.0"
] | null | null | null | blabber_log_gen.py | markcitron/blabber_log_gen | 671e348d8d9c2120b5d99913d0828a724560dd2b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
""" Blabber log gen - This script will generate one or more sample logs
This script is called from the start_blabber.py so that it will
clean up any logs started, etc. If you start the logs directly from
here you can kill the log with cntl-c or whatever :-)
The log generator purposely generates an error so that you can have an
error to trigger off of (if needed).
"""
import logging, argparse, time
def make_parser():
    """Build the command-line argument parser for the blabber log generator."""
    parser = argparse.ArgumentParser(description="")
    options = [
        ("--log_name", "-n", "Name of log to create. If multiple logs, log number will be appended to this name."),
        ("--log_level", "-l", ""),
        ("--log_id", "-i", "This is used to differentiate multiple logs."),
    ]
    for long_flag, short_flag, help_text in options:
        parser.add_argument(long_flag, short_flag, help=help_text)
    return parser
def check_args(args):
    """Fill in defaults for missing CLI options and return the namespace."""
    # Default log name / level when the flags were not supplied.
    args.log_name = args.log_name or "blabber.log"
    args.log_level = args.log_level or "INFO"
    return args
def log_text(pointer):
    """Return line `pointer` of the Jabberwocky poem.

    Raises IndexError when `pointer` is past the end of the poem, which the
    caller uses deliberately to exercise its error-handling path.
    """
    logging.debug("Request for line: %s" % pointer)
    poem = (
        "Twas brillig, and the slithy toves ",
        "  Did gyre and gimble in the wabe: ",
        "All mimsy were the borogoves, ",
        "  And the mome raths outgrabe. ",
        "",
        "Beware the Jabberwock, my son! ",
        "  The jaws that bite, the claws that catch! ",
        "Beware the Jubjub bird, and shun ",
        "  The frumious Bandersnatch! ",
        "",
        "He took his vorpal sword in hand; ",
        "  Long time the manxome foe he sought— ",
        "So rested he by the Tumtum tree ",
        "  And stood awhile in thought. ",
        "",
        "And, as in uffish thought he stood, ",
        "  The Jabberwock, with eyes of flame, ",
        "Came whiffling through the tulgey wood, ",
        "  And burbled as it came! ",
        "",
        "One, two! One, two! And through and through ",
        "  The vorpal blade went snicker-snack! ",
        "He left it dead, and with its head ",
        "  He went galumphing back. ",
        "",
        "“And hast thou slain the Jabberwock? ",
        "  Come to my arms, my beamish boy! ",
        "O frabjous day! Callooh! Callay!” ",
        "  He chortled in his joy. ",
        "",
        "Twas brillig, and the slithy toves ",
        "  Did gyre and gimble in the wabe: ",
        "All mimsy were the borogoves, ",
        "  And the mome raths outgrabe."
    )
    return poem[pointer]
def main():
    """Entry point: parse CLI args, configure logging, and emit poem lines forever.

    Running off the end of the poem raises IndexError, which is logged as an
    error (on purpose, so consumers have an error line to trigger on) and the
    counter is reset. Only Ctrl-C (KeyboardInterrupt) stops the loop.
    """
    args = check_args(make_parser().parse_args())

    # Prefix the log file name with the log id, if one was given.
    logname = args.log_name
    if args.log_id:
        logname = "{0}_{1}".format(args.log_id, args.log_name)
    logging.basicConfig(filename=logname, level=args.log_level,
                        format='%(asctime)s [%(levelname)s] %(message)s')
    logging.info("Started sample log: {0}".format(args.log_name))

    line_no = 0
    while True:
        try:
            logging.debug("Getting line: {0}".format(line_no))
            logging.info(log_text(line_no))
            line_no += 1
        except KeyboardInterrupt:
            raise
        except Exception as e:
            # Expected once per cycle, when line_no runs past the poem.
            logging.error("expected: {0}".format(str(e)))
            line_no = 0
        logging.debug("Sleeping one second.")
        time.sleep(1)
if __name__ == "__main__":
main()
| 35.303922 | 130 | 0.579561 |
import logging, argparse, time
def make_parser():
p = argparse.ArgumentParser(description="")
p.add_argument("--log_name", "-n", help="Name of log to create. If multiple logs, log number will be appended to this name.")
p.add_argument("--log_level", "-l", help="")
p.add_argument("--log_id", "-i", help="This is used to differentiate multiple logs.")
return p
def check_args(args):
if not args.log_name:
args.log_name = "blabber.log"
if not args.log_level:
args.log_level = "INFO"
return args
def log_text(pointer):
logging.debug("Request for line: {0}".format(pointer))
lt = [
"Twas brillig, and the slithy toves ",
" Did gyre and gimble in the wabe: ",
"All mimsy were the borogoves, ",
" And the mome raths outgrabe. ",
"",
"Beware the Jabberwock, my son! " ,
" The jaws that bite, the claws that catch! " ,
"Beware the Jubjub bird, and shun " ,
" The frumious Bandersnatch! " ,
"",
"He took his vorpal sword in hand; " ,
" Long time the manxome foe he sought— " ,
"So rested he by the Tumtum tree " ,
" And stood awhile in thought. " ,
"",
"And, as in uffish thought he stood, " ,
" The Jabberwock, with eyes of flame, " ,
"Came whiffling through the tulgey wood, " ,
" And burbled as it came! " ,
"",
"One, two! One, two! And through and through " ,
" The vorpal blade went snicker-snack! " ,
"He left it dead, and with its head " ,
" He went galumphing back. " ,
"",
"“And hast thou slain the Jabberwock? ",
" Come to my arms, my beamish boy! ",
"O frabjous day! Callooh! Callay!” ",
" He chortled in his joy. ",
"",
"Twas brillig, and the slithy toves ",
" Did gyre and gimble in the wabe: ",
"All mimsy were the borogoves, ",
" And the mome raths outgrabe."
]
return lt[pointer]
def main():
passed_args = make_parser().parse_args()
args = check_args(passed_args)
logname = args.log_name
if args.log_id:
logname = "{0}_{1}".format(args.log_id, args.log_name)
loglevel = args.log_level
logging.basicConfig(filename=logname, level=loglevel, format='%(asctime)s [%(levelname)s] %(message)s')
logging.info("Started sample log: {0}".format(args.log_name))
lc = 0
while True:
try:
logging.debug("Getting line: {0}".format(lc))
next_line = log_text(lc)
logging.info(next_line)
lc = lc + 1
except (KeyboardInterrupt):
raise
except Exception as e:
logging.error("expected: {0}".format(str(e)))
lc = 0
logging.debug("Sleeping one second.")
time.sleep(1)
if __name__ == "__main__":
main()
| true | true |
1c33b603ad9429d7889269b3aeaa7810a1df9cce | 6,463 | py | Python | opencood/hypes_yaml/yaml_utils.py | CARLAlover/OpenCOOD | dd42cc7a31bc261ea2461b3068ed6111f13ff437 | [
"Apache-2.0"
] | null | null | null | opencood/hypes_yaml/yaml_utils.py | CARLAlover/OpenCOOD | dd42cc7a31bc261ea2461b3068ed6111f13ff437 | [
"Apache-2.0"
] | null | null | null | opencood/hypes_yaml/yaml_utils.py | CARLAlover/OpenCOOD | dd42cc7a31bc261ea2461b3068ed6111f13ff437 | [
"Apache-2.0"
] | null | null | null | import re
import yaml
import os
import numpy as np
def load_yaml(file, opt=None):
    """
    Load yaml file and return a dictionary.

    Parameters
    ----------
    file : string
        yaml file path.
    opt : argparser
        Argparser. When it carries a truthy ``model_dir`` attribute, the
        configuration is read from ``<model_dir>/config.yaml`` instead of
        ``file``.

    Returns
    -------
    param : dict
        A dictionary that contains defined parameters.
    """
    if opt and opt.model_dir:
        file = os.path.join(opt.model_dir, 'config.yaml')

    loader = yaml.Loader
    # Teach the loader to resolve scientific notation (e.g. 1e-5) as a float;
    # the stock resolver only matches floats containing a dot.
    loader.add_implicit_resolver(
        u'tag:yaml.org,2002:float',
        re.compile(u'''^(?:
         [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
        |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
        |\\.[0-9_]+(?:[eE][-+][0-9]+)?
        |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
        |[-+]?\\.(?:inf|Inf|INF)
        |\\.(?:nan|NaN|NAN))$''', re.X),
        list(u'-+0123456789.'))

    # FIX: use a context manager so the file handle is always closed; the
    # original opened the stream and never closed it.
    with open(file, 'r') as stream:
        param = yaml.load(stream, Loader=loader)

    if "yaml_parser" in param:
        # SECURITY NOTE: eval() executes arbitrary code named in the config
        # file; only load trusted configuration files.
        param = eval(param["yaml_parser"])(param)

    return param
def load_voxel_params(param):
    """
    Derive the anchor-grid dimensions from the lidar range and the voxel
    resolution, and mirror them into the model arguments.

    Parameters
    ----------
    param : dict
        Original loaded parameter dictionary.

    Returns
    -------
    param : dict
        Modified parameter dictionary with new attribute `anchor_args[W][H][L]`
    """
    anchor_args = param['postprocess']['anchor_args']
    lidar_range = anchor_args['cav_lidar_range']
    voxel_size = param['preprocess']['args']['voxel_size']

    # Per axis: record the voxel resolution and the number of voxels covering
    # the lidar range along that axis ((max - min) / resolution, truncated).
    for res_key, dim_key, lo, hi in zip(('vw', 'vh', 'vd'),
                                        ('W', 'H', 'D'),
                                        range(0, 3), range(3, 6)):
        anchor_args[res_key] = voxel_size[lo]
        anchor_args[dim_key] = int((lidar_range[hi] - lidar_range[lo]) / voxel_size[lo])

    param['postprocess'].update({'anchor_args': anchor_args})

    # sometimes we just want to visualize the data without implementing model
    if 'model' in param:
        for dim_key in ('W', 'H', 'D'):
            param['model']['args'][dim_key] = anchor_args[dim_key]

    return param
def load_point_pillar_params(param):
    """
    Derive the point-pillar scatter grid size and the anchor-grid dimensions
    from the lidar range and voxel resolution.

    Parameters
    ----------
    param : dict
        Original loaded parameter dictionary.

    Returns
    -------
    param : dict
        Modified parameter dictionary with new attribute.
    """
    cav_lidar_range = param['preprocess']['cav_lidar_range']
    voxel_size = param['preprocess']['args']['voxel_size']

    # Number of voxels along x/y/z, rounded to the nearest integer.
    grid_size = np.round(
        (np.array(cav_lidar_range[3:6]) - np.array(cav_lidar_range[0:3]))
        / np.array(voxel_size)
    ).astype(np.int64)
    param['model']['args']['point_pillar_scatter']['grid_size'] = grid_size

    anchor_args = param['postprocess']['anchor_args']
    anchor_args['vw'], anchor_args['vh'], anchor_args['vd'] = voxel_size[:3]

    # Anchor grid sizes use plain truncation, matching the original code.
    for axis, dim_key in enumerate(('W', 'H', 'D')):
        span = cav_lidar_range[axis + 3] - cav_lidar_range[axis]
        anchor_args[dim_key] = int(span / voxel_size[axis])

    param['postprocess'].update({'anchor_args': anchor_args})

    return param
def load_second_params(param):
    """
    Derive the SECOND backbone grid size and the anchor-grid dimensions from
    the lidar range and voxel resolution.

    Parameters
    ----------
    param : dict
        Original loaded parameter dictionary.

    Returns
    -------
    param : dict
        Modified parameter dictionary with new attribute.
    """
    cav_lidar_range = param['preprocess']['cav_lidar_range']
    voxel_size = param['preprocess']['args']['voxel_size']

    # Number of voxels along x/y/z, rounded to the nearest integer.
    grid_size = np.round(
        (np.array(cav_lidar_range[3:6]) - np.array(cav_lidar_range[0:3]))
        / np.array(voxel_size)
    ).astype(np.int64)
    param['model']['args']['grid_size'] = grid_size

    anchor_args = param['postprocess']['anchor_args']
    anchor_args['vw'], anchor_args['vh'], anchor_args['vd'] = voxel_size[:3]

    # Anchor grid sizes use plain truncation, matching the original code.
    for axis, dim_key in enumerate(('W', 'H', 'D')):
        span = cav_lidar_range[axis + 3] - cav_lidar_range[axis]
        anchor_args[dim_key] = int(span / voxel_size[axis])

    param['postprocess'].update({'anchor_args': anchor_args})

    return param
def load_bev_params(param):
    """
    Compute BEV (bird's-eye-view) geometry: boundaries, resolution, input
    shape and label shape, stored as a shared ``geometry_param`` dictionary.

    Parameters
    ----------
    param : dict
        Original loaded parameter dictionary.

    Returns
    -------
    param : dict
        Modified parameter dictionary with new attribute `geometry_param`.
    """
    res = param["preprocess"]["args"]["res"]
    L1, W1, H1, L2, W2, H2 = param["preprocess"]["cav_lidar_range"]
    downsample_rate = param["preprocess"]["args"]["downsample_rate"]

    def n_cells(low, high):
        # Number of cells of size `res` covering [low, high).
        return int((high - low) / res)

    input_shape = (n_cells(L1, L2), n_cells(W1, W2), n_cells(H1, H2) + 1)
    # The label grid is the input grid downsampled, with 7 regression targets.
    label_shape = (
        int(input_shape[0] / downsample_rate),
        int(input_shape[1] / downsample_rate),
        7,
    )

    geometry_param = {
        'L1': L1,
        'L2': L2,
        'W1': W1,
        'W2': W2,
        'H1': H1,
        'H2': H2,
        "downsample_rate": downsample_rate,
        "input_shape": input_shape,
        "label_shape": label_shape,
        "res": res,
    }
    # The same dict object is shared by all three consumers.
    param["preprocess"]["geometry_param"] = geometry_param
    param["postprocess"]["geometry_param"] = geometry_param
    param["model"]["args"]["geometry_param"] = geometry_param

    return param
def save_yaml(data, save_name):
    """
    Save the dictionary into a yaml file.

    Any existing file at `save_name` is overwritten (opened in 'w' mode).
    Block style is forced via `default_flow_style=False`.

    Parameters
    ----------
    data : dict
        The dictionary that contains all data to serialize.

    save_name : string
        Full path of the output yaml file.
    """
    with open(save_name, 'w') as outfile:
        yaml.dump(data, outfile, default_flow_style=False)
| 26.929167 | 79 | 0.59183 | import re
import yaml
import os
import numpy as np
def load_yaml(file, opt=None):
if opt and opt.model_dir:
file = os.path.join(opt.model_dir, 'config.yaml')
stream = open(file, 'r')
loader = yaml.Loader
loader.add_implicit_resolver(
u'tag:yaml.org,2002:float',
re.compile(u'''^(?:
[-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
|[-+]?\\.(?:inf|Inf|INF)
|\\.(?:nan|NaN|NAN))$''', re.X),
list(u'-+0123456789.'))
param = yaml.load(stream, Loader=loader)
if "yaml_parser" in param:
param = eval(param["yaml_parser"])(param)
return param
def load_voxel_params(param):
anchor_args = param['postprocess']['anchor_args']
cav_lidar_range = anchor_args['cav_lidar_range']
voxel_size = param['preprocess']['args']['voxel_size']
vw = voxel_size[0]
vh = voxel_size[1]
vd = voxel_size[2]
anchor_args['vw'] = vw
anchor_args['vh'] = vh
anchor_args['vd'] = vd
anchor_args['W'] = int((cav_lidar_range[3] - cav_lidar_range[0]) / vw)
anchor_args['H'] = int((cav_lidar_range[4] - cav_lidar_range[1]) / vh)
anchor_args['D'] = int((cav_lidar_range[5] - cav_lidar_range[2]) / vd)
param['postprocess'].update({'anchor_args': anchor_args})
if 'model' in param:
param['model']['args']['W'] = anchor_args['W']
param['model']['args']['H'] = anchor_args['H']
param['model']['args']['D'] = anchor_args['D']
return param
def load_point_pillar_params(param):
cav_lidar_range = param['preprocess']['cav_lidar_range']
voxel_size = param['preprocess']['args']['voxel_size']
grid_size = (np.array(cav_lidar_range[3:6]) - np.array(
cav_lidar_range[0:3])) / \
np.array(voxel_size)
grid_size = np.round(grid_size).astype(np.int64)
param['model']['args']['point_pillar_scatter']['grid_size'] = grid_size
anchor_args = param['postprocess']['anchor_args']
vw = voxel_size[0]
vh = voxel_size[1]
vd = voxel_size[2]
anchor_args['vw'] = vw
anchor_args['vh'] = vh
anchor_args['vd'] = vd
anchor_args['W'] = int((cav_lidar_range[3] - cav_lidar_range[0]) / vw)
anchor_args['H'] = int((cav_lidar_range[4] - cav_lidar_range[1]) / vh)
anchor_args['D'] = int((cav_lidar_range[5] - cav_lidar_range[2]) / vd)
param['postprocess'].update({'anchor_args': anchor_args})
return param
def load_second_params(param):
cav_lidar_range = param['preprocess']['cav_lidar_range']
voxel_size = param['preprocess']['args']['voxel_size']
grid_size = (np.array(cav_lidar_range[3:6]) - np.array(
cav_lidar_range[0:3])) / \
np.array(voxel_size)
grid_size = np.round(grid_size).astype(np.int64)
param['model']['args']['grid_size'] = grid_size
anchor_args = param['postprocess']['anchor_args']
vw = voxel_size[0]
vh = voxel_size[1]
vd = voxel_size[2]
anchor_args['vw'] = vw
anchor_args['vh'] = vh
anchor_args['vd'] = vd
anchor_args['W'] = int((cav_lidar_range[3] - cav_lidar_range[0]) / vw)
anchor_args['H'] = int((cav_lidar_range[4] - cav_lidar_range[1]) / vh)
anchor_args['D'] = int((cav_lidar_range[5] - cav_lidar_range[2]) / vd)
param['postprocess'].update({'anchor_args': anchor_args})
return param
def load_bev_params(param):
res = param["preprocess"]["args"]["res"]
L1, W1, H1, L2, W2, H2 = param["preprocess"]["cav_lidar_range"]
downsample_rate = param["preprocess"]["args"]["downsample_rate"]
def f(low, high, r):
return int((high - low) / r)
input_shape = (
int((f(L1, L2, res))),
int((f(W1, W2, res))),
int((f(H1, H2, res)) + 1)
)
label_shape = (
int(input_shape[0] / downsample_rate),
int(input_shape[1] / downsample_rate),
7
)
geometry_param = {
'L1': L1,
'L2': L2,
'W1': W1,
'W2': W2,
'H1': H1,
'H2': H2,
"downsample_rate": downsample_rate,
"input_shape": input_shape,
"label_shape": label_shape,
"res": res
}
param["preprocess"]["geometry_param"] = geometry_param
param["postprocess"]["geometry_param"] = geometry_param
param["model"]["args"]["geometry_param"] = geometry_param
return param
def save_yaml(data, save_name):
with open(save_name, 'w') as outfile:
yaml.dump(data, outfile, default_flow_style=False)
| true | true |
1c33b6a7978fdbc0bd67a4184ebe877b4f9281f3 | 6,047 | py | Python | mut/flow.py | RPGroup-PBoC/mwc_mutants | 35581602c35793fc8ec42c8aff37b8305c5e54e1 | [
"MIT"
] | 3 | 2020-11-11T21:33:26.000Z | 2021-07-14T21:22:43.000Z | mut/flow.py | RPGroup-PBoC/mwc_mutants | 35581602c35793fc8ec42c8aff37b8305c5e54e1 | [
"MIT"
] | null | null | null | mut/flow.py | RPGroup-PBoC/mwc_mutants | 35581602c35793fc8ec42c8aff37b8305c5e54e1 | [
"MIT"
] | 1 | 2021-07-14T21:22:45.000Z | 2021-07-14T21:22:45.000Z | import numpy as np
import fcsparser
import pandas as pd
from ._fit_bivariate_normal_AstroML import fit_bivariate_normal
import scipy.stats
# #######################
# Automated Gating
# #######################
def fit_2D_gaussian(df, x_val='FSC-H', y_val='SSC-H', log=False):
    '''
    Fit a bivariate normal distribution to the x_val and y_val columns of df
    (hacking astroML's fit_bivariate_normal) and return its mean and
    covariance matrix.

    Parameters
    ----------
    df : DataFrame.
        dataframe containing the data from which to fit the distribution
    x_val, y_val : str.
        name of the dataframe columns to be used in the function
    log : bool.
        indicate if the log of the data should be use for the fit or not

    Returns
    -------
    mu : tuple.
        (x, y) location of the best-fit bivariate normal
    cov : 2 x 2 array
        covariance matrix.
        cov[0, 0] = variance of the x_val column
        cov[1, 1] = variance of the y_val column
        cov[0, 1] = cov[1, 0] = covariance of the data
    '''
    if log:
        xs, ys = np.log10(df[x_val]), np.log10(df[y_val])
    else:
        xs, ys = df[x_val], df[y_val]

    # The robust astroML fit returns the principal standard deviations and
    # the rotation angle of the best-fit ellipse.
    mu, sigma_1, sigma_2, alpha = fit_bivariate_normal(xs, ys, robust=True)

    # Rotate the principal-axis variances back into the (x, y) frame to form
    # the covariance matrix.
    c, s = np.cos(alpha), np.sin(alpha)
    sigma_xx = (sigma_1 * c) ** 2 + (sigma_2 * s) ** 2
    sigma_yy = (sigma_1 * s) ** 2 + (sigma_2 * c) ** 2
    sigma_xy = (sigma_1 ** 2 - sigma_2 ** 2) * s * c

    cov = np.array([[sigma_xx, sigma_xy], [sigma_xy, sigma_yy]])

    return mu, cov
# #################
def gauss_interval(df, mu, cov, x_val='FSC-H', y_val='SSC-H', log=False):
    '''
    Compute the quadratic-form statistic

        (x - µ)' Σ⁻¹ (x - µ)

    for every row of the x_val/y_val columns of df.

    Parameters
    ----------
    df : DataFrame.
        dataframe containing the events to evaluate
    mu : array-like.
        (x, y) location of the bivariate normal
    cov : 2 x 2 array
        covariance matrix
    x_val, y_val : str.
        name of the dataframe columns to be used in the function
    log : bool.
        indicate if the log of the data should be used or not

    Returns
    -------
    array-like.
        one statistic value per row of df

    Raises
    ------
    NameError
        if `cov` is singular (NameError kept for backward compatibility).
    '''
    # A singular covariance matrix cannot be inverted.
    if np.linalg.det(cov) == 0:
        raise NameError("The covariance matrix can't be singular")

    # Centered observations, one row per event.
    if log is True:
        centered = np.log10(np.array(df[[x_val, y_val]]))
    else:
        centered = np.array(df[[x_val, y_val]])
    centered[:, 0] = centered[:, 0] - mu[0]
    centered[:, 1] = centered[:, 1] - mu[1]

    inv_sigma = np.linalg.inv(cov)

    # Evaluate the quadratic form row by row.
    interval_array = np.zeros(len(df))
    for row, vec in enumerate(centered):
        interval_array[row] = np.dot(np.dot(vec, inv_sigma), vec.T)

    return interval_array
def gaussian_gate(df, alpha, x_val='FSC-A', y_val='SSC-A', log=True,
                  verbose=False):
    '''
    Apply an "unsupervised bivariate Gaussian gate" to the data over the
    channels x_val and y_val.

    Parameters
    ----------
    df : DataFrame.
        dataframe containing the data to gate
    alpha : float. [0, 1]
        fraction of data aimed to keep. Used to compute the chi^2 quantile
        function
    x_val, y_val : str.
        name of the dataframe columns to be used in the function
    log : bool.
        indicate if the log of the data should be use for the fit or not
    verbose : bool.
        indicate if the percentage of data kept should be print

    Returns
    -------
    df_thresh : DataFrame
        Pandas data frame to which the automatic gate was applied.

    Raises
    ------
    RuntimeError
        if `alpha` is outside the [0, 1] interval.
    '''
    # Perform sanity checks. BUGFIX: the original *returned* the
    # RuntimeError instance instead of raising it, so an invalid alpha
    # silently handed the exception object back as the "gated" result.
    if alpha < 0 or alpha > 1:
        raise RuntimeError("`alpha` must be a float between 0 and 1.")

    data = df[[x_val, y_val]]
    # Fit the bivariate Gaussian distribution
    mu, cov = fit_2D_gaussian(data, log=log, x_val=x_val, y_val=y_val)

    # Compute the statistic for each of the pair of log scattering data
    interval_array = gauss_interval(data, mu, cov, log=log,
                                    x_val=x_val, y_val=y_val)

    # Keep the points whose statistic falls below the chi^2 quantile with
    # 2 degrees of freedom, i.e. the densest fraction alpha of the fit.
    idx = interval_array <= scipy.stats.chi2.ppf(alpha, 2)

    # print the percentage of data kept
    if verbose:
        print('''
        with parameter alpha={0:0.2f}, percentage of data kept = {1:0.2f}
        '''.format(alpha, np.sum(idx) / len(df)))

    return df[idx]
# #######################
# File Parsing Utilities
# #######################
def fcs_to_csv(path, file_name, save_metadata=True):
    R"""
    Reads in a Flow Cytometry Standard (FCS) file and exports all content
    directly to an easily parseable csv file.

    Parameters
    ----------
    path : str
        Path to .fcs file
    file_name : str
        Path to save file to .csv
    save_metadata : bool
        If True, a metadata file will also be saved. It will have the name of
        `path` with `_metadata.csv`

    Raises
    ------
    RuntimeError
        If `path` does not end with the `.fcs` extension.
    """
    # Ensure provided file is actually .fcs.
    # BUGFIX: the original check was `path.split('.')[-1] is not '.fcs'`,
    # which (a) compares identity, not equality, and (b) compares the
    # extension *without* the dot against '.fcs' — so it rejected every
    # input, including valid .fcs files.
    if not path.endswith('.fcs'):
        raise RuntimeError("`path` is not an FCS file.")

    meta, data = fcsparser.parse(path)
    data.to_csv(file_name, index=False)

    if save_metadata:
        meta_df = pd.DataFrame(meta)
        # Replace the trailing '.fcs' with '_metadata.csv'.
        meta_name = '{0}_metadata.csv'.format(path[:-4])
        meta_df.to_csv(meta_name, index=False)
| 31.494792 | 79 | 0.613031 | import numpy as np
import fcsparser
import pandas as pd
from ._fit_bivariate_normal_AstroML import fit_bivariate_normal
import scipy.stats
s(alpha)
cov = np.array([[sigma_xx, sigma_xy], [sigma_xy, sigma_yy]])
return mu, cov
e covariance matrix can't be singular")
# Compute the vector x defined as [[x - mu_x], [y - mu_y]]
if log is True:
x_vect = np.log10(np.array(df[[x_val, y_val]]))
else:
x_vect = np.array(df[[x_val, y_val]])
x_vect[:, 0] = x_vect[:, 0] - mu[0]
x_vect[:, 1] = x_vect[:, 1] - mu[1]
# compute the inverse of the covariance matrix
inv_sigma = np.linalg.inv(cov)
# compute the operation
interval_array = np.zeros(len(df))
for i, x in enumerate(x_vect):
interval_array[i] = np.dot(np.dot(x, inv_sigma), x.T)
return interval_array
def gaussian_gate(df, alpha, x_val='FSC-A', y_val='SSC-A', log=True,
verbose=False):
# Perform sanity checks.
if alpha < 0 or alpha > 1:
return RuntimeError("`alpha` must be a float between 0 and 1.")
data = df[[x_val, y_val]]
# Fit the bivariate Gaussian distribution
mu, cov = fit_2D_gaussian(data, log=log, x_val=x_val, y_val=y_val)
# Compute the statistic for each of the pair of log scattering data
interval_array = gauss_interval(data, mu, cov, log=log,
x_val=x_val, y_val=y_val)
# Find which data points fall inside the interval
idx = interval_array <= scipy.stats.chi2.ppf(alpha, 2)
# print the percentage of data kept
if verbose:
print('''
with parameter alpha={0:0.2f}, percentage of data kept = {1:0.2f}
'''.format(alpha, np.sum(idx) / len(df)))
return df[idx]
# #######################
# File Parsing Utilities
# #######################
def fcs_to_csv(path, file_name, save_metadata=True):
# Ensure provided file is actually .fcs
if path.split('.')[-1] is not '.fcs':
raise RuntimeError("`path` is not an FCS file.")
meta, data = fcsparser.parse(path)
data.to_csv(file_name, index=False)
if save_metadata:
meta_df = pd.DataFrame(meta)
meta_name = '{0}_metadata.csv'.format(path[:-4])
meta_df.to_csv(meta_name, index=False)
| true | true |
1c33b6f8041b4f8c936f93105986606a5c958769 | 2,434 | py | Python | tests/linux_benchmarks/resnet_benchmark_test.py | kczauz/PerfKitBenchmarker | 66e148a35b54f67f008c7d6e9809d796179a3380 | [
"Apache-2.0"
] | null | null | null | tests/linux_benchmarks/resnet_benchmark_test.py | kczauz/PerfKitBenchmarker | 66e148a35b54f67f008c7d6e9809d796179a3380 | [
"Apache-2.0"
] | null | null | null | tests/linux_benchmarks/resnet_benchmark_test.py | kczauz/PerfKitBenchmarker | 66e148a35b54f67f008c7d6e9809d796179a3380 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for resnet_benchmark."""
import os
import unittest
import mock
from perfkitbenchmarker import test_util
from perfkitbenchmarker.linux_benchmarks import mnist_benchmark
from perfkitbenchmarker.linux_benchmarks import resnet_benchmark
from perfkitbenchmarker.sample import Sample
class ResNetBenchmarkTestCase(unittest.TestCase, test_util.SamplesTestMixin):
def setUp(self):
path = os.path.join(os.path.dirname(__file__), '..', 'data',
'resnet_output.txt')
with open(path) as fp:
self.contents = fp.read()
self.metadata_input = {'num_examples_per_epoch': 1251.1}
self.metadata_output = {'epoch': 4.000479577971386, 'elapsed_seconds': 0,
'num_examples_per_epoch': 1251.1, 'step': 5005}
@mock.patch('time.time', mock.MagicMock(return_value=0))
def testTrainResults(self):
samples = mnist_benchmark.MakeSamplesFromTrainOutput(
self.metadata_input, self.contents, 0)
golden = [
Sample('Loss', 3.6859958, '', self.metadata_output),
Sample('Global Steps Per Second', 3.6699466666666667,
'global_steps/sec', self.metadata_output),
Sample('Examples Per Second', 3758.023333333333,
'examples/sec', self.metadata_output)
]
self.assertEqual(samples, golden)
@mock.patch('time.time', mock.MagicMock(return_value=0))
def testEvalResults(self):
samples = resnet_benchmark.MakeSamplesFromEvalOutput(
self.metadata_input, self.contents, 0)
golden = [
Sample('Eval Loss', 3.86324, '', self.metadata_output),
Sample('Top 1 Accuracy', 32.751465, '%', self.metadata_output),
Sample('Top 5 Accuracy', 58.825684, '%', self.metadata_output)
]
self.assertEqual(samples, golden)
if __name__ == '__main__':
unittest.main()
| 38.634921 | 77 | 0.709942 |
import os
import unittest
import mock
from perfkitbenchmarker import test_util
from perfkitbenchmarker.linux_benchmarks import mnist_benchmark
from perfkitbenchmarker.linux_benchmarks import resnet_benchmark
from perfkitbenchmarker.sample import Sample
class ResNetBenchmarkTestCase(unittest.TestCase, test_util.SamplesTestMixin):
def setUp(self):
path = os.path.join(os.path.dirname(__file__), '..', 'data',
'resnet_output.txt')
with open(path) as fp:
self.contents = fp.read()
self.metadata_input = {'num_examples_per_epoch': 1251.1}
self.metadata_output = {'epoch': 4.000479577971386, 'elapsed_seconds': 0,
'num_examples_per_epoch': 1251.1, 'step': 5005}
@mock.patch('time.time', mock.MagicMock(return_value=0))
def testTrainResults(self):
samples = mnist_benchmark.MakeSamplesFromTrainOutput(
self.metadata_input, self.contents, 0)
golden = [
Sample('Loss', 3.6859958, '', self.metadata_output),
Sample('Global Steps Per Second', 3.6699466666666667,
'global_steps/sec', self.metadata_output),
Sample('Examples Per Second', 3758.023333333333,
'examples/sec', self.metadata_output)
]
self.assertEqual(samples, golden)
@mock.patch('time.time', mock.MagicMock(return_value=0))
def testEvalResults(self):
samples = resnet_benchmark.MakeSamplesFromEvalOutput(
self.metadata_input, self.contents, 0)
golden = [
Sample('Eval Loss', 3.86324, '', self.metadata_output),
Sample('Top 1 Accuracy', 32.751465, '%', self.metadata_output),
Sample('Top 5 Accuracy', 58.825684, '%', self.metadata_output)
]
self.assertEqual(samples, golden)
if __name__ == '__main__':
unittest.main()
| true | true |
1c33b801400323fb0a007fa682d7ea3b58a5b5c7 | 2,244 | py | Python | djangocms_baseplugins/download/migrations/0002_downloadsection.py | benzkji/djangocms-baseplugins | 7f041a030ed93dcdec70e4ca777b841846b8f2f2 | [
"MIT"
] | 2 | 2019-04-14T01:31:22.000Z | 2020-03-05T13:06:57.000Z | djangocms_baseplugins/download/migrations/0002_downloadsection.py | benzkji/djangocms-baseplugins | 7f041a030ed93dcdec70e4ca777b841846b8f2f2 | [
"MIT"
] | 32 | 2017-04-04T09:28:06.000Z | 2021-08-18T16:23:02.000Z | djangocms_baseplugins/download/migrations/0002_downloadsection.py | bnzk/djangocms-baseplugins | 7f041a030ed93dcdec70e4ca777b841846b8f2f2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-08-05 16:02
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cms', '0016_auto_20160608_1535'),
('download', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='DownloadSection',
fields=[
('cmsplugin_ptr', models.OneToOneField(auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True, primary_key=True,
related_name='download_downloadsection',
serialize=False, to='cms.CMSPlugin')),
('title',
models.CharField(blank=True, default='', max_length=256, verbose_name='Title')),
('published', models.BooleanField(default=True, verbose_name='Published?')),
('published_from_date', models.DateTimeField(blank=True, default=None, null=True,
verbose_name='Published from')),
('published_until_date', models.DateTimeField(blank=True, default=None, null=True,
verbose_name='Published until')),
('in_menu', models.BooleanField(default=False, verbose_name='In Menu?')),
('layout',
models.CharField(blank=True, default='', max_length=64, verbose_name='Layout')),
('background', models.CharField(blank=True, default='', max_length=64,
verbose_name='Background')),
('color',
models.CharField(blank=True, default='', max_length=64, verbose_name='Color')),
('anchor', models.SlugField(blank=True, default='', verbose_name='Anchor')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
| 48.782609 | 99 | 0.505348 |
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cms', '0016_auto_20160608_1535'),
('download', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='DownloadSection',
fields=[
('cmsplugin_ptr', models.OneToOneField(auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True, primary_key=True,
related_name='download_downloadsection',
serialize=False, to='cms.CMSPlugin')),
('title',
models.CharField(blank=True, default='', max_length=256, verbose_name='Title')),
('published', models.BooleanField(default=True, verbose_name='Published?')),
('published_from_date', models.DateTimeField(blank=True, default=None, null=True,
verbose_name='Published from')),
('published_until_date', models.DateTimeField(blank=True, default=None, null=True,
verbose_name='Published until')),
('in_menu', models.BooleanField(default=False, verbose_name='In Menu?')),
('layout',
models.CharField(blank=True, default='', max_length=64, verbose_name='Layout')),
('background', models.CharField(blank=True, default='', max_length=64,
verbose_name='Background')),
('color',
models.CharField(blank=True, default='', max_length=64, verbose_name='Color')),
('anchor', models.SlugField(blank=True, default='', verbose_name='Anchor')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
| true | true |
1c33b809346fc82b80b66e12604955377986dc88 | 659 | py | Python | Chase/code/fixSpeed.py | haghish/Chase | 8045bce3739cf4cfec63b8fd3387cb1b43904fc3 | [
"MIT"
] | 2 | 2020-09-01T13:09:10.000Z | 2020-12-17T16:36:42.000Z | Chase/code/fixSpeed.py | haghish/Chase | 8045bce3739cf4cfec63b8fd3387cb1b43904fc3 | [
"MIT"
] | null | null | null | Chase/code/fixSpeed.py | haghish/Chase | 8045bce3739cf4cfec63b8fd3387cb1b43904fc3 | [
"MIT"
] | null | null | null | # Written by HAGHISH UG 2016
# ALL RIGHTS RESERVED
import math
def fixSpeed(speed, dx, dy):
# calculate the distance
squaredDistance = ((dy)**2) + ((dx)**2)
distance = math.sqrt(squaredDistance)
# get the ratio between distance and speed
ratio = distance/speed
# get xHat and yHat (make sure you don't divide by 0)
if ratio != 0:
dxHat = math.sqrt(dx**2/ratio**2)
dyHat = math.sqrt(dy ** 2 / ratio ** 2)
else:
dxHat = 0
dyHat = 0
# check if movement is negative or positive
if dx < 0:
dxHat *= -1
if dy < 0:
dyHat *= -1
return (dxHat, dyHat)
| 21.258065 | 57 | 0.564492 |
import math
def fixSpeed(speed, dx, dy):
squaredDistance = ((dy)**2) + ((dx)**2)
distance = math.sqrt(squaredDistance)
ratio = distance/speed
if ratio != 0:
dxHat = math.sqrt(dx**2/ratio**2)
dyHat = math.sqrt(dy ** 2 / ratio ** 2)
else:
dxHat = 0
dyHat = 0
# check if movement is negative or positive
if dx < 0:
dxHat *= -1
if dy < 0:
dyHat *= -1
return (dxHat, dyHat)
| true | true |
1c33b832812df09e864b29741395873dbedd902b | 5,112 | py | Python | conans/client/packager.py | xaqq/conan | ab0870336550b7521da71595c6babf42d5690f7b | [
"MIT"
] | null | null | null | conans/client/packager.py | xaqq/conan | ab0870336550b7521da71595c6babf42d5690f7b | [
"MIT"
] | 1 | 2018-06-01T09:34:49.000Z | 2018-06-01T13:51:07.000Z | conans/client/packager.py | xaqq/conan | ab0870336550b7521da71595c6babf42d5690f7b | [
"MIT"
] | null | null | null | import os
import shutil
from conans.client import tools
from conans.client.file_copier import FileCopier, report_copied_files
from conans.client.output import ScopedOutput
from conans.errors import (ConanException, ConanExceptionInUserConanfileMethod,
conanfile_exception_formatter)
from conans.model.manifest import FileTreeManifest
from conans.paths import CONANINFO
from conans.util.files import mkdir, rmdir, save
from conans.util.log import logger
def export_pkg(conanfile, package_id, src_package_folder, package_folder, hook_manager,
conanfile_path, ref):
mkdir(package_folder)
conanfile.package_folder = src_package_folder
output = conanfile.output
output.info("Exporting to cache existing package from user folder")
output.info("Package folder %s" % package_folder)
hook_manager.execute("pre_package", conanfile=conanfile, conanfile_path=conanfile_path,
reference=ref, package_id=package_id)
copier = FileCopier(src_package_folder, package_folder)
copier("*", symlinks=True)
save(os.path.join(package_folder, CONANINFO), conanfile.info.dumps())
digest = FileTreeManifest.create(package_folder)
digest.save(package_folder)
_report_files_from_manifest(output, package_folder)
output.success("Package '%s' created" % package_id)
conanfile.package_folder = package_folder
hook_manager.execute("post_package", conanfile=conanfile, conanfile_path=conanfile_path,
reference=ref, package_id=package_id)
def create_package(conanfile, package_id, source_folder, build_folder, package_folder,
install_folder, hook_manager, conanfile_path, ref, local=False,
copy_info=False):
""" copies built artifacts, libs, headers, data, etc. from build_folder to
package folder
"""
mkdir(package_folder)
output = conanfile.output
# Make the copy of all the patterns
output.info("Generating the package")
output.info("Package folder %s" % package_folder)
try:
conanfile.package_folder = package_folder
conanfile.source_folder = source_folder
conanfile.install_folder = install_folder
conanfile.build_folder = build_folder
hook_manager.execute("pre_package", conanfile=conanfile, conanfile_path=conanfile_path,
reference=ref, package_id=package_id)
package_output = ScopedOutput("%s package()" % output.scope, output)
output.highlight("Calling package()")
if source_folder != build_folder:
conanfile.copy = FileCopier(source_folder, package_folder, build_folder)
with conanfile_exception_formatter(str(conanfile), "package"):
with tools.chdir(source_folder):
conanfile.package()
conanfile.copy = FileCopier(build_folder, package_folder)
with tools.chdir(build_folder):
with conanfile_exception_formatter(str(conanfile), "package"):
conanfile.package()
except Exception as e:
if not local:
os.chdir(build_folder)
try:
rmdir(package_folder)
except Exception as e_rm:
output.error("Unable to remove package folder %s\n%s" % (package_folder, str(e_rm)))
output.warn("**** Please delete it manually ****")
if isinstance(e, ConanExceptionInUserConanfileMethod):
raise
raise ConanException(e)
_create_aux_files(install_folder, package_folder, conanfile, copy_info)
_report_files_from_manifest(package_output, package_folder)
package_id = package_id or os.path.basename(package_folder)
output.success("Package '%s' created" % package_id)
hook_manager.execute("post_package", conanfile=conanfile, conanfile_path=conanfile_path,
reference=ref, package_id=package_id)
def _create_aux_files(install_folder, package_folder, conanfile, copy_info):
""" auxiliary method that creates CONANINFO and manifest in
the package_folder
"""
logger.debug("PACKAGE: Creating config files to %s" % package_folder)
if copy_info:
try:
shutil.copy(os.path.join(install_folder, CONANINFO), package_folder)
except IOError:
raise ConanException("%s does not exist inside of your %s folder. "
"Try to re-build it again to solve it."
% (CONANINFO, install_folder))
else:
save(os.path.join(package_folder, CONANINFO), conanfile.info.dumps())
# Create the digest for the package
digest = FileTreeManifest.create(package_folder)
digest.save(package_folder)
def _report_files_from_manifest(output, package_folder):
digest = FileTreeManifest.load(package_folder)
copied_files = list(digest.files())
copied_files.remove(CONANINFO)
if not copied_files:
output.warn("No files in this package!")
return
report_copied_files(copied_files, output, message_suffix="Packaged")
| 40.896 | 100 | 0.693075 | import os
import shutil
from conans.client import tools
from conans.client.file_copier import FileCopier, report_copied_files
from conans.client.output import ScopedOutput
from conans.errors import (ConanException, ConanExceptionInUserConanfileMethod,
conanfile_exception_formatter)
from conans.model.manifest import FileTreeManifest
from conans.paths import CONANINFO
from conans.util.files import mkdir, rmdir, save
from conans.util.log import logger
def export_pkg(conanfile, package_id, src_package_folder, package_folder, hook_manager,
conanfile_path, ref):
mkdir(package_folder)
conanfile.package_folder = src_package_folder
output = conanfile.output
output.info("Exporting to cache existing package from user folder")
output.info("Package folder %s" % package_folder)
hook_manager.execute("pre_package", conanfile=conanfile, conanfile_path=conanfile_path,
reference=ref, package_id=package_id)
copier = FileCopier(src_package_folder, package_folder)
copier("*", symlinks=True)
save(os.path.join(package_folder, CONANINFO), conanfile.info.dumps())
digest = FileTreeManifest.create(package_folder)
digest.save(package_folder)
_report_files_from_manifest(output, package_folder)
output.success("Package '%s' created" % package_id)
conanfile.package_folder = package_folder
hook_manager.execute("post_package", conanfile=conanfile, conanfile_path=conanfile_path,
reference=ref, package_id=package_id)
def create_package(conanfile, package_id, source_folder, build_folder, package_folder,
install_folder, hook_manager, conanfile_path, ref, local=False,
copy_info=False):
mkdir(package_folder)
output = conanfile.output
output.info("Generating the package")
output.info("Package folder %s" % package_folder)
try:
conanfile.package_folder = package_folder
conanfile.source_folder = source_folder
conanfile.install_folder = install_folder
conanfile.build_folder = build_folder
hook_manager.execute("pre_package", conanfile=conanfile, conanfile_path=conanfile_path,
reference=ref, package_id=package_id)
package_output = ScopedOutput("%s package()" % output.scope, output)
output.highlight("Calling package()")
if source_folder != build_folder:
conanfile.copy = FileCopier(source_folder, package_folder, build_folder)
with conanfile_exception_formatter(str(conanfile), "package"):
with tools.chdir(source_folder):
conanfile.package()
conanfile.copy = FileCopier(build_folder, package_folder)
with tools.chdir(build_folder):
with conanfile_exception_formatter(str(conanfile), "package"):
conanfile.package()
except Exception as e:
if not local:
os.chdir(build_folder)
try:
rmdir(package_folder)
except Exception as e_rm:
output.error("Unable to remove package folder %s\n%s" % (package_folder, str(e_rm)))
output.warn("**** Please delete it manually ****")
if isinstance(e, ConanExceptionInUserConanfileMethod):
raise
raise ConanException(e)
_create_aux_files(install_folder, package_folder, conanfile, copy_info)
_report_files_from_manifest(package_output, package_folder)
package_id = package_id or os.path.basename(package_folder)
output.success("Package '%s' created" % package_id)
hook_manager.execute("post_package", conanfile=conanfile, conanfile_path=conanfile_path,
reference=ref, package_id=package_id)
def _create_aux_files(install_folder, package_folder, conanfile, copy_info):
logger.debug("PACKAGE: Creating config files to %s" % package_folder)
if copy_info:
try:
shutil.copy(os.path.join(install_folder, CONANINFO), package_folder)
except IOError:
raise ConanException("%s does not exist inside of your %s folder. "
"Try to re-build it again to solve it."
% (CONANINFO, install_folder))
else:
save(os.path.join(package_folder, CONANINFO), conanfile.info.dumps())
digest = FileTreeManifest.create(package_folder)
digest.save(package_folder)
def _report_files_from_manifest(output, package_folder):
digest = FileTreeManifest.load(package_folder)
copied_files = list(digest.files())
copied_files.remove(CONANINFO)
if not copied_files:
output.warn("No files in this package!")
return
report_copied_files(copied_files, output, message_suffix="Packaged")
| true | true |
1c33b861ff7df4bb714197bfefe52bd56f66e9fc | 8,355 | py | Python | bfutils/bfpp-interp.py | borisfaure/bfb | 1f019ab580b1e75eaa1eca3c3e87944da148607e | [
"WTFPL"
] | 1 | 2015-04-22T08:23:47.000Z | 2015-04-22T08:23:47.000Z | bfutils/bfpp-interp.py | borisfaure/bfb | 1f019ab580b1e75eaa1eca3c3e87944da148607e | [
"WTFPL"
] | null | null | null | bfutils/bfpp-interp.py | borisfaure/bfb | 1f019ab580b1e75eaa1eca3c3e87944da148607e | [
"WTFPL"
] | null | null | null | #!/usr/bin/env python
"""
A brainfuck++ interpertor. Based on pybrain4.
"""
import os
import sys
import optparse
import bfpreprocessor
import tty
import termios
import socket
class Interp():
def __init__(self, code):
self.cells = [0] * 30000
self.maxint = (2 ** 8) - 1
self.cellpointer = 0
self.codecursor = 0
self.socket = None
self.file = None
self.code = code
self.socketbuf = None
if code == '':
return None
def run(self):
while True:
i = self.code[self.codecursor]
if i == '+':
if self.cells[self.cellpointer] < self.maxint:
self.cells[self.cellpointer] += 1
else:
self.cells[self.cellpointer] = 0
elif i == '-':
if self.cells[self.cellpointer] == 0:
self.cells[self.cellpointer] = self.maxint
else:
self.cells[self.cellpointer] -= 1
elif i == '.':
sys.stdout.write(chr(self.cells[self.cellpointer]))
elif i == ',':
self.cells[self.cellpointer] = ord(self.getchar())
elif i == '<':
self.cellpointer -= 1
elif i == '>':
self.cellpointer += 1
elif i == '[':
if self.cells[self.cellpointer] == 0:
self.matchingbracket()
elif i == ']':
if self.cells[self.cellpointer] != 0:
self.matchingbracket()
elif i == '%':
if self.socket:
self.socket.close()
self.socket = None
else:
self.create_socket()
elif i == '^':
if self.socket:
self.socket.send(chr(self.cells[self.cellpointer]))
elif i == '!':
self.readFromSocket()
elif i == '#':
if self.file is not None:
self.file.close()
self.file = None
else:
fname = ""
curcellpointer = self.cellpointer
while self.cells[curcellpointer] != 0:
fname = fname + chr(self.cells[curcellpointer])
curcellpointer += 1
try:
self.file = open(fname)
self.cells[self.cellpointer] = 0
except IOError as msg:
self.file = None
self.cells[self.cellpointer] = self.maxint
sys.stderr.write("opening file '%s' failed: %s"
%(fname, msg))
elif i == ';':
if self.file is not None:
try:
self.file.write(chr(self.cells[self.cellpointer]))
except IOError as msg:
self.cells[self.cellpointer] = 0
sys.stderr.write("error writing to file: %s" %(msg,))
elif i == ':':
if self.file is not None:
try:
s = self.file.read(1)
if len(s) == 0:
sys.stderr.write("error reading from file")
self.cells[self.cellpointer] = 0
else:
self.cells[self.cellpointer] = ord(s[0])
except IOError as msg:
self.cells[self.cellpointer] = 0
sys.stderr.write("error reading from file: %s" %(msg,))
elif i == 'D':
self.debug()
if self.codecursor == len(self.code) - 1:
sys.stdout.write('\n')
break
else:
self.codecursor += 1
def readFromSocket(self):
if self.socketbuf:
if self.socketbufpos < len(self.socketbuf):
self.cells[self.cellpointer] = \
ord(self.socketbuf[self.socketbufpos])
self.socketbufpos += 1
return
else:
self.socketbuf = None
if self.socket:
try:
self.socketbuf = self.socket.recv(4096)
self.cells[self.cellpointer] = ord(self.socketbuf[0])
self.socketbufpos = 1
except (socket.error, TypeError):
self.cells[self.cellpointer] = 0
return
else:
self.cells[self.cellpointer] = 0
def matchingbracket(self):
if self.code[self.codecursor] == '[':
opens = 0
for i in range(self.codecursor, len(self.code)):
if self.code[i] == '[':
opens += 1
elif self.code[i] == ']':
opens -= 1
if opens == 0:
self.codecursor = i
return
elif self.code[self.codecursor] == ']':
closeds = 0
for i in range(self.codecursor, -1, -1):
if self.code[i] == ']':
closeds += 1
elif self.code[i] == '[':
closeds -= 1
if closeds == 0:
self.codecursor = i
return
def getchar(self):
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch if ch != '\r' else '\n'
def create_socket(self):
addr = ""
curcellpointer = self.cellpointer
while self.cells[curcellpointer] != 0:
addr = addr + chr(self.cells[curcellpointer])
curcellpointer += 1
l = addr.split(':')
if len(l) < 2:
self.cells[self.cellpointer] = self.maxint
sys.stderr.write("parsing '%s' failed" % (addr,))
return
host = l[0]
port = l[1]
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if (len(l) > 2) and (l[2] == "ssl"):
import ssl
self.socket = ssl.wrap_socket(self.socket)
self.socket.connect((host, int(port)))
self.socket.setblocking(1)
self.cells[self.cellpointer] = 0
except socket.error as msg:
self.socket.close()
self.socket = None
self.cells[self.cellpointer] = self.maxint
sys.stderr.write("creating socket failed (%s,%s) : %s"
%(host, port, msg))
def debug(self):
print("Current position in the code: %d/%d" % (self.codecursor,
len(self.code)))
print("Next 5 instructions: ", end=' ')
for i in range(self.codecursor + 1, min(self.codecursor + 6, len(self.code))):
print(self.code[i], end=' ')
print('')
print("Current pointer is %d" % (self.cellpointer,))
print("Print cell range:")
rng = sys.stdin.readline()
(x, sep, y) = rng.partition('-')
if sep:
try:
x = int(x)
y = int(y)
except ValueError:
return
for i in range(x,y+1):
print("| %d" % (self.cells[i],), end=' ')
print('')
def main():
parser = optparse.OptionParser(usage="%prog [OPTIONS] FILE")
parser.add_option("-d", "--debug",
dest="debug", default=False, action="store_true",
help="run the python in interactive debug mode when 'D' is encountered")
(options, args) = parser.parse_args()
filename = None
if len(args) != 1:
parser.error("incorrect number of arguments")
if args[0] != '-':
filename = os.path.abspath(args[0])
code = bfpreprocessor.preprocess(filename, options.debug)
i = Interp(code)
if i:
i.run()
return 0
else:
return -1
if __name__ == '__main__':
sys.exit(main())
| 35.553191 | 94 | 0.452783 |
import os
import sys
import optparse
import bfpreprocessor
import tty
import termios
import socket
class Interp():
def __init__(self, code):
self.cells = [0] * 30000
self.maxint = (2 ** 8) - 1
self.cellpointer = 0
self.codecursor = 0
self.socket = None
self.file = None
self.code = code
self.socketbuf = None
if code == '':
return None
def run(self):
while True:
i = self.code[self.codecursor]
if i == '+':
if self.cells[self.cellpointer] < self.maxint:
self.cells[self.cellpointer] += 1
else:
self.cells[self.cellpointer] = 0
elif i == '-':
if self.cells[self.cellpointer] == 0:
self.cells[self.cellpointer] = self.maxint
else:
self.cells[self.cellpointer] -= 1
elif i == '.':
sys.stdout.write(chr(self.cells[self.cellpointer]))
elif i == ',':
self.cells[self.cellpointer] = ord(self.getchar())
elif i == '<':
self.cellpointer -= 1
elif i == '>':
self.cellpointer += 1
elif i == '[':
if self.cells[self.cellpointer] == 0:
self.matchingbracket()
elif i == ']':
if self.cells[self.cellpointer] != 0:
self.matchingbracket()
elif i == '%':
if self.socket:
self.socket.close()
self.socket = None
else:
self.create_socket()
elif i == '^':
if self.socket:
self.socket.send(chr(self.cells[self.cellpointer]))
elif i == '!':
self.readFromSocket()
elif i == '#':
if self.file is not None:
self.file.close()
self.file = None
else:
fname = ""
curcellpointer = self.cellpointer
while self.cells[curcellpointer] != 0:
fname = fname + chr(self.cells[curcellpointer])
curcellpointer += 1
try:
self.file = open(fname)
self.cells[self.cellpointer] = 0
except IOError as msg:
self.file = None
self.cells[self.cellpointer] = self.maxint
sys.stderr.write("opening file '%s' failed: %s"
%(fname, msg))
elif i == ';':
if self.file is not None:
try:
self.file.write(chr(self.cells[self.cellpointer]))
except IOError as msg:
self.cells[self.cellpointer] = 0
sys.stderr.write("error writing to file: %s" %(msg,))
elif i == ':':
if self.file is not None:
try:
s = self.file.read(1)
if len(s) == 0:
sys.stderr.write("error reading from file")
self.cells[self.cellpointer] = 0
else:
self.cells[self.cellpointer] = ord(s[0])
except IOError as msg:
self.cells[self.cellpointer] = 0
sys.stderr.write("error reading from file: %s" %(msg,))
elif i == 'D':
self.debug()
if self.codecursor == len(self.code) - 1:
sys.stdout.write('\n')
break
else:
self.codecursor += 1
def readFromSocket(self):
if self.socketbuf:
if self.socketbufpos < len(self.socketbuf):
self.cells[self.cellpointer] = \
ord(self.socketbuf[self.socketbufpos])
self.socketbufpos += 1
return
else:
self.socketbuf = None
if self.socket:
try:
self.socketbuf = self.socket.recv(4096)
self.cells[self.cellpointer] = ord(self.socketbuf[0])
self.socketbufpos = 1
except (socket.error, TypeError):
self.cells[self.cellpointer] = 0
return
else:
self.cells[self.cellpointer] = 0
def matchingbracket(self):
if self.code[self.codecursor] == '[':
opens = 0
for i in range(self.codecursor, len(self.code)):
if self.code[i] == '[':
opens += 1
elif self.code[i] == ']':
opens -= 1
if opens == 0:
self.codecursor = i
return
elif self.code[self.codecursor] == ']':
closeds = 0
for i in range(self.codecursor, -1, -1):
if self.code[i] == ']':
closeds += 1
elif self.code[i] == '[':
closeds -= 1
if closeds == 0:
self.codecursor = i
return
def getchar(self):
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch if ch != '\r' else '\n'
def create_socket(self):
addr = ""
curcellpointer = self.cellpointer
while self.cells[curcellpointer] != 0:
addr = addr + chr(self.cells[curcellpointer])
curcellpointer += 1
l = addr.split(':')
if len(l) < 2:
self.cells[self.cellpointer] = self.maxint
sys.stderr.write("parsing '%s' failed" % (addr,))
return
host = l[0]
port = l[1]
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if (len(l) > 2) and (l[2] == "ssl"):
import ssl
self.socket = ssl.wrap_socket(self.socket)
self.socket.connect((host, int(port)))
self.socket.setblocking(1)
self.cells[self.cellpointer] = 0
except socket.error as msg:
self.socket.close()
self.socket = None
self.cells[self.cellpointer] = self.maxint
sys.stderr.write("creating socket failed (%s,%s) : %s"
%(host, port, msg))
def debug(self):
print("Current position in the code: %d/%d" % (self.codecursor,
len(self.code)))
print("Next 5 instructions: ", end=' ')
for i in range(self.codecursor + 1, min(self.codecursor + 6, len(self.code))):
print(self.code[i], end=' ')
print('')
print("Current pointer is %d" % (self.cellpointer,))
print("Print cell range:")
rng = sys.stdin.readline()
(x, sep, y) = rng.partition('-')
if sep:
try:
x = int(x)
y = int(y)
except ValueError:
return
for i in range(x,y+1):
print("| %d" % (self.cells[i],), end=' ')
print('')
def main():
parser = optparse.OptionParser(usage="%prog [OPTIONS] FILE")
parser.add_option("-d", "--debug",
dest="debug", default=False, action="store_true",
help="run the python in interactive debug mode when 'D' is encountered")
(options, args) = parser.parse_args()
filename = None
if len(args) != 1:
parser.error("incorrect number of arguments")
if args[0] != '-':
filename = os.path.abspath(args[0])
code = bfpreprocessor.preprocess(filename, options.debug)
i = Interp(code)
if i:
i.run()
return 0
else:
return -1
if __name__ == '__main__':
sys.exit(main())
| true | true |
1c33b9cb76fb59d737a1b4f14ac11c663289ecdd | 10,541 | py | Python | web/SimpleHTTPServerWithUpload.py | laxa/scripts | 40bcf3b2090430ab0363d8326aede80a6a3318c1 | [
"MIT"
] | 1 | 2018-09-05T13:35:24.000Z | 2018-09-05T13:35:24.000Z | web/SimpleHTTPServerWithUpload.py | Laxa/scripts | 6eaeb4ac65a62fe098bff45eb9f421560d1a2984 | [
"MIT"
] | null | null | null | web/SimpleHTTPServerWithUpload.py | Laxa/scripts | 6eaeb4ac65a62fe098bff45eb9f421560d1a2984 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# script sources: https://gist.github.com/UniIsland/3346170
"""Simple HTTP Server With Upload.
This module builds on BaseHTTPServer by implementing the standard GET
and HEAD requests in a fairly straightforward manner.
"""
__version__ = "0.1"
__all__ = ["SimpleHTTPRequestHandler"]
__author__ = "bones7456"
__home_page__ = "http://li2z.cn/"
import os
import posixpath
import http.server
import urllib
import html
import shutil
import mimetypes
import re
import io
class SimpleHTTPRequestHandler(http.server.BaseHTTPRequestHandler):

    """Simple HTTP request handler with GET/HEAD/POST commands.

    This serves files from the current directory and any of its
    subdirectories.  The MIME type for files is determined by
    calling the .guess_type() method.  And can receive files uploaded
    by the client.

    The GET/HEAD/POST requests are identical except that the HEAD
    request omits the actual contents of the file.

    """

    server_version = "SimpleHTTPWithUpload/" + __version__

    def do_GET(self):
        """Serve a GET request."""
        f = self.send_head()
        if f:
            self.copyfile(f, self.wfile)
            f.close()

    def do_HEAD(self):
        """Serve a HEAD request (headers only, no body)."""
        f = self.send_head()
        if f:
            f.close()

    def do_POST(self):
        """Serve a POST request: accept a file upload and render a result page."""
        r, info = self.deal_post_data()
        print(r, info, "by: ", self.client_address)
        info = info.encode()
        f = io.BytesIO()
        f.write(b'<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
        f.write(b'<html>\n<title>Upload Result Page</title>\n')
        f.write(b'<body>\n<h2>Upload Result Page</h2>\n')
        f.write(b'<hr>\n')
        if r:
            f.write(b'<strong>Success:</strong>')
        else:
            f.write(b'<strong>Failed:</strong>')
        f.write(info)
        # The Referer header is optional; fall back to the site root so a
        # missing header no longer crashes the handler with AttributeError.
        referer = self.headers.get('referer') or '/'
        f.write(b'<br><a href=\"%s\">back</a>' % referer.encode())
        f.write(b'<hr><small>Powerd By: bones7456, check new version at ')
        f.write(b'<a href=\"http://li2z.cn/?s=SimpleHTTPServerWithUpload\">')
        f.write(b'here</a>.</small></body>\n</html>\n')
        length = f.tell()
        f.seek(0)
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.send_header("Content-Length", str(length))
        self.end_headers()
        if f:
            self.copyfile(f, self.wfile)
            f.close()

    def deal_post_data(self):
        """Parse a multipart/form-data upload and write the file to disk.

        Returns:
            (bool, str): success flag and a human-readable status message.
        """
        boundary = self.headers['Content-Type'].split("=")[1].encode()
        remainbytes = int(self.headers['content-length'])
        line = self.rfile.readline()
        remainbytes -= len(line)
        if not boundary in line:
            return (False, "Content NOT begin with boundary")
        line = self.rfile.readline()
        remainbytes -= len(line)
        fn = re.findall(r'Content-Disposition.*name="file"; filename="(.*)"', line.decode())
        if not fn:
            return (False, "Can't find out file name...")
        path = self.translate_path(self.path)
        # basename() strips any client-supplied directory components, closing
        # a path-traversal hole (e.g. filename="../../etc/passwd").
        fn = os.path.join(path, os.path.basename(fn[0]))
        # Skip the Content-Type header line and the blank separator line.
        line = self.rfile.readline()
        remainbytes -= len(line)
        line = self.rfile.readline()
        remainbytes -= len(line)
        try:
            out = open(fn, 'wb')
        except IOError:
            return (False, "Can't create file to write, do you have permission to write?")
        # Write each line one iteration late so the CR/LF that precedes the
        # closing boundary can be trimmed off the payload.
        preline = self.rfile.readline()
        remainbytes -= len(preline)
        while remainbytes > 0:
            line = self.rfile.readline()
            remainbytes -= len(line)
            if boundary in line:
                preline = preline[0:-1]
                if preline.endswith(b'\r'):
                    preline = preline[0:-1]
                out.write(preline)
                out.close()
                return (True, "File '%s' upload success!" % fn)
            else:
                out.write(preline)
                preline = line
        return (False, "Unexpect Ends of data.")

    def send_head(self):
        """Common code for GET and HEAD commands.

        This sends the response code and MIME headers.

        Return value is either a file object (which has to be copied
        to the outputfile by the caller unless the command was HEAD,
        and must be closed by the caller under all circumstances), or
        None, in which case the caller has nothing further to do.

        """
        path = self.translate_path(self.path)
        f = None
        if os.path.isdir(path):
            if not self.path.endswith('/'):
                # redirect browser - doing basically what apache does
                self.send_response(301)
                self.send_header("Location", self.path + "/")
                self.end_headers()
                return None
            for index in "index.html", "index.htm":
                index = os.path.join(path, index)
                if os.path.exists(index):
                    path = index
                    break
            else:
                return self.list_directory(path)
        ctype = self.guess_type(path)
        try:
            # Always read in binary mode. Opening files in text mode may cause
            # newline translations, making the actual size of the content
            # transmitted *less* than the content-length!
            f = open(path, 'rb')
        except IOError:
            self.send_error(404, "File not found")
            return None
        self.send_response(200)
        self.send_header("Content-type", ctype)
        fs = os.fstat(f.fileno())
        self.send_header("Content-Length", str(fs[6]))
        self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
        self.end_headers()
        return f

    def list_directory(self, path):
        """Helper to produce a directory listing (absent index.html).

        Return value is either a file object, or None (indicating an
        error).  In either case, the headers are sent, making the
        interface the same as for send_head().

        """
        try:
            list = os.listdir(path)
        except os.error:
            self.send_error(404, "No permission to list directory")
            return None
        list.sort(key=lambda a: a.lower())
        f = io.BytesIO()
        displaypath = html.escape(urllib.parse.unquote(self.path))
        f.write(b'<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
        f.write(b'<html>\n<title>Directory listing for %s</title>\n' % displaypath.encode())
        f.write(b'<body>\n<h2>Directory listing for %s</h2>\n' % displaypath.encode())
        f.write(b'<hr>\n')
        f.write(b'<form ENCTYPE=\"multipart/form-data\" method=\"post\">')
        f.write(b'<input name=\"file\" type=\"file\"/>')
        f.write(b'<input type=\"submit\" value=\"upload\"/></form>\n')
        f.write(b'<hr>\n<ul>\n')
        for name in list:
            fullname = os.path.join(path, name)
            displayname = linkname = name
            # Append / for directories or @ for symbolic links
            if os.path.isdir(fullname):
                displayname = name + "/"
                linkname = name + "/"
            if os.path.islink(fullname):
                displayname = name + "@"
                # Note: a link to a directory displays with @ and links with /
            f.write(b'<li><a href="%s">%s</a>\n'
                    % (urllib.parse.quote(linkname).encode(), html.escape(displayname).encode()))
        f.write(b'</ul>\n<hr>\n</body>\n</html>\n')
        length = f.tell()
        f.seek(0)
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.send_header("Content-Length", str(length))
        self.end_headers()
        return f

    def translate_path(self, path):
        """Translate a /-separated PATH to the local filename syntax.

        Components that mean special things to the local file system
        (e.g. drive or directory names) are ignored.  (XXX They should
        probably be diagnosed.)

        """
        # abandon query parameters
        path = path.split('?',1)[0]
        path = path.split('#',1)[0]
        path = posixpath.normpath(urllib.parse.unquote(path))
        words = path.split('/')
        words = filter(None, words)
        path = os.getcwd()
        for word in words:
            drive, word = os.path.splitdrive(word)
            head, word = os.path.split(word)
            if word in (os.curdir, os.pardir): continue
            path = os.path.join(path, word)
        return path

    def copyfile(self, source, outputfile):
        """Copy all data between two file objects.

        The SOURCE argument is a file object open for reading
        (or anything with a read() method) and the DESTINATION
        argument is a file object open for writing (or
        anything with a write() method).

        The only reason for overriding this would be to change
        the block size or perhaps to replace newlines by CRLF
        -- note however that this the default server uses this
        to copy binary data as well.

        """
        shutil.copyfileobj(source, outputfile)

    def guess_type(self, path):
        """Guess the type of a file.

        Argument is a PATH (a filename).

        Return value is a string of the form type/subtype,
        usable for a MIME Content-type header.

        The default implementation looks the file's extension
        up in the table self.extensions_map, using application/octet-stream
        as a default; however it would be permissible (if
        slow) to look inside the data to make a better guess.

        """
        base, ext = posixpath.splitext(path)
        if ext in self.extensions_map:
            return self.extensions_map[ext]
        ext = ext.lower()
        if ext in self.extensions_map:
            return self.extensions_map[ext]
        else:
            return self.extensions_map['']

    if not mimetypes.inited:
        mimetypes.init()  # try to read system mime.types
    extensions_map = mimetypes.types_map.copy()
    extensions_map.update({
        '': 'application/octet-stream',  # Default
        '.py': 'text/plain',
        '.c': 'text/plain',
        '.h': 'text/plain',
    })
def test(HandlerClass = SimpleHTTPRequestHandler,
         ServerClass = http.server.HTTPServer):
    """Serve forever using the stdlib test harness with the upload handler."""
    http.server.test(HandlerClass, ServerClass)
# Serve files from the current working directory when run as a script.
if __name__ == '__main__':
    test()
| 35.611486 | 97 | 0.581064 |
__version__ = "0.1"
__all__ = ["SimpleHTTPRequestHandler"]
__author__ = "bones7456"
__home_page__ = "http://li2z.cn/"
import os
import posixpath
import http.server
import urllib
import html
import shutil
import mimetypes
import re
import io
class SimpleHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
server_version = "SimpleHTTPWithUpload/" + __version__
def do_GET(self):
f = self.send_head()
if f:
self.copyfile(f, self.wfile)
f.close()
def do_HEAD(self):
f = self.send_head()
if f:
f.close()
def do_POST(self):
r, info = self.deal_post_data()
print(r, info, "by: ", self.client_address)
info = info.encode()
f = io.BytesIO()
f.write(b'<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
f.write(b'<html>\n<title>Upload Result Page</title>\n')
f.write(b'<body>\n<h2>Upload Result Page</h2>\n')
f.write(b'<hr>\n')
if r:
f.write(b'<strong>Success:</strong>')
else:
f.write(b'<strong>Failed:</strong>')
f.write(info)
f.write(b'<br><a href=\"%s\">back</a>' % self.headers['referer'].encode())
f.write(b'<hr><small>Powerd By: bones7456, check new version at ')
f.write(b'<a href=\"http://li2z.cn/?s=SimpleHTTPServerWithUpload\">')
f.write(b'here</a>.</small></body>\n</html>\n')
length = f.tell()
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", str(length))
self.end_headers()
if f:
self.copyfile(f, self.wfile)
f.close()
def deal_post_data(self):
boundary = self.headers['Content-Type'].split("=")[1].encode()
remainbytes = int(self.headers['content-length'])
line = self.rfile.readline()
remainbytes -= len(line)
if not boundary in line:
return (False, "Content NOT begin with boundary")
import pdb; pdb.set_trace()
line = self.rfile.readline()
remainbytes -= len(line)
fn = re.findall(r'Content-Disposition.*name="file"; filename="(.*)"', line.decode())
if not fn:
return (False, "Can't find out file name...")
path = self.translate_path(self.path)
fn = os.path.join(path, fn[0])
line = self.rfile.readline()
remainbytes -= len(line)
line = self.rfile.readline()
remainbytes -= len(line)
try:
out = open(fn, 'wb')
except IOError:
return (False, "Can't create file to write, do you have permission to write?")
preline = self.rfile.readline()
remainbytes -= len(preline)
while remainbytes > 0:
line = self.rfile.readline()
remainbytes -= len(line)
if boundary in line:
preline = preline[0:-1]
if preline.endswith(b'\r'):
preline = preline[0:-1]
out.write(preline)
out.close()
return (True, "File '%s' upload success!" % fn)
else:
out.write(preline)
preline = line
return (False, "Unexpect Ends of data.")
def send_head(self):
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
def list_directory(self, path):
try:
list = os.listdir(path)
except os.error:
self.send_error(404, "No permission to list directory")
return None
list.sort(key=lambda a: a.lower())
f = io.BytesIO()
displaypath = html.escape(urllib.parse.unquote(self.path))
f.write(b'<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
f.write(b'<html>\n<title>Directory listing for %s</title>\n' % displaypath.encode())
f.write(b'<body>\n<h2>Directory listing for %s</h2>\n' % displaypath.encode())
f.write(b'<hr>\n')
f.write(b'<form ENCTYPE=\"multipart/form-data\" method=\"post\">')
f.write(b'<input name=\"file\" type=\"file\"/>')
f.write(b'<input type=\"submit\" value=\"upload\"/></form>\n')
f.write(b'<hr>\n<ul>\n')
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
f.write(b'<li><a href="%s">%s</a>\n'
% (urllib.parse.quote(linkname).encode(), html.escape(displayname).encode()))
f.write(b'</ul>\n<hr>\n</body>\n</html>\n')
length = f.tell()
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", str(length))
self.end_headers()
return f
def translate_path(self, path):
path = path.split('?',1)[0]
path = path.split('#',1)[0]
path = posixpath.normpath(urllib.parse.unquote(path))
words = path.split('/')
words = filter(None, words)
path = os.getcwd()
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir): continue
path = os.path.join(path, word)
return path
def copyfile(self, source, outputfile):
shutil.copyfileobj(source, outputfile)
def guess_type(self, path):
base, ext = posixpath.splitext(path)
if ext in self.extensions_map:
return self.extensions_map[ext]
ext = ext.lower()
if ext in self.extensions_map:
return self.extensions_map[ext]
else:
return self.extensions_map['']
if not mimetypes.inited:
mimetypes.init()
extensions_map = mimetypes.types_map.copy()
extensions_map.update({
'': 'application/octet-stream',
'.py': 'text/plain',
'.c': 'text/plain',
'.h': 'text/plain',
})
def test(HandlerClass = SimpleHTTPRequestHandler,
ServerClass = http.server.HTTPServer):
http.server.test(HandlerClass, ServerClass)
if __name__ == '__main__':
test()
| true | true |
1c33ba948c776afd72bc9c5d8ccaf2566e5db1b2 | 791 | py | Python | api/celery_api/signal_handler.py | 240325184/KubeOperator | 777774050b236abf938a5a9ef505124c26e4916e | [
"Apache-2.0"
] | 3 | 2020-04-05T04:53:24.000Z | 2020-04-05T04:53:34.000Z | api/celery_api/signal_handler.py | 240325184/KubeOperator | 777774050b236abf938a5a9ef505124c26e4916e | [
"Apache-2.0"
] | 27 | 2021-05-05T02:51:26.000Z | 2022-01-04T21:30:21.000Z | api/celery_api/signal_handler.py | 240325184/KubeOperator | 777774050b236abf938a5a9ef505124c26e4916e | [
"Apache-2.0"
] | 1 | 2020-03-04T00:29:29.000Z | 2020-03-04T00:29:29.000Z | # -*- coding: utf-8 -*-
#
import logging
from celery.signals import after_setup_logger
from celery.utils.log import get_logger
from kombu.utils.encoding import safe_str
from .logger import CeleryTaskFileHandler
safe_str = lambda x: x
logger = get_logger(__file__)
@after_setup_logger.connect
def add_celery_redis_handler(sender=None, logger=None, loglevel=None, format=None, **kwargs):
    """Celery signal hook: attach a task-file handler to the worker logger.

    Invoked by Celery once its logging is configured; mirrors the level and
    format Celery chose onto a CeleryTaskFileHandler.  No-op when no logger
    was supplied.
    """
    if not logger:
        return
    file_handler = CeleryTaskFileHandler()
    file_handler.setLevel(loglevel)
    file_handler.setFormatter(logging.Formatter(format))
    logger.addHandler(file_handler)
# @task_failure.connect
# def on_task_failed(sender, task_id, **kwargs):
# CeleryTask.objects.filter(id=task_id).update(state=CeleryTask.STATE_FAILURE, date_finished=timezone.now())
| 27.275862 | 112 | 0.766119 |
import logging
from celery.signals import after_setup_logger
from celery.utils.log import get_logger
from kombu.utils.encoding import safe_str
from .logger import CeleryTaskFileHandler
safe_str = lambda x: x
logger = get_logger(__file__)
@after_setup_logger.connect
def add_celery_redis_handler(sender=None, logger=None, loglevel=None, format=None, **kwargs):
if not logger:
return
handler = CeleryTaskFileHandler()
handler.setLevel(loglevel)
formatter = logging.Formatter(format)
handler.setFormatter(formatter)
logger.addHandler(handler)
| true | true |
1c33babc1dbab32440463811c12abe576f496721 | 638 | py | Python | testing/rubik_testing/__init__.py | Borsos/rubik | af220a142b81a8f5b5011e4e072be9e3d130e827 | [
"Apache-2.0"
] | 1 | 2019-11-13T00:44:09.000Z | 2019-11-13T00:44:09.000Z | testing/rubik_testing/__init__.py | Borsos/rubik | af220a142b81a8f5b5011e4e072be9e3d130e827 | [
"Apache-2.0"
] | null | null | null | testing/rubik_testing/__init__.py | Borsos/rubik | af220a142b81a8f5b5011e4e072be9e3d130e827 | [
"Apache-2.0"
] | 1 | 2019-11-13T00:47:16.000Z | 2019-11-13T00:47:16.000Z | #!/usr/bin/env python3
#
# Copyright 2014 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "Simone Campagna"
| 33.578947 | 74 | 0.757053 |
__author__ = "Simone Campagna"
| true | true |
1c33bb68b5b12b6c3eccc9179009a52e97d4c260 | 510 | py | Python | test/test_data.py | ikamensh/arc-py | 5b8d1d44e4602ff029dd77f65882423ee57bf5c1 | [
"MIT"
] | 3 | 2021-04-01T21:21:23.000Z | 2021-12-24T09:50:28.000Z | test/test_data.py | ikamensh/arc-py | 5b8d1d44e4602ff029dd77f65882423ee57bf5c1 | [
"MIT"
] | 1 | 2021-04-01T14:32:51.000Z | 2021-04-01T14:32:51.000Z | test/test_data.py | ikamensh/arc-py | 5b8d1d44e4602ff029dd77f65882423ee57bf5c1 | [
"MIT"
] | 1 | 2022-01-18T20:39:33.000Z | 2022-01-18T20:39:33.000Z | import os
import pytest
@pytest.fixture()
def no_cache():
    """Delete arc's on-disk cache file (if any) before the test runs."""
    from arc.data import cache_file
    cache_present = os.path.isfile(cache_file)
    if cache_present:
        os.remove(cache_file)
def test_eval_set(no_cache):
    """The validation split loads 400 problems and can be summarized."""
    from arc import describe_task_group
    from arc import validation_problems
    problem_count = len(validation_problems)
    assert problem_count == 400
    describe_task_group(validation_problems)
def test_train_set(no_cache):
    """The training split loads 400 problems and can be summarized."""
    from arc import describe_task_group
    from arc import train_problems
    problem_count = len(train_problems)
    assert problem_count == 400
    describe_task_group(train_problems)
| 20.4 | 60 | 0.758824 | import os
import pytest
@pytest.fixture()
def no_cache():
from arc.data import cache_file
if os.path.isfile(cache_file):
os.remove(cache_file)
def test_eval_set(no_cache):
from arc import validation_problems, describe_task_group
assert len(validation_problems) == 400
describe_task_group(validation_problems)
def test_train_set(no_cache):
from arc import train_problems, describe_task_group
assert len(train_problems) == 400
describe_task_group(train_problems)
| true | true |
1c33bd0a99380be85fae7a96440f62e3c0394372 | 6,644 | py | Python | automancy/core/tactical_asserts.py | IAmTheBlurr/Automancy | 0c52916cd01dda6bd34ef8d048c37e478dfabbb5 | [
"MIT"
] | null | null | null | automancy/core/tactical_asserts.py | IAmTheBlurr/Automancy | 0c52916cd01dda6bd34ef8d048c37e478dfabbb5 | [
"MIT"
] | null | null | null | automancy/core/tactical_asserts.py | IAmTheBlurr/Automancy | 0c52916cd01dda6bd34ef8d048c37e478dfabbb5 | [
"MIT"
] | null | null | null | """ ./core/tactical_asserts.py """
from time import sleep
import chronomancy
import inspect
from automancy.core import Elemental
from selenium.common.exceptions import WebDriverException
class TacticalAsserts(object):
    """Polling ("tactical") assertions for Automancy ``Elemental`` objects.

    Each public method repeatedly evaluates a condition on an Elemental,
    sleeping ``sleep_time`` seconds between attempts, until the condition
    holds or ``max_timeout`` seconds of sleep have accumulated, at which
    point an AssertionError with a descriptive message is raised.
    """

    def __init__(self, sleep_time: float = 0.25, max_timeout: int = 10):
        """
        Args:
            sleep_time: Seconds to wait between condition checks.
            max_timeout: Total seconds of polling before giving up.
        """
        super().__init__()
        self.max_timeout = max_timeout
        self.sleep_time = sleep_time
        self.sleep = sleep

    @staticmethod
    def __verify_is_elemental(element):
        # Every public method only operates on Elemental subclasses.
        if not issubclass(element.__class__, Elemental):
            raise TypeError(f'Input element must be a subclass of Elemental, found: {type(element)}')

    def _wait_for(self, condition) -> bool:
        """Poll *condition* (zero-argument callable) until it is truthy.

        Returns:
            bool: True if the condition held within ``max_timeout`` seconds,
            False otherwise.  Exceptions other than AssertionError propagate.
        """
        time_counted = 0
        while time_counted < self.max_timeout:
            try:
                assert condition()
                return True
            except AssertionError:
                self.sleep(self.sleep_time)
                time_counted += self.sleep_time
        return False

    def becomes_interactable(self, element: Elemental) -> Elemental:
        """Wait until the element exists, is visible, and is clickable."""
        self.__verify_is_elemental(element)
        self.gains_existence(element)
        self.gains_visibility(element)
        self.gains_clickability(element)
        return element

    def becomes_true(self, element: Elemental) -> Elemental:
        """
        Tactically asserts the `Elemental` passed in will become `True` within the time expected.

        Args:
            element (Elemental): an Automancy `Elemental` object able to be resolved to `True` or `False`

        Returns:
            Elemental: The same Elemental object which was passed in.
        """
        # The caller's expression is re-evaluated each attempt via the
        # captured frame (chronomancy.arcane_recall); a plain loop over the
        # already-bound value would never change, so this method is kept
        # hand-rolled rather than using _wait_for().
        calling_frame = inspect.stack()[1]

        time_counted = 0
        while time_counted < self.max_timeout:
            try:
                assert element is True
                return element
            except AssertionError:
                self.sleep(self.sleep_time)
                element = chronomancy.arcane_recall(calling_frame)
                time_counted += self.sleep_time

        raise AssertionError(f'Assertion Error: The element named {element.name} did not become True within {self.max_timeout} seconds')

    def gains_clickability(self, element: Elemental) -> Elemental:
        """Wait until ``element.clickable`` is truthy; return the element."""
        self.__verify_is_elemental(element)
        if self._wait_for(lambda: element.clickable):
            return element
        raise AssertionError(f'Assertion Error: The element named "{element.name}" did not gain clickability within the timeout limit ({self.max_timeout} seconds)')

    def gains_existence(self, element: Elemental) -> Elemental:
        """Wait until ``element.exists`` is truthy; return the element."""
        self.__verify_is_elemental(element)
        if self._wait_for(lambda: element.exists):
            return element
        raise AssertionError(f'Assertion Error: The element named "{element.name}" did not come into existence within the timeout limit ({self.max_timeout} seconds)')

    def gains_visibility(self, element: Elemental) -> Elemental:
        """Wait until ``element.visible`` is truthy; return the element.

        Kept hand-rolled (not via _wait_for) because of the special
        WebDriverException recovery path below.
        """
        self.__verify_is_elemental(element)

        time_counted = 0
        while time_counted < self.max_timeout:
            try:
                assert element.visible
                return element
            except AssertionError:
                self.sleep(self.sleep_time)
                time_counted += self.sleep_time
            except WebDriverException:
                # In some rare edge cases Selenium will raise this exception without a message.
                # In all use cases this has been due to the element not existing even if it has
                # already been detected to exist (through the element.exists property). This is
                # a double check for existence a repeat of asserting that the element is visible.
                self.gains_existence(element)
                assert element.visible
                return element

        raise AssertionError(f'Assertion Error: The element named "{element.name}" did not gain visibility within the timeout limit ({self.max_timeout} seconds)')

    def text_becomes_equal(self, element: Elemental, expected_text: str) -> Elemental:
        """
        Tactically asserts the value of the `.text` property for the passed in Elemental will become equal to the expected text.

        Args:
            element (Elemental): the `Elemental` which `.text` will be inspected for
            expected_text (str): the string you expect to match element.text

        Returns:
            Elemental: The same Elemental object which was passed in.
        """
        self.__verify_is_elemental(element)
        if self._wait_for(lambda: element.text == expected_text):
            return element
        # Report the element's text (the value actually compared), not the
        # element object's repr.
        raise AssertionError(f'Assertion Error: Target elements\' text did not become equal to the expected text within {self.max_timeout} seconds, {element.text} != {expected_text}')

    def text_becomes_found_in(self, element: Elemental, expected_text: str) -> Elemental:
        """
        Tactically asserts the expected text becomes found in the value of the `.text` property for the passed in Elemental.

        Args:
            element (Elemental): the `Elemental` which `.text` will be inspected for
            expected_text (str): the string you expect to match element.text

        Returns:
            Elemental: The same Elemental object which was passed in.
        """
        self.__verify_is_elemental(element)
        if self._wait_for(lambda: expected_text in element.text):
            return element
        raise AssertionError(f'Assertion Error: The expected text was not found within the text of the element named ({element.name}) text within {self.max_timeout} seconds, {expected_text} not in {element.text}')

    def video_begins_playing(self, element: Elemental) -> Elemental:
        """Wait until ``element.is_playing()`` is truthy; return the element."""
        self.__verify_is_elemental(element)
        if self._wait_for(lambda: element.is_playing()):
            return element
        raise AssertionError(f'Assertion Error: Video did not begin playing within {self.max_timeout} seconds')
| 39.313609 | 213 | 0.638471 | from time import sleep
import chronomancy
import inspect
from automancy.core import Elemental
from selenium.common.exceptions import WebDriverException
class TacticalAsserts(object):
def __init__(self, sleep_time: float = 0.25, max_timeout: int = 10):
super().__init__()
self.max_timeout = max_timeout
self.sleep_time = sleep_time
self.sleep = sleep
@staticmethod
def __verify_is_elemental(element):
if not issubclass(element.__class__, Elemental):
raise TypeError(f'Input element must be a subclass of Elemental, found: {type(element)}')
def becomes_interactable(self, element: Elemental) -> Elemental:
self.__verify_is_elemental(element)
self.gains_existence(element)
self.gains_visibility(element)
self.gains_clickability(element)
return element
def becomes_true(self, element: Elemental) -> Elemental:
calling_frame = inspect.stack()[1]
time_counted = 0
while time_counted < self.max_timeout:
try:
assert element is True
return element
except AssertionError:
self.sleep(self.sleep_time)
element = chronomancy.arcane_recall(calling_frame)
time_counted += self.sleep_time
raise AssertionError(f'Assertion Error: The element named {element.name} did not become True within {self.max_timeout} seconds')
def gains_clickability(self, element: Elemental) -> Elemental:
self.__verify_is_elemental(element)
time_counted = 0
while time_counted < self.max_timeout:
try:
assert element.clickable
return element
except AssertionError:
self.sleep(self.sleep_time)
time_counted += self.sleep_time
raise AssertionError(f'Assertion Error: The element named "{element.name}" did not gain clickability within the timeout limit ({self.max_timeout} seconds)')
def gains_existence(self, element: Elemental) -> Elemental:
self.__verify_is_elemental(element)
time_counted = 0
while time_counted < self.max_timeout:
try:
assert element.exists
return element
except AssertionError:
self.sleep(self.sleep_time)
time_counted += self.sleep_time
raise AssertionError(f'Assertion Error: The element named "{element.name}" did not come into existence within the timeout limit ({self.max_timeout} seconds)')
def gains_visibility(self, element: Elemental) -> Elemental:
self.__verify_is_elemental(element)
time_counted = 0
while time_counted < self.max_timeout:
try:
assert element.visible
return element
except AssertionError:
self.sleep(self.sleep_time)
time_counted += self.sleep_time
except WebDriverException:
self.gains_existence(element)
assert element.visible
return element
raise AssertionError(f'Assertion Error: The element named "{element.name}" did not gain visibility within the timeout limit ({self.max_timeout} seconds)')
def text_becomes_equal(self, element: Elemental, expected_text: str) -> Elemental:
self.__verify_is_elemental(element)
time_counted = 0
while time_counted < self.max_timeout:
try:
assert element.text == expected_text
return element
except AssertionError:
self.sleep(self.sleep_time)
time_counted += self.sleep_time
raise AssertionError(f'Assertion Error: Target elements\' text did not become equal to the expected text within {self.max_timeout} seconds, {element} != {expected_text}')
def text_becomes_found_in(self, element: Elemental, expected_text: str) -> Elemental:
self.__verify_is_elemental(element)
time_counted = 0
while time_counted < self.max_timeout:
try:
assert expected_text in element.text
return element
except AssertionError:
sleep(self.sleep_time)
time_counted += self.sleep_time
raise AssertionError(f'Assertion Error: The expected text was not found within the text of the element named ({element.name}) text within {self.max_timeout} seconds, {expected_text} not in {element.text}')
def video_begins_playing(self, element):
self.__verify_is_elemental(element)
time_counted = 0
while time_counted < self.max_timeout:
try:
assert element.is_playing()
return element
except AssertionError:
sleep(self.sleep_time)
time_counted += self.sleep_time
raise AssertionError(f'Assertion Error: Video did not begin playing within {self.max_timeout} seconds')
| true | true |
1c33bd36d7692dffce76f4f41188662a80708b18 | 18,818 | py | Python | log_complete/model_244.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_complete/model_244.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_complete/model_244.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
# --- Model container and molecular species --------------------------------
# Instantiating Model() lets the following component constructors
# (Monomer/Parameter/Observable/Rule/Initial) self-register with it.
Model()
# Each Monomer lists its binding sites; sites named after other monomers are
# used in the Rule patterns below to form bonds between the two species.
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('C6A', ['C8pro'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
# BaxA carries two self-bonding sites (BaxA_1/BaxA_2) used by the pore
# oligomerization rules further down.
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6pro', ['C3A'])
# --- Kinetic rate constants -----------------------------------------------
# One Parameter per rate; the suffix encodes the role: _2kf = bimolecular
# forward, _1kr = unimolecular reverse, _1kc = catalytic step, _1kf =
# unimolecular forward. All are placeholders of 1.0 — presumably meant to be
# fitted/sampled downstream (TODO confirm against the calling pipeline).
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
# --- Initial abundances ---------------------------------------------------
# '<Species>_0' parameters give the starting copy numbers consumed by the
# Initial(...) declarations at the bottom of the model. Activated/cleaved
# forms (C8A, Apop, SmacC, ParpC, C3ub, C6A, CytoCC, BaxA, BidT, C3A, ApafA,
# BidM) all start at zero here.
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 61000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C6A_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6pro_0', 100.0)
# --- Observables ----------------------------------------------------------
# One Observable per monomer. The empty-parentheses pattern (e.g. Ligand())
# places no constraint on site states, so each observable reports the total
# amount of that monomer across all bound/unbound forms.
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C6A_obs', C6A())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6pro_obs', C6pro())
# --- Reaction rules -------------------------------------------------------
# PySB pattern syntax: '+' separates distinct complexes, '%' joins monomers
# into one complex (matching integers on sites denote a bond), '|' declares
# a reversible rule (forward rate, reverse rate) and '>>' an irreversible
# one (single rate). Catalysis is modeled in two rules: _0 (reversible
# enzyme-substrate binding) and _1 (irreversible product release).
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
# Pore assembly: BaxA dimer -> trimer -> tetramer via the BaxA_1/BaxA_2
# self-bonding sites (bond integers 1-4 close the ring).
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
# Transport: the tetrameric pore reversibly binds mitochondrial cargo
# (SmacM / CytoCM) and irreversibly releases the cytosolic form
# (SmacC / CytoCC).
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
# --- Initial conditions ---------------------------------------------------
# Seed every monomer in its fully unbound state with the matching '_0'
# abundance parameter defined above.
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6pro(C3A=None), C6pro_0)
| 91.349515 | 710 | 0.806515 |
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('C6A', ['C8pro'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 61000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C6A_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C6A_obs', C6A())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6pro(C3A=None), C6pro_0)
| true | true |
1c33bd3cd9c3f9a94ceb7cff2cf858c2010efda8 | 1,068 | py | Python | setup.py | Baras64/Scrapera | bbd2f24915767be951acb1fc5fcf4d3d73eedbd4 | [
"MIT"
] | 300 | 2021-01-24T05:53:07.000Z | 2022-01-10T06:06:41.000Z | setup.py | pratik-choudhari/Scrapera | 3d48f2b861849d90aebe85d6e088365de7810c06 | [
"MIT"
] | 10 | 2021-01-24T06:37:10.000Z | 2021-08-30T16:47:15.000Z | setup.py | pratik-choudhari/Scrapera | 3d48f2b861849d90aebe85d6e088365de7810c06 | [
"MIT"
] | 21 | 2021-01-24T14:37:42.000Z | 2022-01-05T19:33:00.000Z | from setuptools import setup
import os
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
with open('requirements.txt') as f:
required = f.read().splitlines()
setup(
name="scrapera",
version="1.1.3",
description="A universal package of scraper scripts for humans",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/DarshanDeshpande/Scrapera",
author="Darshan Deshpande",
author_email="darshan1504@gmail.com",
license="MIT",
python_requires=">=3.6.0",
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
packages=["scrapera"],
package_data={"scrapera": ["*/*"]},
include_package_data=True,
install_requires=required,
)
| 30.514286 | 68 | 0.655431 | from setuptools import setup
import os
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
with open('requirements.txt') as f:
required = f.read().splitlines()
setup(
name="scrapera",
version="1.1.3",
description="A universal package of scraper scripts for humans",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/DarshanDeshpande/Scrapera",
author="Darshan Deshpande",
author_email="darshan1504@gmail.com",
license="MIT",
python_requires=">=3.6.0",
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
packages=["scrapera"],
package_data={"scrapera": ["*/*"]},
include_package_data=True,
install_requires=required,
)
| true | true |
1c33be3154e4fb054abe4a689218686c25115ebe | 487 | py | Python | lab/migrations/0016_alter_objectgroup_options.py | betagouv/euphrosyne | a67857a8716b5060cd9a2c6fa5f3d45c3fff435a | [
"MIT"
] | 1 | 2022-02-21T19:46:20.000Z | 2022-02-21T19:46:20.000Z | lab/migrations/0016_alter_objectgroup_options.py | betagouv/euphrosyne | a67857a8716b5060cd9a2c6fa5f3d45c3fff435a | [
"MIT"
] | 37 | 2021-10-18T18:33:26.000Z | 2022-03-31T12:38:38.000Z | lab/migrations/0016_alter_objectgroup_options.py | betagouv/euphrosyne | a67857a8716b5060cd9a2c6fa5f3d45c3fff435a | [
"MIT"
] | 2 | 2022-03-03T15:41:30.000Z | 2022-03-07T14:20:26.000Z | # Generated by Django 4.0.1 on 2022-02-14 10:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('lab', '0015_objectgroup_object_count_alter_objectgroup_inventory_and_more_squashed_0016_alter_objectgroup_label'),
]
operations = [
migrations.AlterModelOptions(
name='objectgroup',
options={'verbose_name': 'Object / Sample', 'verbose_name_plural': 'Object(s) / Sample(s'},
),
]
| 27.055556 | 124 | 0.677618 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('lab', '0015_objectgroup_object_count_alter_objectgroup_inventory_and_more_squashed_0016_alter_objectgroup_label'),
]
operations = [
migrations.AlterModelOptions(
name='objectgroup',
options={'verbose_name': 'Object / Sample', 'verbose_name_plural': 'Object(s) / Sample(s'},
),
]
| true | true |
1c33be844c886ef505a7fc609351b1c1dceb34b6 | 99 | py | Python | shared/python/__init__.py | carol-hsu/relay-bench | 0facffedb3cbb0d5f110769a84bba68718cff72b | [
"Apache-2.0"
] | 7 | 2019-10-03T22:41:18.000Z | 2020-05-31T18:52:15.000Z | shared/python/__init__.py | carol-hsu/relay-bench | 0facffedb3cbb0d5f110769a84bba68718cff72b | [
"Apache-2.0"
] | 14 | 2019-10-18T19:13:53.000Z | 2021-09-08T01:36:37.000Z | shared/python/__init__.py | carol-hsu/relay-bench | 0facffedb3cbb0d5f110769a84bba68718cff72b | [
"Apache-2.0"
] | 4 | 2019-10-03T21:34:03.000Z | 2022-02-23T10:29:49.000Z | from . import trial_util
from . import relay_util
from . import analysis_util
from . import common
| 19.8 | 27 | 0.79798 | from . import trial_util
from . import relay_util
from . import analysis_util
from . import common
| true | true |
1c33beb67d1b99e22341dd936653d4cf90801b6e | 9,407 | py | Python | dapper/tools/localization.py | dafeda/DAPPER | fc4ae95a3eb7c65387616f988b75559a9eacc048 | [
"MIT"
] | null | null | null | dapper/tools/localization.py | dafeda/DAPPER | fc4ae95a3eb7c65387616f988b75559a9eacc048 | [
"MIT"
] | 1 | 2022-02-18T12:29:38.000Z | 2022-02-18T12:29:38.000Z | dapper/tools/localization.py | dafeda/DAPPER | fc4ae95a3eb7c65387616f988b75559a9eacc048 | [
"MIT"
] | null | null | null | """Localization tools, including distance and tapering comps.
A good introduction to localization:
Sakov (2011), Computational Geosciences:
'Relation between two common localisation methods for the EnKF'.
"""
# NB: Why is the 'order' argument not supported by this module? Because:
# 1) Assuming only order (orientation) 'C' simplifies the module's code.
# 2) It's not necessary, because the module only communicates to *exterior* via indices
# [of what assumes to be X.flatten(order='C')], and not coordinates!
# Thus, the only adaptation necessary if the order is 'F' is to reverse
# the shape parameter passed to these functions (example: mods/QG/sakov2008).
import numpy as np
def pairwise_distances(A, B=None, domain=None):
    """Compute the matrix of Euclidian (non-squared) distances between point sets.

    Parameters
    ----------
    A: array of shape `(nPoints, nDims)`.
        A collection of points.
    B:
        Same as `A` (defaults to `A` itself), but `nPoints` may differ.
    domain: tuple
        If given, the domain is assumed to be a **periodic** hyper-rectangle
        whose edges along dimension `i` span from 0 to `domain[i]`.
        NB: Behaviour not defined if `any(A.max(0) > domain)`; idem for `B`.

    Returns
    -------
    Array of shape `(nPointsA, nPointsB)`.

    Examples
    --------
    >>> A = [[0, 0], [0, 1], [1, 0], [1, 1]]
    >>> with np.printoptions(precision=2):
    ...     print(pairwise_distances(A))
    [[0.   1.   1.   1.41]
     [1.   0.   1.41 1.  ]
     [1.   1.41 0.   1.  ]
     [1.41 1.   1.   0.  ]]

    NB: a 1-dim input array is interpreted as a single point.
    """
    if B is None:
        B = A

    # Promote to 2-d: a flat array counts as one point.
    pts_a = np.atleast_2d(A)
    pts_b = np.atleast_2d(B)
    n_a, dim_a = pts_a.shape
    n_b, dim_b = pts_b.shape
    assert dim_a == dim_b, "The last axis of A and B must have equal length."

    # Componentwise differences, shape (n_a, n_b, nDims).
    diffs = pts_a[:, None] - pts_b

    if domain:
        # Periodic wrap-around: along each axis, take the shorter of the
        # direct gap and the gap "through" the boundary.
        edges = np.reshape(domain, (1, 1, -1))  # broadcastable over points
        diffs = np.minimum(np.abs(diffs), edges - np.abs(diffs))

    # Euclidian norm over the last axis, i.e. sla.norm(diffs, axis=-1).
    return np.sqrt((diffs * diffs).sum(axis=-1)).reshape(n_a, n_b)
def dist2coeff(dists, radius, tag=None):
    """Compute the tapering coefficients corresponding to an array of distances.

    NB: The radius is internally adjusted such that, independently of `tag`,
    `coeff==np.exp(-0.5)` when `distance==radius`.

    Largely based on Sakov's enkf-matlab code, with two bugs fixed here:
    slightly-wrong constants (noted inline), and a forgotten sqrt() when
    applying the coefficients through 'local analysis'.
    """
    coeffs = np.zeros(dists.shape)

    if tag is None:
        tag = "GC"

    if tag == "Gauss":
        coeffs = np.exp(-0.5 * (dists / radius) ** 2)
    elif tag == "Exp":
        coeffs = np.exp(-0.5 * (dists / radius) ** 3)
    elif tag == "Cubic":
        R = radius * 1.87  # Sakov: 1.8676
        within = dists <= R
        coeffs[within] = (1 - (dists[within] / R) ** 3) ** 3
    elif tag == "Quadro":
        R = radius * 1.64  # Sakov: 1.7080
        within = dists <= R
        coeffs[within] = (1 - (dists[within] / R) ** 4) ** 4
    elif tag == "GC":
        # Eqn 4.10 of Gaspari-Cohn'99, or eqn 25 of Sakov2011relation.
        R = radius * 1.82  # =np.sqrt(10/3). Sakov: 1.7386
        # Inner segment: dists in [0, R].
        seg1 = dists <= R
        r = dists[seg1] / R
        coeffs[seg1] = (
            1 + r**2 * (-(r**3) / 4 + r**2 / 2) + r**3 * (5 / 8) - r**2 * (5 / 3)
        )
        # Outer segment: dists in (R, 2R]; zero beyond 2R.
        seg2 = (R < dists) & (dists <= 2 * R)
        r = dists[seg2] / R
        coeffs[seg2] = (
            r**2 * (r**3 / 12 - r**2 / 2)
            + r**3 * (5 / 8)
            + r**2 * (5 / 3)
            - r * 5
            + 4
            - (2 / 3) / r
        )
    elif tag == "Step":
        coeffs[dists <= radius] = 1
    else:
        raise KeyError("No such coeff function.")

    return coeffs
def inds_and_coeffs(dists, radius, cutoff=1e-3, tag=None):
    """Compute the localization indices and coefficients for `dists`.

    Returns
    -------
    inds : indices of the points that are "close to" the centre.
    coeffs : the corresponding tapering coefficients.
    """
    all_coeffs = dist2coeff(dists, radius, tag)
    # Discard points whose coefficient falls at/below the cut-off.
    keep = all_coeffs > cutoff
    inds = np.arange(len(dists))[keep]
    return inds, all_coeffs[inds]
def localization_setup(y2x_distances, batches):
    """Wire obs-to-state distances and state batches into a localizer factory."""

    def localization_now(radius, direction, t, tag=None):
        """Provide localization setup for time t."""
        y2x = y2x_distances(t)

        if direction == "x2y":

            def obs_taperer(batch):
                # NB: `batch` (the indices themselves) is taken as input,
                # rather than an index into `batches`, because that would
                # slow down multiprocessing: `batches` would get copied
                # to each process.
                dists_to_batch = y2x.T[batch].mean(axis=0)
                return inds_and_coeffs(dists_to_batch, radius, tag=tag)

            return batches, obs_taperer

        elif direction == "y2x":

            def state_taperer(obs_idx):
                return inds_and_coeffs(y2x[obs_idx], radius, tag=tag)

            return state_taperer

    return localization_now
def no_localization(Nx, Ny):
    """Returns a "localizer" that does no localization at all.

    All indices are returned, with all tapering coeffs. set to 1.
    Used to validate local DA methods, eg. `LETKF<==>EnKF('Sqrt')`.
    """

    def full_obs_domain(batch):
        return np.arange(Ny), np.ones(Ny)

    def full_state_domain(obs_idx):
        return np.arange(Nx), np.ones(Nx)

    def localization_now(radius, direction, t, tag=None):
        # A finite radius makes no sense without an actual localizer.
        assert radius in [None, np.inf], "Localizer not specified, but radius < infty."
        if direction == "x2y":
            return [np.arange(Nx)], full_obs_domain
        elif direction == "y2x":
            return full_state_domain

    return localization_now
def rectangular_partitioning(shape, steps, do_ind=True):
    """N-D rectangular batch generation.

    Parameters
    ----------
    shape: (len(grid[dim]) for dim in range(ndim))
    steps: (step_len[dim] for dim in range(ndim))
    do_ind: if True, convert each batch's coordinates to flat indices.

    Returns
    -------
    A list of batches, where each batch is a list of indices
    (flat indices if `do_ind`, otherwise per-dimension coordinate arrays).
    """
    import itertools

    assert len(shape) == len(steps)

    # Number of (roughly step-sized) partitions along each dimension.
    counts = [round(n / d) for n, d in zip(shape, steps)]

    # Marginal partition of each axis.
    # [array_split() copes with non-divisible lengths.]
    axis_parts = [np.array_split(np.arange(n), k) for n, k in zip(shape, counts)]

    batches = []
    for edges in itertools.product(*axis_parts):
        # The 'indexing' argument is inconsequential here: it merely
        # changes each batch's internal ordering.
        grids = np.meshgrid(*edges, indexing="ij")
        batches.append([g.flatten() for g in grids])

    if do_ind:
        # Convert (coord_dim0, coord_dim1, ...) into flat (row-major) indices.
        batches = [np.ravel_multi_index(b, shape) for b in batches]

    return batches
# NB: Don't try to put the time-dependence of obs_inds inside obs_taperer().
# That would require calling ind2sub len(batches) times per analysis,
# and the result cannot be easily cached, because of multiprocessing.
def safe_eval(fun, t):
    """Return `fun(t)` if `fun` is callable, else return `fun` itself.

    Lets a parameter (e.g. `obs_inds`) be given either as a constant
    (array, list, int, ...) or as a function of time.

    Note: the previous implementation called `fun(t)` and caught
    `TypeError`, which also silently masked genuine TypeErrors raised
    *inside* a callable `fun` (returning the function object instead of
    surfacing the bug). Branching on `callable` avoids that.
    """
    if callable(fun):
        return fun(t)
    return fun
def nd_Id_localization(shape, batch_shape=None, obs_inds=None, periodic=True):
    """Localize Id (direct) point obs of an N-D, homogeneous, rectangular domain."""
    M = np.prod(shape)

    batch_shape = (1,) * len(shape) if batch_shape is None else batch_shape
    obs_inds = np.arange(M) if obs_inds is None else obs_inds

    def flat2coord(ind):
        # Flat index --> (nPoints, ndim) array of grid coordinates.
        return np.asarray(np.unravel_index(ind, shape)).T

    state_coord = flat2coord(np.arange(M))
    batches = rectangular_partitioning(shape, batch_shape)
    wrap = shape if periodic else None

    def y2x_distances(t):
        # Distances from each obs (possibly time-dependent) to every state point.
        obs_coord = flat2coord(safe_eval(obs_inds, t))
        return pairwise_distances(obs_coord, state_coord, wrap)

    return localization_setup(y2x_distances, batches)
| 30.74183 | 88 | 0.582545 |
# 2) It's not necessary, because the module only communicates to *exterior* via indices
import numpy as np
def pairwise_distances(A, B=None, domain=None):
if B is None:
B = A
A = np.atleast_2d(A)
B = np.atleast_2d(B)
mA, nA = A.shape
mB, nB = B.shape
assert nA == nB, "The last axis of A and B must have equal length."
d = A[:, None] - B
if domain:
domain = np.reshape(domain, (1, 1, -1))
d = abs(d)
d = np.minimum(d, domain - d)
distances = np.sqrt((d * d).sum(axis=-1))
return distances.reshape(mA, mB)
def dist2coeff(dists, radius, tag=None):
coeffs = np.zeros(dists.shape)
if tag is None:
tag = "GC"
if tag == "Gauss":
R = radius
coeffs = np.exp(-0.5 * (dists / R) ** 2)
elif tag == "Exp":
R = radius
coeffs = np.exp(-0.5 * (dists / R) ** 3)
elif tag == "Cubic":
R = radius * 1.87
inds = dists <= R
coeffs[inds] = (1 - (dists[inds] / R) ** 3) ** 3
elif tag == "Quadro":
R = radius * 1.64
inds = dists <= R
coeffs[inds] = (1 - (dists[inds] / R) ** 4) ** 4
elif tag == "GC":
R = radius * 1.82 # =np.sqrt(10/3). Sakov: 1.7386
# 1st segment
ind1 = dists <= R
r2 = (dists[ind1] / R) ** 2
r3 = (dists[ind1] / R) ** 3
coeffs[ind1] = 1 + r2 * (-r3 / 4 + r2 / 2) + r3 * (5 / 8) - r2 * (5 / 3)
# 2nd segment
ind2 = np.logical_and(R < dists, dists <= 2 * R)
r1 = dists[ind2] / R
r2 = (dists[ind2] / R) ** 2
r3 = (dists[ind2] / R) ** 3
coeffs[ind2] = (
r2 * (r3 / 12 - r2 / 2)
+ r3 * (5 / 8)
+ r2 * (5 / 3)
- r1 * 5
+ 4
- (2 / 3) / r1
)
elif tag == "Step":
R = radius
inds = dists <= R
coeffs[inds] = 1
else:
raise KeyError("No such coeff function.")
return coeffs
def inds_and_coeffs(dists, radius, cutoff=1e-3, tag=None):
coeffs = dist2coeff(dists, radius, tag)
# Truncate using cut-off
inds = np.arange(len(dists))[coeffs > cutoff]
coeffs = coeffs[inds]
return inds, coeffs
def localization_setup(y2x_distances, batches):
def localization_now(radius, direction, t, tag=None):
y2x = y2x_distances(t)
if direction == "x2y":
def obs_taperer(batch):
# Don't use `batch = batches[iBatch]`
# This would slow down multiproc.,
# coz batches gets copied to each process.
x2y = y2x.T
dists = x2y[batch].mean(axis=0)
return inds_and_coeffs(dists, radius, tag=tag)
return batches, obs_taperer
elif direction == "y2x":
def state_taperer(obs_idx):
return inds_and_coeffs(y2x[obs_idx], radius, tag=tag)
return state_taperer
return localization_now
def no_localization(Nx, Ny):
def obs_taperer(batch):
return np.arange(Ny), np.ones(Ny)
def state_taperer(obs_idx):
return np.arange(Nx), np.ones(Nx)
def localization_now(radius, direction, t, tag=None):
assert radius in [None, np.inf], "Localizer not specified, but radius < infty."
if direction == "x2y":
return [np.arange(Nx)], obs_taperer
elif direction == "y2x":
return state_taperer
return localization_now
def rectangular_partitioning(shape, steps, do_ind=True):
import itertools
assert len(shape) == len(steps)
# ndim = len(steps)
# An ndim list of (average) local grid lengths:
nLocs = [round(n / d) for n, d in zip(shape, steps)]
# An ndim list of (marginal) grid partitions
# [array_split() handles non-divisibility]:
edge_partitions = [
np.array_split(np.arange(n), nLoc) for n, nLoc in zip(shape, nLocs)
]
batches = []
for batch_edges in itertools.product(*edge_partitions):
# The 'indexing' argument below is actually inconsequential:
# it merely changes batch's internal ordering.
batch_rect = np.meshgrid(*batch_edges, indexing="ij")
coords = [ii.flatten() for ii in batch_rect]
batches += [coords]
if do_ind:
def sub2ind(sub):
return np.ravel_multi_index(sub, shape)
batches = [sub2ind(b) for b in batches]
return batches
# That would require calling ind2sub len(batches) times per analysis,
# and the result cannot be easily cached, because of multiprocessing.
def safe_eval(fun, t):
try:
return fun(t)
except TypeError:
return fun
def nd_Id_localization(shape, batch_shape=None, obs_inds=None, periodic=True):
M = np.prod(shape)
if batch_shape is None:
batch_shape = (1,) * len(shape)
if obs_inds is None:
obs_inds = np.arange(M)
def ind2sub(ind):
return np.asarray(np.unravel_index(ind, shape)).T
batches = rectangular_partitioning(shape, batch_shape)
state_coord = ind2sub(np.arange(M))
def y2x_distances(t):
obs_coord = ind2sub(safe_eval(obs_inds, t))
return pairwise_distances(obs_coord, state_coord, shape if periodic else None)
return localization_setup(y2x_distances, batches)
| true | true |
1c33bfb83e4b9c8e2c5f242afa3184bebe3cef27 | 17,571 | py | Python | neutron/openstack/common/gettextutils.py | SnabbCo/neutron | a657c06d10f2171149c6b1863df36522bdc11cd7 | [
"Apache-2.0"
] | 1 | 2016-04-19T08:20:19.000Z | 2016-04-19T08:20:19.000Z | neutron/openstack/common/gettextutils.py | SnabbCo/neutron | a657c06d10f2171149c6b1863df36522bdc11cd7 | [
"Apache-2.0"
] | null | null | null | neutron/openstack/common/gettextutils.py | SnabbCo/neutron | a657c06d10f2171149c6b1863df36522bdc11cd7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from neutron.openstack.common.gettextutils import _
"""
import copy
import functools
import gettext
import locale
from logging import handlers
import os
from babel import localedata
import six
_localedir = os.environ.get('neutron'.upper() + '_LOCALEDIR')
_t = gettext.translation('neutron', localedir=_localedir, fallback=True)
# We use separate translation catalogs for each log level, so set up a
# mapping between the log level name and the translator. The domain
# for the log level is project_name + "-log-" + log_level so messages
# for each level end up in their own catalog.
_t_log_levels = dict(
(level, gettext.translation('neutron' + '-log-' + level,
localedir=_localedir,
fallback=True))
for level in ['info', 'warning', 'error', 'critical']
)
_AVAILABLE_LANGUAGES = {}
USE_LAZY = False
def enable_lazy():
    """Switch the module-level _() over to lazy (deferred) translation.

    Call this once, at the start of execution. Intended for projects
    that import _ directly instead of using the gettextutils.install()
    way of importing the _ function.
    """
    global USE_LAZY
    USE_LAZY = True
def _(msg):
    """Translate `msg`: lazily (as a Message) or eagerly via the default catalog."""
    if USE_LAZY:
        # Defer translation: wrap the msgid in a Message object that is
        # translated on demand (see Message.translate).
        return Message(msg, domain='neutron')
    translator = _t.gettext if six.PY3 else _t.ugettext
    return translator(msg)
def _log_translation(msg, level):
    """Build a single translation of a log message for the given log level."""
    if USE_LAZY:
        # Each log level has its own catalog domain, e.g. 'neutron-log-error'.
        return Message(msg, domain='neutron' + '-log-' + level)
    catalog = _t_log_levels[level]
    translator = catalog.gettext if six.PY3 else catalog.ugettext
    return translator(msg)
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = functools.partial(_log_translation, level='info')
_LW = functools.partial(_log_translation, level='warning')
_LE = functools.partial(_log_translation, level='error')
_LC = functools.partial(_log_translation, level='critical')
def install(domain, lazy=False):
    """Install a _() function using the given translation domain.

    Given a translation domain, install a _() function using gettext's
    install() function.

    The main difference from gettext.install() is that we allow
    overriding the default localedir (e.g. /usr/share/locale) using
    a translation-domain-specific environment variable (e.g.
    NOVA_LOCALEDIR).

    :param domain: the translation domain
    :param lazy: indicates whether or not to install the lazy _() function.
                 The lazy _() introduces a way to do deferred translation
                 of messages by installing a _ that builds Message objects,
                 instead of strings, which can then be lazily translated into
                 any available locale.
    """
    if lazy:
        # NOTE(mrodden): deferred translation. Instead of installing a
        # _() that returns plain strings, install one that builds
        # Message objects; the actual translation happens later, once
        # more information (the desired locale) is available.

        def _lazy_gettext(msg):
            """Create and return a Message object.

            Lazy gettext function for a given domain, it is a factory method
            for a project/module to get a lazy gettext function for its own
            translation domain (i.e. nova, glance, cinder, etc.)

            Message encapsulates a string so that we can translate
            it later when needed.
            """
            return Message(msg, domain=domain)

        from six import moves
        moves.builtins.__dict__['_'] = _lazy_gettext
    else:
        # Eager translation via the stdlib, honouring the per-domain
        # localedir override from the environment.
        localedir = os.environ.get('%s_LOCALEDIR' % domain.upper())
        if six.PY3:
            gettext.install(domain, localedir=localedir)
        else:
            gettext.install(domain, localedir=localedir, unicode=True)
class Message(six.text_type):
    """A Message object is a unicode object that can be translated.

    Translation of Message is done explicitly using the translate() method.
    For all non-translation intents and purposes, a Message is simply unicode,
    and can be treated as such.
    """
    def __new__(cls, msgid, msgtext=None, params=None,
                domain='neutron', *args):
        """Create a new Message object.

        In order for translation to work gettext requires a message ID, this
        msgid will be used as the base unicode text. It is also possible
        for the msgid and the base unicode text to be different by passing
        the msgtext parameter.

        :param msgid: the gettext message ID (the source string)
        :param msgtext: optional base text; defaults to the translation of
                        msgid into the system's default locale
        :param params: substitution parameters captured by a previous
                       ``%`` operation (see __mod__), or None
        :param domain: the gettext translation domain
        """
        # If the base msgtext is not given, we use the default translation
        # of the msgid (which is in English) just in case the system locale is
        # not English, so that the base text will be in that locale by default.
        if not msgtext:
            msgtext = Message._translate_msgid(msgid, domain)
        # We want to initialize the parent unicode with the actual object that
        # would have been plain unicode if 'Message' was not enabled.
        msg = super(Message, cls).__new__(cls, msgtext)
        # Keep everything needed to re-translate later: the original msgid,
        # the domain, and any % params.
        msg.msgid = msgid
        msg.domain = domain
        msg.params = params
        return msg
    def translate(self, desired_locale=None):
        """Translate this message to the desired locale.

        :param desired_locale: The desired locale to translate the message to,
                               if no locale is provided the message will be
                               translated to the system's default locale.

        :returns: the translated message in unicode
        """
        translated_message = Message._translate_msgid(self.msgid,
                                                      self.domain,
                                                      desired_locale)
        if self.params is None:
            # No need for more translation
            return translated_message
        # This Message object may have been formatted with one or more
        # Message objects as substitution arguments, given either as a single
        # argument, part of a tuple, or as one or more values in a dictionary.
        # When translating this Message we need to translate those Messages too
        translated_params = _translate_args(self.params, desired_locale)
        translated_message = translated_message % translated_params
        return translated_message
    @staticmethod
    def _translate_msgid(msgid, domain, desired_locale=None):
        # Translate a raw msgid within `domain` to `desired_locale`
        # (or to the system locale, falling back to en_US).
        if not desired_locale:
            system_locale = locale.getdefaultlocale()
            # If the system locale is not available to the runtime use English
            if not system_locale[0]:
                desired_locale = 'en_US'
            else:
                desired_locale = system_locale[0]
        # Honour the per-domain localedir override (e.g. NEUTRON_LOCALEDIR);
        # fallback=True returns an identity translator if no catalog exists.
        locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
        lang = gettext.translation(domain,
                                   localedir=locale_dir,
                                   languages=[desired_locale],
                                   fallback=True)
        if six.PY3:
            translator = lang.gettext
        else:
            translator = lang.ugettext
        translated_message = translator(msgid)
        return translated_message
    def __mod__(self, other):
        # When we mod a Message we want the actual operation to be performed
        # by the parent class (i.e. unicode()), the only thing  we do here is
        # save the original msgid and the parameters in case of a translation
        params = self._sanitize_mod_params(other)
        unicode_mod = super(Message, self).__mod__(params)
        # Return a new Message carrying both the formatted text and the
        # ingredients (msgid + params) needed to re-format after translation.
        modded = Message(self.msgid,
                         msgtext=unicode_mod,
                         params=params,
                         domain=self.domain)
        return modded
    def _sanitize_mod_params(self, other):
        """Sanitize the object being modded with this Message.

        - Add support for modding 'None' so translation supports it
        - Trim the modded object, which can be a large dictionary, to only
        those keys that would actually be used in a translation
        - Snapshot the object being modded, in case the message is
        translated, it will be used as it was when the Message was created
        """
        if other is None:
            # `s % None` requires a 1-tuple; a bare None would fail.
            params = (other,)
        elif isinstance(other, dict):
            # Merge the dictionaries
            # Copy each item in case one does not support deep copy.
            params = {}
            if isinstance(self.params, dict):
                for key, val in self.params.items():
                    params[key] = self._copy_param(val)
            for key, val in other.items():
                params[key] = self._copy_param(val)
        else:
            params = self._copy_param(other)
        return params
    def _copy_param(self, param):
        # Snapshot a single substitution parameter at mod-time.
        try:
            return copy.deepcopy(param)
        except Exception:
            # Fallback to casting to unicode this will handle the
            # python code-like objects that can't be deep-copied
            return six.text_type(param)
    def __add__(self, other):
        # Concatenation would lose the msgid/params needed for translation.
        msg = _('Message objects do not support addition.')
        raise TypeError(msg)
    def __radd__(self, other):
        return self.__add__(other)
    def __str__(self):
        # NOTE(luisg): Logging in python 2.6 tries to str() log records,
        # and it expects specifically a UnicodeError in order to proceed.
        msg = _('Message objects do not support str() because they may '
                'contain non-ascii characters. '
                'Please use unicode() or translate() instead.')
        raise UnicodeError(msg)
def get_available_languages(domain):
    """Lists the available languages for the given translation domain.

    :param domain: the domain to get languages for
    :returns: a copy of the list of supported locale identifiers,
        always starting with 'en_US'
    """
    if domain in _AVAILABLE_LANGUAGES:
        return copy.copy(_AVAILABLE_LANGUAGES[domain])
    localedir = '%s_LOCALEDIR' % domain.upper()

    def find(x):
        # Probe for a compiled message catalog for locale `x`.
        return gettext.find(domain,
                            localedir=os.environ.get(localedir),
                            languages=[x])

    # NOTE(mrodden): en_US should always be available (and first in case
    # order matters) since our in-line message strings are en_US
    language_list = ['en_US']
    # NOTE(luisg): Babel <1.0 used a function called list(), which was
    # renamed to locale_identifiers() in >=1.0, the requirements master list
    # requires >=0.9.6, uncapped, so defensively work with both. We can remove
    # this check when the master list updates to >=1.0, and update all projects
    list_identifiers = (getattr(localedata, 'list', None) or
                        getattr(localedata, 'locale_identifiers'))
    for i in list_identifiers():
        if find(i) is not None:
            language_list.append(i)
    # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
    # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
    # are perfectly legitimate locales:
    # https://github.com/mitsuhiko/babel/issues/37
    # In Babel 1.3 they fixed the bug and they support these locales, but
    # they are still not explicitly "listed" by locale_identifiers().
    # That is why we add the locales here explicitly if necessary so that
    # they are listed as supported.
    aliases = {'zh': 'zh_CN',
               'zh_Hant_HK': 'zh_HK',
               'zh_Hant': 'zh_TW',
               'fil': 'tl_PH'}
    # NOTE: the loop variable is deliberately not named `locale` so it does
    # not shadow the stdlib `locale` module imported at the top of the file.
    for (locale_id, alias) in six.iteritems(aliases):
        if locale_id in language_list and alias not in language_list:
            language_list.append(alias)
    _AVAILABLE_LANGUAGES[domain] = language_list
    return copy.copy(language_list)
def translate(obj, desired_locale=None):
    """Gets the translated unicode representation of the given object.

    If the object is not translatable it is returned as-is.
    If the locale is None the object is translated to the system locale.

    :param obj: the object to translate
    :param desired_locale: the locale to translate the message to, if None the
        default system locale will be used
    :returns: the translated object in unicode, or the original object if
        it could not be translated
    """
    # Coerce non-Message inputs to unicode first; when lazy translation is
    # enabled this coercion may itself yield a Message.
    message = obj if isinstance(obj, Message) else six.text_type(obj)
    if isinstance(message, Message):
        return message.translate(desired_locale)
    return obj
def _translate_args(args, desired_locale=None):
    """Translates all the translatable elements of the given arguments object.

    This method is used for translating the translatable values in method
    arguments which include values of tuples or dictionaries.
    If the object is not a tuple or a dictionary the object itself is
    translated if it is translatable.

    If the locale is None the object is translated to the system locale.

    :param args: the args to translate
    :param desired_locale: the locale to translate the args to, if None the
        default system locale will be used
    :returns: a new args object with the translated contents of the original
    """
    if isinstance(args, tuple):
        return tuple(translate(v, desired_locale) for v in args)
    if isinstance(args, dict):
        return dict((k, translate(v, desired_locale))
                    for k, v in six.iteritems(args))
    return translate(args, desired_locale)
class TranslationHandler(handlers.MemoryHandler):
    """Handler that translates records before logging them.

    The TranslationHandler takes a locale and a target logging.Handler object
    to forward LogRecord objects to after translating them. This handler
    depends on Message objects being logged, instead of regular strings.

    The handler can be configured declaratively in the logging.conf as
    follows:

        [handlers]
        keys = translatedlog, translator

        [handler_translatedlog]
        class = handlers.WatchedFileHandler
        args = ('/var/log/api-localized.log',)
        formatter = context

        [handler_translator]
        class = openstack.common.log.TranslationHandler
        target = translatedlog
        args = ('zh_CN',)

    If the specified locale is not available in the system, the handler will
    log in the default locale.
    """
    def __init__(self, locale=None, target=None):
        """Initialize a TranslationHandler

        :param locale: locale to use for translating messages
        :param target: logging.Handler object to forward
                       LogRecord objects to after translation
        """
        # NOTE(luisg): In order to allow this handler to be a wrapper for
        # other handlers, such as a FileHandler, and still be able to
        # configure it using logging.conf, this handler has to extend
        # MemoryHandler because only the MemoryHandlers' logging.conf
        # parsing is implemented such that it accepts a target handler.
        handlers.MemoryHandler.__init__(self, capacity=0, target=target)
        self.locale = locale
    def setFormatter(self, fmt):
        # Formatting is delegated entirely to the wrapped target handler.
        self.target.setFormatter(fmt)
    def emit(self, record):
        # We save the message from the original record to restore it
        # after translation, so other handlers are not affected by this
        original_msg = record.msg
        original_args = record.args
        try:
            self._translate_and_log_record(record)
        finally:
            record.msg = original_msg
            record.args = original_args
    def _translate_and_log_record(self, record):
        record.msg = translate(record.msg, self.locale)
        # In addition to translating the message, we also need to translate
        # arguments that were passed to the log method that were not part
        # of the main message e.g., log.info(_('Some message %s'), this_one))
        record.args = _translate_args(record.args, self.locale)
        self.target.emit(record)
| 39.13363 | 79 | 0.648341 |
import copy
import functools
import gettext
import locale
from logging import handlers
import os
from babel import localedata
import six
# Optional override for where the 'neutron' message catalogs live.
_localedir = os.environ.get('neutron'.upper() + '_LOCALEDIR')
# Main translation catalog; fallback=True degrades to a no-op translation.
_t = gettext.translation('neutron', localedir=_localedir, fallback=True)
# One catalog per log level (domains 'neutron-log-info', ..., '-critical').
_t_log_levels = dict(
    (level, gettext.translation('neutron' + '-log-' + level,
                                localedir=_localedir,
                                fallback=True))
    for level in ['info', 'warning', 'error', 'critical']
)
# Per-domain cache used by get_available_languages().
_AVAILABLE_LANGUAGES = {}
# Toggled by enable_lazy(); when True, _() returns lazy Message objects.
USE_LAZY = False
def enable_lazy():
    """Enable lazy translation: _() will return Message objects."""
    global USE_LAZY
    USE_LAZY = True
def _(msg):
    # Translate msg in the 'neutron' domain. With lazy mode on, defer the
    # actual lookup by returning a Message object instead.
    if USE_LAZY:
        return Message(msg, domain='neutron')
    else:
        if six.PY3:
            return _t.gettext(msg)
        # Python 2 catalogs expose ugettext for unicode results.
        return _t.ugettext(msg)
def _log_translation(msg, level):
    # Translate a log message using the catalog for the given severity
    # level ('info', 'warning', 'error' or 'critical').
    if USE_LAZY:
        return Message(msg, domain='neutron' + '-log-' + level)
    else:
        translator = _t_log_levels[level]
        if six.PY3:
            return translator.gettext(msg)
        return translator.ugettext(msg)
# Severity-specific translation helpers for log messages.
_LI = functools.partial(_log_translation, level='info')
_LW = functools.partial(_log_translation, level='warning')
_LE = functools.partial(_log_translation, level='error')
_LC = functools.partial(_log_translation, level='critical')
def install(domain, lazy=False):
    """Install a _() function as a builtin for the given domain.

    :param domain: the translation domain
    :param lazy: if True, install a lazy _() that returns Message objects
        so translation can be deferred until render time
    """
    if lazy:
        def _lazy_gettext(msg):
            # Defer translation by wrapping the msgid in a Message.
            return Message(msg, domain=domain)
        from six import moves
        moves.builtins.__dict__['_'] = _lazy_gettext
    else:
        localedir = '%s_LOCALEDIR' % domain.upper()
        if six.PY3:
            gettext.install(domain,
                            localedir=os.environ.get(localedir))
        else:
            # Python 2 needs unicode=True to receive unicode strings back.
            gettext.install(domain,
                            localedir=os.environ.get(localedir),
                            unicode=True)
class Message(six.text_type):
    """A unicode subclass that remembers its msgid for later translation.

    Instances behave like regular unicode strings but carry the original
    msgid, domain, and any %-formatting parameters so that translate()
    can re-render the message in a different locale.
    """
    def __new__(cls, msgid, msgtext=None, params=None,
                domain='neutron', *args):
        # If no pre-translated text is supplied, translate the msgid now
        # (to the default system locale) for the base string value.
        if not msgtext:
            msgtext = Message._translate_msgid(msgid, domain)
        msg = super(Message, cls).__new__(cls, msgtext)
        msg.msgid = msgid
        msg.domain = domain
        msg.params = params
        return msg
    def translate(self, desired_locale=None):
        """Translate this message to the desired (or system) locale."""
        translated_message = Message._translate_msgid(self.msgid,
                                                      self.domain,
                                                      desired_locale)
        if self.params is None:
            # No parameters to interpolate; the catalog lookup is enough.
            return translated_message
        # Translate any translatable parameters, then re-apply formatting.
        translated_params = _translate_args(self.params, desired_locale)
        translated_message = translated_message % translated_params
        return translated_message
    @staticmethod
    def _translate_msgid(msgid, domain, desired_locale=None):
        # Fall back to the system locale (or en_US) when none is given.
        if not desired_locale:
            system_locale = locale.getdefaultlocale()
            if not system_locale[0]:
                desired_locale = 'en_US'
            else:
                desired_locale = system_locale[0]
        locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
        lang = gettext.translation(domain,
                                   localedir=locale_dir,
                                   languages=[desired_locale],
                                   fallback=True)
        if six.PY3:
            translator = lang.gettext
        else:
            translator = lang.ugettext
        translated_message = translator(msgid)
        return translated_message
    def __mod__(self, other):
        # Formatting is done by the unicode parent class; we only record
        # msgid and params so the result can be re-translated later.
        params = self._sanitize_mod_params(other)
        unicode_mod = super(Message, self).__mod__(params)
        modded = Message(self.msgid,
                        msgtext=unicode_mod,
                        params=params,
                        domain=self.domain)
        return modded
    def _sanitize_mod_params(self, other):
        # Wrap None in a tuple, merge dicts (snapshotting each value), or
        # snapshot a single scalar parameter.
        if other is None:
            params = (other,)
        elif isinstance(other, dict):
            params = {}
            if isinstance(self.params, dict):
                for key, val in self.params.items():
                    params[key] = self._copy_param(val)
            for key, val in other.items():
                params[key] = self._copy_param(val)
        else:
            params = self._copy_param(other)
        return params
    def _copy_param(self, param):
        try:
            return copy.deepcopy(param)
        except Exception:
            # Some objects cannot be deep-copied; keep their text form.
            return six.text_type(param)
    def __add__(self, other):
        # Concatenation would lose the msgid, so it is unsupported.
        msg = _('Message objects do not support addition.')
        raise TypeError(msg)
    def __radd__(self, other):
        return self.__add__(other)
    def __str__(self):
        # NOTE(luisg): Logging in python 2.6 tries to str() log records,
        # and it expects specifically a UnicodeError in order to proceed.
        msg = _('Message objects do not support str() because they may '
                'contain non-ascii characters. '
                'Please use unicode() or translate() instead.')
        raise UnicodeError(msg)
def get_available_languages(domain):
    """Lists the available languages for the given translation domain.

    :param domain: the domain to get languages for
    :returns: a list of supported locale identifiers, starting with 'en_US'
    """
    if domain in _AVAILABLE_LANGUAGES:
        return copy.copy(_AVAILABLE_LANGUAGES[domain])
    localedir = '%s_LOCALEDIR' % domain.upper()

    def find(x):
        # Probe for a compiled catalog for locale `x` in the domain dir.
        return gettext.find(domain,
                            localedir=os.environ.get(localedir),
                            languages=[x])

    # NOTE(mrodden): en_US should always be available (and first in case
    # order matters) since our in-line message strings are en_US
    language_list = ['en_US']
    # Babel <1.0 exposed list(); >=1.0 renamed it to locale_identifiers().
    list_identifiers = (getattr(localedata, 'list', None) or
                        getattr(localedata, 'locale_identifiers'))
    for i in list_identifiers():
        if find(i) is not None:
            language_list.append(i)
    # Babel >=1.0,<1.3 does not list these perfectly valid locales, so add
    # the aliases explicitly when the base locale is supported.
    aliases = {'zh': 'zh_CN',
               'zh_Hant_HK': 'zh_HK',
               'zh_Hant': 'zh_TW',
               'fil': 'tl_PH'}
    # NOTE: the loop variable is deliberately not named `locale` so it does
    # not shadow the stdlib `locale` module imported at the top of the file.
    for (locale_id, alias) in six.iteritems(aliases):
        if locale_id in language_list and alias not in language_list:
            language_list.append(alias)
    _AVAILABLE_LANGUAGES[domain] = language_list
    return copy.copy(language_list)
def translate(obj, desired_locale=None):
    """Return the translated unicode form of obj, or obj if untranslatable.

    :param obj: the object to translate
    :param desired_locale: target locale; None means the system locale
    """
    # Coerce non-Message inputs to unicode first; with lazy translation
    # enabled this coercion may itself produce a Message.
    message = obj if isinstance(obj, Message) else six.text_type(obj)
    if isinstance(message, Message):
        return message.translate(desired_locale)
    return obj
def _translate_args(args, desired_locale=None):
    """Translate every translatable element of a log-args object.

    Tuples and dicts are rebuilt with each value translated; anything
    else is translated directly (if translatable).
    """
    if isinstance(args, tuple):
        return tuple(translate(v, desired_locale) for v in args)
    if isinstance(args, dict):
        return dict((k, translate(v, desired_locale))
                    for k, v in six.iteritems(args))
    return translate(args, desired_locale)
class TranslationHandler(handlers.MemoryHandler):
    """Logging handler that translates Message records before forwarding.

    Wraps a target logging.Handler; each emitted record's msg and args are
    translated to the configured locale, then passed to the target.
    """
    def __init__(self, locale=None, target=None):
        """Initialize a TranslationHandler

        :param locale: locale to use for translating messages
        :param target: logging.Handler object to forward
                       LogRecord objects to after translation
        """
        # NOTE(luisg): In order to allow this handler to be a wrapper for
        # other handlers, such as a FileHandler, and still be able to
        # configure it using logging.conf, this handler has to extend
        # MemoryHandler because only the MemoryHandlers' logging.conf
        # parsing accepts a target handler.
        handlers.MemoryHandler.__init__(self, capacity=0, target=target)
        self.locale = locale
    def setFormatter(self, fmt):
        # Formatting is delegated entirely to the wrapped target handler.
        self.target.setFormatter(fmt)
    def emit(self, record):
        # Save the original message/args so other handlers sharing this
        # record are not affected by the translation.
        original_msg = record.msg
        original_args = record.args
        try:
            self._translate_and_log_record(record)
        finally:
            record.msg = original_msg
            record.args = original_args
    def _translate_and_log_record(self, record):
        record.msg = translate(record.msg, self.locale)
        # Also translate any translatable arguments passed to the log call.
        record.args = _translate_args(record.args, self.locale)
        self.target.emit(record)
| true | true |
1c33bfd302e7d66c62c77fcf9ddcf3ff4d552c7b | 30,613 | py | Python | openaerostruct/geometry/utils.py | fkopsaf/OpenAeroStruct | 414bd76a7f14f1bd52d6dacc6694382d52e5fabc | [
"Apache-2.0"
] | null | null | null | openaerostruct/geometry/utils.py | fkopsaf/OpenAeroStruct | 414bd76a7f14f1bd52d6dacc6694382d52e5fabc | [
"Apache-2.0"
] | null | null | null | openaerostruct/geometry/utils.py | fkopsaf/OpenAeroStruct | 414bd76a7f14f1bd52d6dacc6694382d52e5fabc | [
"Apache-2.0"
] | 1 | 2018-09-24T04:58:37.000Z | 2018-09-24T04:58:37.000Z | from __future__ import print_function, division
import warnings
import numpy as np
from numpy import cos, sin, tan
from openaerostruct.geometry.CRM_definitions import get_crm_points
def rotate(mesh, theta_y, symmetry, rotate_x=True):
    """
    Apply twist rotations (in degrees) about the quarter-chord line.

    Parameters
    ----------
    mesh[nx, ny, 3] : numpy array
        Nodal mesh defining the initial aerodynamic surface; modified
        in place.
    theta_y[ny] : numpy array
        1-D array of rotation angles about y-axis for each wing slice in
        degrees.
    symmetry : boolean
        Flag set to True if surface is reflected about y=0 plane.
    rotate_x : boolean
        Flag set to True if the user desires the twist variable to always be
        applied perpendicular to the wing (say, in the case of a winglet).

    Returns
    -------
    mesh[nx, ny, 3] : numpy array
        Nodal mesh defining the twisted aerodynamic surface.
    """
    _, ny, _ = mesh.shape
    qc = 0.25 * mesh[-1] + 0.75 * mesh[0]

    if not rotate_x:
        rx = 0.0
    elif symmetry:
        # The local x-rotation follows the dihedral slope of the quarter
        # chord; append 0 so the root section is not rotated.
        dz = qc[:-1, 2] - qc[1:, 2]
        dy = qc[:-1, 1] - qc[1:, 1]
        rx = np.append(np.arctan(dz / dy), 0.0)
    else:
        root = int((ny - 1) / 2)
        dz_l = qc[:root, 2] - qc[1:root + 1, 2]
        dy_l = qc[:root, 1] - qc[1:root + 1, 1]
        dz_r = qc[root + 1:, 2] - qc[root:-1, 2]
        dy_r = qc[root + 1:, 1] - qc[root:-1, 1]
        # A zero in the middle keeps the root section unrotated.
        rx = np.concatenate((np.arctan(dz_l / dy_l),
                             np.zeros(1),
                             np.arctan(dz_r / dy_r)))

    ry = theta_y * np.pi / 180.

    # One 3x3 rotation matrix per spanwise station. The dtype follows ry
    # so complex-step derivatives propagate through the rotation.
    mats = np.zeros((ny, 3, 3), dtype=type(ry[0]))
    cx, cy = cos(rx), cos(ry)
    sx, sy = sin(rx), sin(ry)
    mats[:, 0, 0] = cy
    mats[:, 0, 2] = sy
    mats[:, 1, 0] = sx * sy
    mats[:, 1, 1] = cx
    mats[:, 1, 2] = -sx * cy
    mats[:, 2, 0] = -cx * sy
    mats[:, 2, 1] = sx
    mats[:, 2, 2] = cx * cy

    # Rotate every mesh point about its quarter-chord location, in place.
    mesh[:] = np.einsum("ikj, mij -> mik", mats, mesh - qc) + qc
def scale_x(mesh, chord_dist):
    """
    Modify the chords along the span of the wing by scaling only the x-coord.

    The scaling at each spanwise station is applied about that station's
    quarter-chord point, so the quarter-chord line itself is unchanged.

    Parameters
    ----------
    mesh[nx, ny, 3] : numpy array
        Nodal mesh defining the initial aerodynamic surface; modified in
        place.
    chord_dist[ny] : numpy array
        Chord length for each panel edge, applied as a multiplicative
        scale about the local quarter chord.

    Returns
    -------
    mesh[nx, ny, 3] : numpy array
        Nodal mesh with the new chord lengths.
    """
    te = mesh[-1]
    le = mesh[ 0]
    quarter_chord = 0.25 * te + 0.75 * le

    # Vectorized over the span: scale every x-coordinate about the local
    # quarter-chord x-location (replaces a per-column Python loop).
    qc_x = quarter_chord[:, 0]
    mesh[:, :, 0] = (mesh[:, :, 0] - qc_x) * chord_dist + qc_x
def shear_x(mesh, xshear):
    """
    Shear the wing in the x direction (distributed sweep).

    Parameters
    ----------
    mesh[nx, ny, 3] : numpy array
        Nodal mesh defining the initial aerodynamic surface; modified in
        place.
    xshear[ny] : numpy array
        Distance to translate wing in x direction.

    Returns
    -------
    mesh[nx, ny, 3] : numpy array
        Nodal mesh translated in the x direction.
    """
    # Broadcast the per-station offset across all chordwise rows.
    mesh[:, :, 0] += xshear
def shear_y(mesh, yshear):
    """ Shear the wing in the y direction (distributed span).

    Parameters
    ----------
    mesh[nx, ny, 3] : numpy array
        Nodal mesh defining the initial aerodynamic surface; modified in
        place.
    yshear[ny] : numpy array
        Distance to translate wing in y direction.

    Returns
    -------
    mesh[nx, ny, 3] : numpy array
        Nodal mesh with the new span widths.
    """
    # Broadcast the per-station offset across all chordwise rows.
    mesh[:, :, 1] += yshear
def shear_z(mesh, zshear):
    """
    Shear the wing in the z direction (distributed dihedral).

    Parameters
    ----------
    mesh[nx, ny, 3] : numpy array
        Nodal mesh defining the initial aerodynamic surface; modified in
        place.
    zshear[ny] : numpy array
        Distance to translate wing in z direction.

    Returns
    -------
    mesh[nx, ny, 3] : numpy array
        Nodal mesh translated in the z direction.
    """
    # Broadcast the per-station offset across all chordwise rows.
    mesh[:, :, 2] += zshear
def sweep(mesh, sweep_angle, symmetry):
    """
    Apply shearing sweep. Positive sweeps back.

    Parameters
    ----------
    mesh[nx, ny, 3] : numpy array
        Nodal mesh defining the initial aerodynamic surface; modified in
        place.
    sweep_angle : float
        Shearing sweep angle in degrees.
    symmetry : boolean
        Flag set to true if surface is reflected about y=0 plane.

    Returns
    -------
    mesh[nx, ny, 3] : numpy array
        Nodal mesh defining the swept aerodynamic surface.
    """
    num_y = mesh.shape[1]
    le = mesh[0]
    tan_theta = tan(sweep_angle * np.pi / 180)

    if symmetry:
        # Shift x in proportion to the distance from the symmetry-plane
        # root (the last spanwise station of a symmetric half-mesh).
        y_root = le[-1, 1]
        dx = (y_root - le[:, 1]) * tan_theta
    else:
        # Both halves shear away from the central (root) station.
        ny2 = (num_y - 1) // 2
        y_root = le[ny2, 1]
        dx_left = (y_root - le[:ny2, 1]) * tan_theta
        dx_right = (le[ny2:, 1] - y_root) * tan_theta
        dx = np.hstack((dx_left, dx_right))

    # Every chordwise row receives the same spanwise shear.
    mesh[:, :, 0] += dx
def dihedral(mesh, dihedral_angle, symmetry):
    """
    Apply dihedral angle. Positive angles up.

    Parameters
    ----------
    mesh[nx, ny, 3] : numpy array
        Nodal mesh defining the initial aerodynamic surface; modified in
        place.
    dihedral_angle : float
        Dihedral angle in degrees.
    symmetry : boolean
        Flag set to true if surface is reflected about y=0 plane.

    Returns
    -------
    mesh[nx, ny, 3] : numpy array
        Nodal mesh defining the aerodynamic surface with dihedral angle.
    """
    num_y = mesh.shape[1]
    le = mesh[0]
    tan_theta = tan(dihedral_angle * np.pi / 180)

    if symmetry:
        # Raise z in proportion to the distance from the symmetry-plane
        # root (the last spanwise station of a symmetric half-mesh).
        y_root = le[-1, 1]
        dz = (y_root - le[:, 1]) * tan_theta
    else:
        # Both halves rise away from the central (root) station.
        ny2 = (num_y - 1) // 2
        y_root = le[ny2, 1]
        dz_left = (y_root - le[:ny2, 1]) * tan_theta
        dz_right = (le[ny2:, 1] - y_root) * tan_theta
        dz = np.hstack((dz_left, dz_right))

    # Every chordwise row receives the same spanwise offset.
    mesh[:, :, 2] += dz
def stretch(mesh, span, symmetry):
    """
    Stretch mesh in spanwise direction to reach specified span.

    The spanwise coordinates are rescaled along the quarter-chord line so
    that the full wingspan equals `span`.

    Parameters
    ----------
    mesh[nx, ny, 3] : numpy array
        Nodal mesh defining the initial aerodynamic surface; modified in
        place.
    span : float
        Target full wingspan (the full span even for symmetric cases).
    symmetry : boolean
        Flag set to true if surface is reflected about y=0 plane.

    Returns
    -------
    mesh[nx, ny, 3] : numpy array
        Nodal mesh defining the stretched aerodynamic surface.
    """
    # Measure the current span along the quarter-chord line.
    quarter_chord = 0.25 * mesh[-1] + 0.75 * mesh[0]

    # The user always supplies the full span; a symmetric mesh only
    # models half of it.
    if symmetry:
        span /= 2.

    # Scale each station's fractional span position to the new span.
    prev_span = quarter_chord[-1, 1] - quarter_chord[0, 1]
    mesh[:, :, 1] = quarter_chord[:, 1] / prev_span * span
def taper(mesh, taper_ratio, symmetry):
    """
    Alter the spanwise chord linearly to produce a tapered wing. Note that
    we apply taper around the quarter-chord line.

    Parameters
    ----------
    mesh[nx, ny, 3] : numpy array
        Nodal mesh defining the initial aerodynamic surface; modified in
        place.
    taper_ratio : float
        Taper ratio for the wing; 1 is untapered, 0 goes to a point.
    symmetry : boolean
        Flag set to true if surface is reflected about y=0 plane.

    Returns
    -------
    mesh[nx, ny, 3] : numpy array
        Nodal mesh defining the tapered aerodynamic surface.
    """
    # Get mesh parameters and the quarter-chord
    le = mesh[0]
    te = mesh[-1]
    quarter_chord = 0.25 * te + 0.75 * le
    y = quarter_chord[:, 1]
    span = y[-1] - y[0]

    # If symmetric, solve for the correct taper ratio, which is a linear
    # interpolation problem
    if symmetry:
        xp = np.array([-span, 0.])
        fp = np.array([taper_ratio, 1.])
    # Otherwise, we set up an interpolation problem for the entire wing,
    # which consists of two linear segments
    else:
        xp = np.array([-span/2, 0., span/2])
        fp = np.array([taper_ratio, 1., taper_ratio])

    # NOTE: named `taper_dist` (not `taper`) so the local variable does
    # not shadow this function's own name.
    # Interpolate over the real parts to stay compatible with
    # complex-step inputs.
    taper_dist = np.interp(y.real, xp.real, fp.real)

    # Modify the mesh based on the taper amount computed per spanwise section
    mesh[:] = np.einsum('ijk, j->ijk', mesh - quarter_chord, taper_dist) + quarter_chord
def gen_rect_mesh(num_x, num_y, span, chord, span_cos_spacing=0., chord_cos_spacing=0.):
    """
    Generate simple rectangular wing mesh.

    Parameters
    ----------
    num_x : float
        Desired number of chordwise node points for the final mesh.
    num_y : float
        Desired number of spanwise node points for the final mesh.
    span : float
        Total wingspan.
    chord : float
        Root chord.
    span_cos_spacing : float (optional)
        Blending ratio of uniform and cosine spacing in the spanwise direction.
        A value of 0. corresponds to uniform spacing and a value of 1.
        corresponds to regular cosine spacing. This increases the number of
        spanwise node points near the wingtips. A value of 2. bunches points
        at both the root and the tips.
    chord_cos_spacing : float (optional)
        Blending ratio of uniform and cosine spacing in the chordwise direction.
        A value of 0. corresponds to uniform spacing and a value of 1.
        corresponds to regular cosine spacing. This increases the number of
        chordwise node points near the edges.

    Returns
    -------
    mesh[nx, ny, 3] : numpy array
        Rectangular nodal mesh defining the final aerodynamic surface with the
        specified parameters.
    """
    mesh = np.zeros((num_x, num_y, 3))
    ny2 = (num_y + 1) // 2

    # Spanwise spacing: blend cosine and uniform with span_cos_spacing.
    if span_cos_spacing == 2.:
        # Hotfix a special case for spacing bunched at the root and tips
        beta = np.linspace(0, np.pi, ny2)
        cosine = .25 * (1 - np.cos(beta)) # cosine spacing
        uniform = np.linspace(0, .5, ny2)[::-1] # uniform spacing
        half_wing = cosine[::-1] * span_cos_spacing + (1 - span_cos_spacing) * uniform
    else:
        beta = np.linspace(0, np.pi/2, ny2)
        cosine = .5 * np.cos(beta) # cosine spacing
        uniform = np.linspace(0, .5, ny2)[::-1] # uniform spacing
        half_wing = cosine * span_cos_spacing + (1 - span_cos_spacing) * uniform
    full_wing = np.hstack((-half_wing[:-1], half_wing[::-1])) * span

    # Chordwise spacing, blended the same way with chord_cos_spacing.
    if num_x <= 2:
        # Special case: only leading/trailing edge nodes are needed, so
        # skip the blended-spacing computation entirely.
        full_wing_x = np.array([0., chord])[:num_x]
    else:
        nx2 = (num_x + 1) // 2
        beta = np.linspace(0, np.pi/2, nx2)
        cosine = .5 * np.cos(beta) # cosine spacing
        uniform = np.linspace(0, .5, nx2)[::-1] # uniform spacing
        half_chord = cosine * chord_cos_spacing + (1 - chord_cos_spacing) * uniform
        full_wing_x = np.hstack((-half_chord[:-1], half_chord[::-1])) * chord

    # Broadcast the two 1-D distributions onto the mesh (replaces a
    # nested Python loop over every node); z stays zero.
    mesh[:, :, 0] = full_wing_x[:, np.newaxis]
    mesh[:, :, 1] = full_wing[np.newaxis, :]

    return mesh
def gen_crm_mesh(num_x, num_y, span_cos_spacing=0., chord_cos_spacing=0., wing_type="CRM:jig"):
    """
    Generate Common Research Model wing mesh.

    Parameters
    ----------
    num_x : float
        Desired number of chordwise node points for the final mesh.
    num_y : float
        Desired number of spanwise node points for the final mesh.
    span_cos_spacing : float (optional)
        Blending ratio of uniform and cosine spacing in the spanwise direction.
        A value of 0. corresponds to uniform spacing and a value of 1.
        corresponds to regular cosine spacing. This increases the number of
        spanwise node points near the wingtips.
    chord_cos_spacing : float (optional)
        Blending ratio of uniform and cosine spacing in the chordwise direction.
        A value of 0. corresponds to uniform spacing and a value of 1.
        corresponds to regular cosine spacing. This increases the number of
        chordwise node points near the wingtips.
    wing_type : string (optional)
        Describes the desired CRM shape. Current options are:
        "CRM:jig" (undeformed jig shape),
        "CRM:alpha_2.75" (shape from wind tunnel testing at a=2.75 from DPW6)

    Returns
    -------
    mesh[nx, ny, 3] : numpy array
        Rectangular nodal mesh defining the final aerodynamic surface with the
        specified parameters.
    eta : numpy array
        Spanwise locations of the airfoil slices. Later used in the
        interpolation function to obtain correct twist values at
        points along the span that are not aligned with these slices.
    twist : numpy array
        Twist along the span at the spanwise eta locations. We use these twists
        as training points for interpolation to obtain twist values at
        arbitrary points along the span.
    """
    # Call an external function to get the data points for the specific CRM
    # type requested. See `CRM_definitions.py` for more information and the
    # raw data.
    raw_crm_points = get_crm_points(wing_type)
    # If this is a jig shape, remove all z-deflection to create a
    # poor person's version of the undeformed CRM.
    if 'jig' in wing_type or 'CRM' == wing_type:
        raw_crm_points[:, 3] = 0.
    # Get the leading edge of the raw crm points
    le = np.vstack((raw_crm_points[:,1],
                    raw_crm_points[:,2],
                    raw_crm_points[:,3]))
    # Get the chord, twist(in correct order), and eta values from the points
    chord = raw_crm_points[:, 5]
    twist = raw_crm_points[:, 4][::-1]
    eta = raw_crm_points[:, 0]
    # Get the trailing edge of the crm points, based on the chord + le distance.
    # Note that we do not account for twist here; instead we set that using
    # the twist design variable later in run_classes.py.
    te = np.vstack((raw_crm_points[:,1] + chord,
                    raw_crm_points[:,2],
                    raw_crm_points[:,3]))
    # Get the number of points that define this CRM shape and create a mesh
    # array based on this size
    n_raw_points = raw_crm_points.shape[0]
    mesh = np.empty((2, n_raw_points, 3))
    # Set the leading and trailing edges of the mesh matrix
    mesh[0, :, :] = le.T
    mesh[1, :, :] = te.T
    # Convert the mesh points to meters from inches.
    raw_mesh = mesh * 0.0254
    # Create the blended spacing using the user input for span_cos_spacing
    ny2 = (num_y + 1) // 2
    beta = np.linspace(0, np.pi/2, ny2)
    # Distribution for cosine spacing
    cosine = np.cos(beta)
    # Distribution for uniform spacing
    uniform = np.linspace(0, 1., ny2)[::-1]
    # Combine the two distrubtions using span_cos_spacing as the weighting factor.
    # span_cos_spacing == 1. is for fully cosine, 0. for uniform
    lins = cosine * span_cos_spacing + (1 - span_cos_spacing) * uniform
    # Populate a mesh object with the desired num_y dimension based on
    # interpolated values from the raw CRM points.
    mesh = np.empty((2, ny2, 3))
    for j in range(2):
        for i in range(3):
            mesh[j, :, i] = np.interp(lins[::-1], eta, raw_mesh[j, :, i].real)
    # That is just one half of the mesh and we later expect the full mesh,
    # even if we're using symmetry == True.
    # So here we mirror and stack the two halves of the wing.
    full_mesh = getFullMesh(right_mesh=mesh)
    # If we need to add chordwise panels, do so
    if num_x > 2:
        full_mesh = add_chordwise_panels(full_mesh, num_x, chord_cos_spacing)
    return full_mesh, eta, twist
def add_chordwise_panels(mesh, num_x, chord_cos_spacing):
    """
    Generate a new mesh with multiple chordwise panels.

    Parameters
    ----------
    mesh[2, ny, 3] : numpy array
        Nodal mesh defining the initial aerodynamic surface with only
        the leading and trailing edges defined.
    num_x : float
        Desired number of chordwise node points for the final mesh.
    chord_cos_spacing : float
        Blending ratio of uniform and cosine spacing in the chordwise
        direction. A value of 0. corresponds to uniform spacing and a value
        of 1. corresponds to regular cosine spacing. This increases the
        number of chordwise node points near the edges.

    Returns
    -------
    new_mesh[nx, ny, 3] : numpy array
        Nodal mesh defining the final aerodynamic surface with the
        specified number of chordwise node points.
    """
    num_y = mesh.shape[1]

    if chord_cos_spacing == 0.:
        # Pure uniform chordwise spacing.
        full_wing_x = np.linspace(0, 1., num_x)
    else:
        # Blend cosine and uniform half-spacings, then mirror; the +0.5
        # offset maps the distribution onto 0..1.
        nx2 = (num_x + 1) // 2
        beta = np.linspace(0, np.pi/2, nx2)
        cosine = .5 * np.cos(beta)  # cosine spacing
        uniform = np.linspace(0, .5, nx2)[::-1]  # uniform spacing
        half_wing = cosine * chord_cos_spacing + (1 - chord_cos_spacing) * uniform
        full_wing_x = np.hstack((-half_wing[:-1], half_wing[::-1])) + .5

    # Linearly interpolate between the leading and trailing edges at each
    # interior station (vectorized); end stations are exactly le and te.
    le = mesh[ 0, :, :]
    te = mesh[-1, :, :]
    new_mesh = np.zeros((num_x, num_y, 3))
    new_mesh[ 0, :, :] = le
    new_mesh[-1, :, :] = te
    w = full_wing_x[1:-1, np.newaxis, np.newaxis]
    new_mesh[1:-1, :, :] = (1 - w) * le + w * te

    return new_mesh
def get_default_geo_dict():
    """
    Obtain the default settings for the surface descriptions. Note that
    these defaults are overwritten based on user input for each surface.
    Each dictionary describes one surface.

    Returns
    -------
    defaults : dict
        A python dict containing the default surface-level settings.
    """
    defaults = {}

    # --- Wing definition ---
    # Number of chordwise and spanwise node points.
    defaults['num_x'] = 3
    defaults['num_y'] = 5
    # Spacing blends: 0 is uniform, 1 is cosine, values in between mix
    # the two distributions.
    defaults['span_cos_spacing'] = 0
    defaults['chord_cos_spacing'] = 0.
    # Initial planform shape: 'rect', or a 'CRM' variant such as
    # 'CRM:alpha_2.75' for the CRM shape at alpha = 2.75.
    defaults['wing_type'] = 'rect'
    # Model only one half of the wing, mirrored across the y = 0 plane.
    defaults['symmetry'] = True
    # Coordinates used to offset the surface from its default location.
    defaults['offset'] = np.zeros((3))

    # --- Simple geometric variables ---
    defaults['span'] = 10.       # full wingspan, even for symmetric cases
    defaults['root_chord'] = 1.  # root chord
    defaults['dihedral'] = 0.    # dihedral angle in degrees, positive up
    defaults['sweep'] = 0.       # sweep angle in degrees, positive back
    defaults['taper'] = 1.       # taper ratio; 1. is uniform chord

    return defaults
def generate_mesh(input_dict):
# Get defaults and update surface with the user-provided input
surf_dict = get_default_geo_dict()
surf_dict.update(input_dict)
num_x = surf_dict['num_x']
num_y = surf_dict['num_y']
span = surf_dict['span']
chord = surf_dict['root_chord']
span_cos_spacing = surf_dict['span_cos_spacing']
chord_cos_spacing = surf_dict['chord_cos_spacing']
# Check to make sure that an odd number of spanwise points (num_y) was provided
if not num_y % 2:
raise ValueError('num_y must be an odd number.')
# Check to make sure that an odd number of chordwise points (num_x) was provided
if not num_x % 2 and not num_x==2:
raise ValueError('num_x must be an odd number.')
# Generate rectangular mesh
if surf_dict['wing_type'] == 'rect':
mesh = gen_rect_mesh(num_x, num_y, span, chord,
span_cos_spacing, chord_cos_spacing)
# Generate CRM mesh. Note that this outputs twist information
# based on the data from the CRM definition paper, so we save
# this twist information to the surf_dict.
elif 'CRM' in surf_dict['wing_type']:
mesh, eta, twist = gen_crm_mesh(num_x, num_y,
span_cos_spacing, chord_cos_spacing, surf_dict['wing_type'])
surf_dict['crm_twist'] = twist
else:
raise NameError('wing_type option not understood. Must be either a type of ' +
'"CRM" or "rect".')
# Chop the mesh in half if using symmetry during analysis.
# Note that this means that the provided mesh should be the full mesh
if surf_dict['symmetry']:
num_y = int((num_y+1)/2)
mesh = mesh[:, :num_y, :]
# Apply the user-provided coordinate offset to position the mesh
mesh = mesh + surf_dict['offset']
# If CRM wing, then compute the jig twist values.
# Interpolate the twist values from the CRM wing definition to the twist
# control points.
if 'CRM' in surf_dict['wing_type']:
num_twist = surf_dict['num_twist_cp']
# If the surface is symmetric, simply interpolate the initial
# twist_cp values based on the mesh data
if surf_dict['symmetry']:
twist = np.interp(np.linspace(0, 1, num_twist), eta, surf_dict['crm_twist'])
else:
# If num_twist is odd, create the twist vector and mirror it
# then stack the two together, but remove the duplicated twist
# value.
if num_twist % 2:
twist = np.interp(np.linspace(0, 1, (num_twist+1)/2), eta, surf_dict['crm_twist'])
twist = np.hstack((twist[:-1], twist[::-1]))
# If num_twist is even, mirror the twist vector and stack
# them together
else:
twist = np.interp(np.linspace(0, 1, num_twist/2), eta, surf_dict['crm_twist'])
twist = np.hstack((twist, twist[::-1]))
return mesh, twist
else:
return mesh
def write_FFD_file(surface, mx, my):
mesh = surface['mesh']
nx, ny = mesh.shape[:2]
half_ffd = np.zeros((mx, my, 3))
LE = mesh[0, :, :]
TE = mesh[-1, :, :]
half_ffd[0, :, 0] = np.interp(np.linspace(0, 1, my), np.linspace(0, 1, ny), LE[:, 0])
half_ffd[0, :, 1] = np.interp(np.linspace(0, 1, my), np.linspace(0, 1, ny), LE[:, 1])
half_ffd[0, :, 2] = np.interp(np.linspace(0, 1, my), np.linspace(0, 1, ny), LE[:, 2])
half_ffd[-1, :, 0] = np.interp(np.linspace(0, 1, my), np.linspace(0, 1, ny), TE[:, 0])
half_ffd[-1, :, 1] = np.interp(np.linspace(0, 1, my), np.linspace(0, 1, ny), TE[:, 1])
half_ffd[-1, :, 2] = np.interp(np.linspace(0, 1, my), np.linspace(0, 1, ny), TE[:, 2])
for i in range(my):
half_ffd[:, i, 0] = np.linspace(half_ffd[0, i, 0], half_ffd[-1, i, 0], mx)
half_ffd[:, i, 1] = np.linspace(half_ffd[0, i, 1], half_ffd[-1, i, 1], mx)
half_ffd[:, i, 2] = np.linspace(half_ffd[0, i, 2], half_ffd[-1, i, 2], mx)
cushion = .5
half_ffd[0, :, 0] -= cushion
half_ffd[-1, :, 0] += cushion
half_ffd[:, 0, 1] -= cushion
half_ffd[:, -1, 1] += cushion
bottom_ffd = half_ffd.copy()
bottom_ffd[:, :, 2] -= cushion
top_ffd = half_ffd.copy()
top_ffd[:, :, 2] += cushion
ffd = np.vstack((bottom_ffd, top_ffd))
if 0:
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
axes = []
axes.append(fig.add_subplot(221, projection='3d'))
axes.append(fig.add_subplot(222, projection='3d'))
axes.append(fig.add_subplot(223, projection='3d'))
axes.append(fig.add_subplot(224, projection='3d'))
for i, ax in enumerate(axes):
xs = ffd[:, :, 0].flatten()
ys = ffd[:, :, 1].flatten()
zs = ffd[:, :, 2].flatten()
ax.scatter(xs, ys, zs, c='red', alpha=1., clip_on=False)
xs = ffd[:, :, 0].flatten()
ys = ffd[:, :, 1].flatten()
zs = ffd[:, :, 2].flatten()
ax.scatter(xs, ys, zs, c='blue', alpha=1.)
xs = mesh[:, :, 0]
ys = mesh[:, :, 1]
zs = mesh[:, :, 2]
ax.plot_wireframe(xs, ys, zs, color='k')
ax.set_xlim([-5, 5])
ax.set_ylim([-5, 5])
ax.set_zlim([-5, 5])
ax.set_xlim([20, 40])
ax.set_ylim([-25, -5.])
ax.set_zlim([-10, 10])
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_axis_off()
ax.set_axis_off()
if i == 0:
ax.view_init(elev=0, azim=180)
elif i == 1:
ax.view_init(elev=0, azim=90)
elif i == 2:
ax.view_init(elev=100000, azim=0)
else:
ax.view_init(elev=40, azim=-30)
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0)
plt.show()
filename = surface['name'] + '_ffd.fmt'
with open(filename, 'w') as f:
f.write('1\n')
f.write('{} {} {}\n'.format(mx, 2, my))
x = np.array_str(ffd[:, :, 0].flatten(order='F'))[1:-1] + '\n'
y = np.array_str(ffd[:, :, 1].flatten(order='F'))[1:-1] + '\n'
z = np.array_str(ffd[:, :, 2].flatten(order='F'))[1:-1] + '\n'
f.write(x)
f.write(y)
f.write(z)
return filename
def writeMesh(mesh,filename):
"""
Writes the OAS mesh in Tecplot .dat file format, for visualization and debugging purposes.
Parameters
----------
mesh[nx,ny,3] : numpy array
The OAS mesh to be written.
filename : str
The file name including the .dat extension.
"""
num_y = mesh.shape[0]
num_x = mesh.shape[1]
f = open(filename, 'w')
f.write('\t\t1\n')
f.write('\t\t%d\t\t%d\t\t%d\n' % (num_y, num_x, 1))
x = mesh[:, :, 0]
y = mesh[:, :, 1]
z = mesh[:, :, 2]
for dim in [x, y, z]:
for iy in range(num_x):
row = dim[:, iy]
for val in row:
f.write('\t{: 3.6f}'.format(val))
f.write('\n')
f.close()
def getFullMesh(left_mesh=None, right_mesh=None):
"""
For a symmetric wing, OAS only keeps and does computation on the left half.
This script mirros the OAS mesh and attaches it to the existing mesh to
obtain the full mesh.
Parameters
----------
left_mesh[nx,ny,3] or right_mesh : numpy array
The half mesh to be mirrored.
Returns
-------
full_mesh[nx,2*ny-1,3] : numpy array
The computed full mesh.
"""
if left_mesh is None and right_mesh is None:
raise ValueError("Either the left or right mesh need to be supplied.")
elif left_mesh is not None and right_mesh is not None:
raise ValueError("Please only provide either left or right mesh, not both.")
elif left_mesh is not None:
right_mesh = np.flip(left_mesh,axis=1).copy()
right_mesh[:,:,1] *= -1
else:
left_mesh = np.flip(right_mesh,axis=1).copy()
left_mesh[:,:,1] *= -1
full_mesh = np.concatenate((left_mesh,right_mesh[:,1:,:]),axis=1)
return full_mesh
| 34.669309 | 99 | 0.575964 | from __future__ import print_function, division
import warnings
import numpy as np
from numpy import cos, sin, tan
from openaerostruct.geometry.CRM_definitions import get_crm_points
def rotate(mesh, theta_y, symmetry, rotate_x=True):
te = mesh[-1]
le = mesh[ 0]
quarter_chord = 0.25 * te + 0.75 * le
nx, ny, _ = mesh.shape
if rotate_x:
if symmetry:
dz_qc = quarter_chord[:-1,2] - quarter_chord[1:,2]
dy_qc = quarter_chord[:-1,1] - quarter_chord[1:,1]
theta_x = np.arctan(dz_qc/dy_qc)
rad_theta_x = np.append(theta_x, 0.0)
else:
root_index = int((ny - 1) / 2)
dz_qc_left = quarter_chord[:root_index,2] - quarter_chord[1:root_index+1,2]
dy_qc_left = quarter_chord[:root_index,1] - quarter_chord[1:root_index+1,1]
theta_x_left = np.arctan(dz_qc_left/dy_qc_left)
dz_qc_right = quarter_chord[root_index+1:,2] - quarter_chord[root_index:-1,2]
dy_qc_right = quarter_chord[root_index+1:,1] - quarter_chord[root_index:-1,1]
theta_x_right = np.arctan(dz_qc_right/dy_qc_right)
rad_theta_x = np.concatenate((theta_x_left, np.zeros(1), theta_x_right))
else:
rad_theta_x = 0.0
rad_theta_y = theta_y * np.pi / 180.
mats = np.zeros((ny, 3, 3), dtype=type(rad_theta_y[0]))
cos_rtx = cos(rad_theta_x)
cos_rty = cos(rad_theta_y)
sin_rtx = sin(rad_theta_x)
sin_rty = sin(rad_theta_y)
mats[:, 0, 0] = cos_rty
mats[:, 0, 2] = sin_rty
mats[:, 1, 0] = sin_rtx * sin_rty
mats[:, 1, 1] = cos_rtx
mats[:, 1, 2] = -sin_rtx * cos_rty
mats[:, 2, 0] = -cos_rtx * sin_rty
mats[:, 2, 1] = sin_rtx
mats[:, 2, 2] = cos_rtx*cos_rty
mesh[:] = np.einsum("ikj, mij -> mik", mats, mesh - quarter_chord) + quarter_chord
def scale_x(mesh, chord_dist):
te = mesh[-1]
le = mesh[ 0]
quarter_chord = 0.25 * te + 0.75 * le
ny = mesh.shape[1]
for i in range(ny):
mesh[:, i, 0] = (mesh[:, i, 0] - quarter_chord[i, 0]) * chord_dist[i] + \
quarter_chord[i, 0]
def shear_x(mesh, xshear):
mesh[:, :, 0] += xshear
def shear_y(mesh, yshear):
mesh[:, :, 1] += yshear
def shear_z(mesh, zshear):
mesh[:, :, 2] += zshear
def sweep(mesh, sweep_angle, symmetry):
num_x, num_y, _ = mesh.shape
le = mesh[0]
p180 = np.pi / 180
tan_theta = tan(p180*sweep_angle)
if symmetry:
y0 = le[-1, 1]
dx = -(le[:, 1] - y0) * tan_theta
else:
ny2 = (num_y - 1) // 2
y0 = le[ny2, 1]
dx_right = (le[ny2:, 1] - y0) * tan_theta
dx_left = -(le[:ny2, 1] - y0) * tan_theta
dx = np.hstack((dx_left, dx_right))
mesh[:, :, 0] += dx
def dihedral(mesh, dihedral_angle, symmetry):
num_x, num_y, _ = mesh.shape
le = mesh[0]
p180 = np.pi / 180
tan_theta = tan(p180*dihedral_angle)
if symmetry:
y0 = le[-1, 1]
dz = -(le[:, 1] - y0) * tan_theta
else:
ny2 = (num_y-1) // 2
y0 = le[ny2, 1]
dz_right = (le[ny2:, 1] - y0) * tan_theta
dz_left = -(le[:ny2, 1] - y0) * tan_theta
dz = np.hstack((dz_left, dz_right))
mesh[:, :, 2] += dz
def stretch(mesh, span, symmetry):
le = mesh[0]
te = mesh[-1]
quarter_chord = 0.25 * te + 0.75 * le
if symmetry:
span /= 2.
prev_span = quarter_chord[-1, 1] - quarter_chord[0, 1]
s = quarter_chord[:,1] / prev_span
mesh[:, :, 1] = s * span
def taper(mesh, taper_ratio, symmetry):
le = mesh[0]
te = mesh[-1]
num_x, num_y, _ = mesh.shape
quarter_chord = 0.25 * te + 0.75 * le
x = quarter_chord[:, 1]
span = x[-1] - x[0]
if symmetry:
xp = np.array([-span, 0.])
fp = np.array([taper_ratio, 1.])
else:
xp = np.array([-span/2, 0., span/2])
fp = np.array([taper_ratio, 1., taper_ratio])
taper = np.interp(x.real, xp.real, fp.real)
mesh[:] = np.einsum('ijk, j->ijk', mesh - quarter_chord, taper) + quarter_chord
def gen_rect_mesh(num_x, num_y, span, chord, span_cos_spacing=0., chord_cos_spacing=0.):
mesh = np.zeros((num_x, num_y, 3))
ny2 = (num_y + 1) // 2
if span_cos_spacing == 2.:
beta = np.linspace(0, np.pi, ny2)
cosine = .25 * (1 - np.cos(beta))
uniform = np.linspace(0, .5, ny2)[::-1]
half_wing = cosine[::-1] * span_cos_spacing + (1 - span_cos_spacing) * uniform
full_wing = np.hstack((-half_wing[:-1], half_wing[::-1])) * span
else:
beta = np.linspace(0, np.pi/2, ny2)
cosine = .5 * np.cos(beta)
uniform = np.linspace(0, .5, ny2)[::-1]
half_wing = cosine * span_cos_spacing + (1 - span_cos_spacing) * uniform
full_wing = np.hstack((-half_wing[:-1], half_wing[::-1])) * span
nx2 = (num_x + 1) // 2
beta = np.linspace(0, np.pi/2, nx2)
cosine = .5 * np.cos(beta)
uniform = np.linspace(0, .5, nx2)[::-1]
half_wing = cosine * chord_cos_spacing + (1 - chord_cos_spacing) * uniform
full_wing_x = np.hstack((-half_wing[:-1], half_wing[::-1])) * chord
if num_x <= 2:
full_wing_x = np.array([0., chord])
for ind_x in range(num_x):
for ind_y in range(num_y):
mesh[ind_x, ind_y, :] = [full_wing_x[ind_x], full_wing[ind_y], 0]
return mesh
def gen_crm_mesh(num_x, num_y, span_cos_spacing=0., chord_cos_spacing=0., wing_type="CRM:jig"):
raw_crm_points = get_crm_points(wing_type)
if 'jig' in wing_type or 'CRM' == wing_type:
raw_crm_points[:, 3] = 0.
# Get the leading edge of the raw crm points
le = np.vstack((raw_crm_points[:,1],
raw_crm_points[:,2],
raw_crm_points[:,3]))
# Get the chord, twist(in correct order), and eta values from the points
chord = raw_crm_points[:, 5]
twist = raw_crm_points[:, 4][::-1]
eta = raw_crm_points[:, 0]
# Get the trailing edge of the crm points, based on the chord + le distance.
# Note that we do not account for twist here; instead we set that using
# the twist design variable later in run_classes.py.
te = np.vstack((raw_crm_points[:,1] + chord,
raw_crm_points[:,2],
raw_crm_points[:,3]))
# Get the number of points that define this CRM shape and create a mesh
# array based on this size
n_raw_points = raw_crm_points.shape[0]
mesh = np.empty((2, n_raw_points, 3))
# Set the leading and trailing edges of the mesh matrix
mesh[0, :, :] = le.T
mesh[1, :, :] = te.T
# Convert the mesh points to meters from inches.
raw_mesh = mesh * 0.0254
# Create the blended spacing using the user input for span_cos_spacing
ny2 = (num_y + 1) // 2
beta = np.linspace(0, np.pi/2, ny2)
# Distribution for cosine spacing
cosine = np.cos(beta)
# Distribution for uniform spacing
uniform = np.linspace(0, 1., ny2)[::-1]
# Combine the two distrubtions using span_cos_spacing as the weighting factor.
# span_cos_spacing == 1. is for fully cosine, 0. for uniform
lins = cosine * span_cos_spacing + (1 - span_cos_spacing) * uniform
# Populate a mesh object with the desired num_y dimension based on
# interpolated values from the raw CRM points.
mesh = np.empty((2, ny2, 3))
for j in range(2):
for i in range(3):
mesh[j, :, i] = np.interp(lins[::-1], eta, raw_mesh[j, :, i].real)
# That is just one half of the mesh and we later expect the full mesh,
# even if we're using symmetry == True.
full_mesh = getFullMesh(right_mesh=mesh)
if num_x > 2:
full_mesh = add_chordwise_panels(full_mesh, num_x, chord_cos_spacing)
return full_mesh, eta, twist
def add_chordwise_panels(mesh, num_x, chord_cos_spacing):
num_y = mesh.shape[1]
ny2 = (num_y + 1) // 2
nx2 = (num_x + 1) // 2
beta = np.linspace(0, np.pi/2, nx2)
cosine = .5 * np.cos(beta)
uniform = np.linspace(0, .5, nx2)[::-1]
half_wing = cosine * chord_cos_spacing + (1 - chord_cos_spacing) * uniform
if chord_cos_spacing == 0.:
full_wing_x = np.linspace(0, 1., num_x)
else:
full_wing_x = np.hstack((-half_wing[:-1], half_wing[::-1])) + .5
le = mesh[ 0, :, :]
te = mesh[-1, :, :]
new_mesh = np.zeros((num_x, num_y, 3))
new_mesh[ 0, :, :] = le
new_mesh[-1, :, :] = te
for i in range(1, num_x-1):
w = full_wing_x[i]
new_mesh[i, :, :] = (1 - w) * le + w * te
return new_mesh
def get_default_geo_dict():
defaults = {
'num_x' : 3,
'num_y' : 5,
'span_cos_spacing' : 0,
'chord_cos_spacing' : 0.,
'wing_type' : 'rect',
'symmetry' : True,
'offset' : np.zeros((3)),
'span' : 10.,
'root_chord' : 1.,
'dihedral' : 0.,
'sweep' : 0.,
'taper' : 1.,
}
return defaults
def generate_mesh(input_dict):
surf_dict = get_default_geo_dict()
surf_dict.update(input_dict)
num_x = surf_dict['num_x']
num_y = surf_dict['num_y']
span = surf_dict['span']
chord = surf_dict['root_chord']
span_cos_spacing = surf_dict['span_cos_spacing']
chord_cos_spacing = surf_dict['chord_cos_spacing']
if not num_y % 2:
raise ValueError('num_y must be an odd number.')
if not num_x % 2 and not num_x==2:
raise ValueError('num_x must be an odd number.')
if surf_dict['wing_type'] == 'rect':
mesh = gen_rect_mesh(num_x, num_y, span, chord,
span_cos_spacing, chord_cos_spacing)
elif 'CRM' in surf_dict['wing_type']:
mesh, eta, twist = gen_crm_mesh(num_x, num_y,
span_cos_spacing, chord_cos_spacing, surf_dict['wing_type'])
surf_dict['crm_twist'] = twist
else:
raise NameError('wing_type option not understood. Must be either a type of ' +
'"CRM" or "rect".')
if surf_dict['symmetry']:
num_y = int((num_y+1)/2)
mesh = mesh[:, :num_y, :]
mesh = mesh + surf_dict['offset']
if 'CRM' in surf_dict['wing_type']:
num_twist = surf_dict['num_twist_cp']
if surf_dict['symmetry']:
twist = np.interp(np.linspace(0, 1, num_twist), eta, surf_dict['crm_twist'])
else:
if num_twist % 2:
twist = np.interp(np.linspace(0, 1, (num_twist+1)/2), eta, surf_dict['crm_twist'])
twist = np.hstack((twist[:-1], twist[::-1]))
else:
twist = np.interp(np.linspace(0, 1, num_twist/2), eta, surf_dict['crm_twist'])
twist = np.hstack((twist, twist[::-1]))
return mesh, twist
else:
return mesh
def write_FFD_file(surface, mx, my):
mesh = surface['mesh']
nx, ny = mesh.shape[:2]
half_ffd = np.zeros((mx, my, 3))
LE = mesh[0, :, :]
TE = mesh[-1, :, :]
half_ffd[0, :, 0] = np.interp(np.linspace(0, 1, my), np.linspace(0, 1, ny), LE[:, 0])
half_ffd[0, :, 1] = np.interp(np.linspace(0, 1, my), np.linspace(0, 1, ny), LE[:, 1])
half_ffd[0, :, 2] = np.interp(np.linspace(0, 1, my), np.linspace(0, 1, ny), LE[:, 2])
half_ffd[-1, :, 0] = np.interp(np.linspace(0, 1, my), np.linspace(0, 1, ny), TE[:, 0])
half_ffd[-1, :, 1] = np.interp(np.linspace(0, 1, my), np.linspace(0, 1, ny), TE[:, 1])
half_ffd[-1, :, 2] = np.interp(np.linspace(0, 1, my), np.linspace(0, 1, ny), TE[:, 2])
for i in range(my):
half_ffd[:, i, 0] = np.linspace(half_ffd[0, i, 0], half_ffd[-1, i, 0], mx)
half_ffd[:, i, 1] = np.linspace(half_ffd[0, i, 1], half_ffd[-1, i, 1], mx)
half_ffd[:, i, 2] = np.linspace(half_ffd[0, i, 2], half_ffd[-1, i, 2], mx)
cushion = .5
half_ffd[0, :, 0] -= cushion
half_ffd[-1, :, 0] += cushion
half_ffd[:, 0, 1] -= cushion
half_ffd[:, -1, 1] += cushion
bottom_ffd = half_ffd.copy()
bottom_ffd[:, :, 2] -= cushion
top_ffd = half_ffd.copy()
top_ffd[:, :, 2] += cushion
ffd = np.vstack((bottom_ffd, top_ffd))
if 0:
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
axes = []
axes.append(fig.add_subplot(221, projection='3d'))
axes.append(fig.add_subplot(222, projection='3d'))
axes.append(fig.add_subplot(223, projection='3d'))
axes.append(fig.add_subplot(224, projection='3d'))
for i, ax in enumerate(axes):
xs = ffd[:, :, 0].flatten()
ys = ffd[:, :, 1].flatten()
zs = ffd[:, :, 2].flatten()
ax.scatter(xs, ys, zs, c='red', alpha=1., clip_on=False)
xs = ffd[:, :, 0].flatten()
ys = ffd[:, :, 1].flatten()
zs = ffd[:, :, 2].flatten()
ax.scatter(xs, ys, zs, c='blue', alpha=1.)
xs = mesh[:, :, 0]
ys = mesh[:, :, 1]
zs = mesh[:, :, 2]
ax.plot_wireframe(xs, ys, zs, color='k')
ax.set_xlim([-5, 5])
ax.set_ylim([-5, 5])
ax.set_zlim([-5, 5])
ax.set_xlim([20, 40])
ax.set_ylim([-25, -5.])
ax.set_zlim([-10, 10])
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_axis_off()
ax.set_axis_off()
if i == 0:
ax.view_init(elev=0, azim=180)
elif i == 1:
ax.view_init(elev=0, azim=90)
elif i == 2:
ax.view_init(elev=100000, azim=0)
else:
ax.view_init(elev=40, azim=-30)
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0)
plt.show()
filename = surface['name'] + '_ffd.fmt'
with open(filename, 'w') as f:
f.write('1\n')
f.write('{} {} {}\n'.format(mx, 2, my))
x = np.array_str(ffd[:, :, 0].flatten(order='F'))[1:-1] + '\n'
y = np.array_str(ffd[:, :, 1].flatten(order='F'))[1:-1] + '\n'
z = np.array_str(ffd[:, :, 2].flatten(order='F'))[1:-1] + '\n'
f.write(x)
f.write(y)
f.write(z)
return filename
def writeMesh(mesh,filename):
num_y = mesh.shape[0]
num_x = mesh.shape[1]
f = open(filename, 'w')
f.write('\t\t1\n')
f.write('\t\t%d\t\t%d\t\t%d\n' % (num_y, num_x, 1))
x = mesh[:, :, 0]
y = mesh[:, :, 1]
z = mesh[:, :, 2]
for dim in [x, y, z]:
for iy in range(num_x):
row = dim[:, iy]
for val in row:
f.write('\t{: 3.6f}'.format(val))
f.write('\n')
f.close()
def getFullMesh(left_mesh=None, right_mesh=None):
if left_mesh is None and right_mesh is None:
raise ValueError("Either the left or right mesh need to be supplied.")
elif left_mesh is not None and right_mesh is not None:
raise ValueError("Please only provide either left or right mesh, not both.")
elif left_mesh is not None:
right_mesh = np.flip(left_mesh,axis=1).copy()
right_mesh[:,:,1] *= -1
else:
left_mesh = np.flip(right_mesh,axis=1).copy()
left_mesh[:,:,1] *= -1
full_mesh = np.concatenate((left_mesh,right_mesh[:,1:,:]),axis=1)
return full_mesh
| true | true |
1c33c028878b8df40f98e39ce8707d77981d1131 | 4,298 | py | Python | lib/editorconfig/handler.py | Twilight0/script.module.jsbeautifier | 40b8bbd342788cbd2affaf08921b213252146eaa | [
"MIT"
] | 70 | 2015-01-12T09:55:18.000Z | 2022-03-29T06:15:49.000Z | lib/editorconfig/handler.py | Twilight0/script.module.jsbeautifier | 40b8bbd342788cbd2affaf08921b213252146eaa | [
"MIT"
] | 26 | 2015-09-15T06:46:51.000Z | 2022-03-28T08:56:35.000Z | lib/editorconfig/handler.py | Twilight0/script.module.jsbeautifier | 40b8bbd342788cbd2affaf08921b213252146eaa | [
"MIT"
] | 28 | 2015-04-05T18:07:16.000Z | 2022-03-28T08:08:00.000Z | """EditorConfig file handler
Provides ``EditorConfigHandler`` class for locating and parsing
EditorConfig files relevant to a given filepath.
Licensed under Simplified BSD License (see LICENSE.BSD file).
"""
import os
from editorconfig import VERSION
from editorconfig.exceptions import PathError, VersionError
from editorconfig.ini import EditorConfigParser
__all__ = ['EditorConfigHandler']
def get_filenames(path, filename):
"""Yield full filepath for filename in each directory in and above path"""
path_list = []
while True:
path_list.append(os.path.join(path, filename))
newpath = os.path.dirname(path)
if path == newpath:
break
path = newpath
return path_list
class EditorConfigHandler(object):
"""
Allows locating and parsing of EditorConfig files for given filename
In addition to the constructor a single public method is provided,
``get_configurations`` which returns the EditorConfig options for
the ``filepath`` specified to the constructor.
"""
def __init__(self, filepath, conf_filename='.editorconfig',
version=VERSION):
"""Create EditorConfigHandler for matching given filepath"""
self.filepath = filepath
self.conf_filename = conf_filename
self.version = version
self.options = None
def get_configurations(self):
"""
Find EditorConfig files and return all options matching filepath
Special exceptions that may be raised by this function include:
- ``VersionError``: self.version is invalid EditorConfig version
- ``PathError``: self.filepath is not a valid absolute filepath
- ``ParsingError``: improperly formatted EditorConfig file found
"""
self.check_assertions()
path, filename = os.path.split(self.filepath)
conf_files = get_filenames(path, self.conf_filename)
# Attempt to find and parse every EditorConfig file in filetree
for filename in conf_files:
parser = EditorConfigParser(self.filepath)
parser.read(filename)
# Merge new EditorConfig file's options into current options
old_options = self.options
self.options = parser.options
if old_options:
self.options.update(old_options)
# Stop parsing if parsed file has a ``root = true`` option
if parser.root_file:
break
self.preprocess_values()
return self.options
def check_assertions(self):
"""Raise error if filepath or version have invalid values"""
# Raise ``PathError`` if filepath isn't an absolute path
if not os.path.isabs(self.filepath):
raise PathError("Input file must be a full path name.")
# Raise ``VersionError`` if version specified is greater than current
if self.version is not None and self.version[:3] > VERSION[:3]:
raise VersionError(
"Required version is greater than the current version.")
def preprocess_values(self):
"""Preprocess option values for consumption by plugins"""
opts = self.options
# Lowercase option value for certain options
for name in ["end_of_line", "indent_style", "indent_size",
"insert_final_newline", "trim_trailing_whitespace",
"charset"]:
if name in opts:
opts[name] = opts[name].lower()
# Set indent_size to "tab" if indent_size is unspecified and
# indent_style is set to "tab".
if (opts.get("indent_style") == "tab" and
not "indent_size" in opts and self.version >= (0, 10, 0)):
opts["indent_size"] = "tab"
# Set tab_width to indent_size if indent_size is specified and
# tab_width is unspecified
if ("indent_size" in opts and "tab_width" not in opts and
opts["indent_size"] != "tab"):
opts["tab_width"] = opts["indent_size"]
# Set indent_size to tab_width if indent_size is "tab"
if ("indent_size" in opts and "tab_width" in opts and
opts["indent_size"] == "tab"):
opts["indent_size"] = opts["tab_width"]
| 33.578125 | 78 | 0.639832 |
import os
from editorconfig import VERSION
from editorconfig.exceptions import PathError, VersionError
from editorconfig.ini import EditorConfigParser
__all__ = ['EditorConfigHandler']
def get_filenames(path, filename):
path_list = []
while True:
path_list.append(os.path.join(path, filename))
newpath = os.path.dirname(path)
if path == newpath:
break
path = newpath
return path_list
class EditorConfigHandler(object):
def __init__(self, filepath, conf_filename='.editorconfig',
version=VERSION):
self.filepath = filepath
self.conf_filename = conf_filename
self.version = version
self.options = None
def get_configurations(self):
self.check_assertions()
path, filename = os.path.split(self.filepath)
conf_files = get_filenames(path, self.conf_filename)
for filename in conf_files:
parser = EditorConfigParser(self.filepath)
parser.read(filename)
old_options = self.options
self.options = parser.options
if old_options:
self.options.update(old_options)
# Stop parsing if parsed file has a ``root = true`` option
if parser.root_file:
break
self.preprocess_values()
return self.options
def check_assertions(self):
# Raise ``PathError`` if filepath isn't an absolute path
if not os.path.isabs(self.filepath):
raise PathError("Input file must be a full path name.")
if self.version is not None and self.version[:3] > VERSION[:3]:
raise VersionError(
"Required version is greater than the current version.")
def preprocess_values(self):
opts = self.options
for name in ["end_of_line", "indent_style", "indent_size",
"insert_final_newline", "trim_trailing_whitespace",
"charset"]:
if name in opts:
opts[name] = opts[name].lower()
if (opts.get("indent_style") == "tab" and
not "indent_size" in opts and self.version >= (0, 10, 0)):
opts["indent_size"] = "tab"
if ("indent_size" in opts and "tab_width" not in opts and
opts["indent_size"] != "tab"):
opts["tab_width"] = opts["indent_size"]
if ("indent_size" in opts and "tab_width" in opts and
opts["indent_size"] == "tab"):
opts["indent_size"] = opts["tab_width"]
| true | true |
1c33c11e620e4693a9ee4e27ae8196c291f627c5 | 1,086 | py | Python | runtimes/actions/riskCalculationFlow/formatData_BulkWrite.py | Hitachi-CTI-Call-For-Code-COVID-19-Team/risk-calculator | 96ff4ebe9bfdf3f8b525c65678500ea61260ada3 | [
"Apache-2.0"
] | null | null | null | runtimes/actions/riskCalculationFlow/formatData_BulkWrite.py | Hitachi-CTI-Call-For-Code-COVID-19-Team/risk-calculator | 96ff4ebe9bfdf3f8b525c65678500ea61260ada3 | [
"Apache-2.0"
] | null | null | null | runtimes/actions/riskCalculationFlow/formatData_BulkWrite.py | Hitachi-CTI-Call-For-Code-COVID-19-Team/risk-calculator | 96ff4ebe9bfdf3f8b525c65678500ea61260ada3 | [
"Apache-2.0"
] | null | null | null | # /*
# Copyright 2020 Hitachi Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# */
#
#
# main() will be run when you invoke this action
#
# @param Cloud Functions actions accept a single parameter, which must be a JSON object.
#
# @return The output of this action, which must be a JSON object.
#
#
# formaData_BulkWrite
import sys
import json
def main(jsonified_outputList_dict):
docsFormatted = json.dumps(
{"docs": json.loads(jsonified_outputList_dict["calulatedRisks"])})
return {'docs': docsFormatted,
"dbname": "log_risk_calculation"
}
| 26.487805 | 88 | 0.724678 |
import sys
import json
def main(jsonified_outputList_dict):
docsFormatted = json.dumps(
{"docs": json.loads(jsonified_outputList_dict["calulatedRisks"])})
return {'docs': docsFormatted,
"dbname": "log_risk_calculation"
}
| true | true |
1c33c148de72f2a2ca14577dee55ad5de602841d | 2,997 | py | Python | python/2016/day10.py | SylvainDe/aoc | b8a4609327831685ef94c9960350ff7bb5ace1a5 | [
"MIT"
] | null | null | null | python/2016/day10.py | SylvainDe/aoc | b8a4609327831685ef94c9960350ff7bb5ace1a5 | [
"MIT"
] | null | null | null | python/2016/day10.py | SylvainDe/aoc | b8a4609327831685ef94c9960350ff7bb5ace1a5 | [
"MIT"
] | null | null | null | # vi: set shiftwidth=4 tabstop=4 expandtab:
import datetime
import re
import collections
def get_instructions_from_file(file_path="../../resources/year2016_day10_input.txt"):
with open(file_path) as f:
return [l.strip() for l in f]
value_goes_re = r"value (\d+) goes to bot (\d+)"
bot_gives_re = r"bot (\d+) gives low to ([a-z]+) (\d+) and high to ([a-z]+) (\d+)"
def parse_instructions(instructions):
bot_chips = dict()
rules = dict()
for instruction in instructions:
match = re.match(value_goes_re, instruction)
if match:
val, bot = match.groups()
bot_chips.setdefault(int(bot), []).append(int(val))
else:
match = re.match(bot_gives_re, instruction)
if match:
bot, low_type, low_nb, high_type, high_nb = match.groups()
bot_int = int(bot)
assert bot_int not in rules
rules[bot_int] = ((low_type, int(low_nb)), (high_type, int(high_nb)))
else:
assert False
return bot_chips, rules
def follow_instructions(instructions):
bot_chips, rules = parse_instructions(instructions)
bots_to_action = collections.deque(
[bot for bot, chip_lst in bot_chips.items() if len(chip_lst) > 1]
)
outputs = dict()
comparisons = []
while bots_to_action:
bot_nb = bots_to_action.popleft()
low_rule, high_rule = rules[bot_nb]
low, high = sorted(bot_chips.pop(bot_nb))
comparisons.append((bot_nb, low, high))
for c, (dest_type, dest_nb) in [(low, low_rule), (high, high_rule)]:
if dest_type == "output":
outputs.setdefault(dest_nb, []).append(c)
elif dest_type == "bot":
l = bot_chips.setdefault(dest_nb, [])
l.append(c)
if len(l) > 1:
bots_to_action.append(dest_nb)
else:
assert False
return comparisons, outputs
def run_tests():
instructions = [
"value 5 goes to bot 2",
"bot 2 gives low to bot 1 and high to bot 0",
"value 3 goes to bot 1",
"bot 1 gives low to output 1 and high to bot 0",
"bot 0 gives low to output 2 and high to output 0",
"value 2 goes to bot 2",
]
comp, out = follow_instructions(instructions)
assert comp == [(2, 2, 5), (1, 2, 3), (0, 3, 5)]
assert out == {1: [2], 2: [3], 0: [5]}
def get_solutions():
instructions = get_instructions_from_file()
comp, out = follow_instructions(instructions)
for bot, low, high in comp:
if (low, high) == (17, 61):
print(bot)
break
else:
assert False
mult = 1
for chips in [out[0], out[1], out[2]]:
(val,) = chips
mult *= val
print(mult)
if __name__ == "__main__":
begin = datetime.datetime.now()
run_tests()
get_solutions()
end = datetime.datetime.now()
print(end - begin)
| 30.896907 | 85 | 0.573907 |
import datetime
import re
import collections
def get_instructions_from_file(file_path="../../resources/year2016_day10_input.txt"):
with open(file_path) as f:
return [l.strip() for l in f]
value_goes_re = r"value (\d+) goes to bot (\d+)"
bot_gives_re = r"bot (\d+) gives low to ([a-z]+) (\d+) and high to ([a-z]+) (\d+)"
def parse_instructions(instructions):
bot_chips = dict()
rules = dict()
for instruction in instructions:
match = re.match(value_goes_re, instruction)
if match:
val, bot = match.groups()
bot_chips.setdefault(int(bot), []).append(int(val))
else:
match = re.match(bot_gives_re, instruction)
if match:
bot, low_type, low_nb, high_type, high_nb = match.groups()
bot_int = int(bot)
assert bot_int not in rules
rules[bot_int] = ((low_type, int(low_nb)), (high_type, int(high_nb)))
else:
assert False
return bot_chips, rules
def follow_instructions(instructions):
bot_chips, rules = parse_instructions(instructions)
bots_to_action = collections.deque(
[bot for bot, chip_lst in bot_chips.items() if len(chip_lst) > 1]
)
outputs = dict()
comparisons = []
while bots_to_action:
bot_nb = bots_to_action.popleft()
low_rule, high_rule = rules[bot_nb]
low, high = sorted(bot_chips.pop(bot_nb))
comparisons.append((bot_nb, low, high))
for c, (dest_type, dest_nb) in [(low, low_rule), (high, high_rule)]:
if dest_type == "output":
outputs.setdefault(dest_nb, []).append(c)
elif dest_type == "bot":
l = bot_chips.setdefault(dest_nb, [])
l.append(c)
if len(l) > 1:
bots_to_action.append(dest_nb)
else:
assert False
return comparisons, outputs
def run_tests():
    """Check the simulation against the worked example from the puzzle text."""
    instructions = [
        "value 5 goes to bot 2",
        "bot 2 gives low to bot 1 and high to bot 0",
        "value 3 goes to bot 1",
        "bot 1 gives low to output 1 and high to bot 0",
        "bot 0 gives low to output 2 and high to output 0",
        "value 2 goes to bot 2",
    ]
    comp, out = follow_instructions(instructions)
    # Expected comparison order and final output bins from the example.
    assert comp == [(2, 2, 5), (1, 2, 3), (0, 3, 5)]
    assert out == {1: [2], 2: [3], 0: [5]}
def get_solutions():
    """Print the answers to both puzzle parts (AoC 2016, day 10)."""
    instructions = get_instructions_from_file()
    comp, out = follow_instructions(instructions)
    # Part 1: find the bot that compared chips 17 and 61.
    for bot, low, high in comp:
        if (low, high) == (17, 61):
            print(bot)
            break
    else:
        # The puzzle input guarantees such a bot exists.
        assert False
    # Part 2: product of the single chips left in outputs 0, 1 and 2.
    mult = 1
    for chips in [out[0], out[1], out[2]]:
        (val,) = chips  # each of these outputs must hold exactly one chip
        mult *= val
    print(mult)
if __name__ == "__main__":
    # Run the example-based self-tests, then the real input, and time both.
    begin = datetime.datetime.now()
    run_tests()
    get_solutions()
    end = datetime.datetime.now()
    print(end - begin)
| true | true |
1c33c267b7a90b6b1ed9a5308764bf8d3ab08bcf | 8,280 | py | Python | androguard/session.py | hakimkt/androguard | c16453c70f11df96e4ab3530c212aafe5e1e9e41 | [
"Apache-2.0"
] | 2 | 2018-01-28T22:51:12.000Z | 2021-02-26T12:02:55.000Z | androguard/session.py | eighthave/androguard | a4f6e7f192f0f21a2f9e063f467775c7b5e36190 | [
"Apache-2.0"
] | null | null | null | androguard/session.py | eighthave/androguard | a4f6e7f192f0f21a2f9e063f467775c7b5e36190 | [
"Apache-2.0"
] | null | null | null | import hashlib
from androguard.core.analysis.analysis import *
from androguard.core.bytecodes.dvm import *
from androguard.decompiler.decompiler import *
from androguard.core import androconf
import pickle
import logging
log = logging.getLogger("androguard.session")
def Save(session, filename):
    """
    Persist a session to disk.

    :param session: a Session object to save
    :param filename: output filename to save the session
    :type filename: string

    :Example:

        s = session.Session()
        session.Save(s, "msession.p")
    """
    serialized = pickle.dumps(session)
    with open(filename, "wb") as session_file:
        session_file.write(serialized)
def Load(filename):
    """
    Restore a session previously written by :func:`Save`.

    :param filename: the filename where the session has been saved
    :type filename: string
    :rtype: the elements of your session :)

    :Example:

        s = session.Load("mysession.p")
    """
    with open(filename, "rb") as session_file:
        raw = session_file.read()
    # NOTE: unpickling untrusted data can execute arbitrary code; only
    # load session files that you created yourself.
    return pickle.loads(raw)
class Session(object):
    """
    Tracks every APK/DEX/DEY file added for analysis, indexed by the
    SHA256 digest of the raw file contents.
    """

    def __init__(self, export_ipython=False):
        # export_ipython toggles create_python_export() after each
        # DEX/DEY is analyzed.
        self._setup_objects()
        self.export_ipython = export_ipython

    def _setup_objects(self):
        # filename -> list of digests loaded from that file
        self.analyzed_files = collections.OrderedDict()
        # digest -> filename (reverse mapping of the above)
        self.analyzed_digest = {}
        # digest -> [APK]
        self.analyzed_apk = {}
        # digest -> Analysis object stored by addDEX/addDEY
        self.analyzed_dex = collections.OrderedDict()
        # digest -> Analysis object stored by addAPK
        self.analyzed_vms = collections.OrderedDict()

    def reset(self):
        """
        Reset the current session, delete all added files.
        """
        self._setup_objects()

    def isOpen(self):
        """
        Test if any file was analyzed in this session.

        :return: `True` if any file was analyzed, `False` otherwise
        """
        return self.analyzed_digest != {}

    def show(self):
        """
        Print information about the current session.
        """
        print("APKs in Session: {}".format(len(self.analyzed_apk)))
        for d, a in self.analyzed_apk.items():
            print("\t{}: {}".format(d, a))
        print("DEXs in Session: {}".format(len(self.analyzed_dex)))
        for d, dex in self.analyzed_dex.items():
            print("\t{}: {}".format(d, dex))
        print("Analysis in Session: {}".format(len(self.analyzed_vms)))
        for d, a in self.analyzed_vms.items():
            print("\t{}: {}".format(d, a))

    def addAPK(self, filename, data):
        """
        Add an APK file to the Session and run analysis on it.

        :param filename: (file)name of APK file
        :param data: binary data of the APK file
        :return: a tuple of SHA256 Checksum and APK Object
        """
        digest = hashlib.sha256(data).hexdigest()
        log.debug("add APK:%s" % digest)
        apk = APK(data, True)
        self.analyzed_apk[digest] = [apk]
        # NOTE(review): assumes add() already created the list for this
        # filename; calling addAPK() directly would raise KeyError here.
        self.analyzed_files[filename].append(digest)
        self.analyzed_digest[digest] = filename
        self.analyzed_vms[digest] = Analysis()
        log.debug("added APK:%s" % digest)
        return digest, apk

    def addDEX(self, filename, data, dx=None):
        """
        Add a DEX file to the Session and run analysis.

        :param filename: the (file)name of the DEX file
        :param data: binary data of the dex file
        :param dx: an existing Analysis Object (optional)
        :return: A tuple of SHA256 Hash, DalvikVMFormat Object and Analysis object
        """
        digest = hashlib.sha256(data).hexdigest()
        log.debug("add DEX:%s" % digest)
        log.debug("Parsing format ...")
        d = DalvikVMFormat(data)
        log.debug("added DEX:%s" % digest)
        if filename not in self.analyzed_files:
            self.analyzed_files[filename] = []
        self.analyzed_files[filename].append(digest)
        self.analyzed_digest[digest] = filename
        if dx is None:
            dx = Analysis()
        dx.add(d)
        dx.create_xref()
        # NOTE(review): this loop rebinds `d`, so the return value and the
        # export below act on the *last* vm in dx.vms rather than the DEX
        # just added -- confirm this is intended.
        for d in dx.vms:
            d.set_decompiler(DecompilerDAD(d, dx))
            d.set_vmanalysis(dx)
        self.analyzed_dex[digest] = dx
        if self.export_ipython:
            log.debug("Exporting in ipython")
            d.create_python_export()
        return digest, d, dx

    def addDEY(self, filename, data, dx=None):
        """
        Add an ODEX (DEY) file to the Session and run analysis.

        :param filename: the (file)name of the DEY file
        :param data: binary data of the dey file
        :param dx: an existing Analysis Object (optional)
        :return: A tuple of SHA256 Hash, DalvikOdexVMFormat Object and Analysis object
        """
        digest = hashlib.sha256(data).hexdigest()
        log.debug("add DEY:%s" % digest)
        d = DalvikOdexVMFormat(data)
        log.debug("added DEY:%s" % digest)
        if filename not in self.analyzed_files:
            self.analyzed_files[filename] = []
        self.analyzed_files[filename].append(digest)
        self.analyzed_digest[digest] = filename
        if self.export_ipython:
            d.create_python_export()
        if dx is None:
            dx = Analysis()
        dx.add(d)
        dx.create_xref()
        # NOTE(review): same rebinding caveat as addDEX -- `d` ends up
        # being the last vm in dx.vms.
        for d in dx.vms:
            d.set_decompiler(DecompilerDAD(d, dx))
            d.set_vmanalysis(dx)
        self.analyzed_dex[digest] = dx
        return digest, d, dx

    def add(self, filename, raw_data, dx=None):
        """
        Generic method to add a file to the session.

        It guesses the filetype and calls the correct method.

        :param filename: filename to load
        :param raw_data: bytes of the file
        :param dx: An already exiting :class:`~androguard.core.analysis.analysis.Analysis` object
        :return: the sha256 of the file or None on failure
        """
        ret = androconf.is_android_raw(raw_data)
        if not ret:
            return None
        self.analyzed_files[filename] = []
        if ret == "APK":
            digest, apk = self.addAPK(filename, raw_data)
            dx = self.analyzed_vms.get(digest)
            # Analyse every classes*.dex inside the APK with one Analysis.
            for dex in apk.get_all_dex():
                _, d, dx = self.addDEX(filename, dex, dx)
        elif ret == "DEX":
            # NOTE(review): the incoming dx is not forwarded here, unlike
            # the DEY branch below -- confirm whether that is deliberate.
            digest, d, _ = self.addDEX(filename, raw_data)
            dx = self.analyzed_dex.get(digest)
        elif ret == "DEY":
            digest, d, _ = self.addDEY(filename, raw_data, dx)
            dx = self.analyzed_dex.get(digest)
        else:
            return None
        return digest

    def get_classes(self):
        """
        Yield ``(index, filename, digest, classes)`` for every analyzed vm.
        """
        # NOTE: verify idx for this api.
        idx = 0
        for digest in self.analyzed_vms:
            dx = self.analyzed_vms[digest]
            for vm in dx.vms:
                filename = self.analyzed_digest[digest]
                yield idx, filename, digest, vm.get_classes()
                idx += 1

    def get_analysis(self, current_class):
        """
        Return the Analysis object containing the given class, or None.
        """
        for digest in self.analyzed_vms:
            dx = self.analyzed_vms[digest]
            if dx.is_class_present(current_class.get_name()):
                return dx
        return None

    def get_format(self, current_class):
        """Return the DalvikVMFormat the given class was defined in."""
        return current_class.CM.vm

    def get_filename_by_class(self, current_class):
        """
        Return the filename the given class was loaded from, or None.
        """
        for digest in self.analyzed_vms:
            dx = self.analyzed_vms[digest]
            if dx.is_class_present(current_class.get_name()):
                return self.analyzed_digest[digest]
        return None

    def get_digest_by_class(self, current_class):
        """
        Return the digest of the file the given class belongs to, or None.
        """
        for digest in self.analyzed_vms:
            dx = self.analyzed_vms[digest]
            if dx.is_class_present(current_class.get_name()):
                return digest
        return None

    def get_strings(self):
        """
        Yield ``(digest, filename, strings analysis)`` per analyzed vm.
        """
        for digest in self.analyzed_vms:
            dx = self.analyzed_vms[digest]
            yield digest, self.analyzed_digest[digest], dx.get_strings_analysis()

    def get_nb_strings(self):
        """
        Return the total number of strings over all analyzed vms.
        """
        nb = 0
        for digest in self.analyzed_vms:
            dx = self.analyzed_vms[digest]
            nb += len(dx.get_strings_analysis())
        return nb

    def get_all_apks(self):
        """
        Yield ``(digest, [APK])`` for every APK in the session.
        """
        for digest in self.analyzed_apk:
            yield digest, self.analyzed_apk[digest]

    def get_objects_apk(self, filename, digest=None):
        """
        Return ``(APK, vms, Analysis)`` for a previously added APK.

        :param filename: the filename the APK was added under
        :param digest: optional SHA256 to pick one of several digests
        """
        if digest is None:
            digests = self.analyzed_files.get(filename)
            # Negate to reduce tree
            if not digests:
                return None, None, None
            digest = digests[0]
        a = self.analyzed_apk[digest][0]
        dx = self.analyzed_vms[digest]
        return a, dx.vms, dx

    def get_objects_dex(self):
        """
        Yield ``(digest, DalvikVMFormat, Analysis)`` for every analyzed vm.
        """
        for digest in self.analyzed_vms:
            dx = self.analyzed_vms[digest]
            for vm in dx.vms:
                yield digest, vm, dx
| 30.666667 | 97 | 0.592633 | import hashlib
from androguard.core.analysis.analysis import *
from androguard.core.bytecodes.dvm import *
from androguard.decompiler.decompiler import *
from androguard.core import androconf
import pickle
import logging
log = logging.getLogger("androguard.session")
def Save(session, filename):
with open(filename, "wb") as fd:
pickle.dump(session, fd)
def Load(filename):
with open(filename, "rb") as fd:
return pickle.load(fd)
class Session(object):
def __init__(self, export_ipython=False):
self._setup_objects()
self.export_ipython = export_ipython
def _setup_objects(self):
self.analyzed_files = collections.OrderedDict()
self.analyzed_digest = {}
self.analyzed_apk = {}
self.analyzed_dex = collections.OrderedDict()
self.analyzed_vms = collections.OrderedDict()
def reset(self):
self._setup_objects()
def isOpen(self):
return self.analyzed_digest != {}
def show(self):
print("APKs in Session: {}".format(len(self.analyzed_apk)))
for d, a in self.analyzed_apk.items():
print("\t{}: {}".format(d, a))
print("DEXs in Session: {}".format(len(self.analyzed_dex)))
for d, dex in self.analyzed_dex.items():
print("\t{}: {}".format(d, dex))
print("Analysis in Session: {}".format(len(self.analyzed_vms)))
for d, a in self.analyzed_vms.items():
print("\t{}: {}".format(d, a))
def addAPK(self, filename, data):
digest = hashlib.sha256(data).hexdigest()
log.debug("add APK:%s" % digest)
apk = APK(data, True)
self.analyzed_apk[digest] = [apk]
self.analyzed_files[filename].append(digest)
self.analyzed_digest[digest] = filename
self.analyzed_vms[digest] = Analysis()
log.debug("added APK:%s" % digest)
return digest, apk
def addDEX(self, filename, data, dx=None):
digest = hashlib.sha256(data).hexdigest()
log.debug("add DEX:%s" % digest)
log.debug("Parsing format ...")
d = DalvikVMFormat(data)
log.debug("added DEX:%s" % digest)
if filename not in self.analyzed_files:
self.analyzed_files[filename] = []
self.analyzed_files[filename].append(digest)
self.analyzed_digest[digest] = filename
if dx is None:
dx = Analysis()
dx.add(d)
dx.create_xref()
for d in dx.vms:
d.set_decompiler(DecompilerDAD(d, dx))
d.set_vmanalysis(dx)
self.analyzed_dex[digest] = dx
if self.export_ipython:
log.debug("Exporting in ipython")
d.create_python_export()
return digest, d, dx
def addDEY(self, filename, data, dx=None):
digest = hashlib.sha256(data).hexdigest()
log.debug("add DEY:%s" % digest)
d = DalvikOdexVMFormat(data)
log.debug("added DEY:%s" % digest)
if filename not in self.analyzed_files:
self.analyzed_files[filename] = []
self.analyzed_files[filename].append(digest)
self.analyzed_digest[digest] = filename
if self.export_ipython:
d.create_python_export()
if dx is None:
dx = Analysis()
dx.add(d)
dx.create_xref()
for d in dx.vms:
d.set_decompiler(DecompilerDAD(d, dx))
d.set_vmanalysis(dx)
self.analyzed_dex[digest] = dx
return digest, d, dx
def add(self, filename, raw_data, dx=None):
ret = androconf.is_android_raw(raw_data)
if not ret:
return None
self.analyzed_files[filename] = []
if ret == "APK":
digest, apk = self.addAPK(filename, raw_data)
dx = self.analyzed_vms.get(digest)
for dex in apk.get_all_dex():
_, d, dx = self.addDEX(filename, dex, dx)
elif ret == "DEX":
digest, d, _ = self.addDEX(filename, raw_data)
dx = self.analyzed_dex.get(digest)
elif ret == "DEY":
digest, d, _ = self.addDEY(filename, raw_data, dx)
dx = self.analyzed_dex.get(digest)
else:
return None
return digest
def get_classes(self):
idx = 0
for digest in self.analyzed_vms:
dx = self.analyzed_vms[digest]
for vm in dx.vms:
filename = self.analyzed_digest[digest]
yield idx, filename, digest, vm.get_classes()
idx += 1
def get_analysis(self, current_class):
for digest in self.analyzed_vms:
dx = self.analyzed_vms[digest]
if dx.is_class_present(current_class.get_name()):
return dx
return None
def get_format(self, current_class):
return current_class.CM.vm
def get_filename_by_class(self, current_class):
for digest in self.analyzed_vms:
dx = self.analyzed_vms[digest]
if dx.is_class_present(current_class.get_name()):
return self.analyzed_digest[digest]
return None
def get_digest_by_class(self, current_class):
for digest in self.analyzed_vms:
dx = self.analyzed_vms[digest]
if dx.is_class_present(current_class.get_name()):
return digest
return None
def get_strings(self):
for digest in self.analyzed_vms:
dx = self.analyzed_vms[digest]
yield digest, self.analyzed_digest[digest], dx.get_strings_analysis(
)
def get_nb_strings(self):
nb = 0
for digest in self.analyzed_vms:
dx = self.analyzed_vms[digest]
nb += len(dx.get_strings_analysis())
return nb
def get_all_apks(self):
for digest in self.analyzed_apk:
yield digest, self.analyzed_apk[digest]
def get_objects_apk(self, filename, digest=None):
if digest is None:
digests = self.analyzed_files.get(filename)
if not digests:
return None, None, None
digest = digests[0]
a = self.analyzed_apk[digest][0]
dx = self.analyzed_vms[digest]
return a, dx.vms, dx
def get_objects_dex(self):
for digest in self.analyzed_vms:
dx = self.analyzed_vms[digest]
for vm in dx.vms:
yield digest, vm, dx
| true | true |
1c33c385b705023e11290137cee26f62b0f93a76 | 7,695 | py | Python | r2d7/slackdroid.py | danrs/r2-d7 | d1f7a839f0bcb490954477c592245b5107b8a6aa | [
"MIT"
] | null | null | null | r2d7/slackdroid.py | danrs/r2-d7 | d1f7a839f0bcb490954477c592245b5107b8a6aa | [
"MIT"
] | null | null | null | r2d7/slackdroid.py | danrs/r2-d7 | d1f7a839f0bcb490954477c592245b5107b8a6aa | [
"MIT"
] | null | null | null | import html
import logging
import re
from urllib.parse import quote
from r2d7.core import DroidCore
logger = logging.getLogger(__name__)
class SlackDroid(DroidCore):
    """Slack-specific formatting and card-lookup front-end."""

    def __init__(self):
        super().__init__()
        self.load_data()

    def load_data(self):
        """Load the card data and pre-render all card text for Slack."""
        super().load_data()
        # References to conditions and ship abilities are highlighted
        self._ref_names = set()
        for card in self.data['condition'].values():
            self._ref_names.add(card['name'])
        for card in self.data['pilot'].values():
            if 'shipAbility' in card:
                self._ref_names.add(card['shipAbility']['name'])
        # Convert text now to save time later
        for category, names in self.data.items():
            for card in names.values():
                if 'sides' in card:
                    for side in card['sides']:
                        if 'ability' in side:
                            side['ability'] = self.convert_text(
                                side['ability'])
                        if 'device' in side:
                            side['device']['effect'] = self.convert_text(
                                side['device']['effect'])
                if 'ability' in card:
                    card['ability'] = self.convert_text(card['ability'])
                if 'shipAbility' in card:
                    card['shipAbility']['text'] = self.convert_text(
                        card['shipAbility']['text'])
                if category == 'damage':
                    card['text'] = self.convert_text(card['text'])

    def helpMessage(self):
        """Return the bot's Slack-formatted help text."""
        return f"""\
I am R2-D7, the x-wing miniatures chat bot.
{self.bold("List Printing:")} If you paste a (Yet Another) Squad Builder, Official FFG or LaunchBayNext permalink into a channel I'm in (or direct message me one), I will print a summary of the list.
{self.bold("Card Lookup:")} Type something surrounded by square brackets and I will describe any upgrades, ships or pilots that match what you said. (Eg. Why not try `[[Engine Upgrade]]`)
If you only want cards in a particular slot or ship, begin your lookup with the emoji for that ship or slot. (eg. `[[:crew: rey]]`)
You can also search for cards by points value in a particular slot. Eg. `[[:crew: <=3]]`. `=`, `<`, `>`, `<=` and `>=` are supported.
{self.bold("Dice Rolling:")} If you type `!roll` followed by a number and a dice color, I'll roll dice for you. Type `!roll syntax` for full syntax.
{self.bold("Metawing:")} Type `!meta` for a quick glimpse of the meta. Type `!meta syntax` for full syntax.
{self.bold("Issues:")} Type `!fix` for the best ways to contact the developers about issues.
"""

    # Matches an optional leading emoji, then either free text or a points
    # comparison such as "<=3", then another optional emoji.
    filter_pattern = re.compile(
        r' *(?:(:[^:]+:))? *(?:([^=><:]*[^=><: ][^=><:]*)|([=><][=><]?)'
        r' *(\d+)) *(?:(:[^:]+:))? *'
    )
    # Emoji names accepted for the different factions.
    faction_icon_pattern = r':(rebel(2)?|resistance(2)?|scum(2)?|imperial|empire2|first_order|firstorder2|separatistalliance|separatist2|galacticrepublic|republic2):'

    @staticmethod
    def iconify(name, special_chars=False):
        """Turn a card/ship/faction name into a Slack ``:emoji:`` token.

        :param special_chars: keep ``-`` and ``_`` in the emoji name
        """
        name = name.lower()
        if special_chars:
            name = re.sub(r'[^a-zA-Z0-9\-\_]', '', name)
        else:
            name = re.sub(r'[^a-zA-Z0-9]', '', name)
        # NOTE(review): '+' has already been stripped by the substitutions
        # above, so this replace looks like a no-op -- confirm intent.
        name = name.replace('+', 'plus')
        if name in ['bomb', 'shield']:
            name = 'x' + name
        # Lock is a standard emoji, so we'll stick with targetlock for 2.0
        elif name == 'lock':
            name = 'targetlock'
        elif name == 'rebelalliance':
            name = 'rebel'
        elif name == 'scumandvillainy':
            name = 'scum'
        elif name == 'galacticempire':
            name = 'imperial'
        elif name == 'firstorder':
            name = 'first_order'
        return f":{name}:"

    @staticmethod
    def bold(text):
        """Wrap text in Slack bold markers."""
        return f"*{text}*"

    @staticmethod
    def italics(text):
        """Wrap text in Slack italic markers."""
        return f"_{text}_"

    # Substitutions applied to card text: [Token] markers and a few fixed
    # phrases are mapped to Slack emoji / formatting.
    _data_to_emoji = {
        re.compile(r'\[Koiogran Turn\]'): ':kturn:',
        re.compile(r'\[Turn Right\]'): ':turnright:',
        re.compile(r'\[Turn Left\]'): ':turnleft:',
        re.compile(r'\[Bank Right\]'): ':bankright:',
        re.compile(r'\[Bank Left\]'): ':bankleft:',
        re.compile(r'\[Segnor\'s Loop Left\]'): ':sloopleft:',
        re.compile(r'\[Segnor\'s Loop Right\]'): ':sloopright:',
        re.compile(r'\[Tallon Roll Left\]'): ':trollleft:',
        re.compile(r'\[Tallon Roll Right\]'): ':trollright:',
        re.compile(r'\[Stationary\]'): ':stop:',
        re.compile(r'\[Critical Hit\]'): ':crit:',
        re.compile(r'\[Bomb\]'): ':xbomb:',
        re.compile(r'\[Barrel Roll\]'): ':barrelroll:',
        # :lock: is a default Slack emoji, so we'll stick with targetlock for 2.0
        re.compile(r'\[Lock\]'): ':targetlock:',
        re.compile(r'\[Force\]'): ':forcecharge:',
        re.compile(r'\[Rear Arc\]'): ':reararc:',
        re.compile(r'\[Front Arc\]'): ':frontarc:',
        re.compile(r'\[Left Arc\]'): ':leftarc:',
        re.compile(r'\[Right Arc\]'): ':rightarc:',
        re.compile(r'\[Bullseye Arc\]'): ':bullseyearc:',
        re.compile(r'\[Single Turret Arc\]'): ':singleturretarc:',
        re.compile(r'\[Double Turret Arc\]'): ':doubleturretarc:',
        re.compile(r'\[Rotate Arc\]'): ':rotatearc:',
        re.compile(r'(Ship|Pilot) damage card'): '_*\\1*_ damage card',
        re.compile(r'^(Bomb|Mine)'): '_*\\1:*_',
    }
    # Words that get emphasised wherever they occur in card text.
    _bold_words = [
        'must',
    ]

    def convert_text(self, text):
        """
        The data has HTML formatting tags, convert them to slack formatting.

        Returns a list of lines; "Header:" phrases start a new line.
        """
        if text == 'Attack':
            return [self.bold('Attack')]
        # Bold "Header:" phrases and mark them as line breaks.
        text = re.sub(r'\b([A-Z][A-Za-z ]+:)', '__BREAK__*\\1*', text)
        for regex, sub in self._data_to_emoji.items():
            text = regex.sub(sub, text)
        for card_name in self._ref_names:
            text = text.replace(card_name, self.italics(self.bold(card_name)))
        text = re.sub(f"\\b({'|'.join(self._bold_words)})\\b", '*\\1*', text)
        # Any remaining [Token] becomes a lowercased :token: emoji.
        text = re.sub(r'\[([^\[\]:]+)\]', lambda pat: f":{pat.group(1).lower()}:", text)
        lines = text.split('__BREAK__')
        return [line.strip() for line in lines if line != '']

    @classmethod
    def wiki_link(cls, card_name, crew_of_pilot=False, wiki_name=False):
        """Return a Slack link to the card's page on the X-Wing wiki.

        :param crew_of_pilot: link the Crew version of a pilot's card
        :param wiki_name: override when the wiki uses a different name
        """
        if not wiki_name:
            wiki_name = card_name
        fudged_name = re.sub(r' ', '_', wiki_name)
        # Data and the wiki use different name conventions
        #TODO work out the fudges for xwing-data
        # fudged_name = re.sub(r'\(Scum\)', '(S&V)', fudged_name)
        # fudged_name = re.sub(r'\((PS9|TFA)\)', '(HOR)', fudged_name)
        if 'Core Set' in card_name:
            fudged_name = 'X-Wing_' + fudged_name
        fudged_name = re.sub(r'-wing', '-Wing', fudged_name)
        fudged_name = re.sub(r'\/V', '/v', fudged_name)
        fudged_name = re.sub(r'\/X', '/x', fudged_name)
        fudged_name = re.sub(r'_\([-+]1\)', '', fudged_name)
        if crew_of_pilot:
            fudged_name += '_(Crew)'
        # Stupid Nien Nunb is a stupid special case
        elif fudged_name == 'Nien_Nunb':
            fudged_name += '_(T-70_X-Wing)'
        # All Hera's are suffixed on the wiki
        elif fudged_name == 'Hera_Syndulla':
            fudged_name += '_(VCX-100)'
        elif re.match(r'"Heavy_Scyk"_Interceptor', fudged_name):
            fudged_name = '"Heavy_Scyk"_Interceptor'
        fudged_name = quote(fudged_name)
        url = f"http://xwing-miniatures-second-edition.wikia.com/wiki/{fudged_name}"
        return cls.link(url, card_name)

    @staticmethod
    def link(url, name):
        """Format a Slack hyperlink."""
        return f"<{url}|{name}>"
| 42.988827 | 199 | 0.555036 | import html
import logging
import re
from urllib.parse import quote
from r2d7.core import DroidCore
logger = logging.getLogger(__name__)
class SlackDroid(DroidCore):
def __init__(self):
super().__init__()
self.load_data()
def load_data(self):
super().load_data()
self._ref_names = set()
for card in self.data['condition'].values():
self._ref_names.add(card['name'])
for card in self.data['pilot'].values():
if 'shipAbility' in card:
self._ref_names.add(card['shipAbility']['name'])
for category, names in self.data.items():
for card in names.values():
if 'sides' in card:
for side in card['sides']:
if 'ability' in side:
side['ability'] = self.convert_text(
side['ability'])
if 'device' in side:
side['device']['effect'] = self.convert_text(
side['device']['effect'])
if 'ability' in card:
card['ability'] = self.convert_text(card['ability'])
if 'shipAbility' in card:
card['shipAbility']['text'] = self.convert_text(
card['shipAbility']['text'])
if category == 'damage':
card['text'] = self.convert_text(card['text'])
def helpMessage(self):
return f"""\
I am R2-D7, the x-wing miniatures chat bot.
{self.bold("List Printing:")} If you paste a (Yet Another) Squad Builder, Official FFG or LaunchBayNext permalink into a channel I'm in (or direct message me one), I will print a summary of the list.
{self.bold("Card Lookup:")} Type something surrounded by square brackets and I will describe any upgrades, ships or pilots that match what you said. (Eg. Why not try `[[Engine Upgrade]]`)
If you only want cards in a particular slot or ship, begin your lookup with the emoji for that ship or slot. (eg. `[[:crew: rey]]`)
You can also search for cards by points value in a particular slot. Eg. `[[:crew: <=3]]`. `=`, `<`, `>`, `<=` and `>=` are supported.
{self.bold("Dice Rolling:")} If you type `!roll` followed by a number and a dice color, I'll roll dice for you. Type `!roll syntax` for full syntax.
{self.bold("Metawing:")} Type `!meta` for a quick glimpse of the meta. Type `!meta syntax` for full syntax.
{self.bold("Issues:")} Type `!fix` for the best ways to contact the developers about issues.
"""
filter_pattern = re.compile(
r' *(?:(:[^:]+:))? *(?:([^=><:]*[^=><: ][^=><:]*)|([=><][=><]?)'
r' *(\d+)) *(?:(:[^:]+:))? *'
)
faction_icon_pattern = r':(rebel(2)?|resistance(2)?|scum(2)?|imperial|empire2|first_order|firstorder2|separatistalliance|separatist2|galacticrepublic|republic2):'
@staticmethod
def iconify(name, special_chars=False):
name = name.lower()
if special_chars:
name = re.sub(r'[^a-zA-Z0-9\-\_]', '', name)
else:
name = re.sub(r'[^a-zA-Z0-9]', '', name)
name = name.replace('+', 'plus')
if name in ['bomb', 'shield']:
name = 'x' + name
elif name == 'lock':
name = 'targetlock'
elif name == 'rebelalliance':
name = 'rebel'
elif name == 'scumandvillainy':
name = 'scum'
elif name == 'galacticempire':
name = 'imperial'
elif name == 'firstorder':
name = 'first_order'
return f":{name}:"
@staticmethod
def bold(text):
return f"*{text}*"
@staticmethod
def italics(text):
return f"_{text}_"
_data_to_emoji = {
re.compile(r'\[Koiogran Turn\]'): ':kturn:',
re.compile(r'\[Turn Right\]'): ':turnright:',
re.compile(r'\[Turn Left\]'): ':turnleft:',
re.compile(r'\[Bank Right\]'): ':bankright:',
re.compile(r'\[Bank Left\]'): ':bankleft:',
re.compile(r'\[Segnor\'s Loop Left\]'): ':sloopleft:',
re.compile(r'\[Segnor\'s Loop Right\]'): ':sloopright:',
re.compile(r'\[Tallon Roll Left\]'): ':trollleft:',
re.compile(r'\[Tallon Roll Right\]'): ':trollright:',
re.compile(r'\[Stationary\]'): ':stop:',
re.compile(r'\[Critical Hit\]'): ':crit:',
re.compile(r'\[Bomb\]'): ':xbomb:',
re.compile(r'\[Barrel Roll\]'): ':barrelroll:',
# :lock: is a default Slack emoji, so we'll stick with targetlock for 2.0
re.compile(r'\[Lock\]'): ':targetlock:',
re.compile(r'\[Force\]'): ':forcecharge:',
re.compile(r'\[Rear Arc\]'): ':reararc:',
re.compile(r'\[Front Arc\]'): ':frontarc:',
re.compile(r'\[Left Arc\]'): ':leftarc:',
re.compile(r'\[Right Arc\]'): ':rightarc:',
re.compile(r'\[Bullseye Arc\]'): ':bullseyearc:',
re.compile(r'\[Single Turret Arc\]'): ':singleturretarc:',
re.compile(r'\[Double Turret Arc\]'): ':doubleturretarc:',
re.compile(r'\[Rotate Arc\]'): ':rotatearc:',
re.compile(r'(Ship|Pilot) damage card'): '_*\\1*_ damage card',
re.compile(r'^(Bomb|Mine)'): '_*\\1:*_',
}
_bold_words = [
'must',
]
def convert_text(self, text):
if text == 'Attack':
return [self.bold('Attack')]
text = re.sub(r'\b([A-Z][A-Za-z ]+:)', '__BREAK__*\\1*', text)
for regex, sub in self._data_to_emoji.items():
text = regex.sub(sub, text)
for card_name in self._ref_names:
text = text.replace(card_name, self.italics(self.bold(card_name)))
text = re.sub(f"\\b({'|'.join(self._bold_words)})\\b", '*\\1*', text)
text = re.sub(r'\[([^\[\]:]+)\]', lambda pat: f":{pat.group(1).lower()}:", text)
lines = text.split('__BREAK__')
return [line.strip() for line in lines if line != '']
@classmethod
def wiki_link(cls, card_name, crew_of_pilot=False, wiki_name=False):
if not wiki_name:
wiki_name = card_name
fudged_name = re.sub(r' ', '_', wiki_name)
if 'Core Set' in card_name:
fudged_name = 'X-Wing_' + fudged_name
fudged_name = re.sub(r'-wing', '-Wing', fudged_name)
fudged_name = re.sub(r'\/V', '/v', fudged_name)
fudged_name = re.sub(r'\/X', '/x', fudged_name)
fudged_name = re.sub(r'_\([-+]1\)', '', fudged_name)
if crew_of_pilot:
fudged_name += '_(Crew)'
elif fudged_name == 'Nien_Nunb':
fudged_name += '_(T-70_X-Wing)'
elif fudged_name == 'Hera_Syndulla':
fudged_name += '_(VCX-100)'
elif re.match(r'"Heavy_Scyk"_Interceptor', fudged_name):
fudged_name = '"Heavy_Scyk"_Interceptor'
fudged_name = quote(fudged_name)
url = f"http://xwing-miniatures-second-edition.wikia.com/wiki/{fudged_name}"
return cls.link(url, card_name)
@staticmethod
def link(url, name):
return f"<{url}|{name}>"
| true | true |
1c33c3d60a687988f1a25b8804876c5ef86fc7a7 | 740 | py | Python | BuildSimHubAPI/measures/wall_rvalue.py | ruijis/buildsimhub_python_api | 67a88a421a5970b9134a97faf3d52a5a8a6c6258 | [
"MIT"
] | 19 | 2018-02-27T22:58:04.000Z | 2022-02-21T15:03:59.000Z | BuildSimHubAPI/measures/wall_rvalue.py | ruijis/buildsimhub_python_api | 67a88a421a5970b9134a97faf3d52a5a8a6c6258 | [
"MIT"
] | 11 | 2018-02-15T16:47:53.000Z | 2018-12-19T18:33:20.000Z | BuildSimHubAPI/measures/wall_rvalue.py | ruijis/buildsimhub_python_api | 67a88a421a5970b9134a97faf3d52a5a8a6c6258 | [
"MIT"
] | 11 | 2018-01-26T02:12:38.000Z | 2019-09-29T12:05:31.000Z | from .model_action import ModelAction
class WallRValue(ModelAction):
    """Measure that updates the insulation R-value of exterior walls."""

    # Conversion factor applied when unit == 'ip' (IP values are
    # multiplied by this rate). 5.678 is the standard R/U-value
    # conversion constant between ft2.F.h/Btu and m2.K/W.
    # NOTE(review): the original comments referred to window U-values,
    # presumably copy-pasted from the window measure -- confirm intent.
    CONVERSION_RATE = 5.678

    def __init__(self, unit="si"):
        ModelAction.__init__(self, 'wall_rvalue', unit)
        self._measure_name = 'Wall_R'
        # NOTE(review): the help text below says "Minimum: 0.1" while
        # _lower_limit is 0 -- confirm which bound is intended.
        self._lower_limit = 0
        self._measure_help = '''
        measure name: Wall_R
        Unit: ip or si
        Minimum: 0.1
        Maximum: NA
        Type: numeric
        This measure will update the insulation layer of an exterior construction
        '''

    def _unit_convert_ratio(self):
        # Ratio used by the base class when converting 'ip' input values.
        return WallRValue.CONVERSION_RATE
| 27.407407 | 81 | 0.639189 | from .model_action import ModelAction
class WallRValue(ModelAction):
CONVERSION_RATE = 5.678
def __init__(self, unit="si"):
ModelAction.__init__(self, 'wall_rvalue', unit)
self._measure_name = 'Wall_R'
self._lower_limit = 0
self._measure_help = '''
measure name: Wall_R
Unit: ip or si
Minimum: 0.1
Maximum: NA
Type: numeric
This measure will update the insulation layer of an exterior construction
'''
def _unit_convert_ratio(self):
return WallRValue.CONVERSION_RATE
| true | true |
1c33c41b5744af5492119fefdab088d76a166432 | 1,592 | py | Python | prereise/gather/demanddata/eia/tests/test_get_eia_data.py | keforres/PreREISE | fcc111fdccc0626d3d34f1749a14035e47991043 | [
"MIT"
] | 15 | 2021-03-02T11:54:27.000Z | 2022-02-16T13:01:40.000Z | prereise/gather/demanddata/eia/tests/test_get_eia_data.py | keforres/PreREISE | fcc111fdccc0626d3d34f1749a14035e47991043 | [
"MIT"
] | 90 | 2021-01-25T19:02:14.000Z | 2022-03-31T20:27:28.000Z | prereise/gather/demanddata/eia/tests/test_get_eia_data.py | keforres/PreREISE | fcc111fdccc0626d3d34f1749a14035e47991043 | [
"MIT"
] | 15 | 2021-02-08T23:28:21.000Z | 2022-01-24T21:59:14.000Z | import getpass
import os
from datetime import datetime
import pandas as pd
import pytest
from prereise.gather.demanddata.eia import get_eia_data
@pytest.mark.skip(reason="Need API key")
def test_eia_download():
    """Check data frame assembled from data download by API call from EIA.

    Test checks that the correct number of files are downloaded and correct
    number of columns are created.

    Token string can be obtained by registering
    `here <https://www.eia.gov/opendata/>`_.
    """
    print(
        "A API key is required for the API download. The key "
        "can be obtained by a user by registering at "
        "https://www.eia.gov/opendata/."
    )
    # The key is read interactively so it never lands in shell history.
    token = getpass.getpass(prompt="API key=")
    offset = 3
    start = pd.to_datetime("2018-07-01 07:00:00")
    end = datetime.today()
    # Hourly demand series identifiers for a few balancing authorities.
    demand_list = [
        "EBA.BANC-ALL.D.H",
        "EBA.BPAT-ALL.D.H",
        "EBA.CHPD-ALL.D.H",
        "EBA.CISO-ALL.D.H",
    ]
    this = get_eia_data.from_download(token, start, end, offset, demand_list)
    # One column is expected per requested series.
    assert len(this.columns) == (len(demand_list))
def test_from_excel():
    """Check the data frame built from manually downloaded EIA spreadsheets.

    Verifies that one column is created per balancing authority.
    """
    data_dir = os.path.join(os.path.dirname(__file__), "data")
    period_start = pd.to_datetime("2018-07-01 07:00:00")
    period_end = pd.to_datetime("2018-10-01 07:00:00")
    authorities = ["BPAT", "CISO", "EPE"]
    frame = get_eia_data.from_excel(data_dir, authorities, period_start, period_end)
    assert len(frame.columns) == len(authorities)
| 28.945455 | 79 | 0.667714 | import getpass
import os
from datetime import datetime
import pandas as pd
import pytest
from prereise.gather.demanddata.eia import get_eia_data
@pytest.mark.skip(reason="Need API key")
def test_eia_download():
print(
"A API key is required for the API download. The key "
"can be obtained by a user by registering at "
"https://www.eia.gov/opendata/."
)
token = getpass.getpass(prompt="API key=")
offset = 3
start = pd.to_datetime("2018-07-01 07:00:00")
end = datetime.today()
demand_list = [
"EBA.BANC-ALL.D.H",
"EBA.BPAT-ALL.D.H",
"EBA.CHPD-ALL.D.H",
"EBA.CISO-ALL.D.H",
]
this = get_eia_data.from_download(token, start, end, offset, demand_list)
assert len(this.columns) == (len(demand_list))
def test_from_excel():
dir1 = os.path.join(os.path.dirname(__file__), "data")
start = pd.to_datetime("2018-07-01 07:00:00")
end = pd.to_datetime("2018-10-01 07:00:00")
ba_list = ["BPAT", "CISO", "EPE"]
ba_from_excel = get_eia_data.from_excel(dir1, ba_list, start, end)
assert len(ba_from_excel.columns) == len(ba_list)
| true | true |
1c33c59268836c60403eb41bb948186d544dbbd4 | 21,742 | py | Python | RUNTIME/DNN/python/from_boris/b_dnn.py | subramon/qlu | 2fb8a2b3636dd11e2dfeae2a6477bd130316da47 | [
"MIT"
] | null | null | null | RUNTIME/DNN/python/from_boris/b_dnn.py | subramon/qlu | 2fb8a2b3636dd11e2dfeae2a6477bd130316da47 | [
"MIT"
] | 7 | 2020-07-29T16:48:25.000Z | 2020-09-26T23:47:22.000Z | RUNTIME/DNN/python/from_boris/b_dnn.py | subramon/qlu | 2fb8a2b3636dd11e2dfeae2a6477bd130316da47 | [
"MIT"
] | 1 | 2015-05-14T22:34:13.000Z | 2015-05-14T22:34:13.000Z | import h5py
import numpy as np
import pandas as pd
from PIL import Image
from sklearn.datasets import make_blobs
from sklearn.metrics import log_loss
from sklearn.preprocessing import MinMaxScaler
# ----------------------------------------------------------------------
# Preprocess data
# ----------------------------------------------------------------------
def get_data(debug=False):
    """Load the cat-vs-noncat H5 datasets, flatten and scale the images.

    Arguments:
        debug -- if True, display the third training image for a visual check

    Returns:
        (train_x, train_y, test_x, test_y) where each x is a
        (num_px * num_px * 3, m) array of pixel values scaled into [0, 1]
        and each y is a (1, m) row vector of labels.
    """
    train_dataset = h5py.File('./data/train_cat_vs_noncat.h5', 'r')
    train_x_orig = np.array(train_dataset['train_set_x'][:])
    train_y_orig = np.array(train_dataset['train_set_y'][:])
    test_dataset = h5py.File('./data/test_cat_vs_noncat.h5', 'r')
    test_x_orig = np.array(test_dataset['test_set_x'][:])
    test_y_orig = np.array(test_dataset['test_set_y'][:])
    if debug:
        Image.fromarray(train_x_orig[2]).show()
    # class names stored alongside the test set (printed below, not returned)
    classes = np.array(test_dataset['list_classes'][:])
    # reshape labels from (m,) into row vectors (1, m)
    train_y = train_y_orig.reshape((1, train_y_orig.shape[0]))
    test_y = test_y_orig.reshape((1, test_y_orig.shape[0]))
    num_px = train_x_orig.shape[1]
    print('Dataset dimensions:')
    print('Number of training examples:', train_x_orig.shape[0])
    print('Number of testing examples:', test_x_orig.shape[0])
    print('Images height and width:', num_px)
    print('Image size: (%s, %s, 3)' % (num_px, num_px))
    print('train_x shape:', train_x_orig.shape)
    print('train_y shape:', train_y.shape)
    print('test_x shape:', test_x_orig.shape)
    print('test_y shape:', test_y.shape)
    print('classes:', classes)
    # flatten each (num_px, num_px, 3) image into one column per example
    train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T
    test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T
    print('train_x_flatten shape:', train_x_flatten.shape)
    print('train_y shape:', train_y.shape)
    print('test_x_flatten shape:', test_x_flatten.shape)
    print('test_y shape:', test_y.shape)
    print('sanity check after reshaping:', train_x_flatten[0:5, 0])
    # scale pixel intensities from [0, 255] into [0, 1]
    train_x = train_x_flatten / 255.
    test_x = test_x_flatten / 255.
    return train_x, train_y, test_x, test_y
# ----------------------------------------------------------------------
# Define model
# ----------------------------------------------------------------------
def init_params(layers_dims):
    """Create scaled random weights and zero biases for every layer.

    Arguments:
        layers_dims -- sizes of all layers, input layer first

    Returns:
        dict mapping 'wi' to a (layers_dims[i], layers_dims[i-1]) weight
        matrix (randn scaled by 1/sqrt(fan_in)) and 'bi' to a
        (layers_dims[i], 1) zero bias vector, for i = 1..len(layers_dims)-1.
    """
    params = {}
    layer_pairs = zip(layers_dims, layers_dims[1:])
    for idx, (fan_in, fan_out) in enumerate(layer_pairs, start=1):
        # scale by 1/sqrt(fan_in) to keep activation variance in check
        params['w%s' % idx] = np.random.randn(fan_out, fan_in) / np.sqrt(fan_in)
        params['b%s' % idx] = np.zeros((fan_out, 1))
        assert params['w%s' % idx].shape == (fan_out, fan_in)
        assert params['b%s' % idx].shape == (fan_out, 1)
    return params
# ----------------------------------------------------------------------
# Forward propagation
# ----------------------------------------------------------------------
def sigmoid(z):
    """Elementwise logistic activation.

    Arguments:
        z -- pre-activation array, any shape

    Returns:
        a -- 1 / (1 + exp(-z)), same shape as z
        cache -- z itself, kept for the backward pass
    """
    activation = np.reciprocal(1. + np.exp(-z))
    assert activation.shape == z.shape
    return activation, z
def relu(z):
    """Elementwise rectified linear activation.

    Arguments:
        z -- pre-activation array, any shape

    Returns:
        a -- max(z, 0) elementwise, same shape as z
        cache -- z itself, kept for the backward pass
    """
    rectified = np.where(z > 0, z, 0)
    assert rectified.shape == z.shape
    return rectified, z
def softmax(z):
    """Columnwise softmax activation.

    Arguments:
        z -- pre-activation array of shape (k, m); each column holds the
             scores of one example

    Returns:
        a -- probabilities, same shape as z; every column sums to 1
        cache -- z itself, kept for the backward pass

    Note:
        Scores are shifted by each column's maximum before exponentiation.
        Softmax is invariant to a per-column shift, and this prevents
        np.exp from overflowing to inf (and the result from becoming nan)
        for large scores.
    """
    shifted = np.exp(z - z.max(axis=0, keepdims=True))
    a = shifted / shifted.sum(axis=0)
    # every column must be a proper probability distribution
    assert z.shape[1] == sum(np.isclose(a.sum(axis=0), 1))
    # to predict use
    # a = (a >= 0.5).astype(np.int)
    return a, z
def dense_layer_propagate(a, w, b):
    """Affine transform of one dense layer: z = w @ a + b.

    Arguments:
        a -- activations from the previous layer, shape (n_prev, m)
        w -- weight matrix, shape (n_curr, n_prev)
        b -- bias column vector, shape (n_curr, 1)

    Returns:
        z -- pre-activation output, shape (n_curr, m)
        cache -- the (a, w, b) inputs, kept for the backward pass
    """
    z = w.dot(a) + b
    cache = (a, w, b)
    assert z.shape == (w.shape[0], a.shape[1])
    return z, cache
def dense_activation_propagate(a_prev, w, b, activation):
    """Forward pass through one dense layer followed by its activation.

    Arguments:
        a_prev -- activations from the previous layer, shape (n_prev, m)
        w -- weight matrix, shape (n_curr, n_prev)
        b -- bias column vector, shape (n_curr, 1)
        activation -- 'sigmoid', 'relu' or 'softmax'

    Returns:
        a -- post-activation output, shape (n_curr, m)
        cache -- ((a_prev, w, b), z) pair for the backward pass

    Raises:
        ValueError -- if activation is not one of the supported names.
    """
    z, dense_cache = dense_layer_propagate(a_prev, w, b)
    if activation == 'sigmoid':
        a, activation_cache = sigmoid(z)
    elif activation == 'relu':
        a, activation_cache = relu(z)
    elif activation == 'softmax':
        a, activation_cache = softmax(z)
    else:
        # fail loudly instead of hitting an UnboundLocalError below
        raise ValueError('Unexpected activation function: %s' % activation)
    # a_prev.shape[1] gives the number of examples
    assert (a.shape == (w.shape[0], a_prev.shape[1]))
    return a, (dense_cache, activation_cache)
def foreword_propagate(x, params, activation, y_dim):
    """Full forward pass: [dense-relu] * (n-1) -> dense-<activation>.

    Note: prints every layer's cache and activation on each call, so this
    is only suitable for small debugging runs.

    Arguments:
        x -- input data, shape (input size, number of examples)
        params -- dict of 'wi'/'bi' arrays, output of init_params()
        activation -- activation function for the last layer
                      ('sigmoid' or 'softmax')
        y_dim -- expected number of output rows, used for a shape check

    Returns:
        y_hat -- last post-activation value, shape (y_dim, x.shape[1])
        caches -- list containing:
            caches of dense-relu layers indexed 0 to n-2
            cache of the dense output layer indexed n-1
    """
    caches = []
    a = x
    n_layers = len(params) // 2  # params holds one w and one b per layer
    print('-' * 40)
    # hidden layers: linear-relu * (n-1), caching each layer's inputs
    for i in range(1, n_layers):
        a_prev = a
        wi = params['w' + str(i)]
        bi = params['b' + str(i)]
        a, cache = dense_activation_propagate(a_prev, wi, bi, activation='relu')
        print('layer:', i)
        print('z:', cache)
        print('a:', a)
        print('-' * 40)
        caches.append(cache)
    # output layer: linear-sigmoid or linear-softmax
    wi = params['w%s' % n_layers]
    bi = params['b%s' % n_layers]
    y_hat, cache = dense_activation_propagate(a, wi, bi, activation=activation)
    print('output layer:')
    print('z:', cache)
    print('a:', y_hat)
    print('-' * 40)
    caches.append(cache)
    assert (y_hat.shape == (y_dim, x.shape[1]))
    return y_hat, caches
# ----------------------------------------------------------------------
# Compute cost -- log_loss
# ----------------------------------------------------------------------
def comp_cost(y_hat, y, activation, epsilon=1e-15):
    """Cross-entropy cost of the predictions.

    Arguments:
        y_hat -- predicted probabilities, shape (1, m) for 'sigmoid',
                 (k, m) for 'softmax'
        y -- true "label" array matching y_hat's layout
        activation -- output-layer activation: 'sigmoid' or 'softmax'
        epsilon -- clipping constant, kept for interface compatibility
                   (unused; sklearn's log_loss does its own clipping)

    Returns:
        cost -- scalar cross-entropy cost

    Raises:
        AttributeError -- if activation is not supported.

    Note: experimental, use sklearn.metrics.log_loss instead.
    """
    if activation == 'sigmoid':
        # binary cross entropy:
        # -(1/m) * sum(y*log(y_hat) + (1-y)*log(1-y_hat))
        n_examples = y.shape[1]
        log_likelihood = (np.dot(y, np.log(y_hat).T)
                          + np.dot(1 - y, np.log(1 - y_hat).T))
        cost = np.squeeze(-log_likelihood / n_examples)  # [[c]] -> c
        assert cost.shape == ()
        return cost
    if activation == 'softmax':
        # categorical cross entropy, delegated to scikit-learn:
        # -1/N * sum_i(sum_j t_ij * log(p_ij))
        return log_loss(y, y_hat)
    raise AttributeError('Unexpected activation function:', activation)
# ----------------------------------------------------------------------
# Back propagate
# ----------------------------------------------------------------------
def sigmoid_back_propagate(da, cache):
    """Gradient of the cost wrt z for a single sigmoid unit.

    Arguments:
        da -- gradient of the cost wrt the sigmoid output, any shape
        cache -- z stored during the forward pass

    Returns:
        dz -- da * sigmoid'(z), same shape as z
    """
    z = cache
    sig = 1 / (1 + np.exp(-z))
    grad = sig * (1 - sig) * da  # sigmoid'(z) = s * (1 - s)
    assert grad.shape == z.shape
    assert da.shape == z.shape
    return grad
def softmax_back_propagate(da, cache):
    """Elementwise gradient through a softmax unit: dz = da * (1 - softmax(z)).

    Arguments:
        da -- post-activation gradient, any shape
        cache -- z stored during the forward pass

    Returns:
        dz -- same shape as z
    """
    z = cache
    probs = np.exp(z) / np.exp(z).sum()
    grad = (1 - probs) * da
    assert grad.shape == z.shape
    return grad
def relu_back_propagate(da, cache):
    """Gradient of the cost wrt z for a single relu unit.

    Arguments:
        da -- post-activation gradient, any shape
        cache -- z stored during the forward pass

    Returns:
        dz -- da where z > 0, zero elsewhere; same shape as z
    """
    z = cache
    # relu only passes gradient where it was active during the forward pass
    grad = np.where(z > 0, da, np.zeros_like(da))
    assert grad.shape == z.shape
    return grad
def dense_back_propagate(dz, cache):
    """Gradients of one dense (affine) layer.

    Arguments:
        dz -- gradient of the cost wrt the layer output z, shape (n_curr, m)
        cache -- (a_prev, w, b) saved during the forward pass

    Returns:
        da_prev -- gradient wrt the previous activations, same shape as a_prev
        dw -- gradient wrt w, averaged over the batch, same shape as w
        db -- gradient wrt b, averaged over the batch, same shape as b
    """
    a_prev, w, b = cache
    batch = a_prev.shape[1]
    dw = np.dot(dz, a_prev.T) / batch
    db = np.sum(dz, axis=1, keepdims=True) / batch
    da_prev = w.T.dot(dz)
    assert da_prev.shape == a_prev.shape
    assert dw.shape == w.shape
    assert db.shape == b.shape
    return da_prev, dw, db
def dense_activation_back_propagate(da, cache, activation):
    """Backward pass through one dense layer and its activation.

    Arguments:
        da -- gradient wrt this layer's activation output; for 'softmax',
              back_propagate() already passes in dz = y_hat - y, so it is
              forwarded unchanged
        cache -- (dense_cache, activation_cache) from the forward pass
        activation -- 'relu', 'sigmoid' or 'softmax'

    Returns:
        da_prev -- gradient wrt the previous layer's activation
        dw -- gradient wrt this layer's weights, same shape as w
        db -- gradient wrt this layer's bias, same shape as b

    Raises:
        ValueError -- if activation is not one of the supported names.
    """
    dense_cache, a_cache = cache
    if activation == 'relu':
        dz = relu_back_propagate(da, a_cache)
    elif activation == 'sigmoid':
        dz = sigmoid_back_propagate(da, a_cache)
    elif activation == 'softmax':
        # da is already the gradient wrt z (y_hat - y); nothing to apply
        dz = da
    else:
        # fail loudly instead of hitting an UnboundLocalError below
        raise ValueError('Unexpected activation function: %s' % activation)
    da_prev, dw, db = dense_back_propagate(dz, dense_cache)
    return da_prev, dw, db
def back_propagate(y_hat, y, caches, activation):
    """Backprop through the linear-relu * (n-1) -> linear-<activation> model.

    Arguments:
        y_hat -- probability predictions from foreword_propagate()
        y -- true "label" array, reshaped to match y_hat
        caches -- per-layer caches from foreword_propagate()
        activation -- output-layer activation ('sigmoid' or 'softmax')

    Returns:
        grads -- dictionary with 'da{i}', 'dw{i}', 'db{i}' for layers
                 i = 1..n (1-based); 'da{i}' holds the gradient passed back
                 out of layer i, i.e. wrt the previous layer's activation

    Raises:
        ValueError -- if activation is not supported.
    """
    y = y.reshape(y_hat.shape)
    grads = {}
    if activation == 'sigmoid':
        # derivative of the cost wrt the output activation (binary case)
        da = - (np.divide(y, y_hat) - np.divide(1 - y, 1 - y_hat))
    elif activation == 'softmax':
        # multi-class case: unlike sigmoid, this is the derivative of the
        # cost wrt the *input* of softmax, not wrt its output;
        # dense_activation_back_propagate forwards it unchanged
        da = y_hat - y
    else:
        raise ValueError('Unexpected activation function:', activation)
    # output layer (index n, 1-based): dense-<activation> gradients
    n = len(caches)
    c = caches[n-1]
    grads['da%s' % n], grads['dw%s' % n], grads['db%s' % n] = (
        dense_activation_back_propagate(da, c, activation=activation))
    # hidden layers, walking backwards; caches[i] belongs to layer i+1,
    # whose upstream gradient was stored under 'da{i+2}' in the prior step
    for i in reversed(range(n - 1)):
        c = caches[i]
        da_prev_temp, dw_temp, db_temp = dense_activation_back_propagate(
            grads['da%s' % (i+2)], c, activation="relu")
        grads['da%s' % (i+1)] = da_prev_temp
        grads['dw%s' % (i+1)] = dw_temp
        grads['db%s' % (i+1)] = db_temp
    return grads
def update_parameters(params, grads, alpha):
    """One gradient-descent step over every layer's weights and biases.

    Arguments:
        params -- dict with 'w1', 'b1', ..., 'wn', 'bn'
        grads -- dict with matching 'dw1', 'db1', ... gradients
        alpha -- learning rate

    Returns:
        params -- the same dict object with every entry replaced by
                  param - alpha * grad
    """
    n_layers = len(params) // 2
    for layer in range(1, n_layers + 1):
        for prefix in ('w', 'b'):
            key = '%s%s' % (prefix, layer)
            params[key] = params[key] - alpha * grads['d%s' % key]
    return params
def sequential_model(
        x, y, layers_dims, alpha=0.0075, n_iters=3000, debug=False):
    """Train a multilayer NN: linear-relu * (n-1) -> linear-sigmoid/softmax.

    The output activation is chosen from the label shape: a single label row
    selects 'sigmoid' (binary), anything else selects 'softmax'.

    Arguments:
        x -- input data, shape (num_px * num_px * 3, number of examples)
        y -- true "label" array, shape (k, number of examples)
        layers_dims -- input size followed by every layer size,
                       length (# of layers + 1)
        alpha -- learning rate of the gradient descent update rule
        n_iters -- number of iterations of the optimization loop
        debug -- if True, prints and records the cost every 100 steps

    Returns:
        params -- learned parameters used for prediction
    """
    costs = []
    params = init_params(layers_dims)
    activation = 'sigmoid' if y.shape[0] == 1 else 'softmax'
    # gradient descent loop: forward pass, cost, backward pass, update
    for i in range(0, n_iters):
        ai, caches = foreword_propagate(x, params, activation, layers_dims[-1])
        cost = comp_cost(ai, y, activation)
        grads = back_propagate(ai, y, caches, activation)
        params = update_parameters(params, grads, alpha)
        if debug and i % 100 == 0:
            print('Cost after iteration %i: %f' % (i, cost))
        if debug and i % 100 == 0:
            costs.append(cost)
    def plot_cost():
        # plotting is stubbed out; the matplotlib calls below are kept for
        # reference but are never executed
        return True
        # plt.plot(np.squeeze(costs))
        # plt.ylabel('cost')
        # plt.xlabel('iterations (per tens)')
        # plt.title('Learning rate =' + str(learning_rate))
        # plt.show()
    if debug:
        plot_cost()
    return params
def test_dnn():
    """Smoke-test the forward pass on a tiny random 3-example problem.

    Builds a fixed-seed (10, 3) input, MinMax-scales it, draws binary
    labels, and prints the network output; the expected transcript is
    recorded in the literal below this function's body.
    """
    layers_dims = [10, 4, 2, 1]
    np.random.seed(42)
    x = np.random.randn(30).reshape((10, 3))  # 10 features, 3 examples
    scaler = MinMaxScaler()
    x = scaler.fit_transform(x)
    print('x shape:', x.shape)
    # (10, 3)
    y = np.random.randint(0, 2, 3)
    y = y.reshape((1, 3))  # labels as a (1, m) row vector
    print('y shape:', y.shape)
    # (1, 3)
    params = init_params(layers_dims)
    activation = 'sigmoid'
    y_hat, caches = foreword_propagate(x, params, activation, layers_dims[-1])
    print(y_hat)
'''
x = array([[ 0.49671415, -0.1382643 , 0.64768854],
[ 1.52302986, -0.23415337, -0.23413696],
[ 1.57921282, 0.76743473, -0.46947439],
[ 0.54256004, -0.46341769, -0.46572975],
[ 0.24196227, -1.91328024, -1.72491783],
[-0.56228753, -1.01283112, 0.31424733],
[-0.90802408, -1.4123037 , 1.46564877],
[-0.2257763 , 0.0675282 , -1.42474819],
[-0.54438272, 0.11092259, -1.15099358],
[ 0.37569802, -0.60063869, -0.29169375]])
y = array([[1, 0, 1]])
params = {
'b1': array([[0.],
[0.],
[0.],
[0.]]),
'b2': array([[0.],
[0.]]),
'b3': array([[0.]]),
'w1': array([[ 0.17511345, -0.47971962, -0.30251271, -0.32758364, -0.15845926,
0.13971159, -0.25937964, 0.21091907, 0.04563044, 0.23632542],
[-0.10095298, -0.19570727, 0.34871516, -0.58248266, 0.12900959,
0.29941416, 0.1690164 , -0.06477899, -0.08915248, 0.00968901],
[-0.22156274, 0.21357835, 0.02842162, -0.19919548, 0.33684907,
-0.21418677, 0.44400973, -0.39859007, -0.13523984, -0.05911348],
[-0.72570658, 0.19094223, -0.05694645, 0.05892507, 0.04916247,
-0.04978276, -0.14645337, 0.20778173, -0.4079519 , -0.04742307]]),
'w2': array([[-0.32146246, 0.11706767, -0.18786398, 0.20685326],
[ 0.61687454, -0.21195547, 0.51735934, -0.35066345]]),
'w3': array([[ 0.6328142 , -1.27748553]])}
layer: 1
z: ((array([[ 0.49671415, -0.1382643 , 0.64768854],
[ 1.52302986, -0.23415337, -0.23413696],
[ 1.57921282, 0.76743473, -0.46947439],
[ 0.54256004, -0.46341769, -0.46572975],
[ 0.24196227, -1.91328024, -1.72491783],
[-0.56228753, -1.01283112, 0.31424733],
[-0.90802408, -1.4123037 , 1.46564877],
[-0.2257763 , 0.0675282 , -1.42474819],
[-0.54438272, 0.11092259, -1.15099358],
[ 0.37569802, -0.60063869, -0.29169375]]), array([[ 0.17511345, -0.47971962, -0.30251271, -0.32758364, -0.15845926,
0.13971159, -0.25937964, 0.21091907, 0.04563044, 0.23632542],
[-0.10095298, -0.19570727, 0.34871516, -0.58248266, 0.12900959,
0.29941416, 0.1690164 , -0.06477899, -0.08915248, 0.00968901],
[-0.22156274, 0.21357835, 0.02842162, -0.19919548, 0.33684907,
-0.21418677, 0.44400973, -0.39859007, -0.13523984, -0.05911348],
[-0.72570658, 0.19094223, -0.05694645, 0.05892507, 0.04916247,
-0.04978276, -0.14645337, 0.20778173, -0.4079519 , -0.04742307]]), array([[0.],
[0.],
[0.],
[0.]])), array([[-1.16416195, 0.41311912, 0.03543866],
[-0.33736273, -0.2115404 , 0.39936218],
[ 0.09221453, -0.96629303, 0.62912924],
[ 0.20260571, 0.14508069, -0.64319486]]))
a: [[0. 0.41311912 0.03543866]
[0. 0. 0.39936218]
[0.09221453 0. 0.62912924]
[0.20260571 0.14508069 0. ]]
----------------------------------------
layer: 2
z: ((array([[0. , 0.41311912, 0.03543866],
[0. , 0. , 0.39936218],
[0.09221453, 0. , 0.62912924],
[0.20260571, 0.14508069, 0. ]]), array([[-0.32146246, 0.11706767, -0.18786398, 0.20685326],
[ 0.61687454, -0.21195547, 0.51735934, -0.35066345]]), array([[0.],
[0.]])), array([[ 0.02458586, -0.10279187, -0.08283052],
[-0.02333837, 0.20396817, 0.26270009]]))
a: [[0.02458586 0. 0. ]
[0. 0.20396817 0.26270009]]
----------------------------------------
output layer:
z: ((array([[0.02458586, 0. , 0. ],
[0. , 0.20396817, 0.26270009]]), array([[ 0.6328142 , -1.27748553]]), array([[0.]])), array([[ 0.01555828, -0.26056638, -0.33559556]]))
a: [[0.50388949 0.43522448 0.41687976]]
----------------------------------------
y_hat = array([[0.50388949, 0.43522448, 0.41687976]])
'''
if __name__ == '__main__':
    # fixed seed so the random weight init (and thus the run) is reproducible
    np.random.seed(1)
    train_x, train_y, test_x, test_y = get_data()
    if False:
        # disabled variant: two-column labels (label / complement) so that
        # sequential_model selects the softmax output layer instead of sigmoid
        layers_dims = [12288, 20, 7, 5, 2]
        df = pd.DataFrame(data=train_y[0], columns=['yt'])
        df['yc'] = 1 - df.yt
        train_y = df.values.T
        print(train_y.shape)
    else:
        # binary classifier: 12288 = 64 * 64 * 3 flattened pixels in,
        # one sigmoid unit out
        layers_dims = [12288, 20, 7, 5, 1]
    fit_params = sequential_model(
        train_x, train_y, layers_dims, n_iters=2500, debug=True)
| 33.76087 | 157 | 0.554641 | import h5py
import numpy as np
import pandas as pd
from PIL import Image
from sklearn.datasets import make_blobs
from sklearn.metrics import log_loss
from sklearn.preprocessing import MinMaxScaler
def get_data(debug=False):
train_dataset = h5py.File('./data/train_cat_vs_noncat.h5', 'r')
train_x_orig = np.array(train_dataset['train_set_x'][:])
train_y_orig = np.array(train_dataset['train_set_y'][:])
test_dataset = h5py.File('./data/test_cat_vs_noncat.h5', 'r')
test_x_orig = np.array(test_dataset['test_set_x'][:])
test_y_orig = np.array(test_dataset['test_set_y'][:])
if debug:
Image.fromarray(train_x_orig[2]).show()
classes = np.array(test_dataset['list_classes'][:])
train_y = train_y_orig.reshape((1, train_y_orig.shape[0]))
test_y = test_y_orig.reshape((1, test_y_orig.shape[0]))
num_px = train_x_orig.shape[1]
print('Dataset dimensions:')
print('Number of training examples:', train_x_orig.shape[0])
print('Number of testing examples:', test_x_orig.shape[0])
print('Images height and width:', num_px)
print('Image size: (%s, %s, 3)' % (num_px, num_px))
print('train_x shape:', train_x_orig.shape)
print('train_y shape:', train_y.shape)
print('test_x shape:', test_x_orig.shape)
print('test_y shape:', test_y.shape)
print('classes:', classes)
train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T
test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T
print('train_x_flatten shape:', train_x_flatten.shape)
print('train_y shape:', train_y.shape)
print('test_x_flatten shape:', test_x_flatten.shape)
print('test_y shape:', test_y.shape)
print('sanity check after reshaping:', train_x_flatten[0:5, 0])
train_x = train_x_flatten / 255.
test_x = test_x_flatten / 255.
return train_x, train_y, test_x, test_y
def init_params(layers_dims):
params = {}
for n in range(1, len(layers_dims)):
w = 'w%s' % n
params[w] = np.random.randn(
layers_dims[n], layers_dims[n-1])
params[w] /= np.sqrt(layers_dims[n-1])
b = 'b%s' % n
params[b] = np.zeros((layers_dims[n], 1))
assert params[w].shape == (layers_dims[n], layers_dims[n - 1])
assert params[b].shape == (layers_dims[n], 1)
return params
def sigmoid(z):
a = 1 / (1 + np.exp(-z))
assert a.shape == z.shape
return a, z
def relu(z):
a = np.maximum(0, z)
assert a.shape == z.shape
return a, z
def softmax(z):
a = np.exp(z) / np.exp(z).sum(axis=0)
assert z.shape[1] == sum(np.isclose(a.sum(axis=0), 1))
return a, z
def dense_layer_propagate(a, w, b):
z = np.dot(w, a) + b
assert z.shape == (w.shape[0], a.shape[1])
return z, (a, w, b)
def dense_activation_propagate(a_prev, w, b, activation):
z, dense_cache = dense_layer_propagate(a_prev, w, b)
if activation == 'sigmoid':
a, activation_cache = sigmoid(z)
elif activation == 'relu':
a, activation_cache = relu(z)
elif activation == 'softmax':
a, activation_cache = softmax(z)
assert (a.shape == (w.shape[0], a_prev.shape[1]))
return a, (dense_cache, activation_cache)
def foreword_propagate(x, params, activation, y_dim):
caches = []
a = x
n_layers = len(params) // 2
print('-' * 40)
for i in range(1, n_layers):
a_prev = a
wi = params['w' + str(i)]
bi = params['b' + str(i)]
a, cache = dense_activation_propagate(a_prev, wi, bi, activation='relu')
print('layer:', i)
print('z:', cache)
print('a:', a)
print('-' * 40)
caches.append(cache)
wi = params['w%s' % n_layers]
bi = params['b%s' % n_layers]
y_hat, cache = dense_activation_propagate(a, wi, bi, activation=activation)
print('output layer:')
print('z:', cache)
print('a:', y_hat)
print('-' * 40)
caches.append(cache)
assert (y_hat.shape == (y_dim, x.shape[1]))
return y_hat, caches
def comp_cost(y_hat, y, activation, epsilon=1e-15):
if activation == 'sigmoid':
m = y.shape[1]
cost = np.dot(y, np.log(y_hat).T) + np.dot((1 - y), np.log(1 - y_hat).T)
cost = (-1. / m) * cost
cost = np.squeeze(cost)
assert (cost.shape == ())
elif activation == 'softmax':
"""
Computes x-entropy between y (encoded as one-hot vectors) and y_hat.
Arguments:
y_hat -- predictions, array (n, k), (# of examples, # of categories)
y -- true 'label' np.array (n, k) (# of examples, # of categories)
Returns:
cost -- categorical cross entropy cost
Algorithm:
-1./N * sum_i(sum_j t_ij * log(p_ij)), i=1..len(y), j=1..k
y_hat = np.clip(y_hat, epsilon, 1. - epsilon)
-np.sum(y * np.log(y_hat + epsilog)) / y_hat.shape[0]
"""
cost = log_loss(y, y_hat)
else:
raise AttributeError('Unexpected activation function:', activation)
return cost
def sigmoid_back_propagate(da, cache):
z = cache
s = 1 / (1 + np.exp(-z))
dz = da * s * (1 - s)
assert (dz.shape == z.shape)
assert (da.shape == z.shape)
return dz
def softmax_back_propagate(da, cache):
z = cache
y_hat = np.exp(z) / np.exp(z).sum()
dz = da * (1 - y_hat)
assert (dz.shape == z.shape)
return dz
def relu_back_propagate(da, cache):
z = cache
dz = np.array(da, copy=True)
dz[z <= 0] = 0.
assert (dz.shape == z.shape)
return dz
def dense_back_propagate(dz, cache):
a_prev, w, b = cache
m = a_prev.shape[1]
dw = (1. / m) * np.dot(dz, a_prev.T)
db = (1. / m) * np.sum(dz, axis=1, keepdims=True)
da_prev = np.dot(w.T, dz)
assert (da_prev.shape == a_prev.shape)
assert (dw.shape == w.shape)
assert (db.shape == b.shape)
return da_prev, dw, db
def dense_activation_back_propagate(da, cache, activation):
dense_cache, a_cache = cache
if activation == 'relu':
dz = relu_back_propagate(da, a_cache)
elif activation == 'sigmoid':
dz = sigmoid_back_propagate(da, a_cache)
elif activation == 'softmax':
dz = da
da_prev, dw, db = dense_back_propagate(dz, dense_cache)
return da_prev, dw, db
def back_propagate(y_hat, y, caches, activation):
y = y.reshape(y_hat.shape)
grads = {}
if activation == 'sigmoid':
da = - (np.divide(y, y_hat) - np.divide(1 - y, 1 - y_hat))
elif activation == 'softmax':
da = y_hat - y
else:
raise ValueError('Unexpected activation function:', activation)
n = len(caches)
c = caches[n-1]
grads['da%s' % n], grads['dw%s' % n], grads['db%s' % n] = (
dense_activation_back_propagate(da, c, activation=activation))
for i in reversed(range(n - 1)):
c = caches[i]
da_prev_temp, dw_temp, db_temp = dense_activation_back_propagate(
grads['da%s' % (i+2)], c, activation="relu")
grads['da%s' % (i+1)] = da_prev_temp
grads['dw%s' % (i+1)] = dw_temp
grads['db%s' % (i+1)] = db_temp
return grads
def update_parameters(params, grads, alpha):
n_layers = len(params) // 2
for i in range(n_layers):
params['w%s' % (i+1)] = (
params['w%s' % (i+1)] - alpha * grads['dw%s' % (i+1)])
params['b%s' % (i+1)] = (
params['b%s' % (i+1)] - alpha * grads['db%s' % (i+1)])
return params
def sequential_model(
x, y, layers_dims, alpha=0.0075, n_iters=3000, debug=False):
costs = []
params = init_params(layers_dims)
activation = 'sigmoid' if y.shape[0] == 1 else 'softmax'
for i in range(0, n_iters):
ai, caches = foreword_propagate(x, params, activation, layers_dims[-1])
cost = comp_cost(ai, y, activation)
grads = back_propagate(ai, y, caches, activation)
params = update_parameters(params, grads, alpha)
if debug and i % 100 == 0:
print('Cost after iteration %i: %f' % (i, cost))
if debug and i % 100 == 0:
costs.append(cost)
def plot_cost():
return True
if debug:
plot_cost()
return params
def test_dnn():
layers_dims = [10, 4, 2, 1]
np.random.seed(42)
x = np.random.randn(30).reshape((10, 3))
scaler = MinMaxScaler()
x = scaler.fit_transform(x)
print('x shape:', x.shape)
y = np.random.randint(0, 2, 3)
y = y.reshape((1, 3))
print('y shape:', y.shape)
params = init_params(layers_dims)
activation = 'sigmoid'
y_hat, caches = foreword_propagate(x, params, activation, layers_dims[-1])
print(y_hat)
if __name__ == '__main__':
np.random.seed(1)
train_x, train_y, test_x, test_y = get_data()
if False:
layers_dims = [12288, 20, 7, 5, 2]
df = pd.DataFrame(data=train_y[0], columns=['yt'])
df['yc'] = 1 - df.yt
train_y = df.values.T
print(train_y.shape)
else:
layers_dims = [12288, 20, 7, 5, 1]
fit_params = sequential_model(
train_x, train_y, layers_dims, n_iters=2500, debug=True)
| true | true |
1c33c6174cf391981bca1a303fe544f975041755 | 401 | py | Python | Protinx_blog/Protinx_blog/wsgi.py | Protinx/Protinx_blog | 9f787d483cadfb40821e5374b773f789130c9b5c | [
"MIT"
] | null | null | null | Protinx_blog/Protinx_blog/wsgi.py | Protinx/Protinx_blog | 9f787d483cadfb40821e5374b773f789130c9b5c | [
"MIT"
] | null | null | null | Protinx_blog/Protinx_blog/wsgi.py | Protinx/Protinx_blog | 9f787d483cadfb40821e5374b773f789130c9b5c | [
"MIT"
] | null | null | null | """
WSGI config for Protinx_blog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings unless the environment already
# names a settings module.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Protinx_blog.settings')
# Module-level WSGI callable that WSGI servers import (see module docstring).
application = get_wsgi_application()
| 23.588235 | 78 | 0.790524 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Protinx_blog.settings')
application = get_wsgi_application()
| true | true |
1c33c885d55663f1bbd4cc1fe9b47c5602907c1b | 4,484 | py | Python | deep3dmap/datasets/pipelines/test_time_aug.py | achao2013/DeepRecon | 1c9b0480710212e1fe86ab75dcf0b30bd9f654e7 | [
"Apache-2.0"
] | 30 | 2022-02-05T18:35:27.000Z | 2022-02-09T09:14:41.000Z | deep3dmap/datasets/pipelines/test_time_aug.py | achao2013/DeepRecon | 1c9b0480710212e1fe86ab75dcf0b30bd9f654e7 | [
"Apache-2.0"
] | null | null | null | deep3dmap/datasets/pipelines/test_time_aug.py | achao2013/DeepRecon | 1c9b0480710212e1fe86ab75dcf0b30bd9f654e7 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import deep3dmap
from ..builder import PIPELINES
from .compose import Compose
@PIPELINES.register_module()
class MultiScaleFlipAug:
    """Test-time augmentation with multiple scales and flipping.
    An example configuration is as followed:
    .. code-block::
        img_scale=[(1333, 400), (1333, 800)],
        flip=True,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ]
    After MultiScaleFLipAug with above configuration, the results are wrapped
    into lists of the same length as followed:
    .. code-block::
        dict(
            img=[...],
            img_shape=[...],
            scale=[(1333, 400), (1333, 400), (1333, 800), (1333, 800)]
            flip=[False, True, False, True]
            ...
        )
    Args:
        transforms (list[dict]): Transforms to apply in each augmentation.
        img_scale (tuple | list[tuple] | None): Images scales for resizing.
        scale_factor (float | list[float] | None): Scale factors for resizing.
        flip (bool): Whether apply flip augmentation. Default: False.
        flip_direction (str | list[str]): Flip augmentation directions,
            options are "horizontal", "vertical" and "diagonal". If
            flip_direction is a list, multiple flip augmentations will be
            applied. It has no effect when flip == False. Default:
            "horizontal".
    """
    def __init__(self,
                 transforms,
                 img_scale=None,
                 scale_factor=None,
                 flip=False,
                 flip_direction='horizontal'):
        self.transforms = Compose(transforms)
        # exactly one of img_scale / scale_factor must be provided (XOR)
        assert (img_scale is None) ^ (scale_factor is None), (
            'Must have but only one variable can be setted')
        if img_scale is not None:
            # normalize to a list of (h, w) tuples; forwarded under 'scale'
            self.img_scale = img_scale if isinstance(img_scale,
                                                     list) else [img_scale]
            self.scale_key = 'scale'
            assert deep3dmap.is_list_of(self.img_scale, tuple)
        else:
            # scale factors are forwarded under 'scale_factor' instead
            self.img_scale = scale_factor if isinstance(
                scale_factor, list) else [scale_factor]
            self.scale_key = 'scale_factor'
        self.flip = flip
        self.flip_direction = flip_direction if isinstance(
            flip_direction, list) else [flip_direction]
        assert deep3dmap.is_list_of(self.flip_direction, str)
        # warn on config combinations that silently do nothing
        if not self.flip and self.flip_direction != ['horizontal']:
            warnings.warn(
                'flip_direction has no effect when flip is set to False')
        if (self.flip
                and not any([t['type'] == 'RandomFlip' for t in transforms])):
            warnings.warn(
                'flip has no effect when RandomFlip is not in transforms')
    def __call__(self, results):
        """Call function to apply test time augment transforms on results.
        Args:
            results (dict): Result dict contains the data to transform.
        Returns:
           dict[str: list]: The augmented data, where each value is wrapped
               into a list.
        """
        aug_data = []
        # (False, None) keeps an un-flipped variant for every scale
        flip_args = [(False, None)]
        if self.flip:
            flip_args += [(True, direction)
                          for direction in self.flip_direction]
        # one transformed copy per (scale, flip) combination
        for scale in self.img_scale:
            for flip, direction in flip_args:
                _results = results.copy()
                _results[self.scale_key] = scale
                _results['flip'] = flip
                _results['flip_direction'] = direction
                data = self.transforms(_results)
                aug_data.append(data)
        # list of dict to dict of list, keyed by the first result's keys
        aug_data_dict = {key: [] for key in aug_data[0]}
        for data in aug_data:
            for key, val in data.items():
                aug_data_dict[key].append(val)
        return aug_data_dict
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(transforms={self.transforms}, '
        repr_str += f'img_scale={self.img_scale}, flip={self.flip}, '
        repr_str += f'flip_direction={self.flip_direction})'
        return repr_str
| 36.754098 | 78 | 0.575825 |
import warnings
import deep3dmap
from ..builder import PIPELINES
from .compose import Compose
@PIPELINES.register_module()
class MultiScaleFlipAug:
    """Test-time augmentation: apply ``transforms`` at multiple image scales,
    optionally with flipping, and collate the results.

    Exactly one of ``img_scale`` and ``scale_factor`` must be supplied.

    Args:
        transforms (list[dict]): Transform configs applied to each augmented
            copy of the input (composed via ``Compose``).
        img_scale (tuple | list[tuple] | None): Image scale(s) used for
            resizing. Mutually exclusive with ``scale_factor``.
        scale_factor (float | list[float] | None): Scale factor(s) used for
            resizing. Mutually exclusive with ``img_scale``.
        flip (bool): Whether to additionally apply flipped augmentations.
            Default: False.
        flip_direction (str | list[str]): Flip direction(s), e.g.
            'horizontal'. Only used when ``flip`` is True.
            Default: 'horizontal'.
    """

    def __init__(self,
                 transforms,
                 img_scale=None,
                 scale_factor=None,
                 flip=False,
                 flip_direction='horizontal'):
        self.transforms = Compose(transforms)
        # XOR: exactly one of img_scale / scale_factor may be provided.
        assert (img_scale is None) ^ (scale_factor is None), (
            'Exactly one of img_scale and scale_factor must be set')
        if img_scale is not None:
            # Normalize to a list of scale tuples; resizing keyed by 'scale'.
            self.img_scale = img_scale if isinstance(img_scale,
                                                     list) else [img_scale]
            self.scale_key = 'scale'
            assert deep3dmap.is_list_of(self.img_scale, tuple)
        else:
            # Normalize to a list of factors; keyed by 'scale_factor'.
            self.img_scale = scale_factor if isinstance(
                scale_factor, list) else [scale_factor]
            self.scale_key = 'scale_factor'
        self.flip = flip
        self.flip_direction = flip_direction if isinstance(
            flip_direction, list) else [flip_direction]
        assert deep3dmap.is_list_of(self.flip_direction, str)
        if not self.flip and self.flip_direction != ['horizontal']:
            warnings.warn(
                'flip_direction has no effect when flip is set to False')
        if (self.flip
                and not any(t['type'] == 'RandomFlip' for t in transforms)):
            warnings.warn(
                'flip has no effect when RandomFlip is not in transforms')

    def __call__(self, results):
        """Apply test-time augmentation to ``results``.

        Args:
            results (dict): Result dict containing the data to augment.

        Returns:
            dict[str, list]: The augmented data, where each value is wrapped
            into a list with one entry per (scale, flip) combination.
        """
        aug_data = []
        # (flip, direction) pairs: always include the un-flipped case, plus
        # one entry per configured flip direction when flipping is enabled.
        flip_args = [(False, None)]
        if self.flip:
            flip_args += [(True, direction)
                          for direction in self.flip_direction]
        for scale in self.img_scale:
            for flip, direction in flip_args:
                _results = results.copy()
                _results[self.scale_key] = scale
                _results['flip'] = flip
                _results['flip_direction'] = direction
                data = self.transforms(_results)
                aug_data.append(data)
        # Convert the list of per-augmentation dicts into a dict of lists.
        aug_data_dict = {key: [] for key in aug_data[0]}
        for data in aug_data:
            for key, val in data.items():
                aug_data_dict[key].append(val)
        return aug_data_dict

    def __repr__(self):
        """Return a string summary of the configured augmentations."""
        repr_str = self.__class__.__name__
        repr_str += f'(transforms={self.transforms}, '
        repr_str += f'img_scale={self.img_scale}, flip={self.flip}, '
        repr_str += f'flip_direction={self.flip_direction})'
        return repr_str
| true | true |
1c33c9ee2203a2b57aac43d54040102017079a16 | 15,467 | py | Python | google/ads/google_ads/v1/proto/common/criterion_category_availability_pb2.py | jiulongw/google-ads-python | 6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e | [
"Apache-2.0"
] | 1 | 2019-11-30T23:42:39.000Z | 2019-11-30T23:42:39.000Z | google/ads/google_ads/v1/proto/common/criterion_category_availability_pb2.py | jiulongw/google-ads-python | 6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v1/proto/common/criterion_category_availability_pb2.py | jiulongw/google-ads-python | 6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e | [
"Apache-2.0"
] | 1 | 2020-03-13T00:14:31.000Z | 2020-03-13T00:14:31.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v1/proto/common/criterion_category_availability.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v1.proto.enums import advertising_channel_sub_type_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_enums_dot_advertising__channel__sub__type__pb2
from google.ads.google_ads.v1.proto.enums import advertising_channel_type_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_enums_dot_advertising__channel__type__pb2
from google.ads.google_ads.v1.proto.enums import criterion_category_channel_availability_mode_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_enums_dot_criterion__category__channel__availability__mode__pb2
from google.ads.google_ads.v1.proto.enums import criterion_category_locale_availability_mode_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_enums_dot_criterion__category__locale__availability__mode__pb2
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v1/proto/common/criterion_category_availability.proto',
package='google.ads.googleads.v1.common',
syntax='proto3',
serialized_options=_b('\n\"com.google.ads.googleads.v1.commonB\"CriterionCategoryAvailabilityProtoP\001ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v1/common;common\242\002\003GAA\252\002\036Google.Ads.GoogleAds.V1.Common\312\002\036Google\\Ads\\GoogleAds\\V1\\Common\352\002\"Google::Ads::GoogleAds::V1::Common'),
serialized_pb=_b('\nJgoogle/ads/googleads_v1/proto/common/criterion_category_availability.proto\x12\x1egoogle.ads.googleads.v1.common\x1a\x46google/ads/googleads_v1/proto/enums/advertising_channel_sub_type.proto\x1a\x42google/ads/googleads_v1/proto/enums/advertising_channel_type.proto\x1aVgoogle/ads/googleads_v1/proto/enums/criterion_category_channel_availability_mode.proto\x1aUgoogle/ads/googleads_v1/proto/enums/criterion_category_locale_availability_mode.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1cgoogle/api/annotations.proto\"\xcb\x01\n\x1d\x43riterionCategoryAvailability\x12U\n\x07\x63hannel\x18\x01 \x01(\x0b\x32\x44.google.ads.googleads.v1.common.CriterionCategoryChannelAvailability\x12S\n\x06locale\x18\x02 \x03(\x0b\x32\x43.google.ads.googleads.v1.common.CriterionCategoryLocaleAvailability\"\xf0\x03\n$CriterionCategoryChannelAvailability\x12\x8f\x01\n\x11\x61vailability_mode\x18\x01 \x01(\x0e\x32t.google.ads.googleads.v1.enums.CriterionCategoryChannelAvailabilityModeEnum.CriterionCategoryChannelAvailabilityMode\x12r\n\x18\x61\x64vertising_channel_type\x18\x02 \x01(\x0e\x32P.google.ads.googleads.v1.enums.AdvertisingChannelTypeEnum.AdvertisingChannelType\x12|\n\x1c\x61\x64vertising_channel_sub_type\x18\x03 \x03(\x0e\x32V.google.ads.googleads.v1.enums.AdvertisingChannelSubTypeEnum.AdvertisingChannelSubType\x12\x44\n include_default_channel_sub_type\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\"\x9e\x02\n#CriterionCategoryLocaleAvailability\x12\x8d\x01\n\x11\x61vailability_mode\x18\x01 \x01(\x0e\x32r.google.ads.googleads.v1.enums.CriterionCategoryLocaleAvailabilityModeEnum.CriterionCategoryLocaleAvailabilityMode\x12\x32\n\x0c\x63ountry_code\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x33\n\rlanguage_code\x18\x03 
\x01(\x0b\x32\x1c.google.protobuf.StringValueB\xfd\x01\n\"com.google.ads.googleads.v1.commonB\"CriterionCategoryAvailabilityProtoP\x01ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v1/common;common\xa2\x02\x03GAA\xaa\x02\x1eGoogle.Ads.GoogleAds.V1.Common\xca\x02\x1eGoogle\\Ads\\GoogleAds\\V1\\Common\xea\x02\"Google::Ads::GoogleAds::V1::Commonb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v1_dot_proto_dot_enums_dot_advertising__channel__sub__type__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v1_dot_proto_dot_enums_dot_advertising__channel__type__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v1_dot_proto_dot_enums_dot_criterion__category__channel__availability__mode__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v1_dot_proto_dot_enums_dot_criterion__category__locale__availability__mode__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_CRITERIONCATEGORYAVAILABILITY = _descriptor.Descriptor(
name='CriterionCategoryAvailability',
full_name='google.ads.googleads.v1.common.CriterionCategoryAvailability',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='channel', full_name='google.ads.googleads.v1.common.CriterionCategoryAvailability.channel', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='locale', full_name='google.ads.googleads.v1.common.CriterionCategoryAvailability.locale', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=488,
serialized_end=691,
)
_CRITERIONCATEGORYCHANNELAVAILABILITY = _descriptor.Descriptor(
name='CriterionCategoryChannelAvailability',
full_name='google.ads.googleads.v1.common.CriterionCategoryChannelAvailability',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='availability_mode', full_name='google.ads.googleads.v1.common.CriterionCategoryChannelAvailability.availability_mode', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='advertising_channel_type', full_name='google.ads.googleads.v1.common.CriterionCategoryChannelAvailability.advertising_channel_type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='advertising_channel_sub_type', full_name='google.ads.googleads.v1.common.CriterionCategoryChannelAvailability.advertising_channel_sub_type', index=2,
number=3, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='include_default_channel_sub_type', full_name='google.ads.googleads.v1.common.CriterionCategoryChannelAvailability.include_default_channel_sub_type', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=694,
serialized_end=1190,
)
_CRITERIONCATEGORYLOCALEAVAILABILITY = _descriptor.Descriptor(
name='CriterionCategoryLocaleAvailability',
full_name='google.ads.googleads.v1.common.CriterionCategoryLocaleAvailability',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='availability_mode', full_name='google.ads.googleads.v1.common.CriterionCategoryLocaleAvailability.availability_mode', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='country_code', full_name='google.ads.googleads.v1.common.CriterionCategoryLocaleAvailability.country_code', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='language_code', full_name='google.ads.googleads.v1.common.CriterionCategoryLocaleAvailability.language_code', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1193,
serialized_end=1479,
)
_CRITERIONCATEGORYAVAILABILITY.fields_by_name['channel'].message_type = _CRITERIONCATEGORYCHANNELAVAILABILITY
_CRITERIONCATEGORYAVAILABILITY.fields_by_name['locale'].message_type = _CRITERIONCATEGORYLOCALEAVAILABILITY
_CRITERIONCATEGORYCHANNELAVAILABILITY.fields_by_name['availability_mode'].enum_type = google_dot_ads_dot_googleads__v1_dot_proto_dot_enums_dot_criterion__category__channel__availability__mode__pb2._CRITERIONCATEGORYCHANNELAVAILABILITYMODEENUM_CRITERIONCATEGORYCHANNELAVAILABILITYMODE
_CRITERIONCATEGORYCHANNELAVAILABILITY.fields_by_name['advertising_channel_type'].enum_type = google_dot_ads_dot_googleads__v1_dot_proto_dot_enums_dot_advertising__channel__type__pb2._ADVERTISINGCHANNELTYPEENUM_ADVERTISINGCHANNELTYPE
_CRITERIONCATEGORYCHANNELAVAILABILITY.fields_by_name['advertising_channel_sub_type'].enum_type = google_dot_ads_dot_googleads__v1_dot_proto_dot_enums_dot_advertising__channel__sub__type__pb2._ADVERTISINGCHANNELSUBTYPEENUM_ADVERTISINGCHANNELSUBTYPE
_CRITERIONCATEGORYCHANNELAVAILABILITY.fields_by_name['include_default_channel_sub_type'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_CRITERIONCATEGORYLOCALEAVAILABILITY.fields_by_name['availability_mode'].enum_type = google_dot_ads_dot_googleads__v1_dot_proto_dot_enums_dot_criterion__category__locale__availability__mode__pb2._CRITERIONCATEGORYLOCALEAVAILABILITYMODEENUM_CRITERIONCATEGORYLOCALEAVAILABILITYMODE
_CRITERIONCATEGORYLOCALEAVAILABILITY.fields_by_name['country_code'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_CRITERIONCATEGORYLOCALEAVAILABILITY.fields_by_name['language_code'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
DESCRIPTOR.message_types_by_name['CriterionCategoryAvailability'] = _CRITERIONCATEGORYAVAILABILITY
DESCRIPTOR.message_types_by_name['CriterionCategoryChannelAvailability'] = _CRITERIONCATEGORYCHANNELAVAILABILITY
DESCRIPTOR.message_types_by_name['CriterionCategoryLocaleAvailability'] = _CRITERIONCATEGORYLOCALEAVAILABILITY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CriterionCategoryAvailability = _reflection.GeneratedProtocolMessageType('CriterionCategoryAvailability', (_message.Message,), dict(
DESCRIPTOR = _CRITERIONCATEGORYAVAILABILITY,
__module__ = 'google.ads.googleads_v1.proto.common.criterion_category_availability_pb2'
,
__doc__ = """Information of category availability, per advertising channel.
Attributes:
channel:
Channel types and subtypes that are available to the category.
locale:
Locales that are available to the category for the channel.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.common.CriterionCategoryAvailability)
))
_sym_db.RegisterMessage(CriterionCategoryAvailability)
CriterionCategoryChannelAvailability = _reflection.GeneratedProtocolMessageType('CriterionCategoryChannelAvailability', (_message.Message,), dict(
DESCRIPTOR = _CRITERIONCATEGORYCHANNELAVAILABILITY,
__module__ = 'google.ads.googleads_v1.proto.common.criterion_category_availability_pb2'
,
__doc__ = """Information of advertising channel type and subtypes a category is
available in.
Attributes:
availability_mode:
Format of the channel availability. Can be ALL\_CHANNELS (the
rest of the fields will not be set), CHANNEL\_TYPE (only
advertising\_channel\_type type will be set, the category is
available to all sub types under it) or
CHANNEL\_TYPE\_AND\_SUBTYPES (advertising\_channel\_type,
advertising\_channel\_sub\_type, and
include\_default\_channel\_sub\_type will all be set).
advertising_channel_type:
Channel type the category is available to.
advertising_channel_sub_type:
Channel subtypes under the channel type the category is
available to.
include_default_channel_sub_type:
Whether default channel sub type is included. For example,
advertising\_channel\_type being DISPLAY and
include\_default\_channel\_sub\_type being false means that
the default display campaign where channel sub type is not set
is not included in this availability configuration.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.common.CriterionCategoryChannelAvailability)
))
_sym_db.RegisterMessage(CriterionCategoryChannelAvailability)
CriterionCategoryLocaleAvailability = _reflection.GeneratedProtocolMessageType('CriterionCategoryLocaleAvailability', (_message.Message,), dict(
DESCRIPTOR = _CRITERIONCATEGORYLOCALEAVAILABILITY,
__module__ = 'google.ads.googleads_v1.proto.common.criterion_category_availability_pb2'
,
__doc__ = """Information about which locales a category is available in.
Attributes:
availability_mode:
Format of the locale availability. Can be LAUNCHED\_TO\_ALL
(both country and language will be empty), COUNTRY (only
country will be set), LANGUAGE (only language wil be set),
COUNTRY\_AND\_LANGUAGE (both country and language will be
set).
country_code:
Code of the country.
language_code:
Code of the language.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.common.CriterionCategoryLocaleAvailability)
))
_sym_db.RegisterMessage(CriterionCategoryLocaleAvailability)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 59.488462 | 2,138 | 0.814444 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v1.proto.enums import advertising_channel_sub_type_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_enums_dot_advertising__channel__sub__type__pb2
from google.ads.google_ads.v1.proto.enums import advertising_channel_type_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_enums_dot_advertising__channel__type__pb2
from google.ads.google_ads.v1.proto.enums import criterion_category_channel_availability_mode_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_enums_dot_criterion__category__channel__availability__mode__pb2
from google.ads.google_ads.v1.proto.enums import criterion_category_locale_availability_mode_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_enums_dot_criterion__category__locale__availability__mode__pb2
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v1/proto/common/criterion_category_availability.proto',
package='google.ads.googleads.v1.common',
syntax='proto3',
serialized_options=_b('\n\"com.google.ads.googleads.v1.commonB\"CriterionCategoryAvailabilityProtoP\001ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v1/common;common\242\002\003GAA\252\002\036Google.Ads.GoogleAds.V1.Common\312\002\036Google\\Ads\\GoogleAds\\V1\\Common\352\002\"Google::Ads::GoogleAds::V1::Common'),
serialized_pb=_b('\nJgoogle/ads/googleads_v1/proto/common/criterion_category_availability.proto\x12\x1egoogle.ads.googleads.v1.common\x1a\x46google/ads/googleads_v1/proto/enums/advertising_channel_sub_type.proto\x1a\x42google/ads/googleads_v1/proto/enums/advertising_channel_type.proto\x1aVgoogle/ads/googleads_v1/proto/enums/criterion_category_channel_availability_mode.proto\x1aUgoogle/ads/googleads_v1/proto/enums/criterion_category_locale_availability_mode.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1cgoogle/api/annotations.proto\"\xcb\x01\n\x1d\x43riterionCategoryAvailability\x12U\n\x07\x63hannel\x18\x01 \x01(\x0b\x32\x44.google.ads.googleads.v1.common.CriterionCategoryChannelAvailability\x12S\n\x06locale\x18\x02 \x03(\x0b\x32\x43.google.ads.googleads.v1.common.CriterionCategoryLocaleAvailability\"\xf0\x03\n$CriterionCategoryChannelAvailability\x12\x8f\x01\n\x11\x61vailability_mode\x18\x01 \x01(\x0e\x32t.google.ads.googleads.v1.enums.CriterionCategoryChannelAvailabilityModeEnum.CriterionCategoryChannelAvailabilityMode\x12r\n\x18\x61\x64vertising_channel_type\x18\x02 \x01(\x0e\x32P.google.ads.googleads.v1.enums.AdvertisingChannelTypeEnum.AdvertisingChannelType\x12|\n\x1c\x61\x64vertising_channel_sub_type\x18\x03 \x03(\x0e\x32V.google.ads.googleads.v1.enums.AdvertisingChannelSubTypeEnum.AdvertisingChannelSubType\x12\x44\n include_default_channel_sub_type\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\"\x9e\x02\n#CriterionCategoryLocaleAvailability\x12\x8d\x01\n\x11\x61vailability_mode\x18\x01 \x01(\x0e\x32r.google.ads.googleads.v1.enums.CriterionCategoryLocaleAvailabilityModeEnum.CriterionCategoryLocaleAvailabilityMode\x12\x32\n\x0c\x63ountry_code\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x33\n\rlanguage_code\x18\x03 
\x01(\x0b\x32\x1c.google.protobuf.StringValueB\xfd\x01\n\"com.google.ads.googleads.v1.commonB\"CriterionCategoryAvailabilityProtoP\x01ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v1/common;common\xa2\x02\x03GAA\xaa\x02\x1eGoogle.Ads.GoogleAds.V1.Common\xca\x02\x1eGoogle\\Ads\\GoogleAds\\V1\\Common\xea\x02\"Google::Ads::GoogleAds::V1::Commonb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v1_dot_proto_dot_enums_dot_advertising__channel__sub__type__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v1_dot_proto_dot_enums_dot_advertising__channel__type__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v1_dot_proto_dot_enums_dot_criterion__category__channel__availability__mode__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v1_dot_proto_dot_enums_dot_criterion__category__locale__availability__mode__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_CRITERIONCATEGORYAVAILABILITY = _descriptor.Descriptor(
name='CriterionCategoryAvailability',
full_name='google.ads.googleads.v1.common.CriterionCategoryAvailability',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='channel', full_name='google.ads.googleads.v1.common.CriterionCategoryAvailability.channel', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='locale', full_name='google.ads.googleads.v1.common.CriterionCategoryAvailability.locale', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=488,
serialized_end=691,
)
_CRITERIONCATEGORYCHANNELAVAILABILITY = _descriptor.Descriptor(
name='CriterionCategoryChannelAvailability',
full_name='google.ads.googleads.v1.common.CriterionCategoryChannelAvailability',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='availability_mode', full_name='google.ads.googleads.v1.common.CriterionCategoryChannelAvailability.availability_mode', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='advertising_channel_type', full_name='google.ads.googleads.v1.common.CriterionCategoryChannelAvailability.advertising_channel_type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='advertising_channel_sub_type', full_name='google.ads.googleads.v1.common.CriterionCategoryChannelAvailability.advertising_channel_sub_type', index=2,
number=3, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='include_default_channel_sub_type', full_name='google.ads.googleads.v1.common.CriterionCategoryChannelAvailability.include_default_channel_sub_type', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=694,
serialized_end=1190,
)
_CRITERIONCATEGORYLOCALEAVAILABILITY = _descriptor.Descriptor(
name='CriterionCategoryLocaleAvailability',
full_name='google.ads.googleads.v1.common.CriterionCategoryLocaleAvailability',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='availability_mode', full_name='google.ads.googleads.v1.common.CriterionCategoryLocaleAvailability.availability_mode', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='country_code', full_name='google.ads.googleads.v1.common.CriterionCategoryLocaleAvailability.country_code', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='language_code', full_name='google.ads.googleads.v1.common.CriterionCategoryLocaleAvailability.language_code', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1193,
serialized_end=1479,
)
_CRITERIONCATEGORYAVAILABILITY.fields_by_name['channel'].message_type = _CRITERIONCATEGORYCHANNELAVAILABILITY
_CRITERIONCATEGORYAVAILABILITY.fields_by_name['locale'].message_type = _CRITERIONCATEGORYLOCALEAVAILABILITY
_CRITERIONCATEGORYCHANNELAVAILABILITY.fields_by_name['availability_mode'].enum_type = google_dot_ads_dot_googleads__v1_dot_proto_dot_enums_dot_criterion__category__channel__availability__mode__pb2._CRITERIONCATEGORYCHANNELAVAILABILITYMODEENUM_CRITERIONCATEGORYCHANNELAVAILABILITYMODE
_CRITERIONCATEGORYCHANNELAVAILABILITY.fields_by_name['advertising_channel_type'].enum_type = google_dot_ads_dot_googleads__v1_dot_proto_dot_enums_dot_advertising__channel__type__pb2._ADVERTISINGCHANNELTYPEENUM_ADVERTISINGCHANNELTYPE
_CRITERIONCATEGORYCHANNELAVAILABILITY.fields_by_name['advertising_channel_sub_type'].enum_type = google_dot_ads_dot_googleads__v1_dot_proto_dot_enums_dot_advertising__channel__sub__type__pb2._ADVERTISINGCHANNELSUBTYPEENUM_ADVERTISINGCHANNELSUBTYPE
_CRITERIONCATEGORYCHANNELAVAILABILITY.fields_by_name['include_default_channel_sub_type'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_CRITERIONCATEGORYLOCALEAVAILABILITY.fields_by_name['availability_mode'].enum_type = google_dot_ads_dot_googleads__v1_dot_proto_dot_enums_dot_criterion__category__locale__availability__mode__pb2._CRITERIONCATEGORYLOCALEAVAILABILITYMODEENUM_CRITERIONCATEGORYLOCALEAVAILABILITYMODE
_CRITERIONCATEGORYLOCALEAVAILABILITY.fields_by_name['country_code'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_CRITERIONCATEGORYLOCALEAVAILABILITY.fields_by_name['language_code'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
DESCRIPTOR.message_types_by_name['CriterionCategoryAvailability'] = _CRITERIONCATEGORYAVAILABILITY
DESCRIPTOR.message_types_by_name['CriterionCategoryChannelAvailability'] = _CRITERIONCATEGORYCHANNELAVAILABILITY
DESCRIPTOR.message_types_by_name['CriterionCategoryLocaleAvailability'] = _CRITERIONCATEGORYLOCALEAVAILABILITY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CriterionCategoryAvailability = _reflection.GeneratedProtocolMessageType('CriterionCategoryAvailability', (_message.Message,), dict(
DESCRIPTOR = _CRITERIONCATEGORYAVAILABILITY,
__module__ = 'google.ads.googleads_v1.proto.common.criterion_category_availability_pb2'
,
__doc__ = """Information of category availability, per advertising channel.
Attributes:
channel:
Channel types and subtypes that are available to the category.
locale:
Locales that are available to the category for the channel.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.common.CriterionCategoryAvailability)
))
_sym_db.RegisterMessage(CriterionCategoryAvailability)
CriterionCategoryChannelAvailability = _reflection.GeneratedProtocolMessageType('CriterionCategoryChannelAvailability', (_message.Message,), dict(
DESCRIPTOR = _CRITERIONCATEGORYCHANNELAVAILABILITY,
__module__ = 'google.ads.googleads_v1.proto.common.criterion_category_availability_pb2'
,
__doc__ = """Information of advertising channel type and subtypes a category is
available in.
Attributes:
availability_mode:
Format of the channel availability. Can be ALL\_CHANNELS (the
rest of the fields will not be set), CHANNEL\_TYPE (only
advertising\_channel\_type type will be set, the category is
available to all sub types under it) or
CHANNEL\_TYPE\_AND\_SUBTYPES (advertising\_channel\_type,
advertising\_channel\_sub\_type, and
include\_default\_channel\_sub\_type will all be set).
advertising_channel_type:
Channel type the category is available to.
advertising_channel_sub_type:
Channel subtypes under the channel type the category is
available to.
include_default_channel_sub_type:
Whether default channel sub type is included. For example,
advertising\_channel\_type being DISPLAY and
include\_default\_channel\_sub\_type being false means that
the default display campaign where channel sub type is not set
is not included in this availability configuration.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.common.CriterionCategoryChannelAvailability)
))
_sym_db.RegisterMessage(CriterionCategoryChannelAvailability)
CriterionCategoryLocaleAvailability = _reflection.GeneratedProtocolMessageType('CriterionCategoryLocaleAvailability', (_message.Message,), dict(
DESCRIPTOR = _CRITERIONCATEGORYLOCALEAVAILABILITY,
__module__ = 'google.ads.googleads_v1.proto.common.criterion_category_availability_pb2'
,
__doc__ = """Information about which locales a category is available in.
Attributes:
availability_mode:
Format of the locale availability. Can be LAUNCHED\_TO\_ALL
(both country and language will be empty), COUNTRY (only
country will be set), LANGUAGE (only language wil be set),
COUNTRY\_AND\_LANGUAGE (both country and language will be
set).
country_code:
Code of the country.
language_code:
Code of the language.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.common.CriterionCategoryLocaleAvailability)
))
_sym_db.RegisterMessage(CriterionCategoryLocaleAvailability)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| true | true |
1c33ca6b07df9401deb2a7acc1624f8489ef1b94 | 11,911 | py | Python | dark/html.py | TaliVeith/dark-matter | 1548a6e6fbfceb7c8b13556bbf4f7ce7d1ac18a0 | [
"MIT"
] | 10 | 2016-03-09T09:43:14.000Z | 2021-04-03T21:46:12.000Z | dark/html.py | TaliVeith/dark-matter | 1548a6e6fbfceb7c8b13556bbf4f7ce7d1ac18a0 | [
"MIT"
] | 332 | 2015-01-07T12:37:30.000Z | 2022-01-20T15:48:11.000Z | dark/html.py | TaliVeith/dark-matter | 1548a6e6fbfceb7c8b13556bbf4f7ce7d1ac18a0 | [
"MIT"
] | 4 | 2016-03-08T14:56:39.000Z | 2021-01-27T08:11:27.000Z | from __future__ import print_function
from IPython.display import HTML
from six.moves.urllib.parse import quote
from dark.fastq import FastqReads
def NCBISequenceLinkURL(title, field=None, delim='|'):
    """
    Build the URL of the NCBI nucleotide info page for a sequence title,
    like "acc|GENBANK|AY516849.1|GENBANK|42768646 Homo sapiens".

    @param title: The C{str} sequence title to produce a link URL for.
    @param field: The C{int} field number to use (as delimited by C{delim})
        or C{None} if no field splitting should be done.
    @param delim: The C{str} to split the title on (if C{field} is not
        C{None}).
    @return: A C{str} URL.
    """
    if field is None:
        # Use the whole title, percent-encoded, as the accession reference.
        return 'http://www.ncbi.nlm.nih.gov/nuccore/' + quote(title)
    try:
        accession = title.split(delim)[field]
    except IndexError:
        raise IndexError(
            'Could not extract field %d from sequence title %r' %
            (field, title))
    return 'http://www.ncbi.nlm.nih.gov/nuccore/' + quote(accession)
def NCBISequenceLink(title, field=None, delim='|'):
    """
    Given a sequence title, like
    "acc|GENBANK|AY516849.1|GENBANK|42768646 Homo sapiens",
    return an HTML <A> tag displaying a link to the info page at NCBI.

    @param title: The C{str} sequence title to produce a link URL for.
    @param field: The C{int} field number to use (as delimited by C{delim})
        or C{None} if no field splitting should be done.
    @param delim: The C{str} to split the title on (if C{field} is not
        C{None}).
    @return: A C{str} HTML <A> tag.
    """
    # NOTE(review): the title is interpolated unescaped into the HTML;
    # presumably titles never contain markup — confirm against callers.
    url = NCBISequenceLinkURL(title, field, delim)
    return '<a href="%s" target="_blank">%s</a>' % (url, title)
def _sortHTML(titlesAlignments, by, limit=None):
"""
Return an C{IPython.display.HTML} object with the alignments sorted by the
given attribute.
@param titlesAlignments: A L{dark.titles.TitlesAlignments} instance.
@param by: A C{str}, one of 'length', 'maxScore', 'medianScore',
'readCount', or 'title'.
@param limit: An C{int} limit on the number of results to show.
@return: An HTML instance with sorted titles and information about
hit read count, length, and e-values.
"""
out = []
for i, title in enumerate(titlesAlignments.sortTitles(by), start=1):
if limit is not None and i > limit:
break
titleAlignments = titlesAlignments[title]
link = NCBISequenceLink(title, title)
out.append(
'%3d: reads=%d, len=%d, max=%s median=%s<br/>'
' %s' %
(i, titleAlignments.readCount(), titleAlignments.subjectLength,
titleAlignments.bestHsp().score.score,
titleAlignments.medianScore(), link))
return HTML('<pre>' + '<br/>'.join(out) + '</pre>')
def summarizeTitlesByTitle(titlesAlignments, limit=None):
"""
Sort match titles by title
@param titlesAlignments: A L{dark.titles.TitlesAlignments} instance.
@param limit: An C{int} limit on the number of results to show.
@return: An C{IPython.display.HTML} instance with match titles sorted by
title.
"""
return _sortHTML(titlesAlignments, 'title', limit)
def summarizeTitlesByCount(titlesAlignments, limit=None):
"""
Sort match titles by read count.
@param titlesAlignments: A L{dark.titles.TitlesAlignments} instance.
@param limit: An C{int} limit on the number of results to show.
@return: An C{IPython.display.HTML} instance with match titles sorted by
read count.
"""
return _sortHTML(titlesAlignments, 'readCount', limit)
def summarizeTitlesByLength(titlesAlignments, limit=None):
"""
Sort match titles by sequence length.
@param titlesAlignments: A L{dark.titles.TitlesAlignments} instance.
@param limit: An C{int} limit on the number of results to show.
@return: An C{IPython.display.HTML} instance with match titles sorted by
sequence length.
"""
return _sortHTML(titlesAlignments, 'length', limit)
def summarizeTitlesByMaxScore(titlesAlignments, limit=None):
"""
Sort hit titles by maximum score.
@param titlesAlignments: A L{dark.blast.BlastMatchs} instance.
@param limit: An C{int} limit on the number of results to show.
@return: An C{IPython.display.HTML} instance with hit titles sorted by
max score.
"""
return _sortHTML(titlesAlignments, 'maxScore', limit)
def summarizeTitlesByMedianScore(titlesAlignments, limit=None):
"""
Sort match titles by median score.
@param titlesAlignments: A L{dark.titles.TitlesAlignments} instance.
@param limit: An C{int} limit on the number of results to show.
@return: An C{IPython.display.HTML} instance with match titles sorted by
median score.
"""
return _sortHTML(titlesAlignments, 'medianScore', limit)
class AlignmentPanelHTMLWriter(object):
"""
Produces HTML details of a rectangular panel of graphs that each
contain an alignment graph against a given sequence. This is
supplementary output info for the AlignmentPanel class in graphics.py.
@param outputDir: The C{str} directory to write files into.
@param titlesAlignments: A L{dark.titles.TitlesAlignments} instance.
"""
def __init__(self, outputDir, titlesAlignments):
self._outputDir = outputDir
self._titlesAlignments = titlesAlignments
self._images = []
def addImage(self, imageBasename, title, graphInfo):
self._images.append({
'graphInfo': graphInfo,
'imageBasename': imageBasename,
'title': title
})
def close(self):
with open('%s/index.html' % self._outputDir, 'w') as fp:
self._writeHeader(fp)
self._writeBody(fp)
self._writeFooter(fp)
with open('%s/style.css' % self._outputDir, 'w') as fp:
self._writeCSS(fp)
def _writeHeader(self, fp):
fp.write("""\
<html>
<head>
<title>Read alignments for %d matched subjects</title>
<link rel="stylesheet" type="text/css" href="style.css">
</head>
<body>
<div id="content">
""" % len(self._images))
def _writeBody(self, fp):
fp.write('<h1>Read alignments for %d matched subjects</h1>\n' %
len(self._images))
# Write out an alignment panel as a table.
cols = 6
fp.write('<table><tbody>\n')
for i, image in enumerate(self._images):
title = image['title']
if i % cols == 0:
fp.write('<tr>\n')
fp.write(
'<td><a id="small_%d"></a><a href="#big_%d"><img src="%s" '
'class="thumbnail"/></a></td>\n' %
(i, i, image['imageBasename']))
if i % cols == cols - 1:
fp.write('</tr>')
# Add empty cells to the final table row, and close the row, if
# necessary.
if i % cols < cols - 1:
while i % cols < cols - 1:
fp.write('<td> </td>\n')
i += 1
fp.write('</tr>\n')
fp.write('</tbody></table>\n')
# Write out the full images with additional detail.
for i, image in enumerate(self._images):
title = image['title']
titleAlignments = self._titlesAlignments[title]
graphInfo = image['graphInfo']
readFormat = self._writeFASTA(i, image)
fp.write("""
<a id="big_%d"></a>
<h3>%d: %s</h3>
<p>
Length: %d.
Read count: %d.
HSP count: %d.
<a href="%d.%s">%s</a>.
<a href="#small_%d">Top panel.</a>
"""
% (i, i, title,
titleAlignments.subjectLength,
titleAlignments.readCount(),
titleAlignments.hspCount(), i, readFormat, readFormat,
i))
url = NCBISequenceLinkURL(title)
if url:
fp.write('<a href="%s" target="_blank">NCBI</a>.' % url)
# Write out feature information.
if graphInfo['features'] is None:
# Feature lookup was False (or we were offline).
pass
elif len(graphInfo['features']) == 0:
fp.write('There were no features.')
else:
fp.write('<a href="%s">Features</a>' %
self._writeFeatures(i, image))
# Write out the titles that this title invalidated due to its
# read set.
readSetFilter = self._titlesAlignments.readSetFilter
if readSetFilter:
invalidated = readSetFilter.invalidates(title)
if invalidated:
nInvalidated = len(invalidated)
fp.write('<br/>This title invalidated %d other%s due to '
'its read set:<ul>'
% (nInvalidated,
'' if nInvalidated == 1 else 's'))
for title in invalidated:
fp.write('<li>%s</li>' % title)
fp.write('</ul>')
fp.write(
'</p><img src="%s" class="full-size"/>' %
image['imageBasename'])
def _writeFooter(self, fp):
fp.write("""\
</div>
</body>
</html>
""")
def _writeCSS(self, fp):
fp.write("""\
#content {
width: 95%;
margin: auto;
}
img.thumbnail {
height: 300px;
}
img.full-size {
height: 900px;
}
""")
def _writeFASTA(self, i, image):
"""
Write a FASTA file containing the set of reads that hit a sequence.
@param i: The number of the image in self._images.
@param image: A member of self._images.
@return: A C{str}, either 'fasta' or 'fastq' indicating the format
of the reads in C{self._titlesAlignments}.
"""
if isinstance(self._titlesAlignments.readsAlignments.reads,
FastqReads):
format_ = 'fastq'
else:
format_ = 'fasta'
filename = '%s/%d.%s' % (self._outputDir, i, format_)
titleAlignments = self._titlesAlignments[image['title']]
with open(filename, 'w') as fp:
for titleAlignment in titleAlignments:
fp.write(titleAlignment.read.toString(format_))
return format_
def _writeFeatures(self, i, image):
"""
Write a text file containing the features as a table.
@param i: The number of the image in self._images.
@param image: A member of self._images.
@return: The C{str} features file name - just the base name, not
including the path to the file.
"""
basename = 'features-%d.txt' % i
filename = '%s/%s' % (self._outputDir, basename)
featureList = image['graphInfo']['features']
with open(filename, 'w') as fp:
for feature in featureList:
fp.write('%s\n\n' % feature.feature)
return basename
def readCountText(readCountColors, count, linkText=None):
    """
    Render a read count as HTML text, colored by count when possible.

    @param readCountColors: Either a C{dark.colors.colorsForCounts}
        instance or C{None} for no read count coloring.
    @param count: An C{int} read count.
    @param linkText: A C{str} for the HTML link text. Only consulted when
        no coloring information is given; if C{None} (or empty), the count
        itself is used.
    @return: A C{str}: an HTML span whose CSS class is derived from the
        read count, or the plain link text / count if no color information
        is given.
    """
    if not readCountColors:
        return linkText or str(count)
    threshold = readCountColors.thresholdForCount(count)
    cssClass = readCountColors.thresholdToCssName(threshold)
    return '<span class="{}">{}</span>'.format(cssClass, count)
| 34.725948 | 78 | 0.589455 | from __future__ import print_function
from IPython.display import HTML
from six.moves.urllib.parse import quote
from dark.fastq import FastqReads
def NCBISequenceLinkURL(title, field=None, delim='|'):
if field is None:
ref = title
else:
try:
ref = title.split(delim)[field]
except IndexError:
raise IndexError(
'Could not extract field %d from sequence title %r' %
(field, title))
return 'http://www.ncbi.nlm.nih.gov/nuccore/' + quote(ref)
def NCBISequenceLink(title, field=None, delim='|'):
return '<a href="%s" target="_blank">%s</a>' % (
NCBISequenceLinkURL(title, field, delim), title)
def _sortHTML(titlesAlignments, by, limit=None):
out = []
for i, title in enumerate(titlesAlignments.sortTitles(by), start=1):
if limit is not None and i > limit:
break
titleAlignments = titlesAlignments[title]
link = NCBISequenceLink(title, title)
out.append(
'%3d: reads=%d, len=%d, max=%s median=%s<br/>'
' %s' %
(i, titleAlignments.readCount(), titleAlignments.subjectLength,
titleAlignments.bestHsp().score.score,
titleAlignments.medianScore(), link))
return HTML('<pre>' + '<br/>'.join(out) + '</pre>')
def summarizeTitlesByTitle(titlesAlignments, limit=None):
return _sortHTML(titlesAlignments, 'title', limit)
def summarizeTitlesByCount(titlesAlignments, limit=None):
return _sortHTML(titlesAlignments, 'readCount', limit)
def summarizeTitlesByLength(titlesAlignments, limit=None):
return _sortHTML(titlesAlignments, 'length', limit)
def summarizeTitlesByMaxScore(titlesAlignments, limit=None):
return _sortHTML(titlesAlignments, 'maxScore', limit)
def summarizeTitlesByMedianScore(titlesAlignments, limit=None):
return _sortHTML(titlesAlignments, 'medianScore', limit)
class AlignmentPanelHTMLWriter(object):
def __init__(self, outputDir, titlesAlignments):
self._outputDir = outputDir
self._titlesAlignments = titlesAlignments
self._images = []
def addImage(self, imageBasename, title, graphInfo):
self._images.append({
'graphInfo': graphInfo,
'imageBasename': imageBasename,
'title': title
})
def close(self):
with open('%s/index.html' % self._outputDir, 'w') as fp:
self._writeHeader(fp)
self._writeBody(fp)
self._writeFooter(fp)
with open('%s/style.css' % self._outputDir, 'w') as fp:
self._writeCSS(fp)
def _writeHeader(self, fp):
fp.write("""\
<html>
<head>
<title>Read alignments for %d matched subjects</title>
<link rel="stylesheet" type="text/css" href="style.css">
</head>
<body>
<div id="content">
""" % len(self._images))
def _writeBody(self, fp):
fp.write('<h1>Read alignments for %d matched subjects</h1>\n' %
len(self._images))
cols = 6
fp.write('<table><tbody>\n')
for i, image in enumerate(self._images):
title = image['title']
if i % cols == 0:
fp.write('<tr>\n')
fp.write(
'<td><a id="small_%d"></a><a href="#big_%d"><img src="%s" '
'class="thumbnail"/></a></td>\n' %
(i, i, image['imageBasename']))
if i % cols == cols - 1:
fp.write('</tr>')
if i % cols < cols - 1:
while i % cols < cols - 1:
fp.write('<td> </td>\n')
i += 1
fp.write('</tr>\n')
fp.write('</tbody></table>\n')
for i, image in enumerate(self._images):
title = image['title']
titleAlignments = self._titlesAlignments[title]
graphInfo = image['graphInfo']
readFormat = self._writeFASTA(i, image)
fp.write("""
<a id="big_%d"></a>
<h3>%d: %s</h3>
<p>
Length: %d.
Read count: %d.
HSP count: %d.
<a href="%d.%s">%s</a>.
<a href="#small_%d">Top panel.</a>
"""
% (i, i, title,
titleAlignments.subjectLength,
titleAlignments.readCount(),
titleAlignments.hspCount(), i, readFormat, readFormat,
i))
url = NCBISequenceLinkURL(title)
if url:
fp.write('<a href="%s" target="_blank">NCBI</a>.' % url)
if graphInfo['features'] is None:
pass
elif len(graphInfo['features']) == 0:
fp.write('There were no features.')
else:
fp.write('<a href="%s">Features</a>' %
self._writeFeatures(i, image))
readSetFilter = self._titlesAlignments.readSetFilter
if readSetFilter:
invalidated = readSetFilter.invalidates(title)
if invalidated:
nInvalidated = len(invalidated)
fp.write('<br/>This title invalidated %d other%s due to '
'its read set:<ul>'
% (nInvalidated,
'' if nInvalidated == 1 else 's'))
for title in invalidated:
fp.write('<li>%s</li>' % title)
fp.write('</ul>')
fp.write(
'</p><img src="%s" class="full-size"/>' %
image['imageBasename'])
def _writeFooter(self, fp):
fp.write("""\
</div>
</body>
</html>
""")
def _writeCSS(self, fp):
fp.write("""\
#content {
width: 95%;
margin: auto;
}
img.thumbnail {
height: 300px;
}
img.full-size {
height: 900px;
}
""")
def _writeFASTA(self, i, image):
if isinstance(self._titlesAlignments.readsAlignments.reads,
FastqReads):
format_ = 'fastq'
else:
format_ = 'fasta'
filename = '%s/%d.%s' % (self._outputDir, i, format_)
titleAlignments = self._titlesAlignments[image['title']]
with open(filename, 'w') as fp:
for titleAlignment in titleAlignments:
fp.write(titleAlignment.read.toString(format_))
return format_
def _writeFeatures(self, i, image):
basename = 'features-%d.txt' % i
filename = '%s/%s' % (self._outputDir, basename)
featureList = image['graphInfo']['features']
with open(filename, 'w') as fp:
for feature in featureList:
fp.write('%s\n\n' % feature.feature)
return basename
def readCountText(readCountColors, count, linkText=None):
if readCountColors:
_class = readCountColors.thresholdToCssName(
readCountColors.thresholdForCount(count))
return f'<span class="{_class}">{count}</span>'
else:
return linkText or str(count)
| true | true |
1c33ca7586d1155bc27847a4ecd9f840470dc365 | 4,305 | py | Python | api-ref/source/conf.py | soda-research/mistral | 550a3de9c2defc7ce26336cb705d9c8d87bbaddd | [
"Apache-2.0"
] | 3 | 2015-08-28T04:57:56.000Z | 2017-03-27T10:59:56.000Z | api-ref/source/conf.py | soda-research/mistral | 550a3de9c2defc7ce26336cb705d9c8d87bbaddd | [
"Apache-2.0"
] | 21 | 2015-04-14T22:41:53.000Z | 2019-02-20T09:30:10.000Z | api-ref/source/conf.py | soda-research/mistral | 550a3de9c2defc7ce26336cb705d9c8d87bbaddd | [
"Apache-2.0"
] | 12 | 2015-08-14T02:27:37.000Z | 2020-12-31T10:09:21.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sphinx configuration for the Mistral Workflow Service API reference."""

import os
import subprocess
import sys

# True when the docs are being built on the Read the Docs service.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))

# -- General configuration ----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinxcontrib.autohttp.flask',
    'sphinxcontrib.pecanwsme.rest',
]

# oslosphinx is not installed on Read the Docs, so only load it elsewhere.
if not on_rtd:
    extensions.append('oslosphinx')

wsme_protocols = ['restjson']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Workflow Service API Reference'
copyright = u'2017, Mistral Contributors'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
from mistral.version import version_info
release = version_info.release_string()
version = version_info.version_string()

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_static_path = ['_static']
if on_rtd:
    html_theme_path = ['.']
    html_theme = 'sphinx_rtd_theme'

# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project

# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['mistral.']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# NOTE(review): the timestamp here is taken from the latest git commit
# rather than from a strftime format string.
git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
           "-n1"]
html_last_updated_fmt = subprocess.check_output(
    git_cmd).decode('utf-8')

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Mistral API Reference'

# Custom sidebar templates, maps document names to template names.
html_sidebars = {
    'index': [
        'sidebarlinks.html', 'localtoc.html', 'searchbox.html',
        'sourcelink.html'
    ],
    '**': [
        'localtoc.html', 'relations.html',
        'searchbox.html', 'sourcelink.html'
    ]
}

# -- Options for manual page output -------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'mistral', u'Mistral',
     [u'OpenStack Foundation'], 1)
]

# If true, show URL addresses after external links.
man_show_urls = True
| 32.862595 | 79 | 0.700116 |
import os
import subprocess
import sys
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
extensions = [
'sphinx.ext.autodoc',
'sphinxcontrib.autohttp.flask',
'sphinxcontrib.pecanwsme.rest',
]
if not on_rtd:
extensions.append('oslosphinx')
wsme_protocols = ['restjson']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Workflow Service API Reference'
copyright = u'2017, Mistral Contributors'
# |version| and |release|, also used in various other places throughout the
# built documents.
from mistral.version import version_info
release = version_info.release_string()
version = version_info.version_string()
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_static_path = ['_static']
if on_rtd:
html_theme_path = ['.']
html_theme = 'sphinx_rtd_theme'
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['mistral.']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
"-n1"]
html_last_updated_fmt = subprocess.check_output(
git_cmd).decode('utf-8')
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Mistral API Reference'
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': [
'sidebarlinks.html', 'localtoc.html', 'searchbox.html',
'sourcelink.html'
],
'**': [
'localtoc.html', 'relations.html',
'searchbox.html', 'sourcelink.html'
]
}
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mistral', u'Mistral',
[u'OpenStack Foundation'], 1)
]
# If true, show URL addresses after external links.
man_show_urls = True
| true | true |
1c33cae5baaef18f55e1952e5b130f74b97e1f8f | 510 | py | Python | home/migrations/0073_auto_20201203_2319.py | SCCapstone/C-2319 | 4ab2b5b5511209dc4d7f9c25b6a4f70843287b77 | [
"bzip2-1.0.6"
] | null | null | null | home/migrations/0073_auto_20201203_2319.py | SCCapstone/C-2319 | 4ab2b5b5511209dc4d7f9c25b6a4f70843287b77 | [
"bzip2-1.0.6"
] | null | null | null | home/migrations/0073_auto_20201203_2319.py | SCCapstone/C-2319 | 4ab2b5b5511209dc4d7f9c25b6a4f70843287b77 | [
"bzip2-1.0.6"
] | null | null | null | # Generated by Django 3.0 on 2020-12-04 04:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0072_auto_20201203_2306'),
]
operations = [
migrations.AlterField(
model_name='item',
name='condition',
field=models.IntegerField(choices=[(3, 'Used - Poor Condidtion'), (2, 'Used - Good'), (0, 'Brand New'), (1, 'Used - Like New'), (4, 'Used - Not Usable')], default=4),
),
]
| 26.842105 | 178 | 0.582353 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0072_auto_20201203_2306'),
]
operations = [
migrations.AlterField(
model_name='item',
name='condition',
field=models.IntegerField(choices=[(3, 'Used - Poor Condidtion'), (2, 'Used - Good'), (0, 'Brand New'), (1, 'Used - Like New'), (4, 'Used - Not Usable')], default=4),
),
]
| true | true |
1c33cafc1861c18e47a486e41801a7c693148de0 | 5,399 | py | Python | arguments.py | zegerk/gym-micropolis | 554bf41e9c4001140cdba90c5bbb3cc6bacf4c65 | [
"MIT"
] | 3 | 2020-07-13T08:44:36.000Z | 2022-03-18T01:17:59.000Z | arguments.py | zegerk/gym-micropolis | 554bf41e9c4001140cdba90c5bbb3cc6bacf4c65 | [
"MIT"
] | null | null | null | arguments.py | zegerk/gym-micropolis | 554bf41e9c4001140cdba90c5bbb3cc6bacf4c65 | [
"MIT"
] | null | null | null | import argparse
import torch
def get_args():
    """Parse and return the command-line arguments for RL training.

    The returned argparse.Namespace additionally carries a derived
    ``cuda`` attribute: True when CUDA is available and --no-cuda was
    not given.
    """
    parser = argparse.ArgumentParser(description='RL')
    parser.add_argument('--algo', default='a2c',
                        help='algorithm to use: a2c | ppo | acktr')
    parser.add_argument('--lr', type=float, default=7e-4,
                        help='learning rate (default: 7e-4)')
    parser.add_argument('--eps', type=float, default=1e-5,
                        help='RMSprop optimizer epsilon (default: 1e-5)')
    parser.add_argument('--alpha', type=float, default=0.99,
                        help='RMSprop optimizer apha (default: 0.99)')
    parser.add_argument('--gamma', type=float, default=0.99,
                        help='discount factor for rewards (default: 0.99)')
    parser.add_argument('--use-gae', action='store_true', default=False,
                        help='use generalized advantage estimation')
    parser.add_argument('--tau', type=float, default=0.95,
                        help='gae parameter (default: 0.95)')
    parser.add_argument('--entropy-coef', type=float, default=0.01,
                        help='entropy term coefficient (default: 0.01)')
    parser.add_argument('--value-loss-coef', type=float, default=0.5,
                        help='value loss coefficient (default: 0.5)')
    parser.add_argument('--max-grad-norm', type=float, default=0.5,
                        help='max norm of gradients (default: 0.5)')
    parser.add_argument('--seed', type=int, default=1,
                        help='random seed (default: 1)')
    parser.add_argument('--num-processes', type=int, default=12,
                        help='how many training CPU processes to use (default: 12)')
    parser.add_argument('--num-steps', type=int, default=5,
                        help='number of forward steps in A2C (default: 5)')
    parser.add_argument('--ppo-epoch', type=int, default=4,
                        help='number of ppo epochs (default: 4)')
    parser.add_argument('--num-mini-batch', type=int, default=32,
                        help='number of batches for ppo (default: 32)')
    parser.add_argument('--clip-param', type=float, default=0.2,
                        help='ppo clip parameter (default: 0.2)')
    parser.add_argument('--log-interval', type=int, default=10,
                        help='log interval, one log per n updates (default: 10)')
    parser.add_argument('--save-interval', type=int, default=100,
                        help='save interval, one save per n updates (default: 100)')
    parser.add_argument('--eval-interval', type=int, default=None,
                        help='eval interval, one eval per n updates (default: None)')
    parser.add_argument('--vis-interval', type=int, default=100,
                        help='vis interval, one log per n updates (default: 100)')
    # NOTE(review): 10e6 is a float, and argparse's type= only converts
    # command-line strings, so the default value of num_frames is a float.
    parser.add_argument('--num-frames', type=int, default=10e6,
                        help='number of frames to train (default: 10e6)')
    # NOTE(review): the help text below mentions PongNoFrameskip-v4 but the
    # actual default is MicropolisEnv-v0 — confirm which is intended.
    parser.add_argument('--env-name', default='MicropolisEnv-v0',
                        help='environment to train on (default: PongNoFrameskip-v4)')
    # NOTE(review): help text says /tmp/gym but the default is
    # trained_models/a2c/.
    parser.add_argument('--log-dir', default='trained_models/a2c/',
                        help='directory to save agent logs (default: /tmp/gym)')
    parser.add_argument('--save-dir', default='./trained_models/',
                        help='directory to save agent logs (default: ./trained_models/)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--render', action='store_true', default=False,
                        help="render gui of single agent during training")
    parser.add_argument('--print-map', action='store_true', default=False)
    parser.add_argument('--add-timestep', action='store_true', default=False,
                        help='add timestep to observations')
    parser.add_argument('--recurrent-policy', action='store_true', default=False,
                        help='use a recurrent policy')
    # NOTE(review): action='store_true' with default=True makes --vis a
    # no-op; a --no-vis flag would be needed to actually disable
    # visualization.
    parser.add_argument('--vis', action='store_true', default=True,
                        help='enable visdom visualization')
    parser.add_argument('--port', type=int, default=8097,
                        help='port to run the server on (default: 8097)')
    parser.add_argument('--map-width', type=int, default=20,
                        help="width of micropolis map")
    parser.add_argument('--empty-start', action='store_true', default=False)
    parser.add_argument('--model', default='fixed')
    parser.add_argument('--curiosity', action='store_true', default=False)
    parser.add_argument('--no-reward', action='store_true', default=False)
    parser.add_argument('--env-type', default='yeet')

    ########################################### ICM
    parser.add_argument(
        '--eta',
        type=float,
        default=0.01,
        metavar='LR',
        help='scaling factor for intrinsic reward')
    parser.add_argument(
        '--beta',
        type=float,
        default=0.2,
        metavar='LR',
        help='balance between inverse & forward')
    parser.add_argument(
        '--lmbda',
        type=float,
        default=0.1,
        metavar='LR',
        help='lambda : balance between A2C & icm')

    args = parser.parse_args()
    # Derive the effective CUDA flag from the --no-cuda switch and the
    # availability of a CUDA device.
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    return args
| 53.99 | 89 | 0.588072 | import argparse
import torch
def get_args():
parser = argparse.ArgumentParser(description='RL')
parser.add_argument('--algo', default='a2c',
help='algorithm to use: a2c | ppo | acktr')
parser.add_argument('--lr', type=float, default=7e-4,
help='learning rate (default: 7e-4)')
parser.add_argument('--eps', type=float, default=1e-5,
help='RMSprop optimizer epsilon (default: 1e-5)')
parser.add_argument('--alpha', type=float, default=0.99,
help='RMSprop optimizer apha (default: 0.99)')
parser.add_argument('--gamma', type=float, default=0.99,
help='discount factor for rewards (default: 0.99)')
parser.add_argument('--use-gae', action='store_true', default=False,
help='use generalized advantage estimation')
parser.add_argument('--tau', type=float, default=0.95,
help='gae parameter (default: 0.95)')
parser.add_argument('--entropy-coef', type=float, default=0.01,
help='entropy term coefficient (default: 0.01)')
parser.add_argument('--value-loss-coef', type=float, default=0.5,
help='value loss coefficient (default: 0.5)')
parser.add_argument('--max-grad-norm', type=float, default=0.5,
help='max norm of gradients (default: 0.5)')
parser.add_argument('--seed', type=int, default=1,
help='random seed (default: 1)')
parser.add_argument('--num-processes', type=int, default=12,
help='how many training CPU processes to use (default: 12)')
parser.add_argument('--num-steps', type=int, default=5,
help='number of forward steps in A2C (default: 5)')
parser.add_argument('--ppo-epoch', type=int, default=4,
help='number of ppo epochs (default: 4)')
parser.add_argument('--num-mini-batch', type=int, default=32,
help='number of batches for ppo (default: 32)')
parser.add_argument('--clip-param', type=float, default=0.2,
help='ppo clip parameter (default: 0.2)')
parser.add_argument('--log-interval', type=int, default=10,
help='log interval, one log per n updates (default: 10)')
parser.add_argument('--save-interval', type=int, default=100,
help='save interval, one save per n updates (default: 100)')
parser.add_argument('--eval-interval', type=int, default=None,
help='eval interval, one eval per n updates (default: None)')
parser.add_argument('--vis-interval', type=int, default=100,
help='vis interval, one log per n updates (default: 100)')
parser.add_argument('--num-frames', type=int, default=10e6,
help='number of frames to train (default: 10e6)')
parser.add_argument('--env-name', default='MicropolisEnv-v0',
help='environment to train on (default: PongNoFrameskip-v4)')
parser.add_argument('--log-dir', default='trained_models/a2c/',
help='directory to save agent logs (default: /tmp/gym)')
parser.add_argument('--save-dir', default='./trained_models/',
help='directory to save agent logs (default: ./trained_models/)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--render', action='store_true', default=False,
help="render gui of single agent during training")
parser.add_argument('--print-map', action='store_true', default=False)
parser.add_argument('--add-timestep', action='store_true', default=False,
help='add timestep to observations')
parser.add_argument('--recurrent-policy', action='store_true', default=False,
help='use a recurrent policy')
parser.add_argument('--vis', action='store_true', default=True,
help='enable visdom visualization')
parser.add_argument('--port', type=int, default=8097,
help='port to run the server on (default: 8097)')
parser.add_argument('--map-width', type=int, default=20,
help="width of micropolis map")
parser.add_argument('--empty-start', action='store_true', default=False)
parser.add_argument('--model', default='fixed')
parser.add_argument('--curiosity', action='store_true', default=False)
parser.add_argument('--no-reward', action='store_true', default=False)
parser.add_argument('--env-type', default='yeet')
| true | true |
1c33ccb5f496d7886eed6af7173903ed1970063c | 4,641 | py | Python | test/functional/wallet_importprunedfunds.py | pexacoin/core | 0c6ad31264dde2cbe612d35202e7005a9dae0e1a | [
"MIT"
] | 11 | 2019-07-08T01:45:34.000Z | 2020-04-24T22:17:43.000Z | test/functional/wallet_importprunedfunds.py | pexacoin/core | 0c6ad31264dde2cbe612d35202e7005a9dae0e1a | [
"MIT"
] | 1 | 2019-10-19T14:52:31.000Z | 2019-10-19T14:52:31.000Z | test/functional/wallet_importprunedfunds.py | pexacoin/core | 0c6ad31264dde2cbe612d35202e7005a9dae0e1a | [
"MIT"
] | 4 | 2019-07-08T01:45:51.000Z | 2021-12-17T18:20:26.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017-2018 The Pexa Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importprunedfunds and removeprunedfunds RPCs."""
from test_framework.test_framework import PexaTestFramework
from test_framework.util import *
class ImportPrunedFundsTest(PexaTestFramework):
    """Functional test for the importprunedfunds/removeprunedfunds RPCs.

    Node 0 mines a chain and sends coins to three of its own addresses;
    node 1 then imports the funding transactions (raw tx + merkle proof)
    under three different key situations — no key, watch-only address,
    and full private key — and finally removes them again, checking
    balances and address ownership flags after every step.
    """
    def set_test_params(self):
        # Fresh regtest chain, two nodes (sender/miner and importer).
        self.setup_clean_chain = True
        self.num_nodes = 2
    def run_test(self):
        self.log.info("Mining blocks...")
        # 101 blocks so the first coinbase matures and node 0 has spendable funds.
        self.nodes[0].generate(101)
        self.sync_all()
        # Three receiving addresses, all owned by node 0.
        # address1: never imported on node 1.
        address1 = self.nodes[0].getnewaddress()
        # address2: imported on node 1 as watch-only.
        address2 = self.nodes[0].getnewaddress()
        # address3: imported on node 1 with its private key (spendable).
        address3 = self.nodes[0].getnewaddress()
        address3_privkey = self.nodes[0].dumpprivkey(address3) # Using privkey
        # Sanity check: node 0 owns address1.
        address_info = self.nodes[0].validateaddress(address1)
        assert_equal(address_info['ismine'], True)
        self.sync_all()
        # Node 1 should have synced the full 101-block chain.
        assert_equal(self.nodes[1].getblockcount(),101)
        # Before any import, node 1 neither owns nor watches any of the addresses.
        address_info = self.nodes[1].validateaddress(address1)
        assert_equal(address_info['iswatchonly'], False)
        assert_equal(address_info['ismine'], False)
        address_info = self.nodes[1].validateaddress(address2)
        assert_equal(address_info['iswatchonly'], False)
        assert_equal(address_info['ismine'], False)
        address_info = self.nodes[1].validateaddress(address3)
        assert_equal(address_info['iswatchonly'], False)
        assert_equal(address_info['ismine'], False)
        # Fund each address in its own block, capturing the raw transaction
        # and a merkle proof — the inputs importprunedfunds requires.
        txnid1 = self.nodes[0].sendtoaddress(address1, 0.1)
        self.nodes[0].generate(1)
        rawtxn1 = self.nodes[0].gettransaction(txnid1)['hex']
        proof1 = self.nodes[0].gettxoutproof([txnid1])
        txnid2 = self.nodes[0].sendtoaddress(address2, 0.05)
        self.nodes[0].generate(1)
        rawtxn2 = self.nodes[0].gettransaction(txnid2)['hex']
        proof2 = self.nodes[0].gettxoutproof([txnid2])
        txnid3 = self.nodes[0].sendtoaddress(address3, 0.025)
        self.nodes[0].generate(1)
        rawtxn3 = self.nodes[0].gettransaction(txnid3)['hex']
        proof3 = self.nodes[0].gettxoutproof([txnid3])
        self.sync_all()
        # Importing a tx whose outputs match no wallet address must fail (-5).
        assert_raises_rpc_error(-5, "No addresses", self.nodes[1].importprunedfunds, rawtxn1, proof1)
        balance1 = self.nodes[1].getbalance("", 0, True)
        assert_equal(balance1, Decimal(0))
        # Watch-only import (no rescan), then importprunedfunds credits it.
        self.nodes[1].importaddress(address2, "add2", False)
        self.nodes[1].importprunedfunds(rawtxn2, proof2)
        balance2 = self.nodes[1].getbalance("add2", 0, True)
        assert_equal(balance2, Decimal('0.05'))
        # Private-key import (no rescan): funds count as spendable, not just watched.
        self.nodes[1].importprivkey(privkey=address3_privkey, label="add3", rescan=False)
        self.nodes[1].importprunedfunds(rawtxn3, proof3)
        balance3 = self.nodes[1].getbalance("add3", 0, False)
        assert_equal(balance3, Decimal('0.025'))
        # Total including watch-only = 0.05 (watched) + 0.025 (owned).
        balance3 = self.nodes[1].getbalance("*", 0, True)
        assert_equal(balance3, Decimal('0.075'))
        # After import: address1 still unknown, address2 watch-only, address3 owned.
        address_info = self.nodes[1].validateaddress(address1)
        assert_equal(address_info['iswatchonly'], False)
        assert_equal(address_info['ismine'], False)
        address_info = self.nodes[1].validateaddress(address2)
        assert_equal(address_info['iswatchonly'], True)
        assert_equal(address_info['ismine'], False)
        address_info = self.nodes[1].validateaddress(address3)
        assert_equal(address_info['iswatchonly'], False)
        assert_equal(address_info['ismine'], True)
        # Removing a tx the wallet never accepted must fail (-8) ...
        assert_raises_rpc_error(-8, "Transaction does not exist in wallet.", self.nodes[1].removeprunedfunds, txnid1)
        balance1 = self.nodes[1].getbalance("*", 0, True)
        assert_equal(balance1, Decimal('0.075'))
        # ... while removing the imported ones drops the balance step by step.
        self.nodes[1].removeprunedfunds(txnid2)
        balance2 = self.nodes[1].getbalance("*", 0, True)
        assert_equal(balance2, Decimal('0.025'))
        self.nodes[1].removeprunedfunds(txnid3)
        balance3 = self.nodes[1].getbalance("*", 0, True)
        assert_equal(balance3, Decimal('0.0'))
# Entry point: run the functional test when executed directly.
if __name__ == '__main__':
    ImportPrunedFundsTest().main()
| 40.008621 | 117 | 0.660203 |
from test_framework.test_framework import PexaTestFramework
from test_framework.util import *
class ImportPrunedFundsTest(PexaTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(101)
self.sync_all()
address1 = self.nodes[0].getnewaddress()
address2 = self.nodes[0].getnewaddress()
address3 = self.nodes[0].getnewaddress()
address3_privkey = self.nodes[0].dumpprivkey(address3)
address_info = self.nodes[0].validateaddress(address1)
assert_equal(address_info['ismine'], True)
self.sync_all()
assert_equal(self.nodes[1].getblockcount(),101)
address_info = self.nodes[1].validateaddress(address1)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].validateaddress(address2)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].validateaddress(address3)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
txnid1 = self.nodes[0].sendtoaddress(address1, 0.1)
self.nodes[0].generate(1)
rawtxn1 = self.nodes[0].gettransaction(txnid1)['hex']
proof1 = self.nodes[0].gettxoutproof([txnid1])
txnid2 = self.nodes[0].sendtoaddress(address2, 0.05)
self.nodes[0].generate(1)
rawtxn2 = self.nodes[0].gettransaction(txnid2)['hex']
proof2 = self.nodes[0].gettxoutproof([txnid2])
txnid3 = self.nodes[0].sendtoaddress(address3, 0.025)
self.nodes[0].generate(1)
rawtxn3 = self.nodes[0].gettransaction(txnid3)['hex']
proof3 = self.nodes[0].gettxoutproof([txnid3])
self.sync_all()
assert_raises_rpc_error(-5, "No addresses", self.nodes[1].importprunedfunds, rawtxn1, proof1)
balance1 = self.nodes[1].getbalance("", 0, True)
assert_equal(balance1, Decimal(0))
self.nodes[1].importaddress(address2, "add2", False)
self.nodes[1].importprunedfunds(rawtxn2, proof2)
balance2 = self.nodes[1].getbalance("add2", 0, True)
assert_equal(balance2, Decimal('0.05'))
self.nodes[1].importprivkey(privkey=address3_privkey, label="add3", rescan=False)
self.nodes[1].importprunedfunds(rawtxn3, proof3)
balance3 = self.nodes[1].getbalance("add3", 0, False)
assert_equal(balance3, Decimal('0.025'))
balance3 = self.nodes[1].getbalance("*", 0, True)
assert_equal(balance3, Decimal('0.075'))
address_info = self.nodes[1].validateaddress(address1)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].validateaddress(address2)
assert_equal(address_info['iswatchonly'], True)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].validateaddress(address3)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], True)
assert_raises_rpc_error(-8, "Transaction does not exist in wallet.", self.nodes[1].removeprunedfunds, txnid1)
balance1 = self.nodes[1].getbalance("*", 0, True)
assert_equal(balance1, Decimal('0.075'))
self.nodes[1].removeprunedfunds(txnid2)
balance2 = self.nodes[1].getbalance("*", 0, True)
assert_equal(balance2, Decimal('0.025'))
self.nodes[1].removeprunedfunds(txnid3)
balance3 = self.nodes[1].getbalance("*", 0, True)
assert_equal(balance3, Decimal('0.0'))
if __name__ == '__main__':
ImportPrunedFundsTest().main()
| true | true |
1c33cd7567a86a3efce192828b9c73c1ad9e3605 | 1,008 | py | Python | src/kid/core/kglobals.py | KidKaboom/Kid-Maya-2022 | 0daec301a63438d681cc4c3a5df6d4efdc70daef | [
"MIT"
] | null | null | null | src/kid/core/kglobals.py | KidKaboom/Kid-Maya-2022 | 0daec301a63438d681cc4c3a5df6d4efdc70daef | [
"MIT"
] | null | null | null | src/kid/core/kglobals.py | KidKaboom/Kid-Maya-2022 | 0daec301a63438d681cc4c3a5df6d4efdc70daef | [
"MIT"
] | null | null | null | # :coding: utf-8
# Python Modules
import os
import sys
# Platforms
PLATFORM = sys.platform
WINDOWS = "win32"
OSX = "darwin"
LINUX = "linux"
# Paths
GLOBALS_PATH = os.path.abspath(__file__)
SCRIPTS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(GLOBALS_PATH)))
PROJECT_PATH = os.path.dirname(SCRIPTS_PATH)
PLUGINS_PATH = os.path.join(PROJECT_PATH, "plug-ins")
LIB_PATH = os.path.join(PROJECT_PATH, "lib")
LIB_WINDOWS64_PATH = os.path.join(LIB_PATH, "win64")
LIB_OSX_PATH = os.path.join(LIB_PATH, "osx")
LIB_LINUX_PATH = os.path.join(LIB_PATH, "linux")
BIN_PATH = os.path.join(PROJECT_PATH, "bin")
BIN_WINDOWS64_PATH = os.path.join(BIN_PATH, "win64")
BIN_OSX_PATH = os.path.join(BIN_PATH, "osx")
BIN_LINUX_PATH = os.path.join(BIN_PATH, "linux")
DOCS_PATH = os.path.join(PROJECT_PATH, "docs")
USER_PATH = os.path.expanduser('~')
DATA_PATH = os.path.join(SCRIPTS_PATH, "data")
# User
# Maya
MAYA_WINDOW_NAME = "MayaWindow"
if __name__ == "__main__":
print(GLOBALS_PATH)
print(DATA_PATH)
| 24.585366 | 78 | 0.738095 |
import os
import sys
PLATFORM = sys.platform
WINDOWS = "win32"
OSX = "darwin"
LINUX = "linux"
GLOBALS_PATH = os.path.abspath(__file__)
SCRIPTS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(GLOBALS_PATH)))
PROJECT_PATH = os.path.dirname(SCRIPTS_PATH)
PLUGINS_PATH = os.path.join(PROJECT_PATH, "plug-ins")
LIB_PATH = os.path.join(PROJECT_PATH, "lib")
LIB_WINDOWS64_PATH = os.path.join(LIB_PATH, "win64")
LIB_OSX_PATH = os.path.join(LIB_PATH, "osx")
LIB_LINUX_PATH = os.path.join(LIB_PATH, "linux")
BIN_PATH = os.path.join(PROJECT_PATH, "bin")
BIN_WINDOWS64_PATH = os.path.join(BIN_PATH, "win64")
BIN_OSX_PATH = os.path.join(BIN_PATH, "osx")
BIN_LINUX_PATH = os.path.join(BIN_PATH, "linux")
DOCS_PATH = os.path.join(PROJECT_PATH, "docs")
USER_PATH = os.path.expanduser('~')
DATA_PATH = os.path.join(SCRIPTS_PATH, "data")
MAYA_WINDOW_NAME = "MayaWindow"
if __name__ == "__main__":
print(GLOBALS_PATH)
print(DATA_PATH)
| true | true |
1c33ce100945493873a1dec3a0ea0ac4d0857ad4 | 919 | py | Python | TestingBisection.py | abecker99/Interpolation | 0527e6296c98b1c7f6cf512e614090f61754705d | [
"MIT"
] | null | null | null | TestingBisection.py | abecker99/Interpolation | 0527e6296c98b1c7f6cf512e614090f61754705d | [
"MIT"
] | null | null | null | TestingBisection.py | abecker99/Interpolation | 0527e6296c98b1c7f6cf512e614090f61754705d | [
"MIT"
] | null | null | null | import numpy as np
def find_sign_change(f, step, a, b):
    """Scan [a, b] with the given step and collect root-bracketing intervals.

    Args:
        f: continuous scalar function.
        step: scan increment (> 0).
        a, b: scan range; intervals are only emitted while x + step < b.

    Returns:
        List of [x, x + step] pairs over which f changes sign — for a
        continuous f each such interval contains at least one root.
    """
    x = a
    pairs = []
    while (x + step < b):
        # Test the sign via the product of the endpoint values: unlike the
        # original f(x + step) / f(x) < 0, this cannot raise
        # ZeroDivisionError when f(x) happens to be exactly zero.
        if (f(x) * f(x + step) < 0):
            pairs.append([x, x + step])
        x += step
    return pairs
def bisect(f, pairs, tolerance, max_iterations=1000):
    """Refine each bracketing interval to a root of f by bisection.

    Args:
        f: continuous scalar function.
        pairs: list of [lo, hi] intervals over which f changes sign
            (e.g. the output of find_sign_change).
        tolerance: stop an interval once abs(f(midpoint)) <= tolerance.
        max_iterations: per-interval safety cap. The original assigned an
            unused ``max_iter = 1000`` inside the loop, so a bad bracket
            could spin forever; the cap is now actually enforced (and
            overridable, keeping the old 3-argument call signature valid).

    Returns:
        List with one approximate root per input interval.
    """
    zeros = []
    for lo, hi in pairs:
        midpoint = (lo + hi) / 2
        for _ in range(max_iterations):
            if abs(f(midpoint)) <= tolerance:
                break
            # Keep the half whose endpoints still bracket the root. The
            # product test replaces f(midpoint) / f(lo) < 0, which raised
            # ZeroDivisionError whenever f(lo) was exactly zero.
            if f(midpoint) * f(lo) < 0:
                hi = midpoint
            else:
                lo = midpoint
            midpoint = (lo + hi) / 2
        zeros.append(midpoint)
    return zeros
# The zeros found below are the z values later needed for the energy computation.
def sinc(x):
    """Unnormalized sinc: sin(x)/x, with the removable singularity at 0.

    Returns 1 at x == 0; otherwise np.sin(x) / x.
    """
    return 1 if x == 0 else np.sin(x) / x
# Bracket the roots of sinc on (0, 10) with a 0.1 scan step ...
pairs = find_sign_change(sinc, 0.1, 0, 10)
print(pairs)
# ... then refine each bracket to a root by bisection.
zeros = bisect(sinc, pairs, 1E-10)
print(zeros)
print(np.pi, 2.0*np.pi, 3.0*np.pi) | 24.837838 | 54 | 0.517954 | import numpy as np
def find_sign_change(f, step, a, b):
x = a
pairs = []
while (x + step < b):
if (f(x + step)/f(x) < 0):
pairs.append([x, x+step])
x += step
return pairs
def bisect(f, pairs, tolerance):
zeros = []
for pair in pairs:
midpoint = (pair[1] - pair[0])/2 + pair[0]
while (abs(f(midpoint)) > tolerance):
if (f(midpoint)/f(pair[0]) < 0):
pair[1] = midpoint
else:
pair[0] = midpoint
midpoint = (pair[1] - pair[0])/2 + pair[0]
max_iter = 1000
zeros.append(midpoint)
return zeros
def sinc(x):
if (x == 0):
return 1
else:
return np.sin(x)/x
pairs = find_sign_change(sinc, 0.1, 0, 10)
print(pairs)
zeros = bisect(sinc, pairs, 1E-10)
print(zeros)
print(np.pi, 2.0*np.pi, 3.0*np.pi) | true | true |
1c33ce82e6d42b041f4b9a88731db0d99ab1c3ab | 141 | py | Python | setup.py | hepteract/nova | cf0e866aa5c0f59a3528de9d71671c567219c2ee | [
"MIT"
] | null | null | null | setup.py | hepteract/nova | cf0e866aa5c0f59a3528de9d71671c567219c2ee | [
"MIT"
] | null | null | null | setup.py | hepteract/nova | cf0e866aa5c0f59a3528de9d71671c567219c2ee | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(
name = "nova",
version = "0.1",
packages = find_packages()
)
| 17.625 | 43 | 0.574468 | from setuptools import setup, find_packages
setup(
name = "nova",
version = "0.1",
packages = find_packages()
)
| true | true |
1c33cebd405dc8f841b48127497e7256268141ab | 1,196 | py | Python | tests/io/test_unique_path.py | safurrier/data_science_utils | 842b025ea3197e8a9946401257b2fa22ef1bf82d | [
"MIT"
] | null | null | null | tests/io/test_unique_path.py | safurrier/data_science_utils | 842b025ea3197e8a9946401257b2fa22ef1bf82d | [
"MIT"
] | null | null | null | tests/io/test_unique_path.py | safurrier/data_science_utils | 842b025ea3197e8a9946401257b2fa22ef1bf82d | [
"MIT"
] | 1 | 2020-03-30T20:59:04.000Z | 2020-03-30T20:59:04.000Z | # %%
import os
import shutil
import pytest
import pathlib
from data_science_toolbox.io.unique_path import unique_path
UNIQUE_FPATH_TEST_CASES = [
('test_file_{:02d}.txt',
'tests/io/unique_path_files',
['test_file_00.txt',
'test_file_01.txt',
],
'tests/io/unique_path_files/test_file_02.txt'
),
]
@pytest.mark.parametrize("fname_pattern, create_dir, create_fnames_list, assert_fname",
                         UNIQUE_FPATH_TEST_CASES
                         )
def test_get_absolute_fpath(fname_pattern, create_dir, create_fnames_list, assert_fname):
    """unique_path() returns the first pattern-generated name not yet on disk.

    Builds the fixture directory and pre-existing files, calls unique_path,
    and always removes the fixture tree afterwards. The original only
    cleaned up on the happy path, so a raising unique_path or a failing
    assertion leaked the directory into later runs.
    """
    create_dir = pathlib.Path(create_dir)
    # parents/exist_ok make fixture creation robust to nesting and reruns.
    create_dir.mkdir(parents=True, exist_ok=True)
    try:
        for test_file in create_fnames_list:
            # touch(exist_ok=True) replaces the exists()-check + empty open().
            (create_dir / test_file).touch(exist_ok=True)
        unique_path_to_test = unique_path(create_dir, fname_pattern)
    finally:
        # Clean up even when unique_path raises, so reruns start fresh.
        shutil.rmtree(create_dir.as_posix(), ignore_errors=True)
    assert unique_path_to_test.as_posix() == assert_fname
| 28.47619 | 89 | 0.683946 |
import os
import shutil
import pytest
import pathlib
from data_science_toolbox.io.unique_path import unique_path
UNIQUE_FPATH_TEST_CASES = [
('test_file_{:02d}.txt',
'tests/io/unique_path_files',
['test_file_00.txt',
'test_file_01.txt',
],
'tests/io/unique_path_files/test_file_02.txt'
),
]
@pytest.mark.parametrize("fname_pattern, create_dir, create_fnames_list, assert_fname",
UNIQUE_FPATH_TEST_CASES
)
def test_get_absolute_fpath(fname_pattern, create_dir, create_fnames_list, assert_fname):
create_dir = pathlib.Path(create_dir)
if not os.path.exists(create_dir.as_posix()):
os.mkdir(create_dir.as_posix())
for test_file in create_fnames_list:
if not os.path.exists(create_dir / test_file):
with open(create_dir / test_file, 'w'):
pass
unique_path_to_test = unique_path(create_dir, fname_pattern)
shutil.rmtree(create_dir.as_posix())
assert unique_path_to_test.as_posix() == assert_fname
| true | true |
1c33cf617a1e7101deeca5ccbf7535fbaef869c7 | 288 | py | Python | st_marys/items.py | nbanion/blah | cf14d33d6f6222f4ba8e7582f11150a887508fa2 | [
"MIT"
] | null | null | null | st_marys/items.py | nbanion/blah | cf14d33d6f6222f4ba8e7582f11150a887508fa2 | [
"MIT"
] | 8 | 2019-10-12T16:38:21.000Z | 2019-10-21T03:20:56.000Z | st_marys/items.py | nbanion/blah | cf14d33d6f6222f4ba8e7582f11150a887508fa2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class StMarysItem(scrapy.Item):
    """Scrapy item for the st_marys spider; no fields declared yet."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
| 19.2 | 53 | 0.6875 |
import scrapy
class StMarysItem(scrapy.Item):
pass
| true | true |
1c33d09dea733d064f0ffa7d8da13c4cbc5f6edc | 7,728 | py | Python | marvel_world/migrations/0001_initial.py | xiaoranppp/si664-final | f5545c04452fd674ddf1d078444e79ea58385e7e | [
"MIT"
] | null | null | null | marvel_world/migrations/0001_initial.py | xiaoranppp/si664-final | f5545c04452fd674ddf1d078444e79ea58385e7e | [
"MIT"
] | 1 | 2018-11-25T21:07:37.000Z | 2018-11-25T21:07:37.000Z | marvel_world/migrations/0001_initial.py | xiaoranppp/si664-final | f5545c04452fd674ddf1d078444e79ea58385e7e | [
"MIT"
] | 1 | 2018-12-21T12:06:03.000Z | 2018-12-21T12:06:03.000Z | # Generated by Django 2.1.4 on 2018-12-12 07:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration for the marvel_world app.

    Declares the character/comic/power models and their lookup tables.
    Every model uses ``'managed': False`` — Django records the schema but
    does not create or alter these tables itself.
    """
    initial = True
    # No prior migrations: this is the app's first.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Alignment',
            fields=[
                ('alignment_id', models.AutoField(primary_key=True, serialize=False)),
                ('alignment_name', models.CharField(max_length=8, unique=True)),
            ],
            options={
                'verbose_name': 'marvel heros alignment',
                'verbose_name_plural': 'marvel heros alignment',
                'db_table': 'alignments',
                'ordering': ['alignment_name'],
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='Character',
            fields=[
                ('character_id', models.AutoField(primary_key=True, serialize=False)),
                ('character_name', models.CharField(max_length=20, unique=True)),
                ('height', models.IntegerField()),
                ('weight', models.IntegerField()),
                ('character_number', models.CharField(max_length=8)),
                ('intelligence', models.IntegerField(blank=True, null=True)),
                ('strength', models.IntegerField(blank=True, null=True)),
                ('speed', models.IntegerField(blank=True, null=True)),
                ('durability', models.IntegerField(blank=True, null=True)),
                ('power', models.IntegerField(blank=True, null=True)),
                ('combat', models.IntegerField(blank=True, null=True)),
                ('total', models.IntegerField(blank=True, null=True)),
            ],
            options={
                'verbose_name': 'character information',
                'verbose_name_plural': 'character information',
                'db_table': 'characters',
                'ordering': ['character_name'],
                'managed': False,
            },
        ),
        # Junction tables (character<->comic, character<->power); the FK
        # columns themselves are presumably added in a later migration.
        migrations.CreateModel(
            name='CharacterComic',
            fields=[
                ('character_comic_id', models.AutoField(primary_key=True, serialize=False)),
            ],
            options={
                'verbose_name': 'character comic relationship',
                'verbose_name_plural': 'character comic relationship',
                'db_table': 'character_comic',
                'ordering': ['character', 'comic'],
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='CharacterPower',
            fields=[
                ('character_power_id', models.AutoField(primary_key=True, serialize=False)),
            ],
            options={
                'verbose_name': 'character power relationship',
                'verbose_name_plural': 'character power relationship',
                'db_table': 'character_power',
                'ordering': ['character', 'power'],
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='Comic',
            fields=[
                ('comic_id', models.AutoField(primary_key=True, serialize=False)),
                ('comic_number', models.CharField(max_length=25, unique=True)),
                ('comic_name', models.CharField(max_length=25, unique=True)),
                ('issue_number', models.CharField(blank=True, max_length=5, null=True)),
                ('description', models.TextField(blank=True, null=True)),
            ],
            options={
                'verbose_name': 'comic information',
                'verbose_name_plural': 'comic information',
                'db_table': 'comics',
                'ordering': ['comic_name'],
                'managed': False,
            },
        ),
        # Simple lookup tables: one unique name column each.
        migrations.CreateModel(
            name='EyeColor',
            fields=[
                ('eye_color_id', models.AutoField(primary_key=True, serialize=False)),
                ('eye_color_name', models.CharField(max_length=25, unique=True)),
            ],
            options={
                'verbose_name': 'eye colors of marvel heros',
                'verbose_name_plural': 'eye colors of marvel heros ',
                'db_table': 'eye_colors',
                'ordering': ['eye_color_name'],
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='Gender',
            fields=[
                ('gender_id', models.AutoField(primary_key=True, serialize=False)),
                ('gender_name', models.CharField(max_length=8, unique=True)),
            ],
            options={
                'verbose_name': 'gender of marvel heros',
                'verbose_name_plural': 'gender of marvel heros',
                'db_table': 'genders',
                'ordering': ['gender_name'],
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='HairColor',
            fields=[
                ('hair_color_id', models.AutoField(primary_key=True, serialize=False)),
                ('hair_color_name', models.CharField(max_length=25, unique=True)),
            ],
            options={
                'verbose_name': 'hair colors of marvel heros',
                'verbose_name_plural': 'hair colors of marvel heros ',
                'db_table': 'hair_colors',
                'ordering': ['hair_color_name'],
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='Power',
            fields=[
                ('power_id', models.AutoField(primary_key=True, serialize=False)),
                ('power_name', models.CharField(max_length=25, unique=True)),
            ],
            options={
                'verbose_name': 'super power information',
                'verbose_name_plural': 'super power information',
                'db_table': 'powers',
                'ordering': ['power_name'],
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='Publisher',
            fields=[
                ('publisher_id', models.AutoField(primary_key=True, serialize=False)),
                ('publisher_name', models.CharField(max_length=25, unique=True)),
            ],
            options={
                'verbose_name': 'publishers of marvel heros',
                'verbose_name_plural': 'publishers of marvel heros ',
                'db_table': 'publisher',
                'ordering': ['publisher_name'],
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='Race',
            fields=[
                ('race_id', models.AutoField(primary_key=True, serialize=False)),
                ('race_name', models.CharField(max_length=25, unique=True)),
            ],
            options={
                'verbose_name': 'races of marvel heros',
                'verbose_name_plural': 'races of marvel heros ',
                'db_table': 'race',
                'ordering': ['race_name'],
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='SkinColor',
            fields=[
                ('skin_color_id', models.AutoField(primary_key=True, serialize=False)),
                ('skin_color_name', models.CharField(max_length=25, unique=True)),
            ],
            options={
                'verbose_name': 'skin colors of marvel heros',
                'verbose_name_plural': 'skin colors of marvel heros ',
                'db_table': 'skin_color',
                'ordering': ['skin_color_name'],
                'managed': False,
            },
        ),
    ]
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Alignment',
fields=[
('alignment_id', models.AutoField(primary_key=True, serialize=False)),
('alignment_name', models.CharField(max_length=8, unique=True)),
],
options={
'verbose_name': 'marvel heros alignment',
'verbose_name_plural': 'marvel heros alignment',
'db_table': 'alignments',
'ordering': ['alignment_name'],
'managed': False,
},
),
migrations.CreateModel(
name='Character',
fields=[
('character_id', models.AutoField(primary_key=True, serialize=False)),
('character_name', models.CharField(max_length=20, unique=True)),
('height', models.IntegerField()),
('weight', models.IntegerField()),
('character_number', models.CharField(max_length=8)),
('intelligence', models.IntegerField(blank=True, null=True)),
('strength', models.IntegerField(blank=True, null=True)),
('speed', models.IntegerField(blank=True, null=True)),
('durability', models.IntegerField(blank=True, null=True)),
('power', models.IntegerField(blank=True, null=True)),
('combat', models.IntegerField(blank=True, null=True)),
('total', models.IntegerField(blank=True, null=True)),
],
options={
'verbose_name': 'character information',
'verbose_name_plural': 'character information',
'db_table': 'characters',
'ordering': ['character_name'],
'managed': False,
},
),
migrations.CreateModel(
name='CharacterComic',
fields=[
('character_comic_id', models.AutoField(primary_key=True, serialize=False)),
],
options={
'verbose_name': 'character comic relationship',
'verbose_name_plural': 'character comic relationship',
'db_table': 'character_comic',
'ordering': ['character', 'comic'],
'managed': False,
},
),
migrations.CreateModel(
name='CharacterPower',
fields=[
('character_power_id', models.AutoField(primary_key=True, serialize=False)),
],
options={
'verbose_name': 'character power relationship',
'verbose_name_plural': 'character power relationship',
'db_table': 'character_power',
'ordering': ['character', 'power'],
'managed': False,
},
),
migrations.CreateModel(
name='Comic',
fields=[
('comic_id', models.AutoField(primary_key=True, serialize=False)),
('comic_number', models.CharField(max_length=25, unique=True)),
('comic_name', models.CharField(max_length=25, unique=True)),
('issue_number', models.CharField(blank=True, max_length=5, null=True)),
('description', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'comic information',
'verbose_name_plural': 'comic information',
'db_table': 'comics',
'ordering': ['comic_name'],
'managed': False,
},
),
migrations.CreateModel(
name='EyeColor',
fields=[
('eye_color_id', models.AutoField(primary_key=True, serialize=False)),
('eye_color_name', models.CharField(max_length=25, unique=True)),
],
options={
'verbose_name': 'eye colors of marvel heros',
'verbose_name_plural': 'eye colors of marvel heros ',
'db_table': 'eye_colors',
'ordering': ['eye_color_name'],
'managed': False,
},
),
migrations.CreateModel(
name='Gender',
fields=[
('gender_id', models.AutoField(primary_key=True, serialize=False)),
('gender_name', models.CharField(max_length=8, unique=True)),
],
options={
'verbose_name': 'gender of marvel heros',
'verbose_name_plural': 'gender of marvel heros',
'db_table': 'genders',
'ordering': ['gender_name'],
'managed': False,
},
),
migrations.CreateModel(
name='HairColor',
fields=[
('hair_color_id', models.AutoField(primary_key=True, serialize=False)),
('hair_color_name', models.CharField(max_length=25, unique=True)),
],
options={
'verbose_name': 'hair colors of marvel heros',
'verbose_name_plural': 'hair colors of marvel heros ',
'db_table': 'hair_colors',
'ordering': ['hair_color_name'],
'managed': False,
},
),
migrations.CreateModel(
name='Power',
fields=[
('power_id', models.AutoField(primary_key=True, serialize=False)),
('power_name', models.CharField(max_length=25, unique=True)),
],
options={
'verbose_name': 'super power information',
'verbose_name_plural': 'super power information',
'db_table': 'powers',
'ordering': ['power_name'],
'managed': False,
},
),
migrations.CreateModel(
name='Publisher',
fields=[
('publisher_id', models.AutoField(primary_key=True, serialize=False)),
('publisher_name', models.CharField(max_length=25, unique=True)),
],
options={
'verbose_name': 'publishers of marvel heros',
'verbose_name_plural': 'publishers of marvel heros ',
'db_table': 'publisher',
'ordering': ['publisher_name'],
'managed': False,
},
),
migrations.CreateModel(
name='Race',
fields=[
('race_id', models.AutoField(primary_key=True, serialize=False)),
('race_name', models.CharField(max_length=25, unique=True)),
],
options={
'verbose_name': 'races of marvel heros',
'verbose_name_plural': 'races of marvel heros ',
'db_table': 'race',
'ordering': ['race_name'],
'managed': False,
},
),
migrations.CreateModel(
name='SkinColor',
fields=[
('skin_color_id', models.AutoField(primary_key=True, serialize=False)),
('skin_color_name', models.CharField(max_length=25, unique=True)),
],
options={
'verbose_name': 'skin colors of marvel heros',
'verbose_name_plural': 'skin colors of marvel heros ',
'db_table': 'skin_color',
'ordering': ['skin_color_name'],
'managed': False,
},
),
]
| true | true |
1c33d1014073d8dbc778a801936790dfc8937be3 | 3,629 | py | Python | google/ads/googleads/v7/services/services/location_view_service/transports/base.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | 285 | 2018-10-05T16:47:58.000Z | 2022-03-31T00:58:39.000Z | google/ads/googleads/v7/services/services/location_view_service/transports/base.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | 425 | 2018-09-10T13:32:41.000Z | 2022-03-31T14:50:05.000Z | google/ads/googleads/v7/services/services/location_view_service/transports/base.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | 369 | 2018-11-28T07:01:00.000Z | 2022-03-28T09:53:22.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.ads.googleads.v7.resources.types import location_view
from google.ads.googleads.v7.services.types import location_view_service
# Tag outgoing requests with the installed google-ads version; fall back to a
# versionless ClientInfo when the distribution is not installed (e.g. when
# running from a source checkout).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-ads",).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class LocationViewServiceTransport(metaclass=abc.ABCMeta):
    """Abstract transport class for LocationViewService.

    Concrete subclasses (e.g. a gRPC transport) must implement the
    ``get_location_view`` property to return the actual RPC callable.
    """
    AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
    def __init__(
        self,
        *,
        host: str = "googleads.googleapis.com",
        credentials: credentials.Credentials = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host
        # If no credentials are provided, then determine the appropriate
        # defaults from the environment (Application Default Credentials).
        if credentials is None:
            credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
        # Save the credentials.
        self._credentials = credentials
        # Lifted into its own function so it can be stubbed out during tests.
        self._prep_wrapped_messages(client_info)
    def _prep_wrapped_messages(self, client_info):
        # Precompute the gapic-wrapped RPC methods (adds retry/timeout and
        # user-agent metadata around the raw transport callable).
        self._wrapped_methods = {
            self.get_location_view: gapic_v1.method.wrap_method(
                self.get_location_view,
                default_timeout=None,
                client_info=client_info,
            ),
        }
    @property
    def get_location_view(
        self,
    ) -> typing.Callable[
        [location_view_service.GetLocationViewRequest],
        location_view.LocationView,
    ]:
        # Abstract: concrete transports return the RPC callable here.
        raise NotImplementedError
# Public API of this module.
__all__ = ("LocationViewServiceTransport",)
| 35.930693 | 78 | 0.675117 |
import abc
import typing
import pkg_resources
from google import auth
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials
from google.ads.googleads.v7.resources.types import location_view
from google.ads.googleads.v7.services.types import location_view_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class LocationViewServiceTransport(metaclass=abc.ABCMeta):
    """Abstract base transport for LocationViewService.

    Concrete subclasses (e.g. a gRPC transport) supply the actual RPC
    channel; this base handles host normalization, credential resolution
    and per-method gapic wrapping.
    """

    # OAuth scopes requested when falling back to application-default credentials.
    AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)

    def __init__(
        self,
        *,
        host: str = "googleads.googleapis.com",
        credentials: credentials.Credentials = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transport.

        Args:
            host: Service endpoint; ":443" is appended when no port is given.
            credentials: Explicit credentials. When None, application
                default credentials are resolved with ``AUTH_SCOPES``.
            client_info: Client metadata attached to each wrapped method.
        """
        # Default to the standard TLS port when the host carries no port.
        if ":" not in host:
            host += ":443"
        self._host = host
        # Fall back to application-default credentials when none are given.
        if credentials is None:
            credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
        self._credentials = credentials
        # Separate method so the wrapping step can be stubbed out in tests.
        self._prep_wrapped_messages(client_info)

    def _prep_wrapped_messages(self, client_info):
        # Precompute gapic-wrapped methods once at construction time.
        self._wrapped_methods = {
            self.get_location_view: gapic_v1.method.wrap_method(
                self.get_location_view,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    @property
    def get_location_view(
        self,
    ) -> typing.Callable[
        [location_view_service.GetLocationViewRequest],
        location_view.LocationView,
    ]:
        """Return the callable for the GetLocationView RPC; subclasses must override."""
        raise NotImplementedError
| true | true |
1c33d19db4bdd04b747c13b4d844b0ad5ee8ff8b | 66,357 | py | Python | python/paddle/tensor/manipulation.py | wangxinxin08/Paddle | b9ab838baa3d9cecddc4c2463a7e35038e70ba42 | [
"Apache-2.0"
] | 2 | 2021-05-16T08:33:38.000Z | 2022-03-14T05:14:14.000Z | python/paddle/tensor/manipulation.py | BMBH/Paddle | 1b0c5ef264b52a9d75f971216618ebbbbc7e5931 | [
"Apache-2.0"
] | null | null | null | python/paddle/tensor/manipulation.py | BMBH/Paddle | 1b0c5ef264b52a9d75f971216618ebbbbc7e5931 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from ..fluid.layers import core
from ..fluid.layer_helper import LayerHelper
from ..fluid.framework import Variable, OpProtoHolder, in_dygraph_mode, convert_np_dtype_to_dtype_, device_guard, dygraph_only
from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
from ..fluid.layers.tensor import fill_constant
from ..fluid.layers import utils
import numpy as np
# TODO: define functions to manipulate a tensor
from ..fluid.layers import cast # noqa: F401
from ..fluid.layers import slice # noqa: F401
from ..fluid.layers import transpose # noqa: F401
from ..fluid.layers import unstack # noqa: F401
from ..fluid.layers import scatter_nd # noqa: F401
from ..fluid.layers import shard_index # noqa: F401
from ..fluid import layers
from ..fluid.dygraph.inplace_utils import inplace_apis_in_dygraph_only
import paddle
__all__ = []
@dygraph_only
def tolist(x):
    """
    Convert a paddle.Tensor into a nested python list.

    **Notes**:
        **This API is ONLY available in Dygraph mode**

    Args:
        x(Tensor): the Tensor to convert.

    Returns:
        list: a python list holding the same values (and dtype) as ``x``.

    Examples:
        .. code-block:: python

            import paddle

            t = paddle.to_tensor([0, 1, 2, 3, 4])
            print(t.tolist())          # [0, 1, 2, 3, 4]
            print(paddle.tolist(t))    # [0, 1, 2, 3, 4]
    """
    # Round-trip through numpy, which already knows how to build nested lists.
    as_ndarray = x.numpy()
    return as_ndarray.tolist()
setattr(core.VarBase, 'tolist', tolist)
def concat(x, axis=0, name=None):
    """
    Join the tensors in ``x`` along an existing dimension ``axis``.

    Args:
        x(list|tuple): Tensors to concatenate. All entries must share a single
            dtype, which may be bool, float16, float32, float64, int32, int64
            or uint8.
        axis(int|Tensor, optional): Dimension to concatenate along — an int
            scalar or a shape-[1] int32/int64 Tensor. The valid range is
            [-R, R) with R = Rank(x); a negative value behaves like
            ``axis + R``. Defaults to 0.
        name (str, optional): See :ref:`api_guide_Name`. Defaults to None.

    Returns:
        Tensor: The concatenated tensor, same dtype as the inputs.

    Examples:
        .. code-block:: python

            import paddle

            x1 = paddle.to_tensor([[1, 2, 3], [4, 5, 6]])
            x2 = paddle.to_tensor([[11, 12, 13], [14, 15, 16]])

            out = paddle.concat([x1, x2], axis=0)
            # [[ 1,  2,  3],
            #  [ 4,  5,  6],
            #  [11, 12, 13],
            #  [14, 15, 16]]
    """
    # Delegate to the fluid implementation, which covers both dygraph and
    # static-graph modes.
    return paddle.fluid.layers.concat(input=x, axis=axis, name=name)
def flip(x, axis, name=None):
    """
    Reverse the order of a n-D tensor along given axis in axis.

    Args:
        x (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . The data type of the input Tensor x
            should be float32, float64, int32, int64, bool.
        axis (list|tuple): The axis(axes) to flip on. Negative indices for indexing from the end are accepted.
        name (str, optional): The default value is None.  Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Tensor: Tensor or LoDTensor calculated by flip layer. The data type is same with input x.

    Examples:
        .. code-block:: python

          import paddle
          import numpy as np

          image_shape=(3, 2, 2)
          x = np.arange(image_shape[0] * image_shape[1] * image_shape[2]).reshape(image_shape)
          x = x.astype('float32')
          img = paddle.to_tensor(x)
          out = paddle.flip(img, [0,1])
          print(out) # [[[10,11][8, 9]],[[6, 7],[4, 5]] [[2, 3],[0, 1]]]
    """
    helper = LayerHelper("flip", **locals())
    check_type(x, 'X', (Variable), 'flip')
    dtype = helper.input_dtype('x')
    check_dtype(dtype, 'X',
                ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
                'flip')
    check_type(axis, 'axis', (list, tuple), 'flip')
    # A user-supplied name pins the output variable's name; otherwise let the
    # helper generate one suitable for type inference.
    if name is None:
        out = helper.create_variable_for_type_inference(dtype)
    else:
        out = helper.create_variable(name=name, dtype=dtype, persistable=False)

    # NOTE(review): unlike most ops in this file there is no core.ops dygraph
    # fast path here — the op is appended through the helper in both modes.
    helper.append_op(
        type="flip",
        inputs={"X": x},
        outputs={"Out": out},
        attrs={"axis": axis})
    return out
def flatten(x, start_axis=0, stop_axis=-1, name=None):
    r"""
    **Flatten op**

    Flattens a contiguous range of axes in a tensor according to start_axis and stop_axis.

    Note that the output Tensor will share data with origin Tensor and doesn't have a
    Tensor copy in ``dygraph`` mode. If you want to use the Tensor copy version, please
    use `Tensor.clone` like ``flatten_clone_x = x.flatten().clone()``.

    For Example:

    .. code-block:: text

        Case 1:

          Given
            X.shape = (3, 100, 100, 4)

          and
            start_axis = 1
            end_axis = 2

          We get:
            Out.shape = (3, 100 * 100, 4)

        Case 2:

          Given
            X.shape = (3, 100, 100, 4)

          and
            start_axis = 0
            stop_axis = -1

          We get:
            Out.shape = (3 * 100 * 100 * 4)

    Args:
        x (Tensor): A tensor of number of dimentions >= axis. A tensor with data type float32,
            float64, int8, int32, int64, uint8.
        start_axis (int): the start axis to flatten
        stop_axis (int): the stop axis to flatten
        name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
            Generally, no setting is required. Default: None.

    Returns:
        Tensor: A tensor with the contents of the input tensor, with input
        axes flattened by indicated start axis and end axis.
        A Tensor with data type same as input x.

    Raises:
        ValueError: If x is not a Tensor.
        ValueError: If start_axis or stop_axis is illegal.

    Examples:

        .. code-block:: python

            import paddle

            image_shape=(2, 3, 4, 4)

            x = paddle.arange(end=image_shape[0] * image_shape[1] * image_shape[2] * image_shape[3])
            img = paddle.reshape(x, image_shape)

            out = paddle.flatten(img, start_axis=1, stop_axis=2)
            # out shape is [2, 12, 4]

            # out shares data with img in dygraph mode
            img[0, 0, 0, 0] = -1
            print(out[0, 0, 0]) # [-1]
    """
    if not (isinstance(x, Variable)):
        raise ValueError("The input x should be a Tensor")

    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int8', 'int32', 'int64', 'uint8'],
        'flatten')
    helper = LayerHelper('flatten', **locals())

    x_dim = len(x.shape)
    if not (isinstance(start_axis, int)) or (
            start_axis > x_dim - 1) or start_axis < -x_dim:
        raise ValueError(
            "The start_axis should be a int, and in range [-rank(x), rank(x))")
    if not (isinstance(stop_axis, int)) or (
            stop_axis > x_dim - 1) or stop_axis < -x_dim:
        raise ValueError(
            "The stop_axis should be a int, and in range [-rank(x), rank(x))")
    # Normalize negative axes before comparing them.
    if start_axis < 0:
        start_axis = start_axis + x_dim
    if stop_axis < 0:
        stop_axis = stop_axis + x_dim
    if start_axis > stop_axis:
        # BUGFIX: the message used to read "larger than stat_axis" — a typo,
        # and equality is in fact allowed.
        raise ValueError("The stop_axis should be no less than the start_axis")

    if in_dygraph_mode():
        # Fast path: call the operator directly; the returned tensor shares
        # data with x.
        dy_out, _ = core.ops.flatten_contiguous_range(
            x, 'start_axis', start_axis, 'stop_axis', stop_axis)
        return dy_out

    out = helper.create_variable_for_type_inference(x.dtype)
    x_shape = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='flatten_contiguous_range',
        inputs={"X": x},
        outputs={'Out': out,
                 'XShape': x_shape},
        attrs={"start_axis": start_axis,
               "stop_axis": stop_axis})
    return out
@inplace_apis_in_dygraph_only
def flatten_(x, start_axis=0, stop_axis=-1, name=None):
    """
    Inplace version of ``flatten`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_tensor_flatten`.
    """
    if not (isinstance(x, Variable)):
        raise ValueError("The input x should be a Tensor")

    # Same validation as ``flatten``; kept in sync so both raise identically.
    x_dim = len(x.shape)
    if not (isinstance(start_axis, int)) or (
            start_axis > x_dim - 1) or start_axis < -x_dim:
        raise ValueError(
            "The start_axis should be a int, and in range [-rank(x), rank(x))")
    if not (isinstance(stop_axis, int)) or (
            stop_axis > x_dim - 1) or stop_axis < -x_dim:
        raise ValueError(
            "The stop_axis should be a int, and in range [-rank(x), rank(x))")
    if start_axis < 0:
        start_axis = start_axis + x_dim
    if stop_axis < 0:
        stop_axis = stop_axis + x_dim
    if start_axis > stop_axis:
        # BUGFIX: fixed "stat_axis" typo in the message; equality is allowed.
        raise ValueError("The stop_axis should be no less than the start_axis")

    dy_out, _ = core.ops.flatten_contiguous_range_(x, 'start_axis', start_axis,
                                                   'stop_axis', stop_axis)
    return dy_out
def roll(x, shifts, axis=None, name=None):
    """
    Roll the `x` tensor along the given axis(axes). With specific 'shifts', Elements that
    roll beyond the last position are re-introduced at the first according to 'shifts'.
    If an axis is not specified,
    the tensor will be flattened before rolling and then restored to the original shape.

    Args:
        x (Tensor): The x tensor as input.
        shifts (int|list|tuple): The number of places by which the elements
            of the `x` tensor are shifted.
        axis (int|list|tuple|None): axis(axes) along which to roll.
        name (str, optional): See :ref:`api_guide_Name`. Default: None.

    Returns:
        Tensor: A Tensor with same data type as `x`.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[1.0, 2.0, 3.0],
                                  [4.0, 5.0, 6.0],
                                  [7.0, 8.0, 9.0]])
            out_z1 = paddle.roll(x, shifts=1)
            print(out_z1)
            #[[9. 1. 2.]
            # [3. 4. 5.]
            # [6. 7. 8.]]
            out_z2 = paddle.roll(x, shifts=1, axis=0)
            print(out_z2)
            #[[7. 8. 9.]
            # [1. 2. 3.]
            # [4. 5. 6.]]
    """
    helper = LayerHelper("roll", **locals())
    # Remember the original shape: when axis is None the tensor is flattened
    # before rolling and reshaped back afterwards.
    origin_shape = x.shape
    # Normalize scalar arguments to the list form expected by the op.
    if type(shifts) == int:
        shifts = [shifts]
    if type(axis) == int:
        axis = [axis]

    len_origin_shape = len(origin_shape)
    if axis:
        # Validate every requested axis against the tensor's rank.
        for i in range(len(axis)):
            if axis[i] >= len_origin_shape or axis[i] < -len_origin_shape:
                raise ValueError(
                    "axis is out of range, it should be in range [{}, {}), but received {}".
                    format(-len_origin_shape, len_origin_shape, axis))

    if axis:
        check_type(axis, 'axis', (list, tuple), 'roll')
    check_type(shifts, 'shifts', (list, tuple), 'roll')

    if in_dygraph_mode():
        # axis=None: flatten to a column, roll along dim 0, restore shape.
        if axis is None:
            x = core.ops.reshape(x, 'shape', [-1, 1])
            axis = [0]
        out = core.ops.roll(x, 'axis', axis, 'shifts', shifts)
        return core.ops.reshape(out, 'shape', origin_shape)

    out = helper.create_variable_for_type_inference(x.dtype)

    # Static-graph analogue of the flatten-then-roll dance above.
    if axis is None:
        x = reshape(x, shape=[-1, 1])
        axis = [0]

    helper.append_op(
        type='roll',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'axis': axis,
               'shifts': shifts})
    out = layers.reshape(out, shape=origin_shape)
    return out
def stack(x, axis=0, name=None):
    """
    Stack the input tensors ``x`` along a new ``axis`` dimension.

    Every tensor in ``x`` must share the same shape and dtype. Stacking N
    tensors of shape [A, B] along ``axis=0`` yields a tensor of shape
    [N, A, B]; along ``axis=1`` the result has shape [A, N, B], and so on.

    Args:
        x (list[Tensor]|tuple[Tensor]): Tensors to stack; all with identical
            shape and dtype (float32, float64, int32 or int64).
        axis (int, optional): Dimension along which to stack, in the range
            ``[-(R+1), R+1)`` where ``R`` is the rank of ``x[0]``. A negative
            value is interpreted as ``axis + R + 1``. Defaults to 0.
        name (str, optional): See :ref:`api_guide_Name`. Defaults to None.

    Returns:
        Tensor: The stacked tensor, same dtype as the inputs.

    Example:
        .. code-block:: python

            import paddle

            a = paddle.to_tensor([[1.0, 2.0]])
            b = paddle.to_tensor([[3.0, 4.0]])
            c = paddle.to_tensor([[5.0, 6.0]])

            out = paddle.stack([a, b, c], axis=0)
            print(out.shape)  # [3, 1, 2]
            # [[[1., 2.]],
            #  [[3., 4.]],
            #  [[5., 6.]]]
    """
    # Axis normalization and op dispatch live in the fluid implementation.
    return layers.stack(x, axis=axis, name=name)
def split(x, num_or_sections, axis=0, name=None):
    """
    Divide ``x`` into several sub-Tensors along dimension ``axis``.

    Args:
        x (Tensor): An N-D Tensor of type bool, float16, float32, float64,
            int32 or int64.
        num_or_sections (int|list|tuple): An int means "split into this many
            equal parts". A list/tuple gives the size of each part along
            ``axis``; at most one entry may be -1, meaning "whatever remains".
            Its length must not exceed ``x``'s size along ``axis``.
        axis (int|Tensor, optional): Dimension to split along — an int scalar
            or a shape-[1] int32/int64 Tensor. A negative value behaves like
            ``rank(x) + axis``. Defaults to 0.
        name (str, optional): See :ref:`api_guide_Name`. Defaults to None.

    Returns:
        list(Tensor): The resulting sub-Tensors.

    Example:
        .. code-block:: python

            import paddle

            x = paddle.rand([3, 9, 5])

            a, b, c = paddle.split(x, num_or_sections=3, axis=1)
            # each of a, b, c has shape [3, 3, 5]

            a, b, c = paddle.split(x, num_or_sections=[2, 3, -1], axis=1)
            # shapes: [3, 2, 5], [3, 3, 5], [3, 4, 5]
    """
    # Thin compatibility wrapper: the fluid API uses `input`/`dim` naming.
    return paddle.fluid.layers.split(
        input=x, num_or_sections=num_or_sections, dim=axis, name=name)
def squeeze(x, axis=None, name=None):
    """
    Remove dimensions of size 1 from the shape of ``x``.

    When ``axis`` is given, only the listed dimensions are considered, and a
    listed dimension whose size is not 1 is left unchanged. When ``axis`` is
    None, every size-1 dimension is removed.

    In dygraph mode the result shares storage with ``x`` (no copy is made);
    use ``x.squeeze().clone()`` when an independent copy is needed.

    Args:
        x (Tensor): Input of type float32, float64, bool, int8, int32 or int64.
        axis (int|list|tuple, optional): Dimension(s) to squeeze, each in
            ``[-ndim(x), ndim(x))``; a negative value behaves like
            ``axis + ndim(x)``. Defaults to None (squeeze all size-1 dims).
        name (str, optional): See :ref:`api_guide_Name`. Defaults to None.

    Returns:
        Tensor: The squeezed tensor, same dtype as ``x``.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.rand([5, 1, 10])
            out = paddle.squeeze(x, axis=1)
            print(x.shape)    # [5, 1, 10]
            print(out.shape)  # [5, 10]

            # out shares data with x in dygraph mode
            x[0, 0, 0] = 10.
            print(out[0, 0])  # [10.]
    """
    # Normalize axis into the list form the underlying layer expects.
    if axis is None:
        axes = []
    elif isinstance(axis, int):
        axes = [axis]
    elif isinstance(axis, tuple):
        axes = list(axis)
    else:
        axes = axis
    return layers.squeeze(x, axes, name)
@inplace_apis_in_dygraph_only
def squeeze_(x, axis=None, name=None):
    """
    In-place variant of ``squeeze``: the result is written back into ``x``.
    See :ref:`api_paddle_tensor_squeeze` for semantics.
    """
    # Same axis normalization as the out-of-place version.
    if axis is None:
        axes = []
    elif isinstance(axis, int):
        axes = [axis]
    elif isinstance(axis, tuple):
        axes = list(axis)
    else:
        axes = axis
    out, _ = core.ops.squeeze2_(x, 'axes', axes)
    return out
def unique(x,
           return_index=False,
           return_inverse=False,
           return_counts=False,
           axis=None,
           dtype="int64",
           name=None):
    r"""
    Returns the unique elements of `x` in ascending order.

    Args:
        x(Tensor): The input tensor, it's data type should be float32, float64, int32, int64.
        return_index(bool, optional): If True, also return the indices of the input tensor that
            result in the unique Tensor.
        return_inverse(bool, optional): If True, also return the indices for where elements in
            the original input ended up in the returned unique tensor.
        return_counts(bool, optional): If True, also return the counts for each unique element.
        axis(int, optional): The axis to apply unique. If None, the input will be flattened.
            Default: None.
        dtype(np.dtype|str, optional): The data type of `indices` or `inverse` tensor: int32 or int64.
            Default: int64.
        name(str, optional): Name for the operation. For more information, please refer to
            :ref:`api_guide_Name`. Default: None.

    Returns:
        tuple: (out, indices, inverse, counts). `out` is the unique tensor for `x`. `indices` is
        provided only if `return_index` is True. `inverse` is provided only if `return_inverse`
        is True. `counts` is provided only if `return_counts` is True.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([2, 3, 3, 1, 5, 3])
            unique = paddle.unique(x)
            np_unique = unique.numpy() # [1 2 3 5]
            _, indices, inverse, counts = paddle.unique(x, return_index=True, return_inverse=True, return_counts=True)
            np_indices = indices.numpy() # [3 0 1 4]
            np_inverse = inverse.numpy() # [1 2 2 0 3 2]
            np_counts = counts.numpy() # [1 1 3 1]

            x = paddle.to_tensor([[2, 1, 3], [3, 0, 1], [2, 1, 3]])
            unique = paddle.unique(x)
            np_unique = unique.numpy() # [0 1 2 3]

            unique = paddle.unique(x, axis=0)
            np_unique = unique.numpy()
            # [[2 1 3]
            #  [3 0 1]]
    """
    # The op expects axis as a (possibly empty) list; empty means "flatten".
    if axis is None:
        axis = []
    else:
        axis = [axis]
    attr_dtype = convert_np_dtype_to_dtype_(dtype)
    if in_dygraph_mode():
        # NOTE: the op always computes all four outputs; the flags only
        # control which ones are returned to the caller.
        out, inverse, indices, counts = core.ops.unique(
            x, 'dtype', attr_dtype, 'return_index', return_index,
            'return_inverse', return_inverse, 'return_counts', return_counts,
            'axis', axis, "is_sorted", True)
        outs = [out]
        if return_index:
            outs.append(indices)
        if return_inverse:
            outs.append(inverse)
        if return_counts:
            outs.append(counts)

        # A lone result is returned bare, not as a 1-tuple.
        if len(outs) == 1:
            return outs[0]

        return tuple(outs)

    check_variable_and_dtype(x, "input",
                             ['float32', 'float64', 'int32', 'int64'], 'unique')
    check_type(return_index, 'return_index', bool, 'unique')
    check_type(return_inverse, 'return_inverse', bool, 'unique')
    check_type(return_counts, 'return_counts', bool, 'unique')
    check_dtype(dtype, 'dtype', ['int32', 'int64'], 'unique')
    if len(axis) != 0:
        check_type(axis[0], 'axis', int, 'unique')

    helper = LayerHelper('unique', **locals())
    attrs = {
        'dtype': attr_dtype,
        "return_index": return_index,
        "return_inverse": return_inverse,
        "return_counts": return_counts,
        "axis": axis,
        "is_sorted": True
    }
    out = helper.create_variable_for_type_inference(
        dtype=x.dtype, stop_gradient=True)
    indices = helper.create_variable_for_type_inference(
        dtype=attr_dtype, stop_gradient=True)
    # NOTE: the op output slot for the inverse mapping is named "Index".
    inverse = helper.create_variable_for_type_inference(
        dtype=attr_dtype, stop_gradient=True)
    counts = helper.create_variable_for_type_inference(
        dtype=attr_dtype, stop_gradient=True)
    outputs = {
        "Out": out,
        "Indices": indices,
        "Index": inverse,
        "Counts": counts
    }
    outs = [out]
    if return_index:
        outs.append(indices)
    if return_inverse:
        outs.append(inverse)
    if return_counts:
        outs.append(counts)

    helper.append_op(
        type="unique", inputs={"X": x}, attrs=attrs, outputs=outputs)

    if len(outs) == 1:
        return outs[0]

    return tuple(outs)
def unsqueeze(x, axis, name=None):
    """
    Insert size-1 dimensions into the shape of ``x`` at the given position(s).

    The indices in ``axis`` refer to dimensions of the *output* tensor. In
    dygraph mode the result shares storage with ``x``; clone it
    (``x.unsqueeze(-1).clone()``) when an independent copy is needed.

    Args:
        x (Tensor): Input of type float32, float64, bool, int8, int32 or int64.
        axis (int|list|tuple|Tensor): Where to insert the new dimension(s).
            May be an int, a list/tuple whose items are ints or shape-[1]
            Tensors, or a 1-D int32 Tensor. A negative value behaves like
            ``axis + ndim(x) + 1``.
        name (str|None): See :ref:`api_guide_Name`. Defaults to None.

    Returns:
        Tensor: The reshaped tensor, same dtype as ``x``.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.rand([5, 10])
            print(paddle.unsqueeze(x, axis=0).shape)       # [1, 5, 10]
            print(paddle.unsqueeze(x, axis=[0, 2]).shape)  # [1, 5, 1, 10]

            pos = paddle.to_tensor([0, 1, 2])
            print(paddle.unsqueeze(x, axis=pos).shape)     # [1, 1, 1, 5, 10]
    """
    # The fluid layer already handles ints, sequences and Tensor axes.
    return layers.unsqueeze(x, axis, name)
@inplace_apis_in_dygraph_only
def unsqueeze_(x, axis, name=None):
    """
    In-place variant of ``unsqueeze``: the result is written back into ``x``.
    See :ref:`api_paddle_tensor_unsqueeze` for semantics.
    """
    # Reduce every accepted axis form (int, Tensor, sequence of either) to a
    # plain python list of ints for the op call.
    if isinstance(axis, int):
        axes = [axis]
    elif isinstance(axis, Variable):
        axes = axis.numpy().tolist()
    elif isinstance(axis, (list, tuple)):
        axes = []
        for item in axis:
            if isinstance(item, Variable):
                axes.append(item.numpy().item(0))
            else:
                axes.append(item)
    else:
        axes = axis
    out, _ = core.ops.unsqueeze2_(x, 'axes', axes)
    return out
def gather(x, index, axis=None, name=None):
    """
    Output is obtained by gathering entries of ``axis``
    of ``x`` indexed by ``index`` and concatenate them together.

    .. code-block:: text

                Given:

                x = [[1, 2],
                     [3, 4],
                     [5, 6]]

                index = [1, 2]
                axis=[0]

                Then:

                out = [[3, 4],
                       [5, 6]]

    Args:
        x (Tensor): The source input tensor with rank>=1. Supported data type is
            int32, int64, float32, float64 and uint8 (only for CPU),
            float16 (only for GPU).
        index (Tensor): The index input tensor with rank=1. Data type is int32 or int64.
        axis (Tensor|int, optional): The axis of input to be gathered, it's can be int or a Tensor with data type is int32 or int64. The default value is None, if None, the ``axis`` is 0.
        name (str, optional): The default value is None.  Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        output (Tensor): The output is a tensor with the same rank as ``x``.

    Examples:

        .. code-block:: python

            import paddle

            input = paddle.to_tensor([[1,2],[3,4],[5,6]])
            index = paddle.to_tensor([0,1])
            output = paddle.gather(input, index, axis=0)
            # expected output: [[1,2],[3,4]]
    """
    if axis is None:
        axis = 0

    if in_dygraph_mode():
        # axis may arrive as a Tensor in dygraph; reduce it to a python int
        # before calling the op.
        axis = axis.item() if isinstance(axis, paddle.Tensor) else axis
        return core.ops.gather(x, index, None, "axis", axis, "overwrite", False)

    check_variable_and_dtype(
        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],
        'gather')
    check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather')

    if isinstance(axis, Variable):
        check_variable_and_dtype(axis, 'axis', ['int32', 'int64'], 'gather')

    helper = LayerHelper('gather', **locals())
    dtype = helper.input_dtype('x')
    out = helper.create_variable_for_type_inference(dtype)
    if not isinstance(axis, Variable):
        # Static axis: pass it as an op attribute.
        helper.append_op(
            type="gather",
            inputs={"X": x,
                    "Index": index},
            attrs={'axis': axis,
                   'overwrite': False},
            outputs={"Out": out})
    else:
        # Tensor axis: feed it as an extra op input so its value is resolved
        # at runtime.
        helper.append_op(
            type="gather",
            inputs={"X": x,
                    "Index": index,
                    "Axis": axis},
            attrs={"overwrite": False},
            outputs={"Out": out})
    return out
def unbind(input, axis=0):
    """
    Removes a tensor dimension, then split the input tensor into multiple sub-Tensors.

    Args:
        input (Tensor): The input variable which is an N-D Tensor, data type being float32, float64, int32 or int64.
        axis (int32|int64, optional): A scalar with type ``int32|int64`` shape [1]. The dimension along which to unbind.
            If :math:`axis < 0`, the dimension to unbind along is :math:`rank(input) + axis`. Default is 0.
    Returns:
        list(Tensor): The list of segmented Tensor variables.

    Example:
        .. code-block:: python

            import paddle
            import numpy as np
            # input is a variable which shape is [3, 4, 5]
            np_input = np.random.rand(3, 4, 5).astype('float32')
            input = paddle.to_tensor(np_input)
            [x0, x1, x2] = paddle.unbind(input, axis=0)
            # x0.shape [4, 5]
            # x1.shape [4, 5]
            # x2.shape [4, 5]
            [x0, x1, x2, x3] = paddle.unbind(input, axis=1)
            # x0.shape [3, 5]
            # x1.shape [3, 5]
            # x2.shape [3, 5]
            # x3.shape [3, 5]
    """
    helper = LayerHelper("unbind", **locals())
    check_type(input, 'input', (Variable), 'unbind')
    dtype = helper.input_dtype()
    check_dtype(dtype, 'unbind', ['float32', 'float64', 'int32', 'int64'],
                'unbind')
    # BUGFIX: accept numpy integer scalars by converting them to a plain
    # python int *before* the type check — previously the isinstance(axis,
    # int) TypeError fired first, making this branch unreachable. Also use
    # .item() instead of np.asscalar, which was removed in NumPy 1.23.
    if isinstance(axis, np.generic):
        axis = axis.item()
    if not isinstance(axis, (int)):
        raise TypeError("The type of 'axis' must be int, but received %s." %
                        (type(axis)))

    input_shape = input.shape
    # Normalize a negative axis and read the number of sub-tensors from the
    # static shape along that dimension.
    axis_ = axis if axis >= 0 else len(input_shape) + axis
    num = input_shape[axis_]
    outs = [
        helper.create_variable_for_type_inference(dtype=helper.input_dtype())
        for i in range(num)
    ]
    if in_dygraph_mode():
        return core.ops.unbind(input, num, 'axis', axis)

    helper.append_op(
        type="unbind",
        inputs={"X": input},
        outputs={"Out": outs},
        attrs={"axis": axis})
    return outs
def scatter(x, index, updates, overwrite=True, name=None):
    """
    **Scatter Layer**

    Write rows of ``updates`` into a copy of ``x`` at the row positions given
    by ``index``.

    With ``overwrite=True`` each selected row of ``x`` is replaced by the
    corresponding row of ``updates``. With ``overwrite=False`` the selected
    rows are first zeroed, then the update rows are accumulated into them, so
    duplicate indices sum their updates.

    **NOTICE**: when ``index`` contains duplicates and ``overwrite`` is True,
    the order in which updates land is nondeterministic, so the result may
    differ between devices or runs.

    Args:
        x (Tensor): The input N-D Tensor with ndim>=1. Data type can be float32, float64.
        index (Tensor): The index 1-D Tensor. Data type can be int32, int64. The length of index cannot exceed updates's length, and the value in index cannot exceed input's length.
        updates (Tensor): update input with updates parameter based on index. shape should be the same as input, and dim value with dim > 1 should be the same as input.
        overwrite (bool): Replace (True) or accumulate (False) when the same
            index appears more than once. Default value is True.
        name(str, optional): See :ref:`api_guide_Name`. Default: None.

    Returns:
        Tensor: The output is a Tensor with the same shape as x.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[1, 1], [2, 2], [3, 3]], dtype='float32')
            index = paddle.to_tensor([2, 1, 0, 1], dtype='int64')
            updates = paddle.to_tensor([[1, 1], [2, 2], [3, 3], [4, 4]], dtype='float32')

            out = paddle.scatter(x, index, updates, overwrite=False)
            # [[3., 3.],
            #  [6., 6.],
            #  [1., 1.]]
    """
    # Dygraph fast path goes straight to the operator.
    if in_dygraph_mode():
        return core.ops.scatter(x, index, updates, 'overwrite', overwrite)

    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'scatter')
    check_type(overwrite, 'overwrite', bool, 'scatter')
    layer_helper = LayerHelper('scatter', **locals())
    result = layer_helper.create_variable_for_type_inference(x.dtype)
    layer_helper.append_op(
        type="scatter",
        inputs={"X": x,
                "Ids": index,
                "Updates": updates},
        attrs={'overwrite': overwrite},
        outputs={"Out": result})
    return result
@inplace_apis_in_dygraph_only
def scatter_(x, index, updates, overwrite=True, name=None):
    """
    In-place variant of ``scatter``: the result is written back into ``x``.
    See :ref:`api_paddle_tensor_scatter` for semantics.
    """
    return core.ops.scatter_(x, index, updates, 'overwrite', overwrite)
def scatter_nd_add(x, index, updates, name=None):
    r"""
    **Scatter_nd_add Layer**

    Output is obtained by applying sparse addition to a single value
    or slice in a Tensor.

    :attr:`x` is a Tensor with ndim :math:`R`
    and :attr:`index` is a Tensor with ndim :math:`K` . Thus, :attr:`index`
    has shape :math:`[i_0, i_1, ..., i_{K-2}, Q]` where :math:`Q \leq R` . :attr:`updates`
    is a Tensor with ndim :math:`K - 1 + R - Q` and its
    shape is :math:`index.shape[:-1] + x.shape[index.shape[-1]:]` .

    According to the :math:`[i_0, i_1, ..., i_{K-2}]` of :attr:`index` ,
    add the corresponding :attr:`updates` slice to the :attr:`x` slice
    which is obtained by the last one dimension of :attr:`index` .

    .. code-block:: text

        Given:

        * Case 1:
            x = [0, 1, 2, 3, 4, 5]
            index = [[1], [2], [3], [1]]
            updates = [9, 10, 11, 12]

          we get:

            output = [0, 22, 12, 14, 4, 5]

        * Case 2:
            x = [[65, 17], [-14, -25]]
            index = [[], []]
            updates = [[[-1, -2], [1, 2]],
                       [[3, 4], [-3, -4]]]
            x.shape = (2, 2)
            index.shape = (2, 0)
            updates.shape = (2, 2, 2)

          we get:

            output = [[67, 19], [-16, -27]]

    Args:
        x (Tensor): The x input. Its dtype should be float32, float64.
        index (Tensor): The index input with ndim > 1 and index.shape[-1] <= x.ndim.
            Its dtype should be int32 or int64 as it is used as indexes.
        updates (Tensor): The updated value of scatter_nd_add op, and it must have the same dtype
            as x. It must have the shape index.shape[:-1] + x.shape[index.shape[-1]:].
        name (str|None): The output tensor name. If set None, the layer will be named automatically.

    Returns:
        output (Tensor): The output is a tensor with the same shape and dtype as x.

    Examples:

        .. code-block:: python

            import paddle
            import numpy as np

            x = paddle.rand(shape=[3, 5, 9, 10], dtype='float32')
            updates = paddle.rand(shape=[3, 9, 10], dtype='float32')
            index_data = np.array([[1, 1],
                                   [0, 1],
                                   [1, 3]]).astype(np.int64)
            index = paddle.to_tensor(index_data)
            output = paddle.scatter_nd_add(x, index, updates)
    """
    # BUGFIX: previously forwarded name=None, silently dropping a
    # user-supplied layer name.
    return layers.scatter_nd_add(x, index, updates, name=name)
def chunk(x, chunks, axis=0, name=None):
    """
    Split the input tensor into multiple sub-Tensors of equal size along ``axis``.

    Args:
        x (Tensor): A N-D Tensor. The data type is bool, float16, float32,
            float64, int32 or int64.
        chunks (int): The number of tensors to split ``x`` into along ``axis``.
        axis (int|Tensor, optional): The axis along which to split; a scalar
            ``int`` or a shape-[1] Tensor of type int32/int64. A negative
            value means :math:`rank(x) + axis`. Default is 0.
        name (str, optional): Name for the operation. Normally there is no
            need to set this; see :ref:`api_guide_Name`.

    Returns:
        list(Tensor): The list of segmented Tensors.

    Example:
        .. code-block:: python

            import numpy as np
            import paddle

            x = paddle.to_tensor(np.random.random([3, 9, 5]).astype("int32"))
            out0, out1, out2 = paddle.chunk(x, chunks=3, axis=1)
            # each out*.shape is [3, 3, 5]
    """
    check_type(chunks, 'chunks', (int), 'chunk')
    # ``chunk`` is sugar over ``split`` with an integer section count.
    return paddle.fluid.layers.split(
        input=x, num_or_sections=chunks, dim=axis, name=name)
def tile(x, repeat_times, name=None):
    """
    Construct a new Tensor by repeating ``x`` the number of times given by ``repeat_times``.
    After tiling, the value of the i'th dimension of the output is equal to ``x.shape[i]*repeat_times[i]``.

    Both the number of dimensions of ``x`` and the number of elements in ``repeat_times`` should be less than or equal to 6.

    Args:
        x (Tensor): The input tensor, its data type should be bool, float32, float64, int32 or int64.
        repeat_times (Tensor|tuple|list): The number of repeating times. If repeat_times is a list or tuple, all its elements
            should be integers or 1-D Tensors with the data type int32. If repeat_times is a Tensor, it should be an 1-D Tensor with the data type int32.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. The data type is the same as ``x``.

    Examples:
        .. code-block:: python

            import paddle

            data = paddle.to_tensor([1, 2, 3], dtype='int32')
            out = paddle.tile(data, repeat_times=[2, 1])
            # [[1, 2, 3], [1, 2, 3]]

            out = paddle.tile(data, repeat_times=[2, 2])
            # [[1, 2, 3, 1, 2, 3], [1, 2, 3, 1, 2, 3]]
    """
    if in_dygraph_mode():
        # Imperative fast path: call the C++ op directly.
        return core.ops.tile(x, 'repeat_times', repeat_times)
    check_type(repeat_times, 'repeat_times', (list, tuple, Variable), 'tile')
    if isinstance(repeat_times, Variable):
        assert len(repeat_times.shape) == 1, (
            'repeat_times must be an 1-D Tensor.')
    else:
        for elem in repeat_times:
            if isinstance(elem, Variable):
                assert len(elem.shape) == 1, (
                    'Elements in repeat_times must be 1-D Tensors or integers.')
            else:
                type_tuple = (int, np.int32, np.int64)
                assert isinstance(elem, type_tuple), (
                    'Elements in repeat_times must be 1-D Tensors or integers.')

    check_variable_and_dtype(
        x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'tile')
    if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:
        # Message fix: the original said "date type" and showed the comparison
        # operator ``==`` where the assignment ``=`` was meant.
        raise ValueError(
            "When the data type of input 'x' for tile is bool, you "
            "must set its stop_gradient to be True by "
            "some_var.stop_gradient = True, supporting some_var as the input.")

    helper = LayerHelper('tile', **locals())

    inputs = {"X": [x]}
    attrs = {}

    def get_attr_repeat_times(list_repeat_times):
        # Variable entries are encoded as -1 in the static attr; their actual
        # values are supplied at runtime via the 'repeat_times_tensor' input.
        attrs_repeat_times = []
        for idx, times in enumerate(list_repeat_times):
            if isinstance(times, Variable):
                attrs_repeat_times.append(-1)
            else:
                attrs_repeat_times.append(times)
                assert times > 0, (
                    "All elements in repeat_times must be positive for tile.")
        return attrs_repeat_times

    if isinstance(repeat_times, Variable):
        # Gradients never flow through the repeat-count tensor.
        repeat_times.stop_gradient = True
        inputs['RepeatTimes'] = repeat_times
        attrs['repeat_times'] = [-1]
    elif isinstance(repeat_times, (list, tuple)):
        attrs['repeat_times'] = get_attr_repeat_times(repeat_times)
        if utils._contain_var(repeat_times):
            inputs['repeat_times_tensor'] = utils._convert_to_tensor_list(
                repeat_times)

    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='tile', inputs=inputs, outputs={'Out': out}, attrs=attrs)
    return out
def expand_as(x, y, name=None):
    """
    Expand the input tensor ``x`` to the same shape as the input tensor ``y``.

    Both the number of dimensions of ``x`` and ``y`` must be less than or equal to 6, and the number of dimensions of ``y`` must be greater than or equal to that of ``x``. The dimension to expand must have a value of 1.

    Args:
        x (Tensor): The input tensor, its data type is bool, float32, float64, int32 or int64.
        y (Tensor): The input tensor that gives the shape to expand to.
        name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor: A Tensor with the same shape as ``y``. The data type is the same as ``x``.

    Examples:
        .. code-block:: python

            import paddle

            data_x = paddle.to_tensor([1, 2, 3], 'int32')
            data_y = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], 'int32')
            out = paddle.expand_as(data_x, data_y)
            # [[1, 2, 3], [1, 2, 3]]
    """
    if in_dygraph_mode():
        # Imperative fast path: expand_as is lowered onto expand_v2 with
        # y's static shape as the target.
        return core.ops.expand_as_v2(x, 'target_shape', y.shape)

    check_variable_and_dtype(
        x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'expand_as')
    check_type(y, 'y', Variable, 'expand_as')
    if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:
        # Message fix: the original contradicted itself ("to be False" while
        # the shown remedy — and the check above — require True).
        raise ValueError(
            "When the data type of input 'x' for expand_as is bool, "
            "you must set its stop_gradient to be True by "
            "some_var.stop_gradient = True, supporting "
            "some_var as the input 'x'.")
    inputs = {"X": [x]}

    helper = LayerHelper('expand_as', **locals())
    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='expand_as_v2',
        inputs=inputs,
        attrs={'target_shape': y.shape},
        outputs={'Out': out})
    return out
def broadcast_to(x, shape, name=None):
    """
    Broadcast the input tensor to a given shape.

    Both the number of dimensions of ``x`` and the number of elements in ``shape`` should be less than or equal to 6. The dimension to broadcast to must have a value 1.

    Args:
        x (Tensor): The input tensor, its data type is bool, float32, float64, int32 or int64.
        shape (list|tuple|Tensor): The result shape after broadcasting. The data type is int32. If shape is a list or tuple, all its elements
            should be integers or 1-D Tensors with the data type int32. If shape is a Tensor, it should be an 1-D Tensor with the data type int32.
            The value -1 in shape means keeping the corresponding dimension unchanged.
        name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        N-D Tensor: A Tensor with the given shape. The data type is the same as ``x``.

    Examples:
        .. code-block:: python

            import paddle

            data = paddle.to_tensor([1, 2, 3], dtype='int32')
            out = paddle.broadcast_to(data, shape=[2, 3])
            # [[1, 2, 3], [1, 2, 3]]
    """
    if in_dygraph_mode():
        # broadcast_to shares the expand_v2 kernel.
        return core.ops.expand_v2(x, 'shape', shape)

    if isinstance(shape, Variable):
        assert len(shape.shape) == 1, ('shape must be an 1-D Tensor.')
    else:
        for elem in shape:
            if isinstance(elem, Variable):
                assert len(elem.shape) == 1, (
                    'Elements in shape must be 1-D Tensors or integers.')
            else:
                type_tuple = (int, np.int32, np.int64)
                assert isinstance(elem, type_tuple), (
                    'Elements in shape must be 1-D Tensors or integers.')

    check_variable_and_dtype(x, 'x',
                             ['bool', 'float32', 'float64', 'int32', 'int64'],
                             'broadcast_to')
    check_type(shape, 'shape', (list, tuple, Variable), 'broadcast_to')
    if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:
        # Message fix: the original contradicted itself ("to be False" while
        # the shown remedy — and the check above — require True).
        raise ValueError(
            "When the data type of input 'x' for broadcast_to is bool, "
            "you must set its stop_gradient to be True by "
            "some_var.stop_gradient = True, supporting "
            "some_var as the input.")
    inputs = {"X": [x]}
    attrs = {}

    helper = LayerHelper('expand', **locals())

    def get_attr_expand_shape(list_expand_shape):
        # Variable entries are encoded as -1 in the static attr; the real
        # values are supplied via 'expand_shapes_tensor' at runtime.
        attrs_expand_shape = []
        for idx, shape in enumerate(list_expand_shape):
            if isinstance(shape, Variable):
                attrs_expand_shape.append(-1)
            else:
                attrs_expand_shape.append(shape)
                assert shape > 0 or shape == -1, (
                    "All elements in shape of broadcast_to must be positive or -1."
                )
        return attrs_expand_shape

    if isinstance(shape, Variable):
        # The target shape carries no gradient.
        shape.stop_gradient = True
        inputs['Shape'] = shape
    elif isinstance(shape, (list, tuple)):
        attrs['shape'] = get_attr_expand_shape(shape)
        if utils._contain_var(shape):
            inputs['expand_shapes_tensor'] = utils._convert_to_tensor_list(
                shape)

    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='expand_v2', inputs=inputs, outputs={'Out': out}, attrs=attrs)
    return out
def expand(x, shape, name=None):
    """
    Expand the input tensor to a given shape.

    Both the number of dimensions of ``x`` and the number of elements in ``shape`` should be less than or equal to 6. The dimension to expand must have a value 1.

    Args:
        x (Tensor): The input tensor, its data type is bool, float32, float64, int32 or int64.
        shape (list|tuple|Tensor): The result shape after expanding. The data type is int32. If shape is a list or tuple, all its elements
            should be integers or 1-D Tensors with the data type int32. If shape is a Tensor, it should be an 1-D Tensor with the data type int32.
            The value -1 in shape means keeping the corresponding dimension unchanged.
        name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        N-D Tensor: A Tensor with the given shape. The data type is the same as ``x``.

    Examples:
        .. code-block:: python

            import paddle

            data = paddle.to_tensor([1, 2, 3], dtype='int32')
            out = paddle.expand(data, shape=[2, 3])
            # [[1, 2, 3], [1, 2, 3]]
    """
    if in_dygraph_mode():
        return core.ops.expand_v2(x, 'shape', shape)

    if isinstance(shape, Variable):
        assert len(shape.shape) == 1, ('shape must be an 1-D Tensor.')
    else:
        for elem in shape:
            if isinstance(elem, Variable):
                assert len(elem.shape) == 1, (
                    'Elements in shape must be 1-D Tensors or integers.')
            else:
                type_tuple = (int, np.int32, np.int64)
                assert isinstance(elem, type_tuple), (
                    'Elements in shape must be 1-D Tensors or integers.')

    check_variable_and_dtype(
        x, 'x', ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
        'expand')
    check_type(shape, 'shape', (list, tuple, Variable), 'expand')
    if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:
        # Message fix: the original contradicted itself ("to be False" while
        # the shown remedy — and the check above — require True).
        raise ValueError("When the data type of input 'x' for expand is bool, "
                         "you must set its stop_gradient to be True by "
                         "some_var.stop_gradient = True, supporting "
                         "some_var as the input.")

    inputs = {"X": [x]}
    attrs = {}

    helper = LayerHelper('expand', **locals())

    def get_attr_expand_shape(list_expand_shape):
        # NOTE(review): Variable entries are encoded as -2 here, whereas the
        # otherwise-identical helper in broadcast_to uses -1 — presumably the
        # expand_v2 op distinguishes "from tensor" (-2) from "keep dim" (-1);
        # confirm against the op's attribute handling before changing.
        attrs_expand_shape = []
        for idx, shape in enumerate(list_expand_shape):
            if isinstance(shape, Variable):
                attrs_expand_shape.append(-2)
            else:
                attrs_expand_shape.append(shape)
                assert shape > 0 or shape == -1, (
                    "All elements in shape of expand must be positive or -1.")
        return attrs_expand_shape

    if isinstance(shape, Variable):
        # The target shape carries no gradient.
        shape.stop_gradient = True
        inputs['Shape'] = shape
    elif isinstance(shape, (list, tuple)):
        attrs['shape'] = get_attr_expand_shape(shape)
        if utils._contain_var(shape):
            inputs['expand_shapes_tensor'] = utils._convert_to_tensor_list(
                shape)

    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='expand_v2', inputs=inputs, outputs={'Out': out}, attrs=attrs)
    return out
def reshape(x, shape, name=None):
    """
    Change the shape of ``x`` without changing its data.

    In ``dygraph`` mode the output Tensor shares its storage with ``x``; use
    ``x.reshape([-1]).clone()`` when an independent copy is required.

    Two special values are accepted inside ``shape``:

    1. ``-1`` — this dimension is inferred from the total element count of
       ``x`` and the remaining dimensions; at most one ``-1`` is allowed.
    2. ``0`` — this dimension is copied from the corresponding dimension of
       ``x``; the index of a ``0`` must not exceed the rank of ``x``.

    For example, reshaping a [2, 4, 6] tensor with target [-1, 0, 3, 2]
    yields shape [2, 4, 3, 2]: the ``0`` copies the 4, and the ``-1`` is
    inferred as 2.

    Args:
        x (Tensor): An N-D Tensor of data type ``float32``, ``float64``,
            ``int32``, ``int64`` or ``bool``.
        shape (list|tuple|Tensor): The target shape. If a list or tuple, its
            elements are integers or shape-[1] int32 Tensors; at most one
            element may be -1. If a Tensor, it must be 1-D with dtype int32.
        name (str, optional): Name for the operation. Normally there is no
            need to set this; see :ref:`api_guide_Name`.

    Returns:
        Tensor: A reshaped Tensor with the same data type as ``x``.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.rand([2, 4, 6], dtype="float32")
            out = paddle.reshape(x, [-1, 0, 3, 2])
            # out.shape is [2, 4, 3, 2]

            # out shares data with x in dygraph mode
            x[0, 0, 0] = 10.
            # out[0, 0] is now [10.]
    """
    # Thin wrapper: validation and op construction live in the fluid layer.
    return paddle.fluid.layers.reshape(x=x, shape=shape, name=name)
@inplace_apis_in_dygraph_only
def reshape_(x, shape, name=None):
    """
    Inplace version of ``reshape``: ``x`` itself is given the new shape and
    returned. Please refer to :ref:`api_paddle_tensor_reshape`.
    """
    # A Variable shape is passed straight to the kernel as a tensor input.
    if isinstance(shape, Variable):
        shape.stop_gradient = True
        result, _ = core.ops.reshape2_(x, shape)
        return result
    # A list/tuple shape may mix plain ints with shape-[1] Tensors; resolve
    # every Tensor element to its scalar value before calling the kernel.
    if isinstance(shape, (list, tuple)):
        resolved = []
        for dim in shape:
            if isinstance(dim, Variable):
                resolved.append(dim.numpy().item(0))
            else:
                resolved.append(dim)
        result, _ = core.ops.reshape2_(x, None, 'shape', resolved)
        return result
    # Any other ``shape`` type falls through and returns None, matching the
    # original control flow.
def gather_nd(x, index, name=None):
    """
    High-dimensional extension of :code:`gather` supporting simultaneous
    indexing by multiple axes. ``index`` is a K-dimensional integer tensor,
    treated as a (K-1)-dimensional tensor of indices into ``x``, each of
    which selects a slice:

    .. math::
        output[(i_0, ..., i_{K-2})] = x[index[(i_0, ..., i_{K-2})]]

    It must hold that :code:`index.shape[-1] <= x.rank`, and the output has
    shape :code:`index.shape[:-1] + x.shape[index.shape[-1]:]`.

    .. code-block:: text

        Given x with shape (2, 3, 4):

        index = [[1]]       -> gathers x[1, :, :]   (a (1, 3, 4) result)
        index = [[0, 2]]    -> gathers x[0, 2, :]   (a (1, 4) result)
        index = [[1, 2, 3]] -> gathers x[1, 2, 3]   (a (1,) result)

    Args:
        x (Tensor): The input Tensor; its data type should be bool, float32,
            float64, int32 or int64.
        index (Tensor): The index input with rank > 1 and
            ``index.shape[-1] <= x.rank``; dtype int32 or int64.
        name (str, optional): Name for the operation. Normally there is no
            need to set this; see :ref:`api_guide_Name`.

    Returns:
        Tensor: gathered result of shape
        ``index.shape[:-1] + x.shape[index.shape[-1]:]``.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[[1, 2], [3, 4], [5, 6]],
                                  [[7, 8], [9, 10], [11, 12]]])
            index = paddle.to_tensor([[0, 1]])
            output = paddle.gather_nd(x, index)  # [[3, 4]]
    """
    # Delegate to the fluid implementation; only the parameter name differs
    # (``x`` here vs ``input`` there).
    return paddle.fluid.layers.gather_nd(input=x, index=index, name=name)
def strided_slice(x, axes, starts, ends, strides, name=None):
    """
    Produce a slice of ``x`` along multiple axes, similar to numpy basic
    indexing: https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html

    For each axis listed in ``axes``, the corresponding entries of ``starts``,
    ``ends`` and ``strides`` define a slice. A negative ``starts``/``ends``
    value counts from the end of that axis; a value greater than the axis
    length is clamped to the length (pass INT_MAX to slice to the end of an
    axis of unknown size). A negative stride slices in the reverse direction.
    ``axes``, ``starts``, ``ends`` and ``strides`` must all have equal length.

    .. code-block:: text

        data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]

        axes=[0,1], starts=[1,0],  ends=[2,3],      strides=[1,1]  -> [[5, 6, 7]]
        axes=[0,1], starts=[0,1],  ends=[2,0],      strides=[1,-1] -> [[8, 7, 6]]
        axes=[0,1], starts=[0,1],  ends=[-1,1000],  strides=[1,3]  -> [[2]]

    Args:
        x (Tensor): An N-D ``Tensor`` of data type ``float32``, ``float64``,
            ``int32`` or ``int64``.
        axes (list|tuple): int32 axes that ``starts`` and ``ends`` apply to.
        starts (list|tuple|Tensor): int32 start indices per axis; a list/tuple
            of ints or shape-[1] Tensors, or a single 1-D Tensor.
        ends (list|tuple|Tensor): int32 end indices per axis, same forms as
            ``starts``.
        strides (list|tuple|Tensor): int32 slice steps per axis, same forms as
            ``starts``.
        name (str, optional): Name for the operation; see
            :ref:`api_guide_Name`.

    Returns:
        Tensor: A ``Tensor`` with the same dimension as ``x``; data type is
        the same as ``x``.

    Examples:
        .. code-block:: python

            import paddle
            x = paddle.zeros(shape=[3, 4, 5, 6], dtype="float32")

            axes = [1, 2, 3]
            sliced_1 = paddle.strided_slice(
                x, axes=axes, starts=[-3, 0, 2], ends=[3, 2, 4],
                strides=[1, 1, 1])
            # sliced_1 is x[:, 1:3:1, 0:2:1, 2:4:1].

            # entries of starts may themselves be Tensors:
            minus_3 = paddle.full(shape=[1], fill_value=-3, dtype='int32')
            sliced_2 = paddle.strided_slice(
                x, axes=axes, starts=[minus_3, 0, 2], ends=[3, 2, 4],
                strides=[1, 1, 2])
            # sliced_2 is x[:, 1:3:1, 0:2:1, 2:4:2].
    """
    # NOTE(review): ``name`` is accepted but not forwarded — the fluid
    # implementation appears to take no ``name`` parameter; confirm before
    # changing this call.
    return paddle.fluid.layers.strided_slice(
        input=x, axes=axes, starts=starts, ends=ends, strides=strides)
| 37.939966 | 457 | 0.559308 |
from __future__ import print_function
from ..fluid.layers import core
from ..fluid.layer_helper import LayerHelper
from ..fluid.framework import Variable, OpProtoHolder, in_dygraph_mode, convert_np_dtype_to_dtype_, device_guard, dygraph_only
from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
from ..fluid.layers.tensor import fill_constant
from ..fluid.layers import utils
import numpy as np
from ..fluid.layers import cast
from ..fluid.layers import slice
from ..fluid.layers import transpose
from ..fluid.layers import unstack
from ..fluid.layers import scatter_nd
from ..fluid.layers import shard_index
from ..fluid import layers
from ..fluid.dygraph.inplace_utils import inplace_apis_in_dygraph_only
import paddle
__all__ = []
@dygraph_only
def tolist(x):
return x.numpy().tolist()
setattr(core.VarBase, 'tolist', tolist)
def concat(x, axis=0, name=None):
return paddle.fluid.layers.concat(input=x, axis=axis, name=name)
def flip(x, axis, name=None):
helper = LayerHelper("flip", **locals())
check_type(x, 'X', (Variable), 'flip')
dtype = helper.input_dtype('x')
check_dtype(dtype, 'X',
['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
'flip')
check_type(axis, 'axis', (list, tuple), 'flip')
if name is None:
out = helper.create_variable_for_type_inference(dtype)
else:
out = helper.create_variable(name=name, dtype=dtype, persistable=False)
helper.append_op(
type="flip",
inputs={"X": x},
outputs={"Out": out},
attrs={"axis": axis})
return out
def flatten(x, start_axis=0, stop_axis=-1, name=None):
if not (isinstance(x, Variable)):
raise ValueError("The input x should be a Tensor")
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int8', 'int32', 'int64', 'uint8'],
'flatten')
helper = LayerHelper('flatten', **locals())
x_dim = len(x.shape)
if not (isinstance(start_axis, int)) or (
start_axis > x_dim - 1) or start_axis < -x_dim:
raise ValueError(
"The start_axis should be a int, and in range [-rank(x), rank(x))")
if not (isinstance(stop_axis, int)) or (
stop_axis > x_dim - 1) or stop_axis < -x_dim:
raise ValueError(
"The stop_axis should be a int, and in range [-rank(x), rank(x))")
if start_axis < 0:
start_axis = start_axis + x_dim
if stop_axis < 0:
stop_axis = stop_axis + x_dim
if start_axis > stop_axis:
raise ValueError("The stop_axis should be larger than stat_axis")
if in_dygraph_mode():
dy_out, _ = core.ops.flatten_contiguous_range(
x, 'start_axis', start_axis, 'stop_axis', stop_axis)
return dy_out
out = helper.create_variable_for_type_inference(x.dtype)
x_shape = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='flatten_contiguous_range',
inputs={"X": x},
outputs={'Out': out,
'XShape': x_shape},
attrs={"start_axis": start_axis,
"stop_axis": stop_axis})
return out
@inplace_apis_in_dygraph_only
def flatten_(x, start_axis=0, stop_axis=-1, name=None):
if not (isinstance(x, Variable)):
raise ValueError("The input x should be a Tensor")
x_dim = len(x.shape)
if not (isinstance(start_axis, int)) or (
start_axis > x_dim - 1) or start_axis < -x_dim:
raise ValueError(
"The start_axis should be a int, and in range [-rank(x), rank(x))")
if not (isinstance(stop_axis, int)) or (
stop_axis > x_dim - 1) or stop_axis < -x_dim:
raise ValueError(
"The stop_axis should be a int, and in range [-rank(x), rank(x))")
if start_axis < 0:
start_axis = start_axis + x_dim
if stop_axis < 0:
stop_axis = stop_axis + x_dim
if start_axis > stop_axis:
raise ValueError("The stop_axis should be larger than stat_axis")
dy_out, _ = core.ops.flatten_contiguous_range_(x, 'start_axis', start_axis,
'stop_axis', stop_axis)
return dy_out
def roll(x, shifts, axis=None, name=None):
helper = LayerHelper("roll", **locals())
origin_shape = x.shape
if type(shifts) == int:
shifts = [shifts]
if type(axis) == int:
axis = [axis]
len_origin_shape = len(origin_shape)
if axis:
for i in range(len(axis)):
if axis[i] >= len_origin_shape or axis[i] < -len_origin_shape:
raise ValueError(
"axis is out of range, it should be in range [{}, {}), but received {}".
format(-len_origin_shape, len_origin_shape, axis))
if axis:
check_type(axis, 'axis', (list, tuple), 'roll')
check_type(shifts, 'shifts', (list, tuple), 'roll')
if in_dygraph_mode():
if axis is None:
x = core.ops.reshape(x, 'shape', [-1, 1])
axis = [0]
out = core.ops.roll(x, 'axis', axis, 'shifts', shifts)
return core.ops.reshape(out, 'shape', origin_shape)
out = helper.create_variable_for_type_inference(x.dtype)
if axis is None:
x = reshape(x, shape=[-1, 1])
axis = [0]
helper.append_op(
type='roll',
inputs={'X': x},
outputs={'Out': out},
attrs={'axis': axis,
'shifts': shifts})
out = layers.reshape(out, shape=origin_shape)
return out
def stack(x, axis=0, name=None):
return layers.stack(x, axis, name)
def split(x, num_or_sections, axis=0, name=None):
return paddle.fluid.layers.split(
input=x, num_or_sections=num_or_sections, dim=axis, name=name)
def squeeze(x, axis=None, name=None):
if axis is None:
axis = []
elif isinstance(axis, int):
axis = [axis]
elif isinstance(axis, tuple):
axis = list(axis)
return layers.squeeze(x, axis, name)
@inplace_apis_in_dygraph_only
def squeeze_(x, axis=None, name=None):
if axis is None:
axis = []
elif isinstance(axis, int):
axis = [axis]
elif isinstance(axis, tuple):
axis = list(axis)
out, _ = core.ops.squeeze2_(x, 'axes', axis)
return out
def unique(x,
return_index=False,
return_inverse=False,
return_counts=False,
axis=None,
dtype="int64",
name=None):
if axis is None:
axis = []
else:
axis = [axis]
attr_dtype = convert_np_dtype_to_dtype_(dtype)
if in_dygraph_mode():
out, inverse, indices, counts = core.ops.unique(
x, 'dtype', attr_dtype, 'return_index', return_index,
'return_inverse', return_inverse, 'return_counts', return_counts,
'axis', axis, "is_sorted", True)
outs = [out]
if return_index:
outs.append(indices)
if return_inverse:
outs.append(inverse)
if return_counts:
outs.append(counts)
if len(outs) == 1:
return outs[0]
return tuple(outs)
check_variable_and_dtype(x, "input",
['float32', 'float64', 'int32', 'int64'], 'unique')
check_type(return_index, 'return_index', bool, 'unique')
check_type(return_inverse, 'return_inverse', bool, 'unique')
check_type(return_counts, 'return_counts', bool, 'unique')
check_dtype(dtype, 'dtype', ['int32', 'int64'], 'unique')
if len(axis) != 0:
check_type(axis[0], 'axis', int, 'unique')
helper = LayerHelper('unique', **locals())
attrs = {
'dtype': attr_dtype,
"return_index": return_index,
"return_inverse": return_inverse,
"return_counts": return_counts,
"axis": axis,
"is_sorted": True
}
out = helper.create_variable_for_type_inference(
dtype=x.dtype, stop_gradient=True)
indices = helper.create_variable_for_type_inference(
dtype=attr_dtype, stop_gradient=True)
inverse = helper.create_variable_for_type_inference(
dtype=attr_dtype, stop_gradient=True)
counts = helper.create_variable_for_type_inference(
dtype=attr_dtype, stop_gradient=True)
outputs = {
"Out": out,
"Indices": indices,
"Index": inverse,
"Counts": counts
}
outs = [out]
if return_index:
outs.append(indices)
if return_inverse:
outs.append(inverse)
if return_counts:
outs.append(counts)
helper.append_op(
type="unique", inputs={"X": x}, attrs=attrs, outputs=outputs)
if len(outs) == 1:
return outs[0]
return tuple(outs)
def unsqueeze(x, axis, name=None):
return layers.unsqueeze(x, axis, name)
@inplace_apis_in_dygraph_only
def unsqueeze_(x, axis, name=None):
if isinstance(axis, int):
axis = [axis]
elif isinstance(axis, Variable):
axis = axis.numpy().tolist()
elif isinstance(axis, (list, tuple)):
axis = [
item.numpy().item(0) if isinstance(item, Variable) else item
for item in axis
]
out, _ = core.ops.unsqueeze2_(x, 'axes', axis)
return out
def gather(x, index, axis=None, name=None):
if axis is None:
axis = 0
if in_dygraph_mode():
axis = axis.item() if isinstance(axis, paddle.Tensor) else axis
return core.ops.gather(x, index, None, "axis", axis, "overwrite", False)
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],
'gather')
check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather')
if isinstance(axis, Variable):
check_variable_and_dtype(axis, 'axis', ['int32', 'int64'], 'gather')
helper = LayerHelper('gather', **locals())
dtype = helper.input_dtype('x')
out = helper.create_variable_for_type_inference(dtype)
if not isinstance(axis, Variable):
helper.append_op(
type="gather",
inputs={"X": x,
"Index": index},
attrs={'axis': axis,
'overwrite': False},
outputs={"Out": out})
else:
helper.append_op(
type="gather",
inputs={"X": x,
"Index": index,
"Axis": axis},
attrs={"overwrite": False},
outputs={"Out": out})
return out
def unbind(input, axis=0):
helper = LayerHelper("unbind", **locals())
check_type(input, 'input', (Variable), 'unbind')
dtype = helper.input_dtype()
check_dtype(dtype, 'unbind', ['float32', 'float64', 'int32', 'int64'],
'unbind')
if not isinstance(axis, (int)):
raise TypeError("The type of 'axis' must be int, but received %s." %
(type(axis)))
if isinstance(axis, np.generic):
axis = np.asscalar(axis)
input_shape = input.shape
axis_ = axis if axis >= 0 else len(input_shape) + axis
num = input_shape[axis_]
outs = [
helper.create_variable_for_type_inference(dtype=helper.input_dtype())
for i in range(num)
]
if in_dygraph_mode():
return core.ops.unbind(input, num, 'axis', axis)
helper.append_op(
type="unbind",
inputs={"X": input},
outputs={"Out": outs},
attrs={"axis": axis})
return outs
def scatter(x, index, updates, overwrite=True, name=None):
if in_dygraph_mode():
return core.ops.scatter(x, index, updates, 'overwrite', overwrite)
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'scatter')
check_type(overwrite, 'overwrite', bool, 'scatter')
helper = LayerHelper('scatter', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type="scatter",
inputs={"X": x,
"Ids": index,
"Updates": updates},
attrs={'overwrite': overwrite},
outputs={"Out": out})
return out
@inplace_apis_in_dygraph_only
def scatter_(x, index, updates, overwrite=True, name=None):
return core.ops.scatter_(x, index, updates, 'overwrite', overwrite)
def scatter_nd_add(x, index, updates, name=None):
return layers.scatter_nd_add(x, index, updates, name=None)
def chunk(x, chunks, axis=0, name=None):
check_type(chunks, 'chunks', (int), 'chunk')
return paddle.fluid.layers.split(
input=x, num_or_sections=chunks, dim=axis, name=name)
def tile(x, repeat_times, name=None):
    """Repeat ``x`` along each dimension ``repeat_times[i]`` times.

    ``repeat_times`` may be a 1-D Variable, or a list/tuple whose elements are
    ints or 1-D Variables; tensor elements are wired into the op as inputs and
    encoded as -1 placeholders in the ``repeat_times`` attribute.
    """
    if in_dygraph_mode():
        return core.ops.tile(x, 'repeat_times', repeat_times)
    check_type(repeat_times, 'repeat_times', (list, tuple, Variable), 'tile')
    if isinstance(repeat_times, Variable):
        assert len(repeat_times.shape) == 1, (
            'repeat_times must be an 1-D Tensor.')
    else:
        # Validate each element: either a 1-D tensor or a plain/NumPy int.
        for elem in repeat_times:
            if isinstance(elem, Variable):
                assert len(elem.shape) == 1, (
                    'Elements in repeat_times must be 1-D Tensors or integers.')
            else:
                type_tuple = (int, np.int32, np.int64)
                assert isinstance(elem, type_tuple), (
                    'Elements in repeat_times must be 1-D Tensors or integers.')
    check_variable_and_dtype(
        x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'tile')
    # NOTE(review): the message wording below is garbled ("date type",
    # "== True") -- it intends "stop_gradient must be True for bool inputs".
    if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:
        raise ValueError(
            "When the date type is bool for the input 'x' of tile op, you "
            "must set its stop_gradient to be True by "
            "some_var.stop_gradient == True supporting some_var is the input.")
    helper = LayerHelper('tile', **locals())
    inputs = {"X": [x]}
    attrs = {}
    def get_attr_repeat_times(list_repeat_times):
        # Tensor elements become -1 placeholders; int elements must be > 0.
        attrs_repeat_times = []
        for idx, times in enumerate(list_repeat_times):
            if isinstance(times, Variable):
                attrs_repeat_times.append(-1)
            else:
                attrs_repeat_times.append(times)
                assert times > 0, (
                    "All elements in repeat_times must be positive for tile.")
        return attrs_repeat_times
    if isinstance(repeat_times, Variable):
        # The whole repeat spec comes from a tensor input at runtime.
        repeat_times.stop_gradient = True
        inputs['RepeatTimes'] = repeat_times
        attrs['repeat_times'] = [-1]
    elif isinstance(repeat_times, (list, tuple)):
        attrs['repeat_times'] = get_attr_repeat_times(repeat_times)
        if utils._contain_var(repeat_times):
            # Mixed int/tensor spec: also pass the tensor elements as inputs.
            inputs['repeat_times_tensor'] = utils._convert_to_tensor_list(
                repeat_times)
    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='tile', inputs=inputs, outputs={'Out': out}, attrs=attrs)
    return out
def expand_as(x, y, name=None):
    """Broadcast ``x`` to the same shape as ``y`` (via the ``expand_as_v2`` op).

    :param x: input Variable (bool/float32/float64/int32/int64).
    :param y: Variable whose shape is the broadcast target.
    :param name: optional layer name (picked up by LayerHelper via locals()).
    :raises ValueError: for bool inputs whose stop_gradient is not True.
    """
    if in_dygraph_mode():
        return core.ops.expand_as_v2(x, 'target_shape', y.shape)
    check_variable_and_dtype(
        x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'expand_as')
    check_type(y, 'y', Variable, 'expand_as')
    if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:
        # Bug fix: the message said "to be False" while the check (and the
        # example it gives) requires stop_gradient to be set to True.
        raise ValueError(
            "When the data type of input 'x' for expand_as is bool, "
            "you must set its stop_gradient to be True by "
            "some_var.stop_gradient = True, supporting "
            "some_var as the input 'x'.")
    inputs = {"X": [x]}
    helper = LayerHelper('expand_as', **locals())
    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='expand_as_v2',
        inputs=inputs,
        attrs={'target_shape': y.shape},
        outputs={'Out': out})
    return out
def broadcast_to(x, shape, name=None):
    """Broadcast ``x`` to ``shape`` using the ``expand_v2`` op.

    ``shape`` may be a 1-D Variable, or a list/tuple of ints and 1-D
    Variables; -1 in an int position means "keep the source dimension".
    """
    if in_dygraph_mode():
        return core.ops.expand_v2(x, 'shape', shape)
    if isinstance(shape, Variable):
        assert len(shape.shape) == 1, ('shape must be an 1-D Tensor.')
    else:
        # Validate each element: either a 1-D tensor or a plain/NumPy int.
        for elem in shape:
            if isinstance(elem, Variable):
                assert len(elem.shape) == 1, (
                    'Elements in shape must be 1-D Tensors or integers.')
            else:
                type_tuple = (int, np.int32, np.int64)
                assert isinstance(elem, type_tuple), (
                    'Elements in shape must be 1-D Tensors or integers.')
    check_variable_and_dtype(x, 'x',
                             ['bool', 'float32', 'float64', 'int32', 'int64'],
                             'broadcast_to')
    check_type(shape, 'shape', (list, tuple, Variable), 'broadcast_to')
    # NOTE(review): the message below contradicts itself ("to be False by
    # ... = True"); the intent is stop_gradient must be True for bool inputs.
    if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:
        raise ValueError(
            "When the data type of input 'x' for broadcast_to is bool, "
            "you must set its stop_gradient to be False by "
            "some_var.stop_gradient = True, supporting "
            "some_var as the input.")
    inputs = {"X": [x]}
    attrs = {}
    helper = LayerHelper('expand', **locals())
    def get_attr_expand_shape(list_expand_shape):
        # Tensor elements become -1 placeholders; ints must be positive or -1.
        attrs_expand_shape = []
        for idx, shape in enumerate(list_expand_shape):
            if isinstance(shape, Variable):
                attrs_expand_shape.append(-1)
            else:
                attrs_expand_shape.append(shape)
                assert shape > 0 or shape == -1, (
                    "All elements in shape of broadcast_to must be positive or -1."
                )
        return attrs_expand_shape
    if isinstance(shape, Variable):
        # Entire target shape supplied as a tensor at runtime.
        shape.stop_gradient = True
        inputs['Shape'] = shape
    elif isinstance(shape, (list, tuple)):
        attrs['shape'] = get_attr_expand_shape(shape)
        if utils._contain_var(shape):
            # Mixed int/tensor spec: also pass the tensor elements as inputs.
            inputs['expand_shapes_tensor'] = utils._convert_to_tensor_list(
                shape)
    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='expand_v2', inputs=inputs, outputs={'Out': out}, attrs=attrs)
    return out
def expand(x, shape, name=None):
    """Expand ``x`` to ``shape`` using the ``expand_v2`` op.

    Same validation scheme as ``broadcast_to``; additionally accepts float16.
    """
    if in_dygraph_mode():
        return core.ops.expand_v2(x, 'shape', shape)
    if isinstance(shape, Variable):
        assert len(shape.shape) == 1, ('shape must be an 1-D Tensor.')
    else:
        # Validate each element: either a 1-D tensor or a plain/NumPy int.
        for elem in shape:
            if isinstance(elem, Variable):
                assert len(elem.shape) == 1, (
                    'Elements in shape must be 1-D Tensors or integers.')
            else:
                type_tuple = (int, np.int32, np.int64)
                assert isinstance(elem, type_tuple), (
                    'Elements in shape must be 1-D Tensors or integers.')
    check_variable_and_dtype(
        x, 'x', ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
        'expand')
    check_type(shape, 'shape', (list, tuple, Variable), 'expand')
    # NOTE(review): message contradicts itself ("to be False by ... = True");
    # the intent is stop_gradient must be True for bool inputs.
    if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:
        raise ValueError("When the data type of input 'x' for expand is bool, "
                         "you must set its stop_gradient to be False by "
                         "some_var.stop_gradient = True, supporting "
                         "some_var as the input.")
    inputs = {"X": [x]}
    attrs = {}
    helper = LayerHelper('expand', **locals())
    def get_attr_expand_shape(list_expand_shape):
        # Tensor elements become -2 placeholders here (vs -1 in broadcast_to),
        # presumably so the op can tell "tensor-supplied dim" apart from the
        # -1 "keep dim" marker -- TODO confirm against the expand_v2 op spec.
        attrs_expand_shape = []
        for idx, shape in enumerate(list_expand_shape):
            if isinstance(shape, Variable):
                attrs_expand_shape.append(-2)
            else:
                attrs_expand_shape.append(shape)
                assert shape > 0 or shape == -1, (
                    "All elements in shape of expand must be positive or -1.")
        return attrs_expand_shape
    if isinstance(shape, Variable):
        # Entire target shape supplied as a tensor at runtime.
        shape.stop_gradient = True
        inputs['Shape'] = shape
    elif isinstance(shape, (list, tuple)):
        attrs['shape'] = get_attr_expand_shape(shape)
        if utils._contain_var(shape):
            # Mixed int/tensor spec: also pass the tensor elements as inputs.
            inputs['expand_shapes_tensor'] = utils._convert_to_tensor_list(
                shape)
    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='expand_v2', inputs=inputs, outputs={'Out': out}, attrs=attrs)
    return out
def reshape(x, shape, name=None):
    """Reshape ``x`` to ``shape``; thin wrapper over ``fluid.layers.reshape``."""
    return paddle.fluid.layers.reshape(x=x, shape=shape, name=name)
@inplace_apis_in_dygraph_only
def reshape_(x, shape, name=None):
    """In-place version of ``reshape``; restricted to dygraph by the decorator.

    :param shape: a list/tuple of ints (0-d Variables are materialized to
        ints) or a 1-D Variable holding the target shape.
    :raises TypeError: for any other ``shape`` type.
    """
    if isinstance(shape, (list, tuple)):
        # Materialize any tensor elements so one attribute-driven call works.
        shape = [
            item.numpy().item(0) if isinstance(item, Variable) else item
            for item in shape
        ]
        out, _ = core.ops.reshape2_(x, None, 'shape', shape)
        return out
    elif isinstance(shape, Variable):
        shape.stop_gradient = True
        out, _ = core.ops.reshape2_(x, shape)
        return out
    # Bug fix: an unsupported ``shape`` type previously fell through and
    # returned None silently; fail loudly so callers see the bad argument.
    raise TypeError(
        "The type of 'shape' in reshape_ must be list, tuple or Variable, "
        "but received %s." % (type(shape)))
def gather_nd(x, index, name=None):
    """Gather slices of ``x`` at N-d ``index``; wraps ``fluid.layers.gather_nd``."""
    return paddle.fluid.layers.gather_nd(input=x, index=index, name=name)
def strided_slice(x, axes, starts, ends, strides, name=None):
    """Slice ``x`` along ``axes`` using ``starts``/``ends``/``strides``.

    Delegates to ``fluid.layers.strided_slice``.
    NOTE(review): ``name`` is accepted but not forwarded -- confirm whether
    the underlying fluid layer supports it before wiring it through.
    """
    return paddle.fluid.layers.strided_slice(
        input=x, axes=axes, starts=starts, ends=ends, strides=strides)
| true | true |
1c33d271d23f367d670aae3f8ea6cabf2bb8701c | 563 | py | Python | setup.py | fuenfundachtzig/pylhe | 07c994d68cef3c4b66792e7668d82a4f274bcb68 | [
"Apache-2.0"
] | null | null | null | setup.py | fuenfundachtzig/pylhe | 07c994d68cef3c4b66792e7668d82a4f274bcb68 | [
"Apache-2.0"
] | null | null | null | setup.py | fuenfundachtzig/pylhe | 07c994d68cef3c4b66792e7668d82a4f274bcb68 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
# Optional dependency groups, installable via ``pip install pkg[extra]``.
extras_require = {
    "test": ["pytest", "pytest-cov>=2.5.1", "scikit-hep-testdata>=0.3.1", "pydocstyle"],
}
# Lint tools; black only supports Python >= 3.6, hence the marker.
extras_require["lint"] = sorted(set(["flake8", "black;python_version>='3.6'"]))
# Developer environment: the test group plus release/maintenance tooling.
extras_require["develop"] = sorted(
    set(
        extras_require["test"]
        + ["pre-commit", "check-manifest", "bump2version~=1.0", "twine"]
    )
)
# "complete" aggregates every other extra into one deduplicated group.
extras_require["complete"] = sorted(set(sum(extras_require.values(), [])))
# Remaining package metadata lives in setup.cfg; the version is derived from
# git tags via setuptools-scm, with the local scheme suppressed for releases.
setup(
    extras_require=extras_require,
    use_scm_version=lambda: {"local_scheme": lambda version: ""},
)
| 29.631579 | 88 | 0.651865 | from setuptools import setup
# Optional dependency groups, installable via ``pip install pkg[extra]``.
extras_require = {
    "test": ["pytest", "pytest-cov>=2.5.1", "scikit-hep-testdata>=0.3.1", "pydocstyle"],
}
# Lint tools; black only supports Python >= 3.6, hence the marker.
extras_require["lint"] = sorted(set(["flake8", "black;python_version>='3.6'"]))
# Developer environment: the test group plus release/maintenance tooling.
extras_require["develop"] = sorted(
    set(
        extras_require["test"]
        + ["pre-commit", "check-manifest", "bump2version~=1.0", "twine"]
    )
)
# "complete" aggregates every other extra into one deduplicated group.
extras_require["complete"] = sorted(set(sum(extras_require.values(), [])))
# Remaining package metadata lives in setup.cfg; the version is derived from
# git tags via setuptools-scm, with the local scheme suppressed for releases.
setup(
    extras_require=extras_require,
    use_scm_version=lambda: {"local_scheme": lambda version: ""},
)
| true | true |
1c33d2f27cdf6e35e3ed2d176f1d978caa28ecf1 | 1,474 | py | Python | dfc_pkg/commands/nginx_server/install_nginx.py | drc288/dfc | 91a64a3adb1ac83fcc26d3978264fe7837fb588c | [
"MIT"
] | 1 | 2020-08-24T17:50:32.000Z | 2020-08-24T17:50:32.000Z | dfc_pkg/commands/nginx_server/install_nginx.py | drc288/dfc | 91a64a3adb1ac83fcc26d3978264fe7837fb588c | [
"MIT"
] | 7 | 2020-03-06T15:52:30.000Z | 2020-03-13T00:02:13.000Z | dfc_pkg/commands/nginx_server/install_nginx.py | drc288/dfc | 91a64a3adb1ac83fcc26d3978264fe7837fb588c | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from colored import stylize, fg, attr
import paramiko
import socket
import typer
def install_nginx(server):
    """Update a remote host and install NGINX plus python3-pip over SSH.

    :param server: an established remote-connection object exposing ``run``
        (e.g. a fabric ``Connection``) used to execute shell commands.
    :return: None.  Exits the process with status 1 if the connection fails
        or SSH authentication is rejected.
    """
    try:
        # Refresh the package index first so the installs below succeed.
        typer.echo(stylize("The server is being updated", fg("blue")))
        # Silence the "mesg n" warning emitted for non-interactive shells.
        server.run('sudo sed -i "s/^mesg n$/tty -s && mesg n/g" /root/.profile')
        server.run("sudo apt-get install dialog apt-utils -y > /dev/null 3> /dev/null")
        server.run("sudo apt-get update -y > /dev/null")
        typer.echo(stylize("Server updated", fg("green"), attr("bold")))
        # Install the web server itself.
        typer.echo(stylize("Installing NGINX server", fg("blue")))
        server.run("sudo apt-get install nginx -y > /dev/null 3> /dev/null")
        # pip3 is needed later for Python package dependencies.
        typer.echo(stylize("Installing pip3 for python3", fg("blue")))
        server.run("sudo apt-get install python3-pip -y > /dev/null")
        server.run("sudo pip3 install -U pip > /dev/null")
        typer.echo(stylize("NGINX and PIP are installed ", fg("green"), attr("bold")))
    except socket.error:
        typer.echo(stylize("Unable to connect", fg("red")))
        # Bug fix: failures previously exited with status 0 (success).
        exit(1)
    except paramiko.ssh_exception.AuthenticationException:
        # Typo fix: "kay path" -> "key path".
        typer.echo(stylize("SSH Error, verify the key path", fg("red")))
        exit(1)
| 39.837838 | 87 | 0.632293 |
from colored import stylize, fg, attr
import paramiko
import socket
import typer
# Duplicate (comment-stripped) copy of install_nginx above: provisions NGINX
# and python3-pip on a remote host through an SSH connection object.
def install_nginx(server):
    try:
        # Update the package index and base tooling first.
        typer.echo(stylize("The server is being updated", fg("blue")))
        # Silence the "mesg n" warning emitted for non-interactive shells.
        server.run('sudo sed -i "s/^mesg n$/tty -s && mesg n/g" /root/.profile')
        server.run("sudo apt-get install dialog apt-utils -y > /dev/null 3> /dev/null")
        server.run("sudo apt-get update -y > /dev/null")
        typer.echo(stylize("Server updated", fg("green"), attr("bold")))
        # Install the web server itself.
        typer.echo(stylize("Installing NGINX server", fg("blue")))
        server.run("sudo apt-get install nginx -y > /dev/null 3> /dev/null")
        # pip3 is needed later for Python package dependencies.
        typer.echo(stylize("Installing pip3 for python3", fg("blue")))
        server.run("sudo apt-get install python3-pip -y > /dev/null")
        server.run("sudo pip3 install -U pip > /dev/null")
        typer.echo(stylize("NGINX and PIP are installed ", fg("green"), attr("bold")))
    except socket.error:
        # NOTE(review): exits with status 0 even though the install failed.
        typer.echo(stylize(f"Unable to connect", fg("red")))
        exit(0)
    except paramiko.ssh_exception.AuthenticationException:
        typer.echo(stylize(f"SSH Error, verify the kay path", fg("red")))
        exit(0)
| true | true |
1c33d31a67558bb09a580f78c0ed07b8cc869caf | 7,267 | py | Python | nova/cells/utils.py | nelsnelson/nova | 826fe1cc6af2df291d5aaafdc5d498d626475d19 | [
"Apache-2.0"
] | null | null | null | nova/cells/utils.py | nelsnelson/nova | 826fe1cc6af2df291d5aaafdc5d498d626475d19 | [
"Apache-2.0"
] | null | null | null | nova/cells/utils.py | nelsnelson/nova | 826fe1cc6af2df291d5aaafdc5d498d626475d19 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cells Utility Methods
"""
import random
import sys
from oslo_config import cfg
import six
from nova import objects
from nova.objects import base as obj_base
# Separator used between cell names for the 'full cell name' and routing
# path
PATH_CELL_SEP = '!'
# Flag prepended to a cell name to indicate data shouldn't be synced during
# an instance save. There are no illegal chars in a cell name so using the
# meaningful PATH_CELL_SEP in an invalid way will need to suffice.
BLOCK_SYNC_FLAG = '!!'
# Separator used between cell name and item
_CELL_ITEM_SEP = '@'
# Pull in the page-size option used by get_instances_to_sync below.
CONF = cfg.CONF
CONF.import_opt('instance_update_sync_database_limit', 'nova.cells.opts',
                group='cells')
class ProxyObjectSerializer(obj_base.NovaObjectSerializer):
    """Serializer that rehydrates cell-proxy wrappers.

    Overrides ``_process_object`` so primitives produced by
    ``_CellProxy.obj_to_primitive`` deserialize back into the right proxy
    subclass instead of a bare NovaObject.
    """
    def __init__(self):
        super(ProxyObjectSerializer, self).__init__()
        # Handle on the parent serializer so _CellProxy can delegate the
        # actual NovaObject deserialization back to it.
        self.serializer = super(ProxyObjectSerializer, self)
    def _process_object(self, context, objprim):
        return _CellProxy.obj_from_primitive(self.serializer, objprim, context)
class _CellProxy(object):
    """Wrap a NovaObject so 'id' and 'host' read as ``<cell_path>@<value>``.

    All other attribute and item access is delegated to the wrapped object,
    so consumers can treat the proxy like the object itself.
    """
    def __init__(self, obj, cell_path):
        # obj: the wrapped NovaObject; cell_path: the owning cell's name/path.
        self._obj = obj
        self._cell_path = cell_path
    @property
    def id(self):
        # Cell-qualified id, e.g. "region!child@42".
        return cell_with_item(self._cell_path, self._obj.id)
    @property
    def host(self):
        # Cell-qualified host name.
        return cell_with_item(self._cell_path, self._obj.host)
    def __getitem__(self, key):
        """Dict-style access; 'id'/'host' go through the qualified properties."""
        if key == 'id':
            return self.id
        if key == 'host':
            return self.host
        return getattr(self._obj, key)
    def obj_to_primitive(self):
        """Serialize the wrapped object, tagging proxy class and cell path."""
        obj_p = self._obj.obj_to_primitive()
        obj_p['cell_proxy.class_name'] = self.__class__.__name__
        obj_p['cell_proxy.cell_path'] = self._cell_path
        return obj_p
    @classmethod
    def obj_from_primitive(cls, serializer, primitive, context=None):
        """Inverse of obj_to_primitive; plain objects pass through unwrapped."""
        obj_primitive = primitive.copy()
        cell_path = obj_primitive.pop('cell_proxy.cell_path', None)
        klass_name = obj_primitive.pop('cell_proxy.class_name', None)
        obj = serializer._process_object(context, obj_primitive)
        if klass_name is not None and cell_path is not None:
            # Look up the concrete proxy subclass by name in this module.
            klass = getattr(sys.modules[__name__], klass_name)
            return klass(obj, cell_path)
        else:
            return obj
    # dict-ish syntax sugar
    def _iteritems(self):
        """For backwards-compatibility with dict-based objects.
        NOTE(sbauza): May be removed in the future.
        """
        for name in self._obj.obj_fields:
            if (self._obj.obj_attr_is_set(name) or
                    name in self._obj.obj_extra_fields):
                # 'id'/'host' are yielded in their cell-qualified form.
                if name == 'id':
                    yield name, self.id
                elif name == 'host':
                    yield name, self.host
                else:
                    yield name, getattr(self._obj, name)
    if six.PY2:
        iteritems = _iteritems
    else:
        items = _iteritems
    def __getattr__(self, key):
        # Fallback: any attribute not defined here reads from the wrapped obj.
        return getattr(self._obj, key)
class ComputeNodeProxy(_CellProxy):
    """_CellProxy specialization for ComputeNode objects; adds no behavior."""
    pass
class ServiceProxy(_CellProxy):
    """_CellProxy for Service objects that hides the nested compute_node."""
    def __getattr__(self, key):
        if key == 'compute_node':
            # NOTE(sbauza): As the Service object is still having a nested
            # ComputeNode object that consumers of this Proxy don't use, we can
            # safely remove it from what's returned
            raise AttributeError
        return getattr(self._obj, key)
def get_instances_to_sync(context, updated_since=None, project_id=None,
                          deleted=True, shuffle=False, uuids_only=False):
    """Return a generator that will return a list of active and
    deleted instances to sync with parent cells. The list may
    optionally be shuffled for periodic updates so that multiple
    cells services aren't self-healing the same instances in nearly
    lockstep.

    :param updated_since: only yield instances changed since this time.
    :param project_id: restrict to one project when given.
    :param deleted: include deleted instances unless False.
    :param shuffle: randomize order within each fetched page.
    :param uuids_only: yield bare uuid strings instead of Instance objects.
    """
    def _get_paginated_instances(context, filters, shuffle, limit, marker):
        # Fetch one page of instances, sorted so active rows come first;
        # returns the page plus the marker for the next page.
        instances = objects.InstanceList.get_by_filters(
            context, filters, sort_key='deleted', sort_dir='asc',
            limit=limit, marker=marker)
        if len(instances) > 0:
            marker = instances[-1]['uuid']
        # NOTE(melwitt/alaski): Need a list that supports assignment for
        # shuffle. And pop() on the returned result.
        instances = list(instances)
        if shuffle:
            random.shuffle(instances)
        return instances, marker
    filters = {}
    if updated_since is not None:
        filters['changes-since'] = updated_since
    if project_id is not None:
        filters['project_id'] = project_id
    if not deleted:
        filters['deleted'] = False
    # Active instances first.
    limit = CONF.cells.instance_update_sync_database_limit
    marker = None
    instances = []
    while True:
        # Refill from the database whenever the current page is exhausted.
        if not instances:
            instances, marker = _get_paginated_instances(context, filters,
                                                         shuffle, limit, marker)
        if not instances:
            break
        instance = instances.pop(0)
        if uuids_only:
            yield instance.uuid
        else:
            yield instance
def cell_with_item(cell_name, item):
    """Join *cell_name* and *item* into ``<cell_name>@<item>``.

    When no cell name is given, *item* is returned untouched.
    """
    if cell_name is None:
        return item
    return '%s%s%s' % (cell_name, _CELL_ITEM_SEP, item)
def split_cell_and_item(cell_and_item):
    """Split a combined ``cell@item`` string into its two halves.

    Returns ``(None, original)`` when no separator is present; otherwise
    the ``[cell, item]`` pair from splitting on the last separator.
    """
    parts = cell_and_item.rsplit(_CELL_ITEM_SEP, 1)
    if len(parts) > 1:
        return parts
    return (None, cell_and_item)
def add_cell_to_compute_node(compute_node, cell_name):
    """Wrap *compute_node* so its 'id' reads as ``<cell_name>@<id>``.

    This lets the API cell address a compute node in a child cell by a
    globally unique identifier.
    """
    # ComputeNode is a NovaObject, so the cell path is attached via a proxy
    # wrapper rather than by mutating the object itself.
    return ComputeNodeProxy(compute_node, cell_name)
def add_cell_to_service(service, cell_name):
    """Wrap *service* so 'id' and 'host' read as ``<cell_name>@<value>``.

    This lets the API cell address a service in a child cell by globally
    unique identifiers.
    """
    # Service is a NovaObject, so the cell path is attached via a proxy
    # wrapper rather than by mutating the object itself.
    return ServiceProxy(service, cell_name)
def add_cell_to_task_log(task_log, cell_name):
    """Prefix the task_log 'id' and 'host' fields with the cell name.

    Keeps these otherwise-ambiguous fields unique when task logs from
    multiple cells are aggregated at the API cell.
    """
    for field in ('id', 'host'):
        task_log[field] = cell_with_item(cell_name, task_log[field])
| 33.182648 | 79 | 0.662447 |
import random
import sys
from oslo_config import cfg
import six
from nova import objects
from nova.objects import base as obj_base
# Separator used between cell names in a full routing path.
PATH_CELL_SEP = '!'
# an instance save. There are no illegal chars in a cell name so using the
# meaningful PATH_CELL_SEP in an invalid way will need to suffice.
BLOCK_SYNC_FLAG = '!!'
# Separator used between cell name and item
_CELL_ITEM_SEP = '@'
# Pull in the page-size option used by get_instances_to_sync below.
CONF = cfg.CONF
CONF.import_opt('instance_update_sync_database_limit', 'nova.cells.opts',
                group='cells')
class ProxyObjectSerializer(obj_base.NovaObjectSerializer):
    """Serializer that rehydrates cell-proxy wrappers.

    Overrides ``_process_object`` so primitives produced by
    ``_CellProxy.obj_to_primitive`` deserialize back into the right proxy
    subclass instead of a bare NovaObject.
    """
    def __init__(self):
        super(ProxyObjectSerializer, self).__init__()
        # Handle on the parent serializer so _CellProxy can delegate the
        # actual NovaObject deserialization back to it.
        self.serializer = super(ProxyObjectSerializer, self)
    def _process_object(self, context, objprim):
        return _CellProxy.obj_from_primitive(self.serializer, objprim, context)
class _CellProxy(object):
    """Wrap a NovaObject so 'id' and 'host' read as ``<cell_path>@<value>``.

    All other attribute and item access is delegated to the wrapped object,
    so consumers can treat the proxy like the object itself.
    """
    def __init__(self, obj, cell_path):
        # obj: the wrapped NovaObject; cell_path: the owning cell's name/path.
        self._obj = obj
        self._cell_path = cell_path
    @property
    def id(self):
        # Cell-qualified id, e.g. "region!child@42".
        return cell_with_item(self._cell_path, self._obj.id)
    @property
    def host(self):
        # Cell-qualified host name.
        return cell_with_item(self._cell_path, self._obj.host)
    def __getitem__(self, key):
        """Dict-style access; 'id'/'host' go through the qualified properties."""
        if key == 'id':
            return self.id
        if key == 'host':
            return self.host
        return getattr(self._obj, key)
    def obj_to_primitive(self):
        """Serialize the wrapped object, tagging proxy class and cell path."""
        obj_p = self._obj.obj_to_primitive()
        obj_p['cell_proxy.class_name'] = self.__class__.__name__
        obj_p['cell_proxy.cell_path'] = self._cell_path
        return obj_p
    @classmethod
    def obj_from_primitive(cls, serializer, primitive, context=None):
        """Inverse of obj_to_primitive; plain objects pass through unwrapped."""
        obj_primitive = primitive.copy()
        cell_path = obj_primitive.pop('cell_proxy.cell_path', None)
        klass_name = obj_primitive.pop('cell_proxy.class_name', None)
        obj = serializer._process_object(context, obj_primitive)
        if klass_name is not None and cell_path is not None:
            # Look up the concrete proxy subclass by name in this module.
            klass = getattr(sys.modules[__name__], klass_name)
            return klass(obj, cell_path)
        else:
            return obj
    # dict-ish syntax sugar
    def _iteritems(self):
        """Yield (field, value) pairs for backwards compat with dict objects."""
        for name in self._obj.obj_fields:
            if (self._obj.obj_attr_is_set(name) or
                    name in self._obj.obj_extra_fields):
                # 'id'/'host' are yielded in their cell-qualified form.
                if name == 'id':
                    yield name, self.id
                elif name == 'host':
                    yield name, self.host
                else:
                    yield name, getattr(self._obj, name)
    if six.PY2:
        iteritems = _iteritems
    else:
        items = _iteritems
    def __getattr__(self, key):
        # Fallback: any attribute not defined here reads from the wrapped obj.
        return getattr(self._obj, key)
class ComputeNodeProxy(_CellProxy):
    """_CellProxy specialization for ComputeNode objects; adds no behavior."""
    pass
class ServiceProxy(_CellProxy):
    """_CellProxy for Service objects that hides the nested compute_node."""
    def __getattr__(self, key):
        if key == 'compute_node':
            # NOTE(sbauza): As the Service object is still having a nested
            # ComputeNode object that consumers of this Proxy don't use, we can
            # safely hide it from what's returned.
            raise AttributeError
        return getattr(self._obj, key)
def get_instances_to_sync(context, updated_since=None, project_id=None,
                          deleted=True, shuffle=False, uuids_only=False):
    """Yield instances (or uuids) that should be synced with parent cells.

    Results are paged from the database; pages may be shuffled so multiple
    cells services don't self-heal the same instances in lockstep.
    """
    def _get_paginated_instances(context, filters, shuffle, limit, marker):
        # Fetch one page, sorted so active rows come first; returns the page
        # plus the marker for the next page.
        instances = objects.InstanceList.get_by_filters(
            context, filters, sort_key='deleted', sort_dir='asc',
            limit=limit, marker=marker)
        if len(instances) > 0:
            marker = instances[-1]['uuid']
        # NOTE(melwitt/alaski): Need a list that supports assignment for
        # shuffle. And pop() on the returned result.
        instances = list(instances)
        if shuffle:
            random.shuffle(instances)
        return instances, marker
    filters = {}
    if updated_since is not None:
        filters['changes-since'] = updated_since
    if project_id is not None:
        filters['project_id'] = project_id
    if not deleted:
        filters['deleted'] = False
    # Active instances first.
    limit = CONF.cells.instance_update_sync_database_limit
    marker = None
    instances = []
    while True:
        # Refill from the database whenever the current page is exhausted.
        if not instances:
            instances, marker = _get_paginated_instances(context, filters,
                                                         shuffle, limit, marker)
        if not instances:
            break
        instance = instances.pop(0)
        if uuids_only:
            yield instance.uuid
        else:
            yield instance
def cell_with_item(cell_name, item):
    """Join *cell_name* and *item* into ``<cell_name>@<item>``.

    When no cell name is given, *item* is returned untouched.
    """
    if cell_name is None:
        return item
    return '%s%s%s' % (cell_name, _CELL_ITEM_SEP, item)
def split_cell_and_item(cell_and_item):
    """Split a combined ``cell@item`` string into its two halves.

    Returns ``(None, original)`` when no separator is present; otherwise
    the ``[cell, item]`` pair from splitting on the last separator.
    """
    parts = cell_and_item.rsplit(_CELL_ITEM_SEP, 1)
    if len(parts) > 1:
        return parts
    return (None, cell_and_item)
def add_cell_to_compute_node(compute_node, cell_name):
    """Wrap *compute_node* so its 'id' reads as ``<cell_name>@<id>``."""
    # ComputeNode is a NovaObject, so the cell path is attached via a proxy
    # wrapper rather than by mutating the object itself.
    return ComputeNodeProxy(compute_node, cell_name)
def add_cell_to_service(service, cell_name):
    """Wrap *service* so 'id' and 'host' read as ``<cell_name>@<value>``."""
    # Service is a NovaObject, so the cell path is attached via a proxy
    # wrapper rather than by mutating the object itself.
    return ServiceProxy(service, cell_name)
def add_cell_to_task_log(task_log, cell_name):
    """Prefix the task_log 'id' and 'host' fields with the cell name."""
    for field in ('id', 'host'):
        task_log[field] = cell_with_item(cell_name, task_log[field])
| true | true |
1c33d3e19a8e38b624c75bc8d646ae8fb563783d | 154 | py | Python | hdx_exports/mailer.py | hotosm/hot-exports-two | d60530445e89b2a46bd55ea3b7c2e72409b0f493 | [
"BSD-3-Clause"
] | 95 | 2017-09-29T13:20:38.000Z | 2022-03-14T06:43:47.000Z | hdx_exports/mailer.py | hotosm/hot-exports-two | d60530445e89b2a46bd55ea3b7c2e72409b0f493 | [
"BSD-3-Clause"
] | 229 | 2015-07-29T08:50:27.000Z | 2017-09-21T18:05:56.000Z | hdx_exports/mailer.py | hotosm/hot-exports-two | d60530445e89b2a46bd55ea3b7c2e72409b0f493 | [
"BSD-3-Clause"
] | 30 | 2017-10-06T23:53:48.000Z | 2022-03-10T06:17:07.000Z | # send to a predefined email address like a Google Group
# on each successful scheduled export run.
# Or report failures.
class Mailer(object):
    """Placeholder for a mailer that reports scheduled-export results.

    Per the notes above, it is intended to send a summary to a predefined
    address (e.g. a Google Group) after each successful scheduled export
    run, or to report failures.  Not yet implemented.
    """
    pass
| 22 | 56 | 0.753247 |
class Mailer(object):
    """Placeholder for a mailer that reports scheduled-export results.

    Intended to send a summary to a predefined address after each
    successful scheduled export run, or to report failures.  Not yet
    implemented.
    """
    pass
| true | true |
1c33d3f65fdbd4a7fed5e6155008227e4c2bc59c | 896 | py | Python | TestAgentMaps.py | jdong-sw/rbe-swarm-intelligence | 7c9cae040f80c7f7f41c81b2d379d214dd0b2f30 | [
"MIT"
] | null | null | null | TestAgentMaps.py | jdong-sw/rbe-swarm-intelligence | 7c9cae040f80c7f7f41c81b2d379d214dd0b2f30 | [
"MIT"
] | null | null | null | TestAgentMaps.py | jdong-sw/rbe-swarm-intelligence | 7c9cae040f80c7f7f41c81b2d379d214dd0b2f30 | [
"MIT"
] | null | null | null | from swarm_mapping.world import World
import cv2
import numpy as np
# Display size
display_width = 800
display_height = 800
# 100x100-cell world with 50 agents; ~40% open space, ~20% hazard fill,
# each agent sensing within 3 cells and dropping markers of size 3.
world = World(100, 100, 50,
              space_fill=0.4, hazard_fill=0.2, fast=False,
              sensor_range=3, marker_size=3)
step = 0
world.step()
while True:
    # Render the agents' collective map view; scale to the display window.
    frame = world.render()
    frame = cv2.resize(frame, (display_width, display_height), interpolation = cv2.INTER_AREA)
    # render() yields float RGB in [0, 1]; OpenCV wants uint8 BGR.
    cv2.imshow('Agent Map',cv2.cvtColor((frame*255).astype(np.uint8), cv2.COLOR_RGB2BGR))
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    # Render the simulation view of the agents' map for comparison.
    frame2 = world.render(world.agents_map)
    frame2 = cv2.resize(frame2, (display_width, display_height), interpolation = cv2.INTER_AREA)
    cv2.imshow('Sim',cv2.cvtColor((frame2*255).astype(np.uint8), cv2.COLOR_RGB2BGR))
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    # Advance the simulation one tick; 'q' in either window quits the loop.
    world.step()
    step += 1
cv2.destroyAllWindows()
| 33.185185 | 96 | 0.677455 | from swarm_mapping.world import World
import cv2
import numpy as np
# Window dimensions (pixels) for the OpenCV display.
display_width = 800
display_height = 800
# 100x100-cell world with 50 agents; ~40% open space, ~20% hazard fill,
# each agent sensing within 3 cells and dropping markers of size 3.
world = World(100, 100, 50,
              space_fill=0.4, hazard_fill=0.2, fast=False,
              sensor_range=3, marker_size=3)
step = 0
world.step()
while True:
    # Render the agents' collective map view; scale to the display window.
    frame = world.render()
    frame = cv2.resize(frame, (display_width, display_height), interpolation = cv2.INTER_AREA)
    # render() yields float RGB in [0, 1]; OpenCV wants uint8 BGR.
    cv2.imshow('Agent Map',cv2.cvtColor((frame*255).astype(np.uint8), cv2.COLOR_RGB2BGR))
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    # Render the simulation view of the agents' map for comparison.
    frame2 = world.render(world.agents_map)
    frame2 = cv2.resize(frame2, (display_width, display_height), interpolation = cv2.INTER_AREA)
    cv2.imshow('Sim',cv2.cvtColor((frame2*255).astype(np.uint8), cv2.COLOR_RGB2BGR))
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    # Advance the simulation one tick; 'q' in either window quits the loop.
    world.step()
    step += 1
cv2.destroyAllWindows()
| true | true |
1c33d405658498b7efd16c4ea00bc0852497d415 | 10,303 | py | Python | test/units/modules/network/f5/test_bigip_monitor_tcp_half_open.py | Container-Projects/ansible-provider-docs | 100b695b0b0c4d8d08af362069557ffc735d0d7e | [
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 37 | 2017-08-15T15:02:43.000Z | 2021-07-23T03:44:31.000Z | test/units/modules/network/f5/test_bigip_monitor_tcp_half_open.py | Container-Projects/ansible-provider-docs | 100b695b0b0c4d8d08af362069557ffc735d0d7e | [
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 12 | 2018-01-10T05:25:25.000Z | 2021-11-28T06:55:48.000Z | test/units/modules/network/f5/test_bigip_monitor_tcp_half_open.py | Container-Projects/ansible-provider-docs | 100b695b0b0c4d8d08af362069557ffc735d0d7e | [
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 49 | 2017-08-15T09:52:13.000Z | 2022-03-21T17:11:54.000Z | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
import pytest
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_monitor_tcp_half_open import Parameters
from library.modules.bigip_monitor_tcp_half_open import ModuleManager
from library.modules.bigip_monitor_tcp_half_open import ArgumentSpec
from library.modules.bigip_monitor_tcp_half_open import HAS_F5SDK
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_monitor_tcp_half_open import Parameters
from ansible.modules.network.f5.bigip_monitor_tcp_half_open import ModuleManager
from ansible.modules.network.f5.bigip_monitor_tcp_half_open import ArgumentSpec
from ansible.modules.network.f5.bigip_monitor_tcp_half_open import HAS_F5SDK
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Load a test fixture by file name, caching the result per path.

    JSON fixtures are returned parsed; anything that fails to parse is
    returned (and cached) as the raw file contents.
    """
    full_path = os.path.join(fixture_path, name)
    if full_path not in fixture_data:
        with open(full_path) as handle:
            contents = handle.read()
        try:
            contents = json.loads(contents)
        except Exception:
            pass
        fixture_data[full_path] = contents
    return fixture_data[full_path]
class TestParameters(unittest.TestCase):
    """Unit tests for the tcp_half_open monitor's Parameters adapter."""
    def test_module_parameters(self):
        """Module-style args map onto the expected canonical attributes."""
        args = dict(
            name='foo',
            parent='parent',
            ip='10.10.10.10',
            port=80,
            interval=20,
            timeout=30,
            time_until_up=60,
            partition='Common'
        )
        p = Parameters(params=args)
        assert p.name == 'foo'
        # Bare parent names are qualified with the partition.
        assert p.parent == '/Common/parent'
        assert p.ip == '10.10.10.10'
        assert p.port == 80
        assert p.type == 'tcp_half_open'
        # destination is derived from ip + port.
        assert p.destination == '10.10.10.10:80'
        assert p.interval == 20
        assert p.timeout == 30
        assert p.time_until_up == 60
    def test_module_parameters_ints_as_strings(self):
        """Numeric values supplied as strings are coerced to ints."""
        args = dict(
            name='foo',
            parent='parent',
            ip='10.10.10.10',
            port=80,
            interval='20',
            timeout='30',
            time_until_up='60',
            partition='Common'
        )
        p = Parameters(params=args)
        assert p.name == 'foo'
        assert p.parent == '/Common/parent'
        assert p.ip == '10.10.10.10'
        assert p.port == 80
        assert p.type == 'tcp_half_open'
        assert p.destination == '10.10.10.10:80'
        # String inputs come back as ints.
        assert p.interval == 20
        assert p.timeout == 30
        assert p.time_until_up == 60
    def test_api_parameters(self):
        """REST-API-style keys (defaultsFrom, timeUntilUp) map the same way."""
        args = dict(
            name='foo',
            defaultsFrom='/Common/parent',
            destination='10.10.10.10:80',
            interval=20,
            timeout=30,
            timeUntilUp=60
        )
        p = Parameters(params=args)
        assert p.name == 'foo'
        assert p.parent == '/Common/parent'
        # ip/port are parsed back out of the combined destination.
        assert p.ip == '10.10.10.10'
        assert p.port == 80
        assert p.type == 'tcp_half_open'
        assert p.destination == '10.10.10.10:80'
        assert p.interval == 20
        assert p.timeout == 30
        assert p.time_until_up == 60
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_monitor(self, *args):
set_module_args(dict(
name='foo',
ip='10.10.10.10',
port=80,
interval=20,
timeout=30,
time_until_up=60,
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
def test_create_monitor_idempotent(self, *args):
set_module_args(dict(
name='foo',
ip='10.10.10.10',
port=80,
interval=20,
timeout=30,
time_until_up=60,
server='localhost',
password='password',
user='admin'
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp_half_open.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is False
def test_update_interval(self, *args):
set_module_args(dict(
name='foo',
interval=10,
server='localhost',
password='password',
user='admin'
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp_half_open.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['interval'] == 10
def test_update_interval_larger_than_existing_timeout(self, *args):
set_module_args(dict(
name='foo',
interval=30,
server='localhost',
password='password',
user='admin'
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp_half_open.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
with pytest.raises(F5ModuleError) as ex:
mm.exec_module()
assert "must be less than" in str(ex)
def test_update_interval_larger_than_new_timeout(self, *args):
set_module_args(dict(
name='foo',
interval=10,
timeout=5,
server='localhost',
password='password',
user='admin'
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp_half_open.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
with pytest.raises(F5ModuleError) as ex:
mm.exec_module()
assert "must be less than" in str(ex)
def test_update_timeout(self, *args):
set_module_args(dict(
name='foo',
timeout=300,
server='localhost',
password='password',
user='admin'
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp_half_open.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['timeout'] == 300
def test_update_time_until_up(self, *args):
set_module_args(dict(
name='foo',
time_until_up=300,
server='localhost',
password='password',
user='admin'
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp_half_open.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['time_until_up'] == 300
| 31.897833 | 91 | 0.623217 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
import pytest
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_monitor_tcp_half_open import Parameters
from library.modules.bigip_monitor_tcp_half_open import ModuleManager
from library.modules.bigip_monitor_tcp_half_open import ArgumentSpec
from library.modules.bigip_monitor_tcp_half_open import HAS_F5SDK
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_monitor_tcp_half_open import Parameters
from ansible.modules.network.f5.bigip_monitor_tcp_half_open import ModuleManager
from ansible.modules.network.f5.bigip_monitor_tcp_half_open import ArgumentSpec
from ansible.modules.network.f5.bigip_monitor_tcp_half_open import HAS_F5SDK
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Load a fixture file by name, caching the result; JSON content is decoded."""
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]
    with open(path) as handle:
        payload = handle.read()
    # Fixtures that are not valid JSON are kept as raw text.
    try:
        payload = json.loads(payload)
    except Exception:
        pass
    fixture_data[path] = payload
    return payload
class TestParameters(unittest.TestCase):
    """Unit tests for translating module and API arguments into Parameters."""

    def test_module_parameters(self):
        """Module-style arguments are adapted and normalized."""
        params = dict(
            name='foo',
            parent='parent',
            ip='10.10.10.10',
            port=80,
            interval=20,
            timeout=30,
            time_until_up=60,
            partition='Common'
        )
        parsed = Parameters(params=params)
        assert parsed.name == 'foo'
        assert parsed.parent == '/Common/parent'
        assert parsed.ip == '10.10.10.10'
        assert parsed.port == 80
        assert parsed.type == 'tcp_half_open'
        assert parsed.destination == '10.10.10.10:80'
        assert parsed.interval == 20
        assert parsed.timeout == 30
        assert parsed.time_until_up == 60

    def test_module_parameters_ints_as_strings(self):
        """Numeric options supplied as strings are coerced to integers."""
        params = dict(
            name='foo',
            parent='parent',
            ip='10.10.10.10',
            port=80,
            interval='20',
            timeout='30',
            time_until_up='60',
            partition='Common'
        )
        parsed = Parameters(params=params)
        assert parsed.name == 'foo'
        assert parsed.parent == '/Common/parent'
        assert parsed.ip == '10.10.10.10'
        assert parsed.port == 80
        assert parsed.type == 'tcp_half_open'
        assert parsed.destination == '10.10.10.10:80'
        assert parsed.interval == 20
        assert parsed.timeout == 30
        assert parsed.time_until_up == 60

    def test_api_parameters(self):
        """REST API field names map onto the same normalized attributes."""
        params = dict(
            name='foo',
            defaultsFrom='/Common/parent',
            destination='10.10.10.10:80',
            interval=20,
            timeout=30,
            timeUntilUp=60
        )
        parsed = Parameters(params=params)
        assert parsed.name == 'foo'
        assert parsed.parent == '/Common/parent'
        assert parsed.ip == '10.10.10.10'
        assert parsed.port == 80
        assert parsed.type == 'tcp_half_open'
        assert parsed.destination == '10.10.10.10:80'
        assert parsed.interval == 20
        assert parsed.timeout == 30
        assert parsed.time_until_up == 60
class TestManager(unittest.TestCase):
    """End-to-end exec_module() tests with all device access mocked out."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create_monitor(self, *args):
        """Creating a monitor that does not yet exist reports a change."""
        set_module_args(dict(
            name='foo',
            ip='10.10.10.10',
            port=80,
            interval=20,
            timeout=30,
            time_until_up=60,
            server='localhost',
            password='password',
            user='admin'
        ))
        ansible_module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        # Absent before create, present afterwards.
        manager = ModuleManager(module=ansible_module)
        manager.exists = Mock(side_effect=[False, True])
        manager.create_on_device = Mock(return_value=True)
        outcome = manager.exec_module()
        assert outcome['changed'] is True

    def test_create_monitor_idempotent(self, *args):
        """A create that matches the state already on the device changes nothing."""
        set_module_args(dict(
            name='foo',
            ip='10.10.10.10',
            port=80,
            interval=20,
            timeout=30,
            time_until_up=60,
            server='localhost',
            password='password',
            user='admin'
        ))
        device_state = Parameters(
            params=load_fixture('load_ltm_monitor_tcp_half_open.json')
        )
        ansible_module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        manager = ModuleManager(module=ansible_module)
        manager.exists = Mock(return_value=True)
        manager.read_current_from_device = Mock(return_value=device_state)
        outcome = manager.exec_module()
        assert outcome['changed'] is False

    def test_update_interval(self, *args):
        """Lowering the interval below the device's timeout succeeds."""
        set_module_args(dict(
            name='foo',
            interval=10,
            server='localhost',
            password='password',
            user='admin'
        ))
        device_state = Parameters(
            params=load_fixture('load_ltm_monitor_tcp_half_open.json')
        )
        ansible_module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        manager = ModuleManager(module=ansible_module)
        manager.exists = Mock(return_value=True)
        manager.read_current_from_device = Mock(return_value=device_state)
        manager.update_on_device = Mock(return_value=True)
        outcome = manager.exec_module()
        assert outcome['changed'] is True
        assert outcome['interval'] == 10

    def test_update_interval_larger_than_existing_timeout(self, *args):
        """An interval exceeding the timeout already on the device is rejected."""
        set_module_args(dict(
            name='foo',
            interval=30,
            server='localhost',
            password='password',
            user='admin'
        ))
        device_state = Parameters(
            params=load_fixture('load_ltm_monitor_tcp_half_open.json')
        )
        ansible_module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        manager = ModuleManager(module=ansible_module)
        manager.exists = Mock(return_value=True)
        manager.read_current_from_device = Mock(return_value=device_state)
        manager.update_on_device = Mock(return_value=True)
        with pytest.raises(F5ModuleError) as ex:
            manager.exec_module()
        assert "must be less than" in str(ex)

    def test_update_interval_larger_than_new_timeout(self, *args):
        """An interval exceeding the timeout supplied in the same play is rejected."""
        set_module_args(dict(
            name='foo',
            interval=10,
            timeout=5,
            server='localhost',
            password='password',
            user='admin'
        ))
        device_state = Parameters(
            params=load_fixture('load_ltm_monitor_tcp_half_open.json')
        )
        ansible_module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        manager = ModuleManager(module=ansible_module)
        manager.exists = Mock(return_value=True)
        manager.read_current_from_device = Mock(return_value=device_state)
        manager.update_on_device = Mock(return_value=True)
        with pytest.raises(F5ModuleError) as ex:
            manager.exec_module()
        assert "must be less than" in str(ex)

    def test_update_timeout(self, *args):
        """Raising the timeout is applied and echoed back in the results."""
        set_module_args(dict(
            name='foo',
            timeout=300,
            server='localhost',
            password='password',
            user='admin'
        ))
        device_state = Parameters(
            params=load_fixture('load_ltm_monitor_tcp_half_open.json')
        )
        ansible_module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        manager = ModuleManager(module=ansible_module)
        manager.exists = Mock(return_value=True)
        manager.read_current_from_device = Mock(return_value=device_state)
        manager.update_on_device = Mock(return_value=True)
        outcome = manager.exec_module()
        assert outcome['changed'] is True
        assert outcome['timeout'] == 300

    def test_update_time_until_up(self, *args):
        """Changing time_until_up is applied and echoed back in the results."""
        set_module_args(dict(
            name='foo',
            time_until_up=300,
            server='localhost',
            password='password',
            user='admin'
        ))
        device_state = Parameters(
            params=load_fixture('load_ltm_monitor_tcp_half_open.json')
        )
        ansible_module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        manager = ModuleManager(module=ansible_module)
        manager.exists = Mock(return_value=True)
        manager.read_current_from_device = Mock(return_value=device_state)
        manager.update_on_device = Mock(return_value=True)
        outcome = manager.exec_module()
        assert outcome['changed'] is True
        assert outcome['time_until_up'] == 300
| true | true |
1c33d43871b7029797fb7d3e58483a66e5a4d9b0 | 15,446 | py | Python | haproxy.py | Pigueiras/collectd-haproxy | c00cf052834b11b742830a1d96865a09877ee14c | [
"MIT"
] | null | null | null | haproxy.py | Pigueiras/collectd-haproxy | c00cf052834b11b742830a1d96865a09877ee14c | [
"MIT"
] | null | null | null | haproxy.py | Pigueiras/collectd-haproxy | c00cf052834b11b742830a1d96865a09877ee14c | [
"MIT"
] | null | null | null | # haproxy-collectd-plugin - haproxy.py
#
# Author: Michael Leinartas
# Description: This is a collectd plugin which runs under the Python plugin to
# collect metrics from haproxy.
# Plugin structure and logging func taken from
# https://github.com/phrawzty/rabbitmq-collectd-plugin
#
# Modified by "Warren Turkal" <wt@signalfuse.com>, "Volodymyr Zhabiuk" <vzhabiuk@signalfx.com>
import cStringIO as StringIO
import socket
import csv
import pprint
import collectd
PLUGIN_NAME = 'haproxy'
RECV_SIZE = 1024
DEFAULT_METRICS = {
'ConnRate': ('connection_rate', 'gauge'),
'CumReq': ('requests', 'derive'),
'Idle_pct': ('idle_pct', 'gauge'),
'scur': ('session_current', 'gauge'),
'SessRate': ('session_rate_all', 'gauge'),
'lbtot': ('server_selected_total', 'counter'),
'bout': ('bytes_out', 'derive'),
'bin': ('bytes_in', 'derive'),
'ttime': ('session_time_avg', 'gauge'),
'req_rate': ('request_rate', 'gauge'),
'rate': ('session_rate', 'gauge'),
'hrsp_2xx': ('response_2xx', 'derive'),
'hrsp_4xx': ('response_4xx', 'derive'),
'hrsp_5xx': ('response_5xx', 'derive'),
'ereq': ('error_request', 'derive'),
'dreq': ('denied_request', 'derive'),
'econ': ('error_connection', 'derive'),
'dresp': ('denied_response', 'derive'),
'qcur': ('queue_current', 'gauge'),
'qtime': ('queue_time_avg', 'gauge'),
'rtime': ('response_time_avg', 'gauge'),
'eresp': ('error_response', 'derive'),
'wretr': ('retries', 'derive'),
'wredis': ('redispatched', 'derive'),
}
ENHANCED_METRICS = {
# Metrics that are collected for the whole haproxy instance.
# The format is haproxy_metricname : {'signalfx_corresponding_metric': 'collectd_type'}
# Currently signalfx_corresponding_metric match haproxy_metricname
# Correspond to 'show info' socket command
'MaxConn': ('max_connections', 'gauge'),
'CumConns': ('connections', 'derive'),
'MaxConnRate': ('max_connection_rate', 'gauge'),
'MaxSessRate': ('max_session_rate', 'gauge'),
'MaxSslConns': ('max_ssl_connections', 'gauge'),
'CumSslConns': ('ssl_connections', 'derive'),
'MaxPipes': ('max_pipes', 'gauge'),
'Tasks': ('tasks', 'gauge'),
'Run_queue': ('run_queue', 'gauge'),
'PipesUsed': ('pipes_used', 'gauge'),
'PipesFree': ('pipes_free', 'gauge'),
'Uptime_sec': ('uptime_seconds', 'derive'),
'CurrConns': ('current_connections', 'gauge'),
'CurrSslConns': ('current_ssl_connections', 'gauge'),
'SslRate': ('ssl_rate', 'gauge'),
'SslFrontendKeyRate': ('ssl_frontend_key_rate', 'gauge'),
'SslBackendKeyRate': ('ssl_backend_key_rate', 'gauge'),
'SslCacheLookups': ('ssl_cache_lookups', 'derive'),
'SslCacheMisses': ('ssl_cache_misses', 'derive'),
'CompressBpsIn': ('compress_bps_in', 'derive'),
'CompressBpsOut': ('compress_bps_out', 'derive'),
'ZlibMemUsage': ('zlib_mem_usage', 'gauge'),
# Metrics that are collected per each proxy separately.
# Proxy name would be the dimension as well as service_name
# Correspond to 'show stats' socket command
'chkfail': ('failed_checks', 'derive'),
'downtime': ('downtime', 'derive'),
'hrsp_1xx': ('response_1xx', 'derive'),
'hrsp_3xx': ('response_3xx', 'derive'),
'hrsp_other': ('response_other', 'derive'),
'qmax': ('queue_max', 'gauge'),
'qlimit': ('queue_limit', 'gauge'),
'rate_lim': ('session_rate_limit', 'gauge'),
'rate_max': ('session_rate_max', 'gauge'),
'req_rate_max': ('request_rate_max', 'gauge'),
'stot': ('session_total', 'derive'),
'slim': ('session_limit', 'gauge'),
'smax': ('session_max', 'gauge'),
'throttle': ('throttle', 'gauge'),
'cli_abrt': ('cli_abrt', 'derive'),
'srv_abrt': ('srv_abrt', 'derive'),
'comp_in': ('comp_in', 'derive'),
'comp_out': ('comp_out', 'derive'),
'comp_byp': ('comp_byp', 'derive'),
'comp_rsp': ('comp_rsp', 'derive'),
'ctime': ('connect_time_avg', 'gauge'),
'act': ('active_servers', 'gauge'),
'bck': ('backup_servers', 'gauge'),
'check_duration': ('health_check_duration', 'gauge'),
'lastsess': ('last_session', 'gauge'),
'conn_rate': ('conn_rate', 'gauge'),
'conn_rate_max': ('conn_rate_max', 'gauge'),
'conn_tot': ('conn_total', 'counter'),
'intercepted': ('intercepted', 'gauge'),
'dcon': ('denied_tcp_conn', 'gauge'),
'dses': ('denied_tcp_sess', 'gauge'),
}
DIMENSIONS_LIST = [
'pxname',
'svname',
'pid',
'sid',
'iid',
'type',
'addr',
'cookie',
'mode',
'algo',
]
DEFAULT_METRICS = dict((k.lower(), v) for k, v in DEFAULT_METRICS.items())
ENHANCED_METRICS = dict((k.lower(), v) for k, v in ENHANCED_METRICS.items())
METRIC_DELIM = '.' # for the frontend/backend stats
DEFAULT_SOCKET = '/var/run/haproxy.sock'
DEFAULT_PROXY_MONITORS = ['server', 'frontend', 'backend']
class HAProxySocket(object):
    """
    Encapsulates communication with HAProxy via the stats socket interface.

    The socket may be a unix domain socket path (starts with '/') or a
    'host:port' TCP endpoint.
    """
    def __init__(self, socket_file=DEFAULT_SOCKET):
        self.socket_file = socket_file

    def connect(self):
        """Open and return a connected socket to HAProxy, or None on bad config."""
        # unix sockets all start with '/', use tcp otherwise
        is_unix = self.socket_file.startswith('/')
        if is_unix:
            stat_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            stat_sock.connect(self.socket_file)
            return stat_sock
        else:
            socket_host, separator, port = self.socket_file.rpartition(':')
            # Fixed: compare to string literals with ==/!=, not the identity
            # operator 'is'; identity of small strings is an implementation
            # detail and this is a SyntaxWarning on modern CPython.
            if socket_host != '' and port != '' and separator == ':':
                stat_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                stat_sock.connect((socket_host, int(port)))
                return stat_sock
            else:
                collectd.error('Could not connect to socket with host %s. Check HAProxy config.' % self.socket_file)
                return

    def communicate(self, command):
        '''Get response from single command.

        Args:
            command: string command to send to haproxy stat socket

        Returns:
            a string of the response data; empty string if no connection
            could be established
        '''
        if not command.endswith('\n'):
            command += '\n'
        stat_sock = self.connect()
        if stat_sock is None:
            return ''
        stat_sock.sendall(command)
        result_buf = StringIO.StringIO()
        buf = stat_sock.recv(RECV_SIZE)
        while buf:
            result_buf.write(buf)
            buf = stat_sock.recv(RECV_SIZE)
        stat_sock.close()
        return result_buf.getvalue()

    def get_server_info(self):
        """Return 'show info' output parsed into a {key: value} dict of strings."""
        result = {}
        output = self.communicate('show info')
        for line in output.splitlines():
            try:
                key, val = line.split(':', 1)
            except ValueError:
                # Skip lines that do not have a 'key: value' shape.
                continue
            result[key.strip()] = val.strip()
        return result

    def get_server_stats(self):
        """Return 'show stat' CSV output parsed into a list of per-proxy dicts."""
        output = self.communicate('show stat')
        # sanitize and make a list of lines
        output = output.lstrip('# ').strip()
        output = [l.strip(',') for l in output.splitlines()]
        csvreader = csv.DictReader(output)
        result = [d.copy() for d in csvreader]
        return result
def get_stats(module_config):
    """Fetch instance-wide info plus per-proxy stats from HAProxy.

    Returns a list of (metric_name, int_value, dimensions_dict) tuples;
    the dimensions dict is empty for instance-wide metrics.
    """
    if module_config['socket'] is None:
        collectd.error("Socket configuration parameter is undefined. Couldn't get the stats")
        return
    stats = []
    client = HAProxySocket(module_config['socket'])
    try:
        server_info = client.get_server_info()
        server_stats = client.get_server_stats()
    except socket.error:
        collectd.warning('status err Unable to connect to HAProxy socket at %s' % module_config['socket'])
        return stats
    # Instance-wide values; only numeric entries are kept.
    for key, val in server_info.iteritems():
        try:
            stats.append((key, int(val), dict()))
        except (TypeError, ValueError):
            pass
    # Per-proxy values, restricted to the monitored proxy/service names.
    monitored = module_config['proxy_monitors']
    for row in server_stats:
        if row['svname'].lower() not in monitored and \
                row['pxname'].lower() not in monitored:
            continue
        dimensions = _build_dimension_dict(row)
        for metricname, val in row.items():
            try:
                stats.append((metricname, int(val), dimensions))
            except (TypeError, ValueError):
                pass
    return stats
def _build_dimension_dict(statdict):
"""
Builds dimensions dict to send back with metrics with readable metric names
Args:
statdict dictionary of metrics from HAProxy to be filtered for dimensions
"""
dimensions = {}
for key in DIMENSIONS_LIST:
if key in statdict and key == 'pxname':
dimensions['proxy_name'] = statdict['pxname']
elif key in statdict and key == 'svname':
dimensions['service_name'] = statdict['svname']
elif key in statdict and key == 'pid':
dimensions['process_id'] = statdict['pid']
elif key in statdict and key == 'sid':
dimensions['server_id'] = statdict['sid']
elif key in statdict and key == 'iid':
dimensions['unique_proxy_id'] = statdict['iid']
elif key in statdict and key == 'type':
dimensions['type'] = _get_proxy_type(statdict['type'])
elif key in statdict and key == 'addr':
dimensions['address'] = statdict['addr']
elif key in statdict and key == 'algo':
dimensions['algorithm'] = statdict['algo']
elif key in statdict:
dimensions[key] = statdict[key]
return dimensions
def config(config_values):
    """
    Callback invoked by collectd with the plugin's configuration block.

    Parses the <Module> options, fills in defaults, and registers the read
    callback. When Testing is enabled, the parsed config is returned for
    inspection instead.

    Args:
        config_values (collectd.Config): Object containing config values
    """
    module_config = {}
    # Renamed from 'socket' so the local does not shadow the socket module.
    stats_socket = DEFAULT_SOCKET
    proxy_monitors = []
    excluded_metrics = set()
    enhanced_metrics = False
    interval = None
    testing = False
    custom_dimensions = {}
    for node in config_values.children:
        if node.key == "ProxyMonitor" and node.values[0]:
            proxy_monitors.append(node.values[0])
        elif node.key == "Socket" and node.values[0]:
            stats_socket = node.values[0]
        elif node.key == "Interval" and node.values[0]:
            interval = node.values[0]
        elif node.key == "EnhancedMetrics" and node.values[0]:
            enhanced_metrics = _str_to_bool(node.values[0])
        elif node.key == "ExcludeMetric" and node.values[0]:
            excluded_metrics.add(node.values[0])
        elif node.key == "Testing" and node.values[0]:
            testing = _str_to_bool(node.values[0])
        elif node.key == 'Dimension':
            if len(node.values) == 2:
                custom_dimensions.update({node.values[0]: node.values[1]})
            else:
                # A Dimension entry must be exactly a (name, value) pair.
                # Fixed: the previous backslash line-continuation inside the
                # string embedded a long run of spaces in the log message.
                collectd.warning(
                    "WARNING: Check configuration setting for %s" % node.key)
        else:
            collectd.warning('Unknown config key: %s' % node.key)
    if not proxy_monitors:
        proxy_monitors += DEFAULT_PROXY_MONITORS
    module_config = {
        'socket': stats_socket,
        'proxy_monitors': proxy_monitors,
        'interval': interval,
        'enhanced_metrics': enhanced_metrics,
        'excluded_metrics': excluded_metrics,
        'custom_dimensions': custom_dimensions,
        'testing': testing,
    }
    proxys = "_".join(proxy_monitors)
    if testing:
        # Unit tests call config() directly and inspect the parsed result.
        return module_config
    interval_kwarg = {}
    if interval:
        interval_kwarg['interval'] = interval
    collectd.register_read(collect_metrics, data=module_config,
                           name='node_' + module_config['socket'] + '_' + proxys,
                           **interval_kwarg)
def _format_dimensions(dimensions):
"""
Formats a dictionary of dimensions to a format that enables them to be
specified as key, value pairs in plugin_instance to signalfx. E.g.
>>> dimensions = {'a': 'foo', 'b': 'bar'}
>>> _format_dimensions(dimensions)
"[a=foo,b=bar]"
Args:
dimensions (dict): Mapping of {dimension_name: value, ...}
Returns:
str: Comma-separated list of dimensions
"""
dim_pairs = ["%s=%s" % (k, v) for k, v in dimensions.iteritems()]
return "[%s]" % (",".join(dim_pairs))
def _get_proxy_type(type_id):
"""
Return human readable proxy type
Args:
type_id: 0=frontend, 1=backend, 2=server, 3=socket/listener
"""
proxy_types = {
0: 'frontend',
1: 'backend',
2: 'server',
3: 'socket/listener',
}
return proxy_types.get(int(type_id))
def _str_to_bool(val):
'''
Converts a true/false string to a boolean
'''
val = str(val).strip().lower()
if val == 'true':
return True
elif val != 'false':
collectd.warning('Warning: String (%s) could not be converted to a boolean. Returning false.' % val)
return False
def collect_metrics(module_config):
    """
    A callback method that gets metrics from HAProxy and records them to collectd.
    """
    # Fixed: this docstring previously appeared AFTER the first statement,
    # making it a dead string literal rather than the function's docstring.
    collectd.debug('beginning collect_metrics')
    info = get_stats(module_config)
    if not info:
        collectd.warning('%s: No data received' % PLUGIN_NAME)
        return
    for metric_name, metric_value, dimensions in info:
        # Compute the lookup key once instead of calling .lower() repeatedly.
        lower_name = metric_name.lower()
        # assert metric is in valid metrics lists
        if lower_name not in DEFAULT_METRICS and lower_name not in ENHANCED_METRICS:
            collectd.debug("metric %s is not in either metric list" % lower_name)
            continue
        # skip metrics in enhanced metrics mode if not enabled
        if not module_config['enhanced_metrics'] and lower_name in ENHANCED_METRICS:
            continue
        # pull metric name & type from respective metrics list
        if lower_name in DEFAULT_METRICS:
            translated_metric_name, val_type = DEFAULT_METRICS[lower_name]
        else:
            translated_metric_name, val_type = ENHANCED_METRICS[lower_name]
        # skip over any excluded metrics
        if translated_metric_name in module_config['excluded_metrics']:
            collectd.debug("excluding metric %s" % translated_metric_name)
            continue
        # create datapoint and dispatch
        datapoint = collectd.Values()
        datapoint.type = val_type
        datapoint.type_instance = translated_metric_name
        datapoint.plugin = PLUGIN_NAME
        dimensions.update(module_config['custom_dimensions'])
        if len(dimensions) > 0:
            datapoint.plugin_instance = _format_dimensions(dimensions)
        datapoint.values = (metric_value,)
        pprint_dict = {
            'plugin': datapoint.plugin,
            'plugin_instance': datapoint.plugin_instance,
            'type': datapoint.type,
            'type_instance': datapoint.type_instance,
            'values': datapoint.values
        }
        collectd.debug(pprint.pformat(pprint_dict))
        datapoint.dispatch()
collectd.register_config(config)
| 35.345538 | 120 | 0.614981 |
import cStringIO as StringIO
import socket
import csv
import pprint
import collectd
PLUGIN_NAME = 'haproxy'
RECV_SIZE = 1024
DEFAULT_METRICS = {
'ConnRate': ('connection_rate', 'gauge'),
'CumReq': ('requests', 'derive'),
'Idle_pct': ('idle_pct', 'gauge'),
'scur': ('session_current', 'gauge'),
'SessRate': ('session_rate_all', 'gauge'),
'lbtot': ('server_selected_total', 'counter'),
'bout': ('bytes_out', 'derive'),
'bin': ('bytes_in', 'derive'),
'ttime': ('session_time_avg', 'gauge'),
'req_rate': ('request_rate', 'gauge'),
'rate': ('session_rate', 'gauge'),
'hrsp_2xx': ('response_2xx', 'derive'),
'hrsp_4xx': ('response_4xx', 'derive'),
'hrsp_5xx': ('response_5xx', 'derive'),
'ereq': ('error_request', 'derive'),
'dreq': ('denied_request', 'derive'),
'econ': ('error_connection', 'derive'),
'dresp': ('denied_response', 'derive'),
'qcur': ('queue_current', 'gauge'),
'qtime': ('queue_time_avg', 'gauge'),
'rtime': ('response_time_avg', 'gauge'),
'eresp': ('error_response', 'derive'),
'wretr': ('retries', 'derive'),
'wredis': ('redispatched', 'derive'),
}
ENHANCED_METRICS = {
'MaxConn': ('max_connections', 'gauge'),
'CumConns': ('connections', 'derive'),
'MaxConnRate': ('max_connection_rate', 'gauge'),
'MaxSessRate': ('max_session_rate', 'gauge'),
'MaxSslConns': ('max_ssl_connections', 'gauge'),
'CumSslConns': ('ssl_connections', 'derive'),
'MaxPipes': ('max_pipes', 'gauge'),
'Tasks': ('tasks', 'gauge'),
'Run_queue': ('run_queue', 'gauge'),
'PipesUsed': ('pipes_used', 'gauge'),
'PipesFree': ('pipes_free', 'gauge'),
'Uptime_sec': ('uptime_seconds', 'derive'),
'CurrConns': ('current_connections', 'gauge'),
'CurrSslConns': ('current_ssl_connections', 'gauge'),
'SslRate': ('ssl_rate', 'gauge'),
'SslFrontendKeyRate': ('ssl_frontend_key_rate', 'gauge'),
'SslBackendKeyRate': ('ssl_backend_key_rate', 'gauge'),
'SslCacheLookups': ('ssl_cache_lookups', 'derive'),
'SslCacheMisses': ('ssl_cache_misses', 'derive'),
'CompressBpsIn': ('compress_bps_in', 'derive'),
'CompressBpsOut': ('compress_bps_out', 'derive'),
'ZlibMemUsage': ('zlib_mem_usage', 'gauge'),
'chkfail': ('failed_checks', 'derive'),
'downtime': ('downtime', 'derive'),
'hrsp_1xx': ('response_1xx', 'derive'),
'hrsp_3xx': ('response_3xx', 'derive'),
'hrsp_other': ('response_other', 'derive'),
'qmax': ('queue_max', 'gauge'),
'qlimit': ('queue_limit', 'gauge'),
'rate_lim': ('session_rate_limit', 'gauge'),
'rate_max': ('session_rate_max', 'gauge'),
'req_rate_max': ('request_rate_max', 'gauge'),
'stot': ('session_total', 'derive'),
'slim': ('session_limit', 'gauge'),
'smax': ('session_max', 'gauge'),
'throttle': ('throttle', 'gauge'),
'cli_abrt': ('cli_abrt', 'derive'),
'srv_abrt': ('srv_abrt', 'derive'),
'comp_in': ('comp_in', 'derive'),
'comp_out': ('comp_out', 'derive'),
'comp_byp': ('comp_byp', 'derive'),
'comp_rsp': ('comp_rsp', 'derive'),
'ctime': ('connect_time_avg', 'gauge'),
'act': ('active_servers', 'gauge'),
'bck': ('backup_servers', 'gauge'),
'check_duration': ('health_check_duration', 'gauge'),
'lastsess': ('last_session', 'gauge'),
'conn_rate': ('conn_rate', 'gauge'),
'conn_rate_max': ('conn_rate_max', 'gauge'),
'conn_tot': ('conn_total', 'counter'),
'intercepted': ('intercepted', 'gauge'),
'dcon': ('denied_tcp_conn', 'gauge'),
'dses': ('denied_tcp_sess', 'gauge'),
}
DIMENSIONS_LIST = [
'pxname',
'svname',
'pid',
'sid',
'iid',
'type',
'addr',
'cookie',
'mode',
'algo',
]
DEFAULT_METRICS = dict((k.lower(), v) for k, v in DEFAULT_METRICS.items())
ENHANCED_METRICS = dict((k.lower(), v) for k, v in ENHANCED_METRICS.items())
METRIC_DELIM = '.'
DEFAULT_SOCKET = '/var/run/haproxy.sock'
DEFAULT_PROXY_MONITORS = ['server', 'frontend', 'backend']
class HAProxySocket(object):
    """
    Encapsulates communication with HAProxy via the stats socket interface.

    The socket may be a unix domain socket path (starts with '/') or a
    'host:port' TCP endpoint.
    """
    def __init__(self, socket_file=DEFAULT_SOCKET):
        self.socket_file = socket_file

    def connect(self):
        """Open and return a connected socket to HAProxy, or None on bad config."""
        # unix sockets all start with '/', use tcp otherwise
        is_unix = self.socket_file.startswith('/')
        if is_unix:
            stat_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            stat_sock.connect(self.socket_file)
            return stat_sock
        else:
            socket_host, separator, port = self.socket_file.rpartition(':')
            # Fixed: compare to string literals with ==/!=, not the identity
            # operator 'is'; identity of small strings is an implementation
            # detail and this is a SyntaxWarning on modern CPython.
            if socket_host != '' and port != '' and separator == ':':
                stat_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                stat_sock.connect((socket_host, int(port)))
                return stat_sock
            else:
                collectd.error('Could not connect to socket with host %s. Check HAProxy config.' % self.socket_file)
                return

    def communicate(self, command):
        '''Get response from single command.

        Args:
            command: string command to send to haproxy stat socket

        Returns:
            a string of the response data; empty string if no connection
            could be established
        '''
        if not command.endswith('\n'):
            command += '\n'
        stat_sock = self.connect()
        if stat_sock is None:
            return ''
        stat_sock.sendall(command)
        result_buf = StringIO.StringIO()
        buf = stat_sock.recv(RECV_SIZE)
        while buf:
            result_buf.write(buf)
            buf = stat_sock.recv(RECV_SIZE)
        stat_sock.close()
        return result_buf.getvalue()

    def get_server_info(self):
        """Return 'show info' output parsed into a {key: value} dict of strings."""
        result = {}
        output = self.communicate('show info')
        for line in output.splitlines():
            try:
                key, val = line.split(':', 1)
            except ValueError:
                # Skip lines that do not have a 'key: value' shape.
                continue
            result[key.strip()] = val.strip()
        return result

    def get_server_stats(self):
        """Return 'show stat' CSV output parsed into a list of per-proxy dicts."""
        output = self.communicate('show stat')
        output = output.lstrip('# ').strip()
        output = [l.strip(',') for l in output.splitlines()]
        csvreader = csv.DictReader(output)
        result = [d.copy() for d in csvreader]
        return result
def get_stats(module_config):
    """Fetch instance-wide info plus per-proxy stats from HAProxy.

    Returns a list of (metric_name, int_value, dimensions_dict) tuples;
    the dimensions dict is empty for instance-wide metrics.
    """
    if module_config['socket'] is None:
        collectd.error("Socket configuration parameter is undefined. Couldn't get the stats")
        return
    stats = []
    client = HAProxySocket(module_config['socket'])
    try:
        server_info = client.get_server_info()
        server_stats = client.get_server_stats()
    except socket.error:
        collectd.warning('status err Unable to connect to HAProxy socket at %s' % module_config['socket'])
        return stats
    # Instance-wide values; only numeric entries are kept.
    for key, val in server_info.iteritems():
        try:
            stats.append((key, int(val), dict()))
        except (TypeError, ValueError):
            pass
    # Per-proxy values, restricted to the monitored proxy/service names.
    monitored = module_config['proxy_monitors']
    for row in server_stats:
        if row['svname'].lower() not in monitored and \
                row['pxname'].lower() not in monitored:
            continue
        dimensions = _build_dimension_dict(row)
        for metricname, val in row.items():
            try:
                stats.append((metricname, int(val), dimensions))
            except (TypeError, ValueError):
                pass
    return stats
def _build_dimension_dict(statdict):
dimensions = {}
for key in DIMENSIONS_LIST:
if key in statdict and key == 'pxname':
dimensions['proxy_name'] = statdict['pxname']
elif key in statdict and key == 'svname':
dimensions['service_name'] = statdict['svname']
elif key in statdict and key == 'pid':
dimensions['process_id'] = statdict['pid']
elif key in statdict and key == 'sid':
dimensions['server_id'] = statdict['sid']
elif key in statdict and key == 'iid':
dimensions['unique_proxy_id'] = statdict['iid']
elif key in statdict and key == 'type':
dimensions['type'] = _get_proxy_type(statdict['type'])
elif key in statdict and key == 'addr':
dimensions['address'] = statdict['addr']
elif key in statdict and key == 'algo':
dimensions['algorithm'] = statdict['algo']
elif key in statdict:
dimensions[key] = statdict[key]
return dimensions
def config(config_values):
    """
    Callback invoked by collectd with the plugin's configuration block.

    Parses the <Module> options, fills in defaults, and registers the read
    callback. When Testing is enabled, the parsed config is returned for
    inspection instead.

    Args:
        config_values (collectd.Config): Object containing config values
    """
    module_config = {}
    # Renamed from 'socket' so the local does not shadow the socket module.
    stats_socket = DEFAULT_SOCKET
    proxy_monitors = []
    excluded_metrics = set()
    enhanced_metrics = False
    interval = None
    testing = False
    custom_dimensions = {}
    for node in config_values.children:
        if node.key == "ProxyMonitor" and node.values[0]:
            proxy_monitors.append(node.values[0])
        elif node.key == "Socket" and node.values[0]:
            stats_socket = node.values[0]
        elif node.key == "Interval" and node.values[0]:
            interval = node.values[0]
        elif node.key == "EnhancedMetrics" and node.values[0]:
            enhanced_metrics = _str_to_bool(node.values[0])
        elif node.key == "ExcludeMetric" and node.values[0]:
            excluded_metrics.add(node.values[0])
        elif node.key == "Testing" and node.values[0]:
            testing = _str_to_bool(node.values[0])
        elif node.key == 'Dimension':
            if len(node.values) == 2:
                custom_dimensions.update({node.values[0]: node.values[1]})
            else:
                # A Dimension entry must be exactly a (name, value) pair.
                # Fixed: the previous backslash line-continuation inside the
                # string embedded a long run of spaces in the log message.
                collectd.warning(
                    "WARNING: Check configuration setting for %s" % node.key)
        else:
            collectd.warning('Unknown config key: %s' % node.key)
    if not proxy_monitors:
        proxy_monitors += DEFAULT_PROXY_MONITORS
    module_config = {
        'socket': stats_socket,
        'proxy_monitors': proxy_monitors,
        'interval': interval,
        'enhanced_metrics': enhanced_metrics,
        'excluded_metrics': excluded_metrics,
        'custom_dimensions': custom_dimensions,
        'testing': testing,
    }
    proxys = "_".join(proxy_monitors)
    if testing:
        # Unit tests call config() directly and inspect the parsed result.
        return module_config
    interval_kwarg = {}
    if interval:
        interval_kwarg['interval'] = interval
    collectd.register_read(collect_metrics, data=module_config,
                           name='node_' + module_config['socket'] + '_' + proxys,
                           **interval_kwarg)
def _format_dimensions(dimensions):
dim_pairs = ["%s=%s" % (k, v) for k, v in dimensions.iteritems()]
return "[%s]" % (",".join(dim_pairs))
def _get_proxy_type(type_id):
proxy_types = {
0: 'frontend',
1: 'backend',
2: 'server',
3: 'socket/listener',
}
return proxy_types.get(int(type_id))
def _str_to_bool(val):
val = str(val).strip().lower()
if val == 'true':
return True
elif val != 'false':
collectd.warning('Warning: String (%s) could not be converted to a boolean. Returning false.' % val)
return False
def collect_metrics(module_config):
    """Read callback: poll haproxy stats and dispatch them to collectd.

    Pulls (name, value, dimensions) tuples from get_stats(), filters them
    against the default/enhanced metric whitelists and the configured
    exclusions, then emits one collectd datapoint per surviving metric.
    """
    collectd.debug('beginning collect_metrics')
    info = get_stats(module_config)
    if not info:
        collectd.warning('%s: No data received' % PLUGIN_NAME)
        return
    for metric_name, metric_value, dimensions in info:
        # assert metric is in valid metrics lists
        if not metric_name.lower() in DEFAULT_METRICS and not metric_name.lower() in ENHANCED_METRICS:
            collectd.debug("metric %s is not in either metric list" % metric_name.lower())
            continue
        # skip metrics in enhanced metrics mode if not enabled
        if not module_config['enhanced_metrics'] and metric_name.lower() in ENHANCED_METRICS:
            continue
        # pull metric name & type from respective metrics list
        if metric_name.lower() in DEFAULT_METRICS:
            translated_metric_name, val_type = DEFAULT_METRICS[metric_name.lower()]
        else:
            translated_metric_name, val_type = ENHANCED_METRICS[metric_name.lower()]
        # skip over any excluded metrics
        if translated_metric_name in module_config['excluded_metrics']:
            collectd.debug("excluding metric %s" % translated_metric_name)
            continue
        # create datapoint and dispatch
        datapoint = collectd.Values()
        datapoint.type = val_type
        datapoint.type_instance = translated_metric_name
        datapoint.plugin = PLUGIN_NAME
        # per-metric dimensions are merged with (and overridden by) the
        # user-configured Dimension entries
        dimensions.update(module_config['custom_dimensions'])
        if len(dimensions) > 0:
            datapoint.plugin_instance = _format_dimensions(dimensions)
        datapoint.values = (metric_value,)
        # NOTE(review): plugin_instance is read here even when no dimensions
        # were set — relies on collectd.Values providing a default; confirm.
        pprint_dict = {
            'plugin': datapoint.plugin,
            'plugin_instance': datapoint.plugin_instance,
            'type': datapoint.type,
            'type_instance': datapoint.type_instance,
            'values': datapoint.values
        }
        collectd.debug(pprint.pformat(pprint_dict))
        datapoint.dispatch()
collectd.register_config(config)
| true | true |
1c33d6777841b1659189493027fd375b3ea627d8 | 2,601 | py | Python | alipay/aop/api/domain/AlipayMarketingShowwindowContentSyncModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/AlipayMarketingShowwindowContentSyncModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/AlipayMarketingShowwindowContentSyncModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.IotDeviceInfo import IotDeviceInfo
class AlipayMarketingShowwindowContentSyncModel(object):
    """Request model for syncing show-window content to IoT devices.

    Carries an optional list of target devices plus an event tag and a
    source identifier, and converts to/from the dict shape used by the
    Alipay OpenAPI gateway.
    """

    def __init__(self):
        self._device_info_list = None
        self._event_tag = None
        self._source = None

    @property
    def device_info_list(self):
        return self._device_info_list

    @device_info_list.setter
    def device_info_list(self, value):
        # Only list inputs are accepted; each entry is coerced to an
        # IotDeviceInfo (dicts are parsed, instances are kept as-is).
        if isinstance(value, list):
            self._device_info_list = [
                entry if isinstance(entry, IotDeviceInfo)
                else IotDeviceInfo.from_alipay_dict(entry)
                for entry in value
            ]

    @property
    def event_tag(self):
        return self._event_tag

    @event_tag.setter
    def event_tag(self, value):
        self._event_tag = value

    @property
    def source(self):
        return self._source

    @source.setter
    def source(self, value):
        self._source = value

    def to_alipay_dict(self):
        """Serialize populated (truthy) fields into a plain dict."""
        params = dict()
        if self.device_info_list:
            if isinstance(self.device_info_list, list):
                # Convert model elements in place before emitting the list.
                for idx, entry in enumerate(self.device_info_list):
                    if hasattr(entry, 'to_alipay_dict'):
                        self.device_info_list[idx] = entry.to_alipay_dict()
            if hasattr(self.device_info_list, 'to_alipay_dict'):
                params['device_info_list'] = self.device_info_list.to_alipay_dict()
            else:
                params['device_info_list'] = self.device_info_list
        for field in ('event_tag', 'source'):
            value = getattr(self, field)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[field] = value.to_alipay_dict()
                else:
                    params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a gateway dict; empty/None input yields None."""
        if not d:
            return None
        model = AlipayMarketingShowwindowContentSyncModel()
        for field in ('device_info_list', 'event_tag', 'source'):
            if field in d:
                setattr(model, field, d[field])
        return model
| 31.337349 | 84 | 0.594002 |
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.IotDeviceInfo import IotDeviceInfo
class AlipayMarketingShowwindowContentSyncModel(object):
    """Request model for syncing show-window content to IoT devices.

    Holds an optional device list, an event tag, and a source identifier,
    with to/from-dict conversion for the Alipay OpenAPI gateway.
    """
    def __init__(self):
        self._device_info_list = None
        self._event_tag = None
        self._source = None
    @property
    def device_info_list(self):
        return self._device_info_list
    @device_info_list.setter
    def device_info_list(self, value):
        # Only list inputs are accepted; each element is coerced to an
        # IotDeviceInfo (dict entries are parsed, instances kept as-is).
        if isinstance(value, list):
            self._device_info_list = list()
            for i in value:
                if isinstance(i, IotDeviceInfo):
                    self._device_info_list.append(i)
                else:
                    self._device_info_list.append(IotDeviceInfo.from_alipay_dict(i))
    @property
    def event_tag(self):
        return self._event_tag
    @event_tag.setter
    def event_tag(self, value):
        self._event_tag = value
    @property
    def source(self):
        return self._source
    @source.setter
    def source(self, value):
        self._source = value
    def to_alipay_dict(self):
        """Serialize populated (truthy) fields into a plain dict."""
        params = dict()
        if self.device_info_list:
            if isinstance(self.device_info_list, list):
                # Convert model elements in place before emitting the list.
                for i in range(0, len(self.device_info_list)):
                    element = self.device_info_list[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.device_info_list[i] = element.to_alipay_dict()
            if hasattr(self.device_info_list, 'to_alipay_dict'):
                params['device_info_list'] = self.device_info_list.to_alipay_dict()
            else:
                params['device_info_list'] = self.device_info_list
        if self.event_tag:
            if hasattr(self.event_tag, 'to_alipay_dict'):
                params['event_tag'] = self.event_tag.to_alipay_dict()
            else:
                params['event_tag'] = self.event_tag
        if self.source:
            if hasattr(self.source, 'to_alipay_dict'):
                params['source'] = self.source.to_alipay_dict()
            else:
                params['source'] = self.source
        return params
    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a gateway dict; empty/None input yields None."""
        if not d:
            return None
        o = AlipayMarketingShowwindowContentSyncModel()
        if 'device_info_list' in d:
            o.device_info_list = d['device_info_list']
        if 'event_tag' in d:
            o.event_tag = d['event_tag']
        if 'source' in d:
            o.source = d['source']
        return o
| true | true |
1c33d6f521723588d60178a1a57730551916112c | 6,475 | py | Python | pyod/test/test_xgbod.py | BillyGareth/pyod | 4ad1ab8cd88382fe15c237e8db8ad8e3a9302eaf | [
"BSD-2-Clause"
] | 2 | 2017-10-07T21:41:48.000Z | 2017-10-08T02:51:12.000Z | pyod/test/test_xgbod.py | BillyGareth/pyod | 4ad1ab8cd88382fe15c237e8db8ad8e3a9302eaf | [
"BSD-2-Clause"
] | 4 | 2021-11-01T18:40:00.000Z | 2022-03-05T19:26:48.000Z | pyod/test/test_xgbod.py | Pandinosaurus/pyod | 7aeefcf65ceb0196434b7adb4fd706bfb404e4e2 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import os
import sys
from os import path
import unittest
# noinspection PyProtectedMember
from numpy.testing import assert_allclose
from numpy.testing import assert_array_less
from numpy.testing import assert_equal
from numpy.testing import assert_raises
from sklearn.metrics import roc_auc_score
from sklearn.base import clone
from sklearn.model_selection import train_test_split
from sklearn.utils.validation import check_X_y
from scipy.io import loadmat
from scipy.stats import rankdata
# temporary solution for relative imports in case pyod is not installed
# if pyod is installed, no need to use the following line
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from pyod.models.xgbod import XGBOD
from pyod.utils.data import generate_data
class TestXGBOD(unittest.TestCase):
    """Unit tests for pyod's XGBOD semi-supervised outlier detector.

    setUp fits a single model on the pima.mat benchmark (or on synthetic
    data when the file is missing) and the remaining tests exercise the
    fitted estimator's public API.
    """
    def setUp(self):
        # Define data file and read X and y
        # Generate some data if the source data is missing
        this_directory = path.abspath(path.dirname(__file__))
        mat_file = 'pima.mat'
        try:
            mat = loadmat(path.join(*[this_directory, 'data', mat_file]))
        except TypeError:
            # NOTE(review): presumably some scipy versions raise TypeError
            # for a missing/bad path — confirm which versions this guards.
            print('{data_file} does not exist. Use generated data'.format(
                data_file=mat_file))
            X, y = generate_data(train_only=True)  # load data
        except IOError:
            print('{data_file} does not exist. Use generated data'.format(
                data_file=mat_file))
            X, y = generate_data(train_only=True)  # load data
        else:
            X = mat['X']
            y = mat['y'].ravel()
            X, y = check_X_y(X, y)
        self.X_train, self.X_test, self.y_train, self.y_test = \
            train_test_split(X, y, test_size=0.4, random_state=42)
        self.clf = XGBOD(random_state=42)
        self.clf.fit(self.X_train, self.y_train)
        self.roc_floor = 0.75
    def test_parameters(self):
        # fitted estimator must expose all learned attributes
        assert (hasattr(self.clf, 'clf_') and
                self.clf.decision_scores_ is not None)
        assert (hasattr(self.clf, '_scalar') and
                self.clf.labels_ is not None)
        assert (hasattr(self.clf, 'n_detector_') and
                self.clf.labels_ is not None)
        assert (hasattr(self.clf, 'X_train_add_') and
                self.clf.labels_ is not None)
        assert (hasattr(self.clf, 'decision_scores_') and
                self.clf.decision_scores_ is not None)
        assert (hasattr(self.clf, 'labels_') and
                self.clf.labels_ is not None)
    def test_train_scores(self):
        # one training score per training sample
        assert_equal(len(self.clf.decision_scores_), self.X_train.shape[0])
    def test_prediction_scores(self):
        pred_scores = self.clf.decision_function(self.X_test)
        # check score shapes
        assert_equal(pred_scores.shape[0], self.X_test.shape[0])
        # check performance
        assert (roc_auc_score(self.y_test, pred_scores) >= self.roc_floor)
    def test_prediction_labels(self):
        # one binary label per test sample
        pred_labels = self.clf.predict(self.X_test)
        assert_equal(pred_labels.shape, self.y_test.shape)
    def test_prediction_proba(self):
        # probabilities must lie in [0, 1]
        pred_proba = self.clf.predict_proba(self.X_test)
        assert (pred_proba.min() >= 0)
        assert (pred_proba.max() <= 1)
    # def test_prediction_proba_linear(self):
    #     pred_proba = self.clf.predict_proba(self.X_test, method='linear')
    #     assert (pred_proba.min() >= 0)
    #     assert (pred_proba.max() <= 1)
    #
    # def test_prediction_proba_unify(self):
    #     pred_proba = self.clf.predict_proba(self.X_test, method='unify')
    #     assert (pred_proba.min() >= 0)
    #     assert (pred_proba.max() <= 1)
    #
    # def test_prediction_proba_parameter(self):
    #     with assert_raises(ValueError):
    #         self.clf.predict_proba(self.X_test, method='something')
    # def test_prediction_labels_confidence(self):
    #     pred_labels, confidence = self.clf.predict(self.X_test,
    #                                                return_confidence=True)
    #     assert_equal(pred_labels.shape, self.y_test.shape)
    #     assert_equal(confidence.shape, self.y_test.shape)
    #     assert (confidence.min() >= 0)
    #     assert (confidence.max() <= 1)
    #
    # def test_prediction_proba_linear_confidence(self):
    #     pred_proba, confidence = self.clf.predict_proba(self.X_test,
    #                                                     method='linear',
    #                                                     return_confidence=True)
    #     assert (pred_proba.min() >= 0)
    #     assert (pred_proba.max() <= 1)
    #
    #     assert_equal(confidence.shape, self.y_test.shape)
    #     assert (confidence.min() >= 0)
    #     assert (confidence.max() <= 1)
    def test_fit_predict(self):
        # fit_predict yields one label per training sample
        pred_labels = self.clf.fit_predict(self.X_train, self.y_train)
        assert_equal(pred_labels.shape, self.y_train.shape)
    def test_fit_predict_score(self):
        # supported scorers run; unknown scorer names must raise
        self.clf.fit_predict_score(self.X_test, self.y_test)
        self.clf.fit_predict_score(self.X_test, self.y_test,
                                   scoring='roc_auc_score')
        self.clf.fit_predict_score(self.X_test, self.y_test,
                                   scoring='prc_n_score')
        with assert_raises(NotImplementedError):
            self.clf.fit_predict_score(self.X_test, self.y_test,
                                       scoring='something')
    def test_predict_rank(self):
        pred_socres = self.clf.decision_function(self.X_test)
        pred_ranks = self.clf._predict_rank(self.X_test)
        print(pred_ranks)
        # assert the order is preserved
        assert_allclose(rankdata(pred_ranks), rankdata(pred_socres), rtol=4)
        assert_array_less(pred_ranks, self.X_train.shape[0] + 1)
        assert_array_less(-0.1, pred_ranks)
    def test_predict_rank_normalized(self):
        pred_socres = self.clf.decision_function(self.X_test)
        pred_ranks = self.clf._predict_rank(self.X_test, normalized=True)
        # assert the order is preserved
        assert_allclose(rankdata(pred_ranks), rankdata(pred_socres), rtol=4)
        assert_array_less(pred_ranks, 1.01)
        assert_array_less(-0.1, pred_ranks)
    def test_model_clone(self):
        # sklearn's clone() must accept the estimator without raising
        clone_clf = clone(self.clf)
    def tearDown(self):
        pass
# Allow running this test module directly, outside a pytest/nose runner.
if __name__ == '__main__':
    unittest.main()
| 37.645349 | 81 | 0.638456 |
from __future__ import division
from __future__ import print_function
import os
import sys
from os import path
import unittest
from numpy.testing import assert_allclose
from numpy.testing import assert_array_less
from numpy.testing import assert_equal
from numpy.testing import assert_raises
from sklearn.metrics import roc_auc_score
from sklearn.base import clone
from sklearn.model_selection import train_test_split
from sklearn.utils.validation import check_X_y
from scipy.io import loadmat
from scipy.stats import rankdata
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from pyod.models.xgbod import XGBOD
from pyod.utils.data import generate_data
class TestXGBOD(unittest.TestCase):
    """Unit tests for pyod's XGBOD semi-supervised outlier detector."""
    def setUp(self):
        """Load pima.mat if present (else synthetic data) and fit XGBOD once."""
        this_directory = path.abspath(path.dirname(__file__))
        mat_file = 'pima.mat'
        try:
            mat = loadmat(path.join(*[this_directory, 'data', mat_file]))
        except TypeError:
            # NOTE(review): presumably some scipy versions raise TypeError
            # for a missing/bad path — confirm which versions this guards.
            print('{data_file} does not exist. Use generated data'.format(
                data_file=mat_file))
            X, y = generate_data(train_only=True)
        except IOError:
            print('{data_file} does not exist. Use generated data'.format(
                data_file=mat_file))
            X, y = generate_data(train_only=True)
        else:
            X = mat['X']
            y = mat['y'].ravel()
            X, y = check_X_y(X, y)
        self.X_train, self.X_test, self.y_train, self.y_test = \
            train_test_split(X, y, test_size=0.4, random_state=42)
        self.clf = XGBOD(random_state=42)
        self.clf.fit(self.X_train, self.y_train)
        self.roc_floor = 0.75
    def test_parameters(self):
        """Fitted estimator exposes all expected learned attributes."""
        assert (hasattr(self.clf, 'clf_') and
                self.clf.decision_scores_ is not None)
        assert (hasattr(self.clf, '_scalar') and
                self.clf.labels_ is not None)
        assert (hasattr(self.clf, 'n_detector_') and
                self.clf.labels_ is not None)
        assert (hasattr(self.clf, 'X_train_add_') and
                self.clf.labels_ is not None)
        assert (hasattr(self.clf, 'decision_scores_') and
                self.clf.decision_scores_ is not None)
        assert (hasattr(self.clf, 'labels_') and
                self.clf.labels_ is not None)
    def test_train_scores(self):
        """One training score per training sample."""
        assert_equal(len(self.clf.decision_scores_), self.X_train.shape[0])
    def test_prediction_scores(self):
        """Test scores match the test-set size and beat the ROC floor."""
        pred_scores = self.clf.decision_function(self.X_test)
        assert_equal(pred_scores.shape[0], self.X_test.shape[0])
        assert (roc_auc_score(self.y_test, pred_scores) >= self.roc_floor)
    def test_prediction_labels(self):
        """predict returns one binary label per test sample."""
        pred_labels = self.clf.predict(self.X_test)
        assert_equal(pred_labels.shape, self.y_test.shape)
    def test_prediction_proba(self):
        """Predicted probabilities lie within [0, 1]."""
        pred_proba = self.clf.predict_proba(self.X_test)
        assert (pred_proba.min() >= 0)
        assert (pred_proba.max() <= 1)
    def test_fit_predict(self):
        """fit_predict yields one label per training sample."""
        pred_labels = self.clf.fit_predict(self.X_train, self.y_train)
        assert_equal(pred_labels.shape, self.y_train.shape)
    def test_fit_predict_score(self):
        """Supported scorers run; unknown scorer names must raise."""
        self.clf.fit_predict_score(self.X_test, self.y_test)
        self.clf.fit_predict_score(self.X_test, self.y_test,
                                   scoring='roc_auc_score')
        self.clf.fit_predict_score(self.X_test, self.y_test,
                                   scoring='prc_n_score')
        with assert_raises(NotImplementedError):
            self.clf.fit_predict_score(self.X_test, self.y_test,
                                       scoring='something')
    def test_predict_rank(self):
        """Raw ranks preserve score ordering and stay within bounds."""
        pred_socres = self.clf.decision_function(self.X_test)
        pred_ranks = self.clf._predict_rank(self.X_test)
        print(pred_ranks)
        assert_allclose(rankdata(pred_ranks), rankdata(pred_socres), rtol=4)
        assert_array_less(pred_ranks, self.X_train.shape[0] + 1)
        assert_array_less(-0.1, pred_ranks)
    def test_predict_rank_normalized(self):
        """Normalized ranks preserve ordering and lie in [0, 1]."""
        pred_socres = self.clf.decision_function(self.X_test)
        pred_ranks = self.clf._predict_rank(self.X_test, normalized=True)
        assert_allclose(rankdata(pred_ranks), rankdata(pred_socres), rtol=4)
        assert_array_less(pred_ranks, 1.01)
        assert_array_less(-0.1, pred_ranks)
    def test_model_clone(self):
        """sklearn's clone() must accept the estimator without raising."""
        clone_clf = clone(self.clf)
    def tearDown(self):
        pass
# Allow running this test module directly, outside a pytest/nose runner.
if __name__ == '__main__':
    unittest.main()
| true | true |
1c33d7614c66eed168c8402bfab6770a52275af7 | 295 | py | Python | zapcli/exceptions.py | kiwi-bop/zap-cli | 55d3341622074f65af287fe07d43196a55c515f1 | [
"MIT"
] | 196 | 2015-06-22T06:23:28.000Z | 2022-03-23T08:54:10.000Z | zapcli/exceptions.py | kiwi-bop/zap-cli | 55d3341622074f65af287fe07d43196a55c515f1 | [
"MIT"
] | 89 | 2015-12-02T17:07:57.000Z | 2022-02-03T10:20:50.000Z | zapcli/exceptions.py | kiwi-bop/zap-cli | 55d3341622074f65af287fe07d43196a55c515f1 | [
"MIT"
] | 65 | 2015-12-14T16:27:59.000Z | 2022-02-21T22:59:52.000Z | """
Custom exception classes for the ZAP CLI.
.. moduleauthor:: Daniel Grunwell (grunny)
"""
class ZAPError(Exception):
    """Generic exception raised by the ZAP CLI.

    Attributes:
        extra: optional object carrying additional error context.
    """

    def __init__(self, message, extra=None):
        # The base Exception keeps the message in args/str(); store any
        # extra context alongside it.
        Exception.__init__(self, message)
        self.extra = extra
| 18.4375 | 47 | 0.644068 |
class ZAPError(Exception):
    """Generic exception for the ZAP CLI."""
    def __init__(self, message, extra=None):
        # Base Exception stores the message; keep optional extra context.
        super(ZAPError, self).__init__(message)
        self.extra = extra
| true | true |
1c33d78e37721057e2d7e4ee643542dccc9ac883 | 24,294 | py | Python | lib/googlecloudsdk/third_party/apis/redis/v1/redis_v1_client.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/googlecloudsdk/third_party/apis/redis/v1/redis_v1_client.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/third_party/apis/redis/v1/redis_v1_client.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | """Generated client library for redis version v1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.redis.v1 import redis_v1_messages as messages
class RedisV1(base_api.BaseApiClient):
"""Generated client library for service redis version v1."""
MESSAGES_MODULE = messages
BASE_URL = 'https://redis.googleapis.com/'
MTLS_BASE_URL = 'https://redis.mtls.googleapis.com/'
_PACKAGE = 'redis'
_SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
_VERSION = 'v1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'google-cloud-sdk'
_CLIENT_CLASS_NAME = 'RedisV1'
_URL_VERSION = 'v1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new redis handle."""
url = url or self.BASE_URL
super(RedisV1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.projects_locations_instances = self.ProjectsLocationsInstancesService(self)
self.projects_locations_operations = self.ProjectsLocationsOperationsService(self)
self.projects_locations = self.ProjectsLocationsService(self)
self.projects = self.ProjectsService(self)
class ProjectsLocationsInstancesService(base_api.BaseApiService):
"""Service class for the projects_locations_instances resource."""
_NAME = 'projects_locations_instances'
def __init__(self, client):
super(RedisV1.ProjectsLocationsInstancesService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates a Redis instance based on the specified tier and memory size. By default, the instance is accessible from the project's [default network](https://cloud.google.com/vpc/docs/vpc). The creation is executed asynchronously and callers may check the returned operation to track its progress. Once the operation is completed the Redis instance will be fully functional. Completed longrunning.Operation will contain the new instance object in the response field. The returned operation is automatically deleted after a few hours, so there is no need to call DeleteOperation.
Args:
request: (RedisProjectsLocationsInstancesCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances',
http_method='POST',
method_id='redis.projects.locations.instances.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=['instanceId'],
relative_path='v1/{+parent}/instances',
request_field='instance',
request_type_name='RedisProjectsLocationsInstancesCreateRequest',
response_type_name='Operation',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a specific Redis instance. Instance stops serving and data is deleted.
Args:
request: (RedisProjectsLocationsInstancesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}',
http_method='DELETE',
method_id='redis.projects.locations.instances.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='RedisProjectsLocationsInstancesDeleteRequest',
response_type_name='Operation',
supports_download=False,
)
def Export(self, request, global_params=None):
r"""Export Redis instance data into a Redis RDB format file in Cloud Storage. Redis will continue serving during this operation. The returned operation is automatically deleted after a few hours, so there is no need to call DeleteOperation.
Args:
request: (RedisProjectsLocationsInstancesExportRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Export')
return self._RunMethod(
config, request, global_params=global_params)
Export.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:export',
http_method='POST',
method_id='redis.projects.locations.instances.export',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:export',
request_field='exportInstanceRequest',
request_type_name='RedisProjectsLocationsInstancesExportRequest',
response_type_name='Operation',
supports_download=False,
)
def Failover(self, request, global_params=None):
r"""Initiates a failover of the primary node to current replica node for a specific STANDARD tier Cloud Memorystore for Redis instance.
Args:
request: (RedisProjectsLocationsInstancesFailoverRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Failover')
return self._RunMethod(
config, request, global_params=global_params)
Failover.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:failover',
http_method='POST',
method_id='redis.projects.locations.instances.failover',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:failover',
request_field='failoverInstanceRequest',
request_type_name='RedisProjectsLocationsInstancesFailoverRequest',
response_type_name='Operation',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets the details of a specific Redis instance.
Args:
request: (RedisProjectsLocationsInstancesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Instance) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}',
http_method='GET',
method_id='redis.projects.locations.instances.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='RedisProjectsLocationsInstancesGetRequest',
response_type_name='Instance',
supports_download=False,
)
def GetAuthString(self, request, global_params=None):
r"""Gets the AUTH string for a Redis instance. If AUTH is not enabled for the instance the response will be empty. This information is not included in the details returned to GetInstance.
Args:
request: (RedisProjectsLocationsInstancesGetAuthStringRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(InstanceAuthString) The response message.
"""
config = self.GetMethodConfig('GetAuthString')
return self._RunMethod(
config, request, global_params=global_params)
GetAuthString.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}/authString',
http_method='GET',
method_id='redis.projects.locations.instances.getAuthString',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}/authString',
request_field='',
request_type_name='RedisProjectsLocationsInstancesGetAuthStringRequest',
response_type_name='InstanceAuthString',
supports_download=False,
)
def Import(self, request, global_params=None):
r"""Import a Redis RDB snapshot file from Cloud Storage into a Redis instance. Redis may stop serving during this operation. Instance state will be IMPORTING for entire operation. When complete, the instance will contain only data from the imported file. The returned operation is automatically deleted after a few hours, so there is no need to call DeleteOperation.
Args:
request: (RedisProjectsLocationsInstancesImportRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Import')
return self._RunMethod(
config, request, global_params=global_params)
Import.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:import',
http_method='POST',
method_id='redis.projects.locations.instances.import',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:import',
request_field='importInstanceRequest',
request_type_name='RedisProjectsLocationsInstancesImportRequest',
response_type_name='Operation',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists all Redis instances owned by a project in either the specified location (region) or all locations. The location should have the following format: * `projects/{project_id}/locations/{location_id}` If `location_id` is specified as `-` (wildcard), then all regions available to the project are queried, and the results are aggregated.
Args:
request: (RedisProjectsLocationsInstancesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListInstancesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances',
http_method='GET',
method_id='redis.projects.locations.instances.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['pageSize', 'pageToken'],
relative_path='v1/{+parent}/instances',
request_field='',
request_type_name='RedisProjectsLocationsInstancesListRequest',
response_type_name='ListInstancesResponse',
supports_download=False,
)
def Patch(self, request, global_params=None):
r"""Updates the metadata and configuration of a specific Redis instance. Completed longrunning.Operation will contain the new instance object in the response field. The returned operation is automatically deleted after a few hours, so there is no need to call DeleteOperation.
Args:
request: (RedisProjectsLocationsInstancesPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}',
http_method='PATCH',
method_id='redis.projects.locations.instances.patch',
ordered_params=['name'],
path_params=['name'],
query_params=['updateMask'],
relative_path='v1/{+name}',
request_field='instance',
request_type_name='RedisProjectsLocationsInstancesPatchRequest',
response_type_name='Operation',
supports_download=False,
)
def RescheduleMaintenance(self, request, global_params=None):
r"""Reschedule maintenance for a given instance in a given project and location.
Args:
request: (RedisProjectsLocationsInstancesRescheduleMaintenanceRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('RescheduleMaintenance')
return self._RunMethod(
config, request, global_params=global_params)
RescheduleMaintenance.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:rescheduleMaintenance',
http_method='POST',
method_id='redis.projects.locations.instances.rescheduleMaintenance',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:rescheduleMaintenance',
request_field='rescheduleMaintenanceRequest',
request_type_name='RedisProjectsLocationsInstancesRescheduleMaintenanceRequest',
response_type_name='Operation',
supports_download=False,
)
def Upgrade(self, request, global_params=None):
r"""Upgrades Redis instance to the newer Redis version specified in the request.
Args:
request: (RedisProjectsLocationsInstancesUpgradeRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Upgrade')
return self._RunMethod(
config, request, global_params=global_params)
Upgrade.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:upgrade',
http_method='POST',
method_id='redis.projects.locations.instances.upgrade',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:upgrade',
request_field='upgradeInstanceRequest',
request_type_name='RedisProjectsLocationsInstancesUpgradeRequest',
response_type_name='Operation',
supports_download=False,
)
class ProjectsLocationsOperationsService(base_api.BaseApiService):
"""Service class for the projects_locations_operations resource."""
_NAME = 'projects_locations_operations'
def __init__(self, client):
super(RedisV1.ProjectsLocationsOperationsService, self).__init__(client)
self._upload_configs = {
}
def Cancel(self, request, global_params=None):
r"""Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
Args:
request: (RedisProjectsLocationsOperationsCancelRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Cancel')
return self._RunMethod(
config, request, global_params=global_params)
Cancel.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:cancel',
http_method='POST',
method_id='redis.projects.locations.operations.cancel',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:cancel',
request_field='',
request_type_name='RedisProjectsLocationsOperationsCancelRequest',
response_type_name='Empty',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.
Args:
request: (RedisProjectsLocationsOperationsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}',
http_method='DELETE',
method_id='redis.projects.locations.operations.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='RedisProjectsLocationsOperationsDeleteRequest',
response_type_name='Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
Args:
request: (RedisProjectsLocationsOperationsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}',
http_method='GET',
method_id='redis.projects.locations.operations.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='RedisProjectsLocationsOperationsGetRequest',
response_type_name='Operation',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `"/v1/{name=users/*}/operations"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.
Args:
request: (RedisProjectsLocationsOperationsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListOperationsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/operations',
http_method='GET',
method_id='redis.projects.locations.operations.list',
ordered_params=['name'],
path_params=['name'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1/{+name}/operations',
request_field='',
request_type_name='RedisProjectsLocationsOperationsListRequest',
response_type_name='ListOperationsResponse',
supports_download=False,
)
class ProjectsLocationsService(base_api.BaseApiService):
"""Service class for the projects_locations resource."""
_NAME = 'projects_locations'
def __init__(self, client):
super(RedisV1.ProjectsLocationsService, self).__init__(client)
self._upload_configs = {
}
def Get(self, request, global_params=None):
r"""Gets information about a location.
Args:
request: (RedisProjectsLocationsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Location) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}',
http_method='GET',
method_id='redis.projects.locations.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='RedisProjectsLocationsGetRequest',
response_type_name='Location',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists information about the supported locations for this service.
Args:
request: (RedisProjectsLocationsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListLocationsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations',
http_method='GET',
method_id='redis.projects.locations.list',
ordered_params=['name'],
path_params=['name'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1/{+name}/locations',
request_field='',
request_type_name='RedisProjectsLocationsListRequest',
response_type_name='ListLocationsResponse',
supports_download=False,
)
class ProjectsService(base_api.BaseApiService):
"""Service class for the projects resource."""
_NAME = 'projects'
def __init__(self, client):
super(RedisV1.ProjectsService, self).__init__(client)
self._upload_configs = {
}
| 44.576147 | 615 | 0.705359 |
from __future__ import absolute_import
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.redis.v1 import redis_v1_messages as messages
class RedisV1(base_api.BaseApiClient):
MESSAGES_MODULE = messages
BASE_URL = 'https://redis.googleapis.com/'
MTLS_BASE_URL = 'https://redis.mtls.googleapis.com/'
_PACKAGE = 'redis'
_SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
_VERSION = 'v1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'google-cloud-sdk'
_CLIENT_CLASS_NAME = 'RedisV1'
_URL_VERSION = 'v1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
url = url or self.BASE_URL
super(RedisV1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.projects_locations_instances = self.ProjectsLocationsInstancesService(self)
self.projects_locations_operations = self.ProjectsLocationsOperationsService(self)
self.projects_locations = self.ProjectsLocationsService(self)
self.projects = self.ProjectsService(self)
class ProjectsLocationsInstancesService(base_api.BaseApiService):
_NAME = 'projects_locations_instances'
def __init__(self, client):
super(RedisV1.ProjectsLocationsInstancesService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances',
http_method='POST',
method_id='redis.projects.locations.instances.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=['instanceId'],
relative_path='v1/{+parent}/instances',
request_field='instance',
request_type_name='RedisProjectsLocationsInstancesCreateRequest',
response_type_name='Operation',
supports_download=False,
)
def Delete(self, request, global_params=None):
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}',
http_method='DELETE',
method_id='redis.projects.locations.instances.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='RedisProjectsLocationsInstancesDeleteRequest',
response_type_name='Operation',
supports_download=False,
)
def Export(self, request, global_params=None):
config = self.GetMethodConfig('Export')
return self._RunMethod(
config, request, global_params=global_params)
Export.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:export',
http_method='POST',
method_id='redis.projects.locations.instances.export',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:export',
request_field='exportInstanceRequest',
request_type_name='RedisProjectsLocationsInstancesExportRequest',
response_type_name='Operation',
supports_download=False,
)
def Failover(self, request, global_params=None):
config = self.GetMethodConfig('Failover')
return self._RunMethod(
config, request, global_params=global_params)
Failover.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:failover',
http_method='POST',
method_id='redis.projects.locations.instances.failover',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:failover',
request_field='failoverInstanceRequest',
request_type_name='RedisProjectsLocationsInstancesFailoverRequest',
response_type_name='Operation',
supports_download=False,
)
def Get(self, request, global_params=None):
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}',
http_method='GET',
method_id='redis.projects.locations.instances.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='RedisProjectsLocationsInstancesGetRequest',
response_type_name='Instance',
supports_download=False,
)
def GetAuthString(self, request, global_params=None):
config = self.GetMethodConfig('GetAuthString')
return self._RunMethod(
config, request, global_params=global_params)
GetAuthString.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}/authString',
http_method='GET',
method_id='redis.projects.locations.instances.getAuthString',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}/authString',
request_field='',
request_type_name='RedisProjectsLocationsInstancesGetAuthStringRequest',
response_type_name='InstanceAuthString',
supports_download=False,
)
def Import(self, request, global_params=None):
config = self.GetMethodConfig('Import')
return self._RunMethod(
config, request, global_params=global_params)
Import.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:import',
http_method='POST',
method_id='redis.projects.locations.instances.import',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:import',
request_field='importInstanceRequest',
request_type_name='RedisProjectsLocationsInstancesImportRequest',
response_type_name='Operation',
supports_download=False,
)
def List(self, request, global_params=None):
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances',
http_method='GET',
method_id='redis.projects.locations.instances.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['pageSize', 'pageToken'],
relative_path='v1/{+parent}/instances',
request_field='',
request_type_name='RedisProjectsLocationsInstancesListRequest',
response_type_name='ListInstancesResponse',
supports_download=False,
)
def Patch(self, request, global_params=None):
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}',
http_method='PATCH',
method_id='redis.projects.locations.instances.patch',
ordered_params=['name'],
path_params=['name'],
query_params=['updateMask'],
relative_path='v1/{+name}',
request_field='instance',
request_type_name='RedisProjectsLocationsInstancesPatchRequest',
response_type_name='Operation',
supports_download=False,
)
def RescheduleMaintenance(self, request, global_params=None):
config = self.GetMethodConfig('RescheduleMaintenance')
return self._RunMethod(
config, request, global_params=global_params)
RescheduleMaintenance.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:rescheduleMaintenance',
http_method='POST',
method_id='redis.projects.locations.instances.rescheduleMaintenance',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:rescheduleMaintenance',
request_field='rescheduleMaintenanceRequest',
request_type_name='RedisProjectsLocationsInstancesRescheduleMaintenanceRequest',
response_type_name='Operation',
supports_download=False,
)
def Upgrade(self, request, global_params=None):
config = self.GetMethodConfig('Upgrade')
return self._RunMethod(
config, request, global_params=global_params)
Upgrade.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:upgrade',
http_method='POST',
method_id='redis.projects.locations.instances.upgrade',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:upgrade',
request_field='upgradeInstanceRequest',
request_type_name='RedisProjectsLocationsInstancesUpgradeRequest',
response_type_name='Operation',
supports_download=False,
)
class ProjectsLocationsOperationsService(base_api.BaseApiService):
_NAME = 'projects_locations_operations'
def __init__(self, client):
super(RedisV1.ProjectsLocationsOperationsService, self).__init__(client)
self._upload_configs = {
}
def Cancel(self, request, global_params=None):
config = self.GetMethodConfig('Cancel')
return self._RunMethod(
config, request, global_params=global_params)
Cancel.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:cancel',
http_method='POST',
method_id='redis.projects.locations.operations.cancel',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:cancel',
request_field='',
request_type_name='RedisProjectsLocationsOperationsCancelRequest',
response_type_name='Empty',
supports_download=False,
)
def Delete(self, request, global_params=None):
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}',
http_method='DELETE',
method_id='redis.projects.locations.operations.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='RedisProjectsLocationsOperationsDeleteRequest',
response_type_name='Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}',
http_method='GET',
method_id='redis.projects.locations.operations.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='RedisProjectsLocationsOperationsGetRequest',
response_type_name='Operation',
supports_download=False,
)
def List(self, request, global_params=None):
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/operations',
http_method='GET',
method_id='redis.projects.locations.operations.list',
ordered_params=['name'],
path_params=['name'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1/{+name}/operations',
request_field='',
request_type_name='RedisProjectsLocationsOperationsListRequest',
response_type_name='ListOperationsResponse',
supports_download=False,
)
class ProjectsLocationsService(base_api.BaseApiService):
_NAME = 'projects_locations'
def __init__(self, client):
super(RedisV1.ProjectsLocationsService, self).__init__(client)
self._upload_configs = {
}
def Get(self, request, global_params=None):
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}',
http_method='GET',
method_id='redis.projects.locations.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='RedisProjectsLocationsGetRequest',
response_type_name='Location',
supports_download=False,
)
def List(self, request, global_params=None):
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations',
http_method='GET',
method_id='redis.projects.locations.list',
ordered_params=['name'],
path_params=['name'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1/{+name}/locations',
request_field='',
request_type_name='RedisProjectsLocationsListRequest',
response_type_name='ListLocationsResponse',
supports_download=False,
)
class ProjectsService(base_api.BaseApiService):
_NAME = 'projects'
def __init__(self, client):
super(RedisV1.ProjectsService, self).__init__(client)
self._upload_configs = {
}
| true | true |
1c33d8356bf4ee30d7d701511d609385cbbbe06c | 11,309 | py | Python | tzlink/datasets/share_clef/subsets.py | lfurrer/tzlink | 0fd09a4c48d73cbd51e8f1628628812a74f209a7 | [
"BSD-3-Clause"
] | 4 | 2019-11-08T10:59:08.000Z | 2020-03-22T21:47:50.000Z | tzlink/datasets/share_clef/subsets.py | lfurrer/tzlink | 0fd09a4c48d73cbd51e8f1628628812a74f209a7 | [
"BSD-3-Clause"
] | null | null | null | tzlink/datasets/share_clef/subsets.py | lfurrer/tzlink | 0fd09a4c48d73cbd51e8f1628628812a74f209a7 | [
"BSD-3-Clause"
] | 1 | 2018-11-08T15:32:12.000Z | 2018-11-08T15:32:12.000Z | #!/usr/bin/env python3
# coding: utf8
# Author: Lenz Furrer, 2018
'''
Filename listings for train/dev/test and different folds.
'''
import itertools as it
def docs(subset):
'''
Get document IDs for the given subset.
'''
subdir = 'test' if subset == 'test' else 'train'
return subdir, _docs(subset)
def _docs(subset):
# Predefined test set.
if subset == 'test':
return _test
# Non-folded train/dev split.
if subset == 'dev':
return _dev0
if subset == 'train':
exclude = set(_dev0)
ids = it.filterfalse(exclude.__contains__, it.chain(*_folds))
return list(ids)
# Folded train/dev split.
label, n = _split_subset_label(subset)
if label == 'dev':
return _folds[n]
if label == 'train':
ids = it.chain(*_folds[:n], *_folds[n+1:])
return list(ids)
raise ValueError('invalid subset: {}'.format(subset))
def _split_subset_label(label):
fold = label.lstrip('traindev')
fold = int(fold) - 1
label = label.rstrip('12345')
return label, fold
# Test set provided by the shared-task organisers (99 docs).
_test = '''
00176-102920-ECHO_REPORT
00381-006281-DISCHARGE_SUMMARY
00534-017453-DISCHARGE_SUMMARY
00534-100076-ECHO_REPORT
01160-000945-DISCHARGE_SUMMARY
01163-001840-DISCHARGE_SUMMARY
01222-104065-ECHO_REPORT
02740-024700-DISCHARGE_SUMMARY
03087-026480-DISCHARGE_SUMMARY
03298-014440-DISCHARGE_SUMMARY
03628-023268-DISCHARGE_SUMMARY
03835-028462-DISCHARGE_SUMMARY
04082-167766-RADIOLOGY_REPORT
04525-003099-DISCHARGE_SUMMARY
04882-004677-DISCHARGE_SUMMARY
04995-028156-DISCHARGE_SUMMARY
05065-011493-DISCHARGE_SUMMARY
05163-019624-DISCHARGE_SUMMARY
05382-010331-DISCHARGE_SUMMARY
05837-000274-DISCHARGE_SUMMARY
06134-005003-DISCHARGE_SUMMARY
06557-009968-DISCHARGE_SUMMARY
07214-025053-DISCHARGE_SUMMARY
07546-000040-DISCHARGE_SUMMARY
07683-016743-DISCHARGE_SUMMARY
07797-005646-DISCHARGE_SUMMARY
08216-388895-RADIOLOGY_REPORT
08324-097667-ECHO_REPORT
08415-016301-DISCHARGE_SUMMARY
08786-003318-DISCHARGE_SUMMARY
08990-002227-DISCHARGE_SUMMARY
09248-026497-DISCHARGE_SUMMARY
09339-028983-DISCHARGE_SUMMARY
09531-108127-ECHO_REPORT
09602-000963-DISCHARGE_SUMMARY
09703-109051-ECHO_REPORT
10434-169426-RADIOLOGY_REPORT
10539-022213-DISCHARGE_SUMMARY
10689-110055-ECHO_REPORT
10773-027033-DISCHARGE_SUMMARY
11098-004672-DISCHARGE_SUMMARY
11378-103592-ECHO_REPORT
11392-010791-DISCHARGE_SUMMARY
11552-026221-DISCHARGE_SUMMARY
11681-022505-DISCHARGE_SUMMARY
12125-022364-DISCHARGE_SUMMARY
12530-004020-DISCHARGE_SUMMARY
12582-011060-DISCHARGE_SUMMARY
12618-027862-DISCHARGE_SUMMARY
12627-109059-ECHO_REPORT
13990-101915-ECHO_REPORT
14285-022846-DISCHARGE_SUMMARY
15021-016750-DISCHARGE_SUMMARY
15230-012950-DISCHARGE_SUMMARY
15664-014779-DISCHARGE_SUMMARY
15751-026988-DISCHARGE_SUMMARY
15789-007213-DISCHARGE_SUMMARY
16055-152402-RADIOLOGY_REPORT
16072-170823-RADIOLOGY_REPORT
16134-204168-RADIOLOGY_REPORT
16247-028319-DISCHARGE_SUMMARY
16660-004075-DISCHARGE_SUMMARY
16677-010128-DISCHARGE_SUMMARY
16743-013010-DISCHARGE_SUMMARY
16997-000825-DISCHARGE_SUMMARY
17054-016976-DISCHARGE_SUMMARY
17097-368450-RADIOLOGY_REPORT
17467-010718-DISCHARGE_SUMMARY
17583-022047-DISCHARGE_SUMMARY
17644-017974-DISCHARGE_SUMMARY
17652-018982-DISCHARGE_SUMMARY
17774-014129-DISCHARGE_SUMMARY
18108-381702-RADIOLOGY_REPORT
18114-360237-RADIOLOGY_REPORT
18317-007698-DISCHARGE_SUMMARY
18531-010240-DISCHARGE_SUMMARY
19138-025729-DISCHARGE_SUMMARY
19154-166217-RADIOLOGY_REPORT
19596-007256-DISCHARGE_SUMMARY
19778-001791-DISCHARGE_SUMMARY
19911-175533-RADIOLOGY_REPORT
20223-103427-ECHO_REPORT
20389-024150-DISCHARGE_SUMMARY
20442-023289-DISCHARGE_SUMMARY
20701-013632-DISCHARGE_SUMMARY
20706-009354-DISCHARGE_SUMMARY
21115-101632-ECHO_REPORT
21312-018707-DISCHARGE_SUMMARY
21815-002962-DISCHARGE_SUMMARY
21951-203738-RADIOLOGY_REPORT
21979-010316-DISCHARGE_SUMMARY
22159-004946-DISCHARGE_SUMMARY
22788-021533-DISCHARGE_SUMMARY
24307-009748-DISCHARGE_SUMMARY
24435-000622-DISCHARGE_SUMMARY
24786-014472-DISCHARGE_SUMMARY
25150-027400-DISCHARGE_SUMMARY
25775-007416-DISCHARGE_SUMMARY
26522-011368-DISCHARGE_SUMMARY
'''.split()
# Preliminary non-folded train/dev split (149/50).
_dev0 = '''
01314-028800-DISCHARGE_SUMMARY
02115-010823-DISCHARGE_SUMMARY
02652-006395-DISCHARGE_SUMMARY
04266-000520-DISCHARGE_SUMMARY
04303-005081-DISCHARGE_SUMMARY
05062-230044-RADIOLOGY_REPORT
05797-095003-ECG_REPORT
06292-371824-RADIOLOGY_REPORT
06445-096221-ECHO_REPORT
06653-081911-ECG_REPORT
07048-294691-RADIOLOGY_REPORT
07514-025655-DISCHARGE_SUMMARY
07908-129574-RADIOLOGY_REPORT
08114-027513-DISCHARGE_SUMMARY
08870-061373-ECG_REPORT
08951-002958-DISCHARGE_SUMMARY
09337-018472-DISCHARGE_SUMMARY
09569-067879-ECG_REPORT
10588-105794-ECHO_REPORT
10612-047357-ECG_REPORT
10644-007491-DISCHARGE_SUMMARY
10691-220707-RADIOLOGY_REPORT
11801-104538-ECHO_REPORT
12050-081563-ECG_REPORT
12748-021750-DISCHARGE_SUMMARY
13033-020154-DISCHARGE_SUMMARY
13265-104380-ECHO_REPORT
13913-106200-ECHO_REPORT
14493-110891-RADIOLOGY_REPORT
14522-104279-ECHO_REPORT
15272-026154-DISCHARGE_SUMMARY
15737-095456-ECHO_REPORT
15770-109559-ECHO_REPORT
18318-102656-ECHO_REPORT
18426-060090-ECG_REPORT
18912-067495-ECG_REPORT
19623-085193-ECG_REPORT
19649-021294-DISCHARGE_SUMMARY
21219-092548-ECG_REPORT
21273-244548-RADIOLOGY_REPORT
21280-272404-RADIOLOGY_REPORT
21349-034056-ECG_REPORT
21647-105660-ECHO_REPORT
21662-107599-ECHO_REPORT
22225-075494-ECG_REPORT
22818-041469-ECG_REPORT
22891-058961-ECG_REPORT
25585-058370-ECG_REPORT
25950-160092-RADIOLOGY_REPORT
26176-226973-RADIOLOGY_REPORT'''.split()
# 5-fold train/dev split (40/40/40/40/39).
_dev1 = '''
00098-016139-DISCHARGE_SUMMARY
00500-097836-ECHO_REPORT
00587-400001-RADIOLOGY_REPORT
01114-083601-ECG_REPORT
01234-029456-DISCHARGE_SUMMARY
01455-067052-ECG_REPORT
02034-037300-ECG_REPORT
02115-010823-DISCHARGE_SUMMARY
02136-017465-DISCHARGE_SUMMARY
03066-084521-ECG_REPORT
03273-009330-DISCHARGE_SUMMARY
03392-360395-RADIOLOGY_REPORT
03702-098383-ECHO_REPORT
05308-090812-ECG_REPORT
07700-413490-RADIOLOGY_REPORT
07780-347384-RADIOLOGY_REPORT
08014-097374-ECHO_REPORT
09569-067879-ECG_REPORT
11439-014138-DISCHARGE_SUMMARY
12050-081563-ECG_REPORT
13265-104380-ECHO_REPORT
14108-340203-RADIOLOGY_REPORT
14522-104279-ECHO_REPORT
14708-006815-DISCHARGE_SUMMARY
15621-077411-ECG_REPORT
15737-095456-ECHO_REPORT
16013-015541-DISCHARGE_SUMMARY
16093-011230-DISCHARGE_SUMMARY
16817-077812-ECG_REPORT
17090-026395-DISCHARGE_SUMMARY
17522-024788-DISCHARGE_SUMMARY
18076-246143-RADIOLOGY_REPORT
19623-085193-ECG_REPORT
20038-028322-DISCHARGE_SUMMARY
21280-272404-RADIOLOGY_REPORT
21286-109632-ECHO_REPORT
21662-107599-ECHO_REPORT
22891-058961-ECG_REPORT
25003-338492-RADIOLOGY_REPORT
26563-387055-RADIOLOGY_REPORT
'''.split()
_dev2 = '''
02405-069810-ECG_REPORT
02410-026171-DISCHARGE_SUMMARY
02652-006395-DISCHARGE_SUMMARY
04269-027967-DISCHARGE_SUMMARY
05367-106998-ECHO_REPORT
05797-095003-ECG_REPORT
05955-087704-ECG_REPORT
06292-371824-RADIOLOGY_REPORT
07048-294691-RADIOLOGY_REPORT
07352-013977-DISCHARGE_SUMMARY
07514-025655-DISCHARGE_SUMMARY
07786-029701-ECG_REPORT
07908-129574-RADIOLOGY_REPORT
07968-074957-ECG_REPORT
08114-027513-DISCHARGE_SUMMARY
08380-043167-ECG_REPORT
08870-061373-ECG_REPORT
09775-416048-RADIOLOGY_REPORT
13913-106200-ECHO_REPORT
14158-075452-ECG_REPORT
14822-000161-DISCHARGE_SUMMARY
14897-025566-DISCHARGE_SUMMARY
15295-348292-RADIOLOGY_REPORT
15770-109559-ECHO_REPORT
17336-021181-DISCHARGE_SUMMARY
17451-147855-RADIOLOGY_REPORT
18908-109838-ECHO_REPORT
19155-103735-ECHO_REPORT
19584-178988-RADIOLOGY_REPORT
19709-026760-DISCHARGE_SUMMARY
20050-395622-RADIOLOGY_REPORT
20145-362538-RADIOLOGY_REPORT
21349-034056-ECG_REPORT
21967-106697-ECHO_REPORT
21971-062555-ECG_REPORT
22230-040122-ECG_REPORT
22743-004908-DISCHARGE_SUMMARY
23389-095475-ECHO_REPORT
24432-013472-DISCHARGE_SUMMARY
25497-096953-ECHO_REPORT
'''.split()
_dev3 = '''
01427-342648-RADIOLOGY_REPORT
02916-100844-ECHO_REPORT
04266-000520-DISCHARGE_SUMMARY
06091-383430-RADIOLOGY_REPORT
06296-089652-ECG_REPORT
06567-071352-ECG_REPORT
07726-023607-DISCHARGE_SUMMARY
07866-088203-ECG_REPORT
08087-099659-ECHO_REPORT
08703-021487-DISCHARGE_SUMMARY
09030-087790-ECG_REPORT
09166-409725-RADIOLOGY_REPORT
09375-099229-ECHO_REPORT
09622-087101-ECG_REPORT
09963-257487-RADIOLOGY_REPORT
10422-276335-RADIOLOGY_REPORT
10588-105794-ECHO_REPORT
10644-007491-DISCHARGE_SUMMARY
10668-239022-RADIOLOGY_REPORT
10907-103779-ECHO_REPORT
11762-027273-DISCHARGE_SUMMARY
11801-104538-ECHO_REPORT
11823-007872-DISCHARGE_SUMMARY
13033-020154-DISCHARGE_SUMMARY
14835-325902-RADIOLOGY_REPORT
14888-014879-DISCHARGE_SUMMARY
15128-008249-DISCHARGE_SUMMARY
16888-003484-DISCHARGE_SUMMARY
18318-102656-ECHO_REPORT
18321-022756-DISCHARGE_SUMMARY
18426-060090-ECG_REPORT
18912-067495-ECG_REPORT
19012-089185-ECG_REPORT
19140-056193-ECG_REPORT
19267-104724-ECHO_REPORT
19791-003873-DISCHARGE_SUMMARY
20996-105850-ECHO_REPORT
21273-244548-RADIOLOGY_REPORT
23590-017830-DISCHARGE_SUMMARY
25585-058370-ECG_REPORT
'''.split()
_dev4 = '''
00211-027889-DISCHARGE_SUMMARY
01982-060190-ECG_REPORT
03089-097913-ECHO_REPORT
03990-040506-ECG_REPORT
05062-230044-RADIOLOGY_REPORT
05967-095720-ECHO_REPORT
06653-081911-ECG_REPORT
07429-001857-DISCHARGE_SUMMARY
07978-322989-RADIOLOGY_REPORT
09040-052377-ECG_REPORT
09337-018472-DISCHARGE_SUMMARY
09536-102867-ECHO_REPORT
09584-107853-ECHO_REPORT
10101-012638-DISCHARGE_SUMMARY
10612-047357-ECG_REPORT
10668-107159-ECHO_REPORT
10691-220707-RADIOLOGY_REPORT
10906-067559-ECG_REPORT
16333-034160-ECG_REPORT
16994-022078-DISCHARGE_SUMMARY
17217-011306-DISCHARGE_SUMMARY
19230-039952-ECG_REPORT
19246-093639-ECG_REPORT
19649-021294-DISCHARGE_SUMMARY
20288-027184-DISCHARGE_SUMMARY
20400-049875-ECG_REPORT
20807-104709-ECHO_REPORT
21215-274571-RADIOLOGY_REPORT
21305-020227-DISCHARGE_SUMMARY
21413-012450-DISCHARGE_SUMMARY
21633-029484-DISCHARGE_SUMMARY
21647-105660-ECHO_REPORT
21833-003461-DISCHARGE_SUMMARY
22225-075494-ECG_REPORT
22264-260776-RADIOLOGY_REPORT
22682-065777-ECG_REPORT
24638-098945-ECHO_REPORT
24813-183267-RADIOLOGY_REPORT
25950-160092-RADIOLOGY_REPORT
26176-226973-RADIOLOGY_REPORT
'''.split()
_dev5 = '''
00414-104513-ECHO_REPORT
01314-028800-DISCHARGE_SUMMARY
01487-290421-RADIOLOGY_REPORT
04303-005081-DISCHARGE_SUMMARY
06445-096221-ECHO_REPORT
07156-096163-ECHO_REPORT
07452-053844-ECG_REPORT
07761-036998-ECG_REPORT
08951-002958-DISCHARGE_SUMMARY
09001-000036-DISCHARGE_SUMMARY
09665-101538-ECHO_REPORT
10124-289890-RADIOLOGY_REPORT
12152-087134-ECG_REPORT
12156-067807-ECG_REPORT
12748-021750-DISCHARGE_SUMMARY
13101-048474-ECG_REPORT
13594-066846-ECG_REPORT
14493-110891-RADIOLOGY_REPORT
15013-102321-ECHO_REPORT
15272-026154-DISCHARGE_SUMMARY
16044-019401-DISCHARGE_SUMMARY
16597-023618-DISCHARGE_SUMMARY
16660-199016-RADIOLOGY_REPORT
17473-000673-DISCHARGE_SUMMARY
17582-104422-ECHO_REPORT
18673-102519-ECHO_REPORT
21219-092548-ECG_REPORT
21745-025001-DISCHARGE_SUMMARY
22566-151087-RADIOLOGY_REPORT
22739-020612-DISCHARGE_SUMMARY
22818-041469-ECG_REPORT
22821-026994-DISCHARGE_SUMMARY
23039-078076-ECG_REPORT
23298-326737-RADIOLOGY_REPORT
23893-094803-ECG_REPORT
23969-299900-RADIOLOGY_REPORT
25217-257214-RADIOLOGY_REPORT
25844-097135-ECHO_REPORT
26136-101545-ECHO_REPORT
'''.split()
_folds = [_dev1, _dev2, _dev3, _dev4, _dev5]
| 26.609412 | 69 | 0.855867 |
import itertools as it
def docs(subset):
subdir = 'test' if subset == 'test' else 'train'
return subdir, _docs(subset)
def _docs(subset):
    """Resolve a subset name to its list of document ids.

    Understands 'test', 'dev' (the default dev split), 'train' (all folds
    minus the default dev split), and per-fold labels such as 'dev3'
    (that fold) or 'train3' (all other folds).  Raises ValueError for
    anything else.
    """
    fixed = {'test': _test, 'dev': _dev0}
    if subset in fixed:
        return fixed[subset]
    if subset == 'train':
        held_out = set(_dev0)
        return [d for d in it.chain(*_folds) if d not in held_out]
    name, fold = _split_subset_label(subset)
    if name == 'dev':
        return _folds[fold]
    if name == 'train':
        return list(it.chain(*(_folds[:fold] + _folds[fold + 1:])))
    raise ValueError('invalid subset: {}'.format(subset))
def _split_subset_label(label):
fold = label.lstrip('traindev')
fold = int(fold) - 1
label = label.rstrip('12345')
return label, fold
_test = '''
00176-102920-ECHO_REPORT
00381-006281-DISCHARGE_SUMMARY
00534-017453-DISCHARGE_SUMMARY
00534-100076-ECHO_REPORT
01160-000945-DISCHARGE_SUMMARY
01163-001840-DISCHARGE_SUMMARY
01222-104065-ECHO_REPORT
02740-024700-DISCHARGE_SUMMARY
03087-026480-DISCHARGE_SUMMARY
03298-014440-DISCHARGE_SUMMARY
03628-023268-DISCHARGE_SUMMARY
03835-028462-DISCHARGE_SUMMARY
04082-167766-RADIOLOGY_REPORT
04525-003099-DISCHARGE_SUMMARY
04882-004677-DISCHARGE_SUMMARY
04995-028156-DISCHARGE_SUMMARY
05065-011493-DISCHARGE_SUMMARY
05163-019624-DISCHARGE_SUMMARY
05382-010331-DISCHARGE_SUMMARY
05837-000274-DISCHARGE_SUMMARY
06134-005003-DISCHARGE_SUMMARY
06557-009968-DISCHARGE_SUMMARY
07214-025053-DISCHARGE_SUMMARY
07546-000040-DISCHARGE_SUMMARY
07683-016743-DISCHARGE_SUMMARY
07797-005646-DISCHARGE_SUMMARY
08216-388895-RADIOLOGY_REPORT
08324-097667-ECHO_REPORT
08415-016301-DISCHARGE_SUMMARY
08786-003318-DISCHARGE_SUMMARY
08990-002227-DISCHARGE_SUMMARY
09248-026497-DISCHARGE_SUMMARY
09339-028983-DISCHARGE_SUMMARY
09531-108127-ECHO_REPORT
09602-000963-DISCHARGE_SUMMARY
09703-109051-ECHO_REPORT
10434-169426-RADIOLOGY_REPORT
10539-022213-DISCHARGE_SUMMARY
10689-110055-ECHO_REPORT
10773-027033-DISCHARGE_SUMMARY
11098-004672-DISCHARGE_SUMMARY
11378-103592-ECHO_REPORT
11392-010791-DISCHARGE_SUMMARY
11552-026221-DISCHARGE_SUMMARY
11681-022505-DISCHARGE_SUMMARY
12125-022364-DISCHARGE_SUMMARY
12530-004020-DISCHARGE_SUMMARY
12582-011060-DISCHARGE_SUMMARY
12618-027862-DISCHARGE_SUMMARY
12627-109059-ECHO_REPORT
13990-101915-ECHO_REPORT
14285-022846-DISCHARGE_SUMMARY
15021-016750-DISCHARGE_SUMMARY
15230-012950-DISCHARGE_SUMMARY
15664-014779-DISCHARGE_SUMMARY
15751-026988-DISCHARGE_SUMMARY
15789-007213-DISCHARGE_SUMMARY
16055-152402-RADIOLOGY_REPORT
16072-170823-RADIOLOGY_REPORT
16134-204168-RADIOLOGY_REPORT
16247-028319-DISCHARGE_SUMMARY
16660-004075-DISCHARGE_SUMMARY
16677-010128-DISCHARGE_SUMMARY
16743-013010-DISCHARGE_SUMMARY
16997-000825-DISCHARGE_SUMMARY
17054-016976-DISCHARGE_SUMMARY
17097-368450-RADIOLOGY_REPORT
17467-010718-DISCHARGE_SUMMARY
17583-022047-DISCHARGE_SUMMARY
17644-017974-DISCHARGE_SUMMARY
17652-018982-DISCHARGE_SUMMARY
17774-014129-DISCHARGE_SUMMARY
18108-381702-RADIOLOGY_REPORT
18114-360237-RADIOLOGY_REPORT
18317-007698-DISCHARGE_SUMMARY
18531-010240-DISCHARGE_SUMMARY
19138-025729-DISCHARGE_SUMMARY
19154-166217-RADIOLOGY_REPORT
19596-007256-DISCHARGE_SUMMARY
19778-001791-DISCHARGE_SUMMARY
19911-175533-RADIOLOGY_REPORT
20223-103427-ECHO_REPORT
20389-024150-DISCHARGE_SUMMARY
20442-023289-DISCHARGE_SUMMARY
20701-013632-DISCHARGE_SUMMARY
20706-009354-DISCHARGE_SUMMARY
21115-101632-ECHO_REPORT
21312-018707-DISCHARGE_SUMMARY
21815-002962-DISCHARGE_SUMMARY
21951-203738-RADIOLOGY_REPORT
21979-010316-DISCHARGE_SUMMARY
22159-004946-DISCHARGE_SUMMARY
22788-021533-DISCHARGE_SUMMARY
24307-009748-DISCHARGE_SUMMARY
24435-000622-DISCHARGE_SUMMARY
24786-014472-DISCHARGE_SUMMARY
25150-027400-DISCHARGE_SUMMARY
25775-007416-DISCHARGE_SUMMARY
26522-011368-DISCHARGE_SUMMARY
'''.split()
_dev0 = '''
01314-028800-DISCHARGE_SUMMARY
02115-010823-DISCHARGE_SUMMARY
02652-006395-DISCHARGE_SUMMARY
04266-000520-DISCHARGE_SUMMARY
04303-005081-DISCHARGE_SUMMARY
05062-230044-RADIOLOGY_REPORT
05797-095003-ECG_REPORT
06292-371824-RADIOLOGY_REPORT
06445-096221-ECHO_REPORT
06653-081911-ECG_REPORT
07048-294691-RADIOLOGY_REPORT
07514-025655-DISCHARGE_SUMMARY
07908-129574-RADIOLOGY_REPORT
08114-027513-DISCHARGE_SUMMARY
08870-061373-ECG_REPORT
08951-002958-DISCHARGE_SUMMARY
09337-018472-DISCHARGE_SUMMARY
09569-067879-ECG_REPORT
10588-105794-ECHO_REPORT
10612-047357-ECG_REPORT
10644-007491-DISCHARGE_SUMMARY
10691-220707-RADIOLOGY_REPORT
11801-104538-ECHO_REPORT
12050-081563-ECG_REPORT
12748-021750-DISCHARGE_SUMMARY
13033-020154-DISCHARGE_SUMMARY
13265-104380-ECHO_REPORT
13913-106200-ECHO_REPORT
14493-110891-RADIOLOGY_REPORT
14522-104279-ECHO_REPORT
15272-026154-DISCHARGE_SUMMARY
15737-095456-ECHO_REPORT
15770-109559-ECHO_REPORT
18318-102656-ECHO_REPORT
18426-060090-ECG_REPORT
18912-067495-ECG_REPORT
19623-085193-ECG_REPORT
19649-021294-DISCHARGE_SUMMARY
21219-092548-ECG_REPORT
21273-244548-RADIOLOGY_REPORT
21280-272404-RADIOLOGY_REPORT
21349-034056-ECG_REPORT
21647-105660-ECHO_REPORT
21662-107599-ECHO_REPORT
22225-075494-ECG_REPORT
22818-041469-ECG_REPORT
22891-058961-ECG_REPORT
25585-058370-ECG_REPORT
25950-160092-RADIOLOGY_REPORT
26176-226973-RADIOLOGY_REPORT'''.split()
_dev1 = '''
00098-016139-DISCHARGE_SUMMARY
00500-097836-ECHO_REPORT
00587-400001-RADIOLOGY_REPORT
01114-083601-ECG_REPORT
01234-029456-DISCHARGE_SUMMARY
01455-067052-ECG_REPORT
02034-037300-ECG_REPORT
02115-010823-DISCHARGE_SUMMARY
02136-017465-DISCHARGE_SUMMARY
03066-084521-ECG_REPORT
03273-009330-DISCHARGE_SUMMARY
03392-360395-RADIOLOGY_REPORT
03702-098383-ECHO_REPORT
05308-090812-ECG_REPORT
07700-413490-RADIOLOGY_REPORT
07780-347384-RADIOLOGY_REPORT
08014-097374-ECHO_REPORT
09569-067879-ECG_REPORT
11439-014138-DISCHARGE_SUMMARY
12050-081563-ECG_REPORT
13265-104380-ECHO_REPORT
14108-340203-RADIOLOGY_REPORT
14522-104279-ECHO_REPORT
14708-006815-DISCHARGE_SUMMARY
15621-077411-ECG_REPORT
15737-095456-ECHO_REPORT
16013-015541-DISCHARGE_SUMMARY
16093-011230-DISCHARGE_SUMMARY
16817-077812-ECG_REPORT
17090-026395-DISCHARGE_SUMMARY
17522-024788-DISCHARGE_SUMMARY
18076-246143-RADIOLOGY_REPORT
19623-085193-ECG_REPORT
20038-028322-DISCHARGE_SUMMARY
21280-272404-RADIOLOGY_REPORT
21286-109632-ECHO_REPORT
21662-107599-ECHO_REPORT
22891-058961-ECG_REPORT
25003-338492-RADIOLOGY_REPORT
26563-387055-RADIOLOGY_REPORT
'''.split()
_dev2 = '''
02405-069810-ECG_REPORT
02410-026171-DISCHARGE_SUMMARY
02652-006395-DISCHARGE_SUMMARY
04269-027967-DISCHARGE_SUMMARY
05367-106998-ECHO_REPORT
05797-095003-ECG_REPORT
05955-087704-ECG_REPORT
06292-371824-RADIOLOGY_REPORT
07048-294691-RADIOLOGY_REPORT
07352-013977-DISCHARGE_SUMMARY
07514-025655-DISCHARGE_SUMMARY
07786-029701-ECG_REPORT
07908-129574-RADIOLOGY_REPORT
07968-074957-ECG_REPORT
08114-027513-DISCHARGE_SUMMARY
08380-043167-ECG_REPORT
08870-061373-ECG_REPORT
09775-416048-RADIOLOGY_REPORT
13913-106200-ECHO_REPORT
14158-075452-ECG_REPORT
14822-000161-DISCHARGE_SUMMARY
14897-025566-DISCHARGE_SUMMARY
15295-348292-RADIOLOGY_REPORT
15770-109559-ECHO_REPORT
17336-021181-DISCHARGE_SUMMARY
17451-147855-RADIOLOGY_REPORT
18908-109838-ECHO_REPORT
19155-103735-ECHO_REPORT
19584-178988-RADIOLOGY_REPORT
19709-026760-DISCHARGE_SUMMARY
20050-395622-RADIOLOGY_REPORT
20145-362538-RADIOLOGY_REPORT
21349-034056-ECG_REPORT
21967-106697-ECHO_REPORT
21971-062555-ECG_REPORT
22230-040122-ECG_REPORT
22743-004908-DISCHARGE_SUMMARY
23389-095475-ECHO_REPORT
24432-013472-DISCHARGE_SUMMARY
25497-096953-ECHO_REPORT
'''.split()
_dev3 = '''
01427-342648-RADIOLOGY_REPORT
02916-100844-ECHO_REPORT
04266-000520-DISCHARGE_SUMMARY
06091-383430-RADIOLOGY_REPORT
06296-089652-ECG_REPORT
06567-071352-ECG_REPORT
07726-023607-DISCHARGE_SUMMARY
07866-088203-ECG_REPORT
08087-099659-ECHO_REPORT
08703-021487-DISCHARGE_SUMMARY
09030-087790-ECG_REPORT
09166-409725-RADIOLOGY_REPORT
09375-099229-ECHO_REPORT
09622-087101-ECG_REPORT
09963-257487-RADIOLOGY_REPORT
10422-276335-RADIOLOGY_REPORT
10588-105794-ECHO_REPORT
10644-007491-DISCHARGE_SUMMARY
10668-239022-RADIOLOGY_REPORT
10907-103779-ECHO_REPORT
11762-027273-DISCHARGE_SUMMARY
11801-104538-ECHO_REPORT
11823-007872-DISCHARGE_SUMMARY
13033-020154-DISCHARGE_SUMMARY
14835-325902-RADIOLOGY_REPORT
14888-014879-DISCHARGE_SUMMARY
15128-008249-DISCHARGE_SUMMARY
16888-003484-DISCHARGE_SUMMARY
18318-102656-ECHO_REPORT
18321-022756-DISCHARGE_SUMMARY
18426-060090-ECG_REPORT
18912-067495-ECG_REPORT
19012-089185-ECG_REPORT
19140-056193-ECG_REPORT
19267-104724-ECHO_REPORT
19791-003873-DISCHARGE_SUMMARY
20996-105850-ECHO_REPORT
21273-244548-RADIOLOGY_REPORT
23590-017830-DISCHARGE_SUMMARY
25585-058370-ECG_REPORT
'''.split()
_dev4 = '''
00211-027889-DISCHARGE_SUMMARY
01982-060190-ECG_REPORT
03089-097913-ECHO_REPORT
03990-040506-ECG_REPORT
05062-230044-RADIOLOGY_REPORT
05967-095720-ECHO_REPORT
06653-081911-ECG_REPORT
07429-001857-DISCHARGE_SUMMARY
07978-322989-RADIOLOGY_REPORT
09040-052377-ECG_REPORT
09337-018472-DISCHARGE_SUMMARY
09536-102867-ECHO_REPORT
09584-107853-ECHO_REPORT
10101-012638-DISCHARGE_SUMMARY
10612-047357-ECG_REPORT
10668-107159-ECHO_REPORT
10691-220707-RADIOLOGY_REPORT
10906-067559-ECG_REPORT
16333-034160-ECG_REPORT
16994-022078-DISCHARGE_SUMMARY
17217-011306-DISCHARGE_SUMMARY
19230-039952-ECG_REPORT
19246-093639-ECG_REPORT
19649-021294-DISCHARGE_SUMMARY
20288-027184-DISCHARGE_SUMMARY
20400-049875-ECG_REPORT
20807-104709-ECHO_REPORT
21215-274571-RADIOLOGY_REPORT
21305-020227-DISCHARGE_SUMMARY
21413-012450-DISCHARGE_SUMMARY
21633-029484-DISCHARGE_SUMMARY
21647-105660-ECHO_REPORT
21833-003461-DISCHARGE_SUMMARY
22225-075494-ECG_REPORT
22264-260776-RADIOLOGY_REPORT
22682-065777-ECG_REPORT
24638-098945-ECHO_REPORT
24813-183267-RADIOLOGY_REPORT
25950-160092-RADIOLOGY_REPORT
26176-226973-RADIOLOGY_REPORT
'''.split()
_dev5 = '''
00414-104513-ECHO_REPORT
01314-028800-DISCHARGE_SUMMARY
01487-290421-RADIOLOGY_REPORT
04303-005081-DISCHARGE_SUMMARY
06445-096221-ECHO_REPORT
07156-096163-ECHO_REPORT
07452-053844-ECG_REPORT
07761-036998-ECG_REPORT
08951-002958-DISCHARGE_SUMMARY
09001-000036-DISCHARGE_SUMMARY
09665-101538-ECHO_REPORT
10124-289890-RADIOLOGY_REPORT
12152-087134-ECG_REPORT
12156-067807-ECG_REPORT
12748-021750-DISCHARGE_SUMMARY
13101-048474-ECG_REPORT
13594-066846-ECG_REPORT
14493-110891-RADIOLOGY_REPORT
15013-102321-ECHO_REPORT
15272-026154-DISCHARGE_SUMMARY
16044-019401-DISCHARGE_SUMMARY
16597-023618-DISCHARGE_SUMMARY
16660-199016-RADIOLOGY_REPORT
17473-000673-DISCHARGE_SUMMARY
17582-104422-ECHO_REPORT
18673-102519-ECHO_REPORT
21219-092548-ECG_REPORT
21745-025001-DISCHARGE_SUMMARY
22566-151087-RADIOLOGY_REPORT
22739-020612-DISCHARGE_SUMMARY
22818-041469-ECG_REPORT
22821-026994-DISCHARGE_SUMMARY
23039-078076-ECG_REPORT
23298-326737-RADIOLOGY_REPORT
23893-094803-ECG_REPORT
23969-299900-RADIOLOGY_REPORT
25217-257214-RADIOLOGY_REPORT
25844-097135-ECHO_REPORT
26136-101545-ECHO_REPORT
'''.split()
_folds = [_dev1, _dev2, _dev3, _dev4, _dev5]
| true | true |
1c33d8788746e8e2a77ab79b938d957add5907e2 | 1,555 | py | Python | src/doc/en/installation/conf.py | hsm207/sage | 020bd59ec28717bfab9af44d2231c53da1ff99f1 | [
"BSL-1.0"
] | 1,742 | 2015-01-04T07:06:13.000Z | 2022-03-30T11:32:52.000Z | src/doc/en/installation/conf.py | hsm207/sage | 020bd59ec28717bfab9af44d2231c53da1ff99f1 | [
"BSL-1.0"
] | 66 | 2015-03-19T19:17:24.000Z | 2022-03-16T11:59:30.000Z | src/doc/en/installation/conf.py | hsm207/sage | 020bd59ec28717bfab9af44d2231c53da1ff99f1 | [
"BSL-1.0"
] | 495 | 2015-01-10T10:23:18.000Z | 2022-03-24T22:06:11.000Z | # Sage Installation Guide documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 22 15:04:04 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from sage.docs.conf import release
from sage.docs.conf import * # NOQA
# Add any paths that contain custom static files (such as style sheets),
# relative to this directory to html_static_path. They are copied after the
# builtin static files, so a file named "default.css" will overwrite the
# builtin "default.css". html_common_static_path imported from sage.docs.conf
# contains common paths.
html_static_path = [] + html_common_static_path
# General information about the project.
project = "Sage Installation Guide"
name = 'installation'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = project + " v"+release
html_short_title = "Install Guide v" + release
# Output file base name for HTML help builder.
htmlhelp_basename = name
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', name + '.tex', 'Sage Installation Guide',
'The Sage Development Team', 'manual'),
]
| 37.926829 | 82 | 0.758842 |
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from sage.docs.conf import release
from sage.docs.conf import * # NOQA
# Add any paths that contain custom static files (such as style sheets),
# relative to this directory to html_static_path. They are copied after the
# builtin static files, so a file named "default.css" will overwrite the
# builtin "default.css". html_common_static_path imported from sage.docs.conf
# contains common paths.
html_static_path = [] + html_common_static_path
# General information about the project.
project = "Sage Installation Guide"
name = 'installation'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = project + " v"+release
html_short_title = "Install Guide v" + release
# Output file base name for HTML help builder.
htmlhelp_basename = name
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', name + '.tex', 'Sage Installation Guide',
'The Sage Development Team', 'manual'),
]
| true | true |
1c33da1e2a9e8bdfae24b5cf596e950332e1ca46 | 1,702 | py | Python | interactionviz/cli/viewer/__main__.py | rosshemsley/interactionviz | 032eef47667e0748f14cd27f675cbff1a0a1bf37 | [
"Apache-2.0"
] | 3 | 2020-09-25T16:13:25.000Z | 2021-08-02T01:55:31.000Z | interactionviz/cli/viewer/__main__.py | rosshemsley/interactionviz | 032eef47667e0748f14cd27f675cbff1a0a1bf37 | [
"Apache-2.0"
] | null | null | null | interactionviz/cli/viewer/__main__.py | rosshemsley/interactionviz | 032eef47667e0748f14cd27f675cbff1a0a1bf37 | [
"Apache-2.0"
] | null | null | null | import os
import pathlib
import click
from interactionviz.maps import load_map_xml
from interactionviz.viewers import ArcadeViewer, WebViewer
from interactionviz.tracks import Tracks, load_tracks_files
@click.command()
@click.option(
    "--root-dir",
    required=True,
    type=click.Path(exists=True, dir_okay=True, file_okay=False),
    help="Root directory of the interaction dataset.",
)
@click.option("--dataset", default="DR_CHN_Merging_ZS")
@click.option(
    "--viewer-kind",
    default="web",
    type=click.Choice(["web", "native"], case_sensitive=False),
)
@click.option("--session", type=int, default=0, help="session to load for tracks")
def main(viewer_kind: str, root_dir: str, dataset: str, session: int):
    """Load one dataset map plus its recorded tracks and open a viewer."""
    root = pathlib.Path(root_dir)
    interaction_map = load_map_xml(root.joinpath("maps", f"{dataset}.osm_xy"))
    tracks = _load_tracks(root, dataset, session)
    # "web" serves the scene in a browser; anything else uses the native window.
    viewer_cls = WebViewer if viewer_kind == "web" else ArcadeViewer
    viewer_cls(interaction_map, tracks=tracks).run()
def _load_tracks(root: pathlib.Path, dataset: str, session: int) -> Tracks:
    """Load whichever pedestrian/vehicle track CSVs exist for a session.

    Looks under <root>/recorded_trackfiles/<dataset> for the zero-padded
    session files and feeds every one that exists to load_tracks_files.

    Raises ValueError when neither file is present.
    """
    tracks_dir = root.joinpath("recorded_trackfiles", dataset)
    # Fix: the original bound the pedestrian file to a variable named
    # `vehicles` and vice versa.  Names now match their contents; the
    # pedestrian file is still probed first, so the path order passed to
    # load_tracks_files (and the error-message text) is unchanged.
    pedestrians = tracks_dir.joinpath(f"pedestrian_tracks_{session:03d}.csv")
    vehicles = tracks_dir.joinpath(f"vehicle_tracks_{session:03d}.csv")
    paths = [p for p in (pedestrians, vehicles) if p.exists()]
    if not paths:
        raise ValueError(f"no tracks found at {pedestrians} or {vehicles}")
    return load_tracks_files(*paths)
# Script entry point: run the click command when executed directly.
if __name__ == "__main__":
    main()
| 28.847458 | 82 | 0.70329 | import os
import pathlib
import click
from interactionviz.maps import load_map_xml
from interactionviz.viewers import ArcadeViewer, WebViewer
from interactionviz.tracks import Tracks, load_tracks_files
@click.command()
@click.option(
"--root-dir",
required=True,
type=click.Path(exists=True, dir_okay=True, file_okay=False),
help="Root directory of the interaction dataset.",
)
@click.option("--dataset", default="DR_CHN_Merging_ZS")
@click.option(
"--viewer-kind",
default="web",
type=click.Choice(["web", "native"], case_sensitive=False),
)
@click.option("--session", type=int, default=0, help="session to load for tracks")
def main(viewer_kind: str, root_dir: str, dataset: str, session: int):
root = pathlib.Path(root_dir)
map_path = root.joinpath("maps", f"{dataset}.osm_xy")
interaction_map = load_map_xml(map_path)
tracks = _load_tracks(root, dataset, session)
if viewer_kind == "web":
viewer = WebViewer(interaction_map, tracks=tracks)
else:
viewer = ArcadeViewer(interaction_map, tracks=tracks)
viewer.run()
def _load_tracks(root: pathlib.Path, dataset: str, session: int) -> Tracks:
paths = []
tracks_dir = root.joinpath("recorded_trackfiles", dataset)
vehicles = tracks_dir.joinpath(f"pedestrian_tracks_{session:03d}.csv")
pedestrians = tracks_dir.joinpath(f"vehicle_tracks_{session:03d}.csv")
if vehicles.exists():
paths.append(vehicles)
if pedestrians.exists():
paths.append(pedestrians)
if len(paths) == 0:
raise ValueError(f"no tracks found at {vehicles} or {pedestrians}")
return load_tracks_files(*paths)
if __name__ == "__main__":
main()
| true | true |
1c33da2275a63031e8cbd04fb6ca5bcda2e1d791 | 33,432 | py | Python | pyzoo/zoo/tfpark/tf_optimizer.py | Asjidkalam/analytics-zoo | 0afa8437abc3e5cf5289d2cfde68b237a45f9d0d | [
"Apache-2.0"
] | null | null | null | pyzoo/zoo/tfpark/tf_optimizer.py | Asjidkalam/analytics-zoo | 0afa8437abc3e5cf5289d2cfde68b237a45f9d0d | [
"Apache-2.0"
] | 1 | 2021-01-20T15:41:01.000Z | 2021-01-20T15:41:01.000Z | pyzoo/zoo/tfpark/tf_optimizer.py | Asjidkalam/analytics-zoo | 0afa8437abc3e5cf5289d2cfde68b237a45f9d0d | [
"Apache-2.0"
] | 1 | 2020-12-21T11:48:49.000Z | 2020-12-21T11:48:49.000Z | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import os
import sys
import tempfile
from bigdl.nn.criterion import Criterion
from bigdl.nn.layer import Layer
from bigdl.optim.optimizer import MaxEpoch, EveryEpoch
from bigdl.util.common import to_list, JavaValue
from zoo.common.utils import callZooFunc
from zoo.pipeline.api.keras.engine.topology import to_bigdl_metric, Loss, OptimMethod
from zoo.pipeline.api.net.utils import find_placeholders, to_bigdl_optim_method, find_tensors
from zoo.pipeline.estimator import Estimator
from zoo.util import nest
# Python 2/3 compatibility: on Python 3, alias the Python-2-only numeric and
# text types so code referencing `long`/`unicode` keeps working.
if sys.version >= '3':
    long = int
    unicode = str
class IdentityCriterion(Criterion):
    """Pass-through BigDL criterion.

    Used because the TensorFlow graph already computes the loss; BigDL's
    criterion slot just forwards it unchanged.
    """
    def __init__(self):
        super(IdentityCriterion, self).__init__(None, "float")
class TFValidationMethod(JavaValue):
    """JVM-backed validation method that evaluates a stateful metric.

    ``output_indices``/``label_indices`` select which positions of the
    model's flattened output feed the wrapped ``val_method``.
    """
    def __init__(self, val_method, name, output_indices, label_indices):
        self.name = name
        self.val_method = val_method
        # Registers the corresponding Scala-side validation method.
        JavaValue.__init__(self, None, "float",
                           val_method, name, output_indices, label_indices)
class StatelessMetric(JavaValue):
    """JVM-backed metric read directly from one scalar model output.

    ``idx`` is the position of the metric value in the flattened outputs;
    ``count_idx`` is the position of the sample-count tensor used to
    weight the running average.
    """
    def __init__(self, metric_name, idx, count_idx):
        self.name = metric_name
        self.idx = idx
        self.count_idx = count_idx
        JavaValue.__init__(self, None, "float", metric_name, idx, count_idx)
class BigDLMetric(object):
    """Plain holder pairing a BigDL validation method with the TensorFlow
    output and label tensors it should be evaluated on."""

    def __init__(self, val_method, outputs, labels):
        self.labels = labels
        self.outputs = outputs
        self.val_method = val_method
class TFTrainingHelper(Layer):
    """BigDL Layer wrapping an exported TensorFlow training graph.

    The graph and its metadata live under ``path`` (see
    ``TFModel._save_to_dir``); the JVM side loads them from there.  A
    ``tf.train.Saver`` plus the live Python session are kept so weights
    trained on the JVM can be restored back into the Python graph.
    """
    def __init__(self, path, config_proto, saver, meta, sess):
        self.saver = saver
        self.meta = meta
        self.export_dir = path
        self.sess = sess
        if config_proto is not None:
            import tensorflow as tf
            assert isinstance(config_proto, tf.ConfigProto), \
                "session_config should be a tf.ConfigProto"
            config_proto.use_per_session_threads = True
            # Serialized config is shipped to the JVM executors.
            byte_arr = bytearray(config_proto.SerializeToString())
        else:
            byte_arr = None
        super(TFTrainingHelper, self).__init__(None, "float", path, byte_arr)

    def save_checkpoint(self):
        """Ask the JVM side to write the current weights as a TF checkpoint."""
        callZooFunc(self.bigdl_type, "saveCheckpoint",
                    self.value)

    def get_weights_to_python(self):
        """Pull JVM-trained weights back into the Python-side session."""
        self.save_checkpoint()
        self.saver.restore(self.sess, os.path.join(self.export_dir, "model"))

    def load_checkpoint(self, path):
        """Load a Zoo checkpoint on the JVM side, then sync it to Python."""
        callZooFunc(self.bigdl_type, "loadZooCheckpoint", self.value, path)
        self.get_weights_to_python()
def _to_operation_name(name):
return name.split(":")[0]
def _to_floats(vs):
return [float(v) for v in vs]
class TFModel(object):
    """Exported TensorFlow training graph packaged for BigDL.

    Bundles the ``TFTrainingHelper`` layer driving the frozen graph from
    the JVM, the pass-through ``IdentityCriterion``, and the validation
    methods derived from the user-supplied metrics.
    """
    def __init__(self, training_helper_layer, criterion, val_methods):
        self.training_helper_layer = training_helper_layer
        self.criterion = criterion
        self.val_methods = val_methods
    @staticmethod
    def _expand_inputs(inputs, tensors_with_value, loss):
        """Flatten ``inputs`` and split ``tensors_with_value`` into extra
        input tensors plus their default values.

        Raises ValueError if a tensor is both a regular input and listed in
        ``tensors_with_value``.  NOTE(review): ``loss`` is unused here.
        """
        additional_inputs = []
        additional_values = []
        inputs = nest.flatten(inputs)
        names = set([i.name for i in inputs])
        if tensors_with_value:
            for t, v in tensors_with_value.items():
                if t.name in names:
                    msg = f"tensor {t} already in inputs, cannot put it in tensor_with_value"
                    raise ValueError(msg)
                additional_inputs.append(t)
                additional_values.append(v)
        return inputs, additional_inputs, additional_values
    @staticmethod
    def _process_session_config(session_config):
        """Validate a tf.ConfigProto and force per-session threads; pass None through."""
        import tensorflow as tf
        if session_config is not None:
            assert isinstance(session_config, tf.ConfigProto), \
                "session_config should be a tf.ConfigProto"
            session_config.use_per_session_threads = True
        return session_config
    @staticmethod
    def _process_grads(graph, grads):
        """Run each gradient through zoo's process_grad inside ``graph``."""
        with graph.as_default():
            from zoo.util.tf import process_grad
            grads = [process_grad(grad) for grad in grads]
        return grads
    @staticmethod
    def _process_metrics(graph, metrics, real_batch_size):
        """Turn the user metrics dict into flat output tensors + val methods.

        Output slot 0 is always the batch-size tensor; plain numeric tensors
        become ``StatelessMetric``s, while ``BigDLMetric`` instances
        contribute their output and label tensors and a
        ``TFValidationMethod`` indexing into those slots.
        """
        import tensorflow as tf
        outputs = [real_batch_size]
        val_methods = None
        if metrics is not None:
            # idx tracks the next free slot in the flattened outputs list.
            idx = 1
            val_methods = []
            for metric_name in metrics:
                metric = metrics[metric_name]
                if tf.is_numeric_tensor(metric):
                    outputs.append(metric)
                    val_methods.append(StatelessMetric(metric_name, idx, 0))
                    idx += 1
                else:
                    outputs += metric.outputs
                    with graph.as_default():
                        val_labels = [tf.identity(v) for v in metric.labels]
                    outputs += val_labels
                    method = TFValidationMethod(metric.val_method,
                                                metric_name,
                                                list(range(idx, idx + len(metric.outputs))),
                                                list(range(idx + len(metric.outputs),
                                                           idx + len(metric.outputs)
                                                           + len(val_labels))))
                    val_methods.append(method)
                    idx += len(metric.outputs) + len(val_labels)
        outputs = [tf.to_float(output) for output in outputs]
        return outputs, val_methods
    @staticmethod
    def _process_variables(graph, variables, updates):
        """Build placeholders/assign ops for every global variable.

        Variables listed in ``variables`` (the trainables) keep their input
        order via ``name2idx``; every other global variable is treated as
        "extra" state that must still be synchronized.  Returns float-cast
        reads, assign placeholders and grouped assign/update ops.
        """
        import tensorflow as tf
        all_trainable_variables = variables
        name2idx = dict([(v.name, idx) for idx, v in enumerate(all_trainable_variables)])
        all_variables = graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        update_ops = graph.get_collection(tf.GraphKeys.UPDATE_OPS)
        if updates is not None:
            update_ops += updates
        # Pre-size with dummy zeros so trainables land at their name2idx slot.
        trainable_variables = [0] * len(all_trainable_variables)
        trainable_assigns = [0] * len(all_trainable_variables)
        trainable_variable_placeholders = [0] * len(all_trainable_variables)
        extra_variables = []
        extra_variable_assigns = []
        extra_variable_assign_placeholders = []
        for v in all_variables:
            p = tf.placeholder(dtype=v.dtype, shape=v.shape)
            a = tf.assign(v, p)
            # special treatment for ResourceVariable
            if v.op.type == "VarHandleOp":
                v_float_value = tf.to_float(v.read_value())
            else:
                v_float_value = tf.to_float(v)
            if v.name in name2idx:
                trainable_variables[name2idx[v.name]] = v_float_value
                trainable_assigns[name2idx[v.name]] = a
                trainable_variable_placeholders[name2idx[v.name]] = p
            else:
                extra_variables.append(v_float_value)
                extra_variable_assigns.append(a)
                extra_variable_assign_placeholders.append(p)
        extra_variable_assign = tf.group(*extra_variable_assigns)
        trainable_assign = tf.group(*trainable_assigns)
        update_op = tf.group(update_ops)
        return trainable_variables, trainable_variable_placeholders, trainable_assign, \
            extra_variables, extra_variable_assign_placeholders, \
            extra_variable_assign, update_op
    @staticmethod
    def _save_to_dir(folder, sess, graph,
                     metric_tensors,
                     batch_size_tensor,
                     loss_tensor, inputs, labels, predictions,
                     trainable_variables,
                     trainable_variable_placeholders,
                     trainable_assign,
                     extra_variables,
                     extra_variable_assign_placeholders,
                     extra_variable_assign,
                     grads, update_op, train_op,
                     additional_inputs,
                     additional_values):
        """Write checkpoint, graph def and a ``training_meta.json`` manifest
        mapping every tensor/op name the JVM side needs to drive training.
        Returns (meta dict, tf.train.Saver).
        """
        import tensorflow as tf
        from tensorflow import gfile
        saver = tf.train.Saver()
        if not os.path.isdir(folder):
            os.makedirs(folder)
        saver.save(sess, os.path.join(folder, "model"), write_meta_graph=False)
        # Manifest of tensor/op names consumed by the Scala-side trainer.
        meta = {
            "inputs": [i.name for i in inputs],
            "input_types": [i.dtype.as_datatype_enum for i in inputs],
            "additional_inputs": [i.name for i in additional_inputs],
            "additional_input_types": [i.dtype.as_datatype_enum for i in additional_inputs],
            "labels": [l.name for l in labels],
            "label_types": [i.dtype.as_datatype_enum for i in labels],
            "predictions": [t.name for t in predictions] if predictions else [],
            "metric_tensors": [t.name for t in metric_tensors],
            "batch_size_tensor": batch_size_tensor.name,
            "loss_tensor": loss_tensor.name,
            "variables": [v.name for v in trainable_variables],
            "variable_types": [v.dtype.as_datatype_enum for v in trainable_variable_placeholders],
            "variable_assign_placeholders": [v.name for v in trainable_variable_placeholders],
            "assign_variable_op": trainable_assign.name,
            "extra_variables": [v.name for v in extra_variables],
            "extra_variable_types": [v.dtype.as_datatype_enum for v
                                     in extra_variable_assign_placeholders],
            "extra_variable_assign_placeholders": [p.name for p in
                                                   extra_variable_assign_placeholders],
            "assign_extra_variable_op": extra_variable_assign.name,
            "grad_variables": [g.name for g in grads],
            "update_op": update_op.name,
            "restore_op": saver.saver_def.restore_op_name,
            "restore_path_placeholder": saver.saver_def.filename_tensor_name,
            "save_op": _to_operation_name(saver.saver_def.save_tensor_name),
            "save_path_placeholder": saver.saver_def.filename_tensor_name,
            "default_tensor_value": [_to_floats(v) for v in additional_values],
            "init_op": tf.tables_initializer().name
        }
        if train_op is not None:
            meta["train_op"] = train_op.name
        with open(os.path.join(folder, "training_meta.json"), "w") as f:
            f.write(json.dumps(meta))
        with gfile.GFile(os.path.join(folder, "model.meta"), "wb") as f:
            f.write(graph.as_graph_def().SerializeToString())
        return meta, saver
    @staticmethod
    def export(model_dir, loss_tensor, sess, inputs, labels, predictions, grads, variables, graph,
               tensors_with_value, metrics, updates, train_op=None):
        """Prepare the graph (inputs, metrics, grads, variable assigns) and
        persist everything under ``model_dir``.
        Returns (meta dict, saver, validation methods).
        """
        import tensorflow as tf
        with graph.as_default():
            batch_size_tensor = tf.to_float(tf.shape(inputs[0])[0])
        inputs, additional_inputs, additional_values = \
            TFModel._expand_inputs(inputs, tensors_with_value, loss_tensor)
        metric_tensors, val_methods = TFModel._process_metrics(graph, metrics, batch_size_tensor)
        grads = TFModel._process_grads(graph, grads)
        trainable_variables, trainable_variable_placeholders, trainable_assign, \
            extra_variables, extra_variable_assign_placeholders, \
            extra_variable_assign, update_op = \
            TFModel._process_variables(graph, variables, updates)
        meta, saver = \
            TFModel._save_to_dir(model_dir, sess, graph,
                                 metric_tensors,
                                 batch_size_tensor,
                                 loss_tensor, inputs, labels, predictions,
                                 trainable_variables,
                                 trainable_variable_placeholders,
                                 trainable_assign,
                                 extra_variables,
                                 extra_variable_assign_placeholders,
                                 extra_variable_assign,
                                 grads, update_op, train_op,
                                 additional_inputs,
                                 additional_values)
        return meta, saver, val_methods
    @staticmethod
    def create(loss_tensor, sess, inputs, labels, predictions, grads, variables, graph,
               tensors_with_value, session_config, metrics, updates,
               model_dir, train_op=None):
        """Export the graph (to ``model_dir`` or a fresh temp dir) and wrap
        it in a ready-to-train ``TFModel``.
        """
        if model_dir is None:
            model_dir = tempfile.mkdtemp()
        else:
            if not os.path.isdir(model_dir):
                os.makedirs(model_dir)
        meta, saver, val_methods = TFModel.export(model_dir, loss_tensor, sess,
                                                  inputs, labels, predictions, grads, variables,
                                                  graph, tensors_with_value, metrics, updates,
                                                  train_op)
        training_helper_layer = TFTrainingHelper(model_dir,
                                                 session_config, saver, meta, sess)
        criterion = IdentityCriterion()
        return TFModel(training_helper_layer, criterion, val_methods)
class TFOptimizer:
    def __init__(self, tf_model, optim_method,
                 sess=None, dataset=None,
                 clip_norm=None, clip_value=None,
                 model_dir=None):
        """
        TFOptimizer is used for distributed training of TensorFlow
        on Spark/BigDL.

        :param tf_model: a TFModel wrapping the exported training graph
        :param optim_method: the optimization method to be used, such as
            bigdl.optim.optimizer.Adam
        :param sess: the current TensorFlow Session; pass one if you loaded
            pre-trained variables into it
        :param dataset: the TFDataset providing training (and optionally
            validation) data; its batch_size must be set (> 0)
        :param clip_norm: optional L2-norm gradient clipping threshold
        :param clip_value: optional (min_value, max_value) tuple for
            constant gradient clipping
        :param model_dir: optional directory for checkpoints/summaries
        """
        self.optim_method = optim_method
        self.sess = sess
        self.dataset = dataset
        self.clip_norm = clip_norm
        if clip_value is not None and not isinstance(clip_value, tuple):
            raise ValueError("The clip_value argument should be a tuple (min_value, max_value)")
        self.clip_constant = clip_value
        if self.dataset.batch_size <= 0:
            raise ValueError("You should set batch_size instead of batch_per_thread for training")
        self.model_dir = model_dir
        self.tf_model = tf_model
        batch_size = self.dataset.batch_size
        self.train_data = self.dataset.get_training_data()
        self.val_data = self.dataset.get_validation_data()
        self.batch_size = batch_size
        # The Estimator drives the actual distributed training loop.
        self.estimator = Estimator(self.tf_model.training_helper_layer,
                                   self.optim_method,
                                   self.model_dir)
        if self.clip_norm:
            self.estimator.set_l2_norm_gradient_clipping(self.clip_norm)
        if self.clip_constant:
            min_value, max_value = self.clip_constant
            self.estimator.set_constant_gradient_clipping(min_value, max_value)
    def load_checkpoint(self, path, version):
        """Restore model weights and optimizer state from a checkpoint.

        Rebuilds the Estimator afterwards, reapplying any gradient
        clipping that was configured on this optimizer.

        :param path: directory containing the checkpoint files
        :param version: checkpoint version suffix (e.g. iteration number)
        """
        # todo make version optional
        model_path = os.path.join(path, "model.{}".format(version))
        optim_method_path = os.path.join(path, "optimMethod-TFParkTraining.{}".format(version))
        self.tf_model.training_helper_layer.load_checkpoint(model_path)
        self.optim_method = OptimMethod.load(optim_method_path)
        self.estimator = Estimator(self.tf_model.training_helper_layer,
                                   self.optim_method,
                                   self.model_dir)
        if self.clip_norm:
            self.estimator.set_l2_norm_gradient_clipping(self.clip_norm)
        if self.clip_constant:
            min_value, max_value = self.clip_constant
            self.estimator.set_constant_gradient_clipping(min_value, max_value)
@staticmethod
def _get_or_create_session(session):
import tensorflow as tf
if session is None:
sess = tf.Session()
sess.run(tf.global_variables_initializer())
else:
sess = session
return sess
    @staticmethod
    def _get_dataset_from_loss(loss):
        """Recover the TFDataset that feeds ``loss``.

        TFDataset registers itself in a graph collection keyed by its first
        placeholder's name; follow that link back from the loss tensor.
        """
        import tensorflow as tf
        all_required_inputs = find_placeholders([loss])
        dataset = tf.get_collection(all_required_inputs[0].name)[0]
        return dataset
@staticmethod
def _get_vars_grads(loss):
import tensorflow as tf
grads_vars = tf.train.GradientDescentOptimizer(0).compute_gradients(loss)
grads_vars.sort(key=lambda grad_var: grad_var[1].name)
variables = []
grads = []
for (grad, var) in grads_vars:
if grad is not None:
variables.append(var)
grads.append(grad)
return grads, variables
    @staticmethod
    def _get_vars_grads_from_train_op(train_op):
        """Extract (grads, variables) from a pre-built train_op.

        Relies on zoo's gradient-marking convention: gradient tensors are
        wrapped in identity ops named ``zoo_identity_op_for_grad...`` whose
        op has the variable op as its (only used) control input.
        """
        def predicate(t):
            return t.name.split("/")[-1].startswith("zoo_identity_op_for_grad")
        grads = find_tensors([train_op], predicate)
        grad_ops = [grad.op for grad in grads]
        variables = []
        for grad in grad_ops:
            var = list(grad.control_inputs)[0]
            # NOTE(review): this compares an Operation's *name* against the
            # op *type* string "VarHandleOp" — likely intended to be
            # `var.type`; confirm against the graph-building code.
            if var.name == "VarHandleOp":
                variables.append(var)
            else:
                variables.append(list(var.outputs)[0])
        # variables = [grad.op.control_inputs[0].outputs[0] for grad in grads]
        return grads, variables
@classmethod
def from_train_op(cls, train_op, loss, *, inputs=None, labels=None, metrics=None, updates=None,
sess=None, dataset=None, tensor_with_value=None, session_config=None,
model_dir=None):
sess = TFOptimizer._get_or_create_session(sess)
grads, variables = TFOptimizer._get_vars_grads_from_train_op(train_op)
if dataset is None:
dataset = TFOptimizer._get_dataset_from_loss(loss)
_ = dataset.tensors # trigger create tensors if not available
dataset_inputs = dataset._original_tensors
if isinstance(dataset_inputs, tuple) and len(dataset_inputs) == 2:
if inputs is None:
inputs = dataset_inputs[0]
if labels is None:
labels = dataset_inputs[1]
else:
if inputs is None:
inputs = dataset_inputs
if labels is None:
labels = []
inputs = nest.flatten(inputs)
labels = nest.flatten(labels)
from zoo.tfpark.zoo_optimizer import FakeOptimMethod
return TFOptimizer._from_grads(loss=loss, sess=sess, inputs=inputs, labels=labels,
grads=grads,
variables=variables, dataset=dataset, metrics=metrics,
tensor_with_value=tensor_with_value,
optim_method=FakeOptimMethod(),
session_config=session_config, updates=updates,
model_dir=model_dir, train_op=train_op)
    @classmethod
    def _from_grads(cls, loss, sess, inputs, labels, grads, variables, dataset, optim_method=None,
                    clip_norm=None, clip_value=None,
                    metrics=None, tensor_with_value=None, session_config=None,
                    model_dir=None, updates=None, train_op=None):
        """Internal factory: wrap precomputed gradients into a TFModel and
        build a TFOptimizer around it.

        Note the asymmetry: TFModel.create is deliberately called with
        model_dir=None (it exports to a temporary directory), while the
        user-supplied model_dir is forwarded to the TFOptimizer constructor
        for the estimator's use.
        """
        graph = loss.graph
        if metrics is None:
            metrics = {}
        tf_model = TFModel.create(loss, sess, inputs, labels, [], grads, variables, graph,
                                  tensor_with_value, session_config, metrics,
                                  updates, model_dir=None, train_op=train_op)
        return cls(tf_model, optim_method, sess=sess, dataset=dataset,
                   clip_norm=clip_norm, clip_value=clip_value, model_dir=model_dir)
@classmethod
def from_loss(cls, loss, optim_method, session=None, inputs=None, dataset=None,
val_outputs=None, val_labels=None, val_method=None,
clip_norm=None, clip_value=None, metrics=None,
tensor_with_value=None, session_config=None, model_dir=None, updates=None):
"""
Create a TFOptimizer from a TensorFlow loss tensor.
The loss tensor must come from a TensorFlow graph that only takes TFDataset.tensors and
the tensors in `tensor_with_value` as inputs.
:param loss: The loss tensor of the TensorFlow model, should be a scalar
:param optim_method: the optimization method to be used, such as bigdl.optim.optimizer.Adam
:param session: the current TensorFlow Session, if you want to used a pre-trained model,
you should use the Session to load the pre-trained variables and pass it to TFOptimizer.
:param val_outputs: the validation output TensorFlow tensor to be used by val_methods
:param val_labels: the validation label TensorFlow tensor to be used by val_methods
:param val_method: the BigDL val_method(s) to be used.
:param clip_norm: float >= 0. Gradients will be clipped when their L2 norm exceeds
this value.
:param clip_value: float >= 0. Gradients will be clipped when their absolute value
exceeds this value.
:param metrics: a dictionary. The key should be a string representing the metric's name
and the value should be the corresponding TensorFlow tensor, which should be a scalar.
:param tensor_with_value: a dictionary. The key is TensorFlow tensor, usually a
placeholder, the value of the dictionary is a tuple of two elements. The first one of
the tuple is the value to feed to the tensor in training phase and the second one
is the value to feed to the tensor in validation phase.
:return: a TFOptimizer
"""
sess = TFOptimizer._get_or_create_session(session)
grads, variables = TFOptimizer._get_vars_grads(loss)
if dataset is None and inputs is None:
dataset = TFOptimizer._get_dataset_from_loss(loss)
inputs = dataset._original_tensors
else:
if inputs is None:
raise ValueError("please specify inputs")
_ = dataset.tensors # trigger creating placeholders
if isinstance(inputs, tuple) and len(inputs) == 2:
inputs, labels = inputs
else:
labels = []
inputs = nest.flatten(inputs)
labels = nest.flatten(labels)
if clip_value is not None:
if isinstance(clip_value, float) or isinstance(clip_value, int):
if clip_value <= 0:
ValueError("The clip_value argument should be positive number")
clip_value = (-float(clip_value), float(clip_value))
if not isinstance(clip_value, tuple):
raise ValueError("The clip_value argument should be" +
" a positive float/int which clips to" +
" (-clip_value, clip_value); " +
"or a tuple which clips to (min_value, max_value)")
if val_method is not None:
val_methods = to_list(val_method)
if metrics is None:
metrics = {}
for i, method in enumerate(val_methods):
metrics['bigdl_metric_' + str(i)] = BigDLMetric(method, val_outputs, val_labels)
return TFOptimizer._from_grads(loss, sess, inputs, labels, grads, variables, dataset,
optim_method, clip_norm, clip_value,
metrics, tensor_with_value, session_config,
model_dir, updates)
    @staticmethod
    def export_training_model(export_dir, loss, sess, inputs, labels=None, predictions=None,
                              metrics=None, tensor_with_value=None, updates=None):
        """Export a training graph (loss, gradients, variables and bookkeeping
        metadata) to *export_dir* so training can be performed elsewhere.

        :param export_dir: directory to write the exported model to.
        :param loss: scalar loss tensor; its graph is what gets exported.
        :param sess: TensorFlow session holding the current variable values.
        """
        grads, variables = TFOptimizer._get_vars_grads(loss)
        TFModel.export(export_dir, loss, sess, inputs, labels, predictions, grads, variables,
                       loss.graph, tensor_with_value, metrics, updates)
        logging.info("Exported TensorFlow model in {} for training".format(export_dir))
@staticmethod
def _shape_match(model_shape, dataset_shape):
for i in range(len(dataset_shape)):
if dataset_shape[i].value is None:
return model_shape[i].value is None
else:
return dataset_shape[i].value == model_shape[i].value or \
model_shape[i].value is None
@classmethod
def from_keras(cls, keras_model, dataset,
session_config=None, model_dir=None, metrics=None, optimizer=None):
"""
Create a TFOptimizer from a tensorflow.keras model. The model must be compiled.
:param keras_model: the tensorflow.keras model, which must be compiled.
:param dataset: a TFDataset
:return:
"""
import tensorflow.keras.backend as K
model_inputs = keras_model.inputs
if hasattr(keras_model, "targets"):
model_targets = keras_model.targets
else:
model_targets = keras_model._targets
# target can be None if loss is None
model_targets = list(filter(lambda x: x is not None, model_targets))
flatten_inputs = nest.flatten(dataset.feature_tensors)
assert len(model_inputs) == len(flatten_inputs), \
("the keras model and TFDataset should have the same number of tensors" +
" keras model has {} inputs " +
"while TFDataset has {} inputs").format(len(model_inputs),
len(flatten_inputs))
for i in range(len(flatten_inputs)):
if not TFOptimizer._shape_match(model_inputs[i].shape, flatten_inputs[i].shape):
raise ValueError(("The {}th input in keras model {}"
" does not match the TFDataset"
"input {}").format(i,
model_inputs[i],
flatten_inputs[i]))
flatten_targets = nest.flatten(dataset.label_tensors)
assert len(model_targets) == len(flatten_targets), \
("the keras model and TFDataset should have the same number of tensors" +
" keras model has {} targets " +
"while TFDataset has {} labels").format(len(model_targets),
len(flatten_inputs))
# todo check targets shape, currently checking target shape will
# cause too much false alarm.
loss = keras_model.total_loss
variables = keras_model._collected_trainable_weights
variables.sort(key=lambda variable: variable.name)
keras_optimizer = keras_model.optimizer
from zoo.tfpark.zoo_optimizer import get_gradients_for_keras
grads = get_gradients_for_keras(keras_optimizer, loss, variables)
grads_and_vars = list(zip(grads, variables))
import tensorflow.python.keras.optimizers as koptimizers
if isinstance(keras_optimizer, koptimizers.TFOptimizer):
# work around keras TFOptimzier bug
train_op = keras_optimizer.optimizer.apply_gradients(grads_and_vars)
else:
train_op = keras_optimizer.apply_gradients(grads_and_vars)
sess = K.get_session()
if keras_model.metrics and (dataset.get_validation_data() is not None):
if isinstance(keras_model.metrics, dict):
raise ValueError(
"different metrics for different outputs are not supported right now")
if len(keras_model.outputs) > 1:
if not all([name.endswith("loss") for name in keras_model.metrics_names]):
raise ValueError("metrics (except loss) for multi-head model is not supported")
else:
bigdl_val_methods = [Loss()]
val_outputs = keras_model.outputs
val_labels = model_targets
else:
bigdl_val_methods = \
[to_bigdl_metric(m, keras_model.loss) for m in keras_model.metrics_names]
val_outputs = keras_model.outputs
val_labels = model_targets
else:
val_outputs = None
val_labels = None
bigdl_val_methods = None
tensor_with_value = {
K.learning_phase(): [True, False]
}
updates = []
updates += keras_model.get_updates_for(None)
# Conditional updates relevant to this model
updates += keras_model.get_updates_for(keras_model.inputs)
if bigdl_val_methods is not None:
val_methods = to_list(bigdl_val_methods)
bigdl_metrics = {}
for i, method in enumerate(val_methods):
bigdl_metrics['bigdl_metric_' + str(i)] = BigDLMetric(method,
val_outputs,
val_labels)
if metrics is None:
metrics = bigdl_metrics
else:
metrics.update(bigdl_metrics)
if optimizer is not None:
clip_norm = None
clip_value = None
if hasattr(keras_optimizer, 'clipnorm'):
clip_norm = keras_optimizer.clipnorm
if hasattr(keras_optimizer, 'clipvalue'):
clip_value = (-keras_optimizer.clipvalue, keras_optimizer.clipvalue)
tf_model = TFModel.create(loss, sess, model_inputs, model_targets, keras_model.outputs,
grads, variables, loss.graph,
tensor_with_value, session_config, metrics,
updates, model_dir=None)
return cls(tf_model, optimizer, sess=sess, dataset=dataset,
clip_norm=clip_norm, clip_value=clip_value, model_dir=model_dir)
return cls.from_train_op(train_op, loss, inputs=model_inputs, labels=model_targets,
metrics=metrics, updates=updates, sess=sess, dataset=dataset,
tensor_with_value=tensor_with_value, session_config=session_config,
model_dir=model_dir)
    def set_constant_gradient_clipping(self, min_value, max_value):
        """
        Configure constant clipping settings: every gradient element is
        clipped into the [min_value, max_value] range.

        :param min_value: the minimum value to clip by
        :param max_value: the maximum value to clip by
        """
        self.estimator.set_constant_gradient_clipping(min_value, max_value)
    def set_gradient_clipping_by_l2_norm(self, clip_norm):
        """
        Configure L2 norm clipping settings: gradients are rescaled when
        their L2 norm exceeds the threshold.

        :param clip_norm: gradient L2-Norm threshold
        """
        self.estimator.set_l2_norm_gradient_clipping(clip_norm)
def optimize(self, end_trigger=None, checkpoint_trigger=None):
"""
Run the training loop of the this optimizer
:param end_trigger: BigDL's Trigger to indicate when to stop the training.
:param checkpoint_trigger: When to save a checkpoint and evaluate model.
"""
if end_trigger is None:
end_trigger = MaxEpoch(1)
if checkpoint_trigger is None:
checkpoint_trigger = EveryEpoch()
if self.tf_model.val_methods and self.val_data is not None:
self.estimator.train_minibatch(train_set=self.train_data,
criterion=self.tf_model.criterion,
end_trigger=end_trigger,
checkpoint_trigger=checkpoint_trigger,
validation_set=self.val_data,
validation_method=self.tf_model.val_methods)
else:
self.estimator.train_minibatch(train_set=self.train_data,
criterion=self.tf_model.criterion,
end_trigger=end_trigger,
checkpoint_trigger=checkpoint_trigger)
self.tf_model.training_helper_layer.get_weights_to_python()
| 43.53125 | 100 | 0.604451 |
import json
import logging
import os
import sys
import tempfile
from bigdl.nn.criterion import Criterion
from bigdl.nn.layer import Layer
from bigdl.optim.optimizer import MaxEpoch, EveryEpoch
from bigdl.util.common import to_list, JavaValue
from zoo.common.utils import callZooFunc
from zoo.pipeline.api.keras.engine.topology import to_bigdl_metric, Loss, OptimMethod
from zoo.pipeline.api.net.utils import find_placeholders, to_bigdl_optim_method, find_tensors
from zoo.pipeline.estimator import Estimator
from zoo.util import nest
if sys.version >= '3':
long = int
unicode = str
class IdentityCriterion(Criterion):
def __init__(self):
super(IdentityCriterion, self).__init__(None, "float")
class TFValidationMethod(JavaValue):
def __init__(self, val_method, name, output_indices, label_indices):
self.name = name
self.val_method = val_method
JavaValue.__init__(self, None, "float",
val_method, name, output_indices, label_indices)
class StatelessMetric(JavaValue):
def __init__(self, metric_name, idx, count_idx):
self.name = metric_name
self.idx = idx
self.count_idx = count_idx
JavaValue.__init__(self, None, "float", metric_name, idx, count_idx)
class BigDLMetric(object):
def __init__(self, val_method, outputs, labels):
self.val_method = val_method
self.outputs = outputs
self.labels = labels
class TFTrainingHelper(Layer):
def __init__(self, path, config_proto, saver, meta, sess):
self.saver = saver
self.meta = meta
self.export_dir = path
self.sess = sess
if config_proto is not None:
import tensorflow as tf
assert isinstance(config_proto, tf.ConfigProto), \
"session_config should be a tf.ConfigProto"
config_proto.use_per_session_threads = True
byte_arr = bytearray(config_proto.SerializeToString())
else:
byte_arr = None
super(TFTrainingHelper, self).__init__(None, "float", path, byte_arr)
def save_checkpoint(self):
callZooFunc(self.bigdl_type, "saveCheckpoint",
self.value)
def get_weights_to_python(self):
self.save_checkpoint()
self.saver.restore(self.sess, os.path.join(self.export_dir, "model"))
def load_checkpoint(self, path):
callZooFunc(self.bigdl_type, "loadZooCheckpoint", self.value, path)
self.get_weights_to_python()
def _to_operation_name(name):
return name.split(":")[0]
def _to_floats(vs):
return [float(v) for v in vs]
class TFModel(object):
def __init__(self, training_helper_layer, criterion, val_methods):
self.training_helper_layer = training_helper_layer
self.criterion = criterion
self.val_methods = val_methods
@staticmethod
def _expand_inputs(inputs, tensors_with_value, loss):
additional_inputs = []
additional_values = []
inputs = nest.flatten(inputs)
names = set([i.name for i in inputs])
if tensors_with_value:
for t, v in tensors_with_value.items():
if t.name in names:
msg = f"tensor {t} already in inputs, cannot put it in tensor_with_value"
raise ValueError(msg)
additional_inputs.append(t)
additional_values.append(v)
return inputs, additional_inputs, additional_values
@staticmethod
def _process_session_config(session_config):
import tensorflow as tf
if session_config is not None:
assert isinstance(session_config, tf.ConfigProto), \
"session_config should be a tf.ConfigProto"
session_config.use_per_session_threads = True
return session_config
@staticmethod
def _process_grads(graph, grads):
with graph.as_default():
from zoo.util.tf import process_grad
grads = [process_grad(grad) for grad in grads]
return grads
@staticmethod
def _process_metrics(graph, metrics, real_batch_size):
import tensorflow as tf
outputs = [real_batch_size]
val_methods = None
if metrics is not None:
idx = 1
val_methods = []
for metric_name in metrics:
metric = metrics[metric_name]
if tf.is_numeric_tensor(metric):
outputs.append(metric)
val_methods.append(StatelessMetric(metric_name, idx, 0))
idx += 1
else:
outputs += metric.outputs
with graph.as_default():
val_labels = [tf.identity(v) for v in metric.labels]
outputs += val_labels
method = TFValidationMethod(metric.val_method,
metric_name,
list(range(idx, idx + len(metric.outputs))),
list(range(idx + len(metric.outputs),
idx + len(metric.outputs)
+ len(val_labels))))
val_methods.append(method)
idx += len(metric.outputs) + len(val_labels)
outputs = [tf.to_float(output) for output in outputs]
return outputs, val_methods
@staticmethod
def _process_variables(graph, variables, updates):
import tensorflow as tf
all_trainable_variables = variables
name2idx = dict([(v.name, idx) for idx, v in enumerate(all_trainable_variables)])
all_variables = graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
update_ops = graph.get_collection(tf.GraphKeys.UPDATE_OPS)
if updates is not None:
update_ops += updates
trainable_variables = [0] * len(all_trainable_variables)
trainable_assigns = [0] * len(all_trainable_variables)
trainable_variable_placeholders = [0] * len(all_trainable_variables)
extra_variables = []
extra_variable_assigns = []
extra_variable_assign_placeholders = []
for v in all_variables:
p = tf.placeholder(dtype=v.dtype, shape=v.shape)
a = tf.assign(v, p)
if v.op.type == "VarHandleOp":
v_float_value = tf.to_float(v.read_value())
else:
v_float_value = tf.to_float(v)
if v.name in name2idx:
trainable_variables[name2idx[v.name]] = v_float_value
trainable_assigns[name2idx[v.name]] = a
trainable_variable_placeholders[name2idx[v.name]] = p
else:
extra_variables.append(v_float_value)
extra_variable_assigns.append(a)
extra_variable_assign_placeholders.append(p)
extra_variable_assign = tf.group(*extra_variable_assigns)
trainable_assign = tf.group(*trainable_assigns)
update_op = tf.group(update_ops)
return trainable_variables, trainable_variable_placeholders, trainable_assign, \
extra_variables, extra_variable_assign_placeholders, \
extra_variable_assign, update_op
@staticmethod
def _save_to_dir(folder, sess, graph,
metric_tensors,
batch_size_tensor,
loss_tensor, inputs, labels, predictions,
trainable_variables,
trainable_variable_placeholders,
trainable_assign,
extra_variables,
extra_variable_assign_placeholders,
extra_variable_assign,
grads, update_op, train_op,
additional_inputs,
additional_values):
import tensorflow as tf
from tensorflow import gfile
saver = tf.train.Saver()
if not os.path.isdir(folder):
os.makedirs(folder)
saver.save(sess, os.path.join(folder, "model"), write_meta_graph=False)
meta = {
"inputs": [i.name for i in inputs],
"input_types": [i.dtype.as_datatype_enum for i in inputs],
"additional_inputs": [i.name for i in additional_inputs],
"additional_input_types": [i.dtype.as_datatype_enum for i in additional_inputs],
"labels": [l.name for l in labels],
"label_types": [i.dtype.as_datatype_enum for i in labels],
"predictions": [t.name for t in predictions] if predictions else [],
"metric_tensors": [t.name for t in metric_tensors],
"batch_size_tensor": batch_size_tensor.name,
"loss_tensor": loss_tensor.name,
"variables": [v.name for v in trainable_variables],
"variable_types": [v.dtype.as_datatype_enum for v in trainable_variable_placeholders],
"variable_assign_placeholders": [v.name for v in trainable_variable_placeholders],
"assign_variable_op": trainable_assign.name,
"extra_variables": [v.name for v in extra_variables],
"extra_variable_types": [v.dtype.as_datatype_enum for v
in extra_variable_assign_placeholders],
"extra_variable_assign_placeholders": [p.name for p in
extra_variable_assign_placeholders],
"assign_extra_variable_op": extra_variable_assign.name,
"grad_variables": [g.name for g in grads],
"update_op": update_op.name,
"restore_op": saver.saver_def.restore_op_name,
"restore_path_placeholder": saver.saver_def.filename_tensor_name,
"save_op": _to_operation_name(saver.saver_def.save_tensor_name),
"save_path_placeholder": saver.saver_def.filename_tensor_name,
"default_tensor_value": [_to_floats(v) for v in additional_values],
"init_op": tf.tables_initializer().name
}
if train_op is not None:
meta["train_op"] = train_op.name
with open(os.path.join(folder, "training_meta.json"), "w") as f:
f.write(json.dumps(meta))
with gfile.GFile(os.path.join(folder, "model.meta"), "wb") as f:
f.write(graph.as_graph_def().SerializeToString())
return meta, saver
@staticmethod
def export(model_dir, loss_tensor, sess, inputs, labels, predictions, grads, variables, graph,
tensors_with_value, metrics, updates, train_op=None):
import tensorflow as tf
with graph.as_default():
batch_size_tensor = tf.to_float(tf.shape(inputs[0])[0])
inputs, additional_inputs, additional_values = \
TFModel._expand_inputs(inputs, tensors_with_value, loss_tensor)
metric_tensors, val_methods = TFModel._process_metrics(graph, metrics, batch_size_tensor)
grads = TFModel._process_grads(graph, grads)
trainable_variables, trainable_variable_placeholders, trainable_assign, \
extra_variables, extra_variable_assign_placeholders, \
extra_variable_assign, update_op = \
TFModel._process_variables(graph, variables, updates)
meta, saver = \
TFModel._save_to_dir(model_dir, sess, graph,
metric_tensors,
batch_size_tensor,
loss_tensor, inputs, labels, predictions,
trainable_variables,
trainable_variable_placeholders,
trainable_assign,
extra_variables,
extra_variable_assign_placeholders,
extra_variable_assign,
grads, update_op, train_op,
additional_inputs,
additional_values)
return meta, saver, val_methods
@staticmethod
def create(loss_tensor, sess, inputs, labels, predictions, grads, variables, graph,
tensors_with_value, session_config, metrics, updates,
model_dir, train_op=None):
if model_dir is None:
model_dir = tempfile.mkdtemp()
else:
if not os.path.isdir(model_dir):
os.makedirs(model_dir)
meta, saver, val_methods = TFModel.export(model_dir, loss_tensor, sess,
inputs, labels, predictions, grads, variables,
graph, tensors_with_value, metrics, updates,
train_op)
training_helper_layer = TFTrainingHelper(model_dir,
session_config, saver, meta, sess)
criterion = IdentityCriterion()
return TFModel(training_helper_layer, criterion, val_methods)
class TFOptimizer:
def __init__(self, tf_model, optim_method,
sess=None, dataset=None,
clip_norm=None, clip_value=None,
model_dir=None):
self.optim_method = optim_method
self.sess = sess
self.dataset = dataset
self.clip_norm = clip_norm
if clip_value is not None and not isinstance(clip_value, tuple):
raise ValueError("The clip_value argument should be a tuple (min_value, max_value)")
self.clip_constant = clip_value
if self.dataset.batch_size <= 0:
raise ValueError("You should set batch_size instead of batch_per_thread for training")
self.model_dir = model_dir
self.tf_model = tf_model
batch_size = self.dataset.batch_size
self.train_data = self.dataset.get_training_data()
self.val_data = self.dataset.get_validation_data()
self.batch_size = batch_size
self.estimator = Estimator(self.tf_model.training_helper_layer,
self.optim_method,
self.model_dir)
if self.clip_norm:
self.estimator.set_l2_norm_gradient_clipping(self.clip_norm)
if self.clip_constant:
min_value, max_value = self.clip_constant
self.estimator.set_constant_gradient_clipping(min_value, max_value)
def load_checkpoint(self, path, version):
model_path = os.path.join(path, "model.{}".format(version))
optim_method_path = os.path.join(path, "optimMethod-TFParkTraining.{}".format(version))
self.tf_model.training_helper_layer.load_checkpoint(model_path)
self.optim_method = OptimMethod.load(optim_method_path)
self.estimator = Estimator(self.tf_model.training_helper_layer,
self.optim_method,
self.model_dir)
if self.clip_norm:
self.estimator.set_l2_norm_gradient_clipping(self.clip_norm)
if self.clip_constant:
min_value, max_value = self.clip_constant
self.estimator.set_constant_gradient_clipping(min_value, max_value)
@staticmethod
def _get_or_create_session(session):
import tensorflow as tf
if session is None:
sess = tf.Session()
sess.run(tf.global_variables_initializer())
else:
sess = session
return sess
@staticmethod
def _get_dataset_from_loss(loss):
import tensorflow as tf
all_required_inputs = find_placeholders([loss])
dataset = tf.get_collection(all_required_inputs[0].name)[0]
return dataset
@staticmethod
def _get_vars_grads(loss):
import tensorflow as tf
grads_vars = tf.train.GradientDescentOptimizer(0).compute_gradients(loss)
grads_vars.sort(key=lambda grad_var: grad_var[1].name)
variables = []
grads = []
for (grad, var) in grads_vars:
if grad is not None:
variables.append(var)
grads.append(grad)
return grads, variables
@staticmethod
def _get_vars_grads_from_train_op(train_op):
def predicate(t):
return t.name.split("/")[-1].startswith("zoo_identity_op_for_grad")
grads = find_tensors([train_op], predicate)
grad_ops = [grad.op for grad in grads]
variables = []
for grad in grad_ops:
var = list(grad.control_inputs)[0]
if var.name == "VarHandleOp":
variables.append(var)
else:
variables.append(list(var.outputs)[0])
return grads, variables
@classmethod
def from_train_op(cls, train_op, loss, *, inputs=None, labels=None, metrics=None, updates=None,
sess=None, dataset=None, tensor_with_value=None, session_config=None,
model_dir=None):
sess = TFOptimizer._get_or_create_session(sess)
grads, variables = TFOptimizer._get_vars_grads_from_train_op(train_op)
if dataset is None:
dataset = TFOptimizer._get_dataset_from_loss(loss)
_ = dataset.tensors
dataset_inputs = dataset._original_tensors
if isinstance(dataset_inputs, tuple) and len(dataset_inputs) == 2:
if inputs is None:
inputs = dataset_inputs[0]
if labels is None:
labels = dataset_inputs[1]
else:
if inputs is None:
inputs = dataset_inputs
if labels is None:
labels = []
inputs = nest.flatten(inputs)
labels = nest.flatten(labels)
from zoo.tfpark.zoo_optimizer import FakeOptimMethod
return TFOptimizer._from_grads(loss=loss, sess=sess, inputs=inputs, labels=labels,
grads=grads,
variables=variables, dataset=dataset, metrics=metrics,
tensor_with_value=tensor_with_value,
optim_method=FakeOptimMethod(),
session_config=session_config, updates=updates,
model_dir=model_dir, train_op=train_op)
@classmethod
def _from_grads(cls, loss, sess, inputs, labels, grads, variables, dataset, optim_method=None,
clip_norm=None, clip_value=None,
metrics=None, tensor_with_value=None, session_config=None,
model_dir=None, updates=None, train_op=None):
graph = loss.graph
if metrics is None:
metrics = {}
tf_model = TFModel.create(loss, sess, inputs, labels, [], grads, variables, graph,
tensor_with_value, session_config, metrics,
updates, model_dir=None, train_op=train_op)
return cls(tf_model, optim_method, sess=sess, dataset=dataset,
clip_norm=clip_norm, clip_value=clip_value, model_dir=model_dir)
@classmethod
def from_loss(cls, loss, optim_method, session=None, inputs=None, dataset=None,
val_outputs=None, val_labels=None, val_method=None,
clip_norm=None, clip_value=None, metrics=None,
tensor_with_value=None, session_config=None, model_dir=None, updates=None):
sess = TFOptimizer._get_or_create_session(session)
grads, variables = TFOptimizer._get_vars_grads(loss)
if dataset is None and inputs is None:
dataset = TFOptimizer._get_dataset_from_loss(loss)
inputs = dataset._original_tensors
else:
if inputs is None:
raise ValueError("please specify inputs")
_ = dataset.tensors
if isinstance(inputs, tuple) and len(inputs) == 2:
inputs, labels = inputs
else:
labels = []
inputs = nest.flatten(inputs)
labels = nest.flatten(labels)
if clip_value is not None:
if isinstance(clip_value, float) or isinstance(clip_value, int):
if clip_value <= 0:
ValueError("The clip_value argument should be positive number")
clip_value = (-float(clip_value), float(clip_value))
if not isinstance(clip_value, tuple):
raise ValueError("The clip_value argument should be" +
" a positive float/int which clips to" +
" (-clip_value, clip_value); " +
"or a tuple which clips to (min_value, max_value)")
if val_method is not None:
val_methods = to_list(val_method)
if metrics is None:
metrics = {}
for i, method in enumerate(val_methods):
metrics['bigdl_metric_' + str(i)] = BigDLMetric(method, val_outputs, val_labels)
return TFOptimizer._from_grads(loss, sess, inputs, labels, grads, variables, dataset,
optim_method, clip_norm, clip_value,
metrics, tensor_with_value, session_config,
model_dir, updates)
@staticmethod
def export_training_model(export_dir, loss, sess, inputs, labels=None, predictions=None,
metrics=None, tensor_with_value=None, updates=None):
grads, variables = TFOptimizer._get_vars_grads(loss)
TFModel.export(export_dir, loss, sess, inputs, labels, predictions, grads, variables,
loss.graph, tensor_with_value, metrics, updates)
logging.info("Exported TensorFlow model in {} for training".format(export_dir))
@staticmethod
def _shape_match(model_shape, dataset_shape):
for i in range(len(dataset_shape)):
if dataset_shape[i].value is None:
return model_shape[i].value is None
else:
return dataset_shape[i].value == model_shape[i].value or \
model_shape[i].value is None
@classmethod
def from_keras(cls, keras_model, dataset,
session_config=None, model_dir=None, metrics=None, optimizer=None):
import tensorflow.keras.backend as K
model_inputs = keras_model.inputs
if hasattr(keras_model, "targets"):
model_targets = keras_model.targets
else:
model_targets = keras_model._targets
model_targets = list(filter(lambda x: x is not None, model_targets))
flatten_inputs = nest.flatten(dataset.feature_tensors)
assert len(model_inputs) == len(flatten_inputs), \
("the keras model and TFDataset should have the same number of tensors" +
" keras model has {} inputs " +
"while TFDataset has {} inputs").format(len(model_inputs),
len(flatten_inputs))
for i in range(len(flatten_inputs)):
if not TFOptimizer._shape_match(model_inputs[i].shape, flatten_inputs[i].shape):
raise ValueError(("The {}th input in keras model {}"
" does not match the TFDataset"
"input {}").format(i,
model_inputs[i],
flatten_inputs[i]))
flatten_targets = nest.flatten(dataset.label_tensors)
assert len(model_targets) == len(flatten_targets), \
("the keras model and TFDataset should have the same number of tensors" +
" keras model has {} targets " +
"while TFDataset has {} labels").format(len(model_targets),
len(flatten_inputs))
loss = keras_model.total_loss
variables = keras_model._collected_trainable_weights
variables.sort(key=lambda variable: variable.name)
keras_optimizer = keras_model.optimizer
from zoo.tfpark.zoo_optimizer import get_gradients_for_keras
grads = get_gradients_for_keras(keras_optimizer, loss, variables)
grads_and_vars = list(zip(grads, variables))
import tensorflow.python.keras.optimizers as koptimizers
if isinstance(keras_optimizer, koptimizers.TFOptimizer):
train_op = keras_optimizer.optimizer.apply_gradients(grads_and_vars)
else:
train_op = keras_optimizer.apply_gradients(grads_and_vars)
sess = K.get_session()
if keras_model.metrics and (dataset.get_validation_data() is not None):
if isinstance(keras_model.metrics, dict):
raise ValueError(
"different metrics for different outputs are not supported right now")
if len(keras_model.outputs) > 1:
if not all([name.endswith("loss") for name in keras_model.metrics_names]):
raise ValueError("metrics (except loss) for multi-head model is not supported")
else:
bigdl_val_methods = [Loss()]
val_outputs = keras_model.outputs
val_labels = model_targets
else:
bigdl_val_methods = \
[to_bigdl_metric(m, keras_model.loss) for m in keras_model.metrics_names]
val_outputs = keras_model.outputs
val_labels = model_targets
else:
val_outputs = None
val_labels = None
bigdl_val_methods = None
tensor_with_value = {
K.learning_phase(): [True, False]
}
updates = []
updates += keras_model.get_updates_for(None)
updates += keras_model.get_updates_for(keras_model.inputs)
if bigdl_val_methods is not None:
val_methods = to_list(bigdl_val_methods)
bigdl_metrics = {}
for i, method in enumerate(val_methods):
bigdl_metrics['bigdl_metric_' + str(i)] = BigDLMetric(method,
val_outputs,
val_labels)
if metrics is None:
metrics = bigdl_metrics
else:
metrics.update(bigdl_metrics)
if optimizer is not None:
clip_norm = None
clip_value = None
if hasattr(keras_optimizer, 'clipnorm'):
clip_norm = keras_optimizer.clipnorm
if hasattr(keras_optimizer, 'clipvalue'):
clip_value = (-keras_optimizer.clipvalue, keras_optimizer.clipvalue)
tf_model = TFModel.create(loss, sess, model_inputs, model_targets, keras_model.outputs,
grads, variables, loss.graph,
tensor_with_value, session_config, metrics,
updates, model_dir=None)
return cls(tf_model, optimizer, sess=sess, dataset=dataset,
clip_norm=clip_norm, clip_value=clip_value, model_dir=model_dir)
return cls.from_train_op(train_op, loss, inputs=model_inputs, labels=model_targets,
metrics=metrics, updates=updates, sess=sess, dataset=dataset,
tensor_with_value=tensor_with_value, session_config=session_config,
model_dir=model_dir)
def set_constant_gradient_clipping(self, min_value, max_value):
self.estimator.set_constant_gradient_clipping(min_value, max_value)
    def set_gradient_clipping_by_l2_norm(self, clip_norm):
        """Enable L2-norm gradient clipping on the wrapped estimator.

        Pure delegation: forwards the threshold to
        ``self.estimator.set_l2_norm_gradient_clipping``.

        :param clip_norm: L2-norm clipping threshold passed to the estimator
        """
        self.estimator.set_l2_norm_gradient_clipping(clip_norm)
def optimize(self, end_trigger=None, checkpoint_trigger=None):
if end_trigger is None:
end_trigger = MaxEpoch(1)
if checkpoint_trigger is None:
checkpoint_trigger = EveryEpoch()
if self.tf_model.val_methods and self.val_data is not None:
self.estimator.train_minibatch(train_set=self.train_data,
criterion=self.tf_model.criterion,
end_trigger=end_trigger,
checkpoint_trigger=checkpoint_trigger,
validation_set=self.val_data,
validation_method=self.tf_model.val_methods)
else:
self.estimator.train_minibatch(train_set=self.train_data,
criterion=self.tf_model.criterion,
end_trigger=end_trigger,
checkpoint_trigger=checkpoint_trigger)
self.tf_model.training_helper_layer.get_weights_to_python()
| true | true |
1c33da407ad99283d1a971061d91d579fea47eb8 | 1,831 | py | Python | src/101_createIndex.py | hp-db/dev | de0924f791534f554120c6eb74f0409b5b3dc39a | [
"Apache-2.0"
] | null | null | null | src/101_createIndex.py | hp-db/dev | de0924f791534f554120c6eb74f0409b5b3dc39a | [
"Apache-2.0"
] | null | null | null | src/101_createIndex.py | hp-db/dev | de0924f791534f554120c6eb74f0409b5b3dc39a | [
"Apache-2.0"
] | null | null | null | import pandas as pd
from rdflib import URIRef, BNode, Literal, Graph
from rdflib.namespace import RDF, RDFS, FOAF, XSD
from rdflib import Namespace
import numpy as np
import math
import sys
import argparse
import json
import urllib.parse
path = "../static/data/curation_old.json"
json_open = open(path, 'r')
df = json.load(json_open)
selections = df["selections"]
# print(len(selections))
index = []
for selection in selections:
members = selection["members"]
manifest = selection["within"]["@id"]
for member in members:
# print(member)
metadataObj = {}
metadata = member["metadata"]
metadata2 = []
for m in metadata:
label = m["label"]
value = m["value"]
if label not in metadataObj:
metadataObj[label] = []
values = value if isinstance(value, list) else [str(value)]
for value in values:
metadataObj[label].append(value)
id = member["label"].replace("[", "").replace("]", "")
# print(metadataObj)
metadataObj["_label"] = metadataObj["Hieratic No"][0]+"("+metadataObj["Hieroglyph No"][0]+")"
metadataObj["_id"] = id
metadataObj["_image"] = member["thumbnail"]
mid = member["@id"]
mid_spl = mid.split("#xywh=")
canvas = mid_spl[0]
xywh = mid_spl[1]
related = "http://codh.rois.ac.jp/software/iiif-curation-viewer/demo/?manifest="+manifest+"&canvas="+canvas+"&xywh="+xywh+"&xywh_highlight=border"
metadataObj["_related"] = related
metadataObj["_url"] = "https://w3id.org/hpdb/item/" + id
index.append(metadataObj)
fw = open("../static/data/index.json", 'w')
json.dump(index, fw, ensure_ascii=False, indent=4,
sort_keys=True, separators=(',', ': ')) | 24.413333 | 154 | 0.602403 | import pandas as pd
from rdflib import URIRef, BNode, Literal, Graph
from rdflib.namespace import RDF, RDFS, FOAF, XSD
from rdflib import Namespace
import numpy as np
import math
import sys
import argparse
import json
import urllib.parse
path = "../static/data/curation_old.json"
json_open = open(path, 'r')
df = json.load(json_open)
selections = df["selections"]
index = []
for selection in selections:
members = selection["members"]
manifest = selection["within"]["@id"]
for member in members:
metadataObj = {}
metadata = member["metadata"]
metadata2 = []
for m in metadata:
label = m["label"]
value = m["value"]
if label not in metadataObj:
metadataObj[label] = []
values = value if isinstance(value, list) else [str(value)]
for value in values:
metadataObj[label].append(value)
id = member["label"].replace("[", "").replace("]", "")
metadataObj["_label"] = metadataObj["Hieratic No"][0]+"("+metadataObj["Hieroglyph No"][0]+")"
metadataObj["_id"] = id
metadataObj["_image"] = member["thumbnail"]
mid = member["@id"]
mid_spl = mid.split("#xywh=")
canvas = mid_spl[0]
xywh = mid_spl[1]
related = "http://codh.rois.ac.jp/software/iiif-curation-viewer/demo/?manifest="+manifest+"&canvas="+canvas+"&xywh="+xywh+"&xywh_highlight=border"
metadataObj["_related"] = related
metadataObj["_url"] = "https://w3id.org/hpdb/item/" + id
index.append(metadataObj)
fw = open("../static/data/index.json", 'w')
json.dump(index, fw, ensure_ascii=False, indent=4,
sort_keys=True, separators=(',', ': ')) | true | true |
1c33dae046d778c2acefa8efab3c4ae7565e1bc3 | 348 | py | Python | spark_work.py | nszceta/spark-python-celery-demo | c5b03be4bb96699f8e41aa8a42fecd4c25c76331 | [
"MIT"
] | 8 | 2016-01-19T15:59:36.000Z | 2018-04-25T09:00:57.000Z | spark_work.py | nszceta/spark-python-celery-demo | c5b03be4bb96699f8e41aa8a42fecd4c25c76331 | [
"MIT"
] | null | null | null | spark_work.py | nszceta/spark-python-celery-demo | c5b03be4bb96699f8e41aa8a42fecd4c25c76331 | [
"MIT"
] | null | null | null | import sys
from pyspark import SparkContext
import json
# Count the lines of the given log file that contain 'a' and those that
# contain 'b', and print the two counts as a JSON object on stdout.
print('spark got python path -> ' + str(sys.executable))
logfile = sys.argv[1]
sc = SparkContext()
logdata = sc.textFile(logfile).cache()

def _count_lines_containing(ch):
    # One Spark action per character; the cached RDD is reused by both calls.
    return logdata.filter(lambda s: ch in s).count()

a_count = _count_lines_containing('a')
b_count = _count_lines_containing('b')
print(json.dumps({'a': a_count, 'b': b_count}))
| 31.636364 | 56 | 0.70977 | import sys
from pyspark import SparkContext
import json
print('spark got python path -> ' + str(sys.executable))
logfile = sys.argv[1]
sc = SparkContext()
logdata = sc.textFile(logfile).cache()
a_count = logdata.filter(lambda s: 'a' in s).count()
b_count = logdata.filter(lambda s: 'b' in s).count()
print(json.dumps({'a': a_count, 'b': b_count}))
| true | true |
1c33dcbe1bba058258275b69fcc8e6ef20067d3a | 18,583 | py | Python | pypowervm/tests/test_util.py | stephenfin/pypowervm | 68f2b586b4f17489f379534ab52fc56a524b6da5 | [
"Apache-2.0"
] | 24 | 2015-12-02T19:49:45.000Z | 2021-11-17T11:43:51.000Z | pypowervm/tests/test_util.py | stephenfin/pypowervm | 68f2b586b4f17489f379534ab52fc56a524b6da5 | [
"Apache-2.0"
] | 18 | 2017-03-01T05:54:25.000Z | 2022-03-14T17:32:47.000Z | pypowervm/tests/test_util.py | stephenfin/pypowervm | 68f2b586b4f17489f379534ab52fc56a524b6da5 | [
"Apache-2.0"
] | 17 | 2016-02-10T22:53:04.000Z | 2021-11-10T09:47:10.000Z | # Copyright 2014, 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
import unittest
from pypowervm import const
from pypowervm import util
if six.PY2:
import __builtin__ as builtins
elif six.PY3:
import builtins
dummyuuid1 = "abcdef01-2345-2345-2345-67890abcdef0"
dummyuuid2 = "67890abc-5432-5432-5432-def0abcdef01"
class TestUtil(unittest.TestCase):
    """Unit tests for pypowervm.util."""
    def test_convert_bytes_to_gb(self):
        """convert_bytes_to_gb: conversion, low-value floor, and dp."""
        # A round 1 GB
        test = util.convert_bytes_to_gb(1024 * 1024 * 1024)
        self.assertEqual(1.0, test)
        # A single MB
        test = util.convert_bytes_to_gb(1024 * 1024.0)
        self.assertEqual(0.0009765625, test)
        # A single byte - should be the low Value
        self.assertEqual(.0001, util.convert_bytes_to_gb(1))
        # Try changing the low value
        self.assertEqual(.0005, util.convert_bytes_to_gb(1, .0005))
        # Round up
        self.assertEqual(1.15, util.convert_bytes_to_gb(1224067890, dp=2))
        # Low value still honors dp
        self.assertEqual(0.01, util.convert_bytes_to_gb(1, dp=2))
    def test_round_gb_size_up(self):
        """round_gb_size_up rounds upward, honoring dp (incl. 0/negative)."""
        self.assertEqual(12.35, util.round_gb_size_up(12.34000000001))
        self.assertEqual(12.34000000001, util.round_gb_size_up(12.34000000001,
                                                               dp=11))
        self.assertEqual(1048576, util.round_gb_size_up(1048576.0, dp=0))
        self.assertEqual(1048576, util.round_gb_size_up(1048575.1, dp=0))
        self.assertEqual(1048576, util.round_gb_size_up(1048576, dp=0))
        self.assertEqual(1048600, util.round_gb_size_up(1048576.1234, dp=-2))
    def test_sanitize_bool_for_api(self):
        """Bools and bool-like strings map to lowercase 'true'/'false'."""
        self.assertEqual('true', util.sanitize_bool_for_api(True))
        self.assertEqual('false', util.sanitize_bool_for_api(False))
        self.assertEqual('true', util.sanitize_bool_for_api('True'))
        self.assertEqual('false', util.sanitize_bool_for_api('False'))
    def test_find_wrapper(self):
        """find_wrapper matches by uuid attribute; None when absent."""
        wrap1 = mock.MagicMock()
        wrap1.uuid = 'a'
        wrap2 = mock.MagicMock()
        wrap2.uuid = 'b'
        wraps = [wrap1, wrap2]
        self.assertEqual(wrap1, util.find_wrapper(wraps, 'a'))
        self.assertEqual(wrap2, util.find_wrapper(wraps, 'b'))
        self.assertIsNone(util.find_wrapper(wraps, 'c'))
    def test_dice_href(self):
        """dice_href slices scheme/netloc, query, and fragment per kwargs."""
        href = 'https://server:1234/rest/api/uom/Obj/UUID//?group=One,Two#frag'
        self.assertEqual(util.dice_href(href),
                         '/rest/api/uom/Obj/UUID?group=One,Two#frag')
        self.assertEqual(util.dice_href(href, include_query=True),
                         '/rest/api/uom/Obj/UUID?group=One,Two#frag')
        self.assertEqual(util.dice_href(href, include_fragment=False),
                         '/rest/api/uom/Obj/UUID?group=One,Two')
        self.assertEqual(util.dice_href(href, include_query=False),
                         '/rest/api/uom/Obj/UUID#frag')
        self.assertEqual(util.dice_href(href, include_fragment=True),
                         '/rest/api/uom/Obj/UUID?group=One,Two#frag')
        self.assertEqual(util.dice_href(href, include_query=False,
                                        include_fragment=True),
                         '/rest/api/uom/Obj/UUID#frag')
        self.assertEqual(util.dice_href(href, include_scheme_netloc=True,
                                        include_query=False,
                                        include_fragment=False),
                         'https://server:1234/rest/api/uom/Obj/UUID')
    def test_get_req_path_uuid_and_is_instance_path(self):
        """UUID extraction from request paths, incl. case and root= options."""
        # Fail: no '/'
        path = dummyuuid1
        self.assertIsNone(util.get_req_path_uuid(path))
        self.assertRaises(IndexError, util.is_instance_path, path)
        path = '/' + dummyuuid1
        self.assertEqual(dummyuuid1, util.get_req_path_uuid(path))
        self.assertTrue(util.is_instance_path(path))
        path = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1
        self.assertEqual(dummyuuid1, util.get_req_path_uuid(path))
        self.assertTrue(util.is_instance_path(path))
        # Fail: last path element is not a UUID
        path = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1 + '/Child'
        self.assertIsNone(util.get_req_path_uuid(path))
        self.assertFalse(util.is_instance_path(path))
        # Fail: last path element is not quiiiite a UUID
        path = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1[1:]
        self.assertIsNone(util.get_req_path_uuid(path))
        self.assertFalse(util.is_instance_path(path))
        # Ignore query/fragment
        path = ('https://server:1234/rest/api/uom/Obj/' + dummyuuid1 +
                '?group=One,Two#frag')
        self.assertEqual(dummyuuid1, util.get_req_path_uuid(path))
        self.assertTrue(util.is_instance_path(path))
        # Fail: last path element (having removed query/fragment) is not a UUID
        path = ('https://server:1234/rest/api/uom/Obj/' + dummyuuid1 +
                '/Child?group=One,Two#frag')
        self.assertIsNone(util.get_req_path_uuid(path))
        self.assertFalse(util.is_instance_path(path))
        # Default case conversion
        path = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1.upper()
        self.assertEqual(dummyuuid1, util.get_req_path_uuid(path))
        self.assertEqual(dummyuuid1, util.get_req_path_uuid(
            path, preserve_case=False))
        self.assertTrue(util.is_instance_path(path))
        # Force no case conversion
        self.assertEqual(dummyuuid1.upper(), util.get_req_path_uuid(
            path, preserve_case=True))
        # Child URI gets child UUID by default
        path = ('https://server:1234/rest/api/uom/Obj/' + dummyuuid1 +
                '/Child/' + dummyuuid2)
        self.assertEqual(dummyuuid2, util.get_req_path_uuid(path))
        self.assertTrue(util.is_instance_path(path))
        # Get root UUID from child URI
        path = ('https://server:1234/rest/api/uom/Obj/' + dummyuuid1 +
                '/Child/' + dummyuuid2)
        self.assertEqual(dummyuuid1, util.get_req_path_uuid(path, root=True))
        self.assertTrue(util.is_instance_path(path))
        # root=True redundant on a root path
        path = '/' + dummyuuid1
        self.assertEqual(dummyuuid1, util.get_req_path_uuid(path, root=True))
        path = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1
        self.assertEqual(dummyuuid1, util.get_req_path_uuid(path, root=True))
    def test_extend_basepath(self):
        """extend_basepath inserts ext before any query string/fragment."""
        ext = '/foo'
        # Various forms without query params or fragments
        for path in (dummyuuid1, '/' + dummyuuid1,
                     'https://server:1234/rest/api/uom/Obj/' + dummyuuid1,
                     'https://server:1234/rest/api/uom/Obj/' + dummyuuid1 +
                     '/Child'):
            self.assertEqual(path + ext, util.extend_basepath(path, ext))
        basepath = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1
        qp = '?foo=bar,baz&blah=123'
        frag = '#frag'
        # Query params
        self.assertEqual(basepath + ext + qp,
                         util.extend_basepath(basepath + qp, ext))
        # Fragment
        self.assertEqual(basepath + ext + frag,
                         util.extend_basepath(basepath + frag, ext))
        # Query params & fragment
        self.assertEqual(basepath + ext + qp + frag,
                         util.extend_basepath(basepath + qp + frag, ext))
    def test_sanitize_file_name_for_api(self):
        """File-name sanitizing: char replacement, prefix/suffix, max_len."""
        allc = ''.join(map(chr, range(256)))
        self.assertEqual('foo', util.sanitize_file_name_for_api('foo'))
        self.assertEqual(
            'config_foo.iso', util.sanitize_file_name_for_api(
                'foo', prefix='config_', suffix='.iso'))
        self.assertEqual(
            '______________________________________________._0123456789_______'
            'ABCDEFGHIJKLMN',
            util.sanitize_file_name_for_api(allc))
        self.assertEqual(
            'OPQRSTUVWXYZ______abcdefghijklmnopqrstuvwxyz_____________________'
            '______________',
            util.sanitize_file_name_for_api(allc[79:])
        )
        self.assertEqual(
            '_________________________________________________________________'
            '______________',
            util.sanitize_file_name_for_api(allc[158:])
        )
        self.assertEqual('___________________',
                         util.sanitize_file_name_for_api(allc[237:]))
        self.assertEqual(
            (dummyuuid1 + dummyuuid2[:7] + dummyuuid1).replace('-', '_'),
            util.sanitize_file_name_for_api(
                dummyuuid2, prefix=dummyuuid1, suffix=dummyuuid1))
        self.assertEqual('I____________',
                         util.sanitize_file_name_for_api(
                             u'I \u611B \u01A4\u0177\u03C1\uFF4F\u05E9\u5DF3'
                             u'\u5C3A\uFF56\uFF4D'))
        self.assertRaises(ValueError, util.sanitize_file_name_for_api, allc,
                          prefix=allc, suffix=allc)
        self.assertRaises(ValueError, util.sanitize_file_name_for_api, '')
        # Non-default max_len values
        self.assertEqual('abcdefghijklmno', util.sanitize_file_name_for_api(
            'abcdefghijklmnopqrstuvwxyz', max_len=const.MaxLen.VDISK_NAME))
        self.assertEqual(
            'abcdefghijklmnopqrstuvwxyz0123456789A',
            util.sanitize_file_name_for_api(
                'abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNO',
                max_len=const.MaxLen.VOPT_NAME))
    def test_sanitize_partition_name_for_api(self):
        """Partition-name sanitizing across the whole 8-bit char range."""
        allc = ''.join(map(chr, range(256)))
        self.assertEqual('foo', util.sanitize_partition_name_for_api('foo'))
        self.assertEqual('_______________________________',
                         util.sanitize_partition_name_for_api(allc))
        self.assertEqual('_ !_#_%_____+,-./0123456789:;_=',
                         util.sanitize_partition_name_for_api(allc[31:]))
        self.assertEqual('__@ABCDEFGHIJKLMNOPQRSTUVWXYZ__',
                         util.sanitize_partition_name_for_api(allc[62:]))
        self.assertEqual('_^__abcdefghijklmnopqrstuvwxyz{',
                         util.sanitize_partition_name_for_api(allc[93:]))
        self.assertEqual('_}_____________________________',
                         util.sanitize_partition_name_for_api(allc[124:]))
        for start in (155, 186, 217):
            self.assertEqual(
                '_______________________________',
                util.sanitize_partition_name_for_api(allc[start:]))
        self.assertEqual('________',
                         util.sanitize_partition_name_for_api(allc[248:]))
        self.assertEqual('I _ _________',
                         util.sanitize_partition_name_for_api(
                             u'I \u611B \u01A4\u0177\u03C1\uFF4F\u05E9\u5DF3'
                             u'\u5C3A\uFF56\uFF4D'))
        self.assertRaises(ValueError, util.sanitize_partition_name_for_api,
                          allc, trunc_ok=False)
        self.assertRaises(ValueError, util.sanitize_partition_name_for_api, '')
        self.assertRaises(ValueError, util.sanitize_partition_name_for_api,
                          None)
    # Tests for check_and_apply_xag covered by
    # test_adapter.TestAdapter.test_extended_path
    def test_part_id_by_loc_code(self):
        """part_id_by_loc_code parses the V<id> token; None on bad input."""
        test_loc = 'U8247.22L.2125D6A-V2-C3'
        fail_loc = 'abc1234'
        self.assertEqual(util.part_id_by_loc_code(test_loc), 2)
        self.assertIsNone(util.part_id_by_loc_code(fail_loc))
    def test_xag_attrs(self):
        """xag_attrs adds 'group' to the base attrs only when xag is set."""
        base = const.DEFAULT_SCHEMA_ATTR
        self.assertEqual(dict(base), util.xag_attrs(''))
        self.assertEqual(dict(base), util.xag_attrs(None))
        self.assertEqual(dict(base, group='foo'), util.xag_attrs('foo'))
        # Test other bases
        self.assertEqual(dict(one=2), util.xag_attrs(None, base=dict(one=2)))
        self.assertEqual(dict(one=2, group='foo'),
                         util.xag_attrs('foo', base=dict(one=2)))
    @mock.patch.object(builtins, 'open')
    def test_my_partition_id(self, m_open):
        """my_partition_id parses the partition_id=<int> line of the file."""
        def rit():
            # Simulated key=value file contents, one line per iteration.
            for line in ('foo=bar\n', 'partition_id=1234\n', '\n', 'a=b\n'):
                yield line
        m_open.return_value.__enter__.return_value.__iter__.side_effect = rit
        self.assertEqual(1234, util.my_partition_id())
    def test_parent_spec(self):
        """parent_spec resolves (type, uuid) from wrapper and/or strings."""
        # All params are None (ROOT request)
        self.assertEqual((None, None), util.parent_spec(None, None, None))
        # Get values from parent
        parent = mock.Mock(schema_type='schema_type', uuid='uuid')
        self.assertEqual(('schema_type', 'uuid'), util.parent_spec(
            parent, None, None))
        # Parent overrides parent_type/parent_uuid
        self.assertEqual(('schema_type', 'uuid'), util.parent_spec(
            parent, 'something', 'else'))
        # ValueError if type xor uuid specified
        self.assertRaises(ValueError, util.parent_spec, None, 'one', None)
        self.assertRaises(ValueError, util.parent_spec, None, None, 'two')
        # Non-wrapper, non-string parent type raises ValueError
        self.assertRaises(ValueError, util.parent_spec, None, 42, 'foo')
        # parent_type can be wrapper or string
        self.assertEqual(('schema_type', 'uuid2'), util.parent_spec(
            None, parent, 'uuid2'))
        self.assertEqual(('schema_type2', 'uuid2'), util.parent_spec(
            None, 'schema_type2', 'uuid2'))
    def test_retry_io_command(self):
        """retry_io_command retries errno-4 OS/IO errors, re-raises others.

        The first scenario shows three errno-4 side effects consumed
        (call_count == 3) before the non-IO error surfaces; the later
        scenarios show errno-1 errors propagating on the first call.
        """
        class MyOSError(OSError):
            def __init__(self, errno):
                super(MyOSError, self).__init__()
                self.errno = errno
        class MyIOError(IOError):
            def __init__(self, errno):
                super(MyIOError, self).__init__()
                self.errno = errno
        class MyValError(ValueError):
            def __init__(self, errno):
                super(MyValError, self).__init__()
                self.errno = errno
        func = mock.Mock()
        mock_os_intr = MyOSError(4)
        mock_io_intr = MyIOError(4)
        mock_val_intr = MyValError(4)
        mock_os_hup = MyOSError(1)
        mock_io_hup = MyIOError(1)
        func.side_effect = [mock_os_intr, mock_io_intr, mock_val_intr]
        self.assertRaises(MyValError, util.retry_io_command, func)
        self.assertEqual(3, func.call_count)
        func.reset_mock()
        func.side_effect = mock_os_hup
        self.assertRaises(MyOSError, util.retry_io_command, func, 1, 'a')
        func.assert_called_once_with(1, 'a')
        func.reset_mock()
        func.side_effect = mock_io_hup
        self.assertRaises(MyIOError, util.retry_io_command, func)
        func.assert_called_once_with()
class TestAllowedList(unittest.TestCase):
    """Unit tests for util.VLANList / util.MACList marshaling helpers."""
    def test_all_none(self):
        """'ALL'/'NONE' (any case, bare or list-wrapped) normalize upper."""
        for cls in (util.VLANList, util.MACList):
            for val in ('ALL', 'NONE'):
                self.assertEqual(val, cls.unmarshal(val))
            for val in ('ALL', 'NONE', 'all', 'none', 'aLl', 'nOnE'):
                self.assertEqual(val.upper(), cls.marshal(val))
                self.assertEqual(val.upper(), cls.const_or_list(val))
                self.assertEqual(val.upper(), cls.marshal([val]))
                self.assertEqual(val.upper(), cls.const_or_list([val]))
    def test_unmarshal(self):
        """unmarshal splits space-separated strings into typed lists."""
        # Test VLAN lists
        self.assertEqual([1, 2], util.VLANList.unmarshal('1 2'))
        self.assertEqual([0], util.VLANList.unmarshal('0'))
        self.assertEqual([5, 6, 2230, 3340],
                         util.VLANList.unmarshal('5 6 2230 3340'))
        # Test MAC lists
        self.assertEqual(['AB12CD34EF56', '12AB34CD56EF'],
                         util.MACList.unmarshal('AB12CD34EF56 12AB34CD56EF'))
        self.assertEqual(['AB12CD34EF56'],
                         util.MACList.unmarshal('AB12CD34EF56'))
    def test_marshal(self):
        """marshal joins/normalizes lists; rejects empty or invalid input."""
        # Test VLAN lists
        self.assertEqual('1 2', util.VLANList.marshal([1, 2]))
        self.assertEqual('0', util.VLANList.marshal([0]))
        self.assertEqual('5 6 2230 3340',
                         util.VLANList.marshal([5, 6, '2230', 3340]))
        # Test MAC lists (colons stripped, case uppered)
        self.assertEqual('AB12CD34EF56 12AB34CD56EF', util.MACList.marshal(
            ['aB:12:Cd:34:eF:56', '12Ab34cD56Ef']))
        self.assertEqual('AB12CD34EF56', util.MACList.marshal(
            ['Ab:12:cD:34:Ef:56']))
        # Test error cases
        for cls in (util.VLANList, util.MACList):
            self.assertRaises(ValueError, cls.marshal, None)
            self.assertRaises(ValueError, cls.marshal, '')
            self.assertRaises(ValueError, cls.marshal, ' ')
            self.assertRaises(ValueError, cls.marshal, 'bogus')
    def test_const_or_list(self):
        """const_or_list passes valid lists through (normalized for MACs)."""
        # Test VLAN lists
        for l2t in ([1, 2], [0], [5, 6, 2230, 3340]):
            self.assertEqual(l2t, util.VLANList.const_or_list(l2t))
        # Test MAC lists
        self.assertEqual(['AB12CD34EF56', '12AB34CD56EF'],
                         util.MACList.const_or_list(
                             ['aB:12:Cd:34:eF:56', '12Ab34cD56Ef']))
        self.assertEqual(['AB12CD34EF56'], util.MACList.const_or_list(
            ['Ab:12:cD:34:Ef:56']))
        # Test error cases
        for cls in (util.VLANList, util.MACList):
            for meth in (cls.marshal, cls.const_or_list):
                self.assertRaises(ValueError, meth, None)
                self.assertRaises(ValueError, meth, '')
                self.assertRaises(ValueError, meth, ' ')
                self.assertRaises(ValueError, meth, 'bogus')
        self.assertRaises(ValueError, util.VLANList.marshal, ['1', 'NaN', 2])
        self.assertRaises(ValueError, util.VLANList.const_or_list, ['1', 'NaN',
                                                                    2])
| 46.111663 | 79 | 0.617715 |
import mock
import six
import unittest
from pypowervm import const
from pypowervm import util
if six.PY2:
import __builtin__ as builtins
elif six.PY3:
import builtins
dummyuuid1 = "abcdef01-2345-2345-2345-67890abcdef0"
dummyuuid2 = "67890abc-5432-5432-5432-def0abcdef01"
class TestUtil(unittest.TestCase):
def test_convert_bytes_to_gb(self):
test = util.convert_bytes_to_gb(1024 * 1024 * 1024)
self.assertEqual(1.0, test)
test = util.convert_bytes_to_gb(1024 * 1024.0)
self.assertEqual(0.0009765625, test)
self.assertEqual(.0001, util.convert_bytes_to_gb(1))
self.assertEqual(.0005, util.convert_bytes_to_gb(1, .0005))
self.assertEqual(1.15, util.convert_bytes_to_gb(1224067890, dp=2))
self.assertEqual(0.01, util.convert_bytes_to_gb(1, dp=2))
def test_round_gb_size_up(self):
self.assertEqual(12.35, util.round_gb_size_up(12.34000000001))
self.assertEqual(12.34000000001, util.round_gb_size_up(12.34000000001,
dp=11))
self.assertEqual(1048576, util.round_gb_size_up(1048576.0, dp=0))
self.assertEqual(1048576, util.round_gb_size_up(1048575.1, dp=0))
self.assertEqual(1048576, util.round_gb_size_up(1048576, dp=0))
self.assertEqual(1048600, util.round_gb_size_up(1048576.1234, dp=-2))
def test_sanitize_bool_for_api(self):
self.assertEqual('true', util.sanitize_bool_for_api(True))
self.assertEqual('false', util.sanitize_bool_for_api(False))
self.assertEqual('true', util.sanitize_bool_for_api('True'))
self.assertEqual('false', util.sanitize_bool_for_api('False'))
def test_find_wrapper(self):
wrap1 = mock.MagicMock()
wrap1.uuid = 'a'
wrap2 = mock.MagicMock()
wrap2.uuid = 'b'
wraps = [wrap1, wrap2]
self.assertEqual(wrap1, util.find_wrapper(wraps, 'a'))
self.assertEqual(wrap2, util.find_wrapper(wraps, 'b'))
self.assertIsNone(util.find_wrapper(wraps, 'c'))
def test_dice_href(self):
href = 'https://server:1234/rest/api/uom/Obj/UUID//?group=One,Two#frag'
self.assertEqual(util.dice_href(href),
'/rest/api/uom/Obj/UUID?group=One,Two#frag')
self.assertEqual(util.dice_href(href, include_query=True),
'/rest/api/uom/Obj/UUID?group=One,Two#frag')
self.assertEqual(util.dice_href(href, include_fragment=False),
'/rest/api/uom/Obj/UUID?group=One,Two')
self.assertEqual(util.dice_href(href, include_query=False),
'/rest/api/uom/Obj/UUID#frag')
self.assertEqual(util.dice_href(href, include_fragment=True),
'/rest/api/uom/Obj/UUID?group=One,Two#frag')
self.assertEqual(util.dice_href(href, include_query=False,
include_fragment=True),
'/rest/api/uom/Obj/UUID#frag')
self.assertEqual(util.dice_href(href, include_scheme_netloc=True,
include_query=False,
include_fragment=False),
'https://server:1234/rest/api/uom/Obj/UUID')
def test_get_req_path_uuid_and_is_instance_path(self):
path = dummyuuid1
self.assertIsNone(util.get_req_path_uuid(path))
self.assertRaises(IndexError, util.is_instance_path, path)
path = '/' + dummyuuid1
self.assertEqual(dummyuuid1, util.get_req_path_uuid(path))
self.assertTrue(util.is_instance_path(path))
path = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1
self.assertEqual(dummyuuid1, util.get_req_path_uuid(path))
self.assertTrue(util.is_instance_path(path))
path = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1 + '/Child'
self.assertIsNone(util.get_req_path_uuid(path))
self.assertFalse(util.is_instance_path(path))
path = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1[1:]
self.assertIsNone(util.get_req_path_uuid(path))
self.assertFalse(util.is_instance_path(path))
path = ('https://server:1234/rest/api/uom/Obj/' + dummyuuid1 +
'?group=One,Two#frag')
self.assertEqual(dummyuuid1, util.get_req_path_uuid(path))
self.assertTrue(util.is_instance_path(path))
path = ('https://server:1234/rest/api/uom/Obj/' + dummyuuid1 +
'/Child?group=One,Two#frag')
self.assertIsNone(util.get_req_path_uuid(path))
self.assertFalse(util.is_instance_path(path))
path = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1.upper()
self.assertEqual(dummyuuid1, util.get_req_path_uuid(path))
self.assertEqual(dummyuuid1, util.get_req_path_uuid(
path, preserve_case=False))
self.assertTrue(util.is_instance_path(path))
self.assertEqual(dummyuuid1.upper(), util.get_req_path_uuid(
path, preserve_case=True))
path = ('https://server:1234/rest/api/uom/Obj/' + dummyuuid1 +
'/Child/' + dummyuuid2)
self.assertEqual(dummyuuid2, util.get_req_path_uuid(path))
self.assertTrue(util.is_instance_path(path))
path = ('https://server:1234/rest/api/uom/Obj/' + dummyuuid1 +
'/Child/' + dummyuuid2)
self.assertEqual(dummyuuid1, util.get_req_path_uuid(path, root=True))
self.assertTrue(util.is_instance_path(path))
path = '/' + dummyuuid1
self.assertEqual(dummyuuid1, util.get_req_path_uuid(path, root=True))
path = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1
self.assertEqual(dummyuuid1, util.get_req_path_uuid(path, root=True))
def test_extend_basepath(self):
ext = '/foo'
for path in (dummyuuid1, '/' + dummyuuid1,
'https://server:1234/rest/api/uom/Obj/' + dummyuuid1,
'https://server:1234/rest/api/uom/Obj/' + dummyuuid1 +
'/Child'):
self.assertEqual(path + ext, util.extend_basepath(path, ext))
basepath = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1
qp = '?foo=bar,baz&blah=123'
frag = '#frag'
self.assertEqual(basepath + ext + qp,
util.extend_basepath(basepath + qp, ext))
self.assertEqual(basepath + ext + frag,
util.extend_basepath(basepath + frag, ext))
self.assertEqual(basepath + ext + qp + frag,
util.extend_basepath(basepath + qp + frag, ext))
def test_sanitize_file_name_for_api(self):
allc = ''.join(map(chr, range(256)))
self.assertEqual('foo', util.sanitize_file_name_for_api('foo'))
self.assertEqual(
'config_foo.iso', util.sanitize_file_name_for_api(
'foo', prefix='config_', suffix='.iso'))
self.assertEqual(
'______________________________________________._0123456789_______'
'ABCDEFGHIJKLMN',
util.sanitize_file_name_for_api(allc))
self.assertEqual(
'OPQRSTUVWXYZ______abcdefghijklmnopqrstuvwxyz_____________________'
'______________',
util.sanitize_file_name_for_api(allc[79:])
)
self.assertEqual(
'_________________________________________________________________'
'______________',
util.sanitize_file_name_for_api(allc[158:])
)
self.assertEqual('___________________',
util.sanitize_file_name_for_api(allc[237:]))
self.assertEqual(
(dummyuuid1 + dummyuuid2[:7] + dummyuuid1).replace('-', '_'),
util.sanitize_file_name_for_api(
dummyuuid2, prefix=dummyuuid1, suffix=dummyuuid1))
self.assertEqual('I____________',
util.sanitize_file_name_for_api(
u'I \u611B \u01A4\u0177\u03C1\uFF4F\u05E9\u5DF3'
u'\u5C3A\uFF56\uFF4D'))
self.assertRaises(ValueError, util.sanitize_file_name_for_api, allc,
prefix=allc, suffix=allc)
self.assertRaises(ValueError, util.sanitize_file_name_for_api, '')
self.assertEqual('abcdefghijklmno', util.sanitize_file_name_for_api(
'abcdefghijklmnopqrstuvwxyz', max_len=const.MaxLen.VDISK_NAME))
self.assertEqual(
'abcdefghijklmnopqrstuvwxyz0123456789A',
util.sanitize_file_name_for_api(
'abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNO',
max_len=const.MaxLen.VOPT_NAME))
def test_sanitize_partition_name_for_api(self):
allc = ''.join(map(chr, range(256)))
self.assertEqual('foo', util.sanitize_partition_name_for_api('foo'))
self.assertEqual('_______________________________',
util.sanitize_partition_name_for_api(allc))
self.assertEqual('_ !_#_%_____+,-./0123456789:;_=',
util.sanitize_partition_name_for_api(allc[31:]))
self.assertEqual('__@ABCDEFGHIJKLMNOPQRSTUVWXYZ__',
util.sanitize_partition_name_for_api(allc[62:]))
self.assertEqual('_^__abcdefghijklmnopqrstuvwxyz{',
util.sanitize_partition_name_for_api(allc[93:]))
self.assertEqual('_}_____________________________',
util.sanitize_partition_name_for_api(allc[124:]))
for start in (155, 186, 217):
self.assertEqual(
'_______________________________',
util.sanitize_partition_name_for_api(allc[start:]))
self.assertEqual('________',
util.sanitize_partition_name_for_api(allc[248:]))
self.assertEqual('I _ _________',
util.sanitize_partition_name_for_api(
u'I \u611B \u01A4\u0177\u03C1\uFF4F\u05E9\u5DF3'
u'\u5C3A\uFF56\uFF4D'))
self.assertRaises(ValueError, util.sanitize_partition_name_for_api,
allc, trunc_ok=False)
self.assertRaises(ValueError, util.sanitize_partition_name_for_api, '')
self.assertRaises(ValueError, util.sanitize_partition_name_for_api,
None)
def test_part_id_by_loc_code(self):
test_loc = 'U8247.22L.2125D6A-V2-C3'
fail_loc = 'abc1234'
self.assertEqual(util.part_id_by_loc_code(test_loc), 2)
self.assertIsNone(util.part_id_by_loc_code(fail_loc))
def test_xag_attrs(self):
base = const.DEFAULT_SCHEMA_ATTR
self.assertEqual(dict(base), util.xag_attrs(''))
self.assertEqual(dict(base), util.xag_attrs(None))
self.assertEqual(dict(base, group='foo'), util.xag_attrs('foo'))
self.assertEqual(dict(one=2), util.xag_attrs(None, base=dict(one=2)))
self.assertEqual(dict(one=2, group='foo'),
util.xag_attrs('foo', base=dict(one=2)))
@mock.patch.object(builtins, 'open')
def test_my_partition_id(self, m_open):
def rit():
for line in ('foo=bar\n', 'partition_id=1234\n', '\n', 'a=b\n'):
yield line
m_open.return_value.__enter__.return_value.__iter__.side_effect = rit
self.assertEqual(1234, util.my_partition_id())
def test_parent_spec(self):
self.assertEqual((None, None), util.parent_spec(None, None, None))
parent = mock.Mock(schema_type='schema_type', uuid='uuid')
self.assertEqual(('schema_type', 'uuid'), util.parent_spec(
parent, None, None))
self.assertEqual(('schema_type', 'uuid'), util.parent_spec(
parent, 'something', 'else'))
self.assertRaises(ValueError, util.parent_spec, None, 'one', None)
self.assertRaises(ValueError, util.parent_spec, None, None, 'two')
self.assertRaises(ValueError, util.parent_spec, None, 42, 'foo')
self.assertEqual(('schema_type', 'uuid2'), util.parent_spec(
None, parent, 'uuid2'))
self.assertEqual(('schema_type2', 'uuid2'), util.parent_spec(
None, 'schema_type2', 'uuid2'))
def test_retry_io_command(self):
class MyOSError(OSError):
def __init__(self, errno):
super(MyOSError, self).__init__()
self.errno = errno
class MyIOError(IOError):
def __init__(self, errno):
super(MyIOError, self).__init__()
self.errno = errno
class MyValError(ValueError):
def __init__(self, errno):
super(MyValError, self).__init__()
self.errno = errno
func = mock.Mock()
mock_os_intr = MyOSError(4)
mock_io_intr = MyIOError(4)
mock_val_intr = MyValError(4)
mock_os_hup = MyOSError(1)
mock_io_hup = MyIOError(1)
func.side_effect = [mock_os_intr, mock_io_intr, mock_val_intr]
self.assertRaises(MyValError, util.retry_io_command, func)
self.assertEqual(3, func.call_count)
func.reset_mock()
func.side_effect = mock_os_hup
self.assertRaises(MyOSError, util.retry_io_command, func, 1, 'a')
func.assert_called_once_with(1, 'a')
func.reset_mock()
func.side_effect = mock_io_hup
self.assertRaises(MyIOError, util.retry_io_command, func)
func.assert_called_once_with()
class TestAllowedList(unittest.TestCase):
def test_all_none(self):
for cls in (util.VLANList, util.MACList):
for val in ('ALL', 'NONE'):
self.assertEqual(val, cls.unmarshal(val))
for val in ('ALL', 'NONE', 'all', 'none', 'aLl', 'nOnE'):
self.assertEqual(val.upper(), cls.marshal(val))
self.assertEqual(val.upper(), cls.const_or_list(val))
self.assertEqual(val.upper(), cls.marshal([val]))
self.assertEqual(val.upper(), cls.const_or_list([val]))
def test_unmarshal(self):
self.assertEqual([1, 2], util.VLANList.unmarshal('1 2'))
self.assertEqual([0], util.VLANList.unmarshal('0'))
self.assertEqual([5, 6, 2230, 3340],
util.VLANList.unmarshal('5 6 2230 3340'))
self.assertEqual(['AB12CD34EF56', '12AB34CD56EF'],
util.MACList.unmarshal('AB12CD34EF56 12AB34CD56EF'))
self.assertEqual(['AB12CD34EF56'],
util.MACList.unmarshal('AB12CD34EF56'))
def test_marshal(self):
self.assertEqual('1 2', util.VLANList.marshal([1, 2]))
self.assertEqual('0', util.VLANList.marshal([0]))
self.assertEqual('5 6 2230 3340',
util.VLANList.marshal([5, 6, '2230', 3340]))
self.assertEqual('AB12CD34EF56 12AB34CD56EF', util.MACList.marshal(
['aB:12:Cd:34:eF:56', '12Ab34cD56Ef']))
self.assertEqual('AB12CD34EF56', util.MACList.marshal(
['Ab:12:cD:34:Ef:56']))
for cls in (util.VLANList, util.MACList):
self.assertRaises(ValueError, cls.marshal, None)
self.assertRaises(ValueError, cls.marshal, '')
self.assertRaises(ValueError, cls.marshal, ' ')
self.assertRaises(ValueError, cls.marshal, 'bogus')
def test_const_or_list(self):
for l2t in ([1, 2], [0], [5, 6, 2230, 3340]):
self.assertEqual(l2t, util.VLANList.const_or_list(l2t))
self.assertEqual(['AB12CD34EF56', '12AB34CD56EF'],
util.MACList.const_or_list(
['aB:12:Cd:34:eF:56', '12Ab34cD56Ef']))
self.assertEqual(['AB12CD34EF56'], util.MACList.const_or_list(
['Ab:12:cD:34:Ef:56']))
for cls in (util.VLANList, util.MACList):
for meth in (cls.marshal, cls.const_or_list):
self.assertRaises(ValueError, meth, None)
self.assertRaises(ValueError, meth, '')
self.assertRaises(ValueError, meth, ' ')
self.assertRaises(ValueError, meth, 'bogus')
self.assertRaises(ValueError, util.VLANList.marshal, ['1', 'NaN', 2])
self.assertRaises(ValueError, util.VLANList.const_or_list, ['1', 'NaN',
2])
| true | true |
1c33dcfd0c06b3215bcbfd696803bb148de2d0f7 | 1,780 | py | Python | src/logChunk/allRunn.py | saledouble/gitcproc | 009d614fa1a56dc75acb0277ecc98ea27e91750b | [
"BSD-3-Clause"
] | null | null | null | src/logChunk/allRunn.py | saledouble/gitcproc | 009d614fa1a56dc75acb0277ecc98ea27e91750b | [
"BSD-3-Clause"
] | 3 | 2020-11-12T14:42:22.000Z | 2021-01-13T22:30:23.000Z | src/logChunk/allRunn.py | saledouble/gitcproc | 009d614fa1a56dc75acb0277ecc98ea27e91750b | [
"BSD-3-Clause"
] | 2 | 2020-11-11T22:27:28.000Z | 2021-01-13T21:07:14.000Z |
#get the path of directory in which project directories are there. Assume dirsPath
#rootdir ='C:\Users\Yagnik\PycharmProjects\Top_Project'
import os
import sys
import ghProc
from logChunk import logChunk
#print os.listdir(rootdir)
# for subdir, dirs, files in os.walk(rootdir):
# print dirs
def main():
print("Utility to BULK process github logs")
if len(sys.argv) < 2:
print("!!! Usage: python allRun.py top_project directory")
sys.exit()
if not os.path.isdir("../Results"):
os.mkdir("../Results")
fPtrChangeSummary=open("../Results/"+"ChangeSummary.csv",'w')
fPtrChangeSummary.write("project,sha,author,author_email,commit_date,is_bug\n")
fPtrPatchSummary=open("../Results/"+"PatchSummary.csv",'w')
fPtrMisMatchSummary=open("../Results/"+"MisMatchSummary.csv",'w')
fPtrMisMatchSummary.write("project,Total,Match,MisMatch,Exception,matchException,misMatchException\n")
lst=[]
listToDict={}
mockChunk=logChunk("")
mockChunk.readKeywords(lst)
keywords= [sub_list[0] for sub_list in lst]
for keyword in keywords:
listToDict[str(keyword)+" Adds"]=0
listToDict[str(keyword)+" Dels"]=0
#fPtrPatchSummary.write("project, sha, language, file_name, is_test,bracket_diff,isExceptionPatch, method_name,total_add,total_del,uniqueExcepAdd,uniqueExcepDel,%s\n"%",".join(listToDict.keys()))
fPtrPatchSummary.write("project, sha, language, file_name, is_test, method_name,total_add,total_del,%s\n"%",".join(sorted(listToDict.keys())))
fPtrChangeSummary.close()
fPtrPatchSummary.close()
fPtrMisMatchSummary.close()
rootdir = str(sys.argv[1])
for dir in os.listdir(rootdir):
path= os.path.join(rootdir,dir)
print(path)
os.system('python ghProc.py %s'%path)
if __name__ == '__main__':
main()
| 30.689655 | 197 | 0.723034 |
import os
import sys
import ghProc
from logChunk import logChunk
def main():
print("Utility to BULK process github logs")
if len(sys.argv) < 2:
print("!!! Usage: python allRun.py top_project directory")
sys.exit()
if not os.path.isdir("../Results"):
os.mkdir("../Results")
fPtrChangeSummary=open("../Results/"+"ChangeSummary.csv",'w')
fPtrChangeSummary.write("project,sha,author,author_email,commit_date,is_bug\n")
fPtrPatchSummary=open("../Results/"+"PatchSummary.csv",'w')
fPtrMisMatchSummary=open("../Results/"+"MisMatchSummary.csv",'w')
fPtrMisMatchSummary.write("project,Total,Match,MisMatch,Exception,matchException,misMatchException\n")
lst=[]
listToDict={}
mockChunk=logChunk("")
mockChunk.readKeywords(lst)
keywords= [sub_list[0] for sub_list in lst]
for keyword in keywords:
listToDict[str(keyword)+" Adds"]=0
listToDict[str(keyword)+" Dels"]=0
fPtrPatchSummary.write("project, sha, language, file_name, is_test, method_name,total_add,total_del,%s\n"%",".join(sorted(listToDict.keys())))
fPtrChangeSummary.close()
fPtrPatchSummary.close()
fPtrMisMatchSummary.close()
rootdir = str(sys.argv[1])
for dir in os.listdir(rootdir):
path= os.path.join(rootdir,dir)
print(path)
os.system('python ghProc.py %s'%path)
if __name__ == '__main__':
main()
| true | true |
1c33dd43e5aac4729f3611201fbd0862be806dae | 625 | py | Python | answer.py | ZYSzys/Answer-Assistant | efff7d2949d12f27b7d99cfa0e35f32757cbc8ad | [
"MIT"
] | 2 | 2018-04-17T09:42:41.000Z | 2018-04-17T09:57:35.000Z | answer.py | ZYSzys/Answer-Assistant | efff7d2949d12f27b7d99cfa0e35f32757cbc8ad | [
"MIT"
] | null | null | null | answer.py | ZYSzys/Answer-Assistant | efff7d2949d12f27b7d99cfa0e35f32757cbc8ad | [
"MIT"
] | null | null | null | #-*- coding: utf-8 -*-
import wda
import webbrowser
import urllib.parse
from PIL import Image
from configparser import ConfigParser
from ocr import ocr_img
def search(question):
webbrowser.open('https://baidu.com/s?wd='+urllib.parse.quote(question))
if __name__ == '__main__':
c = wda.Client()
config = ConfigParser()
config.read('./config.conf', encoding='utf-8')
print('回车继续,输入 x 回车结束\n')
while True:
c.screenshot('screenshot.png')
img = Image.open('./screenshot.png')
question = ocr_img(img, config)
print(question)
#print(choices)
search(question)
nxt = input()
if nxt == 'x':
break
| 16.891892 | 72 | 0.688 |
import wda
import webbrowser
import urllib.parse
from PIL import Image
from configparser import ConfigParser
from ocr import ocr_img
def search(question):
webbrowser.open('https://baidu.com/s?wd='+urllib.parse.quote(question))
if __name__ == '__main__':
c = wda.Client()
config = ConfigParser()
config.read('./config.conf', encoding='utf-8')
print('回车继续,输入 x 回车结束\n')
while True:
c.screenshot('screenshot.png')
img = Image.open('./screenshot.png')
question = ocr_img(img, config)
print(question)
search(question)
nxt = input()
if nxt == 'x':
break
| true | true |
1c33ddbb2adf6a132487ca1c86cebd1d85abf5e7 | 16,094 | py | Python | crabmeyerpy/ssc.py | tunbehaun273/crabmeyerpy | d36fe3ed9b8591bb92bd9915996dd21d79fc4dad | [
"BSD-3-Clause"
] | null | null | null | crabmeyerpy/ssc.py | tunbehaun273/crabmeyerpy | d36fe3ed9b8591bb92bd9915996dd21d79fc4dad | [
"BSD-3-Clause"
] | null | null | null | crabmeyerpy/ssc.py | tunbehaun273/crabmeyerpy | d36fe3ed9b8591bb92bd9915996dd21d79fc4dad | [
"BSD-3-Clause"
] | 2 | 2021-06-24T10:53:48.000Z | 2021-11-03T13:25:15.000Z | import yaml
from scipy.special import kv # Bessel function
from scipy.integrate import simps
from scipy.interpolate import interp1d
# imports to speed up integrations:
from numpy import meshgrid, linspace, ones, zeros
from numpy import log, exp, pi, sqrt, power, tan
# import functions for photon fields
from .photonfields import *
from astropy import units as u
from astropy import constants as c
from astropy.cosmology import Planck15 as cosmo
# define conversion factors
kpc2cm = u.kpc.to('cm')
eV2Hz = 1. / (c.h.value * u.J.to('eV'))
eV2erg = u.eV.to('erg')
m_e_eV = (c.m_e * c.c**2.).to('eV').value
arcmin2rad = u.arcmin.to('rad')
def ic_kernel(nu, gamma, e):
"""
Calculate the full inverse Compton Kernel, unitless
Parameters
----------
nu: array-like
final photon frequency in Hz
gamma: array-like
gamma factor of electrons
e: array-like
initial photon energy in eV
Returns
-------
Inner IC kernel including KN limit
Notes
-----
gamma, e, and e1 need to have same shape.
See Blumenthal & Gould 1970, Eq. 2.47 - 2.51
"""
q = nu / eV2Hz / 4. / gamma ** 2. / e / (1. - nu / eV2Hz / m_e_eV / gamma)
m = (q <= 1.) & (q >= 1. / 4. / gamma ** 2.)
f = zeros(q.shape)
f[m] = 2. * q[m] * log(q[m]) + (1. + 2. * q[m]) * (1. - q[m]) + \
(4. * e[m] / m_e_eV * gamma[m] * q[m]) ** 2. \
/ 2. / (1. + 4. * e[m] / m_e_eV * gamma[m] * q[m]) \
* (1. - q[m])
return f
class CrabSSC(object):
def __init__(self, config, n_el, B=124.e-6, d=2., nu_sync_min=1e7, nu_sync_max=1e30):
"""
Initialize the class
Parameters
----------
config: str or dict
path to config file with model parameters.
Should contain three dictionaries:
- params_n_el: parameters for the electron density
- params_n_seed: parameters for the photon density
n_el: function pointer
electron density spectrum. Should be called with n_el(gamma, **params_n_el)
{options}
B: float
magnetic field of the nebula in G
d: float
distance to the nebula in kpc
nu_sync_min: float
minimum frequency considered for syncrotron radiation
nu_sync_max: float
maximum frequency considered for syncrotron radiation
"""
# read in config file
if isinstance(config, dict):
conf = config
else:
with open(config) as f:
conf = yaml.safe_load(f)
self._params_n_el = conf['params_n_el']
self._params_n_seed = conf['params_n_seed']
self._nu_sync_min = nu_sync_min
self._nu_sync_max = nu_sync_max
self._n_el = n_el
self._B = B
self._d = d
# Interpolate x F(x) of synchrotron function,
# see e.g. Fig. 13 in Blumenthal & Gould 1970
steps = 100
self.__start = -40 # upper limit for x F (x) integration
self.__end = 20 # upper limit for x F (x) integration
# build a 2d array for interpolation
logx = np.linspace(self.__start, self.__end+1, steps)
for i, s in enumerate(logx):
if not i:
logx_arr = np.linspace(s, self.__end, steps)
else:
logx_arr = np.vstack((logx_arr, np.linspace(s, self.__end, steps)))
xF = np.exp(logx) * simps(kv(5./3., np.exp(logx_arr)) * np.exp(logx_arr), logx_arr, axis=1)
xF[xF < 1e-40] = np.full(np.sum(xF < 1e-40), 1e-40)
self.log_xF = interp1d(logx, np.log(xF))
self.FSyncInterp = None
return
@property
def params_n_el(self):
return self._params_n_el
@property
def params_n_seed(self):
return self._params_n_seed
@property
def n_el(self):
return self._n_el
@property
def B(self):
return self._B
@property
def d(self):
return self._d
@n_el.setter
def n_el(self, n_el):
self._n_el = n_el
@B.setter
def B(self, B):
self._B = B
@d.setter
def d(self, d):
self._d = d
def sync(self, nu, g_steps=50, gmin=None, gmax=None):
"""
Spectral synchrotron luminosity F_nu in erg/s/Hz/cm^2 as integral over electron distribution
Parameters:
-----------
nu: array-like
frequencies in Hz
{options}
g_steps: int
number of integration steps
gmin: float or None
minimum lorentz factor
gmax: float or None
maximum lorentz factor
Returns:
--------
array with spectral luminosity F_nu density at frequency nu
"""
if gmin is None:
gmin = self._params_n_el['gradio_min']
if gmax is None:
gmax = self._params_n_el['gwind_max']
# 2d grid for Freq and gamma factors
nn, gg = meshgrid(nu, linspace(log(gmin), log(gmax), g_steps), indexing='ij')
# x = nu / nu_c as 2d grid,
# nu_c: critical frequency for B in G; Longair vol.2 p. 261
nu_c = 4.199e10 * self._B * u.G.to('T') * exp(gg)**2.
x = nn / nu_c
# define a mask for integration
m = (log(x) > self.__start) & (log(x) < self.__end)
result = np.full(x.shape, 1e-40)
# synchrotron function
result[m] = exp(self.log_xF(log(x[m])))
# multiply with electron spectrum
result *= self._n_el(exp(gg), **self._params_n_el)
# integrate over gamma
result = simps(result * exp(gg), gg, axis=1)
# pre factors: sqrt(3) * e^3 / mc^2 with B in G, see e.g. B&G 4.44
# this has then units Fr^3 s^2 B g-1 cm-2
# When you use Fr G s^2 / (cm g) = 1 you get
# units Fr^2 / cm and with Fr = cm^3/2 g^1/2 s^-1
# this becomes g cm^2 s^2 = erg = erg / Hz / s.
# The pre factor is then consistent with 18.36 in Longair Vol.2
# since he calculates in W and for B in Tesla
result *= ((c.e.esu**3.) / (c.m_e.cgs * c.c.cgs**2.) * sqrt(3.)).value
# this is equal to 2.344355730864404e-22
# average over all pitch angles gives 2/3
result *= self._B * sqrt(2.0/3.0)
# divide by the distance squared
# change from intrinsic luminosity to flux
result /= 4. * pi * self._d * self._d * kpc2cm * kpc2cm
# returns value in unites erg/s/Hz/cm^2
return result
def interp_sync_init(self, g_steps=100):
"""
Initialize interpolation of Spectral synchrotron luminosity F_nu in erg/s/Hz/cm^2 for given electron spectrum,
in log - log space.
Sets self.FSyncInterp function pointer.
Parameters
----------
g_steps: int,
number of integration steps
"""
nu = np.logspace(np.log10(self._nu_sync_min), np.log10(self._nu_sync_max), 200)
F_sync = self.sync(nu, g_steps=g_steps)
self.FSyncInterp = interp1d(log(nu), log(F_sync))
def grey_body_old(self, nu):
"""
Return grey body nu F_nu spectrum in erg/s/cm^2
Parameters
----------
nu: array like
array with frequencies in Hz
Returns
-------
array with grey body flux in erg/s/cm^2
Note
----
TODO: I don't think that this is correct.
TODO: From the photon density you should simply
TODO: multiply with (h nu) * c / 4 pi to get the specific intensity
"""
# photons dens of black body in photons/eV/cm^3
result = black_body(nu / eV2Hz, self._params_n_seed['dust_T'])
result *= self._params_n_seed['dust_norm']
# this is in units of photons/cm^3/eV
# assume an emitting volume, using the scale length
# suggested by Hillas: 1.3 arcmin
# now this is in units of photons / eV
result *= 4.0 / 3.0 * pi * power(tan(self._params_n_seed['dust_extension'] * arcmin2rad)
* self._d * kpc2cm, 3.)
# calculate erg per s per cm**2
result *= (nu * nu / eV2Hz / eV2Hz) * eV2erg
result /= 4.0 * pi * (self._params['d'] * kpc2cm * self._d * kpc2cm)
return result
def grey_body(self, nu):
"""
Return grey body nu F_nu spectrum in erg/s/cm^2
Parameters
----------
nu: array like
array with frequencies in Hz
Returns
-------
array with grey body flux in erg/s/cm^2/Hz
"""
# photons dens of black body in photons/eV/cm^3
result = black_body(nu / eV2Hz, self._params_n_seed['dust_T'])
result *= self._params_n_seed['dust_norm']
# change to dens in photon / Hz / cm^3, dn / d nu = dn / de * de / d nu = dn / de * h
result *= c.h.to('eV s').value
# multiply with energy to get energy density per Hz
result *= nu * c.h.to('erg s').value
# multiply with c / 4 pi to get energy flux in erg / s / cm^2 / Hz
result *= c.c.cgs.value / 4. / pi
# rescale this from sphere of emitting region
# suggested by Hillas: 1.3 arcmin to distance of the Crab
# 4 pi tan(theta) d ** 2 / 4 pi d**2 = tan(theta)
result *= tan(self._params_n_seed['dust_extension'] * arcmin2rad)
return result
def sync_phot_dens(self, eps, gamma):
"""
Calculate synchrotron photon number density of Crab nebula according to Hillas et al. (1998)
Parameters
----------
eps: array-like
n-dim array with energy of photons, in eV
gamma: array
m-dim array with gamma factor of electrons
Returns
-------
m x n-dim array with photon densities in photons / eV / cm^3
Notes
-----
See https://arxiv.org/pdf/1008.4524.pdf Eq. (A3)
"""
# eps is in units of eV
# get synchrotron luminosity in units of erg/s/cm^2/Hz, F_nu
S = np.full(eps.shape[0], 1e-40)
# include synchrotron photon density
if self._params_n_seed['ic_sync']:
# initialize synchrotron interpolation
if self.FSyncInterp is None:
self.interp_sync_init()
# mask for frequencies
m = (log(eps * eV2Hz) > log(self._nu_sync_min)) & \
(log(eps * eV2Hz) < log(self._nu_sync_max))
# calculate synchrotron intergral from interpolation
S[m] = exp(self.FSyncInterp(log(eps * eV2Hz)[m]))
# conversion:
# Now in units of erg/s/cm^2
# nu F_nu
S *= eps * eV2Hz
# convert in units of photons/cm^2/s
#S /= (eps * u.eV.to('J') / c.h.value) * u.eV.to('erg')
S /= (eps * eV2erg)
# total production rate of photons in units of 1/s */
S *= (4.0 * pi * (self._d * kpc2cm)**2.)
# calculate the scale length of the electrons "seeing" the photons according to Hillas et al. (1998)
rho = zeros(gamma.shape)
m = gamma * m_e_eV / 1e9 < 34.
rho[m] = tan(1.35 * arcmin2rad) * self._d * kpc2cm
extension = 0.15 + 1.2*power(gamma[~m] * m_e_eV / 34. / 1e9, -0.17)
rho[~m] = tan(extension * arcmin2rad) * self._d * kpc2cm
# calculate scale length of photon density in the nebular
sigma = zeros(eps.shape)
m = eps < 0.02
sigma[m] = tan(1.35 * arcmin2rad) * self._d * kpc2cm
extension = 0.16 + 1.19 * power(eps[~m]/0.02, -0.09)
sigma[~m] = tan(extension * arcmin2rad) * self._d * kpc2cm
# Add Dust Component and line emission
if self._params_n_seed['ic_dust']:
S_dust = self.grey_body(eps * eV2Hz)
S_dust *= eps * eV2Hz
S_dust /= (eps * eV2erg)
S_dust *= (4.0 * pi * (self._d * kpc2cm)**2.)
# calculate scale length of photon density in the nebular
sigma_dust = tan(self._params_n_seed['dust_extension'] * arcmin2rad) * self._d * kpc2cm
# TODO: check if this combination is the right way to do it
# TODO: or if the overlap has to be calculated differently
# calculate photon density in photons/cm**3/eV
if len(sigma.shape) == 1 and not sigma.shape[0] == rho.shape[0]:
ss, rr = meshgrid(sigma, rho)
S, _ = meshgrid(S, rho)
ee, _ = meshgrid(eps, gamma)
S /= (4.0 * pi * c.c.cgs.value * (ss * ss + rr * rr))
if self._params_n_seed['ic_dust']:
sd, _ = meshgrid(sigma_dust, rho)
S_dust, _ = meshgrid(S_dust, rho)
S_dust /= (4.0 * pi * c.c.cgs.value * (sd * sd + rr * rr))
S += S_dust
S /= ee
else:
S /= (4.0 * pi * c.c.cgs.value * (sigma * sigma + rho * rho))
if self._params_n_seed['ic_dust']:
S_dust /= (4.0 * pi * c.c.cgs.value * (sigma_dust * sigma_dust + rho * rho))
S += S_dust
S /= eps
return S
def ic(self, nu, g_steps=200, e_steps=90):
"""
Spectral luminosity F_nu in erg/s/Hz/cm^2 for inverse Compton scattering.
Parameters:
-----------
nu: array-like
n-dim array with frequencies in Hz
{options}
g_steps: int
number of integration steps for gamma
e_steps: int
number of integration steps for energy
Returns:
--------
n-dim numpy array spectral luminosity F_nu density at frequency nu
"""
log_g = linspace(log(self._params_n_el['gmin']), log(self._params_n_el['gmax']), g_steps)
gamma = exp(log_g)
result = zeros(nu.shape[0])
# generate the arrays for observed freq nu, gamma factor, in energy of photon field
nn, gg = meshgrid(nu, log_g, indexing='ij')
nnn, ggg, eee = meshgrid(nu, log_g, linspace(0., 1., e_steps), indexing='ij')
x1 = log(nnn / eV2Hz / 4. / ggg ** 2.)
x1[x1 < 1e-18] = 1e-18
x2 = log(nnn / eV2Hz)
log_eee = zeros(nnn.shape)
m = zeros(nnn.shape, dtype=np.bool)
for i, n in enumerate(nu):
for j, lg in enumerate(log_g):
x1 = max(log(n / eV2Hz / 4. / gamma[j] ** 2.), log(1e-18))
x2 = log(n / eV2Hz)
# now log_eps has shape g_steps x e_steps
log_eee[i, j] = linspace(x1, x2, e_steps)
if x2 > x1:
m[i, j] = True
# calculate photon densities:
# these are in photons / eV / cm^3
phot_dens = np.zeros(eee.shape)
if self._params_n_seed['ic_sync'] or self._params_n_seed['ic_dust']:
phot_dens[m] = self.sync_phot_dens(exp(log_eee[m]), exp(ggg[m]))
if self._params_n_seed['ic_cmb']:
phot_dens[m] += black_body(exp(log_eee[m]), cosmo.Tcmb0.value)
# IC scattering kernel
f = ic_kernel(nnn, exp(ggg), exp(log_eee))
# multiply the two in integrate over initial photon energy
kernel_in = phot_dens * f
# kernel needs to be divided by exp(log_eee) but
# cancels since we're integrating over log(energy).
# now in photons / cm^3 / eV
kernel_out = simps(kernel_in, log_eee, axis=2)
kernel_out *= self._n_el(exp(gg), **self._params_n_el) / exp(gg) ** 2.
# integrate over electron gamma factor
result = simps(kernel_out * exp(gg), gg, axis=1)
# result of integration is in units of photons/cm**3/eV
# multiplying with Thomson*c*energy gives and convert to
# units of erg/sec/eV
result *= 3. / 4. * (c.sigma_T.cgs * c.c.cgs).value * nu / eV2Hz * eV2erg
# convert to erg / sec / Hz
# this is the spectral luminosity L_nu
result /= eV2Hz
# divide by the distance squared to get the flux
result /= 4. * pi * (self._d * kpc2cm)**2.
return result
| 32.64503 | 118 | 0.555859 | import yaml
from scipy.special import kv
from scipy.integrate import simps
from scipy.interpolate import interp1d
from numpy import meshgrid, linspace, ones, zeros
from numpy import log, exp, pi, sqrt, power, tan
from .photonfields import *
from astropy import units as u
from astropy import constants as c
from astropy.cosmology import Planck15 as cosmo
kpc2cm = u.kpc.to('cm')
eV2Hz = 1. / (c.h.value * u.J.to('eV'))
eV2erg = u.eV.to('erg')
m_e_eV = (c.m_e * c.c**2.).to('eV').value
arcmin2rad = u.arcmin.to('rad')
def ic_kernel(nu, gamma, e):
q = nu / eV2Hz / 4. / gamma ** 2. / e / (1. - nu / eV2Hz / m_e_eV / gamma)
m = (q <= 1.) & (q >= 1. / 4. / gamma ** 2.)
f = zeros(q.shape)
f[m] = 2. * q[m] * log(q[m]) + (1. + 2. * q[m]) * (1. - q[m]) + \
(4. * e[m] / m_e_eV * gamma[m] * q[m]) ** 2. \
/ 2. / (1. + 4. * e[m] / m_e_eV * gamma[m] * q[m]) \
* (1. - q[m])
return f
class CrabSSC(object):
def __init__(self, config, n_el, B=124.e-6, d=2., nu_sync_min=1e7, nu_sync_max=1e30):
if isinstance(config, dict):
conf = config
else:
with open(config) as f:
conf = yaml.safe_load(f)
self._params_n_el = conf['params_n_el']
self._params_n_seed = conf['params_n_seed']
self._nu_sync_min = nu_sync_min
self._nu_sync_max = nu_sync_max
self._n_el = n_el
self._B = B
self._d = d
steps = 100
self.__start = -40
self.__end = 20
logx = np.linspace(self.__start, self.__end+1, steps)
for i, s in enumerate(logx):
if not i:
logx_arr = np.linspace(s, self.__end, steps)
else:
logx_arr = np.vstack((logx_arr, np.linspace(s, self.__end, steps)))
xF = np.exp(logx) * simps(kv(5./3., np.exp(logx_arr)) * np.exp(logx_arr), logx_arr, axis=1)
xF[xF < 1e-40] = np.full(np.sum(xF < 1e-40), 1e-40)
self.log_xF = interp1d(logx, np.log(xF))
self.FSyncInterp = None
return
@property
def params_n_el(self):
return self._params_n_el
@property
def params_n_seed(self):
return self._params_n_seed
@property
def n_el(self):
return self._n_el
@property
def B(self):
return self._B
@property
def d(self):
return self._d
@n_el.setter
def n_el(self, n_el):
self._n_el = n_el
@B.setter
def B(self, B):
self._B = B
@d.setter
def d(self, d):
self._d = d
def sync(self, nu, g_steps=50, gmin=None, gmax=None):
if gmin is None:
gmin = self._params_n_el['gradio_min']
if gmax is None:
gmax = self._params_n_el['gwind_max']
nn, gg = meshgrid(nu, linspace(log(gmin), log(gmax), g_steps), indexing='ij')
nu_c = 4.199e10 * self._B * u.G.to('T') * exp(gg)**2.
x = nn / nu_c
m = (log(x) > self.__start) & (log(x) < self.__end)
result = np.full(x.shape, 1e-40)
result[m] = exp(self.log_xF(log(x[m])))
result *= self._n_el(exp(gg), **self._params_n_el)
result = simps(result * exp(gg), gg, axis=1)
result *= ((c.e.esu**3.) / (c.m_e.cgs * c.c.cgs**2.) * sqrt(3.)).value
result *= self._B * sqrt(2.0/3.0)
result /= 4. * pi * self._d * self._d * kpc2cm * kpc2cm
return result
def interp_sync_init(self, g_steps=100):
nu = np.logspace(np.log10(self._nu_sync_min), np.log10(self._nu_sync_max), 200)
F_sync = self.sync(nu, g_steps=g_steps)
self.FSyncInterp = interp1d(log(nu), log(F_sync))
def grey_body_old(self, nu):
result = black_body(nu / eV2Hz, self._params_n_seed['dust_T'])
result *= self._params_n_seed['dust_norm']
result *= 4.0 / 3.0 * pi * power(tan(self._params_n_seed['dust_extension'] * arcmin2rad)
* self._d * kpc2cm, 3.)
result *= (nu * nu / eV2Hz / eV2Hz) * eV2erg
result /= 4.0 * pi * (self._params['d'] * kpc2cm * self._d * kpc2cm)
return result
def grey_body(self, nu):
result = black_body(nu / eV2Hz, self._params_n_seed['dust_T'])
result *= self._params_n_seed['dust_norm']
result *= c.h.to('eV s').value
result *= nu * c.h.to('erg s').value
result *= c.c.cgs.value / 4. / pi
result *= tan(self._params_n_seed['dust_extension'] * arcmin2rad)
return result
def sync_phot_dens(self, eps, gamma):
S = np.full(eps.shape[0], 1e-40)
if self._params_n_seed['ic_sync']:
if self.FSyncInterp is None:
self.interp_sync_init()
m = (log(eps * eV2Hz) > log(self._nu_sync_min)) & \
(log(eps * eV2Hz) < log(self._nu_sync_max))
S[m] = exp(self.FSyncInterp(log(eps * eV2Hz)[m]))
S *= eps * eV2Hz
S /= (eps * eV2erg)
S *= (4.0 * pi * (self._d * kpc2cm)**2.)
rho = zeros(gamma.shape)
m = gamma * m_e_eV / 1e9 < 34.
rho[m] = tan(1.35 * arcmin2rad) * self._d * kpc2cm
extension = 0.15 + 1.2*power(gamma[~m] * m_e_eV / 34. / 1e9, -0.17)
rho[~m] = tan(extension * arcmin2rad) * self._d * kpc2cm
sigma = zeros(eps.shape)
m = eps < 0.02
sigma[m] = tan(1.35 * arcmin2rad) * self._d * kpc2cm
extension = 0.16 + 1.19 * power(eps[~m]/0.02, -0.09)
sigma[~m] = tan(extension * arcmin2rad) * self._d * kpc2cm
if self._params_n_seed['ic_dust']:
S_dust = self.grey_body(eps * eV2Hz)
S_dust *= eps * eV2Hz
S_dust /= (eps * eV2erg)
S_dust *= (4.0 * pi * (self._d * kpc2cm)**2.)
sigma_dust = tan(self._params_n_seed['dust_extension'] * arcmin2rad) * self._d * kpc2cm
if len(sigma.shape) == 1 and not sigma.shape[0] == rho.shape[0]:
ss, rr = meshgrid(sigma, rho)
S, _ = meshgrid(S, rho)
ee, _ = meshgrid(eps, gamma)
S /= (4.0 * pi * c.c.cgs.value * (ss * ss + rr * rr))
if self._params_n_seed['ic_dust']:
sd, _ = meshgrid(sigma_dust, rho)
S_dust, _ = meshgrid(S_dust, rho)
S_dust /= (4.0 * pi * c.c.cgs.value * (sd * sd + rr * rr))
S += S_dust
S /= ee
else:
S /= (4.0 * pi * c.c.cgs.value * (sigma * sigma + rho * rho))
if self._params_n_seed['ic_dust']:
S_dust /= (4.0 * pi * c.c.cgs.value * (sigma_dust * sigma_dust + rho * rho))
S += S_dust
S /= eps
return S
def ic(self, nu, g_steps=200, e_steps=90):
log_g = linspace(log(self._params_n_el['gmin']), log(self._params_n_el['gmax']), g_steps)
gamma = exp(log_g)
result = zeros(nu.shape[0])
nn, gg = meshgrid(nu, log_g, indexing='ij')
nnn, ggg, eee = meshgrid(nu, log_g, linspace(0., 1., e_steps), indexing='ij')
x1 = log(nnn / eV2Hz / 4. / ggg ** 2.)
x1[x1 < 1e-18] = 1e-18
x2 = log(nnn / eV2Hz)
log_eee = zeros(nnn.shape)
m = zeros(nnn.shape, dtype=np.bool)
for i, n in enumerate(nu):
for j, lg in enumerate(log_g):
x1 = max(log(n / eV2Hz / 4. / gamma[j] ** 2.), log(1e-18))
x2 = log(n / eV2Hz)
log_eee[i, j] = linspace(x1, x2, e_steps)
if x2 > x1:
m[i, j] = True
phot_dens = np.zeros(eee.shape)
if self._params_n_seed['ic_sync'] or self._params_n_seed['ic_dust']:
phot_dens[m] = self.sync_phot_dens(exp(log_eee[m]), exp(ggg[m]))
if self._params_n_seed['ic_cmb']:
phot_dens[m] += black_body(exp(log_eee[m]), cosmo.Tcmb0.value)
f = ic_kernel(nnn, exp(ggg), exp(log_eee))
kernel_in = phot_dens * f
# now in photons / cm^3 / eV
kernel_out = simps(kernel_in, log_eee, axis=2)
kernel_out *= self._n_el(exp(gg), **self._params_n_el) / exp(gg) ** 2.
# integrate over electron gamma factor
result = simps(kernel_out * exp(gg), gg, axis=1)
# result of integration is in units of photons/cm**3/eV
# multiplying with Thomson*c*energy gives and convert to
# units of erg/sec/eV
result *= 3. / 4. * (c.sigma_T.cgs * c.c.cgs).value * nu / eV2Hz * eV2erg
# convert to erg / sec / Hz
# this is the spectral luminosity L_nu
result /= eV2Hz
# divide by the distance squared to get the flux
result /= 4. * pi * (self._d * kpc2cm)**2.
return result
| true | true |
1c33ddc36f8de434473a62f0e05259e807d0838e | 743 | py | Python | bodyhands/utils/extend_utils_boxes.py | cvlab-stonybrook/BodyHands | dcfe470f6fd31a048d4d17d4ae9a2a524538b380 | [
"MIT"
] | 1 | 2022-03-06T08:18:33.000Z | 2022-03-06T08:18:33.000Z | bodyhands/utils/extend_utils_boxes.py | cvlab-stonybrook/BodyHands | dcfe470f6fd31a048d4d17d4ae9a2a524538b380 | [
"MIT"
] | null | null | null | bodyhands/utils/extend_utils_boxes.py | cvlab-stonybrook/BodyHands | dcfe470f6fd31a048d4d17d4ae9a2a524538b380 | [
"MIT"
] | null | null | null | import torch
from detectron2.structures import Boxes
def pairwise_intersection(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
boxes1, boxes2 = boxes1.tensor, boxes2.tensor
width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max(
boxes1[:, None, :2], boxes2[:, :2]
) # [N,M,2]
width_height.clamp_(min=0) # [N,M,2]
intersection = width_height.prod(dim=2) # [N,M]
return intersection
def pairwise_ioa(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
area2 = boxes2.area() # [M]
inter = pairwise_intersection(boxes1, boxes2)
# handle empty boxes
ioa = torch.where(
inter > 0, inter / area2, torch.zeros(1, dtype=inter.dtype, device=inter.device)
)
return ioa | 30.958333 | 88 | 0.648721 | import torch
from detectron2.structures import Boxes
def pairwise_intersection(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
boxes1, boxes2 = boxes1.tensor, boxes2.tensor
width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max(
boxes1[:, None, :2], boxes2[:, :2]
)
width_height.clamp_(min=0)
intersection = width_height.prod(dim=2)
return intersection
def pairwise_ioa(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
area2 = boxes2.area()
inter = pairwise_intersection(boxes1, boxes2)
ioa = torch.where(
inter > 0, inter / area2, torch.zeros(1, dtype=inter.dtype, device=inter.device)
)
return ioa | true | true |
1c33ddd6685c5cb98b0629eb5a2a360c0975d34b | 12,044 | py | Python | pyscf/lo/orth.py | maxscheurer/pyscf | 162c37942289c0aec70e70ba1ea98ade3ec34da5 | [
"Apache-2.0"
] | null | null | null | pyscf/lo/orth.py | maxscheurer/pyscf | 162c37942289c0aec70e70ba1ea98ade3ec34da5 | [
"Apache-2.0"
] | null | null | null | pyscf/lo/orth.py | maxscheurer/pyscf | 162c37942289c0aec70e70ba1ea98ade3ec34da5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
from functools import reduce
import numpy
import scipy.linalg
from pyscf.lib import param
from pyscf.lib import logger
from pyscf import gto
from pyscf import __config__
REF_BASIS = getattr(__config__, 'lo_orth_pre_orth_ao_method', 'ANO')
ORTH_METHOD = getattr(__config__, 'lo_orth_orth_ao_method', 'meta_lowdin')
PROJECT_ECP_BASIS = getattr(__config__, 'lo_orth_project_ecp_basis', True)
def lowdin(s):
''' new basis is |mu> c^{lowdin}_{mu i} '''
e, v = scipy.linalg.eigh(s)
idx = e > 1e-15
return numpy.dot(v[:,idx]/numpy.sqrt(e[idx]), v[:,idx].conj().T)
def schmidt(s):
c = numpy.linalg.cholesky(s)
return scipy.linalg.solve_triangular(c, numpy.eye(c.shape[1]), lower=True,
overwrite_b=False).conj().T
def vec_lowdin(c, s=1):
''' lowdin orth for the metric c.T*s*c and get x, then c*x'''
#u, w, vh = numpy.linalg.svd(c)
#return numpy.dot(u, vh)
# svd is slower than eigh
return numpy.dot(c, lowdin(reduce(numpy.dot, (c.conj().T,s,c))))
def vec_schmidt(c, s=1):
''' schmidt orth for the metric c.T*s*c and get x, then c*x'''
if isinstance(s, numpy.ndarray):
return numpy.dot(c, schmidt(reduce(numpy.dot, (c.conj().T,s,c))))
else:
return numpy.linalg.qr(c)[0]
def weight_orth(s, weight):
''' new basis is |mu> c_{mu i}, c = w[(wsw)^{-1/2}]'''
s1 = weight[:,None] * s * weight
c = lowdin(s1)
return weight[:,None] * c
def pre_orth_ao(mol, method=REF_BASIS):
'''Restore AO characters. Possible methods include the ANO/MINAO
projection or fraction-averaged atomic RHF calculation'''
if isinstance(method, str) and method.upper() in ('ANO', 'MINAO'):
# Use ANO/MINAO basis to define the strongly occupied set
return project_to_atomic_orbitals(mol, method)
else:
return pre_orth_ao_atm_scf(mol)
restore_ao_character = pre_orth_ao
def project_to_atomic_orbitals(mol, basname):
'''projected AO = |bas><bas|ANO>
'''
from pyscf.scf.addons import project_mo_nr2nr
from pyscf.scf import atom_hf
from pyscf.gto.ecp import core_configuration
def search_atm_l(atm, l):
bas_ang = atm._bas[:,gto.ANG_OF]
ao_loc = atm.ao_loc_nr()
idx = []
for ib in numpy.where(bas_ang == l)[0]:
idx.extend(range(ao_loc[ib], ao_loc[ib+1]))
return idx
# Overlap of ANO and ECP basis
def ecp_ano_det_ovlp(atm_ecp, atm_ano, ecpcore):
ecp_ao_loc = atm_ecp.ao_loc_nr()
ano_ao_loc = atm_ano.ao_loc_nr()
ecp_ao_dim = ecp_ao_loc[1:] - ecp_ao_loc[:-1]
ano_ao_dim = ano_ao_loc[1:] - ano_ao_loc[:-1]
ecp_bas_l = [[atm_ecp.bas_angular(i)]*d for i,d in enumerate(ecp_ao_dim)]
ano_bas_l = [[atm_ano.bas_angular(i)]*d for i,d in enumerate(ano_ao_dim)]
ecp_bas_l = numpy.hstack(ecp_bas_l)
ano_bas_l = numpy.hstack(ano_bas_l)
nelec_core = 0
ecp_occ_tmp = []
ecp_idx = []
ano_idx = []
for l in range(4):
nocc, frac = atom_hf.frac_occ(stdsymb, l)
l_occ = [2] * ((nocc-ecpcore[l])*(2*l+1))
if frac > 1e-15:
l_occ.extend([frac] * (2*l+1))
nocc += 1
if nocc == 0:
break
nelec_core += 2 * ecpcore[l] * (2*l+1)
i0 = ecpcore[l] * (2*l+1)
i1 = nocc * (2*l+1)
ecp_idx.append(numpy.where(ecp_bas_l==l)[0][:i1-i0])
ano_idx.append(numpy.where(ano_bas_l==l)[0][i0:i1])
ecp_occ_tmp.append(l_occ[:i1-i0])
ecp_idx = numpy.hstack(ecp_idx)
ano_idx = numpy.hstack(ano_idx)
ecp_occ = numpy.zeros(atm_ecp.nao_nr())
ecp_occ[ecp_idx] = numpy.hstack(ecp_occ_tmp)
nelec_valence_left = int(gto.charge(stdsymb) - nelec_core
- sum(ecp_occ[ecp_idx]))
if nelec_valence_left > 0:
logger.warn(mol, 'Characters of %d valence electrons are not identified.\n'
'It can affect the "meta-lowdin" localization method '
'and the population analysis of SCF method.\n'
'Adjustment to the core/valence partition may be needed '
'(see function lo.nao.set_atom_conf)\nto get reasonable '
'local orbitals or Mulliken population.\n',
nelec_valence_left)
# Return 0 to force the projection to ANO basis
return 0
else:
s12 = gto.intor_cross('int1e_ovlp', atm_ecp, atm_ano)[ecp_idx][:,ano_idx]
return numpy.linalg.det(s12)
nelec_ecp_dic = {}
for ia in range(mol.natm):
symb = mol.atom_symbol(ia)
if symb not in nelec_ecp_dic:
nelec_ecp_dic[symb] = mol.atom_nelec_core(ia)
aos = {}
atm = gto.Mole()
atmp = gto.Mole()
for symb in mol._basis.keys():
stdsymb = gto.mole._std_symbol(symb)
atm._atm, atm._bas, atm._env = \
atm.make_env([[stdsymb,(0,0,0)]], {stdsymb:mol._basis[symb]}, [])
atm.cart = mol.cart
atm._built = True
s0 = atm.intor_symmetric('int1e_ovlp')
if gto.is_ghost_atom(symb):
aos[symb] = numpy.diag(1./numpy.sqrt(s0.diagonal()))
continue
basis_add = gto.basis.load(basname, stdsymb)
atmp._atm, atmp._bas, atmp._env = \
atmp.make_env([[stdsymb,(0,0,0)]], {stdsymb:basis_add}, [])
atmp.cart = mol.cart
atmp._built = True
if symb in nelec_ecp_dic and nelec_ecp_dic[symb] > 0:
# If ECP basis has good atomic character, ECP basis can be used in the
# localization/population analysis directly. Otherwise project ECP
# basis to ANO basis.
if not PROJECT_ECP_BASIS:
continue
ecpcore = core_configuration(nelec_ecp_dic[symb])
# Comparing to ANO valence basis, to check whether the ECP basis set has
# reasonable AO-character contraction. The ANO valence AO should have
# significant overlap to ECP basis if the ECP basis has AO-character.
if abs(ecp_ano_det_ovlp(atm, atmp, ecpcore)) > .1:
aos[symb] = numpy.diag(1./numpy.sqrt(s0.diagonal()))
continue
else:
ecpcore = [0] * 4
# MINAO for heavier elements needs to be used with pseudo potential
if (basname.upper() == 'MINAO' and
gto.charge(stdsymb) > 36 and symb not in nelec_ecp_dic):
raise RuntimeError('Basis MINAO has to be used with ecp for heavy elements')
ano = project_mo_nr2nr(atmp, numpy.eye(atmp.nao_nr()), atm)
rm_ano = numpy.eye(ano.shape[0]) - reduce(numpy.dot, (ano, ano.T, s0))
c = rm_ano.copy()
for l in range(param.L_MAX):
idx = numpy.asarray(search_atm_l(atm, l))
nbf_atm_l = len(idx)
if nbf_atm_l == 0:
break
idxp = numpy.asarray(search_atm_l(atmp, l))
if l < 4:
idxp = idxp[ecpcore[l]:]
nbf_ano_l = len(idxp)
if mol.cart:
degen = (l + 1) * (l + 2) // 2
else:
degen = l * 2 + 1
if nbf_atm_l > nbf_ano_l > 0:
# For angular l, first place the projected ANO, then the rest AOs.
sdiag = reduce(numpy.dot, (rm_ano[:,idx].T, s0, rm_ano[:,idx])).diagonal()
nleft = (nbf_atm_l - nbf_ano_l) // degen
shell_average = numpy.einsum('ij->i', sdiag.reshape(-1,degen))
shell_rest = numpy.argsort(-shell_average)[:nleft]
idx_rest = []
for k in shell_rest:
idx_rest.extend(idx[k*degen:(k+1)*degen])
c[:,idx[:nbf_ano_l]] = ano[:,idxp]
c[:,idx[nbf_ano_l:]] = rm_ano[:,idx_rest]
elif nbf_ano_l >= nbf_atm_l > 0: # More ANOs than the mol basis functions
c[:,idx] = ano[:,idxp[:nbf_atm_l]]
sdiag = numpy.einsum('pi,pq,qi->i', c, s0, c)
c *= 1./numpy.sqrt(sdiag)
aos[symb] = c
nao = mol.nao_nr()
c = numpy.zeros((nao,nao))
p1 = 0
for ia in range(mol.natm):
symb = mol.atom_symbol(ia)
if symb in mol._basis:
ano = aos[symb]
else:
ano = aos[mol.atom_pure_symbol(ia)]
p0, p1 = p1, p1 + ano.shape[1]
c[p0:p1,p0:p1] = ano
return c
pre_orth_project_ano = project_to_atomic_orbitals
def pre_orth_ao_atm_scf(mol):
assert(not mol.cart)
from pyscf.scf import atom_hf
atm_scf = atom_hf.get_atm_nrhf(mol)
aoslice = mol.aoslice_by_atom()
coeff = []
for ia in range(mol.natm):
symb = mol.atom_symbol(ia)
if symb not in atm_scf:
symb = mol.atom_pure_symbol(ia)
if symb in atm_scf:
e_hf, e, c, occ = atm_scf[symb]
else: # symb's basis is not specified in the input
nao_atm = aoslice[ia,3] - aoslice[ia,2]
c = numpy.zeros((nao_atm, nao_atm))
coeff.append(c)
return scipy.linalg.block_diag(*coeff)
def orth_ao(mf_or_mol, method=ORTH_METHOD, pre_orth_ao=None, scf_method=None,
s=None):
'''Orthogonalize AOs
Kwargs:
method : str
One of
| lowdin : Symmetric orthogonalization
| meta-lowdin : Lowdin orth within core, valence, virtual space separately (JCTC, 10, 3784)
| NAO
'''
from pyscf.lo import nao
mf = scf_method
if isinstance(mf_or_mol, gto.Mole):
mol = mf_or_mol
else:
mol = mf_or_mol.mol
if mf is None:
mf = mf_or_mol
if s is None:
if getattr(mol, 'pbc_intor', None): # whether mol object is a cell
s = mol.pbc_intor('int1e_ovlp', hermi=1)
else:
s = mol.intor_symmetric('int1e_ovlp')
if pre_orth_ao is None:
pre_orth_ao = project_to_atomic_orbitals(mol, REF_BASIS)
if method.lower() == 'lowdin':
s1 = reduce(numpy.dot, (pre_orth_ao.conj().T, s, pre_orth_ao))
c_orth = numpy.dot(pre_orth_ao, lowdin(s1))
elif method.lower() == 'nao':
assert(mf is not None)
c_orth = nao.nao(mol, mf, s)
else:
# meta_lowdin: partition AOs into core, valence and Rydberg sets,
# orthogonalizing within each set
weight = numpy.ones(pre_orth_ao.shape[0])
c_orth = nao._nao_sub(mol, weight, pre_orth_ao, s)
# adjust phase
for i in range(c_orth.shape[1]):
if c_orth[i,i] < 0:
c_orth[:,i] *= -1
return c_orth
del(ORTH_METHOD)
if __name__ == '__main__':
from pyscf import scf
from pyscf.lo import nao
mol = gto.Mole()
mol.verbose = 1
mol.output = 'out_orth'
mol.atom.extend([
['O' , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ])
mol.basis = {'H': '6-31g',
'O': '6-31g',}
mol.build()
mf = scf.RHF(mol)
mf.scf()
c0 = nao.prenao(mol, mf.make_rdm1())
c = orth_ao(mol, 'meta_lowdin', c0)
s = mol.intor_symmetric('int1e_ovlp_sph')
p = reduce(numpy.dot, (s, mf.make_rdm1(), s))
print(reduce(numpy.dot, (c.T, p, c)).diagonal())
| 36.607903 | 103 | 0.583112 |
from functools import reduce
import numpy
import scipy.linalg
from pyscf.lib import param
from pyscf.lib import logger
from pyscf import gto
from pyscf import __config__
REF_BASIS = getattr(__config__, 'lo_orth_pre_orth_ao_method', 'ANO')
ORTH_METHOD = getattr(__config__, 'lo_orth_orth_ao_method', 'meta_lowdin')
PROJECT_ECP_BASIS = getattr(__config__, 'lo_orth_project_ecp_basis', True)
def lowdin(s):
e, v = scipy.linalg.eigh(s)
idx = e > 1e-15
return numpy.dot(v[:,idx]/numpy.sqrt(e[idx]), v[:,idx].conj().T)
def schmidt(s):
c = numpy.linalg.cholesky(s)
return scipy.linalg.solve_triangular(c, numpy.eye(c.shape[1]), lower=True,
overwrite_b=False).conj().T
def vec_lowdin(c, s=1):
return numpy.dot(c, lowdin(reduce(numpy.dot, (c.conj().T,s,c))))
def vec_schmidt(c, s=1):
if isinstance(s, numpy.ndarray):
return numpy.dot(c, schmidt(reduce(numpy.dot, (c.conj().T,s,c))))
else:
return numpy.linalg.qr(c)[0]
def weight_orth(s, weight):
s1 = weight[:,None] * s * weight
c = lowdin(s1)
return weight[:,None] * c
def pre_orth_ao(mol, method=REF_BASIS):
if isinstance(method, str) and method.upper() in ('ANO', 'MINAO'):
return project_to_atomic_orbitals(mol, method)
else:
return pre_orth_ao_atm_scf(mol)
restore_ao_character = pre_orth_ao
def project_to_atomic_orbitals(mol, basname):
from pyscf.scf.addons import project_mo_nr2nr
from pyscf.scf import atom_hf
from pyscf.gto.ecp import core_configuration
def search_atm_l(atm, l):
bas_ang = atm._bas[:,gto.ANG_OF]
ao_loc = atm.ao_loc_nr()
idx = []
for ib in numpy.where(bas_ang == l)[0]:
idx.extend(range(ao_loc[ib], ao_loc[ib+1]))
return idx
def ecp_ano_det_ovlp(atm_ecp, atm_ano, ecpcore):
ecp_ao_loc = atm_ecp.ao_loc_nr()
ano_ao_loc = atm_ano.ao_loc_nr()
ecp_ao_dim = ecp_ao_loc[1:] - ecp_ao_loc[:-1]
ano_ao_dim = ano_ao_loc[1:] - ano_ao_loc[:-1]
ecp_bas_l = [[atm_ecp.bas_angular(i)]*d for i,d in enumerate(ecp_ao_dim)]
ano_bas_l = [[atm_ano.bas_angular(i)]*d for i,d in enumerate(ano_ao_dim)]
ecp_bas_l = numpy.hstack(ecp_bas_l)
ano_bas_l = numpy.hstack(ano_bas_l)
nelec_core = 0
ecp_occ_tmp = []
ecp_idx = []
ano_idx = []
for l in range(4):
nocc, frac = atom_hf.frac_occ(stdsymb, l)
l_occ = [2] * ((nocc-ecpcore[l])*(2*l+1))
if frac > 1e-15:
l_occ.extend([frac] * (2*l+1))
nocc += 1
if nocc == 0:
break
nelec_core += 2 * ecpcore[l] * (2*l+1)
i0 = ecpcore[l] * (2*l+1)
i1 = nocc * (2*l+1)
ecp_idx.append(numpy.where(ecp_bas_l==l)[0][:i1-i0])
ano_idx.append(numpy.where(ano_bas_l==l)[0][i0:i1])
ecp_occ_tmp.append(l_occ[:i1-i0])
ecp_idx = numpy.hstack(ecp_idx)
ano_idx = numpy.hstack(ano_idx)
ecp_occ = numpy.zeros(atm_ecp.nao_nr())
ecp_occ[ecp_idx] = numpy.hstack(ecp_occ_tmp)
nelec_valence_left = int(gto.charge(stdsymb) - nelec_core
- sum(ecp_occ[ecp_idx]))
if nelec_valence_left > 0:
logger.warn(mol, 'Characters of %d valence electrons are not identified.\n'
'It can affect the "meta-lowdin" localization method '
'and the population analysis of SCF method.\n'
'Adjustment to the core/valence partition may be needed '
'(see function lo.nao.set_atom_conf)\nto get reasonable '
'local orbitals or Mulliken population.\n',
nelec_valence_left)
return 0
else:
s12 = gto.intor_cross('int1e_ovlp', atm_ecp, atm_ano)[ecp_idx][:,ano_idx]
return numpy.linalg.det(s12)
nelec_ecp_dic = {}
for ia in range(mol.natm):
symb = mol.atom_symbol(ia)
if symb not in nelec_ecp_dic:
nelec_ecp_dic[symb] = mol.atom_nelec_core(ia)
aos = {}
atm = gto.Mole()
atmp = gto.Mole()
for symb in mol._basis.keys():
stdsymb = gto.mole._std_symbol(symb)
atm._atm, atm._bas, atm._env = \
atm.make_env([[stdsymb,(0,0,0)]], {stdsymb:mol._basis[symb]}, [])
atm.cart = mol.cart
atm._built = True
s0 = atm.intor_symmetric('int1e_ovlp')
if gto.is_ghost_atom(symb):
aos[symb] = numpy.diag(1./numpy.sqrt(s0.diagonal()))
continue
basis_add = gto.basis.load(basname, stdsymb)
atmp._atm, atmp._bas, atmp._env = \
atmp.make_env([[stdsymb,(0,0,0)]], {stdsymb:basis_add}, [])
atmp.cart = mol.cart
atmp._built = True
if symb in nelec_ecp_dic and nelec_ecp_dic[symb] > 0:
if not PROJECT_ECP_BASIS:
continue
ecpcore = core_configuration(nelec_ecp_dic[symb])
if abs(ecp_ano_det_ovlp(atm, atmp, ecpcore)) > .1:
aos[symb] = numpy.diag(1./numpy.sqrt(s0.diagonal()))
continue
else:
ecpcore = [0] * 4
if (basname.upper() == 'MINAO' and
gto.charge(stdsymb) > 36 and symb not in nelec_ecp_dic):
raise RuntimeError('Basis MINAO has to be used with ecp for heavy elements')
ano = project_mo_nr2nr(atmp, numpy.eye(atmp.nao_nr()), atm)
rm_ano = numpy.eye(ano.shape[0]) - reduce(numpy.dot, (ano, ano.T, s0))
c = rm_ano.copy()
for l in range(param.L_MAX):
idx = numpy.asarray(search_atm_l(atm, l))
nbf_atm_l = len(idx)
if nbf_atm_l == 0:
break
idxp = numpy.asarray(search_atm_l(atmp, l))
if l < 4:
idxp = idxp[ecpcore[l]:]
nbf_ano_l = len(idxp)
if mol.cart:
degen = (l + 1) * (l + 2) // 2
else:
degen = l * 2 + 1
if nbf_atm_l > nbf_ano_l > 0:
sdiag = reduce(numpy.dot, (rm_ano[:,idx].T, s0, rm_ano[:,idx])).diagonal()
nleft = (nbf_atm_l - nbf_ano_l) // degen
shell_average = numpy.einsum('ij->i', sdiag.reshape(-1,degen))
shell_rest = numpy.argsort(-shell_average)[:nleft]
idx_rest = []
for k in shell_rest:
idx_rest.extend(idx[k*degen:(k+1)*degen])
c[:,idx[:nbf_ano_l]] = ano[:,idxp]
c[:,idx[nbf_ano_l:]] = rm_ano[:,idx_rest]
elif nbf_ano_l >= nbf_atm_l > 0:
c[:,idx] = ano[:,idxp[:nbf_atm_l]]
sdiag = numpy.einsum('pi,pq,qi->i', c, s0, c)
c *= 1./numpy.sqrt(sdiag)
aos[symb] = c
nao = mol.nao_nr()
c = numpy.zeros((nao,nao))
p1 = 0
for ia in range(mol.natm):
symb = mol.atom_symbol(ia)
if symb in mol._basis:
ano = aos[symb]
else:
ano = aos[mol.atom_pure_symbol(ia)]
p0, p1 = p1, p1 + ano.shape[1]
c[p0:p1,p0:p1] = ano
return c
pre_orth_project_ano = project_to_atomic_orbitals
def pre_orth_ao_atm_scf(mol):
assert(not mol.cart)
from pyscf.scf import atom_hf
atm_scf = atom_hf.get_atm_nrhf(mol)
aoslice = mol.aoslice_by_atom()
coeff = []
for ia in range(mol.natm):
symb = mol.atom_symbol(ia)
if symb not in atm_scf:
symb = mol.atom_pure_symbol(ia)
if symb in atm_scf:
e_hf, e, c, occ = atm_scf[symb]
else:
nao_atm = aoslice[ia,3] - aoslice[ia,2]
c = numpy.zeros((nao_atm, nao_atm))
coeff.append(c)
return scipy.linalg.block_diag(*coeff)
def orth_ao(mf_or_mol, method=ORTH_METHOD, pre_orth_ao=None, scf_method=None,
s=None):
from pyscf.lo import nao
mf = scf_method
if isinstance(mf_or_mol, gto.Mole):
mol = mf_or_mol
else:
mol = mf_or_mol.mol
if mf is None:
mf = mf_or_mol
if s is None:
if getattr(mol, 'pbc_intor', None): # whether mol object is a cell
s = mol.pbc_intor('int1e_ovlp', hermi=1)
else:
s = mol.intor_symmetric('int1e_ovlp')
if pre_orth_ao is None:
pre_orth_ao = project_to_atomic_orbitals(mol, REF_BASIS)
if method.lower() == 'lowdin':
s1 = reduce(numpy.dot, (pre_orth_ao.conj().T, s, pre_orth_ao))
c_orth = numpy.dot(pre_orth_ao, lowdin(s1))
elif method.lower() == 'nao':
assert(mf is not None)
c_orth = nao.nao(mol, mf, s)
else:
# meta_lowdin: partition AOs into core, valence and Rydberg sets,
# orthogonalizing within each set
weight = numpy.ones(pre_orth_ao.shape[0])
c_orth = nao._nao_sub(mol, weight, pre_orth_ao, s)
# adjust phase
for i in range(c_orth.shape[1]):
if c_orth[i,i] < 0:
c_orth[:,i] *= -1
return c_orth
del(ORTH_METHOD)
if __name__ == '__main__':
from pyscf import scf
from pyscf.lo import nao
mol = gto.Mole()
mol.verbose = 1
mol.output = 'out_orth'
mol.atom.extend([
['O' , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ])
mol.basis = {'H': '6-31g',
'O': '6-31g',}
mol.build()
mf = scf.RHF(mol)
mf.scf()
c0 = nao.prenao(mol, mf.make_rdm1())
c = orth_ao(mol, 'meta_lowdin', c0)
s = mol.intor_symmetric('int1e_ovlp_sph')
p = reduce(numpy.dot, (s, mf.make_rdm1(), s))
print(reduce(numpy.dot, (c.T, p, c)).diagonal())
| true | true |
1c33de508aa72facefdf67a50a6c86af3d232f08 | 17,427 | py | Python | ttlock2mqtt/src/ttlock_adapter.py | tonyldo/tonyldo-hassio-addons | 3005df8cd58d178bc0452d944d3498820eeacee9 | [
"Apache-2.0"
] | 6 | 2020-07-30T08:50:20.000Z | 2022-03-01T02:56:53.000Z | ttlock2mqtt/src/ttlock_adapter.py | tonyldo/tonyldo-hassio-addons | 3005df8cd58d178bc0452d944d3498820eeacee9 | [
"Apache-2.0"
] | 10 | 2020-07-28T17:28:52.000Z | 2022-01-10T20:28:16.000Z | ttlock2mqtt/src/ttlock_adapter.py | tonyldo/tonyldo-hassio-addons | 3005df8cd58d178bc0452d944d3498820eeacee9 | [
"Apache-2.0"
] | 9 | 2020-07-28T17:19:42.000Z | 2021-12-18T05:18:56.000Z | import paho.mqtt.client as mqtt
import time
import threading
import concurrent.futures
import getopt
import sys
import logging
from ttlockwrapper import TTLock, TTlockAPIError, constants
class TTLock2MQTTClient(mqtt.Client):
def __init__(self, ttlock, broker, port, broker_user, broker_pass, keepalive):
super().__init__(self.mqttClientId, False)
self.ttlock = ttlock
self.connected_flag = False
self.on_connect = TTLock2MQTTClient.cb_on_connect
self.on_disconnect = TTLock2MQTTClient.cb_on_disconnect
self.on_message = TTLock2MQTTClient.cb_on_message
self.broker_host = broker
self.broker_port = port
self.keepalive_mqtt = keepalive
if broker_user and broker_pass:
self.username_pw_set(broker_user, password=broker_pass)
logging.info("Client {} TTlock Mqtt Created".format(
self.mqttClientId))
self.COMMAND_TOPIC = None
def sendMensage(self, topic, msg, retain=False):
logging.debug('Client {} sending mensage "{}" to topic "{}" and retained {}'.format(
self.mqttClientId, msg, topic, retain))
self.publish(topic, msg, 0, retain)
def mqttConnection(self):
logging.debug("Client {} try connection at {}:{}".format(
self.mqttClientId, self.broker_host, self.broker_port))
self.connect(self.broker_host, self.broker_port, self.keepalive_mqtt)
@classmethod
def cb_on_message(cls, client, userdata, message):
try:
time.sleep(1)
logging.debug("Client {} message received: {}".format(client.mqttClientId, str(message.payload.decode("utf-8"))))
client.handleMessage(message)
except Exception:
logging.exception('Client {} error on received mqtt message'.format(client.getLockId()))
@classmethod
def cb_on_disconnect(cls, client, userdata, rc):
client.connected_flag = False # set flag
logging.info("Client {} disconnected!".format(client.mqttClientId))
@classmethod
def cb_on_connect(cls, client, userdata, flags, rc):
try:
if rc == 0:
client.connected_flag = True # set flag
logging.info("Client {} connected OK!".format(client.mqttClientId))
if client.COMMAND_TOPIC:
logging.info("Client {} subscribe on command topic: {}".format(
client.mqttClientId, client.COMMAND_TOPIC))
client.subscribe(client.COMMAND_TOPIC)
client.sendDiscoveryMsgs()
time.sleep(20)
client.forcePublishInfos()
else:
logging.error("Client {} Bad connection Returned code= {}".format(
client.mqttClientId, rc))
except Exception:
logging.exception('Client {} error on connect'.format(client.mqttClientId))
class TTLock2MQTTClientGateway(TTLock2MQTTClient):
def __init__(self, gateway, ttlock, broker, port, broker_user, broker_pass, connection_info_delay, keepalive):
self.gateway = gateway
self.mqttClientId = "GATEWAY-{}-{}".format(str(self.getGatewayId()), str(int(time.time())))
super().__init__(ttlock, broker, port, broker_user, broker_pass, keepalive)
self.DISCOVERY_GATEWAY_CONNECTION_TOPIC = 'homeassistant/binary_sensor/ttlock/{}_gateway/config'.format(
self.getGatewayId())
self.CONNECTION_BINARY_SENSOR_TOPIC = 'ttlocktomqtt/{}/connection'.format(
self.getGatewayId())
self.CONNECTION_BINARY_SENSOR_PAYLOAD = '{{"device_class": "connectivity", "name": "{} connection", "state_topic": "{}", "value_template": "{{{{ value_json.connection }}}}", "uniq_id":"{}_CONNECTION","device":{{"identifiers":["{}"], "name": "TTLOCK_GATEWAY_{}", "connections":[["mac","{}"]]}} }}'
self.CONNECTION_PAYLOAD = '{{"connection": "{}"}}'
self.lastConnectionPublishInfo = time.time()
self.connection_info_delay = connection_info_delay
def getGatewayId(self):
return self.gateway.get(constants.GATEWAY_ID_FIELD)
def getMac(self):
return self.gateway.get(constants.GATEWAY_MAC_FIELD)
def getName(self):
return self.gateway.get('gatewayName')
def updateGatewayJson(self):
try:
for gateway in self.ttlock.get_gateway_generator():
if gateway.get(constants.GATEWAY_ID_FIELD)==self.getGatewayId():
self.gateway = gateway
return
except Exception as error:
logging.error('Client {} error while update Gateway Json: {}'.format(
self.mqttClientId, str(error)))
def publishInfos(self):
if time.time()-self.lastConnectionPublishInfo > self.connection_info_delay:
self.updateGatewayJson()
self.forcePublishConnectionInfo()
def forcePublishConnectionInfo(self):
try:
logging.info(
'Client {} publish connection info.'.format(self.mqttClientId))
self.sendGatewayConnectionLevel()
except Exception as error:
logging.error('Client {} error: {}'.format(
self.mqttClientId, str(error)))
finally:
self.lastConnectionPublishInfo = time.time()
def forcePublishInfos(self):
self.forcePublishConnectionInfo()
def sendGatewayConnectionLevel(self):
connectionState = 'ON' if self.gateway.get('isOnline') else 'OFF'
msg = self.CONNECTION_PAYLOAD.format(connectionState)
self.sendMensage(self.CONNECTION_BINARY_SENSOR_TOPIC, msg)
def sendDiscoveryMsgs(self):
logging.info(
'Client {} sending discoveries msgs.'.format(self.mqttClientId))
msg = self.CONNECTION_BINARY_SENSOR_PAYLOAD.format(self.getName(
), self.CONNECTION_BINARY_SENSOR_TOPIC, self.getGatewayId(), self.getGatewayId(), self.getGatewayId(), self.getMac())
self.sendMensage(self.DISCOVERY_GATEWAY_CONNECTION_TOPIC, msg, True)
class TTLock2MQTTClientLock(TTLock2MQTTClient):
def __init__(self, lock, gateway, ttlock, broker, port, broker_user, broker_pass, state_delay, battery_delay, keepalive):
self.lock = lock
self.gateway = gateway
self.mqttClientId = "LOCK-{}-{}".format(str(self.getLockId()), str(int(time.time())))
super().__init__(ttlock, broker, port, broker_user, broker_pass, keepalive)
self.DISCOVERY_LOCK_TOPIC = 'homeassistant/lock/ttlock/{}_lock/config'.format(
self.getLockId())
self.DISCOVERY_SENSOR_TOPIC = 'homeassistant/sensor/ttlock/{}_battery/config'.format(
self.getLockId())
self.BATTERY_LEVEL_SENSOR_TOPIC = 'ttlocktomqtt/{}/battery'.format(
self.getLockId())
self.COMMAND_TOPIC = 'ttlocktomqtt/{}/command'.format(self.getLockId())
self.STATE_SENSOR_TOPIC = 'ttlocktomqtt/{}/state'.format(
self.getLockId())
self.DISCOVERY_LOCK_PAYLOAD = '{{"name": "{} lock", "command_topic": "{}", "state_topic": "{}", "value_template": "{{{{ value_json.state }}}}", "uniq_id":"{}_lock","device":{{"identifiers":["{}"], "name": "TTLOCK_LOCK_{}", "connections":[["mac","{}"]]}} }}'
self.DISCOVERY_BATTERY_LEVEL_SENSOR_PAYLOAD = '{{"device_class": "battery", "name": "{} battery", "state_topic": "{}", "unit_of_measurement": "%", "value_template": "{{{{ value_json.battery }}}}", "uniq_id":"{}_battery","device":{{"identifiers":["{}"], "name": "TTLOCK_LOCK_{}", "connections":[["mac","{}"]]}} }}'
self.STATE_PAYLOAD = '{{"state": "{}"}}'
self.BATTERY_LEVEL_PAYLOAD = '{{"battery": {}}}'
self.lastStatePublishInfo = time.time()
self.lastBatteryPublishInfo = time.time()
self.state_delay = state_delay
self.battery_delay = battery_delay
def getName(self):
return self.lock.get(constants.LOCK_ALIAS_FIELD)
def getLockId(self):
return self.lock.get(constants.LOCK_ID_FIELD)
def getMac(self):
return self.lock.get(constants.LOCK_MAC_FIELD)
def getGatewayId(self):
return self.gateway.get(constants.GATEWAY_ID_FIELD)
def handleMessage(self, message):
result = False
command = str(message.payload.decode("utf-8"))
if command == 'LOCK':
result = self.ttlock.lock(self.getLockId())
elif command == 'UNLOCK':
result = self.ttlock.unlock(self.getLockId())
else:
logging.info('Invalid command.')
return
if not result:
logging.warning(
'Client {} has fail to send API command.'.format(self.mqttClientId))
# todo: send unavailble msg
return
time.sleep(3)
self.forcePublishStateInfo()
def publishInfos(self):
if time.time()-self.lastStatePublishInfo > self.state_delay:
self.forcePublishStateInfo()
if time.time()-self.lastBatteryPublishInfo > self.battery_delay:
self.forcePublishBatteryInfo()
def forcePublishStateInfo(self):
try:
logging.info(
'Client {} publish lock state.'.format(self.mqttClientId))
self.sendLockState()
except Exception as error:
logging.error('Client {} error: {}'.format(
self.mqttClientId, str(error)))
finally:
self.lastStatePublishInfo = time.time()
def forcePublishBatteryInfo(self):
try:
logging.info(
'Client {} publish battery info.'.format(self.mqttClientId))
self.sendLockBatteryLevel()
except Exception as error:
logging.error('Client {} error: {}'.format(
self.mqttClientId, str(error)))
finally:
self.lastBatteryPublishInfo = time.time()
def forcePublishInfos(self):
self.forcePublishStateInfo()
self.forcePublishBatteryInfo()
def sendLockBatteryLevel(self):
batteryLevel = self.ttlock.lock_electric_quantity(self.getLockId())
msg = self.BATTERY_LEVEL_PAYLOAD.format(batteryLevel)
self.sendMensage(self.BATTERY_LEVEL_SENSOR_TOPIC, msg)
def sendLockState(self):
# Open state of lock:0-locked,1-unlocked,2-unknown
state = self.ttlock.lock_state(self.getLockId())
if state == 2:
logging.warning(
'Client {} lock state TTlockAPI return "unknown".'.format(self.mqttClientId))
return
lock_is = 'UNLOCKED' if state else 'LOCKED'
msg = self.STATE_PAYLOAD.format(lock_is)
self.sendMensage(self.STATE_SENSOR_TOPIC, msg, True)
def sendDiscoveryMsgs(self):
logging.info(
'Client {} sending discoveries msgs.'.format(self.mqttClientId))
msg = self.DISCOVERY_BATTERY_LEVEL_SENSOR_PAYLOAD.format(self.getName(
), self.BATTERY_LEVEL_SENSOR_TOPIC, self.getLockId(), self.getLockId(), self.getLockId(), self.getMac())
self.sendMensage(self.DISCOVERY_SENSOR_TOPIC, msg, True)
msg = self.DISCOVERY_LOCK_PAYLOAD.format(self.getName(), self.COMMAND_TOPIC, self.STATE_SENSOR_TOPIC, self.getLockId(
), self.getLockId(), self.getLockId(), self.getMac())
self.sendMensage(self.DISCOVERY_LOCK_TOPIC, msg, True)
def client_loop(ttlock2MqttClient, loop_delay=2.0, run_forever=False):
try:
logging.info("Client {} TTlock Mqtt on client_loop".format(
ttlock2MqttClient.mqttClientId))
bad_connection = 0
ttlock2MqttClient.mqttConnection()
while run_flag: # loop
ttlock2MqttClient.loop(loop_delay)
if ttlock2MqttClient.connected_flag:
ttlock2MqttClient.publishInfos()
else:
if bad_connection > 5 and not run_forever:
logging.error("Client {} has 5 times bad connection".format(
ttlock2MqttClient.mqttClientId))
break
bad_connection += 1
time.sleep(10)
if ttlock2MqttClient.connected_flag:
ttlock2MqttClient.disconnect()
except Exception as e:
logging.exception("Client {} Loop Thread Error ".format(
ttlock2MqttClient.mqttClientId))
finally:
logging.debug("Client {} return future".format(
ttlock2MqttClient.mqttClientId))
return ttlock2MqttClient
def create_futures(id,client):
if not client:
logging.debug('TTlock Element {} Client is empty...'.format(id))
elif id in client_futures.keys() and not client_futures.get(id).done():
logging.debug('TTlock Element {} Client already created...'.format(id))
else:
client_futures[id] = executor.submit(client_loop, client)
time.sleep(DELAY_BETWEEN_NEW_THREADS_CREATION)
def createClients(broker, port, broker_user, broker_pass, ttlock_client, ttlock_token,state_delay,battery_delay):
ttlock = TTLock(ttlock_client, ttlock_token)
ttlock2MqttClient = None
for gateway in ttlock.get_gateway_generator():
ttlock2MqttClient = TTLock2MQTTClientGateway(gateway, ttlock, broker, port, broker_user, broker_pass, battery_delay, DELAY_BETWEEN_LOCK_PUBLISH_INFOS*2)
create_futures(gateway.get(constants.GATEWAY_ID_FIELD),ttlock2MqttClient)
for lock in ttlock.get_locks_per_gateway_generator(gateway.get(constants.GATEWAY_ID_FIELD)):
ttlock2MqttClient = TTLock2MQTTClientLock(
lock, gateway, ttlock, broker, port, broker_user, broker_pass, state_delay, battery_delay, DELAY_BETWEEN_LOCK_PUBLISH_INFOS*2)
create_futures(lock.get(constants.LOCK_ID_FIELD),ttlock2MqttClient)
def main(broker, port, broker_user, broker_pass, ttlock_client, ttlock_token,state_delay,battery_delay):
try:
if not ttlock_client or not ttlock_token:
raise ValueError('Invalid ttlock client or token.')
logging.debug("Starting main loop...")
while True:
try:
createClients(broker, port, broker_user, broker_pass,
ttlock_client, ttlock_token,state_delay,battery_delay)
logging.info("Current threads: {}".format(
threading.active_count()))
except Exception as e:
logging.exception("Error main method")
time.sleep(DELAY_BETWEEN_NEW_THREADS_CREATION)
except KeyboardInterrupt:
logging.info("Ending...")
global run_flag
run_flag = False
for id, future in client_futures.items():
logging.info("Client {} thread is over!".format(
future.result().mqttClientId))
except ValueError as e:
logging.exception('Exiting script...')
def isEmptyStr(s):
return s == 'null' or len(s) == 0 or s.isspace()
DELAY_BETWEEN_NEW_THREADS_CREATION = 60
DELAY_BETWEEN_LOCK_PUBLISH_INFOS = 60
run_flag = True
client_futures = dict()
executor = concurrent.futures.ThreadPoolExecutor()
if __name__ == '__main__':
broker = 'localhost'
port = 1883
broker_user = None
broker_pass = None
ttlock_client = None
ttlock_token = None
state_delay = DELAY_BETWEEN_LOCK_PUBLISH_INFOS
battery_delay = DELAY_BETWEEN_LOCK_PUBLISH_INFOS*5
loglevel = 'INFO'
full_cmd_arguments = sys.argv
argument_list = full_cmd_arguments[1:]
short_options = 'b:p:u:P:c:t:l:S:B:'
long_options = ['broker=', 'port=', 'user=',
'Pass=', 'client=', 'token=', 'log_level=', 'State_delay=','Battery_delay=']
try:
arguments, values = getopt.getopt(
argument_list, short_options, long_options)
except getopt.error as e:
raise ValueError('Invalid parameters!')
for current_argument, current_value in arguments:
if isEmptyStr(current_value):
pass
elif current_argument in ("-b", "--broker"):
broker = current_value
elif current_argument in ("-p", "--port"):
port = int(current_value)
elif current_argument in ("-u", "--user"):
broker_user = current_value
elif current_argument in ("-P", "--Pass"):
broker_pass = current_value
elif current_argument in ("-c", "--client"):
ttlock_client = current_value
elif current_argument in ("-t", "--token"):
ttlock_token = current_value
elif current_argument in ("-l", "--log_level"):
loglevel = current_value
elif current_argument in ("-S", "--State_delay"):
state_delay = int(current_value)
elif current_argument in ("-B", "--Battery_delay"):
battery_delay = int(current_value)
numeric_level = getattr(logging, loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % loglevel)
logging.basicConfig(level=numeric_level, datefmt='%Y-%m-%d %H:%M:%S',
format='%(asctime)-15s - [%(levelname)s] TTLOCK2MQTT: %(message)s', )
logging.debug("Options: {}, {}, {}, {}, {}, {}, {}, {}, {}".format(
ttlock_client, ttlock_token, broker, port, broker_user,loglevel, broker_pass,state_delay,battery_delay))
main(broker, port, broker_user, broker_pass, ttlock_client, ttlock_token,state_delay,battery_delay)
| 43.5675 | 321 | 0.643369 | import paho.mqtt.client as mqtt
import time
import threading
import concurrent.futures
import getopt
import sys
import logging
from ttlockwrapper import TTLock, TTlockAPIError, constants
class TTLock2MQTTClient(mqtt.Client):
def __init__(self, ttlock, broker, port, broker_user, broker_pass, keepalive):
super().__init__(self.mqttClientId, False)
self.ttlock = ttlock
self.connected_flag = False
self.on_connect = TTLock2MQTTClient.cb_on_connect
self.on_disconnect = TTLock2MQTTClient.cb_on_disconnect
self.on_message = TTLock2MQTTClient.cb_on_message
self.broker_host = broker
self.broker_port = port
self.keepalive_mqtt = keepalive
if broker_user and broker_pass:
self.username_pw_set(broker_user, password=broker_pass)
logging.info("Client {} TTlock Mqtt Created".format(
self.mqttClientId))
self.COMMAND_TOPIC = None
def sendMensage(self, topic, msg, retain=False):
logging.debug('Client {} sending mensage "{}" to topic "{}" and retained {}'.format(
self.mqttClientId, msg, topic, retain))
self.publish(topic, msg, 0, retain)
def mqttConnection(self):
logging.debug("Client {} try connection at {}:{}".format(
self.mqttClientId, self.broker_host, self.broker_port))
self.connect(self.broker_host, self.broker_port, self.keepalive_mqtt)
@classmethod
def cb_on_message(cls, client, userdata, message):
try:
time.sleep(1)
logging.debug("Client {} message received: {}".format(client.mqttClientId, str(message.payload.decode("utf-8"))))
client.handleMessage(message)
except Exception:
logging.exception('Client {} error on received mqtt message'.format(client.getLockId()))
@classmethod
def cb_on_disconnect(cls, client, userdata, rc):
client.connected_flag = False
logging.info("Client {} disconnected!".format(client.mqttClientId))
@classmethod
def cb_on_connect(cls, client, userdata, flags, rc):
try:
if rc == 0:
client.connected_flag = True
logging.info("Client {} connected OK!".format(client.mqttClientId))
if client.COMMAND_TOPIC:
logging.info("Client {} subscribe on command topic: {}".format(
client.mqttClientId, client.COMMAND_TOPIC))
client.subscribe(client.COMMAND_TOPIC)
client.sendDiscoveryMsgs()
time.sleep(20)
client.forcePublishInfos()
else:
logging.error("Client {} Bad connection Returned code= {}".format(
client.mqttClientId, rc))
except Exception:
logging.exception('Client {} error on connect'.format(client.mqttClientId))
class TTLock2MQTTClientGateway(TTLock2MQTTClient):
def __init__(self, gateway, ttlock, broker, port, broker_user, broker_pass, connection_info_delay, keepalive):
self.gateway = gateway
self.mqttClientId = "GATEWAY-{}-{}".format(str(self.getGatewayId()), str(int(time.time())))
super().__init__(ttlock, broker, port, broker_user, broker_pass, keepalive)
self.DISCOVERY_GATEWAY_CONNECTION_TOPIC = 'homeassistant/binary_sensor/ttlock/{}_gateway/config'.format(
self.getGatewayId())
self.CONNECTION_BINARY_SENSOR_TOPIC = 'ttlocktomqtt/{}/connection'.format(
self.getGatewayId())
self.CONNECTION_BINARY_SENSOR_PAYLOAD = '{{"device_class": "connectivity", "name": "{} connection", "state_topic": "{}", "value_template": "{{{{ value_json.connection }}}}", "uniq_id":"{}_CONNECTION","device":{{"identifiers":["{}"], "name": "TTLOCK_GATEWAY_{}", "connections":[["mac","{}"]]}} }}'
self.CONNECTION_PAYLOAD = '{{"connection": "{}"}}'
self.lastConnectionPublishInfo = time.time()
self.connection_info_delay = connection_info_delay
def getGatewayId(self):
return self.gateway.get(constants.GATEWAY_ID_FIELD)
def getMac(self):
return self.gateway.get(constants.GATEWAY_MAC_FIELD)
def getName(self):
return self.gateway.get('gatewayName')
def updateGatewayJson(self):
try:
for gateway in self.ttlock.get_gateway_generator():
if gateway.get(constants.GATEWAY_ID_FIELD)==self.getGatewayId():
self.gateway = gateway
return
except Exception as error:
logging.error('Client {} error while update Gateway Json: {}'.format(
self.mqttClientId, str(error)))
def publishInfos(self):
if time.time()-self.lastConnectionPublishInfo > self.connection_info_delay:
self.updateGatewayJson()
self.forcePublishConnectionInfo()
def forcePublishConnectionInfo(self):
try:
logging.info(
'Client {} publish connection info.'.format(self.mqttClientId))
self.sendGatewayConnectionLevel()
except Exception as error:
logging.error('Client {} error: {}'.format(
self.mqttClientId, str(error)))
finally:
self.lastConnectionPublishInfo = time.time()
def forcePublishInfos(self):
self.forcePublishConnectionInfo()
def sendGatewayConnectionLevel(self):
connectionState = 'ON' if self.gateway.get('isOnline') else 'OFF'
msg = self.CONNECTION_PAYLOAD.format(connectionState)
self.sendMensage(self.CONNECTION_BINARY_SENSOR_TOPIC, msg)
def sendDiscoveryMsgs(self):
logging.info(
'Client {} sending discoveries msgs.'.format(self.mqttClientId))
msg = self.CONNECTION_BINARY_SENSOR_PAYLOAD.format(self.getName(
), self.CONNECTION_BINARY_SENSOR_TOPIC, self.getGatewayId(), self.getGatewayId(), self.getGatewayId(), self.getMac())
self.sendMensage(self.DISCOVERY_GATEWAY_CONNECTION_TOPIC, msg, True)
class TTLock2MQTTClientLock(TTLock2MQTTClient):
def __init__(self, lock, gateway, ttlock, broker, port, broker_user, broker_pass, state_delay, battery_delay, keepalive):
self.lock = lock
self.gateway = gateway
self.mqttClientId = "LOCK-{}-{}".format(str(self.getLockId()), str(int(time.time())))
super().__init__(ttlock, broker, port, broker_user, broker_pass, keepalive)
self.DISCOVERY_LOCK_TOPIC = 'homeassistant/lock/ttlock/{}_lock/config'.format(
self.getLockId())
self.DISCOVERY_SENSOR_TOPIC = 'homeassistant/sensor/ttlock/{}_battery/config'.format(
self.getLockId())
self.BATTERY_LEVEL_SENSOR_TOPIC = 'ttlocktomqtt/{}/battery'.format(
self.getLockId())
self.COMMAND_TOPIC = 'ttlocktomqtt/{}/command'.format(self.getLockId())
self.STATE_SENSOR_TOPIC = 'ttlocktomqtt/{}/state'.format(
self.getLockId())
self.DISCOVERY_LOCK_PAYLOAD = '{{"name": "{} lock", "command_topic": "{}", "state_topic": "{}", "value_template": "{{{{ value_json.state }}}}", "uniq_id":"{}_lock","device":{{"identifiers":["{}"], "name": "TTLOCK_LOCK_{}", "connections":[["mac","{}"]]}} }}'
self.DISCOVERY_BATTERY_LEVEL_SENSOR_PAYLOAD = '{{"device_class": "battery", "name": "{} battery", "state_topic": "{}", "unit_of_measurement": "%", "value_template": "{{{{ value_json.battery }}}}", "uniq_id":"{}_battery","device":{{"identifiers":["{}"], "name": "TTLOCK_LOCK_{}", "connections":[["mac","{}"]]}} }}'
self.STATE_PAYLOAD = '{{"state": "{}"}}'
self.BATTERY_LEVEL_PAYLOAD = '{{"battery": {}}}'
self.lastStatePublishInfo = time.time()
self.lastBatteryPublishInfo = time.time()
self.state_delay = state_delay
self.battery_delay = battery_delay
def getName(self):
return self.lock.get(constants.LOCK_ALIAS_FIELD)
def getLockId(self):
return self.lock.get(constants.LOCK_ID_FIELD)
def getMac(self):
return self.lock.get(constants.LOCK_MAC_FIELD)
def getGatewayId(self):
return self.gateway.get(constants.GATEWAY_ID_FIELD)
def handleMessage(self, message):
result = False
command = str(message.payload.decode("utf-8"))
if command == 'LOCK':
result = self.ttlock.lock(self.getLockId())
elif command == 'UNLOCK':
result = self.ttlock.unlock(self.getLockId())
else:
logging.info('Invalid command.')
return
if not result:
logging.warning(
'Client {} has fail to send API command.'.format(self.mqttClientId))
return
time.sleep(3)
self.forcePublishStateInfo()
def publishInfos(self):
if time.time()-self.lastStatePublishInfo > self.state_delay:
self.forcePublishStateInfo()
if time.time()-self.lastBatteryPublishInfo > self.battery_delay:
self.forcePublishBatteryInfo()
def forcePublishStateInfo(self):
try:
logging.info(
'Client {} publish lock state.'.format(self.mqttClientId))
self.sendLockState()
except Exception as error:
logging.error('Client {} error: {}'.format(
self.mqttClientId, str(error)))
finally:
self.lastStatePublishInfo = time.time()
def forcePublishBatteryInfo(self):
try:
logging.info(
'Client {} publish battery info.'.format(self.mqttClientId))
self.sendLockBatteryLevel()
except Exception as error:
logging.error('Client {} error: {}'.format(
self.mqttClientId, str(error)))
finally:
self.lastBatteryPublishInfo = time.time()
def forcePublishInfos(self):
self.forcePublishStateInfo()
self.forcePublishBatteryInfo()
def sendLockBatteryLevel(self):
batteryLevel = self.ttlock.lock_electric_quantity(self.getLockId())
msg = self.BATTERY_LEVEL_PAYLOAD.format(batteryLevel)
self.sendMensage(self.BATTERY_LEVEL_SENSOR_TOPIC, msg)
def sendLockState(self):
state = self.ttlock.lock_state(self.getLockId())
if state == 2:
logging.warning(
'Client {} lock state TTlockAPI return "unknown".'.format(self.mqttClientId))
return
lock_is = 'UNLOCKED' if state else 'LOCKED'
msg = self.STATE_PAYLOAD.format(lock_is)
self.sendMensage(self.STATE_SENSOR_TOPIC, msg, True)
def sendDiscoveryMsgs(self):
logging.info(
'Client {} sending discoveries msgs.'.format(self.mqttClientId))
msg = self.DISCOVERY_BATTERY_LEVEL_SENSOR_PAYLOAD.format(self.getName(
), self.BATTERY_LEVEL_SENSOR_TOPIC, self.getLockId(), self.getLockId(), self.getLockId(), self.getMac())
self.sendMensage(self.DISCOVERY_SENSOR_TOPIC, msg, True)
msg = self.DISCOVERY_LOCK_PAYLOAD.format(self.getName(), self.COMMAND_TOPIC, self.STATE_SENSOR_TOPIC, self.getLockId(
), self.getLockId(), self.getLockId(), self.getMac())
self.sendMensage(self.DISCOVERY_LOCK_TOPIC, msg, True)
def client_loop(ttlock2MqttClient, loop_delay=2.0, run_forever=False):
try:
logging.info("Client {} TTlock Mqtt on client_loop".format(
ttlock2MqttClient.mqttClientId))
bad_connection = 0
ttlock2MqttClient.mqttConnection()
while run_flag:
ttlock2MqttClient.loop(loop_delay)
if ttlock2MqttClient.connected_flag:
ttlock2MqttClient.publishInfos()
else:
if bad_connection > 5 and not run_forever:
logging.error("Client {} has 5 times bad connection".format(
ttlock2MqttClient.mqttClientId))
break
bad_connection += 1
time.sleep(10)
if ttlock2MqttClient.connected_flag:
ttlock2MqttClient.disconnect()
except Exception as e:
logging.exception("Client {} Loop Thread Error ".format(
ttlock2MqttClient.mqttClientId))
finally:
logging.debug("Client {} return future".format(
ttlock2MqttClient.mqttClientId))
return ttlock2MqttClient
def create_futures(id,client):
if not client:
logging.debug('TTlock Element {} Client is empty...'.format(id))
elif id in client_futures.keys() and not client_futures.get(id).done():
logging.debug('TTlock Element {} Client already created...'.format(id))
else:
client_futures[id] = executor.submit(client_loop, client)
time.sleep(DELAY_BETWEEN_NEW_THREADS_CREATION)
def createClients(broker, port, broker_user, broker_pass, ttlock_client, ttlock_token,state_delay,battery_delay):
ttlock = TTLock(ttlock_client, ttlock_token)
ttlock2MqttClient = None
for gateway in ttlock.get_gateway_generator():
ttlock2MqttClient = TTLock2MQTTClientGateway(gateway, ttlock, broker, port, broker_user, broker_pass, battery_delay, DELAY_BETWEEN_LOCK_PUBLISH_INFOS*2)
create_futures(gateway.get(constants.GATEWAY_ID_FIELD),ttlock2MqttClient)
for lock in ttlock.get_locks_per_gateway_generator(gateway.get(constants.GATEWAY_ID_FIELD)):
ttlock2MqttClient = TTLock2MQTTClientLock(
lock, gateway, ttlock, broker, port, broker_user, broker_pass, state_delay, battery_delay, DELAY_BETWEEN_LOCK_PUBLISH_INFOS*2)
create_futures(lock.get(constants.LOCK_ID_FIELD),ttlock2MqttClient)
def main(broker, port, broker_user, broker_pass, ttlock_client, ttlock_token,state_delay,battery_delay):
try:
if not ttlock_client or not ttlock_token:
raise ValueError('Invalid ttlock client or token.')
logging.debug("Starting main loop...")
while True:
try:
createClients(broker, port, broker_user, broker_pass,
ttlock_client, ttlock_token,state_delay,battery_delay)
logging.info("Current threads: {}".format(
threading.active_count()))
except Exception as e:
logging.exception("Error main method")
time.sleep(DELAY_BETWEEN_NEW_THREADS_CREATION)
except KeyboardInterrupt:
logging.info("Ending...")
global run_flag
run_flag = False
for id, future in client_futures.items():
logging.info("Client {} thread is over!".format(
future.result().mqttClientId))
except ValueError as e:
logging.exception('Exiting script...')
def isEmptyStr(s):
return s == 'null' or len(s) == 0 or s.isspace()
DELAY_BETWEEN_NEW_THREADS_CREATION = 60
DELAY_BETWEEN_LOCK_PUBLISH_INFOS = 60
run_flag = True
client_futures = dict()
executor = concurrent.futures.ThreadPoolExecutor()
if __name__ == '__main__':
broker = 'localhost'
port = 1883
broker_user = None
broker_pass = None
ttlock_client = None
ttlock_token = None
state_delay = DELAY_BETWEEN_LOCK_PUBLISH_INFOS
battery_delay = DELAY_BETWEEN_LOCK_PUBLISH_INFOS*5
loglevel = 'INFO'
full_cmd_arguments = sys.argv
argument_list = full_cmd_arguments[1:]
short_options = 'b:p:u:P:c:t:l:S:B:'
long_options = ['broker=', 'port=', 'user=',
'Pass=', 'client=', 'token=', 'log_level=', 'State_delay=','Battery_delay=']
try:
arguments, values = getopt.getopt(
argument_list, short_options, long_options)
except getopt.error as e:
raise ValueError('Invalid parameters!')
for current_argument, current_value in arguments:
if isEmptyStr(current_value):
pass
elif current_argument in ("-b", "--broker"):
broker = current_value
elif current_argument in ("-p", "--port"):
port = int(current_value)
elif current_argument in ("-u", "--user"):
broker_user = current_value
elif current_argument in ("-P", "--Pass"):
broker_pass = current_value
elif current_argument in ("-c", "--client"):
ttlock_client = current_value
elif current_argument in ("-t", "--token"):
ttlock_token = current_value
elif current_argument in ("-l", "--log_level"):
loglevel = current_value
elif current_argument in ("-S", "--State_delay"):
state_delay = int(current_value)
elif current_argument in ("-B", "--Battery_delay"):
battery_delay = int(current_value)
numeric_level = getattr(logging, loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % loglevel)
logging.basicConfig(level=numeric_level, datefmt='%Y-%m-%d %H:%M:%S',
format='%(asctime)-15s - [%(levelname)s] TTLOCK2MQTT: %(message)s', )
logging.debug("Options: {}, {}, {}, {}, {}, {}, {}, {}, {}".format(
ttlock_client, ttlock_token, broker, port, broker_user,loglevel, broker_pass,state_delay,battery_delay))
main(broker, port, broker_user, broker_pass, ttlock_client, ttlock_token,state_delay,battery_delay)
| true | true |
1c33e0a5b5b73fab447359be446c4ac32de31484 | 15,297 | py | Python | src/m2_more_sequences.py | kellyzc/16-SequencesAndMutation | 92a73f059c85f677ffe497ccef29f613f7172eea | [
"MIT"
] | null | null | null | src/m2_more_sequences.py | kellyzc/16-SequencesAndMutation | 92a73f059c85f677ffe497ccef29f613f7172eea | [
"MIT"
] | null | null | null | src/m2_more_sequences.py | kellyzc/16-SequencesAndMutation | 92a73f059c85f677ffe497ccef29f613f7172eea | [
"MIT"
] | null | null | null | """
This module lets you practice various patterns
for ITERATING through SEQUENCES, including selections from:
-- Beginning to end
-- Other ranges (e.g., backwards and every-3rd-item)
-- The COUNT/SUM/etc pattern
-- The FIND pattern (via LINEAR SEARCH)
-- The MAX/MIN pattern
-- Looking two places in the sequence at once
-- Looking at two sequences in parallel
Authors: David Mutchler, Valerie Galluzzi, Mark Hays, Amanda Stouder,
their colleagues and Zach Kelly.
""" # Done: 1. PUT YOUR NAME IN THE ABOVE LINE.
def main():
""" Calls the TEST functions in this module. """
run_test_shortest_string()
run_test_index_of_largest_number()
run_test_number_of_stutters()
run_test_is_palindrome()
run_test_count_same()
# ----------------------------------------------------------------------
# Some problems iterate (loop) through the sequence to find the LARGEST
# (or SMALLEST) item in the sequence, returning its INDEX (or possibly
# the item itself), as in the following problems:
# ----------------------------------------------------------------------
def run_test_shortest_string():
""" Tests the shortest_string function. """
print()
print('--------------------------------------------------')
print('Testing the shortest_string function:')
print('--------------------------------------------------')
sequence1 = ('all', 'we', 'are', 'saying',
'is', 'give', 'peace', 'a', 'chance')
sequence2 = ('all', 'we', 'are', 'saying',
'is', 'give', 'peace', 'a chance')
sequence3 = ('all we', 'are saying',
'is', 'give', 'peace', 'a chance')
sequence4 = ('all we are saying is give peace a chance',)
sequence5 = ('a', '', 'a')
expected = 'a'
answer = shortest_string(sequence1)
print('Expected and actual are:', expected, answer)
if expected != answer:
print(' Your answer is WRONG.')
expected = 'we'
answer = shortest_string(sequence2)
print('Expected and actual are:', expected, answer)
if expected != answer:
print(' Your answer is WRONG.')
expected = 'is'
answer = shortest_string(sequence3)
print('Expected and actual are:', expected, answer)
if expected != answer:
print(' Your answer is WRONG.')
expected = 'all we are saying is give peace a chance'
answer = shortest_string(sequence4)
print('Expected is:', expected)
print('Actual is: ', answer)
if expected != answer:
print(' Your answer is WRONG.')
expected = ''
answer = shortest_string(sequence5)
print('Expected and actual are:', expected, answer)
print('The expected and actual should both be the empty string.')
if expected != answer:
print(' Your answer is WRONG.')
def shortest_string(strings):
"""
What comes in:
-- a non-empty sequence of strings
What goes out: Returns the shortest string in the given sequence
of strings. If there is a tie for shortest string, returns the one
(among the ties) whose index is smallest.
Side effects: None.
Examples:
If the argument is:
['all', 'we', 'are saying', 'is', 'give', 'peace', 'a chance']
then this function returns 'we'
If the argument is:
['all we', 'are saying', 'is give', 'peace', 'a chance']
then this function returns 'peace'
If the argument is:
['all we are saying', 'is give', 'peace a chance']
then this function returns 'is give'
If the argument is ['abc'], then this function returns 'abc'.
Type hints:
:type strings: list[str] or tuple(str)
"""
# ------------------------------------------------------------------
# Done: 2. Implement and test this function.
# The testing code is already written for you (above).
# ------------------------------------------------------------------
smallest = strings[0]
for i in strings:
if len(i) < len(smallest):
smallest = i
return smallest
def run_test_index_of_largest_number():
""" Tests the index_of_largest_number function. """
print()
print('--------------------------------------------------')
print('Testing the index_of_largest_number function:')
print('--------------------------------------------------')
expected = 2
answer = index_of_largest_number([90, 0, 100, -5, 100, -10, 15], 3)
print('Expected and actual are:', expected, answer)
expected = 0
answer = index_of_largest_number([90, 0, 95, -5, 95, -10, 15], 2)
print('Expected and actual are:', expected, answer)
expected = 2
answer = index_of_largest_number([90, 0, 93, -5, 93, -10, 15], 7)
print('Expected and actual are:', expected, answer)
expected = 5
answer = index_of_largest_number([5, 30, 10, 15, 1, 60], 6)
print('Expected and actual are:', expected, answer)
expected = 0
answer = index_of_largest_number([-5, 30, 10, 15, 1, 60], 1)
print('Expected and actual are:', expected, answer)
expected = 1
answer = index_of_largest_number([-500000000000000000000000000000,
- 400000000000000000000000000000],
2)
print('Expected and actual are:', expected, answer)
expected = 0
answer = index_of_largest_number([-40000000000000000000000000000000000,
- 50000000000000000000000000000000000],
2)
print('Expected and actual are:', expected, answer)
def index_of_largest_number(numbers, n):
"""
What comes in:
-- a sequence of numbers
-- a positive integer n that is less than or equal to
the length of the given sequence
What goes out: INDEX of the largest number in the first n numbers
of the given sequence of numbers. If there is a tie for largest
number, returns the smallest of the indices of the tied numbers.
Side effects: None.
Examples:
If the first argument is:
[90, 0, 100, 200, -5, 100, -10, 200, 15]
and the second argument n is 3,
then this function returns 2 (because 100, at index 2,
is the largest of the first 3 numbers in the list).
Another example: for the same list as above, but with n = 2,
this function returns 0 (because 90, at index 0,
is the largest of the first 2 numbers in the list).
Yet another example: For the same list as above, but with n = 9,
this function returns 3 (because 200, at indices 3 and 7,
is the largest of the first 9 numbers in the list,
and we break the tie in favor of the smaller index).
Type hints:
:type numbers: list[float] or tuple[float]
:type n: int
"""
# ------------------------------------------------------------------
# Done: 3. Implement and test this function.
# The testing code is already written for you (above).
# ------------------------------------------------------------------
largest = 0
for i in range(1, n):
if numbers[i] > numbers[largest]:
largest = i
return largest
# ----------------------------------------------------------------------
# Some problems iterate (loop) through the sequence accessing TWO
# (or more) places in the sequence AT THE SAME ITERATION, like these:
# ----------------------------------------------------------------------
def run_test_number_of_stutters():
""" Tests the number_of_stutters function. """
print()
print('--------------------------------------------------')
print('Testing the number_of_stutters function:')
print('--------------------------------------------------')
expected = 2
answer = number_of_stutters('xhhbrrs')
print('Expected and actual are:', expected, answer)
expected = 3
answer = number_of_stutters('xxxx')
print('Expected and actual are:', expected, answer)
expected = 0
answer = number_of_stutters('xaxaxa')
print('Expected and actual are:', expected, answer)
expected = 7
answer = number_of_stutters('xxx yyy xxxx')
print('Expected and actual are:', expected, answer)
expected = 7
answer = number_of_stutters('xxxyyyxxxx')
print('Expected and actual are:', expected, answer)
def number_of_stutters(s):
"""
What comes in:
-- a string s
What goes out: Returns the number of times a letter is repeated
twice-in-a-row in the given string s.
Side effects: None.
Examples:
-- number_of_stutters('xhhbrrs') returns 2
-- number_of_stutters('xxxx') returns 3
-- number_of_stutters('xaxaxa') returns 0
-- number_of_stutters('xxx yyy xxxx') returns 7
-- number_of_stutters('xxxyyyxxxx') returns 7
-- number_of_stutters('') returns 0
Type hints:
:type s: str
"""
# ------------------------------------------------------------------
# Done: 4. Implement and test this function.
# The testing code is already written for you (above).
# ------------------------------------------------------------------
stutters = 0
for i in range(len(s) - 1):
if s[i] == s[i + 1]:
stutters = stutters + 1
return stutters
def run_test_is_palindrome():
""" Tests the is_palindrome function. """
print()
print('--------------------------------------------------')
print('Testing the is_palindrome function:')
print('--------------------------------------------------')
# Five tests.
answer1 = is_palindrome('bob')
answer2 = is_palindrome('obbo')
answer3 = is_palindrome('nope')
answer4 = is_palindrome('almosttxomla')
answer5 = is_palindrome('abbz')
# The next would normally be written:
# Murder for a jar of red rum
# It IS a palindrome (ignoring spaces and punctuation).
answer6 = is_palindrome('murderforajarofredrum')
print('Test is_palindrome: ',
answer1, answer2, answer3, answer4, answer5, answer6)
print('The above should be: True True False False False True')
# Explicit checks, to help students who return STRINGS that LOOK
# like True False.
if answer1 is not True:
print('Your code failed the 1st test for is_palindrome.')
if answer2 is not True:
print('Your code failed the 2nd test for is_palindrome.')
if answer3 is not False:
print('Your code failed the 3rd test for is_palindrome.')
if answer4 is not False:
print('Your code failed the 4th test for is_palindrome.')
if answer5 is not False:
print('Your code failed the 5th test for is_palindrome.')
if answer6 is not True:
print('Your code failed the 6th test for is_palindrome.')
def is_palindrome(s):
"""
What comes in:
-- a string s that (in this simple version of the palindrome
problem) contains only lower-case letters
(no spaces, no punctuation, no upper-case characters)
What goes out: Returns True if the given string s is a palindrome,
i.e., reads the same backwards as forwards.
Returns False if the given string s is not a palindrome.
Side effects: None.
Examples:
abba reads backwards as abba so it IS a palindrome
but
abbz reads backwards as zbba so it is NOT a palindrome
Here are two more examples: (Note: I have put spaces into the
strings for readability; the real problem is the string WITHOUT
the spaces.)
a b c d e x x e d c b a reads backwards as
a b c d e x x e d c b a
so it IS a palindrome
but
a b c d e x y e d c b a reads backwards as
a b c d e y x e d c b a
so it is NOT a palindrome
Type hints:
:type s: str
"""
# ------------------------------------------------------------------
# Done: 5. Implement and test this function.
# The testing code is already written for you (above).
#
####################################################################
# IMPORTANT: As with ALL problems, work a concrete example BY HAND
# to figure out how to solve this problem. The last two examples
# above are particularly good examples to work by hand.
####################################################################
# ------------------------------------------------------------------
for i in range(len(s) // 2):
if s[i] != s[-1 - i]:
return False
return True
# ----------------------------------------------------------------------
# Some problems loop (iterate) through two or more sequences
# IN PARALLEL, as in the count_same problem below.
# ----------------------------------------------------------------------
def run_test_count_same():
""" Tests the count_same function. """
print()
print('--------------------------------------------------')
print('Testing the count_same function:')
print('--------------------------------------------------')
expected = 1
answer = count_same([1, 44, 55],
[0, 44, 77])
print('Expected and actual are:', expected, answer)
expected = 3
answer = count_same([1, 44, 55, 88, 44],
[0, 44, 77, 88, 44])
print('Expected and actual are:', expected, answer)
expected = 0
answer = count_same([1, 44, 55, 88, 44],
[0, 43, 77, 8, 4])
print('Expected and actual are:', expected, answer)
def count_same(sequence1, sequence2):
"""
What comes in:
-- two sequences that have the same length
What goes out: Returns the number of indices at which the two
given sequences have the same item at that index.
Side effects: None.
Examples:
If the sequences are:
(11, 33, 83, 18, 30, 55)
(99, 33, 83, 19, 30, 44)
then this function returns 3
since the two sequences have the same item at:
-- index 1 (both are 33)
-- index 2 (both are 83)
-- index 4 (both are 30)
Another example: if the sequences are:
'how are you today?'
'HOW? r ex u tiday?'
then this function returns 8 since the sequences are the same
at indices 5 (both are 'r'), 10 (both are 'u'), 11 (both are ' '),
12 (both are 't'), 14 (both are 'd'), 15 (both are 'a'),
16 (both are 'y') and 17 (both are '?') -- 8 indices.
Type hints:
type: sequence1: tuple or list or string
type: sequence2: tuple or list or string
"""
# ------------------------------------------------------------------
# Done: 6. Implement and test this function.
# The testing code is already written for you (above).
# ------------------------------------------------------------------
count = 0
for i in range(len(sequence1)):
if sequence1[i] == sequence2[i]:
count = count + 1
return count
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
| 37.218978 | 77 | 0.541217 |
def main():
run_test_shortest_string()
run_test_index_of_largest_number()
run_test_number_of_stutters()
run_test_is_palindrome()
run_test_count_same()
def run_test_shortest_string():
print()
print('--------------------------------------------------')
print('Testing the shortest_string function:')
print('--------------------------------------------------')
sequence1 = ('all', 'we', 'are', 'saying',
'is', 'give', 'peace', 'a', 'chance')
sequence2 = ('all', 'we', 'are', 'saying',
'is', 'give', 'peace', 'a chance')
sequence3 = ('all we', 'are saying',
'is', 'give', 'peace', 'a chance')
sequence4 = ('all we are saying is give peace a chance',)
sequence5 = ('a', '', 'a')
expected = 'a'
answer = shortest_string(sequence1)
print('Expected and actual are:', expected, answer)
if expected != answer:
print(' Your answer is WRONG.')
expected = 'we'
answer = shortest_string(sequence2)
print('Expected and actual are:', expected, answer)
if expected != answer:
print(' Your answer is WRONG.')
expected = 'is'
answer = shortest_string(sequence3)
print('Expected and actual are:', expected, answer)
if expected != answer:
print(' Your answer is WRONG.')
expected = 'all we are saying is give peace a chance'
answer = shortest_string(sequence4)
print('Expected is:', expected)
print('Actual is: ', answer)
if expected != answer:
print(' Your answer is WRONG.')
expected = ''
answer = shortest_string(sequence5)
print('Expected and actual are:', expected, answer)
print('The expected and actual should both be the empty string.')
if expected != answer:
print(' Your answer is WRONG.')
def shortest_string(strings):
smallest = strings[0]
for i in strings:
if len(i) < len(smallest):
smallest = i
return smallest
def run_test_index_of_largest_number():
print()
print('--------------------------------------------------')
print('Testing the index_of_largest_number function:')
print('--------------------------------------------------')
expected = 2
answer = index_of_largest_number([90, 0, 100, -5, 100, -10, 15], 3)
print('Expected and actual are:', expected, answer)
expected = 0
answer = index_of_largest_number([90, 0, 95, -5, 95, -10, 15], 2)
print('Expected and actual are:', expected, answer)
expected = 2
answer = index_of_largest_number([90, 0, 93, -5, 93, -10, 15], 7)
print('Expected and actual are:', expected, answer)
expected = 5
answer = index_of_largest_number([5, 30, 10, 15, 1, 60], 6)
print('Expected and actual are:', expected, answer)
expected = 0
answer = index_of_largest_number([-5, 30, 10, 15, 1, 60], 1)
print('Expected and actual are:', expected, answer)
expected = 1
answer = index_of_largest_number([-500000000000000000000000000000,
- 400000000000000000000000000000],
2)
print('Expected and actual are:', expected, answer)
expected = 0
answer = index_of_largest_number([-40000000000000000000000000000000000,
- 50000000000000000000000000000000000],
2)
print('Expected and actual are:', expected, answer)
def index_of_largest_number(numbers, n):
largest = 0
for i in range(1, n):
if numbers[i] > numbers[largest]:
largest = i
return largest
def run_test_number_of_stutters():
print()
print('--------------------------------------------------')
print('Testing the number_of_stutters function:')
print('--------------------------------------------------')
expected = 2
answer = number_of_stutters('xhhbrrs')
print('Expected and actual are:', expected, answer)
expected = 3
answer = number_of_stutters('xxxx')
print('Expected and actual are:', expected, answer)
expected = 0
answer = number_of_stutters('xaxaxa')
print('Expected and actual are:', expected, answer)
expected = 7
answer = number_of_stutters('xxx yyy xxxx')
print('Expected and actual are:', expected, answer)
expected = 7
answer = number_of_stutters('xxxyyyxxxx')
print('Expected and actual are:', expected, answer)
def number_of_stutters(s):
stutters = 0
for i in range(len(s) - 1):
if s[i] == s[i + 1]:
stutters = stutters + 1
return stutters
def run_test_is_palindrome():
print()
print('--------------------------------------------------')
print('Testing the is_palindrome function:')
print('--------------------------------------------------')
answer1 = is_palindrome('bob')
answer2 = is_palindrome('obbo')
answer3 = is_palindrome('nope')
answer4 = is_palindrome('almosttxomla')
answer5 = is_palindrome('abbz')
answer6 = is_palindrome('murderforajarofredrum')
print('Test is_palindrome: ',
answer1, answer2, answer3, answer4, answer5, answer6)
print('The above should be: True True False False False True')
if answer1 is not True:
print('Your code failed the 1st test for is_palindrome.')
if answer2 is not True:
print('Your code failed the 2nd test for is_palindrome.')
if answer3 is not False:
print('Your code failed the 3rd test for is_palindrome.')
if answer4 is not False:
print('Your code failed the 4th test for is_palindrome.')
if answer5 is not False:
print('Your code failed the 5th test for is_palindrome.')
if answer6 is not True:
print('Your code failed the 6th test for is_palindrome.')
def is_palindrome(s):
| true | true |
1c33e1cecd6c05dc0a9806ea1b1352fc1333bd65 | 1,620 | py | Python | test/vpp_bond_interface.py | quantonium/vpp | 57612ebcf3b5414c6a2f6153a3338803ac94d759 | [
"Apache-2.0"
] | null | null | null | test/vpp_bond_interface.py | quantonium/vpp | 57612ebcf3b5414c6a2f6153a3338803ac94d759 | [
"Apache-2.0"
] | null | null | null | test/vpp_bond_interface.py | quantonium/vpp | 57612ebcf3b5414c6a2f6153a3338803ac94d759 | [
"Apache-2.0"
] | null | null | null | from vpp_object import VppObject
from vpp_interface import VppInterface
class VppBondInterface(VppInterface):
    """VPP bond interface."""

    def __init__(self, test, mode, lb=0,
                 use_custom_mac=0, mac_address=''):
        """ Create VPP Bond interface """
        self._test = test
        self.mode = mode
        self.lb = lb
        self.use_custom_mac = use_custom_mac
        self.mac_address = mac_address
        # Populated by add_vpp_config() once VPP assigns an index.
        self._sw_if_index = 0
        super(VppBondInterface, self).__init__(test)

    def add_vpp_config(self):
        """Create the bond interface in VPP and record its sw_if_index."""
        r = self.test.vapi.bond_create(self.mode,
                                       self.lb,
                                       self.use_custom_mac,
                                       self.mac_address)
        self._sw_if_index = r.sw_if_index

    def remove_vpp_config(self):
        """Delete this bond interface from VPP."""
        self.test.vapi.bond_delete(self.sw_if_index)

    def enslave_vpp_bond_interface(self,
                                   sw_if_index,
                                   is_passive,
                                   is_long_timeout):
        """Add the interface given by sw_if_index as a slave of this bond."""
        self.test.vapi.bond_enslave(sw_if_index,
                                    self.sw_if_index,
                                    is_passive,
                                    is_long_timeout)

    def detach_vpp_bond_interface(self,
                                  sw_if_index):
        """Detach the given slave interface from its bond."""
        self.test.vapi.bond_detach_slave(sw_if_index)

    def is_interface_config_in_dump(self, dump):
        """Return True if this bond's sw_if_index appears in ``dump``."""
        # The original used a `for`/`else` with no `break`, which was
        # misleading (the `else` always ran when no match returned early).
        # `any` expresses the membership test directly.
        return any(i.sw_if_index == self.sw_if_index for i in dump)
| 33.061224 | 59 | 0.52037 | from vpp_object import VppObject
from vpp_interface import VppInterface
class VppBondInterface(VppInterface):
    """VPP bond interface."""

    def __init__(self, test, mode, lb=0,
                 use_custom_mac=0, mac_address=''):
        """Create a VPP bond interface object (call add_vpp_config to realize it)."""
        self._test = test
        self.mode = mode
        self.lb = lb
        self.use_custom_mac = use_custom_mac
        self.mac_address = mac_address
        # Filled in by add_vpp_config() from VPP's reply.
        self._sw_if_index = 0
        super(VppBondInterface, self).__init__(test)

    def add_vpp_config(self):
        """Create the bond in VPP and remember the assigned sw_if_index."""
        r = self.test.vapi.bond_create(self.mode,
                                       self.lb,
                                       self.use_custom_mac,
                                       self.mac_address)
        self._sw_if_index = r.sw_if_index

    def remove_vpp_config(self):
        """Remove the bond from VPP."""
        self.test.vapi.bond_delete(self.sw_if_index)

    def enslave_vpp_bond_interface(self,
                                   sw_if_index,
                                   is_passive,
                                   is_long_timeout):
        """Enslave the given interface to this bond."""
        self.test.vapi.bond_enslave(sw_if_index,
                                    self.sw_if_index,
                                    is_passive,
                                    is_long_timeout)

    def detach_vpp_bond_interface(self,
                                  sw_if_index):
        """Detach the given slave interface from its bond."""
        self.test.vapi.bond_detach_slave(sw_if_index)

    def is_interface_config_in_dump(self, dump):
        """Return True if this bond's sw_if_index appears in ``dump``."""
        # Replaces a confusing `for`/`else` without `break`: the `else`
        # always executed after a completed loop, so `any` is equivalent
        # and unambiguous.
        return any(i.sw_if_index == self.sw_if_index for i in dump)
| true | true |
1c33e23b40cc904d68669a94274e02ca7608984f | 6,240 | py | Python | dvc/repo/reproduce.py | sahilbhosale63/dvc | 999c9e188801f971b75f51ca84f5bad533cb462c | [
"Apache-2.0"
] | null | null | null | dvc/repo/reproduce.py | sahilbhosale63/dvc | 999c9e188801f971b75f51ca84f5bad533cb462c | [
"Apache-2.0"
] | null | null | null | dvc/repo/reproduce.py | sahilbhosale63/dvc | 999c9e188801f971b75f51ca84f5bad533cb462c | [
"Apache-2.0"
] | null | null | null | import logging
from dvc.exceptions import InvalidArgumentError, ReproductionError
from dvc.repo.scm_context import scm_context
from . import locked
from .graph import get_pipeline, get_pipelines
logger = logging.getLogger(__name__)
def _reproduce_stage(stage, **kwargs):
    """Reproduce a single stage; return it in a list if it was re-run, else []."""
    if stage.frozen and not stage.is_import:
        logger.warning(
            "{} is frozen. Its dependencies are"
            " not going to be reproduced.".format(stage)
        )
    reproduced = stage.reproduce(**kwargs)
    if not reproduced:
        return []
    if not kwargs.get("dry", False):
        from ..dvcfile import Dvcfile

        # Persist the refreshed stage definition back to its dvcfile.
        Dvcfile(reproduced.repo, reproduced.path).dump(reproduced)
    return [reproduced]
def _get_active_graph(G):
    """Return a copy of ``G`` without subgraphs reachable only via frozen stages."""
    import networkx as nx

    active = G.copy()
    for stage in G:
        if not stage.frozen:
            continue
        # Cut the frozen stage off from its dependency edges.
        active.remove_edges_from(G.out_edges(stage))
        for _, child in G.out_edges(stage):
            for node in nx.dfs_preorder_nodes(G, child):
                # `in_degree` is falsy both when nothing in `active` still
                # depends on `node` (0) and when the node was already
                # removed (it then returns an empty InDegreeView).
                if active.in_degree(node):
                    continue
                # `remove_edges_from` silently ignores edges already gone.
                active.remove_edges_from(G.out_edges(node))
                active.remove_node(node)
    return active
@locked
@scm_context
def reproduce(
    self,
    target=None,
    recursive=False,
    pipeline=False,
    all_pipelines=False,
    **kwargs
):
    """Reproduce the requested stage(s) over the active (non-frozen) graph."""
    from dvc.utils import parse_target

    assert target is None or isinstance(target, str)
    if not target and not all_pipelines:
        raise InvalidArgumentError(
            "Neither `target` nor `--all-pipelines` are specified."
        )

    # Fall back to the configured default unless interactivity was forced.
    if not kwargs.get("interactive", False):
        kwargs["interactive"] = self.config["core"].get("interactive", False)

    active_graph = _get_active_graph(self.graph)
    active_pipelines = get_pipelines(active_graph)

    path, name = parse_target(target)
    if not (pipeline or all_pipelines):
        targets = self.collect(target, recursive=recursive, graph=active_graph)
    else:
        if all_pipelines:
            selected = active_pipelines
        else:
            selected = [
                get_pipeline(active_pipelines, self.get_stage(path, name))
            ]
        # The roots (no incoming edges) of each pipeline are the targets.
        targets = [
            stage
            for pln in selected
            for stage in pln
            if pln.in_degree(stage) == 0
        ]

    return _reproduce_stages(active_graph, targets, **kwargs)
def _reproduce_stages(
    G, stages, downstream=False, single_item=False, **kwargs
):
    r"""Derive the evaluation of the given node for the given graph.

    When you _reproduce a stage_, you want to _evaluate the descendants_
    to know if it make sense to _recompute_ it. A post-ordered search
    will give us an order list of the nodes we want.

    For example, let's say that we have the following pipeline:

                           E
                          / \
                         D   F
                        / \   \
                       B   C   G
                        \ /
                         A

    The derived evaluation of D would be: [A, B, C, D]

    In case that `downstream` option is specified, the desired effect
    is to derive the evaluation starting from the given stage up to the
    ancestors. However, the `networkx.ancestors` returns a set, without
    any guarantee of any order, so we are going to reverse the graph and
    use a reverse post-ordered search using the given stage as a starting
    point.

                   E                        A
                  / \                      / \
                 D   F                    B   C   G
                / \   \  --- reverse -->   \ /   /
               B   C   G                    D   F
                \ /                          \ /
                 A                            E

    The derived evaluation of _downstream_ B would be: [B, D, E]
    """
    import networkx as nx

    if single_item:
        all_pipelines = stages
    else:
        all_pipelines = []
        for stage in stages:
            if downstream:
                # NOTE (py3 only):
                # Python's `deepcopy` defaults to pickle/unpickle the object.
                # Stages are complex objects (with references to `repo`,
                # `outs`, and `deps`) that cause struggles when you try
                # to serialize them. We need to create a copy of the graph
                # itself, and then reverse it, instead of using
                # graph.reverse() directly because it calls `deepcopy`
                # underneath -- unless copy=False is specified.
                nodes = nx.dfs_postorder_nodes(
                    G.copy().reverse(copy=False), stage
                )
                all_pipelines += reversed(list(nodes))
            else:
                all_pipelines += nx.dfs_postorder_nodes(G, stage)

    # Deduplicate while keeping first-seen order. Graph nodes are hashable,
    # so an insertion-ordered dict makes this O(n) instead of the previous
    # quadratic `stage not in pipeline` list scan.
    pipeline = list(dict.fromkeys(all_pipelines))

    force_downstream = kwargs.pop("force_downstream", False)
    result = []
    # `ret` is used to add a cosmetic newline.
    ret = []
    for stage in pipeline:
        if ret:
            logger.info("")
        try:
            ret = _reproduce_stage(stage, **kwargs)
            if len(ret) != 0 and force_downstream:
                # NOTE: we are walking our pipeline from the top to the
                # bottom. If one stage is changed, it will be reproduced,
                # which tells us that we should force reproducing all of
                # the other stages down below, even if their direct
                # dependencies didn't change.
                kwargs["force"] = True
            result.extend(ret)
        except Exception as exc:
            raise ReproductionError(stage.relpath) from exc
    return result
| 33.191489 | 79 | 0.55609 | import logging
from dvc.exceptions import InvalidArgumentError, ReproductionError
from dvc.repo.scm_context import scm_context
from . import locked
from .graph import get_pipeline, get_pipelines
logger = logging.getLogger(__name__)
def _reproduce_stage(stage, **kwargs):
    """Reproduce one stage; return [stage] when it was re-run, else []."""
    if stage.frozen and not stage.is_import:
        logger.warning(
            "{} is frozen. Its dependencies are"
            " not going to be reproduced.".format(stage)
        )
    result = stage.reproduce(**kwargs)
    if not result:
        return []
    dry = kwargs.get("dry", False)
    if not dry:
        from ..dvcfile import Dvcfile

        # Write the refreshed stage definition back to disk.
        dvcfile = Dvcfile(result.repo, result.path)
        dvcfile.dump(result)
    return [result]
def _get_active_graph(G):
    """Copy ``G`` and drop everything reachable only through frozen stages."""
    import networkx as nx

    active = G.copy()
    frozen_stages = [stage for stage in G if stage.frozen]
    for stage in frozen_stages:
        active.remove_edges_from(G.out_edges(stage))
        for _, child in G.out_edges(stage):
            for node in nx.dfs_preorder_nodes(G, child):
                # A falsy `in_degree` covers both "no remaining parents"
                # (0) and "already removed from `active`" (an empty
                # InDegreeView), so the node is unreachable either way.
                if not active.in_degree(node):
                    # `remove_edges_from` ignores edges already removed.
                    active.remove_edges_from(G.out_edges(node))
                    active.remove_node(node)
    return active
@locked
@scm_context
def reproduce(
    self,
    target=None,
    recursive=False,
    pipeline=False,
    all_pipelines=False,
    **kwargs
):
    """Reproduce the selected stage(s) of the active (non-frozen) graph."""
    from dvc.utils import parse_target

    assert target is None or isinstance(target, str)
    if not target and not all_pipelines:
        raise InvalidArgumentError(
            "Neither `target` nor `--all-pipelines` are specified."
        )

    interactive = kwargs.get("interactive", False)
    if not interactive:
        # Fall back to the value configured in core.interactive.
        kwargs["interactive"] = self.config["core"].get("interactive", False)

    active_graph = _get_active_graph(self.graph)
    active_pipelines = get_pipelines(active_graph)

    path, name = parse_target(target)
    if pipeline or all_pipelines:
        if all_pipelines:
            pipelines = active_pipelines
        else:
            pipelines = [
                get_pipeline(active_pipelines, self.get_stage(path, name))
            ]
        # The reproduction targets are the roots of each pipeline.
        targets = []
        for pln in pipelines:
            targets.extend(
                stage for stage in pln if pln.in_degree(stage) == 0
            )
    else:
        targets = self.collect(target, recursive=recursive, graph=active_graph)

    return _reproduce_stages(active_graph, targets, **kwargs)
def _reproduce_stages(
    G, stages, downstream=False, single_item=False, **kwargs
):
    """Reproduce ``stages`` (and their graph neighbours) in evaluation order.

    For each target stage, a post-order DFS over ``G`` yields the stage's
    dependencies before the stage itself. With ``downstream=True`` the
    graph is reversed first, so the walk instead yields the stage followed
    by its descendants. With ``single_item=True`` only the given stages
    themselves are reproduced.
    """
    import networkx as nx

    if single_item:
        all_pipelines = stages
    else:
        all_pipelines = []
        for stage in stages:
            if downstream:
                # Python's `deepcopy` defaults to pickle/unpickle the object.
                # Stages are complex objects (with references to `repo`,
                # `outs`, and `deps`) that cause struggles when you try
                # to serialize them. We need to create a copy of the graph
                # itself, and then reverse it, instead of using
                # graph.reverse() directly because it calls `deepcopy`
                # underneath -- unless copy=False is specified.
                nodes = nx.dfs_postorder_nodes(
                    G.copy().reverse(copy=False), stage
                )
                all_pipelines += reversed(list(nodes))
            else:
                all_pipelines += nx.dfs_postorder_nodes(G, stage)

    # Deduplicate while preserving first-seen order; graph nodes are
    # hashable, so this is O(n) instead of the previous quadratic
    # `stage not in pipeline` list scan.
    pipeline = list(dict.fromkeys(all_pipelines))

    force_downstream = kwargs.pop("force_downstream", False)
    result = []
    # `ret` is used to add a cosmetic newline.
    ret = []
    for stage in pipeline:
        if ret:
            logger.info("")
        try:
            ret = _reproduce_stage(stage, **kwargs)
            if len(ret) != 0 and force_downstream:
                # NOTE: we are walking our pipeline from the top to the
                # bottom. If one stage is changed, it will be reproduced,
                # which tells us that we should force reproducing all of
                # the other stages down below, even if their direct
                # dependencies didn't change.
                kwargs["force"] = True
            result.extend(ret)
        except Exception as exc:
            raise ReproductionError(stage.relpath) from exc
    return result
| true | true |
1c33e23fa22cebfd129075adb7e71157f71612ea | 344 | py | Python | runtests.py | gasman/wagtailmodelchooser | 1aef9c0f3589d9ad81fe04dadeacc90a27e315d8 | [
"BSD-2-Clause"
] | 49 | 2019-03-01T15:50:32.000Z | 2022-03-01T10:47:57.000Z | runtests.py | gasman/wagtailmodelchooser | 1aef9c0f3589d9ad81fe04dadeacc90a27e315d8 | [
"BSD-2-Clause"
] | 15 | 2019-08-08T11:47:27.000Z | 2022-02-15T06:18:48.000Z | runtests.py | gasman/wagtailmodelchooser | 1aef9c0f3589d9ad81fe04dadeacc90a27e315d8 | [
"BSD-2-Clause"
] | 18 | 2019-03-11T19:30:49.000Z | 2022-03-02T13:07:13.000Z | #!/usr/bin/env python
import os
import sys
def run():
    """Configure Django for the bundled test settings and run the suite."""
    from django.core.management import execute_from_command_line

    os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings'
    os.environ.setdefault('DATABASE_NAME', ':memory:')
    # Forward any extra CLI arguments to the `test` management command.
    argv = [sys.argv[0], 'test'] + sys.argv[1:]
    execute_from_command_line(argv)


if __name__ == '__main__':
    run()
| 21.5 | 67 | 0.700581 |
import os
import sys
def run():
    """Run the project's test suite via Django's management command."""
    from django.core.management import execute_from_command_line

    os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings'
    os.environ.setdefault('DATABASE_NAME', ':memory:')
    # Extra CLI arguments are passed straight through to `test`.
    execute_from_command_line([sys.argv[0], 'test'] + sys.argv[1:])


if __name__ == '__main__':
    run()
| true | true |
1c33e355dcc8c83d9ee4fe92e664f027b881475a | 578 | py | Python | pluto/finance/commission/models.py | chalant/pluto | e7bfd35a2c1fc0e0753bd2f840b0a4385b5124fc | [
"Apache-2.0"
] | null | null | null | pluto/finance/commission/models.py | chalant/pluto | e7bfd35a2c1fc0e0753bd2f840b0a4385b5124fc | [
"Apache-2.0"
] | null | null | null | pluto/finance/commission/models.py | chalant/pluto | e7bfd35a2c1fc0e0753bd2f840b0a4385b5124fc | [
"Apache-2.0"
] | null | null | null | class CommissionModels(object):
def __init__(self, commissions_setup):
self._models = commissions_setup
def get_commission_model(self, asset_type, exchange):
return self._models[asset_type][exchange]
def __repr__(self):
return repr(self._models)
def __str__(self):
return str(self._models)
def get_commission_model(model_type, asset_class):
    """Build a commission model for ``model_type``/``asset_class``.

    Not implemented yet: parameters are meant to be loaded from yaml
    files, with user-supplied values overriding the defaults.
    """
    pass
class CommissionModels(object):
    """Registry mapping (asset type, exchange) pairs to commission models."""

    def __init__(self, commissions_setup):
        # Nested mapping: {asset_type: {exchange: commission model}}.
        self._models = commissions_setup

    def get_commission_model(self, asset_type, exchange):
        """Return the commission model registered for the given pair."""
        models_for_asset = self._models[asset_type]
        return models_for_asset[exchange]

    def __repr__(self):
        return repr(self._models)

    def __str__(self):
        return str(self._models)
def get_commission_model(model_type, asset_class):
    """Placeholder for building a commission model from yaml parameters.

    Intended behaviour (not implemented yet): load default parameters
    from yaml files and let user-provided values override them.
    """
    pass
| true | true |
1c33e372b310eff0d626ed6cbbbea55bcce490bb | 6,035 | py | Python | grr/client/grr_response_client/client_stats.py | dekoder/grr | 27ba38dc0f5ad4f3e0cdbfb146a0a789e3b0d27b | [
"Apache-2.0"
] | 3 | 2018-09-30T01:31:29.000Z | 2019-04-22T11:44:54.000Z | grr/client/grr_response_client/client_stats.py | tomchop/grr | 27ba38dc0f5ad4f3e0cdbfb146a0a789e3b0d27b | [
"Apache-2.0"
] | 1 | 2022-03-02T09:58:05.000Z | 2022-03-02T09:58:05.000Z | grr/client/grr_response_client/client_stats.py | tomchop/grr | 27ba38dc0f5ad4f3e0cdbfb146a0a789e3b0d27b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""CPU/IO stats collector."""
from __future__ import unicode_literals
import threading
import time
import psutil
from grr_response_client.client_actions import admin
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import stats
from grr_response_core.lib.rdfvalues import client_action as rdf_client_action
from grr_response_core.lib.rdfvalues import client_stats as rdf_client_stats
class ClientStatsCollector(threading.Thread):
  """This thread keeps track of client stats.

  It periodically samples CPU and IO usage of the client process and,
  subject to throttling, uploads the collected samples to the server.
  """

  SLEEP_DURATION = rdfvalue.Duration("10s")  # A delay between main loop ticks.
  KEEP_DURATION = rdfvalue.Duration("1h")  # How long we preserve samples.

  # Throttling bounds for `_Send`: never upload more often than
  # MIN_SEND_INTERVAL; always upload once MAX_SEND_INTERVAL has elapsed.
  MIN_SEND_INTERVAL = rdfvalue.Duration("60s")
  MAX_SEND_INTERVAL = rdfvalue.Duration("50m")

  # TODO(hanuszczak): This is a hack used to make `grr/server/front_end_test.py`
  # work. While not terrible, including any kind of hacks to production code
  # just to make the tests work does not seem like a great idea. It should be
  # investigated whether we can get rid of it and make the tests work in some
  # other way.
  exit = False  # Setting this value to `True` terminates the thread.

  def __init__(self, worker):
    """Initializes the stat collector.

    Args:
      worker: A `GRRClientWorker` instance that spawned this stat collector.
    """
    super(ClientStatsCollector, self).__init__()
    self.daemon = True
    self._worker = worker
    self._process = psutil.Process()
    self._cpu_samples = []
    self._io_samples = []
    # Epoch zero guarantees the first `_Send` sees a delta larger than
    # MAX_SEND_INTERVAL and therefore uploads immediately.
    self._last_send_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0)
    self._should_send = False
    stats.STATS.RegisterGaugeMetric("grr_client_cpu_usage", str)
    stats.STATS.SetGaugeCallback("grr_client_cpu_usage", self._PrintCpuSamples)
    stats.STATS.RegisterGaugeMetric("grr_client_io_usage", str)
    stats.STATS.SetGaugeCallback("grr_client_io_usage", self._PrintIOSample)

  def RequestSend(self):
    """Requests to send the collected data.

    This method does not send the data immediately and does not block. Instead,
    it will upload samples in near future provided that sufficient amount of
    time has elapsed since the last upload.
    """
    self._should_send = True

  def CpuSamplesBetween(self, start_time, end_time):
    """Computes CPU samples collected between specified time range.

    Args:
      start_time: A lower bound for the timestamp of returned samples.
      end_time: An upper bound for the timestamp of returned samples.

    Returns:
      A list of `CpuSample` instances.
    """
    return _SamplesBetween(self._cpu_samples, start_time, end_time)

  def IOSamplesBetween(self, start_time, end_time):
    """Computes IO samples collected between specified time range.

    Args:
      start_time: A lower bound for the timestamp of returned samples.
      end_time: An upper bound for the timestamp of returned samples.

    Returns:
      A list of `IOSample` instances.
    """
    return _SamplesBetween(self._io_samples, start_time, end_time)

  def run(self):
    """Main loop: collects and (maybe) uploads samples until `exit` is set."""
    while not self.exit:
      self._Collect()
      self._Send()
      time.sleep(self.SLEEP_DURATION.seconds)

  def _Send(self):
    # Uploads the collected samples, throttled by `_ShouldSend`.
    if not self._ShouldSend():
      return
    # TODO(hanuszczak): We shouldn't manually create action instances. Instead,
    # we should refactor action code to some other function and make the action
    # class use that function. Then here we should use that function as well.
    #
    # Also, it looks like there is a very weird dependency triangle: the worker
    # creates stat collector (which requires a worker), then the stats action
    # requires a worker and uses stat collector internally. But this action is
    # spawned by the stat collector. What...?
    action = admin.GetClientStatsAuto(grr_worker=self._worker)
    request = rdf_client_action.GetClientStatsRequest(
        start_time=self._last_send_time)
    action.Run(request)
    self._should_send = False
    self._last_send_time = rdfvalue.RDFDatetime.Now()

  def _ShouldSend(self):
    # Never send within MIN_SEND_INTERVAL of the last upload; always send
    # after MAX_SEND_INTERVAL; otherwise only on request or worker activity.
    delta = rdfvalue.RDFDatetime.Now() - self._last_send_time
    if delta < self.MIN_SEND_INTERVAL:
      return False
    if delta > self.MAX_SEND_INTERVAL:
      return True
    return self._should_send or self._worker.IsActive()

  def _Collect(self):
    self._CollectCpuUsage()
    self._CollectIOUsage()

  def _CollectCpuUsage(self):
    cpu_times = self._process.cpu_times()
    cpu_percent = self._process.cpu_percent()
    sample = rdf_client_stats.CpuSample(
        timestamp=rdfvalue.RDFDatetime.Now(),
        user_cpu_time=cpu_times.user,
        system_cpu_time=cpu_times.system,
        cpu_percent=cpu_percent)
    self._cpu_samples.append(sample)
    # Trim samples older than KEEP_DURATION.
    self._cpu_samples = self.CpuSamplesBetween(
        start_time=rdfvalue.RDFDatetime.Now() - self.KEEP_DURATION,
        end_time=rdfvalue.RDFDatetime.Now())

  def _CollectIOUsage(self):
    # Not supported on MacOS.
    try:
      io_counters = self._process.io_counters()
    except (AttributeError, NotImplementedError, psutil.Error):
      return
    sample = rdf_client_stats.IOSample(
        timestamp=rdfvalue.RDFDatetime.Now(),
        read_bytes=io_counters.read_bytes,
        write_bytes=io_counters.write_bytes,
        read_count=io_counters.read_count,
        write_count=io_counters.write_count)
    self._io_samples.append(sample)
    # Trim samples older than KEEP_DURATION.
    self._io_samples = self.IOSamplesBetween(
        start_time=rdfvalue.RDFDatetime.Now() - self.KEEP_DURATION,
        end_time=rdfvalue.RDFDatetime.Now())

  def _PrintCpuSamples(self):
    """Returns a string with last 20 cpu load samples."""
    # NOTE(review): samples are constructed with `cpu_percent=...` above;
    # confirm that `CpuSample.percent` is a valid accessor for that field.
    samples = [str(sample.percent) for sample in self._cpu_samples[-20:]]
    return ", ".join(samples)

  def _PrintIOSample(self):
    try:
      return str(self._process.io_counters())
    except (NotImplementedError, AttributeError):
      return "Not available on this platform."
def _SamplesBetween(samples, start_time, end_time):
  """Returns the samples whose timestamp lies in [start_time, end_time]."""
  selected = []
  for sample in samples:
    if start_time <= sample.timestamp <= end_time:
      selected.append(sample)
  return selected
| 34.289773 | 80 | 0.72792 |
from __future__ import unicode_literals
import threading
import time
import psutil
from grr_response_client.client_actions import admin
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import stats
from grr_response_core.lib.rdfvalues import client_action as rdf_client_action
from grr_response_core.lib.rdfvalues import client_stats as rdf_client_stats
class ClientStatsCollector(threading.Thread):
  """Background thread that tracks CPU/IO usage of the client process."""

  SLEEP_DURATION = rdfvalue.Duration("10s")  # Delay between main loop ticks.
  KEEP_DURATION = rdfvalue.Duration("1h")  # How long samples are preserved.
  MIN_SEND_INTERVAL = rdfvalue.Duration("60s")  # Upload throttle lower bound.
  MAX_SEND_INTERVAL = rdfvalue.Duration("50m")  # Force an upload after this.

  exit = False  # Set to True to terminate the thread.

  def __init__(self, worker):
    """Initializes the collector.

    Args:
      worker: The worker instance that spawned this stat collector.
    """
    super(ClientStatsCollector, self).__init__()
    self.daemon = True
    self._worker = worker
    self._process = psutil.Process()
    self._cpu_samples = []
    self._io_samples = []
    # Epoch zero makes the very first `_Send` upload immediately.
    self._last_send_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0)
    self._should_send = False
    stats.STATS.RegisterGaugeMetric("grr_client_cpu_usage", str)
    stats.STATS.SetGaugeCallback("grr_client_cpu_usage", self._PrintCpuSamples)
    stats.STATS.RegisterGaugeMetric("grr_client_io_usage", str)
    stats.STATS.SetGaugeCallback("grr_client_io_usage", self._PrintIOSample)

  def RequestSend(self):
    """Requests a non-blocking upload of collected data in the near future."""
    self._should_send = True

  def CpuSamplesBetween(self, start_time, end_time):
    """Returns CPU samples with start_time <= timestamp <= end_time."""
    return _SamplesBetween(self._cpu_samples, start_time, end_time)

  def IOSamplesBetween(self, start_time, end_time):
    """Returns IO samples with start_time <= timestamp <= end_time."""
    return _SamplesBetween(self._io_samples, start_time, end_time)

  def run(self):
    """Collects and (maybe) uploads samples until `exit` is set."""
    while not self.exit:
      self._Collect()
      self._Send()
      time.sleep(self.SLEEP_DURATION.seconds)

  def _Send(self):
    # Uploads the collected samples, throttled by `_ShouldSend`.
    if not self._ShouldSend():
      return
    # TODO: instead of manually creating action instances here,
    # we should refactor action code to some other function and make the action
    # class use that function. Then here we should use that function as well.
    #
    # Also, it looks like there is a very weird dependency triangle: the worker
    # creates stat collector (which requires a worker), then the stats action
    # requires a worker and uses stat collector internally. But this action is
    # spawned by the stat collector. What...?
    action = admin.GetClientStatsAuto(grr_worker=self._worker)
    request = rdf_client_action.GetClientStatsRequest(
        start_time=self._last_send_time)
    action.Run(request)
    self._should_send = False
    self._last_send_time = rdfvalue.RDFDatetime.Now()

  def _ShouldSend(self):
    # Never send within MIN_SEND_INTERVAL of the last upload; always send
    # after MAX_SEND_INTERVAL; otherwise only on request or worker activity.
    delta = rdfvalue.RDFDatetime.Now() - self._last_send_time
    if delta < self.MIN_SEND_INTERVAL:
      return False
    if delta > self.MAX_SEND_INTERVAL:
      return True
    return self._should_send or self._worker.IsActive()

  def _Collect(self):
    self._CollectCpuUsage()
    self._CollectIOUsage()

  def _CollectCpuUsage(self):
    cpu_times = self._process.cpu_times()
    cpu_percent = self._process.cpu_percent()
    sample = rdf_client_stats.CpuSample(
        timestamp=rdfvalue.RDFDatetime.Now(),
        user_cpu_time=cpu_times.user,
        system_cpu_time=cpu_times.system,
        cpu_percent=cpu_percent)
    self._cpu_samples.append(sample)
    # Drop samples older than KEEP_DURATION.
    self._cpu_samples = self.CpuSamplesBetween(
        start_time=rdfvalue.RDFDatetime.Now() - self.KEEP_DURATION,
        end_time=rdfvalue.RDFDatetime.Now())

  def _CollectIOUsage(self):
    # Not supported on MacOS.
    try:
      io_counters = self._process.io_counters()
    except (AttributeError, NotImplementedError, psutil.Error):
      return
    sample = rdf_client_stats.IOSample(
        timestamp=rdfvalue.RDFDatetime.Now(),
        read_bytes=io_counters.read_bytes,
        write_bytes=io_counters.write_bytes,
        read_count=io_counters.read_count,
        write_count=io_counters.write_count)
    self._io_samples.append(sample)
    # Drop samples older than KEEP_DURATION.
    self._io_samples = self.IOSamplesBetween(
        start_time=rdfvalue.RDFDatetime.Now() - self.KEEP_DURATION,
        end_time=rdfvalue.RDFDatetime.Now())

  def _PrintCpuSamples(self):
    """Returns a string with the last 20 cpu load samples."""
    # NOTE(review): samples are built with `cpu_percent=...` above; confirm
    # that `.percent` resolves to that field on the sample type.
    samples = [str(sample.percent) for sample in self._cpu_samples[-20:]]
    return ", ".join(samples)

  def _PrintIOSample(self):
    try:
      return str(self._process.io_counters())
    except (NotImplementedError, AttributeError):
      return "Not available on this platform."
def _SamplesBetween(samples, start_time, end_time):
  """Returns samples satisfying start_time <= sample.timestamp <= end_time."""
  in_range = []
  for current in samples:
    if start_time <= current.timestamp <= end_time:
      in_range.append(current)
  return in_range
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.